Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.git-blame-ignore-revs95
-rw-r--r--deps/v8/.gitignore3
-rw-r--r--deps/v8/.style.yapf2
-rw-r--r--deps/v8/AUTHORS7
-rw-r--r--deps/v8/BUILD.bazel365
-rw-r--r--deps/v8/BUILD.gn965
-rw-r--r--deps/v8/DEPS84
-rw-r--r--deps/v8/PPC_OWNERS1
-rw-r--r--deps/v8/PRESUBMIT.py9
-rw-r--r--deps/v8/S390_OWNERS1
-rw-r--r--deps/v8/WATCHLISTS17
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h15
-rw-r--r--deps/v8/bazel/config/BUILD.bazel15
-rw-r--r--deps/v8/bazel/defs.bzl159
-rw-r--r--deps/v8/build_overrides/build.gni3
-rw-r--r--deps/v8/gni/snapshot_toolchain.gni4
-rw-r--r--deps/v8/gni/v8.gni88
-rw-r--r--deps/v8/include/cppgc/cross-thread-persistent.h6
-rw-r--r--deps/v8/include/cppgc/heap-consistency.h4
-rw-r--r--deps/v8/include/cppgc/internal/api-constants.h5
-rw-r--r--deps/v8/include/cppgc/internal/gc-info.h68
-rw-r--r--deps/v8/include/cppgc/internal/member-storage.h26
-rw-r--r--deps/v8/include/cppgc/internal/pointer-policies.h40
-rw-r--r--deps/v8/include/cppgc/internal/write-barrier.h20
-rw-r--r--deps/v8/include/cppgc/macros.h11
-rw-r--r--deps/v8/include/cppgc/member.h296
-rw-r--r--deps/v8/include/cppgc/persistent.h37
-rw-r--r--deps/v8/include/cppgc/type-traits.h7
-rw-r--r--deps/v8/include/cppgc/visitor.h17
-rw-r--r--deps/v8/include/js_protocol.pdl12
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h4
-rw-r--r--deps/v8/include/v8-array-buffer.h41
-rw-r--r--deps/v8/include/v8-callbacks.h12
-rw-r--r--deps/v8/include/v8-context.h50
-rw-r--r--deps/v8/include/v8-cppgc.h6
-rw-r--r--deps/v8/include/v8-embedder-heap.h169
-rw-r--r--deps/v8/include/v8-fast-api-calls.h10
-rw-r--r--deps/v8/include/v8-function-callback.h111
-rw-r--r--deps/v8/include/v8-function.h1
-rw-r--r--deps/v8/include/v8-inspector.h25
-rw-r--r--deps/v8/include/v8-internal.h390
-rw-r--r--deps/v8/include/v8-isolate.h62
-rw-r--r--deps/v8/include/v8-local-handle.h129
-rw-r--r--deps/v8/include/v8-metrics.h30
-rw-r--r--deps/v8/include/v8-object.h24
-rw-r--r--deps/v8/include/v8-persistent-handle.h43
-rw-r--r--deps/v8/include/v8-platform.h88
-rw-r--r--deps/v8/include/v8-primitive.h31
-rw-r--r--deps/v8/include/v8-profiler.h1
-rw-r--r--deps/v8/include/v8-script.h30
-rw-r--r--deps/v8/include/v8-snapshot.h12
-rw-r--r--deps/v8/include/v8-template.h4
-rw-r--r--deps/v8/include/v8-traced-handle.h46
-rw-r--r--deps/v8/include/v8-util.h12
-rw-r--r--deps/v8/include/v8-value.h30
-rw-r--r--deps/v8/include/v8-version.h8
-rw-r--r--deps/v8/include/v8-wasm.h3
-rw-r--r--deps/v8/include/v8config.h60
-rw-r--r--deps/v8/infra/mb/gn_isolate_map.pyl6
-rw-r--r--deps/v8/infra/mb/mb_config.pyl203
-rw-r--r--deps/v8/infra/testing/builders.pyl385
-rw-r--r--deps/v8/src/DEPS9
-rw-r--r--deps/v8/src/api/api-arguments-inl.h34
-rw-r--r--deps/v8/src/api/api-arguments.h60
-rw-r--r--deps/v8/src/api/api-inl.h105
-rw-r--r--deps/v8/src/api/api-macros.h29
-rw-r--r--deps/v8/src/api/api-natives.cc4
-rw-r--r--deps/v8/src/api/api.cc1093
-rw-r--r--deps/v8/src/api/api.h150
-rw-r--r--deps/v8/src/asmjs/asm-js.cc2
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc2
-rw-r--r--deps/v8/src/asmjs/asm-parser.h2
-rw-r--r--deps/v8/src/asmjs/asm-types.h2
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc4
-rw-r--r--deps/v8/src/ast/ast-value-factory.h4
-rw-r--r--deps/v8/src/ast/ast.h7
-rw-r--r--deps/v8/src/ast/prettyprinter.cc4
-rw-r--r--deps/v8/src/ast/scopes.cc39
-rw-r--r--deps/v8/src/ast/scopes.h2
-rw-r--r--deps/v8/src/base/DEPS6
-rw-r--r--deps/v8/src/base/bit-field.h2
-rw-r--r--deps/v8/src/base/bits.h29
-rw-r--r--deps/v8/src/base/build_config.h10
-rw-r--r--deps/v8/src/base/container-utils.h66
-rw-r--r--deps/v8/src/base/contextual.h (renamed from deps/v8/src/torque/contextual.h)67
-rw-r--r--deps/v8/src/base/cpu.cc31
-rw-r--r--deps/v8/src/base/cpu.h4
-rw-r--r--deps/v8/src/base/ieee754.cc15
-rw-r--r--deps/v8/src/base/ieee754.h25
-rw-r--r--deps/v8/src/base/immediate-crash.h4
-rw-r--r--deps/v8/src/base/ios-headers.h31
-rw-r--r--deps/v8/src/base/logging.h10
-rw-r--r--deps/v8/src/base/numbers/diy-fp.h9
-rw-r--r--deps/v8/src/base/numbers/double.h3
-rw-r--r--deps/v8/src/base/numbers/fast-dtoa.cc113
-rw-r--r--deps/v8/src/base/platform/memory.h4
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc2
-rw-r--r--deps/v8/src/base/platform/platform-darwin.cc153
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc2
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc4
-rw-r--r--deps/v8/src/base/platform/platform-ios.cc22
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc133
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc43
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc2
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc6
-rw-r--r--deps/v8/src/base/platform/platform.cc29
-rw-r--r--deps/v8/src/base/platform/platform.h16
-rw-r--r--deps/v8/src/base/platform/time.cc44
-rw-r--r--deps/v8/src/base/platform/time.h15
-rw-r--r--deps/v8/src/base/small-vector.h82
-rw-r--r--deps/v8/src/base/sys-info.cc16
-rw-r--r--deps/v8/src/base/sys-info.h6
-rw-r--r--deps/v8/src/base/template-utils.h26
-rw-r--r--deps/v8/src/base/threaded-list.h22
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h8
-rw-r--r--deps/v8/src/base/vector.h48
-rw-r--r--deps/v8/src/base/vlq.h17
-rw-r--r--deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h103
-rw-r--r--deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h150
-rw-r--r--deps/v8/src/baseline/baseline-assembler-inl.h5
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h32
-rw-r--r--deps/v8/src/baseline/baseline-batch-compiler.cc4
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc103
-rw-r--r--deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h113
-rw-r--r--deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h133
-rw-r--r--deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h6
-rw-r--r--deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h113
-rw-r--r--deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h184
-rw-r--r--deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h112
-rw-r--r--deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h184
-rw-r--r--deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h195
-rw-r--r--deps/v8/src/bigint/tostring.cc3
-rw-r--r--deps/v8/src/builtins/accessors.cc52
-rw-r--r--deps/v8/src/builtins/accessors.h2
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc186
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc504
-rw-r--r--deps/v8/src/builtins/array-from.tq29
-rw-r--r--deps/v8/src/builtins/array-join.tq70
-rw-r--r--deps/v8/src/builtins/array-of.tq23
-rw-r--r--deps/v8/src/builtins/array-reverse.tq77
-rw-r--r--deps/v8/src/builtins/array-to-reversed.tq95
-rw-r--r--deps/v8/src/builtins/arraybuffer.tq21
-rw-r--r--deps/v8/src/builtins/base.tq71
-rw-r--r--deps/v8/src/builtins/builtins-api.cc23
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-array.cc27
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc181
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-async-module.cc3
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.h119
-rw-r--r--deps/v8/src/builtins/builtins-bigint.tq384
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc37
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc32
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.h10
-rw-r--r--deps/v8/src/builtins/builtins-console.cc2
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc35
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc36
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-date.cc8
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h62
-rw-r--r--deps/v8/src/builtins/builtins-descriptors.h7
-rw-r--r--deps/v8/src/builtins/builtins-error.cc2
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc21
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc229
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc25
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc383
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.h58
-rw-r--r--deps/v8/src/builtins/builtins-object.cc36
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc26
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc60
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-shadow-realm-gen.cc29
-rw-r--r--deps/v8/src/builtins/builtins-shadow-realm.cc8
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc496
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h22
-rw-r--r--deps/v8/src/builtins/builtins-string.tq8
-rw-r--r--deps/v8/src/builtins/builtins-struct.cc80
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc23
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc1
-rw-r--r--deps/v8/src/builtins/builtins-utils.h24
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc24
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-web-snapshots.cc119
-rw-r--r--deps/v8/src/builtins/builtins.cc190
-rw-r--r--deps/v8/src/builtins/builtins.h69
-rw-r--r--deps/v8/src/builtins/cast.tq8
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc7
-rw-r--r--deps/v8/src/builtins/constants-table-builder.h6
-rw-r--r--deps/v8/src/builtins/conversion.tq25
-rw-r--r--deps/v8/src/builtins/convert.tq13
-rw-r--r--deps/v8/src/builtins/data-view.tq27
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc219
-rw-r--r--deps/v8/src/builtins/ic-callable.tq2
-rw-r--r--deps/v8/src/builtins/iterator-from.tq182
-rw-r--r--deps/v8/src/builtins/iterator-helpers.tq456
-rw-r--r--deps/v8/src/builtins/iterator.tq52
-rw-r--r--deps/v8/src/builtins/loong64/builtins-loong64.cc657
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc116
-rw-r--r--deps/v8/src/builtins/number.tq138
-rw-r--r--deps/v8/src/builtins/object.tq147
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc321
-rw-r--r--deps/v8/src/builtins/promise-resolve.tq9
-rw-r--r--deps/v8/src/builtins/proxy-set-property.tq2
-rw-r--r--deps/v8/src/builtins/reflect.tq16
-rw-r--r--deps/v8/src/builtins/regexp-replace.tq60
-rw-r--r--deps/v8/src/builtins/riscv/builtins-riscv.cc291
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc332
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc31
-rw-r--r--deps/v8/src/builtins/string-iswellformed.tq48
-rw-r--r--deps/v8/src/builtins/string-towellformed.tq62
-rw-r--r--deps/v8/src/builtins/torque-internal.tq17
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq22
-rw-r--r--deps/v8/src/builtins/typed-array-from.tq8
-rw-r--r--deps/v8/src/builtins/typed-array.tq14
-rw-r--r--deps/v8/src/builtins/wasm.tq210
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc857
-rw-r--r--deps/v8/src/codegen/OWNERS1
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm-inl.h7
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc17
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h6
-rw-r--r--deps/v8/src/codegen/arm/constants-arm.h28
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h3
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc529
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h158
-rw-r--r--deps/v8/src/codegen/arm/register-arm.h1
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h23
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc213
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h647
-rw-r--r--deps/v8/src/codegen/arm64/constants-arm64.h161
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64-inl.h32
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64.h1
-rw-r--r--deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h3
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h398
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc1055
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h524
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h39
-rw-r--r--deps/v8/src/codegen/assembler.cc17
-rw-r--r--deps/v8/src/codegen/assembler.h21
-rw-r--r--deps/v8/src/codegen/background-merge-task.h7
-rw-r--r--deps/v8/src/codegen/bailout-reason.h37
-rw-r--r--deps/v8/src/codegen/callable.h8
-rw-r--r--deps/v8/src/codegen/code-comments.h2
-rw-r--r--deps/v8/src/codegen/code-desc.h2
-rw-r--r--deps/v8/src/codegen/code-factory.cc61
-rw-r--r--deps/v8/src/codegen/code-factory.h9
-rw-r--r--deps/v8/src/codegen/code-reference.cc11
-rw-r--r--deps/v8/src/codegen/code-reference.h17
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc1403
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h199
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc4
-rw-r--r--deps/v8/src/codegen/compiler.cc398
-rw-r--r--deps/v8/src/codegen/compiler.h58
-rw-r--r--deps/v8/src/codegen/cpu-features.h4
-rw-r--r--deps/v8/src/codegen/external-reference.cc129
-rw-r--r--deps/v8/src/codegen/external-reference.h31
-rw-r--r--deps/v8/src/codegen/handler-table.cc8
-rw-r--r--deps/v8/src/codegen/handler-table.h15
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h14
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc20
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h20
-rw-r--r--deps/v8/src/codegen/ia32/constants-ia32.h11
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h17
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc442
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h106
-rw-r--r--deps/v8/src/codegen/ia32/register-ia32.h4
-rw-r--r--deps/v8/src/codegen/interface-descriptors-inl.h28
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc2
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h124
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64-inl.h65
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.cc69
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.h47
-rw-r--r--deps/v8/src/codegen/loong64/constants-loong64.h17
-rw-r--r--deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h3
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.cc1019
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.h250
-rw-r--r--deps/v8/src/codegen/loong64/register-loong64.h21
-rw-r--r--deps/v8/src/codegen/machine-type.h2
-rw-r--r--deps/v8/src/codegen/macro-assembler-base.cc (renamed from deps/v8/src/codegen/turbo-assembler.cc)33
-rw-r--r--deps/v8/src/codegen/macro-assembler-base.h (renamed from deps/v8/src/codegen/turbo-assembler.h)39
-rw-r--r--deps/v8/src/codegen/macro-assembler.h24
-rw-r--r--deps/v8/src/codegen/maglev-safepoint-table.cc14
-rw-r--r--deps/v8/src/codegen/maglev-safepoint-table.h16
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64-inl.h6
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc22
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h16
-rw-r--r--deps/v8/src/codegen/mips64/constants-mips64.h17
-rw-r--r--deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h3
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc908
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h230
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h1
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc10
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h3
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc-inl.h10
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc16
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h6
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h73
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h3
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc1541
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h259
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h7
-rw-r--r--deps/v8/src/codegen/register-base.h9
-rw-r--r--deps/v8/src/codegen/reloc-info.cc101
-rw-r--r--deps/v8/src/codegen/reloc-info.h45
-rw-r--r--deps/v8/src/codegen/riscv/assembler-riscv-inl.h28
-rw-r--r--deps/v8/src/codegen/riscv/assembler-riscv.cc498
-rw-r--r--deps/v8/src/codegen/riscv/assembler-riscv.h40
-rw-r--r--deps/v8/src/codegen/riscv/base-assembler-riscv.h2
-rw-r--r--deps/v8/src/codegen/riscv/base-constants-riscv.h58
-rw-r--r--deps/v8/src/codegen/riscv/base-riscv-i.cc8
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-a.h71
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-c.h90
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-d.h97
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-f.h86
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-i.h124
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-m.h46
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-v.h1027
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-zicsr.h16
-rw-r--r--deps/v8/src/codegen/riscv/constant-riscv-zifencei.h4
-rw-r--r--deps/v8/src/codegen/riscv/extension-riscv-v.cc41
-rw-r--r--deps/v8/src/codegen/riscv/extension-riscv-v.h26
-rw-r--r--deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h3
-rw-r--r--deps/v8/src/codegen/riscv/macro-assembler-riscv.cc1116
-rw-r--r--deps/v8/src/codegen/riscv/macro-assembler-riscv.h261
-rw-r--r--deps/v8/src/codegen/riscv/register-riscv.h7
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390-inl.h10
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc12
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h9
-rw-r--r--deps/v8/src/codegen/s390/constants-s390.h71
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h3
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc1298
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h134
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h1
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc25
-rw-r--r--deps/v8/src/codegen/safepoint-table.h18
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc379
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h48
-rw-r--r--deps/v8/src/codegen/signature.h37
-rw-r--r--deps/v8/src/codegen/source-position-table.cc3
-rw-r--r--deps/v8/src/codegen/source-position-table.h8
-rw-r--r--deps/v8/src/codegen/source-position.cc31
-rw-r--r--deps/v8/src/codegen/source-position.h21
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h73
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc202
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h211
-rw-r--r--deps/v8/src/codegen/x64/constants-x64.h13
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h3
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc1163
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h261
-rw-r--r--deps/v8/src/codegen/x64/register-x64.h17
-rw-r--r--deps/v8/src/common/assert-scope.cc16
-rw-r--r--deps/v8/src/common/assert-scope.h57
-rw-r--r--deps/v8/src/common/code-memory-access-inl.h13
-rw-r--r--deps/v8/src/common/globals.h161
-rw-r--r--deps/v8/src/common/message-template.h25
-rw-r--r--deps/v8/src/common/operation.h2
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h165
-rw-r--r--deps/v8/src/common/ptr-compr.cc19
-rw-r--r--deps/v8/src/common/ptr-compr.h73
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc7
-rw-r--r--deps/v8/src/compiler/DEPS1
-rw-r--r--deps/v8/src/compiler/OWNERS1
-rw-r--r--deps/v8/src/compiler/access-builder.cc66
-rw-r--r--deps/v8/src/compiler/access-builder.h15
-rw-r--r--deps/v8/src/compiler/access-info.cc177
-rw-r--r--deps/v8/src/compiler/access-info.h90
-rw-r--r--deps/v8/src/compiler/all-nodes.cc14
-rw-r--r--deps/v8/src/compiler/all-nodes.h8
-rw-r--r--deps/v8/src/compiler/allocation-builder.h7
-rw-r--r--deps/v8/src/compiler/backend/OWNERS2
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc388
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h6
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc6
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc58
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc721
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h10
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc9
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc217
-rw-r--r--deps/v8/src/compiler/backend/bitcast-elider.cc57
-rw-r--r--deps/v8/src/compiler/backend/bitcast-elider.h9
-rw-r--r--deps/v8/src/compiler/backend/code-generator-impl.h28
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc164
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h24
-rw-r--r--deps/v8/src/compiler/backend/frame-elider.cc12
-rw-r--r--deps/v8/src/compiler/backend/gap-resolver.cc286
-rw-r--r--deps/v8/src/compiler/backend/gap-resolver.h36
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc253
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h3
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc3
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc55
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h129
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.h2
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc179
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h13
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc50
-rw-r--r--deps/v8/src/compiler/backend/instruction.h54
-rw-r--r--deps/v8/src/compiler/backend/jump-threading.cc10
-rw-r--r--deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc408
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h742
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc280
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc686
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h5
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc2
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc63
-rw-r--r--deps/v8/src/compiler/backend/move-optimizer.cc35
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc826
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h10
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc5
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc33
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc6
-rw-r--r--deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc339
-rw-r--r--deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h3
-rw-r--r--deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc5
-rw-r--r--deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h69
-rw-r--r--deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc23
-rw-r--r--deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc31
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc219
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-codes-s390.h8
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc5
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc32
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc2503
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h168
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc161
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc692
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc11
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc88
-rw-r--r--deps/v8/src/compiler/branch-elimination.h1
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc71
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc59
-rw-r--r--deps/v8/src/compiler/code-assembler.cc12
-rw-r--r--deps/v8/src/compiler/code-assembler.h30
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc41
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h11
-rw-r--r--deps/v8/src/compiler/common-operator.cc23
-rw-r--r--deps/v8/src/compiler/common-operator.h7
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc262
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h9
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.cc9
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc10
-rw-r--r--deps/v8/src/compiler/decompression-optimizer.cc7
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc1703
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc16
-rw-r--r--deps/v8/src/compiler/fast-api-calls.cc12
-rw-r--r--deps/v8/src/compiler/frame.cc1
-rw-r--r--deps/v8/src/compiler/globals.h2
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc56
-rw-r--r--deps/v8/src/compiler/graph-assembler.h72
-rw-r--r--deps/v8/src/compiler/graph-reducer.h6
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc31
-rw-r--r--deps/v8/src/compiler/graph.cc8
-rw-r--r--deps/v8/src/compiler/graph.h8
-rw-r--r--deps/v8/src/compiler/heap-refs.cc762
-rw-r--r--deps/v8/src/compiler/heap-refs.h465
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc178
-rw-r--r--deps/v8/src/compiler/int64-lowering.h33
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc465
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h7
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc70
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc357
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h8
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc111
-rw-r--r--deps/v8/src/compiler/js-graph.cc41
-rw-r--r--deps/v8/src/compiler/js-graph.h11
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc118
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h90
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc80
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h15
-rw-r--r--deps/v8/src/compiler/js-inlining.cc188
-rw-r--r--deps/v8/src/compiler/js-inlining.h12
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc19
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc1466
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h20
-rw-r--r--deps/v8/src/compiler/js-operator.cc46
-rw-r--r--deps/v8/src/compiler/js-operator.h100
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc77
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc119
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h2
-rw-r--r--deps/v8/src/compiler/late-escape-analysis.cc1
-rw-r--r--deps/v8/src/compiler/linear-scheduler.cc4
-rw-r--r--deps/v8/src/compiler/linkage.h5
-rw-r--r--deps/v8/src/compiler/load-elimination.cc1
-rw-r--r--deps/v8/src/compiler/load-elimination.h2
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc57
-rw-r--r--deps/v8/src/compiler/loop-analysis.h6
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc10
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc53
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.h2
-rw-r--r--deps/v8/src/compiler/machine-graph.cc6
-rw-r--r--deps/v8/src/compiler/machine-graph.h3
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc483
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h28
-rw-r--r--deps/v8/src/compiler/machine-operator.cc109
-rw-r--r--deps/v8/src/compiler/machine-operator.h30
-rw-r--r--deps/v8/src/compiler/map-inference.cc3
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc109
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc7
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h2
-rw-r--r--deps/v8/src/compiler/node-matchers.h39
-rw-r--r--deps/v8/src/compiler/node-properties.cc82
-rw-r--r--deps/v8/src/compiler/node-properties.h10
-rw-r--r--deps/v8/src/compiler/opcodes.h548
-rw-r--r--deps/v8/src/compiler/operation-typer.cc142
-rw-r--r--deps/v8/src/compiler/operation-typer.h5
-rw-r--r--deps/v8/src/compiler/operator-properties.cc5
-rw-r--r--deps/v8/src/compiler/phase.h40
-rw-r--r--deps/v8/src/compiler/pipeline.cc740
-rw-r--r--deps/v8/src/compiler/pipeline.h20
-rw-r--r--deps/v8/src/compiler/processed-feedback.h14
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc26
-rw-r--r--deps/v8/src/compiler/property-access-builder.h10
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h8
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc84
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.h8
-rw-r--r--deps/v8/src/compiler/representation-change.cc45
-rw-r--r--deps/v8/src/compiler/revectorizer.cc964
-rw-r--r--deps/v8/src/compiler/revectorizer.h212
-rw-r--r--deps/v8/src/compiler/schedule.cc18
-rw-r--r--deps/v8/src/compiler/scheduler.cc41
-rw-r--r--deps/v8/src/compiler/simplified-lowering-verifier.cc6
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc283
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc2
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc588
-rw-r--r--deps/v8/src/compiler/simplified-operator.h111
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc13
-rw-r--r--deps/v8/src/compiler/string-builder-optimizer.cc1193
-rw-r--r--deps/v8/src/compiler/string-builder-optimizer.h378
-rw-r--r--deps/v8/src/compiler/turbofan-disabled.cc25
-rw-r--r--deps/v8/src/compiler/turbofan-enabled.cc27
-rw-r--r--deps/v8/src/compiler/turbofan.h39
-rw-r--r--deps/v8/src/compiler/turboshaft/assembler.cc16
-rw-r--r--deps/v8/src/compiler/turboshaft/assembler.h2529
-rw-r--r--deps/v8/src/compiler/turboshaft/assert-types-reducer.h153
-rw-r--r--deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h476
-rw-r--r--deps/v8/src/compiler/turboshaft/build-graph-phase.cc31
-rw-r--r--deps/v8/src/compiler/turboshaft/build-graph-phase.h23
-rw-r--r--deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h144
-rw-r--r--deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.cc19
-rw-r--r--deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/dead-code-elimination-reducer.h465
-rw-r--r--deps/v8/src/compiler/turboshaft/decompression-optimization-phase.cc16
-rw-r--r--deps/v8/src/compiler/turboshaft/decompression-optimization-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/decompression-optimization.cc13
-rw-r--r--deps/v8/src/compiler/turboshaft/define-assembler-macros.inc69
-rw-r--r--deps/v8/src/compiler/turboshaft/deopt-data.h11
-rw-r--r--deps/v8/src/compiler/turboshaft/fast-hash.h9
-rw-r--r--deps/v8/src/compiler/turboshaft/graph-builder.cc1254
-rw-r--r--deps/v8/src/compiler/turboshaft/graph-builder.h6
-rw-r--r--deps/v8/src/compiler/turboshaft/graph-visualizer.cc24
-rw-r--r--deps/v8/src/compiler/turboshaft/graph-visualizer.h3
-rw-r--r--deps/v8/src/compiler/turboshaft/graph.cc24
-rw-r--r--deps/v8/src/compiler/turboshaft/graph.h325
-rw-r--r--deps/v8/src/compiler/turboshaft/index.h358
-rw-r--r--deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.cc101
-rw-r--r--deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.h67
-rw-r--r--deps/v8/src/compiler/turboshaft/late-optimization-phase.cc26
-rw-r--r--deps/v8/src/compiler/turboshaft/late-optimization-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/layered-hash-map.h194
-rw-r--r--deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc21
-rw-r--r--deps/v8/src/compiler/turboshaft/machine-lowering-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/machine-lowering-reducer.h1811
-rw-r--r--deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h150
-rw-r--r--deps/v8/src/compiler/turboshaft/memory-optimization-reducer.cc166
-rw-r--r--deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h269
-rw-r--r--deps/v8/src/compiler/turboshaft/operation-matching.h25
-rw-r--r--deps/v8/src/compiler/turboshaft/operations.cc449
-rw-r--r--deps/v8/src/compiler/turboshaft/operations.h1903
-rw-r--r--deps/v8/src/compiler/turboshaft/optimization-phase.h678
-rw-r--r--deps/v8/src/compiler/turboshaft/optimize-phase.cc30
-rw-r--r--deps/v8/src/compiler/turboshaft/optimize-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/phase.cc82
-rw-r--r--deps/v8/src/compiler/turboshaft/phase.h81
-rw-r--r--deps/v8/src/compiler/turboshaft/recreate-schedule-phase.cc18
-rw-r--r--deps/v8/src/compiler/turboshaft/recreate-schedule-phase.h22
-rw-r--r--deps/v8/src/compiler/turboshaft/recreate-schedule.cc176
-rw-r--r--deps/v8/src/compiler/turboshaft/recreate-schedule.h2
-rw-r--r--deps/v8/src/compiler/turboshaft/reducer-traits.h81
-rw-r--r--deps/v8/src/compiler/turboshaft/representations.h50
-rw-r--r--deps/v8/src/compiler/turboshaft/runtime-call-descriptors.h104
-rw-r--r--deps/v8/src/compiler/turboshaft/select-lowering-reducer.h21
-rw-r--r--deps/v8/src/compiler/turboshaft/sidetable.h32
-rw-r--r--deps/v8/src/compiler/turboshaft/snapshot-table.h190
-rw-r--r--deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.cc16
-rw-r--r--deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/tag-untag-lowering-reducer.h64
-rw-r--r--deps/v8/src/compiler/turboshaft/tracing.h47
-rw-r--r--deps/v8/src/compiler/turboshaft/type-assertions-phase.cc31
-rw-r--r--deps/v8/src/compiler/turboshaft/type-assertions-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/type-inference-analysis.h566
-rw-r--r--deps/v8/src/compiler/turboshaft/type-inference-reducer.h557
-rw-r--r--deps/v8/src/compiler/turboshaft/type-parser.cc33
-rw-r--r--deps/v8/src/compiler/turboshaft/type-parser.h124
-rw-r--r--deps/v8/src/compiler/turboshaft/typed-optimizations-phase.cc30
-rw-r--r--deps/v8/src/compiler/turboshaft/typed-optimizations-phase.h20
-rw-r--r--deps/v8/src/compiler/turboshaft/typed-optimizations-reducer.h129
-rw-r--r--deps/v8/src/compiler/turboshaft/typer.cc99
-rw-r--r--deps/v8/src/compiler/turboshaft/typer.h1594
-rw-r--r--deps/v8/src/compiler/turboshaft/types.cc715
-rw-r--r--deps/v8/src/compiler/turboshaft/types.h919
-rw-r--r--deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc40
-rw-r--r--deps/v8/src/compiler/turboshaft/uniform-reducer-adapter.h160
-rw-r--r--deps/v8/src/compiler/turboshaft/utils.h19
-rw-r--r--deps/v8/src/compiler/turboshaft/value-numbering-reducer.h16
-rw-r--r--deps/v8/src/compiler/turboshaft/variable-reducer.h308
-rw-r--r--deps/v8/src/compiler/type-cache.h6
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc48
-rw-r--r--deps/v8/src/compiler/typer.cc63
-rw-r--r--deps/v8/src/compiler/types.cc34
-rw-r--r--deps/v8/src/compiler/types.h21
-rw-r--r--deps/v8/src/compiler/use-info.h8
-rw-r--r--deps/v8/src/compiler/verifier.cc73
-rw-r--r--deps/v8/src/compiler/wasm-call-descriptors.cc57
-rw-r--r--deps/v8/src/compiler/wasm-call-descriptors.h67
-rw-r--r--deps/v8/src/compiler/wasm-compiler-definitions.h8
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc1997
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h203
-rw-r--r--deps/v8/src/compiler/wasm-gc-lowering.cc750
-rw-r--r--deps/v8/src/compiler/wasm-gc-lowering.h25
-rw-r--r--deps/v8/src/compiler/wasm-gc-operator-reducer.cc129
-rw-r--r--deps/v8/src/compiler/wasm-gc-operator-reducer.h4
-rw-r--r--deps/v8/src/compiler/wasm-graph-assembler.cc137
-rw-r--r--deps/v8/src/compiler/wasm-graph-assembler.h45
-rw-r--r--deps/v8/src/compiler/wasm-inlining-into-js.cc346
-rw-r--r--deps/v8/src/compiler/wasm-inlining-into-js.h39
-rw-r--r--deps/v8/src/compiler/wasm-inlining.cc233
-rw-r--r--deps/v8/src/compiler/wasm-inlining.h37
-rw-r--r--deps/v8/src/compiler/wasm-load-elimination.cc526
-rw-r--r--deps/v8/src/compiler/wasm-load-elimination.h155
-rw-r--r--deps/v8/src/compiler/wasm-typer.cc171
-rw-r--r--deps/v8/src/compiler/wasm-typer.h1
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.cc31
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.h4
-rw-r--r--deps/v8/src/d8/d8-console.cc81
-rw-r--r--deps/v8/src/d8/d8-console.h13
-rw-r--r--deps/v8/src/d8/d8-test.cc176
-rw-r--r--deps/v8/src/d8/d8.cc913
-rw-r--r--deps/v8/src/d8/d8.h44
-rw-r--r--deps/v8/src/debug/debug-coverage.cc8
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc125
-rw-r--r--deps/v8/src/debug/debug-frames.cc3
-rw-r--r--deps/v8/src/debug/debug-interface.cc167
-rw-r--r--deps/v8/src/debug/debug-interface.h48
-rw-r--r--deps/v8/src/debug/debug-scopes.cc27
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc21
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.h3
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.cc4
-rw-r--r--deps/v8/src/debug/debug.cc159
-rw-r--r--deps/v8/src/debug/debug.h6
-rw-r--r--deps/v8/src/debug/interface-types.h27
-rw-r--r--deps/v8/src/debug/liveedit.cc10
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server.cc2
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc10
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h2
-rw-r--r--deps/v8/src/deoptimizer/deoptimize-reason.h2
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc325
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h35
-rw-r--r--deps/v8/src/deoptimizer/translated-state.cc359
-rw-r--r--deps/v8/src/deoptimizer/translated-state.h2
-rw-r--r--deps/v8/src/deoptimizer/translation-array.cc474
-rw-r--r--deps/v8/src/deoptimizer/translation-array.h116
-rw-r--r--deps/v8/src/deoptimizer/translation-opcode.h24
-rw-r--r--deps/v8/src/diagnostics/arm64/disasm-arm64.cc266
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc26
-rw-r--r--deps/v8/src/diagnostics/etw-jit-win.cc1
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.cc4
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.h4
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc226
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc234
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc102
-rw-r--r--deps/v8/src/diagnostics/perf-jit.h10
-rw-r--r--deps/v8/src/diagnostics/riscv/disasm-riscv.cc61
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc2
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc5
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.cc6
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.h2
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc4
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.cc10
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.h2
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc359
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.h8
-rw-r--r--deps/v8/src/execution/execution.cc10
-rw-r--r--deps/v8/src/execution/execution.h2
-rw-r--r--deps/v8/src/execution/frames-inl.h90
-rw-r--r--deps/v8/src/execution/frames.cc975
-rw-r--r--deps/v8/src/execution/frames.h190
-rw-r--r--deps/v8/src/execution/futex-emulation.cc6
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.cc6
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.h2
-rw-r--r--deps/v8/src/execution/interrupts-scope.cc9
-rw-r--r--deps/v8/src/execution/interrupts-scope.h38
-rw-r--r--deps/v8/src/execution/isolate-data.h64
-rw-r--r--deps/v8/src/execution/isolate-inl.h6
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h48
-rw-r--r--deps/v8/src/execution/isolate.cc897
-rw-r--r--deps/v8/src/execution/isolate.h187
-rw-r--r--deps/v8/src/execution/local-isolate.h7
-rw-r--r--deps/v8/src/execution/loong64/frame-constants-loong64.cc6
-rw-r--r--deps/v8/src/execution/loong64/frame-constants-loong64.h6
-rw-r--r--deps/v8/src/execution/loong64/simulator-loong64.cc104
-rw-r--r--deps/v8/src/execution/loong64/simulator-loong64.h4
-rw-r--r--deps/v8/src/execution/messages.cc24
-rw-r--r--deps/v8/src/execution/messages.h10
-rw-r--r--deps/v8/src/execution/microtask-queue.cc6
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.cc6
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.h2
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc4
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.cc6
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h13
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc29
-rw-r--r--deps/v8/src/execution/protectors.h9
-rw-r--r--deps/v8/src/execution/riscv/frame-constants-riscv.cc6
-rw-r--r--deps/v8/src/execution/riscv/frame-constants-riscv.h2
-rw-r--r--deps/v8/src/execution/riscv/simulator-riscv.cc85
-rw-r--r--deps/v8/src/execution/riscv/simulator-riscv.h3
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.cc6
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.h8
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc4
-rw-r--r--deps/v8/src/execution/simulator.h5
-rw-r--r--deps/v8/src/execution/stack-guard.cc6
-rw-r--r--deps/v8/src/execution/stack-guard.h6
-rw-r--r--deps/v8/src/execution/thread-local-top.cc2
-rw-r--r--deps/v8/src/execution/thread-local-top.h6
-rw-r--r--deps/v8/src/execution/tiering-manager.cc136
-rw-r--r--deps/v8/src/execution/tiering-manager.h9
-rw-r--r--deps/v8/src/execution/vm-state-inl.h7
-rw-r--r--deps/v8/src/execution/vm-state.h18
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.cc8
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.h8
-rw-r--r--deps/v8/src/extensions/gc-extension.cc3
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc3
-rw-r--r--deps/v8/src/flags/flag-definitions.h441
-rw-r--r--deps/v8/src/flags/flags.cc88
-rw-r--r--deps/v8/src/flags/flags.h1
-rw-r--r--deps/v8/src/handles/global-handles-inl.h8
-rw-r--r--deps/v8/src/handles/global-handles.cc112
-rw-r--r--deps/v8/src/handles/global-handles.h29
-rw-r--r--deps/v8/src/handles/handles.cc8
-rw-r--r--deps/v8/src/handles/shared-object-conveyor-handles.cc2
-rw-r--r--deps/v8/src/handles/traced-handles.cc252
-rw-r--r--deps/v8/src/handles/traced-handles.h25
-rw-r--r--deps/v8/src/heap/OWNERS1
-rw-r--r--deps/v8/src/heap/allocation-observer.cc16
-rw-r--r--deps/v8/src/heap/allocation-observer.h30
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.cc73
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.h18
-rw-r--r--deps/v8/src/heap/base-space.h7
-rw-r--r--deps/v8/src/heap/base/asm/arm/push_registers_asm.cc39
-rw-r--r--deps/v8/src/heap/base/asm/arm/save_registers_asm.cc36
-rw-r--r--deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc62
-rw-r--r--deps/v8/src/heap/base/asm/arm64/push_registers_masm.S32
-rw-r--r--deps/v8/src/heap/base/asm/arm64/save_registers_asm.cc50
-rw-r--r--deps/v8/src/heap/base/asm/arm64/save_registers_masm.S24
-rw-r--r--deps/v8/src/heap/base/asm/ia32/push_registers_asm.cc53
-rw-r--r--deps/v8/src/heap/base/asm/ia32/push_registers_masm.asm48
-rw-r--r--deps/v8/src/heap/base/asm/ia32/save_registers_asm.cc48
-rw-r--r--deps/v8/src/heap/base/asm/ia32/save_registers_masm.asm36
-rw-r--r--deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc48
-rw-r--r--deps/v8/src/heap/base/asm/loong64/save_registers_asm.cc40
-rw-r--r--deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc49
-rw-r--r--deps/v8/src/heap/base/asm/mips64/save_registers_asm.cc41
-rw-r--r--deps/v8/src/heap/base/asm/ppc/push_registers_asm.cc97
-rw-r--r--deps/v8/src/heap/base/asm/ppc/save_registers_asm.cc110
-rw-r--r--deps/v8/src/heap/base/asm/riscv/push_registers_asm.cc93
-rw-r--r--deps/v8/src/heap/base/asm/riscv/save_registers_asm.cc68
-rw-r--r--deps/v8/src/heap/base/asm/s390/push_registers_asm.cc37
-rw-r--r--deps/v8/src/heap/base/asm/s390/save_registers_asm.cc32
-rw-r--r--deps/v8/src/heap/base/asm/x64/push_registers_asm.cc106
-rw-r--r--deps/v8/src/heap/base/asm/x64/push_registers_masm.asm57
-rw-r--r--deps/v8/src/heap/base/asm/x64/save_registers_asm.cc94
-rw-r--r--deps/v8/src/heap/base/asm/x64/save_registers_masm.asm43
-rw-r--r--deps/v8/src/heap/base/basic-slot-set.h5
-rw-r--r--deps/v8/src/heap/base/stack.cc188
-rw-r--r--deps/v8/src/heap/base/stack.h122
-rw-r--r--deps/v8/src/heap/base/worklist.cc13
-rw-r--r--deps/v8/src/heap/base/worklist.h22
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.cc21
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h21
-rw-r--r--deps/v8/src/heap/code-range.cc270
-rw-r--r--deps/v8/src/heap/code-range.h64
-rw-r--r--deps/v8/src/heap/code-stats.cc15
-rw-r--r--deps/v8/src/heap/combined-heap.cc3
-rw-r--r--deps/v8/src/heap/combined-heap.h1
-rw-r--r--deps/v8/src/heap/concurrent-allocator-inl.h1
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc24
-rw-r--r--deps/v8/src/heap/concurrent-allocator.h9
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc570
-rw-r--r--deps/v8/src/heap/conservative-stack-visitor.cc150
-rw-r--r--deps/v8/src/heap/conservative-stack-visitor.h20
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc318
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h65
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h26
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-marking-state.h4
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc17
-rw-r--r--deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.cc36
-rw-r--r--deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.h54
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h35
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc8
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h5
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc26
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h29
-rw-r--r--deps/v8/src/heap/cppgc-js/wrappable-info-inl.h50
-rw-r--r--deps/v8/src/heap/cppgc-js/wrappable-info.h34
-rw-r--r--deps/v8/src/heap/cppgc/DEPS5
-rw-r--r--deps/v8/src/heap/cppgc/concurrent-marker.cc14
-rw-r--r--deps/v8/src/heap/cppgc/gc-info.cc32
-rw-r--r--deps/v8/src/heap/cppgc/globals.h2
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc44
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h13
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.cc3
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc11
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc31
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.cc47
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h113
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc6
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.h2
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.cc2
-rw-r--r--deps/v8/src/heap/cppgc/marking-worklists.cc3
-rw-r--r--deps/v8/src/heap/cppgc/marking-worklists.h18
-rw-r--r--deps/v8/src/heap/cppgc/member-storage.cc12
-rw-r--r--deps/v8/src/heap/cppgc/member-storage.h5
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc33
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h43
-rw-r--r--deps/v8/src/heap/cppgc/object-start-bitmap.h32
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc2
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.cc10
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.h9
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc59
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.cc49
-rw-r--r--deps/v8/src/heap/embedder-tracing-inl.h46
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc211
-rw-r--r--deps/v8/src/heap/embedder-tracing.h240
-rw-r--r--deps/v8/src/heap/evacuation-verifier-inl.h6
-rw-r--r--deps/v8/src/heap/evacuation-verifier.cc26
-rw-r--r--deps/v8/src/heap/evacuation-verifier.h10
-rw-r--r--deps/v8/src/heap/factory-base-inl.h2
-rw-r--r--deps/v8/src/heap/factory-base.cc118
-rw-r--r--deps/v8/src/heap/factory-base.h36
-rw-r--r--deps/v8/src/heap/factory.cc616
-rw-r--r--deps/v8/src/heap/factory.h73
-rw-r--r--deps/v8/src/heap/free-list.h1
-rw-r--r--deps/v8/src/heap/gc-tracer.cc106
-rw-r--r--deps/v8/src/heap/gc-tracer.h6
-rw-r--r--deps/v8/src/heap/global-handle-marking-visitor.h36
-rw-r--r--deps/v8/src/heap/heap-allocator-inl.h12
-rw-r--r--deps/v8/src/heap/heap-allocator.cc7
-rw-r--r--deps/v8/src/heap/heap-inl.h60
-rw-r--r--deps/v8/src/heap/heap-verifier.cc479
-rw-r--r--deps/v8/src/heap/heap-verifier.h24
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h172
-rw-r--r--deps/v8/src/heap/heap-write-barrier.cc117
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h55
-rw-r--r--deps/v8/src/heap/heap.cc1695
-rw-r--r--deps/v8/src/heap/heap.h483
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h22
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc1
-rw-r--r--deps/v8/src/heap/incremental-marking.cc274
-rw-r--r--deps/v8/src/heap/incremental-marking.h8
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h5
-rw-r--r--deps/v8/src/heap/invalidated-slots.cc5
-rw-r--r--deps/v8/src/heap/invalidated-slots.h1
-rw-r--r--deps/v8/src/heap/large-spaces.cc149
-rw-r--r--deps/v8/src/heap/large-spaces.h7
-rw-r--r--deps/v8/src/heap/linear-allocation-area.h9
-rw-r--r--deps/v8/src/heap/local-heap.cc62
-rw-r--r--deps/v8/src/heap/local-heap.h8
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h113
-rw-r--r--deps/v8/src/heap/mark-compact.cc2603
-rw-r--r--deps/v8/src/heap/mark-compact.h204
-rw-r--r--deps/v8/src/heap/marking-barrier-inl.h90
-rw-r--r--deps/v8/src/heap/marking-barrier.cc237
-rw-r--r--deps/v8/src/heap/marking-barrier.h38
-rw-r--r--deps/v8/src/heap/marking-state-inl.h34
-rw-r--r--deps/v8/src/heap/marking-state.h22
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h307
-rw-r--r--deps/v8/src/heap/marking-visitor.h76
-rw-r--r--deps/v8/src/heap/marking-worklist-inl.h11
-rw-r--r--deps/v8/src/heap/marking-worklist.cc9
-rw-r--r--deps/v8/src/heap/marking-worklist.h10
-rw-r--r--deps/v8/src/heap/marking.h32
-rw-r--r--deps/v8/src/heap/memory-allocator.cc124
-rw-r--r--deps/v8/src/heap/memory-allocator.h51
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.cc26
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.h24
-rw-r--r--deps/v8/src/heap/memory-chunk.cc78
-rw-r--r--deps/v8/src/heap/memory-chunk.h31
-rw-r--r--deps/v8/src/heap/memory-reducer.cc169
-rw-r--r--deps/v8/src/heap/memory-reducer.h73
-rw-r--r--deps/v8/src/heap/minor-gc-job.cc74
-rw-r--r--deps/v8/src/heap/minor-gc-job.h (renamed from deps/v8/src/heap/scavenge-job.h)17
-rw-r--r--deps/v8/src/heap/new-spaces-inl.h7
-rw-r--r--deps/v8/src/heap/new-spaces.cc257
-rw-r--r--deps/v8/src/heap/new-spaces.h125
-rw-r--r--deps/v8/src/heap/object-lock.h50
-rw-r--r--deps/v8/src/heap/object-start-bitmap-inl.h168
-rw-r--r--deps/v8/src/heap/object-start-bitmap.h109
-rw-r--r--deps/v8/src/heap/object-stats.cc52
-rw-r--r--deps/v8/src/heap/object-stats.h4
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h117
-rw-r--r--deps/v8/src/heap/objects-visiting.cc36
-rw-r--r--deps/v8/src/heap/objects-visiting.h48
-rw-r--r--deps/v8/src/heap/paged-spaces-inl.h6
-rw-r--r--deps/v8/src/heap/paged-spaces.cc287
-rw-r--r--deps/v8/src/heap/paged-spaces.h41
-rw-r--r--deps/v8/src/heap/parked-scope.h26
-rw-r--r--deps/v8/src/heap/pretenuring-handler-inl.h11
-rw-r--r--deps/v8/src/heap/pretenuring-handler.cc63
-rw-r--r--deps/v8/src/heap/pretenuring-handler.h12
-rw-r--r--deps/v8/src/heap/read-only-heap-inl.h26
-rw-r--r--deps/v8/src/heap/read-only-heap.cc18
-rw-r--r--deps/v8/src/heap/read-only-heap.h18
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc127
-rw-r--r--deps/v8/src/heap/read-only-spaces.h14
-rw-r--r--deps/v8/src/heap/reference-summarizer.cc8
-rw-r--r--deps/v8/src/heap/remembered-set-inl.h28
-rw-r--r--deps/v8/src/heap/remembered-set.h31
-rw-r--r--deps/v8/src/heap/safepoint.cc74
-rw-r--r--deps/v8/src/heap/safepoint.h24
-rw-r--r--deps/v8/src/heap/scavenge-job.cc67
-rw-r--r--deps/v8/src/heap/scavenger-inl.h80
-rw-r--r--deps/v8/src/heap/scavenger.cc108
-rw-r--r--deps/v8/src/heap/scavenger.h9
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc814
-rw-r--r--deps/v8/src/heap/slot-set.h16
-rw-r--r--deps/v8/src/heap/spaces.cc98
-rw-r--r--deps/v8/src/heap/spaces.h29
-rw-r--r--deps/v8/src/heap/stress-marking-observer.cc22
-rw-r--r--deps/v8/src/heap/stress-marking-observer.h26
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.cc7
-rw-r--r--deps/v8/src/heap/sweeper.cc859
-rw-r--r--deps/v8/src/heap/sweeper.h126
-rw-r--r--deps/v8/src/heap/traced-handles-marking-visitor.cc (renamed from deps/v8/src/heap/global-handle-marking-visitor.cc)22
-rw-r--r--deps/v8/src/heap/traced-handles-marking-visitor.h40
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc410
-rw-r--r--deps/v8/src/ic/accessor-assembler.h33
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc253
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h8
-rw-r--r--deps/v8/src/ic/handler-configuration.cc22
-rw-r--r--deps/v8/src/ic/handler-configuration.h12
-rw-r--r--deps/v8/src/ic/ic-inl.h2
-rw-r--r--deps/v8/src/ic/ic.cc65
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc29
-rw-r--r--deps/v8/src/ic/stub-cache.cc23
-rw-r--r--deps/v8/src/ic/stub-cache.h9
-rw-r--r--deps/v8/src/ic/unary-op-assembler.cc3
-rw-r--r--deps/v8/src/init/bootstrapper.cc223
-rw-r--r--deps/v8/src/init/heap-symbols.h196
-rw-r--r--deps/v8/src/init/isolate-allocator.cc110
-rw-r--r--deps/v8/src/init/isolate-allocator.h14
-rw-r--r--deps/v8/src/init/setup-isolate-deserialize.cc22
-rw-r--r--deps/v8/src/init/setup-isolate-full.cc27
-rw-r--r--deps/v8/src/init/setup-isolate.h12
-rw-r--r--deps/v8/src/init/v8.cc11
-rw-r--r--deps/v8/src/init/v8.h2
-rw-r--r--deps/v8/src/inspector/BUILD.gn4
-rw-r--r--deps/v8/src/inspector/crc32.cc85
-rw-r--r--deps/v8/src/inspector/crc32.h16
-rw-r--r--deps/v8/src/inspector/injected-script.cc2
-rw-r--r--deps/v8/src/inspector/inspected-context.h9
-rw-r--r--deps/v8/src/inspector/string-16.cc18
-rw-r--r--deps/v8/src/inspector/string-16.h1
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc231
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h14
-rw-r--r--deps/v8/src/inspector/v8-debugger-barrier.cc19
-rw-r--r--deps/v8/src/inspector/v8-debugger-barrier.h28
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc251
-rw-r--r--deps/v8/src/inspector/v8-debugger.h24
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc25
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc17
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h9
-rw-r--r--deps/v8/src/inspector/v8-regex.cc4
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc51
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h8
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc18
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h2
-rw-r--r--deps/v8/src/inspector/value-mirror.cc52
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc8
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc13
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc208
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h27
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.cc4
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h22
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h2
-rw-r--r--deps/v8/src/interpreter/bytecodes.h31
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc33
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h27
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc162
-rw-r--r--deps/v8/src/interpreter/interpreter.cc16
-rw-r--r--deps/v8/src/interpreter/interpreter.h6
-rw-r--r--deps/v8/src/json/json-parser.cc302
-rw-r--r--deps/v8/src/json/json-parser.h29
-rw-r--r--deps/v8/src/json/json-stringifier.cc41
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc11
-rw-r--r--deps/v8/src/libsampler/sampler.cc17
-rw-r--r--deps/v8/src/logging/code-events.h33
-rw-r--r--deps/v8/src/logging/counters-definitions.h107
-rw-r--r--deps/v8/src/logging/counters.cc6
-rw-r--r--deps/v8/src/logging/counters.h21
-rw-r--r--deps/v8/src/logging/local-logger.cc3
-rw-r--r--deps/v8/src/logging/local-logger.h2
-rw-r--r--deps/v8/src/logging/log-file.cc1
-rw-r--r--deps/v8/src/logging/log.cc274
-rw-r--r--deps/v8/src/logging/log.h21
-rw-r--r--deps/v8/src/logging/runtime-call-stats.h32
-rw-r--r--deps/v8/src/maglev/DEPS6
-rw-r--r--deps/v8/src/maglev/arm64/maglev-assembler-arm64-inl.h864
-rw-r--r--deps/v8/src/maglev/arm64/maglev-assembler-arm64.cc901
-rw-r--r--deps/v8/src/maglev/arm64/maglev-ir-arm64.cc2304
-rw-r--r--deps/v8/src/maglev/maglev-assembler-inl.h219
-rw-r--r--deps/v8/src/maglev/maglev-assembler.cc345
-rw-r--r--deps/v8/src/maglev/maglev-assembler.h394
-rw-r--r--deps/v8/src/maglev/maglev-basic-block.h47
-rw-r--r--deps/v8/src/maglev/maglev-code-gen-state.h43
-rw-r--r--deps/v8/src/maglev/maglev-code-generator.cc779
-rw-r--r--deps/v8/src/maglev/maglev-code-generator.h4
-rw-r--r--deps/v8/src/maglev/maglev-compilation-info.cc1
-rw-r--r--deps/v8/src/maglev/maglev-compilation-unit.cc7
-rw-r--r--deps/v8/src/maglev/maglev-compiler.cc170
-rw-r--r--deps/v8/src/maglev/maglev-compiler.h4
-rw-r--r--deps/v8/src/maglev/maglev-concurrent-dispatcher.cc42
-rw-r--r--deps/v8/src/maglev/maglev-concurrent-dispatcher.h4
-rw-r--r--deps/v8/src/maglev/maglev-graph-builder.cc4385
-rw-r--r--deps/v8/src/maglev/maglev-graph-builder.h1224
-rw-r--r--deps/v8/src/maglev/maglev-graph-printer.cc90
-rw-r--r--deps/v8/src/maglev/maglev-graph-processor.h36
-rw-r--r--deps/v8/src/maglev/maglev-graph-verifier.h303
-rw-r--r--deps/v8/src/maglev/maglev-graph.h59
-rw-r--r--deps/v8/src/maglev/maglev-interpreter-frame-state.cc485
-rw-r--r--deps/v8/src/maglev/maglev-interpreter-frame-state.h500
-rw-r--r--deps/v8/src/maglev/maglev-ir-inl.h104
-rw-r--r--deps/v8/src/maglev/maglev-ir.cc6072
-rw-r--r--deps/v8/src/maglev/maglev-ir.h3812
-rw-r--r--deps/v8/src/maglev/maglev-phi-representation-selector.cc589
-rw-r--r--deps/v8/src/maglev/maglev-phi-representation-selector.h179
-rw-r--r--deps/v8/src/maglev/maglev-regalloc.cc671
-rw-r--r--deps/v8/src/maglev/maglev-regalloc.h30
-rw-r--r--deps/v8/src/maglev/maglev-vreg-allocator.h55
-rw-r--r--deps/v8/src/maglev/maglev.cc12
-rw-r--r--deps/v8/src/maglev/maglev.h4
-rw-r--r--deps/v8/src/maglev/x64/maglev-assembler-x64-inl.h717
-rw-r--r--deps/v8/src/maglev/x64/maglev-assembler-x64.cc821
-rw-r--r--deps/v8/src/maglev/x64/maglev-ir-x64.cc2303
-rw-r--r--deps/v8/src/numbers/conversions.cc123
-rw-r--r--deps/v8/src/numbers/conversions.h5
-rw-r--r--deps/v8/src/objects/all-objects-inl.h2
-rw-r--r--deps/v8/src/objects/backing-store.cc22
-rw-r--r--deps/v8/src/objects/backing-store.h4
-rw-r--r--deps/v8/src/objects/bigint.cc180
-rw-r--r--deps/v8/src/objects/bigint.h37
-rw-r--r--deps/v8/src/objects/call-site-info-inl.h21
-rw-r--r--deps/v8/src/objects/call-site-info.cc6
-rw-r--r--deps/v8/src/objects/call-site-info.h2
-rw-r--r--deps/v8/src/objects/code-inl.h1503
-rw-r--r--deps/v8/src/objects/code.cc342
-rw-r--r--deps/v8/src/objects/code.h1222
-rw-r--r--deps/v8/src/objects/code.tq2
-rw-r--r--deps/v8/src/objects/compilation-cache-table.cc4
-rw-r--r--deps/v8/src/objects/compressed-slots-inl.h64
-rw-r--r--deps/v8/src/objects/contexts-inl.h20
-rw-r--r--deps/v8/src/objects/contexts.cc24
-rw-r--r--deps/v8/src/objects/contexts.h21
-rw-r--r--deps/v8/src/objects/contexts.tq9
-rw-r--r--deps/v8/src/objects/data-handler.tq11
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h88
-rw-r--r--deps/v8/src/objects/descriptor-array.h102
-rw-r--r--deps/v8/src/objects/descriptor-array.tq5
-rw-r--r--deps/v8/src/objects/dictionary-inl.h26
-rw-r--r--deps/v8/src/objects/dictionary.h52
-rw-r--r--deps/v8/src/objects/elements-kind.h14
-rw-r--r--deps/v8/src/objects/elements.cc9
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h14
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h29
-rw-r--r--deps/v8/src/objects/feedback-vector.cc58
-rw-r--r--deps/v8/src/objects/feedback-vector.h28
-rw-r--r--deps/v8/src/objects/feedback-vector.tq4
-rw-r--r--deps/v8/src/objects/field-index-inl.h4
-rw-r--r--deps/v8/src/objects/field-index.h1
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h6
-rw-r--r--deps/v8/src/objects/fixed-array.h16
-rw-r--r--deps/v8/src/objects/free-space-inl.h32
-rw-r--r--deps/v8/src/objects/free-space.h14
-rw-r--r--deps/v8/src/objects/free-space.tq2
-rw-r--r--deps/v8/src/objects/hash-table-inl.h18
-rw-r--r--deps/v8/src/objects/hash-table.h54
-rw-r--r--deps/v8/src/objects/heap-object.h25
-rw-r--r--deps/v8/src/objects/instance-type-inl.h291
-rw-r--r--deps/v8/src/objects/instance-type.h40
-rw-r--r--deps/v8/src/objects/intl-objects.cc30
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h39
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc22
-rw-r--r--deps/v8/src/objects/js-array-buffer.h35
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq7
-rw-r--r--deps/v8/src/objects/js-array.h4
-rw-r--r--deps/v8/src/objects/js-array.tq27
-rw-r--r--deps/v8/src/objects/js-atomics-synchronization-inl.h1
-rw-r--r--deps/v8/src/objects/js-atomics-synchronization.h11
-rw-r--r--deps/v8/src/objects/js-atomics-synchronization.tq2
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc6
-rw-r--r--deps/v8/src/objects/js-duration-format.cc44
-rw-r--r--deps/v8/src/objects/js-function-inl.h43
-rw-r--r--deps/v8/src/objects/js-function.cc95
-rw-r--r--deps/v8/src/objects/js-function.h21
-rw-r--r--deps/v8/src/objects/js-function.tq3
-rw-r--r--deps/v8/src/objects/js-iterator-helpers-inl.h35
-rw-r--r--deps/v8/src/objects/js-iterator-helpers.h112
-rw-r--r--deps/v8/src/objects/js-iterator-helpers.tq26
-rw-r--r--deps/v8/src/objects/js-locale.cc2
-rw-r--r--deps/v8/src/objects/js-number-format.cc56
-rw-r--r--deps/v8/src/objects/js-objects-inl.h27
-rw-r--r--deps/v8/src/objects/js-objects.cc359
-rw-r--r--deps/v8/src/objects/js-objects.h39
-rw-r--r--deps/v8/src/objects/js-objects.tq9
-rw-r--r--deps/v8/src/objects/js-raw-json-inl.h4
-rw-r--r--deps/v8/src/objects/js-raw-json.cc11
-rw-r--r--deps/v8/src/objects/js-raw-json.h17
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h4
-rw-r--r--deps/v8/src/objects/js-regexp.cc7
-rw-r--r--deps/v8/src/objects/js-regexp.h5
-rw-r--r--deps/v8/src/objects/js-shared-array-inl.h1
-rw-r--r--deps/v8/src/objects/js-shared-array.h16
-rw-r--r--deps/v8/src/objects/js-shared-array.tq2
-rw-r--r--deps/v8/src/objects/js-struct-inl.h2
-rw-r--r--deps/v8/src/objects/js-struct.cc49
-rw-r--r--deps/v8/src/objects/js-struct.h16
-rw-r--r--deps/v8/src/objects/js-struct.tq9
-rw-r--r--deps/v8/src/objects/js-temporal-objects.cc21
-rw-r--r--deps/v8/src/objects/keys.cc46
-rw-r--r--deps/v8/src/objects/lookup-inl.h8
-rw-r--r--deps/v8/src/objects/lookup.cc79
-rw-r--r--deps/v8/src/objects/lookup.h4
-rw-r--r--deps/v8/src/objects/map-inl.h42
-rw-r--r--deps/v8/src/objects/map.cc85
-rw-r--r--deps/v8/src/objects/map.h33
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h2
-rw-r--r--deps/v8/src/objects/module.cc2
-rw-r--r--deps/v8/src/objects/object-list-macros.h46
-rw-r--r--deps/v8/src/objects/object-macros.h11
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h130
-rw-r--r--deps/v8/src/objects/objects-definitions.h50
-rw-r--r--deps/v8/src/objects/objects-inl.h198
-rw-r--r--deps/v8/src/objects/objects.cc582
-rw-r--r--deps/v8/src/objects/objects.h73
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc52
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h21
-rw-r--r--deps/v8/src/objects/property-array-inl.h16
-rw-r--r--deps/v8/src/objects/property-descriptor-object.tq142
-rw-r--r--deps/v8/src/objects/property-descriptor.cc39
-rw-r--r--deps/v8/src/objects/prototype-info-inl.h6
-rw-r--r--deps/v8/src/objects/prototype-info.h2
-rw-r--r--deps/v8/src/objects/scope-info.cc16
-rw-r--r--deps/v8/src/objects/scope-info.h5
-rw-r--r--deps/v8/src/objects/scope-info.tq4
-rw-r--r--deps/v8/src/objects/script-inl.h53
-rw-r--r--deps/v8/src/objects/script.h24
-rw-r--r--deps/v8/src/objects/script.tq12
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h55
-rw-r--r--deps/v8/src/objects/shared-function-info.cc30
-rw-r--r--deps/v8/src/objects/shared-function-info.h26
-rw-r--r--deps/v8/src/objects/shared-function-info.tq7
-rw-r--r--deps/v8/src/objects/simd.cc9
-rw-r--r--deps/v8/src/objects/slots-inl.h62
-rw-r--r--deps/v8/src/objects/source-text-module.cc6
-rw-r--r--deps/v8/src/objects/string-comparator.cc5
-rw-r--r--deps/v8/src/objects/string-forwarding-table-inl.h13
-rw-r--r--deps/v8/src/objects/string-forwarding-table.cc107
-rw-r--r--deps/v8/src/objects/string-forwarding-table.h3
-rw-r--r--deps/v8/src/objects/string-inl.h101
-rw-r--r--deps/v8/src/objects/string-table.cc20
-rw-r--r--deps/v8/src/objects/string.cc141
-rw-r--r--deps/v8/src/objects/string.h46
-rw-r--r--deps/v8/src/objects/string.tq42
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary-inl.h13
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.h4
-rw-r--r--deps/v8/src/objects/synthetic-module.cc3
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h13
-rw-r--r--deps/v8/src/objects/tagged-field.h2
-rw-r--r--deps/v8/src/objects/tagged-impl-inl.h11
-rw-r--r--deps/v8/src/objects/tagged-impl.cc9
-rw-r--r--deps/v8/src/objects/tagged-impl.h16
-rw-r--r--deps/v8/src/objects/tagged-value-inl.h8
-rw-r--r--deps/v8/src/objects/transitions.cc31
-rw-r--r--deps/v8/src/objects/turboshaft-types-inl.h33
-rw-r--r--deps/v8/src/objects/turboshaft-types.h115
-rw-r--r--deps/v8/src/objects/turboshaft-types.tq234
-rw-r--r--deps/v8/src/objects/type-hints.cc2
-rw-r--r--deps/v8/src/objects/type-hints.h1
-rw-r--r--deps/v8/src/objects/value-serializer.cc220
-rw-r--r--deps/v8/src/objects/value-serializer.h14
-rw-r--r--deps/v8/src/objects/visitors.h57
-rw-r--r--deps/v8/src/parsing/parse-info.cc17
-rw-r--r--deps/v8/src/parsing/parse-info.h24
-rw-r--r--deps/v8/src/parsing/parser-base.h66
-rw-r--r--deps/v8/src/parsing/parser.cc52
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.cc49
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.h31
-rw-r--r--deps/v8/src/parsing/preparser.h2
-rw-r--r--deps/v8/src/parsing/scanner-inl.h29
-rw-r--r--deps/v8/src/parsing/scanner.cc3
-rw-r--r--deps/v8/src/parsing/scanner.h8
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc2
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h41
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc5
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h30
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc191
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h19
-rw-r--r--deps/v8/src/profiler/profile-generator.cc23
-rw-r--r--deps/v8/src/profiler/profile-generator.h20
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc35
-rw-r--r--deps/v8/src/profiler/profiler-listener.h3
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc9
-rw-r--r--deps/v8/src/profiler/symbolizer.cc9
-rw-r--r--deps/v8/src/profiler/symbolizer.h12
-rw-r--r--deps/v8/src/profiler/tick-sample.cc12
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc19
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.h11
-rw-r--r--deps/v8/src/profiler/weak-code-registry.h5
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc192
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h59
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc135
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h64
-rw-r--r--deps/v8/src/regexp/experimental/experimental-compiler.cc21
-rw-r--r--deps/v8/src/regexp/experimental/experimental.cc12
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc145
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h68
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc197
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h59
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc205
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h58
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc164
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h64
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc67
-rw-r--r--deps/v8/src/regexp/regexp-ast.h102
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc279
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc4
-rw-r--r--deps/v8/src/regexp/regexp-error.h4
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc12
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h6
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc972
-rw-r--r--deps/v8/src/regexp/regexp.cc44
-rw-r--r--deps/v8/src/regexp/regexp.h1
-rw-r--r--deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.cc164
-rw-r--r--deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.h63
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc166
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h54
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc187
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h101
-rw-r--r--deps/v8/src/roots/roots-inl.h67
-rw-r--r--deps/v8/src/roots/roots.cc30
-rw-r--r--deps/v8/src/roots/roots.h95
-rw-r--r--deps/v8/src/roots/static-roots.h1515
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc19
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc12
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc70
-rw-r--r--deps/v8/src/runtime/runtime-date.cc3
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc13
-rw-r--r--deps/v8/src/runtime/runtime-function.cc2
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc21
-rw-r--r--deps/v8/src/runtime/runtime-object.cc334
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc47
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc2
-rw-r--r--deps/v8/src/runtime/runtime-shadow-realm.cc12
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc105
-rw-r--r--deps/v8/src/runtime/runtime-test-wasm.cc100
-rw-r--r--deps/v8/src/runtime/runtime-test.cc99
-rw-r--r--deps/v8/src/runtime/runtime-trace.cc7
-rw-r--r--deps/v8/src/runtime/runtime-utils.h2
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc161
-rw-r--r--deps/v8/src/runtime/runtime.h66
-rw-r--r--deps/v8/src/sandbox/external-pointer-inl.h87
-rw-r--r--deps/v8/src/sandbox/external-pointer-table-inl.h13
-rw-r--r--deps/v8/src/sandbox/sandbox.cc164
-rw-r--r--deps/v8/src/sandbox/sandbox.h18
-rw-r--r--deps/v8/src/sanitizer/OWNERS3
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc55
-rw-r--r--deps/v8/src/snapshot/code-serializer.h8
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc5
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc28
-rw-r--r--deps/v8/src/snapshot/deserializer.cc879
-rw-r--r--deps/v8/src/snapshot/deserializer.h66
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc95
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.h16
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h2
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc5
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc2
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc2
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc2
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc8
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc84
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.h16
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc134
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.h25
-rw-r--r--deps/v8/src/snapshot/references.h11
-rw-r--r--deps/v8/src/snapshot/roots-serializer.h2
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.h3
-rw-r--r--deps/v8/src/snapshot/serializer-inl.h14
-rw-r--r--deps/v8/src/snapshot/serializer.cc277
-rw-r--r--deps/v8/src/snapshot/serializer.h50
-rw-r--r--deps/v8/src/snapshot/shared-heap-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/shared-heap-serializer.cc6
-rw-r--r--deps/v8/src/snapshot/snapshot.cc4
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc4
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc19
-rw-r--r--deps/v8/src/snapshot/static-roots-gen.cc135
-rw-r--r--deps/v8/src/snapshot/static-roots-gen.h21
-rw-r--r--deps/v8/src/strings/string-builder-inl.h30
-rw-r--r--deps/v8/src/strings/string-builder.cc23
-rw-r--r--deps/v8/src/strings/unicode.cc31
-rw-r--r--deps/v8/src/strings/unicode.h16
-rw-r--r--deps/v8/src/torque/ast.h21
-rw-r--r--deps/v8/src/torque/cc-generator.cc7
-rw-r--r--deps/v8/src/torque/cfg.cc2
-rw-r--r--deps/v8/src/torque/constants.h3
-rw-r--r--deps/v8/src/torque/csa-generator.cc7
-rw-r--r--deps/v8/src/torque/declarable.cc2
-rw-r--r--deps/v8/src/torque/declarable.h12
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc30
-rw-r--r--deps/v8/src/torque/declarations.cc11
-rw-r--r--deps/v8/src/torque/declarations.h3
-rw-r--r--deps/v8/src/torque/earley-parser.h2
-rw-r--r--deps/v8/src/torque/global-context.cc3
-rw-r--r--deps/v8/src/torque/global-context.h6
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc113
-rw-r--r--deps/v8/src/torque/instructions.cc6
-rw-r--r--deps/v8/src/torque/instructions.h10
-rw-r--r--deps/v8/src/torque/kythe-data.cc2
-rw-r--r--deps/v8/src/torque/kythe-data.h4
-rw-r--r--deps/v8/src/torque/ls/globals.h5
-rw-r--r--deps/v8/src/torque/ls/message-handler.cc6
-rw-r--r--deps/v8/src/torque/server-data.cc5
-rw-r--r--deps/v8/src/torque/server-data.h2
-rw-r--r--deps/v8/src/torque/source-positions.cc7
-rw-r--r--deps/v8/src/torque/source-positions.h5
-rw-r--r--deps/v8/src/torque/torque-compiler.h2
-rw-r--r--deps/v8/src/torque/torque-parser.cc30
-rw-r--r--deps/v8/src/torque/type-oracle.cc2
-rw-r--r--deps/v8/src/torque/type-oracle.h4
-rw-r--r--deps/v8/src/torque/types.cc13
-rw-r--r--deps/v8/src/torque/types.h2
-rw-r--r--deps/v8/src/torque/utils.cc4
-rw-r--r--deps/v8/src/torque/utils.h2
-rw-r--r--deps/v8/src/tracing/DEPS3
-rw-r--r--deps/v8/src/tracing/trace-categories.cc3
-rw-r--r--deps/v8/src/tracing/trace-categories.h16
-rw-r--r--deps/v8/src/tracing/trace-event.h7
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.cc20
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.h14
-rw-r--r--deps/v8/src/trap-handler/OWNERS8
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.cc9
-rw-r--r--deps/v8/src/trap-handler/handler-inside-win.cc5
-rw-r--r--deps/v8/src/trap-handler/handler-outside-simulator.cc22
-rw-r--r--deps/v8/src/trap-handler/trap-handler-simulator.h20
-rw-r--r--deps/v8/src/utils/allocation.cc117
-rw-r--r--deps/v8/src/utils/allocation.h47
-rw-r--r--deps/v8/src/utils/ostreams.h23
-rw-r--r--deps/v8/src/wasm/DEPS4
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h231
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h521
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h189
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h5
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc524
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h241
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc1790
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h2
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h57
-rw-r--r--deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h556
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h571
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h772
-rw-r--r--deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h262
-rw-r--r--deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h286
-rw-r--r--deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h240
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h409
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h213
-rw-r--r--deps/v8/src/wasm/c-api.cc47
-rw-r--r--deps/v8/src/wasm/canonical-types.cc51
-rw-r--r--deps/v8/src/wasm/canonical-types.h17
-rw-r--r--deps/v8/src/wasm/code-space-access.cc53
-rw-r--r--deps/v8/src/wasm/code-space-access.h21
-rw-r--r--deps/v8/src/wasm/compilation-environment.h35
-rw-r--r--deps/v8/src/wasm/constant-expression-interface.cc55
-rw-r--r--deps/v8/src/wasm/constant-expression.cc7
-rw-r--r--deps/v8/src/wasm/decoder.h293
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h1256
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc24
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h13
-rw-r--r--deps/v8/src/wasm/function-compiler.cc67
-rw-r--r--deps/v8/src/wasm/function-compiler.h51
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc645
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.h32
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc126
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h10
-rw-r--r--deps/v8/src/wasm/module-compiler.cc1557
-rw-r--r--deps/v8/src/wasm/module-compiler.h49
-rw-r--r--deps/v8/src/wasm/module-decoder-impl.h873
-rw-r--r--deps/v8/src/wasm/module-decoder.cc369
-rw-r--r--deps/v8/src/wasm/module-decoder.h67
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc663
-rw-r--r--deps/v8/src/wasm/module-instantiate.h89
-rw-r--r--deps/v8/src/wasm/pgo.cc12
-rw-r--r--deps/v8/src/wasm/stacks.cc16
-rw-r--r--deps/v8/src/wasm/stacks.h2
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc193
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h19
-rw-r--r--deps/v8/src/wasm/struct-types.h99
-rw-r--r--deps/v8/src/wasm/sync-streaming-decoder.cc2
-rw-r--r--deps/v8/src/wasm/value-type.cc20
-rw-r--r--deps/v8/src/wasm/value-type.h37
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc719
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h224
-rw-r--r--deps/v8/src/wasm/wasm-constants.h1
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc21
-rw-r--r--deps/v8/src/wasm/wasm-disassembler.cc129
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc215
-rw-r--r--deps/v8/src/wasm/wasm-engine.h53
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc26
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h9
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h72
-rw-r--r--deps/v8/src/wasm/wasm-features.cc12
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.cc4
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h15
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.cc6
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.h27
-rw-r--r--deps/v8/src/wasm/wasm-js.cc173
-rw-r--r--deps/v8/src/wasm/wasm-js.h14
-rw-r--r--deps/v8/src/wasm/wasm-limits.h10
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc72
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h10
-rw-r--r--deps/v8/src/wasm/wasm-module.cc34
-rw-r--r--deps/v8/src/wasm/wasm-module.h221
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h14
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc688
-rw-r--r--deps/v8/src/wasm/wasm-objects.h62
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq22
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc16
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h28
-rw-r--r--deps/v8/src/wasm/wasm-result.h26
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc134
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h1
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc235
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.h28
-rw-r--r--deps/v8/src/wasm/wasm-tier.h4
-rw-r--r--deps/v8/src/wasm/wasm-value.h3
-rw-r--r--deps/v8/src/wasm/well-known-imports.cc64
-rw-r--r--deps/v8/src/wasm/well-known-imports.h82
-rw-r--r--deps/v8/src/web-snapshot/OWNERS5
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc4289
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.h669
-rw-r--r--deps/v8/src/zone/type-stats.cc5
-rw-r--r--deps/v8/src/zone/zone-containers.h566
-rw-r--r--deps/v8/src/zone/zone.cc25
-rw-r--r--deps/v8/src/zone/zone.h25
-rw-r--r--deps/v8/test/BUILD.gn20
-rw-r--r--deps/v8/test/OWNERS1
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status3
-rw-r--r--deps/v8/test/benchmarks/cpp/BUILD.gn13
-rw-r--r--deps/v8/test/benchmarks/cpp/dtoa.cc1069
-rw-r--r--deps/v8/test/cctest/BUILD.gn125
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.h2
-rw-r--r--deps/v8/test/cctest/cctest.cc33
-rw-r--r--deps/v8/test/cctest/cctest.h32
-rw-r--r--deps/v8/test/cctest/cctest.status59
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h10
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc3
-rw-r--r--deps/v8/test/cctest/compiler/serializer-tester.h2
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc26
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc19
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc590
-rw-r--r--deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc128
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc29
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc30
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc40
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc94
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc38
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h1
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc4
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.h28
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc8
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc41
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-marking.cc97
-rw-r--r--deps/v8/test/cctest/heap/test-external-string-tracker.cc12
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc476
-rw-r--r--deps/v8/test/cctest/heap/test-invalidated-slots.cc32
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc113
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc48
-rw-r--r--deps/v8/test/cctest/heap/test-unmapper.cc1
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc13
-rw-r--r--deps/v8/test/cctest/heap/test-write-barrier.cc23
-rw-r--r--deps/v8/test/cctest/setup-isolate-for-tests.cc20
-rw-r--r--deps/v8/test/cctest/setup-isolate-for-tests.h9
-rw-r--r--deps/v8/test/cctest/test-accessor-assembler.cc22
-rw-r--r--deps/v8/test/cctest/test-accessors.cc2
-rw-r--r--deps/v8/test/cctest/test-api-array-buffer.cc65
-rw-r--r--deps/v8/test/cctest/test-api-typed-array.cc5
-rw-r--r--deps/v8/test/cctest/test-api.cc1100
-rw-r--r--deps/v8/test/cctest/test-api.h8
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc86
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc899
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc44
-rw-r--r--deps/v8/test/cctest/test-assembler-loong64.cc111
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc200
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc26
-rw-r--r--deps/v8/test/cctest/test-assembler-riscv32.cc73
-rw-r--r--deps/v8/test/cctest/test-assembler-riscv64.cc123
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc36
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc326
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc143
-rw-r--r--deps/v8/test/cctest/test-debug-helper.cc12
-rw-r--r--deps/v8/test/cctest/test-debug.cc403
-rw-r--r--deps/v8/test/cctest/test-descriptor-array.cc6
-rw-r--r--deps/v8/test/cctest/test-disasm-regex-helper.cc6
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc14
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc38
-rw-r--r--deps/v8/test/cctest/test-helper-riscv32.cc5
-rw-r--r--deps/v8/test/cctest/test-helper-riscv32.h24
-rw-r--r--deps/v8/test/cctest/test-helper-riscv64.cc5
-rw-r--r--deps/v8/test/cctest/test-helper-riscv64.h24
-rw-r--r--deps/v8/test/cctest/test-js-to-wasm.cc54
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc11
-rw-r--r--deps/v8/test/cctest/test-lockers.cc2
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc4
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-loong64.cc64
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc30
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-riscv32.cc22
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-riscv64.cc24
-rw-r--r--deps/v8/test/cctest/test-mementos.cc8
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc163
-rw-r--r--deps/v8/test/cctest/test-ptr-compr-cage.cc42
-rw-r--r--deps/v8/test/cctest/test-regexp.cc263
-rw-r--r--deps/v8/test/cctest/test-serialize.cc161
-rw-r--r--deps/v8/test/cctest/test-shared-strings.cc856
-rw-r--r--deps/v8/test/cctest/test-simple-riscv32.cc12
-rw-r--r--deps/v8/test/cctest/test-simple-riscv64.cc12
-rw-r--r--deps/v8/test/cctest/test-strings.cc48
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc33
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-unwinder-code-pages.cc20
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc183
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc185
-rw-r--r--deps/v8/test/cctest/wasm/test-grow-memory.cc3
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc5
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc34
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc333
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc76
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc174
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc288
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc298
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc353
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc192
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc37
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc12
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc30
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc200
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc10
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc96
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc1519
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc1470
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc24
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc69
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc12
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-metrics.cc9
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc90
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc7
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc15
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc16
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc130
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h56
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.cc206
-rw-r--r--deps/v8/test/common/call-tester.h11
-rw-r--r--deps/v8/test/common/code-assembler-tester.h7
-rw-r--r--deps/v8/test/common/types-fuzz.h7
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc77
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.h2
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h16
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc28
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.h17
-rw-r--r--deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js38
-rw-r--r--deps/v8/test/debugger/testcfg.py2
-rw-r--r--deps/v8/test/debugging/testcfg.py6
-rw-r--r--deps/v8/test/fuzzer/multi-return.cc8
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc3
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc38
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc14
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc323
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc272
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h49
-rw-r--r--deps/v8/test/fuzzer/wasm-streaming.cc26
-rw-r--r--deps/v8/test/fuzzer/wasm.cc39
-rw-r--r--deps/v8/test/fuzzer/wasm/regress-1404619.wasm bin 0 -> 38 bytes
-rw-r--r--deps/v8/test/fuzzer/wasm_async/regress-1405322.wasm bin 0 -> 58 bytes
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-stack-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/break-on-exception-async-gen-expected.txt45
-rw-r--r--deps/v8/test/inspector/debugger/break-on-exception-async-gen.js118
-rw-r--r--deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js7
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-expected.txt138
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt126
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-nested-super.js26
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-preview-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt82
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt47
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-nested.js16
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static.js21
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt84
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused.js16
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods.js28
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict-expected.txt55
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict.js12
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-expected.txt231
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module-expected.txt16
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module.js32
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static-expected.txt231
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static.js12
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-super-expected.txt (renamed from deps/v8/test/mjsunit/web-snapshot-helpers.js)0
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member.js12
-rw-r--r--deps/v8/test/inspector/debugger/instrumentation-multiple-sessions-expected.txt64
-rw-r--r--deps/v8/test/inspector/debugger/instrumentation-multiple-sessions.js292
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt5
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-instrumentation-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-instrumentation.js25
-rw-r--r--deps/v8/test/inspector/debugger/restore-breakpoint-expected.txt73
-rw-r--r--deps/v8/test/inspector/debugger/restore-breakpoint.js47
-rw-r--r--deps/v8/test/inspector/debugger/session-stop-expected.txt20
-rw-r--r--deps/v8/test/inspector/debugger/session-stop.js86
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoints-active-expected.txt36
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoints-active.js69
-rw-r--r--deps/v8/test/inspector/debugger/set-script-source-es-module-expected.txt9
-rw-r--r--deps/v8/test/inspector/debugger/set-script-source-es-module.js37
-rw-r--r--deps/v8/test/inspector/debugger/set-script-source-top-frame-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/suspended-generator-scopes.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-anyref-expected.txt19
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-anyref.js40
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt114
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-a-lot.js2
-rw-r--r--deps/v8/test/inspector/inspector-test.cc28
-rw-r--r--deps/v8/test/inspector/isolate-data.cc29
-rw-r--r--deps/v8/test/inspector/isolate-data.h13
-rw-r--r--deps/v8/test/inspector/private-class-member-inspector-test.js195
-rw-r--r--deps/v8/test/inspector/protocol-test.js13
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1220203.js7
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1246896.js2
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1270780-expected.txt22
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1270780.js107
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1401674-expected.txt5
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1401674.js38
-rw-r--r--deps/v8/test/inspector/runtime/call-function-on-async-expected.txt43
-rw-r--r--deps/v8/test/inspector/runtime/call-function-on-async.js45
-rw-r--r--deps/v8/test/inspector/runtime/console-spec-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict-expected.txt55
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict.js12
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-private-class-member-expected.txt231
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-private-class-member-static-expected.txt231
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-private-class-member-static.js12
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-private-class-member.js12
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions-expected.txt24
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions.js71
-rw-r--r--deps/v8/test/inspector/runtime/function-scopes.js2
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt6
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt8
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-prototype-chain-expected.txt61
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-prototype-chain.js39
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties.js2
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-expected.txt108
-rw-r--r--deps/v8/test/inspector/runtime/remote-object.js19
-rw-r--r--deps/v8/test/inspector/runtime/run-if-waiting-for-debugger-expected.txt12
-rw-r--r--deps/v8/test/inspector/runtime/run-if-waiting-for-debugger.js35
-rw-r--r--deps/v8/test/inspector/runtime/terminate-execution-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/terminate-execution.js14
-rw-r--r--deps/v8/test/inspector/sessions/create-session-expected.txt3
-rw-r--r--deps/v8/test/inspector/task-runner.cc6
-rw-r--r--deps/v8/test/inspector/tasks.cc84
-rw-r--r--deps/v8/test/inspector/tasks.h28
-rw-r--r--deps/v8/test/inspector/testcfg.py10
-rw-r--r--deps/v8/test/inspector/wasm-inspector-test.js6
-rw-r--r--deps/v8/test/intl/assert.js14
-rw-r--r--deps/v8/test/intl/date-format/en-format-range-to-parts.js6
-rw-r--r--deps/v8/test/intl/date-format/format-range.js24
-rw-r--r--deps/v8/test/intl/intl.status5
-rw-r--r--deps/v8/test/intl/locale/locale-collations.js1
-rw-r--r--deps/v8/test/intl/number-format/unified/style-unit.js4
-rw-r--r--deps/v8/test/intl/regress-9912.js6
-rw-r--r--deps/v8/test/intl/relative-time-format/format-en.js218
-rw-r--r--deps/v8/test/intl/string-localecompare.js13
-rw-r--r--deps/v8/test/intl/testcfg.py2
-rw-r--r--deps/v8/test/js-perf-test/BigInt/add-no-opt.js75
-rw-r--r--deps/v8/test/js-perf-test/BigInt/add.js24
-rw-r--r--deps/v8/test/js-perf-test/BigInt/bigint-util.js5
-rw-r--r--deps/v8/test/js-perf-test/BigInt/exponentiate.js48
-rw-r--r--deps/v8/test/js-perf-test/BigInt/shift.js47
-rw-r--r--deps/v8/test/js-perf-test/BigInt/subtract.js24
-rw-r--r--deps/v8/test/js-perf-test/JSTests1.json49
-rw-r--r--deps/v8/test/js-perf-test/JSTests2.json5
-rw-r--r--deps/v8/test/js-perf-test/Keys/keys.js13
-rw-r--r--deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.js9
-rw-r--r--deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.out9
-rw-r--r--deps/v8/test/message/fail/strict-prior-octal-escape.js9
-rw-r--r--deps/v8/test/message/fail/strict-prior-octal-escape.out9
-rw-r--r--deps/v8/test/message/fail/strict-prior-octal-literal.js9
-rw-r--r--deps/v8/test/message/fail/strict-prior-octal-literal.out9
-rw-r--r--deps/v8/test/message/fail/var-prior-conflict.js9
-rw-r--r--deps/v8/test/message/fail/var-prior-conflict.out9
-rw-r--r--deps/v8/test/message/message.status21
-rw-r--r--deps/v8/test/message/testcfg.py2
-rw-r--r--deps/v8/test/message/wasm-recognize-imports.js45
-rw-r--r--deps/v8/test/message/wasm-recognize-imports.out5
-rw-r--r--deps/v8/test/message/wasm-trace-liftoff.js3
-rw-r--r--deps/v8/test/message/wasm-trace-liftoff.out4
-rw-r--r--deps/v8/test/message/wasm-trace-memory-liftoff.js2
-rw-r--r--deps/v8/test/message/wasm-trace-memory64-liftoff.js7
-rw-r--r--deps/v8/test/message/wasm-trace-memory64-liftoff.out28
-rw-r--r--deps/v8/test/message/wasm-trace-memory64.js116
-rw-r--r--deps/v8/test/message/wasm-trace-memory64.out28
-rw-r--r--deps/v8/test/message/wasm-trace-turbofan.out4
-rw-r--r--deps/v8/test/mjsunit/array-reverse.js2
-rw-r--r--deps/v8/test/mjsunit/array-tostring.js9
-rw-r--r--deps/v8/test/mjsunit/asm/regress-1395401.js16
-rw-r--r--deps/v8/test/mjsunit/code-stats-flag.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-bitwise-and.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-bitwise-or.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-bitwise-xor.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-compare.js61
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-constructor.js107
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-equal.js44
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-shift-left.js110
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-shift-right.js140
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint64-array.js71
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-pretenure.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-calls-pointer.js140
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-clamp-annotations.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/misc-ensure-no-deopt.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/omit-default-ctors-array-iterator.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/omit-default-ctors.js84
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1399490.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1399626.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1399627.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1408013.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/typedarray-resizablearraybuffer.js77
-rw-r--r--deps/v8/test/mjsunit/const-dict-tracking.js734
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking.js235
-rw-r--r--deps/v8/test/mjsunit/dataview-resizablearraybuffer.js29
-rw-r--r--deps/v8/test/mjsunit/ensure-growing-store-learns.js6
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-arraylike-string-length.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-arraylike.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-holey-array.js4
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-holey.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-with-dupes.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-spreadable-function.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-spreadable-nonarraylike-proxy.js4
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-spreadable-regexp.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-spreadable-sparse-object.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-strict-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-from.js21
-rw-r--r--deps/v8/test/mjsunit/es6/array-of.js16
-rw-r--r--deps/v8/test/mjsunit/es6/array-spread-holey.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-sloppy-function.js3
-rw-r--r--deps/v8/test/mjsunit/es6/super.js12
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-tostring.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-groupby.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-to-reversed.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/array-to-sorted.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/arraybuffer-transfer.js257
-rw-r--r--deps/v8/test/mjsunit/harmony/iterator-constructor.js31
-rw-r--r--deps/v8/test/mjsunit/harmony/iterator-from.js78
-rw-r--r--deps/v8/test/mjsunit/harmony/iterator-helpers.js320
-rw-r--r--deps/v8/test/mjsunit/harmony/json-parse-with-source-snapshot.js92
-rw-r--r--deps/v8/test/mjsunit/harmony/json-parse-with-source.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/public-instance-class-fields.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/public-static-class-fields.js40
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-unicode-sets.js126
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-1410963.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-1422812.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-1423310.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-callsite-throw.js114
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-importvalue.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs2
-rw-r--r--deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js23
-rw-r--r--deps/v8/test/mjsunit/harmony/string-iswellformed-external-uncached.js42
-rw-r--r--deps/v8/test/mjsunit/harmony/string-iswellformed-flat-indirect.js35
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js45
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js85
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js81
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js90
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js50
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js58
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js60
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js71
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime-multiple.js66
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js80
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js111
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js57
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js33
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js64
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/symbol-as-weakref-target-gc.js43
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/symbol-in-finalizationregistry.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js72
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js62
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js72
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js67
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js81
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js84
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js85
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js62
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js62
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js33
-rw-r--r--deps/v8/test/mjsunit/ic-megadom-2.js2
-rw-r--r--deps/v8/test/mjsunit/ic-megadom-3.js2
-rw-r--r--deps/v8/test/mjsunit/ic-megadom.js2
-rw-r--r--deps/v8/test/mjsunit/interrupt-budget-override.js2
-rw-r--r--deps/v8/test/mjsunit/keyed-store-array-literal.js2
-rw-r--r--deps/v8/test/mjsunit/maglev/add-smi.js5
-rw-r--r--deps/v8/test/mjsunit/maglev/continuation-after-inlined.js29
-rw-r--r--deps/v8/test/mjsunit/maglev/exceptions.js39
-rw-r--r--deps/v8/test/mjsunit/maglev/function-apply.js126
-rw-r--r--deps/v8/test/mjsunit/maglev/inline-fresh-parent-deopt-frame.js22
-rw-r--r--deps/v8/test/mjsunit/maglev/inline-phi-leak.js24
-rw-r--r--deps/v8/test/mjsunit/maglev/lots-of-args.js46
-rw-r--r--deps/v8/test/mjsunit/maglev/math-ceil.js23
-rw-r--r--deps/v8/test/mjsunit/maglev/math-floor.js23
-rw-r--r--deps/v8/test/mjsunit/maglev/math-round.js23
-rw-r--r--deps/v8/test/mjsunit/maglev/negate.js67
-rw-r--r--deps/v8/test/mjsunit/maglev/nested-continuations.js35
-rw-r--r--deps/v8/test/mjsunit/maglev/omit-default-ctors.js2
-rw-r--r--deps/v8/test/mjsunit/maglev/phi-untagging-conversions.js135
-rw-r--r--deps/v8/test/mjsunit/maglev/polymorphic-load-number.js18
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-1403324.js29
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-1405092.js29
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-1406456.js17
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-1407606.js18
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-1411075.js17
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-1417125.js20
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-4349817-1.js23
-rw-r--r--deps/v8/test/mjsunit/maglev/regress-4349817-2.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-1383712.js26
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-1392936.js16
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-1394279.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-1403575.js14
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-1405651.js51
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-1407959.js27
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1392061.js37
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1394036.js19
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403280.js14
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403323.js15
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403399.js36
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403470.js14
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403749.js25
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1405445.js16
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416693.js22
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416795.js18
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1417386.js27
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421237.js19
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421375.js20
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421712.js20
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1422864.js32
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423580.js45
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423610.js32
-rw-r--r--deps/v8/test/mjsunit/maglev/regress/regress-crbug-1425124.js21
-rw-r--r--deps/v8/test/mjsunit/maglev/resumable-loop-context.js19
-rw-r--r--deps/v8/test/mjsunit/maglev/resumable.js17
-rw-r--r--deps/v8/test/mjsunit/maglev/shift-right-logical-smi.js6
-rw-r--r--deps/v8/test/mjsunit/maglev/shift-right-logical.js6
-rw-r--r--deps/v8/test/mjsunit/maglev/store-oddball-to-double-elements.js28
-rw-r--r--deps/v8/test/mjsunit/maglev/typedarray-out-of-bounds.js23
-rw-r--r--deps/v8/test/mjsunit/maglev/typedarray-resizablearraybuffer.js815
-rw-r--r--deps/v8/test/mjsunit/maglev/unused-checkedsmitag.js22
-rw-r--r--deps/v8/test/mjsunit/mjsunit-assert-equals.js6
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js14
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status218
-rw-r--r--deps/v8/test/mjsunit/never-optimize.js2
-rw-r--r--deps/v8/test/mjsunit/optimized-string-includes.js18
-rw-r--r--deps/v8/test/mjsunit/rab-gsab-transfer-to-worker.js232
-rw-r--r--deps/v8/test/mjsunit/regress-1400809.js11
-rw-r--r--deps/v8/test/mjsunit/regress-1417125.js20
-rw-r--r--deps/v8/test/mjsunit/regress-crbug-1359991.js2
-rw-r--r--deps/v8/test/mjsunit/regress-v8-13459.js111
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-1402270.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1320641.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-13652.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1376663.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1383362.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1385368.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1393865.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1393942.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1394663.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1400053.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1400056.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1400897.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1403742.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1404607.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1404863.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1407070.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1407349.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1408086.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1408400.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1408606.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1409058.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1412629.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1412975.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1414200.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1414376.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1414659.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1415210.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1416520.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1416697.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1416830.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1418509.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1418571.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1419636.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1419740.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1420536.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1421373.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1421685.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1422166.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1423703.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-165637.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-633998.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-804177.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-chromium-1409294.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1381404.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1383883.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1383976.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant2.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant3.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1384474.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1392577.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1393375.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1394741.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1395117.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1399695.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1399799.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1402139.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1404820.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1406774.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1407080.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1407384.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1408310.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1412938.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1415249.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1415581.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1416248.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1416395.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1417495.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1417882.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1420860.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1421198.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1421451.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1423650.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1424486.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1424699.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-13410.js15
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/export-wrapper-canonical-types.js69
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1179065.js9
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-12874.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-12945.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-13230.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-13290.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-13700.js31
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-13715.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-13732.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-13826.js51
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1388938.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1388942.js17
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1395604.js34
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1403398.js119
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1408337.js24
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1412940.js40
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1416758.js25
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1417516.js37
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1417908.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1418706.js17
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1407594.js64
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-struct-set-into-unreachable.js46
-rw-r--r--deps/v8/test/mjsunit/set-prototype-of-Object_prototype.js17
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js4
-rw-r--r--deps/v8/test/mjsunit/shared-memory/cannot-redefine-properties.js40
-rw-r--r--deps/v8/test/mjsunit/shared-memory/condition.js7
-rw-r--r--deps/v8/test/mjsunit/shared-memory/mutex-lock-twice.js19
-rw-r--r--deps/v8/test/mjsunit/shared-memory/mutex.js7
-rw-r--r--deps/v8/test/mjsunit/shared-memory/non-instance-prototype.js42
-rw-r--r--deps/v8/test/mjsunit/shared-memory/private-field.js25
-rw-r--r--deps/v8/test/mjsunit/shared-memory/private-name.js13
-rw-r--r--deps/v8/test/mjsunit/shared-memory/regress-crbug-1425710.js19
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-array-surface.js14
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-array-workers.js15
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js4
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share-large.js53
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share.js62
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-string-in-global-object-optimized.js22
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js59
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js14
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shared-value-barrier-optimization.js29
-rw-r--r--deps/v8/test/mjsunit/shared-memory/shrink-large-object.js17
-rw-r--r--deps/v8/test/mjsunit/testcfg.py9
-rw-r--r--deps/v8/test/mjsunit/tools/processor-bigint.mjs59
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.mjs4
-rw-r--r--deps/v8/test/mjsunit/turboshaft/type-inference.js118
-rw-r--r--deps/v8/test/mjsunit/turboshaft/typed-optimizations.js51
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer-array-methods.js11
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js606
-rw-r--r--deps/v8/test/mjsunit/wasm/array-bulk-operations.js131
-rw-r--r--deps/v8/test/mjsunit/wasm/array-copy-benchmark.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/array-init-from-segment.js87
-rw-r--r--deps/v8/test/mjsunit/wasm/bit-shift-right.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/call-ref.js83
-rw-r--r--deps/v8/test/mjsunit/wasm/call_indirect.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/code-flushing.js40
-rw-r--r--deps/v8/test/mjsunit/wasm/code-space-overflow.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/committed-code-exhaustion.js (renamed from deps/v8/test/mjsunit/wasm/code-space-exhaustion.js)2
-rw-r--r--deps/v8/test/mjsunit/wasm/enter-and-leave-debug-state.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/enter-debug-state.js (renamed from deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js)31
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-liftoff.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-cast-type-inference.js172
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-casts-from-any.js417
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-casts-subtypes.js557
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-async-debugger.js11
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-collections.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-global-constructors.js35
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-helpers.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-import.mjs1
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-objects.js37
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop-wasm.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-js-interop.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-nominal.js59
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-null-traps.js177
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-optimizations.js89
-rw-r--r--deps/v8/test/mjsunit/wasm/i31ref.js85
-rw-r--r--deps/v8/test/mjsunit/wasm/inlining.js166
-rw-r--r--deps/v8/test/mjsunit/wasm/load-immutable.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/log-code-after-post-message.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/max-wasm-functions.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/memory64.js68
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-value.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/origin-trial-flags.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/recognize-imports.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-globals-import.js49
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-globals.js61
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-table-js-interop.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-tables.js92
-rw-r--r--deps/v8/test/mjsunit/wasm/return-calls-eh.js113
-rw-r--r--deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/simd-lane-memory64.js93
-rw-r--r--deps/v8/test/mjsunit/wasm/speculative-inlining.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/stack-push-root.js35
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js80
-rw-r--r--deps/v8/test/mjsunit/wasm/stringrefs-exec-gc.js52
-rw-r--r--deps/v8/test/mjsunit/wasm/stringrefs-exec.js155
-rw-r--r--deps/v8/test/mjsunit/wasm/stringrefs-regressions.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/stringrefs-valid.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/stringview-valuestack.js73
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access-liftoff.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-numeric-ops.js144
-rw-r--r--deps/v8/test/mjsunit/wasm/type-based-optimizations.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-externalize-internalize.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-inlining.js639
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js55
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-source-location.js91
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-js-inlining-code-reloc.js54
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js159
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-wrapper-inlining.js55
-rw-r--r--deps/v8/test/mjsunit/wasm/wrapper-compilation.js33
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-array-buffer.js85
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-array.js133
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-base.js147
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-bigint.js122
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-builtin.js39
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-circular-reference.js20
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-class.js177
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-data-view.js127
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js80
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-function-context.js444
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-function.js270
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js25
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-holey-array.js49
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-object.js204
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-prototype.js105
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-sparse-array.js99
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-symbol.js47
-rw-r--r--deps/v8/test/mjsunit/web-snapshot/web-snapshot-typed-array.js439
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc2
-rw-r--r--deps/v8/test/test262/BUILD.gn1
-rw-r--r--deps/v8/test/test262/harness-adapt.js9
-rw-r--r--deps/v8/test/test262/harness-done.js17
-rw-r--r--deps/v8/test/test262/test262.status624
-rw-r--r--deps/v8/test/test262/testcfg.py7
-rw-r--r--deps/v8/test/unittests/BUILD.gn278
-rw-r--r--deps/v8/test/unittests/api/api-wasm-unittest.cc50
-rw-r--r--deps/v8/test/unittests/api/deserialize-unittest.cc105
-rw-r--r--deps/v8/test/unittests/api/exception-unittest.cc2
-rw-r--r--deps/v8/test/unittests/api/v8-script-unittest.cc170
-rw-r--r--deps/v8/test/unittests/assembler/assembler-x64-unittest.cc48
-rw-r--r--deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc147
-rw-r--r--deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc8
-rw-r--r--deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc2
-rw-r--r--deps/v8/test/unittests/assembler/disasm-x64-unittest.cc19
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-arm-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc)34
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc325
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-ia32-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-loong64-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-mips64-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-ppc-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc)75
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-riscv-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-riscv-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-s390-unittest.cc (renamed from deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc)16
-rw-r--r--deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc59
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc254
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc65
-rw-r--r--deps/v8/test/unittests/base/ieee754-unittest.cc245
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc18
-rw-r--r--deps/v8/test/unittests/base/threaded-list-unittest.cc37
-rw-r--r--deps/v8/test/unittests/base/vector-unittest.cc27
-rw-r--r--deps/v8/test/unittests/codegen/code-layout-unittest.cc21
-rw-r--r--deps/v8/test/unittests/codegen/code-pages-unittest.cc60
-rw-r--r--deps/v8/test/unittests/codegen/factory-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc23
-rw-r--r--deps/v8/test/unittests/compiler/codegen-tester.h10
-rw-r--r--deps/v8/test/unittests/compiler/compiler-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/function-tester.cc11
-rw-r--r--deps/v8/test/unittests/compiler/function-tester.h4
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.h1
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h1
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc74
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc38
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc372
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc1
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h2
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/revec-unittest.cc239
-rw-r--r--deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/run-deopt-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc324
-rw-r--r--deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc346
-rw-r--r--deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc787
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc12
-rw-r--r--deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc17
-rw-r--r--deps/v8/test/unittests/flags/flag-definitions-unittest.cc14
-rw-r--r--deps/v8/test/unittests/heap/allocation-observer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc18
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc287
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc34
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc347
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc25
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h9
-rw-r--r--deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc401
-rw-r--r--deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc62
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc42
-rw-r--r--deps/v8/test/unittests/heap/cppgc/logging-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/member-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc208
-rw-r--r--deps/v8/test/unittests/heap/cppgc/test-platform.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h1
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc1212
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc19
-rw-r--r--deps/v8/test/unittests/heap/global-handles-unittest.cc130
-rw-r--r--deps/v8/test/unittests/heap/global-safepoint-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc24
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.cc13
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.h30
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc18
-rw-r--r--deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc164
-rw-r--r--deps/v8/test/unittests/heap/marking-worklist-unittest.cc46
-rw-r--r--deps/v8/test/unittests/heap/memory-reducer-unittest.cc247
-rw-r--r--deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc179
-rw-r--r--deps/v8/test/unittests/heap/shared-heap-unittest.cc33
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc17
-rw-r--r--deps/v8/test/unittests/inspector/inspector-unittest.cc48
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden28
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden9
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden20
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden9
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden33
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden30
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-unittest.cc28
-rw-r--r--deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc3
-rw-r--r--deps/v8/test/unittests/libplatform/tracing-unittest.cc20
-rw-r--r--deps/v8/test/unittests/logging/log-unittest.cc9
-rw-r--r--deps/v8/test/unittests/objects/concurrent-string-unittest.cc4
-rw-r--r--deps/v8/test/unittests/objects/roots-unittest.cc11
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc172
-rw-r--r--deps/v8/test/unittests/objects/weakmaps-unittest.cc6
-rw-r--r--deps/v8/test/unittests/parser/decls-unittest.cc2
-rw-r--r--deps/v8/test/unittests/parser/parsing-unittest.cc12
-rw-r--r--deps/v8/test/unittests/regexp/regexp-unittest.cc7
-rw-r--r--deps/v8/test/unittests/test-utils.cc20
-rw-r--r--deps/v8/test/unittests/test-utils.h36
-rw-r--r--deps/v8/test/unittests/testcfg.py20
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc2
-rw-r--r--deps/v8/test/unittests/unittests.status35
-rw-r--r--deps/v8/test/unittests/utils/identity-map-unittest.cc1
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc137
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc199
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc5
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc29
-rw-r--r--deps/v8/test/unittests/wasm/memory-protection-unittest.cc66
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc356
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc80
-rw-r--r--deps/v8/test/unittests/wasm/struct-types-unittest.cc70
-rw-r--r--deps/v8/test/unittests/wasm/subtyping-unittest.cc157
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-arm64-unittest.cc (renamed from deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc)145
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc145
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc135
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc107
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc1
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc31
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc9
-rw-r--r--deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc49
-rw-r--r--deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc1135
-rw-r--r--deps/v8/test/unittests/zone/zone-vector-unittest.cc373
-rw-r--r--deps/v8/test/wasm-api-tests/callbacks.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/finalize.cc7
-rw-r--r--deps/v8/test/wasm-api-tests/serialize.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/table.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/traps.cc9
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-test.h4
-rw-r--r--deps/v8/test/wasm-js/testcfg.py34
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status8
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py35
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status34
-rw-r--r--deps/v8/test/webkit/testcfg.py2
-rw-r--r--deps/v8/test/webkit/webkit.status7
-rw-r--r--deps/v8/third_party/glibc/LICENSE502
-rw-r--r--deps/v8/third_party/glibc/METADATA18
-rw-r--r--deps/v8/third_party/glibc/README.v88
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.c143
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.h79
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h38
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h21
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h34
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c312
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c913
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h14
-rw-r--r--deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h47
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/maybe.h2
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/protocol_core.h2
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/status.h17
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gni8
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.md51
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.sha5121
-rw-r--r--deps/v8/third_party/jinja2/README.chromium10
-rw-r--r--deps/v8/third_party/jinja2/README.rst26
-rw-r--r--deps/v8/third_party/jinja2/__init__.py71
-rw-r--r--deps/v8/third_party/jinja2/_compat.py132
-rw-r--r--deps/v8/third_party/jinja2/_identifier.py2
-rw-r--r--deps/v8/third_party/jinja2/async_utils.py84
-rw-r--r--deps/v8/third_party/jinja2/asyncfilters.py158
-rw-r--r--deps/v8/third_party/jinja2/asyncsupport.py264
-rw-r--r--deps/v8/third_party/jinja2/bccache.py190
-rw-r--r--deps/v8/third_party/jinja2/compiler.py1202
-rw-r--r--deps/v8/third_party/jinja2/constants.py3
-rw-r--r--deps/v8/third_party/jinja2/debug.py199
-rw-r--r--deps/v8/third_party/jinja2/defaults.py18
-rw-r--r--deps/v8/third_party/jinja2/environment.py1095
-rw-r--r--deps/v8/third_party/jinja2/exceptions.py103
-rw-r--r--deps/v8/third_party/jinja2/ext.py497
-rw-r--r--deps/v8/third_party/jinja2/filters.py1080
-rw-r--r--deps/v8/third_party/jinja2/idtracking.py176
-rw-r--r--deps/v8/third_party/jinja2/jinja2.gni4
-rw-r--r--deps/v8/third_party/jinja2/lexer.py442
-rw-r--r--deps/v8/third_party/jinja2/loaders.py425
-rw-r--r--deps/v8/third_party/jinja2/meta.py62
-rw-r--r--deps/v8/third_party/jinja2/nativetypes.py70
-rw-r--r--deps/v8/third_party/jinja2/nodes.py502
-rw-r--r--deps/v8/third_party/jinja2/optimizer.py18
-rw-r--r--deps/v8/third_party/jinja2/parser.py341
-rw-r--r--deps/v8/third_party/jinja2/patches/0001-jinja2-make-compiled-template-deterministic-for-pyth.patch30
-rw-r--r--deps/v8/third_party/jinja2/py.typed0
-rw-r--r--deps/v8/third_party/jinja2/runtime.py816
-rw-r--r--deps/v8/third_party/jinja2/sandbox.py264
-rw-r--r--deps/v8/third_party/jinja2/tests.py114
-rw-r--r--deps/v8/third_party/jinja2/utils.py594
-rw-r--r--deps/v8/third_party/jinja2/visitor.py35
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq6
-rw-r--r--deps/v8/third_party/zlib/BUILD.gn17
-rw-r--r--deps/v8/third_party/zlib/CMakeLists.txt234
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/chunkcopy.h2
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c71
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.h26
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/insert_string.h5
-rw-r--r--deps/v8/third_party/zlib/google/BUILD.gn6
-rw-r--r--deps/v8/third_party/zlib/google/OWNERS2
-rw-r--r--deps/v8/third_party/zlib/google/zip.cc2
-rw-r--r--deps/v8/third_party/zlib/google/zip.h2
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc18
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.h2
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader_unittest.cc2
-rw-r--r--deps/v8/third_party/zlib/google/zip_unittest.cc2
-rw-r--r--deps/v8/third_party/zlib/patches/0010-cmake-enable-simd.patch96
-rw-r--r--deps/v8/third_party/zlib/zconf.h.cmakein549
-rw-r--r--deps/v8/third_party/zlib/zlib.3149
-rw-r--r--deps/v8/third_party/zlib/zlib.pc.cmakein13
-rw-r--r--deps/v8/tools/.vpython391
-rw-r--r--deps/v8/tools/BUILD.gn14
-rw-r--r--deps/v8/tools/PRESUBMIT.py17
-rwxr-xr-xdeps/v8/tools/android-sync.sh2
-rwxr-xr-xdeps/v8/tools/bash-completion.sh4
-rw-r--r--deps/v8/tools/bazel/bazel.sha12
-rw-r--r--deps/v8/tools/builtins-pgo/arm.profile6371
-rw-r--r--deps/v8/tools/builtins-pgo/arm64.profile6403
-rwxr-xr-xdeps/v8/tools/builtins-pgo/download_profiles.py151
-rw-r--r--deps/v8/tools/builtins-pgo/download_profiles_test.py62
-rwxr-xr-xdeps/v8/tools/builtins-pgo/generate.py7
-rwxr-xr-xdeps/v8/tools/builtins-pgo/profile_only.py83
-rw-r--r--deps/v8/tools/builtins-pgo/profiles/.gitkeep0
-rw-r--r--deps/v8/tools/builtins-pgo/x64.profile6415
-rw-r--r--deps/v8/tools/callstats.html2
-rw-r--r--deps/v8/tools/callstats_groups.py1
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt2
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt2
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt2
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt2
-rwxr-xr-xdeps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py18
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json9
-rw-r--r--deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json4
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh0
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js553
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js11
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js32
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js16
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js17
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutation_order.js56
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js23
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/input.js23
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js119
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js3
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js1
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py5
-rw-r--r--deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json15
-rw-r--r--deps/v8/tools/codemap.mjs18
-rw-r--r--deps/v8/tools/debug_helper/BUILD.gn9
-rw-r--r--deps/v8/tools/debug_helper/debug-helper-internal.cc9
-rw-r--r--deps/v8/tools/debug_helper/gen-heap-constants.py2
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc40
-rwxr-xr-xdeps/v8/tools/dev/gen-static-roots.py130
-rwxr-xr-xdeps/v8/tools/dev/gm.py471
-rwxr-xr-xdeps/v8/tools/dev/update-compile-commands.py25
-rwxr-xr-xdeps/v8/tools/dev/update-vscode.sh26
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn91
-rw-r--r--deps/v8/tools/gcmole/PRESUBMIT.py32
-rw-r--r--deps/v8/tools/gcmole/gcmole-test.cc10
-rwxr-xr-xdeps/v8/tools/gcmole/gcmole.py590
-rw-r--r--deps/v8/tools/gcmole/gcmole_args.py64
-rw-r--r--deps/v8/tools/gcmole/gcmole_test.py436
-rwxr-xr-xdeps/v8/tools/gcmole/run-gcmole.py25
-rw-r--r--deps/v8/tools/gcmole/test-expectations.txt203
-rw-r--r--deps/v8/tools/gcmole/testdata/v8/BUILD.gn35
-rw-r--r--deps/v8/tools/gcmole/testdata/v8/out/obj/v8_base_without_compiler.ninja7
-rw-r--r--deps/v8/tools/gcmole/testdata/v8/test/cctest/BUILD.gn12
-rw-r--r--deps/v8/tools/gdbinit7
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py667
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py2
-rw-r--r--deps/v8/tools/heap-stats/categories.js2
-rw-r--r--deps/v8/tools/logreader.mjs27
-rwxr-xr-xdeps/v8/tools/process-wasm-compilation-times.py14
-rw-r--r--deps/v8/tools/profile.mjs12
-rwxr-xr-xdeps/v8/tools/profiling/linux-perf-chrome.py16
-rw-r--r--deps/v8/tools/profview/profile-utils.js12
-rw-r--r--deps/v8/tools/profview/profview.js8
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py35
-rw-r--r--deps/v8/tools/release/common_includes.py21
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py53
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/run_perf.py32
-rw-r--r--deps/v8/tools/sanitizers/sancov_formatter_test.py122
-rw-r--r--deps/v8/tools/snapshot/compare_mksnapshot_output.py59
-rw-r--r--deps/v8/tools/system-analyzer/log/tick.mjs2
-rw-r--r--deps/v8/tools/system-analyzer/processor.mjs70
-rw-r--r--deps/v8/tools/system-analyzer/view/helper.mjs4
-rw-r--r--deps/v8/tools/system-analyzer/view/property-link-table.mjs4
-rw-r--r--deps/v8/tools/system-analyzer/view/script-panel.mjs170
-rw-r--r--deps/v8/tools/testrunner/base_runner.py59
-rw-r--r--deps/v8/tools/testrunner/build_config.py66
-rw-r--r--deps/v8/tools/testrunner/local/command.py63
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py13
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py17
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_test.py6
-rw-r--r--deps/v8/tools/testrunner/local/variants.py137
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py3
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py118
-rw-r--r--deps/v8/tools/testrunner/objects/testcase_test.py74
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py3
-rw-r--r--deps/v8/tools/testrunner/standard_runner_test.py50
-rw-r--r--deps/v8/tools/testrunner/test_config.py6
-rw-r--r--deps/v8/tools/testrunner/testdata/expected_test_results1.json246
-rw-r--r--deps/v8/tools/testrunner/testdata/expected_test_results2.json156
-rw-r--r--deps/v8/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json17
-rw-r--r--deps/v8/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json17
-rw-r--r--deps/v8/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json17
-rw-r--r--deps/v8/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json17
-rw-r--r--deps/v8/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json17
-rw-r--r--deps/v8/tools/testrunner/testproc/base.py10
-rw-r--r--deps/v8/tools/testrunner/testproc/expectation.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py18
-rw-r--r--deps/v8/tools/testrunner/testproc/indicators.py13
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py5
-rw-r--r--deps/v8/tools/testrunner/testproc/rerun.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/resultdb.py61
-rw-r--r--deps/v8/tools/testrunner/testproc/seed.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/shard.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/__init__.py3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/clusterfuzz_fakes.py30
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util.py107
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util_test.py65
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.txt20
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.txt91
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.txt118
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.txt50
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.txt43
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.txt48
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.txt30
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.txt33
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.txt30
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.txt26
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.txt55
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.txt37
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.txt21
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.txt24
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.txt13
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.txt9
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.txt15
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.txt60
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.txt37
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.txt37
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.txt35
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.txt24
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.txt1
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.txt5
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.expected.json4
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.txt28
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.txt91
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.txt118
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.txt50
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.txt190
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.txt198
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.txt112
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.txt510
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.expected3
-rw-r--r--deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.txt128
-rw-r--r--deps/v8/tools/testrunner/testproc/util.py27
-rw-r--r--deps/v8/tools/testrunner/testproc/variant.py7
-rw-r--r--deps/v8/tools/testrunner/utils/augmented_options.py5
-rw-r--r--deps/v8/tools/testrunner/utils/test_utils.py24
-rw-r--r--deps/v8/tools/torque/vim-torque/syntax/torque.vim2
-rwxr-xr-xdeps/v8/tools/try_perf.py1
-rw-r--r--deps/v8/tools/turbolizer/css/turbo-visualizer-ranges.css298
-rw-r--r--deps/v8/tools/turbolizer/css/turbo-visualizer.css12
-rw-r--r--deps/v8/tools/turbolizer/src/common/constants.ts6
-rw-r--r--deps/v8/tools/turbolizer/src/common/util.ts4
-rw-r--r--deps/v8/tools/turbolizer/src/common/view-elements.ts2
-rw-r--r--deps/v8/tools/turbolizer/src/graphmultiview.ts2
-rw-r--r--deps/v8/tools/turbolizer/src/phases/instructions-phase.ts41
-rw-r--r--deps/v8/tools/turbolizer/src/phases/schedule-phase.ts22
-rw-r--r--deps/v8/tools/turbolizer/src/phases/sequence-phase.ts84
-rw-r--r--deps/v8/tools/turbolizer/src/resizer.ts23
-rw-r--r--deps/v8/tools/turbolizer/src/selection/selection-broker.ts30
-rw-r--r--deps/v8/tools/turbolizer/src/selection/selection-handler.ts12
-rw-r--r--deps/v8/tools/turbolizer/src/selection/selection-map.ts11
-rw-r--r--deps/v8/tools/turbolizer/src/selection/selection-storage.ts14
-rw-r--r--deps/v8/tools/turbolizer/src/source-resolver.ts12
-rw-r--r--deps/v8/tools/turbolizer/src/source.ts6
-rw-r--r--deps/v8/tools/turbolizer/src/views/bytecode-source-view.ts5
-rw-r--r--deps/v8/tools/turbolizer/src/views/disassembly-view.ts28
-rw-r--r--deps/v8/tools/turbolizer/src/views/graph-view.ts13
-rw-r--r--deps/v8/tools/turbolizer/src/views/range-view.ts1101
-rw-r--r--deps/v8/tools/turbolizer/src/views/schedule-view.ts32
-rw-r--r--deps/v8/tools/turbolizer/src/views/sequence-view.ts77
-rw-r--r--deps/v8/tools/turbolizer/src/views/text-view.ts249
-rw-r--r--deps/v8/tools/turbolizer/src/views/turboshaft-graph-view.ts27
-rw-r--r--deps/v8/tools/turbolizer/tslint.json1
-rw-r--r--deps/v8/tools/ubsan/ignorelist.txt3
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py48
-rw-r--r--deps/v8/tools/v8heapconst.py935
-rw-r--r--deps/v8/tools/v8windbg/src/cur-isolate.cc49
-rw-r--r--deps/v8/tools/v8windbg/src/cur-isolate.h4
-rw-r--r--deps/v8/tools/v8windbg/src/v8windbg-extension.cc2
-rw-r--r--deps/v8/tools/v8windbg/test/v8windbg-test.cc43
-rw-r--r--deps/v8/tools/vim/ninja-build.vim2
-rw-r--r--deps/v8/tools/wasm/BUILD.gn2
-rw-r--r--deps/v8/tools/wasm/module-inspector.cc249
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh2
-rw-r--r--deps/v8/tools/whitespace.txt6
-rw-r--r--deps/v8/tools/windbg.js2
2560 files changed, 174760 insertions, 108286 deletions
diff --git a/deps/v8/.git-blame-ignore-revs b/deps/v8/.git-blame-ignore-revs
index 4c53e208e3..29372f34c1 100644
--- a/deps/v8/.git-blame-ignore-revs
+++ b/deps/v8/.git-blame-ignore-revs
@@ -18,11 +18,106 @@
# - Because you must use a hash, you need to append to this list in a follow-up
# CL to the actual reformatting CL that you are trying to ignore.
+# objects.h splitting
+0604031eb1d01c52b6c1c9ae3012d80b23d74a68
+09e405453359000d66cc0faaa102854e626bebeb
+766ef168fbcac6bd0728cc2c9bb3ae7cbd74278a
+b5a2839b927be04bdb50a236071c11764e4d6400
+c911f91b5b6219e038c0117b05a8375bdf3db0b0
+1bb48bf91ba8c887884a0fbd674c91f64964d8a5
+19da9f24df7b59fec72b9dd8a979ad0ce5639c87
+b090d7e7468236ffce0afdb55bb496bf0073f2ee
+f40638d148b7a435522d5b714993908061e3b10d
+e8a1c25f6afae9b77921abb70fad49da252eb6f0
+6fa8283d0e031c9585d190f751907ed45bf85de0
+9aa861c4bcfed612039259f93c2cd2b01337e99e
+8175648018bd9f70af866f9fa433f1d79644d86b
+c7b1ceb801ec7f639a093468d8e6424212cc197c
+e39d2cbe1b1baa6513ddce2d73c981e335cc34fb
+eda00a5c499b7a83479115eb275a816b8a2ed104
+68deca9b418976ca8b3375e81058a9e0a815357f
+0525e17847f39f80e3fd163021a58f68d8fcaf06
+81a3c699d6eef936452ac3d10c7c59a2c1e38c0c
+01452bedfca2b5447a7f62bda87edbbb76259a6e
+1baf1050113a5418696839c273e05ea5ad1b5c4d
+4b39fe3d608916b1cfea015de287511a1623fc7f
+c6effdbba9b301244475553538f6eb1b3d9670b9
+71e4c573199466ea4541e3d6b307c9b33d7bb785
+efc92f0d4aa77bb90f5b56606b6f0d0819fba4af
+a9db2c74b5bae2345ac52be404748954a3b5050d
+0a01b6202226bbe99c0b83acf6c5a80344f5fb6a
+a6c44361c8f2dc07b935e3f2bb3e0d3ad4f4a383
+10d8aab1de430695a69e9d75af6ea42c2cdc9d6d
+dd3c4fca2f0a2761b8b95cd47fcd62836d714890
+e9c932233980866074025e65051003d1f298516c
+2b1f79881c3f0b69bfb9274bda57ea50f7304982
+7f031160d71a3d836667dc98288eaff4c94e6f56
+490fabb4578f8a3c4096fdccff688c17ed5ed00d
+d953b2ab726acca0b3abe90ce090a16d7ccc2ae3
+bb514c426b9438cfb1149d219ac4ec2d8d1c8458
+dfb453d713d8a05e76f720a6aae2871eec210276
+b490fd66b873c89fca37b21eab58502b6367a864
+9a71683d9c8ff9470eda6be5b2b11babac7b9863
+37945f731c4d800ef788e3c32f8663773a93450e
+b90c98fc29a8d896354de4a22c055f6d98376171
+35f3e9d0e654e84646a0b98f29e4a2786cdca4b1
+260eb5bb9b62ea3d5fa6ad0b0e8c2de75d48bad4
+cc2c11441ce352360acce8638a19f58edf361f7d
+7be0159e4b1e0b064e215ae4ced34d649cb2552e
+95a7cfe0eaabbcff0f730ed60e1805779f6cfe41
+8f54d18ba4ad10770e9537a2803459feccfe79a3
+f44759d9ff52a3e5563e5f2bb23ee2c08222fcfd
+09050c8a967f5f2956305e5d016b304d7bf5e669
+c769745d5856a7eb3a0dbe6af5376c7638944364
+a1547aa914aeedd7862f74124c18d2bbaf432c36
+5f950698c0dc7c36b855961feb929022f74102fb
+4aedeb1bd50c12ebcd6cf954c4cbef1205fff5ac
+7366d8954cb1bd277d3283241da2fae62b886c48
+bc35251f5e55a65c3a4acf7cba52cee505c86a46
+4fb60b215801db70c694a799e735b64bfead59bb
+03762b8488de0e393077e3f40fe7b63e675b3af3
+a8a45d875f0a98b192cf0063ceda12aaf75ddfaf
+a48e5ab8804e9e97b5ea577d6f2667bacee92eb2
+
# Update of quotations in DEPS file.
e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
# Rewrite code base to use "." instead of "->" to access Object members.
878ccb33bd3cf0e6dc018ff8d15843f585ac07be
+# Splitting src/ into subfolders
+632239011db501e76475d82ff6492f37fa8c1edc
+f455f86d899716df3b9550950ce172f5b867619a
+24a51e1eee4e286165dd0bba6afb4c35e8177a25
+f9a88acbc928f0fc5e9a3acbcd3b4ece52355f3d
+dec3298d9cfbe95759774a0e00302a08836b5f3d
+a0c3797461810e3159662851e64946e17654236e
+b72941e8b0d2843adf768442024d8950da798db1
+4c986c625f19e35c95f3492c662822f4695218b4
+0fa243af7096ee5b748b194476be2e4efecaec59
+786ce26341b7ab11b4d42f1c77202530d5138ad2
+a6eeea35cb7ff0c29b6cfdd1c786f382110241ce
+be014256adea1552d4a044ef80616cdab6a7d549
+93d3b7173fec7d010539057cdbd78d497f09fa9b
+5bfe84a0dab60289b3470c080908ce83ac2212d4
+a7695520556665ba73ab02c497ab73b162a5fb13
+61523c45a335fe3be76498e0b16bf8e7aec0d058
+bf372a73d8a5f4029fc9f4f69b675ef0cad80ada
+8ad6b335376c6275ffb3361c662a1a45c853f4fc
+06bf8261cf2c94fc071652652600b5790f719c05
+81a0102fe8586071cc68e9595b26c5c1207ee5b3
+5f28539599f6a6a265e18b8c897cc96ccbeec9c4
+3253767622a784866dc34aeb7b5d0f02ebdff61e
+9ac8b20086f95f1158a1901eefe12e25fd0333e4
+3cb560adfe26edb586a0e6e655e5a7c4755cad1a
+7bbd0bfe5161d57bcf268716ce4d1ce14d6786e6
+c39cabbcbea26891558b81fd2236c38a7aeada08
+a3187716d31a0ab9d7051adde6be9bd2b2c6fec1
+
# Move test/mjsunit/regress-*.js => test/mjsunit/regress/
cb67be1a3842fcf6a0da18aee444e3b7ea789e04
+
+# [include] Split out v8.h
+d1b27019d3bf86360ea838c317f8505fac6d3a7e
+44fe02ced6e4c6b49d627807e3b3fd0edbbeb36e
+ec06bb6ce5641cf65e400ec55b7421f87d04b999
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index f1bd2f33fe..ed10b52270 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -28,6 +28,7 @@
.clangd
.cpplint-cache
.cproject
+.DS_Store
.gclient_entries
.gdb_history
.idea
@@ -67,6 +68,7 @@
/third_party/jsoncpp/source
!/third_party/colorama
/third_party/colorama/src
+!/third_party/glibc
!/third_party/googletest
/third_party/googletest/src/*
!/third_party/googletest/src/googletest
@@ -79,6 +81,7 @@
!/third_party/test262-harness
!/third_party/v8
!/third_party/wasm-api
+/tools/builtins-pgo/profiles/*.profile
/tools/clang
/tools/gcmole/bootstrap
/tools/gcmole/gcmole-tools
diff --git a/deps/v8/.style.yapf b/deps/v8/.style.yapf
index de0c6a70f3..fdd07237cb 100644
--- a/deps/v8/.style.yapf
+++ b/deps/v8/.style.yapf
@@ -1,2 +1,2 @@
[style]
-based_on_style = chromium
+based_on_style = yapf
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index c9d4c6eb4c..d84c722b8d 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -44,6 +44,7 @@ CodeWeavers, Inc. <*@codeweavers.com>
Alibaba, Inc. <*@alibaba-inc.com>
SiFive, Inc. <*@sifive.com>
+Aapo Alasuutari <aapo.alasuutari@gmail.com>
Aaron Bieber <deftly@gmail.com>
Aaron O'Mullan <aaron.omullan@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
@@ -81,6 +82,7 @@ Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Chao Wang <chao.w@rioslab.org>
Charles Kerr <charles@charleskerr.com>
+Cheng Zhao <zcbenz@gmail.com>
Chengzhong Wu <legendecas@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Nardi <hichris123@gmail.com>
@@ -153,6 +155,8 @@ Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
+Johan Levin <johan13@gmail.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Jonathan Liu <net147@gmail.com>
Juan Arboleda <soyjuanarbol@gmail.com>
Julien Brianceau <jbriance@cisco.com>
@@ -256,6 +260,7 @@ Ujjwal Sharma <usharma1998@gmail.com>
Vadim Gorbachev <bmsdave@gmail.com>
Varun Varada <varuncvarada@gmail.com>
Victor Costan <costan@gmail.com>
+Victor Polevoy <fx@thefx.co>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
@@ -286,3 +291,5 @@ Zheng Liu <i6122f@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
Yang Xiang <xiangyangemail@gmail.com>
+Kotaro Ohsugi <dec4m4rk@gmail.com>
+Jing Peiyang <jingpeiyang@eswincomputing.com>
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index 3be7da8f3b..81a9286d26 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -16,7 +16,8 @@ load(
"v8_library",
"v8_mksnapshot",
"v8_string",
- "v8_torque",
+ "v8_torque_definitions",
+ "v8_torque_initializers",
)
load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
@@ -42,6 +43,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# v8_enable_conservative_stack_scanning
# v8_enable_concurrent_marking
# v8_enable_ignition_dispatch_counting
+# v8_enable_builtins_optimization
# v8_enable_builtins_profiling
# v8_enable_builtins_profiling_verbose
# v8_builtins_profiling_log_file
@@ -68,6 +70,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# v8_enable_sandbox
# cppgc_enable_caged_heap
# cppgc_enable_check_assignments_in_prefinalizers
+# cppgc_enable_slim_write_barrier
# cppgc_enable_object_names
# cppgc_enable_pointer_compression
# cppgc_enable_verify_heap
@@ -132,6 +135,8 @@ v8_flag(name = "v8_enable_snapshot_code_comments")
v8_flag(name = "v8_enable_snapshot_native_code_counters")
+v8_flag(name = "v8_enable_static_roots")
+
v8_flag(name = "v8_enable_trace_maps")
v8_flag(name = "v8_enable_v8_checks")
@@ -145,10 +150,20 @@ v8_flag(name = "v8_enable_verify_predictable")
v8_flag(name = "v8_enable_test_features")
v8_flag(
+ name = "v8_enable_turbofan",
+ default = True,
+)
+
+v8_flag(
name = "v8_enable_webassembly",
default = True,
)
+v8_flag(
+ name = "v8_jitless",
+ default = False,
+)
+
v8_int(
name = "v8_typed_array_max_size_in_heap",
default = 64,
@@ -316,16 +331,18 @@ v8_config(
"v8_enable_slow_dchecks": "ENABLE_SLOW_DCHECKS",
"v8_enable_runtime_call_stats": "V8_RUNTIME_CALL_STATS",
"v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS",
+ "v8_enable_static_roots": "V8_STATIC_ROOTS",
"v8_enable_trace_maps": "V8_TRACE_MAPS",
+ "v8_enable_turbofan": "V8_ENABLE_TURBOFAN",
"v8_enable_v8_checks": "V8_ENABLE_CHECKS",
"v8_enable_verify_csa": "ENABLE_VERIFY_CSA",
"v8_enable_verify_heap": "VERIFY_HEAP",
"v8_enable_verify_predictable": "VERIFY_PREDICTABLE",
"v8_enable_webassembly": "V8_ENABLE_WEBASSEMBLY",
+ "v8_jitless": "V8_JITLESS",
},
defines = [
"GOOGLE3",
- "ENABLE_DEBUGGER_SUPPORT",
"V8_ADVANCED_BIGINT_ALGORITHMS",
"V8_CONCURRENT_MARKING",
] + select({
@@ -595,6 +612,7 @@ filegroup(
"src/base/build_config.h",
"src/base/compiler-specific.h",
"src/base/container-utils.h",
+ "src/base/contextual.h",
"src/base/cpu.cc",
"src/base/cpu.h",
"src/base/debug/stack_trace.cc",
@@ -653,6 +671,7 @@ filegroup(
"src/base/platform/mutex.cc",
"src/base/platform/mutex.h",
"src/base/platform/platform.h",
+ "src/base/platform/platform.cc",
"src/base/platform/semaphore.cc",
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
@@ -706,7 +725,6 @@ filegroup(
"@v8//bazel/config:is_macos": [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-darwin.cc",
- "src/base/platform/platform-macos.cc",
],
"@v8//bazel/config:is_windows": [
"src/base/win32-headers.h",
@@ -816,6 +834,8 @@ filegroup(
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",
"src/builtins/iterator.tq",
+ "src/builtins/iterator-from.tq",
+ "src/builtins/iterator-helpers.tq",
"src/builtins/math.tq",
"src/builtins/number.tq",
"src/builtins/object-fromentries.tq",
@@ -859,6 +879,7 @@ filegroup(
"src/builtins/string-html.tq",
"src/builtins/string-includes.tq",
"src/builtins/string-indexof.tq",
+ "src/builtins/string-iswellformed.tq",
"src/builtins/string-iterator.tq",
"src/builtins/string-match-search.tq",
"src/builtins/string-pad.tq",
@@ -868,6 +889,7 @@ filegroup(
"src/builtins/string-startswith.tq",
"src/builtins/string-substr.tq",
"src/builtins/string-substring.tq",
+ "src/builtins/string-towellformed.tq",
"src/builtins/string-trim.tq",
"src/builtins/symbol.tq",
"src/builtins/torque-internal.tq",
@@ -924,6 +946,7 @@ filegroup(
"src/objects/js-collection.tq",
"src/objects/js-function.tq",
"src/objects/js-generator.tq",
+ "src/objects/js-iterator-helpers.tq",
"src/objects/js-objects.tq",
"src/objects/js-promise.tq",
"src/objects/js-proxy.tq",
@@ -963,6 +986,7 @@ filegroup(
"src/objects/templates.tq",
"src/objects/torque-defined-classes.tq",
"src/objects/turbofan-types.tq",
+ "src/objects/turboshaft-types.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
] + select({
@@ -1008,7 +1032,6 @@ filegroup(
"src/torque/cfg.h",
"src/torque/class-debug-reader-generator.cc",
"src/torque/constants.h",
- "src/torque/contextual.h",
"src/torque/cpp-builder.cc",
"src/torque/cpp-builder.h",
"src/torque/csa-generator.cc",
@@ -1152,7 +1175,6 @@ filegroup(
"src/builtins/builtins-utils-inl.h",
"src/builtins/builtins-utils.h",
"src/builtins/builtins-weak-refs.cc",
- "src/builtins/builtins-web-snapshots.cc",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/builtins/constants-table-builder.cc",
@@ -1231,8 +1253,8 @@ filegroup(
"src/codegen/tick-counter.h",
"src/codegen/tnode.cc",
"src/codegen/tnode.h",
- "src/codegen/turbo-assembler.cc",
- "src/codegen/turbo-assembler.h",
+ "src/codegen/macro-assembler-base.cc",
+ "src/codegen/macro-assembler-base.h",
"src/codegen/unoptimized-compilation-info.cc",
"src/codegen/unoptimized-compilation-info.h",
"src/common/assert-scope.cc",
@@ -1245,11 +1267,13 @@ filegroup(
"src/common/message-template.h",
"src/common/operation.h",
"src/common/ptr-compr-inl.h",
+ "src/common/ptr-compr.cc",
"src/common/ptr-compr.h",
"src/compiler-dispatcher/lazy-compile-dispatcher.cc",
"src/compiler-dispatcher/lazy-compile-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
+ "src/compiler/turbofan.h",
"src/date/date.cc",
"src/date/date.h",
"src/date/dateparser-inl.h",
@@ -1429,6 +1453,8 @@ filegroup(
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h",
+ "src/heap/cppgc-js/cross-heap-remembered-set.cc",
+ "src/heap/cppgc-js/cross-heap-remembered-set.h",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
@@ -1436,9 +1462,8 @@ filegroup(
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
- "src/heap/embedder-tracing.cc",
- "src/heap/embedder-tracing.h",
- "src/heap/embedder-tracing-inl.h",
+ "src/heap/cppgc-js/wrappable-info.h",
+ "src/heap/cppgc-js/wrappable-info-inl.h",
"src/heap/evacuation-verifier.cc",
"src/heap/evacuation-verifier.h",
"src/heap/evacuation-verifier-inl.h",
@@ -1459,8 +1484,6 @@ filegroup(
"src/heap/gc-tracer.cc",
"src/heap/gc-tracer-inl.h",
"src/heap/gc-tracer.h",
- "src/heap/global-handle-marking-visitor.cc",
- "src/heap/global-handle-marking-visitor.h",
"src/heap/heap-allocator-inl.h",
"src/heap/heap-allocator.cc",
"src/heap/heap-allocator.h",
@@ -1528,6 +1551,7 @@ filegroup(
"src/heap/new-spaces-inl.h",
"src/heap/new-spaces.cc",
"src/heap/new-spaces.h",
+ "src/heap/object-lock.h",
"src/heap/object-stats.cc",
"src/heap/object-stats.h",
"src/heap/objects-visiting-inl.h",
@@ -1551,8 +1575,8 @@ filegroup(
"src/heap/remembered-set.h",
"src/heap/safepoint.cc",
"src/heap/safepoint.h",
- "src/heap/scavenge-job.cc",
- "src/heap/scavenge-job.h",
+ "src/heap/minor-gc-job.cc",
+ "src/heap/minor-gc-job.h",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
@@ -1561,12 +1585,12 @@ filegroup(
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
- "src/heap/stress-marking-observer.cc",
- "src/heap/stress-marking-observer.h",
"src/heap/stress-scavenge-observer.cc",
"src/heap/stress-scavenge-observer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
+ "src/heap/traced-handles-marking-visitor.cc",
+ "src/heap/traced-handles-marking-visitor.h",
"src/heap/weak-object-worklists.cc",
"src/heap/weak-object-worklists.h",
"src/ic/call-optimization.cc",
@@ -1662,6 +1686,8 @@ filegroup(
"src/numbers/conversions.cc",
"src/numbers/conversions.h",
"src/numbers/hash-seed-inl.h",
+ "src/numbers/integer-literal-inl.h",
+ "src/numbers/integer-literal.h",
"src/numbers/math-random.cc",
"src/numbers/math-random.h",
"src/objects/all-objects-inl.h",
@@ -1758,6 +1784,8 @@ filegroup(
"src/objects/js-function.h",
"src/objects/js-generator-inl.h",
"src/objects/js-generator.h",
+ "src/objects/js-iterator-helpers-inl.h",
+ "src/objects/js-iterator-helpers.h",
"src/objects/js-objects-inl.h",
"src/objects/js-objects.cc",
"src/objects/js-objects.h",
@@ -1777,6 +1805,7 @@ filegroup(
"src/objects/js-shadow-realm-inl.h",
"src/objects/js-shared-array.h",
"src/objects/js-shared-array-inl.h",
+ "src/objects/js-struct.cc",
"src/objects/js-struct.h",
"src/objects/js-struct-inl.h",
"src/objects/js-temporal-objects.h",
@@ -1915,6 +1944,8 @@ filegroup(
"src/objects/transitions.h",
"src/objects/turbofan-types-inl.h",
"src/objects/turbofan-types.h",
+ "src/objects/turboshaft-types-inl.h",
+ "src/objects/turboshaft-types.h",
"src/objects/type-hints.cc",
"src/objects/type-hints.h",
"src/objects/value-serializer.cc",
@@ -2031,6 +2062,7 @@ filegroup(
"src/roots/roots-inl.h",
"src/roots/roots.cc",
"src/roots/roots.h",
+ "src/roots/static-roots.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-bigint.cc",
@@ -2190,8 +2222,6 @@ filegroup(
"src/utils/utils.h",
"src/utils/version.cc",
"src/utils/version.h",
- "src/web-snapshot/web-snapshot.h",
- "src/web-snapshot/web-snapshot.cc",
"src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
"src/zone/compressed-zone-ptr.h",
@@ -2233,10 +2263,6 @@ filegroup(
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
"src/codegen/ia32/reglist-ia32.h",
- "src/compiler/backend/ia32/code-generator-ia32.cc",
- "src/compiler/backend/ia32/instruction-codes-ia32.h",
- "src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
- "src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
@@ -2263,12 +2289,6 @@ filegroup(
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
"src/codegen/x64/reglist-x64.h",
- "src/compiler/backend/x64/code-generator-x64.cc",
- "src/compiler/backend/x64/instruction-codes-x64.h",
- "src/compiler/backend/x64/instruction-scheduler-x64.cc",
- "src/compiler/backend/x64/instruction-selector-x64.cc",
- "src/compiler/backend/x64/unwinding-info-writer-x64.cc",
- "src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
@@ -2293,12 +2313,6 @@ filegroup(
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
"src/codegen/arm/reglist-arm.h",
- "src/compiler/backend/arm/code-generator-arm.cc",
- "src/compiler/backend/arm/instruction-codes-arm.h",
- "src/compiler/backend/arm/instruction-scheduler-arm.cc",
- "src/compiler/backend/arm/instruction-selector-arm.cc",
- "src/compiler/backend/arm/unwinding-info-writer-arm.cc",
- "src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
@@ -2334,12 +2348,6 @@ filegroup(
"src/codegen/arm64/reglist-arm64.h",
"src/codegen/arm64/utils-arm64.cc",
"src/codegen/arm64/utils-arm64.h",
- "src/compiler/backend/arm64/code-generator-arm64.cc",
- "src/compiler/backend/arm64/instruction-codes-arm64.h",
- "src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
- "src/compiler/backend/arm64/instruction-selector-arm64.cc",
- "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
- "src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.h",
@@ -2369,12 +2377,6 @@ filegroup(
"src/codegen/s390/macro-assembler-s390.h",
"src/codegen/s390/register-s390.h",
"src/codegen/s390/reglist-s390.h",
- "src/compiler/backend/s390/code-generator-s390.cc",
- "src/compiler/backend/s390/instruction-codes-s390.h",
- "src/compiler/backend/s390/instruction-scheduler-s390.cc",
- "src/compiler/backend/s390/instruction-selector-s390.cc",
- "src/compiler/backend/s390/unwinding-info-writer-s390.cc",
- "src/compiler/backend/s390/unwinding-info-writer-s390.h",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
@@ -2401,10 +2403,6 @@ filegroup(
"src/codegen/riscv64/macro-assembler-riscv64.h",
"src/codegen/riscv64/register-riscv64.h",
"src/codegen/riscv64/reglist-riscv64.h",
- "src/compiler/backend/riscv64/code-generator-riscv64.cc",
- "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
- "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
- "src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
"src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
"src/diagnostics/riscv64/disasm-riscv64.cc",
"src/diagnostics/riscv64/unwinder-riscv64.cc",
@@ -2430,12 +2428,6 @@ filegroup(
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
"src/codegen/ppc/reglist-ppc.h",
- "src/compiler/backend/ppc/code-generator-ppc.cc",
- "src/compiler/backend/ppc/instruction-codes-ppc.h",
- "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
- "src/compiler/backend/ppc/instruction-selector-ppc.cc",
- "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
- "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@@ -2598,6 +2590,8 @@ filegroup(
"src/wasm/wasm-subtyping.h",
"src/wasm/wasm-tier.h",
"src/wasm/wasm-value.h",
+ "src/wasm/well-known-imports.cc",
+ "src/wasm/well-known-imports.h",
],
"//conditions:default": [],
}),
@@ -2607,7 +2601,6 @@ filegroup(
name = "icu/v8_base_without_compiler_files",
srcs = [
"src/builtins/builtins-intl.cc",
- "src/builtins/builtins-intl-gen.cc",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-break-iterator.cc",
@@ -2849,6 +2842,7 @@ filegroup(
"src/compiler/osr.h",
"src/compiler/per-isolate-compiler-cache.h",
"src/compiler/persistent-map.h",
+ "src/compiler/phase.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
@@ -2881,36 +2875,91 @@ filegroup(
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
+ "src/compiler/string-builder-optimizer.cc",
+ "src/compiler/string-builder-optimizer.h",
"src/compiler/store-store-elimination.h",
+ "src/compiler/turbofan-enabled.cc",
+ "src/compiler/turbofan.h",
+ "src/compiler/turboshaft/assembler.cc",
"src/compiler/turboshaft/assembler.h",
+ "src/compiler/turboshaft/assert-types-reducer.h",
+ "src/compiler/turboshaft/branch-elimination-reducer.h",
+ "src/compiler/turboshaft/build-graph-phase.cc",
+ "src/compiler/turboshaft/build-graph-phase.h",
+ "src/compiler/turboshaft/builtin-call-descriptors.h",
+ "src/compiler/turboshaft/dead-code-elimination-phase.cc",
+ "src/compiler/turboshaft/dead-code-elimination-phase.h",
+ "src/compiler/turboshaft/dead-code-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.cc",
"src/compiler/turboshaft/decompression-optimization.h",
+ "src/compiler/turboshaft/decompression-optimization-phase.cc",
+ "src/compiler/turboshaft/decompression-optimization-phase.h",
+ "src/compiler/turboshaft/define-assembler-macros.inc",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/fast-hash.h",
"src/compiler/turboshaft/graph-builder.cc",
"src/compiler/turboshaft/graph-builder.h",
"src/compiler/turboshaft/graph.cc",
"src/compiler/turboshaft/graph.h",
+ "src/compiler/turboshaft/index.h",
"src/compiler/turboshaft/graph-visualizer.cc",
"src/compiler/turboshaft/graph-visualizer.h",
+ "src/compiler/turboshaft/late-escape-analysis-reducer.h",
+ "src/compiler/turboshaft/late-escape-analysis-reducer.cc",
+ "src/compiler/turboshaft/late-optimization-phase.cc",
+ "src/compiler/turboshaft/late-optimization-phase.h",
+ "src/compiler/turboshaft/layered-hash-map.h",
+ "src/compiler/turboshaft/machine-lowering-phase.cc",
+ "src/compiler/turboshaft/machine-lowering-phase.h",
+ "src/compiler/turboshaft/machine-lowering-reducer.h",
"src/compiler/turboshaft/machine-optimization-reducer.h",
+ "src/compiler/turboshaft/memory-optimization-reducer.cc",
+ "src/compiler/turboshaft/memory-optimization-reducer.h",
"src/compiler/turboshaft/operations.cc",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/operation-matching.h",
"src/compiler/turboshaft/optimization-phase.cc",
"src/compiler/turboshaft/optimization-phase.h",
+ "src/compiler/turboshaft/optimize-phase.cc",
+ "src/compiler/turboshaft/optimize-phase.h",
+ "src/compiler/turboshaft/phase.cc",
+ "src/compiler/turboshaft/phase.h",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/recreate-schedule.h",
+ "src/compiler/turboshaft/recreate-schedule-phase.cc",
+ "src/compiler/turboshaft/recreate-schedule-phase.h",
+ "src/compiler/turboshaft/reducer-traits.h",
"src/compiler/turboshaft/representations.cc",
"src/compiler/turboshaft/representations.h",
+ "src/compiler/turboshaft/runtime-call-descriptors.h",
"src/compiler/turboshaft/select-lowering-reducer.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/turboshaft/simplify-tf-loops.cc",
"src/compiler/turboshaft/simplify-tf-loops.h",
"src/compiler/turboshaft/snapshot-table.h",
+ "src/compiler/turboshaft/tag-untag-lowering-phase.cc",
+ "src/compiler/turboshaft/tag-untag-lowering-phase.h",
+ "src/compiler/turboshaft/tag-untag-lowering-reducer.h",
+ "src/compiler/turboshaft/tracing.h",
+ "src/compiler/turboshaft/type-inference-analysis.h",
+ "src/compiler/turboshaft/type-inference-reducer.h",
+ "src/compiler/turboshaft/typer.cc",
+ "src/compiler/turboshaft/typer.h",
+ "src/compiler/turboshaft/type-assertions-phase.cc",
+ "src/compiler/turboshaft/type-assertions-phase.h",
+ "src/compiler/turboshaft/type-parser.cc",
+ "src/compiler/turboshaft/type-parser.h",
+ "src/compiler/turboshaft/typed-optimizations-phase.cc",
+ "src/compiler/turboshaft/typed-optimizations-phase.h",
+ "src/compiler/turboshaft/typed-optimizations-reducer.h",
+ "src/compiler/turboshaft/types.cc",
+ "src/compiler/turboshaft/types.h",
+ "src/compiler/turboshaft/undef-assembler-macros.inc",
+ "src/compiler/turboshaft/uniform-reducer-adapter.h",
"src/compiler/turboshaft/utils.cc",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
+ "src/compiler/turboshaft/variable-reducer.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.cc",
@@ -2930,14 +2979,71 @@ filegroup(
"src/compiler/zone-stats.cc",
"src/compiler/zone-stats.h",
] + select({
+ "@v8//bazel/config:v8_target_ia32": [
+ "src/compiler/backend/ia32/code-generator-ia32.cc",
+ "src/compiler/backend/ia32/instruction-codes-ia32.h",
+ "src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
+ "src/compiler/backend/ia32/instruction-selector-ia32.cc",
+ ],
+ "@v8//bazel/config:v8_target_x64": [
+ "src/compiler/backend/x64/code-generator-x64.cc",
+ "src/compiler/backend/x64/instruction-codes-x64.h",
+ "src/compiler/backend/x64/instruction-scheduler-x64.cc",
+ "src/compiler/backend/x64/instruction-selector-x64.cc",
+ "src/compiler/backend/x64/unwinding-info-writer-x64.cc",
+ "src/compiler/backend/x64/unwinding-info-writer-x64.h",
+ ],
+ "@v8//bazel/config:v8_target_arm": [
+ "src/compiler/backend/arm/code-generator-arm.cc",
+ "src/compiler/backend/arm/instruction-codes-arm.h",
+ "src/compiler/backend/arm/instruction-scheduler-arm.cc",
+ "src/compiler/backend/arm/instruction-selector-arm.cc",
+ "src/compiler/backend/arm/unwinding-info-writer-arm.cc",
+ "src/compiler/backend/arm/unwinding-info-writer-arm.h",
+ ],
+ "@v8//bazel/config:v8_target_arm64": [
+ "src/compiler/backend/arm64/code-generator-arm64.cc",
+ "src/compiler/backend/arm64/instruction-codes-arm64.h",
+ "src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
+ "src/compiler/backend/arm64/instruction-selector-arm64.cc",
+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
+ ],
+ "@v8//bazel/config:v8_target_s390x": [
+ "src/compiler/backend/s390/code-generator-s390.cc",
+ "src/compiler/backend/s390/instruction-codes-s390.h",
+ "src/compiler/backend/s390/instruction-scheduler-s390.cc",
+ "src/compiler/backend/s390/instruction-selector-s390.cc",
+ "src/compiler/backend/s390/unwinding-info-writer-s390.cc",
+ "src/compiler/backend/s390/unwinding-info-writer-s390.h",
+ ],
+ "@v8//bazel/config:v8_target_riscv64": [
+ "src/compiler/backend/riscv64/code-generator-riscv64.cc",
+ "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
+ "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
+ "src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
+ ],
+ "@v8//bazel/config:v8_target_ppc64le": [
+ "src/compiler/backend/ppc/code-generator-ppc.cc",
+ "src/compiler/backend/ppc/instruction-codes-ppc.h",
+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/backend/ppc/instruction-selector-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
+ ],
+ }) + select({
":is_v8_enable_webassembly": [
"src/compiler/int64-lowering.cc",
"src/compiler/int64-lowering.h",
+ "src/compiler/wasm-call-descriptors.cc",
+ "src/compiler/wasm-call-descriptors.h",
"src/compiler/wasm-compiler-definitions.h",
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-escape-analysis.cc",
"src/compiler/wasm-escape-analysis.h",
+ "src/compiler/wasm-load-elimination.cc",
+ "src/compiler/wasm-load-elimination.h",
"src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-loop-peeling.h",
"src/compiler/wasm-gc-lowering.cc",
@@ -2948,6 +3054,8 @@ filegroup(
"src/compiler/wasm-graph-assembler.h",
"src/compiler/wasm-inlining.cc",
"src/compiler/wasm-inlining.h",
+ "src/compiler/wasm-inlining-into-js.cc",
+ "src/compiler/wasm-inlining-into-js.h",
"src/compiler/wasm-typer.cc",
"src/compiler/wasm-typer.h",
],
@@ -2956,7 +3064,7 @@ filegroup(
)
filegroup(
- name = "v8_initializers_files",
+ name = "noicu/v8_initializers_files",
srcs = [
"src/builtins/builtins-array-gen.cc",
"src/builtins/builtins-array-gen.h",
@@ -2990,6 +3098,7 @@ filegroup(
"src/builtins/builtins-microtask-queue-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
+ "src/builtins/builtins-object-gen.h",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
@@ -3047,6 +3156,14 @@ filegroup(
)
filegroup(
+ name = "icu/v8_initializers_files",
+ srcs = [
+ "src/builtins/builtins-intl-gen.cc",
+ ":noicu/v8_initializers_files",
+ ],
+)
+
+filegroup(
name = "cppgc_base_files",
srcs = [
"src/heap/cppgc/allocation.cc",
@@ -3160,16 +3277,16 @@ filegroup(
# Note these cannot be v8_target_is_* selects because these contain
# inline assembly that runs inside the executable. Since these are
# linked directly into mksnapshot, they must use the actual target cpu.
- "@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/save_registers_asm.cc"],
- "@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/save_registers_asm.cc"],
- "@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/save_registers_asm.cc"],
- "@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/save_registers_asm.cc"],
- "@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/save_registers_asm.cc"],
- "@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/save_registers_asm.cc"],
- "@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/save_registers_asm.cc"],
- "@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/save_registers_masm.asm"],
- "@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/save_registers_masm.asm"],
- "@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/save_registers_masm.S"],
+ "@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/push_registers_asm.cc"],
+ "@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/push_registers_asm.cc"],
+ "@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.asm"],
+ "@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.asm"],
+ "@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"],
}),
)
@@ -3215,6 +3332,8 @@ filegroup(
"src/snapshot/embedded/platform-embedded-file-writer-mac.h",
"src/snapshot/embedded/platform-embedded-file-writer-win.cc",
"src/snapshot/embedded/platform-embedded-file-writer-win.h",
+ "src/snapshot/static-roots-gen.cc",
+ "src/snapshot/static-roots-gen.h",
"src/snapshot/mksnapshot.cc",
"src/snapshot/snapshot-empty.cc",
],
@@ -3223,6 +3342,8 @@ filegroup(
filegroup(
name = "v8_inspector_files",
srcs = [
+ "src/inspector/crc32.cc",
+ "src/inspector/crc32.h",
"src/inspector/custom-preview.cc",
"src/inspector/custom-preview.h",
"src/inspector/injected-script.cc",
@@ -3249,6 +3370,8 @@ filegroup(
"src/inspector/v8-debugger.h",
"src/inspector/v8-debugger-agent-impl.cc",
"src/inspector/v8-debugger-agent-impl.h",
+ "src/inspector/v8-debugger-barrier.cc",
+ "src/inspector/v8-debugger-barrier.h",
"src/inspector/v8-debugger-id.cc",
"src/inspector/v8-debugger-id.h",
"src/inspector/v8-debugger-script.cc",
@@ -3374,8 +3497,8 @@ filegroup(
# TODO(victorgomes): Add support to tools/debug_helper,
# which needs class-debug-readers and debug-macros.
-v8_torque(
- name = "generated_torque_files",
+v8_torque_definitions(
+ name = "generated_torque_definitions",
args = select({
":is_v8_annotate_torque_ir": ["-annotate-ir"],
"//conditions:default": [],
@@ -3391,12 +3514,8 @@ v8_torque(
"class-forward-declarations.h",
"class-verifiers.cc",
"class-verifiers.h",
- "csa-types.h",
# "debug-macros.cc",
# "debug-macros.h",
- "enum-verifiers.cc",
- "exported-macros-assembler.cc",
- "exported-macros-assembler.h",
"factory.cc",
"factory.inc",
"instance-types.h",
@@ -3409,8 +3528,28 @@ v8_torque(
noicu_srcs = [":noicu/torque_files"],
)
+v8_torque_initializers(
+ name = "generated_torque_initializers",
+ args = select({
+ ":is_v8_annotate_torque_ir": ["-annotate-ir"],
+ "//conditions:default": [],
+ }) + select({
+ "@v8//bazel/config:v8_target_is_32_bits": ["-m32"],
+ "//conditions:default": [],
+ }),
+ extras = [
+ "csa-types.h",
+ "enum-verifiers.cc",
+ "exported-macros-assembler.cc",
+ "exported-macros-assembler.h",
+ ],
+ icu_srcs = [":icu/torque_files"],
+ noicu_srcs = [":noicu/torque_files"],
+)
+
py_binary(
name = "code_generator",
+ python_version = "PY3",
srcs = [
"third_party/inspector_protocol/code_generator.py",
"third_party/inspector_protocol/pdl.py",
@@ -3476,8 +3615,6 @@ filegroup(
name = "v8_common_libshared_files",
srcs = [
":torque_runtime_support_files",
- ":v8_compiler_files",
- ":v8_initializers_files",
":v8_libplatform_files",
":v8_libsampler_files",
":v8_shared_internal_headers",
@@ -3547,14 +3684,16 @@ v8_mksnapshot(
# NOTE: This allows headers to be accessed without the icu/noicu prefixes.
cc_library(
- name = "icu/generated_torque_headers",
- hdrs = [":icu/generated_torque_files"],
+ name = "icu/generated_torque_definitions_headers",
+ hdrs = [":icu/generated_torque_definitions"],
+ copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "icu",
)
cc_library(
- name = "noicu/generated_torque_headers",
- hdrs = [":noicu/generated_torque_files"],
+ name = "noicu/generated_torque_definitions_headers",
+ hdrs = [":noicu/generated_torque_definitions"],
+ copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "noicu",
)
@@ -3564,6 +3703,7 @@ v8_library(
":v8_libbase_files",
":v8_shared_internal_headers",
],
+ copts = ["-Wno-implicit-fallthrough"],
)
cc_library(
@@ -3572,6 +3712,7 @@ cc_library(
"src/torque/kythe-data.h",
"src/torque/torque-compiler.h",
],
+ copts = ["-Wno-implicit-fallthrough"],
include_prefix = "third_party/v8",
includes = ["."],
)
@@ -3581,7 +3722,7 @@ cc_library(
srcs = [
":torque_base_files",
],
- copts = select({
+ copts = ["-Wno-implicit-fallthrough"] + select({
"@v8//bazel/config:is_posix": ["-fexceptions"],
"//conditions:default": [],
}),
@@ -3597,21 +3738,27 @@ v8_library(
srcs = [
":v8_base_without_compiler_files",
":v8_common_libshared_files",
- ],
+ ] + select({
+ ":is_v8_enable_turbofan": [
+ ":v8_compiler_files",
+ ],
+ "//conditions:default": [],
+ }),
+ copts = ["-Wno-implicit-fallthrough"],
icu_deps = [
- ":icu/generated_torque_headers",
+ ":icu/generated_torque_definitions_headers",
"//external:icu",
],
icu_srcs = [
":generated_regexp_special_case",
- ":icu/generated_torque_files",
+ ":icu/generated_torque_definitions",
":icu/v8_base_without_compiler_files",
],
noicu_deps = [
- ":noicu/generated_torque_headers",
+ ":noicu/generated_torque_definitions_headers",
],
noicu_srcs = [
- ":noicu/generated_torque_files",
+ ":noicu/generated_torque_definitions",
],
deps = [
":v8_libbase",
@@ -3621,8 +3768,17 @@ v8_library(
v8_library(
name = "v8",
- srcs = [":v8_inspector_files"],
+ srcs = [
+ ":v8_inspector_files",
+ ] + select({
+ ":is_not_v8_enable_turbofan": [
+ # With Turbofan disabled, we only include the stubbed-out API.
+ "src/compiler/turbofan-disabled.cc",
+ ],
+ "//conditions:default": [],
+ }),
hdrs = [":public_header_files"],
+ copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8_libshared"],
icu_srcs = [":icu/snapshot_files"],
noicu_deps = [":noicu/v8_libshared"],
@@ -3635,6 +3791,7 @@ v8_library(
name = "wee8",
srcs = [":wee8_files"],
hdrs = [":public_wasm_c_api_header_files"],
+ copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "third_party",
visibility = ["//visibility:public"],
deps = [":noicu/v8"],
@@ -3664,6 +3821,7 @@ v8_binary(
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
],
+ copts = ["-Wno-implicit-fallthrough"],
deps = ["v8_libbase"],
)
@@ -3675,6 +3833,7 @@ v8_binary(
":v8_libbase_files",
":v8_shared_internal_headers",
],
+ copts = ["-Wno-implicit-fallthrough"],
defines = [
"V8_INTL_SUPPORT",
"ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC",
@@ -3693,10 +3852,11 @@ v8_binary(
"src/torque/torque.cc",
":torque_base_files",
],
- copts = select({
+ copts = ["-Wno-implicit-fallthrough"] + select({
"@v8//bazel/config:is_posix": ["-fexceptions"],
"//conditions:default": [],
}),
+ icu_defines = [ "V8_INTL_SUPPORT" ],
features = ["-use_header_modules"],
linkopts = select({
"@v8//bazel/config:is_android": ["-llog"],
@@ -3707,26 +3867,49 @@ v8_binary(
v8_binary(
name = "mksnapshot",
- srcs = [":mksnapshot_files"],
+ srcs = [
+ ":mksnapshot_files",
+ ] + select({
+ ":is_not_v8_enable_turbofan": [
+ # Turbofan is needed to generate builtins.
+ ":v8_compiler_files",
+ ],
+ "//conditions:default": [],
+ }),
+ copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8_libshared"],
linkopts = select({
"@v8//bazel/config:is_android": ["-llog"],
"//conditions:default": [],
}),
+ icu_srcs = [
+ ":icu/generated_torque_initializers",
+ ":icu/v8_initializers_files",
+ ],
noicu_deps = [":v8_libshared_noicu"],
+ noicu_srcs = [
+ ":noicu/generated_torque_initializers",
+ ":noicu/v8_initializers_files",
+ ],
)
v8_binary(
name = "d8",
srcs = [":d8_files"],
+ copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8"],
noicu_deps = [":noicu/v8"],
)
# This target forces torque to be compiled without pointer compression.
v8_binary_non_pointer_compression(
- name = "torque_non_pointer_compression",
- binary = "torque",
+ name = "noicu/torque_non_pointer_compression",
+ binary = "noicu/torque",
+)
+
+v8_binary_non_pointer_compression(
+ name = "icu/torque_non_pointer_compression",
+ binary = "icu/torque",
)
alias(
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 78fd8cd2d3..8f93779407 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -4,16 +4,14 @@
import("//build/config/android/config.gni")
import("//build/config/arm.gni")
+import("//build/config/coverage/coverage.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/host_byteorder.gni")
import("//build/config/mips.gni")
+import("//build/config/riscv.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/build.gni")
-if (is_android) {
- import("//build/config/android/rules.gni")
-}
-
import("gni/snapshot_toolchain.gni")
import("gni/v8.gni")
@@ -120,6 +118,13 @@ declare_args() {
# as per the --native-code-counters flag.
v8_enable_snapshot_native_code_counters = ""
+ # Use pre-generated static root pointer values from static-roots.h.
+ v8_enable_static_roots = ""
+
+ # Mode used by gen-static-roots.py to have a heap layout which is identical
+ # to when v8_enable_static_roots is enabled.
+ v8_enable_static_root_generation = false
+
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@@ -204,10 +209,6 @@ declare_args() {
# Sets -dV8_EXTERNAL_CODE_SPACE
v8_enable_external_code_space = ""
- # Enable the Maglev compiler.
- # Sets -dV8_ENABLE_MAGLEV
- v8_enable_maglev = ""
-
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@@ -272,6 +273,9 @@ declare_args() {
# Use switch-based dispatch if this is false
v8_enable_regexp_interpreter_threaded_dispatch = true
+ # Check mksnapshot determinism by running it multiple times.
+ v8_verify_deterministic_mksnapshot = false
+
# Enable additional targets necessary for verification of torque
# file generation
v8_verify_torque_generation_invariance = false
@@ -310,9 +314,6 @@ declare_args() {
# Sets -DV8_ENABLE_SANDBOX.
v8_enable_sandbox = ""
- # Enable all available sandbox features. Implies v8_enable_sandbox.
- v8_enable_sandbox_future = false
-
# Expose the memory corruption API to JavaScript. Useful for testing the sandbox.
# WARNING This will expose builtins that (by design) cause memory corruption.
# Sets -DV8_EXPOSE_MEMORY_CORRUPTION_API
@@ -376,6 +377,17 @@ declare_args() {
# Compile V8 using zlib as dependency.
# Sets -DV8_USE_ZLIB
v8_use_zlib = true
+
+ # Make ValueDeserializer crash if the data to deserialize is invalid.
+ v8_value_deserializer_hard_fail = false
+
+ # Enable jitless mode, including compile-time optimizations. Note that even
+ # when this is set to 'false', one can run V8 in jitless mode at runtime by
+ # passing the `--jitless` flag; but then you miss out on compile-time
+ # optimizations.
+ # iOS (non-simulator) does not have executable pages for 3rd party
+ # applications yet so disable jit.
+ v8_jitless = v8_enable_lite_mode || target_is_ios_device
}
# Derived defaults.
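For reference, a minimal args.gn sketch exercising the arguments declared in the hunk above; the values are illustrative rather than defaults, and the companion settings follow from the jitless assert added further down in this file:

    # Hedged sketch of an args.gn using the new flags; example values only.
    v8_jitless = true
    # The assert added below requires Maglev, Turbofan and Wasm to be disabled
    # whenever v8_jitless is set.
    v8_enable_turbofan = false
    v8_enable_maglev = false
    v8_enable_webassembly = false
    # Crash instead of bailing out when ValueDeserializer sees invalid data.
    v8_value_deserializer_hard_fail = true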
@@ -459,20 +471,17 @@ if (v8_enable_external_code_space == "") {
(target_os != "fuchsia" && v8_current_cpu == "arm64"))
}
if (v8_enable_maglev == "") {
- v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression
-}
-if (v8_builtins_profiling_log_file == "default") {
- v8_builtins_profiling_log_file = ""
- if (is_debug == false) {
- if (v8_current_cpu == "x64") {
- v8_builtins_profiling_log_file = "tools/builtins-pgo/x64.profile"
- } else if (v8_current_cpu == "arm64") {
- v8_builtins_profiling_log_file = "tools/builtins-pgo/arm64.profile"
- } else if (v8_current_cpu == "arm") {
- v8_builtins_profiling_log_file = "tools/builtins-pgo/arm.profile"
- }
- }
+ v8_enable_maglev = v8_enable_turbofan &&
+ (v8_current_cpu == "x64" || v8_current_cpu == "arm64") &&
+ v8_enable_pointer_compression
}
+assert(v8_enable_turbofan || !v8_enable_maglev,
+ "Maglev is not available when Turbofan is disabled.")
+
+assert(!v8_jitless ||
+ (!v8_enable_maglev && !v8_enable_turbofan && !v8_enable_webassembly),
+ "Maglev, Turbofan and Wasm are not available in jitless mode")
+
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
}
@@ -500,6 +509,10 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized,
"Ignition tracing requires unoptimized tracing to be enabled.")
assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized,
"Baseline tracing requires unoptimized tracing to be enabled.")
+assert(
+ v8_enable_debugging_features == true || dcheck_always_on ||
+ !v8_enable_slow_dchecks,
+ "v8_enable_slow_dchecks requires v8_enable_debugging_features or dcheck_always_on.")
if (v8_enable_short_builtin_calls &&
(!v8_enable_pointer_compression && v8_current_cpu != "x64")) {
@@ -522,9 +535,64 @@ if (v8_enable_sandbox == "") {
v8_enable_external_code_space && target_os != "fuchsia"
}
-# Enable all available sandbox features if sandbox future is enabled.
-if (v8_enable_sandbox_future) {
- v8_enable_sandbox = true
+if (v8_enable_static_roots == "") {
+ # Static roots are only valid for builds with pointer compression and a
+ # shared read-only heap.
+ # TODO(olivf, v8:13466) Some configurations could be supported if we
+ # introduce different static root files for different build configurations:
+ # Non-wasm and non-i18n builds have fewer read only roots. Configurations
+ # without external code space allocate read only roots at a further
+ # location relative to the cage base.
+ v8_enable_static_roots =
+ v8_enable_pointer_compression && v8_enable_shared_ro_heap &&
+ v8_enable_pointer_compression_shared_cage &&
+ v8_enable_external_code_space && v8_enable_webassembly &&
+ v8_enable_i18n_support
+}
+
+assert(!v8_enable_static_roots ||
+ (v8_enable_pointer_compression && v8_enable_shared_ro_heap &&
+ v8_enable_pointer_compression_shared_cage &&
+ v8_enable_external_code_space && v8_enable_webassembly &&
+ v8_enable_i18n_support),
+ "Trying to enable static roots in a configuration that is not supported")
+
+assert(
+ !(v8_enable_static_roots && v8_enable_static_root_generation),
+ "Static root values must be generated in a build that does not rely on static roots itself")
+
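For reference, a hedged args.gn sketch of one configuration in which the default computed above resolves v8_enable_static_roots to true; every flag name is taken from this hunk, and the explicit values are just one way to satisfy the accompanying asserts:

    # Hedged sketch; all prerequisites below must hold, or the assert fires.
    v8_enable_pointer_compression = true
    v8_enable_pointer_compression_shared_cage = true
    v8_enable_shared_ro_heap = true
    v8_enable_external_code_space = true
    v8_enable_webassembly = true
    v8_enable_i18n_support = true
    # Leave v8_enable_static_roots at its default ("") so the branch above
    # enables it; combining it with v8_enable_static_root_generation = true
    # trips the assert.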
+if (v8_builtins_profiling_log_file == "default") {
+ v8_builtins_profiling_log_file = ""
+
+ # Don't use existing profile when
+ # * v8_enable_builtins_optimization is disabled,
+ # * generating a new one (i.e. v8_enable_builtins_profiling),
+ # * is_debug or dcheck_always_on because they add more checks to the
+ # builtins control flow which we don't want to generate,
+ # * !v8_enable_sandbox because it affects the way how external pointer values
+ # are accessed,
+ # * v8_enable_webassembly because it changes the set of opcodes which affects
+ # graphs hashes,
+ # * !is_clang because it might affect argument evaluation order, which
+ # makes node IDs not predictable for subgraphs like Op1(Op2(), Op3()) and
+ # as a result different graph hash.
+ if (v8_enable_builtins_optimization && !v8_enable_builtins_profiling &&
+ is_clang && !is_debug && !dcheck_always_on && v8_enable_webassembly) {
+ if ((v8_current_cpu == "x64" || v8_current_cpu == "arm64") &&
+ v8_enable_pointer_compression && v8_enable_external_code_space &&
+ v8_enable_sandbox) {
+ # Note, currently x64 profile can be applied to arm64 but not the other
+ # way round.
+ v8_builtins_profiling_log_file = "tools/builtins-pgo/profiles/x64.profile"
+ } else if (v8_current_cpu == "x86" || v8_current_cpu == "arm") {
+ # Note, x86 profile can be applied to arm but not the other way round.
+ v8_builtins_profiling_log_file = "tools/builtins-pgo/profiles/x86.profile"
+ }
+ }
+}
+
+if (v8_enable_webassembly && !target_is_simulator && v8_current_cpu == "x64") {
+ v8_enable_wasm_simd256_revec = true
}
assert(!v8_disable_write_barriers || v8_enable_single_generation,
@@ -567,8 +635,9 @@ assert(
assert(
!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
- v8_current_cpu == "arm64" || v8_current_cpu == "riscv64",
- "Sharing a pointer compression cage is only supported on x64,arm64 and riscv64")
+ v8_current_cpu == "arm64" || v8_current_cpu == "riscv64" ||
+ v8_current_cpu == "ppc64",
+ "Sharing a pointer compression cage is only supported on x64,arm64, ppc64 and riscv64")
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
@@ -583,12 +652,6 @@ assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
assert(!cppgc_enable_pointer_compression || cppgc_enable_caged_heap,
"Pointer compression in CppGC requires caged heap")
-assert(
- !v8_enable_conservative_stack_scanning ||
- v8_enable_inner_pointer_resolution_osb ||
- v8_enable_inner_pointer_resolution_mb,
- "Conservative stack scanning requires inner pointer resolution (OSB or MB)")
-
if (v8_enable_single_generation == true) {
assert(
v8_enable_unconditional_write_barriers || v8_disable_write_barriers,
@@ -741,6 +804,7 @@ external_v8_defines = [
"V8_USE_PERFETTO",
"V8_MAP_PACKING",
"V8_IS_TSAN",
+ "V8_ENABLE_CONSERVATIVE_STACK_SCANNING",
]
enabled_external_v8_defines = []
@@ -780,6 +844,9 @@ if (v8_enable_map_packing) {
if (is_tsan) {
enabled_external_v8_defines += [ "V8_IS_TSAN" ]
}
+if (v8_enable_conservative_stack_scanning) {
+ enabled_external_v8_defines += [ "V8_ENABLE_CONSERVATIVE_STACK_SCANNING" ]
+}
disabled_external_v8_defines = external_v8_defines - enabled_external_v8_defines
@@ -800,6 +867,7 @@ config("v8_header_features") {
external_cppgc_defines = [
"CPPGC_SUPPORTS_OBJECT_NAMES",
"CPPGC_CAGED_HEAP",
+ "CPPGC_SLIM_WRITE_BARRIER",
"CPPGC_YOUNG_GENERATION",
"CPPGC_POINTER_COMPRESSION",
]
@@ -829,6 +897,9 @@ if (cppgc_enable_pointer_compression) {
if (cppgc_enable_2gb_cage) {
enabled_external_cppgc_defines += [ "CPPGC_2GB_CAGE" ]
}
+if (cppgc_enable_slim_write_barrier) {
+ enabled_external_cppgc_defines += [ "CPPGC_SLIM_WRITE_BARRIER" ]
+}
disabled_external_cppgc_defines =
external_cppgc_defines - enabled_external_cppgc_defines
@@ -944,15 +1015,6 @@ config("features") {
if (v8_enable_single_generation) {
defines += [ "V8_ENABLE_SINGLE_GENERATION" ]
}
- if (v8_enable_conservative_stack_scanning) {
- defines += [ "V8_ENABLE_CONSERVATIVE_STACK_SCANNING" ]
- }
- if (v8_enable_inner_pointer_resolution_osb) {
- defines += [ "V8_ENABLE_INNER_POINTER_RESOLUTION_OSB" ]
- }
- if (v8_enable_inner_pointer_resolution_mb) {
- defines += [ "V8_ENABLE_INNER_POINTER_RESOLUTION_MB" ]
- }
if (v8_disable_write_barriers) {
defines += [ "V8_DISABLE_WRITE_BARRIERS" ]
}
@@ -1010,6 +1072,12 @@ config("features") {
if (v8_enable_maglev) {
defines += [ "V8_ENABLE_MAGLEV" ]
}
+ if (v8_enable_turbofan) {
+ defines += [ "V8_ENABLE_TURBOFAN" ]
+ }
+ if (v8_jitless) {
+ defines += [ "V8_JITLESS" ]
+ }
if (v8_enable_swiss_name_dictionary) {
defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ]
}
@@ -1052,9 +1120,24 @@ config("features") {
if (v8_enable_pointer_compression_8gb) {
defines += [ "V8_COMPRESS_POINTERS_8GB" ]
}
+ if (v8_enable_static_roots) {
+ defines += [ "V8_STATIC_ROOTS" ]
+ }
+ if (v8_enable_static_root_generation) {
+ defines += [ "V8_STATIC_ROOT_GENERATION" ]
+ }
if (v8_use_zlib) {
defines += [ "V8_USE_ZLIB" ]
}
+ if (v8_use_libm_trig_functions) {
+ defines += [ "V8_USE_LIBM_TRIG_FUNCTIONS" ]
+ }
+ if (v8_value_deserializer_hard_fail) {
+ defines += [ "V8_VALUE_DESERIALIZER_HARD_FAIL" ]
+ }
+ if (v8_enable_wasm_simd256_revec) {
+ defines += [ "V8_ENABLE_WASM_SIMD256_REVEC" ]
+ }
}
config("toolchain") {
@@ -1189,8 +1272,9 @@ config("toolchain") {
if (!is_clang) {
cflags += [ "-ffp-contract=off" ]
}
- if (target_is_simulator) {
+ if (riscv_use_rvv || target_is_simulator) {
defines += [ "CAN_USE_RVV_INSTRUCTIONS" ]
+ defines += [ "RVV_VLEN=${riscv_rvv_vlen}" ]
}
}
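For reference, a hedged args.gn sketch for the RVV branch above, assuming riscv_use_rvv and riscv_rvv_vlen are declared as gn args by the //build/config/riscv.gni import added at the top of this file; the vector length is illustrative:

    # Hedged sketch for a riscv64 target build.
    target_cpu = "riscv64"
    riscv_use_rvv = true     # take the RVV branch even outside the simulator
    riscv_rvv_vlen = 256     # illustrative; feeds the RVV_VLEN define above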
@@ -1242,6 +1326,9 @@ config("toolchain") {
} else if (target_os == "win") {
defines += [ "V8_HAVE_TARGET_OS" ]
defines += [ "V8_TARGET_OS_WIN" ]
+ } else if (target_os == "chromeos") {
+ defines += [ "V8_HAVE_TARGET_OS" ]
+ defines += [ "V8_TARGET_OS_CHROMEOS" ]
}
# TODO(infra): Support v8_enable_prof on Windows.
@@ -1251,13 +1338,12 @@ config("toolchain") {
if ((is_linux || is_chromeos) && v8_enable_backtrace) {
ldflags += [ "-rdynamic" ]
}
-
+ }
+ if (v8_enable_debugging_features || dcheck_always_on) {
defines += [ "DEBUG" ]
if (v8_enable_slow_dchecks) {
defines += [ "ENABLE_SLOW_DCHECKS" ]
}
- } else if (dcheck_always_on) {
- defines += [ "DEBUG" ]
}
if (v8_enable_verify_csa) {
@@ -1508,23 +1594,6 @@ config("always_turbofanimize") {
}
}
-# Configs for code coverage with gcov. Separate configs for cflags and ldflags
-# to selectively influde cflags in non-test targets only.
-config("v8_gcov_coverage_cflags") {
- cflags = [
- "-fprofile-arcs",
- "-ftest-coverage",
-
- # We already block on gcc warnings on other bots. Let's not block here to
- # always generate coverage reports.
- "-Wno-error",
- ]
-}
-
-config("v8_gcov_coverage_ldflags") {
- ldflags = [ "-fprofile-arcs" ]
-}
-
###############################################################################
# Actions
#
@@ -1556,108 +1625,91 @@ template("asm_to_inline_asm") {
}
}
-if (is_android && enable_java_templates) {
- android_assets("v8_external_startup_data_assets") {
- if (v8_use_external_startup_data) {
- deps = [ "//v8" ]
- renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
- if (current_cpu == "arm" || current_cpu == "x86") {
- renaming_destinations = [ "snapshot_blob_32.bin" ]
- } else {
- renaming_destinations = [ "snapshot_blob_64.bin" ]
- }
- disable_compression = true
- }
- }
-}
-
-action("postmortem-metadata") {
- # Only targets in this file and the top-level visibility target can
- # depend on this.
- visibility = [
- ":*",
- "//:gn_visibility",
- ]
-
- script = "tools/gen-postmortem-metadata.py"
-
- # NOSORT
- sources = [
- "$target_gen_dir/torque-generated/instance-types.h",
- "src/objects/allocation-site.h",
- "src/objects/allocation-site-inl.h",
- "src/objects/cell.h",
- "src/objects/cell-inl.h",
- "src/objects/code.h",
- "src/objects/code-inl.h",
- "src/objects/data-handler.h",
- "src/objects/data-handler-inl.h",
- "src/objects/descriptor-array.h",
- "src/objects/descriptor-array-inl.h",
- "src/objects/feedback-cell.h",
- "src/objects/feedback-cell-inl.h",
- "src/objects/fixed-array.h",
- "src/objects/fixed-array-inl.h",
- "src/objects/heap-number.h",
- "src/objects/heap-number-inl.h",
- "src/objects/heap-object.h",
- "src/objects/heap-object-inl.h",
- "src/objects/instance-type.h",
- "src/objects/js-array-buffer.h",
- "src/objects/js-array-buffer-inl.h",
- "src/objects/js-array.h",
- "src/objects/js-array-inl.h",
- "src/objects/js-function-inl.h",
- "src/objects/js-function.cc",
- "src/objects/js-function.h",
- "src/objects/js-objects.cc",
- "src/objects/js-objects.h",
- "src/objects/js-objects-inl.h",
- "src/objects/js-promise.h",
- "src/objects/js-promise-inl.h",
- "src/objects/js-raw-json.cc",
- "src/objects/js-raw-json.h",
- "src/objects/js-raw-json-inl.h",
- "src/objects/js-regexp.cc",
- "src/objects/js-regexp.h",
- "src/objects/js-regexp-inl.h",
- "src/objects/js-regexp-string-iterator.h",
- "src/objects/js-regexp-string-iterator-inl.h",
- "src/objects/map.cc",
- "src/objects/map.h",
- "src/objects/map-inl.h",
- "src/objects/megadom-handler.h",
- "src/objects/megadom-handler-inl.h",
- "src/objects/name.h",
- "src/objects/name-inl.h",
- "src/objects/objects.h",
- "src/objects/objects-inl.h",
- "src/objects/oddball.h",
- "src/objects/oddball-inl.h",
- "src/objects/primitive-heap-object.h",
- "src/objects/primitive-heap-object-inl.h",
- "src/objects/scope-info.h",
- "src/objects/scope-info-inl.h",
- "src/objects/script.h",
- "src/objects/script-inl.h",
- "src/objects/shared-function-info.cc",
- "src/objects/shared-function-info.h",
- "src/objects/shared-function-info-inl.h",
- "src/objects/string.cc",
- "src/objects/string-comparator.cc",
- "src/objects/string-comparator.h",
- "src/objects/string.h",
- "src/objects/string-inl.h",
- "src/objects/struct.h",
- "src/objects/struct-inl.h",
- ]
-
- outputs = [ "$target_gen_dir/debug-support.cc" ]
+if (v8_postmortem_support) {
+ action("postmortem-metadata") {
+ # Only targets in this file can depend on this.
+ visibility = [ ":*" ]
- args = rebase_path(outputs, root_build_dir) +
- rebase_path(sources, root_build_dir)
+ script = "tools/gen-postmortem-metadata.py"
- deps = [ ":run_torque" ]
+ # NOSORT
+ sources = [
+ "$target_gen_dir/torque-generated/instance-types.h",
+ "src/objects/allocation-site.h",
+ "src/objects/allocation-site-inl.h",
+ "src/objects/cell.h",
+ "src/objects/cell-inl.h",
+ "src/objects/code.h",
+ "src/objects/code-inl.h",
+ "src/objects/data-handler.h",
+ "src/objects/data-handler-inl.h",
+ "src/objects/descriptor-array.h",
+ "src/objects/descriptor-array-inl.h",
+ "src/objects/feedback-cell.h",
+ "src/objects/feedback-cell-inl.h",
+ "src/objects/fixed-array.h",
+ "src/objects/fixed-array-inl.h",
+ "src/objects/heap-number.h",
+ "src/objects/heap-number-inl.h",
+ "src/objects/heap-object.h",
+ "src/objects/heap-object-inl.h",
+ "src/objects/instance-type.h",
+ "src/objects/js-array-buffer.h",
+ "src/objects/js-array-buffer-inl.h",
+ "src/objects/js-array.h",
+ "src/objects/js-array-inl.h",
+ "src/objects/js-function-inl.h",
+ "src/objects/js-function.cc",
+ "src/objects/js-function.h",
+ "src/objects/js-objects.cc",
+ "src/objects/js-objects.h",
+ "src/objects/js-objects-inl.h",
+ "src/objects/js-promise.h",
+ "src/objects/js-promise-inl.h",
+ "src/objects/js-raw-json.cc",
+ "src/objects/js-raw-json.h",
+ "src/objects/js-raw-json-inl.h",
+ "src/objects/js-regexp.cc",
+ "src/objects/js-regexp.h",
+ "src/objects/js-regexp-inl.h",
+ "src/objects/js-regexp-string-iterator.h",
+ "src/objects/js-regexp-string-iterator-inl.h",
+ "src/objects/map.cc",
+ "src/objects/map.h",
+ "src/objects/map-inl.h",
+ "src/objects/megadom-handler.h",
+ "src/objects/megadom-handler-inl.h",
+ "src/objects/name.h",
+ "src/objects/name-inl.h",
+ "src/objects/objects.h",
+ "src/objects/objects-inl.h",
+ "src/objects/oddball.h",
+ "src/objects/oddball-inl.h",
+ "src/objects/primitive-heap-object.h",
+ "src/objects/primitive-heap-object-inl.h",
+ "src/objects/scope-info.h",
+ "src/objects/scope-info-inl.h",
+ "src/objects/script.h",
+ "src/objects/script-inl.h",
+ "src/objects/shared-function-info.cc",
+ "src/objects/shared-function-info.h",
+ "src/objects/shared-function-info-inl.h",
+ "src/objects/string.cc",
+ "src/objects/string-comparator.cc",
+ "src/objects/string-comparator.h",
+ "src/objects/string.h",
+ "src/objects/string-inl.h",
+ "src/objects/struct.h",
+ "src/objects/struct-inl.h",
+ ]
+
+ outputs = [ "$target_gen_dir/debug-support.cc" ]
+
+ args = rebase_path(outputs, root_build_dir) +
+ rebase_path(sources, root_build_dir)
+
+ deps = [ ":run_torque" ]
+ }
}
torque_files = [
@@ -1713,6 +1765,8 @@ torque_files = [
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",
"src/builtins/iterator.tq",
+ "src/builtins/iterator-from.tq",
+ "src/builtins/iterator-helpers.tq",
"src/builtins/math.tq",
"src/builtins/number.tq",
"src/builtins/object-fromentries.tq",
@@ -1756,6 +1810,7 @@ torque_files = [
"src/builtins/string-html.tq",
"src/builtins/string-includes.tq",
"src/builtins/string-indexof.tq",
+ "src/builtins/string-iswellformed.tq",
"src/builtins/string-iterator.tq",
"src/builtins/string-match-search.tq",
"src/builtins/string-pad.tq",
@@ -1765,6 +1820,7 @@ torque_files = [
"src/builtins/string-startswith.tq",
"src/builtins/string-substr.tq",
"src/builtins/string-substring.tq",
+ "src/builtins/string-towellformed.tq",
"src/builtins/string-trim.tq",
"src/builtins/symbol.tq",
"src/builtins/torque-internal.tq",
@@ -1821,6 +1877,7 @@ torque_files = [
"src/objects/js-collection.tq",
"src/objects/js-function.tq",
"src/objects/js-generator.tq",
+ "src/objects/js-iterator-helpers.tq",
"src/objects/js-objects.tq",
"src/objects/js-promise.tq",
"src/objects/js-proxy.tq",
@@ -1860,6 +1917,7 @@ torque_files = [
"src/objects/templates.tq",
"src/objects/torque-defined-classes.tq",
"src/objects/turbofan-types.tq",
+ "src/objects/turboshaft-types.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
@@ -2005,7 +2063,7 @@ if (v8_verify_torque_generation_invariance) {
group("v8_maybe_icu") {
if (v8_enable_i18n_support) {
- public_deps = [ "//third_party/icu" ]
+ public_deps = [ v8_icu_path ]
}
}
@@ -2114,12 +2172,6 @@ template("run_mksnapshot") {
suffix = "_$name"
}
action("run_mksnapshot_" + name) {
- # Only targets in this file and running mkgrokdump can depend on this.
- visibility = [
- ":*",
- "tools/debug_helper:run_mkgrokdump",
- ]
-
deps = [ ":mksnapshot($v8_snapshot_toolchain)" ]
script = "tools/run.py"
@@ -2159,6 +2211,12 @@ template("run_mksnapshot") {
"--turbo-profiling-input",
rebase_path(v8_builtins_profiling_log_file, root_build_dir),
]
+
+ # Replace this with --warn-about-builtin-profile-data to see the full
+ # list of builtins with incompatible profiles.
+ # TODO(crbug.com/v8/13647): Do not fail for invalid profiles
+ # args += [ "--abort-on-bad-builtin-profile-data" ]
+ args += [ "--warn-about-builtin-profile-data" ]
}
# This is needed to distinguish between generating code for the simulator
@@ -2253,10 +2311,45 @@ if (emit_builtins_as_inline_asm) {
}
}
+if (v8_verify_deterministic_mksnapshot) {
+ runs = [
+ "run_0",
+ "run_1",
+ "run_2",
+ "run_3",
+ "run_4",
+ "run_5",
+ "run_6",
+ ]
+
+ foreach(i, runs) {
+ run_mksnapshot(i) {
+ args = []
+ embedded_variant = "Default"
+ }
+ }
+
+ action("verify_deterministic_mksnapshot") {
+ deps = []
+ foreach(i, runs) {
+ deps += [ ":run_mksnapshot_$i" ]
+ }
+ report_file = "$target_gen_dir/mksnapshot_comparison.txt"
+ script = "tools/snapshot/compare_mksnapshot_output.py"
+ args = [
+ rebase_path("$report_file", root_build_dir),
+ rebase_path("$target_gen_dir", root_build_dir),
+ rebase_path("$root_out_dir", root_build_dir),
+ "7", # Length of the 'runs' list.
+ ]
+ outputs = [ report_file ]
+ }
+}
+
action("v8_dump_build_config") {
script = "tools/testrunner/utils/dump_build_config.py"
outputs = [ "$root_out_dir/v8_build_config.json" ]
- is_gcov_coverage = v8_code_coverage && !is_clang
+ is_DEBUG_defined = v8_enable_debugging_features || dcheck_always_on
is_full_debug = v8_enable_debugging_features && !v8_optimized_debug
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
@@ -2266,37 +2359,48 @@ action("v8_dump_build_config") {
"is_asan=$is_asan",
"is_cfi=$is_cfi",
"is_clang=$is_clang",
+ "is_clang_coverage=$use_clang_coverage",
"is_component_build=$is_component_build",
"is_debug=$v8_enable_debugging_features",
+ "is_DEBUG_defined=$is_DEBUG_defined",
"is_full_debug=$is_full_debug",
- "is_gcov_coverage=$is_gcov_coverage",
"is_msan=$is_msan",
"is_tsan=$is_tsan",
"is_ubsan_vptr=$is_ubsan_vptr",
"target_cpu=\"$target_cpu\"",
+ "v8_code_comments=$v8_code_comments",
+ "v8_control_flow_integrity=$v8_control_flow_integrity",
"v8_current_cpu=\"$v8_current_cpu\"",
+ "v8_dict_property_const_tracking=$v8_dict_property_const_tracking",
+ "v8_disable_write_barriers=$v8_disable_write_barriers",
"v8_enable_atomic_object_field_writes=" +
"$v8_enable_atomic_object_field_writes",
+ "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
+ "v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
"v8_enable_conservative_stack_scanning=" +
"$v8_enable_conservative_stack_scanning",
- "v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
- "v8_enable_single_generation=$v8_enable_single_generation",
+ "v8_enable_debug_code=$v8_enable_debug_code",
+ "v8_enable_disassembler=$v8_enable_disassembler",
+ "v8_enable_gdbjit=$v8_enable_gdbjit",
"v8_enable_i18n_support=$v8_enable_i18n_support",
- "v8_enable_verify_predictable=$v8_enable_verify_predictable",
- "v8_enable_verify_csa=$v8_enable_verify_csa",
"v8_enable_lite_mode=$v8_enable_lite_mode",
- "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats",
+ "v8_enable_maglev=$v8_enable_maglev",
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
"v8_enable_pointer_compression_shared_cage=" +
"$v8_enable_pointer_compression_shared_cage",
+ "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats",
"v8_enable_sandbox=$v8_enable_sandbox",
"v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap",
+ "v8_enable_single_generation=$v8_enable_single_generation",
+ "v8_enable_slow_dchecks=$v8_enable_slow_dchecks",
"v8_enable_third_party_heap=$v8_enable_third_party_heap",
+ "v8_enable_turbofan=$v8_enable_turbofan",
+ "v8_enable_verify_csa=$v8_enable_verify_csa",
+ "v8_enable_verify_heap=$v8_enable_verify_heap",
+ "v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_enable_webassembly=$v8_enable_webassembly",
- "v8_dict_property_const_tracking=$v8_dict_property_const_tracking",
- "v8_control_flow_integrity=$v8_control_flow_integrity",
+ "v8_jitless=$v8_jitless",
"v8_target_cpu=\"$v8_target_cpu\"",
- "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
]
if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
@@ -2312,11 +2416,15 @@ action("v8_dump_build_config") {
#
v8_source_set("v8_snapshot") {
- visibility = [ ":*" ] # Targets in this file can depend on this.
+ # Let external targets depend on v8_snapshot.
+ if (v8_use_external_startup_data) {
+ visibility = [ ":*" ] # Targets in this file can depend on this.
+ }
deps = [
":v8_internal_headers",
":v8_libbase",
+ ":v8_tracing",
]
public_deps = [
# This should be public so downstream targets can declare the snapshot
@@ -2345,9 +2453,6 @@ v8_source_set("v8_snapshot") {
sources += [ "src/snapshot/snapshot-external.cc" ]
} else {
- # Also top-level visibility targets can depend on this.
- visibility += [ "//:gn_visibility" ]
-
public_deps += [ ":v8_maybe_icu" ]
sources += [ "$target_gen_dir/snapshot.cc" ]
@@ -2404,6 +2509,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-microtask-queue-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
+ "src/builtins/builtins-object-gen.h",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
@@ -2454,57 +2560,57 @@ v8_source_set("v8_initializers") {
if (v8_current_cpu == "x86") {
sources += [
- ### gcmole(arch:ia32) ###
+ ### gcmole(ia32) ###
"src/builtins/ia32/builtins-ia32.cc",
]
} else if (v8_current_cpu == "x64") {
sources += [
- ### gcmole(arch:x64) ###
+ ### gcmole(x64) ###
"src/builtins/x64/builtins-x64.cc",
]
} else if (v8_current_cpu == "arm") {
sources += [
- ### gcmole(arch:arm) ###
+ ### gcmole(arm) ###
"src/builtins/arm/builtins-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
sources += [
- ### gcmole(arch:arm64) ###
+ ### gcmole(arm64) ###
"src/builtins/arm64/builtins-arm64.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
- ### gcmole(arch:mips64el) ###
+ ### gcmole(mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
]
} else if (v8_current_cpu == "loong64") {
sources += [
- ### gcmole(arch:loong64) ###
+ ### gcmole(loong64) ###
"src/builtins/loong64/builtins-loong64.cc",
]
} else if (v8_current_cpu == "ppc") {
sources += [
- ### gcmole(arch:ppc) ###
+ ### gcmole(ppc) ###
"src/builtins/ppc/builtins-ppc.cc",
]
} else if (v8_current_cpu == "ppc64") {
sources += [
- ### gcmole(arch:ppc64) ###
+ ### gcmole(ppc64) ###
"src/builtins/ppc/builtins-ppc.cc",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [
- ### gcmole(arch:s390) ###
+ ### gcmole(s390) ###
"src/builtins/s390/builtins-s390.cc",
]
} else if (v8_current_cpu == "riscv64") {
sources += [
- ### gcmole(arch:riscv64) ###
+ ### gcmole(riscv64) ###
"src/builtins/riscv/builtins-riscv.cc",
]
} else if (v8_current_cpu == "riscv32") {
sources += [
- ### gcmole(arch:riscv32) ###
+ ### gcmole(riscv32) ###
"src/builtins/riscv/builtins-riscv.cc",
]
}
@@ -2775,6 +2881,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/interface-descriptors.h",
"src/codegen/label.h",
"src/codegen/machine-type.h",
+ "src/codegen/macro-assembler-base.h",
"src/codegen/macro-assembler-inl.h",
"src/codegen/macro-assembler.h",
"src/codegen/maglev-safepoint-table.h",
@@ -2795,7 +2902,6 @@ v8_header_set("v8_internal_headers") {
"src/codegen/source-position.h",
"src/codegen/tick-counter.h",
"src/codegen/tnode.h",
- "src/codegen/turbo-assembler.h",
"src/codegen/unoptimized-compilation-info.h",
"src/common/assert-scope.h",
"src/common/checks.h",
@@ -2911,6 +3017,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/osr.h",
"src/compiler/per-isolate-compiler-cache.h",
"src/compiler/persistent-map.h",
+ "src/compiler/phase.h",
"src/compiler/pipeline-statistics.h",
"src/compiler/pipeline.h",
"src/compiler/processed-feedback.h",
@@ -2928,25 +3035,61 @@ v8_header_set("v8_internal_headers") {
"src/compiler/simplified-operator.h",
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.h",
+ "src/compiler/string-builder-optimizer.h",
+ "src/compiler/turbofan.h",
"src/compiler/turboshaft/assembler.h",
+ "src/compiler/turboshaft/assert-types-reducer.h",
+ "src/compiler/turboshaft/branch-elimination-reducer.h",
+ "src/compiler/turboshaft/build-graph-phase.h",
+ "src/compiler/turboshaft/builtin-call-descriptors.h",
+ "src/compiler/turboshaft/dead-code-elimination-phase.h",
+ "src/compiler/turboshaft/dead-code-elimination-reducer.h",
+ "src/compiler/turboshaft/decompression-optimization-phase.h",
"src/compiler/turboshaft/decompression-optimization.h",
+ "src/compiler/turboshaft/define-assembler-macros.inc",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/fast-hash.h",
"src/compiler/turboshaft/graph-builder.h",
"src/compiler/turboshaft/graph-visualizer.h",
"src/compiler/turboshaft/graph.h",
+ "src/compiler/turboshaft/index.h",
+ "src/compiler/turboshaft/late-escape-analysis-reducer.h",
+ "src/compiler/turboshaft/late-optimization-phase.h",
+ "src/compiler/turboshaft/layered-hash-map.h",
+ "src/compiler/turboshaft/machine-lowering-phase.h",
+ "src/compiler/turboshaft/machine-lowering-reducer.h",
"src/compiler/turboshaft/machine-optimization-reducer.h",
+ "src/compiler/turboshaft/memory-optimization-reducer.h",
"src/compiler/turboshaft/operation-matching.h",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/optimization-phase.h",
+ "src/compiler/turboshaft/optimize-phase.h",
+ "src/compiler/turboshaft/phase.h",
+ "src/compiler/turboshaft/recreate-schedule-phase.h",
"src/compiler/turboshaft/recreate-schedule.h",
+ "src/compiler/turboshaft/reducer-traits.h",
"src/compiler/turboshaft/representations.h",
+ "src/compiler/turboshaft/runtime-call-descriptors.h",
"src/compiler/turboshaft/select-lowering-reducer.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/turboshaft/simplify-tf-loops.h",
"src/compiler/turboshaft/snapshot-table.h",
+ "src/compiler/turboshaft/tag-untag-lowering-phase.h",
+ "src/compiler/turboshaft/tag-untag-lowering-reducer.h",
+ "src/compiler/turboshaft/tracing.h",
+ "src/compiler/turboshaft/type-assertions-phase.h",
+ "src/compiler/turboshaft/type-inference-analysis.h",
+ "src/compiler/turboshaft/type-inference-reducer.h",
+ "src/compiler/turboshaft/type-parser.h",
+ "src/compiler/turboshaft/typed-optimizations-phase.h",
+ "src/compiler/turboshaft/typed-optimizations-reducer.h",
+ "src/compiler/turboshaft/typer.h",
+ "src/compiler/turboshaft/types.h",
+ "src/compiler/turboshaft/undef-assembler.macros.inc",
+ "src/compiler/turboshaft/uniform-reducer-adapater.h",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
+ "src/compiler/turboshaft/variable-reducer.h",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.h",
"src/compiler/typed-optimization.h",
@@ -3057,12 +3200,13 @@ v8_header_set("v8_internal_headers") {
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-marking-state.h",
"src/heap/cppgc-js/cpp-snapshot.h",
+ "src/heap/cppgc-js/cross-heap-remembered-set.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
- "src/heap/embedder-tracing-inl.h",
- "src/heap/embedder-tracing.h",
+ "src/heap/cppgc-js/wrappable-info-inl.h",
+ "src/heap/cppgc-js/wrappable-info.h",
"src/heap/evacuation-allocator-inl.h",
"src/heap/evacuation-allocator.h",
"src/heap/evacuation-verifier-inl.h",
@@ -3078,7 +3222,6 @@ v8_header_set("v8_internal_headers") {
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer-inl.h",
"src/heap/gc-tracer.h",
- "src/heap/global-handle-marking-visitor.h",
"src/heap/heap-allocator-inl.h",
"src/heap/heap-allocator.h",
"src/heap/heap-controller.h",
@@ -3118,8 +3261,10 @@ v8_header_set("v8_internal_headers") {
"src/heap/memory-measurement-inl.h",
"src/heap/memory-measurement.h",
"src/heap/memory-reducer.h",
+ "src/heap/minor-gc-job.h",
"src/heap/new-spaces-inl.h",
"src/heap/new-spaces.h",
+ "src/heap/object-lock.h",
"src/heap/object-stats.h",
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.h",
@@ -3136,15 +3281,13 @@ v8_header_set("v8_internal_headers") {
"src/heap/remembered-set-inl.h",
"src/heap/remembered-set.h",
"src/heap/safepoint.h",
- "src/heap/scavenge-job.h",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.h",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.h",
- "src/heap/stress-marking-observer.h",
- "src/heap/stress-scavenge-observer.h",
"src/heap/sweeper.h",
+ "src/heap/traced-handles-marking-visitor.h",
"src/heap/weak-object-worklists.h",
"src/ic/call-optimization.h",
"src/ic/handler-configuration-inl.h",
@@ -3281,6 +3424,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/js-function.h",
"src/objects/js-generator-inl.h",
"src/objects/js-generator.h",
+ "src/objects/js-iterator-helpers-inl.h",
+ "src/objects/js-iterator-helpers.h",
"src/objects/js-objects-inl.h",
"src/objects/js-objects.h",
"src/objects/js-promise-inl.h",
@@ -3406,6 +3551,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/transitions.h",
"src/objects/turbofan-types-inl.h",
"src/objects/turbofan-types.h",
+ "src/objects/turboshaft-types-inl.h",
+ "src/objects/turboshaft-types.h",
"src/objects/type-hints.h",
"src/objects/value-serializer.h",
"src/objects/visitors-inl.h",
@@ -3473,6 +3620,7 @@ v8_header_set("v8_internal_headers") {
"src/regexp/special-case.h",
"src/roots/roots-inl.h",
"src/roots/roots.h",
+ "src/roots/static-roots.h",
"src/runtime/runtime-utils.h",
"src/runtime/runtime.h",
"src/sandbox/bounded-size-inl.h",
@@ -3592,12 +3740,17 @@ v8_header_set("v8_internal_headers") {
"src/maglev/maglev-interpreter-frame-state.h",
"src/maglev/maglev-ir-inl.h",
"src/maglev/maglev-ir.h",
+ "src/maglev/maglev-phi-representation-selector.h",
"src/maglev/maglev-regalloc-data.h",
"src/maglev/maglev-regalloc.h",
"src/maglev/maglev-register-frame-array.h",
- "src/maglev/maglev-vreg-allocator.h",
"src/maglev/maglev.h",
]
+ if (v8_current_cpu == "arm64") {
+ sources += [ "src/maglev/arm64/maglev-assembler-arm64-inl.h" ]
+ } else if (v8_current_cpu == "x64") {
+ sources += [ "src/maglev/x64/maglev-assembler-x64-inl.h" ]
+ }
}
if (v8_enable_webassembly) {
@@ -3608,13 +3761,16 @@ v8_header_set("v8_internal_headers") {
"src/asmjs/asm-scanner.h",
"src/asmjs/asm-types.h",
"src/compiler/int64-lowering.h",
+ "src/compiler/wasm-call-descriptors.h",
"src/compiler/wasm-compiler-definitions.h",
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-escape-analysis.h",
"src/compiler/wasm-gc-lowering.h",
"src/compiler/wasm-gc-operator-reducer.h",
"src/compiler/wasm-graph-assembler.h",
+ "src/compiler/wasm-inlining-into-js.h",
"src/compiler/wasm-inlining.h",
+ "src/compiler/wasm-load-elimination.h",
"src/compiler/wasm-loop-peeling.h",
"src/compiler/wasm-typer.h",
"src/debug/debug-wasm-objects-inl.h",
@@ -3680,11 +3836,15 @@ v8_header_set("v8_internal_headers") {
"src/wasm/wasm-subtyping.h",
"src/wasm/wasm-tier.h",
"src/wasm/wasm-value.h",
+ "src/wasm/well-known-imports.h",
]
}
if (v8_enable_wasm_simd256_revec) {
- sources += [ "src/compiler/linear-scheduler.h" ]
+ sources += [
+ "src/compiler/linear-scheduler.h",
+ "src/compiler/revectorizer.h",
+ ]
}
if (!v8_enable_third_party_heap) {
@@ -3720,8 +3880,6 @@ v8_header_set("v8_internal_headers") {
"src/objects/js-segment-iterator.h",
"src/objects/js-segmenter-inl.h",
"src/objects/js-segmenter.h",
- "src/objects/js-segments-inl.h",
- "src/objects/js-segments.h",
]
}
@@ -3733,13 +3891,6 @@ v8_header_set("v8_internal_headers") {
sources += [ "src/heap/conservative-stack-visitor.h" ]
}
- if (v8_enable_inner_pointer_resolution_osb) {
- sources += [
- "src/heap/object-start-bitmap-inl.h",
- "src/heap/object-start-bitmap.h",
- ]
- }
-
if (v8_enable_wasm_gdb_remote_debugging) {
sources += [
"src/debug/wasm/gdb-server/gdb-remote-util.h",
@@ -3758,7 +3909,8 @@ v8_header_set("v8_internal_headers") {
}
if (v8_current_cpu == "x86") {
- sources += [ ### gcmole(arch:ia32) ###
+ sources += [
+ ### gcmole(ia32) ###
"src/baseline/ia32/baseline-assembler-ia32-inl.h",
"src/baseline/ia32/baseline-compiler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32-inl.h",
@@ -3776,7 +3928,8 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
- sources += [ ### gcmole(arch:x64) ###
+ sources += [
+ ### gcmole(x64) ###
"src/baseline/x64/baseline-assembler-x64-inl.h",
"src/baseline/x64/baseline-compiler-x64-inl.h",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
@@ -3812,7 +3965,8 @@ v8_header_set("v8_internal_headers") {
}
}
} else if (v8_current_cpu == "arm") {
- sources += [ ### gcmole(arch:arm) ###
+ sources += [
+ ### gcmole(arm) ###
"src/baseline/arm/baseline-assembler-arm-inl.h",
"src/baseline/arm/baseline-compiler-arm-inl.h",
"src/codegen/arm/assembler-arm-inl.h",
@@ -3830,7 +3984,8 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
- sources += [ ### gcmole(arch:arm64) ###
+ sources += [
+ ### gcmole(arm64) ###
"src/baseline/arm64/baseline-assembler-arm64-inl.h",
"src/baseline/arm64/baseline-compiler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64-inl.h",
@@ -3859,7 +4014,7 @@ v8_header_set("v8_internal_headers") {
if (v8_enable_webassembly) {
# Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux
# and Mac.
- if ((current_cpu == "arm64" && is_mac) ||
+ if ((current_cpu == "arm64" && (is_mac || is_ios)) ||
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [ "src/trap-handler/handler-inside-posix.h" ]
}
@@ -3872,7 +4027,8 @@ v8_header_set("v8_internal_headers") {
sources += [ "src/diagnostics/unwinding-info-win64.h" ]
}
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
- sources += [ ### gcmole(arch:mips64el) ###
+ sources += [
+ ### gcmole(mips64el) ###
"src/baseline/mips64/baseline-assembler-mips64-inl.h",
"src/baseline/mips64/baseline-compiler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64-inl.h",
@@ -3888,7 +4044,8 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "loong64") {
- sources += [ ### gcmole(arch:loong64) ###
+ sources += [
+ ### gcmole(loong64) ###
"src/baseline/loong64/baseline-assembler-loong64-inl.h",
"src/baseline/loong64/baseline-compiler-loong64-inl.h",
"src/codegen/loong64/assembler-loong64-inl.h",
@@ -3904,7 +4061,8 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/loong64/liftoff-assembler-loong64.h",
]
} else if (v8_current_cpu == "ppc") {
- sources += [ ### gcmole(arch:ppc) ###
+ sources += [
+ ### gcmole(ppc) ###
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.h",
@@ -3920,7 +4078,8 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "ppc64") {
- sources += [ ### gcmole(arch:ppc64) ###
+ sources += [
+ ### gcmole(ppc64) ###
"src/baseline/ppc/baseline-assembler-ppc-inl.h",
"src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
@@ -3938,7 +4097,8 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += [ ### gcmole(arch:s390) ###
+ sources += [
+ ### gcmole(s390) ###
"src/baseline/s390/baseline-assembler-s390-inl.h",
"src/baseline/s390/baseline-compiler-s390-inl.h",
"src/codegen/s390/assembler-s390-inl.h",
@@ -3956,16 +4116,15 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
} else if (v8_current_cpu == "riscv64") {
- sources += [ ### gcmole(arch:riscv64) ###
+ sources += [
+ ### gcmole(riscv64) ###
"src/baseline/riscv/baseline-assembler-riscv-inl.h",
"src/baseline/riscv/baseline-compiler-riscv-inl.h",
"src/codegen/riscv/assembler-riscv-inl.h",
- "src/codegen/riscv/assembler-riscv-inl.h",
"src/codegen/riscv/assembler-riscv.h",
"src/codegen/riscv/base-assembler-riscv.h",
"src/codegen/riscv/base-constants-riscv.h",
"src/codegen/riscv/base-riscv-i.h",
- "src/codegen/riscv/base-riscv-i.h",
"src/codegen/riscv/constant-riscv-a.h",
"src/codegen/riscv/constant-riscv-c.h",
"src/codegen/riscv/constant-riscv-d.h",
@@ -3978,7 +4137,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/riscv/extension-riscv-a.h",
"src/codegen/riscv/extension-riscv-c.h",
"src/codegen/riscv/extension-riscv-d.h",
- "src/codegen/riscv/extension-riscv-d.h",
+ "src/codegen/riscv/extension-riscv-f.h",
"src/codegen/riscv/extension-riscv-inl.h",
"src/codegen/riscv/extension-riscv-m.h",
"src/codegen/riscv/extension-riscv-v.h",
@@ -3995,11 +4154,12 @@ v8_header_set("v8_internal_headers") {
"src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
]
} else if (v8_current_cpu == "riscv32") {
- sources += [ ### gcmole(arch:riscv32) ###
+ sources += [
+ ### gcmole(riscv32) ###
"src/baseline/riscv/baseline-assembler-riscv-inl.h",
"src/baseline/riscv/baseline-compiler-riscv-inl.h",
+ "src/codegen/riscv/assembler-riscv-inl.h",
"src/codegen/riscv/assembler-riscv.h",
- "src/codegen/riscv/assembler-riscv32-inl.h",
"src/codegen/riscv/base-assembler-riscv.h",
"src/codegen/riscv/base-constants-riscv.h",
"src/codegen/riscv/base-riscv-i.h",
@@ -4157,6 +4317,8 @@ v8_compiler_sources = [
"src/compiler/simplified-operator.cc",
"src/compiler/state-values-utils.cc",
"src/compiler/store-store-elimination.cc",
+ "src/compiler/string-builder-optimizer.cc",
+ "src/compiler/turbofan-enabled.cc",
"src/compiler/type-cache.cc",
"src/compiler/type-narrowing-reducer.cc",
"src/compiler/typed-optimization.cc",
@@ -4165,30 +4327,123 @@ v8_compiler_sources = [
"src/compiler/value-numbering-reducer.cc",
"src/compiler/verifier.cc",
"src/compiler/zone-stats.cc",
- "src/utils/hex-format.cc",
- "src/utils/sha-256.cc",
]
+if (v8_current_cpu == "x86") {
+ v8_compiler_sources += [
+ ### gcmole(ia32) ###
+ "src/compiler/backend/ia32/code-generator-ia32.cc",
+ "src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
+ "src/compiler/backend/ia32/instruction-selector-ia32.cc",
+ ]
+} else if (v8_current_cpu == "x64") {
+ v8_compiler_sources += [
+ ### gcmole(x64) ###
+ "src/compiler/backend/x64/code-generator-x64.cc",
+ "src/compiler/backend/x64/instruction-scheduler-x64.cc",
+ "src/compiler/backend/x64/instruction-selector-x64.cc",
+ "src/compiler/backend/x64/unwinding-info-writer-x64.cc",
+ ]
+} else if (v8_current_cpu == "arm") {
+ v8_compiler_sources += [
+ ### gcmole(arm) ###
+ "src/compiler/backend/arm/code-generator-arm.cc",
+ "src/compiler/backend/arm/instruction-scheduler-arm.cc",
+ "src/compiler/backend/arm/instruction-selector-arm.cc",
+ "src/compiler/backend/arm/unwinding-info-writer-arm.cc",
+ ]
+} else if (v8_current_cpu == "arm64") {
+ v8_compiler_sources += [
+ ### gcmole(arm64) ###
+ "src/compiler/backend/arm64/code-generator-arm64.cc",
+ "src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
+ "src/compiler/backend/arm64/instruction-selector-arm64.cc",
+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
+ ]
+} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+ v8_compiler_sources += [
+ ### gcmole(mips64el) ###
+ "src/compiler/backend/mips64/code-generator-mips64.cc",
+ "src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
+ "src/compiler/backend/mips64/instruction-selector-mips64.cc",
+ ]
+} else if (v8_current_cpu == "loong64") {
+ v8_compiler_sources += [
+ ### gcmole(loong64) ###
+ "src/compiler/backend/loong64/code-generator-loong64.cc",
+ "src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
+ "src/compiler/backend/loong64/instruction-selector-loong64.cc",
+ ]
+} else if (v8_current_cpu == "ppc") {
+ v8_compiler_sources += [
+ ### gcmole(ppc) ###
+ "src/compiler/backend/ppc/code-generator-ppc.cc",
+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/backend/ppc/instruction-selector-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
+ ]
+} else if (v8_current_cpu == "ppc64") {
+ v8_compiler_sources += [
+ ### gcmole(ppc64) ###
+ "src/compiler/backend/ppc/code-generator-ppc.cc",
+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/backend/ppc/instruction-selector-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
+ ]
+} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ v8_compiler_sources += [
+ ### gcmole(s390) ###
+ "src/compiler/backend/s390/code-generator-s390.cc",
+ "src/compiler/backend/s390/instruction-scheduler-s390.cc",
+ "src/compiler/backend/s390/instruction-selector-s390.cc",
+ "src/compiler/backend/s390/unwinding-info-writer-s390.cc",
+ ]
+} else if (v8_current_cpu == "riscv64") {
+ v8_compiler_sources += [
+ ### gcmole(riscv64) ###
+ "src/compiler/backend/riscv/code-generator-riscv.cc",
+ "src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
+ "src/compiler/backend/riscv/instruction-selector-riscv64.cc",
+ ]
+} else if (v8_current_cpu == "riscv32") {
+ v8_compiler_sources += [
+ ### gcmole(riscv32) ###
+ "src/compiler/backend/riscv/code-generator-riscv.cc",
+ "src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
+ "src/compiler/backend/riscv/instruction-selector-riscv32.cc",
+ ]
+}
+
if (v8_enable_webassembly) {
v8_compiler_sources += [
"src/compiler/int64-lowering.cc",
+ "src/compiler/wasm-call-descriptors.cc",
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-escape-analysis.cc",
"src/compiler/wasm-gc-lowering.cc",
"src/compiler/wasm-gc-operator-reducer.cc",
"src/compiler/wasm-graph-assembler.cc",
+ "src/compiler/wasm-inlining-into-js.cc",
"src/compiler/wasm-inlining.cc",
+ "src/compiler/wasm-load-elimination.cc",
"src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-typer.cc",
]
}
if (v8_enable_wasm_simd256_revec) {
- v8_compiler_sources += [ "src/compiler/linear-scheduler.cc" ]
+ v8_compiler_sources += [
+ "src/compiler/linear-scheduler.cc",
+ "src/compiler/revectorizer.cc",
+ ]
}
-# The src/compiler files with optimizations.
-v8_source_set("v8_compiler_opt") {
+# The src/compiler files for use in mksnapshot.
+# - These might be built with additional optimizations if
+# v8_enable_fast_mksnapshot is set.
+# - We always include Turbofan even if v8_enable_turbofan is unset so that
+# builtins can be generated by mksnapshot.
+v8_source_set("v8_compiler_for_mksnapshot_source_set") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = v8_compiler_sources
@@ -4219,11 +4474,16 @@ v8_source_set("v8_compiler_opt") {
}
}
-# The src/compiler files with default optimization behavior.
+# The src/compiler files with default behavior.
v8_source_set("v8_compiler") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- sources = v8_compiler_sources
+ if (v8_enable_turbofan) {
+ sources = v8_compiler_sources
+ } else {
+ # With Turbofan disabled, we only include the stubbed-out API.
+ sources = [ "src/compiler/turbofan-disabled.cc" ]
+ }
public_deps = [
":generate_bytecode_builtins_list",
@@ -4247,15 +4507,32 @@ v8_source_set("v8_turboshaft") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ "src/compiler/turboshaft/assembler.cc",
+ "src/compiler/turboshaft/build-graph-phase.cc",
+ "src/compiler/turboshaft/dead-code-elimination-phase.cc",
+ "src/compiler/turboshaft/decompression-optimization-phase.cc",
"src/compiler/turboshaft/decompression-optimization.cc",
"src/compiler/turboshaft/graph-builder.cc",
"src/compiler/turboshaft/graph-visualizer.cc",
"src/compiler/turboshaft/graph.cc",
+ "src/compiler/turboshaft/late-escape-analysis-reducer.cc",
+ "src/compiler/turboshaft/late-optimization-phase.cc",
+ "src/compiler/turboshaft/machine-lowering-phase.cc",
+ "src/compiler/turboshaft/memory-optimization-reducer.cc",
"src/compiler/turboshaft/operations.cc",
"src/compiler/turboshaft/optimization-phase.cc",
+ "src/compiler/turboshaft/optimize-phase.cc",
+ "src/compiler/turboshaft/phase.cc",
+ "src/compiler/turboshaft/recreate-schedule-phase.cc",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/representations.cc",
"src/compiler/turboshaft/simplify-tf-loops.cc",
+ "src/compiler/turboshaft/tag-untag-lowering-phase.cc",
+ "src/compiler/turboshaft/type-assertions-phase.cc",
+ "src/compiler/turboshaft/type-parser.cc",
+ "src/compiler/turboshaft/typed-optimizations-phase.cc",
+ "src/compiler/turboshaft/typer.cc",
+ "src/compiler/turboshaft/types.cc",
"src/compiler/turboshaft/utils.cc",
]
@@ -4277,8 +4554,14 @@ v8_source_set("v8_turboshaft") {
}
group("v8_compiler_for_mksnapshot") {
- if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
- deps = [ ":v8_compiler_opt" ]
+ if ((is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) ||
+ !v8_enable_turbofan) {
+ # mksnapshot needs its own version of the compiler, either because
+ # a) we're optimizing for mksnapshot execution speed and the compiler
+ # should be optimized even if the rest of V8 is not; or
+ # b) Turbofan is disabled and thus not compiled into the rest of V8, yet
+ # mksnapshot still needs TF to generate builtins.
+ deps = [ ":v8_compiler_for_mksnapshot_source_set" ]
} else {
deps = [ ":v8_compiler" ]
}
@@ -4297,7 +4580,11 @@ group("v8_tracing") {
}
v8_source_set("v8_base_without_compiler") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
+ # Only targets in this file and gcmole can depend on this.
+ visibility = [
+ ":*",
+ "tools/gcmole/:*",
+ ]
# Split static libraries on windows into two.
split_count = 2
@@ -4351,7 +4638,6 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins-trace.cc",
"src/builtins/builtins-typed-array.cc",
"src/builtins/builtins-weak-refs.cc",
- "src/builtins/builtins-web-snapshots.cc",
"src/builtins/builtins.cc",
"src/builtins/constants-table-builder.cc",
"src/codegen/aligned-slot-allocator.cc",
@@ -4371,6 +4657,7 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/handler-table.cc",
"src/codegen/interface-descriptors.cc",
"src/codegen/machine-type.cc",
+ "src/codegen/macro-assembler-base.cc",
"src/codegen/maglev-safepoint-table.cc",
"src/codegen/optimized-compilation-info.cc",
"src/codegen/pending-optimization-table.cc",
@@ -4381,10 +4668,10 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/source-position.cc",
"src/codegen/tick-counter.cc",
"src/codegen/tnode.cc",
- "src/codegen/turbo-assembler.cc",
"src/codegen/unoptimized-compilation-info.cc",
"src/common/assert-scope.cc",
"src/common/code-memory-access.cc",
+ "src/common/ptr-compr.cc",
"src/compiler-dispatcher/lazy-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/date/date.cc",
@@ -4460,10 +4747,10 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/concurrent-marking.cc",
"src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-snapshot.cc",
+ "src/heap/cppgc-js/cross-heap-remembered-set.cc",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
- "src/heap/embedder-tracing.cc",
"src/heap/evacuation-verifier.cc",
"src/heap/factory-base.cc",
"src/heap/factory.cc",
@@ -4471,7 +4758,6 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/free-list.cc",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-tracer.cc",
- "src/heap/global-handle-marking-visitor.cc",
"src/heap/heap-allocator.cc",
"src/heap/heap-controller.cc",
"src/heap/heap-layout-tracer.cc",
@@ -4494,6 +4780,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/memory-chunk.cc",
"src/heap/memory-measurement.cc",
"src/heap/memory-reducer.cc",
+ "src/heap/minor-gc-job.cc",
"src/heap/new-spaces.cc",
"src/heap/object-stats.cc",
"src/heap/objects-visiting.cc",
@@ -4502,13 +4789,12 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/read-only-heap.cc",
"src/heap/read-only-spaces.cc",
"src/heap/safepoint.cc",
- "src/heap/scavenge-job.cc",
"src/heap/scavenger.cc",
"src/heap/slot-set.cc",
"src/heap/spaces.cc",
- "src/heap/stress-marking-observer.cc",
"src/heap/stress-scavenge-observer.cc",
"src/heap/sweeper.cc",
+ "src/heap/traced-handles-marking-visitor.cc",
"src/heap/weak-object-worklists.cc",
"src/ic/call-optimization.cc",
"src/ic/handler-configuration.cc",
@@ -4584,6 +4870,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-segment-iterator.cc",
"src/objects/js-segmenter.cc",
"src/objects/js-segments.cc",
+ "src/objects/js-struct.cc",
"src/objects/js-temporal-objects.cc",
"src/objects/keys.cc",
"src/objects/literal-objects.cc",
@@ -4736,13 +5023,13 @@ v8_source_set("v8_base_without_compiler") {
"src/utils/allocation.cc",
"src/utils/bit-vector.cc",
"src/utils/detachable-vector.cc",
+ "src/utils/hex-format.cc",
"src/utils/identity-map.cc",
"src/utils/memcopy.cc",
"src/utils/ostreams.cc",
+ "src/utils/sha-256.cc",
"src/utils/utils.cc",
"src/utils/version.cc",
- "src/web-snapshot/web-snapshot.cc",
- "src/web-snapshot/web-snapshot.h",
"src/zone/accounting-allocator.cc",
"src/zone/type-stats.cc",
"src/zone/zone-segment.cc",
@@ -4765,13 +5052,26 @@ v8_source_set("v8_base_without_compiler") {
"src/maglev/maglev-graph-printer.cc",
"src/maglev/maglev-interpreter-frame-state.cc",
"src/maglev/maglev-ir.cc",
+ "src/maglev/maglev-phi-representation-selector.cc",
"src/maglev/maglev-regalloc.cc",
"src/maglev/maglev.cc",
]
+ if (v8_current_cpu == "arm64") {
+ sources += [
+ "src/maglev/arm64/maglev-assembler-arm64.cc",
+ "src/maglev/arm64/maglev-ir-arm64.cc",
+ ]
+ } else if (v8_current_cpu == "x64") {
+ sources += [
+ "src/maglev/x64/maglev-assembler-x64.cc",
+ "src/maglev/x64/maglev-ir-x64.cc",
+ ]
+ }
}
if (v8_enable_webassembly) {
- sources += [ ### gcmole(all) ###
+ sources += [
+ ### gcmole(all) ###
"src/asmjs/asm-js.cc",
"src/asmjs/asm-parser.cc",
"src/asmjs/asm-scanner.cc",
@@ -4822,6 +5122,7 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/wasm-result.cc",
"src/wasm/wasm-serialization.cc",
"src/wasm/wasm-subtyping.cc",
+ "src/wasm/well-known-imports.cc",
]
}
@@ -4853,14 +5154,12 @@ v8_source_set("v8_base_without_compiler") {
}
if (v8_current_cpu == "x86") {
- sources += [ ### gcmole(arch:ia32) ###
+ sources += [
+ ### gcmole(ia32) ###
"src/codegen/ia32/assembler-ia32.cc",
"src/codegen/ia32/cpu-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.cc",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
- "src/compiler/backend/ia32/code-generator-ia32.cc",
- "src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
- "src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
@@ -4868,15 +5167,12 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
]
} else if (v8_current_cpu == "x64") {
- sources += [ ### gcmole(arch:x64) ###
+ sources += [
+ ### gcmole(x64) ###
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
"src/codegen/x64/assembler-x64.cc",
"src/codegen/x64/cpu-x64.cc",
"src/codegen/x64/macro-assembler-x64.cc",
- "src/compiler/backend/x64/code-generator-x64.cc",
- "src/compiler/backend/x64/instruction-scheduler-x64.cc",
- "src/compiler/backend/x64/instruction-selector-x64.cc",
- "src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
@@ -4906,15 +5202,12 @@ v8_source_set("v8_base_without_compiler") {
}
}
} else if (v8_current_cpu == "arm") {
- sources += [ ### gcmole(arch:arm) ###
+ sources += [
+ ### gcmole(arm) ###
"src/codegen/arm/assembler-arm.cc",
"src/codegen/arm/constants-arm.cc",
"src/codegen/arm/cpu-arm.cc",
"src/codegen/arm/macro-assembler-arm.cc",
- "src/compiler/backend/arm/code-generator-arm.cc",
- "src/compiler/backend/arm/instruction-scheduler-arm.cc",
- "src/compiler/backend/arm/instruction-selector-arm.cc",
- "src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
@@ -4924,7 +5217,8 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/arm/regexp-macro-assembler-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
- sources += [ ### gcmole(arch:arm64) ###
+ sources += [
+ ### gcmole(arm64) ###
"src/codegen/arm64/assembler-arm64.cc",
"src/codegen/arm64/cpu-arm64.cc",
"src/codegen/arm64/decoder-arm64.cc",
@@ -4933,10 +5227,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/arm64/macro-assembler-arm64.cc",
"src/codegen/arm64/register-arm64.cc",
"src/codegen/arm64/utils-arm64.cc",
- "src/compiler/backend/arm64/code-generator-arm64.cc",
- "src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
- "src/compiler/backend/arm64/instruction-selector-arm64.cc",
- "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/eh-frame-arm64.cc",
@@ -4950,7 +5240,7 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_webassembly) {
# Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux,
# Mac, and Windows.
- if ((current_cpu == "arm64" && is_mac) ||
+ if ((current_cpu == "arm64" && is_apple) ||
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [
"src/trap-handler/handler-inside-posix.cc",
@@ -4971,15 +5261,13 @@ v8_source_set("v8_base_without_compiler") {
sources += [ "src/diagnostics/unwinding-info-win64.cc" ]
}
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
- sources += [ ### gcmole(arch:mips64el) ###
+ sources += [
+ ### gcmole(mips64el) ###
"src/codegen/mips64/assembler-mips64.cc",
"src/codegen/mips64/constants-mips64.cc",
"src/codegen/mips64/cpu-mips64.cc",
"src/codegen/mips64/interface-descriptors-mips64-inl.h",
"src/codegen/mips64/macro-assembler-mips64.cc",
- "src/compiler/backend/mips64/code-generator-mips64.cc",
- "src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
- "src/compiler/backend/mips64/instruction-selector-mips64.cc",
"src/deoptimizer/mips64/deoptimizer-mips64.cc",
"src/diagnostics/mips64/disasm-mips64.cc",
"src/diagnostics/mips64/unwinder-mips64.cc",
@@ -4988,15 +5276,13 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
]
} else if (v8_current_cpu == "loong64") {
- sources += [ ### gcmole(arch:loong64) ###
+ sources += [
+ ### gcmole(loong64) ###
"src/codegen/loong64/assembler-loong64.cc",
"src/codegen/loong64/constants-loong64.cc",
"src/codegen/loong64/cpu-loong64.cc",
"src/codegen/loong64/interface-descriptors-loong64-inl.h",
"src/codegen/loong64/macro-assembler-loong64.cc",
- "src/compiler/backend/loong64/code-generator-loong64.cc",
- "src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
- "src/compiler/backend/loong64/instruction-selector-loong64.cc",
"src/deoptimizer/loong64/deoptimizer-loong64.cc",
"src/diagnostics/loong64/disasm-loong64.cc",
"src/diagnostics/loong64/unwinder-loong64.cc",
@@ -5005,15 +5291,12 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/loong64/regexp-macro-assembler-loong64.cc",
]
} else if (v8_current_cpu == "ppc") {
- sources += [ ### gcmole(arch:ppc) ###
+ sources += [
+ ### gcmole(ppc) ###
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
- "src/compiler/backend/ppc/code-generator-ppc.cc",
- "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
- "src/compiler/backend/ppc/instruction-selector-ppc.cc",
- "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@@ -5023,15 +5306,12 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
]
} else if (v8_current_cpu == "ppc64") {
- sources += [ ### gcmole(arch:ppc64) ###
+ sources += [
+ ### gcmole(ppc64) ###
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
- "src/compiler/backend/ppc/code-generator-ppc.cc",
- "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
- "src/compiler/backend/ppc/instruction-selector-ppc.cc",
- "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@@ -5041,15 +5321,12 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += [ ### gcmole(arch:s390) ###
+ sources += [
+ ### gcmole(s390) ###
"src/codegen/s390/assembler-s390.cc",
"src/codegen/s390/constants-s390.cc",
"src/codegen/s390/cpu-s390.cc",
"src/codegen/s390/macro-assembler-s390.cc",
- "src/compiler/backend/s390/code-generator-s390.cc",
- "src/compiler/backend/s390/instruction-scheduler-s390.cc",
- "src/compiler/backend/s390/instruction-selector-s390.cc",
- "src/compiler/backend/s390/unwinding-info-writer-s390.cc",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
@@ -5059,7 +5336,8 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/s390/regexp-macro-assembler-s390.cc",
]
} else if (v8_current_cpu == "riscv64") {
- sources += [ ### gcmole(arch:riscv64) ###
+ sources += [
+ ### gcmole(riscv64) ###
"src/codegen/riscv/assembler-riscv.cc",
"src/codegen/riscv/base-assembler-riscv.cc",
"src/codegen/riscv/base-constants-riscv.cc",
@@ -5074,9 +5352,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/riscv/extension-riscv-zicsr.cc",
"src/codegen/riscv/extension-riscv-zifencei.cc",
"src/codegen/riscv/macro-assembler-riscv.cc",
- "src/compiler/backend/riscv/code-generator-riscv.cc",
- "src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
- "src/compiler/backend/riscv/instruction-selector-riscv64.cc",
"src/deoptimizer/riscv/deoptimizer-riscv.cc",
"src/diagnostics/riscv/disasm-riscv.cc",
"src/diagnostics/riscv/unwinder-riscv.cc",
@@ -5085,7 +5360,8 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/riscv/regexp-macro-assembler-riscv.cc",
]
} else if (v8_current_cpu == "riscv32") {
- sources += [ ### gcmole(arch:riscv32) ###
+ sources += [
+ ### gcmole(riscv32) ###
"src/codegen/riscv/assembler-riscv.cc",
"src/codegen/riscv/base-assembler-riscv.cc",
"src/codegen/riscv/base-constants-riscv.cc",
@@ -5100,9 +5376,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/riscv/extension-riscv-zicsr.cc",
"src/codegen/riscv/extension-riscv-zifencei.cc",
"src/codegen/riscv/macro-assembler-riscv.cc",
- "src/compiler/backend/riscv/code-generator-riscv.cc",
- "src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
- "src/compiler/backend/riscv/instruction-selector-riscv32.cc",
"src/deoptimizer/riscv/deoptimizer-riscv.cc",
"src/diagnostics/riscv/disasm-riscv.cc",
"src/diagnostics/riscv/unwinder-riscv.cc",
@@ -5152,7 +5425,7 @@ v8_source_set("v8_base_without_compiler") {
deps += [ ":run_gen-regexp-special-case" ]
sources += [ "$target_gen_dir/src/regexp/special-case.cc" ]
if (is_win) {
- deps += [ "//third_party/icu:icudata" ]
+ deps += [ "$v8_icu_path:icudata" ]
}
} else {
sources -= [
@@ -5178,8 +5451,8 @@ v8_source_set("v8_base_without_compiler") {
if (v8_use_zlib) {
deps += [
- "//third_party/zlib",
- "//third_party/zlib/google:compression_utils_portable",
+ "$v8_zlib_path",
+ "$v8_zlib_path/google:compression_utils_portable",
]
}
@@ -5223,8 +5496,11 @@ group("v8_base") {
public_deps = [
":v8_base_without_compiler",
":v8_compiler",
- ":v8_turboshaft",
]
+
+ if (v8_enable_turbofan) {
+ public_deps += [ ":v8_turboshaft" ]
+ }
}
v8_source_set("torque_base") {
@@ -5240,7 +5516,6 @@ v8_source_set("torque_base") {
"src/torque/cfg.h",
"src/torque/class-debug-reader-generator.cc",
"src/torque/constants.h",
- "src/torque/contextual.h",
"src/torque/cpp-builder.cc",
"src/torque/cpp-builder.h",
"src/torque/csa-generator.cc",
@@ -5370,6 +5645,7 @@ v8_component("v8_libbase") {
"src/base/build_config.h",
"src/base/compiler-specific.h",
"src/base/container-utils.h",
+ "src/base/contextual.h",
"src/base/cpu.cc",
"src/base/cpu.h",
"src/base/debug/stack_trace.cc",
@@ -5427,6 +5703,7 @@ v8_component("v8_libbase") {
"src/base/platform/memory.h",
"src/base/platform/mutex.cc",
"src/base/platform/mutex.h",
+ "src/base/platform/platform.cc",
"src/base/platform/platform.h",
"src/base/platform/semaphore.cc",
"src/base/platform/semaphore.h",
@@ -5528,7 +5805,6 @@ v8_component("v8_libbase") {
sources += [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-darwin.cc",
- "src/base/platform/platform-macos.cc",
]
} else {
sources += [
@@ -5556,12 +5832,12 @@ v8_component("v8_libbase") {
sources += [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-darwin.cc",
- "src/base/platform/platform-macos.cc",
]
} else if (is_ios) {
sources += [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-darwin.cc",
+ "src/base/platform/platform-ios.cc",
]
} else if (is_win) {
# TODO(infra): Add support for cygwin.
@@ -5612,9 +5888,35 @@ v8_component("v8_libbase") {
[ "//build/config/clang:llvm-symbolizer_data($host_toolchain)" ]
}
+ if (v8_use_libm_trig_functions) {
+ deps += [ ":libm" ]
+ }
+
# TODO(infra): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
}
+if (v8_use_libm_trig_functions) {
+ source_set("libm") {
+ sources = [
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.c",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.h",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h",
+ "third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h",
+ ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ if (!is_debug) {
+ # Build code using -O3, see: crbug.com/1084371.
+ configs += [ "//build/config/compiler:optimize_speed" ]
+ }
+ }
+}
+
v8_component("v8_libplatform") {
sources = [
"//base/trace_event/common/trace_event_common.h",
@@ -5764,31 +6066,31 @@ v8_source_set("v8_heap_base") {
if (is_clang || !is_win) {
if (current_cpu == "x64") {
- sources += [ "src/heap/base/asm/x64/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/x64/push_registers_asm.cc" ]
} else if (current_cpu == "x86") {
- sources += [ "src/heap/base/asm/ia32/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/ia32/push_registers_asm.cc" ]
} else if (current_cpu == "arm") {
- sources += [ "src/heap/base/asm/arm/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/arm/push_registers_asm.cc" ]
} else if (current_cpu == "arm64") {
- sources += [ "src/heap/base/asm/arm64/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/arm64/push_registers_asm.cc" ]
} else if (current_cpu == "ppc64") {
- sources += [ "src/heap/base/asm/ppc/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ]
} else if (current_cpu == "s390x") {
- sources += [ "src/heap/base/asm/s390/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
- sources += [ "src/heap/base/asm/mips64/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
} else if (current_cpu == "loong64") {
- sources += [ "src/heap/base/asm/loong64/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
} else if (current_cpu == "riscv64" || current_cpu == "riscv32") {
- sources += [ "src/heap/base/asm/riscv/save_registers_asm.cc" ]
+ sources += [ "src/heap/base/asm/riscv/push_registers_asm.cc" ]
}
} else if (is_win) {
if (current_cpu == "x64") {
- sources += [ "src/heap/base/asm/x64/save_registers_masm.asm" ]
+ sources += [ "src/heap/base/asm/x64/push_registers_masm.asm" ]
} else if (current_cpu == "x86") {
- sources += [ "src/heap/base/asm/ia32/save_registers_masm.asm" ]
+ sources += [ "src/heap/base/asm/ia32/push_registers_masm.asm" ]
} else if (current_cpu == "arm64") {
- sources += [ "src/heap/base/asm/arm64/save_registers_masm.S" ]
+ sources += [ "src/heap/base/asm/arm64/push_registers_masm.S" ]
}
}
@@ -6033,11 +6335,12 @@ if (v8_check_header_includes) {
#
if (v8_monolithic) {
- # A component build is not monolithic.
- assert(!is_component_build)
+ assert(!is_component_build,
+ "Set `is_component_build = false` for v8_monolithic.")
# Using external startup data would produce separate files.
- assert(!v8_use_external_startup_data)
+ assert(!v8_use_external_startup_data,
+ "Set `v8_use_external_startup_data = false` for v8_monolithic.")
v8_static_library("v8_monolith") {
deps = [
":v8",
@@ -6124,6 +6427,8 @@ if (current_toolchain == v8_snapshot_toolchain) {
"src/snapshot/embedded/platform-embedded-file-writer-win.h",
"src/snapshot/mksnapshot.cc",
"src/snapshot/snapshot-empty.cc",
+ "src/snapshot/static-roots-gen.cc",
+ "src/snapshot/static-roots-gen.h",
]
if (v8_control_flow_integrity) {
@@ -6222,7 +6527,7 @@ if (v8_enable_i18n_support) {
":v8_libbase",
":v8_shared_internal_headers",
"//build/win:default_exe_manifest",
- "//third_party/icu",
+ v8_icu_path,
]
configs = [ ":internal_config" ]
@@ -6402,10 +6707,14 @@ if (is_component_build) {
":torque_ls_base",
":v8_base",
":v8_headers",
- ":v8_initializers",
":v8_snapshot",
]
+ if (v8_enable_turbofan) {
+ # For cctest/test-serialize.
+ public_deps += [ ":v8_initializers" ]
+ }
+
configs = [ ":internal_config" ]
public_configs = [ ":external_config" ]
@@ -6458,10 +6767,14 @@ if (is_component_build) {
":torque_base",
":torque_ls_base",
":v8_base",
- ":v8_initializers",
":v8_snapshot",
]
+ if (v8_enable_turbofan) {
+ # For cctest/test-serialize.
+ public_deps += [ ":v8_initializers" ]
+ }
+
public_configs = [ ":external_config" ]
}
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index ab0a9bf543..05b32b571c 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -30,8 +30,18 @@ vars = {
'checkout_instrumented_libraries': False,
'checkout_ittapi': False,
+
+ # Fetch the prebuilt binaries for llvm-cov and llvm-profdata. Needed to
+ # process the raw profiles produced by instrumented targets (built with
+ # the gn arg 'use_clang_coverage').
+ 'checkout_clang_coverage_tools': False,
+
# Fetch clang-tidy into the same bin/ directory as our clang binary.
'checkout_clang_tidy': False,
+
+ # Fetch and build V8 builtins with PGO profiles
+ 'checkout_v8_builtins_pgo_profiles': False,
+
'chromium_url': 'https://chromium.googlesource.com',
'android_url': 'https://android.googlesource.com',
'download_gcmole': False,
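
Both new variables are opt-in and are consumed by the 'clang_coverage' and 'checkout_v8_builtins_pgo_profiles' hooks further down in this file. In a standalone V8 checkout they would typically be enabled per-checkout via custom_vars in .gclient rather than by editing DEPS; a minimal, hypothetical sketch (the solution name and URL mirror a standard V8 checkout and are not part of this change):

  solutions = [
    {
      "name": "v8",
      "url": "https://chromium.googlesource.com/v8/v8.git",
      "custom_vars": {
        # Only these two keys come from the DEPS change above.
        "checkout_clang_coverage_tools": True,
        "checkout_v8_builtins_pgo_profiles": True,
      },
    },
  ]
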
@@ -43,22 +53,22 @@ vars = {
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',
# reclient CIPD package version
- 'reclient_version': 're_client_version:0.83.0.da55f4f-gomaip',
+ 'reclient_version': 're_client_version:0.99.0.3f95625-gomaip',
# GN CIPD package version.
- 'gn_version': 'git_revision:a4d67be044b42963de801001e7146f9657c7fad4',
+ 'gn_version': 'git_revision:41fef642de70ecdcaaa26be96d56a0398f95abd4',
# ninja CIPD package version
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
- 'ninja_version': 'version:2@1.8.2.chromium.3',
+ 'ninja_version': 'version:2@1.11.1.chromium.6',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:f8f64a8c560d2bf68a3ad1137979d17cffb36d30',
+ 'luci_go': 'git_revision:320bf3ed60cd4d24549d0ea9ee3a94394f2665ce',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
- 'fuchsia_version': 'version:10.20221109.1.1',
+ 'fuchsia_version': 'version:12.20230322.3.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -91,18 +101,18 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_cmdline-tools_version': 'IPzAG-uU5zVMxohpg9-7-N0tQC1TCSW1VbrBFw7Ld04C',
+ 'android_sdk_cmdline-tools_version': '3Yn5Sn7BMObm8gsoZCF0loJMKg9_PpgU07G9DObCLdQC',
}
deps = {
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '147f65333c38ddd1ebf554e89965c243c8ce50b3',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '875cb19167f2e0d7b1eca89a4d5b5693421424c6',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '9e9a4341dd24e68cba0f228567a6edbaff1c665b',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '49ac7cf34ab2e59a10629a7a722cfb94348c4996',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0a6c69640f1841d9109eac70a25af310d4c1d8c7',
'buildtools/clang_format/script':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef',
'buildtools/linux64': {
'packages': [
{
@@ -124,11 +134,11 @@ deps = {
'condition': 'host_os == "mac"',
},
'buildtools/third_party/libc++/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '4218f3525ad438b22b0e173d963515a09d143398',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'e44019bfac2b2d3ebe1618628884f85c8600e322',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '1a32724f721e1c3b6c590a07fe4a954344f15e48',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '9643f2cf13d6935a84a30b7da7de53327733e190',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a318d6a4c283a9d342d2a1e20292c1496fe12997',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '29a6dda8c6588ba4abeafdb21be531e757983e31',
'buildtools/win': {
'packages': [
{
@@ -147,14 +157,14 @@ deps = {
}
],
'dep_type': 'cipd',
- 'condition': '(host_os == "linux" or host_os == "mac" or host_os == "win") and host_cpu != "s390" and host_cpu != "ppc"',
+ 'condition': '(host_os == "linux" or host_os == "mac" or host_os == "win") and host_cpu != "s390" and host_cpu != "ppc" and host_cpu != "arm64"',
},
'test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ade328d530525333751e8a3b58f02e18624da085',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd216cc197269fc41eb6eca14710529c3d6650535',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
'condition': 'checkout_android',
@@ -202,15 +212,15 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'f0b11967c94cba8f7cca91d2da20c98d4420fc25',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '018d397758e54d6a6d3b6ddf28a1784664d63f83',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
- 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
+ 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '3de9f013df4b470069d03d250224062e8cf15c49',
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ae1a70891738fb14f64fbb884e00b87ac663aa15',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '479e1e9055020c8d1351bf2194d0a606aeca93d5',
'third_party/fuchsia-sdk/sdk': {
'packages': [
{
@@ -227,9 +237,9 @@ deps = {
'third_party/googletest/src':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'da07448619763d1cde255b361324242646f5b268',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1e49ac26ddc712b1ab702f69023cbc57e9ae6628',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '7bb87a375ffc3effd17a50f690099dcfb9ee280b',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0f536d22dbed454b1254c7e6d7130eab28fba1fa',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
@@ -237,7 +247,7 @@ deps = {
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/jinja2':
- Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '4633bf431193690c3491244f5a0acbe9ac776233',
+ Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '264c07d7e64f2874434a3b8039e101ddf1b01e7e',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
'third_party/logdog/logdog':
@@ -255,7 +265,7 @@ deps = {
'condition': 'host_cpu != "s390" and host_cpu != "ppc"'
},
'third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + '0eba417b2c72264fa825dc21067b9adc9b8adf70',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + '0d180f46481a96cbe8340734fa5cdce3bba636c8',
'third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3',
'third_party/requests': {
@@ -263,9 +273,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '8bbd6c3129b5146489f2321f054e855c347857f4',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '5edb52d4302d7aef232d585ec9ae27ef5c3c5438',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'd3df9cc5362e0af4cda798b0612dde39783b3dc0',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '7a7207a7f2eb0f7f5c9f304a51077a2fd504b3ed',
'tools/luci-go': {
'packages': [
{
@@ -499,7 +509,7 @@ hooks = [
'--arch=x64'],
},
{
- 'name': 'msan_chained_origins',
+ 'name': 'msan_chained_origins_focal',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
@@ -507,11 +517,11 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
- '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins.tgz.sha1',
+ '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-focal.tgz.sha1',
],
},
{
- 'name': 'msan_no_origins',
+ 'name': 'msan_no_origins_focal',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
@@ -519,7 +529,7 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
- '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins.tgz.sha1',
+ '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1',
],
},
{
@@ -558,6 +568,14 @@ hooks = [
'action': ['python3', 'tools/clang/scripts/update.py'],
},
{
+ # This is supposed to support the same set of platforms as 'clang' above.
+ 'name': 'clang_coverage',
+ 'pattern': '.',
+ 'condition': 'checkout_clang_coverage_tools',
+ 'action': ['python3', 'tools/clang/scripts/update.py',
+ '--package=coverage_tools'],
+ },
+ {
'name': 'clang_tidy',
'pattern': '.',
'condition': 'checkout_clang_tidy',
@@ -607,6 +625,16 @@ hooks = [
],
},
{
+ 'name': 'checkout_v8_builtins_pgo_profiles',
+ 'pattern': '.',
+ 'condition': 'checkout_v8_builtins_pgo_profiles',
+ 'action': [
+ 'python3',
+ 'tools/builtins-pgo/download_profiles.py',
+ 'download',
+ ],
+ },
+ {
# Clean up build dirs for crbug.com/1337238.
# After a libc++ roll and revert, .ninja_deps would get into a state
# that breaks Ninja on Windows.
diff --git a/deps/v8/PPC_OWNERS b/deps/v8/PPC_OWNERS
index 02c2cd757c..6edd45a6ef 100644
--- a/deps/v8/PPC_OWNERS
+++ b/deps/v8/PPC_OWNERS
@@ -2,4 +2,3 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 03db4c126f..5c31d4dfab 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -85,6 +85,7 @@ def _V8PresubmitChecks(input_api, output_api):
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from v8_presubmit import CppLintProcessor
+ from v8_presubmit import GCMoleProcessor
from v8_presubmit import JSLintProcessor
from v8_presubmit import TorqueLintProcessor
from v8_presubmit import SourceProcessor
@@ -126,6 +127,9 @@ def _V8PresubmitChecks(input_api, output_api):
if not StatusFilesProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=True)):
results.append(output_api.PresubmitError("Status file check failed"))
+ if not GCMoleProcessor().RunOnFiles(
+ input_api.AffectedFiles(include_deletes=False)):
+ results.append(output_api.PresubmitError("GCMole pattern check failed"))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api, bot_allowlist=[
'v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com'
@@ -257,8 +261,9 @@ def _CheckHeadersHaveIncludeGuards(input_api, output_api):
files_to_check=(file_inclusion_pattern, ),
files_to_skip=files_to_skip)
- leading_src_pattern = input_api.re.compile(r'^src/')
- dash_dot_slash_pattern = input_api.re.compile(r'[-./]')
+ leading_src_pattern = input_api.re.compile(r'^src[\\\/]')
+ dash_dot_slash_pattern = input_api.re.compile(r'[-.\\\/]')
+
def PathToGuardMacro(path):
"""Guards should be of the form V8_PATH_TO_FILE_WITHOUT_SRC_H_."""
x = input_api.re.sub(leading_src_pattern, 'v8_', path)
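
The widened regexes above make the guard check accept Windows-style path separators as well as forward slashes. A self-contained sketch of the whole transformation follows; the separator replacement and upper-casing at the end are assumed from the docstring's V8_PATH_TO_FILE_WITHOUT_SRC_H_ form, since this hunk only shows the first substitution:

  import re

  leading_src_pattern = re.compile(r'^src[\\\/]')
  dash_dot_slash_pattern = re.compile(r'[-.\\\/]')

  def path_to_guard_macro(path):
      # First substitution mirrors the hunk above: strip the leading "src"
      # component (with either separator) and prefix "v8_".
      x = re.sub(leading_src_pattern, 'v8_', path)
      # Assumed remaining steps: turn dashes, dots and separators into "_",
      # then upper-case and append a trailing underscore.
      x = re.sub(dash_dot_slash_pattern, '_', x)
      return x.upper() + '_'

  # path_to_guard_macro('src/heap/new-spaces.h')  == 'V8_HEAP_NEW_SPACES_H_'
  # path_to_guard_macro(r'src\heap\new-spaces.h') == 'V8_HEAP_NEW_SPACES_H_'
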
diff --git a/deps/v8/S390_OWNERS b/deps/v8/S390_OWNERS
index 02c2cd757c..6edd45a6ef 100644
--- a/deps/v8/S390_OWNERS
+++ b/deps/v8/S390_OWNERS
@@ -2,4 +2,3 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index a078c5568e..d853a0de2a 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -101,11 +101,16 @@
'|test/unittests/heap/cppgc/' \
'|test/unittests/heap/cppgc-js/',
},
+ 'trap-handler': {
+ 'filepath': 'src/trap-handler/',
+ },
+ 'tests': {
+ 'filepath': 'test/',
+ },
},
'WATCHLISTS': {
'maglev': [
- 'jgruber+watch@chromium.org',
'leszeks+watch@chromium.org',
'verwaest+watch@chromium.org',
'victorgomes+watch@chromium.org',
@@ -122,6 +127,7 @@
],
'feature_shipping_status': [
'hablich@chromium.org',
+ 'saelo+watch@chromium.org',
],
'heap_changes': [
'hpayer@chromium.org',
@@ -168,5 +174,14 @@
'cppgc': [
'oilpan-reviews+v8@chromium.org',
],
+ 'trap-handler': [
+ 'ahaas@chromium.org',
+ 'clemensb@chromium.org',
+ 'mark@chromium.org',
+ 'mseaborn@chromium.org',
+ ],
+ 'tests': [
+ 'almuthanna+watch@chromium.org',
+ ],
},
}
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index f07f19b309..f2ee4d36b3 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -208,9 +208,6 @@
#include "base/time/time.h"
#include "build/build_config.h"
-// Export Perfetto symbols in the same way as //base symbols.
-#define PERFETTO_COMPONENT_EXPORT BASE_EXPORT
-
// Enable legacy trace event macros (e.g., TRACE_EVENT{0,1,2}).
#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
@@ -224,11 +221,6 @@
// variable a unique name based on the line number to prevent name collisions.
#define INTERNAL_TRACE_EVENT_UID(name_prefix) PERFETTO_UID(name_prefix)
-// Special trace event macro to trace log messages.
-// TODO(skyostil): Convert this into a regular typed trace event.
-#define TRACE_LOG_MESSAGE(file, message, line) \
- INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
-
// Declare debug annotation converters for base time types, so they can be
// passed as trace event arguments.
// TODO(skyostil): Serialize timestamps using perfetto::TracedValue instead.
@@ -250,7 +242,8 @@ WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation, ::base::Time);
} // namespace perfetto
// Pull in the tracing macro definitions from Perfetto.
-#include "third_party/perfetto/include/perfetto/tracing.h"
+#include "third_party/perfetto/include/perfetto/tracing/track_event.h"
+#include "third_party/perfetto/include/perfetto/tracing/track_event_legacy.h"
namespace perfetto {
namespace legacy {
@@ -983,10 +976,6 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-// Special trace event macro to trace log messages.
-#define TRACE_LOG_MESSAGE(file, message, line) \
- INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
-
// TRACE_EVENT_METADATA* events are information related to other
// injected events, not events in their own right.
#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel
index 448260de88..67454fa90e 100644
--- a/deps/v8/bazel/config/BUILD.bazel
+++ b/deps/v8/bazel/config/BUILD.bazel
@@ -23,12 +23,27 @@ config_setting(
)
config_setting(
+ name = "is_opt",
+ values = {
+ "compilation_mode": "opt",
+ },
+)
+
+config_setting(
name = "is_debug",
values = {
"compilation_mode": "dbg",
},
)
+selects.config_setting_group(
+ name = "is_opt_android",
+ match_all = [
+ ":is_opt",
+ ":is_android",
+ ],
+)
+
config_setting(
name = "platform_cpu_x64",
constraint_values = ["@platforms//cpu:x86_64"],
diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl
index 77e86ef6f2..d4ff96887e 100644
--- a/deps/v8/bazel/defs.bzl
+++ b/deps/v8/bazel/defs.bzl
@@ -2,7 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-FlagInfo = provider(fields = ["value"])
+"""
+This module contains helper functions to compile V8.
+"""
+
+FlagInfo = provider("The value of an option.",
+fields = ["value"])
def _options_impl(ctx):
return FlagInfo(value = ctx.build_setting_value)
@@ -151,6 +156,13 @@ def _default_args():
"-fno-integrated-as",
],
"//conditions:default": [],
+ }) + select({
+ "@v8//bazel/config:is_opt_android": [
+ "-fvisibility=hidden",
+ "-fvisibility-inlines-hidden",
+ ],
+ "//conditions:default": [
+ ],
}),
includes = ["include"],
linkopts = select({
@@ -175,29 +187,33 @@ ENABLE_I18N_SUPPORT_DEFINES = [
"-DUNISTR_FROM_CHAR_EXPLICIT=",
]
-def _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps):
- return noicu_srcs != [] or noicu_deps != [] or icu_srcs != [] or icu_deps != []
+def _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, noicu_defines, icu_srcs, icu_deps, icu_defines):
+ return noicu_srcs != [] or noicu_deps != [] or noicu_defines != [] or icu_srcs != [] or icu_deps != [] or icu_defines != []
# buildifier: disable=function-docstring
def v8_binary(
name,
srcs,
deps = [],
+ defines = [],
includes = [],
copts = [],
linkopts = [],
noicu_srcs = [],
noicu_deps = [],
+ noicu_defines = [],
icu_srcs = [],
icu_deps = [],
+ icu_defines = [],
**kwargs):
default = _default_args()
- if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps):
+ if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, noicu_defines, icu_srcs, icu_deps, icu_defines):
native.cc_binary(
name = "noicu/" + name,
srcs = srcs + noicu_srcs,
deps = deps + noicu_deps + default.deps,
- includes = includes + default.includes,
+ defines = defines + noicu_defines + default.defines,
+ includes = includes + ["noicu/"] + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
**kwargs
@@ -206,7 +222,8 @@ def v8_binary(
name = "icu/" + name,
srcs = srcs + icu_srcs,
deps = deps + icu_deps + default.deps,
- includes = includes + default.includes,
+ includes = includes + ["icu/"] + default.includes,
+ defines = defines + icu_defines + default.defines,
copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES,
linkopts = linkopts + default.linkopts,
**kwargs
@@ -216,6 +233,7 @@ def v8_binary(
name = name,
srcs = srcs,
deps = deps + default.deps,
+ defines = defines + default.defines,
includes = includes + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
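
For illustration, a hypothetical BUILD.bazel call that threads the new defines parameters through v8_binary; the target name, source file, and define strings are invented, only the parameter names come from the macro signature above:

  load(":bazel/defs.bzl", "v8_binary")  # load path as used from V8's own workspace

  v8_binary(
      name = "hello_world",
      srcs = ["samples/hello-world.cc"],
      defines = ["MY_COMMON_DEFINE"],            # applied to every variant
      noicu_defines = ["MY_NOICU_ONLY_DEFINE"],  # only the noicu/ target
      icu_defines = ["MY_ICU_ONLY_DEFINE"],      # only the icu/ target
  )
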
@@ -232,16 +250,18 @@ def v8_library(
linkopts = [],
noicu_srcs = [],
noicu_deps = [],
+ noicu_defines = [],
icu_srcs = [],
icu_deps = [],
+ icu_defines = [],
**kwargs):
default = _default_args()
- if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps):
+ if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, noicu_defines, icu_srcs, icu_deps, icu_defines):
native.cc_library(
name = name + "_noicu",
srcs = srcs + noicu_srcs,
deps = deps + noicu_deps + default.deps,
- includes = includes + default.includes,
+ includes = includes + ["noicu/"] + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
alwayslink = 1,
@@ -260,7 +280,7 @@ def v8_library(
name = name + "_icu",
srcs = srcs + icu_srcs,
deps = deps + icu_deps + default.deps,
- includes = includes + default.includes,
+ includes = includes + ["icu/"] + default.includes,
copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES,
linkopts = linkopts + default.linkopts,
alwayslink = 1,
@@ -288,7 +308,7 @@ def v8_library(
**kwargs
)
-def _torque_impl(ctx):
+def _torque_initializers_impl(ctx):
if ctx.workspace_name == "v8":
v8root = "."
else:
@@ -309,7 +329,7 @@ def _torque_impl(ctx):
# Generate/declare output files
outs = []
for src in ctx.files.srcs:
- root, period, ext = src.path.rpartition(".")
+ root, _period, _ext = src.path.rpartition(".")
# Strip v8root
if root[:len(v8root)] == v8root:
@@ -317,6 +337,84 @@ def _torque_impl(ctx):
file = ctx.attr.prefix + "/torque-generated/" + root
outs.append(ctx.actions.declare_file(file + "-tq-csa.cc"))
outs.append(ctx.actions.declare_file(file + "-tq-csa.h"))
+ outs += [ctx.actions.declare_file(ctx.attr.prefix + "/torque-generated/" + f) for f in ctx.attr.extras]
+ ctx.actions.run(
+ outputs = outs,
+ inputs = ctx.files.srcs,
+ arguments = args,
+ executable = ctx.executable.tool,
+ mnemonic = "GenTorqueInitializers",
+ progress_message = "Generating Torque initializers",
+ )
+ return [DefaultInfo(files = depset(outs))]
+
+_v8_torque_initializers = rule(
+ implementation = _torque_initializers_impl,
+ # cfg = v8_target_cpu_transition,
+ attrs = {
+ "prefix": attr.string(mandatory = True),
+ "srcs": attr.label_list(allow_files = True, mandatory = True),
+ "extras": attr.string_list(),
+ "tool": attr.label(
+ allow_files = True,
+ executable = True,
+ cfg = "exec",
+ ),
+ "args": attr.string_list(),
+ },
+)
+
+def v8_torque_initializers(name, noicu_srcs, icu_srcs, args, extras):
+ _v8_torque_initializers(
+ name = "noicu/" + name,
+ prefix = "noicu",
+ srcs = noicu_srcs,
+ args = args,
+ extras = extras,
+ tool = select({
+ "@v8//bazel/config:v8_target_is_32_bits": ":noicu/torque_non_pointer_compression",
+ "//conditions:default": ":noicu/torque",
+ }),
+ )
+ _v8_torque_initializers(
+ name = "icu/" + name,
+ prefix = "icu",
+ srcs = icu_srcs,
+ args = args,
+ extras = extras,
+ tool = select({
+ "@v8//bazel/config:v8_target_is_32_bits": ":icu/torque_non_pointer_compression",
+ "//conditions:default": ":icu/torque",
+ }),
+ )
+
+def _torque_definitions_impl(ctx):
+ if ctx.workspace_name == "v8":
+ v8root = "."
+ else:
+ v8root = "external/v8"
+
+ # Arguments
+ args = []
+ args += ctx.attr.args
+ args.append("-o")
+ args.append(ctx.bin_dir.path + "/" + v8root + "/" + ctx.attr.prefix + "/torque-generated")
+ args.append("-strip-v8-root")
+ args.append("-v8-root")
+ args.append(v8root)
+
+ # Sources
+ args += [f.path for f in ctx.files.srcs]
+
+ # Generate/declare output files
+ outs = []
+ for src in ctx.files.srcs:
+ root, _period, _ext = src.path.rpartition(".")
+
+ # Strip v8root
+ if root[:len(v8root)] == v8root:
+ root = root[len(v8root):]
+ file = ctx.attr.prefix + "/torque-generated/" + root
outs.append(ctx.actions.declare_file(file + "-tq-inl.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.cc"))
@@ -326,13 +424,13 @@ def _torque_impl(ctx):
inputs = ctx.files.srcs,
arguments = args,
executable = ctx.executable.tool,
- mnemonic = "GenTorque",
- progress_message = "Generating Torque files",
+ mnemonic = "GenTorqueDefinitions",
+ progress_message = "Generating Torque definitions",
)
return [DefaultInfo(files = depset(outs))]
-_v8_torque = rule(
- implementation = _torque_impl,
+_v8_torque_definitions = rule(
+ implementation = _torque_definitions_impl,
# cfg = v8_target_cpu_transition,
attrs = {
"prefix": attr.string(mandatory = True),
@@ -347,31 +445,33 @@ _v8_torque = rule(
},
)
-def v8_torque(name, noicu_srcs, icu_srcs, args, extras):
- _v8_torque(
+def v8_torque_definitions(name, noicu_srcs, icu_srcs, args, extras):
+ _v8_torque_definitions(
name = "noicu/" + name,
prefix = "noicu",
srcs = noicu_srcs,
args = args,
extras = extras,
tool = select({
- "@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
- "//conditions:default": ":torque",
+ "@v8//bazel/config:v8_target_is_32_bits": ":noicu/torque_non_pointer_compression",
+ "//conditions:default": ":noicu/torque",
}),
)
- _v8_torque(
+ _v8_torque_definitions(
name = "icu/" + name,
prefix = "icu",
srcs = icu_srcs,
args = args,
extras = extras,
tool = select({
- "@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
- "//conditions:default": ":torque",
+ "@v8//bazel/config:v8_target_is_32_bits": ":icu/torque_non_pointer_compression",
+ "//conditions:default": ":icu/torque",
}),
)
-def _v8_target_cpu_transition_impl(settings, attr):
+def _v8_target_cpu_transition_impl(settings,
+ attr, # @unused
+ ):
# Check for an existing v8_target_cpu flag.
if "@v8//bazel/config:v8_target_cpu" in settings:
if settings["@v8//bazel/config:v8_target_cpu"] != "none":
@@ -499,10 +599,10 @@ def build_config_content(cpu, icu):
("is_asan", "false"),
("is_cfi", "false"),
("is_clang", "true"),
+ ("is_clang_coverage", "false"),
("is_component_build", "false"),
("is_debug", "false"),
("is_full_debug", "false"),
- ("is_gcov_coverage", "false"),
("is_msan", "false"),
("is_tsan", "false"),
("is_ubsan_vptr", "false"),
@@ -525,7 +625,18 @@ def build_config_content(cpu, icu):
("v8_enable_single_generation", "false"),
("v8_enable_sandbox", "false"),
("v8_enable_shared_ro_heap", "false"),
+ ("v8_disable_write_barriers", "false"),
("v8_target_cpu", cpu),
+ ("v8_code_comments", "false"),
+ ("v8_enable_debug_code", "false"),
+ ("v8_enable_verify_heap", "false"),
+ ("v8_enable_slow_dchecks", "false"),
+ ("v8_enable_maglev", "false"),
+ ("v8_enable_turbofan", "true"),
+ ("v8_enable_disassembler", "false"),
+ ("is_DEBUG_defined", "false"),
+ ("v8_enable_gdbjit", "false"),
+ ("v8_jitless", "false"),
])
# TODO(victorgomes): Create a rule (instead of a macro), that can
diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni
index 4e0c284efa..64abd3e92a 100644
--- a/deps/v8/build_overrides/build.gni
+++ b/deps/v8/build_overrides/build.gni
@@ -32,6 +32,9 @@ use_perfetto_client_library = false
# Some non-Chromium builds don't support building java targets.
enable_java_templates = false
+# Enables assertions on safety checks in libc++.
+enable_safe_libcxx = true
+
# Allows different projects to specify their own suppressions files.
asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"
diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index 99de816372..c7faa8ca58 100644
--- a/deps/v8/gni/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
@@ -104,6 +104,10 @@ if (v8_snapshot_toolchain == "") {
# cross compile Windows arm64 with host toolchain.
v8_snapshot_toolchain = host_toolchain
}
+ } else if (host_cpu == "arm64" && current_cpu == "arm64" &&
+ host_os == "mac") {
+ # cross compile iOS arm64 with host_toolchain
+ v8_snapshot_toolchain = host_toolchain
}
}
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 3f093597fa..d38f376b94 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -2,17 +2,15 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/compiler/pgo/pgo.gni")
import("//build/config/gclient_args.gni")
+import("//build/config/ios/config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
import("release_branch_toggle.gni")
import("split_static_library.gni")
declare_args() {
- # Set flags for tracking code coverage. Uses gcov with gcc and sanitizer
- # coverage with clang.
- v8_code_coverage = false
-
# Includes files needed for correctness fuzzing.
v8_correctness_fuzzer = false
@@ -36,6 +34,9 @@ declare_args() {
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
+ # Includes profiles to optimize builtins.
+ v8_enable_builtins_optimization = ""
+
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
v8_enable_i18n_support = true
@@ -63,10 +64,19 @@ declare_args() {
# Sets -DV8_LITE_MODE.
v8_enable_lite_mode = false
+ # Enable the Turbofan compiler.
+ # Sets -dV8_ENABLE_TURBOFAN.
+ v8_enable_turbofan = ""
+
+ # Enable the Maglev compiler.
+ # Sets -dV8_ENABLE_MAGLEV
+ v8_enable_maglev = ""
+
# Include support for WebAssembly. If disabled, the 'WebAssembly' global
# will not be available, and embedder APIs to generate WebAssembly modules
# will fail. Also, asm.js will not be translated to WebAssembly and will be
# executed as standard JavaScript instead.
+ # Sets -dV8_ENABLE_WEBASSEMBLY.
v8_enable_webassembly = ""
# Enable 256-bit long vector re-vectorization pass in WASM compilation pipeline.
@@ -81,12 +91,6 @@ declare_args() {
# Scan the call stack conservatively during garbage collection.
v8_enable_conservative_stack_scanning = false
- # Use the object start bitmap for inner pointer resolution.
- v8_enable_inner_pointer_resolution_osb = false
-
- # Use the marking bitmap for inner pointer resolution.
- v8_enable_inner_pointer_resolution_mb = false
-
v8_enable_google_benchmark = false
cppgc_is_standalone = false
@@ -97,6 +101,12 @@ declare_args() {
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
+ # Enables a slim write barrier that only performs a single check in the fast
+ # path and delegates all further checks to a slow path call. This is fast
+ # in a setting with few slow-path checks, i.e., with disabled young generation
+ # GC.
+ cppgc_enable_slim_write_barrier = true
+
# Enable pointer compression in cppgc.
cppgc_enable_pointer_compression = false
@@ -107,6 +117,20 @@ declare_args() {
# Enable advanced BigInt algorithms, costing about 10-30 KB binary size
# depending on platform. Disabled on Android to save binary size.
v8_advanced_bigint_algorithms = !is_android
+
+ # TODO: macros for determining endian type are clang specific.
+ v8_use_libm_trig_functions = is_clang
+
+ # iOS devices do not support executable code pages. Note that we
+ # use target_os == "ios" here because it isn't equivalent
+ # to is_ios (is_ios is based on host_os).
+ target_is_ios_device = target_os == "ios" && target_environment == "device"
+
+ # Location of icu.
+ v8_icu_path = "//third_party/icu"
+
+ # Location of zlib.
+ v8_zlib_path = "//third_party/zlib"
}
if (v8_use_external_startup_data == "") {
@@ -125,13 +149,36 @@ if (build_with_chromium && use_perfetto_client_library) {
v8_use_perfetto = true
}
+# Includes profiles to optimize builtins if
+# * it is a Chromium build, and
+# * Chromium builds with optimization.
+# If no profiles are downloaded during gclient runhooks, optimization fails
+# silently.
+if (v8_enable_builtins_optimization == "") {
+ v8_enable_builtins_optimization = build_with_chromium && chrome_pgo_phase == 2
+}
+
+# TODO(jgruber): Move v8_jitless from BUILD.gn here as these
+# all depend on each other and really should be derived from
+# v8_jitless.
# WebAssembly is enabled by default, except in lite mode.
if (v8_enable_webassembly == "") {
- v8_enable_webassembly = !v8_enable_lite_mode
+ # iOS (non-simulator) does not have executable pages for third-party
+ # applications yet, so disable WebAssembly.
+ v8_enable_webassembly = !v8_enable_lite_mode && !target_is_ios_device
}
assert(!(v8_enable_webassembly && v8_enable_lite_mode),
"Webassembly is not available in lite mode.")
+# Turbofan is enabled by default, except in lite mode.
+if (v8_enable_turbofan == "") {
+ # iOS (non-simulator) does not have executable pages for third-party
+ # applications yet, so disable Turbofan.
+ v8_enable_turbofan = !v8_enable_lite_mode && !target_is_ios_device
+}
+assert(v8_enable_turbofan || !v8_enable_webassembly,
+ "Webassembly is not available when Turbofan is disabled.")
+
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
@@ -172,13 +219,6 @@ if (!is_debug) {
]
}
-if (v8_code_coverage && !is_clang) {
- v8_add_configs += [
- v8_path_prefix + ":v8_gcov_coverage_cflags",
- v8_path_prefix + ":v8_gcov_coverage_ldflags",
- ]
-}
-
if (v8_symbol_level != symbol_level) {
v8_remove_configs += [ "//build/config/compiler:default_symbols" ]
if (v8_symbol_level == 0) {
@@ -260,23 +300,13 @@ template("v8_executable") {
# For enabling ASLR.
ldflags = [ "-pie" ]
}
- if (defined(testonly) && testonly && v8_code_coverage) {
- # Only add code coverage cflags for non-test files for performance
- # reasons.
- if (is_clang) {
- configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
- configs +=
- [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
- } else {
- configs -= [ v8_path_prefix + ":v8_gcov_coverage_cflags" ]
- }
- }
deps += [ v8_path_prefix + ":v8_dump_build_config" ]
}
}
template("v8_component") {
component(target_name) {
+ output_name = target_name
forward_variables_from(invoker,
"*",
[
diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h
index 1fa28afa13..a5f8bac0b1 100644
--- a/deps/v8/include/cppgc/cross-thread-persistent.h
+++ b/deps/v8/include/cppgc/cross-thread-persistent.h
@@ -148,10 +148,11 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicCrossThreadPersistent(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
- MemberCheckingPolicy>
+ MemberCheckingPolicy, MemberStorageType>
member,
const SourceLocation& loc = SourceLocation::Current())
: BasicCrossThreadPersistent(member.Get(), loc) {}
@@ -230,10 +231,11 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
// Assignment from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicCrossThreadPersistent& operator=(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
- MemberCheckingPolicy>
+ MemberCheckingPolicy, MemberStorageType>
member) {
return operator=(member.Get());
}
diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h
index 35c59ed1ad..eb7fdaee8c 100644
--- a/deps/v8/include/cppgc/heap-consistency.h
+++ b/deps/v8/include/cppgc/heap-consistency.h
@@ -62,10 +62,10 @@ class HeapConsistency final {
* \returns whether a write barrier is needed and which barrier to invoke.
*/
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
+ typename CheckingPolicy, typename StorageType>
static V8_INLINE WriteBarrierType GetWriteBarrierType(
const internal::BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& value,
+ CheckingPolicy, StorageType>& value,
WriteBarrierParams& params) {
return internal::WriteBarrier::GetWriteBarrierType(
value.GetRawSlot(), value.GetRawStorage(), params);
diff --git a/deps/v8/include/cppgc/internal/api-constants.h b/deps/v8/include/cppgc/internal/api-constants.h
index 023426e94b..453ab88b46 100644
--- a/deps/v8/include/cppgc/internal/api-constants.h
+++ b/deps/v8/include/cppgc/internal/api-constants.h
@@ -32,7 +32,7 @@ static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1};
static constexpr size_t kPageSize = size_t{1} << 17;
-#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
+#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_DARWIN)
constexpr size_t kGuardPageSize = 0;
#else
constexpr size_t kGuardPageSize = 4096;
@@ -57,6 +57,9 @@ static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
// Granularity of heap allocations.
constexpr size_t kAllocationGranularity = sizeof(void*);
+// Default cacheline size.
+constexpr size_t kCachelineSize = 64;
+
} // namespace api_constants
} // namespace internal
diff --git a/deps/v8/include/cppgc/internal/gc-info.h b/deps/v8/include/cppgc/internal/gc-info.h
index e8f90fed57..08ffd411a8 100644
--- a/deps/v8/include/cppgc/internal/gc-info.h
+++ b/deps/v8/include/cppgc/internal/gc-info.h
@@ -10,6 +10,7 @@
#include <type_traits>
#include "cppgc/internal/finalizer-trait.h"
+#include "cppgc/internal/logging.h"
#include "cppgc/internal/name-trait.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -20,12 +21,12 @@ namespace internal {
using GCInfoIndex = uint16_t;
struct V8_EXPORT EnsureGCInfoIndexTrait final {
- // Acquires a new GC info object and returns the index. In addition, also
- // updates `registered_index` atomically.
+ // Acquires a new GC info object and updates `registered_index` with the index
+ // that identifies that new info accordingly.
template <typename T>
- V8_INLINE static GCInfoIndex EnsureIndex(
+ V8_INLINE static void EnsureIndex(
std::atomic<GCInfoIndex>& registered_index) {
- return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
+ EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
}
private:
@@ -34,38 +35,32 @@ struct V8_EXPORT EnsureGCInfoIndexTrait final {
bool = NameTrait<T>::HasNonHiddenName()>
struct EnsureGCInfoIndexTraitDispatch;
- static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback,
- FinalizationCallback,
- NameCallback);
- static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback,
- FinalizationCallback);
- static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback, NameCallback);
- static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback);
- static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback,
- FinalizationCallback,
- NameCallback);
- static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback,
- FinalizationCallback);
- static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback,
- NameCallback);
- static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
- TraceCallback);
+ static void V8_PRESERVE_MOST
+ EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback,
+ FinalizationCallback, NameCallback);
+ static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>&, TraceCallback, FinalizationCallback);
+ static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>&, TraceCallback, NameCallback);
+ static void V8_PRESERVE_MOST
+ EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback);
+ static void V8_PRESERVE_MOST
+ EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback,
+ FinalizationCallback, NameCallback);
+ static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>&, TraceCallback, FinalizationCallback);
+ static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>&, TraceCallback, NameCallback);
+ static void V8_PRESERVE_MOST
+ EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback);
};
#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
template <typename T> \
struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
- V8_INLINE GCInfoIndex \
- operator()(std::atomic<GCInfoIndex>& registered_index) { \
- return function; \
+ V8_INLINE void operator()(std::atomic<GCInfoIndex>& registered_index) { \
+ function; \
} \
};
@@ -143,9 +138,16 @@ struct GCInfoTrait final {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
- const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
- return index ? index
- : EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
+ GCInfoIndex index = registered_index.load(std::memory_order_acquire);
+ if (V8_UNLIKELY(!index)) {
+ EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
+ // Slow path call uses V8_PRESERVE_MOST which does not support return
+ // values (also preserves RAX). Avoid out parameter by just reloading the
+ // value here which at this point is guaranteed to be set.
+ index = registered_index.load(std::memory_order_acquire);
+ CPPGC_DCHECK(index != 0);
+ }
+ return index;
}
};
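
A minimal standalone sketch of the lookup-then-register pattern the GCInfoTrait hunk above switches to, using only the C++ standard library; the names Index, RegisterSlowPath, and EnsureIndex are illustrative and not part of cppgc. The fast path is an acquire load; because the preserve_most slow call cannot return a value, the caller reloads the index after registration:

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <mutex>

    using Index = uint16_t;

    // Illustrative slow path: registers an entry once and publishes its index.
    void RegisterSlowPath(std::atomic<Index>& registered_index) {
      static std::mutex mutex;
      static Index next_index = 1;
      std::lock_guard<std::mutex> guard(mutex);
      // Re-check under the lock: another thread may have registered already.
      if (registered_index.load(std::memory_order_relaxed) != 0) return;
      registered_index.store(next_index++, std::memory_order_release);
    }

    Index EnsureIndex(std::atomic<Index>& registered_index) {
      Index index = registered_index.load(std::memory_order_acquire);
      if (index == 0) {  // Unlikely: first use of this type.
        RegisterSlowPath(registered_index);
        // Reload instead of taking a return value from the slow call.
        index = registered_index.load(std::memory_order_acquire);
        assert(index != 0);
      }
      return index;
    }
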
diff --git a/deps/v8/include/cppgc/internal/member-storage.h b/deps/v8/include/cppgc/internal/member-storage.h
index 0eb6382070..3dfafc4b08 100644
--- a/deps/v8/include/cppgc/internal/member-storage.h
+++ b/deps/v8/include/cppgc/internal/member-storage.h
@@ -17,6 +17,11 @@
namespace cppgc {
namespace internal {
+enum class WriteBarrierSlotType {
+ kCompressed,
+ kUncompressed,
+};
+
#if defined(CPPGC_POINTER_COMPRESSION)
#if defined(__clang__)
@@ -30,16 +35,16 @@ namespace internal {
#define CPPGC_REQUIRE_CONSTANT_INIT
#endif // defined(__clang__)
-class CageBaseGlobal final {
+class V8_EXPORT CageBaseGlobal final {
public:
V8_INLINE CPPGC_CONST static uintptr_t Get() {
CPPGC_DCHECK(IsBaseConsistent());
- return g_base_;
+ return g_base_.base;
}
V8_INLINE CPPGC_CONST static bool IsSet() {
CPPGC_DCHECK(IsBaseConsistent());
- return (g_base_ & ~kLowerHalfWordMask) != 0;
+ return (g_base_.base & ~kLowerHalfWordMask) != 0;
}
private:
@@ -47,12 +52,15 @@ class CageBaseGlobal final {
static constexpr uintptr_t kLowerHalfWordMask =
(api_constants::kCagedHeapReservationAlignment - 1);
- static V8_EXPORT uintptr_t g_base_ CPPGC_REQUIRE_CONSTANT_INIT;
+ static union alignas(api_constants::kCachelineSize) Base {
+ uintptr_t base;
+ char cache_line[api_constants::kCachelineSize];
+ } g_base_ CPPGC_REQUIRE_CONSTANT_INIT;
CageBaseGlobal() = delete;
V8_INLINE static bool IsBaseConsistent() {
- return kLowerHalfWordMask == (g_base_ & kLowerHalfWordMask);
+ return kLowerHalfWordMask == (g_base_.base & kLowerHalfWordMask);
}
friend class CageBaseGlobalUpdater;
@@ -64,6 +72,8 @@ class CageBaseGlobal final {
class V8_TRIVIAL_ABI CompressedPointer final {
public:
using IntegralType = uint32_t;
+ static constexpr auto kWriteBarrierSlotType =
+ WriteBarrierSlotType::kCompressed;
V8_INLINE CompressedPointer() : value_(0u) {}
V8_INLINE explicit CompressedPointer(const void* ptr)
@@ -173,6 +183,8 @@ class V8_TRIVIAL_ABI CompressedPointer final {
class V8_TRIVIAL_ABI RawPointer final {
public:
using IntegralType = uintptr_t;
+ static constexpr auto kWriteBarrierSlotType =
+ WriteBarrierSlotType::kUncompressed;
V8_INLINE RawPointer() : ptr_(nullptr) {}
V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}
@@ -225,9 +237,9 @@ class V8_TRIVIAL_ABI RawPointer final {
};
#if defined(CPPGC_POINTER_COMPRESSION)
-using MemberStorage = CompressedPointer;
+using DefaultMemberStorage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
-using MemberStorage = RawPointer;
+using DefaultMemberStorage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
} // namespace internal
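
The Base union above pads the cage base out to a full cache line so this hot, read-mostly global cannot share a line with neighbouring data. A standalone sketch of the same idiom, with illustrative names and without the clang-specific constant-initialization enforcement the real header uses:

    #include <cstddef>
    #include <cstdint>

    inline constexpr std::size_t kCachelineSize = 64;

    // Cache-line-sized and cache-line-aligned wrapper: writes to unrelated
    // globals can no longer invalidate the line holding `base`.
    union alignas(kCachelineSize) PaddedBase {
      uintptr_t base;
      char cache_line[kCachelineSize];
    };

    PaddedBase g_example_base = {0u};  // zero/constant initialized

    inline uintptr_t GetExampleBase() { return g_example_base.base; }
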
diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h
index 8455b3df81..06fa884f49 100644
--- a/deps/v8/include/cppgc/internal/pointer-policies.h
+++ b/deps/v8/include/cppgc/internal/pointer-policies.h
@@ -33,20 +33,53 @@ struct DijkstraWriteBarrierPolicy {
// barrier doesn't break the tri-color invariant.
}
+ template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+ if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
+ WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
+#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, value, params);
WriteBarrier(type, params, slot, value);
+#endif // !CPPGC_SLIM_WRITE_BARRIER
}
+ template <WriteBarrierSlotType SlotType>
+ V8_INLINE static void AssigningBarrier(const void* slot, RawPointer storage) {
+ static_assert(
+ SlotType == WriteBarrierSlotType::kUncompressed,
+ "Assigning storages of Member and UncompressedMember is not supported");
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+ if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
+ WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
+#else // !CPPGC_SLIM_WRITE_BARRIER
+ WriteBarrier::Params params;
+ const WriteBarrier::Type type =
+ WriteBarrier::GetWriteBarrierType(slot, storage, params);
+ WriteBarrier(type, params, slot, storage.Load());
+#endif // !CPPGC_SLIM_WRITE_BARRIER
+ }
+
+#if defined(CPPGC_POINTER_COMPRESSION)
+ template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot,
- MemberStorage storage) {
+ CompressedPointer storage) {
+ static_assert(
+ SlotType == WriteBarrierSlotType::kCompressed,
+ "Assigning storages of Member and UncompressedMember is not supported");
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+ if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
+ WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
+#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, storage, params);
WriteBarrier(type, params, slot, storage.Load());
+#endif // !CPPGC_SLIM_WRITE_BARRIER
}
+#endif // defined(CPPGC_POINTER_COMPRESSION)
private:
V8_INLINE static void WriteBarrier(WriteBarrier::Type type,
@@ -68,7 +101,9 @@ struct DijkstraWriteBarrierPolicy {
struct NoWriteBarrierPolicy {
V8_INLINE static void InitializingBarrier(const void*, const void*) {}
+ template <WriteBarrierSlotType>
V8_INLINE static void AssigningBarrier(const void*, const void*) {}
+ template <WriteBarrierSlotType, typename MemberStorage>
V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {}
};
@@ -197,7 +232,8 @@ template <typename T, typename WeaknessPolicy,
typename CheckingPolicy = DefaultPersistentCheckingPolicy>
class BasicPersistent;
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy = DefaultMemberCheckingPolicy>
+ typename CheckingPolicy = DefaultMemberCheckingPolicy,
+ typename StorageType = DefaultMemberStorage>
class BasicMember;
} // namespace internal
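
A standalone sketch of the slim assigning barrier selected by CPPGC_SLIM_WRITE_BARRIER above: one unlikely check in the fast path, with everything else delegated to an out-of-line call that only receives the slot. The flag and function names below are illustrative stand-ins for the real WriteBarrier API:

    #include <atomic>
    #include <cstdio>

    std::atomic<bool> g_barrier_enabled{false};  // stands in for WriteBarrier::IsEnabled()

    // Out-of-line slow path; the real CombinedWriteBarrierSlow re-derives the
    // heap state and the stored value from the slot alone.
    void CombinedBarrierSlow(const void* slot) {
      std::printf("slow write barrier for slot %p\n", slot);
    }

    // Slim flavour of an assigning barrier: the value is not even needed on
    // the fast path.
    inline void AssigningBarrier(const void* slot, const void* /*value*/) {
      if (g_barrier_enabled.load(std::memory_order_relaxed)) {  // V8_UNLIKELY in the real code
        CombinedBarrierSlow(slot);
      }
    }
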
diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h
index 37bc5c973e..566724d30a 100644
--- a/deps/v8/include/cppgc/internal/write-barrier.h
+++ b/deps/v8/include/cppgc/internal/write-barrier.h
@@ -70,6 +70,7 @@ class V8_EXPORT WriteBarrier final {
static V8_INLINE Type GetWriteBarrierType(const void* slot, const void* value,
Params& params);
// Returns the required write barrier for a given `slot` and `value`.
+ template <typename MemberStorage>
static V8_INLINE Type GetWriteBarrierType(const void* slot, MemberStorage,
Params& params);
// Returns the required write barrier for a given `slot`.
@@ -79,6 +80,15 @@ class V8_EXPORT WriteBarrier final {
// Returns the required write barrier for a given `value`.
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params);
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+ // A write barrier that combines `GenerationalBarrier()` and
+ // `DijkstraMarkingBarrier()`. We only pass a single parameter here to clobber
+ // as few registers as possible.
+ template <WriteBarrierSlotType>
+ static V8_NOINLINE void V8_PRESERVE_MOST
+ CombinedWriteBarrierSlow(const void* slot);
+#endif // CPPGC_SLIM_WRITE_BARRIER
+
static V8_INLINE void DijkstraMarkingBarrier(const Params& params,
const void* object);
static V8_INLINE void DijkstraMarkingBarrierRange(
@@ -163,7 +173,8 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
- template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
+ template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback,
+ typename MemberStorage>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
@@ -207,7 +218,7 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
template <>
struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kValuePresent> {
- template <typename HeapHandleCallback>
+ template <typename HeapHandleCallback, typename MemberStorage>
static V8_INLINE WriteBarrier::Type Get(const void* slot,
MemberStorage storage,
WriteBarrier::Params& params,
@@ -305,11 +316,9 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
- static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
+ static V8_INLINE WriteBarrier::Type Get(const void* slot, RawPointer value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
- // `MemberStorage` will always be `RawPointer` for non-caged heap builds.
- // Just convert to `void*` in this case.
return ValueModeDispatch<value_mode>::Get(slot, value.Load(), params,
callback);
}
@@ -383,6 +392,7 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
}
// static
+template <typename MemberStorage>
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, MemberStorage value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(slot, value,
diff --git a/deps/v8/include/cppgc/macros.h b/deps/v8/include/cppgc/macros.h
index 030f397e3d..a9ac22d7af 100644
--- a/deps/v8/include/cppgc/macros.h
+++ b/deps/v8/include/cppgc/macros.h
@@ -11,7 +11,10 @@
namespace cppgc {
-// Use if the object is only stack allocated.
+// Use CPPGC_STACK_ALLOCATED if the object is only stack allocated.
+// Add the CPPGC_STACK_ALLOCATED_IGNORE annotation on a case-by-case basis when
+// enforcement of CPPGC_STACK_ALLOCATED should be suppressed.
+#if defined(__clang__)
#define CPPGC_STACK_ALLOCATED() \
public: \
using IsStackAllocatedTypeMarker CPPGC_UNUSED = int; \
@@ -20,6 +23,12 @@ namespace cppgc {
void* operator new(size_t) = delete; \
void* operator new(size_t, void*) = delete; \
static_assert(true, "Force semicolon.")
+#define CPPGC_STACK_ALLOCATED_IGNORE(bug_or_reason) \
+ __attribute__((annotate("stack_allocated_ignore")))
+#else // !defined(__clang__)
+#define CPPGC_STACK_ALLOCATED() static_assert(true, "Force semicolon.")
+#define CPPGC_STACK_ALLOCATED_IGNORE(bug_or_reason)
+#endif // !defined(__clang__)
} // namespace cppgc
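
A usage sketch for the macro pair above, assuming the cppgc headers are on the include path; the class names and annotation reason are made up. Under Clang the first macro bans heap allocation and marks the type for the stack-allocation check, and the second suppresses that check for an individual declaration:

    #include "cppgc/macros.h"

    class StackOnlyScope final {
      CPPGC_STACK_ALLOCATED();

     public:
      explicit StackOnlyScope(int depth) : depth_(depth) {}

     private:
      int depth_;
    };

    struct LegacyHolder {
      // Suppresses enforcement for this one field; the argument documents why.
      CPPGC_STACK_ALLOCATED_IGNORE("example.bug/123: audited, short-lived")
      StackOnlyScope scope{0};
    };
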
diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h
index 9bc383634f..b6382a0235 100644
--- a/deps/v8/include/cppgc/member.h
+++ b/deps/v8/include/cppgc/member.h
@@ -28,13 +28,11 @@ namespace internal {
// MemberBase always refers to the object as const object and defers to
// BasicMember on casting to the right type as needed.
+template <typename StorageType>
class V8_TRIVIAL_ABI MemberBase {
public:
-#if defined(CPPGC_POINTER_COMPRESSION)
- using RawStorage = CompressedPointer;
-#else // !defined(CPPGC_POINTER_COMPRESSION)
- using RawStorage = RawPointer;
-#endif // !defined(CPPGC_POINTER_COMPRESSION)
+ using RawStorage = StorageType;
+
protected:
struct AtomicInitializerTag {};
@@ -75,16 +73,19 @@ class V8_TRIVIAL_ABI MemberBase {
// The basic class from which all Member classes are 'generated'.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
+ typename CheckingPolicy, typename StorageType>
+class V8_TRIVIAL_ABI BasicMember final : private MemberBase<StorageType>,
private CheckingPolicy {
+ using Base = MemberBase<StorageType>;
+
public:
using PointeeType = T;
+ using RawStorage = typename Base::RawStorage;
V8_INLINE constexpr BasicMember() = default;
- V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT
- V8_INLINE BasicMember(SentinelPointer s) : MemberBase(s) {} // NOLINT
- V8_INLINE BasicMember(T* raw) : MemberBase(raw) { // NOLINT
+ V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT
+ V8_INLINE BasicMember(SentinelPointer s) : Base(s) {} // NOLINT
+ V8_INLINE BasicMember(T* raw) : Base(raw) { // NOLINT
InitializingWriteBarrier(raw);
this->CheckPointer(Get());
}
@@ -94,13 +95,13 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
// Atomic ctor. Using the AtomicInitializerTag forces BasicMember to
// initialize using atomic assignments. This is required for preventing
// data races with concurrent marking.
- using AtomicInitializerTag = MemberBase::AtomicInitializerTag;
+ using AtomicInitializerTag = typename Base::AtomicInitializerTag;
V8_INLINE BasicMember(std::nullptr_t, AtomicInitializerTag atomic)
- : MemberBase(nullptr, atomic) {}
+ : Base(nullptr, atomic) {}
V8_INLINE BasicMember(SentinelPointer s, AtomicInitializerTag atomic)
- : MemberBase(s, atomic) {}
+ : Base(s, atomic) {}
V8_INLINE BasicMember(T* raw, AtomicInitializerTag atomic)
- : MemberBase(raw, atomic) {
+ : Base(raw, atomic) {
InitializingWriteBarrier(raw);
this->CheckPointer(Get());
}
@@ -119,7 +120,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
std::enable_if_t<internal::IsDecayedSameV<T, U>>* = nullptr>
V8_INLINE BasicMember( // NOLINT
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>& other)
+ OtherCheckingPolicy, StorageType>& other)
: BasicMember(other.GetRawStorage()) {}
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
@@ -127,7 +128,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
std::enable_if_t<internal::IsStrictlyBaseOfV<T, U>>* = nullptr>
V8_INLINE BasicMember( // NOLINT
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>& other)
+ OtherCheckingPolicy, StorageType>& other)
: BasicMember(other.Get()) {}
// Move ctor.
@@ -142,8 +143,9 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
std::enable_if_t<internal::IsDecayedSameV<T, U>>* = nullptr>
- V8_INLINE BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>&& other) noexcept
+ V8_INLINE BasicMember(
+ BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
+ StorageType>&& other) noexcept
: BasicMember(other.GetRawStorage()) {
other.Clear();
}
@@ -151,8 +153,9 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
std::enable_if_t<internal::IsStrictlyBaseOfV<T, U>>* = nullptr>
- V8_INLINE BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>&& other) noexcept
+ V8_INLINE BasicMember(
+ BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
+ StorageType>&& other) noexcept
: BasicMember(other.Get()) {
other.Clear();
}
@@ -179,7 +182,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
typename OtherCheckingPolicy>
V8_INLINE BasicMember& operator=(
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>& other) {
+ OtherCheckingPolicy, StorageType>& other) {
if constexpr (internal::IsDecayedSameV<T, U>) {
return operator=(other.GetRawStorage());
} else {
@@ -201,8 +204,8 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherWeaknessTag, typename OtherBarrierPolicy,
typename OtherCheckingPolicy>
V8_INLINE BasicMember& operator=(
- BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>&& other) noexcept {
+ BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
+ StorageType>&& other) noexcept {
if constexpr (internal::IsDecayedSameV<T, U>) {
operator=(other.GetRawStorage());
} else {
@@ -226,7 +229,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
}
V8_INLINE BasicMember& operator=(T* other) {
- SetRawAtomic(other);
+ Base::SetRawAtomic(other);
AssigningWriteBarrier(other);
this->CheckPointer(Get());
return *this;
@@ -237,20 +240,20 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
return *this;
}
V8_INLINE BasicMember& operator=(SentinelPointer s) {
- SetRawAtomic(s);
+ Base::SetRawAtomic(s);
return *this;
}
template <typename OtherWeaknessTag, typename OtherBarrierPolicy,
typename OtherCheckingPolicy>
V8_INLINE void Swap(BasicMember<T, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>& other) {
+ OtherCheckingPolicy, StorageType>& other) {
auto tmp = GetRawStorage();
*this = other;
other = tmp;
}
- V8_INLINE explicit operator bool() const { return !IsCleared(); }
+ V8_INLINE explicit operator bool() const { return !Base::IsCleared(); }
V8_INLINE operator T*() const { return Get(); }
V8_INLINE T* operator->() const { return Get(); }
V8_INLINE T& operator*() const { return *Get(); }
@@ -264,10 +267,12 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
// The const_cast below removes the constness from MemberBase storage. The
// following static_cast re-adds any constness if specified through the
// user-visible template parameter T.
- return static_cast<T*>(const_cast<void*>(MemberBase::GetRaw()));
+ return static_cast<T*>(const_cast<void*>(Base::GetRaw()));
}
- V8_INLINE void Clear() { SetRawStorageAtomic(RawStorage{}); }
+ V8_INLINE void Clear() {
+ Base::SetRawStorageAtomic(RawStorage{});
+ }
V8_INLINE T* Release() {
T* result = Get();
@@ -276,41 +281,44 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
}
V8_INLINE const T** GetSlotForTesting() const {
- return reinterpret_cast<const T**>(GetRawSlot());
+ return reinterpret_cast<const T**>(Base::GetRawSlot());
}
V8_INLINE RawStorage GetRawStorage() const {
- return MemberBase::GetRawStorage();
+ return Base::GetRawStorage();
}
private:
- V8_INLINE explicit BasicMember(RawStorage raw) : MemberBase(raw) {
+ V8_INLINE explicit BasicMember(RawStorage raw) : Base(raw) {
InitializingWriteBarrier(Get());
this->CheckPointer(Get());
}
V8_INLINE BasicMember& operator=(RawStorage other) {
- SetRawStorageAtomic(other);
+ Base::SetRawStorageAtomic(other);
AssigningWriteBarrier();
this->CheckPointer(Get());
return *this;
}
V8_INLINE const T* GetRawAtomic() const {
- return static_cast<const T*>(MemberBase::GetRawAtomic());
+ return static_cast<const T*>(Base::GetRawAtomic());
}
V8_INLINE void InitializingWriteBarrier(T* value) const {
- WriteBarrierPolicy::InitializingBarrier(GetRawSlot(), value);
+ WriteBarrierPolicy::InitializingBarrier(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier(T* value) const {
- WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), value);
+ WriteBarrierPolicy::template AssigningBarrier<
+ StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier() const {
- WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), GetRawStorage());
+ WriteBarrierPolicy::template AssigningBarrier<
+ StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(),
+ Base::GetRawStorage());
}
- V8_INLINE void ClearFromGC() const { MemberBase::ClearFromGC(); }
+ V8_INLINE void ClearFromGC() const { Base::ClearFromGC(); }
V8_INLINE T* GetFromGC() const { return Get(); }
@@ -319,19 +327,20 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U>
friend struct cppgc::TraceTrait;
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
- typename CheckingPolicy1>
+ typename CheckingPolicy1, typename StorageType1>
friend class BasicMember;
};
// Member equality operators.
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
- typename WriteBarrierPolicy2, typename CheckingPolicy2>
+ typename WriteBarrierPolicy2, typename CheckingPolicy2,
+ typename StorageType>
V8_INLINE bool operator==(
- const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
- member1,
- const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
- member2) {
+ const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
+ StorageType>& member1,
+ const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
+ StorageType>& member2) {
if constexpr (internal::IsDecayedSameV<T1, T2>) {
// Check compressed pointers if types are the same.
return member1.GetRawStorage() == member2.GetRawStorage();
@@ -345,31 +354,32 @@ V8_INLINE bool operator==(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
- typename WriteBarrierPolicy2, typename CheckingPolicy2>
+ typename WriteBarrierPolicy2, typename CheckingPolicy2,
+ typename StorageType>
V8_INLINE bool operator!=(
- const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
- member1,
- const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
- member2) {
+ const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
+ StorageType>& member1,
+ const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
+ StorageType>& member2) {
return !(member1 == member2);
}
// Equality with raw pointers.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy, typename U>
-V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member,
- U* raw) {
+ typename CheckingPolicy, typename StorageType, typename U>
+V8_INLINE bool operator==(
+ const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>& member,
+ U* raw) {
// Never allow comparison with erased pointers.
static_assert(!internal::IsDecayedSameV<void, U>);
if constexpr (internal::IsDecayedSameV<T, U>) {
// Check compressed pointers if types are the same.
- return member.GetRawStorage() == MemberBase::RawStorage(raw);
+ return member.GetRawStorage() == StorageType(raw);
} else if constexpr (internal::IsStrictlyBaseOfV<T, U>) {
// Cast the raw pointer to T, which may adjust the pointer.
- return member.GetRawStorage() ==
- MemberBase::RawStorage(static_cast<T*>(raw));
+ return member.GetRawStorage() == StorageType(static_cast<T*>(raw));
} else {
// Otherwise, decompress the member.
return member.Get() == raw;
@@ -377,104 +387,112 @@ V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy, typename U>
-V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member,
- U* raw) {
+ typename CheckingPolicy, typename StorageType, typename U>
+V8_INLINE bool operator!=(
+ const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>& member,
+ U* raw) {
return !(member == raw);
}
template <typename T, typename U, typename WeaknessTag,
- typename WriteBarrierPolicy, typename CheckingPolicy>
-V8_INLINE bool operator==(T* raw,
- const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member) {
+ typename WriteBarrierPolicy, typename CheckingPolicy,
+ typename StorageType>
+V8_INLINE bool operator==(
+ T* raw, const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>& member) {
return member == raw;
}
template <typename T, typename U, typename WeaknessTag,
- typename WriteBarrierPolicy, typename CheckingPolicy>
-V8_INLINE bool operator!=(T* raw,
- const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member) {
+ typename WriteBarrierPolicy, typename CheckingPolicy,
+ typename StorageType>
+V8_INLINE bool operator!=(
+ T* raw, const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>& member) {
return !(raw == member);
}
// Equality with sentinel.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member,
- SentinelPointer) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator==(
+ const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>& member,
+ SentinelPointer) {
return member.GetRawStorage().IsSentinel();
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member,
- SentinelPointer s) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator!=(
+ const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>& member,
+ SentinelPointer s) {
return !(member == s);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator==(SentinelPointer s,
- const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator==(
+ SentinelPointer s, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>& member) {
return member == s;
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator!=(SentinelPointer s,
- const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator!=(
+ SentinelPointer s, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>& member) {
return !(s == member);
}
// Equality with nullptr.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member,
- std::nullptr_t) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator==(
+ const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>& member,
+ std::nullptr_t) {
return !static_cast<bool>(member);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member,
- std::nullptr_t n) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator!=(
+ const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>& member,
+ std::nullptr_t n) {
return !(member == n);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator==(std::nullptr_t n,
- const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator==(
+ std::nullptr_t n, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>& member) {
return member == n;
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
-V8_INLINE bool operator!=(std::nullptr_t n,
- const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
- CheckingPolicy>& member) {
+ typename CheckingPolicy, typename StorageType>
+V8_INLINE bool operator!=(
+ std::nullptr_t n, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>& member) {
return !(n == member);
}
// Relational operators.
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
- typename WriteBarrierPolicy2, typename CheckingPolicy2>
+ typename WriteBarrierPolicy2, typename CheckingPolicy2,
+ typename StorageType>
V8_INLINE bool operator<(
- const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
- member1,
- const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
- member2) {
+ const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
+ StorageType>& member1,
+ const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
+ StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@@ -483,12 +501,13 @@ V8_INLINE bool operator<(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
- typename WriteBarrierPolicy2, typename CheckingPolicy2>
+ typename WriteBarrierPolicy2, typename CheckingPolicy2,
+ typename StorageType>
V8_INLINE bool operator<=(
- const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
- member1,
- const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
- member2) {
+ const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
+ StorageType>& member1,
+ const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
+ StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@@ -497,12 +516,13 @@ V8_INLINE bool operator<=(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
- typename WriteBarrierPolicy2, typename CheckingPolicy2>
+ typename WriteBarrierPolicy2, typename CheckingPolicy2,
+ typename StorageType>
V8_INLINE bool operator>(
- const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
- member1,
- const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
- member2) {
+ const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
+ StorageType>& member1,
+ const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
+ StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@@ -511,21 +531,23 @@ V8_INLINE bool operator>(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
- typename WriteBarrierPolicy2, typename CheckingPolicy2>
+ typename WriteBarrierPolicy2, typename CheckingPolicy2,
+ typename StorageType>
V8_INLINE bool operator>=(
- const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
- member1,
- const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
- member2) {
+ const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
+ StorageType>& member1,
+ const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
+ StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
return member1.GetRawStorage() >= member2.GetRawStorage();
}
-template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy>
-struct IsWeak<
- internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy, CheckingPolicy>>
+template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy,
+ typename StorageType>
+struct IsWeak<internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy,
+ CheckingPolicy, StorageType>>
: std::true_type {};
} // namespace internal
@@ -536,8 +558,9 @@ struct IsWeak<
* trace method.
*/
template <typename T>
-using Member = internal::BasicMember<T, internal::StrongMemberTag,
- internal::DijkstraWriteBarrierPolicy>;
+using Member = internal::BasicMember<
+ T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy,
+ internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
/**
* WeakMember is similar to Member in that it is used to point to other garbage
@@ -548,8 +571,9 @@ using Member = internal::BasicMember<T, internal::StrongMemberTag,
* will automatically be set to null.
*/
template <typename T>
-using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
- internal::DijkstraWriteBarrierPolicy>;
+using WeakMember = internal::BasicMember<
+ T, internal::WeakMemberTag, internal::DijkstraWriteBarrierPolicy,
+ internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
/**
* UntracedMember is a pointer to an on-heap object that is not traced for some
@@ -558,8 +582,22 @@ using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
* must be kept alive through other means.
*/
template <typename T>
-using UntracedMember = internal::BasicMember<T, internal::UntracedMemberTag,
- internal::NoWriteBarrierPolicy>;
+using UntracedMember = internal::BasicMember<
+ T, internal::UntracedMemberTag, internal::NoWriteBarrierPolicy,
+ internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
+
+namespace subtle {
+
+/**
+ * UncompressedMember. Use with care in hot paths that would otherwise cause
+ * many decompression cycles.
+ */
+template <typename T>
+using UncompressedMember = internal::BasicMember<
+ T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy,
+ internal::DefaultMemberCheckingPolicy, internal::RawPointer>;
+
+} // namespace subtle
} // namespace cppgc
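
A usage sketch for the extended Member family, assuming the cppgc headers and an initialised cppgc heap; the Node class is made up. Member and WeakMember keep the default storage (compressed when CPPGC_POINTER_COMPRESSION is on), while subtle::UncompressedMember always stores a raw pointer and trades memory for fewer decompression steps on hot paths:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/visitor.h"

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor* visitor) const {
        visitor->Trace(next_);         // strong edge, default storage
        visitor->Trace(parent_);       // weak edge, cleared when unreachable
        visitor->Trace(hot_sibling_);  // strong edge, uncompressed storage
      }

     private:
      cppgc::Member<Node> next_;
      cppgc::WeakMember<Node> parent_;
      cppgc::subtle::UncompressedMember<Node> hot_sibling_;
    };

Assignments through any of these fields go through the write-barrier policies shown earlier in this patch.
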
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h
index 3a66ccc086..709f3fd6ab 100644
--- a/deps/v8/include/cppgc/persistent.h
+++ b/deps/v8/include/cppgc/persistent.h
@@ -114,11 +114,12 @@ class BasicPersistent final : public PersistentBase,
// Constructor from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicPersistent(
- const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
- MemberCheckingPolicy>& member,
- const SourceLocation& loc = SourceLocation::Current())
+ BasicPersistent(const internal::BasicMember<
+ U, MemberBarrierPolicy, MemberWeaknessTag,
+ MemberCheckingPolicy, MemberStorageType>& member,
+ const SourceLocation& loc = SourceLocation::Current())
: BasicPersistent(member.Get(), loc) {}
~BasicPersistent() { Clear(); }
@@ -154,10 +155,12 @@ class BasicPersistent final : public PersistentBase,
// Assignment from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicPersistent& operator=(
const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
- MemberCheckingPolicy>& member) {
+ MemberCheckingPolicy, MemberStorageType>&
+ member) {
return operator=(member.Get());
}
@@ -286,36 +289,39 @@ bool operator!=(const BasicPersistent<T1, WeaknessPolicy1, LocationPolicy1,
template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
- typename MemberWeaknessTag, typename MemberCheckingPolicy>
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename MemberStorageType>
bool operator==(
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p,
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>& m) {
+ MemberCheckingPolicy, MemberStorageType>& m) {
return p.Get() == m.Get();
}
template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
- typename MemberWeaknessTag, typename MemberCheckingPolicy>
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename MemberStorageType>
bool operator!=(
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p,
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>& m) {
+ MemberCheckingPolicy, MemberStorageType>& m) {
return !(p == m);
}
template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
- typename T2, typename PersistentWeaknessPolicy,
- typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
+ typename MemberStorageType, typename T2,
+ typename PersistentWeaknessPolicy, typename PersistentLocationPolicy,
+ typename PersistentCheckingPolicy>
bool operator==(
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>& m,
+ MemberCheckingPolicy, MemberStorageType>& m,
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p) {
@@ -324,11 +330,12 @@ bool operator==(
template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
- typename T2, typename PersistentWeaknessPolicy,
- typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
+ typename MemberStorageType, typename T2,
+ typename PersistentWeaknessPolicy, typename PersistentLocationPolicy,
+ typename PersistentCheckingPolicy>
bool operator!=(
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
- MemberCheckingPolicy>& m,
+ MemberCheckingPolicy, MemberStorageType>& m,
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p) {
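
A usage sketch of the conversions these hunks generalise, assuming the cppgc headers and a live heap; Node is the same kind of made-up GarbageCollected class as above. With the storage type threaded through, a Persistent can now be seeded from any Member flavour, including subtle::UncompressedMember:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/persistent.h"
    #include "cppgc/visitor.h"

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    // Keeps the node alive independently of the (heap-owned) member that
    // referenced it; uses the BasicPersistent(const BasicMember&) constructor.
    cppgc::Persistent<Node> RetainAcrossGC(
        const cppgc::subtle::UncompressedMember<Node>& member) {
      cppgc::Persistent<Node> persistent = member;
      return persistent;
    }
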
diff --git a/deps/v8/include/cppgc/type-traits.h b/deps/v8/include/cppgc/type-traits.h
index 2f499e6886..4651435390 100644
--- a/deps/v8/include/cppgc/type-traits.h
+++ b/deps/v8/include/cppgc/type-traits.h
@@ -16,7 +16,7 @@ class Visitor;
namespace internal {
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy>
+ typename CheckingPolicy, typename StorageType>
class BasicMember;
struct DijkstraWriteBarrierPolicy;
struct NoWriteBarrierPolicy;
@@ -126,9 +126,10 @@ template <typename BasicMemberCandidate, typename WeaknessTag,
typename WriteBarrierPolicy>
struct IsSubclassOfBasicMemberTemplate {
private:
- template <typename T, typename CheckingPolicy>
+ template <typename T, typename CheckingPolicy, typename StorageType>
static std::true_type SubclassCheck(
- BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy>*);
+ BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
+ StorageType>*);
static std::false_type SubclassCheck(...);
public:
diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h
index f7ebc1d01f..9b135e39a0 100644
--- a/deps/v8/include/cppgc/visitor.h
+++ b/deps/v8/include/cppgc/visitor.h
@@ -99,6 +99,20 @@ class V8_EXPORT Visitor {
&HandleWeak<WeakMember<T>>, &weak_member);
}
+#if defined(CPPGC_POINTER_COMPRESSION)
+ /**
+ * Trace method for UncompressedMember.
+ *
+ * \param member UncompressedMember reference retaining an object.
+ */
+ template <typename T>
+ void Trace(const subtle::UncompressedMember<T>& member) {
+ const T* value = member.GetRawAtomic();
+ CPPGC_DCHECK(value != kSentinelPointer);
+ TraceImpl(value);
+ }
+#endif // defined(CPPGC_POINTER_COMPRESSION)
+
/**
* Trace method for inlined objects that are not allocated themselves but
* otherwise follow managed heap layout and have a Trace() method.
@@ -229,7 +243,8 @@ class V8_EXPORT Visitor {
}
/**
- * Trace method for retaining containers weakly.
+ * Trace method for retaining containers weakly. Note that weak containers
+ * should emit write barriers.
*
* \param object reference to the container.
* \param callback to be invoked.
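A minimal sketch of how the new UncompressedMember overload is reached from embedder code, assuming a plain cppgc class; the Parent/Child names are illustrative, and the field type only exists when CPPGC_POINTER_COMPRESSION is defined.

#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Child : public cppgc::GarbageCollected<Child> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Parent : public cppgc::GarbageCollected<Parent> {
 public:
  // subtle::UncompressedMember stores a full pointer even when pointer
  // compression is on; visitor->Trace(child_) resolves to the overload
  // added above.
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }

 private:
  cppgc::subtle::UncompressedMember<Child> child_;
};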
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 6efcf78785..6285d9b6f2 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -511,6 +511,7 @@ domain Debugger
CompileError
BlockedByActiveGenerator
BlockedByActiveFunction
+ BlockedByTopLevelEsModuleChange
# Exception details if any. Only present when `status` is `CompileError`.
optional Runtime.ExceptionDetails exceptionDetails
@@ -1402,6 +1403,13 @@ domain Runtime
optional string objectGroup
# Whether to throw an exception if side effect cannot be ruled out during evaluation.
experimental optional boolean throwOnSideEffect
+      # An alternative way to specify the execution context to call the function on.
+      # Compared to contextId that may be reused across processes, this is guaranteed to be
+      # system-unique, so it can be used to prevent accidental function calls
+      # in a context different from the intended one (e.g. as a result of navigation across
+      # process boundaries).
+ # This is mutually exclusive with `executionContextId`.
+ experimental optional string uniqueContextId
# Whether the result should contain `webDriverValue`, serialized according to
# https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but
# resulting `objectId` is still provided.
@@ -1734,7 +1742,9 @@ domain Runtime
event executionContextDestroyed
parameters
# Id of the destroyed context
- ExecutionContextId executionContextId
+ deprecated ExecutionContextId executionContextId
+ # Unique Id of the destroyed context
+ experimental string executionContextUniqueId
# Issued when all executionContexts were cleared in browser
event executionContextsCleared
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index 12489327c5..6039a9c520 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -282,12 +282,12 @@ class V8_PLATFORM_EXPORT TracingController
const char* name, uint64_t handle) override;
static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
-#endif // !defined(V8_USE_PERFETTO)
void AddTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) override;
void RemoveTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) override;
+#endif // !defined(V8_USE_PERFETTO)
void StartTracing(TraceConfig* trace_config);
void StopTracing();
@@ -307,7 +307,6 @@ class V8_PLATFORM_EXPORT TracingController
std::unique_ptr<base::Mutex> mutex_;
std::unique_ptr<TraceConfig> trace_config_;
std::atomic_bool recording_{false};
- std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
#if defined(V8_USE_PERFETTO)
std::ostream* output_stream_ = nullptr;
@@ -316,6 +315,7 @@ class V8_PLATFORM_EXPORT TracingController
TraceEventListener* listener_for_testing_ = nullptr;
std::unique_ptr<perfetto::TracingSession> tracing_session_;
#else // !defined(V8_USE_PERFETTO)
+ std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
std::unique_ptr<TraceBuffer> trace_buffer_;
#endif // !defined(V8_USE_PERFETTO)
diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h
index 841bd02a79..804fc42c4b 100644
--- a/deps/v8/include/v8-array-buffer.h
+++ b/deps/v8/include/v8-array-buffer.h
@@ -54,12 +54,28 @@ class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
size_t ByteLength() const;
/**
+ * The maximum length (in bytes) that this backing store may grow to.
+ *
+ * If this backing store was created for a resizable ArrayBuffer or a growable
+ * SharedArrayBuffer, it is >= ByteLength(). Otherwise it is ==
+ * ByteLength().
+ */
+ size_t MaxByteLength() const;
+
+ /**
* Indicates whether the backing store was created for an ArrayBuffer or
* a SharedArrayBuffer.
*/
bool IsShared() const;
/**
+ * Indicates whether the backing store was created for a resizable ArrayBuffer
+ * or a growable SharedArrayBuffer, and thus may be resized by user JavaScript
+ * code.
+ */
+ bool IsResizableByUserJavaScript() const;
+
+ /**
* Prevent implicit instantiation of operator delete with size_t argument.
* The size_t argument would be incorrect because ptr points to the
* internal BackingStore object.
@@ -190,6 +206,11 @@ class V8_EXPORT ArrayBuffer : public Object {
size_t ByteLength() const;
/**
+ * Maximum length in bytes.
+ */
+ size_t MaxByteLength() const;
+
+ /**
* Create a new ArrayBuffer. Allocate |byte_length| bytes.
* Allocated memory will be owned by a created ArrayBuffer and
* will be deallocated when it is garbage-collected,
@@ -236,6 +257,21 @@ class V8_EXPORT ArrayBuffer : public Object {
void* deleter_data);
/**
+ * Returns a new resizable standalone BackingStore that is allocated using the
+ * array buffer allocator of the isolate. The result can be later passed to
+ * ArrayBuffer::New.
+ *
+ * |byte_length| must be <= |max_byte_length|.
+ *
+ * This function is usable without an isolate. Unlike |NewBackingStore| calls
+ * with an isolate, GCs cannot be triggered, and there are no
+ * retries. Allocation failure will cause the function to crash with an
+ * out-of-memory error.
+ */
+ static std::unique_ptr<BackingStore> NewResizableBackingStore(
+ size_t byte_length, size_t max_byte_length);
+
+ /**
* Returns true if this ArrayBuffer may be detached.
*/
bool IsDetachable() const;
@@ -393,6 +429,11 @@ class V8_EXPORT SharedArrayBuffer : public Object {
size_t ByteLength() const;
/**
+ * Maximum length in bytes.
+ */
+ size_t MaxByteLength() const;
+
+ /**
* Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
* Allocated memory will be owned by a created SharedArrayBuffer and
* will be deallocated when it is garbage-collected,
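A hedged usage sketch of the resizable-buffer additions above (NewResizableBackingStore, MaxByteLength, IsResizableByUserJavaScript); the 1 KiB/64 KiB sizes and the caller-provided isolate are assumptions, not part of the diff.

#include <cassert>
#include <memory>

#include "v8-array-buffer.h"

v8::Local<v8::ArrayBuffer> MakeResizableBuffer(v8::Isolate* isolate) {
  // Allocate 1 KiB now, but let JS grow the buffer up to 64 KiB later.
  std::unique_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewResizableBackingStore(1024, 64 * 1024);
  v8::Local<v8::ArrayBuffer> buffer =
      v8::ArrayBuffer::New(isolate, std::move(store));
  // The maximum is reported both on the buffer and on its backing store.
  assert(buffer->MaxByteLength() == 64 * 1024);
  assert(buffer->GetBackingStore()->IsResizableByUserJavaScript());
  return buffer;
}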
diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h
index 0ffdfb6656..f0fa561a3d 100644
--- a/deps/v8/include/v8-callbacks.h
+++ b/deps/v8/include/v8-callbacks.h
@@ -7,6 +7,7 @@
#include <stddef.h>
+#include <functional>
#include <string>
#include "cppgc/common.h"
@@ -328,6 +329,10 @@ using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly exceptions are enabled ---
using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
+// --- Callback for checking if WebAssembly GC is enabled ---
+// If the callback returns true, it will also enable Wasm stringrefs.
+using WasmGCEnabledCallback = bool (*)(Local<Context> context);
+
// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
using SharedArrayBufferConstructorEnabledCallback =
bool (*)(Local<Context> context);
@@ -369,6 +374,13 @@ using HostImportModuleDynamicallyCallback = MaybeLocal<Promise> (*)(
Local<FixedArray> import_assertions);
/**
+ * Callback for requesting a compile hint for a function from the embedder. The
+ * first parameter is the position of the function in source code and the second
+ * parameter is embedder data to be passed back.
+ */
+using CompileHintCallback = bool (*)(int, void*);
+
+/**
* HostInitializeImportMetaObjectCallback is called the first time import.meta
* is accessed for a module. Subsequent access will reuse the same value.
*
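A short sketch of an embedder-side CompileHintCallback; the std::set of eager-compile positions and the way the callback is later handed to the compiler are assumptions, not part of this diff.

#include <set>

// Returns true when the function whose source position is
// `function_position` should get the hint. `data` is the embedder pointer
// that V8 passes back verbatim (assumed here to be a std::set<int>*).
bool MyCompileHintCallback(int function_position, void* data) {
  const auto* eager_positions = static_cast<const std::set<int>*>(data);
  return eager_positions->count(function_position) > 0;
}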
diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h
index 3ce0eb0af3..3589caabd4 100644
--- a/deps/v8/include/v8-context.h
+++ b/deps/v8/include/v8-context.h
@@ -7,8 +7,11 @@
#include <stdint.h>
+#include <vector>
+
#include "v8-data.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
#include "v8-snapshot.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
@@ -163,6 +166,42 @@ class V8_EXPORT Context : public Data {
*/
void Exit();
+ /**
+   * Delegate to help with deep-freezing embedder-specific objects (such as
+   * JSApiObjects) that cannot be frozen natively.
+ */
+ class DeepFreezeDelegate {
+ public:
+ /**
+ * Performs embedder-specific operations to freeze the provided embedder
+ * object. The provided object *will* be frozen by DeepFreeze after this
+ * function returns, so only embedder-specific objects need to be frozen.
+ * This function *may not* create new JS objects or perform JS allocations.
+ * Any v8 objects reachable from the provided embedder object that should
+ * also be considered for freezing should be added to the children_out
+ * parameter. Returns true if the operation completed successfully.
+ */
+ virtual bool FreezeEmbedderObjectAndGetChildren(
+ Local<Object> obj, std::vector<Local<Object>>& children_out) = 0;
+ };
+
+ /**
+ * Attempts to recursively freeze all objects reachable from this context.
+   * Some objects (generators, iterators, non-const closures) cannot be frozen
+ * and will cause this method to throw an error. An optional delegate can be
+ * provided to help freeze embedder-specific objects.
+ *
+ * Freezing occurs in two steps:
+ * 1. "Marking" where we iterate through all objects reachable by this
+ * context, accumulating a list of objects that need to be frozen and
+ * looking for objects that can't be frozen. This step is separated because
+ * it is more efficient when we can assume there is no garbage collection.
+   * 2. "Freezing" where we go through the list of objects and freeze them.
+ * This effectively requires copying them so it may trigger garbage
+ * collection.
+ */
+ Maybe<void> DeepFreeze(DeepFreezeDelegate* delegate = nullptr);
+
/** Returns the isolate associated with a current context. */
Isolate* GetIsolate();
@@ -365,13 +404,18 @@ Local<Value> Context::GetEmbedderData(int index) {
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endianness issues.
- value =
- I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
+ value = I::DecompressTaggedField(embedder_data, static_cast<uint32_t>(value));
#endif
+
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ return Local<Value>(reinterpret_cast<Value*>(value));
+#else
internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
*reinterpret_cast<A*>(this));
A* result = HandleScope::CreateHandle(isolate, value);
return Local<Value>(reinterpret_cast<Value*>(result));
+#endif
+
#else
return SlowGetEmbedderData(index);
#endif
@@ -381,7 +425,7 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
#if !defined(V8_ENABLE_CHECKS)
using A = internal::Address;
using I = internal::Internals;
- A ctx = *reinterpret_cast<const A*>(this);
+ A ctx = internal::ValueHelper::ValueAsAddress(this);
A embedder_data =
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset = I::kEmbedderDataArrayHeaderSize +
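A hedged sketch of the DeepFreeze/DeepFreezeDelegate pair introduced above; the assumption that wrapped children live in internal field 0 is purely illustrative.

#include <vector>

#include "v8-context.h"
#include "v8-object.h"

class MyFreezeDelegate : public v8::Context::DeepFreezeDelegate {
 public:
  bool FreezeEmbedderObjectAndGetChildren(
      v8::Local<v8::Object> obj,
      std::vector<v8::Local<v8::Object>>& children_out) override {
    // Freeze the native side of `obj` here (embedder-specific), then report
    // any wrapped V8 objects so DeepFreeze can visit them as well.
    if (obj->InternalFieldCount() > 0) {
      v8::Local<v8::Value> child = obj->GetInternalField(0);
      if (child->IsObject()) children_out.push_back(child.As<v8::Object>());
    }
    return true;
  }
};

void FreezeContext(v8::Local<v8::Context> context) {
  MyFreezeDelegate delegate;
  // Returns Nothing<void>() and throws if something non-freezable
  // (e.g. a generator) is reachable from the context.
  v8::Maybe<void> result = context->DeepFreeze(&delegate);
  (void)result;
}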
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index 3a6582cd25..4a457027c9 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -77,6 +77,12 @@ struct WrapperDescriptor final {
};
struct V8_EXPORT CppHeapCreateParams {
+ CppHeapCreateParams(
+ std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces,
+ WrapperDescriptor wrapper_descriptor)
+ : custom_spaces(std::move(custom_spaces)),
+ wrapper_descriptor(wrapper_descriptor) {}
+
CppHeapCreateParams(const CppHeapCreateParams&) = delete;
CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete;
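The new converting constructor allows building the params inline; a sketch under the assumption of the common two-internal-field wrapper layout (type at index 0, instance at index 1) and an illustrative embedder id.

#include <memory>

#include "v8-cppgc.h"

std::unique_ptr<v8::CppHeap> MakeCppHeap(v8::Platform* platform) {
  v8::WrapperDescriptor wrapper_descriptor(
      /*wrappable_type_index=*/0, /*wrappable_instance_index=*/1,
      /*embedder_id_for_garbage_collected=*/1);
  // No custom spaces; pass an empty vector.
  v8::CppHeapCreateParams params({}, wrapper_descriptor);
  return v8::CppHeap::Create(platform, params);
}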
diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h
index f994cdfdf3..9e2e3ef58c 100644
--- a/deps/v8/include/v8-embedder-heap.h
+++ b/deps/v8/include/v8-embedder-heap.h
@@ -5,27 +5,14 @@
#ifndef INCLUDE_V8_EMBEDDER_HEAP_H_
#define INCLUDE_V8_EMBEDDER_HEAP_H_
-#include <stddef.h>
-#include <stdint.h>
-
-#include <utility>
-#include <vector>
-
-#include "cppgc/common.h"
-#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-traced-handle.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
-class Data;
class Isolate;
class Value;
-namespace internal {
-class LocalEmbedderHeapTracer;
-} // namespace internal
-
/**
* Handler for embedder roots on non-unified heap garbage collections.
*/
@@ -62,162 +49,6 @@ class V8_EXPORT EmbedderRootsHandler {
virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
};
-/**
- * Interface for tracing through the embedder heap. During a V8 garbage
- * collection, V8 collects hidden fields of all potential wrappers, and at the
- * end of its marking phase iterates the collection and asks the embedder to
- * trace through its heap and use reporter to report each JavaScript object
- * reachable from any of the given wrappers.
- */
-class V8_EXPORT
-// GCC doesn't like combining __attribute__(()) with [[deprecated]].
-#ifdef __clang__
-V8_DEPRECATED("Use CppHeap when working with v8::TracedReference.")
-#endif // __clang__
- EmbedderHeapTracer {
- public:
- using EmbedderStackState = cppgc::EmbedderStackState;
-
- enum TraceFlags : uint64_t {
- kNoFlags = 0,
- kReduceMemory = 1 << 0,
- kForced = 1 << 2,
- };
-
- /**
- * Interface for iterating through |TracedReference| handles.
- */
- class V8_EXPORT TracedGlobalHandleVisitor {
- public:
- virtual ~TracedGlobalHandleVisitor() = default;
- virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
- };
-
- /**
- * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
- * summary is reported.
- */
- struct TraceSummary {
- /**
- * Time spent managing the retained memory in milliseconds. This can e.g.
- * include the time tracing through objects in the embedder.
- */
- double time = 0.0;
-
- /**
- * Memory retained by the embedder through the |EmbedderHeapTracer|
- * mechanism in bytes.
- */
- size_t allocated_size = 0;
- };
-
- virtual ~EmbedderHeapTracer() = default;
-
- /**
- * Iterates all |TracedReference| handles created for the |v8::Isolate| the
- * tracer is attached to.
- */
- void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
-
- /**
- * Called by the embedder to set the start of the stack which is e.g. used by
- * V8 to determine whether handles are used from stack or heap.
- */
- void SetStackStart(void* stack_start);
-
- /**
- * Called by v8 to register internal fields of found wrappers.
- *
- * The embedder is expected to store them somewhere and trace reachable
- * wrappers from them when called through |AdvanceTracing|.
- */
- virtual void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) = 0;
-
- void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
-
- /**
- * Called at the beginning of a GC cycle.
- */
- virtual void TracePrologue(TraceFlags flags) {}
-
- /**
- * Called to advance tracing in the embedder.
- *
- * The embedder is expected to trace its heap starting from wrappers reported
- * by RegisterV8References method, and report back all reachable wrappers.
- * Furthermore, the embedder is expected to stop tracing by the given
- * deadline. A deadline of infinity means that tracing should be finished.
- *
- * Returns |true| if tracing is done, and false otherwise.
- */
- virtual bool AdvanceTracing(double deadline_in_ms) = 0;
-
- /*
- * Returns true if there no more tracing work to be done (see AdvanceTracing)
- * and false otherwise.
- */
- virtual bool IsTracingDone() = 0;
-
- /**
- * Called at the end of a GC cycle.
- *
- * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
- * overriden to fill a |TraceSummary| that is used by V8 to schedule future
- * garbage collections.
- */
- virtual void TraceEpilogue(TraceSummary* trace_summary) {}
-
- /**
- * Called upon entering the final marking pause. No more incremental marking
- * steps will follow this call.
- */
- virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
-
- /*
- * Called by the embedder to request immediate finalization of the currently
- * running tracing phase that has been started with TracePrologue and not
- * yet finished with TraceEpilogue.
- *
- * Will be a noop when currently not in tracing.
- *
- * This is an experimental feature.
- */
- void FinalizeTracing();
-
- /**
- * See documentation on EmbedderRootsHandler.
- */
- virtual bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle);
-
- /**
- * See documentation on EmbedderRootsHandler.
- */
- virtual void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle);
-
- /*
- * Called by the embedder to signal newly allocated or freed memory. Not bound
- * to tracing phases. Embedders should trade off when increments are reported
- * as V8 may consult global heuristics on whether to trigger garbage
- * collection on this change.
- */
- void IncreaseAllocatedSize(size_t bytes);
- void DecreaseAllocatedSize(size_t bytes);
-
- /*
- * Returns the v8::Isolate this tracer is attached too and |nullptr| if it
- * is not attached to any v8::Isolate.
- */
- v8::Isolate* isolate() const { return v8_isolate_; }
-
- protected:
- v8::Isolate* v8_isolate_ = nullptr;
-
- friend class internal::LocalEmbedderHeapTracer;
-};
-
} // namespace v8
#endif // INCLUDE_V8_EMBEDDER_HEAP_H_
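With EmbedderHeapTracer removed, traced-handle handling during non-unified GCs goes through EmbedderRootsHandler alone. A minimal sketch follows; IsRoot() belongs to the class but is not visible in this hunk, and the wrapper bookkeeping is left as a comment.

#include "v8-embedder-heap.h"
#include "v8-isolate.h"

class MyRootsHandler final : public v8::EmbedderRootsHandler {
 public:
  bool IsRoot(const v8::TracedReference<v8::Value>& handle) override {
    // Returning false allows V8 to reclaim the wrapper pair; a real
    // embedder would consult its own liveness information here.
    return false;
  }

  void ResetRoot(const v8::TracedReference<v8::Value>& handle) override {
    // Invoked when V8 reclaims the object: drop the embedder-side
    // reference that `handle` belongs to (embedder-specific bookkeeping).
  }
};

void InstallRootsHandler(v8::Isolate* isolate, MyRootsHandler* handler) {
  // V8 does not take ownership of the handler.
  isolate->SetEmbedderRootsHandler(handler);
}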
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 9ea43fe253..0fe7cd2489 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -247,6 +247,7 @@ class CTypeInfo {
kUint64,
kFloat32,
kFloat64,
+ kPointer,
kV8Value,
kSeqOneByteString,
kApiObject, // This will be deprecated once all users have
@@ -435,6 +436,7 @@ struct AnyCType {
uint64_t uint64_value;
float float_value;
double double_value;
+ void* pointer_value;
Local<Object> object_value;
Local<Array> sequence_value;
const FastApiTypedArray<uint8_t>* uint8_ta_value;
@@ -620,6 +622,7 @@ class CFunctionInfoImpl : public CFunctionInfo {
kReturnType == CTypeInfo::Type::kUint32 ||
kReturnType == CTypeInfo::Type::kFloat32 ||
kReturnType == CTypeInfo::Type::kFloat64 ||
+ kReturnType == CTypeInfo::Type::kPointer ||
kReturnType == CTypeInfo::Type::kAny,
"64-bit int, string and api object values are not currently "
"supported return types.");
@@ -658,13 +661,14 @@ struct CTypeInfoTraits {};
#define PRIMITIVE_C_TYPES(V) \
V(bool, kBool) \
+ V(uint8_t, kUint8) \
V(int32_t, kInt32) \
V(uint32_t, kUint32) \
V(int64_t, kInt64) \
V(uint64_t, kUint64) \
V(float, kFloat32) \
V(double, kFloat64) \
- V(uint8_t, kUint8)
+ V(void*, kPointer)
// Same as above, but includes deprecated types for compatibility.
#define ALL_C_TYPES(V) \
@@ -698,13 +702,13 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS)
};
#define TYPED_ARRAY_C_TYPES(V) \
+ V(uint8_t, kUint8) \
V(int32_t, kInt32) \
V(uint32_t, kUint32) \
V(int64_t, kInt64) \
V(uint64_t, kUint64) \
V(float, kFloat32) \
- V(double, kFloat64) \
- V(uint8_t, kUint8)
+ V(double, kFloat64)
TYPED_ARRAY_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA)
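A hedged sketch of a fast API function using the new kPointer mapping for void*; the internal-field layout of the receiver and the function name are assumptions.

#include "v8-fast-api-calls.h"
#include "v8-object.h"

// Fast path that hands an opaque native handle back to the caller. With
// this patch, void* maps to CTypeInfo::Type::kPointer for both arguments
// and return values.
void* GetNativeHandleFast(v8::Local<v8::Object> receiver) {
  // Assumed layout: the native object sits in aligned-pointer field 0.
  return receiver->GetAlignedPointerFromInternalField(0);
}

static const v8::CFunction kGetNativeHandle =
    v8::CFunction::Make(GetNativeHandleFast);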
diff --git a/deps/v8/include/v8-function-callback.h b/deps/v8/include/v8-function-callback.h
index 2adff99b1c..60ecca1fd2 100644
--- a/deps/v8/include/v8-function-callback.h
+++ b/deps/v8/include/v8-function-callback.h
@@ -21,6 +21,7 @@ class Value;
namespace internal {
class FunctionCallbackArguments;
class PropertyCallbackArguments;
+class Builtins;
} // namespace internal
namespace debug {
@@ -74,6 +75,11 @@ class ReturnValue {
V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
V8_INLINE internal::Address GetDefaultValue();
V8_INLINE explicit ReturnValue(internal::Address* slot);
+
+ // See FunctionCallbackInfo.
+ static constexpr int kIsolateValueIndex = -2;
+ static constexpr int kDefaultValueValueIndex = -1;
+
internal::Address* value_;
};
@@ -116,19 +122,35 @@ class FunctionCallbackInfo {
V8_INLINE Isolate* GetIsolate() const;
/** The ReturnValue for the call. */
V8_INLINE ReturnValue<T> GetReturnValue() const;
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
- protected:
+ private:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
friend class debug::ConsoleCallArguments;
- static const int kHolderIndex = 0;
- static const int kIsolateIndex = 1;
- static const int kReturnValueDefaultValueIndex = 2;
- static const int kReturnValueIndex = 3;
- static const int kDataIndex = 4;
- static const int kNewTargetIndex = 5;
+ friend class internal::Builtins;
+ static constexpr int kHolderIndex = 0;
+ static constexpr int kIsolateIndex = 1;
+ static constexpr int kReturnValueDefaultValueIndex = 2;
+ static constexpr int kReturnValueIndex = 3;
+ static constexpr int kDataIndex = 4;
+ static constexpr int kNewTargetIndex = 5;
+
+ static constexpr int kArgsLength = 6;
+ static constexpr int kArgsLengthWithReceiver = 7;
+
+ // Codegen constants:
+ static constexpr int kSize = 3 * internal::kApiSystemPointerSize;
+ static constexpr int kImplicitArgsOffset = 0;
+ static constexpr int kValuesOffset =
+ kImplicitArgsOffset + internal::kApiSystemPointerSize;
+ static constexpr int kLengthOffset =
+ kValuesOffset + internal::kApiSystemPointerSize;
+
+ static constexpr int kThisValuesIndex = -1;
+ static_assert(ReturnValue<Value>::kDefaultValueValueIndex ==
+ kReturnValueDefaultValueIndex - kReturnValueIndex);
+ static_assert(ReturnValue<Value>::kIsolateValueIndex ==
+ kIsolateIndex - kReturnValueIndex);
V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
internal::Address* values, int length);
@@ -229,22 +251,24 @@ class PropertyCallbackInfo {
*/
V8_INLINE bool ShouldThrowOnError() const;
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 7;
-
- protected:
+ private:
friend class MacroAssembler;
friend class internal::PropertyCallbackArguments;
friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kShouldThrowOnErrorIndex = 0;
- static const int kHolderIndex = 1;
- static const int kIsolateIndex = 2;
- static const int kReturnValueDefaultValueIndex = 3;
- static const int kReturnValueIndex = 4;
- static const int kDataIndex = 5;
- static const int kThisIndex = 6;
-
- V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
+ static constexpr int kShouldThrowOnErrorIndex = 0;
+ static constexpr int kHolderIndex = 1;
+ static constexpr int kIsolateIndex = 2;
+ static constexpr int kReturnValueDefaultValueIndex = 3;
+ static constexpr int kReturnValueIndex = 4;
+ static constexpr int kDataIndex = 5;
+ static constexpr int kThisIndex = 6;
+
+ static constexpr int kArgsLength = 7;
+
+ static constexpr int kSize = 1 * internal::kApiSystemPointerSize;
+
+ V8_INLINE explicit PropertyCallbackInfo(internal::Address* args)
+ : args_(args) {}
internal::Address* args_;
};
@@ -285,7 +309,7 @@ void ReturnValue<T>::Set(const Local<S> handle) {
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ *value_ = internal::ValueHelper::ValueAsAddress(*handle);
}
}
@@ -328,41 +352,46 @@ void ReturnValue<T>::Set(bool value) {
} else {
root_index = I::kFalseValueRootIndex;
}
- *value_ = *I::GetRoot(GetIsolate(), root_index);
+ *value_ = I::GetRoot(GetIsolate(), root_index);
}
template <typename T>
void ReturnValue<T>::SetNull() {
static_assert(std::is_base_of<T, Primitive>::value, "type check");
using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
+ *value_ = I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
}
template <typename T>
void ReturnValue<T>::SetUndefined() {
static_assert(std::is_base_of<T, Primitive>::value, "type check");
using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
+ *value_ = I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
}
template <typename T>
void ReturnValue<T>::SetEmptyString() {
static_assert(std::is_base_of<T, String>::value, "type check");
using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
+ *value_ = I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
}
template <typename T>
Isolate* ReturnValue<T>::GetIsolate() const {
// Isolate is always the pointer below the default value on the stack.
- return *reinterpret_cast<Isolate**>(&value_[-2]);
+ return *reinterpret_cast<Isolate**>(&value_[kIsolateValueIndex]);
}
template <typename T>
Local<Value> ReturnValue<T>::Get() const {
using I = internal::Internals;
- if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
- return Local<Value>(*Undefined(GetIsolate()));
+#if V8_STATIC_ROOTS_BOOL
+ if (I::is_identical(*value_, I::StaticReadOnlyRoot::kTheHoleValue)) {
+#else
+ if (*value_ == I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex)) {
+#endif
+ return Undefined(GetIsolate());
+ }
return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
}
@@ -375,7 +404,7 @@ void ReturnValue<T>::Set(S* whatever) {
template <typename T>
internal::Address ReturnValue<T>::GetDefaultValue() {
// Default value is always the pointer below value_ on the stack.
- return value_[-1];
+ return value_[kDefaultValueValueIndex];
}
template <typename T>
@@ -387,31 +416,29 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
template <typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
// values_ points to the first argument (not the receiver).
- if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
- return Local<Value>(reinterpret_cast<Value*>(values_ + i));
+ if (i < 0 || length_ <= i) return Undefined(GetIsolate());
+ return Local<Value>::FromSlot(values_ + i);
}
template <typename T>
Local<Object> FunctionCallbackInfo<T>::This() const {
// values_ points to the first argument (not the receiver).
- return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
+ return Local<Object>::FromSlot(values_ + kThisValuesIndex);
}
template <typename T>
Local<Object> FunctionCallbackInfo<T>::Holder() const {
- return Local<Object>(
- reinterpret_cast<Object*>(&implicit_args_[kHolderIndex]));
+ return Local<Object>::FromSlot(&implicit_args_[kHolderIndex]);
}
template <typename T>
Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
- return Local<Value>(
- reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
+ return Local<Value>::FromSlot(&implicit_args_[kNewTargetIndex]);
}
template <typename T>
Local<Value> FunctionCallbackInfo<T>::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
+ return Local<Value>::FromSlot(&implicit_args_[kDataIndex]);
}
template <typename T>
@@ -441,17 +468,17 @@ Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
template <typename T>
Local<Value> PropertyCallbackInfo<T>::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
+ return Local<Value>::FromSlot(&args_[kDataIndex]);
}
template <typename T>
Local<Object> PropertyCallbackInfo<T>::This() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
+ return Local<Object>::FromSlot(&args_[kThisIndex]);
}
template <typename T>
Local<Object> PropertyCallbackInfo<T>::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
+ return Local<Object>::FromSlot(&args_[kHolderIndex]);
}
template <typename T>
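The ReturnValue helpers rewritten above are the ones an ordinary function callback exercises; a short sketch with an illustrative callback name and constant.

#include "v8-function-callback.h"
#include "v8-primitive.h"

void Answer(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  if (info.Length() == 0) {
    // Writes the undefined root straight into the return slot.
    info.GetReturnValue().SetUndefined();
    return;
  }
  info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
}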
diff --git a/deps/v8/include/v8-function.h b/deps/v8/include/v8-function.h
index 2dc7e722bb..1e35bfc8bf 100644
--- a/deps/v8/include/v8-function.h
+++ b/deps/v8/include/v8-function.h
@@ -59,6 +59,7 @@ class V8_EXPORT Function : public Object {
void SetName(Local<String> name);
Local<Value> GetName() const;
+ V8_DEPRECATED("No direct replacement")
MaybeLocal<UnboundScript> GetUnboundScript() const;
/**
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index aa5a044afb..563ad196d6 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -32,19 +32,19 @@ namespace Debugger {
namespace API {
class SearchMatch;
}
-}
+} // namespace Debugger
namespace Runtime {
namespace API {
class RemoteObject;
class StackTrace;
class StackTraceId;
-}
-}
+} // namespace API
+} // namespace Runtime
namespace Schema {
namespace API {
class Domain;
}
-}
+} // namespace Schema
} // namespace protocol
class V8_EXPORT StringView {
@@ -134,6 +134,13 @@ class V8_EXPORT V8DebuggerId {
int64_t m_second = 0;
};
+struct V8_EXPORT V8StackFrame {
+ StringView sourceURL;
+ StringView functionName;
+ int lineNumber;
+ int columnNumber;
+};
+
class V8_EXPORT V8StackTrace {
public:
virtual StringView firstNonEmptySourceURL() const = 0;
@@ -151,6 +158,8 @@ class V8_EXPORT V8StackTrace {
// Safe to pass between threads, drops async chain.
virtual std::unique_ptr<V8StackTrace> clone() = 0;
+
+ virtual std::vector<V8StackFrame> frames() const = 0;
};
class V8_EXPORT V8InspectorSession {
@@ -203,6 +212,9 @@ class V8_EXPORT V8InspectorSession {
std::unique_ptr<StringBuffer>* objectGroup) = 0;
virtual void releaseObjectGroup(StringView) = 0;
virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0;
+
+ // Prepare for shutdown (disables debugger pausing, etc.).
+ virtual void stop() = 0;
};
class V8_EXPORT WebDriverValue {
@@ -365,9 +377,12 @@ class V8_EXPORT V8Inspector {
virtual void flushProtocolNotifications() = 0;
};
enum ClientTrustLevel { kUntrusted, kFullyTrusted };
+ enum SessionPauseState { kWaitingForDebugger, kNotWaitingForDebugger };
+ // TODO(chromium:1352175): remove default value once downstream change lands.
virtual std::unique_ptr<V8InspectorSession> connect(
int contextGroupId, Channel*, StringView state,
- ClientTrustLevel client_trust_level) {
+ ClientTrustLevel client_trust_level,
+ SessionPauseState = kNotWaitingForDebugger) {
return nullptr;
}
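A hedged sketch of consuming the new frames() accessor; the inspector instance and the logging helper are assumed.

#include <memory>

#include "v8-inspector.h"

void LogCurrentStack(v8_inspector::V8Inspector* inspector) {
  std::unique_ptr<v8_inspector::V8StackTrace> stack =
      inspector->captureStackTrace(/*fullStack=*/true);
  if (!stack) return;
  for (const v8_inspector::V8StackFrame& frame : stack->frames()) {
    // sourceURL/functionName are StringViews owned by `stack`; copy them
    // out before the trace is destroyed. LogFrame() is hypothetical.
    // LogFrame(frame.sourceURL, frame.functionName, frame.lineNumber,
    //          frame.columnNumber);
    (void)frame;
  }
}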
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index a4b8ee0824..e61bb6e415 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -21,15 +21,13 @@ class Array;
class Context;
class Data;
class Isolate;
-template <typename T>
-class Local;
namespace internal {
class Isolate;
typedef uintptr_t Address;
-static const Address kNullAddress = 0;
+static constexpr Address kNullAddress = 0;
constexpr int KB = 1024;
constexpr int MB = KB * 1024;
@@ -82,7 +80,7 @@ struct SmiTagging<4> {
static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
- V8_INLINE static int SmiToInt(const internal::Address value) {
+ V8_INLINE static int SmiToInt(Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Truncate and shift down (requires >> to be sign extending).
return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
@@ -107,7 +105,7 @@ struct SmiTagging<8> {
static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
- V8_INLINE static int SmiToInt(const internal::Address value) {
+ V8_INLINE static int SmiToInt(Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
@@ -150,8 +148,9 @@ const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
+constexpr bool Is64() { return kApiSystemPointerSize == sizeof(int64_t); }
-V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+V8_INLINE static constexpr Address IntToSmi(int value) {
return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
kSmiTag;
}
@@ -242,6 +241,7 @@ static_assert(1ULL << (64 - kBoundedSizeShift) ==
#ifdef V8_COMPRESS_POINTERS
+#ifdef V8_TARGET_OS_ANDROID
// The size of the virtual memory reservation for an external pointer table.
// This determines the maximum number of entries in a table. Using a maximum
// size allows omitting bounds checks on table accesses if the indices are
@@ -249,14 +249,18 @@ static_assert(1ULL << (64 - kBoundedSizeShift) ==
// value must be a power of two.
static const size_t kExternalPointerTableReservationSize = 512 * MB;
-// The maximum number of entries in an external pointer table.
-static const size_t kMaxExternalPointers =
- kExternalPointerTableReservationSize / kApiSystemPointerSize;
-
// The external pointer table indices stored in HeapObjects as external
// pointers are shifted to the left by this amount to guarantee that they are
// smaller than the maximum table size.
static const uint32_t kExternalPointerIndexShift = 6;
+#else
+static const size_t kExternalPointerTableReservationSize = 1024 * MB;
+static const uint32_t kExternalPointerIndexShift = 5;
+#endif // V8_TARGET_OS_ANDROID
+
+// The maximum number of entries in an external pointer table.
+static const size_t kMaxExternalPointers =
+ kExternalPointerTableReservationSize / kApiSystemPointerSize;
static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers,
"kExternalPointerTableReservationSize and "
"kExternalPointerIndexShift don't match");
@@ -345,6 +349,14 @@ using ExternalPointer_t = Address;
// that the Embedder is not using this byte (really only this one bit) for any
// other purpose. This bit also does not collide with the memory tagging
// extension (MTE) which would use bits [56, 60).
+//
+// External pointer tables are also available even when the sandbox is off but
+// pointer compression is on. In that case, the mechanism can be used to ease
+// alignment requirements as it turns unaligned 64-bit raw pointers into
+// aligned 32-bit indices. To "opt-in" to the external pointer table mechanism
+// for this purpose, instead of using the ExternalPointer accessors one needs to
+// use ExternalPointerHandles directly and use them to access the pointers in an
+// ExternalPointerTable.
constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62;
constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000;
constexpr uint64_t kExternalPointerTagShift = 48;
@@ -367,71 +379,58 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = {
0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001,
0b11100010, 0b11100100, 0b11101000, 0b11110000};
+#define TAG(i) \
+ ((kAllExternalPointerTypeTags[i] << kExternalPointerTagShift) | \
+ kExternalPointerMarkBit)
+
// clang-format off
-// New entries should be added with state "sandboxed".
+
// When adding new tags, please ensure that the code using these tags is
// "substitution-safe", i.e. still operate safely if external pointers of the
// same type are swapped by an attacker. See comment above for more details.
-#define TAG(i) (kAllExternalPointerTypeTags[i])
// Shared external pointers are owned by the shared Isolate and stored in the
// shared external pointer table associated with that Isolate, where they can
// be accessed from multiple threads at the same time. The objects referenced
// in this way must therefore always be thread-safe.
-#define SHARED_EXTERNAL_POINTER_TAGS(V) \
- V(kFirstSharedTag, sandboxed, TAG(0)) \
- V(kWaiterQueueNodeTag, sandboxed, TAG(0)) \
- V(kExternalStringResourceTag, sandboxed, TAG(1)) \
- V(kExternalStringResourceDataTag, sandboxed, TAG(2)) \
- V(kLastSharedTag, sandboxed, TAG(2))
+#define SHARED_EXTERNAL_POINTER_TAGS(V) \
+ V(kFirstSharedTag, TAG(0)) \
+ V(kWaiterQueueNodeTag, TAG(0)) \
+ V(kExternalStringResourceTag, TAG(1)) \
+ V(kExternalStringResourceDataTag, TAG(2)) \
+ V(kLastSharedTag, TAG(2))
// External pointers using these tags are kept in a per-Isolate external
// pointer table and can only be accessed when this Isolate is active.
-#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \
- V(kForeignForeignAddressTag, sandboxed, TAG(10)) \
- V(kNativeContextMicrotaskQueueTag, sandboxed, TAG(11)) \
- V(kEmbedderDataSlotPayloadTag, sandboxed, TAG(12)) \
- V(kExternalObjectValueTag, sandboxed, TAG(13)) \
- V(kCallHandlerInfoCallbackTag, sandboxed, TAG(14)) \
- V(kAccessorInfoGetterTag, sandboxed, TAG(15)) \
- V(kAccessorInfoSetterTag, sandboxed, TAG(16)) \
- V(kWasmInternalFunctionCallTargetTag, sandboxed, TAG(17)) \
- V(kWasmTypeInfoNativeTypeTag, sandboxed, TAG(18)) \
- V(kWasmExportedFunctionDataSignatureTag, sandboxed, TAG(19)) \
- V(kWasmContinuationJmpbufTag, sandboxed, TAG(20)) \
- V(kArrayBufferExtensionTag, sandboxed, TAG(21))
+#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \
+ V(kForeignForeignAddressTag, TAG(10)) \
+ V(kNativeContextMicrotaskQueueTag, TAG(11)) \
+ V(kEmbedderDataSlotPayloadTag, TAG(12)) \
+/* This tag essentially stands for a `void*` pointer in the V8 API, and */ \
+/* it is the Embedder's responsibility to ensure type safety (against */ \
+/* substitution) and lifetime validity of these objects. */ \
+ V(kExternalObjectValueTag, TAG(13)) \
+ V(kCallHandlerInfoCallbackTag, TAG(14)) \
+ V(kAccessorInfoGetterTag, TAG(15)) \
+ V(kAccessorInfoSetterTag, TAG(16)) \
+ V(kWasmInternalFunctionCallTargetTag, TAG(17)) \
+ V(kWasmTypeInfoNativeTypeTag, TAG(18)) \
+ V(kWasmExportedFunctionDataSignatureTag, TAG(19)) \
+ V(kWasmContinuationJmpbufTag, TAG(20)) \
+ V(kArrayBufferExtensionTag, TAG(21))
// All external pointer tags.
#define ALL_EXTERNAL_POINTER_TAGS(V) \
SHARED_EXTERNAL_POINTER_TAGS(V) \
PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)
-// When the sandbox is enabled, external pointers marked as "sandboxed" above
-// use the external pointer table (i.e. are sandboxed). This allows a gradual
-// rollout of external pointer sandboxing. If the sandbox is off, no external
-// pointers are sandboxed.
-//
-// Sandboxed external pointer tags are available when compressing pointers even
-// when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used
-// manually with the external pointer table even when the sandbox is off to ease
-// alignment requirements.
-#define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit
-#define unsandboxed(X) kUnsandboxedExternalPointerTag
-#if defined(V8_COMPRESS_POINTERS)
-#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits),
-#else
-#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits),
-#endif
-
+#define EXTERNAL_POINTER_TAG_ENUM(Name, Tag) Name = Tag,
#define MAKE_TAG(HasMarkBit, TypeTag) \
((static_cast<uint64_t>(TypeTag) << kExternalPointerTagShift) | \
(HasMarkBit ? kExternalPointerMarkBit : 0))
enum ExternalPointerTag : uint64_t {
// Empty tag value. Mostly used as placeholder.
kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
- // Tag to use for unsandboxed external pointers, which are still stored as
- // raw pointers on the heap.
- kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000),
// External pointer tag that will match any external pointer. Use with care!
kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
// The free entry tag has all type bits set so every type check with a
@@ -445,20 +444,11 @@ enum ExternalPointerTag : uint64_t {
};
#undef MAKE_TAG
-#undef unsandboxed
-#undef sandboxed
#undef TAG
#undef EXTERNAL_POINTER_TAG_ENUM
// clang-format on
-// True if the external pointer is sandboxed and so must be referenced through
-// an external pointer table.
-V8_INLINE static constexpr bool IsSandboxedExternalPointerType(
- ExternalPointerTag tag) {
- return tag != kUnsandboxedExternalPointerTag;
-}
-
// True if the external pointer must be accessed from the shared isolate's
// external pointer table.
V8_INLINE static constexpr bool IsSharedExternalPointerType(
@@ -467,12 +457,10 @@ V8_INLINE static constexpr bool IsSharedExternalPointerType(
}
// Sanity checks.
-#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
- static_assert(!IsSandboxedExternalPointerType(Tag) || \
- IsSharedExternalPointerType(Tag));
+#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
+ static_assert(IsSharedExternalPointerType(Tag));
#define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
- static_assert(!IsSandboxedExternalPointerType(Tag) || \
- !IsSharedExternalPointerType(Tag));
+ static_assert(!IsSharedExternalPointerType(Tag));
SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS)
PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS)
@@ -490,7 +478,7 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
// Returns if we need to throw when an error occurs. This infers the language
// mode based on the current context and the closure. This returns true if the
// language mode is strict.
-V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+V8_EXPORT bool ShouldThrowOnError(internal::Isolate* isolate);
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@@ -498,8 +486,7 @@ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
*/
class Internals {
#ifdef V8_MAP_PACKING
- V8_INLINE static constexpr internal::Address UnpackMapWord(
- internal::Address mapword) {
+ V8_INLINE static constexpr Address UnpackMapWord(Address mapword) {
// TODO(wenyuzhao): Clear header metadata.
return mapword ^ kMapWordXorMask;
}
@@ -533,6 +520,8 @@ class Internals {
static const int kStackGuardSize = 7 * kApiSystemPointerSize;
static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;
static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize;
+ static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize;
+ static const int kThreadLocalTopSize = 25 * kApiSystemPointerSize;
// ExternalPointerTable layout guarantees.
static const int kExternalPointerTableBufferOffset = 0;
@@ -545,31 +534,60 @@ class Internals {
static const int kVariousBooleanFlagsOffset =
kIsolateStackGuardOffset + kStackGuardSize;
static const int kBuiltinTier0EntryTableOffset =
- kVariousBooleanFlagsOffset + kApiSystemPointerSize;
+ kVariousBooleanFlagsOffset + 8;
static const int kBuiltinTier0TableOffset =
kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
- static const int kIsolateEmbedderDataOffset =
+ static const int kNewAllocationInfoOffset =
kBuiltinTier0TableOffset + kBuiltinTier0TableSize;
+ static const int kOldAllocationInfoOffset =
+ kNewAllocationInfoOffset + kLinearAllocationAreaSize;
static const int kIsolateFastCCallCallerFpOffset =
- kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
+ kOldAllocationInfoOffset + kLinearAllocationAreaSize;
static const int kIsolateFastCCallCallerPcOffset =
kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
static const int kIsolateFastApiCallTargetOffset =
kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
static const int kIsolateLongTaskStatsCounterOffset =
kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
+ static const int kIsolateThreadLocalTopOffset =
+ kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
+ static const int kIsolateEmbedderDataOffset =
+ kIsolateThreadLocalTopOffset + kThreadLocalTopSize;
#ifdef V8_COMPRESS_POINTERS
static const int kIsolateExternalPointerTableOffset =
- kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
+ kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
static const int kIsolateSharedExternalPointerTableAddressOffset =
kIsolateExternalPointerTableOffset + kExternalPointerTableSize;
static const int kIsolateRootsOffset =
kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize;
#else
static const int kIsolateRootsOffset =
- kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
+ kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
#endif
+#if V8_STATIC_ROOTS_BOOL
+
+// These constants need to be initialized in api.cc.
+#define EXPORTED_STATIC_ROOTS_PTR_LIST(V) \
+ V(UndefinedValue) \
+ V(NullValue) \
+ V(TrueValue) \
+ V(FalseValue) \
+ V(EmptyString) \
+ V(TheHoleValue)
+
+ using Tagged_t = uint32_t;
+ struct StaticReadOnlyRoot {
+#define DEF_ROOT(name) V8_EXPORT static const Tagged_t k##name;
+ EXPORTED_STATIC_ROOTS_PTR_LIST(DEF_ROOT)
+#undef DEF_ROOT
+
+ V8_EXPORT static const Tagged_t kFirstStringMap;
+ V8_EXPORT static const Tagged_t kLastStringMap;
+ };
+
+#endif // V8_STATIC_ROOTS_BOOL
+
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
@@ -623,15 +641,15 @@ class Internals {
#endif
}
- V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
+ V8_INLINE static bool HasHeapObjectTag(Address value) {
return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
}
- V8_INLINE static int SmiValue(const internal::Address value) {
+ V8_INLINE static int SmiValue(Address value) {
return PlatformSmiTagging::SmiToInt(value);
}
- V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+ V8_INLINE static constexpr Address IntToSmi(int value) {
return internal::IntToSmi(value);
}
@@ -639,16 +657,30 @@ class Internals {
return PlatformSmiTagging::IsValidSmi(value);
}
- V8_INLINE static int GetInstanceType(const internal::Address obj) {
- typedef internal::Address A;
- A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+#if V8_STATIC_ROOTS_BOOL
+ V8_INLINE static bool is_identical(Address obj, Tagged_t constant) {
+ return static_cast<Tagged_t>(obj) == constant;
+ }
+
+ V8_INLINE static bool CheckInstanceMapRange(Address obj, Tagged_t first_map,
+ Tagged_t last_map) {
+ auto map = ReadRawField<Tagged_t>(obj, kHeapObjectMapOffset);
+#ifdef V8_MAP_PACKING
+ map = UnpackMapWord(map);
+#endif
+ return map >= first_map && map <= last_map;
+ }
+#endif
+
+ V8_INLINE static int GetInstanceType(Address obj) {
+ Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
map = UnpackMapWord(map);
#endif
return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
}
- V8_INLINE static int GetOddballKind(const internal::Address obj) {
+ V8_INLINE static int GetOddballKind(Address obj) {
return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
}
@@ -669,80 +701,92 @@ class Internals {
static_cast<unsigned>(kLastJSApiObjectType - kJSObjectType));
}
- V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
+ V8_INLINE static uint8_t GetNodeFlag(Address* obj, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & static_cast<uint8_t>(1U << shift);
}
- V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
- int shift) {
+ V8_INLINE static void UpdateNodeFlag(Address* obj, bool value, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = static_cast<uint8_t>(1U << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
- V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
+ V8_INLINE static uint8_t GetNodeState(Address* obj) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & kNodeStateMask;
}
- V8_INLINE static void UpdateNodeState(internal::Address* obj, uint8_t value) {
+ V8_INLINE static void UpdateNodeState(Address* obj, uint8_t value) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
void* data) {
- internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
- kIsolateEmbedderDataOffset +
- slot * kApiSystemPointerSize;
+ Address addr = reinterpret_cast<Address>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
uint32_t slot) {
- internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
- kIsolateEmbedderDataOffset +
- slot * kApiSystemPointerSize;
+ Address addr = reinterpret_cast<Address>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
return *reinterpret_cast<void* const*>(addr);
}
V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) {
- internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
- kIsolateLongTaskStatsCounterOffset;
+ Address addr =
+ reinterpret_cast<Address>(isolate) + kIsolateLongTaskStatsCounterOffset;
++(*reinterpret_cast<size_t*>(addr));
}
- V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
- internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
- kIsolateRootsOffset +
- index * kApiSystemPointerSize;
- return reinterpret_cast<internal::Address*>(addr);
+ V8_INLINE static Address* GetRootSlot(v8::Isolate* isolate, int index) {
+ Address addr = reinterpret_cast<Address>(isolate) + kIsolateRootsOffset +
+ index * kApiSystemPointerSize;
+ return reinterpret_cast<Address*>(addr);
+ }
+
+ V8_INLINE static Address GetRoot(v8::Isolate* isolate, int index) {
+#if V8_STATIC_ROOTS_BOOL
+ Address base = *reinterpret_cast<Address*>(
+ reinterpret_cast<uintptr_t>(isolate) + kIsolateCageBaseOffset);
+ switch (index) {
+#define DECOMPRESS_ROOT(name) \
+ case k##name##RootIndex: \
+ return base + StaticReadOnlyRoot::k##name;
+ EXPORTED_STATIC_ROOTS_PTR_LIST(DECOMPRESS_ROOT)
+#undef DECOMPRESS_ROOT
+ default:
+ break;
+ }
+#undef EXPORTED_STATIC_ROOTS_PTR_LIST
+#endif // V8_STATIC_ROOTS_BOOL
+ return *GetRootSlot(isolate, index);
}
#ifdef V8_ENABLE_SANDBOX
- V8_INLINE static internal::Address* GetExternalPointerTableBase(
- v8::Isolate* isolate) {
- internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
- kIsolateExternalPointerTableOffset +
- kExternalPointerTableBufferOffset;
- return *reinterpret_cast<internal::Address**>(addr);
+ V8_INLINE static Address* GetExternalPointerTableBase(v8::Isolate* isolate) {
+ Address addr = reinterpret_cast<Address>(isolate) +
+ kIsolateExternalPointerTableOffset +
+ kExternalPointerTableBufferOffset;
+ return *reinterpret_cast<Address**>(addr);
}
- V8_INLINE static internal::Address* GetSharedExternalPointerTableBase(
+ V8_INLINE static Address* GetSharedExternalPointerTableBase(
v8::Isolate* isolate) {
- internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
- kIsolateSharedExternalPointerTableAddressOffset;
- addr = *reinterpret_cast<internal::Address*>(addr);
+ Address addr = reinterpret_cast<Address>(isolate) +
+ kIsolateSharedExternalPointerTableAddressOffset;
+ addr = *reinterpret_cast<Address*>(addr);
addr += kExternalPointerTableBufferOffset;
- return *reinterpret_cast<internal::Address**>(addr);
+ return *reinterpret_cast<Address**>(addr);
}
#endif
template <typename T>
- V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
- int offset) {
- internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
+ V8_INLINE static T ReadRawField(Address heap_object_ptr, int offset) {
+ Address addr = heap_object_ptr + offset - kHeapObjectTag;
#ifdef V8_COMPRESS_POINTERS
if (sizeof(T) > kApiTaggedSize) {
// TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
@@ -757,29 +801,28 @@ class Internals {
return *reinterpret_cast<const T*>(addr);
}
- V8_INLINE static internal::Address ReadTaggedPointerField(
- internal::Address heap_object_ptr, int offset) {
+ V8_INLINE static Address ReadTaggedPointerField(Address heap_object_ptr,
+ int offset) {
#ifdef V8_COMPRESS_POINTERS
uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
- internal::Address base =
- GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
- return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+ return base + static_cast<Address>(static_cast<uintptr_t>(value));
#else
- return ReadRawField<internal::Address>(heap_object_ptr, offset);
+ return ReadRawField<Address>(heap_object_ptr, offset);
#endif
}
- V8_INLINE static internal::Address ReadTaggedSignedField(
- internal::Address heap_object_ptr, int offset) {
+ V8_INLINE static Address ReadTaggedSignedField(Address heap_object_ptr,
+ int offset) {
#ifdef V8_COMPRESS_POINTERS
uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
- return static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ return static_cast<Address>(static_cast<uintptr_t>(value));
#else
- return ReadRawField<internal::Address>(heap_object_ptr, offset);
+ return ReadRawField<Address>(heap_object_ptr, offset);
#endif
}
- V8_INLINE static v8::Isolate* GetIsolateForSandbox(internal::Address obj) {
+ V8_INLINE static v8::Isolate* GetIsolateForSandbox(Address obj) {
#ifdef V8_ENABLE_SANDBOX
return reinterpret_cast<v8::Isolate*>(
internal::IsolateFromNeverReadOnlySpaceObject(obj));
@@ -790,40 +833,37 @@ class Internals {
}
template <ExternalPointerTag tag>
- V8_INLINE static internal::Address ReadExternalPointerField(
- v8::Isolate* isolate, internal::Address heap_object_ptr, int offset) {
+ V8_INLINE static Address ReadExternalPointerField(v8::Isolate* isolate,
+ Address heap_object_ptr,
+ int offset) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
- // it can be inlined and doesn't require an additional call.
- internal::Address* table =
- IsSharedExternalPointerType(tag)
- ? GetSharedExternalPointerTableBase(isolate)
- : GetExternalPointerTableBase(isolate);
- internal::ExternalPointerHandle handle =
- ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
- uint32_t index = handle >> kExternalPointerIndexShift;
- std::atomic<internal::Address>* ptr =
- reinterpret_cast<std::atomic<internal::Address>*>(&table[index]);
- internal::Address entry =
- std::atomic_load_explicit(ptr, std::memory_order_relaxed);
- return entry & ~tag;
- }
-#endif
+ static_assert(tag != kExternalPointerNullTag);
+ // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
+ // it can be inlined and doesn't require an additional call.
+ Address* table = IsSharedExternalPointerType(tag)
+ ? GetSharedExternalPointerTableBase(isolate)
+ : GetExternalPointerTableBase(isolate);
+ internal::ExternalPointerHandle handle =
+ ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
+ uint32_t index = handle >> kExternalPointerIndexShift;
+ std::atomic<Address>* ptr =
+ reinterpret_cast<std::atomic<Address>*>(&table[index]);
+ Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed);
+ return entry & ~tag;
+#else
return ReadRawField<Address>(heap_object_ptr, offset);
+#endif // V8_ENABLE_SANDBOX
}
#ifdef V8_COMPRESS_POINTERS
- V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
- internal::Address addr) {
+ V8_INLINE static Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
}
- V8_INLINE static internal::Address DecompressTaggedAnyField(
- internal::Address heap_object_ptr, uint32_t value) {
- internal::Address base =
- GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
- return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ V8_INLINE static Address DecompressTaggedField(Address heap_object_ptr,
+ uint32_t value) {
+ Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+ return base + static_cast<Address>(static_cast<uintptr_t>(value));
}
#endif // V8_COMPRESS_POINTERS
@@ -861,8 +901,58 @@ class BackingStoreBase {};
// This is needed for histograms sampling garbage collection reasons.
constexpr int kGarbageCollectionReasonMaxValue = 27;
-} // namespace internal
+// Helper functions about values contained in handles.
+class ValueHelper final {
+ public:
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ static constexpr Address kLocalTaggedNullAddress = 1;
+
+ template <typename T>
+ static constexpr T* EmptyValue() {
+ return reinterpret_cast<T*>(kLocalTaggedNullAddress);
+ }
+
+ template <typename T>
+ V8_INLINE static Address ValueAsAddress(const T* value) {
+ return reinterpret_cast<Address>(value);
+ }
+
+ template <typename T, typename S>
+ V8_INLINE static T* SlotAsValue(S* slot) {
+ return *reinterpret_cast<T**>(slot);
+ }
+
+ template <typename T>
+ V8_INLINE static T* ValueAsSlot(T* const& value) {
+ return reinterpret_cast<T*>(const_cast<T**>(&value));
+ }
+
+#else // !V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+
+ template <typename T>
+ static constexpr T* EmptyValue() {
+ return nullptr;
+ }
+
+ template <typename T>
+ V8_INLINE static Address ValueAsAddress(const T* value) {
+ return *reinterpret_cast<const Address*>(value);
+ }
+
+ template <typename T, typename S>
+ V8_INLINE static T* SlotAsValue(S* slot) {
+ return reinterpret_cast<T*>(slot);
+ }
+
+ template <typename T>
+ V8_INLINE static T* ValueAsSlot(T* const& value) {
+ return value;
+ }
+#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+};
+
+} // namespace internal
} // namespace v8
#endif // INCLUDE_V8_INTERNAL_H_
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
index e9f531973b..0284e6e769 100644
--- a/deps/v8/include/v8-isolate.h
+++ b/deps/v8/include/v8-isolate.h
@@ -233,7 +233,7 @@ class V8_EXPORT Isolate {
* Explicitly specify a startup snapshot blob. The embedder owns the blob.
* The embedder *must* ensure that the snapshot is from a trusted source.
*/
- StartupData* snapshot_blob = nullptr;
+ const StartupData* snapshot_blob = nullptr;
/**
* Enables the host application to provide a mechanism for recording
@@ -333,12 +333,9 @@ class V8_EXPORT Isolate {
const DisallowJavascriptExecutionScope&) = delete;
private:
- OnFailure on_failure_;
- v8::Isolate* v8_isolate_;
-
- bool was_execution_allowed_assert_;
- bool was_execution_allowed_throws_;
- bool was_execution_allowed_dump_;
+ v8::Isolate* const v8_isolate_;
+ const OnFailure on_failure_;
+ bool was_execution_allowed_;
};
/**
@@ -356,7 +353,7 @@ class V8_EXPORT Isolate {
const AllowJavascriptExecutionScope&) = delete;
private:
- Isolate* v8_isolate_;
+ Isolate* const v8_isolate_;
bool was_execution_allowed_assert_;
bool was_execution_allowed_throws_;
bool was_execution_allowed_dump_;
@@ -537,6 +534,8 @@ class V8_EXPORT Isolate {
kTurboFanOsrCompileStarted = 115,
kAsyncStackTaggingCreateTaskCall = 116,
kDurationFormat = 117,
+ kInvalidatedNumberStringPrototypeNoReplaceProtector = 118,
+ kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode = 119, // Unused.
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -924,27 +923,10 @@ class V8_EXPORT Isolate {
void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr);
void RemoveGCPrologueCallback(GCCallback callback);
- START_ALLOW_USE_DEPRECATED()
- /**
- * Sets the embedder heap tracer for the isolate.
- * SetEmbedderHeapTracer cannot be used simultaneously with AttachCppHeap.
- */
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- /*
- * Gets the currently active heap tracer for the isolate that was set with
- * SetEmbedderHeapTracer.
- */
- EmbedderHeapTracer* GetEmbedderHeapTracer();
- END_ALLOW_USE_DEPRECATED()
-
/**
* Sets an embedder roots handle that V8 should consider when performing
- * non-unified heap garbage collections.
- *
- * Using only EmbedderHeapTracer automatically sets up a default handler.
- * The intended use case is for setting a custom handler after invoking
- * `AttachCppHeap()`.
+ * non-unified heap garbage collections. The intended use case is for setting
+ * a custom handler after invoking `AttachCppHeap()`.
*
* V8 does not take ownership of the handler.
*/
@@ -955,8 +937,6 @@ class V8_EXPORT Isolate {
* embedder maintains ownership of the CppHeap. At most one C++ heap can be
* attached to V8.
*
- * AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer.
- *
* Multi-threaded use requires the use of v8::Locker/v8::Unlocker, see
* CppHeap.
*/
@@ -1143,9 +1123,8 @@ class V8_EXPORT Isolate {
*
* This should only be used for testing purposes and not to enforce a garbage
* collection schedule. It has strong negative impact on the garbage
- * collection performance. Use IdleNotificationDeadline() or
- * LowMemoryNotification() instead to influence the garbage collection
- * schedule.
+ * collection performance. Use MemoryPressureNotification() instead to
+ * influence the garbage collection schedule.
*/
void RequestGarbageCollectionForTesting(GarbageCollectionType type);
@@ -1156,9 +1135,8 @@ class V8_EXPORT Isolate {
*
* This should only be used for testing purposes and not to enforce a garbage
* collection schedule. It has strong negative impact on the garbage
- * collection performance. Use IdleNotificationDeadline() or
- * LowMemoryNotification() instead to influence the garbage collection
- * schedule.
+ * collection performance. Use MemoryPressureNotification() instead to
+ * influence the garbage collection schedule.
*/
void RequestGarbageCollectionForTesting(GarbageCollectionType type,
StackState stack_state);
@@ -1310,6 +1288,8 @@ class V8_EXPORT Isolate {
* that function. There is no guarantee that the actual work will be done
* within the time limit.
*/
+ V8_DEPRECATE_SOON(
+ "Use MemoryPressureNotification() to influence the GC schedule.")
bool IdleNotificationDeadline(double deadline_in_seconds);
/**
@@ -1346,11 +1326,13 @@ class V8_EXPORT Isolate {
* V8 uses this notification to guide heuristics which may result in a
* smaller memory footprint at the cost of reduced runtime performance.
*/
+ V8_DEPRECATED("Use IsolateInBackgroundNotification() instead")
void EnableMemorySavingsMode();
/**
* Optional notification which will disable the memory savings mode.
*/
+ V8_DEPRECATED("Use IsolateInBackgroundNotification() instead")
void DisableMemorySavingsMode();
/**
@@ -1530,6 +1512,13 @@ class V8_EXPORT Isolate {
V8_DEPRECATED("Wasm exceptions are always enabled")
void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
+ /**
+   * Register callback to control whether Wasm GC is enabled.
+ * The callback overwrites the value of the flag.
+ * If the callback returns true, it will also enable Wasm stringrefs.
+ */
+ void SetWasmGCEnabledCallback(WasmGCEnabledCallback callback);
+
void SetSharedArrayBufferConstructorEnabledCallback(
SharedArrayBufferConstructorEnabledCallback callback);
@@ -1684,7 +1673,8 @@ uint32_t Isolate::GetNumberOfDataSlots() {
template <class T>
MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
- T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ T* data =
+ internal::ValueHelper::SlotAsValue<T>(GetDataFromSnapshotOnce(index));
if (data) internal::PerformCastCheck(data);
return Local<T>(data);
}
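
A minimal sketch (not part of the patch) of the replacement pattern the deprecation
notes above point at: instead of IdleNotificationDeadline(), signal memory pressure
explicitly. The host-side trigger is an assumption for illustration.

    #include <v8-isolate.h>

    // Hypothetical host callback fired when the embedder detects memory
    // pressure; asks V8 to shrink its heaps.
    void OnHostMemoryPressure(v8::Isolate* isolate, bool critical) {
      isolate->MemoryPressureNotification(
          critical ? v8::MemoryPressureLevel::kCritical
                   : v8::MemoryPressureLevel::kModerate);
    }
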
diff --git a/deps/v8/include/v8-local-handle.h b/deps/v8/include/v8-local-handle.h
index cbf87f949d..0dbbfdbc9f 100644
--- a/deps/v8/include/v8-local-handle.h
+++ b/deps/v8/include/v8-local-handle.h
@@ -50,9 +50,14 @@ class TracedReference;
class TracedReferenceBase;
class Utils;
+namespace debug {
+class ConsoleCallArguments;
+}
+
namespace internal {
template <typename T>
class CustomArguments;
+class SamplingHeapProfiler;
} // namespace internal
namespace api_internal {
@@ -92,6 +97,9 @@ class V8_EXPORT V8_NODISCARD HandleScope {
HandleScope(const HandleScope&) = delete;
void operator=(const HandleScope&) = delete;
+ static internal::Address* CreateHandleForCurrentIsolate(
+ internal::Address value);
+
protected:
V8_INLINE HandleScope() = default;
@@ -122,6 +130,33 @@ class V8_EXPORT V8_NODISCARD HandleScope {
friend class Context;
};
+namespace internal {
+
+/**
+ * Helper functions about handles.
+ */
+class HandleHelper final {
+ public:
+ /**
+ * Checks whether two handles are equal.
+ * They are equal iff they are both empty or they are both non-empty and the
+ * objects to which they refer are physically equal.
+ *
+ * If both handles refer to JS objects, this is the same as strict equality.
+ * For primitives, such as numbers or strings, a `false` return value does not
+ * indicate that the values aren't equal in the JavaScript sense.
+ * Use `Value::StrictEquals()` to check primitives for equality.
+ */
+ template <typename T1, typename T2>
+ V8_INLINE static bool EqualHandles(const T1& lhs, const T2& rhs) {
+ if (lhs.IsEmpty()) return rhs.IsEmpty();
+ if (rhs.IsEmpty()) return false;
+ return lhs.address() == rhs.address();
+ }
+};
+
+} // namespace internal
+
/**
* An object reference managed by the v8 garbage collector.
*
@@ -154,7 +189,8 @@ class V8_EXPORT V8_NODISCARD HandleScope {
template <class T>
class Local {
public:
- V8_INLINE Local() : val_(nullptr) {}
+ V8_INLINE Local() : val_(internal::ValueHelper::EmptyValue<T>()) {}
+
template <class S>
V8_INLINE Local(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
/**
@@ -168,55 +204,40 @@ class Local {
/**
* Returns true if the handle is empty.
*/
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+ V8_INLINE bool IsEmpty() const {
+ return val_ == internal::ValueHelper::EmptyValue<T>();
+ }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
- V8_INLINE void Clear() { val_ = nullptr; }
+ V8_INLINE void Clear() { val_ = internal::ValueHelper::EmptyValue<T>(); }
V8_INLINE T* operator->() const { return val_; }
V8_INLINE T* operator*() const { return val_; }
/**
- * Checks whether two handles are the same.
- * Returns true if both are empty, or if the objects to which they refer
- * are identical.
+ * Checks whether two handles are equal or different.
+ * They are equal iff they are both empty or they are both non-empty and the
+ * objects to which they refer are physically equal.
*
- * If both handles refer to JS objects, this is the same as strict equality.
- * For primitives, such as numbers or strings, a `false` return value does not
- * indicate that the values aren't equal in the JavaScript sense.
- * Use `Value::StrictEquals()` to check primitives for equality.
+   * If both handles refer to JS objects, operator== is the same as strict
+   * equality and operator!= is strict non-equality. For primitives, such as
+   * numbers or strings, the result of these operators does not reflect
+   * equality in the JavaScript sense. Use `Value::StrictEquals()` to check
+   * primitives for equality.
*/
+
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ return internal::HandleHelper::EqualHandles(*this, that);
}
- /**
- * Checks whether two handles are different.
- * Returns true if only one of the handles is empty, or if
- * the objects to which they refer are different.
- *
- * If both handles refer to JS objects, this is the same as strict
- * non-equality. For primitives, such as numbers or strings, a `true` return
- * value does not indicate that the values aren't equal in the JavaScript
- * sense. Use `Value::StrictEquals()` to check primitives for equality.
- */
template <class S>
V8_INLINE bool operator!=(const Local<S>& that) const {
return !operator==(that);
@@ -263,12 +284,12 @@ class Local {
V8_INLINE static Local<T> New(Isolate* isolate,
const PersistentBase<T>& that) {
- return New(isolate, that.val_);
+ return New(isolate, internal::ValueHelper::SlotAsValue<T>(that.val_));
}
V8_INLINE static Local<T> New(Isolate* isolate,
const BasicTracedReference<T>& that) {
- return New(isolate, *that);
+ return New(isolate, internal::ValueHelper::SlotAsValue<T>(*that));
}
private:
@@ -277,12 +298,6 @@ class Local {
template <class F>
friend class Eternal;
template <class F>
- friend class PersistentBase;
- template <class F, class M>
- friend class Persistent;
- template <class F>
- friend class Local;
- template <class F>
friend class MaybeLocal;
template <class F>
friend class FunctionCallbackInfo;
@@ -309,19 +324,31 @@ class Local {
friend class ReturnValue;
template <class F>
friend class Traced;
- template <class F>
- friend class BasicTracedReference;
- template <class F>
- friend class TracedReference;
+ friend class internal::SamplingHeapProfiler;
+ friend class internal::HandleHelper;
+ friend class debug::ConsoleCallArguments;
explicit V8_INLINE Local(T* that) : val_(that) {}
+
+ V8_INLINE internal::Address address() const {
+ return internal::ValueHelper::ValueAsAddress(val_);
+ }
+
+ V8_INLINE static Local<T> FromSlot(internal::Address* slot) {
+ return Local<T>(internal::ValueHelper::SlotAsValue<T>(slot));
+ }
+
V8_INLINE static Local<T> New(Isolate* isolate, T* that) {
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ return Local<T>(that);
+#else
if (that == nullptr) return Local<T>();
- T* that_ptr = that;
- internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
reinterpret_cast<internal::Isolate*>(isolate), *p)));
+#endif
}
+
T* val_;
};
@@ -344,13 +371,15 @@ using Handle = Local<T>;
template <class T>
class MaybeLocal {
public:
- V8_INLINE MaybeLocal() : val_(nullptr) {}
+ V8_INLINE MaybeLocal() : val_(internal::ValueHelper::EmptyValue<T>()) {}
template <class S>
V8_INLINE MaybeLocal(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
static_assert(std::is_base_of<T, S>::value, "type check");
}
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+ V8_INLINE bool IsEmpty() const {
+ return val_ == internal::ValueHelper::EmptyValue<T>();
+ }
/**
* Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
@@ -358,7 +387,7 @@ class MaybeLocal {
*/
template <class S>
V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
- out->val_ = IsEmpty() ? nullptr : this->val_;
+ out->val_ = IsEmpty() ? internal::ValueHelper::EmptyValue<T>() : this->val_;
return !IsEmpty();
}
@@ -367,7 +396,7 @@ class MaybeLocal {
* V8 will crash the process.
*/
V8_INLINE Local<T> ToLocalChecked() {
- if (V8_UNLIKELY(val_ == nullptr)) api_internal::ToLocalEmpty();
+ if (V8_UNLIKELY(IsEmpty())) api_internal::ToLocalEmpty();
return Local<T>(val_);
}
@@ -399,9 +428,13 @@ class V8_EXPORT V8_NODISCARD EscapableHandleScope : public HandleScope {
*/
template <class T>
V8_INLINE Local<T> Escape(Local<T> value) {
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ return value;
+#else
internal::Address* slot =
Escape(reinterpret_cast<internal::Address*>(*value));
return Local<T>(reinterpret_cast<T*>(slot));
+#endif
}
template <class T>
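
Illustrative only (not from the patch): as the comment on operator== notes, handle
comparison is object identity, not JavaScript equality. A small sketch, with the
values chosen purely for illustration:

    #include <v8-isolate.h>
    #include <v8-local-handle.h>
    #include <v8-primitive.h>

    void CompareNumberHandles(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::Local<v8::Value> a = v8::Number::New(isolate, 1.5);
      v8::Local<v8::Value> b = v8::Number::New(isolate, 1.5);
      // Handle comparison is identity: two separately allocated heap numbers
      // typically compare unequal here ...
      bool same_object = (a == b);
      // ... while StrictEquals() compares the JavaScript values (1.5 === 1.5).
      bool same_value = a->StrictEquals(b);
      (void)same_object;
      (void)same_value;
    }
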
diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h
index 887012ac8c..5c6c1c4705 100644
--- a/deps/v8/include/v8-metrics.h
+++ b/deps/v8/include/v8-metrics.h
@@ -12,6 +12,7 @@
#include "v8-internal.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -96,16 +97,42 @@ struct GarbageCollectionYoungCycle {
};
struct WasmModuleDecoded {
+ WasmModuleDecoded() = default;
+ WasmModuleDecoded(bool async, bool streamed, bool success,
+ size_t module_size_in_bytes, size_t function_count,
+ int64_t wall_clock_duration_in_us)
+ : async(async),
+ streamed(streamed),
+ success(success),
+ module_size_in_bytes(module_size_in_bytes),
+ function_count(function_count),
+ wall_clock_duration_in_us(wall_clock_duration_in_us) {}
+
bool async = false;
bool streamed = false;
bool success = false;
size_t module_size_in_bytes = 0;
size_t function_count = 0;
int64_t wall_clock_duration_in_us = -1;
- int64_t cpu_duration_in_us = -1;
};
struct WasmModuleCompiled {
+ WasmModuleCompiled() = default;
+
+ WasmModuleCompiled(bool async, bool streamed, bool cached, bool deserialized,
+ bool lazy, bool success, size_t code_size_in_bytes,
+ size_t liftoff_bailout_count,
+ int64_t wall_clock_duration_in_us)
+ : async(async),
+ streamed(streamed),
+ cached(cached),
+ deserialized(deserialized),
+ lazy(lazy),
+ success(success),
+ code_size_in_bytes(code_size_in_bytes),
+ liftoff_bailout_count(liftoff_bailout_count),
+ wall_clock_duration_in_us(wall_clock_duration_in_us) {}
+
bool async = false;
bool streamed = false;
bool cached = false;
@@ -115,7 +142,6 @@ struct WasmModuleCompiled {
size_t code_size_in_bytes = 0;
size_t liftoff_bailout_count = 0;
int64_t wall_clock_duration_in_us = -1;
- int64_t cpu_duration_in_us = -1;
};
struct WasmModuleInstantiated {
diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h
index d7332ba0c8..d805dbe9e7 100644
--- a/deps/v8/include/v8-object.h
+++ b/deps/v8/include/v8-object.h
@@ -608,6 +608,19 @@ class V8_EXPORT Object : public Value {
}
/**
+   * Gets the context in which the object was created (see GetCreationContext())
+   * and, if it's available, reads the respective embedder field value.
+   * If the context can't be obtained, nullptr is returned.
+   * Basically it's a shortcut for
+   * obj->GetCreationContext().GetAlignedPointerFromEmbedderData(index)
+   * which doesn't create a handle for the Context object on the way and
+   * doesn't try to expand the embedder data attached to the context.
+   * In case the Local<Context> is already available for other reasons,
+   * it's fine to keep using Context::GetAlignedPointerFromEmbedderData().
+ */
+ void* GetAlignedPointerFromEmbedderDataInCreationContext(int index);
+
+ /**
* Checks whether a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
* When an Object is callable this method returns true.
@@ -707,7 +720,7 @@ Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
int instance_type = I::GetInstanceType(obj);
@@ -717,12 +730,17 @@ Local<Value> Object::GetInternalField(int index) {
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endiannes issues.
- value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
+ value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
#endif
+
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ return Local<Value>(reinterpret_cast<Value*>(value));
+#else
internal::Isolate* isolate =
internal::IsolateFromNeverReadOnlySpaceObject(obj);
A* result = HandleScope::CreateHandle(isolate, value);
return Local<Value>(reinterpret_cast<Value*>(result));
+#endif
}
#endif
return SlowGetInternalField(index);
@@ -732,7 +750,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
#if !defined(V8_ENABLE_CHECKS)
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
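
A hedged sketch of the shortcut described in the new comment; the wrapper type and
the embedder-data index below are assumptions for illustration:

    #include <v8-local-handle.h>
    #include <v8-object.h>

    struct MyWrapper;                // hypothetical embedder type
    constexpr int kWrapperSlot = 0;  // hypothetical embedder data index

    MyWrapper* UnwrapViaCreationContext(v8::Local<v8::Object> obj) {
      // Reads the creation context's embedder data without materializing a
      // Local<Context>; yields nullptr if the context can't be obtained.
      return static_cast<MyWrapper*>(
          obj->GetAlignedPointerFromEmbedderDataInCreationContext(kWrapperSlot));
    }
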
diff --git a/deps/v8/include/v8-persistent-handle.h b/deps/v8/include/v8-persistent-handle.h
index dbda4edb9b..08c1e22926 100644
--- a/deps/v8/include/v8-persistent-handle.h
+++ b/deps/v8/include/v8-persistent-handle.h
@@ -55,7 +55,7 @@ class Eternal {
V8_INLINE Local<T> Get(Isolate* isolate) const {
// The eternal handle will never go away, so as with the roots, we don't
// even need to open a handle.
- return Local<T>(val_);
+ return Local<T>(internal::ValueHelper::SlotAsValue<T>(val_));
}
V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
@@ -68,6 +68,10 @@ class Eternal {
}
private:
+ V8_INLINE internal::Address address() const {
+ return *reinterpret_cast<internal::Address*>(val_);
+ }
+
T* val_;
};
@@ -122,20 +126,12 @@ class PersistentBase {
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
@@ -221,8 +217,15 @@ class PersistentBase {
template <class F1, class F2>
friend class PersistentValueVector;
friend class Object;
+ friend class internal::HandleHelper;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
+ V8_INLINE T* operator*() const { return this->val_; }
+ V8_INLINE internal::Address address() const {
+ return *reinterpret_cast<internal::Address*>(val_);
+ }
+
+ V8_INLINE static T* New(Isolate* isolate, Local<T> that);
V8_INLINE static T* New(Isolate* isolate, T* that);
T* val_;
@@ -252,7 +255,7 @@ class NonCopyablePersistentTraits {
* This will clone the contents of storage cell, but not any of the flags, etc.
*/
template <class T>
-struct CopyablePersistentTraits {
+struct V8_DEPRECATED("Use v8::Global instead") CopyablePersistentTraits {
using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
static const bool kResetInDestructor = true;
template <class S, class M>
@@ -282,11 +285,13 @@ class Persistent : public PersistentBase<T> {
* When the Local is non-empty, a new storage cell is created
* pointing to the same object, and no flags are set.
*/
+
template <class S>
V8_INLINE Persistent(Isolate* isolate, Local<S> that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that)) {
static_assert(std::is_base_of<T, S>::value, "type check");
}
+
/**
* Construct a Persistent from a Persistent.
* When the Persistent is non-empty, a new storage cell is created
@@ -356,7 +361,6 @@ class Persistent : public PersistentBase<T> {
friend class ReturnValue;
explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
- V8_INLINE T* operator*() const { return this->val_; }
template <class S, class M2>
V8_INLINE void Copy(const Persistent<S, M2>& that);
};
@@ -381,7 +385,7 @@ class Global : public PersistentBase<T> {
*/
template <class S>
V8_INLINE Global(Isolate* isolate, Local<S> that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that)) {
static_assert(std::is_base_of<T, S>::value, "type check");
}
@@ -425,7 +429,6 @@ class Global : public PersistentBase<T> {
private:
template <class F>
friend class ReturnValue;
- V8_INLINE T* operator*() const { return this->val_; }
};
// UniquePersistent is an alias for Global for historical reason.
@@ -443,6 +446,12 @@ class V8_EXPORT PersistentHandleVisitor {
};
template <class T>
+T* PersistentBase<T>::New(Isolate* isolate, Local<T> that) {
+ return PersistentBase<T>::New(isolate,
+ internal::ValueHelper::ValueAsSlot(*that));
+}
+
+template <class T>
T* PersistentBase<T>::New(Isolate* isolate, T* that) {
if (that == nullptr) return nullptr;
internal::Address* p = reinterpret_cast<internal::Address*>(that);
@@ -486,7 +495,7 @@ void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
static_assert(std::is_base_of<T, S>::value, "type check");
Reset();
if (other.IsEmpty()) return;
- this->val_ = New(isolate, other.val_);
+ this->val_ = New(isolate, internal::ValueHelper::ValueAsSlot(*other));
}
/**
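
The deprecation of CopyablePersistentTraits above points at v8::Global; a minimal
migration sketch (the holder class is illustrative):

    #include <v8-isolate.h>
    #include <v8-local-handle.h>
    #include <v8-object.h>
    #include <v8-persistent-handle.h>

    class ObjectHolder {
     public:
      // Before: v8::Persistent<v8::Object,
      //                        v8::CopyablePersistentTraits<v8::Object>> obj_;
      void Set(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
        obj_.Reset(isolate, obj);
      }
      v8::Local<v8::Object> Get(v8::Isolate* isolate) const {
        return obj_.Get(isolate);
      }

     private:
      v8::Global<v8::Object> obj_;  // move-only, resets itself on destruction
    };
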
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 32a82f881e..4f476f5c3c 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -5,9 +5,11 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
+#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h> // For abort.
+
#include <memory>
#include <string>
@@ -266,6 +268,38 @@ class JobTask {
};
/**
+ * A "blocking call" refers to any call that causes the calling thread to wait
+ * off-CPU. It includes but is not limited to calls that wait on synchronous
+ * file I/O operations: read or write a file from disk, interact with a pipe or
+ * a socket, rename or delete a file, enumerate files in a directory, etc.
+ * Acquiring a low contention lock is not considered a blocking call.
+ */
+
+/**
+ * BlockingType indicates the likelihood that a blocking call will actually
+ * block.
+ */
+enum class BlockingType {
+ // The call might block (e.g. file I/O that might hit in memory cache).
+ kMayBlock,
+ // The call will definitely block (e.g. cache already checked and now pinging
+ // server synchronously).
+ kWillBlock
+};
+
+/**
+ * This class is instantiated with CreateBlockingScope() in every scope where a
+ * blocking call is made and serves as a precise annotation of the scope that
+ * may/will block. May be implemented by an embedder to adjust the thread count.
+ * CPU usage should be minimal within that scope. ScopedBlockingCalls can be
+ * nested.
+ */
+class ScopedBlockingCall {
+ public:
+ virtual ~ScopedBlockingCall() = default;
+};
+
+/**
* The interface represents complex arguments to trace events.
*/
class ConvertableToTraceFormat {
@@ -285,6 +319,8 @@ class ConvertableToTraceFormat {
* V8 Tracing controller.
*
* Can be implemented by an embedder to record trace events from V8.
+ *
+ * Will become obsolete in Perfetto SDK build (v8_use_perfetto = true).
*/
class TracingController {
public:
@@ -348,10 +384,16 @@ class TracingController {
virtual void OnTraceDisabled() = 0;
};
- /** Adds tracing state change observer. */
+ /**
+ * Adds tracing state change observer.
+ * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
+ */
virtual void AddTraceStateObserver(TraceStateObserver*) {}
- /** Removes tracing state change observer. */
+ /**
+ * Removes tracing state change observer.
+ * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
+ */
virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
@@ -534,7 +576,7 @@ static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;
// to avoid pulling in large OS header files into this header file. Instead,
// the users of these routines are expected to include the respective OS
// headers in addition to this one.
-#if V8_OS_MACOS
+#if V8_OS_DARWIN
// Convert between a shared memory handle and a mach_port_t referencing a memory
// entry object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
@@ -945,11 +987,12 @@ class Platform {
virtual void OnCriticalMemoryPressure() {}
/**
- * Gets the number of worker threads used by
- * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
- * of tasks a work package should be split into. A return value of 0 means
- * that there are no worker threads available. Note that a value of 0 won't
- * prohibit V8 from posting tasks using |CallOnWorkerThread|.
+ * Gets the max number of worker threads that may be used to execute
+ * concurrent work scheduled for any single TaskPriority by
+ * Call(BlockingTask)OnWorkerThread() or PostJob(). This can be used to
+ * estimate the number of tasks a work package should be split into. A return
+ * value of 0 means that there are no worker threads available. Note that a
+ * value of 0 won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
*/
virtual int NumberOfWorkerThreads() = 0;
@@ -1065,6 +1108,14 @@ class Platform {
TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;
/**
+ * Instantiates a ScopedBlockingCall to annotate a scope that may/will block.
+ */
+ virtual std::unique_ptr<ScopedBlockingCall> CreateBlockingScope(
+ BlockingType blocking_type) {
+ return nullptr;
+ }
+
+ /**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least
* millisecond-precision values. For this reason,
@@ -1074,11 +1125,28 @@ class Platform {
virtual double MonotonicallyIncreasingTime() = 0;
/**
- * Current wall-clock time in milliseconds since epoch.
- * This function is expected to return at least millisecond-precision values.
+ * Current wall-clock time in milliseconds since epoch. Use
+   * CurrentClockTimeMillisecondsHighResolution() when higher precision is
+ * required.
+ */
+ virtual int64_t CurrentClockTimeMilliseconds() {
+ return floor(CurrentClockTimeMillis());
+ }
+
+ /**
+ * This function is deprecated and will be deleted. Use either
+ * CurrentClockTimeMilliseconds() or
+ * CurrentClockTimeMillisecondsHighResolution().
*/
virtual double CurrentClockTimeMillis() = 0;
+ /**
+ * Same as CurrentClockTimeMilliseconds(), but with more precision.
+ */
+ virtual double CurrentClockTimeMillisecondsHighResolution() {
+ return CurrentClockTimeMillis();
+ }
+
typedef void (*StackTracePrinter)();
/**
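
A hedged sketch of an embedder-side ScopedBlockingCall returned from
Platform::CreateBlockingScope(); the scheduler hooks are assumptions and are shown
only as comments:

    #include <v8-platform.h>

    class MyBlockingScope final : public v8::ScopedBlockingCall {
     public:
      explicit MyBlockingScope(v8::BlockingType type) {
        // Hypothetical embedder hook: let the thread pool add a worker while
        // this thread waits off-CPU (kWillBlock) or may do so (kMayBlock).
        // scheduler::BlockingStarted(type == v8::BlockingType::kWillBlock);
        (void)type;
      }
      ~MyBlockingScope() override {
        // scheduler::BlockingFinished();
      }
    };

    // In a Platform subclass:
    //   std::unique_ptr<v8::ScopedBlockingCall> CreateBlockingScope(
    //       v8::BlockingType type) override {
    //     return std::make_unique<MyBlockingScope>(type);
    //   }
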
diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h
index 4fef8da7f8..4148a7053a 100644
--- a/deps/v8/include/v8-primitive.h
+++ b/deps/v8/include/v8-primitive.h
@@ -493,9 +493,16 @@ class V8_EXPORT String : public Name {
/**
* Returns true if this string can be made external.
*/
+ V8_DEPRECATE_SOON("Use the version that takes an encoding as argument.")
bool CanMakeExternal() const;
/**
+ * Returns true if this string can be made external, given the encoding for
+ * the external string resource.
+ */
+ bool CanMakeExternal(Encoding encoding) const;
+
+ /**
* Returns true if the strings values are equal. Same as JS ==/===.
*/
bool StringEquals(Local<String> str) const;
@@ -776,14 +783,14 @@ Local<String> String::Empty(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
- return Local<String>(reinterpret_cast<String*>(slot));
+ S* slot = I::GetRootSlot(isolate, I::kEmptyStringRootIndex);
+ return Local<String>::FromSlot(slot);
}
String::ExternalStringResource* String::GetExternalStringResource() const {
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
@@ -804,7 +811,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
String::Encoding* encoding_out) const {
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
int type = I::GetInstanceType(obj) & I::kStringRepresentationAndEncodingMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
ExternalStringResourceBase* resource;
@@ -829,32 +836,32 @@ V8_INLINE Local<Primitive> Undefined(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
- return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
+ S* slot = I::GetRootSlot(isolate, I::kUndefinedValueRootIndex);
+ return Local<Primitive>::FromSlot(slot);
}
V8_INLINE Local<Primitive> Null(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
- return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
+ S* slot = I::GetRootSlot(isolate, I::kNullValueRootIndex);
+ return Local<Primitive>::FromSlot(slot);
}
V8_INLINE Local<Boolean> True(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
- return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
+ S* slot = I::GetRootSlot(isolate, I::kTrueValueRootIndex);
+ return Local<Boolean>::FromSlot(slot);
}
V8_INLINE Local<Boolean> False(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
- return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
+ S* slot = I::GetRootSlot(isolate, I::kFalseValueRootIndex);
+ return Local<Boolean>::FromSlot(slot);
}
Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
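
A small sketch of the new encoding-aware check before externalizing a string; the
resource object is assumed to be supplied by the embedder:

    #include <v8-local-handle.h>
    #include <v8-primitive.h>

    bool MaybeExternalizeOneByte(
        v8::Local<v8::String> str,
        v8::String::ExternalOneByteStringResource* resource) {
      // Only hand the string over when a one-byte external representation is
      // actually possible for it.
      if (!str->CanMakeExternal(v8::String::ONE_BYTE_ENCODING)) return false;
      return str->MakeExternal(resource);
    }
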
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 6b73fc60bf..d394151221 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -596,6 +596,7 @@ class V8_EXPORT HeapGraphNode {
kBigInt = 13, // BigInt.
kObjectShape = 14, // Internal data used for tracking the shapes (or
// "hidden classes") of JS objects.
+ kWasmObject = 15, // A WasmGC struct or array.
};
/** Returns node type (see HeapGraphNode::Type). */
diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h
index e2ba845268..4a8ccab7e2 100644
--- a/deps/v8/include/v8-script.h
+++ b/deps/v8/include/v8-script.h
@@ -11,6 +11,7 @@
#include <memory>
#include <vector>
+#include "v8-callbacks.h" // NOLINT(build/include_directory)
#include "v8-data.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-maybe.h" // NOLINT(build/include_directory)
@@ -347,6 +348,12 @@ class V8_EXPORT Script {
* ScriptOrigin. This can be either a v8::String or v8::Undefined.
*/
Local<Value> GetResourceName();
+
+ /**
+ * If the script was compiled, returns the positions of lazy functions which
+ * were eventually compiled and executed.
+ */
+ std::vector<int> GetProducedCompileHints() const;
};
enum class ScriptType { kClassic, kModule };
@@ -407,6 +414,8 @@ class V8_EXPORT ScriptCompiler {
V8_INLINE explicit Source(
Local<String> source_string, CachedData* cached_data = nullptr,
ConsumeCodeCacheTask* consume_cache_task = nullptr);
+ V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
+ CompileHintCallback callback, void* callback_data);
V8_INLINE ~Source() = default;
// Ownership of the CachedData or its buffers is *not* transferred to the
@@ -434,6 +443,10 @@ class V8_EXPORT ScriptCompiler {
// set when calling a compile method.
std::unique_ptr<CachedData> cached_data;
std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
+
+ // For requesting compile hints from the embedder.
+ CompileHintCallback compile_hint_callback = nullptr;
+ void* compile_hint_callback_data = nullptr;
};
/**
@@ -562,7 +575,9 @@ class V8_EXPORT ScriptCompiler {
enum CompileOptions {
kNoCompileOptions = 0,
kConsumeCodeCache,
- kEagerCompile
+ kEagerCompile,
+ kProduceCompileHints,
+ kConsumeCompileHints
};
/**
@@ -775,6 +790,19 @@ ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
cached_data(data),
consume_cache_task(consume_cache_task) {}
+ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
+ CompileHintCallback callback,
+ void* callback_data)
+ : source_string(string),
+ resource_name(origin.ResourceName()),
+ resource_line_offset(origin.LineOffset()),
+ resource_column_offset(origin.ColumnOffset()),
+ resource_options(origin.Options()),
+ source_map_url(origin.SourceMapUrl()),
+ host_defined_options(origin.GetHostDefinedOptions()),
+ compile_hint_callback(callback),
+ compile_hint_callback_data(callback_data) {}
+
const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
const {
return cached_data.get();
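
A hedged sketch of the compile-hints round trip enabled above: positions are
collected from a producing run via GetProducedCompileHints() and fed back through
the new Source constructor. The bool(int, void*) callback shape is taken from
CompileHintCallback in v8-callbacks.h; the storage and wiring are assumptions.

    #include <algorithm>
    #include <vector>

    // Returns whether the function starting at |position| should be compiled
    // eagerly, based on hints recorded by a previous run (vector kept sorted).
    static bool ConsumeHint(int position, void* data) {
      const auto* hints = static_cast<const std::vector<int>*>(data);
      return std::binary_search(hints->begin(), hints->end(), position);
    }

    // Producing run: compile with kProduceCompileHints, execute, then
    //   std::vector<int> hints = script->GetProducedCompileHints();
    // Consuming run:
    //   v8::ScriptCompiler::Source source(source_string, origin, &ConsumeHint,
    //                                     &hints);
    //   auto script = v8::ScriptCompiler::Compile(
    //       context, &source, v8::ScriptCompiler::kConsumeCompileHints);
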
diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h
index 2400357cf6..d47bcbaec9 100644
--- a/deps/v8/include/v8-snapshot.h
+++ b/deps/v8/include/v8-snapshot.h
@@ -91,7 +91,7 @@ class V8_EXPORT SnapshotCreator {
*/
SnapshotCreator(Isolate* isolate,
const intptr_t* external_references = nullptr,
- StartupData* existing_blob = nullptr);
+ const StartupData* existing_blob = nullptr);
/**
* Create and enter an isolate, and set it up for serialization.
@@ -102,7 +102,7 @@ class V8_EXPORT SnapshotCreator {
* that must be equivalent to CreateParams::external_references.
*/
SnapshotCreator(const intptr_t* external_references = nullptr,
- StartupData* existing_blob = nullptr);
+ const StartupData* existing_blob = nullptr);
/**
* Destroy the snapshot creator, and exit and dispose of the Isolate
@@ -179,16 +179,12 @@ class V8_EXPORT SnapshotCreator {
template <class T>
size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
- T* object_ptr = *object;
- internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
- return AddData(context, *p);
+ return AddData(context, internal::ValueHelper::ValueAsAddress(*object));
}
template <class T>
size_t SnapshotCreator::AddData(Local<T> object) {
- T* object_ptr = *object;
- internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
- return AddData(*p);
+ return AddData(internal::ValueHelper::ValueAsAddress(*object));
}
} // namespace v8
diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h
index 669012a981..11296cd488 100644
--- a/deps/v8/include/v8-template.h
+++ b/deps/v8/include/v8-template.h
@@ -30,7 +30,9 @@ class Signature;
F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
F(ErrorPrototype, initial_error_prototype) \
F(IteratorPrototype, initial_iterator_prototype) \
- F(ObjProto_valueOf, object_value_of_function)
+ F(MapIteratorPrototype, initial_map_iterator_prototype) \
+ F(ObjProto_valueOf, object_value_of_function) \
+ F(SetIteratorPrototype, initial_set_iterator_prototype)
enum Intrinsic {
#define V8_DECL_INTRINSIC(name, iname) k##name,
diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h
index e0fd57c49d..d92c3fdc86 100644
--- a/deps/v8/include/v8-traced-handle.h
+++ b/deps/v8/include/v8-traced-handle.h
@@ -62,7 +62,8 @@ class TracedReferenceBase {
*/
V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const {
if (IsEmpty()) return Local<Value>();
- return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
+ return Local<Value>::New(isolate,
+ internal::ValueHelper::SlotAsValue<Value>(val_));
}
/**
@@ -103,10 +104,13 @@ class TracedReferenceBase {
V8_EXPORT void CheckValue() const;
+ V8_INLINE internal::Address address() const { return *val_; }
+
// val_ points to a GlobalHandles node.
internal::Address* val_ = nullptr;
friend class internal::BasicTracedReferenceExtractor;
+ friend class internal::HandleHelper;
template <typename F>
friend class Local;
template <typename U>
@@ -117,11 +121,11 @@ class TracedReferenceBase {
/**
* A traced handle with copy and move semantics. The handle is to be used
- * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
- * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
+ * together as part of GarbageCollected objects (see v8-cppgc.h) or from the
+ * stack, and specifies edges from C++ objects to JavaScript.
*
* The exact semantics are:
- * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
+ * - Tracing garbage collections use the CppHeap.
* - Non-tracing garbage collections refer to
* |v8::EmbedderRootsHandler::IsRoot()| whether the handle should
* be treated as root or not.
@@ -135,7 +139,12 @@ class BasicTracedReference : public TracedReferenceBase {
/**
* Construct a Local<T> from this handle.
*/
- Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
+ Local<T> Get(Isolate* isolate) const {
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ if (val_ == nullptr) return Local<T>();
+#endif
+ return Local<T>::New(isolate, *this);
+ }
template <class S>
V8_INLINE BasicTracedReference<S>& As() const {
@@ -166,7 +175,6 @@ class BasicTracedReference : public TracedReferenceBase {
Isolate* isolate, T* that, void* slot,
internal::GlobalHandleStoreMode store_mode);
- friend class EmbedderHeapTracer;
template <typename F>
friend class Local;
friend class Object;
@@ -181,13 +189,7 @@ class BasicTracedReference : public TracedReferenceBase {
/**
* A traced handle without destructor that clears the handle. The embedder needs
* to ensure that the handle is not accessed once the V8 object has been
- * reclaimed. This can happen when the handle is not passed through the
- * EmbedderHeapTracer. For more details see BasicTracedReference.
- *
- * The reference assumes the embedder has precise knowledge about references at
- * all times. In case V8 needs to separately handle on-stack references, the
- * embedder is required to set the stack start through
- * |EmbedderHeapTracer::SetStackStart|.
+ * reclaimed. For more details see BasicTracedReference.
*/
template <typename T>
class TracedReference : public BasicTracedReference<T> {
@@ -207,7 +209,7 @@ class TracedReference : public BasicTracedReference<T> {
*/
template <class S>
TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ = this->New(isolate, that.val_, &this->val_,
+ this->val_ = this->New(isolate, *that, &this->val_,
internal::GlobalHandleStoreMode::kInitializingStore);
static_assert(std::is_base_of<T, S>::value, "type check");
}
@@ -291,7 +293,7 @@ template <class T>
internal::Address* BasicTracedReference<T>::New(
Isolate* isolate, T* that, void* slot,
internal::GlobalHandleStoreMode store_mode) {
- if (that == nullptr) return nullptr;
+ if (that == internal::ValueHelper::EmptyValue<T>()) return nullptr;
internal::Address* p = reinterpret_cast<internal::Address*>(that);
return internal::GlobalizeTracedReference(
reinterpret_cast<internal::Isolate*>(isolate), p,
@@ -306,21 +308,13 @@ void TracedReferenceBase::Reset() {
V8_INLINE bool operator==(const TracedReferenceBase& lhs,
const TracedReferenceBase& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ return internal::HandleHelper::EqualHandles(lhs, rhs);
}
template <typename U>
V8_INLINE bool operator==(const TracedReferenceBase& lhs,
const v8::Local<U>& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
+ return internal::HandleHelper::EqualHandles(lhs, rhs);
}
template <typename U>
@@ -353,7 +347,7 @@ void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
this->Reset();
if (other.IsEmpty()) return;
this->SetSlotThreadSafe(
- this->New(isolate, other.val_, &this->val_,
+ this->New(isolate, *other, &this->val_,
internal::GlobalHandleStoreMode::kAssigningStore));
}
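
A hedged sketch of the CppHeap-era usage the updated comments describe: a
cppgc-managed object holding a C++ -> JS edge through TracedReference (the class
name is illustrative):

    #include <cppgc/garbage-collected.h>
    #include <cppgc/visitor.h>
    #include <v8-cppgc.h>
    #include <v8-traced-handle.h>

    class JsWrapper final : public cppgc::GarbageCollected<JsWrapper> {
     public:
      void SetTarget(v8::Isolate* isolate, v8::Local<v8::Object> target) {
        target_.Reset(isolate, target);
      }
      // Called by the unified heap tracer; reports the C++ -> JS edge.
      void Trace(cppgc::Visitor* visitor) const { visitor->Trace(target_); }

     private:
      v8::TracedReference<v8::Object> target_;
    };
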
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 159027d317..3661169763 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -181,7 +181,11 @@ class PersistentValueMapBase {
* Get value stored in map.
*/
Local<V> Get(const K& key) {
- return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, key)));
+ V* p = FromVal(Traits::Get(&impl_, key));
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ if (p == nullptr) return Local<V>();
+#endif
+ return Local<V>::New(isolate_, p);
}
/**
@@ -236,7 +240,8 @@ class PersistentValueMapBase {
: value_(other.value_) { }
Local<V> NewLocal(Isolate* isolate) const {
- return Local<V>::New(isolate, FromVal(value_));
+ return Local<V>::New(
+ isolate, internal::ValueHelper::SlotAsValue<V>(FromVal(value_)));
}
bool IsEmpty() const {
return value_ == kPersistentContainerNotFound;
@@ -613,7 +618,8 @@ class V8_DEPRECATE_SOON("Use std::vector<Global<V>>.") PersistentValueVector {
* Retrieve the i-th value in the vector.
*/
Local<V> Get(size_t index) const {
- return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, index)));
+ return Local<V>::New(isolate_, internal::ValueHelper::SlotAsValue<V>(
+ FromVal(Traits::Get(&impl_, index))));
}
/**
diff --git a/deps/v8/include/v8-value.h b/deps/v8/include/v8-value.h
index 866da20124..c80ae064ee 100644
--- a/deps/v8/include/v8-value.h
+++ b/deps/v8/include/v8-value.h
@@ -345,6 +345,11 @@ class V8_EXPORT Value : public Data {
bool IsWasmModuleObject() const;
/**
+ * Returns true if this value is the WasmNull object.
+ */
+ bool IsWasmNull() const;
+
+ /**
* Returns true if the value is a Module Namespace Object.
*/
bool IsModuleNamespaceObject() const;
@@ -469,10 +474,14 @@ bool Value::IsUndefined() const {
bool Value::QuickIsUndefined() const {
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
+#if V8_STATIC_ROOTS_BOOL
+ return I::is_identical(obj, I::StaticReadOnlyRoot::kUndefinedValue);
+#else
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
+#endif // V8_STATIC_ROOTS_BOOL
}
bool Value::IsNull() const {
@@ -486,10 +495,14 @@ bool Value::IsNull() const {
bool Value::QuickIsNull() const {
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
+#if V8_STATIC_ROOTS_BOOL
+ return I::is_identical(obj, I::StaticReadOnlyRoot::kNullValue);
+#else
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kNullOddballKind);
+#endif // V8_STATIC_ROOTS_BOOL
}
bool Value::IsNullOrUndefined() const {
@@ -501,13 +514,17 @@ bool Value::IsNullOrUndefined() const {
}
bool Value::QuickIsNullOrUndefined() const {
+#if V8_STATIC_ROOTS_BOOL
+ return QuickIsNull() || QuickIsUndefined();
+#else
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
int kind = I::GetOddballKind(obj);
return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
+#endif // V8_STATIC_ROOTS_BOOL
}
bool Value::IsString() const {
@@ -521,9 +538,14 @@ bool Value::IsString() const {
bool Value::QuickIsString() const {
using A = internal::Address;
using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
+ A obj = internal::ValueHelper::ValueAsAddress(this);
if (!I::HasHeapObjectTag(obj)) return false;
+#if V8_STATIC_ROOTS_BOOL && !V8_MAP_PACKING
+ return I::CheckInstanceMapRange(obj, I::StaticReadOnlyRoot::kFirstStringMap,
+ I::StaticReadOnlyRoot::kLastStringMap);
+#else
return (I::GetInstanceType(obj) < I::kFirstNonstringType);
+#endif // V8_STATIC_ROOTS_BOOL
}
} // namespace v8
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index a9d6f92aff..f2146fa1ea 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 10
-#define V8_MINOR_VERSION 9
-#define V8_BUILD_NUMBER 194
-#define V8_PATCH_LEVEL 9
+#define V8_MAJOR_VERSION 11
+#define V8_MINOR_VERSION 3
+#define V8_BUILD_NUMBER 244
+#define V8_PATCH_LEVEL 4
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8-wasm.h b/deps/v8/include/v8-wasm.h
index 05acd2e86d..7151461546 100644
--- a/deps/v8/include/v8-wasm.h
+++ b/deps/v8/include/v8-wasm.h
@@ -144,7 +144,7 @@ class V8_EXPORT WasmStreaming final {
/**
* {Finish} should be called after all received bytes were passed to
* {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
- * does not have to be called after {Abort} has been called already.
+ * must not be called after {Abort} has been called already.
* If {can_use_compiled_module} is true and {SetCompiledModuleBytes} was
* previously called, the compiled module bytes can be used.
* If {can_use_compiled_module} is false, the compiled module bytes previously
@@ -156,6 +156,7 @@ class V8_EXPORT WasmStreaming final {
* Abort streaming compilation. If {exception} has a value, then the promise
* associated with streaming compilation is rejected with that value. If
* {exception} does not have value, the promise does not get rejected.
+ * {Abort} must not be called repeatedly, or after {Finish}.
*/
void Abort(MaybeLocal<Value> exception);
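
A short sketch of the streaming contract tightened above: exactly one of Finish()
or Abort() terminates the stream (the fetch-completion hook is an assumption):

    #include <v8-wasm.h>

    void OnFetchComplete(v8::WasmStreaming* streaming, bool ok) {
      if (ok) {
        // All bytes were delivered via OnBytesReceived(); never call Finish()
        // after Abort().
        streaming->Finish();
      } else {
        // Reject without a value; must not be called twice or after Finish().
        streaming->Abort(v8::MaybeLocal<v8::Value>());
      }
    }
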
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 2ac27b36b4..bee208ffa9 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -173,6 +173,7 @@ path. Add it with -I<path> to the command line
// V8_TARGET_OS_LINUX
// V8_TARGET_OS_MACOS
// V8_TARGET_OS_WIN
+// V8_TARGET_OS_CHROMEOS
//
// If not set explicitly, these fall back to corresponding V8_OS_ values.
@@ -184,7 +185,8 @@ path. Add it with -I<path> to the command line
&& !defined(V8_TARGET_OS_IOS) \
&& !defined(V8_TARGET_OS_LINUX) \
&& !defined(V8_TARGET_OS_MACOS) \
- && !defined(V8_TARGET_OS_WIN)
+ && !defined(V8_TARGET_OS_WIN) \
+ && !defined(V8_TARGET_OS_CHROMEOS)
# error No known target OS defined.
# endif
@@ -195,7 +197,8 @@ path. Add it with -I<path> to the command line
|| defined(V8_TARGET_OS_IOS) \
|| defined(V8_TARGET_OS_LINUX) \
|| defined(V8_TARGET_OS_MACOS) \
- || defined(V8_TARGET_OS_WIN)
+ || defined(V8_TARGET_OS_WIN) \
+ || defined(V8_TARGET_OS_CHROMEOS)
# error A target OS is defined but V8_HAVE_TARGET_OS is unset.
# endif
@@ -308,6 +311,9 @@ path. Add it with -I<path> to the command line
// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
// V8_HAS_BUILTIN_FRAME_ADDRESS - __builtin_frame_address() supported
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
+// V8_HAS_BUILTIN_ADD_OVERFLOW - __builtin_add_overflow() supported
+// V8_HAS_BUILTIN_SUB_OVERFLOW - __builtin_sub_overflow() supported
+// V8_HAS_BUILTIN_MUL_OVERFLOW - __builtin_mul_overflow() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
@@ -339,9 +345,25 @@ path. Add it with -I<path> to the command line
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_CONSTINIT \
(__has_attribute(require_constant_initialization))
+# define V8_HAS_ATTRIBUTE_CONST (__has_attribute(const))
# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
+// Support for the "preserve_most" attribute is limited:
+// - 32-bit platforms do not implement it,
+// - component builds fail because _dl_runtime_resolve clobbers registers,
+// - we see crashes on arm64 on Windows (https://crbug.com/1409934), which can
+// hopefully be fixed in the future.
+// Additionally, the initial implementation in clang <= 16 overwrote the return
+// register(s) in the epilogue of a preserve_most function, so we only use
+// preserve_most in clang >= 17 (see https://reviews.llvm.org/D143425).
+#if (defined(_M_X64) || defined(__x86_64__) /* x64 (everywhere) */ \
+ || ((defined(__AARCH64EL__) || defined(_M_ARM64)) /* arm64, but ... */ \
+ && !defined(_WIN32))) /* not on windows */ \
+ && !defined(COMPONENT_BUILD) /* no component build */\
+ && __clang_major__ >= 17 /* clang >= 17 */
+# define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most))
+#endif
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
@@ -360,6 +382,9 @@ path. Add it with -I<path> to the command line
# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define V8_HAS_BUILTIN_FRAME_ADDRESS (__has_builtin(__builtin_frame_address))
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+# define V8_HAS_BUILTIN_ADD_OVERFLOW (__has_builtin(__builtin_add_overflow))
+# define V8_HAS_BUILTIN_SUB_OVERFLOW (__has_builtin(__builtin_sub_overflow))
+# define V8_HAS_BUILTIN_MUL_OVERFLOW (__has_builtin(__builtin_mul_overflow))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
@@ -455,6 +480,16 @@ path. Add it with -I<path> to the command line
#endif
+// A macro to mark functions whose return values don't change (e.g. across
+// calls), so the compiler is free to hoist and fold multiple calls together.
+// Use like:
+// V8_CONST int foo() { ... }
+#if V8_HAS_ATTRIBUTE_CONST
+# define V8_CONST __attribute__((const))
+#else
+# define V8_CONST
+#endif
+
// A macro to mark a declaration as requiring constant initialization.
// Use like:
// int* foo V8_CONSTINIT;
@@ -487,6 +522,21 @@ path. Add it with -I<path> to the command line
#endif
+// A macro used to change the calling conventions to preserve all registers (no
+// caller-saved registers). Use this for cold functions called from hot
+// functions.
+// Note: The attribute is considered experimental, so apply with care. Also,
+// "preserve_most" is currently not handling the return value correctly, so only
+// use it for functions returning void (see https://reviews.llvm.org/D141020).
+// Use like:
+// V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
+#if V8_HAS_ATTRIBUTE_PRESERVE_MOST
+# define V8_PRESERVE_MOST __attribute__((preserve_most))
+#else
+# define V8_PRESERVE_MOST /* NOT SUPPORTED */
+#endif
+
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS)
# define V8_DEPRECATED(message) [[deprecated(message)]]
@@ -884,4 +934,10 @@ V8 shared library set USING_V8_SHARED.
#undef V8_HAS_CPP_ATTRIBUTE
+#if !defined(V8_STATIC_ROOTS)
+#define V8_STATIC_ROOTS_BOOL false
+#else
+#define V8_STATIC_ROOTS_BOOL true
+#endif
+
#endif // V8CONFIG_H_
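
A minimal usage sketch for the two attribute macros added above (the functions are
illustrative only):

    #include <v8config.h>

    // Safe for V8_CONST: no side effects and a result that cannot change
    // between calls, so the compiler may hoist and fold calls.
    V8_CONST int FixedShiftAmount() { return 3; }

    // Cold, void-returning helper called from hot code: keep it out of line
    // and avoid clobbering the caller's registers.
    V8_NOINLINE V8_PRESERVE_MOST void RecordRareEvent() { /* cold path */ }
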
diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl
index 5732d86218..40df0f013f 100644
--- a/deps/v8/infra/mb/gn_isolate_map.pyl
+++ b/deps/v8/infra/mb/gn_isolate_map.pyl
@@ -31,6 +31,10 @@
"label": "//test:v8_d8_default",
"type": "script",
},
+ "d8_pgo": {
+ "label": "//test:d8_pgo",
+ "type": "script",
+ },
"generate-bytecode-expectations": {
"label": "//test/unittests:generate-bytecode-expectations",
"type": "script",
@@ -72,7 +76,7 @@
"type": "script",
},
"unittests": {
- "label": "//test/unittests:unittests",
+ "label": "//test/unittests:v8_unittests",
"type": "script",
},
"fuchsia-unittests": {
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index b5d6231600..741ae615cb 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -52,6 +52,10 @@
'linux-v8-dr': 'release_x64',
},
'client.v8': {
+ # PGO
+ 'V8 Linux PGO instrumentation - builder' : 'builtins_profiling_x86',
+ 'V8 Linux64 PGO instrumentation - builder' : 'builtins_profiling_x64',
+
# Linux.
'V8 Linux - builder': 'release_x86_gcmole',
'V8 Linux - debug builder': 'debug_x86',
@@ -59,14 +63,13 @@
'V8 Linux - noi18n - debug builder': 'debug_x86_no_i18n',
'V8 Linux - verify csa - builder': 'release_x86_verify_csa',
# Linux64.
- 'V8 Linux64 - builder': 'release_x64',
+ 'V8 Linux64 - builder': 'release_x64_gcmole',
'V8 Linux64 - builder (goma cache silo)': 'release_x64',
'V8 Linux64 - builder (reclient)': 'release_x64_reclient',
'V8 Linux64 - builder (reclient compare)': 'release_x64_reclient',
'V8 Linux64 - debug builder': 'debug_x64',
'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space',
'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
- 'V8 Linux64 - heap sandbox - debug - builder': 'debug_x64_heap_sandbox',
'V8 Linux64 - internal snapshot - builder': 'release_x64_internal',
'V8 Linux64 - debug - header includes - builder': 'debug_x64_header_includes',
'V8 Linux64 - no sandbox - debug builder': 'debug_x64_no_sandbox',
@@ -80,6 +83,7 @@
'V8 Win32 - builder (reclient)': 'release_x86_minimal_symbols_reclient',
'V8 Win32 - builder (reclient compare)': 'release_x86_minimal_symbols_reclient',
'V8 Win32 - debug builder': 'debug_x86_minimal_symbols',
+ 'V8 Win32 - msvc - debug builder': 'debug_x86_msvc',
# TODO(machenbach): Remove after switching to x64 on infra side.
'V8 Win64 ASAN - builder': 'release_x64_asan_no_lsan',
'V8 Win64 - builder': 'release_x64_minimal_symbols',
@@ -103,14 +107,16 @@
# Sanitizers.
'V8 Linux64 ASAN - builder': 'release_x64_asan',
'V8 Linux64 TSAN - builder': 'release_x64_tsan',
+ 'V8 Linux64 TSAN - debug builder': 'debug_x64_tsan_minimal_symbols',
'V8 Linux64 TSAN - no-concurrent-marking - builder': 'release_x64_tsan_no_cm',
'V8 Linux - arm64 - sim - CFI - builder': 'release_simulate_arm64_cfi',
'V8 Linux - arm64 - sim - MSAN - builder': 'release_simulate_arm64_msan',
# FYI.
'V8 iOS - sim - builder': 'release_x64_ios_simulator',
- 'V8 Linux64 - arm64 - sim - heap sandbox - debug - builder': 'debug_x64_heap_sandbox_arm64_sim',
'V8 Linux64 - arm64 - sim - no pointer compression - builder':
'release_simulate_arm64_no_pointer_compression',
+ 'V8 Linux64 - coverage': 'release_x64_coverage',
+ 'V8 Linux64 - coverage - debug': 'debug_x64_coverage',
'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - disable runtime call stats - builder': 'release_x64_disable_runtime_call_stats',
@@ -125,9 +131,9 @@
'V8 Linux64 - cfi - builder': 'release_x64_cfi',
'V8 Linux64 UBSan - builder': 'release_x64_ubsan',
'V8 Linux - vtunejit': 'debug_x86_vtunejit',
- 'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
'V8 Linux64 - Fuzzilli - builder': 'release_x64_fuzzilli',
'V8 Linux64 - predictable - builder': 'release_x64_predictable',
+ 'V8 Linux64 - verify deterministic': 'release_x64_verify_deterministic',
'V8 Linux - full debug builder': 'full_debug_x86',
'V8 Mac64 - full debug builder': 'full_debug_x64',
'V8 Random Deopt Fuzzer - debug': 'debug_x64',
@@ -168,22 +174,28 @@
'V8 Clusterfuzz Linux64 UBSan - release builder':
'release_x64_ubsan_recover',
'V8 Clusterfuzz Linux64 ASAN sandbox testing - release builder':
- 'release_x64_asan_sandbox_testing',
+ 'release_x64_asan_symbolized_expose_memory_corruption',
},
'client.v8.perf' : {
'V8 Arm - builder - perf': 'official_arm',
+ 'V8 Arm - builder - pgo - perf': 'official_arm_pgo',
'V8 Android Arm - builder - perf': 'official_android_arm',
+ 'V8 Android Arm - builder - pgo - perf': 'official_android_arm_pgo',
'V8 Android Arm64 - builder - perf': 'official_android_arm64',
+ 'V8 Android Arm64 - builder - pgo - perf': 'official_android_arm64_pgo',
'V8 Linux - builder - perf': 'official_x86',
+ 'V8 Linux - builder - pgo - perf': 'official_x86_pgo',
'V8 Linux64 - builder - perf': 'official_x64',
+ 'V8 Linux64 - builder - pgo - perf': 'official_x64_pgo',
'V8 Mac Arm64 - builder - perf': 'official_mac_arm64',
+ 'V8 Mac Arm64 - builder - pgo - perf': 'official_mac_arm64_pgo',
},
'client.v8.ports': {
# Arm.
'V8 Arm - builder': 'release_arm',
'V8 Arm - debug builder': 'debug_arm',
'V8 Android Arm - builder': 'release_android_arm',
- 'V8 Linux - arm - sim - builder': 'release_simulate_arm',
+ 'V8 Linux - arm - sim - builder': 'release_simulate_arm_gcmole',
'V8 Linux - arm - sim - debug builder': 'debug_simulate_arm',
'V8 Linux - arm - sim - lite - builder': 'release_simulate_arm_lite',
'V8 Linux - arm - sim - lite - debug builder': 'debug_simulate_arm_lite',
@@ -191,7 +203,7 @@
'V8 Android Arm64 - builder': 'release_android_arm64',
'V8 Android Arm64 - debug builder': 'debug_android_arm64',
'V8 Arm64 - builder': 'release_arm64_hard_float',
- 'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64',
+ 'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64_gcmole',
'V8 Linux - arm64 - sim - debug builder': 'debug_simulate_arm64',
'V8 Linux - arm64 - sim - gc stress - builder': 'debug_simulate_arm64',
# Mips.
@@ -209,6 +221,7 @@
'v8_android_arm_compile_rel': 'release_android_arm',
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
'v8_android_arm64_n5x_compile_rel': 'release_android_arm64',
+ 'v8_android_arm64_d8_compile_rel': 'release_android_arm64',
'v8_fuchsia_compile_rel': 'release_x64_fuchsia_trybot',
'v8_ios_simulator': 'release_x64_ios_simulator',
'v8_linux_compile_rel': 'release_x86_gcmole_trybot',
@@ -225,6 +238,8 @@
'release_simulate_arm64_no_pointer_compression',
'v8_linux64_cppgc_non_default_compile_dbg': 'debug_x64_non_default_cppgc',
'v8_linux64_compile_dbg': 'debug_x64_trybot',
+ 'v8_linux64_coverage_dbg': 'debug_x64_coverage',
+ 'v8_linux64_coverage_rel': 'release_x64_coverage',
'v8_linux64_no_sandbox_compile_dbg': 'debug_x64_no_sandbox',
'v8_linux64_dict_tracking_compile_dbg': 'debug_x64_dict_tracking_trybot',
'v8_linux64_disable_runtime_call_stats_compile_rel': 'release_x64_disable_runtime_call_stats',
@@ -235,16 +250,13 @@
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_gcc_light_compile_dbg': 'debug_x64_gcc',
'v8_linux64_gcc_compile_rel': 'release_x64_gcc',
- 'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
- 'v8_linux64_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox',
'v8_linux64_minor_mc_compile_dbg': 'debug_x64_trybot',
- 'v8_linux_arm64_sim_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox_arm64_sim',
'v8_linux64_fyi_compile_rel': 'release_x64_test_features_trybot',
'v8_linux64_nodcheck_compile_rel': 'release_x64',
'v8_linux64_perfetto_compile_dbg': 'debug_x64_perfetto',
'v8_linux64_no_pointer_compression_compile_rel': 'release_x64_no_pointer_compression',
- 'v8_linux64_compile_rel': 'release_x64_test_features_trybot',
+ 'v8_linux64_compile_rel': 'release_x64_test_features_gcmole_trybot',
'v8_linux64_no_sandbox_compile_rel': 'release_x64_no_sandbox',
'v8_linux64_predictable_compile_rel': 'release_x64_predictable',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
@@ -259,15 +271,18 @@
'v8_linux_riscv32_compile_rel': 'release_simulate_riscv32',
'v8_linux64_riscv64_compile_rel': 'release_simulate_riscv64',
'v8_linux64_tsan_compile_rel': 'release_x64_tsan_minimal_symbols',
+ 'v8_linux64_tsan_compile_dbg': 'debug_x64_tsan_minimal_symbols',
'v8_linux64_tsan_no_cm_compile_rel': 'release_x64_tsan_no_cm',
'v8_linux64_tsan_isolates_compile_rel':
'release_x64_tsan_minimal_symbols',
'v8_linux64_ubsan_compile_rel': 'release_x64_ubsan_minimal_symbols',
+ 'v8_linux64_verify_deterministic_rel': 'release_x64_verify_deterministic',
'v8_odroid_arm_compile_rel': 'release_arm',
'v8_linux_torque_compare': 'torque_compare',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_compile_dbg': 'debug_x86_trybot',
'v8_win_compile_rel': 'release_x86_trybot',
+ 'v8_win_msvc_light_compile_dbg': 'debug_x86_msvc',
'v8_win64_asan_compile_rel': 'release_x64_asan_no_lsan',
'v8_win64_msvc_light_compile_rel': 'release_x64_msvc',
'v8_win64_compile_dbg': 'debug_x64_minimal_symbols',
@@ -284,15 +299,15 @@
'v8_mac64_compile_rel': 'release_x64_trybot',
'v8_mac64_dbg': 'debug_x64',
'v8_mac64_compile_dbg': 'debug_x64',
- 'v8_mac64_compile_full_compile_dbg': 'full_debug_x64',
+ 'v8_mac64_noopt_compile_dbg': 'full_debug_x64',
'v8_mac64_asan_compile_rel': 'release_x64_asan_no_lsan',
- 'v8_linux_arm_compile_rel': 'release_simulate_arm_trybot',
+ 'v8_linux_arm_compile_rel': 'release_simulate_arm_gcmole_trybot',
'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite',
'v8_linux_arm_lite_compile_rel': 'release_simulate_arm_lite_trybot',
'v8_linux_arm_compile_dbg': 'debug_simulate_arm',
'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm',
- 'v8_linux_arm64_compile_rel': 'release_simulate_arm64_trybot',
+ 'v8_linux_arm64_compile_rel': 'release_simulate_arm64_gcmole_trybot',
'v8_linux_arm64_cfi_compile_rel' : 'release_simulate_arm64_cfi',
'v8_linux_arm64_compile_dbg': 'debug_simulate_arm64',
'v8_linux_arm64_gc_stress_compile_dbg': 'debug_simulate_arm64',
@@ -399,20 +414,26 @@
'debug_bot', 'simulate_arm64', 'asan', 'lsan'],
# Release configs for simulators.
- 'release_simulate_arm': [
- 'release_bot', 'simulate_arm'],
+ 'release_simulate_arm_gcmole': [
+ 'release_bot', 'simulate_arm', 'gcmole'],
'release_simulate_arm_lite': [
'release_bot', 'simulate_arm', 'v8_enable_lite_mode'],
- 'release_simulate_arm_trybot': [
- 'release_trybot', 'simulate_arm'],
+ 'release_simulate_arm_gcmole_trybot': [
+ 'release_trybot', 'simulate_arm', 'gcmole'],
'release_simulate_arm_lite_trybot': [
'release_trybot', 'simulate_arm', 'v8_enable_lite_mode'],
+ 'release_simulate_arm_trybot': [
+ 'release_trybot', 'simulate_arm'],
'release_simulate_arm64': [
'release_bot', 'simulate_arm64'],
'release_simulate_arm64_cfi': [
'release_bot', 'simulate_arm64', 'v8_control_flow_integrity'],
+ 'release_simulate_arm64_gcmole': [
+ 'release_bot', 'simulate_arm64', 'gcmole'],
+ 'release_simulate_arm64_gcmole_trybot': [
+ 'release_trybot', 'simulate_arm64', 'gcmole'],
'release_simulate_arm64_no_pointer_compression': [
- 'release_bot', 'simulate_arm64_no_sandbox', 'dcheck_always_on',
+ 'release_bot', 'simulate_arm64', 'no_sandbox', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_disable_pointer_compression'],
'release_simulate_arm64_msan': [
'release_bot', 'simulate_arm64', 'msan'],
@@ -445,7 +466,7 @@
'debug_arm64': [
'debug_bot', 'arm64'],
'debug_arm64_no_pointer_compression': [
- 'debug_bot', 'arm64_no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
+ 'debug_bot', 'arm64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
'v8_disable_pointer_compression'],
'full_debug_arm64': [
'debug_bot', 'arm64', 'v8_full_debug'],
@@ -466,15 +487,29 @@
# Official configs for arm
'official_arm': [
- 'release_bot', 'arm', 'hard_float', 'official', 'disable_pgo'],
+ 'release_bot', 'arm', 'hard_float', 'official', 'disable_chrome_pgo'],
+ 'official_arm_pgo': [
+ 'release_bot', 'arm', 'hard_float', 'official', 'disable_chrome_pgo',
+ 'builtins_optimization'],
'official_android_arm': [
'release_bot', 'arm', 'android', 'minimal_symbols',
- 'android_strip_outputs', 'official', 'disable_pgo'],
+ 'android_strip_outputs', 'official', 'disable_chrome_pgo'],
+ 'official_android_arm_pgo': [
+ 'release_bot', 'arm', 'android', 'minimal_symbols',
+ 'android_strip_outputs', 'official', 'disable_chrome_pgo',
+ 'builtins_optimization'],
'official_android_arm64': [
'release_bot', 'arm64', 'android', 'minimal_symbols',
- 'android_strip_outputs', 'official', 'disable_pgo'],
+ 'android_strip_outputs', 'official', 'disable_chrome_pgo'],
+ 'official_android_arm64_pgo': [
+ 'release_bot', 'arm64', 'android', 'minimal_symbols',
+ 'android_strip_outputs', 'official', 'disable_chrome_pgo',
+ 'builtins_optimization'],
'official_mac_arm64': [
- 'release_bot', 'arm64', 'official', 'disable_pgo'],
+ 'release_bot', 'arm64', 'official', 'disable_chrome_pgo'],
+ 'official_mac_arm64_pgo': [
+ 'release_bot', 'arm64', 'official', 'disable_chrome_pgo',
+ 'builtins_optimization'],
# Release configs for x64.
'release_x64': [
@@ -492,6 +527,9 @@
'release_x64_asan_no_lsan_verify_heap_dchecks': [
'release_bot', 'x64', 'asan', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_heap'],
+ 'release_x64_asan_symbolized_expose_memory_corruption': [
+ 'release_bot', 'x64', 'asan', 'symbolized',
+ 'v8_expose_memory_corruption_api'],
'release_x64_asan_symbolized_verify_heap': [
'release_bot', 'x64', 'asan', 'lsan', 'symbolized',
'v8_verify_heap'],
@@ -499,9 +537,13 @@
'release_bot', 'x64', 'cfi'],
'release_x64_cfi_clusterfuzz': [
'release_bot', 'x64', 'cfi_clusterfuzz'],
+ 'release_x64_coverage': [
+ 'release_bot', 'x64', 'clang_coverage'],
'release_x64_fuzzilli': [
'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
'v8_verify_heap', 'v8_verify_csa', 'fuzzilli'],
+ 'release_x64_gcmole': [
+ 'release_bot', 'x64', 'gcmole'],
'release_x64_msvc': [
'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'],
'release_x64_correctness_fuzzer' : [
@@ -514,9 +556,6 @@
'release_trybot', 'x64', 'fuchsia'],
'release_x64_gcc': [
'release_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx'],
- 'release_x64_gcc_coverage': [
- 'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'lld',
- 'no_custom_libcxx', 'no_sysroot'],
'release_x64_ios_simulator': [
'release_bot', 'x64', 'ios_simulator'],
'release_x64_internal': [
@@ -526,14 +565,16 @@
'release_x64_minimal_symbols_reclient': [
'release_bot_reclient', 'x64', 'minimal_symbols'],
'release_x64_no_pointer_compression': [
- 'release_bot', 'x64_no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
- 'v8_disable_pointer_compression'],
+ 'release_bot', 'x64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks',
+ 'v8_enable_javascript_promise_hooks', 'v8_disable_pointer_compression'],
'release_x64_reclient': [
'release_bot_reclient', 'x64'],
'release_x64_no_sandbox': [
- 'release_bot', 'x64_no_sandbox'],
+ 'release_bot', 'x64', 'no_sandbox'],
'release_x64_trybot': [
'release_trybot', 'x64'],
+ 'release_x64_test_features_gcmole_trybot': [
+ 'release_trybot', 'x64', 'v8_enable_test_features', 'gcmole'],
'release_x64_test_features_trybot': [
'release_trybot', 'x64', 'v8_enable_test_features'],
'release_x64_tsan': [
@@ -553,15 +594,17 @@
'release_x64_verify_csa': [
'release_bot', 'x64', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
+ 'release_x64_verify_deterministic': [
+ 'release_bot', 'x64', 'v8_verify_deterministic'],
'release_x64_webassembly_disabled': [
'release_bot', 'x64', 'webassembly_disabled'],
- 'release_x64_asan_sandbox_testing': [
- 'release_bot', 'x64', 'asan', 'symbolized', 'v8_enable_sandbox_future',
- 'v8_expose_memory_corruption_api'],
# Official configs for x64.
'official_x64': [
- 'release_bot', 'x64', 'official', 'disable_pgo'],
+ 'release_bot', 'x64', 'official', 'disable_chrome_pgo'],
+ 'official_x64_pgo': [
+ 'release_bot', 'x64', 'official', 'disable_chrome_pgo',
+ 'builtins_optimization'],
# Debug configs for x64.
'debug_x64': [
@@ -573,6 +616,8 @@
'x64', 'asan'],
'debug_x64_conservative_stack_scanning': [
'debug_bot', 'x64', 'conservative_stack_scanning'],
+ 'debug_x64_coverage': [
+ 'debug_bot', 'x64', 'clang_coverage'],
'debug_x64_custom': [
'debug_bot', 'x64', 'v8_snapshot_custom'],
'debug_x64_external_code_space': [
@@ -583,10 +628,6 @@
'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
- 'debug_x64_heap_sandbox': [
- 'debug_bot', 'x64', 'v8_enable_sandbox_future', 'v8_expose_memory_corruption_api'],
- 'debug_x64_heap_sandbox_arm64_sim': [
- 'debug_bot', 'simulate_arm64', 'v8_enable_sandbox_future', 'v8_expose_memory_corruption_api'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
'debug_x64_non_default_cppgc': [
@@ -594,7 +635,7 @@
'debug_x64_perfetto': [
'debug_bot', 'x64', 'perfetto'],
'debug_x64_no_sandbox': [
- 'debug_bot', 'x64_no_sandbox'],
+ 'debug_bot', 'x64', 'no_sandbox'],
'debug_x64_single_generation': [
'debug_bot', 'x64', 'v8_enable_single_generation'],
'debug_x64_trybot': [
@@ -603,6 +644,9 @@
'debug_trybot', 'x64', 'v8_enable_dict_property_const_tracking'],
'debug_x64_trybot_custom': [
'debug_trybot', 'x64', 'v8_snapshot_custom'],
+ 'debug_x64_tsan_minimal_symbols': [
+ 'debug_bot_no_slow_dchecks', 'minimal_symbols', 'x64', 'dcheck_always_on',
+ 'tsan', 'v8_disable_verify_heap', 'v8_fast_mksnapshot'],
'full_debug_x64': [
'debug_bot', 'x64', 'v8_full_debug'],
@@ -613,6 +657,8 @@
'debug_bot', 'x86', 'asan', 'lsan'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
+ 'debug_x86_msvc': [
+ 'debug_bot_no_goma', 'x86', 'minimal_symbols', 'msvc'],
'debug_x86_no_i18n': [
'debug_bot', 'x86', 'v8_no_i18n'],
'debug_x86_trybot': [
@@ -650,11 +696,18 @@
# Official configs for x86.
'official_x86': [
- 'release_bot', 'x86', 'official', 'disable_pgo'],
+ 'release_bot', 'x86', 'official', 'disable_chrome_pgo'],
+ 'official_x86_pgo': [
+ 'release_bot', 'x86', 'official', 'disable_chrome_pgo',
+ 'builtins_optimization'],
# Torque compare test
'torque_compare': [
- 'release_bot', 'verify_torque']
+ 'release_bot', 'verify_torque'],
+
+ # PGO
+ 'builtins_profiling_x86': ['builtins_profiling', 'x86'],
+ 'builtins_profiling_x64': ['builtins_profiling', 'x64'],
},
'mixins': {
@@ -671,11 +724,7 @@
},
'arm64': {
- 'gn_args': 'target_cpu="arm64" v8_enable_sandbox=true',
- },
-
- 'arm64_no_sandbox': {
- 'gn_args': 'target_cpu="arm64" v8_enable_sandbox=false',
+ 'gn_args': 'target_cpu="arm64"',
},
'asan': {
@@ -683,6 +732,15 @@
'gn_args': 'is_asan=true',
},
+ 'builtins_profiling': {
+ 'mixins' : ['release_bot_reclient'],
+ 'gn_args': 'v8_enable_builtins_profiling=true',
+ },
+
+ 'builtins_optimization': {
+ 'gn_args': 'v8_enable_builtins_optimization=true',
+ },
+
'cfi': {
'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_icall=true '
@@ -699,13 +757,12 @@
'gn_args': 'is_clang=true',
},
- 'conservative_stack_scanning': {
- 'gn_args': 'v8_enable_conservative_stack_scanning=true '
- 'v8_enable_inner_pointer_resolution_mb=true',
+ 'clang_coverage': {
+ 'gn_args': 'use_clang_coverage=true',
},
- 'coverage': {
- 'gn_args': 'v8_code_coverage=true',
+ 'conservative_stack_scanning': {
+ 'gn_args': 'v8_enable_conservative_stack_scanning=true',
},
'dcheck_always_on': {
@@ -716,6 +773,12 @@
'gn_args': 'is_debug=true v8_enable_backtrace=true',
},
+ 'debug_bot_no_slow_dchecks': {
+ 'mixins': [
+ 'debug', 'shared', 'goma', 'v8_disable_slow_dchecks',
+ 'v8_optimized_debug', 'v8_enable_google_benchmark'],
+ },
+
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
@@ -741,7 +804,7 @@
'v8_enable_atomic_object_field_writes=false ',
},
- 'disable_pgo': {
+ 'disable_chrome_pgo': {
'gn_args': 'chrome_pgo_phase=0',
},
@@ -793,12 +856,12 @@
'msan': {
'mixins': ['v8_enable_test_features'],
- 'gn_args': 'is_msan=true msan_track_origins=2',
+ 'gn_args': 'is_msan=true msan_track_origins=2 instrumented_libraries_release="focal"',
},
'msan_no_origins': {
'mixins': ['v8_enable_test_features'],
- 'gn_args': 'is_msan=true msan_track_origins=0',
+ 'gn_args': 'is_msan=true msan_track_origins=0 instrumented_libraries_release="focal"',
},
'msvc': {
@@ -813,8 +876,8 @@
'gn_args': 'use_goma=false',
},
- 'no_sysroot': {
- 'gn_args': 'use_sysroot=false',
+ 'no_sandbox': {
+ 'gn_args': 'v8_enable_sandbox=false',
},
'non_default_cppgc': {
@@ -862,11 +925,7 @@
},
'simulate_arm64': {
- 'gn_args': 'target_cpu="x64" v8_target_cpu="arm64" v8_enable_sandbox=true',
- },
-
- 'simulate_arm64_no_sandbox': {
- 'gn_args': 'target_cpu="x64" v8_target_cpu="arm64" v8_enable_sandbox=false',
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
},
'simulate_loong64': {
@@ -938,8 +997,8 @@
'gn_args': 'v8_enable_runtime_call_stats=false',
},
- 'v8_enable_sandbox_future': {
- 'gn_args': 'v8_enable_sandbox_future=true',
+ 'v8_disable_verify_heap': {
+ 'gn_args': 'v8_enable_verify_heap=false',
},
'v8_expose_memory_corruption_api': {
@@ -954,6 +1013,10 @@
'gn_args': 'v8_enable_slow_dchecks=true',
},
+ 'v8_disable_slow_dchecks': {
+ 'gn_args': 'v8_enable_slow_dchecks=false',
+ },
+
'v8_enable_javascript_promise_hooks': {
'gn_args': 'v8_enable_javascript_promise_hooks=true',
},
@@ -989,6 +1052,10 @@
'gn_args': 'v8_enable_vtunejit=true v8_enable_vtunetracemark=true',
},
+ 'v8_fast_mksnapshot': {
+ 'gn_args': 'v8_enable_fast_mksnapshot=true',
+ },
+
'v8_full_debug': {
'gn_args': 'v8_optimized_debug=false',
},
@@ -1018,6 +1085,10 @@
'gn_args': 'v8_enable_verify_csa=true',
},
+ 'v8_verify_deterministic': {
+ 'gn_args': 'v8_verify_deterministic_mksnapshot=true',
+ },
+
's390x': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390x"',
},
@@ -1031,11 +1102,7 @@
},
'x64': {
- 'gn_args': 'target_cpu="x64" v8_enable_sandbox=true',
- },
-
- 'x64_no_sandbox': {
- 'gn_args': 'target_cpu="x64" v8_enable_sandbox=false',
+ 'gn_args': 'target_cpu="x64"',
},
'x86': {
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 19d5e18452..63d675bddd 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -89,7 +89,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -128,8 +129,8 @@
{'name': 'benchmarks', 'variant': 'extra'},
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
- {'name': 'test262', 'shards': 2},
- {'name': 'test262', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262', 'shards': 4},
+ {'name': 'test262', 'variant': 'extra', 'shards': 4},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
{
@@ -149,7 +150,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'variant': 'default', 'shards': 2},
],
},
@@ -179,7 +180,7 @@
],
'shards': 4,
},
- {'name': 'gcmole'},
+ {'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4},
],
},
'v8_linux_optional_rel': {
@@ -210,6 +211,7 @@
'--extra-flags',
'--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx',
],
+ 'shards': 2,
},
{
'name': 'v8testing',
@@ -237,6 +239,7 @@
'--extra-flags',
'--noenable-ssse3 --noenable-sse4-1 --noenable-avx',
],
+ 'shards': 2,
},
{
'name': 'v8testing',
@@ -258,6 +261,7 @@
'suffix': 'nosse4',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'],
+ 'shards': 2,
},
{
'name': 'v8testing',
@@ -275,7 +279,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2,
},
{
'name': 'v8testing',
@@ -325,6 +330,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
+ {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
],
},
##############################################################################
@@ -348,10 +354,28 @@
{'name': 'benchmarks'},
{'name': 'mozilla'},
{'name': 'optimize_for_size'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
],
},
+ 'v8_linux64_coverage_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'variant': 'default'},
+ {'name': 'v8testing', 'variant': 'future'},
+ ],
+ },
+ 'v8_linux64_coverage_rel': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'variant': 'default'},
+ {'name': 'v8testing', 'variant': 'future'},
+ ],
+ },
'v8_linux64_cppgc_non_default_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@@ -361,6 +385,14 @@
{'name': 'v8testing', 'shards': 3},
],
},
+ 'v8_linux64_css_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 5},
+ ],
+ },
'v8_linux64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@@ -368,12 +400,12 @@
},
'tests': [
{'name': 'benchmarks', 'shards': 2},
- {'name': 'benchmarks', 'variant': 'extra'},
+ {'name': 'benchmarks', 'variant': 'extra', 'shards': 2},
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'test262', 'variant': 'extra', 'shards': 9},
+ {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'test262', 'variant': 'extra', 'shards': 12},
{'name': 'v8testing', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'variant': 'minor_mc'},
@@ -383,6 +415,7 @@
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_linux64_dict_tracking_dbg': {
@@ -430,14 +463,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
- # Maglev.
- {'name': 'mjsunit', 'variant': 'maglev'},
- # Stress maglev.
- {'name': 'mjsunit', 'variant': 'stress_maglev'},
+ # Stress maglev-future.
+ {'name': 'mjsunit', 'variant': 'maglev_future'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Wasm write protect code space.
- {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'v8_linux64_gc_stress_custom_snapshot_dbg': {
@@ -481,22 +510,6 @@
{'name': 'v8testing'},
],
},
- 'v8_linux64_gcov_coverage': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing'},
- ],
- },
- 'v8_linux64_heap_sandbox_dbg': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 4},
- ],
- },
'v8_linux64_minor_mc_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@@ -512,11 +525,11 @@
},
'v8_linux64_msan_rel': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
+ 'os': 'Ubuntu-20.04',
},
'tests': [
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 5},
+ {'name': 'test262', 'variant': 'default', 'shards': 4},
+ {'name': 'v8testing', 'shards': 4},
],
},
'v8_linux64_nodcheck_rel': {
@@ -533,9 +546,9 @@
{'name': 'mozilla', 'variant': 'assert_types'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'perf_integration'},
- {'name': 'test262', 'shards': 2},
- {'name': 'test262', 'variant': 'assert_types', 'shards': 2},
- {'name': 'test262', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262', 'shards': 4},
+ {'name': 'test262', 'variant': 'assert_types'},
+ {'name': 'test262', 'variant': 'extra', 'shards': 4},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
@@ -543,6 +556,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_linux64_perfetto_dbg': {
@@ -589,7 +603,7 @@
{'name': 'mjsunit_sp_frame_access'},
{'name': 'optimize_for_size'},
{'name': 'test262', 'shards': 4},
- {'name': 'test262', 'variant': 'extra', 'shards': 3},
+ {'name': 'test262', 'variant': 'extra', 'shards': 4},
{'name': 'v8initializers'},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
@@ -599,6 +613,21 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
+ # GCMole.
+ {'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
+ {
+ 'name': 'gcmole_v2',
+ 'variant': 'x64',
+ 'suffix': 'test single host',
+ 'test_args': ['--test-run'],
+ },
+ {
+ 'name': 'gcmole_v3',
+ 'variant': 'x64',
+ 'suffix': 'test multi host',
+ 'test_args': ['--test-run'],
+ },
],
},
'v8_linux64_predictable_rel': {
@@ -630,11 +659,23 @@
{'name': 'mozilla', 'shards': 2},
{'name': 'test262', 'variant': 'default', 'shards': 5},
{'name': 'v8testing', 'shards': 6},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 5},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 2},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
],
},
+ 'v8_linux64_tsan_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'benchmarks', 'shards': 2},
+ {'name': 'mozilla', 'shards': 4},
+ {'name': 'test262', 'variant': 'default', 'shards': 5},
+ {'name': 'v8testing', 'shards': 12},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
+ ],
+ },
'v8_linux64_tsan_no_cm_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -699,14 +740,6 @@
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12},
],
},
- 'v8_linux_arm64_sim_heap_sandbox_dbg': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 14},
- ],
- },
'v8_linux_arm64_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -717,6 +750,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 14},
+ {'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
],
},
'v8_linux_arm64_cfi_rel': {
@@ -787,7 +821,7 @@
'v8_win_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Windows-7-SP1',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@@ -797,7 +831,7 @@
'v8_win_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Windows-7-SP1',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@@ -809,7 +843,7 @@
# Win64
'v8_win64_asan_rel': {
'swarming_dimensions' : {
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@@ -818,7 +852,7 @@
'v8_win64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@@ -830,7 +864,7 @@
'v8_win64_msvc_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@@ -841,7 +875,7 @@
'v8_win64_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@@ -855,7 +889,7 @@
'v8_mac64_asan_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -864,7 +898,7 @@
'v8_mac64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@@ -876,16 +910,25 @@
'v8_mac64_gc_stress_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6},
],
},
+ 'v8_mac64_noopt_dbg': {
+ 'swarming_dimensions' : {
+ 'cpu': 'x86-64',
+ 'os': 'Mac-12',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 6},
+ ],
+ },
'v8_mac64_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@@ -902,6 +945,9 @@
},
'tests': [
{'name': 'v8testing'},
+ # Maglev -- move to extra once more architectures are supported.
+ {'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_mac_arm64_dbg': {
@@ -912,6 +958,9 @@
},
'tests': [
{'name': 'v8testing'},
+ # Maglev -- move to extra once more architectures are supported.
+ {'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_mac_arm64_full_dbg': {
@@ -922,6 +971,9 @@
},
'tests': [
{'name': 'v8testing'},
+ # Maglev -- move to extra once more architectures are supported.
+ {'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_mac_arm64_no_pointer_compression_dbg': {
@@ -937,7 +989,7 @@
'v8_mac_arm64_sim_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -947,7 +999,7 @@
'v8_mac_arm64_sim_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -957,7 +1009,7 @@
'v8_mac_arm64_sim_nodcheck_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -1042,7 +1094,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1050,7 +1103,7 @@
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
- {'name': 'gcmole'},
+ {'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4},
],
},
'V8 Linux - arm64 - sim - CFI': {
@@ -1064,10 +1117,10 @@
},
'V8 Linux - arm64 - sim - MSAN': {
'swarming_dimensions': {
- 'os': 'Ubuntu-18.04',
+ 'os': 'Ubuntu-20.04',
},
'tests': [
- {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 4},
],
},
@@ -1086,9 +1139,9 @@
{'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
- {'name': 'test262', 'shards': 6},
+ {'name': 'test262', 'shards': 12},
{'name': 'test262', 'variant': 'code_serializer', 'shards': 2},
- {'name': 'test262', 'variant': 'extra', 'shards': 5},
+ {'name': 'test262', 'variant': 'extra', 'shards': 10},
{'name': 'v8testing', 'shards': 3},
{
'name': 'v8testing',
@@ -1096,7 +1149,7 @@
'test_args': ['--isolates'],
'shards': 4
},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 4},
# Nosse3.
{
'name': 'mozilla',
@@ -1107,7 +1160,8 @@
'name': 'test262',
'suffix': 'nosse3',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1125,7 +1179,8 @@
'name': 'test262',
'suffix': 'nossse3',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1143,7 +1198,8 @@
'name': 'test262',
'suffix': 'nosse4',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1161,7 +1217,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1203,7 +1260,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'variant': 'default'},
],
},
@@ -1224,7 +1281,7 @@
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing'},
],
},
@@ -1236,6 +1293,14 @@
{'name': 'v8testing'},
],
},
+ 'V8 Linux PGO instrumentation - builder' : {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'pgo_instrumentation'}
+ ],
+ },
'V8 Linux64': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
@@ -1253,7 +1318,7 @@
{'name': 'optimize_for_size'},
{'name': 'perf_integration'},
{'name': 'test262', 'shards': 2},
- {'name': 'test262', 'variant': 'assert_types'},
+ {'name': 'test262', 'variant': 'assert_types', 'shards': 2},
{'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'v8initializers'},
{'name': 'v8testing'},
@@ -1264,6 +1329,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
# Noavx.
{
'name': 'mozilla',
@@ -1274,13 +1340,28 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
'suffix': 'noavx',
'test_args': ['--extra-flags', '--noenable-avx']
},
+ # GCMole.
+ {'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
+ {
+ 'name': 'gcmole_v2',
+ 'variant': 'x64',
+ 'suffix': 'test single host',
+ 'test_args': ['--test-run'],
+ },
+ {
+ 'name': 'gcmole_v3',
+ 'variant': 'x64',
+ 'suffix': 'test multi host',
+ 'test_args': ['--test-run'],
+ },
],
},
'V8 Linux64 - cfi': {
@@ -1291,10 +1372,28 @@
{'name': 'benchmarks'},
{'name': 'mozilla'},
{'name': 'optimize_for_size'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
],
},
+ 'V8 Linux64 - coverage': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'variant': 'default'},
+ {'name': 'v8testing', 'variant': 'future'},
+ ],
+ },
+ 'V8 Linux64 - coverage - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'variant': 'default'},
+ {'name': 'v8testing', 'variant': 'future'},
+ ],
+ },
'V8 Linux64 - custom snapshot - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1318,15 +1417,16 @@
{'name': 'test262', 'shards': 7},
{'name': 'test262', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 4},
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
- {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_inlining', 'shards': 2},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
# Noavx.
{
'name': 'mozilla',
@@ -1337,7 +1437,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
- 'test_args': ['--extra-flags', '--noenable-avx']
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1380,14 +1481,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
- # Maglev.
- {'name': 'mjsunit', 'variant': 'maglev'},
- # Stress maglev.
- {'name': 'mjsunit', 'variant': 'stress_maglev'},
+ # Stress maglev-future.
+ {'name': 'mjsunit', 'variant': 'maglev_future'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Wasm write protect code space.
- {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 - cppgc-non-default - debug': {
@@ -1446,14 +1543,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
- # Maglev.
- {'name': 'mjsunit', 'variant': 'maglev'},
- # Stress maglev.
- {'name': 'mjsunit', 'variant': 'stress_maglev'},
+ # Stress maglev-future.
+ {'name': 'mjsunit', 'variant': 'maglev_future'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Wasm write protect code space.
- {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 gcc': {
@@ -1489,22 +1582,6 @@
},
],
},
- 'V8 Linux64 - gcov coverage': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux64 - heap sandbox - debug': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 2},
- ],
- },
'V8 Linux64 - internal snapshot': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1545,7 +1622,7 @@
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing'},
],
},
@@ -1568,6 +1645,14 @@
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
],
},
+ 'V8 Linux64 css - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 5},
+ ],
+ },
'V8 Linux64 GC Stress - custom snapshot': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1580,6 +1665,14 @@
},
],
},
+ 'V8 Linux64 PGO instrumentation - builder' : {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'pgo_instrumentation'}
+ ],
+ },
'V8 Linux64 TSAN': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1594,6 +1687,18 @@
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
],
},
+ 'V8 Linux64 TSAN - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'benchmarks', 'shards': 2},
+ {'name': 'mozilla', 'shards': 4},
+ {'name': 'test262', 'variant': 'default', 'shards': 5},
+ {'name': 'v8testing', 'shards': 12},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
+ ],
+ },
'V8 Linux64 TSAN - stress-incremental-marking': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1667,7 +1772,7 @@
'V8 Mac64': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@@ -1679,7 +1784,7 @@
'V8 Mac64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@@ -1691,7 +1796,7 @@
'V8 Mac64 ASAN': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 10},
@@ -1700,7 +1805,7 @@
'V8 Mac64 GC Stress': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6},
@@ -1720,6 +1825,9 @@
'tests': [
{'name': 'v8testing'},
{'name': 'v8testing', 'variant': 'extra'},
+ # Maglev -- move to extra once more architectures are supported.
+ {'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'V8 Mac - arm64 - debug': {
@@ -1736,6 +1844,9 @@
'tests': [
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ # Maglev -- move to extra once more architectures are supported.
+ {'name': 'mjsunit', 'variant': 'maglev'},
+ {'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'V8 Mac - arm64 - no pointer compression debug': {
@@ -1751,7 +1862,7 @@
'V8 Mac - arm64 - sim - debug': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1766,7 +1877,7 @@
'V8 Mac - arm64 - sim - release': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.15',
+ 'os': 'Mac-12',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1781,39 +1892,39 @@
'V8 Win32': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Windows-7-SP1',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing'},
],
},
'V8 Win32 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Windows-7-SP1',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 5},
],
},
'V8 Win64': {
'swarming_dimensions': {
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
],
},
'V8 Win64 - debug': {
'swarming_dimensions': {
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@@ -1824,17 +1935,17 @@
},
'V8 Win64 - msvc': {
'swarming_dimensions': {
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
],
},
'V8 Win64 ASAN': {
'swarming_dimensions': {
- 'os': 'Windows-10-19042',
+ 'os': 'Windows-10-19045',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@@ -1951,7 +2062,8 @@
'name': 'test262',
'suffix': 'armv8-a',
'variant': 'default',
- 'test_args': ['--extra-flags', '--enable-armv8']
+ 'test_args': ['--extra-flags', '--enable-armv8'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1970,6 +2082,7 @@
'suffix': 'novfp3',
'variant': 'default',
'test_args': ['--novfp3'],
+ 'shards': 2
},
{
'name': 'v8testing',
@@ -1977,6 +2090,8 @@
'test_args': ['--novfp3'],
'shards': 6
},
+ # GCMole.
+ {'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
],
},
'V8 Linux - arm - sim - debug': {
@@ -2055,9 +2170,10 @@
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ {'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
],
},
'V8 Linux - arm64 - sim - debug': {
@@ -2093,19 +2209,6 @@
},
],
},
- 'V8 Linux64 - arm64 - sim - heap sandbox - debug': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-18.04',
- },
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 7200,
- 'priority': 35,
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 14},
- ],
- },
'V8 Linux - loong64 - sim': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
@@ -2129,7 +2232,7 @@
'priority': 35,
},
'tests': [
- {'name': 'test262', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 4},
],
},
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 8912d7fb25..ebe2dd0dc5 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -10,19 +10,20 @@ include_rules = [
"-src/bigint",
"+src/bigint/bigint.h",
"-src/compiler",
- "+src/compiler/pipeline.h",
"+src/compiler/code-assembler.h",
+ "+src/compiler/turbofan.h",
"+src/compiler/wasm-compiler-definitions.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/basic-memory-chunk.h",
"+src/heap/code-range.h",
"+src/heap/combined-heap.h",
- "+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
# TODO(v8:10496): Don't expose so much (through transitive includes) outside
# of heap/.
+ "+src/heap/gc-tracer.h",
+ "+src/heap/gc-tracer-inl.h",
"+src/heap/heap.h",
"+src/heap/heap-verifier.h",
"+src/heap/heap-inl.h",
@@ -76,6 +77,7 @@ include_rules = [
"+starboard",
# Using cppgc inside v8 is not (yet) allowed.
"-include/cppgc",
+ "+include/cppgc/common.h",
"+include/cppgc/platform.h",
"+include/cppgc/source-location.h",
]
@@ -84,7 +86,8 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
"+include/libplatform/v8-tracing.h",
- "+perfetto/tracing.h"
+ "+perfetto/tracing/track_event.h",
+ "+perfetto/tracing/track_event_legacy.h"
],
"d8-platforms\.cc": [
"+include/libplatform/libplatform.h",
diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h
index 470ab95e18..31daa9a39c 100644
--- a/deps/v8/src/api/api-arguments-inl.h
+++ b/deps/v8/src/api/api-arguments-inl.h
@@ -11,51 +11,51 @@
#include "src/execution/vm-state-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/instance-type.h"
#include "src/objects/slots-inl.h"
+#include "v8-isolate.h"
namespace v8 {
namespace internal {
-void Object::VerifyApiCallResultType() {
#if DEBUG
- if (IsSmi()) return;
+bool Object::IsApiCallResultType() const {
+ if (IsSmi()) return true;
DCHECK(IsHeapObject());
- if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
- IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull())) {
- FATAL("API call returned invalid object");
- }
-#endif // DEBUG
+ return (IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
+ IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull());
}
+#endif // DEBUG
CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
template <typename T>
CustomArguments<T>::~CustomArguments() {
- slot_at(kReturnValueOffset).store(Object(kHandleZapValue));
+ slot_at(kReturnValueIndex).store(Object(kHandleZapValue));
}
template <typename T>
template <typename V>
-Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) const {
// Check the ReturnValue.
- FullObjectSlot slot = slot_at(kReturnValueOffset);
+ FullObjectSlot slot = slot_at(kReturnValueIndex);
// Nothing was set, return empty handle as per previous behaviour.
- if ((*slot).IsTheHole(isolate)) return Handle<V>();
- Handle<V> result = Handle<V>::cast(Handle<Object>(slot.location()));
- result->VerifyApiCallResultType();
- return result;
+ Object raw_object = *slot;
+ if (raw_object.IsTheHole(isolate)) return Handle<V>();
+ DCHECK(raw_object.IsApiCallResultType());
+ return Handle<V>::cast(Handle<Object>(slot.location()));
}
-inline JSObject PropertyCallbackArguments::holder() {
+inline JSObject PropertyCallbackArguments::holder() const {
return JSObject::cast(*slot_at(T::kHolderIndex));
}
-inline Object PropertyCallbackArguments::receiver() {
+inline Object PropertyCallbackArguments::receiver() const {
return *slot_at(T::kThisIndex);
}
-inline JSReceiver FunctionCallbackArguments::holder() {
+inline JSReceiver FunctionCallbackArguments::holder() const {
return JSReceiver::cast(*slot_at(T::kHolderIndex));
}
diff --git a/deps/v8/src/api/api-arguments.h b/deps/v8/src/api/api-arguments.h
index 39958964fd..9caf5decb0 100644
--- a/deps/v8/src/api/api-arguments.h
+++ b/deps/v8/src/api/api-arguments.h
@@ -6,6 +6,7 @@
#define V8_API_API_ARGUMENTS_H_
#include "include/v8-template.h"
+#include "src/builtins/builtins-utils.h"
#include "src/execution/isolate.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
@@ -24,7 +25,8 @@ class CustomArgumentsBase : public Relocatable {
template <typename T>
class CustomArguments : public CustomArgumentsBase {
public:
- static const int kReturnValueOffset = T::kReturnValueIndex;
+ static constexpr int kReturnValueIndex = T::kReturnValueIndex;
+ static_assert(T::kSize == sizeof(T));
~CustomArguments() override;
@@ -38,19 +40,20 @@ class CustomArguments : public CustomArgumentsBase {
: CustomArgumentsBase(isolate) {}
template <typename V>
- Handle<V> GetReturnValue(Isolate* isolate);
+ Handle<V> GetReturnValue(Isolate* isolate) const;
- inline Isolate* isolate() {
+ inline Isolate* isolate() const {
return reinterpret_cast<Isolate*>((*slot_at(T::kIsolateIndex)).ptr());
}
- inline FullObjectSlot slot_at(int index) {
+ inline FullObjectSlot slot_at(int index) const {
// This allows index == T::kArgsLength so "one past the end" slots
// can be retrieved for iterating purposes.
DCHECK_LE(static_cast<unsigned>(index),
static_cast<unsigned>(T::kArgsLength));
return FullObjectSlot(values_ + index);
}
+
Address values_[T::kArgsLength];
};
@@ -69,14 +72,14 @@ class PropertyCallbackArguments final
public:
using T = PropertyCallbackInfo<Value>;
using Super = CustomArguments<T>;
- static const int kArgsLength = T::kArgsLength;
- static const int kThisIndex = T::kThisIndex;
- static const int kHolderIndex = T::kHolderIndex;
- static const int kDataIndex = T::kDataIndex;
- static const int kReturnValueDefaultValueIndex =
+ static constexpr int kArgsLength = T::kArgsLength;
+ static constexpr int kThisIndex = T::kThisIndex;
+ static constexpr int kHolderIndex = T::kHolderIndex;
+ static constexpr int kDataIndex = T::kDataIndex;
+ static constexpr int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
- static const int kIsolateIndex = T::kIsolateIndex;
- static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
+ static constexpr int kIsolateIndex = T::kIsolateIndex;
+ static constexpr int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object data, Object self,
JSObject holder, Maybe<ShouldThrow> should_throw);
@@ -161,13 +164,13 @@ class PropertyCallbackArguments final
GenericNamedPropertyGetterCallback f, Handle<Name> name,
Handle<Object> info, Handle<Object> receiver = Handle<Object>());
- inline JSObject holder();
- inline Object receiver();
+ inline JSObject holder() const;
+ inline Object receiver() const;
#ifdef DEBUG
// This stores current value of Isolate::javascript_execution_counter().
// It's used for detecting whether JavaScript code was executed between
- // PropertyCallbackArguments's constructior and destructor.
+ // PropertyCallbackArguments's constructor and destructor.
uint32_t javascript_execution_counter_;
#endif // DEBUG
};
@@ -177,13 +180,21 @@ class FunctionCallbackArguments
public:
using T = FunctionCallbackInfo<Value>;
using Super = CustomArguments<T>;
- static const int kArgsLength = T::kArgsLength;
- static const int kHolderIndex = T::kHolderIndex;
- static const int kDataIndex = T::kDataIndex;
- static const int kReturnValueDefaultValueIndex =
+ static constexpr int kArgsLength = T::kArgsLength;
+ static constexpr int kArgsLengthWithReceiver = T::kArgsLengthWithReceiver;
+
+ static constexpr int kHolderIndex = T::kHolderIndex;
+ static constexpr int kDataIndex = T::kDataIndex;
+ static constexpr int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
- static const int kIsolateIndex = T::kIsolateIndex;
- static const int kNewTargetIndex = T::kNewTargetIndex;
+ static constexpr int kIsolateIndex = T::kIsolateIndex;
+ static constexpr int kNewTargetIndex = T::kNewTargetIndex;
+
+ static_assert(T::kThisValuesIndex == BuiltinArguments::kReceiverArgsOffset);
+ // Make sure all FunctionCallbackInfo constants are in sync.
+ static_assert(T::kImplicitArgsOffset == offsetof(T, implicit_args_));
+ static_assert(T::kValuesOffset == offsetof(T, values_));
+ static_assert(T::kLengthOffset == offsetof(T, length_));
FunctionCallbackArguments(Isolate* isolate, Object data, Object holder,
HeapObject new_target, Address* argv, int argc);
@@ -199,12 +210,17 @@ class FunctionCallbackArguments
inline Handle<Object> Call(CallHandlerInfo handler);
private:
- inline JSReceiver holder();
+ inline JSReceiver holder() const;
internal::Address* argv_;
- int argc_;
+ int const argc_;
};
+static_assert(BuiltinArguments::kNumExtraArgs ==
+ BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver);
+static_assert(BuiltinArguments::kNumExtraArgsWithReceiver ==
+ BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
+
} // namespace internal
} // namespace v8
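
Editor's note: the offsetof-based static_asserts added above follow a general pattern for keeping hand-written layout constants in a public header in sync with the struct they describe. The short, self-contained sketch below demonstrates the same pattern with invented names (PublicCallbackInfo is not a V8 type) and is illustrative only.

// Illustrative sketch only -- PublicCallbackInfo and its members are invented.
#include <cstddef>

// A public header exposes layout constants; the implementation pins them to
// the actual struct layout so any drift fails at compile time.
struct PublicCallbackInfo {
  static constexpr std::size_t kImplicitArgsOffset = 0;
  static constexpr std::size_t kValuesOffset = sizeof(void*);
  static constexpr std::size_t kLengthOffset = 2 * sizeof(void*);

  void** implicit_args_;
  void** values_;
  int length_;
};

// If anyone reorders the members or the constants above, these checks fail
// loudly at compile time instead of causing wrong pointer arithmetic later.
static_assert(PublicCallbackInfo::kImplicitArgsOffset ==
                  offsetof(PublicCallbackInfo, implicit_args_),
              "implicit_args_ layout drifted");
static_assert(PublicCallbackInfo::kValuesOffset ==
                  offsetof(PublicCallbackInfo, values_),
              "values_ layout drifted");
static_assert(PublicCallbackInfo::kLengthOffset ==
                  offsetof(PublicCallbackInfo, length_),
              "length_ layout drifted");

int main() { return 0; }
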
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index ac675a87fc..73be24cbe0 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -7,8 +7,10 @@
#include "include/v8-fast-api-calls.h"
#include "src/api/api.h"
+#include "src/common/assert-scope.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/microtask-queue.h"
+#include "src/flags/flags.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/foreign-inl.h"
@@ -51,7 +53,10 @@ inline v8::internal::Handle<v8::internal::Object> FromCData(
template <class From, class To>
inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
DCHECK(obj.is_null() || (obj->IsSmi() || !obj->IsTheHole()));
- return Local<To>(reinterpret_cast<To*>(obj.location()));
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ if (obj.is_null()) return Local<To>();
+#endif
+ return Local<To>(internal::ValueHelper::SlotAsValue<To>(obj.location()));
}
// Implementations of ToLocal
@@ -61,6 +66,8 @@ inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
return Convert<v8::internal::From, v8::To>(obj); \
}
+TO_LOCAL_LIST(MAKE_TO_LOCAL)
+
#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
@@ -68,65 +75,51 @@ inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}
-MAKE_TO_LOCAL(ToLocal, AccessorPair, debug::AccessorPair)
-MAKE_TO_LOCAL(ToLocal, Context, Context)
-MAKE_TO_LOCAL(ToLocal, Object, Value)
-MAKE_TO_LOCAL(ToLocal, Module, Module)
-MAKE_TO_LOCAL(ToLocal, Name, Name)
-MAKE_TO_LOCAL(ToLocal, String, String)
-MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
-MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
-MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
-MAKE_TO_LOCAL(ToLocal, JSObject, Object)
-MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
-MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, JSMap, Map)
-MAKE_TO_LOCAL(ToLocal, JSSet, Set)
-MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
-MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
-MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
-MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
-MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
-MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
-
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
-MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
-MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
-MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
-MAKE_TO_LOCAL(MessageToLocal, Object, Message)
-MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
-MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
-MAKE_TO_LOCAL(NumberToLocal, Object, Number)
-MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
-MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
-MAKE_TO_LOCAL(ToLocal, BigInt, BigInt)
-MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
-MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
-MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
-MAKE_TO_LOCAL(FixedArrayToLocal, FixedArray, FixedArray)
-MAKE_TO_LOCAL(PrimitiveArrayToLocal, FixedArray, PrimitiveArray)
-MAKE_TO_LOCAL(ToLocal, ScriptOrModule, ScriptOrModule)
-
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
+#undef TO_LOCAL_LIST
// Implementations of OpenHandle
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
- const v8::From* that, bool allow_empty_handle) { \
- DCHECK(allow_empty_handle || that != nullptr); \
- DCHECK(that == nullptr || \
- v8::internal::Object( \
- *reinterpret_cast<const v8::internal::Address*>(that)) \
- .Is##To()); \
- return v8::internal::Handle<v8::internal::To>( \
- reinterpret_cast<v8::internal::Address*>( \
- const_cast<v8::From*>(that))); \
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ DCHECK(allow_empty_handle || \
+ that != v8::internal::ValueHelper::EmptyValue<v8::From>()); \
+ DCHECK( \
+ that == v8::internal::ValueHelper::EmptyValue<v8::From>() || \
+ v8::internal::Object(v8::internal::ValueHelper::ValueAsAddress(that)) \
+ .Is##To()); \
+ if (that == v8::internal::ValueHelper::EmptyValue<v8::From>()) { \
+ return v8::internal::Handle<v8::internal::To>::null(); \
+ } \
+ return v8::internal::Handle<v8::internal::To>( \
+ v8::HandleScope::CreateHandleForCurrentIsolate( \
+ reinterpret_cast<v8::internal::Address>(that))); \
}
+#else
+
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ DCHECK(allow_empty_handle || \
+ that != v8::internal::ValueHelper::EmptyValue<v8::From>()); \
+ DCHECK( \
+ that == v8::internal::ValueHelper::EmptyValue<v8::From>() || \
+ v8::internal::Object(v8::internal::ValueHelper::ValueAsAddress(that)) \
+ .Is##To()); \
+ return v8::internal::Handle<v8::internal::To>( \
+ reinterpret_cast<v8::internal::Address*>( \
+ const_cast<v8::From*>(that))); \
+ }
+
+#endif
+
OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
@@ -150,12 +143,13 @@ class V8_NODISCARD CallDepthScope {
isolate_->thread_local_top()->IncrementCallDepth(this);
isolate_->set_next_v8_call_is_safe_for_termination(false);
if (!context.IsEmpty()) {
- i::Handle<i::Context> env = Utils::OpenHandle(*context);
+ i::DisallowGarbageCollection no_gc;
+ i::Context env = *Utils::OpenHandle(*context);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
if (isolate->context().is_null() ||
- isolate->context().native_context() != env->native_context()) {
+ isolate->context().native_context() != env.native_context()) {
impl->SaveContext(isolate->context());
- isolate->set_context(*env);
+ isolate->set_context(env);
did_enter_context_ = true;
}
}
@@ -207,7 +201,8 @@ class V8_NODISCARD CallDepthScope {
bool did_perform_microtask_checkpoint =
isolate_->thread_local_top()->CallDepthIsZero() && do_callback &&
microtask_queue &&
- microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto;
+ microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto &&
+ !isolate_->is_execution_terminating();
return !did_perform_microtask_checkpoint ||
isolate_->heap()->weak_refs_keep_during_job().IsUndefined(isolate_);
}
diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h
index 149dd0555a..bcad7df288 100644
--- a/deps/v8/src/api/api-macros.h
+++ b/deps/v8/src/api/api-macros.h
@@ -41,7 +41,8 @@
#define ENTER_V8_BASIC(i_isolate) \
/* Embedders should never enter V8 after terminating it */ \
- DCHECK(!i_isolate->is_execution_terminating()); \
+ DCHECK_IMPLIES(i::v8_flags.strict_termination_checks, \
+ !i_isolate->is_execution_terminating()); \
i::VMState<v8::OTHER> __state__((i_isolate))
#define ENTER_V8_HELPER_INTERNAL(i_isolate, context, class_name, \
@@ -91,29 +92,19 @@
bailout_value, HandleScopeClass, false); \
i::DisallowJavascriptExecutionDebugOnly __no_script__((i_isolate))
-#define DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate) \
+// Lightweight version for APIs that don't require an active context.
+#define DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
i::DisallowJavascriptExecutionDebugOnly __no_script__((i_isolate)); \
i::DisallowExceptions __no_exceptions__((i_isolate))
-// Lightweight version for APIs that don't require an active context.
-#define DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
- /* Embedders should never enter V8 after terminating it */ \
- DCHECK(!i_isolate->is_execution_terminating()); \
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate)
-
#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate)); \
DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate)
-// Used instead of ENTER_V8_NO_SCRIPT_NO_EXCEPTION where the V8 Api is entered
-// during termination sequences.
-#define ENTER_V8_MAYBE_TEARDOWN(i_isolate) \
- i::VMState<v8::OTHER> __state__((i_isolate)); \
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate)
-
-#define ENTER_V8_FOR_NEW_CONTEXT(i_isolate) \
- DCHECK(!(i_isolate)->is_execution_terminating()); \
- i::VMState<v8::OTHER> __state__((i_isolate)); \
+#define ENTER_V8_FOR_NEW_CONTEXT(i_isolate) \
+ DCHECK_IMPLIES(i::v8_flags.strict_termination_checks, \
+ !(i_isolate)->is_execution_terminating()); \
+ i::VMState<v8::OTHER> __state__((i_isolate)); \
i::DisallowExceptions __no_exceptions__((i_isolate))
#else // DEBUG
#define ENTER_V8_NO_SCRIPT(i_isolate, context, class_name, function_name, \
@@ -122,14 +113,10 @@
bailout_value, HandleScopeClass, false)
#define DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate)
-#define DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate)
#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate));
-#define ENTER_V8_MAYBE_TEARDOWN(i_isolate) \
- i::VMState<v8::OTHER> __state__((i_isolate));
-
#define ENTER_V8_FOR_NEW_CONTEXT(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate));
#endif // DEBUG
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index d0b2987234..05a883f2d5 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -83,7 +83,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(getter)),
Object);
- Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<JSFunction>::cast(getter)->set_code(*trampoline);
}
if (setter->IsFunctionTemplateInfo() &&
@@ -93,7 +93,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(setter)),
Object);
- Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<JSFunction>::cast(setter)->set_code(*trampoline);
}
RETURN_ON_EXCEPTION(
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index da75db119b..e286ccd254 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -38,6 +38,7 @@
#include "src/baseline/baseline-batch-compiler.h"
#include "src/builtins/accessors.h"
#include "src/builtins/builtins-utils.h"
+#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/script-details.h"
@@ -45,10 +46,6 @@
#include "src/common/globals.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/date/date.h"
-#include "src/objects/primitive-heap-object.h"
-#if V8_ENABLE_WEBASSEMBLY
-#include "src/debug/debug-wasm-objects.h"
-#endif // V8_ENABLE_WEBASSEMBLY
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/embedder-state.h"
@@ -64,7 +61,6 @@
#include "src/handles/persistent-handles.h"
#include "src/handles/shared-object-conveyor-handles.h"
#include "src/handles/traced-handles.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/safepoint.h"
@@ -85,6 +81,8 @@
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-object.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -95,6 +93,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/primitive-heap-object.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/property-details.h"
#include "src/objects/property.h"
@@ -115,6 +114,7 @@
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/tick-sample.h"
#include "src/regexp/regexp-utils.h"
+#include "src/roots/static-roots.h"
#include "src/runtime/runtime.h"
#include "src/sandbox/external-pointer.h"
#include "src/sandbox/sandbox.h"
@@ -127,10 +127,11 @@
#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
#include "src/utils/detachable-vector.h"
+#include "src/utils/identity-map.h"
#include "src/utils/version.h"
-#include "src/web-snapshot/web-snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/value-type.h"
@@ -143,6 +144,7 @@
#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
#include <signal.h>
+#include <unistd.h>
#if V8_ENABLE_WEBASSEMBLY
#include "include/v8-wasm-trap-handler-posix.h"
@@ -337,7 +339,7 @@ void Utils::ReportOOMFailure(i::Isolate* i_isolate, const char* location,
#ifdef V8_FUZZILLI
// Ignore OOM crashes for fuzzing but exit with an error such that
// samples are discarded by Fuzzilli.
- exit(1);
+ _exit(1);
#else
base::OS::Abort();
#endif // V8_FUZZILLI
@@ -358,19 +360,12 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
namespace {
#ifdef V8_ENABLE_SANDBOX
-// ArrayBufferAllocator to use when sandboxed pointers are used in which case
-// all ArrayBuffer backing stores need to be allocated inside the sandbox.
-// Note, the current implementation is extremely inefficient as it uses the
-// BoundedPageAllocator. In the future, we'll need a proper allocator
-// implementation.
+// ArrayBufferAllocator to use when the sandbox is enabled in which case all
+// ArrayBuffer backing stores need to be allocated inside the sandbox.
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
- ArrayBufferAllocator() { CHECK(page_allocator_); }
-
void* Allocate(size_t length) override {
- return page_allocator_->AllocatePages(nullptr, RoundUp(length, page_size_),
- page_size_,
- PageAllocator::kReadWrite);
+ return allocator_->Allocate(length);
}
void* AllocateUninitialized(size_t length) override {
@@ -378,12 +373,136 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
}
void Free(void* data, size_t length) override {
- page_allocator_->FreePages(data, RoundUp(length, page_size_));
+ return allocator_->Free(data);
}
private:
- PageAllocator* page_allocator_ = internal::GetArrayBufferPageAllocator();
- const size_t page_size_ = page_allocator_->AllocatePageSize();
+ // Backend allocator shared by all ArrayBufferAllocator instances. This way,
+  // there is a single region of virtual address space reserved inside the
+ // sandbox from which all ArrayBufferAllocators allocate their memory,
+ // instead of each allocator creating their own region, which may cause
+ // address space exhaustion inside the sandbox.
+ // TODO(chromium:1340224): replace this with a more efficient allocator.
+ class BackendAllocator {
+ public:
+ BackendAllocator() {
+ CHECK(i::GetProcessWideSandbox()->is_initialized());
+ VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space();
+ constexpr size_t max_backing_memory_size = 8ULL * i::GB;
+ constexpr size_t min_backing_memory_size = 1ULL * i::GB;
+ size_t backing_memory_size = max_backing_memory_size;
+ i::Address backing_memory_base = 0;
+ while (!backing_memory_base &&
+ backing_memory_size >= min_backing_memory_size) {
+ backing_memory_base = vas->AllocatePages(
+ VirtualAddressSpace::kNoHint, backing_memory_size, kChunkSize,
+ PagePermissions::kNoAccess);
+ if (!backing_memory_base) {
+ backing_memory_size /= 2;
+ }
+ }
+ if (!backing_memory_base) {
+ i::V8::FatalProcessOutOfMemory(
+ nullptr,
+ "Could not reserve backing memory for ArrayBufferAllocators");
+ }
+ DCHECK(IsAligned(backing_memory_base, kChunkSize));
+
+ region_alloc_ = std::make_unique<base::RegionAllocator>(
+ backing_memory_base, backing_memory_size, kAllocationGranularity);
+ end_of_accessible_region_ = region_alloc_->begin();
+
+    // Install an on-merge callback to discard or decommit unused pages.
+ region_alloc_->set_on_merge_callback([this](i::Address start,
+ size_t size) {
+ mutex_.AssertHeld();
+ VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space();
+ i::Address end = start + size;
+ if (end == region_alloc_->end() &&
+ start <= end_of_accessible_region_ - kChunkSize) {
+ // Can shrink the accessible region.
+ i::Address new_end_of_accessible_region = RoundUp(start, kChunkSize);
+ size_t size =
+ end_of_accessible_region_ - new_end_of_accessible_region;
+ CHECK(vas->DecommitPages(new_end_of_accessible_region, size));
+ end_of_accessible_region_ = new_end_of_accessible_region;
+ } else if (size >= 2 * kChunkSize) {
+ // Can discard pages. The pages stay accessible, so the size of the
+ // accessible region doesn't change.
+ i::Address chunk_start = RoundUp(start, kChunkSize);
+ i::Address chunk_end = RoundDown(start + size, kChunkSize);
+ CHECK(vas->DiscardSystemPages(chunk_start, chunk_end - chunk_start));
+ }
+ });
+ }
+
+ ~BackendAllocator() {
+ // The sandbox may already have been torn down, in which case there's no
+ // need to free any memory.
+ if (i::GetProcessWideSandbox()->is_initialized()) {
+ VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space();
+ vas->FreePages(region_alloc_->begin(), region_alloc_->size());
+ }
+ }
+
+ BackendAllocator(const BackendAllocator&) = delete;
+ BackendAllocator& operator=(const BackendAllocator&) = delete;
+
+ void* Allocate(size_t length) {
+ base::MutexGuard guard(&mutex_);
+
+ length = RoundUp(length, kAllocationGranularity);
+ i::Address region = region_alloc_->AllocateRegion(length);
+ if (region == base::RegionAllocator::kAllocationFailure) return nullptr;
+
+ // Check if the memory is inside the accessible region. If not, grow it.
+ i::Address end = region + length;
+ size_t length_to_memset = length;
+ if (end > end_of_accessible_region_) {
+ VirtualAddressSpace* vas = i::GetProcessWideSandbox()->address_space();
+ i::Address new_end_of_accessible_region = RoundUp(end, kChunkSize);
+ size_t size = new_end_of_accessible_region - end_of_accessible_region_;
+ if (!vas->SetPagePermissions(end_of_accessible_region_, size,
+ PagePermissions::kReadWrite)) {
+ CHECK(region_alloc_->FreeRegion(region));
+ return nullptr;
+ }
+
+ // The pages that were inaccessible are guaranteed to be zeroed, so only
+ // memset until the previous end of the accessible region.
+ length_to_memset = end_of_accessible_region_ - region;
+ end_of_accessible_region_ = new_end_of_accessible_region;
+ }
+
+ void* mem = reinterpret_cast<void*>(region);
+ memset(mem, 0, length_to_memset);
+ return mem;
+ }
+
+ void Free(void* data) {
+ base::MutexGuard guard(&mutex_);
+ region_alloc_->FreeRegion(reinterpret_cast<i::Address>(data));
+ }
+
+ static BackendAllocator* SharedInstance() {
+ static base::LeakyObject<BackendAllocator> instance;
+ return instance.get();
+ }
+
+ private:
+ // Use a region allocator with a "page size" of 128 bytes as a reasonable
+ // compromise between the number of regions it has to manage and the amount
+ // of memory wasted due to rounding allocation sizes up to the page size.
+ static constexpr size_t kAllocationGranularity = 128;
+ // The backing memory's accessible region is grown in chunks of this size.
+ static constexpr size_t kChunkSize = 1 * i::MB;
+
+ std::unique_ptr<base::RegionAllocator> region_alloc_;
+ size_t end_of_accessible_region_;
+ base::Mutex mutex_;
+ };
+
+ BackendAllocator* allocator_ = BackendAllocator::SharedInstance();
};
#else
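// Usage sketch: how an embedder typically ends up with one of these
// allocators. NewDefaultAllocator() is assumed to hand back the sandbox-aware
// allocator above when V8_ENABLE_SANDBOX is set, and the regular allocator
// otherwise; the embedder owns the returned object.
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator =
    v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
// ... use the isolate ...
// isolate->Dispose(); delete create_params.array_buffer_allocator;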
@@ -429,7 +548,7 @@ struct SnapshotCreatorData {
SnapshotCreator::SnapshotCreator(Isolate* v8_isolate,
const intptr_t* external_references,
- StartupData* existing_snapshot) {
+ const StartupData* existing_snapshot) {
SnapshotCreatorData* data = new SnapshotCreatorData(v8_isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i_isolate->set_array_buffer_allocator(&data->allocator_);
@@ -451,7 +570,7 @@ SnapshotCreator::SnapshotCreator(Isolate* v8_isolate,
}
SnapshotCreator::SnapshotCreator(const intptr_t* external_references,
- StartupData* existing_snapshot)
+ const StartupData* existing_snapshot)
: SnapshotCreator(Isolate::Allocate(), external_references,
existing_snapshot) {}
@@ -617,7 +736,7 @@ StartupData SnapshotCreator::CreateBlob(
i::Snapshot::ClearReconstructableDataForSerialization(
i_isolate, function_code_handling == FunctionCodeHandling::kClear);
- i::SafepointKind safepoint_kind = i_isolate->has_shared_heap()
+ i::SafepointKind safepoint_kind = i_isolate->has_shared_space()
? i::SafepointKind::kGlobal
: i::SafepointKind::kIsolate;
i::SafepointScope safepoint_scope(i_isolate, safepoint_kind);
@@ -796,10 +915,11 @@ i::Address* GlobalizeTracedReference(i::Isolate* i_isolate, i::Address* obj,
Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
"the address slot must be not null");
#endif
- auto result = i_isolate->traced_handles()->Create(*obj, slot, store_mode);
+ auto obj_addr = internal::ValueHelper::ValueAsAddress(obj);
+ auto result = i_isolate->traced_handles()->Create(obj_addr, slot, store_mode);
#ifdef VERIFY_HEAP
if (i::v8_flags.verify_heap) {
- i::Object(*obj).ObjectVerify(i_isolate);
+ i::Object(obj_addr).ObjectVerify(i_isolate);
}
#endif // VERIFY_HEAP
return result.location();
@@ -818,6 +938,41 @@ void DisposeTracedReference(internal::Address* location) {
TracedHandles::Destroy(location);
}
+#if V8_STATIC_ROOTS_BOOL
+
+// Initialize static root constants exposed in v8-internal.h.
+
+namespace {
+constexpr InstanceTypeChecker::RootIndexRange kStringMapRange =
+ *InstanceTypeChecker::UniqueMapRangeOfInstanceTypeRange(FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+constexpr Tagged_t kFirstStringMapPtr =
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>(kStringMapRange.first)];
+constexpr Tagged_t kLastStringMapPtr =
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>(
+ kStringMapRange.second)];
+} // namespace
+
+#define EXPORTED_STATIC_ROOTS_MAPPING(V) \
+ V(UndefinedValue, i::StaticReadOnlyRoot::kUndefinedValue) \
+ V(NullValue, i::StaticReadOnlyRoot::kNullValue) \
+ V(TrueValue, i::StaticReadOnlyRoot::kTrueValue) \
+ V(FalseValue, i::StaticReadOnlyRoot::kFalseValue) \
+ V(EmptyString, i::StaticReadOnlyRoot::kempty_string) \
+ V(TheHoleValue, i::StaticReadOnlyRoot::kTheHoleValue) \
+ V(FirstStringMap, kFirstStringMapPtr) \
+ V(LastStringMap, kLastStringMapPtr)
+
+static_assert(std::is_same<Internals::Tagged_t, Tagged_t>::value);
+#define DEF_STATIC_ROOT(name, internal_value) \
+ const Internals::Tagged_t Internals::StaticReadOnlyRoot::k##name = \
+ internal_value;
+EXPORTED_STATIC_ROOTS_MAPPING(DEF_STATIC_ROOT)
+#undef DEF_STATIC_ROOT
+#undef EXPORTED_STATIC_ROOTS_MAPPING
+
+#endif // V8_STATIC_ROOTS_BOOL
+
} // namespace internal
namespace api_internal {
@@ -930,6 +1085,15 @@ i::Address* HandleScope::CreateHandle(i::Isolate* i_isolate, i::Address value) {
return i::HandleScope::CreateHandle(i_isolate, value);
}
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+
+i::Address* HandleScope::CreateHandleForCurrentIsolate(i::Address value) {
+ i::Isolate* i_isolate = i::Isolate::Current();
+ return i::HandleScope::CreateHandle(i_isolate, value);
+}
+
+#endif
+
EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
escape_slot_ = CreateHandle(
@@ -1013,10 +1177,7 @@ void Context::Enter() {
i::DisallowGarbageCollection no_gc;
i::Context env = *Utils::OpenHandle(this);
i::Isolate* i_isolate = env.GetIsolate();
- // TODO(cbruni): Use ENTER_V8_NO_SCRIPT_NO_EXCEPTION which also checks
- // Isolate::is_execution_terminating
- // ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- ENTER_V8_MAYBE_TEARDOWN(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::HandleScopeImplementer* impl = i_isolate->handle_scope_implementer();
impl->EnterContext(env);
impl->SaveContext(i_isolate->context());
@@ -1026,7 +1187,7 @@ void Context::Enter() {
void Context::Exit() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* i_isolate = env->GetIsolate();
- ENTER_V8_MAYBE_TEARDOWN(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::HandleScopeImplementer* impl = i_isolate->handle_scope_implementer();
if (!Utils::ApiCheck(impl->LastEnteredContextWas(*env), "v8::Context::Exit()",
"Cannot exit non-entered context")) {
@@ -1069,7 +1230,7 @@ static i::Handle<i::EmbedderDataArray> EmbedderDataFor(Context* context,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
i::Isolate* i_isolate = env->GetIsolate();
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
bool ok = Utils::ApiCheck(env->IsNativeContext(), location,
"Not a native context") &&
Utils::ApiCheck(index >= 0, location, "Negative index");
@@ -1942,30 +2103,6 @@ void ObjectTemplate::SetCodeLike() {
// Internally, UnboundScript and UnboundModuleScript are SharedFunctionInfos,
// and Script is a JSFunction.
-namespace {
-inline Local<Value> GetSharedFunctionInfoSourceMappingURL(
- i::Isolate* isolate, i::Handle<i::SharedFunctionInfo> obj) {
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (obj->script().IsScript()) {
- i::Object url = i::Script::cast(obj->script()).source_mapping_url();
- return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
- } else {
- return Local<String>();
- }
-}
-
-inline Local<Value> GetSharedFunctionInfoSourceURL(
- i::Isolate* isolate, i::Handle<i::SharedFunctionInfo> obj) {
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (obj->script().IsScript()) {
- i::Object url = i::Script::cast(obj->script()).source_url();
- return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
- } else {
- return Local<String>();
- }
-}
-} // namespace
-
ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
BufferPolicy buffer_policy_)
: data(data_),
@@ -1986,9 +2123,10 @@ ScriptCompiler::StreamedSource::StreamedSource(
ScriptCompiler::StreamedSource::~StreamedSource() = default;
Local<Script> UnboundScript::BindToCurrentContext() {
- auto function_info =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = function_info->GetIsolate();
+ i::Handle<i::SharedFunctionInfo> function_info = Utils::OpenHandle(this);
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is gone.
+ DCHECK(!function_info->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*function_info);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSFunction> function =
i::Factory::JSFunctionBuilder{i_isolate, function_info,
@@ -1998,18 +2136,23 @@ Local<Script> UnboundScript::BindToCurrentContext() {
}
int UnboundScript::GetId() const {
- auto function_info = i::SharedFunctionInfo::cast(*Utils::OpenHandle(this));
- API_RCS_SCOPE(function_info.GetIsolate(), UnboundScript, GetId);
- return i::Script::cast(function_info.script()).id();
+ i::Handle<i::SharedFunctionInfo> function_info = Utils::OpenHandle(this);
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is gone.
+ DCHECK(!function_info->InReadOnlySpace());
+ API_RCS_SCOPE(i::GetIsolateFromWritableObject(*function_info), UnboundScript,
+ GetId);
+ return i::Script::cast(function_info->script()).id();
}
int UnboundScript::GetLineNumber(int code_pos) {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- API_RCS_SCOPE(i_isolate, UnboundScript, GetLineNumber);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ API_RCS_SCOPE(i_isolate, UnboundScript, GetLineNumber);
i::Handle<i::Script> script(i::Script::cast(obj->script()), i_isolate);
return i::Script::GetLineNumber(script, code_pos);
} else {
@@ -2018,12 +2161,14 @@ int UnboundScript::GetLineNumber(int code_pos) {
}
int UnboundScript::GetColumnNumber(int code_pos) {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- API_RCS_SCOPE(i_isolate, UnboundScript, GetColumnNumber);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ API_RCS_SCOPE(i_isolate, UnboundScript, GetColumnNumber);
i::Handle<i::Script> script(i::Script::cast(obj->script()), i_isolate);
return i::Script::GetColumnNumber(script, code_pos);
} else {
@@ -2032,12 +2177,14 @@ int UnboundScript::GetColumnNumber(int code_pos) {
}
Local<Value> UnboundScript::GetScriptName() {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- API_RCS_SCOPE(i_isolate, UnboundScript, GetName);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ API_RCS_SCOPE(i_isolate, UnboundScript, GetName);
i::Object name = i::Script::cast(obj->script()).name();
return Utils::ToLocal(i::Handle<i::Object>(name, i_isolate));
} else {
@@ -2046,35 +2193,67 @@ Local<Value> UnboundScript::GetScriptName() {
}
Local<Value> UnboundScript::GetSourceURL() {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- API_RCS_SCOPE(i_isolate, UnboundScript, GetSourceURL);
- return GetSharedFunctionInfoSourceURL(i_isolate, obj);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
+ if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ API_RCS_SCOPE(i_isolate, UnboundScript, GetSourceURL);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Object url = i::Script::cast(obj->script()).source_url();
+ return Utils::ToLocal(i::Handle<i::Object>(url, i_isolate));
+ } else {
+ return Local<String>();
+ }
}
Local<Value> UnboundScript::GetSourceMappingURL() {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- API_RCS_SCOPE(i_isolate, UnboundScript, GetSourceMappingURL);
- return GetSharedFunctionInfoSourceMappingURL(i_isolate, obj);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
+ if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ API_RCS_SCOPE(i_isolate, UnboundScript, GetSourceMappingURL);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Object url = i::Script::cast(obj->script()).source_mapping_url();
+ return Utils::ToLocal(i::Handle<i::Object>(url, i_isolate));
+ } else {
+ return Local<String>();
+ }
}
Local<Value> UnboundModuleScript::GetSourceURL() {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- API_RCS_SCOPE(i_isolate, UnboundModuleScript, GetSourceURL);
- return GetSharedFunctionInfoSourceURL(i_isolate, obj);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
+ if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ API_RCS_SCOPE(i_isolate, UnboundModuleScript, GetSourceURL);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Object url = i::Script::cast(obj->script()).source_url();
+ return Utils::ToLocal(i::Handle<i::Object>(url, i_isolate));
+ } else {
+ return Local<String>();
+ }
}
Local<Value> UnboundModuleScript::GetSourceMappingURL() {
- i::Handle<i::SharedFunctionInfo> obj =
- i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
- i::Isolate* i_isolate = obj->GetIsolate();
- API_RCS_SCOPE(i_isolate, UnboundModuleScript, GetSourceMappingURL);
- return GetSharedFunctionInfoSourceMappingURL(i_isolate, obj);
+ i::Handle<i::SharedFunctionInfo> obj = Utils::OpenHandle(this);
+ if (obj->script().IsScript()) {
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is
+ // gone.
+ DCHECK(!obj->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*obj);
+ API_RCS_SCOPE(i_isolate, UnboundModuleScript, GetSourceMappingURL);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Object url = i::Script::cast(obj->script()).source_mapping_url();
+ return Utils::ToLocal(i::Handle<i::Object>(url, i_isolate));
+ } else {
+ return Local<String>();
+ }
}
MaybeLocal<Value> Script::Run(Local<Context> context) {
@@ -2108,42 +2287,6 @@ MaybeLocal<Value> Script::Run(Local<Context> context,
}
#endif
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
-
- // TODO(crbug.com/1193459): remove once ablation study is completed
- base::ElapsedTimer timer;
- base::TimeDelta delta;
- if (i::v8_flags.script_delay > 0) {
- delta = v8::base::TimeDelta::FromMillisecondsD(i::v8_flags.script_delay);
- }
- if (i::v8_flags.script_delay_once > 0 && !i_isolate->did_run_script_delay()) {
- delta =
- v8::base::TimeDelta::FromMillisecondsD(i::v8_flags.script_delay_once);
- i_isolate->set_did_run_script_delay(true);
- }
- if (i::v8_flags.script_delay_fraction > 0.0) {
- timer.Start();
- } else if (delta.InMicroseconds() > 0) {
- timer.Start();
- while (timer.Elapsed() < delta) {
- // Busy wait.
- }
- }
-
- if (V8_UNLIKELY(i::v8_flags.experimental_web_snapshots)) {
- i::Handle<i::HeapObject> maybe_script =
- handle(fun->shared().script(), i_isolate);
- if (maybe_script->IsScript() &&
- i::Script::cast(*maybe_script).type() == i::Script::TYPE_WEB_SNAPSHOT) {
- i::WebSnapshotDeserializer deserializer(
- reinterpret_cast<i::Isolate*>(v8_isolate),
- i::Handle<i::Script>::cast(maybe_script));
- deserializer.Deserialize();
- RETURN_ON_FAILED_EXECUTION(Value);
- Local<Value> result = v8::Undefined(v8_isolate);
- RETURN_ESCAPED(result);
- }
- }
-
i::Handle<i::Object> receiver = i_isolate->global_proxy();
// TODO(cbruni, chromium:1244145): Remove once migrated to the context.
i::Handle<i::Object> options(
@@ -2153,15 +2296,6 @@ MaybeLocal<Value> Script::Run(Local<Context> context,
has_pending_exception = !ToLocal<Value>(
i::Execution::CallScript(i_isolate, fun, receiver, options), &result);
- if (i::v8_flags.script_delay_fraction > 0.0) {
- delta = v8::base::TimeDelta::FromMillisecondsD(
- timer.Elapsed().InMillisecondsF() * i::v8_flags.script_delay_fraction);
- timer.Restart();
- while (timer.Elapsed() < delta) {
- // Busy wait.
- }
- }
-
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -2185,19 +2319,44 @@ Local<Data> ScriptOrModule::HostDefinedOptions() {
Local<UnboundScript> Script::GetUnboundScript() {
i::DisallowGarbageCollection no_gc;
i::Handle<i::JSFunction> obj = Utils::OpenHandle(this);
- i::SharedFunctionInfo sfi = (*obj).shared();
- i::Isolate* i_isolate = sfi.GetIsolate();
- return ToApiHandle<UnboundScript>(i::handle(sfi, i_isolate));
+ i::Handle<i::SharedFunctionInfo> sfi =
+ i::handle(obj->shared(), obj->GetIsolate());
+ DCHECK(!sfi->InReadOnlySpace());
+ return ToApiHandle<UnboundScript>(sfi);
}
Local<Value> Script::GetResourceName() {
i::DisallowGarbageCollection no_gc;
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- i::SharedFunctionInfo sfi = (*func).shared();
- i::Isolate* i_isolate = func->GetIsolate();
+ i::SharedFunctionInfo sfi = func->shared();
CHECK(sfi.script().IsScript());
return ToApiHandle<Value>(
- i::handle(i::Script::cast(sfi.script()).name(), i_isolate));
+ i::handle(i::Script::cast(sfi.script()).name(), func->GetIsolate()));
+}
+
+std::vector<int> Script::GetProducedCompileHints() const {
+ i::DisallowGarbageCollection no_gc;
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = func->GetIsolate();
+ i::SharedFunctionInfo sfi = func->shared();
+ CHECK(sfi.script().IsScript());
+ i::Script script = i::Script::cast(sfi.script());
+ i::Object maybe_array_list = script.compiled_lazy_function_positions();
+ std::vector<int> result;
+ if (!maybe_array_list.IsUndefined(i_isolate)) {
+ i::ArrayList array_list = i::ArrayList::cast(maybe_array_list);
+ result.reserve(array_list.Length());
+ for (int i = 0; i < array_list.Length(); ++i) {
+ i::Object item = array_list.Get(i);
+ CHECK(item.IsSmi());
+ result.push_back(i::Smi::ToInt(item));
+ }
+ // Clear the data; the embedder can still request more data later, but it'll
+ // have to keep track of the original data itself.
+ script.set_compiled_lazy_function_positions(
+ i::ReadOnlyRoots(i_isolate).undefined_value());
+ }
+ return result;
}
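// Usage sketch for the compile-hints producer side (assumed workflow): the
// local names are illustrative, and kProduceCompileHints is assumed to be
// accepted by Compile() just as it is by CompileModule() and StartStreaming()
// below.
v8::ScriptCompiler::Source source(source_string, origin);
v8::Local<v8::Script> script =
    v8::ScriptCompiler::Compile(context, &source,
                                v8::ScriptCompiler::kProduceCompileHints)
        .ToLocalChecked();
script->Run(context).ToLocalChecked();
// Positions of functions that were compiled lazily while the script ran.
std::vector<int> hints = script->GetProducedCompileHints();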
// static
@@ -2304,7 +2463,7 @@ Local<Value> Module::GetException() const {
"Module status must be kErrored");
i::Handle<i::Module> self = Utils::OpenHandle(this);
i::Isolate* i_isolate = self->GetIsolate();
- ENTER_V8_MAYBE_TEARDOWN(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return ToApiHandle<Value>(i::handle(self->GetException(), i_isolate));
}
@@ -2565,6 +2724,12 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
no_cache_reason, i::NOT_NATIVES_CODE);
source->cached_data->rejected = cached_data->rejected();
}
+ } else if (options == kConsumeCompileHints) {
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithCompileHints(
+ i_isolate, str, script_details, source->compile_hint_callback,
+ source->compile_hint_callback_data, options, no_cache_reason,
+ i::NOT_NATIVES_CODE);
} else {
// Compile without any cache.
maybe_function_info = i::Compiler::GetSharedFunctionInfoForScript(
@@ -2573,6 +2738,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
has_pending_exception = !maybe_function_info.ToHandle(&result);
+ DCHECK_IMPLIES(!has_pending_exception, !result->InReadOnlySpace());
RETURN_ON_FAILED_EXECUTION(UnboundScript);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
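// Sketch of the consumer side referenced above via source->compile_hint_callback.
// The callback signature (a function start position plus the data pointer
// registered on the Source) is an assumption; only the field names are taken
// from the code above.
bool ConsumeCompileHint(int function_start_position, void* data) {
  const auto* hints = static_cast<const std::set<int>*>(data);
  // Eagerly compile only the functions that produced hints in an earlier run.
  return hints->count(function_start_position) > 0;
}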
@@ -2606,9 +2772,10 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
MaybeLocal<Module> ScriptCompiler::CompileModule(
Isolate* v8_isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason) {
- Utils::ApiCheck(options == kNoCompileOptions || options == kConsumeCodeCache,
- "v8::ScriptCompiler::CompileModule",
- "Invalid CompileOptions");
+ Utils::ApiCheck(
+ options == kNoCompileOptions || options == kConsumeCodeCache ||
+ options == kProduceCompileHints,
+ "v8::ScriptCompiler::CompileModule", "Invalid CompileOptions");
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
@@ -2743,7 +2910,8 @@ void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming(
Isolate* v8_isolate, StreamedSource* source, v8::ScriptType type,
CompileOptions options) {
- Utils::ApiCheck(options == kNoCompileOptions || options == kEagerCompile,
+ Utils::ApiCheck(options == kNoCompileOptions || options == kEagerCompile ||
+ options == kProduceCompileHints,
"v8::ScriptCompiler::StartStreaming",
"Invalid CompileOptions");
if (!i::v8_flags.script_streaming) return nullptr;
@@ -2864,36 +3032,39 @@ uint32_t ScriptCompiler::CachedDataVersionTag() {
ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
Local<UnboundScript> unbound_script) {
- i::Handle<i::SharedFunctionInfo> shared =
- i::Handle<i::SharedFunctionInfo>::cast(
- Utils::OpenHandle(*unbound_script));
- DCHECK_NO_SCRIPT_NO_EXCEPTION(shared->GetIsolate());
+ i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound_script);
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is gone.
+ DCHECK(!shared->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*shared);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
DCHECK(shared->is_toplevel());
- return i::CodeSerializer::Serialize(shared);
+ return i::CodeSerializer::Serialize(i_isolate, shared);
}
// static
ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
Local<UnboundModuleScript> unbound_module_script) {
i::Handle<i::SharedFunctionInfo> shared =
- i::Handle<i::SharedFunctionInfo>::cast(
- Utils::OpenHandle(*unbound_module_script));
- DCHECK_NO_SCRIPT_NO_EXCEPTION(shared->GetIsolate());
+ Utils::OpenHandle(*unbound_module_script);
+ // TODO(jgruber): Remove this DCHECK once Function::GetUnboundScript is gone.
+ DCHECK(!shared->InReadOnlySpace());
+ i::Isolate* i_isolate = i::GetIsolateFromWritableObject(*shared);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
DCHECK(shared->is_toplevel());
- return i::CodeSerializer::Serialize(shared);
+ return i::CodeSerializer::Serialize(i_isolate, shared);
}
ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
Local<Function> function) {
- auto js_function =
+ i::Handle<i::JSFunction> js_function =
i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*function));
- i::Handle<i::SharedFunctionInfo> shared(js_function->shared(),
- js_function->GetIsolate());
- DCHECK_NO_SCRIPT_NO_EXCEPTION(shared->GetIsolate());
+ i::Isolate* i_isolate = js_function->GetIsolate();
+ i::Handle<i::SharedFunctionInfo> shared(js_function->shared(), i_isolate);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
Utils::ApiCheck(shared->is_wrapped(),
"v8::ScriptCompiler::CreateCodeCacheForFunction",
"Expected SharedFunctionInfo with wrapped source code");
- return i::CodeSerializer::Serialize(shared);
+ return i::CodeSerializer::Serialize(i_isolate, shared);
}
MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
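// Usage sketch for the serializer entry points above: produce a code cache
// from an unbound script and feed it back on a later run. Variable names are
// illustrative.
std::unique_ptr<v8::ScriptCompiler::CachedData> cache(
    v8::ScriptCompiler::CreateCodeCache(unbound_script));
v8::ScriptCompiler::Source source(
    source_string, origin,
    new v8::ScriptCompiler::CachedData(cache->data, cache->length));
v8::MaybeLocal<v8::UnboundScript> maybe_script =
    v8::ScriptCompiler::CompileUnboundScript(
        isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);
bool cache_rejected = source.GetCachedData()->rejected;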
@@ -3694,7 +3865,8 @@ TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
bool Value::IsDataView() const {
- return Utils::OpenHandle(this)->IsJSDataView();
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->IsJSDataView() || obj->IsJSRabGsabDataView();
}
bool Value::IsSharedArrayBuffer() const {
@@ -3729,9 +3901,11 @@ VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
#if V8_ENABLE_WEBASSEMBLY
VALUE_IS_SPECIFIC_TYPE(WasmMemoryObject, WasmMemoryObject)
VALUE_IS_SPECIFIC_TYPE(WasmModuleObject, WasmModuleObject)
+VALUE_IS_SPECIFIC_TYPE(WasmNull, WasmNull)
#else
bool Value::IsWasmMemoryObject() const { return false; }
bool Value::IsWasmModuleObject() const { return false; }
+bool Value::IsWasmNull() const { return false; }
#endif // V8_ENABLE_WEBASSEMBLY
VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
@@ -4074,10 +4248,18 @@ size_t v8::BackingStore::ByteLength() const {
return reinterpret_cast<const i::BackingStore*>(this)->byte_length();
}
+size_t v8::BackingStore::MaxByteLength() const {
+ return reinterpret_cast<const i::BackingStore*>(this)->max_byte_length();
+}
+
bool v8::BackingStore::IsShared() const {
return reinterpret_cast<const i::BackingStore*>(this)->is_shared();
}
+bool v8::BackingStore::IsResizableByUserJavaScript() const {
+ return reinterpret_cast<const i::BackingStore*>(this)->is_resizable_by_js();
+}
+
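// Usage sketch for the new BackingStore queries: inspect an existing buffer's
// store. For a fixed-length ArrayBuffer, MaxByteLength() is expected to equal
// ByteLength().
std::shared_ptr<v8::BackingStore> store = array_buffer->GetBackingStore();
size_t current_size = store->ByteLength();
size_t maximum_size = store->MaxByteLength();
bool user_resizable = store->IsResizableByUserJavaScript();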
// static
std::unique_ptr<v8::BackingStore> v8::BackingStore::Reallocate(
v8::Isolate* v8_isolate, std::unique_ptr<v8::BackingStore> backing_store,
@@ -4168,8 +4350,8 @@ TYPED_ARRAYS(CHECK_TYPED_ARRAY_CAST)
void v8::DataView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSDataView(), "v8::DataView::Cast()",
- "Value is not a DataView");
+ Utils::ApiCheck(obj->IsJSDataView() || obj->IsJSRabGsabDataView(),
+ "v8::DataView::Cast()", "Value is not a DataView");
}
void v8::SharedArrayBuffer::CheckCast(Value* that) {
@@ -4774,7 +4956,7 @@ Local<String> v8::Object::GetConstructorName() {
// TODO(v8:12547): Consider adding GetConstructorName(Local<Context>).
auto self = Utils::OpenHandle(this);
i::Isolate* i_isolate;
- if (self->InSharedWritableHeap()) {
+ if (self->InWritableSharedSpace()) {
i_isolate = i::Isolate::Current();
} else {
i_isolate = self->GetIsolate();
@@ -4792,8 +4974,8 @@ Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
auto self = Utils::OpenHandle(this);
i::JSReceiver::IntegrityLevel i_level =
level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
- Maybe<bool> result =
- i::JSReceiver::SetIntegrityLevel(self, i_level, i::kThrowOnError);
+ Maybe<bool> result = i::JSReceiver::SetIntegrityLevel(
+ i_isolate, self, i_level, i::kThrowOnError);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -5155,6 +5337,40 @@ MaybeLocal<v8::Context> v8::Object::GetCreationContext() {
return MaybeLocal<v8::Context>();
}
+void* v8::Object::GetAlignedPointerFromEmbedderDataInCreationContext(
+ int index) {
+ const char* location =
+ "v8::Object::GetAlignedPointerFromEmbedderDataInCreationContext()";
+ auto self = Utils::OpenHandle(this);
+ auto maybe_context = self->GetCreationContextRaw();
+ if (!maybe_context.has_value()) return nullptr;
+
+ // The code below mostly mimics Context::GetAlignedPointerFromEmbedderData()
+ // but it doesn't try to expand the EmbedderDataArray instance.
+ i::DisallowGarbageCollection no_gc;
+ i::NativeContext native_context =
+ i::NativeContext::cast(maybe_context.value());
+ i::Isolate* i_isolate = native_context.GetIsolate();
+
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ // TODO(ishell): remove cast once embedder_data slot has a proper type.
+ i::EmbedderDataArray data =
+ i::EmbedderDataArray::cast(native_context.embedder_data());
+ if (V8_LIKELY(static_cast<unsigned>(index) <
+ static_cast<unsigned>(data.length()))) {
+ void* result;
+ Utils::ApiCheck(
+ i::EmbedderDataSlot(data, index).ToAlignedPointer(i_isolate, &result),
+ location, "Pointer is not aligned");
+ return result;
+ }
+ // Bad index, report an API error.
+ Utils::ApiCheck(index >= 0, location, "Negative index");
+ Utils::ApiCheck(index < i::EmbedderDataArray::kMaxLength, location,
+ "Index too large");
+ return nullptr;
+}
+
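// Usage sketch: reading embedder data from an object's creation context with
// the new accessor, versus going through the context handle. The index is
// illustrative and assumed to have been populated earlier with
// Context::SetAlignedPointerInEmbedderData().
constexpr int kContextDataIndex = 1;  // illustrative slot
void* data = object->GetAlignedPointerFromEmbedderDataInCreationContext(
    kContextDataIndex);
// Older equivalent that materializes the context first:
// object->GetCreationContext().ToLocalChecked()
//     ->GetAlignedPointerFromEmbedderData(kContextDataIndex);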
Local<v8::Context> v8::Object::GetCreationContextChecked() {
Local<Context> context;
Utils::ApiCheck(GetCreationContext().ToLocal(&context),
@@ -5318,7 +5534,14 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
"Function to be called is a null pointer");
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
static_assert(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
+#if V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ i::Handle<i::Object>* args = new i::Handle<i::Object>[argc];
+ for (int i = 0; i < argc; ++i) {
+ args[i] = Utils::OpenHandle(*argv[i]);
+ }
+#else
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+#endif
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
i::Execution::Call(i_isolate, self, recv_obj, argc, args), &result);
@@ -5418,10 +5641,10 @@ int Function::GetScriptColumnNumber() const {
}
MaybeLocal<UnboundScript> Function::GetUnboundScript() const {
- i::Handle<i::Object> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) return MaybeLocal<UnboundScript>();
i::SharedFunctionInfo sfi = i::JSFunction::cast(*self).shared();
- i::Isolate* i_isolate = sfi.GetIsolate();
+ i::Isolate* i_isolate = self->GetIsolate();
return ToApiHandle<UnboundScript>(i::handle(sfi, i_isolate));
}
@@ -5448,7 +5671,7 @@ bool Function::Experimental_IsNopFunction() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) return false;
i::SharedFunctionInfo sfi = i::JSFunction::cast(*self).shared();
- i::Isolate* i_isolate = sfi.GetIsolate();
+ i::Isolate* i_isolate = self->GetIsolate();
i::IsCompiledScope is_compiled_scope(sfi.is_compiled_scope(i_isolate));
if (!is_compiled_scope.is_compiled() &&
!i::Compiler::Compile(i_isolate, i::handle(sfi, i_isolate),
@@ -6144,7 +6367,8 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
.store_aligned_pointer(obj->GetIsolate(), value),
location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
- internal::WriteBarrier::MarkingFromInternalFields(i::JSObject::cast(*obj));
+ internal::WriteBarrier::CombinedBarrierFromInternalFields(
+ i::JSObject::cast(*obj), value);
}
void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
@@ -6167,7 +6391,8 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
- internal::WriteBarrier::MarkingFromInternalFields(js_obj);
+ internal::WriteBarrier::CombinedBarrierFromInternalFields(js_obj, argc,
+ values);
}
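// Usage sketch for the embedder-side writes these combined barriers cover.
// The template setup, field indices and the (suitably aligned) native
// pointers are illustrative.
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetInternalFieldCount(2);
v8::Local<v8::Object> wrapper = templ->NewInstance(context).ToLocalChecked();
wrapper->SetAlignedPointerInInternalField(0, native_ptr);
int indices[] = {0, 1};
void* values[] = {native_ptr, type_tag};
wrapper->SetAlignedPointerInInternalFields(2, indices, values);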
// --- E n v i r o n m e n t ---
@@ -6515,7 +6740,7 @@ Local<Context> NewContext(
// TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
// fail.
// Sanity-check that the isolate is initialized and usable.
- CHECK(i_isolate->builtins()->code(i::Builtin::kIllegal).IsCodeT());
+ CHECK(i_isolate->builtins()->code(i::Builtin::kIllegal).IsCode());
TRACE_EVENT_CALL_STATS_SCOPED(i_isolate, "v8", "V8.NewContext");
API_RCS_SCOPE(i_isolate, Context, New);
@@ -6607,6 +6832,287 @@ Local<Value> v8::Context::GetSecurityToken() {
return Utils::ToLocal(token_handle);
}
+namespace {
+
+bool MayContainObjectsToFreeze(i::InstanceType obj_type) {
+ if (i::InstanceTypeChecker::IsString(obj_type)) return false;
+ // SharedFunctionInfo is cross-context so it shouldn't be frozen.
+ if (i::InstanceTypeChecker::IsSharedFunctionInfo(obj_type)) return false;
+ return true;
+}
+
+bool RequiresEmbedderSupportToFreeze(i::InstanceType obj_type) {
+ DCHECK(i::InstanceTypeChecker::IsJSReceiver(obj_type));
+
+ return (i::InstanceTypeChecker::IsJSApiObject(obj_type) ||
+ i::InstanceTypeChecker::IsJSExternalObject(obj_type) ||
+ i::InstanceTypeChecker::IsJSObjectWithEmbedderSlots(obj_type));
+}
+
+bool IsJSReceiverSafeToFreeze(i::InstanceType obj_type) {
+ DCHECK(i::InstanceTypeChecker::IsJSReceiver(obj_type));
+
+ switch (obj_type) {
+ case i::JS_OBJECT_TYPE:
+ case i::JS_GLOBAL_OBJECT_TYPE:
+ case i::JS_GLOBAL_PROXY_TYPE:
+ case i::JS_PRIMITIVE_WRAPPER_TYPE:
+ case i::JS_FUNCTION_TYPE:
+ /* Function types */
+ case i::BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ case i::JS_ARRAY_CONSTRUCTOR_TYPE:
+ case i::JS_PROMISE_CONSTRUCTOR_TYPE:
+ case i::JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case i::JS_CLASS_CONSTRUCTOR_TYPE:
+ /* Prototype Types */
+ case i::JS_ARRAY_ITERATOR_PROTOTYPE_TYPE:
+ case i::JS_ITERATOR_PROTOTYPE_TYPE:
+ case i::JS_MAP_ITERATOR_PROTOTYPE_TYPE:
+ case i::JS_OBJECT_PROTOTYPE_TYPE:
+ case i::JS_PROMISE_PROTOTYPE_TYPE:
+ case i::JS_REG_EXP_PROTOTYPE_TYPE:
+ case i::JS_SET_ITERATOR_PROTOTYPE_TYPE:
+ case i::JS_SET_PROTOTYPE_TYPE:
+ case i::JS_STRING_ITERATOR_PROTOTYPE_TYPE:
+ case i::JS_TYPED_ARRAY_PROTOTYPE_TYPE:
+ /* */
+ case i::JS_ARRAY_TYPE:
+ return true;
+#if V8_ENABLE_WEBASSEMBLY
+ case i::WASM_ARRAY_TYPE:
+ case i::WASM_STRUCT_TYPE:
+#endif // V8_ENABLE_WEBASSEMBLY
+ case i::JS_PROXY_TYPE:
+ return true;
+    // These types are known to be unsafe to freeze.
+ case i::JS_MAP_KEY_ITERATOR_TYPE:
+ case i::JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case i::JS_MAP_VALUE_ITERATOR_TYPE:
+ case i::JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case i::JS_SET_VALUE_ITERATOR_TYPE:
+ case i::JS_GENERATOR_OBJECT_TYPE:
+ case i::JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ case i::JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ case i::JS_ARRAY_ITERATOR_TYPE: {
+ return false;
+ }
+ default:
+ // TODO(behamilton): Handle any types that fall through here.
+ return false;
+ }
+}
+
+class ObjectVisitorDeepFreezer : i::ObjectVisitor {
+ public:
+ explicit ObjectVisitorDeepFreezer(i::Isolate* isolate,
+ Context::DeepFreezeDelegate* delegate)
+ : isolate_(isolate), delegate_(delegate) {}
+
+ bool DeepFreeze(i::Handle<i::Context> context) {
+ bool success = VisitObject(i::HeapObject::cast(*context));
+ DCHECK_EQ(success, !error_.has_value());
+ if (!success) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate_, NewTypeError(error_->msg_id, error_->name), false);
+ }
+ for (const auto& obj : objects_to_freeze_) {
+ MAYBE_RETURN_ON_EXCEPTION_VALUE(
+ isolate_,
+ i::JSReceiver::SetIntegrityLevel(isolate_, obj, i::FROZEN,
+ i::kThrowOnError),
+ false);
+ }
+ return true;
+ }
+
+ void VisitPointers(i::HeapObject host, i::ObjectSlot start,
+ i::ObjectSlot end) final {
+ VisitPointersImpl(start, end);
+ }
+ void VisitPointers(i::HeapObject host, i::MaybeObjectSlot start,
+ i::MaybeObjectSlot end) final {
+ VisitPointersImpl(start, end);
+ }
+ void VisitMapPointer(i::HeapObject host) final {
+ VisitPointer(host, host.map_slot());
+ }
+ void VisitCodePointer(i::Code host, i::CodeObjectSlot slot) final {}
+ void VisitCodeTarget(i::RelocInfo* rinfo) final {}
+ void VisitEmbeddedPointer(i::RelocInfo* rinfo) final {}
+ void VisitCustomWeakPointers(i::HeapObject host, i::ObjectSlot start,
+ i::ObjectSlot end) final {}
+
+ private:
+ struct ErrorInfo {
+ i::MessageTemplate msg_id;
+ i::Handle<i::String> name;
+ };
+
+ template <typename TSlot>
+ void VisitPointersImpl(TSlot start, TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = current.load(isolate_);
+ i::HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ if (!VisitObject(heap_object)) {
+ return;
+ }
+ }
+ }
+ }
+
+ bool FreezeEmbedderObjectAndVisitChildren(i::Handle<i::JSObject> obj) {
+ DCHECK(delegate_);
+ std::vector<Local<Object>> children;
+ if (!delegate_->FreezeEmbedderObjectAndGetChildren(Utils::ToLocal(obj),
+ children)) {
+ return false;
+ }
+ for (auto child : children) {
+ if (!VisitObject(*Utils::OpenHandle<Object, i::JSReceiver>(child))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool VisitObject(i::HeapObject obj) {
+ DCHECK(!obj.is_null());
+ if (error_.has_value()) {
+ return false;
+ }
+
+ i::DisallowGarbageCollection no_gc;
+ i::InstanceType obj_type = obj.map().instance_type();
+
+ // Skip common types that can't contain items to freeze.
+ if (!MayContainObjectsToFreeze(obj_type)) {
+ return true;
+ }
+
+ if (!done_list_.insert(obj).second) {
+ // If we couldn't insert (because it is already in the set) then we're
+ // done.
+ return true;
+ }
+
+ if (i::InstanceTypeChecker::IsAccessorPair(obj_type)) {
+ // For AccessorPairs we need to ensure that the functions they point to
+ // have been instantiated into actual JavaScript objects that can be
+ // frozen. TODO(behamilton): If they haven't then we need to save them to
+ // instantiate (and recurse) before freezing.
+ i::AccessorPair accessor_pair = i::AccessorPair::cast(obj);
+ if (accessor_pair.getter().IsFunctionTemplateInfo() ||
+ accessor_pair.setter().IsFunctionTemplateInfo()) {
+ // TODO(behamilton): Handle this more gracefully.
+ error_ = ErrorInfo{i::MessageTemplate::kCannotDeepFreezeObject,
+ isolate_->factory()->empty_string()};
+ return false;
+ }
+ } else if (i::InstanceTypeChecker::IsContext(obj_type)) {
+ // For contexts we need to ensure that all accessible locals are const.
+ // If not they could be replaced to bypass freezing.
+ i::ScopeInfo scope_info = i::Context::cast(obj).scope_info();
+ for (auto it : i::ScopeInfo::IterateLocalNames(&scope_info, no_gc)) {
+ if (scope_info.ContextLocalMode(it->index()) !=
+ i::VariableMode::kConst) {
+ DCHECK(!error_.has_value());
+ error_ = ErrorInfo{i::MessageTemplate::kCannotDeepFreezeValue,
+ i::handle(it->name(), isolate_)};
+ return false;
+ }
+ }
+ } else if (i::InstanceTypeChecker::IsJSReceiver(obj_type)) {
+ i::Handle<i::JSReceiver> receiver =
+ i::handle(i::JSReceiver::cast(obj), isolate_);
+ if (RequiresEmbedderSupportToFreeze(obj_type)) {
+ auto js_obj = i::Handle<i::JSObject>::cast(receiver);
+
+ // External objects don't have slots but still need to be processed by
+ // the embedder.
+ if (i::InstanceTypeChecker::IsJSExternalObject(obj_type) ||
+ js_obj->GetEmbedderFieldCount() > 0) {
+ if (!delegate_) {
+ DCHECK(!error_.has_value());
+ error_ = ErrorInfo{i::MessageTemplate::kCannotDeepFreezeObject,
+ i::handle(receiver->class_name(), isolate_)};
+ return false;
+ }
+
+ // Handle embedder specific types and any v8 children it wants to
+ // freeze.
+ if (!FreezeEmbedderObjectAndVisitChildren(js_obj)) {
+ return false;
+ }
+ } else {
+ DCHECK_EQ(js_obj->GetEmbedderFieldCount(), 0);
+ }
+ } else {
+ DCHECK_IMPLIES(
+ i::InstanceTypeChecker::IsJSObject(obj_type),
+ i::JSObject::cast(*receiver).GetEmbedderFieldCount() == 0);
+ if (!IsJSReceiverSafeToFreeze(obj_type)) {
+ DCHECK(!error_.has_value());
+ error_ = ErrorInfo{i::MessageTemplate::kCannotDeepFreezeObject,
+ i::handle(receiver->class_name(), isolate_)};
+ return false;
+ }
+ }
+
+ // Save this to freeze after we are done. Freezing triggers garbage
+ // collection which doesn't work well with this visitor pattern, so we
+ // delay it until after.
+ objects_to_freeze_.push_back(receiver);
+
+ } else {
+ DCHECK(!i::InstanceTypeChecker::IsAccessorPair(obj_type));
+ DCHECK(!i::InstanceTypeChecker::IsContext(obj_type));
+ DCHECK(!i::InstanceTypeChecker::IsJSReceiver(obj_type));
+ }
+
+ DCHECK(!error_.has_value());
+ obj.Iterate(isolate_, this);
+ // Iterate sets error_ on failure. We should propagate errors.
+ return !error_.has_value();
+ }
+
+ i::Isolate* isolate_;
+ Context::DeepFreezeDelegate* delegate_;
+ std::unordered_set<i::Object, i::Object::Hasher> done_list_;
+ std::vector<i::Handle<i::JSReceiver>> objects_to_freeze_;
+ base::Optional<ErrorInfo> error_;
+};
+
+} // namespace
+
+Maybe<void> Context::DeepFreeze(DeepFreezeDelegate* delegate) {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = env->GetIsolate();
+
+ // TODO(behamilton): Incorporate compatibility improvements similar to NodeJS:
+ // https://github.com/nodejs/node/blob/main/lib/internal/freeze_intrinsics.js
+ // These need to be done before freezing.
+
+ Local<Context> context = Utils::ToLocal(env);
+ ENTER_V8_NO_SCRIPT(i_isolate, context, Context, DeepFreeze, Nothing<void>(),
+ i::HandleScope);
+ ObjectVisitorDeepFreezer vfreezer(i_isolate, delegate);
+ has_pending_exception = !vfreezer.DeepFreeze(env);
+
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(void);
+ return JustVoid();
+}
+
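A minimal usage sketch for the new entry point, assuming a live Local<Context> on the current isolate and no embedder-managed objects in the reachable graph (so a null delegate suffices); TryDeepFreeze is an illustrative helper name, not part of the V8 API.

bool TryDeepFreeze(v8::Local<v8::Context> context) {
  // Objects with embedder fields would require a Context::DeepFreezeDelegate;
  // without any, a null delegate is accepted.
  v8::Maybe<void> result = context->DeepFreeze(/*delegate=*/nullptr);
  // On failure a kCannotDeepFreeze* exception is scheduled on the isolate and
  // Nothing<void>() is returned.
  return result.IsJust();
}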
v8::Isolate* Context::GetIsolate() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
return reinterpret_cast<Isolate*>(env->GetIsolate());
@@ -6656,7 +7162,7 @@ v8::Local<v8::Object> Context::Global() {
void Context::DetachGlobal() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* i_isolate = context->GetIsolate();
- ENTER_V8_MAYBE_TEARDOWN(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->DetachGlobal(context);
}
@@ -7122,7 +7628,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
// TODO(v8:12007): Consider adding
// MakeExternal(Isolate*, ExternalStringResource*).
i::Isolate* i_isolate;
- if (obj.IsShared()) {
+ if (obj.InWritableSharedSpace()) {
i_isolate = i::Isolate::Current();
} else {
// It is safe to call GetIsolateFromWritableHeapObject because
@@ -7155,7 +7661,7 @@ bool v8::String::MakeExternal(
// TODO(v8:12007): Consider adding
// MakeExternal(Isolate*, ExternalOneByteStringResource*).
i::Isolate* i_isolate;
- if (obj.IsShared()) {
+ if (obj.InWritableSharedSpace()) {
i_isolate = i::Isolate::Current();
} else {
// It is safe to call GetIsolateFromWritableHeapObject because
@@ -7186,6 +7692,21 @@ bool v8::String::CanMakeExternal() const {
return !i::Heap::InYoungGeneration(obj);
}
+bool v8::String::CanMakeExternal(Encoding encoding) const {
+ i::String obj = *Utils::OpenHandle(this);
+
+ if (obj.IsThinString()) {
+ obj = i::ThinString::cast(obj).actual();
+ }
+
+ if (!obj.SupportsExternalization(encoding)) {
+ return false;
+ }
+
+ // Only old space strings should be externalized.
+ return !i::Heap::InYoungGeneration(obj);
+}
+
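A hedged sketch of pairing the new encoding-aware check with externalization; ExternalizeOneByte is an illustrative name and resource stands in for an embedder-owned resource implementation.

bool ExternalizeOneByte(v8::Local<v8::String> str,
                        v8::String::ExternalOneByteStringResource* resource) {
  // The encoding-aware predicate also rejects young-generation strings, like
  // the encoding-less overload above.
  if (!str->CanMakeExternal(v8::String::ONE_BYTE_ENCODING)) return false;
  // MakeExternal may still return false if externalization fails.
  return str->MakeExternal(resource);
}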
bool v8::String::StringEquals(Local<String> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
@@ -8141,7 +8662,12 @@ void v8::ArrayBuffer::SetDetachKey(v8::Local<v8::Value> key) {
size_t v8::ArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- return obj->byte_length();
+ return obj->GetByteLength();
+}
+
+size_t v8::ArrayBuffer::MaxByteLength() const {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ return obj->max_byte_length();
}
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* v8_isolate,
@@ -8218,6 +8744,41 @@ std::unique_ptr<v8::BackingStore> v8::ArrayBuffer::NewBackingStore(
static_cast<v8::BackingStore*>(backing_store.release()));
}
+// static
+std::unique_ptr<BackingStore> v8::ArrayBuffer::NewResizableBackingStore(
+ size_t byte_length, size_t max_byte_length) {
+ Utils::ApiCheck(i::v8_flags.harmony_rab_gsab,
+ "v8::ArrayBuffer::NewResizableBackingStore",
+ "Constructing resizable ArrayBuffers is not supported");
+ Utils::ApiCheck(byte_length <= max_byte_length,
+ "v8::ArrayBuffer::NewResizableBackingStore",
+ "Cannot construct resizable ArrayBuffer, byte_length must be "
+ "<= max_byte_length");
+ Utils::ApiCheck(
+ byte_length <= i::JSArrayBuffer::kMaxByteLength,
+ "v8::ArrayBuffer::NewResizableBackingStore",
+ "Cannot construct resizable ArrayBuffer, requested length is too big");
+
+ size_t page_size, initial_pages, max_pages;
+ if (i::JSArrayBuffer::GetResizableBackingStorePageConfiguration(
+ nullptr, byte_length, max_byte_length, i::kDontThrow, &page_size,
+ &initial_pages, &max_pages)
+ .IsNothing()) {
+ i::V8::FatalProcessOutOfMemory(nullptr,
+ "v8::ArrayBuffer::NewResizableBackingStore");
+ }
+ std::unique_ptr<i::BackingStoreBase> backing_store =
+ i::BackingStore::TryAllocateAndPartiallyCommitMemory(
+ nullptr, byte_length, max_byte_length, page_size, initial_pages,
+ max_pages, i::WasmMemoryFlag::kNotWasm, i::SharedFlag::kNotShared);
+ if (!backing_store) {
+ i::V8::FatalProcessOutOfMemory(nullptr,
+ "v8::ArrayBuffer::NewResizableBackingStore");
+ }
+ return std::unique_ptr<v8::BackingStore>(
+ static_cast<v8::BackingStore*>(backing_store.release()));
+}
+
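A sketch of the intended call pattern, assuming the harmony_rab_gsab flag is enabled and isolate has an active HandleScope; the sizes are arbitrary.

v8::Local<v8::ArrayBuffer> NewResizableBuffer(v8::Isolate* isolate) {
  // Starts at 1 KiB and may be resized up to 1 MiB.
  std::unique_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewResizableBackingStore(/*byte_length=*/1024,
                                                /*max_byte_length=*/1 << 20);
  v8::Local<v8::ArrayBuffer> buffer =
      v8::ArrayBuffer::New(isolate, std::move(store));
  // buffer->ByteLength() == 1024, buffer->MaxByteLength() == (1 << 20).
  return buffer;
}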
Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer;
@@ -8227,6 +8788,12 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
DCHECK(data_view->buffer().IsJSArrayBuffer());
buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()),
data_view->GetIsolate());
+ } else if (obj->IsJSRabGsabDataView()) {
+ i::Handle<i::JSRabGsabDataView> data_view(i::JSRabGsabDataView::cast(*obj),
+ obj->GetIsolate());
+ DCHECK(data_view->buffer().IsJSArrayBuffer());
+ buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()),
+ data_view->GetIsolate());
} else {
DCHECK(obj->IsJSTypedArray());
buffer = i::JSTypedArray::cast(*obj).GetBuffer();
@@ -8244,10 +8811,14 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
if (self->IsJSTypedArray()) {
i::Handle<i::JSTypedArray> array(i::JSTypedArray::cast(*self), i_isolate);
source = reinterpret_cast<char*>(array->DataPtr());
- } else {
- DCHECK(self->IsJSDataView());
+ } else if (self->IsJSDataView()) {
i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*self), i_isolate);
source = reinterpret_cast<char*>(data_view->data_pointer());
+ } else {
+ DCHECK(self->IsJSRabGsabDataView());
+ i::Handle<i::JSRabGsabDataView> data_view(
+ i::JSRabGsabDataView::cast(*self), i_isolate);
+ source = reinterpret_cast<char*>(data_view->data_pointer());
}
memcpy(dest, source, bytes_to_copy);
}
@@ -8267,13 +8838,24 @@ size_t v8::ArrayBufferView::ByteOffset() {
}
size_t v8::ArrayBufferView::ByteLength() {
- i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- return obj->WasDetached() ? 0 : obj->byte_length();
+ i::DisallowGarbageCollection no_gc;
+ i::JSArrayBufferView obj = *Utils::OpenHandle(this);
+ if (obj.WasDetached()) {
+ return 0;
+ }
+ if (obj.IsJSTypedArray()) {
+ return i::JSTypedArray::cast(obj).GetByteLength();
+ }
+ if (obj.IsJSDataView()) {
+ return i::JSDataView::cast(obj).byte_length();
+ }
+ return i::JSRabGsabDataView::cast(obj).GetByteLength();
}
size_t v8::TypedArray::Length() {
- i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- return obj->WasDetached() ? 0 : obj->length();
+ i::DisallowGarbageCollection no_gc;
+ i::JSTypedArray obj = *Utils::OpenHandle(this);
+ return obj.WasDetached() ? 0 : obj.GetLength();
}
static_assert(
@@ -8322,14 +8904,16 @@ static_assert(
TYPED_ARRAYS(TYPED_ARRAY_NEW)
#undef TYPED_ARRAY_NEW
+// TODO(v8:11111): Support creating length tracking DataViews via the API.
Local<DataView> DataView::New(Local<ArrayBuffer> array_buffer,
size_t byte_offset, size_t byte_length) {
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
i::Isolate* i_isolate = buffer->GetIsolate();
API_RCS_SCOPE(i_isolate, DataView, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSDataView> obj =
- i_isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
+ i::Handle<i::JSDataView> obj = i::Handle<i::JSDataView>::cast(
+ i_isolate->factory()->NewJSDataViewOrRabGsabDataView(buffer, byte_offset,
+ byte_length));
return Utils::ToLocal(obj);
}
@@ -8340,14 +8924,20 @@ Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
i::Isolate* i_isolate = buffer->GetIsolate();
API_RCS_SCOPE(i_isolate, DataView, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSDataView> obj =
- i_isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
+ i::Handle<i::JSDataView> obj = i::Handle<i::JSDataView>::cast(
+ i_isolate->factory()->NewJSDataViewOrRabGsabDataView(buffer, byte_offset,
+ byte_length));
return Utils::ToLocal(obj);
}
size_t v8::SharedArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- return obj->byte_length();
+ return obj->GetByteLength();
+}
+
+size_t v8::SharedArrayBuffer::MaxByteLength() const {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ return obj->max_byte_length();
}
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* v8_isolate,
@@ -8692,21 +9282,6 @@ void Isolate::RemoveGCEpilogueCallback(GCCallback callback) {
RemoveGCEpilogueCallback(CallGCCallbackWithoutData, data);
}
-START_ALLOW_USE_DEPRECATED()
-
-void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- CHECK_NULL(i_isolate->heap()->cpp_heap());
- i_isolate->heap()->SetEmbedderHeapTracer(tracer);
-}
-
-EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- return i_isolate->heap()->GetEmbedderHeapTracer();
-}
-
-END_ALLOW_USE_DEPRECATED()
-
void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
i_isolate->heap()->SetEmbedderRootsHandler(handler);
@@ -8714,7 +9289,6 @@ void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
void Isolate::AttachCppHeap(CppHeap* cpp_heap) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- CHECK_NULL(GetEmbedderHeapTracer());
i_isolate->heap()->AttachCppHeap(cpp_heap);
}
@@ -8905,9 +9479,9 @@ void Isolate::Initialize(Isolate* v8_isolate,
i_isolate->set_embedder_wrapper_object_index(
params.embedder_wrapper_object_index);
- if (!i_isolate->is_shared() && !i::V8::GetCurrentPlatform()
- ->GetForegroundTaskRunner(v8_isolate)
- ->NonNestableTasksEnabled()) {
+ if (!i::V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(v8_isolate)
+ ->NonNestableTasksEnabled()) {
FATAL(
"The current platform's foreground task runner does not have "
"non-nestable tasks enabled. The embedder must provide one.");
@@ -8981,23 +9555,18 @@ void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* v8_isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
- : on_failure_(on_failure), v8_isolate_(v8_isolate) {
+ : v8_isolate_(v8_isolate), on_failure_(on_failure) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
switch (on_failure_) {
case CRASH_ON_FAILURE:
- i::DisallowJavascriptExecution::Open(i_isolate,
- &was_execution_allowed_assert_);
+ i::DisallowJavascriptExecution::Open(i_isolate, &was_execution_allowed_);
break;
case THROW_ON_FAILURE:
- i::ThrowOnJavascriptExecution::Open(i_isolate,
- &was_execution_allowed_throws_);
+ i::ThrowOnJavascriptExecution::Open(i_isolate, &was_execution_allowed_);
break;
case DUMP_ON_FAILURE:
- i::DumpOnJavascriptExecution::Open(i_isolate,
- &was_execution_allowed_dump_);
+ i::DumpOnJavascriptExecution::Open(i_isolate, &was_execution_allowed_);
break;
- default:
- UNREACHABLE();
}
}
@@ -9005,19 +9574,14 @@ Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate_);
switch (on_failure_) {
case CRASH_ON_FAILURE:
- i::DisallowJavascriptExecution::Close(i_isolate,
- was_execution_allowed_assert_);
+ i::DisallowJavascriptExecution::Close(i_isolate, was_execution_allowed_);
break;
case THROW_ON_FAILURE:
- i::ThrowOnJavascriptExecution::Close(i_isolate,
- was_execution_allowed_throws_);
+ i::ThrowOnJavascriptExecution::Close(i_isolate, was_execution_allowed_);
break;
case DUMP_ON_FAILURE:
- i::DumpOnJavascriptExecution::Close(i_isolate,
- was_execution_allowed_dump_);
+ i::DumpOnJavascriptExecution::Close(i_isolate, was_execution_allowed_);
break;
- default:
- UNREACHABLE();
}
}
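The change above only merges the three per-mode bookkeeping fields into one; the public scope is used as before. A usage sketch, with isolate assumed to be the embedder's isolate:

void WithNoScriptExecution(v8::Isolate* isolate) {
  v8::Isolate::DisallowJavascriptExecutionScope no_js(
      isolate,
      v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
  // Any attempt to run script on |isolate| while |no_js| is alive throws
  // instead of executing (CRASH_ON_FAILURE and DUMP_ON_FAILURE crash or dump).
}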
@@ -9470,6 +10034,7 @@ void Isolate::ClearCachesForTesting() {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
i_isolate->AbortConcurrentOptimization(i::BlockingBehavior::kBlock);
i_isolate->ClearSerializerData();
+ i_isolate->compilation_cache()->Clear();
}
void Isolate::EnableMemorySavingsMode() {
@@ -9541,7 +10106,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() {
{i::Builtin::kJSRunMicrotasksEntry,
&entry_stubs.js_run_microtasks_entry_stub}}};
for (auto& pair : stubs) {
- i::CodeT js_entry = i_isolate->builtins()->code(pair.first);
+ i::Code js_entry = i_isolate->builtins()->code(pair.first);
pair.second->code.start =
reinterpret_cast<const void*>(js_entry.InstructionStart());
pair.second->code.length_in_bytes = js_entry.InstructionSize();
@@ -9600,6 +10165,9 @@ CALLBACK_SETTER(WasmAsyncResolvePromiseCallback,
CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
wasm_load_source_map_callback)
+CALLBACK_SETTER(WasmGCEnabledCallback, WasmGCEnabledCallback,
+ wasm_gc_enabled_callback)
+
CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
SharedArrayBufferConstructorEnabledCallback,
sharedarraybuffer_constructor_enabled_callback)
@@ -10522,71 +11090,6 @@ void HeapProfiler::SetGetDetachednessCallback(GetDetachednessCallback callback,
data);
}
-void EmbedderHeapTracer::SetStackStart(void* stack_start) {
- CHECK(v8_isolate_);
- reinterpret_cast<i::Isolate*>(v8_isolate_)
- ->heap()
- ->SetStackStart(stack_start);
-}
-
-void EmbedderHeapTracer::FinalizeTracing() {
- if (v8_isolate_) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate_);
- if (i_isolate->heap()->incremental_marking()->IsMarking()) {
- i_isolate->heap()->FinalizeIncrementalMarkingAtomically(
- i::GarbageCollectionReason::kExternalFinalize);
- }
- }
-}
-
-void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
- if (v8_isolate_) {
- i::LocalEmbedderHeapTracer* const tracer =
- reinterpret_cast<i::Isolate*>(v8_isolate_)
- ->heap()
- ->local_embedder_heap_tracer();
- DCHECK_NOT_NULL(tracer);
- tracer->IncreaseAllocatedSize(bytes);
- }
-}
-
-void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
- if (v8_isolate_) {
- i::LocalEmbedderHeapTracer* const tracer =
- reinterpret_cast<i::Isolate*>(v8_isolate_)
- ->heap()
- ->local_embedder_heap_tracer();
- DCHECK_NOT_NULL(tracer);
- tracer->DecreaseAllocatedSize(bytes);
- }
-}
-
-void EmbedderHeapTracer::RegisterEmbedderReference(
- const BasicTracedReference<v8::Data>& ref) {
- if (ref.IsEmpty()) return;
-
- i::Heap* const heap = reinterpret_cast<i::Isolate*>(v8_isolate_)->heap();
- heap->RegisterExternallyReferencedObject(
- reinterpret_cast<i::Address*>(ref.val_));
-}
-
-void EmbedderHeapTracer::IterateTracedGlobalHandles(
- TracedGlobalHandleVisitor* visitor) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate_);
- i::DisallowGarbageCollection no_gc;
- i_isolate->traced_handles()->Iterate(visitor);
-}
-
-bool EmbedderHeapTracer::IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) {
- return true;
-}
-
-void EmbedderHeapTracer::ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) {
- UNREACHABLE();
-}
-
EmbedderStateScope::EmbedderStateScope(Isolate* v8_isolate,
Local<v8::Context> context,
EmbedderStateTag tag)
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 855ada9f0b..c9450a44b3 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -15,6 +15,7 @@
#include "src/execution/isolate.h"
#include "src/objects/bigint.h"
#include "src/objects/contexts.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
@@ -92,6 +93,46 @@ class RegisteredExtension {
static RegisteredExtension* first_extension_;
};
+#define TO_LOCAL_LIST(V) \
+ V(ToLocal, AccessorPair, debug::AccessorPair) \
+ V(ToLocal, Context, Context) \
+ V(ToLocal, Object, Value) \
+ V(ToLocal, Module, Module) \
+ V(ToLocal, Name, Name) \
+ V(ToLocal, String, String) \
+ V(ToLocal, Symbol, Symbol) \
+ V(ToLocal, JSRegExp, RegExp) \
+ V(ToLocal, JSReceiver, Object) \
+ V(ToLocal, JSObject, Object) \
+ V(ToLocal, JSFunction, Function) \
+ V(ToLocal, JSArray, Array) \
+ V(ToLocal, JSMap, Map) \
+ V(ToLocal, JSSet, Set) \
+ V(ToLocal, JSProxy, Proxy) \
+ V(ToLocal, JSArrayBuffer, ArrayBuffer) \
+ V(ToLocal, JSArrayBufferView, ArrayBufferView) \
+ V(ToLocal, JSDataView, DataView) \
+ V(ToLocal, JSRabGsabDataView, DataView) \
+ V(ToLocal, JSTypedArray, TypedArray) \
+ V(ToLocalShared, JSArrayBuffer, SharedArrayBuffer) \
+ V(ToLocal, FunctionTemplateInfo, FunctionTemplate) \
+ V(ToLocal, ObjectTemplateInfo, ObjectTemplate) \
+ V(SignatureToLocal, FunctionTemplateInfo, Signature) \
+ V(MessageToLocal, Object, Message) \
+ V(PromiseToLocal, JSObject, Promise) \
+ V(StackTraceToLocal, FixedArray, StackTrace) \
+ V(StackFrameToLocal, StackFrameInfo, StackFrame) \
+ V(NumberToLocal, Object, Number) \
+ V(IntegerToLocal, Object, Integer) \
+ V(Uint32ToLocal, Object, Uint32) \
+ V(ToLocal, BigInt, BigInt) \
+ V(ExternalToLocal, JSObject, External) \
+ V(CallableToLocal, JSReceiver, Function) \
+ V(ToLocalPrimitive, Object, Primitive) \
+ V(FixedArrayToLocal, FixedArray, FixedArray) \
+ V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) \
+ V(ToLocal, ScriptOrModule, ScriptOrModule)
+
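For orientation, each TO_LOCAL_LIST entry is expanded by the DECLARE_TO_LOCAL(Name, From, To) macro applied in class Utils below, so an entry such as V(ToLocal, JSArrayBuffer, ArrayBuffer) reproduces the declaration that used to be spelled out by hand:

static inline Local<v8::ArrayBuffer> ToLocal(
    v8::internal::Handle<v8::internal::JSArrayBuffer> obj);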
#define OPEN_HANDLE_LIST(V) \
V(Template, TemplateInfo) \
V(FunctionTemplate, FunctionTemplateInfo) \
@@ -115,7 +156,7 @@ class RegisteredExtension {
V(Int32Array, JSTypedArray) \
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
- V(DataView, JSDataView) \
+ V(DataView, JSDataViewOrRabGsabDataView) \
V(SharedArrayBuffer, JSArrayBuffer) \
V(Name, Name) \
V(String, String) \
@@ -155,104 +196,17 @@ class Utils {
static void ReportOOMFailure(v8::internal::Isolate* isolate,
const char* location, const OOMDetails& details);
- static inline Local<debug::AccessorPair> ToLocal(
- v8::internal::Handle<v8::internal::AccessorPair> obj);
- static inline Local<Context> ToLocal(
- v8::internal::Handle<v8::internal::Context> obj);
- static inline Local<Value> ToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Module> ToLocal(
- v8::internal::Handle<v8::internal::Module> obj);
- static inline Local<Name> ToLocal(
- v8::internal::Handle<v8::internal::Name> obj);
- static inline Local<String> ToLocal(
- v8::internal::Handle<v8::internal::String> obj);
- static inline Local<Symbol> ToLocal(
- v8::internal::Handle<v8::internal::Symbol> obj);
- static inline Local<RegExp> ToLocal(
- v8::internal::Handle<v8::internal::JSRegExp> obj);
- static inline Local<Object> ToLocal(
- v8::internal::Handle<v8::internal::JSReceiver> obj);
- static inline Local<Object> ToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<Function> ToLocal(
- v8::internal::Handle<v8::internal::JSFunction> obj);
- static inline Local<Array> ToLocal(
- v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<Map> ToLocal(
- v8::internal::Handle<v8::internal::JSMap> obj);
- static inline Local<Set> ToLocal(
- v8::internal::Handle<v8::internal::JSSet> obj);
- static inline Local<Proxy> ToLocal(
- v8::internal::Handle<v8::internal::JSProxy> obj);
- static inline Local<ArrayBuffer> ToLocal(
- v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
- static inline Local<ArrayBufferView> ToLocal(
- v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
- static inline Local<DataView> ToLocal(
- v8::internal::Handle<v8::internal::JSDataView> obj);
- static inline Local<TypedArray> ToLocal(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Uint8Array> ToLocalUint8Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Uint8ClampedArray> ToLocalUint8ClampedArray(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Int8Array> ToLocalInt8Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Uint16Array> ToLocalUint16Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Int16Array> ToLocalInt16Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Uint32Array> ToLocalUint32Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Int32Array> ToLocalInt32Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Float32Array> ToLocalFloat32Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<Float64Array> ToLocalFloat64Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<BigInt64Array> ToLocalBigInt64Array(
- v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<BigUint64Array> ToLocalBigUint64Array(
+#define DECLARE_TO_LOCAL(Name, From, To) \
+ static inline Local<v8::To> Name( \
+ v8::internal::Handle<v8::internal::From> obj);
+
+ TO_LOCAL_LIST(DECLARE_TO_LOCAL)
+
+#define DECLARE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
+ static inline Local<v8::Type##Array> ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<SharedArrayBuffer> ToLocalShared(
- v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
-
- static inline Local<Message> MessageToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Promise> PromiseToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<StackTrace> StackTraceToLocal(
- v8::internal::Handle<v8::internal::FixedArray> obj);
- static inline Local<StackFrame> StackFrameToLocal(
- v8::internal::Handle<v8::internal::StackFrameInfo> obj);
- static inline Local<Number> NumberToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Integer> IntegerToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Uint32> Uint32ToLocal(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<BigInt> ToLocal(
- v8::internal::Handle<v8::internal::BigInt> obj);
- static inline Local<FunctionTemplate> ToLocal(
- v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<ObjectTemplate> ToLocal(
- v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
- static inline Local<Signature> SignatureToLocal(
- v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<External> ExternalToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<Function> CallableToLocal(
- v8::internal::Handle<v8::internal::JSReceiver> obj);
- static inline Local<Primitive> ToLocalPrimitive(
- v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<FixedArray> FixedArrayToLocal(
- v8::internal::Handle<v8::internal::FixedArray> obj);
- static inline Local<PrimitiveArray> PrimitiveArrayToLocal(
- v8::internal::Handle<v8::internal::FixedArray> obj);
- static inline Local<ScriptOrModule> ToLocal(
- v8::internal::Handle<v8::internal::ScriptOrModule> obj);
+ TYPED_ARRAYS(DECLARE_TO_LOCAL_TYPED_ARRAY)
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> OpenHandle( \
@@ -261,6 +215,8 @@ class Utils {
OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
+#undef DECLARE_TO_LOCAL_TYPED_ARRAY
+#undef DECLARE_TO_LOCAL
template <class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj);
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index cb904a0be0..774153dfa5 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -77,7 +77,7 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
shared.builtin_id() != Builtin::kMath##FName) { \
return false; \
} \
- DCHECK_EQ(shared.GetCode(), \
+ DCHECK_EQ(shared.GetCode(isolate), \
isolate->builtins()->code(Builtin::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index d1c59ea601..9346be040b 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -2240,7 +2240,7 @@ AsmType* AsmJsParser::ValidateCall() {
function_type->AsFunctionType()->AddArgument(t);
}
FunctionSig* sig = ConvertSignature(return_type, param_types);
- uint32_t signature_index = module_builder_->AddSignature(sig);
+ uint32_t signature_index = module_builder_->AddSignature(sig, true);
// Emit actual function invocation depending on the kind. At this point we
// also determined the complete function type and can perform checking against
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 05105be91d..92dd17a867 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -133,7 +133,7 @@ class AsmJsParser {
public:
explicit CachedVectors(Zone* zone) : reusable_vectors_(zone) {}
- Zone* zone() const { return reusable_vectors_.get_allocator().zone(); }
+ Zone* zone() const { return reusable_vectors_.zone(); }
inline void fill(ZoneVector<T>* vec) {
if (reusable_vectors_.empty()) return;
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index 6dbeb3ac3a..90903b5ced 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -130,7 +130,7 @@ class V8_EXPORT_PRIVATE AsmFunctionType final : public AsmCallableType {
AsmFunctionType* AsFunctionType() final { return this; }
void AddArgument(AsmType* type) { args_.push_back(type); }
- const ZoneVector<AsmType*> Arguments() const { return args_; }
+ const ZoneVector<AsmType*>& Arguments() const { return args_; }
AsmType* ReturnType() const { return return_type_; }
bool CanBeInvokedWith(AsmType* return_type,
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 20ee458b44..6cf5796c66 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -181,6 +181,10 @@ int AstRawString::Compare(const AstRawString* lhs, const AstRawString* rhs) {
return lhs->byte_length() - rhs->byte_length();
}
+#ifdef OBJECT_PRINT
+void AstRawString::Print() const { printf("%.*s", byte_length(), raw_data()); }
+#endif // OBJECT_PRINT
+
template <typename IsolateT>
Handle<String> AstConsString::Allocate(IsolateT* isolate) const {
DCHECK(string_.is_null());
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index e673718bfb..f407f83e84 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -90,6 +90,10 @@ class AstRawString final : public ZoneObject {
return string_;
}
+#ifdef OBJECT_PRINT
+ void Print() const;
+#endif // OBJECT_PRINT
+
private:
friend class AstRawStringInternalizationKey;
friend class AstStringConstants;
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 8473f7fb67..f111660d95 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -1613,7 +1613,9 @@ enum AssignType {
PRIVATE_METHOD, // obj.#key: #key is a private method
PRIVATE_GETTER_ONLY, // obj.#key: #key only has a getter defined
PRIVATE_SETTER_ONLY, // obj.#key: #key only has a setter defined
- PRIVATE_GETTER_AND_SETTER // obj.#key: #key has both accessors defined
+ PRIVATE_GETTER_AND_SETTER, // obj.#key: #key has both accessors defined
+  PRIVATE_DEBUG_DYNAMIC,        // obj.#key: #key is a private name that
+                                // requires dynamic lookup in debug-evaluate.
};
class Property final : public Expression {
@@ -1650,6 +1652,9 @@ class Property final : public Expression {
return PRIVATE_SETTER_ONLY;
case VariableMode::kPrivateGetterAndSetter:
return PRIVATE_GETTER_AND_SETTER;
+ case VariableMode::kDynamic:
+ // From debug-evaluate.
+ return PRIVATE_DEBUG_DYNAMIC;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index e0cb7da7af..5705ed9ca3 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -1367,6 +1367,10 @@ void AstPrinter::VisitProperty(Property* node) {
PrintIndentedVisit("KEY", node->key());
break;
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ PrintIndentedVisit("PRIVATE_DEBUG_DYNAMIC", node->key());
+ break;
+ }
case NON_PROPERTY:
UNREACHABLE();
}
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index a66e4ea93f..cd8be1caec 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -632,7 +632,8 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// scope and we terminate the iteration there anyway.
do {
Variable* var = query_scope->LookupInScopeOrScopeInfo(name, query_scope);
- if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+ if (var != nullptr && IsLexicalVariableMode(var->mode()) &&
+ !var->is_sloppy_block_function()) {
should_hoist = false;
break;
}
@@ -649,6 +650,19 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
auto declaration = factory->NewVariableDeclaration(pos);
// Based on the preceding checks, it doesn't matter what we pass as
// sloppy_mode_block_scope_function_redefinition.
+ //
+ // This synthesized var for Annex B functions-in-block (FiB) may be
+ // declared multiple times for the same var scope, such as in the case of
+ // shadowed functions-in-block like the following:
+ //
+ // {
+ // function f() {}
+ // { function f() {} }
+ // }
+ //
+ // Redeclarations for vars do not create new bindings, but the
+ // redeclarations' initializers are still run. That is, shadowed FiB will
+ // result in multiple assignments to the same synthesized var.
Variable* var = DeclareVariable(
declaration, name, pos, VariableMode::kVar, NORMAL_VARIABLE,
Variable::DefaultInitializationFlag(VariableMode::kVar), &was_added,
@@ -1263,8 +1277,9 @@ Declaration* DeclarationScope::CheckConflictingVarDeclarations(
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
Scope* current = decl->AsVariableDeclaration()->AsNested()->scope();
- DCHECK(decl->var()->mode() == VariableMode::kVar ||
- decl->var()->mode() == VariableMode::kDynamic);
+ if (decl->var()->mode() != VariableMode::kVar &&
+ decl->var()->mode() != VariableMode::kDynamic)
+ continue;
// Iterate through all scopes until the declaration scope.
do {
// There is a conflict if there exists a non-VAR binding.
@@ -1796,6 +1811,8 @@ const char* Header(ScopeType scope_type, FunctionKind function_kind,
case CLASS_SCOPE:
return "class";
case WITH_SCOPE: return "with";
+ case SHADOW_REALM_SCOPE:
+ return "shadowrealm";
}
UNREACHABLE();
}
@@ -2058,6 +2075,15 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
return var;
}
+void Scope::ForceDynamicLookup(VariableProxy* proxy) {
+ // At the moment this is only used for looking up private names dynamically
+ // in debug-evaluate from top-level scope.
+ DCHECK(proxy->IsPrivateName());
+ DCHECK(is_script_scope() || is_module_scope() || is_eval_scope());
+ Variable* dynamic = NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ proxy->BindTo(dynamic);
+}
+
// static
template <Scope::ScopeLookupMode mode>
Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
@@ -3109,6 +3135,13 @@ void PrivateNameScopeIterator::AddUnresolvedPrivateName(VariableProxy* proxy) {
// be new.
DCHECK(!proxy->is_resolved());
DCHECK(proxy->IsPrivateName());
+
+ // Use dynamic lookup for top-level scopes in debug-evaluate.
+ if (Done()) {
+ start_scope_->ForceDynamicLookup(proxy);
+ return;
+ }
+
GetScope()->EnsureRareData()->unresolved_private_names.Add(proxy);
// Any closure scope that contain uses of private names that skips over a
// class scope due to heritage expressions need private name context chain
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 3d06268564..cba189ba0e 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -637,6 +637,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return nullptr;
}
+ void ForceDynamicLookup(VariableProxy* proxy);
+
protected:
explicit Scope(Zone* zone);
diff --git a/deps/v8/src/base/DEPS b/deps/v8/src/base/DEPS
index a9c31c20d6..3cead70516 100644
--- a/deps/v8/src/base/DEPS
+++ b/deps/v8/src/base/DEPS
@@ -5,3 +5,9 @@ include_rules = [
"-src",
"+src/base",
]
+
+specific_include_rules = {
+ "ieee754.h": [
+ "+third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h"
+ ],
+}
diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h
index ccfc23a065..06db44e3b2 100644
--- a/deps/v8/src/base/bit-field.h
+++ b/deps/v8/src/base/bit-field.h
@@ -62,7 +62,7 @@ class BitField final {
}
// Returns a type U with the bit field value updated.
- static constexpr U update(U previous, T value) {
+ V8_NODISCARD static constexpr U update(U previous, T value) {
return (previous & ~kMask) | encode(value);
}
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 0cb22a9a90..2d61878274 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -310,9 +310,13 @@ inline bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+#if V8_HAS_BUILTIN_ADD_OVERFLOW
+ return __builtin_add_overflow(lhs, rhs, val);
+#else
uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
*val = base::bit_cast<int64_t>(res);
return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
+#endif
}
@@ -320,9 +324,34 @@ inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+#if V8_HAS_BUILTIN_SUB_OVERFLOW
+ return __builtin_sub_overflow(lhs, rhs, val);
+#else
uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
*val = base::bit_cast<int64_t>(res);
return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
+#endif
+}
+
+// SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs|
+// and |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed multiplication resulted in an overflow.
+inline bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+#if V8_HAS_BUILTIN_MUL_OVERFLOW
+ return __builtin_mul_overflow(lhs, rhs, val);
+#else
+ int64_t res = base::bit_cast<int64_t>(static_cast<uint64_t>(lhs) *
+ static_cast<uint64_t>(rhs));
+ *val = res;
+
+ // Check for INT64_MIN / -1 as it's undefined behaviour and could cause
+ // hardware exceptions.
+ if ((res == INT64_MIN && lhs == -1)) {
+ return true;
+ }
+
+ return lhs != 0 && (res / lhs) != rhs;
+#endif
}
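A usage sketch for the new 64-bit helper; it mirrors the existing 32-bit variants.

#include <cstdint>
#include "src/base/bits.h"

void MulExample() {
  int64_t product;
  // The wrapped two's-complement result is always stored through |product|;
  // the return value reports whether the mathematical product fit in int64_t.
  if (v8::base::bits::SignedMulOverflow64(INT64_MAX, 2, &product)) {
    // Overflowed: fall back to a slow path, throw, etc.
  }
}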
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index ff4a936709..673330236c 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -28,7 +28,8 @@
#endif
// pthread_jit_write_protect is only available on arm64 Mac.
-#if defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)
+#if defined(V8_HOST_ARCH_ARM64) && \
+ (defined(V8_OS_MACOS) || (defined(V8_OS_IOS) && TARGET_OS_SIMULATOR))
#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 1
#else
#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 0
@@ -70,9 +71,10 @@ constexpr int kPageSizeBits = 18;
// The minimal supported page size by the operation system. Any region aligned
// to that size needs to be individually protectable via
// {base::OS::SetPermission} and friends.
-#if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \
- defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64)
-// MacOS on arm64 uses 16kB pages.
+#if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \
+ defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64) || \
+ defined(V8_OS_IOS)
+// MacOS and iOS on arm64 use 16kB pages.
// LOONG64 and MIPS64 also use 16kB pages.
constexpr int kMinimumOSPageSize = 16 * 1024;
#elif defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID) && \
diff --git a/deps/v8/src/base/container-utils.h b/deps/v8/src/base/container-utils.h
index 66a94bbd43..bf6826d5a8 100644
--- a/deps/v8/src/base/container-utils.h
+++ b/deps/v8/src/base/container-utils.h
@@ -6,6 +6,7 @@
#define V8_BASE_CONTAINER_UTILS_H_
#include <algorithm>
+#include <iterator>
#include <optional>
#include <vector>
@@ -14,16 +15,16 @@ namespace v8::base {
// Returns true iff the {element} is found in the {container}.
template <typename C, typename T>
bool contains(const C& container, const T& element) {
- const auto e = end(container);
- return std::find(begin(container), e, element) != e;
+ const auto e = std::end(container);
+ return std::find(std::begin(container), e, element) != e;
}
// Returns the first index of {element} in {container}. Returns std::nullopt if
// {container} does not contain {element}.
template <typename C, typename T>
std::optional<size_t> index_of(const C& container, const T& element) {
- const auto b = begin(container);
- const auto e = end(container);
+ const auto b = std::begin(container);
+ const auto e = std::end(container);
if (auto it = std::find(b, e, element); it != e) {
return {std::distance(b, it)};
}
@@ -34,8 +35,8 @@ std::optional<size_t> index_of(const C& container, const T& element) {
// {predicate}. Returns std::nullopt if no element satisfies {predicate}.
template <typename C, typename P>
std::optional<size_t> index_of_if(const C& container, const P& predicate) {
- const auto b = begin(container);
- const auto e = end(container);
+ const auto b = std::begin(container);
+ const auto e = std::end(container);
if (auto it = std::find_if(b, e, predicate); it != e) {
return {std::distance(b, it)};
}
@@ -48,9 +49,9 @@ std::optional<size_t> index_of_if(const C& container, const P& predicate) {
template <typename C>
inline size_t erase_at(C& container, size_t index, size_t count = 1) {
// TODO(C++20): Replace with std::erase.
- if (size(container) <= index) return 0;
- auto start = begin(container) + index;
- count = std::min<size_t>(count, std::distance(start, end(container)));
+ if (std::size(container) <= index) return 0;
+ auto start = std::begin(container) + index;
+ count = std::min<size_t>(count, std::distance(start, std::end(container)));
container.erase(start, start + count);
return count;
}
@@ -60,43 +61,48 @@ inline size_t erase_at(C& container, size_t index, size_t count = 1) {
// TODO(C++20): Replace with std::erase_if.
template <typename C, typename P>
inline size_t erase_if(C& container, const P& predicate) {
- size_t count = 0;
- auto e = end(container);
- for (auto it = begin(container); it != e;) {
- it = std::find_if(it, e, predicate);
- if (it == e) break;
- it = container.erase(it);
- e = end(container);
- ++count;
- }
+ auto it =
+ std::remove_if(std::begin(container), std::end(container), predicate);
+ auto count = std::distance(it, std::end(container));
+ container.erase(it, std::end(container));
return count;
}
// Helper for std::count_if.
template <typename C, typename P>
inline size_t count_if(const C& container, const P& predicate) {
- return std::count_if(begin(container), end(container), predicate);
+ return std::count_if(std::begin(container), std::end(container), predicate);
}
// Helper for std::all_of.
template <typename C, typename P>
inline bool all_of(const C& container, const P& predicate) {
- return std::all_of(begin(container), end(container), predicate);
+ return std::all_of(std::begin(container), std::end(container), predicate);
}
// Helper for std::none_of.
template <typename C, typename P>
inline bool none_of(const C& container, const P& predicate) {
- return std::none_of(begin(container), end(container), predicate);
+ return std::none_of(std::begin(container), std::end(container), predicate);
+}
+
+// Helper for std::sort.
+template <typename C>
+inline void sort(C& container) {
+ std::sort(std::begin(container), std::end(container));
+}
+template <typename C, typename Comp>
+inline void sort(C& container, Comp comp) {
+ std::sort(std::begin(container), std::end(container), comp);
}
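A usage sketch for the helpers in this header; they forward to the corresponding <algorithm> calls over std::begin/std::end, so any container exposing those iterators works.

#include <vector>
#include "src/base/container-utils.h"

void ContainerExample() {
  std::vector<int> v = {3, 1, 2, 1};
  v8::base::sort(v);                                    // v == {1, 1, 2, 3}
  v8::base::erase_if(v, [](int x) { return x == 1; });  // v == {2, 3}
  bool has_two = v8::base::contains(v, 2);              // true
  (void)has_two;
}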
// Returns true iff all elements of {container} compare equal using operator==.
template <typename C>
inline bool all_equal(const C& container) {
- if (size(container) <= 1) return true;
- auto b = begin(container);
+ if (std::size(container) <= 1) return true;
+ auto b = std::begin(container);
const auto& value = *b;
- return std::all_of(++b, end(container),
+ return std::all_of(++b, std::end(container),
[&](const auto& v) { return v == value; });
}
@@ -104,15 +110,15 @@ inline bool all_equal(const C& container) {
// operator==.
template <typename C, typename T>
inline bool all_equal(const C& container, const T& value) {
- return std::all_of(begin(container), end(container),
+ return std::all_of(std::begin(container), std::end(container),
[&](const auto& v) { return v == value; });
}
-// Appends to vector {v} all the elements in the range {begin(container)} and
-// {end(container)}.
-template <typename T, typename A, typename C>
-inline void vector_append(std::vector<T, A>& v, const C& container) {
- v.insert(end(v), begin(container), end(container));
+// Appends to vector {v} all the elements in the range {std::begin(container)}
+// and {std::end(container)}.
+template <typename V, typename C>
+inline void vector_append(V& v, const C& container) {
+ v.insert(std::end(v), std::begin(container), std::end(container));
}
} // namespace v8::base
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/base/contextual.h
index 76723e03c0..7f575997da 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/base/contextual.h
@@ -2,20 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TORQUE_CONTEXTUAL_H_
-#define V8_TORQUE_CONTEXTUAL_H_
+#ifndef V8_BASE_CONTEXTUAL_H_
+#define V8_BASE_CONTEXTUAL_H_
#include <type_traits>
+#include "src/base/export-template.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-namespace v8 {
-namespace internal {
-namespace torque {
-
-template <class Variable>
-V8_EXPORT_PRIVATE typename Variable::Scope*& ContextualVariableTop();
+namespace v8::base {
// {ContextualVariable} provides a clean alternative to a global variable.
// The contextual variable is mutable, and supports managing the value of
@@ -28,8 +24,10 @@ V8_EXPORT_PRIVATE typename Variable::Scope*& ContextualVariableTop();
// Note that contextual variables must only be used from the same thread,
// i.e. {Scope} and Get() have to be in the same thread.
template <class Derived, class VarType>
-class ContextualVariable {
+class V8_EXPORT_PRIVATE ContextualVariable {
public:
+ using VarT = VarType;
+
// A {Scope} contains a new object of type {VarType} and gives
// ContextualVariable::Get() access to it. Upon destruction, the contextual
// variable is restored to the state before the {Scope} was created. Scopes
@@ -47,6 +45,7 @@ class ContextualVariable {
DCHECK_EQ(this, Top());
Top() = previous_;
}
+
Scope(const Scope&) = delete;
Scope& operator=(const Scope&) = delete;
@@ -62,32 +61,42 @@ class ContextualVariable {
DISALLOW_NEW_AND_DELETE()
};
- // Access the most recent active {Scope}. There has to be an active {Scope}
- // for this contextual variable.
static VarType& Get() {
- DCHECK_NOT_NULL(Top());
+ DCHECK(HasScope());
return Top()->Value();
}
- private:
- template <class T>
- friend V8_EXPORT_PRIVATE typename T::Scope*& ContextualVariableTop();
- static Scope*& Top() { return ContextualVariableTop<Derived>(); }
-
static bool HasScope() { return Top() != nullptr; }
- friend class MessageBuilder;
+
+ private:
+ inline static thread_local Scope* top_ = nullptr;
+
+ // If there is a linking error for `Top()`, then the contextual variable
+ // probably needs to be exported using EXPORT_CONTEXTUAL_VARIABLE.
+#if defined(USING_V8_SHARED)
+ // Hide the definition from other DLLs/libraries to avoid access to `top_`,
+ // since access to thread_local variables from other DLLs/libraries does not
+ // work correctly.
+ static Scope*& Top();
+#else
+ static Scope*& Top() { return top_; }
+#endif
};
// Usage: DECLARE_CONTEXTUAL_VARIABLE(VarName, VarType)
#define DECLARE_CONTEXTUAL_VARIABLE(VarName, ...) \
- struct VarName \
- : v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {}
-
-#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
- template <> \
- V8_EXPORT_PRIVATE VarName::Scope*& ContextualVariableTop<VarName>() { \
- static thread_local VarName::Scope* top = nullptr; \
- return top; \
+ struct VarName : ::v8::base::ContextualVariable<VarName, __VA_ARGS__> {}
+
+// Contextual variables that are accessed in tests need to be
+// exported. For this, place the following macro in the global namespace inside
+// of a .cc file.
+#define EXPORT_CONTEXTUAL_VARIABLE(VarName) \
+ namespace v8::base { \
+ template <> \
+ V8_EXPORT_PRIVATE typename VarName::Scope*& \
+ ContextualVariable<VarName, typename VarName::VarT>::Top() { \
+ return top_; \
+ } \
}
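A hedged sketch of the usage pattern after the move into v8::base; the variable name and value type are made up, and the Scope constructor is assumed to forward its arguments to the wrapped value.

DECLARE_CONTEXTUAL_VARIABLE(CurrentPhaseName, const char*);

void ContextualExample() {
  // Binds a thread-local value for the lifetime of the scope.
  CurrentPhaseName::Scope scope("parsing");
  if (CurrentPhaseName::HasScope()) {
    const char* phase = CurrentPhaseName::Get();  // "parsing"
    (void)phase;
  }
}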
// By inheriting from {ContextualClass} a class can become a contextual variable
@@ -95,8 +104,6 @@ class ContextualVariable {
template <class T>
using ContextualClass = ContextualVariable<T, T>;
-} // namespace torque
-} // namespace internal
-} // namespace v8
+} // namespace v8::base
-#endif // V8_TORQUE_CONTEXTUAL_H_
+#endif // V8_BASE_CONTEXTUAL_H_
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index f716403b05..fdef3c6695 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -404,6 +404,8 @@ CPU::CPU()
has_vfp3_(false),
has_vfp3_d32_(false),
has_jscvt_(false),
+ has_dot_prod_(false),
+ has_lse_(false),
is_fp64_mode_(false),
has_non_stop_time_stamp_counter_(false),
is_running_in_vm_(false),
@@ -726,20 +728,29 @@ CPU::CPU()
#if !defined(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE)
constexpr int PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44;
#endif
+#if !defined(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE)
+ constexpr int PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE = 43;
+#endif
has_jscvt_ =
IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE);
+ has_dot_prod_ =
+ IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE);
#elif V8_OS_LINUX
// Try to extract the list of CPU features from ELF hwcaps.
uint32_t hwcaps = ReadELFHWCaps();
if (hwcaps != 0) {
has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0;
+ has_dot_prod_ = (hwcaps & HWCAP_ASIMDDP) != 0;
+ has_lse_ = (hwcaps & HWCAP_ATOMICS) != 0;
} else {
// Try to fallback to "Features" CPUInfo field
CPUInfo cpu_info;
char* features = cpu_info.ExtractField("Features");
has_jscvt_ = HasListItem(features, "jscvt");
+ has_dot_prod_ = HasListItem(features, "asimddp");
+ has_lse_ = HasListItem(features, "atomics");
delete[] features;
}
#elif V8_OS_DARWIN
@@ -752,9 +763,27 @@ CPU::CPU()
} else {
has_jscvt_ = feat_jscvt;
}
+ int64_t feat_dot_prod = 0;
+ size_t feat_dot_prod_size = sizeof(feat_dot_prod);
+ if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &feat_dot_prod,
+ &feat_dot_prod_size, nullptr, 0) == -1) {
+ has_dot_prod_ = false;
+ } else {
+ has_dot_prod_ = feat_dot_prod;
+ }
+ int64_t feat_lse = 0;
+ size_t feat_lse_size = sizeof(feat_lse);
+ if (sysctlbyname("hw.optional.arm.FEAT_LSE", &feat_lse, &feat_lse_size,
+ nullptr, 0) == -1) {
+ has_lse_ = false;
+ } else {
+ has_lse_ = feat_lse;
+ }
#else
- // ARM64 Macs always have JSCVT.
+ // ARM64 Macs always have JSCVT, ASIMDDP and LSE.
has_jscvt_ = true;
+ has_dot_prod_ = true;
+ has_lse_ = true;
#endif // V8_OS_IOS
#endif // V8_OS_WIN
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 3050f2c466..ff22712625 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -123,6 +123,8 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
bool has_jscvt() const { return has_jscvt_; }
+ bool has_dot_prod() const { return has_dot_prod_; }
+ bool has_lse() const { return has_lse_; }
// mips features
bool is_fp64_mode() const { return is_fp64_mode_; }
@@ -176,6 +178,8 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3_;
bool has_vfp3_d32_;
bool has_jscvt_;
+ bool has_dot_prod_;
+ bool has_lse_;
bool is_fp64_mode_;
bool has_non_stop_time_stamp_counter_;
bool is_running_in_vm_;
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index 73672001cf..e71b63fd7c 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -105,9 +105,9 @@ namespace {
} while (false)
int32_t __ieee754_rem_pio2(double x, double* y) V8_WARN_UNUSED_RESULT;
-double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT;
int __kernel_rem_pio2(double* x, double* y, int e0, int nx, int prec,
const int32_t* ipio2) V8_WARN_UNUSED_RESULT;
+double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT;
double __kernel_sin(double x, double y, int iy) V8_WARN_UNUSED_RESULT;
/* __ieee754_rem_pio2(x,y)
@@ -1348,7 +1348,11 @@ double atan2(double y, double x) {
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+double fdlibm_cos(double x) {
+#else
double cos(double x) {
+#endif
double y[2], z = 0.0;
int32_t n, ix;
@@ -2440,7 +2444,11 @@ double cbrt(double x) {
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+double fdlibm_sin(double x) {
+#else
double sin(double x) {
+#endif
double y[2], z = 0.0;
int32_t n, ix;
@@ -3015,6 +3023,11 @@ double tanh(double x) {
#undef SET_HIGH_WORD
#undef SET_LOW_WORD
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS) && defined(BUILDING_V8_BASE_SHARED)
+double libm_sin(double x) { return glibc_sin(x); }
+double libm_cos(double x) { return glibc_cos(x); }
+#endif
+
} // namespace ieee754
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/ieee754.h b/deps/v8/src/base/ieee754.h
index f2b3a3eb58..953b5b00e6 100644
--- a/deps/v8/src/base/ieee754.h
+++ b/deps/v8/src/base/ieee754.h
@@ -7,6 +7,10 @@
#include "src/base/base-export.h"
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+#include "third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h" // nogncheck
+#endif
+
namespace v8 {
namespace base {
namespace ieee754 {
@@ -33,8 +37,24 @@ V8_BASE_EXPORT double atan(double x);
// the two arguments to determine the quadrant of the result.
V8_BASE_EXPORT double atan2(double y, double x);
-// Returns the cosine of |x|, where |x| is given in radians.
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+// To ensure there aren't problems with libm's sin/cos, both implementations
+// are shipped. The plan is to transition to libm once we ensure there are no
+// compatibility or performance issues.
+V8_BASE_EXPORT double fdlibm_sin(double x);
+V8_BASE_EXPORT double fdlibm_cos(double x);
+
+#if !defined(BUILDING_V8_BASE_SHARED) && !defined(USING_V8_BASE_SHARED)
+inline double libm_sin(double x) { return glibc_sin(x); }
+inline double libm_cos(double x) { return glibc_cos(x); }
+#else
+V8_BASE_EXPORT double libm_sin(double x);
+V8_BASE_EXPORT double libm_cos(double x);
+#endif
+#else
V8_BASE_EXPORT double cos(double x);
+V8_BASE_EXPORT double sin(double x);
+#endif
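Under V8_USE_LIBM_TRIG_FUNCTIONS both variants are exported side by side, so a caller can choose at runtime; the plain bool used for selection here is illustrative, not the actual flag plumbing.

double SinImpl(double x, bool use_libm) {
#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
  return use_libm ? v8::base::ieee754::libm_sin(x)
                  : v8::base::ieee754::fdlibm_sin(x);
#else
  return v8::base::ieee754::sin(x);
#endif
}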
// Returns the base-e exponential of |x|.
V8_BASE_EXPORT double exp(double x);
@@ -68,9 +88,6 @@ V8_BASE_EXPORT double expm1(double x);
// behaviour is preserved for compatibility reasons.
V8_BASE_EXPORT double pow(double x, double y);
-// Returns the sine of |x|, where |x| is given in radians.
-V8_BASE_EXPORT double sin(double x);
-
// Returns the tangent of |x|, where |x| is given in radians.
V8_BASE_EXPORT double tan(double x);
diff --git a/deps/v8/src/base/immediate-crash.h b/deps/v8/src/base/immediate-crash.h
index 770cb273f9..ce6240ba06 100644
--- a/deps/v8/src/base/immediate-crash.h
+++ b/deps/v8/src/base/immediate-crash.h
@@ -140,9 +140,9 @@
[] { TRAP_SEQUENCE_(); }(); \
} while (false)
-#endif // !V8_CC_GCC
+#endif // !V8_CC_GNU
-#if defined(__clang__) || V8_CC_GCC
+#if defined(__clang__) || V8_CC_GNU
// __builtin_unreachable() hints to the compiler that this is noreturn and can
// be packed in the function epilogue.
diff --git a/deps/v8/src/base/ios-headers.h b/deps/v8/src/base/ios-headers.h
new file mode 100644
index 0000000000..6434435119
--- /dev/null
+++ b/deps/v8/src/base/ios-headers.h
@@ -0,0 +1,31 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_IOS_HEADERS_H_
+#define V8_BASE_IOS_HEADERS_H_
+
+// This file includes the necessary headers that are not part of the
+// iOS public SDK in order to support memory allocation on iOS.
+
+#include <mach/mach.h>
+#include <mach/vm_map.h>
+
+__BEGIN_DECLS
+
+kern_return_t mach_vm_remap(
+ vm_map_t target_task, mach_vm_address_t* target_address,
+ mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src_task,
+ mach_vm_address_t src_address, boolean_t copy, vm_prot_t* cur_protection,
+ vm_prot_t* max_protection, vm_inherit_t inheritance);
+
+kern_return_t mach_vm_map(vm_map_t target_task, mach_vm_address_t* address,
+ mach_vm_size_t size, mach_vm_offset_t mask, int flags,
+ mem_entry_name_port_t object,
+ memory_object_offset_t offset, boolean_t copy,
+ vm_prot_t cur_protection, vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
+__END_DECLS
+
+#endif // V8_BASE_IOS_HEADERS_H_
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index a8ae430599..e333aefd0c 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -46,8 +46,14 @@ V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
#endif // !defined(OFFICIAL_BUILD)
#endif // DEBUG
-#define UNIMPLEMENTED() FATAL("unimplemented code")
-#define UNREACHABLE() FATAL("unreachable code")
+namespace v8::base {
+// These string constants are pattern-matched by fuzzers.
+constexpr const char* kUnimplementedCodeMessage = "unimplemented code";
+constexpr const char* kUnreachableCodeMessage = "unreachable code";
+} // namespace v8::base
+
+#define UNIMPLEMENTED() FATAL(::v8::base::kUnimplementedCodeMessage)
+#define UNREACHABLE() FATAL(::v8::base::kUnreachableCodeMessage)
// g++ versions <= 8 cannot use UNREACHABLE() in a constexpr function.
// TODO(miladfarca): Remove once all compilers handle this properly.
#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 8)
diff --git a/deps/v8/src/base/numbers/diy-fp.h b/deps/v8/src/base/numbers/diy-fp.h
index d68e915585..205277e1fa 100644
--- a/deps/v8/src/base/numbers/diy-fp.h
+++ b/deps/v8/src/base/numbers/diy-fp.h
@@ -48,9 +48,18 @@ class DiyFp {
// returns a * b;
static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+#ifdef __SIZEOF_INT128__
+ // If we have compiler-assisted 64x64 -> 128 muls (e.g. x86-64 and
+ // aarch64), we can use that for a faster, inlined implementation.
+ // This rounds the same way as Multiply().
+ uint64_t hi = (a.f_ * static_cast<unsigned __int128>(b.f_)) >> 64;
+ uint64_t lo = (a.f_ * static_cast<unsigned __int128>(b.f_));
+ return {hi + (lo >> 63), a.e_ + b.e_ + 64};
+#else
DiyFp result = a;
result.Multiply(b);
return result;
+#endif
}
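A worked check of the rounding in the __int128 branch (assumes __SIZEOF_INT128__ is defined): the result keeps the high 64 bits of the full product, rounded up by the top bit of the low half.

#include <cstdint>

void TimesRoundingExample() {
  // 2^63 * (2^63 + 1) = 2^126 + 2^63.
  unsigned __int128 p = static_cast<unsigned __int128>(0x8000000000000000ull) *
                        0x8000000000000001ull;
  uint64_t hi = static_cast<uint64_t>(p >> 64);  // 0x4000000000000000
  uint64_t lo = static_cast<uint64_t>(p);        // 0x8000000000000000
  uint64_t rounded = hi + (lo >> 63);            // 0x4000000000000001
  (void)rounded;
}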
void Normalize() {
diff --git a/deps/v8/src/base/numbers/double.h b/deps/v8/src/base/numbers/double.h
index c56ab7b0d0..3d319b1482 100644
--- a/deps/v8/src/base/numbers/double.h
+++ b/deps/v8/src/base/numbers/double.h
@@ -134,10 +134,9 @@ class Double {
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
DCHECK_GT(value(), 0.0);
DiyFp v = this->AsDiyFp();
- bool significand_is_zero = (v.f() == kHiddenBit);
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
DiyFp m_minus;
- if (significand_is_zero && v.e() != kDenormalExponent) {
+ if ((AsUint64() & kSignificandMask) == 0 && v.e() != kDenormalExponent) {
// The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
// Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
// at a distance of 1e8.
diff --git a/deps/v8/src/base/numbers/fast-dtoa.cc b/deps/v8/src/base/numbers/fast-dtoa.cc
index 7c0345a061..87b424c581 100644
--- a/deps/v8/src/base/numbers/fast-dtoa.cc
+++ b/deps/v8/src/base/numbers/fast-dtoa.cc
@@ -39,9 +39,9 @@ static const int kMaximalTargetExponent = -32;
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
-static bool RoundWeed(Vector<char> buffer, int length,
- uint64_t distance_too_high_w, uint64_t unsafe_interval,
- uint64_t rest, uint64_t ten_kappa, uint64_t unit) {
+static bool RoundWeed(char* last_digit, uint64_t distance_too_high_w,
+ uint64_t unsafe_interval, uint64_t rest,
+ uint64_t ten_kappa, uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
@@ -120,7 +120,7 @@ static bool RoundWeed(Vector<char> buffer, int length,
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w_high
small_distance - rest >= rest + ten_kappa - small_distance)) {
- buffer[length - 1]--;
+ --*last_digit;
rest += ten_kappa;
}
@@ -200,13 +200,62 @@ static const uint32_t kTen7 = 10000000;
static const uint32_t kTen8 = 100000000;
static const uint32_t kTen9 = 1000000000;
+struct DivMagic {
+ uint32_t mul;
+ uint32_t shift;
+};
+
+// This table was computed by libdivide. Essentially, the shift is
+// floor(log2(x)), and the mul is 2^(33 + shift) / x, rounded up and truncated
+// to 32 bits.
+static const DivMagic div[] = {
+ {0, 0}, // Not used, since 1 is not supported by the algorithm.
+ {0x9999999a, 3}, // 10
+ {0x47ae147b, 6}, // 100
+ {0x0624dd30, 9}, // 1000
+ {0xa36e2eb2, 13}, // 10000
+ {0x4f8b588f, 16}, // 100000
+ {0x0c6f7a0c, 19}, // 1000000
+ {0xad7f29ac, 23}, // 10000000
+ {0x5798ee24, 26} // 100000000
+};
+
+// Returns *val / divisor, and does *val %= divisor. d must be the DivMagic
+// corresponding to the divisor.
+//
+// This algorithm is exactly the same as libdivide's branch-free u32 algorithm,
+// except that we add back a branch anyway to support 1.
+//
+// GCC/Clang uses a slightly different algorithm that doesn't need
+// the extra rounding step (and that would allow us to do 1 without
+// a branch), but it requires a pre-shift for the case of 10000,
+// so it ends up slower, at least on x86-64.
+//
+// Note that this is actually a small loss for certain CPUs with
+// a very fast divider (e.g. Zen 3), but a significant win for most
+// others (including the entire Skylake family).
+static inline uint32_t fast_divmod(uint32_t* val, uint32_t divisor,
+ const DivMagic& d) {
+ if (divisor == 1) {
+ uint32_t digit = *val;
+ *val = 0;
+ return digit;
+ } else {
+ uint32_t q = (static_cast<uint64_t>(*val) * d.mul) >> 32;
+ uint32_t t = ((*val - q) >> 1) + q;
+ uint32_t digit = t >> d.shift;
+ *val -= digit * divisor;
+ return digit;
+ }
+}
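
Editorial aside, not part of the upstream diff: each {mul, shift} pair turns a division by a constant into a multiply, an add and two shifts. A hedged self-check of the table entry for divisor 10 against plain division; the test helper name is hypothetical.

#include <cassert>
#include <cstdint>

void CheckMagicForTen() {
  const uint32_t mul = 0x9999999a;  // entry for divisor 10 from the table
  const uint32_t shift = 3;
  for (uint32_t val = 0; val < 1000000; ++val) {
    uint32_t q =
        static_cast<uint32_t>((static_cast<uint64_t>(val) * mul) >> 32);
    uint32_t t = ((val - q) >> 1) + q;
    assert((t >> shift) == val / 10);
  }
}
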
+
// Returns the biggest power of ten that is less than or equal to the given
// number. We furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then 0^-1 is returned
// The number of bits must be <= 32.
// Precondition: number < (1 << (number_bits + 1)).
-static void BiggestPowerTen(uint32_t number, int number_bits, uint32_t* power,
- int* exponent) {
+static inline void BiggestPowerTen(uint32_t number, int number_bits,
+ uint32_t* power, unsigned* exponent) {
switch (number_bits) {
case 32:
case 31:
@@ -354,8 +403,8 @@ static void BiggestPowerTen(uint32_t number, int number_bits, uint32_t* power,
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
-static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
- int* length, int* kappa) {
+static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, char** outptr,
+ int* kappa) {
DCHECK(low.e() == w.e() && w.e() == high.e());
DCHECK(low.f() + 1 <= high.f() - 1);
DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
@@ -389,20 +438,18 @@ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divisor;
- int divisor_exponent;
+ unsigned divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), &divisor,
&divisor_exponent);
*kappa = divisor_exponent + 1;
- *length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
- int digit = integrals / divisor;
- buffer[*length] = '0' + digit;
- (*length)++;
- integrals %= divisor;
+ uint32_t digit = fast_divmod(&integrals, divisor, div[divisor_exponent]);
+ **outptr = '0' + digit;
+ (*outptr)++;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
@@ -413,11 +460,17 @@ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
if (rest < unsafe_interval.f()) {
// Rounding down (by not emitting the remaining digits) yields a number
// that lies within the unsafe interval.
- return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
+ return RoundWeed(*outptr - 1, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
static_cast<uint64_t>(divisor) << -one.e(), unit);
}
+ if (*kappa <= 0) {
+ // Don't bother doing the division below. (The compiler ought to
+ // figure this out itself, but it doesn't.)
+ break;
+ }
divisor /= 10;
+ --divisor_exponent;
}
// The integrals have been generated. We are at the point of the decimal
@@ -435,12 +488,12 @@ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
unsafe_interval.set_f(unsafe_interval.f() * 10);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
- buffer[*length] = '0' + digit;
- (*length)++;
+ **outptr = '0' + digit;
+ (*outptr)++;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
if (fractionals < unsafe_interval.f()) {
- return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
+ return RoundWeed(*outptr - 1, DiyFp::Minus(too_high, w).f() * unit,
unsafe_interval.f(), fractionals, one.f(), unit);
}
}
@@ -492,7 +545,7 @@ static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
// Modulo by one is an and.
uint64_t fractionals = w.f() & (one.f() - 1);
uint32_t divisor;
- int divisor_exponent;
+ unsigned divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), &divisor,
&divisor_exponent);
*kappa = divisor_exponent + 1;
@@ -503,16 +556,16 @@ static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than 'integrals'.
while (*kappa > 0) {
- int digit = integrals / divisor;
+ uint32_t digit = fast_divmod(&integrals, divisor, div[divisor_exponent]);
buffer[*length] = '0' + digit;
(*length)++;
requested_digits--;
- integrals %= divisor;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
if (requested_digits == 0) break;
divisor /= 10;
+ --divisor_exponent;
}
if (requested_digits == 0) {
@@ -559,8 +612,7 @@ static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
-static bool Grisu3(double v, Vector<char> buffer, int* length,
- int* decimal_exponent) {
+static bool Grisu3(double v, char** outptr, int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
@@ -610,7 +662,7 @@ static bool Grisu3(double v, Vector<char> buffer, int* length,
// decreased by 2.
int kappa;
bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
- buffer, length, &kappa);
+ outptr, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
@@ -665,15 +717,20 @@ bool FastDtoa(double v, FastDtoaMode mode, int requested_digits,
DCHECK(!Double(v).IsSpecial());
bool result = false;
+ char* outptr = buffer.data();
int decimal_exponent = 0;
switch (mode) {
case FAST_DTOA_SHORTEST:
- result = Grisu3(v, buffer, length, &decimal_exponent);
+ result = Grisu3(v, &outptr, &decimal_exponent);
+ *length = static_cast<int>(outptr - buffer.data());
break;
- case FAST_DTOA_PRECISION:
- result =
- Grisu3Counted(v, requested_digits, buffer, length, &decimal_exponent);
+ case FAST_DTOA_PRECISION: {
+ int local_length = 0;
+ result = Grisu3Counted(v, requested_digits, buffer, &local_length,
+ &decimal_exponent);
+ *length = local_length;
break;
+ }
default:
UNREACHABLE();
}
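
Editorial aside, not part of the upstream diff: the refactoring above swaps (buffer, length) index bookkeeping for a moving write cursor, so the final length falls out of a pointer subtraction. A minimal analogue of the two styles, with hypothetical names (zero handling and digit order are ignored for brevity).

#include <cstdint>

// Index-based (old style): the callee updates a length counter.
void EmitDigitsIndexed(uint32_t value, char* buffer, int* length) {
  while (value != 0) {
    buffer[(*length)++] = '0' + static_cast<char>(value % 10);
    value /= 10;
  }
}

// Cursor-based (new style): the caller derives the length afterwards.
void EmitDigitsCursor(uint32_t value, char** outptr) {
  while (value != 0) {
    *(*outptr)++ = '0' + static_cast<char>(value % 10);
    value /= 10;
  }
}
// Usage: char buf[16]; char* out = buf; EmitDigitsCursor(1234, &out);
// int length = static_cast<int>(out - buf);  // as FastDtoa does above
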
diff --git a/deps/v8/src/base/platform/memory.h b/deps/v8/src/base/platform/memory.h
index 36d32eeb58..48b0d70ec1 100644
--- a/deps/v8/src/base/platform/memory.h
+++ b/deps/v8/src/base/platform/memory.h
@@ -126,8 +126,8 @@ inline size_t MallocUsableSize(void* ptr) {
// Mimics C++23 `allocation_result`.
template <class Pointer>
struct AllocationResult {
- Pointer ptr;
- size_t count;
+ Pointer ptr = nullptr;
+ size_t count = 0;
};
// Allocates at least `n * sizeof(T)` uninitialized storage but may allocate
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 9c9adda389..e6421f37ac 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -136,7 +136,7 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
-Stack::StackSlot Stack::GetStackStart() {
+Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
// pthread_getthrds_np creates 3 values:
// __pi_stackaddr, __pi_stacksize, __pi_stackend
diff --git a/deps/v8/src/base/platform/platform-darwin.cc b/deps/v8/src/base/platform/platform-darwin.cc
index bf360e3136..c563693d46 100644
--- a/deps/v8/src/base/platform/platform-darwin.cc
+++ b/deps/v8/src/base/platform/platform-darwin.cc
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Platform-specific code shared between macOS and iOS goes here. The macOS
-// specific part is in platform-macos.cc, the POSIX-compatible parts in
-// platform-posix.cc.
+// Platform-specific code shared between macOS and iOS goes here. The
+// POSIX-compatible parts are in platform-posix.cc.
#include <AvailabilityMacros.h>
#include <dlfcn.h>
@@ -16,6 +15,7 @@
#include <mach/mach_init.h>
#include <mach/semaphore.h>
#include <mach/task.h>
+#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <pthread.h>
#include <semaphore.h>
@@ -39,23 +39,60 @@
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
+#if defined(V8_TARGET_OS_IOS)
+#include "src/base/ios-headers.h"
+#else
+#include <mach/mach_vm.h>
+#endif
+
namespace v8 {
namespace base {
+namespace {
+
+vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ case OS::MemoryPermission::kNoAccessWillJitLater:
+ return VM_PROT_NONE;
+ case OS::MemoryPermission::kRead:
+ return VM_PROT_READ;
+ case OS::MemoryPermission::kReadWrite:
+ return VM_PROT_READ | VM_PROT_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ case OS::MemoryPermission::kReadExecute:
+ return VM_PROT_READ | VM_PROT_EXECUTE;
+ }
+ UNREACHABLE();
+}
+
+kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
+ mach_vm_size_t size, int flags,
+ mach_port_t port,
+ memory_object_offset_t offset,
+ vm_prot_t prot) {
+ vm_prot_t current_prot = prot;
+ vm_prot_t maximum_prot = current_prot;
+ return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset,
+ FALSE, current_prot, maximum_prot, VM_INHERIT_NONE);
+}
+
+} // namespace
+
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
if (header == nullptr) continue;
+ unsigned long size;
#if V8_HOST_ARCH_I32
- unsigned int size;
- char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+ uint8_t* code_ptr = getsectiondata(header, SEG_TEXT, SECT_TEXT, &size);
#else
- uint64_t size;
- char* code_ptr = getsectdatafromheader_64(
- reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
- &size);
+ const mach_header_64* header64 =
+ reinterpret_cast<const mach_header_64*>(header);
+ uint8_t* code_ptr = getsectiondata(header64, SEG_TEXT, SECT_TEXT, &size);
#endif
if (code_ptr == nullptr) continue;
const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
@@ -99,9 +136,105 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
-Stack::StackSlot Stack::GetStackStart() {
+Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
return pthread_get_stackaddr_np(pthread_self());
}
+// static
+PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
+ mach_vm_size_t vm_size = size;
+ mach_port_t port;
+ kern_return_t kr = mach_make_memory_entry_64(
+ mach_task_self(), &vm_size, 0,
+ MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
+ MACH_PORT_NULL);
+ if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
+ return SharedMemoryHandleFromMachMemoryEntry(port);
+}
+
+// static
+void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+ DCHECK_NE(kInvalidSharedMemoryHandle, handle);
+ mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
+ CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
+}
+
+// static
+void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
+ PlatformSharedMemoryHandle handle, uint64_t offset) {
+ DCHECK_EQ(0, size % AllocatePageSize());
+
+ mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(hint);
+ vm_prot_t prot = GetVMProtFromMemoryPermission(access);
+ mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
+ kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED,
+ shared_mem_port, offset, prot);
+
+ if (kr != KERN_SUCCESS) {
+ // Retry without hint.
+ kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port,
+ offset, prot);
+ }
+
+ if (kr != KERN_SUCCESS) return nullptr;
+ return reinterpret_cast<void*>(addr);
+}
+
+// static
+bool OS::RemapPages(const void* address, size_t size, void* new_address,
+ MemoryPermission access) {
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(address), AllocatePageSize()));
+ DCHECK(
+ IsAligned(reinterpret_cast<uintptr_t>(new_address), AllocatePageSize()));
+ DCHECK(IsAligned(size, AllocatePageSize()));
+
+ vm_prot_t cur_protection = GetVMProtFromMemoryPermission(access);
+ vm_prot_t max_protection;
+ // Asks the kernel to remap *on top* of an existing mapping, rather than
+ // copying the data.
+ int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
+ mach_vm_address_t target = reinterpret_cast<mach_vm_address_t>(new_address);
+ kern_return_t ret =
+ mach_vm_remap(mach_task_self(), &target, size, 0, flags, mach_task_self(),
+ reinterpret_cast<mach_vm_address_t>(address), FALSE,
+ &cur_protection, &max_protection, VM_INHERIT_NONE);
+
+ if (ret != KERN_SUCCESS) return false;
+
+ // Did we get the address we wanted?
+ CHECK_EQ(new_address, reinterpret_cast<void*>(target));
+
+ return true;
+}
+
+bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
+ OS::MemoryPermission access,
+ PlatformSharedMemoryHandle handle,
+ uint64_t offset) {
+ DCHECK(Contains(address, size));
+
+ vm_prot_t prot = GetVMProtFromMemoryPermission(access);
+ mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
+ mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
+ kern_return_t kr =
+ mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+ shared_mem_port, offset, prot);
+ return kr == KERN_SUCCESS;
+}
+
+// See platform-ios.cc for the iOS implementation.
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT && !defined(V8_OS_IOS)
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+
+V8_BASE_EXPORT void SetJitWriteProtected(int enable) {
+ pthread_jit_write_protect_np(enable);
+}
+
+#pragma clang diagnostic pop
+#endif
+
} // namespace base
} // namespace v8
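
Editorial aside, not part of the upstream diff: a hedged sketch of how a caller might use OS::RemapPages(), which asks the kernel to alias an existing mapping at a second, page-aligned address instead of copying it. The helper name and the fallback are hypothetical.

#include <cstddef>

#include "src/base/platform/platform.h"

void AliasReadOnlyRegion(const void* existing_pages, size_t size,
                         void* target_address) {
  // Both addresses and the size must be aligned to OS::AllocatePageSize().
  bool ok = v8::base::OS::RemapPages(existing_pages, size, target_address,
                                     v8::base::OS::MemoryPermission::kRead);
  if (!ok) {
    // Fall back to copying the data if the kernel refused the remap.
  }
}
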
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 55d600283f..4314b10709 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -104,7 +104,7 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
-Stack::StackSlot Stack::GetStackStart() {
+Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
pthread_attr_t attr;
int error;
pthread_attr_init(&attr);
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 2659336b3b..885bffa340 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -293,9 +293,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
}
void OS::SetDataReadOnly(void* address, size_t size) {
- // TODO(v8:13194): Figure out which API to use on fuchsia. {vmar.protect}
- // fails.
- // CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead));
+ CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead));
}
// static
diff --git a/deps/v8/src/base/platform/platform-ios.cc b/deps/v8/src/base/platform/platform-ios.cc
new file mode 100644
index 0000000000..f0fc18648d
--- /dev/null
+++ b/deps/v8/src/base/platform/platform-ios.cc
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/base-export.h"
+#include "src/base/build_config.h"
+
+// pthread_jit_write_protect_np is marked as not available in the iOS
+// SDK but it is there for the iOS simulator. So we provide a thunk
+// and a forward declaration in a compilation target that doesn't
+// include pthread.h to avoid the compiler error.
+extern "C" void pthread_jit_write_protect_np(int enable);
+
+namespace v8::base {
+
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT && defined(V8_OS_IOS)
+V8_BASE_EXPORT void SetJitWriteProtected(int enable) {
+ pthread_jit_write_protect_np(enable);
+}
+#endif
+
+} // namespace v8::base
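
Editorial aside, not part of the upstream diff: a hedged sketch of the usual pattern around the SetJitWriteProtected() wrapper on Apple Silicon, where JIT pages are per-thread either writable or executable, never both. The function name is hypothetical and instruction-cache flushing is omitted.

#include <cstddef>
#include <cstring>

#include "src/base/platform/platform.h"

#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
void PatchJitCode(void* code, const void* new_bytes, size_t size) {
  v8::base::SetJitWriteProtected(0);  // JIT pages writable, not executable
  std::memcpy(code, new_bytes, size);
  v8::base::SetJitWriteProtected(1);  // back to executable, not writable
}
#endif
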
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
deleted file mode 100644
index a97295b4cc..0000000000
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for MacOS goes here. Code shared between iOS and
-// macOS is in platform-darwin.cc, while the POSIX-compatible are in in
-// platform-posix.cc.
-
-#include <mach/mach.h>
-#include <mach/mach_vm.h>
-#include <mach/vm_map.h>
-
-#include "src/base/platform/platform.h"
-
-namespace v8 {
-namespace base {
-
-namespace {
-
-vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
- switch (access) {
- case OS::MemoryPermission::kNoAccess:
- case OS::MemoryPermission::kNoAccessWillJitLater:
- return VM_PROT_NONE;
- case OS::MemoryPermission::kRead:
- return VM_PROT_READ;
- case OS::MemoryPermission::kReadWrite:
- return VM_PROT_READ | VM_PROT_WRITE;
- case OS::MemoryPermission::kReadWriteExecute:
- return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
- case OS::MemoryPermission::kReadExecute:
- return VM_PROT_READ | VM_PROT_EXECUTE;
- }
- UNREACHABLE();
-}
-
-kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
- mach_vm_size_t size, int flags,
- mach_port_t port,
- memory_object_offset_t offset,
- vm_prot_t prot) {
- vm_prot_t current_prot = prot;
- vm_prot_t maximum_prot = current_prot;
- return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset,
- FALSE, current_prot, maximum_prot, VM_INHERIT_NONE);
-}
-
-} // namespace
-
-// static
-PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
- mach_vm_size_t vm_size = size;
- mach_port_t port;
- kern_return_t kr = mach_make_memory_entry_64(
- mach_task_self(), &vm_size, 0,
- MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
- MACH_PORT_NULL);
- if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
- return SharedMemoryHandleFromMachMemoryEntry(port);
-}
-
-// static
-void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
- DCHECK_NE(kInvalidSharedMemoryHandle, handle);
- mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
- CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
-}
-
-// static
-void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
- PlatformSharedMemoryHandle handle, uint64_t offset) {
- DCHECK_EQ(0, size % AllocatePageSize());
-
- mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(hint);
- vm_prot_t prot = GetVMProtFromMemoryPermission(access);
- mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
- kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED,
- shared_mem_port, offset, prot);
-
- if (kr != KERN_SUCCESS) {
- // Retry without hint.
- kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port,
- offset, prot);
- }
-
- if (kr != KERN_SUCCESS) return nullptr;
- return reinterpret_cast<void*>(addr);
-}
-
-// static
-bool OS::RemapPages(const void* address, size_t size, void* new_address,
- MemoryPermission access) {
- DCHECK(IsAligned(reinterpret_cast<uintptr_t>(address), AllocatePageSize()));
- DCHECK(
- IsAligned(reinterpret_cast<uintptr_t>(new_address), AllocatePageSize()));
- DCHECK(IsAligned(size, AllocatePageSize()));
-
- vm_prot_t cur_protection = GetVMProtFromMemoryPermission(access);
- vm_prot_t max_protection;
- // Asks the kernel to remap *on top* of an existing mapping, rather than
- // copying the data.
- int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
- mach_vm_address_t target = reinterpret_cast<mach_vm_address_t>(new_address);
- kern_return_t ret =
- mach_vm_remap(mach_task_self(), &target, size, 0, flags, mach_task_self(),
- reinterpret_cast<mach_vm_address_t>(address), FALSE,
- &cur_protection, &max_protection, VM_INHERIT_NONE);
-
- if (ret != KERN_SUCCESS) return false;
-
- // Did we get the address we wanted?
- CHECK_EQ(new_address, reinterpret_cast<void*>(target));
-
- return true;
-}
-
-bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
- OS::MemoryPermission access,
- PlatformSharedMemoryHandle handle,
- uint64_t offset) {
- DCHECK(Contains(address, size));
-
- vm_prot_t prot = GetVMProtFromMemoryPermission(access);
- mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
- mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
- kern_return_t kr =
- mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
- shared_mem_port, offset, prot);
- return kr == KERN_SUCCESS;
-}
-
-} // namespace base
-} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index d37b6219d4..40e870a346 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -444,8 +444,8 @@ void OS::Free(void* address, size_t size) {
CHECK_EQ(0, munmap(address, size));
}
-// macOS specific implementation in platform-macos.cc.
-#if !defined(V8_OS_MACOS)
+// Darwin specific implementation in platform-darwin.cc.
+#if !defined(V8_OS_DARWIN)
// static
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
PlatformSharedMemoryHandle handle, uint64_t offset) {
@@ -456,7 +456,7 @@ void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
if (result == MAP_FAILED) return nullptr;
return result;
}
-#endif // !defined(V8_OS_MACOS)
+#endif // !defined(V8_OS_DARWIN)
// static
void OS::FreeShared(void* address, size_t size) {
@@ -505,8 +505,9 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
#if defined(V8_OS_DARWIN)
- if (access != OS::MemoryPermission::kNoAccess)
+ if (access != OS::MemoryPermission::kNoAccess) {
madvise(address, size, MADV_FREE_REUSE);
+ }
#endif
return ret == 0;
@@ -554,14 +555,19 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
- if (ret != 0 && errno == ENOSYS)
+ if (ret != 0 && errno == ENOSYS) {
return true; // madvise is not available on all systems.
- if (ret != 0 && errno == EINVAL)
+ }
+ if (ret != 0 && errno == EINVAL) {
ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
+ }
#else
int ret = madvise(address, size, MADV_DONTNEED);
#endif
- return ret == 0;
+ // madvise with MADV_DONTNEED only fails on illegal parameters. That's a bug
+ // in the caller.
+ CHECK_EQ(0, ret);
+ return true;
}
#if !defined(_AIX)
@@ -576,9 +582,14 @@ bool OS::DecommitPages(void* address, size_t size) {
// shall be removed, as if by an appropriate call to munmap(), before the new
// mapping is established." As a consequence, the memory will be
// zero-initialized on next access.
- void* ptr = mmap(address, size, PROT_NONE,
+ void* ret = mmap(address, size, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- return ptr == address;
+ if (V8_UNLIKELY(ret == MAP_FAILED)) {
+ CHECK_EQ(ENOMEM, errno);
+ return false;
+ }
+ CHECK_EQ(ret, address);
+ return true;
}
#endif // !defined(_AIX)
@@ -612,8 +623,8 @@ void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
Free(reservation.base(), reservation.size());
}
-// macOS specific implementation in platform-macos.cc.
-#if !defined(V8_OS_MACOS)
+// Darwin specific implementation in platform-darwin.cc.
+#if !defined(V8_OS_DARWIN)
// static
// Need to disable CFI_ICALL due to the indirect call to memfd_create.
DISABLE_CFI_ICALL
@@ -646,7 +657,7 @@ void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
int fd = FileDescriptorFromSharedMemoryHandle(handle);
CHECK_EQ(0, close(fd));
}
-#endif // !defined(V8_OS_MACOS)
+#endif // !defined(V8_OS_DARWIN)
// static
bool OS::HasLazyCommits() {
@@ -1004,8 +1015,8 @@ bool AddressSpaceReservation::Free(void* address, size_t size) {
return OS::DecommitPages(address, size);
}
-// macOS specific implementation in platform-macos.cc.
-#if !defined(V8_OS_MACOS)
+// Darwin specific implementation in platform-darwin.cc.
+#if !defined(V8_OS_DARWIN)
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
@@ -1016,7 +1027,7 @@ bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
return mmap(address, size, prot, MAP_SHARED | MAP_FIXED, fd, offset) !=
MAP_FAILED;
}
-#endif // !defined(V8_OS_MACOS)
+#endif // !defined(V8_OS_DARWIN)
bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
DCHECK(Contains(address, size));
@@ -1230,7 +1241,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
!defined(V8_OS_SOLARIS)
// static
-Stack::StackSlot Stack::GetStackStart() {
+Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
pthread_attr_t attr;
int error = pthread_getattr_np(pthread_self(), &attr);
if (!error) {
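
Editorial aside, not part of the upstream diff: with the stricter checks above, DecommitPages() either succeeds (leaving fresh PROT_NONE pages that read as zero once re-permissioned) or reports ENOMEM. A hedged round-trip sketch; the helper name is hypothetical and the region is assumed page-aligned.

#include <cstddef>

#include "src/base/platform/platform.h"

void DropAndReusePages(void* region, size_t size) {
  using v8::base::OS;
  if (OS::DecommitPages(region, size)) {
    // The old contents are gone; make the zero-initialized pages usable again.
    OS::SetPermissions(region, size, OS::MemoryPermission::kReadWrite);
  }
}
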
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 8f5dd0c9f1..6ff02e3278 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -72,7 +72,7 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
-Stack::StackSlot Stack::GetStackStart() {
+Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
pthread_attr_t attr;
int error;
pthread_attr_init(&attr);
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index ac44c70e2c..280fe18f7f 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -1022,7 +1022,7 @@ void OS::SetDataReadOnly(void* address, size_t size) {
unsigned long old_protection;
CHECK(VirtualProtect(address, size, PAGE_READONLY, &old_protection));
- CHECK_EQ(PAGE_READWRITE, old_protection);
+ CHECK(old_protection == PAGE_READWRITE || old_protection == PAGE_WRITECOPY);
}
// static
@@ -1739,7 +1739,7 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
-Stack::StackSlot Stack::GetStackStart() {
+Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
#if defined(V8_TARGET_ARCH_X64)
return reinterpret_cast<void*>(
reinterpret_cast<NT_TIB64*>(NtCurrentTeb())->StackBase);
@@ -1753,7 +1753,7 @@ Stack::StackSlot Stack::GetStackStart() {
::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
return reinterpret_cast<void*>(highLimit);
#else
-#error Unsupported GetStackStart.
+#error Unsupported ObtainCurrentThreadStackStart.
#endif
}
diff --git a/deps/v8/src/base/platform/platform.cc b/deps/v8/src/base/platform/platform.cc
new file mode 100644
index 0000000000..3914b7daba
--- /dev/null
+++ b/deps/v8/src/base/platform/platform.cc
@@ -0,0 +1,29 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+
+namespace base {
+
+namespace {
+
+// A pointer to the current thread's stack beginning.
+thread_local void* thread_stack_start = nullptr;
+
+} // namespace
+
+// static
+Stack::StackSlot Stack::GetStackStart() {
+ DCHECK_IMPLIES(thread_stack_start,
+ thread_stack_start == ObtainCurrentThreadStackStart());
+
+ if (!thread_stack_start) {
+ thread_stack_start = ObtainCurrentThreadStackStart();
+ }
+ return thread_stack_start;
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 88d35540b1..27369a7a8d 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -316,7 +316,7 @@ class V8_BASE_EXPORT OS {
// Whether the platform supports mapping a given address in another location
// in the address space.
V8_WARN_UNUSED_RESULT static constexpr bool IsRemapPageSupported() {
-#if (defined(V8_OS_MACOS) || defined(V8_OS_LINUX)) && \
+#if (defined(V8_OS_DARWIN) || defined(V8_OS_LINUX)) && \
!(defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390X))
return true;
#else
@@ -654,14 +654,22 @@ class V8_BASE_EXPORT Stack {
constexpr size_t kAsanRealFrameOffsetBytes = 32;
void* real_frame = __asan_addr_is_in_fake_stack(
__asan_get_current_fake_stack(), slot, nullptr, nullptr);
- return real_frame
- ? (static_cast<char*>(real_frame) + kAsanRealFrameOffsetBytes)
- : slot;
+ return real_frame ? StackSlot(static_cast<char*>(real_frame) +
+ kAsanRealFrameOffsetBytes)
+ : slot;
#endif // V8_USE_ADDRESS_SANITIZER
return slot;
}
+
+ private:
+ // Returns the current thread stack start pointer.
+ static Stack::StackSlot ObtainCurrentThreadStackStart();
};
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+V8_BASE_EXPORT void SetJitWriteProtected(int enable);
+#endif
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index f5f418947b..b6da0a690c 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -705,6 +705,17 @@ TimeTicks InitialTimeTicksNowFunction() {
return g_time_ticks_now_function();
}
+#if V8_HOST_ARCH_ARM64
+// From MSDN, FILETIME "Contains a 64-bit value representing the number of
+// 100-nanosecond intervals since January 1, 1601 (UTC)."
+int64_t FileTimeToMicroseconds(const FILETIME& ft) {
+ // Need to bit_cast to fix alignment, then divide by 10 to convert
+ // 100-nanoseconds to microseconds. This only works on little-endian
+ // machines.
+ return bit_cast<int64_t, FILETIME>(ft) / 10;
+}
+#endif
+
} // namespace
// static
@@ -822,6 +833,20 @@ ThreadTicks ThreadTicks::Now() {
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
DCHECK(IsSupported());
+#if V8_HOST_ARCH_ARM64
+ // QueryThreadCycleTime versus TSCTicksPerSecond doesn't have much relation to
+ // actual elapsed time on Windows on Arm, because QueryThreadCycleTime is
+ // backed by the actual number of CPU cycles executed, rather than a
+ // constant-rate timer like Intel. To work around this, use GetThreadTimes
+ // (which isn't as accurate but is meaningful as a measure of elapsed
+ // per-thread time).
+ FILETIME creation_time, exit_time, kernel_time, user_time;
+ ::GetThreadTimes(thread_handle, &creation_time, &exit_time, &kernel_time,
+ &user_time);
+
+ int64_t us = FileTimeToMicroseconds(user_time);
+ return ThreadTicks(us);
+#else
// Get the number of TSC ticks used by the current thread.
ULONG64 thread_cycle_time = 0;
::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
@@ -835,6 +860,7 @@ ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
return ThreadTicks(
static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
+#endif
}
// static
@@ -845,16 +871,12 @@ bool ThreadTicks::IsSupportedWin() {
// static
void ThreadTicks::WaitUntilInitializedWin() {
- while (TSCTicksPerSecond() == 0)
- ::Sleep(10);
-}
-
-#ifdef V8_HOST_ARCH_ARM64
-#define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
-#else
-#define ReadCycleCounter() __rdtsc()
+#ifndef V8_HOST_ARCH_ARM64
+ while (TSCTicksPerSecond() == 0) ::Sleep(10);
#endif
+}
+#ifndef V8_HOST_ARCH_ARM64
double ThreadTicks::TSCTicksPerSecond() {
DCHECK(IsSupported());
@@ -875,12 +897,12 @@ double ThreadTicks::TSCTicksPerSecond() {
// The first time that this function is called, make an initial reading of the
// TSC and the performance counter.
- static const uint64_t tsc_initial = ReadCycleCounter();
+ static const uint64_t tsc_initial = __rdtsc();
static const uint64_t perf_counter_initial = QPCNowRaw();
// Make another reading of the TSC and the performance counter every time
// that this function is called.
- uint64_t tsc_now = ReadCycleCounter();
+ uint64_t tsc_now = __rdtsc();
uint64_t perf_counter_now = QPCNowRaw();
// Reset the thread priority.
@@ -913,7 +935,7 @@ double ThreadTicks::TSCTicksPerSecond() {
return tsc_ticks_per_second;
}
-#undef ReadCycleCounter
+#endif // !defined(V8_HOST_ARCH_ARM64)
#endif // V8_OS_WIN
} // namespace base
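
Editorial aside, not part of the upstream diff: the Windows-on-Arm path above converts GetThreadTimes() output directly, since a FILETIME counts 100 ns intervals and dividing by 10 yields microseconds. A trivial compile-time check of that conversion; the helper name is hypothetical.

#include <cstdint>

constexpr int64_t HundredNsToMicroseconds(int64_t hundred_ns) {
  return hundred_ns / 10;
}
static_assert(HundredNsToMicroseconds(10'000'000) == 1'000'000,
              "one second of 100 ns ticks is one million microseconds");
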
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 0820c6f108..9a41d49b2c 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -91,10 +91,10 @@ class V8_BASE_EXPORT TimeDelta final {
return TimeDelta(nanoseconds / TimeConstants::kNanosecondsPerMicrosecond);
}
- static TimeDelta FromSecondsD(double seconds) {
+ static constexpr TimeDelta FromSecondsD(double seconds) {
return FromDouble(seconds * TimeConstants::kMicrosecondsPerSecond);
}
- static TimeDelta FromMillisecondsD(double milliseconds) {
+ static constexpr TimeDelta FromMillisecondsD(double milliseconds) {
return FromDouble(milliseconds *
TimeConstants::kMicrosecondsPerMillisecond);
}
@@ -210,8 +210,7 @@ class V8_BASE_EXPORT TimeDelta final {
}
private:
- // TODO(v8:10620): constexpr requires constexpr saturated_cast.
- static inline TimeDelta FromDouble(double value);
+ static constexpr inline TimeDelta FromDouble(double value);
template<class TimeClass> friend class time_internal::TimeBase;
// Constructs a delta given the duration in microseconds. This is private
@@ -224,7 +223,7 @@ class V8_BASE_EXPORT TimeDelta final {
};
// static
-TimeDelta TimeDelta::FromDouble(double value) {
+constexpr TimeDelta TimeDelta::FromDouble(double value) {
return TimeDelta(saturated_cast<int64_t>(value));
}
@@ -494,9 +493,15 @@ class V8_BASE_EXPORT ThreadTicks final
explicit constexpr ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
#if V8_OS_WIN
+#if V8_HOST_ARCH_ARM64
+ // TSCTicksPerSecond is not supported on Windows on Arm systems because the
+ // cycle-counting methods use the actual CPU cycle count, and not a consistent
+ // incrementing counter.
+#else
// Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
// been measured yet. Needs to be guarded with a call to IsSupported().
static double TSCTicksPerSecond();
+#endif
static bool IsSupportedWin();
static void WaitUntilInitializedWin();
#endif
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index eb65f8c930..44085e8665 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -35,13 +35,19 @@ class SmallVector {
: allocator_(allocator) {
resize_no_init(size);
}
- SmallVector(const SmallVector& other,
- const Allocator& allocator = Allocator()) V8_NOEXCEPT
+ SmallVector(const SmallVector& other) V8_NOEXCEPT
+ : allocator_(other.allocator_) {
+ *this = other;
+ }
+ SmallVector(const SmallVector& other, const Allocator& allocator) V8_NOEXCEPT
: allocator_(allocator) {
*this = other;
}
- SmallVector(SmallVector&& other,
- const Allocator& allocator = Allocator()) V8_NOEXCEPT
+ SmallVector(SmallVector&& other) V8_NOEXCEPT
+ : allocator_(std::move(other.allocator_)) {
+ *this = std::move(other);
+ }
+ SmallVector(SmallVector&& other, const Allocator& allocator) V8_NOEXCEPT
: allocator_(allocator) {
*this = std::move(other);
}
@@ -81,13 +87,13 @@ class SmallVector {
begin_ = other.begin_;
end_ = other.end_;
end_of_storage_ = other.end_of_storage_;
- other.reset_to_inline_storage();
} else {
DCHECK_GE(capacity(), other.size()); // Sanity check.
size_t other_size = other.size();
memcpy(begin_, other.begin_, sizeof(T) * other_size);
end_ = begin_ + other_size;
}
+ other.reset_to_inline_storage();
return *this;
}
@@ -110,6 +116,15 @@ class SmallVector {
bool empty() const { return end_ == begin_; }
size_t capacity() const { return end_of_storage_ - begin_; }
+ T& front() {
+ DCHECK_NE(0, size());
+ return begin_[0];
+ }
+ const T& front() const {
+ DCHECK_NE(0, size());
+ return begin_[0];
+ }
+
T& back() {
DCHECK_NE(0, size());
return end_[-1];
@@ -133,10 +148,10 @@ class SmallVector {
template <typename... Args>
void emplace_back(Args&&... args) {
- T* end = end_;
- if (V8_UNLIKELY(end == end_of_storage_)) end = Grow();
- new (end) T(std::forward<Args>(args)...);
- end_ = end + 1;
+ if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
+ void* storage = end_;
+ end_ += 1;
+ new (storage) T(std::forward<Args>(args)...);
}
void push_back(T x) { emplace_back(std::move(x)); }
@@ -146,6 +161,30 @@ class SmallVector {
end_ -= count;
}
+ T* insert(T* pos, const T& value) { return insert(pos, 1, value); }
+ T* insert(T* pos, size_t count, const T& value) {
+ DCHECK_LE(pos, end_);
+ size_t offset = pos - begin_;
+ T* old_end = end_;
+ resize_no_init(size() + count);
+ pos = begin_ + offset;
+ std::move_backward(pos, old_end, end_);
+ std::fill_n(pos, count, value);
+ return pos;
+ }
+ template <typename It>
+ T* insert(T* pos, It begin, It end) {
+ DCHECK_LE(pos, end_);
+ size_t offset = pos - begin_;
+ size_t count = std::distance(begin, end);
+ T* old_end = end_;
+ resize_no_init(size() + count);
+ pos = begin_ + offset;
+ std::move_backward(pos, old_end, end_);
+ std::copy(begin, end, pos);
+ return pos;
+ }
+
void resize_no_init(size_t new_size) {
// Resizing without initialization is safe if T is trivially copyable.
ASSERT_TRIVIALLY_COPYABLE(T);
@@ -156,21 +195,15 @@ class SmallVector {
// Clear without reverting back to inline storage.
void clear() { end_ = begin_; }
- private:
- V8_NO_UNIQUE_ADDRESS Allocator allocator_;
-
- T* begin_ = inline_storage_begin();
- T* end_ = begin_;
- T* end_of_storage_ = begin_ + kInlineSize;
- typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
- inline_storage_;
+ Allocator get_allocator() const { return allocator_; }
+ private:
- // Grows the backing store by a factor of two. Returns the new end of the used
- // storage (this reduces binary size).
- V8_NOINLINE T* Grow() { return Grow(0); }
+ // Grows the backing store by a factor of two (kept out of line to reduce
+ // binary size at the call sites).
+ V8_NOINLINE V8_PRESERVE_MOST void Grow() { Grow(0); }
// Grows the backing store by a factor of two, and at least to {min_capacity}.
- V8_NOINLINE T* Grow(size_t min_capacity) {
+ V8_NOINLINE V8_PRESERVE_MOST void Grow(size_t min_capacity) {
size_t in_use = end_ - begin_;
size_t new_capacity =
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
@@ -187,14 +220,13 @@ class SmallVector {
begin_ = new_storage;
end_ = new_storage + in_use;
end_of_storage_ = new_storage + new_capacity;
- return end_;
}
T* AllocateDynamicStorage(size_t number_of_elements) {
return allocator_.allocate(number_of_elements);
}
- void FreeDynamicStorage() {
+ V8_NOINLINE V8_PRESERVE_MOST void FreeDynamicStorage() {
DCHECK(is_big());
allocator_.deallocate(begin_, end_of_storage_ - begin_);
}
@@ -213,6 +245,14 @@ class SmallVector {
const T* inline_storage_begin() const {
return reinterpret_cast<const T*>(&inline_storage_);
}
+
+ V8_NO_UNIQUE_ADDRESS Allocator allocator_;
+
+ T* begin_ = inline_storage_begin();
+ T* end_ = begin_;
+ T* end_of_storage_ = begin_ + kInlineSize;
+ typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
+ inline_storage_;
};
} // namespace base
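
Editorial aside, not part of the upstream diff: a hedged sketch exercising the additions above — front(), insert(), and the moved-from vector now being reset to its inline storage in both branches of the move assignment. It assumes SmallVector's usual default constructor and begin() accessor; the function name is hypothetical.

#include <utility>

#include "src/base/small-vector.h"

void SmallVectorAdditionsDemo() {
  v8::base::SmallVector<int, 4> a;
  a.emplace_back(1);
  a.emplace_back(3);
  a.insert(a.begin() + 1, 2);  // a is now {1, 2, 3}
  int first = a.front();       // 1
  v8::base::SmallVector<int, 4> b(std::move(a));
  // After the move, `a` is empty again and back on its inline storage.
  (void)first;
  (void)b;
}
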
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 143aa4ae89..3d45c8571c 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -127,5 +127,21 @@ int64_t SysInfo::AmountOfVirtualMemory() {
#endif
}
+// static
+uintptr_t SysInfo::AddressSpaceEnd() {
+#if V8_OS_WIN
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ uintptr_t max_address =
+ reinterpret_cast<uintptr_t>(info.lpMaximumApplicationAddress);
+ return max_address + 1;
+#else
+ // We don't query POSIX rlimits here (e.g. RLIMIT_AS) as they limit the size
+ // of memory mappings, but not the address space (e.g. even with a small
+ // RLIMIT_AS, a process can still map pages at high addresses).
+ return std::numeric_limits<uintptr_t>::max();
+#endif
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/sys-info.h b/deps/v8/src/base/sys-info.h
index 772f44336f..6865207cb3 100644
--- a/deps/v8/src/base/sys-info.h
+++ b/deps/v8/src/base/sys-info.h
@@ -24,6 +24,12 @@ class V8_BASE_EXPORT SysInfo final {
// Returns the number of bytes of virtual memory of this process. A return
// value of zero means that there is no limit on the available virtual memory.
static int64_t AmountOfVirtualMemory();
+
+ // Returns the end of the virtual address space available to this process.
+ // Memory mappings at or above this address cannot be addressed by this
+ // process, so all pointer values will be below this value.
+// If the virtual address space is not limited, this returns the maximum
+// value representable by uintptr_t.
+ static uintptr_t AddressSpaceEnd();
};
} // namespace base
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index 3394b60f7a..30c4785777 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -60,6 +60,32 @@ struct has_output_operator<
T, TStream, decltype(void(std::declval<TStream&>() << std::declval<T>()))>
: std::true_type {};
+// Turn std::tuple<A...> into std::tuple<A..., T>.
+template <class Tuple, class T>
+using append_tuple_type = decltype(std::tuple_cat(
+ std::declval<Tuple>(), std::declval<std::tuple<T>>()));
+
+// Turn std::tuple<A...> into std::tuple<T, A...>.
+template <class T, class Tuple>
+using prepend_tuple_type = decltype(std::tuple_cat(
+ std::declval<std::tuple<T>>(), std::declval<Tuple>()));
+
+namespace detail {
+
+template <size_t N, typename T, size_t... Ints>
+auto tuple_drop_impl(const T& tpl, std::index_sequence<Ints...>) {
+ return std::tuple{std::get<N + Ints>(tpl)...};
+}
+
+} // namespace detail
+
+// Drop the first N elements from a tuple.
+template <size_t N, typename T>
+auto tuple_drop(const T& tpl) {
+ return detail::tuple_drop_impl<N>(
+ tpl, std::make_index_sequence<std::tuple_size_v<T> - N>());
+}
+
} // namespace base
} // namespace v8
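
Editorial aside, not part of the upstream diff: what the new tuple helpers produce, shown as a couple of compile-time checks; the example names are hypothetical.

#include <tuple>
#include <type_traits>

#include "src/base/template-utils.h"

using Base = std::tuple<int, double>;
static_assert(std::is_same_v<v8::base::append_tuple_type<Base, char>,
                             std::tuple<int, double, char>>);
static_assert(std::is_same_v<v8::base::prepend_tuple_type<char, Base>,
                             std::tuple<char, int, double>>);

// tuple_drop<1> drops the first element by value:
inline auto DropFirst() {
  return v8::base::tuple_drop<1>(std::tuple{1, 2.5, 'x'});  // {2.5, 'x'}
}
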
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
index 0931d3f65c..0e9fbae5c6 100644
--- a/deps/v8/src/base/threaded-list.h
+++ b/deps/v8/src/base/threaded-list.h
@@ -261,6 +261,28 @@ class ThreadedListBase final : public BaseClass {
}
}
+ // Removes the element at `it`, and returns a new iterator pointing to the
+ // element following the removed element (if `it` was pointing to the last
+ // element, then `end()` is returned). The head and the tail are updated. `it`
+ // should not be `end()`. Iterators that are currently on the same element as
+ // `it` are invalidated. Other iterators are not affected. If the last element
+ // is removed, existing `end()` iterators will be invalidated.
+ Iterator RemoveAt(Iterator it) {
+ if (*it.entry_ == head_) {
+ DropHead();
+ return begin();
+ } else if (tail_ == TLTraits::next(*it.entry_)) {
+ tail_ = it.entry_;
+ *it.entry_ = nullptr;
+ return end();
+ } else {
+ T* old_entry = *it.entry_;
+ *it.entry_ = *TLTraits::next(*it.entry_);
+ *TLTraits::next(old_entry) = nullptr;
+ return Iterator(it.entry_);
+ }
+ }
+
bool is_empty() const { return head_ == nullptr; }
T* first() const { return head_; }
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 9a48848005..4811a9f06c 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -129,6 +129,14 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
static uint64_t MurmurHash3(uint64_t);
+ // Implement the UniformRandomBitGenerator interface.
+ using result_type = unsigned;
+ result_type operator()() { return NextInt(); }
+ static constexpr result_type min() { return 0; }
+ static constexpr result_type max() {
+ return std::numeric_limits<result_type>::max();
+ }
+
private:
static const int64_t kMultiplier = 0x5'deec'e66d;
static const int64_t kAddend = 0xb;
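
Editorial aside, not part of the upstream diff: satisfying UniformRandomBitGenerator lets the generator be handed straight to standard-library facilities. A hedged usage sketch; the helper name is hypothetical.

#include <algorithm>
#include <vector>

#include "src/base/utils/random-number-generator.h"

void ShuffleWithV8Rng(v8::base::RandomNumberGenerator& rng,
                      std::vector<int>& values) {
  std::shuffle(values.begin(), values.end(), rng);
}
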
diff --git a/deps/v8/src/base/vector.h b/deps/v8/src/base/vector.h
index 1f5e103d4c..96c72be2c1 100644
--- a/deps/v8/src/base/vector.h
+++ b/deps/v8/src/base/vector.h
@@ -142,8 +142,8 @@ class Vector {
static Vector<T> cast(Vector<S> input) {
// Casting is potentially dangerous, so be really restrictive here. This
// might be lifted once we have use cases for that.
- static_assert(std::is_pod<S>::value);
- static_assert(std::is_pod<T>::value);
+ static_assert(std::is_trivial_v<S> && std::is_standard_layout_v<S>);
+ static_assert(std::is_trivial_v<T> && std::is_standard_layout_v<T>);
DCHECK_EQ(0, (input.size() * sizeof(S)) % sizeof(T));
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(input.begin()) % alignof(T));
return Vector<T>(reinterpret_cast<T*>(input.begin()),
@@ -193,22 +193,40 @@ class V8_NODISCARD ScopedVector : public Vector<T> {
template <typename T>
class OwnedVector {
public:
- MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(OwnedVector);
+ OwnedVector() = default;
+
OwnedVector(std::unique_ptr<T[]> data, size_t length)
: data_(std::move(data)), length_(length) {
DCHECK_IMPLIES(length_ > 0, data_ != nullptr);
}
- // Implicit conversion from {OwnedVector<U>} to {OwnedVector<T>}, instantiable
- // if {std::unique_ptr<U>} can be converted to {std::unique_ptr<T>}.
- // Can be used to convert {OwnedVector<T>} to {OwnedVector<const T>}.
+ // Disallow copying.
+ OwnedVector(const OwnedVector&) = delete;
+ OwnedVector& operator=(const OwnedVector&) = delete;
+
+ // Move construction and move assignment from {OwnedVector<U>} to
+ // {OwnedVector<T>}, instantiable if {std::unique_ptr<U>} can be converted to
+ // {std::unique_ptr<T>}. Can also be used to convert {OwnedVector<T>} to
+ // {OwnedVector<const T>}.
+ // These also serve as the standard move constructor/assignment operator.
+ // {other} is left as an empty vector.
template <typename U,
typename = typename std::enable_if<std::is_convertible<
std::unique_ptr<U>, std::unique_ptr<T>>::value>::type>
- OwnedVector(OwnedVector<U>&& other)
- : data_(std::move(other.data_)), length_(other.length_) {
+ OwnedVector(OwnedVector<U>&& other) V8_NOEXCEPT {
+ *this = std::move(other);
+ }
+
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<
+ std::unique_ptr<U>, std::unique_ptr<T>>::value>::type>
+ OwnedVector& operator=(OwnedVector<U>&& other) V8_NOEXCEPT {
static_assert(sizeof(U) == sizeof(T));
+ data_ = std::move(other.data_);
+ length_ = other.length_;
+ DCHECK_NULL(other.data_);
other.length_ = 0;
+ return *this;
}
// Returns the length of the vector as a size_t.
@@ -217,14 +235,16 @@ class OwnedVector {
// Returns whether or not the vector is empty.
constexpr bool empty() const { return length_ == 0; }
- // Returns the pointer to the start of the data in the vector.
- T* start() const {
+ constexpr T* begin() const {
DCHECK_IMPLIES(length_ > 0, data_ != nullptr);
return data_.get();
}
- constexpr T* begin() const { return start(); }
- constexpr T* end() const { return start() + size(); }
+ constexpr T* end() const { return begin() + length_; }
+
+ // In addition to {begin}, do provide a {data()} accessor for API
+ // compatibility with other sequential containers.
+ constexpr T* data() const { return begin(); }
// Access individual vector elements - checks bounds in debug mode.
T& operator[](size_t index) const {
@@ -233,7 +253,7 @@ class OwnedVector {
}
// Returns a {Vector<T>} view of the data in this vector.
- Vector<T> as_vector() const { return Vector<T>(start(), size()); }
+ Vector<T> as_vector() const { return {begin(), size()}; }
// Releases the backing data from this vector and transfers ownership to the
// caller. This vector will be empty afterwards.
@@ -269,7 +289,7 @@ class OwnedVector {
using non_const_t = typename std::remove_const<T>::type;
auto vec =
OwnedVector<non_const_t>::NewForOverwrite(std::distance(begin, end));
- std::copy(begin, end, vec.start());
+ std::copy(begin, end, vec.begin());
return vec;
}
diff --git a/deps/v8/src/base/vlq.h b/deps/v8/src/base/vlq.h
index 25dba27bfb..f17652bb0c 100644
--- a/deps/v8/src/base/vlq.h
+++ b/deps/v8/src/base/vlq.h
@@ -39,6 +39,16 @@ VLQEncodeUnsigned(Function&& process_byte, uint32_t value) {
} while (value > kDataMask);
}
+inline uint32_t VLQConvertToUnsigned(int32_t value) {
+ // This wouldn't handle kMinInt correctly if it ever encountered it.
+ DCHECK_NE(value, std::numeric_limits<int32_t>::min());
+ bool is_negative = value < 0;
+ // Encode sign in least significant bit.
+ uint32_t bits = static_cast<uint32_t>((is_negative ? -value : value) << 1) |
+ static_cast<uint32_t>(is_negative);
+ return bits;
+}
+
// Encodes value using variable-length encoding and stores it using the passed
// process_byte function.
template <typename Function>
@@ -46,12 +56,7 @@ inline typename std::enable_if<
std::is_same<decltype(std::declval<Function>()(0)), byte*>::value,
void>::type
VLQEncode(Function&& process_byte, int32_t value) {
- // This wouldn't handle kMinInt correctly if it ever encountered it.
- DCHECK_NE(value, std::numeric_limits<int32_t>::min());
- bool is_negative = value < 0;
- // Encode sign in least significant bit.
- uint32_t bits = static_cast<uint32_t>((is_negative ? -value : value) << 1) |
- static_cast<uint32_t>(is_negative);
+ uint32_t bits = VLQConvertToUnsigned(value);
VLQEncodeUnsigned(std::forward<Function>(process_byte), bits);
}
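
Editorial aside, not part of the upstream diff: the extracted VLQConvertToUnsigned() maps non-negative n to 2*n and negative n to 2*(-n) + 1, so small magnitudes of either sign stay small and encode in few VLQ bytes. A few spot checks; the helper name is hypothetical.

#include <cassert>

#include "src/base/vlq.h"

inline void VlqSignMappingExamples() {
  assert(v8::base::VLQConvertToUnsigned(0) == 0u);
  assert(v8::base::VLQConvertToUnsigned(1) == 2u);
  assert(v8::base::VLQConvertToUnsigned(-1) == 3u);
  assert(v8::base::VLQConvertToUnsigned(5) == 10u);
  assert(v8::base::VLQConvertToUnsigned(-5) == 11u);
}
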
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index 5cb855e416..b63499c85c 100644
--- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -39,35 +39,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual = static_cast<uint32_t>(eq),
- kNotEqual = static_cast<uint32_t>(ne),
-
- kLessThan = static_cast<uint32_t>(lt),
- kGreaterThan = static_cast<uint32_t>(gt),
- kLessThanEqual = static_cast<uint32_t>(le),
- kGreaterThanEqual = static_cast<uint32_t>(ge),
-
- kUnsignedLessThan = static_cast<uint32_t>(lo),
- kUnsignedGreaterThan = static_cast<uint32_t>(hi),
- kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
- kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
-
- kOverflow = static_cast<uint32_t>(vs),
- kNoOverflow = static_cast<uint32_t>(vc),
-
- kZero = static_cast<uint32_t>(eq),
- kNotZero = static_cast<uint32_t>(ne),
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- // This is important for arm, where the internal::Condition where each value
- // represents an encoded bit field value.
- static_assert(sizeof(internal::Condition) == sizeof(Condition));
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
@@ -132,13 +103,21 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
__ tst(value, Operand(mask));
- __ b(AsMasmCondition(cc), target);
+ __ b(cc, target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ cmp(lhs, Operand(rhs));
- __ b(AsMasmCondition(cc), target);
+ __ b(cc, target);
+}
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@@ -338,8 +317,8 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
@@ -355,11 +334,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ ldr(output, FieldMemOperand(source, offset));
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ ldrh(output, FieldMemOperand(source, offset));
@@ -401,15 +375,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
- __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
+ __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
__ b(eq, on_result);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@@ -427,8 +401,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
@@ -450,8 +424,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
@@ -466,16 +440,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -484,33 +458,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -536,8 +506,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-arm.cc
ScratchRegisterScope scope(this);
- JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
- &fallthrough);
+ JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
__ CheckConstPool(true, true);
__ BlockConstPoolFor(num_labels);
@@ -591,8 +560,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
- __ JumpIf(Condition::kGreaterThanEqual, params_size,
- Operand(actual_params_size), &corrected_args_count);
+ __ JumpIf(kGreaterThanEqual, params_size, Operand(actual_params_size),
+ &corrected_args_count);
__ masm()->mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
@@ -600,8 +569,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
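
Throughout this diff the former LoadTaggedPointerField/LoadTaggedAnyField pair collapses into a single LoadTaggedField; in the arm file above both variants were already the same plain ldr. A minimal standalone C++ sketch of that consolidation, using hypothetical stand-in types rather than V8's real assembler API:

    #include <cstdint>

    // Hypothetical stand-in for illustration only.
    using Tagged_t = uintptr_t;  // assumed uncompressed tagged slot, as on arm

    // Before: two names with identical bodies (both a plain load).
    Tagged_t LoadTaggedPointerField(const Tagged_t* field) { return *field; }
    Tagged_t LoadTaggedAnyField(const Tagged_t* field) { return *field; }

    // After: one helper, mirroring the rename applied across the backends.
    Tagged_t LoadTaggedField(const Tagged_t* field) { return *field; }

    int main() {
      Tagged_t slot = 0x42;
      return LoadTaggedField(&slot) == LoadTaggedPointerField(&slot) &&
                     LoadTaggedAnyField(&slot) == slot
                 ? 0
                 : 1;
    }
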
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 08a9491ce8..6aa1419e92 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -38,32 +38,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-// TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual = eq,
- kNotEqual = ne,
-
- kLessThan = lt,
- kGreaterThan = gt,
- kLessThanEqual = le,
- kGreaterThanEqual = ge,
-
- kUnsignedLessThan = lo,
- kUnsignedGreaterThan = hi,
- kUnsignedLessThanEqual = ls,
- kUnsignedGreaterThanEqual = hs,
-
- kOverflow = vs,
- kNoOverflow = vc,
-
- kZero = eq,
- kNotZero = ne,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
@@ -125,19 +99,42 @@ void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
- if (cc == Condition::kZero) {
+ if (cc == kZero) {
__ TestAndBranchIfAllClear(value, mask, target);
- } else if (cc == Condition::kNotZero) {
+ } else if (cc == kNotZero) {
__ TestAndBranchIfAnySet(value, mask, target);
} else {
__ Tst(value, Immediate(mask));
- __ B(AsMasmCondition(cc), target);
+ __ B(cc, target);
}
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- __ CompareAndBranch(lhs, rhs, AsMasmCondition(cc), target);
+ __ CompareAndBranch(lhs, rhs, cc, target);
+}
+#if V8_STATIC_ROOTS_BOOL
+void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
+ Label* target,
+ Label::Distance distance) {
+ __ AssertNotSmi(heap_object);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ JumpIfJSAnyIsPrimitive(heap_object, scratch, target, distance);
+}
+#endif // V8_STATIC_ROOTS_BOOL
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ if (cc == eq || cc == ne) {
+ __ IsObjectType(object, scratch, scratch, instance_type);
+ __ B(cc, target);
+ return;
+ }
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@@ -173,14 +170,14 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance distance) {
__ AssertSmi(value);
- __ CompareTaggedAndBranch(value, smi, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(value, smi, cc, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- __ CompareTaggedAndBranch(lhs, rhs, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(lhs, rhs, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -188,7 +185,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
- __ CompareTaggedAndBranch(value, tmp, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(value, tmp, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@@ -196,7 +193,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
- __ CompareTaggedAndBranch(tmp, value, AsMasmCondition(cc), target);
+ __ CompareTaggedAndBranch(tmp, value, cc, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@@ -395,9 +392,9 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
- __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -412,11 +409,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Ldrh(output, FieldMemOperand(source, offset));
@@ -456,27 +448,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
FeedbackSlot slot,
Label* on_result,
Label::Distance) {
- Label fallthrough, clear_slot;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
- __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
-
- // Is it marked_for_deoptimization? If yes, clear the slot.
- {
- ScratchRegisterScope temps(this);
- __ JumpIfCodeTIsMarkedForDeoptimization(
- scratch_and_result, temps.AcquireScratch(), &clear_slot);
- __ B(on_result);
- }
-
- __ bind(&clear_slot);
- __ Mov(scratch_and_result, __ ClearedValue());
- StoreTaggedFieldNoWriteBarrier(
- feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
- scratch_and_result);
-
- __ bind(&fallthrough);
- Move(scratch_and_result, 0);
+ __ TryLoadOptimizedOsrCode(scratch_and_result, feedback_vector, slot,
+ on_result, Label::Distance::kFar);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
@@ -485,8 +458,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
@@ -508,8 +481,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
@@ -524,16 +497,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -542,33 +515,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -601,16 +570,23 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
ScratchRegisterScope scope(this);
Register temp = scope.AcquireScratch();
Label table;
- JumpIf(Condition::kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
+ JumpIf(kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
__ Adr(temp, &table);
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
+ constexpr int instructions_per_jump_target = 1;
+#else
+ constexpr int instructions_per_jump_target = 0;
#endif
+ constexpr int instructions_per_label = 1 + instructions_per_jump_target;
__ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
__ Br(temp);
{
- TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
+ const int instruction_count =
+ num_labels * instructions_per_label + instructions_per_jump_target;
+ MacroAssembler::BlockPoolsScope block_pools(masm_,
+ instruction_count * kInstrSize);
__ Bind(&table);
for (int i = 0; i < num_labels; ++i) {
__ JumpTarget();
@@ -659,7 +635,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
- __ JumpIf(Condition::kGreaterThanEqual, params_size, actual_params_size,
+ __ JumpIf(kGreaterThanEqual, params_size, actual_params_size,
&corrected_args_count);
__ masm()->Mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
@@ -668,7 +644,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
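
The arm64 Switch hunk above resizes its constant-pool blocking scope once BTI landing pads double the jump-table entry size. A standalone sketch of that arithmetic; kInstrSize and the label count are assumptions for illustration, only the formula mirrors the diff:

    #include <cstdio>

    constexpr int kInstrSize = 4;  // assumed A64 instruction width in bytes

    int BlockedBytes(int num_labels, bool control_flow_integrity) {
      // With BTI each table entry is a landing pad plus a branch; without it a
      // single branch suffices, matching instructions_per_label in the diff.
      const int per_jump_target = control_flow_integrity ? 1 : 0;
      const int per_label = 1 + per_jump_target;
      const int instruction_count = num_labels * per_label + per_jump_target;
      return instruction_count * kInstrSize;
    }

    int main() {
      std::printf("with BTI:    %d bytes\n", BlockedBytes(8, true));   // 68
      std::printf("without BTI: %d bytes\n", BlockedBytes(8, false));  // 32
      return 0;
    }
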
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h
index f692af4e13..71c4e7a5eb 100644
--- a/deps/v8/src/baseline/baseline-assembler-inl.h
+++ b/deps/v8/src/baseline/baseline-assembler-inl.h
@@ -114,13 +114,12 @@ void BaselineAssembler::SmiUntag(Register output, Register value) {
void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
int32_t index) {
- LoadTaggedAnyField(output, array,
- FixedArray::kHeaderSize + index * kTaggedSize);
+ LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
__ LoadMap(prototype, object);
- LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
+ LoadTaggedField(prototype, prototype, Map::kPrototypeOffset);
}
void BaselineAssembler::LoadContext(Register output) {
LoadRegister(output, interpreter::Register::current_context());
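
LoadFixedArrayElement above now routes through LoadTaggedField with the offset FixedArray::kHeaderSize + index * kTaggedSize. A tiny standalone sketch of that offset arithmetic; the header size and slot width are illustrative assumptions, not V8's authoritative constants:

    #include <cstdint>

    constexpr int kTaggedSize = 8;             // assumed full-pointer tagged slot
    constexpr int kFixedArrayHeaderSize = 16;  // assumed map word + length word

    constexpr int FixedArrayElementOffset(int32_t index) {
      return kFixedArrayHeaderSize + index * kTaggedSize;
    }

    static_assert(FixedArrayElementOffset(0) == 16);
    static_assert(FixedArrayElementOffset(3) == 40);

    int main() { return 0; }
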
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index 3540cb65b5..6910df9ec8 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -17,8 +17,6 @@ namespace v8 {
namespace internal {
namespace baseline {
-enum class Condition : uint32_t;
-
class BaselineAssembler {
public:
class ScratchRegisterScope;
@@ -63,10 +61,20 @@ class BaselineAssembler {
inline void JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance distance = Label::kFar);
+#if V8_STATIC_ROOTS_BOOL
+ // Fast JS_RECEIVER test which assumes the input is either a primitive or a
+ // JS receiver.

+ inline void JumpIfJSAnyIsPrimitive(Register heap_object, Label* target,
+ Label::Distance distance = Label::kFar);
+#endif
inline void JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type, Register map,
Label* target,
Label::Distance distance = Label::kFar);
+ // Might not load the map into the scratch register.
+ inline void JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type, Label* target,
+ Label::Distance distance = Label::kFar);
inline void JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type, Label* target,
Label::Distance distance = Label::kFar);
@@ -149,13 +157,11 @@ class BaselineAssembler {
inline void TailCallBuiltin(Builtin builtin);
inline void CallRuntime(Runtime::FunctionId function, int nargs);
- inline void LoadTaggedPointerField(Register output, Register source,
- int offset);
+ inline void LoadTaggedField(Register output, Register source, int offset);
inline void LoadTaggedSignedField(Register output, Register source,
int offset);
inline void LoadTaggedSignedFieldAndUntag(Register output, Register source,
int offset);
- inline void LoadTaggedAnyField(Register output, Register source, int offset);
inline void LoadWord16FieldZeroExtend(Register output, Register source,
int offset);
inline void LoadWord8Field(Register output, Register source, int offset);
@@ -172,16 +178,12 @@ class BaselineAssembler {
// X64 supports complex addressing mode, pointer decompression can be done by
// [%compressed_base + %r1 + K].
#if V8_TARGET_ARCH_X64
- inline void LoadTaggedPointerField(TaggedRegister output, Register source,
- int offset);
- inline void LoadTaggedPointerField(TaggedRegister output,
- TaggedRegister source, int offset);
- inline void LoadTaggedPointerField(Register output, TaggedRegister source,
- int offset);
- inline void LoadTaggedAnyField(Register output, TaggedRegister source,
- int offset);
- inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
- int offset);
+ inline void LoadTaggedField(TaggedRegister output, Register source,
+ int offset);
+ inline void LoadTaggedField(TaggedRegister output, TaggedRegister source,
+ int offset);
+ inline void LoadTaggedField(Register output, TaggedRegister source,
+ int offset);
inline void LoadFixedArrayElement(Register output, TaggedRegister array,
int32_t index);
inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,
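
In the header, the x64-only TaggedRegister variants keep their own overloads but now share the single LoadTaggedField name. A standalone sketch of that overload set with hypothetical stand-in types; the comment on complex addressing paraphrases the header above:

    struct Register {};
    struct TaggedRegister {};  // assumed wrapper for a still-compressed value

    struct AssemblerSketch {
      // One name; the argument types select the variant, as in the header above.
      void LoadTaggedField(Register, Register, int) {}
      void LoadTaggedField(TaggedRegister, Register, int) {}
      void LoadTaggedField(TaggedRegister, TaggedRegister, int) {}
      // On x64 the decompression can fold into [%compressed_base + %r1 + K].
      void LoadTaggedField(Register, TaggedRegister, int) {}
    };

    int main() {
      AssemblerSketch a;
      Register r;
      TaggedRegister t;
      a.LoadTaggedField(r, r, 0);
      a.LoadTaggedField(t, r, 8);
      a.LoadTaggedField(r, t, 16);
      return 0;
    }
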
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index 9193551b84..e746113eb5 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -74,7 +74,7 @@ class BaselineCompilerTask {
return;
}
- shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
+ shared_function_info_->set_baseline_code(*code, kReleaseStore);
if (v8_flags.trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
std::stringstream ss;
@@ -116,6 +116,8 @@ class BaselineBatchCompilerJob {
// Skip functions where the bytecode has been flushed.
SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
if (!CanCompileWithConcurrentBaseline(shared, isolate)) continue;
+ // Skip functions that are already being compiled.
+ if (shared.is_sparkplug_compiling()) continue;
tasks_.emplace_back(isolate, handles_.get(), shared);
}
if (v8_flags.trace_baseline_concurrent_compilation) {
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index 25123cb7cd..185d5f49dd 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -439,8 +439,8 @@ void BaselineCompiler::LoadFeedbackVector(Register output) {
void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
LoadFeedbackVector(output);
- __ LoadTaggedPointerField(output, output,
- FeedbackVector::kClosureFeedbackCellArrayOffset);
+ __ LoadTaggedField(output, output,
+ FeedbackVector::kClosureFeedbackCellArrayOffset);
}
void BaselineCompiler::SelectBooleanConstant(
@@ -526,7 +526,7 @@ void BaselineCompiler::VisitSingleBytecode() {
// though, since the control flow would not match the control flow of this
// scope.
if (v8_flags.debug_code &&
- !interpreter::Bytecodes::WritesAccumulator(bytecode) &&
+ !interpreter::Bytecodes::WritesOrClobbersAccumulator(bytecode) &&
!interpreter::Bytecodes::IsJump(bytecode) &&
!interpreter::Bytecodes::IsSwitch(bytecode)) {
accumulator_preserved_scope.emplace(&basm_);
@@ -561,8 +561,7 @@ void BaselineCompiler::VerifyFrame() {
__ Move(scratch, __ FeedbackVectorOperand());
Label is_smi, is_ok;
__ JumpIfSmi(scratch, &is_smi);
- __ JumpIfObjectType(Condition::kEqual, scratch, FEEDBACK_VECTOR_TYPE,
- scratch, &is_ok);
+ __ JumpIfObjectTypeFast(kEqual, scratch, FEEDBACK_VECTOR_TYPE, &is_ok);
__ Bind(&is_smi);
__ masm()->Abort(AbortReason::kExpectedFeedbackVector);
__ Bind(&is_ok);
@@ -669,8 +668,8 @@ void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Label* label,
// the original value into kInterpreterAccumulatorRegister, so we don't have
// to worry about it getting clobbered.
static_assert(kReturnRegister0 == kInterpreterAccumulatorRegister);
- __ JumpIfSmi(do_jump_if_true ? Condition::kNotEqual : Condition::kEqual,
- kReturnRegister1, Smi::FromInt(0), label, distance);
+ __ JumpIfSmi(do_jump_if_true ? kNotEqual : kEqual, kReturnRegister1,
+ Smi::FromInt(0), label, distance);
}
void BaselineCompiler::VisitLdaZero() {
@@ -754,8 +753,8 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register context = scratch_scope.AcquireScratch();
__ LoadContext(context);
- __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(Index(0)));
+ __ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(Index(0)));
}
void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
@@ -942,7 +941,8 @@ void BaselineCompiler::VisitDefineKeyedOwnProperty() {
RegisterOperand(0), // object
RegisterOperand(1), // key
kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ Flag8AsSmi(2), // flags
+ IndexAsTagged(3)); // slot
}
void BaselineCompiler::VisitStaInArrayLiteral() {
@@ -1349,9 +1349,9 @@ void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
interpreter::RegisterList args) {
__ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
- __ LoadTaggedAnyField(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister,
- JSGeneratorObject::kResumeModeOffset);
+ __ LoadTaggedField(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister,
+ JSGeneratorObject::kResumeModeOffset);
}
void BaselineCompiler::VisitIntrinsicGeneratorClose(
@@ -1489,8 +1489,7 @@ void BaselineCompiler::VisitTestReferenceEqual() {
SelectBooleanConstant(
kInterpreterAccumulatorRegister,
[&](Label* is_true, Label::Distance distance) {
- __ JumpIfTagged(Condition::kEqual,
- __ RegisterFrameOperand(RegisterOperand(0)),
+ __ JumpIfTagged(kEqual, __ RegisterFrameOperand(RegisterOperand(0)),
kInterpreterAccumulatorRegister, is_true, distance);
});
}
@@ -1520,8 +1519,8 @@ void BaselineCompiler::VisitTestUndetectable() {
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
- __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
- Condition::kZero, &not_undetectable, Label::kNear);
+ __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask, kZero,
+ &not_undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1561,9 +1560,8 @@ void BaselineCompiler::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kNumber: {
Label is_smi, is_heap_number;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
- __ JumpIfObjectType(Condition::kEqual, kInterpreterAccumulatorRegister,
- HEAP_NUMBER_TYPE, scratch_scope.AcquireScratch(),
- &is_heap_number, Label::kNear);
+ __ JumpIfObjectTypeFast(kEqual, kInterpreterAccumulatorRegister,
+ HEAP_NUMBER_TYPE, &is_heap_number, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
__ Jump(&done, Label::kNear);
@@ -1577,10 +1575,9 @@ void BaselineCompiler::VisitTestTypeOf() {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
static_assert(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
- __ JumpIfObjectType(Condition::kGreaterThanEqual,
- kInterpreterAccumulatorRegister, FIRST_NONSTRING_TYPE,
- scratch_scope.AcquireScratch(), &bad_instance_type,
- Label::kNear);
+ __ JumpIfObjectType(kGreaterThanEqual, kInterpreterAccumulatorRegister,
+ FIRST_NONSTRING_TYPE, scratch_scope.AcquireScratch(),
+ &bad_instance_type, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1593,9 +1590,8 @@ void BaselineCompiler::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol: {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
- __ JumpIfObjectType(Condition::kNotEqual, kInterpreterAccumulatorRegister,
- SYMBOL_TYPE, scratch_scope.AcquireScratch(),
- &bad_instance_type, Label::kNear);
+ __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
+ SYMBOL_TYPE, &bad_instance_type, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1623,9 +1619,8 @@ void BaselineCompiler::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt: {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
- __ JumpIfObjectType(Condition::kNotEqual, kInterpreterAccumulatorRegister,
- BIGINT_TYPE, scratch_scope.AcquireScratch(),
- &bad_instance_type, Label::kNear);
+ __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
+ BIGINT_TYPE, &bad_instance_type, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1648,7 +1643,7 @@ void BaselineCompiler::VisitTestTypeOf() {
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
- Condition::kZero, &not_undetectable, Label::kNear);
+ kZero, &not_undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1667,10 +1662,10 @@ void BaselineCompiler::VisitTestTypeOf() {
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
- __ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask,
- Condition::kZero, &not_callable, Label::kNear);
+ __ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask, kZero,
+ &not_callable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
- Condition::kNotZero, &undetectable, Label::kNear);
+ kNotZero, &undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1692,7 +1687,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// If the object's instance type isn't within the range, return false.
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Register map = scratch_scope.AcquireScratch();
- __ JumpIfObjectType(Condition::kLessThan, kInterpreterAccumulatorRegister,
+ __ JumpIfObjectType(kLessThan, kInterpreterAccumulatorRegister,
FIRST_JS_RECEIVER_TYPE, map, &bad_instance_type,
Label::kNear);
@@ -1702,8 +1697,7 @@ void BaselineCompiler::VisitTestTypeOf() {
__ TestAndBranch(map_bit_field,
Map::Bits1::IsUndetectableBit::kMask |
Map::Bits1::IsCallableBit::kMask,
- Condition::kNotZero, &undetectable_or_callable,
- Label::kNear);
+ kNotZero, &undetectable_or_callable, Label::kNear);
__ Bind(&is_null);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
@@ -1902,6 +1896,7 @@ void BaselineCompiler::VisitCreateRestParameter() {
}
void BaselineCompiler::VisitJumpLoop() {
+#ifndef V8_JITLESS
Label osr_armed, osr_not_armed;
using D = OnStackReplacementDescriptor;
Register feedback_vector = Register::no_reg();
@@ -1917,11 +1912,12 @@ void BaselineCompiler::VisitJumpLoop() {
FeedbackVector::kOsrStateOffset);
static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
FeedbackVector::kMaxOsrUrgency);
- __ JumpIfByte(Condition::kUnsignedGreaterThan, osr_state, loop_depth,
- &osr_armed, Label::kNear);
+ __ JumpIfByte(kUnsignedGreaterThan, osr_state, loop_depth, &osr_armed,
+ Label::kNear);
}
__ Bind(&osr_not_armed);
+#endif // !V8_JITLESS
Label* label = labels_[iterator().GetJumpTargetOffset()].GetPointer();
int weight = iterator().GetRelativeJumpTargetOffset() -
iterator().current_bytecode_size_without_prefix();
@@ -1930,6 +1926,7 @@ void BaselineCompiler::VisitJumpLoop() {
DCHECK(label->is_bound());
UpdateInterruptBudgetAndJumpToLabel(weight, label, label);
+#ifndef V8_JITLESS
{
ASM_CODE_COMMENT_STRING(&masm_, "OSR Handle Armed");
__ Bind(&osr_armed);
@@ -1945,13 +1942,14 @@ void BaselineCompiler::VisitJumpLoop() {
iterator().GetSlotOperand(2), &osr,
Label::kNear);
__ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch1);
- __ JumpIfByte(Condition::kUnsignedLessThanEqual, scratch1, loop_depth,
- &osr_not_armed, Label::kNear);
+ __ JumpIfByte(kUnsignedLessThanEqual, scratch1, loop_depth, &osr_not_armed,
+ Label::kNear);
__ Bind(&osr);
CallBuiltin<Builtin::kBaselineOnStackReplacement>(maybe_target_code);
__ Jump(&osr_not_armed, Label::kNear);
}
+#endif // !V8_JITLESS
}
void BaselineCompiler::VisitJump() {
@@ -2043,14 +2041,16 @@ void BaselineCompiler::VisitJumpIfUndefinedOrNull() {
}
void BaselineCompiler::VisitJumpIfJSReceiver() {
- BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
-
Label is_smi, dont_jump;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
- __ JumpIfObjectType(Condition::kLessThan, kInterpreterAccumulatorRegister,
- FIRST_JS_RECEIVER_TYPE, scratch_scope.AcquireScratch(),
- &dont_jump);
+#if V8_STATIC_ROOTS_BOOL
+ __ JumpIfJSAnyIsPrimitive(kInterpreterAccumulatorRegister, &dont_jump,
+ Label::Distance::kNear);
+#else
+ __ JumpIfObjectTypeFast(kLessThan, kInterpreterAccumulatorRegister,
+ FIRST_JS_RECEIVER_TYPE, &dont_jump);
+#endif
UpdateInterruptBudgetAndDoInterpreterJump();
__ Bind(&is_smi);
@@ -2096,8 +2096,7 @@ void BaselineCompiler::VisitForInContinue() {
[&](Label* is_true, Label::Distance distance) {
LoadRegister(kInterpreterAccumulatorRegister, 0);
__ JumpIfTagged(
- Condition::kNotEqual,
- kInterpreterAccumulatorRegister,
+ kNotEqual, kInterpreterAccumulatorRegister,
__ RegisterFrameOperand(RegisterOperand(1)),
is_true, distance);
});
@@ -2189,8 +2188,8 @@ void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
Register map_bit_field = scratch_scope.AcquireScratch();
__ LoadMap(map_bit_field, reg);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
- __ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask,
- Condition::kNotZero, &done, Label::kNear);
+ __ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask, kNotZero,
+ &done, Label::kNear);
CallRuntime(Runtime::kThrowNotSuperConstructor, reg, __ FunctionOperand());
@@ -2214,8 +2213,8 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
Register context = scratch_scope.AcquireScratch();
- __ LoadTaggedAnyField(context, generator_object,
- JSGeneratorObject::kContextOffset);
+ __ LoadTaggedField(context, generator_object,
+ JSGeneratorObject::kContextOffset);
__ StoreContext(context);
interpreter::JumpTableTargetOffsets offsets =
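
VisitJumpLoop above keys its fast path on a single unsigned byte compare: the static_assert guarantees the maybe-has-OSR-code bit encodes above kMaxOsrUrgency, so one kUnsignedGreaterThan against loop_depth catches both conditions. A standalone sketch with an assumed bit layout:

    #include <cstdint>

    constexpr uint8_t kMaxOsrUrgency = 63;           // assumed: urgency in low bits
    constexpr uint8_t kMaybeHasOsrCodeBit = 1 << 6;  // assumed: encodes above it
    static_assert(kMaybeHasOsrCodeBit > kMaxOsrUrgency);

    // Armed if urgency exceeds the loop depth OR cached OSR code might exist;
    // because the bit sits above any legal urgency, one compare covers both.
    bool OsrArmed(uint8_t osr_state, uint8_t loop_depth) {
      return osr_state > loop_depth;
    }

    int main() {
      return OsrArmed(3, 2) && OsrArmed(kMaybeHasOsrCodeBit, 10) && !OsrArmed(1, 2)
                 ? 0
                 : 1;
    }
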
diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index 2c63dac2b6..f96f2e00a5 100644
--- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -44,32 +44,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
-// TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual = equal,
- kNotEqual = not_equal,
-
- kLessThan = less,
- kGreaterThan = greater,
- kLessThanEqual = less_equal,
- kGreaterThanEqual = greater_equal,
-
- kUnsignedLessThan = below,
- kUnsignedGreaterThan = above,
- kUnsignedLessThanEqual = below_equal,
- kUnsignedGreaterThanEqual = above_equal,
-
- kOverflow = overflow,
- kNoOverflow = no_overflow,
-
- kZero = zero,
- kNotZero = not_zero,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#define __ masm_->
@@ -124,7 +98,7 @@ void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
__ cmp(left, Immediate(right));
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
@@ -139,21 +113,31 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
} else {
__ test(value, Immediate(mask));
}
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance distance) {
__ cmp(lhs, rhs);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
+}
+
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
+
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
Register map, Label* target,
Label::Distance distance) {
__ AssertNotSmi(object);
__ CmpObjectType(object, instance_type, map);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@@ -167,7 +151,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ movd(eax, xmm0);
}
__ CmpInstanceType(map, instance_type);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -181,31 +165,31 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
} else {
__ cmp(value, Immediate(smi));
}
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ cmp(lhs, rhs);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance distance) {
__ cmp(operand, value);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance distance) {
__ cmp(operand, value);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance distance) {
__ cmpb(value, Immediate(byte));
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
return __ mov(RegisterFrameOperand(output), source);
@@ -319,8 +303,8 @@ void BaselineAssembler::Pop(T... registers) {
(__ Pop(registers), ...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
__ mov(output, FieldOperand(source, offset));
}
@@ -336,11 +320,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ mov(output, FieldOperand(source, offset));
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ movzx_w(output, FieldOperand(source, offset));
@@ -380,16 +359,14 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
- Register scratch2 = temps.AcquireScratch();
- DCHECK(!AreAliased(scratch_and_result, scratch2));
- __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch2);
+ __ TestCodeIsMarkedForDeoptimization(scratch_and_result);
__ j(equal, on_result, distance);
__ mov(FieldOperand(feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt())),
@@ -406,8 +383,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
__ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
if (skip_interrupt_label) {
@@ -423,8 +400,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
Register feedback_cell = scratch_scope.AcquireScratch();
DCHECK(!AreAliased(feedback_cell, weight));
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
__ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
weight);
if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
@@ -433,16 +410,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -451,33 +428,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -567,8 +540,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, scratch,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
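
Several backends, ia32 above included, keep TryLoadOptimizedOsrCode inline: load the feedback slot, unwrap the weak reference, and clear the slot when the cached code is marked for deoptimization. A standalone sketch of that control flow over hypothetical stand-in types; the real helpers operate on registers and labels:

    #include <optional>

    struct Code {
      bool marked_for_deoptimization;
    };

    struct FeedbackSlot {
      Code* weak_code;  // null models a cleared weak reference
    };

    // Returns the cached code if present and still valid; clears the slot when
    // the code was marked for deoptimization, mirroring the diff's clear-slot
    // and fallthrough paths.
    std::optional<Code*> TryLoadOptimizedOsrCode(FeedbackSlot& slot) {
      Code* code = slot.weak_code;
      if (code == nullptr) return std::nullopt;  // weak ref was cleared
      if (code->marked_for_deoptimization) {
        slot.weak_code = nullptr;                // store the cleared value
        return std::nullopt;
      }
      return code;                               // jump to on_result with the code
    }

    int main() {
      Code stale{true}, fresh{false};
      FeedbackSlot a{&stale}, b{&fresh}, c{nullptr};
      bool ok = !TryLoadOptimizedOsrCode(a).has_value() && a.weak_code == nullptr &&
                TryLoadOptimizedOsrCode(b).value() == &fresh &&
                !TryLoadOptimizedOsrCode(c).has_value();
      return ok ? 0 : 1;
    }
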
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
index cc5694554a..a80617c928 100644
--- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -37,32 +37,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-enum class Condition : uint32_t {
- kEqual = eq,
- kNotEqual = ne,
-
- kLessThan = lt,
- kGreaterThan = gt,
- kLessThanEqual = le,
- kGreaterThanEqual = ge,
-
- kUnsignedLessThan = Uless,
- kUnsignedGreaterThan = Ugreater,
- kUnsignedLessThanEqual = Uless_equal,
- kUnsignedGreaterThanEqual = Ugreater_equal,
-
- kOverflow = overflow,
- kNoOverflow = no_overflow,
-
- kZero = eq,
- kNotZero = ne,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- static_assert(sizeof(internal::Condition) == sizeof(Condition));
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
@@ -123,12 +97,20 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ And(scratch, value, Operand(mask));
- __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
+ __ Branch(target, cc, scratch, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+ __ Branch(target, cc, lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@@ -137,7 +119,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
- __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+ __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@@ -149,22 +131,18 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
- __ Ld_d(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+ __ Ld_hu(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
- ScratchRegisterScope temps(this);
- Register scratch = temps.AcquireScratch();
- __ li(scratch, Operand(smi));
- __ SmiUntag(scratch);
- __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+ __ CompareTaggedAndBranch(target, cc, value, Operand(smi));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+ __ CompareTaggedAndBranch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -172,7 +150,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld_d(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+ __ CompareTaggedAndBranch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@@ -180,11 +158,11 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld_d(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
+ __ CompareTaggedAndBranch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
- __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
+ __ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
@@ -322,13 +300,13 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
- __ Ld_d(output, FieldMemOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
- __ Ld_d(output, FieldMemOperand(source, offset));
+ __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
Register source,
@@ -336,10 +314,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ Ld_d(output, FieldMemOperand(source, offset));
-}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Ld_hu(output, FieldMemOperand(source, offset));
@@ -354,13 +328,13 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ li(scratch, Operand(value));
- __ St_d(scratch, FieldMemOperand(target, offset));
+ __ StoreTaggedField(scratch, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
ASM_CODE_COMMENT(masm_);
- __ St_d(value, FieldMemOperand(target, offset));
+ __ StoreTaggedField(value, FieldMemOperand(target, offset));
ScratchRegisterScope temps(this);
__ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
@@ -368,7 +342,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
- __ St_d(value, FieldMemOperand(target, offset));
+ __ StoreTaggedField(value, FieldMemOperand(target, offset));
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Register feedback_vector,
@@ -376,15 +350,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
- __ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
- eq, on_result);
+ __ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
+ on_result);
__ li(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
@@ -400,8 +374,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Ld_w(interrupt_budget,
@@ -420,8 +394,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Ld_w(interrupt_budget,
@@ -436,16 +410,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -454,33 +428,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -489,7 +459,11 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
- __ Add_d(lhs, lhs, Operand(rhs));
+ if (SmiValuesAre31Bits()) {
+ __ Add_w(lhs, lhs, Operand(rhs));
+ } else {
+ __ Add_d(lhs, lhs, Operand(rhs));
+ }
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
@@ -504,8 +478,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
__ Sub_d(reg, reg, Operand(case_value_base));
}
- __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
- reg, Operand(num_labels));
+ __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
__ GenerateSwitchTable(reg, num_labels,
[labels](size_t i) { return labels[i]; });
@@ -560,8 +533,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
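
The loong64 AddSmi hunk now distinguishes 31-bit smis (Add_w) from 32-bit smis held in a 64-bit word (Add_d). A standalone sketch of the same distinction on plain integers; the tagging details are assumed for illustration:

    #include <cstdint>

    constexpr bool kSmiValuesAre31Bits = true;  // assumption for this sketch

    int64_t AddSmi(int64_t tagged_lhs, int64_t tagged_rhs) {
      if constexpr (kSmiValuesAre31Bits) {
        // 31-bit smis occupy the low 32 bits of the register, so a 32-bit add
        // with sign extension (what Add_w yields) is enough.
        uint32_t sum = static_cast<uint32_t>(tagged_lhs) +
                       static_cast<uint32_t>(tagged_rhs);
        return static_cast<int32_t>(sum);
      }
      // 32-bit smis shifted into the upper half need the full 64-bit add (Add_d).
      return tagged_lhs + tagged_rhs;
    }

    int main() {
      // With a 1-bit tag, smi(2) + smi(3) == smi(5): 4 + 6 == 10.
      return AddSmi(4, 6) == 10 ? 0 : 1;
    }
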
diff --git a/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
index 50b522c8c2..36e2155352 100644
--- a/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
+++ b/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
@@ -45,7 +45,7 @@ void BaselineCompiler::PrologueFillFrame() {
const bool has_new_target = new_target_index != kMaxInt;
if (has_new_target) {
DCHECK_LE(new_target_index, register_count);
- __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ __ masm()->Add_d(sp, sp, Operand(-(kSystemPointerSize * new_target_index)));
for (int i = 0; i < new_target_index; i++) {
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
@@ -55,12 +55,12 @@ void BaselineCompiler::PrologueFillFrame() {
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
- __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ __ masm()->Add_d(sp, sp, Operand(-(kSystemPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
- __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ __ masm()->Add_d(sp, sp, Operand(-(kSystemPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index 17bd834d5d..762ec1745b 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -37,32 +37,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-enum class Condition : uint32_t {
- kEqual = eq,
- kNotEqual = ne,
-
- kLessThan = lt,
- kGreaterThan = gt,
- kLessThanEqual = le,
- kGreaterThanEqual = ge,
-
- kUnsignedLessThan = Uless,
- kUnsignedGreaterThan = Ugreater,
- kUnsignedLessThanEqual = Uless_equal,
- kUnsignedGreaterThanEqual = Ugreater_equal,
-
- kOverflow = overflow,
- kNoOverflow = no_overflow,
-
- kZero = eq,
- kNotZero = ne,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- static_assert(sizeof(internal::Condition) == sizeof(Condition));
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
@@ -123,12 +97,20 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ And(scratch, value, Operand(mask));
- __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
+ __ Branch(target, cc, scratch, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+ __ Branch(target, cc, lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@@ -137,7 +119,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
- __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+ __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@@ -150,7 +132,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+ __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -158,7 +140,7 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+ __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
@@ -166,13 +148,13 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Register scratch = temps.AcquireScratch();
__ li(scratch, Operand(smi));
__ SmiUntag(scratch);
- __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+ __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+ __ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -180,7 +162,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+ __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@@ -188,11 +170,11 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
+ __ Branch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
- __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
+ __ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
@@ -330,8 +312,8 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
__ Ld(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -344,10 +326,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ Ld(output, FieldMemOperand(source, offset));
-}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Lhu(output, FieldMemOperand(source, offset));
@@ -386,15 +364,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
- __ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
- eq, on_result);
+ __ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
+ on_result);
__ li(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
@@ -410,8 +388,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@@ -430,8 +408,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@@ -446,16 +424,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -464,33 +442,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -514,8 +488,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
__ Dsubu(reg, reg, Operand(case_value_base));
}
- __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
- reg, Operand(num_labels));
+ __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
__ GenerateSwitchTable(reg, num_labels,
[labels](size_t i) { return labels[i]; });
@@ -571,8 +544,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
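
The mips64 hunks above delete the backend's private Condition enum together with its AsMasmCondition() shim, and the loong64 hunks drop the matching AsMasmCondition() calls, so Branch() and friends now take the shared condition directly. A hedged sketch of that shim-removal pattern, using made-up names rather than V8's:

    #include <cstdint>

    namespace masm {
    enum class Condition : uint32_t { kEqual, kNotEqual, kUnsignedGreaterThanEqual };
    void Branch(Condition cc) { (void)cc; /* emit a conditional branch */ }
    }  // namespace masm

    // Old shape: a duplicate per-backend enum plus a translation shim.
    namespace baseline_old {
    enum class Condition : uint32_t { kEqual, kNotEqual, kUnsignedGreaterThanEqual };
    inline masm::Condition AsMasmCondition(Condition cc) {
      return static_cast<masm::Condition>(cc);
    }
    }  // namespace baseline_old

    int main() {
      // New shape: the shared enum flows straight through to the assembler.
      masm::Branch(masm::Condition::kUnsignedGreaterThanEqual);
      // Old shape, for contrast.
      masm::Branch(baseline_old::AsMasmCondition(
          baseline_old::Condition::kUnsignedGreaterThanEqual));
    }
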
diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
index 90a06006bb..74a3db7ca3 100644
--- a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
+++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
@@ -49,92 +49,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
-// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual,
- kNotEqual,
-
- kLessThan,
- kGreaterThan,
- kLessThanEqual,
- kGreaterThanEqual,
-
- kUnsignedLessThan,
- kUnsignedGreaterThan,
- kUnsignedLessThanEqual,
- kUnsignedGreaterThanEqual,
-
- kOverflow,
- kNoOverflow,
-
- kZero,
- kNotZero
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- static_assert(sizeof(internal::Condition) == sizeof(Condition));
- switch (cond) {
- case Condition::kEqual:
- return eq;
- case Condition::kNotEqual:
- return ne;
- case Condition::kLessThan:
- return lt;
- case Condition::kGreaterThan:
- return gt;
- case Condition::kLessThanEqual:
- return le;
- case Condition::kGreaterThanEqual:
- return ge;
-
- case Condition::kUnsignedLessThan:
- return lt;
- case Condition::kUnsignedGreaterThan:
- return gt;
- case Condition::kUnsignedLessThanEqual:
- return le;
- case Condition::kUnsignedGreaterThanEqual:
- return ge;
-
- case Condition::kOverflow:
- return overflow;
- case Condition::kNoOverflow:
- return nooverflow;
-
- case Condition::kZero:
- return eq;
- case Condition::kNotZero:
- return ne;
- default:
- UNREACHABLE();
- }
-}
-
-inline bool IsSignedCondition(Condition cond) {
- switch (cond) {
- case Condition::kEqual:
- case Condition::kNotEqual:
- case Condition::kLessThan:
- case Condition::kGreaterThan:
- case Condition::kLessThanEqual:
- case Condition::kGreaterThanEqual:
- case Condition::kOverflow:
- case Condition::kNoOverflow:
- case Condition::kZero:
- case Condition::kNotZero:
- return true;
-
- case Condition::kUnsignedLessThan:
- case Condition::kUnsignedGreaterThan:
- case Condition::kUnsignedLessThanEqual:
- case Condition::kUnsignedGreaterThanEqual:
- return false;
-
- default:
- UNREACHABLE();
- }
-}
-
#define __ assm->
// ppc helper
template <int width = 64>
@@ -143,19 +57,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
static_assert(width == 64 || width == 32,
"only support 64 and 32 bit compare");
if (width == 64) {
- if (IsSignedCondition(cc)) {
+ if (is_signed(cc)) {
__ CmpS64(lhs, rhs);
} else {
__ CmpU64(lhs, rhs);
}
} else {
- if (IsSignedCondition(cc)) {
+ if (is_signed(cc)) {
__ CmpS32(lhs, rhs);
} else {
__ CmpU32(lhs, rhs);
}
}
- __ b(AsMasmCondition(cc), target);
+ __ b(to_condition(cc), target);
}
#undef __
@@ -221,18 +135,27 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ AndU64(r0, value, Operand(mask), ip, SetRC);
- __ b(AsMasmCondition(cc), target, cr0);
+ __ b(to_condition(cc), target, cr0);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
- if (IsSignedCondition(cc)) {
+ if (is_signed(cc)) {
__ CmpS64(lhs, rhs, r0);
} else {
__ CmpU64(lhs, rhs, r0);
}
- __ b(AsMasmCondition(cc), target);
+ __ b(to_condition(cc), target);
+}
+
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@@ -292,7 +215,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
ASM_CODE_COMMENT(masm_);
- __ LoadTaggedPointerField(ip, operand, r0);
+ __ LoadTaggedField(ip, operand, r0);
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}
@@ -300,7 +223,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
ASM_CODE_COMMENT(masm_);
- __ LoadTaggedPointerField(ip, operand, r0);
+ __ LoadTaggedField(ip, operand, r0);
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}
@@ -460,10 +383,10 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
ASM_CODE_COMMENT(masm_);
- __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+ __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -479,12 +402,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- ASM_CODE_COMMENT(masm_);
- __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
ASM_CODE_COMMENT(masm_);
@@ -529,15 +446,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
- __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
+ __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
__ beq(on_result, cr0);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@@ -555,8 +472,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@@ -580,8 +497,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@@ -599,23 +516,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
ASM_CODE_COMMENT(masm_);
- if (depth > 0) {
- for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
- }
- if (COMPRESS_POINTERS_BOOL) {
- // Decompress tagged pointer.
- __ AddS64(context, context, kPtrComprCageBaseRegister);
- }
+ for (; depth > 0; --depth) {
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -625,34 +536,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
ASM_CODE_COMMENT(masm_);
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -680,8 +587,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
}
// Mostly copied from code-generator-arm.cc
- JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
- &fallthrough);
+ JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
int entry_size_log2 = 3;
__ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
@@ -743,8 +649,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
- JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size,
- actual_params_size, &corrected_args_count);
+ JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size,
+ &corrected_args_count);
__ masm()->mr(params_size, actual_params_size);
__ Bind(&corrected_args_count);
@@ -752,8 +658,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
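
On ppc (and s390 below), JumpIfHelper now consults the shared is_signed()/to_condition() helpers instead of the local IsSignedCondition()/AsMasmCondition(); the signedness of the condition still decides between a signed and an unsigned compare. A stand-alone sketch of why that dispatch matters, with hypothetical names:

    #include <cstdint>
    #include <iostream>

    enum class Condition { kLessThan, kUnsignedLessThan };

    constexpr bool is_signed(Condition cc) { return cc == Condition::kLessThan; }

    // Models CmpS64 vs. CmpU64 followed by a "less than" branch.
    bool Less64(Condition cc, int64_t lhs, int64_t rhs) {
      if (is_signed(cc)) return lhs < rhs;
      return static_cast<uint64_t>(lhs) < static_cast<uint64_t>(rhs);
    }

    int main() {
      // -1 < 1 when compared signed, but -1 wraps to UINT64_MAX unsigned.
      std::cout << Less64(Condition::kLessThan, -1, 1) << "\n";          // 1
      std::cout << Less64(Condition::kUnsignedLessThan, -1, 1) << "\n";  // 0
    }
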
diff --git a/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h b/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h
index 59cffa9e29..14b09eec72 100644
--- a/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h
+++ b/deps/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h
@@ -36,31 +36,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
-enum class Condition : uint32_t {
- kEqual = eq,
- kNotEqual = ne,
-
- kLessThan = lt,
- kGreaterThan = gt,
- kLessThanEqual = le,
- kGreaterThanEqual = ge,
-
- kUnsignedLessThan = Uless,
- kUnsignedGreaterThan = Ugreater,
- kUnsignedLessThanEqual = Uless_equal,
- kUnsignedGreaterThanEqual = Ugreater_equal,
-
- kOverflow = overflow,
- kNoOverflow = no_overflow,
-
- kZero = eq,
- kNotZero = ne,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#ifdef DEBUG
@@ -121,12 +96,20 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ And(tmp, value, Operand(mask));
- __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
+ __ Branch(target, cc, tmp, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+ __ Branch(target, cc, lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@@ -135,7 +118,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
- __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+ __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@@ -148,7 +131,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ LoadWord(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+ __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -156,7 +139,7 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
__ LoadWord(temp, operand);
- __ Branch(target, AsMasmCondition(cc), value, Operand(temp));
+ __ Branch(target, cc, value, Operand(temp));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
@@ -164,14 +147,14 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Register temp = temps.AcquireScratch();
__ li(temp, Operand(smi));
__ SmiUntag(temp);
- __ Branch(target, AsMasmCondition(cc), value, Operand(temp));
+ __ Branch(target, cc, value, Operand(temp));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
// todo: compress pointer
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+ __ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -180,7 +163,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ LoadWord(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+ __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@@ -189,11 +172,11 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ LoadWord(scratch, operand);
- __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
+ __ Branch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
- __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
+ __ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
@@ -322,9 +305,9 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
- __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
@@ -336,10 +319,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Lhu(output, FieldMemOperand(source, offset));
@@ -376,15 +355,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough, clear_slot;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
- __ JumpIfCodeTIsMarkedForDeoptimization(
- scratch_and_result, temps.AcquireScratch(), &clear_slot);
+ __ JumpIfCodeIsMarkedForDeoptimization(scratch_and_result,
+ temps.AcquireScratch(), &clear_slot);
Jump(on_result);
}
@@ -404,8 +383,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@@ -426,8 +405,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
@@ -444,16 +423,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -462,33 +441,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -519,8 +494,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-riscv64.cc
ScratchRegisterScope scope(this);
Label table;
- __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
- reg, Operand(num_labels));
+ __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
int64_t imm64;
imm64 = __ branch_long_offset(&table);
CHECK(is_int32(imm64 + 0x800));
@@ -534,7 +508,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
__ CalcScaledAddress(t6, t6, reg, entry_size_log2);
__ Jump(t6);
{
- TurboAssembler::BlockTrampolinePoolScope(masm());
+ MacroAssembler::BlockTrampolinePoolScope(masm());
__ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
__ bind(&table);
for (int i = 0; i < num_labels; ++i) {
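
Every backend in this diff folds LoadTaggedPointerField and LoadTaggedAnyField into a single LoadTaggedField, so helpers such as LdaContextSlot walk the context chain with one loader. A minimal sketch of that call pattern, with toy types standing in for V8's registers and field offsets:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Context {
      const Context* previous;       // stands in for Context::kPreviousOffset
      std::vector<int64_t> slots;    // stands in for Context::OffsetOfElementAt(i)
    };

    // One loader now serves both "pointer" and "any" tagged fields.
    template <typename T>
    T LoadTaggedField(T Context::*field, const Context& ctx) { return ctx.*field; }

    int64_t LdaContextSlot(const Context* ctx, uint32_t index, uint32_t depth) {
      for (; depth > 0; --depth) {
        ctx = LoadTaggedField(&Context::previous, *ctx);  // hop to the parent context
      }
      return LoadTaggedField(&Context::slots, *ctx)[index];
    }

    int main() {
      Context outer{nullptr, {10, 20}};
      Context inner{&outer, {30}};
      std::cout << LdaContextSlot(&inner, 1, 1) << "\n";  // prints 20
    }
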
diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
index c04e7734ae..517453c605 100644
--- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
+++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
@@ -48,92 +48,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
-// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual,
- kNotEqual,
-
- kLessThan,
- kGreaterThan,
- kLessThanEqual,
- kGreaterThanEqual,
-
- kUnsignedLessThan,
- kUnsignedGreaterThan,
- kUnsignedLessThanEqual,
- kUnsignedGreaterThanEqual,
-
- kOverflow,
- kNoOverflow,
-
- kZero,
- kNotZero
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- static_assert(sizeof(internal::Condition) == sizeof(Condition));
- switch (cond) {
- case Condition::kEqual:
- return eq;
- case Condition::kNotEqual:
- return ne;
- case Condition::kLessThan:
- return lt;
- case Condition::kGreaterThan:
- return gt;
- case Condition::kLessThanEqual:
- return le;
- case Condition::kGreaterThanEqual:
- return ge;
-
- case Condition::kUnsignedLessThan:
- return lt;
- case Condition::kUnsignedGreaterThan:
- return gt;
- case Condition::kUnsignedLessThanEqual:
- return le;
- case Condition::kUnsignedGreaterThanEqual:
- return ge;
-
- case Condition::kOverflow:
- return overflow;
- case Condition::kNoOverflow:
- return nooverflow;
-
- case Condition::kZero:
- return eq;
- case Condition::kNotZero:
- return ne;
- default:
- UNREACHABLE();
- }
-}
-
-inline bool IsSignedCondition(Condition cond) {
- switch (cond) {
- case Condition::kEqual:
- case Condition::kNotEqual:
- case Condition::kLessThan:
- case Condition::kGreaterThan:
- case Condition::kLessThanEqual:
- case Condition::kGreaterThanEqual:
- case Condition::kOverflow:
- case Condition::kNoOverflow:
- case Condition::kZero:
- case Condition::kNotZero:
- return true;
-
- case Condition::kUnsignedLessThan:
- case Condition::kUnsignedGreaterThan:
- case Condition::kUnsignedLessThanEqual:
- case Condition::kUnsignedGreaterThanEqual:
- return false;
-
- default:
- UNREACHABLE();
- }
-}
-
#define __ assm->
// s390x helper
template <int width = 64>
@@ -142,19 +56,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
static_assert(width == 64 || width == 32,
"only support 64 and 32 bit compare");
if (width == 64) {
- if (IsSignedCondition(cc)) {
+ if (is_signed(cc)) {
__ CmpS64(lhs, rhs);
} else {
__ CmpU64(lhs, rhs);
}
} else {
- if (IsSignedCondition(cc)) {
+ if (is_signed(cc)) {
__ CmpS32(lhs, rhs);
} else {
__ CmpU32(lhs, rhs);
}
}
- __ b(AsMasmCondition(cc), target);
+ __ b(to_condition(cc), target);
}
#undef __
@@ -220,18 +134,27 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ AndP(r0, value, Operand(mask));
- __ b(AsMasmCondition(cc), target);
+ __ b(to_condition(cc), target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
- if (IsSignedCondition(cc)) {
+ if (is_signed(cc)) {
__ CmpS64(lhs, rhs);
} else {
__ CmpU64(lhs, rhs);
}
- __ b(AsMasmCondition(cc), target);
+ __ b(to_condition(cc), target);
+}
+
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@@ -287,7 +210,11 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
JumpIfHelper(masm_, cc, lhs, rhs, target);
}
+#ifdef V8_TARGET_BIG_ENDIAN
constexpr static int stack_bias = 4;
+#else
+constexpr static int stack_bias = 0;
+#endif
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@@ -297,9 +224,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
if (COMPRESS_POINTERS_BOOL) {
MemOperand addr =
MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
- __ LoadTaggedPointerField(ip, addr, r0);
+ __ LoadTaggedField(ip, addr, r0);
} else {
- __ LoadTaggedPointerField(ip, operand, r0);
+ __ LoadTaggedField(ip, operand, r0);
}
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}
@@ -312,9 +239,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
if (COMPRESS_POINTERS_BOOL) {
MemOperand addr =
MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
- __ LoadTaggedPointerField(ip, addr, r0);
+ __ LoadTaggedField(ip, addr, r0);
} else {
- __ LoadTaggedPointerField(ip, operand, r0);
+ __ LoadTaggedField(ip, operand, r0);
}
JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
}
@@ -473,10 +400,10 @@ void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
ASM_CODE_COMMENT(masm_);
- __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+ __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -492,12 +419,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
SmiUntag(output);
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- ASM_CODE_COMMENT(masm_);
- __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
-}
-
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
ASM_CODE_COMMENT(masm_);
@@ -542,15 +463,15 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Label* on_result,
Label::Distance) {
Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+ LoadTaggedField(scratch_and_result, feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
- __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
+ __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
__ beq(on_result);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@@ -568,8 +489,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@@ -593,8 +514,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
- LoadTaggedPointerField(feedback_cell, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ LoadU32(
@@ -611,16 +532,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
@@ -629,33 +550,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
- LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ LoadTaggedField(context, context, Context::kPreviousOffset);
}
- LoadTaggedPointerField(context, context, Context::kExtensionOffset);
- LoadTaggedPointerField(context, context,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(context, context, Context::kExtensionOffset);
+ LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -687,8 +604,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-arm.cc
ScratchRegisterScope scope(this);
- JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
- &fallthrough);
+ JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
int entry_size_log2 = 3;
__ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
@@ -745,8 +661,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
- JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size,
- actual_params_size, &corrected_args_count);
+ JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size,
+ &corrected_args_count);
__ masm()->mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
@@ -754,8 +670,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
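
The s390 hunk above makes stack_bias conditional on V8_TARGET_BIG_ENDIAN: with pointer compression, the 32-bit tagged value sits in the high-addressed half of a 64-bit stack slot only on big-endian targets. A small host-side illustration of that offset, not V8 code:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int main() {
      // A 64-bit stack slot holding a zero-extended 32-bit compressed value.
      uint64_t slot = 0x00000000deadbeefULL;
      const auto* bytes = reinterpret_cast<const unsigned char*>(&slot);

      uint32_t at_0, at_4;
      std::memcpy(&at_0, bytes + 0, sizeof(at_0));  // offset used on little-endian
      std::memcpy(&at_4, bytes + 4, sizeof(at_4));  // offset used on big-endian (bias == 4)

      std::cout << std::hex << at_0 << " " << at_4 << "\n";
      // A little-endian host prints "deadbeef 0"; a big-endian host prints
      // "0 deadbeef", which is why the 4-byte bias only applies there.
    }
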
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index f05d829cb5..9f475ad4a0 100644
--- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -46,32 +46,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
-// TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint32_t {
- kEqual = equal,
- kNotEqual = not_equal,
-
- kLessThan = less,
- kGreaterThan = greater,
- kLessThanEqual = less_equal,
- kGreaterThanEqual = greater_equal,
-
- kUnsignedLessThan = below,
- kUnsignedGreaterThan = above,
- kUnsignedLessThanEqual = below_equal,
- kUnsignedGreaterThanEqual = above_equal,
-
- kOverflow = overflow,
- kNoOverflow = no_overflow,
-
- kZero = zero,
- kNotZero = not_zero,
-};
-
-inline internal::Condition AsMasmCondition(Condition cond) {
- return static_cast<internal::Condition>(cond);
-}
-
namespace detail {
#define __ masm_->
@@ -130,21 +104,48 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
} else {
__ testl(value, Immediate(mask));
}
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance distance) {
__ cmpq(lhs, rhs);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
+}
+
+#if V8_STATIC_ROOTS_BOOL
+void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
+ Label* target,
+ Label::Distance distance) {
+ __ AssertNotSmi(heap_object);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ JumpIfJSAnyIsPrimitive(heap_object, scratch, target, distance);
+}
+#endif // V8_STATIC_ROOTS_BOOL
+
+void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
+ InstanceType instance_type,
+ Label* target,
+ Label::Distance distance) {
+ __ AssertNotSmi(object);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ if (cc == Condition::kEqual || cc == Condition::kNotEqual) {
+ __ IsObjectType(object, instance_type, scratch);
+ } else {
+ __ CmpObjectType(object, instance_type, scratch);
+ }
+ __ j(cc, target, distance);
}
+
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
Register map, Label* target,
Label::Distance distance) {
__ AssertNotSmi(object);
__ CmpObjectType(object, instance_type, map);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@@ -156,30 +157,30 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(equal, AbortReason::kUnexpectedValue);
}
__ CmpInstanceType(map, instance_type);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance distance) {
__ cmpq(value, operand);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Smi smi,
Label* target, Label::Distance distance) {
__ SmiCompare(lhs, smi);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance distance) {
__ SmiCompare(lhs, rhs);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
__ cmpq(left, Immediate(right));
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
// cmp_tagged
@@ -187,18 +188,18 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance distance) {
__ cmp_tagged(value, operand);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance distance) {
__ cmp_tagged(operand, value);
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance distance) {
__ cmpb(value, Immediate(byte));
- __ j(AsMasmCondition(cc), target, distance);
+ __ j(cc, target, distance);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
@@ -313,9 +314,9 @@ void BaselineAssembler::Pop(T... registers) {
(__ Pop(registers), ...);
}
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
- int offset) {
- __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
@@ -326,10 +327,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
int offset) {
__ SmiUntagField(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
- int offset) {
- __ LoadAnyTaggedField(output, FieldOperand(source, offset));
-}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ movzxwq(output, FieldOperand(source, offset));
@@ -357,45 +354,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
__ StoreTaggedField(FieldOperand(target, offset), value);
}
-void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
- Register source, int offset) {
- __ LoadTaggedPointerField(output, FieldOperand(source, offset));
-}
-
-void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
- TaggedRegister source,
- int offset) {
- __ LoadTaggedPointerField(output, FieldOperand(source, offset));
-}
-
-void BaselineAssembler::LoadTaggedPointerField(Register output,
- TaggedRegister source,
- int offset) {
- __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source,
+ int offset) {
+ __ LoadTaggedField(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadTaggedAnyField(Register output,
- TaggedRegister source, int offset) {
- __ LoadAnyTaggedField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(TaggedRegister output,
+ TaggedRegister source, int offset) {
+ __ LoadTaggedField(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
- TaggedRegister source, int offset) {
- __ LoadAnyTaggedField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source,
+ int offset) {
+ __ LoadTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadFixedArrayElement(Register output,
TaggedRegister array,
int32_t index) {
- LoadTaggedAnyField(output, array,
- FixedArray::kHeaderSize + index * kTaggedSize);
+ LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
TaggedRegister array,
int32_t index) {
- LoadTaggedAnyField(output, array,
- FixedArray::kHeaderSize + index * kTaggedSize);
+ LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
@@ -403,24 +386,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
FeedbackSlot slot,
Label* on_result,
Label::Distance distance) {
- Label fallthrough;
- LoadTaggedPointerField(scratch_and_result, feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt()));
- __ LoadWeakValue(scratch_and_result, &fallthrough);
-
- // Is it marked_for_deoptimization? If yes, clear the slot.
- {
- DCHECK(!AreAliased(scratch_and_result, kScratchRegister));
- __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, kScratchRegister);
- __ j(equal, on_result, distance);
- __ StoreTaggedField(
- FieldOperand(feedback_vector,
- FeedbackVector::OffsetOfElementAt(slot.ToInt())),
- __ ClearedValue());
- }
-
- __ bind(&fallthrough);
- __ Move(scratch_and_result, 0);
+ __ MacroAssembler::TryLoadOptimizedOsrCode(
+ scratch_and_result, feedback_vector, slot, on_result, distance);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
@@ -431,8 +398,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
LoadFunction(feedback_cell);
// Decompresses pointer by complex addressing mode when necessary.
TaggedRegister tagged(feedback_cell);
- LoadTaggedPointerField(tagged, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
if (skip_interrupt_label) {
@@ -449,8 +415,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
LoadFunction(feedback_cell);
// Decompresses pointer by complex addressing mode when necessary.
TaggedRegister tagged(feedback_cell);
- LoadTaggedPointerField(tagged, feedback_cell,
- JSFunction::kFeedbackCellOffset);
+ LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
@@ -462,17 +427,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
// addressing mode, any intermediate context pointer is loaded in compressed
// form.
if (depth == 0) {
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(index));
} else {
TaggedRegister tagged(context);
- LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+ LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
- LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+ LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
- Context::OffsetOfElementAt(index));
+ LoadTaggedField(kInterpreterAccumulatorRegister, tagged,
+ Context::OffsetOfElementAt(index));
}
}
@@ -484,10 +449,10 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
// form.
if (depth > 0) {
TaggedRegister tagged(context);
- LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+ LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
- LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+ LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
if (COMPRESS_POINTERS_BOOL) {
// Decompress tagged pointer.
@@ -505,29 +470,26 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
// enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
- LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
+ LoadTaggedField(tagged, context, Context::kExtensionOffset);
} else {
- LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+ LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
- LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+ LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
- LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
+ LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
}
if (cell_index > 0) {
- LoadTaggedPointerField(tagged, tagged,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
- LoadTaggedPointerField(tagged, tagged,
- SourceTextModule::kRegularImportsOffset);
+ LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(tagged, tagged, cell_index);
- LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
- Cell::kValueOffset);
+ LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset);
}
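
Editor's note: a standalone illustration of the cell-index convention used by LdaModuleVariable above, with simplified stand-in types: a positive cell_index selects regular_exports[cell_index - 1], a negative one selects regular_imports[-cell_index - 1], and the value is then read out of the cell.

    #include <vector>

    struct CellModel { int value = 0; };

    struct ModuleModel {
      std::vector<CellModel> regular_exports;
      std::vector<CellModel> regular_imports;
    };

    int LdaModuleVariableModel(const ModuleModel& module, int cell_index) {
      // cell_index == 0 does not occur; positive means export, negative import.
      const CellModel& cell = cell_index > 0
                                  ? module.regular_exports[cell_index - 1]
                                  : module.regular_imports[-cell_index - 1];
      return cell.value;  // corresponds to the final Cell::kValueOffset load
    }
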
void BaselineAssembler::StaModuleVariable(Register context, Register value,
@@ -537,17 +499,16 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
// enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
- LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
+ LoadTaggedField(tagged, context, Context::kExtensionOffset);
} else {
- LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+ LoadTaggedField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
- LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+ LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
}
- LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
+ LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
}
- LoadTaggedPointerField(tagged, tagged,
- SourceTextModule::kRegularExportsOffset);
+ LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
@@ -629,8 +590,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, scratch,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc
index cfe7eefd70..4a89a45252 100644
--- a/deps/v8/src/bigint/tostring.cc
+++ b/deps/v8/src/bigint/tostring.cc
@@ -531,11 +531,11 @@ char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk,
// Step 5: Recurse.
char* end_of_right_part = ProcessLevel(level->next_, right, out, false);
+ if (processor_->should_terminate()) return out;
// The recursive calls are required and hence designed to write exactly as
// many characters as their level is responsible for.
DCHECK(end_of_right_part == out - level->char_count_);
USE(end_of_right_part);
- if (processor_->should_terminate()) return out;
// We intentionally don't use {end_of_right_part} here to be prepared for
// potential future multi-threaded execution.
return ProcessLevel(level->next_, left, out - level->char_count_,
@@ -575,6 +575,7 @@ void ProcessorImpl::ToStringImpl(char* out, int* out_length, Digits X,
}
int excess = formatter.Finish();
*out_length -= excess;
+ memset(out + *out_length, 0, excess);
}
Status Processor::ToString(char* out, int* out_length, Digits X, int radix,
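
Editor's note: the two tostring.cc hunks above move the termination check ahead of the DCHECK and zero the unused tail of the output buffer. A minimal standalone sketch of that trimming step, with the excess count taken as a parameter:

    #include <cstring>

    // The formatter may reserve more characters than it finally writes;
    // Finish() reports the unused count, the length is trimmed by that amount,
    // and the trailing bytes are now zeroed instead of left uninitialized.
    void TrimAndZeroTail(char* out, int* out_length, int excess) {
      *out_length -= excess;
      std::memset(out + *out_length, 0, excess);
    }

For example, a buffer sized for 8 characters that ends up holding 5 has excess 3, so out_length becomes 5 and out[5..7] are zeroed.
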
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 6c843de4f2..9ac4d52665 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -17,6 +17,7 @@
#include "src/objects/contexts.h"
#include "src/objects/field-index-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-shared-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/prototype.h"
@@ -230,27 +231,6 @@ Handle<AccessorInfo> Accessors::MakeArrayLengthInfo(Isolate* isolate) {
}
//
-// Accessors::SharedArrayLength
-//
-
-void Accessors::SharedArrayLengthGetter(
- v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowGarbageCollection no_gc;
- HandleScope scope(isolate);
-
- Object value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
-
- Object result = Smi::FromInt(JSObject::cast(value).elements().length());
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::MakeSharedArrayLengthInfo(Isolate* isolate) {
- return MakeAccessor(isolate, isolate->factory()->length_string(),
- &SharedArrayLengthGetter, nullptr);
-}
-
-//
// Accessors::ModuleNamespaceEntry
//
@@ -422,8 +402,8 @@ Handle<AccessorInfo> Accessors::MakeFunctionNameInfo(Isolate* isolate) {
namespace {
-Handle<JSObject> ArgumentsForInlinedFunction(JavaScriptFrame* frame,
- int inlined_frame_index) {
+Handle<JSObject> ArgumentsFromDeoptInfo(JavaScriptFrame* frame,
+ int inlined_frame_index) {
Isolate* isolate = frame->isolate();
Factory* factory = isolate->factory();
@@ -478,7 +458,7 @@ int FindFunctionInFrame(JavaScriptFrame* frame, Handle<JSFunction> function) {
}
Handle<JSObject> GetFrameArguments(Isolate* isolate,
- JavaScriptFrameIterator* it,
+ JavaScriptStackFrameIterator* it,
int function_index) {
JavaScriptFrame* frame = it->frame();
@@ -487,7 +467,7 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
// correct number of arguments and no allocated arguments object, so
// we can construct a fresh one by interpreting the function's
// deoptimization input data.
- return ArgumentsForInlinedFunction(frame, function_index);
+ return ArgumentsFromDeoptInfo(frame, function_index);
}
// Construct an arguments object mirror for the right frame and the underlying
@@ -512,6 +492,20 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
}
arguments->set_elements(*array);
+ // For optimized functions, the frame arguments may be outdated, so we should
+ // update them with the deopt info, while keeping the length and extra
+ // arguments from the actual frame.
+ if (CodeKindCanDeoptimize(frame->LookupCode().kind()) && length > 0) {
+ Handle<JSObject> arguments_from_deopt_info =
+ ArgumentsFromDeoptInfo(frame, function_index);
+ Handle<FixedArray> elements_from_deopt_info(
+ FixedArray::cast(arguments_from_deopt_info->elements()), isolate);
+ int common_length = std::min(length, elements_from_deopt_info->length());
+ for (int i = 0; i < common_length; i++) {
+ array->set(i, elements_from_deopt_info->get(i));
+ }
+ }
+
// Return the freshly allocated arguments object.
return arguments;
}
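
Editor's note: a standalone model of the merge introduced above, using plain vectors in place of FixedArray; only the first min(n, m) values are refreshed from the deopt info, while the frame's own length and any extra trailing arguments are kept.

    #include <algorithm>
    #include <vector>

    std::vector<int> MergeArgumentsModel(std::vector<int> from_frame,
                                         const std::vector<int>& from_deopt_info) {
      const size_t common = std::min(from_frame.size(), from_deopt_info.size());
      for (size_t i = 0; i < common; ++i) {
        from_frame[i] = from_deopt_info[i];  // prefer the deopt-info values
      }
      return from_frame;  // length and trailing extras stay frame-derived
    }
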
@@ -523,8 +517,8 @@ Handle<JSObject> Accessors::FunctionGetArguments(JavaScriptFrame* frame,
Isolate* isolate = frame->isolate();
Address requested_frame_fp = frame->fp();
// Forward a frame iterator to the requested frame. This is needed because we
- // potentially need for advance it to the arguments adaptor frame later.
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ // potentially need to advance it to the inlined arguments frame later.
+ for (JavaScriptStackFrameIterator it(isolate); !it.done(); it.Advance()) {
if (it.frame()->fp() != requested_frame_fp) continue;
return GetFrameArguments(isolate, &it, inlined_jsframe_index);
}
@@ -541,7 +535,7 @@ void Accessors::FunctionArgumentsGetter(
Handle<Object> result = isolate->factory()->null_value();
if (!function->shared().native()) {
// Find the top invocation of the function by traversing frames.
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ for (JavaScriptStackFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
int function_index = FindFunctionInFrame(frame, function);
if (function_index >= 0) {
@@ -661,7 +655,7 @@ class FrameFunctionIterator {
}
Isolate* isolate_;
Handle<JSFunction> function_;
- JavaScriptFrameIterator frame_iterator_;
+ JavaScriptStackFrameIterator frame_iterator_;
std::vector<FrameSummary> frames_;
int inlined_frame_index_;
};
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index 8a8ea66b1f..f2edcd8978 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -44,8 +44,6 @@ class JavaScriptFrame;
kHasSideEffectToReceiver) \
V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
- V(_, shared_array_length, SharedArrayLength, kHasNoSideEffect, \
- kHasSideEffectToReceiver) \
V(_, string_length, StringLength, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, value_unavailable, ValueUnavailable, kHasNoSideEffect, \
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index ee9b34a634..b7996476c7 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -130,8 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -278,8 +278,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(r1, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(r1, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -327,7 +327,10 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ __ LoadMap(scratch1, sfi_data);
+
+#ifndef V8_JITLESS
+ __ CompareInstanceType(scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
@@ -338,6 +341,10 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ b(eq, is_baseline);
}
__ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
+#else
+ __ CompareInstanceType(scratch1, scratch1, INTERPRETER_DATA_TYPE);
+#endif // !V8_JITLESS
+
__ b(ne, &done);
__ ldr(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
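
Editor's note: a compilable standalone model of the dispatch this hunk encodes, with illustrative enum values rather than V8 instance types; in jitless builds there is no baseline Code to detect, so only the InterpreterData indirection remains.

    enum class FunctionDataModel { kCode, kInterpreterData, kBytecodeArray };

    enum class Outcome { kIsBaseline, kUnwrapInterpreterData, kAlreadyBytecode };

    Outcome ClassifyFunctionData(FunctionDataModel type, bool jitless) {
      if (!jitless && type == FunctionDataModel::kCode) {
        return Outcome::kIsBaseline;  // baseline code: jump to is_baseline
      }
      if (type == FunctionDataModel::kInterpreterData) {
        return Outcome::kUnwrapInterpreterData;  // load kBytecodeArrayOffset
      }
      return Outcome::kAlreadyBytecode;  // sfi_data already is the bytecode array
    }
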
@@ -494,12 +501,13 @@ constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize -
kPointerSize /* FP */ +
kNumDoubleCalleeSaved * kDoubleSize +
5 * kPointerSize /* r5, r6, r7, fp, lr */ +
- EntryFrameConstants::kCallerFPOffset;
+ EntryFrameConstants::kNextExitFrameFPOffset;
// Assert that the EntryFrameConstants are in sync with the builtin.
-static_assert(kPushedStackSpace == EntryFrameConstants::kDirectCallerSPOffset +
- 3 * kPointerSize /* r5, r6, r7*/ +
- EntryFrameConstants::kCallerFPOffset,
+static_assert(kPushedStackSpace ==
+ EntryFrameConstants::kDirectCallerSPOffset +
+ 3 * kPointerSize /* r5, r6, r7*/ +
+ EntryFrameConstants::kNextExitFrameFPOffset,
"Pushed stack space and frame constants do not match. See "
"frame-constants-arm.h");
@@ -529,7 +537,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
const RegList kCalleeSavedWithoutFp = kCalleeSaved - fp;
// Update |pushed_stack_space| when we manipulate the stack.
- int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
+ int pushed_stack_space = EntryFrameConstants::kNextExitFrameFPOffset;
{
NoRootArrayScope no_root_array(masm);
@@ -565,15 +573,15 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ mov(r5, Operand::Zero());
__ str(r5, MemOperand(r4));
Register scratch = r6;
// Set up frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+ __ add(fp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
@@ -658,7 +666,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Reset the stack to the callee saved registers.
__ add(sp, sp,
- Operand(-EntryFrameConstants::kCallerFPOffset -
+ Operand(-EntryFrameConstants::kNextExitFrameFPOffset -
kSystemPointerSize /* already popped one */));
__ ldm(ia_w, sp, {fp, lr});
@@ -825,8 +833,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1110,7 +1118,6 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = r1;
- Register feedback_vector = r2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
@@ -1129,7 +1136,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
BYTECODE_ARRAY_TYPE);
__ b(ne, &compile_lazy);
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
+ Register feedback_vector = r2;
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1167,6 +1176,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1300,6 +1315,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
@@ -1332,6 +1348,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1351,7 +1368,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1499,7 +1516,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(r2, r2);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -1713,8 +1730,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ cmp(maybe_target_code, Operand(Smi::zero()));
__ b(ne, &jump_to_optimized_code);
}
@@ -1762,9 +1779,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ ldr(r1,
FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
+ __ LoadCodeEntry(r0, r0);
+
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -1813,8 +1831,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1890,8 +1908,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1933,8 +1951,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
__ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r0, r4, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1996,7 +2014,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2726,8 +2745,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r0: number of arguments including receiver
// r1: pointer to builtin function
@@ -2752,8 +2770,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, 0,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mov(r4, Operand(r0));
@@ -2814,7 +2831,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// Callee-saved register r4 still holds argc.
: r4;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
+ __ LeaveExitFrame(argc, false);
__ mov(pc, lr);
// Handling of exception.
@@ -3049,7 +3066,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK_EQ(stack_space, 0);
__ ldr(r4, *stack_space_operand);
}
- __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
+ __ LeaveExitFrame(r4, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r4, RootIndex::kTheHoleValue);
@@ -3083,6 +3100,17 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ jmp(&leave_exit_frame);
}
+MemOperand ExitFrameStackSlotOperand(int offset) {
+ static constexpr int kFrameOffset = 1 * kPointerSize;
+ return MemOperand(sp, kFrameOffset + offset);
+}
+
+MemOperand ExitFrameCallerStackSlotOperand(int index) {
+ return MemOperand(
+ fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
} // namespace
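
Editor's note: a standalone sketch of the address arithmetic the two new helpers perform, with plain byte offsets instead of MemOperand; kPointerSize is ARM's 4 bytes here, and the fixed-slot count below is an assumed illustrative value, not the real BuiltinExitFrameConstants figure.

    #include <cstdint>

    constexpr int kPointerSizeModel = 4;            // 32-bit ARM pointers
    constexpr int kFixedSlotCountAboveFpModel = 2;  // illustrative assumption

    // sp-relative: skip one reserved slot, then apply the byte offset.
    constexpr std::intptr_t ExitFrameStackSlotOffset(int offset) {
      return 1 * kPointerSizeModel + offset;
    }

    // fp-relative: skip the fixed slots above fp, then index caller slots.
    constexpr std::intptr_t ExitFrameCallerStackSlotOffset(int index) {
      return (kFixedSlotCountAboveFpModel + index) * kPointerSizeModel;
    }
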
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
@@ -3106,12 +3134,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
+ using FCI = FunctionCallbackInfo<v8::Value>;
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3119,12 +3148,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kPointerSize]: kHolder
+ // sp[0 * kPointerSize]: kHolder <= FCI::implicit_args_
// sp[1 * kPointerSize]: kIsolate
// sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kPointerSize]: undefined (kReturnValue)
// sp[4 * kPointerSize]: kData
// sp[5 * kPointerSize]: undefined (kNewTarget)
+ // Existing state:
+ // sp[6 * kPointerSize]: <= FCI::values_
// Reserve space on the stack.
__ AllocateStackSpace(FCA::kArgsLength * kPointerSize);
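
Editor's note: a standalone restatement of the layout comment above; the six implicit_args slots are pushed below the receiver and arguments already on the stack, so FCI::values_ begins at slot kArgsLength. The enum simply mirrors the static_asserts in this hunk.

    enum FcaIndexModel {
      kHolderIndexModel = 0,
      kIsolateIndexModel = 1,
      kReturnValueDefaultValueIndexModel = 2,
      kReturnValueIndexModel = 3,
      kDataIndexModel = 4,
      kNewTargetIndexModel = 5,
      kArgsLengthModel = 6,  // first values_ slot (the receiver) follows here
    };

    static_assert(kArgsLengthModel == kNewTargetIndexModel + 1,
                  "values_ starts immediately after the implicit_args block");
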
@@ -3153,43 +3184,46 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- static constexpr int kApiStackSpace = 4;
- static constexpr bool kDontSaveDoubles = false;
+ static constexpr int kSlotsToDropSize = 1 * kPointerSize;
+ static constexpr int kApiStackSpace =
+ (FCI::kSize + kSlotsToDropSize) / kPointerSize;
+ static_assert(kApiStackSpace == 4);
+ static_assert(FCI::kImplicitArgsOffset == 0);
+ static_assert(FCI::kValuesOffset == 1 * kPointerSize);
+ static_assert(FCI::kLengthOffset == 2 * kPointerSize);
+
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ str(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ str(scratch, ExitFrameStackSlotOperand(FCI::kImplicitArgsOffset));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
- __ str(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ add(scratch, scratch,
+ Operand(FCA::kArgsLengthWithReceiver * kPointerSize));
+ __ str(scratch, ExitFrameStackSlotOperand(FCI::kValuesOffset));
// FunctionCallbackInfo::length_.
- __ str(argc, MemOperand(sp, 3 * kPointerSize));
+ __ str(argc, ExitFrameStackSlotOperand(FCI::kLengthOffset));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
+ MemOperand stack_space_operand =
+ ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropSize);
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
__ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
- __ str(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ str(scratch, stack_space_operand);
// v8::InvocationCallback's argument.
__ add(r0, sp, Operand(1 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // There are two stack slots above the arguments we constructed on the stack.
- // TODO(jgruber): Document what these arguments are.
- static constexpr int kStackSlotsAboveFCA = 2;
- MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
-
+ MemOperand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(sp, 4 * kPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3200,14 +3234,15 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -3229,15 +3264,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ constexpr int kNameHandleStackSize = 1;
+ constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mov(r0, sp); // r0 = Handle<Name>
__ add(r1, r0, Operand(1 * kPointerSize)); // r1 = v8::PCI::args_
- const int kApiStackSpace = 1;
+ constexpr int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
@@ -3246,13 +3282,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
-
__ ldr(api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
@@ -3261,8 +3294,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
- // purpose Code object) to be able to call into C functions that may trigger
- // GC and thus move the caller.
+ // purpose InstructionStream object) to be able to call into C functions that
+ // may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@@ -3556,7 +3589,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = r1;
__ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = r4;
__ ldr(code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3567,7 +3600,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -3580,7 +3613,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
@@ -3654,16 +3687,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ PrepareCallCFunction(3, 0);
__ CallCFunction(get_baseline_pc, 3, 0);
}
+ __ LoadCodeEntry(code_obj, code_obj);
__ add(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
UseScratchRegisterScope temps(masm);
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire());
- Generate_OSREntry(masm, code_obj,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ Generate_OSREntry(masm, code_obj);
} else {
- __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index fa539031d3..9d48902a31 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -163,7 +163,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
__ Ret();
__ Bind(&stack_overflow);
@@ -213,7 +213,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
@@ -348,7 +348,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
__ Ret();
// Otherwise we do a smi check and fall through to check if the return value
@@ -358,11 +358,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// If the result is a smi, it is *not* an object in the ECMA sense.
__ JumpIfSmi(x0, &use_receiver);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_and_return,
- ge);
+ // Check if the type of the result is not an object in the ECMA sense.
+ __ JumpIfJSAnyIsNotPrimitive(x0, x4, &leave_and_return);
__ B(&use_receiver);
__ Bind(&do_throw);
@@ -388,19 +385,19 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ Unreachable();
}
-static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
- Register code, Register scratch) {
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
// Verify that the code kind is baseline code via the CodeKind.
- __ Ldr(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
- __ DecodeField<CodeT::KindField>(scratch);
+ __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
__ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
- Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
DCHECK(!AreAliased(code, scratch));
- return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
@@ -411,19 +408,26 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ __ LoadMap(scratch1, sfi_data);
+
+#ifndef V8_JITLESS
+ __ CompareInstanceType(scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ B(ne, &not_baseline);
- AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ B(eq, is_baseline);
__ Bind(&not_baseline);
} else {
__ B(eq, is_baseline);
}
__ Cmp(scratch1, INTERPRETER_DATA_TYPE);
+#else
+ __ CompareInstanceType(scratch1, scratch1, INTERPRETER_DATA_TYPE);
+#endif // !V8_JITLESS
+
__ B(ne, &done);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ Bind(&done);
@@ -446,10 +450,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(x1);
// Load suspended function and context.
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
- __ LoadTaggedPointerField(cp,
- FieldMemOperand(x4, JSFunction::kContextOffset));
+ __ LoadTaggedField(x4,
+ FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -477,7 +480,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -493,8 +496,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
// Poke receiver into highest claimed slot.
- __ LoadTaggedPointerField(
- x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedField(x5,
+ FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ Poke(x5, __ ReceiverOperand(x10));
// ----------- S t a t e -------------
@@ -507,7 +510,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x5,
FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -518,7 +521,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Bind(&loop);
__ Sub(x10, x10, 1);
- __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
+ __ LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
__ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
__ Cbnz(x10, &loop);
__ Bind(&done);
@@ -527,19 +530,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
- __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+ __ IsObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
__ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
{
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -549,8 +552,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x3, x1);
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ JumpCodeTObject(x2);
+ __ LoadTaggedField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ JumpCodeObject(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@@ -561,8 +564,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg, x4, x5);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(padreg, x1);
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(x4,
+ FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -572,8 +575,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(padreg, x1);
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(x4,
+ FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -639,7 +642,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// C calling convention. The first argument is passed in x0.
__ Mov(kRootRegister, x0);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
// Initialize the pointer cage base register.
__ LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
@@ -665,8 +668,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've loaded its value to be pushed on the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ Str(xzr, MemOperand(x11));
// Set js_entry_sp if this is the outermost JS call.
@@ -758,7 +761,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
//
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
- Handle<CodeT> trampoline_code =
+ Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -923,7 +926,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Mov(x23, x19);
__ Mov(x24, x19);
__ Mov(x25, x19);
-#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifndef V8_COMPRESS_POINTERS
__ Mov(x28, x19);
#endif
// Don't initialize the reserved registers.
@@ -932,9 +935,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// x28 : pointer cage base register (kPtrComprCageBaseRegister).
// x29 : frame pointer (fp).
- Handle<CodeT> builtin = is_construct
- ? BUILTIN_CODE(masm->isolate(), Construct)
- : masm->isolate()->builtins()->Call();
+ Handle<Code> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the JS internal frame and remove the parameters (except function),
@@ -1108,11 +1111,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = temps.AcquireX();
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector, x4);
// Check the tiering state.
@@ -1205,7 +1207,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+ __ Pop<MacroAssembler::kAuthLR>(fp, lr);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ Trap();
}
@@ -1266,13 +1268,12 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = x1;
- Register feedback_vector = x2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
@@ -1283,22 +1284,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, x4, x4,
- BYTECODE_ARRAY_TYPE);
+ __ IsObjectType(kInterpreterBytecodeArrayRegister, x4, x4,
+ BYTECODE_ARRAY_TYPE);
__ B(ne, &compile_lazy);
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ Register feedback_vector = x2;
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadTaggedPointerField(
- x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedField(x7,
+ FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
@@ -1329,8 +1331,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ Bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ Push<TurboAssembler::kSignLR>(lr, fp);
+ __ Push<MacroAssembler::kSignLR>(lr, fp);
__ mov(fp, sp);
__ Push(cp, closure);
@@ -1342,7 +1350,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// Push actual argument count, bytecode array, Smi tagged bytecode array
// offset and an undefined (to properly align the stack pointer).
- static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
+ static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
__ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
__ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1474,22 +1482,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
@@ -1503,11 +1512,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ Move(x2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ ReplaceClosureCodeWithOptimizedCode(x2, closure);
- __ JumpCodeTObject(x2);
+ __ JumpCodeObject(x2);
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1582,7 +1592,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
}
__ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
- TurboAssembler::kDstLessThanSrcAndReverse);
+ MacroAssembler::kDstLessThanSrcAndReverse);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
@@ -1680,8 +1690,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ AssertNotSmi(
kInterpreterBytecodeArrayRegister,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
- BYTECODE_ARRAY_TYPE);
+ __ IsObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
+ BYTECODE_ARRAY_TYPE);
__ Assert(
eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
@@ -1732,18 +1742,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
- kInterpreterDispatchTableRegister,
- INTERPRETER_DATA_TYPE);
+ __ IsObjectType(x1, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister, INTERPRETER_DATA_TYPE);
__ B(ne, &builtin_trampoline);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
- __ LoadCodeTEntry(x1, x1);
+ __ LoadCodeEntry(x1, x1);
__ B(&trampoline_loaded);
__ Bind(&builtin_trampoline);
@@ -1882,7 +1891,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Restore fp, lr.
__ Mov(sp, fp);
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+ __ Pop<MacroAssembler::kAuthLR>(fp, lr);
__ LoadEntryFromBuiltinIndex(builtin);
__ Jump(builtin);
@@ -1951,8 +1960,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
}
@@ -1993,13 +2002,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
- }
-
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x1,
FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2009,10 +2014,11 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ Add(x0, x0, x1);
- Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
+ __ LoadCodeEntry(x0, x0);
+
+ // Compute the target address = code_entry + osr_offset
+ // <entry_addr> = <code_entry> + <osr_offset>
+ Generate_OSREntry(masm, x0, x1);
}
} // namespace
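
Editor's note: a small standalone model of the new OSR entry computation; LoadCodeEntry already yields the code entry address, so the old Code::kHeaderSize - kHeapObjectTag adjustment disappears and the pc offset from the deoptimization data is added directly. The index constant below is illustrative, not the real DeoptimizationData value.

    #include <cstdint>
    #include <vector>

    constexpr size_t kOsrPcOffsetIndexModel = 1;  // illustrative assumption

    std::uintptr_t OsrEntryAddressModel(std::uintptr_t code_entry,
                                        const std::vector<std::uintptr_t>& deopt_data) {
      return code_entry + deopt_data[kOsrPcOffsetIndexModel];
    }
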
@@ -2070,7 +2076,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
__ PushArgument(this_arg);
// ----------- S t a t e -------------
@@ -2159,7 +2165,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ SlotAddress(copy_from, count);
__ Add(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count,
- TurboAssembler::kSrcLessThanDst);
+ MacroAssembler::kSrcLessThanDst);
__ Drop(2);
}
@@ -2207,7 +2213,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
@@ -2265,7 +2271,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
@@ -2334,9 +2340,10 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
- Handle<CodeT> code) {
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- x1 : target
// -- x0 : number of parameters on the stack
@@ -2348,7 +2355,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
- __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Cmp(x13, FIXED_ARRAY_TYPE);
__ B(eq, &ok);
@@ -2380,22 +2387,28 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
{
Label loop;
Register src = x10;
- Register the_hole_value = x11;
Register undefined_value = x12;
Register scratch = x13;
__ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
- __ LoadRoot(the_hole_value, RootIndex::kTheHoleValue);
+#if !V8_STATIC_ROOTS_BOOL
+ // We do not use the CompareRoot macro without static roots as it would do a
+ // LoadRoot behind the scenes and we want to avoid that in a loop.
+ Register the_hole_value = x11;
+ __ LoadTaggedRoot(the_hole_value, RootIndex::kTheHoleValue);
+#endif // !V8_STATIC_ROOTS_BOOL
__ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
- // We do not use the CompareRoot macro as it would do a LoadRoot behind the
- // scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
Register dst = x16;
__ SlotAddress(dst, argc);
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
- __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
+ __ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
+#if V8_STATIC_ROOTS_BOOL
+ __ CompareRoot(scratch, RootIndex::kTheHoleValue);
+#else
__ CmpTagged(scratch, the_hole_value);
+#endif
__ Csel(scratch, scratch, undefined_value, ne);
__ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
__ Cbnz(len, &loop);
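
Editor's note: a standalone model of the copy loop above, with a sentinel int standing in for the hole and undefined tagged values; each argument read from the FixedArray is pushed as-is unless it is the hole, in which case undefined is pushed instead (the Csel above).

    #include <vector>

    constexpr int kTheHoleModel = -1;
    constexpr int kUndefinedModel = 0;

    std::vector<int> CopyVarargsReplacingHoles(const std::vector<int>& args) {
      std::vector<int> pushed;
      pushed.reserve(args.size());
      for (int value : args) {
        pushed.push_back(value == kTheHoleModel ? kUndefinedModel : value);
      }
      return pushed;
    }
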
@@ -2411,7 +2424,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
- Handle<CodeT> code) {
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments
// -- x3 : the new.target (for [[Construct]] calls)
@@ -2426,7 +2439,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
- __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
+ __ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
__ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
&new_target_constructor);
@@ -2486,14 +2499,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(x1);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(cp,
- FieldMemOperand(x1, JSFunction::kContextOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
@@ -2516,9 +2528,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label convert_to_object, convert_receiver;
__ Peek(x3, __ ReceiverOperand(x0));
__ JumpIfSmi(x3, &convert_to_object);
- static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
- __ B(hs, &done_convert);
+ __ JumpIfJSAnyIsNotPrimitive(x3, x4, &done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(x3, RootIndex::kUndefinedValue, &convert_global_proxy);
@@ -2545,7 +2555,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(cp, x1, x0, padreg);
__ SmiUntag(x0);
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Bind(&convert_receiver);
}
@@ -2579,7 +2589,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(bound_argc,
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
@@ -2662,7 +2672,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SlotAddress(copy_to, total_argc);
__ Sub(copy_from, copy_to, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc,
- TurboAssembler::kSrcLessThanDst);
+ MacroAssembler::kSrcLessThanDst);
}
}
@@ -2681,8 +2691,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SlotAddress(copy_to, 1);
__ Bind(&loop);
__ Sub(counter, counter, 1);
- __ LoadAnyTaggedField(scratch,
- MemOperand(bound_argv, kTaggedSize, PostIndex));
+ __ LoadTaggedField(scratch,
+ MemOperand(bound_argv, kTaggedSize, PostIndex));
__ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
__ Cbnz(counter, &loop);
}
@@ -2703,15 +2713,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(x1);
// Patch the receiver to [[BoundThis]].
- __ LoadAnyTaggedField(x10,
- FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+ __ LoadTaggedField(x10,
+ FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ Poke(x10, __ ReceiverOperand(x0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@@ -2812,7 +2822,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAllClear(
@@ -2844,13 +2854,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ CmpTagged(x1, x3);
__ B(ne, &done);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2874,8 +2884,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(map,
- FieldMemOperand(target, HeapObject::kMapOffset));
+ __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
{
Register flags = x2;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2976,12 +2985,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = x10;
Label allocate_vector, done;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
vector, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
- __ LoadTaggedPointerField(vector,
- FieldMemOperand(vector, FixedArray::kHeaderSize));
+ __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ Push(vector, xzr);
@@ -2996,7 +3004,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
// Save registers.
__ PushXRegList(kSavedGpRegs);
__ PushQRegList(kSavedFpRegs);
- __ Push<TurboAssembler::kSignLR>(lr, xzr); // xzr is for alignment.
+ __ Push<MacroAssembler::kSignLR>(lr, xzr); // xzr is for alignment.
// Arguments to the runtime function: instance, func_index, and an
// additional stack slot for the NativeModule. The first pushed register
@@ -3008,7 +3016,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
__ Mov(vector, kReturnRegister0);
// Restore registers and frame type.
- __ Pop<TurboAssembler::kAuthLR>(xzr, lr);
+ __ Pop<MacroAssembler::kAuthLR>(xzr, lr);
__ PopQRegList(kSavedFpRegs);
__ PopXRegList(kSavedGpRegs);
// Restore the instance from the frame.
@@ -3121,8 +3129,8 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
// We had to prepare the parameters for the Call: we have to put the context
// into kContextRegister.
- __ LoadAnyTaggedField(
- kContextRegister, // cp(x27)
+ __ LoadTaggedField(
+ kContextRegister, // cp(x27)
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
}
@@ -3210,7 +3218,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset));
__ Stp(wasm_instance, function_data,
MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
kContextRegister,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@@ -3256,15 +3264,14 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
wasm::JumpBuffer::Retired);
}
Register parent = tmp2;
- __ LoadAnyTaggedField(
- parent,
- FieldMemOperand(active_continuation,
- WasmContinuationObject::kParentOffset));
+ __ LoadTaggedField(parent,
+ FieldMemOperand(active_continuation,
+ WasmContinuationObject::kParentOffset));
// Update active continuation root.
int32_t active_continuation_offset =
- TurboAssembler::RootRegisterOffsetForRootIndex(
- RootIndex::kActiveContinuation);
+ MacroAssembler::RootRegisterOffsetForRootIndex(
+ RootIndex::kActiveContinuation);
__ Str(parent, MemOperand(kRootRegister, active_continuation_offset));
jmpbuf = parent;
__ LoadExternalPointerField(
@@ -3293,7 +3300,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset);
__ Move(tmp2, Smi::FromInt(WasmSuspenderObject::kInactive));
__ StoreTaggedField(tmp2, state_loc);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
suspender,
FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
__ CompareRoot(suspender, RootIndex::kUndefinedValue);
@@ -3313,8 +3320,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
__ StoreTaggedField(tmp2, state_loc);
__ bind(&undefined);
int32_t active_suspender_offset =
- TurboAssembler::RootRegisterOffsetForRootIndex(
- RootIndex::kActiveSuspender);
+ MacroAssembler::RootRegisterOffsetForRootIndex(
+ RootIndex::kActiveSuspender);
__ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
}
@@ -3322,17 +3329,16 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
Register function_data,
Register wasm_instance) {
Register closure = function_data;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_data,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_data,
- FieldMemOperand(function_data,
- SharedFunctionInfo::kFunctionDataOffset));
+ FieldMemOperand(function_data, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
wasm_instance,
FieldMemOperand(function_data,
WasmExportedFunctionData::kInstanceOffset));
@@ -3573,7 +3579,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// A result of AllocateSuspender is in the return register.
__ Str(suspender, MemOperand(fp, kSuspenderOffset));
DEFINE_SCOPED(target_continuation);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
target_continuation,
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
FREE_REG(suspender);
@@ -3901,8 +3907,10 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
original_fp);
// Truncate float64 to float32.
__ Fcvt(s1, kFPReturnRegister0);
- __ Str(s1, MemOperand(current_float_param_slot, -kSystemPointerSize,
- PostIndex));
+ // Store the full 64 bits to silence a spurious msan error (see
+ // crbug.com/1414270).
+ __ Str(d1,
+ MemOperand(current_float_param_slot, -kSystemPointerSize, PostIndex));
__ jmp(&param_conversion_done);
__ bind(&param_kWasmF64);
@@ -4229,7 +4237,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
__ Mov(scratch, 1);
__ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0));
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_entry,
FieldMemOperand(function_data,
WasmExportedFunctionData::kInternalOffset));
@@ -4317,7 +4325,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// expected to be on the top of the stack).
// We cannot use just the ret instruction for this, because we cannot pass
// the number of slots to remove in a Register as an argument.
- __ DropArguments(param_count, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(param_count, MacroAssembler::kCountExcludesReceiver);
__ Ret(lr);
// -------------------------------------------
@@ -4497,7 +4505,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
regs.ResetExcept(promise, suspender, continuation);
DEFINE_REG(suspender_continuation);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
suspender_continuation,
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
if (v8_flags.debug_code) {
@@ -4518,18 +4526,19 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// Update roots.
// -------------------------------------------
DEFINE_REG(caller);
- __ LoadAnyTaggedField(caller,
- FieldMemOperand(suspender_continuation,
- WasmContinuationObject::kParentOffset));
+ __ LoadTaggedField(caller,
+ FieldMemOperand(suspender_continuation,
+ WasmContinuationObject::kParentOffset));
int32_t active_continuation_offset =
- TurboAssembler::RootRegisterOffsetForRootIndex(
- RootIndex::kActiveContinuation);
+ MacroAssembler::RootRegisterOffsetForRootIndex(
+ RootIndex::kActiveContinuation);
__ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
DEFINE_REG(parent);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
int32_t active_suspender_offset =
- TurboAssembler::RootRegisterOffsetForRootIndex(RootIndex::kActiveSuspender);
+ MacroAssembler::RootRegisterOffsetForRootIndex(
+ RootIndex::kActiveSuspender);
__ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
regs.ResetExcept(promise, caller);
@@ -4596,7 +4605,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// Load suspender from closure.
// -------------------------------------------
DEFINE_REG(sfi);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
sfi,
MemOperand(
closure,
@@ -4606,12 +4615,12 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// RecordWriteField calls later.
DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
DEFINE_REG(function_data);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_data,
FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
// The write barrier uses a fixed register for the host object
// (WriteBarrierDescriptor::ObjectRegister()). The next barrier is on the
// suspender, so load it into that register directly.
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
suspender,
FieldMemOperand(function_data, WasmResumeData::kSuspenderOffset));
// Check the suspender state.
@@ -4660,8 +4669,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
scratch,
FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset));
int32_t active_suspender_offset =
- TurboAssembler::RootRegisterOffsetForRootIndex(
- RootIndex::kActiveSuspender);
+ MacroAssembler::RootRegisterOffsetForRootIndex(
+ RootIndex::kActiveSuspender);
__ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
// Next line we are going to load a field from suspender, but we have to use
@@ -4670,10 +4679,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
FREE_REG(suspender);
DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
suspender = target_continuation;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
target_continuation,
- FieldMemOperand(suspender,
- WasmSuspenderObject::kContinuationOffset));
+ FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
__ StoreTaggedField(
@@ -4685,8 +4693,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
FREE_REG(active_continuation);
int32_t active_continuation_offset =
- TurboAssembler::RootRegisterOffsetForRootIndex(
- RootIndex::kActiveContinuation);
+ MacroAssembler::RootRegisterOffsetForRootIndex(
+ RootIndex::kActiveContinuation);
__ Str(target_continuation,
MemOperand(kRootRegister, active_continuation_offset));
@@ -4731,7 +4739,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
__ bind(&suspend);
__ LeaveFrame(StackFrame::STACK_SWITCH);
// Pop receiver + parameter.
- __ DropArguments(2, TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(2, MacroAssembler::kCountIncludesReceiver);
__ Ret(lr);
}
} // namespace
@@ -4751,8 +4759,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// The Abort mechanism relies on CallRuntime, which in turn relies on
// CEntry, so until this stub has been generated, we have to use a
// fall-back Abort mechanism.
@@ -4808,7 +4815,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space,
+ x10, extra_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Poke callee-saved registers into reserved space.
@@ -4889,7 +4896,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Peek(argc, 2 * kSystemPointerSize);
__ Peek(target, 3 * kSystemPointerSize);
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9);
+ __ LeaveExitFrame(x10, x9);
if (argv_mode == ArgvMode::kStack) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
@@ -5137,7 +5144,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Ldr(x19, *stack_space_operand);
}
- __ LeaveExitFrame(false, x1, x5);
+ __ LeaveExitFrame(x1, x5);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
@@ -5178,6 +5185,17 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ B(&leave_exit_frame);
}
+MemOperand ExitFrameStackSlotOperand(int offset) {
+ static constexpr int kFrameOffset = 1 * kSystemPointerSize;
+ return MemOperand(sp, kFrameOffset + offset);
+}
+
+MemOperand ExitFrameCallerStackSlotOperand(int index) {
+ return MemOperand(
+ fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
} // namespace
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
@@ -5201,25 +5219,27 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
+ using FCI = FunctionCallbackInfo<v8::Value>;
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
- //
// Target state:
- // sp[0 * kSystemPointerSize]: kHolder
+ // sp[0 * kSystemPointerSize]: kHolder <= FCA::implicit_args_
// sp[1 * kSystemPointerSize]: kIsolate
// sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kSystemPointerSize]: undefined (kReturnValue)
// sp[4 * kSystemPointerSize]: kData
// sp[5 * kSystemPointerSize]: undefined (kNewTarget)
+ // Existing state:
+ // sp[6 * kSystemPointerSize]: <= FCA::values_
// Reserve space on the stack.
__ Claim(FCA::kArgsLength, kSystemPointerSize);
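
The target-state comment and static_asserts above fully determine the implicit_args layout this builtin builds on the stack. Written out as a plain enum purely for readability (slot 0 is the lowest stack slot, sp[0]; the names mirror the FCA::k* constants and are otherwise illustrative):

  // Slot indices of the implicit_args block, lowest stack slot first.
  enum FunctionCallbackImplicitArgSlot : int {
    kHolderSlot = 0,                   // FCA::kHolderIndex
    kIsolateSlot = 1,                  // FCA::kIsolateIndex
    kReturnValueDefaultValueSlot = 2,  // FCA::kReturnValueDefaultValueIndex
    kReturnValueSlot = 3,              // FCA::kReturnValueIndex
    kDataSlot = 4,                     // FCA::kDataIndex
    kNewTargetSlot = 5,                // FCA::kNewTargetIndex
    kImplicitArgsSlotCount = 6         // FCA::kArgsLength
  };
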
@@ -5248,25 +5268,30 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
- static constexpr int kApiStackSpace = 4;
- static constexpr bool kDontSaveDoubles = false;
+ static constexpr int kSlotsToDropSize = 1 * kSystemPointerSize;
+ static constexpr int kApiStackSpace =
+ (FCI::kSize + kSlotsToDropSize) / kSystemPointerSize;
+ static_assert(kApiStackSpace == 4);
+ static_assert(FCI::kImplicitArgsOffset == 0);
+ static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize);
+ static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize);
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, x10,
- kApiStackSpace + kCallApiFunctionSpillSpace);
+ __ EnterExitFrame(x10, kApiStackSpace + kCallApiFunctionSpillSpace,
+ StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
- // Arguments are after the return address (pushed by EnterExitFrame()).
- __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ Str(scratch, ExitFrameStackSlotOperand(FCI::kImplicitArgsOffset));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ Add(scratch, scratch,
- Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
- __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+ Operand(FCA::kArgsLengthWithReceiver * kSystemPointerSize));
+ __ Str(scratch, ExitFrameStackSlotOperand(FCI::kValuesOffset));
// FunctionCallbackInfo::length_.
- __ Str(argc, MemOperand(sp, 3 * kSystemPointerSize));
+ __ Str(argc, ExitFrameStackSlotOperand(FCI::kLengthOffset));
// We also store the number of slots to drop from the stack after returning
// from the API function here.
@@ -5274,8 +5299,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// drop, not the number of bytes. arm64 must always drop a slot count that is
// a multiple of two, and related helper functions (DropArguments) expect a
// register containing the slot count.
- __ Add(scratch, argc, Operand(FCA::kArgsLength + 1 /*receiver*/));
- __ Str(scratch, MemOperand(sp, 4 * kSystemPointerSize));
+ MemOperand stack_space_operand =
+ ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropSize);
+ __ Add(scratch, argc, Operand(FCA::kArgsLengthWithReceiver));
+ __ Str(scratch, stack_space_operand);
// v8::InvocationCallback's argument.
DCHECK(!AreAliased(x0, api_function_address));
@@ -5286,15 +5313,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// The current frame needs to be aligned.
DCHECK_EQ(FCA::kArgsLength % 2, 0);
- // There are two stack slots above the arguments we constructed on the stack.
- // TODO(jgruber): Document what these arguments are.
- static constexpr int kStackSlotsAboveFCA = 2;
- MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
-
+ MemOperand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kSpillOffset = 1 + kApiStackSpace;
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -5303,14 +5325,15 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
}
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -5322,12 +5345,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
name));
- __ LoadAnyTaggedField(data,
- FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadTaggedField(data,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, RootIndex::kUndefinedValue);
__ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
- __ LoadTaggedPointerField(
- name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedField(name,
+ FieldMemOperand(callback, AccessorInfo::kNameOffset));
// PropertyCallbackArguments:
// receiver, data, return value, return value default, isolate, holder,
@@ -5337,8 +5360,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ Push(receiver, data, undef, undef, isolate_address, holder, xzr, name);
// v8::PropertyCallbackInfo::args_ array and name handle.
- static const int kStackUnwindSpace =
- PropertyCallbackArguments::kArgsLength + 1;
+ static constexpr int kNameHandleStackSize = 1;
+ static const int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
static_assert(kStackUnwindSpace % 2 == 0,
"slots must be a multiple of 2 for stack pointer alignment");
@@ -5349,7 +5372,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+ __ EnterExitFrame(x10, kApiStackSpace + kCallApiFunctionSpillSpace,
+ StackFrame::EXIT);
// Create a v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
@@ -5357,37 +5381,35 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ SlotAddress(x1, 1);
// x1 = v8::PropertyCallbackInfo&
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
Register api_function_address = x2;
__ LoadExternalPointerField(
api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
kAccessorInfoGetterTag);
- const int spill_offset = 1 + kApiStackSpace;
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+ static constexpr int kSpillOffset = 1 + kApiStackSpace;
+ MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
+
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
- spill_offset, return_value_operand);
+ kSpillOffset, return_value_operand);
}
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
- // purpose Code object) to be able to call into C functions that may trigger
- // GC and thus move the caller.
+ // purpose InstructionStream object) to be able to call into C functions that
+ // may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
- __ Poke<TurboAssembler::kSignLR>(lr, 0); // Store the return address.
+ __ Poke<MacroAssembler::kSignLR>(lr, 0); // Store the return address.
__ Blr(x10); // Call the C++ function.
- __ Peek<TurboAssembler::kAuthLR>(lr, 0); // Return to calling code.
+ __ Peek<MacroAssembler::kAuthLR>(lr, 0); // Return to calling code.
__ AssertFPCRState();
__ Ret();
}
@@ -5618,7 +5640,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ Lsr(frame_size, x3, kSystemPointerSizeLog2);
- __ Claim(frame_size);
+ __ Claim(frame_size, kXRegSize, /*assume_sp_aligned=*/false);
__ Add(x7, current_frame, FrameDescription::frame_content_offset());
__ SlotAddress(x6, 0);
@@ -5695,12 +5717,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = x1;
__ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = x22;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
@@ -5708,7 +5730,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
+ __ IsObjectType(code_obj, x3, x3, CODE_TYPE);
__ B(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -5721,29 +5743,25 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
+ __ IsObjectType(code_obj, x3, x3, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
- AssertCodeTIsBaseline(masm, code_obj, x3);
- }
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+ AssertCodeIsBaseline(masm, code_obj, x3);
}
// Load the feedback vector.
Register feedback_vector = x2;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ CompareObjectType(feedback_vector, x3, x3, FEEDBACK_VECTOR_TYPE);
+ __ IsObjectType(feedback_vector, x3, x3, FEEDBACK_VECTOR_TYPE);
__ B(ne, &install_baseline_code);
// Save BytecodeOffset from the stack frame.
@@ -5798,14 +5816,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallCFunction(get_baseline_pc, 3, 0);
}
+ __ LoadCodeEntry(code_obj, code_obj);
__ Add(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister, padreg);
if (is_osr) {
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
- Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ Generate_OSREntry(masm, code_obj);
} else {
- __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
diff --git a/deps/v8/src/builtins/array-from.tq b/deps/v8/src/builtins/array-from.tq
index d442f5026c..103892d740 100644
--- a/deps/v8/src/builtins/array-from.tq
+++ b/deps/v8/src/builtins/array-from.tq
@@ -140,14 +140,27 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
let a: JSReceiver;
// 9. If IsConstructor(C) is true, then
- typeswitch (c) {
- case (c: Constructor): {
- // a. Let A be ? Construct(C, « len »).
- a = Construct(c, len);
- }
- case (JSAny): {
- // a. Let A be ? ArrayCreate(len).
- a = ArrayCreate(len);
+ try {
+ // Allocate an array with PACKED elements kind for the fast path rather
+ // than calling the constructor, which creates an array with HOLEY kind.
+ if (c != GetArrayFunction()) goto CreateWithConstructor;
+ if (len > kMaxFastArrayLength) goto CreateWithConstructor;
+ const smiLen: Smi = 0;
+ const capacity: intptr = Convert<intptr>(len);
+ const map: Map = GetFastPackedSmiElementsJSArrayMap();
+ a = AllocateJSArray(
+ ElementsKind::PACKED_SMI_ELEMENTS, map, capacity, smiLen,
+ AllocationFlag::kAllowLargeObjectAllocation);
+ } label CreateWithConstructor {
+ typeswitch (c) {
+ case (c: Constructor): {
+ // a. Let A be ? Construct(C, « len »).
+ a = Construct(c, len);
+ }
+ case (JSAny): {
+ // a. Let A be ? ArrayCreate(len).
+ a = ArrayCreate(len);
+ }
}
}
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index c88a0c2800..7fd9c07a1c 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -69,13 +69,23 @@ transitioning builtin ConvertToLocaleString(
try {
const callable: Callable = Cast<Callable>(prop) otherwise TypeError;
let result: JSAny;
- if (IsNullOrUndefined(locales)) {
- result = Call(context, callable, element);
- } else if (IsNullOrUndefined(options)) {
- result = Call(context, callable, element, locales);
- } else {
+
+ // According to the ECMA-402 specification, the optional arguments locales
+ // and options must be passed.
+ @if(V8_INTL_SUPPORT) {
result = Call(context, callable, element, locales, options);
}
+
+ // Without the ECMA-402 internationalization API, the optional arguments
+ // must not be passed.
+ // See: https://tc39.es/ecma262/#sec-array.prototype.tolocalestring
+ @ifnot(V8_INTL_SUPPORT) {
+ result = Call(context, callable, element);
+ // Use the remaining parameters.
+ const _locales = locales;
+ const _options = options;
+ }
+
return ToString_Inline(result);
} label TypeError {
ThrowTypeError(MessageTemplate::kCalledNonCallable, prop);
@@ -167,8 +177,15 @@ struct Buffer {
this.totalStringLength =
AddStringLength(this.totalStringLength, str.length_intptr);
- this.fixedArray =
- StoreAndGrowFixedArray(this.fixedArray, this.index++, str);
+ // String comparison is expensive, so we only check for pointer equality
+ // (i.e. only works for internalized strings).
+ if (TaggedEqual(str, this.lastString)) {
+ this.RepeatLast();
+ } else {
+ this.fixedArray =
+ StoreAndGrowFixedArray(this.fixedArray, this.index++, str);
+ this.lastString = str;
+ }
this.isOneByte =
IsOneByteStringInstanceType(str.instanceType) & this.isOneByte;
}
@@ -189,12 +206,33 @@ struct Buffer {
if (write) deferred {
this.fixedArray = StoreAndGrowFixedArray(
this.fixedArray, this.index++, Convert<Smi>(nofSeparatorsInt));
+ this.lastString = Null;
+ }
+ }
+
+ macro RepeatLast(): void {
+ dcheck(this.index > 0);
+ typeswitch (this.fixedArray.objects[this.index - 1]) {
+ case (String): {
+ this.fixedArray = StoreAndGrowFixedArray(
+ this.fixedArray, this.index++, SmiConstant(-1));
+ }
+ case (count: Smi): {
+ dcheck(count < 0);
+ dcheck(count - 1 < 0); // Check that there is no overflow.
+ this.fixedArray.objects[this.index - 1] = count - 1;
+ }
+ case (Object): {
+ unreachable;
}
+ }
}
// Fixed array holding elements that are either:
// 1) String result of `ToString(next)`.
- // 2) Smi representing the number of consecutive separators.
+ // 2) Smi representing either
+ // - the number of consecutive separators (positive smi).
+ // - the number to repeat the last stored string (negative smi).
// `BufferJoin()` will iterate and writes these entries to a flat string.
//
// To save space, reduce reads and writes, only separators at the beginning,
@@ -207,6 +245,10 @@ struct Buffer {
// Hole example
// receiver: [<hole>, 'hello', <hole>, 'world', <hole>]
// fixedArray: [1, 'hello', 2, 'world', 1]
+ //
+ // Repeat example
+ // receiver: ['hello', 'hello', 'hello', 'world']
+ // fixedArray: ['hello', -2, 'world']
fixedArray: FixedArray;
// Index to insert a new entry into `fixedArray`.
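
The comment block above describes how the join buffer encodes its entries: strings, a positive smi for a run of separators, and a negative smi meaning "repeat the last stored string". A rough plain-C++ sketch of the writer side shown in the Add/RepeatLast Torque above, with std::variant standing in for the Smi/String entries and pointer identity standing in for TaggedEqual; separator handling is omitted for brevity:

  #include <string>
  #include <variant>
  #include <vector>

  // One buffer entry: a string, or a smi-like count (positive = separators,
  // negative = number of extra repeats of the previous string).
  using Entry = std::variant<int, std::string>;

  struct JoinBuffer {
    std::vector<Entry> entries;
    const std::string* last = nullptr;  // stands in for Buffer::lastString

    void Add(const std::string& str) {
      // Pointer identity only, mirroring TaggedEqual on internalized strings.
      if (!entries.empty() && last == &str) {
        if (std::holds_alternative<std::string>(entries.back())) {
          entries.push_back(Entry{-1});        // start a repeat run
        } else {
          std::get<int>(entries.back()) -= 1;  // extend the repeat run
        }
      } else {
        entries.push_back(Entry{str});
        last = &str;
      }
    }
  };
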
@@ -218,6 +260,8 @@ struct Buffer {
// `true` if the separator and all strings in the buffer are one-byte,
// otherwise `false`.
isOneByte: bool;
+
+ lastString: PrimitiveHeapObject;
}
macro NewBuffer(len: uintptr, sep: String): Buffer {
@@ -229,7 +273,8 @@ macro NewBuffer(len: uintptr, sep: String): Buffer {
fixedArray: AllocateZeroedFixedArray(cappedBufferSize),
index: 0,
totalStringLength: 0,
- isOneByte: IsOneByteStringInstanceType(sep.instanceType)
+ isOneByte: IsOneByteStringInstanceType(sep.instanceType),
+ lastString: Null
};
}
@@ -250,7 +295,8 @@ macro BufferJoin(implicit context: Context)(
// When the element is a smi, use StringRepeat to quickly build a memory
// efficient separator repeated string.
- case (nofSeparators: Number): {
+ case (nofSeparators: Smi): {
+ dcheck(nofSeparators > 0);
return StringRepeat(context, sep, nofSeparators);
}
case (Object): {
@@ -313,7 +359,9 @@ transitioning macro ArrayJoinImpl<T: type>(implicit context: Context)(
}
case (obj: JSAny): {
if (IsNullOrUndefined(obj)) continue;
- next = string::ToString(context, obj);
+ const str = string::ToString_Inline(context, obj);
+ if (str == kEmptyString) continue;
+ next = str;
}
}
}
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
index 44c2cdca27..b469ec5f5c 100644
--- a/deps/v8/src/builtins/array-of.tq
+++ b/deps/v8/src/builtins/array-of.tq
@@ -19,14 +19,21 @@ ArrayOf(
let a: JSReceiver;
// 4. If IsConstructor(C) is true, then
- typeswitch (c) {
- case (c: Constructor): {
- // a. Let A be ? Construct(C, « len »).
- a = Construct(c, len);
- }
- case (JSAny): {
- // a. Let A be ? ArrayCreate(len).
- a = ArrayCreate(len);
+ try {
+ // Allocate an array with PACKED elements kind for the fast path rather
+ // than calling the constructor, which creates an array with HOLEY kind.
+ if (c != GetArrayFunction()) goto CreateWithConstructor;
+ a = NewJSArrayFilledWithZero(SmiUntag(len)) otherwise CreateWithConstructor;
+ } label CreateWithConstructor {
+ typeswitch (c) {
+ case (c: Constructor): {
+ // a. Let A be ? Construct(C, « len »).
+ a = Construct(c, len);
+ }
+ case (JSAny): {
+ // a. Let A be ? ArrayCreate(len).
+ a = ArrayCreate(len);
+ }
}
}
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index 69a678a513..b5835f8be9 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -3,65 +3,50 @@
// found in the LICENSE file.
namespace array {
-macro LoadElement<ElementsAccessor : type extends ElementsKind, T: type>(
+macro LoadElement<Elements : type extends FixedArrayBase, T: type>(
elements: FixedArrayBase, index: Smi): T;
-LoadElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): Smi {
+LoadElement<FixedArray, Object>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): Object {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- return UnsafeCast<Smi>(elements.objects[index]);
+ return elements.objects[index];
}
-LoadElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): JSAny {
- const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- return UnsafeCast<JSAny>(elements.objects[index]);
-}
-
-LoadElement<array::FastPackedDoubleElements, float64>(
- implicit context: Context)(elements: FixedArrayBase, index: Smi): float64 {
+LoadElement<FixedDoubleArray, float64_or_hole>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): float64_or_hole {
const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- // This macro is only used for PACKED_DOUBLE, loading the hole should
- // be impossible.
- return elements.floats[index].Value() otherwise unreachable;
+ return elements.floats[index];
}
-macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
+macro StoreElement<Elements : type extends FixedArrayBase, T: type>(
implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: T): void;
-StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: Smi): void {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- StoreFixedArrayElement(elems, index, value);
-}
-
-StoreElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: JSAny): void {
+StoreElement<FixedArray, Object>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: Object): void {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
elements.objects[index] = value;
}
-StoreElement<array::FastPackedDoubleElements, float64>(
- implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: float64): void {
+StoreElement<FixedDoubleArray, float64_or_hole>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: float64_or_hole): void {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- StoreFixedDoubleArrayElement(elems, index, value);
+ elems.floats[index] = value;
}
// Fast-path for all PACKED_* elements kinds. These do not need to check
// whether a property is present, so we can simply swap them using fast
// FixedArray loads/stores.
-macro FastPackedArrayReverse<Accessor: type, T: type>(
+macro FastArrayReverse<Elements : type extends FixedArrayBase, T: type>(
implicit context: Context)(elements: FixedArrayBase, length: Smi): void {
let lower: Smi = 0;
let upper: Smi = length - 1;
while (lower < upper) {
- const lowerValue: T = LoadElement<Accessor, T>(elements, lower);
- const upperValue: T = LoadElement<Accessor, T>(elements, upper);
- StoreElement<Accessor>(elements, lower, upperValue);
- StoreElement<Accessor>(elements, upper, lowerValue);
+ const lowerValue: T = LoadElement<Elements, T>(elements, lower);
+ const upperValue: T = LoadElement<Elements, T>(elements, upper);
+ StoreElement<Elements>(elements, lower, upperValue);
+ StoreElement<Elements>(elements, upper, lowerValue);
++lower;
--upper;
}
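
FastArrayReverse above is the classic two-pointer in-place reverse over the backing store, now parameterized by the FixedArrayBase subtype rather than by an elements-kind accessor. The same loop in plain C++, with a generic element type standing in for Object or float64_or_hole:

  #include <cstddef>
  #include <utility>

  // Swap elements from both ends toward the middle, as in the loop above.
  template <typename T>
  void ReverseInPlace(T* elements, std::size_t length) {
    if (length == 0) return;
    std::size_t lower = 0;
    std::size_t upper = length - 1;
    while (lower < upper) {
      std::swap(elements[lower], elements[upper]);  // load both, store swapped
      ++lower;
      --upper;
    }
  }
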
@@ -144,19 +129,27 @@ macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny):
const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
const kind: ElementsKind = array.map.elements_kind;
- if (kind == ElementsKind::PACKED_SMI_ELEMENTS) {
- array::EnsureWriteableFastElements(array);
- FastPackedArrayReverse<array::FastPackedSmiElements, Smi>(
- array.elements, array.length);
- } else if (kind == ElementsKind::PACKED_ELEMENTS) {
+ if (kind == ElementsKind::PACKED_SMI_ELEMENTS ||
+ kind == ElementsKind::PACKED_ELEMENTS) {
array::EnsureWriteableFastElements(array);
- FastPackedArrayReverse<array::FastPackedObjectElements, JSAny>(
- array.elements, array.length);
+ FastArrayReverse<FixedArray, Object>(array.elements, array.length);
} else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
- FastPackedArrayReverse<array::FastPackedDoubleElements, float64>(
+ FastArrayReverse<FixedDoubleArray, float64_or_hole>(
array.elements, array.length);
} else {
- goto Slow;
+ if (!IsPrototypeInitialArrayPrototype(array.map)) goto Slow;
+ if (IsNoElementsProtectorCellInvalid()) goto Slow;
+
+ if (kind == ElementsKind::HOLEY_SMI_ELEMENTS ||
+ kind == ElementsKind::HOLEY_ELEMENTS) {
+ array::EnsureWriteableFastElements(array);
+ FastArrayReverse<FixedArray, Object>(array.elements, array.length);
+ } else if (kind == ElementsKind::HOLEY_DOUBLE_ELEMENTS) {
+ FastArrayReverse<FixedDoubleArray, float64_or_hole>(
+ array.elements, array.length);
+ } else {
+ goto Slow;
+ }
}
}
diff --git a/deps/v8/src/builtins/array-to-reversed.tq b/deps/v8/src/builtins/array-to-reversed.tq
index 5d97d6546e..cb098e1a45 100644
--- a/deps/v8/src/builtins/array-to-reversed.tq
+++ b/deps/v8/src/builtins/array-to-reversed.tq
@@ -3,14 +3,57 @@
// found in the LICENSE file.
namespace array {
-macro FastPackedArrayToReversed<Accessor: type, T: type>(
+
+macro FastPackedDoubleArrayToReversed(implicit context: Context)(
+ elements: FixedDoubleArray, length: Smi): JSArray {
+ // 3. Let A be ? ArrayCreate(𝔽(len)).
+ const copy: FixedDoubleArray =
+ UnsafeCast<FixedDoubleArray>(AllocateFixedArray(
+ ElementsKind::PACKED_DOUBLE_ELEMENTS, SmiUntag(length),
+ AllocationFlag::kAllowLargeObjectAllocation));
+
+ // 4. Let k be 0.
+ let k: Smi = 0;
+
+ // 5. Repeat, while k < len,
+ while (k < length) {
+ // a. Let from be ! ToString(𝔽(len - k - 1)).
+ // b. Let Pk be ! ToString(𝔽(k)).
+ const from = length - k - 1;
+
+ // c. Let fromValue be ? Get(O, from).
+ const fromValue: float64 =
+ elements.floats[from].Value() otherwise unreachable;
+
+ // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue).
+ StoreFixedDoubleArrayElement(copy, k, fromValue);
+
+ // e. Set k to k + 1.
+ ++k;
+ }
+
+ // 6. Return A.
+ const map: Map = LoadJSArrayElementsMap(
+ ElementsKind::PACKED_DOUBLE_ELEMENTS, LoadNativeContext(context));
+ return NewJSArray(map, copy);
+}
+
+macro FastArrayToReversed<FromElements : type extends FixedArrayBase>(
implicit context: Context)(
- kind: constexpr ElementsKind, elements: FixedArrayBase,
- length: Smi): JSArray {
+ kind: constexpr ElementsKind, elements: FromElements, length: Smi,
+ initializeArray: constexpr bool): JSArray {
// 3. Let A be ? ArrayCreate(𝔽(len)).
const copy: FixedArrayBase = AllocateFixedArray(
kind, SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation);
+ // Reversing a HOLEY_DOUBLE_ELEMENTS array may allocate heap numbers.
+ // We need to initialize the copy first so the GC never sees garbage values.
+ if (initializeArray) {
+ dcheck(Is<FixedArray>(copy));
+ FillFixedArrayWithSmiZero(
+ kind, UnsafeCast<FixedArray>(copy), 0, SmiUntag(length));
+ }
+
// 4. Let k be 0.
let k: Smi = 0;
@@ -21,10 +64,10 @@ macro FastPackedArrayToReversed<Accessor: type, T: type>(
const from = length - k - 1;
// c. Let fromValue be ? Get(O, from).
- const fromValue: T = LoadElement<Accessor, T>(elements, from);
+ const fromValue: Object = LoadElementOrUndefined(elements, from);
// d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue).
- StoreElement<Accessor>(copy, k, fromValue);
+ StoreElement<FixedArray>(copy, k, fromValue);
// e. Set k to k + 1.
++k;
@@ -35,7 +78,7 @@ macro FastPackedArrayToReversed<Accessor: type, T: type>(
return NewJSArray(map, copy);
}
-macro TryFastPackedArrayToReversed(implicit context: Context)(receiver: JSAny):
+macro TryFastArrayToReversed(implicit context: Context)(receiver: JSAny):
JSArray labels Slow {
const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
@@ -43,19 +86,33 @@ macro TryFastPackedArrayToReversed(implicit context: Context)(receiver: JSAny):
const kind: ElementsKind = array.map.elements_kind;
if (kind == ElementsKind::PACKED_SMI_ELEMENTS) {
- return FastPackedArrayToReversed<array::FastPackedSmiElements, Smi>(
- ElementsKind::PACKED_SMI_ELEMENTS, array.elements, array.length);
+ return FastArrayToReversed<FixedArray>(
+ ElementsKind::PACKED_SMI_ELEMENTS,
+ UnsafeCast<FixedArray>(array.elements), array.length, false);
+ } else if (kind == ElementsKind::PACKED_ELEMENTS) {
+ return FastArrayToReversed<FixedArray>(
+ ElementsKind::PACKED_ELEMENTS, UnsafeCast<FixedArray>(array.elements),
+ array.length, false);
+ } else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
+ return FastPackedDoubleArrayToReversed(
+ UnsafeCast<FixedDoubleArray>(array.elements), array.length);
+ } else {
+ if (!IsPrototypeInitialArrayPrototype(array.map)) goto Slow;
+ if (IsNoElementsProtectorCellInvalid()) goto Slow;
+
+ if (kind == ElementsKind::HOLEY_SMI_ELEMENTS ||
+ kind == ElementsKind::HOLEY_ELEMENTS) {
+ return FastArrayToReversed<FixedArray>(
+ ElementsKind::PACKED_ELEMENTS, UnsafeCast<FixedArray>(array.elements),
+ array.length, false);
+ } else if (kind == ElementsKind::HOLEY_DOUBLE_ELEMENTS) {
+ return FastArrayToReversed<FixedDoubleArray>(
+ ElementsKind::PACKED_ELEMENTS,
+ UnsafeCast<FixedDoubleArray>(array.elements), array.length, true);
+ }
+
+ goto Slow;
}
- if (kind == ElementsKind::PACKED_ELEMENTS) {
- return FastPackedArrayToReversed<array::FastPackedObjectElements, JSAny>(
- ElementsKind::PACKED_ELEMENTS, array.elements, array.length);
- }
- if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
- return FastPackedArrayToReversed<array::FastPackedDoubleElements, float64>(
- ElementsKind::PACKED_DOUBLE_ELEMENTS, array.elements, array.length);
- }
-
- goto Slow;
}
transitioning builtin GenericArrayToReversed(implicit context: Context)(
@@ -96,7 +153,7 @@ transitioning builtin GenericArrayToReversed(implicit context: Context)(
transitioning javascript builtin ArrayPrototypeToReversed(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
try {
- return TryFastPackedArrayToReversed(receiver) otherwise Slow;
+ return TryFastArrayToReversed(receiver) otherwise Slow;
} label Slow {
return GenericArrayToReversed(receiver);
}
diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq
index 611cabc4ed..3765e57d69 100644
--- a/deps/v8/src/builtins/arraybuffer.tq
+++ b/deps/v8/src/builtins/arraybuffer.tq
@@ -75,6 +75,27 @@ transitioning javascript builtin ArrayBufferPrototypeGetResizable(
return False;
}
+// #sec-get-arraybuffer.prototype.detached
+transitioning javascript builtin ArrayBufferPrototypeGetDetached(
+ js-implicit context: NativeContext, receiver: JSAny)(): Boolean {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get ArrayBuffer.prototype.detached';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. Return IsDetachedBuffer(O).
+ if (IsDetachedBuffer(o)) {
+ return True;
+ }
+ return False;
+}
+
// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
transitioning javascript builtin
SharedArrayBufferPrototypeGetMaxByteLength(
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index e1de5dee65..a6c8a1ba0f 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -15,6 +15,7 @@
#include 'src/objects/js-atomics-synchronization.h'
#include 'src/objects/js-function.h'
#include 'src/objects/js-generator.h'
+#include 'src/objects/js-iterator-helpers.h'
#include 'src/objects/js-promise.h'
#include 'src/objects/js-regexp-string-iterator.h'
#include 'src/objects/js-shadow-realm.h'
@@ -229,7 +230,7 @@ type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
type RawPtr<To: type> extends RawPtr;
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
-extern class Code extends HeapObject;
+extern class InstructionStream extends HeapObject;
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Number = Smi|HeapNumber;
@@ -266,7 +267,11 @@ type Callable = JSFunction|JSBoundFunction|JSWrappedFunction|CallableJSProxy|
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
-extern enum UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback }
+extern enum UpdateFeedbackMode {
+ kOptionalFeedback,
+ kGuaranteedFeedback,
+ kNoFeedback
+}
extern operator '==' macro UpdateFeedbackModeEqual(
constexpr UpdateFeedbackMode, constexpr UpdateFeedbackMode): constexpr bool;
@@ -358,7 +363,12 @@ constexpr 'CodeStubAssembler::ExtractFixedArrayFlag' {
...
}
+const kBigIntMaxLengthBits:
+ constexpr uintptr generates 'BigInt::kMaxLengthBits';
const kBigIntMaxLength: constexpr intptr generates 'BigInt::kMaxLength';
+const kBigIntDigitSize: constexpr intptr generates 'kSystemPointerSize';
+const kBitsPerByte: constexpr intptr generates 'kBitsPerByte';
+const kBigIntDigitBits: intptr = kBigIntDigitSize * kBitsPerByte;
extern enum MessageTemplate {
kAllPromisesRejected,
@@ -384,6 +394,7 @@ extern enum MessageTemplate {
kTypedArrayTooShort,
kTypedArrayTooLargeToSort,
kInvalidCountValue,
+ kConstructAbstractClass,
kConstructorNotFunction,
kSymbolToString,
kSymbolIteratorInvalid,
@@ -442,6 +453,10 @@ extern enum MessageTemplate {
kInvalidWeakRefsRegisterTarget,
kInvalidWeakRefsUnregisterToken,
kInvalidWeakRefsWeakRefConstructorTarget,
+ kObjectGetterCallable,
+ kObjectSetterCallable,
+ kPropertyDescObject,
+ kMustBePositive,
...
}
@@ -480,6 +495,8 @@ const kFixedDoubleArrayMaxLength:
constexpr int31 generates 'FixedDoubleArray::kMaxLength';
const kObjectAlignmentMask: constexpr intptr
generates 'kObjectAlignmentMask';
+const kObjectAlignment: constexpr intptr
+ generates 'kObjectAlignment';
const kMinAddedElementsCapacity:
constexpr int31 generates 'JSObject::kMinAddedElementsCapacity';
const kMaxFastArrayLength:
@@ -528,6 +545,7 @@ extern macro MatchSymbolConstant(): Symbol;
extern macro MessageStringConstant(): String;
extern macro NanConstant(): NaN;
extern macro NameStringConstant(): String;
+extern macro NextStringConstant(): String;
extern macro NullConstant(): Null;
extern macro NumberStringConstant(): String;
extern macro ReturnStringConstant(): String;
@@ -549,6 +567,7 @@ const False: False = FalseConstant();
const kEmptyString: EmptyString = EmptyStringConstant();
const kLengthString: String = LengthStringConstant();
const kMessageString: String = MessageStringConstant();
+const kNextString: String = NextStringConstant();
const kReturnString: String = ReturnStringConstant();
const kNaN: NaN = NanConstant();
@@ -777,13 +796,16 @@ extern transitioning runtime NormalizeElements(Context, JSObject): void;
extern transitioning runtime TransitionElementsKindWithKind(
Context, JSObject, Smi): void;
+extern macro LoadObjectField(HeapObject, constexpr int32): Object;
+
extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi;
extern macro LoadBufferIntptr(RawPtr, constexpr int32): intptr;
extern runtime StringEqual(Context, String, String): Oddball;
-extern builtin StringLessThan(Context, String, String): Boolean;
+extern builtin StringLessThan(String, String): Boolean;
+extern builtin StringCompare(String, String): Smi;
extern macro StringCharCodeAt(String, uintptr): char16;
extern macro StringFromSingleCharCode(char8): String;
extern macro StringFromSingleCharCode(char16): String;
@@ -873,6 +895,7 @@ extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
extern operator '~' macro WordNot(intptr): intptr;
extern operator '~' macro WordNot(uintptr): uintptr;
+extern operator '~' macro Word32BitwiseNot(int32): int32;
extern operator '~' macro Word64Not(uint64): uint64;
extern operator '~' macro Word64Not(int64): int64;
extern operator '~' macro ConstexprWordNot(constexpr intptr): constexpr intptr;
@@ -921,7 +944,7 @@ macro Float64IsNaN(n: float64): bool {
// The type of all tagged values that can safely be compared with TaggedEqual.
@if(V8_ENABLE_WEBASSEMBLY)
type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map |
- WeakCell | Context | EmptyString | Symbol | WasmInternalFunction;
+ WeakCell | Context | EmptyString | Symbol | WasmInternalFunction | WasmNull;
@ifnot(V8_ENABLE_WEBASSEMBLY)
type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map |
WeakCell | Context | EmptyString | Symbol;
@@ -960,6 +983,8 @@ extern operator '*' macro IntPtrMul(intptr, intptr): intptr;
extern operator '*' macro Int64Mul(int64, int64): int64;
extern operator '/' macro IntPtrDiv(intptr, intptr): intptr;
extern operator '/' macro Int64Div(int64, int64): int64;
+extern operator '%' macro IntPtrMod(intptr, intptr): intptr;
+extern operator '%' macro Int64Mod(int64, int64): int64;
extern operator '<<' macro WordShl(intptr, intptr): intptr;
extern operator '>>' macro WordSar(intptr, intptr): intptr;
extern operator '&' macro WordAnd(intptr, intptr): intptr;
@@ -991,7 +1016,9 @@ extern operator '-' macro Uint32Sub(uint32, uint32): uint32;
extern operator '*' macro Int32Mul(int32, int32): int32;
extern operator '*' macro Uint32Mul(uint32, uint32): uint32;
extern operator '/' macro Int32Div(int32, int32): int32;
+extern operator '/' macro Uint32Div(uint32, uint32): uint32;
extern operator '%' macro Int32Mod(int32, int32): int32;
+extern operator '%' macro Uint32Mod(uint32, uint32): uint32;
extern operator '&' macro Word32And(int32, int32): int32;
extern operator '&' macro Word32And(uint32, uint32): uint32;
extern operator '==' macro
@@ -1017,6 +1044,8 @@ extern macro ConstexprIntegerLiteralToInt32(constexpr IntegerLiteral):
constexpr int32;
extern macro ConstexprIntegerLiteralToUint32(constexpr IntegerLiteral):
constexpr uint32;
+extern macro ConstexprIntegerLiteralToInt64(constexpr IntegerLiteral):
+ constexpr int64;
extern macro ConstexprIntegerLiteralToUint64(constexpr IntegerLiteral):
constexpr uint64;
extern macro ConstexprIntegerLiteralToIntptr(constexpr IntegerLiteral):
@@ -1216,7 +1245,7 @@ extern macro ChangeBoolToInt32(bool): int32;
extern macro ChangeInt32ToFloat64(int32): float64;
extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
-extern macro ChangeInt32ToInt64(int32): intptr; // Sign-extends.
+extern macro ChangeInt32ToInt64(int32): int64; // Sign-extends.
extern macro ChangeUint32ToUint64(uint32): uint64; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
extern macro TruncateFloat64ToFloat32(float64): float32;
@@ -1375,6 +1404,9 @@ macro GetArrayBufferFunction(implicit context: Context)(): Constructor {
macro GetArrayBufferNoInitFunction(implicit context: Context)(): JSFunction {
return *NativeContextSlot(ContextSlot::ARRAY_BUFFER_NOINIT_FUN_INDEX);
}
+macro GetIteratorFunction(implicit context: Context)(): JSFunction {
+ return *NativeContextSlot(ContextSlot::ITERATOR_FUNCTION_INDEX);
+}
macro GetFastPackedElementsJSArrayMap(implicit context: Context)(): Map {
return *NativeContextSlot(ContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
}
@@ -1475,6 +1507,23 @@ extern macro BranchIfSameValue(JSAny, JSAny): never labels Taken, NotTaken;
macro SameValue(a: JSAny, b: JSAny): bool {
BranchIfSameValue(a, b) otherwise return true, return false;
}
+macro SameValue(a: (JSAny|TheHole), b: (JSAny|TheHole)): bool {
+ typeswitch (a) {
+ case (a: TheHole): {
+ return UnsafeCast<TheHole>(b) == a;
+ }
+ case (a: JSAny): {
+ typeswitch (b) {
+ case (TheHole): {
+ return false;
+ }
+ case (b: JSAny): {
+ return SameValue(a, b);
+ }
+ }
+ }
+ }
+}
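In the overload above a hole only ever equals another hole; once both operands are known to be JSAny, it defers to the plain SameValue macro. A minimal C++ sketch of the same dispatch, using std::optional<double> as a stand-in for a possibly-hole value (the names jsSameValue and sameValueWithHole are illustrative, not V8 APIs):

#include <cassert>
#include <cmath>
#include <optional>

// ECMAScript SameValue on doubles: NaN equals NaN, but +0 and -0 differ.
static bool jsSameValue(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;
  if (a == 0.0 && b == 0.0) return std::signbit(a) == std::signbit(b);
  return a == b;
}

// An empty optional plays the role of TheHole: a hole matches only a hole.
static bool sameValueWithHole(std::optional<double> a, std::optional<double> b) {
  if (!a.has_value() || !b.has_value()) return a.has_value() == b.has_value();
  return jsSameValue(*a, *b);
}

int main() {
  assert(sameValueWithHole(std::nullopt, std::nullopt));
  assert(!sameValueWithHole(std::nullopt, 1.0));
  assert(sameValueWithHole(NAN, NAN));
  assert(!sameValueWithHole(0.0, -0.0));
  return 0;
}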
// Does "if (index1 + index2 > limit) goto IfOverflow" in an uintptr overflow
// friendly way where index1 and index2 are in [0, kMaxSafeInteger] range.
@@ -1913,6 +1962,17 @@ macro IsIntegerOrSomeInfinity(o: Object): bool {
}
}
+macro NumberIsSomeInfinity(n: Number): bool {
+ typeswitch (n) {
+ case (Smi): {
+ return false;
+ }
+ case (hn: HeapNumber): {
+ return Float64IsSomeInfinity(Convert<float64>(hn));
+ }
+ }
+}
+
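NumberIsSomeInfinity only has to look at the HeapNumber case, since a Smi is a small integer and can never be infinite. The same case split in plain C++, with std::variant standing in for the Smi/HeapNumber distinction (illustrative types, not V8's):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>
#include <variant>

// A Number is either a small integer (Smi) or a boxed float64 (HeapNumber).
using Number = std::variant<int32_t, double>;

static bool numberIsSomeInfinity(const Number& n) {
  // Smis are finite by construction, so only the double alternative matters.
  if (std::holds_alternative<int32_t>(n)) return false;
  return std::isinf(std::get<double>(n));
}

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  assert(!numberIsSomeInfinity(Number{int32_t{42}}));
  assert(numberIsSomeInfinity(Number{inf}));
  assert(numberIsSomeInfinity(Number{-inf}));
  assert(!numberIsSomeInfinity(Number{1.5}));
  return 0;
}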
// Assert that the objects satisfy SameValue or are both the hole.
builtin CheckSameObject(implicit context: Context)(
lhs: Object, rhs: Object): Undefined {
@@ -1969,6 +2029,7 @@ extern operator '[]' macro LoadWeakFixedArrayElement(
WeakFixedArray, intptr): MaybeObject;
extern operator '[]' macro LoadUint8Ptr(RawPtr<uint8>, intptr): uint8;
+extern operator '[]' macro LoadUint64Ptr(RawPtr<uint64>, intptr): uint64;
extern enum HashFieldType extends uint32 constexpr 'Name::HashFieldType' {
kHash,
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index d75549b298..ac4083df59 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -7,6 +7,7 @@
#include "src/base/small-vector.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
+#include "src/common/assert-scope.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects-inl.h"
@@ -74,7 +75,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
ApiNatives::InstantiateObject(isolate, instance_template,
Handle<JSReceiver>::cast(new_target)),
Object);
- argv[-1] = js_receiver->ptr();
+ argv[BuiltinArguments::kReceiverArgsOffset] = js_receiver->ptr();
raw_holder = *js_receiver;
} else {
DCHECK(receiver->IsJSReceiver());
@@ -117,9 +118,13 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
return isolate->factory()->undefined_value();
}
// Rebox the result.
- result->VerifyApiCallResultType();
- if (!is_construct || result->IsJSReceiver())
- return handle(*result, isolate);
+ {
+ DisallowGarbageCollection no_gc;
+ Object raw_result = *result;
+ DCHECK(raw_result.IsApiCallResultType());
+ if (!is_construct || raw_result.IsJSReceiver())
+ return handle(raw_result, isolate);
+ }
}
return js_receiver;
@@ -135,14 +140,14 @@ BUILTIN(HandleApiCall) {
args.target()->shared().get_api_func_data(), isolate);
int argc = args.length() - 1;
Address* argv = args.address_of_first_argument();
- if (new_target->IsJSReceiver()) {
- RETURN_RESULT_OR_FAILURE(
- isolate, HandleApiCallHelper<true>(isolate, new_target, fun_data,
- receiver, argv, argc));
- } else {
+ if (new_target->IsUndefined()) {
RETURN_RESULT_OR_FAILURE(
isolate, HandleApiCallHelper<false>(isolate, new_target, fun_data,
receiver, argv, argc));
+ } else {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, HandleApiCallHelper<true>(isolate, new_target, fun_data,
+ receiver, argv, argc));
}
}
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index bfb248e0b2..011c7808d0 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -241,7 +241,7 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
a_ = processor(this, value.value(), index);
}
},
- incr, advance_mode);
+ incr, LoopUnrollingMode::kNo, advance_mode);
}
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
@@ -1808,7 +1808,7 @@ TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
- TNode<CodeT> code = HeapConstant(callable.code());
+ TNode<Code> code = HeapConstant(callable.code());
// We are going to call here ArrayNoArgumentsConstructor or
// ArraySingleArgumentsConstructor which in addition to the register arguments
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 49fe48d698..5049fa36c7 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -1104,7 +1104,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
!HasOnlySimpleElements(isolate, *receiver)) {
return IterateElementsSlow(isolate, receiver, length, visitor);
}
- Handle<JSObject> array = Handle<JSObject>::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
switch (array->GetElementsKind()) {
case PACKED_SMI_ELEMENTS:
@@ -1228,17 +1228,14 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
UNIMPLEMENTED();
case NO_ELEMENTS:
break;
+ // JSArrays cannot have the following elements kinds:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
- return IterateElementsSlow(isolate, receiver, length, visitor);
RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
- // TODO(v8:11111): Support RAB / GSAB.
- UNREACHABLE();
#undef TYPED_ARRAY_CASE
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
case SHARED_ARRAY_ELEMENTS:
- // |array| is guaranteed to be an array or typed array.
UNREACHABLE();
}
visitor->increase_index_offset(length);
@@ -1395,7 +1392,18 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
UNREACHABLE();
}
}
- if (failure) break;
+ if (failure) {
+#ifdef VERIFY_HEAP
+ // The allocated storage may contain uninitialized values which will
+ // cause FixedDoubleArray::FixedDoubleArrayVerify to fail when the
+ // heap is verified (see: crbug.com/1415071). To prevent this, we
+ // initialize the array with holes.
+ if (v8_flags.verify_heap) {
+ double_storage->FillWithHoles(0, estimate_result_length);
+ }
+#endif // VERIFY_HEAP
+ break;
+ }
}
}
if (!failure) {
@@ -1852,5 +1860,12 @@ BUILTIN(ArrayPrototypeGroupToMap) {
return *map;
}
+BUILTIN(ArrayFromAsync) {
+ HandleScope scope(isolate);
+ DCHECK(v8_flags.harmony_array_from_async);
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 5f35972459..b1f285b8da 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -471,8 +471,9 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(true, array_buffer, kMethodName);
- DCHECK_EQ(array_buffer->max_byte_length(),
- array_buffer->GetBackingStore()->max_byte_length());
+ DCHECK_IMPLIES(!array_buffer->GetBackingStore()->is_wasm_memory(),
+ array_buffer->max_byte_length() ==
+ array_buffer->GetBackingStore()->max_byte_length());
// 4. Let length be ArrayBufferByteLength(O, SeqCst).
size_t byte_length = array_buffer->GetByteLength();
@@ -488,36 +489,25 @@ BUILTIN(ArrayBufferPrototypeResize) {
return ResizeHelper(args, isolate, kMethodName, kIsShared);
}
-// ES #sec-arraybuffer.prototype.transfer
-// ArrayBuffer.prototype.transfer([new_length])
-BUILTIN(ArrayBufferPrototypeTransfer) {
- const char kMethodName[] = "ArrayBuffer.prototype.transfer";
- HandleScope scope(isolate);
-
- Handle<Object> new_length = args.atOrUndefined(isolate, 1);
+namespace {
- // 1. Let O be the this value.
- // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
- CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+enum PreserveResizability { kToFixedLength, kPreserveResizability };
- // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
- CHECK_SHARED(false, array_buffer, kMethodName);
+Object ArrayBufferTransfer(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ Handle<Object> new_length,
+ PreserveResizability preserve_resizability,
+ const char* method_name) {
+ // 2. If IsSharedArrayBuffer(arrayBuffer) is true, throw a TypeError
+ // exception.
+ CHECK_SHARED(false, array_buffer, method_name);
size_t new_byte_length;
if (new_length->IsUndefined(isolate)) {
- // 4. If newLength is undefined,
- // a. If IsDetachedBuffer(O) is *true*, throw a *TypeError* exception.
- if (array_buffer->was_detached()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDetachedOperation,
- isolate->factory()->NewStringFromAsciiChecked(
- kMethodName)));
- }
-
- // b. Let newByteLength be O.[[ArrayBufferByteLength]].
+ // 3. If newLength is undefined, then
+ // a. Let newByteLength be arrayBuffer.[[ArrayBufferByteLength]].
new_byte_length = array_buffer->GetByteLength();
} else {
- // 5. Else,
+ // 4. Else,
// a. Let newByteLength be ? ToIndex(newLength).
Handle<Object> number_new_byte_length;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_new_byte_length,
@@ -532,20 +522,36 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
isolate,
NewRangeError(
MessageTemplate::kInvalidArrayBufferResizeLength,
- isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+ isolate->factory()->NewStringFromAsciiChecked(method_name)));
}
+ }
- // b. If IsDetachedBuffer(O) is *true*, throw a *TypeError* exception.
- if (array_buffer->was_detached()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDetachedOperation,
- isolate->factory()->NewStringFromAsciiChecked(
- kMethodName)));
- }
+ // 5. If IsDetachedBuffer(arrayBuffer) is true, throw a TypeError exception.
+ if (array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
}
- // After this point the steps are not observable and are performed out of
- // spec order.
+ ResizableFlag resizable;
+ size_t new_max_byte_length;
+ if (preserve_resizability == kPreserveResizability &&
+ array_buffer->is_resizable_by_js()) {
+ // 6. If preserveResizability is preserve-resizability and
+ // IsResizableArrayBuffer(arrayBuffer) is true, then
+ // a. Let newMaxByteLength be arrayBuffer.[[ArrayBufferMaxByteLength]].
+ new_max_byte_length = array_buffer->max_byte_length();
+ resizable = ResizableFlag::kResizable;
+ } else {
+ // 7. Else,
+ // a. Let newMaxByteLength be empty.
+ new_max_byte_length = new_byte_length;
+ resizable = ResizableFlag::kNotResizable;
+ }
+
+ // 8. If arrayBuffer.[[ArrayBufferDetachKey]] is not undefined, throw a
+ // TypeError exception.
if (!array_buffer->is_detachable()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -553,27 +559,34 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
NewTypeError(MessageTemplate::kDataCloneErrorNonDetachableArrayBuffer));
}
+ // After this point the steps are not observable and are performed out of
+ // spec order.
+
// Case 1: We don't need a BackingStore.
if (new_byte_length == 0) {
- // Nothing to do for steps 6-12.
-
- // 13. Perform ? DetachArrayBuffer(O).
- MAYBE_RETURN(JSArrayBuffer::Detach(array_buffer),
- ReadOnlyRoots(isolate).exception());
-
- // 14. Return new.
+ // 15. Perform ! DetachArrayBuffer(arrayBuffer).
+ JSArrayBuffer::Detach(array_buffer).Check();
+
+ // 9. Let newBuffer be ? AllocateArrayBuffer(%ArrayBuffer%, newByteLength,
+ // newMaxByteLength).
+ //
+ // Nothing to do for steps 10-14.
+ //
+ // 16. Return newBuffer.
return *isolate->factory()
->NewJSArrayBufferAndBackingStore(
- 0, InitializedFlag::kUninitialized)
+ 0, new_max_byte_length, InitializedFlag::kUninitialized,
+ resizable)
.ToHandleChecked();
}
// Case 2: We can reuse the same BackingStore.
auto from_backing_store = array_buffer->GetBackingStore();
if (from_backing_store && !from_backing_store->is_resizable_by_js() &&
+ resizable == ResizableFlag::kNotResizable &&
(new_byte_length == array_buffer->GetByteLength() ||
from_backing_store->CanReallocate())) {
- // Reallocate covers steps 6-12.
+ // Reallocate covers steps 10-14.
if (new_byte_length != array_buffer->GetByteLength() &&
!from_backing_store->Reallocate(isolate, new_byte_length)) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -581,41 +594,50 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
}
- // 13. Perform ? DetachArrayBuffer(O).
- MAYBE_RETURN(JSArrayBuffer::Detach(array_buffer),
- ReadOnlyRoots(isolate).exception());
+ // 15. Perform ! DetachArrayBuffer(arrayBuffer).
+ JSArrayBuffer::Detach(array_buffer).Check();
- // 14. Return new.
+ // 9. Let newBuffer be ? AllocateArrayBuffer(%ArrayBuffer%, newByteLength,
+ // newMaxByteLength).
+ // 16. Return newBuffer.
return *isolate->factory()->NewJSArrayBuffer(std::move(from_backing_store));
}
// Case 3: We can't reuse the same BackingStore. Copy the buffer.
- // 6. Let new be ? Construct(%ArrayBuffer%, « 𝔽(newByteLength) »).
- // 7. NOTE: This method returns a fixed-length ArrayBuffer.
- Handle<JSArrayBuffer> new_;
+ if (new_byte_length > new_max_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+
+ // 9. Let newBuffer be ? AllocateArrayBuffer(%ArrayBuffer%, newByteLength,
+ // newMaxByteLength).
+ Handle<JSArrayBuffer> new_buffer;
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
- new_byte_length, InitializedFlag::kUninitialized);
- if (!result.ToHandle(&new_)) {
+ new_byte_length, new_max_byte_length, InitializedFlag::kUninitialized,
+ resizable);
+ if (!result.ToHandle(&new_buffer)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
}
- // 8. Let copyLength be min(newByteLength, O.[[ArrayBufferByteLength]]).
+ // 10. Let copyLength be min(newByteLength,
+ // arrayBuffer.[[ArrayBufferByteLength]]).
+ //
// (Size comparison is done manually below instead of using min.)
- // 9. Let fromBlock be O.[[ArrayBufferData]].
+ // 11. Let fromBlock be arrayBuffer.[[ArrayBufferData]].
uint8_t* from_data =
reinterpret_cast<uint8_t*>(array_buffer->backing_store());
- // 10. Let toBlock be new.[[ArrayBufferData]].
- uint8_t* to_data = reinterpret_cast<uint8_t*>(new_->backing_store());
+ // 12. Let toBlock be newBuffer.[[ArrayBufferData]].
+ uint8_t* to_data = reinterpret_cast<uint8_t*>(new_buffer->backing_store());
- // 11. Perform CopyDataBlockBytes(toBlock, 0, fromBlock, 0, copyLength).
- // 12. NOTE: Neither creation of the new Data Block nor copying from the old
- // Data Block are observable. Implementations reserve the right to implement
- // this method as a zero-copy move or a realloc.
+ // 13. Perform CopyDataBlockBytes(toBlock, 0, fromBlock, 0, copyLength).
+ // 14. NOTE: Neither creation of the new Data Block nor copying from the old
+ // Data Block are observable. Implementations reserve the right to
+ // implement this method as a zero-copy move or a realloc.
size_t from_byte_length = array_buffer->GetByteLength();
if (new_byte_length <= from_byte_length) {
CopyBytes(to_data, from_data, new_byte_length);
@@ -624,12 +646,39 @@ BUILTIN(ArrayBufferPrototypeTransfer) {
memset(to_data + from_byte_length, 0, new_byte_length - from_byte_length);
}
- // 13. Perform ? DetachArrayBuffer(O).
- MAYBE_RETURN(JSArrayBuffer::Detach(array_buffer),
- ReadOnlyRoots(isolate).exception());
+ // 15. Perform ! DetachArrayBuffer(arrayBuffer).
+ JSArrayBuffer::Detach(array_buffer).Check();
- // 14. Return new.
- return *new_;
+ // 16. Return newBuffer.
+ return *new_buffer;
+}
+
+} // namespace
+
+// ES #sec-arraybuffer.prototype.transfer
+// ArrayBuffer.prototype.transfer([new_length])
+BUILTIN(ArrayBufferPrototypeTransfer) {
+ const char kMethodName[] = "ArrayBuffer.prototype.transfer";
+ HandleScope scope(isolate);
+
+ // 1. Perform ? RequireInternalSlot(arrayBuffer, [[ArrayBufferData]]).
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+ Handle<Object> new_length = args.atOrUndefined(isolate, 1);
+ return ArrayBufferTransfer(isolate, array_buffer, new_length,
+ kPreserveResizability, kMethodName);
+}
+
+// ES #sec-arraybuffer.prototype.transferToFixedLength
+// ArrayBuffer.prototype.transferToFixedLength([new_length])
+BUILTIN(ArrayBufferPrototypeTransferToFixedLength) {
+ const char kMethodName[] = "ArrayBuffer.prototype.transferToFixedLength";
+ HandleScope scope(isolate);
+
+ // 1. Perform ? RequireInternalSlot(arrayBuffer, [[ArrayBufferData]]).
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+ Handle<Object> new_length = args.atOrUndefined(isolate, 1);
+ return ArrayBufferTransfer(isolate, array_buffer, new_length, kToFixedLength,
+ kMethodName);
}
// ES #sec-sharedarraybuffer.prototype.grow
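Both ArrayBuffer.prototype.transfer and the new transferToFixedLength funnel into the shared ArrayBufferTransfer helper above; the only behavioural difference is steps 6-7, where transfer keeps a resizable source resizable (carrying over its max byte length) while transferToFixedLength always yields a fixed-length result. A minimal sketch of just that decision (enum and helper names here are illustrative, not the V8 ones):

#include <cassert>
#include <cstddef>

enum class PreserveResizability { kToFixedLength, kPreserveResizability };

struct NewBufferShape {
  bool resizable;
  size_t max_byte_length;  // equals new_byte_length when not resizable
};

// Steps 6-7: only transfer() on an already-resizable buffer keeps
// resizability (and the old max length); everything else collapses to a
// fixed-length buffer of exactly new_byte_length bytes.
static NewBufferShape pickShape(PreserveResizability mode, bool source_resizable,
                                size_t new_byte_length, size_t source_max) {
  if (mode == PreserveResizability::kPreserveResizability && source_resizable) {
    return {true, source_max};
  }
  return {false, new_byte_length};
}

int main() {
  assert(pickShape(PreserveResizability::kPreserveResizability, true, 8, 64).resizable);
  assert(!pickShape(PreserveResizability::kToFixedLength, true, 8, 64).resizable);
  assert(pickShape(PreserveResizability::kToFixedLength, true, 8, 64).max_byte_length == 8);
  return 0;
}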
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 6336654391..6abae74f86 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -180,7 +180,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
// which almost doubles the size of `await` builtins (unnecessarily).
TNode<Smi> builtin_id = LoadObjectField<Smi>(
shared_info, SharedFunctionInfo::kFunctionDataOffset);
- TNode<CodeT> code = LoadBuiltin(builtin_id);
+ TNode<Code> code = LoadBuiltin(builtin_id);
StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
}
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 26dcabe6c3..dc66bd8f77 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -510,6 +510,19 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
// Remember the {resume_type} for the {generator}.
StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kResumeModeOffset, resume_type);
+
+ Label if_instrumentation(this, Label::kDeferred),
+ if_instrumentation_done(this);
+ Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done);
+ BIND(&if_instrumentation);
+ {
+ const TNode<JSPromise> promise = LoadObjectField<JSPromise>(
+ next, AsyncGeneratorRequest::kPromiseOffset);
+ CallRuntime(Runtime::kDebugPushPromise, context, promise);
+ Goto(&if_instrumentation_done);
+ }
+ BIND(&if_instrumentation_done);
+
CallStub(CodeFactory::ResumeGenerator(isolate()), context,
LoadValueFromAsyncGeneratorRequest(next), generator);
var_state = LoadGeneratorState(generator);
@@ -598,8 +611,9 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
TakeFirstAsyncGeneratorRequestFromQueue(generator);
TNode<JSPromise> promise = LoadPromiseFromAsyncGeneratorRequest(next);
+ // No debug event needed, there was already a debug event that got us here.
Return(CallBuiltin(Builtin::kRejectPromise, context, promise, value,
- TrueConstant()));
+ FalseConstant()));
}
TF_BUILTIN(AsyncGeneratorYieldWithAwait, AsyncGeneratorBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index cbae195060..47bad91b5b 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -203,7 +203,7 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
GotoIf(TaggedIsSmi(iter_result), &if_notanobject);
const TNode<Map> iter_result_map = LoadMap(CAST(iter_result));
- GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject);
+ GotoIfNot(JSAnyIsNotPrimitiveMap(iter_result_map), &if_notanobject);
const TNode<Object> fast_iter_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
diff --git a/deps/v8/src/builtins/builtins-async-module.cc b/deps/v8/src/builtins/builtins-async-module.cc
index 417e6f1dfa..e1a2a71c42 100644
--- a/deps/v8/src/builtins/builtins-async-module.cc
+++ b/deps/v8/src/builtins/builtins-async-module.cc
@@ -19,7 +19,8 @@ BUILTIN(CallAsyncModuleFulfilled) {
.IsNothing()) {
// The evaluation of an async module cannot throw a JavaScript-observable
// exception.
- DCHECK(isolate->is_execution_termination_pending());
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ isolate->is_execution_termination_pending());
return ReadOnlyRoots(isolate).exception();
}
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h
index c6f5888b9d..0c292104e0 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.h
+++ b/deps/v8/src/builtins/builtins-bigint-gen.h
@@ -147,6 +147,125 @@ class BigIntBuiltinsAssembler : public CodeStubAssembler {
std::make_pair(MachineType::AnyTagged(), y));
}
+ void CppBitwiseOrPosPosAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference>
+ mutable_big_int_bitwise_or_pos_pos_and_canonicalize = ExternalConstant(
+ ExternalReference::
+ mutable_big_int_bitwise_or_pp_and_canonicalize_function());
+ CallCFunction(mutable_big_int_bitwise_or_pos_pos_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppBitwiseOrNegNegAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference>
+ mutable_big_int_bitwise_or_neg_neg_and_canonicalize = ExternalConstant(
+ ExternalReference::
+ mutable_big_int_bitwise_or_nn_and_canonicalize_function());
+ CallCFunction(mutable_big_int_bitwise_or_neg_neg_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppBitwiseOrPosNegAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference>
+ mutable_big_int_bitwise_or_pos_neg_and_canonicalize = ExternalConstant(
+ ExternalReference::
+ mutable_big_int_bitwise_or_pn_and_canonicalize_function());
+ CallCFunction(mutable_big_int_bitwise_or_pos_neg_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppBitwiseXorPosPosAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference>
+ mutable_big_int_bitwise_xor_pos_pos_and_canonicalize = ExternalConstant(
+ ExternalReference::
+ mutable_big_int_bitwise_xor_pp_and_canonicalize_function());
+ CallCFunction(mutable_big_int_bitwise_xor_pos_pos_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppBitwiseXorNegNegAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference>
+ mutable_big_int_bitwise_xor_neg_neg_and_canonicalize = ExternalConstant(
+ ExternalReference::
+ mutable_big_int_bitwise_xor_nn_and_canonicalize_function());
+ CallCFunction(mutable_big_int_bitwise_xor_neg_neg_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppBitwiseXorPosNegAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference>
+ mutable_big_int_bitwise_xor_pos_neg_and_canonicalize = ExternalConstant(
+ ExternalReference::
+ mutable_big_int_bitwise_xor_pn_and_canonicalize_function());
+ CallCFunction(mutable_big_int_bitwise_xor_pos_neg_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppLeftShiftAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<IntPtrT> shift) {
+ TNode<ExternalReference> mutable_big_int_left_shift_and_canonicalize =
+ ExternalConstant(
+ ExternalReference::
+ mutable_big_int_left_shift_and_canonicalize_function());
+ CallCFunction(mutable_big_int_left_shift_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::IntPtr(), shift));
+ }
+
+ TNode<Uint32T> CppRightShiftResultLength(TNode<BigInt> x,
+ TNode<Uint32T> x_sign,
+ TNode<IntPtrT> shift) {
+ TNode<ExternalReference> big_int_right_shift_result_length =
+ ExternalConstant(
+ ExternalReference::big_int_right_shift_result_length_function());
+ return UncheckedCast<Uint32T>(
+ CallCFunction(big_int_right_shift_result_length, MachineType::Uint32(),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::Uint32(), x_sign),
+ std::make_pair(MachineType::IntPtr(), shift)));
+ }
+
+ void CppRightShiftAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<IntPtrT> shift,
+ TNode<Uint32T> must_round_down) {
+ TNode<ExternalReference> mutable_big_int_right_shift_and_canonicalize =
+ ExternalConstant(
+ ExternalReference::
+ mutable_big_int_right_shift_and_canonicalize_function());
+ CallCFunction(mutable_big_int_right_shift_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::IntPtr(), shift),
+ std::make_pair(MachineType::Uint32(), must_round_down));
+ }
+
TNode<Int32T> CppAbsoluteCompare(TNode<BigInt> x, TNode<BigInt> y) {
TNode<ExternalReference> mutable_big_int_absolute_compare =
ExternalConstant(
diff --git a/deps/v8/src/builtins/builtins-bigint.tq b/deps/v8/src/builtins/builtins-bigint.tq
index feacfccdf9..40e42840bf 100644
--- a/deps/v8/src/builtins/builtins-bigint.tq
+++ b/deps/v8/src/builtins/builtins-bigint.tq
@@ -8,6 +8,10 @@ namespace bigint {
const kPositiveSign: uint32 = 0;
const kNegativeSign: uint32 = 1;
+const kGreaterThan: intptr = 1;
+const kLessThan: intptr = -1;
+
+const kMustRoundDownBitShift: uint32 = 30;
extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize(
MutableBigInt, BigIntBase, BigIntBase): void;
@@ -25,6 +29,24 @@ extern macro BigIntBuiltinsAssembler::CppBitwiseAndNegNegAndCanonicalize(
MutableBigInt, BigIntBase, BigIntBase): void;
extern macro BigIntBuiltinsAssembler::CppBitwiseAndPosNegAndCanonicalize(
MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppBitwiseOrPosPosAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppBitwiseOrNegNegAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppBitwiseOrPosNegAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppBitwiseXorPosPosAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppBitwiseXorNegNegAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppBitwiseXorPosNegAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppLeftShiftAndCanonicalize(
+ MutableBigInt, BigIntBase, intptr): void;
+extern macro BigIntBuiltinsAssembler::CppRightShiftResultLength(
+ BigIntBase, uint32, intptr): uint32;
+extern macro BigIntBuiltinsAssembler::CppRightShiftAndCanonicalize(
+ MutableBigInt, BigIntBase, intptr, uint32): void;
extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare(
BigIntBase, BigIntBase): int32;
@@ -34,6 +56,7 @@ extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength(
MutableBigInt, uint32, intptr): void;
extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt;
+extern macro CodeStubAssembler::AllocateRawBigInt(intptr): MutableBigInt;
extern macro CodeStubAssembler::StoreBigIntDigit(
MutableBigInt, intptr, uintptr): void;
extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr;
@@ -57,7 +80,7 @@ macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
if (length > kBigIntMaxLength) {
goto BigIntTooBig;
}
- const result: MutableBigInt = AllocateBigInt(length);
+ const result: MutableBigInt = AllocateRawBigInt(length);
WriteBigIntSignAndLength(result, sign, length);
return result;
@@ -474,6 +497,365 @@ builtin BigIntBitwiseAnd(implicit context: Context)(
}
}
+macro BigIntBitwiseOrImpl(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+
+ // case: 0n | y
+ if (xlength == 0) {
+ return y;
+ }
+
+ // case: x | 0n
+ if (ylength == 0) {
+ return x;
+ }
+
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+ const resultLength = (xlength > ylength) ? xlength : ylength;
+
+ if (xsign == kPositiveSign && ysign == kPositiveSign) {
+ const result = AllocateEmptyBigIntNoThrow(kPositiveSign, resultLength)
+ otherwise unreachable;
+ CppBitwiseOrPosPosAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ } else if (xsign == kNegativeSign && ysign == kNegativeSign) {
+ const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength)
+ otherwise unreachable;
+ CppBitwiseOrNegNegAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ } else if (xsign == kPositiveSign && ysign == kNegativeSign) {
+ const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength)
+ otherwise unreachable;
+ CppBitwiseOrPosNegAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ } else {
+ const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength)
+ otherwise unreachable;
+ CppBitwiseOrPosNegAndCanonicalize(result, y, x);
+ return Convert<BigInt>(result);
+ }
+}
+
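BigIntBitwiseOrImpl picks the result sign before calling into C++: two non-negative operands stay non-negative, while any negative operand forces a negative result, and the neg|pos case is reduced to the single pos|neg helper by swapping the operands. The same sign rule holds for fixed-width two's-complement integers, which gives a quick sanity check in plain C++ (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // If either operand has its sign bit set, so does the OR; if neither does,
  // the OR cannot set it. This is the sign rule the Torque code hard-codes.
  int64_t pos = 0x1234, neg = -0x5678;
  assert((pos | pos) >= 0);
  assert((neg | neg) < 0);
  assert((pos | neg) < 0);
  assert((neg | pos) < 0);  // the mixed case is symmetric, hence the operand swap
  return 0;
}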
+builtin BigIntBitwiseOrNoThrow(implicit context: Context)(
+ x: BigInt, y: BigInt): Numeric {
+ return BigIntBitwiseOrImpl(x, y);
+}
+
+builtin BigIntBitwiseOr(implicit context: Context)(
+ xNum: Numeric, yNum: Numeric): BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntBitwiseOrImpl(x, y);
+ } label MixedTypes {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ }
+}
+
+macro BigIntBitwiseXorImpl(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt labels BigIntTooBig {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+
+ // case: 0n ^ y
+ if (xlength == 0) {
+ return y;
+ }
+
+ // case: x ^ 0n
+ if (ylength == 0) {
+ return x;
+ }
+
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+
+ if (xsign == kPositiveSign && ysign == kPositiveSign) {
+ const resultLength = (xlength > ylength) ? xlength : ylength;
+ const result = AllocateEmptyBigIntNoThrow(kPositiveSign, resultLength)
+ otherwise unreachable;
+ CppBitwiseXorPosPosAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ } else if (xsign == kNegativeSign && ysign == kNegativeSign) {
+ const resultLength = (xlength > ylength) ? xlength : ylength;
+ const result = AllocateEmptyBigIntNoThrow(kPositiveSign, resultLength)
+ otherwise unreachable;
+ CppBitwiseXorNegNegAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ } else if (xsign == kPositiveSign && ysign == kNegativeSign) {
+ const resultLength = ((xlength > ylength) ? xlength : ylength) + 1;
+ const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength)
+ otherwise BigIntTooBig;
+ CppBitwiseXorPosNegAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ } else {
+ const resultLength = ((xlength > ylength) ? xlength : ylength) + 1;
+ const result = AllocateEmptyBigIntNoThrow(kNegativeSign, resultLength)
+ otherwise BigIntTooBig;
+ CppBitwiseXorPosNegAndCanonicalize(result, y, x);
+ return Convert<BigInt>(result);
+ }
+}
+
+builtin BigIntBitwiseXorNoThrow(implicit context: Context)(
+ x: BigInt, y: BigInt): Numeric {
+ try {
+ return BigIntBitwiseXorImpl(x, y) otherwise BigIntTooBig;
+ } label BigIntTooBig {
+ // Smi sentinel 0 is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+}
+
+builtin BigIntBitwiseXor(implicit context: Context)(
+ xNum: Numeric, yNum: Numeric): BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntBitwiseXorImpl(x, y) otherwise BigIntTooBig;
+ } label MixedTypes {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ } label BigIntTooBig {
+ ThrowRangeError(MessageTemplate::kBigIntTooBig);
+ }
+}
+
+macro MutableBigIntLeftShiftByAbsolute(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt labels BigIntTooBig {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+
+ // case: 0n << y
+ if (xlength == 0) {
+ return x;
+ }
+
+ // case: x << 0n
+ if (ylength == 0) {
+ return x;
+ }
+
+ if (ylength > 1) {
+ // Depends on kBigIntMaxLengthBits <= (1 << kBigIntDigitSize).
+ goto BigIntTooBig;
+ }
+ const shiftAbs = LoadBigIntDigit(y, 0);
+ if (shiftAbs > kBigIntMaxLengthBits) {
+ goto BigIntTooBig;
+ }
+
+ // {shift} is positive.
+ const shift = Convert<intptr>(shiftAbs);
+ let resultLength = xlength + shift / kBigIntDigitBits;
+ const bitsShift = shift % kBigIntDigitBits;
+ const xmsd = LoadBigIntDigit(x, xlength - 1);
+ if (bitsShift != 0 &&
+ xmsd >>> Convert<uintptr>(kBigIntDigitBits - bitsShift) != 0) {
+ resultLength++;
+ }
+ const result = AllocateEmptyBigIntNoThrow(ReadBigIntSign(x), resultLength)
+ otherwise BigIntTooBig;
+ CppLeftShiftAndCanonicalize(result, x, shift);
+ return Convert<BigInt>(result);
+}
+
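MutableBigIntLeftShiftByAbsolute sizes the result as xlength + shift / kBigIntDigitBits digits, plus one extra digit when the top bits of x's most significant digit would spill past a digit boundary. A self-contained C++ sketch of that digit-count computation over little-endian 64-bit digits (illustrative only, not the V8 helper):

#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint64_t kDigitBits = 64;

// Digits needed to hold (x << shift), where x is a little-endian array of
// 64-bit digits whose most significant digit is non-zero.
static size_t leftShiftResultLength(const std::vector<uint64_t>& x,
                                    uint64_t shift) {
  size_t length = x.size() + shift / kDigitBits;
  uint64_t bits = shift % kDigitBits;
  // If the high bits of the most significant digit spill past the digit
  // boundary, one extra digit is required.
  if (bits != 0 && (x.back() >> (kDigitBits - bits)) != 0) ++length;
  return length;
}

int main() {
  assert(leftShiftResultLength({1}, 1) == 1);           // 1 << 1 still fits
  assert(leftShiftResultLength({1ULL << 63}, 1) == 2);  // carries into a new digit
  assert(leftShiftResultLength({1}, 64) == 2);          // shifts by a whole digit
  return 0;
}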
+macro RightShiftByMaximum(implicit context: Context)(sign: uint32): BigInt {
+ if (sign == kNegativeSign) {
+ const minusOne = AllocateEmptyBigInt(kNegativeSign, 1);
+ StoreBigIntDigit(minusOne, 0, 1);
+ return Convert<BigInt>(minusOne);
+ } else {
+ return Convert<BigInt>(AllocateEmptyBigInt(kPositiveSign, 0));
+ }
+}
+
+macro MutableBigIntRightShiftByAbsolute(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+
+ // case: 0n >> y
+ if (xlength == 0) {
+ return x;
+ }
+
+ // case: x >> 0n
+ if (ylength == 0) {
+ return x;
+ }
+
+ const sign = ReadBigIntSign(x);
+ if (ylength > 1) {
+ // Depends on kBigIntMaxLengthBits <= (1 << kBigIntDigitSize).
+ return RightShiftByMaximum(sign);
+ }
+ const shiftAbs = LoadBigIntDigit(y, 0);
+ if (shiftAbs > kBigIntMaxLengthBits) {
+ return RightShiftByMaximum(sign);
+ }
+
+ // {shift} is positive.
+ const shift = Convert<intptr>(shiftAbs);
+ const returnVal = CppRightShiftResultLength(x, sign, shift);
+ const mustRoundDown = returnVal >>> kMustRoundDownBitShift;
+ const lengthMask = (1 << kMustRoundDownBitShift) - 1;
+ const resultLength = Convert<intptr>(returnVal & lengthMask);
+ if (resultLength == 0) {
+ return RightShiftByMaximum(sign);
+ }
+
+ const result = AllocateEmptyBigIntNoThrow(sign, resultLength)
+ otherwise unreachable;
+ CppRightShiftAndCanonicalize(result, x, shift, mustRoundDown);
+ return Convert<BigInt>(result);
+}
+
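CppRightShiftResultLength returns two things packed into one uint32: the must-round-down flag in bit kMustRoundDownBitShift (30) and the new digit count in the low 30 bits, which is exactly what the shift and mask above unpack. A small sketch of that encoding (pack() is only illustrative; the real producer is the C++ big-int helper behind the external reference):

#include <cassert>
#include <cstdint>

constexpr uint32_t kMustRoundDownBitShift = 30;
constexpr uint32_t kLengthMask = (1u << kMustRoundDownBitShift) - 1;

// Pack a result length (< 2^30 digits) and a "must round down" flag into a
// single uint32, matching the decoding done by the Torque caller.
static uint32_t pack(uint32_t length, bool must_round_down) {
  assert(length <= kLengthMask);
  return (static_cast<uint32_t>(must_round_down) << kMustRoundDownBitShift) | length;
}

int main() {
  uint32_t packed = pack(17, true);
  assert((packed >> kMustRoundDownBitShift) == 1);  // mustRoundDown
  assert((packed & kLengthMask) == 17);             // resultLength
  return 0;
}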
+macro BigIntShiftLeftImpl(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt labels BigIntTooBig {
+ if (ReadBigIntSign(y) == kNegativeSign) {
+ return MutableBigIntRightShiftByAbsolute(x, y);
+ } else {
+ return MutableBigIntLeftShiftByAbsolute(x, y) otherwise BigIntTooBig;
+ }
+}
+
+macro BigIntShiftRightImpl(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt labels BigIntTooBig {
+ if (ReadBigIntSign(y) == kNegativeSign) {
+ return MutableBigIntLeftShiftByAbsolute(x, y) otherwise BigIntTooBig;
+ } else {
+ return MutableBigIntRightShiftByAbsolute(x, y);
+ }
+}
+
+builtin BigIntShiftLeftNoThrow(implicit context: Context)(
+ x: BigInt, y: BigInt): Numeric {
+ try {
+ return BigIntShiftLeftImpl(x, y) otherwise BigIntTooBig;
+ } label BigIntTooBig {
+ // Smi sentinel 0 is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+}
+
+builtin BigIntShiftLeft(implicit context: Context)(
+ xNum: Numeric, yNum: Numeric): BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntShiftLeftImpl(x, y) otherwise BigIntTooBig;
+ } label MixedTypes {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ } label BigIntTooBig {
+ ThrowRangeError(MessageTemplate::kBigIntTooBig);
+ }
+}
+
+builtin BigIntShiftRightNoThrow(implicit context: Context)(
+ x: BigInt, y: BigInt): Numeric {
+ try {
+ return BigIntShiftRightImpl(x, y) otherwise BigIntTooBig;
+ } label BigIntTooBig {
+ // Smi sentinel 0 is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+}
+
+builtin BigIntShiftRight(implicit context: Context)(
+ xNum: Numeric, yNum: Numeric): BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntShiftRightImpl(x, y) otherwise BigIntTooBig;
+ } label MixedTypes {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ } label BigIntTooBig {
+ ThrowRangeError(MessageTemplate::kBigIntTooBig);
+ }
+}
+
+builtin BigIntEqual(implicit context: Context)(x: BigInt, y: BigInt): Boolean {
+ if (ReadBigIntSign(x) != ReadBigIntSign(y)) {
+ return False;
+ }
+
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+ if (xlength != ylength) {
+ return False;
+ }
+
+ for (let i: intptr = 0; i < xlength; ++i) {
+ if (LoadBigIntDigit(x, i) != LoadBigIntDigit(y, i)) {
+ return False;
+ }
+ }
+
+ return True;
+}
+
+// Returns r such that r < 0 if |x| < |y|; r > 0 if |x| > |y|;
+// r == 0 if |x| == |y|.
+macro BigIntCompareAbsolute(implicit context: Context)(
+ x: BigInt, y: BigInt): intptr {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+ const diff = xlength - ylength;
+ if (diff != 0) {
+ return diff;
+ }
+
+ // case: {xlength} == {ylength}
+ for (let i: intptr = xlength - 1; i >= 0; --i) {
+ const xdigit = LoadBigIntDigit(x, i);
+ const ydigit = LoadBigIntDigit(y, i);
+ if (xdigit != ydigit) {
+ return (xdigit > ydigit) ? kGreaterThan : kLessThan;
+ }
+ }
+ return 0;
+}
+
+// Returns r such that r < 0 if x < y; r > 0 if x > y; r == 0 if x == y.
+macro BigIntCompare(implicit context: Context)(x: BigInt, y: BigInt): intptr {
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+ if (xsign != ysign) {
+ return xsign == kPositiveSign ? kGreaterThan : kLessThan;
+ }
+
+ // case: {xsign} == {ysign}
+ const diff = BigIntCompareAbsolute(x, y);
+ return xsign == kPositiveSign ? diff : 0 - diff;
+}
+
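BigIntCompareAbsolute first decides by digit count and only then scans digits from the most significant end; BigIntCompare handles the signs and negates the magnitude result for two negative values. The same logic over little-endian digit vectors in plain C++ (a sketch, not the V8 representation):

#include <cassert>
#include <cstdint>
#include <vector>

// Compare magnitudes of little-endian digit arrays with no leading zero
// digits: more digits wins, otherwise the first differing digit from the top.
static int compareAbsolute(const std::vector<uint64_t>& x,
                           const std::vector<uint64_t>& y) {
  if (x.size() != y.size()) return x.size() > y.size() ? 1 : -1;
  for (size_t i = x.size(); i-- > 0;) {
    if (x[i] != y[i]) return x[i] > y[i] ? 1 : -1;
  }
  return 0;
}

// Differing signs decide immediately; equal signs reuse the magnitude
// comparison, negated when both values are negative.
static int compareSigned(bool x_negative, const std::vector<uint64_t>& x,
                         bool y_negative, const std::vector<uint64_t>& y) {
  if (x_negative != y_negative) return x_negative ? -1 : 1;
  int diff = compareAbsolute(x, y);
  return x_negative ? -diff : diff;
}

int main() {
  assert(compareSigned(false, {5}, false, {7}) < 0);
  assert(compareSigned(true, {5}, true, {7}) > 0);   // -5 > -7
  assert(compareSigned(true, {5}, false, {1}) < 0);  // negative < positive
  return 0;
}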
+builtin BigIntLessThan(implicit context: Context)(
+ x: BigInt, y: BigInt): Boolean {
+ return BigIntCompare(x, y) < 0 ? True : False;
+}
+
+builtin BigIntGreaterThan(implicit context: Context)(
+ x: BigInt, y: BigInt): Boolean {
+ return BigIntCompare(x, y) > 0 ? True : False;
+}
+
+builtin BigIntLessThanOrEqual(implicit context: Context)(
+ x: BigInt, y: BigInt): Boolean {
+ return BigIntCompare(x, y) <= 0 ? True : False;
+}
+
+builtin BigIntGreaterThanOrEqual(implicit context: Context)(
+ x: BigInt, y: BigInt): Boolean {
+ return BigIntCompare(x, y) >= 0 ? True : False;
+}
+
builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt {
const length = ReadBigIntLength(bigint);
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index ba70f97368..896875cada 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -737,7 +737,8 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback,
- args.GetLengthWithoutReceiver(), call_data, holder);
+ TruncateIntPtrToInt32(args.GetLengthWithoutReceiver()),
+ call_data, holder);
}
TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 4cbeaa6c3d..60f347f643 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -24,6 +24,7 @@ namespace internal {
isolate->factory()->NewStringFromAsciiChecked(method))); \
} \
Handle<CallSiteInfo> frame = Handle<CallSiteInfo>::cast(it.GetDataValue())
+
namespace {
Object PositiveNumberOrNull(int value, Isolate* isolate) {
@@ -31,6 +32,10 @@ Object PositiveNumberOrNull(int value, Isolate* isolate) {
return ReadOnlyRoots(isolate).null_value();
}
+bool NativeContextIsForShadowRealm(NativeContext native_context) {
+ return native_context.scope_info().scope_type() == SHADOW_REALM_SCOPE;
+}
+
} // namespace
BUILTIN(CallSitePrototypeGetColumnNumber) {
@@ -66,8 +71,22 @@ BUILTIN(CallSitePrototypeGetFileName) {
}
BUILTIN(CallSitePrototypeGetFunction) {
+ static const char method_name[] = "getFunction";
HandleScope scope(isolate);
- CHECK_CALLSITE(frame, "getFunction");
+ CHECK_CALLSITE(frame, method_name);
+ // ShadowRealms have a boundary: references to outside objects must not exist
+ // in the ShadowRealm, and references to ShadowRealm objects must not exist
+ // outside the ShadowRealm.
+ if (NativeContextIsForShadowRealm(isolate->raw_native_context()) ||
+ (frame->function().IsJSFunction() &&
+ NativeContextIsForShadowRealm(
+ JSFunction::cast(frame->function()).native_context()))) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(
+ MessageTemplate::kCallSiteMethodUnsupportedInShadowRealm,
+ isolate->factory()->NewStringFromAsciiChecked(method_name)));
+ }
if (frame->IsStrict() ||
(frame->function().IsJSFunction() &&
JSFunction::cast(frame->function()).shared().is_toplevel())) {
@@ -124,8 +143,22 @@ BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
}
BUILTIN(CallSitePrototypeGetThis) {
+ static const char method_name[] = "getThis";
HandleScope scope(isolate);
- CHECK_CALLSITE(frame, "getThis");
+ CHECK_CALLSITE(frame, method_name);
+ // ShadowRealms have a boundary: references to outside objects must not exist
+ // in the ShadowRealm, and references to ShadowRealm objects must not exist
+ // outside the ShadowRealm.
+ if (NativeContextIsForShadowRealm(isolate->raw_native_context()) ||
+ (frame->function().IsJSFunction() &&
+ NativeContextIsForShadowRealm(
+ JSFunction::cast(frame->function()).native_context()))) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(
+ MessageTemplate::kCallSiteMethodUnsupportedInShadowRealm,
+ isolate->factory()->NewStringFromAsciiChecked(method_name)));
+ }
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall);
#if V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index bf8d735a0c..fb3251a040 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -69,6 +69,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
BIND(&fast_loop);
{
Label if_exception_during_fast_iteration(this, Label::kDeferred);
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
TNode<JSArray> initial_entries_jsarray =
UncheckedCast<JSArray>(initial_entries);
#if DEBUG
@@ -81,9 +82,9 @@ void BaseCollectionsAssembler::AddConstructorEntries(
{
compiler::ScopedExceptionHandler handler(
this, &if_exception_during_fast_iteration, &var_exception);
- AddConstructorEntriesFromFastJSArray(variant, context, native_context,
- collection, initial_entries_jsarray,
- &if_may_have_side_effects);
+ AddConstructorEntriesFromFastJSArray(
+ variant, context, native_context, collection, initial_entries_jsarray,
+ &if_may_have_side_effects, var_index);
}
Goto(&exit);
@@ -113,9 +114,11 @@ void BaseCollectionsAssembler::AddConstructorEntries(
// non-trivial in case "return" callback is added somewhere in the
// iterator's prototype chain.
TNode<NativeContext> native_context = LoadNativeContext(context);
+ TNode<IntPtrT> next_index =
+ IntPtrAdd(var_index.value(), IntPtrConstant(1));
var_iterator_object = CreateArrayIterator(
native_context, UncheckedCast<JSArray>(initial_entries),
- IterationKind::kEntries);
+ IterationKind::kValues, SmiTag(next_index));
Goto(&if_exception);
}
}
@@ -143,7 +146,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
Variant variant, TNode<Context> context, TNode<Context> native_context,
TNode<Object> collection, TNode<JSArray> fast_jsarray,
- Label* if_may_have_side_effects) {
+ Label* if_may_have_side_effects, TVariable<IntPtrT>& var_current_index) {
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
TNode<Int32T> elements_kind = LoadElementsKind(fast_jsarray);
TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
@@ -167,8 +170,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
BIND(&if_smiorobjects);
{
auto set_entry = [&](TNode<IntPtrT> index) {
- TNode<Object> element = LoadAndNormalizeFixedArrayElement(
- CAST(elements), UncheckedCast<IntPtrT>(index));
+ TNode<Object> element =
+ LoadAndNormalizeFixedArrayElement(CAST(elements), index);
AddConstructorEntry(variant, context, collection, add_func, element,
if_may_have_side_effects);
};
@@ -177,7 +180,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
// elements, a fast loop is used. This assumes that adding an element
// to the collection does not call user code that could mutate the elements
// or collection.
- BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
+ BuildFastLoop<IntPtrT>(var_current_index, IntPtrConstant(0), length,
+ set_entry, 1, LoopUnrollingMode::kNo,
IndexAdvanceMode::kPost);
Goto(&exit);
}
@@ -198,7 +202,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
elements, UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, entry);
};
- BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
+ BuildFastLoop<IntPtrT>(var_current_index, IntPtrConstant(0), length,
+ set_entry, 1, LoopUnrollingMode::kNo,
IndexAdvanceMode::kPost);
Goto(&exit);
}
@@ -435,8 +440,8 @@ void BaseCollectionsAssembler::GotoIfCannotBeHeldWeakly(
// TODO(v8:12547) Shared structs and arrays should only be able to point
// to shared values in weak collections. For now, disallow them as weak
// collection keys.
- GotoIf(IsJSSharedStructInstanceType(instance_type), if_cannot_be_held_weakly);
- GotoIf(IsJSSharedArrayInstanceType(instance_type), if_cannot_be_held_weakly);
+ GotoIf(IsAlwaysSharedSpaceJSObjectInstanceType(instance_type),
+ if_cannot_be_held_weakly);
Goto(&end);
Bind(&check_symbol_key);
GotoIfNot(HasHarmonySymbolAsWeakmapKeyFlag(), if_cannot_be_held_weakly);
@@ -1293,10 +1298,7 @@ void CollectionsBuiltinsAssembler::SameValueZeroString(
GotoIf(TaggedIsSmi(candidate_key), if_not_same);
GotoIfNot(IsString(CAST(candidate_key)), if_not_same);
- Branch(TaggedEqual(CallBuiltin(Builtin::kStringEqual, NoContextConstant(),
- key_string, candidate_key),
- TrueConstant()),
- if_same, if_not_same);
+ BranchIfStringEqual(key_string, CAST(candidate_key), if_same, if_not_same);
}
void CollectionsBuiltinsAssembler::SameValueZeroBigInt(
diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h
index 6dd2381ddd..723b680719 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.h
+++ b/deps/v8/src/builtins/builtins-collections-gen.h
@@ -51,12 +51,10 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Fast path for adding constructor entries. Assumes the entries are a fast
// JS array (see CodeStubAssembler::BranchIfFastJSArray()).
- void AddConstructorEntriesFromFastJSArray(Variant variant,
- TNode<Context> context,
- TNode<Context> native_context,
- TNode<Object> collection,
- TNode<JSArray> fast_jsarray,
- Label* if_may_have_side_effects);
+ void AddConstructorEntriesFromFastJSArray(
+ Variant variant, TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> collection, TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects, TVariable<IntPtrT>& var_current_index);
// Adds constructor entries to a collection using the iterator protocol.
void AddConstructorEntriesFromIterable(
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 5bca4342e2..8631b799e0 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -148,7 +148,7 @@ void ConsoleCall(
CHECK(!isolate->has_scheduled_exception());
if (!isolate->console_delegate()) return;
HandleScope scope(isolate);
- debug::ConsoleCallArguments wrapper(args);
+ debug::ConsoleCallArguments wrapper(isolate, args);
Handle<Object> context_id_obj = JSObject::GetDataProperty(
isolate, args.target(), isolate->factory()->console_context_id_symbol());
int context_id =
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index cb2b79bef7..7a47263c9d 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -252,8 +252,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
- TNode<CodeT> lazy_builtin =
- HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+ TNode<Code> lazy_builtin = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
Return(result);
}
@@ -382,7 +381,7 @@ TNode<Context> ConstructorBuiltinsAssembler::FastNewFunctionContext(
[=](TNode<IntPtrT> offset) {
StoreObjectFieldNoWriteBarrier(function_context, offset, undefined);
},
- kTaggedSize, IndexAdvanceMode::kPost);
+ kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
return function_context;
}
@@ -664,7 +663,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
TNode<Object> field = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, field);
},
- kTaggedSize, IndexAdvanceMode::kPost);
+ kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
CopyMutableHeapNumbersInObject(copy, offset.value(), instance_size);
Goto(&done_init);
}
@@ -714,7 +713,7 @@ void ConstructorBuiltinsAssembler::CopyMutableHeapNumbersInObject(
}
BIND(&continue_loop);
},
- kTaggedSize, IndexAdvanceMode::kPost);
+ kTaggedSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 093b5e978a..d635a8cb13 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -21,6 +21,13 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
Return(ToNumber(context, input));
}
+TF_BUILTIN(ToBigInt, CodeStubAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto input = Parameter<Object>(Descriptor::kArgument);
+
+ Return(ToBigInt(context, input));
+}
+
TF_BUILTIN(ToNumber_Baseline, CodeStubAssembler) {
auto input = Parameter<Object>(Descriptor::kArgument);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
@@ -63,6 +70,13 @@ TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
Return(ToNumber(context, input, BigIntHandling::kConvertToNumber));
}
+TF_BUILTIN(ToBigIntConvertNumber, CodeStubAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto input = Parameter<Object>(Descriptor::kArgument);
+
+ Return(ToBigIntConvertNumber(context, input));
+}
+
// ES6 section 7.1.2 ToBoolean ( argument )
// Requires parameter on stack so that it can be used as a continuation from a
// LAZY deopt.
@@ -79,6 +93,27 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
Return(FalseConstant());
}
+// Requires parameter on stack so that it can be used as a continuation from a
+// LAZY deopt.
+TF_BUILTIN(MathRoundContinuation, CodeStubAssembler) {
+ auto value = Parameter<Number>(Descriptor::kArgument);
+ Return(ChangeFloat64ToTagged(Float64Round(ChangeNumberToFloat64(value))));
+}
+
+// Requires parameter on stack so that it can be used as a continuation from a
+// LAZY deopt.
+TF_BUILTIN(MathFloorContinuation, CodeStubAssembler) {
+ auto value = Parameter<Number>(Descriptor::kArgument);
+ Return(ChangeFloat64ToTagged(Float64Floor(ChangeNumberToFloat64(value))));
+}
+
+// Requires parameter on stack so that it can be used as a continuation from a
+// LAZY deopt.
+TF_BUILTIN(MathCeilContinuation, CodeStubAssembler) {
+ auto value = Parameter<Number>(Descriptor::kArgument);
+ Return(ChangeFloat64ToTagged(Float64Ceil(ChangeNumberToFloat64(value))));
+}
+
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
auto object = Parameter<Object>(Descriptor::kObject);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 8f3f789e0c..c6d8adecea 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -94,27 +94,45 @@ BUILTIN(DataViewConstructor) {
view_byte_length = byte_length->Number();
}
+ bool is_backed_by_rab =
+ array_buffer->is_resizable_by_js() && !array_buffer->is_shared();
+
// 12. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
// "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
// [[ByteLength]], [[ByteOffset]]»).
Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSDataView> data_view = Handle<JSDataView>::cast(result);
+ if (is_backed_by_rab || length_tracking) {
+ // Create a JSRabGsabDataView.
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedRabGsabDataViewMap(isolate, new_target));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::NewWithMap(isolate, initial_map,
+ Handle<AllocationSite>::null()));
+ } else {
+ // Create a JSDataView.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ }
+ Handle<JSDataViewOrRabGsabDataView> data_view =
+ Handle<JSDataViewOrRabGsabDataView>::cast(result);
{
- // Must fully initialize the JSDAtaView here so that it passes ObjectVerify,
- // which may for example be triggered when allocating error objects below.
+ // Must fully initialize the JSDataViewOrRabGsabDataView here so that it
+ // passes ObjectVerify, which may for example be triggered when allocating
+ // error objects below.
DisallowGarbageCollection no_gc;
- JSDataView raw = *data_view;
+ JSDataViewOrRabGsabDataView raw = *data_view;
+
for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
// TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
raw.SetEmbedderField(i, Smi::zero());
}
raw.set_bit_field(0);
- raw.set_is_backed_by_rab(array_buffer->is_resizable_by_js() &&
- !array_buffer->is_shared());
+ raw.set_is_backed_by_rab(is_backed_by_rab);
raw.set_is_length_tracking(length_tracking);
raw.set_byte_length(0);
raw.set_byte_offset(0);
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index f8ca669d1d..b3ad88c6d5 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -190,7 +190,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
// Check if the {receiver} is actually a JSReceiver.
Label receiver_is_invalid(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &receiver_is_invalid);
- GotoIfNot(IsJSReceiver(CAST(receiver)), &receiver_is_invalid);
+ GotoIfNot(JSAnyIsNotPrimitive(CAST(receiver)), &receiver_is_invalid);
// Dispatch to the appropriate OrdinaryToPrimitive builtin.
Label hint_is_number(this), hint_is_string(this),
@@ -207,18 +207,11 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
// Slow-case with actual string comparisons.
GotoIf(TaggedIsSmi(hint), &hint_is_invalid);
GotoIfNot(IsString(CAST(hint)), &hint_is_invalid);
- GotoIf(TaggedEqual(
- CallBuiltin(Builtin::kStringEqual, context, hint, number_string),
- TrueConstant()),
- &hint_is_number);
- GotoIf(TaggedEqual(
- CallBuiltin(Builtin::kStringEqual, context, hint, default_string),
- TrueConstant()),
- &hint_is_string);
- GotoIf(TaggedEqual(
- CallBuiltin(Builtin::kStringEqual, context, hint, string_string),
- TrueConstant()),
- &hint_is_string);
+
+ TNode<IntPtrT> hint_length = LoadStringLengthAsWord(CAST(hint));
+ GotoIfStringEqual(CAST(hint), hint_length, number_string, &hint_is_number);
+ GotoIfStringEqual(CAST(hint), hint_length, default_string, &hint_is_string);
+ GotoIfStringEqual(CAST(hint), hint_length, string_string, &hint_is_string);
Goto(&hint_is_invalid);
// Use the OrdinaryToPrimitive builtin to convert to a Number.
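
The hunk above replaces three StringEqual builtin calls with GotoIfStringEqual against a precomputed hint length. A minimal standalone sketch of the idea, compare lengths first and only then the characters, dispatching the three spec hints ("number" to the number path, "string" and "default" to the string path); the real CSA helper has richer fast paths, so this is only the shape of the check.

#include <iostream>
#include <string_view>

enum class ToPrimitiveHint { kNumber, kString, kInvalid };

// Cheap length filter before the full comparison.
bool EqualWithLengthCheck(std::string_view hint, std::string_view expected) {
  return hint.size() == expected.size() && hint == expected;
}

ToPrimitiveHint ClassifyHint(std::string_view hint) {
  if (EqualWithLengthCheck(hint, "number")) return ToPrimitiveHint::kNumber;
  if (EqualWithLengthCheck(hint, "default")) return ToPrimitiveHint::kString;
  if (EqualWithLengthCheck(hint, "string")) return ToPrimitiveHint::kString;
  return ToPrimitiveHint::kInvalid;
}

int main() {
  std::cout << (ClassifyHint("number") == ToPrimitiveHint::kNumber)
            << (ClassifyHint("default") == ToPrimitiveHint::kString)
            << (ClassifyHint("boom") == ToPrimitiveHint::kInvalid) << "\n";
  return 0;
}
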
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index a07ce60d9f..c9cbccf4fe 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -75,7 +75,8 @@ Object SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
BUILTIN(DateConstructor) {
HandleScope scope(isolate);
if (args.new_target()->IsUndefined(isolate)) {
- double const time_val = JSDate::CurrentTimeValue(isolate);
+ double const time_val =
+ static_cast<double>(JSDate::CurrentTimeValue(isolate));
DateBuffer buffer = ToDateString(time_val, isolate->date_cache(),
ToDateStringMode::kLocalDateAndTime);
RETURN_RESULT_OR_FAILURE(
@@ -87,7 +88,7 @@ BUILTIN(DateConstructor) {
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
double time_val;
if (argc == 0) {
- time_val = JSDate::CurrentTimeValue(isolate);
+ time_val = static_cast<double>(JSDate::CurrentTimeValue(isolate));
} else if (argc == 1) {
Handle<Object> value = args.at(1);
if (value->IsJSDate()) {
@@ -163,7 +164,8 @@ BUILTIN(DateConstructor) {
// ES6 section 20.3.3.1 Date.now ( )
BUILTIN(DateNow) {
HandleScope scope(isolate);
- return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
+ return *isolate->factory()->NewNumberFromInt64(
+ JSDate::CurrentTimeValue(isolate));
}
// ES6 section 20.3.3.2 Date.parse ( string )
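
The static_cast<double> and NewNumberFromInt64 call sites above suggest that the changed JSDate::CurrentTimeValue now hands back an integral millisecond count rather than a double. A small standalone sketch of that shape, assuming a plain int64 milliseconds-since-epoch clock; the names are illustrative, not V8 API.

#include <chrono>
#include <cstdint>
#include <iostream>

// Whole milliseconds since the epoch as int64_t.
int64_t CurrentTimeMs() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(system_clock::now().time_since_epoch())
      .count();
}

int main() {
  const int64_t ms = CurrentTimeMs();
  // The Date constructor path widens to double (the JS time value), while a
  // Date.now()-style path can pass the integer to an int64-aware Number factory.
  const double time_val = static_cast<double>(ms);
  std::cout << ms << " -> " << time_val << "\n";
  return 0;
}
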
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 859b5cee9a..3e746487bc 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -160,11 +160,12 @@ namespace internal {
\
/* String helpers */ \
TFC(StringFromCodePointAt, StringAtAsString) \
- TFC(StringEqual, Compare) \
- TFC(StringGreaterThan, Compare) \
- TFC(StringGreaterThanOrEqual, Compare) \
- TFC(StringLessThan, Compare) \
- TFC(StringLessThanOrEqual, Compare) \
+ TFC(StringEqual, StringEqual) \
+ TFC(StringGreaterThan, CompareNoContext) \
+ TFC(StringGreaterThanOrEqual, CompareNoContext) \
+ TFC(StringLessThan, CompareNoContext) \
+ TFC(StringLessThanOrEqual, CompareNoContext) \
+ TFC(StringCompare, CompareNoContext) \
TFC(StringSubstring, StringSubstring) \
\
/* OrderedHashTable helpers */ \
@@ -198,7 +199,6 @@ namespace internal {
\
/* Maglev Compiler */ \
ASM(MaglevOnStackReplacement, OnStackReplacement) \
- ASM(MaglevOutOfLinePrologue, NoContext) \
\
/* Code life-cycle */ \
TFC(CompileLazy, JSTrampoline) \
@@ -257,10 +257,12 @@ namespace internal {
\
/* Type conversions */ \
TFC(ToNumber, TypeConversion) \
+ TFC(ToBigInt, TypeConversion) \
TFC(ToNumber_Baseline, TypeConversion_Baseline) \
TFC(ToNumeric_Baseline, TypeConversion_Baseline) \
TFC(PlainPrimitiveToNumber, TypeConversionNoContext) \
TFC(ToNumberConvertBigInt, TypeConversion) \
+ TFC(ToBigIntConvertNumber, TypeConversion) \
TFC(Typeof, Typeof) \
TFC(BigIntToI64, BigIntToI64) \
TFC(BigIntToI32Pair, BigIntToI32Pair) \
@@ -269,6 +271,9 @@ namespace internal {
\
/* Type conversions continuations */ \
TFC(ToBooleanLazyDeoptContinuation, SingleParameterOnStack) \
+ TFC(MathCeilContinuation, SingleParameterOnStack) \
+ TFC(MathFloorContinuation, SingleParameterOnStack) \
+ TFC(MathRoundContinuation, SingleParameterOnStack) \
\
/* Handlers */ \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
@@ -397,6 +402,7 @@ namespace internal {
CPP(ArrayShift) \
/* ES6 #sec-array.prototype.unshift */ \
CPP(ArrayUnshift) \
+ CPP(ArrayFromAsync) \
/* Support for Array.from and other array-copying idioms */ \
TFS(CloneFastJSArray, kSource) \
TFS(CloneFastJSArrayFillingHoles, kSource) \
@@ -425,8 +431,9 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
/* https://tc39.es/proposal-resizablearraybuffer/ */ \
CPP(ArrayBufferPrototypeResize) \
- /* proposal-resizablearraybuffer/#sec-arraybuffer.prototype.transfer */ \
+ /* https://tc39.es/proposal-arraybuffer-transfer/ */ \
CPP(ArrayBufferPrototypeTransfer) \
+ CPP(ArrayBufferPrototypeTransferToFixedLength) \
\
/* AsyncFunction */ \
TFS(AsyncFunctionEnter, kClosure, kReceiver) \
@@ -623,10 +630,6 @@ namespace internal {
CPP(JsonRawJson) \
CPP(JsonIsRawJson) \
\
- /* Web snapshots */ \
- CPP(WebSnapshotSerialize) \
- CPP(WebSnapshotDeserialize) \
- \
/* ICs */ \
TFH(LoadIC, LoadWithVector) \
TFH(LoadIC_Megamorphic, LoadWithVector) \
@@ -638,9 +641,11 @@ namespace internal {
TFH(LoadSuperICBaseline, LoadWithReceiverBaseline) \
TFH(KeyedLoadIC, KeyedLoadWithVector) \
TFH(KeyedLoadIC_Megamorphic, KeyedLoadWithVector) \
+ TFH(KeyedLoadIC_MegamorphicStringKey, KeyedLoadWithVector) \
TFH(KeyedLoadICTrampoline, KeyedLoad) \
TFH(KeyedLoadICBaseline, KeyedLoadBaseline) \
TFH(KeyedLoadICTrampoline_Megamorphic, KeyedLoad) \
+ TFH(KeyedLoadICTrampoline_MegamorphicStringKey, KeyedLoad) \
TFH(StoreGlobalIC, StoreGlobalWithVector) \
TFH(StoreGlobalICTrampoline, StoreGlobal) \
TFH(StoreGlobalICBaseline, StoreGlobalBaseline) \
@@ -653,9 +658,9 @@ namespace internal {
TFH(KeyedStoreIC, StoreWithVector) \
TFH(KeyedStoreICTrampoline, Store) \
TFH(KeyedStoreICBaseline, StoreBaseline) \
- TFH(DefineKeyedOwnIC, StoreWithVector) \
- TFH(DefineKeyedOwnICTrampoline, Store) \
- TFH(DefineKeyedOwnICBaseline, StoreBaseline) \
+ TFH(DefineKeyedOwnIC, DefineKeyedOwnWithVector) \
+ TFH(DefineKeyedOwnICTrampoline, DefineKeyedOwn) \
+ TFH(DefineKeyedOwnICBaseline, DefineKeyedOwnBaseline) \
TFH(StoreInArrayLiteralIC, StoreWithVector) \
TFH(StoreInArrayLiteralICBaseline, StoreBaseline) \
TFH(LookupContextTrampoline, LookupTrampoline) \
@@ -668,8 +673,10 @@ namespace internal {
TFH(LoadGlobalICBaseline, LoadGlobalBaseline) \
TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
TFH(LoadGlobalICInsideTypeofBaseline, LoadGlobalBaseline) \
+ TFH(LookupGlobalIC, LookupWithVector) \
TFH(LookupGlobalICTrampoline, LookupTrampoline) \
TFH(LookupGlobalICBaseline, LookupBaseline) \
+ TFH(LookupGlobalICInsideTypeof, LookupWithVector) \
TFH(LookupGlobalICInsideTypeofTrampoline, LookupTrampoline) \
TFH(LookupGlobalICInsideTypeofBaseline, LookupBaseline) \
TFH(CloneObjectIC, CloneObjectWithVector) \
@@ -718,8 +725,8 @@ namespace internal {
CPP(NumberPrototypeToFixed) \
CPP(NumberPrototypeToLocaleString) \
CPP(NumberPrototypeToPrecision) \
- TFC(SameValue, Compare) \
- TFC(SameValueNumbersOnly, Compare) \
+ TFC(SameValue, CompareNoContext) \
+ TFC(SameValueNumbersOnly, CompareNoContext) \
\
/* Binary ops with feedback collection */ \
TFC(Add_Baseline, BinaryOp_Baseline) \
@@ -834,7 +841,6 @@ namespace internal {
ASM(ReflectApply, JSTrampoline) \
ASM(ReflectConstruct, JSTrampoline) \
CPP(ReflectDefineProperty) \
- CPP(ReflectGetOwnPropertyDescriptor) \
CPP(ReflectOwnKeys) \
CPP(ReflectSet) \
\
@@ -895,7 +901,8 @@ namespace internal {
kSpecifier, kExportName) \
TFJ(ShadowRealmImportValueFulfilled, kJSArgcReceiverSlots + 1, kReceiver, \
kExports) \
- TFJ(ShadowRealmImportValueRejected, kDontAdaptArgumentsSentinel) \
+ TFJ(ShadowRealmImportValueRejected, kJSArgcReceiverSlots + 1, kReceiver, \
+ kException) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
@@ -1071,17 +1078,12 @@ namespace internal {
TFJ(AsyncIteratorValueUnwrap, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* CEntry */ \
- ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
- ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit, \
- CEntry1ArgvOnStack) \
- ASM(CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
- ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
- ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \
- ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
- ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \
- ASM(CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
- ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
- ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \
+ ASM(CEntry_Return1_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
+ ASM(CEntry_Return1_ArgvOnStack_BuiltinExit, CEntry1ArgvOnStack) \
+ ASM(CEntry_Return1_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
+ ASM(CEntry_Return2_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
+ ASM(CEntry_Return2_ArgvOnStack_BuiltinExit, CEntryDummy) \
+ ASM(CEntry_Return2_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(DirectCEntry, CEntryDummy) \
\
/* String helpers */ \
@@ -1094,10 +1096,12 @@ namespace internal {
TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \
TFS(SetProperty, kReceiver, kKey, kValue) \
TFS(CreateDataProperty, kReceiver, kKey, kValue) \
+ TFS(GetOwnPropertyDescriptor, kReceiver, kKey) \
ASM(MemCopyUint8Uint8, CCall) \
ASM(MemMove, CCall) \
TFC(FindNonDefaultConstructorOrConstruct, \
FindNonDefaultConstructorOrConstruct) \
+ TFS(OrdinaryGetOwnPropertyDescriptor, kReceiver, kKey) \
\
/* Trace */ \
CPP(IsTraceCategoryEnabled) \
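
The CEntry renames above drop the SaveFPRegs dimension, leaving variants keyed only by result count, argv mode, and whether a builtin exit frame is used. A standalone sketch of how those remaining dimensions compose into the new names; note that not every combination appears in the list (for example there is no ArgvInRegister + BuiltinExit variant), so this is only an illustration of the naming scheme.

#include <iostream>
#include <string>

// Illustrative enum mirroring one remaining dimension; not a V8 declaration.
enum class ArgvMode { kStack, kRegister };

std::string CEntryName(int result_size, ArgvMode argv_mode,
                       bool builtin_exit_frame) {
  std::string name = "CEntry_Return" + std::to_string(result_size);
  name += argv_mode == ArgvMode::kRegister ? "_ArgvInRegister" : "_ArgvOnStack";
  name += builtin_exit_frame ? "_BuiltinExit" : "_NoBuiltinExit";
  return name;
}

int main() {
  std::cout << CEntryName(1, ArgvMode::kStack, true) << "\n";
  // CEntry_Return1_ArgvOnStack_BuiltinExit
  std::cout << CEntryName(2, ArgvMode::kRegister, false) << "\n";
  // CEntry_Return2_ArgvInRegister_NoBuiltinExit
  return 0;
}
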
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 12f7f58ec5..dd05ed4aa6 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -5,10 +5,9 @@
#ifndef V8_BUILTINS_BUILTINS_DESCRIPTORS_H_
#define V8_BUILTINS_BUILTINS_DESCRIPTORS_H_
-#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-definitions.h"
#include "src/codegen/interface-descriptors.h"
-#include "src/compiler/code-assembler.h"
-#include "src/objects/shared-function-info.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -17,7 +16,7 @@ namespace internal {
#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
struct Builtin_##Name##_InterfaceDescriptor { \
enum ParameterIndices { \
- kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ kJSTarget = kJSCallClosureParameterIndex, \
##__VA_ARGS__, \
kJSNewTarget, \
kJSActualArgumentsCount, \
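
The macro above lays out a per-builtin enum: a fixed slot for the call target (now taken from kJSCallClosureParameterIndex instead of the CodeAssembler constant), then the builtin's own named parameters from __VA_ARGS__, then the trailing JS-calling-convention entries. A standalone sketch of one expansion; the concrete target index value is an assumption here, used only to show how the following enumerators number themselves.

#include <iostream>

struct Builtin_Example_InterfaceDescriptor {
  enum ParameterIndices {
    kJSTarget = -1,   // stand-in for kJSCallClosureParameterIndex
    kReceiver,        // these three stand in for the __VA_ARGS__ parameters
    kKey,
    kValue,
    kJSNewTarget,
    kJSActualArgumentsCount,
  };
};

int main() {
  using D = Builtin_Example_InterfaceDescriptor;
  std::cout << D::kReceiver << " " << D::kValue << " "
            << D::kJSActualArgumentsCount << "\n";  // 0 2 4
  return 0;
}
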
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index adb180fba8..01e016252c 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -52,7 +52,7 @@ BUILTIN(ErrorCaptureStackTrace) {
// Explicitly check for frozen objects. Other access checks are performed by
// the LookupIterator in SetAccessor below.
- if (!JSObject::IsExtensible(object)) {
+ if (!JSObject::IsExtensible(isolate, object)) {
return isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kDefineDisallowed, name));
}
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 09e7f2e4ec..751003076d 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -243,7 +243,7 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
TimesSystemPointerSize(reg_index));
UnsafeStoreFixedArrayElement(parameters_and_registers, index, value);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
// Iterate over register file and write values into array.
// The mapping of register to array index must match that used in
@@ -262,7 +262,7 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
TimesSystemPointerSize(reg_index));
UnsafeStoreFixedArrayElement(parameters_and_registers, index, value);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
// The return value is unused, defaulting to undefined.
Return(UndefinedConstant());
@@ -304,7 +304,7 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
StaleRegisterConstant(),
SKIP_WRITE_BARRIER);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
Return(LoadJSGeneratorObjectInputOrDebugPos(generator));
}
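
The call sites above now pass an explicit LoopUnrollingMode, making a previously implicit codegen choice visible per loop. A conceptual standalone sketch of a loop-building helper parameterized that way; the real BuildFastLoop is a CodeStubAssembler method that emits graph nodes rather than running a loop, so this only illustrates the signature shape.

#include <functional>
#include <iostream>

enum class LoopUnrollingMode { kNo, kYes };
enum class IndexAdvanceMode { kPre, kPost };

void BuildFastLoop(int from, int to, const std::function<void(int)>& body,
                   int increment, LoopUnrollingMode unrolling,
                   IndexAdvanceMode advance_mode) {
  (void)advance_mode;  // Not relevant for this sketch; kept for the shape.
  if (unrolling == LoopUnrollingMode::kYes) {
    // An unrolled variant would emit several body copies per iteration; the
    // register-file loops above explicitly opt out of that.
  }
  for (int i = from; i < to; i += increment) body(i);
}

int main() {
  int sum = 0;
  BuildFastLoop(0, 5, [&](int i) { sum += i; }, 1, LoopUnrollingMode::kNo,
                IndexAdvanceMode::kPost);
  std::cout << sum << "\n";  // 10
  return 0;
}
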
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index dbcf2fa9fe..b9af2d84af 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -58,6 +58,11 @@ void Builtins::Generate_KeyedLoadIC_Megamorphic(
AccessorAssembler assembler(state);
assembler.GenerateKeyedLoadIC_Megamorphic();
}
+void Builtins::Generate_KeyedLoadIC_MegamorphicStringKey(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC_MegamorphicStringKey();
+}
void Builtins::Generate_KeyedLoadIC_PolymorphicName(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
@@ -78,6 +83,11 @@ void Builtins::Generate_KeyedLoadICTrampoline_Megamorphic(
AccessorAssembler assembler(state);
assembler.GenerateKeyedLoadICTrampoline_Megamorphic();
}
+void Builtins::Generate_KeyedLoadICTrampoline_MegamorphicStringKey(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICTrampoline_MegamorphicStringKey();
+}
void Builtins::Generate_LoadGlobalIC_NoFeedback(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
@@ -230,6 +240,11 @@ void Builtins::Generate_LoadGlobalICInsideTypeofBaseline(
assembler.GenerateLoadGlobalICBaseline(TypeofMode::kInside);
}
+void Builtins::Generate_LookupGlobalIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupGlobalIC(TypeofMode::kNotInside);
+}
+
void Builtins::Generate_LookupGlobalICTrampoline(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
@@ -242,6 +257,12 @@ void Builtins::Generate_LookupGlobalICBaseline(
assembler.GenerateLookupGlobalICBaseline(TypeofMode::kNotInside);
}
+void Builtins::Generate_LookupGlobalICInsideTypeof(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupGlobalIC(TypeofMode::kInside);
+}
+
void Builtins::Generate_LookupGlobalICInsideTypeofTrampoline(
compiler::CodeAssemblerState* state) {
AccessorAssembler assembler(state);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 907b41e8da..bbe9a22e5b 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -104,7 +104,7 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
BIND(&tailcall_to_shared);
// Tail call into code object on the SharedFunctionInfo.
- TNode<CodeT> code = GetSharedFunctionInfoCode(shared);
+ TNode<Code> code = GetSharedFunctionInfoCode(shared);
TailCallJSCode(code, context, function, new_target, arg_count);
}
@@ -126,6 +126,21 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
Int32Constant(0));
}
+ TNode<BoolT> IsSharedSpaceIsolate() {
+ TNode<ExternalReference> is_shared_space_isolate_addr = ExternalConstant(
+ ExternalReference::is_shared_space_isolate_flag_address(
+ this->isolate()));
+ return Word32NotEqual(Load<Uint8T>(is_shared_space_isolate_addr),
+ Int32Constant(0));
+ }
+
+ TNode<BoolT> UsesSharedHeap() {
+ TNode<ExternalReference> uses_shared_heap_addr = ExternalConstant(
+ ExternalReference::uses_shared_heap_flag_address(this->isolate()));
+ return Word32NotEqual(Load<Uint8T>(uses_shared_heap_addr),
+ Int32Constant(0));
+ }
+
TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
TNode<IntPtrT> page = PageFromAddress(object);
TNode<IntPtrT> flags = UncheckedCast<IntPtrT>(
@@ -135,14 +150,12 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
IntPtrConstant(0));
}
- TNode<BoolT> IsWhite(TNode<IntPtrT> object) {
- DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
+ TNode<BoolT> IsUnmarked(TNode<IntPtrT> object) {
TNode<IntPtrT> cell;
TNode<IntPtrT> mask;
GetMarkBit(object, &cell, &mask);
TNode<Int32T> mask32 = TruncateIntPtrToInt32(mask);
- // Non-white has 1 for the first bit, so we only need to check for the first
- // bit.
+ // Marked only requires checking a single bit here.
return Word32Equal(Word32And(Load<Int32T>(cell), mask32), Int32Constant(0));
}
@@ -314,8 +327,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
fp_mode);
BIND(&incremental_barrier);
- TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
- IncrementalWriteBarrier(slot, value, fp_mode);
+ IncrementalWriteBarrier(slot, fp_mode);
Goto(next);
}
@@ -367,12 +379,12 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
void IncrementalWriteBarrierMinor(TNode<IntPtrT> slot, TNode<IntPtrT> value,
SaveFPRegsMode fp_mode, Label* next) {
- Label check_is_white(this);
+ Label check_is_unmarked(this);
- InYoungGeneration(value, &check_is_white, next);
+ InYoungGeneration(value, &check_is_unmarked, next);
- BIND(&check_is_white);
- GotoIfNot(IsWhite(value), next);
+ BIND(&check_is_unmarked);
+ GotoIfNot(IsUnmarked(value), next);
{
TNode<ExternalReference> function = ExternalConstant(
@@ -389,51 +401,123 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
void IncrementalWriteBarrierMajor(TNode<IntPtrT> slot, TNode<IntPtrT> value,
SaveFPRegsMode fp_mode, Label* next) {
- Label call_incremental_wb(this);
+ Label marking_cpp_slow_path(this);
+
+ IsValueUnmarkedOrRecordSlot(value, &marking_cpp_slow_path, next);
+
+ BIND(&marking_cpp_slow_path);
+ {
+ TNode<ExternalReference> function = ExternalConstant(
+ ExternalReference::write_barrier_marking_from_code_function());
+ TNode<IntPtrT> object = BitcastTaggedToWord(
+ UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+ CallCFunctionWithCallerSavedRegisters(
+ function, MachineTypeOf<Int32T>::value, fp_mode,
+ std::make_pair(MachineTypeOf<IntPtrT>::value, object),
+ std::make_pair(MachineTypeOf<IntPtrT>::value, slot));
+ Goto(next);
+ }
+ }
+
+ void IsValueUnmarkedOrRecordSlot(TNode<IntPtrT> value, Label* true_label,
+ Label* false_label) {
+ // This code implements the following condition:
+ // IsUnmarked(value) ||
+ // OnEvacuationCandidate(value) &&
+ // !SkipEvacuationCandidateRecording(value)
- // There are two cases we need to call incremental write barrier.
- // 1) value_is_white
- GotoIf(IsWhite(value), &call_incremental_wb);
+ // 1) IsUnmarked(value) || ....
+ GotoIf(IsUnmarked(value), true_label);
- // 2) is_compacting && value_in_EC && obj_isnt_skip
- // is_compacting = true when is_marking = true
+ // 2) OnEvacuationCandidate(value) &&
+ // !SkipEvacuationCandidateRecording(value)
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
- next);
+ false_label);
{
TNode<IntPtrT> object = BitcastTaggedToWord(
UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
Branch(
IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
- next, &call_incremental_wb);
+ false_label, true_label);
}
- BIND(&call_incremental_wb);
+ }
+
+ void IncrementalWriteBarrier(TNode<IntPtrT> slot, SaveFPRegsMode fp_mode) {
+ Label next(this), write_into_shared_object(this),
+ write_into_local_object(this), local_object_and_value(this);
+
+ TNode<IntPtrT> object = BitcastTaggedToWord(
+ UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+ TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
+
+ // Without a shared heap, all objects are local. This is the fast path
+ // always used when no shared heap exists.
+ GotoIfNot(UsesSharedHeap(), &local_object_and_value);
+
+ // From the point-of-view of the shared space isolate (= the main isolate)
+ // shared heap objects are just local objects.
+ GotoIf(IsSharedSpaceIsolate(), &local_object_and_value);
+
+ // These checks here are now only reached by client isolates (= worker
+ // isolates). Now first check whether incremental marking is activated for
+ // that particular object's space. Incremental marking might only be
+ // enabled for either local or shared objects on client isolates.
+ GotoIfNot(IsPageFlagSet(object, MemoryChunk::kIncrementalMarking), &next);
+
+ // We now know that incremental marking is enabled for the given object.
+ // Decide whether to run the shared or local incremental marking barrier.
+ InSharedHeap(object, &write_into_shared_object, &write_into_local_object);
+
+ BIND(&write_into_shared_object);
+
+ // Run the shared incremental marking barrier.
+ IncrementalWriteBarrierShared(object, slot, value, fp_mode, &next);
+
+ BIND(&write_into_local_object);
+
+ // When writing into a local object we can ignore stores of shared object
+ // values since for those no slot recording or marking is required.
+ InSharedHeap(value, &next, &local_object_and_value);
+
+ // Both object and value are now guaranteed to be local objects, run the
+ // local incremental marking barrier.
+ BIND(&local_object_and_value);
+ IncrementalWriteBarrierLocal(slot, value, fp_mode, &next);
+
+ BIND(&next);
+ }
+
+ void IncrementalWriteBarrierShared(TNode<IntPtrT> object, TNode<IntPtrT> slot,
+ TNode<IntPtrT> value,
+ SaveFPRegsMode fp_mode, Label* next) {
+ Label shared_marking_cpp_slow_path(this);
+
+ IsValueUnmarkedOrRecordSlot(value, &shared_marking_cpp_slow_path, next);
+
+ BIND(&shared_marking_cpp_slow_path);
{
TNode<ExternalReference> function = ExternalConstant(
- ExternalReference::write_barrier_marking_from_code_function());
- TNode<IntPtrT> object = BitcastTaggedToWord(
- UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+ ExternalReference::write_barrier_shared_marking_from_code_function());
CallCFunctionWithCallerSavedRegisters(
function, MachineTypeOf<Int32T>::value, fp_mode,
std::make_pair(MachineTypeOf<IntPtrT>::value, object),
std::make_pair(MachineTypeOf<IntPtrT>::value, slot));
+
Goto(next);
}
}
- void IncrementalWriteBarrier(TNode<IntPtrT> slot, TNode<IntPtrT> value,
- SaveFPRegsMode fp_mode) {
- Label call_incremental_wb(this), is_minor(this), is_major(this), next(this);
-
+ void IncrementalWriteBarrierLocal(TNode<IntPtrT> slot, TNode<IntPtrT> value,
+ SaveFPRegsMode fp_mode, Label* next) {
+ Label is_minor(this), is_major(this);
Branch(IsMinorMarking(), &is_minor, &is_major);
BIND(&is_minor);
- IncrementalWriteBarrierMinor(slot, value, fp_mode, &next);
+ IncrementalWriteBarrierMinor(slot, value, fp_mode, next);
BIND(&is_major);
- IncrementalWriteBarrierMajor(slot, value, fp_mode, &next);
-
- BIND(&next);
+ IncrementalWriteBarrierMajor(slot, value, fp_mode, next);
}
void GenerateRecordWrite(SaveFPRegsMode fp_mode) {
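
The reworked IncrementalWriteBarrier above dispatches on the shared-heap configuration before choosing a barrier: without a shared heap, or on the shared-space (main) isolate itself, everything is local; on a client isolate it first checks the page's incremental-marking flag, then routes shared-heap objects to the shared barrier, skips shared values written into local objects, and otherwise runs the local barrier. A standalone model of that decision tree, with the isolate flags and page-header loads reduced to plain booleans.

#include <iostream>

struct HeapObjectInfo {
  bool in_shared_heap;
  bool page_has_incremental_marking;
};

enum class BarrierAction { kNone, kLocalBarrier, kSharedBarrier };

BarrierAction SelectBarrier(bool uses_shared_heap, bool is_shared_space_isolate,
                            const HeapObjectInfo& object,
                            const HeapObjectInfo& value) {
  // No shared heap, or we are the shared-space (main) isolate: treat
  // everything as local.
  if (!uses_shared_heap || is_shared_space_isolate) {
    return BarrierAction::kLocalBarrier;
  }
  // Client isolate: incremental marking may be on for only some spaces.
  if (!object.page_has_incremental_marking) return BarrierAction::kNone;
  if (object.in_shared_heap) return BarrierAction::kSharedBarrier;
  // Writing a shared value into a local object needs no recording or marking.
  if (value.in_shared_heap) return BarrierAction::kNone;
  return BarrierAction::kLocalBarrier;
}

int main() {
  HeapObjectInfo local{false, true}, shared{true, true};
  std::cout << (SelectBarrier(false, false, local, local) ==
                BarrierAction::kLocalBarrier)    // no shared heap at all
            << (SelectBarrier(true, false, shared, local) ==
                BarrierAction::kSharedBarrier)   // client writes into shared object
            << (SelectBarrier(true, false, local, shared) ==
                BarrierAction::kNone)            // shared value into local object
            << "\n";                              // prints 111
  return 0;
}
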
@@ -908,7 +992,7 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
BranchIfSameValue(key, property, &skip, &continue_label);
Bind(&continue_label);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
}
CallBuiltin(Builtin::kCreateDataProperty, context, target, key,
@@ -1144,9 +1228,8 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
const bool builtin_exit_frame = true;
- TNode<CodeT> code =
- HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame));
+ TNode<Code> code = HeapConstant(
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame));
// Unconditionally push argc, target and new target as extra stack arguments.
// They will be used by stack frame iterators when constructing stack trace.
@@ -1218,56 +1301,34 @@ TF_BUILTIN(AbortCSADcheck, CodeStubAssembler) {
TailCallRuntime(Runtime::kAbortCSADcheck, NoContextConstant(), message);
}
-void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
+void Builtins::Generate_CEntry_Return1_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
+ Generate_CEntry(masm, 1, ArgvMode::kStack, false);
}
-void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
+void Builtins::Generate_CEntry_Return1_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
+ Generate_CEntry(masm, 1, ArgvMode::kStack, true);
}
-void Builtins::
- Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
- MacroAssembler* masm) {
- Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
-}
-
-void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
+void Builtins::Generate_CEntry_Return1_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
+ Generate_CEntry(masm, 1, ArgvMode::kRegister, false);
}
-void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
+void Builtins::Generate_CEntry_Return2_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
+ Generate_CEntry(masm, 2, ArgvMode::kStack, false);
}
-void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
+void Builtins::Generate_CEntry_Return2_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
+ Generate_CEntry(masm, 2, ArgvMode::kStack, true);
}
-void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
+void Builtins::Generate_CEntry_Return2_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
-}
-
-void Builtins::
- Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
- MacroAssembler* masm) {
- Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
-}
-
-void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
- MacroAssembler* masm) {
- Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
-}
-
-void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
- MacroAssembler* masm) {
- Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
+ Generate_CEntry(masm, 2, ArgvMode::kRegister, false);
}
#if !defined(V8_TARGET_ARCH_ARM)
@@ -1310,12 +1371,6 @@ void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) {
static_assert(D::kParameterCount == 1);
masm->Trap();
}
-void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
- using D =
- i::CallInterfaceDescriptorFor<Builtin::kMaglevOutOfLinePrologue>::type;
- static_assert(D::kParameterCount == 0);
- masm->Trap();
-}
#endif // V8_TARGET_ARCH_X64
// ES6 [[Get]] operation.
@@ -1505,7 +1560,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
// On failure, tail call back to regular JavaScript by re-calling the given
// function which has been reset to the compile lazy builtin.
- TNode<CodeT> code = LoadJSFunctionCode(function);
+ TNode<Code> code = LoadJSFunctionCode(function);
TailCallJSCode(code, context, function, new_target, arg_count);
}
@@ -1537,5 +1592,27 @@ TF_BUILTIN(FindNonDefaultConstructorOrConstruct, CodeStubAssembler) {
}
}
+// Dispatcher for different implementations of the [[GetOwnProperty]] internal
+// method, returning a PropertyDescriptorObject (a Struct representation of the
+// spec PropertyDescriptor concept)
+TF_BUILTIN(GetOwnPropertyDescriptor, CodeStubAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<JSReceiver>(Descriptor::kReceiver);
+ auto key = Parameter<Name>(Descriptor::kKey);
+
+ Label call_runtime(this);
+
+ TNode<Map> map = LoadMap(receiver);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
+ TailCallBuiltin(Builtin::kOrdinaryGetOwnPropertyDescriptor, context, receiver,
+ key);
+
+ BIND(&call_runtime);
+ TailCallRuntime(Runtime::kGetOwnPropertyDescriptorObject, context, receiver,
+ key);
+}
+
} // namespace internal
} // namespace v8
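
The new GetOwnPropertyDescriptor builtin above is a thin dispatcher: ordinary receivers take the fast OrdinaryGetOwnPropertyDescriptor path, while anything with a special instance type (proxies, API objects with interceptors, and the like) falls back to the C++ runtime. A standalone sketch of that split, with the instance-type check reduced to an illustrative enum.

#include <iostream>
#include <string>

enum class ReceiverKind { kOrdinaryObject, kProxy, kApiObjectWithInterceptors };

std::string GetOwnPropertyDescriptorPath(ReceiverKind kind) {
  const bool is_special_receiver = kind != ReceiverKind::kOrdinaryObject;
  return is_special_receiver ? "Runtime::kGetOwnPropertyDescriptorObject"
                             : "Builtin::kOrdinaryGetOwnPropertyDescriptor";
}

int main() {
  std::cout << GetOwnPropertyDescriptorPath(ReceiverKind::kOrdinaryObject)
            << "\n"
            << GetOwnPropertyDescriptorPath(ReceiverKind::kProxy) << "\n";
  return 0;
}
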
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 26be109106..8902662407 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -113,10 +113,6 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl(
ToLowerCaseKind kind, std::function<void(TNode<Object>)> ReturnFct) {
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
- // Early exit on empty strings.
- const TNode<Uint32T> length = LoadStringLengthAsWord32(string);
- GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string);
-
// Unpack strings if possible, and bail to runtime unless we get a one-byte
// flat string.
ToDirectStringAssembler to_direct(
@@ -153,6 +149,10 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl(
Bind(&fast);
}
+ // Early exit on empty string.
+ const TNode<Uint32T> length = LoadStringLengthAsWord32(string);
+ GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string);
+
const TNode<Int32T> instance_type = to_direct.instance_type();
CSA_DCHECK(this,
Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));
@@ -196,7 +196,7 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl(
Increment(&var_cursor);
},
- kCharSize, IndexAdvanceMode::kPost);
+ kCharSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
// Return the original string if it remained unchanged in order to preserve
// e.g. internalization and private symbols (such as the preserved object
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 26b6af670c..06564c6c60 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -46,7 +46,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(TNode<Context> context,
Label get_next(this), if_notobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iterator), &if_notobject);
- Branch(IsJSReceiver(CAST(iterator)), &get_next, &if_notobject);
+ Branch(JSAnyIsNotPrimitive(CAST(iterator)), &get_next, &if_notobject);
BIND(&if_notobject);
CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
@@ -91,7 +91,7 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
// Generic iterator result case:
{
// 3. If Type(result) is not Object, throw a TypeError exception.
- GotoIfNot(IsJSReceiverMap(result_map), &if_notobject);
+ GotoIfNot(JSAnyIsNotPrimitiveMap(result_map), &if_notobject);
// IteratorComplete
// 2. Return ToBoolean(? Get(iterResult, "done")).
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 1c283a353e..0428372b99 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
- TNode<CodeT> code, TNode<JSFunction> function) {
+ TNode<Code> code, TNode<JSFunction> function) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
@@ -25,7 +25,7 @@ void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id, TNode<JSFunction> function) {
auto context = Parameter<Context>(Descriptor::kContext);
- TNode<CodeT> code = CAST(CallRuntime(function_id, context, function));
+ TNode<Code> code = CAST(CallRuntime(function_id, context, function));
GenerateTailCallToJSCode(code, function);
}
@@ -63,8 +63,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);
- // Optimized code slot is a weak reference to CodeT object.
- TNode<CodeT> optimized_code = CAST(GetHeapObjectAssumeWeak(
+ // Optimized code slot is a weak reference to Code object.
+ TNode<Code> optimized_code = CAST(GetHeapObjectAssumeWeak(
maybe_optimized_code_entry, &heal_optimized_code_slot));
// Check if the optimized code is marked for deopt. If it is, call the
@@ -100,7 +100,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
TVARIABLE(Uint16T, sfi_data_type);
- TNode<CodeT> sfi_code =
+ TNode<Code> sfi_code =
GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function);
TNode<HeapObject> feedback_cell_value = LoadFeedbackCellValue(function);
@@ -124,22 +124,23 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
Goto(&maybe_use_sfi_code);
- // At this point we have a candidate Code object. It's *not* a cached
- // optimized Code object (we'd have tail-called it above). A usual case would
- // be the InterpreterEntryTrampoline to start executing existing bytecode.
+ // At this point we have a candidate InstructionStream object. It's *not* a
+ // cached optimized InstructionStream object (we'd have tail-called it above).
+ // A usual case would be the InterpreterEntryTrampoline to start executing
+ // existing bytecode.
BIND(&maybe_use_sfi_code);
Label tailcall_code(this), baseline(this);
- TVARIABLE(CodeT, code);
+ TVARIABLE(Code, code);
// Check if we have baseline code.
- GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
+ GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODE_TYPE), &baseline);
code = sfi_code;
Goto(&tailcall_code);
BIND(&baseline);
// Ensure we have a feedback vector.
- code = Select<CodeT>(
+ code = Select<Code>(
IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
[=]() {
return CAST(CallRuntime(Runtime::kInstallBaselineCode,
@@ -164,7 +165,7 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
auto function = Parameter<JSFunction>(Descriptor::kTarget);
- TNode<CodeT> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+ TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
// Set the code slot inside the JSFunction to CompileLazy.
StoreObjectField(function, JSFunction::kCodeOffset, code);
GenerateTailCallToJSCode(code, function);
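
Beyond the CodeT-to-Code rename, the CompileLazy hunks above keep the same code-selection order: a cached optimized Code object is tail-called first; otherwise, if the SharedFunctionInfo carries baseline code, a feedback vector must exist (or be installed via the runtime) before it is used; failing that, the SFI's code, typically the InterpreterEntryTrampoline over existing bytecode, runs. A standalone approximation of that ordering, with descriptive stand-in names rather than V8 API.

#include <iostream>
#include <string>

struct FunctionState {
  bool has_optimized_code;
  bool sfi_has_baseline_code;
  bool has_feedback_vector;
};

std::string SelectCodeToRun(const FunctionState& f) {
  // A cached optimized Code object wins and is tail-called directly.
  if (f.has_optimized_code) return "optimized code";
  // Baseline code on the SharedFunctionInfo needs a feedback vector first.
  if (f.sfi_has_baseline_code) {
    return f.has_feedback_vector ? "baseline code"
                                 : "Runtime::kInstallBaselineCode";
  }
  // Otherwise run whatever the SFI points at, typically the interpreter
  // entry trampoline over existing bytecode.
  return "interpreter entry trampoline";
}

int main() {
  std::cout << SelectCodeToRun({true, false, true}) << "\n"
            << SelectCodeToRun({false, true, false}) << "\n"
            << SelectCodeToRun({false, false, false}) << "\n";
  return 0;
}
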
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
index b7dcbb71d9..c8760ec77e 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.h
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -17,7 +17,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- void GenerateTailCallToJSCode(TNode<CodeT> code, TNode<JSFunction> function);
+ void GenerateTailCallToJSCode(TNode<Code> code, TNode<JSFunction> function);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
TNode<JSFunction> function);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index ed9f72cae1..f0d21bba56 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-object-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/codegen/code-stub-assembler.h"
#include "src/common/globals.h"
#include "src/heap/factory-inl.h"
#include "src/ic/accessor-assembler.h"
@@ -18,43 +19,6 @@
namespace v8 {
namespace internal {
-// -----------------------------------------------------------------------------
-// ES6 section 19.1 Object Objects
-
-class ObjectBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void ReturnToStringFormat(TNode<Context> context, TNode<String> string);
-
- // TODO(v8:11167) remove |context| and |object| once OrderedNameDictionary
- // supported.
- void AddToDictionaryIf(TNode<BoolT> condition, TNode<Context> context,
- TNode<Object> object,
- TNode<HeapObject> name_dictionary, Handle<Name> name,
- TNode<Object> value, Label* bailout);
- TNode<JSObject> FromPropertyDescriptor(TNode<Context> context,
- TNode<PropertyDescriptorObject> desc);
- TNode<JSObject> FromPropertyDetails(TNode<Context> context,
- TNode<Object> raw_value,
- TNode<Word32T> details,
- Label* if_bailout);
- TNode<JSObject> ConstructAccessorDescriptor(TNode<Context> context,
- TNode<Object> getter,
- TNode<Object> setter,
- TNode<BoolT> enumerable,
- TNode<BoolT> configurable);
- TNode<JSObject> ConstructDataDescriptor(TNode<Context> context,
- TNode<Object> value,
- TNode<BoolT> writable,
- TNode<BoolT> enumerable,
- TNode<BoolT> configurable);
- TNode<HeapObject> GetAccessorOrUndefined(TNode<HeapObject> accessor,
- Label* if_bailout);
-};
-
class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
public:
explicit ObjectEntriesValuesBuiltinsAssembler(
@@ -738,7 +702,7 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
// If {value} is a primitive HeapObject, we need to return
// false instead of throwing an exception per order of the
// steps in the specification, so check that first here.
- GotoIfNot(IsJSReceiver(value_heap_object), &if_valueisnotreceiver);
+ GotoIfNot(JSAnyIsNotPrimitive(value_heap_object), &if_valueisnotreceiver);
// Simulate the ToObject invocation on {receiver}.
ToObject(context, receiver);
@@ -751,47 +715,43 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
+ TVARIABLE(String, var_default);
+ TVARIABLE(HeapObject, var_holder);
+ TVARIABLE(Map, var_holder_map);
+
Label checkstringtag(this), if_arguments(this), if_array(this),
if_boolean(this), if_date(this), if_error(this), if_function(this),
if_number(this, Label::kDeferred), if_object(this), if_primitive(this),
- if_proxy(this, Label::kDeferred), if_regexp(this), if_string(this),
- if_symbol(this, Label::kDeferred), if_value(this),
- if_bigint(this, Label::kDeferred), if_wasm(this);
+ if_proxy(this, {&var_holder, &var_holder_map}, Label::kDeferred),
+ if_regexp(this), if_string(this), if_symbol(this, Label::kDeferred),
+ if_value(this), if_bigint(this, Label::kDeferred);
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto context = Parameter<Context>(Descriptor::kContext);
- TVARIABLE(String, var_default);
- TVARIABLE(HeapObject, var_holder);
-
// This is arranged to check the likely cases first.
GotoIf(TaggedIsSmi(receiver), &if_number);
TNode<HeapObject> receiver_heap_object = CAST(receiver);
TNode<Map> receiver_map = LoadMap(receiver_heap_object);
var_holder = receiver_heap_object;
+ var_holder_map = receiver_map;
TNode<Uint16T> receiver_instance_type = LoadMapInstanceType(receiver_map);
GotoIf(IsPrimitiveInstanceType(receiver_instance_type), &if_primitive);
GotoIf(IsFunctionInstanceType(receiver_instance_type), &if_function);
const struct {
InstanceType value;
Label* label;
- } kJumpTable[] = {
- {JS_OBJECT_TYPE, &if_object},
- {JS_ARRAY_TYPE, &if_array},
- {JS_REG_EXP_TYPE, &if_regexp},
- {JS_ARGUMENTS_OBJECT_TYPE, &if_arguments},
- {JS_DATE_TYPE, &if_date},
- {JS_API_OBJECT_TYPE, &if_object},
- {JS_SPECIAL_API_OBJECT_TYPE, &if_object},
- {JS_PROXY_TYPE, &if_proxy},
- {JS_ERROR_TYPE, &if_error},
- {JS_PRIMITIVE_WRAPPER_TYPE, &if_value},
-#if V8_ENABLE_WEBASSEMBLY
- {WASM_STRUCT_TYPE, &if_wasm},
- {WASM_ARRAY_TYPE, &if_wasm},
-#endif
- };
+ } kJumpTable[] = {{JS_OBJECT_TYPE, &if_object},
+ {JS_ARRAY_TYPE, &if_array},
+ {JS_REG_EXP_TYPE, &if_regexp},
+ {JS_ARGUMENTS_OBJECT_TYPE, &if_arguments},
+ {JS_DATE_TYPE, &if_date},
+ {JS_API_OBJECT_TYPE, &if_object},
+ {JS_SPECIAL_API_OBJECT_TYPE, &if_object},
+ {JS_PROXY_TYPE, &if_proxy},
+ {JS_ERROR_TYPE, &if_error},
+ {JS_PRIMITIVE_WRAPPER_TYPE, &if_value}};
size_t const kNumCases = arraysize(kJumpTable);
Label* case_labels[kNumCases];
int32_t case_values[kNumCases];
@@ -825,6 +785,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadObjectField<HeapObject>(boolean_initial_map, Map::kPrototypeOffset);
var_default = BooleanToStringConstant();
var_holder = boolean_prototype;
+ var_holder_map = LoadMap(boolean_prototype);
Goto(&checkstringtag);
}
@@ -857,6 +818,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadObjectField<HeapObject>(number_initial_map, Map::kPrototypeOffset);
var_default = NumberToStringConstant();
var_holder = number_prototype;
+ var_holder_map = LoadMap(number_prototype);
Goto(&checkstringtag);
}
@@ -884,54 +846,6 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
Return(UndefinedToStringConstant());
}
- BIND(&if_proxy);
- {
- // Check if the proxy has been revoked.
- Label throw_proxy_handler_revoked(this, Label::kDeferred);
- TNode<HeapObject> handler = CAST(LoadObjectField(
- TNode<JSProxy>::UncheckedCast(receiver), JSProxy::kHandlerOffset));
- CSA_DCHECK(this, IsNullOrJSReceiver(handler));
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
-
- // If {receiver} is a proxy for a JSArray, we default to "[object Array]",
- // otherwise we default to "[object Object]" or "[object Function]" here,
- // depending on whether the {receiver} is callable. The order matters here,
- // i.e. we need to execute the %ArrayIsArray check before the [[Get]] below,
- // as the exception is observable.
- TNode<Object> receiver_is_array =
- CallRuntime(Runtime::kArrayIsArray, context, receiver);
- TNode<String> builtin_tag = Select<String>(
- IsTrue(receiver_is_array), [=] { return ArrayStringConstant(); },
- [=] {
- return Select<String>(
- IsCallableMap(receiver_map),
- [=] { return FunctionStringConstant(); },
- [=] { return ObjectStringConstant(); });
- });
-
- // Lookup the @@toStringTag property on the {receiver}.
- TVARIABLE(Object, var_tag,
- GetProperty(context, receiver,
- isolate()->factory()->to_string_tag_symbol()));
- Label if_tagisnotstring(this), if_tagisstring(this);
- GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring);
- Branch(IsString(CAST(var_tag.value())), &if_tagisstring,
- &if_tagisnotstring);
- BIND(&if_tagisnotstring);
- {
- var_tag = builtin_tag;
- Goto(&if_tagisstring);
- }
- BIND(&if_tagisstring);
- ReturnToStringFormat(context, CAST(var_tag.value()));
-
- BIND(&throw_proxy_handler_revoked);
- {
- ThrowTypeError(context, MessageTemplate::kProxyRevoked,
- "Object.prototype.toString");
- }
- }
-
BIND(&if_regexp);
{
var_default = RegexpToStringConstant();
@@ -949,6 +863,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadObjectField<HeapObject>(string_initial_map, Map::kPrototypeOffset);
var_default = StringToStringConstant();
var_holder = string_prototype;
+ var_holder_map = LoadMap(string_prototype);
Goto(&checkstringtag);
}
@@ -963,6 +878,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadObjectField<HeapObject>(symbol_initial_map, Map::kPrototypeOffset);
var_default = ObjectToStringConstant();
var_holder = symbol_prototype;
+ var_holder_map = LoadMap(symbol_prototype);
Goto(&checkstringtag);
}
@@ -977,6 +893,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadObjectField<HeapObject>(bigint_initial_map, Map::kPrototypeOffset);
var_default = ObjectToStringConstant();
var_holder = bigint_prototype;
+ var_holder_map = LoadMap(bigint_prototype);
Goto(&checkstringtag);
}
@@ -992,7 +909,6 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadJSPrimitiveWrapperValue(CAST(receiver_heap_object));
// We need to start with the object to see if the value was a subclass
// which might have interesting properties.
- var_holder = receiver_heap_object;
GotoIf(TaggedIsSmi(receiver_value), &if_value_is_number);
TNode<Map> receiver_value_map = LoadMap(CAST(receiver_value));
GotoIf(IsHeapNumberMap(receiver_value_map), &if_value_is_number);
@@ -1041,27 +957,48 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
// Check if all relevant maps (including the prototype maps) don't
// have any interesting symbols (i.e. that none of them have the
// @@toStringTag property).
- Label loop(this, &var_holder), return_default(this),
+ Label loop(this, {&var_holder, &var_holder_map}), return_default(this),
return_generic(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
+ Label interesting_symbols(this);
TNode<HeapObject> holder = var_holder.value();
+ TNode<Map> holder_map = var_holder_map.value();
GotoIf(IsNull(holder), &return_default);
- TNode<Map> holder_map = LoadMap(holder);
TNode<Uint32T> holder_bit_field3 = LoadMapBitField3(holder_map);
GotoIf(IsSetWord32<Map::Bits3::MayHaveInterestingSymbolsBit>(
holder_bit_field3),
- &return_generic);
+ &interesting_symbols);
var_holder = LoadMapPrototype(holder_map);
+ var_holder_map = LoadMap(var_holder.value());
Goto(&loop);
+ BIND(&interesting_symbols);
+ {
+ // Check flags for dictionary objects.
+ GotoIf(IsClearWord32<Map::Bits3::IsDictionaryMapBit>(holder_bit_field3),
+ &return_generic);
+ GotoIf(
+ InstanceTypeEqual(LoadMapInstanceType(holder_map), JS_PROXY_TYPE),
+ &if_proxy);
+ TNode<Object> properties =
+ LoadObjectField(holder, JSObject::kPropertiesOrHashOffset);
+ CSA_DCHECK(this, TaggedIsNotSmi(properties));
+ // TODO(pthier): Support swiss dictionaries.
+ if constexpr (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CSA_DCHECK(this, IsNameDictionary(CAST(properties)));
+ TNode<Smi> flags =
+ GetNameDictionaryFlags<NameDictionary>(CAST(properties));
+ GotoIf(IsSetSmi(flags,
+ NameDictionary::MayHaveInterestingSymbolsBit::kMask),
+ &return_generic);
+ var_holder = LoadMapPrototype(holder_map);
+ var_holder_map = LoadMap(var_holder.value());
+ }
+ Goto(&loop);
+ }
}
-#if V8_ENABLE_WEBASSEMBLY
- BIND(&if_wasm);
- ThrowTypeError(context, MessageTemplate::kWasmObjectsAreOpaque);
-#endif
-
BIND(&return_generic);
{
TNode<Object> tag = GetProperty(context, ToObject(context, receiver),
@@ -1074,6 +1011,57 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
BIND(&return_default);
Return(var_default.value());
}
+
+ BIND(&if_proxy);
+ {
+ receiver_heap_object = var_holder.value();
+ receiver_map = var_holder_map.value();
+ // Check if the proxy has been revoked.
+ Label throw_proxy_handler_revoked(this, Label::kDeferred);
+ TNode<HeapObject> handler =
+ CAST(LoadObjectField(receiver_heap_object, JSProxy::kHandlerOffset));
+ CSA_DCHECK(this, IsNullOrJSReceiver(handler));
+ GotoIfNot(JSAnyIsNotPrimitive(handler), &throw_proxy_handler_revoked);
+
+ // If {receiver_heap_object} is a proxy for a JSArray, we default to
+ // "[object Array]", otherwise we default to "[object Object]" or "[object
+ // Function]" here, depending on whether the {receiver_heap_object} is
+ // callable. The order matters here, i.e. we need to execute the
+ // %ArrayIsArray check before the [[Get]] below, as the exception is
+ // observable.
+ TNode<Object> receiver_is_array =
+ CallRuntime(Runtime::kArrayIsArray, context, receiver_heap_object);
+ TNode<String> builtin_tag = Select<String>(
+ IsTrue(receiver_is_array), [=] { return ArrayStringConstant(); },
+ [=] {
+ return Select<String>(
+ IsCallableMap(receiver_map),
+ [=] { return FunctionStringConstant(); },
+ [=] { return ObjectStringConstant(); });
+ });
+
+ // Lookup the @@toStringTag property on the {receiver_heap_object}.
+ TVARIABLE(Object, var_tag,
+ GetProperty(context, receiver_heap_object,
+ isolate()->factory()->to_string_tag_symbol()));
+ Label if_tagisnotstring(this), if_tagisstring(this);
+ GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring);
+ Branch(IsString(CAST(var_tag.value())), &if_tagisstring,
+ &if_tagisnotstring);
+ BIND(&if_tagisnotstring);
+ {
+ var_tag = builtin_tag;
+ Goto(&if_tagisstring);
+ }
+ BIND(&if_tagisstring);
+ ReturnToStringFormat(context, CAST(var_tag.value()));
+
+ BIND(&throw_proxy_handler_revoked);
+ {
+ ThrowTypeError(context, MessageTemplate::kProxyRevoked,
+ "Object.prototype.toString");
+ }
+ }
}
// ES #sec-object.create
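
The ObjectToString changes above keep walking the prototype chain while each map says it cannot have interesting symbols, but the MayHaveInterestingSymbolsBit is now only a first filter: for dictionary-mode holders a second flag on the property dictionary decides whether the generic @@toStringTag lookup is really needed, and proxies are diverted to their own handling. A standalone model of that walk (proxies omitted), with descriptive field names standing in for the map and dictionary bits.

#include <iostream>
#include <string>
#include <vector>

struct PrototypeEntry {
  bool map_may_have_interesting_symbols;
  bool is_dictionary_map;
  bool dictionary_may_have_interesting_symbols;
};

std::string ToStringPath(const std::vector<PrototypeEntry>& chain,
                         const std::string& default_tag) {
  for (const PrototypeEntry& holder : chain) {
    if (!holder.map_may_have_interesting_symbols) continue;  // keep walking
    if (!holder.is_dictionary_map) return "generic @@toStringTag lookup";
    // Dictionary holders get a finer-grained flag on the dictionary itself;
    // only if it is set do we pay for the generic path.
    if (holder.dictionary_may_have_interesting_symbols) {
      return "generic @@toStringTag lookup";
    }
  }
  return default_tag;  // reached the end with no interesting symbols
}

int main() {
  std::cout << ToStringPath({{false, false, false}, {false, false, false}},
                            "[object Array]")
            << "\n"  // clean chain -> cached default tag
            << ToStringPath({{true, true, false}}, "[object Object]")
            << "\n";  // dictionary map without the flag -> still the default
  return 0;
}
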
@@ -1131,7 +1119,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&null_proto);
{
map = LoadSlowObjectWithNullPrototypeMap(native_context);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
new_properties =
AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity);
} else {
@@ -1319,6 +1307,67 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
}
}
+TF_BUILTIN(OrdinaryGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto object = Parameter<JSReceiver>(Descriptor::kReceiver);
+ auto name = Parameter<Name>(Descriptor::kKey);
+ CSA_DCHECK(this, Word32BinaryNot(IsSpecialReceiverInstanceType(
+ LoadMapInstanceType(LoadMap(object)))));
+
+ Label if_notunique_name(this), if_iskeyunique(this), done(this),
+ if_keyisindex(this), call_runtime(this);
+
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ TVARIABLE(Name, var_name, name);
+ TVARIABLE(HeapObject, result, UndefinedConstant());
+
+ TryToName(name, &if_keyisindex, &var_index, &if_iskeyunique, &var_name,
+ &call_runtime, &if_notunique_name);
+
+ BIND(&if_notunique_name);
+ {
+ Label not_in_string_table(this);
+ // If the string was not found in the string table, then no regular
+ // object can have a property with that name, so return |undefined|.
+ TryInternalizeString(CAST(name), &if_keyisindex, &var_index,
+ &if_iskeyunique, &var_name, &done, &call_runtime);
+ }
+
+ BIND(&if_iskeyunique);
+ {
+ Label if_found_value(this), if_not_found(this);
+
+ TVARIABLE(Object, var_value);
+ TVARIABLE(Uint32T, var_details);
+ TVARIABLE(Object, var_raw_value);
+ TNode<Map> map = LoadMap(object);
+ TNode<Int32T> instance_type = LoadMapInstanceType(map);
+
+ TryGetOwnProperty(context, object, object, map, instance_type,
+ var_name.value(), &if_found_value, &var_value,
+ &var_details, &var_raw_value, &done, &call_runtime,
+ kReturnAccessorPair);
+
+ BIND(&if_found_value);
+
+ // 4. Return FromPropertyDetails(desc).
+ result = AllocatePropertyDescriptorObject(context);
+ InitializePropertyDescriptorObject(CAST(result.value()), var_value.value(),
+ var_details.value(), &call_runtime);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ Return(result.value());
+
+ BIND(&if_keyisindex);
+ Goto(&call_runtime);
+
+ BIND(&call_runtime);
+ TailCallRuntime(Runtime::kGetOwnPropertyDescriptorObject, context, object,
+ var_name.value());
+}
+
// ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P )
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
@@ -1336,81 +1385,13 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
key = CallBuiltin(Builtin::kToName, context, key);
// 3. Let desc be ? obj.[[GetOwnProperty]](key).
- Label if_keyisindex(this), if_iskeyunique(this),
- call_runtime(this, Label::kDeferred),
- return_undefined(this, Label::kDeferred), if_notunique_name(this);
+ TNode<Object> desc =
+ CallBuiltin(Builtin::kGetOwnPropertyDescriptor, context, object, key);
- TNode<Map> map = LoadMap(object);
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
- GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
- {
- TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
- TVARIABLE(Name, var_name);
-
- TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_name,
- &call_runtime, &if_notunique_name);
+ // 4. Return FromPropertyDescriptor(desc).
+ TNode<HeapObject> result = FromPropertyDescriptor(context, desc);
- BIND(&if_notunique_name);
- {
- Label not_in_string_table(this);
- TryInternalizeString(CAST(key), &if_keyisindex, &var_index,
- &if_iskeyunique, &var_name, &not_in_string_table,
- &call_runtime);
-
- BIND(&not_in_string_table);
- {
- // If the string was not found in the string table, then no regular
- // object can have a property with that name, so return |undefined|.
- Goto(&return_undefined);
- }
- }
-
- BIND(&if_iskeyunique);
- {
- Label if_found_value(this), return_empty(this), if_not_found(this);
-
- TVARIABLE(Object, var_value);
- TVARIABLE(Uint32T, var_details);
- TVARIABLE(Object, var_raw_value);
-
- TryGetOwnProperty(context, object, object, map, instance_type,
- var_name.value(), &if_found_value, &var_value,
- &var_details, &var_raw_value, &return_empty,
- &if_not_found, kReturnAccessorPair);
-
- BIND(&if_found_value);
- // 4. Return FromPropertyDetails(desc).
- TNode<JSObject> js_desc = FromPropertyDetails(
- context, var_value.value(), var_details.value(), &call_runtime);
- args.PopAndReturn(js_desc);
-
- BIND(&return_empty);
- var_value = UndefinedConstant();
- args.PopAndReturn(UndefinedConstant());
-
- BIND(&if_not_found);
- Goto(&call_runtime);
- }
- }
-
- BIND(&if_keyisindex);
- Goto(&call_runtime);
-
- BIND(&call_runtime);
- {
- TNode<Object> desc =
- CallRuntime(Runtime::kGetOwnPropertyDescriptor, context, object, key);
-
- GotoIf(IsUndefined(desc), &return_undefined);
-
- TNode<PropertyDescriptorObject> desc_object = CAST(desc);
-
- // 4. Return FromPropertyDescriptor(desc).
- TNode<JSObject> js_desc = FromPropertyDescriptor(context, desc_object);
- args.PopAndReturn(js_desc);
- }
- BIND(&return_undefined);
- args.PopAndReturn(UndefinedConstant());
+ args.PopAndReturn(result);
}
// TODO(v8:11167) remove remove |context| and |object| parameters once
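
With the simplification above, Object.getOwnPropertyDescriptor becomes two steps: the GetOwnPropertyDescriptor builtin produces an internal PropertyDescriptorObject or undefined, and the new FromPropertyDescriptor overload converts that to the JS-facing descriptor object while letting undefined flow through. A standalone sketch of that shape using std::optional and illustrative types, not V8's.

#include <iostream>
#include <optional>
#include <string>

struct PropertyDescriptor {
  bool writable;
  bool enumerable;
  bool configurable;
  std::string value;
};

std::string FromPropertyDescriptor(
    const std::optional<PropertyDescriptor>& desc) {
  if (!desc) return "undefined";  // absent property stays undefined
  return "{value: " + desc->value +
         ", writable: " + (desc->writable ? "true" : "false") +
         ", enumerable: " + (desc->enumerable ? "true" : "false") +
         ", configurable: " + (desc->configurable ? "true" : "false") + "}";
}

int main() {
  std::cout << FromPropertyDescriptor(std::nullopt) << "\n";
  std::cout << FromPropertyDescriptor(
                   PropertyDescriptor{true, true, false, "42"})
            << "\n";
  return 0;
}
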
@@ -1533,6 +1514,24 @@ TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
return js_descriptor.value();
}
+TNode<HeapObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
+ TNode<Context> context, TNode<Object> desc) {
+ CSA_DCHECK(this, TaggedIsNotSmi(desc));
+
+ if (IsUndefinedConstant(desc)) return UndefinedConstant();
+
+ Label done(this);
+ TVARIABLE(HeapObject, result, UndefinedConstant());
+ GotoIf(IsUndefined(desc), &done);
+
+ TNode<PropertyDescriptorObject> property_descriptor = CAST(desc);
+ result = FromPropertyDescriptor(context, property_descriptor);
+ Goto(&done);
+
+ BIND(&done);
+ return result.value();
+}
+
TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDetails(
TNode<Context> context, TNode<Object> raw_value, TNode<Word32T> details,
Label* if_bailout) {
diff --git a/deps/v8/src/builtins/builtins-object-gen.h b/deps/v8/src/builtins/builtins-object-gen.h
new file mode 100644
index 0000000000..4c7bbe12c4
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-object-gen.h
@@ -0,0 +1,58 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
+#define V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
+
+#include "src/codegen/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.1 Object Objects
+
+class ObjectBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<HeapObject> FromPropertyDescriptor(TNode<Context>, TNode<Object> desc);
+
+ protected:
+ void ReturnToStringFormat(TNode<Context> context, TNode<String> string);
+
+ // TODO(v8:11167) remove |context| and |object| once OrderedNameDictionary
+ // supported.
+ void AddToDictionaryIf(TNode<BoolT> condition, TNode<Context> context,
+ TNode<Object> object,
+ TNode<HeapObject> name_dictionary, Handle<Name> name,
+ TNode<Object> value, Label* bailout);
+ TNode<JSObject> FromPropertyDescriptor(TNode<Context> context,
+ TNode<PropertyDescriptorObject> desc);
+ TNode<JSObject> FromPropertyDetails(TNode<Context> context,
+ TNode<Object> raw_value,
+ TNode<Word32T> details,
+ Label* if_bailout);
+ TNode<PropertyDescriptorObject> DescriptorFromPropertyDetails(
+ TNode<Context> context, TNode<Object> raw_value, TNode<Word32T> details,
+ Label* if_bailout);
+ TNode<JSObject> ConstructAccessorDescriptor(TNode<Context> context,
+ TNode<Object> getter,
+ TNode<Object> setter,
+ TNode<BoolT> enumerable,
+ TNode<BoolT> configurable);
+ TNode<JSObject> ConstructDataDescriptor(TNode<Context> context,
+ TNode<Object> value,
+ TNode<BoolT> writable,
+ TNode<BoolT> enumerable,
+ TNode<BoolT> configurable);
+ TNode<HeapObject> GetAccessorOrUndefined(TNode<HeapObject> accessor,
+ Label* if_bailout);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index e6d26ef7c7..77ad03715a 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -148,11 +148,7 @@ Object ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
}
return ObjectLookupAccessor(isolate, prototype, key, component);
}
-
case LookupIterator::WASM_OBJECT:
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kWasmObjectsAreOpaque));
-
case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::DATA:
return ReadOnlyRoots(isolate).undefined_value();
@@ -219,9 +215,10 @@ BUILTIN(ObjectFreeze) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
- FROZEN, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
+ MAYBE_RETURN(
+ JSReceiver::SetIntegrityLevel(isolate, Handle<JSReceiver>::cast(object),
+ FROZEN, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
}
return *object;
}
@@ -299,10 +296,11 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
BUILTIN(ObjectIsFrozen) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result = object->IsJSReceiver()
- ? JSReceiver::TestIntegrityLevel(
- Handle<JSReceiver>::cast(object), FROZEN)
- : Just(true);
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ isolate, Handle<JSReceiver>::cast(object), FROZEN)
+ : Just(true);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -311,10 +309,11 @@ BUILTIN(ObjectIsFrozen) {
BUILTIN(ObjectIsSealed) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result = object->IsJSReceiver()
- ? JSReceiver::TestIntegrityLevel(
- Handle<JSReceiver>::cast(object), SEALED)
- : Just(true);
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ isolate, Handle<JSReceiver>::cast(object), SEALED)
+ : Just(true);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -360,9 +359,10 @@ BUILTIN(ObjectSeal) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
- SEALED, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
+ MAYBE_RETURN(
+ JSReceiver::SetIntegrityLevel(isolate, Handle<JSReceiver>::cast(object),
+ SEALED, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
}
return *object;
}
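
For readers skimming these hunks: SetIntegrityLevel returns a Maybe<bool>, and MAYBE_RETURN bails out with the exception sentinel when the Maybe is empty; the change above merely threads the isolate through explicitly. A minimal standalone model of that control flow, using std::optional in place of Maybe and a toy struct in place of the isolate (the names here are illustrative, not V8 API):

    #include <iostream>
    #include <optional>
    #include <string>

    // Stand-in for the isolate: records a "pending exception" as a message.
    struct FakeIsolate {
      std::optional<std::string> pending_exception;
    };

    // Stand-in for JSReceiver::SetIntegrityLevel: empty optional on failure,
    // after setting the pending exception, mirroring Maybe<bool>.
    std::optional<bool> SetIntegrityLevel(FakeIsolate* isolate, bool should_fail) {
      if (should_fail) {
        isolate->pending_exception = "TypeError: cannot freeze";
        return std::nullopt;  // Nothing<bool>() in V8 terms.
      }
      return true;
    }

    // Mirrors the MAYBE_RETURN(call, exception_sentinel) pattern.
    const char* ObjectFreezeLike(FakeIsolate* isolate, bool should_fail) {
      std::optional<bool> result = SetIntegrityLevel(isolate, should_fail);
      if (!result.has_value()) return "<exception>";  // early return, like MAYBE_RETURN
      return "<the object itself>";
    }

    int main() {
      FakeIsolate isolate;
      std::cout << ObjectFreezeLike(&isolate, false) << "\n";  // <the object itself>
      std::cout << ObjectFreezeLike(&isolate, true) << "\n";   // <exception>
    }
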
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 29eec7c9f5..041f291291 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -104,7 +104,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
// 2. If handler is null, throw a TypeError exception.
CSA_DCHECK(this, IsNullOrJSReceiver(handler));
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
+ GotoIfNot(JSAnyIsNotPrimitive(handler), &throw_proxy_handler_revoked);
// 3. Assert: Type(handler) is Object.
CSA_DCHECK(this, IsJSReceiver(handler));
@@ -149,6 +149,8 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
CSA_DCHECK(this, IsCallable(proxy));
+ PerformStackCheck(context);
+
Label throw_proxy_handler_revoked(this, Label::kDeferred),
trap_undefined(this), not_an_object(this, Label::kDeferred);
@@ -158,7 +160,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
// 2. If handler is null, throw a TypeError exception.
CSA_DCHECK(this, IsNullOrJSReceiver(handler));
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
+ GotoIfNot(JSAnyIsNotPrimitive(handler), &throw_proxy_handler_revoked);
// 3. Assert: Type(handler) is Object.
CSA_DCHECK(this, IsJSReceiver(handler));
@@ -185,7 +187,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
// 9. If Type(newObj) is not Object, throw a TypeError exception.
GotoIf(TaggedIsSmi(new_obj), &not_an_object);
- GotoIfNot(IsJSReceiver(CAST(new_obj)), &not_an_object);
+ GotoIfNot(JSAnyIsNotPrimitive(CAST(new_obj)), &not_an_object);
// 10. Return newObj.
args.PopAndReturn(new_obj);
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index 5ce624cab9..7f5b2095bf 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -46,32 +46,6 @@ BUILTIN(ReflectDefineProperty) {
return *isolate->factory()->ToBoolean(result.FromJust());
}
-// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
-BUILTIN(ReflectGetOwnPropertyDescriptor) {
- HandleScope scope(isolate);
- DCHECK_LE(3, args.length());
- Handle<Object> target = args.at(1);
- Handle<Object> key = args.at(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.getOwnPropertyDescriptor")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- PropertyDescriptor desc;
- Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
- isolate, Handle<JSReceiver>::cast(target), name, &desc);
- MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception());
- if (!found.FromJust()) return ReadOnlyRoots(isolate).undefined_value();
- return *desc.ToObject(isolate);
-}
-
// ES6 section 26.1.11 Reflect.ownKeys
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index c40e21b1a1..32e9ae742c 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -45,49 +45,6 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
return IntPtrConstant(0);
}
-// If code is a builtin, return the address to the (possibly embedded) builtin
-// code entry, otherwise return the entry of the code object itself.
-TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<CodeT> code) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // When external code space is enabled we can load the entry point directly
- // from the CodeT object.
- return GetCodeEntry(code);
- }
-
- TVARIABLE(RawPtrT, var_result);
-
- Label if_code_is_off_heap(this), out(this);
- TNode<Int32T> builtin_index =
- LoadObjectField<Int32T>(code, Code::kBuiltinIndexOffset);
- {
- GotoIfNot(
- Word32Equal(builtin_index,
- Int32Constant(static_cast<int>(Builtin::kNoBuiltinId))),
- &if_code_is_off_heap);
- var_result = ReinterpretCast<RawPtrT>(
- IntPtrAdd(BitcastTaggedToWord(code),
- IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
- Goto(&out);
- }
-
- BIND(&if_code_is_off_heap);
- {
- TNode<IntPtrT> builtin_entry_offset_from_isolate_root =
- IntPtrAdd(IntPtrConstant(IsolateData::builtin_entry_table_offset()),
- ChangeInt32ToIntPtr(Word32Shl(
- builtin_index, Int32Constant(kSystemPointerSizeLog2))));
-
- var_result = ReinterpretCast<RawPtrT>(
- Load(MachineType::Pointer(),
- ExternalConstant(ExternalReference::isolate_root(isolate())),
- builtin_entry_offset_from_isolate_root));
- Goto(&out);
- }
-
- BIND(&out);
- return var_result.value();
-}
-
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
@@ -559,7 +516,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
#endif
GotoIf(TaggedIsSmi(var_code.value()), &runtime);
- TNode<CodeT> code = CAST(var_code.value());
+ TNode<Code> code = CAST(var_code.value());
Label if_success(this), if_exception(this, Label::kDeferred);
{
@@ -623,7 +580,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
MachineType arg8_type = type_tagged;
TNode<JSRegExp> arg8 = regexp;
- TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
+ TNode<RawPtrT> code_entry = GetCodeEntry(code);
// AIX uses function descriptors on CFunction calls. code_entry in this case
// may also point to a Regex interpreter entry trampoline which does not
@@ -712,7 +669,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kTaggedSize);
},
- kInt32Size, IndexAdvanceMode::kPost);
+ kInt32Size, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
}
var_result = match_info;
@@ -779,10 +736,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
Label out(this);
TVARIABLE(BoolT, var_result);
-#ifdef V8_ENABLE_FORCE_SLOW_PATH
var_result = Int32FalseConstant();
GotoIfForceSlowPath(&out);
-#endif
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<HeapObject> regexp_fun =
@@ -1068,13 +1023,13 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
BIND(&next); \
} while (false)
+ CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices);
CASE_FOR_FLAG("global", JSRegExp::kGlobal);
CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase);
CASE_FOR_FLAG("multiline", JSRegExp::kMultiline);
CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll);
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
- CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices);
#undef CASE_FOR_FLAG
#define CASE_FOR_FLAG(NAME, V8_FLAG_EXTERN_REF, FLAG) \
@@ -1132,7 +1087,6 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
#undef CASE_FOR_FLAG
if (is_fastpath) {
-#ifdef V8_ENABLE_FORCE_SLOW_PATH
result = string;
Goto(&done);
@@ -1145,9 +1099,6 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
BIND(&done);
return result.value();
-#else
- return string;
-#endif
} else {
return string;
}
@@ -1639,7 +1590,8 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
GotoIfNot(SmiEqual(match_to, last_matched_until), &next);
const TNode<BoolT> is_unicode =
- FastFlagGetter(regexp, JSRegExp::kUnicode);
+ Word32Or(FastFlagGetter(regexp, JSRegExp::kUnicode),
+ FastFlagGetter(regexp, JSRegExp::kUnicodeSets));
const TNode<Number> new_next_search_from =
AdvanceStringIndex(string, next_search_from, is_unicode, true);
var_next_search_from = CAST(new_next_search_from);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index f894493561..96497a0a99 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -21,8 +21,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Smi> SmiZero();
TNode<IntPtrT> IntPtrZero();
- TNode<RawPtrT> LoadCodeObjectEntry(TNode<CodeT> code);
-
// Allocate either a JSRegExpResult or a JSRegExpResultWithIndices (depending
// on has_indices) with the given length (the number of captures, including
// the match itself), index (the index where the match starts), and input
diff --git a/deps/v8/src/builtins/builtins-shadow-realm-gen.cc b/deps/v8/src/builtins/builtins-shadow-realm-gen.cc
index 7c2223bbc2..2cf2b1aa96 100644
--- a/deps/v8/src/builtins/builtins-shadow-realm-gen.cc
+++ b/deps/v8/src/builtins/builtins-shadow-realm-gen.cc
@@ -38,6 +38,9 @@ class ShadowRealmBuiltinsAssembler : public CodeStubAssembler {
TNode<JSFunction> AllocateImportValueFulfilledFunction(
TNode<NativeContext> caller_context, TNode<NativeContext> eval_context,
TNode<String> specifier, TNode<String> export_name);
+ void ShadowRealmThrow(TNode<Context> context,
+ MessageTemplate fallback_message,
+ TNode<Object> exception);
};
TNode<JSObject> ShadowRealmBuiltinsAssembler::AllocateJSWrappedFunction(
@@ -97,6 +100,14 @@ void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode<DescriptorArray> array,
GotoIfNot(IsAccessorInfo(CAST(value)), bailout);
}
+void ShadowRealmBuiltinsAssembler::ShadowRealmThrow(
+ TNode<Context> context, MessageTemplate fallback_message,
+ TNode<Object> exception) {
+ TNode<Smi> template_index = SmiConstant(static_cast<int>(fallback_message));
+ CallRuntime(Runtime::kShadowRealmThrow, context, template_index, exception);
+ Unreachable();
+}
+
// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue
TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
@@ -109,7 +120,7 @@ TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) {
// 2. Return value.
GotoIf(TaggedIsSmi(value), &if_primitive);
- GotoIfNot(IsJSReceiver(CAST(value)), &if_primitive);
+ GotoIfNot(JSAnyIsNotPrimitive(CAST(value)), &if_primitive);
// 1. If Type(value) is Object, then
// 1a. If IsCallable(value) is false, throw a TypeError exception.
@@ -259,7 +270,7 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
StoreFixedArrayElement(
wrapped_args, IntPtrAdd(index, IntPtrConstant(1)), wrapped_value);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
TVARIABLE(Object, var_exception);
TNode<Object> result;
@@ -285,11 +296,8 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
// 11. Else,
BIND(&call_exception);
// 11a. Throw a TypeError exception.
- // TODO(v8:11989): provide a non-observable inspection on the
- // pending_exception to the newly created TypeError.
- // https://github.com/tc39/proposal-shadowrealm/issues/353
- ThrowTypeError(context, MessageTemplate::kCallShadowRealmFunctionThrown,
- var_exception.value());
+ ShadowRealmThrow(context, MessageTemplate::kCallWrappedFunctionThrew,
+ var_exception.value());
BIND(&target_not_callable);
// A wrapped value should not be non-callable.
@@ -416,10 +424,9 @@ TF_BUILTIN(ShadowRealmImportValueFulfilled, ShadowRealmBuiltinsAssembler) {
TF_BUILTIN(ShadowRealmImportValueRejected, ShadowRealmBuiltinsAssembler) {
TNode<Context> context = Parameter<Context>(Descriptor::kContext);
- // TODO(v8:11989): provide a non-observable inspection on the
- // pending_exception to the newly created TypeError.
- // https://github.com/tc39/proposal-shadowrealm/issues/353
- ThrowTypeError(context, MessageTemplate::kImportShadowRealmRejected);
+ TNode<Object> exception = Parameter<Object>(Descriptor::kException);
+ ShadowRealmThrow(context, MessageTemplate::kImportShadowRealmRejected,
+ exception);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-shadow-realm.cc b/deps/v8/src/builtins/builtins-shadow-realm.cc
index a76fd120fb..03f1cceaca 100644
--- a/deps/v8/src/builtins/builtins-shadow-realm.cc
+++ b/deps/v8/src/builtins/builtins-shadow-realm.cc
@@ -202,11 +202,11 @@ BUILTIN(ShadowRealmPrototypeEvaluate) {
*factory->NewError(isolate->syntax_error_function(), message));
}
// 21. If result.[[Type]] is not normal, throw a TypeError exception.
- // TODO(v8:11989): provide a non-observable inspection on the
- // pending_exception to the newly created TypeError.
- // https://github.com/tc39/proposal-shadowrealm/issues/353
+ Handle<String> string =
+ Object::NoSideEffectsToString(isolate, pending_exception);
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCallShadowRealmFunctionThrown));
+ isolate,
+ NewTypeError(MessageTemplate::kCallShadowRealmEvaluateThrew, string));
}
// 22. Return ? GetWrappedValue(callerRealm, result.[[Value]]).
Handle<Object> wrapped_result;
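
The ShadowRealm changes above replace an opaque TypeError with one carrying a string obtained from the inner exception through a side-effect-free conversion. A rough standalone model of that catch-inside, re-throw-outside shape using plain C++ exceptions (none of this is V8 API; the function names are made up):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Side-effect-free stringification stand-in: never calls back into user code.
    std::string NoSideEffectsToString(const std::exception& inner) {
      return inner.what() ? inner.what() : "<unprintable>";
    }

    // Run `callee` in the "inner realm"; on failure, surface a fresh error in the
    // caller's realm that only embeds a safe string, never the original exception
    // object itself.
    template <typename F>
    void EvaluateInRealm(F callee) {
      try {
        callee();
      } catch (const std::exception& inner) {
        throw std::runtime_error("ShadowRealm evaluate threw: " +
                                 NoSideEffectsToString(inner));
      }
    }

    int main() {
      try {
        EvaluateInRealm([] { throw std::logic_error("boom in the other realm"); });
      } catch (const std::runtime_error& outer) {
        std::cout << outer.what() << "\n";
      }
    }
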
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 37f7879357..f6dc59899a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -13,6 +13,7 @@
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
+#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/objects/property-cell.h"
@@ -124,17 +125,15 @@ TNode<IntPtrT> StringBuiltinsAssembler::SearchOneByteInOneByteString(
}
void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
- TNode<String> right) {
+ TNode<String> right,
+ TNode<IntPtrT> length) {
TVARIABLE(String, var_left, left);
TVARIABLE(String, var_right, right);
Label if_equal(this), if_notequal(this), if_indirect(this, Label::kDeferred),
restart(this, {&var_left, &var_right});
- TNode<IntPtrT> lhs_length = LoadStringLengthAsWord(left);
- TNode<IntPtrT> rhs_length = LoadStringLengthAsWord(right);
-
- // Strings with different lengths cannot be equal.
- GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
+ CSA_DCHECK(this, IntPtrEqual(LoadStringLengthAsWord(left), length));
+ CSA_DCHECK(this, IntPtrEqual(LoadStringLengthAsWord(right), length));
Goto(&restart);
BIND(&restart);
@@ -144,7 +143,7 @@ void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
- StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, lhs_length,
+ StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, length,
&if_equal, &if_notequal, &if_indirect);
BIND(&if_indirect);
@@ -199,22 +198,42 @@ void StringBuiltinsAssembler::StringEqual_Core(
Int32Constant(0)),
if_indirect);
+ Label if_skip_fast_case(this), if_fast_case(this), if_oneonebytestring(this),
+ if_twotwobytestring(this), if_onetwobytestring(this),
+ if_twoonebytestring(this);
+
// Dispatch based on the {lhs} and {rhs} string encoding.
int const kBothStringEncodingMask =
kStringEncodingMask | (kStringEncodingMask << 8);
+ int const kBothExternalStringTag =
+ kExternalStringTag | (kExternalStringTag << 8);
int const kOneOneByteStringTag = kOneByteStringTag | (kOneByteStringTag << 8);
int const kTwoTwoByteStringTag = kTwoByteStringTag | (kTwoByteStringTag << 8);
int const kOneTwoByteStringTag = kOneByteStringTag | (kTwoByteStringTag << 8);
- Label if_oneonebytestring(this), if_twotwobytestring(this),
- if_onetwobytestring(this), if_twoonebytestring(this);
+
TNode<Word32T> masked_instance_types =
Word32And(both_instance_types, Int32Constant(kBothStringEncodingMask));
- GotoIf(
- Word32Equal(masked_instance_types, Int32Constant(kOneOneByteStringTag)),
- &if_oneonebytestring);
- GotoIf(
- Word32Equal(masked_instance_types, Int32Constant(kTwoTwoByteStringTag)),
- &if_twotwobytestring);
+ TNode<Word32T> both_are_one_byte =
+ Word32Equal(masked_instance_types, Int32Constant(kOneOneByteStringTag));
+ TNode<Word32T> both_are_two_byte =
+ Word32Equal(masked_instance_types, Int32Constant(kTwoTwoByteStringTag));
+
+ // If both strings are not external we know that their payload length is
+ // kTagged sized. When they have the same type we can compare in chunks. The
+ // padding bytes are set to zero.
+ GotoIf(Word32And(both_instance_types, Int32Constant(kBothExternalStringTag)),
+ &if_skip_fast_case);
+ TVARIABLE(IntPtrT, byte_length, length);
+ GotoIf(both_are_one_byte, &if_fast_case);
+ byte_length = WordShl(byte_length.value(), IntPtrConstant(1));
+ Branch(both_are_two_byte, &if_fast_case, &if_skip_fast_case);
+ BIND(&if_fast_case);
+ StringEqual_FastLoop(lhs, lhs_instance_type, rhs, rhs_instance_type,
+ byte_length.value(), if_equal, if_not_equal);
+
+ BIND(&if_skip_fast_case);
+ GotoIf(both_are_one_byte, &if_oneonebytestring);
+ GotoIf(both_are_two_byte, &if_twotwobytestring);
Branch(
Word32Equal(masked_instance_types, Int32Constant(kOneTwoByteStringTag)),
&if_onetwobytestring, &if_twoonebytestring);
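
The dispatch above packs the two instance types into one word (lhs in the low byte, rhs shifted left by 8) so a single mask-and-compare answers "both one-byte?" or "both two-byte?". A standalone sketch of the same bit trick with made-up tag values (the constants below are illustrative, not V8's real instance-type encoding):

    #include <cstdint>
    #include <cstdio>

    // Illustrative tags: in this sketch the encoding bit is bit 3.
    constexpr uint32_t kStringEncodingMask = 0x8;
    constexpr uint32_t kOneByteStringTag = 0x8;
    constexpr uint32_t kTwoByteStringTag = 0x0;

    // Combined masks covering both the low byte (lhs) and the high byte (rhs).
    constexpr uint32_t kBothStringEncodingMask =
        kStringEncodingMask | (kStringEncodingMask << 8);
    constexpr uint32_t kOneOneByteStringTag =
        kOneByteStringTag | (kOneByteStringTag << 8);
    constexpr uint32_t kTwoTwoByteStringTag =
        kTwoByteStringTag | (kTwoByteStringTag << 8);

    const char* Classify(uint32_t lhs_type, uint32_t rhs_type) {
      // Pack both instance types into one word, then mask once.
      uint32_t both = lhs_type | (rhs_type << 8);
      uint32_t masked = both & kBothStringEncodingMask;
      if (masked == kOneOneByteStringTag) return "both one-byte";
      if (masked == kTwoTwoByteStringTag) return "both two-byte";
      return "mixed encodings";
    }

    int main() {
      std::printf("%s\n", Classify(kOneByteStringTag, kOneByteStringTag));
      std::printf("%s\n", Classify(kOneByteStringTag, kTwoByteStringTag));
    }
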
@@ -240,44 +259,133 @@ void StringBuiltinsAssembler::StringEqual_Core(
if_not_equal);
}
+void StringBuiltinsAssembler::StringEqual_FastLoop(
+ TNode<String> lhs, TNode<Word32T> lhs_instance_type, TNode<String> rhs,
+ TNode<Word32T> rhs_instance_type, TNode<IntPtrT> byte_length,
+ Label* if_equal, Label* if_not_equal) {
+ TNode<RawPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
+ TNode<RawPtrT> rhs_data = DirectStringData(rhs, rhs_instance_type);
+
+ const int kChunk = kTaggedSize;
+ static_assert(kObjectAlignment % kChunk == 0);
+ // Round up the byte_length to `ceiling(length / kChunk) * kChunk`
+ TNode<IntPtrT> rounded_up_len = UncheckedCast<IntPtrT>(WordAnd(
+ UncheckedCast<WordT>(IntPtrAdd(byte_length, IntPtrConstant(kChunk - 1))),
+ UncheckedCast<WordT>(IntPtrConstant(~(kChunk - 1)))));
+ TNode<RawPtrT> lhs_end = RawPtrAdd(lhs_data, rounded_up_len);
+
+#ifdef ENABLE_SLOW_DCHECKS
+ // The padding must be zeroed for chunked comparison to be correct. This loop
+ // checks all bytes being 0 from byte_length up to rounded_up_len.
+ // If we ever stop zeroing the padding, GenerateStringRelationalComparison
+ // below will also need to be updated.
+ {
+ TVARIABLE(IntPtrT, var_padding_offset, byte_length);
+ Label loop(this, &var_padding_offset), loop_end(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ GotoIf(WordEqual(var_padding_offset.value(), rounded_up_len), &loop_end);
+
+ // Load the next byte
+ TNode<Word32T> lhs_value = UncheckedCast<Word32T>(Load(
+ MachineType::Uint8(), lhs_data,
+ WordShl(var_padding_offset.value(),
+ ElementSizeLog2Of(MachineType::Uint8().representation()))));
+ TNode<Word32T> rhs_value = UncheckedCast<Word32T>(Load(
+ MachineType::Uint8(), rhs_data,
+ WordShl(var_padding_offset.value(),
+ ElementSizeLog2Of(MachineType::Uint8().representation()))));
+
+ // Check the padding is zero.
+ CSA_CHECK(this, Word32Equal(lhs_value, Int32Constant(0)));
+ CSA_CHECK(this, Word32Equal(rhs_value, Int32Constant(0)));
+
+ // Advance to next byte.
+ var_padding_offset =
+ IntPtrAdd(var_padding_offset.value(), IntPtrConstant(1));
+ Goto(&loop);
+ }
+ BIND(&loop_end);
+ }
+#endif // ENABLE_SLOW_DCHECKS
+
+ // Compare strings in chunks of either 4 or 8 bytes, depending on the
+ // alignment of allocations.
+ static_assert(kChunk == ElementSizeInBytes(MachineRepresentation::kWord64) ||
+ kChunk == ElementSizeInBytes(MachineRepresentation::kWord32));
+ TVARIABLE(RawPtrT, rhs_ptr, rhs_data);
+ VariableList vars({&rhs_ptr}, zone());
+
+ if (kChunk == ElementSizeInBytes(MachineRepresentation::kWord64)) {
+ BuildFastLoop<RawPtrT>(
+ vars, lhs_data, lhs_end,
+ [&](TNode<RawPtrT> lhs_ptr) {
+ TNode<Word64T> lhs_value = Load<Uint64T>(lhs_ptr);
+ TNode<Word64T> rhs_value = Load<Uint64T>(rhs_ptr.value());
+ GotoIf(Word64NotEqual(lhs_value, rhs_value), if_not_equal);
+
+ // Advance {rhs_ptr} to next characters. {lhs_ptr} will be
+ // advanced along loop's {var_index}.
+ Increment(&rhs_ptr, kChunk);
+ },
+ kChunk, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
+ } else {
+ BuildFastLoop<RawPtrT>(
+ vars, lhs_data, lhs_end,
+ [&](TNode<RawPtrT> lhs_ptr) {
+ TNode<Word32T> lhs_value = Load<Uint32T>(lhs_ptr);
+ TNode<Word32T> rhs_value = Load<Uint32T>(rhs_ptr.value());
+ GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
+
+ // Advance {rhs_ptr} to next characters. {lhs_ptr} will be
+ // advanced along loop's {var_index}.
+ Increment(&rhs_ptr, kChunk);
+ },
+ kChunk, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
+ }
+ Goto(if_equal);
+}
+
void StringBuiltinsAssembler::StringEqual_Loop(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, MachineType lhs_type,
TNode<String> rhs, TNode<Word32T> rhs_instance_type, MachineType rhs_type,
TNode<IntPtrT> length, Label* if_equal, Label* if_not_equal) {
+ Comment("StringEqual_Loop");
CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(lhs), length));
CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Compute the effective offset of the first character.
TNode<RawPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
TNode<RawPtrT> rhs_data = DirectStringData(rhs, rhs_instance_type);
+ TNode<RawPtrT> lhs_end =
+ RawPtrAdd(lhs_data, WordShl(length, IntPtrConstant(ElementSizeLog2Of(
+ lhs_type.representation()))));
+ TVARIABLE(RawPtrT, rhs_ptr, rhs_data);
+ VariableList vars({&rhs_ptr}, zone());
// Loop over the {lhs} and {rhs} strings to see if they are equal.
- TVARIABLE(IntPtrT, var_offset, IntPtrConstant(0));
- Label loop(this, &var_offset);
- Goto(&loop);
- BIND(&loop);
- {
- // If {offset} equals {end}, no difference was found, so the
- // strings are equal.
- GotoIf(WordEqual(var_offset.value(), length), if_equal);
-
- // Load the next characters from {lhs} and {rhs}.
- TNode<Word32T> lhs_value = UncheckedCast<Word32T>(
- Load(lhs_type, lhs_data,
- WordShl(var_offset.value(),
- ElementSizeLog2Of(lhs_type.representation()))));
- TNode<Word32T> rhs_value = UncheckedCast<Word32T>(
- Load(rhs_type, rhs_data,
- WordShl(var_offset.value(),
- ElementSizeLog2Of(rhs_type.representation()))));
-
- // Check if the characters match.
- GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
-
- // Advance to next character.
- var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
- Goto(&loop);
- }
+ BuildFastLoop<RawPtrT>(
+ vars, lhs_data, lhs_end,
+ [&](TNode<RawPtrT> lhs_ptr) {
+ TNode<Word32T> lhs_value =
+ UncheckedCast<Word32T>(Load(lhs_type, lhs_ptr));
+ TNode<Word32T> rhs_value =
+ UncheckedCast<Word32T>(Load(rhs_type, rhs_ptr.value()));
+
+ // Check if the characters match.
+ GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
+
+ // Advance {rhs_ptr} to next characters. {lhs_ptr} will be
+ // advanced along loop's {var_index}.
+ Increment(&rhs_ptr, ElementSizeInBytes(rhs_type.representation()));
+ },
+ ElementSizeInBytes(lhs_type.representation()), LoopUnrollingMode::kNo,
+ IndexAdvanceMode::kPost);
+
+ // All characters are checked and no difference was found, so the strings
+ // are equal.
+ Goto(if_equal);
}
TNode<String> StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint(
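
StringEqual_FastLoop above compares whole tagged-size chunks, relying on the byte length being rounded up via (len + kChunk - 1) & ~(kChunk - 1) and on the allocation padding past the characters being zero, so the trailing partial chunk still compares correctly. A standalone C++ model of that idea over plain buffers (the buffers are padded by hand here; this is a sketch, not the CSA code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    constexpr size_t kChunk = 8;  // word-sized chunks, like kTaggedSize on 64-bit

    // Round byte_length up to the next multiple of kChunk.
    size_t RoundUp(size_t byte_length) {
      return (byte_length + kChunk - 1) & ~(kChunk - 1);
    }

    // Compare two buffers chunk by chunk. Both buffers must be allocated out to
    // RoundUp(byte_length) bytes with the padding zeroed, which is what makes
    // comparing the trailing partial chunk safe.
    bool ChunkedEqual(const uint8_t* lhs, const uint8_t* rhs, size_t byte_length) {
      size_t rounded = RoundUp(byte_length);
      for (size_t offset = 0; offset < rounded; offset += kChunk) {
        uint64_t a, b;
        std::memcpy(&a, lhs + offset, kChunk);  // unaligned-safe chunk load
        std::memcpy(&b, rhs + offset, kChunk);
        if (a != b) return false;
      }
      return true;
    }

    int main() {
      const char* s1 = "hello, chunked world";
      const char* s2 = "hello, chunked world";
      size_t len = std::strlen(s1);
      // Emulate the zeroed padding past the payload.
      std::vector<uint8_t> a(RoundUp(len), 0), b(RoundUp(len), 0);
      std::memcpy(a.data(), s1, len);
      std::memcpy(b.data(), s2, len);
      std::printf("%d\n", ChunkedEqual(a.data(), b.data(), len));  // 1
    }
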
@@ -315,15 +423,36 @@ TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
TNode<String> right) {
// Added string can be a cons string.
Comment("Allocating ConsString");
- TNode<Int32T> left_instance_type = LoadInstanceType(left);
- TNode<Int32T> right_instance_type = LoadInstanceType(right);
+ TVARIABLE(String, first, left);
+ TVARIABLE(Int32T, left_instance_type, LoadInstanceType(left));
+ Label handle_right(this);
+ GotoIfNot(InstanceTypeEqual(left_instance_type.value(), THIN_STRING_TYPE),
+ &handle_right);
+ {
+ first = LoadObjectField<String>(left, ThinString::kActualOffset);
+ left_instance_type = LoadInstanceType(first.value());
+ Goto(&handle_right);
+ }
+ BIND(&handle_right);
+ TVARIABLE(String, second, right);
+ TVARIABLE(Int32T, right_instance_type, LoadInstanceType(right));
+ Label allocate(this);
+ GotoIfNot(InstanceTypeEqual(right_instance_type.value(), THIN_STRING_TYPE),
+ &allocate);
+ {
+ second = LoadObjectField<String>(right, ThinString::kActualOffset);
+ right_instance_type = LoadInstanceType(second.value());
+ Goto(&allocate);
+ }
+
+ BIND(&allocate);
// Determine the resulting ConsString map to use depending on whether
// any of {left} or {right} has two byte encoding.
static_assert(kOneByteStringTag != 0);
static_assert(kTwoByteStringTag == 0);
TNode<Int32T> combined_instance_type =
- Word32And(left_instance_type, right_instance_type);
+ Word32And(left_instance_type.value(), right_instance_type.value());
TNode<Map> result_map = CAST(Select<Object>(
IsSetWord32(combined_instance_type, kStringEncodingMask),
[=] { return ConsOneByteStringMapConstant(); },
@@ -333,8 +462,10 @@ TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(result, ConsString::kRawHashFieldOffset,
Int32Constant(String::kEmptyHashField));
- StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset,
+ first.value());
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset,
+ second.value());
return CAST(result);
}
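
AllocateConsString now peels ThinStrings off both inputs so the new ConsString references the actual strings rather than forwarding wrappers. A small standalone model of that unwrap-then-link step, with toy node types standing in for V8's string shapes:

    #include <iostream>
    #include <string>

    // Toy string shapes: a flat payload, a thin forwarding wrapper, or a cons pair.
    struct Str {
      enum Kind { kFlat, kThin, kCons } kind;
      std::string flat;             // kFlat
      const Str* actual = nullptr;  // kThin: forwards to the real string
      const Str* first = nullptr;   // kCons
      const Str* second = nullptr;  // kCons
    };

    // Mirror of the hunk above: look through a thin wrapper before linking.
    const Str* UnwrapThin(const Str* s) {
      return s->kind == Str::kThin ? s->actual : s;
    }

    Str MakeCons(const Str* left, const Str* right) {
      Str result;
      result.kind = Str::kCons;
      result.first = UnwrapThin(left);  // never store the wrapper itself
      result.second = UnwrapThin(right);
      return result;
    }

    int main() {
      Str a{Str::kFlat, "foo"};
      Str thin{Str::kThin, "", &a};
      Str b{Str::kFlat, "bar"};
      Str cons = MakeCons(&thin, &b);
      std::cout << (cons.first == &a) << "\n";  // 1: thin wrapper was skipped
    }
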
@@ -545,7 +676,7 @@ TF_BUILTIN(SubString, StringBuiltinsAssembler) {
}
void StringBuiltinsAssembler::GenerateStringRelationalComparison(
- TNode<String> left, TNode<String> right, Operation op) {
+ TNode<String> left, TNode<String> right, StringComparison op) {
TVARIABLE(String, var_left, left);
TVARIABLE(String, var_right, right);
@@ -583,59 +714,70 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
BIND(&if_bothonebyteseqstrings);
{
- // Load the length of {lhs} and {rhs}.
TNode<IntPtrT> lhs_length = LoadStringLengthAsWord(lhs);
TNode<IntPtrT> rhs_length = LoadStringLengthAsWord(rhs);
- // Determine the minimum length.
TNode<IntPtrT> length = IntPtrMin(lhs_length, rhs_length);
- // Compute the effective offset of the first character.
- TNode<IntPtrT> begin =
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Compute the first offset after the string from the length.
- TNode<IntPtrT> end = IntPtrAdd(begin, length);
-
// Loop over the {lhs} and {rhs} strings to see if they are equal.
+ constexpr int kBeginOffset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> begin = IntPtrConstant(kBeginOffset);
+ TNode<IntPtrT> end = IntPtrAdd(begin, length);
TVARIABLE(IntPtrT, var_offset, begin);
- Label loop(this, &var_offset);
- Goto(&loop);
- BIND(&loop);
+ Label chunk_loop(this, &var_offset), char_loop(this, &var_offset);
+ Label if_done(this);
+
+ // Unrolled first iteration.
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &if_done);
+ TNode<Uint32T> lhs_chunk = Load<Uint32T>(lhs, IntPtrConstant(kBeginOffset));
+ TNode<Uint32T> rhs_chunk = Load<Uint32T>(rhs, IntPtrConstant(kBeginOffset));
+ GotoIf(Word32NotEqual(lhs_chunk, rhs_chunk), &char_loop);
+ // We could make the chunk size depend on kTaggedSize, but kTaggedSize > 4
+ // is rare at the time of this writing.
+ constexpr int kChunkSize = sizeof(uint32_t);
+ var_offset = IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag +
+ kChunkSize);
+
+ Goto(&chunk_loop);
+
+ // Try skipping over chunks of 4 identical characters.
+ // This depends on padding (between strings' lengths and the actual end
+ // of the heap object) being zeroed out.
+ BIND(&chunk_loop);
{
- // Check if {offset} equals {end}.
- Label if_done(this), if_notdone(this);
- Branch(WordEqual(var_offset.value(), end), &if_done, &if_notdone);
+ GotoIf(IntPtrGreaterThanOrEqual(var_offset.value(), end), &if_done);
- BIND(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- TNode<Uint8T> lhs_value = Load<Uint8T>(lhs, var_offset.value());
- TNode<Uint8T> rhs_value = Load<Uint8T>(rhs, var_offset.value());
+ TNode<Uint32T> lhs_chunk = Load<Uint32T>(lhs, var_offset.value());
+ TNode<Uint32T> rhs_chunk = Load<Uint32T>(rhs, var_offset.value());
+ GotoIf(Word32NotEqual(lhs_chunk, rhs_chunk), &char_loop);
- // Check if the characters match.
- Label if_valueissame(this), if_valueisnotsame(this);
- Branch(Word32Equal(lhs_value, rhs_value), &if_valueissame,
- &if_valueisnotsame);
-
- BIND(&if_valueissame);
- {
- // Advance to next character.
- var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
- }
- Goto(&loop);
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kChunkSize));
+ Goto(&chunk_loop);
+ }
- BIND(&if_valueisnotsame);
- Branch(Uint32LessThan(lhs_value, rhs_value), &if_less, &if_greater);
- }
+ BIND(&char_loop);
+ {
+ GotoIf(WordEqual(var_offset.value(), end), &if_done);
- BIND(&if_done);
- {
- // All characters up to the min length are equal, decide based on
- // string length.
- GotoIf(IntPtrEqual(lhs_length, rhs_length), &if_equal);
- Branch(IntPtrLessThan(lhs_length, rhs_length), &if_less, &if_greater);
- }
+ TNode<Uint8T> lhs_char = Load<Uint8T>(lhs, var_offset.value());
+ TNode<Uint8T> rhs_char = Load<Uint8T>(rhs, var_offset.value());
+
+ Label if_charsdiffer(this);
+ GotoIf(Word32NotEqual(lhs_char, rhs_char), &if_charsdiffer);
+
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
+ Goto(&char_loop);
+
+ BIND(&if_charsdiffer);
+ Branch(Uint32LessThan(lhs_char, rhs_char), &if_less, &if_greater);
+ }
+
+ BIND(&if_done);
+ {
+ // All characters up to the min length are equal, decide based on
+ // string length.
+ GotoIf(IntPtrEqual(lhs_length, rhs_length), &if_equal);
+ Branch(IntPtrLessThan(lhs_length, rhs_length), &if_less, &if_greater);
}
}
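
The rewritten one-byte relational comparison first skips 4-byte chunks that compare equal (again leaning on zeroed padding), drops to a per-character loop at the first differing chunk, and finally breaks ties by length. A standalone sketch of that three-stage shape over plain byte arrays, hand-padded so the chunk reads stay in bounds (this is a model, not the CSA code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    constexpr size_t kChunkSize = sizeof(uint32_t);

    // Returns -1, 0 or 1, like the kCompare variant in the hunk above. Both
    // buffers must be zero-padded out to a multiple of kChunkSize so the chunk
    // loads cannot read out of bounds.
    int Compare(const uint8_t* lhs, size_t lhs_len,
                const uint8_t* rhs, size_t rhs_len) {
      size_t length = lhs_len < rhs_len ? lhs_len : rhs_len;
      size_t offset = 0;
      // Stage 1: skip identical 4-byte chunks.
      while (offset < length) {
        uint32_t a, b;
        std::memcpy(&a, lhs + offset, kChunkSize);
        std::memcpy(&b, rhs + offset, kChunkSize);
        if (a != b) break;
        offset += kChunkSize;
      }
      // Stage 2: per-character loop from the first differing chunk.
      for (; offset < length; ++offset) {
        if (lhs[offset] != rhs[offset]) return lhs[offset] < rhs[offset] ? -1 : 1;
      }
      // Stage 3: all shared characters equal, decide by length.
      if (lhs_len == rhs_len) return 0;
      return lhs_len < rhs_len ? -1 : 1;
    }

    int main() {
      auto pad = [](const char* s) {
        size_t len = std::strlen(s);
        std::vector<uint8_t> v(((len + kChunkSize - 1) / kChunkSize + 1) * kChunkSize, 0);
        std::memcpy(v.data(), s, len);
        return v;
      };
      auto a = pad("stringify"), b = pad("strings");
      std::printf("%d\n", Compare(a.data(), 9, b.data(), 7));  // -1
    }
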
@@ -646,102 +788,118 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
rhs_instance_type, &restart);
// TODO(bmeurer): Add support for two byte string relational comparisons.
switch (op) {
- case Operation::kLessThan:
+ case StringComparison::kLessThan:
TailCallRuntime(Runtime::kStringLessThan, NoContextConstant(), lhs,
rhs);
break;
- case Operation::kLessThanOrEqual:
+ case StringComparison::kLessThanOrEqual:
TailCallRuntime(Runtime::kStringLessThanOrEqual, NoContextConstant(),
lhs, rhs);
break;
- case Operation::kGreaterThan:
+ case StringComparison::kGreaterThan:
TailCallRuntime(Runtime::kStringGreaterThan, NoContextConstant(), lhs,
rhs);
break;
- case Operation::kGreaterThanOrEqual:
+ case StringComparison::kGreaterThanOrEqual:
TailCallRuntime(Runtime::kStringGreaterThanOrEqual, NoContextConstant(),
lhs, rhs);
break;
- default:
- UNREACHABLE();
+ case StringComparison::kCompare:
+ TailCallRuntime(Runtime::kStringCompare, NoContextConstant(), lhs, rhs);
+ break;
}
}
BIND(&if_less);
switch (op) {
- case Operation::kLessThan:
- case Operation::kLessThanOrEqual:
+ case StringComparison::kLessThan:
+ case StringComparison::kLessThanOrEqual:
Return(TrueConstant());
break;
- case Operation::kGreaterThan:
- case Operation::kGreaterThanOrEqual:
+ case StringComparison::kGreaterThan:
+ case StringComparison::kGreaterThanOrEqual:
Return(FalseConstant());
break;
- default:
- UNREACHABLE();
+
+ case StringComparison::kCompare:
+ Return(SmiConstant(-1));
+ break;
}
BIND(&if_equal);
switch (op) {
- case Operation::kLessThan:
- case Operation::kGreaterThan:
+ case StringComparison::kLessThan:
+ case StringComparison::kGreaterThan:
Return(FalseConstant());
break;
- case Operation::kLessThanOrEqual:
- case Operation::kGreaterThanOrEqual:
+ case StringComparison::kLessThanOrEqual:
+ case StringComparison::kGreaterThanOrEqual:
Return(TrueConstant());
break;
- default:
- UNREACHABLE();
+
+ case StringComparison::kCompare:
+ Return(SmiConstant(0));
+ break;
}
BIND(&if_greater);
switch (op) {
- case Operation::kLessThan:
- case Operation::kLessThanOrEqual:
+ case StringComparison::kLessThan:
+ case StringComparison::kLessThanOrEqual:
Return(FalseConstant());
break;
- case Operation::kGreaterThan:
- case Operation::kGreaterThanOrEqual:
+ case StringComparison::kGreaterThan:
+ case StringComparison::kGreaterThanOrEqual:
Return(TrueConstant());
break;
- default:
- UNREACHABLE();
+
+ case StringComparison::kCompare:
+ Return(SmiConstant(1));
+ break;
}
}
TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
auto left = Parameter<String>(Descriptor::kLeft);
auto right = Parameter<String>(Descriptor::kRight);
- GenerateStringEqual(left, right);
+ auto length = UncheckedParameter<IntPtrT>(Descriptor::kLength);
+ GenerateStringEqual(left, right, length);
}
TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
auto left = Parameter<String>(Descriptor::kLeft);
auto right = Parameter<String>(Descriptor::kRight);
- GenerateStringRelationalComparison(left, right, Operation::kLessThan);
+ GenerateStringRelationalComparison(left, right, StringComparison::kLessThan);
}
TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
auto left = Parameter<String>(Descriptor::kLeft);
auto right = Parameter<String>(Descriptor::kRight);
- GenerateStringRelationalComparison(left, right, Operation::kLessThanOrEqual);
+ GenerateStringRelationalComparison(left, right,
+ StringComparison::kLessThanOrEqual);
}
TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
auto left = Parameter<String>(Descriptor::kLeft);
auto right = Parameter<String>(Descriptor::kRight);
- GenerateStringRelationalComparison(left, right, Operation::kGreaterThan);
+ GenerateStringRelationalComparison(left, right,
+ StringComparison::kGreaterThan);
+}
+
+TF_BUILTIN(StringCompare, StringBuiltinsAssembler) {
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
+ GenerateStringRelationalComparison(left, right, StringComparison::kCompare);
}
TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
auto left = Parameter<String>(Descriptor::kLeft);
auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right,
- Operation::kGreaterThanOrEqual);
+ StringComparison::kGreaterThanOrEqual);
}
TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
@@ -772,7 +930,7 @@ TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
TNode<Uint32T> unsigned_argc =
Unsigned(TruncateIntPtrToInt32(arguments.GetLengthWithoutReceiver()));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
- // if the parent frame is not an arguments adaptor frame.
+ // if the parent frame is not an inlined arguments frame.
Label if_oneargument(this), if_notoneargument(this);
Branch(IntPtrEqual(arguments.GetLengthWithoutReceiver(), IntPtrConstant(1)),
&if_oneargument, &if_notoneargument);
@@ -993,19 +1151,38 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
RequireObjectCoercible(context, receiver, "String.prototype.replace");
// Redirect to replacer method if {search[@@replace]} is not undefined.
+ {
+ Label next(this);
+ Label check_for_replace(this);
+
+ // The protector guarantees that the Number and String wrapper
+ // prototypes do not contain Symbol.replace (a.k.a. @@replace).
+ GotoIf(IsNumberStringPrototypeNoReplaceProtectorCellInvalid(),
+ &check_for_replace);
+ // Smi is safe thanks to the protector.
+ GotoIf(TaggedIsSmi(search), &next);
+ // String is safe thanks to the protector.
+ GotoIf(IsString(CAST(search)), &next);
+ // HeapNumber is safe thanks to the protector.
+ Branch(IsHeapNumber(CAST(search)), &next, &check_for_replace);
+
+ BIND(&check_for_replace);
+ MaybeCallFunctionAtSymbol(
+ context, search, receiver, isolate()->factory()->replace_symbol(),
+ DescriptorIndexNameValue{
+ JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
+ RootIndex::kreplace_symbol, Context::REGEXP_REPLACE_FUNCTION_INDEX},
+ [=]() {
+ Return(CallBuiltin(Builtin::kRegExpReplace, context, search, receiver,
+ replace));
+ },
+ [=](TNode<Object> fn) {
+ Return(Call(context, fn, search, receiver, replace));
+ });
+ Goto(&next);
- MaybeCallFunctionAtSymbol(
- context, search, receiver, isolate()->factory()->replace_symbol(),
- DescriptorIndexNameValue{JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
- RootIndex::kreplace_symbol,
- Context::REGEXP_REPLACE_FUNCTION_INDEX},
- [=]() {
- Return(CallBuiltin(Builtin::kRegExpReplace, context, search, receiver,
- replace));
- },
- [=](TNode<Object> fn) {
- Return(Call(context, fn, search, receiver, replace));
- });
+ BIND(&next);
+ }
// Convert {receiver} and {search} to strings.
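
The new fast path consults a protector: while nobody has installed Symbol.replace on the Number or String wrapper prototypes, a Smi, String, or HeapNumber search value cannot carry a custom @@replace, so the symbol lookup can be skipped. A standalone sketch of that cheap-flag-guards-expensive-lookup shape (the flag and types below are stand-ins, not V8's protector machinery):

    #include <cstdio>
    #include <string>
    #include <variant>

    // Stand-in for the protector cell: flips to false the first time user code
    // installs a custom @@replace on a primitive wrapper prototype.
    bool g_no_custom_replace_on_primitives = true;

    using SearchValue = std::variant<int, double, std::string>;  // Smi / HeapNumber / String

    // Expensive slow path: would look up search[Symbol.replace] and maybe call it.
    bool SlowPathNeedsSymbolLookup(const SearchValue&) {
      std::puts("slow path: performing @@replace lookup");
      return true;
    }

    bool NeedsSymbolLookup(const SearchValue& search) {
      // Fast path: primitives cannot carry @@replace while the protector holds.
      if (g_no_custom_replace_on_primitives) return false;
      return SlowPathNeedsSymbolLookup(search);
    }

    int main() {
      std::printf("%d\n", NeedsSymbolLookup(SearchValue{std::string("abc")}));  // 0
      g_no_custom_replace_on_primitives = false;  // someone patched a prototype
      std::printf("%d\n", NeedsSymbolLookup(SearchValue{42}));  // slow path, then 1
    }
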
@@ -1289,7 +1466,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
StoreFixedArrayElement(elements, index, entry);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
result_array = AllocateJSArray(array_map, elements, length_smi);
@@ -1492,6 +1669,61 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
return var_result.value();
}
+TNode<BoolT> StringBuiltinsAssembler::HasUnpairedSurrogate(TNode<String> string,
+ Label* if_indirect) {
+ TNode<Uint16T> instance_type = LoadInstanceType(string);
+ CSA_DCHECK(this, Word32Equal(Word32And(instance_type,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)));
+ GotoIfNot(Word32Equal(Word32And(instance_type,
+ Int32Constant(kIsIndirectStringMask |
+ kUncachedExternalStringMask)),
+ Int32Constant(0)),
+ if_indirect);
+
+ TNode<RawPtrT> string_data = DirectStringData(string, instance_type);
+ TNode<IntPtrT> length = LoadStringLengthAsWord(string);
+
+ const TNode<ExternalReference> has_unpaired_surrogate =
+ ExternalConstant(ExternalReference::has_unpaired_surrogate());
+ return UncheckedCast<BoolT>(
+ CallCFunction(has_unpaired_surrogate, MachineType::Uint32(),
+ std::make_pair(MachineType::Pointer(), string_data),
+ std::make_pair(MachineType::IntPtr(), length)));
+}
+
+void StringBuiltinsAssembler::ReplaceUnpairedSurrogates(TNode<String> source,
+ TNode<String> dest,
+ Label* if_indirect) {
+ TNode<Uint16T> source_instance_type = LoadInstanceType(source);
+ CSA_DCHECK(this, Word32Equal(Word32And(source_instance_type,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)));
+ GotoIfNot(Word32Equal(Word32And(source_instance_type,
+ Int32Constant(kIsIndirectStringMask |
+ kUncachedExternalStringMask)),
+ Int32Constant(0)),
+ if_indirect);
+
+ TNode<RawPtrT> source_data = DirectStringData(source, source_instance_type);
+ // The destination string is a freshly allocated SeqString, and so is always
+ // direct.
+ TNode<Uint16T> dest_instance_type = LoadInstanceType(dest);
+ CSA_DCHECK(this, Word32Equal(Word32And(dest_instance_type,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)));
+ TNode<RawPtrT> dest_data = DirectStringData(dest, dest_instance_type);
+ TNode<IntPtrT> length = LoadStringLengthAsWord(source);
+ CSA_DCHECK(this, IntPtrEqual(length, LoadStringLengthAsWord(dest)));
+
+ const TNode<ExternalReference> replace_unpaired_surrogates =
+ ExternalConstant(ExternalReference::replace_unpaired_surrogates());
+ CallCFunction(replace_unpaired_surrogates, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), source_data),
+ std::make_pair(MachineType::Pointer(), dest_data),
+ std::make_pair(MachineType::IntPtr(), length));
+}
+
void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
TNode<Object> object, TNode<Context> context, Label* if_true,
Label* if_false) {
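
HasUnpairedSurrogate above hands a direct two-byte string to a C helper through an external reference. For orientation, here is a standalone C++ routine performing the same scan over a UTF-16 code-unit buffer; it follows the textbook definition of an unpaired surrogate and is not the exact function V8 calls:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    bool IsLeadSurrogate(uint16_t u) { return u >= 0xD800 && u <= 0xDBFF; }
    bool IsTrailSurrogate(uint16_t u) { return u >= 0xDC00 && u <= 0xDFFF; }

    // True if any lead surrogate lacks a following trail, or any trail surrogate
    // lacks a preceding lead.
    bool HasUnpairedSurrogate(const uint16_t* data, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        if (IsLeadSurrogate(data[i])) {
          if (i + 1 == length || !IsTrailSurrogate(data[i + 1])) return true;
          ++i;  // skip the trail unit of a well-formed pair
        } else if (IsTrailSurrogate(data[i])) {
          return true;  // trail with no lead before it
        }
      }
      return false;
    }

    int main() {
      std::vector<uint16_t> ok = {0x0041, 0xD83D, 0xDE00};   // "A" + a paired emoji
      std::vector<uint16_t> bad = {0x0041, 0xD83D, 0x0042};  // lead with no trail
      std::printf("%d %d\n", HasUnpairedSurrogate(ok.data(), ok.size()),
                  HasUnpairedSurrogate(bad.data(), bad.size()));  // 0 1
    }
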
@@ -1573,7 +1805,7 @@ void StringBuiltinsAssembler::CopyStringCharacters(
Increment(&current_to_offset, to_increment);
}
},
- from_increment, IndexAdvanceMode::kPost);
+ from_increment, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
}
// A wrapper around CopyStringCharacters which determines the correct string
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index bd1390dc24..267dc9d9b7 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -33,6 +33,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<Int32T> LoadSurrogatePairAt(TNode<String> string, TNode<IntPtrT> length,
TNode<IntPtrT> index,
UnicodeEncoding encoding);
+ TNode<BoolT> HasUnpairedSurrogate(TNode<String> string, Label* if_indirect);
+
+ void ReplaceUnpairedSurrogates(TNode<String> source, TNode<String> dest,
+ Label* if_indirect);
TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
@@ -85,6 +89,18 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> start_position);
protected:
+ enum class StringComparison {
+ kLessThan,
+ kLessThanOrEqual,
+ kGreaterThan,
+ kGreaterThanOrEqual,
+ kCompare
+ };
+
+ void StringEqual_FastLoop(TNode<String> lhs, TNode<Word32T> lhs_instance_type,
+ TNode<String> rhs, TNode<Word32T> rhs_instance_type,
+ TNode<IntPtrT> byte_length, Label* if_equal,
+ Label* if_not_equal);
void StringEqual_Loop(TNode<String> lhs, TNode<Word32T> lhs_instance_type,
MachineType lhs_type, TNode<String> rhs,
TNode<Word32T> rhs_instance_type, MachineType rhs_type,
@@ -100,9 +116,11 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
const TNode<IntPtrT> search_length,
const TNode<IntPtrT> start_position);
- void GenerateStringEqual(TNode<String> left, TNode<String> right);
+ void GenerateStringEqual(TNode<String> left, TNode<String> right,
+ TNode<IntPtrT> length);
void GenerateStringRelationalComparison(TNode<String> left,
- TNode<String> right, Operation op);
+ TNode<String> right,
+ StringComparison op);
using StringAtAccessor = std::function<TNode<Object>(
TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>;
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 769b3223bc..f04ec2891b 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -24,8 +24,8 @@ transitioning macro ToStringImpl(context: Context, o: JSAny): String {
case (oddball: Oddball): {
return oddball.to_string;
}
- case (JSReceiver): {
- result = NonPrimitiveToPrimitive_String(context, result);
+ case (receiver: JSReceiver): {
+ result = conversion::NonPrimitiveToPrimitive_String_Inline(receiver);
continue;
}
case (Symbol): {
@@ -43,6 +43,10 @@ transitioning builtin ToString(context: Context, o: JSAny): String {
return ToStringImpl(context, o);
}
+transitioning macro ToString_Inline(context: Context, o: JSAny): String {
+ return ToStringImpl(context, o);
+}
+
extern macro StringBuiltinsAssembler::SubString(
String, uintptr, uintptr): String;
diff --git a/deps/v8/src/builtins/builtins-struct.cc b/deps/v8/src/builtins/builtins-struct.cc
index cc2e1278ec..626fd2b916 100644
--- a/deps/v8/src/builtins/builtins-struct.cc
+++ b/deps/v8/src/builtins/builtins-struct.cc
@@ -60,37 +60,47 @@ BUILTIN(SharedStructTypeConstructor) {
}
int num_properties = static_cast<int>(num_properties_double);
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(
- num_properties, 0, AllocationType::kSharedOld);
-
- // Build up the descriptor array.
- UniqueNameHandleSet all_field_names;
- for (int i = 0; i < num_properties; ++i) {
- Handle<Object> raw_field_name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, raw_field_name,
- JSReceiver::GetElement(isolate, field_names_arg, i));
- Handle<Name> field_name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
- Object::ToName(isolate, raw_field_name));
- field_name = factory->InternalizeName(field_name);
-
- // Check that there are no duplicates.
- const bool is_duplicate = !all_field_names.insert(field_name).second;
- if (is_duplicate) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDuplicateTemplateProperty,
- field_name));
+ Handle<DescriptorArray> maybe_descriptors;
+ if (num_properties != 0) {
+ maybe_descriptors = factory->NewDescriptorArray(num_properties, 0,
+ AllocationType::kSharedOld);
+
+ // Build up the descriptor array.
+ UniqueNameHandleSet all_field_names;
+ for (int i = 0; i < num_properties; ++i) {
+ Handle<Object> raw_field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, raw_field_name,
+ JSReceiver::GetElement(isolate, field_names_arg, i));
+ Handle<Name> field_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, field_name, Object::ToName(isolate, raw_field_name));
+ field_name = factory->InternalizeName(field_name);
+
+ // TODO(v8:12547): Support Symbols?
+ if (field_name->IsSymbol()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kSymbolToString));
+ }
+
+ // Check that there are no duplicates.
+ const bool is_duplicate = !all_field_names.insert(field_name).second;
+ if (is_duplicate) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDuplicateTemplateProperty,
+ field_name));
+ }
+
+ // Shared structs' fields need to be aligned, so make it all tagged.
+ PropertyDetails details(
+ PropertyKind::kData, SEALED, PropertyLocation::kField,
+ PropertyConstness::kMutable, Representation::Tagged(), i);
+ maybe_descriptors->Set(InternalIndex(i), *field_name,
+ MaybeObject::FromObject(FieldType::Any()),
+ details);
}
-
- // Shared structs' fields need to be aligned, so make it all tagged.
- PropertyDetails details(
- PropertyKind::kData, SEALED, PropertyLocation::kField,
- PropertyConstness::kMutable, Representation::Tagged(), i);
- descriptors->Set(InternalIndex(i), *field_name,
- MaybeObject::FromObject(FieldType::Any()), details);
+ maybe_descriptors->Sort();
}
- descriptors->Sort();
Handle<SharedFunctionInfo> info =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
@@ -101,7 +111,7 @@ BUILTIN(SharedStructTypeConstructor) {
Handle<JSFunction> constructor =
Factory::JSFunctionBuilder{isolate, info, isolate->native_context()}
- .set_map(isolate->strict_function_map())
+ .set_map(isolate->strict_function_with_readonly_prototype_map())
.Build();
int instance_size;
@@ -113,7 +123,6 @@ BUILTIN(SharedStructTypeConstructor) {
JS_SHARED_STRUCT_TYPE, instance_size, TERMINAL_FAST_ELEMENTS_KIND,
in_object_properties, AllocationType::kSharedMap);
- instance_map->InitializeDescriptors(isolate, *descriptors);
// Structs have fixed layout ahead of time, so there's no slack.
int out_of_object_properties = num_properties - in_object_properties;
if (out_of_object_properties == 0) {
@@ -123,17 +132,16 @@ BUILTIN(SharedStructTypeConstructor) {
}
instance_map->set_is_extensible(false);
JSFunction::SetInitialMap(isolate, constructor, instance_map,
- factory->null_value());
-
- // The constructor is not a shared object, so the shared map should not point
- // to it.
- instance_map->set_constructor_or_back_pointer(*factory->null_value());
+ factory->null_value(), factory->null_value());
+ constructor->map().SetConstructor(ReadOnlyRoots(isolate).null_value());
+ constructor->map().set_has_non_instance_prototype(true);
// Pre-create the enum cache in the shared space, as otherwise for-in
// enumeration will incorrectly create an enum cache in the per-thread heap.
if (num_properties == 0) {
instance_map->SetEnumLength(0);
} else {
+ instance_map->InitializeDescriptors(isolate, *maybe_descriptors);
FastKeyAccumulator::InitializeFastPropertyEnumCache(
isolate, instance_map, num_properties, AllocationType::kSharedOld);
DCHECK_EQ(num_properties, instance_map->EnumLength());
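
The SharedStructType constructor now allocates descriptors only when there are properties, rejects symbol keys, and detects duplicates via the .second of a set insertion. A standalone model of just that validation loop over plain strings (validation only, none of the descriptor or map plumbing; the '@' prefix stands in for symbol keys):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // Returns an error message, or an empty string if the field list is valid.
    std::string ValidateFieldNames(const std::vector<std::string>& names) {
      std::set<std::string> seen;
      for (const std::string& name : names) {
        // Mirror of the symbol check: this sketch just bans a marker prefix.
        if (!name.empty() && name[0] == '@') return "symbols are not supported";
        // insert().second is false when the name was already present.
        if (!seen.insert(name).second) return "duplicate field name: " + name;
      }
      return "";
    }

    int main() {
      std::cout << ValidateFieldNames({"x", "y"}) << "|\n";         // valid -> "|"
      std::cout << ValidateFieldNames({"x", "x"}) << "|\n";         // duplicate
      std::cout << ValidateFieldNames({"@@toStringTag"}) << "|\n";  // symbol-like key
    }
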
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index b770607f4e..452012c096 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -510,17 +510,20 @@ template <typename TValue>
void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromPreparedValue(
TNode<Context> context, TNode<JSTypedArray> typed_array,
TNode<UintPtrT> index, TNode<TValue> prepared_value,
- ElementsKind elements_kind, Label* if_detached) {
+ ElementsKind elements_kind, Label* if_detached_or_out_of_bounds) {
static_assert(
std::is_same<TValue, Word32T>::value ||
std::is_same<TValue, Float32T>::value ||
std::is_same<TValue, Float64T>::value ||
std::is_same<TValue, BigInt>::value,
"Only Word32T, Float32T, Float64T or BigInt values are allowed");
- // ToNumber/ToBigInt may execute JavaScript code, which could detach
- // the array's buffer.
- TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
- GotoIf(IsDetachedBuffer(buffer), if_detached);
+ // ToNumber/ToBigInt (or other functions called by the upper level) may
+ // execute JavaScript code, which could detach the TypedArray's buffer or make
+ // the TypedArray out of bounds.
+ TNode<UintPtrT> length = LoadJSTypedArrayLengthAndCheckDetached(
+ typed_array, if_detached_or_out_of_bounds);
+ GotoIf(UintPtrGreaterThanOrEqual(index, length),
+ if_detached_or_out_of_bounds);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
StoreElement(data_ptr, elements_kind, index, prepared_value);
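
The point of the change above is ordering: the TypedArray's length and detached state are re-checked after the value has been prepared, because ToNumber/ToBigInt can run arbitrary JavaScript that detaches or shrinks the backing buffer. A standalone sketch of the convert-then-revalidate-then-store ordering, with a callback standing in for the user-observable conversion (all names here are invented for the sketch):

    #include <cstdio>
    #include <functional>
    #include <vector>

    struct FakeTypedArray {
      std::vector<double> storage;  // empty == "detached" in this sketch
    };

    // Convert, then re-check bounds against the *current* length, then store.
    // Returns false when the store had to be skipped (detached or out of bounds).
    bool StoreElement(FakeTypedArray* array, size_t index, int raw_value,
                      const std::function<double(int)>& to_number) {
      double prepared = to_number(raw_value);  // may run user code
      if (array->storage.empty()) return false;           // detached meanwhile
      if (index >= array->storage.size()) return false;   // shrunk meanwhile
      array->storage[index] = prepared;
      return true;
    }

    int main() {
      FakeTypedArray array{{0, 0, 0, 0}};
      // A hostile conversion callback that "detaches" the array mid-store.
      auto evil = [&](int v) { array.storage.clear(); return static_cast<double>(v); };
      std::printf("%d\n", StoreElement(&array, 1, 7, evil));  // 0
      FakeTypedArray array2{{0, 0}};
      std::printf("%d\n", StoreElement(&array2, 1, 7, [](int v) { return v * 1.0; }));  // 1
    }
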
@@ -529,7 +532,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromPreparedValue(
void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
TNode<Context> context, TNode<JSTypedArray> typed_array,
TNode<UintPtrT> index, TNode<Object> value, ElementsKind elements_kind,
- Label* if_detached) {
+ Label* if_detached_or_out_of_bounds) {
switch (elements_kind) {
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
@@ -542,7 +545,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
value, elements_kind, context);
StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
prepared_value, elements_kind,
- if_detached);
+ if_detached_or_out_of_bounds);
break;
}
case FLOAT32_ELEMENTS: {
@@ -550,7 +553,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
value, elements_kind, context);
StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
prepared_value, elements_kind,
- if_detached);
+ if_detached_or_out_of_bounds);
break;
}
case FLOAT64_ELEMENTS: {
@@ -558,7 +561,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
value, elements_kind, context);
StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
prepared_value, elements_kind,
- if_detached);
+ if_detached_or_out_of_bounds);
break;
}
case BIGINT64_ELEMENTS:
@@ -567,7 +570,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
value, elements_kind, context);
StoreJSTypedArrayElementFromPreparedValue(context, typed_array, index,
prepared_value, elements_kind,
- if_detached);
+ if_detached_or_out_of_bounds);
break;
}
default:
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 997ace2c43..c2201f598c 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -104,12 +104,12 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<UintPtrT> index_node,
TNode<Object> value,
ElementsKind elements_kind,
- Label* if_detached);
+ Label* if_detached_or_out_of_bounds);
template <typename TValue>
void StoreJSTypedArrayElementFromPreparedValue(
TNode<Context> context, TNode<JSTypedArray> typed_array,
TNode<UintPtrT> index_node, TNode<TValue> value,
- ElementsKind elements_kind, Label* if_detached);
+ ElementsKind elements_kind, Label* if_detached_or_out_of_bounds);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index 55666ff761..eb4eea831f 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -189,6 +189,7 @@ BUILTIN(TypedArrayPrototypeFill) {
isolate->factory()->NewStringFromAsciiChecked(method_name);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
}
+ end = std::min(end, static_cast<int64_t>(array->GetLength()));
}
int64_t count = end - start;
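
TypedArray.prototype.fill now clamps end against the array's current length just before computing the count, since the earlier argument coercions can run user code that shrinks a length-tracking array. A tiny sketch of the clamp-then-fill ordering, with a plain vector in place of the typed array:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Fill [start, end) with `value`, clamping `end` to the current length that is
    // re-read *after* any user-visible coercion has already happened.
    void FillClamped(std::vector<int>* array, int64_t start, int64_t end, int value) {
      end = std::min(end, static_cast<int64_t>(array->size()));
      int64_t count = end - start;
      if (count <= 0) return;
      std::fill(array->begin() + start, array->begin() + start + count, value);
    }

    int main() {
      std::vector<int> a(3, 0);
      FillClamped(&a, 1, 10, 7);  // end clamped from 10 down to 3
      std::printf("%d %d %d\n", a[0], a[1], a[2]);  // 0 7 7
    }
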
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 9e3b49fc01..03a4b2e9b1 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -8,6 +8,7 @@
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
#include "src/execution/arguments.h"
+#include "src/execution/frame-constants.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/logging/runtime-call-stats-scope.h"
@@ -43,7 +44,7 @@ class BuiltinArguments : public JavaScriptArguments {
// Note: this should return the address after the receiver,
// even when length() == 1.
inline Address* address_of_first_argument() const {
- return address_of_arg_at(kArgsOffset + 1); // Skips receiver.
+ return address_of_arg_at(kFirstArgsOffset);
}
static constexpr int kNewTargetOffset = 0;
@@ -54,7 +55,11 @@ class BuiltinArguments : public JavaScriptArguments {
static constexpr int kNumExtraArgs = 4;
static constexpr int kNumExtraArgsWithReceiver = 5;
+
static constexpr int kArgsOffset = 4;
+ static_assert(kArgsOffset == kReceiverOffset);
+ static constexpr int kFirstArgsOffset = kArgsOffset + 1; // Skip receiver.
+ static constexpr int kReceiverArgsOffset = kArgsOffset - kFirstArgsOffset;
inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const;
inline Handle<Object> receiver() const;
@@ -66,6 +71,23 @@ class BuiltinArguments : public JavaScriptArguments {
int length() const { return Arguments::length() - kNumExtraArgs; }
};
+#define ASSERT_OFFSET(BuiltinsOffset, FrameOffset) \
+ static_assert(BuiltinArguments::BuiltinsOffset == \
+ (BuiltinExitFrameConstants::FrameOffset - \
+ BuiltinExitFrameConstants::kNewTargetOffset) / \
+ kSystemPointerSize)
+ASSERT_OFFSET(kNewTargetOffset, kNewTargetOffset);
+ASSERT_OFFSET(kTargetOffset, kTargetOffset);
+ASSERT_OFFSET(kArgcOffset, kArgcOffset);
+ASSERT_OFFSET(kPaddingOffset, kPaddingOffset);
+ASSERT_OFFSET(kReceiverOffset, kFirstArgumentOffset);
+#undef ASSERT_OFFSET
+
+static_assert(BuiltinArguments::kNumExtraArgs ==
+ BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver);
+static_assert(BuiltinArguments::kNumExtraArgsWithReceiver ==
+ BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
+
// ----------------------------------------------------------------------------
// Support macro for defining builtins in C++.
// ----------------------------------------------------------------------------
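
The new static_asserts pin BuiltinArguments' word offsets to the BuiltinExitFrameConstants layout so the two headers cannot drift apart silently. A minimal standalone illustration of that cross-check pattern, with made-up constants in place of the real frame layout:

    #include <cstddef>

    // Two "headers" that must agree: one describes argument slots in words, the
    // other describes the frame layout in bytes.
    namespace builtin_args {
    constexpr int kNewTargetOffset = 0;
    constexpr int kTargetOffset = 1;
    constexpr int kArgcOffset = 2;
    }  // namespace builtin_args

    namespace frame_constants {
    constexpr int kSystemPointerSize = sizeof(void*);
    constexpr int kNewTargetOffset = 0 * kSystemPointerSize;
    constexpr int kTargetOffset = 1 * kSystemPointerSize;
    constexpr int kArgcOffset = 2 * kSystemPointerSize;
    }  // namespace frame_constants

    // Compile-time cross-check: a change to either side that breaks the
    // correspondence fails the build instead of corrupting argument access.
    #define ASSERT_OFFSET(Name)                                                   \
      static_assert(builtin_args::Name ==                                         \
                    (frame_constants::Name - frame_constants::kNewTargetOffset) / \
                        frame_constants::kSystemPointerSize)
    ASSERT_OFFSET(kNewTargetOffset);
    ASSERT_OFFSET(kTargetOffset);
    ASSERT_OFFSET(kArgcOffset);
    #undef ASSERT_OFFSET

    int main() { return 0; }
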
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 9d4fc4d258..98929d796b 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/objects/map-inl.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -17,6 +18,29 @@ TNode<WasmInstanceObject> WasmBuiltinsAssembler::LoadInstanceFromFrame() {
return CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceOffset));
}
+TNode<NativeContext> WasmBuiltinsAssembler::LoadContextFromWasmOrJsFrame() {
+ static_assert(BuiltinFrameConstants::kFunctionOffset ==
+ WasmFrameConstants::kWasmInstanceOffset);
+ TVARIABLE(NativeContext, context_result);
+ TNode<HeapObject> function_or_instance =
+ CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceOffset));
+ Label js(this);
+ Label done(this);
+ GotoIf(IsJSFunction(function_or_instance), &js);
+ context_result = LoadContextFromInstance(CAST(function_or_instance));
+ Goto(&done);
+
+ BIND(&js);
+ TNode<JSFunction> function = CAST(function_or_instance);
+ TNode<Context> context =
+ LoadObjectField<Context>(function, JSFunction::kContextOffset);
+ context_result = LoadNativeContext(context);
+ Goto(&done);
+
+ BIND(&done);
+ return context_result.value();
+}
+
TNode<NativeContext> WasmBuiltinsAssembler::LoadContextFromInstance(
TNode<WasmInstanceObject> instance) {
return CAST(Load(MachineType::AnyTagged(), instance,
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.h b/deps/v8/src/builtins/builtins-wasm-gen.h
index 1804957ef1..af568b329b 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.h
+++ b/deps/v8/src/builtins/builtins-wasm-gen.h
@@ -17,6 +17,8 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
TNode<WasmInstanceObject> LoadInstanceFromFrame();
+ TNode<NativeContext> LoadContextFromWasmOrJsFrame();
+
TNode<NativeContext> LoadContextFromInstance(
TNode<WasmInstanceObject> instance);
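The new LoadContextFromWasmOrJsFrame helper relies on BuiltinFrameConstants::kFunctionOffset and WasmFrameConstants::kWasmInstanceOffset naming the same parent-frame slot, so a single load yields either a Wasm instance or a JS function, and the loaded object's type picks the path to the native context. A plain C++ sketch of that dispatch, with stand-in types instead of CSA nodes:

#include <iostream>
#include <variant>

struct NativeContext { const char* name; };
struct WasmInstance { NativeContext native_context; };        // stand-in type
struct JSFunction { NativeContext context_native_context; };  // stand-in type

NativeContext LoadContextFromWasmOrJsFrame(
    const std::variant<WasmInstance, JSFunction>& frame_slot) {
  if (const auto* fn = std::get_if<JSFunction>(&frame_slot)) {
    // JS path: go through the function's context to its native context.
    return fn->context_native_context;
  }
  // Wasm path: the instance leads to its native context directly.
  return std::get<WasmInstance>(frame_slot).native_context;
}

int main() {
  std::cout << LoadContextFromWasmOrJsFrame(WasmInstance{{"wasm"}}).name << "\n";
  std::cout << LoadContextFromWasmOrJsFrame(JSFunction{{"js"}}).name << "\n";
}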
diff --git a/deps/v8/src/builtins/builtins-web-snapshots.cc b/deps/v8/src/builtins/builtins-web-snapshots.cc
deleted file mode 100644
index 1e85edd919..0000000000
--- a/deps/v8/src/builtins/builtins-web-snapshots.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils-inl.h"
-#include "src/builtins/builtins.h"
-#include "src/logging/counters.h"
-#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/js-array-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/web-snapshot/web-snapshot.h"
-
-namespace v8 {
-namespace internal {
-
-BUILTIN(WebSnapshotSerialize) {
- HandleScope scope(isolate);
- if (args.length() < 2 || args.length() > 3) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- }
- Handle<Object> object = args.at(1);
- Handle<FixedArray> block_list = isolate->factory()->empty_fixed_array();
- Handle<JSArray> block_list_js_array;
- if (args.length() == 3) {
- if (!args[2].IsJSArray()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- }
- block_list_js_array = args.at<JSArray>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, block_list,
- JSReceiver::GetOwnValues(isolate, block_list_js_array,
- PropertyFilter::ENUMERABLE_STRINGS));
- }
-
- auto snapshot_data = std::make_shared<WebSnapshotData>();
- WebSnapshotSerializer serializer(isolate);
- if (!serializer.TakeSnapshot(object, block_list, *snapshot_data)) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- if (!block_list_js_array.is_null() &&
- static_cast<uint32_t>(block_list->length()) <
- serializer.external_object_count()) {
- Handle<FixedArray> externals = serializer.GetExternals();
- Handle<Map> map = JSObject::GetElementsTransitionMap(block_list_js_array,
- PACKED_ELEMENTS);
- block_list_js_array->set_elements(*externals);
- block_list_js_array->set_length(Smi::FromInt(externals->length()));
- block_list_js_array->set_map(*map);
- }
-
- MaybeHandle<JSArrayBuffer> maybe_result =
- isolate->factory()->NewJSArrayBufferAndBackingStore(
- snapshot_data->buffer_size, InitializedFlag::kUninitialized);
- Handle<JSArrayBuffer> result;
- if (!maybe_result.ToHandle(&result)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kOutOfMemory,
- isolate->factory()->NewStringFromAsciiChecked(
- "WebSnapshotSerialize")));
- }
- uint8_t* data =
- reinterpret_cast<uint8_t*>(result->GetBackingStore()->buffer_start());
- memcpy(data, snapshot_data->buffer, snapshot_data->buffer_size);
-
- return *result;
-}
-
-BUILTIN(WebSnapshotDeserialize) {
- HandleScope scope(isolate);
- if (args.length() < 2 || args.length() > 3) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- }
-
- if (!args[1].IsJSArrayBuffer()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- }
- auto buffer = args.at<JSArrayBuffer>(1);
- std::shared_ptr<BackingStore> backing_store = buffer->GetBackingStore();
- if (backing_store.get() == nullptr) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- }
- const uint8_t* data =
- reinterpret_cast<uint8_t*>(backing_store->buffer_start());
-
- Handle<FixedArray> injected_references =
- isolate->factory()->empty_fixed_array();
- if (args.length() == 3) {
- if (!args[2].IsJSArray()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- }
- auto js_array = args.at<JSArray>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, injected_references,
- JSReceiver::GetOwnValues(isolate, js_array,
- PropertyFilter::ENUMERABLE_STRINGS));
- }
-
- WebSnapshotDeserializer deserializer(reinterpret_cast<v8::Isolate*>(isolate),
- data, backing_store->byte_length());
- if (!deserializer.Deserialize(injected_references)) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- Handle<Object> object;
- if (!deserializer.value().ToHandle(&object)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
- return *object;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 07b598f79f..2c84a811bc 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -121,7 +121,7 @@ const char* Builtins::Lookup(Address pc) {
return nullptr;
}
-Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return code_handle(Builtin::kCallFunction_ReceiverIsNullOrUndefined);
@@ -133,7 +133,7 @@ Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
UNREACHABLE();
}
-Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
+Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return code_handle(Builtin::kCall_ReceiverIsNullOrUndefined);
@@ -145,7 +145,7 @@ Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
UNREACHABLE();
}
-Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
+Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
return code_handle(Builtin::kNonPrimitiveToPrimitive_Default);
@@ -157,7 +157,7 @@ Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
UNREACHABLE();
}
-Handle<CodeT> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
+Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
switch (hint) {
case OrdinaryToPrimitiveHint::kNumber:
return code_handle(Builtin::kOrdinaryToPrimitive_Number);
@@ -179,21 +179,21 @@ FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) {
return FullObjectSlot(location);
}
-void Builtins::set_code(Builtin builtin, CodeT code) {
+void Builtins::set_code(Builtin builtin, Code code) {
DCHECK_EQ(builtin, code.builtin_id());
DCHECK(Internals::HasHeapObjectTag(code.ptr()));
// The given builtin may be uninitialized thus we cannot check its type here.
isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
}
-CodeT Builtins::code(Builtin builtin) {
+Code Builtins::code(Builtin builtin) {
Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)];
- return CodeT::cast(Object(ptr));
+ return Code::cast(Object(ptr));
}
-Handle<CodeT> Builtins::code_handle(Builtin builtin) {
+Handle<Code> Builtins::code_handle(Builtin builtin) {
Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)];
- return Handle<CodeT>(location);
+ return Handle<Code>(location);
}
// static
@@ -229,7 +229,7 @@ CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Builtin builtin) {
// static
Callable Builtins::CallableFor(Isolate* isolate, Builtin builtin) {
- Handle<CodeT> code = isolate->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate->builtins()->code_handle(builtin);
return Callable{code, CallInterfaceDescriptorFor(builtin)};
}
@@ -256,7 +256,7 @@ void Builtins::PrintBuiltinCode() {
base::CStrVector(v8_flags.print_builtin_code_filter))) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
OFStream os(trace_scope.file());
- CodeT builtin_code = code(builtin);
+ Code builtin_code = code(builtin);
builtin_code.Disassemble(builtin_name, os, isolate_);
os << "\n";
}
@@ -270,7 +270,7 @@ void Builtins::PrintBuiltinSize() {
++builtin) {
const char* builtin_name = name(builtin);
const char* kind = KindNameOf(builtin);
- CodeT code = Builtins::code(builtin);
+ Code code = Builtins::code(builtin);
PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
code.InstructionSize());
}
@@ -299,6 +299,13 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
}
// static
+bool Builtins::IsIsolateIndependentBuiltin(Code code) {
+ Builtin builtin = code.builtin_id();
+ return Builtins::IsBuiltinId(builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+}
+
+// static
void Builtins::InitializeIsolateDataTables(Isolate* isolate) {
EmbeddedData embedded_data = EmbeddedData::FromBlob(isolate);
IsolateData* isolate_data = isolate->isolate_data();
@@ -306,7 +313,7 @@ void Builtins::InitializeIsolateDataTables(Isolate* isolate) {
// The entry table.
for (Builtin i = Builtins::kFirst; i <= Builtins::kLast; ++i) {
DCHECK(Builtins::IsBuiltinId(isolate->builtins()->code(i).builtin_id()));
- DCHECK(isolate->builtins()->code(i).is_off_heap_trampoline());
+ DCHECK(!isolate->builtins()->code(i).has_instruction_stream());
isolate_data->builtin_entry_table()[ToInt(i)] =
embedded_data.InstructionStartOfBuiltin(i);
}
@@ -331,16 +338,16 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
int i = 0;
HandleScope scope(isolate);
for (; i < ToInt(Builtin::kFirstBytecodeHandler); i++) {
- Handle<CodeT> builtin_code(&builtins[i]);
- Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
+ Handle<Code> builtin_code(&builtins[i]);
+ Handle<AbstractCode> code = Handle<AbstractCode>::cast(builtin_code);
PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kBuiltin, code,
Builtins::name(FromInt(i))));
}
static_assert(kLastBytecodeHandlerPlusOne == kBuiltinCount);
for (; i < kBuiltinCount; i++) {
- Handle<CodeT> builtin_code(&builtins[i]);
- Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
+ Handle<Code> builtin_code(&builtins[i]);
+ Handle<AbstractCode> code = Handle<AbstractCode>::cast(builtin_code);
interpreter::Bytecode bytecode =
builtin_metadata[i].data.bytecode_and_scale.bytecode;
interpreter::OperandScale scale =
@@ -352,87 +359,6 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
}
}
-namespace {
-enum TrampolineType { kAbort, kJump };
-
-class OffHeapTrampolineGenerator {
- public:
- explicit OffHeapTrampolineGenerator(Isolate* isolate)
- : isolate_(isolate),
- masm_(isolate, AssemblerOptions::DefaultForOffHeapTrampoline(isolate),
- CodeObjectRequired::kYes,
- ExternalAssemblerBuffer(buffer_, kBufferSize)) {}
-
- CodeDesc Generate(Address off_heap_entry, TrampolineType type) {
- // Generate replacement code that simply tail-calls the off-heap code.
- DCHECK(!masm_.has_frame());
- {
- FrameScope scope(&masm_, StackFrame::NO_FRAME_TYPE);
- if (type == TrampolineType::kJump) {
- masm_.CodeEntry();
- masm_.JumpToOffHeapInstructionStream(off_heap_entry);
- } else {
- DCHECK_EQ(type, TrampolineType::kAbort);
- masm_.Trap();
- }
- }
-
- CodeDesc desc;
- masm_.GetCode(isolate_, &desc);
- return desc;
- }
-
- Handle<HeapObject> CodeObject() { return masm_.CodeObject(); }
-
- private:
- Isolate* isolate_;
- // Enough to fit the single jmp.
- static constexpr int kBufferSize = 256;
- byte buffer_[kBufferSize];
- MacroAssembler masm_;
-};
-
-constexpr int OffHeapTrampolineGenerator::kBufferSize;
-
-} // namespace
-
-// static
-Handle<Code> Builtins::GenerateOffHeapTrampolineFor(
- Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
- bool generate_jump_to_instruction_stream) {
- DCHECK_NOT_NULL(isolate->embedded_blob_code());
- DCHECK_NE(0, isolate->embedded_blob_code_size());
-
- OffHeapTrampolineGenerator generator(isolate);
-
- CodeDesc desc =
- generator.Generate(off_heap_entry, generate_jump_to_instruction_stream
- ? TrampolineType::kJump
- : TrampolineType::kAbort);
-
- return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
- .set_kind_specific_flags(kind_specific_flags)
- .set_read_only_data_container(!V8_EXTERNAL_CODE_SPACE_BOOL)
- .set_self_reference(generator.CodeObject())
- .set_is_executable(generate_jump_to_instruction_stream)
- .Build();
-}
-
-// static
-Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(
- Isolate* isolate) {
- OffHeapTrampolineGenerator generator(isolate);
- // Generate a jump to a dummy address as we're not actually interested in the
- // generated instruction stream.
- CodeDesc desc = generator.Generate(kNullAddress, TrampolineType::kJump);
-
- Handle<ByteArray> reloc_info = isolate->factory()->NewByteArray(
- desc.reloc_size, AllocationType::kReadOnly);
- Code::CopyRelocInfoToByteArray(*reloc_info, desc);
-
- return reloc_info;
-}
-
// static
Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
Isolate* isolate) {
@@ -469,19 +395,13 @@ Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
CodeDesc::Verify(&desc);
- int kind_specific_flags;
- {
- CodeT code = isolate->builtins()->code(builtin);
- kind_specific_flags =
- CodeDataContainerFromCodeT(code).kind_specific_flags(kRelaxedLoad);
- }
+ const int kind_specific_flags =
+ isolate->builtins()->code(builtin).kind_specific_flags(kRelaxedLoad);
return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_kind_specific_flags(kind_specific_flags)
- .set_read_only_data_container(false)
// Mimic the InterpreterEntryTrampoline.
.set_builtin(Builtin::kInterpreterEntryTrampoline)
- .set_is_executable(true)
.Build();
}
@@ -526,64 +446,6 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
return isolate->MayAccess(responsible_context, target_global_proxy);
}
-// static
-bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
- // If the runtime/optimized code always knows when executing a given builtin
- // that it is a builtin, then that builtin does not need an executable Code
- // object. Such Code objects can go in read_only_space (and can even be
- // smaller with no branch instruction), thus saving memory.
-
- // Builtins with JS linkage will always have executable Code objects since
- // they can be called directly from jitted code with no way of determining
- // that they are builtins at generation time. E.g.
- // f = Array.of;
- // f(1, 2, 3);
- // TODO(delphick): This is probably too loose but for now Wasm can call any JS
- // linkage builtin via its Code object. Once Wasm is fixed this can either be
- // tighted or removed completely.
- if (Builtins::KindOf(builtin) != BCH && HasJSLinkage(builtin)) {
- return true;
- }
-
- // There are some other non-TF builtins that also have JS linkage like
- // InterpreterEntryTrampoline which are explicitly allow-listed below.
- // TODO(delphick): Some of these builtins do not fit with the above, but
- // currently cause problems if they're not executable. This list should be
- // pared down as much as possible.
- switch (builtin) {
- case Builtin::kInterpreterEntryTrampoline:
- case Builtin::kCompileLazy:
- case Builtin::kCompileLazyDeoptimizedCode:
- case Builtin::kCallFunction_ReceiverIsNullOrUndefined:
- case Builtin::kCallFunction_ReceiverIsNotNullOrUndefined:
- case Builtin::kCallFunction_ReceiverIsAny:
- case Builtin::kCallBoundFunction:
- case Builtin::kCall_ReceiverIsNullOrUndefined:
- case Builtin::kCall_ReceiverIsNotNullOrUndefined:
- case Builtin::kCall_ReceiverIsAny:
- case Builtin::kHandleApiCall:
- case Builtin::kInstantiateAsmJs:
-#if V8_ENABLE_WEBASSEMBLY
- case Builtin::kGenericJSToWasmWrapper:
- case Builtin::kWasmReturnPromiseOnSuspend:
-#endif // V8_ENABLE_WEBASSEMBLY
-
- // TODO(delphick): Remove this when calls to it have the trampoline inlined
- // or are converted to use kCallBuiltinPointer.
- case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
- return true;
- default:
-#if V8_TARGET_ARCH_MIPS64
- // TODO(Loongson): Move non-JS linkage builtins code objects into RO_SPACE
- // caused MIPS platform to crash, and we need some time to handle it. Now
- // disable this change temporarily on MIPS platform.
- return true;
-#else
- return false;
-#endif // V8_TARGET_ARCH_MIPS64
- }
-}
-
Builtin ExampleBuiltinForTorqueFunctionPointerType(
size_t function_pointer_type_id) {
switch (function_pointer_type_id) {
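With CodeT folded back into Code, the code() and code_handle() accessors changed earlier in this file read builtin Code objects straight out of the per-isolate builtin table: one tagged pointer per builtin id, handed out either as a value or by wrapping the slot's address. A rough sketch of that table shape, with simplified stand-ins for V8's Handle and Code types:

#include <array>
#include <cstdint>

using Address = uintptr_t;  // stand-in for a tagged Code pointer
enum class Builtin : int { kCallFunction, kCall, kBuiltinCount };

struct BuiltinTable {
  std::array<Address, static_cast<size_t>(Builtin::kBuiltinCount)> entries{};

  // Analogue of Builtins::code(): load the tagged pointer for the builtin.
  Address code(Builtin b) const { return entries[static_cast<size_t>(b)]; }

  // Analogue of Builtins::code_handle(): hand out the slot's location so the
  // "handle" keeps observing the table entry if it is updated later.
  Address* code_handle(Builtin b) { return &entries[static_cast<size_t>(b)]; }
};

int main() {
  BuiltinTable table;
  *table.code_handle(Builtin::kCall) = 0x1234;  // hypothetical Code pointer
  return table.code(Builtin::kCall) == 0x1234 ? 0 : 1;
}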
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index c3935b53d0..91811adb8b 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -138,17 +138,17 @@ class Builtins {
}
// Convenience wrappers.
- Handle<CodeT> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
- Handle<CodeT> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
- Handle<CodeT> NonPrimitiveToPrimitive(
+ Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
- Handle<CodeT> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
+ Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
// Used by CreateOffHeapTrampolines in isolate.cc.
- void set_code(Builtin builtin, CodeT code);
+ void set_code(Builtin builtin, Code code);
- V8_EXPORT_PRIVATE CodeT code(Builtin builtin);
- V8_EXPORT_PRIVATE Handle<CodeT> code_handle(Builtin builtin);
+ V8_EXPORT_PRIVATE Code code(Builtin builtin);
+ V8_EXPORT_PRIVATE Handle<Code> code_handle(Builtin builtin);
static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
@@ -173,7 +173,7 @@ class Builtins {
static bool IsCpp(Builtin builtin);
// True, iff the given code object is a builtin. Note that this does not
- // necessarily mean that its kind is Code::BUILTIN.
+ // necessarily mean that its kind is InstructionStream::BUILTIN.
static bool IsBuiltin(const Code code);
// As above, but safe to access off the main thread since the check is done
@@ -192,12 +192,7 @@ class Builtins {
}
// True, iff the given code object is a builtin with off-heap embedded code.
- template <typename CodeOrCodeT>
- static bool IsIsolateIndependentBuiltin(CodeOrCodeT code) {
- Builtin builtin = code.builtin_id();
- return Builtins::IsBuiltinId(builtin) &&
- Builtins::IsIsolateIndependent(builtin);
- }
+ static bool IsIsolateIndependentBuiltin(Code code);
static void InitializeIsolateDataTables(Isolate* isolate);
@@ -220,34 +215,15 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address);
static void Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame);
+ ArgvMode argv_mode, bool builtin_exit_frame);
static bool AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
Handle<JSObject> target_global_proxy);
- // Creates a trampoline code object that jumps to the given off-heap entry.
- // The result should not be used directly, but only from the related Factory
- // function.
- // TODO(delphick): Come up with a better name since it may not generate an
- // executable trampoline.
- static Handle<Code> GenerateOffHeapTrampolineFor(
- Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
- bool generate_jump_to_instruction_stream);
-
- // Generate the RelocInfo ByteArray that would be generated for an offheap
- // trampoline.
- static Handle<ByteArray> GenerateOffHeapTrampolineRelocInfo(Isolate* isolate);
-
// Creates a copy of InterpreterEntryTrampolineForProfiling in the code space.
static Handle<Code> CreateInterpreterEntryTrampolineForProfiling(
Isolate* isolate);
- // Only builtins with JS linkage should ever need to be called via their
- // trampoline Code object. The remaining builtins have non-executable Code
- // objects.
- static bool CodeObjectIsExecutable(Builtin builtin);
-
static bool IsJSEntryVariant(Builtin builtin) {
switch (builtin) {
case Builtin::kJSEntry:
@@ -288,10 +264,10 @@ class Builtins {
enum class CallOrConstructMode { kCall, kConstruct };
static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
- Handle<CodeT> code);
+ Handle<Code> code);
static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
- Handle<CodeT> code);
+ Handle<Code> code);
enum class InterpreterEntryTrampolineMode {
// The version of InterpreterEntryTrampoline used by default.
@@ -335,8 +311,8 @@ class Builtins {
};
V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
- // Check for kNoBuiltinId first to abort early when the current Code object
- // is not a builtin.
+ // Check for kNoBuiltinId first to abort early when the current
+ // InstructionStream object is not a builtin.
return builtin_id != Builtin::kNoBuiltinId &&
(builtin_id == Builtin::kInterpreterEntryTrampoline ||
builtin_id == Builtin::kInterpreterEnterAtBytecode ||
@@ -344,8 +320,8 @@ V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
}
V8_INLINE constexpr bool IsBaselineTrampolineBuiltin(Builtin builtin_id) {
- // Check for kNoBuiltinId first to abort early when the current Code object
- // is not a builtin.
+ // Check for kNoBuiltinId first to abort early when the current
+ // InstructionStream object is not a builtin.
return builtin_id != Builtin::kNoBuiltinId &&
(builtin_id == Builtin::kBaselineOutOfLinePrologue ||
builtin_id == Builtin::kBaselineOutOfLinePrologueDeopt ||
@@ -359,4 +335,17 @@ Builtin ExampleBuiltinForTorqueFunctionPointerType(
} // namespace internal
} // namespace v8
+// Helper while transitioning some functions to libm.
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+#define SIN_IMPL(X) \
+ v8_flags.use_libm_trig_functions ? base::ieee754::libm_sin(X) \
+ : base::ieee754::fdlibm_sin(X)
+#define COS_IMPL(X) \
+ v8_flags.use_libm_trig_functions ? base::ieee754::libm_cos(X) \
+ : base::ieee754::fdlibm_cos(X)
+#else
+#define SIN_IMPL(X) base::ieee754::sin(X)
+#define COS_IMPL(X) base::ieee754::cos(X)
+#endif
+
#endif // V8_BUILTINS_BUILTINS_H_
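The SIN_IMPL/COS_IMPL helpers added at the end of builtins.h select between the libm-backed and fdlibm-backed trig implementations at runtime while the libm transition sits behind a flag. A standalone illustration of that flag dispatch, with std::sin standing in for both of V8's base::ieee754 variants:

#include <cmath>
#include <iostream>

namespace ieee754 {
double libm_sin(double x) { return std::sin(x); }    // stand-in for the libm path
double fdlibm_sin(double x) { return std::sin(x); }  // stand-in for the fdlibm path
}  // namespace ieee754

struct Flags { bool use_libm_trig_functions = true; } v8_flags;

// Same shape as the helper macro: the flag picks the implementation per call.
#define SIN_IMPL(X)                                         \
  (v8_flags.use_libm_trig_functions ? ieee754::libm_sin(X)  \
                                    : ieee754::fdlibm_sin(X))

int main() { std::cout << SIN_IMPL(1.0) << "\n"; }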
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 0d347e3dd3..20eecf7352 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -30,13 +30,13 @@ macro IsCell(o: HeapObject): bool {
}
@export
-macro IsCode(o: HeapObject): bool {
- return Is<Code>(o);
+macro IsInstructionStream(o: HeapObject): bool {
+ return Is<InstructionStream>(o);
}
@export
-macro IsCodeDataContainer(o: HeapObject): bool {
- return Is<CodeDataContainer>(o);
+macro IsCode(o: HeapObject): bool {
+ return Is<Code>(o);
}
@export
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 49002a9c31..668a69dace 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -30,6 +30,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// accessibly from the root list.
RootIndex root_list_index;
DCHECK(!isolate_->roots_table().IsRootHandle(object, &root_list_index));
+ DCHECK_IMPLIES(object->IsMap(), !HeapObject::cast(*object).InReadOnlySpace());
// Not yet finalized.
DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
@@ -43,7 +44,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// All code objects should be loaded through the root register or use
// pc-relative addressing.
- DCHECK(!object->IsCode());
+ DCHECK(!object->IsInstructionStream());
#endif
auto find_result = map_.FindOrInsert(object);
@@ -73,7 +74,7 @@ void CheckPreconditionsForPatching(Isolate* isolate,
} // namespace
void BuiltinsConstantsTableBuilder::PatchSelfReference(
- Handle<Object> self_reference, Handle<Code> code_object) {
+ Handle<Object> self_reference, Handle<InstructionStream> code_object) {
CheckPreconditionsForPatching(isolate_, code_object);
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference).kind() ==
@@ -81,7 +82,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
uint32_t key;
if (map_.Delete(self_reference, &key)) {
- DCHECK(code_object->IsCode());
+ DCHECK(code_object->IsInstructionStream());
map_.Insert(code_object, key);
}
}
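PatchSelfReference swaps the temporary dummy object for the finished InstructionStream while keeping the constants-table index it was assigned: the map entry is deleted, its key recovered, and re-inserted under the real object. A rough sketch of that delete-and-reinsert step, using std::unordered_map and strings in place of V8's IdentityMap and heap objects:

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

using ConstantsMap = std::unordered_map<std::string, uint32_t>;

void PatchSelfReference(ConstantsMap& map, const std::string& dummy,
                        const std::string& final_code_object) {
  auto it = map.find(dummy);
  if (it == map.end()) return;   // nothing referenced the placeholder
  uint32_t index = it->second;   // constants-table slot already handed out
  map.erase(it);
  map.emplace(final_code_object, index);  // real object now owns that slot
}

int main() {
  ConstantsMap map{{"<dummy self reference>", 7}};
  PatchSelfReference(map, "<dummy self reference>", "<instruction stream>");
  std::cout << map.at("<instruction stream>") << "\n";  // prints 7
}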
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
index 56547a445e..e8d79202ef 100644
--- a/deps/v8/src/builtins/constants-table-builder.h
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -33,10 +33,10 @@ class BuiltinsConstantsTableBuilder final {
uint32_t AddObject(Handle<Object> object);
// Self-references during code generation start out by referencing a handle
- // with a temporary dummy object. Once the final Code object exists, such
- // entries in the constants map must be patched up.
+ // with a temporary dummy object. Once the final InstructionStream object
+ // exists, such entries in the constants map must be patched up.
void PatchSelfReference(Handle<Object> self_reference,
- Handle<Code> code_object);
+ Handle<InstructionStream> code_object);
// References to the array that stores basic block usage counters start out as
// references to a unique oddball. Once the actual array has been allocated,
diff --git a/deps/v8/src/builtins/conversion.tq b/deps/v8/src/builtins/conversion.tq
index 266fcaa552..8fd07a1745 100644
--- a/deps/v8/src/builtins/conversion.tq
+++ b/deps/v8/src/builtins/conversion.tq
@@ -187,7 +187,7 @@ transitioning macro CallExoticToPrimitive(implicit context: Context)(
transitioning builtin NonPrimitiveToPrimitive_Default(
implicit context: Context)(input: JSReceiver): JSPrimitive {
const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input)
- otherwise return OrdinaryToPrimitive_Number(input);
+ otherwise return OrdinaryToPrimitive_Number_Inline(input);
return CallExoticToPrimitive(
input, exoticToPrimitive, DefaultStringConstant());
}
@@ -195,19 +195,24 @@ transitioning builtin NonPrimitiveToPrimitive_Default(
transitioning builtin NonPrimitiveToPrimitive_Number(implicit context: Context)(
input: JSReceiver): JSPrimitive {
const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input)
- otherwise return OrdinaryToPrimitive_Number(input);
+ otherwise return OrdinaryToPrimitive_Number_Inline(input);
return CallExoticToPrimitive(
input, exoticToPrimitive, NumberStringConstant());
}
-transitioning builtin NonPrimitiveToPrimitive_String(implicit context: Context)(
- input: JSReceiver): JSPrimitive {
+transitioning macro NonPrimitiveToPrimitive_String_Inline(
+ implicit context: Context)(input: JSReceiver): JSPrimitive {
const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input)
- otherwise return OrdinaryToPrimitive_String(input);
+ otherwise return OrdinaryToPrimitive_String_Inline(input);
return CallExoticToPrimitive(
input, exoticToPrimitive, StringStringConstant());
}
+transitioning builtin NonPrimitiveToPrimitive_String(implicit context: Context)(
+ input: JSReceiver): JSPrimitive {
+ return NonPrimitiveToPrimitive_String_Inline(input);
+}
+
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
transitioning macro TryToPrimitiveMethod(implicit context: Context)(
@@ -226,6 +231,11 @@ transitioning macro TryToPrimitiveMethod(implicit context: Context)(
transitioning builtin OrdinaryToPrimitive_Number(implicit context: Context)(
input: JSAny): JSPrimitive {
+ return OrdinaryToPrimitive_Number_Inline(input);
+}
+
+transitioning builtin OrdinaryToPrimitive_Number_Inline(
+ implicit context: Context)(input: JSAny): JSPrimitive {
try {
return TryToPrimitiveMethod(input, ValueOfStringConstant())
otherwise String;
@@ -239,6 +249,11 @@ transitioning builtin OrdinaryToPrimitive_Number(implicit context: Context)(
transitioning builtin OrdinaryToPrimitive_String(implicit context: Context)(
input: JSAny): JSPrimitive {
+ return OrdinaryToPrimitive_String_Inline(input);
+}
+
+transitioning macro OrdinaryToPrimitive_String_Inline(
+ implicit context: Context)(input: JSAny): JSPrimitive {
try {
return TryToPrimitiveMethod(input, ToStringStringConstant())
otherwise String;
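The pattern in this conversion.tq hunk is a mechanical split: each ToPrimitive entry point becomes a thin wrapper that forwards to a new _Inline variant, presumably so other Torque code can use the body directly instead of paying a builtin call. The same shape in plain C++, with a trivial stand-in body:

// The inline helper carries the logic; callers in the same module use it
// directly.
inline int OrdinaryToPrimitiveNumberInline(int input) {
  return input + 1;  // stand-in for the real ToPrimitive steps
}

// The externally visible entry point is now just a forwarder.
int OrdinaryToPrimitiveNumber(int input) {
  return OrdinaryToPrimitiveNumberInline(input);
}

int main() { return OrdinaryToPrimitiveNumber(41) == 42 ? 0 : 1; }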
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 23995e50e3..c5d235e18e 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -33,6 +33,10 @@ FromConstexpr<uint8, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
uint8 {
return ConstexprIntegerLiteralToUint8(i);
}
+FromConstexpr<int64, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
+ int64 {
+ return ConstexprIntegerLiteralToInt64(i);
+}
FromConstexpr<uint64, constexpr IntegerLiteral>(i: constexpr IntegerLiteral):
uint64 {
return ConstexprIntegerLiteralToUint64(i);
@@ -129,6 +133,9 @@ FromConstexpr<uint8, constexpr uint8>(i: constexpr uint8): uint8 {
FromConstexpr<uint32, constexpr uint32>(i: constexpr uint32): uint32 {
return Unsigned(%FromConstexpr<int32>(i));
}
+FromConstexpr<int64, constexpr int64>(i: constexpr int64): int64 {
+ return Int64Constant(i);
+}
FromConstexpr<uint64, constexpr uint64>(i: constexpr uint64): uint64 {
return Uint64Constant(i);
}
@@ -221,6 +228,9 @@ Convert<Smi, uint32>(ui: uint32): Smi {
Convert<uintptr, uint32>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
}
+Convert<int64, int32>(i: int32): int64 {
+ return ChangeInt32ToInt64(i);
+}
Convert<uint64, uint32>(ui: uint32): uint64 {
return ChangeUint32ToUint64(ui);
}
@@ -263,6 +273,9 @@ Convert<int32, intptr>(i: intptr): int32 {
Convert<int32, int64>(i: int64): int32 {
return TruncateInt64ToInt32(i);
}
+Convert<uint32, uint64>(i: uint64): uint32 {
+ return Unsigned(TruncateInt64ToInt32(Signed(i)));
+}
Convert<int32, Number>(n: Number): int32 {
typeswitch (n) {
case (s: Smi): {
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index a9a7277e6d..0ec6dc6dc2 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -67,11 +67,18 @@ macro WasDetached(view: JSArrayBufferView): bool {
return IsDetachedBuffer(view.buffer);
}
-macro ValidateDataView(context: Context, o: JSAny, method: String): JSDataView {
- try {
- return Cast<JSDataView>(o) otherwise CastError;
- } label CastError {
- ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method);
+macro ValidateDataView(
+ context: Context, o: JSAny, method: String): JSDataViewOrRabGsabDataView {
+ typeswitch (o) {
+ case (_x: JSDataView): {
+ return UnsafeCast<JSDataView>(o);
+ }
+ case (_x: JSRabGsabDataView): {
+ return UnsafeCast<JSRabGsabDataView>(o);
+ }
+ case (_x: JSAny): {
+ ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method);
+ }
}
}
@@ -79,7 +86,7 @@ macro ValidateDataView(context: Context, o: JSAny, method: String): JSDataView {
javascript builtin DataViewPrototypeGetBuffer(
js-implicit context: NativeContext,
receiver: JSAny)(...arguments): JSArrayBuffer {
- const dataView: JSDataView =
+ const dataView: JSDataViewOrRabGsabDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
return dataView.buffer;
}
@@ -87,7 +94,7 @@ javascript builtin DataViewPrototypeGetBuffer(
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
- const dataView: JSDataView =
+ const dataView: JSDataViewOrRabGsabDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_length');
if (IsVariableLengthJSArrayBufferView(dataView)) {
try {
@@ -110,7 +117,7 @@ javascript builtin DataViewPrototypeGetByteLength(
// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
javascript builtin DataViewPrototypeGetByteOffset(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
- const dataView: JSDataView =
+ const dataView: JSDataViewOrRabGsabDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset');
try {
typed_array::IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
@@ -376,7 +383,7 @@ transitioning macro DataViewGet(
requestedLittleEndian: JSAny, kind: constexpr ElementsKind): Numeric {
// 1. Perform ? RequireInternalSlot(view, [[DataView]]).
// 2. Assert: view has a [[ViewedArrayBuffer]] internal slot.
- const dataView: JSDataView =
+ const dataView: JSDataViewOrRabGsabDataView =
ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
try {
@@ -674,7 +681,7 @@ transitioning macro DataViewSet(
requestedLittleEndian: JSAny, kind: constexpr ElementsKind): JSAny {
// 1. Perform ? RequireInternalSlot(view, [[DataView]]).
// 2. Assert: view has a [[ViewedArrayBuffer]] internal slot.
- const dataView: JSDataView =
+ const dataView: JSDataViewOrRabGsabDataView =
ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
try {
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 589b5741b7..5e8ba2dee6 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -45,6 +45,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
namespace {
+constexpr int kReceiverOnStackSize = kSystemPointerSize;
+
enum class ArgumentsElementType {
kRaw, // Push arguments as they are.
kHandle // Dereference arguments before pushing.
@@ -99,7 +101,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// InvokeFunction.
// Set up pointer to first argument (skip receiver).
- __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ __ lea(esi, Operand(ebp, StandardFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
// Copy arguments to the expression stack.
// esi: Pointer to start of arguments.
@@ -125,8 +127,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -215,7 +217,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movd(xmm0, eax);
// Set up pointer to first argument (skip receiver).
- __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
// Restore argument count.
@@ -280,8 +282,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -369,8 +371,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ mov(__ ExternalReferenceAsOperand(c_entry_fp, edi), Immediate(0));
// Store the context address in the previously-reserved slot.
@@ -573,7 +575,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, CODET_TYPE);
+#ifndef V8_JITLESS
+ __ CmpInstanceType(scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ j(not_equal, &not_baseline);
@@ -583,6 +586,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
} else {
__ j(equal, is_baseline);
}
+#endif // !V8_JITLESS
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -689,7 +693,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
__ Pop(eax);
- __ CmpObjectType(ecx, CODET_TYPE, ecx);
+ __ CmpObjectType(ecx, CODE_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -768,8 +772,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
- __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -893,8 +897,6 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
- Register closure = edi;
-
__ movd(xmm0, eax); // Spill actual argument count.
// The bytecode array could have been flushed from the shared function info,
@@ -909,6 +911,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
__ j(not_equal, &compile_lazy);
+#ifndef V8_JITLESS
+ Register closure = edi;
Register feedback_vector = ecx;
Label push_stack_frame;
// Load feedback vector and check if it is valid. If valid, check for
@@ -946,6 +950,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set
// up the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
@@ -1107,6 +1117,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
{
// Restore actual argument count.
@@ -1154,6 +1165,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ movd(eax, xmm0); // Recover argument count.
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1419,7 +1431,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ mov(scratch,
FieldOperand(scratch, InterpreterData::kInterpreterTrampolineOffset));
- __ add(scratch, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(scratch, scratch);
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
@@ -1525,6 +1537,7 @@ void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
auto descriptor =
@@ -1810,8 +1823,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
__ bind(&no_this_arg);
__ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@@ -1919,8 +1932,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ movd(xmm0, edx);
__ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -1978,8 +1991,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -2052,7 +2065,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2683,8 +2697,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ cmp(maybe_target_code, Immediate(0));
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
}
@@ -2737,8 +2751,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
kHeapObjectTag));
__ SmiUntag(ecx);
- // Compute the target address = code_obj + header_size + osr_offset
- __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(eax, eax);
+
+ // Compute the target address = code_entry + osr_offset
+ __ add(eax, ecx);
Generate_OSREntry(masm, eax);
}
@@ -3004,8 +3020,11 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
+ CHECK(result_size == 1 || result_size == 2);
+
+ using ER = ExternalReference;
+
// eax: number of arguments including receiver
// edx: pointer to C function
// ebp: frame pointer (restored after C call)
@@ -3026,32 +3045,27 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
kRuntimeCallFunctionRegister, kContextRegister,
kJSFunctionRegister, kRootRegister));
- // Reserve space on the stack for the three arguments passed to the call. If
- // result size is greater than can be returned in registers, also reserve
- // space for the hidden argument for the result location, and space for the
- // result itself.
- int arg_stack_space = 3;
+ static constexpr int kReservedStackSlots = 3;
+ __ EnterExitFrame(
+ kReservedStackSlots,
+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT, edi);
- // Enter the exit frame that transitions from JavaScript to C++.
+ // Set up argv in a callee-saved register. It is reused below so it must be
+ // retained across the C call.
+ static constexpr Register kArgvRegister = edi;
if (argv_mode == ArgvMode::kRegister) {
- DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
- DCHECK(!builtin_exit_frame);
- __ EnterApiExitFrame(arg_stack_space, edi);
-
- // Move argc and argv into the correct registers.
- __ mov(esi, ecx);
- __ mov(edi, eax);
+ __ mov(kArgvRegister, ecx);
} else {
- __ EnterExitFrame(
- arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ int offset =
+ StandardFrameConstants::kFixedFrameSizeAboveFp - kReceiverOnStackSize;
+ __ lea(kArgvRegister, Operand(ebp, eax, times_system_pointer_size, offset));
}
// edx: pointer to C function
// ebp: frame pointer (restored after C call)
// esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
+ // eax: number of arguments including receiver
+ // edi: pointer to the first argument (C callee-saved)
// Result returned in eax, or eax+edx if result size is 2.
@@ -3060,9 +3074,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CheckStackAlignment();
}
// Call C function.
- __ mov(Operand(esp, 0 * kSystemPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kSystemPointerSize), esi); // argv.
- __ Move(ecx, Immediate(ExternalReference::isolate_address(masm->isolate())));
+ static_assert(kReservedStackSlots == 3);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // argc.
+ __ mov(Operand(esp, 1 * kSystemPointerSize), kArgvRegister); // argv.
+ __ Move(ecx, Immediate(ER::isolate_address(masm->isolate())));
__ mov(Operand(esp, 2 * kSystemPointerSize), ecx);
__ call(kRuntimeCallFunctionRegister);
@@ -3079,8 +3094,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ push(edx);
__ LoadRoot(edx, RootIndex::kTheHoleValue);
Label okay;
- ExternalReference pending_exception_address = ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ ER pending_exception_address =
+ ER::Create(IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ cmp(edx, __ ExternalReferenceAsOperand(pending_exception_address, ecx));
// Cannot use check here as it attempts to generate call into runtime.
__ j(equal, &okay, Label::kNear);
@@ -3089,35 +3104,37 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ pop(edx);
}
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
- argv_mode == ArgvMode::kStack);
+ __ LeaveExitFrame(esi);
+ if (argv_mode == ArgvMode::kStack) {
+ // Drop arguments and the receiver from the caller stack.
+ DCHECK(!AreAliased(esi, kArgvRegister));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(kArgvRegister, kReceiverOnStackSize));
+ __ PushReturnAddressFrom(ecx);
+ }
__ ret(0);
// Handling of exception.
__ bind(&exception_returned);
- ExternalReference pending_handler_context_address = ExternalReference::Create(
+ ER pending_handler_context_address = ER::Create(
IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
- ExternalReference pending_handler_entrypoint_address =
- ExternalReference::Create(
- IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
- ExternalReference pending_handler_fp_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
- ExternalReference pending_handler_sp_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+ ER pending_handler_entrypoint_address = ER::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ER pending_handler_fp_address =
+ ER::Create(IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ER pending_handler_sp_address =
+ ER::Create(IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
// Ask the runtime for help to determine the handler. This will set eax to
// contain the current pending exception, don't clobber it.
- ExternalReference find_handler =
- ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
+ ER find_handler = ER::Create(Runtime::kUnwindAndFindExceptionHandler);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, eax);
__ mov(Operand(esp, 0 * kSystemPointerSize), Immediate(0)); // argc.
__ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(0)); // argv.
- __ Move(esi,
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ Move(esi, Immediate(ER::isolate_address(masm->isolate())));
__ mov(Operand(esp, 2 * kSystemPointerSize), esi);
__ CallCFunction(find_handler, 3);
}
@@ -3137,8 +3154,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ bind(&skip);
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
- ExternalReference c_entry_fp_address = ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ ER c_entry_fp_address =
+ ER::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate());
__ mov(__ ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));
// Compute the handler entry address and jump to it.
@@ -3249,6 +3266,12 @@ Operand ApiParameterOperand(int index) {
return Operand(esp, index * kSystemPointerSize);
}
+Operand ExitFrameCallerStackSlotOperand(int index) {
+ return Operand(ebp,
+ (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
// Prepares stack to put arguments (aligns and so on). Reserves
// space for return value if needed (assumes the return value is a handle).
// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
@@ -3256,7 +3279,7 @@ Operand ApiParameterOperand(int index) {
// stores the pointer to the reserved slot into esi.
void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
ASM_CODE_COMMENT(masm);
- __ EnterApiExitFrame(argc, scratch);
+ __ EnterExitFrame(argc, StackFrame::EXIT, scratch);
if (v8_flags.debug_code) {
__ mov(esi, Immediate(base::bit_cast<int32_t>(kZapValue)));
}
@@ -3322,7 +3345,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK_EQ(stack_space, 0);
__ mov(edx, *stack_space_operand);
}
- __ LeaveApiExitFrame();
+ __ LeaveExitFrame(esi);
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
@@ -3437,7 +3460,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3509,14 +3532,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ mov(ApiParameterOperand(0), scratch);
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // There are two stack slots above the arguments we constructed on the stack:
- // the stored ebp (pushed by EnterApiExitFrame), and the return address.
- static constexpr int kStackSlotsAboveFCA = 2;
- Operand return_value_operand(
- ebp,
- (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
-
+ Operand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kUseStackSpaceOperand = 0;
Operand stack_space_operand = ApiParameterOperand(kApiArgc + 3);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3527,14 +3544,15 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -3555,7 +3573,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(scratch); // Restore return address.
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ const int kNameHandleStackSize = 1;
+ const int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
// Allocate v8::PropertyCallbackInfo object, arguments for callback and
// space for optional callback address parameter (in case CPU profiler is
@@ -3581,16 +3600,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Reserve space for optional callback address parameter.
Operand thunk_last_arg = ApiParameterOperand(2);
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
Register function_address = edx;
__ mov(function_address,
FieldOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset));
- // +3 is to skip prolog, return address and name handle.
- Operand return_value_operand(
- ebp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+ Operand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
Operand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
kStackUnwindSpace, kUseStackSpaceConstant,
@@ -4036,7 +4052,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
- __ mov(Operand(esp, 2 * kSystemPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 2 * kSystemPointerSize),
+ ecx); // InstructionStream address or 0.
__ mov(Operand(esp, 3 * kSystemPointerSize), edx); // Fp-to-sp delta.
__ Move(Operand(esp, 4 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
@@ -4198,7 +4215,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = eax;
__ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = esi;
__ mov(code_obj,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -4209,7 +4226,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODE_TYPE, kInterpreterBytecodeOffsetRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4222,7 +4239,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODE_TYPE, kInterpreterBytecodeOffsetRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
@@ -4294,8 +4311,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
kInterpreterBytecodeArrayRegister);
__ CallCFunction(get_baseline_pc, 3);
}
- __ lea(code_obj,
- FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+ __ LoadCodeEntry(code_obj, code_obj);
+ __ add(code_obj, kReturnRegister0);
__ pop(kInterpreterAccumulatorRegister);
if (is_osr) {
diff --git a/deps/v8/src/builtins/ic-callable.tq b/deps/v8/src/builtins/ic-callable.tq
index a9cc43c716..c1f7a32bd0 100644
--- a/deps/v8/src/builtins/ic-callable.tq
+++ b/deps/v8/src/builtins/ic-callable.tq
@@ -217,6 +217,8 @@ macro CastFeedbackVector(
} else if constexpr (
updateFeedbackMode == UpdateFeedbackMode::kOptionalFeedback) {
return Cast<FeedbackVector>(maybeFeedbackVector) otherwise goto Fallback;
+ } else if constexpr (updateFeedbackMode == UpdateFeedbackMode::kNoFeedback) {
+ goto Fallback;
} else {
unreachable;
}
diff --git a/deps/v8/src/builtins/iterator-from.tq b/deps/v8/src/builtins/iterator-from.tq
new file mode 100644
index 0000000000..b711c28971
--- /dev/null
+++ b/deps/v8/src/builtins/iterator-from.tq
@@ -0,0 +1,182 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace iterator {
+
+macro NewJSValidIteratorWrapper(implicit context: Context)(
+ underlying: IteratorRecord): JSValidIteratorWrapper {
+ return new JSValidIteratorWrapper{
+ map: *NativeContextSlot(ContextSlot::VALID_ITERATOR_WRAPPER_MAP_INDEX),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ underlying: underlying
+ };
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-getiteratorflattenable
+//
+// Currently never used with the async hint, so only the sync path is
+// implemented.
+transitioning macro GetIteratorFlattenable(implicit context: Context)(
+ obj: JSReceiver): IteratorRecord {
+ try {
+ // 1. If obj is not an Object, throw a TypeError exception.
+ // (Done by caller.)
+
+ // 2. Let alreadyAsync be false.
+ //
+ // (Unimplemented because the async path is unused.)
+
+ // 3. Let method be undefined.
+ //
+ // (Done below.)
+
+ // 4. If hint is async, then
+ // a. Set method to ? Get(obj, @@asyncIterator).
+ // b. Set alreadyAsync to true.
+ //
+ // (Unimplemented because unused.)
+
+ // 5. If IsCallable(method) is false, then
+ // a. Set method to ? Get(obj, @@iterator).
+ const method = GetProperty(obj, IteratorSymbolConstant());
+
+ // b. Set alreadyAsync to false.
+ //
+ // (Unimplemented because unused.)
+
+ let iterator: JSAny;
+
+ // 6. If IsCallable(method) is false, then
+ if (!Is<Callable>(method)) {
+ // a. Let iterator be obj.
+ iterator = obj;
+
+ // b. Set alreadyAsync to true.
+ //
+ // (Unimplemented because unused.)
+ } else {
+ // 7. Else,
+ // a. Let iterator be ? Call(method, obj).
+ iterator = Call(context, UnsafeCast<Callable>(method), obj);
+ }
+
+ // 8. If iterator is not an Object, throw a TypeError exception.
+ const iteratorObj = Cast<JSReceiver>(iterator)
+ otherwise goto IteratorNotObject(obj, method);
+
+ // 9. Let nextMethod be ? Get(iterator, "next").
+ const nextMethod = GetProperty(iteratorObj, kNextString);
+
+ // 10. If IsCallable(nextMethod) is false, throw a TypeError exception.
+ if (!Is<Callable>(nextMethod)) {
+ ThrowTypeError(
+ MessageTemplate::kPropertyNotFunction, nextMethod, kNextString, obj);
+ }
+
+ // 11. Let iteratorRecord be the Iterator Record { [[Iterator]]: iterator,
+ // [[NextMethod]]: nextMethod, [[Done]]: false }.
+ const iteratorRecord =
+ IteratorRecord{object: iteratorObj, next: nextMethod};
+
+ // 12. If hint is async and alreadyAsync is false, then
+ // a. Return CreateAsyncFromSyncIterator(iteratorRecord).
+ //
+ // (Unimplemented because unused.)
+
+ // 13. Return iteratorRecord.
+ return iteratorRecord;
+ } label IteratorNotObject(obj: JSAny, method: JSAny) deferred {
+ if (Is<Callable>(method)) {
+ ThrowTypeError(MessageTemplate::kSymbolIteratorInvalid);
+ } else {
+ ThrowTypeError(MessageTemplate::kNotIterable, obj);
+ }
+ }
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-iterator.from
+transitioning javascript builtin IteratorFrom(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(objArg: JSAny): JSReceiver {
+ // 1. If O is a String, set O to ! ToObject(O).
+ let obj: JSReceiver;
+ typeswitch (objArg) {
+ case (o: String): {
+ obj = ToObject_Inline(context, o);
+ }
+ case (o: JSReceiver): {
+ obj = o;
+ }
+ case (JSAny): {
+ ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Iterator.from');
+ }
+ }
+
+ // 2. Let iteratorRecord be ? GetIteratorFlattenable(O, sync).
+ const iteratorRecord = GetIteratorFlattenable(obj);
+
+ // 3. Let hasInstance be ? OrdinaryHasInstance(%Iterator%,
+ // iteratorRecord.[[Iterator]]).
+ const hasInstance = function::OrdinaryHasInstance(
+ context, GetIteratorFunction(), iteratorRecord.object);
+
+ // 4. If hasInstance is true, then
+ if (hasInstance == True) {
+ // a. Return iteratorRecord.[[Iterator]].
+ return iteratorRecord.object;
+ }
+
+ // 5. Let wrapper be OrdinaryObjectCreate(%WrapForValidIteratorPrototype%, «
+ // [[Iterated]] »).
+ // 6. Set wrapper.[[Iterated]] to iteratorRecord.
+ // 7. Return wrapper.
+ return NewJSValidIteratorWrapper(iteratorRecord);
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-wrapforvaliditeratorprototype.next
+transitioning javascript builtin WrapForValidIteratorPrototypeNext(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Let O be this value.
+ // 2. Perform ? RequireInternalSlot(O, [[Iterated]]).
+ const o = Cast<JSValidIteratorWrapper>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver,
+ '%WrapForValidIteratorPrototype%.next', receiver);
+
+ // 3. Let iteratorRecord be O.[[Iterated]].
+ const iteratorRecord = o.underlying;
+
+ // 4. Return ? Call(iteratorRecord.[[NextMethod]],
+ // iteratorRecord.[[Iterator]]).
+ return Call(context, iteratorRecord.next, iteratorRecord.object);
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-wrapforvaliditeratorprototype.return
+transitioning javascript builtin WrapForValidIteratorPrototypeReturn(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ try {
+ // 1. Let O be this value.
+ // 2. Perform ? RequireInternalSlot(O, [[Iterated]]).
+ const o = Cast<JSValidIteratorWrapper>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver,
+ '%WrapForValidIteratorPrototype%.return', receiver);
+
+ // 3. Let iterator be O.[[Iterated]].[[Iterator]].
+ const iterator = o.underlying.object;
+
+ // 4. Assert: iterator is an Object.
+ // 5. Let returnMethod be ? GetMethod(iterator, "return").
+ const returnMethod =
+ GetMethod(iterator, kReturnString) otherwise ReturnMethodUndefined;
+
+ // 7. Return ? Call(returnMethod, iterator).
+ return Call(context, returnMethod, iterator);
+ } label ReturnMethodUndefined {
+ // 6. If returnMethod is undefined, then
+ // a. Return CreateIterResultObject(undefined, true).
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+}
+
+} // namespace iterator
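The new iterator-from.tq above implements Iterator.from together with the next/return methods of %WrapForValidIteratorPrototype%. Below is a minimal TypeScript sketch of the JS-level behaviour these builtins produce, assuming a runtime that ships the TC39 iterator-helpers proposal (for example a V8 build with the corresponding --harmony flag). The names IteratorCtor, plain and gen are illustrative only, and the "as any" casts are there solely because standard TypeScript lib typings do not yet describe the Iterator global.

// Sketch only; assumes the iterator-helpers proposal is available at runtime.
const IteratorCtor = (globalThis as any).Iterator;

// A plain object with a next() method does not inherit from
// %Iterator.prototype%, so Iterator.from wraps it in a
// %WrapForValidIteratorPrototype% object whose next()/return() forward to
// the underlying iterator record.
let i = 0;
const plain = { next: () => ({ value: i++, done: i > 3 }) };
const wrapped = IteratorCtor.from(plain);
console.log(wrapped instanceof IteratorCtor); // true
console.log([...wrapped]);                    // [0, 1, 2]

// Strings are first coerced with ToObject, then their @@iterator is used.
console.log([...IteratorCtor.from("abc")]);   // ["a", "b", "c"]

// Anything that already inherits from %Iterator.prototype% (here a
// generator object) is returned unchanged.
const gen = (function* () { yield 1; })();
console.log(IteratorCtor.from(gen) === gen);  // true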
diff --git a/deps/v8/src/builtins/iterator-helpers.tq b/deps/v8/src/builtins/iterator-helpers.tq
new file mode 100644
index 0000000000..6998bc689f
--- /dev/null
+++ b/deps/v8/src/builtins/iterator-helpers.tq
@@ -0,0 +1,456 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// In GetIteratorDirect and the helpers below, this file implements the changes
+// in [1] and [2], which are highly likely to get consensus at the March 2023
+// TC39 meeting.
+//
+// [1] https://github.com/tc39/proposal-iterator-helpers/pull/265
+// [2] https://github.com/tc39/proposal-iterator-helpers/pull/267
+//
+// TODO(v8:13556): Remove this comment once PR is merged.
+
+// --- Utilities
+
+namespace iterator {
+
+// https://tc39.es/proposal-iterator-helpers/#sec-getiteratordirect
+transitioning macro GetIteratorDirect(implicit context: Context)(
+ obj: JSReceiver): IteratorRecord {
+ // 1. Let nextMethod be ? Get(obj, "next").
+ const nextProp = GetProperty(obj, kNextString);
+
+ // 2. If IsCallable(nextMethod) is false, throw a TypeError exception.
+ const nextMethod = Cast<Callable>(nextProp)
+ otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, 'next');
+
+ // 3. Let iteratorRecord be Record { [[Iterator]]: obj, [[NextMethod]]:
+ // nextMethod, [[Done]]: false }.
+ // 4. Return iteratorRecord.
+ return IteratorRecord{object: obj, next: nextMethod};
+}
+
+// --- Dispatch functions for all iterator helpers
+
+// https://tc39.es/proposal-iterator-helpers/#sec-%iteratorhelperprototype%.next
+transitioning javascript builtin IteratorHelperPrototypeNext(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Return ? GeneratorResume(this value, undefined, "Iterator Helper").
+
+ // Iterator helpers are specified as generators but we implement them as
+ // direct iterators.
+ typeswitch (receiver) {
+ case (mapHelper: JSIteratorMapHelper): {
+ return IteratorMapHelperNext(mapHelper);
+ }
+ case (filterHelper: JSIteratorFilterHelper): {
+ return IteratorFilterHelperNext(filterHelper);
+ }
+ case (takeHelper: JSIteratorTakeHelper): {
+ return IteratorTakeHelperNext(takeHelper);
+ }
+ case (dropHelper: JSIteratorDropHelper): {
+ return IteratorDropHelperNext(dropHelper);
+ }
+ case (Object): {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver,
+ 'Iterator Helper.prototype.next', receiver);
+ }
+ }
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-%iteratorhelperprototype%.return
+transitioning javascript builtin IteratorHelperPrototypeReturn(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSObject {
+ // 1. Let O be this value.
+ // 2. Perform ? RequireInternalSlot(O, [[UnderlyingIterator]]).
+ // 3. Assert: O has a [[GeneratorState]] slot.
+ // 4. If O.[[GeneratorState]] is suspendedStart, then
+ // a. Set O.[[GeneratorState]] to completed.
+ // b. Perform ? IteratorClose(O.[[UnderlyingIterator]],
+ // NormalCompletion(unused)).
+ // c. Return CreateIterResultObject(undefined, true).
+ // 5. Let C be Completion { [[Type]]: return, [[Value]]: undefined,
+ // [[Target]]: empty }.
+ // 6. Return ? GeneratorResumeAbrupt(O, C, "Iterator Helper").
+
+ // Iterator helpers are specified as generators. The net effect of this method
+  // is to close the underlying iterator and return { value: undefined, done: true }.
+ const helper = Cast<JSIteratorHelper>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver,
+ 'Iterator Helper.prototype.return', receiver);
+
+ IteratorCloseOnException(helper.underlying);
+ return AllocateJSIteratorResult(Undefined, True);
+}
+
+// --- Map helper
+
+macro NewJSIteratorMapHelper(implicit context: Context)(
+ underlying: IteratorRecord, mapper: Callable): JSIteratorMapHelper {
+ return new JSIteratorMapHelper{
+ map: *NativeContextSlot(ContextSlot::ITERATOR_MAP_HELPER_MAP_INDEX),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ underlying: underlying,
+ mapper: mapper,
+ counter: 0
+ };
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-iteratorprototype.map
+transitioning javascript builtin IteratorPrototypeMap(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(mapper: JSAny): JSIteratorMapHelper {
+ const methodName: constexpr string = 'Iterator.prototype.map';
+
+ // 1. Let O be the this value.
+ // 2. If O is not an Object, throw a TypeError exception.
+ const o = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, methodName);
+
+ // 3. If IsCallable(mapper) is false, throw a TypeError exception.
+ const mapper = Cast<Callable>(mapper)
+ otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName);
+
+ // 4. Let iterated be ? GetIteratorDirect(O).
+ const iterated = GetIteratorDirect(o);
+
+ // Step 5 implemented in IteratorMapHelperNext
+
+ // 6. Let result be CreateIteratorFromClosure(closure, "Iterator Helper",
+ // %IteratorHelperPrototype%).
+ // 7. Set result.[[UnderlyingIterator]] to iterated.
+ // 8. Return result.
+ return NewJSIteratorMapHelper(iterated, mapper);
+}
+
+transitioning builtin IteratorMapHelperNext(implicit context: Context)(
+ helper: JSIteratorMapHelper): JSAny {
+ // a. Let counter be 0.
+ // (Done when creating JSIteratorMapHelper.)
+
+ const fastIteratorResultMap = GetIteratorResultMap();
+ const underlying = helper.underlying;
+ const counter = helper.counter;
+
+ // b. Repeat,
+ let next: JSReceiver;
+ try {
+ // i. Let next be ? IteratorStep(iterated).
+ next = IteratorStep(underlying, fastIteratorResultMap)
+ otherwise Done;
+ } label Done {
+ // ii. If next is false, return undefined.
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+
+ // iii. Let value be ? IteratorValue(next).
+ const value = IteratorValue(next, fastIteratorResultMap);
+
+ try {
+ // iv. Let mapped be Completion(
+ // Call(mapper, undefined, « value, 𝔽(counter) »)).
+ const mapped = Call(context, helper.mapper, Undefined, value, counter);
+
+ // viii. Set counter to counter + 1.
+ // (Done out of order. Iterator helpers are specified as generators with
+ // yields but we implement them as direct iterators.)
+ helper.counter = counter + 1;
+
+ // vi. Let completion be Completion(Yield(mapped)).
+ return AllocateJSIteratorResult(mapped, False);
+
+ // vii. IfAbruptCloseIterator(completion, iterated).
+ // (Done in IteratorHelperPrototypeReturn.)
+ } catch (e, message) {
+ // v. IfAbruptCloseIterator(mapped, iterated).
+ IteratorCloseOnException(underlying);
+ ReThrowWithMessage(context, e, message);
+ }
+}
+
+// --- Filter helper
+
+macro NewJSIteratorFilterHelper(implicit context: Context)(
+ underlying: IteratorRecord, predicate: Callable): JSIteratorFilterHelper {
+ return new JSIteratorFilterHelper{
+ map: *NativeContextSlot(ContextSlot::ITERATOR_FILTER_HELPER_MAP_INDEX),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ underlying: underlying,
+ predicate: predicate,
+ counter: 0
+ };
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-iteratorprototype.filter
+transitioning javascript builtin IteratorPrototypeFilter(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(predicate: JSAny): JSIteratorFilterHelper {
+ const methodName: constexpr string = 'Iterator.prototype.filter';
+
+ // 1. Let O be the this value.
+ // 2. If O is not an Object, throw a TypeError exception.
+ const o = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, methodName);
+
+ // 3. If IsCallable(predicate) is false, throw a TypeError exception.
+ const predicate = Cast<Callable>(predicate)
+ otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName);
+
+ // 4. Let iterated be ? GetIteratorDirect(O).
+ const iterated = GetIteratorDirect(o);
+
+ // Step 5 implemented in IteratorFilterHelperNext
+
+ // 6. Let result be CreateIteratorFromClosure(closure, "Iterator Helper",
+ // %IteratorHelperPrototype%).
+ // 7. Set result.[[UnderlyingIterator]] to iterated.
+ // 8. Return result.
+ return NewJSIteratorFilterHelper(iterated, predicate);
+}
+
+transitioning builtin IteratorFilterHelperNext(implicit context: Context)(
+ helper: JSIteratorFilterHelper): JSAny {
+ // a. Let counter be 0.
+ // (Done when creating JSIteratorFilterHelper.)
+
+ const fastIteratorResultMap = GetIteratorResultMap();
+ const underlying = helper.underlying;
+
+ while (true) {
+ const counter = helper.counter;
+
+ // b. Repeat,
+ let next: JSReceiver;
+ try {
+ // i. Let next be ? IteratorStep(iterated).
+ next = IteratorStep(underlying, fastIteratorResultMap)
+ otherwise Done;
+ } label Done {
+ // ii. If next is false, return undefined.
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+
+ // iii. Let value be ? IteratorValue(next).
+ const value = IteratorValue(next, fastIteratorResultMap);
+
+ try {
+ // iv. Let selected be Completion(
+ // Call(predicate, undefined, « value, 𝔽(counter) »)).
+ const selected =
+ Call(context, helper.predicate, Undefined, value, counter);
+
+ // vii. Set counter to counter + 1.
+ // (Done out of order. Iterator helpers are specified as generators with
+ // yields but we implement them as direct iterators.)
+ helper.counter = counter + 1;
+
+ // vi. If ToBoolean(selected) is true, then
+ if (ToBoolean(selected)) {
+ // 1. Let completion be Completion(Yield(value)).
+ return AllocateJSIteratorResult(value, False);
+ // 2. IfAbruptCloseIterator(completion, iterated).
+ // (Done in IteratorHelperPrototypeReturn.)
+ }
+ } catch (e, message) {
+ // v. IfAbruptCloseIterator(selected, iterated).
+ IteratorCloseOnException(underlying);
+ ReThrowWithMessage(context, e, message);
+ }
+ }
+ unreachable;
+}
+
+// --- Take helper
+
+macro NewJSIteratorTakeHelper(implicit context: Context)(
+ underlying: IteratorRecord, limit: Number): JSIteratorTakeHelper {
+ return new JSIteratorTakeHelper{
+ map: *NativeContextSlot(ContextSlot::ITERATOR_TAKE_HELPER_MAP_INDEX),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ underlying: underlying,
+ remaining: limit
+ };
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-iteratorprototype.take
+transitioning javascript builtin IteratorPrototypeTake(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(limit: JSAny): JSIteratorTakeHelper {
+ try {
+ const methodName: constexpr string = 'Iterator.prototype.take';
+
+ // 1. Let O be the this value.
+ // 2. If O is not an Object, throw a TypeError exception.
+ const o = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, methodName);
+
+ // 3. Let numLimit be ? ToNumber(limit).
+ const numLimit = ToNumber_Inline(limit);
+
+ // 4. If numLimit is NaN, throw a RangeError exception.
+ if (NumberIsNaN(numLimit)) goto RangeError;
+
+ // 5. Let integerLimit be ! ToIntegerOrInfinity(numLimit).
+ const integerLimit = ToInteger_Inline(numLimit);
+
+ // 6. If integerLimit < 0, throw a RangeError exception.
+ if (integerLimit < 0) goto RangeError;
+
+ // 5. Let iterated be ? GetIteratorDirect(O).
+ const iterated = GetIteratorDirect(o);
+
+ // Step 6 implemented in IteratorTakeHelperNext
+
+ // 7. Let result be CreateIteratorFromClosure(closure, "Iterator Helper",
+ // %IteratorHelperPrototype%).
+ // 8. Set result.[[UnderlyingIterator]] to iterated.
+ // 9. Return result.
+ return NewJSIteratorTakeHelper(iterated, integerLimit);
+ } label RangeError deferred {
+ ThrowRangeError(MessageTemplate::kMustBePositive, limit);
+ }
+}
+
+transitioning builtin IteratorTakeHelperNext(implicit context: Context)(
+ helper: JSIteratorTakeHelper): JSAny {
+ // a. Let remaining be integerLimit.
+ // (Done when creating JSIteratorTakeHelper.)
+
+ const fastIteratorResultMap = GetIteratorResultMap();
+ const underlying = helper.underlying;
+ const remaining = helper.remaining;
+
+ // b. Repeat,
+ let next: JSReceiver;
+
+ // i. If remaining is 0, then
+ if (remaining == 0) {
+ // 1. Return ? IteratorClose(iterated, NormalCompletion(undefined)).
+ IteratorClose(underlying);
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+
+ // ii. If remaining is not +∞, then
+ if (!NumberIsSomeInfinity(remaining)) {
+ // 1. Set remaining to remaining - 1.
+ helper.remaining = remaining - 1;
+ }
+
+ try {
+ // iii. Let next be ? IteratorStep(iterated).
+ next = IteratorStep(underlying, fastIteratorResultMap)
+ otherwise Done;
+ } label Done {
+ // iv. If next is false, return undefined.
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+
+ // v. Let completion be Completion(Yield(? IteratorValue(next))).
+ const value = IteratorValue(next, fastIteratorResultMap);
+ return AllocateJSIteratorResult(value, False);
+
+ // vi. IfAbruptCloseIterator(completion, iterated).
+ // (Done in IteratorHelperPrototypeReturn.)
+}
+
+// --- Drop helper
+
+macro NewJSIteratorDropHelper(implicit context: Context)(
+ underlying: IteratorRecord, limit: Number): JSIteratorDropHelper {
+ return new JSIteratorDropHelper{
+ map: *NativeContextSlot(ContextSlot::ITERATOR_DROP_HELPER_MAP_INDEX),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ underlying: underlying,
+ remaining: limit
+ };
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-iteratorprototype.drop
+transitioning javascript builtin IteratorPrototypeDrop(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(limit: JSAny): JSIteratorDropHelper {
+ try {
+ const methodName: constexpr string = 'Iterator.prototype.drop';
+
+ // 1. Let O be the this value.
+ // 2. If O is not an Object, throw a TypeError exception.
+ const o = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, methodName);
+
+ // 3. Let numLimit be ? ToNumber(limit).
+ const numLimit = ToNumber_Inline(limit);
+
+ // 4. If numLimit is NaN, throw a RangeError exception.
+ if (NumberIsNaN(numLimit)) goto RangeError;
+
+ // 5. Let integerLimit be ! ToIntegerOrInfinity(numLimit).
+ const integerLimit = ToInteger_Inline(numLimit);
+
+ // 6. If integerLimit < 0, throw a RangeError exception.
+ if (integerLimit < 0) goto RangeError;
+
+ // 5. Let iterated be ? GetIteratorDirect(O).
+ const iterated = GetIteratorDirect(o);
+
+ // Step 6 implemented in IteratorDropHelperNext
+
+ // 7. Let result be CreateIteratorFromClosure(closure, "Iterator Helper",
+ // %IteratorHelperPrototype%).
+ // 8. Set result.[[UnderlyingIterator]] to iterated.
+ // 9. Return result.
+ return NewJSIteratorDropHelper(iterated, integerLimit);
+ } label RangeError deferred {
+ ThrowRangeError(MessageTemplate::kMustBePositive, limit);
+ }
+}
+
+transitioning builtin IteratorDropHelperNext(implicit context: Context)(
+ helper: JSIteratorDropHelper): JSAny {
+ // a. Let remaining be integerLimit.
+ // (Done when creating JSIteratorDropHelper.)
+
+ const fastIteratorResultMap = GetIteratorResultMap();
+ const underlying = helper.underlying;
+ let remaining = helper.remaining;
+ let next: JSReceiver;
+
+ // b. Repeat, while remaining > 0,
+ try {
+ while (remaining > 0) {
+ // i. If remaining is not +∞, then
+ if (!NumberIsSomeInfinity(remaining)) {
+ // 1. Set remaining to remaining - 1.
+ remaining = remaining - 1;
+ helper.remaining = remaining;
+ }
+
+ // ii. Let next be ? IteratorStep(iterated).
+ IteratorStep(underlying, fastIteratorResultMap)
+ otherwise Done;
+ }
+
+ // c. Repeat,
+ // i. Let next be ? IteratorStep(iterated).
+ next = IteratorStep(underlying, fastIteratorResultMap)
+ otherwise Done;
+ } label Done {
+ // ii. If next is false, return undefined.
+ return AllocateJSIteratorResult(Undefined, True);
+ }
+
+ // iii. Let completion be Completion(Yield(? IteratorValue(next))).
+ const value = IteratorValue(next, fastIteratorResultMap);
+ return AllocateJSIteratorResult(value, False);
+
+ // iv. IfAbruptCloseIterator(completion, iterated).
+ // (Done in IteratorHelperPrototypeReturn.)
+}
+
+} // namespace iterator
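The helpers defined in iterator-helpers.tq above (map, filter, take, drop) are dispatched through IteratorHelperPrototypeNext rather than being real generators, but the observable behaviour matches the proposal. A short TypeScript sketch follows, under the same runtime assumption as the previous example; naturals is an illustrative helper, not part of the API.

// Sketch only; same runtime assumption as above.
function* naturals(): Generator<number> {
  for (let n = 0; ; n++) yield n;
}

// Helpers are lazy: values are pulled from the underlying iterator only as
// next() is called, so an infinite source is fine.
const evenSquares = (naturals() as any)
    .map((x: number) => x * x)           // 0, 1, 4, 9, 16, 25, 36, ...
    .filter((x: number) => x % 2 === 0)  // 0, 4, 16, 36, ...
    .take(3);                            // closes the underlying after 3 results
console.log([...evenSquares]);           // [0, 4, 16]

// drop(n) steps past the first n results before yielding anything.
console.log([...(naturals() as any).drop(5).take(2)]); // [5, 6]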
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index b169da57a4..66f99aa77d 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -117,14 +117,16 @@ transitioning builtin CallIteratorWithFeedback(
}
// https://tc39.es/ecma262/#sec-iteratorclose
+// IteratorCloseOnException should be used to close iterators due to exceptions
+// being thrown.
@export
transitioning macro IteratorCloseOnException(implicit context: Context)(
iterator: IteratorRecord): void {
try {
- // 4. Let innerResult be GetMethod(iterator, "return").
+ // 3. Let innerResult be GetMethod(iterator, "return").
const method = GetProperty(iterator.object, kReturnString);
- // 5. If innerResult.[[Type]] is normal, then
+ // 4. If innerResult.[[Type]] is normal, then
// a. Let return be innerResult.[[Value]].
// b. If return is undefined, return Completion(completion).
if (method == Undefined || method == Null) return;
@@ -136,6 +138,50 @@ transitioning macro IteratorCloseOnException(implicit context: Context)(
// Swallow the exception.
}
- // (If completion.[[Type]] is throw) return Completion(completion).
+ // (5. If completion.[[Type]] is throw) return Completion(completion).
}
+
+@export
+transitioning macro IteratorClose(implicit context: Context)(
+ iterator: IteratorRecord): void {
+ // 3. Let innerResult be GetMethod(iterator, "return").
+ const method = GetProperty(iterator.object, kReturnString);
+
+ // 4. If innerResult.[[Type]] is normal, then
+ // a. Let return be innerResult.[[Value]].
+ // b. If return is undefined, return Completion(completion).
+ if (method == Undefined || method == Null) return;
+
+ // c. Set innerResult to Call(return, iterator).
+ const result = Call(context, method, iterator.object);
+
+ // 5. If completion.[[Type]] is throw, return Completion(completion).
+ // It is handled in IteratorCloseOnException.
+
+ // 7. If innerResult.[[Value]] is not an Object, throw a TypeError
+ // exception.
+ Cast<Object>(result)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'return');
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-iterator
+transitioning javascript builtin IteratorConstructor(
+ js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
+ target: JSFunction)(): JSObject {
+ const methodName: constexpr string = 'Iterator';
+
+ // 1. If NewTarget is undefined or the active function object, throw a
+ // TypeError exception.
+ if (newTarget == Undefined) {
+ ThrowTypeError(MessageTemplate::kConstructorNotFunction, methodName);
+ }
+ if (newTarget == target) {
+ ThrowTypeError(MessageTemplate::kConstructAbstractClass, methodName);
+ }
+
+ // 2. Return ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%Iterator.prototype%").
+ const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
+ return AllocateFastOrSlowJSObjectFromMap(map);
}
+} // namespace iterator
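The iterator.tq hunk above adds an IteratorClose macro that tolerates a missing return method and an abstract Iterator constructor that rejects both plain calls and direct construction. A brief TypeScript sketch of what those NewTarget checks mean at the JS level, under the same runtime assumption as the examples above; Range is an illustrative subclass.

// Sketch only; same runtime assumption as the previous examples.
const IteratorCtor: any = (globalThis as any).Iterator;

// NewTarget is the Iterator function itself here, so the constructor throws;
// calling IteratorCtor() without new throws as well (NewTarget is undefined).
try {
  new IteratorCtor();
} catch (e) {
  console.log(e instanceof TypeError); // true
}

// A subclass reaches the constructor with NewTarget set to the subclass, so
// OrdinaryCreateFromConstructor succeeds and instances inherit the helper
// methods from %Iterator.prototype%.
class Range extends IteratorCtor {
  n = 0;
  next() {
    return this.n < 3 ? { value: this.n++, done: false }
                      : { value: undefined, done: true };
  }
}
console.log([...(new Range() as any)]); // [0, 1, 2]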
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
index 3c0b23ba2a..42f3f980a5 100644
--- a/deps/v8/src/builtins/loong64/builtins-loong64.cc
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -57,7 +57,7 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
__ Sub_d(scratch, argc, Operand(kJSArgcReceiverSlots));
__ Branch(&entry);
__ bind(&loop);
- __ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ __ Alsl_d(scratch2, scratch, array, kSystemPointerSizeLog2, t7);
__ Ld_d(scratch2, MemOperand(scratch2, 0));
if (element_type == ArgumentsElementType::kHandle) {
__ Ld_d(scratch2, MemOperand(scratch2, 0));
@@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(t3, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver, t3);
+ __ DropArguments(t3, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver, t3);
__ Ret();
}
@@ -142,14 +142,15 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(a3);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- a1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- a1 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
__ JumpIfIsInRange(
@@ -167,11 +168,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0: receiver
- // -- Slot 4 / sp[0*kPointerSize]: new target
- // -- Slot 3 / sp[1*kPointerSize]: padding
- // -- Slot 2 / sp[2*kPointerSize]: constructor function
- // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize]: context
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -197,12 +198,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3: new target
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -232,11 +233,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- s0: constructor result
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -257,7 +258,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ Ld_d(a0, MemOperand(sp, 0 * kSystemPointerSize));
__ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_and_return);
@@ -267,8 +268,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(a1, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver, a4);
+ __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@@ -318,18 +319,23 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
+
+#ifndef V8_JITLESS
if (v8_flags.debug_code) {
Label not_baseline;
- __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ Branch(is_baseline);
__ bind(&not_baseline);
} else {
- __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
}
+#endif // !V8_JITLESS
+
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
- __ Ld_d(sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ LoadTaggedField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
}
@@ -342,15 +348,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- ra : return address
// -----------------------------------
// Store input value into generator object.
- __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ StoreTaggedField(
+ a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Check that a1 is still valid, RecordWrite might have clobbered it.
__ AssertGeneratorObject(a1);
// Load suspended function and context.
- __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
- __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -387,33 +395,38 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
- __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld_hu(
a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
__ Sub_d(a3, a3, Operand(kJSArgcReceiverSlots));
- __ Ld_d(t1, FieldMemOperand(
- a1, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedField(
+ t1,
+ FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
__ bind(&loop);
__ Sub_d(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ Alsl_d(kScratchReg, a3, t1, kPointerSizeLog2, t7);
- __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Alsl_d(kScratchReg, a3, t1, kTaggedSizeLog2, t7);
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
// Push receiver.
- __ Ld_d(kScratchReg,
- FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
- __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedField(
+ a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, t5, &is_baseline);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
@@ -423,7 +436,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld_hu(a0, FieldMemOperand(
a0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
@@ -432,7 +446,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(a3, a1);
__ Move(a1, a4);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ JumpCodeObject(a2);
}
@@ -445,7 +459,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1);
}
- __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Branch(&stepping_prepared);
__ bind(&prepare_step_in_suspended_generator);
@@ -455,7 +470,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(a1);
}
- __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Branch(&stepping_prepared);
__ bind(&stack_overflow);
@@ -484,7 +500,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause r2 to become negative.
__ sub_d(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ slli_d(scratch2, argc, kPointerSizeLog2);
+ __ slli_d(scratch2, argc, kSystemPointerSizeLog2);
__ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
// Out of stack space.
@@ -534,6 +550,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Initialize the root register.
// C calling convention. The first argument is passed in a0.
__ mov(kRootRegister, a0);
+
+#ifdef V8_COMPRESS_POINTERS
+ // Initialize the pointer cage base register.
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
+#endif
}
// a1: entry address
@@ -554,12 +576,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ St_d(zero_reg, MemOperand(s5, 0));
// Set up frame pointer for the frame to be pushed.
- __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+ __ addi_d(fp, sp, -EntryFrameConstants::kNextExitFrameFPOffset);
// Registers:
// either
@@ -672,7 +694,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ St_d(a5, MemOperand(a4, 0));
// Reset the stack to the callee saved registers.
- __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+ __ addi_d(sp, sp, -EntryFrameConstants::kNextExitFrameFPOffset);
// Restore callee-saved fpu registers.
__ MultiPopFPU(kCalleeSavedFPU);
@@ -750,8 +772,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(s3, a4);
__ mov(s4, a4);
__ mov(s5, a4);
+#ifndef V8_COMPRESS_POINTERS
+ __ mov(s8, a4);
+#endif
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
+  // s8 is pointer cage base register (kPtrComprCageBaseRegister).
// Invoke the code.
Handle<Code> builtin = is_construct
@@ -792,7 +818,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Compute the size of the actual parameters + receiver (in bytes).
__ Ld_d(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
- __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ slli_d(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -803,8 +829,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -915,10 +941,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = temps.Acquire();
- __ Ld_d(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld_d(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1080,13 +1106,13 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = a1;
- Register feedback_vector = a2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ Ld_d(kScratchReg,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ld_d(
+ __ LoadTaggedField(
+ kScratchReg,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
@@ -1100,16 +1126,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
__ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
- __ Ld_d(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld_d(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ Register feedback_vector = a2;
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1139,6 +1168,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1174,7 +1209,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ Push(kInterpreterAccumulatorRegister);
// Continue loop if not done.
__ bind(&loop_check);
- __ Sub_d(a4, a4, Operand(kPointerSize));
+ __ Sub_d(a4, a4, Operand(kSystemPointerSize));
__ Branch(&loop_header, ge, a4, Operand(zero_reg));
}
@@ -1186,7 +1221,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
Operand(zero_reg));
- __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
+ __ Alsl_d(a5, a5, fp, kSystemPointerSizeLog2, t7);
__ St_d(a3, MemOperand(a5, 0));
__ bind(&no_incoming_new_target_or_generator_register);
@@ -1209,7 +1244,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
kInterpreterBytecodeOffsetRegister);
__ Ld_bu(a7, MemOperand(t5, 0));
__ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
- kPointerSizeLog2, t7);
+ kSystemPointerSizeLog2, t7);
__ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
__ Call(kJavaScriptCallCodeStartRegister);
@@ -1275,21 +1310,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ Ld_d(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld_d(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ Ld_d(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedField(
+ t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1306,6 +1344,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1323,12 +1362,12 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register scratch, Register scratch2) {
// Find the address of the last argument.
__ Sub_d(scratch, num_args, Operand(1));
- __ slli_d(scratch, scratch, kPointerSizeLog2);
+ __ slli_d(scratch, scratch, kSystemPointerSizeLog2);
__ Sub_d(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, scratch2,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1462,16 +1501,18 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedField(
+ t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(t0, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister);
__ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
Operand(INTERPRETER_DATA_TYPE));
- __ Ld_d(t0,
- FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
- __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadTaggedField(
+ t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ LoadCodeEntry(t0, t0);
__ Branch(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -1520,7 +1561,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Add_d(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Ld_bu(a7, MemOperand(a1, 0));
- __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7);
+ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kSystemPointerSizeLog2,
+ t7);
__ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0));
__ Jump(kJavaScriptCallCodeStartRegister);
}
@@ -1590,11 +1632,11 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
} else {
// Overwrite the hole inserted by the deoptimizer with the return value
// from the LAZY deopt point.
- __ St_d(
- a0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
+ __ St_d(a0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() *
+ kSystemPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
}
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
@@ -1658,8 +1700,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
- __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
- __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ld_d(a0, MemOperand(sp, 0 * kSystemPointerSize));
+ __ Add_d(sp, sp, Operand(1 * kSystemPointerSize)); // Remove state.
__ Ret();
}
@@ -1682,10 +1724,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
- __ Branch(&jump_to_optimized_code, ne, maybe_target_code,
- Operand(Smi::zero()));
+ // precondition here is: if maybe_target_code is a InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
+ __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code,
+ Operand(Smi::zero()));
}
ASM_CODE_COMMENT(masm);
@@ -1695,7 +1737,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
}
// If the code object is null, just return to the caller.
- __ Ret(eq, maybe_target_code, Operand(Smi::zero()));
+ __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code,
+ Operand(Smi::zero()));
+ __ Ret();
+
__ bind(&jump_to_optimized_code);
DCHECK_EQ(maybe_target_code, a0); // Already in the right spot.
@@ -1724,21 +1769,23 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ld_d(a1, MemOperand(maybe_target_code,
- Code::kDeoptimizationDataOrInterpreterDataOffset -
- kHeapObjectTag));
+ __ LoadTaggedField(
+ a1, MemOperand(maybe_target_code,
+ Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
+ __ SmiUntagField(a1,
+ MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
+
+ __ LoadCodeEntry(maybe_target_code, maybe_target_code);
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ Add_d(maybe_target_code, maybe_target_code, a1);
- Generate_OSREntry(masm, maybe_target_code,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Compute the target address = code_entry + osr_offset
+ // <entry_addr> = <code_entry> + <osr_offset>
+ Generate_OSREntry(masm, maybe_target_code, Operand(a1));
}
} // namespace
@@ -1782,16 +1829,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// present) instead.
{
__ Sub_d(scratch, argc, JSParameterCount(0));
- __ Ld_d(this_arg, MemOperand(sp, kPointerSize));
- __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(this_arg, MemOperand(sp, kSystemPointerSize));
+ __ Ld_d(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
__ Movz(arg_array, undefined_value, scratch); // if argc == 0
__ Movz(this_arg, undefined_value, scratch); // if argc == 0
__ Sub_d(scratch, scratch, Operand(1));
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
__ Ld_d(receiver, MemOperand(sp, 0));
__ DropArgumentsAndPushNewReceiver(argc, this_arg,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1807,8 +1854,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
- __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
+ __ LoadRoot(scratch, RootIndex::kNullValue);
+ __ CompareTaggedAndBranch(&no_arguments, eq, arg_array, Operand(scratch));
+ __ CompareTaggedAndBranch(&no_arguments, eq, arg_array,
+ Operand(undefined_value));
// 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1872,9 +1921,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// consistent state for a simple pop operation.
__ Sub_d(scratch, argc, Operand(JSParameterCount(0)));
- __ Ld_d(target, MemOperand(sp, kPointerSize));
- __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize));
- __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
+ __ Ld_d(target, MemOperand(sp, kSystemPointerSize));
+ __ Ld_d(this_argument, MemOperand(sp, 2 * kSystemPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
__ Movz(arguments_list, undefined_value, scratch); // if argc == 0
__ Movz(this_argument, undefined_value, scratch); // if argc == 0
__ Movz(target, undefined_value, scratch); // if argc == 0
@@ -1885,8 +1934,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, this_argument,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1932,9 +1981,9 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// consistent state for a simple pop operation.
__ Sub_d(scratch, argc, Operand(JSParameterCount(0)));
- __ Ld_d(target, MemOperand(sp, kPointerSize));
- __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize));
- __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
+ __ Ld_d(target, MemOperand(sp, kSystemPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 2 * kSystemPointerSize));
+ __ Ld_d(new_target, MemOperand(sp, 3 * kSystemPointerSize));
__ Movz(arguments_list, undefined_value, scratch); // if argc == 0
__ Movz(new_target, undefined_value, scratch); // if argc == 0
__ Movz(target, undefined_value, scratch); // if argc == 0
@@ -1945,8 +1994,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Movz(new_target, target, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, undefined_value,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1984,7 +2033,7 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
Register old_sp = scratch1;
Register new_space = scratch2;
__ mov(old_sp, sp);
- __ slli_d(new_space, count, kPointerSizeLog2);
+ __ slli_d(new_space, count, kSystemPointerSizeLog2);
__ Sub_d(sp, sp, Operand(new_space));
Register end = scratch2;
@@ -2055,13 +2104,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
__ Branch(&done, eq, len, Operand(zero_reg));
- __ slli_d(scratch, len, kPointerSizeLog2);
+ __ slli_d(scratch, len, kSystemPointerSizeLog2);
__ Sub_d(scratch, sp, Operand(scratch));
__ LoadRoot(t1, RootIndex::kTheHoleValue);
__ bind(&loop);
- __ Ld_d(a5, MemOperand(src, 0));
- __ addi_d(src, src, kPointerSize);
- __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadTaggedField(a5, MemOperand(src, 0));
+ __ addi_d(src, src, kTaggedSize);
+ __ CompareTaggedAndBranch(&push, ne, a5, Operand(t1));
__ LoadRoot(a5, RootIndex::kUndefinedValue);
__ bind(&push);
__ St_d(a5, MemOperand(a7, 0));
@@ -2093,7 +2142,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(a3, &new_target_not_constructor);
- __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadTaggedField(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
__ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
@@ -2140,9 +2189,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&loop);
{
__ Sub_w(a7, a7, Operand(1));
- __ Alsl_d(t0, a7, a6, kPointerSizeLog2, t7);
+ __ Alsl_d(t0, a7, a6, kSystemPointerSizeLog2, t7);
__ Ld_d(kScratchReg, MemOperand(t0, 0));
- __ Alsl_d(t0, a7, a2, kPointerSizeLog2, t7);
+ __ Alsl_d(t0, a7, a2, kSystemPointerSizeLog2, t7);
__ St_d(kScratchReg, MemOperand(t0, 0));
__ Branch(&loop, ne, a7, Operand(zero_reg));
}
@@ -2164,14 +2213,15 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- a0 : the number of arguments
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertCallableFunction(a1);
+ __ AssertFunction(a1);
- __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@@ -2225,7 +2275,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(a0, a1);
__ SmiUntag(a0);
}
- __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ StoreReceiver(a3, a0, kScratchReg);
@@ -2254,13 +2305,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
{
- __ Ld_d(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ LoadTaggedField(t0,
+ FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ StoreReceiver(t0, a0, kScratchReg);
}
// Load [[BoundArguments]] into a2 and length of that into a4.
- __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ LoadTaggedField(
+ a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments
@@ -2272,7 +2325,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ slli_d(a5, a4, kPointerSizeLog2);
+ __ slli_d(a5, a4, kSystemPointerSizeLog2);
__ Sub_d(t0, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
@@ -2293,14 +2346,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Push [[BoundArguments]].
{
Label loop, done_loop;
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntagField(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Add_d(a0, a0, Operand(a4));
__ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Sub_d(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
- __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Alsl_d(a5, a4, a2, kTaggedSizeLog2, t7);
+ __ LoadTaggedField(kScratchReg, MemOperand(a5, 0));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
@@ -2310,7 +2363,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Push(t0);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedField(
+ a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2408,7 +2462,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
__ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
@@ -2432,8 +2487,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into a4.
- __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ LoadTaggedField(
+ a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments
@@ -2446,7 +2502,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ slli_d(a5, a4, kPointerSizeLog2);
+ __ slli_d(a5, a4, kSystemPointerSizeLog2);
__ Sub_d(t0, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
@@ -2467,14 +2523,14 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Push [[BoundArguments]].
{
Label loop, done_loop;
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntagField(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Add_d(a0, a0, Operand(a4));
__ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Sub_d(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
- __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Alsl_d(a5, a4, a2, kTaggedSizeLog2, t7);
+ __ LoadTaggedField(kScratchReg, MemOperand(a5, 0));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
@@ -2486,14 +2542,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
Label skip_load;
- __ Branch(&skip_load, ne, a1, Operand(a3));
- __ Ld_d(a3,
- FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ CompareTaggedAndBranch(&skip_load, ne, a1, Operand(a3));
+ __ LoadTaggedField(
+ a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip_load);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedField(
+ a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2518,7 +2575,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ Ld_d(map, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
{
Register flags = t3;
__ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
@@ -2612,10 +2669,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = t2;
Label allocate_vector, done;
- __ Ld_d(vector, FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kFeedbackVectorsOffset));
+ __ LoadTaggedField(
+ vector, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kFeedbackVectorsOffset));
__ Alsl_d(vector, func_index, vector, kTaggedSizeLog2);
- __ Ld_d(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
+ __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ Push(vector);
@@ -2754,8 +2812,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function
// a0: number of arguments including receiver
// a1: pointer to builtin function
@@ -2771,15 +2828,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(s1, a2);
} else {
// Compute the argv pointer in a callee-saved register.
- __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7);
- __ Sub_d(s1, s1, kPointerSize);
+ __ Alsl_d(s1, a0, sp, kSystemPointerSizeLog2, t7);
+ __ Sub_d(s1, s1, kSystemPointerSize);
}
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, 0,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -2805,8 +2861,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Check result for exception sentinel.
Label exception_returned;
- __ LoadRoot(a4, RootIndex::kException);
- __ Branch(&exception_returned, eq, a4, Operand(a0));
+ __ Branch(&exception_returned, eq, a0, RootIndex::kException);
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
@@ -2816,9 +2871,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ li(a2, pending_exception_address);
__ Ld_d(a2, MemOperand(a2, 0));
- __ LoadRoot(a4, RootIndex::kTheHoleValue);
    // Cannot use check here as it attempts to generate a call into the runtime.
- __ Branch(&okay, eq, a4, Operand(a2));
+ __ Branch(&okay, eq, a2, RootIndex::kTheHoleValue);
__ stop();
__ bind(&okay);
}
@@ -2832,7 +2886,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
+ __ LeaveExitFrame(argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2900,7 +2954,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
DoubleRegister double_scratch = kScratchDoubleReg;
// Account for saved regs.
- const int kArgumentOffset = 4 * kPointerSize;
+ const int kArgumentOffset = 4 * kSystemPointerSize;
__ Push(result_reg);
__ Push(scratch, scratch2, scratch3);
@@ -2909,104 +2963,42 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
// Try a conversion to a signed integer.
- __ ftintrz_w_d(double_scratch, double_scratch);
- // Move the converted value into the result register.
- __ movfr2gr_s(scratch3, double_scratch);
-
- // Retrieve and restore the FCSR.
- __ movfcsr2gr(scratch);
-
- // Check for overflow and NaNs.
- __ And(scratch, scratch,
- kFCSRExceptionCauseMask ^ kFCSRDivideByZeroCauseMask);
- // If we had no exceptions then set result_reg and we are done.
- Label error;
- __ Branch(&error, ne, scratch, Operand(zero_reg));
- __ Move(result_reg, scratch3);
- __ Branch(&done);
- __ bind(&error);
+ __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ Ld_w(input_low,
- MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
- __ Ld_w(input_high,
- MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
+  // TryInlineTruncateDoubleToI destroys kScratchDoubleReg, so reload the input.
+ __ Ld_d(result_reg, MemOperand(sp, kArgumentOffset));
- Label normal_exponent;
// Extract the biased exponent in result.
- __ bstrpick_w(result_reg, input_high,
- HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
- HeapNumber::kExponentShift);
-
- // Check for Infinity and NaNs, which should return 0.
- __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
- __ Movz(result_reg, zero_reg, scratch);
- __ Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Express exponent as delta to (number of mantissa bits + 31).
- __ Sub_w(result_reg, result_reg,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ bstrpick_d(input_high, result_reg,
+ HeapNumber::kMantissaBits + HeapNumber::kExponentBits - 1,
+ HeapNumber::kMantissaBits);
+
+ __ Sub_d(scratch, input_high,
+ HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+ Label not_zero;
+ __ Branch(&not_zero, lt, scratch, Operand(zero_reg));
__ mov(result_reg, zero_reg);
__ Branch(&done);
+ __ bind(&not_zero);
- __ bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- __ Add_w(scratch, result_reg,
- Operand(kShiftBase + HeapNumber::kMantissaBits));
-
- // Save the sign.
- Register sign = result_reg;
- result_reg = no_reg;
- __ And(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // On ARM shifts > 31 bits are valid and will result in zero. On LOONG64 we
- // need to check for this specific case.
- Label high_shift_needed, high_shift_done;
- __ Branch(&high_shift_needed, lt, scratch, Operand(32));
- __ mov(input_high, zero_reg);
- __ Branch(&high_shift_done);
- __ bind(&high_shift_needed);
-
- // Set the implicit 1 before the mantissa part in input_high.
- __ Or(input_high, input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- __ sll_w(input_high, input_high, scratch);
-
- __ bind(&high_shift_done);
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- __ li(kScratchReg, 32);
- __ sub_w(scratch, kScratchReg, scratch);
- __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-
- // Negate scratch.
- __ Sub_w(scratch, zero_reg, scratch);
- __ sll_w(input_low, input_low, scratch);
- __ Branch(&shift_done);
-
- __ bind(&pos_shift);
- __ srl_w(input_low, input_low, scratch);
-
- __ bind(&shift_done);
- __ Or(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- __ mov(scratch, sign);
- result_reg = sign;
- sign = no_reg;
- __ Sub_w(result_reg, zero_reg, input_high);
- __ Movz(result_reg, input_high, scratch);
+ // Isolate the mantissa bits, and set the implicit '1'.
+ __ bstrpick_d(input_low, result_reg, HeapNumber::kMantissaBits - 1, 0);
+ __ Or(input_low, input_low, Operand(1ULL << HeapNumber::kMantissaBits));
+
+ Label lessthan_zero_reg;
+ __ Branch(&lessthan_zero_reg, ge, result_reg, Operand(zero_reg));
+ __ Sub_d(input_low, zero_reg, Operand(input_low));
+ __ bind(&lessthan_zero_reg);
+
+  // Shift the mantissa bits into place. We know that we have to shift
+ // it left here, because exponent >= 63 >= kMantissaBits.
+ __ Sub_d(input_high, input_high,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
+ __ sll_w(result_reg, input_low, input_high);
__ bind(&done);
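The rewritten DoubleToI fallback above is easier to follow as straight-line code. The following is an illustrative C++ sketch only (not V8 code); like the assembly, it assumes the inline ftintrz path has already handled every input whose truncation fits in 64 bits, so the unbiased exponent here is at least 63 and the mantissa is always shifted left.

#include <cstdint>
#include <cstring>

// Returns the low 32 bits of the integer part of |value|, as the builtin does
// once the fast inline conversion has failed.
int32_t SlowTruncateToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);

  const int kMantissaBits = 52;
  const int kExponentBias = 1023;
  int biased_exponent = static_cast<int>((bits >> kMantissaBits) & 0x7FF);

  // For |value| >= 2^84 (and Inf/NaN) the low 32 bits of the integer part are
  // all zero, matching the early bail-out in the assembly above.
  if (biased_exponent >= kExponentBias + kMantissaBits + 32) return 0;

  // Mantissa with the implicit leading 1 restored, negated if the sign bit is
  // set.
  uint64_t mantissa = (bits & ((uint64_t{1} << kMantissaBits) - 1)) |
                      (uint64_t{1} << kMantissaBits);
  int64_t signed_mantissa = (bits >> 63) ? -static_cast<int64_t>(mantissa)
                                         : static_cast<int64_t>(mantissa);

  // value == signed_mantissa * 2^(biased_exponent - kExponentBias - 52); under
  // the stated assumption the shift is between 11 and 31, so a 32-bit left
  // shift suffices (result wraps as two's complement).
  int shift = biased_exponent - (kExponentBias + kMantissaBits);
  return static_cast<int32_t>(static_cast<uint32_t>(signed_mantissa) << shift);
}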
@@ -3099,10 +3091,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Ld_d(s0, *stack_space_operand);
}
- static constexpr bool kDontSaveDoubles = false;
static constexpr bool kRegisterContainsSlotCount = false;
- __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
- kRegisterContainsSlotCount);
+ __ LeaveExitFrame(s0, NO_EMIT_RETURN, kRegisterContainsSlotCount);
// Check if the function scheduled an exception.
__ LoadRoot(a4, RootIndex::kTheHoleValue);
@@ -3164,7 +3154,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3172,37 +3162,37 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kPointerSize]: kHolder
- // sp[1 * kPointerSize]: kIsolate
- // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
- // sp[3 * kPointerSize]: undefined (kReturnValue)
- // sp[4 * kPointerSize]: kData
- // sp[5 * kPointerSize]: undefined (kNewTarget)
+ // sp[0 * kSystemPointerSize]: kHolder
+ // sp[1 * kSystemPointerSize]: kIsolate
+ // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
+ // sp[4 * kSystemPointerSize]: kData
+ // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Set up the base register for addressing through MemOperands. It will point
- // at the receiver (located at sp + argc * kPointerSize).
- __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7);
+ // at the receiver (located at sp + argc * kSystemPointerSize).
+ __ Alsl_d(base, argc, sp, kSystemPointerSizeLog2, t7);
// Reserve space on the stack.
- __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
// kHolder.
- __ St_d(holder, MemOperand(sp, 0 * kPointerSize));
+ __ St_d(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ li(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
- __ St_d(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+ __ St_d(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
- __ St_d(call_data, MemOperand(sp, 4 * kPointerSize));
+ __ St_d(call_data, MemOperand(sp, 4 * kSystemPointerSize));
// kNewTarget.
- __ St_d(scratch, MemOperand(sp, 5 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 5 * kSystemPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3211,38 +3201,37 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
static constexpr int kApiStackSpace = 4;
- static constexpr bool kDontSaveDoubles = false;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// EnterExitFrame may align the sp.
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ Add_d(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
- __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 2 * kSystemPointerSize));
// FunctionCallbackInfo::length_.
// Stored as int field, 32-bit integers within struct on stack always left
// justified by n64 ABI.
- __ St_w(argc, MemOperand(sp, 3 * kPointerSize));
+ __ St_w(argc, MemOperand(sp, 3 * kSystemPointerSize));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
// Note: Unlike on other architectures, this stores the number of slots to
// drop, not the number of bytes.
__ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
- __ St_d(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 4 * kSystemPointerSize));
// v8::InvocationCallback's argument.
DCHECK(!AreAliased(api_function_address, scratch, a0));
- __ Add_d(a0, sp, Operand(1 * kPointerSize));
+ __ Add_d(a0, sp, Operand(1 * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -3250,10 +3239,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueIndex) * kSystemPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(sp, 4 * kPointerSize);
+ MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3268,7 +3257,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
static_assert(PropertyCallbackArguments::kHolderIndex == 1);
static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
+ static_assert(PropertyCallbackArguments::kReturnValueIndex == 4);
static_assert(PropertyCallbackArguments::kDataIndex == 5);
static_assert(PropertyCallbackArguments::kThisIndex == 6);
static_assert(PropertyCallbackArguments::kArgsLength == 7);
@@ -3283,40 +3272,43 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Here and below +1 is for name() pushed after the args_ array.
using PCA = PropertyCallbackArguments;
- __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
+ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ St_d(scratch,
- MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ MemOperand(sp, (PCA::kReturnValueIndex + 1) * kSystemPointerSize));
__ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
- kPointerSize));
+ kSystemPointerSize));
__ li(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ __ St_d(scratch,
+ MemOperand(sp, (PCA::kIsolateIndex + 1) * kSystemPointerSize));
+ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kSystemPointerSize));
// should_throw_on_error -> false
DCHECK_EQ(0, Smi::zero().ptr());
- __ St_d(zero_reg,
- MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ St_d(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
+ kSystemPointerSize));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ St_d(scratch, MemOperand(sp, 0 * kSystemPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mov(a0, sp); // a0 = Handle<Name>
- __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+ __ Add_d(a1, a0, Operand(1 * kSystemPointerSize)); // a1 = v8::PCI::args_
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
- __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
- __ Add_d(a1, sp, Operand(1 * kPointerSize));
+ __ St_d(a1, MemOperand(sp, 1 * kSystemPointerSize));
+ __ Add_d(a1, sp, Operand(1 * kSystemPointerSize));
// a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
@@ -3328,7 +3320,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ fp,
+ (PropertyCallbackArguments::kReturnValueIndex + 3) * kSystemPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
@@ -3337,8 +3330,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
- // purpose Code object) to be able to call into C functions that may trigger
- // GC and thus move the caller.
+ // purpose InstructionStream object) to be able to call into C functions that
+ // may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@@ -3389,10 +3382,10 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
- __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
+ __ Sub_d(sp, sp, kNumberOfRegisters * kSystemPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs.bits() & (1 << i)) != 0) {
- __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ __ St_d(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
@@ -3401,7 +3394,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ St_d(fp, MemOperand(a2, 0));
const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// Get the address of the location in the code object (a2) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
@@ -3439,9 +3432,10 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((saved_regs.bits() & (1 << i)) != 0) {
- __ Ld_d(a2, MemOperand(sp, i * kPointerSize));
+ __ Ld_d(a2, MemOperand(sp, i * kSystemPointerSize));
__ St_d(a2, MemOperand(a1, offset));
} else if (v8_flags.debug_code) {
__ li(a2, Operand(kDebugZapValue));
@@ -3455,7 +3449,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
__ Fld_d(f0, MemOperand(sp, src_offset));
__ Fst_d(f0, MemOperand(a1, dst_offset));
}
@@ -3500,7 +3495,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// a1 = one past the last FrameDescription**.
__ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
- __ Alsl_d(a1, a1, a4, kPointerSizeLog2);
+ __ Alsl_d(a1, a1, a4, kSystemPointerSizeLog2);
__ Branch(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
@@ -3515,7 +3510,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ bind(&inner_loop_header);
__ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
- __ Add_d(a4, a4, Operand(kPointerSize));
+ __ Add_d(a4, a4, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
@@ -3539,7 +3534,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Restore the registers from the last output frame.
__ mov(t7, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs.bits() & (1 << i)) != 0) {
__ Ld_d(ToRegister(i), MemOperand(t7, offset));
}
@@ -3577,19 +3573,21 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = a1;
__ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = s1;
- __ Ld_d(code_obj,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ld_d(code_obj,
- FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
// Check if we have baseline code. For OSR entry it is safe to assume we
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2);
- __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODE_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3602,7 +3600,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
__ GetObjectType(code_obj, t2, t2);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODE_TYPE));
}
if (v8_flags.debug_code) {
@@ -3611,10 +3609,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
- __ Ld_d(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld_d(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@@ -3676,6 +3674,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ PrepareCallCFunction(3, 0, a4);
__ CallCFunction(get_baseline_pc, 3, 0);
}
+ __ LoadCodeEntry(code_obj, code_obj);
__ Add_d(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
@@ -3684,10 +3683,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Ld_d(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
- Generate_OSREntry(masm, code_obj,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ Generate_OSREntry(masm, code_obj);
} else {
- __ Add_d(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 6d7b18b4c5..25043609ce 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(t3, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver, t3);
+ __ DropArguments(t3, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver, t3);
__ Ret();
}
@@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(a1, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver, a4);
+ __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver, a4);
__ Ret();
__ bind(&check_receiver);
@@ -318,15 +318,19 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
+
+#ifndef V8_JITLESS
if (v8_flags.debug_code) {
Label not_baseline;
- __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ Branch(is_baseline);
__ bind(&not_baseline);
} else {
- __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
}
+#endif // !V8_JITLESS
+
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -431,8 +435,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(a1, a4);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a2);
+ __ JumpCodeObject(a2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -557,12 +560,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ Sd(zero_reg, MemOperand(s5));
// Set up frame pointer for the frame to be pushed.
- __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+ __ daddiu(fp, sp, -EntryFrameConstants::kNextExitFrameFPOffset);
// Registers:
// either
@@ -674,7 +677,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Sd(a5, MemOperand(a4));
// Reset the stack to the callee saved registers.
- __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+ __ daddiu(sp, sp, -EntryFrameConstants::kNextExitFrameFPOffset);
// Restore callee-saved fpu registers.
__ MultiPopFPU(kCalleeSavedFPU);
@@ -805,8 +808,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1079,7 +1082,6 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = a1;
- Register feedback_vector = a2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
@@ -1097,7 +1099,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
__ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
+ Register feedback_vector = a2;
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1135,6 +1139,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1270,6 +1280,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
@@ -1300,6 +1311,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
+
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
// Unreachable code.
@@ -1321,7 +1334,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
// Push the arguments.
__ PushArray(start_address, num_args, scratch, scratch2,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1462,7 +1475,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Operand(INTERPRETER_DATA_TYPE));
__ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
- __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(t0, t0);
__ Branch(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -1673,8 +1686,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ Branch(&jump_to_optimized_code, ne, maybe_target_code,
Operand(Smi::zero()));
}
@@ -1713,6 +1726,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// JavaScript frame. This is the case then OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
}
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ Ld(a1, MemOperand(maybe_target_code,
@@ -1725,11 +1739,11 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ Daddu(maybe_target_code, maybe_target_code, a1);
- Generate_OSREntry(masm, maybe_target_code,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(maybe_target_code, maybe_target_code);
+
+ // Compute the target address = code_entry + osr_offset
+ // <entry_addr> = <code_entry> + <osr_offset>
+ Generate_OSREntry(masm, maybe_target_code, Operand(a1));
}
} // namespace
@@ -1781,8 +1795,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
__ Ld(receiver, MemOperand(sp));
__ DropArgumentsAndPushNewReceiver(argc, this_arg,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1878,8 +1892,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, this_argument,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1938,8 +1952,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Movz(new_target, target, scratch); // if argc == 2
__ DropArgumentsAndPushNewReceiver(argc, undefined_value,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2774,8 +2788,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function
// a0: number of arguments including receiver
// a1: pointer to builtin function
@@ -2798,8 +2811,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, 0,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -2852,7 +2864,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
+ __ LeaveExitFrame(argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -3118,10 +3130,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Ld(s0, *stack_space_operand);
}
- static constexpr bool kDontSaveDoubles = false;
static constexpr bool kRegisterContainsSlotCount = false;
- __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
- kRegisterContainsSlotCount);
+ __ LeaveExitFrame(s0, NO_EMIT_RETURN, kRegisterContainsSlotCount);
// Check if the function scheduled an exception.
__ LoadRoot(a4, RootIndex::kTheHoleValue);
@@ -3184,7 +3194,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3231,9 +3241,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
static constexpr int kApiStackSpace = 4;
- static constexpr bool kDontSaveDoubles = false;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// EnterExitFrame may align the sp.
@@ -3270,7 +3279,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueIndex) * kPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
MemOperand stack_space_operand(sp, 4 * kPointerSize);
@@ -3288,7 +3297,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
static_assert(PropertyCallbackArguments::kHolderIndex == 1);
static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
+ static_assert(PropertyCallbackArguments::kReturnValueIndex == 4);
static_assert(PropertyCallbackArguments::kDataIndex == 5);
static_assert(PropertyCallbackArguments::kThisIndex == 6);
static_assert(PropertyCallbackArguments::kArgsLength == 7);
@@ -3308,7 +3317,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueIndex + 1) * kPointerSize));
__ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
kPointerSize));
__ li(scratch, ExternalReference::isolate_address(masm->isolate()));
@@ -3330,7 +3339,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
@@ -3346,7 +3355,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ fp, (PropertyCallbackArguments::kReturnValueIndex + 3) * kPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
@@ -3355,8 +3364,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
- // purpose Code object) to be able to call into C functions that may trigger
- // GC and thus move the caller.
+ // purpose InstructionStream object) to be able to call into C functions that
+ // may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@@ -3601,7 +3610,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = a1;
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = s1;
__ Ld(code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3613,7 +3622,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2);
- __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODE_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3626,7 +3635,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
__ GetObjectType(code_obj, t2, t2);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODE_TYPE));
}
if (v8_flags.debug_code) {
@@ -3699,6 +3708,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ PrepareCallCFunction(3, 0, a4);
__ CallCFunction(get_baseline_pc, 3, 0);
}
+ __ LoadCodeEntry(code_obj, code_obj);
__ Daddu(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
@@ -3707,10 +3717,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
- Generate_OSREntry(masm, code_obj,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ Generate_OSREntry(masm, code_obj);
} else {
- __ Daddu(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq
index 37dfea20be..3d0122d60e 100644
--- a/deps/v8/src/builtins/number.tq
+++ b/deps/v8/src/builtins/number.tq
@@ -51,9 +51,7 @@ extern macro NaNStringConstant(): String;
extern macro ZeroStringConstant(): String;
extern macro InfinityStringConstant(): String;
extern macro MinusInfinityStringConstant(): String;
-
-const kAsciiZero: constexpr int32 = 48; // '0' (ascii)
-const kAsciiLowerCaseA: constexpr int32 = 97; // 'a' (ascii)
+extern macro Log10OffsetTable(): RawPtr<uint64>;
transitioning macro ThisNumberValue(implicit context: Context)(
receiver: JSAny, method: constexpr string): Number {
@@ -61,35 +59,91 @@ transitioning macro ThisNumberValue(implicit context: Context)(
ToThisValue(receiver, PrimitiveType::kNumber, method));
}
-macro ToCharCode(input: int32): char8 {
- dcheck(0 <= input && input < 36);
- return input < 10 ?
- %RawDownCast<char8>(Unsigned(input + kAsciiZero)) :
- %RawDownCast<char8>(Unsigned(input - 10 + kAsciiLowerCaseA));
+macro ToCharCode(input: uint32): char8 {
+ dcheck(input < 36);
+ // 48 == '0', 97 == 'a'.
+ return input < 10 ? %RawDownCast<char8>(input + 48) :
+ %RawDownCast<char8>(input - 10 + 97);
+}
+
+macro IntToDecimalStringImpl(
+ x: int32, log10OffsetsTable: RawPtr<uint64>,
+ isPositive: constexpr bool): String {
+ dcheck(isPositive == (x >= 0));
+ let n: uint32 = isPositive ? Unsigned(x) : Unsigned(0 - x);
+ const log2: int32 = 31 - math::Word32Clz(Signed(n) | 1);
+ const tableEntry: uint64 = log10OffsetsTable[Convert<intptr>(log2)];
+ const digitCount: uint64 = (Convert<uint64>(n) + tableEntry) >>> 32;
+ let length = Convert<uint32>(digitCount);
+ if constexpr (!isPositive) length++; // For the '-'.
+ const string = AllocateNonEmptySeqOneByteString(length);
+ if constexpr (isPositive) {
+ string.raw_hash_field = MakeArrayIndexHash(n, length);
+ }
+ const lengthIntptr = Convert<intptr>(Signed(length));
+ let cursor: intptr = lengthIntptr - 1;
+ const rawChars = &string.chars;
+ while (true) {
+ const kInverse: uint64 = 0xcccccccd;
+ const quotient = Convert<uint32>((Convert<uint64>(n) * kInverse) >>> 35);
+ const remainder = n - quotient * 10;
+ const nextChar = %RawDownCast<char8>(remainder | 48); // 48 == '0'
+ // Writing to string.chars[cursor] directly would implicitly emit a
+ // bounds check, and we don't want no bounds check, thank you very much.
+ *UnsafeConstCast(rawChars.UncheckedAtIndex(cursor)) = nextChar;
+ cursor--;
+ n = quotient;
+ if (n == 0) break;
+ }
+ if constexpr (!isPositive) {
+ *UnsafeConstCast(rawChars.UncheckedAtIndex(0)) = 45; // 45 == '-'
+ }
+ return string;
}
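IntToDecimalStringImpl above relies on two tricks: a Log10OffsetTable lookup that turns floor(log2(n)) into a decimal digit count with a single add-and-shift, and the fixed-point reciprocal 0xCCCCCCCD that replaces every division by 10. The C++ sketch below reconstructs the idea; the table values are derived here for illustration and are not claimed to match V8's generated table exactly.

#include <cstdint>
#include <string>

// Table entry for each floor(log2(n)): adding n and taking the high 32 bits
// yields the decimal digit count of n (n >= 1).
static uint64_t kLog10OffsetTable[32];

static void InitLog10OffsetTable() {
  for (int log2 = 0; log2 < 32; ++log2) {
    uint64_t hi = (uint64_t{1} << (log2 + 1)) - 1;  // largest n with this log2
    int digits = 1;
    uint64_t pow10 = 10;
    while (pow10 <= hi) {
      pow10 *= 10;
      ++digits;
    }
    // Below 10^(digits-1) the sum stays under digits << 32, so the shift
    // yields digits - 1; at or above that boundary it yields digits.
    kLog10OffsetTable[log2] = (static_cast<uint64_t>(digits) << 32) - pow10 / 10;
  }
}

static int CountDecimalDigits(uint32_t n) {  // n >= 1
  int log2 = 31 - __builtin_clz(n);          // GCC/Clang; std::countl_zero in C++20
  return static_cast<int>((n + kLog10OffsetTable[log2]) >> 32);
}

static std::string UintToDecimal(uint32_t n) {  // n >= 1
  int length = CountDecimalDigits(n);
  std::string out(static_cast<size_t>(length), '0');
  for (int cursor = length - 1;; --cursor) {
    // For any uint32 n, n / 10 == (n * 0xCCCCCCCD) >> 35 (fixed-point
    // reciprocal), so one multiply replaces the division.
    uint32_t quotient =
        static_cast<uint32_t>(static_cast<uint64_t>(n) * 0xCCCCCCCDu >> 35);
    out[static_cast<size_t>(cursor)] =
        static_cast<char>('0' + (n - quotient * 10));
    n = quotient;
    if (n == 0) break;
  }
  return out;
}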
@export
-macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
+macro IntToDecimalString(x: int32): String {
+ if constexpr (Is64()) {
+ const log10OffsetsTable: RawPtr<uint64> = Log10OffsetTable();
+ if (x >= 0) {
+ if (x < 10) {
+ if (x == 0) {
+ return ZeroStringConstant();
+ }
+ return StringFromSingleCharCode(ToCharCode(Unsigned(x)));
+ }
+ return IntToDecimalStringImpl(x, log10OffsetsTable, true);
+ } else {
+ return IntToDecimalStringImpl(x, log10OffsetsTable, false);
+ }
+ } else {
+ // The generic implementation doesn't rely on 64-bit instructions.
+ return IntToString(x, 10);
+ }
+}
+
+macro IntToString(x: int32, radix: uint32): String {
+ if constexpr (Is64()) {
+ dcheck(radix != 10); // Use IntToDecimalString otherwise.
+ }
const isNegative: bool = x < 0;
- let n: int32 = x;
+ let n: uint32;
if (!isNegative) {
// Fast case where the result is a one character string.
- if (x < radix) {
- if (x == 0) {
+ n = Unsigned(x);
+ if (n < radix) {
+ if (n == 0) {
return ZeroStringConstant();
}
return StringFromSingleCharCode(ToCharCode(n));
}
} else {
dcheck(isNegative);
- if (n == kMinInt32) {
- goto Slow;
- }
- n = 0 - n;
+ n = Unsigned(0 - x);
}
// Calculate length and pre-allocate the result string.
- let temp: int32 = n;
+ let temp: uint32 = n;
let length: int32 = isNegative ? Convert<int32>(1) : Convert<int32>(0);
while (temp > 0) {
temp = temp / radix;
@@ -99,7 +153,7 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
const strSeq = AllocateNonEmptySeqOneByteString(Unsigned(length));
let cursor: intptr = Convert<intptr>(length) - 1;
while (n > 0) {
- const digit: int32 = n % radix;
+ const digit: uint32 = n % radix;
n = n / radix;
*UnsafeConstCast(&strSeq.chars[cursor]) = ToCharCode(digit);
cursor = cursor - 1;
@@ -110,11 +164,12 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
*UnsafeConstCast(&strSeq.chars[0]) = 45;
} else {
dcheck(cursor == -1);
- // In sync with Factory::SmiToString: If radix = 10 and positive number,
- // update hash for string.
- if (radix == 10) {
- dcheck(strSeq.raw_hash_field == kNameEmptyHashField);
- strSeq.raw_hash_field = MakeArrayIndexHash(Unsigned(x), Unsigned(length));
+ if constexpr (!Is64()) {
+ if (radix == 10) {
+ dcheck(strSeq.raw_hash_field == kNameEmptyHashField);
+ strSeq.raw_hash_field =
+ MakeArrayIndexHash(Unsigned(x), Unsigned(length));
+ }
}
}
return strSeq;
@@ -146,8 +201,8 @@ transitioning javascript builtin NumberPrototypeToString(
// value using the radix specified by radixNumber.
if (TaggedIsSmi(x)) {
- return NumberToStringSmi(Convert<int32>(x), Convert<int32>(radixNumber))
- otherwise return runtime::DoubleToStringWithRadix(x, radixNumber);
+ return IntToString(
+ Convert<int32>(x), Unsigned(Convert<int32>(radixNumber)));
}
if (x == -0) {
@@ -311,8 +366,6 @@ transitioning javascript builtin NumberParseInt(
}
extern builtin NonNumberToNumeric(implicit context: Context)(JSAny): Numeric;
-extern builtin BitwiseAnd(implicit context: Context)(Number, Number): Number;
-extern builtin BitwiseXor(implicit context: Context)(Number, Number): Number;
extern builtin Subtract(implicit context: Context)(Number, Number): Number;
extern builtin Add(implicit context: Context)(Number, Number): Number;
extern builtin StringAddConvertLeft(implicit context: Context)(
@@ -323,6 +376,7 @@ extern builtin StringAddConvertRight(implicit context: Context)(
extern macro BitwiseOp(int32, int32, constexpr Operation): Number;
extern macro RelationalComparison(
constexpr Operation, JSAny, JSAny, Context): Boolean;
+extern macro TruncateNumberToWord32(Number): int32;
// TODO(bbudge) Use a simpler macro structure that doesn't loop when converting
// non-numbers, if such a code sequence doesn't make the builtin bigger.
@@ -699,7 +753,7 @@ builtin BitwiseNot(implicit context: Context)(value: JSAny): Numeric {
try {
UnaryOp1(value) otherwise Number, BigInt;
} label Number(n: Number) {
- tail BitwiseXor(n, -1);
+ return BitwiseOp(TruncateNumberToWord32(n), -1, Operation::kBitwiseXor);
} label BigInt(b: BigInt) {
return runtime::BigIntUnaryOp(
context, b, SmiTag<Operation>(Operation::kBitwiseNot));
@@ -754,20 +808,38 @@ builtin BitwiseAnd(implicit context: Context)(
try {
BinaryOp1(left, right) otherwise Number, AtLeastOneBigInt;
} label Number(left: Number, right: Number) {
- tail BitwiseAnd(left, right);
+ return BitwiseOp(
+ TruncateNumberToWord32(left), TruncateNumberToWord32(right),
+ Operation::kBitwiseAnd);
} label AtLeastOneBigInt(left: Numeric, right: Numeric) {
tail bigint::BigIntBitwiseAnd(left, right);
}
}
builtin BitwiseOr(implicit context: Context)(
- left: JSAny, right: JSAny): Object {
- return Generate_BitwiseBinaryOp(Operation::kBitwiseOr, left, right, context);
+ left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp1(left, right) otherwise Number, AtLeastOneBigInt;
+ } label Number(left: Number, right: Number) {
+ return BitwiseOp(
+ TruncateNumberToWord32(left), TruncateNumberToWord32(right),
+ Operation::kBitwiseOr);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail bigint::BigIntBitwiseOr(left, right);
+ }
}
builtin BitwiseXor(implicit context: Context)(
- left: JSAny, right: JSAny): Object {
- return Generate_BitwiseBinaryOp(Operation::kBitwiseXor, left, right, context);
+ left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp1(left, right) otherwise Number, AtLeastOneBigInt;
+ } label Number(left: Number, right: Number) {
+ return BitwiseOp(
+ TruncateNumberToWord32(left), TruncateNumberToWord32(right),
+ Operation::kBitwiseXor);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail bigint::BigIntBitwiseXor(left, right);
+ }
}
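With this change the Number paths of BitwiseOr and BitwiseXor no longer tail-call other builtins; both operands are truncated to 32 bits and the machine operation is applied directly. An illustrative C++ sketch (not the Torque/CSA code) of the ToInt32-style truncation that TruncateNumberToWord32 performs and of the re-boxing of the 32-bit result:

#include <cmath>
#include <cstdint>

// ECMAScript ToInt32: truncate toward zero, reduce modulo 2^32, reinterpret as
// a signed 32-bit value. NaN, +/-Infinity and -0 all map to 0.
int32_t TruncateNumberToWord32Sketch(double number) {
  if (!std::isfinite(number) || number == 0) return 0;
  double truncated = std::trunc(number);
  double modulo = std::fmod(truncated, 4294967296.0);  // in (-2^32, 2^32)
  if (modulo < 0) modulo += 4294967296.0;              // now in [0, 2^32)
  // The uint32 -> int32 cast wraps as two's complement.
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}

// The Number label of BitwiseOr, in effect: operate on int32 operands and box
// the (exactly representable) 32-bit result back into a double-backed Number.
double NumberBitwiseOr(double left, double right) {
  return static_cast<double>(TruncateNumberToWord32Sketch(left) |
                             TruncateNumberToWord32Sketch(right));
}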
// Relational builtins.
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
index 0eb8206115..664b76830b 100644
--- a/deps/v8/src/builtins/object.tq
+++ b/deps/v8/src/builtins/object.tq
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include 'src/builtins/builtins-object-gen.h'
+
namespace runtime {
extern transitioning runtime
ObjectIsExtensible(implicit context: Context)(JSAny): JSAny;
@@ -201,4 +203,149 @@ transitioning javascript builtin ObjectPrototypeToLocaleString(
const method = GetProperty(receiver, 'toString');
return Call(context, method, receiver);
}
+
+// JSDataPropertyDescriptor constants
+const kJSDataPropertyDescriptorWritableOffset: constexpr int31
+ generates 'JSDataPropertyDescriptor::kWritableOffset';
+const kJSDataPropertyDescriptorEnumerableOffset: constexpr int31
+ generates 'JSDataPropertyDescriptor::kEnumerableOffset';
+const kJSDataPropertyDescriptorConfigurableOffset: constexpr int31
+ generates 'JSDataPropertyDescriptor::kConfigurableOffset';
+const kJSDataPropertyDescriptorValueOffset: constexpr int31
+ generates 'JSDataPropertyDescriptor::kValueOffset';
+
+// JSAccessorPropertyDescriptor constants
+const kJSAccessorPropertyDescriptorEnumerableOffset: constexpr int31
+ generates 'JSAccessorPropertyDescriptor::kEnumerableOffset';
+const kJSAccessorPropertyDescriptorConfigurableOffset: constexpr int31
+ generates 'JSAccessorPropertyDescriptor::kConfigurableOffset';
+const kJSAccessorPropertyDescriptorGetOffset: constexpr int31
+ generates 'JSAccessorPropertyDescriptor::kGetOffset';
+const kJSAccessorPropertyDescriptorSetOffset: constexpr int31
+ generates 'JSAccessorPropertyDescriptor::kSetOffset';
+
+// ToPropertyDescriptor (https://tc39.es/ecma262/#sec-topropertydescriptor)
+transitioning macro ToPropertyDescriptor(implicit context: Context)(
+ object: JSReceiver): PropertyDescriptorObject {
+ const result: PropertyDescriptorObject = AllocatePropertyDescriptorObject();
+
+ if (object.map == *NativeContextSlot<Map>(
+ context, ContextSlot::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX)) {
+ const writable = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSDataPropertyDescriptorWritableOffset));
+ result.flags.has_writable = true;
+ result.flags.is_writable = ToBoolean(writable);
+
+ const enumerable = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSDataPropertyDescriptorEnumerableOffset));
+ result.flags.has_enumerable = true;
+ result.flags.is_enumerable = ToBoolean(enumerable);
+
+ const configurable = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSDataPropertyDescriptorConfigurableOffset));
+ result.flags.has_configurable = true;
+ result.flags.is_configurable = ToBoolean(configurable);
+
+ result.flags.has_value = true;
+ result.value = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSDataPropertyDescriptorValueOffset));
+ } else if (
+ object.map == *NativeContextSlot<Map>(
+ context, ContextSlot::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX)) {
+ const enumerable = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSAccessorPropertyDescriptorEnumerableOffset));
+ result.flags.has_enumerable = true;
+ result.flags.is_enumerable = ToBoolean(enumerable);
+
+ const configurable = UnsafeCast<JSAny>(LoadObjectField(
+ object, kJSAccessorPropertyDescriptorConfigurableOffset));
+ result.flags.has_configurable = true;
+ result.flags.is_configurable = ToBoolean(configurable);
+
+ result.flags.has_get = true;
+ result.get = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSAccessorPropertyDescriptorGetOffset));
+
+ result.flags.has_set = true;
+ result.set = UnsafeCast<JSAny>(
+ LoadObjectField(object, kJSAccessorPropertyDescriptorSetOffset));
+ } else {
+ const hasEnumerable = HasProperty(object, 'enumerable');
+ if (hasEnumerable == True) {
+ const enumerable = ToBoolean(GetProperty(object, 'enumerable'));
+ result.flags.has_enumerable = true;
+ result.flags.is_enumerable = enumerable;
+ }
+
+ const hasConfigurable = HasProperty(object, 'configurable');
+ if (hasConfigurable == True) {
+ const configurable = ToBoolean(GetProperty(object, 'configurable'));
+ result.flags.has_configurable = true;
+ result.flags.is_configurable = configurable;
+ }
+
+ const hasValue = HasProperty(object, 'value');
+ if (hasValue == True) {
+ const value = GetProperty(object, 'value');
+ result.flags.has_value = true;
+ result.value = value;
+ }
+
+ const hasWritable = HasProperty(object, 'writable');
+ if (hasWritable == True) {
+ const writable = ToBoolean(GetProperty(object, 'writable'));
+ result.flags.has_writable = true;
+ result.flags.is_writable = writable;
+ }
+
+ const hasGet = HasProperty(object, 'get');
+ if (hasGet == True) {
+ let getter = GetProperty(object, 'get');
+ if (!Is<Undefined>(getter) && !Is<FunctionTemplateInfo>(getter)) {
+ getter = Cast<Callable>(getter) otherwise ThrowTypeError(
+ MessageTemplate::kObjectGetterCallable, getter);
+ }
+ result.flags.has_get = true;
+ result.get = getter;
+ }
+
+ const hasSet = HasProperty(object, 'set');
+ if (hasSet == True) {
+ let setter = GetProperty(object, 'set');
+ if (!Is<Undefined>(setter) && !Is<FunctionTemplateInfo>(setter)) {
+ setter = Cast<Callable>(setter) otherwise ThrowTypeError(
+ MessageTemplate::kObjectSetterCallable, setter);
+ }
+ result.flags.has_set = true;
+ result.set = setter;
+ }
+ }
+ return result;
+}
+
+@export
+transitioning macro ToPropertyDescriptor(implicit context: Context)(
+ object: JSAny): PropertyDescriptorObject|Undefined {
+ typeswitch (object) {
+ case (Undefined): {
+ return Undefined;
+ }
+ case (receiver: JSReceiver): {
+ return ToPropertyDescriptor(receiver);
+ }
+ case (JSAny): {
+ ThrowTypeError(MessageTemplate::kPropertyDescObject, object);
+ }
+ }
+}
+
+extern transitioning macro ObjectBuiltinsAssembler::FromPropertyDescriptor(
+ Context, JSAny): JSAny;
+
+@export
+transitioning macro FromPropertyDescriptor(implicit context: Context)(
+ object: JSAny): JSAny {
+ return FromPropertyDescriptor(context, object);
+}
+
} // namespace object
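
The ToPropertyDescriptor macro added above mirrors the spec operation: two fast paths keyed on the canonical data/accessor descriptor maps, a generic path that probes the six descriptor fields, and a TypeError for non-callable get/set. An illustrative TypeScript sketch of the descriptor shapes it round-trips, using only standard JavaScript APIs:

// Data descriptor: value / writable / enumerable / configurable.
const data = Object.getOwnPropertyDescriptor({ x: 1 }, "x")!;
console.assert(data.value === 1 && data.writable && data.enumerable && data.configurable);

// Accessor descriptor: get / set instead of value / writable.
const acc = Object.getOwnPropertyDescriptor({ get y() { return 2; } }, "y")!;
console.assert(typeof acc.get === "function" && !("value" in acc));

// Non-callable getters are rejected, matching the ThrowTypeError in the
// generic path above.
try {
  Object.defineProperty({}, "z", { get: 42 as any });
} catch (e) {
  console.assert(e instanceof TypeError);
}
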
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e5868c50d8..ed3fcfd55d 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -52,7 +52,10 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
USE(GetSharedFunctionInfoBytecodeOrBaseline);
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ __ LoadMap(scratch1, sfi_data);
+
+#ifndef V8_JITLESS
+ __ CompareInstanceType(scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
@@ -63,8 +66,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ beq(is_baseline);
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
+#else
+ __ CompareInstanceType(scratch1, scratch1, INTERPRETER_DATA_TYPE);
+#endif // !V8_JITLESS
+
__ bne(&done);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);
@@ -118,12 +125,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset),
r0);
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = r9;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
r0);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);
@@ -131,7 +138,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+ __ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -144,7 +151,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+ __ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
@@ -154,12 +161,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = r5;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+ r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@@ -225,15 +231,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Pop(code_obj);
+ __ LoadCodeEntry(code_obj, code_obj);
__ AddS64(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
Register scratch = ip;
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, scratch);
- Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ Generate_OSREntry(masm, code_obj, 0);
} else {
- __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
@@ -358,8 +364,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&stack_overflow);
@@ -380,8 +386,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
__ bne(&jump_to_optimized_code);
}
@@ -426,18 +432,19 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
r0);
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
}
+ __ LoadCodeEntry(r3, r3);
+
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ SmiUntag(r4,
@@ -486,7 +493,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
@@ -602,8 +609,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(r4, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(r4, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&check_receiver);
@@ -651,10 +658,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r4);
// Load suspended function and context.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
- __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
- r0);
+ __ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -694,12 +700,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
__ subi(r6, r6, Operand(kJSArgcReceiverSlots));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
r0);
{
@@ -710,14 +716,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&done_loop);
__ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);
// Push receiver.
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
__ Push(scratch);
}
@@ -725,9 +731,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
@@ -737,7 +743,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(r3, FieldMemOperand(
r3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -747,8 +753,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r6, r4);
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
- r0);
+ __ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
__ JumpCodeObject(r5);
}
@@ -760,7 +765,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
@@ -771,7 +776,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r4);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
@@ -836,6 +841,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Initialize the root register.
// C calling convention. The first argument is passed in r3.
__ mr(kRootRegister, r3);
+
+#ifdef V8_COMPRESS_POINTERS
+ // Initialize the pointer cage base register.
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
+#endif
}
// Push a frame with special values setup to mark it as an entry frame.
@@ -861,14 +872,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ li(r0, Operand::Zero());
__ StoreU64(r0, MemOperand(r3));
Register scratch = r9;
// Set up frame pointer for the frame to be pushed.
- __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+ __ addi(fp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
@@ -956,7 +967,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ StoreU64(r6, MemOperand(scratch));
// Reset the stack to the callee saved registers.
- __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+ __ addi(sp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// Restore callee-saved double registers.
__ MultiPopDoubles(kCalleeSavedDoubles);
@@ -1104,8 +1115,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1197,12 +1208,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = ip;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+ r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
__ AssertFeedbackVector(feedback_vector, r11);
// Check for a tiering state.
@@ -1359,14 +1369,13 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = r4;
- Register feedback_vector = r5;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
// Load original bytecode array or the debug copy.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);
@@ -1381,18 +1390,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ Register feedback_vector = r5;
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+ r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1428,6 +1438,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1568,23 +1584,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset),
+ r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);
@@ -1603,6 +1620,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1621,7 +1639,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, r0,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1758,19 +1776,19 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
__ CompareObjectType(r5, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
r0);
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(r5, r5);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -2012,8 +2030,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2096,8 +2114,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2145,8 +2163,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2207,7 +2225,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2224,8 +2243,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(r5, HeapObject::kMapOffset), r0);
+ __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
+ r0);
__ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
__ beq(&ok);
@@ -2260,7 +2279,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r7);
__ bind(&loop);
- __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
+ __ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
__ addi(r5, r5, Operand(kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
@@ -2295,8 +2314,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r6, &new_target_not_constructor);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(r6, HeapObject::kMapOffset), r0);
+ __ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
+ r0);
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
@@ -2379,14 +2398,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(r4);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
- r0);
+ __ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
@@ -2440,7 +2458,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r3, r4);
__ SmiUntag(r3);
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ bind(&convert_receiver);
}
@@ -2471,7 +2489,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r5 and length of that into r7.
Label no_bound_arguments;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
__ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
__ beq(&no_bound_arguments, cr0);
@@ -2520,7 +2538,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ subi(r7, r7, Operand(1));
__ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
__ add(scratch, scratch, r5);
- __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
+ __ LoadTaggedField(scratch, MemOperand(scratch), r0);
__ Push(scratch);
__ bdnz(&loop);
__ bind(&done);
@@ -2543,15 +2561,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
- __ LoadAnyTaggedField(
- r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
+ __ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
+ r0);
__ StoreReceiver(r6, r3, ip);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@@ -2651,7 +2669,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2683,12 +2701,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label skip;
__ CompareTagged(r4, r6);
__ bne(&skip);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2712,8 +2730,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(
- map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
+ __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
{
Register flags = r5;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2801,15 +2818,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = ip;
Label allocate_vector, done;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
vector,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset),
scratch);
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
- __ LoadTaggedPointerField(
- vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
+ __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
+ scratch);
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
@@ -2949,8 +2966,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r3: number of arguments including receiver
// r4: pointer to builtin function
@@ -2986,9 +3002,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
arg_stack_space += result_size;
}
- __ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ __ EnterExitFrame(arg_stack_space, builtin_exit_frame
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mr(r14, r3);
@@ -3053,7 +3069,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// r14: still holds argc (callee-saved).
: r14;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
+ __ LeaveExitFrame(argc, false);
__ blr();
// Handling of exception.
@@ -3328,7 +3344,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
} else {
__ mov(r14, Operand(stack_space));
}
- __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
+ __ LeaveExitFrame(r14, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r14, RootIndex::kTheHoleValue);
@@ -3362,6 +3378,18 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ b(&leave_exit_frame);
}
+MemOperand ExitFrameStackSlotOperand(int offset) {
+ static constexpr int kFrameOffset = 1 * kSystemPointerSize;
+ return MemOperand(sp, (kStackFrameExtraParamSlot * kSystemPointerSize) +
+ offset + kFrameOffset);
+}
+
+MemOperand ExitFrameCallerStackSlotOperand(int index) {
+ return MemOperand(
+ fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
} // namespace
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
@@ -3384,12 +3412,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
Register scratch = r7;
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
+ using FCI = FunctionCallbackInfo<v8::Value>;
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3397,12 +3426,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kSystemPointerSize]: kHolder
+ // sp[0 * kSystemPointerSize]: kHolder <= FCI::implicit_args_
// sp[1 * kSystemPointerSize]: kIsolate
// sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kSystemPointerSize]: undefined (kReturnValue)
// sp[4 * kSystemPointerSize]: kData
// sp[5 * kSystemPointerSize]: undefined (kNewTarget)
+ // Existing state:
+ // sp[6 * kSystemPointerSize]: <= FCI::values_
// Reserve space on the stack.
__ subi(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
@@ -3437,52 +3468,46 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// [0] space for DirectCEntryStub's LR save
// [1-3] FunctionCallbackInfo
// [4] number of bytes to drop from the stack after returning
+ static constexpr int kSlotsToDropSize = 1 * kSystemPointerSize;
static constexpr int kApiStackSpace = 5;
- static constexpr bool kDontSaveDoubles = false;
+ static_assert(FCI::kImplicitArgsOffset == 0);
+ static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize);
+ static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize);
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
- kSystemPointerSize));
+ __ StoreU64(scratch, ExitFrameStackSlotOperand(FCI::kImplicitArgsOffset));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ addi(scratch, scratch,
- Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
- kSystemPointerSize));
+ Operand(FCA::kArgsLengthWithReceiver * kSystemPointerSize));
+ __ StoreU64(scratch, ExitFrameStackSlotOperand(FCI::kValuesOffset));
// FunctionCallbackInfo::length_.
- __ stw(argc,
- MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kSystemPointerSize));
+ __ stw(argc, ExitFrameStackSlotOperand(FCI::kLengthOffset));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
+ MemOperand stack_space_operand =
+ ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropSize);
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
- kSystemPointerSize));
+ __ StoreU64(scratch, stack_space_operand);
// v8::InvocationCallback's argument.
__ addi(r3, sp,
Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // There are two stack slots above the arguments we constructed on the stack.
- // TODO(jgruber): Document what these arguments are.
- static constexpr int kStackSlotsAboveFCA = 2;
- MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
-
+ MemOperand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(
- sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3497,14 +3522,15 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
int apiStackSpace = 0;
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -3516,20 +3542,21 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ constexpr int kNameHandleStackSize = 1;
+ constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mr(r3, sp); // r3 = Handle<Name>
@@ -3557,7 +3584,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, apiStackSpace);
+ __ EnterExitFrame(apiStackSpace, StackFrame::EXIT);
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
@@ -3573,16 +3600,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
-
__ LoadU64(
api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
r0);
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index c5ad5eefd5..768691ecaa 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -80,6 +80,7 @@ PromiseResolve(implicit context: Context)(
}
extern macro IsJSReceiverMap(Map): bool;
+extern macro JSAnyIsNotPrimitiveMap(Map): bool;
extern macro IsPromiseThenProtectorCellInvalid(): bool;
@@ -115,7 +116,7 @@ ResolvePromise(implicit context: Context)(
const heapResolution = UnsafeCast<HeapObject>(resolution);
const resolutionMap = heapResolution.map;
- if (!IsJSReceiverMap(resolutionMap)) {
+ if (!JSAnyIsNotPrimitiveMap(resolutionMap)) {
return FulfillPromise(promise, resolution);
}
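
The resolution check above distinguishes primitives (fulfill the promise directly) from objects (look up "then"). A minimal TypeScript sketch of that observable behaviour, independent of which map predicate the builtin uses internally:

// A primitive resolution fulfills directly; no "then" lookup happens.
Promise.resolve(42).then(v => console.assert(v === 42));

// An object resolution triggers a "then" lookup, so a thenable is
// chained instead of being used as the value.
const thenable = { then(resolve: (v: number) => void) { resolve(7); } };
Promise.resolve(thenable).then(v => console.assert(v === 7));
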
@@ -161,12 +162,6 @@ ResolvePromise(implicit context: Context)(
}
goto Slow;
} label Slow deferred {
- // Skip "then" lookup for Wasm objects as they are opaque.
- @if(V8_ENABLE_WEBASSEMBLY)
- if (Is<WasmObject>(resolution)) {
- return FulfillPromise(promise, resolution);
- }
-
// 9. Let then be Get(resolution, "then").
// 10. If then is an abrupt completion, then
try {
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index 8a7dfde9e5..2aa95ccae1 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -21,6 +21,8 @@ transitioning builtin
ProxySetProperty(implicit context: Context)(
proxy: JSProxy, name: PropertyKey|PrivateSymbol, value: JSAny,
receiverValue: JSAny): JSAny {
+ // Handle deeply nested proxies.
+ PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
dcheck(TaggedIsNotSmi(name));
dcheck(Is<Name>(name));
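
PerformStackCheck above guards the recursive case where a proxy's [[Set]] forwards to another proxy target. A hedged TypeScript sketch of the scenario; the exact nesting depth needed and the resulting error type are V8-specific:

// Each default [[Set]] forwards to the proxy's target, so a long proxy
// chain recurses once per level; the stack check turns exhaustion into a
// catchable error (a RangeError in V8) rather than an unrecoverable overflow.
let target: any = {};
for (let i = 0; i < 200_000; i++) {
  target = new Proxy(target, {});
}
try {
  target.x = 1;
} catch (e) {
  console.assert(e instanceof RangeError);
}
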
diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq
index c0591e7f6c..5ed70c81c5 100644
--- a/deps/v8/src/builtins/reflect.tq
+++ b/deps/v8/src/builtins/reflect.tq
@@ -94,4 +94,20 @@ ReflectHas(js-implicit context: NativeContext)(
otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Reflect.has');
return HasProperty(objectJSReceiver, key);
}
+
+extern transitioning builtin GetOwnPropertyDescriptor(
+ implicit context: Context)(JSAny, Name): JSAny;
+
+// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
+transitioning javascript builtin
+ReflectGetOwnPropertyDescriptor(js-implicit context: NativeContext)(
+ target: JSAny, propertyKey: JSAny): JSAny {
+ const targetReceiver = Cast<JSReceiver>(target)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Reflect.getOwnPropertyDescriptor');
+ const name = ToName(propertyKey);
+
+ const desc = GetOwnPropertyDescriptor(targetReceiver, name);
+ return object::FromPropertyDescriptor(desc);
+}
} // namespace reflect
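
ReflectGetOwnPropertyDescriptor above is a thin wrapper: throw for non-object targets, ToName the key, then convert the internal descriptor with object::FromPropertyDescriptor. An illustrative TypeScript sketch using the standard API it implements:

// Non-object targets throw, unlike Object.getOwnPropertyDescriptor,
// which would coerce them.
try {
  Reflect.getOwnPropertyDescriptor("str" as any, "length");
} catch (e) {
  console.assert(e instanceof TypeError);
}

// Object targets yield an ordinary descriptor object ...
const d = Reflect.getOwnPropertyDescriptor({ a: 1 }, "a");
console.assert(d !== undefined && d.value === 1 && d.configurable);

// ... and a missing property yields undefined rather than throwing.
console.assert(Reflect.getOwnPropertyDescriptor({}, "nope") === undefined);
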
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index ecd99af032..127755ec88 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -10,21 +10,23 @@ extern builtin
SubString(implicit context: Context)(String, Smi, Smi): String;
extern runtime RegExpExecMultiple(implicit context: Context)(
- JSRegExp, String, RegExpMatchInfo, JSArray): Null|JSArray;
+ JSRegExp, String, RegExpMatchInfo): Null|FixedArray;
extern transitioning runtime
RegExpReplaceRT(Context, JSReceiver, String, Object): String;
extern transitioning runtime
-StringBuilderConcat(implicit context: Context)(JSArray, Smi, String): String;
+StringBuilderConcat(implicit context: Context)(FixedArray, Smi, String): String;
extern transitioning runtime
StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
String, JSRegExp, Callable): String;
+// matchesCapacity is the length of the matchesElements FixedArray, and
+// matchesElements is allowed to contain holes at the end.
transitioning macro RegExpReplaceCallableNoExplicitCaptures(
implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr, string: String,
- replaceFn: Callable): void {
+ matchesElements: FixedArray, matchesCapacity: intptr, string: String,
+ replaceFn: Callable): intptr {
let matchStart: Smi = 0;
- for (let i: intptr = 0; i < matchesLength; i++) {
+ for (let i: intptr = 0; i < matchesCapacity; i++) {
typeswitch (matchesElements.objects[i]) {
// Element represents a slice.
case (elSmi: Smi): {
@@ -54,18 +56,29 @@ transitioning macro RegExpReplaceCallableNoExplicitCaptures(
matchesElements.objects[i] = replacement;
matchStart += elString.length_smi;
}
+ case (TheHole): deferred {
+ // No more elements.
+ return i;
+ }
case (Object): deferred {
unreachable;
}
}
}
+ return matchesCapacity;
}
+// matchesCapacity is the length of the matchesElements FixedArray, and
+// matchesElements is allowed to contain holes at the end.
transitioning macro
RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr,
- replaceFn: Callable): void {
- for (let i: intptr = 0; i < matchesLength; i++) {
+ matchesElements: FixedArray, matchesCapacity: intptr,
+ replaceFn: Callable): intptr {
+ for (let i: intptr = 0; i < matchesCapacity; i++) {
+ if (matchesElements.objects[i] == TheHole) {
+ // No more elements.
+ return i;
+ }
const elArray =
Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
@@ -78,29 +91,27 @@ RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
// we got back from the callback function.
matchesElements.objects[i] = ToString_Inline(replacementObj);
}
+ return matchesCapacity;
}
transitioning macro RegExpReplaceFastGlobalCallable(implicit context: Context)(
regexp: FastJSRegExp, string: String, replaceFn: Callable): String {
regexp.lastIndex = 0;
- const kInitialCapacity: intptr = 16;
- const kInitialLength: Smi = 0;
- const result: Null|JSArray = RegExpExecMultiple(
- regexp, string, GetRegExpLastMatchInfo(),
- AllocateJSArray(
- ElementsKind::PACKED_ELEMENTS, GetFastPackedElementsJSArrayMap(),
- kInitialCapacity, kInitialLength));
+ const result: Null|FixedArray =
+ RegExpExecMultiple(regexp, string, GetRegExpLastMatchInfo());
regexp.lastIndex = 0;
// If no matches, return the subject string.
if (result == Null) return string;
- const matches: JSArray = UnsafeCast<JSArray>(result);
- const matchesLength: Smi = Cast<Smi>(matches.length) otherwise unreachable;
- const matchesLengthInt: intptr = Convert<intptr>(matchesLength);
- const matchesElements: FixedArray = UnsafeCast<FixedArray>(matches.elements);
+ const matches: FixedArray = UnsafeCast<FixedArray>(result);
+ // The FixedArray will contain holes at the end, and we have lost track of
+ // its real length. This is OK because users iterate it from the
+ // beginning.
+ const matchesCapacity: Smi = Cast<Smi>(matches.length) otherwise unreachable;
+ const matchesCapacityInt: intptr = Convert<intptr>(matchesCapacity);
// Reload last match info since it might have changed.
const nofCaptures: Smi = GetRegExpLastMatchInfo().NumberOfCaptures();
@@ -108,15 +119,16 @@ transitioning macro RegExpReplaceFastGlobalCallable(implicit context: Context)(
// If the number of captures is two then there are no explicit captures in
// the regexp, just the implicit capture that captures the whole match. In
// this case we can simplify quite a bit and end up with something faster.
+ let matchesLength: intptr;
if (nofCaptures == 2) {
- RegExpReplaceCallableNoExplicitCaptures(
- matchesElements, matchesLengthInt, string, replaceFn);
+ matchesLength = RegExpReplaceCallableNoExplicitCaptures(
+ matches, matchesCapacityInt, string, replaceFn);
} else {
- RegExpReplaceCallableWithExplicitCaptures(
- matchesElements, matchesLengthInt, replaceFn);
+ matchesLength = RegExpReplaceCallableWithExplicitCaptures(
+ matches, matchesCapacityInt, replaceFn);
}
- return StringBuilderConcat(matches, matchesLength, string);
+ return StringBuilderConcat(matches, Convert<Smi>(matchesLength), string);
}
transitioning macro RegExpReplaceFastString(implicit context: Context)(
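
The two macros above split the callable-replace path by whether the regexp has explicit captures, and both now stop at the first hole in the matches FixedArray. That split lines up with the two callback signatures visible from JavaScript, sketched here in TypeScript:

// No explicit captures: the callback receives (match, offset, string).
const out1 = "a1b2".replace(/\d/g, (m, offset) => `[${m}@${offset}]`);
console.assert(out1 === "a[1@1]b[2@3]");

// Explicit captures: the callback receives (match, ...captures, offset, string).
const out2 = "2023-05".replace(/(\d+)-(\d+)/g, (_m, y, mo) => `${mo}/${y}`);
console.assert(out2 === "05/2023");
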
diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc
index 04f91b24e2..3404562785 100644
--- a/deps/v8/src/builtins/riscv/builtins-riscv.cc
+++ b/deps/v8/src/builtins/riscv/builtins-riscv.cc
@@ -155,7 +155,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register func_info = temps.Acquire();
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Load32U(func_info,
FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
@@ -341,11 +341,22 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+
+#ifndef V8_JITLESS
+ if (v8_flags.debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
+ }
+#endif // !V8_JITLESS
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
Label::Distance::kNear);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -369,10 +380,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(a1);
// Load suspended function and context.
- __ LoadTaggedPointerField(
- a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
- __ LoadTaggedPointerField(cp,
- FieldMemOperand(a4, JSFunction::kContextOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -409,12 +419,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
__ SubWord(a3, a3, Operand(kJSArgcReceiverSlots));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -423,23 +433,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ SubWord(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
__ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
- __ LoadAnyTaggedField(
- kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
// Push receiver.
- __ LoadAnyTaggedField(
- kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
__ GetObjectType(a3, a3, a3);
@@ -450,7 +460,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a0, FieldMemOperand(
a0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -460,7 +470,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(a3, a1);
__ Move(a1, a4);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ JumpCodeObject(a2);
}
@@ -473,8 +483,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1);
}
- __ LoadTaggedPointerField(
- a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Branch(&stepping_prepared);
__ bind(&prepare_step_in_suspended_generator);
@@ -484,8 +494,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(a1);
}
- __ LoadTaggedPointerField(
- a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ Branch(&stepping_prepared);
__ bind(&stack_overflow);
@@ -568,7 +578,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// C calling convention. The first argument is passed in a0.
__ Move(kRootRegister, a0);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
// Initialize the pointer cage base register.
__ LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
@@ -592,11 +602,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Push(s1, s2, s3, s4);
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ StoreWord(zero_reg, MemOperand(s5));
// Set up frame pointer for the frame to be pushed.
- __ AddWord(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+ __ AddWord(fp, sp, -EntryFrameConstants::kNextExitFrameFPOffset);
// Registers:
// either
// a1: entry address
@@ -708,7 +718,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ StoreWord(a5, MemOperand(a4));
// Reset the stack to the callee saved registers.
- __ AddWord(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+ __ AddWord(sp, sp, -EntryFrameConstants::kNextExitFrameFPOffset);
// Restore callee-saved fpu registers.
__ MultiPopFPU(kCalleeSavedFPU);
@@ -789,7 +799,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Move(s3, a4);
__ Move(s4, a4);
__ Move(s5, a4);
-#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifndef V8_COMPRESS_POINTERS
__ Move(s11, a4);
#endif
// s6 holds the root address. Do not clobber.
@@ -1119,17 +1129,12 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = a1;
- Register feedback_vector = a2;
- UseScratchRegisterScope temps(masm);
- temps.Include(t0, t1);
- Register scratch = temps.Acquire();
- Register scratch2 = temps.Acquire();
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kScratchReg,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
Label is_baseline;
@@ -1142,18 +1147,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
__ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+#ifndef V8_JITLESS
+ Register feedback_vector = a2;
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
- __ LoadTaggedPointerField(
- a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedField(a4,
+ FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
Label::Distance::kNear);
@@ -1182,6 +1188,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1293,7 +1305,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&do_return);
// The return value is in a0.
- LeaveInterpreterFrame(masm, scratch, scratch2);
+ LeaveInterpreterFrame(masm, t0, t1);
__ Jump(ra);
__ bind(&stack_check_interrupt);
@@ -1322,25 +1334,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ Branch(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
- __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ Branch(&install_baseline_code, ne, scratch,
- Operand(FEEDBACK_VECTOR_TYPE));
+ __ LoadTaggedField(
+ t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
// Check for a tiering state.
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
@@ -1355,6 +1367,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1378,7 +1391,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
// Push the arguments.
__ PushArray(start_address, num_args,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1508,18 +1521,18 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(t0, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister);
__ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
- __ AddWord(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(t0, t0);
__ BranchShort(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -1733,8 +1746,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ Branch(&jump_to_optimized_code, ne, maybe_target_code,
Operand(Smi::zero()));
}
@@ -1747,14 +1760,33 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// If the code object is null, just return to the caller.
__ Ret(eq, a0, Operand(Smi::zero()));
__ bind(&jump_to_optimized_code);
+
+ // OSR entry tracing.
+ {
+ Label next;
+ __ li(a1, ExternalReference::address_of_log_or_trace_osr());
+ __ Lbu(a1, MemOperand(a1));
+ __ Branch(&next, eq, a1, Operand(zero_reg));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0); // Preserve the code object.
+ __ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
+ __ Pop(a0);
+ }
+
+ __ bind(&next);
+ }
+
if (source == OsrSourceTier::kInterpreter) {
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
}
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
kHeapObjectTag));
@@ -1764,10 +1796,11 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ AddWord(a0, a0, a1);
- Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(a0, a0);
+
+ // Compute the target address = code_entry + osr_offset
+ // <entry_addr> = <code_entry> + <osr_offset>
+ Generate_OSREntry(masm, a0, Operand(a1));
}
} // namespace
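The OSR hunks above drop the old code_obj + Code::kHeaderSize - kHeapObjectTag arithmetic in favour of LoadCodeEntry plus the raw osr_offset taken from the deoptimization data. The following is a minimal standalone sketch of the two address computations, using placeholder constants rather than V8's real Code layout (the values of kHeaderSize and kHeapObjectTag here are illustrative assumptions):

// Simplified model of the OSR entry-address computation changed above.
// Placeholder constants; not V8's actual object layout.
#include <cstdint>
#include <cstdio>

constexpr intptr_t kHeaderSize = 64;    // assumed header size, illustration only
constexpr intptr_t kHeapObjectTag = 1;  // assumed tag bias, illustration only

// Old scheme: the jump target was derived from the tagged code object by
// skipping its header and removing the heap-object tag, then adding osr_offset.
intptr_t OldOsrTarget(intptr_t tagged_code_obj, intptr_t osr_offset) {
  return tagged_code_obj + (kHeaderSize - kHeapObjectTag) + osr_offset;
}

// New scheme: LoadCodeEntry already yields the instruction start, so only the
// osr_offset needs to be added before jumping.
intptr_t NewOsrTarget(intptr_t code_entry, intptr_t osr_offset) {
  return code_entry + osr_offset;
}

int main() {
  std::printf("%ld %ld\n",
              static_cast<long>(OldOsrTarget(0x1001, 0x40)),
              static_cast<long>(NewOsrTarget(0x1040, 0x40)));
  return 0;
}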
@@ -2125,7 +2158,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ SubWord(scratch, sp, Operand(scratch));
__ LoadRoot(hole_value, RootIndex::kTheHoleValue);
__ bind(&loop);
- __ LoadTaggedPointerField(a5, MemOperand(src));
+ __ LoadTaggedField(a5, MemOperand(src));
__ AddWord(src, src, kTaggedSize);
__ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
__ LoadRoot(a5, RootIndex::kUndefinedValue);
@@ -2163,8 +2196,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ JumpIfSmi(a3, &new_target_not_constructor);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset));
__ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
@@ -2179,8 +2211,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
__ Move(a6, fp);
__ LoadWord(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
@@ -2244,7 +2274,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ AssertCallableFunction(a1);
Label class_constructor;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(kScratchReg, a3,
@@ -2254,8 +2284,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(cp,
- FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@@ -2310,7 +2339,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(a0, a1);
__ SmiUntag(a0);
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@@ -2352,7 +2381,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register bound_argv = a2;
// Load [[BoundArguments]] into a2 and length of that into a4.
Label no_bound_arguments;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(bound_argc,
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
@@ -2396,7 +2425,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SubWord(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
__ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
- __ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
+ __ LoadTaggedField(kScratchReg, MemOperand(a5));
__ Push(kScratchReg);
__ Branch(&loop);
__ bind(&done_loop);
@@ -2422,8 +2451,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ StoreReceiver(scratch, a0, kScratchReg);
}
@@ -2431,7 +2460,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@@ -2521,7 +2550,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
__ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2560,12 +2589,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear);
#endif
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2588,7 +2617,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
temps.Include(t0, t1);
Register map = temps.Acquire();
Register scratch = temps.Acquire();
- __ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
@@ -2783,8 +2812,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function
// a0: number of arguments including receiver
// a1: pointer to builtin function
@@ -2807,8 +2835,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, 0,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s3: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -2861,7 +2888,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// s3: still holds argc (callee-saved).
: s3;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
+ __ LeaveExitFrame(argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -3162,10 +3189,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ LoadWord(s3, *stack_space_operand);
}
- static constexpr bool kDontSaveDoubles = false;
static constexpr bool kRegisterContainsSlotCount = false;
- __ LeaveExitFrame(kDontSaveDoubles, s3, NO_EMIT_RETURN,
- kRegisterContainsSlotCount);
+ __ LeaveExitFrame(s3, NO_EMIT_RETURN, kRegisterContainsSlotCount);
// Check if the function scheduled an exception.
__ LoadRoot(a4, RootIndex::kTheHoleValue);
@@ -3191,6 +3216,11 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Branch(&leave_exit_frame);
}
+MemOperand ExitFrameCallerStackSlotOperand(int index) {
+ return MemOperand(
+ fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
} // namespace
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
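The new ExitFrameCallerStackSlotOperand helper above names the fp-relative arithmetic that the removed kStackSlotsAboveFCA code spelled out inline at each call site. A rough standalone model of that address computation follows; the constant values are assumptions for illustration, not the real frame constants:

// Simplified model of ExitFrameCallerStackSlotOperand; placeholder constants.
#include <cstdint>
#include <cstdio>

constexpr int kSystemPointerSize = 8;
constexpr int kFixedSlotCountAboveFp = 2;  // assumed; plays the role of the
                                           // old kStackSlotsAboveFCA = 2

// A caller-allocated slot (e.g. the API call's return value) is addressed
// relative to fp, past the fixed slots the exit frame keeps above it.
intptr_t CallerStackSlotAddress(intptr_t fp, int index) {
  return fp + (kFixedSlotCountAboveFp + index) * kSystemPointerSize;
}

int main() {
  constexpr int kReturnValueIndex = 3;  // mirrors the FCA assert above
  std::printf("%ld\n", static_cast<long>(
                           CallerStackSlotAddress(0x7fff0000, kReturnValueIndex)));
  return 0;
}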
@@ -3223,7 +3253,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3270,9 +3300,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
static constexpr int kApiStackSpace = 4;
- static constexpr bool kDontSaveDoubles = false;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace);
// EnterExitFrame may align the sp.
@@ -3304,11 +3333,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
- // There are two stack slots above the arguments we constructed on the stack.
- // TODO(jgruber): Document what these arguments are.
- static constexpr int kStackSlotsAboveFCA = 2;
- MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
+ MemOperand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kUseStackSpaceOperand = 0;
MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
@@ -3322,14 +3348,15 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -3344,12 +3371,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
__ StoreWord(receiver,
MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
- __ LoadAnyTaggedField(scratch,
- FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ StoreWord(scratch,
MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ StoreWord(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) *
+ __ StoreWord(scratch, MemOperand(sp, (PCA::kReturnValueIndex + 1) *
kSystemPointerSize));
__ StoreWord(scratch,
MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
@@ -3363,12 +3390,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK_EQ(0, Smi::zero().ptr());
__ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
kSystemPointerSize));
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ static constexpr int kNameHandleStackSize = 1;
+ static const int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ Move(a0, sp); // a0 = Handle<Name>
@@ -3376,7 +3404,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace);
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
@@ -3384,18 +3412,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ AddWord(a1, sp, Operand(1 * kSystemPointerSize));
// a1 = v8::PropertyCallbackInfo&
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
__ LoadWord(
api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset));
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+ MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
+
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
return_value_operand);
@@ -3403,8 +3429,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
- // purpose Code object) to be able to call into C functions that may trigger
- // GC and thus move the caller.
+ // purpose InstructionStream object) to be able to call into C functions that
+ // may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@@ -3653,12 +3679,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = a1;
__ LoadWord(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = s1;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
@@ -3669,7 +3695,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
- __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
+ __ Branch(&start_with_baseline, eq, scratch, Operand(CODE_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3685,20 +3711,20 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
__ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
- Operand(CODET_TYPE));
+ Operand(CODE_TYPE));
}
if (v8_flags.debug_code) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
AssertCodeIsBaseline(masm, code_obj, scratch);
}
+
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
@@ -3759,6 +3785,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallCFunction(get_baseline_pc, 3, 0);
}
+ __ LoadCodeEntry(code_obj, code_obj);
__ AddWord(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
@@ -3770,10 +3797,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
- Generate_OSREntry(masm, code_obj,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ Generate_OSREntry(masm, code_obj);
} else {
- __ AddWord(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
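Across both ports in this diff, LoadTaggedPointerField and LoadAnyTaggedField collapse into a single LoadTaggedField. A speculative, simplified sketch of why one helper can serve both kinds of call site: if a single decompression routine distinguishes Smis from compressed heap references itself, callers no longer have to pick a variant. The tag layout below is an assumption for illustration, not V8's actual encoding:

// Hypothetical sketch only; tag layout and decompression are simplified.
#include <cstdint>

constexpr uint32_t kSmiTagMask = 1;  // assumed: low bit 0 => Smi, 1 => heap ref

intptr_t DecompressTagged(uintptr_t cage_base, uint32_t raw) {
  if ((raw & kSmiTagMask) == 0) {
    // Smi: the 32-bit payload is sign-extended; no cage base needed.
    return static_cast<int32_t>(raw);
  }
  // Compressed heap reference: rebase against the pointer-compression cage.
  return static_cast<intptr_t>(cage_base) + raw;
}

int main() {
  const uintptr_t cage = 0x100000000ull;
  const bool ok = DecompressTagged(cage, 0x10) == 0x10 &&
                  DecompressTagged(cage, 0x11) ==
                      static_cast<intptr_t>(cage) + 0x11;
  return ok ? 0 : 1;
}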
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index f5f8fea2f5..ed4a8720c3 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -53,7 +53,10 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
USE(GetSharedFunctionInfoBytecodeOrBaseline);
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ __ LoadMap(scratch1, sfi_data);
+
+#ifndef V8_JITLESS
+ __ CompareInstanceType(scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
@@ -64,8 +67,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ beq(is_baseline);
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
+#else
+ __ CompareInstanceType(scratch1, scratch1, INTERPRETER_DATA_TYPE);
+#endif // !V8_JITLESS
+
__ bne(&done);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -73,11 +80,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
}
void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
- intptr_t offset) {
- if (is_int20(offset)) {
- __ lay(r14, MemOperand(entry_address, offset));
+ Operand offset) {
+ if (!offset.is_reg() && is_int20(offset.immediate())) {
+ __ lay(r14, MemOperand(entry_address, offset.immediate()));
} else {
- __ AddS64(r14, entry_address, Operand(offset));
+ CHECK(offset.is_reg());
+ __ AddS64(r14, entry_address, offset.rm());
}
// "return" to the OSR entry point of the function.
@@ -118,12 +126,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = r3;
__ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = r8;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
@@ -131,7 +139,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
+ __ CompareObjectType(code_obj, r5, r5, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -144,7 +152,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
+ __ CompareObjectType(code_obj, r5, r5, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
@@ -154,11 +162,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = r4;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@@ -221,6 +228,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ PrepareCallCFunction(3, 0, r1);
__ CallCFunction(get_baseline_pc, 3, 0);
}
+ __ LoadCodeEntry(code_obj, code_obj);
__ AddS64(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
@@ -228,9 +236,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
// disarm Sparkplug here.
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r1);
- Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ Generate_OSREntry(masm, code_obj, Operand(0));
} else {
- __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
@@ -269,8 +276,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
__ bne(&jump_to_optimized_code);
}
@@ -315,7 +322,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r3,
FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
@@ -325,10 +332,11 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ AddS64(r2, r3);
- Generate_OSREntry(masm, r2, Code::kHeaderSize - kHeapObjectTag);
+ __ LoadCodeEntry(r2, r2);
+
+ // Compute the target address = code_entry + osr_offset
+ // <entry_addr> = <code_entry> + <osr_offset>
+ Generate_OSREntry(masm, r2, Operand(r3));
}
} // namespace
@@ -422,8 +430,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&stack_overflow);
@@ -466,7 +474,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
@@ -578,8 +586,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(r3, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(r3, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ Ret();
__ bind(&check_receiver);
@@ -627,10 +635,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r3);
// Load suspended function and context.
- __ LoadTaggedPointerField(
- r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
- __ LoadTaggedPointerField(cp,
- FieldMemOperand(r6, JSFunction::kContextOffset));
+ __ LoadTaggedField(r6,
+ FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -671,12 +678,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -686,24 +693,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&done_loop);
__ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
- __ LoadAnyTaggedField(scratch,
- FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);
// Push receiver.
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
@@ -713,7 +720,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadS16(
r2,
@@ -724,7 +731,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(r5, r3);
__ mov(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
}
@@ -736,8 +743,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3);
- __ LoadTaggedPointerField(
- r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(r6,
+ FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -747,8 +754,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r3);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r3);
- __ LoadTaggedPointerField(
- r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(r6,
+ FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -772,7 +779,7 @@ namespace {
constexpr int kPushedStackSpace =
(kNumCalleeSaved + 2) * kSystemPointerSize +
kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize +
- EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
+ EntryFrameConstants::kNextExitFrameFPOffset - kSystemPointerSize;
// Called with the native C calling convention. The corresponding function
// signature is either:
@@ -861,8 +868,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ mov(r6, Operand::Zero());
__ StoreU64(r6, MemOperand(r1));
@@ -871,10 +878,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Set up frame pointer for the frame to be pushed.
// Need to add kSystemPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
- __ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset +
+ __ lay(fp, MemOperand(sp, -EntryFrameConstants::kNextExitFrameFPOffset +
kSystemPointerSize));
pushed_stack_space +=
- EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
+ EntryFrameConstants::kNextExitFrameFPOffset - kSystemPointerSize;
// restore r6
__ mov(r6, r0);
@@ -961,7 +968,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ StoreU64(r5, MemOperand(scrach));
// Reset the stack to the callee saved registers.
- __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
+ __ lay(sp, MemOperand(sp, -EntryFrameConstants::kNextExitFrameFPOffset));
// Reload callee-saved preserved regs, return address reg (r14) and sp
__ LoadMultipleP(r6, sp, MemOperand(sp, 0));
@@ -1142,8 +1149,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1239,11 +1246,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = ip;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector, r1);
// Check for a tiering state.
@@ -1396,14 +1402,13 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = r3;
- Register feedback_vector = r4;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
@@ -1418,18 +1423,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ Register feedback_vector = r4;
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
- __ LoadTaggedPointerField(
- r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedField(r6,
+ FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
__ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
@@ -1459,6 +1465,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1599,22 +1611,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1633,6 +1646,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1651,7 +1665,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ SubS64(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, r1, scratch,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1786,18 +1800,18 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r4, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
- __ AddS64(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(r4, r4);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -2016,8 +2030,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2101,8 +2115,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2151,8 +2165,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r2, r6, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2216,7 +2230,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2233,8 +2248,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadTaggedField(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadS16(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
@@ -2270,7 +2284,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mov(r1, r6);
__ bind(&loop);
- __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
+ __ LoadTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
__ la(r4, MemOperand(r4, kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
@@ -2305,8 +2319,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r5, &new_target_not_constructor);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
@@ -2390,14 +2403,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(r3);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(cp,
- FieldMemOperand(r3, JSFunction::kContextOffset));
+ __ LoadTaggedField(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@@ -2451,7 +2463,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r2, r3);
__ SmiUntag(r2);
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@@ -2482,7 +2494,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r4 and length of that into r6.
Label no_bound_arguments;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ LoadAndTestP(r6, r6);
@@ -2528,7 +2540,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ SubS64(r1, r6, Operand(1));
__ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
- __ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
+ __ LoadTaggedField(scratch, MemOperand(r4, r1), r0);
__ Push(scratch);
__ SubS64(r6, r6, Operand(1));
__ bgt(&loop);
@@ -2552,15 +2564,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);
// Patch the receiver to [[BoundThis]].
- __ LoadAnyTaggedField(r5,
- FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+ __ LoadTaggedField(r5,
+ FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ StoreReceiver(r5, r2, r1);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@@ -2660,7 +2672,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2691,12 +2703,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label skip;
__ CompareTagged(r3, r5);
__ bne(&skip);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2720,8 +2732,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(map,
- FieldMemOperand(target, HeapObject::kMapOffset));
+ __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
{
Register flags = r4;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2804,13 +2815,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = r0;
Label allocate_vector, done;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
vector, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset));
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
- __ LoadTaggedPointerField(vector,
- FieldMemOperand(vector, FixedArray::kHeaderSize));
+ __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
@@ -2942,8 +2952,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r2: number of arguments including receiver
// r3: pointer to builtin function
@@ -2983,9 +2992,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
arg_stack_space += 2;
#endif
- __ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ __ EnterExitFrame(arg_stack_space, builtin_exit_frame
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// Store a copy of argc, argv in callee-saved registers for later.
__ mov(r6, r2);
@@ -3052,7 +3061,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// r6: still holds argc (callee-saved).
: r6;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
+ __ LeaveExitFrame(argc, false);
__ b(r14);
// Handling of exception.
@@ -3313,7 +3322,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
DCHECK_EQ(stack_space, 0);
__ LoadU64(r6, *stack_space_operand);
}
- __ LeaveExitFrame(false, r6, stack_space_operand != nullptr);
+ __ LeaveExitFrame(r6, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ Move(r7, ExternalReference::scheduled_exception_address(isolate));
@@ -3346,6 +3355,18 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ b(&leave_exit_frame, Label::kNear);
}
+MemOperand ExitFrameStackSlotOperand(int offset) {
+ static constexpr int kFrameOffset = 1 * kSystemPointerSize;
+ return MemOperand(sp, (kStackFrameExtraParamSlot * kSystemPointerSize) +
+ offset + kFrameOffset);
+}
+
+MemOperand ExitFrameCallerStackSlotOperand(int index) {
+ return MemOperand(
+ fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
} // namespace
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
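The sp-relative ExitFrameStackSlotOperand helper added just above addresses the FunctionCallbackInfo fields written into the exit frame's extra-parameter area; the hunk that follows asserts the FCI field offsets it relies on. A rough standalone model of that arithmetic (all constant values here are placeholders chosen for illustration, not the real s390 frame constants):

// Simplified model of ExitFrameStackSlotOperand; placeholder constants only.
#include <cstdint>
#include <cstdio>

constexpr int kSystemPointerSize = 8;
constexpr int kStackFrameExtraParamSlot = 1;          // assumed value
constexpr int kFrameOffset = 1 * kSystemPointerSize;  // assumed single slot
                                                      // pushed by EnterExitFrame

// Mirrors the static_asserts below: implicit_args_, values_ and length_ sit at
// consecutive pointer-sized offsets inside FunctionCallbackInfo.
constexpr int kImplicitArgsOffset = 0;
constexpr int kValuesOffset = 1 * kSystemPointerSize;
constexpr int kLengthOffset = 2 * kSystemPointerSize;

intptr_t ExitFrameStackSlotAddress(intptr_t sp, int offset) {
  return sp + kStackFrameExtraParamSlot * kSystemPointerSize + offset +
         kFrameOffset;
}

int main() {
  const intptr_t sp = 0x7ffe0000;
  std::printf("%ld %ld %ld\n",
              static_cast<long>(ExitFrameStackSlotAddress(sp, kImplicitArgsOffset)),
              static_cast<long>(ExitFrameStackSlotAddress(sp, kValuesOffset)),
              static_cast<long>(ExitFrameStackSlotAddress(sp, kLengthOffset)));
  return 0;
}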
@@ -3368,12 +3389,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
Register scratch = r6;
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
+ using FCI = FunctionCallbackInfo<v8::Value>;
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3381,12 +3403,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kSystemPointerSize]: kHolder
+ // sp[0 * kSystemPointerSize]: kHolder <= FCI::implicit_args_
// sp[1 * kSystemPointerSize]: kIsolate
// sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kSystemPointerSize]: undefined (kReturnValue)
// sp[4 * kSystemPointerSize]: kData
// sp[5 * kSystemPointerSize]: undefined (kNewTarget)
+ // Existing state:
+ // sp[6 * kSystemPointerSize]: <= FCI::values_
// Reserve space on the stack.
__ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize)));
@@ -3421,52 +3445,46 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// [0] space for DirectCEntryStub's LR save
// [1-3] FunctionCallbackInfo
// [4] number of bytes to drop from the stack after returning
+ static constexpr int kSlotsToDropSize = 1 * kSystemPointerSize;
static constexpr int kApiStackSpace = 5;
- static constexpr bool kDontSaveDoubles = false;
+ static_assert(FCI::kImplicitArgsOffset == 0);
+ static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize);
+ static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize);
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
- kSystemPointerSize));
+ __ StoreU64(scratch, ExitFrameStackSlotOperand(FCI::kImplicitArgsOffset));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ AddS64(scratch, scratch,
- Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
- kSystemPointerSize));
+ Operand(FCA::kArgsLengthWithReceiver * kSystemPointerSize));
+ __ StoreU64(scratch, ExitFrameStackSlotOperand(FCI::kValuesOffset));
// FunctionCallbackInfo::length_.
- __ StoreU32(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
- kSystemPointerSize));
+ __ StoreU32(argc, ExitFrameStackSlotOperand(FCI::kLengthOffset));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
+ MemOperand stack_space_operand =
+ ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropSize);
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddS64(scratch, r1);
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
- kSystemPointerSize));
+ __ StoreU64(scratch, stack_space_operand);
// v8::InvocationCallback's argument.
__ lay(r2,
MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // There are two stack slots above the arguments we constructed on the stack.
- // TODO(jgruber): Document what these arguments are.
- static constexpr int kStackSlotsAboveFCA = 2;
- MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
-
+ MemOperand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(
- sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3480,14 +3498,15 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
int apiStackSpace = 0;
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -3499,20 +3518,21 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ constexpr int kNameHandleStackSize = 1;
+ constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mov(r2, sp); // r2 = Handle<Name>
@@ -3540,7 +3560,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, apiStackSpace);
+ __ EnterExitFrame(apiStackSpace, StackFrame::EXIT);
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
@@ -3556,15 +3576,11 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
-
__ LoadU64(
api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index a54c2c466c..ca7b4ca57c 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -52,8 +52,9 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
// PC-relative call/jump instructions can be used for builtin to builtin
// calls/tail calls. The embedded builtins blob generator also ensures that.
// However, there are serializer tests, where we force isolate creation at
- // runtime and at this point, Code space isn't restricted to a size s.t.
- // PC-relative calls may be used. So, we fall back to an indirect mode.
+ // runtime and at this point, Code space isn't restricted to a
+ // size s.t. PC-relative calls may be used. So, we fall back to an indirect
+ // mode.
options.use_pc_relative_calls_and_jumps_for_mksnapshot =
pc_relative_calls_fit_in_code_range;
@@ -211,7 +212,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
Code code) {
DCHECK_EQ(builtin, code.builtin_id());
- builtins->set_code(builtin, ToCodeT(code));
+ builtins->set_code(builtin, code);
}
// static
@@ -242,35 +243,35 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
PtrComprCageBase cage_base(isolate);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = FromCodeT(builtins->code(builtin));
+ Code code = builtins->code(builtin);
isolate->heap()->UnprotectAndRegisterMemoryChunk(
code, UnprotectMemoryOrigin::kMainThread);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
- Builtins::IsIsolateIndependent(target.builtin_id()));
- if (!target.is_builtin()) continue;
- CodeT new_target = builtins->code(target.builtin_id());
- rinfo->set_target_address(new_target.raw_instruction_start(),
+ Code target_code = Code::FromTargetAddress(rinfo->target_address());
+ DCHECK_IMPLIES(
+ RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
+ Builtins::IsIsolateIndependent(target_code.builtin_id()));
+ if (!target_code.is_builtin()) continue;
+ Code new_target = builtins->code(target_code.builtin_id());
+ rinfo->set_target_address(new_target.InstructionStart(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
Object object = rinfo->target_object(cage_base);
- if (!object.IsCodeT(cage_base)) continue;
- CodeT target = CodeT::cast(object);
+ if (!object.IsCode(cage_base)) continue;
+ Code target = Code::cast(object);
if (!target.is_builtin()) continue;
- CodeT new_target = builtins->code(target.builtin_id());
+ Code new_target = builtins->code(target.builtin_id());
rinfo->set_target_object(isolate->heap(), new_target,
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
flush_icache = true;
}
if (flush_icache) {
- FlushInstructionCache(code.raw_instruction_start(),
- code.raw_instruction_size());
+ FlushInstructionCache(code.InstructionStart(), code.instruction_size());
}
}
}
diff --git a/deps/v8/src/builtins/string-iswellformed.tq b/deps/v8/src/builtins/string-iswellformed.tq
new file mode 100644
index 0000000000..9eebf8fbff
--- /dev/null
+++ b/deps/v8/src/builtins/string-iswellformed.tq
@@ -0,0 +1,48 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace runtime {
+extern runtime StringIsWellFormed(Context, String): Boolean;
+}
+
+namespace string {
+
+extern macro StringBuiltinsAssembler::HasUnpairedSurrogate(String):
+ bool labels Indirect;
+
+transitioning javascript builtin
+StringPrototypeIsWellFormed(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(...arguments): Boolean {
+ const methodName: constexpr string = 'String.prototype.isWellFormed';
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const s = ToThisString(receiver, methodName);
+
+ // 3. Return IsStringWellFormedUnicode(S).
+
+ // Fast path: one-byte strings cannot have unpaired surrogates and are
+ // definitionally well-formed.
+ //
+ // See note in String::IsWellFormedUnicode.
+ if (s.IsOneByteRepresentation()) return True;
+
+ // Slow path: flatten the string and look for unpaired surrogates.
+ //
+ // TODO(v8:13557): The two-byte case can be optimized by extending the
+ // InstanceType. See
+ // https://docs.google.com/document/d/15f-1c_Ysw3lvjy_Gx0SmmD9qeO8UuXuAbWIpWCnTDO8/
+ const flat = Flatten(s);
+ if (flat.IsOneByteRepresentationUnderneath()) return True;
+ try {
+ const illFormed = HasUnpairedSurrogate(flat) otherwise Indirect;
+ return illFormed ? False : True;
+ } label Indirect deferred {
+ return runtime::StringIsWellFormed(context, flat);
+ }
+}
+}
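As a quick illustration of the JS-visible behaviour the new builtin above implements, here is a usage sketch (not part of the patch; it assumes a runtime that already ships String.prototype.isWellFormed, e.g. a recent V8, and TypeScript with an es2024 lib or plain JS):

    const paired = "a\u{1F600}b";                 // surrogate pair: well-formed
    const lone = "a\uD83Db";                      // unpaired lead surrogate
    console.log(paired.isWellFormed());           // true
    console.log(lone.isWellFormed());             // false (the HasUnpairedSurrogate path)
    console.log("latin1 only".isWellFormed());    // true, via the one-byte fast path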
diff --git a/deps/v8/src/builtins/string-towellformed.tq b/deps/v8/src/builtins/string-towellformed.tq
new file mode 100644
index 0000000000..662037d4a8
--- /dev/null
+++ b/deps/v8/src/builtins/string-towellformed.tq
@@ -0,0 +1,62 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace runtime {
+extern runtime StringToWellFormed(Context, String): String;
+}
+
+namespace string {
+
+extern macro StringBuiltinsAssembler::ReplaceUnpairedSurrogates(
+ String, String): void labels Indirect;
+
+transitioning javascript builtin
+StringPrototypeToWellFormed(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.toWellFormed';
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const s = ToThisString(receiver, methodName);
+
+ // Fast path: one-byte strings cannot have unpaired surrogates and are
+ // definitionally well-formed.
+ //
+ // See note in String::IsWellFormedUnicode.
+ if (s.IsOneByteRepresentation()) return s;
+
+ // 3. Let strLen be the length of S.
+ const strLen = s.length_uint32;
+
+ // 4. Let k be 0.
+ // 5. Let result be the empty String.
+ const flat = Flatten(s);
+ if (flat.IsOneByteRepresentationUnderneath()) return flat;
+ let result = flat;
+
+ // 6. Repeat, while k < strLen,
+ // a. Let cp be CodePointAt(S, k).
+ // b. If cp.[[IsUnpairedSurrogate]] is true, then
+ // i. Set result to the string-concatenation of result and
+ // 0xFFFD (REPLACEMENT CHARACTER).
+ // c. Else,
+ // i. Set result to the string-concatenation of result and
+ // UTF16EncodeCodePoint(cp.[[CodePoint]]).
+ // d. Set k to k + cp.[[CodeUnitCount]].
+ try {
+ const illFormed = HasUnpairedSurrogate(flat) otherwise Indirect;
+ if (illFormed) {
+ result = AllocateSeqTwoByteString(strLen);
+ ReplaceUnpairedSurrogates(flat, result) otherwise Indirect;
+ }
+
+ // 7. Return result.
+ return result;
+ } label Indirect deferred {
+ return runtime::StringToWellFormed(context, flat);
+ }
+}
+}
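Similarly, the JS-visible effect of toWellFormed (usage sketch only, same runtime assumptions as above):

    const lone = "a\uD800z";                                     // unpaired lead surrogate
    console.log(lone.toWellFormed() === "a\uFFFDz");             // true: U+FFFD substituted
    console.log("ascii".toWellFormed() === "ascii");             // true: one-byte strings are returned as-is
    console.log("a\u{1F600}b".toWellFormed() === "a\u{1F600}b"); // true: already well-formed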
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index b36741e1dd..5baa32cd20 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -119,6 +119,11 @@ struct Slice<T: type, Reference: type> {
return this.TryAtIndex(i) otherwise unreachable;
}
+ macro AtIndex(index: uint32): Reference {
+ const i: intptr = Convert<intptr>(index);
+ return this.TryAtIndex(i) otherwise unreachable;
+ }
+
macro Iterator(): SliceIterator<T, Reference> {
const end = this.offset + TimesSizeOf<T>(this.length);
return SliceIterator<T, Reference>{
@@ -262,17 +267,23 @@ extern macro Allocate(
const kAllocateBaseFlags: constexpr AllocationFlag =
AllocationFlag::kAllowLargeObjectAllocation;
macro AllocateFromNew(
- sizeInBytes: intptr, map: Map, pretenured: bool): UninitializedHeapObject {
+ sizeInBytes: intptr, map: Map, pretenured: bool,
+ clearPadding: bool): UninitializedHeapObject {
dcheck(ValidAllocationSize(sizeInBytes, map));
+ let res: UninitializedHeapObject;
if (pretenured) {
- return Allocate(
+ res = Allocate(
sizeInBytes,
%RawConstexprCast<constexpr AllocationFlag>(
%RawConstexprCast<constexpr int32>(kAllocateBaseFlags) |
%RawConstexprCast<constexpr int32>(AllocationFlag::kPretenured)));
} else {
- return Allocate(sizeInBytes, kAllocateBaseFlags);
+ res = Allocate(sizeInBytes, kAllocateBaseFlags);
+ }
+ if (clearPadding) {
+ *unsafe::NewReference<Zero>(res, sizeInBytes - kObjectAlignment) = kZero;
}
+ return res;
}
macro InitializeFieldsFromIterator<T: type, Iterator: type>(
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index a8d2533123..4ec380da1e 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -59,13 +59,14 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.byte_offset = byteOffset;
if (isLengthTracking) {
dcheck(IsResizableArrayBuffer(buffer));
- // Make the byte_length of length-tracking TAs zero, so that we won't
- // accidentally use it and access invalid data.
+ // Set the byte_length and length fields of length-tracking TAs to zero, so
+ // that we won't accidentally use them and access invalid data.
typedArray.byte_length = 0;
+ typedArray.length = 0;
} else {
typedArray.byte_length = byteLength;
+ typedArray.length = length;
}
- typedArray.length = length;
typedArray.bit_field.is_length_tracking = isLengthTracking;
typedArray.bit_field.is_backed_by_rab =
IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer);
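The length-tracking distinction made above is observable from JS once resizable ArrayBuffers are available; a small sketch (assumes a runtime with resizable-ArrayBuffer support, e.g. recent V8/Node):

    const rab = new ArrayBuffer(4, { maxByteLength: 16 });
    const tracking = new Uint8Array(rab);     // no explicit length => length-tracking
    console.log(tracking.length);             // 4
    rab.resize(12);
    console.log(tracking.length);             // 12: length is recomputed from the buffer
    const fixed = new Uint8Array(rab, 0, 4);  // explicit length => not length-tracking
    rab.resize(16);
    console.log(fixed.length);                // still 4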
@@ -235,7 +236,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
const rabGsab: bool = IsResizableArrayBuffer(buffer) &&
(!IsSharedArrayBuffer(buffer) || isLengthTracking);
if (rabGsab) {
- map = GetDerivedRabGsabMap(target, newTarget);
+ map = GetDerivedRabGsabTypedArrayMap(target, newTarget);
} else {
map = GetDerivedMap(target, newTarget);
}
@@ -477,9 +478,16 @@ transitioning macro TypedArraySpeciesCreateByLength(implicit context: Context)(
const typedArray: JSTypedArray = TypedArraySpeciesCreate(
methodName, numArgs, exemplar, Convert<Number>(length), Undefined,
Undefined);
- if (typedArray.length < length) deferred {
- ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
- }
+ try {
+ const createdArrayLength =
+ LoadJSTypedArrayLengthAndCheckDetached(typedArray)
+ otherwise DetachedOrOutOfBounds;
+ if (createdArrayLength < length) deferred {
+ ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
+ }
+ } label DetachedOrOutOfBounds {
+ ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
+ }
return typedArray;
}
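The check above is observable from JS when a @@species constructor returns an array shorter than requested; a rough sketch (hypothetical class name, TypeScript, behaviour per the TypedArraySpeciesCreate spec steps):

    class Shrinking extends Uint8Array {
      static get [Symbol.species]() {
        // Species constructor that ignores the requested length.
        return function () { return new Uint8Array(1); } as unknown as Uint8ArrayConstructor;
      }
    }
    try {
      new Shrinking(8).slice(0, 4);    // asks the species for 4 elements, gets 1
    } catch (e) {
      console.log((e as Error).name);  // "TypeError" (kTypedArrayTooShort)
    }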
diff --git a/deps/v8/src/builtins/typed-array-from.tq b/deps/v8/src/builtins/typed-array-from.tq
index 60e08be27e..eba7c87ea1 100644
--- a/deps/v8/src/builtins/typed-array-from.tq
+++ b/deps/v8/src/builtins/typed-array-from.tq
@@ -197,16 +197,18 @@ TypedArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(
}
// 12e. Perform ? Set(targetObj, Pk, mappedValue, true).
- // Buffer may be detached during executing ToNumber/ToBigInt.
+ // The buffer may be detached, or the target TypedArray may go out of
+ // bounds, while executing ToNumber/ToBigInt or the mapper function above.
accessor.StoreJSAny(context, targetObj, k, mappedValue)
- otherwise IfDetached;
+ otherwise IfDetachedOrOutOfBounds;
// 12f. Set k to k + 1. (done by the loop).
}
return targetObj;
} label NotConstructor deferred {
ThrowTypeError(MessageTemplate::kNotConstructor, receiver);
- } label IfDetached deferred {
+ } label IfDetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFrom);
}
}
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index d51fbd7e91..26f2956a0d 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -97,7 +97,7 @@ type StoreJSAnyFn = builtin(Context, JSTypedArray, uintptr, JSAny) => Smi;
// The result codes returned by StoreNumericFn and StoreJSAnyFn builtins.
const kStoreSucceded: Smi = 0;
-const kStoreFailureArrayDetached: Smi = 1;
+const kStoreFailureArrayDetachedOrOutOfBounds: Smi = 1;
struct TypedArrayAccessor {
macro LoadNumeric(array: JSTypedArray, index: uintptr): Numeric {
@@ -115,11 +115,11 @@ struct TypedArrayAccessor {
macro StoreJSAny(
context: Context, array: JSTypedArray, index: uintptr,
- value: JSAny): void labels IfDetached {
+ value: JSAny): void labels IfDetachedOrOutOfBounds {
const storefn: StoreJSAnyFn = this.storeJSAnyFn;
const result = storefn(context, array, index, value);
- if (result == kStoreFailureArrayDetached) {
- goto IfDetached;
+ if (result == kStoreFailureArrayDetachedOrOutOfBounds) {
+ goto IfDetachedOrOutOfBounds;
}
dcheck(result == kStoreSucceded);
}
@@ -305,9 +305,9 @@ builtin StoreTypedElementJSAny<T : type extends ElementsKind>(
try {
StoreJSTypedArrayElementFromTagged(
context, typedArray, index, value, KindForArrayType<T>())
- otherwise IfDetached;
- } label IfDetached {
- return kStoreFailureArrayDetached;
+ otherwise IfDetachedOrOutOfBounds;
+ } label IfDetachedOrOutOfBounds {
+ return kStoreFailureArrayDetachedOrOutOfBounds;
}
return kStoreSucceded;
}
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index 5916c234ec..5fe8fd6a04 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -20,6 +20,7 @@ extern runtime WasmFunctionTableGet(
extern runtime WasmFunctionTableSet(
Context, WasmInstanceObject, Smi, Smi, Object): JSAny;
extern runtime ThrowWasmError(Context, Smi): JSAny;
+extern runtime WasmThrowTypeError(Context, Smi, JSAny): never;
extern runtime WasmThrow(Context, Object, FixedArray): JSAny;
extern runtime WasmReThrow(Context, Object): JSAny;
extern runtime WasmTriggerTierUp(Context, WasmInstanceObject): JSAny;
@@ -39,9 +40,9 @@ extern runtime WasmArrayCopy(
extern runtime WasmArrayNewSegment(
Context, WasmInstanceObject, Smi, Smi, Smi, Map): Object;
extern runtime WasmStringNewWtf8(
- Context, WasmInstanceObject, Smi, Smi, Number, Number): String;
+ Context, WasmInstanceObject, Smi, Smi, Number, Number): String|Null;
extern runtime WasmStringNewWtf8Array(
- Context, Smi, WasmArray, Smi, Smi): String;
+ Context, Smi, WasmArray, Smi, Smi): String|Null;
extern runtime WasmStringNewWtf16(
Context, WasmInstanceObject, Smi, Number, Number): String;
extern runtime WasmStringNewWtf16Array(Context, WasmArray, Smi, Smi): String;
@@ -59,6 +60,9 @@ extern runtime WasmStringViewWtf8Encode(
Context, WasmInstanceObject, Smi, ByteArray, Number, Number, Number): JSAny;
extern runtime WasmStringViewWtf8Slice(
Context, ByteArray, Number, Number): String;
+extern runtime WasmStringFromCodePoint(Context, Number): String;
+extern runtime WasmStringHash(NoContext, String): Smi;
+extern runtime WasmJSToWasmObject(Context, JSAny, Smi): JSAny;
}
namespace unsafe {
@@ -67,10 +71,8 @@ extern macro Allocate(intptr, constexpr AllocationFlag): HeapObject;
}
namespace wasm {
-const kAnyTableType: constexpr int31
+const kAnyType: constexpr int31
generates 'wasm::kWasmAnyRef.raw_bit_field()';
-const kAnyNonNullTableType: constexpr int31
- generates 'wasm::kWasmAnyNonNullableRef.raw_bit_field()';
const kMaxPolymorphism:
constexpr int31 generates 'wasm::kMaxPolymorphism';
@@ -86,6 +88,8 @@ extern macro WasmBuiltinsAssembler::LoadInternalFunctionsFromInstance(
WasmInstanceObject): FixedArray;
extern macro WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
WasmInstanceObject): FixedArray;
+extern macro WasmBuiltinsAssembler::LoadContextFromWasmOrJsFrame():
+ NativeContext;
macro LoadContextFromFrame(): NativeContext {
return LoadContextFromInstance(LoadInstanceFromFrame());
@@ -95,10 +99,10 @@ builtin WasmInt32ToHeapNumber(val: int32): HeapNumber {
return AllocateHeapNumberWithValue(Convert<float64>(val));
}
-builtin WasmFuncRefToJS(val: WasmInternalFunction|Null): JSFunction|Null|
+builtin WasmFuncRefToJS(val: WasmInternalFunction|WasmNull): JSFunction|Null|
Undefined {
typeswitch (val) {
- case (Null): {
+ case (WasmNull): {
return Null;
}
case (func: WasmInternalFunction): {
@@ -289,10 +293,11 @@ builtin WasmRefFunc(index: uint32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
try {
const table: FixedArray = LoadInternalFunctionsFromInstance(instance);
- if (table == Undefined) goto CallRuntime;
const functionIndex: intptr = Signed(ChangeUint32ToWord(index));
const result: Object = LoadFixedArrayElement(table, functionIndex);
- if (result == Undefined) goto CallRuntime;
+ // {result} is either a funcref or nullptr. A Smi check is the fastest
+ // way to distinguish these two cases.
+ if (TaggedIsSmi(result)) goto CallRuntime;
return result;
} label CallRuntime deferred {
tail runtime::WasmRefFunc(
@@ -402,10 +407,10 @@ builtin WasmArrayNewSegment(
builtin WasmArrayCopyWithChecks(
dstIndex: uint32, srcIndex: uint32, length: uint32, dstObject: Object,
srcObject: Object): JSAny {
- if (dstObject == Null) tail ThrowWasmTrapNullDereference();
- if (srcObject == Null) tail ThrowWasmTrapNullDereference();
- const dstArray = %RawDownCast<WasmArray>(dstObject);
- const srcArray = %RawDownCast<WasmArray>(srcObject);
+ if (dstObject == kWasmNull) tail ThrowWasmTrapNullDereference();
+ if (srcObject == kWasmNull) tail ThrowWasmTrapNullDereference();
+ const dstArray = UnsafeCast<WasmArray>(dstObject);
+ const srcArray = UnsafeCast<WasmArray>(srcObject);
// Check that the end of the copying range is in-bounds and that the range
// does not overflow.
if (dstIndex + length > dstArray.length || dstIndex + length < dstIndex ||
@@ -419,11 +424,14 @@ builtin WasmArrayCopyWithChecks(
}
builtin WasmArrayCopy(
- dstIndex: uint32, srcIndex: uint32, length: uint32, dstArray: WasmArray,
- srcArray: WasmArray): JSAny {
+ dstIndex: uint32, srcIndex: uint32, length: uint32, dstObject: Object,
+ srcObject: Object): JSAny {
+ if (dstObject == kWasmNull) tail ThrowWasmTrapNullDereference();
+ if (srcObject == kWasmNull) tail ThrowWasmTrapNullDereference();
if (length == 0) return Undefined;
tail runtime::WasmArrayCopy(
- LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray,
+ LoadContextFromFrame(), UnsafeCast<WasmArray>(dstObject),
+ SmiFromUint32(dstIndex), UnsafeCast<WasmArray>(srcObject),
SmiFromUint32(srcIndex), SmiFromUint32(length));
}
@@ -505,7 +513,6 @@ builtin WasmI64AtomicWait(
// Type feedback collection support for `call_ref`.
extern macro GetCodeEntry(Code): RawPtr;
-extern macro GetCodeEntry(CodeDataContainer): RawPtr;
struct TargetAndInstance {
target: RawPtr;
@@ -630,7 +637,7 @@ transitioning builtin WasmGetOwnProperty(implicit context: Context)(
// Trap builtins.
builtin WasmTrap(error: Smi): JSAny {
- tail runtime::ThrowWasmError(LoadContextFromFrame(), error);
+ tail runtime::ThrowWasmError(LoadContextFromWasmOrJsFrame(), error);
}
builtin ThrowWasmTrapUnreachable(): JSAny {
@@ -698,6 +705,10 @@ builtin ThrowWasmTrapArrayTooLarge(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayTooLarge));
}
+builtin ThrowWasmTrapStringOffsetOutOfBounds(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapStringOffsetOutOfBounds));
+}
+
macro TryNumberToIntptr(value: JSAny): intptr labels Failure {
typeswitch (value) {
case (s: Smi): {
@@ -804,14 +815,15 @@ transitioning javascript builtin ExperimentalWasmConvertStringToArray(
}
builtin WasmStringNewWtf8(
- offset: uint32, size: uint32, memory: Smi, utf8Variant: Smi): String {
+ offset: uint32, size: uint32, memory: Smi, utf8Variant: Smi): String|Null {
const instance = LoadInstanceFromFrame();
tail runtime::WasmStringNewWtf8(
LoadContextFromInstance(instance), instance, memory, utf8Variant,
WasmUint32ToNumber(offset), WasmUint32ToNumber(size));
}
builtin WasmStringNewWtf8Array(
- start: uint32, end: uint32, array: WasmArray, utf8Variant: Smi): String {
+ start: uint32, end: uint32, array: WasmArray, utf8Variant: Smi): String|
+ Null {
const context = LoadContextFromFrame();
try {
if (array.length < end) goto OffsetOutOfRange;
@@ -831,20 +843,119 @@ builtin WasmStringNewWtf16(
LoadContextFromInstance(instance), instance, SmiFromUint32(memory),
WasmUint32ToNumber(offset), WasmUint32ToNumber(size));
}
+
+struct TwoByteToOneByteIterator {
+ macro Next(): char8 labels NoMore {
+ if (this.offset == this.end_offset) goto NoMore;
+ const raw: char16 = *torque_internal::unsafe::NewReference<char16>(
+ this.array, this.offset);
+ const result: char8 = %RawDownCast<char8>(raw & 0xFF);
+ this.offset += 2;
+ return result;
+ }
+
+ array: WasmArray;
+ offset: intptr;
+ end_offset: intptr;
+}
+
builtin WasmStringNewWtf16Array(
array: WasmArray, start: uint32, end: uint32): String {
- const context = LoadContextFromFrame();
try {
if (array.length < end) goto OffsetOutOfRange;
if (end < start) goto OffsetOutOfRange;
- tail runtime::WasmStringNewWtf16Array(
- context, array, SmiFromUint32(start), SmiFromUint32(end));
+ const length: uint32 = end - start;
+ if (length == 0) return kEmptyString;
+ // Calling into the runtime has overhead, but once we're there it's faster,
+ // so it pays off for long strings. The threshold has been determined
+ // experimentally.
+ if (length >= 32) goto Runtime;
+ const intptrLength = Convert<intptr>(length);
+ const arrayContent = torque_internal::unsafe::NewConstSlice<char16>(
+ array, kWasmArrayHeaderSize, Convert<intptr>(array.length));
+ const substring =
+ Subslice(arrayContent, Convert<intptr>(start), intptrLength)
+ otherwise goto OffsetOutOfRange;
+
+ // Ideas for additional future improvements:
+ // (1) We could add a fast path for very short strings, e.g. <= 8 chars,
+ // and just allocate two-byte strings for them. That would save time
+ // here, and would only waste a couple of bytes at most. A concern is
+ // that such strings couldn't take one-byte fast paths later on, e.g.
+ // in toLower/toUpper case conversions.
+ // (2) We could load more than one array element at a time, e.g. using
+ // intptr-wide loads, or possibly even wider SIMD instructions. We'd
+ // have to make sure that non-aligned start offsets are handled,
+ // and the implementation would become more platform-specific.
+ // (3) We could shift the problem around by allocating two-byte strings
+ // here and checking whether they're one-byte-compatible later, e.g.
+ // when promoting them from new to old space. Drawback: rewriting
+ // strings to different maps isn't great for optimized code that's
+ // based on collected type feedback, or that wants to elide duplicate
+ // map checks within the function.
+ // (4) We could allocate space for a two-byte string, then optimistically
+ // start writing one-byte characters into it, and then either restart
+ // in two-byte mode if needed, or return the over-allocated bytes to
+ // the allocator in the end.
+ // (5) We could standardize a `string.new_ascii_array` instruction, which
+ // could safely produce one-byte strings without checking characters.
+ // See https://github.com/WebAssembly/stringref/issues/53.
+
+ try {
+ // To reduce the amount of branching, check 8 code units at a time. The
+ // tradeoff for choosing 8 is that we want to check for early termination
+ // of the loop often (to avoid unnecessary work) but not too often
+ // (because each check has a cost).
+ let i: intptr = 0;
+ const eightElementLoopEnd = intptrLength - 8;
+ while (i <= eightElementLoopEnd) {
+ const bits = Convert<uint32>(*substring.UncheckedAtIndex(i)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 1)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 2)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 3)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 4)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 5)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 6)) |
+ Convert<uint32>(*substring.UncheckedAtIndex(i + 7));
+ if (bits > 0xFF) goto TwoByte;
+ i += 8;
+ }
+ let bits: uint32 = 0;
+ while (i < intptrLength) {
+ bits |= Convert<uint32>(*substring.UncheckedAtIndex(i));
+ i += 1;
+ }
+ if (bits > 0xFF) goto TwoByte;
+ } label TwoByte {
+ return AllocateSeqTwoByteString(length, substring.Iterator());
+ }
+
+ return AllocateNonEmptySeqOneByteString(length, TwoByteToOneByteIterator{
+ array: array,
+ offset: kWasmArrayHeaderSize +
+ torque_internal::TimesSizeOf<char16>(Convert<intptr>(start)),
+ end_offset: kWasmArrayHeaderSize +
+ torque_internal::TimesSizeOf<char16>(Convert<intptr>(end))
+ });
} label OffsetOutOfRange deferred {
+ const context = LoadContextFromFrame();
const error = MessageTemplate::kWasmTrapArrayOutOfBounds;
runtime::ThrowWasmError(context, SmiConstant(error));
unreachable;
+ } label Runtime deferred {
+ const context = LoadContextFromFrame();
+ tail runtime::WasmStringNewWtf16Array(
+ context, array, SmiFromUint32(start), SmiFromUint32(end));
}
}
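The 8-at-a-time OR check above is a general trick for deciding whether a run of UTF-16 code units fits in one byte; a standalone sketch of the same idea (hypothetical helper, TypeScript, not part of the patch):

    function fitsInOneByte(units: Uint16Array): boolean {
      let i = 0;
      const blockEnd = units.length - 8;
      // OR eight code units per iteration so only one branch is taken per block.
      while (i <= blockEnd) {
        const bits = units[i] | units[i + 1] | units[i + 2] | units[i + 3] |
                     units[i + 4] | units[i + 5] | units[i + 6] | units[i + 7];
        if (bits > 0xff) return false;
        i += 8;
      }
      // Scalar tail for the remaining (< 8) code units.
      let bits = 0;
      for (; i < units.length; i++) bits |= units[i];
      return bits <= 0xff;
    }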
+
+// Contract: input is any string, output is a string that the TF operator
+// "StringPrepareForGetCodeunit" can handle.
+builtin WasmStringAsWtf16(str: String): String {
+ const cons = Cast<ConsString>(str) otherwise return str;
+ return Flatten(cons);
+}
+
builtin WasmStringConst(index: uint32): String {
const instance = LoadInstanceFromFrame();
tail runtime::WasmStringConst(
@@ -916,15 +1027,27 @@ builtin WasmStringEncodeWtf16Array(
unreachable;
}
}
+
+builtin ThrowToLowerCaseCalledOnNull(): JSAny {
+ const context = LoadContextFromFrame();
+ const error = MessageTemplate::kCalledOnNullOrUndefined;
+ const name = StringConstant('String.prototype.toLowerCase');
+ runtime::WasmThrowTypeError(context, SmiConstant(error), name);
+}
+
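For context, the JS analogue of the error this helper raises (the wasm fast path reuses V8's kCalledOnNullOrUndefined message template); a usage sketch:

    try {
      (String.prototype.toLowerCase as Function).call(null);
    } catch (e) {
      // In V8 the message reads "String.prototype.toLowerCase called on null or undefined".
      console.log((e as Error).name);  // "TypeError"
    }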
builtin WasmStringConcat(a: String, b: String): String {
const context = LoadContextFromFrame();
tail StringAdd_CheckNone(a, b);
}
+
+extern builtin StringEqual(NoContext, String, String, intptr): Boolean;
+
builtin WasmStringEqual(a: String, b: String): int32 {
if (TaggedEqual(a, b)) return 1;
if (a.length != b.length) return 0;
- const context = LoadContextFromFrame();
- if (StringEqual(context, a, b) == True) return 1;
+ if (StringEqual(kNoContext, a, b, Convert<intptr>(a.length)) == True) {
+ return 1;
+ }
return 0;
}
builtin WasmStringIsUSVSequence(str: String): int32 {
@@ -1049,7 +1172,7 @@ transitioning builtin WasmStringViewWtf16Slice(
return string::SubString(string, Convert<uintptr>(start), clampedEnd);
}
builtin WasmStringAsIter(string: String): WasmStringViewIter {
- return new WasmStringViewIter{string: string, offset: 0};
+ return new WasmStringViewIter{string: string, offset: 0, optional_padding: 0};
}
macro IsLeadSurrogate(code: char16): bool {
return (code & 0xfc00) == 0xd800;
@@ -1151,28 +1274,21 @@ builtin WasmStringViewIterSlice(
string::SubString(string, Convert<uintptr>(start), Convert<uintptr>(end));
}
-transitioning builtin WasmExternInternalize(implicit context: Context)(
- externObject: JSAny): JSAny {
- const innerObject =
- WasmGetOwnProperty(externObject, WasmWrappedObjectSymbolConstant());
- if (innerObject == Undefined) {
- return externObject;
- }
- return innerObject;
+builtin WasmStringFromCodePoint(codePoint: uint32): String {
+ tail runtime::WasmStringFromCodePoint(
+ LoadContextFromFrame(), WasmUint32ToNumber(codePoint));
}
-transitioning builtin WasmExternExternalize(implicit context: Context)(
- anyObject: JSAny): JSAny {
- typeswitch (anyObject) {
- case (wasmArray: WasmArray): {
- return WasmAllocateObjectWrapper(wasmArray);
- }
- case (wasmStruct: WasmStruct): {
- return WasmAllocateObjectWrapper(wasmStruct);
- }
- case (JSAny): {
- return anyObject;
- }
- }
+builtin WasmStringHash(string: String): int32 {
+ const result = runtime::WasmStringHash(kNoContext, string);
+ return SmiToInt32(result);
+}
+
+builtin WasmExternInternalize(externObject: JSAny): JSAny {
+ const instance = LoadInstanceFromFrame();
+ const context = LoadContextFromInstance(instance);
+
+ tail runtime::WasmJSToWasmObject(
+ context, externObject, SmiConstant(kAnyType));
}
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 756df93be2..31d01a2099 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -51,6 +51,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
namespace {
+constexpr int kReceiverOnStackSize = kSystemPointerSize;
+
enum class ArgumentsElementType {
kRaw, // Push arguments as they are.
kHandle // Dereference arguments before pushing.
@@ -102,7 +104,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// InvokeFunction.
// Set up pointer to first argument (skip receiver).
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
// Copy arguments to the expression stack.
// rbx: Pointer to start of arguments.
@@ -125,7 +127,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIncludesReceiver);
__ ret(0);
@@ -137,6 +139,21 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
}
+// Provides access to exit frame stack space (not GCed).
+Operand ExitFrameStackSlotOperand(int index) {
+#ifdef V8_TARGET_OS_WIN
+ return Operand(rsp, (index + kWindowsHomeStackSlots) * kSystemPointerSize);
+#else
+ return Operand(rsp, index * kSystemPointerSize);
+#endif
+}
+
+Operand ExitFrameCallerStackSlotOperand(int index) {
+ return Operand(rbp,
+ (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -171,9 +188,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
const TaggedRegister shared_function_info(rbx);
- __ LoadTaggedPointerField(
- shared_function_info,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(shared_function_info,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movl(rbx,
FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
@@ -217,7 +233,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movq(r8, rax);
// Set up pointer to first argument (skip receiver).
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
// Restore constructor function and argument count.
@@ -282,18 +298,15 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIncludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
__ bind(&check_result);
__ JumpIfSmi(rax, &use_receiver, Label::kNear);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(above_equal, &leave_and_return, Label::kNear);
+ // Check if the type of the result is not an object in the ECMA sense.
+ __ JumpIfJSAnyIsNotPrimitive(rax, rcx, &leave_and_return, Label::kNear);
__ jmp(&use_receiver);
__ bind(&do_throw);
@@ -338,7 +351,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
{
NoRootArrayScope uninitialized_root_register(masm);
- // Set up frame.
+
+ // Set up the frame.
+ //
+ // Note: at this point we are entering V8-generated code from C++ and thus
+ // rbp can be an arbitrary value (-fomit-frame-pointer). Since V8 still
+ // needs to know where the next interesting frame is for the purpose of
+ // stack walks, we instead push the stored EXIT frame fp
+ // (IsolateAddressId::kCEntryFPAddress) below to a dedicated slot.
__ pushq(rbp);
__ movq(rbp, rsp);
@@ -381,7 +401,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// C calling convention. The first argument is passed in arg_reg_1.
__ movq(kRootRegister, arg_reg_1);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
// Initialize the pointer cage base register.
__ LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
@@ -392,13 +412,23 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference c_entry_fp = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
{
+ // Keep this static_assert to preserve a link between the offset constant
+ // and the code location it refers to.
+#ifdef V8_TARGET_OS_WIN
+ static_assert(EntryFrameConstants::kNextExitFrameFPOffset ==
+ -3 * kSystemPointerSize + -7 * kSystemPointerSize -
+ EntryFrameConstants::kXMMRegistersBlockSize);
+#else
+ static_assert(EntryFrameConstants::kNextExitFrameFPOffset ==
+ -3 * kSystemPointerSize + -5 * kSystemPointerSize);
+#endif // V8_TARGET_OS_WIN
Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
__ Push(c_entry_fp_operand);
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss
+ // the JS frames on top.
__ Move(c_entry_fp_operand, 0);
}
@@ -447,7 +477,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
- Handle<CodeT> trampoline_code =
+ Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
@@ -620,9 +650,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(r9);
// Invoke the builtin code.
- Handle<CodeT> builtin = is_construct
- ? BUILTIN_CODE(masm->isolate(), Construct)
- : masm->isolate()->builtins()->Call();
+ Handle<Code> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the internal frame. Notice that this also removes the empty
@@ -647,19 +677,19 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
- Register code, Register scratch) {
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
// Verify that the code kind is baseline code via the CodeKind.
- __ movl(scratch, FieldOperand(code, CodeT::kFlagsOffset));
- __ DecodeField<CodeT::KindField>(scratch);
+ __ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
__ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
-static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
- Register scratch) {
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
DCHECK(!AreAliased(code, scratch));
- return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
}
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -670,21 +700,23 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, CODET_TYPE);
+#ifndef V8_JITLESS
+ __ CmpInstanceType(scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ j(not_equal, &not_baseline);
- AssertCodeTIsBaseline(masm, sfi_data, scratch1);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ j(equal, is_baseline);
__ bind(&not_baseline);
} else {
__ j(equal, is_baseline);
}
+#endif // !V8_JITLESS
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
@@ -712,9 +744,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
// Load suspended function and context.
- __ LoadTaggedPointerField(
- rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
- __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ LoadTaggedField(rdi,
+ FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -751,12 +783,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(rcx,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
__ decq(rcx); // Exclude receiver.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -764,33 +796,32 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ decq(rcx);
__ j(less, &done_loop, Label::kNear);
- __ PushTaggedAnyField(
+ __ PushTaggedField(
FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
decompr_scratch1);
__ jmp(&loop);
__ bind(&done_loop);
// Push the receiver.
- __ PushTaggedPointerField(
- FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
- decompr_scratch1);
+ __ PushTaggedField(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
+ decompr_scratch1);
}
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline, ok;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
&is_baseline);
- __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+ __ IsObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ jmp(&ok);
__ bind(&is_baseline);
- __ CmpObjectType(rcx, CODET_TYPE, rcx);
+ __ IsObjectType(rcx, CODE_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -799,7 +830,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -807,8 +838,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ JumpCodeTObject(rcx);
+ __ LoadTaggedField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
+ __ JumpCodeObject(rcx);
}
__ bind(&prepare_step_in_if_stepping);
@@ -820,8 +851,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
- __ LoadTaggedPointerField(
- rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(rdi,
+ FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
@@ -831,8 +862,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(rdx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
- __ LoadTaggedPointerField(
- rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedField(rdi,
+ FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
@@ -873,8 +904,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ leave();
// Drop receiver + arguments.
- __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Tail-call |function_id| if |actual_state| == |expected_state|
@@ -997,18 +1028,16 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = rdi;
- Register feedback_vector = rbx;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
const TaggedRegister shared_function_info(kScratchRegister);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- kInterpreterBytecodeArrayRegister,
- FieldOperand(shared_function_info,
- SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedField(kInterpreterBytecodeArrayRegister,
+ FieldOperand(shared_function_info,
+ SharedFunctionInfo::kFunctionDataOffset));
Label is_baseline;
GetSharedFunctionInfoBytecodeOrBaseline(
@@ -1017,22 +1046,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- kScratchRegister);
+ __ IsObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ kScratchRegister);
__ j(not_equal, &compile_lazy);
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
+ Register feedback_vector = rbx;
TaggedRegister feedback_cell(feedback_vector);
- __ LoadTaggedPointerField(
- feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(feedback_vector,
- FieldOperand(feedback_cell, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_cell,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldOperand(feedback_cell, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadMap(rcx, feedback_vector);
- __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
+ __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, rcx);
__ j(not_equal, &push_stack_frame);
// Check the tiering state.
@@ -1052,6 +1082,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ pushq(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
@@ -1196,6 +1232,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
__ int3(); // Should not return.
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector, closure);
@@ -1203,16 +1240,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(
{
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
- __ LoadTaggedPointerField(
- feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(feedback_vector,
- FieldOperand(feedback_cell, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_cell,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldOperand(feedback_cell, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ LoadMap(rcx, feedback_vector);
- __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
+ __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, rcx);
__ j(not_equal, &install_baseline_code);
// Check the tiering state.
@@ -1225,11 +1261,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ ReplaceClosureCodeWithOptimizedCode(
rcx, closure, kInterpreterBytecodeArrayRegister,
WriteBarrierDescriptor::SlotAddressRegister());
- __ JumpCodeTObject(rcx);
+ __ JumpCodeObject(rcx);
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1248,7 +1285,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
kSystemPointerSize));
// Push the arguments.
__ PushArray(start_address, num_args, scratch,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1400,18 +1437,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// trampoline.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
const TaggedRegister shared_function_info(rbx);
- __ LoadTaggedPointerField(
- shared_function_info,
- FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- rbx, FieldOperand(shared_function_info,
- SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
+ __ LoadTaggedField(shared_function_info,
+ FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(rbx,
+ FieldOperand(shared_function_info,
+ SharedFunctionInfo::kFunctionDataOffset));
+ __ IsObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
- __ LoadCodeTEntry(rbx, rbx);
+ __ LoadCodeEntry(rbx, rbx);
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
@@ -1439,8 +1475,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (v8_flags.debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- rbx);
+ __ IsObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ rbx);
__ Assert(
equal,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
@@ -1538,10 +1574,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
TaggedRegister feedback_cell(feedback_vector);
- __ LoadTaggedPointerField(
- feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(feedback_vector,
- FieldOperand(feedback_cell, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_cell,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldOperand(feedback_cell, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector);
// Check the tiering state.
@@ -1797,8 +1833,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
__ bind(&no_this_arg);
__ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1902,8 +1938,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -1954,8 +1990,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(
rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2028,9 +2064,10 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
- Handle<CodeT> code) {
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- rdi : target
// -- rax : number of parameters on the stack
@@ -2079,8 +2116,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmpl(current, num);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
- FixedArray::kHeaderSize));
+ __ LoadTaggedField(value, FieldOperand(src, current, times_tagged_size,
+ FixedArray::kHeaderSize));
__ CompareRoot(value, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(value, RootIndex::kUndefinedValue);
@@ -2101,7 +2138,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
- Handle<CodeT> code) {
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the new target (for [[Construct]] calls)
@@ -2195,8 +2232,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
StackArgumentsAccessor args(rax);
__ AssertCallableFunction(rdi);
- __ LoadTaggedPointerField(
- rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(rdx,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the shared function info.
@@ -2206,7 +2243,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@@ -2228,9 +2265,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label convert_to_object, convert_receiver;
__ movq(rcx, args.GetReceiverOperand());
__ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
- static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
- __ j(above_equal, &done_convert);
+ __ JumpIfJSAnyIsNotPrimitive(rcx, rbx, &done_convert, Label::kNear);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy,
@@ -2263,7 +2298,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(rax);
__ SmiUntagUnsigned(rax);
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
@@ -2294,8 +2329,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
- __ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ LoadTaggedField(rcx,
+ FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
@@ -2336,7 +2371,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push [[BoundArguments]] to the stack.
{
Label loop;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagFieldUnsigned(rbx,
FieldOperand(rcx, FixedArray::kLengthOffset));
@@ -2346,9 +2381,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// offset in order to be able to move decl(rbx) right before the loop
// condition. This is necessary in order to avoid flags corruption by
// pointer decompression code.
- __ LoadAnyTaggedField(
- r12, FieldOperand(rcx, rbx, times_tagged_size,
- FixedArray::kHeaderSize - kTaggedSize));
+ __ LoadTaggedField(r12,
+ FieldOperand(rcx, rbx, times_tagged_size,
+ FixedArray::kHeaderSize - kTaggedSize));
__ Push(r12);
__ decl(rbx);
__ j(greater, &loop);
@@ -2373,15 +2408,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rax);
- __ LoadAnyTaggedField(rbx,
- FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+ __ LoadTaggedField(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ movq(args.GetReceiverOperand(), rbx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@@ -2480,9 +2514,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
const TaggedRegister shared_function_info(rcx);
- __ LoadTaggedPointerField(
- shared_function_info,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedField(shared_function_info,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
@@ -2510,13 +2543,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ cmpq(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2585,10 +2618,9 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
namespace {
void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
- // Overwrite the return address on the stack.
- __ movq(StackOperandForReturnAddress(0), entry_address);
-
- // And "return" to the OSR entry point of the function.
+ // Overwrite the return address on the stack and "return" to the OSR entry
+ // point of the function.
+ __ movq(Operand(rsp, 0), entry_address);
__ ret(0);
}
@@ -2603,8 +2635,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ testq(maybe_target_code, maybe_target_code);
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
}
@@ -2655,13 +2687,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ leave();
}
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(rax, rax);
- }
-
// Load deoptimization data from the code object.
const TaggedRegister deopt_data(rbx);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
deopt_data,
FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2671,8 +2699,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
FieldOperand(deopt_data, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
- // Compute the target address = code_obj + header_size + osr_offset
- __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
+ __ LoadCodeEntry(rax, rax);
+
+ // Compute the target address = code_entry + osr_offset
+ __ addq(rax, rbx);
Generate_OSREntry(masm, rax);
}
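With this change the OSR target is no longer an address inside the on-heap object (previously code_obj + Code::kHeaderSize + osr_offset) but the out-of-line code entry point plus the byte offset recorded in the deoptimization data. A standalone sketch of the arithmetic, with hypothetical values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical values, for illustration only.
      const uint64_t code_entry = 0x7f0000001000ull;       // loaded by LoadCodeEntry
      const uint64_t osr_pc_offset = 0x84;                 // from DeoptimizationData
      const uint64_t target = code_entry + osr_pc_offset;  // the addq above
      std::printf("OSR target: 0x%llx\n",
                  static_cast<unsigned long long>(target));
      return 0;
    }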
@@ -2703,211 +2733,6 @@ void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) {
D::MaybeTargetCodeRegister());
}
-// Called immediately at the start of Maglev-generated functions, with all
-// state (register and stack) unchanged, except:
-//
-// - the stack slot byte size and
-// - the tagged stack slot byte size
-//
-// are pushed as untagged arguments to the stack. This prologue builtin takes
-// care of a few things that each Maglev function needs on entry:
-//
-// - the deoptimization check
-// - tiering support (checking FeedbackVector flags)
-// - the stack overflow / interrupt check
-// - and finally, setting up the Maglev frame.
-//
-// If this builtin returns, the Maglev frame is fully set up and we are
-// prepared for continued execution. Otherwise, we take one of multiple
-// possible non-standard exit paths (deoptimization, tailcalling other code, or
-// throwing a stack overflow exception).
-void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
- using D =
- i::CallInterfaceDescriptorFor<Builtin::kMaglevOutOfLinePrologue>::type;
- static_assert(D::kParameterCount == 0);
-
- // This builtin is called by Maglev code prior to any register mutations, and
- // the only stack mutation is pushing the arguments for this builtin. In
- // other words:
- //
- // - The register state is the same as when we entered the Maglev code object,
- // i.e. set up for a standard JS call.
- // - The caller has not yet set up a stack frame.
- // - The caller has pushed the (untagged) stack parameters for this builtin.
-
- static constexpr int kStackParameterCount = 2;
- static constexpr int kReturnAddressCount = 1;
- static constexpr int kReturnAddressOffset = 0 * kSystemPointerSize;
- static constexpr int kTaggedStackSlotBytesOffset = 1 * kSystemPointerSize;
- static constexpr int kTotalStackSlotBytesOffset = 2 * kSystemPointerSize;
- USE(kReturnAddressOffset);
- USE(kTaggedStackSlotBytesOffset);
- USE(kTotalStackSlotBytesOffset);
-
- // Scratch registers. Don't clobber regs related to the calling
- // convention (e.g. kJavaScriptCallArgCountRegister).
- const Register scratch0 = rcx;
- const Register scratch1 = r9;
- const Register scratch2 = rbx;
-
- Label deoptimize, optimize, call_stack_guard, call_stack_guard_return;
-
- // A modified version of BailoutIfDeoptimized that drops the builtin frame
- // before deoptimizing.
- {
- static constexpr int kCodeStartToCodeDataContainerOffset =
- Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(scratch0,
- Operand(kJavaScriptCallCodeStartRegister,
- kCodeStartToCodeDataContainerOffset));
- __ testl(
- FieldOperand(scratch0, CodeDataContainer::kKindSpecificFlagsOffset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &deoptimize);
- }
-
- // Tiering support.
- const Register flags = scratch0;
- const Register feedback_vector = scratch1;
- {
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- __ AssertFeedbackVector(feedback_vector);
-
- __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
- flags, feedback_vector, CodeKind::MAGLEV, &optimize);
- }
-
- // Good to go - set up the MAGLEV stack frame and return.
-
- // First, tear down to the caller frame.
- const Register tagged_stack_slot_bytes = scratch1;
- const Register total_stack_slot_bytes = scratch0;
- const Register return_address = scratch2;
- __ PopReturnAddressTo(return_address);
- __ Pop(tagged_stack_slot_bytes);
- __ Pop(total_stack_slot_bytes);
-
- __ EnterFrame(StackFrame::MAGLEV);
-
- // Save arguments in frame.
- // TODO(leszeks): Consider eliding this frame if we don't make any calls
- // that could clobber these registers.
- __ Push(kContextRegister);
- __ Push(kJSFunctionRegister); // Callee's JS function.
- __ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
-
- {
- ASM_CODE_COMMENT_STRING(masm, " Stack/interrupt check");
- // Stack check. This folds the checks for both the interrupt stack limit
- // check and the real stack limit into one by just checking for the
- // interrupt limit. The interrupt limit is either equal to the real stack
- // limit or tighter. By ensuring we have space until that limit after
- // building the frame we can quickly precheck both at once.
- // TODO(leszeks): Include a max call argument size here.
- __ Move(kScratchRegister, rsp);
- __ subq(kScratchRegister, total_stack_slot_bytes);
- __ cmpq(kScratchRegister,
- __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
- __ j(below, &call_stack_guard);
- __ bind(&call_stack_guard_return);
- }
-
- // Initialize stack slots:
- //
- // - tagged slots are initialized with smi zero.
- // - untagged slots are simply reserved without initialization.
- //
- // Tagged slots first.
- const Register untagged_stack_slot_bytes = total_stack_slot_bytes;
- {
- Label next, loop_condition, loop_header;
-
- DCHECK_EQ(total_stack_slot_bytes, untagged_stack_slot_bytes);
- __ subq(total_stack_slot_bytes, tagged_stack_slot_bytes);
-
- const Register smi_zero = rax;
- DCHECK(!AreAliased(smi_zero, scratch0, scratch1, scratch2));
- __ Move(smi_zero, Smi::zero());
-
- __ jmp(&loop_condition, Label::kNear);
-
- // TODO(leszeks): Consider filling with xmm + movdqa instead.
- // TODO(v8:7700): Consider doing more than one push per loop iteration.
- __ bind(&loop_header);
- __ pushq(rax);
- __ bind(&loop_condition);
- __ subq(tagged_stack_slot_bytes, Immediate(kSystemPointerSize));
- __ j(greater_equal, &loop_header, Label::kNear);
-
- __ bind(&next);
- }
-
- // Untagged slots second.
- __ subq(rsp, untagged_stack_slot_bytes);
-
- // The "all-good" return location. This is the only spot where we actually
- // return to the caller.
- __ PushReturnAddressFrom(return_address);
- __ ret(0);
-
- __ bind(&deoptimize);
- {
- // Drop the frame and jump to CompileLazyDeoptimizedCode. This is slightly
- // fiddly due to the CET shadow stack (otherwise we could do a conditional
- // Jump to the builtin).
- __ Drop(kStackParameterCount + kReturnAddressCount);
- __ Move(scratch0,
- BUILTIN_CODE(masm->isolate(), CompileLazyDeoptimizedCode));
- __ LoadCodeObjectEntry(scratch0, scratch0);
- __ PushReturnAddressFrom(scratch0);
- __ ret(0);
- }
-
- __ bind(&optimize);
- {
- __ Drop(kStackParameterCount + kReturnAddressCount);
- __ AssertFunction(kJSFunctionRegister);
- __ OptimizeCodeOrTailCallOptimizedCodeSlot(
- flags, feedback_vector, kJSFunctionRegister, JumpMode::kPushAndReturn);
- __ Trap();
- }
-
- __ bind(&call_stack_guard);
- {
- ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
-
- // Push the MAGLEV code return address now, as if it had been pushed by the
- // call to this builtin.
- __ PushReturnAddressFrom(return_address);
-
- {
- FrameScope inner_frame_scope(masm, StackFrame::INTERNAL);
- __ SmiTag(total_stack_slot_bytes);
- __ Push(total_stack_slot_bytes);
- __ SmiTag(tagged_stack_slot_bytes);
- __ Push(tagged_stack_slot_bytes);
- // Save any registers that can be referenced by maglev::RegisterInput.
- // TODO(leszeks): Only push those that are used by the graph.
- __ Push(kJavaScriptCallNewTargetRegister);
- // Push the frame size.
- __ Push(total_stack_slot_bytes);
- __ CallRuntime(Runtime::kStackGuardWithGap, 1);
- __ Pop(kJavaScriptCallNewTargetRegister);
- __ Pop(tagged_stack_slot_bytes);
- __ SmiUntag(tagged_stack_slot_bytes);
- __ Pop(total_stack_slot_bytes);
- __ SmiUntag(total_stack_slot_bytes);
- }
-
- __ PopReturnAddressTo(return_address);
- __ jmp(&call_stack_guard_return);
- }
-}
-
#if V8_ENABLE_WEBASSEMBLY
// Returns the offset beyond the last saved FP register.
@@ -2964,12 +2789,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
__ Push(rbp);
__ Move(rbp, rsp);
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
- __ LoadTaggedPointerField(
- vector, FieldOperand(kWasmInstanceRegister,
- WasmInstanceObject::kFeedbackVectorsOffset));
- __ LoadTaggedPointerField(vector,
- FieldOperand(vector, func_index, times_tagged_size,
- FixedArray::kHeaderSize));
+ __ LoadTaggedField(vector,
+ FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kFeedbackVectorsOffset));
+ __ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size,
+ FixedArray::kHeaderSize));
Label allocate_vector, done;
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
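The second LoadTaggedField above indexes the feedback-vector FixedArray by func_index. As a sketch of the effective byte offset FieldOperand computes (constants assumed for a 64-bit build without pointer compression, so they are illustrative rather than authoritative):

    #include <cstdio>

    int main() {
      // Assumed layout: 8-byte tagged slots, 1-byte heap-object tag,
      // map + length header.
      constexpr int kTaggedSize = 8;
      constexpr int kHeapObjectTag = 1;
      constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;
      const int func_index = 3;
      const int offset =
          kFixedArrayHeaderSize + func_index * kTaggedSize - kHeapObjectTag;
      std::printf("slot %d lives at byte offset %d\n", func_index, offset);
      return 0;
    }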
@@ -3119,7 +2943,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
__ pushq(function_data);
// We had to prepare the parameters for the Call: we have to put the context
// into rsi.
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
rsi,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@@ -3200,7 +3024,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
__ Move(GCScanSlotPlace, 2);
__ Push(wasm_instance);
__ Push(function_data);
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
kContextRegister,
MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kNativeContextOffset)));
@@ -3240,7 +3064,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
wasm::JumpBuffer::Retired);
Register parent = tmp2;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
parent,
FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));
@@ -3271,7 +3095,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
__ StoreTaggedSignedField(
FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
Smi::FromInt(WasmSuspenderObject::kInactive));
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
__ CompareRoot(suspender, RootIndex::kUndefinedValue);
Label undefined;
@@ -3299,19 +3123,19 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
Register wasm_instance) {
Register closure = function_data;
Register shared_function_info = closure;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
shared_function_info,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
closure = no_reg;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_data,
MemOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
shared_function_info = no_reg;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
wasm_instance,
MemOperand(function_data,
WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
@@ -3412,7 +3236,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register suspender = rax; // Fixed.
__ movq(MemOperand(rbp, kSuspenderOffset), suspender);
Register target_continuation = rax;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
target_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
@@ -3591,7 +3415,6 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register current_param = rbx;
Register param_limit = rdx;
- constexpr int kReceiverOnStackSize = kSystemPointerSize;
__ Move(current_param,
kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize);
__ movq(param_limit, param_count);
@@ -3916,7 +3739,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
Register function_entry = function_data;
Register scratch = r12;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_entry,
FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
__ LoadExternalPointerField(
@@ -4000,8 +3823,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
// expected to be on the top of the stack).
// We cannot use just the ret instruction for this, because we cannot pass the
// number of slots to remove in a Register as an argument.
- __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountExcludesReceiver);
__ ret(0);
// --------------------------------------------------------------------------
@@ -4269,7 +4092,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// live: [rax, rbx, rcx]
Register suspender_continuation = rdx;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
suspender_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
#ifdef DEBUG
@@ -4290,12 +4113,12 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
// Update roots.
// -------------------------------------------
Register caller = rcx;
- __ LoadAnyTaggedField(caller,
- FieldOperand(suspender_continuation,
- WasmContinuationObject::kParentOffset));
+ __ LoadTaggedField(caller,
+ FieldOperand(suspender_continuation,
+ WasmContinuationObject::kParentOffset));
__ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
Register parent = rdx;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
__ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
parent = no_reg;
@@ -4360,19 +4183,19 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
// Load suspender from closure.
// -------------------------------------------
Register sfi = closure;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
sfi,
MemOperand(
closure,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
Register function_data = sfi;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
function_data,
FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
// The write barrier uses a fixed register for the host object (rdi). The next
// barrier is on the suspender, so load it in rdi directly.
Register suspender = rdi;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
suspender, FieldOperand(function_data, WasmResumeData::kSuspenderOffset));
// Check the suspender state.
Label suspender_is_suspended;
@@ -4421,7 +4244,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
__ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);
Register target_continuation = suspender;
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
target_continuation,
FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
suspender = no_reg;
@@ -4490,8 +4313,11 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
+ CHECK(result_size == 1 || result_size == 2);
+
+ using ER = ExternalReference;
+
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
// rbp: frame pointer of calling JS frame (restored after C call)
@@ -4502,49 +4328,39 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r15: pointer to the first argument
#ifdef V8_TARGET_OS_WIN
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
- // stack to be aligned to 16 bytes. It only allows a single-word to be
- // returned in register rax. Larger return sizes must be written to an address
- // passed as a hidden first argument.
- const Register kCCallArg0 = rcx;
- const Register kCCallArg1 = rdx;
- const Register kCCallArg2 = r8;
- const Register kCCallArg3 = r9;
- const int kArgExtraStackSpace = 2;
- const int kMaxRegisterResultSize = 1;
+ // Windows 64-bit ABI only allows a single word to be returned in register
+ // rax. Larger return sizes must be written to an address passed as a hidden
+ // first argument.
+ static constexpr int kMaxRegisterResultSize = 1;
+ const int kReservedStackSlots =
+ result_size <= kMaxRegisterResultSize ? 0 : result_size;
#else
- // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
- // are returned in rax, and a struct of two pointers are returned in rax+rdx.
- // Larger return sizes must be written to an address passed as a hidden first
- // argument.
- const Register kCCallArg0 = rdi;
- const Register kCCallArg1 = rsi;
- const Register kCCallArg2 = rdx;
- const Register kCCallArg3 = rcx;
- const int kArgExtraStackSpace = 0;
- const int kMaxRegisterResultSize = 2;
+ // Simple results are returned in rax, and a struct of two pointers is
+ // returned in rax+rdx.
+ static constexpr int kMaxRegisterResultSize = 2;
+ static constexpr int kReservedStackSlots = 0;
+ CHECK_LE(result_size, kMaxRegisterResultSize);
#endif // V8_TARGET_OS_WIN
- // Enter the exit frame that transitions from JavaScript to C++.
- int arg_stack_space =
- kArgExtraStackSpace +
- (result_size <= kMaxRegisterResultSize ? 0 : result_size);
- if (argv_mode == ArgvMode::kRegister) {
- DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
- DCHECK(!builtin_exit_frame);
- __ EnterApiExitFrame(arg_stack_space);
- // Move argc into r12 (argv is already in r15).
- __ movq(r12, rax);
- } else {
- __ EnterExitFrame(
- arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ __ EnterExitFrame(kReservedStackSlots, builtin_exit_frame
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
+
+ // Set up argv in a callee-saved register. It is reused below so it must be
+ // retained across the C call. In case of ArgvMode::kRegister, r15 has
+ // already been set by the caller.
+ static constexpr Register kArgvRegister = r15;
+ if (argv_mode == ArgvMode::kStack) {
+ int offset =
+ StandardFrameConstants::kFixedFrameSizeAboveFp - kReceiverOnStackSize;
+ __ leaq(kArgvRegister,
+ Operand(rbp, rax, times_system_pointer_size, offset));
}
// rbx: pointer to builtin function (C callee-saved).
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
- // r12: number of arguments including receiver (C callee-saved).
+ // rax: number of arguments including receiver
// r15: argv pointer (C callee-saved).
// Check stack alignment.
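The reserved-slot computation above differs per ABI: Win64 returns at most one word in rax and otherwise needs result_size slots of scratch memory, while System V returns two words in rax:rdx. A small sketch of that decision (plain C++, not the V8 macros):

    #include <cstdio>

    constexpr int ReservedResultSlots(int result_size, bool is_win64) {
      // Win64: results wider than one register go through a hidden pointer
      // into reserved exit-frame slots. System V: rax:rdx covers two words.
      const int max_register_result_size = is_win64 ? 1 : 2;
      return result_size <= max_register_result_size ? 0 : result_size;
    }

    int main() {
      std::printf("win64, result_size=2 -> %d slots\n",
                  ReservedResultSlots(2, true));
      std::printf("sysv,  result_size=2 -> %d slots\n",
                  ReservedResultSlots(2, false));
      return 0;
    }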
@@ -4557,27 +4373,33 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (result_size <= kMaxRegisterResultSize) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax), or a register pair (rax, rdx).
- __ movq(kCCallArg0, r12); // argc.
- __ movq(kCCallArg1, r15); // argv.
- __ Move(kCCallArg2, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_1, rax); // argc.
+ __ movq(arg_reg_2, kArgvRegister); // argv.
+ __ Move(arg_reg_3, ER::isolate_address(masm->isolate()));
} else {
+#ifdef V8_TARGET_OS_WIN
DCHECK_LE(result_size, 2);
// Pass a pointer to the result location as the first argument.
- __ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
+ __ leaq(arg_reg_1, ExitFrameStackSlotOperand(0));
// Pass a pointer to the Arguments object as the second argument.
- __ movq(kCCallArg1, r12); // argc.
- __ movq(kCCallArg2, r15); // argv.
- __ Move(kCCallArg3, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_2, rax); // argc.
+ __ movq(arg_reg_3, kArgvRegister); // argv.
+ __ Move(arg_reg_4, ER::isolate_address(masm->isolate()));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_OS_WIN
}
__ call(rbx);
+#ifdef V8_TARGET_OS_WIN
if (result_size > kMaxRegisterResultSize) {
- // Read result values stored on stack. Result is stored
- // above the the two Arguments object slots on Win64.
- DCHECK_LE(result_size, 2);
- __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
- __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
+ // Read result values stored on stack.
+ DCHECK_EQ(result_size, 2);
+ __ movq(kReturnRegister0, ExitFrameStackSlotOperand(0));
+ __ movq(kReturnRegister1, ExitFrameStackSlotOperand(1));
}
+#endif // V8_TARGET_OS_WIN
+
// Result is in rax or rdx:rax - do not destroy these registers!
// Check result for exception sentinel.
@@ -4590,53 +4412,52 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (v8_flags.debug_code) {
Label okay;
__ LoadRoot(kScratchRegister, RootIndex::kTheHoleValue);
- ExternalReference pending_exception_address = ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, masm->isolate());
- Operand pending_exception_operand =
- masm->ExternalReferenceAsOperand(pending_exception_address);
- __ cmp_tagged(kScratchRegister, pending_exception_operand);
+ ER pending_exception_address =
+ ER::Create(IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ cmp_tagged(kScratchRegister,
+ masm->ExternalReferenceAsOperand(pending_exception_address));
__ j(equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
}
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
- argv_mode == ArgvMode::kStack);
+ __ LeaveExitFrame();
+ if (argv_mode == ArgvMode::kStack) {
+ // Drop arguments and the receiver from the caller stack.
+ __ PopReturnAddressTo(rcx);
+ __ leaq(rsp, Operand(kArgvRegister, kReceiverOnStackSize));
+ __ PushReturnAddressFrom(rcx);
+ }
__ ret(0);
// Handling of exception.
__ bind(&exception_returned);
- ExternalReference pending_handler_context_address = ExternalReference::Create(
+ ER pending_handler_context_address = ER::Create(
IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
- ExternalReference pending_handler_entrypoint_address =
- ExternalReference::Create(
- IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
- ExternalReference pending_handler_fp_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
- ExternalReference pending_handler_sp_address = ExternalReference::Create(
- IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+ ER pending_handler_entrypoint_address = ER::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ER pending_handler_fp_address =
+ ER::Create(IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ER pending_handler_sp_address =
+ ER::Create(IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
// Ask the runtime for help to determine the handler. This will set rax to
// contain the current pending exception, don't clobber it.
- ExternalReference find_handler =
- ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
+ ER find_handler = ER::Create(Runtime::kUnwindAndFindExceptionHandler);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Move(arg_reg_1, 0); // argc.
__ Move(arg_reg_2, 0); // argv.
- __ Move(arg_reg_3, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(arg_reg_3, ER::isolate_address(masm->isolate()));
__ PrepareCallCFunction(3);
__ CallCFunction(find_handler, 3);
}
#ifdef V8_ENABLE_CET_SHADOW_STACK
// Drop frames from the shadow stack.
- ExternalReference num_frames_above_pending_handler_address =
- ExternalReference::Create(
- IsolateAddressId::kNumFramesAbovePendingHandlerAddress,
- masm->isolate());
+ ER num_frames_above_pending_handler_address = ER::Create(
+ IsolateAddressId::kNumFramesAbovePendingHandlerAddress, masm->isolate());
__ movq(rcx, masm->ExternalReferenceAsOperand(
num_frames_above_pending_handler_address));
__ IncsspqIfSupported(rcx, kScratchRegister);
@@ -4657,8 +4478,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ bind(&skip);
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
- ExternalReference c_entry_fp_address = ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ ER c_entry_fp_address =
+ ER::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate());
Operand c_entry_fp_operand =
masm->ExternalReferenceAsOperand(c_entry_fp_address);
__ movq(c_entry_fp_operand, Immediate(0));
@@ -4810,7 +4631,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK_EQ(stack_space, 0);
__ movq(rbx, *stack_space_operand);
}
- __ LeaveApiExitFrame();
+ __ LeaveExitFrame();
// Check if the function scheduled an exception.
__ Move(rdi, scheduled_exception_address);
@@ -4923,12 +4744,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, holder, call_data,
kScratchRegister));
+ using FCI = FunctionCallbackInfo<v8::Value>;
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
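The static_asserts above pin down the FunctionCallbackArguments slot order that the pushes below rely on. A self-contained mirror of that layout (constants copied from the asserts, everything else illustrative):

    #include <cstddef>
    #include <cstdio>

    namespace fca {
    constexpr int kHolderIndex = 0;
    constexpr int kIsolateIndex = 1;
    constexpr int kReturnValueDefaultValueIndex = 2;
    constexpr int kReturnValueIndex = 3;
    constexpr int kDataIndex = 4;
    constexpr int kNewTargetIndex = 5;
    constexpr int kArgsLength = 6;
    }  // namespace fca

    int main() {
      constexpr size_t kSystemPointerSize = sizeof(void*);
      // values_ skips the six implicit slots plus the receiver and then points
      // at the first explicit JS argument, matching the leaq in the builtin.
      constexpr size_t values_offset =
          (fca::kArgsLength + 1) * kSystemPointerSize;
      std::printf("values_ = implicit_args_ + %zu bytes\n", values_offset);
      return 0;
    }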
@@ -4955,60 +4777,65 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ Push(kScratchRegister);
__ PushAddress(ExternalReference::isolate_address(masm->isolate()));
__ Push(holder);
- __ PushReturnAddressFrom(rax);
-
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
Register scratch = rbx;
- __ leaq(scratch, Operand(rsp, 1 * kSystemPointerSize));
+ __ movq(scratch, rsp);
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
+ __ PushReturnAddressFrom(rax);
+
+ // Allocate the v8::Arguments structure in the arguments' space since it's
+ // not controlled by GC.
static constexpr int kApiStackSpace = 4;
- __ EnterApiExitFrame(kApiStackSpace);
+ static_assert(kApiStackSpace == sizeof(FCI) / kSystemPointerSize + 1);
+
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
- __ movq(StackSpaceOperand(0), scratch);
+ constexpr int kImplicitArgsOffset = 0;
+ static_assert(kImplicitArgsOffset ==
+ offsetof(FCI, implicit_args_) / kSystemPointerSize);
+ __ movq(ExitFrameStackSlotOperand(kImplicitArgsOffset), scratch);
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
+ constexpr int kValuesOffset = 1;
+ static_assert(kValuesOffset == offsetof(FCI, values_) / kSystemPointerSize);
__ leaq(scratch,
Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
- __ movq(StackSpaceOperand(1), scratch);
+ __ movq(ExitFrameStackSlotOperand(kValuesOffset), scratch);
// FunctionCallbackInfo::length_.
- __ movq(StackSpaceOperand(2), argc);
+ constexpr int kLengthOffset = 2;
+ static_assert(kLengthOffset == offsetof(FCI, length_) / kSystemPointerSize);
+ __ movq(ExitFrameStackSlotOperand(2), argc);
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
- __ leaq(kScratchRegister,
- Operand(argc, times_system_pointer_size,
- (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
- __ movq(StackSpaceOperand(3), kScratchRegister);
+ constexpr int kBytesToDropOffset = kLengthOffset + 1;
+ static_assert(kBytesToDropOffset == kApiStackSpace - 1);
+ __ leaq(kScratchRegister, Operand(argc, times_system_pointer_size,
+ FCA::kArgsLength * kSystemPointerSize +
+ kReceiverOnStackSize));
+ __ movq(ExitFrameStackSlotOperand(kBytesToDropOffset), kScratchRegister);
Register arguments_arg = arg_reg_1;
Register callback_arg = arg_reg_2;
- // It's okay if api_function_address == callback_arg
- // but not arguments_arg
+ // It's okay if api_function_address == callback_arg, but not arguments_arg.
DCHECK(api_function_address != arguments_arg);
// v8::InvocationCallback's argument.
- __ leaq(arguments_arg, StackSpaceOperand(0));
+ __ leaq(arguments_arg, ExitFrameStackSlotOperand(kImplicitArgsOffset));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
- // There are two stack slots above the arguments we constructed on the stack:
- // the stored ebp (pushed by EnterApiExitFrame), and the return address.
- static constexpr int kStackSlotsAboveFCA = 2;
- Operand return_value_operand(
- rbp,
- (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
-
- static constexpr int kUseStackSpaceOperand = 0;
- Operand stack_space_operand = StackSpaceOperand(3);
+ Operand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
+ static constexpr int kUseExitFrameStackSlotOperand = 0;
+ Operand stack_space_operand = ExitFrameStackSlotOperand(3);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
- kUseStackSpaceOperand, &stack_space_operand,
+ kUseExitFrameStackSlotOperand, &stack_space_operand,
return_value_operand);
}
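The fourth exit-frame slot stores how many bytes of caller-pushed arguments to drop on return. A sketch of that count (assumed argc, constants as in the asserts above):

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t kSystemPointerSize = sizeof(void*);
      constexpr size_t kFCAArgsLength = 6;  // FCA::kArgsLength
      constexpr size_t kReceiverOnStackSize = kSystemPointerSize;
      const size_t argc = 3;  // hypothetical JS argument count
      // Matches the leaq into kScratchRegister: explicit args + implicit
      // FunctionCallbackArguments slots + the receiver.
      const size_t bytes_to_drop =
          argc * kSystemPointerSize +
          kFCAArgsLength * kSystemPointerSize + kReceiverOnStackSize;
      std::printf("drop %zu bytes after the API call returns\n", bytes_to_drop);
      return 0;
    }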
@@ -5027,44 +4854,49 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
// Insert additional parameters into the stack frame above return address.
__ PopReturnAddressTo(scratch);
__ Push(receiver);
- __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
- decompr_scratch1);
+ __ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset),
+ decompr_scratch1);
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(masm->isolate()));
__ Push(holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
- decompr_scratch1);
+ __ PushTaggedField(FieldOperand(callback, AccessorInfo::kNameOffset),
+ decompr_scratch1);
__ PushReturnAddressFrom(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
+ static constexpr int kNameHandleStackSize = 1;
+ static constexpr int kStackUnwindSpace =
+ PCA::kArgsLength + kNameHandleStackSize;
// Load address of v8::PropertyAccessorInfo::args_ array.
__ leaq(scratch, Operand(rsp, 2 * kSystemPointerSize));
- __ EnterApiExitFrame(kArgStackSpace);
+ // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
+ static constexpr int kArgStackSpace = 1;
+ static_assert(kArgStackSpace ==
+ sizeof(PropertyCallbackInfo<v8::Value>) / kSystemPointerSize);
+
+ __ EnterExitFrame(kArgStackSpace, StackFrame::EXIT);
// Create v8::PropertyCallbackInfo object on the stack and initialize
- // it's args_ field.
- Operand info_object = StackSpaceOperand(0);
+ // its args_ field.
+ Operand info_object = ExitFrameStackSlotOperand(0);
__ movq(info_object, scratch);
__ leaq(name_arg, Operand(scratch, -kSystemPointerSize));
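For the getter path, the unwind space is the seven PropertyCallbackArguments slots plus the pushed name handle. A minimal mirror of those constants (copied from the asserts above; not the V8 headers):

    #include <cstdio>

    namespace pca {
    constexpr int kShouldThrowOnErrorIndex = 0;
    constexpr int kHolderIndex = 1;
    constexpr int kIsolateIndex = 2;
    constexpr int kReturnValueDefaultValueIndex = 3;
    constexpr int kReturnValueIndex = 4;
    constexpr int kDataIndex = 5;
    constexpr int kThisIndex = 6;
    constexpr int kArgsLength = 7;
    }  // namespace pca

    int main() {
      constexpr int kNameHandleStackSize = 1;
      constexpr int kStackUnwindSpace = pca::kArgsLength + kNameHandleStackSize;
      std::printf("stack unwind space: %d slots\n", kStackUnwindSpace);
      return 0;
    }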
@@ -5072,11 +4904,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// could be used to pass arguments.
__ leaq(accessor_info_arg, info_object);
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- // It's okay if api_function_address == getter_arg
- // but not accessor_info_arg or name_arg
+ // It's okay if api_function_address == getter_arg, but not accessor_info_arg
+ // or name_arg.
DCHECK(api_function_address != accessor_info_arg);
DCHECK(api_function_address != name_arg);
__ LoadExternalPointerField(
@@ -5084,11 +4913,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
FieldOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
kAccessorInfoGetterTag, kScratchRegister);
- // +3 is to skip prolog, return address and name handle.
- Operand return_value_operand(
- rbp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+ Operand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
Operand* const kUseStackSpaceConstant = nullptr;
+
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
kStackUnwindSpace, kUseStackSpaceConstant,
return_value_operand);
@@ -5317,21 +5147,21 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = rdi;
__ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = rbx;
TaggedRegister shared_function_info(code_obj);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
shared_function_info,
FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(shared_function_info,
- SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedField(code_obj,
+ FieldOperand(shared_function_info,
+ SharedFunctionInfo::kFunctionDataOffset));
// Check if we have baseline code. For OSR entry it is safe to assume we
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
+ __ IsObjectType(code_obj, CODE_TYPE, kScratchRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -5344,30 +5174,27 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
+ __ IsObjectType(code_obj, CODE_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
- AssertCodeTIsBaseline(masm, code_obj, r11);
- }
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+ AssertCodeIsBaseline(masm, code_obj, r11);
}
// Load the feedback vector.
Register feedback_vector = r11;
TaggedRegister feedback_cell(feedback_vector);
- __ LoadTaggedPointerField(
- feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(feedback_vector,
- FieldOperand(feedback_cell, Cell::kValueOffset));
+ __ LoadTaggedField(feedback_cell,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedField(feedback_vector,
+ FieldOperand(feedback_cell, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
+ __ IsObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
__ j(not_equal, &install_baseline_code);
// Save BytecodeOffset from the stack frame.
@@ -5420,8 +5247,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
__ CallCFunction(get_baseline_pc, 3);
}
- __ leaq(code_obj,
- FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
+ __ LoadCodeEntry(code_obj, code_obj);
+ __ addq(code_obj, kReturnRegister0);
__ popq(kInterpreterAccumulatorRegister);
if (is_osr) {
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 97a7b2b563..d476577f06 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -8,5 +8,6 @@ leszeks@chromium.org
mslekova@chromium.org
nicohartmann@chromium.org
tebbi@chromium.org
+victorgomes@chromium.org
per-file compiler.*=marja@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index b1cd1d5205..0a0880db06 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -66,7 +66,8 @@ void RelocInfo::apply(intptr_t delta) {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_) ||
+ IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -111,8 +112,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 3fe769a0ec..179c309acb 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -553,13 +553,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
// Emit constant pool if necessary.
CheckConstPool(true, false);
@@ -831,7 +831,8 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_1 << 8
// orr dst, dst, #target8_2 << 16
- uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ uint32_t target24 =
+ target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
CHECK(is_uint24(target24));
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
@@ -1635,7 +1636,8 @@ void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
void Assembler::mov_label_offset(Register dst, Label* label) {
if (label->is_bound()) {
- mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+ mov(dst, Operand(label->pos() +
+ (InstructionStream::kHeaderSize - kHeapObjectTag)));
} else {
// Emit the link to the label in the code stream followed by extra nop
// instructions.
@@ -5252,7 +5254,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 5366ff1abf..9444abea18 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -388,7 +388,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// ---------------------------------------------------------------------------
- // Code generation
+ // InstructionStream generation
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -1252,7 +1252,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void emit(Instr x);
- // Code generation
+ // InstructionStream generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
private:
friend class Assembler;
- friend class TurboAssembler;
+ friend class MacroAssembler;
template <typename T>
bool CanAcquireVfp() const;
diff --git a/deps/v8/src/codegen/arm/constants-arm.h b/deps/v8/src/codegen/arm/constants-arm.h
index 6aedde8629..5b8636d3b9 100644
--- a/deps/v8/src/codegen/arm/constants-arm.h
+++ b/deps/v8/src/codegen/arm/constants-arm.h
@@ -54,10 +54,11 @@ constexpr int kNoRegister = -1;
constexpr int kLdrMaxReachBits = 12;
constexpr int kVldrMaxReachBits = 10;
-// Actual value of root register is offset from the root array's start
-// to take advantage of negative displacement values. Loads allow a uint12
-// value with a separate sign bit (range [-4095, +4095]), so the first root
-// is still addressable with a single load instruction.
+// The actual value of the kRootRegister is offset from the IsolateData's start
+// to take advantage of negative displacement values.
+//
+// Loads allow a uint12 value with a separate sign bit (range [-4095, +4095]),
+// so the first root is still addressable with a single load instruction.
constexpr int kRootRegisterBias = 4095;
// TODO(pkasting): For all the enum type aliases below, if overload resolution
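The bias arithmetic described above doubles the window a single ldr can reach. A quick check of that claim in plain C++:

    #include <cstdio>

    int main() {
      constexpr int kLdrMaxReach = 4095;      // uint12 magnitude plus sign bit
      constexpr int kRootRegisterBias = 4095;
      // With the bias, one load reaches IsolateData offsets in this window.
      const int lowest = kRootRegisterBias - kLdrMaxReach;   // 0 -> first root
      const int highest = kRootRegisterBias + kLdrMaxReach;  // 8190
      std::printf("reachable offsets: [%d, %d]\n", lowest, highest);
      return 0;
    }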
@@ -104,6 +105,22 @@ constexpr Condition kNumberOfConditions = 16;
constexpr Condition hs = cs; // C set Unsigned higher or same.
constexpr Condition lo = cc; // C clear Unsigned lower.
+// Unified cross-platform condition names/aliases.
+constexpr Condition kEqual = eq;
+constexpr Condition kNotEqual = ne;
+constexpr Condition kLessThan = lt;
+constexpr Condition kGreaterThan = gt;
+constexpr Condition kLessThanEqual = le;
+constexpr Condition kGreaterThanEqual = ge;
+constexpr Condition kUnsignedLessThan = lo;
+constexpr Condition kUnsignedGreaterThan = hi;
+constexpr Condition kUnsignedLessThanEqual = ls;
+constexpr Condition kUnsignedGreaterThanEqual = hs;
+constexpr Condition kOverflow = vs;
+constexpr Condition kNoOverflow = vc;
+constexpr Condition kZero = eq;
+constexpr Condition kNotZero = ne;
+
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
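NegateCondition works because complementary ARM condition codes differ only in their least-significant bit. A standalone sketch of that trick (own enum values matching the ARM encoding):

    #include <cstdio>

    enum Condition { eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7 };

    constexpr Condition Negate(Condition cond) {
      // XOR with ne (== 1) flips a condition to its complement.
      return static_cast<Condition>(cond ^ ne);
    }

    int main() {
      std::printf("Negate(eq) == ne: %d\n", Negate(eq) == ne);
      std::printf("Negate(cs) == cc: %d\n", Negate(cs) == cc);
      return 0;
    }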
@@ -695,6 +712,9 @@ class VFPRegisters {
static const char* names_[kNumVFPRegisters];
};
+// The maximum size of the code range s.t. pc-relative calls are possible
+// between all Code objects in the range.
+//
// Relative jumps on ARM can address ±32 MB.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 32;
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
index 23d4b31bc0..f1affc1bee 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -100,6 +100,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return r4; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return r5; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return r5; }
// static
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 488d87a260..9be1d37e03 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -42,7 +42,7 @@
namespace v8 {
namespace internal {
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -59,7 +59,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -77,7 +77,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -95,7 +95,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -106,11 +106,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
ldr(destination, MemOperand(destination, offset));
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
ldr(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
@@ -119,7 +119,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -148,20 +148,20 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
+void MacroAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
mov(pc, Operand(target, rmode), LeaveCC, cond);
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -177,20 +177,20 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Move(scratch, reference);
Jump(scratch);
}
-void TurboAssembler::Call(Register target, Condition cond) {
+void MacroAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
blx(target, cond);
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
TargetAddressStorageMode mode,
bool check_constant_pool) {
// Check if we have to emit the constant pool before we block it.
@@ -225,7 +225,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
}
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -239,11 +239,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
// 'code' is always generated ARM code, never THUMB code
- DCHECK(code->IsExecutable());
Call(code.address(), rmode, cond, mode);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
static_assert(kSystemPointerSize == 4);
static_assert(kSmiShiftSize == 0);
@@ -259,25 +258,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
ASM_CODE_COMMENT(this);
ldr(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
@@ -295,7 +294,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
break;
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
bl(code_target_index * kInstrSize, cond,
RelocInfo::RELATIVE_CODE_TARGET);
@@ -308,7 +307,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -327,7 +326,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
break;
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
b(code_target_index * kInstrSize, cond,
RelocInfo::RELATIVE_CODE_TARGET);
@@ -340,79 +339,32 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
- ASM_CODE_COMMENT(this);
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call
- // its (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- b(ne, &if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- add(destination, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- jmp(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin
- // entry table.
- bind(&if_code_is_off_heap);
- ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
- }
- add(destination, destination, kRootRegister);
- ldr(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
+ ASM_CODE_COMMENT(this);
+ ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
}
-void TurboAssembler::CallCodeObject(Register code_object) {
+void MacroAssembler::CallCodeObject(Register code_object) {
ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
+ LoadCodeEntry(code_object, code_object);
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
+ LoadCodeEntry(code_object, code_object);
Jump(code_object);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
// Compute the return address in lr to return to after the jump below. The pc
// is already at '+ 8' from the current instruction; but return is after three
@@ -423,23 +375,21 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Call(target);
}
-void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
+void MacroAssembler::Ret(Condition cond) { bx(lr, cond); }
-void TurboAssembler::Drop(int count, Condition cond) {
+void MacroAssembler::Drop(int count, Condition cond) {
if (count > 0) {
add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
}
}
-void TurboAssembler::Drop(Register count, Condition cond) {
+void MacroAssembler::Drop(Register count, Condition cond) {
add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
- Register scratch) {
- ldr(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- ldr(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+ Register scratch) {
+ ldr(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
}
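For orientation, a minimal sketch of how the renamed deoptimization check is used, mirroring the TailCallOptimizedCodeSlot hunk further down in this patch; the wrapper name and register/label choices are placeholders, not part of the patch.

// Sketch only: everything except the MacroAssembler members is hypothetical.
void EmitDeoptCheck(MacroAssembler* masm, Register optimized_code_entry,
                    Label* heal_optimized_code_slot) {
  UseScratchRegisterScope temps(masm);
  // Loads the kind-specific flags from the Code object and sets the condition
  // flags; 'ne' means the code is marked for deoptimization.
  masm->TestCodeIsMarkedForDeoptimization(optimized_code_entry,
                                          temps.Acquire());
  masm->b(ne, heal_optimized_code_slot);
}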
@@ -448,23 +398,23 @@ Operand MacroAssembler::ClearedValue() const {
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}
-void TurboAssembler::Call(Label* target) { bl(target); }
+void MacroAssembler::Call(Label* target) { bl(target); }
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(handle));
push(scratch);
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(smi));
push(scratch);
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
PushArrayOrder order) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -494,9 +444,9 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
+void MacroAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
+void MacroAssembler::Move(Register dst, Handle<HeapObject> value) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -507,7 +457,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
mov(dst, Operand(value));
}
-void TurboAssembler::Move(Register dst, ExternalReference reference) {
+void MacroAssembler::Move(Register dst, ExternalReference reference) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -518,33 +468,33 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
mov(dst, Operand(reference));
}
-void TurboAssembler::Move(Register dst, Register src, Condition cond) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
if (dst != src) {
mov(dst, src, LeaveCC, cond);
}
}
-void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
Condition cond) {
if (dst != src) {
vmov(dst, src, cond);
}
}
-void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
Condition cond) {
if (dst != src) {
vmov(dst, src, cond);
}
}
-void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
+void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
if (dst != src) {
vmov(dst, src);
}
}
-void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
+void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1,
Register src1) {
DCHECK_NE(dst0, dst1);
if (dst0 != src1) {
@@ -560,7 +510,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
}
}
-void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
+void MacroAssembler::Swap(Register srcdst0, Register srcdst1) {
DCHECK(srcdst0 != srcdst1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -569,7 +519,7 @@ void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
mov(srcdst1, scratch);
}
-void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
DCHECK(srcdst0 != srcdst1);
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
@@ -585,7 +535,7 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
}
}
-void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
+void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
DCHECK(srcdst0 != srcdst1);
vswp(srcdst0, srcdst1);
}
@@ -658,7 +608,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
}
-void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
+void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
@@ -671,7 +621,7 @@ void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
}
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
ldr(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
@@ -715,19 +665,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
bind(&done);
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
ASM_CODE_COMMENT(this);
stm(db_w, sp, registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
ASM_CODE_COMMENT(this);
ldm(ia_w, sp, registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
@@ -744,7 +694,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Operand offset,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -762,7 +712,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
@@ -781,7 +731,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
}
}
-void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
Register object, Operand offset) {
DCHECK_NE(dst_object, dst_slot);
DCHECK(offset.IsRegister() || offset.IsImmediate());
@@ -844,9 +794,8 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, &done);
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
&done);
@@ -869,7 +818,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
bind(&done);
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
ASM_CODE_COMMENT(this);
if (marker_reg.is_valid()) {
if (marker_reg.code() > fp.code()) {
@@ -886,7 +835,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
ASM_CODE_COMMENT(this);
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
stm(db_w, sp, {function_reg, cp, fp, lr});
@@ -896,7 +845,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(kJavaScriptCallArgCountRegister);
}
-void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
@@ -905,35 +854,35 @@ void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
vsub(dst, src, kDoubleRegZero, cond);
}
-void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
+void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
+void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
const float src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const double src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
+void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Register fpscr_flags,
const Condition cond) {
@@ -942,7 +891,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
+void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
const float src2,
const Register fpscr_flags,
const Condition cond) {
@@ -951,7 +900,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond) {
@@ -960,7 +909,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const double src2,
const Register fpscr_flags,
const Condition cond) {
@@ -969,7 +918,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
+void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
if (src.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
vmov(dst, loc.high());
@@ -978,7 +927,7 @@ void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
}
}
-void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
+void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
if (dst.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
vmov(loc.high(), src);
@@ -987,7 +936,7 @@ void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
}
}
-void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
+void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
if (src.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
vmov(dst, loc.low());
@@ -996,7 +945,7 @@ void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
}
}
-void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
+void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
if (dst.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
vmov(loc.low(), src);
@@ -1005,7 +954,7 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
}
-void TurboAssembler::VmovExtended(Register dst, int src_code) {
+void MacroAssembler::VmovExtended(Register dst, int src_code) {
DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
if (src_code & 0x1) {
@@ -1015,7 +964,7 @@ void TurboAssembler::VmovExtended(Register dst, int src_code) {
}
}
-void TurboAssembler::VmovExtended(int dst_code, Register src) {
+void MacroAssembler::VmovExtended(int dst_code, Register src) {
DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
@@ -1025,7 +974,7 @@ void TurboAssembler::VmovExtended(int dst_code, Register src) {
}
}
-void TurboAssembler::VmovExtended(int dst_code, int src_code) {
+void MacroAssembler::VmovExtended(int dst_code, int src_code) {
if (src_code == dst_code) return;
if (src_code < SwVfpRegister::kNumRegisters &&
@@ -1095,7 +1044,7 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
}
}
-void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
+void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
if (dst_code < SwVfpRegister::kNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
@@ -1109,7 +1058,7 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
}
}
-void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
+void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
if (src_code < SwVfpRegister::kNumRegisters) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
@@ -1122,7 +1071,7 @@ void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
}
}
-void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
+void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
NeonDataType dt, int lane) {
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
@@ -1134,7 +1083,7 @@ void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
vmov(dt, dst, double_source, double_lane);
}
-void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
+void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
NeonDataType dt, int lane) {
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
@@ -1143,19 +1092,19 @@ void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
vmov(dt, dst, src, double_lane);
}
-void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
+void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
int lane) {
int s_code = src.code() * 4 + lane;
VmovExtended(dst.code(), s_code);
}
-void TurboAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src,
+void MacroAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src,
int lane) {
DwVfpRegister double_dst = DwVfpRegister::from_code(src.code() * 2 + lane);
vmov(dst, double_dst);
}
-void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
Register src_lane, NeonDataType dt, int lane) {
Move(dst, src);
int size = NeonSz(dt); // 0, 1, 2
@@ -1168,21 +1117,21 @@ void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
vmov(dt, double_dst, double_lane, src_lane);
}
-void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
SwVfpRegister src_lane, int lane) {
Move(dst, src);
int s_code = dst.code() * 4 + lane;
VmovExtended(s_code, src_lane.code());
}
-void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
DwVfpRegister src_lane, int lane) {
Move(dst, src);
DwVfpRegister double_dst = DwVfpRegister::from_code(dst.code() * 2 + lane);
vmov(double_dst, src_lane);
}
-void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list,
+void MacroAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list,
uint8_t lane, NeonMemOperand src) {
if (sz == Neon64) {
// vld1s is not valid for Neon64.
@@ -1192,7 +1141,7 @@ void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list,
}
}
-void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list,
+void MacroAssembler::StoreLane(NeonSize sz, NeonListOperand src_list,
uint8_t lane, NeonMemOperand dst) {
if (sz == Neon64) {
// vst1s is not valid for Neon64.
@@ -1202,7 +1151,7 @@ void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list,
}
}
-void TurboAssembler::LslPair(Register dst_low, Register dst_high,
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift) {
DCHECK(!AreAliased(dst_high, src_low));
@@ -1227,7 +1176,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::LslPair(Register dst_low, Register dst_high,
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK_GE(63, shift);
@@ -1250,7 +1199,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1276,7 +1225,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK_GE(63, shift);
@@ -1299,7 +1248,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1324,7 +1273,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK_GE(63, shift);
@@ -1347,7 +1296,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -1355,9 +1304,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
PushCommonFrame(scratch);
}
-void TurboAssembler::Prologue() { PushStandardFrame(r1); }
+void MacroAssembler::Prologue() { PushStandardFrame(r1); }
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes = (mode == kCountExcludesReceiver) ? kPointerSize : 0;
switch (type) {
@@ -1380,7 +1329,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
ArgumentsCountType type,
ArgumentsCountMode mode) {
@@ -1395,7 +1344,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
}
}
-void TurboAssembler::EnterFrame(StackFrame::Type type,
+void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
ASM_CODE_COMMENT(this);
// r0-r3: preserved
@@ -1411,7 +1360,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
#endif // V8_ENABLE_WEBASSEMBLY
}
-int TurboAssembler::LeaveFrame(StackFrame::Type type) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
// r0: preserved
// r1: preserved
@@ -1426,7 +1375,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) {
}
#ifdef V8_OS_WIN
-void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
// "Functions that allocate 4 KB or more on the stack must ensure that each
// page prior to the final page is touched in order." Source:
// https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack
@@ -1449,7 +1398,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
sub(sp, sp, bytes_scratch);
}
-void TurboAssembler::AllocateStackSpace(int bytes) {
+void MacroAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
UseScratchRegisterScope temps(this);
@@ -1467,7 +1416,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
}
#endif
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
@@ -1496,15 +1445,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
str(cp, MemOperand(scratch));
- // Optionally save all double registers.
- if (save_doubles) {
- SaveFPRegs(sp, scratch);
- // Note that d0 will be accessible at
- // fp - ExitFrameConstants::kFrameSize -
- // DwVfpRegister::kNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
  // Reserve space for the return address and stack space, and align the
  // frame in preparation for calling the runtime function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
@@ -1520,7 +1460,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1536,21 +1476,13 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_ARM
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+void MacroAssembler::LeaveExitFrame(Register argument_count,
bool argument_count_is_length) {
ASM_CODE_COMMENT(this);
ConstantPoolUnavailableScope constant_pool_unavailable(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- // Optionally restore all double registers.
- if (save_doubles) {
- // Calculate the stack location of the saved doubles and restore them.
- const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
- sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
- RestoreFPRegs(r3, scratch);
- }
-
// Clear top frame.
mov(r3, Operand::Zero());
Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
@@ -1580,7 +1512,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1589,7 +1521,7 @@ void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
}
// On ARM this is just a synonym to make the purpose clear.
-void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
+void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
}
@@ -1601,10 +1533,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
ldr(destination, MemOperand(kRootRegister, offset));
}
@@ -1899,7 +1831,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
b(ls, on_in_range);
}
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
ASM_CODE_COMMENT(this);
@@ -1925,7 +1857,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
b(lt, done);
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DwVfpRegister double_input,
StubCallMode stub_mode) {
@@ -1981,8 +1913,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
{
UseScratchRegisterScope temps(masm);
- __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry,
- temps.Acquire());
+ __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire());
__ b(ne, &heal_optimized_code_slot);
}
@@ -1990,7 +1921,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// into the optimized functions list, then tail call the optimized code.
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ LoadCodeObjectEntry(r2, optimized_code_entry);
+ __ LoadCodeEntry(r2, optimized_code_entry);
__ Jump(r2);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -2094,8 +2025,8 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(this, optimized_code_entry, r6);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// All parameters are on the stack. r0 has the return value after call.
@@ -2110,8 +2041,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
mov(r0, Operand(num_arguments));
Move(r1, ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
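A hypothetical call site, shown only to illustrate how dropping the SaveFPRegsMode parameter changes callers; the runtime function id is an arbitrary example, not taken from this patch.

// Illustrative call site only; the runtime function id is arbitrary.
//   before this patch: masm->CallRuntime(Runtime::kStackGuard, 0,
//                                        SaveFPRegsMode::kIgnore);
//   after this patch:  masm->CallRuntime(Runtime::kStackGuard, 0);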
@@ -2136,16 +2066,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
DCHECK_EQ(builtin.address() & 1, 1);
#endif
Move(r1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
cmp(in, Operand(kClearedWeakHeapObjectLower32));
@@ -2181,11 +2106,11 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
}
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Assert(Condition cond, AbortReason reason) {
+void MacroAssembler::Assert(Condition cond, AbortReason reason) {
if (v8_flags.debug_code) Check(cond, reason);
}
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
@@ -2294,7 +2219,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
#endif // V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Check(Condition cond, AbortReason reason) {
+void MacroAssembler::Check(Condition cond, AbortReason reason) {
Label L;
b(cond, &L);
Abort(reason);
@@ -2302,7 +2227,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
ASM_CODE_COMMENT(this);
Label abort_start;
bind(&abort_start);
@@ -2350,7 +2275,7 @@ void TurboAssembler::Abort(AbortReason reason) {
// will not return here
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
@@ -2367,7 +2292,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ldr(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void TurboAssembler::InitializeRootRegister() {
+void MacroAssembler::InitializeRootRegister() {
ASM_CODE_COMMENT(this);
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
mov(kRootRegister, Operand(isolate_root));
@@ -2385,17 +2310,17 @@ void MacroAssembler::SmiTst(Register value) {
tst(value, Operand(kSmiTagMask));
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
-void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
cmp(x, Operand(y));
b(eq, dest);
}
-void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
cmp(x, Operand(y));
b(lt, dest);
}
@@ -2405,14 +2330,14 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
b(ne, not_smi_label);
}
-void TurboAssembler::CheckFor32DRegs(Register scratch) {
+void MacroAssembler::CheckFor32DRegs(Register scratch) {
ASM_CODE_COMMENT(this);
Move(scratch, ExternalReference::cpu_features());
ldr(scratch, MemOperand(scratch));
tst(scratch, Operand(1u << VFP32DREGS));
}
-void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
+void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
@@ -2421,7 +2346,7 @@ void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
vstm(db_w, location, d0, d15);
}
-void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
+void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
@@ -2430,7 +2355,7 @@ void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
-void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
+void MacroAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
@@ -2439,7 +2364,7 @@ void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
-void TurboAssembler::RestoreFPRegsFromHeap(Register location,
+void MacroAssembler::RestoreFPRegsFromHeap(Register location,
Register scratch) {
ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
@@ -2450,7 +2375,7 @@ void TurboAssembler::RestoreFPRegsFromHeap(Register location,
}
template <typename T>
-void TurboAssembler::FloatMaxHelper(T result, T left, T right,
+void MacroAssembler::FloatMaxHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
@@ -2481,7 +2406,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right,
}
template <typename T>
-void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
+void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
DCHECK(left != right);
// ARMv8: At least one of left and right is a NaN.
@@ -2494,7 +2419,7 @@ void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
}
template <typename T>
-void TurboAssembler::FloatMinHelper(T result, T left, T right,
+void MacroAssembler::FloatMinHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
@@ -2540,7 +2465,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right,
}
template <typename T>
-void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
+void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
DCHECK(left != right);
// At least one of left and right is a NaN. Use vadd to propagate the NaN
@@ -2548,42 +2473,42 @@ void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
vadd(result, left, right);
}
-void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
+void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right, Label* out_of_line) {
FloatMaxHelper(result, left, right, out_of_line);
}
-void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
+void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right, Label* out_of_line) {
FloatMinHelper(result, left, right, out_of_line);
}
-void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
+void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right, Label* out_of_line) {
FloatMaxHelper(result, left, right, out_of_line);
}
-void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
+void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right, Label* out_of_line) {
FloatMinHelper(result, left, right, out_of_line);
}
-void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right) {
FloatMaxOutOfLineHelper(result, left, right);
}
-void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right) {
FloatMinOutOfLineHelper(result, left, right);
}
-void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right) {
FloatMaxOutOfLineHelper(result, left, right);
}
-void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right) {
FloatMinOutOfLineHelper(result, left, right);
}
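The FloatMax/FloatMin helpers above are fast-path/slow-path pairs ("the out-of-line code can be completely avoided" when neither input is a NaN). A minimal sketch of the intended pairing, with a hypothetical wrapper name:

// Sketch only: the wrapper is hypothetical; the MacroAssembler calls match
// the declarations in this patch.
void EmitFloat64Max(MacroAssembler* masm, DwVfpRegister result,
                    DwVfpRegister left, DwVfpRegister right) {
  Label out_of_line, done;
  // Fast path: handles non-NaN inputs inline, branches out for NaNs.
  masm->FloatMax(result, left, right, &out_of_line);
  masm->b(&done);
  // Slow path: handles the NaN case.
  masm->bind(&out_of_line);
  masm->FloatMaxOutOfLine(result, left, right);
  masm->bind(&done);
}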
@@ -2592,7 +2517,7 @@ static const int kRegisterPassedArguments = 4;
// The hardfloat calling convention passes double arguments in registers d0-d7.
static const int kDoubleRegisterPassedArguments = 8;
-int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (use_eabi_hardfloat()) {
@@ -2614,7 +2539,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
ASM_CODE_COMMENT(this);
@@ -2636,7 +2561,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
-void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
+void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
DCHECK(src == d0);
if (!use_eabi_hardfloat()) {
vmov(r0, r1, src);
@@ -2644,11 +2569,11 @@ void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
}
// On ARM this is just a synonym to make the purpose clear.
-void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
+void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
MovToFloatParameter(src);
}
-void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
+void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(src1 == d0);
DCHECK(src2 == d1);
@@ -2658,32 +2583,38 @@ void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
}
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Move(scratch, function);
- CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
@@ -2708,27 +2639,29 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
#endif
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- Register addr_scratch = r4;
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- str(pc,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()));
- str(fp,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Push(addr_scratch);
-
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_pc_address(isolate()));
- str(pc, MemOperand(addr_scratch));
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- str(fp, MemOperand(addr_scratch));
-
- Pop(addr_scratch);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ str(pc, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ str(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r4;
+ Push(addr_scratch);
+
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ str(pc, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ str(fp, MemOperand(addr_scratch));
+
+ Pop(addr_scratch);
+ }
}
// Just call directly. The function called cannot cause a GC, or
@@ -2736,24 +2669,28 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// stays correct.
Call(function);
- // We don't unset the PC; the FP is the source of truth.
- Register zero_scratch = r5;
- Push(zero_scratch);
- mov(zero_scratch, Operand::Zero());
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r5;
+ Push(zero_scratch);
+ mov(zero_scratch, Operand::Zero());
- if (root_array_available()) {
- str(zero_scratch,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Push(addr_scratch);
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- str(zero_scratch, MemOperand(addr_scratch));
- Pop(addr_scratch);
- }
+ if (root_array_available()) {
+ str(zero_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r4;
+ Push(addr_scratch);
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ str(zero_scratch, MemOperand(addr_scratch));
+ Pop(addr_scratch);
+ }
- Pop(zero_scratch);
+ Pop(zero_scratch);
+ }
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
@@ -2764,7 +2701,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
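The new SetIsolateDataSlots parameter lets a caller skip the fast-C-call fp/pc bookkeeping described above; a minimal, hypothetical usage sketch built from the signatures in this patch:

// Sketch only: the helper name, argument counts and external reference are
// placeholders. SetIsolateDataSlots::kYes (the default) keeps the previous
// behaviour of publishing fp/pc to the isolate data slots.
void EmitPureCCall(MacroAssembler* masm, ExternalReference helper,
                   Register scratch) {
  masm->PrepareCallCFunction(/*num_reg_arguments=*/2,
                             /*num_double_arguments=*/0, scratch);
  masm->CallCFunction(helper, /*num_reg_arguments=*/2,
                      /*num_double_arguments=*/0,
                      MacroAssembler::SetIsolateDataSlots::kNo);
}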
-void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
+void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Label* condition_met) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -2792,13 +2729,13 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
ASM_CODE_COMMENT(this);
// We can use the register pc - 8 for the address of the current instruction.
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -2820,10 +2757,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DCHECK(!has_pending_constants());
}
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) {
+void MacroAssembler::I64x2BitMask(Register dst, QwNeonRegister src) {
UseScratchRegisterScope temps(this);
QwNeonRegister tmp1 = temps.AcquireQ();
Register tmp = temps.Acquire();
@@ -2834,7 +2771,7 @@ void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) {
add(dst, dst, Operand(tmp, LSL, 1));
}
-void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
+void MacroAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
UseScratchRegisterScope temps(this);
Simd128Register scratch = temps.AcquireQ();
@@ -2843,7 +2780,7 @@ void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
vand(dst, dst, scratch);
}
-void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
+void MacroAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
UseScratchRegisterScope temps(this);
Simd128Register tmp = temps.AcquireQ();
@@ -2853,14 +2790,14 @@ void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
vorn(dst, dst, tmp);
}
-void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
+void MacroAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
ASM_CODE_COMMENT(this);
vqsub(NeonS64, dst, src2, src1);
vshr(NeonS64, dst, dst, 63);
}
-void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
+void MacroAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
ASM_CODE_COMMENT(this);
vqsub(NeonS64, dst, src1, src2);
@@ -2868,7 +2805,7 @@ void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
vmvn(dst, dst);
}
-void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
+void MacroAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
QwNeonRegister tmp = temps.AcquireQ();
@@ -2892,7 +2829,7 @@ void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
  // = definition of i64x2.all_true.
}
-void TurboAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) {
+void MacroAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Simd128Register tmp = temps.AcquireQ();
@@ -2921,17 +2858,17 @@ void F64x2ConvertLowHelper(Assembler* assm, QwNeonRegister dst,
}
} // namespace
-void TurboAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst,
+void MacroAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst,
QwNeonRegister src) {
F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_s32);
}
-void TurboAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst,
+void MacroAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst,
QwNeonRegister src) {
F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_u32);
}
-void TurboAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst,
+void MacroAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst,
QwNeonRegister src) {
F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32);
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 51f7907581..971b21661b 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -43,9 +43,9 @@ enum TargetAddressStorageMode {
NEVER_INLINE_TARGET_ADDRESS
};
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -250,12 +250,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
void MovFromFloatParameter(DwVfpRegister dst);
void MovFromFloatResult(DwVfpRegister dst);
@@ -265,11 +276,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug-code to enable.
- void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but without condition.
// Use --debug-code to enable.
- void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cond, AbortReason reason);
@@ -323,7 +334,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin, Condition cond = al);
void TailCallBuiltin(Builtin builtin, Condition cond = al);
- void LoadCodeObjectEntry(Register destination, Register code_object);
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
@@ -589,49 +601,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);
- private:
- // Compare single values and then load the fpscr flags to a register.
- void VFPCompareAndLoadFlags(const SwVfpRegister src1,
- const SwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond = al);
- void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
- const Register fpscr_flags,
- const Condition cond = al);
-
- // Compare double values and then load the fpscr flags to a register.
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond = al);
- void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
- const Register fpscr_flags,
- const Condition cond = al);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
- // Implementation helpers for FloatMin and FloatMax.
- template <typename T>
- void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
- template <typename T>
- void FloatMinHelper(T result, T left, T right, Label* out_of_line);
- template <typename T>
- void FloatMaxOutOfLineHelper(T result, T left, T right);
- template <typename T>
- void FloatMinOutOfLineHelper(T result, T left, T right);
-
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
@@ -661,14 +630,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
- void EnterExitFrame(bool save_doubles, int stack_space = 0,
- StackFrame::Type frame_type = StackFrame::EXIT);
+ void EnterExitFrame(int stack_space, StackFrame::Type frame_type);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool argument_count_is_length = false);
+ void LeaveExitFrame(Register argument_count, bool argument_count_is_length);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
@@ -772,7 +739,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Tiering support.
- void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
@@ -786,20 +753,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump).
@@ -809,9 +773,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -852,31 +813,31 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
template <typename Field>
void DecodeField(Register dst, Register src) {
@@ -888,7 +849,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
- void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
+ void TestCodeIsMarkedForDeoptimization(Register code, Register scratch);
Operand ClearedValue() const;
private:
@@ -897,6 +858,43 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeType type);
+ // Compare single values and then load the fpscr flags to a register.
+ void VFPCompareAndLoadFlags(const SwVfpRegister src1,
+ const SwVfpRegister src2,
+ const Register fpscr_flags,
+ const Condition cond = al);
+ void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
+ const Register fpscr_flags,
+ const Condition cond = al);
+
+ // Compare double values and then load the fpscr flags to a register.
+ void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Register fpscr_flags,
+ const Condition cond = al);
+ void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
+ const Register fpscr_flags,
+ const Condition cond = al);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+
+ // Implementation helpers for FloatMin and FloatMax.
+ template <typename T>
+ void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
+ template <typename T>
+ void FloatMinHelper(T result, T left, T right, Label* out_of_line);
+ template <typename T>
+ void FloatMaxOutOfLineHelper(T result, T left, T right);
+ template <typename T>
+ void FloatMinOutOfLineHelper(T result, T left, T right);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
@@ -907,7 +905,7 @@ struct MoveCycleState {
VfpRegList scratch_v_reglist = 0;
// Available scratch registers during the move cycle resolution scope.
base::Optional<UseScratchRegisterScope> temps;
- // Code of the scratch register picked by {MoveToTempLocation}.
+ // Register code of the scratch register picked by {MoveToTempLocation}.
int scratch_reg_code = -1;
};
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 4edcddaa6f..355d6b6f5e 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -308,7 +308,6 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
constexpr Register kJavaScriptCallExtraArg1Register = r2;
-constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
constexpr Register kRuntimeCallArgvRegister = r2;
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 37158040ea..d564d06274 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -485,15 +485,15 @@ Tagged_t Assembler::target_compressed_address_at(Address pc,
return Memory<Tagged_t>(target_pointer_address_at(pc));
}
-Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
+Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- return Handle<CodeT>(reinterpret_cast<Address*>(
+ return Handle<Code>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
- return Handle<CodeT>::cast(
+ return Handle<Code>::cast(
GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
}
}
@@ -620,7 +620,7 @@ int RelocInfo::target_address_size() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsNearBuiltinEntry(rmode_) ||
- IsWasmCall(rmode_));
+ IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -659,10 +659,11 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
Tagged_t compressed =
Assembler::target_compressed_address_at(pc_, constant_pool_);
DCHECK(!HAS_SMI_TAG(compressed));
- Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
- compressed));
- // Embedding of compressed Code objects must not happen when external code
- // space is enabled, because CodeDataContainers must be used instead.
+ Object obj(
+ V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
+ // Embedding of compressed InstructionStream objects must not happen when
+ // external code space is enabled, because Code objects must be used
+ // instead.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
!IsCodeSpaceObject(HeapObject::cast(obj)));
return HeapObject::cast(obj);
@@ -688,15 +689,15 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_,
- V8HeapCompressionScheme::CompressTagged(target.ptr()),
+ V8HeapCompressionScheme::CompressObject(target.ptr()),
icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index dc06c743a0..f753e0bcc8 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -64,17 +64,26 @@ unsigned SimulatorFeaturesFromCommandLine() {
constexpr unsigned CpuFeaturesFromCompiler() {
unsigned features = 0;
-#if defined(__ARM_FEATURE_JCVT)
+#if defined(__ARM_FEATURE_JCVT) && !defined(V8_TARGET_OS_IOS)
features |= 1u << JSCVT;
#endif
+#if defined(__ARM_FEATURE_DOTPROD)
+ features |= 1u << DOTPROD;
+#endif
+#if defined(__ARM_FEATURE_ATOMICS)
+ features |= 1u << LSE;
+#endif
return features;
}
constexpr unsigned CpuFeaturesFromTargetOS() {
unsigned features = 0;
#if defined(V8_TARGET_OS_MACOS) && !defined(V8_TARGET_OS_IOS)
- // TODO(v8:13004): Detect if an iPhone is new enough to support jscvt.
+ // TODO(v8:13004): Detect if an iPhone is new enough to support jscvt, dotprod
+ // and lse.
features |= 1u << JSCVT;
+ features |= 1u << DOTPROD;
+ features |= 1u << LSE;
#endif
return features;
}
@@ -106,6 +115,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_jscvt()) {
runtime |= 1u << JSCVT;
}
+ if (cpu.has_dot_prod()) {
+ runtime |= 1u << DOTPROD;
+ }
+ if (cpu.has_lse()) {
+ runtime |= 1u << LSE;
+ }
// Use the best of the features found by CPU detection and those inferred from
// the build system.
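
A self-contained sketch of the feature-detection pattern in the hunk above: compile-time defines and runtime CPU probing each contribute bits to a feature mask, and the two sources are then combined. The bit values and the runtime-probe stub below are invented for illustration, and the sketch simply ORs the two masks, which need not match V8's exact combination policy.

#include <cstdio>

// Illustrative feature bits (not V8's enumerators).
enum CpuFeatureBit : unsigned { kJSCVT = 1u << 0, kDOTPROD = 1u << 1, kLSE = 1u << 2 };

constexpr unsigned FeaturesFromCompiler() {
  unsigned features = 0;
#if defined(__ARM_FEATURE_JCVT)
  features |= kJSCVT;
#endif
#if defined(__ARM_FEATURE_DOTPROD)
  features |= kDOTPROD;
#endif
#if defined(__ARM_FEATURE_ATOMICS)
  features |= kLSE;
#endif
  return features;
}

unsigned FeaturesFromRuntimeProbe() {
  // Stand-in for base::CPU probing (has_jscvt(), has_dot_prod(), has_lse()).
  return kJSCVT | kLSE;
}

int main() {
  unsigned supported = FeaturesFromCompiler() | FeaturesFromRuntimeProbe();
  std::printf("LSE: %s\n", (supported & kLSE) ? "yes" : "no");
}
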
@@ -188,7 +203,8 @@ CPURegList CPURegList::GetCallerSavedV(int size) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::NEAR_BUILTIN_ENTRY) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -273,6 +289,14 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
return match;
}
+bool AreSameFormat(const Register& reg1, const Register& reg2,
+ const Register& reg3, const Register& reg4) {
+ DCHECK(reg1.is_valid());
+ return (!reg2.is_valid() || reg2.IsSameSizeAndType(reg1)) &&
+ (!reg3.is_valid() || reg3.IsSameSizeAndType(reg1)) &&
+ (!reg4.is_valid() || reg4.IsSameSizeAndType(reg1));
+}
+
bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
const VRegister& reg3, const VRegister& reg4) {
DCHECK(reg1.is_valid());
@@ -281,32 +305,49 @@ bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
(!reg4.is_valid() || reg4.IsSameFormat(reg1));
}
-bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
- const VRegister& reg3, const VRegister& reg4) {
+bool AreConsecutive(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4) {
DCHECK(reg1.is_valid());
+
if (!reg2.is_valid()) {
DCHECK(!reg3.is_valid() && !reg4.is_valid());
return true;
- } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) {
+ } else if (reg2.code() != ((reg1.code() + 1) % (reg1.MaxCode() + 1))) {
return false;
}
if (!reg3.is_valid()) {
DCHECK(!reg4.is_valid());
return true;
- } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) {
+ } else if (reg3.code() != ((reg2.code() + 1) % (reg1.MaxCode() + 1))) {
return false;
}
if (!reg4.is_valid()) {
return true;
- } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) {
+ } else if (reg4.code() != ((reg3.code() + 1) % (reg1.MaxCode() + 1))) {
return false;
}
return true;
}
+bool AreEven(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ DCHECK(reg1.is_valid());
+ bool even = reg1.IsEven();
+ even &= !reg2.is_valid() || reg2.IsEven();
+ even &= !reg3.is_valid() || reg3.IsEven();
+ even &= !reg4.is_valid() || reg4.IsEven();
+ even &= !reg5.is_valid() || reg5.IsEven();
+ even &= !reg6.is_valid() || reg6.IsEven();
+ even &= !reg7.is_valid() || reg7.IsEven();
+ even &= !reg8.is_valid() || reg8.IsEven();
+ return even;
+}
+
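
A worked instance of the generalized consecutiveness check above, with register codes assumed only for illustration: because the modulo is now taken over reg1.MaxCode() + 1 instead of the hard-coded kNumberOfVRegisters, the same formula serves both register banks, and the sequence still wraps from the highest code back to 0.

#include <cassert>

int main() {
  // Wrap-around case of AreConsecutive, assuming 32 registers (codes 0..31,
  // i.e. MaxCode() == 31): register 31 followed by register 0 is consecutive.
  const int kMaxCode = 31;
  int reg1_code = 31, reg2_code = 0;
  assert(reg2_code == (reg1_code + 1) % (kMaxCode + 1));
  return 0;
}
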
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
@@ -374,16 +415,16 @@ void Assembler::AllocateAndInstallRequestedHeapNumbers(Isolate* isolate) {
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
- SafepointTableBuilder* safepoint_table_builder,
+ SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
// Emit constant pool if necessary.
ForceConstantPoolEmissionWithoutJump();
@@ -1316,6 +1357,10 @@ Operand Operand::EmbeddedNumber(double number) {
if (DoubleToSmiInteger(number, &smi)) {
return Operand(Immediate(Smi::FromInt(smi)));
}
+ return EmbeddedHeapNumber(number);
+}
+
+Operand Operand::EmbeddedHeapNumber(double number) {
Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.heap_number_request_.emplace(number);
DCHECK(result.IsHeapNumberRequest());
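
The refactoring above splits the Smi fast path out of Operand::EmbeddedNumber and routes everything else through the new EmbeddedHeapNumber. A rough standalone sketch of that decision follows; FitsSmiSketch is an invented stand-in for V8's DoubleToSmiInteger, the 31-bit Smi range is assumed, and edge cases such as -0.0 are ignored.

#include <cmath>
#include <cstdio>

// Invented stand-in: integral doubles in an assumed 31-bit Smi range become
// immediates, everything else would trigger a heap-number request.
static bool FitsSmiSketch(double number, int* out) {
  if (std::nearbyint(number) != number) return false;
  if (number < -(1 << 30) || number > (1 << 30) - 1) return false;
  *out = static_cast<int>(number);
  return true;
}

int main() {
  int smi;
  for (double d : {42.0, 0.5, 1e30}) {
    if (FitsSmiSketch(d, &smi))
      std::printf("%g -> Smi immediate %d (EmbeddedNumber fast path)\n", d, smi);
    else
      std::printf("%g -> heap number request (EmbeddedHeapNumber path)\n", d);
  }
}
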
@@ -1420,6 +1465,138 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
+#define COMPARE_AND_SWAP_W_X_LIST(V) \
+ V(cas, CAS) \
+ V(casa, CASA) \
+ V(casl, CASL) \
+ V(casal, CASAL)
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(IsEnabled(LSE)); \
+ DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
+ LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? OP##_x : OP##_w; \
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
+ }
+COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define COMPARE_AND_SWAP_W_LIST(V) \
+ V(casb, CASB) \
+ V(casab, CASAB) \
+ V(caslb, CASLB) \
+ V(casalb, CASALB) \
+ V(cash, CASH) \
+ V(casah, CASAH) \
+ V(caslh, CASLH) \
+ V(casalh, CASALH)
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(IsEnabled(LSE)); \
+ DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
+ Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
+ }
+COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define COMPARE_AND_SWAP_PAIR_LIST(V) \
+ V(casp, CASP) \
+ V(caspa, CASPA) \
+ V(caspl, CASPL) \
+ V(caspal, CASPAL)
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const Register& rs, const Register& rs1, \
+ const Register& rt, const Register& rt1, \
+ const MemOperand& src) { \
+ DCHECK(IsEnabled(LSE)); \
+ DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
+ DCHECK(AreEven(rs, rt)); \
+ DCHECK(AreConsecutive(rs, rs1)); \
+ DCHECK(AreConsecutive(rt, rt1)); \
+ DCHECK(AreSameFormat(rs, rs1, rt, rt1)); \
+ LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? OP##_x : OP##_w; \
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
+ }
+COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+// These macros generate all the variations of the atomic memory operations,
+// e.g. ldadd, ldadda, ldaddb, staddl, etc.
+// For a full list of the methods with comments, see the assembler header file.
+
+#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \
+ V(DEF, add, LDADD) \
+ V(DEF, clr, LDCLR) \
+ V(DEF, eor, LDEOR) \
+ V(DEF, set, LDSET) \
+ V(DEF, smax, LDSMAX) \
+ V(DEF, smin, LDSMIN) \
+ V(DEF, umax, LDUMAX) \
+ V(DEF, umin, LDUMIN)
+
+#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
+ V(NAME, OP##_x, OP##_w) \
+ V(NAME##l, OP##L_x, OP##L_w) \
+ V(NAME##b, OP##B, OP##B) \
+ V(NAME##lb, OP##LB, OP##LB) \
+ V(NAME##h, OP##H, OP##H) \
+ V(NAME##lh, OP##LH, OP##LH)
+
+#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \
+ ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
+ V(NAME##a, OP##A_x, OP##A_w) \
+ V(NAME##al, OP##AL_x, OP##AL_w) \
+ V(NAME##ab, OP##AB, OP##AB) \
+ V(NAME##alb, OP##ALB, OP##ALB) \
+ V(NAME##ah, OP##AH, OP##AH) \
+ V(NAME##alh, OP##ALH, OP##ALH)
+
+#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W) \
+ void Assembler::ld##FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(IsEnabled(LSE)); \
+ DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
+ AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \
+ Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base())); \
+ }
+#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W) \
+ void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
+ DCHECK(IsEnabled(LSE)); \
+ ld##FN(rs, AppropriateZeroRegFor(rs), src); \
+ }
+
+ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
+ DEFINE_ASM_LOAD_FUNC)
+ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
+ DEFINE_ASM_STORE_FUNC)
+
+#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W) \
+ void Assembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(IsEnabled(LSE)); \
+ DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
+ AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \
+ Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base())); \
+ }
+
+ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)
+
+#undef DEFINE_ASM_LOAD_FUNC
+#undef DEFINE_ASM_STORE_FUNC
+#undef DEFINE_ASM_SWP_FUNC
+
+void Assembler::sdot(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(CpuFeatures::IsSupported(DOTPROD));
+ DCHECK(vn.Is16B() && vd.Is4S());
+ DCHECK(AreSameFormat(vn, vm));
+ Emit(NEON_Q | NEON_SDOT | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn,
const VRegister& vm, NEON3DifferentOp vop) {
DCHECK(AreSameFormat(vn, vm));
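
The DEFINE_ASM_FUNC / *_LIST pairs above stamp out one Assembler member per LSE mnemonic; each generated body asserts that LSE is enabled and that the MemOperand has a zero immediate offset, picks the _w or _x opcode from the register width where applicable, and emits it. The standalone demo below shows only the X-macro mechanism itself, with invented names and printf in place of Emit.

#include <cstdio>

// X-macro demo: one generated function per list entry (names invented).
#define COMPARE_AND_SWAP_DEMO_LIST(V) \
  V(cas, "CAS")                       \
  V(casa, "CASA")                     \
  V(casl, "CASL")                     \
  V(casal, "CASAL")

#define DEFINE_DEMO_FUNC(FN, NAME) \
  void FN() { std::printf("emit %s\n", NAME); }
COMPARE_AND_SWAP_DEMO_LIST(DEFINE_DEMO_FUNC)
#undef DEFINE_DEMO_FUNC

int main() {
  cas();    // prints "emit CAS"
  casal();  // prints "emit CASAL"
}
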
@@ -3573,7 +3750,7 @@ Instr Assembler::ImmNEONFP(double imm) {
return ImmNEONabcdefgh(FPToImm8(imm));
}
-// Code generation helpers.
+// InstructionStream generation helpers.
void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
MoveWideImmediateOp mov_op) {
// Ignore the top 32 bits of an immediate if we're moving to a W register.
@@ -4356,7 +4533,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
DCHECK(constpool_.IsBlocked());
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
@@ -4379,7 +4557,7 @@ void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
void Assembler::near_call(HeapNumberRequest request) {
BlockPoolsScope no_pool_before_bl_instr(this);
RequestHeapNumber(request);
- EmbeddedObjectIndex index = AddEmbeddedObject(Handle<CodeT>());
+ EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
DCHECK(is_int32(index));
bl(static_cast<int>(index));
@@ -4482,7 +4660,8 @@ intptr_t Assembler::MaxPCOffsetAfterVeneerPoolIfEmittedNow(size_t margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
- RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
+ RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code(),
+ InstructionStream());
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 68f773a924..5c10dd8697 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -81,6 +81,7 @@ class Operand {
inline Operand(Register reg, Extend extend, unsigned shift_amount = 0);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedHeapNumber(double number);
inline bool IsHeapNumberRequest() const;
inline HeapNumberRequest heap_number_request() const;
@@ -190,9 +191,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
- static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ static constexpr SafepointTableBuilderBase* kNoSafepointTable = nullptr;
void GetCode(Isolate* isolate, CodeDesc* desc,
- SafepointTableBuilder* safepoint_table_builder,
+ SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset);
// Convenience wrapper for code without safepoint or handler tables.
@@ -261,7 +262,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
- inline Handle<CodeT> code_target_object_handle_at(Address pc);
+ inline Handle<Code> code_target_object_handle_at(Address pc);
inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc);
inline void set_embedded_object_index_referenced_from(
Address p, EmbeddedObjectIndex index);
@@ -779,12 +780,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void clz(const Register& rd, const Register& rn);
void cls(const Register& rd, const Register& rn);
- // Pointer Authentication Code for Instruction address, using key B, with
- // address in x17 and modifier in x16 [Armv8.3].
+  // Pointer Authentication Code (PAC) for instruction address, using key B,
+  // with address in x17 and modifier in x16 [Armv8.3].
void pacib1716();
- // Pointer Authentication Code for Instruction address, using key B, with
- // address in LR and modifier in SP [Armv8.3].
+  // Pointer Authentication Code (PAC) for instruction address, using key B,
+  // with address in LR and modifier in SP [Armv8.3].
void pacibsp();
// Authenticate Instruction address, using key B, with address in x17 and
@@ -878,6 +879,630 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Store-release exclusive half-word.
void stlxrh(const Register& rs, const Register& rt, const Register& rn);
+ // Compare and Swap word or doubleword in memory [Armv8.1].
+ void cas(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap word or doubleword in memory, with Load-acquire semantics
+ // [Armv8.1].
+ void casa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap word or doubleword in memory, with Store-release semantics
+ // [Armv8.1].
+ void casl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1].
+ void casal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory [Armv8.1].
+ void casb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory, with Load-acquire semantics [Armv8.1].
+ void casab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory, with Store-release semantics [Armv8.1].
+ void caslb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1].
+ void casalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory [Armv8.1].
+ void cash(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory, with Load-acquire semantics [Armv8.1].
+ void casah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory, with Store-release semantics
+ // [Armv8.1].
+ void caslh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1].
+ void casalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory [Armv8.1].
+ void casp(const Register& rs, const Register& rs2, const Register& rt,
+ const Register& rt2, const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory, with Load-acquire
+ // semantics [Armv8.1].
+ void caspa(const Register& rs, const Register& rs2, const Register& rt,
+ const Register& rt2, const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory, with Store-release
+ // semantics [Armv8.1].
+ void caspl(const Register& rs, const Register& rs2, const Register& rt,
+ const Register& rt2, const MemOperand& src);
+
+ // Compare and Swap Pair of words or doublewords in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1].
+ void caspal(const Register& rs, const Register& rs2, const Register& rt,
+ const Register& rt2, const MemOperand& src);
+
+ // Atomic add on byte in memory [Armv8.1]
+ void ldaddb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Load-acquire semantics [Armv8.1]
+ void ldaddab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Store-release semantics [Armv8.1]
+ void ldaddlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Load-acquire and Store-release semantics
+ // [Armv8.1]
+ void ldaddalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory [Armv8.1]
+ void ldaddh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Load-acquire semantics [Armv8.1]
+ void ldaddah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Store-release semantics [Armv8.1]
+ void ldaddlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldaddalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory [Armv8.1]
+ void ldadd(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldadda(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldaddl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldaddal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory [Armv8.1]
+ void ldclrb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Load-acquire semantics [Armv8.1]
+ void ldclrab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Store-release semantics [Armv8.1]
+ void ldclrlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldclralb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory [Armv8.1]
+ void ldclrh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldclrah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldclrlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldclralh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory [Armv8.1]
+ void ldclr(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldclra(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldclrl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldclral(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory [Armv8.1]
+ void ldeorb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldeorab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldeorlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldeoralb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory [Armv8.1]
+ void ldeorh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldeorah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldeorlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldeoralh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory [Armv8.1]
+ void ldeor(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldeora(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldeorl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldeoral(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory [Armv8.1]
+ void ldsetb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Load-acquire semantics [Armv8.1]
+ void ldsetab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Store-release semantics [Armv8.1]
+ void ldsetlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldsetalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory [Armv8.1]
+ void ldseth(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Load-acquire semantics [Armv8.1]
+ void ldsetah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsetlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void ldsetalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory [Armv8.1]
+ void ldset(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldseta(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldsetl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsetal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory [Armv8.1]
+ void ldsmaxb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsmaxab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsmaxlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsmaxalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory [Armv8.1]
+ void ldsmaxh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsmaxah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsmaxlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsmaxalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory [Armv8.1]
+ void ldsmax(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldsmaxa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldsmaxl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void ldsmaxal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory [Armv8.1]
+ void ldsminb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsminab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsminlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsminalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory [Armv8.1]
+ void ldsminh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldsminah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldsminlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldsminalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory [Armv8.1]
+ void ldsmin(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldsmina(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldsminl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void ldsminal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory [Armv8.1]
+ void ldumaxb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldumaxab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldumaxlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldumaxalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory [Armv8.1]
+ void ldumaxh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void ldumaxah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void ldumaxlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void ldumaxalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory [Armv8.1]
+ void ldumax(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldumaxa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void ldumaxl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void ldumaxal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory [Armv8.1]
+ void lduminb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void lduminab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Store-release semantics
+ // [Armv8.1]
+ void lduminlb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void lduminalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory [Armv8.1]
+ void lduminh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Load-acquire semantics
+ // [Armv8.1]
+ void lduminah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Store-release semantics
+ // [Armv8.1]
+ void lduminlh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Load-acquire and
+ // Store-release semantics [Armv8.1]
+ void lduminalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory [Armv8.1]
+ void ldumin(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire
+ // semantics [Armv8.1]
+ void ldumina(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Store-release
+ // semantics [Armv8.1]
+ void lduminl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire
+ // and Store-release semantics [Armv8.1]
+ void lduminal(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Atomic add on byte in memory, without return. [Armv8.1]
+ void staddb(const Register& rs, const MemOperand& src);
+
+ // Atomic add on byte in memory, with Store-release semantics and without
+ // return. [Armv8.1]
+ void staddlb(const Register& rs, const MemOperand& src);
+
+ // Atomic add on halfword in memory, without return. [Armv8.1]
+ void staddh(const Register& rs, const MemOperand& src);
+
+ // Atomic add on halfword in memory, with Store-release semantics and without
+ // return. [Armv8.1]
+ void staddlh(const Register& rs, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, without return. [Armv8.1]
+ void stadd(const Register& rs, const MemOperand& src);
+
+ // Atomic add on word or doubleword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void staddl(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, without return. [Armv8.1]
+ void stclrb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stclrlb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, without return. [Armv8.1]
+ void stclrh(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on halfword in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stclrlh(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, without return. [Armv8.1]
+ void stclr(const Register& rs, const MemOperand& src);
+
+ // Atomic bit clear on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stclrl(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, without return. [Armv8.1]
+ void steorb(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void steorlb(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, without return. [Armv8.1]
+ void steorh(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void steorlh(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void steor(const Register& rs, const MemOperand& src);
+
+ // Atomic exclusive OR on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void steorl(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, without return. [Armv8.1]
+ void stsetb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on byte in memory, with Store-release semantics and without
+ // return. [Armv8.1]
+ void stsetlb(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, without return. [Armv8.1]
+ void stseth(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on halfword in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stsetlh(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, without return. [Armv8.1]
+ void stset(const Register& rs, const MemOperand& src);
+
+ // Atomic bit set on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stsetl(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, without return. [Armv8.1]
+ void stsmaxb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stsmaxlb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, without return. [Armv8.1]
+ void stsmaxh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stsmaxlh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stsmax(const Register& rs, const MemOperand& src);
+
+ // Atomic signed maximum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stsmaxl(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, without return. [Armv8.1]
+ void stsminb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stsminlb(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, without return. [Armv8.1]
+ void stsminh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stsminlh(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stsmin(const Register& rs, const MemOperand& src);
+
+ // Atomic signed minimum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stsminl(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, without return. [Armv8.1]
+ void stumaxb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stumaxlb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, without return. [Armv8.1]
+ void stumaxh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stumaxlh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stumax(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned maximum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stumaxl(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, without return. [Armv8.1]
+ void stuminb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on byte in memory, with Store-release semantics and
+ // without return. [Armv8.1]
+ void stuminlb(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, without return. [Armv8.1]
+ void stuminh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on halfword in memory, with Store-release semantics
+ // and without return. [Armv8.1]
+ void stuminlh(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, without return.
+ // [Armv8.1]
+ void stumin(const Register& rs, const MemOperand& src);
+
+ // Atomic unsigned minimum on word or doubleword in memory, with Store-release
+ // semantics and without return. [Armv8.1]
+ void stuminl(const Register& rs, const MemOperand& src);
+
+ // Swap byte in memory [Armv8.1]
+ void swpb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap byte in memory, with Load-acquire semantics [Armv8.1]
+ void swpab(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap byte in memory, with Store-release semantics [Armv8.1]
+ void swplb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap byte in memory, with Load-acquire and Store-release semantics
+ // [Armv8.1]
+ void swpalb(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory [Armv8.1]
+ void swph(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory, with Load-acquire semantics [Armv8.1]
+ void swpah(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory, with Store-release semantics [Armv8.1]
+ void swplh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap halfword in memory, with Load-acquire and Store-release semantics
+ // [Armv8.1]
+ void swpalh(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory [Armv8.1]
+ void swp(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory, with Load-acquire semantics [Armv8.1]
+ void swpa(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory, with Store-release semantics [Armv8.1]
+ void swpl(const Register& rs, const Register& rt, const MemOperand& src);
+
+ // Swap word or doubleword in memory, with Load-acquire and Store-release
+ // semantics [Armv8.1]
+ void swpal(const Register& rs, const Register& rt, const MemOperand& src);
+
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift
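
The instructions declared above are the single-instruction forms of familiar read-modify-write atomics; the new assembler entry points simply let V8 emit them directly. As a point of reference only, a plain C++ program compiled for Armv8.1-A (e.g. with -march=armv8.1-a) is typically lowered by GCC/Clang to the same encodings noted in the comments below.

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> x{0};
  x.fetch_add(1, std::memory_order_seq_cst);               // typically ldaddal
  int expected = 1;
  x.compare_exchange_strong(expected, 5,
                            std::memory_order_seq_cst);    // typically casal
  int old = x.exchange(7, std::memory_order_seq_cst);      // typically swpal
  std::printf("%d %d\n", old, x.load());
}
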
@@ -1219,6 +1844,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Signed minimum across vector.
void sminv(const VRegister& vd, const VRegister& vn);
+ // Signed dot product
+ void sdot(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
// One-element structure store from one register.
void st1(const VRegister& vt, const MemOperand& src);
@@ -2087,7 +2715,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
dc64(data);
}
- // Code generation helpers --------------------------------------------------
+ // InstructionStream generation helpers
+ // --------------------------------------------------
Instruction* pc() const { return Instruction::Cast(pc_); }
@@ -2662,7 +3291,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
std::deque<int> internal_reference_positions_;
protected:
- // Code generation
+ // InstructionStream generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index 7a054863d1..1866842f1e 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -28,6 +28,8 @@ static_assert(sizeof(1) == sizeof(int32_t));
namespace v8 {
namespace internal {
+// The maximum size of the code range s.t. pc-relative calls are possible
+// between all Code objects in the range.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
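
A quick check of the 128 MB bound mentioned above: an Arm64 direct branch (B/BL) encodes a signed 26-bit offset in units of kInstrSize, so the reachable distance is plus or minus 2^25 instructions of 4 bytes each. Keeping the whole code range within that distance is what keeps pc-relative calls possible between any two Code objects in the range.

#include <cstdio>

int main() {
  constexpr long long kInstrSize = 4;   // bytes per instruction
  constexpr long long kImmBits = 26;    // signed branch immediate width
  constexpr long long range = (1LL << (kImmBits - 1)) * kInstrSize;
  std::printf("+/- %lld MB\n", range / (1024 * 1024));  // prints +/- 128 MB
}
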
@@ -140,11 +142,9 @@ constexpr unsigned kFloat16MantissaBits = 10;
constexpr unsigned kFloat16ExponentBits = 5;
constexpr unsigned kFloat16ExponentBias = 15;
-// Actual value of root register is offset from the root array's start
+// The actual value of the kRootRegister is offset from the IsolateData's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
-// TODO(ishell): Choose best value for ptr-compr.
-constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 256 : 0;
+constexpr int kRootRegisterBias = 256;
using float16 = uint16_t;
@@ -300,25 +300,41 @@ SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
constexpr int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
// Condition codes.
-enum Condition {
- eq = 0,
- ne = 1,
- hs = 2,
- cs = hs,
- lo = 3,
- cc = lo,
- mi = 4,
- pl = 5,
- vs = 6,
- vc = 7,
- hi = 8,
- ls = 9,
- ge = 10,
- lt = 11,
- gt = 12,
- le = 13,
- al = 14,
- nv = 15 // Behaves as always/al.
+enum Condition : uint8_t {
+ eq = 0, // Equal
+ ne = 1, // Not equal
+ hs = 2, // Unsigned higher or same (or carry set)
+ cs = hs, // --
+ lo = 3, // Unsigned lower (or carry clear)
+ cc = lo, // --
+ mi = 4, // Negative
+ pl = 5, // Positive or zero
+ vs = 6, // Signed overflow
+ vc = 7, // No signed overflow
+ hi = 8, // Unsigned higher
+ ls = 9, // Unsigned lower or same
+ ge = 10, // Signed greater than or equal
+ lt = 11, // Signed less than
+ gt = 12, // Signed greater than
+ le = 13, // Signed less than or equal
+ al = 14, // Always executed
+ nv = 15, // Behaves as always/al.
+
+ // Unified cross-platform condition names/aliases.
+ kEqual = eq,
+ kNotEqual = ne,
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+ kUnsignedLessThan = lo,
+ kUnsignedGreaterThan = hi,
+ kUnsignedLessThanEqual = ls,
+ kUnsignedGreaterThanEqual = hs,
+ kOverflow = vs,
+ kNoOverflow = vc,
+ kZero = eq,
+ kNotZero = ne,
};
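The cross-platform names added above are plain aliases of the architectural encodings, so they can be mixed freely with the traditional mnemonics. A few restatements of the enum, written as compile-time checks:

static_assert(kEqual == eq && kNotEqual == ne);
static_assert(kUnsignedLessThan == lo && kUnsignedGreaterThanEqual == hs);
static_assert(kZero == kEqual && kNotZero == kNotEqual);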
inline Condition NegateCondition(Condition cond) {
@@ -961,7 +977,7 @@ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET);
using LoadStoreAcquireReleaseOp = uint32_t;
constexpr LoadStoreAcquireReleaseOp LoadStoreAcquireReleaseFixed = 0x08000000;
constexpr LoadStoreAcquireReleaseOp LoadStoreAcquireReleaseFMask = 0x3F000000;
-constexpr LoadStoreAcquireReleaseOp LoadStoreAcquireReleaseMask = 0xCFC08000;
+constexpr LoadStoreAcquireReleaseOp LoadStoreAcquireReleaseMask = 0xCFE08000;
constexpr LoadStoreAcquireReleaseOp STLXR_b =
LoadStoreAcquireReleaseFixed | 0x00008000;
constexpr LoadStoreAcquireReleaseOp LDAXR_b =
@@ -995,6 +1011,101 @@ constexpr LoadStoreAcquireReleaseOp STLR_x =
constexpr LoadStoreAcquireReleaseOp LDAR_x =
LoadStoreAcquireReleaseFixed | 0xC0C08000;
+// Compare and swap acquire/release [Armv8.1].
+constexpr LoadStoreAcquireReleaseOp LSEBit_l = 0x00400000;
+constexpr LoadStoreAcquireReleaseOp LSEBit_o0 = 0x00008000;
+constexpr LoadStoreAcquireReleaseOp LSEBit_sz = 0x40000000;
+constexpr LoadStoreAcquireReleaseOp CASFixed =
+ LoadStoreAcquireReleaseFixed | 0x80A00000;
+constexpr LoadStoreAcquireReleaseOp CASBFixed =
+ LoadStoreAcquireReleaseFixed | 0x00A00000;
+constexpr LoadStoreAcquireReleaseOp CASHFixed =
+ LoadStoreAcquireReleaseFixed | 0x40A00000;
+constexpr LoadStoreAcquireReleaseOp CASPFixed =
+ LoadStoreAcquireReleaseFixed | 0x00200000;
+constexpr LoadStoreAcquireReleaseOp CAS_w = CASFixed;
+constexpr LoadStoreAcquireReleaseOp CAS_x = CASFixed | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASA_w = CASFixed | LSEBit_l;
+constexpr LoadStoreAcquireReleaseOp CASA_x = CASFixed | LSEBit_l | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASL_w = CASFixed | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASL_x = CASFixed | LSEBit_o0 | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASAL_w = CASFixed | LSEBit_l | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASAL_x =
+ CASFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASB = CASBFixed;
+constexpr LoadStoreAcquireReleaseOp CASAB = CASBFixed | LSEBit_l;
+constexpr LoadStoreAcquireReleaseOp CASLB = CASBFixed | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASALB = CASBFixed | LSEBit_l | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASH = CASHFixed;
+constexpr LoadStoreAcquireReleaseOp CASAH = CASHFixed | LSEBit_l;
+constexpr LoadStoreAcquireReleaseOp CASLH = CASHFixed | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASALH = CASHFixed | LSEBit_l | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASP_w = CASPFixed;
+constexpr LoadStoreAcquireReleaseOp CASP_x = CASPFixed | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASPA_w = CASPFixed | LSEBit_l;
+constexpr LoadStoreAcquireReleaseOp CASPA_x = CASPFixed | LSEBit_l | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASPL_w = CASPFixed | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASPL_x = CASPFixed | LSEBit_o0 | LSEBit_sz;
+constexpr LoadStoreAcquireReleaseOp CASPAL_w = CASPFixed | LSEBit_l | LSEBit_o0;
+constexpr LoadStoreAcquireReleaseOp CASPAL_x =
+ CASPFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz;
+
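The single-register and pair compare-and-swap encodings are composed from three bits: LSEBit_l selects the acquire form, LSEBit_o0 the release form, and LSEBit_sz the 64-bit (X register) width. A couple of restatements of the definitions above make the composition explicit:

static_assert(CASA_x == (CASFixed | LSEBit_l | LSEBit_sz));
static_assert(CASAL_w == (CASFixed | LSEBit_l | LSEBit_o0));
static_assert(CASPL_x == (CASPFixed | LSEBit_o0 | LSEBit_sz));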
+#define ATOMIC_MEMORY_SIMPLE_OPC_LIST(V) \
+ V(LDADD, 0x00000000); \
+ V(LDCLR, 0x00001000); \
+ V(LDEOR, 0x00002000); \
+ V(LDSET, 0x00003000); \
+ V(LDSMAX, 0x00004000); \
+ V(LDSMIN, 0x00005000); \
+ V(LDUMAX, 0x00006000); \
+ V(LDUMIN, 0x00007000)
+
+// Atomic memory operations [Armv8.1].
+using AtomicMemoryOp = uint32_t;
+constexpr AtomicMemoryOp AtomicMemoryFixed = 0x38200000;
+constexpr AtomicMemoryOp AtomicMemoryFMask = 0x3B200C00;
+constexpr AtomicMemoryOp AtomicMemoryMask = 0xFFE0FC00;
+constexpr AtomicMemoryOp SWPB = AtomicMemoryFixed | 0x00008000;
+constexpr AtomicMemoryOp SWPAB = AtomicMemoryFixed | 0x00808000;
+constexpr AtomicMemoryOp SWPLB = AtomicMemoryFixed | 0x00408000;
+constexpr AtomicMemoryOp SWPALB = AtomicMemoryFixed | 0x00C08000;
+constexpr AtomicMemoryOp SWPH = AtomicMemoryFixed | 0x40008000;
+constexpr AtomicMemoryOp SWPAH = AtomicMemoryFixed | 0x40808000;
+constexpr AtomicMemoryOp SWPLH = AtomicMemoryFixed | 0x40408000;
+constexpr AtomicMemoryOp SWPALH = AtomicMemoryFixed | 0x40C08000;
+constexpr AtomicMemoryOp SWP_w = AtomicMemoryFixed | 0x80008000;
+constexpr AtomicMemoryOp SWPA_w = AtomicMemoryFixed | 0x80808000;
+constexpr AtomicMemoryOp SWPL_w = AtomicMemoryFixed | 0x80408000;
+constexpr AtomicMemoryOp SWPAL_w = AtomicMemoryFixed | 0x80C08000;
+constexpr AtomicMemoryOp SWP_x = AtomicMemoryFixed | 0xC0008000;
+constexpr AtomicMemoryOp SWPA_x = AtomicMemoryFixed | 0xC0808000;
+constexpr AtomicMemoryOp SWPL_x = AtomicMemoryFixed | 0xC0408000;
+constexpr AtomicMemoryOp SWPAL_x = AtomicMemoryFixed | 0xC0C08000;
+
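The Fixed/FMask pair appears to follow the same convention as the other instruction classes in this header: an encoding belongs to the class when its fixed bits match under the class mask. A standalone restatement of that check, for illustration only:

constexpr bool IsAtomicMemoryEncoding(uint32_t instruction_bits) {
  // True when the bits covered by the class mask equal the class pattern.
  return (instruction_bits & AtomicMemoryFMask) == AtomicMemoryFixed;
}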
+constexpr AtomicMemoryOp AtomicMemorySimpleFMask = 0x3B208C00;
+constexpr AtomicMemoryOp AtomicMemorySimpleOpMask = 0x00007000;
+#define ATOMIC_MEMORY_SIMPLE(N, OP) \
+ constexpr AtomicMemoryOp N##Op = OP; \
+ constexpr AtomicMemoryOp N##B = AtomicMemoryFixed | OP; \
+ constexpr AtomicMemoryOp N##AB = AtomicMemoryFixed | OP | 0x00800000; \
+ constexpr AtomicMemoryOp N##LB = AtomicMemoryFixed | OP | 0x00400000; \
+ constexpr AtomicMemoryOp N##ALB = AtomicMemoryFixed | OP | 0x00C00000; \
+ constexpr AtomicMemoryOp N##H = AtomicMemoryFixed | OP | 0x40000000; \
+ constexpr AtomicMemoryOp N##AH = AtomicMemoryFixed | OP | 0x40800000; \
+ constexpr AtomicMemoryOp N##LH = AtomicMemoryFixed | OP | 0x40400000; \
+ constexpr AtomicMemoryOp N##ALH = AtomicMemoryFixed | OP | 0x40C00000; \
+ constexpr AtomicMemoryOp N##_w = AtomicMemoryFixed | OP | 0x80000000; \
+ constexpr AtomicMemoryOp N##A_w = AtomicMemoryFixed | OP | 0x80800000; \
+ constexpr AtomicMemoryOp N##L_w = AtomicMemoryFixed | OP | 0x80400000; \
+ constexpr AtomicMemoryOp N##AL_w = AtomicMemoryFixed | OP | 0x80C00000; \
+ constexpr AtomicMemoryOp N##_x = AtomicMemoryFixed | OP | 0xC0000000; \
+ constexpr AtomicMemoryOp N##A_x = AtomicMemoryFixed | OP | 0xC0800000; \
+ constexpr AtomicMemoryOp N##L_x = AtomicMemoryFixed | OP | 0xC0400000; \
+ constexpr AtomicMemoryOp N##AL_x = AtomicMemoryFixed | OP | 0xC0C00000
+
+ATOMIC_MEMORY_SIMPLE_OPC_LIST(ATOMIC_MEMORY_SIMPLE);
+#undef ATOMIC_MEMORY_SIMPLE
+
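For one concrete expansion, ATOMIC_MEMORY_SIMPLE applied to the V(LDADD, 0x00000000) entry produces LDADDOp plus sixteen sized and ordered variants. Two of them, restated directly from the macro body above:

static_assert(LDADDB == (AtomicMemoryFixed | LDADDOp));
static_assert(LDADDAL_x == (AtomicMemoryFixed | LDADDOp | 0xC0C00000));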
// Conditional compare.
using ConditionalCompareOp = uint32_t;
constexpr ConditionalCompareOp ConditionalCompareMask = 0x60000000;
@@ -1637,6 +1748,7 @@ constexpr NEON3SameOp NEON_BSL = NEON3SameLogicalFixed | 0x20400000;
// NEON instructions with three different-type operands.
using NEON3DifferentOp = uint32_t;
constexpr NEON3DifferentOp NEON3DifferentFixed = 0x0E200000;
+constexpr NEON3DifferentOp NEON3DifferentDot = 0x0E800000;
constexpr NEON3DifferentOp NEON3DifferentFMask = 0x9F200C00;
constexpr NEON3DifferentOp NEON3DifferentMask = 0xFF20FC00;
constexpr NEON3DifferentOp NEON_ADDHN = NEON3DifferentFixed | 0x00004000;
@@ -1655,6 +1767,7 @@ constexpr NEON3DifferentOp NEON_SADDL = NEON3DifferentFixed | 0x00000000;
constexpr NEON3DifferentOp NEON_SADDL2 = NEON_SADDL | NEON_Q;
constexpr NEON3DifferentOp NEON_SADDW = NEON3DifferentFixed | 0x00001000;
constexpr NEON3DifferentOp NEON_SADDW2 = NEON_SADDW | NEON_Q;
+constexpr NEON3DifferentOp NEON_SDOT = NEON3DifferentDot | 0x00009400;
constexpr NEON3DifferentOp NEON_SMLAL = NEON3DifferentFixed | 0x00008000;
constexpr NEON3DifferentOp NEON_SMLAL2 = NEON_SMLAL | NEON_Q;
constexpr NEON3DifferentOp NEON_SMLSL = NEON3DifferentFixed | 0x0000A000;
diff --git a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
index 99f87ab917..aed1085357 100644
--- a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
@@ -76,6 +76,11 @@ void Decoder<V>::Decode(Instruction* instr) {
// Load/store register immediate post-index.
// Load/store register immediate pre-index.
// Load/store register offset.
+ // Load/store exclusive.
+ // Load/store ordered.
+ // Compare and swap [Armv8.1].
+ // Compare and swap pair [Armv8.1].
+ // Atomic memory operations [Armv8.1].
// C,D: Load/store register pair offset.
// Load/store register pair pre-index.
// Load/store register unsigned immediate.
@@ -218,15 +223,17 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) {
- if (instr->Mask(0xA08000) == 0x800000 ||
- instr->Mask(0xA00000) == 0xA00000) {
+ if (instr->Mask(0xA08000) == 0x800000) {
V::VisitUnallocated(instr);
- } else if (instr->Mask(0x808000) == 0) {
+ } else if (instr->Mask(0xA08000) == 0) {
// Load/store exclusive instructions without acquire/release are unimplemented.
V::VisitUnimplemented(instr);
} else {
V::VisitLoadStoreAcquireRelease(instr);
}
+ } else {
+ // This is handled by DecodeNEONLoadStore().
+ UNREACHABLE();
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
@@ -253,8 +260,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
V::VisitLoadLiteral(instr);
}
} else {
- if ((instr->Mask(0x84C00000) == 0x80C00000) ||
- (instr->Mask(0x44800000) == 0x44800000) ||
+ if ((instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
V::VisitUnallocated(instr);
} else {
@@ -294,7 +300,21 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
V::VisitLoadStoreRegisterOffset(instr);
}
} else {
- V::VisitUnallocated(instr);
+ if ((instr->Bits(11, 10) == 0x0) &&
+ (instr->Bits(26, 25) == 0x0)) {
+ if ((instr->Bit(15) == 1) &&
+ ((instr->Bits(14, 12) == 0x1) || (instr->Bit(13) == 1) ||
+ (instr->Bits(14, 12) == 0x5) ||
+ ((instr->Bits(14, 12) == 0x4) &&
+ ((instr->Bit(23) == 0) ||
+ (instr->Bits(23, 22) == 0x3))))) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAtomicMemory(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
}
}
}
diff --git a/deps/v8/src/codegen/arm64/decoder-arm64.h b/deps/v8/src/codegen/arm64/decoder-arm64.h
index 7621c516ce..2070abcdaf 100644
--- a/deps/v8/src/codegen/arm64/decoder-arm64.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64.h
@@ -39,6 +39,7 @@ namespace internal {
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
+ V(AtomicMemory) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
index 3b1801e031..78027177da 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
@@ -101,6 +101,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return x4; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return x5; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return x5; }
// static
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 0c7e735753..9694fa77e0 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -21,26 +21,26 @@ MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-void TurboAssembler::And(const Register& rd, const Register& rn,
+void MacroAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, AND);
}
-void TurboAssembler::Ands(const Register& rd, const Register& rn,
+void MacroAssembler::Ands(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ANDS);
}
-void TurboAssembler::Tst(const Register& rn, const Operand& operand) {
+void MacroAssembler::Tst(const Register& rn, const Operand& operand) {
DCHECK(allow_macro_instructions());
LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
}
-void TurboAssembler::Bic(const Register& rd, const Register& rn,
+void MacroAssembler::Bic(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -54,35 +54,35 @@ void MacroAssembler::Bics(const Register& rd, const Register& rn,
LogicalMacro(rd, rn, operand, BICS);
}
-void TurboAssembler::Orr(const Register& rd, const Register& rn,
+void MacroAssembler::Orr(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ORR);
}
-void TurboAssembler::Orn(const Register& rd, const Register& rn,
+void MacroAssembler::Orn(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ORN);
}
-void TurboAssembler::Eor(const Register& rd, const Register& rn,
+void MacroAssembler::Eor(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, EOR);
}
-void TurboAssembler::Eon(const Register& rd, const Register& rn,
+void MacroAssembler::Eon(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, EON);
}
-void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
+void MacroAssembler::Ccmp(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
@@ -92,7 +92,7 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
}
}
-void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
+void MacroAssembler::CcmpTagged(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
if (COMPRESS_POINTERS_BOOL) {
Ccmp(rn.W(), operand.ToW(), nzcv, cond);
@@ -101,7 +101,7 @@ void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
}
}
-void TurboAssembler::Ccmn(const Register& rn, const Operand& operand,
+void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
@@ -111,7 +111,7 @@ void TurboAssembler::Ccmn(const Register& rn, const Operand& operand,
}
}
-void TurboAssembler::Add(const Register& rd, const Register& rn,
+void MacroAssembler::Add(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
@@ -122,7 +122,7 @@ void TurboAssembler::Add(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::Adds(const Register& rd, const Register& rn,
+void MacroAssembler::Adds(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
@@ -133,7 +133,7 @@ void TurboAssembler::Adds(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::Sub(const Register& rd, const Register& rn,
+void MacroAssembler::Sub(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
@@ -144,7 +144,7 @@ void TurboAssembler::Sub(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::Subs(const Register& rd, const Register& rn,
+void MacroAssembler::Subs(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
@@ -155,17 +155,17 @@ void TurboAssembler::Subs(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::Cmn(const Register& rn, const Operand& operand) {
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
DCHECK(allow_macro_instructions());
Adds(AppropriateZeroRegFor(rn), rn, operand);
}
-void TurboAssembler::Cmp(const Register& rn, const Operand& operand) {
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
DCHECK(allow_macro_instructions());
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
-void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) {
+void MacroAssembler::CmpTagged(const Register& rn, const Operand& operand) {
if (COMPRESS_POINTERS_BOOL) {
Cmp(rn.W(), operand.ToW());
} else {
@@ -173,7 +173,7 @@ void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) {
}
}
-void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
+void MacroAssembler::Neg(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
if (operand.IsImmediate()) {
@@ -183,12 +183,12 @@ void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
}
}
-void TurboAssembler::Negs(const Register& rd, const Operand& operand) {
+void MacroAssembler::Negs(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
Subs(rd, AppropriateZeroRegFor(rd), operand);
}
-void TurboAssembler::Adc(const Register& rd, const Register& rn,
+void MacroAssembler::Adc(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -230,14 +230,14 @@ void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) {
Sbcs(rd, zr, operand);
}
-void TurboAssembler::Mvn(const Register& rd, uint64_t imm) {
+void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Mov(rd, ~imm);
}
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
- void TurboAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
DCHECK(allow_macro_instructions()); \
LoadStoreMacro(REG, addr, OP); \
}
@@ -245,7 +245,7 @@ LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
- void TurboAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
+ void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
const MemOperand& addr) { \
DCHECK(allow_macro_instructions()); \
LoadStorePairMacro(REG, REG2, addr, OP); \
@@ -253,49 +253,94 @@ LS_MACRO_LIST(DEFINE_FUNCTION)
LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
-#define DECLARE_FUNCTION(FN, OP) \
- void TurboAssembler::FN(const Register& rt, const Register& rn) { \
+#define DEFINE_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rt, const Register& rn) { \
DCHECK(allow_macro_instructions()); \
OP(rt, rn); \
}
-LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
+LDA_STL_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
-#define DECLARE_FUNCTION(FN, OP) \
+#define DEFINE_FUNCTION(FN, OP) \
void MacroAssembler::FN(const Register& rs, const Register& rt, \
const Register& rn) { \
DCHECK(allow_macro_instructions()); \
OP(rs, rt, rn); \
}
-STLX_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
+STLX_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+#define DEFINE_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(allow_macro_instructions()); \
+ OP(rs, rt, src); \
+ }
+CAS_SINGLE_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+#define DEFINE_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rs, const Register& rs2, \
+ const Register& rt, const Register& rt2, \
+ const MemOperand& src) { \
+ DCHECK(allow_macro_instructions()); \
+ OP(rs, rs2, rt, rt2, src); \
+ }
+CAS_PAIR_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
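Assuming CAS_SINGLE_MACRO_LIST contains an entry such as V(Casal, casal) (the list itself is not shown in this hunk), each expansion is a thin checked wrapper around the corresponding assembler method, along the lines of:

void MacroAssembler::Casal(const Register& rs, const Register& rt,
                           const MemOperand& src) {
  DCHECK(allow_macro_instructions());
  casal(rs, rt, src);
}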
+#define DEFINE_LOAD_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(allow_macro_instructions_); \
+ OP(rs, rt, src); \
+ }
+#define DEFINE_STORE_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rs, const MemOperand& src) { \
+ DCHECK(allow_macro_instructions_); \
+ OP(rs, src); \
+ }
+
+ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
+ DEFINE_LOAD_FUNCTION, Ld, ld)
+ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
+ DEFINE_STORE_FUNCTION, St, st)
+
+#define DEFINE_SWP_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rs, const Register& rt, \
+ const MemOperand& src) { \
+ DCHECK(allow_macro_instructions_); \
+ OP(rs, rt, src); \
+ }
-void TurboAssembler::Asr(const Register& rd, const Register& rn,
+ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_SWP_FUNCTION, Swp, swp)
+
+void MacroAssembler::Asr(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
asr(rd, rn, shift);
}
-void TurboAssembler::Asr(const Register& rd, const Register& rn,
+void MacroAssembler::Asr(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
asrv(rd, rn, rm);
}
-void TurboAssembler::B(Label* label) {
+void MacroAssembler::B(Label* label) {
DCHECK(allow_macro_instructions());
b(label);
CheckVeneerPool(false, false);
}
-void TurboAssembler::B(Condition cond, Label* label) {
+void MacroAssembler::B(Condition cond, Label* label) {
DCHECK(allow_macro_instructions());
B(label, cond);
}
-void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb,
+void MacroAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -309,7 +354,7 @@ void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb,
bfxil(rd, rn, lsb, width);
}
-void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) {
+void MacroAssembler::Bind(Label* label, BranchTargetIdentifier id) {
DCHECK(allow_macro_instructions());
if (id == BranchTargetIdentifier::kNone) {
bind(label);
@@ -326,21 +371,21 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) {
}
}
-void TurboAssembler::CodeEntry() { CallTarget(); }
+void MacroAssembler::CodeEntry() { CallTarget(); }
-void TurboAssembler::ExceptionHandler() { JumpTarget(); }
+void MacroAssembler::ExceptionHandler() { JumpTarget(); }
-void TurboAssembler::BindExceptionHandler(Label* label) {
+void MacroAssembler::BindExceptionHandler(Label* label) {
BindJumpTarget(label);
}
-void TurboAssembler::JumpTarget() {
+void MacroAssembler::JumpTarget() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
bti(BranchTargetIdentifier::kBtiJump);
#endif
}
-void TurboAssembler::BindJumpTarget(Label* label) {
+void MacroAssembler::BindJumpTarget(Label* label) {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Bind(label, BranchTargetIdentifier::kBtiJump);
#else
@@ -348,19 +393,27 @@ void TurboAssembler::BindJumpTarget(Label* label) {
#endif
}
-void TurboAssembler::CallTarget() {
+void MacroAssembler::CallTarget() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
bti(BranchTargetIdentifier::kBtiCall);
#endif
}
-void TurboAssembler::JumpOrCallTarget() {
+void MacroAssembler::JumpOrCallTarget() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
bti(BranchTargetIdentifier::kBtiJumpCall);
#endif
}
-void TurboAssembler::BindJumpOrCallTarget(Label* label) {
+void MacroAssembler::BindCallTarget(Label* label) {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Bind(label, BranchTargetIdentifier::kBtiCall);
+#else
+ Bind(label);
+#endif
+}
+
+void MacroAssembler::BindJumpOrCallTarget(Label* label) {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Bind(label, BranchTargetIdentifier::kBtiJumpCall);
#else
@@ -368,24 +421,24 @@ void TurboAssembler::BindJumpOrCallTarget(Label* label) {
#endif
}
-void TurboAssembler::Bl(Label* label) {
+void MacroAssembler::Bl(Label* label) {
DCHECK(allow_macro_instructions());
bl(label);
}
-void TurboAssembler::Blr(const Register& xn) {
+void MacroAssembler::Blr(const Register& xn) {
DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
blr(xn);
}
-void TurboAssembler::Br(const Register& xn) {
+void MacroAssembler::Br(const Register& xn) {
DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
br(xn);
}
-void TurboAssembler::Brk(int code) {
+void MacroAssembler::Brk(int code) {
DCHECK(allow_macro_instructions());
brk(code);
}
@@ -406,19 +459,19 @@ void MacroAssembler::Cinv(const Register& rd, const Register& rn,
cinv(rd, rn, cond);
}
-void TurboAssembler::Cls(const Register& rd, const Register& rn) {
+void MacroAssembler::Cls(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
cls(rd, rn);
}
-void TurboAssembler::Clz(const Register& rd, const Register& rn) {
+void MacroAssembler::Clz(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
clz(rd, rn);
}
-void TurboAssembler::Cneg(const Register& rd, const Register& rn,
+void MacroAssembler::Cneg(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -437,7 +490,7 @@ void MacroAssembler::CzeroX(const Register& rd, Condition cond) {
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
-void TurboAssembler::CmovX(const Register& rd, const Register& rn,
+void MacroAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
@@ -448,26 +501,26 @@ void TurboAssembler::CmovX(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::Csdb() {
+void MacroAssembler::Csdb() {
DCHECK(allow_macro_instructions());
csdb();
}
-void TurboAssembler::Cset(const Register& rd, Condition cond) {
+void MacroAssembler::Cset(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
cset(rd, cond);
}
-void TurboAssembler::Csetm(const Register& rd, Condition cond) {
+void MacroAssembler::Csetm(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csetm(rd, cond);
}
-void TurboAssembler::Csinc(const Register& rd, const Register& rn,
+void MacroAssembler::Csinc(const Register& rd, const Register& rn,
const Register& rm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -491,17 +544,17 @@ void MacroAssembler::Csneg(const Register& rd, const Register& rn,
csneg(rd, rn, rm, cond);
}
-void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dmb(domain, type);
}
-void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dsb(domain, type);
}
-void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) {
+void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
DCHECK(allow_macro_instructions());
debug(message, code, params);
}
@@ -513,25 +566,25 @@ void MacroAssembler::Extr(const Register& rd, const Register& rn,
extr(rd, rn, rm, lsb);
}
-void TurboAssembler::Fabs(const VRegister& fd, const VRegister& fn) {
+void MacroAssembler::Fabs(const VRegister& fd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
fabs(fd, fn);
}
-void TurboAssembler::Fadd(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fadd(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
DCHECK(allow_macro_instructions());
fadd(fd, fn, fm);
}
-void TurboAssembler::Fccmp(const VRegister& fn, const VRegister& fm,
+void MacroAssembler::Fccmp(const VRegister& fn, const VRegister& fm,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
fccmp(fn, fm, nzcv, cond);
}
-void TurboAssembler::Fccmp(const VRegister& fn, const double value,
+void MacroAssembler::Fccmp(const VRegister& fn, const double value,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
UseScratchRegisterScope temps(this);
@@ -540,12 +593,12 @@ void TurboAssembler::Fccmp(const VRegister& fn, const double value,
Fccmp(fn, tmp, nzcv, cond);
}
-void TurboAssembler::Fcmp(const VRegister& fn, const VRegister& fm) {
+void MacroAssembler::Fcmp(const VRegister& fn, const VRegister& fm) {
DCHECK(allow_macro_instructions());
fcmp(fn, fm);
}
-void TurboAssembler::Fcmp(const VRegister& fn, double value) {
+void MacroAssembler::Fcmp(const VRegister& fn, double value) {
DCHECK(allow_macro_instructions());
if (value != 0.0) {
UseScratchRegisterScope temps(this);
@@ -557,66 +610,66 @@ void TurboAssembler::Fcmp(const VRegister& fn, double value) {
}
}
-void TurboAssembler::Fcsel(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn,
const VRegister& fm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
fcsel(fd, fn, fm, cond);
}
-void TurboAssembler::Fcvt(const VRegister& fd, const VRegister& fn) {
+void MacroAssembler::Fcvt(const VRegister& fd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
fcvt(fd, fn);
}
-void TurboAssembler::Fcvtas(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtas(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtas(rd, fn);
}
-void TurboAssembler::Fcvtau(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtau(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtau(rd, fn);
}
-void TurboAssembler::Fcvtms(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtms(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtms(rd, fn);
}
-void TurboAssembler::Fcvtmu(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtmu(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtmu(rd, fn);
}
-void TurboAssembler::Fcvtns(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtns(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtns(rd, fn);
}
-void TurboAssembler::Fcvtnu(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtnu(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtnu(rd, fn);
}
-void TurboAssembler::Fcvtzs(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtzs(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtzs(rd, fn);
}
-void TurboAssembler::Fcvtzu(const Register& rd, const VRegister& fn) {
+void MacroAssembler::Fcvtzu(const Register& rd, const VRegister& fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtzu(rd, fn);
}
-void TurboAssembler::Fdiv(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fdiv(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
DCHECK(allow_macro_instructions());
fdiv(fd, fn, fm);
@@ -628,7 +681,7 @@ void MacroAssembler::Fmadd(const VRegister& fd, const VRegister& fn,
fmadd(fd, fn, fm, fa);
}
-void TurboAssembler::Fmax(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fmax(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
DCHECK(allow_macro_instructions());
fmax(fd, fn, fm);
@@ -640,7 +693,7 @@ void MacroAssembler::Fmaxnm(const VRegister& fd, const VRegister& fn,
fmaxnm(fd, fn, fm);
}
-void TurboAssembler::Fmin(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fmin(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
DCHECK(allow_macro_instructions());
fmin(fd, fn, fm);
@@ -652,7 +705,7 @@ void MacroAssembler::Fminnm(const VRegister& fd, const VRegister& fn,
fminnm(fd, fn, fm);
}
-void TurboAssembler::Fmov(VRegister fd, VRegister fn) {
+void MacroAssembler::Fmov(VRegister fd, VRegister fn) {
DCHECK(allow_macro_instructions());
// Only emit an instruction if fd and fn are different, and they are both D
// registers. fmov(s0, s0) is not a no-op because it clears the top word of
@@ -663,12 +716,12 @@ void TurboAssembler::Fmov(VRegister fd, VRegister fn) {
}
}
-void TurboAssembler::Fmov(VRegister fd, Register rn) {
+void MacroAssembler::Fmov(VRegister fd, Register rn) {
DCHECK(allow_macro_instructions());
fmov(fd, rn);
}
-void TurboAssembler::Fmov(VRegister vd, double imm) {
+void MacroAssembler::Fmov(VRegister vd, double imm) {
DCHECK(allow_macro_instructions());
if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
@@ -696,7 +749,7 @@ void TurboAssembler::Fmov(VRegister vd, double imm) {
}
}
-void TurboAssembler::Fmov(VRegister vd, float imm) {
+void MacroAssembler::Fmov(VRegister vd, float imm) {
DCHECK(allow_macro_instructions());
if (vd.Is1D() || vd.Is2D()) {
Fmov(vd, static_cast<double>(imm));
@@ -723,7 +776,7 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
}
}
-void TurboAssembler::Fmov(Register rd, VRegister fn) {
+void MacroAssembler::Fmov(Register rd, VRegister fn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fmov(rd, fn);
@@ -735,7 +788,7 @@ void MacroAssembler::Fmsub(const VRegister& fd, const VRegister& fn,
fmsub(fd, fn, fm, fa);
}
-void TurboAssembler::Fmul(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fmul(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
DCHECK(allow_macro_instructions());
fmul(fd, fn, fm);
@@ -753,7 +806,7 @@ void MacroAssembler::Fnmsub(const VRegister& fd, const VRegister& fn,
fnmsub(fd, fn, fm, fa);
}
-void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn,
+void MacroAssembler::Fsub(const VRegister& fd, const VRegister& fn,
const VRegister& fm) {
DCHECK(allow_macro_instructions());
fsub(fd, fn, fm);
@@ -769,52 +822,52 @@ void MacroAssembler::Hlt(int code) {
hlt(code);
}
-void TurboAssembler::Isb() {
+void MacroAssembler::Isb() {
DCHECK(allow_macro_instructions());
isb();
}
-void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
+void MacroAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
DCHECK(allow_macro_instructions());
ldr(rt, operand);
}
-void TurboAssembler::Lsl(const Register& rd, const Register& rn,
+void MacroAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lsl(rd, rn, shift);
}
-void TurboAssembler::Lsl(const Register& rd, const Register& rn,
+void MacroAssembler::Lsl(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lslv(rd, rn, rm);
}
-void TurboAssembler::Lsr(const Register& rd, const Register& rn,
+void MacroAssembler::Lsr(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lsr(rd, rn, shift);
}
-void TurboAssembler::Lsr(const Register& rd, const Register& rn,
+void MacroAssembler::Lsr(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lsrv(rd, rn, rm);
}
-void TurboAssembler::Madd(const Register& rd, const Register& rn,
+void MacroAssembler::Madd(const Register& rd, const Register& rn,
const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
madd(rd, rn, rm, ra);
}
-void TurboAssembler::Mneg(const Register& rd, const Register& rn,
+void MacroAssembler::Mneg(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -827,44 +880,38 @@ void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
movk(rd, imm, shift);
}
-void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
DCHECK(allow_macro_instructions());
DCHECK(!rt.IsZero());
mrs(rt, sysreg);
}
-void TurboAssembler::Msr(SystemRegister sysreg, const Register& rt) {
+void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
DCHECK(allow_macro_instructions());
msr(sysreg, rt);
}
-void TurboAssembler::Msub(const Register& rd, const Register& rn,
+void MacroAssembler::Msub(const Register& rd, const Register& rn,
const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
msub(rd, rn, rm, ra);
}
-void TurboAssembler::Mul(const Register& rd, const Register& rn,
+void MacroAssembler::Mul(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
mul(rd, rn, rm);
}
-void TurboAssembler::Rbit(const Register& rd, const Register& rn) {
+void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rbit(rd, rn);
}
-void TurboAssembler::Rev(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions());
- DCHECK(!rd.IsZero());
- rev(rd, rn);
-}
-
-void TurboAssembler::Ret(const Register& xn) {
+void MacroAssembler::Ret(const Register& xn) {
DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
ret(xn);
@@ -877,46 +924,46 @@ void MacroAssembler::Rev(const Register& rd, const Register& rn) {
rev(rd, rn);
}
-void TurboAssembler::Rev16(const Register& rd, const Register& rn) {
+void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rev16(rd, rn);
}
-void TurboAssembler::Rev32(const Register& rd, const Register& rn) {
+void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rev32(rd, rn);
}
-void TurboAssembler::Ror(const Register& rd, const Register& rs,
+void MacroAssembler::Ror(const Register& rd, const Register& rs,
unsigned shift) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
ror(rd, rs, shift);
}
-void TurboAssembler::Ror(const Register& rd, const Register& rn,
+void MacroAssembler::Ror(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rorv(rd, rn, rm);
}
-void TurboAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb,
+void MacroAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sbfx(rd, rn, lsb, width);
}
-void TurboAssembler::Scvtf(const VRegister& fd, const Register& rn,
+void MacroAssembler::Scvtf(const VRegister& fd, const Register& rn,
unsigned fbits) {
DCHECK(allow_macro_instructions());
scvtf(fd, rn, fbits);
}
-void TurboAssembler::Sdiv(const Register& rd, const Register& rn,
+void MacroAssembler::Sdiv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -937,80 +984,80 @@ void MacroAssembler::Smsubl(const Register& rd, const Register& rn,
smsubl(rd, rn, rm, ra);
}
-void TurboAssembler::Smull(const Register& rd, const Register& rn,
+void MacroAssembler::Smull(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smull(rd, rn, rm);
}
-void TurboAssembler::Smulh(const Register& rd, const Register& rn,
+void MacroAssembler::Smulh(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smulh(rd, rn, rm);
}
-void TurboAssembler::Umull(const Register& rd, const Register& rn,
+void MacroAssembler::Umull(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, xzr);
}
-void TurboAssembler::Umulh(const Register& rd, const Register& rn,
+void MacroAssembler::Umulh(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umulh(rd, rn, rm);
}
-void TurboAssembler::Sxtb(const Register& rd, const Register& rn) {
+void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sxtb(rd, rn);
}
-void TurboAssembler::Sxth(const Register& rd, const Register& rn) {
+void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sxth(rd, rn);
}
-void TurboAssembler::Sxtw(const Register& rd, const Register& rn) {
+void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sxtw(rd, rn);
}
-void TurboAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
+void MacroAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
ubfiz(rd, rn, lsb, width);
}
-void TurboAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
+void MacroAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sbfiz(rd, rn, lsb, width);
}
-void TurboAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb,
+void MacroAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
ubfx(rd, rn, lsb, width);
}
-void TurboAssembler::Ucvtf(const VRegister& fd, const Register& rn,
+void MacroAssembler::Ucvtf(const VRegister& fd, const Register& rn,
unsigned fbits) {
DCHECK(allow_macro_instructions());
ucvtf(fd, rn, fbits);
}
-void TurboAssembler::Udiv(const Register& rd, const Register& rn,
+void MacroAssembler::Udiv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -1031,41 +1078,41 @@ void MacroAssembler::Umsubl(const Register& rd, const Register& rn,
umsubl(rd, rn, rm, ra);
}
-void TurboAssembler::Uxtb(const Register& rd, const Register& rn) {
+void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
uxtb(rd, rn);
}
-void TurboAssembler::Uxth(const Register& rd, const Register& rn) {
+void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
uxth(rd, rn);
}
-void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
+void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
uxtw(rd, rn);
}
-void TurboAssembler::InitializeRootRegister() {
+void MacroAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Mov(kRootRegister, Operand(isolate_root));
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset());
#endif
}
-void TurboAssembler::SmiTag(Register dst, Register src) {
+void MacroAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Lsl(dst, src, kSmiShift);
}
-void TurboAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
-void TurboAssembler::SmiUntag(Register dst, Register src) {
+void MacroAssembler::SmiUntag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (v8_flags.enable_slow_asserts) {
AssertSmi(src);
@@ -1078,7 +1125,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
}
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
DCHECK(dst.Is64Bits());
if (SmiValuesAre32Bits()) {
if (src.IsImmediateOffset() && src.shift_amount() == 0) {
@@ -1104,22 +1151,24 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
-void TurboAssembler::SmiToInt32(Register smi) {
- DCHECK(smi.Is64Bits());
+void MacroAssembler::SmiToInt32(Register smi) { SmiToInt32(smi, smi); }
+
+void MacroAssembler::SmiToInt32(Register dst, Register smi) {
+ DCHECK(dst.Is64Bits());
if (v8_flags.enable_slow_asserts) {
AssertSmi(smi);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
- Asr(smi.W(), smi.W(), kSmiShift);
+ Asr(dst.W(), smi.W(), kSmiShift);
} else {
- Lsr(smi, smi, kSmiShift);
+ Lsr(dst, smi, kSmiShift);
}
}
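The new two-register overload makes it possible to keep the tagged value live while producing the untagged result elsewhere. A usage sketch with arbitrary register choices, where masm stands for a MacroAssembler*:

// x1 still holds the Smi afterwards; the int32 result lands in the low word
// of x0 under pointer compression, or in x0 otherwise.
masm->SmiToInt32(x0, x1);
masm->SmiToInt32(x2);  // Single-register form untags in place.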
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
static_assert((kSmiTagSize == 1) && (kSmiTag == 0));
// Check if the tag bit is set.
@@ -1134,11 +1183,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
}
}
-void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CompareAndBranch(x, y, eq, dest);
}
-void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
CompareAndBranch(x, y, lt, dest);
}
@@ -1146,10 +1195,16 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
JumpIfSmi(value, nullptr, not_smi_label);
}
-void TurboAssembler::jmp(Label* L) { B(L); }
+inline void MacroAssembler::AssertFeedbackVector(Register object) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ AssertFeedbackVector(object, scratch);
+}
+
+void MacroAssembler::jmp(Label* L) { B(L); }
-template <TurboAssembler::StoreLRMode lr_mode>
-void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+template <MacroAssembler::StoreLRMode lr_mode>
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr) ||
@@ -1170,8 +1225,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
PushHelper(count, size, src0, src1, src2, src3);
}
-template <TurboAssembler::StoreLRMode lr_mode>
-void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
+template <MacroAssembler::StoreLRMode lr_mode>
+void MacroAssembler::Push(const Register& src0, const VRegister& src1) {
DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr)));
DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr)));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
@@ -1189,8 +1244,8 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
-template <TurboAssembler::LoadLRMode lr_mode>
-void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+template <MacroAssembler::LoadLRMode lr_mode>
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
@@ -1216,8 +1271,8 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
#endif
}
-template <TurboAssembler::StoreLRMode lr_mode>
-void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
+template <MacroAssembler::StoreLRMode lr_mode>
+void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kSignLR), (src == lr));
DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
@@ -1236,8 +1291,8 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Str(src, MemOperand(sp, offset));
}
-template <TurboAssembler::LoadLRMode lr_mode>
-void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+template <MacroAssembler::LoadLRMode lr_mode>
+void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (v8_flags.debug_code) {
@@ -1256,7 +1311,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
#endif
}
-void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
+void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
@@ -1274,7 +1329,8 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
Sub(sp, sp, size);
}
-void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
+void MacroAssembler::Claim(const Register& count, uint64_t unit_size,
+ bool assume_sp_aligned) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
@@ -1302,7 +1358,13 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
Bind(&touch_next_page);
Sub(sp, sp, kStackPageSize);
// Just to touch the page, before we increment further.
- Str(xzr, MemOperand(sp));
+ if (assume_sp_aligned) {
+ Str(xzr, MemOperand(sp));
+ } else {
+ Register sp_copy = temps.AcquireX();
+ Mov(sp_copy, sp);
+ Str(xzr, MemOperand(sp_copy));
+ }
Sub(bytes_scratch, bytes_scratch, kStackPageSize);
Bind(&check_offset);
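The new assume_sp_aligned parameter matters for the page-touch store above: storing through sp presumably requires sp to be 16-byte aligned while hardware stack-alignment checking is enabled, so callers that cannot guarantee alignment route the store through a scratch copy of sp. A usage sketch with illustrative names:

// Reserve a byte count that is not known to keep sp 16-byte aligned.
masm->Claim(byte_count, /*unit_size=*/1, /*assume_sp_aligned=*/false);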
@@ -1315,7 +1377,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
#endif
}
-void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
+void MacroAssembler::Drop(int64_t count, uint64_t unit_size) {
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
@@ -1327,7 +1389,7 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
DCHECK_EQ(size % 16, 0);
}
-void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
+void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
@@ -1342,7 +1404,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
Add(sp, sp, size);
}
-void TurboAssembler::DropArguments(const Register& count,
+void MacroAssembler::DropArguments(const Register& count,
ArgumentsCountMode mode) {
int extra_slots = 1; // Padding slot.
if (mode == kCountExcludesReceiver) {
@@ -1356,7 +1418,7 @@ void TurboAssembler::DropArguments(const Register& count,
Drop(tmp, kXRegSize);
}
-void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) {
+void MacroAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) {
if (mode == kCountExcludesReceiver) {
// Add a slot for the receiver.
++count;
@@ -1364,13 +1426,13 @@ void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) {
Drop(RoundUp(count, 2), kXRegSize);
}
-void TurboAssembler::DropSlots(int64_t count) {
+void MacroAssembler::DropSlots(int64_t count) {
Drop(RoundUp(count, 2), kXRegSize);
}
-void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
+void MacroAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
-void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
+void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label) {
if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne) || (cond == hi) || (cond == ls))) {
@@ -1385,7 +1447,7 @@ void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
}
}
-void TurboAssembler::CompareTaggedAndBranch(const Register& lhs,
+void MacroAssembler::CompareTaggedAndBranch(const Register& lhs,
const Operand& rhs, Condition cond,
Label* label) {
if (COMPRESS_POINTERS_BOOL) {
@@ -1395,7 +1457,7 @@ void TurboAssembler::CompareTaggedAndBranch(const Register& lhs,
}
}
-void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
+void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
@@ -1408,7 +1470,7 @@ void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
}
}
-void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
+void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
@@ -1421,6 +1483,10 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
}
}
+void MacroAssembler::MoveHeapNumber(Register dst, double value) {
+ Mov(dst, Operand::EmbeddedHeapNumber(value));
+}
+
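Usage is a one-liner; the destination register and the constant below are illustrative:

masm->MoveHeapNumber(x2, 0.5);  // x2 receives a reference to a heap number for 0.5.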
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 25970c1cc3..2e54ce8bd1 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -39,9 +39,9 @@
namespace v8 {
namespace internal {
-CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
+CPURegList MacroAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
-CPURegList TurboAssembler::DefaultFPTmpList() {
+CPURegList MacroAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
@@ -57,7 +57,7 @@ constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits;
} // namespace
-void TurboAssembler::PushCPURegList(CPURegList registers) {
+void MacroAssembler::PushCPURegList(CPURegList registers) {
// If LR was stored here, we would need to sign it if
// V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
DCHECK(!registers.IncludesAliasOf(lr));
@@ -77,7 +77,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
}
}
-void TurboAssembler::PopCPURegList(CPURegList registers) {
+void MacroAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
DCHECK_EQ(0, (size * registers.Count()) % 16);
@@ -97,7 +97,49 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
}
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+void MacroAssembler::PushAll(RegList reglist) {
+ if (reglist.Count() % 2 != 0) {
+ DCHECK(!reglist.has(xzr));
+ reglist.set(xzr);
+ }
+
+ CPURegList registers(kXRegSizeInBits, reglist);
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // If LR was stored here, we would need to sign it if
+ // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
+ DCHECK(!registers.IncludesAliasOf(lr));
+
+ while (!registers.IsEmpty()) {
+ const CPURegister& src0 = registers.PopLowestIndex();
+ const CPURegister& src1 = registers.PopLowestIndex();
+ stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
+ }
+}
+
+void MacroAssembler::PopAll(RegList reglist) {
+ if (reglist.Count() % 2 != 0) {
+ DCHECK(!reglist.has(xzr));
+ reglist.set(xzr);
+ }
+
+ CPURegList registers(kXRegSizeInBits, reglist);
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // If LR was loaded here, we would need to authenticate it if
+ // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
+ DCHECK(!registers.IncludesAliasOf(lr));
+
+ while (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopHighestIndex();
+ const CPURegister& dst1 = registers.PopHighestIndex();
+ ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
+ }
+}
+
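A usage sketch for the new bulk helpers (the wrapper function is illustrative): an odd register count is padded with xzr internally, so sp stays 16-byte aligned across the pair.

void SpillAcrossRuntimeCall(MacroAssembler* masm, RegList live) {
  masm->PushAll(live);
  // ... emit the call that clobbers caller-saved registers here ...
  masm->PopAll(live);
}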
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
auto list = kCallerSaved;
list.Remove(exclusion);
@@ -113,7 +155,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
ASM_CODE_COMMENT(this);
auto list = kCallerSaved;
@@ -133,7 +175,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
@@ -153,7 +195,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
return bytes;
}
-void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
+void MacroAssembler::LogicalMacro(const Register& rd, const Register& rn,
const Operand& operand, LogicalOp op) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -261,7 +303,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
DCHECK(allow_macro_instructions());
DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
DCHECK(!rd.IsZero());
@@ -337,7 +379,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
}
}
-void TurboAssembler::Mov(const Register& rd, const Operand& operand,
+void MacroAssembler::Mov(const Register& rd, const Operand& operand,
DiscardMoveMode discard_mode) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -405,11 +447,11 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
}
-void TurboAssembler::Mov(const Register& rd, Smi smi) {
+void MacroAssembler::Mov(const Register& rd, Smi smi) {
return Mov(rd, Operand(smi));
}
-void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
+void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xFF);
int byte2 = ((imm >> 8) & 0xFF);
@@ -431,7 +473,7 @@ void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
}
}
-void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
+void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint32(imm));
uint8_t bytes[sizeof(imm)];
@@ -508,7 +550,7 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
}
}
-void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
+void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
// All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
@@ -544,7 +586,7 @@ void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
}
}
-void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
+void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
int shift_amount) {
DCHECK(allow_macro_instructions());
if (shift_amount != 0 || shift != LSL) {
@@ -565,7 +607,7 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
}
}
-void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
+void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
// TODO(v8:11033): Move 128-bit values in a more efficient way.
DCHECK(vd.Is128Bits());
Movi(vd.V2D(), lo);
@@ -577,7 +619,7 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
}
}
-void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
if (operand.NeedsRelocation(this)) {
@@ -600,7 +642,7 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
}
}
-unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) {
+unsigned MacroAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) {
DCHECK_EQ(reg_size % 16, 0);
#define HALFWORD(idx) (((imm >> ((idx)*16)) & 0xFFFF) ? 1u : 0u)
@@ -618,18 +660,18 @@ unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) {
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
-bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
return CountSetHalfWords(imm, reg_size) <= 1;
}
// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, e.g. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
-bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
return IsImmMovz(~imm, reg_size);
}
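// Worked example: 0x0000123400000000 has exactly one non-zero half-word, so
// IsImmMovz() holds and a single `movz xd, #0x1234, lsl #32` materializes it.
// For 0xFFFF1234FFFFFFFF the inverted value 0x0000EDCB00000000 has exactly one
// non-zero half-word, so IsImmMovn() holds and `movn xd, #0xedcb, lsl #32` is
// used instead.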
-void TurboAssembler::ConditionalCompareMacro(const Register& rn,
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
const Operand& operand,
StatusFlags nzcv, Condition cond,
ConditionalCompareOp op) {
@@ -657,7 +699,7 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn,
}
}
-void TurboAssembler::Csel(const Register& rd, const Register& rn,
+void MacroAssembler::Csel(const Register& rd, const Register& rn,
const Operand& operand, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -691,7 +733,7 @@ void TurboAssembler::Csel(const Register& rd, const Register& rn,
}
}
-bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst,
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
int64_t imm) {
unsigned n, imm_s, imm_r;
int reg_size = dst.SizeInBits();
@@ -713,7 +755,7 @@ bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst,
return false;
}
-Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
int64_t imm,
PreShiftImmMode mode) {
int reg_size = dst.SizeInBits();
@@ -763,7 +805,7 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
return Operand(dst);
}
-void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
+void MacroAssembler::AddSubMacro(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd == rn && rd.Is64Bits() && rn.Is64Bits() &&
@@ -774,7 +816,9 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+ Register temp = temps.AcquireSameSizeAs(rn);
+ DCHECK_IMPLIES(temp.IsW(), RelocInfo::IsCompressedEmbeddedObject(
+ operand.ImmediateRMode()));
Ldr(temp, operand.immediate());
AddSubMacro(rd, rn, temp, S, op);
} else if ((operand.IsImmediate() &&
@@ -809,7 +853,7 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
}
}
-void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
const Register& rn,
const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op) {
@@ -858,7 +902,7 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
}
}
-void TurboAssembler::LoadStoreMacro(const CPURegister& rt,
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
const MemOperand& addr, LoadStoreOp op) {
int64_t offset = addr.offset();
unsigned size = CalcLSDataSize(op);
@@ -888,7 +932,7 @@ void TurboAssembler::LoadStoreMacro(const CPURegister& rt,
}
}
-void TurboAssembler::LoadStorePairMacro(const CPURegister& rt,
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairOp op) {
@@ -921,7 +965,7 @@ void TurboAssembler::LoadStorePairMacro(const CPURegister& rt,
}
}
-bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
+bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
Label* label, ImmBranchType b_type) {
bool need_longer_range = false;
// There are two situations in which we care about the offset being out of
@@ -944,7 +988,7 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
return need_longer_range;
}
-void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
+void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -978,7 +1022,7 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
}
}
-void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
DCHECK((reg == NoReg || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
@@ -1008,7 +1052,7 @@ void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
}
}
-void TurboAssembler::B(Label* label, Condition cond) {
+void MacroAssembler::B(Label* label, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
@@ -1025,7 +1069,7 @@ void TurboAssembler::B(Label* label, Condition cond) {
bind(&done);
}
-void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
DCHECK(allow_macro_instructions());
Label done;
@@ -1041,7 +1085,7 @@ void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
bind(&done);
}
-void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
DCHECK(allow_macro_instructions());
Label done;
@@ -1057,7 +1101,7 @@ void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
bind(&done);
}
-void TurboAssembler::Cbnz(const Register& rt, Label* label) {
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
DCHECK(allow_macro_instructions());
Label done;
@@ -1073,7 +1117,7 @@ void TurboAssembler::Cbnz(const Register& rt, Label* label) {
bind(&done);
}
-void TurboAssembler::Cbz(const Register& rt, Label* label) {
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
DCHECK(allow_macro_instructions());
Label done;
@@ -1091,7 +1135,7 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) {
// Pseudo-instructions.
-void TurboAssembler::Abs(const Register& rd, const Register& rm,
+void MacroAssembler::Abs(const Register& rd, const Register& rm,
Label* is_not_representable, Label* is_representable) {
DCHECK(allow_macro_instructions());
DCHECK(AreSameSizeAndType(rd, rm));
@@ -1112,9 +1156,31 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm,
}
}
+void MacroAssembler::Switch(Register scratch, Register value,
+ int case_value_base, Label** labels,
+ int num_labels) {
+ Register table = scratch;
+ Label fallthrough, jump_table;
+ if (case_value_base != 0) {
+ Sub(value, value, case_value_base);
+ }
+ Cmp(value, Immediate(num_labels));
+ B(&fallthrough, hs);
+ Adr(table, &jump_table);
+ Ldr(table, MemOperand(table, value, LSL, kSystemPointerSizeLog2));
+ Br(table);
+ // Emit the jump table inline, under the assumption that it's not too big.
+ Align(kSystemPointerSize);
+ bind(&jump_table);
+ for (int i = 0; i < num_labels; ++i) {
+ dcptr(labels[i]);
+ }
+ bind(&fallthrough);
+}
+
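// A minimal usage sketch for Switch() above (hypothetical labels and register
// choice, for illustration): dispatch on x0 for case values 10, 11 and 12,
// with out-of-range values falling through past the jump table:
//   Label case_a, case_b, case_c;
//   Label* labels[] = {&case_a, &case_b, &case_c};
//   Switch(x16, x0, 10, labels, 3);  // note: clobbers both x0 and x16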
// Abstracted stack operations.
-void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
@@ -1128,7 +1194,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
PushHelper(count - 4, size, src4, src5, src6, src7);
}
-void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5,
const CPURegister& dst6, const CPURegister& dst7) {
@@ -1174,7 +1240,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
Bind(&done);
}
-void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
+void MacroAssembler::PushHelper(int count, int size, const CPURegister& src0,
const CPURegister& src1,
const CPURegister& src2,
const CPURegister& src3) {
@@ -1212,7 +1278,7 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
}
}
-void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
+void MacroAssembler::PopHelper(int count, int size, const CPURegister& dst0,
const CPURegister& dst1, const CPURegister& dst2,
const CPURegister& dst3) {
// Ensure that we don't unintentionally modify scratch or debug registers.
@@ -1250,7 +1316,7 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
}
}
-void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
+void MacroAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
@@ -1349,16 +1415,16 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ AssertCodeT(optimized_code_entry);
- __ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch,
- &heal_optimized_code_slot);
+ __ AssertCode(optimized_code_entry);
+ __ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch,
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Move(x2, optimized_code_entry);
- __ JumpCodeTObject(x2);
+ __ JumpCodeObject(x2);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
@@ -1372,7 +1438,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
if (v8_flags.debug_code) {
- CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
+ IsObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
Assert(eq, AbortReason::kExpectedFeedbackVector);
}
}
@@ -1383,7 +1449,7 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(optimized_code, closure));
// Store code entry in the closure.
- AssertCodeT(optimized_code);
+ AssertCode(optimized_code);
StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
@@ -1419,7 +1485,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- JumpCodeTObject(x2);
+ JumpCodeObject(x2);
}
// Read off the flags in the feedback vector and check if there
@@ -1458,15 +1524,20 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = x7;
- LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, x4);
}
+Condition MacroAssembler::CheckSmi(Register object) {
+ static_assert(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ return eq;
+}
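// A minimal usage sketch for CheckSmi() above (hypothetical label): the
// returned condition is meant to be consumed directly by a branch, e.g.
//   Label if_smi;
//   B(CheckSmi(x0), &if_smi);  // taken when x0 holds a Smi (tag bits clear)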
+
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::AssertSpAligned() {
+void MacroAssembler::AssertSpAligned() {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
HardAbortScope hard_abort(this); // Avoid calls to Abort.
@@ -1479,7 +1550,7 @@ void TurboAssembler::AssertSpAligned() {
Check(eq, AbortReason::kUnexpectedStackPointer);
}
-void TurboAssembler::AssertFPCRState(Register fpcr) {
+void MacroAssembler::AssertFPCRState(Register fpcr) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Label unexpected_mode, done;
@@ -1503,7 +1574,7 @@ void TurboAssembler::AssertFPCRState(Register fpcr) {
Bind(&done);
}
-void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertSmi(Register object, AbortReason reason) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -1519,16 +1590,35 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
Check(ne, reason);
}
-void MacroAssembler::AssertCodeT(Register object) {
+void MacroAssembler::AssertZeroExtended(Register int32_register) {
+ if (!v8_flags.debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Cmp(int32_register.X(), Immediate(kMaxUInt32));
+ Check(ls, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
+}
+
+void MacroAssembler::AssertMap(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
- AssertNotSmi(object, AbortReason::kOperandIsNotACodeT);
+ AssertNotSmi(object, AbortReason::kOperandIsNotAMap);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareObjectType(object, temp, temp, CODET_TYPE);
- Check(eq, AbortReason::kOperandIsNotACodeT);
+ IsObjectType(object, temp, temp, MAP_TYPE);
+ Check(eq, AbortReason::kOperandIsNotAMap);
+}
+
+void MacroAssembler::AssertCode(Register object) {
+ if (!v8_flags.debug_code) return;
+ ASM_CODE_COMMENT(this);
+ AssertNotSmi(object, AbortReason::kOperandIsNotACode);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ IsObjectType(object, temp, temp, CODE_TYPE);
+ Check(eq, AbortReason::kOperandIsNotACode);
}
void MacroAssembler::AssertConstructor(Register object) {
@@ -1580,7 +1670,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
+ IsObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
@@ -1615,7 +1705,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
Bind(&done_checking);
}
-void TurboAssembler::AssertPositiveOrZero(Register value) {
+void MacroAssembler::AssertPositiveOrZero(Register value) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Label done;
@@ -1625,18 +1715,18 @@ void TurboAssembler::AssertPositiveOrZero(Register value) {
Bind(&done);
}
-void TurboAssembler::Assert(Condition cond, AbortReason reason) {
+void MacroAssembler::Assert(Condition cond, AbortReason reason) {
if (v8_flags.debug_code) {
Check(cond, reason);
}
}
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
#endif // V8_ENABLE_DEBUG_CODE
-void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) {
+void MacroAssembler::CopySlots(int dst, Register src, Register slot_count) {
DCHECK(!src.IsZero());
UseScratchRegisterScope scope(this);
Register dst_reg = scope.AcquireX();
@@ -1645,7 +1735,7 @@ void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) {
CopyDoubleWords(dst_reg, src, slot_count);
}
-void TurboAssembler::CopySlots(Register dst, Register src,
+void MacroAssembler::CopySlots(Register dst, Register src,
Register slot_count) {
DCHECK(!dst.IsZero() && !src.IsZero());
SlotAddress(dst, dst);
@@ -1653,7 +1743,7 @@ void TurboAssembler::CopySlots(Register dst, Register src,
CopyDoubleWords(dst, src, slot_count);
}
-void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
+void MacroAssembler::CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, src, count));
@@ -1724,15 +1814,15 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Bind(&done);
}
-void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
+void MacroAssembler::SlotAddress(Register dst, int slot_offset) {
Add(dst, sp, slot_offset << kSystemPointerSizeLog2);
}
-void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
+void MacroAssembler::SlotAddress(Register dst, Register slot_offset) {
Add(dst, sp, Operand(slot_offset, LSL, kSystemPointerSizeLog2));
}
-void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
+void MacroAssembler::CanonicalizeNaN(const VRegister& dst,
const VRegister& src) {
AssertFPCRState();
@@ -1742,15 +1832,31 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
Fsub(dst, src, fp_zero);
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
ASM_CODE_COMMENT(this);
- // TODO(jbramley): Most root values are constants, and can be synthesized
- // without a load. Refer to the ARM back end for details.
+ if (CanBeImmediate(index)) {
+ Mov(destination,
+ Immediate(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
+ return;
+ }
+ LoadRoot(destination, index);
+}
+
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
+ ASM_CODE_COMMENT(this);
+ if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index) &&
+ IsImmAddSub(ReadOnlyRootPtr(index))) {
+ DecompressTagged(destination, ReadOnlyRootPtr(index));
+ return;
+ }
+ // Many roots have addresses that are too large to fit into addition immediate
+ // operands. Evidence suggests that the extra instruction for decompression
+ // costs us more than the load.
Ldr(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
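// Sketch of the fast path above: with static roots, a read-only root has a
// known compressed pointer, so it can be rematerialized by adding that
// constant to the pointer-compression cage base instead of loading it from
// the roots table; only roots whose compressed pointer fits an add-immediate
// take this path (hence the IsImmAddSub() guard).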
-void TurboAssembler::PushRoot(RootIndex index) {
+void MacroAssembler::PushRoot(RootIndex index) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
@@ -1758,14 +1864,14 @@ void TurboAssembler::PushRoot(RootIndex index) {
Push(tmp);
}
-void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
-void TurboAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); }
-void TurboAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
+void MacroAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); }
+void MacroAssembler::Move(Register dst, Register src) {
if (dst == src) return;
Mov(dst, src);
}
-void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
+void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1,
Register src1) {
DCHECK_NE(dst0, dst1);
if (dst0 != src1) {
@@ -1781,7 +1887,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
}
}
-void TurboAssembler::Swap(Register lhs, Register rhs) {
+void MacroAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK_NE(lhs, rhs);
UseScratchRegisterScope temps(this);
@@ -1791,7 +1897,7 @@ void TurboAssembler::Swap(Register lhs, Register rhs) {
Mov(lhs, temp);
}
-void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+void MacroAssembler::Swap(VRegister lhs, VRegister rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK_NE(lhs, rhs);
UseScratchRegisterScope temps(this);
@@ -1809,8 +1915,8 @@ void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
Mov(lhs, temp);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// All arguments must be on the stack before this function is called.
// x0 holds the return value after the call.
@@ -1823,8 +1929,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
Mov(x0, num_arguments);
Mov(x1, ExternalReference::Create(f));
- Handle<CodeT> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -1832,17 +1937,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
Mov(x1, builtin);
- Handle<CodeT> code =
- CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Br(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
@@ -1857,7 +1956,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference::Create(fid));
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1873,73 +1972,81 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_ARM64
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_of_reg_args) {
- CallCFunction(function, num_of_reg_args, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_of_reg_args, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_of_reg_args,
- int num_of_double_args) {
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args, int num_of_double_args,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, function);
- CallCFunction(temp, num_of_reg_args, num_of_double_args);
+ CallCFunction(temp, num_of_reg_args, num_of_double_args,
+ set_isolate_data_slots);
}
static const int kRegisterPassedArguments = 8;
static const int kFPRegisterPassedArguments = 8;
-void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
- int num_of_double_args) {
+void MacroAssembler::CallCFunction(Register function, int num_of_reg_args,
+ int num_of_double_args,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- Register pc_scratch = x4;
- Register addr_scratch = x5;
- Push(pc_scratch, addr_scratch);
-
- Label get_pc;
- Bind(&get_pc);
- Adr(pc_scratch, &get_pc);
-
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- Str(pc_scratch,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()));
- Str(fp,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Mov(addr_scratch,
- ExternalReference::fast_c_call_caller_pc_address(isolate()));
- Str(pc_scratch, MemOperand(addr_scratch));
- Mov(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Str(fp, MemOperand(addr_scratch));
- }
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ Register pc_scratch = x4;
+ Register addr_scratch = x5;
+ Push(pc_scratch, addr_scratch);
+
+ Label get_pc;
+ Bind(&get_pc);
+ Adr(pc_scratch, &get_pc);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ Str(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ Str(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Mov(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Str(pc_scratch, MemOperand(addr_scratch));
+ Mov(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(fp, MemOperand(addr_scratch));
+ }
- Pop(addr_scratch, pc_scratch);
+ Pop(addr_scratch, pc_scratch);
+ }
// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);
- // We don't unset the PC; the FP is the source of truth.
- if (root_array_available()) {
- Str(xzr,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Push(addr_scratch, xzr);
- Mov(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Str(xzr, MemOperand(addr_scratch));
- Pop(xzr, addr_scratch);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ Str(xzr, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = x5;
+ Push(addr_scratch, xzr);
+ Mov(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(xzr, MemOperand(addr_scratch));
+ Pop(xzr, addr_scratch);
+ }
}
if (num_of_reg_args > kRegisterPassedArguments) {
@@ -1956,21 +2063,21 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
}
}
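// Note on the new SetIsolateDataSlots parameter: with SetIsolateDataSlots::kNo
// only the call itself is emitted; the fast_c_call_caller_fp/pc slots in the
// isolate data are neither written before the call nor cleared afterwards.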
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
- destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
- constant_index)));
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)));
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
Ldr(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Mov(destination, kRootRegister);
@@ -1979,7 +2086,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -2008,7 +2115,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(Register target, Condition cond) {
+void MacroAssembler::Jump(Register target, Condition cond) {
if (cond == nv) return;
Label done;
if (cond != al) B(NegateCondition(cond), &done);
@@ -2016,7 +2123,7 @@ void TurboAssembler::Jump(Register target, Condition cond) {
Bind(&done);
}
-void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
+void MacroAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
Condition cond) {
if (cond == nv) return;
Label done;
@@ -2038,7 +2145,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
// * the 'target' input unmodified if this is a Wasm call, or
// * the offset of the target from the current PC, in instructions, for any
// other type of call.
-int64_t TurboAssembler::CalculateTargetOffset(Address target,
+int64_t MacroAssembler::CalculateTargetOffset(Address target,
RelocInfo::Mode rmode, byte* pc) {
int64_t offset = static_cast<int64_t>(target);
if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) {
@@ -2052,13 +2159,13 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target,
return offset;
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
JumpHelper(offset, rmode, cond);
}
-void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -2079,19 +2186,19 @@ void TurboAssembler::Jump(Handle<CodeT> code, RelocInfo::Mode rmode,
}
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
Mov(scratch, reference);
Jump(scratch);
}
-void TurboAssembler::Call(Register target) {
+void MacroAssembler::Call(Register target) {
BlockPoolsScope scope(this);
Blr(target);
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
@@ -2102,7 +2209,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
}
}
-void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
BlockPoolsScope scope(this);
@@ -2113,7 +2220,6 @@ void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode) {
return;
}
- DCHECK(FromCodeT(*code).IsExecutable());
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (CanUseNearCallOrJump(rmode)) {
@@ -2125,14 +2231,14 @@ void TurboAssembler::Call(Handle<CodeT> code, RelocInfo::Mode rmode) {
}
}
-void TurboAssembler::Call(ExternalReference target) {
+void MacroAssembler::Call(ExternalReference target) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, target);
Call(temp);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
@@ -2155,25 +2261,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
}
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
Ldr(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
switch (options().builtin_call_jump_mode) {
case BuiltinCallJumpMode::kAbsolute: {
@@ -2195,7 +2301,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
near_call(static_cast<int32_t>(index), RelocInfo::CODE_TARGET);
@@ -2211,15 +2317,15 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
// TODO(ishell): remove cond parameter from here to simplify things.
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
// The control flow integrity (CFI) feature allows us to "sign" code entry
// points as a target for calls, jumps or both. Arm64 has special
// instructions for this purpose, so-called "landing pads" (see
- // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and
- // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call"
+ // MacroAssembler::CallTarget(), MacroAssembler::JumpTarget() and
+ // MacroAssembler::JumpOrCallTarget()). Currently, we generate "Call"
// landing pads for CPP builtins. In order to allow tail calling to those
// builtins we have to use a workaround.
// x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump"
@@ -2248,7 +2354,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
JumpHelper(static_cast<int64_t>(index), RelocInfo::CODE_TARGET, cond);
@@ -2261,72 +2367,21 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- LoadCodeDataContainerEntry(destination, code_object);
- return;
- }
-
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
-
- Ldr(scratch.W(), FieldMemOperand(code_object, Code::kFlagsOffset));
- TestAndBranchIfAnySet(scratch.W(), Code::IsOffHeapTrampoline::kMask,
- &if_code_is_off_heap);
-
- // Not an off-heap trampoline object, the entry point is at
- // Code::raw_instruction_start().
- Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- B(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- Add(destination, kRootRegister,
- Operand(scratch, LSL, kSystemPointerSizeLog2));
- Ldr(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- }
+ Ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
}
-void TurboAssembler::CallCodeObject(Register code_object) {
+void MacroAssembler::CallCodeObject(Register code_object) {
ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
+ LoadCodeEntry(code_object, code_object);
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
-
+ LoadCodeEntry(code_object, code_object);
UseScratchRegisterScope temps(this);
if (code_object != x17) {
temps.Exclude(x17);
@@ -2335,80 +2390,14 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
Jump(x17);
}
-void TurboAssembler::LoadCodeDataContainerEntry(
- Register destination, Register code_data_container_object) {
- ASM_CODE_COMMENT(this);
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-
- Ldr(destination, FieldMemOperand(code_data_container_object,
- CodeDataContainer::kCodeEntryPointOffset));
-}
-
-void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
- Register destination, Register code_data_container_object) {
- ASM_CODE_COMMENT(this);
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Compute the Code object pointer from the code entry point.
- Ldr(destination, FieldMemOperand(code_data_container_object,
- CodeDataContainer::kCodeEntryPointOffset));
- Sub(destination, destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
-}
-
-void TurboAssembler::CallCodeDataContainerObject(
- Register code_data_container_object) {
- ASM_CODE_COMMENT(this);
- LoadCodeDataContainerEntry(code_data_container_object,
- code_data_container_object);
- Call(code_data_container_object);
-}
-
-void TurboAssembler::JumpCodeDataContainerObject(
- Register code_data_container_object, JumpMode jump_mode) {
- ASM_CODE_COMMENT(this);
- DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeDataContainerEntry(code_data_container_object,
- code_data_container_object);
- UseScratchRegisterScope temps(this);
- if (code_data_container_object != x17) {
- temps.Exclude(x17);
- Mov(x17, code_data_container_object);
- }
- Jump(x17);
-}
-
-void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
- ASM_CODE_COMMENT(this);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- LoadCodeDataContainerEntry(destination, code);
- } else {
- Add(destination, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-void TurboAssembler::CallCodeTObject(Register code) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CallCodeDataContainerObject(code);
- } else {
- CallCodeObject(code);
- }
-}
-
-void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- JumpCodeDataContainerObject(code, jump_mode);
- } else {
- JumpCodeObject(code, jump_mode);
- }
-}
-
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
UseScratchRegisterScope temps(this);
temps.Exclude(x16, x17);
@@ -2435,7 +2424,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Bind(&return_location);
}
-void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
+void MacroAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@@ -2443,11 +2432,32 @@ void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
Blr(temp);
}
-bool TurboAssembler::IsNearCallOffset(int64_t offset) {
+bool MacroAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(
+// Check if the code object is marked for deoptimization. If it is, this
+// function jumps to the CompileLazyDeoptimizedCode builtin. To do this we
+// need to:
+// 1. read from memory the word that contains the marker bit, i.e. the flags
+//    of the referenced {Code} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if the bit is set, jump to the builtin.
+void MacroAssembler::BailoutIfDeoptimized() {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ LoadTaggedField(scratch,
+ MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ Ldr(scratch.W(), FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
+ Label not_deoptimized;
+ Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
+ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET);
+ Bind(&not_deoptimized);
+}
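// In pseudo-C the sequence above is roughly: load the Code object that owns
// the current InstructionStream, then
//   if (code->kind_specific_flags & (1 << Code::kMarkedForDeoptimizationBit))
//     TailCall(CompileLazyDeoptimizedCode);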
+
+void MacroAssembler::CallForDeoptimization(
Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label) {
ASM_CODE_COMMENT(this);
@@ -2466,10 +2476,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
Ldr(destination, MemOperand(kRootRegister, offset));
}
@@ -2650,14 +2660,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(code,
- FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
- CallCodeTObject(code);
+ CallCodeObject(code);
break;
case InvokeType::kJump:
- JumpCodeTObject(code);
+ JumpCodeObject(code);
break;
}
B(&done);
@@ -2673,22 +2682,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Bind(&done);
}
-void MacroAssembler::JumpIfCodeTIsMarkedForDeoptimization(
- Register codet, Register scratch, Label* if_marked_for_deoptimization) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- Ldr(scratch.W(),
- FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
- Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
- if_marked_for_deoptimization);
-
- } else {
- LoadTaggedPointerField(
- scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- Ldr(scratch.W(),
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
- Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
- if_marked_for_deoptimization);
- }
+void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
+ Register code, Register scratch, Label* if_marked_for_deoptimization) {
+ Ldr(scratch.W(), FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
+ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
+ if_marked_for_deoptimization);
}
Operand MacroAssembler::ClearedValue() const {
@@ -2713,12 +2711,11 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_parameter_count = x2;
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
// (kDontAdaptArgumentsSentinel), so we need sign
// extension to correctly handle it.
- LoadTaggedPointerField(
+ LoadTaggedField(
expected_parameter_count,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
Ldrh(expected_parameter_count,
@@ -2742,14 +2739,13 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, x1);
// Set up the context.
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(function, no_reg, expected_parameter_count,
actual_parameter_count, type);
}
-void TurboAssembler::TryConvertDoubleToInt64(Register result,
+void MacroAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
ASM_CODE_COMMENT(this);
@@ -2774,7 +2770,7 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
B(vc, done);
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode,
@@ -2793,9 +2789,9 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
// If we fell through then inline version didn't succeed - call stub instead.
if (lr_status == kLRHasNotBeenSaved) {
- Push<TurboAssembler::kSignLR>(lr, double_input);
+ Push<MacroAssembler::kSignLR>(lr, double_input);
} else {
- Push<TurboAssembler::kDontStoreLR>(xzr, double_input);
+ Push<MacroAssembler::kDontStoreLR>(xzr, double_input);
}
// DoubleToI preserves any registers it needs to clobber.
@@ -2815,7 +2811,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
if (lr_status == kLRHasNotBeenSaved) {
// Pop into xzr here to drop the double input on the stack:
- Pop<TurboAssembler::kAuthLR>(xzr, lr);
+ Pop<MacroAssembler::kAuthLR>(xzr, lr);
} else {
Drop(2);
}
@@ -2825,21 +2821,21 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Uxtw(result.W(), result.W());
}
-void TurboAssembler::Prologue() {
+void MacroAssembler::Prologue() {
ASM_CODE_COMMENT(this);
- Push<TurboAssembler::kSignLR>(lr, fp);
+ Push<MacroAssembler::kSignLR>(lr, fp);
mov(fp, sp);
static_assert(kExtraSlotClaimedByPrologue == 1);
Push(cp, kJSFunctionRegister, kJavaScriptCallArgCountRegister, padreg);
}
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
if (StackFrame::IsJavaScript(type)) {
// Just push a minimal "machine frame", saving the frame pointer and return
// address, without any markers.
- Push<TurboAssembler::kSignLR>(lr, fp);
+ Push<MacroAssembler::kSignLR>(lr, fp);
Mov(fp, sp);
// sp[1] : lr
// sp[0] : fp
@@ -2858,7 +2854,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
} else {
fourth_reg = padreg;
}
- Push<TurboAssembler::kSignLR>(lr, fp, type_reg, fourth_reg);
+ Push<MacroAssembler::kSignLR>(lr, fp, type_reg, fourth_reg);
static constexpr int kSPToFPDelta = 2 * kSystemPointerSize;
Add(fp, sp, kSPToFPDelta);
// sp[3] : lr
@@ -2868,45 +2864,22 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(sp, fp);
- Pop<TurboAssembler::kAuthLR>(fp, lr);
+ Pop<MacroAssembler::kAuthLR>(fp, lr);
}
-void MacroAssembler::ExitFramePreserveFPRegs() {
- ASM_CODE_COMMENT(this);
- DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
- PushCPURegList(kCallerSavedV);
-}
-
-void MacroAssembler::ExitFrameRestoreFPRegs() {
- // Read the registers from the stack without popping them. The stack pointer
- // will be reset as part of the unwinding process.
- ASM_CODE_COMMENT(this);
- CPURegList saved_fp_regs = kCallerSavedV;
- DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
-
- int offset = ExitFrameConstants::kLastExitFrameField;
- while (!saved_fp_regs.IsEmpty()) {
- const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
- const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
- offset -= 2 * kDRegSize;
- Ldp(dst1, dst0, MemOperand(fp, offset));
- }
-}
-
-void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
- int extra_space,
+void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the new stack frame.
- Push<TurboAssembler::kSignLR>(lr, fp);
+ Push<MacroAssembler::kSignLR>(lr, fp);
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
@@ -2932,9 +2905,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
static_assert((-2 * kSystemPointerSize) ==
ExitFrameConstants::kLastExitFrameField);
- if (save_doubles) {
- ExitFramePreserveFPRegs();
- }
// Round the number of space we need to claim to a multiple of two.
int slots_to_claim = RoundUp(extra_space + 1, 2);
@@ -2947,7 +2917,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
- // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
@@ -2960,13 +2929,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
}
// Leave the current exit frame.
-void MacroAssembler::LeaveExitFrame(bool restore_doubles,
- const Register& scratch,
+void MacroAssembler::LeaveExitFrame(const Register& scratch,
const Register& scratch2) {
ASM_CODE_COMMENT(this);
- if (restore_doubles) {
- ExitFrameRestoreFPRegs();
- }
// Restore the context pointer from the top frame.
Mov(scratch,
@@ -2990,7 +2955,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
Mov(sp, fp);
- Pop<TurboAssembler::kAuthLR>(fp, lr);
+ Pop<MacroAssembler::kAuthLR>(fp, lr);
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
@@ -3031,6 +2996,66 @@ void MacroAssembler::JumpIfObjectType(Register object, Register map,
B(cond, if_cond_pass);
}
+void MacroAssembler::JumpIfJSAnyIsNotPrimitive(Register heap_object,
+ Register scratch, Label* target,
+ Label::Distance distance,
+ Condition cc) {
+ CHECK(cc == Condition::kUnsignedLessThan ||
+ cc == Condition::kUnsignedGreaterThanEqual);
+ if (V8_STATIC_ROOTS_BOOL) {
+#ifdef DEBUG
+ Label ok;
+ LoadMap(scratch, heap_object);
+ CompareInstanceTypeRange(scratch, scratch, FIRST_JS_RECEIVER_TYPE,
+ LAST_JS_RECEIVER_TYPE);
+ B(Condition::kUnsignedLessThanEqual, &ok);
+ LoadMap(scratch, heap_object);
+ CompareInstanceTypeRange(scratch, scratch, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
+ LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
+ B(Condition::kUnsignedLessThanEqual, &ok);
+ Abort(AbortReason::kInvalidReceiver);
+ bind(&ok);
+#endif // DEBUG
+
+ // All primitive objects' maps are allocated at the start of the read-only
+ // heap. Thus JS_RECEIVERs must have maps with larger (compressed)
+ // addresses.
+ LoadCompressedMap(scratch, heap_object);
+ CmpTagged(scratch, Immediate(InstanceTypeChecker::kNonJsReceiverMapLimit));
+ } else {
+ static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ CompareObjectType(heap_object, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
+ }
+ B(cc, target);
+}
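// Worked example of the static-roots comparison above: since primitive maps
// come first in the read-only heap, `compressed_map >= kNonJsReceiverMapLimit`
// holds exactly for JS receivers, so cc == kUnsignedGreaterThanEqual branches
// for receivers ("is not primitive") and cc == kUnsignedLessThan branches for
// primitives.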
+
+// Sets equality condition flags.
+void MacroAssembler::IsObjectType(Register object, Register scratch1,
+ Register scratch2, InstanceType type) {
+ ASM_CODE_COMMENT(this);
+
+ if (V8_STATIC_ROOTS_BOOL) {
+ if (base::Optional<RootIndex> expected =
+ InstanceTypeChecker::UniqueMapOfInstanceType(type)) {
+ UseScratchRegisterScope temps(this);
+ Tagged_t ptr = ReadOnlyRootPtr(*expected);
+ if (IsImmAddSub(ptr) || scratch1 != scratch2 || temps.CanAcquire()) {
+ LoadCompressedMap(scratch1, object);
+ if (!IsImmAddSub(ptr) && scratch1 != scratch2) {
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(scratch2, ptr, kAnyShift);
+ CmpTagged(scratch1, imm_operand);
+ } else {
+ CmpTagged(scratch1, Immediate(ptr));
+ }
+ return;
+ }
+ }
+ }
+
+ CompareObjectType(object, scratch1, scratch2, type);
+}
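// The fast path above applies to instance types that have a single map shared
// by all their instances (for example MAP_TYPE, whose instances all use the
// meta map); for those, comparing the compressed map word against the
// corresponding read-only root replaces the map load plus instance-type load
// performed by CompareObjectType().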
+
// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
@@ -3039,9 +3064,14 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
CompareInstanceType(map, type_reg, type);
}
-void TurboAssembler::LoadMap(Register dst, Register object) {
+void MacroAssembler::LoadCompressedMap(Register dst, Register object) {
+ ASM_CODE_COMMENT(this);
+ Ldr(dst.W(), FieldMemOperand(object, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::LoadMap(Register dst, Register object) {
ASM_CODE_COMMENT(this);
- LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
}
// Sets condition flags based on comparison, and returns type in type_reg.
@@ -3076,6 +3106,10 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
+ if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
+ CmpTagged(obj, Immediate(ReadOnlyRootPtr(index)));
+ return;
+ }
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
@@ -3111,25 +3145,25 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
}
}
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand) {
+void MacroAssembler::LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
Ldr(destination, field_operand);
}
}
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand) {
+void MacroAssembler::LoadTaggedFieldWithoutDecompressing(
+ const Register& destination, const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
+ Ldr(destination.W(), field_operand);
} else {
Ldr(destination, field_operand);
}
}
-void TurboAssembler::LoadTaggedSignedField(const Register& destination,
+void MacroAssembler::LoadTaggedSignedField(const Register& destination,
const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedSigned(destination, field_operand);
@@ -3138,11 +3172,20 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination,
}
}
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
SmiUntag(dst, src);
}
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTwoTaggedFields(const Register& value,
+ const MemOperand& dst_field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Stp(value.W(), value.W(), dst_field_operand);
+ } else {
+ Stp(value, value, dst_field_operand);
+ }
+}
+
+void MacroAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) {
if (COMPRESS_POINTERS_BOOL) {
Str(value.W(), dst_field_operand);
@@ -3151,7 +3194,7 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
-void TurboAssembler::AtomicStoreTaggedField(const Register& value,
+void MacroAssembler::AtomicStoreTaggedField(const Register& value,
const Register& dst_base,
const Register& dst_index,
const Register& temp) {
@@ -3163,7 +3206,7 @@ void TurboAssembler::AtomicStoreTaggedField(const Register& value,
}
}
-void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+void MacroAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
@@ -3174,27 +3217,36 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination,
}
}
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
- const MemOperand& field_operand) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
Add(destination, kPtrComprCageBaseRegister, destination);
}
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
- const Register& source) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ const Register& source) {
ASM_CODE_COMMENT(this);
Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
}
-void TurboAssembler::DecompressAnyTagged(const Register& destination,
- const MemOperand& field_operand) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ Tagged_t immediate) {
ASM_CODE_COMMENT(this);
- Ldr(destination.W(), field_operand);
- Add(destination, kPtrComprCageBaseRegister, destination);
+ if (IsImmAddSub(immediate)) {
+ Add(destination, kPtrComprCageBaseRegister,
+ Immediate(immediate, RelocInfo::Mode::NO_INFO));
+ } else {
+ // Immediate is larger than 12 bits and therefore can't be encoded directly.
+ // Use destination as a temporary to not acquire a scratch register.
+ DCHECK_NE(destination, sp);
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(destination, immediate, kAnyShift);
+ Add(destination, kPtrComprCageBaseRegister, imm_operand);
+ }
}
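
All of the DecompressTagged overloads above reduce to the same arithmetic: zero-extend the 32-bit compressed value and add the pointer-compression cage base held in kPtrComprCageBaseRegister. A minimal standalone sketch, using a made-up cage base address:

// Standalone sketch with a placeholder cage base: decompression is
// "cage base + zero-extended 32-bit offset", matching the
// Add(destination, kPtrComprCageBaseRegister, ...) forms above.
#include <cstdint>
#include <cstdio>

using Tagged_t = uint32_t;  // compressed on-heap value
using Address = uint64_t;   // full 64-bit pointer

constexpr Address kCageBase = 0x0000100000000000;  // placeholder base address

Address DecompressTagged(Address cage_base, Tagged_t compressed) {
  return cage_base + static_cast<Address>(compressed);  // UXTW + ADD
}

int main() {
  Tagged_t on_heap = 0x00042A31;  // low 32 bits of some tagged pointer
  std::printf("%#llx\n", static_cast<unsigned long long>(
                             DecompressTagged(kCageBase, on_heap)));
  return 0;
}
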
-void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
+void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination,
const Register& base,
const Register& index,
const Register& temp) {
@@ -3208,37 +3260,27 @@ void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
}
}
-void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination,
- const Register& base,
- const Register& index,
- const Register& temp) {
- ASM_CODE_COMMENT(this);
- Add(temp, base, index);
- Ldar(destination.W(), temp);
- Add(destination, kPtrComprCageBaseRegister, destination);
-}
-
-void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination,
- const Register& base,
- const Register& index,
- const Register& temp) {
+void MacroAssembler::AtomicDecompressTagged(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
ASM_CODE_COMMENT(this);
Add(temp, base, index);
Ldar(destination.W(), temp);
Add(destination, kPtrComprCageBaseRegister, destination);
}
-void TurboAssembler::CheckPageFlag(const Register& object, int mask,
+void MacroAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
- if (cc == eq) {
+ if (cc == ne) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
} else {
- DCHECK_EQ(cc, ne);
+ DCHECK_EQ(cc, eq);
TestAndBranchIfAllClear(scratch, mask, condition_met);
}
}
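
Note that this change inverts the meaning of the condition passed to CheckPageFlag: ne now means "branch if any bit of the mask is set" and eq means "branch if all mask bits are clear"; callers such as RecordWrite are updated accordingly further down. A small sketch of the predicate, with an illustrative mask value:

// Standalone sketch of the flag test CheckPageFlag now encodes:
//   cc == ne -> branch when (flags & mask) != 0  (TestAndBranchIfAnySet)
//   cc == eq -> branch when (flags & mask) == 0  (TestAndBranchIfAllClear)
#include <cassert>
#include <cstdint>

enum Condition { eq, ne };

bool PageFlagConditionMet(uint64_t page_flags, uint64_t mask, Condition cc) {
  return cc == ne ? (page_flags & mask) != 0 : (page_flags & mask) == 0;
}

int main() {
  constexpr uint64_t kInterestingMask = uint64_t{1} << 3;  // placeholder bit
  assert(PageFlagConditionMet(0b1000, kInterestingMask, ne));
  assert(PageFlagConditionMet(0b0000, kInterestingMask, eq));
  return 0;
}
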
@@ -3282,7 +3324,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Bind(&done);
}
-void TurboAssembler::DecodeSandboxedPointer(const Register& value) {
+void MacroAssembler::DecodeSandboxedPointer(const Register& value) {
ASM_CODE_COMMENT(this);
#ifdef V8_ENABLE_SANDBOX
Add(value, kPtrComprCageBaseRegister,
@@ -3292,7 +3334,7 @@ void TurboAssembler::DecodeSandboxedPointer(const Register& value) {
#endif
}
-void TurboAssembler::LoadSandboxedPointerField(
+void MacroAssembler::LoadSandboxedPointerField(
const Register& destination, const MemOperand& field_operand) {
#ifdef V8_ENABLE_SANDBOX
ASM_CODE_COMMENT(this);
@@ -3303,7 +3345,7 @@ void TurboAssembler::LoadSandboxedPointerField(
#endif
}
-void TurboAssembler::StoreSandboxedPointerField(
+void MacroAssembler::StoreSandboxedPointerField(
const Register& value, const MemOperand& dst_field_operand) {
#ifdef V8_ENABLE_SANDBOX
ASM_CODE_COMMENT(this);
@@ -3317,22 +3359,21 @@ void TurboAssembler::StoreSandboxedPointerField(
#endif
}
-void TurboAssembler::LoadExternalPointerField(Register destination,
+void MacroAssembler::LoadExternalPointerField(Register destination,
MemOperand field_operand,
ExternalPointerTag tag,
Register isolate_root) {
DCHECK(!AreAliased(destination, isolate_root));
ASM_CODE_COMMENT(this);
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- DCHECK_NE(kExternalPointerNullTag, tag);
- DCHECK(!IsSharedExternalPointerType(tag));
- UseScratchRegisterScope temps(this);
- Register external_table = temps.AcquireX();
- if (isolate_root == no_reg) {
- DCHECK(root_array_available_);
- isolate_root = kRootRegister;
- }
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ DCHECK(!IsSharedExternalPointerType(tag));
+ UseScratchRegisterScope temps(this);
+ Register external_table = temps.AcquireX();
+ if (isolate_root == no_reg) {
+ DCHECK(root_array_available_);
+ isolate_root = kRootRegister;
+ }
Ldr(external_table,
MemOperand(isolate_root,
IsolateData::external_pointer_table_offset() +
@@ -3345,72 +3386,72 @@ void TurboAssembler::LoadExternalPointerField(Register destination,
Mov(destination, Operand(destination, LSR, shift_amount));
Ldr(destination, MemOperand(external_table, destination));
And(destination, destination, Immediate(~tag));
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+#else
Ldr(destination, field_operand);
+#endif // V8_ENABLE_SANDBOX
}
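
With the sandbox enabled, the field no longer holds a raw pointer but a handle into the isolate's external pointer table; the code above shifts the handle into a table offset, loads the entry, and masks off the type tag. A simplified standalone sketch follows; the shift, tag mask, and table layout here are placeholders, not V8's real encoding.

// Simplified standalone sketch; shift, tag mask, and table layout are
// placeholders. The object field stores a handle, and the real pointer is
// table[handle >> shift] with the type tag masked off.
#include <cstdint>
#include <cstdio>
#include <vector>

using ExternalPointer_t = uint64_t;
constexpr uint64_t kTagMask = 0xFFFF000000000000;  // placeholder tag bits
constexpr int kIndexShift = 6;                     // placeholder shift

uint64_t LoadExternalPointer(const std::vector<ExternalPointer_t>& table,
                             uint32_t handle) {
  ExternalPointer_t entry = table[handle >> kIndexShift];
  return entry & ~kTagMask;  // And(destination, destination, ~tag)
}

int main() {
  std::vector<ExternalPointer_t> table(8);
  table[2] = 0x1234000012345678;        // entry with the tag in the high bits
  uint32_t handle = 2u << kIndexShift;  // what the object field would hold
  std::printf("%#llx\n", static_cast<unsigned long long>(
                             LoadExternalPointer(table, handle)));
  return 0;
}
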
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
- if (registers.is_empty()) return;
- ASM_CODE_COMMENT(this);
- CPURegList regs(kXRegSizeInBits, registers);
- // If we were saving LR, we might need to sign it.
- DCHECK(!regs.IncludesAliasOf(lr));
- regs.Align();
- PushCPURegList(regs);
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers.is_empty()) return;
+ ASM_CODE_COMMENT(this);
+ CPURegList regs(kXRegSizeInBits, registers);
+ // If we were saving LR, we might need to sign it.
+ DCHECK(!regs.IncludesAliasOf(lr));
+ regs.Align();
+ PushCPURegList(regs);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
- if (registers.is_empty()) return;
- ASM_CODE_COMMENT(this);
- CPURegList regs(kXRegSizeInBits, registers);
- // If we were saving LR, we might need to sign it.
- DCHECK(!regs.IncludesAliasOf(lr));
- regs.Align();
- PopCPURegList(regs);
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers.is_empty()) return;
+ ASM_CODE_COMMENT(this);
+ CPURegList regs(kXRegSizeInBits, registers);
+ // If we were saving LR, we might need to sign it.
+ DCHECK(!regs.IncludesAliasOf(lr));
+ regs.Align();
+ PopCPURegList(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
- ASM_CODE_COMMENT(this);
- RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
- MaybeSaveRegisters(registers);
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
- MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(),
- WriteBarrierDescriptor::SlotAddressRegister(), object,
- offset);
+ MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(),
+ WriteBarrierDescriptor::SlotAddressRegister(), object,
+ offset);
- Call(isolate()->builtins()->code_handle(
- Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
- RelocInfo::CODE_TARGET);
- MaybeRestoreRegisters(registers);
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
+ RelocInfo::CODE_TARGET);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Operand offset,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
- ASM_CODE_COMMENT(this);
- RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
- MaybeSaveRegisters(registers);
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
- Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
- Register slot_address_parameter =
- WriteBarrierDescriptor::SlotAddressRegister();
- MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
- CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
+ CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode,
+ mode);
- MaybeRestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
- ASM_CODE_COMMENT(this);
- DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
- DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+ ASM_CODE_COMMENT(this);
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
#if V8_ENABLE_WEBASSEMBLY
if (mode == StubCallMode::kCallWasmRuntimeStub) {
auto wasm_target = wasm::WasmCode::GetRecordWriteStub(fp_mode);
@@ -3424,7 +3465,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
}
}
-void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
Register object, Operand offset) {
ASM_CODE_COMMENT(this);
DCHECK_NE(dst_object, dst_slot);
@@ -3475,7 +3516,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.AcquireX();
DCHECK(!AreAliased(object, value, temp));
Add(temp, object, offset);
- LoadTaggedPointerField(temp, MemOperand(temp));
+ LoadTaggedField(temp, MemOperand(temp));
Cmp(temp, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -3492,16 +3533,15 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- ne, &done);
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
- CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
&done);
// Record the actual write.
if (lr_status == kLRHasNotBeenSaved) {
- Push<TurboAssembler::kSignLR>(padreg, lr);
+ Push<MacroAssembler::kSignLR>(padreg, lr);
}
Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
DCHECK(!AreAliased(object, slot_address, value));
@@ -3510,14 +3550,14 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Add(slot_address, object, offset);
CallRecordWriteStub(object, slot_address, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
- Pop<TurboAssembler::kAuthLR>(lr, padreg);
+ Pop<MacroAssembler::kAuthLR>(lr, padreg);
}
if (v8_flags.debug_code) Mov(slot_address, Operand(kZapValue));
Bind(&done);
}
-void TurboAssembler::Check(Condition cond, AbortReason reason) {
+void MacroAssembler::Check(Condition cond, AbortReason reason) {
Label ok;
B(cond, &ok);
Abort(reason);
@@ -3525,10 +3565,10 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
Bind(&ok);
}
-void TurboAssembler::Trap() { Brk(0); }
-void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
+void MacroAssembler::Trap() { Brk(0); }
+void MacroAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
ASM_CODE_COMMENT(this);
if (v8_flags.code_comments) {
RecordComment("Abort message: ");
@@ -3582,15 +3622,46 @@ void TurboAssembler::Abort(AbortReason reason) {
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
+}
+
+void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
+ Register feedback_vector,
+ FeedbackSlot slot,
+ Label* on_result,
+ Label::Distance) {
+ Label fallthrough, clear_slot;
+ LoadTaggedField(
+ scratch_and_result,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt())));
+ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
+
+ // Is it marked_for_deoptimization? If yes, clear the slot.
+ {
+ UseScratchRegisterScope temps(this);
+ JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temps.AcquireX(),
+ &clear_slot);
+ B(on_result);
+ }
+
+ bind(&clear_slot);
+ Mov(scratch_and_result, ClearedValue());
+ StoreTaggedField(
+ scratch_and_result,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt())));
+
+ bind(&fallthrough);
+ Mov(scratch_and_result, 0);
}
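
TryLoadOptimizedOsrCode implements a small cache-lookup protocol against the feedback vector slot: a cleared weak reference falls through with 0, code marked for deoptimization clears the slot and falls through, and only healthy code jumps to on_result. The same control flow in plain C++, with hypothetical Code and WeakSlot stand-ins:

// Plain C++ sketch of the control flow above; Code and WeakSlot are
// hypothetical stand-ins for the weak reference stored in the feedback vector.
#include <cstdio>
#include <optional>

struct Code {
  bool marked_for_deoptimization;
};

struct WeakSlot {
  Code* value = nullptr;  // nullptr plays the role of a cleared weak reference
};

// Returns the usable code object, clearing the slot if the code is stale.
std::optional<Code*> TryLoadOptimizedOsrCode(WeakSlot& slot) {
  Code* code = slot.value;
  if (code == nullptr) return std::nullopt;  // weak ref cleared: fall through
  if (code->marked_for_deoptimization) {     // stale: clear the slot
    slot.value = nullptr;
    return std::nullopt;
  }
  return code;  // success: the assembly jumps to on_result here
}

int main() {
  Code stale{true};
  WeakSlot slot{&stale};
  std::printf("hit = %d\n", TryLoadOptimizedOsrCode(slot).has_value());
  std::printf("slot cleared = %d\n", slot.value == nullptr);
  return 0;
}
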
// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
-void TurboAssembler::PrintfNoPreserve(const char* format,
+void MacroAssembler::PrintfNoPreserve(const char* format,
const CPURegister& arg0,
const CPURegister& arg1,
const CPURegister& arg2,
@@ -3624,7 +3695,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
fp_tmp_list.Remove(kPCSVarargsFP);
fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
- // Override the TurboAssembler's scratch register list. The lists will be
+ // Override the MacroAssembler's scratch register list. The lists will be
// reset automatically at the end of the UseScratchRegisterScope.
UseScratchRegisterScope temps(this);
TmpList()->set_bits(tmp_list.bits());
@@ -3740,7 +3811,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
CallPrintf(arg_count, pcs);
}
-void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
+void MacroAssembler::CallPrintf(int arg_count, const CPURegister* args) {
ASM_CODE_COMMENT(this);
// A call to printf needs special handling for the simulator, since the system
// printf function will use a different instruction set and the procedure-call
@@ -3770,7 +3841,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
Call(ExternalReference::printf_function());
}
-void TurboAssembler::Printf(const char* format, CPURegister arg0,
+void MacroAssembler::Printf(const char* format, CPURegister arg0,
CPURegister arg1, CPURegister arg2,
CPURegister arg3) {
ASM_CODE_COMMENT(this);
@@ -3869,12 +3940,12 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
return result;
}
-void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
+void MacroAssembler::ComputeCodeStartAddress(const Register& rd) {
// We can use adr to load a pc relative location.
adr(rd, -pc_offset());
}
-void TurboAssembler::RestoreFPAndLR() {
+void MacroAssembler::RestoreFPAndLR() {
static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
StandardFrameConstants::kCallerPCOffset,
"Offsets must be consecutive for ldp!");
@@ -3893,7 +3964,7 @@ void TurboAssembler::RestoreFPAndLR() {
}
#if V8_ENABLE_WEBASSEMBLY
-void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
+void MacroAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
UseScratchRegisterScope temps(this);
temps.Exclude(x16, x17);
Adr(x17, return_location);
@@ -3905,7 +3976,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-void TurboAssembler::PopcntHelper(Register dst, Register src) {
+void MacroAssembler::PopcntHelper(Register dst, Register src) {
UseScratchRegisterScope temps(this);
VRegister scratch = temps.AcquireV(kFormat8B);
VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D();
@@ -3915,7 +3986,7 @@ void TurboAssembler::PopcntHelper(Register dst, Register src) {
Fmov(dst, tmp);
}
-void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
+void MacroAssembler::I64x2BitMask(Register dst, VRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
VRegister tmp1 = scope.AcquireV(kFormat2D);
@@ -3926,7 +3997,7 @@ void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
}
-void TurboAssembler::I64x2AllTrue(Register dst, VRegister src) {
+void MacroAssembler::I64x2AllTrue(Register dst, VRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
VRegister tmp = scope.AcquireV(kFormat2D);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 99121e3f4b..887d3b0e01 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -65,6 +65,56 @@ namespace internal {
V(Stlxrh, stlxrh) \
V(Stlxr, stlxr)
+#define CAS_SINGLE_MACRO_LIST(V) \
+ V(Cas, cas) \
+ V(Casa, casa) \
+ V(Casl, casl) \
+ V(Casal, casal) \
+ V(Casb, casb) \
+ V(Casab, casab) \
+ V(Caslb, caslb) \
+ V(Casalb, casalb) \
+ V(Cash, cash) \
+ V(Casah, casah) \
+ V(Caslh, caslh) \
+ V(Casalh, casalh)
+
+#define CAS_PAIR_MACRO_LIST(V) \
+ V(Casp, casp) \
+ V(Caspa, caspa) \
+ V(Caspl, caspl) \
+ V(Caspal, caspal)
+
+// These macros generate all the variations of the atomic memory operations,
+// e.g. ldadd, ldadda, ldaddb, staddl, etc.
+
+#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \
+ V(DEF, MASM_PRE##add, ASM_PRE##add) \
+ V(DEF, MASM_PRE##clr, ASM_PRE##clr) \
+ V(DEF, MASM_PRE##eor, ASM_PRE##eor) \
+ V(DEF, MASM_PRE##set, ASM_PRE##set) \
+ V(DEF, MASM_PRE##smax, ASM_PRE##smax) \
+ V(DEF, MASM_PRE##smin, ASM_PRE##smin) \
+ V(DEF, MASM_PRE##umax, ASM_PRE##umax) \
+ V(DEF, MASM_PRE##umin, ASM_PRE##umin)
+
+#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
+ V(MASM, ASM) \
+ V(MASM##l, ASM##l) \
+ V(MASM##b, ASM##b) \
+ V(MASM##lb, ASM##lb) \
+ V(MASM##h, ASM##h) \
+ V(MASM##lh, ASM##lh)
+
+#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \
+ ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
+ V(MASM##a, ASM##a) \
+ V(MASM##al, ASM##al) \
+ V(MASM##ab, ASM##ab) \
+ V(MASM##alb, ASM##alb) \
+ V(MASM##ah, ASM##ah) \
+ V(MASM##alh, ASM##alh)
+
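
These list macros follow the usual X-macro pattern: each list invokes a caller-supplied macro once per operation and addressing mode, so a single declaration template fans out into the whole ldadd/ldadda/ldaddb/... family. A compact standalone illustration with two made-up operations and three modes:

// Compact standalone illustration of the X-macro pattern used above.
#include <cstdio>

#define OPS(V, DEF, PRE) V(DEF, PRE##add) V(DEF, PRE##clr)
#define MODES(V, NAME) V(NAME) V(NAME##l) V(NAME##b)

#define DEFINE(NAME) \
  void NAME() { std::puts(#NAME); }
#define EXPAND_MODES(DEF, NAME) MODES(DEF, NAME)

OPS(EXPAND_MODES, DEFINE, ld)  // defines ldadd, ldaddl, ldaddb, ldclr, ...

int main() {
  ldadd();
  ldaddb();
  ldclrl();
  return 0;
}
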
// ----------------------------------------------------------------------------
// Static helper functions
@@ -146,9 +196,9 @@ enum PreShiftImmMode {
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
#if DEBUG
void set_allow_macro_instructions(bool value) {
@@ -407,6 +457,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(saddl, Saddl) \
V(saddw2, Saddw2) \
V(saddw, Saddw) \
+ V(sdot, Sdot) \
V(shadd, Shadd) \
V(shsub, Shsub) \
V(smaxp, Smaxp) \
@@ -561,22 +612,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void SmiTag(Register smi);
inline void SmiToInt32(Register smi);
+ inline void SmiToInt32(Register dst, Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
void AssertSmi(Register object,
AbortReason reason = AbortReason::kOperandIsNotASmi)
- NOOP_UNLESS_DEBUG_CODE
+ NOOP_UNLESS_DEBUG_CODE;
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object,
+ AbortReason reason = AbortReason::kOperandIsASmi)
+ NOOP_UNLESS_DEBUG_CODE;
+
+ // Abort execution if a 64 bit register containing a 32 bit payload does
+ // not have zeros in the top 32 bits, enabled via --debug-code.
+ void AssertZeroExtended(Register int32_register) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cond, AbortReason reason);
+ // Functions performing a check on a known or potential smi. Returns
+ // a condition that is satisfied if the check is successful.
+ Condition CheckSmi(Register src);
+
inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
void Trap();
@@ -642,6 +707,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define a call (BLR) target. The target also allows tail calls (via BR)
// when the target is x16 or x17.
inline void CallTarget();
+ // Define a jump/call target and bind a label.
+ inline void BindCallTarget(Label* label);
// Define a jump/call target.
inline void JumpOrCallTarget();
// Define a jump/call target and bind a label.
@@ -695,7 +762,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const VRegister& fm, Condition cond);
// Emits a runtime assert that the stack pointer is aligned.
- void AssertSpAligned() NOOP_UNLESS_DEBUG_CODE
+ void AssertSpAligned() NOOP_UNLESS_DEBUG_CODE;
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
@@ -747,7 +814,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
- inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
+ inline void Claim(const Register& count, uint64_t unit_size = kXRegSize,
+ bool assume_sp_aligned = true);
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
@@ -779,13 +847,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Abort execution if argument is not a positive or zero integer, enabled via
// --debug-code.
- void AssertPositiveOrZero(Register value) NOOP_UNLESS_DEBUG_CODE
+ void AssertPositiveOrZero(Register value) NOOP_UNLESS_DEBUG_CODE;
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
inline void FN(const REGTYPE REG, const MemOperand& addr);
- LS_MACRO_LIST(DECLARE_FUNCTION)
+ LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
+ // Caution: if {value} is a 32-bit negative int, it should be sign-extended
+ // to 64-bit before calling this function.
+ void Switch(Register scratch, Register value, int case_value_base,
+ Label** labels, int num_labels);
+
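
The caution on Switch matters because a 32-bit negative value that is merely zero-extended becomes a huge positive 64-bit index. A tiny standalone sketch of the difference:

// Small standalone sketch: a negative 32-bit value must be sign-extended
// before it is used as a 64-bit case index.
#include <cstdint>
#include <cstdio>

int64_t SignExtend32To64(uint32_t raw) {
  return static_cast<int64_t>(static_cast<int32_t>(raw));
}

int main() {
  uint32_t raw = 0xFFFFFFFE;  // the 32-bit pattern for -2
  std::printf("zero-extended: %lld\n", static_cast<long long>(raw));
  std::printf("sign-extended: %lld\n",
              static_cast<long long>(SignExtend32To64(raw)));
  return 0;
}
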
// Push or pop up to 4 registers of the same width to or from the stack.
//
// If an argument register is 'NoReg', all further arguments are also assumed
@@ -845,6 +918,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register object, Register slot_address, SaveFPRegsMode fp_mode,
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ inline void MoveHeapNumber(Register dst, double value);
+
// For a given |object| and |offset|:
// - Move |object| to |dst_object|.
// - Compute the address of the slot pointed to by |offset| in |object| and
@@ -917,6 +992,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
void LoadMap(Register dst, Register object);
+ void LoadCompressedMap(Register dst, Register object);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
@@ -953,12 +1029,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<CodeT> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Jump(const ExternalReference& reference);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode);
- void Call(Handle<CodeT> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void Call(ExternalReference target);
// Generate an indirect call (for when a direct call's range is not adequate).
@@ -973,36 +1049,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin, Condition cond = al);
- void LoadCodeObjectEntry(Register destination, Register code_object);
+ // Load code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
- // Load code entry point from the CodeDataContainer object.
- void LoadCodeDataContainerEntry(Register destination,
- Register code_data_container_object);
- // Load code entry point from the CodeDataContainer object and compute
- // Code object pointer out of it. Must not be used for CodeDataContainers
- // corresponding to builtins, because their entry points values point to
- // the embedded instruction stream in .text section.
- void LoadCodeDataContainerCodeNonBuiltin(Register destination,
- Register code_data_container_object);
- void CallCodeDataContainerObject(Register code_data_container_object);
- void JumpCodeDataContainerObject(Register code_data_container_object,
- JumpMode jump_mode = JumpMode::kJump);
-
- // Helper functions that dispatch either to Call/JumpCodeObject or to
- // Call/JumpCodeDataContainerObject.
- // TODO(v8:11880): remove since CodeT targets are now default.
- void LoadCodeTEntry(Register destination, Register code);
- void CallCodeTObject(Register code);
- void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
-
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
+ void BailoutIfDeoptimized();
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -1012,11 +1070,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_reg_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
@@ -1155,7 +1222,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
ucvtf(vd, vn, fbits);
}
- void AssertFPCRState(Register fpcr = NoReg) NOOP_UNLESS_DEBUG_CODE
+ void AssertFPCRState(Register fpcr = NoReg) NOOP_UNLESS_DEBUG_CODE;
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
@@ -1313,8 +1380,39 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
+#define DECLARE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const Register& rt, const MemOperand& src);
+ CAS_SINGLE_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define DECLARE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const Register& rs2, const Register& rt, \
+ const Register& rt2, const MemOperand& src);
+ CAS_PAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define DECLARE_LOAD_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const Register& rt, const MemOperand& src);
+#define DECLARE_STORE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const MemOperand& src);
+
+ ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
+ DECLARE_LOAD_FUNCTION, Ld, ld)
+ ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
+ DECLARE_STORE_FUNCTION, St, st)
+
+#define DECLARE_SWP_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const Register& rt, const MemOperand& src);
+
+ ATOMIC_MEMORY_LOAD_MACRO_MODES(DECLARE_SWP_FUNCTION, Swp, swp)
+
+#undef DECLARE_LOAD_FUNCTION
+#undef DECLARE_STORE_FUNCTION
+#undef DECLARE_SWP_FUNCTION
+
// Load an object from the root table.
void LoadRoot(Register destination, RootIndex index) final;
+ void LoadTaggedRoot(Register destination, RootIndex index);
void PushRoot(RootIndex index);
inline void Ret(const Register& xn = lr);
@@ -1389,14 +1487,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support
- // Loads a field containing a HeapObject and decompresses it if pointer
- // compression is enabled.
- void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand);
-
// Loads a field containing any tagged value and decompresses it if necessary.
- void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand);
+ void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand);
+
+ // Loads a field containing any tagged value but never decompresses it.
+ void LoadTaggedFieldWithoutDecompressing(const Register& destination,
+ const MemOperand& field_operand);
// Loads a field containing a tagged signed value and decompresses it if
// necessary.
@@ -1409,29 +1506,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand);
+ void StoreTwoTaggedFields(const Register& value,
+ const MemOperand& dst_field_operand);
+
+ // For compatibility with platform-independent code.
+ void StoreTaggedField(const MemOperand& dst_field_operand,
+ const Register& value) {
+ StoreTaggedField(value, dst_field_operand);
+ }
void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
const Register& dst_index, const Register& temp);
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
- void DecompressTaggedPointer(const Register& destination,
- const MemOperand& field_operand);
- void DecompressTaggedPointer(const Register& destination,
- const Register& source);
- void DecompressAnyTagged(const Register& destination,
- const MemOperand& field_operand);
+ void DecompressTagged(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressTagged(const Register& destination, const Register& source);
+ void DecompressTagged(const Register& destination, Tagged_t immediate);
void AtomicDecompressTaggedSigned(const Register& destination,
const Register& base, const Register& index,
const Register& temp);
- void AtomicDecompressTaggedPointer(const Register& destination,
- const Register& base,
- const Register& index,
- const Register& temp);
- void AtomicDecompressAnyTagged(const Register& destination,
- const Register& base, const Register& index,
- const Register& temp);
+ void AtomicDecompressTagged(const Register& destination, const Register& base,
+ const Register& index, const Register& temp);
// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.
@@ -1466,81 +1564,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
ExternalPointerTag tag,
Register isolate_root = Register::no_reg());
- protected:
- // The actual Push and Pop implementations. These don't generate any code
- // other than that required for the push or pop. This allows
- // (Push|Pop)CPURegList to bundle together run-time assertions for a large
- // block of registers.
- //
- // Note that size is per register, and is specified in bytes.
- void PushHelper(int count, int size, const CPURegister& src0,
- const CPURegister& src1, const CPURegister& src2,
- const CPURegister& src3);
- void PopHelper(int count, int size, const CPURegister& dst0,
- const CPURegister& dst1, const CPURegister& dst2,
- const CPURegister& dst3);
-
- void ConditionalCompareMacro(const Register& rn, const Operand& operand,
- StatusFlags nzcv, Condition cond,
- ConditionalCompareOp op);
-
- void AddSubWithCarryMacro(const Register& rd, const Register& rn,
- const Operand& operand, FlagsUpdate S,
- AddSubWithCarryOp op);
-
- // Call Printf. On a native build, a simple call will be generated, but if the
- // simulator is being used then a suitable pseudo-instruction is used. The
- // arguments and stack must be prepared by the caller as for a normal AAPCS64
- // call to 'printf'.
- //
- // The 'args' argument should point to an array of variable arguments in their
- // proper PCS registers (and in calling order). The argument registers can
- // have mixed types. The format string (x0) should not be included.
- void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
-
- private:
-#if DEBUG
- // Tell whether any of the macro instruction can be used. When false the
- // MacroAssembler will assert if a method which can emit a variable number
- // of instructions is called.
- bool allow_macro_instructions_ = true;
-#endif
-
- // Scratch registers available for use by the MacroAssembler.
- CPURegList tmp_list_ = DefaultTmpList();
- CPURegList fptmp_list_ = DefaultFPTmpList();
-
- // Helps resolve branching to labels potentially out of range.
- // If the label is not bound, it registers the information necessary to later
- // be able to emit a veneer for this branch if necessary.
- // If the label is bound, it returns true if the label (or the previous link
- // in the label chain) is out of range. In that case the caller is responsible
- // for generating appropriate code.
- // Otherwise it returns false.
- // This function also checks wether veneers need to be emitted.
- bool NeedExtraInstructionsOrRegisterBranch(Label* label,
- ImmBranchType branch_type);
-
- void Movi16bitHelper(const VRegister& vd, uint64_t imm);
- void Movi32bitHelper(const VRegister& vd, uint64_t imm);
- void Movi64bitHelper(const VRegister& vd, uint64_t imm);
-
- void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
- LoadStoreOp op);
-
- void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& addr, LoadStorePairOp op);
-
- int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
- byte* pc);
-
- void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
-};
-
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
// Instruction set functions ------------------------------------------------
// Logical macros.
inline void Bics(const Register& rd, const Register& rn,
@@ -1576,18 +1599,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Condition cond);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
- void Fcvtl(const VRegister& vd, const VRegister& vn) {
- DCHECK(allow_macro_instructions());
- fcvtl(vd, vn);
- }
void Fcvtl2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtl2(vd, vn);
}
- void Fcvtn(const VRegister& vd, const VRegister& vn) {
- DCHECK(allow_macro_instructions());
- fcvtn(vd, vn);
- }
void Fcvtn2(const VRegister& vd, const VRegister& vn) {
DCHECK(allow_macro_instructions());
fcvtn2(vd, vn);
@@ -1623,7 +1638,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DCHECK(allow_macro_instructions());
mvni(vd, imm8, shift, shift_amount);
}
- inline void Rev(const Register& rd, const Register& rn);
inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
@@ -1796,6 +1810,28 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
PopSizeRegList(regs, kSRegSizeInBits);
}
+ // These PushAll/PopAll respect the order of the registers in the stack from
+ // low index to high.
+ void PushAll(RegList registers);
+ void PopAll(RegList registers);
+
+ inline void PushAll(DoubleRegList registers,
+ int stack_slot_size = kDoubleSize) {
+ if (registers.Count() % 2 != 0) {
+ DCHECK(!registers.has(fp_zero));
+ registers.set(fp_zero);
+ }
+ PushDRegList(registers);
+ }
+ inline void PopAll(DoubleRegList registers,
+ int stack_slot_size = kDoubleSize) {
+ if (registers.Count() % 2 != 0) {
+ DCHECK(!registers.has(fp_zero));
+ registers.set(fp_zero);
+ }
+ PopDRegList(registers);
+ }
+
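
The DoubleRegList overloads of PushAll/PopAll pad an odd register count with fp_zero because arm64 pushes in stp/ldp pairs and sp must stay 16-byte aligned. The padding rule in isolation, as a trivial sketch with a placeholder filler name:

// Trivial standalone sketch of the padding rule: an odd count gets one filler
// register (fp_zero in the code above; the name below is a placeholder) so
// the pushes stay in pairs and sp stays 16-byte aligned.
#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> PadToEven(std::vector<std::string> regs,
                                   const std::string& filler) {
  if (regs.size() % 2 != 0) regs.push_back(filler);  // registers.set(fp_zero)
  return regs;
}

int main() {
  auto padded = PadToEven({"d0", "d1", "d2"}, "d29_zero");
  std::printf("slots pushed = %zu\n", padded.size());  // 4 -> two stp pairs
  return 0;
}
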
// Push the specified register 'count' times.
void PushMultipleTimes(CPURegister src, Register count);
@@ -1831,8 +1867,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void PopCalleeSavedRegisters();
// Tiering support.
+ inline void AssertFeedbackVector(Register object);
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
@@ -1856,8 +1893,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
- void JumpIfCodeTIsMarkedForDeoptimization(
- Register codet, Register scratch, Label* if_marked_for_deoptimization);
+ void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
+ Label* if_marked_for_deoptimization);
Operand ClearedValue() const;
Operand ReceiverOperand(const Register arg_count);
@@ -1866,52 +1903,51 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object,
- AbortReason reason = AbortReason::kOperandIsASmi)
- NOOP_UNLESS_DEBUG_CODE
+ // Abort execution if argument is not a Map, enabled via
+ // --debug-code.
+ void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE;
- // Abort execution if argument is not a CodeT, enabled via --debug-code.
- void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE
+ // Abort execution if argument is not a Code, enabled via
+ // --debug-code.
+ void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE;
- // Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ // Abort execution if argument is not a Constructor, enabled via
+ // --debug-code.
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
- // Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ // Abort execution if argument is not a JSFunction, enabled via
+ // --debug-code.
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
- // Abort execution if argument is not undefined or an AllocationSite, enabled
- // via --debug-code.
- void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE
+ // Abort execution if argument is not undefined or an AllocationSite,
+ // enabled via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE;
// ---- Calling / Jumping helpers ----
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
void TailCallRuntime(Runtime::FunctionId fid);
@@ -1920,9 +1956,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1949,7 +1982,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, Register expected_parameter_count,
Register actual_parameter_count, InvokeType type);
- // ---- Code generation helpers ----
+ // ---- InstructionStream generation helpers ----
// ---------------------------------------------------------------------------
// Support functions.
@@ -1963,6 +1996,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// other registers.
void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
+ // Variant of the above, which only guarantees to set the correct eq/ne flag.
+ // Neither map nor type_reg is guaranteed to be set to any particular value.
+ void IsObjectType(Register heap_object, Register scratch1, Register scratch2,
+ InstanceType type);
// Compare object type for heap object, and branch if equal (or not.)
// heap_object contains a non-Smi whose object type should be compared with
@@ -1975,6 +2012,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
InstanceType type, Label* if_cond_pass,
Condition cond = eq);
+ // Fast check if the object is a js receiver type. Assumes only primitive
+ // objects or js receivers are passed.
+ void JumpIfJSAnyIsNotPrimitive(
+ Register heap_object, Register scratch, Label* target,
+ Label::Distance distance = Label::kFar,
+ Condition condition = Condition::kUnsignedGreaterThanEqual);
+ void JumpIfJSAnyIsPrimitive(Register heap_object, Register scratch,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
+ return JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target, distance,
+ Condition::kUnsignedLessThan);
+ }
+
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -2009,9 +2059,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Frames.
- void ExitFramePreserveFPRegs();
- void ExitFrameRestoreFPRegs();
-
// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
//
@@ -2034,19 +2081,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
- void EnterExitFrame(bool save_doubles, const Register& scratch,
- int extra_space = 0,
- StackFrame::Type frame_type = StackFrame::EXIT);
+ void EnterExitFrame(const Register& scratch, int extra_space,
+ StackFrame::Type frame_type);
// Leave the current exit frame, after a C function has returned to generated
// (JavaScript) code.
//
// This effectively unwinds the operation of EnterExitFrame:
- // * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
- void LeaveExitFrame(bool save_doubles, const Register& scratch,
- const Register& scratch2);
+ void LeaveExitFrame(const Register& scratch, const Register& scratch2);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
@@ -2099,6 +2143,82 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LoadNativeContextSlot(Register dst, int index);
+ // Falls through and sets scratch_and_result to 0 on failure, jumps to
+ // on_result on success.
+ void TryLoadOptimizedOsrCode(Register scratch_and_result,
+ Register feedback_vector, FeedbackSlot slot,
+ Label* on_result, Label::Distance distance);
+
+ protected:
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size, const CPURegister& src0,
+ const CPURegister& src1, const CPURegister& src2,
+ const CPURegister& src3);
+ void PopHelper(int count, int size, const CPURegister& dst0,
+ const CPURegister& dst1, const CPURegister& dst2,
+ const CPURegister& dst3);
+
+ void ConditionalCompareMacro(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond,
+ ConditionalCompareOp op);
+
+ void AddSubWithCarryMacro(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack must be prepared by the caller as for a normal AAPCS64
+ // call to 'printf'.
+ //
+ // The 'args' argument should point to an array of variable arguments in their
+ // proper PCS registers (and in calling order). The argument registers can
+ // have mixed types. The format string (x0) should not be included.
+ void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
+
+ private:
+#if DEBUG
+ // Tells whether any of the macro instructions can be used. When false, the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ bool allow_macro_instructions_ = true;
+#endif
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_ = DefaultTmpList();
+ CPURegList fptmp_list_ = DefaultFPTmpList();
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound, it returns true if the label (or the previous link
+ // in the label chain) is out of range. In that case the caller is responsible
+ // for generating appropriate code.
+ // Otherwise it returns false.
+ // This function also checks whether veneers need to be emitted.
+ bool NeedExtraInstructionsOrRegisterBranch(Label* label,
+ ImmBranchType branch_type);
+
+ void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+ void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
+ LoadStoreOp op);
+
+ void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& addr, LoadStorePairOp op);
+
+ int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
+ byte* pc);
+
+ void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
@@ -2108,38 +2228,38 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// emitted is what you specified when creating the scope.
class V8_NODISCARD InstructionAccurateScope {
public:
- explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
- : tasm_(tasm),
- block_pool_(tasm, count * kInstrSize)
+ explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+ : masm_(masm),
+ block_pool_(masm, count * kInstrSize)
#ifdef DEBUG
,
size_(count * kInstrSize)
#endif
{
- tasm_->CheckVeneerPool(false, true, count * kInstrSize);
- tasm_->StartBlockVeneerPool();
+ masm_->CheckVeneerPool(false, true, count * kInstrSize);
+ masm_->StartBlockVeneerPool();
#ifdef DEBUG
if (count != 0) {
- tasm_->bind(&start_);
+ masm_->bind(&start_);
}
- previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
- tasm_->set_allow_macro_instructions(false);
+ previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+ masm_->set_allow_macro_instructions(false);
#endif
}
~InstructionAccurateScope() {
- tasm_->EndBlockVeneerPool();
+ masm_->EndBlockVeneerPool();
#ifdef DEBUG
if (start_.is_bound()) {
- DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
}
- tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+ masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
}
private:
- TurboAssembler* tasm_;
- TurboAssembler::BlockConstPoolScope block_pool_;
+ MacroAssembler* masm_;
+ MacroAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
size_t size_;
Label start_;
@@ -2148,7 +2268,7 @@ class V8_NODISCARD InstructionAccurateScope {
};
// This scope utility allows scratch registers to be managed safely. The
-// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
@@ -2158,9 +2278,9 @@ class V8_NODISCARD InstructionAccurateScope {
// order as the constructors. We do not have assertions for this.
class V8_NODISCARD UseScratchRegisterScope {
public:
- explicit UseScratchRegisterScope(TurboAssembler* tasm)
- : available_(tasm->TmpList()),
- availablefp_(tasm->FPTmpList()),
+ explicit UseScratchRegisterScope(MacroAssembler* masm)
+ : available_(masm->TmpList()),
+ availablefp_(masm->FPTmpList()),
old_available_(available_->bits()),
old_availablefp_(availablefp_->bits()) {
DCHECK_EQ(available_->type(), CPURegister::kRegister);
@@ -2218,6 +2338,12 @@ class V8_NODISCARD UseScratchRegisterScope {
}
void ExcludeFP(const VRegister& reg) { ExcludeFP(CPURegList(reg)); }
+ CPURegList* Available() { return available_; }
+ void SetAvailable(const CPURegList& list) { *available_ = list; }
+
+ CPURegList* AvailableFP() { return availablefp_; }
+ void SetAvailableFP(const CPURegList& list) { *availablefp_ = list; }
+
private:
V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
CPURegList* available);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 728ac559d5..10ce9986a1 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -35,7 +35,7 @@ namespace internal {
R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
R(x27)
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(R)
#else
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(R) R(x28)
@@ -45,6 +45,9 @@ namespace internal {
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
+#define MAGLEV_SCRATCH_GENERAL_REGISTERS(R) \
+ R(x16) R(x17)
+
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
@@ -76,6 +79,10 @@ namespace internal {
R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
R(d25) R(d26) R(d27) R(d28)
+
+#define MAGLEV_SCRATCH_DOUBLE_REGISTERS(R) \
+ R(d30) R(d31)
+
// clang-format on
// Some CPURegister methods can return Register and VRegister types, so we
@@ -184,6 +191,16 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
bool IsSameSizeAndType(const CPURegister& other) const;
+ constexpr bool IsEven() const { return (code() % 2) == 0; }
+
+ int MaxCode() const {
+ if (IsVRegister()) {
+ return kNumberOfVRegisters - 1;
+ }
+ DCHECK(IsRegister());
+ return kNumberOfRegisters - 1;
+ }
+
protected:
uint8_t reg_size_;
RegisterType reg_type_;
@@ -479,10 +496,10 @@ ALIAS_REGISTER(Register, wip1, w17);
ALIAS_REGISTER(Register, kRootRegister, x26);
ALIAS_REGISTER(Register, rr, x26);
// Pointer cage base register.
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, x28);
#else
-ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, kRootRegister);
+ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, no_reg);
#endif
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
@@ -529,6 +546,8 @@ V8_EXPORT_PRIVATE bool AreSameSizeAndType(
// AreSameFormat returns true if all of the specified VRegisters have the same
// vector format. Arguments set to NoVReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const Register& reg1, const Register& reg2,
+ const Register& reg3 = NoReg, const Register& reg4 = NoReg);
bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
@@ -537,10 +556,15 @@ bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
// consecutive in the register file. Arguments may be set to NoVReg, and if so,
// subsequent arguments must also be NoVReg. At least one argument (reg1) must
// be valid (not NoVReg).
-V8_EXPORT_PRIVATE bool AreConsecutive(const VRegister& reg1,
- const VRegister& reg2,
- const VRegister& reg3 = NoVReg,
- const VRegister& reg4 = NoVReg);
+V8_EXPORT_PRIVATE bool AreConsecutive(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg);
+
+bool AreEven(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg, const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg, const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg, const CPURegister& reg8 = NoReg);
using FloatRegister = VRegister;
using DoubleRegister = VRegister;
@@ -569,7 +593,6 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = x3;
constexpr Register kJavaScriptCallExtraArg1Register = x2;
-constexpr Register kOffHeapTrampolineRegister = ip0;
constexpr Register kRuntimeCallFunctionRegister = x1;
constexpr Register kRuntimeCallArgCountRegister = x0;
constexpr Register kRuntimeCallArgvRegister = x11;
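The predicates added above (CPURegister::IsEven(), MaxCode(), the CPURegister overload of AreConsecutive(), and AreEven()) are the kind of checks a pairing macro-instruction would assert before emitting a register pair. A hedged illustration, not part of the diff; the register choices are arbitrary examples:

    // Sketch: sanity checks before emitting an even/consecutive register pair.
    DCHECK(d4.IsEven());
    DCHECK(AreEven(d4, d6));             // every code in the list is even
    DCHECK(AreConsecutive(d4, d5));      // codes 4 and 5 are adjacent
    DCHECK_LE(d4.code(), d4.MaxCode());  // kNumberOfVRegisters - 1 for a VRegister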
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 961eb74e4d..30ec9118b9 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -85,15 +85,6 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
return options;
}
-AssemblerOptions AssemblerOptions::DefaultForOffHeapTrampoline(
- Isolate* isolate) {
- AssemblerOptions options = AssemblerOptions::Default(isolate);
- // Off-heap trampolines may not contain any metadata since their metadata
- // offsets refer to the off-heap metadata area.
- options.emit_code_comments = false;
- return options;
-}
-
namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
@@ -102,11 +93,11 @@ class DefaultAssemblerBuffer : public AssemblerBuffer {
: buffer_(base::OwnedVector<uint8_t>::NewForOverwrite(
std::max(AssemblerBase::kMinimalBufferSize, size))) {
#ifdef DEBUG
- ZapCode(reinterpret_cast<Address>(buffer_.start()), buffer_.size());
+ ZapCode(reinterpret_cast<Address>(buffer_.begin()), buffer_.size());
#endif
}
- byte* start() const override { return buffer_.start(); }
+ byte* start() const override { return buffer_.begin(); }
int size() const override { return static_cast<int>(buffer_.size()); }
@@ -264,7 +255,7 @@ void AssemblerBase::RequestHeapNumber(HeapNumberRequest request) {
heap_number_requests_.push_front(request);
}
-int AssemblerBase::AddCodeTarget(Handle<CodeT> target) {
+int AssemblerBase::AddCodeTarget(Handle<Code> target) {
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
@@ -276,7 +267,7 @@ int AssemblerBase::AddCodeTarget(Handle<CodeT> target) {
}
}
-Handle<CodeT> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
return code_targets_[code_target_index];
}
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 1073a9d4f6..e1ee4914e4 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -193,7 +193,6 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
bool emit_code_comments = v8_flags.code_comments;
static AssemblerOptions Default(Isolate* isolate);
- static AssemblerOptions DefaultForOffHeapTrampoline(Isolate* isolate);
};
class AssemblerBuffer {
@@ -323,7 +322,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
#ifdef V8_CODE_COMMENTS
class CodeComment {
public:
- explicit CodeComment(Assembler* assembler, const std::string& comment)
+ V8_NODISCARD CodeComment(Assembler* assembler, const std::string& comment)
: assembler_(assembler) {
if (v8_flags.code_comments) Open(comment);
}
@@ -340,7 +339,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
};
#else // V8_CODE_COMMENTS
class CodeComment {
- explicit CodeComment(Assembler* assembler, std::string comment) {}
+ V8_NODISCARD CodeComment(Assembler*, const std::string&) {}
};
#endif
@@ -355,8 +354,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
- int AddCodeTarget(Handle<CodeT> target);
- Handle<CodeT> GetCodeTarget(intptr_t code_target_index) const;
+ int AddCodeTarget(Handle<Code> target);
+ Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
// Add 'object' to the {embedded_objects_} vector and return the index at
// which it is stored.
@@ -412,7 +411,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// guaranteed to fit in the instruction's offset field. We keep track of the
// code handles we encounter in calls in this vector, and encode the index of
// the code handle in the vector instead.
- std::vector<Handle<CodeT>> code_targets_;
+ std::vector<Handle<Code>> code_targets_;
// If an assembler needs a small number to refer to a heap object handle
// (for example, because there are only 32bit available on a 64bit arch), the
@@ -483,12 +482,14 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
// Use this macro to mark functions that are only defined if
// V8_ENABLE_DEBUG_CODE is set, and are a no-op otherwise.
// Use like:
-// void AssertMyCondition() NOOP_UNLESS_DEBUG_CODE
+// void AssertMyCondition() NOOP_UNLESS_DEBUG_CODE;
#ifdef V8_ENABLE_DEBUG_CODE
-#define NOOP_UNLESS_DEBUG_CODE ;
+#define NOOP_UNLESS_DEBUG_CODE
#else
-#define NOOP_UNLESS_DEBUG_CODE \
- { static_assert(v8_flags.debug_code.value() == false); }
+#define NOOP_UNLESS_DEBUG_CODE \
+ { static_assert(v8_flags.debug_code.value() == false); } \
+ /* Dummy static_assert to swallow the semicolon after this macro */ \
+ static_assert(true)
#endif
} // namespace internal
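For clarity (not part of the diff): the revised macro moves the semicolon to the use site, so the same declaration reads identically in both configurations.

    // With V8_ENABLE_DEBUG_CODE defined, NOOP_UNLESS_DEBUG_CODE expands to
    // nothing, leaving a plain declaration:
    //   void AssertMyCondition() NOOP_UNLESS_DEBUG_CODE;
    //   // -> void AssertMyCondition();
    // Without it, the macro supplies a body containing only a static_assert,
    // and the trailing static_assert(true) exists solely to consume the
    // semicolon written at the use site:
    //   // -> void AssertMyCondition() {
    //   //      static_assert(v8_flags.debug_code.value() == false);
    //   //    }
    //   //    static_assert(true);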
diff --git a/deps/v8/src/codegen/background-merge-task.h b/deps/v8/src/codegen/background-merge-task.h
index c80624cd59..ad90ed36a3 100644
--- a/deps/v8/src/codegen/background-merge-task.h
+++ b/deps/v8/src/codegen/background-merge-task.h
@@ -25,14 +25,17 @@ struct ScriptDetails;
// compilation cache.
class V8_EXPORT_PRIVATE BackgroundMergeTask {
public:
- ~BackgroundMergeTask();
-
// Step 1: on the main thread, check whether the Isolate compilation cache
// contains the script.
void SetUpOnMainThread(Isolate* isolate, Handle<String> source_text,
const ScriptDetails& script_details,
LanguageMode language_mode);
+ // Alternative step 1: on the main thread, if the caller has already looked up
+ // the script in the Isolate compilation cache, set up the necessary
+ // persistent data for the background merge.
+ void SetUpOnMainThread(Isolate* isolate, Handle<Script> cached_script);
+
// Step 2: on the background thread, update pointers in the new Script's
// object graph to point to corresponding objects from the cached Script where
// appropriate. May only be called if HasPendingBackgroundWork returned true.
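A hedged sketch of the two entry points (not part of the diff); |isolate|, |source|, |details|, and |cached_script| are assumed to come from the surrounding compile pipeline, and the language mode is an arbitrary example:

    BackgroundMergeTask task;
    // (a) Original step 1: let the task query the Isolate compilation cache.
    task.SetUpOnMainThread(isolate, source, details, LanguageMode::kSloppy);
    // (b) New alternative step 1: the caller already found the cached Script,
    //     so only the persistent data for the merge needs to be set up.
    task.SetUpOnMainThread(isolate, cached_script);
    // Step 2 then runs on a background thread and redirects the new Script's
    // object graph at matching objects from |cached_script|.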
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index cdd9e12318..423dbf411d 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -25,6 +25,8 @@ namespace internal {
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedBaselineData, "Expected baseline data") \
+ V(kFloat64IsNotAInt32, \
+ "Float64 cannot be converted to Int32 without loss of precision") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kInputStringTooLong, "Input string too long") \
@@ -54,7 +56,8 @@ namespace internal {
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotACallableFunction, "Operand is not a callable function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
- V(kOperandIsNotACodeT, "Operand is not a CodeT") \
+ V(kOperandIsNotACode, "Operand is not a Code object") \
+ V(kOperandIsNotAMap, "Operand is not a Map object") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
@@ -64,6 +67,8 @@ namespace internal {
"Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
+ V(kUint32IsNotAInt32, \
+ "Uint32 cannot be converted to Int32 without loss of precision") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
V(kUnexpectedElementsKindInArrayConstructor, \
@@ -92,21 +97,23 @@ namespace internal {
V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \
- V(kUnexpectedThreadInWasmUnset, "thread_in_wasm flag was not set")
+ V(kUnexpectedThreadInWasmUnset, "thread_in_wasm flag was not set") \
+ V(kInvalidReceiver, "Expected JS object or primitive object")
-#define BAILOUT_MESSAGES_LIST(V) \
- V(kNoReason, "no reason") \
- \
- V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
- V(kCodeGenerationFailed, "Code generation failed") \
- V(kFunctionBeingDebugged, "Function is being debugged") \
- V(kGraphBuildingFailed, "Optimized graph construction failed") \
- V(kFunctionTooBig, "Function is too big to be optimized") \
- V(kTooManyArguments, "Function contains a call with too many arguments") \
- V(kLiveEdit, "LiveEdit") \
- V(kNativeFunctionLiteral, "Native function literal") \
- V(kOptimizationDisabled, "Optimization disabled") \
- V(kHigherTierAvailable, "A higher tier is already available") \
+#define BAILOUT_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
+ V(kConcurrentMapDeprecation, "Maps became deprecated during optimization") \
+ V(kCodeGenerationFailed, "Code generation failed") \
+ V(kFunctionBeingDebugged, "Function is being debugged") \
+ V(kGraphBuildingFailed, "Optimized graph construction failed") \
+ V(kFunctionTooBig, "Function is too big to be optimized") \
+ V(kTooManyArguments, "Function contains a call with too many arguments") \
+ V(kLiveEdit, "LiveEdit") \
+ V(kNativeFunctionLiteral, "Native function literal") \
+ V(kOptimizationDisabled, "Optimization disabled") \
+ V(kHigherTierAvailable, "A higher tier is already available") \
V(kNeverOptimize, "Optimization is always disabled")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
diff --git a/deps/v8/src/codegen/callable.h b/deps/v8/src/codegen/callable.h
index 79a70514af..40a238f486 100644
--- a/deps/v8/src/codegen/callable.h
+++ b/deps/v8/src/codegen/callable.h
@@ -11,19 +11,19 @@
namespace v8 {
namespace internal {
-class Code;
+class InstructionStream;
// Associates a body of code with an interface descriptor.
class Callable final {
public:
- Callable(Handle<CodeT> code, CallInterfaceDescriptor descriptor)
+ Callable(Handle<Code> code, CallInterfaceDescriptor descriptor)
: code_(code), descriptor_(descriptor) {}
- Handle<CodeT> code() const { return code_; }
+ Handle<Code> code() const { return code_; }
CallInterfaceDescriptor descriptor() const { return descriptor_; }
private:
- const Handle<CodeT> code_;
+ const Handle<Code> code_;
const CallInterfaceDescriptor descriptor_;
};
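Usage is unchanged apart from the handle type; a minimal sketch (not part of the diff), reusing the CallApiGetter builtin that appears later in this patch:

    Callable callable = Builtins::CallableFor(isolate, Builtin::kCallApiGetter);
    Handle<Code> code = callable.code();              // previously Handle<CodeT>
    CallInterfaceDescriptor descriptor = callable.descriptor();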
diff --git a/deps/v8/src/codegen/code-comments.h b/deps/v8/src/codegen/code-comments.h
index 1c5189aa9a..330b2bb0d6 100644
--- a/deps/v8/src/codegen/code-comments.h
+++ b/deps/v8/src/codegen/code-comments.h
@@ -17,7 +17,7 @@ namespace internal {
class Assembler;
-// Code comments section layout:
+// InstructionStream comments section layout:
// byte count content
// ------------------------------------------------------------------------
// 4 size as uint32_t (only for a check)
diff --git a/deps/v8/src/codegen/code-desc.h b/deps/v8/src/codegen/code-desc.h
index e051bb459c..7aed2eb962 100644
--- a/deps/v8/src/codegen/code-desc.h
+++ b/deps/v8/src/codegen/code-desc.h
@@ -63,7 +63,7 @@ class CodeDesc {
int code_comments_size = 0;
// TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have
- // been made consistent with Code layout.
+ // been made consistent with InstructionStream layout.
int body_size() const { return instr_size + unwinding_info_size; }
int instruction_size() const { return safepoint_table_offset; }
int metadata_size() const { return body_size() - instruction_size(); }
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index c611445512..e3f3fe79e7 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -14,60 +14,35 @@ namespace v8 {
namespace internal {
// static
-Handle<CodeT> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
+Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
return CodeFactory::CEntry(isolate, result_size);
}
-#define CENTRY_CODE(RS, SD, AM, BE) \
- BUILTIN_CODE(isolate, CEntry_##RS##_##SD##_##AM##_##BE)
-
// static
-Handle<CodeT> CodeFactory::CEntry(Isolate* isolate, int result_size,
- SaveFPRegsMode save_doubles,
- ArgvMode argv_mode, bool builtin_exit_frame) {
+Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Aliases for readability below.
const int rs = result_size;
- const SaveFPRegsMode sd = save_doubles;
const ArgvMode am = argv_mode;
const bool be = builtin_exit_frame;
- if (rs == 1 && sd == SaveFPRegsMode::kIgnore && am == ArgvMode::kStack &&
- !be) {
- return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore &&
- am == ArgvMode::kStack && be) {
- return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore &&
- am == ArgvMode::kRegister && !be) {
- return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit);
- } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
- !be) {
- return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
- be) {
- return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
- am == ArgvMode::kStack && !be) {
- return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
- am == ArgvMode::kStack && be) {
- return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
- am == ArgvMode::kRegister && !be) {
- return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit);
- } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
- !be) {
- return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
- be) {
- return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, BuiltinExit);
+ if (rs == 1 && am == ArgvMode::kStack && !be) {
+ return BUILTIN_CODE(isolate, CEntry_Return1_ArgvOnStack_NoBuiltinExit);
+ } else if (rs == 1 && am == ArgvMode::kStack && be) {
+ return BUILTIN_CODE(isolate, CEntry_Return1_ArgvOnStack_BuiltinExit);
+ } else if (rs == 1 && am == ArgvMode::kRegister && !be) {
+ return BUILTIN_CODE(isolate, CEntry_Return1_ArgvInRegister_NoBuiltinExit);
+ } else if (rs == 2 && am == ArgvMode::kStack && !be) {
+ return BUILTIN_CODE(isolate, CEntry_Return2_ArgvOnStack_NoBuiltinExit);
+ } else if (rs == 2 && am == ArgvMode::kStack && be) {
+ return BUILTIN_CODE(isolate, CEntry_Return2_ArgvOnStack_BuiltinExit);
+ } else if (rs == 2 && am == ArgvMode::kRegister && !be) {
+ return BUILTIN_CODE(isolate, CEntry_Return2_ArgvInRegister_NoBuiltinExit);
}
UNREACHABLE();
}
-#undef CENTRY_CODE
-
// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
return Builtins::CallableFor(isolate, Builtin::kCallApiGetter);
@@ -279,10 +254,8 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
// static
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
- // Note: If we ever use fpregs in the interpreter then we will need to
- // save fpregs too.
- Handle<CodeT> code = CodeFactory::CEntry(
- isolate, result_size, SaveFPRegsMode::kIgnore, ArgvMode::kRegister);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate, result_size, ArgvMode::kRegister);
if (result_size == 1) {
return Callable(code, InterpreterCEntry1Descriptor{});
} else {
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index 937ad2e5b4..1c33265498 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -26,12 +26,11 @@ class V8_EXPORT_PRIVATE CodeFactory final {
// stack and the arguments count is passed via register) which currently
// can't be expressed in CallInterfaceDescriptor. Therefore only the code
// is exported here.
- static Handle<CodeT> RuntimeCEntry(Isolate* isolate, int result_size = 1);
+ static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
- static Handle<CodeT> CEntry(
- Isolate* isolate, int result_size = 1,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
- ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false);
+ static Handle<Code> CEntry(Isolate* isolate, int result_size = 1,
+ ArgvMode argv_mode = ArgvMode::kStack,
+ bool builtin_exit_frame = false);
// Initial states for ICs.
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
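With SaveFPRegsMode gone, a CEntry stub is now chosen purely by result size, argv mode, and exit-frame kind. A sketch mirroring the InterpreterCEntry call in code-factory.cc above (not part of the diff):

    Handle<Code> centry =
        CodeFactory::CEntry(isolate, /*result_size=*/1, ArgvMode::kRegister,
                            /*builtin_exit_frame=*/false);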
diff --git a/deps/v8/src/codegen/code-reference.cc b/deps/v8/src/codegen/code-reference.cc
index 9dc0f46f7a..df7a7cd57f 100644
--- a/deps/v8/src/codegen/code-reference.cc
+++ b/deps/v8/src/codegen/code-reference.cc
@@ -18,9 +18,8 @@ namespace internal {
namespace {
-template <typename CodeOrCodeT>
-struct CodeOrCodeTOps {
- Handle<CodeOrCodeT> code;
+struct CodeOps {
+ Handle<Code> code;
Address constant_pool() const { return code->constant_pool(); }
Address instruction_start() const { return code->InstructionStart(); }
@@ -33,9 +32,6 @@ struct CodeOrCodeTOps {
int code_comments_size() const { return code->code_comments_size(); }
};
-using CodeOps = CodeOrCodeTOps<Code>;
-using CodeTOps = CodeOrCodeTOps<CodeT>;
-
#if V8_ENABLE_WEBASSEMBLY
struct WasmCodeOps {
const wasm::WasmCode* code;
@@ -98,9 +94,6 @@ struct CodeDescOps {
switch (kind_) { \
case Kind::CODE: \
return CodeOps{code_}.method(); \
- case Kind::CODET: \
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL); \
- return CodeTOps{codet_}.method(); \
case Kind::WASM_CODE: \
HANDLE_WASM(return WasmCodeOps{wasm_code_}.method()); \
case Kind::CODE_DESC: \
diff --git a/deps/v8/src/codegen/code-reference.h b/deps/v8/src/codegen/code-reference.h
index 3f74da92e2..8ba313aec1 100644
--- a/deps/v8/src/codegen/code-reference.h
+++ b/deps/v8/src/codegen/code-reference.h
@@ -12,8 +12,8 @@
namespace v8 {
namespace internal {
+class InstructionStream;
class Code;
-class CodeDataContainer;
class CodeDesc;
namespace wasm {
@@ -28,10 +28,6 @@ class CodeReference {
explicit CodeReference(const CodeDesc* code_desc)
: kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
explicit CodeReference(Handle<Code> code) : kind_(Kind::CODE), code_(code) {}
-#ifdef V8_EXTERNAL_CODE_SPACE
- explicit CodeReference(Handle<CodeT> codet)
- : kind_(Kind::CODET), codet_(codet) {}
-#endif // V8_EXTERNAL_CODE_SPACE
Address constant_pool() const;
Address instruction_start() const;
@@ -45,7 +41,6 @@ class CodeReference {
bool is_null() const { return kind_ == Kind::NONE; }
bool is_code() const { return kind_ == Kind::CODE; }
- bool is_codet() const { return kind_ == Kind::CODET; }
bool is_wasm_code() const { return kind_ == Kind::WASM_CODE; }
Handle<Code> as_code() const {
@@ -53,26 +48,18 @@ class CodeReference {
return code_;
}
-#ifdef V8_EXTERNAL_CODE_SPACE
- Handle<CodeT> as_codet() const {
- DCHECK_EQ(Kind::CODET, kind_);
- return codet_;
- }
-#endif // V8_EXTERNAL_CODE_SPACE
-
const wasm::WasmCode* as_wasm_code() const {
DCHECK_EQ(Kind::WASM_CODE, kind_);
return wasm_code_;
}
private:
- enum class Kind { NONE, CODE, CODET, WASM_CODE, CODE_DESC } kind_;
+ enum class Kind { NONE, CODE, WASM_CODE, CODE_DESC } kind_;
union {
std::nullptr_t null_;
const wasm::WasmCode* wasm_code_;
const CodeDesc* code_desc_;
Handle<Code> code_;
- Handle<CodeT> codet_;
};
DISALLOW_NEW_AND_DELETE()
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index b90f235acc..c5fdaac240 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -25,16 +25,37 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-cell.h"
+#include "src/objects/property-descriptor-object.h"
#include "src/roots/roots.h"
namespace v8 {
namespace internal {
+namespace {
+
+Builtin BigIntComparisonBuiltinOf(Operation const& op) {
+ switch (op) {
+ case Operation::kLessThan:
+ return Builtin::kBigIntLessThan;
+ case Operation::kGreaterThan:
+ return Builtin::kBigIntGreaterThan;
+ case Operation::kLessThanOrEqual:
+ return Builtin::kBigIntLessThanOrEqual;
+ case Operation::kGreaterThanOrEqual:
+ return Builtin::kBigIntGreaterThanOrEqual;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state),
TorqueGeneratedExportedMacrosAssembler(state) {
@@ -356,197 +377,279 @@ TNode<Float64T> CodeStubAssembler::Float64Round(TNode<Float64T> x) {
}
TNode<Float64T> CodeStubAssembler::Float64Ceil(TNode<Float64T> x) {
- if (IsFloat64RoundUpSupported()) {
- return Float64RoundUp(x);
- }
-
- TNode<Float64T> one = Float64Constant(1.0);
- TNode<Float64T> zero = Float64Constant(0.0);
- TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
- TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
TVARIABLE(Float64T, var_x, x);
- Label return_x(this), return_minus_x(this);
-
- // Check if {x} is greater than zero.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- BIND(&if_xgreaterthanzero);
+ Label round_op_supported(this), round_op_fallback(this), return_x(this);
+ // Use UniqueInt32Constant instead of BoolConstant here in order to ensure
+ // that the graph structure does not depend on the value of the predicate
+ // (BoolConstant uses cached nodes).
+ Branch(UniqueInt32Constant(IsFloat64RoundUpSupported()), &round_op_supported,
+ &round_op_fallback);
+
+ BIND(&round_op_supported);
{
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards Infinity.
- var_x = Float64Sub(Float64Add(two_52, x), two_52);
- GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
- var_x = Float64Add(var_x.value(), one);
+ // This optional operation is used behind a static check and we rely
+ // on the dead code elimination to remove this unused unsupported
+ // instruction. We generate builtins this way in order to ensure that
+ // builtins PGO profiles are interchangeable between architectures.
+ var_x = Float64RoundUp(x);
Goto(&return_x);
}
- BIND(&if_xnotgreaterthanzero);
+ BIND(&round_op_fallback);
{
- // Just return {x} unless it's in the range ]-2^52,0[
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoIfNot(Float64LessThan(x, zero), &return_x);
+ TNode<Float64T> one = Float64Constant(1.0);
+ TNode<Float64T> zero = Float64Constant(0.0);
+ TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
+ TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
- // Round negated {x} towards Infinity and return the result negated.
- TNode<Float64T> minus_x = Float64Neg(x);
- var_x = Float64Sub(Float64Add(two_52, minus_x), two_52);
- GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
- var_x = Float64Sub(var_x.value(), one);
- Goto(&return_minus_x);
- }
+ Label return_minus_x(this);
- BIND(&return_minus_x);
- var_x = Float64Neg(var_x.value());
- Goto(&return_x);
+ // Check if {x} is greater than zero.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+ BIND(&if_xgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards Infinity.
+ var_x = Float64Sub(Float64Add(two_52, x), two_52);
+ GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
+ var_x = Float64Add(var_x.value(), one);
+ Goto(&return_x);
+ }
+
+ BIND(&if_xnotgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]-2^52,0[
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoIfNot(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards Infinity and return the result negated.
+ TNode<Float64T> minus_x = Float64Neg(x);
+ var_x = Float64Sub(Float64Add(two_52, minus_x), two_52);
+ GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ var_x = Float64Sub(var_x.value(), one);
+ Goto(&return_minus_x);
+ }
+
+ BIND(&return_minus_x);
+ var_x = Float64Neg(var_x.value());
+ Goto(&return_x);
+ }
BIND(&return_x);
return var_x.value();
}
TNode<Float64T> CodeStubAssembler::Float64Floor(TNode<Float64T> x) {
- if (IsFloat64RoundDownSupported()) {
- return Float64RoundDown(x);
- }
-
- TNode<Float64T> one = Float64Constant(1.0);
- TNode<Float64T> zero = Float64Constant(0.0);
- TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
- TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
TVARIABLE(Float64T, var_x, x);
- Label return_x(this), return_minus_x(this);
-
- // Check if {x} is greater than zero.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- BIND(&if_xgreaterthanzero);
+ Label round_op_supported(this), round_op_fallback(this), return_x(this);
+ // Use UniqueInt32Constant instead of BoolConstant here in order to ensure
+ // that the graph structure does not depend on the value of the predicate
+ // (BoolConstant uses cached nodes).
+ Branch(UniqueInt32Constant(IsFloat64RoundDownSupported()),
+ &round_op_supported, &round_op_fallback);
+
+ BIND(&round_op_supported);
{
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards -Infinity.
- var_x = Float64Sub(Float64Add(two_52, x), two_52);
- GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
- var_x = Float64Sub(var_x.value(), one);
+ // This optional operation is used behind a static check and we rely
+ // on the dead code elimination to remove this unused unsupported
+ // instruction. We generate builtins this way in order to ensure that
+ // builtins PGO profiles are interchangeable between architectures.
+ var_x = Float64RoundDown(x);
Goto(&return_x);
}
- BIND(&if_xnotgreaterthanzero);
+ BIND(&round_op_fallback);
{
- // Just return {x} unless it's in the range ]-2^52,0[
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoIfNot(Float64LessThan(x, zero), &return_x);
+ TNode<Float64T> one = Float64Constant(1.0);
+ TNode<Float64T> zero = Float64Constant(0.0);
+ TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
+ TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
- // Round negated {x} towards -Infinity and return the result negated.
- TNode<Float64T> minus_x = Float64Neg(x);
- var_x = Float64Sub(Float64Add(two_52, minus_x), two_52);
- GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
- var_x = Float64Add(var_x.value(), one);
- Goto(&return_minus_x);
- }
+ Label return_minus_x(this);
- BIND(&return_minus_x);
- var_x = Float64Neg(var_x.value());
- Goto(&return_x);
+ // Check if {x} is greater than zero.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+ BIND(&if_xgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards -Infinity.
+ var_x = Float64Sub(Float64Add(two_52, x), two_52);
+ GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
+ var_x = Float64Sub(var_x.value(), one);
+ Goto(&return_x);
+ }
+
+ BIND(&if_xnotgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]-2^52,0[
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoIfNot(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards -Infinity and return the result negated.
+ TNode<Float64T> minus_x = Float64Neg(x);
+ var_x = Float64Sub(Float64Add(two_52, minus_x), two_52);
+ GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+ var_x = Float64Add(var_x.value(), one);
+ Goto(&return_minus_x);
+ }
+
+ BIND(&return_minus_x);
+ var_x = Float64Neg(var_x.value());
+ Goto(&return_x);
+ }
BIND(&return_x);
return var_x.value();
}
TNode<Float64T> CodeStubAssembler::Float64RoundToEven(TNode<Float64T> x) {
- if (IsFloat64RoundTiesEvenSupported()) {
- return Float64RoundTiesEven(x);
- }
- // See ES#sec-touint8clamp for details.
- TNode<Float64T> f = Float64Floor(x);
- TNode<Float64T> f_and_half = Float64Add(f, Float64Constant(0.5));
-
TVARIABLE(Float64T, var_result);
- Label return_f(this), return_f_plus_one(this), done(this);
-
- GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
- GotoIf(Float64LessThan(x, f_and_half), &return_f);
+ Label round_op_supported(this), round_op_fallback(this), done(this);
+ // Use UniqueInt32Constant instead of BoolConstant here in order to ensure
+ // that the graph structure does not depend on the value of the predicate
+ // (BoolConstant uses cached nodes).
+ Branch(UniqueInt32Constant(IsFloat64RoundTiesEvenSupported()),
+ &round_op_supported, &round_op_fallback);
+
+ BIND(&round_op_supported);
{
- TNode<Float64T> f_mod_2 = Float64Mod(f, Float64Constant(2.0));
- Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
- &return_f_plus_one);
+ // This optional operation is used behind a static check and we rely
+ // on the dead code elimination to remove this unused unsupported
+ // instruction. We generate builtins this way in order to ensure that
+ // builtins PGO profiles are interchangeable between architectures.
+ var_result = Float64RoundTiesEven(x);
+ Goto(&done);
}
- BIND(&return_f);
- var_result = f;
- Goto(&done);
+ BIND(&round_op_fallback);
+ {
+ // See ES#sec-touint8clamp for details.
+ TNode<Float64T> f = Float64Floor(x);
+ TNode<Float64T> f_and_half = Float64Add(f, Float64Constant(0.5));
- BIND(&return_f_plus_one);
- var_result = Float64Add(f, Float64Constant(1.0));
- Goto(&done);
+ Label return_f(this), return_f_plus_one(this);
+
+ GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
+ GotoIf(Float64LessThan(x, f_and_half), &return_f);
+ {
+ TNode<Float64T> f_mod_2 = Float64Mod(f, Float64Constant(2.0));
+ Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
+ &return_f_plus_one);
+ }
+
+ BIND(&return_f);
+ var_result = f;
+ Goto(&done);
+ BIND(&return_f_plus_one);
+ var_result = Float64Add(f, Float64Constant(1.0));
+ Goto(&done);
+ }
BIND(&done);
return var_result.value();
}
TNode<Float64T> CodeStubAssembler::Float64Trunc(TNode<Float64T> x) {
- if (IsFloat64RoundTruncateSupported()) {
- return Float64RoundTruncate(x);
- }
-
- TNode<Float64T> one = Float64Constant(1.0);
- TNode<Float64T> zero = Float64Constant(0.0);
- TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
- TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
TVARIABLE(Float64T, var_x, x);
- Label return_x(this), return_minus_x(this);
-
- // Check if {x} is greater than 0.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- BIND(&if_xgreaterthanzero);
+ Label trunc_op_supported(this), trunc_op_fallback(this), return_x(this);
+ // Use UniqueInt32Constant instead of BoolConstant here in order to ensure
+ // that the graph structure does not depend on the value of the predicate
+ // (BoolConstant uses cached nodes).
+ Branch(UniqueInt32Constant(IsFloat64RoundTruncateSupported()),
+ &trunc_op_supported, &trunc_op_fallback);
+
+ BIND(&trunc_op_supported);
{
- if (IsFloat64RoundDownSupported()) {
- var_x = Float64RoundDown(x);
- } else {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards -Infinity.
- var_x = Float64Sub(Float64Add(two_52, x), two_52);
- GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
- var_x = Float64Sub(var_x.value(), one);
- }
+ // This optional operation is used behind a static check and we rely
+ // on the dead code elimination to remove this unused unsupported
+ // instruction. We generate builtins this way in order to ensure that
+ // builtins PGO profiles are interchangeable between architectures.
+ var_x = Float64RoundTruncate(x);
Goto(&return_x);
}
- BIND(&if_xnotgreaterthanzero);
+ BIND(&trunc_op_fallback);
{
- if (IsFloat64RoundUpSupported()) {
- var_x = Float64RoundUp(x);
- Goto(&return_x);
- } else {
- // Just return {x} unless its in the range ]-2^52,0[.
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoIfNot(Float64LessThan(x, zero), &return_x);
+ TNode<Float64T> one = Float64Constant(1.0);
+ TNode<Float64T> zero = Float64Constant(0.0);
+ TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0);
+ TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0);
- // Round negated {x} towards -Infinity and return result negated.
- TNode<Float64T> minus_x = Float64Neg(x);
- var_x = Float64Sub(Float64Add(two_52, minus_x), two_52);
- GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
- var_x = Float64Sub(var_x.value(), one);
- Goto(&return_minus_x);
+ Label return_minus_x(this);
+
+ // Check if {x} is greater than 0.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ BIND(&if_xgreaterthanzero);
+ {
+ Label round_op_supported(this), round_op_fallback(this);
+ Branch(UniqueInt32Constant(IsFloat64RoundDownSupported()),
+ &round_op_supported, &round_op_fallback);
+ BIND(&round_op_supported);
+ {
+ // This optional operation is used behind a static check and we rely
+ // on the dead code elimination to remove this unused unsupported
+ // instruction. We generate builtins this way in order to ensure that
+ // builtins PGO profiles are interchangeable between architectures.
+ var_x = Float64RoundDown(x);
+ Goto(&return_x);
+ }
+ BIND(&round_op_fallback);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards -Infinity.
+ var_x = Float64Sub(Float64Add(two_52, x), two_52);
+ GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
+ var_x = Float64Sub(var_x.value(), one);
+ Goto(&return_x);
+ }
}
- }
- BIND(&return_minus_x);
- var_x = Float64Neg(var_x.value());
- Goto(&return_x);
+ BIND(&if_xnotgreaterthanzero);
+ {
+ Label round_op_supported(this), round_op_fallback(this);
+ Branch(UniqueInt32Constant(IsFloat64RoundUpSupported()),
+ &round_op_supported, &round_op_fallback);
+ BIND(&round_op_supported);
+ {
+ // This optional operation is used behind a static check and we rely
+ // on the dead code elimination to remove this unused unsupported
+ // instruction. We generate builtins this way in order to ensure that
+ // builtins PGO profiles are interchangeable between architectures.
+ var_x = Float64RoundUp(x);
+ Goto(&return_x);
+ }
+ BIND(&round_op_fallback);
+ {
+      // Just return {x} unless it's in the range ]-2^52,0[.

+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoIfNot(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards -Infinity and return result negated.
+ TNode<Float64T> minus_x = Float64Neg(x);
+ var_x = Float64Sub(Float64Add(two_52, minus_x), two_52);
+ GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ var_x = Float64Sub(var_x.value(), one);
+ Goto(&return_minus_x);
+ }
+ }
+ BIND(&return_minus_x);
+ var_x = Float64Neg(var_x.value());
+ Goto(&return_x);
+ }
BIND(&return_x);
return var_x.value();
}
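The rewritten rounding helpers above all follow one pattern: branch on UniqueInt32Constant(<static predicate>) so both the hardware path and the software fallback are generated, with the dead arm removed later by dead code elimination. A condensed sketch of that pattern (not part of the diff), written as a fragment of a hypothetical CodeStubAssembler member and using only the CSA primitives that appear in this hunk; the fallback body is elided:

    TVARIABLE(Float64T, var_result, x);
    Label supported(this), fallback(this), done(this);
    // UniqueInt32Constant rather than BoolConstant: BoolConstant returns a
    // cached node, which would make the graph shape depend on the predicate.
    Branch(UniqueInt32Constant(IsFloat64RoundUpSupported()), &supported,
           &fallback);
    BIND(&supported);
    {
      var_result = Float64RoundUp(x);  // dead-code-eliminated if unsupported
      Goto(&done);
    }
    BIND(&fallback);
    {
      // ... portable bit-trick rounding, as in Float64Ceil above ...
      Goto(&done);
    }
    BIND(&done);
    return var_result.value();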
@@ -1220,12 +1323,27 @@ void CodeStubAssembler::BranchIfJSReceiver(TNode<Object> object, Label* if_true,
void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
- const TNode<ExternalReference> force_slow_path_addr =
- ExternalConstant(ExternalReference::force_slow_path(isolate()));
- const TNode<Uint8T> force_slow = Load<Uint8T>(force_slow_path_addr);
-
- GotoIf(force_slow, if_true);
+ bool enable_force_slow_path = true;
+#else
+ bool enable_force_slow_path = false;
#endif
+
+ Label done(this);
+ // Use UniqueInt32Constant instead of BoolConstant here in order to ensure
+ // that the graph structure does not depend on the value of the predicate
+ // (BoolConstant uses cached nodes).
+ GotoIf(UniqueInt32Constant(!enable_force_slow_path), &done);
+ {
+ // This optional block is used behind a static check and we rely
+ // on the dead code elimination to remove it. We generate builtins this
+ // way in order to ensure that builtins PGO profiles are agnostic to
+ // V8_ENABLE_FORCE_SLOW_PATH value.
+ const TNode<ExternalReference> force_slow_path_addr =
+ ExternalConstant(ExternalReference::force_slow_path(isolate()));
+ const TNode<Uint8T> force_slow = Load<Uint8T>(force_slow_path_addr);
+ Branch(force_slow, if_true, &done);
+ }
+ BIND(&done);
}
TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
@@ -1613,28 +1731,28 @@ TNode<RawPtrT> CodeStubAssembler::ExternalPointerTableAddress(
TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
TNode<HeapObject> object, TNode<IntPtrT> offset, ExternalPointerTag tag) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- TNode<RawPtrT> external_pointer_table_address =
- ExternalPointerTableAddress(tag);
- TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
- Load(MachineType::Pointer(), external_pointer_table_address,
- UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
-
- TNode<ExternalPointerHandleT> handle =
- LoadObjectField<ExternalPointerHandleT>(object, offset);
- TNode<Uint32T> index =
- Word32Shr(handle, Uint32Constant(kExternalPointerIndexShift));
- // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code
- // that does one shift right instead of two shifts (right and then left).
- TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
- ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
-
- TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset);
- entry = UncheckedCast<UintPtrT>(WordAnd(entry, UintPtrConstant(~tag)));
- return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
- }
-#endif // V8_ENABLE_SANDBOX
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ TNode<RawPtrT> external_pointer_table_address =
+ ExternalPointerTableAddress(tag);
+ TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+
+ TNode<ExternalPointerHandleT> handle =
+ LoadObjectField<ExternalPointerHandleT>(object, offset);
+ TNode<Uint32T> index =
+ Word32Shr(handle, Uint32Constant(kExternalPointerIndexShift));
+ // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code
+ // that does one shift right instead of two shifts (right and then left).
+ TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
+ ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
+
+ TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset);
+ entry = UncheckedCast<UintPtrT>(WordAnd(entry, UintPtrConstant(~tag)));
+ return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
+#else
return LoadObjectField<RawPtrT>(object, offset);
+#endif // V8_ENABLE_SANDBOX
}
void CodeStubAssembler::StoreExternalPointerToObject(TNode<HeapObject> object,
@@ -1642,30 +1760,29 @@ void CodeStubAssembler::StoreExternalPointerToObject(TNode<HeapObject> object,
TNode<RawPtrT> pointer,
ExternalPointerTag tag) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- TNode<RawPtrT> external_pointer_table_address =
- ExternalPointerTableAddress(tag);
- TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
- Load(MachineType::Pointer(), external_pointer_table_address,
- UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
-
- TNode<ExternalPointerHandleT> handle =
- LoadObjectField<ExternalPointerHandleT>(object, offset);
- TNode<Uint32T> index =
- Word32Shr(handle, Uint32Constant(kExternalPointerIndexShift));
- // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code
- // that does one shift right instead of two shifts (right and then left).
- TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
- ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
-
- TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer);
- value = UncheckedCast<UintPtrT>(WordOr(pointer, UintPtrConstant(tag)));
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), table,
- table_offset, value);
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ TNode<RawPtrT> external_pointer_table_address =
+ ExternalPointerTableAddress(tag);
+ TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+
+ TNode<ExternalPointerHandleT> handle =
+ LoadObjectField<ExternalPointerHandleT>(object, offset);
+ TNode<Uint32T> index =
+ Word32Shr(handle, Uint32Constant(kExternalPointerIndexShift));
+ // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code
+ // that does one shift right instead of two shifts (right and then left).
+ TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
+ ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
+
+ TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer);
+ value = UncheckedCast<UintPtrT>(WordOr(pointer, UintPtrConstant(tag)));
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset,
+ value);
+#else
StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
+#endif // V8_ENABLE_SANDBOX
}
TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
@@ -1678,6 +1795,13 @@ TNode<Uint8T> CodeStubAssembler::LoadUint8Ptr(TNode<RawPtrT> ptr,
return Load<Uint8T>(IntPtrAdd(ReinterpretCast<IntPtrT>(ptr), offset));
}
+TNode<Uint64T> CodeStubAssembler::LoadUint64Ptr(TNode<RawPtrT> ptr,
+ TNode<IntPtrT> index) {
+ return Load<Uint64T>(
+ IntPtrAdd(ReinterpretCast<IntPtrT>(ptr),
+ IntPtrMul(index, IntPtrConstant(sizeof(uint64_t)))));
+}
+
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<HeapObject> object, int offset) {
// Please use LoadMap(object) instead.
@@ -1714,10 +1838,9 @@ TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
}
TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
- Handle<Map> map_handle(
- Map::GetInstanceTypeMap(ReadOnlyRoots(isolate()), instance_type),
- isolate());
- return HeapConstant(map_handle);
+ RootIndex map_idx = Map::TryGetMapRootIdxFor(instance_type).value();
+ return HeapConstant(
+ Handle<Map>::cast(ReadOnlyRoots(isolate()).handle_at(map_idx)));
}
TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
@@ -1738,11 +1861,25 @@ TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) {
TNode<BoolT> CodeStubAssembler::HasInstanceType(TNode<HeapObject> object,
InstanceType instance_type) {
+ if (V8_STATIC_ROOTS_BOOL) {
+ if (base::Optional<RootIndex> expected_map =
+ InstanceTypeChecker::UniqueMapOfInstanceType(instance_type)) {
+ TNode<Map> map = LoadMap(object);
+ return TaggedEqual(map, LoadRoot(*expected_map));
+ }
+ }
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType(
TNode<HeapObject> object, InstanceType instance_type) {
+ if (V8_STATIC_ROOTS_BOOL) {
+ if (base::Optional<RootIndex> expected_map =
+ InstanceTypeChecker::UniqueMapOfInstanceType(instance_type)) {
+ TNode<Map> map = LoadMap(object);
+ return TaggedNotEqual(map, LoadRoot(*expected_map));
+ }
+ }
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
}
@@ -1796,7 +1933,7 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
CSA_SLOW_DCHECK(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
NodeGenerator<HeapObject> make_empty = [=]() -> TNode<HeapObject> {
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
return EmptySwissPropertyDictionaryConstant();
} else {
return EmptyPropertyDictionaryConstant();
@@ -1804,7 +1941,7 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
};
NodeGenerator<HeapObject> cast_properties = [=] {
TNode<HeapObject> dict = CAST(properties);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(dict),
IsGlobalDictionary(dict)));
} else {
@@ -1996,7 +2133,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
&if_property_array);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
GotoIf(
InstanceTypeEqual(properties_instance_type, SWISS_NAME_DICTIONARY_TYPE),
&if_swiss_property_dictionary);
@@ -2023,7 +2160,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
var_hash = Signed(DecodeWord<PropertyArray::HashField>(length_and_hash));
Goto(&done);
}
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
BIND(&if_swiss_property_dictionary);
{
var_hash = Signed(
@@ -2244,8 +2381,9 @@ TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(
}
}
-TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
- return ReinterpretCast<MaybeObject>(BitcastWordToTagged(
+TNode<HeapObjectReference> CodeStubAssembler::MakeWeak(
+ TNode<HeapObject> value) {
+ return ReinterpretCast<HeapObjectReference>(BitcastWordToTagged(
WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag))));
}
@@ -3029,20 +3167,19 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
Label check_for_interpreter_data(this, &var_result);
Label done(this, &var_result);
- GotoIfNot(HasInstanceType(var_result.value(), CODET_TYPE),
+ GotoIfNot(HasInstanceType(var_result.value(), CODE_TYPE),
&check_for_interpreter_data);
{
- TNode<CodeT> code = CAST(var_result.value());
+ TNode<Code> code = CAST(var_result.value());
#ifdef DEBUG
TNode<Int32T> code_flags =
- LoadObjectField<Int32T>(code, CodeT::kFlagsOffset);
+ LoadObjectField<Int32T>(code, Code::kFlagsOffset);
CSA_DCHECK(
- this, Word32Equal(DecodeWord32<CodeT::KindField>(code_flags),
+ this, Word32Equal(DecodeWord32<Code::KindField>(code_flags),
Int32Constant(static_cast<int>(CodeKind::BASELINE))));
#endif // DEBUG
TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
- FromCodeTNonBuiltin(code),
- Code::kDeoptimizationDataOrInterpreterDataOffset);
+ code, Code::kDeoptimizationDataOrInterpreterDataOffset);
var_result = baseline_data;
}
Goto(&check_for_interpreter_data);
@@ -3112,22 +3249,15 @@ void CodeStubAssembler::StoreSharedObjectField(TNode<HeapObject> object,
TNode<Object> value) {
CSA_DCHECK(
this,
- WordNotEqual(WordAnd(LoadBasicMemoryChunkFlags(object),
- IntPtrConstant(BasicMemoryChunk::IN_SHARED_HEAP)),
- IntPtrConstant(0)));
- // JSSharedStructs are allocated in the shared old space, which is currently
- // collected by stopping the world, so the incremental write barrier is not
- // needed. They can only store Smis and other HeapObjects in the shared old
- // space, so the generational write barrier is also not needed.
- // TODO(v8:12547): Add a safer, shared variant of NoWriteBarrier instead of
- // using Unsafe.
+ WordNotEqual(
+ WordAnd(LoadBasicMemoryChunkFlags(object),
+ IntPtrConstant(BasicMemoryChunk::IN_WRITABLE_SHARED_SPACE)),
+ IntPtrConstant(0)));
int const_offset;
if (TryToInt32Constant(offset, &const_offset)) {
- UnsafeStoreObjectFieldNoWriteBarrier(object, const_offset, value);
+ StoreObjectField(object, const_offset, value);
} else {
- UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object,
- IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
- value);
+ Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
}
}
@@ -3617,6 +3747,10 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
return EmptyStringConstant();
}
TNode<HeapObject> result = Allocate(SeqOneByteString::SizeFor(length), flags);
+ StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, result,
+ IntPtrConstant(SeqOneByteString::SizeFor(length) -
+ kObjectAlignment - kHeapObjectTag),
+ SmiConstant(0));
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kOneByteStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
@@ -3639,6 +3773,10 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
return EmptyStringConstant();
}
TNode<HeapObject> result = Allocate(SeqTwoByteString::SizeFor(length), flags);
+ StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, result,
+ IntPtrConstant(SeqTwoByteString::SizeFor(length) -
+ kObjectAlignment - kHeapObjectTag),
+ SmiConstant(0));
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
@@ -3727,6 +3865,9 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex,
SmiConstant(PropertyArray::kNoHashSentinel),
SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kFlagsIndex,
+ SmiConstant(NameDictionary::kFlagsDefault),
+ SKIP_WRITE_BARRIER);
}
// Initialize NameDictionary elements.
@@ -4040,7 +4181,7 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
value);
},
- kTaggedSize, IndexAdvanceMode::kPost);
+ kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
}
void CodeStubAssembler::MakeFixedArrayCOW(TNode<FixedArray> array) {
@@ -4784,7 +4925,8 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(
[this, value](TNode<HeapObject> array, TNode<IntPtrT> offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
- });
+ },
+ LoopUnrollingMode::kYes);
}
template <typename TIndex>
@@ -4819,7 +4961,8 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
}
- });
+ },
+ LoopUnrollingMode::kYes);
}
template V8_EXPORT_PRIVATE void
@@ -5003,6 +5146,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
{
// Make a loop for the stores.
BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ LoopUnrollingMode::kYes,
ForEachDirection::kForward);
Goto(&finished);
}
@@ -5010,6 +5154,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
BIND(&iterate_backward);
{
BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ LoopUnrollingMode::kYes,
ForEachDirection::kReverse);
Goto(&finished);
}
@@ -5094,7 +5239,7 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
Store(dst_elements, delta_offset, element);
}
},
- ForEachDirection::kForward);
+ LoopUnrollingMode::kYes, ForEachDirection::kForward);
Goto(&finished);
}
BIND(&finished);
@@ -5328,7 +5473,8 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, offset,
value);
}
- });
+ },
+ LoopUnrollingMode::kYes);
#ifdef DEBUG
// Zap {from_array} if the copying above has made it invalid.
@@ -5595,11 +5741,11 @@ TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(TNode<Context> context,
// or find that it is a BigInt and jump to {if_bigint}.
void CodeStubAssembler::TaggedToWord32OrBigInt(
TNode<Context> context, TNode<Object> value, Label* if_number,
- TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Word32T>* var_word32, Label* if_bigint, Label* if_bigint64,
TVariable<BigInt>* var_maybe_bigint) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, value, if_number, var_word32, IsKnownTaggedPointer::kNo,
- if_bigint, var_maybe_bigint);
+ if_bigint, if_bigint64, var_maybe_bigint);
}
// Truncate {value} to word32 and jump to {if_number} if it is a Number,
@@ -5607,11 +5753,11 @@ void CodeStubAssembler::TaggedToWord32OrBigInt(
// store the type feedback in {var_feedback}.
void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
TNode<Context> context, TNode<Object> value, Label* if_number,
- TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Word32T>* var_word32, Label* if_bigint, Label* if_bigint64,
TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, value, if_number, var_word32, IsKnownTaggedPointer::kNo,
- if_bigint, var_maybe_bigint, var_feedback);
+ if_bigint, if_bigint64, var_maybe_bigint, var_feedback);
}
// Truncate {pointer} to word32 and jump to {if_number} if it is a Number,
@@ -5619,11 +5765,11 @@ void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
// store the type feedback in {var_feedback}.
void CodeStubAssembler::TaggedPointerToWord32OrBigIntWithFeedback(
TNode<Context> context, TNode<HeapObject> pointer, Label* if_number,
- TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Word32T>* var_word32, Label* if_bigint, Label* if_bigint64,
TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, pointer, if_number, var_word32, IsKnownTaggedPointer::kYes,
- if_bigint, var_maybe_bigint, var_feedback);
+ if_bigint, if_bigint64, var_maybe_bigint, var_feedback);
}
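// Illustrative sketch (not part of the patch): how a caller can wire the new
// {if_bigint64} outcome of the conversion helpers above. The label and
// variable names are hypothetical; a 64-bit fast path would BIND the extra
// label and operate on the BigInt's single digit directly.
Label if_number(this), if_bigint(this), if_bigint64(this);
TVARIABLE(Word32T, var_word32);
TVARIABLE(BigInt, var_bigint);
TVARIABLE(Smi, var_feedback);
TaggedToWord32OrBigIntWithFeedback(context, value, &if_number, &var_word32,
                                   &if_bigint, &if_bigint64, &var_bigint,
                                   &var_feedback);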
template <Object::Conversion conversion>
@@ -5631,7 +5777,8 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
TNode<Context> context, TNode<Object> value, Label* if_number,
TVariable<Word32T>* var_word32,
IsKnownTaggedPointer is_known_tagged_pointer, Label* if_bigint,
- TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
+ Label* if_bigint64, TVariable<BigInt>* var_maybe_bigint,
+ TVariable<Smi>* var_feedback) {
// We might need to loop after conversion.
TVARIABLE(Object, var_value, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone);
@@ -5652,14 +5799,18 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
{
value = var_value.value();
Label not_smi(this), is_heap_number(this), is_oddball(this),
- is_bigint(this), check_if_smi(this);
+ maybe_bigint64(this), is_bigint(this), check_if_smi(this);
TNode<HeapObject> value_heap_object = CAST(value);
TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &is_heap_number);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
if (conversion == Object::Conversion::kToNumeric) {
- GotoIf(IsBigIntInstanceType(instance_type), &is_bigint);
+ if (Is64() && if_bigint64) {
+ GotoIf(IsBigIntInstanceType(instance_type), &maybe_bigint64);
+ } else {
+ GotoIf(IsBigIntInstanceType(instance_type), &is_bigint);
+ }
}
// Not HeapNumber (or BigInt if conversion == kToNumeric).
@@ -5693,8 +5844,20 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
Goto(if_number);
if (conversion == Object::Conversion::kToNumeric) {
+ if (Is64() && if_bigint64) {
+ BIND(&maybe_bigint64);
+ GotoIfLargeBigInt(CAST(value), &is_bigint);
+ if (var_maybe_bigint) {
+ *var_maybe_bigint = CAST(value);
+ }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt64);
+ Goto(if_bigint64);
+ }
+
BIND(&is_bigint);
- *var_maybe_bigint = CAST(value);
+ if (var_maybe_bigint) {
+ *var_maybe_bigint = CAST(value);
+ }
CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(if_bigint);
}
@@ -6181,12 +6344,7 @@ void CodeStubAssembler::ThrowIfNotJSReceiver(TNode<Context> context,
GotoIf(TaggedIsSmi(value), &throw_exception);
- // Load the instance type of the {value}.
- TNode<Map> value_map = LoadMap(CAST(value));
- const TNode<Uint16T> value_instance_type = LoadMapInstanceType(value_map);
-
- Branch(IsJSReceiverInstanceType(value_instance_type), &done,
- &throw_exception);
+ Branch(JSAnyIsNotPrimitive(CAST(value)), &done, &throw_exception);
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
@@ -6388,6 +6546,14 @@ TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
return TaggedEqual(cell_value, invalid);
}
+TNode<BoolT>
+CodeStubAssembler::IsNumberStringPrototypeNoReplaceProtectorCellInvalid() {
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = NumberStringPrototypeNoReplaceProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return TaggedEqual(cell_value, invalid);
+}
+
TNode<BoolT> CodeStubAssembler::IsPrototypeInitialArrayPrototype(
TNode<Context> context, TNode<Map> map) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -6557,10 +6723,33 @@ TNode<BoolT> CodeStubAssembler::IsJSReceiverMap(TNode<Map> map) {
return IsJSReceiverInstanceType(LoadMapInstanceType(map));
}
+TNode<BoolT> CodeStubAssembler::JSAnyIsNotPrimitiveMap(TNode<Map> map) {
+#if V8_STATIC_ROOTS_BOOL
+ // Assuming this is only called with primitive objects or JS receivers.
+ CSA_DCHECK(this, Word32Or(IsPrimitiveInstanceType(LoadMapInstanceType(map)),
+ IsJSReceiverMap(map)));
+ // All primitive objects' maps are allocated at the start of the read-only
+ // heap. Thus JS_RECEIVER maps must have larger (compressed) addresses.
+ return Uint32GreaterThanOrEqual(
+ TruncateIntPtrToInt32(BitcastTaggedToWord(map)),
+ Int32Constant(InstanceTypeChecker::kNonJsReceiverMapLimit));
+#else
+ return IsJSReceiverMap(map);
+#endif
+}
+
TNode<BoolT> CodeStubAssembler::IsJSReceiver(TNode<HeapObject> object) {
return IsJSReceiverMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::JSAnyIsNotPrimitive(TNode<HeapObject> object) {
+#if V8_STATIC_ROOTS_BOOL
+ return JSAnyIsNotPrimitiveMap(LoadMap(object));
+#else
+ return IsJSReceiver(object);
+#endif
+}
+
TNode<BoolT> CodeStubAssembler::IsNullOrJSReceiver(TNode<HeapObject> object) {
return UncheckedCast<BoolT>(Word32Or(IsJSReceiver(object), IsNull(object)));
}
@@ -6687,6 +6876,12 @@ TNode<BoolT> CodeStubAssembler::IsJSArrayIterator(TNode<HeapObject> object) {
return HasInstanceType(object, JS_ARRAY_ITERATOR_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsAlwaysSharedSpaceJSObjectInstanceType(
+ TNode<Int32T> instance_type) {
+ return IsInRange(instance_type, FIRST_ALWAYS_SHARED_SPACE_JS_OBJECT_TYPE,
+ LAST_ALWAYS_SHARED_SPACE_JS_OBJECT_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSSharedArrayInstanceType(
TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_SHARED_ARRAY_TYPE);
@@ -7040,6 +7235,10 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
return HasInstanceType(object, JS_DATA_VIEW_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSRabGsabDataView(TNode<HeapObject> object) {
+ return HasInstanceType(object, JS_RAB_GSAB_DATA_VIEW_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSRegExp(TNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_TYPE);
}
@@ -7525,8 +7724,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input,
BIND(&store_to_cache);
{
// Generate string and update string hash field.
- result = NumberToStringSmi(SmiToInt32(smi_input.value()),
- Int32Constant(10), bailout);
+ result = IntToDecimalString(SmiToInt32(smi_input.value()));
// Store string into cache.
StoreFixedArrayElement(number_string_cache, entry_index,
@@ -7849,61 +8047,81 @@ TNode<BigInt> CodeStubAssembler::ToBigInt(TNode<Context> context,
return var_result.value();
}
-void CodeStubAssembler::TaggedToNumeric(TNode<Context> context,
- TNode<Object> value,
- TVariable<Numeric>* var_numeric) {
- TaggedToNumeric(context, value, var_numeric, nullptr);
-}
+TNode<BigInt> CodeStubAssembler::ToBigIntConvertNumber(TNode<Context> context,
+ TNode<Object> input) {
+ TVARIABLE(BigInt, var_result);
+ Label if_bigint(this), if_not_bigint(this), done(this);
+
+ GotoIf(TaggedIsSmi(input), &if_not_bigint);
+ GotoIf(IsBigInt(CAST(input)), &if_bigint);
+ Goto(&if_not_bigint);
+
+ BIND(&if_bigint);
+ var_result = CAST(input);
+ Goto(&done);
+
+ BIND(&if_not_bigint);
+ var_result =
+ CAST(CallRuntime(Runtime::kToBigIntConvertNumber, context, input));
+ Goto(&done);
-void CodeStubAssembler::TaggedToNumericWithFeedback(
- TNode<Context> context, TNode<Object> value,
- TVariable<Numeric>* var_numeric, TVariable<Smi>* var_feedback) {
- DCHECK_NOT_NULL(var_feedback);
- TaggedToNumeric(context, value, var_numeric, var_feedback);
+ BIND(&done);
+ return var_result.value();
}
-void CodeStubAssembler::TaggedToNumeric(TNode<Context> context,
- TNode<Object> value,
- TVariable<Numeric>* var_numeric,
- TVariable<Smi>* var_feedback) {
- Label done(this), if_smi(this), if_heapnumber(this), if_bigint(this),
- if_oddball(this);
- GotoIf(TaggedIsSmi(value), &if_smi);
+void CodeStubAssembler::TaggedToBigInt(TNode<Context> context,
+ TNode<Object> value,
+ Label* if_not_bigint, Label* if_bigint,
+ Label* if_bigint64,
+ TVariable<BigInt>* var_bigint,
+ TVariable<Smi>* var_feedback) {
+ Label done(this), is_smi(this), is_heapnumber(this), maybe_bigint64(this),
+ is_bigint(this), is_oddball(this);
+ GotoIf(TaggedIsSmi(value), &is_smi);
TNode<HeapObject> heap_object_value = CAST(value);
TNode<Map> map = LoadMap(heap_object_value);
- GotoIf(IsHeapNumberMap(map), &if_heapnumber);
+ GotoIf(IsHeapNumberMap(map), &is_heapnumber);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
- GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
+ if (Is64() && if_bigint64) {
+ GotoIf(IsBigIntInstanceType(instance_type), &maybe_bigint64);
+ } else {
+ GotoIf(IsBigIntInstanceType(instance_type), &is_bigint);
+ }
// {heap_object_value} is not a Numeric yet.
- GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
- *var_numeric = CAST(
+ GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &is_oddball);
+ TNode<Numeric> numeric_value = CAST(
CallBuiltin(Builtin::kNonNumberToNumeric, context, heap_object_value));
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
- Goto(&done);
+ GotoIf(TaggedIsSmi(numeric_value), if_not_bigint);
+ GotoIfNot(IsBigInt(CAST(numeric_value)), if_not_bigint);
+ *var_bigint = CAST(numeric_value);
+ Goto(if_bigint);
- BIND(&if_smi);
- *var_numeric = CAST(value);
+ BIND(&is_smi);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
- Goto(&done);
+ Goto(if_not_bigint);
- BIND(&if_heapnumber);
- *var_numeric = CAST(value);
+ BIND(&is_heapnumber);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumber);
- Goto(&done);
+ Goto(if_not_bigint);
- BIND(&if_bigint);
- *var_numeric = CAST(value);
+ if (Is64() && if_bigint64) {
+ BIND(&maybe_bigint64);
+ GotoIfLargeBigInt(CAST(value), &is_bigint);
+ *var_bigint = CAST(value);
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kBigInt64);
+ Goto(if_bigint64);
+ }
+
+ BIND(&is_bigint);
+ *var_bigint = CAST(value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
- Goto(&done);
+ Goto(if_bigint);
- BIND(&if_oddball);
+ BIND(&is_oddball);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumberOrOddball);
- *var_numeric =
- CAST(LoadObjectField(heap_object_value, Oddball::kToNumberOffset));
- Goto(&done);
-
- Bind(&done);
+ Goto(if_not_bigint);
}
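// Illustrative sketch (not part of the patch): a BigInt builtin consuming the
// three outcomes of TaggedToBigInt. Label and variable names are hypothetical;
// {if_bigint64} is only ever reached on 64-bit targets, for values that fit in
// a single digit, so that path may use raw-word arithmetic.
Label if_not_bigint(this), if_bigint(this), if_bigint64(this);
TVARIABLE(BigInt, var_bigint);
TVARIABLE(Smi, var_feedback);
TaggedToBigInt(context, value, &if_not_bigint, &if_bigint, &if_bigint64,
               &var_bigint, &var_feedback);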
// ES#sec-touint32
@@ -8237,9 +8455,6 @@ void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
GotoIf(InstanceTypeEqual(var_instance_type.value(), THIN_STRING_TYPE),
&if_thinstring);
- GotoIf(InstanceTypeEqual(var_instance_type.value(),
- THIN_ONE_BYTE_STRING_TYPE),
- &if_thinstring);
// Check if the hash field encodes an internalized string forwarding
// index.
@@ -8767,30 +8982,27 @@ void CodeStubAssembler::NameDictionaryLookup(
// memory features turned on. To minimize affecting the fast path, the
// forwarding index branch defers both fetching the actual hash value and
// the dictionary lookup to the runtime.
- ExternalReference func_ref;
+ using ER = ExternalReference; // To avoid super long lines below.
+ ER func_ref;
if constexpr (std::is_same<Dictionary, NameDictionary>::value) {
func_ref =
mode == kFindExisting
- ? ExternalReference::name_dictionary_lookup_forwarded_string()
- : ExternalReference::
- name_dictionary_find_insertion_entry_forwarded_string();
+ ? ER::name_dictionary_lookup_forwarded_string()
+ : ER::name_dictionary_find_insertion_entry_forwarded_string();
} else if constexpr (std::is_same<Dictionary, GlobalDictionary>::value) {
func_ref =
mode == kFindExisting
- ? ExternalReference::global_dictionary_lookup_forwarded_string()
- : ExternalReference::
- global_dictionary_find_insertion_entry_forwarded_string();
+ ? ER::global_dictionary_lookup_forwarded_string()
+ : ER::global_dictionary_find_insertion_entry_forwarded_string();
} else {
- func_ref =
- mode == kFindExisting
- ? ExternalReference::
- name_to_index_hashtable_lookup_forwarded_string()
- : ExternalReference::
- name_to_index_hashtable_find_insertion_entry_forwarded_string();
+ auto ref0 = ER::name_to_index_hashtable_lookup_forwarded_string();
+ auto ref1 =
+ ER::name_to_index_hashtable_find_insertion_entry_forwarded_string();
+ func_ref = mode == kFindExisting ? ref0 : ref1;
}
- const TNode<ExternalReference> function = ExternalConstant(func_ref);
- const TNode<ExternalReference> isolate_ptr =
- ExternalConstant(ExternalReference::isolate_address(isolate()));
+ const TNode<ER> function = ExternalConstant(func_ref);
+ const TNode<ER> isolate_ptr =
+ ExternalConstant(ER::isolate_address(isolate()));
TNode<IntPtrT> entry = UncheckedCast<IntPtrT>(CallCFunction(
function, MachineType::IntPtr(),
std::make_pair(MachineType::Pointer(), isolate_ptr),
@@ -9082,6 +9294,34 @@ template TNode<Smi> CodeStubAssembler::GetNumberOfElements(
template TNode<Smi> CodeStubAssembler::GetNumberOfElements(
TNode<GlobalDictionary> dictionary);
+template <>
+TNode<Smi> CodeStubAssembler::GetNameDictionaryFlags(
+ TNode<NameDictionary> dictionary) {
+ return CAST(LoadFixedArrayElement(dictionary, NameDictionary::kFlagsIndex));
+}
+
+template <>
+void CodeStubAssembler::SetNameDictionaryFlags(TNode<NameDictionary> dictionary,
+ TNode<Smi> flags) {
+ StoreFixedArrayElement(dictionary, NameDictionary::kFlagsIndex, flags,
+ SKIP_WRITE_BARRIER);
+}
+
+template <>
+TNode<Smi> CodeStubAssembler::GetNameDictionaryFlags(
+ TNode<SwissNameDictionary> dictionary) {
+ // TODO(pthier): Add flags to swiss dictionaries.
+ Unreachable();
+ return SmiConstant(0);
+}
+
+template <>
+void CodeStubAssembler::SetNameDictionaryFlags(
+ TNode<SwissNameDictionary> dictionary, TNode<Smi> flags) {
+ // TODO(pthier): Add flags to swiss dictionaries.
+ Unreachable();
+}
+
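// Illustrative sketch (not part of the patch): reading and updating the new
// flags word on an ordinary NameDictionary; {dictionary} and the mask are
// placeholders. The SwissNameDictionary specializations deliberately hit
// Unreachable() until flags are added there.
TNode<Smi> flags = GetNameDictionaryFlags(dictionary);
SetNameDictionaryFlags(dictionary, SmiOr(flags, SmiConstant(1)));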
template <typename Array>
void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
TNode<Array> array,
@@ -9110,7 +9350,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
*var_name_index = name_index;
GotoIf(TaggedEqual(candidate_name, unique_name), if_found);
},
- -Array::kEntrySize, IndexAdvanceMode::kPre);
+ -Array::kEntrySize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPre);
Goto(if_not_found);
}
@@ -9437,7 +9677,8 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
}
BIND(&next_iteration);
},
- DescriptorArray::kEntrySize, IndexAdvanceMode::kPost);
+ DescriptorArray::kEntrySize, LoopUnrollingMode::kNo,
+ IndexAdvanceMode::kPost);
if (mode == kEnumerationOrder) {
Label done(this);
@@ -9753,18 +9994,8 @@ TNode<Object> CodeStubAssembler::CreateAsyncFromSyncIterator(
const TNode<Object> next =
GetProperty(context, sync_iterator, factory()->next_string());
-
- const TNode<NativeContext> native_context = LoadNativeContext(context);
- const TNode<Map> map = CAST(LoadContextElement(
- native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
- const TNode<JSObject> iterator = AllocateJSObjectFromMap(map);
-
- StoreObjectFieldNoWriteBarrier(
- iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
- StoreObjectFieldNoWriteBarrier(iterator, JSAsyncFromSyncIterator::kNextOffset,
- next);
-
- return_value = iterator;
+ return_value =
+ CreateAsyncFromSyncIterator(context, CAST(sync_iterator), next);
Goto(&done);
BIND(&not_receiver);
@@ -9779,6 +10010,21 @@ TNode<Object> CodeStubAssembler::CreateAsyncFromSyncIterator(
return return_value.value();
}
+TNode<JSObject> CodeStubAssembler::CreateAsyncFromSyncIterator(
+ TNode<Context> context, TNode<JSReceiver> sync_iterator,
+ TNode<Object> next) {
+ const TNode<NativeContext> native_context = LoadNativeContext(context);
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
+ const TNode<JSObject> iterator = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
+ StoreObjectFieldNoWriteBarrier(iterator, JSAsyncFromSyncIterator::kNextOffset,
+ next);
+ return iterator;
+}
+
void CodeStubAssembler::LoadPropertyFromFastObject(
TNode<HeapObject> object, TNode<Map> map,
TNode<DescriptorArray> descriptors, TNode<IntPtrT> name_index,
@@ -10148,6 +10394,130 @@ void CodeStubAssembler::TryGetOwnProperty(
}
}
+void CodeStubAssembler::InitializePropertyDescriptorObject(
+ TNode<PropertyDescriptorObject> descriptor, TNode<Object> value,
+ TNode<Uint32T> details, Label* if_bailout) {
+ Label if_data_property(this), if_accessor_property(this),
+ test_configurable(this), test_property_type(this), done(this);
+ TVARIABLE(Smi, flags,
+ SmiConstant(PropertyDescriptorObject::HasEnumerableBit::kMask |
+ PropertyDescriptorObject::HasConfigurableBit::kMask));
+
+ { // test enumerable
+ TNode<Uint32T> dont_enum =
+ Uint32Constant(DONT_ENUM << PropertyDetails::AttributesField::kShift);
+ GotoIf(Word32And(details, dont_enum), &test_configurable);
+ flags =
+ SmiOr(flags.value(),
+ SmiConstant(PropertyDescriptorObject::IsEnumerableBit::kMask));
+ Goto(&test_configurable);
+ }
+
+ BIND(&test_configurable);
+ {
+ TNode<Uint32T> dont_delete =
+ Uint32Constant(DONT_DELETE << PropertyDetails::AttributesField::kShift);
+ GotoIf(Word32And(details, dont_delete), &test_property_type);
+ flags =
+ SmiOr(flags.value(),
+ SmiConstant(PropertyDescriptorObject::IsConfigurableBit::kMask));
+ Goto(&test_property_type);
+ }
+
+ BIND(&test_property_type);
+ BranchIfAccessorPair(value, &if_accessor_property, &if_data_property);
+
+ BIND(&if_accessor_property);
+ {
+ Label done_get(this), store_fields(this);
+ TNode<AccessorPair> accessor_pair = CAST(value);
+
+ auto BailoutIfTemplateInfo = [this, &if_bailout](TNode<HeapObject> value) {
+ TVARIABLE(HeapObject, result);
+
+ Label bind_undefined(this), return_result(this);
+ GotoIf(IsNull(value), &bind_undefined);
+ result = value;
+ TNode<Map> map = LoadMap(value);
+ // TODO(ishell): probe template instantiations cache.
+ GotoIf(IsFunctionTemplateInfoMap(map), if_bailout);
+ Goto(&return_result);
+
+ BIND(&bind_undefined);
+ result = UndefinedConstant();
+ Goto(&return_result);
+
+ BIND(&return_result);
+ return result.value();
+ };
+
+ TNode<HeapObject> getter =
+ LoadObjectField<HeapObject>(accessor_pair, AccessorPair::kGetterOffset);
+ TNode<HeapObject> setter =
+ LoadObjectField<HeapObject>(accessor_pair, AccessorPair::kSetterOffset);
+ getter = BailoutIfTemplateInfo(getter);
+ setter = BailoutIfTemplateInfo(setter);
+
+ Label bind_undefined(this, Label::kDeferred), return_result(this);
+ flags = SmiOr(flags.value(),
+ SmiConstant(PropertyDescriptorObject::HasGetBit::kMask |
+ PropertyDescriptorObject::HasSetBit::kMask));
+ StoreObjectField(descriptor, PropertyDescriptorObject::kFlagsOffset,
+ flags.value());
+ StoreObjectField(descriptor, PropertyDescriptorObject::kValueOffset,
+ NullConstant());
+ StoreObjectField(descriptor, PropertyDescriptorObject::kGetOffset, getter);
+ StoreObjectField(descriptor, PropertyDescriptorObject::kSetOffset, setter);
+ Goto(&done);
+ }
+
+ BIND(&if_data_property);
+ {
+ Label store_fields(this);
+ flags = SmiOr(flags.value(),
+ SmiConstant(PropertyDescriptorObject::HasValueBit::kMask |
+ PropertyDescriptorObject::HasWritableBit::kMask));
+ TNode<Uint32T> read_only =
+ Uint32Constant(READ_ONLY << PropertyDetails::AttributesField::kShift);
+ GotoIf(Word32And(details, read_only), &store_fields);
+ flags = SmiOr(flags.value(),
+ SmiConstant(PropertyDescriptorObject::IsWritableBit::kMask));
+ Goto(&store_fields);
+
+ BIND(&store_fields);
+ StoreObjectField(descriptor, PropertyDescriptorObject::kFlagsOffset,
+ flags.value());
+ StoreObjectField(descriptor, PropertyDescriptorObject::kValueOffset, value);
+ StoreObjectField(descriptor, PropertyDescriptorObject::kGetOffset,
+ NullConstant());
+ StoreObjectField(descriptor, PropertyDescriptorObject::kSetOffset,
+ NullConstant());
+ Goto(&done);
+ }
+
+ BIND(&done);
+}
+
+TNode<PropertyDescriptorObject>
+CodeStubAssembler::AllocatePropertyDescriptorObject(TNode<Context> context) {
+ TNode<HeapObject> result = Allocate(PropertyDescriptorObject::kSize);
+ TNode<Map> map = GetInstanceTypeMap(PROPERTY_DESCRIPTOR_OBJECT_TYPE);
+ StoreMapNoWriteBarrier(result, map);
+ TNode<Smi> zero = SmiConstant(0);
+ StoreObjectFieldNoWriteBarrier(result, PropertyDescriptorObject::kFlagsOffset,
+ zero);
+ TNode<Oddball> the_hole = TheHoleConstant();
+ StoreObjectFieldNoWriteBarrier(result, PropertyDescriptorObject::kValueOffset,
+ the_hole);
+ StoreObjectFieldNoWriteBarrier(result, PropertyDescriptorObject::kGetOffset,
+ the_hole);
+ StoreObjectFieldNoWriteBarrier(result, PropertyDescriptorObject::kSetOffset,
+ the_hole);
+ return CAST(result);
+}
+
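// Illustrative sketch (not part of the patch): the two helpers above are meant
// to be used as a pair; {value}, {details} and {if_bailout} stand for the
// property value, its PropertyDetails bits and the caller's bailout label.
TNode<PropertyDescriptorObject> descriptor =
    AllocatePropertyDescriptorObject(context);
InitializePropertyDescriptorObject(descriptor, value, details, if_bailout);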
void CodeStubAssembler::TryLookupElement(
TNode<HeapObject> object, TNode<Map> map, TNode<Int32T> instance_type,
TNode<IntPtrT> intptr_index, Label* if_found, Label* if_absent,
@@ -10758,6 +11128,13 @@ void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
CSA_DCHECK(this, IsFeedbackVector(maybe_feedback_vector));
UpdateFeedback(feedback, CAST(maybe_feedback_vector), slot_id);
break;
+ case UpdateFeedbackMode::kNoFeedback:
+#ifdef V8_JITLESS
+ CSA_DCHECK(this, IsUndefined(maybe_feedback_vector));
+ break;
+#else
+ UNREACHABLE();
+#endif // V8_JITLESS
}
}
@@ -11888,7 +12265,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
TNode<MaybeObject> CodeStubAssembler::StoreWeakReferenceInFeedbackVector(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<HeapObject> value, int additional_offset) {
- TNode<MaybeObject> weak_value = MakeWeak(value);
+ TNode<HeapObjectReference> weak_value = MakeWeak(value);
StoreFeedbackVectorSlot(feedback_vector, slot, weak_value,
UPDATE_WRITE_BARRIER, additional_offset);
return weak_value;
@@ -11924,70 +12301,122 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
}
template <typename TIndex>
-TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
- TNode<TIndex> start_index,
- TNode<TIndex> end_index,
- const FastLoopBody<TIndex>& body,
- int increment,
- IndexAdvanceMode advance_mode) {
- TVARIABLE(TIndex, var, start_index);
+TNode<TIndex> CodeStubAssembler::BuildFastLoop(
+ const VariableList& vars, TVariable<TIndex>& var_index,
+ TNode<TIndex> start_index, TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body, int increment,
+ LoopUnrollingMode unrolling_mode, IndexAdvanceMode advance_mode) {
+ var_index = start_index;
VariableList vars_copy(vars.begin(), vars.end(), zone());
- vars_copy.push_back(&var);
+ vars_copy.push_back(&var_index);
Label loop(this, vars_copy);
- Label after_loop(this);
- // Introduce an explicit second check of the termination condition before the
- // loop that helps turbofan generate better code. If there's only a single
- // check, then the CodeStubAssembler forces it to be at the beginning of the
- // loop requiring a backwards branch at the end of the loop (it's not possible
- // to force the loop header check at the end of the loop and branch forward to
- // it from the pre-header). The extra branch is slower in the case that the
- // loop actually iterates.
- TNode<BoolT> first_check = IntPtrOrSmiEqual(var.value(), end_index);
- int32_t first_check_val;
- if (TryToInt32Constant(first_check, &first_check_val)) {
- if (first_check_val) return var.value();
- Goto(&loop);
- } else {
- Branch(first_check, &after_loop, &loop);
- }
+ Label after_loop(this), done(this);
- BIND(&loop);
- {
+ auto loop_body = [&]() {
if (advance_mode == IndexAdvanceMode::kPre) {
- Increment(&var, increment);
+ Increment(&var_index, increment);
}
- body(var.value());
+ body(var_index.value());
if (advance_mode == IndexAdvanceMode::kPost) {
- Increment(&var, increment);
+ Increment(&var_index, increment);
+ }
+ };
+ // The loops below are generated using the following trick:
+ // Introduce an explicit second check of the termination condition before
+ // the loop that helps turbofan generate better code. If there's only a
+ // single check, then the CodeStubAssembler forces it to be at the beginning
+ // of the loop requiring a backwards branch at the end of the loop (it's not
+ // possible to force the loop header check at the end of the loop and branch
+ // forward to it from the pre-header). The extra branch is slower in the
+ // case that the loop actually iterates.
+ if (unrolling_mode == LoopUnrollingMode::kNo) {
+ TNode<BoolT> first_check = IntPtrOrSmiEqual(var_index.value(), end_index);
+ int32_t first_check_val;
+ if (TryToInt32Constant(first_check, &first_check_val)) {
+ if (first_check_val) return var_index.value();
+ Goto(&loop);
+ } else {
+ Branch(first_check, &done, &loop);
}
- Branch(IntPtrOrSmiNotEqual(var.value(), end_index), &loop, &after_loop);
- }
- BIND(&after_loop);
- return var.value();
-}
-// Instantiate BuildFastLoop for IntPtrT and UintPtrT.
-template V8_EXPORT_PRIVATE TNode<IntPtrT>
-CodeStubAssembler::BuildFastLoop<IntPtrT>(const VariableList& vars,
- TNode<IntPtrT> start_index,
- TNode<IntPtrT> end_index,
- const FastLoopBody<IntPtrT>& body,
- int increment,
- IndexAdvanceMode advance_mode);
-template V8_EXPORT_PRIVATE TNode<UintPtrT>
-CodeStubAssembler::BuildFastLoop<UintPtrT>(const VariableList& vars,
- TNode<UintPtrT> start_index,
- TNode<UintPtrT> end_index,
- const FastLoopBody<UintPtrT>& body,
- int increment,
- IndexAdvanceMode advance_mode);
+ BIND(&loop);
+ {
+ loop_body();
+ CSA_DCHECK(
+ this, increment > 0
+ ? IntPtrOrSmiLessThanOrEqual(var_index.value(), end_index)
+ : IntPtrOrSmiLessThanOrEqual(end_index, var_index.value()));
+ Branch(IntPtrOrSmiNotEqual(var_index.value(), end_index), &loop, &done);
+ }
+ BIND(&done);
+ } else {
+ // Check if there are at least two elements between start_index and
+ // end_index.
+ DCHECK_EQ(unrolling_mode, LoopUnrollingMode::kYes);
+ CSA_DCHECK(this, increment > 0
+ ? IntPtrOrSmiLessThanOrEqual(start_index, end_index)
+ : IntPtrOrSmiLessThanOrEqual(end_index, start_index));
+ TNode<TIndex> last_index =
+ IntPtrOrSmiSub(end_index, IntPtrOrSmiConstant<TIndex>(increment));
+ TNode<BoolT> first_check =
+ increment > 0 ? IntPtrOrSmiLessThan(start_index, last_index)
+ : IntPtrOrSmiGreaterThan(start_index, last_index);
+ int32_t first_check_val;
+ if (TryToInt32Constant(first_check, &first_check_val)) {
+ if (first_check_val) {
+ Goto(&loop);
+ } else {
+ Goto(&after_loop);
+ }
+ } else {
+ Branch(first_check, &loop, &after_loop);
+ }
+
+ BIND(&loop);
+ {
+ Comment("Unrolled Loop");
+ loop_body();
+ loop_body();
+ TNode<BoolT> loop_check =
+ increment > 0 ? IntPtrOrSmiLessThan(var_index.value(), last_index)
+ : IntPtrOrSmiGreaterThan(var_index.value(), last_index);
+ Branch(loop_check, &loop, &after_loop);
+ }
+ BIND(&after_loop);
+ {
+ GotoIfNot(IntPtrOrSmiEqual(var_index.value(), last_index), &done);
+ // Iteration count is odd.
+ loop_body();
+ Goto(&done);
+ }
+ BIND(&done);
+ }
+ return var_index.value();
+}
+
+// Instantiate BuildFastLoop for IntPtrT, UintPtrT and RawPtrT.
+template V8_EXPORT_PRIVATE TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<
+ IntPtrT>(const VariableList& vars, TVariable<IntPtrT>& var_index,
+ TNode<IntPtrT> start_index, TNode<IntPtrT> end_index,
+ const FastLoopBody<IntPtrT>& body, int increment,
+ LoopUnrollingMode unrolling_mode, IndexAdvanceMode advance_mode);
+template V8_EXPORT_PRIVATE TNode<UintPtrT> CodeStubAssembler::BuildFastLoop<
+ UintPtrT>(const VariableList& vars, TVariable<UintPtrT>& var_index,
+ TNode<UintPtrT> start_index, TNode<UintPtrT> end_index,
+ const FastLoopBody<UintPtrT>& body, int increment,
+ LoopUnrollingMode unrolling_mode, IndexAdvanceMode advance_mode);
+template V8_EXPORT_PRIVATE TNode<RawPtrT> CodeStubAssembler::BuildFastLoop<
+ RawPtrT>(const VariableList& vars, TVariable<RawPtrT>& var_index,
+ TNode<RawPtrT> start_index, TNode<RawPtrT> end_index,
+ const FastLoopBody<RawPtrT>& body, int increment,
+ LoopUnrollingMode unrolling_mode, IndexAdvanceMode advance_mode);
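// Illustrative sketch (not part of the patch): the reworked BuildFastLoop in
// use. {n} is a placeholder TNode<IntPtrT> (assumed >= 0); any TVARIABLE the
// body writes must be listed in the VariableList so it survives the loop.
TVARIABLE(IntPtrT, var_index);
TVARIABLE(IntPtrT, var_sum, IntPtrConstant(0));
VariableList vars(0, zone());
vars.push_back(&var_sum);
BuildFastLoop<IntPtrT>(
    vars, var_index, IntPtrConstant(0), n,
    [&](TNode<IntPtrT> i) { var_sum = IntPtrAdd(var_sum.value(), i); },
    1, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);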
template <typename TIndex>
void CodeStubAssembler::BuildFastArrayForEach(
TNode<UnionT<UnionT<FixedArray, PropertyArray>, HeapObject>> array,
ElementsKind kind, TNode<TIndex> first_element_inclusive,
TNode<TIndex> last_element_exclusive, const FastArrayForEachBody& body,
- ForEachDirection direction) {
+ LoopUnrollingMode loop_unrolling_mode, ForEachDirection direction) {
static_assert(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
CSA_SLOW_DCHECK(this, Word32Or(IsFixedArrayWithKind(array, kind),
IsPropertyArray(array)));
@@ -12030,6 +12459,7 @@ void CodeStubAssembler::BuildFastArrayForEach(
BuildFastLoop<IntPtrT>(
start, limit, [&](TNode<IntPtrT> offset) { body(array, offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
+ loop_unrolling_mode,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
@@ -12060,7 +12490,7 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current,
root_value);
},
- -kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
+ -kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPre);
}
void CodeStubAssembler::BranchIfNumberRelationalComparison(Operation op,
@@ -12225,6 +12655,41 @@ TNode<Context> CodeStubAssembler::GotoIfHasContextExtensionUpToDepth(
return cur_context.value();
}
+void CodeStubAssembler::BigInt64Comparison(Operation op, TNode<Object>& left,
+ TNode<Object>& right,
+ Label* return_true,
+ Label* return_false) {
+ TVARIABLE(UintPtrT, left_raw);
+ TVARIABLE(UintPtrT, right_raw);
+ BigIntToRawBytes(CAST(left), &left_raw, &left_raw);
+ BigIntToRawBytes(CAST(right), &right_raw, &right_raw);
+ TNode<WordT> left_raw_value = left_raw.value();
+ TNode<WordT> right_raw_value = right_raw.value();
+
+ TNode<BoolT> condition;
+ switch (op) {
+ case Operation::kEqual:
+ case Operation::kStrictEqual:
+ condition = WordEqual(left_raw_value, right_raw_value);
+ break;
+ case Operation::kLessThan:
+ condition = IntPtrLessThan(left_raw_value, right_raw_value);
+ break;
+ case Operation::kLessThanOrEqual:
+ condition = IntPtrLessThanOrEqual(left_raw_value, right_raw_value);
+ break;
+ case Operation::kGreaterThan:
+ condition = IntPtrGreaterThan(left_raw_value, right_raw_value);
+ break;
+ case Operation::kGreaterThanOrEqual:
+ condition = IntPtrGreaterThanOrEqual(left_raw_value, right_raw_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ Branch(condition, return_true, return_false);
+}
+
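// Worked note (not part of the patch): BigIntToRawBytes materializes a small
// BigInt as its 64-bit two's-complement value, which is why kLessThan maps to
// the signed IntPtrLessThan above. E.g. -3n becomes 0xFFFF'FFFF'FFFF'FFFD, so
// IntPtrLessThan(-3n, 2n) holds while an unsigned comparison would not.
// Callers filter out multi-digit BigInts with GotoIfLargeBigInt first, as the
// comparison sites below do.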
TNode<Oddball> CodeStubAssembler::RelationalComparison(
Operation op, TNode<Object> left, TNode<Object> right,
const LazyNode<Context>& context, TVariable<Smi>* var_type_feedback) {
@@ -12447,11 +12912,21 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
BIND(&if_right_bigint);
{
+ if (Is64()) {
+ Label if_both_bigint(this);
+ GotoIfLargeBigInt(CAST(left), &if_both_bigint);
+ GotoIfLargeBigInt(CAST(right), &if_both_bigint);
+
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBigInt64);
+ BigInt64Comparison(op, left, right, &return_true, &return_false);
+ BIND(&if_both_bigint);
+ }
+
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kBigInt);
- var_result = CAST(CallRuntime(Runtime::kBigIntCompareToBigInt,
- NoContextConstant(), SmiConstant(op),
- left, right));
+ var_result = CAST(CallBuiltin(BigIntComparisonBuiltinOf(op),
+ NoContextConstant(), left, right));
Goto(&end);
}
@@ -12507,7 +12982,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
default:
UNREACHABLE();
}
- var_result = CAST(CallBuiltin(builtin, context(), left, right));
+ var_result = CAST(CallBuiltin(builtin, TNode<Object>(), left, right));
Goto(&end);
BIND(&if_right_not_string);
@@ -12710,6 +13185,14 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_bigint);
{
CSA_DCHECK(this, IsBigInt(value_heapobject));
+
+ if (Is64()) {
+ Label if_large_bigint(this);
+ GotoIfLargeBigInt(CAST(value_heapobject), &if_large_bigint);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt64);
+ Goto(if_equal);
+ BIND(&if_large_bigint);
+ }
CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
Goto(if_equal);
}
@@ -12899,12 +13382,16 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_left_string);
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
- result =
- CAST(CallBuiltin(Builtin::kStringEqual, context(), left, right));
- CombineFeedback(var_type_feedback,
- SmiOr(CollectFeedbackForString(left_type),
- CollectFeedbackForString(right_type)));
- Goto(&end);
+ Label combine_feedback(this);
+ BranchIfStringEqual(CAST(left), CAST(right), &combine_feedback,
+ &combine_feedback, &result);
+ BIND(&combine_feedback);
+ {
+ CombineFeedback(var_type_feedback,
+ SmiOr(CollectFeedbackForString(left_type),
+ CollectFeedbackForString(right_type)));
+ Goto(&end);
+ }
}
BIND(&if_left_number);
@@ -12972,9 +13459,21 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_right_bigint);
{
- // We already have BigInt feedback.
- result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), left, right));
+ if (Is64()) {
+ Label if_both_bigint(this);
+ GotoIfLargeBigInt(CAST(left), &if_both_bigint);
+ GotoIfLargeBigInt(CAST(right), &if_both_bigint);
+
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kBigInt64);
+ BigInt64Comparison(Operation::kEqual, left, right, &if_equal,
+ &if_notequal);
+ BIND(&if_both_bigint);
+ }
+
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ result = CAST(CallBuiltin(Builtin::kBigIntEqual, NoContextConstant(),
+ left, right));
Goto(&end);
}
@@ -13365,9 +13864,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
CollectFeedbackForString(rhs_instance_type);
*var_type_feedback = SmiOr(lhs_feedback, rhs_feedback);
}
- result = CAST(CallBuiltin(Builtin::kStringEqual,
- NoContextConstant(), lhs, rhs));
- Goto(&end);
+ BranchIfStringEqual(CAST(lhs), CAST(rhs), &end, &end, &result);
}
BIND(&if_rhsisnotstring);
@@ -13394,9 +13891,21 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
BIND(&if_rhsisbigint);
{
+ if (Is64()) {
+ Label if_both_bigint(this);
+ GotoIfLargeBigInt(CAST(lhs), &if_both_bigint);
+ GotoIfLargeBigInt(CAST(rhs), &if_both_bigint);
+
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kBigInt64);
+ BigInt64Comparison(Operation::kStrictEqual, lhs, rhs,
+ &if_equal, &if_notequal);
+ BIND(&if_both_bigint);
+ }
+
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kBigInt);
- result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ result = CAST(CallBuiltin(Builtin::kBigIntEqual,
NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -13567,6 +14076,36 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
return result.value();
}
+void CodeStubAssembler::BranchIfStringEqual(TNode<String> lhs,
+ TNode<IntPtrT> lhs_length,
+ TNode<String> rhs,
+ TNode<IntPtrT> rhs_length,
+ Label* if_true, Label* if_false,
+ TVariable<Oddball>* result) {
+ Label length_equal(this), length_not_equal(this);
+ Branch(IntPtrEqual(lhs_length, rhs_length), &length_equal, &length_not_equal);
+
+ BIND(&length_not_equal);
+ {
+ if (result != nullptr) *result = FalseConstant();
+ Goto(if_false);
+ }
+
+ BIND(&length_equal);
+ {
+ TNode<Oddball> value = CAST(CallBuiltin(
+ Builtin::kStringEqual, NoContextConstant(), lhs, rhs, lhs_length));
+ if (result != nullptr) {
+ *result = value;
+ }
+ if (if_true == if_false) {
+ Goto(if_true);
+ } else {
+ Branch(TaggedEqual(value, TrueConstant()), if_true, if_false);
+ }
+ }
+}
+
// ECMA#sec-samevalue
// This algorithm differs from the Strict Equality Comparison Algorithm in its
// treatment of signed zeroes and NaNs.
@@ -13641,9 +14180,7 @@ void CodeStubAssembler::BranchIfSameValue(TNode<Object> lhs, TNode<Object> rhs,
// Now we can only yield true if {rhs} is also a String
// with the same sequence of characters.
GotoIfNot(IsString(CAST(rhs)), if_false);
- const TNode<Object> result = CallBuiltin(
- Builtin::kStringEqual, NoContextConstant(), lhs, rhs);
- Branch(IsTrue(result), if_true, if_false);
+ BranchIfStringEqual(CAST(lhs), CAST(rhs), if_true, if_false);
}
BIND(&if_lhsisbigint);
@@ -14259,6 +14796,22 @@ void CodeStubAssembler::GotoIfNumber(TNode<Object> input, Label* is_number) {
GotoIf(IsHeapNumber(CAST(input)), is_number);
}
+TNode<Word32T> CodeStubAssembler::NormalizeShift32OperandIfNecessary(
+ TNode<Word32T> right32) {
+ TVARIABLE(Word32T, result, right32);
+ Label done(this);
+ // Use UniqueInt32Constant instead of BoolConstant here in order to ensure
+ // that the graph structure does not depend on the value of the predicate
+ // (BoolConstant uses cached nodes).
+ GotoIf(UniqueInt32Constant(Word32ShiftIsSafe()), &done);
+ {
+ result = Word32And(right32, Int32Constant(0x1F));
+ Goto(&done);
+ }
+ BIND(&done);
+ return result.value();
+}
+
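// Worked note (not part of the patch): JS takes shift counts mod 32, so on
// targets where Word32ShiftIsSafe() is false the helper masks the count, e.g.
// a count of 33 becomes 33 & 0x1F == 1, and `1 << 33` correctly evaluates to
// 2 in the BitwiseOp cases below. Where the machine already masks shift
// counts, the helper leaves {right32} untouched.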
TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
TNode<Word32T> right32,
Operation bitwise_op) {
@@ -14270,19 +14823,13 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
case Operation::kBitwiseXor:
return ChangeInt32ToTagged(Signed(Word32Xor(left32, right32)));
case Operation::kShiftLeft:
- if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1F));
- }
+ right32 = NormalizeShift32OperandIfNecessary(right32);
return ChangeInt32ToTagged(Signed(Word32Shl(left32, right32)));
case Operation::kShiftRight:
- if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1F));
- }
+ right32 = NormalizeShift32OperandIfNecessary(right32);
return ChangeInt32ToTagged(Signed(Word32Sar(left32, right32)));
case Operation::kShiftRightLogical:
- if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1F));
- }
+ right32 = NormalizeShift32OperandIfNecessary(right32);
return ChangeUint32ToTagged(Unsigned(Word32Shr(left32, right32)));
default:
break;
@@ -14308,10 +14855,8 @@ TNode<Number> CodeStubAssembler::BitwiseSmiOp(TNode<Smi> left, TNode<Smi> right,
// perform int32 operation but don't check for overflow.
case Operation::kShiftRight: {
TNode<Int32T> left32 = SmiToInt32(left);
- TNode<Int32T> right32 = SmiToInt32(right);
- if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1F));
- }
+ TNode<Int32T> right32 =
+ Signed(NormalizeShift32OperandIfNecessary(SmiToInt32(right)));
return ChangeInt32ToTaggedNoOverflow(Word32Sar(left32, right32));
}
default:
@@ -14822,7 +15367,8 @@ void CodeStubArguments::ForEach(
TNode<Object> arg = assembler_->LoadFullTagged(current);
body(arg);
},
- increment, CodeStubAssembler::IndexAdvanceMode::kPost);
+ increment, CodeStubAssembler::LoopUnrollingMode::kNo,
+ CodeStubAssembler::IndexAdvanceMode::kPost);
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
@@ -15008,7 +15554,7 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
return Word32NotEqual(flags, Int32Constant(0));
}
-TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
+TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
@@ -15020,13 +15566,13 @@ TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
}
-TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
+TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
- TVARIABLE(CodeT, sfi_code);
+ TVARIABLE(Code, sfi_code);
Label done(this);
Label check_instance_type(this);
@@ -15052,7 +15598,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
int32_t case_values[] = {
BYTECODE_ARRAY_TYPE,
- CODET_TYPE,
+ CODE_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
@@ -15102,7 +15648,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
- TNode<CodeT> baseline_code = CAST(sfi_data);
+ TNode<Code> baseline_code = CAST(sfi_data);
sfi_code = baseline_code;
Goto(&done);
}
@@ -15124,7 +15670,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
CSA_DCHECK(this,
Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
{
- TNode<CodeT> trampoline =
+ TNode<Code> trampoline =
LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
sfi_code = trampoline;
}
@@ -15152,28 +15698,20 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
return sfi_code.value();
}
-TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- TNode<CodeDataContainer> cdc = CodeDataContainerFromCodeT(code);
- return LoadObjectField<RawPtrT>(
- cdc, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset));
-#else
- TNode<IntPtrT> object = BitcastTaggedToWord(code);
- return ReinterpretCast<RawPtrT>(
- IntPtrAdd(object, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
-#endif
+TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<Code> code) {
+ return LoadObjectField<RawPtrT>(code,
+ IntPtrConstant(Code::kCodeEntryPointOffset));
}
-TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(TNode<CodeT> codet) {
+TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(TNode<Code> code) {
return IsSetWord32<Code::MarkedForDeoptimizationField>(
- LoadObjectField<Int32T>(CodeDataContainerFromCodeT(codet),
- CodeDataContainer::kKindSpecificFlagsOffset));
+ LoadObjectField<Int16T>(code, Code::kKindSpecificFlagsOffset));
}
TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
TNode<Context> context) {
- const TNode<CodeT> code = GetSharedFunctionInfoCode(shared_info);
+ const TNode<Code> code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
@@ -15259,6 +15797,10 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
TNode<Smi> length;
TNode<HeapObject> properties = LoadSlowProperties(receiver);
+ // g++ version 8 has a bug when using `if constexpr(false)` with a lambda:
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85149
+ // TODO(miladfarca): Use `if constexpr` once all compilers handle this
+ // properly.
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(properties),
IsGlobalDictionary(properties)));
@@ -15387,14 +15929,14 @@ void CodeStubAssembler::PerformStackCheck(TNode<Context> context) {
}
TNode<Object> CodeStubAssembler::CallApiCallback(
- TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
+ TNode<Object> context, TNode<RawPtrT> callback, TNode<Int32T> argc,
TNode<Object> data, TNode<Object> holder, TNode<Object> receiver) {
Callable callable = CodeFactory::CallApiCallback(isolate());
return CallStub(callable, context, callback, argc, data, holder, receiver);
}
TNode<Object> CodeStubAssembler::CallApiCallback(
- TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
+ TNode<Object> context, TNode<RawPtrT> callback, TNode<Int32T> argc,
TNode<Object> data, TNode<Object> holder, TNode<Object> receiver,
TNode<Object> value) {
Callable callable = CodeFactory::CallApiCallback(isolate());
@@ -15983,7 +16525,7 @@ CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
UnsafeStoreNoWriteBarrier(MachineRepresentation::kWord32, current,
empty32);
},
- sizeof(uint32_t), IndexAdvanceMode::kPost);
+ sizeof(uint32_t), LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
Comment("Initialize the data table.");
@@ -16078,7 +16620,7 @@ TNode<SwissNameDictionary> CodeStubAssembler::CopySwissNameDictionary(
TNode<Object> table_field = LoadObjectField(original, offset);
StoreObjectField(table, offset, table_field);
},
- kTaggedSize, IndexAdvanceMode::kPost);
+ kTaggedSize, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
}
Comment("Copy the meta table");
@@ -16148,7 +16690,7 @@ TNode<SwissNameDictionary> CodeStubAssembler::CopySwissNameDictionary(
IntPtrAdd(details_table_offset_minus_tag.value(),
IntPtrConstant(kOneByteSize));
},
- kOneByteSize, IndexAdvanceMode::kPost);
+ kOneByteSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);
}
Comment("CopySwissNameDictionary ]");
@@ -16387,17 +16929,18 @@ void CodeStubAssembler::SharedValueBarrier(
TNode<Uint16T> value_instance_type =
LoadMapInstanceType(LoadMap(CAST(value)));
GotoIf(IsSharedStringInstanceType(value_instance_type), &skip_barrier);
- GotoIf(IsJSSharedStructInstanceType(value_instance_type), &skip_barrier);
+ GotoIf(IsAlwaysSharedSpaceJSObjectInstanceType(value_instance_type),
+ &skip_barrier);
GotoIf(IsHeapNumberInstanceType(value_instance_type), &check_in_shared_heap);
- GotoIf(IsJSSharedArrayInstanceType(value_instance_type), &skip_barrier);
Goto(&slow);
BIND(&check_in_shared_heap);
{
Branch(
- WordNotEqual(WordAnd(page_flags,
- IntPtrConstant(BasicMemoryChunk::IN_SHARED_HEAP)),
- IntPtrConstant(0)),
+ WordNotEqual(
+ WordAnd(page_flags,
+ IntPtrConstant(BasicMemoryChunk::IN_WRITABLE_SHARED_SPACE)),
+ IntPtrConstant(0)),
&skip_barrier, &slow);
}
@@ -16417,7 +16960,7 @@ void CodeStubAssembler::SharedValueBarrier(
WordNotEqual(
WordAnd(LoadBasicMemoryChunkFlags(CAST(var_shared_value->value())),
IntPtrConstant(BasicMemoryChunk::READ_ONLY_HEAP |
- BasicMemoryChunk::IN_SHARED_HEAP)),
+ BasicMemoryChunk::IN_WRITABLE_SHARED_SPACE)),
IntPtrConstant(0)));
Goto(&done);
}
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 559deafa85..fa41984c6a 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -82,6 +82,9 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(NoElementsProtector, no_elements_protector, NoElementsProtector) \
V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector) \
V(NumberStringCache, number_string_cache, NumberStringCache) \
+ V(NumberStringPrototypeNoReplaceProtector, \
+ number_string_prototype_no_replace_protector, \
+ NumberStringPrototypeNoReplaceProtector) \
V(PromiseAllResolveElementSharedFun, promise_all_resolve_element_shared_fun, \
PromiseAllResolveElementSharedFun) \
V(PromiseAllSettledRejectElementSharedFun, \
@@ -163,7 +166,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
V(Function_string, function_string, FunctionString) \
V(function_to_string, function_to_string, FunctionToString) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
V(Infinity_string, Infinity_string, InfinityString) \
V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \
@@ -184,6 +186,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
V(null_to_string, null_to_string, NullToString) \
V(NullValue, null_value, Null) \
+ IF_WASM(V, WasmNull, wasm_null, WasmNull) \
V(number_string, number_string, NumberString) \
V(number_to_string, number_to_string, NumberToString) \
V(Object_string, Object_string, ObjectString) \
@@ -501,6 +504,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// TODO(v8:9708): Define BInt operations once all uses are ported.
PARAMETER_BINOP(IntPtrOrSmiEqual, WordEqual, SmiEqual)
PARAMETER_BINOP(IntPtrOrSmiNotEqual, WordNotEqual, SmiNotEqual)
+ PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
SmiLessThanOrEqual)
PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
@@ -781,6 +785,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// BigInt operations.
void GotoIfLargeBigInt(TNode<BigInt> bigint, Label* true_label);
+ TNode<Word32T> NormalizeShift32OperandIfNecessary(TNode<Word32T> right32);
TNode<Number> BitwiseOp(TNode<Word32T> left32, TNode<Word32T> right32,
Operation bitwise_op);
TNode<Number> BitwiseSmiOp(TNode<Smi> left32, TNode<Smi> right32,
@@ -832,63 +837,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void FastCheck(TNode<BoolT> condition);
- TNode<BoolT> IsCodeTMap(TNode<Map> map) {
- return V8_EXTERNAL_CODE_SPACE_BOOL ? IsCodeDataContainerMap(map)
- : IsCodeMap(map);
- }
- TNode<BoolT> IsCodeT(TNode<HeapObject> object) {
- return IsCodeTMap(LoadMap(object));
- }
-
- // TODO(v8:11880): remove once Code::bytecode_or_interpreter_data field
- // is cached in or moved to CodeT.
- TNode<Code> FromCodeTNonBuiltin(TNode<CodeT> code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // Compute the Code object pointer from the code entry point.
+ // TODO(v8:11880): remove once InstructionStream::bytecode_or_interpreter_data
+ // field is cached in or moved to Code.
+ TNode<InstructionStream> FromCodeNonBuiltin(TNode<Code> code) {
+ // Compute the InstructionStream object pointer from the code entry point.
TNode<RawPtrT> code_entry = Load<RawPtrT>(
- code, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset -
- kHeapObjectTag));
+ code, IntPtrConstant(Code::kCodeEntryPointOffset - kHeapObjectTag));
TNode<Object> o = BitcastWordToTagged(IntPtrSub(
- code_entry, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
+ code_entry,
+ IntPtrConstant(InstructionStream::kHeaderSize - kHeapObjectTag)));
return CAST(o);
-#else
- return code;
-#endif
}
- TNode<CodeDataContainer> CodeDataContainerFromCodeT(TNode<CodeT> code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return code;
-#else
- return LoadObjectField<CodeDataContainer>(code,
- Code::kCodeDataContainerOffset);
-#endif
+ TNode<Code> ToCode(TNode<InstructionStream> code) {
+ return LoadObjectField<Code>(code, InstructionStream::kCodeOffset);
}
- TNode<CodeT> ToCodeT(TNode<Code> code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return LoadObjectField<CodeDataContainer>(code,
- Code::kCodeDataContainerOffset);
-#else
- return code;
-#endif
- }
-
- TNode<CodeT> ToCodeT(TNode<Code> code,
- TNode<CodeDataContainer> code_data_container) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return code_data_container;
-#else
- return code;
-#endif
- }
-
- TNode<RawPtrT> GetCodeEntry(TNode<CodeT> code);
- TNode<BoolT> IsMarkedForDeoptimization(TNode<CodeT> codet);
+ TNode<RawPtrT> GetCodeEntry(TNode<Code> code);
+ TNode<BoolT> IsMarkedForDeoptimization(TNode<Code> code);
// The following Call wrappers call an object according to the semantics that
// one finds in the ECMAScript spec, operating on a Callable (e.g. a
- // JSFunction or proxy) rather than a Code object.
+ // JSFunction or proxy) rather than an InstructionStream object.
template <class... TArgs>
TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
TNode<JSReceiver> receiver, TArgs... args) {
@@ -909,11 +879,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
TNode<Object> CallApiCallback(TNode<Object> context, TNode<RawPtrT> callback,
- TNode<IntPtrT> argc, TNode<Object> data,
+ TNode<Int32T> argc, TNode<Object> data,
TNode<Object> holder, TNode<Object> receiver);
TNode<Object> CallApiCallback(TNode<Object> context, TNode<RawPtrT> callback,
- TNode<IntPtrT> argc, TNode<Object> data,
+ TNode<Int32T> argc, TNode<Object> data,
TNode<Object> holder, TNode<Object> receiver,
TNode<Object> value);
@@ -1193,6 +1163,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
kExternalStringResourceDataTag);
}
+ TNode<RawPtr<Uint64T>> Log10OffsetTable() {
+ return ReinterpretCast<RawPtr<Uint64T>>(
+ ExternalConstant(ExternalReference::address_of_log10_offset_table()));
+ }
+
#if V8_ENABLE_WEBASSEMBLY
TNode<RawPtrT> LoadWasmInternalFunctionCallTargetPtr(
TNode<WasmInternalFunction> object) {
@@ -1241,6 +1216,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return LoadBufferData<IntPtrT>(buffer, offset);
}
TNode<Uint8T> LoadUint8Ptr(TNode<RawPtrT> ptr, TNode<IntPtrT> offset);
+ TNode<Uint64T> LoadUint64Ptr(TNode<RawPtrT> ptr, TNode<IntPtrT> index);
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
@@ -1538,7 +1514,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsWeakReferenceToObject(TNode<MaybeObject> maybe_object,
TNode<Object> object);
- TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
+ TNode<HeapObjectReference> MakeWeak(TNode<HeapObject> value);
TNode<MaybeObject> ClearedValue();
void FixedArrayBoundsCheck(TNode<FixedArrayBase> array, TNode<Smi> index,
@@ -1864,13 +1840,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreJSSharedStructPropertyArrayElement(TNode<PropertyArray> array,
TNode<IntPtrT> index,
TNode<Object> value) {
- // JSSharedStructs are allocated in the shared old space, which is currently
- // collected by stopping the world, so the incremental write barrier is not
- // needed. They can only store Smis and other HeapObjects in the shared old
- // space, so the generational write barrier is also not needed.
- // TODO(v8:12547): Add a safer, shared variant of SKIP_WRITE_BARRIER.
- StoreFixedArrayOrPropertyArrayElement(array, index, value,
- UNSAFE_SKIP_WRITE_BARRIER);
+ StoreFixedArrayOrPropertyArrayElement(array, index, value);
}
// EnsureArrayPushable verifies that receiver with this map is:
@@ -2468,17 +2438,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> value);
void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value,
Label* if_number, TVariable<Word32T>* var_word32,
- Label* if_bigint,
+ Label* if_bigint, Label* if_bigint64,
TVariable<BigInt>* var_maybe_bigint);
void TaggedToWord32OrBigIntWithFeedback(TNode<Context> context,
TNode<Object> value, Label* if_number,
TVariable<Word32T>* var_word32,
- Label* if_bigint,
+ Label* if_bigint, Label* if_bigint64,
TVariable<BigInt>* var_maybe_bigint,
TVariable<Smi>* var_feedback);
void TaggedPointerToWord32OrBigIntWithFeedback(
TNode<Context> context, TNode<HeapObject> pointer, Label* if_number,
- TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Word32T>* var_word32, Label* if_bigint, Label* if_bigint64,
TVariable<BigInt>* var_maybe_bigint, TVariable<Smi>* var_feedback);
TNode<Int32T> TruncateNumberToWord32(TNode<Number> value);
@@ -2508,11 +2478,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> ChangeBoolToInt32(TNode<BoolT> b);
- void TaggedToNumeric(TNode<Context> context, TNode<Object> value,
- TVariable<Numeric>* var_numeric);
- void TaggedToNumericWithFeedback(TNode<Context> context, TNode<Object> value,
- TVariable<Numeric>* var_numeric,
- TVariable<Smi>* var_feedback);
+ void TaggedToBigInt(TNode<Context> context, TNode<Object> value,
+ Label* if_not_bigint, Label* if_bigint,
+ Label* if_bigint64, TVariable<BigInt>* var_bigint,
+ TVariable<Smi>* var_feedback);
// Ensures that {var_shared_value} is shareable across Isolates, and throws if
// not.
@@ -2595,6 +2564,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> InstanceTypeEqual(TNode<Int32T> instance_type, int type);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsMegaDOMProtectorCellInvalid();
+ TNode<BoolT> IsAlwaysSharedSpaceJSObjectInstanceType(
+ TNode<Int32T> instance_type);
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsBigInt(TNode<HeapObject> object);
@@ -2627,6 +2598,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsIndirectStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayBuffer(TNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
+ TNode<BoolT> IsJSRabGsabDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayMap(TNode<Map> map);
TNode<BoolT> IsJSArray(TNode<HeapObject> object);
@@ -2658,6 +2630,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSReceiverMap(TNode<Map> map);
TNode<BoolT> IsJSReceiver(TNode<HeapObject> object);
+ // The following two methods assume that we deal either with a primitive
+ // object or a JS receiver.
+ TNode<BoolT> JSAnyIsNotPrimitiveMap(TNode<Map> map);
+ TNode<BoolT> JSAnyIsNotPrimitive(TNode<HeapObject> object);
TNode<BoolT> IsJSRegExp(TNode<HeapObject> object);
TNode<BoolT> IsJSTypedArrayInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSTypedArrayMap(TNode<Map> map);
@@ -2730,6 +2706,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
+ TNode<BoolT> IsNumberStringPrototypeNoReplaceProtectorCellInvalid();
TNode<IntPtrT> LoadBasicMemoryChunkFlags(TNode<HeapObject> object);
@@ -2875,6 +2852,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers).
// https://tc39.github.io/proposal-bigint/#sec-to-bigint
TNode<BigInt> ToBigInt(TNode<Context> context, TNode<Object> input);
+ // Try to convert any object to a BigInt, including Numbers.
+ TNode<BigInt> ToBigIntConvertNumber(TNode<Context> context,
+ TNode<Object> input);
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
@@ -3234,6 +3214,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
next_enum_index_smi, SKIP_WRITE_BARRIER);
}
+ template <class Dictionary>
+ TNode<Smi> GetNameDictionaryFlags(TNode<Dictionary> dictionary);
+ template <class Dictionary>
+ void SetNameDictionaryFlags(TNode<Dictionary>, TNode<Smi> flags);
+
// Looks up an entry in a NameDictionaryBase successor. If the entry is found
// control goes to {if_found} and {var_name_index} contains an index of the
// key field of the entry found. If the key is not found control goes to
@@ -3313,6 +3298,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Object>* var_raw_value, Label* if_not_found,
Label* if_bailout, GetOwnPropertyMode mode);
+ TNode<PropertyDescriptorObject> AllocatePropertyDescriptorObject(
+ TNode<Context> context);
+ void InitializePropertyDescriptorObject(
+ TNode<PropertyDescriptorObject> descriptor, TNode<Object> value,
+ TNode<Uint32T> details, Label* if_bailout);
+
TNode<Object> GetProperty(TNode<Context> context, TNode<Object> receiver,
Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
@@ -3345,6 +3336,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> CreateAsyncFromSyncIterator(TNode<Context> context,
TNode<Object> sync_iterator);
+ TNode<JSObject> CreateAsyncFromSyncIterator(TNode<Context> context,
+ TNode<JSReceiver> sync_iterator,
+ TNode<Object> next);
template <class... TArgs>
TNode<Object> CallBuiltin(Builtin id, TNode<Object> context, TArgs... args) {
@@ -3593,23 +3587,50 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> LoadElementsKind(TNode<AllocationSite> allocation_site);
enum class IndexAdvanceMode { kPre, kPost };
+ enum class LoopUnrollingMode { kNo, kYes };
template <typename TIndex>
using FastLoopBody = std::function<void(TNode<TIndex> index)>;
template <typename TIndex>
TNode<TIndex> BuildFastLoop(
- const VariableList& var_list, TNode<TIndex> start_index,
- TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
+ const VariableList& vars, TVariable<TIndex>& var_index,
+ TNode<TIndex> start_index, TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body, int increment,
+ LoopUnrollingMode unrolling_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
template <typename TIndex>
TNode<TIndex> BuildFastLoop(
+ TVariable<TIndex>& var_index, TNode<TIndex> start_index,
+ TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
+ LoopUnrollingMode unrolling_mode,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
+ return BuildFastLoop(VariableList(0, zone()), var_index, start_index,
+ end_index, body, increment, unrolling_mode,
+ advance_mode);
+ }
+
+ template <typename TIndex>
+ TNode<TIndex> BuildFastLoop(const VariableList& vars,
+ TNode<TIndex> start_index,
+ TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body, int increment,
+ LoopUnrollingMode unrolling_mode,
+ IndexAdvanceMode advance_mode) {
+ TVARIABLE(TIndex, var_index);
+ return BuildFastLoop(vars, var_index, start_index, end_index, body,
+ increment, unrolling_mode, advance_mode);
+ }
+
+ template <typename TIndex>
+ TNode<TIndex> BuildFastLoop(
TNode<TIndex> start_index, TNode<TIndex> end_index,
const FastLoopBody<TIndex>& body, int increment,
+ LoopUnrollingMode unrolling_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
- increment, advance_mode);
+ increment, unrolling_mode, advance_mode);
}
enum class ForEachDirection { kForward, kReverse };
@@ -3622,6 +3643,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UnionT<UnionT<FixedArray, PropertyArray>, HeapObject>> array,
ElementsKind kind, TNode<TIndex> first_element_inclusive,
TNode<TIndex> last_element_exclusive, const FastArrayForEachBody& body,
+ LoopUnrollingMode loop_unrolling_mode,
ForEachDirection direction = ForEachDirection::kReverse);
template <typename TIndex>
@@ -3731,6 +3753,29 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Oddball> StrictEqual(TNode<Object> lhs, TNode<Object> rhs,
TVariable<Smi>* var_type_feedback = nullptr);
+ void GotoIfStringEqual(TNode<String> lhs, TNode<IntPtrT> lhs_length,
+ TNode<String> rhs, Label* if_true) {
+ Label if_false(this);
+ TNode<IntPtrT> rhs_length = LoadStringLengthAsWord(rhs);
+ BranchIfStringEqual(lhs, lhs_length, rhs, rhs_length, if_true, &if_false,
+ nullptr);
+
+ BIND(&if_false);
+ }
+
+ void BranchIfStringEqual(TNode<String> lhs, TNode<String> rhs, Label* if_true,
+ Label* if_false,
+ TVariable<Oddball>* result = nullptr) {
+ return BranchIfStringEqual(lhs, LoadStringLengthAsWord(lhs), rhs,
+ LoadStringLengthAsWord(rhs), if_true, if_false,
+ result);
+ }
+
+ void BranchIfStringEqual(TNode<String> lhs, TNode<IntPtrT> lhs_length,
+ TNode<String> rhs, TNode<IntPtrT> rhs_length,
+ Label* if_true, Label* if_false,
+ TVariable<Oddball>* result = nullptr);
+
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
// differs from positive zero.
@@ -3851,7 +3896,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ElementsKind kind = HOLEY_ELEMENTS);
// Load a builtin's code from the builtin array in the isolate.
- TNode<CodeT> LoadBuiltin(TNode<Smi> builtin_id);
+ TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
// If |data_type_out| is provided, the instance type of the function data will
@@ -3859,7 +3904,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// data_type_out will be set to 0.
// If |if_compile_lazy| is provided then the execution will go to the given
  // label in case of a CompileLazy code object.
- TNode<CodeT> GetSharedFunctionInfoCode(
+ TNode<Code> GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info,
TVariable<Uint16T>* data_type_out = nullptr,
Label* if_compile_lazy = nullptr);
@@ -3970,6 +4015,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
uint8_t ConstexprIntegerLiteralToUint8(const IntegerLiteral& i) {
return i.To<uint8_t>();
}
+ int64_t ConstexprIntegerLiteralToInt64(const IntegerLiteral& i) {
+ return i.To<int64_t>();
+ }
uint64_t ConstexprIntegerLiteralToUint64(const IntegerLiteral& i) {
return i.To<uint64_t>();
}
@@ -4248,6 +4296,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
private:
friend class CodeStubArguments;
+ void BigInt64Comparison(Operation op, TNode<Object>& left,
+ TNode<Object>& right, Label* return_true,
+ Label* return_false);
+
void HandleBreakOnNode();
TNode<HeapObject> AllocateRawDoubleAligned(TNode<IntPtrT> size_in_bytes,
@@ -4311,10 +4363,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Context> context, TNode<HeapObject> input, Object::Conversion mode,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
- void TaggedToNumeric(TNode<Context> context, TNode<Object> value,
- TVariable<Numeric>* var_numeric,
- TVariable<Smi>* var_feedback);
-
enum IsKnownTaggedPointer { kNo, kYes };
template <Object::Conversion conversion>
void TaggedToWord32OrBigIntImpl(TNode<Context> context, TNode<Object> value,
@@ -4322,6 +4370,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Word32T>* var_word32,
IsKnownTaggedPointer is_known_tagged_pointer,
Label* if_bigint = nullptr,
+ Label* if_bigint64 = nullptr,
TVariable<BigInt>* var_maybe_bigint = nullptr,
TVariable<Smi>* var_feedback = nullptr);
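As a reading aid for the reworked BuildFastLoop family above, here is a minimal sketch of a caller using the overload that takes explicit bounds plus the new LoopUnrollingMode parameter. It assumes it runs inside a CodeStubAssembler method; the bound and the loop body are placeholders, not code from this patch.

  // Hypothetical CSA fragment: iterate indices [0, length) and thread the new
  // LoopUnrollingMode argument through to BuildFastLoop.
  TNode<IntPtrT> length = IntPtrConstant(16);  // placeholder bound
  BuildFastLoop<IntPtrT>(
      IntPtrConstant(0), length,
      [&](TNode<IntPtrT> index) {
        // Per-element work keyed by |index| goes here.
      },
      /* increment */ 1, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost);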
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index d5d371e26c..72b36071b8 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -10,6 +10,7 @@
#include "src/logging/log.h"
#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
#include "src/utils/ostreams.h"
@@ -78,7 +79,8 @@ void CompilationCacheScript::Age() {
if (!info.HasBytecodeArray() ||
info.GetBytecodeArray(isolate()).IsOld()) {
table.SetPrimaryValueAt(entry,
- ReadOnlyRoots(isolate()).undefined_value());
+ ReadOnlyRoots(isolate()).undefined_value(),
+ SKIP_WRITE_BARRIER);
}
}
}
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 9e7d2394fb..22f743a4a4 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -26,7 +26,7 @@
#include "src/common/message-template.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/compiler/pipeline.h"
+#include "src/compiler/turbofan.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/diagnostics/code-tracer.h"
@@ -62,7 +62,6 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/snapshot/code-serializer.h"
#include "src/utils/ostreams.h"
-#include "src/web-snapshot/web-snapshot.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
#ifdef V8_ENABLE_MAGLEV
@@ -202,13 +201,13 @@ class CompilerTracer : public AllStatic {
static void TraceFinishMaglevCompile(Isolate* isolate,
Handle<JSFunction> function,
- double ms_prepare, double ms_optimize,
- double ms_codegen) {
+ double ms_prepare, double ms_execute,
+ double ms_finalize) {
if (!v8_flags.trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintTracePrefix(scope, "completed compiling", function, CodeKind::MAGLEV);
PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_prepare,
- ms_optimize, ms_codegen);
+ ms_execute, ms_finalize);
PrintTraceSuffix(scope);
}
@@ -231,14 +230,17 @@ class CompilerTracer : public AllStatic {
PrintTraceSuffix(scope);
}
- static void TraceAbortedJob(Isolate* isolate,
- OptimizedCompilationInfo* info) {
+ static void TraceAbortedJob(Isolate* isolate, OptimizedCompilationInfo* info,
+ double ms_prepare, double ms_execute,
+ double ms_finalize) {
if (!v8_flags.trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintTracePrefix(scope, "aborted optimizing", info);
if (info->is_osr()) PrintF(scope.file(), " OSR");
PrintF(scope.file(), " because: %s",
GetBailoutReason(info->bailout_reason()));
+ PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_prepare,
+ ms_execute, ms_finalize);
PrintTraceSuffix(scope);
}
@@ -316,7 +318,7 @@ void Compiler::LogFunctionCompilation(Isolate* isolate,
Handle<AbstractCode> abstract_code,
CodeKind kind, double time_taken_ms) {
DCHECK_NE(*abstract_code,
- ToAbstractCode(*BUILTIN_CODE(isolate, CompileLazy)));
+ AbstractCode::cast(*BUILTIN_CODE(isolate, CompileLazy)));
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
@@ -370,7 +372,7 @@ void Compiler::LogFunctionCompilation(Isolate* isolate,
UNREACHABLE();
}
- Handle<String> debug_name = SharedFunctionInfo::DebugName(shared);
+ Handle<String> debug_name = SharedFunctionInfo::DebugName(isolate, shared);
DisallowGarbageCollection no_gc;
LOG(isolate, FunctionEvent(name.c_str(), script->id(), time_taken_ms,
shared->StartPosition(), shared->EndPosition(),
@@ -455,7 +457,7 @@ void LogUnoptimizedCompilation(Isolate* isolate,
#if V8_ENABLE_WEBASSEMBLY
DCHECK(shared->HasAsmWasmData());
abstract_code =
- ToAbstractCode(BUILTIN_CODE(isolate, InstantiateAsmJs), isolate);
+ Handle<AbstractCode>::cast(BUILTIN_CODE(isolate, InstantiateAsmJs));
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
@@ -649,7 +651,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
INTERPRETER_DATA_TYPE, AllocationType::kOld));
interpreter_data->set_bytecode_array(*bytecode_array);
- interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
+ interpreter_data->set_interpreter_trampoline(*code);
shared_info->set_interpreter_data(*interpreter_data);
@@ -919,7 +921,7 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
// A wrapper to access the optimized code cache slots on the feedback vector.
class OptimizedCodeCache : public AllStatic {
public:
- static V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> Get(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Code> Get(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
CodeKind code_kind) {
if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {};
@@ -929,18 +931,18 @@ class OptimizedCodeCache : public AllStatic {
SharedFunctionInfo shared = function->shared();
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
- CodeT code;
+ Code code;
FeedbackVector feedback_vector = function->feedback_vector();
if (IsOSR(osr_offset)) {
Handle<BytecodeArray> bytecode(shared.GetBytecodeArray(isolate), isolate);
interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt());
DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop);
- base::Optional<CodeT> maybe_code =
+ base::Optional<Code> maybe_code =
feedback_vector.GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
if (maybe_code.has_value()) code = maybe_code.value();
} else {
feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- shared, "OptimizedCodeCache::Get");
+ isolate, shared, "OptimizedCodeCache::Get");
code = feedback_vector.optimized_code();
}
@@ -958,7 +960,7 @@ class OptimizedCodeCache : public AllStatic {
}
static void Insert(Isolate* isolate, JSFunction function,
- BytecodeOffset osr_offset, CodeT code,
+ BytecodeOffset osr_offset, Code code,
bool is_function_context_specializing) {
const CodeKind kind = code.kind();
if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
@@ -1018,7 +1020,9 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
if (!PrepareJobWithHandleScope(job, isolate, compilation_info,
ConcurrencyMode::kSynchronous)) {
- CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info,
+ job->prepare_in_ms(), job->execute_in_ms(),
+ job->finalize_in_ms());
return false;
}
@@ -1028,13 +1032,17 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
if (job->ExecuteJob(isolate->counters()->runtime_call_stats(),
isolate->main_thread_local_isolate())) {
UnparkedScope unparked_scope(isolate->main_thread_local_isolate());
- CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ CompilerTracer::TraceAbortedJob(
+ isolate, compilation_info, job->prepare_in_ms(), job->execute_in_ms(),
+ job->finalize_in_ms());
return false;
}
}
if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
- CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info,
+ job->prepare_in_ms(), job->execute_in_ms(),
+ job->finalize_in_ms());
return false;
}
@@ -1043,7 +1051,7 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
DCHECK(!isolate->has_pending_exception());
OptimizedCodeCache::Insert(isolate, *compilation_info->closure(),
compilation_info->osr_offset(),
- ToCodeT(*compilation_info->code()),
+ *compilation_info->code(),
compilation_info->function_context_specializing());
job->RecordFunctionCompilation(LogEventListener::CodeTag::kFunction, isolate);
return true;
@@ -1119,12 +1127,11 @@ bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
}
}
-MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<SharedFunctionInfo> shared,
- ConcurrencyMode mode,
- BytecodeOffset osr_offset,
- CompileResultBehavior result_behavior) {
+MaybeHandle<Code> CompileTurbofan(Isolate* isolate, Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> shared,
+ ConcurrencyMode mode,
+ BytecodeOffset osr_offset,
+ CompileResultBehavior result_behavior) {
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
@@ -1132,13 +1139,15 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
- bool has_script = shared->script().IsScript();
+ const compiler::IsScriptAvailable has_script =
+ shared->script().IsScript() ? compiler::IsScriptAvailable::kYes
+ : compiler::IsScriptAvailable::kNo;
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
- DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
+ DCHECK_IMPLIES(has_script == compiler::IsScriptAvailable::kNo,
+ shared->HasBytecodeArray());
std::unique_ptr<TurbofanCompilationJob> job(
- compiler::Pipeline::NewCompilationJob(
- isolate, function, CodeKind::TURBOFAN, has_script, osr_offset));
+ compiler::NewCompilationJob(isolate, function, has_script, osr_offset));
if (result_behavior == CompileResultBehavior::kDiscardForTesting) {
job->compilation_info()->set_discard_result_for_testing();
@@ -1154,7 +1163,7 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
} else {
DCHECK(IsSynchronous(mode));
if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
- return ToCodeT(job->compilation_info()->code(), isolate);
+ return job->compilation_info()->code();
}
}
@@ -1167,10 +1176,8 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
void RecordMaglevFunctionCompilation(Isolate* isolate,
Handle<JSFunction> function) {
PtrComprCageBase cage_base(isolate);
- // TODO(v8:13261): We should be able to pass a CodeT AbstractCode in here, but
- // LinuxPerfJitLogger only supports Code AbstractCode.
Handle<AbstractCode> abstract_code(
- AbstractCode::cast(FromCodeT(function->code(cage_base))), isolate);
+ AbstractCode::cast(function->code(cage_base)), isolate);
Handle<SharedFunctionInfo> shared(function->shared(cage_base), isolate);
Handle<Script> script(Script::cast(shared->script(cage_base)), isolate);
Handle<FeedbackVector> feedback_vector(function->feedback_vector(cage_base),
@@ -1186,10 +1193,9 @@ void RecordMaglevFunctionCompilation(Isolate* isolate,
}
#endif // V8_ENABLE_MAGLEV
-MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
- ConcurrencyMode mode,
- BytecodeOffset osr_offset,
- CompileResultBehavior result_behavior) {
+MaybeHandle<Code> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
+ ConcurrencyMode mode, BytecodeOffset osr_offset,
+ CompileResultBehavior result_behavior) {
#ifdef V8_ENABLE_MAGLEV
DCHECK(v8_flags.maglev);
// TODO(v8:7700): Add missing support.
@@ -1228,6 +1234,9 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
CompilationJob::Status status =
job->ExecuteJob(isolate->counters()->runtime_call_stats(),
isolate->main_thread_local_isolate());
+ if (status == CompilationJob::FAILED) {
+ return {};
+ }
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
@@ -1254,7 +1263,7 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
#endif // V8_ENABLE_MAGLEV
}
-MaybeHandle<CodeT> GetOrCompileOptimized(
+MaybeHandle<Code> GetOrCompileOptimized(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
CompileResultBehavior result_behavior = CompileResultBehavior::kDefault) {
@@ -1284,7 +1293,7 @@ MaybeHandle<CodeT> GetOrCompileOptimized(
// turbo_filter.
if (!ShouldOptimize(code_kind, shared)) return {};
- Handle<CodeT> cached_code;
+ Handle<Code> cached_code;
if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
.ToHandle(&cached_code)) {
return cached_code;
@@ -1585,7 +1594,12 @@ BackgroundCompileTask::BackgroundCompileTask(
timer_(isolate->counters()->compile_script_on_background()),
start_position_(0),
end_position_(0),
- function_literal_id_(kFunctionLiteralIdTopLevel) {}
+ function_literal_id_(kFunctionLiteralIdTopLevel) {
+ if (options == ScriptCompiler::CompileOptions::kProduceCompileHints) {
+ flags_.set_produce_compile_hints(true);
+ }
+ DCHECK(is_streaming_compilation());
+}
BackgroundCompileTask::BackgroundCompileTask(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
@@ -1606,6 +1620,7 @@ BackgroundCompileTask::BackgroundCompileTask(
end_position_(shared_info->EndPosition()),
function_literal_id_(shared_info->function_literal_id()) {
DCHECK(!shared_info->is_toplevel());
+ DCHECK(!is_streaming_compilation());
character_stream_->Seek(start_position_);
@@ -1718,13 +1733,11 @@ class MergeAssumptionChecker final : public ObjectVisitor {
// The object graph for a newly compiled Script shouldn't yet contain any
// Code. If any of these functions are called, then that would indicate that
// the graph was not disjoint from the rest of the heap as expected.
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- UNREACHABLE();
- }
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
UNREACHABLE();
}
+ void VisitCodeTarget(RelocInfo* rinfo) override { UNREACHABLE(); }
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override { UNREACHABLE(); }
private:
enum ObjectKind {
@@ -1758,6 +1771,10 @@ class MergeAssumptionChecker final : public ObjectVisitor {
} // namespace
+bool BackgroundCompileTask::is_streaming_compilation() const {
+ return function_literal_id_ == kFunctionLiteralIdTopLevel;
+}
+
void BackgroundCompileTask::Run() {
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
DCHECK_NE(ThreadId::Current(), isolate_for_local_isolate_->thread_id());
@@ -1790,6 +1807,7 @@ void BackgroundCompileTask::Run(
ParseInfo info(isolate, flags_, &compile_state_, reusable_state,
GetCurrentStackPosition() - stack_size_ * KB);
info.set_character_stream(std::move(character_stream_));
+ if (is_streaming_compilation()) info.set_is_streaming_compilation();
if (toplevel_script_compilation) {
DCHECK_NULL(persistent_handles_);
@@ -1900,6 +1918,7 @@ class ConstantPoolPointerForwarder {
: cage_base_(cage_base), local_heap_(local_heap) {}
void AddBytecodeArray(BytecodeArray bytecode_array) {
+ CHECK(bytecode_array.IsBytecodeArray());
bytecode_arrays_to_update_.push_back(handle(bytecode_array, local_heap_));
}
@@ -1951,10 +1970,6 @@ class ConstantPoolPointerForwarder {
std::unordered_map<int, Handle<SharedFunctionInfo>> forwarding_table_;
};
-BackgroundMergeTask::~BackgroundMergeTask() {
- DCHECK(!HasPendingForegroundWork());
-}
-
void BackgroundMergeTask::SetUpOnMainThread(Isolate* isolate,
Handle<String> source_text,
const ScriptDetails& script_details,
@@ -1972,9 +1987,6 @@ void BackgroundMergeTask::SetUpOnMainThread(Isolate* isolate,
return;
}
- // Any data sent to the background thread will need to be a persistent handle.
- persistent_handles_ = std::make_unique<PersistentHandles>(isolate);
-
if (lookup_result.is_compiled_scope().is_compiled()) {
// There already exists a compiled top-level SFI, so the main thread will
// discard the background serialization results and use the top-level SFI
@@ -1985,11 +1997,18 @@ void BackgroundMergeTask::SetUpOnMainThread(Isolate* isolate,
} else {
DCHECK(lookup_result.toplevel_sfi().is_null());
// A background merge is required.
- state_ = kPendingBackgroundWork;
- cached_script_ = persistent_handles_->NewHandle(*script);
+ SetUpOnMainThread(isolate, script);
}
}
+void BackgroundMergeTask::SetUpOnMainThread(Isolate* isolate,
+ Handle<Script> cached_script) {
+ // Any data sent to the background thread will need to be a persistent handle.
+ persistent_handles_ = std::make_unique<PersistentHandles>(isolate);
+ state_ = kPendingBackgroundWork;
+ cached_script_ = persistent_handles_->NewHandle(*cached_script);
+}
+
void BackgroundMergeTask::BeginMergeInBackground(LocalIsolate* isolate,
Handle<Script> new_script) {
DCHECK_EQ(state_, kPendingBackgroundWork);
@@ -2030,8 +2049,8 @@ void BackgroundMergeTask::BeginMergeInBackground(LocalIsolate* isolate,
SharedFunctionInfo old_sfi =
SharedFunctionInfo::cast(maybe_old_sfi.GetHeapObjectAssumeWeak());
forwarder.Forward(new_sfi, old_sfi);
- if (new_sfi.is_compiled()) {
- if (old_sfi.is_compiled()) {
+ if (new_sfi.HasBytecodeArray()) {
+ if (old_sfi.HasBytecodeArray()) {
// Reset the old SFI's bytecode age so that it won't likely get
// flushed right away. This operation might be racing against
// concurrent modification by another thread, but such a race is not
@@ -2051,7 +2070,7 @@ void BackgroundMergeTask::BeginMergeInBackground(LocalIsolate* isolate,
DCHECK_EQ(i, new_sfi.function_literal_id());
new_sfi.set_script(*old_script);
used_new_sfis_.push_back(local_heap->NewPersistentHandle(new_sfi));
- if (new_sfi.is_compiled()) {
+ if (new_sfi.HasBytecodeArray()) {
forwarder.AddBytecodeArray(new_sfi.GetBytecodeArray(isolate));
}
}
@@ -2144,7 +2163,8 @@ Handle<SharedFunctionInfo> BackgroundMergeTask::CompleteMergeInForeground(
MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::FinalizeScript(
Isolate* isolate, Handle<String> source,
- const ScriptDetails& script_details) {
+ const ScriptDetails& script_details,
+ MaybeHandle<Script> maybe_cached_script) {
ScriptOriginOptions origin_options = script_details.origin_options;
DCHECK(flags_.is_toplevel());
@@ -2162,11 +2182,15 @@ MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::FinalizeScript(
maybe_result = outer_function_sfi_;
}
- if (!maybe_result.is_null() &&
- background_merge_task_.HasPendingForegroundWork()) {
- DCHECK(flags().is_toplevel());
+ if (Handle<Script> cached_script;
+ maybe_cached_script.ToHandle(&cached_script) && !maybe_result.is_null()) {
+ BackgroundMergeTask merge;
+ merge.SetUpOnMainThread(isolate, cached_script);
+ CHECK(merge.HasPendingBackgroundWork());
+ merge.BeginMergeInBackground(isolate->AsLocalIsolate(), script);
+ CHECK(merge.HasPendingForegroundWork());
Handle<SharedFunctionInfo> result =
- background_merge_task_.CompleteMergeInForeground(isolate, script);
+ merge.CompleteMergeInForeground(isolate, script);
maybe_result = result;
script = handle(Script::cast(result->script()), isolate);
DCHECK(script->source().StrictEquals(*source));
@@ -2209,7 +2233,6 @@ MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::FinalizeScript(
bool BackgroundCompileTask::FinalizeFunction(
Isolate* isolate, Compiler::ClearExceptionFlag flag) {
DCHECK(!flags_.is_toplevel());
- DCHECK(!background_merge_task_.HasPendingForegroundWork());
MaybeHandle<SharedFunctionInfo> maybe_result;
Handle<SharedFunctionInfo> input_shared_info =
@@ -2302,33 +2325,12 @@ void BackgroundDeserializeTask::SourceTextAvailable(
language_mode);
}
-void BackgroundCompileTask::SourceTextAvailable(
- Isolate* isolate, Handle<String> source_text,
- const ScriptDetails& script_details) {
- DCHECK_EQ(isolate, isolate_for_local_isolate_);
-
- // Non-toplevel compilations already refer to an existing Script; there is no
- // need to look anything up in the compilation cache.
- if (!flags().is_toplevel()) return;
-
- LanguageMode language_mode = flags().outer_language_mode();
- background_merge_task_.SetUpOnMainThread(isolate, source_text, script_details,
- language_mode);
-}
-
bool BackgroundDeserializeTask::ShouldMergeWithExistingScript() const {
DCHECK(v8_flags.merge_background_deserialized_script_with_compilation_cache);
return background_merge_task_.HasPendingBackgroundWork() &&
off_thread_data_.HasResult();
}
-bool BackgroundCompileTask::ShouldMergeWithExistingScript() const {
- DCHECK(v8_flags.stress_background_compile);
- DCHECK(!script_.is_null());
- return background_merge_task_.HasPendingBackgroundWork() &&
- jobs_to_retry_finalization_on_main_thread_.empty();
-}
-
void BackgroundDeserializeTask::MergeWithExistingScript() {
DCHECK(ShouldMergeWithExistingScript());
@@ -2340,21 +2342,6 @@ void BackgroundDeserializeTask::MergeWithExistingScript() {
&isolate, off_thread_data_.GetOnlyScript(isolate.heap()));
}
-void BackgroundCompileTask::MergeWithExistingScript() {
- DCHECK(ShouldMergeWithExistingScript());
-
- LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
- UnparkedScope unparked_scope(&isolate);
- LocalHandleScope handle_scope(isolate.heap());
-
- // Get a non-persistent handle to the newly compiled script.
- isolate.heap()->AttachPersistentHandles(std::move(persistent_handles_));
- Handle<Script> script = handle(*script_, &isolate);
- persistent_handles_ = isolate.heap()->DetachPersistentHandles();
-
- background_merge_task_.BeginMergeInBackground(&isolate, script);
-}
-
MaybeHandle<SharedFunctionInfo> BackgroundDeserializeTask::Finish(
Isolate* isolate, Handle<String> source,
ScriptOriginOptions origin_options) {
@@ -2546,6 +2533,21 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
CompileAllWithBaseline(isolate, finalize_unoptimized_compilation_data_list);
}
+ if (script->produce_compile_hints()) {
+    // Log lazy function compilation.
+ Handle<ArrayList> list;
+ if (script->compiled_lazy_function_positions().IsUndefined()) {
+ constexpr int kInitialLazyFunctionPositionListSize = 100;
+ list = ArrayList::New(isolate, kInitialLazyFunctionPositionListSize);
+ } else {
+ list = handle(ArrayList::cast(script->compiled_lazy_function_positions()),
+ isolate);
+ }
+ list = ArrayList::Add(isolate, list,
+ Smi::FromInt(shared_info->StartPosition()));
+ script->set_compiled_lazy_function_positions(*list);
+ }
+
DCHECK(!isolate->has_pending_exception());
DCHECK(is_compiled_scope->is_compiled());
return true;
@@ -2575,7 +2577,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
DCHECK(is_compiled_scope->is_compiled());
- Handle<CodeT> code = handle(shared_info->GetCode(), isolate);
+ Handle<Code> code = handle(shared_info->GetCode(isolate), isolate);
// Initialize the feedback cell for this JSFunction and reset the interrupt
// budget for feedback vector allocation even if there is a closure feedback
@@ -2604,7 +2606,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
concurrency_mode, code_kind);
}
- Handle<CodeT> maybe_code;
+ Handle<Code> maybe_code;
if (GetOrCompileOptimized(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
@@ -2612,7 +2614,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
// Install code on closure.
- function->set_code(*code, kReleaseStore);
+ function->set_code(*code);
// Install a feedback vector if necessary.
if (code->kind() == CodeKind::BASELINE) {
JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
@@ -2657,7 +2659,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
- shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
+ shared->set_baseline_code(*code, kReleaseStore);
}
double time_taken_ms = time_taken.InMillisecondsF();
@@ -2685,7 +2687,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
- CodeT baseline_code = shared->baseline_code(kAcquireLoad);
+ Code baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
return true;
@@ -2729,10 +2731,10 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
code_kind);
}
- Handle<CodeT> code;
+ Handle<Code> code;
if (GetOrCompileOptimized(isolate, function, mode, code_kind)
.ToHandle(&code)) {
- function->set_code(*code, kReleaseStore);
+ function->set_code(*code);
}
#ifdef DEBUG
@@ -2822,7 +2824,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// If the position is missing, attempt to get the code offset by
// walking the stack. Do not translate the code offset into source
// position, but store it as negative value for lazy translation.
- StackTraceFrameIterator it(isolate);
+ DebuggableStackFrameIterator it(isolate);
if (!it.done() && it.is_javascript()) {
FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(
@@ -3302,15 +3304,9 @@ class StressBackgroundCompileThread : public base::Thread {
data()->task = std::make_unique<i::BackgroundCompileTask>(
data(), isolate, type,
ScriptCompiler::CompileOptions::kNoCompileOptions);
- data()->task->SourceTextAvailable(isolate, source, script_details);
}
- void Run() override {
- data()->task->Run();
- if (data()->task->ShouldMergeWithExistingScript()) {
- data()->task->MergeWithExistingScript();
- }
- }
+ void Run() override { data()->task->Run(); }
ScriptStreamingData* data() { return streamed_source_.impl(); }
@@ -3352,7 +3348,8 @@ bool CanBackgroundCompile(const ScriptDetails& script_details,
// modules is supported.
return !script_details.origin_options.IsModule() && !extension &&
script_details.repl_mode == REPLMode::kNo &&
- compile_options == ScriptCompiler::kNoCompileOptions &&
+ (compile_options == ScriptCompiler::kNoCompileOptions ||
+ compile_options == ScriptCompiler::kProduceCompileHints) &&
natives == NOT_NATIVES_CODE;
}
@@ -3434,43 +3431,25 @@ MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
AlignedCachedData* cached_data, BackgroundDeserializeTask* deserialize_task,
+ v8::CompileHintCallback compile_hint_callback,
+ void* compile_hint_callback_data,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
- if (compile_options == ScriptCompiler::kNoCompileOptions ||
- compile_options == ScriptCompiler::kEagerCompile) {
- DCHECK_NULL(cached_data);
- DCHECK_NULL(deserialize_task);
- } else {
- DCHECK_EQ(compile_options, ScriptCompiler::kConsumeCodeCache);
+ if (compile_options == ScriptCompiler::kConsumeCodeCache) {
// Have to have exactly one of cached_data or deserialize_task.
DCHECK(cached_data || deserialize_task);
DCHECK(!(cached_data && deserialize_task));
DCHECK_NULL(extension);
+ } else {
+ DCHECK_NULL(cached_data);
+ DCHECK_NULL(deserialize_task);
}
- if (V8_UNLIKELY(
- v8_flags.experimental_web_snapshots &&
- (source->IsExternalOneByteString() || source->IsSeqOneByteString() ||
- source->IsExternalTwoByteString() || source->IsSeqTwoByteString()) &&
- source->length() > 4)) {
- // Experimental: Treat the script as a web snapshot if it starts with the
- // magic byte sequence. TODO(v8:11525): Remove this once proper embedder
- // integration is done.
- bool magic_matches = true;
- for (size_t i = 0;
- i < sizeof(WebSnapshotSerializerDeserializer::kMagicNumber); ++i) {
- if (source->Get(static_cast<int>(i)) !=
- WebSnapshotSerializerDeserializer::kMagicNumber[i]) {
- magic_matches = false;
- break;
- }
- }
- if (magic_matches) {
- return Compiler::GetSharedFunctionInfoForWebSnapshot(
- isolate, source, script_details.name_obj);
- }
+ if (compile_options == ScriptCompiler::kConsumeCompileHints) {
+ DCHECK_NOT_NULL(compile_hint_callback);
+ DCHECK_NOT_NULL(compile_hint_callback_data);
}
LanguageMode language_mode = construct_language_mode(v8_flags.use_strict);
@@ -3530,9 +3509,8 @@ MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
// would be non-trivial.
} else {
maybe_result = CodeSerializer::Deserialize(
- isolate, cached_data, source, script_details.origin_options);
- // TODO(v8:12808): Merge the newly deserialized code into a preexisting
- // Script if one was found in the compilation cache.
+ isolate, cached_data, source, script_details.origin_options,
+ maybe_script);
}
bool consuming_code_cache_succeeded = false;
@@ -3590,6 +3568,11 @@ MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
isolate->ReportPendingMessages();
}
}
+ Handle<SharedFunctionInfo> result;
+ if (compile_options == ScriptCompiler::CompileOptions::kProduceCompileHints &&
+ maybe_result.ToHandle(&result)) {
+ Script::cast(result->script()).set_produce_compile_hints(true);
+ }
return maybe_result;
}
@@ -3602,8 +3585,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
return GetSharedFunctionInfoForScriptImpl(
- isolate, source, script_details, nullptr, nullptr, nullptr,
- compile_options, no_cache_reason, natives);
+ isolate, source, script_details, nullptr, nullptr, nullptr, nullptr,
+ nullptr, compile_options, no_cache_reason, natives);
}
MaybeHandle<SharedFunctionInfo>
@@ -3612,8 +3595,9 @@ Compiler::GetSharedFunctionInfoForScriptWithExtension(
const ScriptDetails& script_details, v8::Extension* extension,
ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
return GetSharedFunctionInfoForScriptImpl(
- isolate, source, script_details, extension, nullptr, nullptr,
- compile_options, ScriptCompiler::kNoCacheBecauseV8Extension, natives);
+ isolate, source, script_details, extension, nullptr, nullptr, nullptr,
+ nullptr, compile_options, ScriptCompiler::kNoCacheBecauseV8Extension,
+ natives);
}
MaybeHandle<SharedFunctionInfo>
@@ -3623,8 +3607,8 @@ Compiler::GetSharedFunctionInfoForScriptWithCachedData(
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
return GetSharedFunctionInfoForScriptImpl(
- isolate, source, script_details, nullptr, cached_data, nullptr,
- compile_options, no_cache_reason, natives);
+ isolate, source, script_details, nullptr, cached_data, nullptr, nullptr,
+ nullptr, compile_options, no_cache_reason, natives);
}
MaybeHandle<SharedFunctionInfo>
@@ -3636,7 +3620,21 @@ Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
return GetSharedFunctionInfoForScriptImpl(
isolate, source, script_details, nullptr, nullptr, deserialize_task,
- compile_options, no_cache_reason, natives);
+ nullptr, nullptr, compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithCompileHints(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ v8::CompileHintCallback compile_hint_callback,
+ void* compile_hint_callback_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, nullptr,
+ compile_hint_callback, compile_hint_callback_data, compile_options,
+ no_cache_reason, natives);
}
// static
@@ -3649,12 +3647,10 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Isolate* isolate = context->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
- if (compile_options == ScriptCompiler::kNoCompileOptions ||
- compile_options == ScriptCompiler::kEagerCompile) {
- DCHECK_NULL(cached_data);
- } else {
- DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
+ if (compile_options == ScriptCompiler::kConsumeCodeCache) {
DCHECK(cached_data);
+ } else {
+ DCHECK_NULL(cached_data);
}
LanguageMode language_mode = construct_language_mode(v8_flags.use_strict);
@@ -3745,6 +3741,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
BackgroundCompileTask* task = streaming_data->task.get();
MaybeHandle<SharedFunctionInfo> maybe_result;
+ MaybeHandle<Script> maybe_cached_script;
// Check if compile cache already holds the SFI, if so no need to finalize
// the code compiled on the background thread.
CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -3755,16 +3752,14 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
compilation_cache->LookupScript(source, script_details,
task->flags().outer_language_mode());
- // TODO(v8:12808): Determine what to do if we finish streaming and find that
- // another copy of the Script already exists but has no root
- // SharedFunctionInfo or has an uncompiled SharedFunctionInfo. For now, we
- // just ignore it and create a new Script.
if (!lookup_result.toplevel_sfi().is_null()) {
maybe_result = lookup_result.toplevel_sfi();
}
if (!maybe_result.is_null()) {
compile_timer.set_hit_isolate_cache();
+ } else {
+ maybe_cached_script = lookup_result.script();
}
}
@@ -3776,10 +3771,15 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish");
- maybe_result = task->FinalizeScript(isolate, source, script_details);
+ maybe_result = task->FinalizeScript(isolate, source, script_details,
+ maybe_cached_script);
Handle<SharedFunctionInfo> result;
if (maybe_result.ToHandle(&result)) {
+ if (task->flags().produce_compile_hints()) {
+ Script::cast(result->script()).set_produce_compile_hints(true);
+ }
+
// Add compiled code to the isolate cache.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.AddToCache");
@@ -3795,30 +3795,6 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
} // namespace internal
// static
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForWebSnapshot(
- Isolate* isolate, Handle<String> source,
- MaybeHandle<Object> maybe_script_name) {
- // This script won't hold the functions created from the web snapshot;
- // reserving space only for the top-level SharedFunctionInfo is enough.
- Handle<WeakFixedArray> shared_function_infos =
- isolate->factory()->NewWeakFixedArray(1, AllocationType::kOld);
- Handle<Script> script = isolate->factory()->NewScript(source);
- script->set_type(Script::TYPE_WEB_SNAPSHOT);
- script->set_shared_function_infos(*shared_function_infos);
- Handle<Object> script_name;
- if (maybe_script_name.ToHandle(&script_name) && script_name->IsString()) {
- script->set_name(String::cast(*script_name));
- } else {
- script->set_name(*isolate->factory()->empty_string());
- }
-
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfoForWebSnapshot();
- shared->SetScript(isolate->factory()->read_only_roots(), *script, 0, false);
- return shared;
-}
-
-// static
template <typename IsolateT>
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, IsolateT* isolate) {
@@ -3870,10 +3846,10 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
-MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
- Handle<JSFunction> function,
- BytecodeOffset osr_offset,
- ConcurrencyMode mode) {
+MaybeHandle<Code> Compiler::CompileOptimizedOSR(Isolate* isolate,
+ Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ ConcurrencyMode mode) {
DCHECK(IsOSR(osr_offset));
if (V8_UNLIKELY(isolate->serializer_enabled())) return {};
@@ -3896,7 +3872,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
function->feedback_vector().reset_osr_urgency();
CompilerTracer::TraceOptimizeOSRStarted(isolate, function, osr_offset, mode);
- MaybeHandle<CodeT> result = GetOrCompileOptimized(
+ MaybeHandle<Code> result = GetOrCompileOptimized(
isolate, function, mode, CodeKind::TURBOFAN, osr_offset);
if (result.is_null()) {
@@ -3911,12 +3887,13 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
}
// static
-void Compiler::DisposeTurbofanCompilationJob(TurbofanCompilationJob* job,
+void Compiler::DisposeTurbofanCompilationJob(Isolate* isolate,
+ TurbofanCompilationJob* job,
bool restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure();
ResetTieringState(*function, job->compilation_info()->osr_offset());
if (restore_function_code) {
- function->set_code(function->shared().GetCode(), kReleaseStore);
+ function->set_code(function->shared().GetCode(isolate));
}
}
@@ -3948,7 +3925,7 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
// 2) The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
// 3) The code may have already been invalidated due to dependency change.
- // 4) Code generation may have failed.
+ // 4) InstructionStream generation may have failed.
if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
job->RetryOptimization(BailoutReason::kOptimizationDisabled);
@@ -3960,14 +3937,14 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
ResetTieringState(*function, osr_offset);
OptimizedCodeCache::Insert(
isolate, *compilation_info->closure(),
- compilation_info->osr_offset(), ToCodeT(*compilation_info->code()),
+ compilation_info->osr_offset(), *compilation_info->code(),
compilation_info->function_context_specializing());
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (IsOSR(osr_offset)) {
CompilerTracer::TraceOptimizeOSRFinished(isolate, function,
osr_offset);
} else {
- function->set_code(*compilation_info->code(), kReleaseStore);
+ function->set_code(*compilation_info->code());
}
}
return;
@@ -3975,11 +3952,13 @@ void Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
}
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
- CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info,
+ job->prepare_in_ms(), job->execute_in_ms(),
+ job->finalize_in_ms());
if (V8_LIKELY(use_result)) {
ResetTieringState(*function, osr_offset);
if (!IsOSR(osr_offset)) {
- function->set_code(shared->GetCode(), kReleaseStore);
+ function->set_code(shared->GetCode(isolate));
}
}
}
@@ -4007,8 +3986,8 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
ResetTieringState(*function, osr_offset);
if (status == CompilationJob::SUCCEEDED) {
- // Note the finalized Code object has already been installed on the
- // function by MaglevCompilationJob::FinalizeJobImpl.
+ // Note the finalized InstructionStream object has already been installed on
+ // the function by MaglevCompilationJob::FinalizeJobImpl.
OptimizedCodeCache::Insert(isolate, *function, BytecodeOffset::None(),
function->code(),
@@ -4020,11 +3999,10 @@ void Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
ResetProfilerTicks(*function, osr_offset);
RecordMaglevFunctionCompilation(isolate, function);
- double ms_prepare = job->time_taken_to_prepare().InMillisecondsF();
- double ms_optimize = job->time_taken_to_execute().InMillisecondsF();
- double ms_codegen = job->time_taken_to_finalize().InMillisecondsF();
- CompilerTracer::TraceFinishMaglevCompile(isolate, function, ms_prepare,
- ms_optimize, ms_codegen);
+ job->RecordCompilationStats(isolate);
+ CompilerTracer::TraceFinishMaglevCompile(
+ isolate, function, job->prepare_in_ms(), job->execute_in_ms(),
+ job->finalize_in_ms());
}
#endif
}
@@ -4048,17 +4026,13 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// deoptimized the code on the feedback vector. So check for any
    // deoptimized code just before installing it on the function.
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- *shared, "new function from shared function info");
- CodeT code = function->feedback_vector().optimized_code();
+ isolate, *shared, "new function from shared function info");
+ Code code = function->feedback_vector().optimized_code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());
DCHECK(function->shared().is_compiled());
- // We don't need a release store because the optimized code was
- // stored with release semantics into the vector
- static_assert(
- FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(code);
}
}
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 7a0d18a452..67c8c145f0 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Generate and return optimized code for OSR. The empty handle is returned
// either on failure, or after spawning a concurrent OSR task (in which case
// a future OSR request will pick up the resulting code object).
- V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> CompileOptimizedOSR(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Code> CompileOptimizedOSR(
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
ConcurrencyMode mode);
@@ -115,7 +115,8 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
ClearExceptionFlag flag);
// Dispose a job without finalization.
- static void DisposeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ static void DisposeTurbofanCompilationJob(Isolate* isolate,
+ TurbofanCompilationJob* job,
bool restore_function_code);
// Finalize and install Turbofan code from a previously run job.
@@ -215,6 +216,16 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithCompileHints(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ v8::CompileHintCallback compile_hint_callback,
+ void* compile_hint_callback_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
// Create a shared function info object for a Script source that has already
// been parsed and possibly compiled on a background thread while being loaded
// from a streamed source. On return, the data held by |streaming_data| will
@@ -224,9 +235,6 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, ScriptStreamingData* streaming_data);
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForWebSnapshot(
- Isolate* isolate, Handle<String> source, MaybeHandle<Object> script_name);
-
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
template <typename IsolateT>
@@ -384,6 +392,16 @@ class OptimizedCompilationJob : public CompilationJob {
const char* compiler_name() const { return compiler_name_; }
+ double prepare_in_ms() const {
+ return time_taken_to_prepare_.InMillisecondsF();
+ }
+ double execute_in_ms() const {
+ return time_taken_to_execute_.InMillisecondsF();
+ }
+ double finalize_in_ms() const {
+ return time_taken_to_finalize_.InMillisecondsF();
+ }
+
protected:
// Overridden by the actual implementation.
virtual Status PrepareJobImpl(Isolate* isolate) = 0;
@@ -520,7 +538,7 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
BackgroundCompileTask& operator=(const BackgroundCompileTask&) = delete;
~BackgroundCompileTask();
- // Creates a new task that when run will parse and compile the top-level
+ // Creates a new task that when run will parse and compile the non-top-level
// |shared_info| and can be finalized with FinalizeFunction in
// Compiler::FinalizeBackgroundCompileTask.
BackgroundCompileTask(
@@ -534,28 +552,10 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
void Run(LocalIsolate* isolate,
ReusableUnoptimizedCompileState* reusable_state);
- // Checks the Isolate compilation cache to see whether it will be necessary to
- // merge the newly compiled objects into an existing Script. This can change
- // the value of ShouldMergeWithExistingScript, and embedders should check the
- // latter after calling this. May only be called on a thread where the Isolate
- // is currently entered.
- void SourceTextAvailable(Isolate* isolate, Handle<String> source_text,
- const ScriptDetails& script_details);
-
- // Returns whether the embedder should call MergeWithExistingScript. This
- // function may be called from any thread, any number of times, but its return
- // value is only meaningful after SourceTextAvailable has completed.
- bool ShouldMergeWithExistingScript() const;
-
- // Partially merges newly compiled objects into an existing Script with the
- // same source, and generates a list of follow-up work for the main thread.
- // May be called from any thread, only once, and only if
- // ShouldMergeWithExistingScript returned true.
- void MergeWithExistingScript();
-
MaybeHandle<SharedFunctionInfo> FinalizeScript(
Isolate* isolate, Handle<String> source,
- const ScriptDetails& script_details);
+ const ScriptDetails& script_details,
+ MaybeHandle<Script> maybe_cached_script);
bool FinalizeFunction(Isolate* isolate, Compiler::ClearExceptionFlag flag);
@@ -568,6 +568,8 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
void ClearFunctionJobPointer();
+ bool is_streaming_compilation() const;
+
// Data needed for parsing and compilation. These need to be initialized
// before the compilation starts.
Isolate* isolate_for_local_isolate_;
@@ -593,10 +595,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
int start_position_;
int end_position_;
int function_literal_id_;
-
- // Task that merges newly compiled content into an existing Script from the
- // Isolate compilation cache, if necessary.
- BackgroundMergeTask background_merge_task_;
};
// Contains all data which needs to be transmitted between threads for
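To put the new compile-hints entry point declared above in context, here is a hedged sketch of an embedder-side callback. The exact v8::CompileHintCallback signature should be checked against v8-callbacks.h; HintSet and HasCompileHint are hypothetical names, not V8 API.

  // Hypothetical embedder callback, assuming it receives a function's source
  // start position plus the opaque data pointer and returns whether that
  // function should be compiled eagerly.
  #include <unordered_set>

  struct HintSet {
    std::unordered_set<int> eager_positions;
  };

  bool HasCompileHint(int function_start_position, void* data) {
    const auto* hints = static_cast<const HintSet*>(data);
    return hints->eager_positions.count(function_start_position) > 0;
  }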
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index dc087da1c9..0516447518 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -43,6 +43,10 @@ enum CpuFeature {
#elif V8_TARGET_ARCH_ARM64
JSCVT,
+ DOTPROD,
+  // Large System Extension, includes atomic operations on memory: CAS, LDADD,
+ // STADD, SWP, etc.
+ LSE,
#elif V8_TARGET_ARCH_MIPS64
FPU,
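For orientation, a sketch of how a newly added CpuFeature such as LSE is typically gated in the assemblers; masm stands for a hypothetical MacroAssembler and the emitted body is illustrative only.

  // Hypothetical gating: CpuFeatures::IsSupported() reflects runtime detection,
  // and CpuFeatureScope lets the assembler emit feature-dependent instructions.
  if (CpuFeatures::IsSupported(LSE)) {
    CpuFeatureScope scope(&masm, LSE);
    // Emit CAS/LDADD/SWP-based atomics instead of LL/SC retry loops.
  } else {
    // Fall back to load-linked/store-conditional sequences.
  }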
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 63ec0d1fb6..7990f52169 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -6,6 +6,7 @@
#include "include/v8-fast-api-calls.h"
#include "src/api/api-inl.h"
+#include "src/base/bits.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
#include "src/common/globals.h"
@@ -36,6 +37,7 @@
#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-stack.h"
#include "src/strings/string-search.h"
+#include "src/strings/unicode-inl.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-external-refs.h"
@@ -338,6 +340,9 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
FUNCTION_REFERENCE(write_barrier_marking_from_code_function,
WriteBarrier::MarkingFromCode)
+FUNCTION_REFERENCE(write_barrier_shared_marking_from_code_function,
+ WriteBarrier::SharedMarkingFromCode)
+
FUNCTION_REFERENCE(shared_barrier_from_code_function,
WriteBarrier::SharedFromCode)
@@ -350,6 +355,19 @@ FUNCTION_REFERENCE(delete_handle_scope_extensions,
FUNCTION_REFERENCE(ephemeron_key_write_barrier_function,
Heap::EphemeronKeyWriteBarrierFromCode)
+ExternalPointerHandle AllocateAndInitializeExternalPointerTableEntry(
+ Isolate* isolate, Address pointer) {
+#ifdef V8_ENABLE_SANDBOX
+ return isolate->external_pointer_table().AllocateAndInitializeEntry(
+ isolate, pointer, kExternalObjectValueTag);
+#else
+ return 0;
+#endif // V8_ENABLE_SANDBOX
+}
+
+FUNCTION_REFERENCE(allocate_and_initialize_external_pointer_table_entry,
+ AllocateAndInitializeExternalPointerTableEntry)
+
FUNCTION_REFERENCE(get_date_field_function, JSDate::GetField)
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
@@ -455,8 +473,7 @@ IF_WASM(FUNCTION_REFERENCE, wasm_float64_pow, wasm::float64_pow_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_call_trap_callback_for_testing,
wasm::call_trap_callback_for_testing)
IF_WASM(FUNCTION_REFERENCE, wasm_array_copy, wasm::array_copy_wrapper)
-IF_WASM(FUNCTION_REFERENCE, wasm_array_fill_with_number_or_null,
- wasm::array_fill_with_number_or_null_wrapper)
+IF_WASM(FUNCTION_REFERENCE, wasm_array_fill, wasm::array_fill_wrapper)
static void f64_acos_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
@@ -514,6 +531,18 @@ ExternalReference ExternalReference::heap_is_minor_marking_flag_address(
return ExternalReference(isolate->heap()->IsMinorMarkingFlagAddress());
}
+ExternalReference ExternalReference::is_shared_space_isolate_flag_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->isolate_data()->is_shared_space_isolate_flag_address());
+}
+
+ExternalReference ExternalReference::uses_shared_heap_flag_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->isolate_data()->uses_shared_heap_flag_address());
+}
+
ExternalReference ExternalReference::new_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
@@ -852,8 +881,6 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2,
BUILTIN_FP_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt,
BUILTIN_FP_CALL)
-FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos,
- BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp,
@@ -868,8 +895,6 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_log10_function, base::ieee754::log10,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log2_function, base::ieee754::log2,
BUILTIN_FP_CALL)
-FUNCTION_REFERENCE_WITH_TYPE(ieee754_sin_function, base::ieee754::sin,
- BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_sinh_function, base::ieee754::sinh,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_tan_function, base::ieee754::tan,
@@ -879,6 +904,32 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_tanh_function, base::ieee754::tanh,
FUNCTION_REFERENCE_WITH_TYPE(ieee754_pow_function, base::ieee754::pow,
BUILTIN_FP_FP_CALL)
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+ExternalReference ExternalReference::ieee754_sin_function() {
+ static_assert(
+ IsValidExternalReferenceType<decltype(&base::ieee754::libm_sin)>::value);
+ static_assert(IsValidExternalReferenceType<
+ decltype(&base::ieee754::fdlibm_sin)>::value);
+ auto* f = v8_flags.use_libm_trig_functions ? base::ieee754::libm_sin
+ : base::ieee754::fdlibm_sin;
+ return ExternalReference(Redirect(FUNCTION_ADDR(f), BUILTIN_FP_CALL));
+}
+ExternalReference ExternalReference::ieee754_cos_function() {
+ static_assert(
+ IsValidExternalReferenceType<decltype(&base::ieee754::libm_cos)>::value);
+ static_assert(IsValidExternalReferenceType<
+ decltype(&base::ieee754::fdlibm_cos)>::value);
+ auto* f = v8_flags.use_libm_trig_functions ? base::ieee754::libm_cos
+ : base::ieee754::fdlibm_cos;
+ return ExternalReference(Redirect(FUNCTION_ADDR(f), BUILTIN_FP_CALL));
+}
+#else
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_sin_function, base::ieee754::sin,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos,
+ BUILTIN_FP_CALL)
+#endif
+
void* libc_memchr(void* string, int character, size_t search_length) {
return memchr(string, character, search_length);
}
@@ -1010,6 +1061,22 @@ FUNCTION_REFERENCE(external_one_byte_string_get_chars,
FUNCTION_REFERENCE(external_two_byte_string_get_chars,
ExternalTwoByteStringGetChars)
+// See:
+// https://lemire.me/blog/2021/06/03/computing-the-number-of-digits-of-an-integer-even-faster/
+static constexpr uint64_t kLog10OffsetTable[] = {
+ 0x100000000, 0x1fffffff6, 0x1fffffff6, 0x1fffffff6, 0x2ffffff9c,
+ 0x2ffffff9c, 0x2ffffff9c, 0x3fffffc18, 0x3fffffc18, 0x3fffffc18,
+ 0x4ffffd8f0, 0x4ffffd8f0, 0x4ffffd8f0, 0x4ffffd8f0, 0x5fffe7960,
+ 0x5fffe7960, 0x5fffe7960, 0x6fff0bdc0, 0x6fff0bdc0, 0x6fff0bdc0,
+ 0x7ff676980, 0x7ff676980, 0x7ff676980, 0x7ff676980, 0x8fa0a1f00,
+ 0x8fa0a1f00, 0x8fa0a1f00, 0x9c4653600, 0x9c4653600, 0x9c4653600,
+ 0xa00000000, 0xa00000000,
+};
+
+ExternalReference ExternalReference::address_of_log10_offset_table() {
+ return ExternalReference(reinterpret_cast<Address>(&kLog10OffsetTable[0]));
+}
+
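The table added above implements Lemire's table-driven digit count: each 64-bit entry packs a candidate digit count into its upper 32 bits and a correction term into its lower 32 bits, so one add and one shift yield the number of decimal digits of a uint32_t. A minimal standalone sketch of the technique (not V8's generated code; the log2 computation here is an assumed stand-in for a count-leading-zeros instruction):

    #include <cstdint>

    // Counts the decimal digits of |value| with one table lookup, using the
    // same offsets as kLog10OffsetTable above.
    int FastDigitCount(uint32_t value) {
      static constexpr uint64_t kOffsets[32] = {
          0x100000000, 0x1fffffff6, 0x1fffffff6, 0x1fffffff6, 0x2ffffff9c,
          0x2ffffff9c, 0x2ffffff9c, 0x3fffffc18, 0x3fffffc18, 0x3fffffc18,
          0x4ffffd8f0, 0x4ffffd8f0, 0x4ffffd8f0, 0x4ffffd8f0, 0x5fffe7960,
          0x5fffe7960, 0x5fffe7960, 0x6fff0bdc0, 0x6fff0bdc0, 0x6fff0bdc0,
          0x7ff676980, 0x7ff676980, 0x7ff676980, 0x7ff676980, 0x8fa0a1f00,
          0x8fa0a1f00, 0x8fa0a1f00, 0x9c4653600, 0x9c4653600, 0x9c4653600,
          0xa00000000, 0xa00000000,
      };
      // floor(log2(value)), with value == 0 mapping to index 0.
      int log2 = 0;
      for (uint32_t v = value >> 1; v != 0; v >>= 1) ++log2;
      return static_cast<int>((value + kOffsets[log2]) >> 32);
    }

For example, 999 maps to index 9 and (999 + 0x3fffffc18) >> 32 is 3, while 1000 at the same index gives 4.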
FUNCTION_REFERENCE(orderedhashmap_gethash_raw, OrderedHashMap::GetHash)
Address GetOrCreateHash(Isolate* isolate, Address raw_key) {
@@ -1103,6 +1170,24 @@ static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x,
FUNCTION_REFERENCE(smi_lexicographic_compare_function,
LexicographicCompareWrapper)
+uint32_t HasUnpairedSurrogate(const uint16_t* code_units, size_t length) {
+ // Use uint32_t to avoid complexity around bool return types.
+ static constexpr uint32_t kTrue = 1;
+ static constexpr uint32_t kFalse = 0;
+ return unibrow::Utf16::HasUnpairedSurrogate(code_units, length) ? kTrue
+ : kFalse;
+}
+
+FUNCTION_REFERENCE(has_unpaired_surrogate, HasUnpairedSurrogate)
+
+void ReplaceUnpairedSurrogates(const uint16_t* source_code_units,
+ uint16_t* dest_code_units, size_t length) {
+ return unibrow::Utf16::ReplaceUnpairedSurrogates(source_code_units,
+ dest_code_units, length);
+}
+
+FUNCTION_REFERENCE(replace_unpaired_surrogates, ReplaceUnpairedSurrogates)
+
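The two wrappers above let generated code call unibrow's UTF-16 validity helpers directly. An unpaired surrogate is a lead surrogate (0xD800-0xDBFF) not immediately followed by a trail surrogate (0xDC00-0xDFFF), or a lone trail surrogate; such sequences are not well-formed UTF-16. A rough sketch of the check (illustrative only, not the unibrow implementation):

    #include <cstddef>
    #include <cstdint>

    bool HasUnpairedSurrogateSketch(const uint16_t* units, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        uint16_t u = units[i];
        if (u >= 0xD800 && u <= 0xDBFF) {         // lead surrogate
          if (i + 1 == length) return true;       // lead at end of string
          uint16_t next = units[i + 1];
          if (next < 0xDC00 || next > 0xDFFF) return true;  // no trail follows
          ++i;                                    // skip the valid pair
        } else if (u >= 0xDC00 && u <= 0xDFFF) {  // lone trail surrogate
          return true;
        }
      }
      return false;
    }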
FUNCTION_REFERENCE(mutable_big_int_absolute_add_and_canonicalize_function,
MutableBigInt_AbsoluteAddAndCanonicalize)
@@ -1130,6 +1215,33 @@ FUNCTION_REFERENCE(mutable_big_int_bitwise_and_nn_and_canonicalize_function,
FUNCTION_REFERENCE(mutable_big_int_bitwise_and_pn_and_canonicalize_function,
MutableBigInt_BitwiseAndPosNegAndCanonicalize)
+FUNCTION_REFERENCE(mutable_big_int_bitwise_or_pp_and_canonicalize_function,
+ MutableBigInt_BitwiseOrPosPosAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_bitwise_or_nn_and_canonicalize_function,
+ MutableBigInt_BitwiseOrNegNegAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_bitwise_or_pn_and_canonicalize_function,
+ MutableBigInt_BitwiseOrPosNegAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_bitwise_xor_pp_and_canonicalize_function,
+ MutableBigInt_BitwiseXorPosPosAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_bitwise_xor_nn_and_canonicalize_function,
+ MutableBigInt_BitwiseXorNegNegAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_bitwise_xor_pn_and_canonicalize_function,
+ MutableBigInt_BitwiseXorPosNegAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_left_shift_and_canonicalize_function,
+ MutableBigInt_LeftShiftAndCanonicalize)
+
+FUNCTION_REFERENCE(big_int_right_shift_result_length_function,
+ RightShiftResultLength)
+
+FUNCTION_REFERENCE(mutable_big_int_right_shift_and_canonicalize_function,
+ MutableBigInt_RightShiftAndCanonicalize)
+
FUNCTION_REFERENCE(check_object_type, CheckObjectType)
#ifdef V8_INTL_SUPPORT
@@ -1260,6 +1372,11 @@ ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
FUNCTION_REFERENCE(call_enqueue_microtask_function,
MicrotaskQueue::CallEnqueueMicrotask)
+ExternalReference ExternalReference::int64_mul_high_function() {
+ return ExternalReference(
+ Redirect(FUNCTION_ADDR(base::bits::SignedMulHigh64)));
+}
+
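The new int64_mul_high_function reference exposes base::bits::SignedMulHigh64, i.e. the upper 64 bits of the full 128-bit product of two signed 64-bit values, which 32-bit targets such as ia32 cannot get from a single multiply. A sketch of what such a helper computes (assuming compiler __int128 support; the real base::bits version is written portably):

    #include <cstdint>

    int64_t SignedMulHigh64Sketch(int64_t a, int64_t b) {
      // Widen to 128 bits, multiply, and keep only the high half.
      __int128 product = static_cast<__int128>(a) * static_cast<__int128>(b);
      return static_cast<int64_t>(product >> 64);
    }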
static int64_t atomic_pair_load(intptr_t address) {
return std::atomic_load(reinterpret_cast<std::atomic<int64_t>*>(address));
}
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 4485e3bea5..503b5b681d 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -41,6 +41,9 @@ class StatsCounter;
V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \
V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \
V(heap_is_minor_marking_flag_address, "heap_is_minor_marking_flag_address") \
+ V(is_shared_space_isolate_flag_address, \
+ "is_shared_space_isolate_flag_address") \
+ V(uses_shared_heap_flag_address, "uses_shared_heap_flag_address") \
V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \
V(new_space_allocation_limit_address, \
"Heap::NewSpaceAllocationLimitAddress()") \
@@ -108,6 +111,7 @@ class StatsCounter;
"address_of_enable_experimental_regexp_engine") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
+ V(address_of_log10_offset_table, "log10_offset_table") \
V(address_of_min_int, "LDoubleConstant::min_int") \
V(address_of_mock_arraybuffer_allocator_flag, \
"v8_flags.mock_arraybuffer_allocator") \
@@ -116,6 +120,8 @@ class StatsCounter;
V(address_of_shared_string_table_flag, "v8_flags.shared_string_table") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
+ V(allocate_and_initialize_external_pointer_table_entry, \
+ "AllocateAndInitializeExternalPointerTableEntry") \
V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
V(baseline_pc_for_next_executed_bytecode, \
"BaselinePCForNextExecutedBytecode") \
@@ -192,6 +198,23 @@ class StatsCounter;
"MutableBigInt_BitwiseAndNegNegAndCanonicalize") \
V(mutable_big_int_bitwise_and_pn_and_canonicalize_function, \
"MutableBigInt_BitwiseAndPosNegAndCanonicalize") \
+ V(mutable_big_int_bitwise_or_pp_and_canonicalize_function, \
+ "MutableBigInt_BitwiseOrPosPosAndCanonicalize") \
+ V(mutable_big_int_bitwise_or_nn_and_canonicalize_function, \
+ "MutableBigInt_BitwiseOrNegNegAndCanonicalize") \
+ V(mutable_big_int_bitwise_or_pn_and_canonicalize_function, \
+ "MutableBigInt_BitwiseOrPosNegAndCanonicalize") \
+ V(mutable_big_int_bitwise_xor_pp_and_canonicalize_function, \
+ "MutableBigInt_BitwiseXorPosPosAndCanonicalize") \
+ V(mutable_big_int_bitwise_xor_nn_and_canonicalize_function, \
+ "MutableBigInt_BitwiseXorNegNegAndCanonicalize") \
+ V(mutable_big_int_bitwise_xor_pn_and_canonicalize_function, \
+ "MutableBigInt_BitwiseXorPosNegAndCanonicalize") \
+ V(mutable_big_int_left_shift_and_canonicalize_function, \
+ "MutableBigInt_LeftShiftAndCanonicalize") \
+ V(big_int_right_shift_result_length_function, "RightShiftResultLength") \
+ V(mutable_big_int_right_shift_and_canonicalize_function, \
+ "MutableBigInt_RightShiftAndCanonicalize") \
V(new_deoptimizer_function, "Deoptimizer::New()") \
V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
V(printf_function, "printf") \
@@ -209,6 +232,8 @@ class StatsCounter;
V(array_indexof_includes_smi_or_object, \
"array_indexof_includes_smi_or_object") \
V(array_indexof_includes_double, "array_indexof_includes_double") \
+ V(has_unpaired_surrogate, "Utf16::HasUnpairedSurrogate") \
+ V(replace_unpaired_surrogates, "Utf16::ReplaceUnpairedSurrogates") \
V(try_string_to_index_or_lookup_existing, \
"try_string_to_index_or_lookup_existing") \
V(string_from_forward_table, "string_from_forward_table") \
@@ -274,8 +299,7 @@ class StatsCounter;
IF_WASM(V, wasm_memory_copy, "wasm::memory_copy") \
IF_WASM(V, wasm_memory_fill, "wasm::memory_fill") \
IF_WASM(V, wasm_array_copy, "wasm::array_copy") \
- IF_WASM(V, wasm_array_fill_with_number_or_null, \
- "wasm::array_fill_with_number_or_null") \
+ IF_WASM(V, wasm_array_fill, "wasm::array_fill") \
V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
@@ -292,9 +316,12 @@ class StatsCounter;
V(address_of_wasm_int32_overflow_as_float, "wasm_int32_overflow_as_float") \
V(supports_cetss_address, "CpuFeatures::supports_cetss_address") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
+ V(write_barrier_shared_marking_from_code_function, \
+ "WriteBarrier::SharedMarkingFromCode") \
V(shared_barrier_from_code_function, "WriteBarrier::SharedFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
+ V(int64_mul_high_function, "int64_mul_high_function") \
V(atomic_pair_load_function, "atomic_pair_load_function") \
V(atomic_pair_store_function, "atomic_pair_store_function") \
V(atomic_pair_add_function, "atomic_pair_add_function") \
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index fb49c0d5eb..a00f9113ff 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -23,12 +23,6 @@ HandlerTable::HandlerTable(Code code)
: HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
kReturnAddressBasedEncoding) {}
-#ifdef V8_EXTERNAL_CODE_SPACE
-HandlerTable::HandlerTable(CodeDataContainer code)
- : HandlerTable(code.HandlerTableAddress(), code.handler_table_size(),
- kReturnAddressBasedEncoding) {}
-#endif // V8_EXTERNAL_CODE_SPACE
-
#if V8_ENABLE_WEBASSEMBLY
HandlerTable::HandlerTable(const wasm::WasmCode* code)
: HandlerTable(code->handler_table(), code->handler_table_size(),
@@ -153,7 +147,7 @@ int HandlerTable::LengthForRange(int entries) {
// static
int HandlerTable::EmitReturnTableStart(Assembler* masm) {
- masm->DataAlign(Code::kMetadataAlignment);
+ masm->DataAlign(InstructionStream::kMetadataAlignment);
masm->RecordComment(";;; Exception handler table.");
int table_start = masm->pc_offset();
return table_start;
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index a449980d23..7a4554796b 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -15,8 +15,8 @@ namespace internal {
class Assembler;
class ByteArray;
class BytecodeArray;
+class InstructionStream;
class Code;
-class CodeDataContainer;
namespace wasm {
class WasmCode;
@@ -30,8 +30,9 @@ class WasmCode;
// Layout looks as follows:
// [ range-start , range-end , handler-offset , handler-data ]
// 2) Based on return addresses: Used for turbofanned code. Stored directly in
-// the instruction stream of the {Code} object. Contains one entry per
-// call-site that could throw an exception. Layout looks as follows:
+// the instruction stream of the {InstructionStream} object. Contains one
+// entry per call-site that could throw an exception. Layout looks as
+// follows:
// [ return-address-offset , handler-offset ]
class V8_EXPORT_PRIVATE HandlerTable {
public:
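Written out as plain structs, the two entry layouts described in the comment above look roughly like this (illustrative only; V8 stores the fields as raw encoded integers, not as C++ structs):

    #include <cstdint>

    struct RangeBasedEntry {          // bytecode: covers a pc range
      int32_t range_start;            // first covered bytecode offset
      int32_t range_end;              // one past the last covered offset
      int32_t handler_offset;         // where execution resumes on a throw
      int32_t handler_data;           // extra per-entry data
    };

    struct ReturnAddressBasedEntry {  // turbofanned code: one entry per call
      int32_t return_address_offset;  // identifies the throwing call site
      int32_t handler_offset;         // matching handler
    };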
@@ -54,10 +55,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
// Constructors for the various encodings.
+ explicit HandlerTable(InstructionStream code);
explicit HandlerTable(Code code);
-#ifdef V8_EXTERNAL_CODE_SPACE
- explicit HandlerTable(CodeDataContainer code);
-#endif
explicit HandlerTable(ByteArray byte_array);
#if V8_ENABLE_WEBASSEMBLY
explicit HandlerTable(const wasm::WasmCode* code);
@@ -123,8 +122,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
#endif
// Direct pointer into the encoded data. This pointer potentially points into
- // objects on the GC heap (either {ByteArray} or {Code}) and could become
- // stale during a collection. Hence we disallow any allocation.
+ // objects on the GC heap (either {ByteArray} or {InstructionStream}) and
+ // could become stale during a collection. Hence we disallow any allocation.
const Address raw_encoded_data_;
DISALLOW_GARBAGE_COLLECTION(no_gc_)
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index f03b032ec2..d5040b8aa4 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -53,8 +53,10 @@ bool CpuFeatures::SupportsOptimizer() { return true; }
void RelocInfo::apply(intptr_t delta) {
DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET)));
- if (IsCodeTarget(rmode_) || IsOffHeapTarget(rmode_)) {
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
+ if (IsCodeTarget(rmode_) || IsOffHeapTarget(rmode_) ||
+ IsWasmStubCall(rmode_)) {
base::WriteUnalignedValue(pc_,
base::ReadUnalignedValue<int32_t>(pc_) - delta);
} else if (IsInternalReference(rmode_)) {
@@ -65,7 +67,7 @@ void RelocInfo::apply(intptr_t delta) {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
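The extra WASM_STUB_CALL bit matters because RelocInfo::apply(delta) above fixes up pc-relative operands when code is moved: the absolute target stays put, so the stored 32-bit displacement has to shrink by the distance the code moved. A simplified sketch of that fix-up (not V8's helpers, which go through unaligned read/write wrappers):

    #include <cstdint>
    #include <cstring>

    void ApplyDeltaSketch(uint8_t* operand_location, intptr_t delta) {
      int32_t displacement;
      std::memcpy(&displacement, operand_location, sizeof(displacement));
      // The code (and thus the pc) moved by |delta|; the target did not.
      displacement -= static_cast<int32_t>(delta);
      std::memcpy(operand_location, &displacement, sizeof(displacement));
    }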
@@ -96,8 +98,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
@@ -188,7 +190,7 @@ void Assembler::emit(const Immediate& x) {
void Assembler::emit_code_relative_offset(Label* label) {
if (label->is_bound()) {
int32_t pos;
- pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
+ pos = label->pos() + InstructionStream::kHeaderSize - kHeapObjectTag;
emit(pos);
} else {
emit_disp(label, Displacement::CODE_RELATIVE);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index ab9a895e9d..bb76c82a6f 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -204,7 +204,8 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -320,13 +321,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
const int code_comments_size = WriteCodeComments();
@@ -1537,8 +1538,9 @@ void Assembler::bind_to(Label* L, int pos) {
long_at_put(fixup_pos, reinterpret_cast<int>(buffer_start_ + pos));
internal_reference_positions_.push_back(fixup_pos);
} else if (disp.type() == Displacement::CODE_RELATIVE) {
- // Relative to Code heap object pointer.
- long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
+ // Relative to InstructionStream heap object pointer.
+ long_at_put(fixup_pos,
+ pos + InstructionStream::kHeaderSize - kHeapObjectTag);
} else {
if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
DCHECK_EQ(byte_at(fixup_pos - 1), 0xE9); // jmp expected
@@ -1642,7 +1644,6 @@ void Assembler::call(Operand adr) {
void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(code->IsExecutable());
EMIT(0xE8);
emit(code, rmode);
}
@@ -3406,7 +3407,8 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 2cbc775ef5..54cbbf6886 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -79,7 +79,23 @@ enum Condition {
zero = equal,
not_zero = not_equal,
sign = negative,
- not_sign = positive
+ not_sign = positive,
+
+ // Unified cross-platform condition names/aliases.
+ kEqual = equal,
+ kNotEqual = not_equal,
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+ kUnsignedLessThan = below,
+ kUnsignedGreaterThan = above,
+ kUnsignedLessThanEqual = below_equal,
+ kUnsignedGreaterThanEqual = above_equal,
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+ kZero = equal,
+ kNotZero = not_equal,
};
// Returns the equivalent of !cc.
@@ -433,7 +449,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
// ---------------------------------------------------------------------------
- // Code generation
+ // InstructionStream generation
//
// - function names correspond one-to-one to ia32 instruction mnemonics
// - unless specified otherwise, instructions operate on 32bit operands
diff --git a/deps/v8/src/codegen/ia32/constants-ia32.h b/deps/v8/src/codegen/ia32/constants-ia32.h
index af3bd09330..87d8ec6e8b 100644
--- a/deps/v8/src/codegen/ia32/constants-ia32.h
+++ b/deps/v8/src/codegen/ia32/constants-ia32.h
@@ -9,14 +9,15 @@
namespace v8 {
namespace internal {
-// Actual value of root register is offset from the root array's start
+
+// The actual value of the kRootRegister is offset from the IsolateData's start
// to take advantage of negative displacement values.
-// For x86, this value is provided for uniformity with other platforms, although
-// currently no root register is present.
-constexpr int kRootRegisterBias = 0;
+constexpr int kRootRegisterBias = 128;
-// TODO(sigurds): Change this value once we use relative jumps.
+// The maximum size of the code range s.t. pc-relative calls are possible
+// between all Code objects in the range.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+
} // namespace internal
} // namespace v8
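The bias of 128 mirrors other ports: ia32 can encode memory operands with a signed 8-bit displacement, so pointing kRootRegister 128 bytes past the start of IsolateData lets the first 256 bytes of frequently used isolate fields be reached with the short [-128, 127] encoding. A hypothetical helper showing the resulting offset arithmetic (an illustration, not V8 code):

    #include <cstdint>

    constexpr int kBias = 128;  // value of kRootRegisterBias above

    // kRootRegister points at IsolateData start + kBias, so a field at
    // |field_offset| inside IsolateData is addressed relative to it as:
    constexpr std::intptr_t RootRelativeOffset(std::intptr_t field_offset) {
      return field_offset - kBias;
    }

    // Example: field_offset 0 becomes [kRootRegister - 128], and
    // field_offset 255 becomes [kRootRegister + 127] -- both fit in disp8.
    static_assert(RootRelativeOffset(0) == -128);
    static_assert(RootRelativeOffset(255) == 127);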
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
index 2f05a95635..d3f6101f4e 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
@@ -96,6 +96,9 @@ constexpr Register StoreWithVectorDescriptor::VectorRegister() {
}
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return no_reg; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return edi; }
// static
@@ -317,6 +320,20 @@ constexpr auto NewHeapNumberDescriptor::registers() {
return RegisterArray(ecx);
}
+// static
+constexpr auto CheckTurboshaftFloat32TypeDescriptor::registers() {
+ // Avoid eax, whose register code is 0 and would lead to the FP parameter
+ // being passed via xmm0, which is not allocatable on ia32.
+ return RegisterArray(ecx);
+}
+
+// static
+constexpr auto CheckTurboshaftFloat64TypeDescriptor::registers() {
+ // Avoid eax, whose register code is 0 and would lead to the FP parameter
+ // being passed via xmm0, which is not allocatable on ia32.
+ return RegisterArray(ecx);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index ceaf19587d..77d07785f5 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -21,11 +21,11 @@
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/label.h"
+#include "src/codegen/macro-assembler-base.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
-#include "src/codegen/turbo-assembler.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -77,18 +77,18 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-void TurboAssembler::InitializeRootRegister() {
+void MacroAssembler::InitializeRootRegister() {
ASM_CODE_COMMENT(this);
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, Immediate(isolate_root));
}
-Operand TurboAssembler::RootAsOperand(RootIndex index) {
+Operand MacroAssembler::RootAsOperand(RootIndex index) {
DCHECK(root_array_available());
return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
mov(destination, RootAsOperand(index));
@@ -113,7 +113,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
}
-void TurboAssembler::CompareRoot(Register with, Register scratch,
+void MacroAssembler::CompareRoot(Register with, Register scratch,
RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
@@ -126,7 +126,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
}
}
-void TurboAssembler::CompareRoot(Register with, RootIndex index) {
+void MacroAssembler::CompareRoot(Register with, RootIndex index) {
ASM_CODE_COMMENT(this);
if (root_array_available()) {
cmp(with, RootAsOperand(index));
@@ -180,7 +180,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
j(below_equal, on_in_range, near_jump);
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
PushArrayOrder order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(array, size, scratch));
@@ -206,7 +206,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
+Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
if (root_array_available() && options().enable_root_relative_access) {
intptr_t delta =
@@ -233,8 +233,8 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
}
// TODO(v8:6666): If possible, refactor into a platform-independent function in
-// TurboAssembler.
-Operand TurboAssembler::ExternalReferenceAddressAsOperand(
+// MacroAssembler.
+Operand MacroAssembler::ExternalReferenceAddressAsOperand(
ExternalReference reference) {
DCHECK(root_array_available());
DCHECK(options().isolate_independent_code);
@@ -244,8 +244,8 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
}
// TODO(v8:6666): If possible, refactor into a platform-independent function in
-// TurboAssembler.
-Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
+// MacroAssembler.
+Operand MacroAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
DCHECK(root_array_available());
Builtin builtin;
@@ -264,7 +264,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
}
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -273,7 +273,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
ASM_CODE_COMMENT(this);
DCHECK(is_int32(offset));
@@ -285,13 +285,13 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
mov(destination, Operand(kRootRegister, offset));
}
-void TurboAssembler::LoadAddress(Register destination,
+void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
// TODO(jgruber): Add support for enable_root_relative_access.
if (root_array_available() && options().isolate_independent_code) {
@@ -301,7 +301,7 @@ void TurboAssembler::LoadAddress(Register destination,
mov(destination, Immediate(source));
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
RegList saved_regs = kCallerSaved - exclusion;
@@ -315,7 +315,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
ASM_CODE_COMMENT(this);
// We don't allow a GC in a write barrier slow path so there is no need to
@@ -346,7 +346,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
@@ -412,19 +412,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
for (Register reg : registers) {
push(reg);
}
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
for (Register reg : base::Reversed(registers)) {
pop(reg);
}
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
@@ -449,7 +449,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -473,7 +473,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
@@ -526,8 +526,8 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- zero, &done, Label::kNear);
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
@@ -547,17 +547,17 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
}
-void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
xorps(dst, dst);
cvtsi2ss(dst, src);
}
-void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
xorpd(dst, dst);
cvtsi2sd(dst, src);
}
-void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
+void MacroAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
Label done;
Register src_reg = src.is_reg_only() ? src.reg() : tmp;
if (src_reg == tmp) mov(tmp, src);
@@ -578,7 +578,7 @@ void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
bind(&done);
}
-void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
+void MacroAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
Label done;
cvttss2si(dst, src);
test(dst, dst);
@@ -590,7 +590,7 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
bind(&done);
}
-void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
+void MacroAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
@@ -600,14 +600,14 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
bind(&done);
}
-void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
+void MacroAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
Move(tmp, -2147483648.0);
addsd(tmp, src);
cvttsd2si(dst, tmp);
add(dst, Immediate(0x80000000));
}
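The Cvttsd2ui sequence above is the classic bias trick for truncating a double to uint32 on hardware that only offers a signed conversion: shift the value down by 2^31, truncate with cvttsd2si, then add 2^31 back as an unsigned wrap. In plain C++ (valid for inputs in [0, 2^32); a sketch, not V8 code):

    #include <cstdint>

    uint32_t DoubleToUint32Sketch(double value) {
      double biased = value - 2147483648.0;              // value - 2^31
      int32_t truncated = static_cast<int32_t>(biased);  // what cvttsd2si does
      return static_cast<uint32_t>(truncated) + 0x80000000u;  // undo the bias
    }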
-void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
mov(high, low);
@@ -619,7 +619,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
}
}
-void TurboAssembler::ShlPair_cl(Register high, Register low) {
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
ASM_CODE_COMMENT(this);
shld_cl(high, low);
shl_cl(low);
@@ -631,7 +631,7 @@ void TurboAssembler::ShlPair_cl(Register high, Register low) {
bind(&done);
}
-void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
mov(low, high);
@@ -643,7 +643,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
}
}
-void TurboAssembler::ShrPair_cl(Register high, Register low) {
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
ASM_CODE_COMMENT(this);
shrd_cl(low, high);
shr_cl(high);
@@ -655,7 +655,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) {
bind(&done);
}
-void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
ASM_CODE_COMMENT(this);
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -668,7 +668,7 @@ void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
}
}
-void TurboAssembler::SarPair_cl(Register high, Register low) {
+void MacroAssembler::SarPair_cl(Register high, Register low) {
ASM_CODE_COMMENT(this);
shrd_cl(low, high);
sar_cl(high);
@@ -680,7 +680,7 @@ void TurboAssembler::SarPair_cl(Register high, Register low) {
bind(&done);
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
mov(destination, FieldOperand(object, HeapObject::kMapOffset));
}
@@ -706,10 +706,8 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
- Register scratch) {
- mov(scratch, FieldOperand(codet, Code::kCodeDataContainerOffset));
- test(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code) {
+ test(FieldOperand(code, Code::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
}
@@ -742,7 +740,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
- __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, eax);
+ __ TestCodeIsMarkedForDeoptimization(optimized_code_entry);
__ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
@@ -752,7 +750,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
ecx);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Pop(optimized_code_entry);
- __ LoadCodeObjectEntry(ecx, optimized_code_entry);
+ __ LoadCodeEntry(ecx, optimized_code_entry);
__ Pop(edx);
__ Pop(eax);
__ jmp(ecx);
@@ -981,23 +979,23 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void TurboAssembler::Assert(Condition cc, AbortReason reason) {
+void MacroAssembler::Assert(Condition cc, AbortReason reason) {
if (v8_flags.debug_code) Check(cc, reason);
}
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
#endif // V8_ENABLE_DEBUG_CODE
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
}
-void TurboAssembler::Prologue() {
+void MacroAssembler::Prologue() {
ASM_CODE_COMMENT(this);
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
@@ -1006,7 +1004,7 @@ void TurboAssembler::Prologue() {
push(kJavaScriptCallArgCountRegister); // Actual argument count.
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes =
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -1036,7 +1034,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArguments(Register count, Register scratch,
+void MacroAssembler::DropArguments(Register count, Register scratch,
ArgumentsCountType type,
ArgumentsCountMode mode) {
DCHECK(!AreAliased(count, scratch));
@@ -1045,7 +1043,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
Register scratch,
ArgumentsCountType type,
@@ -1057,7 +1055,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Operand receiver,
Register scratch,
ArgumentsCountType type,
@@ -1070,7 +1068,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
push(ebp);
mov(ebp, esp);
@@ -1082,7 +1080,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
@@ -1093,7 +1091,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
}
#ifdef V8_OS_WIN
-void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
ASM_CODE_COMMENT(this);
// In windows, we cannot increment the stack size by more than one page
// (minimum page size is 4KB) without accessing at least one byte on the
@@ -1115,7 +1113,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
sub(esp, bytes_scratch);
}
-void TurboAssembler::AllocateStackSpace(int bytes) {
+void MacroAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
while (bytes >= kStackPageSize) {
@@ -1128,8 +1126,8 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
}
#endif
-void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
- Register scratch) {
+void MacroAssembler::EnterExitFrame(int argc, StackFrame::Type frame_type,
+ Register scratch) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -1141,43 +1139,23 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
push(ebp);
mov(ebp, esp);
- // Reserve room for entry stack pointer.
push(Immediate(StackFrame::TypeToMarker(frame_type)));
DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
- push(Immediate(0)); // Saved entry sp, patched before call.
-
- static_assert(edx == kRuntimeCallFunctionRegister);
- static_assert(esi == kContextRegister);
+ push(Immediate(0)); // Saved entry sp, patched below.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- ExternalReference context_address =
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- ExternalReference c_function_address =
- ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate());
-
DCHECK(!AreAliased(scratch, ebp, esi, edx));
- mov(ExternalReferenceAsOperand(c_entry_fp_address, scratch), ebp);
- mov(ExternalReferenceAsOperand(context_address, scratch), esi);
- mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
-}
+ using ER = ExternalReference;
+ ER r0 = ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
+ mov(ExternalReferenceAsOperand(r0, scratch), ebp);
+ static_assert(esi == kContextRegister);
+ ER r1 = ER::Create(IsolateAddressId::kContextAddress, isolate());
+ mov(ExternalReferenceAsOperand(r1, scratch), esi);
+ static_assert(edx == kRuntimeCallFunctionRegister);
+ ER r2 = ER::Create(IsolateAddressId::kCFunctionAddress, isolate());
+ mov(ExternalReferenceAsOperand(r2, scratch), edx);
-void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
- ASM_CODE_COMMENT(this);
- // Optionally save all XMM registers.
- if (save_doubles) {
- int space =
- XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize;
- AllocateStackSpace(space);
- const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else {
- AllocateStackSpace(argc * kSystemPointerSize);
- }
+ AllocateStackSpace(argc * kSystemPointerSize);
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
@@ -1190,65 +1168,21 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}
-void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
- StackFrame::Type frame_type) {
- ASM_CODE_COMMENT(this);
- EnterExitFramePrologue(frame_type, edi);
-
- // Set up argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
- mov(edi, eax);
- lea(esi, Operand(ebp, eax, times_system_pointer_size, offset));
-
- // Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(argc, save_doubles);
-}
-
-void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
- EnterExitFramePrologue(StackFrame::EXIT, scratch);
- EnterExitFrameEpilogue(argc, false);
-}
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
+void MacroAssembler::LeaveExitFrame(Register scratch) {
ASM_CODE_COMMENT(this);
- // Optionally restore all XMM registers.
- if (save_doubles) {
- const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
- }
- }
-
- if (pop_arguments) {
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kSystemPointerSize));
- mov(ebp, Operand(ebp, 0 * kSystemPointerSize));
-
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kSystemPointerSize));
-
- // Push the return address to get ready to return.
- push(ecx);
- } else {
- // Otherwise just leave the exit frame.
- leave();
- }
- LeaveExitFrameEpilogue();
-}
+ leave();
-void MacroAssembler::LeaveExitFrameEpilogue() {
- ASM_CODE_COMMENT(this);
// Clear the top frame.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- mov(ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));
+ mov(ExternalReferenceAsOperand(c_entry_fp_address, scratch), Immediate(0));
- // Restore current context from top and clear it in debug mode.
+ // Restore the current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- mov(esi, ExternalReferenceAsOperand(context_address, esi));
+ mov(esi, ExternalReferenceAsOperand(context_address, scratch));
+
#ifdef DEBUG
push(eax);
mov(ExternalReferenceAsOperand(context_address, eax),
@@ -1257,14 +1191,6 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
#endif
}
-void MacroAssembler::LeaveApiExitFrame() {
- ASM_CODE_COMMENT(this);
- mov(esp, ebp);
- pop(ebp);
-
- LeaveExitFrameEpilogue();
-}
-
void MacroAssembler::PushStackHandler(Register scratch) {
ASM_CODE_COMMENT(this);
// Adjust this code if not the case.
@@ -1291,8 +1217,8 @@ void MacroAssembler::PopStackHandler(Register scratch) {
add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
@@ -1305,8 +1231,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -1338,15 +1263,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
Move(kRuntimeCallFunctionRegister, Immediate(ext));
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- jmp(entry, RelocInfo::OFF_HEAP_TARGET);
-}
-
void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
@@ -1356,10 +1277,10 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
cmp(with, Operand(kRootRegister, offset));
}
@@ -1589,9 +1510,9 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
mov(destination, Operand(destination, Context::SlotOffset(index)));
}
-void TurboAssembler::Ret() { ret(0); }
+void MacroAssembler::Ret() { ret(0); }
-void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
@@ -1602,7 +1523,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
}
}
-void TurboAssembler::Push(Immediate value) {
+void MacroAssembler::Push(Immediate value) {
if (root_array_available() && options().isolate_independent_code) {
if (value.is_embedded_object()) {
Push(HeapObjectAsOperand(value.embedded_object()));
@@ -1621,13 +1542,13 @@ void MacroAssembler::Drop(int stack_elements) {
}
}
-void TurboAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src) {
if (dst != src) {
mov(dst, src);
}
}
-void TurboAssembler::Move(Register dst, const Immediate& src) {
+void MacroAssembler::Move(Register dst, const Immediate& src) {
if (!src.is_heap_number_request() && src.is_zero()) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else if (src.is_external_reference()) {
@@ -1637,7 +1558,7 @@ void TurboAssembler::Move(Register dst, const Immediate& src) {
}
}
-void TurboAssembler::Move(Operand dst, const Immediate& src) {
+void MacroAssembler::Move(Operand dst, const Immediate& src) {
// Since there's no scratch register available, take a detour through the
// stack.
if (root_array_available() && options().isolate_independent_code) {
@@ -1656,9 +1577,9 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
}
}
-void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
+void MacroAssembler::Move(Register dst, Operand src) { mov(dst, src); }
-void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
+void MacroAssembler::Move(Register dst, Handle<HeapObject> src) {
if (root_array_available() && options().isolate_independent_code) {
IndirectLoadConstant(dst, src);
return;
@@ -1666,7 +1587,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
mov(dst, src);
}
-void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
@@ -1690,7 +1611,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
}
}
-void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
@@ -1729,7 +1650,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src,
uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -1745,7 +1666,7 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
uint32_t* load_pc_offset) {
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
// We don't have an xmm scratch register, so move the data via the stack. This
@@ -1766,7 +1687,7 @@ void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::Lzcnt(Register dst, Operand src) {
+void MacroAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcnt(dst, src);
@@ -1780,7 +1701,7 @@ void TurboAssembler::Lzcnt(Register dst, Operand src) {
xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
-void TurboAssembler::Tzcnt(Register dst, Operand src) {
+void MacroAssembler::Tzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcnt(dst, src);
@@ -1793,7 +1714,7 @@ void TurboAssembler::Tzcnt(Register dst, Operand src) {
bind(&not_zero_src);
}
-void TurboAssembler::Popcnt(Register dst, Operand src) {
+void MacroAssembler::Popcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcnt(dst, src);
@@ -1840,7 +1761,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Check(Condition cc, AbortReason reason) {
+void MacroAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L);
Abort(reason);
@@ -1848,7 +1769,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
bind(&L);
}
-void TurboAssembler::CheckStackAlignment() {
+void MacroAssembler::CheckStackAlignment() {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
@@ -1863,7 +1784,7 @@ void TurboAssembler::CheckStackAlignment() {
}
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
if (v8_flags.code_comments) {
const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
@@ -1906,7 +1827,7 @@ void TurboAssembler::Abort(AbortReason reason) {
int3();
}
-void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -1922,14 +1843,16 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
// Trashing eax is ok as it will be the return value.
Move(eax, Immediate(function));
CallCFunction(eax, num_arguments);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
@@ -1938,40 +1861,48 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CheckStackAlignment();
}
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- // Find two caller-saved scratch registers.
- Register pc_scratch = eax;
- Register scratch = ecx;
- if (function == eax) pc_scratch = edx;
- if (function == ecx) scratch = edx;
- PushPC();
- pop(pc_scratch);
-
- // See x64 code for reasoning about how to address the isolate data fields.
- DCHECK_IMPLIES(!root_array_available(), isolate() != nullptr);
- mov(root_array_available()
- ? Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset())
- : ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_pc_address(isolate()),
- scratch),
- pc_scratch);
- mov(root_array_available()
- ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
- : ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_fp_address(isolate()),
- scratch),
- ebp);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ // Find two caller-saved scratch registers.
+ Register pc_scratch = eax;
+ Register scratch = ecx;
+ if (function == eax) pc_scratch = edx;
+ if (function == ecx) scratch = edx;
+ PushPC();
+ pop(pc_scratch);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ DCHECK_IMPLIES(!root_array_available(), isolate() != nullptr);
+ mov(root_array_available()
+ ? Operand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset())
+ : ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_pc_address(isolate()),
+ scratch),
+ pc_scratch);
+ mov(root_array_available()
+ ? Operand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset())
+ : ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate()),
+ scratch),
+ ebp);
+ }
call(function);
- // We don't unset the PC; the FP is the source of truth.
- mov(root_array_available()
- ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
- : ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_fp_address(isolate()),
- scratch),
- Immediate(0));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch = ecx;
+ mov(root_array_available()
+ ? Operand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset())
+ : ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate()),
+ scratch),
+ Immediate(0));
+ }
if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kSystemPointerSize));
@@ -1980,7 +1911,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
}
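A minimal conceptual sketch of the protocol the SetIsolateDataSlots::kYes path implements around a C call: record the caller's PC and FP in per-isolate slots so the stack stays iterable without an ExitFrame, call the C function, then clear only the FP slot (the FP is the source of truth, so the stale PC is harmless). The struct and helper below are hypothetical stand-ins, not the real IsolateData layout.

// Hypothetical sketch (uses GCC/Clang builtins); illustrates the slot protocol only.
struct FastCCallSlotsSketch {
  void* caller_pc = nullptr;
  void* caller_fp = nullptr;
};

template <typename Fn>
void CallWithIterableStack(FastCCallSlotsSketch* slots, Fn c_function) {
  slots->caller_pc = __builtin_return_address(0);  // stand-in for PushPC/pop
  slots->caller_fp = __builtin_frame_address(0);   // stand-in for saving ebp
  c_function();
  slots->caller_fp = nullptr;  // FP cleared after the call; PC left stale
}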
-void TurboAssembler::PushPC() {
+void MacroAssembler::PushPC() {
// Push the current PC onto the stack as "return address" via calling
// the next instruction.
Label get_pc;
@@ -1988,7 +1919,7 @@ void TurboAssembler::PushPC() {
bind(&get_pc);
}
-void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASM_CODE_COMMENT(this);
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
@@ -2001,7 +1932,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
static_assert(kSystemPointerSize == 4);
static_assert(kSmiShiftSize == 0);
@@ -2017,13 +1948,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
IsolateData::builtin_entry_table_offset()));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
call(builtin_index);
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
switch (options().builtin_call_jump_mode) {
case BuiltinCallJumpMode::kAbsolute: {
@@ -2036,14 +1967,14 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
call(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
switch (options().builtin_call_jump_mode) {
@@ -2057,71 +1988,30 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
jmp(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jmp(code, RelocInfo::CODE_TARGET);
break;
}
}
}
-Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- test(FieldOperand(code_object, Code::kFlagsOffset),
- Immediate(Code::IsOffHeapTrampoline::kMask));
- j(not_equal, &if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- Move(destination, code_object);
- add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
- jmp(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
- mov(destination,
- Operand(kRootRegister, destination, times_system_pointer_size,
- IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- Move(destination, code_object);
- add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
- }
+ mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
}
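The removed comment above described the old two-way dispatch (builtin entry table for off-heap trampolines vs. raw_instruction_start for on-heap code); the new LoadCodeEntry reads the entry address straight from the Code object. A hedged, self-contained sketch of that single-load shape (hypothetical struct, not the real Code layout):

#include <cstdint>

struct CodeSketch {            // hypothetical stand-in for a Code object
  uintptr_t code_entry_point;  // mirrors the slot at Code::kCodeEntryPointOffset
};

// One load, no off-heap-trampoline branch.
inline uintptr_t LoadCodeEntrySketch(const CodeSketch& code) {
  return code.code_entry_point;
}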
-void TurboAssembler::CallCodeObject(Register code_object) {
- ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::CallCodeObject(Register code_object) {
+ LoadCodeEntry(code_object, code_object);
call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
- ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ LoadCodeEntry(code_object, code_object);
switch (jump_mode) {
case JumpMode::kJump:
jmp(code_object);
@@ -2133,13 +2023,13 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
}
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
DCHECK(root_array_available());
jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
isolate(), reference)));
}
-void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@@ -2151,7 +2041,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
ASM_CODE_COMMENT(this);
@@ -2170,7 +2060,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
ASM_CODE_COMMENT(this);
// In order to get the address of the current instruction, we first need
// to use a call and then use a pop, thus pushing the return address to
@@ -2185,7 +2075,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -2195,8 +2085,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::Trap() { int3(); }
-void TurboAssembler::DebugBreak() { int3(); }
+void MacroAssembler::Trap() { int3(); }
+void MacroAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index a55beb1a4e..39ee0f106e 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -21,10 +21,10 @@
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/label.h"
+#include "src/codegen/macro-assembler-base.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
-#include "src/codegen/turbo-assembler.h"
#include "src/common/globals.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
@@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
-class Code;
+class InstructionStream;
class ExternalReference;
class StatsCounter;
@@ -68,10 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler
- : public SharedTurboAssemblerBase<TurboAssembler> {
+class V8_EXPORT_PRIVATE MacroAssembler
+ : public SharedMacroAssembler<MacroAssembler> {
public:
- using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
+ using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -107,11 +107,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void Assert(Condition cc, AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cc, AbortReason reason);
@@ -158,12 +158,13 @@ class V8_EXPORT_PRIVATE TurboAssembler
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
- void Jump(const ExternalReference& reference);
+ void Jump(const ExternalReference& reference);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void LoadMap(Register destination, Register object);
@@ -220,8 +221,16 @@ class V8_EXPORT_PRIVATE TurboAssembler
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
void ShlPair(Register high, Register low, uint8_t imm8);
void ShlPair_cl(Register high, Register low);
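A short usage sketch of the new parameter as declared above, assuming a MacroAssembler instance masm and an ExternalReference ref are in scope (illustrative call sites, not taken from the patch):

// Default: record caller PC/FP in the isolate data slots around the call.
masm.CallCFunction(ref, /*num_arguments=*/2);
// Opt out of the bookkeeping when the caller guarantees the stack is
// already iterable (whether that holds is a property of the call site).
masm.CallCFunction(ref, /*num_arguments=*/2,
                   MacroAssembler::SetIsolateDataSlots::kNo);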
@@ -404,17 +413,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
- protected:
- // Drops arguments assuming that the return address was already popped.
- void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
- ArgumentsCountMode mode = kCountExcludesReceiver);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -462,22 +460,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
SaveFPRegsMode save_fp,
SmiCheck smi_check = SmiCheck::kInline);
- // Enter specific kind of exit frame. Expects the number of
- // arguments in register eax and sets up the number of arguments in
- // register edi and the pointer to the first argument in register
- // esi.
- void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
-
- void EnterApiExitFrame(int argc, Register scratch);
-
- // Leave the current exit frame. Expects the return value in
- // register eax:edx (untouched) and the pointer to the first
- // argument in register esi (if pop_arguments == true).
- void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
-
- // Leave the current exit frame. Expects the return value in
- // register eax (untouched).
- void LeaveApiExitFrame();
+ void EnterExitFrame(int argc, StackFrame::Type frame_type, Register scratch);
+ void LeaveExitFrame(Register scratch);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
@@ -551,12 +535,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
and_(reg, Immediate(mask));
}
- void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
+ void TestCodeIsMarkedForDeoptimization(Register code);
Immediate ClearedValue() const;
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register slot_address);
@@ -568,34 +552,34 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register flags, XMMRegister saved_feedback_vector);
// Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
void AssertCallableFunction(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
// ---------------------------------------------------------------------------
// Exception handling
@@ -610,20 +594,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump).
@@ -633,9 +614,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// Utilities
@@ -667,17 +645,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void StackOverflowCheck(Register num_args, Register scratch,
Label* stack_overflow, bool include_receiver = false);
+ protected:
+ // Drops arguments assuming that the return address was already popped.
+ void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+ ArgumentsCountMode mode = kCountExcludesReceiver);
+
private:
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
InvokeType type);
- void EnterExitFramePrologue(StackFrame::Type frame_type, Register scratch);
- void EnterExitFrameEpilogue(int argc, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index e06a372ea6..5f7585267b 100644
--- a/deps/v8/src/codegen/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -137,10 +137,6 @@ constexpr Register kJavaScriptCallNewTargetRegister = edx;
// platforms. Note that on ia32 it aliases kJavaScriptCallCodeStartRegister.
constexpr Register kJavaScriptCallExtraArg1Register = ecx;
-// The off-heap trampoline does not need a register on ia32 (it uses a
-// pc-relative call instead).
-constexpr Register kOffHeapTrampolineRegister = no_reg;
-
constexpr Register kRuntimeCallFunctionRegister = edx;
constexpr Register kRuntimeCallArgCountRegister = eax;
constexpr Register kRuntimeCallArgvRegister = ecx;
diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h
index 543a577f2a..f94e50404c 100644
--- a/deps/v8/src/codegen/interface-descriptors-inl.h
+++ b/deps/v8/src/codegen/interface-descriptors-inl.h
@@ -56,6 +56,11 @@ constexpr auto StaticJSCallInterfaceDescriptor<DerivedDescriptor>::registers() {
return CallInterfaceDescriptor::DefaultJSRegisterArray();
}
+// static
+constexpr auto CompareNoContextDescriptor::registers() {
+ return CompareDescriptor::registers();
+}
+
template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::Initialize(
CallInterfaceDescriptorData* data) {
@@ -329,6 +334,20 @@ constexpr auto StoreGlobalBaselineDescriptor::registers() {
}
// static
+constexpr auto DefineKeyedOwnDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ DefineKeyedOwnDescriptor::FlagsRegister(),
+ StoreDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto DefineKeyedOwnBaselineDescriptor::registers() {
+ return DefineKeyedOwnDescriptor::registers();
+}
+
+// static
constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
return RegisterArray(
LoadDescriptor::ReceiverRegister(),
@@ -527,6 +546,15 @@ constexpr auto StoreWithVectorDescriptor::registers() {
}
// static
+constexpr auto DefineKeyedOwnWithVectorDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ DefineKeyedOwnDescriptor::FlagsRegister(),
+ StoreDescriptor::SlotRegister());
+}
+
+// static
constexpr auto ApiGetterDescriptor::registers() {
return RegisterArray(ReceiverRegister(), HolderRegister(),
CallbackRegister());
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 2d42960ba1..e5e26b5805 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -25,7 +25,7 @@ void CallInterfaceDescriptorData::InitializeRegisters(
DCHECK(reg.is_valid());
DCHECK(!reglist.has(reg));
DCHECK_NE(reg, kRootRegister);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
DCHECK_NE(reg, kPtrComprCageBaseRegister);
#endif
reglist.set(reg);
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 1915499830..0322b43369 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -55,6 +55,8 @@ namespace internal {
V(CloneObjectBaseline) \
V(CloneObjectWithVector) \
V(Compare) \
+ V(CompareNoContext) \
+ V(StringEqual) \
V(Compare_Baseline) \
V(Compare_WithFeedback) \
V(Construct_Baseline) \
@@ -71,6 +73,9 @@ namespace internal {
V(CopyDataPropertiesWithExcludedProperties) \
V(CopyDataPropertiesWithExcludedPropertiesOnStack) \
V(CppBuiltinAdaptor) \
+ V(DefineKeyedOwn) \
+ V(DefineKeyedOwnBaseline) \
+ V(DefineKeyedOwnWithVector) \
V(FastNewObject) \
V(FindNonDefaultConstructorOrConstruct) \
V(ForInPrepare) \
@@ -100,6 +105,7 @@ namespace internal {
V(LoadWithReceiverAndVector) \
V(LoadWithReceiverBaseline) \
V(LoadWithVector) \
+ V(LookupWithVector) \
V(LookupTrampoline) \
V(LookupBaseline) \
V(NewHeapNumber) \
@@ -845,6 +851,17 @@ class LoadGlobalBaselineDescriptor
static constexpr auto registers();
};
+class LookupWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<LookupWithVectorDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kName, kDepth, kSlot, kVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kDepth
+ MachineType::AnyTagged(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(LookupWithVectorDescriptor)
+};
+
class LookupTrampolineDescriptor
: public StaticCallInterfaceDescriptor<LookupTrampolineDescriptor> {
public:
@@ -965,6 +982,54 @@ class StoreGlobalWithVectorDescriptor
static constexpr auto registers();
};
+class DefineKeyedOwnDescriptor
+ : public StaticCallInterfaceDescriptor<DefineKeyedOwnDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kName, kValue, kFlags, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned(), // kFlags
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(DefineKeyedOwnDescriptor)
+
+ static constexpr inline Register FlagsRegister();
+
+ static constexpr auto registers();
+};
+
+class DefineKeyedOwnBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<DefineKeyedOwnBaselineDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kFlags, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned(), // kFlags
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(DefineKeyedOwnBaselineDescriptor)
+
+ static constexpr auto registers();
+};
+
+class DefineKeyedOwnWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<DefineKeyedOwnWithVectorDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kName, kValue, kFlags,
+ kSlot, // register argument
+ kVector // stack argument
+ )
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned(), // kFlags
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(DefineKeyedOwnWithVectorDescriptor)
+
+ static constexpr auto registers();
+};
+
class LoadWithVectorDescriptor
: public StaticCallInterfaceDescriptor<LoadWithVectorDescriptor> {
public:
@@ -1563,6 +1628,25 @@ class CompareDescriptor
static constexpr inline auto registers();
};
+class CompareNoContextDescriptor
+ : public StaticCallInterfaceDescriptor<CompareNoContextDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight)
+ DECLARE_DESCRIPTOR(CompareNoContextDescriptor)
+
+ static constexpr inline auto registers();
+};
+
+class StringEqualDescriptor
+ : public StaticCallInterfaceDescriptor<StringEqualDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kLength)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
+ MachineType::AnyTagged(), // kRight
+ MachineType::IntPtr()) // kLength
+ DECLARE_DEFAULT_DESCRIPTOR(StringEqualDescriptor)
+};
+
class BinaryOpDescriptor
: public StaticCallInterfaceDescriptor<BinaryOpDescriptor> {
public:
@@ -1599,7 +1683,7 @@ class BinarySmiOp_BaselineDescriptor
class StringAtAsStringDescriptor final
: public StaticCallInterfaceDescriptor<StringAtAsStringDescriptor> {
public:
- DEFINE_PARAMETERS(kReceiver, kPosition)
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kPosition)
// TODO(turbofan): Return untagged value here.
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::TaggedPointer(), // result string
@@ -1611,7 +1695,7 @@ class StringAtAsStringDescriptor final
class StringSubstringDescriptor final
: public StaticCallInterfaceDescriptor<StringSubstringDescriptor> {
public:
- DEFINE_PARAMETERS(kString, kFrom, kTo)
+ DEFINE_PARAMETERS_NO_CONTEXT(kString, kFrom, kTo)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kString
MachineType::IntPtr(), // kFrom
MachineType::IntPtr()) // kTo
@@ -1656,7 +1740,7 @@ class ApiCallbackDescriptor
// receiver is implicit stack argument 1
// argv are implicit stack arguments [2, 2 + kArgc[
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kApiFunctionAddress
- MachineType::IntPtr(), // kActualArgumentsCount
+ MachineType::Int32(), // kActualArgumentsCount
MachineType::AnyTagged(), // kCallData
MachineType::AnyTagged()) // kHolder
DECLARE_DESCRIPTOR(ApiCallbackDescriptor)
@@ -2120,6 +2204,40 @@ class UnaryOp_BaselineDescriptor
DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor)
};
+class CheckTurboshaftFloat32TypeDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CheckTurboshaftFloat32TypeDescriptor> {
+ public:
+ DEFINE_RESULT_AND_PARAMETERS(1, kValue, kExpectedType, kNodeId)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(),
+ MachineTypeOf<Float32T>::value,
+ MachineType::TaggedPointer(),
+ MachineType::TaggedSigned())
+ DECLARE_DEFAULT_DESCRIPTOR(CheckTurboshaftFloat32TypeDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ // We need a custom descriptor on ia32 to avoid using xmm0.
+ static constexpr inline auto registers();
+#endif
+};
+
+class CheckTurboshaftFloat64TypeDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CheckTurboshaftFloat64TypeDescriptor> {
+ public:
+ DEFINE_RESULT_AND_PARAMETERS(1, kValue, kExpectedType, kNodeId)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(),
+ MachineTypeOf<Float64T>::value,
+ MachineType::TaggedPointer(),
+ MachineType::TaggedSigned())
+ DECLARE_DEFAULT_DESCRIPTOR(CheckTurboshaftFloat64TypeDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ // We need a custom descriptor on ia32 to avoid using xmm0.
+ static constexpr inline auto registers();
+#endif
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor \
: public StaticCallInterfaceDescriptor<Name##Descriptor> { \
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
index 0fe72de2ff..6bacf8c5e3 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
+++ b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
@@ -41,7 +41,8 @@ void RelocInfo::apply(intptr_t delta) {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_) ||
+ IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -85,16 +86,46 @@ void Assembler::deserialization_set_target_internal_reference_at(
WriteUnalignedValue<Address>(pc, target);
}
+Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
+ Address pc, Address constant_pool) {
+ return GetEmbeddedObject(target_compressed_address_at(pc, constant_pool));
+}
+
+Handle<HeapObject> Assembler::embedded_object_handle_at(Address pc,
+ Address constant_pool) {
+ return GetEmbeddedObject(target_address_at(pc, constant_pool));
+}
+
+Handle<Code> Assembler::code_target_object_handle_at(Address pc,
+ Address constant_pool) {
+ int index =
+ static_cast<int>(target_address_at(pc, constant_pool)) & 0xFFFFFFFF;
+ return GetCodeTarget(index);
+}
+
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return HeapObject::cast(
- Object(Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsCompressedEmbeddedObject(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Tagged_t compressed =
+ Assembler::target_compressed_address_at(pc_, constant_pool_);
+ DCHECK(!HAS_SMI_TAG(compressed));
+ Object obj(
+ V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
+ return HeapObject::cast(obj);
+ } else {
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
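For context on the compressed branch above, a rough illustration of what a DecompressTagged-style helper computes: the upper bits come from the pointer-compression cage base and the lower 32 bits from the compressed value. This is a simplified sketch, not V8's exact compression scheme.

#include <cstdint>

// Illustrative only: recombine a 32-bit compressed tagged value with the
// heap cage base to recover a full pointer-sized address.
inline uintptr_t DecompressTaggedSketch(uintptr_t cage_base,
                                        uint32_t compressed) {
  return (cage_base & ~uintptr_t{0xFFFFFFFF}) | compressed;
}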
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsCodeTarget(rmode_)) {
+ return origin->code_target_object_handle_at(pc_, constant_pool_);
+ } else if (IsFullEmbeddedObject(rmode_)) {
+ return origin->embedded_object_handle_at(pc_, constant_pool_);
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
} else {
DCHECK(IsRelativeCodeTarget(rmode_));
return origin->relative_code_target_object_handle_at(pc_);
@@ -105,10 +136,17 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
- icache_flush_mode);
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(
+ pc_, constant_pool_,
+ V8HeapCompressionScheme::CompressObject(target.ptr()),
+ icache_flush_mode);
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
@@ -139,7 +177,7 @@ Address RelocInfo::target_internal_reference_address() {
Handle<Code> Assembler::relative_code_target_object_handle_at(
Address pc) const {
- Instr instr = Assembler::instr_at(pc);
+ Instr instr = instr_at(pc);
int32_t code_target_index = instr & kImm26Mask;
code_target_index = ((code_target_index & 0x3ff) << 22 >> 6) |
((code_target_index >> 10) & kImm16Mask);
@@ -159,6 +197,9 @@ void RelocInfo::WipeOut() {
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
Memory<Address>(pc_) = kNullAddress;
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(pc_, constant_pool_,
+ kNullAddress);
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
index b636538f77..58cd86a563 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
@@ -141,7 +141,12 @@ void Assembler::AllocateAndInstallRequestedHeapNumbers(Isolate* isolate) {
object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
request.heap_number());
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
- set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
+ EmbeddedObjectIndex index = AddEmbeddedObject(object);
+ if (IsLu32i_d(instr_at(pc + 2 * kInstrSize))) {
+ set_target_value_at(pc, static_cast<uint64_t>(index));
+ } else {
+ set_target_compressed_value_at(pc, static_cast<uint32_t>(index));
+ }
}
}
@@ -175,13 +180,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
// EmitForbiddenSlotInstruction(); TODO:LOONG64 why?
@@ -507,7 +512,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
// Make label relative to Code pointer of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ instr_at_put(
+ pos, target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag));
return;
}
@@ -937,7 +943,8 @@ void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
target_pos = L->pos();
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ instr_at_put(at_offset, target_pos + (InstructionStream::kHeaderSize -
+ kHeapObjectTag));
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
@@ -2192,7 +2199,8 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
@@ -2266,7 +2274,7 @@ Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc + 1 * kInstrSize);
Instr instr2 = instr_at(pc + 2 * kInstrSize);
- // Interpret 4 instructions for address generated by li: See listing in
+ // Interpret 3 instructions for address generated by li: See listing in
// Assembler::set_target_address_at() just below.
DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2))));
@@ -2281,6 +2289,22 @@ Address Assembler::target_address_at(Address pc) {
return static_cast<Address>(addr);
}
+uint32_t Assembler::target_compressed_address_at(Address pc) {
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+
+ // Interpret 2 instructions for address generated by li: See listing in
+ // Assembler::set_target_compressed_value_at just below.
+ DCHECK((IsLu12i_w(instr0) && (IsOri(instr1))));
+
+ // Assemble the 32 bit value.
+ uint32_t hi20 = ((uint32_t)(instr0 >> 5) & 0xfffff) << 12;
+ uint32_t low12 = ((uint32_t)(instr1 >> 10) & 0xfff);
+ uint32_t addr = static_cast<uint32_t>(hi20 | low12);
+
+ return addr;
+}
+
// On loong64, a target address is stored in a 3-instruction sequence:
// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask);
// 1: ori(rd, rd, j.imm64_ & kImm12Mask);
@@ -2301,7 +2325,7 @@ void Assembler::set_target_value_at(Address pc, uint64_t target,
Instr instr0 = instr_at(pc);
Instr instr1 = instr_at(pc + kInstrSize);
Instr instr2 = instr_at(pc + kInstrSize * 2);
- DCHECK(IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2) ||
+ DCHECK((IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2)) ||
IsB(instr0));
#endif
@@ -2334,6 +2358,31 @@ void Assembler::set_target_value_at(Address pc, uint64_t target,
}
}
+void Assembler::set_target_compressed_value_at(
+ Address pc, uint32_t target, ICacheFlushMode icache_flush_mode) {
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + kInstrSize);
+ DCHECK(IsLu12i_w(instr0) && IsOri(instr1));
+#endif
+
+ Instr instr = instr_at(pc);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ uint32_t rd_code = GetRd(instr);
+
+ // Must use 2 instructions to ensure patchable code.
+ // lu12i_w rd, high-20.
+ // ori rd, rd, low-12.
+ *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
+ *(p + 1) =
+ ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
+
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, 2 * kInstrSize);
+ }
+}
+
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
: available_(assembler->GetScratchRegisterList()),
availablefp_(assembler->GetScratchFPRegisterList()),
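As a worked check of the hi20/low12 split that set_target_compressed_value_at and target_compressed_address_at rely on (arbitrary constant; standalone arithmetic only):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t value = 0xABCDE123u;              // arbitrary 32-bit target value
  uint32_t hi20 = (value >> 12) & 0xFFFFFu;  // field patched into lu12i_w
  uint32_t low12 = value & 0xFFFu;           // field patched into ori
  assert(((hi20 << 12) | low12) == value);   // the split is lossless
  return 0;
}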
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
index 98e94d2764..d420fcf54f 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
@@ -211,25 +211,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
- V8_INLINE static void set_target_address_at(
- Address pc, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- set_target_value_at(pc, target, icache_flush_mode);
- }
+ static uint32_t target_compressed_address_at(Address pc);
// On LOONG64 there is no Constant Pool so we skip that parameter.
- V8_INLINE static Address target_address_at(Address pc,
- Address constant_pool) {
+ inline static Address target_address_at(Address pc, Address constant_pool) {
return target_address_at(pc);
}
- V8_INLINE static void set_target_address_at(
+ inline static Tagged_t target_compressed_address_at(Address pc,
+ Address constant_pool) {
+ return target_compressed_address_at(pc);
+ }
+ inline static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- set_target_address_at(pc, target, icache_flush_mode);
+ set_target_value_at(pc, target, icache_flush_mode);
}
+ inline static void set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_compressed_value_at(pc, target, icache_flush_mode);
+ }
+
+ inline Handle<Code> code_target_object_handle_at(Address pc,
+ Address constant_pool);
static void set_target_value_at(
Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ static void set_target_compressed_value_at(
+ Address pc, uint32_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static void JumpLabelToJumpRegister(Address pc);
@@ -248,12 +258,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+ inline Handle<HeapObject> compressed_embedded_object_handle_at(
+ Address pc, Address constant_pool);
+ inline Handle<HeapObject> embedded_object_handle_at(Address pc,
+ Address constant_pool);
+
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
- // LOONG platform, as Code, Embedded Object or External-reference pointers
- // are split across two consecutive instructions and don't exist separately
- // in the code, so the serializer should not step forwards in memory after
- // a target is resolved and written.
+ // LOONG platform, as InstructionStream, Embedded Object or External-reference
+ // pointers are split across two consecutive instructions and don't exist
+ // separately in the code, so the serializer should not step forwards in
+ // memory after a target is resolved and written.
static constexpr int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit/64bit constant.
@@ -281,7 +296,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// ---------------------------------------------------------------------------
- // Code generation.
+ // InstructionStream generation.
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -902,7 +917,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// intervals of kBufferCheckInterval emitted bytes.
static constexpr int kBufferCheckInterval = 1 * KB / 2;
- // Code generation.
+ // InstructionStream generation.
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -937,7 +952,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- // Code emission.
+ // InstructionStream emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
diff --git a/deps/v8/src/codegen/loong64/constants-loong64.h b/deps/v8/src/codegen/loong64/constants-loong64.h
index 8003073da4..42c6b48a30 100644
--- a/deps/v8/src/codegen/loong64/constants-loong64.h
+++ b/deps/v8/src/codegen/loong64/constants-loong64.h
@@ -71,7 +71,6 @@ const uint32_t kFCSRExceptionCauseMask = kFCSRCauseMask ^ kFCSRInexactCauseMask;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 256;
// Helper functions for converting between register numbers and names.
@@ -580,6 +579,22 @@ enum Condition {
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
+
+ // Unified cross-platform condition names/aliases.
+ kEqual = equal,
+ kNotEqual = not_equal,
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+ kZero = equal,
+ kNotZero = not_equal,
};
// Returns the equivalent of !cc.
diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
index a260c78190..b039a5b3de 100644
--- a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
+++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
@@ -100,6 +100,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return a4; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return a5; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
// static
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index 8dfe2d5fc5..ceb2fef4d7 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -48,7 +48,7 @@ static inline bool IsZero(const Operand& rk) {
}
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -56,7 +56,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList exclusions = {exclusion1, exclusion2, exclusion3};
RegList list = kJSCallerSaved - exclusions;
- bytes += list.Count() * kPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kCallerSavedFPU.Count() * kDoubleSize;
@@ -65,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -73,7 +73,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList exclusions = {exclusion1, exclusion2, exclusion3};
RegList list = kJSCallerSaved - exclusions;
MultiPush(list);
- bytes += list.Count() * kPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
@@ -83,7 +83,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -95,33 +95,33 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList exclusions = {exclusion1, exclusion2, exclusion3};
RegList list = kJSCallerSaved - exclusions;
MultiPop(list);
- bytes += list.Count() * kPointerSize;
+ bytes += list.Count() * kSystemPointerSize;
return bytes;
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
- Add_d(fp, sp, Operand(kPointerSize));
+ Add_d(fp, sp, Operand(kSystemPointerSize));
} else {
Push(ra, fp);
mov(fp, sp);
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
- offset += 2 * kPointerSize;
+ offset += 2 * kSystemPointerSize;
} else {
Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
- offset += kPointerSize;
+ offset += kSystemPointerSize;
}
Add_d(fp, sp, Operand(offset));
}
@@ -145,7 +145,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Although the object register is tagged, the offset is relative to the start
// of the object, so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kTaggedSize));
if (v8_flags.debug_code) {
Label ok;
@@ -153,7 +153,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Add_d(scratch, object, offset - kHeapObjectTag);
- And(scratch, scratch, Operand(kPointerSize - 1));
+ And(scratch, scratch, Operand(kTaggedSize - 1));
Branch(&ok, eq, scratch, Operand(zero_reg));
Abort(AbortReason::kUnalignedCellInWriteBarrier);
bind(&ok);
@@ -165,17 +165,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
bind(&done);
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
@@ -193,7 +193,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Operand offset,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -212,7 +212,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -232,7 +232,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
}
}
-void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
Register object, Operand offset) {
ASM_CODE_COMMENT(this);
DCHECK_NE(dst_object, dst_slot);
@@ -278,9 +278,9 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (v8_flags.debug_code) {
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
Add_d(scratch, object, offset);
- Ld_d(scratch, MemOperand(scratch, 0));
+ LoadTaggedField(scratch, MemOperand(scratch, 0));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
}
@@ -298,9 +298,8 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, &done);
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
&done);
@@ -325,7 +324,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// ---------------------------------------------------------------------------
// Instruction macros.
-void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
add_w(rd, rj, rk.rm());
} else {
@@ -342,7 +341,7 @@ void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
add_d(rd, rj, rk.rm());
} else {
@@ -359,7 +358,7 @@ void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
sub_w(rd, rj, rk.rm());
} else {
@@ -384,7 +383,7 @@ void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
sub_d(rd, rj, rk.rm());
} else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
@@ -411,7 +410,7 @@ void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mul_w(rd, rj, rk.rm());
} else {
@@ -424,7 +423,7 @@ void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mulh_w(rd, rj, rk.rm());
} else {
@@ -437,7 +436,7 @@ void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mulh_wu(rd, rj, rk.rm());
} else {
@@ -450,7 +449,7 @@ void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mul_d(rd, rj, rk.rm());
} else {
@@ -463,7 +462,7 @@ void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mulh_d(rd, rj, rk.rm());
} else {
@@ -476,7 +475,7 @@ void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mulh_du(rd, rj, rk.rm());
} else {
@@ -489,7 +488,7 @@ void TurboAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
div_w(rd, rj, rk.rm());
} else {
@@ -502,7 +501,7 @@ void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mod_w(rd, rj, rk.rm());
} else {
@@ -515,7 +514,7 @@ void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mod_wu(rd, rj, rk.rm());
} else {
@@ -528,7 +527,7 @@ void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
div_d(rd, rj, rk.rm());
} else {
@@ -541,7 +540,7 @@ void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
div_wu(rd, rj, rk.rm());
} else {
@@ -554,7 +553,7 @@ void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
div_du(rd, rj, rk.rm());
} else {
@@ -567,7 +566,7 @@ void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mod_d(rd, rj, rk.rm());
} else {
@@ -580,7 +579,7 @@ void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
mod_du(rd, rj, rk.rm());
} else {
@@ -593,7 +592,7 @@ void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::And(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::And(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
and_(rd, rj, rk.rm());
} else {
@@ -610,7 +609,7 @@ void TurboAssembler::And(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Or(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
or_(rd, rj, rk.rm());
} else {
@@ -627,7 +626,7 @@ void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Xor(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
xor_(rd, rj, rk.rm());
} else {
@@ -644,7 +643,7 @@ void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Nor(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
nor(rd, rj, rk.rm());
} else {
@@ -657,7 +656,7 @@ void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Andn(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
andn(rd, rj, rk.rm());
} else {
@@ -670,7 +669,7 @@ void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Orn(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
orn(rd, rj, rk.rm());
} else {
@@ -683,12 +682,12 @@ void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Neg(Register rj, const Operand& rk) {
+void MacroAssembler::Neg(Register rj, const Operand& rk) {
DCHECK(rk.is_reg());
sub_d(rj, zero_reg, rk.rm());
}
-void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Slt(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
slt(rd, rj, rk.rm());
} else {
@@ -706,7 +705,7 @@ void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
sltu(rd, rj, rk.rm());
} else {
@@ -724,7 +723,7 @@ void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sle(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
slt(rd, rk.rm(), rj);
} else {
@@ -739,7 +738,7 @@ void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
sltu(rd, rk.rm(), rj);
} else {
@@ -754,17 +753,17 @@ void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sge(Register rd, Register rj, const Operand& rk) {
Slt(rd, rj, rk);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sgeu(Register rd, Register rj, const Operand& rk) {
Sltu(rd, rj, rk);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
slt(rd, rk.rm(), rj);
} else {
@@ -778,7 +777,7 @@ void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
sltu(rd, rk.rm(), rj);
} else {
@@ -792,7 +791,7 @@ void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
rotr_w(rd, rj, rk.rm());
} else {
@@ -804,7 +803,7 @@ void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
+void MacroAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
if (rk.is_reg()) {
rotr_d(rd, rj, rk.rm());
} else {
@@ -814,7 +813,7 @@ void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
}
}
-void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+void MacroAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (sa <= 4) {
@@ -827,7 +826,7 @@ void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
}
}
-void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+void MacroAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 63);
if (sa <= 4) {
@@ -843,7 +842,7 @@ void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
// ------------Pseudo-instructions-------------
// Change endianness
-void TurboAssembler::ByteSwapSigned(Register dest, Register src,
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
if (operand_size == 2) {
@@ -857,7 +856,7 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src,
}
}
-void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 2 || operand_size == 4);
if (operand_size == 2) {
@@ -869,7 +868,7 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
}
}
-void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_b(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
if (source.hasIndexReg()) {
@@ -879,7 +878,7 @@ void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_bu(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
if (source.hasIndexReg()) {
@@ -889,7 +888,7 @@ void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::St_b(Register rd, const MemOperand& rj) {
+void MacroAssembler::St_b(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
if (source.hasIndexReg()) {
@@ -899,7 +898,7 @@ void TurboAssembler::St_b(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_h(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
if (source.hasIndexReg()) {
@@ -909,7 +908,7 @@ void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_hu(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
if (source.hasIndexReg()) {
@@ -919,7 +918,7 @@ void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::St_h(Register rd, const MemOperand& rj) {
+void MacroAssembler::St_h(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
if (source.hasIndexReg()) {
@@ -929,7 +928,7 @@ void TurboAssembler::St_h(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_w(Register rd, const MemOperand& rj) {
MemOperand source = rj;
if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
@@ -946,9 +945,10 @@ void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_wu(Register rd, const MemOperand& rj) {
MemOperand source = rj;
AdjustBaseAndOffset(&source);
+
if (source.hasIndexReg()) {
ldx_wu(rd, source.base(), source.index());
} else {
@@ -956,7 +956,7 @@ void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::St_w(Register rd, const MemOperand& rj) {
+void MacroAssembler::St_w(Register rd, const MemOperand& rj) {
MemOperand source = rj;
if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
@@ -973,7 +973,7 @@ void TurboAssembler::St_w(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ld_d(Register rd, const MemOperand& rj) {
MemOperand source = rj;
if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
@@ -990,7 +990,7 @@ void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::St_d(Register rd, const MemOperand& rj) {
+void MacroAssembler::St_d(Register rd, const MemOperand& rj) {
MemOperand source = rj;
if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
@@ -1007,7 +1007,7 @@ void TurboAssembler::St_d(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
if (tmp.hasIndexReg()) {
@@ -1017,7 +1017,7 @@ void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
}
}
-void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
if (tmp.hasIndexReg()) {
@@ -1027,7 +1027,7 @@ void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
}
}
-void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
if (tmp.hasIndexReg()) {
@@ -1037,7 +1037,7 @@ void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
}
}
-void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
if (tmp.hasIndexReg()) {
@@ -1047,7 +1047,7 @@ void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
}
}
-void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ll_w(Register rd, const MemOperand& rj) {
DCHECK(!rj.hasIndexReg());
bool is_one_instruction = is_int14(rj.offset());
if (is_one_instruction) {
@@ -1061,7 +1061,7 @@ void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) {
+void MacroAssembler::Ll_d(Register rd, const MemOperand& rj) {
DCHECK(!rj.hasIndexReg());
bool is_one_instruction = is_int14(rj.offset());
if (is_one_instruction) {
@@ -1075,7 +1075,7 @@ void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) {
+void MacroAssembler::Sc_w(Register rd, const MemOperand& rj) {
DCHECK(!rj.hasIndexReg());
bool is_one_instruction = is_int14(rj.offset());
if (is_one_instruction) {
@@ -1089,7 +1089,7 @@ void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) {
+void MacroAssembler::Sc_d(Register rd, const MemOperand& rj) {
DCHECK(!rj.hasIndexReg());
bool is_one_instruction = is_int14(rj.offset());
if (is_one_instruction) {
@@ -1103,7 +1103,8 @@ void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) {
}
}
-void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+void MacroAssembler::li(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode, LiFlags mode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -1114,7 +1115,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
-void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -1134,7 +1135,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) {
}
}
-void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
+void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
if (is_int12(static_cast<int32_t>(j.immediate()))) {
addi_d(rd, zero_reg, j.immediate());
} else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
@@ -1147,7 +1148,7 @@ void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
}
}
-int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
if (is_int32(value)) {
return InstrCountForLiLower32Bit(value);
} else if (is_int52(value)) {
@@ -1177,7 +1178,7 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
// All changes to if...else conditions here must be added to
// InstrCountForLi64Bit as well.
-void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
@@ -1212,16 +1213,25 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
}
}
-void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
li_optimized(rd, j, mode);
+ } else if (RelocInfo::IsCompressedEmbeddedObject(j.rmode())) {
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(j.immediate()));
+ uint32_t immediate = AddEmbeddedObject(handle);
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
} else if (MustUseReg(j.rmode())) {
int64_t immediate;
if (j.IsHeapNumberRequest()) {
RequestHeapNumber(j.heap_number_request());
immediate = 0;
+ } else if (RelocInfo::IsFullEmbeddedObject(j.rmode())) {
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(j.immediate()));
+ immediate = AddEmbeddedObject(handle);
} else {
immediate = j.immediate();
}
@@ -1245,38 +1255,38 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
}
}
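// Editor's note (sketch, not part of this patch): li_optimized and
// InstrCountForLi64Bit above assemble a full 64-bit immediate from up to
// four LoongArch fields. The hypothetical helper below models the assumed
// field split -- ori covers bits 0-11, lu12i_w bits 12-31, lu32i_d bits
// 32-51 and lu52i_d bits 52-63; the names and widths are inferences from
// the instruction mnemonics, not patch content.
#include <cstdint>
struct Li64FieldsModel {
  uint64_t ori12, lu12i_w20, lu32i_d20, lu52i_d12;
};
static inline Li64FieldsModel SplitLi64Model(uint64_t imm) {
  return {imm & 0xFFFu,            // low 12 bits, or'ed in last
          (imm >> 12) & 0xFFFFFu,  // bits 12..31
          (imm >> 32) & 0xFFFFFu,  // bits 32..51
          (imm >> 52) & 0xFFFu};   // bits 52..63
}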
-void TurboAssembler::MultiPush(RegList regs) {
+void MacroAssembler::MultiPush(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs.bits() & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
addi_d(sp, sp, stack_offset);
}
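// Editor's note (sketch, not part of this patch): MultiPush above reserves
// one kSystemPointerSize slot per set bit, storing registers from the
// highest index downwards and moving sp only once at the end. Hypothetical
// model of that final sp adjustment, assuming 8-byte slots on loong64:
#include <cstdint>
static inline int64_t MultiPushSpDeltaModel(uint32_t reglist_bits) {
  int64_t stack_offset = 0;
  for (int i = 31; i >= 0; i--) {
    if (reglist_bits & (1u << i)) stack_offset -= 8;  // one slot per register
  }
  return stack_offset;  // addi_d(sp, sp, stack_offset) applies this once
}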
-void TurboAssembler::MultiPush(RegList regs1, RegList regs2) {
+void MacroAssembler::MultiPush(RegList regs1, RegList regs2) {
DCHECK((regs1 & regs2).is_empty());
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs1.bits() & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs2.bits() & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
+void MacroAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
DCHECK((regs1 & regs2).is_empty());
DCHECK((regs1 & regs3).is_empty());
DCHECK((regs2 & regs3).is_empty());
@@ -1284,57 +1294,57 @@ void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs1.bits() & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs2.bits() & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs3.bits() & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
St_d(ToRegister(i), MemOperand(sp, stack_offset));
}
}
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPop(RegList regs) {
+void MacroAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPop(RegList regs1, RegList regs2) {
+void MacroAssembler::MultiPop(RegList regs1, RegList regs2) {
DCHECK((regs1 & regs2).is_empty());
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs2.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs1.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
+void MacroAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
DCHECK((regs1 & regs2).is_empty());
DCHECK((regs1 & regs3).is_empty());
DCHECK((regs2 & regs3).is_empty());
@@ -1343,25 +1353,25 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs3.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs2.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs1.bits() & (1 << i)) != 0) {
Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -1374,7 +1384,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
}
}
-void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1386,7 +1396,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
addi_d(sp, sp, stack_offset);
}
-void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
+void MacroAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
uint16_t lsbw) {
DCHECK_LT(lsbw, msbw);
DCHECK_LT(lsbw, 32);
@@ -1394,7 +1404,7 @@ void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
bstrpick_w(rk, rj, msbw, lsbw);
}
-void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
+void MacroAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
uint16_t lsbw) {
DCHECK_LT(lsbw, msbw);
DCHECK_LT(lsbw, 64);
@@ -1402,17 +1412,17 @@ void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
bstrpick_d(rk, rj, msbw, lsbw);
}
-void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); }
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); }
-void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); }
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); }
-void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) {
+void MacroAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
movfr2gr_s(t8, fj);
Ffint_d_uw(fd, t8);
}
-void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
+void MacroAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rj != t7);
@@ -1421,13 +1431,13 @@ void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
ffint_d_l(fd, fd);
}
-void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) {
+void MacroAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
movfr2gr_d(t8, fj);
Ffint_d_ul(fd, t8);
}
-void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
+void MacroAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rj != t7);
@@ -1452,13 +1462,13 @@ void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
bind(&conversion_done);
}
-void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) {
+void MacroAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
movfr2gr_d(t8, fj);
Ffint_s_uw(fd, t8);
}
-void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
+void MacroAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rj != t7);
@@ -1467,13 +1477,13 @@ void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
ffint_s_l(fd, fd);
}
-void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) {
+void MacroAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
movfr2gr_d(t8, fj);
Ffint_s_ul(fd, t8);
}
-void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) {
+void MacroAssembler::Ffint_s_ul(FPURegister fd, Register rj) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(rj != t7);
@@ -1530,28 +1540,28 @@ void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj,
Ftintrz_l_d(fd, scratch);
}
-void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj,
+void MacroAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj,
FPURegister scratch) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ftintrz_uw_d(t8, fj, scratch);
movgr2fr_w(fd, t8);
}
-void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj,
+void MacroAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj,
FPURegister scratch) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ftintrz_uw_s(t8, fj, scratch);
movgr2fr_w(fd, t8);
}
-void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj,
+void MacroAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj,
FPURegister scratch, Register result) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ftintrz_ul_d(t8, fj, scratch, result);
movgr2fr_d(fd, t8);
}
-void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj,
+void MacroAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj,
FPURegister scratch, Register result) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ftintrz_ul_s(t8, fj, scratch, result);
@@ -1574,77 +1584,70 @@ void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) {
ftintrp_w_d(fd, fj);
}
-void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj,
+void MacroAssembler::Ftintrz_uw_d(Register rd, FPURegister fj,
FPURegister scratch) {
DCHECK(fj != scratch);
DCHECK(rd != t7);
{
- // Load 2^31 into scratch as its float representation.
+ // Load 2^32 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x41E00000);
- movgr2fr_w(scratch, zero_reg);
- movgr2frh_w(scratch, scratch1);
+ li(scratch1, 0x41F0000000000000);
+ movgr2fr_d(scratch, scratch1);
}
// Test if scratch > fd.
- // If fd < 2^31 we can convert it normally.
+ // If fd < 2^32 we can convert it normally.
Label simple_convert;
- CompareF64(fj, scratch, CLT);
+ CompareF64(fj, scratch, CULT);
BranchTrueShortF(&simple_convert);
- // First we subtract 2^31 from fd, then trunc it to rs
- // and add 2^31 to rj.
- fsub_d(scratch, fj, scratch);
- ftintrz_w_d(scratch, scratch);
- movfr2gr_s(rd, scratch);
- Or(rd, rd, 1 << 31);
+ // If fj >= 2^32, the result should be UINT32_MAX.
+ Add_w(rd, zero_reg, -1);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
- ftintrz_w_d(scratch, fj);
+ // Double -> Int64 -> Uint32;
+ ftintrz_l_d(scratch, fj);
movfr2gr_s(rd, scratch);
bind(&done);
}
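// Editor's note (sketch, not part of this patch): rough C++ model of the
// rewritten double->uint32 truncation above, assuming CULT means "unordered
// or less than". Inputs at or above 2^32 saturate to UINT32_MAX; everything
// else is truncated through a signed 64-bit intermediate. NaN and values at
// or below -1 are clamped to 0 here only to keep the model well defined --
// the real code leaves them to the ftintrz_l_d hardware semantics.
#include <cstdint>
static inline uint32_t FtintrzUwDModel(double x) {
  if (x >= 4294967296.0) return 0xFFFFFFFFu;  // >= 2^32: UINT32_MAX
  if (!(x > -1.0)) return 0u;                 // NaN / x <= -1: simplification
  return static_cast<uint32_t>(static_cast<int64_t>(x));  // double -> int64 -> uint32
}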
-void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj,
+void MacroAssembler::Ftintrz_uw_s(Register rd, FPURegister fj,
FPURegister scratch) {
DCHECK(fj != scratch);
DCHECK(rd != t7);
{
- // Load 2^31 into scratch as its float representation.
+ // Load 2^32 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x4F000000);
+ li(scratch1, 0x4F800000);
movgr2fr_w(scratch, scratch1);
}
// Test if scratch > fs.
- // If fs < 2^31 we can convert it normally.
+ // If fs < 2^32 we can convert it normally.
Label simple_convert;
- CompareF32(fj, scratch, CLT);
+ CompareF32(fj, scratch, CULT);
BranchTrueShortF(&simple_convert);
- // First we subtract 2^31 from fs, then trunc it to rd
- // and add 2^31 to rd.
- fsub_s(scratch, fj, scratch);
- ftintrz_w_s(scratch, scratch);
- movfr2gr_s(rd, scratch);
- Or(rd, rd, 1 << 31);
+ // If fj >= 2^32, the result should be UINT32_MAX.
+ Add_w(rd, zero_reg, -1);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
- ftintrz_w_s(scratch, fj);
+ // Float -> Int64 -> Uint32;
+ ftintrz_l_s(scratch, fj);
movfr2gr_s(rd, scratch);
bind(&done);
}
-void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
+void MacroAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
FPURegister scratch, Register result) {
DCHECK(fj != scratch);
DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
@@ -1654,9 +1657,7 @@ void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
mov(result, zero_reg);
Move(scratch, -1.0);
// If fd <= -1 or unordered, then the conversion fails.
- CompareF64(fj, scratch, CLE);
- BranchTrueShortF(&fail);
- CompareIsNanF64(fj, scratch);
+ CompareF64(fj, scratch, CULE);
BranchTrueShortF(&fail);
}
@@ -1665,8 +1666,8 @@ void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
movgr2fr_d(scratch, t7);
// Test if scratch > fs.
- // If fs < 2^63 we can convert it normally.
- CompareF64(fj, scratch, CLT);
+ // If fs < 2^63 or unordered we can convert it normally.
+ CompareF64(fj, scratch, CULT);
BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fs, then trunc it to rd
@@ -1699,7 +1700,7 @@ void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
bind(&fail);
}
-void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
+void MacroAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
FPURegister scratch, Register result) {
DCHECK(fj != scratch);
DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
@@ -1709,9 +1710,7 @@ void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
mov(result, zero_reg);
Move(scratch, -1.0f);
// If fd <= -1 or unordered, then the conversion fails.
- CompareF32(fj, scratch, CLE);
- BranchTrueShortF(&fail);
- CompareIsNanF32(fj, scratch);
+ CompareF32(fj, scratch, CULE);
BranchTrueShortF(&fail);
}
@@ -1724,8 +1723,8 @@ void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
}
// Test if scratch > fs.
- // If fs < 2^63 we can convert it normally.
- CompareF32(fj, scratch, CLT);
+ // If fs < 2^63 or unordered, we can convert it normally.
+ CompareF32(fj, scratch, CULT);
BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fs, then trunc it to rd
@@ -1758,7 +1757,7 @@ void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
bind(&fail);
}
-void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src,
FPURoundingMode mode) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
@@ -1769,23 +1768,23 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
movgr2fcsr(scratch);
}
-void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Floor_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_floor);
}
-void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Ceil_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_ceil);
}
-void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Trunc_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_trunc);
}
-void TurboAssembler::Round_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Round_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_round);
}
-void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
FPURoundingMode mode) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
@@ -1796,23 +1795,23 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
movgr2fcsr(scratch);
}
-void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Floor_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_floor);
}
-void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Ceil_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_ceil);
}
-void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Trunc_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_trunc);
}
-void TurboAssembler::Round_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Round_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_round);
}
-void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
+void MacroAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
FPUCondition cc, CFRegister cd, bool f32) {
if (f32) {
fcmp_cond_s(cc, cmp1, cmp2, cd);
@@ -1821,20 +1820,20 @@ void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
}
}
-void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
+void MacroAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
CFRegister cd, bool f32) {
CompareF(cmp1, cmp2, CUN, cd, f32);
}
-void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
+void MacroAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
bcnez(cj, target);
}
-void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
+void MacroAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
bceqz(cj, target);
}
-void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
+void MacroAssembler::BranchTrueF(Label* target, CFRegister cj) {
// TODO(yuyin): can be optimized
bool long_branch = target->is_bound()
? !is_near(target, OffsetSize::kOffset21)
@@ -1849,7 +1848,7 @@ void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
}
}
-void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
+void MacroAssembler::BranchFalseF(Label* target, CFRegister cj) {
bool long_branch = target->is_bound()
? !is_near(target, OffsetSize::kOffset21)
: is_trampoline_emitted();
@@ -1863,7 +1862,7 @@ void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
}
}
-void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
+void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(src_low != scratch);
@@ -1872,14 +1871,14 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
movgr2frh_w(dst, scratch);
}
-void TurboAssembler::Move(FPURegister dst, uint32_t src) {
+void MacroAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(static_cast<int32_t>(src)));
movgr2fr_w(dst, scratch);
}
-void TurboAssembler::Move(FPURegister dst, uint64_t src) {
+void MacroAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
fmov_d(dst, kDoubleRegZero);
@@ -1895,7 +1894,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) {
}
}
-void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
+void MacroAssembler::Movz(Register rd, Register rj, Register rk) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
masknez(scratch, rj, rk);
@@ -1903,7 +1902,7 @@ void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
or_(rd, rd, scratch);
}
-void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
+void MacroAssembler::Movn(Register rd, Register rj, Register rk) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
maskeqz(scratch, rj, rk);
@@ -1911,7 +1910,7 @@ void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
or_(rd, rd, scratch);
}
-void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
+void MacroAssembler::LoadZeroOnCondition(Register rd, Register rj,
const Operand& rk, Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
@@ -1995,40 +1994,40 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
}
}
-void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionNotZero(Register dest,
Register condition) {
masknez(dest, dest, condition);
}
-void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionZero(Register dest,
Register condition) {
maskeqz(dest, dest, condition);
}
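// Editor's note (sketch, not part of this patch): assumed semantics of the
// two conditional-zero instructions used above. masknez zeroes the result
// when the condition register is non-zero, maskeqz zeroes it when the
// condition is zero; otherwise the source value passes through unchanged.
#include <cstdint>
static inline uint64_t MasknezModel(uint64_t src, uint64_t cond) {
  return cond != 0 ? 0 : src;  // backs LoadZeroIfConditionNotZero
}
static inline uint64_t MaskeqzModel(uint64_t src, uint64_t cond) {
  return cond == 0 ? 0 : src;  // backs LoadZeroIfConditionZero
}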
-void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
+void MacroAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
movcf2gr(scratch, cc);
LoadZeroIfConditionNotZero(dest, scratch);
}
-void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) {
+void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
movcf2gr(scratch, cc);
LoadZeroIfConditionZero(dest, scratch);
}
-void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); }
+void MacroAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); }
-void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); }
+void MacroAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); }
-void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); }
+void MacroAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); }
-void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
+void MacroAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
// TODO(LOONG_dev): Optimize like arm64, use simd instruction
-void TurboAssembler::Popcnt_w(Register rd, Register rj) {
+void MacroAssembler::Popcnt_w(Register rd, Register rj) {
ASM_CODE_COMMENT(this);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
@@ -2072,7 +2071,7 @@ void TurboAssembler::Popcnt_w(Register rd, Register rj) {
srli_w(rd, rd, shift);
}
-void TurboAssembler::Popcnt_d(Register rd, Register rj) {
+void MacroAssembler::Popcnt_d(Register rd, Register rj) {
ASM_CODE_COMMENT(this);
int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
@@ -2102,7 +2101,7 @@ void TurboAssembler::Popcnt_d(Register rd, Register rj) {
srli_d(rd, rd, shift);
}
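// Editor's note (sketch, not part of this patch): the constants B0/B1 above
// come from the linked "CountBitsSetParallel" bit hack; the hypothetical
// helper below spells the same SWAR reduction out in plain C++ for the
// 64-bit case (the 32-bit Popcnt_w is the analogous reduction on 32-bit
// masks with a smaller final shift).
#include <cstdint>
static inline uint64_t Popcnt64Model(uint64_t v) {
  v = v - ((v >> 1) & 0x5555555555555555ull);                            // 2-bit sums
  v = (v & 0x3333333333333333ull) + ((v >> 2) & 0x3333333333333333ull);  // 4-bit sums
  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0Full;                            // 8-bit sums
  return (v * 0x0101010101010101ull) >> 56;                              // fold into top byte
}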
-void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
+void MacroAssembler::ExtractBits(Register dest, Register source, Register pos,
int size, bool sign_extend) {
sra_d(dest, source, pos);
bstrpick_d(dest, dest, size - 1, 0);
@@ -2124,7 +2123,7 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
}
}
-void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
int size) {
Rotr_d(dest, dest, pos);
bstrins_d(dest, source, size - 1, 0);
@@ -2136,10 +2135,10 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
}
}
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
- DoubleRegister single_scratch = kScratchDoubleReg.low();
+ DoubleRegister single_scratch = kScratchDoubleReg;
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2152,14 +2151,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
rotri_d(scratch2, scratch, 1);
movfr2gr_s(result, single_scratch);
Branch(done, ne, scratch, Operand(scratch2));
-
- // Truncate NaN to zero.
- CompareIsNanF64(double_input, double_input);
- Move(result, zero_reg);
- bcnez(FCC0, done);
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -2193,7 +2187,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \
(cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg)))
-void TurboAssembler::Branch(Label* L, bool need_link) {
+void MacroAssembler::Branch(Label* L, bool need_link) {
int offset = GetOffset(L, OffsetSize::kOffset26);
if (need_link) {
bl(offset);
@@ -2202,7 +2196,7 @@ void TurboAssembler::Branch(Label* L, bool need_link) {
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rj,
const Operand& rk, bool need_link) {
if (L->is_bound()) {
BRANCH_ARGS_CHECK(cond, rj, rk);
@@ -2234,19 +2228,19 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rj,
RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
- Branch(L, cond, rj, Operand(scratch));
+ CompareTaggedAndBranch(L, cond, rj, Operand(scratch));
}
-int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) {
+int32_t MacroAssembler::GetOffset(Label* L, OffsetSize bits) {
return branch_offset_helper(L, bits) >> 2;
}
-Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk,
+Register MacroAssembler::GetRkAsRegisterHelper(const Operand& rk,
Register scratch) {
Register r2 = no_reg;
if (rk.is_reg()) {
@@ -2259,7 +2253,7 @@ Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk,
return r2;
}
-bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond,
+bool MacroAssembler::BranchShortOrFallback(Label* L, Condition cond,
Register rj, const Operand& rk,
bool need_link) {
UseScratchRegisterScope temps(this);
@@ -2490,7 +2484,7 @@ bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond,
return true;
}
-void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rj,
const Operand& rk, bool need_link) {
BRANCH_ARGS_CHECK(cond, rj, rk);
bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
@@ -2498,21 +2492,44 @@ void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
USE(result);
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::CompareTaggedAndBranch(Label* label, Condition cond,
+ Register r1, const Operand& r2,
+ bool need_link) {
+ if (COMPRESS_POINTERS_BOOL) {
+ UseScratchRegisterScope temps(this);
+ Register scratch0 = temps.Acquire();
+ slli_w(scratch0, r1, 0);
+ if (IsZero(r2)) {
+ Branch(label, cond, scratch0, Operand(zero_reg), need_link);
+ } else {
+ Register scratch1 = temps.hasAvailable() ? temps.Acquire() : t8;
+ if (r2.is_reg()) {
+ slli_w(scratch1, r2.rm(), 0);
+ } else {
+ li(scratch1, r2);
+ }
+ Branch(label, cond, scratch0, Operand(scratch1), need_link);
+ }
+ } else {
+ Branch(label, cond, r1, r2, need_link);
+ }
+}
+
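// Editor's note (sketch, not part of this patch): when pointer compression
// is enabled, the new CompareTaggedAndBranch above compares only the lower
// 32 (compressed) bits of each operand; slli_w(reg, reg, 0) is assumed to
// sign-extend the low word, so both sides are normalized the same way
// before the ordinary Branch. Hypothetical model for the equality case:
#include <cstdint>
static inline bool TaggedEqualModel(uint64_t r1, uint64_t r2) {
  auto low32 = [](uint64_t v) {
    return static_cast<int64_t>(static_cast<int32_t>(v));  // sign-extended low word
  };
  return low32(r1) == low32(r2);
}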
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- Ld_d(destination,
- FieldMemOperand(destination, FixedArray::kHeaderSize +
- constant_index * kPointerSize));
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)));
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
Ld_d(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
@@ -2521,7 +2538,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -2550,7 +2567,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(Register target, Condition cond, Register rj,
+void MacroAssembler::Jump(Register target, Condition cond, Register rj,
const Operand& rk) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
@@ -2564,7 +2581,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rj,
}
}
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register rj, const Operand& rk) {
Label skip;
if (cond != cc_always) {
@@ -2578,13 +2595,12 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Register rj, const Operand& rk) {
- DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rj, const Operand& rk) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -2600,17 +2616,18 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
return;
}
- Jump(static_cast<intptr_t>(code.address()), rmode, cc_always, rj, rk);
+ int32_t target_index = AddCodeTarget(code);
+ Jump(static_cast<Address>(target_index), rmode, cc_always, rj, rk);
bind(&skip);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
li(t7, reference);
Jump(t7);
}
// Note: To call gcc-compiled C code on LoongArch, you must call through t[0-8].
-void TurboAssembler::Call(Register target, Condition cond, Register rj,
+void MacroAssembler::Call(Register target, Condition cond, Register rj,
const Operand& rk) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
@@ -2639,7 +2656,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
}
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rj, const Operand& rk) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label skip;
@@ -2659,7 +2676,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
bind(&skip);
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rj, const Operand& rk) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Builtin builtin = Builtin::kNoBuiltinId;
@@ -2669,11 +2686,11 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(code->IsExecutable());
- Call(code.address(), rmode, cond, rj, rk);
+ int32_t target_index = AddCodeTarget(code);
+ Call(static_cast<Address>(target_index), rmode, cond, rj, rk);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
static_assert(kSystemPointerSize == 8);
static_assert(kSmiTagSize == 1);
@@ -2687,22 +2704,22 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
Ld_d(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
UseScratchRegisterScope temps(this);
Register temp = temps.Acquire();
@@ -2719,7 +2736,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
bl(code_target_index);
@@ -2736,7 +2753,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
UseScratchRegisterScope temps(this);
@@ -2755,7 +2772,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
int32_t code_target_index = AddCodeTarget(code);
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
b(code_target_index);
@@ -2770,7 +2787,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::PatchAndJump(Address target) {
+void MacroAssembler::PatchAndJump(Address target) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2783,14 +2800,14 @@ void TurboAssembler::PatchAndJump(Address target) {
pc_ += sizeof(uint64_t);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
static constexpr int kNumInstructionsToJump = 2;
@@ -2811,17 +2828,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode, Register scratch) {
switch (type) {
case kCountIsInteger: {
- Alsl_d(sp, count, sp, kPointerSizeLog2);
+ Alsl_d(sp, count, sp, kSystemPointerSizeLog2);
break;
}
case kCountIsSmi: {
static_assert(kSmiTagSize == 1 && kSmiTag == 0);
DCHECK_NE(scratch, no_reg);
- SmiScale(scratch, count, kPointerSizeLog2);
+ SmiScale(scratch, count, kSystemPointerSizeLog2);
Add_d(sp, sp, scratch);
break;
}
@@ -2835,7 +2852,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
ArgumentsCountType type,
ArgumentsCountMode mode,
@@ -2851,11 +2868,11 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
}
}
-void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
+void MacroAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
Jump(ra, cond, rj, rk);
}
-void TurboAssembler::Drop(int count, Condition cond, Register reg,
+void MacroAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
@@ -2867,7 +2884,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
Branch(&skip, NegateCondition(cond), reg, op);
}
- Add_d(sp, sp, Operand(count * kPointerSize));
+ Add_d(sp, sp, Operand(count * kSystemPointerSize));
if (cond != al) {
bind(&skip);
@@ -2886,23 +2903,23 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
}
}
-void TurboAssembler::Call(Label* target) { Branch(target, true); }
+void MacroAssembler::Call(Label* target) { Branch(target, true); }
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
Push(scratch);
}
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(handle));
Push(scratch);
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order) {
DCHECK(!AreAliased(array, size, scratch, scratch2));
Label loop, entry;
@@ -2910,7 +2927,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
mov(scratch, zero_reg);
jmp(&entry);
bind(&loop);
- Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Alsl_d(scratch2, scratch, array, kSystemPointerSizeLog2, t7);
Ld_d(scratch2, MemOperand(scratch2, 0));
Push(scratch2);
Add_d(scratch, scratch, Operand(1));
@@ -2920,7 +2937,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
mov(scratch, size);
jmp(&entry);
bind(&loop);
- Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Alsl_d(scratch2, scratch, array, kSystemPointerSizeLog2, t7);
Ld_d(scratch2, MemOperand(scratch2, 0));
Push(scratch2);
bind(&entry);
@@ -2934,8 +2951,8 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- static_assert(StackHandlerConstants::kSize == 2 * kPointerSize);
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ static_assert(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
Push(Smi::zero()); // Padding.
@@ -2953,8 +2970,8 @@ void MacroAssembler::PopStackHandler() {
static_assert(StackHandlerConstants::kNextOffset == 0);
Pop(a1);
Add_d(sp, sp,
- Operand(
- static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
+ Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
+ kSystemPointerSize)));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch,
@@ -2962,7 +2979,7 @@ void MacroAssembler::PopStackHandler() {
St_d(a1, MemOperand(scratch, 0));
}
-void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
fsub_d(dst, src, kDoubleRegZero);
}
@@ -2978,10 +2995,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
Ld_d(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
}
@@ -2999,18 +3016,16 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
// here which will cause scratch1 to become negative.
sub_d(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- slli_d(scratch2, num_args, kPointerSizeLog2);
+ slli_d(scratch2, num_args, kSystemPointerSizeLog2);
// Signed comparison.
Branch(stack_overflow, le, scratch1, Operand(scratch2));
}
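// Editor's note (sketch, not part of this patch): the check above in plain
// C++ -- overflow is reported when the headroom between sp and the real
// stack limit cannot hold num_args slots of kSystemPointerSize bytes
// (assumed to be 8 on loong64).
#include <cstdint>
static inline bool StackOverflowCheckModel(uintptr_t sp, uintptr_t real_limit,
                                           int64_t num_args) {
  int64_t headroom = static_cast<int64_t>(sp - real_limit);  // sub_d
  return headroom <= num_args * 8;                           // slli_d + signed le
}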
-void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
- Register scratch,
- Condition cond,
- Label* target) {
- Ld_d(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- Ld_wu(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump(
+ Register code_data_container, Register scratch, Condition cond,
+ Label* target) {
+ Ld_hu(scratch,
+ FieldMemOperand(code_data_container, Code::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Branch(target, cond, scratch, Operand(zero_reg));
}
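// Editor's note (sketch, not part of this patch): the rewritten check above
// now reads the 16-bit kind-specific flags directly from the Code object
// (the old CodeDataContainer indirection is gone) and tests the
// marked-for-deoptimization bit. Hypothetical model, bit position kept
// symbolic:
#include <cstdint>
static inline bool IsMarkedForDeoptModel(uint16_t kind_specific_flags,
                                         unsigned marked_for_deopt_bit) {
  return (kind_specific_flags & (1u << marked_for_deopt_bit)) != 0;
}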
@@ -3147,7 +3162,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -3181,8 +3196,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
DCHECK_EQ(function, a1);
Register expected_parameter_count = a2;
Register temp_reg = t0;
- Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ LoadTaggedField(temp_reg,
+ FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as uint16_t
Ld_hu(expected_parameter_count,
FieldMemOperand(temp_reg,
@@ -3204,7 +3220,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, a1);
// Get the function and setup the context.
- Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -3229,7 +3245,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
// -----------------------------------------------------------------------------
// Runtime calls.
-void TurboAssembler::AddOverflow_d(Register dst, Register left,
+void MacroAssembler::AddOverflow_d(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3262,7 +3278,7 @@ void TurboAssembler::AddOverflow_d(Register dst, Register left,
}
}
-void TurboAssembler::SubOverflow_d(Register dst, Register left,
+void MacroAssembler::SubOverflow_d(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3295,7 +3311,7 @@ void TurboAssembler::SubOverflow_d(Register dst, Register left,
}
}
-void TurboAssembler::MulOverflow_w(Register dst, Register left,
+void MacroAssembler::MulOverflow_w(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3327,7 +3343,7 @@ void TurboAssembler::MulOverflow_w(Register dst, Register left,
xor_(overflow, overflow, scratch2);
}
-void TurboAssembler::MulOverflow_d(Register dst, Register left,
+void MacroAssembler::MulOverflow_d(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3359,8 +3375,8 @@ void TurboAssembler::MulOverflow_d(Register dst, Register left,
xor_(overflow, overflow, scratch2);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// All parameters are on the stack. v0 has the return value after call.
@@ -3375,8 +3391,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -3393,19 +3408,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
+ CompareTaggedAndBranch(target_if_cleared, eq, in,
+ Operand(kClearedWeakHeapObjectLower32));
And(out, in, Operand(~kWeakHeapObjectMask));
}
@@ -3444,10 +3455,10 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
+void MacroAssembler::Check(Condition cc, AbortReason reason, Register rj,
Operand rk) {
Label L;
Branch(&L, cc, rj, rk);
@@ -3456,7 +3467,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -3514,27 +3525,28 @@ void TurboAssembler::Abort(AbortReason reason) {
}
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
- Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+void MacroAssembler::LoadMap(Register destination, Register object) {
+ LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
- Ld_d(dst, FieldMemOperand(
- dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- Ld_d(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(
+ dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
}
-void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+void MacroAssembler::Prologue() { PushStandardFrame(a1); }
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Push(ra, fp);
@@ -3549,23 +3561,24 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
- addi_d(sp, fp, 2 * kPointerSize);
- Ld_d(ra, MemOperand(fp, 1 * kPointerSize));
- Ld_d(fp, MemOperand(fp, 0 * kPointerSize));
+ addi_d(sp, fp, 2 * kSystemPointerSize);
+ Ld_d(ra, MemOperand(fp, 1 * kSystemPointerSize));
+ Ld_d(fp, MemOperand(fp, 0 * kSystemPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
- static_assert(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
- static_assert(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
- static_assert(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+ static_assert(2 * kSystemPointerSize ==
+ ExitFrameConstants::kCallerSPDisplacement);
+ static_assert(1 * kSystemPointerSize == ExitFrameConstants::kCallerPCOffset);
+ static_assert(0 * kSystemPointerSize == ExitFrameConstants::kCallerFPOffset);
// This is how the stack will look:
// fp + 2 (==kCallerSPDisplacement) - old stack's end
@@ -3577,14 +3590,15 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// new stack (will contain saved ra)
// Save registers and reserve room for saved entry sp.
- addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
- St_d(ra, MemOperand(sp, 3 * kPointerSize));
- St_d(fp, MemOperand(sp, 2 * kPointerSize));
+ addi_d(sp, sp,
+ -2 * kSystemPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ St_d(ra, MemOperand(sp, 3 * kSystemPointerSize));
+ St_d(fp, MemOperand(sp, 2 * kSystemPointerSize));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
- St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+ St_d(scratch, MemOperand(sp, 1 * kSystemPointerSize));
}
// Set up new frame pointer.
addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -3605,23 +3619,12 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- if (save_doubles) {
- // The stack is already aligned to 0 modulo 8 for stores with sdc1.
- int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
- int space = kNumOfSavedRegisters * kDoubleSize;
- Sub_d(sp, sp, Operand(space));
- // Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < kNumOfSavedRegisters; i++) {
- FPURegister reg = FPURegister::from_code(2 * i);
- Fst_d(reg, MemOperand(sp, i * kDoubleSize));
- }
- }
// Reserve place for the return address, stack space and an optional slot
// (used by DirectCEntry to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
DCHECK_GE(stack_space, 0);
- Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ Sub_d(sp, sp, Operand((stack_space + 2) * kSystemPointerSize));
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
@@ -3631,27 +3634,14 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// location.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- addi_d(scratch, sp, kPointerSize);
+ addi_d(scratch, sp, kSystemPointerSize);
St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool do_return,
+void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return,
bool argument_count_is_length) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- // Optionally restore all double registers.
- if (save_doubles) {
- // Remember: we only need to restore every 2nd double FPU value.
- int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
- Sub_d(t8, fp,
- Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumOfSavedRegisters * kDoubleSize));
- for (int i = 0; i < kNumOfSavedRegisters; i++) {
- FPURegister reg = FPURegister::from_code(2 * i);
- Fld_d(reg, MemOperand(t8, i * kDoubleSize));
- }
- }
// Clear top frame.
li(t8,
@@ -3679,17 +3669,17 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
add_d(sp, sp, argument_count);
} else {
- Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8);
+ Alsl_d(sp, argument_count, sp, kSystemPointerSizeLog2, t8);
}
}
- addi_d(sp, sp, 2 * kPointerSize);
+ addi_d(sp, sp, 2 * kSystemPointerSize);
if (do_return) {
Ret();
}
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_LOONG64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -3705,17 +3695,21 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_LOONG64
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre32Bits()) {
Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset())));
} else {
DCHECK(SmiValuesAre31Bits());
- Ld_w(dst, src);
+ if (COMPRESS_POINTERS_BOOL) {
+ Ld_w(dst, src);
+ } else {
+ Ld_d(dst, src);
+ }
SmiUntag(dst);
}
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3733,12 +3727,12 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rk) {
if (v8_flags.debug_code) Check(cc, reason, rs, rk);
}
-void TurboAssembler::AssertNotSmi(Register object) {
+void MacroAssembler::AssertNotSmi(Register object) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -3749,7 +3743,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
}
}
-void TurboAssembler::AssertSmi(Register object) {
+void MacroAssembler::AssertSmi(Register object) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -3766,7 +3760,7 @@ void MacroAssembler::AssertStackIsAligned() {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
{
@@ -3879,7 +3873,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
#endif // V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -3894,12 +3888,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
fmax_s(dst, src1, src2);
}
-void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
fadd_s(dst, src1, src2);
}
-void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -3914,12 +3908,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
fmin_s(dst, src1, src2);
}
-void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
fadd_s(dst, src1, src2);
}
-void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -3934,12 +3928,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
fmax_d(dst, src1, src2);
}
-void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
fadd_d(dst, src1, src2);
}
-void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -3954,7 +3948,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
fmin_d(dst, src1, src2);
}
-void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
fadd_d(dst, src1, src2);
}
@@ -3962,7 +3956,7 @@ void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
static const int kRegisterPassedArguments = 8;
static const int kFPRegisterPassedArguments = 8;
-int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
@@ -3982,7 +3976,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
ASM_CODE_COMMENT(this);
@@ -3992,51 +3986,57 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// Remaining arguments are pushed on the stack.
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
- Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
- St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ St_d(scratch, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
- Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ Sub_d(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
}
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t7, function);
- CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
@@ -4047,7 +4047,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
if (v8_flags.debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
{
@@ -4068,53 +4068,64 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// stays correct.
{
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (function != t7) {
- mov(t7, function);
- function = t7;
- }
-
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register pc_scratch = t1;
- Register scratch = t2;
- DCHECK(!AreAliased(pc_scratch, scratch, function));
-
- pcaddi(pc_scratch, 1);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ if (function != t7) {
+ mov(t7, function);
+ function = t7;
+ }
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- St_d(pc_scratch, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- St_d(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- St_d(pc_scratch, MemOperand(scratch, 0));
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- St_d(fp, MemOperand(scratch, 0));
+ // Save the frame pointer and PC so that the stack layout remains
+ // iterable, even without an ExitFrame which normally exists between JS
+ // and C frames. 't' registers are caller-saved so this is safe as a
+ // scratch register.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ pcaddi(pc_scratch, 1);
+
+ // See x64 code for reasoning about how to address the isolate data
+ // fields.
+ if (root_array_available()) {
+ St_d(pc_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ St_d(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ St_d(pc_scratch, MemOperand(scratch, 0));
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(fp, MemOperand(scratch, 0));
+ }
}
Call(function);
- // We don't unset the PC; the FP is the source of truth.
- if (root_array_available()) {
- St_d(zero_reg, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- St_d(zero_reg, MemOperand(scratch, 0));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ St_d(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register scratch = t2;
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(zero_reg, MemOperand(scratch, 0));
+ }
}
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
- if (base::OS::ActivationFrameAlignment() > kPointerSize) {
- Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
+ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
- Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ Add_d(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
}
set_pc_for_safepoint();
@@ -4123,8 +4134,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void TurboAssembler::CheckPageFlag(const Register& object, int mask,
- Condition cc, Label* condition_met) {
+void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc,
+ Label* condition_met) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
temps.Include(t8);
@@ -4150,12 +4161,12 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
// TODO(LOONG_dev): range check, add Pcadd macro function?
pcaddi(dst, -pc_offset() >> 2);
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -4168,63 +4179,25 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
+void MacroAssembler::LoadCodeEntry(Register destination,
+ Register code_data_container_object) {
ASM_CODE_COMMENT(this);
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
- Register scratch = t8;
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
- // Not an off-heap trampoline object, the entry point is at
- // Code::raw_instruction_start().
- Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- Branch(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- // TODO(liuyu): don't use scratch_reg in Alsl_d;
- Alsl_d(destination, scratch, kRootRegister, kSystemPointerSizeLog2,
- zero_reg);
- Ld_d(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- }
+ Ld_d(destination, FieldMemOperand(code_data_container_object,
+ Code::kCodeEntryPointOffset));
}
-void TurboAssembler::CallCodeObject(Register code_object) {
+void MacroAssembler::CallCodeObject(Register code_data_container_object) {
ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
- Call(code_object);
+ LoadCodeEntry(code_data_container_object, code_data_container_object);
+ Call(code_data_container_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_data_container_object,
+ JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
- Jump(code_object);
+ LoadCodeEntry(code_data_container_object, code_data_container_object);
+ Jump(code_data_container_object);
}
namespace {
@@ -4248,8 +4221,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, a6, ne,
- &heal_optimized_code_slot);
+ __ TestCodeIsMarkedForDeoptimizationAndJump(optimized_code_entry, a6, ne,
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -4258,7 +4231,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ LoadCodeEntry(a2, optimized_code_entry);
__ Jump(a2);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -4283,7 +4256,8 @@ void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
Register optimized_code, Register closure) {
ASM_CODE_COMMENT(this);
- St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ StoreTaggedField(optimized_code,
+ FieldMemOperand(closure, JSFunction::kCodeOffset));
RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
SmiCheck::kOmit);
@@ -4307,7 +4281,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
CallRuntime(function_id, 1);
- LoadCodeObjectEntry(a2, a0);
+ LoadCodeEntry(a2, a0);
// Restore target function, new target and actual argument count.
Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
kJavaScriptCallArgCountRegister);
@@ -4366,13 +4340,96 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- Ld_d(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry);
}
+void MacroAssembler::LoadTaggedField(Register destination,
+ const MemOperand& field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTagged(destination, field_operand);
+ } else {
+ Ld_d(destination, field_operand);
+ }
+}
+
+void MacroAssembler::LoadTaggedSignedField(Register destination,
+ const MemOperand& field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ Ld_d(destination, field_operand);
+ }
+}
+
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+ SmiUntag(dst, src);
+}
+
+void MacroAssembler::StoreTaggedField(Register src, const MemOperand& dst) {
+ if (COMPRESS_POINTERS_BOOL) {
+ St_w(src, dst);
+ } else {
+ St_d(src, dst);
+ }
+}
+
+void MacroAssembler::AtomicStoreTaggedField(Register src,
+ const MemOperand& dst) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, dst.base(), dst.offset());
+ if (COMPRESS_POINTERS_BOOL) {
+ amswap_db_w(zero_reg, src, scratch);
+ } else {
+ amswap_db_d(zero_reg, src, scratch);
+ }
+}
+
+void MacroAssembler::DecompressTaggedSigned(Register dst,
+ const MemOperand& src) {
+ ASM_CODE_COMMENT(this);
+ Ld_wu(dst, src);
+ if (v8_flags.debug_code) {
+ // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+ Add_d(dst, dst, ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
+ }
+}
+
+void MacroAssembler::DecompressTagged(Register dst, const MemOperand& src) {
+ ASM_CODE_COMMENT(this);
+ Ld_wu(dst, src);
+ Add_d(dst, kPtrComprCageBaseRegister, dst);
+}
+
+void MacroAssembler::DecompressTagged(Register dst, Register src) {
+ ASM_CODE_COMMENT(this);
+ Bstrpick_d(dst, src, 31, 0);
+ Add_d(dst, kPtrComprCageBaseRegister, Operand(dst));
+}
+
+void MacroAssembler::AtomicDecompressTaggedSigned(Register dst,
+ const MemOperand& src) {
+ ASM_CODE_COMMENT(this);
+ Ld_wu(dst, src);
+ dbar(0);
+ if (v8_flags.debug_code) {
+ // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+ Add_d(dst, dst, ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
+ }
+}
+
+void MacroAssembler::AtomicDecompressTagged(Register dst,
+ const MemOperand& src) {
+ ASM_CODE_COMMENT(this);
+ Ld_wu(dst, src);
+ dbar(0);
+ Add_d(dst, kPtrComprCageBaseRegister, dst);
+}
+
} // namespace internal
} // namespace v8
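The tagged-field helpers added above (StoreTaggedField, DecompressTagged and friends) are the loong64 side of pointer compression: stores keep only the low 32 bits of a tagged value (St_w), and loads zero-extend them (Ld_wu) and add kPtrComprCageBaseRegister back in. A minimal standalone C++ model of that arithmetic, with a made-up cage base purely for illustration (this is not V8 code):

#include <cstdint>
#include <cstdio>

using Tagged_t = uint32_t;  // compressed on-heap representation
using Address = uint64_t;   // full machine pointer width

// Models St_w: only the low 32 bits of the tagged value reach the heap.
Tagged_t CompressTagged(Address value) { return static_cast<Tagged_t>(value); }

// Models Ld_wu followed by Add_d(dst, kPtrComprCageBaseRegister, dst).
Address DecompressTagged(Address cage_base, Tagged_t compressed) {
  return cage_base + compressed;
}

int main() {
  const Address cage_base = 0x00007f0000000000;   // hypothetical, 4 GB aligned
  const Address object = cage_base + 0x12345679;  // tagged HeapObject pointer
  Tagged_t field = CompressTagged(object);
  std::printf("round trip ok: %d\n",
              DecompressTagged(cage_base, field) == object);
  return 0;
}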
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index 57395c903a..4930ef6c50 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/loong64/assembler-loong64.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
#include "src/objects/tagged-index.h"
namespace v8 {
@@ -52,16 +53,16 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
// -----------------------------------------------------------------------------
// Static helper functions.
-#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -86,6 +87,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
li(kRootRegister, Operand(isolate_root));
+#ifdef V8_COMPRESS_POINTERS
+ LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset());
+#endif
}
// Jump unconditionally to given label.
@@ -101,7 +105,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, AbortReason reason, Register rj,
- Operand rk) NOOP_UNLESS_DEBUG_CODE
+ Operand rk) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cc, AbortReason reason, Register rj, Operand rk);
@@ -116,6 +120,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool need_link = false);
void Branch(Label* L, Condition cond, Register rj, RootIndex index);
+ void CompareTaggedAndBranch(Label* label, Condition cond, Register r1,
+ const Operand& r2, bool need_link = false);
+
// Floating point branches
void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
CFRegister cd = FCC0) {
@@ -153,7 +160,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(static_cast<int64_t>(j)), mode);
}
- void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO,
+ LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void LoadFromConstantsTable(Register destination, int constant_index) final;
@@ -206,10 +215,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
- void CallCodeObject(Register code_object);
-
- void JumpCodeObject(Register code_object,
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_data_container_object);
+ void CallCodeObject(Register code_data_container_object);
+ void JumpCodeObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
@@ -244,43 +253,43 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Smi smi);
void Push(Register src) {
- Add_d(sp, sp, Operand(-kPointerSize));
+ Add_d(sp, sp, Operand(-kSystemPointerSize));
St_d(src, MemOperand(sp, 0));
}
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- Sub_d(sp, sp, Operand(2 * kPointerSize));
- St_d(src1, MemOperand(sp, 1 * kPointerSize));
- St_d(src2, MemOperand(sp, 0 * kPointerSize));
+ Sub_d(sp, sp, Operand(2 * kSystemPointerSize));
+ St_d(src1, MemOperand(sp, 1 * kSystemPointerSize));
+ St_d(src2, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- Sub_d(sp, sp, Operand(3 * kPointerSize));
- St_d(src1, MemOperand(sp, 2 * kPointerSize));
- St_d(src2, MemOperand(sp, 1 * kPointerSize));
- St_d(src3, MemOperand(sp, 0 * kPointerSize));
+ Sub_d(sp, sp, Operand(3 * kSystemPointerSize));
+ St_d(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ St_d(src2, MemOperand(sp, 1 * kSystemPointerSize));
+ St_d(src3, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- Sub_d(sp, sp, Operand(4 * kPointerSize));
- St_d(src1, MemOperand(sp, 3 * kPointerSize));
- St_d(src2, MemOperand(sp, 2 * kPointerSize));
- St_d(src3, MemOperand(sp, 1 * kPointerSize));
- St_d(src4, MemOperand(sp, 0 * kPointerSize));
+ Sub_d(sp, sp, Operand(4 * kSystemPointerSize));
+ St_d(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ St_d(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ St_d(src3, MemOperand(sp, 1 * kSystemPointerSize));
+ St_d(src4, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- Sub_d(sp, sp, Operand(5 * kPointerSize));
- St_d(src1, MemOperand(sp, 4 * kPointerSize));
- St_d(src2, MemOperand(sp, 3 * kPointerSize));
- St_d(src3, MemOperand(sp, 2 * kPointerSize));
- St_d(src4, MemOperand(sp, 1 * kPointerSize));
- St_d(src5, MemOperand(sp, 0 * kPointerSize));
+ Sub_d(sp, sp, Operand(5 * kSystemPointerSize));
+ St_d(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ St_d(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ St_d(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ St_d(src4, MemOperand(sp, 1 * kSystemPointerSize));
+ St_d(src5, MemOperand(sp, 0 * kSystemPointerSize));
}
enum PushArrayOrder { kNormal, kReverse };
@@ -337,23 +346,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pop(Register dst) {
Ld_d(dst, MemOperand(sp, 0));
- Add_d(sp, sp, Operand(kPointerSize));
+ Add_d(sp, sp, Operand(kSystemPointerSize));
}
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
DCHECK(src1 != src2);
- Ld_d(src2, MemOperand(sp, 0 * kPointerSize));
- Ld_d(src1, MemOperand(sp, 1 * kPointerSize));
- Add_d(sp, sp, 2 * kPointerSize);
+ Ld_d(src2, MemOperand(sp, 0 * kSystemPointerSize));
+ Ld_d(src1, MemOperand(sp, 1 * kSystemPointerSize));
+ Add_d(sp, sp, 2 * kSystemPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- Ld_d(src3, MemOperand(sp, 0 * kPointerSize));
- Ld_d(src2, MemOperand(sp, 1 * kPointerSize));
- Ld_d(src1, MemOperand(sp, 2 * kPointerSize));
- Add_d(sp, sp, 3 * kPointerSize);
+ Ld_d(src3, MemOperand(sp, 0 * kSystemPointerSize));
+ Ld_d(src2, MemOperand(sp, 1 * kSystemPointerSize));
+ Ld_d(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ Add_d(sp, sp, 3 * kSystemPointerSize);
}
// Pops multiple values from the stack and load them in the
@@ -467,12 +476,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AssertSmi(smi);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- SmiUntag(smi);
+ SmiUntag(smi, smi);
}
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -494,12 +503,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
// See comments at the beginning of Builtins::Generate_CEntry.
inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
@@ -507,7 +527,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
li(a1, ref);
}
- void CheckPageFlag(const Register& object, int mask, Condition cc,
+ void CheckPageFlag(Register object, int mask, Condition cc,
Label* condition_met);
#undef COND_ARGS
@@ -767,45 +787,38 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
- protected:
- inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
- inline int32_t GetOffset(Label* L, OffsetSize bits);
+ // ---------------------------------------------------------------------------
+ // Pointer compression Support
- private:
- bool has_double_zero_reg_set_ = false;
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadTaggedField(Register destination, const MemOperand& field_operand);
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
+ // Loads a field containing a tagged signed value and decompresses it if
+ // necessary.
+ void LoadTaggedSignedField(Register destination,
+ const MemOperand& field_operand);
- bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
- const Operand& rk, bool need_link);
+ // Loads a field containing smi value and untags it.
+ void SmiUntagField(Register dst, const MemOperand& src);
- // f32 or f64
- void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
- CFRegister cd, bool f32 = true);
-
- void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
- bool f32 = true);
+ // Compresses and stores tagged value to given on-heap location.
+ void StoreTaggedField(Register src, const MemOperand& dst);
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-
- void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+ void AtomicStoreTaggedField(Register dst, const MemOperand& src);
- void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+ void DecompressTaggedSigned(Register dst, const MemOperand& src);
+ void DecompressTagged(Register dst, const MemOperand& src);
+ void DecompressTagged(Register dst, Register src);
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-};
+ void AtomicDecompressTaggedSigned(Register dst, const MemOperand& src);
+ void AtomicDecompressTagged(Register dst, const MemOperand& src);
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
@@ -825,9 +838,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
- void TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
- Register scratch,
- Condition cond, Label* target);
+ void TestCodeIsMarkedForDeoptimizationAndJump(Register code_data_container,
+ Register scratch,
+ Condition cond, Label* target);
Operand ClearedValue() const;
void PushRoot(RootIndex index) {
@@ -842,7 +855,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
- Branch(if_equal, eq, with, Operand(scratch));
+ CompareTaggedAndBranch(if_equal, eq, with, Operand(scratch));
}
// Compare the object in a register to a value and jump if they are not equal.
@@ -850,7 +863,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
- Branch(if_not_equal, ne, with, Operand(scratch));
+ CompareTaggedAndBranch(if_not_equal, ne, with, Operand(scratch));
}
// Checks if value is in range [lower_limit, higher_limit] using a single
@@ -899,18 +912,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
- // save_doubles - saves FPU registers on stack, currently disabled.
// stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles, int stack_space = 0,
- StackFrame::Type frame_type = StackFrame::EXIT);
+ void EnterExitFrame(int stack_space, StackFrame::Type frame_type);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool do_return = NO_EMIT_RETURN,
+ void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
// Make sure the stack is aligned. Only emits code in debug mode.
- void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE
+ void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE;
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -966,20 +976,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls.
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump).
@@ -989,9 +996,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -1034,32 +1038,32 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
// ---------------------------------------------------------------------------
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
@@ -1079,17 +1083,49 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
+ protected:
+ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
+ inline int32_t GetOffset(Label* L, OffsetSize bits);
+
private:
+ bool has_double_zero_reg_set_ = false;
+
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
InvokeType type);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+
+ bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link);
+
+ // f32 or f64
+ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd, bool f32 = true);
+
+ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
+ bool f32 = true);
+
+ void CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
UseScratchRegisterScope scope(this);
Register scratch = scope.Acquire();
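The CallCFunction overloads declared above now take a SetIsolateDataSlots argument; kYes (the default) keeps the previous behaviour of recording the caller fp/pc in IsolateData so the stack stays iterable across the C call, while kNo skips that bookkeeping. A hedged sketch of how a loong64 code generator might call a one-double-argument C function with the new parameter; the helper name, the external reference and the scratch-register choice are placeholders, not taken from this patch:

void EmitFastMathCall(MacroAssembler* masm, ExternalReference math_fn) {
  // One FP argument, no GP arguments; t0 stands in for any caller-saved
  // scratch register here.
  masm->PrepareCallCFunction(/*num_reg_arguments=*/0,
                             /*num_double_arguments=*/1, t0);
  // Skip the fast-C-call fp/pc bookkeeping for this particular call.
  masm->CallCFunction(math_fn, /*num_reg_arguments=*/0,
                      /*num_double_arguments=*/1,
                      MacroAssembler::SetIsolateDataSlots::kNo);
}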
diff --git a/deps/v8/src/codegen/loong64/register-loong64.h b/deps/v8/src/codegen/loong64/register-loong64.h
index 07c975223b..97e954b749 100644
--- a/deps/v8/src/codegen/loong64/register-loong64.h
+++ b/deps/v8/src/codegen/loong64/register-loong64.h
@@ -19,10 +19,20 @@ namespace internal {
V(x_reg) V(fp) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) \
- V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s7) V(s8)
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s7)
+
+#ifdef V8_COMPRESS_POINTERS
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
+#else
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(s8)
+#endif
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
+ MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#define DOUBLE_REGISTERS(V) \
V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
@@ -203,13 +213,18 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-constexpr Register kOffHeapTrampolineRegister = t7;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+#ifdef V8_COMPRESS_POINTERS
+constexpr Register kPtrComprCageBaseRegister = s8;
+#else
+constexpr Register kPtrComprCageBaseRegister = no_reg;
+#endif
+
constexpr DoubleRegister kFPReturnRegister0 = f0;
} // namespace internal
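With V8_COMPRESS_POINTERS the hunk above pins s8 as kPtrComprCageBaseRegister and drops it from the allocatable set; without compression it stays allocatable and the constant is no_reg. A small sketch of how such an X-macro list is typically consumed (illustrative only, not code from this patch):

#define COUNT_REGISTER(name) +1
// One fewer allocatable general register under pointer compression, because
// s8 is reserved as the cage base.
constexpr int kNumAllocatableGeneralRegistersSketch =
    0 ALLOCATABLE_GENERAL_REGISTERS(COUNT_REGISTER);
#undef COUNT_REGISTER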
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index 38834d2394..286ce8e033 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -314,6 +314,8 @@ class MachineType {
return MachineType::Float32();
case CTypeInfo::Type::kFloat64:
return MachineType::Float64();
+ case CTypeInfo::Type::kPointer:
+ return MachineType::Pointer();
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kSeqOneByteString:
case CTypeInfo::Type::kApiObject:
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/macro-assembler-base.cc
index da6d2a0799..899f67830f 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/macro-assembler-base.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
+MacroAssemblerBase::MacroAssemblerBase(Isolate* isolate,
const AssemblerOptions& options,
CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer)
@@ -26,7 +26,7 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
}
}
-Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
+Address MacroAssemblerBase::BuiltinEntry(Builtin builtin) {
DCHECK(Builtins::IsBuiltinId(builtin));
if (isolate_ != nullptr) {
Address entry = isolate_->builtin_entry_table()[Builtins::ToInt(builtin)];
@@ -38,7 +38,7 @@ Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
return d.InstructionStartOfBuiltin(builtin);
}
-void TurboAssemblerBase::IndirectLoadConstant(Register destination,
+void MacroAssemblerBase::IndirectLoadConstant(Register destination,
Handle<HeapObject> object) {
CHECK(root_array_available_);
@@ -71,7 +71,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
}
}
-void TurboAssemblerBase::IndirectLoadExternalReference(
+void MacroAssemblerBase::IndirectLoadExternalReference(
Register destination, ExternalReference reference) {
CHECK(root_array_available_);
@@ -90,24 +90,24 @@ void TurboAssemblerBase::IndirectLoadExternalReference(
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
+int32_t MacroAssemblerBase::RootRegisterOffsetForRootIndex(
RootIndex root_index) {
return IsolateData::root_slot_offset(root_index);
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
+int32_t MacroAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
return IsolateData::BuiltinSlotOffset(builtin);
}
// static
-intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+intptr_t MacroAssemblerBase::RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference) {
return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
+int32_t MacroAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
Isolate* isolate, const ExternalReference& reference) {
// Encode as an index into the external reference table stored on the
// isolate.
@@ -120,11 +120,24 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
}
// static
-bool TurboAssemblerBase::IsAddressableThroughRootRegister(
+bool MacroAssemblerBase::IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference) {
Address address = reference.address();
return isolate->root_register_addressable_region().contains(address);
}
+// static
+Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index,
+ Isolate* isolate) {
+ DCHECK(CanBeImmediate(index));
+ Object obj = isolate->root(index);
+ CHECK(obj.IsHeapObject());
+ return V8HeapCompressionScheme::CompressObject(obj.ptr());
+}
+
+Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
+ return ReadOnlyRootPtr(index, isolate_);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/macro-assembler-base.h
index 88ec873425..a66f594aff 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler-base.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
-#define V8_CODEGEN_TURBO_ASSEMBLER_H_
+#ifndef V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
+#define V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
#include <memory>
@@ -15,30 +15,24 @@
namespace v8 {
namespace internal {
-// Common base class for platform-specific TurboAssemblers containing
+// Common base class for platform-specific MacroAssemblers containing
// platform-independent bits.
-// You will encounter two subclasses, TurboAssembler (derives from
-// TurboAssemblerBase), and MacroAssembler (derives from TurboAssembler). The
-// main difference is that MacroAssembler is allowed to access the isolate, and
-// TurboAssembler accesses the isolate in a very limited way. TurboAssembler
-// contains all the functionality that is used by Turbofan, and does not expect
-// to be running on the main thread.
-class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
+// TODO(victorgomes): We should use LocalIsolate instead of Isolate in the
+// methods of this class.
+class V8_EXPORT_PRIVATE MacroAssemblerBase : public Assembler {
public:
// Constructors are declared public to inherit them in derived classes
// with `using` directive.
- TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
+ MacroAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer = {})
- : TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate),
+ : MacroAssemblerBase(isolate, AssemblerOptions::Default(isolate),
create_code_object, std::move(buffer)) {}
- TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
+ MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer = {});
- Isolate* isolate() const {
- return isolate_;
- }
+ Isolate* isolate() const { return isolate_; }
Handle<HeapObject> CodeObject() const {
DCHECK(!code_object_.is_null());
@@ -76,6 +70,11 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
// Corresponds to: destination = [kRootRegister + offset].
virtual void LoadRootRelative(Register destination, int32_t offset) = 0;
+ static constexpr bool CanBeImmediate(RootIndex index) {
+ return V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index);
+ }
+ Tagged_t ReadOnlyRootPtr(RootIndex index);
+ static Tagged_t ReadOnlyRootPtr(RootIndex index, Isolate* isolate);
virtual void LoadRoot(Register destination, RootIndex index) = 0;
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
@@ -134,25 +133,25 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
int comment_depth_ = 0;
- DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssemblerBase);
};
// Avoids emitting calls to the {Builtin::kAbort} builtin when emitting
// debug code during the lifetime of this scope object.
class V8_NODISCARD HardAbortScope {
public:
- explicit HardAbortScope(TurboAssemblerBase* assembler)
+ explicit HardAbortScope(MacroAssemblerBase* assembler)
: assembler_(assembler), old_value_(assembler->should_abort_hard()) {
assembler_->set_abort_hard(true);
}
~HardAbortScope() { assembler_->set_abort_hard(old_value_); }
private:
- TurboAssemblerBase* assembler_;
+ MacroAssemblerBase* assembler_;
bool old_value_;
};
} // namespace internal
} // namespace v8
-#endif // V8_CODEGEN_TURBO_ASSEMBLER_H_
+#endif // V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
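CanBeImmediate() and ReadOnlyRootPtr() let backends embed read-only roots as compressed 32-bit immediates when V8_STATIC_ROOTS is enabled, instead of loading them through the root register. A sketch of the intended decision; EmitCompareWithImmediate and EmitCompareWithRegister are placeholders for illustration, not V8 functions:

    void CompareAgainstRoot(MacroAssemblerBase* masm, Register obj,
                            RootIndex index, Register scratch) {
      if (MacroAssemblerBase::CanBeImmediate(index)) {
        Tagged_t imm = masm->ReadOnlyRootPtr(index);  // compressed constant
        EmitCompareWithImmediate(obj, imm);           // no memory load needed
      } else {
        masm->LoadRoot(scratch, index);               // per-backend virtual
        EmitCompareWithRegister(obj, scratch);
      }
    }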
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 61b26a320f..3e5d83806d 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -5,7 +5,7 @@
#ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_
#define V8_CODEGEN_MACRO_ASSEMBLER_H_
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
#include "src/execution/frames.h"
#include "src/heap/heap.h"
@@ -82,25 +82,25 @@ static constexpr int kMaxCParameters = 256;
class V8_NODISCARD FrameScope {
public:
- explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
+ explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
:
#ifdef V8_CODE_COMMENTS
- comment_(tasm, frame_name(type)),
+ comment_(masm, frame_name(type)),
#endif
- tasm_(tasm),
+ masm_(masm),
type_(type),
- old_has_frame_(tasm->has_frame()) {
- tasm->set_has_frame(true);
+ old_has_frame_(masm->has_frame()) {
+ masm->set_has_frame(true);
if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
- tasm->EnterFrame(type);
+ masm->EnterFrame(type);
}
}
~FrameScope() {
if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
- tasm_->LeaveFrame(type_);
+ masm_->LeaveFrame(type_);
}
- tasm_->set_has_frame(old_has_frame_);
+ masm_->set_has_frame(old_has_frame_);
}
private:
@@ -125,7 +125,7 @@ class V8_NODISCARD FrameScope {
Assembler::CodeComment comment_;
#endif // V8_CODE_COMMENTS
- TurboAssembler* tasm_;
+ MacroAssembler* masm_;
StackFrame::Type const type_;
bool const old_has_frame_;
};
@@ -198,7 +198,7 @@ class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope {
// scope object.
class V8_NODISCARD NoRootArrayScope {
public:
- explicit NoRootArrayScope(TurboAssembler* masm)
+ explicit NoRootArrayScope(MacroAssembler* masm)
: masm_(masm), old_value_(masm->root_array_available()) {
masm->set_root_array_available(false);
}
@@ -206,7 +206,7 @@ class V8_NODISCARD NoRootArrayScope {
~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }
private:
- TurboAssembler* masm_;
+ MacroAssembler* masm_;
bool old_value_;
};
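FrameScope and NoRootArrayScope now take the merged MacroAssembler directly; the TurboAssembler layer is gone, but the RAII behaviour is unchanged. A minimal usage sketch, not taken from the patch:

    // Enters an INTERNAL frame for the duration of the scope and leaves it on
    // destruction; MANUAL / NO_FRAME_TYPE skip the enter/leave, as above.
    void GenerateWithFrame(MacroAssembler* masm) {
      FrameScope scope(masm, StackFrame::INTERNAL);
      // ... emit code that relies on a frame being present ...
    }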
diff --git a/deps/v8/src/codegen/maglev-safepoint-table.cc b/deps/v8/src/codegen/maglev-safepoint-table.cc
index 370a6ef877..50adb61efd 100644
--- a/deps/v8/src/codegen/maglev-safepoint-table.cc
+++ b/deps/v8/src/codegen/maglev-safepoint-table.cc
@@ -19,14 +19,12 @@ MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc,
DCHECK(code.is_maglevved());
}
-#ifdef V8_EXTERNAL_CODE_SPACE
MaglevSafepointTable::MaglevSafepointTable(Isolate* isolate, Address pc,
- CodeDataContainer code)
+ GcSafeCode code)
: MaglevSafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {
DCHECK(code.is_maglevved());
}
-#endif // V8_EXTERNAL_CODE_SPACE
MaglevSafepointTable::MaglevSafepointTable(Address instruction_start,
Address safepoint_table_address)
@@ -83,6 +81,14 @@ MaglevSafepointEntry MaglevSafepointTable::FindEntry(Address pc) const {
tagged_register_indexes, trampoline_pc);
}
+// static
+MaglevSafepointEntry MaglevSafepointTable::FindEntry(Isolate* isolate,
+ GcSafeCode code,
+ Address pc) {
+ MaglevSafepointTable table(isolate, pc, code);
+ return table.FindEntry(pc);
+}
+
void MaglevSafepointTable::Print(std::ostream& os) const {
os << "Safepoints (entries = " << length_ << ", byte size = " << byte_size()
<< ", tagged slots = " << num_tagged_slots_
@@ -162,7 +168,7 @@ void MaglevSafepointTableBuilder::Emit(Assembler* assembler) {
#endif
// Make sure the safepoint table is properly aligned. Pad with nops.
- assembler->Align(Code::kMetadataAlignment);
+ assembler->Align(InstructionStream::kMetadataAlignment);
assembler->RecordComment(";;; Maglev safepoint table.");
set_safepoint_table_offset(assembler->pc_offset());
diff --git a/deps/v8/src/codegen/maglev-safepoint-table.h b/deps/v8/src/codegen/maglev-safepoint-table.h
index 1babc2368e..492346917f 100644
--- a/deps/v8/src/codegen/maglev-safepoint-table.h
+++ b/deps/v8/src/codegen/maglev-safepoint-table.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+class GcSafeCode;
+
class MaglevSafepointEntry : public SafepointEntryBase {
public:
static constexpr int kNoDeoptIndex = -1;
@@ -49,6 +51,8 @@ class MaglevSafepointEntry : public SafepointEntryBase {
uint8_t num_pushed_registers() const { return num_pushed_registers_; }
uint32_t tagged_register_indexes() const { return tagged_register_indexes_; }
+ uint32_t register_input_count() const { return tagged_register_indexes_; }
+
private:
uint32_t num_tagged_slots_ = 0;
uint32_t num_untagged_slots_ = 0;
@@ -56,17 +60,13 @@ class MaglevSafepointEntry : public SafepointEntryBase {
uint32_t tagged_register_indexes_ = 0;
};
-// A wrapper class for accessing the safepoint table embedded into the Code
-// object.
+// A wrapper class for accessing the safepoint table embedded into the
+// InstructionStream object.
class MaglevSafepointTable {
public:
// The isolate and pc arguments are used for figuring out whether pc
// belongs to the embedded or un-embedded code blob.
explicit MaglevSafepointTable(Isolate* isolate, Address pc, Code code);
-#ifdef V8_EXTERNAL_CODE_SPACE
- explicit MaglevSafepointTable(Isolate* isolate, Address pc,
- CodeDataContainer code);
-#endif
MaglevSafepointTable(const MaglevSafepointTable&) = delete;
MaglevSafepointTable& operator=(const MaglevSafepointTable&) = delete;
@@ -107,10 +107,14 @@ class MaglevSafepointTable {
// Returns the entry for the given pc.
MaglevSafepointEntry FindEntry(Address pc) const;
+ static MaglevSafepointEntry FindEntry(Isolate* isolate, GcSafeCode code,
+ Address pc);
void Print(std::ostream&) const;
private:
+ MaglevSafepointTable(Isolate* isolate, Address pc, GcSafeCode code);
+
// Layout information.
static constexpr int kLengthOffset = 0;
static constexpr int kEntryConfigurationOffset = kLengthOffset + kIntSize;
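The new GcSafeCode constructor and static FindEntry() overload support safepoint lookups during GC stack walks, when only a GC-safe view of the code object is available. A sketch of the call pattern this enables, illustrative only:

    // Builds the table for `code` and looks up the safepoint covering `pc`
    // in one step, without touching fields that are unsafe during GC.
    MaglevSafepointEntry LookupMaglevSafepoint(Isolate* isolate,
                                               GcSafeCode code, Address pc) {
      return MaglevSafepointTable::FindEntry(isolate, code, pc);
    }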
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
index 33c1bfcf2d..4b0e0c641e 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -69,7 +69,7 @@ void RelocInfo::apply(intptr_t delta) {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -153,8 +153,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 758a3d1664..accdd4532e 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -275,13 +275,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
EmitForbiddenSlotInstruction();
@@ -794,8 +794,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
if ((instr & ~kImm16Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
- // Make label relative to Code pointer of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ // Make label relative to Code pointer of generated InstructionStream
+ // object.
+ instr_at_put(
+ pos, target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag));
return;
}
@@ -817,7 +819,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
Instr instr_b = REGIMM | BGEZAL; // Branch and link.
instr_b = SetBranchOffset(pos, target_pos, instr_b);
// Correct ra register to point to one instruction after jalr from
- // TurboAssembler::BranchAndLinkLong.
+ // MacroAssembler::BranchAndLinkLong.
Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
kOptimizedBranchAndLinkLongReturnOffset;
@@ -1397,7 +1399,8 @@ void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
target_pos = L->pos();
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ instr_at_put(at_offset, target_pos + (InstructionStream::kHeaderSize -
+ kHeapObjectTag));
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
@@ -3809,7 +3812,8 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 06f4c848bd..ccdc9f828d 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -294,16 +294,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Adjust ra register in branch delay slot of bal instruction so to skip
// instructions not needed after optimization of PIC in
- // TurboAssembler::BranchAndLink method.
+ // MacroAssembler::BranchAndLink method.
static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
- // MIPS platform, as Code, Embedded Object or External-reference pointers
- // are split across two consecutive instructions and don't exist separately
- // in the code, so the serializer should not step forwards in memory after
- // a target is resolved and written.
+ // MIPS platform, as InstructionStream, Embedded Object or External-reference
+ // pointers are split across two consecutive instructions and don't exist
+ // separately in the code, so the serializer should not step forwards in
+ // memory after a target is resolved and written.
static constexpr int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit/64bit constant.
@@ -329,7 +329,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
- // Code generation.
+ // InstructionStream generation.
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -1654,7 +1654,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// intervals of kBufferCheckInterval emitted bytes.
static constexpr int kBufferCheckInterval = 1 * KB / 2;
- // Code generation.
+ // InstructionStream generation.
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -1692,7 +1692,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Readable constants for compact branch handling in emit()
enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
- // Code emission.
+ // InstructionStream emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x,
diff --git a/deps/v8/src/codegen/mips64/constants-mips64.h b/deps/v8/src/codegen/mips64/constants-mips64.h
index 6e8fe1ccbe..1b8790d844 100644
--- a/deps/v8/src/codegen/mips64/constants-mips64.h
+++ b/deps/v8/src/codegen/mips64/constants-mips64.h
@@ -197,7 +197,6 @@ const int32_t kPrefHintPrepareForStore = 30;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 256;
// Helper functions for converting between register numbers and names.
@@ -1100,6 +1099,22 @@ enum Condition {
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
+
+ // Unified cross-platform condition names/aliases.
+ kEqual = equal,
+ kNotEqual = not_equal,
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+ kZero = equal,
+ kNotZero = not_equal,
};
// Returns the equivalent of !cc.
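The unified aliases let architecture-independent code spell conditions the same way on every backend while still resolving to the MIPS-specific enumerators. A sketch, assuming the usual MIPS64 Branch(Label*, Condition, Register, const Operand&) overload:

    // kZero is an alias for `equal`, so this branches when `value` == 0.
    void JumpIfZero(MacroAssembler* masm, Register value, Label* target) {
      masm->Branch(target, kZero, value, Operand(zero_reg));
    }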
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
index 3c0e0dc27f..2965a5bc13 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
@@ -100,6 +100,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return a4; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return a5; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
// static
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 051d758515..ecbd3fdb9b 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -48,7 +48,7 @@ static inline bool IsZero(const Operand& rt) {
}
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -64,7 +64,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -81,7 +81,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -98,18 +98,18 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
Daddu(fp, sp, Operand(kPointerSize));
@@ -119,7 +119,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
@@ -176,17 +176,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
@@ -210,7 +210,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -233,7 +233,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -286,8 +286,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, &done);
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
@@ -320,7 +319,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// ---------------------------------------------------------------------------
// Instruction macros.
-void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
@@ -337,7 +336,7 @@ void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
daddu(rd, rs, rt.rm());
} else {
@@ -354,7 +353,7 @@ void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
@@ -380,7 +379,7 @@ void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
dsubu(rd, rs, rt.rm());
} else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
@@ -408,7 +407,7 @@ void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -421,7 +420,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
mult(rs, rt.rm());
@@ -444,7 +443,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
multu(rs, rt.rm());
@@ -467,7 +466,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmul(rd, rs, rt.rm());
@@ -490,7 +489,7 @@ void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, rt.rm());
@@ -513,7 +512,7 @@ void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmuhu(rd, rs, rt.rm());
@@ -536,7 +535,7 @@ void TurboAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mult(Register rs, const Operand& rt) {
+void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
@@ -549,7 +548,7 @@ void TurboAssembler::Mult(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmult(Register rs, const Operand& rt) {
+void MacroAssembler::Dmult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
dmult(rs, rt.rm());
} else {
@@ -562,7 +561,7 @@ void TurboAssembler::Dmult(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Multu(Register rs, const Operand& rt) {
+void MacroAssembler::Multu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
multu(rs, rt.rm());
} else {
@@ -575,7 +574,7 @@ void TurboAssembler::Multu(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmultu(Register rs, const Operand& rt) {
+void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
dmultu(rs, rt.rm());
} else {
@@ -588,7 +587,7 @@ void TurboAssembler::Dmultu(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Div(Register rs, const Operand& rt) {
+void MacroAssembler::Div(Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rs, rt.rm());
} else {
@@ -601,7 +600,7 @@ void TurboAssembler::Div(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
div(rs, rt.rm());
@@ -624,7 +623,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
div(rs, rt.rm());
@@ -647,7 +646,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
divu(rs, rt.rm());
@@ -670,7 +669,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ddiv(Register rs, const Operand& rt) {
+void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
} else {
@@ -683,7 +682,7 @@ void TurboAssembler::Ddiv(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
@@ -711,7 +710,7 @@ void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu(Register rs, const Operand& rt) {
+void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
@@ -724,7 +723,7 @@ void TurboAssembler::Divu(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
divu(rs, rt.rm());
@@ -747,7 +746,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ddivu(Register rs, const Operand& rt) {
+void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
} else {
@@ -760,7 +759,7 @@ void TurboAssembler::Ddivu(Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
ddivu(rs, rt.rm());
@@ -783,7 +782,7 @@ void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
@@ -811,7 +810,7 @@ void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
@@ -839,7 +838,7 @@ void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
@@ -856,7 +855,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
@@ -873,7 +872,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
@@ -890,7 +889,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
nor(rd, rs, rt.rm());
} else {
@@ -903,11 +902,11 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Neg(Register rs, const Operand& rt) {
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
dsubu(rs, zero_reg, rt.rm());
}
-void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
@@ -925,7 +924,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
@@ -949,7 +948,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
@@ -964,7 +963,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
@@ -979,17 +978,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) {
Slt(rd, rs, rt);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
Sltu(rd, rs, rt);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
@@ -1003,7 +1002,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
@@ -1017,7 +1016,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
@@ -1029,7 +1028,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
drotrv(rd, rs, rt.rm());
} else {
@@ -1047,7 +1046,7 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
pref(hint, rs);
}
-void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
@@ -1060,7 +1059,7 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
-void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
+void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 63);
if (kArchVariant == kMips64r6 && sa <= 4) {
@@ -1076,7 +1075,7 @@ void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
-void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
+void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bnvc(rs, rt, &skip);
@@ -1087,7 +1086,7 @@ void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
}
}
-void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
+void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bovc(rs, rt, &skip);
@@ -1101,7 +1100,7 @@ void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
// ------------Pseudo-instructions-------------
// Change endianness
-void TurboAssembler::ByteSwapSigned(Register dest, Register src,
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
@@ -1117,7 +1116,7 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src,
}
}
-void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 2 || operand_size == 4);
if (operand_size == 2) {
@@ -1130,7 +1129,7 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
}
}
-void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
@@ -1154,7 +1153,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lwu(rd, rs);
} else {
@@ -1164,7 +1163,7 @@ void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
DCHECK(rd != rs.rm());
@@ -1181,7 +1180,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
@@ -1215,7 +1214,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
@@ -1249,7 +1248,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
DCHECK(rs.rm() != scratch);
@@ -1278,7 +1277,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
}
}
-void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
@@ -1313,7 +1312,7 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Daddu(rd, rd, scratch);
}
-void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
@@ -1337,7 +1336,7 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
-void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
Lwc1(fd, rs);
@@ -1348,7 +1347,7 @@ void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
}
}
-void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
Swc1(fd, rs);
@@ -1359,7 +1358,7 @@ void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
}
}
-void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
@@ -1371,7 +1370,7 @@ void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
}
}
-void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
@@ -1383,97 +1382,97 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
}
}
-void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
lb(rd, source);
}
-void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
lbu(rd, source);
}
-void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
sb(rd, source);
}
-void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
lh(rd, source);
}
-void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
lhu(rd, source);
}
-void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
sh(rd, source);
}
-void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
lw(rd, source);
}
-void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
lwu(rd, source);
}
-void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
sw(rd, source);
}
-void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
ld(rd, source);
}
-void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(&source);
sd(rd, source);
}
-void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
lwc1(fd, tmp);
}
-void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
swc1(fs, tmp);
}
-void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
ldc1(fd, tmp);
}
-void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(&tmp);
sdc1(fs, tmp);
}
-void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ll(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
@@ -1487,7 +1486,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lld(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
@@ -1501,7 +1500,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sc(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
@@ -1515,7 +1514,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Scd(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
@@ -1529,7 +1528,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+void MacroAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -1540,7 +1539,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
-void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -1560,7 +1559,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) {
}
}
-void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
+void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
if (is_int16(static_cast<int32_t>(j.immediate()))) {
daddiu(rd, zero_reg, (j.immediate() & kImm16Mask));
} else if (!(j.immediate() & kUpper16MaskOf64)) {
@@ -1584,7 +1583,7 @@ static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
return INT_MAX;
}
-int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
if (is_int32(value)) {
return InstrCountForLiLower32Bit(value);
} else {
@@ -1679,7 +1678,7 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
// All changes to if...else conditions here must be added to
// InstrCountForLi64Bit as well.
-void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
@@ -1857,7 +1856,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
}
}
-void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
@@ -1919,7 +1918,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
}
}
-void TurboAssembler::MultiPush(RegList regs) {
+void MacroAssembler::MultiPush(RegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kPointerSize;
@@ -1932,7 +1931,7 @@ void TurboAssembler::MultiPush(RegList regs) {
}
}
-void TurboAssembler::MultiPop(RegList regs) {
+void MacroAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1944,7 +1943,7 @@ void TurboAssembler::MultiPop(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -1957,7 +1956,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
}
}
-void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1969,7 +1968,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
daddiu(sp, sp, stack_offset);
}
-void TurboAssembler::MultiPushMSA(DoubleRegList regs) {
+void MacroAssembler::MultiPushMSA(DoubleRegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
@@ -1982,7 +1981,7 @@ void TurboAssembler::MultiPushMSA(DoubleRegList regs) {
}
}
-void TurboAssembler::MultiPopMSA(DoubleRegList regs) {
+void MacroAssembler::MultiPopMSA(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1994,14 +1993,14 @@ void TurboAssembler::MultiPopMSA(DoubleRegList regs) {
daddiu(sp, sp, stack_offset);
}
-void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
+void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
DCHECK_LT(pos + size, 33);
ext_(rt, rs, pos, size);
}
-void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos,
+void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
pos + size <= 64);
@@ -2014,7 +2013,7 @@ void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos,
}
}
-void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
+void MacroAssembler::Ins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
DCHECK_LE(pos + size, 32);
@@ -2022,7 +2021,7 @@ void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
ins_(rt, rs, pos, size);
}
-void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos,
+void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
pos + size <= 64);
@@ -2035,7 +2034,7 @@ void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos,
}
}
-void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
+void MacroAssembler::ExtractBits(Register dest, Register source, Register pos,
int size, bool sign_extend) {
dsrav(dest, source, pos);
Dext(dest, dest, 0, size);
@@ -2057,7 +2056,7 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
}
}
-void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
int size) {
Dror(dest, dest, pos);
Dins(dest, source, 0, size);
@@ -2069,7 +2068,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
}
}
-void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_s changes the sign for NaN-like operands as well.
neg_s(fd, fs);
@@ -2094,7 +2093,7 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
}
}
-void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_d changes the sign for NaN-like operands as well.
neg_d(fd, fs);
@@ -2119,14 +2118,14 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
}
}
-void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
BlockTrampolinePoolScope block_trampoline_pool(this);
mfc1(t8, fs);
Cvt_d_uw(fd, t8);
}
-void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
@@ -2139,14 +2138,14 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
cvt_d_l(fd, fd);
}
-void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_d_ul(fd, t8);
}
-void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
@@ -2174,14 +2173,14 @@ void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
bind(&conversion_done);
}
-void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_s_uw(fd, t8);
}
-void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
@@ -2193,14 +2192,14 @@ void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
cvt_s_l(fd, fd);
}
-void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_s_ul(fd, t8);
}
-void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Convert rs to a FP value in fd.
@@ -2260,28 +2259,28 @@ void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs,
trunc_l_d(fd, fs);
}
-void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
+void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
FPURegister scratch) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Trunc_uw_d(t8, fs, scratch);
mtc1(t8, fd);
}
-void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
FPURegister scratch) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Trunc_uw_s(t8, fs, scratch);
mtc1(t8, fd);
}
-void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
+void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Trunc_ul_d(t8, fs, scratch, result);
dmtc1(t8, fd);
}
-void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
+void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Trunc_ul_s(t8, fs, scratch, result);
@@ -2304,78 +2303,72 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
ceil_w_d(fd, fs);
}
-void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs,
+void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs,
FPURegister scratch) {
DCHECK(fs != scratch);
DCHECK(rd != at);
{
- // Load 2^31 into scratch as its float representation.
+ // Load 2^32 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x41E00000);
+ li(scratch1, 0x41F00000);
mtc1(zero_reg, scratch);
mthc1(scratch1, scratch);
}
// Test if scratch > fd.
- // If fd < 2^31 we can convert it normally.
+ // If fd < 2^32 we can convert it normally.
Label simple_convert;
- CompareF64(OLT, fs, scratch);
+ CompareF64(ULT, fs, scratch);
BranchTrueShortF(&simple_convert);
- // First we subtract 2^31 from fd, then trunc it to rs
- // and add 2^31 to rs.
- sub_d(scratch, fs, scratch);
- trunc_w_d(scratch, scratch);
- mfc1(rd, scratch);
- Or(rd, rd, 1 << 31);
+ // If fd > 2^32, the result should be UINT_32_MAX;
+ Addu(rd, zero_reg, -1);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
- trunc_w_d(scratch, fs);
+ // Double -> Int64 -> Uint32;
+ trunc_l_d(scratch, fs);
mfc1(rd, scratch);
bind(&done);
}
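The rewritten Trunc_uw_d saturates instead of wrapping: anything at or above 2^32 (whose double encoding has high word 0x41F00000) now yields UINT32_MAX, while in-range values are truncated through int64 and only the low 32 bits are kept. A minimal standalone C++ sketch of that mapping, not part of the patch, with the NaN and out-of-int64-range cases left to the hardware and only noted here:

#include <cmath>
#include <cstdint>

// Reference model for the new Trunc_uw_d path: saturate at 2^32, otherwise
// Double -> Int64 -> low 32 bits, mirroring trunc_l_d followed by mfc1.
uint32_t TruncUwDModel(double fs) {
  const double k2Pow32 = 4294967296.0;  // high word 0x41F00000, low word 0
  if (fs >= k2Pow32) {                  // CompareF64(ULT, ...) not taken
    return UINT32_MAX;                  // Addu(rd, zero_reg, -1)
  }
  // The model assumes fs is not NaN and its truncation fits in int64;
  // the CPU defines those cases itself on the trunc_l_d path.
  return static_cast<uint32_t>(static_cast<int64_t>(std::trunc(fs)));
}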
-void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs,
+void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs,
FPURegister scratch) {
DCHECK(fs != scratch);
DCHECK(rd != at);
{
- // Load 2^31 into scratch as its float representation.
+ // Load 2^32 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x4F000000);
+ li(scratch1, 0x4F800000);
mtc1(scratch1, scratch);
}
// Test if scratch > fs.
- // If fs < 2^31 we can convert it normally.
+ // If fs < 2^32 we can convert it normally.
Label simple_convert;
- CompareF32(OLT, fs, scratch);
+ CompareF32(ULT, fs, scratch);
BranchTrueShortF(&simple_convert);
- // First we subtract 2^31 from fs, then trunc it to rd
- // and add 2^31 to rd.
- sub_s(scratch, fs, scratch);
- trunc_w_s(scratch, scratch);
- mfc1(rd, scratch);
- Or(rd, rd, 1 << 31);
+ // If fs >= 2^32, the result is UINT32_MAX.
+ Addu(rd, zero_reg, -1);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
- trunc_w_s(scratch, fs);
+ // Float -> Int64 -> Uint32;
+ trunc_l_s(scratch, fs);
mfc1(rd, scratch);
bind(&done);
}
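Trunc_uw_s makes the same change for single precision; the only subtlety is the new immediate, which is the raw IEEE-754 encoding of 2^32 as a float rather than 2^31. A quick standalone check of that claim, not part of the patch:

#include <cstdint>
#include <cstring>

// Decode a raw single-precision bit pattern.
float DecodeF32(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}
// DecodeF32(0x4F800000) == 4294967296.0f   (2^32, the new bound)
// DecodeF32(0x4F000000) == 2147483648.0f   (2^31, the old bound)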
-void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
+void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs,
FPURegister scratch, Register result) {
DCHECK(fs != scratch);
DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));
@@ -2385,9 +2378,7 @@ void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
mov(result, zero_reg);
Move(scratch, -1.0);
// If fs <= -1 or unordered, then the conversion fails.
- CompareF64(OLE, fs, scratch);
- BranchTrueShortF(&fail);
- CompareIsNanF64(fs, scratch);
+ CompareF64(ULE, fs, scratch);
BranchTrueShortF(&fail);
}
@@ -2396,8 +2387,8 @@ void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
dmtc1(at, scratch);
// Test if scratch > fs.
- // If fs < 2^63 we can convert it normally.
- CompareF64(OLT, fs, scratch);
+ // If fs < 2^63 or unordered, we can convert it normally.
+ CompareF64(ULT, fs, scratch);
BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fs, then trunc it to rd
@@ -2430,7 +2421,7 @@ void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
bind(&fail);
}
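In Trunc_ul_d here, and again in Trunc_ul_s below, the fail check drops the separate NaN compare because the unordered predicate already covers it: ULE is true when the operands are unordered or when the first is less than or equal to the second. A small sketch of the identity being relied on, not part of the patch:

#include <cmath>

// MIPS FPU predicates restated in C++: ULE == OLE || unordered, so a single
// CompareF64(ULE, fs, scratch) replaces CompareF64(OLE, ...) plus
// CompareIsNanF64(...).
bool UN (double a, double b) { return std::isnan(a) || std::isnan(b); }
bool OLE(double a, double b) { return !UN(a, b) && a <= b; }
bool ULE(double a, double b) { return UN(a, b) || a <= b; }
// For all a, b:  ULE(a, b) == (OLE(a, b) || UN(a, b)).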
-void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs,
+void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs,
FPURegister scratch, Register result) {
DCHECK(fs != scratch);
DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));
@@ -2440,9 +2431,7 @@ void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs,
mov(result, zero_reg);
Move(scratch, -1.0f);
// If fs <= -1 or unordered, then the conversion fails.
- CompareF32(OLE, fs, scratch);
- BranchTrueShortF(&fail);
- CompareIsNanF32(fs, scratch);
+ CompareF32(ULE, fs, scratch);
BranchTrueShortF(&fail);
}
@@ -2455,8 +2444,8 @@ void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs,
}
// Test if scratch > fs.
- // If fs < 2^63 we can convert it normally.
- CompareF32(OLT, fs, scratch);
+ // If fs < 2^63 or unordered, we can convert it normally.
+ CompareF32(ULT, fs, scratch);
BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fs, then trunc it to rd
@@ -2490,7 +2479,7 @@ void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs,
}
template <typename RoundFunc>
-void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src,
FPURoundingMode mode, RoundFunc round) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
@@ -2522,36 +2511,36 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
}
}
-void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_floor,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->floor_l_d(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->floor_l_d(dst, src);
});
}
-void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_ceil,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->ceil_l_d(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->ceil_l_d(dst, src);
});
}
-void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_trunc,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->trunc_l_d(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->trunc_l_d(dst, src);
});
}
-void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
+void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_round,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->round_l_d(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->round_l_d(dst, src);
});
}
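RoundDouble (and RoundFloat below) keep the mode-specific instruction out of the shared body by taking it as a callable, which is why only the lambda parameter type changes in the four wrappers above. A minimal sketch of that dispatch shape, not part of the patch; Emitter is a hypothetical stand-in for the assembler:

// Hypothetical stand-in for the assembler; only the callback shape matters.
struct Emitter {
  void floor_l_d() {}
  void round_l_d() {}
};

template <typename RoundFunc>
void RoundDoubleSketch(Emitter* masm, RoundFunc round) {
  // shared prologue (block trampolines, stash FCSR) would go here
  round(masm);  // e.g. [](Emitter* m) { m->floor_l_d(); }
  // shared epilogue (restore FCSR) would go here
}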
template <typename RoundFunc>
-void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
FPURoundingMode mode, RoundFunc round) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
@@ -2586,35 +2575,35 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
}
}
-void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_floor,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->floor_w_s(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->floor_w_s(dst, src);
});
}
-void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_ceil,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->ceil_w_s(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->ceil_w_s(dst, src);
});
}
-void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_trunc,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->trunc_w_s(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->trunc_w_s(dst, src);
});
}
-void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
+void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_round,
- [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
- tasm->round_w_s(dst, src);
+ [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
+ masm->round_w_s(dst, src);
});
}
-void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx,
+void MacroAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx,
MemOperand src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2640,7 +2629,7 @@ void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx,
}
}
-void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx,
+void MacroAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx,
MemOperand dst) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2684,7 +2673,7 @@ void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx,
dotp_instr(dst, kSimd128ScratchReg, kSimd128RegZero); \
break;
-void TurboAssembler::ExtMulLow(MSADataType type, MSARegister dst,
+void MacroAssembler::ExtMulLow(MSADataType type, MSARegister dst,
MSARegister src1, MSARegister src2) {
switch (type) {
EXT_MUL_BINOP(MSAS8, ilvr_b, dotp_s_h)
@@ -2698,7 +2687,7 @@ void TurboAssembler::ExtMulLow(MSADataType type, MSARegister dst,
}
}
-void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst,
+void MacroAssembler::ExtMulHigh(MSADataType type, MSARegister dst,
MSARegister src1, MSARegister src2) {
switch (type) {
EXT_MUL_BINOP(MSAS8, ilvl_b, dotp_s_h)
@@ -2713,7 +2702,7 @@ void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst,
}
#undef EXT_MUL_BINOP
-void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) {
+void MacroAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
switch (sz) {
@@ -2738,7 +2727,7 @@ void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) {
}
}
-void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst,
+void MacroAssembler::ExtAddPairwise(MSADataType type, MSARegister dst,
MSARegister src) {
switch (type) {
case MSAS8:
@@ -2758,7 +2747,7 @@ void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst,
}
}
-void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src,
+void MacroAssembler::MSARoundW(MSARegister dst, MSARegister src,
FPURoundingMode mode) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
@@ -2774,7 +2763,7 @@ void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src,
ctcmsa(MSACSR, scratch);
}
-void TurboAssembler::MSARoundD(MSARegister dst, MSARegister src,
+void MacroAssembler::MSARoundD(MSARegister dst, MSARegister src,
FPURoundingMode mode) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
@@ -2818,7 +2807,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
sub_d(fd, scratch, fr);
}
-void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
+void MacroAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
FPURegister cmp1, FPURegister cmp2) {
if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W;
@@ -2829,12 +2818,12 @@ void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
}
}
-void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+void MacroAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
FPURegister cmp2) {
CompareF(sizeField, UN, cmp1, cmp2);
}
-void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
+void MacroAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
if (kArchVariant == kMips64r6) {
bc1nez(target, kDoubleCompareReg);
} else {
@@ -2845,7 +2834,7 @@ void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
}
}
-void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
+void MacroAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
if (kArchVariant == kMips64r6) {
bc1eqz(target, kDoubleCompareReg);
} else {
@@ -2856,7 +2845,7 @@ void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
}
}
-void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
+void MacroAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
@@ -2869,7 +2858,7 @@ void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
}
}
-void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
+void MacroAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
@@ -2882,7 +2871,7 @@ void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
}
}
-void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
+void MacroAssembler::BranchMSA(Label* target, MSABranchDF df,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
{
@@ -2904,7 +2893,7 @@ void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
}
}
-void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
+void MacroAssembler::BranchShortMSA(MSABranchDF df, Label* target,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
if (IsEnabled(MIPS_SIMD)) {
@@ -2961,7 +2950,7 @@ void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
}
}
-void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
+void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(src_low != scratch);
@@ -2970,14 +2959,14 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
mthc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, uint32_t src) {
+void MacroAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(static_cast<int32_t>(src)));
mtc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, uint64_t src) {
+void MacroAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
@@ -3011,7 +3000,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) {
}
}
-void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
@@ -3022,7 +3011,7 @@ void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
}
}
-void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
@@ -3033,7 +3022,7 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
}
}
-void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
+void MacroAssembler::LoadZeroOnCondition(Register rd, Register rs,
const Operand& rt, Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
@@ -3125,7 +3114,7 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
}
}
-void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionNotZero(Register dest,
Register condition) {
if (kArchVariant == kMips64r6) {
seleqz(dest, dest, condition);
@@ -3134,7 +3123,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
}
}
-void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionZero(Register dest,
Register condition) {
if (kArchVariant == kMips64r6) {
selnez(dest, dest, condition);
@@ -3143,7 +3132,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest,
}
}
-void TurboAssembler::LoadZeroIfFPUCondition(Register dest) {
+void MacroAssembler::LoadZeroIfFPUCondition(Register dest) {
if (kArchVariant == kMips64r6) {
dmfc1(kScratchReg, kDoubleCompareReg);
LoadZeroIfConditionNotZero(dest, kScratchReg);
@@ -3152,7 +3141,7 @@ void TurboAssembler::LoadZeroIfFPUCondition(Register dest) {
}
}
-void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) {
+void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest) {
if (kArchVariant == kMips64r6) {
dmfc1(kScratchReg, kDoubleCompareReg);
LoadZeroIfConditionZero(dest, kScratchReg);
@@ -3161,19 +3150,19 @@ void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) {
}
}
-void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
movt(rd, rs, cc);
}
-void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
movf(rd, rs, cc);
}
-void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
+void MacroAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
-void TurboAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); }
+void MacroAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); }
-void TurboAssembler::Ctz(Register rd, Register rs) {
+void MacroAssembler::Ctz(Register rd, Register rs) {
if (kArchVariant == kMips64r6) {
// We don't have an instruction to count the number of trailing zeroes.
// Start by flipping the bits end-for-end so we can count the number of
@@ -3199,7 +3188,7 @@ void TurboAssembler::Ctz(Register rd, Register rs) {
}
}
-void TurboAssembler::Dctz(Register rd, Register rs) {
+void MacroAssembler::Dctz(Register rd, Register rs) {
if (kArchVariant == kMips64r6) {
// We don't have an instruction to count the number of trailing zeroes.
// Start by flipping the bits end-for-end so we can count the number of
@@ -3225,7 +3214,7 @@ void TurboAssembler::Dctz(Register rd, Register rs) {
}
}
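Ctz and Dctz synthesize a trailing-zero count from (d)clz because MIPS has no dedicated instruction; as the comments say, the r6 path bit-reverses the input first. The value both produce is the usual one. A sketch, not part of the patch, using a GCC/Clang builtin for brevity:

#include <cstdint>

// What Dctz returns for a 64-bit input; Ctz is the 32-bit analogue.
int DctzModel(uint64_t x) {
  if (x == 0) return 64;                      // every bit is a trailing zero
  return 63 - __builtin_clzll(x & (~x + 1));  // clz of the lowest set bit
}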
-void TurboAssembler::Popcnt(Register rd, Register rs) {
+void MacroAssembler::Popcnt(Register rd, Register rs) {
ASM_CODE_COMMENT(this);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
@@ -3277,7 +3266,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
srl(rd, rd, shift);
}
-void TurboAssembler::Dpopcnt(Register rd, Register rs) {
+void MacroAssembler::Dpopcnt(Register rd, Register rs) {
ASM_CODE_COMMENT(this);
uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
@@ -3307,7 +3296,7 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) {
dsrl32(rd, rd, shift);
}
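Dpopcnt is the 64-bit parallel bit count from the bit-hacks page cited above; B0 and B1 are the first two masks, and the lines elided by the hunk presumably hold the nibble mask and the 0x01...01 multiplier. A C++ rendering of the algorithm the emitted sequence follows, not part of the patch:

#include <cstdint>

// SWAR population count, as on the Stanford bit-hacks page referenced above.
int DpopcntModel(uint64_t v) {
  v = v - ((v >> 1) & UINT64_C(0x5555555555555555));                  // B0
  v = (v & UINT64_C(0x3333333333333333)) +
      ((v >> 2) & UINT64_C(0x3333333333333333));                      // B1
  v = (v + (v >> 4)) & UINT64_C(0x0F0F0F0F0F0F0F0F);                  // nibble mask
  return static_cast<int>((v * UINT64_C(0x0101010101010101)) >> 56);  // byte sum
}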
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister single_scratch = kScratchDoubleReg.low();
@@ -3327,7 +3316,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
Branch(done, eq, scratch, Operand(zero_reg));
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -3365,19 +3354,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
(cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
-void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
DCHECK_EQ(kArchVariant, kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
-void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchShort(L, bdslot);
@@ -3393,7 +3382,7 @@ void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
@@ -3424,7 +3413,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
RootIndex index, BranchDelaySlot bdslot) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3432,7 +3421,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
Branch(L, cond, rs, Operand(scratch), bdslot);
}
-void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
@@ -3442,13 +3431,13 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
if (bdslot == PROTECT) nop();
}
-void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
bc(offset);
}
-void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
BranchShortHelperR6(offset, nullptr);
@@ -3458,7 +3447,7 @@ void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
}
}
-void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
BranchShortHelperR6(0, L);
} else {
@@ -3466,7 +3455,7 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
}
-int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
} else {
@@ -3475,7 +3464,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
return offset;
}
-Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
Register scratch) {
Register r2 = no_reg;
if (rt.is_reg()) {
@@ -3488,14 +3477,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
+bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
*offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
*scratch = GetRtAsRegisterHelper(rt, *scratch);
@@ -3503,7 +3492,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
return true;
}
-bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -3716,7 +3705,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
return true;
}
-bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
@@ -3853,7 +3842,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
return true;
}
-bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -3876,28 +3865,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
}
}
-void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(0, L, cond, rs, rt, bdslot);
}
-void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
-void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
@@ -3913,7 +3902,7 @@ void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
}
}
-void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
@@ -3936,7 +3925,7 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
}
}
-void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
@@ -3946,13 +3935,13 @@ void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
if (bdslot == PROTECT) nop();
}
-void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
balc(offset);
}
-void TurboAssembler::BranchAndLinkShort(int32_t offset,
+void MacroAssembler::BranchAndLinkShort(int32_t offset,
BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
@@ -3963,7 +3952,7 @@ void TurboAssembler::BranchAndLinkShort(int32_t offset,
}
}
-void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
BranchAndLinkShortHelperR6(0, L);
} else {
@@ -3971,7 +3960,7 @@ void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
}
}
-bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -4113,7 +4102,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
// with the slt instructions. We could use sub or add instead but we would miss
// overflow cases, so we keep slt and add an intermediate third instruction.
-bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
@@ -4203,7 +4192,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
return true;
}
-bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
@@ -4227,7 +4216,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
}
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -4237,11 +4226,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
Ld(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
@@ -4250,7 +4239,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -4279,7 +4268,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(Register target, Condition cond, Register rs,
+void MacroAssembler::Jump(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
@@ -4303,7 +4292,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs,
}
}
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Label skip;
@@ -4320,13 +4309,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4347,13 +4336,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
bind(&skip);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
li(t9, reference);
Jump(t9);
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
-void TurboAssembler::Call(Register target, Condition cond, Register rs,
+void MacroAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
@@ -4392,14 +4381,14 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
}
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t9, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
Call(t9, cond, rs, rt, bd);
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4409,11 +4398,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
return;
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(code->IsExecutable());
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
static_assert(kSystemPointerSize == 8);
static_assert(kSmiTagSize == 1);
@@ -4425,22 +4413,22 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
Ld(builtin_index,
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
Ld(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
Register temp = t9;
switch (options().builtin_call_jump_mode) {
@@ -4455,10 +4443,9 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
IndirectLoadConstant(temp, code);
- Daddu(temp, temp, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(temp);
+ CallCodeObject(temp);
break;
}
case BuiltinCallJumpMode::kPCRelative:
@@ -4467,7 +4454,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
Register temp = t9;
@@ -4484,10 +4471,9 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
IndirectLoadConstant(temp, code);
- Daddu(temp, temp, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(temp);
+ JumpCodeObject(temp);
break;
}
case BuiltinCallJumpMode::kPCRelative:
@@ -4495,7 +4481,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::PatchAndJump(Address target) {
+void MacroAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips64r6) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -4515,14 +4501,14 @@ void TurboAssembler::PatchAndJump(Address target) {
}
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
// Compute the return address in lr to return to after the jump below. The pc
// is already at '+ 8' from the current instruction; but return is after three
@@ -4557,12 +4543,12 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
}
-void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
+void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
}
-void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
@@ -4586,7 +4572,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
}
}
-void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT && (is_int26(offset))) {
BranchShortHelperR6(offset, nullptr);
} else {
@@ -4605,7 +4591,7 @@ void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
}
}
-void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
@@ -4625,7 +4611,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode, Register scratch) {
switch (type) {
case kCountIsInteger: {
@@ -4649,7 +4635,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
ArgumentsCountType type,
ArgumentsCountMode mode,
@@ -4665,7 +4651,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
}
}
-void TurboAssembler::DropAndRet(int drop) {
+void MacroAssembler::DropAndRet(int drop) {
int32_t drop_size = drop * kSystemPointerSize;
DCHECK(is_int31(drop_size));
@@ -4681,7 +4667,7 @@ void TurboAssembler::DropAndRet(int drop) {
}
}
-void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
+void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
@@ -4697,7 +4683,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
}
}
-void TurboAssembler::Drop(int count, Condition cond, Register reg,
+void MacroAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
@@ -4728,28 +4714,28 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
}
}
-void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void MacroAssembler::Call(Label* target) { BranchAndLink(target); }
-void TurboAssembler::LoadAddress(Register dst, Label* target) {
+void MacroAssembler::LoadAddress(Register dst, Label* target) {
uint64_t address = jump_address(target);
li(dst, address);
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
push(scratch);
}
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(handle));
push(scratch);
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order) {
DCHECK(!AreAliased(array, size, scratch, scratch2));
Label loop, entry;
@@ -4809,12 +4795,12 @@ void MacroAssembler::PopStackHandler() {
Sd(a1, MemOperand(scratch));
}
-void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, v0, v1);
@@ -4826,7 +4812,7 @@ void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
}
}
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, a0, a1);
@@ -4838,7 +4824,7 @@ void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
}
}
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f12, src);
} else {
@@ -4850,7 +4836,7 @@ void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
}
}
-void TurboAssembler::MovToFloatResult(DoubleRegister src) {
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f0, src);
} else {
@@ -4862,7 +4848,7 @@ void TurboAssembler::MovToFloatResult(DoubleRegister src) {
}
}
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
const DoubleRegister fparg2 = f13;
@@ -4896,10 +4882,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
}
@@ -4922,13 +4908,11 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Branch(stack_overflow, le, scratch1, Operand(scratch2));
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
- Register scratch,
- Condition cond,
- Label* target) {
- Ld(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- Lwu(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump(
+ Register code_data_container, Register scratch, Condition cond,
+ Label* target) {
+ Lhu(scratch,
+ FieldMemOperand(code_data_container, Code::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Branch(target, cond, scratch, Operand(zero_reg));
}
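The renamed deopt check now reads the 16-bit kind-specific flags straight off the Code object with Lhu and tests one bit, instead of going through a CodeDataContainer first. The test itself reduces to the following; not part of the patch, and the bit index is whatever Code::kMarkedForDeoptimizationBit names:

#include <cstdint>

bool IsMarkedForDeoptimization(uint16_t kind_specific_flags, int marked_bit) {
  return (kind_specific_flags & (uint32_t{1} << marked_bit)) != 0;
}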
@@ -5072,12 +5056,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
- Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(code);
+ CallCodeObject(code);
break;
case InvokeType::kJump:
- Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(code);
+ JumpCodeObject(code);
break;
}
@@ -5145,7 +5127,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
// -----------------------------------------------------------------------------
// Runtime calls.
-void TurboAssembler::DaddOverflow(Register dst, Register left,
+void MacroAssembler::DaddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5176,7 +5158,7 @@ void TurboAssembler::DaddOverflow(Register dst, Register left,
}
}
-void TurboAssembler::DsubOverflow(Register dst, Register left,
+void MacroAssembler::DsubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5207,7 +5189,7 @@ void TurboAssembler::DsubOverflow(Register dst, Register left,
}
}
-void TurboAssembler::MulOverflow(Register dst, Register left,
+void MacroAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5237,7 +5219,7 @@ void TurboAssembler::MulOverflow(Register dst, Register left,
xor_(overflow, overflow, scratch);
}
-void TurboAssembler::DMulOverflow(Register dst, Register left,
+void MacroAssembler::DMulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5267,8 +5249,8 @@ void TurboAssembler::DMulOverflow(Register dst, Register left,
xor_(overflow, overflow, scratch);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// All parameters are on the stack. v0 has the return value after call.
@@ -5283,8 +5265,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -5302,16 +5283,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
@@ -5354,10 +5330,10 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
@@ -5366,7 +5342,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -5423,7 +5399,7 @@ void TurboAssembler::Abort(AbortReason reason) {
}
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
@@ -5434,16 +5410,16 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
Ld(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
}
-void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+void MacroAssembler::Prologue() { PushStandardFrame(a1); }
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Push(ra, fp);
@@ -5458,14 +5434,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
daddiu(sp, fp, 2 * kPointerSize);
Ld(ra, MemOperand(fp, 1 * kPointerSize));
Ld(fp, MemOperand(fp, 0 * kPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
@@ -5514,17 +5490,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- if (save_doubles) {
- // The stack is already aligned to 0 modulo 8 for stores with sdc1.
- int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
- int space = kNumOfSavedRegisters * kDoubleSize;
- Dsubu(sp, sp, Operand(space));
- // Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < kNumOfSavedRegisters; i++) {
- FPURegister reg = FPURegister::from_code(2 * i);
- Sdc1(reg, MemOperand(sp, i * kDoubleSize));
- }
- }
// Reserve place for the return address, stack space and an optional slot
// (used by DirectCEntry to hold the return value if a struct is
@@ -5544,23 +5509,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool do_return,
+void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return,
bool argument_count_is_length) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- // Optionally restore all double registers.
- if (save_doubles) {
- // Remember: we only need to restore every 2nd double FPU value.
- int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
- Dsubu(t8, fp,
- Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumOfSavedRegisters * kDoubleSize));
- for (int i = 0; i < kNumOfSavedRegisters; i++) {
- FPURegister reg = FPURegister::from_code(2 * i);
- Ldc1(reg, MemOperand(t8, i * kDoubleSize));
- }
- }
// Clear top frame.
li(t8,
@@ -5599,7 +5551,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
daddiu(sp, sp, 2 * kPointerSize);
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -5615,7 +5567,7 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_MIPS
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre32Bits()) {
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
@@ -5625,7 +5577,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label,
BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
@@ -5645,12 +5597,12 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (v8_flags.debug_code) Check(cc, reason, rs, rt);
}
-void TurboAssembler::AssertNotSmi(Register object) {
+void MacroAssembler::AssertNotSmi(Register object) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -5661,7 +5613,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
}
}
-void TurboAssembler::AssertSmi(Register object) {
+void MacroAssembler::AssertSmi(Register object) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -5791,7 +5743,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
#endif // V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -5837,12 +5789,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
}
}
-void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
-void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -5888,12 +5840,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
}
}
-void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
-void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -5938,12 +5890,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
}
}
-void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_d(dst, src1, src2);
}
-void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
ASM_CODE_COMMENT(this);
if (src1 == src2) {
@@ -5988,14 +5940,14 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
}
}
-void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_d(dst, src1, src2);
}
static const int kRegisterPassedArguments = 8;
-int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
int num_args = num_reg_arguments + num_double_arguments;
@@ -6008,7 +5960,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
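With eight arguments passed in registers (kRegisterPassedArguments above), only the overflow spills to the stack. The hunk elides the middle of the function, but the obvious reading of what it computes is, as a sketch and not part of the patch:

int CalculateStackPassedWordsModel(int num_reg_arguments,
                                   int num_double_arguments) {
  const int kRegisterPassedArguments = 8;
  int num_args = num_reg_arguments + num_double_arguments;
  return num_args > kRegisterPassedArguments
             ? num_args - kRegisterPassedArguments
             : 0;
}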
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
ASM_CODE_COMMENT(this);
@@ -6036,38 +5988,44 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t9, function);
- CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
@@ -6102,47 +6060,57 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// stays correct.
{
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (function != t9) {
- mov(t9, function);
- function = t9;
- }
-
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register pc_scratch = t1;
- Register scratch = t2;
- DCHECK(!AreAliased(pc_scratch, scratch, function));
-
- mov(scratch, ra);
- nal();
- mov(pc_scratch, ra);
- mov(ra, scratch);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ if (function != t9) {
+ mov(t9, function);
+ function = t9;
+ }
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- Sd(pc_scratch, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- Sd(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- Sd(pc_scratch, MemOperand(scratch));
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Sd(fp, MemOperand(scratch));
+ // Save the frame pointer and PC so that the stack layout remains
+ // iterable, even without an ExitFrame which normally exists between JS
+ // and C frames. 't' registers are caller-saved so this is safe as a
+ // scratch register.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ mov(scratch, ra);
+ nal();
+ mov(pc_scratch, ra);
+ mov(ra, scratch);
+
+ // See x64 code for reasoning about how to address the isolate data
+ // fields.
+ if (root_array_available()) {
+ Sd(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ Sd(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Sd(pc_scratch, MemOperand(scratch));
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(fp, MemOperand(scratch));
+ }
}
Call(function);
- // We don't unset the PC; the FP is the source of truth.
- if (root_array_available()) {
- Sd(zero_reg, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Sd(zero_reg, MemOperand(scratch));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ Sd(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register scratch = t2;
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(zero_reg, MemOperand(scratch));
+ }
}
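
When set_isolate_data_slots is kYes, the hunk above brackets the C call by publishing the caller's PC and FP into IsolateData (so the stack stays iterable across the JS-to-C transition) and clears only the FP afterwards. A minimal C++ model of that bookkeeping, purely illustrative — the struct and function below are not part of V8:

#include <cstdint>

struct FastCCallSlots {      // stands in for the fast_c_call_caller_* fields
  uintptr_t caller_pc = 0;
  uintptr_t caller_fp = 0;
};

void CallWithIterableStack(FastCCallSlots* slots, uintptr_t pc, uintptr_t fp,
                           void (*fn)()) {
  slots->caller_pc = pc;  // published before the call
  slots->caller_fp = fp;
  fn();                   // the actual C call
  slots->caller_fp = 0;   // PC is left as-is; FP alone is the source of truth
}
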
int stack_passed_arguments =
@@ -6160,7 +6128,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
ASM_CODE_COMMENT(this);
And(scratch, object, Operand(~kPageAlignmentMask));
@@ -6184,7 +6152,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
// This push on ra and the pop below together ensure that we restore the
// register ra, which is needed while computing the code start address.
push(ra);
@@ -6204,7 +6172,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -6217,62 +6185,25 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
+void MacroAssembler::LoadCodeEntry(Register destination,
+ Register code_data_container_object) {
ASM_CODE_COMMENT(this);
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- Register scratch = kScratchReg;
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
-
- // Not an off-heap trampoline object, the entry point is at
- // Code::raw_instruction_start().
- Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- Branch(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- Dlsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
- Ld(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- }
-}
-
-void TurboAssembler::CallCodeObject(Register code_object) {
+ Ld(destination,
+ FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset));
+}
+
+void MacroAssembler::CallCodeObject(Register code_data_container_object) {
ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
- Call(code_object);
+ LoadCodeEntry(code_data_container_object, code_data_container_object);
+ Call(code_data_container_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_data_container_object,
+ JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
- Jump(code_object);
+ LoadCodeEntry(code_data_container_object, code_data_container_object);
+ Jump(code_data_container_object);
}
namespace {
@@ -6298,8 +6229,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1,
- ne, &heal_optimized_code_slot);
+ __ TestCodeIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1,
+ ne, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -6309,8 +6240,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Daddu(a2, optimized_code_entry,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(a2, optimized_code_entry);
__ Jump(a2);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -6370,7 +6300,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ LoadCodeEntry(a2, v0);
Jump(a2);
}
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 77897e1efd..2494edb217 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -135,7 +135,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, AbortReason reason, Register rs,
- Operand rt) NOOP_UNLESS_DEBUG_CODE
+ Operand rt) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
@@ -263,9 +263,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
- void CallCodeObject(Register code_object);
- void JumpCodeObject(Register code_object,
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_data_container_object);
+ void CallCodeObject(Register code_data_container_object);
+ void JumpCodeObject(Register code_data_container_object,
JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
@@ -536,8 +537,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -564,12 +565,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
void MovFromFloatResult(DoubleRegister dst);
void MovFromFloatParameter(DoubleRegister dst);
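
The new SetIsolateDataSlots parameter (defaulting to kYes) lets a caller opt out of the PC/FP bookkeeping for leaf calls that can never trigger a stack walk. A hedged usage sketch — the helper name, external reference and argument counts are invented for illustration; only the PrepareCallCFunction/CallCFunction signatures come from the header above:

void EmitFastLeafCall(MacroAssembler* masm, ExternalReference leaf_fn) {
  masm->PrepareCallCFunction(2, 0, t0);  // 2 integer args, 0 double args
  masm->CallCFunction(leaf_fn, 2, 0,
                      MacroAssembler::SetIsolateDataSlots::kNo);
}
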
@@ -906,79 +918,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
- protected:
- inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
- inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
-
- private:
- bool has_double_zero_reg_set_ = false;
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
- FPURegister cmp2);
-
- void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
- FPURegister cmp2);
-
- void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
- MSARegister wt, BranchDelaySlot bd = PROTECT);
-
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-
- // TODO(mips) Reorder parameters so out parameters come last.
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
- Register* scratch, const Operand& rt);
-
- void BranchShortHelperR6(int32_t offset, Label* L);
- void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
- bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
- bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
-
- void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
- void BranchAndLinkShortHelper(int16_t offset, Label* L,
- BranchDelaySlot bdslot);
- void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- void BranchLong(Label* L, BranchDelaySlot bdslot);
- void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-
- template <typename RoundFunc>
- void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
- RoundFunc round);
-
- template <typename RoundFunc>
- void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
- RoundFunc round);
-
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -997,9 +936,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
- void TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
- Register scratch,
- Condition cond, Label* target);
+ void TestCodeIsMarkedForDeoptimizationAndJump(Register code_data_container,
+ Register scratch,
+ Condition cond, Label* target);
Operand ClearedValue() const;
void PushRoot(RootIndex index) {
@@ -1080,23 +1019,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
- void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
- MSARegister wt, BranchDelaySlot bd = PROTECT);
-
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
- // save_doubles - saves FPU registers on stack, currently disabled.
// stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles, int stack_space = 0,
- StackFrame::Type frame_type = StackFrame::EXIT);
+ void EnterExitFrame(int stack_space, StackFrame::Type frame_type);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool do_return = NO_EMIT_RETURN,
+ void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
// Make sure the stack is aligned. Only emits code in debug mode.
- void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE
+ void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE;
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -1152,20 +1085,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls.
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump).
@@ -1176,9 +1106,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
BranchDelaySlot bd = PROTECT,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -1222,32 +1149,32 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
// ---------------------------------------------------------------------------
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register scratch2);
@@ -1268,17 +1195,84 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
+ protected:
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
private:
+ bool has_double_zero_reg_set_ = false;
+
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
InvokeType type);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2);
+
+ void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+ FPURegister cmp2);
+
+ void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
+ MSARegister wt, BranchDelaySlot bd = PROTECT);
+
+ void CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
+
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+
+ template <typename RoundFunc>
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
+ RoundFunc round);
+
+ template <typename RoundFunc>
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
+ RoundFunc round);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 00feb1c01c..f3abb8fbec 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -295,7 +295,6 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 85fea1c902..8690b0f7df 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -23,7 +23,8 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind, BytecodeOffset osr_offset)
- : code_kind_(code_kind),
+ : isolate_unsafe_(isolate),
+ code_kind_(code_kind),
osr_offset_(osr_offset),
zone_(zone),
optimization_id_(isolate->NextOptimizationId()) {
@@ -51,13 +52,15 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
OptimizedCompilationInfo::OptimizedCompilationInfo(
base::Vector<const char> debug_name, Zone* zone, CodeKind code_kind)
- : code_kind_(code_kind),
+ : isolate_unsafe_(nullptr),
+ code_kind_(code_kind),
zone_(zone),
optimization_id_(kNoOptimizationId),
debug_name_(debug_name) {
SetTracingFlags(
PassesFilter(debug_name, base::CStrVector(v8_flags.trace_turbo_filter)));
ConfigureFlags();
+ DCHECK(!has_shared_info());
}
void OptimizedCompilationInfo::ConfigureFlags() {
@@ -103,7 +106,8 @@ void OptimizedCompilationInfo::ConfigureFlags() {
OptimizedCompilationInfo::~OptimizedCompilationInfo() {
if (disable_future_optimization() && has_shared_info()) {
- shared_info()->DisableOptimization(bailout_reason());
+ DCHECK_NOT_NULL(isolate_unsafe_);
+ shared_info()->DisableOptimization(isolate_unsafe_, bailout_reason());
}
}
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 560db8484c..5a9c4a2491 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -250,6 +250,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Compilation flags.
unsigned flags_ = 0;
+ // Take care when accessing this on any background thread.
+ Isolate* const isolate_unsafe_;
+
const CodeKind code_kind_;
Builtin builtin_ = Builtin::kNoBuiltinId;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index 898f132520..4bead935f3 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -80,7 +80,7 @@ Address RelocInfo::target_internal_reference_address() {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -148,7 +148,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc,
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
- return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+ return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
@@ -183,15 +183,15 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_,
- V8HeapCompressionScheme::CompressTagged(target.ptr()),
+ V8HeapCompressionScheme::CompressObject(target.ptr()),
icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 9047c47086..a6bc4bc7ff 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -246,13 +246,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
// Emit constant pool if necessary.
int constant_pool_size = EmitConstantPool();
@@ -480,7 +480,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Load the position of the label relative to the generated code object
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
- int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ int32_t offset =
+ target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
PatchingAssembler patcher(
options(), reinterpret_cast<byte*>(buffer_start_ + pos), 2);
patcher.bitwise_mov32(dst, offset);
@@ -1511,7 +1512,8 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
int position = link(label);
if (label->is_bound()) {
// Load the position of the label relative to the generated code object.
- mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
+ mov(dst,
+ Operand(position + InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
// Encode internal reference to unbound label. We use a dummy opcode
// such that it won't collide with any opcode that might appear in the
@@ -2204,7 +2206,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
- RelocInfo rinfo(pc, rmode, it->data(), Code());
+ RelocInfo rinfo(pc, rmode, it->data(), Code(), InstructionStream());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 21a439a85e..6cff641b5b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -621,7 +621,7 @@ class Assembler : public AssemblerBase {
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
- // Code generation
+ // InstructionStream generation
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -1383,7 +1383,7 @@ class Assembler : public AssemblerBase {
bool is_trampoline_emitted() const { return trampoline_emitted_; }
- // Code generation
+ // InstructionStream generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -1570,7 +1570,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
private:
friend class Assembler;
- friend class TurboAssembler;
+ friend class MacroAssembler;
Assembler* assembler_;
RegList old_available_;
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 80364f48aa..6ddb3da411 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -81,9 +81,8 @@ const int kNoRegister = -1;
const int kLoadPtrMaxReachBits = 15;
const int kLoadDoubleMaxReachBits = 15;
-// Actual value of root register is offset from the root array's start
+// The actual value of the kRootRegister is offset from the IsolateData's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 128;
// sign-extend the least significant 5-bits of value <imm>
@@ -129,9 +128,73 @@ enum Condition {
ordered = 7,
overflow = 8, // Summary overflow
nooverflow = 9,
- al = 10 // Always.
+ al = 10, // Always.
+
+ // Unified cross-platform condition names/aliases.
+ // Do not set unsigned constants equal to their signed variants.
+ // We need to be able to differentiate between signed and unsigned enum
+ // constants in order to emit the right instructions (i.e CmpS64 vs CmpU64).
+ kEqual = eq,
+ kNotEqual = ne,
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+ kUnsignedLessThan = 11,
+ kUnsignedGreaterThan = 12,
+ kUnsignedLessThanEqual = 13,
+ kUnsignedGreaterThanEqual = 14,
+ kOverflow = overflow,
+ kNoOverflow = nooverflow,
+ kZero = 15,
+ kNotZero = 16,
};
+inline Condition to_condition(Condition cond) {
+ switch (cond) {
+ case kUnsignedLessThan:
+ return lt;
+ case kUnsignedGreaterThan:
+ return gt;
+ case kUnsignedLessThanEqual:
+ return le;
+ case kUnsignedGreaterThanEqual:
+ return ge;
+ case kZero:
+ return eq;
+ case kNotZero:
+ return ne;
+ default:
+ break;
+ }
+ return cond;
+}
+
+inline bool is_signed(Condition cond) {
+ switch (cond) {
+ case kEqual:
+ case kNotEqual:
+ case kLessThan:
+ case kGreaterThan:
+ case kLessThanEqual:
+ case kGreaterThanEqual:
+ case kOverflow:
+ case kNoOverflow:
+ case kZero:
+ case kNotZero:
+ return true;
+
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThan:
+ case kUnsignedLessThanEqual:
+ case kUnsignedGreaterThanEqual:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
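
The unified aliases keep the unsigned variants as distinct enum values so code generation can still pick the right compare instruction: is_signed() drives that choice and to_condition() folds the alias back to a native PPC condition. A hedged sketch of how the pair might be used together — the helper name is invented, and it assumes the register-register CmpS64/CmpU64 overloads of the PPC MacroAssembler:

Condition EmitCompare(MacroAssembler* masm, Condition cond, Register lhs,
                      Register rhs) {
  if (is_signed(cond)) {
    masm->CmpS64(lhs, rhs);   // signed compare for kLessThan, kEqual, ...
  } else {
    masm->CmpU64(lhs, rhs);   // unsigned compare for kUnsignedLessThan, ...
  }
  return to_condition(cond);  // e.g. kUnsignedLessThan -> lt for the branch
}
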
@@ -1965,6 +2028,8 @@ using Instr = uint32_t;
V(vmladduhm, VMLADDUHM, 0x10000022) \
/* Vector Select */ \
V(vsel, VSEL, 0x1000002A) \
+ /* Vector Multiply-Sum Mixed Byte Modulo */ \
+ V(vmsummbm, VMSUMMBM, 0x10000025) \
/* Vector Multiply-Sum Signed Halfword Modulo */ \
V(vmsumshm, VMSUMSHM, 0x10000028) \
/* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
@@ -1979,8 +2044,6 @@ using Instr = uint32_t;
V(vmaddfp, VMADDFP, 0x1000002E) \
/* Vector Multiply-High-Add Signed Halfword Saturate */ \
V(vmhaddshs, VMHADDSHS, 0x10000020) \
- /* Vector Multiply-Sum Mixed Byte Modulo */ \
- V(vmsummbm, VMSUMMBM, 0x10000025) \
/* Vector Multiply-Sum Signed Halfword Saturate */ \
V(vmsumshs, VMSUMSHS, 0x10000029) \
/* Vector Multiply-Sum Unsigned Byte Modulo */ \
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
index b7b1ea515a..a60716648c 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -100,6 +100,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return r7; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return r8; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return r8; }
// static
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 5e52d4f9ac..c60b26f307 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes =
} // namespace
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -72,7 +72,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
Register scratch2, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
@@ -91,7 +91,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
Register scratch2, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
@@ -109,29 +109,28 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
return bytes;
}
-void TurboAssembler::Jump(Register target) {
+void MacroAssembler::Jump(Register target) {
mtctr(target);
bctr();
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
- destination,
- FieldMemOperand(destination,
- FixedArray::OffsetOfElementAt(constant_index)),
- r0);
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)),
+ r0);
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
LoadU64(destination, MemOperand(kRootRegister, offset), r0);
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
mr(destination, kRootRegister);
@@ -140,7 +139,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -170,7 +169,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
Label skip;
@@ -183,13 +182,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
CRegister cr) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond, cr);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -204,7 +203,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Move(scratch, reference);
@@ -218,7 +217,7 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
Jump(scratch);
}
-void TurboAssembler::Call(Register target) {
+void MacroAssembler::Call(Register target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// branch via link register and set LK bit for return point
mtctr(target);
@@ -236,7 +235,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(cond == al);
@@ -252,7 +251,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
bctrl();
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -264,12 +263,11 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
CallBuiltin(builtin, cond);
return;
}
- DCHECK(code->IsExecutable());
int32_t target_index = AddCodeTarget(code);
Call(static_cast<Address>(target_index), rmode, cond);
}
-void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
@@ -310,7 +308,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
CRegister cr) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
@@ -353,27 +351,22 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
}
}
-void TurboAssembler::Drop(int count) {
+void MacroAssembler::Drop(int count) {
if (count > 0) {
AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
}
}
-void TurboAssembler::Drop(Register count, Register scratch) {
+void MacroAssembler::Drop(Register count, Register scratch) {
ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
add(sp, sp, scratch);
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
- Register scratch1,
- Register scratch2) {
- LoadTaggedPointerField(scratch1,
- FieldMemOperand(codet, Code::kCodeDataContainerOffset),
- scratch2);
- LoadS32(
- scratch1,
- FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
- scratch2);
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+ Register scratch1,
+ Register scratch2) {
+ LoadU16(scratch1, FieldMemOperand(code, Code::kKindSpecificFlagsOffset),
+ scratch2);
TestBit(scratch1, Code::kMarkedForDeoptimizationBit, scratch2);
}
@@ -382,19 +375,19 @@ Operand MacroAssembler::ClearedValue() const {
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}
-void TurboAssembler::Call(Label* target) { b(target, SetLK); }
+void MacroAssembler::Call(Label* target) { b(target, SetLK); }
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
mov(r0, Operand(smi));
push(r0);
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order) {
Label loop, done;
@@ -426,7 +419,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
@@ -444,7 +437,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
}
}
-void TurboAssembler::Move(Register dst, ExternalReference reference) {
+void MacroAssembler::Move(Register dst, ExternalReference reference) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -455,20 +448,20 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
mov(dst, Operand(reference));
}
-void TurboAssembler::Move(Register dst, Register src, Condition cond) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
DCHECK(cond == al);
if (dst != src) {
mr(dst, src);
}
}
-void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (dst != src) {
fmr(dst, src);
}
}
-void TurboAssembler::MultiPush(RegList regs, Register location) {
+void MacroAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
@@ -481,7 +474,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
}
}
-void TurboAssembler::MultiPop(RegList regs, Register location) {
+void MacroAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -493,7 +486,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -507,7 +500,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
+void MacroAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
Register location) {
int16_t num_to_push = simd_regs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
@@ -522,7 +515,7 @@ void TurboAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
}
}
-void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -535,7 +528,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
+void MacroAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
Register location) {
int16_t stack_offset = 0;
@@ -549,7 +542,7 @@ void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
addi(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs,
+void MacroAssembler::MultiPushF64AndV128(DoubleRegList dregs,
Simd128RegList simd_regs,
Register scratch1, Register scratch2,
Register location) {
@@ -586,7 +579,7 @@ void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs,
#endif
}
-void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs,
+void MacroAssembler::MultiPopF64AndV128(DoubleRegList dregs,
Simd128RegList simd_regs,
Register scratch1, Register scratch2,
Register location) {
@@ -617,34 +610,37 @@ void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs,
MultiPopDoubles(dregs);
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
+ ASM_CODE_COMMENT(this);
+ if (CanBeImmediate(index)) {
+ mov(destination, Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
+ return;
+ }
+ LoadRoot(destination, index);
+}
+
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
+ if (CanBeImmediate(index)) {
+ DecompressTagged(destination, ReadOnlyRootPtr(index));
+ return;
+ }
LoadU64(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch) {
+void MacroAssembler::LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
}
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch) {
- if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
- } else {
- LoadU64(destination, field_operand, scratch);
- }
-}
-
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
Register scratch) {
if (SmiValuesAre31Bits()) {
LoadU32(dst, src, scratch);
@@ -655,7 +651,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
SmiUntag(dst, rc);
}
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
@@ -667,53 +663,43 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
Register src) {
RecordComment("[ DecompressTaggedSigned");
ZeroExtWord32(destination, src);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedSigned");
LoadU32(destination, field_operand, r0);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- Register source) {
- RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
+ RecordComment("[ DecompressTagged");
ZeroExtWord32(destination, source);
- add(destination, destination, kRootRegister);
+ add(destination, destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- MemOperand field_operand) {
- RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTagged");
LoadU32(destination, field_operand, r0);
- add(destination, destination, kRootRegister);
+ add(destination, destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
-void TurboAssembler::DecompressAnyTagged(Register destination,
- MemOperand field_operand) {
- RecordComment("[ DecompressAnyTagged");
- LoadU32(destination, field_operand, r0);
- add(destination, destination, kRootRegister);
- RecordComment("]");
-}
-
-void TurboAssembler::DecompressAnyTagged(Register destination,
- Register source) {
- RecordComment("[ DecompressAnyTagged");
- ZeroExtWord32(destination, source);
- add(destination, destination, kRootRegister);
- RecordComment("]");
+void MacroAssembler::DecompressTagged(const Register& destination,
+ Tagged_t immediate) {
+ ASM_CODE_COMMENT(this);
+ AddS64(destination, kPtrComprCageBaseRegister,
+ Operand(immediate, RelocInfo::Mode::NO_INFO));
}
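
Decompression now adds the dedicated pointer-compression cage base register rather than kRootRegister, since the cage base and the isolate root are no longer interchangeable. In plain C++ the operation is just a zero-extension plus an add — illustrative only, not a V8 API:

#include <cstdint>

uintptr_t DecompressTaggedValue(uintptr_t cage_base, uint32_t compressed) {
  // ZeroExtWord32 + add(destination, destination, kPtrComprCageBaseRegister)
  return cage_base + static_cast<uintptr_t>(compressed);
}
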
-void TurboAssembler::LoadTaggedSignedField(Register destination,
+void MacroAssembler::LoadTaggedSignedField(Register destination,
MemOperand field_operand,
Register scratch) {
if (COMPRESS_POINTERS_BOOL) {
@@ -762,17 +748,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, slot_address));
@@ -795,7 +781,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -818,7 +804,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -847,7 +833,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
SaveFPRegsMode fp_mode, SmiCheck smi_check) {
DCHECK(!AreAliased(object, value, slot_address));
if (v8_flags.debug_code) {
- LoadTaggedPointerField(r0, MemOperand(slot_address));
+ LoadTaggedField(r0, MemOperand(slot_address));
CmpS64(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -866,8 +852,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, &done);
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
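
The two CheckPageFlag tests above skip the barrier unless both pages are interesting: the value's page must be able to receive interesting pointers and the object's page must track outgoing ones. The predicate restated in plain C++ — the mask constants are stand-ins invented for the sketch; the real ones live on MemoryChunk:

#include <cstdint>

constexpr uintptr_t kToHereInteresting = 1u << 1;    // stand-in masks
constexpr uintptr_t kFromHereInteresting = 1u << 2;

bool NeedsRecordWriteStub(uintptr_t value_page_flags,
                          uintptr_t object_page_flags) {
  return (value_page_flags & kToHereInteresting) != 0 &&
         (object_page_flags & kFromHereInteresting) != 0;
}
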
@@ -895,7 +880,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
mflr(r0);
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
@@ -918,7 +903,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
mflr(r0);
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
@@ -942,7 +927,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(kJavaScriptCallArgCountRegister);
}
-void TurboAssembler::RestoreFrameStateForTailCall() {
+void MacroAssembler::RestoreFrameStateForTailCall() {
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
LoadU64(kConstantPoolRegister,
MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -953,61 +938,61 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
mtlr(r0);
}
-void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
fsub(dst, src, kDoubleRegZero);
}
-void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
MovIntToDouble(dst, src, r0);
fcfid(dst, dst);
}
-void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
+void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
DoubleRegister dst) {
MovUnsignedIntToDouble(dst, src, r0);
fcfid(dst, dst);
}
-void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
MovIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
-void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
+void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
DoubleRegister dst) {
MovUnsignedIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
#if V8_TARGET_ARCH_PPC64
-void TurboAssembler::ConvertInt64ToDouble(Register src,
+void MacroAssembler::ConvertInt64ToDouble(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfid(double_dst, double_dst);
}
-void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
+void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfidus(double_dst, double_dst);
}
-void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
+void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfidu(double_dst, double_dst);
}
-void TurboAssembler::ConvertInt64ToFloat(Register src,
+void MacroAssembler::ConvertInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfids(double_dst, double_dst);
}
#endif
-void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
+void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
const Register dst_hi,
#endif
@@ -1030,7 +1015,7 @@ void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
}
#if V8_TARGET_ARCH_PPC64
-void TurboAssembler::ConvertDoubleToUnsignedInt64(
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
if (rounding_mode == kRoundToZero) {
@@ -1046,7 +1031,7 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
#endif
#if !V8_TARGET_ARCH_PPC64
-void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1071,7 +1056,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1093,7 +1078,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1118,7 +1103,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1140,7 +1125,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high, shift));
@@ -1164,7 +1149,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1187,36 +1172,40 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
}
#endif
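
On 32-bit PPC the ShiftLeftPair/ShiftRightPair/ShiftRightAlgPair helpers above synthesize a 64-bit shift from a {high, low} register pair, spilling bits across the halves and special-casing shift amounts of 32 or more. A portable C++ sketch of the left-shift case only (illustrative, not the V8 code):

#include <cstdint>
#include <cassert>

// Shift a 64-bit value held as {high, low} 32-bit halves left by 0..63 bits,
// mirroring the spill between halves that ShiftLeftPair performs with PPC
// register moves. Purely illustrative.
void ShiftLeftPair(uint32_t& dst_high, uint32_t& dst_low,
                   uint32_t src_high, uint32_t src_low, unsigned shift) {
  shift &= 63;
  if (shift == 0) {
    dst_high = src_high;
    dst_low = src_low;
  } else if (shift < 32) {
    dst_high = (src_high << shift) | (src_low >> (32 - shift));
    dst_low = src_low << shift;
  } else {  // 32..63: the low half moves entirely into the high half.
    dst_high = src_low << (shift - 32);
    dst_low = 0;
  }
}

int main() {
  uint32_t hi, lo;
  ShiftLeftPair(hi, lo, 0x00000001u, 0x80000000u, 1);
  assert(hi == 0x00000003u && lo == 0x00000000u);
  return 0;
}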
-void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
// Builtins do not use the constant pool (see is_constant_pool_available).
- static_assert(Code::kOnHeapBodyIsContiguous);
-
- lwz(r0, MemOperand(code_target_address,
- Code::kInstructionSizeOffset - Code::kHeaderSize));
- lwz(kConstantPoolRegister,
- MemOperand(code_target_address,
- Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
- add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
- add(kConstantPoolRegister, kConstantPoolRegister, r0);
-}
-
-void TurboAssembler::LoadPC(Register dst) {
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
+
+ // TODO(miladfarca): Pass in scratch registers.
+ LoadU64(ip, FieldMemOperand(code_target_address, Code::kCodeEntryPointOffset),
+ r0);
+ LoadU32(r0,
+ FieldMemOperand(code_target_address, Code::kInstructionSizeOffset),
+ r0);
+ add(ip, r0, ip);
+ LoadU32(kConstantPoolRegister,
+ FieldMemOperand(code_target_address, Code::kConstantPoolOffsetOffset),
+ r0);
+ add(kConstantPoolRegister, ip, kConstantPoolRegister);
+}
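
The rewritten LoadConstantPoolPointerRegisterFromCodeTargetAddress derives the embedded constant pool address as the sum of the Code object's entry point, its instruction size and its constant-pool offset, per the three loads and two adds above. The same arithmetic as a trivial scalar sketch (field meanings taken from those loads; values invented):

#include <cstdint>
#include <cassert>

// Illustrative only: pool address = entry point + instruction size + pool
// offset, the sum computed above with LoadU64/LoadU32/add.
uintptr_t ConstantPoolAddress(uintptr_t code_entry_point,
                              uint32_t instruction_size,
                              uint32_t constant_pool_offset) {
  return code_entry_point + instruction_size + constant_pool_offset;
}

int main() {
  assert(ConstantPoolAddress(0x1000, 0x200, 0x40) == 0x1240);
  return 0;
}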
+
+void MacroAssembler::LoadPC(Register dst) {
b(4, SetLK);
mflr(dst);
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
mflr(r0);
LoadPC(dst);
subi(dst, dst, Operand(pc_offset() - kInstrSize));
mtlr(r0);
}
-void TurboAssembler::LoadConstantPoolPointerRegister() {
+void MacroAssembler::LoadConstantPoolPointerRegister() {
//
// Builtins do not use the constant pool (see is_constant_pool_available).
- static_assert(Code::kOnHeapBodyIsContiguous);
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
LoadPC(kConstantPoolRegister);
int32_t delta = -pc_offset() + 4;
@@ -1224,7 +1213,7 @@ void TurboAssembler::LoadConstantPoolPointerRegister() {
ConstantPoolPosition(), delta);
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
mov(r11, Operand(StackFrame::TypeToMarker(type)));
@@ -1236,7 +1225,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
}
}
-void TurboAssembler::Prologue() {
+void MacroAssembler::Prologue() {
PushStandardFrame(r4);
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
// base contains prologue address
@@ -1245,7 +1234,7 @@ void TurboAssembler::Prologue() {
}
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes =
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -1271,7 +1260,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
ArgumentsCountType type,
ArgumentsCountMode mode) {
@@ -1286,7 +1275,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
}
}
-void TurboAssembler::EnterFrame(StackFrame::Type type,
+void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (V8_EMBEDDED_CONSTANT_POOL_BOOL && load_constant_pool_pointer_reg) {
// Push type explicitly so we can leverage the constant pool.
@@ -1310,7 +1299,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
#endif // V8_ENABLE_WEBASSEMBLY
}
-int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// r3: preserved
// r4: preserved
@@ -1349,7 +1338,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// in the fp register (r31)
// Then - we buy a new frame
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -1385,15 +1374,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreU64(cp, MemOperand(r8));
- // Optionally save all volatile double registers.
- if (save_doubles) {
- MultiPushDoubles(kCallerSavedDoubles);
- // Note that d0 will be accessible at
- // fp - ExitFrameConstants::kFrameSize -
- // kNumCallerSavedDoubles * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
AddS64(sp, sp, Operand(-stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
@@ -1415,7 +1395,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1431,18 +1411,9 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+void MacroAssembler::LeaveExitFrame(Register argument_count,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
- // Optionally restore all double registers.
- if (save_doubles) {
- // Calculate the stack location of the saved doubles and restore them.
- const int kNumRegs = kNumCallerSavedDoubles;
- const int offset =
- (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
- AddS64(r6, fp, Operand(-offset), r0);
- MultiPopDoubles(kCallerSavedDoubles, r6);
- }
// Clear top frame.
li(r6, Operand::Zero());
@@ -1474,11 +1445,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d1);
}
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d1);
}
@@ -1489,10 +1460,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
LoadU64(destination, MemOperand(kRootRegister, offset), r0);
}
@@ -1647,8 +1618,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(
- code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -1675,10 +1645,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r5;
Register temp_reg = r7;
- LoadTaggedPointerField(
+ LoadTaggedField(
temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
- LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
- r0);
+ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
LoadU16(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1698,8 +1667,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r4);
// Get the function and setup the context.
- LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
- r0);
+ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -1777,11 +1745,15 @@ void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
DCHECK(obj != r0);
- LoadRoot(r0, index);
+ if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
+ LoadTaggedRoot(r0, index);
+ } else {
+ LoadRoot(r0, index);
+ }
CmpS64(obj, r0);
}
-void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -1813,7 +1785,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}
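
AddAndCheckForOverflow relies on the classic sign-bit identity: a signed addition overflows exactly when the result differs in sign from both operands, which is what the xor/and_ sequence above leaves in overflow_dst. The same identity in scalar C++ (illustrative only):

#include <cstdint>
#include <cassert>

// Signed add overflow iff the result's sign differs from both operands',
// i.e. ((sum ^ left) & (sum ^ right)) has its sign bit set.
bool AddOverflows(int64_t left, int64_t right) {
  uint64_t sum = static_cast<uint64_t>(left) + static_cast<uint64_t>(right);
  uint64_t mask = (sum ^ static_cast<uint64_t>(left)) &
                  (sum ^ static_cast<uint64_t>(right));
  return (mask >> 63) != 0;
}

int main() {
  assert(AddOverflows(INT64_MAX, 1));
  assert(!AddOverflows(INT64_MAX, -1));
  assert(AddOverflows(INT64_MIN, -1));
  assert(!AddOverflows(-5, 3));
  return 0;
}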
-void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
intptr_t right,
Register overflow_dst,
Register scratch) {
@@ -1838,7 +1810,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
}
}
-void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
+void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -1869,7 +1841,7 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
}
-void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, DoubleRegister scratch) {
Label check_zero, return_left, return_right, return_nan, done;
fcmpu(lhs, rhs);
@@ -1917,7 +1889,7 @@ void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
bind(&done);
}
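
MinF64/MaxF64 need the explicit return_nan and check_zero paths because a plain fcmpu-based select would neither propagate NaN nor order -0 below +0. A scalar sketch of the minimum semantics those branches implement (illustrative, not the V8 code):

#include <cmath>
#include <cassert>

// Minimum with NaN propagation and -0 treated as smaller than +0, matching
// the check_zero / return_nan branches laid out in MinF64 above.
double MinWithNaNAndSignedZero(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return lhs + rhs;  // quiet NaN out
  if (lhs == 0.0 && rhs == 0.0) {
    // The comparison says the zeros are equal; prefer -0 if either is -0.
    return std::signbit(lhs) ? lhs : rhs;
  }
  return lhs < rhs ? lhs : rhs;
}

int main() {
  assert(std::isnan(MinWithNaNAndSignedZero(std::nan(""), 1.0)));
  assert(std::signbit(MinWithNaNAndSignedZero(0.0, -0.0)));
  assert(MinWithNaNAndSignedZero(2.0, -3.0) == -3.0);
  return 0;
}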
-void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, DoubleRegister scratch) {
Label check_zero, return_left, return_right, return_nan, done;
fcmpu(lhs, rhs);
@@ -1963,7 +1935,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
ble(on_in_range);
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -1996,7 +1968,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
bind(&done);
}
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister double_scratch = kScratchDoubleReg;
@@ -2043,8 +2015,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// runtime to clear it.
{
UseScratchRegisterScope temps(masm);
- __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire(),
- scratch);
+ __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire(),
+ scratch);
__ bne(&heal_optimized_code_slot, cr0);
}
@@ -2053,7 +2025,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
r8);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadCodeObjectEntry(r5, optimized_code_entry);
+ __ LoadCodeEntry(r5, optimized_code_entry);
__ Jump(r5);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -2161,15 +2133,15 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- LoadAnyTaggedField(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset),
- r0);
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset),
+ r0);
TailCallOptimizedCodeSlot(this, optimized_code_entry, r9);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r3 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -2184,10 +2156,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
mov(r3, Operand(num_arguments));
Move(r4, ExternalReference::Create(f));
#if V8_TARGET_ARCH_PPC64
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
#else
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1);
#endif
Call(code, RelocInfo::CODE_TARGET);
}
@@ -2204,16 +2175,11 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
CmpS32(in, Operand(kClearedWeakHeapObjectLower32), r0);
@@ -2253,7 +2219,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
+void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L, cr);
Abort(reason);
@@ -2261,7 +2227,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -2305,26 +2271,26 @@ void TurboAssembler::Abort(AbortReason reason) {
// will not return here
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
- LoadTaggedPointerField(destination,
- FieldMemOperand(object, HeapObject::kMapOffset), r0);
+void MacroAssembler::LoadMap(Register destination, Register object) {
+ LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset),
+ r0);
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst,
FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
r0);
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
}
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
+void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (v8_flags.debug_code) Check(cond, reason, cr);
}
-void TurboAssembler::AssertNotSmi(Register object) {
+void MacroAssembler::AssertNotSmi(Register object) {
if (v8_flags.debug_code) {
static_assert(kSmiTag == 0);
TestIfSmi(object, r0);
@@ -2332,7 +2298,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
}
}
-void TurboAssembler::AssertSmi(Register object) {
+void MacroAssembler::AssertSmi(Register object) {
if (v8_flags.debug_code) {
static_assert(kSmiTag == 0);
TestIfSmi(object, r0);
@@ -2430,7 +2396,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
static const int kRegisterPassedArguments = 8;
-int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
@@ -2444,7 +2410,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -2473,16 +2439,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
StoreU64WithUpdate(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
-void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2 == d1) {
DCHECK(src1 != d2);
@@ -2494,68 +2460,75 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots,
bool has_function_descriptor) {
Move(ip, function);
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments,
- has_function_descriptor);
+ set_isolate_data_slots, has_function_descriptor);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots,
bool has_function_descriptor) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
- has_function_descriptor);
+ set_isolate_data_slots, has_function_descriptor);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots,
bool has_function_descriptor) {
- CallCFunction(function, num_arguments, 0, has_function_descriptor);
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
+ has_function_descriptor);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments,
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots,
bool has_function_descriptor) {
- CallCFunction(function, num_arguments, 0, has_function_descriptor);
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
+ has_function_descriptor);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments,
- bool has_function_descriptor) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots, bool has_function_descriptor) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- Register addr_scratch = r7;
- Register scratch = r8;
- Push(scratch);
- mflr(scratch);
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- LoadPC(r0);
- StoreU64(r0, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- StoreU64(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Push(addr_scratch);
-
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_pc_address(isolate()));
- LoadPC(r0);
- StoreU64(r0, MemOperand(addr_scratch));
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreU64(fp, MemOperand(addr_scratch));
- Pop(addr_scratch);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ Register scratch = r8;
+ Push(scratch);
+ mflr(scratch);
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ LoadPC(r0);
+ StoreU64(r0, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreU64(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r7;
+ Push(addr_scratch);
+
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ LoadPC(r0);
+ StoreU64(r0, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreU64(fp, MemOperand(addr_scratch));
+ Pop(addr_scratch);
+ }
+ mtlr(scratch);
+ Pop(scratch);
}
- mtlr(scratch);
- Pop(scratch);
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
@@ -2576,21 +2549,24 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
- // We don't unset the PC; the FP is the source of truth.
- Register zero_scratch = r0;
- mov(zero_scratch, Operand::Zero());
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r0;
+ mov(zero_scratch, Operand::Zero());
- if (root_array_available()) {
- StoreU64(
- zero_scratch,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Push(addr_scratch);
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreU64(zero_scratch, MemOperand(addr_scratch));
- Pop(addr_scratch);
+ if (root_array_available()) {
+ StoreU64(zero_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r7;
+ Push(addr_scratch);
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreU64(zero_scratch, MemOperand(addr_scratch));
+ Pop(addr_scratch);
+ }
}
// Remove frame bought in PrepareCallCFunction
@@ -2604,7 +2580,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
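
The SetIsolateDataSlots parameter added to the CallCFunction family lets callers skip recording the caller PC/FP that otherwise keeps the stack iterable across a direct C call; note that only the FP is cleared afterwards. A rough sketch of that save/clear protocol as an RAII guard, with invented names standing in for the IsolateData slots:

#include <cstdint>

// Hypothetical slots standing in for IsolateData::fast_c_call_caller_{pc,fp};
// names and layout are invented for illustration.
struct FastCCallSlots {
  uintptr_t caller_pc = 0;
  uintptr_t caller_fp = 0;
};

// Mirrors the protocol in CallCFunctionHelper: record pc/fp before the call,
// clear only fp afterwards ("we don't unset the PC; the FP is the source of
// truth"), and do nothing when the caller opts out.
class FastCCallScope {
 public:
  FastCCallScope(FastCCallSlots* slots, uintptr_t pc, uintptr_t fp,
                 bool set_isolate_data_slots)
      : slots_(set_isolate_data_slots ? slots : nullptr) {
    if (slots_) {
      slots_->caller_pc = pc;
      slots_->caller_fp = fp;
    }
  }
  ~FastCCallScope() {
    if (slots_) slots_->caller_fp = 0;
  }

 private:
  FastCCallSlots* slots_;
};

int main() {
  FastCCallSlots slots;
  {
    FastCCallScope scope(&slots, /*pc=*/0x1234, /*fp=*/0x5678, true);
    // ... the C function would run here ...
  }
  return slots.caller_fp == 0 ? 0 : 1;
}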
-void TurboAssembler::CheckPageFlag(
+void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
@@ -2624,9 +2600,9 @@ void TurboAssembler::CheckPageFlag(
}
}
-void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
+void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
-void TurboAssembler::ResetRoundingMode() {
+void MacroAssembler::ResetRoundingMode() {
mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
}
@@ -2635,15 +2611,15 @@ void TurboAssembler::ResetRoundingMode() {
// New MacroAssembler Interfaces added for PPC
//
////////////////////////////////////////////////////////////////////////////////
-void TurboAssembler::LoadIntLiteral(Register dst, int value) {
+void MacroAssembler::LoadIntLiteral(Register dst, int value) {
mov(dst, Operand(value));
}
-void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) {
mov(dst, Operand(smi));
}
-void TurboAssembler::LoadDoubleLiteral(DoubleRegister result,
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result,
base::Double value, Register scratch) {
if (V8_EMBEDDED_CONSTANT_POOL_BOOL && is_constant_pool_available() &&
!(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
@@ -2692,7 +2668,7 @@ void TurboAssembler::LoadDoubleLiteral(DoubleRegister result,
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
+void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// sign-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
@@ -2717,7 +2693,7 @@ void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// zero-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
@@ -2742,7 +2718,7 @@ void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
Register src_hi,
#endif
@@ -2767,7 +2743,7 @@ void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
}
#if V8_TARGET_ARCH_PPC64
-void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
+void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
Register src_hi,
Register src_lo,
Register scratch) {
@@ -2787,7 +2763,7 @@ void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
}
#endif
-void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
+void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
@@ -2806,7 +2782,7 @@ void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
+void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
@@ -2825,7 +2801,7 @@ void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
+void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprwz(dst, src);
@@ -2840,7 +2816,7 @@ void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
+void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
mffprd(dst, src);
@@ -2856,7 +2832,7 @@ void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovDoubleToInt64(
+void MacroAssembler::MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
Register dst_hi,
#endif
@@ -2880,7 +2856,7 @@ void TurboAssembler::MovDoubleToInt64(
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src,
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src,
Register scratch) {
if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
ShiftLeftU64(scratch, src, Operand(32));
@@ -2895,7 +2871,7 @@ void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kFloatSize));
}
-void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src,
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src,
DoubleRegister scratch) {
if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
xscvdpspn(scratch, src);
@@ -2909,12 +2885,12 @@ void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src,
addi(sp, sp, Operand(kFloatSize));
}
-void TurboAssembler::AddS64(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::AddS64(Register dst, Register src, Register value, OEBit s,
RCBit r) {
add(dst, src, value, s, r);
}
-void TurboAssembler::AddS64(Register dst, Register src, const Operand& value,
+void MacroAssembler::AddS64(Register dst, Register src, const Operand& value,
Register scratch, OEBit s, RCBit r) {
if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
addi(dst, src, value);
@@ -2924,12 +2900,12 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& value,
}
}
-void TurboAssembler::SubS64(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::SubS64(Register dst, Register src, Register value, OEBit s,
RCBit r) {
sub(dst, src, value, s, r);
}
-void TurboAssembler::SubS64(Register dst, Register src, const Operand& value,
+void MacroAssembler::SubS64(Register dst, Register src, const Operand& value,
Register scratch, OEBit s, RCBit r) {
if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
subi(dst, src, value);
@@ -2939,31 +2915,31 @@ void TurboAssembler::SubS64(Register dst, Register src, const Operand& value,
}
}
-void TurboAssembler::AddS32(Register dst, Register src, Register value,
+void MacroAssembler::AddS32(Register dst, Register src, Register value,
RCBit r) {
AddS64(dst, src, value, LeaveOE, r);
extsw(dst, dst, r);
}
-void TurboAssembler::AddS32(Register dst, Register src, const Operand& value,
+void MacroAssembler::AddS32(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
AddS64(dst, src, value, scratch, LeaveOE, r);
extsw(dst, dst, r);
}
-void TurboAssembler::SubS32(Register dst, Register src, Register value,
+void MacroAssembler::SubS32(Register dst, Register src, Register value,
RCBit r) {
SubS64(dst, src, value, LeaveOE, r);
extsw(dst, dst, r);
}
-void TurboAssembler::SubS32(Register dst, Register src, const Operand& value,
+void MacroAssembler::SubS32(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
SubS64(dst, src, value, scratch, LeaveOE, r);
extsw(dst, dst, r);
}
-void TurboAssembler::MulS64(Register dst, Register src, const Operand& value,
+void MacroAssembler::MulS64(Register dst, Register src, const Operand& value,
Register scratch, OEBit s, RCBit r) {
if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
mulli(dst, src, value);
@@ -2973,45 +2949,45 @@ void TurboAssembler::MulS64(Register dst, Register src, const Operand& value,
}
}
-void TurboAssembler::MulS64(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::MulS64(Register dst, Register src, Register value, OEBit s,
RCBit r) {
mulld(dst, src, value, s, r);
}
-void TurboAssembler::MulS32(Register dst, Register src, const Operand& value,
+void MacroAssembler::MulS32(Register dst, Register src, const Operand& value,
Register scratch, OEBit s, RCBit r) {
MulS64(dst, src, value, scratch, s, r);
extsw(dst, dst, r);
}
-void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
RCBit r) {
MulS64(dst, src, value, s, r);
extsw(dst, dst, r);
}
-void TurboAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
RCBit r) {
divd(dst, src, value, s, r);
}
-void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
RCBit r) {
divdu(dst, src, value, s, r);
}
-void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
RCBit r) {
divw(dst, src, value, s, r);
extsw(dst, dst);
}
-void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
+void MacroAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
RCBit r) {
divwu(dst, src, value, s, r);
ZeroExtWord32(dst, dst);
}
-void TurboAssembler::ModS64(Register dst, Register src, Register value) {
+void MacroAssembler::ModS64(Register dst, Register src, Register value) {
if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
modsd(dst, src, value);
} else {
@@ -3024,7 +3000,7 @@ void TurboAssembler::ModS64(Register dst, Register src, Register value) {
}
}
-void TurboAssembler::ModU64(Register dst, Register src, Register value) {
+void MacroAssembler::ModU64(Register dst, Register src, Register value) {
if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
modud(dst, src, value);
} else {
@@ -3037,7 +3013,7 @@ void TurboAssembler::ModU64(Register dst, Register src, Register value) {
}
}
-void TurboAssembler::ModS32(Register dst, Register src, Register value) {
+void MacroAssembler::ModS32(Register dst, Register src, Register value) {
if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
modsw(dst, src, value);
} else {
@@ -3050,7 +3026,7 @@ void TurboAssembler::ModS32(Register dst, Register src, Register value) {
}
extsw(dst, dst);
}
-void TurboAssembler::ModU32(Register dst, Register src, Register value) {
+void MacroAssembler::ModU32(Register dst, Register src, Register value) {
if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
moduw(dst, src, value);
} else {
@@ -3064,7 +3040,7 @@ void TurboAssembler::ModU32(Register dst, Register src, Register value) {
ZeroExtWord32(dst, dst);
}
-void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
+void MacroAssembler::AndU64(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
if (is_uint16(value.immediate()) && r == SetRC) {
andi(dst, src, value);
@@ -3074,12 +3050,12 @@ void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
}
}
-void TurboAssembler::AndU64(Register dst, Register src, Register value,
+void MacroAssembler::AndU64(Register dst, Register src, Register value,
RCBit r) {
and_(dst, src, value, r);
}
-void TurboAssembler::OrU64(Register dst, Register src, const Operand& value,
+void MacroAssembler::OrU64(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
if (is_int16(value.immediate()) && r == LeaveRC) {
ori(dst, src, value);
@@ -3089,12 +3065,12 @@ void TurboAssembler::OrU64(Register dst, Register src, const Operand& value,
}
}
-void TurboAssembler::OrU64(Register dst, Register src, Register value,
+void MacroAssembler::OrU64(Register dst, Register src, Register value,
RCBit r) {
orx(dst, src, value, r);
}
-void TurboAssembler::XorU64(Register dst, Register src, const Operand& value,
+void MacroAssembler::XorU64(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
if (is_int16(value.immediate()) && r == LeaveRC) {
xori(dst, src, value);
@@ -3104,112 +3080,112 @@ void TurboAssembler::XorU64(Register dst, Register src, const Operand& value,
}
}
-void TurboAssembler::XorU64(Register dst, Register src, Register value,
+void MacroAssembler::XorU64(Register dst, Register src, Register value,
RCBit r) {
xor_(dst, src, value, r);
}
-void TurboAssembler::AndU32(Register dst, Register src, const Operand& value,
+void MacroAssembler::AndU32(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
AndU64(dst, src, value, scratch, r);
extsw(dst, dst, r);
}
-void TurboAssembler::AndU32(Register dst, Register src, Register value,
+void MacroAssembler::AndU32(Register dst, Register src, Register value,
RCBit r) {
AndU64(dst, src, value, r);
extsw(dst, dst, r);
}
-void TurboAssembler::OrU32(Register dst, Register src, const Operand& value,
+void MacroAssembler::OrU32(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
OrU64(dst, src, value, scratch, r);
extsw(dst, dst, r);
}
-void TurboAssembler::OrU32(Register dst, Register src, Register value,
+void MacroAssembler::OrU32(Register dst, Register src, Register value,
RCBit r) {
OrU64(dst, src, value, r);
extsw(dst, dst, r);
}
-void TurboAssembler::XorU32(Register dst, Register src, const Operand& value,
+void MacroAssembler::XorU32(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
XorU64(dst, src, value, scratch, r);
extsw(dst, dst, r);
}
-void TurboAssembler::XorU32(Register dst, Register src, Register value,
+void MacroAssembler::XorU32(Register dst, Register src, Register value,
RCBit r) {
XorU64(dst, src, value, r);
extsw(dst, dst, r);
}
-void TurboAssembler::ShiftLeftU64(Register dst, Register src,
+void MacroAssembler::ShiftLeftU64(Register dst, Register src,
const Operand& value, RCBit r) {
sldi(dst, src, value, r);
}
-void TurboAssembler::ShiftRightU64(Register dst, Register src,
+void MacroAssembler::ShiftRightU64(Register dst, Register src,
const Operand& value, RCBit r) {
srdi(dst, src, value, r);
}
-void TurboAssembler::ShiftRightS64(Register dst, Register src,
+void MacroAssembler::ShiftRightS64(Register dst, Register src,
const Operand& value, RCBit r) {
sradi(dst, src, value.immediate(), r);
}
-void TurboAssembler::ShiftLeftU32(Register dst, Register src,
+void MacroAssembler::ShiftLeftU32(Register dst, Register src,
const Operand& value, RCBit r) {
slwi(dst, src, value, r);
}
-void TurboAssembler::ShiftRightU32(Register dst, Register src,
+void MacroAssembler::ShiftRightU32(Register dst, Register src,
const Operand& value, RCBit r) {
srwi(dst, src, value, r);
}
-void TurboAssembler::ShiftRightS32(Register dst, Register src,
+void MacroAssembler::ShiftRightS32(Register dst, Register src,
const Operand& value, RCBit r) {
srawi(dst, src, value.immediate(), r);
}
-void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register value,
+void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register value,
RCBit r) {
sld(dst, src, value, r);
}
-void TurboAssembler::ShiftRightU64(Register dst, Register src, Register value,
+void MacroAssembler::ShiftRightU64(Register dst, Register src, Register value,
RCBit r) {
srd(dst, src, value, r);
}
-void TurboAssembler::ShiftRightS64(Register dst, Register src, Register value,
+void MacroAssembler::ShiftRightS64(Register dst, Register src, Register value,
RCBit r) {
srad(dst, src, value, r);
}
-void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register value,
+void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register value,
RCBit r) {
slw(dst, src, value, r);
}
-void TurboAssembler::ShiftRightU32(Register dst, Register src, Register value,
+void MacroAssembler::ShiftRightU32(Register dst, Register src, Register value,
RCBit r) {
srw(dst, src, value, r);
}
-void TurboAssembler::ShiftRightS32(Register dst, Register src, Register value,
+void MacroAssembler::ShiftRightS32(Register dst, Register src, Register value,
RCBit r) {
sraw(dst, src, value, r);
}
-void TurboAssembler::CmpS64(Register src1, Register src2, CRegister cr) {
+void MacroAssembler::CmpS64(Register src1, Register src2, CRegister cr) {
cmp(src1, src2, cr);
}
-void TurboAssembler::CmpS64(Register src1, const Operand& src2,
+void MacroAssembler::CmpS64(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
@@ -3220,7 +3196,7 @@ void TurboAssembler::CmpS64(Register src1, const Operand& src2,
}
}
-void TurboAssembler::CmpU64(Register src1, const Operand& src2,
+void MacroAssembler::CmpU64(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_uint16(value)) {
@@ -3231,11 +3207,11 @@ void TurboAssembler::CmpU64(Register src1, const Operand& src2,
}
}
-void TurboAssembler::CmpU64(Register src1, Register src2, CRegister cr) {
+void MacroAssembler::CmpU64(Register src1, Register src2, CRegister cr) {
cmpl(src1, src2, cr);
}
-void TurboAssembler::CmpS32(Register src1, const Operand& src2,
+void MacroAssembler::CmpS32(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
@@ -3246,11 +3222,11 @@ void TurboAssembler::CmpS32(Register src1, const Operand& src2,
}
}
-void TurboAssembler::CmpS32(Register src1, Register src2, CRegister cr) {
+void MacroAssembler::CmpS32(Register src1, Register src2, CRegister cr) {
cmpw(src1, src2, cr);
}
-void TurboAssembler::CmpU32(Register src1, const Operand& src2,
+void MacroAssembler::CmpU32(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_uint16(value)) {
@@ -3261,55 +3237,55 @@ void TurboAssembler::CmpU32(Register src1, const Operand& src2,
}
}
-void TurboAssembler::CmpU32(Register src1, Register src2, CRegister cr) {
+void MacroAssembler::CmpU32(Register src1, Register src2, CRegister cr) {
cmplw(src1, src2, cr);
}
-void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fadd(dst, lhs, rhs, r);
}
-void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fsub(dst, lhs, rhs, r);
}
-void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fmul(dst, lhs, rhs, r);
}
-void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fdiv(dst, lhs, rhs, r);
}
-void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fadd(dst, lhs, rhs, r);
frsp(dst, dst, r);
}
-void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fsub(dst, lhs, rhs, r);
frsp(dst, dst, r);
}
-void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fmul(dst, lhs, rhs, r);
frsp(dst, dst, r);
}
-void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fdiv(dst, lhs, rhs, r);
frsp(dst, dst, r);
}
-void TurboAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs, RCBit r) {
fcpsgn(dst, rhs, lhs, r);
}
@@ -3512,7 +3488,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
V(StoreU64WithUpdate, stdu, stdux)
#define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op) \
- void TurboAssembler::name(Register reg, const MemOperand& mem, \
+ void MacroAssembler::name(Register reg, const MemOperand& mem, \
Register scratch) { \
GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op); \
}
@@ -3526,7 +3502,7 @@ MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION)
V(StoreU64, std, pstd, stdx)
#define MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION(name, ri_op, rip_op, rr_op) \
- void TurboAssembler::name(Register reg, const MemOperand& mem, \
+ void MacroAssembler::name(Register reg, const MemOperand& mem, \
Register scratch) { \
GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, rr_op); \
}
@@ -3541,7 +3517,7 @@ MEM_OP_WITH_ALIGN_PREFIXED_LIST(MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION)
V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux)
#define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op) \
- void TurboAssembler::name(result_t reg, const MemOperand& mem, \
+ void MacroAssembler::name(result_t reg, const MemOperand& mem, \
Register scratch) { \
GenerateMemoryOperation(reg, mem, ri_op, rr_op); \
}
@@ -3563,7 +3539,7 @@ MEM_OP_LIST(MEM_OP_FUNCTION)
V(StoreF32, DoubleRegister, stfs, pstfs, stfsx)
#define MEM_OP_PREFIXED_FUNCTION(name, result_t, ri_op, rip_op, rr_op) \
- void TurboAssembler::name(result_t reg, const MemOperand& mem, \
+ void MacroAssembler::name(result_t reg, const MemOperand& mem, \
Register scratch) { \
GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op); \
}
@@ -3571,7 +3547,28 @@ MEM_OP_PREFIXED_LIST(MEM_OP_PREFIXED_FUNCTION)
#undef MEM_OP_PREFIXED_LIST
#undef MEM_OP_PREFIXED_FUNCTION
-void TurboAssembler::LoadS8(Register dst, const MemOperand& mem,
+#define MEM_OP_SIMD_LIST(V) \
+ V(LoadSimd128, lxvx) \
+ V(StoreSimd128, stxvx) \
+ V(LoadSimd128Uint64, lxsdx) \
+ V(LoadSimd128Uint32, lxsiwzx) \
+ V(LoadSimd128Uint16, lxsihzx) \
+ V(LoadSimd128Uint8, lxsibzx) \
+ V(StoreSimd128Uint64, stxsdx) \
+ V(StoreSimd128Uint32, stxsiwx) \
+ V(StoreSimd128Uint16, stxsihx) \
+ V(StoreSimd128Uint8, stxsibx)
+
+#define MEM_OP_SIMD_FUNCTION(name, rr_op) \
+ void MacroAssembler::name(Simd128Register reg, const MemOperand& mem, \
+ Register scratch) { \
+ GenerateMemoryOperationRR(reg, mem, rr_op); \
+ }
+MEM_OP_SIMD_LIST(MEM_OP_SIMD_FUNCTION)
+#undef MEM_OP_SIMD_LIST
+#undef MEM_OP_SIMD_FUNCTION
+
+void MacroAssembler::LoadS8(Register dst, const MemOperand& mem,
Register scratch) {
LoadU8(dst, mem, scratch);
extsb(dst, dst);
@@ -3587,13 +3584,13 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem,
#ifdef V8_TARGET_BIG_ENDIAN
#define MEM_LE_OP_FUNCTION(name, op) \
- void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \
+ void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \
Register scratch) { \
GenerateMemoryOperationRR(reg, mem, op); \
}
#else
#define MEM_LE_OP_FUNCTION(name, op) \
- void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \
+ void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \
Register scratch) { \
name(reg, mem, scratch); \
}
@@ -3603,7 +3600,7 @@ MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION)
#undef MEM_LE_OP_FUNCTION
#undef MEM_LE_OP_LIST
-void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadS32LE(Register dst, const MemOperand& mem,
Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
LoadU32LE(dst, mem, scratch);
@@ -3613,7 +3610,7 @@ void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadS16LE(Register dst, const MemOperand& mem,
Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
LoadU16LE(dst, mem, scratch);
@@ -3623,7 +3620,7 @@ void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem,
+void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem,
Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
LoadU64LE(scratch, mem, scratch2);
@@ -3635,7 +3632,7 @@ void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem,
+void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem,
Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
LoadU32LE(scratch, mem, scratch2);
@@ -3647,7 +3644,7 @@ void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
+void MacroAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
StoreF64(dst, mem, scratch2);
@@ -3658,7 +3655,7 @@ void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
+void MacroAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
StoreF32(dst, mem, scratch2);
@@ -3670,66 +3667,68 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
}
// Simd Support.
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, xvadddp) \
- V(F64x2Sub, xvsubdp) \
- V(F64x2Mul, xvmuldp) \
- V(F64x2Div, xvdivdp) \
- V(F64x2Eq, xvcmpeqdp) \
- V(F32x4Add, vaddfp) \
- V(F32x4Sub, vsubfp) \
- V(F32x4Mul, xvmulsp) \
- V(F32x4Div, xvdivsp) \
- V(F32x4Min, vminfp) \
- V(F32x4Max, vmaxfp) \
- V(F32x4Eq, xvcmpeqsp) \
- V(I64x2Add, vaddudm) \
- V(I64x2Sub, vsubudm) \
- V(I64x2Eq, vcmpequd) \
- V(I64x2GtS, vcmpgtsd) \
- V(I32x4Add, vadduwm) \
- V(I32x4Sub, vsubuwm) \
- V(I32x4Mul, vmuluwm) \
- V(I32x4MinS, vminsw) \
- V(I32x4MinU, vminuw) \
- V(I32x4MaxS, vmaxsw) \
- V(I32x4MaxU, vmaxuw) \
- V(I32x4Eq, vcmpequw) \
- V(I32x4GtS, vcmpgtsw) \
- V(I32x4GtU, vcmpgtuw) \
- V(I16x8Add, vadduhm) \
- V(I16x8Sub, vsubuhm) \
- V(I16x8MinS, vminsh) \
- V(I16x8MinU, vminuh) \
- V(I16x8MaxS, vmaxsh) \
- V(I16x8MaxU, vmaxuh) \
- V(I16x8Eq, vcmpequh) \
- V(I16x8GtS, vcmpgtsh) \
- V(I16x8GtU, vcmpgtuh) \
- V(I16x8AddSatS, vaddshs) \
- V(I16x8SubSatS, vsubshs) \
- V(I16x8AddSatU, vadduhs) \
- V(I16x8SubSatU, vsubuhs) \
- V(I8x16Add, vaddubm) \
- V(I8x16Sub, vsububm) \
- V(I8x16MinS, vminsb) \
- V(I8x16MinU, vminub) \
- V(I8x16MaxS, vmaxsb) \
- V(I8x16MaxU, vmaxub) \
- V(I8x16Eq, vcmpequb) \
- V(I8x16GtS, vcmpgtsb) \
- V(I8x16GtU, vcmpgtub) \
- V(I8x16AddSatS, vaddsbs) \
- V(I8x16SubSatS, vsubsbs) \
- V(I8x16AddSatU, vaddubs) \
- V(I8x16SubSatU, vsububs) \
- V(S128And, vand) \
- V(S128Or, vor) \
- V(S128Xor, vxor) \
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, xvadddp) \
+ V(F64x2Sub, xvsubdp) \
+ V(F64x2Mul, xvmuldp) \
+ V(F64x2Div, xvdivdp) \
+ V(F64x2Eq, xvcmpeqdp) \
+ V(F32x4Add, vaddfp) \
+ V(F32x4Sub, vsubfp) \
+ V(F32x4Mul, xvmulsp) \
+ V(F32x4Div, xvdivsp) \
+ V(F32x4Min, vminfp) \
+ V(F32x4Max, vmaxfp) \
+ V(F32x4Eq, xvcmpeqsp) \
+ V(I64x2Add, vaddudm) \
+ V(I64x2Sub, vsubudm) \
+ V(I64x2Eq, vcmpequd) \
+ V(I64x2GtS, vcmpgtsd) \
+ V(I32x4Add, vadduwm) \
+ V(I32x4Sub, vsubuwm) \
+ V(I32x4Mul, vmuluwm) \
+ V(I32x4MinS, vminsw) \
+ V(I32x4MinU, vminuw) \
+ V(I32x4MaxS, vmaxsw) \
+ V(I32x4MaxU, vmaxuw) \
+ V(I32x4Eq, vcmpequw) \
+ V(I32x4GtS, vcmpgtsw) \
+ V(I32x4GtU, vcmpgtuw) \
+ V(I16x8Add, vadduhm) \
+ V(I16x8Sub, vsubuhm) \
+ V(I16x8MinS, vminsh) \
+ V(I16x8MinU, vminuh) \
+ V(I16x8MaxS, vmaxsh) \
+ V(I16x8MaxU, vmaxuh) \
+ V(I16x8Eq, vcmpequh) \
+ V(I16x8GtS, vcmpgtsh) \
+ V(I16x8GtU, vcmpgtuh) \
+ V(I16x8AddSatS, vaddshs) \
+ V(I16x8SubSatS, vsubshs) \
+ V(I16x8AddSatU, vadduhs) \
+ V(I16x8SubSatU, vsubuhs) \
+ V(I16x8RoundingAverageU, vavguh) \
+ V(I8x16Add, vaddubm) \
+ V(I8x16Sub, vsububm) \
+ V(I8x16MinS, vminsb) \
+ V(I8x16MinU, vminub) \
+ V(I8x16MaxS, vmaxsb) \
+ V(I8x16MaxU, vmaxub) \
+ V(I8x16Eq, vcmpequb) \
+ V(I8x16GtS, vcmpgtsb) \
+ V(I8x16GtU, vcmpgtub) \
+ V(I8x16AddSatS, vaddsbs) \
+ V(I8x16SubSatS, vsubsbs) \
+ V(I8x16AddSatU, vaddubs) \
+ V(I8x16SubSatU, vsububs) \
+ V(I8x16RoundingAverageU, vavgub) \
+ V(S128And, vand) \
+ V(S128Or, vor) \
+ V(S128Xor, vxor) \
V(S128AndNot, vandc)
#define EMIT_SIMD_BINOP(name, op) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2); \
}
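
SIMD_BINOP_LIST and EMIT_SIMD_BINOP use the X-macro idiom: the list macro enumerates (name, instruction) pairs and the emitter macro expands each pair into a one-line wrapper, after which both are #undef'd. A standalone illustration of the same idiom with made-up scalar operations:

#include <cassert>

// X-macro idiom: the list macro names the cases, the emitter macro turns each
// case into a definition, and both are #undef'd once expansion is done.
#define BINOP_LIST(V) \
  V(Add, +)           \
  V(Sub, -)           \
  V(Mul, *)

#define EMIT_BINOP(name, op) \
  int name(int a, int b) { return a op b; }
BINOP_LIST(EMIT_BINOP)
#undef EMIT_BINOP
#undef BINOP_LIST

int main() {
  assert(Add(2, 3) == 5);
  assert(Sub(2, 3) == -1);
  assert(Mul(2, 3) == 6);
  return 0;
}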
@@ -3752,13 +3751,13 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
V(I8x16ShrU, vsrb)
#define EMIT_SIMD_SHIFT(name, op) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Register src2, Simd128Register scratch) { \
mtvsrd(scratch, src2); \
vspltb(scratch, scratch, Operand(7)); \
op(dst, src1, scratch); \
} \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
const Operand& src2, Register scratch1, \
Simd128Register scratch2) { \
mov(scratch1, src2); \
@@ -3781,18 +3780,21 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
V(F32x4Ceil, xvrspip) \
V(F32x4Floor, xvrspim) \
V(F32x4Trunc, xvrspiz) \
+ V(F32x4SConvertI32x4, xvcvsxwsp) \
+ V(F32x4UConvertI32x4, xvcvuxwsp) \
V(I64x2Neg, vnegd) \
V(I64x2SConvertI32x4Low, vupklsw) \
V(I64x2SConvertI32x4High, vupkhsw) \
V(I32x4Neg, vnegw) \
V(I32x4SConvertI16x8Low, vupklsh) \
V(I32x4SConvertI16x8High, vupkhsh) \
+ V(I32x4UConvertF32x4, xvcvspuxws) \
V(I16x8SConvertI8x16Low, vupklsb) \
V(I16x8SConvertI8x16High, vupkhsb) \
V(I8x16Popcnt, vpopcntb)
#define EMIT_SIMD_UNOP(name, op) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
op(dst, src); \
}
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
@@ -3813,7 +3815,7 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
V(I16x8ExtMulHighI8x16U, vmuleub, vmuloub, vmrghh)
#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2, Simd128Register scratch) { \
EXT_MUL(scratch, dst, mul_even, mul_odd) \
merge(dst, scratch, dst); \
@@ -3829,7 +3831,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
V(I8x16AllTrue, vcmpgtub)
#define EMIT_SIMD_ALL_TRUE(name, op) \
- void TurboAssembler::name(Register dst, Simd128Register src, \
+ void MacroAssembler::name(Register dst, Simd128Register src, \
Register scratch1, Register scratch2, \
Simd128Register scratch3) { \
constexpr uint8_t fxm = 0x2; /* field mask. */ \
@@ -3846,7 +3848,53 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
-void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst,
+#define SIMD_BITMASK_LIST(V) \
+ V(I64x2BitMask, vextractdm, 0x8080808080800040) \
+ V(I32x4BitMask, vextractwm, 0x8080808000204060) \
+ V(I16x8BitMask, vextracthm, 0x10203040506070)
+
+#define EMIT_SIMD_BITMASK(name, op, indices)                                  \
+ void MacroAssembler::name(Register dst, Simd128Register src, \
+ Register scratch1, Simd128Register scratch2) { \
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
+ op(dst, src); \
+ } else { \
+      mov(scratch1, Operand(indices)); /* Select 0 for the high bits. */     \
+ mtvsrd(scratch2, scratch1); \
+ vbpermq(scratch2, src, scratch2); \
+ vextractub(scratch2, scratch2, Operand(6)); \
+ mfvsrd(dst, scratch2); \
+ } \
+ }
+SIMD_BITMASK_LIST(EMIT_SIMD_BITMASK)
+#undef EMIT_SIMD_BITMASK
+#undef SIMD_BITMASK_LIST
+
+#define SIMD_QFM_LIST(V) \
+ V(F64x2Qfma, xvmaddmdp) \
+ V(F64x2Qfms, xvnmsubmdp) \
+ V(F32x4Qfma, xvmaddmsp) \
+ V(F32x4Qfms, xvnmsubmsp)
+
+#define EMIT_SIMD_QFM(name, op) \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Simd128Register src2, Simd128Register src3, \
+ Simd128Register scratch) { \
+ Simd128Register dest = dst; \
+ if (dst != src1) { \
+ vor(scratch, src1, src1); \
+ dest = scratch; \
+ } \
+ op(dest, src2, src3); \
+ if (dest != dst) { \
+ vor(dst, dest, dest); \
+ } \
+ }
+SIMD_QFM_LIST(EMIT_SIMD_QFM)
+#undef EMIT_SIMD_QFM
+#undef SIMD_QFM_LIST
+
+void MacroAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -3856,7 +3904,7 @@ void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst,
vinsertd(dst, scratch, Operand(0));
}
-void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst,
+void MacroAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -3866,7 +3914,7 @@ void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst,
vor(dst, scratch, scratch);
}
-void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst,
+void MacroAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -3876,7 +3924,7 @@ void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst,
vinsertd(dst, scratch, Operand(0));
}
-void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst,
+void MacroAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -3887,17 +3935,7 @@ void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst,
}
#undef EXT_MUL
-void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
- Register scratch) {
- GenerateMemoryOperationRR(dst, mem, lxvx);
-}
-
-void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
- Register scratch) {
- GenerateMemoryOperationRR(src, mem, stxvx);
-}
-
-void TurboAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem,
Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
LoadSimd128(dst, mem, scratch);
@@ -3907,7 +3945,7 @@ void TurboAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem,
Register scratch1,
Simd128Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
@@ -3918,7 +3956,7 @@ void TurboAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem,
#endif
}
-void TurboAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src,
+void MacroAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src,
Register scratch) {
constexpr int lane_width_in_bytes = 8;
MovDoubleToInt64(scratch, src);
@@ -3926,35 +3964,35 @@ void TurboAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src,
vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
}
-void TurboAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src,
+void MacroAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src,
DoubleRegister scratch1, Register scratch2) {
MovFloatToInt(scratch2, src, scratch1);
mtvsrd(dst, scratch2);
vspltw(dst, dst, Operand(1));
}
-void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) {
constexpr int lane_width_in_bytes = 8;
mtvsrd(dst, src);
vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
}
-void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) {
mtvsrd(dst, src);
vspltw(dst, dst, Operand(1));
}
-void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) {
mtvsrd(dst, src);
vsplth(dst, dst, Operand(3));
}
-void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) {
mtvsrd(dst, src);
vspltb(dst, dst, Operand(7));
}
-void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
+void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch1,
Register scratch2) {
@@ -3964,7 +4002,7 @@ void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
MovInt64ToDouble(dst, scratch2);
}
-void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
+void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch1,
Register scratch2, Register scratch3) {
@@ -3974,7 +4012,7 @@ void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
MovIntToFloat(dst, scratch2, scratch3);
}
-void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
+void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch) {
constexpr int lane_width_in_bytes = 8;
@@ -3982,7 +4020,7 @@ void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
mfvsrd(dst, scratch);
}
-void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
+void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch) {
constexpr int lane_width_in_bytes = 4;
@@ -3990,7 +4028,7 @@ void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
mfvsrd(dst, scratch);
}
-void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
+void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch) {
constexpr int lane_width_in_bytes = 2;
@@ -3998,28 +4036,28 @@ void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
mfvsrd(dst, scratch);
}
-void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
+void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch) {
I16x8ExtractLaneU(dst, src, imm_lane_idx, scratch);
extsh(dst, dst);
}
-void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
+void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch) {
vextractub(scratch, src, Operand(15 - imm_lane_idx));
mfvsrd(dst, scratch);
}
-void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
+void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
uint8_t imm_lane_idx,
Simd128Register scratch) {
I8x16ExtractLaneU(dst, src, imm_lane_idx, scratch);
extsb(dst, dst);
}
-void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
DoubleRegister src2, uint8_t imm_lane_idx,
Register scratch1,
Simd128Register scratch2) {
@@ -4036,7 +4074,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
}
}
-void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
DoubleRegister src2, uint8_t imm_lane_idx,
Register scratch1,
DoubleRegister scratch2,
@@ -4054,7 +4092,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
}
}
-void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Simd128Register scratch) {
constexpr int lane_width_in_bytes = 8;
@@ -4069,7 +4107,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
}
}
-void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Simd128Register scratch) {
constexpr int lane_width_in_bytes = 4;
@@ -4084,7 +4122,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
}
}
-void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Simd128Register scratch) {
constexpr int lane_width_in_bytes = 2;
@@ -4095,7 +4133,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
vinserth(dst, scratch, Operand((7 - imm_lane_idx) * lane_width_in_bytes));
}
-void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Simd128Register scratch) {
if (src1 != dst) {
@@ -4105,7 +4143,7 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
vinsertb(dst, scratch, Operand(15 - imm_lane_idx));
}
-void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Register scratch1,
Register scratch2, Register scratch3,
Simd128Register scratch4) {
@@ -4132,7 +4170,7 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
}
}
-void TurboAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vxor(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
vmladduhm(dst, src1, src2, kSimd128RegZero);
@@ -4145,7 +4183,7 @@ void TurboAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1,
vsel(dst, src2, result, scratch2); \
/* Use xvmindp to turn any selected SNANs to QNANs. */ \
xvmindp(dst, dst, dst);
-void TurboAssembler::F64x2Min(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Min(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch1,
Simd128Register scratch2) {
xvmindp(scratch1, src1, src2);
@@ -4153,7 +4191,7 @@ void TurboAssembler::F64x2Min(Simd128Register dst, Simd128Register src1,
F64X2_MIN_MAX_NAN(scratch1)
}
-void TurboAssembler::F64x2Max(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Max(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch1,
Simd128Register scratch2) {
xvmaxdp(scratch1, src1, src2);
@@ -4162,108 +4200,108 @@ void TurboAssembler::F64x2Max(Simd128Register dst, Simd128Register src1,
}
#undef F64X2_MIN_MAX_NAN
-void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
xvcmpgtdp(dst, src2, src1);
}
-void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
xvcmpgedp(dst, src2, src1);
}
-void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
xvcmpeqdp(scratch, src1, src2);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
xvcmpgtsp(dst, src2, src1);
}
-void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
xvcmpgesp(dst, src2, src1);
}
-void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
xvcmpeqsp(scratch, src1, src2);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequd(scratch, src1, src2);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpgtsd(scratch, src2, src1);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequw(scratch, src1, src2);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpgtsw(scratch, src2, src1);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequw(scratch, src1, src2);
vcmpgtuw(dst, src1, src2);
vor(dst, dst, scratch);
}
-void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequh(scratch, src1, src2);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpgtsh(scratch, src2, src1);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequh(scratch, src1, src2);
vcmpgtuh(dst, src1, src2);
vor(dst, dst, scratch);
}
-void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequb(scratch, src1, src2);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpgtsb(scratch, src2, src1);
vnor(dst, scratch, scratch);
}
-void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vcmpequb(scratch, src1, src2);
vcmpgtub(dst, src1, src2);
vor(dst, dst, scratch);
}
-void TurboAssembler::I64x2Abs(Simd128Register dst, Simd128Register src,
+void MacroAssembler::I64x2Abs(Simd128Register dst, Simd128Register src,
Simd128Register scratch) {
constexpr int shift_bits = 63;
xxspltib(scratch, Operand(shift_bits));
@@ -4271,7 +4309,7 @@ void TurboAssembler::I64x2Abs(Simd128Register dst, Simd128Register src,
vxor(dst, src, scratch);
vsubudm(dst, dst, scratch);
}
-void TurboAssembler::I32x4Abs(Simd128Register dst, Simd128Register src,
+void MacroAssembler::I32x4Abs(Simd128Register dst, Simd128Register src,
Simd128Register scratch) {
constexpr int shift_bits = 31;
xxspltib(scratch, Operand(shift_bits));
@@ -4279,7 +4317,7 @@ void TurboAssembler::I32x4Abs(Simd128Register dst, Simd128Register src,
vxor(dst, src, scratch);
vsubuwm(dst, dst, scratch);
}
-void TurboAssembler::I16x8Abs(Simd128Register dst, Simd128Register src,
+void MacroAssembler::I16x8Abs(Simd128Register dst, Simd128Register src,
Simd128Register scratch) {
constexpr int shift_bits = 15;
xxspltib(scratch, Operand(shift_bits));
@@ -4287,13 +4325,13 @@ void TurboAssembler::I16x8Abs(Simd128Register dst, Simd128Register src,
vxor(dst, src, scratch);
vsubuhm(dst, dst, scratch);
}
-void TurboAssembler::I16x8Neg(Simd128Register dst, Simd128Register src,
+void MacroAssembler::I16x8Neg(Simd128Register dst, Simd128Register src,
Simd128Register scratch) {
vspltish(scratch, Operand(1));
vnor(dst, src, src);
vadduhm(dst, scratch, dst);
}
-void TurboAssembler::I8x16Abs(Simd128Register dst, Simd128Register src,
+void MacroAssembler::I8x16Abs(Simd128Register dst, Simd128Register src,
Simd128Register scratch) {
constexpr int shift_bits = 7;
xxspltib(scratch, Operand(shift_bits));
@@ -4301,68 +4339,77 @@ void TurboAssembler::I8x16Abs(Simd128Register dst, Simd128Register src,
vxor(dst, src, scratch);
vsububm(dst, dst, scratch);
}
-void TurboAssembler::I8x16Neg(Simd128Register dst, Simd128Register src,
+void MacroAssembler::I8x16Neg(Simd128Register dst, Simd128Register src,
Simd128Register scratch) {
xxspltib(scratch, Operand(1));
vnor(dst, src, src);
vaddubm(dst, scratch, dst);
}
-void TurboAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
xvcmpgtdp(kScratchSimd128Reg, src1, src2);
vsel(dst, src1, src2, kScratchSimd128Reg);
}
-void TurboAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
xvcmpgtdp(kScratchSimd128Reg, src2, src1);
vsel(dst, src1, src2, kScratchSimd128Reg);
}
-void TurboAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
xvcmpgtsp(kScratchSimd128Reg, src1, src2);
vsel(dst, src1, src2, kScratchSimd128Reg);
}
-void TurboAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
xvcmpgtsp(kScratchSimd128Reg, src2, src1);
vsel(dst, src1, src2, kScratchSimd128Reg);
}
-void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst,
+void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch) {
+ // NaN to 0
+ xvcmpeqsp(scratch, src, src);
+ vand(scratch, src, scratch);
+ xvcvspsxws(dst, scratch);
+}
+
+void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpkswss(dst, src2, src1);
}
-void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
+void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpkswus(dst, src2, src1);
}
-void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
+void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpkshss(dst, src2, src1);
}
-void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
+void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpkshus(dst, src2, src1);
}
-void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
+void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
Simd128Register src) {
vupklsw(dst, src);
xvcvsxddp(dst, dst);
}
-void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
+void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4376,7 +4423,7 @@ void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
xvcvuxddp(dst, dst);
}
-void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst,
+void MacroAssembler::I64x2UConvertI32x4Low(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4389,7 +4436,7 @@ void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst,
vand(dst, scratch2, dst);
}
-void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst,
+void MacroAssembler::I64x2UConvertI32x4High(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4402,7 +4449,7 @@ void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst,
vand(dst, scratch2, dst);
}
-void TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst,
+void MacroAssembler::I32x4UConvertI16x8Low(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4414,7 +4461,7 @@ void TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst,
vand(dst, scratch2, dst);
}
-void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst,
+void MacroAssembler::I32x4UConvertI16x8High(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4426,7 +4473,7 @@ void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst,
vand(dst, scratch2, dst);
}
-void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst,
+void MacroAssembler::I16x8UConvertI8x16Low(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4438,7 +4485,7 @@ void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst,
vand(dst, scratch2, dst);
}
-void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst,
+void MacroAssembler::I16x8UConvertI8x16High(Simd128Register dst,
Simd128Register src,
Register scratch1,
Simd128Register scratch2) {
@@ -4450,7 +4497,335 @@ void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst,
vand(dst, scratch2, dst);
}
-void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
+void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src,
+ Register scratch1, Register scratch2,
+ Simd128Register scratch3) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ vextractbm(dst, src);
+ } else {
+ mov(scratch1, Operand(0x8101820283038));
+ mov(scratch2, Operand(0x4048505860687078));
+ mtvsrdd(scratch3, scratch1, scratch2);
+ vbpermq(scratch3, src, scratch3);
+ mfvsrd(dst, scratch3);
+ }
+}
+
+void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ vxor(scratch, scratch, scratch);
+ vmsumshm(dst, src1, src2, scratch);
+}
+
+void MacroAssembler::I32x4DotI8x16AddS(Simd128Register dst,
+ Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register src3) {
+ vmsummbm(dst, src1, src2, src3);
+}
+
+void MacroAssembler::I16x8DotI8x16S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ vmulesb(scratch, src1, src2);
+ vmulosb(dst, src1, src2);
+ vadduhm(dst, scratch, dst);
+}
+
+void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ vxor(scratch, scratch, scratch);
+ vmhraddshs(dst, src1, src2, scratch);
+}
+
+void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ // Saturate the indices to 5 bits. Input indices more than 31 should
+ // return 0.
+ xxspltib(scratch, Operand(31));
+ vminub(scratch, src2, scratch);
+ // Input needs to be reversed.
+ xxbrq(dst, src1);
+ vxor(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ vperm(dst, dst, kSimd128RegZero, scratch);
+}
+
+void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, uint64_t high,
+ uint64_t low, Register scratch1,
+ Register scratch2, Simd128Register scratch3) {
+ mov(scratch1, Operand(low));
+ mov(scratch2, Operand(high));
+ mtvsrdd(scratch3, scratch2, scratch1);
+ vperm(dst, src1, src2, scratch3);
+}
+
+#define EXT_ADD_PAIRWISE(splat, mul_even, mul_odd, add) \
+ splat(scratch1, Operand(1)); \
+ mul_even(scratch2, src, scratch1); \
+ mul_odd(scratch1, src, scratch1); \
+ add(dst, scratch2, scratch1);
+void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(vspltish, vmulesh, vmulosh, vadduwm)
+}
+void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(vspltish, vmuleuh, vmulouh, vadduwm)
+}
+void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(xxspltib, vmulesb, vmulosb, vadduhm)
+}
+void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch1,
+ Simd128Register scratch2) {
+ EXT_ADD_PAIRWISE(xxspltib, vmuleub, vmuloub, vadduhm)
+}
+#undef EXT_ADD_PAIRWISE
+
+void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
+ Simd128Register src) {
+ constexpr int lane_number = 8;
+ vextractd(dst, src, Operand(lane_number));
+ vinsertw(dst, dst, Operand(lane_number));
+ xvcvspdp(dst, dst);
+}
+
+void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch) {
+ constexpr int lane_number = 8;
+ xvcvdpsp(scratch, src);
+ vextractuw(dst, scratch, Operand(lane_number));
+ vinsertw(scratch, dst, Operand(4));
+ vxor(dst, dst, dst);
+ vinsertd(dst, scratch, Operand(lane_number));
+}
+
+void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch) {
+ constexpr int lane_number = 8;
+ // NaN to 0.
+ xvcmpeqdp(scratch, src, src);
+ vand(scratch, src, scratch);
+ xvcvdpsxws(scratch, scratch);
+ vextractuw(dst, scratch, Operand(lane_number));
+ vinsertw(scratch, dst, Operand(4));
+ vxor(dst, dst, dst);
+ vinsertd(dst, scratch, Operand(lane_number));
+}
+
+void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
+ Simd128Register src,
+ Simd128Register scratch) {
+ constexpr int lane_number = 8;
+ xvcvdpuxws(scratch, src);
+ vextractuw(dst, scratch, Operand(lane_number));
+ vinsertw(scratch, dst, Operand(4));
+ vxor(dst, dst, dst);
+ vinsertd(dst, scratch, Operand(lane_number));
+}
+
+#if V8_TARGET_BIG_ENDIAN
+#define MAYBE_REVERSE_BYTES(reg, instr) instr(reg, reg);
+#else
+#define MAYBE_REVERSE_BYTES(reg, instr)
+#endif
+void MacroAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 8;
+ LoadSimd128Uint64(scratch2, mem, scratch1);
+ MAYBE_REVERSE_BYTES(scratch2, xxbrd)
+ vinsertd(dst, scratch2, Operand((1 - lane) * lane_width_in_bytes));
+}
+
+void MacroAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 4;
+ LoadSimd128Uint32(scratch2, mem, scratch1);
+ MAYBE_REVERSE_BYTES(scratch2, xxbrw)
+ vinsertw(dst, scratch2, Operand((3 - lane) * lane_width_in_bytes));
+}
+
+void MacroAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 2;
+ LoadSimd128Uint16(scratch2, mem, scratch1);
+ MAYBE_REVERSE_BYTES(scratch2, xxbrh)
+ vinserth(dst, scratch2, Operand((7 - lane) * lane_width_in_bytes));
+}
+
+void MacroAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ LoadSimd128Uint8(scratch2, mem, scratch1);
+ vinsertb(dst, scratch2, Operand((15 - lane)));
+}
+
+void MacroAssembler::StoreLane64LE(Simd128Register src, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 8;
+ vextractd(scratch2, src, Operand((1 - lane) * lane_width_in_bytes));
+ MAYBE_REVERSE_BYTES(scratch2, xxbrd)
+ StoreSimd128Uint64(scratch2, mem, scratch1);
+}
+
+void MacroAssembler::StoreLane32LE(Simd128Register src, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 4;
+ vextractuw(scratch2, src, Operand((3 - lane) * lane_width_in_bytes));
+ MAYBE_REVERSE_BYTES(scratch2, xxbrw)
+ StoreSimd128Uint32(scratch2, mem, scratch1);
+}
+
+void MacroAssembler::StoreLane16LE(Simd128Register src, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 2;
+ vextractuh(scratch2, src, Operand((7 - lane) * lane_width_in_bytes));
+ MAYBE_REVERSE_BYTES(scratch2, xxbrh)
+ StoreSimd128Uint16(scratch2, mem, scratch1);
+}
+
+void MacroAssembler::StoreLane8LE(Simd128Register src, const MemOperand& mem,
+ int lane, Register scratch1,
+ Simd128Register scratch2) {
+ vextractub(scratch2, src, Operand(15 - lane));
+ StoreSimd128Uint8(scratch2, mem, scratch1);
+}
+
+void MacroAssembler::LoadAndSplat64x2LE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ constexpr int lane_width_in_bytes = 8;
+ LoadSimd128Uint64(dst, mem, scratch);
+ MAYBE_REVERSE_BYTES(dst, xxbrd)
+ vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
+}
+
+void MacroAssembler::LoadAndSplat32x4LE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ LoadSimd128Uint32(dst, mem, scratch);
+ MAYBE_REVERSE_BYTES(dst, xxbrw)
+ vspltw(dst, dst, Operand(1));
+}
+
+void MacroAssembler::LoadAndSplat16x8LE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ LoadSimd128Uint16(dst, mem, scratch);
+ MAYBE_REVERSE_BYTES(dst, xxbrh)
+ vsplth(dst, dst, Operand(3));
+}
+
+void MacroAssembler::LoadAndSplat8x16LE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ LoadSimd128Uint8(dst, mem, scratch);
+ vspltb(dst, dst, Operand(7));
+}
+
+void MacroAssembler::LoadAndExtend32x2SLE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ LoadSimd128Uint64(dst, mem, scratch);
+ MAYBE_REVERSE_BYTES(dst, xxbrd)
+ vupkhsw(dst, dst);
+}
+
+void MacroAssembler::LoadAndExtend32x2ULE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 8;
+ LoadAndExtend32x2SLE(dst, mem, scratch1);
+ // Zero extend.
+ mov(scratch1, Operand(0xFFFFFFFF));
+ mtvsrd(scratch2, scratch1);
+ vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
+ vand(dst, scratch2, dst);
+}
+
+void MacroAssembler::LoadAndExtend16x4SLE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ LoadSimd128Uint64(dst, mem, scratch);
+ MAYBE_REVERSE_BYTES(dst, xxbrd)
+ vupkhsh(dst, dst);
+}
+
+void MacroAssembler::LoadAndExtend16x4ULE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch1,
+ Simd128Register scratch2) {
+ LoadAndExtend16x4SLE(dst, mem, scratch1);
+ // Zero extend.
+ mov(scratch1, Operand(0xFFFF));
+ mtvsrd(scratch2, scratch1);
+ vspltw(scratch2, scratch2, Operand(1));
+ vand(dst, scratch2, dst);
+}
+
+void MacroAssembler::LoadAndExtend8x8SLE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch) {
+ LoadSimd128Uint64(dst, mem, scratch);
+ MAYBE_REVERSE_BYTES(dst, xxbrd)
+ vupkhsb(dst, dst);
+}
+
+void MacroAssembler::LoadAndExtend8x8ULE(Simd128Register dst,
+ const MemOperand& mem,
+ Register scratch1,
+ Simd128Register scratch2) {
+ LoadAndExtend8x8SLE(dst, mem, scratch1);
+ // Zero extend.
+ li(scratch1, Operand(0xFF));
+ mtvsrd(scratch2, scratch1);
+ vsplth(scratch2, scratch2, Operand(3));
+ vand(dst, scratch2, dst);
+}
+
+void MacroAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 8;
+ LoadSimd128Uint64(scratch2, mem, scratch1);
+ MAYBE_REVERSE_BYTES(scratch2, xxbrd)
+ vxor(dst, dst, dst);
+ vinsertd(dst, scratch2, Operand(1 * lane_width_in_bytes));
+}
+
+void MacroAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1,
+ Simd128Register scratch2) {
+ constexpr int lane_width_in_bytes = 4;
+ LoadSimd128Uint32(scratch2, mem, scratch1);
+ MAYBE_REVERSE_BYTES(scratch2, xxbrw)
+ vxor(dst, dst, dst);
+ vinsertw(dst, scratch2, Operand(3 * lane_width_in_bytes));
+}
+#undef MAYBE_REVERSE_BYTES
+
+void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src,
Register scratch1, Register scratch2,
Simd128Register scratch3) {
constexpr uint8_t fxm = 0x2; // field mask.
@@ -4464,10 +4839,22 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
isel(dst, scratch1, scratch2, bit_number);
}
-void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) {
vnor(dst, src, src);
}
+void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
+ Register scratch1, Register scratch2) {
+ mov(scratch1, Operand(low));
+ mov(scratch2, Operand(high));
+ mtvsrdd(dst, scratch2, scratch1);
+}
+
+void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register mask) {
+ vsel(dst, src2, src1, mask);
+}
+
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
@@ -4483,7 +4870,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
+void MacroAssembler::SwapP(Register src, Register dst, Register scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
mr(scratch, src);
@@ -4491,7 +4878,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
mr(dst, scratch);
}
-void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
+void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
if (dst.ra() != r0 && dst.ra().is_valid())
DCHECK(!AreAliased(src, dst.ra(), scratch));
if (dst.rb() != r0 && dst.rb().is_valid())
@@ -4502,7 +4889,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
StoreU64(scratch, dst, r0);
}
-void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
Register scratch_1) {
if (src.ra() != r0 && src.ra().is_valid())
DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
@@ -4534,7 +4921,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
}
}
-void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
+void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
@@ -4543,7 +4930,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
fmr(dst, scratch);
}
-void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
+void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
fmr(scratch, src);
@@ -4551,7 +4938,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
StoreF32(scratch, dst, r0);
}
-void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst,
DoubleRegister scratch_0,
DoubleRegister scratch_1) {
DCHECK(!AreAliased(scratch_0, scratch_1));
@@ -4561,7 +4948,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
StoreF32(scratch_1, src, r0);
}
-void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
+void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
@@ -4570,7 +4957,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
fmr(dst, scratch);
}
-void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
+void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
fmr(scratch, src);
@@ -4578,7 +4965,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
StoreF64(scratch, dst, r0);
}
-void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst,
DoubleRegister scratch_0,
DoubleRegister scratch_1) {
DCHECK(!AreAliased(scratch_0, scratch_1));
@@ -4588,7 +4975,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
StoreF64(scratch_1, src, r0);
}
-void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
+void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
Simd128Register scratch) {
if (src == dst) return;
vor(scratch, src, src);
@@ -4596,7 +4983,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
vor(dst, scratch, scratch);
}
-void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
+void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
Simd128Register scratch1, Register scratch2) {
DCHECK(src != scratch1);
LoadSimd128(scratch1, dst, scratch2);
@@ -4604,7 +4991,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
vor(src, scratch1, scratch1);
}
-void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst,
Simd128Register scratch1,
Simd128Register scratch2, Register scratch3) {
LoadSimd128(scratch1, src, scratch3);
@@ -4614,7 +5001,7 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
StoreSimd128(scratch2, src, scratch3);
}
-void TurboAssembler::ByteReverseU16(Register dst, Register val,
+void MacroAssembler::ByteReverseU16(Register dst, Register val,
Register scratch) {
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
brh(dst, val);
@@ -4627,7 +5014,7 @@ void TurboAssembler::ByteReverseU16(Register dst, Register val,
ZeroExtHalfWord(dst, dst);
}
-void TurboAssembler::ByteReverseU32(Register dst, Register val,
+void MacroAssembler::ByteReverseU32(Register dst, Register val,
Register scratch) {
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
brw(dst, val);
@@ -4640,7 +5027,7 @@ void TurboAssembler::ByteReverseU32(Register dst, Register val,
ZeroExtWord32(dst, scratch);
}
-void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) {
+void MacroAssembler::ByteReverseU64(Register dst, Register val, Register) {
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
brd(dst, val);
return;
@@ -4651,17 +5038,17 @@ void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) {
addi(sp, sp, Operand(kSystemPointerSize));
}
-void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS64(x, Operand(y), r0);
beq(dest);
}
-void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
CmpS64(x, Operand(y), r0);
blt(dest);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
static_assert(kSystemPointerSize == 8);
static_assert(kSmiTagSize == 1);
static_assert(kSmiTag == 0);
@@ -4680,92 +5067,50 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
LoadU64(builtin_index, MemOperand(kRootRegister, builtin_index));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
ASM_CODE_COMMENT(this);
LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- Register scratch = r11;
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset), r0);
- mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
- and_(r0, scratch, r0, SetRC);
- bne(&if_code_is_off_heap, cr0);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- b(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
- r0);
- ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
- add(destination, destination, kRootRegister);
- LoadU64(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()),
- r0);
-
- bind(&out);
- } else {
- addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
+ ASM_CODE_COMMENT(this);
+ LoadU64(destination,
+ FieldMemOperand(code_object, Code::kCodeEntryPointOffset), r0);
}
-void TurboAssembler::CallCodeObject(Register code_object) {
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
+ LoadCodeEntry(code_object, code_object);
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
+ LoadCodeEntry(code_object, code_object);
Jump(code_object);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
static constexpr int after_call_offset = 5 * kInstrSize;
Label start_call;
@@ -4793,7 +5138,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
SizeOfCodeGeneratedSince(&start_call));
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4806,30 +5151,30 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::ZeroExtByte(Register dst, Register src) {
+void MacroAssembler::ZeroExtByte(Register dst, Register src) {
clrldi(dst, src, Operand(56));
}
-void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) {
+void MacroAssembler::ZeroExtHalfWord(Register dst, Register src) {
clrldi(dst, src, Operand(48));
}
-void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
+void MacroAssembler::ZeroExtWord32(Register dst, Register src) {
clrldi(dst, src, Operand(32));
}
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); }
+void MacroAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); }
-void TurboAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); }
+void MacroAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); }
-void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) {
+void MacroAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) {
cntlzw(dst, src, r);
}
-void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
+void MacroAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
cntlzd(dst, src, r);
}
@@ -4846,7 +5191,7 @@ void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
addi(dst, dst, Operand(1)); /* dst++ */ \
bdnz(&loop); \
bind(&done);
-void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
+void MacroAssembler::CountTrailingZerosU32(Register dst, Register src,
Register scratch1, Register scratch2,
RCBit r) {
if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
@@ -4856,7 +5201,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
}
}
-void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
+void MacroAssembler::CountTrailingZerosU64(Register dst, Register src,
Register scratch1, Register scratch2,
RCBit r) {
if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
@@ -4867,14 +5212,14 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
}
#undef COUNT_TRAILING_ZEROES_SLOW
-void TurboAssembler::ClearByteU64(Register dst, int byte_idx) {
+void MacroAssembler::ClearByteU64(Register dst, int byte_idx) {
CHECK(0 <= byte_idx && byte_idx <= 7);
int shift = byte_idx*8;
rldicl(dst, dst, shift, 8);
rldicl(dst, dst, 64-shift, 0);
}
-void TurboAssembler::ReverseBitsU64(Register dst, Register src,
+void MacroAssembler::ReverseBitsU64(Register dst, Register src,
Register scratch1, Register scratch2) {
ByteReverseU64(dst, src);
for (int i = 0; i < 8; i++) {
@@ -4882,7 +5227,7 @@ void TurboAssembler::ReverseBitsU64(Register dst, Register src,
}
}
-void TurboAssembler::ReverseBitsU32(Register dst, Register src,
+void MacroAssembler::ReverseBitsU32(Register dst, Register src,
Register scratch1, Register scratch2) {
ByteReverseU32(dst, src, scratch1);
for (int i = 4; i < 8; i++) {
@@ -4891,7 +5236,7 @@ void TurboAssembler::ReverseBitsU32(Register dst, Register src,
}
// byte_idx=7 refers to least significant byte
-void TurboAssembler::ReverseBitsInSingleByteU64(Register dst, Register src,
+void MacroAssembler::ReverseBitsInSingleByteU64(Register dst, Register src,
Register scratch1,
Register scratch2,
int byte_idx) {
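
Aside: the .cc hunks above rely throughout on the X-macro pattern, where an op list macro (e.g. SIMD_BINOP_LIST) is expanded with an emitter macro (e.g. EMIT_SIMD_BINOP) to stamp out one MacroAssembler method per entry. The following stand-alone C++ sketch is illustrative only and is not V8 code; DemoAssembler, DEMO_BINOP_LIST, do_add and do_sub are made-up names used to show how such an expansion works in isolation.

#include <cstdio>

// Hypothetical op list: each entry pairs a method name with the routine that
// implements it, mirroring the SIMD_BINOP_LIST / EMIT_SIMD_BINOP idiom above.
#define DEMO_BINOP_LIST(V) \
  V(AddI32, do_add)        \
  V(SubI32, do_sub)

static int do_add(int a, int b) { return a + b; }
static int do_sub(int a, int b) { return a - b; }

struct DemoAssembler {
  // The emitter macro defines one member function per list entry.
#define EMIT_DEMO_BINOP(name, op) \
  int name(int a, int b) { return op(a, b); }
  DEMO_BINOP_LIST(EMIT_DEMO_BINOP)
#undef EMIT_DEMO_BINOP
#undef DEMO_BINOP_LIST
};

int main() {
  DemoAssembler masm;
  // Each call resolves to a method generated by the list expansion.
  std::printf("%d %d\n", masm.AddI32(2, 3), masm.SubI32(2, 3));  // prints: 5 -1
  return 0;
}

The real emitters differ only in that each generated body forwards to one or more PPC instructions instead of a helper function, which is why the rename above touches every generated definition via a single macro edit.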
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 3951e83c6b..c9e28283b2 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -14,6 +14,7 @@
#include "src/codegen/bailout-reason.h"
#include "src/codegen/ppc/assembler-ppc.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
#include "src/objects/contexts.h"
namespace v8 {
@@ -46,9 +47,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
#define ClearRightImm clrrwi
#endif
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
void CallBuiltin(Builtin builtin, Condition cond = al);
void TailCallBuiltin(Builtin builtin, Condition cond = al,
@@ -140,6 +141,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
mov(kRootRegister, Operand(isolate_root));
+#ifdef V8_COMPRESS_POINTERS
+ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
+#endif
}
void LoadDoubleLiteral(DoubleRegister result, base::Double value,
@@ -597,6 +602,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LoadRoot(destination, index, al);
}
void LoadRoot(Register destination, RootIndex index, Condition cond);
+ void LoadTaggedRoot(Register destination, RootIndex index);
void SwapP(Register src, Register dst, Register scratch);
void SwapP(Register src, MemOperand dst, Register scratch);
@@ -650,16 +656,27 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments,
- bool has_function_descriptor = true);
- void CallCFunction(Register function, int num_arguments,
- bool has_function_descriptor = true);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments,
- bool has_function_descriptor = true);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments,
- bool has_function_descriptor = true);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
+ bool has_function_descriptor = true);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
+ bool has_function_descriptor = true);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
+ bool has_function_descriptor = true);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
+ bool has_function_descriptor = true);
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
@@ -670,7 +687,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, AbortReason reason,
- CRegister cr = cr7) NOOP_UNLESS_DEBUG_CODE
+ CRegister cr = cr7) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
@@ -726,7 +743,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadEntryFromBuiltin(Builtin builtin, Register destination);
MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
+
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
@@ -818,8 +837,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
void ZeroExtByte(Register dst, Register src);
void ZeroExtHalfWord(Register dst, Register src);
@@ -996,19 +1015,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
- // Loads a field containing a HeapObject and decompresses it if pointer
- // compression is enabled.
- void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch = no_reg);
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch = no_reg);
void LoadTaggedSignedField(Register destination, MemOperand field_operand,
Register scratch);
- // Loads a field containing any tagged value and decompresses it if necessary.
- void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch = no_reg);
-
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
@@ -1016,10 +1029,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
- void DecompressTaggedPointer(Register destination, MemOperand field_operand);
- void DecompressTaggedPointer(Register destination, Register source);
- void DecompressAnyTagged(Register destination, MemOperand field_operand);
- void DecompressAnyTagged(Register destination, Register source);
+ void DecompressTagged(Register destination, MemOperand field_operand);
+ void DecompressTagged(Register destination, Register source);
+ void DecompressTagged(const Register& destination, Tagged_t immediate);
void LoadF64(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -1127,6 +1139,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I16x8SubSatU) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
+ V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16MinS) \
@@ -1142,6 +1155,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I8x16SubSatU) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
+ V(I8x16RoundingAverageU) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
@@ -1173,6 +1187,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I32x4ExtMulHighI16x8S) \
V(I32x4ExtMulLowI16x8U) \
V(I32x4ExtMulHighI16x8U) \
+ V(I32x4DotI16x8S) \
V(I16x8Ne) \
V(I16x8GeS) \
V(I16x8GeU) \
@@ -1180,9 +1195,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I16x8ExtMulHighI8x16S) \
V(I16x8ExtMulLowI8x16U) \
V(I16x8ExtMulHighI8x16U) \
+ V(I16x8Q15MulRSatS) \
+ V(I16x8DotI8x16S) \
V(I8x16Ne) \
V(I8x16GeS) \
- V(I8x16GeU)
+ V(I8x16GeU) \
+ V(I8x16Swizzle)
#define PROTOTYPE_SIMD_BINOP_WITH_SCRATCH(name) \
void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
@@ -1214,6 +1232,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef PROTOTYPE_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
+#define SIMD_BITMASK_LIST(V) \
+ V(I64x2BitMask) \
+ V(I32x4BitMask) \
+ V(I16x8BitMask)
+
+#define PROTOTYPE_SIMD_BITMASK(name) \
+ void name(Register dst, Simd128Register src, Register scratch1, \
+ Simd128Register scratch2);
+ SIMD_BITMASK_LIST(PROTOTYPE_SIMD_BITMASK)
+#undef PROTOTYPE_SIMD_BITMASK
+#undef SIMD_BITMASK_LIST
+
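For readers less familiar with the X-macro idiom used throughout this header, the SIMD_BITMASK_LIST/PROTOTYPE_SIMD_BITMASK pair above simply expands to three ordinary declarations (expansion spelled out for illustration):

    void I64x2BitMask(Register dst, Simd128Register src, Register scratch1,
                      Simd128Register scratch2);
    void I32x4BitMask(Register dst, Simd128Register src, Register scratch1,
                      Simd128Register scratch2);
    void I16x8BitMask(Register dst, Simd128Register src, Register scratch1,
                      Simd128Register scratch2);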
#define SIMD_UNOP_LIST(V) \
V(F64x2Abs) \
V(F64x2Neg) \
@@ -1221,12 +1251,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(F64x2Ceil) \
V(F64x2Floor) \
V(F64x2Trunc) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4Ceil) \
V(F32x4Floor) \
V(F32x4Trunc) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
V(I64x2Neg) \
V(F64x2ConvertLowI32x4S) \
V(I64x2SConvertI32x4Low) \
@@ -1234,6 +1267,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I32x4Neg) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertF32x4) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I8x16Popcnt) \
@@ -1246,8 +1280,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef SIMD_UNOP_LIST
#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
+ V(F32x4DemoteF64x2Zero) \
V(I64x2Abs) \
V(I32x4Abs) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
V(I16x8Abs) \
V(I16x8Neg) \
V(I8x16Abs) \
@@ -1272,6 +1310,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef PROTOTYPE_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
+#define SIMD_QFM_LIST(V) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms)
+#define PROTOTYPE_SIMD_QFM(name) \
+ void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
+ Simd128Register src3, Simd128Register scratch);
+ SIMD_QFM_LIST(PROTOTYPE_SIMD_QFM)
+#undef PROTOTYPE_SIMD_QFM
+#undef SIMD_QFM_LIST
+
+#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U)
+#define PROTOTYPE_SIMD_EXT_ADD_PAIRWISE(name) \
+ void name(Simd128Register dst, Simd128Register src, \
+ Simd128Register scratch1, Simd128Register scratch2);
+ SIMD_EXT_ADD_PAIRWISE_LIST(PROTOTYPE_SIMD_EXT_ADD_PAIRWISE)
+#undef PROTOTYPE_SIMD_EXT_ADD_PAIRWISE
+#undef SIMD_EXT_ADD_PAIRWISE_LIST
+
void LoadSimd128(Simd128Register dst, const MemOperand& mem,
Register scratch);
void StoreSimd128(Simd128Register src, const MemOperand& mem,
@@ -1280,6 +1342,62 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch);
void StoreSimd128LE(Simd128Register src, const MemOperand& mem,
Register scratch1, Simd128Register scratch2);
+ void LoadSimd128Uint64(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void LoadSimd128Uint32(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void LoadSimd128Uint16(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void LoadSimd128Uint8(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void StoreSimd128Uint64(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void StoreSimd128Uint32(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void StoreSimd128Uint16(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void StoreSimd128Uint8(Simd128Register reg, const MemOperand& mem,
+ Register scratch);
+ void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane,
+ Register scratch1, Simd128Register scratch2);
+ void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+  void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1, Simd128Register scratch2);
+ void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1, Simd128Register scratch2);
+ void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
+ void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1, Simd128Register scratch2);
+ void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1, Simd128Register scratch2);
+ void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
+ Register scratch1, Simd128Register scratch2);
void F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch);
void F32x4Splat(Simd128Register dst, DoubleRegister src,
DoubleRegister scratch1, Register scratch2);
@@ -1345,23 +1463,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1, Simd128Register scratch2);
void I16x8UConvertI8x16High(Simd128Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2);
+ void I8x16BitMask(Register dst, Simd128Register src, Register scratch1,
+ Register scratch2, Simd128Register scratch3);
+ void I8x16Shuffle(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, uint64_t high, uint64_t low,
+ Register scratch1, Register scratch2,
+ Simd128Register scratch3);
+ void I32x4DotI8x16AddS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register src3);
void V128AnyTrue(Register dst, Simd128Register src, Register scratch1,
Register scratch2, Simd128Register scratch3);
-
- private:
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments,
- bool has_function_descriptor);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
+ void S128Const(Simd128Register dst, uint64_t high, uint64_t low,
+ Register scratch1, Register scratch2);
+ void S128Select(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register mask);
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
@@ -1397,15 +1512,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
- // At least one slot (for the return address) should be provided.
- void EnterExitFrame(bool save_doubles, int stack_space = 1,
- StackFrame::Type frame_type = StackFrame::EXIT);
+ void EnterExitFrame(int stack_space, StackFrame::Type frame_type);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool argument_count_is_length = false);
+ void LeaveExitFrame(Register argument_count, bool argument_count_is_length);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -1521,7 +1633,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register slot_address);
@@ -1541,24 +1653,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void CallJSEntry(Register target);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
- void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
- }
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
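A short usage sketch of the trimmed-down CallRuntime overloads (the runtime ids and the explicit count are illustrative, not taken from this patch):

    __ CallRuntime(Runtime::kStackGuard);            // nargs from the descriptor
    __ CallRuntime(Runtime::kStackGuardWithGap, 1);  // count given explicitly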
// Convenience function: tail call a runtime routine (jump).
@@ -1568,9 +1673,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -1622,27 +1724,27 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
#endif
// Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
// ---------------------------------------------------------------------------
// Patching helpers.
@@ -1658,13 +1760,20 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg, rc);
}
- void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch1,
- Register scratch2);
+ void TestCodeIsMarkedForDeoptimization(Register code, Register scratch1,
+ Register scratch2);
Operand ClearedValue() const;
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots,
+ bool has_function_descriptor);
+
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index bdcb12b9d2..ee529b50fc 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -29,7 +29,7 @@ namespace internal {
#define MAYBE_ALLOCATEABLE_CONSTANT_POOL_REGISTER(V) V(r28)
#endif
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
#define MAYBE_ALLOCATABLE_CAGE_REGISTERS(V)
#else
#define MAYBE_ALLOCATABLE_CAGE_REGISTERS(V) V(r27)
@@ -146,10 +146,10 @@ constexpr Register no_reg = Register::no_reg();
constexpr Register kConstantPoolRegister = r28; // Constant pool.
constexpr Register kRootRegister = r29; // Roots array pointer.
constexpr Register cp = r30; // JavaScript context pointer.
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
constexpr Register kPtrComprCageBaseRegister = r27; // callee save
#else
-constexpr Register kPtrComprCageBaseRegister = kRootRegister;
+constexpr Register kPtrComprCageBaseRegister = no_reg;
#endif
// Returns the number of padding slots needed for stack pointer alignment.
@@ -308,7 +308,6 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r6;
constexpr Register kJavaScriptCallExtraArg1Register = r5;
-constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r4;
constexpr Register kRuntimeCallArgCountRegister = r3;
constexpr Register kRuntimeCallArgvRegister = r5;
diff --git a/deps/v8/src/codegen/register-base.h b/deps/v8/src/codegen/register-base.h
index fad0684d4b..6d9432360f 100644
--- a/deps/v8/src/codegen/register-base.h
+++ b/deps/v8/src/codegen/register-base.h
@@ -34,16 +34,19 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int8_t code) {
- DCHECK(base::IsInRange(static_cast<int>(code), 0, kNumRegisters - 1));
+ V8_ASSUME(code >= 0 && code < kNumRegisters);
return SubType{code};
}
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int8_t code() const {
- // Only assume that it's positive (not no_reg); arm64 uses
- // kSPRegInternalCode which is > kNumRegisters.
+#if V8_TARGET_ARCH_ARM64
+ // Arm64 uses kSPRegInternalCode which is > kNumRegisters.
V8_ASSUME(reg_code_ >= 0);
+#else
+ V8_ASSUME(reg_code_ >= 0 && reg_code_ < kNumRegisters);
+#endif
return reg_code_;
}
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index a2b1204ff7..a7c4be40c4 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -258,44 +258,59 @@ RelocIterator::RelocIterator(Code code, int mode_mask)
RelocIterator::RelocIterator(Code code, ByteArray relocation_info,
int mode_mask)
- : RelocIterator(code, code.raw_instruction_start(), code.constant_pool(),
+ : RelocIterator(
+ code,
+ InstructionStream::unchecked_cast(code.raw_instruction_stream()),
+ InstructionStream::unchecked_cast(code.raw_instruction_stream())
+ .instruction_start(),
+ code.constant_pool(), relocation_info.GetDataEndAddress(),
+ relocation_info.GetDataStartAddress(), mode_mask) {}
+
+RelocIterator::RelocIterator(Code code, InstructionStream instruction_stream,
+ ByteArray relocation_info, Address constant_pool,
+ int mode_mask)
+ : RelocIterator(code, instruction_stream,
+ instruction_stream.instruction_start(), constant_pool,
relocation_info.GetDataEndAddress(),
relocation_info.GetDataStartAddress(), mode_mask) {}
RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
- : RelocIterator(Code(), code_reference.instruction_start(),
- code_reference.constant_pool(),
- code_reference.relocation_end(),
- code_reference.relocation_start(), mode_mask) {}
+ : RelocIterator(
+ Code(), InstructionStream(), code_reference.instruction_start(),
+ code_reference.constant_pool(), code_reference.relocation_end(),
+ code_reference.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
int mode_mask)
- : RelocIterator(code,
+ : RelocIterator(code, code.instruction_stream(),
embedded_data->InstructionStartOfBuiltin(code.builtin_id()),
code.constant_pool(),
code.relocation_start() + code.relocation_size(),
code.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
- : RelocIterator(Code(), reinterpret_cast<Address>(desc.buffer), 0,
- desc.buffer + desc.buffer_size,
- desc.buffer + desc.buffer_size - desc.reloc_size,
- mode_mask) {}
+ : RelocIterator(
+ Code(), InstructionStream(), reinterpret_cast<Address>(desc.buffer),
+ 0, desc.buffer + desc.buffer_size,
+ desc.buffer + desc.buffer_size - desc.reloc_size, mode_mask) {}
RelocIterator::RelocIterator(base::Vector<byte> instructions,
base::Vector<const byte> reloc_info,
Address const_pool, int mode_mask)
- : RelocIterator(Code(), reinterpret_cast<Address>(instructions.begin()),
- const_pool, reloc_info.begin() + reloc_info.size(),
- reloc_info.begin(), mode_mask) {}
+ : RelocIterator(Code(), InstructionStream(),
+ reinterpret_cast<Address>(instructions.begin()), const_pool,
+ reloc_info.begin() + reloc_info.size(), reloc_info.begin(),
+ mode_mask) {}
-RelocIterator::RelocIterator(Code host, Address pc, Address constant_pool,
- const byte* pos, const byte* end, int mode_mask)
+RelocIterator::RelocIterator(Code code, InstructionStream instruction_stream,
+ Address pc, Address constant_pool, const byte* pos,
+ const byte* end, int mode_mask)
: pos_(pos), end_(end), mode_mask_(mode_mask) {
// Relocation info is read backwards.
DCHECK_GE(pos_, end_);
- rinfo_.host_ = host;
+ rinfo_.code_ = code;
rinfo_.pc_ = pc;
+ rinfo_.instruction_stream_ = instruction_stream;
rinfo_.constant_pool_ = constant_pool;
if (mode_mask_ == 0) pos_ = end_;
next();
@@ -348,10 +363,12 @@ void RelocInfo::set_target_address(Address target,
IsWasmCall(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
- if (!host().is_null() && IsCodeTargetMode(rmode_) &&
+ if (!instruction_stream().is_null() && IsCodeTargetMode(rmode_) &&
!v8_flags.disable_write_barriers) {
- Code target_code = Code::GetCodeFromTargetAddress(target);
- WriteBarrierForCode(host(), this, target_code, write_barrier_mode);
+ InstructionStream target_code =
+ InstructionStream::FromTargetAddress(target);
+ WriteBarrierForCode(instruction_stream(), this, target_code,
+ write_barrier_mode);
}
}
@@ -380,16 +397,6 @@ bool RelocInfo::HasTargetAddressAddress() const {
return (ModeMask(rmode_) & kTargetAddressAddressModeMask) != 0;
}
-bool RelocInfo::RequiresRelocationAfterCodegen(const CodeDesc& desc) {
- RelocIterator it(desc, RelocInfo::PostCodegenRelocationMask());
- return !it.done();
-}
-
-bool RelocInfo::RequiresRelocation(Code code) {
- RelocIterator it(code, RelocInfo::kApplyMask);
- return !it.done();
-}
-
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
@@ -462,15 +469,24 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
<< ")";
} else if (IsCodeTargetMode(rmode_)) {
const Address code_target = target_address();
- Code code = Code::GetCodeFromTargetAddress(code_target);
- DCHECK(code.IsCode());
- os << " (" << CodeKindToString(code.kind());
- if (Builtins::IsBuiltin(code)) {
- os << " " << Builtins::name(code.builtin_id());
+ Code target_code = Code::FromTargetAddress(code_target);
+ os << " (" << CodeKindToString(target_code.kind());
+ if (Builtins::IsBuiltin(target_code)) {
+ os << " " << Builtins::name(target_code.builtin_id());
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsConstPool(rmode_)) {
os << " (size " << static_cast<int>(data_) << ")";
+ } else if (IsWasmStubCall(rmode_)) {
+ os << " (";
+ Address addr = target_address();
+ if (isolate != nullptr) {
+ Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate, addr);
+ os << (Builtins::IsBuiltinId(builtin) ? Builtins::name(builtin)
+ : "<UNRECOGNIZED>")
+ << ") (";
+ }
+ os << reinterpret_cast<const void*>(addr) << ")";
}
os << "\n";
@@ -492,21 +508,18 @@ void RelocInfo::Verify(Isolate* isolate) {
Address addr = target_address();
CHECK_NE(addr, kNullAddress);
// Check that we can find the right code object.
- Code code = Code::GetCodeFromTargetAddress(addr);
- CodeLookupResult lookup_result = isolate->FindCodeObject(addr);
- CHECK(lookup_result.IsFound());
- CHECK_EQ(code.address(), lookup_result.code().address());
+ InstructionStream code = InstructionStream::FromTargetAddress(addr);
+ Code lookup_result = isolate->heap()->FindCodeForInnerPointer(addr);
+ CHECK_EQ(code.address(), lookup_result.instruction_stream().address());
break;
}
case INTERNAL_REFERENCE:
case INTERNAL_REFERENCE_ENCODED: {
Address target = target_internal_reference();
Address pc = target_internal_reference_address();
- CodeLookupResult lookup_result = isolate->FindCodeObject(pc);
- CHECK(lookup_result.IsFound());
- Code code = lookup_result.code();
- CHECK(target >= code.InstructionStart(isolate, pc));
- CHECK(target <= code.InstructionEnd(isolate, pc));
+ Code lookup_result = isolate->heap()->FindCodeForInnerPointer(pc);
+ CHECK_GE(target, lookup_result.InstructionStart());
+ CHECK_LT(target, lookup_result.InstructionEnd());
break;
}
case OFF_HEAP_TARGET: {
@@ -516,6 +529,7 @@ void RelocInfo::Verify(Isolate* isolate) {
OffHeapInstructionStream::TryLookupCode(isolate, addr)));
break;
}
+ case WASM_STUB_CALL:
case NEAR_BUILTIN_ENTRY: {
Address addr = target_address();
CHECK_NE(addr, kNullAddress);
@@ -533,7 +547,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case CONST_POOL:
case VENEER_POOL:
case WASM_CALL:
- case WASM_STUB_CALL:
case NO_INFO:
break;
case NUMBER_OF_MODES:
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index fd74413fc9..0f5b64757c 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -114,12 +114,14 @@ class RelocInfo {
RelocInfo() = default;
- RelocInfo(Address pc, Mode rmode, intptr_t data, Code host,
+ RelocInfo(Address pc, Mode rmode, intptr_t data, Code code,
+ InstructionStream instruction_stream,
Address constant_pool = kNullAddress)
: pc_(pc),
rmode_(rmode),
data_(data),
- host_(host),
+ code_(code),
+ instruction_stream_(instruction_stream),
constant_pool_(constant_pool) {
DCHECK_IMPLIES(!COMPRESS_POINTERS_BOOL,
rmode != COMPRESSED_EMBEDDED_OBJECT);
@@ -213,7 +215,8 @@ class RelocInfo {
Address pc() const { return pc_; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
- Code host() const { return host_; }
+ Code code() const { return code_; }
+ InstructionStream instruction_stream() const { return instruction_stream_; }
Address constant_pool() const { return constant_pool_; }
// Apply a relocation by delta bytes. When the code object is moved, PC
@@ -317,23 +320,18 @@ class RelocInfo {
void Visit(ObjectVisitor* visitor) {
Mode mode = rmode();
if (IsEmbeddedObjectMode(mode)) {
- visitor->VisitEmbeddedPointer(host(), this);
+ visitor->VisitEmbeddedPointer(this);
} else if (IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
+ visitor->VisitCodeTarget(this);
} else if (IsExternalReference(mode)) {
- visitor->VisitExternalReference(host(), this);
+ visitor->VisitExternalReference(this);
} else if (IsInternalReference(mode) || IsInternalReferenceEncoded(mode)) {
- visitor->VisitInternalReference(host(), this);
+ visitor->VisitInternalReference(this);
} else if (IsBuiltinEntryMode(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
+ visitor->VisitOffHeapTarget(this);
}
}
- // Check whether the given code contains relocation information that
- // either is position-relative or movable by the garbage collector.
- static bool RequiresRelocationAfterCodegen(const CodeDesc& desc);
- static bool RequiresRelocation(Code code);
-
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
@@ -359,12 +357,13 @@ class RelocInfo {
// In addition to modes covered by the apply mask (which is applied at GC
// time, among others), this covers all modes that are relocated by
- // Code::CopyFromNoFlush after code generation.
+ // InstructionStream::CopyFromNoFlush after code generation.
static int PostCodegenRelocationMask() {
return ModeMask(RelocInfo::CODE_TARGET) |
ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
ModeMask(RelocInfo::NEAR_BUILTIN_ENTRY) |
+ ModeMask(RelocInfo::WASM_STUB_CALL) |
ModeMask(RelocInfo::RELATIVE_CODE_TARGET) | kApplyMask;
}
@@ -374,7 +373,8 @@ class RelocInfo {
Address pc_;
Mode rmode_;
intptr_t data_ = 0;
- Code host_;
+ Code code_;
+ InstructionStream instruction_stream_;
Address constant_pool_ = kNullAddress;
friend class RelocIterator;
};
@@ -428,12 +428,14 @@ class RelocInfoWriter {
// A mask can be specified to skip unwanted modes.
class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
public:
- // Create a new iterator positioned at
- // the beginning of the reloc info.
- // Relocation information with mode k is included in the
- // iteration iff bit k of mode_mask is set.
+ // Create a new iterator positioned at the beginning of the reloc info.
+ // Relocation information with mode k is included in the iteration iff bit k
+ // of mode_mask is set.
explicit RelocIterator(Code code, int mode_mask = -1);
explicit RelocIterator(Code code, ByteArray relocation_info, int mode_mask);
+ explicit RelocIterator(Code code, InstructionStream instruction_stream,
+ ByteArray relocation_info, Address constant_pool,
+ int mode_mask);
explicit RelocIterator(EmbeddedData* embedded_data, Code code, int mode_mask);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(const CodeReference code_reference,
@@ -457,8 +459,9 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
}
private:
- RelocIterator(Code host, Address pc, Address constant_pool, const byte* pos,
- const byte* end, int mode_mask);
+ RelocIterator(Code code, InstructionStream instruction_stream, Address pc,
+ Address constant_pool, const byte* pos, const byte* end,
+ int mode_mask);
// Advance* moves the position before/after reading.
// *Read* reads from current byte(s) into rinfo_.
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h
index b8d3ededcd..55f191e6af 100644
--- a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h
+++ b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h
@@ -65,7 +65,8 @@ void RelocInfo::apply(intptr_t delta) {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_) ||
+ IsNearBuiltinEntry(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -161,7 +162,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
- return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+ return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
@@ -192,16 +193,16 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_,
- V8HeapCompressionScheme::CompressTagged(target.ptr()),
+ V8HeapCompressionScheme::CompressObject(target.ptr()),
icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
- !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ !instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target);
}
}
@@ -244,7 +245,20 @@ Handle<Code> Assembler::relative_code_target_object_handle_at(
return Handle<Code>::cast(GetEmbeddedObject(code_target_index));
}
-Builtin RelocInfo::target_builtin_at(Assembler* origin) { UNREACHABLE(); }
+Builtin Assembler::target_builtin_at(Address pc) {
+ Instr instr1 = Assembler::instr_at(pc);
+ Instr instr2 = Assembler::instr_at(pc + kInstrSize);
+ DCHECK(IsAuipc(instr1));
+ DCHECK(IsJalr(instr2));
+ int32_t builtin_id = BrachlongOffset(instr1, instr2);
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ return static_cast<Builtin>(builtin_id);
+}
+
+Builtin RelocInfo::target_builtin_at(Assembler* origin) {
+ DCHECK(IsNearBuiltinEntry(rmode_));
+ return Assembler::target_builtin_at(pc_);
+}
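A minimal sketch of how the new accessor pairs with the relocation pass (the surrounding iteration and the `rinfo` variable are assumed):

    // During copy-into-code-space, a NEAR_BUILTIN_ENTRY entry still holds the
    // builtin id; resolve it before the code becomes executable.
    Builtin builtin = Assembler::target_builtin_at(rinfo->pc());
    DCHECK(Builtins::IsBuiltinId(static_cast<int>(builtin)));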
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.cc b/deps/v8/src/codegen/riscv/assembler-riscv.cc
index f8c2e33051..b6844a6f37 100644
--- a/deps/v8/src/codegen/riscv/assembler-riscv.cc
+++ b/deps/v8/src/codegen/riscv/assembler-riscv.cc
@@ -34,6 +34,7 @@
#include "src/codegen/riscv/assembler-riscv.h"
+#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/safepoint-table.h"
@@ -132,10 +133,8 @@ Register ToRegister(int num) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::NEAR_BUILTIN_ENTRY) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -225,13 +224,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
ForceConstantPoolEmissionWithoutJump();
@@ -564,8 +563,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal,
} break;
default: {
// Emitted label constant, not part of a branch.
- // Make label relative to Code pointer of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ // Make label relative to Code pointer of generated InstructionStream
+ // object.
+ instr_at_put(
+ pos, target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag));
} break;
}
disassembleInstr(instr);
@@ -839,7 +840,8 @@ void Assembler::label_at_put(Label* L, int at_offset) {
reinterpret_cast<Instr*>(buffer_start_ + at_offset), at_offset);
if (L->is_bound()) {
target_pos = L->pos();
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ instr_at_put(at_offset, target_pos + (InstructionStream::kHeaderSize -
+ kHeapObjectTag));
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
@@ -884,8 +886,29 @@ void Assembler::EBREAK() {
void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); }
+inline int64_t signExtend(uint64_t V, int N) {
+ return int64_t(V << (64 - N)) >> (64 - N);
+}
+
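Two quick worked values for the sign-extension helper above (chosen only for illustration):

    int64_t a = signExtend(0x800, 12);  // bit 11 set   -> -2048
    int64_t b = signExtend(0x7FF, 12);  // bit 11 clear ->  2047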
#if V8_TARGET_ARCH_RISCV64
void Assembler::RV_li(Register rd, int64_t imm) {
+ UseScratchRegisterScope temps(this);
+ if (RecursiveLiCount(imm) > GeneralLiCount(imm, temps.hasAvailable())) {
+ GeneralLi(rd, imm);
+ } else {
+ RecursiveLi(rd, imm);
+ }
+}
+
+int Assembler::RV_li_count(int64_t imm, bool is_get_temp_reg) {
+ if (RecursiveLiCount(imm) > GeneralLiCount(imm, is_get_temp_reg)) {
+ return GeneralLiCount(imm, is_get_temp_reg);
+ } else {
+ return RecursiveLiCount(imm);
+ }
+}
+
+void Assembler::GeneralLi(Register rd, int64_t imm) {
// 64-bit imm is put in the register rd.
// In most cases the imm is 32 bit and 2 instructions are generated. If a
// temporary register is available, in the worst case, 6 instructions are
@@ -913,6 +936,7 @@ void Assembler::RV_li(Register rd, int64_t imm) {
}
return;
} else {
+ UseScratchRegisterScope temps(this);
// 64-bit case: divide imm into two 32-bit parts, upper and lower
int64_t up_32 = imm >> 32;
int64_t low_32 = imm & 0xffffffffull;
@@ -921,7 +945,6 @@ void Assembler::RV_li(Register rd, int64_t imm) {
if (up_32 == 0 || low_32 == 0) {
// No temp register is needed
} else {
- UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
temp_reg = temps.hasAvailable() ? temps.Acquire() : no_reg;
}
@@ -1037,129 +1060,6 @@ void Assembler::RV_li(Register rd, int64_t imm) {
}
}
-int Assembler::li_estimate(int64_t imm, bool is_get_temp_reg) {
- int count = 0;
- // imitate Assembler::RV_li
- if (is_int32(imm + 0x800)) {
- // 32-bit case. Maximum of 2 instructions generated
- int64_t high_20 = ((imm + 0x800) >> 12);
- int64_t low_12 = imm << 52 >> 52;
- if (high_20) {
- count++;
- if (low_12) {
- count++;
- }
- } else {
- count++;
- }
- return count;
- } else {
- // 64-bit case: divide imm into two 32-bit parts, upper and lower
- int64_t up_32 = imm >> 32;
- int64_t low_32 = imm & 0xffffffffull;
- // Check if a temporary register is available
- if (is_get_temp_reg) {
- // keep track of hardware behavior for lower part in sim_low
- int64_t sim_low = 0;
- // Build lower part
- if (low_32 != 0) {
- int64_t high_20 = ((low_32 + 0x800) >> 12);
- int64_t low_12 = low_32 & 0xfff;
- if (high_20) {
- // Adjust to 20 bits for the case of overflow
- high_20 &= 0xfffff;
- sim_low = ((high_20 << 12) << 32) >> 32;
- count++;
- if (low_12) {
- sim_low += (low_12 << 52 >> 52) | low_12;
- count++;
- }
- } else {
- sim_low = low_12;
- count++;
- }
- }
- if (sim_low & 0x100000000) {
- // Bit 31 is 1. Either an overflow or a negative 64 bit
- if (up_32 == 0) {
- // Positive number, but overflow because of the add 0x800
- count++;
- count++;
- return count;
- }
- // low_32 is a negative 64 bit after the build
- up_32 = (up_32 - 0xffffffff) & 0xffffffff;
- }
- if (up_32 == 0) {
- return count;
- }
- int64_t high_20 = (up_32 + 0x800) >> 12;
- int64_t low_12 = up_32 & 0xfff;
- if (high_20) {
- // Adjust to 20 bits for the case of overflow
- high_20 &= 0xfffff;
- count++;
- if (low_12) {
- count++;
- }
- } else {
- count++;
- }
-      // Put it at the beginning of the register
- count++;
- if (low_32 != 0) {
- count++;
- }
- return count;
- }
- // No temp register. Build imm in rd.
- // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
- // parts to the upper part by doing shift and add.
- // First build upper part in rd.
- int64_t high_20 = (up_32 + 0x800) >> 12;
- int64_t low_12 = up_32 & 0xfff;
- if (high_20) {
- // Adjust to 20 bits for the case of overflow
- high_20 &= 0xfffff;
- count++;
- if (low_12) {
- count++;
- }
- } else {
- count++;
- }
- // upper part already in rd. Each part to be added to rd, has maximum of 11
- // bits, and always starts with a 1. rd is shifted by the size of the part
- // plus the number of zeros between the parts. Each part is added after the
- // left shift.
- uint32_t mask = 0x80000000;
- int32_t i;
- for (i = 0; i < 32; i++) {
- if ((low_32 & mask) == 0) {
- mask >>= 1;
- if (i == 31) {
- // rest is zero
- count++;
- }
- continue;
- }
- // The first 1 seen
- if ((i + 11) < 32) {
- // Pick 11 bits
- count++;
- count++;
- i += 10;
- mask >>= 11;
- } else {
- count++;
- count++;
- break;
- }
- }
- }
- return count;
-}
-
void Assembler::li_ptr(Register rd, int64_t imm) {
// Initialize rd with an address
// Pointers are 48 bits
@@ -1207,7 +1107,7 @@ void Assembler::RV_li(Register rd, int32_t imm) {
}
}
-int Assembler::li_estimate(int32_t imm, bool is_get_temp_reg) {
+int Assembler::RV_li_count(int32_t imm, bool is_get_temp_reg) {
int count = 0;
// imitate Assembler::RV_li
int32_t high_20 = ((imm + 0x800) >> 12);
@@ -1250,8 +1150,8 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
DCHECK(
- (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
- (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
+ (break_as_stop && code <= kMaxStopCode && code > kMaxTracepointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxTracepointCode)));
// since ebreak does not allow additional immediate field, we use the
// immediate field of lui instruction immediately following the ebreak to
@@ -1451,7 +1351,8 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
@@ -1909,5 +1810,326 @@ const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
const size_t ConstantPool::kApproxMaxEntryCount = 512;
+#if defined(V8_TARGET_ARCH_RISCV64)
+// LLVM Code
+//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++
+//-*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM
+// Exceptions. See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+void Assembler::RecursiveLi(Register rd, int64_t val) {
+ if (val > 0 && RecursiveLiImplCount(val) > 2) {
+ unsigned LeadingZeros = base::bits::CountLeadingZeros((uint64_t)val);
+ uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
+ int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
+ if (countFillZero < RecursiveLiImplCount(val)) {
+ RecursiveLiImpl(rd, ShiftedVal);
+ srli(rd, rd, LeadingZeros);
+ return;
+ }
+ }
+ RecursiveLiImpl(rd, val);
+}
+
+int Assembler::RecursiveLiCount(int64_t val) {
+ if (val > 0 && RecursiveLiImplCount(val) > 2) {
+ unsigned LeadingZeros = base::bits::CountLeadingZeros((uint64_t)val);
+ uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
+ // Fill in the bits that will be shifted out with 1s. An example where
+ // this helps is trailing one masks with 32 or more ones. This will
+ // generate ADDI -1 and an SRLI.
+ int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
+ if (countFillZero < RecursiveLiImplCount(val)) {
+ return countFillZero;
+ }
+ }
+ return RecursiveLiImplCount(val);
+}
+
+void Assembler::RecursiveLiImpl(Register rd, int64_t Val) {
+ if (is_int32(Val)) {
+ // Depending on the active bits in the immediate Value v, the following
+ // instruction sequences are emitted:
+ //
+ // v == 0 : ADDI
+ // v[0,12) != 0 && v[12,32) == 0 : ADDI
+ // v[0,12) == 0 && v[12,32) != 0 : LUI
+ // v[0,32) != 0 : LUI+ADDI(W)
+ int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+ int64_t Lo12 = Val << 52 >> 52;
+
+ if (Hi20) {
+ lui(rd, (int32_t)Hi20);
+ }
+
+ if (Lo12 || Hi20 == 0) {
+ if (Hi20) {
+ addiw(rd, rd, Lo12);
+ } else {
+ addi(rd, zero_reg, Lo12);
+ }
+ }
+ return;
+ }
+
+ // In the worst case, for a full 64-bit constant, a sequence of 8
+ // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
+ // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
+ // up to 32 bits while the following ADDI instructions contribute up to 12
+ // bits each.
+ //
+ // On the first glance, implementing this seems to be possible by simply
+ // emitting the most significant 32 bits (LUI+ADDIW) followed by as many
+ // left shift (SLLI) and immediate additions (ADDI) as needed. However, due
+ // to the fact that ADDI performs a sign extended addition, doing it like
+ // that would only be possible when at most 11 bits of the ADDI instructions
+ // are used. Using all 12 bits of the ADDI instructions, like done by GAS,
+ // actually requires that the constant is processed starting with the least
+ // significant bit.
+ //
+ // In the following, constants are processed from LSB to MSB but instruction
+ // emission is performed from MSB to LSB by recursively calling
+ // generateInstSeq. In each recursion, first the lowest 12 bits are removed
+ // from the constant and the optimal shift amount, which can be greater than
+ // 12 bits if the constant is sparse, is determined. Then, the shifted
+ // remaining constant is processed recursively and gets emitted as soon as
+ // it fits into 32 bits. The emission of the shifts and additions is
+ // subsequently performed when the recursion returns.
+
+ int64_t Lo12 = Val << 52 >> 52;
+ int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+ int ShiftAmount = 12 + base::bits::CountTrailingZeros((uint64_t)Hi52);
+ Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+ // If the remaining bits don't fit in 12 bits, we might be able to reduce
+ // the shift amount in order to use LUI which will zero the lower 12 bits.
+ bool Unsigned = false;
+ if (ShiftAmount > 12 && !is_int12(Hi52)) {
+ if (is_int32((uint64_t)Hi52 << 12)) {
+ // Reduce the shift amount and add zeros to the LSBs so it will match
+ // LUI.
+ ShiftAmount -= 12;
+ Hi52 = (uint64_t)Hi52 << 12;
+ }
+ }
+ RecursiveLi(rd, Hi52);
+
+ if (Unsigned) {
+ } else {
+ slli(rd, rd, ShiftAmount);
+ }
+ if (Lo12) {
+ addi(rd, rd, Lo12);
+ }
+}
+
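A concrete trace of the 32-bit branch above, written out as a comment for a constant where both the LUI and the ADDIW part are needed (value chosen purely for illustration):

    // RecursiveLiImpl(rd, 0x12345678):
    //   Hi20 = ((0x12345678 + 0x800) >> 12) & 0xFFFFF = 0x12345
    //   Lo12 = 0x678
    //   emits: lui   rd, 0x12345    // rd = 0x12345000
    //          addiw rd, rd, 0x678  // rd = 0x12345678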
+int Assembler::RecursiveLiImplCount(int64_t Val) {
+ int count = 0;
+ if (is_int32(Val)) {
+ // Depending on the active bits in the immediate Value v, the following
+ // instruction sequences are emitted:
+ //
+ // v == 0 : ADDI
+ // v[0,12) != 0 && v[12,32) == 0 : ADDI
+ // v[0,12) == 0 && v[12,32) != 0 : LUI
+ // v[0,32) != 0 : LUI+ADDI(W)
+ int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+ int64_t Lo12 = Val << 52 >> 52;
+
+ if (Hi20) {
+ // lui(rd, (int32_t)Hi20);
+ count++;
+ }
+
+ if (Lo12 || Hi20 == 0) {
+ // unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
+ // Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
+ count++;
+ }
+ return count;
+ }
+
+ // In the worst case, for a full 64-bit constant, a sequence of 8
+ // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
+ // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
+ // up to 32 bits while the following ADDI instructions contribute up to 12
+ // bits each.
+ //
+ // On the first glance, implementing this seems to be possible by simply
+ // emitting the most significant 32 bits (LUI+ADDIW) followed by as many
+ // left shift (SLLI) and immediate additions (ADDI) as needed. However, due
+ // to the fact that ADDI performs a sign extended addition, doing it like
+ // that would only be possible when at most 11 bits of the ADDI instructions
+ // are used. Using all 12 bits of the ADDI instructions, like done by GAS,
+ // actually requires that the constant is processed starting with the least
+ // significant bit.
+ //
+ // In the following, constants are processed from LSB to MSB but instruction
+ // emission is performed from MSB to LSB by recursively calling
+ // generateInstSeq. In each recursion, first the lowest 12 bits are removed
+ // from the constant and the optimal shift amount, which can be greater than
+ // 12 bits if the constant is sparse, is determined. Then, the shifted
+ // remaining constant is processed recursively and gets emitted as soon as
+ // it fits into 32 bits. The emission of the shifts and additions is
+ // subsequently performed when the recursion returns.
+
+ int64_t Lo12 = Val << 52 >> 52;
+ int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+ int ShiftAmount = 12 + base::bits::CountTrailingZeros((uint64_t)Hi52);
+ Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+ // If the remaining bits don't fit in 12 bits, we might be able to reduce
+ // the shift amount in order to use LUI which will zero the lower 12 bits.
+ bool Unsigned = false;
+ if (ShiftAmount > 12 && !is_int12(Hi52)) {
+ if (is_int32((uint64_t)Hi52 << 12)) {
+ // Reduce the shift amount and add zeros to the LSBs so it will match
+ // LUI.
+ ShiftAmount -= 12;
+ Hi52 = (uint64_t)Hi52 << 12;
+ }
+ }
+
+ count += RecursiveLiImplCount(Hi52);
+
+ if (Unsigned) {
+ } else {
+ // slli(rd, rd, ShiftAmount);
+ count++;
+ }
+ if (Lo12) {
+ // addi(rd, rd, Lo12);
+ count++;
+ }
+ return count;
+}
+
+int Assembler::GeneralLiCount(int64_t imm, bool is_get_temp_reg) {
+ int count = 0;
+ // imitate Assembler::RV_li
+ if (is_int32(imm + 0x800)) {
+ // 32-bit case. Maximum of 2 instructions generated
+ int64_t high_20 = ((imm + 0x800) >> 12);
+ int64_t low_12 = imm << 52 >> 52;
+ if (high_20) {
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ return count;
+ } else {
+ // 64-bit case: divide imm into two 32-bit parts, upper and lower
+ int64_t up_32 = imm >> 32;
+ int64_t low_32 = imm & 0xffffffffull;
+ // Check if a temporary register is available
+ if (is_get_temp_reg) {
+ // keep track of hardware behavior for lower part in sim_low
+ int64_t sim_low = 0;
+ // Build lower part
+ if (low_32 != 0) {
+ int64_t high_20 = ((low_32 + 0x800) >> 12);
+ int64_t low_12 = low_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ sim_low = ((high_20 << 12) << 32) >> 32;
+ count++;
+ if (low_12) {
+ sim_low += (low_12 << 52 >> 52) | low_12;
+ count++;
+ }
+ } else {
+ sim_low = low_12;
+ count++;
+ }
+ }
+ if (sim_low & 0x100000000) {
+ // Bit 31 is 1. Either an overflow or a negative 64 bit
+ if (up_32 == 0) {
+ // Positive number, but overflow because of the add 0x800
+ count++;
+ count++;
+ return count;
+ }
+ // low_32 is a negative 64 bit after the build
+ up_32 = (up_32 - 0xffffffff) & 0xffffffff;
+ }
+ if (up_32 == 0) {
+ return count;
+ }
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+      // Put it at the beginning of the register
+ count++;
+ if (low_32 != 0) {
+ count++;
+ }
+ return count;
+ }
+ // No temp register. Build imm in rd.
+ // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
+ // parts to the upper part by doing shift and add.
+ // First build upper part in rd.
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ // upper part already in rd. Each part to be added to rd, has maximum of
+ // 11 bits, and always starts with a 1. rd is shifted by the size of the
+ // part plus the number of zeros between the parts. Each part is added
+ // after the left shift.
+ uint32_t mask = 0x80000000;
+ int32_t i;
+ for (i = 0; i < 32; i++) {
+ if ((low_32 & mask) == 0) {
+ mask >>= 1;
+ if (i == 31) {
+ // rest is zero
+ count++;
+ }
+ continue;
+ }
+ // The first 1 seen
+ if ((i + 11) < 32) {
+ // Pick 11 bits
+ count++;
+ count++;
+ i += 10;
+ mask >>= 11;
+ } else {
+ count++;
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.h b/deps/v8/src/codegen/riscv/assembler-riscv.h
index c08f82bc77..ed222b52d6 100644
--- a/deps/v8/src/codegen/riscv/assembler-riscv.h
+++ b/deps/v8/src/codegen/riscv/assembler-riscv.h
@@ -239,6 +239,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
+ // During code generation builtin targets in PC-relative call/jump
+ // instructions are temporarily encoded as builtin ID until the generated
+ // code is moved into the code space.
+ static inline Builtin target_builtin_at(Address pc);
+
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
@@ -280,8 +285,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
- inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code code, Address target);
+ inline static void deserialization_set_special_target_at(Address location,
+ Code code,
+ Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -294,10 +300,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
// Here we are patching the address in the LUI/ADDI instruction pair.
// These values are used in the serialization process and must be zero for
- // RISC-V platform, as Code, Embedded Object or External-reference pointers
- // are split across two consecutive instructions and don't exist separately
- // in the code, so the serializer should not step forwards in memory after
- // a target is resolved and written.
+ // RISC-V platform, as InstructionStream, Embedded Object or
+ // External-reference pointers are split across two consecutive instructions
+ // and don't exist separately in the code, so the serializer should not step
+ // forwards in memory after a target is resolved and written.
static constexpr int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit/64bit constant.
@@ -334,7 +340,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
- // Code generation.
+ // InstructionStream generation.
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -366,9 +372,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
// Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA)
void nop();
+#if defined(V8_TARGET_ARCH_RISCV64)
+ void RecursiveLiImpl(Register rd, intptr_t imm);
+ void RecursiveLi(Register rd, intptr_t imm);
+ static int RecursiveLiCount(intptr_t imm);
+ static int RecursiveLiImplCount(intptr_t imm);
void RV_li(Register rd, intptr_t imm);
+ static int RV_li_count(int64_t imm, bool is_get_temp_reg = false);
// Returns the number of instructions required to load the immediate
- static int li_estimate(intptr_t imm, bool is_get_temp_reg = false);
+ void GeneralLi(Register rd, int64_t imm);
+ static int GeneralLiCount(intptr_t imm, bool is_get_temp_reg = false);
+#endif
+#if defined(V8_TARGET_ARCH_RISCV32)
+ void RV_li(Register rd, int32_t imm);
+ static int RV_li_count(int32_t imm, bool is_get_temp_reg = false);
+#endif
// Loads an immediate, always using 8 instructions, regardless of the value,
// so that it can be modified later.
void li_constant(Register rd, intptr_t imm);
@@ -595,6 +613,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
VectorUnit VU;
+ void ClearVectorunit() { VU.clear(); }
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
@@ -681,7 +701,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
// intervals of kBufferCheckInterval emitted bytes.
static constexpr int kBufferCheckInterval = 1 * KB / 2;
- // Code generation.
+ // InstructionStream generation.
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -716,7 +736,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- // Code emission.
+ // InstructionStream emission.
inline void CheckBuffer();
void GrowBuffer();
void emit(Instr x);
diff --git a/deps/v8/src/codegen/riscv/base-assembler-riscv.h b/deps/v8/src/codegen/riscv/base-assembler-riscv.h
index 8bdfd4ecd1..7c2d02b208 100644
--- a/deps/v8/src/codegen/riscv/base-assembler-riscv.h
+++ b/deps/v8/src/codegen/riscv/base-assembler-riscv.h
@@ -78,6 +78,8 @@ class AssemblerRiscvBase {
virtual void emit(Instr x) = 0;
virtual void emit(ShortInstr x) = 0;
virtual void emit(uint64_t x) = 0;
+
+ virtual void ClearVectorunit() = 0;
// Instruction generation.
// ----- Top-level instruction formats match those in the ISA manual
diff --git a/deps/v8/src/codegen/riscv/base-constants-riscv.h b/deps/v8/src/codegen/riscv/base-constants-riscv.h
index bc38bfabc9..9acbf86f15 100644
--- a/deps/v8/src/codegen/riscv/base-constants-riscv.h
+++ b/deps/v8/src/codegen/riscv/base-constants-riscv.h
@@ -17,8 +17,9 @@
#define UNIMPLEMENTED_RISCV()
#endif
-#define UNSUPPORTED_RISCV() \
- v8::internal::PrintF("Unsupported instruction %d.\n", __LINE__)
+#define UNSUPPORTED_RISCV() \
+ v8::internal::PrintF("Unsupported instruction %d.\n", __LINE__); \
+ UNIMPLEMENTED();
enum Endianness { kLittle, kBig };
@@ -53,10 +54,10 @@ const uint32_t kLessSignificantWordInDoublewordOffset = 4;
// Try https://content.riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf.
namespace v8 {
namespace internal {
+using Opcode = uint32_t;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 256;
#define RVV_LMUL(V) \
@@ -198,9 +199,33 @@ enum SoftwareInterruptCodes {
// instructions (see Assembler::stop()).
// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
// debugger.
+const uint32_t kMaxTracepointCode = 63;
const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;
static_assert(kMaxWatchpointCode < kMaxStopCode);
+static_assert(kMaxTracepointCode < kMaxStopCode);
+
+// Debug parameters.
+//
+// For example:
+//
+// __ Debug(TRACE_ENABLE | LOG_TRACE);
+// starts tracing, as if v8_flags.trace_sim were set to true.
+// __ Debug(TRACE_ENABLE | LOG_REGS);
+// prints all registers.
+// __ Debug(TRACE_DISABLE | LOG_TRACE);
+// stops tracing, as if v8_flags.trace_sim were set to false.
+const unsigned kDebuggerTracingDirectivesMask = 0b111 << 3;
+enum DebugParameters : uint32_t {
+ NO_PARAM = 1 << 5,
+ BREAK = 1 << 0,
+ LOG_TRACE = 1 << 1,
+ LOG_REGS = 1 << 2,
+ LOG_ALL = LOG_TRACE,
+ // Trace control.
+ TRACE_ENABLE = 1 << 3 | NO_PARAM,
+ TRACE_DISABLE = 1 << 4 | NO_PARAM,
+};
// ----- Fields offset and length.
// RISCV constants
@@ -288,8 +313,17 @@ const uint32_t kRvcBImm8Mask = (((1 << 5) - 1) << 2) | (((1 << 3) - 1) << 10);
// for RVV extension
constexpr int kRvvELEN = 64;
+#ifdef RVV_VLEN
+constexpr int kRvvVLEN = RVV_VLEN;
+// TODO(riscv): support rvv 256/512/1024
+static_assert(
+    kRvvVLEN == 128,
+    "The current RISC-V backend only supports a 128-bit wide VLEN for RVV.");
+#else
constexpr int kRvvVLEN = 128;
+#endif
constexpr int kRvvSLEN = kRvvVLEN;
+
const int kRvvFunct6Shift = 26;
const int kRvvFunct6Bits = 6;
const uint32_t kRvvFunct6Mask =
@@ -417,6 +451,22 @@ enum Condition { // Any value < 0 is considered no_condition.
uge = Ugreater_equal,
ule = Uless_equal,
ugt = Ugreater,
+
+ // Unified cross-platform condition names/aliases.
+ kEqual = equal,
+ kNotEqual = not_equal,
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+ kZero = equal,
+ kNotZero = not_equal,
};
// Returns the equivalent of !cc.
@@ -489,7 +539,7 @@ enum ControlStatusReg {
enum FFlagsMask {
kInvalidOperation = 0b10000, // NV: Invalid
kDivideByZero = 0b1000, // DZ: Divide by Zero
- kOverflow = 0b100, // OF: Overflow
+ kFPUOverflow = 0b100, // OF: Overflow
kUnderflow = 0b10, // UF: Underflow
kInexact = 0b1 // NX: Inexact
};
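
The DebugParameters added above pack a tracing directive into bits 3-5 (covered by kDebuggerTracingDirectivesMask) and the logging selection into the low bits, so a single immediate passed to __ Debug() carries both. Below is a minimal standalone sketch of that decoding, reusing only the values from the hunk above; the HandleDebugParameter helper and the main() harness are illustrative, not the simulator's actual handler.

#include <cstdint>
#include <cstdio>

// Values restated from the DebugParameters enum in the hunk above.
constexpr uint32_t BREAK = 1 << 0, LOG_TRACE = 1 << 1, LOG_REGS = 1 << 2;
constexpr uint32_t NO_PARAM = 1 << 5;
constexpr uint32_t TRACE_ENABLE = 1 << 3 | NO_PARAM;
constexpr uint32_t TRACE_DISABLE = 1 << 4 | NO_PARAM;
constexpr uint32_t kDebuggerTracingDirectivesMask = 0b111 << 3;

// Illustrative handler: split a debug parameter into its tracing directive
// and its logging selection.
void HandleDebugParameter(uint32_t param) {
  uint32_t directive = param & kDebuggerTracingDirectivesMask;
  uint32_t logging = param & (LOG_TRACE | LOG_REGS);
  if (directive == TRACE_ENABLE && (logging & LOG_TRACE) != 0) {
    std::printf("start tracing executed instructions\n");
  } else if (directive == TRACE_DISABLE && (logging & LOG_TRACE) != 0) {
    std::printf("stop tracing executed instructions\n");
  } else if ((param & BREAK) != 0) {
    std::printf("drop into the debugger\n");
  }
}

int main() {
  HandleDebugParameter(TRACE_ENABLE | LOG_TRACE);   // as in the comment above
  HandleDebugParameter(TRACE_DISABLE | LOG_TRACE);  // stops tracing again
}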
diff --git a/deps/v8/src/codegen/riscv/base-riscv-i.cc b/deps/v8/src/codegen/riscv/base-riscv-i.cc
index 19687c9370..a3d7029248 100644
--- a/deps/v8/src/codegen/riscv/base-riscv-i.cc
+++ b/deps/v8/src/codegen/riscv/base-riscv-i.cc
@@ -18,11 +18,13 @@ void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
GenInstrJ(JAL, rd, imm21);
+ ClearVectorunit();
BlockTrampolinePoolFor(1);
}
void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
GenInstrI(0b000, JALR, rd, rs1, imm12);
+ ClearVectorunit();
BlockTrampolinePoolFor(1);
}
@@ -30,26 +32,32 @@ void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
+ ClearVectorunit();
}
void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
+ ClearVectorunit();
}
void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
+ ClearVectorunit();
}
void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
+ ClearVectorunit();
}
void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
+ ClearVectorunit();
}
void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
+ ClearVectorunit();
}
// Loads
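
The new ClearVectorunit() calls after every jump and branch above suggest that the assembler caches the active vector configuration in VU and must not trust that cache across control flow, since the next instruction can be reached from a path that established a different vtype/vl. The sketch below illustrates that caching pattern; the VectorUnitState class and its members are invented for illustration and are not V8's actual VectorUnit implementation.

#include <cstdint>
#include <cstdio>

// Invented stand-in for the assembler's cached vector-unit configuration.
class VectorUnitState {
 public:
  // Returns true when a vsetvli actually needs to be emitted.
  bool NeedsVsetvl(uint32_t sew, uint32_t lmul) {
    if (valid_ && sew_ == sew && lmul_ == lmul) return false;  // reuse cache
    sew_ = sew;
    lmul_ = lmul;
    valid_ = true;
    return true;
  }
  // Called right after emitting a branch or jump: the following instruction
  // may be reached from elsewhere, so the cached configuration is stale.
  void clear() { valid_ = false; }

 private:
  bool valid_ = false;
  uint32_t sew_ = 0;
  uint32_t lmul_ = 0;
};

int main() {
  VectorUnitState vu;
  std::printf("%d\n", vu.NeedsVsetvl(32, 1));  // 1: first use, emit vsetvli
  std::printf("%d\n", vu.NeedsVsetvl(32, 1));  // 0: configuration is cached
  vu.clear();                                  // e.g. after a beq/jal above
  std::printf("%d\n", vu.NeedsVsetvl(32, 1));  // 1: re-emit after the branch
}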
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-a.h b/deps/v8/src/codegen/riscv/constant-riscv-a.h
index afd335ce59..c5090054c6 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-a.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-a.h
@@ -8,35 +8,56 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCVA : uint32_t {
- // RV32A Standard Extension
- RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift),
- RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift),
- RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift),
- RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift),
- RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift),
- RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift),
- RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift),
- RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift),
- RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift),
- RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift),
- RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+// RV32A Standard Extension
+constexpr Opcode RO_LR_W =
+ AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift);
+constexpr Opcode RO_SC_W =
+ AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift);
+constexpr Opcode RO_AMOSWAP_W =
+ AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift);
+constexpr Opcode RO_AMOADD_W =
+ AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift);
+constexpr Opcode RO_AMOXOR_W =
+ AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift);
+constexpr Opcode RO_AMOAND_W =
+ AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift);
+constexpr Opcode RO_AMOOR_W =
+ AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift);
+constexpr Opcode RO_AMOMIN_W =
+ AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift);
+constexpr Opcode RO_AMOMAX_W =
+ AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift);
+constexpr Opcode RO_AMOMINU_W =
+ AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift);
+constexpr Opcode RO_AMOMAXU_W =
+ AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift);
#ifdef V8_TARGET_ARCH_RISCV64
// RV64A Standard Extension (in addition to RV32A)
- RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift),
- RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift),
- RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift),
- RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift),
- RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift),
- RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift),
- RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift),
- RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift),
- RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift),
- RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift),
- RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+constexpr Opcode RO_LR_D =
+ AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift);
+constexpr Opcode RO_SC_D =
+ AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift);
+constexpr Opcode RO_AMOSWAP_D =
+ AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift);
+constexpr Opcode RO_AMOADD_D =
+ AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift);
+constexpr Opcode RO_AMOXOR_D =
+ AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift);
+constexpr Opcode RO_AMOAND_D =
+ AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift);
+constexpr Opcode RO_AMOOR_D =
+ AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift);
+constexpr Opcode RO_AMOMIN_D =
+ AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift);
+constexpr Opcode RO_AMOMAX_D =
+ AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift);
+constexpr Opcode RO_AMOMINU_D =
+ AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift);
+constexpr Opcode RO_AMOMAXU_D =
+ AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift);
#endif // V8_TARGET_ARCH_RISCV64
-};
+// clang-format on
} // namespace internal
} // namespace v8
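
With Opcode now a plain uint32_t alias, each RO_* constant above is just the AMO base opcode OR'ed with funct3 and funct5 at their encoding positions, which is why the definitions can move out of a single enum and into per-extension, #ifdef-guarded headers. The self-contained check below verifies that composition for amoadd.w; the shift values and the AMO opcode are restated here from the RISC-V base encoding (funct3 at bits 14:12, funct5 at bits 31:27, major opcode 0b0101111) rather than included from the V8 headers.

#include <cstdint>
#include <cstdio>

using Opcode = uint32_t;

// Field positions restated from the RISC-V encoding, not from the V8 headers.
constexpr int kFunct3Shift = 12;
constexpr int kFunct5Shift = 27;
constexpr Opcode AMO = 0b0101111;  // major opcode of the A extension

// Same composition as RO_AMOADD_W above: base opcode | funct3 | funct5.
constexpr Opcode RO_AMOADD_W =
    AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift);

// The fields can be recovered by masking the same positions.
constexpr uint32_t Funct3(Opcode op) { return (op >> kFunct3Shift) & 0b111; }
constexpr uint32_t Funct5(Opcode op) { return (op >> kFunct5Shift) & 0b11111; }

static_assert(RO_AMOADD_W == 0x0000202F, "amoadd.w template word");
static_assert(Funct3(RO_AMOADD_W) == 0b010 && Funct5(RO_AMOADD_W) == 0b00000,
              "field round-trip");

int main() { std::printf("RO_AMOADD_W = 0x%08x\n", RO_AMOADD_W); }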
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-c.h b/deps/v8/src/codegen/riscv/constant-riscv-c.h
index 2f8a504780..1377792f45 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-c.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-c.h
@@ -8,55 +8,57 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCVC : uint32_t {
+constexpr Opcode RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift);
+constexpr Opcode RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift);
+constexpr Opcode RO_C_LW = C0 | (0b010 << kRvcFunct3Shift);
+constexpr Opcode RO_C_SW = C0 | (0b110 << kRvcFunct3Shift);
+constexpr Opcode RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift);
+constexpr Opcode RO_C_LI = C1 | (0b010 << kRvcFunct3Shift);
+constexpr Opcode RO_C_SUB =
+ C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift);
+constexpr Opcode RO_C_XOR =
+ C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift);
+constexpr Opcode RO_C_OR =
+ C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift);
+constexpr Opcode RO_C_AND =
+ C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift);
+constexpr Opcode RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift);
+constexpr Opcode RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift);
+constexpr Opcode RO_C_J = C1 | (0b101 << kRvcFunct3Shift);
+constexpr Opcode RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift);
+constexpr Opcode RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift);
+constexpr Opcode RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift);
+constexpr Opcode RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift);
+constexpr Opcode RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift);
+constexpr Opcode RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift);
+constexpr Opcode RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift);
+constexpr Opcode RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift);
+constexpr Opcode RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift);
+constexpr Opcode RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift);
+constexpr Opcode RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift);
- RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift),
- RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift),
- RO_C_LW = C0 | (0b010 << kRvcFunct3Shift),
- RO_C_SW = C0 | (0b110 << kRvcFunct3Shift),
- RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift),
- RO_C_LI = C1 | (0b010 << kRvcFunct3Shift),
- RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
- RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
- RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift),
- RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift),
- RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift),
- RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift),
- RO_C_J = C1 | (0b101 << kRvcFunct3Shift),
- RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift),
- RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift),
- RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift),
- RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift),
- RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift),
- RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift),
- RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift),
- RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift),
- RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift),
- RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift),
- RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
-
- RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift),
- RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift),
- RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift),
- RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
+constexpr Opcode RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift);
+constexpr Opcode RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift);
+constexpr Opcode RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift);
+constexpr Opcode RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift);
#ifdef V8_TARGET_ARCH_RISCV64
- RO_C_LD = C0 | (0b011 << kRvcFunct3Shift),
- RO_C_SD = C0 | (0b111 << kRvcFunct3Shift),
- RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift),
- RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
- RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift),
- RO_C_SUBW =
- C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
- RO_C_ADDW =
- C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+constexpr Opcode RO_C_LD = C0 | (0b011 << kRvcFunct3Shift);
+constexpr Opcode RO_C_SD = C0 | (0b111 << kRvcFunct3Shift);
+constexpr Opcode RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift);
+constexpr Opcode RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift);
+constexpr Opcode RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift);
+constexpr Opcode RO_C_SUBW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift);
+constexpr Opcode RO_C_ADDW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift);
#endif
#ifdef V8_TARGET_ARCH_RISCV32
- RO_C_FLWSP = C2 | (0b011 << kRvcFunct3Shift),
- RO_C_FSWSP = C2 | (0b111 << kRvcFunct3Shift),
- RO_C_FLW = C0 | (0b011 << kRvcFunct3Shift),
- RO_C_FSW = C0 | (0b111 << kRvcFunct3Shift),
+constexpr Opcode RO_C_FLWSP = C2 | (0b011 << kRvcFunct3Shift);
+constexpr Opcode RO_C_FSWSP = C2 | (0b111 << kRvcFunct3Shift);
+constexpr Opcode RO_C_FLW = C0 | (0b011 << kRvcFunct3Shift);
+constexpr Opcode RO_C_FSW = C0 | (0b111 << kRvcFunct3Shift);
#endif
-};
+// clang-format on
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-d.h b/deps/v8/src/codegen/riscv/constant-riscv-d.h
index 3fd0b251bd..8f0f2fdcce 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-d.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-d.h
@@ -7,48 +7,69 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCVD : uint32_t {
- // RV32D Standard Extension
- RO_FLD = LOAD_FP | (0b011 << kFunct3Shift),
- RO_FSD = STORE_FP | (0b011 << kFunct3Shift),
- RO_FMADD_D = MADD | (0b01 << kFunct2Shift),
- RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift),
- RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift),
- RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift),
- RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift),
- RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift),
- RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift),
- RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift),
- RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
- RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
- RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
- RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
- RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
- RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
- RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
- RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
- RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
- RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
- (0b00000 << kRs2Shift),
- RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift),
- RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+// RV32D Standard Extension
+constexpr Opcode RO_FLD = LOAD_FP | (0b011 << kFunct3Shift);
+constexpr Opcode RO_FSD = STORE_FP | (0b011 << kFunct3Shift);
+constexpr Opcode RO_FMADD_D = MADD | (0b01 << kFunct2Shift);
+constexpr Opcode RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift);
+constexpr Opcode RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift);
+constexpr Opcode RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift);
+constexpr Opcode RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift);
+constexpr Opcode RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift);
+constexpr Opcode RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift);
+constexpr Opcode RO_FSQRT_D =
+ OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FSGNJ_D =
+ OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift);
+constexpr Opcode RO_FSGNJN_D =
+ OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift);
+constexpr Opcode RO_FSQNJX_D =
+ OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift);
+constexpr Opcode RO_FMIN_D =
+ OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift);
+constexpr Opcode RO_FMAX_D =
+ OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift);
+constexpr Opcode RO_FCVT_S_D =
+ OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift);
+constexpr Opcode RO_FCVT_D_S =
+ OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FEQ_D =
+ OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift);
+constexpr Opcode RO_FLT_D =
+ OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift);
+constexpr Opcode RO_FLE_D =
+ OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift);
+constexpr Opcode RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) |
+ (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift);
+constexpr Opcode RO_FCVT_W_D =
+ OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FCVT_WU_D =
+ OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift);
+constexpr Opcode RO_FCVT_D_W =
+ OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FCVT_D_WU =
+ OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift);
#ifdef V8_TARGET_ARCH_RISCV64
// RV64D Standard Extension (in addition to RV32D)
- RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift),
- RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift),
- RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
- (0b00000 << kRs2Shift),
- RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift),
- RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift),
- RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) |
- (0b00000 << kRs2Shift),
+constexpr Opcode RO_FCVT_L_D =
+ OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift);
+constexpr Opcode RO_FCVT_LU_D =
+ OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift);
+constexpr Opcode RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) |
+ (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift);
+constexpr Opcode RO_FCVT_D_L =
+ OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift);
+constexpr Opcode RO_FCVT_D_LU =
+ OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift);
+constexpr Opcode RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) |
+ (0b1111001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift);
#endif
-};
+// clang-format on
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-f.h b/deps/v8/src/codegen/riscv/constant-riscv-f.h
index fc742e7d57..c90dc70e20 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-f.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-f.h
@@ -7,44 +7,62 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCVF : uint32_t {
- // RV32F Standard Extension
- RO_FLW = LOAD_FP | (0b010 << kFunct3Shift),
- RO_FSW = STORE_FP | (0b010 << kFunct3Shift),
- RO_FMADD_S = MADD | (0b00 << kFunct2Shift),
- RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift),
- RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift),
- RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift),
- RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift),
- RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift),
- RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift),
- RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift),
- RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
- RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
- RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
- RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
- RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
- RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
- RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) |
- (0b00000 << kRs2Shift),
- RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
- RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
- RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
- RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift),
- RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift),
- RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift),
- RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift),
+// RV32F Standard Extension
+constexpr Opcode RO_FLW = LOAD_FP | (0b010 << kFunct3Shift);
+constexpr Opcode RO_FSW = STORE_FP | (0b010 << kFunct3Shift);
+constexpr Opcode RO_FMADD_S = MADD | (0b00 << kFunct2Shift);
+constexpr Opcode RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift);
+constexpr Opcode RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift);
+constexpr Opcode RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift);
+constexpr Opcode RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift);
+constexpr Opcode RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift);
+constexpr Opcode RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift);
+constexpr Opcode RO_FSQRT_S =
+ OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FSGNJ_S =
+ OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift);
+constexpr Opcode RO_FSGNJN_S =
+ OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift);
+constexpr Opcode RO_FSQNJX_S =
+ OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift);
+constexpr Opcode RO_FMIN_S =
+ OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift);
+constexpr Opcode RO_FMAX_S =
+ OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift);
+constexpr Opcode RO_FCVT_W_S =
+ OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FCVT_WU_S =
+ OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift);
+constexpr Opcode RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) |
+ (0b000 << kFunct3Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FEQ_S =
+ OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift);
+constexpr Opcode RO_FLT_S =
+ OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift);
+constexpr Opcode RO_FLE_S =
+ OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift);
+constexpr Opcode RO_FCLASS_S =
+ OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift);
+constexpr Opcode RO_FCVT_S_W =
+ OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift);
+constexpr Opcode RO_FCVT_S_WU =
+ OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift);
+constexpr Opcode RO_FMV_W_X =
+ OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift);
#ifdef V8_TARGET_ARCH_RISCV64
// RV64F Standard Extension (in addition to RV32F)
- RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift),
- RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift),
- RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift),
- RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+constexpr Opcode RO_FCVT_L_S =
+ OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift);
+constexpr Opcode RO_FCVT_LU_S =
+ OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift);
+constexpr Opcode RO_FCVT_S_L =
+ OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift);
+constexpr Opcode RO_FCVT_S_LU =
+ OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift);
#endif // V8_TARGET_ARCH_RISCV64
-};
+// clang-format on
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-i.h b/deps/v8/src/codegen/riscv/constant-riscv-i.h
index 75c6c44565..d19010f933 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-i.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-i.h
@@ -7,66 +7,80 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCV32I : uint32_t {
- // Note use RO (RiscV Opcode) prefix
- // RV32I Base Instruction Set
- RO_LUI = LUI,
- RO_AUIPC = AUIPC,
- RO_JAL = JAL,
- RO_JALR = JALR | (0b000 << kFunct3Shift),
- RO_BEQ = BRANCH | (0b000 << kFunct3Shift),
- RO_BNE = BRANCH | (0b001 << kFunct3Shift),
- RO_BLT = BRANCH | (0b100 << kFunct3Shift),
- RO_BGE = BRANCH | (0b101 << kFunct3Shift),
- RO_BLTU = BRANCH | (0b110 << kFunct3Shift),
- RO_BGEU = BRANCH | (0b111 << kFunct3Shift),
- RO_LB = LOAD | (0b000 << kFunct3Shift),
- RO_LH = LOAD | (0b001 << kFunct3Shift),
- RO_LW = LOAD | (0b010 << kFunct3Shift),
- RO_LBU = LOAD | (0b100 << kFunct3Shift),
- RO_LHU = LOAD | (0b101 << kFunct3Shift),
- RO_SB = STORE | (0b000 << kFunct3Shift),
- RO_SH = STORE | (0b001 << kFunct3Shift),
- RO_SW = STORE | (0b010 << kFunct3Shift),
- RO_ADDI = OP_IMM | (0b000 << kFunct3Shift),
- RO_SLTI = OP_IMM | (0b010 << kFunct3Shift),
- RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift),
- RO_XORI = OP_IMM | (0b100 << kFunct3Shift),
- RO_ORI = OP_IMM | (0b110 << kFunct3Shift),
- RO_ANDI = OP_IMM | (0b111 << kFunct3Shift),
- RO_SLLI = OP_IMM | (0b001 << kFunct3Shift),
- RO_SRLI = OP_IMM | (0b101 << kFunct3Shift),
- // RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use func7
- RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
- RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
- RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift),
- RO_ECALL = SYSTEM | (0b000 << kFunct3Shift),
+// Note use RO (RiscV Opcode) prefix
+// RV32I Base Instruction Set
+constexpr Opcode RO_LUI = LUI;
+constexpr Opcode RO_AUIPC = AUIPC;
+constexpr Opcode RO_JAL = JAL;
+constexpr Opcode RO_JALR = JALR | (0b000 << kFunct3Shift);
+constexpr Opcode RO_BEQ = BRANCH | (0b000 << kFunct3Shift);
+constexpr Opcode RO_BNE = BRANCH | (0b001 << kFunct3Shift);
+constexpr Opcode RO_BLT = BRANCH | (0b100 << kFunct3Shift);
+constexpr Opcode RO_BGE = BRANCH | (0b101 << kFunct3Shift);
+constexpr Opcode RO_BLTU = BRANCH | (0b110 << kFunct3Shift);
+constexpr Opcode RO_BGEU = BRANCH | (0b111 << kFunct3Shift);
+constexpr Opcode RO_LB = LOAD | (0b000 << kFunct3Shift);
+constexpr Opcode RO_LH = LOAD | (0b001 << kFunct3Shift);
+constexpr Opcode RO_LW = LOAD | (0b010 << kFunct3Shift);
+constexpr Opcode RO_LBU = LOAD | (0b100 << kFunct3Shift);
+constexpr Opcode RO_LHU = LOAD | (0b101 << kFunct3Shift);
+constexpr Opcode RO_SB = STORE | (0b000 << kFunct3Shift);
+constexpr Opcode RO_SH = STORE | (0b001 << kFunct3Shift);
+constexpr Opcode RO_SW = STORE | (0b010 << kFunct3Shift);
+constexpr Opcode RO_ADDI = OP_IMM | (0b000 << kFunct3Shift);
+constexpr Opcode RO_SLTI = OP_IMM | (0b010 << kFunct3Shift);
+constexpr Opcode RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift);
+constexpr Opcode RO_XORI = OP_IMM | (0b100 << kFunct3Shift);
+constexpr Opcode RO_ORI = OP_IMM | (0b110 << kFunct3Shift);
+constexpr Opcode RO_ANDI = OP_IMM | (0b111 << kFunct3Shift);
+constexpr Opcode RO_SLLI = OP_IMM | (0b001 << kFunct3Shift);
+constexpr Opcode RO_SRLI = OP_IMM | (0b101 << kFunct3Shift);
+// RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use func7
+constexpr Opcode RO_ADD =
+ OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SUB =
+ OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift);
+constexpr Opcode RO_SLL =
+ OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SLT =
+ OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SLTU =
+ OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_XOR =
+ OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SRL =
+ OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SRA =
+ OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift);
+constexpr Opcode RO_OR =
+ OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_AND =
+ OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift);
+constexpr Opcode RO_ECALL = SYSTEM | (0b000 << kFunct3Shift);
// RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12
#if V8_TARGET_ARCH_RISCV64
// RV64I Base Instruction Set (in addition to RV32I)
- RO_LWU = LOAD | (0b110 << kFunct3Shift),
- RO_LD = LOAD | (0b011 << kFunct3Shift),
- RO_SD = STORE | (0b011 << kFunct3Shift),
- RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift),
- RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift),
- RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift),
- // RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use func7
- RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
- RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
- RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+constexpr Opcode RO_LWU = LOAD | (0b110 << kFunct3Shift);
+constexpr Opcode RO_LD = LOAD | (0b011 << kFunct3Shift);
+constexpr Opcode RO_SD = STORE | (0b011 << kFunct3Shift);
+constexpr Opcode RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift);
+constexpr Opcode RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift);
+constexpr Opcode RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift);
+// RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use func7
+constexpr Opcode RO_ADDW =
+ OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SUBW =
+ OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift);
+constexpr Opcode RO_SLLW =
+ OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SRLW =
+ OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift);
+constexpr Opcode RO_SRAW =
+ OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift);
#endif
-};
+// clang-format on
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-m.h b/deps/v8/src/codegen/riscv/constant-riscv-m.h
index 2ad1ffd1b5..a5c349f6d8 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-m.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-m.h
@@ -8,26 +8,38 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCVM : uint32_t {
- // RV32M Standard Extension
- RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+// RV32M Standard Extension
+constexpr Opcode RO_MUL =
+ OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_MULH =
+ OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_MULHSU =
+ OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_MULHU =
+ OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_DIV =
+ OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_DIVU =
+ OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_REM =
+ OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_REMU =
+ OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
#ifdef V8_TARGET_ARCH_RISCV64
- // RV64M Standard Extension (in addition to RV32M)
- RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_REMW = OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
- RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+// RV64M Standard Extension (in addition to RV32M)
+constexpr Opcode RO_MULW =
+ OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_DIVW =
+ OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_DIVUW =
+ OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_REMW =
+ OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
+constexpr Opcode RO_REMUW =
+ OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift);
#endif
-};
+// clang-format on
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-v.h b/deps/v8/src/codegen/riscv/constant-riscv-v.h
index 30ff0c1a24..b5dddcc666 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-v.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-v.h
@@ -8,485 +8,554 @@
namespace v8 {
namespace internal {
-enum OpcodeRISCVV : uint32_t {
- // RVV Extension
- OP_IVV = OP_V | (0b000 << kFunct3Shift),
- OP_FVV = OP_V | (0b001 << kFunct3Shift),
- OP_MVV = OP_V | (0b010 << kFunct3Shift),
- OP_IVI = OP_V | (0b011 << kFunct3Shift),
- OP_IVX = OP_V | (0b100 << kFunct3Shift),
- OP_FVF = OP_V | (0b101 << kFunct3Shift),
- OP_MVX = OP_V | (0b110 << kFunct3Shift),
-
- RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
- RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
- RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,
-
- // RVV LOAD/STORE
- RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
- RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
- RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
-
- RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
- RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
- RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
- RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
- // THE kFunct6Shift is mop
- RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
- RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
- RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
- RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
- RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
- RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
- RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
-
- RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
- RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
- RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
- RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
- RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
- RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
- RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
-
- RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
- RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
- RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
- RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
- RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
- RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
- RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
-
- RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
- RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
- RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
- RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
- RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
- RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
- RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
-
- RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
- RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
- RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
- RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
- RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
- RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
- RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
-
- RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
- RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
- RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
- RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
- RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
- RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
- RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
-
- // RVV Vector Arithmetic Instruction
- VADD_FUNCT6 = 0b000000,
- RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),
-
- VSUB_FUNCT6 = 0b000010,
- RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
-
- VDIVU_FUNCT6 = 0b100000,
- RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),
-
- VDIV_FUNCT6 = 0b100001,
- RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
- RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),
-
- VREMU_FUNCT6 = 0b100010,
- RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),
-
- VREM_FUNCT6 = 0b100011,
- RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
- RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),
-
- VMULHU_FUNCT6 = 0b100100,
- RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),
-
- VMUL_FUNCT6 = 0b100101,
- RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
-
- VWMUL_FUNCT6 = 0b111011,
- RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),
-
- VWMULU_FUNCT6 = 0b111000,
- RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),
-
- VMULHSU_FUNCT6 = 0b100110,
- RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
-
- VMULH_FUNCT6 = 0b100111,
- RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
-
- VWADD_FUNCT6 = 0b110001,
- RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),
-
- VWADDU_FUNCT6 = 0b110000,
- RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
-
- VWADDUW_FUNCT6 = 0b110101,
- RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
- RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
-
- VCOMPRESS_FUNCT6 = 0b010111,
- RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),
-
- VSADDU_FUNCT6 = 0b100000,
- RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),
-
- VSADD_FUNCT6 = 0b100001,
- RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),
-
- VSSUB_FUNCT6 = 0b100011,
- RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),
-
- VSSUBU_FUNCT6 = 0b100010,
- RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
-
- VRSUB_FUNCT6 = 0b000011,
- RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),
-
- VMINU_FUNCT6 = 0b000100,
- RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),
-
- VMIN_FUNCT6 = 0b000101,
- RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),
-
- VMAXU_FUNCT6 = 0b000110,
- RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),
-
- VMAX_FUNCT6 = 0b000111,
- RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),
-
- VAND_FUNCT6 = 0b001001,
- RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
- RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
- RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),
-
- VOR_FUNCT6 = 0b001010,
- RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
- RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
- RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),
-
- VXOR_FUNCT6 = 0b001011,
- RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
- RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
- RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),
-
- VRGATHER_FUNCT6 = 0b001100,
- RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
- RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
- RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
-
- VMV_FUNCT6 = 0b010111,
- RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift),
-
- RO_V_VMERGE_VI = RO_V_VMV_VI,
- RO_V_VMERGE_VV = RO_V_VMV_VV,
- RO_V_VMERGE_VX = RO_V_VMV_VX,
-
- VMSEQ_FUNCT6 = 0b011000,
- RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
-
- VMSNE_FUNCT6 = 0b011001,
- RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),
-
- VMSLTU_FUNCT6 = 0b011010,
- RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
-
- VMSLT_FUNCT6 = 0b011011,
- RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),
-
- VMSLE_FUNCT6 = 0b011101,
- RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),
-
- VMSLEU_FUNCT6 = 0b011100,
- RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
-
- VMSGTU_FUNCT6 = 0b011110,
- RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
-
- VMSGT_FUNCT6 = 0b011111,
- RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),
-
- VSLIDEUP_FUNCT6 = 0b001110,
- RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
-
- VSLIDEDOWN_FUNCT6 = 0b001111,
- RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
-
- VSRL_FUNCT6 = 0b101000,
- RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
-
- VSRA_FUNCT6 = 0b101001,
- RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),
-
- VSLL_FUNCT6 = 0b100101,
- RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
-
- VSMUL_FUNCT6 = 0b100111,
- RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),
-
- VADC_FUNCT6 = 0b010000,
- RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),
-
- VMADC_FUNCT6 = 0b010001,
- RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),
-
- VWXUNARY0_FUNCT6 = 0b010000,
- VRXUNARY0_FUNCT6 = 0b010000,
- VMUNARY0_FUNCT6 = 0b010100,
-
- RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
- RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),
-
- VID_V = 0b10001,
-
- VXUNARY0_FUNCT6 = 0b010010,
- RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),
-
- VWFUNARY0_FUNCT6 = 0b010000,
- RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),
-
- VRFUNARY0_FUNCT6 = 0b010000,
- RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),
-
- VREDMAXU_FUNCT6 = 0b000110,
- RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
- VREDMAX_FUNCT6 = 0b000111,
- RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),
-
- VREDMINU_FUNCT6 = 0b000100,
- RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
- VREDMIN_FUNCT6 = 0b000101,
- RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
-
- VFUNARY0_FUNCT6 = 0b010010,
- RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift),
- VFUNARY1_FUNCT6 = 0b010011,
- RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift),
-
- VFCVT_XU_F_V = 0b00000,
- VFCVT_X_F_V = 0b00001,
- VFCVT_F_XU_V = 0b00010,
- VFCVT_F_X_V = 0b00011,
- VFWCVT_XU_F_V = 0b01000,
- VFWCVT_X_F_V = 0b01001,
- VFWCVT_F_XU_V = 0b01010,
- VFWCVT_F_X_V = 0b01011,
- VFWCVT_F_F_V = 0b01100,
- VFNCVT_F_F_W = 0b10100,
- VFNCVT_X_F_W = 0b10001,
- VFNCVT_XU_F_W = 0b10000,
-
- VFCLASS_V = 0b10000,
- VFSQRT_V = 0b00000,
- VFRSQRT7_V = 0b00100,
- VFREC7_V = 0b00101,
-
- VFADD_FUNCT6 = 0b000000,
- RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift),
-
- VFSUB_FUNCT6 = 0b000010,
- RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift),
-
- VFDIV_FUNCT6 = 0b100000,
- RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift),
-
- VFMUL_FUNCT6 = 0b100100,
- RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),
-
- // Vector Widening Floating-Point Add/Subtract Instructions
- VFWADD_FUNCT6 = 0b110000,
- RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift),
-
- VFWSUB_FUNCT6 = 0b110010,
- RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
-
- VFWADD_W_FUNCT6 = 0b110100,
- RO_V_VFWADD_W_VV = OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWADD_W_VF = OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
-
- VFWSUB_W_FUNCT6 = 0b110110,
- RO_V_VFWSUB_W_VV = OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWSUB_W_VF = OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
-
- // Vector Widening Floating-Point Reduction Instructions
- VFWREDUSUM_FUNCT6 = 0b110001,
- RO_V_VFWREDUSUM_VV = OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift),
-
- VFWREDOSUM_FUNCT6 = 0b110011,
- RO_V_VFWREDOSUM_VV = OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift),
-
- // Vector Widening Floating-Point Multiply
- VFWMUL_FUNCT6 = 0b111000,
- RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
-
- VMFEQ_FUNCT6 = 0b011000,
- RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
-
- VMFNE_FUNCT6 = 0b011100,
- RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift),
-
- VMFLT_FUNCT6 = 0b011011,
- RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift),
-
- VMFLE_FUNCT6 = 0b011001,
- RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift),
- RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift),
-
- VMFGE_FUNCT6 = 0b011111,
- RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift),
-
- VMFGT_FUNCT6 = 0b011101,
- RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift),
-
- VFMAX_FUNCT6 = 0b000110,
- RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
-
- VFREDMAX_FUNCT6 = 0b0001111,
- RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),
-
- VFMIN_FUNCT6 = 0b000100,
- RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
-
- VFSGNJ_FUNCT6 = 0b001000,
- RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
-
- VFSGNJN_FUNCT6 = 0b001001,
- RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
-
- VFSGNJX_FUNCT6 = 0b001010,
- RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
-
- VFMADD_FUNCT6 = 0b101000,
- RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift),
-
- VFNMADD_FUNCT6 = 0b101001,
- RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
-
- VFMSUB_FUNCT6 = 0b101010,
- RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
-
- VFNMSUB_FUNCT6 = 0b101011,
- RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
-
- VFMACC_FUNCT6 = 0b101100,
- RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift),
-
- VFNMACC_FUNCT6 = 0b101101,
- RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
-
- VFMSAC_FUNCT6 = 0b101110,
- RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
-
- VFNMSAC_FUNCT6 = 0b101111,
- RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
-
- // Vector Widening Floating-Point Fused Multiply-Add Instructions
- VFWMACC_FUNCT6 = 0b111100,
- RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
-
- VFWNMACC_FUNCT6 = 0b111101,
- RO_V_VFWNMACC_VV = OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWNMACC_VF = OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
-
- VFWMSAC_FUNCT6 = 0b111110,
- RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
-
- VFWNMSAC_FUNCT6 = 0b111111,
- RO_V_VFWNMSAC_VV = OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
- RO_V_VFWNMSAC_VF = OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
-
- VNCLIP_FUNCT6 = 0b101111,
- RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
- RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
- RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
-
- VNCLIPU_FUNCT6 = 0b101110,
- RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
- RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
-};
+// RVV Extension
+constexpr Opcode OP_IVV = OP_V | (0b000 << kFunct3Shift);
+constexpr Opcode OP_FVV = OP_V | (0b001 << kFunct3Shift);
+constexpr Opcode OP_MVV = OP_V | (0b010 << kFunct3Shift);
+constexpr Opcode OP_IVI = OP_V | (0b011 << kFunct3Shift);
+constexpr Opcode OP_IVX = OP_V | (0b100 << kFunct3Shift);
+constexpr Opcode OP_FVF = OP_V | (0b101 << kFunct3Shift);
+constexpr Opcode OP_MVX = OP_V | (0b110 << kFunct3Shift);
+
+constexpr Opcode RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31;
+constexpr Opcode RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30;
+constexpr Opcode RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31;
+
+// RVV LOAD/STORE
+constexpr Opcode RO_V_VL =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift);
+constexpr Opcode RO_V_VLS =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift);
+constexpr Opcode RO_V_VLX =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift);
+
+constexpr Opcode RO_V_VS =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift);
+constexpr Opcode RO_V_VSS =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift);
+constexpr Opcode RO_V_VSX =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift);
+constexpr Opcode RO_V_VSU =
+ STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift);
+// THE kFunct6Shift is mop
+constexpr Opcode RO_V_VLSEG2 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift);
+constexpr Opcode RO_V_VLSEG3 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift);
+constexpr Opcode RO_V_VLSEG4 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift);
+constexpr Opcode RO_V_VLSEG5 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift);
+constexpr Opcode RO_V_VLSEG6 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift);
+constexpr Opcode RO_V_VLSEG7 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift);
+constexpr Opcode RO_V_VLSEG8 =
+ LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift);
+
+constexpr Opcode RO_V_VSSEG2 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift);
+constexpr Opcode RO_V_VSSEG3 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift);
+constexpr Opcode RO_V_VSSEG4 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift);
+constexpr Opcode RO_V_VSSEG5 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift);
+constexpr Opcode RO_V_VSSEG6 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift);
+constexpr Opcode RO_V_VSSEG7 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift);
+constexpr Opcode RO_V_VSSEG8 =
+ STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift);
+
+constexpr Opcode RO_V_VLSSEG2 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift);
+constexpr Opcode RO_V_VLSSEG3 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift);
+constexpr Opcode RO_V_VLSSEG4 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift);
+constexpr Opcode RO_V_VLSSEG5 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift);
+constexpr Opcode RO_V_VLSSEG6 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift);
+constexpr Opcode RO_V_VLSSEG7 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift);
+constexpr Opcode RO_V_VLSSEG8 =
+ LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift);
+
+constexpr Opcode RO_V_VSSSEG2 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift);
+constexpr Opcode RO_V_VSSSEG3 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift);
+constexpr Opcode RO_V_VSSSEG4 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift);
+constexpr Opcode RO_V_VSSSEG5 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift);
+constexpr Opcode RO_V_VSSSEG6 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift);
+constexpr Opcode RO_V_VSSSEG7 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift);
+constexpr Opcode RO_V_VSSSEG8 =
+ STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift);
+
+constexpr Opcode RO_V_VLXSEG2 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift);
+constexpr Opcode RO_V_VLXSEG3 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift);
+constexpr Opcode RO_V_VLXSEG4 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift);
+constexpr Opcode RO_V_VLXSEG5 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift);
+constexpr Opcode RO_V_VLXSEG6 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift);
+constexpr Opcode RO_V_VLXSEG7 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift);
+constexpr Opcode RO_V_VLXSEG8 =
+ LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift);
+
+constexpr Opcode RO_V_VSXSEG2 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift);
+constexpr Opcode RO_V_VSXSEG3 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift);
+constexpr Opcode RO_V_VSXSEG4 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift);
+constexpr Opcode RO_V_VSXSEG5 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift);
+constexpr Opcode RO_V_VSXSEG6 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift);
+constexpr Opcode RO_V_VSXSEG7 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift);
+constexpr Opcode RO_V_VSXSEG8 =
+ STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift);
+
+// RVV Vector Arithmetic Instruction
+constexpr Opcode VADD_FUNCT6 = 0b000000;
+constexpr Opcode RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSUB_FUNCT6 = 0b000010;
+constexpr Opcode RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VDIVU_FUNCT6 = 0b100000;
+constexpr Opcode RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VDIV_FUNCT6 = 0b100001;
+constexpr Opcode RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VREMU_FUNCT6 = 0b100010;
+constexpr Opcode RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VREM_FUNCT6 = 0b100011;
+constexpr Opcode RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMULHU_FUNCT6 = 0b100100;
+constexpr Opcode RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMUL_FUNCT6 = 0b100101;
+constexpr Opcode RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWMUL_FUNCT6 = 0b111011;
+constexpr Opcode RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWMULU_FUNCT6 = 0b111000;
+constexpr Opcode RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMULHSU_FUNCT6 = 0b100110;
+constexpr Opcode RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMULH_FUNCT6 = 0b100111;
+constexpr Opcode RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWADD_FUNCT6 = 0b110001;
+constexpr Opcode RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWADDU_FUNCT6 = 0b110000;
+constexpr Opcode RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWADDUW_FUNCT6 = 0b110101;
+constexpr Opcode RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VCOMPRESS_FUNCT6 = 0b010111;
+constexpr Opcode RO_V_VCOMPRESS_VV =
+ OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSADDU_FUNCT6 = 0b100000;
+constexpr Opcode RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSADD_FUNCT6 = 0b100001;
+constexpr Opcode RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSSUB_FUNCT6 = 0b100011;
+constexpr Opcode RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSSUBU_FUNCT6 = 0b100010;
+constexpr Opcode RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VRSUB_FUNCT6 = 0b000011;
+constexpr Opcode RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMINU_FUNCT6 = 0b000100;
+constexpr Opcode RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMIN_FUNCT6 = 0b000101;
+constexpr Opcode RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMAXU_FUNCT6 = 0b000110;
+constexpr Opcode RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMAX_FUNCT6 = 0b000111;
+constexpr Opcode RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VAND_FUNCT6 = 0b001001;
+constexpr Opcode RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VOR_FUNCT6 = 0b001010;
+constexpr Opcode RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VXOR_FUNCT6 = 0b001011;
+constexpr Opcode RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VRGATHER_FUNCT6 = 0b001100;
+constexpr Opcode RO_V_VRGATHER_VI =
+ OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VRGATHER_VV =
+ OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VRGATHER_VX =
+ OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMV_FUNCT6 = 0b010111;
+constexpr Opcode RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode RO_V_VMERGE_VI = RO_V_VMV_VI;
+constexpr Opcode RO_V_VMERGE_VV = RO_V_VMV_VV;
+constexpr Opcode RO_V_VMERGE_VX = RO_V_VMV_VX;
+
+constexpr Opcode VMSEQ_FUNCT6 = 0b011000;
+constexpr Opcode RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSNE_FUNCT6 = 0b011001;
+constexpr Opcode RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSLTU_FUNCT6 = 0b011010;
+constexpr Opcode RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSLT_FUNCT6 = 0b011011;
+constexpr Opcode RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSLE_FUNCT6 = 0b011101;
+constexpr Opcode RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSLEU_FUNCT6 = 0b011100;
+constexpr Opcode RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSGTU_FUNCT6 = 0b011110;
+constexpr Opcode RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMSGT_FUNCT6 = 0b011111;
+constexpr Opcode RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSLIDEUP_FUNCT6 = 0b001110;
+constexpr Opcode RO_V_VSLIDEUP_VI =
+ OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSLIDEUP_VX =
+ OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSLIDEDOWN_FUNCT6 = 0b001111;
+constexpr Opcode RO_V_VSLIDEDOWN_VI =
+ OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSLIDEDOWN_VX =
+ OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSRL_FUNCT6 = 0b101000;
+constexpr Opcode RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSRA_FUNCT6 = 0b101001;
+constexpr Opcode RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSLL_FUNCT6 = 0b100101;
+constexpr Opcode RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VSMUL_FUNCT6 = 0b100111;
+constexpr Opcode RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VADC_FUNCT6 = 0b010000;
+constexpr Opcode RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMADC_FUNCT6 = 0b010001;
+constexpr Opcode RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWXUNARY0_FUNCT6 = 0b010000;
+constexpr Opcode VRXUNARY0_FUNCT6 = 0b010000;
+constexpr Opcode VMUNARY0_FUNCT6 = 0b010100;
+
+constexpr Opcode RO_V_VWXUNARY0 =
+ OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VRXUNARY0 =
+ OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VID_V = 0b10001;
+
+constexpr Opcode VXUNARY0_FUNCT6 = 0b010010;
+constexpr Opcode RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VWFUNARY0_FUNCT6 = 0b010000;
+constexpr Opcode RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VRFUNARY0_FUNCT6 = 0b010000;
+constexpr Opcode RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VREDMAXU_FUNCT6 = 0b000110;
+constexpr Opcode RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode VREDMAX_FUNCT6 = 0b000111;
+constexpr Opcode RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VREDMINU_FUNCT6 = 0b000100;
+constexpr Opcode RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode VREDMIN_FUNCT6 = 0b000101;
+constexpr Opcode RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFUNARY0_FUNCT6 = 0b010010;
+constexpr Opcode RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode VFUNARY1_FUNCT6 = 0b010011;
+constexpr Opcode RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFCVT_XU_F_V = 0b00000;
+constexpr Opcode VFCVT_X_F_V = 0b00001;
+constexpr Opcode VFCVT_F_XU_V = 0b00010;
+constexpr Opcode VFCVT_F_X_V = 0b00011;
+constexpr Opcode VFWCVT_XU_F_V = 0b01000;
+constexpr Opcode VFWCVT_X_F_V = 0b01001;
+constexpr Opcode VFWCVT_F_XU_V = 0b01010;
+constexpr Opcode VFWCVT_F_X_V = 0b01011;
+constexpr Opcode VFWCVT_F_F_V = 0b01100;
+constexpr Opcode VFNCVT_F_F_W = 0b10100;
+constexpr Opcode VFNCVT_X_F_W = 0b10001;
+constexpr Opcode VFNCVT_XU_F_W = 0b10000;
+
+constexpr Opcode VFCLASS_V = 0b10000;
+constexpr Opcode VFSQRT_V = 0b00000;
+constexpr Opcode VFRSQRT7_V = 0b00100;
+constexpr Opcode VFREC7_V = 0b00101;
+
+constexpr Opcode VFADD_FUNCT6 = 0b000000;
+constexpr Opcode RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFSUB_FUNCT6 = 0b000010;
+constexpr Opcode RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFDIV_FUNCT6 = 0b100000;
+constexpr Opcode RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMUL_FUNCT6 = 0b100100;
+constexpr Opcode RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift);
+
+// Vector Widening Floating-Point Add/Subtract Instructions
+constexpr Opcode VFWADD_FUNCT6 = 0b110000;
+constexpr Opcode RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWSUB_FUNCT6 = 0b110010;
+constexpr Opcode RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWADD_W_FUNCT6 = 0b110100;
+constexpr Opcode RO_V_VFWADD_W_VV =
+ OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWADD_W_VF =
+ OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWSUB_W_FUNCT6 = 0b110110;
+constexpr Opcode RO_V_VFWSUB_W_VV =
+ OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWSUB_W_VF =
+ OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift);
+
+// Vector Widening Floating-Point Reduction Instructions
+constexpr Opcode VFWREDUSUM_FUNCT6 = 0b110001;
+constexpr Opcode RO_V_VFWREDUSUM_VV =
+ OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWREDOSUM_FUNCT6 = 0b110011;
+constexpr Opcode RO_V_VFWREDOSUM_VV =
+ OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift);
+
+// Vector Widening Floating-Point Multiply
+constexpr Opcode VFWMUL_FUNCT6 = 0b111000;
+constexpr Opcode RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMFEQ_FUNCT6 = 0b011000;
+constexpr Opcode RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMFNE_FUNCT6 = 0b011100;
+constexpr Opcode RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMFLT_FUNCT6 = 0b011011;
+constexpr Opcode RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMFLE_FUNCT6 = 0b011001;
+constexpr Opcode RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMFGE_FUNCT6 = 0b011111;
+constexpr Opcode RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VMFGT_FUNCT6 = 0b011101;
+constexpr Opcode RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMAX_FUNCT6 = 0b000110;
+constexpr Opcode RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFREDMAX_FUNCT6 = 0b0001111;
+constexpr Opcode RO_V_VFREDMAX_VV =
+ OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMIN_FUNCT6 = 0b000100;
+constexpr Opcode RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFSGNJ_FUNCT6 = 0b001000;
+constexpr Opcode RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFSGNJN_FUNCT6 = 0b001001;
+constexpr Opcode RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFSGNJX_FUNCT6 = 0b001010;
+constexpr Opcode RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMADD_FUNCT6 = 0b101000;
+constexpr Opcode RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFNMADD_FUNCT6 = 0b101001;
+constexpr Opcode RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMSUB_FUNCT6 = 0b101010;
+constexpr Opcode RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFNMSUB_FUNCT6 = 0b101011;
+constexpr Opcode RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMACC_FUNCT6 = 0b101100;
+constexpr Opcode RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFNMACC_FUNCT6 = 0b101101;
+constexpr Opcode RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFMSAC_FUNCT6 = 0b101110;
+constexpr Opcode RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFNMSAC_FUNCT6 = 0b101111;
+constexpr Opcode RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift);
+
+// Vector Widening Floating-Point Fused Multiply-Add Instructions
+constexpr Opcode VFWMACC_FUNCT6 = 0b111100;
+constexpr Opcode RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWNMACC_FUNCT6 = 0b111101;
+constexpr Opcode RO_V_VFWNMACC_VV =
+ OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWNMACC_VF =
+ OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWMSAC_FUNCT6 = 0b111110;
+constexpr Opcode RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VFWNMSAC_FUNCT6 = 0b111111;
+constexpr Opcode RO_V_VFWNMSAC_VV =
+ OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VFWNMSAC_VF =
+ OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VNCLIP_FUNCT6 = 0b101111;
+constexpr Opcode RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift);
+
+constexpr Opcode VNCLIPU_FUNCT6 = 0b101110;
+constexpr Opcode RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift);
+constexpr Opcode RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift);
+// clang-format on
} // namespace internal
} // namespace v8
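For orientation, a minimal sketch of how these constexpr opcode templates become a full 32-bit RVV instruction word once the assembler fills in the register fields. The base opcode (0b1010111) and the field offsets below are assumed from the standard RISC-V vector encoding rather than copied from this header, so treat the numbers as illustrative only.

// Sketch only: assumed RVV OP-V field offsets, not V8's constants.
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t OP_V = 0b1010111;                        // assumed base opcode
  constexpr uint32_t kFunct3Shift = 12, kRvvFunct6Shift = 26; // assumed
  constexpr uint32_t kRvvVmShift = 25, kRvvVs2Shift = 20;     // assumed
  constexpr uint32_t kRvvVs1Shift = 15, kRvvVdShift = 7;      // assumed

  constexpr uint32_t OP_IVV = OP_V | (0b000 << kFunct3Shift);
  constexpr uint32_t VADD_FUNCT6 = 0b000000;
  constexpr uint32_t RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift);

  // vadd.vv v1, v2, v3 with vm = 1 (unmasked): the opcode template supplies
  // the opcode/funct3/funct6 bits, the operands fill the remaining fields.
  uint32_t vd = 1, vs2 = 2, vs1 = 3, vm = 1;
  uint32_t instr = RO_V_VADD_VV | (vm << kRvvVmShift) | (vs2 << kRvvVs2Shift) |
                   (vs1 << kRvvVs1Shift) | (vd << kRvvVdShift);
  std::printf("vadd.vv v1, v2, v3 -> 0x%08x\n", instr);
  return 0;
}

The same pattern applies to every RO_V_* constant above: the _VV/_VX/_VI/_VF suffix only changes which funct3 variant (OP_IVV, OP_IVX, ...) the funct6 value is combined with.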
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-zicsr.h b/deps/v8/src/codegen/riscv/constant-riscv-zicsr.h
index d6171859ee..0b3d254541 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-zicsr.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-zicsr.h
@@ -16,15 +16,13 @@ const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;
-enum OpcodeRISCVZICSR : uint32_t {
- // RV32/RV64 Zicsr Standard Extension
- RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift),
- RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift),
- RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift),
- RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift),
- RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift),
- RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift),
-};
+// RV32/RV64 Zicsr Standard Extension
+constexpr Opcode RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift);
+constexpr Opcode RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift);
+constexpr Opcode RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift);
+constexpr Opcode RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift);
+constexpr Opcode RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift);
+constexpr Opcode RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift);
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_
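The Zicsr opcodes follow the same template idea: SYSTEM plus a funct3 selects the CSR instruction, and the csr/rs1/rd fields are filled in later. A hedged sketch, with the SYSTEM base opcode (0b1110011), the funct3 shift, and the CSR field layout assumed rather than taken from base-constants-riscv.h:

// Sketch only: assumed SYSTEM opcode and Zicsr field layout.
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t SYSTEM = 0b1110011;  // assumed base opcode
  constexpr uint32_t kFunct3Shift = 12;   // assumed
  constexpr uint32_t RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift);

  // csrrw rd, csr, rs1: csr in bits 31..20, rs1 in 19..15, rd in 11..7.
  uint32_t csr = 0x003;  // fcsr
  uint32_t rs1 = 10;     // a0
  uint32_t rd = 11;      // a1
  uint32_t instr = RO_CSRRW | (csr << 20) | (rs1 << 15) | (rd << 7);
  std::printf("csrrw a1, fcsr, a0 -> 0x%08x\n", instr);
  return 0;
}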
diff --git a/deps/v8/src/codegen/riscv/constant-riscv-zifencei.h b/deps/v8/src/codegen/riscv/constant-riscv-zifencei.h
index 49105017cb..e3869fa879 100644
--- a/deps/v8/src/codegen/riscv/constant-riscv-zifencei.h
+++ b/deps/v8/src/codegen/riscv/constant-riscv-zifencei.h
@@ -7,9 +7,7 @@
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
-enum OpcodeRISCVIFENCEI : uint32_t {
- RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift),
-};
+constexpr Opcode RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift);
}
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_
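Taken together, the three headers above show the same migration: per-extension opcode enums (OpcodeRISCVV, OpcodeRISCVZICSR, OpcodeRISCVIFENCEI) become plain constexpr Opcode constants, so every opcode shares one type and helpers no longer need a separate overload per enum. A minimal sketch of the type friction the old enums caused; the names here are illustrative, not V8's:

// Sketch only: one shared constexpr type vs. one enum type per extension.
#include <cstdint>
using Opcode = uint32_t;

// Old style: distinct enum types, so a helper is tied to one extension.
enum OpcodeExtA : uint32_t { RO_A = 0x07 };
enum OpcodeExtB : uint32_t { RO_B = 0x27 };
void EmitOld(OpcodeExtA) {}

// New style: all opcodes are Opcode, so one signature covers every extension.
constexpr Opcode kRoA = 0x07;
constexpr Opcode kRoB = 0x27;
void EmitNew(Opcode) {}

int main() {
  EmitNew(kRoA);
  EmitNew(kRoB);
  // EmitOld(RO_B);  // would not compile: RO_B has a different enum type
  return 0;
}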
diff --git a/deps/v8/src/codegen/riscv/extension-riscv-v.cc b/deps/v8/src/codegen/riscv/extension-riscv-v.cc
index c5be03a181..f43aea40d4 100644
--- a/deps/v8/src/codegen/riscv/extension-riscv-v.cc
+++ b/deps/v8/src/codegen/riscv/extension-riscv-v.cc
@@ -160,7 +160,7 @@ void AssemblerRISCVV::vid_v(VRegister vd, MaskType mask) {
GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
}
-// void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, Register
+// void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register
// rs1,
// VRegister vs2, MaskType mask = NoMask);
#define DEFINE_OPMVX(name, funct6) \
@@ -455,9 +455,8 @@ uint8_t vsew_switch(VSew vsew) {
}
// OPIVV OPFVV OPMVV
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- VRegister vd, VRegister vs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((vd.code() & 0x1F) << kRvvVdShift) |
@@ -466,9 +465,8 @@ void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
emit(instr);
}
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- VRegister vd, int8_t vs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ int8_t vs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((vd.code() & 0x1F) << kRvvVdShift) |
@@ -477,9 +475,8 @@ void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
emit(instr);
}
// OPMVV OPFVV
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- Register rd, VRegister vs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_MVV || opcode == OP_FVV);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((rd.code() & 0x1F) << kRvvVdShift) |
@@ -489,9 +486,8 @@ void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
}
// OPFVV
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- FPURegister fd, VRegister vs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, FPURegister fd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_FVV);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((fd.code() & 0x1F) << kRvvVdShift) |
@@ -501,9 +497,8 @@ void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
}
// OPIVX OPMVX
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- VRegister vd, Register rs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_IVX || opcode == OP_MVX);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((vd.code() & 0x1F) << kRvvVdShift) |
@@ -513,9 +508,8 @@ void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
}
// OPFVF
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- VRegister vd, FPURegister fs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ FPURegister fs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_FVF);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((vd.code() & 0x1F) << kRvvVdShift) |
@@ -589,9 +583,8 @@ void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
emit(instr);
}
// vmv_xs vcpop_m vfirst_m
-void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
- Register rd, uint8_t vs1, VRegister vs2,
- MaskType mask) {
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
+ uint8_t vs1, VRegister vs2, MaskType mask) {
DCHECK(opcode == OP_MVV);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((rd.code() & 0x1F) << kRvvVdShift) |
@@ -867,6 +860,7 @@ void AssemblerRISCVV::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
uint8_t laneidx) {
+#ifdef CAN_USE_RVV_INSTRUCTIONS
switch (rep) {
case MachineRepresentation::kWord8:
*this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
@@ -883,6 +877,9 @@ LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
default:
UNREACHABLE();
}
+#else
+ UNREACHABLE();
+#endif
}
} // namespace internal
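The last hunk above wraps the body of LoadStoreLaneParams in #ifdef CAN_USE_RVV_INSTRUCTIONS and makes the non-RVV build hit UNREACHABLE() instead of silently encoding vector loads/stores it cannot support. The shape of that pattern, reduced to a standalone sketch (the macro definition and names are placeholders, not V8's build flags):

// Sketch only: compile-time feature gating with a loud failure fallback.
#include <cstdio>
#include <cstdlib>

#define CAN_USE_RVV_INSTRUCTIONS 1  // pretend this sketch is an RVV-enabled build

void ConfigureLane(int bits) {
#ifdef CAN_USE_RVV_INSTRUCTIONS
  switch (bits) {
    case 8:
    case 16:
    case 32:
    case 64:
      std::printf("configure %d-bit lanes\n", bits);
      break;
    default:
      std::abort();  // unsupported lane width, comparable to UNREACHABLE()
  }
#else
  // Built without RVV: reaching this path is a bug, so fail loudly rather
  // than emit an instruction the target cannot execute.
  (void)bits;
  std::abort();
#endif
}

int main() {
  ConfigureLane(16);
  return 0;
}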
diff --git a/deps/v8/src/codegen/riscv/extension-riscv-v.h b/deps/v8/src/codegen/riscv/extension-riscv-v.h
index 2682f6c045..576d349eb4 100644
--- a/deps/v8/src/codegen/riscv/extension-riscv-v.h
+++ b/deps/v8/src/codegen/riscv/extension-riscv-v.h
@@ -425,25 +425,25 @@ class AssemblerRISCVV : public AssemblerRiscvBase {
// vsetvli
void GenInstrV(Register rd, Register rs1, uint32_t zimm);
// OPIVV OPFVV OPMVV
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
- VRegister vs1, VRegister vs2, MaskType mask = NoMask);
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, int8_t vs1,
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1,
VRegister vs2, MaskType mask = NoMask);
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1,
VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs2,
+ MaskType mask = NoMask);
// OPMVV OPFVV
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd,
- VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
// OPFVV
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, FPURegister fd,
- VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, FPURegister fd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
// OPIVX OPMVX
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
- Register rs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask = NoMask);
// OPFVF
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
- FPURegister fs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, FPURegister fs1,
+ VRegister vs2, MaskType mask = NoMask);
// OPMVX
void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
MaskType mask = NoMask);
@@ -464,7 +464,7 @@ class AssemblerRISCVV : public AssemblerRiscvBase {
VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
uint8_t Nf);
// vmv_xs vcpop_m vfirst_m
- void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd, uint8_t vs1,
+ void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, uint8_t vs1,
VRegister vs2, MaskType mask);
};
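With OpcodeRISCVV gone, the GenInstrV overloads above take any Opcode, and the narrowing the enum type used to provide now lives in debug checks at the top of each definition (see the DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV) lines in the extension-riscv-v.cc hunks). A reduced sketch of that trade, with assert standing in for DCHECK and assumed encodings (mask bit omitted for brevity):

// Sketch only: a type-level restriction replaced by a debug-time assertion.
#include <cassert>
#include <cstdint>

using Opcode = uint32_t;
constexpr Opcode OP_IVV = 0x57 | (0b000u << 12);  // assumed encodings
constexpr Opcode OP_MVV = 0x57 | (0b010u << 12);

uint32_t GenInstrVV(uint8_t funct6, Opcode opcode, uint8_t vd, uint8_t vs1,
                    uint8_t vs2) {
  // Previously the parameter type kept non-vector opcodes out at compile time;
  // the same intent is now an assertion that only fires in debug builds.
  assert(opcode == OP_IVV || opcode == OP_MVV);
  return (uint32_t{funct6} << 26) | opcode | (uint32_t{vs2} << 20) |
         (uint32_t{vs1} << 15) | (uint32_t{vd} << 7);
}

int main() {
  (void)GenInstrVV(0b000000, OP_IVV, 1, 3, 2);  // fine: an OP-V ...VV form
  // Passing, say, an OP_FVF-style opcode here would trip the assertion.
  return 0;
}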
diff --git a/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h b/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h
index f379d82914..dfda1ef720 100644
--- a/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h
+++ b/deps/v8/src/codegen/riscv/interface-descriptors-riscv-inl.h
@@ -100,6 +100,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return a4; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return a5; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
// static
diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
index 00a89a40c1..3fa69f10e5 100644
--- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
+++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
@@ -41,7 +41,7 @@ static inline bool IsZero(const Operand& rt) {
}
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -58,7 +58,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
@@ -75,7 +75,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
@@ -114,8 +114,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1,
- &heal_optimized_code_slot);
+ __ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch1,
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -124,7 +124,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ LoadCodeEntry(a2, optimized_code_entry);
__ Jump(a2);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -175,7 +175,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
CallRuntime(function_id, 1);
// Use the return value before restoring a0
- AddWord(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ LoadCodeEntry(a2, a0);
// Restore target function, new target and actual argument count.
Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
kJavaScriptCallArgCountRegister);
@@ -238,20 +238,19 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
temps.Acquire());
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
LoadWord(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Label skip;
@@ -261,7 +260,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
bind(&skip);
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
AddWord(fp, sp, Operand(kSystemPointerSize));
@@ -271,7 +270,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
@@ -328,17 +327,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
bind(&done);
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, slot_address));
@@ -361,7 +360,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -384,7 +383,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -413,7 +412,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.Acquire();
DCHECK(!AreAliased(object, value, temp));
AddWord(temp, object, offset);
- LoadTaggedPointerField(temp, MemOperand(temp));
+ LoadTaggedField(temp, MemOperand(temp));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,
Operand(value));
}
@@ -436,7 +435,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.Acquire();
CheckPageFlag(value,
temp, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
+ MemoryChunk::kPointersToHereAreInterestingMask,
eq, // In RISC-V, it uses cc for a comparison with 0, so if
// no bits are set, and cc is eq, it will branch to done
&done);
@@ -469,7 +468,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// ---------------------------------------------------------------------------
// Instruction macros.
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -499,7 +498,7 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -541,15 +540,15 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
Add64(rd, rs, rt);
}
-void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
Sub64(rd, rs, rt);
}
-void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -598,7 +597,7 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Add64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
(rt.rm() != zero_reg) && (rs != zero_reg)) {
@@ -638,7 +637,7 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulw(rd, rs, rt.rm());
} else {
@@ -650,7 +649,7 @@ void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -663,7 +662,7 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
srai(rd, rd, 32);
}
-void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
+void MacroAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
Register rsz, Register rtz) {
slli(rsz, rs, 32);
if (rt.is_reg()) {
@@ -675,7 +674,7 @@ void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
srai(rd, rd, 32);
}
-void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -687,7 +686,7 @@ void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulh(rd, rs, rt.rm());
} else {
@@ -699,7 +698,7 @@ void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulhu(rd, rs, rt.rm());
} else {
@@ -711,7 +710,7 @@ void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Div32(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divw(res, rs, rt.rm());
} else {
@@ -723,7 +722,7 @@ void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remw(rd, rs, rt.rm());
} else {
@@ -735,7 +734,7 @@ void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remuw(rd, rs, rt.rm());
} else {
@@ -747,7 +746,7 @@ void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Div64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rd, rs, rt.rm());
} else {
@@ -759,7 +758,7 @@ void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu32(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divuw(res, rs, rt.rm());
} else {
@@ -771,7 +770,7 @@ void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu64(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(res, rs, rt.rm());
} else {
@@ -783,7 +782,7 @@ void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rem(rd, rs, rt.rm());
} else {
@@ -795,7 +794,7 @@ void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remu(rd, rs, rt.rm());
} else {
@@ -807,11 +806,11 @@ void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
}
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
Add32(rd, rs, rt);
}
-void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
(rt.rm() != zero_reg) && (rs != zero_reg)) {
@@ -851,11 +850,11 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
Sub32(rd, rs, rt);
}
-void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -905,11 +904,11 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
Mul(rd, rs, rt);
}
-void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -921,7 +920,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulh(rd, rs, rt.rm());
} else {
@@ -933,7 +932,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
+void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
Register rsz, Register rtz) {
if (rt.is_reg()) {
mulhu(rd, rs, rt.rm());
@@ -946,7 +945,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
}
}
-void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(res, rs, rt.rm());
} else {
@@ -958,7 +957,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rem(rd, rs, rt.rm());
} else {
@@ -970,7 +969,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remu(rd, rs, rt.rm());
} else {
@@ -982,7 +981,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(res, rs, rt.rm());
} else {
@@ -996,7 +995,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
#endif
-void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -1022,7 +1021,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -1044,7 +1043,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -1066,7 +1065,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
not_(rd, rd);
@@ -1076,12 +1075,12 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Neg(Register rs, const Operand& rt) {
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
DCHECK(rt.is_reg());
neg(rs, rt.rm());
}
-void TurboAssembler::Seqz(Register rd, const Operand& rt) {
+void MacroAssembler::Seqz(Register rd, const Operand& rt) {
if (rt.is_reg()) {
seqz(rd, rt.rm());
} else {
@@ -1089,7 +1088,7 @@ void TurboAssembler::Seqz(Register rd, const Operand& rt) {
}
}
-void TurboAssembler::Snez(Register rd, const Operand& rt) {
+void MacroAssembler::Snez(Register rd, const Operand& rt) {
if (rt.is_reg()) {
snez(rd, rt.rm());
} else {
@@ -1097,7 +1096,7 @@ void TurboAssembler::Snez(Register rd, const Operand& rt) {
}
}
-void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Seq(Register rd, Register rs, const Operand& rt) {
if (rs == zero_reg) {
Seqz(rd, rt);
} else if (IsZero(rt)) {
@@ -1108,7 +1107,7 @@ void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sne(Register rd, Register rs, const Operand& rt) {
if (rs == zero_reg) {
Snez(rd, rt);
} else if (IsZero(rt)) {
@@ -1119,7 +1118,7 @@ void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
@@ -1136,7 +1135,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
@@ -1153,7 +1152,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
@@ -1167,7 +1166,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
@@ -1181,17 +1180,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) {
Slt(rd, rs, rt);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
Sltu(rd, rs, rt);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
@@ -1204,7 +1203,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
@@ -1218,7 +1217,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sllw(rd, rs, rt.rm());
} else {
@@ -1227,7 +1226,7 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sraw(rd, rs, rt.rm());
} else {
@@ -1236,7 +1235,7 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srlw(rd, rs, rt.rm());
} else {
@@ -1245,11 +1244,11 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
Sra64(rd, rs, rt);
}
-void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sra(rd, rs, rt.rm());
} else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
@@ -1262,11 +1261,11 @@ void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
Srl64(rd, rs, rt);
}
-void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srl(rd, rs, rt.rm());
} else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
@@ -1279,11 +1278,11 @@ void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
Sll64(rd, rs, rt);
}
-void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sll(rd, rs, rt.rm());
} else {
@@ -1297,7 +1296,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1322,7 +1321,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1345,11 +1344,11 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
Sll32(rd, rs, rt);
}
-void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sll(rd, rs, rt.rm());
} else {
@@ -1358,11 +1357,11 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
Sra32(rd, rs, rt);
}
-void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sra(rd, rs, rt.rm());
} else {
@@ -1371,11 +1370,11 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
Srl32(rd, rs, rt);
}
-void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srl(rd, rs, rt.rm());
} else {
@@ -1384,7 +1383,7 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1408,7 +1407,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
#endif
-void TurboAssembler::Li(Register rd, intptr_t imm) {
+void MacroAssembler::Li(Register rd, intptr_t imm) {
if (v8_flags.riscv_c_extension && (rd != zero_reg) && is_int6(imm)) {
c_li(rd, imm);
} else {
@@ -1416,7 +1415,7 @@ void TurboAssembler::Li(Register rd, intptr_t imm) {
}
}
-void TurboAssembler::Mv(Register rd, const Operand& rt) {
+void MacroAssembler::Mv(Register rd, const Operand& rt) {
if (v8_flags.riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {
c_mv(rd, rt.rm());
} else {
@@ -1424,7 +1423,7 @@ void TurboAssembler::Mv(Register rd, const Operand& rt) {
}
}
-void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
+void MacroAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
uint8_t sa) {
DCHECK(sa >= 1 && sa <= 31);
UseScratchRegisterScope temps(this);
@@ -1437,7 +1436,7 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
// ------------Pseudo-instructions-------------
// Change endianness
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size,
Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
@@ -1495,7 +1494,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size,
Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
@@ -1522,7 +1521,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
#endif
template <int NBYTES, bool LOAD_SIGNED>
-void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs,
+void MacroAssembler::LoadNBytes(Register rd, const MemOperand& rs,
Register scratch) {
DCHECK(rd != rs.rm() && rd != scratch);
DCHECK_LE(NBYTES, 8);
@@ -1544,7 +1543,7 @@ void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs,
}
template <int NBYTES, bool LOAD_SIGNED>
-void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
+void MacroAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
Register scratch0,
Register scratch1) {
// This function loads nbytes from memory specified by rs and into rs.rm()
@@ -1573,7 +1572,7 @@ void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
}
template <int NBYTES, bool IS_SIGNED>
-void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
+void MacroAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
@@ -1604,7 +1603,7 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
#if V8_TARGET_ARCH_RISCV64
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
Register scratch_base) {
DCHECK(NBYTES == 4 || NBYTES == 8);
DCHECK_NE(scratch_base, rs.rm());
@@ -1629,7 +1628,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
}
#elif V8_TARGET_ARCH_RISCV32
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
Register scratch_base) {
DCHECK_EQ(NBYTES, 4);
DCHECK_NE(scratch_base, rs.rm());
@@ -1650,7 +1649,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
fmv_w_x(frd, scratch);
}
-void TurboAssembler::UnalignedDoubleHelper(FPURegister frd,
+void MacroAssembler::UnalignedDoubleHelper(FPURegister frd,
const MemOperand& rs,
Register scratch_base) {
DCHECK_NE(scratch_base, rs.rm());
@@ -1679,7 +1678,7 @@ void TurboAssembler::UnalignedDoubleHelper(FPURegister frd,
#endif
template <int NBYTES>
-void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
+void MacroAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
Register scratch_other) {
DCHECK(scratch_other != rs.rm());
DCHECK_LE(NBYTES, 8);
@@ -1718,7 +1717,7 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
#if V8_TARGET_ARCH_RISCV64
template <int NBYTES>
-void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
+void MacroAssembler::UnalignedFStoreHelper(FPURegister frd,
const MemOperand& rs,
Register scratch) {
DCHECK(NBYTES == 8 || NBYTES == 4);
@@ -1732,7 +1731,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
}
#elif V8_TARGET_ARCH_RISCV32
template <int NBYTES>
-void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
+void MacroAssembler::UnalignedFStoreHelper(FPURegister frd,
const MemOperand& rs,
Register scratch) {
DCHECK_EQ(NBYTES, 4);
@@ -1740,7 +1739,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
fmv_x_w(scratch, frd);
UnalignedStoreHelper<NBYTES>(scratch, rs);
}
-void TurboAssembler::UnalignedDStoreHelper(FPURegister frd,
+void MacroAssembler::UnalignedDStoreHelper(FPURegister frd,
const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
@@ -1757,7 +1756,7 @@ void TurboAssembler::UnalignedDStoreHelper(FPURegister frd,
#endif
template <typename Reg_T, typename Func>
-void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
+void MacroAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
Func generator) {
MemOperand source = rs;
UseScratchRegisterScope temps(this);
@@ -1771,7 +1770,7 @@ void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
}
template <typename Reg_T, typename Func>
-void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
+void MacroAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
Func generator) {
MemOperand source = rs;
UseScratchRegisterScope temps(this);
@@ -1787,32 +1786,32 @@ void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
generator(value, source);
}
-void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<4, true>(rd, rs);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<4, false>(rd, rs);
}
#endif
-void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
UnalignedStoreHelper<4>(rd, rs);
}
-void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<2, true>(rd, rs);
}
-void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<2, false>(rd, rs);
}
-void TurboAssembler::Ush(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ush(Register rd, const MemOperand& rs) {
UnalignedStoreHelper<2>(rd, rs);
}
-void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<8, true>(rd, rs);
}
#if V8_TARGET_ARCH_RISCV64
@@ -1838,23 +1837,23 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
}
#endif
-void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
UnalignedStoreHelper<8>(rd, rs);
}
-void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
UnalignedFLoadHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
UnalignedFStoreHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
#if V8_TARGET_ARCH_RISCV64
@@ -1864,7 +1863,7 @@ void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
#endif
}
-void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
#if V8_TARGET_ARCH_RISCV64
@@ -1874,49 +1873,49 @@ void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
#endif
}
-void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lb(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lbu(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
this->sb(value, source.rm(), source.offset());
};
AlignedStoreHelper(rd, rs, fn);
}
-void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lh(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lhu(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
this->sh(value, source.rm(), source.offset());
};
AlignedStoreHelper(rd, rs, fn);
}
-void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1934,14 +1933,14 @@ void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lwu(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
#endif
-void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1958,7 +1957,7 @@ void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1975,7 +1974,7 @@ void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1991,21 +1990,21 @@ void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
AlignedStoreHelper(rd, rs, fn);
}
#endif
-void TurboAssembler::LoadFloat(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::LoadFloat(FPURegister fd, const MemOperand& src) {
auto fn = [this](FPURegister target, const MemOperand& source) {
this->flw(target, source.rm(), source.offset());
};
AlignedLoadHelper(fd, src, fn);
}
-void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
auto fn = [this](FPURegister value, const MemOperand& source) {
this->fsw(value, source.rm(), source.offset());
};
AlignedStoreHelper(fs, src, fn);
}
-void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
auto fn = [this](FPURegister target, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -2021,7 +2020,7 @@ void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
AlignedLoadHelper(fd, src, fn);
}
-void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
auto fn = [this](FPURegister value, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -2037,7 +2036,7 @@ void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
AlignedStoreHelper(fs, src, fn);
}
-void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ll(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
lr_w(false, false, rd, rs.rm());
@@ -2050,7 +2049,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lld(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
lr_d(false, false, rd, rs.rm());
@@ -2062,7 +2061,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
}
}
#endif
-void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sc(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
sc_w(false, false, rd, rs.rm(), rd);
@@ -2074,7 +2073,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
}
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Scd(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
sc_d(false, false, rd, rs.rm(), rd);
@@ -2086,7 +2085,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
}
}
#endif
-void TurboAssembler::li(Register dst, Handle<HeapObject> value,
+void MacroAssembler::li(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
@@ -2104,7 +2103,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value,
}
}
-void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -2124,30 +2123,30 @@ static inline int InstrCountForLiLower32Bit(int64_t value) {
return 2;
}
-int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
if (is_int32(value + 0x800)) {
return InstrCountForLiLower32Bit(value);
} else {
- return li_estimate(value);
+ return RV_li_count(value);
}
UNREACHABLE();
return INT_MAX;
}
-void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
Li(rd, j.immediate());
}
-void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
UseScratchRegisterScope temps(this);
- int count = li_estimate(j.immediate(), temps.hasAvailable());
- int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable());
+ int count = RV_li_count(j.immediate(), temps.hasAvailable());
+ int reverse_count = RV_li_count(~j.immediate(), temps.hasAvailable());
if (v8_flags.riscv_constant_pool && count >= 4 && reverse_count >= 4) {
// Ld/Lw a Address from a constant pool.
RecordEntry((uintptr_t)j.immediate(), j.rmode());
@@ -2188,7 +2187,7 @@ static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6};
static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7};
static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11};
-void TurboAssembler::MultiPush(RegList regs) {
+void MacroAssembler::MultiPush(RegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
@@ -2232,7 +2231,7 @@ void TurboAssembler::MultiPush(RegList regs) {
#undef S_REGS
}
-void TurboAssembler::MultiPop(RegList regs) {
+void MacroAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
#define TEST_AND_POP_REG(reg) \
@@ -2273,7 +2272,7 @@ void TurboAssembler::MultiPop(RegList regs) {
#undef A_REGS
}
-void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -2286,7 +2285,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
}
}
-void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -2299,7 +2298,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
}
#if V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddPair(Register dst_low, Register dst_high,
+void MacroAssembler::AddPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
Register scratch1, Register scratch2) {
@@ -2317,7 +2316,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high,
Move(dst_low, scratch1);
}
-void TurboAssembler::SubPair(Register dst_low, Register dst_high,
+void MacroAssembler::SubPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
Register scratch1, Register scratch2) {
@@ -2335,27 +2334,27 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high,
Move(dst_low, scratch1);
}
-void TurboAssembler::AndPair(Register dst_low, Register dst_high,
+void MacroAssembler::AndPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
And(dst_low, left_low, right_low);
And(dst_high, left_high, right_high);
}
-void TurboAssembler::OrPair(Register dst_low, Register dst_high,
+void MacroAssembler::OrPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
Or(dst_low, left_low, right_low);
Or(dst_high, left_high, right_high);
}
-void TurboAssembler::XorPair(Register dst_low, Register dst_high,
+void MacroAssembler::XorPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
Xor(dst_low, left_low, right_low);
Xor(dst_high, left_high, right_high);
}
-void TurboAssembler::MulPair(Register dst_low, Register dst_high,
+void MacroAssembler::MulPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
Register scratch1, Register scratch2) {
@@ -2381,7 +2380,7 @@ void TurboAssembler::MulPair(Register dst_low, Register dst_high,
Add32(dst_high, scratch2, scratch3);
}
-void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift, Register scratch1,
Register scratch2) {
@@ -2426,7 +2425,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, int32_t shift,
Register scratch1, Register scratch2) {
DCHECK_GE(63, shift);
@@ -2451,7 +2450,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift, Register scratch1,
Register scratch2) {
@@ -2496,7 +2495,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, int32_t shift,
Register scratch1, Register scratch2) {
DCHECK_GE(63, shift);
@@ -2521,7 +2520,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::SarPair(Register dst_low, Register dst_high,
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift, Register scratch1,
Register scratch2) {
@@ -2564,7 +2563,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::SarPair(Register dst_low, Register dst_high,
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, int32_t shift,
Register scratch1, Register scratch2) {
DCHECK_GE(63, shift);
@@ -2589,7 +2588,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
}
#endif
-void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
+void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
uint16_t size, bool sign_extend) {
#if V8_TARGET_ARCH_RISCV64
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
@@ -2615,7 +2614,7 @@ void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
#endif
}
-void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
int size) {
#if V8_TARGET_ARCH_RISCV64
DCHECK_LT(size, 64);
@@ -2641,42 +2640,42 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
or_(dest, dest, source_);
}
-void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); }
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); }
-void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); }
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); }
-void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_d_wu(fd, rs);
}
-void TurboAssembler::Cvt_d_w(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_w(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_d_w(fd, rs);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_d_lu(fd, rs);
}
#endif
-void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_s_wu(fd, rs);
}
-void TurboAssembler::Cvt_s_w(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_w(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_s_w(fd, rs);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_s_lu(fd, rs);
}
#endif
template <typename CvtFunc>
-void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
+void MacroAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
Register result,
CvtFunc fcvt_generator) {
// Save csr_fflags to scratch & clear exception flags
@@ -2705,7 +2704,7 @@ void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
}
}
-void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
+void MacroAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
Label no_nan;
feq_d(kScratchReg, fs, fs);
bnez(kScratchReg, &no_nan);
@@ -2713,7 +2712,7 @@ void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
bind(&no_nan);
}
-void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
+void MacroAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
Label no_nan;
feq_s(kScratchReg, fs, fs);
bnez(kScratchReg, &no_nan);
@@ -2721,101 +2720,101 @@ void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
bind(&no_nan);
}
-void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_wu_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_wu_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_wu_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_wu_s(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RTZ);
});
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_lu_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_lu_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_l_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_l_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_lu_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_lu_s(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_l_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_l_s(dst, src, RTZ);
});
}
#endif
-void TurboAssembler::Round_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Round_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RNE);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RNE);
});
}
-void TurboAssembler::Round_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Round_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RNE);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RNE);
});
}
-void TurboAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RUP);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RUP);
});
}
-void TurboAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RUP);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RUP);
});
}
-void TurboAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RDN);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RDN);
});
}
-void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RDN);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RDN);
});
}
@@ -2826,7 +2825,7 @@ void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
// handling is needed by NaN, +/-Infinity, +/-0
#if V8_TARGET_ARCH_RISCV64
template <typename F>
-void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundHelper(FPURegister dst, FPURegister src,
FPURegister fpu_scratch, FPURoundingMode frm) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
@@ -2945,7 +2944,7 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
// rounded result; this differs from behavior of RISCV fcvt instructions (which
// round out-of-range values to the nearest max or min value), therefore special
// handling is needed by NaN, +/-Infinity, +/-0
-void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
FPURegister fpu_scratch, FPURoundingMode frm) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
@@ -3038,8 +3037,9 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
// round out-of-range values to the nearest max or min value), therefore special
// handling is needed by NaN, +/-Infinity, +/-0
template <typename F>
-void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
- VRegister v_scratch, FPURoundingMode frm) {
+void MacroAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, FPURoundingMode frm,
+ bool keep_nan_same) {
VU.set(scratch, std::is_same<F, float>::value ? E32 : E64, m1);
// if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
// in mantissa, the result is the same as src, so move src to dest (to avoid
@@ -3065,14 +3065,13 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
// } else {
// srli(rt, rt, 64 - size);
// }
-
+ vmv_vx(v_scratch, zero_reg);
li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits);
vsll_vx(v_scratch, src, scratch);
li(scratch, 64 - kFloatExponentBits);
vsrl_vx(v_scratch, v_scratch, scratch);
li(scratch, kFloatExponentBias + kFloatMantissaBits);
vmslt_vx(v0, v_scratch, scratch);
-
VU.set(frm);
vmv_vv(dst, src);
if (dst == src) {
@@ -3090,71 +3089,85 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
} else {
vfsngj_vv(dst, dst, src);
}
+ if (!keep_nan_same) {
+ vmfeq_vv(v0, src, src);
+ vnot_vv(v0, v0);
+ if (std::is_same<F, float>::value) {
+ fmv_w_x(kScratchDoubleReg, zero_reg);
+ } else {
+#ifdef V8_TARGET_ARCH_RISCV64
+ fmv_d_x(kScratchDoubleReg, zero_reg);
+#else
+ UNIMPLEMENTED();
+#endif
+ }
+ vfadd_vf(dst, src, kScratchDoubleReg, MaskType::Mask);
+ }
}
-void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP, false);
}
-void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP, false);
}
-void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN, false);
}
-void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN, false);
}
-void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ, false);
}
-void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ, false);
}
-void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE, false);
}
-void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE, false);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
}
-void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RUP);
}
-void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RTZ);
}
-void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RNE);
}
#endif
-void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RDN);
@@ -3163,7 +3176,7 @@ void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src,
#endif
}
-void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RUP);
@@ -3172,7 +3185,7 @@ void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
#endif
}
-void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RTZ);
@@ -3181,7 +3194,7 @@ void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
#endif
}
-void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RNE);
@@ -3210,7 +3223,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
fmsub_d(fd, fs, ft, fr);
}
-void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
+void MacroAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2) {
switch (cc) {
case EQ:
@@ -3237,7 +3250,7 @@ void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
+void MacroAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2) {
switch (cc) {
case EQ:
@@ -3264,7 +3277,7 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3275,7 +3288,7 @@ void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
}
-void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3286,27 +3299,27 @@ void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
FPURegister cmp2) {
CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
FPURegister cmp2) {
CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::BranchTrueShortF(Register rs, Label* target) {
+void MacroAssembler::BranchTrueShortF(Register rs, Label* target) {
Branch(target, not_equal, rs, Operand(zero_reg));
}
-void TurboAssembler::BranchFalseShortF(Register rs, Label* target) {
+void MacroAssembler::BranchFalseShortF(Register rs, Label* target) {
Branch(target, equal, rs, Operand(zero_reg));
}
-void TurboAssembler::BranchTrueF(Register rs, Label* target) {
+void MacroAssembler::BranchTrueF(Register rs, Label* target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
@@ -3319,7 +3332,7 @@ void TurboAssembler::BranchTrueF(Register rs, Label* target) {
}
}
-void TurboAssembler::BranchFalseF(Register rs, Label* target) {
+void MacroAssembler::BranchFalseF(Register rs, Label* target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
@@ -3332,7 +3345,7 @@ void TurboAssembler::BranchFalseF(Register rs, Label* target) {
}
}
-void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
+void MacroAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
#if V8_TARGET_ARCH_RISCV64
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3357,7 +3370,7 @@ void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
#endif
}
-void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
+void MacroAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
#if V8_TARGET_ARCH_RISCV64
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3382,7 +3395,7 @@ void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
#endif
}
-void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
+void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
ASM_CODE_COMMENT(this);
// Handle special values first.
if (src == base::bit_cast<uint32_t>(0.0f) && has_single_zero_reg_set_) {
@@ -3408,7 +3421,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
}
}
-void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
+void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
ASM_CODE_COMMENT(this);
// Handle special values first.
if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
@@ -3459,7 +3472,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
}
}
-void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
+void MacroAssembler::CompareI(Register rd, Register rs, const Operand& rt,
Condition cond) {
switch (cond) {
case eq:
@@ -3504,7 +3517,7 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
}
// dest <- (condition != 0 ? zero : dest)
-void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionNotZero(Register dest,
Register condition) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3515,7 +3528,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
}
// dest <- (condition == 0 ? 0 : dest)
-void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionZero(Register dest,
Register condition) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3525,7 +3538,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest,
and_(dest, dest, scratch);
}
-void TurboAssembler::Clz32(Register rd, Register xx) {
+void MacroAssembler::Clz32(Register rd, Register xx) {
// 32 bit unsigned in lower word: count number of leading zeros.
// int n = 32;
// unsigned y;
@@ -3602,7 +3615,7 @@ void TurboAssembler::Clz32(Register rd, Register xx) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Clz64(Register rd, Register xx) {
+void MacroAssembler::Clz64(Register rd, Register xx) {
// 64 bit: count number of leading zeros.
// int n = 64;
// unsigned y;
@@ -3656,7 +3669,7 @@ void TurboAssembler::Clz64(Register rd, Register xx) {
bind(&L5);
}
#endif
-void TurboAssembler::Ctz32(Register rd, Register rs) {
+void MacroAssembler::Ctz32(Register rd, Register rs) {
// Convert trailing zeroes to trailing ones, and bits to their left
// to zeroes.
@@ -3680,7 +3693,7 @@ void TurboAssembler::Ctz32(Register rd, Register rs) {
}
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Ctz64(Register rd, Register rs) {
+void MacroAssembler::Ctz64(Register rd, Register rs) {
// Convert trailing zeroes to trailing ones, and bits to their left
// to zeroes.
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3703,7 +3716,7 @@ void TurboAssembler::Ctz64(Register rd, Register rs) {
}
}
#endif
-void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
+void MacroAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
@@ -3754,7 +3767,7 @@ void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
+void MacroAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
// uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
@@ -3790,7 +3803,7 @@ void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
srli(rd, rd, 32 + shift);
}
#endif
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
UseScratchRegisterScope temps(this);
@@ -3801,7 +3814,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
Branch(done, eq, scratch, Operand(1));
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -3837,19 +3850,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
(cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
-void TurboAssembler::Branch(int32_t offset) {
+void MacroAssembler::Branch(int32_t offset) {
DCHECK(is_int21(offset));
BranchShort(offset);
}
-void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
const Operand& rt, Label::Distance near_jump) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt);
DCHECK(is_near);
USE(is_near);
}
-void TurboAssembler::Branch(Label* L) {
+void MacroAssembler::Branch(Label* L) {
if (L->is_bound()) {
if (is_near(L)) {
BranchShort(L);
@@ -3865,7 +3878,7 @@ void TurboAssembler::Branch(Label* L) {
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt, Label::Distance near_jump) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt)) {
@@ -3898,7 +3911,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3906,20 +3919,20 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
Branch(L, cond, rs, Operand(scratch));
}
-void TurboAssembler::BranchShortHelper(int32_t offset, Label* L) {
+void MacroAssembler::BranchShortHelper(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset21);
j(offset);
}
-void TurboAssembler::BranchShort(int32_t offset) {
+void MacroAssembler::BranchShort(int32_t offset) {
DCHECK(is_int21(offset));
BranchShortHelper(offset, nullptr);
}
-void TurboAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); }
+void MacroAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); }
-int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits);
} else {
@@ -3928,7 +3941,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
return offset;
}
-Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
Register scratch) {
Register r2 = no_reg;
if (rt.is_reg()) {
@@ -3941,14 +3954,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
+bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
*offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
*scratch = GetRtAsRegisterHelper(rt, *scratch);
@@ -3956,7 +3969,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
return true;
}
-bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
+bool MacroAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
UseScratchRegisterScope temps(this);
@@ -4084,7 +4097,7 @@ bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
return true;
}
-bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -4097,28 +4110,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
}
}
-void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
const Operand& rt) {
BranchShortCheck(offset, nullptr, cond, rs, rt);
}
-void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt) {
BranchShortCheck(0, L, cond, rs, rt);
}
-void TurboAssembler::BranchAndLink(int32_t offset) {
+void MacroAssembler::BranchAndLink(int32_t offset) {
BranchAndLinkShort(offset);
}
-void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
const Operand& rt) {
bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt);
DCHECK(is_near);
USE(is_near);
}
-void TurboAssembler::BranchAndLink(Label* L) {
+void MacroAssembler::BranchAndLink(Label* L) {
if (L->is_bound()) {
if (is_near(L)) {
BranchAndLinkShort(L);
@@ -4134,7 +4147,7 @@ void TurboAssembler::BranchAndLink(Label* L) {
}
}
-void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt) {
if (L->is_bound()) {
if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) {
@@ -4157,25 +4170,25 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
}
}
-void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) {
+void MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset21);
jal(offset);
}
-void TurboAssembler::BranchAndLinkShort(int32_t offset) {
+void MacroAssembler::BranchAndLinkShort(int32_t offset) {
DCHECK(is_int21(offset));
BranchAndLinkShortHelper(offset, nullptr);
}
-void TurboAssembler::BranchAndLinkShort(Label* L) {
+void MacroAssembler::BranchAndLinkShort(Label* L) {
BranchAndLinkShortHelper(0, L);
}
// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
// with the slt instructions. We could use sub or add instead but we would miss
// overflow cases, so we keep slt and add an intermediate third instruction.
-bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -4198,7 +4211,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
return true;
}
-bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -4212,20 +4225,20 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
}
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
- destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
- constant_index)));
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)));
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
LoadWord(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
@@ -4234,7 +4247,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-void TurboAssembler::Jump(Register target, Condition cond, Register rs,
+void MacroAssembler::Jump(Register target, Condition cond, Register rs,
const Operand& rt) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
@@ -4247,7 +4260,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs,
}
}
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
Label skip;
if (cond != cc_always) {
@@ -4262,13 +4275,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -4296,18 +4309,15 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} else {
Jump(code.address(), rmode, cond);
}
-
- int32_t target_index = AddCodeTarget(code);
- Jump(static_cast<intptr_t>(target_index), rmode, cond, rs, rt);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
li(t6, reference);
Jump(t6);
}
// Note: To call gcc-compiled C code on riscv64, you must call through t6.
-void TurboAssembler::Call(Register target, Condition cond, Register rs,
+void MacroAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
@@ -4334,13 +4344,13 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
}
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt) {
li(t6, Operand(static_cast<intptr_t>(target), rmode), ADDRESS_LOAD);
Call(t6, cond, rs, rt);
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4356,7 +4366,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(code->IsExecutable());
if (CanUseNearCallOrJump(rmode)) {
EmbeddedObjectIndex index = AddEmbeddedObject(code);
@@ -4370,12 +4379,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else {
Call(code.address(), rmode);
}
-
- // int32_t target_index = AddCodeTarget(code);
- // Call(static_cast<Address>(target_index), rmode, cond, rs, rt);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
#if V8_TARGET_ARCH_RISCV64
static_assert(kSystemPointerSize == 8);
#elif V8_TARGET_ARCH_RISCV32
@@ -4391,12 +4397,12 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
MemOperand(builtin, IsolateData::builtin_entry_table_offset()));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin) {
LoadEntryFromBuiltinIndex(builtin);
Call(builtin);
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
switch (options().builtin_call_jump_mode) {
case BuiltinCallJumpMode::kAbsolute: {
@@ -4405,7 +4411,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kPCRelative:
- Call(BuiltinEntry(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
+ near_call(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
break;
case BuiltinCallJumpMode::kIndirect: {
LoadEntryFromBuiltin(builtin, t6);
@@ -4429,7 +4435,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
switch (options().builtin_call_jump_mode) {
@@ -4439,7 +4445,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kPCRelative:
- Jump(BuiltinEntry(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
+ near_jump(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
break;
case BuiltinCallJumpMode::kIndirect: {
LoadEntryFromBuiltin(builtin, t6);
@@ -4448,7 +4454,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET,
@@ -4463,18 +4469,18 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
}
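For illustration: the kPCRelative cases above no longer materialize BuiltinEntry(builtin); they hand the builtin id to near_call/near_jump, which record a NEAR_BUILTIN_ENTRY relocation and emit a pc-relative sequence (their definitions are part of the header hunk further down in this diff). A minimal sketch of the call shape; the helper name is ours, not V8's:

  // Sketch: what the kPCRelative path of CallBuiltin now boils down to.
  void EmitPCRelativeBuiltinCall(MacroAssembler* masm, Builtin builtin) {
    masm->near_call(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
  }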
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
LoadWord(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::PatchAndJump(Address target) {
+void MacroAssembler::PatchAndJump(Address target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4491,13 +4497,13 @@ void TurboAssembler::PatchAndJump(Address target) {
pc_ += sizeof(uintptr_t);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
//
// Compute the return address in lr to return to after the jump below. The
// pc is already at '+ 8' from the current instruction; but return is after
@@ -4529,14 +4535,14 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
}
-void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
+void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
Jump(ra, cond, rs, rt);
if (cond == al) {
ForceConstantPoolEmissionWithoutJump();
}
}
-void TurboAssembler::BranchLong(Label* L) {
+void MacroAssembler::BranchLong(Label* L) {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm;
@@ -4545,7 +4551,7 @@ void TurboAssembler::BranchLong(Label* L) {
EmitConstPoolWithJumpIfNeeded();
}
-void TurboAssembler::BranchAndLinkLong(Label* L) {
+void MacroAssembler::BranchAndLinkLong(Label* L) {
// Generate position independent long branch and link.
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm;
@@ -4553,12 +4559,12 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
GenPCRelativeJumpAndLink(t6, imm);
}
-void TurboAssembler::DropAndRet(int drop) {
+void MacroAssembler::DropAndRet(int drop) {
AddWord(sp, sp, drop * kSystemPointerSize);
Ret();
}
-void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
+void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
@@ -4574,7 +4580,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
}
}
-void TurboAssembler::Drop(int count, Condition cond, Register reg,
+void MacroAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
@@ -4605,9 +4611,9 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
}
}
-void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void MacroAssembler::Call(Label* target) { BranchAndLink(target); }
-void TurboAssembler::LoadAddress(Register dst, Label* target,
+void MacroAssembler::LoadAddress(Register dst, Label* target,
RelocInfo::Mode rmode) {
int32_t offset;
if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
@@ -4623,14 +4629,14 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
}
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
push(scratch);
}
-void TurboAssembler::PushArray(Register array, Register size,
+void MacroAssembler::PushArray(Register array, Register size,
PushArrayOrder order) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4659,7 +4665,7 @@ void TurboAssembler::PushArray(Register array, Register size,
}
}
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(handle));
@@ -4702,7 +4708,7 @@ void MacroAssembler::PopStackHandler() {
StoreWord(a1, MemOperand(scratch));
}
-void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
// become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
@@ -4713,19 +4719,19 @@ void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
fsub_d(dst, src, kDoubleRegZero);
}
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, fa0); // Reg fa0 is FP return value.
}
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, fa0); // Reg fa0 is FP first argument value.
}
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
-void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
const DoubleRegister fparg2 = fa1;
if (src2 == fa0) {
@@ -4748,10 +4754,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
LoadWord(destination,
MemOperand(kRootRegister, static_cast<int32_t>(offset)));
@@ -4917,8 +4923,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(code,
- FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -4945,11 +4950,10 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
{
UseScratchRegisterScope temps(this);
Register temp_reg = temps.Acquire();
- LoadTaggedPointerField(
+ LoadTaggedField(
temp_reg,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- LoadTaggedPointerField(
- cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The argument count is stored as uint16_t
Lhu(expected_parameter_count,
FieldMemOperand(temp_reg,
@@ -4970,7 +4974,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, a1);
// Get the function and setup the context.
- LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -4993,7 +4997,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
}
//------------------------------------------------------------------------------
// Wasm
-void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmseq_vv(v0, lhs, rhs);
@@ -5002,7 +5006,7 @@ void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsne_vv(v0, lhs, rhs);
@@ -5011,7 +5015,7 @@ void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsle_vv(v0, rhs, lhs);
@@ -5020,7 +5024,7 @@ void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsleu_vv(v0, rhs, lhs);
@@ -5029,7 +5033,7 @@ void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmslt_vv(v0, rhs, lhs);
@@ -5038,7 +5042,7 @@ void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsltu_vv(v0, rhs, lhs);
@@ -5047,20 +5051,18 @@ void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
- uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(imms));
- uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(imms)) + 1);
- VU.set(kScratchReg, VSew::E64, Vlmul::m1);
- li(kScratchReg, 1);
- vmv_vx(v0, kScratchReg);
- li(kScratchReg, imm1);
- vmerge_vx(dst, kScratchReg, dst);
- li(kScratchReg, imm2);
- vsll_vi(v0, v0, 1);
- vmerge_vx(dst, kScratchReg, dst);
+void MacroAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
+ uint64_t vals[2];
+ memcpy(vals, imms, sizeof(vals));
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, vals[1]);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vslideup_vi(dst, kSimd128ScratchReg, 1);
+ li(kScratchReg, vals[0]);
+ vmv_sx(dst, kScratchReg);
}
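The WasmRvvS128const rewrite above also swaps the two reinterpret_cast loads for a memcpy into a local array, which avoids unaligned reads and strict-aliasing violations on the byte buffer. A standalone illustration of that idiom, independent of V8:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Read two 64-bit lanes out of a 16-byte buffer without assuming alignment
  // or violating strict aliasing: memcpy is the portable way to type-pun.
  void ReadLanes(const uint8_t imms[16], uint64_t vals[2]) {
    std::memcpy(vals, imms, 2 * sizeof(uint64_t));
  }

  int main() {
    uint8_t bytes[16] = {};
    bytes[0] = 0x01;  // low byte of lane 0
    bytes[8] = 0x02;  // low byte of lane 1
    uint64_t lanes[2];
    ReadLanes(bytes, lanes);
    assert(lanes[0] == 0x01 && lanes[1] == 0x02);  // little-endian host assumed
    return 0;
  }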
-void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
+void MacroAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
MemOperand src) {
if (ts == 8) {
Lbu(kScratchReg2, src);
@@ -5092,7 +5094,7 @@ void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
}
}
-void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
+void MacroAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
MemOperand dst) {
if (sz == 8) {
VU.set(kScratchReg, E8, m1);
@@ -5120,7 +5122,7 @@ void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
// -----------------------------------------------------------------------------
// Runtime calls.
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::AddOverflow64(Register dst, Register left,
+void MacroAssembler::AddOverflow64(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5150,7 +5152,7 @@ void TurboAssembler::AddOverflow64(Register dst, Register left,
}
}
-void TurboAssembler::SubOverflow64(Register dst, Register left,
+void MacroAssembler::SubOverflow64(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5182,7 +5184,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left,
}
}
-void TurboAssembler::MulOverflow32(Register dst, Register left,
+void MacroAssembler::MulOverflow32(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -5208,7 +5210,7 @@ void TurboAssembler::MulOverflow32(Register dst, Register left,
xor_(overflow, overflow, dst);
}
-void TurboAssembler::MulOverflow64(Register dst, Register left,
+void MacroAssembler::MulOverflow64(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -5239,7 +5241,7 @@ void TurboAssembler::MulOverflow64(Register dst, Register left,
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddOverflow(Register dst, Register left,
+void MacroAssembler::AddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5269,7 +5271,7 @@ void TurboAssembler::AddOverflow(Register dst, Register left,
}
}
-void TurboAssembler::SubOverflow(Register dst, Register left,
+void MacroAssembler::SubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5301,7 +5303,7 @@ void TurboAssembler::SubOverflow(Register dst, Register left,
}
}
-void TurboAssembler::MulOverflow32(Register dst, Register left,
+void MacroAssembler::MulOverflow32(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -5326,8 +5328,8 @@ void TurboAssembler::MulOverflow32(Register dst, Register left,
}
#endif
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// All parameters are on the stack. a0 has the return value after call.
@@ -5342,8 +5344,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -5361,27 +5362,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
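Both CEntry-related hunks above track the removal of SaveFPRegsMode from this path: CodeFactory::CEntry is now selected only by result size, argv mode and the builtin-exit-frame flag. A hedged sketch of what this implies for a call site; the ACCESS_MASM-style `__` and the runtime function are illustrative:

  // Before: __ CallRuntime(Runtime::kStackGuard, SaveFPRegsMode::kIgnore);
  __ CallRuntime(Runtime::kStackGuard);  // the save_doubles argument is gone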
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- // Ld a Address from a constant pool.
- // Record a value into constant pool.
- ASM_CODE_COMMENT(this);
- if (!v8_flags.riscv_constant_pool) {
- li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- } else {
- RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
- RecordRelocInfo(RelocInfo::OFF_HEAP_TARGET, entry);
- auipc(kOffHeapTrampolineRegister, 0);
- LoadWord(kOffHeapTrampolineRegister,
- MemOperand(kOffHeapTrampolineRegister, 0));
- }
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
ASM_CODE_COMMENT(this);
@@ -5424,15 +5409,15 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (v8_flags.debug_code) Check(cc, reason, rs, rt);
}
-void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
BranchShort(&L, cc, rs, rt);
@@ -5441,7 +5426,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -5498,22 +5483,21 @@ void TurboAssembler::Abort(AbortReason reason) {
}
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
ASM_CODE_COMMENT(this);
- LoadTaggedPointerField(destination,
- FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
LoadMap(dst, cp);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5521,9 +5505,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
PushCommonFrame(scratch);
}
-void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+void MacroAssembler::Prologue() { PushStandardFrame(a1); }
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5540,14 +5524,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
addi(sp, fp, 2 * kSystemPointerSize);
LoadWord(ra, MemOperand(fp, 1 * kSystemPointerSize));
LoadWord(fp, MemOperand(fp, 0 * kSystemPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
@@ -5600,19 +5584,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- if (save_doubles) {
- // The stack is already aligned to 0 modulo 8 for stores with sdc1.
- int space = kNumCallerSavedFPU * kDoubleSize;
- SubWord(sp, sp, Operand(space));
- int count = 0;
- for (int i = 0; i < kNumFPURegisters; i++) {
- if (kCallerSavedFPU.bits() & (1 << i)) {
- FPURegister reg = FPURegister::from_code(i);
- StoreDouble(reg, MemOperand(sp, count * kDoubleSize));
- count++;
- }
- }
- }
// Reserve place for the return address, stack space and an optional slot
// (used by DirectCEntry to hold the return value if a struct is
@@ -5632,28 +5603,12 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreWord(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool do_return,
+void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return,
bool argument_count_is_length) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- // Optionally restore all double registers.
- if (save_doubles) {
- // Remember: we only need to restore kCallerSavedFPU.
- SubWord(scratch, fp,
- Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumCallerSavedFPU * kDoubleSize));
- int cout = 0;
- for (int i = 0; i < kNumFPURegisters; i++) {
- if (kCalleeSavedFPU.bits() & (1 << i)) {
- FPURegister reg = FPURegister::from_code(i);
- LoadDouble(reg, MemOperand(scratch, cout * kDoubleSize));
- cout++;
- }
- }
- }
// Clear top frame.
li(scratch,
@@ -5692,7 +5647,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -5730,7 +5685,7 @@ void MacroAssembler::AssertStackIsAligned() {
}
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
ASM_CODE_COMMENT(this);
if (SmiValuesAre32Bits()) {
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
@@ -5745,7 +5700,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void TurboAssembler::SmiToInt32(Register smi) {
+void MacroAssembler::SmiToInt32(Register smi) {
ASM_CODE_COMMENT(this);
if (v8_flags.enable_slow_asserts) {
AssertSmi(smi);
@@ -5754,7 +5709,7 @@ void TurboAssembler::SmiToInt32(Register smi) {
SmiUntag(smi);
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
@@ -5763,12 +5718,9 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
Branch(smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfCodeTIsMarkedForDeoptimization(
- Register codet, Register scratch, Label* if_marked_for_deoptimization) {
- LoadTaggedPointerField(
- scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- Lw(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
+ Register code, Register scratch, Label* if_marked_for_deoptimization) {
+ Load32U(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Branch(if_marked_for_deoptimization, ne, scratch, Operand(zero_reg));
}
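The renamed JumpIfCodeIsMarkedForDeoptimization reflects the Code/CodeDataContainer merge: the kind-specific flags are now read with a single Load32U straight off the Code object instead of a tagged load plus Lw. The test it emits is a plain mask-and-branch; a standalone equivalent:

  #include <cstdint>

  // Mirrors the And + Branch(ne, zero_reg) pair: true when the
  // marked-for-deoptimization bit is set in the kind-specific flags.
  bool IsMarkedForDeoptimization(uint32_t kind_specific_flags, int bit) {
    return (kind_specific_flags & (uint32_t{1} << bit)) != 0;
  }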
@@ -5787,7 +5739,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
-void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -5797,7 +5749,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
}
}
-void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertSmi(Register object, AbortReason reason) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -5910,7 +5862,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
template <typename F_TYPE>
-void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+void MacroAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
FPURegister src2, MaxMinKind kind) {
DCHECK((std::is_same<F_TYPE, float>::value) ||
(std::is_same<F_TYPE, double>::value));
@@ -5965,25 +5917,25 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
bind(&done);
}
-void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
}
-void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
}
-void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
}
-void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
@@ -5991,7 +5943,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
static const int kRegisterPassedArguments = 8;
-int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
+int MacroAssembler::CalculateStackPassedDWords(int num_gp_arguments,
int num_fp_arguments) {
int stack_passed_dwords = 0;
@@ -6007,7 +5959,7 @@ int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
return stack_passed_dwords;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
ASM_CODE_COMMENT(this);
@@ -6032,36 +5984,42 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t6, function);
- CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
ASM_CODE_COMMENT(this);
@@ -6096,42 +6054,49 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
{
- if (function != t6) {
- Mv(t6, function);
- function = t6;
- }
-
- // Save the frame pointer and PC so that the stack layout remains
- // iterable, even without an ExitFrame which normally exists between JS
- // and C frames.
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register pc_scratch = t1;
- Register scratch = t2;
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ if (function != t6) {
+ Mv(t6, function);
+ function = t6;
+ }
- auipc(pc_scratch, 0);
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- StoreWord(pc_scratch,
- MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- StoreWord(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- StoreWord(pc_scratch, MemOperand(scratch));
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreWord(fp, MemOperand(scratch));
+ // Save the frame pointer and PC so that the stack layout remains
+ // iterable, even without an ExitFrame which normally exists between JS
+ // and C frames.
+ // 't' registers are caller-saved so this is safe as a scratch register.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+
+ auipc(pc_scratch, 0);
+ // See x64 code for reasoning about how to address the isolate data
+ // fields.
+ if (root_array_available()) {
+ StoreWord(pc_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreWord(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ StoreWord(pc_scratch, MemOperand(scratch));
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreWord(fp, MemOperand(scratch));
+ }
}
Call(function);
-
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreWord(zero_reg, MemOperand(scratch));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreWord(zero_reg, MemOperand(scratch));
+ }
}
}
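With the new SetIsolateDataSlots parameter, the fast-C-call caller PC/FP bookkeeping above is emitted only for kYes (the default). A sketch of a call site that opts out, assuming the usual V8 codegen context; the external reference, argument counts and scratch register are illustrative:

  // No GP arguments, two FP arguments; skip writing the
  // fast_c_call_caller_pc/fp isolate slots around the call.
  __ PrepareCallCFunction(0, 2, t0);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2,
                   MacroAssembler::SetIsolateDataSlots::kNo);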
@@ -6147,7 +6112,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
LoadWord(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
@@ -6170,21 +6135,15 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
- // This push on ra and the pop below together ensure that we restore the
- // register ra, which is needed while computing the code start address.
- push(ra);
-
- auipc(ra, 0);
- addi(ra, ra, kInstrSize * 2); // ra = address of li
- int pc = pc_offset();
- li(dst, Operand(pc));
- SubWord(dst, ra, dst);
-
- pop(ra); // Restore ra
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
+ auto pc = -pc_offset();
+ auipc(dst, 0);
+ if (pc != 0) {
+ SubWord(dst, dst, pc);
+ }
}
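The rewritten ComputeCodeStartAddress leans on a single auipc instead of the old push/pop of ra: the address auipc observes is the code start plus the bytes already emitted, so subtracting the emission offset recovers the start. The identity, as a standalone check:

  #include <cassert>
  #include <cstdint>

  // auipc placed at byte offset `emitted` observes code_start + emitted,
  // so code_start = observed_pc - emitted.
  uint64_t CodeStartFromAuipc(uint64_t observed_pc, int emitted) {
    return observed_pc - static_cast<uint64_t>(emitted);
  }

  int main() {
    const uint64_t code_start = 0x4000;
    const int emitted = 0x40;  // bytes generated before the auipc
    assert(CodeStartFromAuipc(code_start + emitted, emitted) == code_start);
    return 0;
  }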
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -6197,87 +6156,35 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
+void MacroAssembler::LoadCodeEntry(Register destination, Register code) {
ASM_CODE_COMMENT(this);
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
-
- Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
- // Not an off-heap trampoline object, the entry point is at
- // Code::raw_instruction_start().
- AddWord(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- Branch(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- slli(destination, scratch, kSystemPointerSizeLog2);
- AddWord(destination, destination, kRootRegister);
- LoadWord(
- destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- AddWord(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- }
+ LoadWord(destination, FieldMemOperand(code, Code::kCodeEntryPointOffset));
}
-void TurboAssembler::CallCodeObject(Register code_object) {
+void MacroAssembler::CallCodeObject(Register code) {
ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
- Call(code_object);
+ LoadCodeEntry(code, code);
+ Call(code);
}
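LoadCodeEntry/CallCodeObject rely on Code now carrying its entry point as a raw word (kCodeEntryPointOffset), which is why the old LoadCodeObjectEntry with its off-heap-trampoline branching disappears in the hunk just above. A sketch of the typical call shape, mirroring the InvokeFunctionCode hunk earlier in this diff (ACCESS_MASM-style `__` assumed, registers illustrative):

  Register code = kJavaScriptCallCodeStartRegister;
  __ LoadTaggedField(code, FieldMemOperand(a1, JSFunction::kCodeOffset));
  __ CallCodeObject(code);  // LoadCodeEntry + Call on the entry-point word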
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
- Jump(code_object);
-}
-#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand) {
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
- } else {
- Ld(destination, field_operand);
- }
+ LoadCodeEntry(code, code);
+ Jump(code);
}
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand) {
+#if V8_TARGET_ARCH_RISCV64
+void MacroAssembler::LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
Ld(destination, field_operand);
}
}
-void TurboAssembler::LoadTaggedSignedField(const Register& destination,
+void MacroAssembler::LoadTaggedSignedField(const Register& destination,
const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedSigned(destination, field_operand);
@@ -6286,11 +6193,11 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination,
}
}
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
SmiUntag(dst, src);
}
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) {
if (COMPRESS_POINTERS_BOOL) {
Sw(value, dst_field_operand);
@@ -6299,7 +6206,7 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
-void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+void MacroAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
@@ -6310,26 +6217,19 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination,
}
}
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
- const MemOperand& field_operand) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
AddWord(destination, kPtrComprCageBaseRegister, destination);
}
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
- const Register& source) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ const Register& source) {
ASM_CODE_COMMENT(this);
And(destination, source, Operand(0xFFFFFFFF));
AddWord(destination, kPtrComprCageBaseRegister, Operand(destination));
}
-
-void TurboAssembler::DecompressAnyTagged(const Register& destination,
- const MemOperand& field_operand) {
- ASM_CODE_COMMENT(this);
- Lwu(destination, field_operand);
- AddWord(destination, kPtrComprCageBaseRegister, destination);
-}
#endif
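On the pointer-compression side, the pointer/any split is gone: LoadTaggedField plus DecompressTagged replace LoadTaggedPointerField, LoadAnyTaggedField, DecompressTaggedPointer and DecompressAnyTagged, as the call-site updates earlier in this file show. A hedged before/after for one such site (registers illustrative; the field constant is one already used in this file):

  // Before: __ LoadTaggedPointerField(dst, FieldMemOperand(obj, HeapObject::kMapOffset));
  __ LoadTaggedField(dst, FieldMemOperand(obj, HeapObject::kMapOffset));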
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode, Register scratch) {
diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h
index 76c6436991..193b10658f 100644
--- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h
+++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h
@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -109,7 +109,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
li(kRootRegister, Operand(isolate_root));
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
#endif
@@ -123,7 +123,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trap();
void DebugBreak();
-
+#ifdef USE_SIMULATOR
+ // See src/codegen/riscv/base-constants-riscv.h DebugParameters.
+ void Debug(uint32_t parameters) { break_(parameters, false); }
+#endif
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
@@ -228,6 +231,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
auipc(rd, Hi20); // Read PC + Hi20 into scratch.
jalr(rd, Lo12); // jump PC + Hi20 + Lo12
}
+
+ // Generate a pc-relative jump (via GenPCRelativeJump) together with the
+ // corresponding relocation info; 'offset' is the immediate to encode.
+ void near_jump(int offset, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.Acquire();
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode, offset);
+ GenPCRelativeJump(temp, offset);
+ }
+ // Generate a pc-relative jump-and-link (via GenPCRelativeJumpAndLink)
+ // together with the corresponding relocation info; as for near_jump,
+ // 'offset' is the immediate to encode.
+ void near_call(int offset, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.Acquire();
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode, offset);
+ GenPCRelativeJumpAndLink(temp, offset);
+ }
+ // As above, but for the given HeapNumberRequest (unimplemented on RISC-V).
+ void near_call(HeapNumberRequest request) { UNIMPLEMENTED(); }
+
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
@@ -257,6 +284,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register dst, Label* target,
RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
// Load the builtin given by the Smi in |builtin| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin);
@@ -266,11 +299,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
- void CallCodeObject(Register code_object);
- void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump);
-
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
@@ -556,12 +584,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
void MovFromFloatResult(DoubleRegister dst);
void MovFromFloatParameter(DoubleRegister dst);
@@ -1038,14 +1077,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support
- // Loads a field containing a HeapObject and decompresses it if pointer
- // compression is enabled.
- void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand);
-
// Loads a field containing any tagged value and decompresses it if necessary.
- void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand);
+ void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand);
// Loads a field containing a tagged signed value and decompresses it if
// necessary.
@@ -1061,12 +1095,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
- void DecompressTaggedPointer(const Register& destination,
- const MemOperand& field_operand);
- void DecompressTaggedPointer(const Register& destination,
- const Register& source);
- void DecompressAnyTagged(const Register& destination,
- const MemOperand& field_operand);
+ void DecompressTagged(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressTagged(const Register& destination, const Register& source);
void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
if (COMPRESS_POINTERS_BOOL) {
Sub32(rd, rs1, rs2);
@@ -1079,12 +1110,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pointer compression Support
// rv32 doesn't support pointer compression. These functions are defined to
// simplify builtins.
- inline void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand) {
- Lw(destination, field_operand);
- }
- inline void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand) {
+ inline void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
Lw(destination, field_operand);
}
inline void LoadTaggedSignedField(const Register& destination,
@@ -1140,71 +1167,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
- protected:
- inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
- inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
-
- private:
- bool has_double_zero_reg_set_ = false;
- bool has_single_zero_reg_set_ = false;
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-
- // TODO(RISCV) Reorder parameters so out parameters come last.
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
- Register* scratch, const Operand& rt);
-
- void BranchShortHelper(int32_t offset, Label* L);
- bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt);
- bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt);
-
- void BranchAndLinkShortHelper(int32_t offset, Label* L);
- void BranchAndLinkShort(int32_t offset);
- void BranchAndLinkShort(Label* L);
- bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- void BranchAndLinkLong(Label* L);
-#if V8_TARGET_ARCH_RISCV64
- template <typename F_TYPE>
- void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
- FPURoundingMode mode);
-#elif V8_TARGET_ARCH_RISCV32
- void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
- FPURoundingMode mode);
-
- void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
- FPURoundingMode mode);
-#endif
- template <typename F>
- void RoundHelper(VRegister dst, VRegister src, Register scratch,
- VRegister v_scratch, FPURoundingMode frm);
-
- template <typename TruncFunc>
- void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
- TruncFunc trunc);
-
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1287,12 +1249,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack.
// stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ void EnterExitFrame(int stack_space = 0,
StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool do_return = NO_EMIT_RETURN,
+ void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
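The exit-frame helpers lose their save_doubles parameter to match the .cc hunks above, where the FPU save/restore blocks were deleted outright. An illustrative call-site migration (ACCESS_MASM-style `__` assumed; argc is a hypothetical Register):

  // Before: __ EnterExitFrame(/*save_doubles=*/false, 0, StackFrame::EXIT);
  __ EnterExitFrame(0, StackFrame::EXIT);
  // ...
  // Before: __ LeaveExitFrame(/*save_doubles=*/false, argc, /*do_return=*/true);
  __ LeaveExitFrame(argc, /*do_return=*/true);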
// Make sure the stack is aligned. Only emits code in debug mode.
@@ -1342,7 +1303,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
@@ -1364,20 +1325,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls.
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
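The hunk above drops the SaveFPRegsMode argument from every CallRuntime convenience overload, so the forwarding chain is now simply FunctionForId → nargs → the base overload. A minimal standalone C++ sketch of that forwarding pattern; RuntimeFunction, FunctionForId and the ids below are illustrative stand-ins, not V8's definitions:

#include <cstdio>

// Illustrative stand-ins for Runtime::Function / Runtime::FunctionForId.
struct RuntimeFunction {
  const char* name;
  int nargs;  // declared argument count, or -1 for "variable"
};

enum class RuntimeId { kAdd, kPrint };

const RuntimeFunction* FunctionForId(RuntimeId id) {
  static const RuntimeFunction kAdd{"Runtime_Add", 2};
  static const RuntimeFunction kPrint{"Runtime_Print", -1};
  return id == RuntimeId::kAdd ? &kAdd : &kPrint;
}

// Base overload: everything ends up here; no FP-save mode anymore.
void CallRuntime(const RuntimeFunction* f, int num_arguments) {
  std::printf("call %s with %d args\n", f->name, num_arguments);
}

// Convenience overloads mirroring the header: forward nargs or an explicit count.
void CallRuntime(RuntimeId id) {
  const RuntimeFunction* f = FunctionForId(id);
  CallRuntime(f, f->nargs);
}
void CallRuntime(RuntimeId id, int num_arguments) {
  CallRuntime(FunctionForId(id), num_arguments);
}

int main() {
  CallRuntime(RuntimeId::kAdd);       // uses the declared arity
  CallRuntime(RuntimeId::kPrint, 3);  // variable arity: caller supplies count
}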
// Convenience function: tail call a runtime routine (jump).
@@ -1386,10 +1344,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
-
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -1452,8 +1406,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
ArgumentsCountType type,
ArgumentsCountMode mode,
Register scratch = no_reg);
- void JumpIfCodeTIsMarkedForDeoptimization(
- Register codet, Register scratch, Label* if_marked_for_deoptimization);
+ void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
+ Label* if_marked_for_deoptimization);
Operand ClearedValue() const;
// Jump if the register contains a non-smi.
@@ -1491,7 +1445,67 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
+ protected:
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
private:
+ bool has_double_zero_reg_set_ = false;
+ bool has_single_zero_reg_set_ = false;
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
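The comment a few lines up describes the fast path for ECMA-262 ToInt32: attempt a truncating conversion, branch to 'done' on success, and fall through when the hardware convert saturates so the slow path can finish the job. A standalone C++ model of that check, under the assumption that out-of-range and NaN inputs saturate to the extreme int64 values; this illustrates the logic only, not the emitted RISC-V code:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

// Model of a truncating hardware convert: NaN and out-of-range inputs
// saturate to the extreme int64 values instead of trapping.
int64_t TruncateToInt64Saturating(double d) {
  constexpr double kTwo63 = 9223372036854775808.0;  // 2^63
  if (std::isnan(d)) return std::numeric_limits<int64_t>::min();
  if (d >= kTwo63) return std::numeric_limits<int64_t>::max();
  if (d <= -kTwo63) return std::numeric_limits<int64_t>::min();
  return static_cast<int64_t>(std::trunc(d));
}

// Fast path: report success unless the conversion may have saturated, in
// which case the caller falls through to the full ToInt32 slow path.
bool TryTruncateDoubleToI(double input, int32_t* result) {
  int64_t full = TruncateToInt64Saturating(input);
  if (full == std::numeric_limits<int64_t>::max() ||
      full == std::numeric_limits<int64_t>::min()) {
    return false;  // possibly saturated: conservatively go slow
  }
  *result = static_cast<int32_t>(full);  // ToInt32 keeps the low 32 bits
  return true;
}

int main() {
  int32_t r = 0;
  std::printf("%d %d\n", TryTruncateDoubleToI(3.9, &r), r);  // 1 3
  std::printf("%d\n", TryTruncateDoubleToI(1e300, &r));      // 0
}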
+ void CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+
+ // TODO(RISCV) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
+
+ void BranchShortHelper(int32_t offset, Label* L);
+ bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+
+ void BranchAndLinkShortHelper(int32_t offset, Label* L);
+ void BranchAndLinkShort(int32_t offset);
+ void BranchAndLinkShort(Label* L);
+ bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ void BranchAndLinkLong(Label* L);
+#if V8_TARGET_ARCH_RISCV64
+ template <typename F_TYPE>
+ void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+#elif V8_TARGET_ARCH_RISCV32
+ void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+
+ void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+#endif
+ template <typename F>
+ void RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, FPURoundingMode frm,
+ bool keep_nan_same = true);
+
+ template <typename TruncFunc>
+ void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+ TruncFunc trunc);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
@@ -1508,7 +1522,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
};
template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.
@@ -1537,9 +1551,14 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
struct MoveCycleState {
- // Whether a move in the cycle needs the scratch or double scratch register.
- bool pending_scratch_register_use = false;
- bool pending_double_scratch_register_use = false;
+ // List of scratch registers reserved for pending moves in a move cycle, and
+ // which should therefore not be used as a temporary location by
+ // {MoveToTempLocation}.
+ RegList scratch_regs;
+ // Available scratch registers during the move cycle resolution scope.
+ base::Optional<UseScratchRegisterScope> temps;
+ // Scratch register picked by {MoveToTempLocation}.
+ base::Optional<Register> scratch_reg;
};
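The reworked MoveCycleState above records which scratch registers pending moves in a cycle have claimed, so that MoveToTempLocation never hands the same register out as the cycle's temporary. A small standalone sketch of that reservation idea, with a plain bitmask standing in for RegList and UseScratchRegisterScope; all names here are illustrative:

#include <cassert>
#include <cstdint>
#include <optional>

// Illustrative model: registers are small integers, RegList is a bitmask.
using Reg = int;
using RegMask = uint32_t;

struct MoveCycleState {
  RegMask scratch_regs = 0;        // reserved by pending moves in the cycle
  std::optional<Reg> scratch_reg;  // picked as the cycle's temp location
};

// Pick the lowest-numbered register that the scope offers but that the cycle
// itself has not reserved.
std::optional<Reg> PickTemp(RegMask available, const MoveCycleState& state) {
  RegMask usable = available & ~state.scratch_regs;
  for (Reg r = 0; r < 32; ++r) {
    if (usable & (1u << r)) return r;
  }
  return std::nullopt;  // caller would have to spill to the stack instead
}

int main() {
  MoveCycleState state;
  state.scratch_regs = (1u << 1);             // r1 is needed by a pending move
  RegMask available = (1u << 1) | (1u << 5);  // scope offers r1 and r5
  state.scratch_reg = PickTemp(available, state);
  assert(state.scratch_reg && *state.scratch_reg == 5);  // r1 is skipped
}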
#define ACCESS_MASM(masm) masm->
diff --git a/deps/v8/src/codegen/riscv/register-riscv.h b/deps/v8/src/codegen/riscv/register-riscv.h
index c530c54b4e..915ecf6a3a 100644
--- a/deps/v8/src/codegen/riscv/register-riscv.h
+++ b/deps/v8/src/codegen/riscv/register-riscv.h
@@ -31,7 +31,7 @@ namespace internal {
V(a4) V(a5) V(a6) V(a7) V(t0) \
V(t1) V(t2) V(t4) V(s7) V(s8) V(s9) V(s10)
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#else
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(s11)
@@ -288,7 +288,6 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-constexpr Register kOffHeapTrampolineRegister = t6;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
@@ -301,10 +300,10 @@ constexpr VRegister kSimd128ScratchReg2 = v23;
constexpr VRegister kSimd128ScratchReg3 = v8;
constexpr VRegister kSimd128RegZero = v25;
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
constexpr Register kPtrComprCageBaseRegister = s11; // callee save
#else
-constexpr Register kPtrComprCageBaseRegister = kRootRegister;
+constexpr Register kPtrComprCageBaseRegister = no_reg;
#endif
} // namespace internal
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index 91f18727cd..f9da5f713c 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -91,7 +91,7 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
- IsWasmCall(rmode_));
+ IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -142,7 +142,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
- return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+ return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
@@ -177,15 +177,15 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_,
- V8HeapCompressionScheme::CompressTagged(target.ptr()),
+ V8HeapCompressionScheme::CompressObject(target.ptr()),
icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 0b0bf2bc43..91653d4dc6 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -371,7 +371,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
EmitRelocations();
@@ -510,8 +510,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
} else if (LLILF == opcode) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
- // Make label relative to Code pointer of generated Code object.
- int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ // Make label relative to InstructionStream pointer of generated
+ // InstructionStream object.
+ int32_t imm32 =
+ target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | imm32);
return;
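For an emitted label constant (LLILF), the patched immediate is no longer relative to Code but to the generated InstructionStream object: the label's byte offset in the instruction area plus the object header, minus the heap-object tag. A tiny standalone illustration of that arithmetic; the header size and tag value below are placeholders, not the real layout constants:

#include <cstdint>
#include <cstdio>

// Placeholder layout constants for illustration; not the real header layout.
constexpr int32_t kInstructionStreamHeaderSize = 32;
constexpr int32_t kHeapObjectTag = 1;  // tagged heap pointers are odd

// Immediate patched into the LLILF label constant: offset of the label from
// the *tagged* InstructionStream pointer.
int32_t LabelConstant(int32_t target_pos) {
  return target_pos + (kInstructionStreamHeaderSize - kHeapObjectTag);
}

int main() {
  uintptr_t tagged_stream = 0x10001;  // tagged pointer to the code object
  int32_t target_pos = 0x40;          // label lives 0x40 bytes into the code
  uintptr_t label_address = tagged_stream + LabelConstant(target_pos);
  std::printf("label constant=0x%x label address=0x%llx\n",
              LabelConstant(target_pos),
              static_cast<unsigned long long>(label_address));
}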
@@ -608,7 +610,7 @@ void Assembler::load_label_offset(Register r1, Label* L) {
int constant;
if (L->is_bound()) {
target_pos = L->pos();
- constant = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ constant = target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link
@@ -831,7 +833,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
- RelocInfo rinfo(pc, rmode, it->data(), Code());
+ RelocInfo rinfo(pc, rmode, it->data(), Code(), InstructionStream());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 50f2cd3093..62d9b36ac2 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -320,7 +320,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
- // Code generation
+ // InstructionStream generation
template <class T, int size, int lo, int hi>
inline T getfield(T value) {
@@ -1043,7 +1043,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// ---------------------------------------------------------------------------
- // Code generation
+ // InstructionStream generation
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -1359,8 +1359,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
byte* buffer_pos() const { return buffer_start_; }
-
- // Code generation
+ // InstructionStream generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -1495,7 +1494,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
private:
friend class Assembler;
- friend class TurboAssembler;
+ friend class MacroAssembler;
Assembler* assembler_;
RegList old_available_;
diff --git a/deps/v8/src/codegen/s390/constants-s390.h b/deps/v8/src/codegen/s390/constants-s390.h
index edf4eba214..3de1a2f372 100644
--- a/deps/v8/src/codegen/s390/constants-s390.h
+++ b/deps/v8/src/codegen/s390/constants-s390.h
@@ -29,6 +29,8 @@
namespace v8 {
namespace internal {
+// The maximum size of the code range s.t. pc-relative calls are possible
+// between all Code objects in the range.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// Number of registers
@@ -39,9 +41,8 @@ const int kNumDoubleRegisters = 16;
const int kNoRegister = -1;
-// Actual value of root register is offset from the root array's start
+// The actual value of the kRootRegister is offset from the IsolateData's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 128;
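Biasing kRootRegister by 128 bytes means the register points slightly past the start of IsolateData, so addressing modes with signed displacements can use their negative range for the first fields and reach further into the table overall. A standalone sketch of the reachable window, assuming s390's long-displacement forms with a signed 20-bit offset; the field offsets themselves are made up:

#include <cstdint>
#include <cstdio>

constexpr int32_t kRootRegisterBias = 128;
constexpr int32_t kMinDisp = -(1 << 19);  // signed 20-bit displacement
constexpr int32_t kMaxDisp = (1 << 19) - 1;

// Map an offset from the start of IsolateData to a displacement from the
// biased root register; reachable iff it fits the signed displacement field.
bool Reachable(int32_t field_offset, int32_t* disp) {
  *disp = field_offset - kRootRegisterBias;
  return *disp >= kMinDisp && *disp <= kMaxDisp;
}

int main() {
  int32_t disp;
  bool ok = Reachable(0, &disp);
  std::printf("offset 0: disp=%d reachable=%d\n", disp, ok);  // -128, 1
  ok = Reachable(kMaxDisp + kRootRegisterBias, &disp);        // shifted window
  std::printf("offset %d: reachable=%d\n", kMaxDisp + kRootRegisterBias, ok);
}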
// sign-extend the least significant 16-bits of value <imm>
@@ -101,9 +102,73 @@ enum Condition {
mask0xC = 12,
mask0xD = 13,
mask0xE = 14,
- mask0xF = 15
+ mask0xF = 15,
+
+ // Unified cross-platform condition names/aliases.
+ // Do not set unsigned constants equal to their signed variants.
+ // We need to be able to differentiate between signed and unsigned enum
+ // constants in order to emit the right instructions (i.e CmpS64 vs CmpU64).
+ kEqual = eq,
+ kNotEqual = ne,
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+ kUnsignedLessThan = 16,
+ kUnsignedGreaterThan = 17,
+ kUnsignedLessThanEqual = 18,
+ kUnsignedGreaterThanEqual = 19,
+ kOverflow = overflow,
+ kNoOverflow = nooverflow,
+ kZero = 20,
+ kNotZero = 21,
};
+inline Condition to_condition(Condition cond) {
+ switch (cond) {
+ case kUnsignedLessThan:
+ return lt;
+ case kUnsignedGreaterThan:
+ return gt;
+ case kUnsignedLessThanEqual:
+ return le;
+ case kUnsignedGreaterThanEqual:
+ return ge;
+ case kZero:
+ return eq;
+ case kNotZero:
+ return ne;
+ default:
+ break;
+ }
+ return cond;
+}
+
+inline bool is_signed(Condition cond) {
+ switch (cond) {
+ case kEqual:
+ case kNotEqual:
+ case kLessThan:
+ case kGreaterThan:
+ case kLessThanEqual:
+ case kGreaterThanEqual:
+ case kOverflow:
+ case kNoOverflow:
+ case kZero:
+ case kNotZero:
+ return true;
+
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThan:
+ case kUnsignedLessThanEqual:
+ case kUnsignedGreaterThanEqual:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
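The unsigned aliases above are deliberately given values distinct from their signed counterparts so a macro instruction can inspect the condition alone, pick CmpS64 or CmpU64 via is_signed, and then fold the alias back onto the underlying branch mask with to_condition. A standalone model of that dispatch; the print statements stand in for the actual compare and branch emission:

#include <cstdio>

enum Condition {
  eq, ne, lt, gt, le, ge,
  kUnsignedLessThan, kUnsignedGreaterThan,
  kUnsignedLessThanEqual, kUnsignedGreaterThanEqual,
};

bool is_signed(Condition c) {
  switch (c) {
    case eq: case ne: case lt: case gt: case le: case ge:
      return true;
    default:
      return false;
  }
}

Condition to_condition(Condition c) {
  switch (c) {
    case kUnsignedLessThan:         return lt;
    case kUnsignedGreaterThan:      return gt;
    case kUnsignedLessThanEqual:    return le;
    case kUnsignedGreaterThanEqual: return ge;
    default:                        return c;
  }
}

// Stand-in for a macro instruction: the alias picks the compare flavour, then
// collapses to the plain branch condition.
void CmpAndBranch(long lhs, long rhs, Condition cond) {
  if (is_signed(cond)) {
    std::printf("CmpS64 %ld, %ld; branch on %d\n", lhs, rhs, to_condition(cond));
  } else {
    std::printf("CmpU64 %ld, %ld; branch on %d\n", lhs, rhs, to_condition(cond));
  }
}

int main() {
  CmpAndBranch(-1, 1, lt);                 // signed compare
  CmpAndBranch(-1, 1, kUnsignedLessThan);  // unsigned compare, same branch lt
}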
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
switch (cond) {
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
index cf92bbff00..f2a406394b 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -100,6 +100,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return r6; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return r7; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return r7; }
// static
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 79c9cd8ec6..7f88a15259 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes =
} // namespace
-void TurboAssembler::DoubleMax(DoubleRegister result_reg,
+void MacroAssembler::DoubleMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -101,7 +101,7 @@ void TurboAssembler::DoubleMax(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::DoubleMin(DoubleRegister result_reg,
+void MacroAssembler::DoubleMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -152,7 +152,7 @@ void TurboAssembler::DoubleMin(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::FloatMax(DoubleRegister result_reg,
+void MacroAssembler::FloatMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -197,7 +197,7 @@ void TurboAssembler::FloatMax(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::FloatMin(DoubleRegister result_reg,
+void MacroAssembler::FloatMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -249,39 +249,39 @@ void TurboAssembler::FloatMin(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_POS_INF, dst, src);
}
-void TurboAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TOWARD_POS_INF, dst, src);
}
-void TurboAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_NEG_INF, dst, src);
}
-void TurboAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TOWARD_NEG_INF, dst, src);
}
-void TurboAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_0, dst, src);
}
-void TurboAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TOWARD_0, dst, src);
}
-void TurboAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
}
-void TurboAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -298,7 +298,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
Register exclusion1, Register exclusion2,
Register exclusion3) {
int bytes = 0;
@@ -316,7 +316,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
Register exclusion1, Register exclusion2,
Register exclusion3) {
int bytes = 0;
@@ -333,7 +333,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
return bytes;
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -343,18 +343,17 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
- destination,
- FieldMemOperand(destination,
- FixedArray::OffsetOfElementAt(constant_index)),
- r1);
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)),
+ r1);
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
LoadU64(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
mov(destination, kRootRegister);
@@ -366,7 +365,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -396,9 +395,9 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
+void MacroAssembler::Jump(Register target, Condition cond) { b(cond, target); }
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
Label skip;
@@ -410,13 +409,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -431,14 +430,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Move(scratch, reference);
Jump(scratch);
}
-void TurboAssembler::Call(Register target) {
+void MacroAssembler::Call(Register target) {
// Branch to target via indirect branch
basr(r14, target);
}
@@ -461,7 +460,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
return size;
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(cond == al);
@@ -469,7 +468,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
basr(r14, ip);
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
@@ -481,12 +480,11 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
CallBuiltin(builtin);
return;
}
- DCHECK(code->IsExecutable());
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code, rmode);
}
-void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
@@ -503,14 +501,14 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
Call(ip);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -529,7 +527,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
break;
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
} else {
LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
@@ -540,7 +538,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
}
-void TurboAssembler::Drop(int count) {
+void MacroAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
if (is_uint12(total)) {
@@ -553,17 +551,14 @@ void TurboAssembler::Drop(int count) {
}
}
-void TurboAssembler::Drop(Register count, Register scratch) {
+void MacroAssembler::Drop(Register count, Register scratch) {
ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
AddS64(sp, sp, scratch);
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
- Register scratch) {
- LoadTaggedPointerField(
- scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- LoadS32(scratch, FieldMemOperand(
- scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+ Register scratch) {
+ LoadU16(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
TestBit(scratch, Code::kMarkedForDeoptimizationBit, scratch);
}
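The test above now reads the deoptimization flag directly from the Code object's 16-bit kind-specific flags field (a single LoadU16 plus TestBit) instead of chasing the old CodeDataContainer indirection. A standalone model of the bit test; the field layout and bit position are illustrative, not the real Code layout:

#include <cstdint>
#include <cstdio>

// Illustrative flag layout: bit 0 of a 16-bit flags field.
constexpr int kMarkedForDeoptimizationBit = 0;

struct FakeCode {
  uint16_t kind_specific_flags;  // stands in for the Code flags field
};

bool IsMarkedForDeoptimization(const FakeCode* code) {
  uint16_t flags = code->kind_specific_flags;          // LoadU16
  return (flags >> kMarkedForDeoptimizationBit) & 1;   // TestBit
}

int main() {
  FakeCode live{0};
  FakeCode deopt{1};
  std::printf("%d %d\n", IsMarkedForDeoptimization(&live),
              IsMarkedForDeoptimization(&deopt));  // 0 1
}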
@@ -572,19 +567,19 @@ Operand MacroAssembler::ClearedValue() const {
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}
-void TurboAssembler::Call(Label* target) { b(r14, target); }
+void MacroAssembler::Call(Label* target) { b(r14, target); }
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
mov(r0, Operand(smi));
push(r0);
}
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
@@ -602,7 +597,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
}
}
-void TurboAssembler::Move(Register dst, ExternalReference reference) {
+void MacroAssembler::Move(Register dst, ExternalReference reference) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -613,7 +608,7 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
mov(dst, Operand(reference));
}
-void TurboAssembler::Move(Register dst, Register src, Condition cond) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
if (dst != src) {
if (cond == al) {
mov(dst, src);
@@ -623,38 +618,38 @@ void TurboAssembler::Move(Register dst, Register src, Condition cond) {
}
}
-void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (dst != src) {
ldr(dst, src);
}
}
-void TurboAssembler::Move(Register dst, const MemOperand& src) {
+void MacroAssembler::Move(Register dst, const MemOperand& src) {
LoadU64(dst, src);
}
// Wrapper around Assembler::mvc (SS-a format)
-void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
+void MacroAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length) {
mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
// Wrapper around Assembler::clc (SS-a format)
-void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1,
+void MacroAssembler::CompareLogicalChar(const MemOperand& opnd1,
const MemOperand& opnd2,
const Operand& length) {
clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
// Wrapper around Assembler::xc (SS-a format)
-void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
+void MacroAssembler::ExclusiveOrChar(const MemOperand& opnd1,
const MemOperand& opnd2,
const Operand& length) {
xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
// Wrapper around Assembler::risbg(n) (RIE-f)
-void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
+void MacroAssembler::RotateInsertSelectBits(Register dst, Register src,
const Operand& startBit,
const Operand& endBit,
const Operand& shiftAmt,
@@ -667,7 +662,7 @@ void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
risbg(dst, src, startBit, endBit, shiftAmt);
}
-void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
+void MacroAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
Label* L) {
#if V8_TARGET_ARCH_S390X
brxhg(dst, inc, L);
@@ -676,7 +671,7 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
#endif // V8_TARGET_ARCH_S390X
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order) {
Label loop, done;
@@ -707,7 +702,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void TurboAssembler::MultiPush(RegList regs, Register location) {
+void MacroAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
@@ -720,7 +715,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
}
}
-void TurboAssembler::MultiPop(RegList regs, Register location) {
+void MacroAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -732,7 +727,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -746,7 +741,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
@@ -761,7 +756,7 @@ void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
}
}
-void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -774,7 +769,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
Register location) {
int16_t stack_offset = 0;
@@ -788,7 +783,7 @@ void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -822,7 +817,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
#endif
}
-void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -854,33 +849,36 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
#endif
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
- Condition) {
- LoadU64(destination,
- MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
+ ASM_CODE_COMMENT(this);
+ if (CanBeImmediate(index)) {
+ mov(destination, Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
+ return;
+ }
+ LoadRoot(destination, index);
}
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch) {
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
- } else {
- LoadU64(destination, field_operand, scratch);
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
+ Condition) {
+ if (CanBeImmediate(index)) {
+ DecompressTagged(destination, ReadOnlyRootPtr(index));
+ return;
}
+ LoadU64(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
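LoadRoot (and the new LoadTaggedRoot above) special-case roots whose compressed pointer is already a constant: CanBeImmediate roots are materialized with a mov and decompressed against the cage base, while everything else still goes through one load from the root table via kRootRegister. A standalone sketch of that decision; the cage base, the table contents, and which roots count as immediate are all made up:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kCageBase = 0x0000'2000'0000'0000ull;  // made-up cage base

enum RootIndex { kUndefinedValue, kTheHoleValue, kSomeMutableRoot, kRootCount };

// Full pointers, normally reached with a load through kRootRegister.
uint64_t g_root_table[kRootCount] = {kCageBase + 0x11, kCageBase + 0x21,
                                     kCageBase + 0x42000};

// Read-only roots sit at fixed offsets, so their compressed pointer is a
// build-time constant.
bool CanBeImmediate(RootIndex idx) { return idx <= kTheHoleValue; }
uint32_t ReadOnlyRootPtr(RootIndex idx) {
  return static_cast<uint32_t>(g_root_table[idx]);
}

uint64_t LoadRoot(RootIndex idx) {
  if (CanBeImmediate(idx)) {
    // mov(immediate) + add cage base: no memory access needed.
    return kCageBase + ReadOnlyRootPtr(idx);
  }
  // Fallback: one 64-bit load from the root table.
  return g_root_table[idx];
}

int main() {
  std::printf("undefined @ 0x%llx\n",
              static_cast<unsigned long long>(LoadRoot(kUndefinedValue)));
  std::printf("mutable   @ 0x%llx\n",
              static_cast<unsigned long long>(LoadRoot(kSomeMutableRoot)));
}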
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch) {
+void MacroAssembler::LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre31Bits()) {
LoadS32(dst, src);
} else {
@@ -889,11 +887,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
SmiUntag(dst);
}
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
SmiUntag(dst, src);
}
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
@@ -905,53 +903,43 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
Register src) {
RecordComment("[ DecompressTaggedSigned");
llgfr(destination, src);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedSigned");
llgf(destination, field_operand);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- Register source) {
- RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
+ RecordComment("[ DecompressTagged");
llgfr(destination, source);
agr(destination, kRootRegister);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- MemOperand field_operand) {
- RecordComment("[ DecompressTaggedPointer");
- llgf(destination, field_operand);
- agr(destination, kRootRegister);
- RecordComment("]");
-}
-
-void TurboAssembler::DecompressAnyTagged(Register destination,
- MemOperand field_operand) {
- RecordComment("[ DecompressAnyTagged");
+void MacroAssembler::DecompressTagged(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTagged");
llgf(destination, field_operand);
agr(destination, kRootRegister);
RecordComment("]");
}
-void TurboAssembler::DecompressAnyTagged(Register destination,
- Register source) {
- RecordComment("[ DecompressAnyTagged");
- llgfr(destination, source);
+void MacroAssembler::DecompressTagged(const Register& destination,
+ Tagged_t immediate) {
+ ASM_CODE_COMMENT(this);
+ mov(destination, Operand(immediate, RelocInfo::NO_INFO));
agr(destination, kRootRegister);
- RecordComment("]");
}
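The DecompressTaggedPointer/DecompressAnyTagged variants collapse into a single DecompressTagged: zero-extend the 32-bit compressed value (llgf/llgfr, or a mov for a known immediate) and add the cage base held in kRootRegister. A standalone model of the compress/decompress round trip, with a made-up cage base:

#include <cstdint>
#include <cstdio>

// Made-up cage base; in the generated code it lives in kRootRegister.
constexpr uint64_t kCageBase = 0x0000'1100'0000'0000ull;

// CompressObject: keep the low 32 bits of the full tagged pointer.
uint32_t CompressObject(uint64_t tagged) { return static_cast<uint32_t>(tagged); }

// DecompressTagged: zero-extend (llgf/llgfr) then add the base (agr).
uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);
}

int main() {
  uint64_t obj = kCageBase + 0x0ABC'DE01ull;  // some tagged object in the cage
  uint32_t c = CompressObject(obj);
  uint64_t round_trip = DecompressTagged(kCageBase, c);
  std::printf("0x%016llx -> 0x%08x -> 0x%016llx\n",
              static_cast<unsigned long long>(obj), c,
              static_cast<unsigned long long>(round_trip));
}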
-void TurboAssembler::LoadTaggedSignedField(Register destination,
+void MacroAssembler::LoadTaggedSignedField(Register destination,
MemOperand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedSigned(destination, field_operand);
@@ -999,17 +987,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, slot_address));
@@ -1032,7 +1020,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -1055,7 +1043,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -1083,7 +1071,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
SaveFPRegsMode fp_mode, SmiCheck smi_check) {
DCHECK(!AreAliased(object, slot_address, value));
if (v8_flags.debug_code) {
- LoadTaggedPointerField(r0, MemOperand(slot_address));
+ LoadTaggedField(r0, MemOperand(slot_address));
CmpS64(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -1101,8 +1089,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, &done);
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
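The fast path above now checks the plain kPointersToHereAreInterestingMask on the value's page (the OrInSharedHeap variant is gone) and kPointersFromHereAreInterestingMask on the object's page, skipping the barrier if either flag is clear. A standalone model of those page-flag checks, assuming a toy layout where each page starts with a flags word; the page size and flag bits are illustrative:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = 4096;
constexpr uintptr_t kPageMask = ~(kPageSize - 1);
constexpr uint32_t kPointersToHereAreInteresting = 1u << 0;
constexpr uint32_t kPointersFromHereAreInteresting = 1u << 1;

struct alignas(4096) Page {
  uint32_t flags = 0;
  unsigned char payload[kPageSize - sizeof(uint32_t)];
};

uint32_t PageFlags(const void* addr) {
  // The owning page header is found by masking off the low address bits.
  return reinterpret_cast<const Page*>(
             reinterpret_cast<uintptr_t>(addr) & kPageMask)->flags;
}

// Write-barrier fast path: only reach the record-write stub when both pages
// say the store might create a pointer the GC has to track.
bool NeedsRecordWrite(const void* object, const void* value) {
  if (!(PageFlags(value) & kPointersToHereAreInteresting)) return false;
  if (!(PageFlags(object) & kPointersFromHereAreInteresting)) return false;
  return true;
}

int main() {
  static Page old_page, young_page;
  old_page.flags = kPointersFromHereAreInteresting;
  young_page.flags = kPointersToHereAreInteresting;
  const void* slot_owner = &old_page.payload[128];
  const void* new_value = &young_page.payload[64];
  std::printf("old->young needs barrier: %d\n",
              NeedsRecordWrite(slot_owner, new_value));  // 1
  std::printf("old->old needs barrier:   %d\n",
              NeedsRecordWrite(slot_owner, &old_page.payload[8]));  // 0
}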
@@ -1128,7 +1115,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
ASM_CODE_COMMENT(this);
int fp_delta = 0;
CleanseP(r14);
@@ -1142,7 +1129,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
-void TurboAssembler::PopCommonFrame(Register marker_reg) {
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Pop(r14, fp, marker_reg);
} else {
@@ -1150,7 +1137,7 @@ void TurboAssembler::PopCommonFrame(Register marker_reg) {
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
CleanseP(r14);
if (function_reg.is_valid()) {
@@ -1164,7 +1151,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(kJavaScriptCallArgCountRegister);
}
-void TurboAssembler::RestoreFrameStateForTailCall() {
+void MacroAssembler::RestoreFrameStateForTailCall() {
// if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
// LoadU64(kConstantPoolRegister,
// MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -1175,7 +1162,7 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
-void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN
if (dst != src) ldr(dst, src);
@@ -1183,11 +1170,11 @@ void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
sdbr(dst, kDoubleRegZero);
}
-void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
+void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
cdfbr(dst, src);
}
-void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
+void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
Register src) {
if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
cdlfbr(Condition(5), Condition(0), dst, src);
@@ -1199,36 +1186,36 @@ void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
}
}
-void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
+void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
cefbra(Condition(4), dst, src);
}
-void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
+void MacroAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
Register src) {
celfbr(Condition(4), Condition(0), dst, src);
}
-void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
+void MacroAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
Register src) {
cegbr(double_dst, src);
}
-void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
+void MacroAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdgbr(double_dst, src);
}
-void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
+void MacroAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
Register src) {
celgbr(Condition(0), Condition(0), double_dst, src);
}
-void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
+void MacroAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdlgbr(Condition(0), Condition(0), double_dst, src);
}
-void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
+void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1250,7 +1237,7 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
cgebr(m, dst, double_input);
}
-void TurboAssembler::ConvertDoubleToInt64(const Register dst,
+void MacroAssembler::ConvertDoubleToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1272,7 +1259,7 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
cgdbr(m, dst, double_input);
}
-void TurboAssembler::ConvertDoubleToInt32(const Register dst,
+void MacroAssembler::ConvertDoubleToInt32(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1298,7 +1285,7 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
cfdbr(m, dst, double_input);
}
-void TurboAssembler::ConvertFloat32ToInt32(const Register result,
+void MacroAssembler::ConvertFloat32ToInt32(const Register result,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1324,7 +1311,7 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
cfebr(m, result, double_input);
}
-void TurboAssembler::ConvertFloat32ToUnsignedInt32(
+void MacroAssembler::ConvertFloat32ToUnsignedInt32(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1349,7 +1336,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
clfebr(m, Condition(0), result, double_input);
}
-void TurboAssembler::ConvertFloat32ToUnsignedInt64(
+void MacroAssembler::ConvertFloat32ToUnsignedInt64(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1371,7 +1358,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
clgebr(m, Condition(0), result, double_input);
}
-void TurboAssembler::ConvertDoubleToUnsignedInt64(
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1393,7 +1380,7 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
clgdbr(m, Condition(0), dst, double_input);
}
-void TurboAssembler::ConvertDoubleToUnsignedInt32(
+void MacroAssembler::ConvertDoubleToUnsignedInt32(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1418,15 +1405,15 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
clfdbr(m, Condition(0), dst, double_input);
}
-void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
+void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
lgdr(dst, src);
}
-void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
ldgr(dst, src);
}
-void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
+void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -1435,12 +1422,12 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
-void TurboAssembler::Prologue(Register base, int prologue_offset) {
+void MacroAssembler::Prologue(Register base, int prologue_offset) {
DCHECK(base != no_reg);
PushStandardFrame(r3);
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes =
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -1466,7 +1453,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
ArgumentsCountType type,
ArgumentsCountMode mode) {
@@ -1481,7 +1468,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
}
}
-void TurboAssembler::EnterFrame(StackFrame::Type type,
+void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
ASM_CODE_COMMENT(this);
// We create a stack frame with:
@@ -1502,7 +1489,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
#endif // V8_ENABLE_WEBASSEMBLY
}
-int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
ASM_CODE_COMMENT(this);
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer.
@@ -1542,7 +1529,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// gaps
// Args
// ABIRes <- newSP
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -1574,20 +1561,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreU64(cp, MemOperand(r1));
- // Optionally save all volatile double registers.
- if (save_doubles) {
- MultiPushDoubles(kCallerSavedDoubles);
- // Note that d0 will be accessible at
- // fp - ExitFrameConstants::kFrameSize -
- // kNumCallerSavedDoubles * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
- const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (frame_alignment > 0) {
DCHECK_EQ(frame_alignment, 8);
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
@@ -1601,7 +1579,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreU64(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
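After reserving stack_space slots, the frame is rounded to the activation frame alignment; on s390 that is 8 bytes, and ClearRightImm(sp, sp, Operand(3)) clears the low three bits, i.e. sp &= -8, so the stack pointer only ever moves down. A tiny standalone illustration of that rounding; the addresses are arbitrary:

#include <cstdint>
#include <cstdio>

// ClearRightImm(sp, sp, Operand(3)) clears the low 3 bits: sp &= ~0x7.
uintptr_t AlignDownTo8(uintptr_t sp) { return sp & ~uintptr_t{7}; }

int main() {
  uintptr_t sp = 0x7fff'ffe4'341cull;  // arbitrary, misaligned by 4
  uintptr_t reserved = sp - 3 * 8;     // reserve 3 pointer-size slots
  uintptr_t aligned = AlignDownTo8(reserved);
  std::printf("sp=0x%llx reserved=0x%llx aligned=0x%llx\n",
              static_cast<unsigned long long>(sp),
              static_cast<unsigned long long>(reserved),
              static_cast<unsigned long long>(aligned));
  // Rounding down only grows the frame; it never hands out reserved bytes.
}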
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1617,17 +1595,8 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+void MacroAssembler::LeaveExitFrame(Register argument_count,
bool argument_count_is_length) {
- // Optionally restore all double registers.
- if (save_doubles) {
- // Calculate the stack location of the saved doubles and restore them.
- const int kNumRegs = kNumCallerSavedDoubles;
- lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumRegs * kDoubleSize)));
- MultiPopDoubles(kCallerSavedDoubles, r5);
- }
-
// Clear top frame.
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
@@ -1657,11 +1626,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d0);
}
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d0);
}
@@ -1672,10 +1641,10 @@ MemOperand MacroAssembler::StackLimitAsMemOperand(StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
return MemOperand(kRootRegister, offset);
}
@@ -1829,8 +1798,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(code,
- FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -1855,9 +1823,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r4;
Register temp_reg = r6;
- LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
- LoadTaggedPointerField(
- temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
+ LoadTaggedField(temp_reg,
+ FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
LoadU16(
expected_reg,
FieldMemOperand(temp_reg,
@@ -1878,8 +1846,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r3);
// Get the function and setup the context.
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(r3, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -1961,6 +1928,10 @@ void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
+ if (CanBeImmediate(index)) {
+ CompareTagged(obj, Operand(ReadOnlyRootPtr(index)));
+ return;
+ }
int32_t offset = RootRegisterOffsetForRootIndex(index);
#ifdef V8_TARGET_BIG_ENDIAN
offset += (COMPRESS_POINTERS_BOOL ? kTaggedSize : 0);
@@ -1975,7 +1946,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
ble(on_in_range);
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -2007,7 +1978,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
bind(&done);
}
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
ConvertDoubleToInt64(result, double_input);
@@ -2040,7 +2011,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
{
- __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch);
+ __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, scratch);
__ bne(&heal_optimized_code_slot);
}
@@ -2049,7 +2020,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
r7);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadCodeObjectEntry(r4, optimized_code_entry);
+ __ LoadCodeEntry(r4, optimized_code_entry);
__ Jump(r4);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -2156,15 +2127,14 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, r8);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r2 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -2179,10 +2149,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
mov(r2, Operand(num_arguments));
Move(r3, ExternalReference::Create(f));
#if V8_TARGET_ARCH_S390X
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
#else
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1);
#endif
Call(code, RelocInfo::CODE_TARGET);
@@ -2200,16 +2169,11 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r3, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
CmpS32(in, Operand(kClearedWeakHeapObjectLower32));
@@ -2244,7 +2208,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
+void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L);
Abort(reason);
@@ -2252,7 +2216,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -2299,29 +2263,28 @@ void TurboAssembler::Abort(AbortReason reason) {
// will not return here
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
- LoadTaggedPointerField(destination,
- FieldMemOperand(object, HeapObject::kMapOffset));
+void MacroAssembler::LoadMap(Register destination, Register object) {
+ LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
+void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (v8_flags.debug_code) Check(cond, reason, cr);
}
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
-void TurboAssembler::AssertNotSmi(Register object) {
+void MacroAssembler::AssertNotSmi(Register object) {
if (v8_flags.debug_code) {
static_assert(kSmiTag == 0);
TestIfSmi(object);
@@ -2329,7 +2292,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
}
}
-void TurboAssembler::AssertSmi(Register object) {
+void MacroAssembler::AssertSmi(Register object) {
if (v8_flags.debug_code) {
static_assert(kSmiTag == 0);
TestIfSmi(object);
@@ -2425,7 +2388,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
static const int kRegisterPassedArguments = 5;
-int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
@@ -2439,7 +2402,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -2462,16 +2425,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
-void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2 == d0) {
DCHECK(src1 != d2);
@@ -2483,53 +2446,60 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
Move(ip, function);
- CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- Register addr_scratch = r1;
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- LoadPC(r0);
- StoreU64(r0, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- StoreU64(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
-
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_pc_address(isolate()));
- LoadPC(r0);
- StoreU64(r0, MemOperand(addr_scratch));
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreU64(fp, MemOperand(addr_scratch));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ LoadPC(r0);
+ StoreU64(r0, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreU64(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r1;
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ LoadPC(r0);
+ StoreU64(r0, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreU64(fp, MemOperand(addr_scratch));
+ }
}
// Just call directly. The function called cannot cause a GC, or
@@ -2543,19 +2513,22 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
- // We don't unset the PC; the FP is the source of truth.
- Register zero_scratch = r0;
- lghi(zero_scratch, Operand::Zero());
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r0;
+ lghi(zero_scratch, Operand::Zero());
- if (root_array_available()) {
- StoreU64(
- zero_scratch,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreU64(zero_scratch, MemOperand(addr_scratch));
+ if (root_array_available()) {
+ StoreU64(zero_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r1;
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreU64(zero_scratch, MemOperand(addr_scratch));
+ }
}
int stack_passed_arguments =
@@ -2569,7 +2542,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
-void TurboAssembler::CheckPageFlag(
+void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
@@ -2629,9 +2602,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::mov(Register dst, Register src) { lgr(dst, src); }
+void MacroAssembler::mov(Register dst, Register src) { lgr(dst, src); }
-void TurboAssembler::mov(Register dst, const Operand& src) {
+void MacroAssembler::mov(Register dst, const Operand& src) {
int64_t value = 0;
if (src.is_heap_number_request()) {
@@ -2676,7 +2649,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
iilf(dst, Operand(lo_32));
}
-void TurboAssembler::MulS32(Register dst, const MemOperand& src1) {
+void MacroAssembler::MulS32(Register dst, const MemOperand& src1) {
if (is_uint12(src1.offset())) {
ms(dst, src1);
} else if (is_int20(src1.offset())) {
@@ -2686,9 +2659,9 @@ void TurboAssembler::MulS32(Register dst, const MemOperand& src1) {
}
}
-void TurboAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); }
+void MacroAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); }
-void TurboAssembler::MulS32(Register dst, const Operand& src1) {
+void MacroAssembler::MulS32(Register dst, const Operand& src1) {
msfi(dst, src1);
}
@@ -2699,19 +2672,19 @@ void TurboAssembler::MulS32(Register dst, const Operand& src1) {
srlg(dst, dst, Operand(32)); \
}
-void TurboAssembler::MulHighS32(Register dst, Register src1,
+void MacroAssembler::MulHighS32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHigh32(msgf);
}
-void TurboAssembler::MulHighS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighS32(Register dst, Register src1, Register src2) {
if (dst == src2) {
std::swap(src1, src2);
}
Generate_MulHigh32(msgfr);
}
-void TurboAssembler::MulHighS32(Register dst, Register src1,
+void MacroAssembler::MulHighS32(Register dst, Register src1,
const Operand& src2) {
Generate_MulHigh32(msgfi);
}
@@ -2725,16 +2698,16 @@ void TurboAssembler::MulHighS32(Register dst, Register src1,
LoadU32(dst, r0); \
}
-void TurboAssembler::MulHighU32(Register dst, Register src1,
+void MacroAssembler::MulHighU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHighU32(ml);
}
-void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
Generate_MulHighU32(mlr);
}
-void TurboAssembler::MulHighU32(Register dst, Register src1,
+void MacroAssembler::MulHighU32(Register dst, Register src1,
const Operand& src2) {
USE(dst);
USE(src1);
@@ -2751,7 +2724,7 @@ void TurboAssembler::MulHighU32(Register dst, Register src1,
cgfr(dst, dst); \
}
-void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const MemOperand& src2) {
Register result = dst;
if (src2.rx() == dst || src2.rb() == dst) dst = r0;
@@ -2759,7 +2732,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
if (result != dst) llgfr(result, dst);
}
-void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Register src2) {
if (dst == src2) {
std::swap(src1, src2);
@@ -2767,7 +2740,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Generate_Mul32WithOverflowIfCCUnequal(msgfr);
}
-void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const Operand& src2) {
Generate_Mul32WithOverflowIfCCUnequal(msgfi);
}
@@ -2781,12 +2754,12 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
LoadU32(dst, r1); \
}
-void TurboAssembler::DivS32(Register dst, Register src1,
+void MacroAssembler::DivS32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div32(dsgf);
}
-void TurboAssembler::DivS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivS32(Register dst, Register src1, Register src2) {
Generate_Div32(dsgfr);
}
@@ -2800,12 +2773,12 @@ void TurboAssembler::DivS32(Register dst, Register src1, Register src2) {
LoadU32(dst, r1); \
}
-void TurboAssembler::DivU32(Register dst, Register src1,
+void MacroAssembler::DivU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU32(dl);
}
-void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
Generate_DivU32(dlr);
}
@@ -2818,12 +2791,12 @@ void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
-void TurboAssembler::DivS64(Register dst, Register src1,
+void MacroAssembler::DivS64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div64(dsg);
}
-void TurboAssembler::DivS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivS64(Register dst, Register src1, Register src2) {
Generate_Div64(dsgr);
}
@@ -2837,12 +2810,12 @@ void TurboAssembler::DivS64(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
-void TurboAssembler::DivU64(Register dst, Register src1,
+void MacroAssembler::DivU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU64(dlg);
}
-void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
Generate_DivU64(dlgr);
}
@@ -2855,12 +2828,12 @@ void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
LoadU32(dst, r0); \
}
-void TurboAssembler::ModS32(Register dst, Register src1,
+void MacroAssembler::ModS32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod32(dsgf);
}
-void TurboAssembler::ModS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModS32(Register dst, Register src1, Register src2) {
Generate_Mod32(dsgfr);
}
@@ -2874,12 +2847,12 @@ void TurboAssembler::ModS32(Register dst, Register src1, Register src2) {
LoadU32(dst, r0); \
}
-void TurboAssembler::ModU32(Register dst, Register src1,
+void MacroAssembler::ModU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU32(dl);
}
-void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
Generate_ModU32(dlr);
}
@@ -2892,12 +2865,12 @@ void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
-void TurboAssembler::ModS64(Register dst, Register src1,
+void MacroAssembler::ModS64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod64(dsg);
}
-void TurboAssembler::ModS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModS64(Register dst, Register src1, Register src2) {
Generate_Mod64(dsgr);
}
@@ -2911,54 +2884,54 @@ void TurboAssembler::ModS64(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
-void TurboAssembler::ModU64(Register dst, Register src1,
+void MacroAssembler::ModU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU64(dlg);
}
-void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModU64(Register dst, Register src1, Register src2) {
Generate_ModU64(dlgr);
}
#undef Generate_ModU64
-void TurboAssembler::MulS64(Register dst, const Operand& opnd) {
+void MacroAssembler::MulS64(Register dst, const Operand& opnd) {
msgfi(dst, opnd);
}
-void TurboAssembler::MulS64(Register dst, Register src) { msgr(dst, src); }
+void MacroAssembler::MulS64(Register dst, Register src) { msgr(dst, src); }
-void TurboAssembler::MulS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::MulS64(Register dst, const MemOperand& opnd) {
msg(dst, opnd);
}
-void TurboAssembler::MulHighS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighS64(Register dst, Register src1, Register src2) {
mgrk(r0, src1, src2);
lgr(dst, r0);
}
-void TurboAssembler::MulHighS64(Register dst, Register src1,
+void MacroAssembler::MulHighS64(Register dst, Register src1,
const MemOperand& src2) {
// TODO(v8): implement this.
UNIMPLEMENTED();
}
-void TurboAssembler::MulHighU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighU64(Register dst, Register src1, Register src2) {
lgr(r1, src1);
mlgr(r0, src2);
lgr(dst, r0);
}
-void TurboAssembler::MulHighU64(Register dst, Register src1,
+void MacroAssembler::MulHighU64(Register dst, Register src1,
const MemOperand& src2) {
// TODO(v8): implement this.
UNIMPLEMENTED();
}
-void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
+void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
sqdbr(result, input);
}
-void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
+void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
if (is_uint12(input.offset())) {
sqdb(result, input);
} else {
@@ -2971,7 +2944,7 @@ void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
//----------------------------------------------------------------------------
// Add 32-bit (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddS32(Register dst, const Operand& opnd) {
+void MacroAssembler::AddS32(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
ahi(dst, opnd);
else
@@ -2979,19 +2952,19 @@ void TurboAssembler::AddS32(Register dst, const Operand& opnd) {
}
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddS64(Register dst, const Operand& opnd) {
+void MacroAssembler::AddS64(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
aghi(dst, opnd);
else
agfi(dst, opnd);
}
-void TurboAssembler::AddS32(Register dst, Register src, int32_t opnd) {
+void MacroAssembler::AddS32(Register dst, Register src, int32_t opnd) {
AddS32(dst, src, Operand(opnd));
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
-void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
ahik(dst, src, opnd);
@@ -3002,12 +2975,12 @@ void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
AddS32(dst, opnd);
}
-void TurboAssembler::AddS64(Register dst, Register src, int32_t opnd) {
+void MacroAssembler::AddS64(Register dst, Register src, int32_t opnd) {
AddS64(dst, src, Operand(opnd));
}
// Add Pointer Size (Register dst = Register src + Immediate opnd)
-void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
aghik(dst, src, opnd);
@@ -3019,13 +2992,13 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register dst + Register src)
-void TurboAssembler::AddS32(Register dst, Register src) { ar(dst, src); }
+void MacroAssembler::AddS32(Register dst, Register src) { ar(dst, src); }
// Add Pointer Size (Register dst = Register dst + Register src)
-void TurboAssembler::AddS64(Register dst, Register src) { agr(dst, src); }
+void MacroAssembler::AddS64(Register dst, Register src) { agr(dst, src); }
// Add 32-bit (Register dst = Register src1 + Register src2)
-void TurboAssembler::AddS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddS32(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
@@ -3042,7 +3015,7 @@ void TurboAssembler::AddS32(Register dst, Register src1, Register src2) {
}
// Add Pointer Size (Register dst = Register src1 + Register src2)
-void TurboAssembler::AddS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddS64(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
@@ -3059,7 +3032,7 @@ void TurboAssembler::AddS64(Register dst, Register src1, Register src2) {
}
// Add 32-bit (Register-Memory)
-void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddS32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
a(dst, opnd);
@@ -3068,13 +3041,13 @@ void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) {
}
// Add Pointer Size (Register-Memory)
-void TurboAssembler::AddS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddS64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
ag(dst, opnd);
}
// Add 32-bit (Memory - Immediate)
-void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
+void MacroAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@@ -3082,7 +3055,7 @@ void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
}
// Add Pointer-sized (Memory - Immediate)
-void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
+void MacroAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@@ -3094,7 +3067,7 @@ void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
//----------------------------------------------------------------------------
// Add Logical 32-bit (Register dst = Register src1 + Register src2)
-void TurboAssembler::AddU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddU32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
alr(dst, src2);
@@ -3110,16 +3083,16 @@ void TurboAssembler::AddU32(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddU32(Register dst, const Operand& imm) {
+void MacroAssembler::AddU32(Register dst, const Operand& imm) {
alfi(dst, imm);
}
// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddU64(Register dst, const Operand& imm) {
+void MacroAssembler::AddU64(Register dst, const Operand& imm) {
algfi(dst, imm);
}
-void TurboAssembler::AddU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddU64(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
algrk(dst, src1, src2);
@@ -3139,7 +3112,7 @@ void TurboAssembler::AddU64(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register-Memory)
-void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
al_z(dst, opnd);
@@ -3148,7 +3121,7 @@ void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) {
}
// Add Logical Pointer Size (Register-Memory)
-void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddU64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
alg(dst, opnd);
}
@@ -3158,7 +3131,7 @@ void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
-void TurboAssembler::SubU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::SubU32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
slr(dst, src2);
@@ -3175,41 +3148,41 @@ void TurboAssembler::SubU32(Register dst, Register src1, Register src2) {
}
// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
-void TurboAssembler::SubS32(Register dst, const Operand& imm) {
+void MacroAssembler::SubS32(Register dst, const Operand& imm) {
AddS32(dst, Operand(-(imm.immediate())));
}
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
-void TurboAssembler::SubS64(Register dst, const Operand& imm) {
+void MacroAssembler::SubS64(Register dst, const Operand& imm) {
AddS64(dst, Operand(-(imm.immediate())));
}
-void TurboAssembler::SubS32(Register dst, Register src, int32_t imm) {
+void MacroAssembler::SubS32(Register dst, Register src, int32_t imm) {
SubS32(dst, src, Operand(imm));
}
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
-void TurboAssembler::SubS32(Register dst, Register src, const Operand& imm) {
+void MacroAssembler::SubS32(Register dst, Register src, const Operand& imm) {
AddS32(dst, src, Operand(-(imm.immediate())));
}
-void TurboAssembler::SubS64(Register dst, Register src, int32_t imm) {
+void MacroAssembler::SubS64(Register dst, Register src, int32_t imm) {
SubS64(dst, src, Operand(imm));
}
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
-void TurboAssembler::SubS64(Register dst, Register src, const Operand& imm) {
+void MacroAssembler::SubS64(Register dst, Register src, const Operand& imm) {
AddS64(dst, src, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register dst - Register src)
-void TurboAssembler::SubS32(Register dst, Register src) { sr(dst, src); }
+void MacroAssembler::SubS32(Register dst, Register src) { sr(dst, src); }
// Subtract Pointer Size (Register dst = Register dst - Register src)
-void TurboAssembler::SubS64(Register dst, Register src) { sgr(dst, src); }
+void MacroAssembler::SubS64(Register dst, Register src) { sgr(dst, src); }
// Subtract 32-bit (Register = Register - Register)
-void TurboAssembler::SubS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::SubS32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
@@ -3229,7 +3202,7 @@ void TurboAssembler::SubS32(Register dst, Register src1, Register src2) {
}
// Subtract Pointer Sized (Register = Register - Register)
-void TurboAssembler::SubS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::SubS64(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sgrk(dst, src1, src2);
@@ -3249,7 +3222,7 @@ void TurboAssembler::SubS64(Register dst, Register src1, Register src2) {
}
// Subtract 32-bit (Register-Memory)
-void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubS32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
s(dst, opnd);
@@ -3258,7 +3231,7 @@ void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) {
}
// Subtract Pointer Sized (Register - Memory)
-void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubS64(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
sg(dst, opnd);
#else
@@ -3266,24 +3239,24 @@ void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) {
#endif
}
-void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
sllg(r0, src, Operand(32));
ldgr(dst, r0);
}
-void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
lgdr(dst, src);
srlg(dst, dst, Operand(32));
}
// Load And Subtract 32-bit (similar to laa/lan/lao/lax)
-void TurboAssembler::LoadAndSub32(Register dst, Register src,
+void MacroAssembler::LoadAndSub32(Register dst, Register src,
const MemOperand& opnd) {
lcr(dst, src);
laa(dst, dst, opnd);
}
-void TurboAssembler::LoadAndSub64(Register dst, Register src,
+void MacroAssembler::LoadAndSub64(Register dst, Register src,
const MemOperand& opnd) {
lcgr(dst, src);
laag(dst, dst, opnd);
@@ -3294,7 +3267,7 @@ void TurboAssembler::LoadAndSub64(Register dst, Register src,
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register - Memory)
-void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
sl(dst, opnd);
@@ -3303,7 +3276,7 @@ void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) {
}
// Subtract Logical Pointer Sized (Register - Memory)
-void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubU64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
slgf(dst, opnd);
@@ -3317,13 +3290,13 @@ void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// AND 32-bit - dst = dst & src
-void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
+void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
// AND Pointer Size - dst = dst & src
-void TurboAssembler::AndP(Register dst, Register src) { ngr(dst, src); }
+void MacroAssembler::AndP(Register dst, Register src) { ngr(dst, src); }
// Non-clobbering AND 32-bit - dst = src1 & src2
-void TurboAssembler::And(Register dst, Register src1, Register src2) {
+void MacroAssembler::And(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate NR/NGR, over the non clobbering NRK/NGRK
// as NR is a smaller instruction
@@ -3340,7 +3313,7 @@ void TurboAssembler::And(Register dst, Register src1, Register src2) {
}
// Non-clobbering AND pointer size - dst = src1 & src2
-void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
+void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate NR/NGR, over the non clobbering NRK/NGRK
// as NR is a smaller instruction
@@ -3357,7 +3330,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
}
// AND 32-bit (Reg - Mem)
-void TurboAssembler::And(Register dst, const MemOperand& opnd) {
+void MacroAssembler::And(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
n(dst, opnd);
@@ -3366,7 +3339,7 @@ void TurboAssembler::And(Register dst, const MemOperand& opnd) {
}
// AND Pointer Size (Reg - Mem)
-void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
ng(dst, opnd);
@@ -3376,10 +3349,10 @@ void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
}
// AND 32-bit - dst = dst & imm
-void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
+void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
// AND Pointer Size - dst = dst & imm
-void TurboAssembler::AndP(Register dst, const Operand& opnd) {
+void MacroAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
if (value >> 32 != -1) {
@@ -3393,13 +3366,13 @@ void TurboAssembler::AndP(Register dst, const Operand& opnd) {
}
// AND 32-bit - dst = src & imm
-void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
if (dst != src) lr(dst, src);
nilf(dst, opnd);
}
// AND Pointer Size - dst = src & imm
-void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// Try to exploit RISBG first
intptr_t value = opnd.immediate();
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -3440,13 +3413,13 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
}
// OR 32-bit - dst = dst | src
-void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
+void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
// OR Pointer Size - dst = dst | src
-void TurboAssembler::OrP(Register dst, Register src) { ogr(dst, src); }
+void MacroAssembler::OrP(Register dst, Register src) { ogr(dst, src); }
// Non-clobbering OR 32-bit - dst = src1 | src2
-void TurboAssembler::Or(Register dst, Register src1, Register src2) {
+void MacroAssembler::Or(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate OR/OGR, over the non clobbering ORK/OGRK
// as OR is a smaller instruction
@@ -3463,7 +3436,7 @@ void TurboAssembler::Or(Register dst, Register src1, Register src2) {
}
// Non-clobbering OR pointer size - dst = src1 | src2
-void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
+void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate OR/OGR, over the non clobbering ORK/OGRK
// as OR is a smaller instruction
@@ -3480,7 +3453,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
}
// OR 32-bit (Reg - Mem)
-void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
+void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
o(dst, opnd);
@@ -3489,7 +3462,7 @@ void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
}
// OR Pointer Size (Reg - Mem)
-void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
+void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
og(dst, opnd);
@@ -3499,10 +3472,10 @@ void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
}
// OR 32-bit - dst = dst | imm
-void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
+void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
// OR Pointer Size - dst = dst | imm
-void TurboAssembler::OrP(Register dst, const Operand& opnd) {
+void MacroAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
if (value >> 32 != 0) {
@@ -3516,25 +3489,25 @@ void TurboAssembler::OrP(Register dst, const Operand& opnd) {
}
// OR 32-bit - dst = src | imm
-void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
if (dst != src) lr(dst, src);
oilf(dst, opnd);
}
// OR Pointer Size - dst = src | imm
-void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
if (dst != src) mov(dst, src);
OrP(dst, opnd);
}
// XOR 32-bit - dst = dst ^ src
-void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
+void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
// XOR Pointer Size - dst = dst ^ src
-void TurboAssembler::XorP(Register dst, Register src) { xgr(dst, src); }
+void MacroAssembler::XorP(Register dst, Register src) { xgr(dst, src); }
// Non-clobbering XOR 32-bit - dst = src1 ^ src2
-void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
+void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XGRK
// as XR is a smaller instruction
@@ -3551,7 +3524,7 @@ void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
}
// Non-clobbering XOR pointer size - dst = src1 ^ src2
-void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
+void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate XR/XGR, over the non clobbering XRK/XGRK
// as XR is a smaller instruction
@@ -3568,7 +3541,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
}
// XOR 32-bit (Reg - Mem)
-void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
+void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
x(dst, opnd);
@@ -3577,7 +3550,7 @@ void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
}
// XOR Pointer Size (Reg - Mem)
-void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
+void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
xg(dst, opnd);
@@ -3587,10 +3560,10 @@ void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
}
// XOR 32-bit - dst = dst ^ imm
-void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
+void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
// XOR Pointer Size - dst = dst ^ imm
-void TurboAssembler::XorP(Register dst, const Operand& opnd) {
+void MacroAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
xihf(dst, Operand(value >> 32));
@@ -3601,29 +3574,29 @@ void TurboAssembler::XorP(Register dst, const Operand& opnd) {
}
// XOR 32-bit - dst = src ^ imm
-void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
if (dst != src) lr(dst, src);
xilf(dst, opnd);
}
// XOR Pointer Size - dst = src ^ imm
-void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
if (dst != src) mov(dst, src);
XorP(dst, opnd);
}
-void TurboAssembler::Not32(Register dst, Register src) {
+void MacroAssembler::Not32(Register dst, Register src) {
if (src != no_reg && src != dst) lr(dst, src);
xilf(dst, Operand(0xFFFFFFFF));
}
-void TurboAssembler::Not64(Register dst, Register src) {
+void MacroAssembler::Not64(Register dst, Register src) {
if (src != no_reg && src != dst) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
}
-void TurboAssembler::NotP(Register dst, Register src) {
+void MacroAssembler::NotP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
Not64(dst, src);
#else
@@ -3631,7 +3604,7 @@ void TurboAssembler::NotP(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadPositiveP(Register result, Register input) {
+void MacroAssembler::LoadPositiveP(Register result, Register input) {
#if V8_TARGET_ARCH_S390X
lpgr(result, input);
#else
@@ -3639,7 +3612,7 @@ void TurboAssembler::LoadPositiveP(Register result, Register input) {
#endif
}
-void TurboAssembler::LoadPositive32(Register result, Register input) {
+void MacroAssembler::LoadPositive32(Register result, Register input) {
lpr(result, input);
lgfr(result, result);
}
@@ -3649,14 +3622,14 @@ void TurboAssembler::LoadPositive32(Register result, Register input) {
//-----------------------------------------------------------------------------
// Compare 32-bit Register vs Register
-void TurboAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); }
+void MacroAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); }
// Compare Pointer Sized Register vs Register
-void TurboAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
+void MacroAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
-void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpS32(Register dst, const Operand& opnd) {
if (opnd.rmode() == RelocInfo::NO_INFO) {
intptr_t value = opnd.immediate();
if (is_int16(value))
@@ -3672,7 +3645,7 @@ void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
-void TurboAssembler::CmpS64(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpS64(Register dst, const Operand& opnd) {
if (opnd.rmode() == RelocInfo::NO_INFO) {
cgfi(dst, opnd);
} else {
@@ -3682,7 +3655,7 @@ void TurboAssembler::CmpS64(Register dst, const Operand& opnd) {
}
// Compare 32-bit Register vs Memory
-void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpS32(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
@@ -3692,14 +3665,14 @@ void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) {
}
// Compare Pointer Size Register vs Memory
-void TurboAssembler::CmpS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpS64(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
cg(dst, opnd);
}
// Using cs or csy based on the offset
-void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
+void MacroAssembler::CmpAndSwap(Register old_val, Register new_val,
const MemOperand& opnd) {
if (is_uint12(opnd.offset())) {
cs(old_val, new_val, opnd);
@@ -3708,7 +3681,7 @@ void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
}
}
-void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
+void MacroAssembler::CmpAndSwap64(Register old_val, Register new_val,
const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
csg(old_val, new_val, opnd);
@@ -3719,10 +3692,10 @@ void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
//-----------------------------------------------------------------------------
// Compare Logical 32-bit Register vs Register
-void TurboAssembler::CmpU32(Register dst, Register src) { clr(dst, src); }
+void MacroAssembler::CmpU32(Register dst, Register src) { clr(dst, src); }
// Compare Logical Pointer Sized Register vs Register
-void TurboAssembler::CmpU64(Register dst, Register src) {
+void MacroAssembler::CmpU64(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
clgr(dst, src);
#else
@@ -3731,12 +3704,12 @@ void TurboAssembler::CmpU64(Register dst, Register src) {
}
// Compare Logical 32-bit Register vs Immediate
-void TurboAssembler::CmpU32(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpU32(Register dst, const Operand& opnd) {
clfi(dst, opnd);
}
// Compare Logical Pointer Sized Register vs Immediate
-void TurboAssembler::CmpU64(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpU64(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
clgfi(dst, opnd);
@@ -3746,7 +3719,7 @@ void TurboAssembler::CmpU64(Register dst, const Operand& opnd) {
}
// Compare Logical 32-bit Register vs Memory
-void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpU32(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
@@ -3756,7 +3729,7 @@ void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) {
}
// Compare Logical Pointer Sized Register vs Memory
-void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpU64(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
@@ -3766,7 +3739,7 @@ void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) {
#endif
}
-void TurboAssembler::Branch(Condition c, const Operand& opnd) {
+void MacroAssembler::Branch(Condition c, const Operand& opnd) {
intptr_t value = opnd.immediate();
if (is_int16(value))
brc(c, opnd);
@@ -3775,7 +3748,7 @@ void TurboAssembler::Branch(Condition c, const Operand& opnd) {
}
// Branch On Count. Decrement R1, and branch if R1 != 0.
-void TurboAssembler::BranchOnCount(Register r1, Label* l) {
+void MacroAssembler::BranchOnCount(Register r1, Label* l) {
int32_t offset = branch_offset(l);
if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
@@ -3789,7 +3762,7 @@ void TurboAssembler::BranchOnCount(Register r1, Label* l) {
}
}
-void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) {
intptr_t value = static_cast<intptr_t>(smi.ptr());
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
llilf(dst, Operand(value));
@@ -3800,7 +3773,7 @@ void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
#endif
}
-void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// CFI takes 32-bit immediate.
cfi(src1, Operand(smi));
@@ -3814,7 +3787,7 @@ void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
#endif
}
-void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadU64(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -3829,7 +3802,7 @@ void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
-void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU64(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -3842,7 +3815,7 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
}
// Store a "pointer" sized constant to the memory location
-void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
+void MacroAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO);
@@ -3857,7 +3830,7 @@ void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
}
}
-void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
+void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
@@ -3872,7 +3845,7 @@ void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
#endif
}
-void TurboAssembler::StoreMultipleP(Register src1, Register src2,
+void MacroAssembler::StoreMultipleP(Register src1, Register src2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
@@ -3887,7 +3860,7 @@ void TurboAssembler::StoreMultipleP(Register src1, Register src2,
#endif
}
-void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
+void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
lm(dst1, dst2, mem);
@@ -3897,7 +3870,7 @@ void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
}
}
-void TurboAssembler::StoreMultipleW(Register src1, Register src2,
+void MacroAssembler::StoreMultipleW(Register src1, Register src2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
stm(src1, src2, mem);
@@ -3908,7 +3881,7 @@ void TurboAssembler::StoreMultipleW(Register src1, Register src2,
}
// Load 32-bits and sign extend if necessary.
-void TurboAssembler::LoadS32(Register dst, Register src) {
+void MacroAssembler::LoadS32(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgfr(dst, src);
#else
@@ -3917,8 +3890,8 @@ void TurboAssembler::LoadS32(Register dst, Register src) {
}
// Load 32-bits and sign extend if necessary.
-void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
- Register scratch) {
+void MacroAssembler::LoadS32(Register dst, const MemOperand& mem,
+ Register scratch) {
int offset = mem.offset();
if (!is_int20(offset)) {
@@ -3943,7 +3916,7 @@ void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
}
// Load 32-bits and zero extend if necessary.
-void TurboAssembler::LoadU32(Register dst, Register src) {
+void MacroAssembler::LoadU32(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgfr(dst, src);
#else
@@ -3953,8 +3926,8 @@ void TurboAssembler::LoadU32(Register dst, Register src) {
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
-void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
- Register scratch) {
+void MacroAssembler::LoadU32(Register dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -3994,7 +3967,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadU16(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
@@ -4003,7 +3976,7 @@ void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
#endif
}
-void TurboAssembler::LoadU16(Register dst, Register src) {
+void MacroAssembler::LoadU16(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llghr(dst, src);
#else
@@ -4011,7 +3984,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadS8(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
@@ -4020,7 +3993,7 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
#endif
}
-void TurboAssembler::LoadS8(Register dst, Register src) {
+void MacroAssembler::LoadS8(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgbr(dst, src);
#else
@@ -4028,7 +4001,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadU8(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
@@ -4037,7 +4010,7 @@ void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
#endif
}
-void TurboAssembler::LoadU8(Register dst, Register src) {
+void MacroAssembler::LoadU8(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgcr(dst, src);
#else
@@ -4046,34 +4019,34 @@ void TurboAssembler::LoadU8(Register dst, Register src) {
}
#ifdef V8_TARGET_BIG_ENDIAN
-void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
lrvg(dst, mem);
}
-void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
Register scratch) {
lrv(dst, opnd);
LoadS32(dst, dst);
}
-void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
Register scratch) {
lrv(dst, opnd);
LoadU32(dst, dst);
}
-void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
lrvh(dst, opnd);
LoadU16(dst, dst);
}
-void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
lrvh(dst, opnd);
LoadS16(dst, dst);
}
-void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch0, Register scratch1) {
bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
is_uint12(opnd.offset());
@@ -4087,20 +4060,20 @@ void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
lrvg(scratch, opnd);
ldgr(dst, scratch);
}
-void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
lrv(scratch, opnd);
ShiftLeftU64(scratch, scratch, Operand(32));
ldgr(dst, scratch);
}
-void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -4112,7 +4085,7 @@ void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -4124,7 +4097,7 @@ void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -4136,14 +4109,14 @@ void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
DCHECK(is_uint12(opnd.offset()));
lgdr(scratch, src);
strvg(scratch, opnd);
}
-void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
DCHECK(is_uint12(opnd.offset()));
lgdr(scratch, src);
@@ -4151,7 +4124,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
strv(scratch, opnd);
}
-void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
Register scratch1, Register scratch2) {
bool use_vstbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
is_uint12(mem.offset());
@@ -4167,73 +4140,73 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
}
#else
-void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
LoadU64(dst, mem, scratch);
}
-void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
Register scratch) {
LoadS32(dst, opnd, scratch);
}
-void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
Register scratch) {
LoadU32(dst, opnd, scratch);
}
-void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
LoadU16(dst, opnd);
}
-void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
LoadS16(dst, opnd);
}
-void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch0, Register scratch1) {
USE(scratch1);
LoadV128(dst, opnd, scratch0);
}
-void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
USE(scratch);
LoadF64(dst, opnd);
}
-void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
USE(scratch);
LoadF32(dst, opnd);
}
-void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem,
Register scratch) {
StoreU64(src, mem, scratch);
}
-void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem,
Register scratch) {
StoreU32(src, mem, scratch);
}
-void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem,
Register scratch) {
StoreU16(src, mem, scratch);
}
-void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
StoreF64(src, opnd);
}
-void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
StoreF32(src, opnd);
}
-void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
Register scratch1, Register scratch2) {
StoreV128(src, mem, scratch1);
}
@@ -4241,12 +4214,12 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
#endif
// Load And Test (Reg <- Reg)
-void TurboAssembler::LoadAndTest32(Register dst, Register src) {
+void MacroAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
}
// Load And Test Pointer Sized (Reg <- Reg)
-void TurboAssembler::LoadAndTestP(Register dst, Register src) {
+void MacroAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
ltgr(dst, src);
#else
@@ -4255,12 +4228,12 @@ void TurboAssembler::LoadAndTestP(Register dst, Register src) {
}
// Load And Test 32-bit (Reg <- Mem)
-void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
lt_z(dst, mem);
}
// Load And Test Pointer Sized (Reg <- Mem)
-void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
ltg(dst, mem);
#else
@@ -4269,7 +4242,7 @@ void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
}
// Load On Condition Pointer Sized (Reg <- Reg)
-void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
+void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
Register src) {
#if V8_TARGET_ARCH_S390X
locgr(cond, dst, src);
@@ -4279,7 +4252,7 @@ void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
}
// Load Double Precision (64-bit) Floating Point number from memory
-void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
+void MacroAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
// On both 32-bit and 64-bit targets we use 64-bit floating point registers.
if (is_uint12(mem.offset())) {
ld(dst, mem);
@@ -4289,7 +4262,7 @@ void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
}
// Load Single Precision (32-bit) Floating Point number from memory
-void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
+void MacroAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
le_z(dst, mem);
} else {
@@ -4298,7 +4271,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
}
}
-void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
Register scratch) {
DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
@@ -4311,7 +4284,7 @@ void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
}
// Store Double Precision (64-bit) Floating Point number to memory
-void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
+void MacroAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
std(dst, mem);
} else {
@@ -4320,7 +4293,7 @@ void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
}
// Store Single Precision (32-bit) Floating Point number to memory
-void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
+void MacroAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
ste(src, mem);
} else {
@@ -4328,7 +4301,7 @@ void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
}
}
-void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
Register scratch) {
DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
@@ -4340,7 +4313,7 @@ void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
}
}
-void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
aebr(dst, rhs);
@@ -4352,7 +4325,7 @@ void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
sebr(dst, rhs);
@@ -4365,7 +4338,7 @@ void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
meebr(dst, rhs);
@@ -4377,7 +4350,7 @@ void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
debr(dst, rhs);
@@ -4393,7 +4366,7 @@ void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
adbr(dst, rhs);
@@ -4405,7 +4378,7 @@ void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
sdbr(dst, rhs);
@@ -4418,7 +4391,7 @@ void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
mdbr(dst, rhs);
@@ -4430,7 +4403,7 @@ void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
ddbr(dst, rhs);
@@ -4446,7 +4419,7 @@ void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
aeb(dst, opnd);
@@ -4456,7 +4429,7 @@ void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
adb(dst, opnd);
@@ -4466,7 +4439,7 @@ void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
seb(dst, opnd);
@@ -4476,7 +4449,7 @@ void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
sdb(dst, opnd);
@@ -4486,7 +4459,7 @@ void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
meeb(dst, opnd);
@@ -4496,7 +4469,7 @@ void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
mdb(dst, opnd);
@@ -4506,7 +4479,7 @@ void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
deb(dst, opnd);
@@ -4516,7 +4489,7 @@ void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ddb(dst, opnd);
@@ -4526,7 +4499,7 @@ void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ldeb(dst, opnd);
@@ -4538,7 +4511,7 @@ void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
-void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU32(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4569,7 +4542,7 @@ void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::LoadS16(Register dst, Register src) {
+void MacroAssembler::LoadS16(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lghr(dst, src);
#else
@@ -4579,8 +4552,8 @@ void TurboAssembler::LoadS16(Register dst, Register src) {
// Loads a 16-bit halfword value from memory and sign-extends it to a
// pointer-sized register.
-void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
- Register scratch) {
+void MacroAssembler::LoadS16(Register dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4607,7 +4580,7 @@ void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void TurboAssembler::StoreU16(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU16(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4625,7 +4598,7 @@ void TurboAssembler::StoreU16(Register src, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void TurboAssembler::StoreU8(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU8(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4642,13 +4615,13 @@ void TurboAssembler::StoreU8(Register src, const MemOperand& mem,
}
// Shift left logical for 32-bit integer types.
-void TurboAssembler::ShiftLeftU32(Register dst, Register src,
+void MacroAssembler::ShiftLeftU32(Register dst, Register src,
const Operand& val) {
ShiftLeftU32(dst, src, r0, val);
}
// Shift left logical for 32-bit integer types.
-void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val,
+void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
sll(dst, val, val2);
@@ -4662,25 +4635,25 @@ void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val,
}
// Shift left logical for 64-bit integer types.
-void TurboAssembler::ShiftLeftU64(Register dst, Register src,
+void MacroAssembler::ShiftLeftU64(Register dst, Register src,
const Operand& val) {
ShiftLeftU64(dst, src, r0, val);
}
// Shift left logical for 64-bit integer types.
-void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register val,
+void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register val,
const Operand& val2) {
sllg(dst, src, val, val2);
}
// Shift right logical for 32-bit integer types.
-void TurboAssembler::ShiftRightU32(Register dst, Register src,
+void MacroAssembler::ShiftRightU32(Register dst, Register src,
const Operand& val) {
ShiftRightU32(dst, src, r0, val);
}
// Shift right logical for 32-bit integer types.
-void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightU32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
srl(dst, val, val2);
@@ -4693,25 +4666,25 @@ void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val,
}
}
-void TurboAssembler::ShiftRightU64(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightU64(Register dst, Register src, Register val,
const Operand& val2) {
srlg(dst, src, val, val2);
}
// Shift right logical for 64-bit integer types.
-void TurboAssembler::ShiftRightU64(Register dst, Register src,
+void MacroAssembler::ShiftRightU64(Register dst, Register src,
const Operand& val) {
ShiftRightU64(dst, src, r0, val);
}
// Shift right arithmetic for 32-bit integer types.
-void TurboAssembler::ShiftRightS32(Register dst, Register src,
+void MacroAssembler::ShiftRightS32(Register dst, Register src,
const Operand& val) {
ShiftRightS32(dst, src, r0, val);
}
// Shift right arithmetic for 32-bit integer types.
-void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightS32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
sra(dst, val, val2);
@@ -4725,19 +4698,19 @@ void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val,
}
// Shift right arithmetic for 64-bit integer types.
-void TurboAssembler::ShiftRightS64(Register dst, Register src,
+void MacroAssembler::ShiftRightS64(Register dst, Register src,
const Operand& val) {
ShiftRightS64(dst, src, r0, val);
}
// Shift right arithmetic for 64-bit integer types.
-void TurboAssembler::ShiftRightS64(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightS64(Register dst, Register src, Register val,
const Operand& val2) {
srag(dst, src, val, val2);
}
// Clear the rightmost # of bits given by val.
-void TurboAssembler::ClearRightImm(Register dst, Register src,
+void MacroAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
@@ -4764,7 +4737,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
}
}
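The comment above is terse; as an editorial sketch (not part of the patch, helper name hypothetical), the scalar operation being emitted is:

  #include <cstdint>
  // Zero the low n bits of x, with n taken modulo the word width, matching
  // numBitsToClear = val.immediate() % (kSystemPointerSize * 8) above.
  static inline uint64_t ClearRightImmSketch(uint64_t x, unsigned n) {
    n %= 64;               // kSystemPointerSize * 8 on 64-bit targets
    return (x >> n) << n;  // drop the low n bits, then shift back
  }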
-void TurboAssembler::Popcnt32(Register dst, Register src) {
+void MacroAssembler::Popcnt32(Register dst, Register src) {
DCHECK(src != r0);
DCHECK(dst != r0);
@@ -4777,7 +4750,7 @@ void TurboAssembler::Popcnt32(Register dst, Register src) {
}
#ifdef V8_TARGET_ARCH_S390X
-void TurboAssembler::Popcnt64(Register dst, Register src) {
+void MacroAssembler::Popcnt64(Register dst, Register src) {
DCHECK(src != r0);
DCHECK(dst != r0);
@@ -4792,7 +4765,7 @@ void TurboAssembler::Popcnt64(Register dst, Register src) {
}
#endif
-void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
+void MacroAssembler::SwapP(Register src, Register dst, Register scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
mov(scratch, src);
@@ -4800,7 +4773,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
mov(dst, scratch);
}
-void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
+void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
@@ -4809,7 +4782,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
StoreU64(scratch, dst);
}
-void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
Register scratch_1) {
if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
@@ -4822,7 +4795,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
StoreU64(scratch_1, src);
}
-void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
+void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
@@ -4831,7 +4804,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
ldr(dst, scratch);
}
-void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
+void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
ldr(scratch, src);
@@ -4839,7 +4812,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
StoreF32(scratch, dst);
}
-void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst,
DoubleRegister scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kDoubleSize));
@@ -4853,7 +4826,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kDoubleSize));
}
-void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
+void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
@@ -4862,7 +4835,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
ldr(dst, scratch);
}
-void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
+void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
ldr(scratch, src);
@@ -4870,7 +4843,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
StoreF64(scratch, dst);
}
-void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst,
DoubleRegister scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kDoubleSize));
@@ -4884,7 +4857,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kDoubleSize));
}
-void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
+void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
Simd128Register scratch) {
if (src == dst) return;
vlr(scratch, src, Condition(0), Condition(0), Condition(0));
@@ -4892,7 +4865,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
vlr(dst, scratch, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
+void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
Simd128Register scratch) {
DCHECK(!AreAliased(src, scratch));
vlr(scratch, src, Condition(0), Condition(0), Condition(0));
@@ -4900,7 +4873,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
StoreV128(scratch, dst, ip);
}
-void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst,
Simd128Register scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kSimd128Size));
@@ -4914,27 +4887,27 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
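A brief editorial note on the arithmetic here (sketch only, not part of the patch): larl scales its signed immediate by two, i.e. it works in halfwords, which is why the assembler passes -pc_offset() / 2.

  #include <cstdint>
  // Hypothetical scalar model of ComputeCodeStartAddress: the larl lands back
  // at the start of the code buffer because pc_offset() is the byte offset of
  // the larl itself and is always even on s390.
  static inline uint64_t CodeStartFromLarl(uint64_t larl_address,
                                           int pc_offset_bytes) {
    // Equivalent to larl_address - pc_offset_bytes for even offsets.
    return larl_address + 2ll * (-pc_offset_bytes / 2);
  }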
-void TurboAssembler::LoadPC(Register dst) {
+void MacroAssembler::LoadPC(Register dst) {
Label current_pc;
larl(dst, &current_pc);
bind(&current_pc);
}
-void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS32(x, Operand(y));
beq(dest);
}
-void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
CmpS32(x, Operand(y));
blt(dest);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
static_assert(kSystemPointerSize == 8);
static_assert(kSmiTagSize == 1);
static_assert(kSmiTag == 0);
@@ -4951,91 +4924,50 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
IsolateData::builtin_entry_table_offset()));
}
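The static_asserts above are what let the builtin index be turned into a table offset with a single shift; an editorial sketch (not part of the patch, helper name hypothetical), assuming 8-byte entry-table slots:

  #include <cstdint>
  // With kSmiTag == 0 and kSmiTagSize == 1, a Smi-encoded builtin index is the
  // raw index shifted left by one, so raw_index * 8 needs only one more shift
  // of (kSystemPointerSizeLog2 - kSmiTagSize).
  static inline int64_t BuiltinEntryTableOffsetFromSmi(int64_t smi_index) {
    const int kSmiTagSize = 1;
    const int kSystemPointerSizeLog2 = 3;  // 8-byte slots
    return smi_index << (kSystemPointerSizeLog2 - kSmiTagSize);
  }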
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
ASM_CODE_COMMENT(this);
LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- Register scratch = r1;
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- tmlh(scratch, Operand(Code::IsOffHeapTrampoline::kMask >> 16));
- bne(&if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- AddS64(destination, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- b(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
- AddS64(destination, destination, kRootRegister);
- LoadU64(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- AddS64(destination, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
+ ASM_CODE_COMMENT(this);
+ LoadU64(destination,
+ FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
}
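For context: the removed LoadCodeObjectEntry branched on the off-heap-trampoline flag, while the new LoadCodeEntry is a single field load. A hedged usage sketch (helper name hypothetical, mirroring CallCodeObject just below, using the s390 MacroAssembler types from this file):

  // Call through the entry point cached on the Code object; no trampoline
  // check is needed on this path any more.
  void CallViaCodeEntry(MacroAssembler* masm, Register code_object) {
    masm->LoadCodeEntry(code_object, code_object);  // clobbers code_object
    masm->Call(code_object);
  }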
-void TurboAssembler::CallCodeObject(Register code_object) {
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
+ LoadCodeEntry(code_object, code_object);
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
+ LoadCodeEntry(code_object, code_object);
Jump(code_object);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
Label return_label;
larl(r14, &return_label); // Generate the return addr of call later.
@@ -5047,7 +4979,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -5059,10 +4991,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::CountLeadingZerosU32(Register dst, Register src,
+void MacroAssembler::CountLeadingZerosU32(Register dst, Register src,
Register scratch_pair) {
llgfr(dst, src);
flogr(scratch_pair,
@@ -5070,14 +5002,14 @@ void TurboAssembler::CountLeadingZerosU32(Register dst, Register src,
AddS32(dst, scratch_pair, Operand(-32));
}
-void TurboAssembler::CountLeadingZerosU64(Register dst, Register src,
+void MacroAssembler::CountLeadingZerosU64(Register dst, Register src,
Register scratch_pair) {
flogr(scratch_pair,
src); // will modify a register pair scratch and scratch + 1
mov(dst, scratch_pair);
}
-void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
+void MacroAssembler::CountTrailingZerosU32(Register dst, Register src,
Register scratch_pair) {
Register scratch0 = scratch_pair;
Register scratch1 = Register::from_code(scratch_pair.code() + 1);
@@ -5098,7 +5030,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
bind(&done);
}
-void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
+void MacroAssembler::CountTrailingZerosU64(Register dst, Register src,
Register scratch_pair) {
Register scratch0 = scratch_pair;
Register scratch1 = Register::from_code(scratch_pair.code() + 1);
@@ -5118,7 +5050,7 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
bind(&done);
}
-void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
+void MacroAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
Register old_value,
Register new_value, int start,
int end, int shift_amount,
@@ -5136,7 +5068,7 @@ void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
Operand(64 - shift_amount), true);
}
-void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output,
+void MacroAssembler::AtomicCmpExchangeU8(Register addr, Register output,
Register old_value, Register new_value,
Register temp0, Register temp1) {
#ifdef V8_TARGET_BIG_ENDIAN
@@ -5185,7 +5117,7 @@ void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output,
bind(&done);
}
-void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output,
+void MacroAssembler::AtomicCmpExchangeU16(Register addr, Register output,
Register old_value,
Register new_value, Register temp0,
Register temp1) {
@@ -5223,7 +5155,7 @@ void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output,
bind(&done);
}
-void TurboAssembler::AtomicExchangeHelper(Register addr, Register value,
+void MacroAssembler::AtomicExchangeHelper(Register addr, Register value,
Register output, int start, int end,
int shift_amount, int offset,
Register scratch) {
@@ -5238,7 +5170,7 @@ void TurboAssembler::AtomicExchangeHelper(Register addr, Register value,
srl(output, Operand(shift_amount));
}
-void TurboAssembler::AtomicExchangeU8(Register addr, Register value,
+void MacroAssembler::AtomicExchangeU8(Register addr, Register value,
Register output, Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_BYTE(i) \
@@ -5290,7 +5222,7 @@ void TurboAssembler::AtomicExchangeU8(Register addr, Register value,
bind(&done);
}
-void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
+void MacroAssembler::AtomicExchangeU16(Register addr, Register value,
Register output, Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_HALFWORD(i) \
@@ -5331,77 +5263,77 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
}
// Simd Support.
-void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
vrep(dst, src, Operand(0), Condition(3));
}
-void TurboAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) {
vrep(dst, src, Operand(0), Condition(2));
}
-void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(3));
vrep(dst, dst, Operand(0), Condition(3));
}
-void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(2));
vrep(dst, dst, Operand(0), Condition(2));
}
-void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(1));
vrep(dst, dst, Operand(0), Condition(1));
}
-void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(0));
vrep(dst, dst, Operand(0), Condition(0));
}
-void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
+void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
+void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
+void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
+void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
+void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
}
-void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
+void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register scratch) {
vlgv(scratch, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
lghr(dst, scratch);
}
-void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
+void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
-void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
+void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register scratch) {
vlgv(scratch, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
lgbr(dst, scratch);
}
-void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
DoubleRegister src2, uint8_t imm_lane_idx,
Register scratch) {
vlgv(scratch, src2, MemOperand(r0, 0), Condition(3));
@@ -5411,7 +5343,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, scratch, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
DoubleRegister src2, uint8_t imm_lane_idx,
Register scratch) {
vlgv(scratch, src2, MemOperand(r0, 0), Condition(2));
@@ -5421,7 +5353,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, scratch, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5430,7 +5362,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5439,7 +5371,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5448,7 +5380,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
}
-void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5457,19 +5389,19 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
-void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) {
vno(dst, src, src, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::S128Zero(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128Zero(Simd128Register dst, Simd128Register src) {
vx(dst, src, src, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) {
vceq(dst, src, src, Condition(0), Condition(3));
}
-void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register mask) {
vsel(dst, src1, src2, mask, Condition(0), Condition(0));
}
@@ -5512,7 +5444,7 @@ void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1,
V(I8x16Popcnt, vpopct, 0, 0, 0)
#define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \
}
SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
@@ -5533,7 +5465,7 @@ SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
V(I8x16GtU, vchl, 0, 0)
#define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2, Condition(c1), Condition(c2)); \
}
@@ -5590,7 +5522,7 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
V(S128AndNot, vnc, 0, 0, 0)
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \
}
@@ -5613,13 +5545,13 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
V(I8x16ShrU, vesrlv, 0)
#define EMIT_SIMD_SHIFT(name, op, c1) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Register src2, Simd128Register scratch) { \
vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1)); \
vrep(scratch, scratch, Operand(0), Condition(c1)); \
op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1)); \
} \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
const Operand& src2, Register scratch1, \
Simd128Register scratch2) { \
mov(scratch1, src2); \
@@ -5644,7 +5576,7 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0)
#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2, Simd128Register scratch) { \
mul_even(scratch, src1, src2, Condition(0), Condition(0), \
Condition(mode)); \
@@ -5662,7 +5594,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
V(I8x16AllTrue, 0)
#define EMIT_SIMD_ALL_TRUE(name, mode) \
- void TurboAssembler::name(Register dst, Simd128Register src, \
+ void MacroAssembler::name(Register dst, Simd128Register src, \
Register scratch1, Simd128Register scratch2) { \
mov(scratch1, Operand(1)); \
xgr(dst, dst); \
@@ -5683,15 +5615,15 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
V(F32x4Qfms, vfnms, 2)
#define EMIT_SIMD_QFM(name, op, c1) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2, Simd128Register src3) { \
- op(dst, src2, src3, src1, Condition(c1), Condition(0)); \
+ op(dst, src1, src2, src3, Condition(c1), Condition(0)); \
}
SIMD_QFM_LIST(EMIT_SIMD_QFM)
#undef EMIT_SIMD_QFM
#undef SIMD_QFM_LIST
-void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Register scratch1,
Register scratch2, Register scratch3) {
Register scratch_1 = scratch1;
@@ -5706,112 +5638,112 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
vlvgp(dst, scratch1, scratch2);
}
-void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vceq(scratch, src1, src2, Condition(0), Condition(2));
vchl(dst, src1, src2, Condition(0), Condition(2));
vo(dst, dst, scratch, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(1));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(1));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vceq(scratch, src1, src2, Condition(0), Condition(1));
vchl(dst, src1, src2, Condition(0), Condition(1));
vo(dst, dst, scratch, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(0));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(0));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vceq(scratch, src1, src2, Condition(0), Condition(0));
vchl(dst, src1, src2, Condition(0), Condition(0));
vo(dst, dst, scratch, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I64x2BitMask(Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2) {
mov(scratch1, Operand(0x8080808080800040));
vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
@@ -5819,7 +5751,7 @@ void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
}
-void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I32x4BitMask(Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2) {
mov(scratch1, Operand(0x8080808000204060));
vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
@@ -5827,7 +5759,7 @@ void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
}
-void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I16x8BitMask(Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2) {
mov(scratch1, Operand(0x10203040506070));
vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
@@ -5835,19 +5767,19 @@ void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
}
-void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
+void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
Simd128Register src) {
vupl(dst, src, Condition(0), Condition(0), Condition(2));
vcdg(dst, dst, Condition(4), Condition(0), Condition(3));
}
-void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
+void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
Simd128Register src) {
vupll(dst, src, Condition(0), Condition(0), Condition(2));
vcdlg(dst, dst, Condition(4), Condition(0), Condition(3));
}
-void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src,
Register scratch1, Register scratch2,
Simd128Register scratch3) {
mov(scratch1, Operand(0x4048505860687078));
@@ -5857,7 +5789,7 @@ void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch3, MemOperand(r0, 3), Condition(1));
}
-void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
+void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src,
Register scratch) {
mov(dst, Operand(1));
xgr(scratch, scratch);
@@ -5872,7 +5804,7 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
convert(scratch2, scratch1, kRoundToZero); \
vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
}
-void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst,
+void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5886,7 +5818,7 @@ void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst,
}
}
-void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst,
+void MacroAssembler::I32x4UConvertF32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5908,7 +5840,7 @@ void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst,
MovFloatToInt(scratch2, scratch1); \
vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
}
-void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst,
+void MacroAssembler::F32x4SConvertI32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5918,7 +5850,7 @@ void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst,
CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, dst, src, scratch1, scratch2)
}
}
-void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst,
+void MacroAssembler::F32x4UConvertI32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5931,13 +5863,13 @@ void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst,
}
#undef CONVERT_INT32_TO_FLOAT
-void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst,
+void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpks(dst, src2, src1, Condition(0), Condition(2));
}
-void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
+void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpks(dst, src2, src1, Condition(0), Condition(1));
@@ -5949,7 +5881,7 @@ void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0), \
Condition(mode)); \
vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode));
-void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
+void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -5958,7 +5890,7 @@ void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
vpkls(dst, dst, scratch, Condition(0), Condition(2));
}
-void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
+void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -5980,7 +5912,7 @@ void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
op(scratch1, scratch1, scratch2, Condition(0), Condition(0), \
Condition(mode + 1));
-void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -5988,7 +5920,7 @@ void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -5996,7 +5928,7 @@ void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6004,7 +5936,7 @@ void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
vpkls(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6018,7 +5950,7 @@ void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
vpkls(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6026,7 +5958,7 @@ void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(1));
}
-void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6034,7 +5966,7 @@ void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(1));
}
-void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6042,7 +5974,7 @@ void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
vpkls(dst, dst, scratch1, Condition(0), Condition(1));
}
-void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6057,7 +5989,7 @@ void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
}
#undef BINOP_EXTRACT
-void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
+void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2, Register scratch3,
@@ -6073,7 +6005,7 @@ void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
vlvgp(dst, scratch3, scratch4);
}
-void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
+void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2, Register scratch3,
@@ -6101,14 +6033,14 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
Condition(lane_size)); \
va(dst, scratch1, scratch2, Condition(0), Condition(0), \
Condition(lane_size + 1));
-void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
+void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Simd128Register scratch2) {
EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo)
}
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
+void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
Simd128Register src,
Simd128Register scratch,
Simd128Register scratch2) {
@@ -6116,14 +6048,14 @@ void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
vsum(dst, src, scratch, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
+void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Simd128Register scratch2) {
EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo)
}
-void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
+void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6131,7 +6063,7 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
}
#undef EXT_ADD_PAIRWISE
-void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
+void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
Simd128Register src,
Simd128Register scratch) {
// NaN to 0.
@@ -6143,7 +6075,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
vpks(dst, dst, scratch, Condition(0), Condition(3));
}
-void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
+void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
Simd128Register src,
Simd128Register scratch) {
vclgd(scratch, src, Condition(5), Condition(0), Condition(3));
@@ -6151,14 +6083,14 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
vpkls(dst, dst, scratch, Condition(0), Condition(3));
}
-void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
+void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
Register scratch1, Register scratch2) {
mov(scratch1, Operand(low));
mov(scratch2, Operand(high));
vlvgp(dst, scratch2, scratch1);
}
-void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Register scratch1,
Register scratch2, Simd128Register scratch3,
Simd128Register scratch4) {
@@ -6178,7 +6110,7 @@ void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
vperm(dst, dst, scratch3, scratch4, Condition(0), Condition(0));
}
-void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
Simd128Register src2, uint64_t high,
uint64_t low, Register scratch1,
Register scratch2, Simd128Register scratch3) {
@@ -6188,7 +6120,7 @@ void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
vperm(dst, src1, src2, scratch3, Condition(0), Condition(0));
}
-void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
vme(scratch, src1, src2, Condition(0), Condition(0), Condition(1));
@@ -6196,6 +6128,30 @@ void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
va(dst, scratch, dst, Condition(0), Condition(0), Condition(2));
}
+void MacroAssembler::I32x4DotI8x16AddS(
+ Simd128Register dst, Simd128Register src1, Simd128Register src2,
+ Simd128Register src3, Simd128Register scratch1, Simd128Register scratch2) {
+ // I8 -> I16.
+ vme(scratch1, src1, src2, Condition(0), Condition(0), Condition(0));
+ vmo(dst, src1, src2, Condition(0), Condition(0), Condition(0));
+ va(dst, scratch1, dst, Condition(0), Condition(0), Condition(1));
+ // I16 -> I32.
+ vrepi(scratch2, Operand(1), Condition(1));
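+  // Multiplying by a splat of 1 with even/odd multiplies and adding the
+  // results performs a pairwise widening add of the halfword lanes.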
+ vme(scratch1, dst, scratch2, Condition(0), Condition(0), Condition(1));
+ vmo(dst, dst, scratch2, Condition(0), Condition(0), Condition(1));
+ va(dst, scratch1, dst, Condition(0), Condition(0), Condition(2));
+ // Add src3.
+ va(dst, dst, src3, Condition(0), Condition(0), Condition(2));
+}
+
+void MacroAssembler::I16x8DotI8x16S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
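+  // Multiply even and odd byte lanes into halfword products, then add the
+  // adjacent products to form the widened pairwise dot product.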
+ vme(scratch, src1, src2, Condition(0), Condition(0), Condition(0));
+ vmo(dst, src1, src2, Condition(0), Condition(0), Condition(0));
+ va(dst, scratch, dst, Condition(0), Condition(0), Condition(1));
+}
+
#define Q15_MUL_ROAUND(accumulator, src1, src2, const_val, scratch, unpack) \
unpack(scratch, src1, Condition(0), Condition(0), Condition(1)); \
unpack(accumulator, src2, Condition(0), Condition(0), Condition(1)); \
@@ -6206,7 +6162,7 @@ void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
vrepi(scratch, Operand(15), Condition(2)); \
vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0), \
Condition(2));
-void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2,
@@ -6236,7 +6192,7 @@ void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
V(8x16, vlrep, LoadU8, 0)
#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadAndSplat##name##LE( \
+ void MacroAssembler::LoadAndSplat##name##LE( \
Simd128Register dst, const MemOperand& mem, Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
vector_instr(dst, mem, Condition(condition)); \
@@ -6259,7 +6215,7 @@ LOAD_SPLAT_LIST(LOAD_SPLAT)
V(8x8S, vuph, 0)
#define LOAD_EXTEND(name, unpack_instr, condition) \
- void TurboAssembler::LoadAndExtend##name##LE( \
+ void MacroAssembler::LoadAndExtend##name##LE( \
Simd128Register dst, const MemOperand& mem, Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
vlebrg(dst, mem, Condition(0)); \
@@ -6273,7 +6229,7 @@ LOAD_EXTEND_LIST(LOAD_EXTEND)
#undef LOAD_EXTEND
#undef LOAD_EXTEND_LIST
-void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
Register scratch) {
vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
@@ -6284,7 +6240,7 @@ void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
vlvg(dst, scratch, MemOperand(r0, 3), Condition(2));
}
-void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
Register scratch) {
vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
@@ -6302,7 +6258,7 @@ void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
V(8, vleb, LoadU8, 0)
#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ void MacroAssembler::LoadLane##name##LE(Simd128Register dst, \
const MemOperand& mem, int lane, \
Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
@@ -6323,7 +6279,7 @@ LOAD_LANE_LIST(LOAD_LANE)
V(8, vsteb, StoreU8, 0)
#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ void MacroAssembler::StoreLane##name##LE(Simd128Register src, \
const MemOperand& mem, int lane, \
Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
@@ -6347,10 +6303,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
LoadU64(destination, MemOperand(kRootRegister, offset));
}
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 6a91f1c096..06edec6516 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -41,9 +41,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
void CallBuiltin(Builtin builtin, Condition cond = al);
void TailCallBuiltin(Builtin builtin, Condition cond = al);
@@ -136,7 +136,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadEntryFromBuiltin(Builtin builtin, Register destination);
MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
+
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
@@ -220,6 +222,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LoadRoot(destination, index, al);
}
void LoadRoot(Register destination, RootIndex index, Condition cond);
+ void LoadTaggedRoot(Register destination, RootIndex index);
//--------------------------------------------------------------------------
// S390 Macro Assemblers for Instructions
//--------------------------------------------------------------------------
@@ -914,12 +917,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
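+  // Whether CallCFunction should record the caller PC/FP in the isolate's
+  // fast C-call slots before making the call.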
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
@@ -940,11 +954,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, AbortReason reason,
- CRegister cr = cr7) NOOP_UNLESS_DEBUG_CODE
+ CRegister cr = cr7) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but without condition.
// Use --debug-code to enable.
- void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
@@ -1079,8 +1093,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -1209,6 +1223,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Simd128Register scratch3);
void I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch);
+ void I16x8DotI8x16S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register scratch);
+ void I32x4DotI8x16AddS(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2, Simd128Register src3,
+ Simd128Register scratch1, Simd128Register scratch2);
void I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch1,
Simd128Register scratch2, Simd128Register scratch3);
@@ -1455,17 +1474,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
- // Loads a field containing a HeapObject and decompresses it if pointer
- // compression is enabled.
- void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch = no_reg);
- void LoadTaggedSignedField(Register destination, MemOperand field_operand);
-
// Loads a field containing any tagged value and decompresses it if necessary.
- void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch = no_reg);
+ void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch = no_reg);
+ void LoadTaggedSignedField(Register destination, MemOperand field_operand);
// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src);
@@ -1477,10 +1490,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
- void DecompressTaggedPointer(Register destination, MemOperand field_operand);
- void DecompressTaggedPointer(Register destination, Register source);
- void DecompressAnyTagged(Register destination, MemOperand field_operand);
- void DecompressAnyTagged(Register destination, Register source);
+ void DecompressTagged(Register destination, MemOperand field_operand);
+ void DecompressTagged(Register destination, Register source);
+ void DecompressTagged(const Register& destination, Tagged_t immediate);
// CountLeadingZeros will corrupt the scratch register pair (eg. r0:r1)
void CountLeadingZerosU32(Register dst, Register src,
@@ -1492,22 +1504,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CountTrailingZerosU64(Register dst, Register src,
Register scratch_pair = r0);
- private:
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
void LoadStackLimit(Register destination, StackLimitKind kind);
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
@@ -1521,24 +1517,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
StoreU64(rec, MemOperand(sp, 0));
}
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
- void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
- }
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump).
@@ -1592,9 +1581,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
CompareRoot(with, index);
@@ -1686,15 +1672,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
- // At least one slot (for the return address) should be provided.
- void EnterExitFrame(bool save_doubles, int stack_space = 1,
- StackFrame::Type frame_type = StackFrame::EXIT);
+ void EnterExitFrame(int stack_space, StackFrame::Type frame_type);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool argument_count_is_length = false);
+ void LeaveExitFrame(Register argument_count, bool argument_count_is_length);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -1726,27 +1709,27 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
template <typename Field>
void DecodeField(Register dst, Register src) {
@@ -1760,7 +1743,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register slot_address);
@@ -1798,11 +1781,20 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
SmiCheck smi_check = SmiCheck::kInline);
- void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
+ void TestCodeIsMarkedForDeoptimization(Register code, Register scratch);
Operand ClearedValue() const;
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index b3e5a49f2d..fc66830f0e 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -213,7 +213,6 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r5;
constexpr Register kJavaScriptCallExtraArg1Register = r4;
-constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r3;
constexpr Register kRuntimeCallArgCountRegister = r2;
constexpr Register kRuntimeCallArgvRegister = r4;
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 454eebc25c..b1ae0d8a7e 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -24,12 +24,9 @@ SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
: SafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {}
-#ifdef V8_EXTERNAL_CODE_SPACE
-SafepointTable::SafepointTable(Isolate* isolate, Address pc,
- CodeDataContainer code)
+SafepointTable::SafepointTable(Isolate* isolate, Address pc, GcSafeCode code)
: SafepointTable(code.InstructionStart(isolate, pc),
code.SafepointTableAddress()) {}
-#endif // V8_EXTERNAL_CODE_SPACE
#if V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(const wasm::WasmCode* code)
@@ -80,6 +77,13 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
UNREACHABLE();
}
+// static
+SafepointEntry SafepointTable::FindEntry(Isolate* isolate, GcSafeCode code,
+ Address pc) {
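+  // Construct a table for |code| and delegate to the instance lookup.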
+ SafepointTable table(isolate, pc, code);
+ return table.FindEntry(pc);
+}
+
void SafepointTable::Print(std::ostream& os) const {
os << "Safepoints (entries = " << length_ << ", byte size = " << byte_size()
<< ")\n";
@@ -118,7 +122,7 @@ void SafepointTable::Print(std::ostream& os) const {
SafepointTableBuilder::Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler) {
- entries_.push_back(EntryBuilder(zone_, assembler->pc_offset_for_safepoint()));
+ entries_.emplace_back(zone_, assembler->pc_offset_for_safepoint());
return SafepointTableBuilder::Safepoint(&entries_.back(), this);
}
@@ -127,7 +131,7 @@ int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
int deopt_index) {
DCHECK_NE(SafepointEntry::kNoTrampolinePC, trampoline);
DCHECK_NE(SafepointEntry::kNoDeoptIndex, deopt_index);
- auto it = entries_.Find(start);
+ auto it = entries_.begin() + start;
DCHECK(std::any_of(it, entries_.end(),
[pc](auto& entry) { return entry.pc == pc; }));
int index = start;
@@ -171,7 +175,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
#endif
// Make sure the safepoint table is properly aligned. Pad with nops.
- assembler->Align(Code::kMetadataAlignment);
+ assembler->Align(InstructionStream::kMetadataAlignment);
assembler->RecordComment(";;; Safepoint table.");
set_safepoint_table_offset(assembler->pc_offset());
@@ -289,10 +293,9 @@ void SafepointTableBuilder::RemoveDuplicates() {
};
auto remaining_it = entries_.begin();
- size_t remaining = 0;
+ auto end = entries_.end();
- for (auto it = entries_.begin(), end = entries_.end(); it != end;
- ++remaining_it, ++remaining) {
+ for (auto it = entries_.begin(); it != end; ++remaining_it) {
if (remaining_it != it) *remaining_it = *it;
// Merge identical entries.
do {
@@ -300,7 +303,7 @@ void SafepointTableBuilder::RemoveDuplicates() {
} while (it != end && is_identical_except_for_pc(*it, *remaining_it));
}
- entries_.Rewind(remaining);
+ entries_.erase(remaining_it, end);
}
} // namespace internal
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 963729dd79..3676ae1c11 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -10,12 +10,14 @@
#include "src/common/assert-scope.h"
#include "src/utils/allocation.h"
#include "src/utils/bit-vector.h"
-#include "src/zone/zone-chunk-list.h"
+#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
+class GcSafeCode;
+
namespace wasm {
class WasmCode;
} // namespace wasm
@@ -54,16 +56,14 @@ class SafepointEntry : public SafepointEntryBase {
base::Vector<uint8_t> tagged_slots_;
};
-// A wrapper class for accessing the safepoint table embedded into the Code
-// object.
+// A wrapper class for accessing the safepoint table embedded into the
+// InstructionStream object.
class SafepointTable {
public:
// The isolate and pc arguments are used for figuring out whether pc
// belongs to the embedded or un-embedded code blob.
+ explicit SafepointTable(Isolate* isolate, Address pc, InstructionStream code);
explicit SafepointTable(Isolate* isolate, Address pc, Code code);
-#ifdef V8_EXTERNAL_CODE_SPACE
- explicit SafepointTable(Isolate* isolate, Address pc, CodeDataContainer code);
-#endif
#if V8_ENABLE_WEBASSEMBLY
explicit SafepointTable(const wasm::WasmCode* code);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -115,10 +115,14 @@ class SafepointTable {
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
+ static SafepointEntry FindEntry(Isolate* isolate, GcSafeCode code,
+ Address pc);
void Print(std::ostream&) const;
private:
+ SafepointTable(Isolate* isolate, Address pc, GcSafeCode code);
+
// Layout information.
static constexpr int kLengthOffset = 0;
static constexpr int kEntryConfigurationOffset = kLengthOffset + kIntSize;
@@ -258,7 +262,7 @@ class SafepointTableBuilder : public SafepointTableBuilderBase {
#endif // DEBUG
int min_stack_index_ = std::numeric_limits<int>::max();
- ZoneChunkList<EntryBuilder> entries_;
+ ZoneDeque<EntryBuilder> entries_;
Zone* zone_;
};
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 826e6d0250..fb7fb8d582 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -27,7 +27,7 @@
namespace v8 {
namespace internal {
-void SharedTurboAssembler::Move(Register dst, uint32_t src) {
+void SharedMacroAssemblerBase::Move(Register dst, uint32_t src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
mov(dst, Immediate(src));
@@ -38,7 +38,7 @@ void SharedTurboAssembler::Move(Register dst, uint32_t src) {
#endif
}
-void SharedTurboAssembler::Move(Register dst, Register src) {
+void SharedMacroAssemblerBase::Move(Register dst, Register src) {
// Helper to paper over the different assembler function names.
if (dst != src) {
#if V8_TARGET_ARCH_IA32
@@ -51,7 +51,7 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
}
}
-void SharedTurboAssembler::Add(Register dst, Immediate src) {
+void SharedMacroAssemblerBase::Add(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
add(dst, src);
@@ -62,7 +62,7 @@ void SharedTurboAssembler::Add(Register dst, Immediate src) {
#endif
}
-void SharedTurboAssembler::And(Register dst, Immediate src) {
+void SharedMacroAssemblerBase::And(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
and_(dst, src);
@@ -77,8 +77,8 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif
}
-void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
- Operand src2) {
+void SharedMacroAssemblerBase::Movhps(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovhps(dst, src1, src2);
@@ -90,8 +90,8 @@ void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
- Operand src2) {
+void SharedMacroAssemblerBase::Movlps(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovlps(dst, src1, src2);
@@ -102,9 +102,34 @@ void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
movlps(dst, src2);
}
}
+void SharedMacroAssemblerBase::Blendvpd(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vblendvpd(dst, src1, src2, mask);
+ } else {
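+    // SSE4.1 blendvpd takes the mask implicitly in xmm0 and blends into dst
+    // in place, hence the register constraints checked below.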
+ CpuFeatureScope scope(this, SSE4_1);
+ DCHECK_EQ(mask, xmm0);
+ DCHECK_EQ(dst, src1);
+ blendvpd(dst, src2);
+ }
+}
+
+void SharedMacroAssemblerBase::Blendvps(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vblendvps(dst, src1, src2, mask);
+ } else {
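+    // As with Blendvpd: SSE4.1 blendvps uses xmm0 as the implicit mask and
+    // requires dst == src1.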
+ CpuFeatureScope scope(this, SSE4_1);
+ DCHECK_EQ(mask, xmm0);
+ DCHECK_EQ(dst, src1);
+ blendvps(dst, src2);
+ }
+}
-void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
+void SharedMacroAssemblerBase::Pblendvb(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpblendvb(dst, src1, src2, mask);
@@ -116,8 +141,8 @@ void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, uint8_t imm8) {
+void SharedMacroAssemblerBase::Shufps(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vshufps(dst, src1, src2, imm8);
@@ -129,8 +154,8 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
- uint8_t lane) {
+void SharedMacroAssemblerBase::F64x2ExtractLane(DoubleRegister dst,
+ XMMRegister src, uint8_t lane) {
ASM_CODE_COMMENT(this);
if (lane == 0) {
if (dst != src) {
@@ -148,8 +173,10 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
}
}
-void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
- DoubleRegister rep, uint8_t lane) {
+void SharedMacroAssemblerBase::F64x2ReplaceLane(XMMRegister dst,
+ XMMRegister src,
+ DoubleRegister rep,
+ uint8_t lane) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -172,8 +199,8 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
}
}
-void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
- XMMRegister rhs, XMMRegister scratch) {
+void SharedMacroAssemblerBase::F32x4Min(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The minps instruction doesn't propagate NaNs and +0's in its first
// operand. Perform minps in both orders, merge the results, and adjust.
@@ -201,8 +228,8 @@ void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
Andnps(dst, dst, scratch);
}
-void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
- XMMRegister rhs, XMMRegister scratch) {
+void SharedMacroAssemblerBase::F32x4Max(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The maxps instruction doesn't propagate NaNs and +0's in its first
// operand. Perform maxps in both orders, merge the results, and adjust.
@@ -233,8 +260,8 @@ void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
Andnps(dst, dst, scratch);
}
-void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
- XMMRegister rhs, XMMRegister scratch) {
+void SharedMacroAssemblerBase::F64x2Min(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -271,8 +298,8 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
}
}
-void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
- XMMRegister rhs, XMMRegister scratch) {
+void SharedMacroAssemblerBase::F64x2Max(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -311,7 +338,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
}
}
-void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
+void SharedMacroAssemblerBase::F32x4Splat(XMMRegister dst, DoubleRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@@ -329,8 +356,8 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
}
}
-void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
- uint8_t lane) {
+void SharedMacroAssemblerBase::F32x4ExtractLane(FloatRegister dst,
+ XMMRegister src, uint8_t lane) {
ASM_CODE_COMMENT(this);
DCHECK_LT(lane, 4);
// These instructions are shorter than insertps, but will leave junk in
@@ -351,8 +378,8 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
}
}
-void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
+void SharedMacroAssemblerBase::S128Store32Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movss(dst, src);
@@ -363,8 +390,8 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
}
template <typename Op>
-void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I8x16SplatPreAvx2(XMMRegister dst, Op src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
DCHECK(!CpuFeatures::IsSupported(AVX2));
CpuFeatureScope ssse3_scope(this, SSSE3);
@@ -373,8 +400,8 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
Pshufb(dst, scratch);
}
-void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Register src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@@ -385,8 +412,8 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
}
}
-void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) {
@@ -397,9 +424,9 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
}
}
-void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
- uint8_t src2, Register tmp1,
- XMMRegister tmp2) {
+void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2);
// Perform 16-bit shift, then mask away low bits.
@@ -419,9 +446,9 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Pand(dst, tmp2);
}
-void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
- Register src2, Register tmp1,
- XMMRegister tmp2, XMMRegister tmp3) {
+void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK(!AreAliased(src1, tmp2, tmp3));
@@ -446,8 +473,8 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Psllw(dst, dst, tmp3);
}
-void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
- uint8_t src2, XMMRegister tmp) {
+void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, XMMRegister tmp) {
ASM_CODE_COMMENT(this);
// Unpack bytes into words, do word (16-bit) shifts, and repack.
DCHECK_NE(dst, tmp);
@@ -460,9 +487,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Packsswb(dst, tmp);
}
-void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
- Register src2, Register tmp1,
- XMMRegister tmp2, XMMRegister tmp3) {
+void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2);
@@ -481,9 +508,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Packsswb(dst, tmp2);
}
-void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
- uint8_t src2, Register tmp1,
- XMMRegister tmp2) {
+void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2);
if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
@@ -503,9 +530,9 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Pand(dst, tmp2);
}
-void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
- Register src2, Register tmp1,
- XMMRegister tmp2, XMMRegister tmp3) {
+void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2);
@@ -525,14 +552,14 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
}
template <typename Op>
-void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
+void SharedMacroAssemblerBase::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
DCHECK(!CpuFeatures::IsSupported(AVX2));
Movd(dst, src);
Pshuflw(dst, dst, uint8_t{0x0});
Punpcklqdq(dst, dst);
}
-void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
+void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Register src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@@ -543,7 +570,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
}
}
-void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
+void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Operand src) {
ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) {
@@ -554,18 +581,20 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
}
}
-void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch,
- bool is_signed) {
+void SharedMacroAssemblerBase::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch,
+ bool is_signed) {
ASM_CODE_COMMENT(this);
is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
Pmullw(dst, scratch);
}
-void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8ExtMulHighS(XMMRegister dst,
+ XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -587,9 +616,10 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
- XMMRegister src2,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8ExtMulHighU(XMMRegister dst,
+ XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// The logic here is slightly complicated to handle all the cases of register
// aliasing. This allows flexibility for callers in TurboFan and Liftoff.
@@ -637,8 +667,8 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
- XMMRegister src) {
+void SharedMacroAssemblerBase::I16x8SConvertI8x16High(XMMRegister dst,
+ XMMRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -660,9 +690,9 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
}
}
-void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
- XMMRegister src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8UConvertI8x16High(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -686,9 +716,10 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
}
}
-void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8Q15MulRSatS(XMMRegister dst,
+ XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// k = i16x8.splat(0x8000)
Pcmpeqd(scratch, scratch);
@@ -704,9 +735,9 @@ void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
Pxor(dst, scratch);
}
-void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
- XMMRegister src1,
- XMMRegister src2) {
+void SharedMacroAssemblerBase::I16x8DotI8x16I7x16S(XMMRegister dst,
+ XMMRegister src1,
+ XMMRegister src2) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -719,9 +750,33 @@ void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
}
}
-void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
- XMMRegister src,
- XMMRegister tmp) {
+void SharedMacroAssemblerBase::I32x4DotI8x16I7x16AddS(
+ XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3,
+ XMMRegister scratch, XMMRegister splat_reg) {
+ ASM_CODE_COMMENT(this);
+ // k = i16x8.splat(1)
+ Pcmpeqd(splat_reg, splat_reg);
+ Psrlw(splat_reg, splat_reg, byte{15});
+
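+  // pmaddubsw multiplies bytes and adds adjacent pairs into i16 lanes;
+  // pmaddwd against the splat of 1 then sums adjacent i16 pairs into i32.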
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddubsw(scratch, src2, src1);
+ } else {
+ movdqa(scratch, src2);
+ pmaddubsw(scratch, src1);
+ }
+ Pmaddwd(scratch, splat_reg);
+ if (dst == src3) {
+ Paddd(dst, scratch);
+ } else {
+ Movdqa(dst, src3);
+ Paddd(dst, scratch);
+ }
+}
+
+void SharedMacroAssemblerBase::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister tmp) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -763,9 +818,10 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
// 1. Multiply low word into scratch.
// 2. Multiply high word (can be signed or unsigned) into dst.
// 3. Unpack and interleave scratch and dst into dst.
-void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch,
- bool low, bool is_signed) {
+void SharedMacroAssemblerBase::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch, bool low,
+ bool is_signed) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -781,8 +837,8 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
- XMMRegister src) {
+void SharedMacroAssemblerBase::I32x4SConvertI16x8High(XMMRegister dst,
+ XMMRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -804,9 +860,9 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
}
}
-void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
- XMMRegister src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I32x4UConvertI16x8High(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -830,8 +886,8 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
}
}
-void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I64x2Neg(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -847,8 +903,8 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
}
}
-void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I64x2Abs(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -868,8 +924,8 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
}
}
-void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
- XMMRegister src1, XMMRegister scratch) {
+void SharedMacroAssemblerBase::I64x2GtS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -902,8 +958,8 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
}
}
-void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
- XMMRegister src1, XMMRegister scratch) {
+void SharedMacroAssemblerBase::I64x2GeS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1, XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -937,8 +993,8 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
}
}
-void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
- uint8_t shift, XMMRegister xmm_tmp) {
+void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
+ uint8_t shift, XMMRegister xmm_tmp) {
ASM_CODE_COMMENT(this);
DCHECK_GT(64, shift);
DCHECK_NE(xmm_tmp, dst);
@@ -970,10 +1026,10 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp);
}
-void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
- Register shift, XMMRegister xmm_tmp,
- XMMRegister xmm_shift,
- Register tmp_shift) {
+void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
+ Register shift, XMMRegister xmm_tmp,
+ XMMRegister xmm_shift,
+ Register tmp_shift) {
ASM_CODE_COMMENT(this);
DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src);
@@ -1000,9 +1056,9 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp);
}
-void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
- XMMRegister rhs, XMMRegister tmp1,
- XMMRegister tmp2) {
+void SharedMacroAssemblerBase::I64x2Mul(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister tmp1,
+ XMMRegister tmp2) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp1, tmp2));
DCHECK(!AreAliased(lhs, tmp1, tmp2));
@@ -1050,9 +1106,10 @@ void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
// 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2.
// For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
-void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch,
- bool low, bool is_signed) {
+void SharedMacroAssemblerBase::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch, bool low,
+ bool is_signed) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -1081,8 +1138,8 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
}
}
-void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
- XMMRegister src) {
+void SharedMacroAssemblerBase::I64x2SConvertI32x4High(XMMRegister dst,
+ XMMRegister src) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -1099,9 +1156,9 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
}
}
-void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
- XMMRegister src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::I64x2UConvertI32x4High(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -1121,8 +1178,8 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
}
}
-void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::S128Not(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
if (dst == src) {
Pcmpeqd(scratch, scratch);
@@ -1133,9 +1190,9 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
}
}
-void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
- XMMRegister src1, XMMRegister src2,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::S128Select(XMMRegister dst, XMMRegister mask,
+ XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
// v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
// pandn(x, y) = !x & y, so we have to flip the mask and input.
@@ -1154,8 +1211,8 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
}
}
-void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
  // The trap handler uses the current pc to create a landing, so that it can
  // determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@@ -1177,8 +1234,8 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
}
}
-void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
- XMMRegister scratch) {
+void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
ASM_CODE_COMMENT(this);
  // The trap handler uses the current pc to create a landing, so that it can
  // determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@@ -1199,7 +1256,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
}
}
-void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
+void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) {
ASM_CODE_COMMENT(this);
  // The trap handler uses the current pc to create a landing, so that it can
  // determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@@ -1213,8 +1270,8 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
}
}
-void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
+void SharedMacroAssemblerBase::S128Store64Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movlps(dst, src);
@@ -1230,37 +1287,37 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
if (CpuFeatures::IsSupported(FMA3)) { \
CpuFeatureScope fma3_scope(this, FMA3); \
if (dst == src1) { \
- vfmadd231##ps_or_pd(dst, src2, src3); \
+ vfmadd213##ps_or_pd(dst, src2, src3); \
} else if (dst == src2) { \
- vfmadd132##ps_or_pd(dst, src1, src3); \
+ vfmadd213##ps_or_pd(dst, src1, src3); \
} else if (dst == src3) { \
- vfmadd213##ps_or_pd(dst, src2, src1); \
+ vfmadd231##ps_or_pd(dst, src2, src1); \
} else { \
CpuFeatureScope avx_scope(this, AVX); \
vmovups(dst, src1); \
- vfmadd231##ps_or_pd(dst, src2, src3); \
+ vfmadd213##ps_or_pd(dst, src2, src3); \
} \
} else if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(this, AVX); \
- vmul##ps_or_pd(tmp, src2, src3); \
- vadd##ps_or_pd(dst, src1, tmp); \
+ vmul##ps_or_pd(tmp, src1, src2); \
+ vadd##ps_or_pd(dst, tmp, src3); \
} else { \
if (dst == src1) { \
- movaps(tmp, src2); \
- mul##ps_or_pd(tmp, src3); \
- add##ps_or_pd(dst, tmp); \
+ mul##ps_or_pd(dst, src2); \
+ add##ps_or_pd(dst, src3); \
} else if (dst == src2) { \
DCHECK_NE(src2, src1); \
- mul##ps_or_pd(src2, src3); \
- add##ps_or_pd(src2, src1); \
+ mul##ps_or_pd(dst, src1); \
+ add##ps_or_pd(dst, src3); \
} else if (dst == src3) { \
DCHECK_NE(src3, src1); \
- mul##ps_or_pd(src3, src2); \
- add##ps_or_pd(src3, src1); \
+ movaps(tmp, src1); \
+ mul##ps_or_pd(tmp, src2); \
+ add##ps_or_pd(dst, tmp); \
} else { \
- movaps(dst, src2); \
- mul##ps_or_pd(dst, src3); \
- add##ps_or_pd(dst, src1); \
+ movaps(dst, src1); \
+ mul##ps_or_pd(dst, src2); \
+ add##ps_or_pd(dst, src3); \
} \
}
@@ -1270,50 +1327,50 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
if (CpuFeatures::IsSupported(FMA3)) { \
CpuFeatureScope fma3_scope(this, FMA3); \
if (dst == src1) { \
- vfnmadd231##ps_or_pd(dst, src2, src3); \
+ vfnmadd213##ps_or_pd(dst, src2, src3); \
} else if (dst == src2) { \
- vfnmadd132##ps_or_pd(dst, src1, src3); \
+ vfnmadd213##ps_or_pd(dst, src1, src3); \
} else if (dst == src3) { \
- vfnmadd213##ps_or_pd(dst, src2, src1); \
+ vfnmadd231##ps_or_pd(dst, src2, src1); \
} else { \
CpuFeatureScope avx_scope(this, AVX); \
vmovups(dst, src1); \
- vfnmadd231##ps_or_pd(dst, src2, src3); \
+ vfnmadd213##ps_or_pd(dst, src2, src3); \
} \
} else if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(this, AVX); \
- vmul##ps_or_pd(tmp, src2, src3); \
- vsub##ps_or_pd(dst, src1, tmp); \
+ vmul##ps_or_pd(tmp, src1, src2); \
+ vsub##ps_or_pd(dst, src3, tmp); \
} else { \
- movaps(tmp, src2); \
- mul##ps_or_pd(tmp, src3); \
- if (dst != src1) { \
- movaps(dst, src1); \
+ movaps(tmp, src1); \
+ mul##ps_or_pd(tmp, src2); \
+ if (dst != src3) { \
+ movaps(dst, src3); \
} \
sub##ps_or_pd(dst, tmp); \
}
-void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
+void SharedMacroAssemblerBase::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
QFMA(ps)
}
-void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
+void SharedMacroAssemblerBase::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
QFMS(ps)
}
-void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
+void SharedMacroAssemblerBase::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
QFMA(pd);
}
-void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
+void SharedMacroAssemblerBase::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
QFMS(pd);
}
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 985c154eac..ae97572783 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -8,7 +8,7 @@
#include "src/base/macros.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
@@ -30,15 +30,15 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
constexpr int kStackSavedSavedFPSize = kDoubleSize;
#endif // V8_ENABLE_WEBASSEMBLY
-// Base class for SharedTurboAssemblerBase. This class contains macro-assembler
+// Base class for SharedMacroAssembler. This class contains macro-assembler
// functions that can be shared across ia32 and x64 without any template
// machinery, i.e. does not require the CRTP pattern that
-// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of
+// SharedMacroAssembler exposes. This allows us to keep the bulk of
// definition inside a separate source file, rather than putting everything
// inside this header.
-class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE SharedMacroAssemblerBase : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
void Move(Register dst, uint32_t src);
// Move if registers are not identical.
@@ -50,6 +50,10 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void Movhps(XMMRegister dst, XMMRegister src1, Operand src2);
void Movlps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+ void Blendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask);
@@ -443,6 +447,9 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch);
void I16x8DotI8x16I7x16S(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void I32x4DotI8x16I7x16AddS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister scratch, XMMRegister splat_reg);
void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
XMMRegister tmp);
// Requires that dst == src1 if AVX is not supported.
@@ -523,41 +530,41 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8SplatPreAvx2(XMMRegister dst, Op src);
};
-// Common base class template shared by ia32 and x64 TurboAssembler. This uses
+// Common base class template shared by ia32 and x64 MacroAssembler. This uses
// the Curiously Recurring Template Pattern (CRTP), where Impl is the actual
-// class (subclass of SharedTurboAssemblerBase instantiated with the actual
+// class (subclass of SharedMacroAssembler instantiated with the actual
// class). This allows static polymorphism, where member functions can be moved
-// into SharedTurboAssembler, and we can also call into member functions
-// defined in ia32 or x64 specific TurboAssembler from within this template
+// into SharedMacroAssemblerBase, and we can also call into member functions
+// defined in ia32 or x64 specific MacroAssembler from within this template
// class, via Impl.
//
// Note: all member functions must be defined in this header file so that the
// compiler can generate code for the function definitions. See
// https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
-// If a function does not need polymorphism, move it into SharedTurboAssembler,
-// and define it outside of this header.
+// If a function does not need polymorphism, move it into
+// SharedMacroAssemblerBase, and define it outside of this header.
template <typename Impl>
-class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
- using SharedTurboAssembler::SharedTurboAssembler;
+class V8_EXPORT_PRIVATE SharedMacroAssembler : public SharedMacroAssemblerBase {
+ using SharedMacroAssemblerBase::SharedMacroAssemblerBase;
public:
void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
- FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+ FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
ExternalReference::address_of_double_abs_constant());
}
void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
- FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+ FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
ExternalReference::address_of_float_abs_constant());
}
void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
- FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+ FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
ExternalReference::address_of_double_neg_constant());
}
void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
- FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+ FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
ExternalReference::address_of_float_neg_constant());
}
#undef FLOAT_UNOP
@@ -968,15 +975,16 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
return impl()->ExternalReferenceAsOperand(reference, scratch);
}
- using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister,
- XMMRegister, Operand);
+ using FloatInstruction = void (SharedMacroAssemblerBase::*)(XMMRegister,
+ XMMRegister,
+ Operand);
void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
FloatInstruction op, ExternalReference ext) {
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
movaps(dst, src);
src = dst;
}
- SharedTurboAssembler* assm = this;
+ SharedMacroAssemblerBase* assm = this;
(assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
}
};
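
For readers less familiar with the CRTP arrangement described in the comments above, here is a minimal standalone sketch of the pattern (the class names SharedSketch and Ia32Sketch are illustrative only, not V8 types): the shared base template dispatches into the concrete class through impl(), with no virtual calls involved.

#include <iostream>

// Minimal CRTP sketch: the template parameter is the concrete class, so the
// shared code can call its members statically via impl().
template <typename Impl>
class SharedSketch {
 public:
  void EmitCombined() {
    impl()->EmitArchSpecific();  // Resolved at compile time, no vtable.
    std::cout << "shared part\n";
  }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class Ia32Sketch : public SharedSketch<Ia32Sketch> {
 public:
  void EmitArchSpecific() { std::cout << "ia32 part\n"; }
};

int main() {
  Ia32Sketch assm;
  assm.EmitCombined();  // Prints "ia32 part" then "shared part".
}
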
diff --git a/deps/v8/src/codegen/signature.h b/deps/v8/src/codegen/signature.h
index 38f79d4629..5a17688a54 100644
--- a/deps/v8/src/codegen/signature.h
+++ b/deps/v8/src/codegen/signature.h
@@ -66,11 +66,22 @@ class Signature : public ZoneObject {
Builder(Zone* zone, size_t return_count, size_t parameter_count)
: return_count_(return_count),
parameter_count_(parameter_count),
- zone_(zone),
rcursor_(0),
- pcursor_(0),
- buffer_(zone->NewArray<T>(
- static_cast<int>(return_count + parameter_count))) {}
+ pcursor_(0) {
+ // Allocate memory for the signature plus the array backing the
+ // signature.
+ constexpr size_t padding = sizeof(Signature<T>) % alignof(T);
+ using AllocationTypeTag = Signature<T>::Builder;
+ const size_t allocated_bytes =
+ sizeof(Signature<T>) + padding +
+ sizeof(T) * (return_count + parameter_count);
+ void* memory = zone->Allocate<AllocationTypeTag>(allocated_bytes);
+ uint8_t* rep_buffer =
+ reinterpret_cast<uint8_t*>(memory) + sizeof(Signature<T>) + padding;
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(rep_buffer), alignof(T)));
+ buffer_ = reinterpret_cast<T*>(rep_buffer);
+ sig_ = new (memory) Signature<T>{return_count, parameter_count, buffer_};
+ }
const size_t return_count_;
const size_t parameter_count_;
@@ -91,16 +102,30 @@ class Signature : public ZoneObject {
pcursor_ = std::max(pcursor_, index + 1);
}
+ Signature<T>* Get() const {
+ DCHECK_EQ(rcursor_, return_count_);
+ DCHECK_EQ(pcursor_, parameter_count_);
+ DCHECK_NOT_NULL(sig_);
+ return sig_;
+ }
+
+ // TODO(clemensb): Remove {Build()}, replace all callers by {Get()}.
Signature<T>* Build() {
+ // {Build} is the old API, and should be replaced by {Get}.
+ // {Build} previously returned a freshly allocated pointer, so make sure
+ // that we do not call it twice by clearing the {sig_} field.
+ DCHECK_NOT_NULL(sig_);
DCHECK_EQ(rcursor_, return_count_);
DCHECK_EQ(pcursor_, parameter_count_);
- return zone_->New<Signature<T>>(return_count_, parameter_count_, buffer_);
+ Signature<T>* sig = sig_;
+ sig_ = nullptr;
+ return sig;
}
private:
- Zone* zone_;
size_t rcursor_;
size_t pcursor_;
+ Signature<T>* sig_;
T* buffer_;
};
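
For illustration, the single-allocation layout used by the builder above can be reproduced in isolation as follows. This is only a sketch: Sig and NewSig are made-up stand-ins for Signature<T> and the zone allocation, and the padding is computed with the generic round-up formula.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <new>

// Header plus trailing array in one allocation (illustrative types, not V8's).
template <typename T>
struct Sig {
  size_t return_count;
  size_t parameter_count;
  T* reps;  // Points into the same block, just after the header.
};

template <typename T>
Sig<T>* NewSig(size_t returns, size_t params) {
  // Pad so the rep array following the header is aligned for T.
  constexpr size_t padding =
      (alignof(T) - sizeof(Sig<T>) % alignof(T)) % alignof(T);
  const size_t bytes =
      sizeof(Sig<T>) + padding + sizeof(T) * (returns + params);
  void* memory = ::operator new(bytes);  // Stand-in for a zone allocation.
  T* reps = reinterpret_cast<T*>(static_cast<char*>(memory) +
                                 sizeof(Sig<T>) + padding);
  assert(reinterpret_cast<uintptr_t>(reps) % alignof(T) == 0);
  return new (memory) Sig<T>{returns, params, reps};
}

int main() {
  Sig<int>* sig = NewSig<int>(1, 2);
  sig->reps[0] = 42;  // Fill returns first, then parameters, like the builder.
  assert(sig->return_count == 1 && sig->parameter_count == 2);
  ::operator delete(sig);  // A zone would release this in bulk instead.
}
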
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 6579a419fa..efa3e9d8b1 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -160,7 +160,8 @@ void SourcePositionTableBuilder::AddPosition(size_t code_offset,
AddEntry({offset, source_position.raw(), is_statement});
}
-void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
+V8_INLINE void SourcePositionTableBuilder::AddEntry(
+ const PositionTableEntry& entry) {
PositionTableEntry tmp(entry);
SubtractFromEntry(&tmp, previous_);
EncodeEntry(&bytes_, tmp);
diff --git a/deps/v8/src/codegen/source-position-table.h b/deps/v8/src/codegen/source-position-table.h
index 3e7340da3c..13bf2adf5f 100644
--- a/deps/v8/src/codegen/source-position-table.h
+++ b/deps/v8/src/codegen/source-position-table.h
@@ -24,14 +24,14 @@ class Zone;
struct PositionTableEntry {
PositionTableEntry()
- : code_offset(kFunctionEntryBytecodeOffset),
- source_position(0),
+ : source_position(0),
+ code_offset(kFunctionEntryBytecodeOffset),
is_statement(false) {}
PositionTableEntry(int offset, int64_t source, bool statement)
- : code_offset(offset), source_position(source), is_statement(statement) {}
+ : source_position(source), code_offset(offset), is_statement(statement) {}
- int code_offset;
int64_t source_position;
+ int code_offset;
bool is_statement;
};
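
The reordering above is presumably a packing optimization: putting the 8-byte source_position first removes the alignment padding the old order required. A small compile-time sketch of the effect (the layouts shown are what typical 64-bit ABIs produce; the exact sizes are not guaranteed by the C++ standard):

#include <cstdint>

struct OldLayout {          // int, int64_t, bool
  int code_offset;          // 4 bytes, then 4 bytes of padding
  int64_t source_position;  // 8 bytes
  bool is_statement;        // 1 byte, then 7 bytes of tail padding
};                          // typically 24 bytes

struct NewLayout {          // int64_t, int, bool
  int64_t source_position;  // 8 bytes
  int code_offset;          // 4 bytes
  bool is_statement;        // 1 byte, then 3 bytes of tail padding
};                          // typically 16 bytes

static_assert(sizeof(NewLayout) <= sizeof(OldLayout),
              "the reordered layout is no larger");
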
diff --git a/deps/v8/src/codegen/source-position.cc b/deps/v8/src/codegen/source-position.cc
index e08f2d11a4..60ff9f8f98 100644
--- a/deps/v8/src/codegen/source-position.cc
+++ b/deps/v8/src/codegen/source-position.cc
@@ -47,53 +47,52 @@ std::ostream& operator<<(std::ostream& out, const SourcePosition& pos) {
}
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
- OptimizedCompilationInfo* cinfo) const {
+ Isolate* isolate, OptimizedCompilationInfo* cinfo) const {
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
const auto& inl = cinfo->inlined_functions()[pos.InliningId()];
- stack.push_back(SourcePositionInfo(pos, inl.shared_info));
+ stack.push_back(SourcePositionInfo(isolate, pos, inl.shared_info));
pos = inl.position.position;
}
- stack.push_back(SourcePositionInfo(pos, cinfo->shared_info()));
+ stack.push_back(SourcePositionInfo(isolate, pos, cinfo->shared_info()));
return stack;
}
-std::vector<SourcePositionInfo> SourcePosition::InliningStack(
- Handle<Code> code) const {
- Isolate* isolate = code->GetIsolate();
+std::vector<SourcePositionInfo> SourcePosition::InliningStack(Isolate* isolate,
+ Code code) const {
DeoptimizationData deopt_data =
- DeoptimizationData::cast(code->deoptimization_data());
+ DeoptimizationData::cast(code.deoptimization_data());
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId());
Handle<SharedFunctionInfo> function(
deopt_data.GetInlinedFunction(inl.inlined_function_id), isolate);
- stack.push_back(SourcePositionInfo(pos, function));
+ stack.push_back(SourcePositionInfo(isolate, pos, function));
pos = inl.position;
}
Handle<SharedFunctionInfo> function(
SharedFunctionInfo::cast(deopt_data.SharedFunctionInfo()), isolate);
- stack.push_back(SourcePositionInfo(pos, function));
+ stack.push_back(SourcePositionInfo(isolate, pos, function));
return stack;
}
-SourcePositionInfo SourcePosition::FirstInfo(Handle<Code> code) const {
+SourcePositionInfo SourcePosition::FirstInfo(Isolate* isolate,
+ Code code) const {
DisallowGarbageCollection no_gc;
- Isolate* isolate = code->GetIsolate();
DeoptimizationData deopt_data =
- DeoptimizationData::cast(code->deoptimization_data());
+ DeoptimizationData::cast(code.deoptimization_data());
SourcePosition pos = *this;
if (pos.isInlined()) {
InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId());
Handle<SharedFunctionInfo> function(
deopt_data.GetInlinedFunction(inl.inlined_function_id), isolate);
- return SourcePositionInfo(pos, function);
+ return SourcePositionInfo(isolate, pos, function);
}
Handle<SharedFunctionInfo> function(
SharedFunctionInfo::cast(deopt_data.SharedFunctionInfo()), isolate);
- return SourcePositionInfo(pos, function);
+ return SourcePositionInfo(isolate, pos, function);
}
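
The two InliningStack overloads above share the same shape: follow the caller position recorded for each inlined frame until the position is no longer marked as inlined, then append the outermost function. A schematic version of that walk (toy data structures, not the deoptimization-data layout):

#include <vector>

// Toy model of the walk in SourcePosition::InliningStack above: each inlined
// frame records its caller's position; the loop unwinds until the position is
// no longer inlined, then appends the outermost frame.
struct Pos {
  int script_offset;
  int inlining_id;  // -1 means "not inlined".
  bool IsInlined() const { return inlining_id >= 0; }
};

struct InlinedFrame {
  int function_id;
  Pos caller_pos;
};

std::vector<int> InliningStack(Pos pos, const std::vector<InlinedFrame>& inl,
                               int outer_function_id) {
  std::vector<int> stack;
  while (pos.IsInlined()) {
    const InlinedFrame& frame = inl[pos.inlining_id];
    stack.push_back(frame.function_id);
    pos = frame.caller_pos;  // Step out one level of inlining.
  }
  stack.push_back(outer_function_id);  // Outermost, non-inlined function.
  return stack;
}

int main() {
  // Frame 0: function 7 inlined at a non-inlined caller position.
  std::vector<InlinedFrame> inlined = {{7, {30, -1}}};
  std::vector<int> stack = InliningStack({12, 0}, inlined, 1);
  // Innermost first: inlined function 7, then the outer function 1.
  return stack.size() == 2 && stack[0] == 7 && stack[1] == 1 ? 0 : 1;
}
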
void SourcePosition::Print(std::ostream& out,
@@ -148,13 +147,13 @@ void SourcePosition::Print(std::ostream& out, Code code) const {
}
}
-SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
+SourcePositionInfo::SourcePositionInfo(Isolate* isolate, SourcePosition pos,
Handle<SharedFunctionInfo> f)
: position(pos),
shared(f),
script(f.is_null() || !f->script().IsScript()
? Handle<Script>::null()
- : handle(Script::cast(f->script()), f->GetIsolate())) {
+ : handle(Script::cast(f->script()), isolate)) {
if (!script.is_null()) {
Script::PositionInfo info;
if (Script::GetPositionInfo(script, pos.ScriptOffset(), &info,
diff --git a/deps/v8/src/codegen/source-position.h b/deps/v8/src/codegen/source-position.h
index c77bad2539..5d6b1a93f7 100644
--- a/deps/v8/src/codegen/source-position.h
+++ b/deps/v8/src/codegen/source-position.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-class Code;
+class InstructionStream;
class OptimizedCompilationInfo;
class Script;
class SharedFunctionInfo;
@@ -78,11 +78,12 @@ class SourcePosition final {
return ExternalFileIdField::decode(value_);
}
- // Assumes that the code object is optimized
- std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
+ // Assumes that the code object is optimized.
+ std::vector<SourcePositionInfo> InliningStack(Isolate* isolate,
+ Code code) const;
std::vector<SourcePositionInfo> InliningStack(
- OptimizedCompilationInfo* cinfo) const;
- SourcePositionInfo FirstInfo(Handle<Code> code) const;
+ Isolate* isolate, OptimizedCompilationInfo* cinfo) const;
+ SourcePositionInfo FirstInfo(Isolate* isolate, Code code) const;
void Print(std::ostream& out, Code code) const;
void PrintJson(std::ostream& out) const;
@@ -174,8 +175,16 @@ struct InliningPosition {
int inlined_function_id;
};
+struct WasmInliningPosition {
+ // Non-canonicalized (module-specific) index of the inlined function.
+ int inlinee_func_index;
+ // Source location of the caller.
+ SourcePosition caller_pos;
+};
+
struct SourcePositionInfo {
- SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f);
+ SourcePositionInfo(Isolate* isolate, SourcePosition pos,
+ Handle<SharedFunctionInfo> f);
SourcePosition position;
Handle<SharedFunctionInfo> shared;
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 8bddf1df9a..dc51e84ff4 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -20,35 +20,6 @@ bool CpuFeatures::SupportsOptimizer() { return true; }
// -----------------------------------------------------------------------------
// Implementation of Assembler
-void Assembler::emitl(uint32_t x) {
- WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
- pc_ += sizeof(uint32_t);
-}
-
-void Assembler::emitq(uint64_t x) {
- WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
- pc_ += sizeof(uint64_t);
-}
-
-void Assembler::emitw(uint16_t x) {
- WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
- pc_ += sizeof(uint16_t);
-}
-
-void Assembler::emit(Immediate x) {
- if (!RelocInfo::IsNoInfo(x.rmode_)) {
- RecordRelocInfo(x.rmode_);
- }
- emitl(x.value_);
-}
-
-void Assembler::emit(Immediate64 x) {
- if (!RelocInfo::IsNoInfo(x.rmode_)) {
- RecordRelocInfo(x.rmode_);
- }
- emitq(static_cast<uint64_t>(x.value_));
-}
-
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}
@@ -66,11 +37,11 @@ void Assembler::emit_rex_64(XMMRegister reg, XMMRegister rm_reg) {
}
void Assembler::emit_rex_64(Register reg, Operand op) {
- emit(0x48 | reg.high_bit() << 2 | op.data().rex);
+ emit(0x48 | reg.high_bit() << 2 | op.rex());
}
void Assembler::emit_rex_64(XMMRegister reg, Operand op) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | op.data().rex);
+ emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex());
}
void Assembler::emit_rex_64(Register rm_reg) {
@@ -78,19 +49,19 @@ void Assembler::emit_rex_64(Register rm_reg) {
emit(0x48 | rm_reg.high_bit());
}
-void Assembler::emit_rex_64(Operand op) { emit(0x48 | op.data().rex); }
+void Assembler::emit_rex_64(Operand op) { emit(0x48 | op.rex()); }
void Assembler::emit_rex_32(Register reg, Register rm_reg) {
emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}
void Assembler::emit_rex_32(Register reg, Operand op) {
- emit(0x40 | reg.high_bit() << 2 | op.data().rex);
+ emit(0x40 | reg.high_bit() << 2 | op.rex());
}
void Assembler::emit_rex_32(Register rm_reg) { emit(0x40 | rm_reg.high_bit()); }
-void Assembler::emit_rex_32(Operand op) { emit(0x40 | op.data().rex); }
+void Assembler::emit_rex_32(Operand op) { emit(0x40 | op.rex()); }
void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
@@ -98,12 +69,12 @@ void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
}
void Assembler::emit_optional_rex_32(Register reg, Operand op) {
- byte rex_bits = reg.high_bit() << 2 | op.data().rex;
+ byte rex_bits = reg.high_bit() << 2 | op.rex();
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(XMMRegister reg, Operand op) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | op.data().rex;
+ byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex();
if (rex_bits != 0) emit(0x40 | rex_bits);
}
@@ -131,7 +102,7 @@ void Assembler::emit_optional_rex_32(XMMRegister rm_reg) {
}
void Assembler::emit_optional_rex_32(Operand op) {
- if (op.data().rex != 0) emit(0x40 | op.data().rex);
+ if (op.rex() != 0) emit(0x40 | op.rex());
}
void Assembler::emit_optional_rex_8(Register reg) {
@@ -159,7 +130,7 @@ void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
- byte rxb = static_cast<byte>(~((reg.high_bit() << 2) | rm.data().rex)) << 5;
+ byte rxb = static_cast<byte>(~((reg.high_bit() << 2) | rm.rex())) << 5;
emit(rxb | m);
}
@@ -201,7 +172,7 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Register rm,
void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
- if (rm.data().rex || mm != k0F || w != kW0) {
+ if (rm.rex() || mm != k0F || w != kW0) {
emit_vex3_byte0();
emit_vex3_byte1(reg, rm, mm);
emit_vex3_byte2(w, vreg, l, pp);
@@ -255,7 +226,7 @@ int Assembler::deserialization_special_target_size(
return kSpecialTargetSize;
}
-Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
+Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
return GetCodeTarget(ReadUnalignedValue<int32_t>(pc));
}
@@ -274,7 +245,8 @@ Builtin Assembler::target_builtin_at(Address pc) {
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
- if (IsCodeTarget(rmode_) || IsNearBuiltinEntry(rmode_)) {
+ if (IsCodeTarget(rmode_) || IsNearBuiltinEntry(rmode_) ||
+ IsWasmStubCall(rmode_)) {
WriteUnalignedValue(
pc_, ReadUnalignedValue<int32_t>(pc_) - static_cast<int32_t>(delta));
} else if (IsInternalReference(rmode_)) {
@@ -285,7 +257,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsNearBuiltinEntry(rmode_) ||
- IsWasmCall(rmode_));
+ IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -312,10 +284,11 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
DCHECK(!HAS_SMI_TAG(compressed));
- Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
- compressed));
- // Embedding of compressed Code objects must not happen when external code
- // space is enabled, because CodeDataContainers must be used instead.
+ Object obj(
+ V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
+ // Embedding of compressed InstructionStream objects must not happen when
+ // external code space is enabled, because Code objects must be used
+ // instead.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
!IsCodeSpaceObject(HeapObject::cast(obj)));
return HeapObject::cast(obj);
@@ -367,7 +340,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
DCHECK(COMPRESS_POINTERS_BOOL);
- Tagged_t tagged = V8HeapCompressionScheme::CompressTagged(target.ptr());
+ Tagged_t tagged = V8HeapCompressionScheme::CompressObject(target.ptr());
WriteUnalignedValue(pc_, tagged);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
@@ -376,8 +349,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
- if (!host().is_null() && !v8_flags.disable_write_barriers) {
- WriteBarrierForCode(host(), this, target, write_barrier_mode);
+ if (!instruction_stream().is_null() && !v8_flags.disable_write_barriers) {
+ WriteBarrierForCode(instruction_stream(), this, target, write_barrier_mode);
}
}
@@ -398,7 +371,7 @@ void RelocInfo::WipeOut() {
} else if (IsCompressedEmbeddedObject(rmode_)) {
Address smi_address = Smi::FromInt(0).ptr();
WriteUnalignedValue(pc_,
- V8HeapCompressionScheme::CompressTagged(smi_address));
+ V8HeapCompressionScheme::CompressObject(smi_address));
} else if (IsCodeTarget(rmode_) || IsNearBuiltinEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(pc_, constant_pool_,
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 0cd2ea907c..b271271de5 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -166,14 +166,14 @@ uint32_t RelocInfo::wasm_call_tag() const {
// Implementation of Operand
Operand::Operand(Operand operand, int32_t offset) {
- DCHECK_GE(operand.data().len, 1);
+ DCHECK_GE(operand.memory().len, 1);
// Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.data().buf[0];
+ byte modrm = operand.memory().buf[0];
DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
bool has_sib = ((modrm & 0x07) == 0x04);
byte mode = modrm & 0xC0;
int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
+ int base_reg = (has_sib ? operand.memory().buf[1] : modrm) & 0x07;
// Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
// displacement.
bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
@@ -181,60 +181,62 @@ Operand::Operand(Operand operand, int32_t offset) {
if (mode == 0x80 || is_baseless) {
// Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
disp_value = ReadUnalignedValue<int32_t>(
- reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
+ reinterpret_cast<Address>(&operand.memory().buf[disp_offset]));
} else if (mode == 0x40) {
// Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
+ disp_value = static_cast<signed char>(operand.memory().buf[disp_offset]);
}
// Write new operand with same registers, but with modified displacement.
DCHECK(offset >= 0 ? disp_value + offset > disp_value
: disp_value + offset < disp_value); // No overflow.
disp_value += offset;
- data_.rex = operand.data().rex;
+ memory_.rex = operand.memory().rex;
if (!is_int8(disp_value) || is_baseless) {
// Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
- data_.len = disp_offset + 4;
- WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
+ memory_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
+ memory_.len = disp_offset + 4;
+ WriteUnalignedValue(reinterpret_cast<Address>(&memory_.buf[disp_offset]),
disp_value);
} else if (disp_value != 0 || (base_reg == 0x05)) {
// Need 8 bits of displacement.
- data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
- data_.len = disp_offset + 1;
- data_.buf[disp_offset] = static_cast<byte>(disp_value);
+ memory_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
+ memory_.len = disp_offset + 1;
+ memory_.buf[disp_offset] = static_cast<byte>(disp_value);
} else {
// Need no displacement.
- data_.buf[0] = (modrm & 0x3F); // Mode 0.
- data_.len = disp_offset;
+ memory_.buf[0] = (modrm & 0x3F); // Mode 0.
+ memory_.len = disp_offset;
}
if (has_sib) {
- data_.buf[1] = operand.data().buf[1];
+ memory_.buf[1] = operand.memory().buf[1];
}
}
bool Operand::AddressUsesRegister(Register reg) const {
+ DCHECK(!is_label_operand());
int code = reg.code();
- DCHECK_NE(data_.buf[0] & 0xC0, 0xC0); // Always a memory operand.
+ DCHECK_NE(memory_.buf[0] & 0xC0, 0xC0);
// Start with only low three bits of base register. Initial decoding
// doesn't distinguish on the REX.B bit.
- int base_code = data_.buf[0] & 0x07;
+ int base_code = memory_.buf[0] & 0x07;
if (base_code == rsp.code()) {
// SIB byte present in buf_[1].
// Check the index register from the SIB byte + REX.X prefix.
- int index_code = ((data_.buf[1] >> 3) & 0x07) | ((data_.rex & 0x02) << 2);
+ int index_code =
+ ((memory_.buf[1] >> 3) & 0x07) | ((memory_.rex & 0x02) << 2);
// Index code (including REX.X) of 0x04 (rsp) means no index register.
if (index_code != rsp.code() && index_code == code) return true;
// Add REX.B to get the full base register code.
- base_code = (data_.buf[1] & 0x07) | ((data_.rex & 0x01) << 3);
+ base_code = (memory_.buf[1] & 0x07) | ((memory_.rex & 0x01) << 3);
// A base register of 0x05 (rbp) with mod = 0 means no base register.
- if (base_code == rbp.code() && ((data_.buf[0] & 0xC0) == 0)) return false;
+ if (base_code == rbp.code() && ((memory_.buf[0] & 0xC0) == 0)) return false;
return code == base_code;
} else {
// A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
// no base register.
- if (base_code == rbp.code() && ((data_.buf[0] & 0xC0) == 0)) return false;
- base_code |= ((data_.rex & 0x01) << 3);
+ if (base_code == rbp.code() && ((memory_.buf[0] & 0xC0) == 0)) return false;
+ base_code |= ((memory_.rex & 0x01) << 3);
return code == base_code;
}
}
@@ -291,29 +293,35 @@ bool ConstPool::IsMoveRipRelative(Address instr) {
void ConstPool::Clear() { entries_.clear(); }
void ConstPool::PatchEntries() {
- for (EntryMap::iterator iter = entries_.begin(); iter != entries_.end();
- iter = entries_.upper_bound(iter->first)) {
- std::pair<EntryMap::iterator, EntryMap::iterator> range =
- entries_.equal_range(iter->first);
- int constant_entry_offset = 0;
- for (EntryMap::iterator it = range.first; it != range.second; it++) {
- if (it == range.first) {
- constant_entry_offset = it->second;
- continue;
- }
-
- DCHECK_GT(constant_entry_offset, 0);
- DCHECK_LT(constant_entry_offset, it->second);
- int32_t disp32 =
- constant_entry_offset - (it->second + kRipRelativeDispSize);
- Address disp_addr = assm_->addr_at(it->second);
-
- // Check if the instruction is actually a rip-relative move.
- DCHECK(IsMoveRipRelative(disp_addr - kMoveRipRelativeDispOffset));
- // The displacement of the rip-relative move should be 0 before patching.
- DCHECK(ReadUnalignedValue<uint32_t>(disp_addr) == 0);
- WriteUnalignedValue(disp_addr, disp32);
+ auto iter = entries_.begin();
+ if (iter == entries_.end()) return;
+
+ // Read off the first value/offset pair before starting the loop proper.
+ std::pair<uint64_t, int> first_entry_of_range = *iter;
+ while (++iter != entries_.end()) {
+ // Check if we've entered a new set of values.
+ if (first_entry_of_range.first != iter->first) {
+ // Make sure that this iterator is both the (exclusive) end of the
+ // previous value's equal range, and the start of this value's equal
+ // range.
+ DCHECK_EQ(entries_.equal_range(first_entry_of_range.first).second, iter);
+ DCHECK_EQ(entries_.equal_range(iter->first).first, iter);
+ first_entry_of_range = *iter;
+ continue;
}
+ int constant_entry_offset = first_entry_of_range.second;
+
+ DCHECK_GT(constant_entry_offset, 0);
+ DCHECK_LT(constant_entry_offset, iter->second);
+ int32_t disp32 =
+ constant_entry_offset - (iter->second + kRipRelativeDispSize);
+ Address disp_addr = assm_->addr_at(iter->second);
+
+ // Check if the instruction is actually a rip-relative move.
+ DCHECK(IsMoveRipRelative(disp_addr - kMoveRipRelativeDispOffset));
+ // The displacement of the rip-relative move should be 0 before patching.
+ DCHECK(ReadUnalignedValue<uint32_t>(disp_addr) == 0);
+ WriteUnalignedValue(disp_addr, disp32);
}
Clear();
}
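
The rewritten loop above walks the multimap once, treating the first entry of each key's equal range as the constant's location and patching every later duplicate against it. A condensed sketch of the same grouping pattern over a std::multimap (plain ints standing in for pc offsets, no assembler state):

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Group consecutive equal keys in one pass, mirroring ConstPool::PatchEntries
// above: the first offset per value is the canonical constant entry, and every
// later offset for the same value would be patched to point at it.
std::vector<std::pair<int, int>> CollectPatches(
    const std::multimap<uint64_t, int>& entries) {
  std::vector<std::pair<int, int>> patches;  // {constant_offset, use_offset}
  auto iter = entries.begin();
  if (iter == entries.end()) return patches;
  std::pair<uint64_t, int> first_entry_of_range = *iter;
  while (++iter != entries.end()) {
    if (first_entry_of_range.first != iter->first) {
      first_entry_of_range = *iter;  // New value: new canonical entry.
      continue;
    }
    patches.push_back({first_entry_of_range.second, iter->second});
  }
  return patches;
}

int main() {
  std::multimap<uint64_t, int> entries{{1, 10}, {1, 20}, {1, 30}, {2, 40}};
  auto patches = CollectPatches(entries);
  assert(patches.size() == 2);  // Offsets 20 and 30 reuse the entry at 10.
  assert(patches[0] == std::make_pair(10, 20));
}
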
@@ -361,13 +369,13 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
- // raw workflow to create Code objects (mostly in tests), add another Align
- // call here. It does no harm - the end of the Code object is aligned to the
- // (larger) kCodeAlignment anyways.
+ // raw workflow to create InstructionStream objects (mostly in tests), add
+ // another Align call here. It does no harm - the end of the InstructionStream
+ // object is aligned to the (larger) kCodeAlignment anyways.
// TODO(jgruber): Consider moving responsibility for proper alignment to
// metadata table builders (safepoint, handler, constant pool, code
// comments).
- DataAlign(Code::kMetadataAlignment);
+ DataAlign(InstructionStream::kMetadataAlignment);
PatchConstPool();
DCHECK(constpool_.IsEmpty());
@@ -584,36 +592,65 @@ void Assembler::GrowBuffer() {
}
void Assembler::emit_operand(int code, Operand adr) {
- DCHECK(is_uint3(code));
- const unsigned length = adr.data().len;
- DCHECK_GT(length, 0);
-
- // Emit updated ModR/M byte containing the given register.
- DCHECK_EQ(adr.data().buf[0] & 0x38, 0);
- *pc_++ = adr.data().buf[0] | code << 3;
-
- // Recognize RIP relative addressing.
- if (adr.data().buf[0] == 5) {
- DCHECK_EQ(9u, length);
- Label* label = ReadUnalignedValue<Label*>(
- reinterpret_cast<Address>(&adr.data().buf[1]));
- if (label->is_bound()) {
- int offset =
- label->pos() - pc_offset() - sizeof(int32_t) + adr.data().addend;
- DCHECK_GE(0, offset);
- emitl(offset);
- } else if (label->is_linked()) {
- emitl(label->pos());
- label->link_to(pc_offset() - sizeof(int32_t));
- } else {
- DCHECK(label->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- label->link_to(current);
+ // Redirect to {emit_label_operand} if {adr} contains a label.
+ if (adr.is_label_operand()) {
+ emit_label_operand(code, adr.label().label, adr.label().addend);
+ return;
+ }
+
+ const size_t length = adr.memory().len;
+ V8_ASSUME(1 <= length && length <= 6);
+
+ // Compute the opcode extension to be encoded in the ModR/M byte.
+ V8_ASSUME(0 <= code && code <= 7);
+ DCHECK((adr.memory().buf[0] & 0x38) == 0);
+ uint8_t opcode_extension = code << 3;
+
+ // Use an optimized routine for copying the 1-6 bytes into the assembler
+ // buffer. We execute up to two read and write instructions, while also
+ // minimizing the number of branches.
+ Address src = reinterpret_cast<Address>(adr.memory().buf);
+ Address dst = reinterpret_cast<Address>(pc_);
+ if (length > 4) {
+ // Length is 5 or 6.
+ // Copy range [0, 3] and [len-2, len-1] (might overlap).
+ uint32_t lower_four_bytes = ReadUnalignedValue<uint32_t>(src);
+ lower_four_bytes |= opcode_extension;
+ uint16_t upper_two_bytes = ReadUnalignedValue<uint16_t>(src + length - 2);
+ WriteUnalignedValue<uint16_t>(dst + length - 2, upper_two_bytes);
+ WriteUnalignedValue<uint32_t>(dst, lower_four_bytes);
+ } else {
+ // Length is in [1, 3].
+ uint8_t first_byte = ReadUnalignedValue<uint8_t>(src);
+ first_byte |= opcode_extension;
+ if (length != 1) {
+ // Copy bytes [len-2, len-1].
+ uint16_t upper_two_bytes = ReadUnalignedValue<uint16_t>(src + length - 2);
+ WriteUnalignedValue<uint16_t>(dst + length - 2, upper_two_bytes);
}
+ WriteUnalignedValue<uint8_t>(dst, first_byte);
+ }
+
+ pc_ += length;
+}
+
+void Assembler::emit_label_operand(int code, Label* label, int addend) {
+ DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
+ V8_ASSUME(0 <= code && code <= 7);
+
+ *pc_++ = 5 | (code << 3);
+ if (label->is_bound()) {
+ int offset = label->pos() - pc_offset() - sizeof(int32_t) + addend;
+ DCHECK_GE(0, offset);
+ emitl(offset);
+ } else if (label->is_linked()) {
+ emitl(label->pos());
+ label->link_to(pc_offset() - sizeof(int32_t));
} else {
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) *pc_++ = adr.data().buf[i];
+ DCHECK(label->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ label->link_to(current);
}
}
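
The rewritten emit_operand above replaces the old byte-by-byte copy loop with at most two fixed-width reads and writes that may overlap in the middle. The same trick in isolation (plain buffers and std::memcpy instead of the assembler's unaligned accessors):

#include <cassert>
#include <cstdint>
#include <cstring>

// Copy a small operand encoding (ModRM + optional SIB + 0/1/4-byte
// displacement) with at most two fixed-width memory operations, as in
// Assembler::emit_operand above. Valid lengths are 1, 2, 3, 5 and 6; a
// length of 4 cannot occur for this encoding.
void CopySmall(uint8_t* dst, const uint8_t* src, size_t length) {
  assert(length >= 1 && length <= 6 && length != 4);
  if (length > 4) {
    // Length 5 or 6: copy bytes [0, 3] and [length - 2, length - 1]; the two
    // ranges may overlap, which is harmless since both come from src.
    uint32_t lo;
    uint16_t hi;
    std::memcpy(&lo, src, sizeof(lo));
    std::memcpy(&hi, src + length - 2, sizeof(hi));
    std::memcpy(dst + length - 2, &hi, sizeof(hi));
    std::memcpy(dst, &lo, sizeof(lo));
  } else {
    // Length 1, 2 or 3: first byte plus (for 2 and 3) the last two bytes.
    uint8_t first = src[0];
    if (length != 1) {
      uint16_t hi;
      std::memcpy(&hi, src + length - 2, sizeof(hi));
      std::memcpy(dst + length - 2, &hi, sizeof(hi));
    }
    dst[0] = first;
  }
}

int main() {
  uint8_t src[6] = {1, 2, 3, 4, 5, 6};
  uint8_t dst[6] = {0};
  CopySmall(dst, src, 5);
  assert(dst[0] == 1 && dst[3] == 4 && dst[4] == 5 && dst[5] == 0);
}
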
@@ -989,9 +1026,8 @@ void Assembler::call(Label* L) {
}
}
-void Assembler::call(Handle<CodeT> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(FromCodeT(*target).IsExecutable());
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@@ -1402,7 +1438,7 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
emitl(static_cast<int32_t>(entry));
}
-void Assembler::j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode) {
+void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(is_uint4(cc));
// 0000 1111 1000 tttn #32-bit disp.
@@ -1481,7 +1517,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
}
}
-void Assembler::jmp(Handle<CodeT> target, RelocInfo::Mode rmode) {
+void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
// 1110 1001 #32-bit disp.
@@ -4456,7 +4492,8 @@ void Assembler::dq(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code(),
+ InstructionStream());
reloc_info_writer.Write(&rinfo);
}
@@ -4464,7 +4501,8 @@ const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::NEAR_BUILTIN_ENTRY) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::WASM_CALL);
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 8f77e38c1a..a8b7d77536 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -63,7 +63,7 @@ class MaglevSafepointTableBuilder;
// Utility functions
-enum Condition {
+enum Condition : uint8_t {
overflow = 0,
no_overflow = 1,
below = 2,
@@ -88,6 +88,22 @@ enum Condition {
not_zero = not_equal,
sign = negative,
not_sign = positive,
+
+ // Unified cross-platform condition names/aliases.
+ kEqual = equal,
+ kNotEqual = not_equal,
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+ kUnsignedLessThan = below,
+ kUnsignedGreaterThan = above,
+ kUnsignedLessThanEqual = below_equal,
+ kUnsignedGreaterThanEqual = above_equal,
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+ kZero = equal,
+ kNotZero = not_equal,
};
// Returns the equivalent of !cc.
@@ -160,13 +176,46 @@ enum ScaleFactor : int8_t {
class V8_EXPORT_PRIVATE Operand {
public:
- struct Data {
- byte rex = 0;
- byte buf[9] = {0};
- byte len = 1; // number of bytes of buf_ in use.
- int8_t addend = 0; // for rip + offset + addend.
+ struct LabelOperand {
+ // The first two fields are shared in {LabelOperand} and {MemoryOperand},
+ // but cannot be pulled out of the union, because otherwise the compiler
+ // introduces additional padding between them and the union, increasing the
+ // size unnecessarily.
+ bool is_label_operand = true;
+ byte rex = 0; // REX prefix, always zero for label operands.
+
+ int8_t addend; // Used for rip + offset + addend operands.
+ Label* label;
+ };
+
+ struct MemoryOperand {
+ bool is_label_operand = false;
+ byte rex = 0; // REX prefix.
+
+ // Register (1 byte) + SIB (0 or 1 byte) + displacement (0, 1, or 4 byte).
+ byte buf[6] = {0};
+ // Number of bytes of buf in use.
+ // We must keep {len} and {buf} together for the compiler to elide the
+ // stack canary protection code.
+ size_t len = 1;
};
+ // Assert that the shared {is_label_operand} and {rex} fields have the same
+ // type and offset in both union variants.
+ static_assert(std::is_same<decltype(LabelOperand::is_label_operand),
+ decltype(MemoryOperand::is_label_operand)>::value);
+ static_assert(offsetof(LabelOperand, is_label_operand) ==
+ offsetof(MemoryOperand, is_label_operand));
+ static_assert(std::is_same<decltype(LabelOperand::rex),
+ decltype(MemoryOperand::rex)>::value);
+ static_assert(offsetof(LabelOperand, rex) == offsetof(MemoryOperand, rex));
+
+ static_assert(sizeof(MemoryOperand::len) == kSystemPointerSize,
+ "Length must have native word size to avoid spurious reloads "
+ "after writing it.");
+ static_assert(offsetof(MemoryOperand, len) % kSystemPointerSize == 0,
+ "Length must be aligned for fast access.");
+
// [base + disp/r]
V8_INLINE constexpr Operand(Register base, int32_t disp) {
if (base == rsp || base == r12) {
@@ -218,17 +267,39 @@ class V8_EXPORT_PRIVATE Operand {
// [rip + disp/r]
V8_INLINE explicit Operand(Label* label, int addend = 0) {
- data_.addend = addend;
DCHECK_NOT_NULL(label);
DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
- set_modrm(0, rbp);
- set_disp64(reinterpret_cast<intptr_t>(label));
+ label_ = {};
+ label_.label = label;
+ label_.addend = addend;
}
Operand(const Operand&) V8_NOEXCEPT = default;
Operand& operator=(const Operand&) V8_NOEXCEPT = default;
- const Data& data() const { return data_; }
+ V8_INLINE constexpr bool is_label_operand() const {
+ // Since this field is in the common initial sequence of {label_} and
+ // {memory_}, the access is valid regardless of the active union member.
+ return memory_.is_label_operand;
+ }
+
+ V8_INLINE constexpr byte rex() const {
+ // Since both fields are in the common initial sequence of {label_} and
+ // {memory_}, the access is valid regardless of the active union member.
+ // Label operands always have a REX prefix of zero.
+ V8_ASSUME(!memory_.is_label_operand || memory_.rex == 0);
+ return memory_.rex;
+ }
+
+ V8_INLINE const MemoryOperand& memory() const {
+ DCHECK(!is_label_operand());
+ return memory_;
+ }
+
+ V8_INLINE const LabelOperand& label() const {
+ DCHECK(is_label_operand());
+ return label_;
+ }
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
@@ -236,46 +307,43 @@ class V8_EXPORT_PRIVATE Operand {
private:
V8_INLINE constexpr void set_modrm(int mod, Register rm_reg) {
+ DCHECK(!is_label_operand());
DCHECK(is_uint2(mod));
- data_.buf[0] = mod << 6 | rm_reg.low_bits();
+ memory_.buf[0] = mod << 6 | rm_reg.low_bits();
// Set REX.B to the high bit of rm.code().
- data_.rex |= rm_reg.high_bit();
+ memory_.rex |= rm_reg.high_bit();
}
V8_INLINE constexpr void set_sib(ScaleFactor scale, Register index,
Register base) {
- DCHECK_EQ(data_.len, 1);
+ V8_ASSUME(memory_.len == 1);
DCHECK(is_uint2(scale));
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
DCHECK(index != rsp || base == rsp || base == r12);
- data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- data_.rex |= index.high_bit() << 1 | base.high_bit();
- data_.len = 2;
+ memory_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
+ memory_.rex |= index.high_bit() << 1 | base.high_bit();
+ memory_.len = 2;
}
V8_INLINE constexpr void set_disp8(int disp) {
+ V8_ASSUME(memory_.len == 1 || memory_.len == 2);
DCHECK(is_int8(disp));
- DCHECK(data_.len == 1 || data_.len == 2);
- data_.buf[data_.len] = disp;
- data_.len += sizeof(int8_t);
+ memory_.buf[memory_.len] = disp;
+ memory_.len += sizeof(int8_t);
}
V8_INLINE void set_disp32(int disp) {
- DCHECK(data_.len == 1 || data_.len == 2);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(int32_t);
- }
-
- V8_INLINE void set_disp64(int64_t disp) {
- DCHECK_EQ(1, data_.len);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ V8_ASSUME(memory_.len == 1 || memory_.len == 2);
+ Address p = reinterpret_cast<Address>(&memory_.buf[memory_.len]);
WriteUnalignedValue(p, disp);
- data_.len += sizeof(disp);
+ memory_.len += sizeof(int32_t);
}
- Data data_;
+ union {
+ LabelOperand label_;
+ MemoryOperand memory_ = {};
+ };
};
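
The accessors above rely on C++'s common-initial-sequence rule for standard-layout structs: because LabelOperand and MemoryOperand start with the same two members, those members may be read through either union variant to find out which one is active. A reduced illustration of the rule (toy types, not the Operand layout):

#include <cassert>
#include <cstdint>

struct LabelLike {
  bool is_label;   // Shared leading member.
  uint8_t rex;     // Shared leading member.
  void* label;
};

struct MemoryLike {
  bool is_label;   // Shared leading member.
  uint8_t rex;     // Shared leading member.
  uint8_t buf[6];
};

union OperandLike {
  LabelLike label;
  MemoryLike memory;
};

int main() {
  OperandLike op;
  op.memory = MemoryLike{false, 0x41, {0x05}};  // memory becomes active.
  // Reading the shared leading members through the other variant is valid for
  // standard-layout structs with a common initial sequence, which is what
  // is_label_operand() and rex() depend on.
  assert(op.label.is_label == false);
  assert(op.label.rex == 0x41);
}
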
class V8_EXPORT_PRIVATE Operand256 : public Operand {
@@ -379,8 +447,7 @@ class ConstPool {
Assembler* assm_;
// Values, pc offsets of entries.
- using EntryMap = std::multimap<uint64_t, int>;
- EntryMap entries_;
+ std::multimap<uint64_t, int> entries_;
// Number of bytes taken up by the displacement of rip-relative addressing.
static constexpr int kRipRelativeDispSize = 4; // 32-bit displacement.
@@ -443,12 +510,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Read/Modify the code target in the relative branch/call instruction at pc.
// On the x64 architecture, we use relative jumps with a 32-bit displacement
- // to jump to other Code objects in the Code space in the heap.
- // Jumps to C functions are done indirectly through a 64-bit register holding
- // the absolute address of the target.
- // These functions convert between absolute Addresses of Code objects and
- // the relative displacements stored in the code.
- // The isolate argument is unused (and may be nullptr) when skipping flushing.
+ // to jump to other InstructionStream objects in the InstructionStream space
+ // in the heap. Jumps to C functions are done indirectly through a 64-bit
+ // register holding the absolute address of the target. These functions
+ // convert between absolute Addresses of InstructionStream objects and the
+ // relative displacements stored in the code. The isolate argument is unused
+ // (and may be nullptr) when skipping flushing.
static inline Address target_address_at(Address pc, Address constant_pool);
static inline void set_target_address_at(
Address pc, Address constant_pool, Address target,
@@ -474,7 +541,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- inline Handle<CodeT> code_target_object_handle_at(Address pc);
+ inline Handle<Code> code_target_object_handle_at(Address pc);
inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
// Number of bytes taken up by the branch target in the code.
@@ -501,7 +568,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
// ---------------------------------------------------------------------------
- // Code generation
+ // InstructionStream generation
//
// Function names correspond one-to-one to x64 instruction mnemonics.
// Unless specified otherwise, instructions operate on 64-bit operands.
@@ -823,7 +890,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void near_jmp(intptr_t disp, RelocInfo::Mode rmode);
void near_j(Condition cc, intptr_t disp, RelocInfo::Mode rmode);
- void call(Handle<CodeT> target,
+ void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Call near absolute indirect, address in register
@@ -834,7 +901,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Use a 32-bit signed displacement.
// Unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(Handle<CodeT> target, RelocInfo::Mode rmode);
+ void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
void jmp(Register adr);
@@ -847,7 +914,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional jumps
void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, Address entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<CodeT> target, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Floating-point operations
void fld(int i);
@@ -1524,9 +1591,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vcvtdq2pd(YMMRegister dst, XMMRegister src) {
+ vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG, AVX);
+ }
+ void vcvtdq2pd(YMMRegister dst, Operand src) {
+ vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG, AVX);
+ }
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vinstr(0x5b, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vcvttps2dq(YMMRegister dst, YMMRegister src) {
+ vinstr(0x5b, dst, ymm0, src, kF3, k0F, kWIG, AVX);
+ }
+ void vcvttps2dq(YMMRegister dst, Operand src) {
+ vinstr(0x5b, dst, ymm0, src, kF3, k0F, kWIG, AVX);
+ }
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
@@ -2088,12 +2167,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
- inline bool buffer_overflow() const {
- return pc_ >= reloc_info_writer.pos() - kGap;
- }
+ bool buffer_overflow() const { return available_space() < kGap; }
// Get the number of bytes available in the buffer.
- inline int available_space() const {
+ int available_space() const {
+ DCHECK_GE(reloc_info_writer.pos(), pc_);
+ DCHECK_GE(kMaxInt, reloc_info_writer.pos() - pc_);
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
@@ -2126,15 +2205,29 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
WriteUnalignedValue(addr_at(pos), x);
}
- // code emission
- void GrowBuffer();
+ // InstructionStream emission.
+ V8_NOINLINE V8_PRESERVE_MOST void GrowBuffer();
- void emit(byte x) { *pc_++ = x; }
- inline void emitl(uint32_t x);
- inline void emitq(uint64_t x);
- inline void emitw(uint16_t x);
- inline void emit(Immediate x);
- inline void emit(Immediate64 x);
+ template <typename T>
+ static uint8_t* emit(uint8_t* __restrict pc, T t) {
+ WriteUnalignedValue(reinterpret_cast<Address>(pc), t);
+ return pc + sizeof(T);
+ }
+
+ void emit(uint8_t x) { pc_ = emit(pc_, x); }
+ void emitw(uint16_t x) { pc_ = emit(pc_, x); }
+ void emitl(uint32_t x) { pc_ = emit(pc_, x); }
+ void emitq(uint64_t x) { pc_ = emit(pc_, x); }
+
+ void emit(Immediate x) {
+ if (!RelocInfo::IsNoInfo(x.rmode_)) RecordRelocInfo(x.rmode_);
+ emitl(x.value_);
+ }
+
+ void emit(Immediate64 x) {
+ if (!RelocInfo::IsNoInfo(x.rmode_)) RecordRelocInfo(x.rmode_);
+ emitq(static_cast<uint64_t>(x.value_));
+ }
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
@@ -2287,10 +2380,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also used to encode
- // a three-bit opcode extension into the ModR/M byte.
+ // 1- or 4-byte offset for a memory operand.
+ // Also used to encode a three-bit opcode extension into the ModR/M byte.
void emit_operand(int rm, Operand adr);
+ // Emit a RIP-relative operand.
+ // Also used to encode a three-bit opcode extension into the ModR/M byte.
+ V8_NOINLINE void emit_label_operand(int rm, Label* label, int addend = 0);
+
// Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
void emit_modrm(Register reg, Register rm_reg) {
emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
diff --git a/deps/v8/src/codegen/x64/constants-x64.h b/deps/v8/src/codegen/x64/constants-x64.h
index 775abecd9f..0ff9fcfa66 100644
--- a/deps/v8/src/codegen/x64/constants-x64.h
+++ b/deps/v8/src/codegen/x64/constants-x64.h
@@ -9,13 +9,18 @@
namespace v8 {
namespace internal {
-// Actual value of root register is offset from the root array's start
+
+// The actual value of the kRootRegister is offset from the IsolateData's start
// to take advantage of negative displacement values.
-// TODO(sigurds): Choose best value.
-// TODO(ishell): Choose best value for ptr-compr.
-constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 128 : 0;
+// On x64, the smallest operand encoding allows int8 offsets, so we select the
+// bias such that the first 32 8-byte slots of IsolateData can be encoded this
+// way.
+constexpr int kRootRegisterBias = 128;
+// The maximum size of the code range s.t. pc-relative calls are possible
+// between all Code objects in the range.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048;
+
} // namespace internal
} // namespace v8
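
As a quick sanity check of the bias described above (using only the generic int8 range, no values read from V8 headers): with kRootRegister pointing 128 bytes past the start of IsolateData, an 8-bit signed displacement of -128..127 reaches IsolateData offsets 0..255, i.e. exactly the first 32 8-byte slots.

// Compile-time check of the arithmetic behind kRootRegisterBias = 128.
constexpr int kBias = 128;
constexpr int kInt8Min = -128;
constexpr int kInt8Max = 127;
static_assert(kBias + kInt8Min == 0, "first reachable IsolateData offset");
static_assert(kBias + kInt8Max == 255, "last reachable IsolateData offset");
static_assert((kBias + kInt8Max + 1) / 8 == 32, "covers 32 8-byte slots");
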
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index 39f190747d..cc662c4899 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -112,6 +112,9 @@ constexpr Register StoreDescriptor::SlotRegister() { return rdi; }
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
// static
+constexpr Register DefineKeyedOwnDescriptor::FlagsRegister() { return r11; }
+
+// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return r11; }
// static
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index e7207766b5..f362456e7f 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -25,6 +25,7 @@
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/sandbox/external-pointer.h"
@@ -81,16 +82,16 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
}
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
+ LoadTaggedField(
destination,
FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
DCHECK(is_int32(offset));
if (offset == 0) {
@@ -100,11 +101,11 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
movq(destination, Operand(kRootRegister, offset));
}
-void TurboAssembler::LoadAddress(Register destination,
+void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && options().enable_root_relative_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
@@ -124,7 +125,7 @@ void TurboAssembler::LoadAddress(Register destination,
Move(destination, source);
}
-Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
+Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t delta =
@@ -158,12 +159,29 @@ void MacroAssembler::PushAddress(ExternalReference source) {
Push(kScratchRegister);
}
-Operand TurboAssembler::RootAsOperand(RootIndex index) {
+Operand MacroAssembler::RootAsOperand(RootIndex index) {
DCHECK(root_array_available());
return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
+ static_assert(!CanBeImmediate(RootIndex::kUndefinedValue) ||
+ std::is_same<Tagged_t, uint32_t>::value);
+ if (CanBeImmediate(index)) {
+ mov_tagged(destination,
+ Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
+ return;
+ }
+ DCHECK(root_array_available_);
+ movq(destination, RootAsOperand(index));
+}
+
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
+ if (CanBeImmediate(index)) {
+ DecompressTagged(destination,
+ static_cast<uint32_t>(ReadOnlyRootPtr(index)));
+ return;
+ }
DCHECK(root_array_available_);
movq(destination, RootAsOperand(index));
}
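
LoadRoot above takes a fast path for roots whose compressed (cage-relative) value is a known constant: rather than loading through kRootRegister, the full pointer is formed directly from the pointer-compression cage base, matching the DecompressTagged(Register, Tagged_t) overload further down that emits a single leaq. The address arithmetic in isolation (plain integers; the compressed value below is a made-up example):

#include <cassert>
#include <cstdint>

// Decompressing a compressed tagged value is just cage_base plus the
// zero-extended 32-bit value, so a statically known root needs no load.
uint64_t Decompress(uint64_t cage_base, uint32_t compressed) {
  return cage_base + compressed;
}

int main() {
  const uint64_t cage_base = 0x123400000000ull;
  const uint32_t compressed_root = 0x11d0;  // Hypothetical compressed value.
  assert(Decompress(cage_base, compressed_root) == 0x1234000011d0ull);
}
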
@@ -173,7 +191,11 @@ void MacroAssembler::PushRoot(RootIndex index) {
Push(RootAsOperand(index));
}
-void TurboAssembler::CompareRoot(Register with, RootIndex index) {
+void MacroAssembler::CompareRoot(Register with, RootIndex index) {
+ if (CanBeImmediate(index)) {
+ cmp_tagged(with, Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
+ return;
+ }
DCHECK(root_array_available_);
if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
RootIndex::kLastStrongOrReadOnlyRoot)) {
@@ -184,7 +206,11 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
}
}
-void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
+void MacroAssembler::CompareRoot(Operand with, RootIndex index) {
+ if (CanBeImmediate(index)) {
+ cmp_tagged(with, Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
+ return;
+ }
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
@@ -198,34 +224,39 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
}
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
- LoadTaggedPointerField(destination,
- FieldOperand(object, HeapObject::kMapOffset));
+void MacroAssembler::LoadCompressedMap(Register destination, Register object) {
+ CHECK(COMPRESS_POINTERS_BOOL);
+ mov_tagged(destination, FieldOperand(object, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::LoadMap(Register destination, Register object) {
+ LoadTaggedField(destination, FieldOperand(object, HeapObject::kMapOffset));
#ifdef V8_MAP_PACKING
UnpackMapWord(destination);
#endif
}
-void TurboAssembler::LoadTaggedPointerField(Register destination,
- Operand field_operand) {
+void MacroAssembler::LoadTaggedField(Register destination,
+ Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
mov_tagged(destination, field_operand);
}
}
-void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination,
- Operand field_operand) {
- if (COMPRESS_POINTERS_BOOL) {
- movl(destination.reg(), field_operand);
- } else {
- mov_tagged(destination.reg(), field_operand);
- }
+void MacroAssembler::LoadTaggedField(TaggedRegister destination,
+ Operand field_operand) {
+ LoadTaggedFieldWithoutDecompressing(destination.reg(), field_operand);
+}
+
+void MacroAssembler::LoadTaggedFieldWithoutDecompressing(
+ Register destination, Operand field_operand) {
+ mov_tagged(destination, field_operand);
}
#ifdef V8_MAP_PACKING
-void TurboAssembler::UnpackMapWord(Register r) {
+void MacroAssembler::UnpackMapWord(Register r) {
// Clear the top two bytes (which may include metadata). Must be in sync with
// MapWord::Unpack, and vice versa.
shlq(r, Immediate(16));
@@ -234,7 +265,7 @@ void TurboAssembler::UnpackMapWord(Register r) {
}
#endif
-void TurboAssembler::LoadTaggedSignedField(Register destination,
+void MacroAssembler::LoadTaggedSignedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedSigned(destination, field_operand);
@@ -243,55 +274,25 @@ void TurboAssembler::LoadTaggedSignedField(Register destination,
}
}
-void TurboAssembler::LoadAnyTaggedField(Register destination,
- Operand field_operand) {
- if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
- } else {
- mov_tagged(destination, field_operand);
- }
-}
-
-void TurboAssembler::LoadAnyTaggedField(TaggedRegister destination,
- Operand field_operand) {
- if (COMPRESS_POINTERS_BOOL) {
- movl(destination.reg(), field_operand);
- } else {
- mov_tagged(destination.reg(), field_operand);
- }
-}
-
-void TurboAssembler::PushTaggedPointerField(Operand field_operand,
- Register scratch) {
- if (COMPRESS_POINTERS_BOOL) {
- DCHECK(!field_operand.AddressUsesRegister(scratch));
- DecompressTaggedPointer(scratch, field_operand);
- Push(scratch);
- } else {
- Push(field_operand);
- }
-}
-
-void TurboAssembler::PushTaggedAnyField(Operand field_operand,
- Register scratch) {
+void MacroAssembler::PushTaggedField(Operand field_operand, Register scratch) {
if (COMPRESS_POINTERS_BOOL) {
DCHECK(!field_operand.AddressUsesRegister(scratch));
- DecompressAnyTagged(scratch, field_operand);
+ DecompressTagged(scratch, field_operand);
Push(scratch);
} else {
Push(field_operand);
}
}
-void TurboAssembler::SmiUntagField(Register dst, Operand src) {
+void MacroAssembler::SmiUntagField(Register dst, Operand src) {
SmiUntag(dst, src);
}
-void TurboAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) {
+void MacroAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) {
SmiUntagUnsigned(dst, src);
}
-void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
+void MacroAssembler::StoreTaggedField(Operand dst_field_operand,
Immediate value) {
if (COMPRESS_POINTERS_BOOL) {
movl(dst_field_operand, value);
@@ -300,7 +301,7 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
}
}
-void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
+void MacroAssembler::StoreTaggedField(Operand dst_field_operand,
Register value) {
if (COMPRESS_POINTERS_BOOL) {
movl(dst_field_operand, value);
@@ -309,7 +310,7 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
}
}
-void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
+void MacroAssembler::StoreTaggedSignedField(Operand dst_field_operand,
Smi value) {
if (SmiValuesAre32Bits()) {
Move(kScratchRegister, value);
@@ -319,7 +320,7 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
}
}
-void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
+void MacroAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
Register value) {
if (COMPRESS_POINTERS_BOOL) {
movl(kScratchRegister, value);
@@ -330,31 +331,30 @@ void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
}
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- Operand field_operand) {
+void MacroAssembler::DecompressTagged(Register destination,
+ Operand field_operand) {
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- Register source) {
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
ASM_CODE_COMMENT(this);
movl(destination, source);
addq(destination, kPtrComprCageBaseRegister);
}
-void TurboAssembler::DecompressAnyTagged(Register destination,
- Operand field_operand) {
+void MacroAssembler::DecompressTagged(Register destination,
+ Tagged_t immediate) {
ASM_CODE_COMMENT(this);
- movl(destination, field_operand);
- addq(destination, kPtrComprCageBaseRegister);
+ leaq(destination,
+ Operand(kPtrComprCageBaseRegister, static_cast<int32_t>(immediate)));
}
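
The DecompressTagged overloads above all reduce to the same arithmetic: a tagged field is stored as the low 32 bits of the full pointer, and decompression zero-extends that value and adds the pointer-compression cage base back in (DecompressTaggedSigned skips the add because a Smi payload needs no base, and the leaq form folds the same addition into one address computation). A minimal C++ sketch of that scheme, using illustrative type names rather than V8's real declarations:

```cpp
#include <cstdint>

// Sketch only: mirrors the movl/addq (and leaq) sequences above.
using Tagged_t = uint32_t;  // compressed tagged value (lower 32 bits)
using Address = uint64_t;   // full pointer

// DecompressTagged: zero-extend the 32-bit field and add the cage base.
inline Address DecompressTagged(Address cage_base, Tagged_t value) {
  return cage_base + static_cast<Address>(value);
}

// DecompressTaggedSigned: a Smi carries its payload in the compressed bits,
// so only the zero-extension is needed.
inline Address DecompressTaggedSigned(Tagged_t value) {
  return static_cast<Address>(value);
}
```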
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -399,7 +399,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::EncodeSandboxedPointer(Register value) {
+void MacroAssembler::EncodeSandboxedPointer(Register value) {
ASM_CODE_COMMENT(this);
#ifdef V8_ENABLE_SANDBOX
subq(value, kPtrComprCageBaseRegister);
@@ -409,7 +409,7 @@ void TurboAssembler::EncodeSandboxedPointer(Register value) {
#endif
}
-void TurboAssembler::DecodeSandboxedPointer(Register value) {
+void MacroAssembler::DecodeSandboxedPointer(Register value) {
ASM_CODE_COMMENT(this);
#ifdef V8_ENABLE_SANDBOX
shrq(value, Immediate(kSandboxedPointerShift));
@@ -419,14 +419,14 @@ void TurboAssembler::DecodeSandboxedPointer(Register value) {
#endif
}
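
EncodeSandboxedPointer and DecodeSandboxedPointer store a pointer as a shifted offset from the cage base, so a corrupted value cannot point outside the sandbox's address range. A sketch of the transformation on the V8_ENABLE_SANDBOX path; the shift constant and the shlq/addq halves not visible in the hunks are assumptions of the sketch:

```cpp
#include <cstdint>

// Sketch of the sandboxed-pointer transformation; the shift value is an
// assumed placeholder, not V8's actual constant.
constexpr int kSandboxedPointerShift = 24;

inline uint64_t EncodeSandboxedPointer(uint64_t cage_base, uint64_t ptr) {
  // subq(value, kPtrComprCageBaseRegister) followed by a left shift.
  return (ptr - cage_base) << kSandboxedPointerShift;
}

inline uint64_t DecodeSandboxedPointer(uint64_t cage_base, uint64_t value) {
  // shrq(value, Immediate(kSandboxedPointerShift)), then add the base back.
  return (value >> kSandboxedPointerShift) + cage_base;
}
```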
-void TurboAssembler::LoadSandboxedPointerField(Register destination,
+void MacroAssembler::LoadSandboxedPointerField(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
movq(destination, field_operand);
DecodeSandboxedPointer(destination);
}
-void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand,
+void MacroAssembler::StoreSandboxedPointerField(Operand dst_field_operand,
Register value) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(value, kScratchRegister));
@@ -436,38 +436,36 @@ void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand,
movq(dst_field_operand, kScratchRegister);
}
-void TurboAssembler::LoadExternalPointerField(
+void MacroAssembler::LoadExternalPointerField(
Register destination, Operand field_operand, ExternalPointerTag tag,
Register scratch, IsolateRootLocation isolateRootLocation) {
DCHECK(!AreAliased(destination, scratch));
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- DCHECK_NE(kExternalPointerNullTag, tag);
- DCHECK(!IsSharedExternalPointerType(tag));
- DCHECK(!field_operand.AddressUsesRegister(scratch));
- if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
- DCHECK(root_array_available_);
- movq(scratch, Operand(kRootRegister,
- IsolateData::external_pointer_table_offset() +
- Internals::kExternalPointerTableBufferOffset));
- } else {
- DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
- movq(scratch,
- Operand(scratch, IsolateData::external_pointer_table_offset() +
- Internals::kExternalPointerTableBufferOffset));
- }
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ DCHECK(!IsSharedExternalPointerType(tag));
+ DCHECK(!field_operand.AddressUsesRegister(scratch));
+ if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
+ DCHECK(root_array_available_);
+ movq(scratch, Operand(kRootRegister,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ } else {
+ DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
+ movq(scratch,
+ Operand(scratch, IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ }
movl(destination, field_operand);
shrq(destination, Immediate(kExternalPointerIndexShift));
movq(destination, Operand(scratch, destination, times_8, 0));
movq(scratch, Immediate64(~tag));
andq(destination, scratch);
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+#else
movq(destination, field_operand);
+#endif // V8_ENABLE_SANDBOX
}
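
With the sandbox enabled, LoadExternalPointerField no longer branches on whether the tag is sandboxed: the field always holds a handle that indexes the external pointer table, and the raw pointer is recovered by shifting out the index, loading the 8-byte table entry, and masking off the tag bits. The same decode in C++; the handle type, table layout, and shift value are simplifying assumptions:

```cpp
#include <cstdint>

// Sketch of the decode performed by the movl/shrq/movq/andq sequence above.
// Names and the shift value are illustrative assumptions only.
using ExternalPointerHandle = uint32_t;
using ExternalPointerTag = uint64_t;

constexpr int kExternalPointerIndexShift = 6;  // assumed

inline uint64_t LoadExternalPointer(const uint64_t* table,
                                    ExternalPointerHandle handle,
                                    ExternalPointerTag tag) {
  uint32_t index = handle >> kExternalPointerIndexShift;  // shrq
  uint64_t entry = table[index];                          // movq(..., times_8)
  return entry & ~tag;                                    // andq with ~tag
}
```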
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
@@ -482,12 +480,12 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MovePair(slot_address_parameter, slot_address, object_parameter, object);
Call(isolate()->builtins()->code_handle(
- Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
- RelocInfo::CODE_TARGET);
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
+ RelocInfo::CODE_TARGET);
PopAll(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -505,7 +503,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
PopAll(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
@@ -528,7 +526,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
}
#ifdef V8_IS_TSAN
-void TurboAssembler::CallTSANStoreStub(Register address, Register value,
+void MacroAssembler::CallTSANStoreStub(Register address, Register value,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode,
std::memory_order order) {
@@ -549,7 +547,7 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value,
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
- Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -574,7 +572,7 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value,
PopAll(registers);
}
-void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
+void MacroAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
TSANLoadDescriptor descriptor;
@@ -590,7 +588,7 @@ void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
if (isolate()) {
Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
- Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -647,8 +645,8 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- zero, &done, Label::kNear);
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
@@ -668,7 +666,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
}
-void TurboAssembler::Check(Condition cc, AbortReason reason) {
+void MacroAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L, Label::kNear);
Abort(reason);
@@ -676,7 +674,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
bind(&L);
}
-void TurboAssembler::CheckStackAlignment() {
+void MacroAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kSystemPointerSize) {
@@ -691,7 +689,7 @@ void TurboAssembler::CheckStackAlignment() {
}
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
ASM_CODE_COMMENT(this);
if (v8_flags.code_comments) {
const char* msg = GetAbortReason(reason);
@@ -736,8 +734,8 @@ void TurboAssembler::Abort(AbortReason reason) {
int3();
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
@@ -750,8 +748,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
- Handle<CodeT> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -779,9 +776,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- Handle<CodeT> code =
- CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -810,8 +806,8 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ AssertCodeT(optimized_code_entry);
- __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1);
+ __ AssertCode(optimized_code_entry);
+ __ TestCodeIsMarkedForDeoptimization(optimized_code_entry);
__ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
@@ -820,7 +816,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
- __ JumpCodeTObject(rcx, jump_mode);
+ __ JumpCodeObject(rcx, jump_mode);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
@@ -834,7 +830,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
#ifdef V8_ENABLE_DEBUG_CODE
void MacroAssembler::AssertFeedbackVector(Register object) {
if (v8_flags.debug_code) {
- CmpObjectType(object, FEEDBACK_VECTOR_TYPE, kScratchRegister);
+ IsObjectType(object, FEEDBACK_VECTOR_TYPE, kScratchRegister);
Assert(equal, AbortReason::kExpectedFeedbackVector);
}
}
@@ -869,7 +865,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
Pop(kJavaScriptCallTargetRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- JumpCodeTObject(rcx, jump_mode);
+ JumpCodeObject(rcx, jump_mode);
}
void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
@@ -879,7 +875,7 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
DCHECK_EQ(closure, kJSFunctionRegister);
// Store the optimized code in the closure.
- AssertCodeT(optimized_code);
+ AssertCode(optimized_code);
StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
// Write barrier clobbers scratch1 below.
@@ -927,7 +923,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- LoadAnyTaggedField(
+ LoadTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, closure, r9,
@@ -935,7 +931,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
jump_mode);
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
RegList saved_regs = kCallerSaved - exclusion;
@@ -949,7 +945,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
ASM_CODE_COMMENT(this);
int bytes = 0;
@@ -961,7 +957,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
@@ -972,7 +968,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
return bytes;
}
-int TurboAssembler::PushAll(RegList registers) {
+int MacroAssembler::PushAll(RegList registers) {
int bytes = 0;
for (Register reg : registers) {
pushq(reg);
@@ -981,7 +977,7 @@ int TurboAssembler::PushAll(RegList registers) {
return bytes;
}
-int TurboAssembler::PopAll(RegList registers) {
+int MacroAssembler::PopAll(RegList registers) {
int bytes = 0;
for (Register reg : base::Reversed(registers)) {
popq(reg);
@@ -990,7 +986,7 @@ int TurboAssembler::PopAll(RegList registers) {
return bytes;
}
-int TurboAssembler::PushAll(DoubleRegList registers, int stack_slot_size) {
+int MacroAssembler::PushAll(DoubleRegList registers, int stack_slot_size) {
if (registers.is_empty()) return 0;
const int delta = stack_slot_size * registers.Count();
AllocateStackSpace(delta);
@@ -1008,7 +1004,7 @@ int TurboAssembler::PushAll(DoubleRegList registers, int stack_slot_size) {
return delta;
}
-int TurboAssembler::PopAll(DoubleRegList registers, int stack_slot_size) {
+int MacroAssembler::PopAll(DoubleRegList registers, int stack_slot_size) {
if (registers.is_empty()) return 0;
int slot = 0;
for (XMMRegister reg : registers) {
@@ -1025,7 +1021,7 @@ int TurboAssembler::PopAll(DoubleRegList registers, int stack_slot_size) {
return slot;
}
-void TurboAssembler::Movq(XMMRegister dst, Register src) {
+void MacroAssembler::Movq(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vmovq(dst, src);
@@ -1034,7 +1030,7 @@ void TurboAssembler::Movq(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Movq(Register dst, XMMRegister src) {
+void MacroAssembler::Movq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vmovq(dst, src);
@@ -1043,7 +1039,7 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
+void MacroAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpextrq(dst, src, imm8);
@@ -1053,7 +1049,7 @@ void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
}
}
-void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
+void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtss2sd(dst, src, src);
@@ -1062,7 +1058,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtss2sd(dst, dst, src);
@@ -1071,7 +1067,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
}
}
-void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
+void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtsd2ss(dst, src, src);
@@ -1080,7 +1076,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtsd2ss(dst, dst, src);
@@ -1089,7 +1085,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
}
}
-void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtlsi2sd(dst, kScratchDoubleReg, src);
@@ -1099,7 +1095,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtlsi2sd(dst, kScratchDoubleReg, src);
@@ -1109,7 +1105,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
}
}
-void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtlsi2ss(dst, kScratchDoubleReg, src);
@@ -1119,7 +1115,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtlsi2ss(dst, kScratchDoubleReg, src);
@@ -1129,7 +1125,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
}
}
-void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtqsi2ss(dst, kScratchDoubleReg, src);
@@ -1139,7 +1135,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtqsi2ss(dst, kScratchDoubleReg, src);
@@ -1149,7 +1145,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
}
}
-void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtqsi2sd(dst, kScratchDoubleReg, src);
@@ -1159,7 +1155,7 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtqsi2sd(dst, kScratchDoubleReg, src);
@@ -1169,31 +1165,31 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
}
}
-void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
  // Zero-extend the 32-bit value to 64 bits.
movl(kScratchRegister, src);
Cvtqsi2ss(dst, kScratchRegister);
}
-void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
  // Zero-extend the 32-bit value to 64 bits.
movl(kScratchRegister, src);
Cvtqsi2ss(dst, kScratchRegister);
}
-void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
  // Zero-extend the 32-bit value to 64 bits.
movl(kScratchRegister, src);
Cvtqsi2sd(dst, kScratchRegister);
}
-void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
  // Zero-extend the 32-bit value to 64 bits.
movl(kScratchRegister, src);
Cvtqsi2sd(dst, kScratchRegister);
}
-void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
Label done;
Cvtqsi2ss(dst, src);
testq(src, src);
@@ -1212,12 +1208,12 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
bind(&done);
}
-void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
movq(kScratchRegister, src);
Cvtqui2ss(dst, kScratchRegister);
}
-void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
+void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
Label done;
Cvtqsi2sd(dst, src);
testq(src, src);
@@ -1236,12 +1232,12 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
bind(&done);
}
-void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
movq(kScratchRegister, src);
Cvtqui2sd(dst, kScratchRegister);
}
-void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
+void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2si(dst, src);
@@ -1250,7 +1246,7 @@ void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttss2si(Register dst, Operand src) {
+void MacroAssembler::Cvttss2si(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2si(dst, src);
@@ -1259,7 +1255,7 @@ void TurboAssembler::Cvttss2si(Register dst, Operand src) {
}
}
-void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
+void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2si(dst, src);
@@ -1268,7 +1264,7 @@ void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
+void MacroAssembler::Cvttsd2si(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2si(dst, src);
@@ -1277,7 +1273,7 @@ void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
}
}
-void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
+void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
@@ -1286,7 +1282,7 @@ void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
+void MacroAssembler::Cvttss2siq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
@@ -1295,7 +1291,7 @@ void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
}
}
-void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
+void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2siq(dst, src);
@@ -1304,7 +1300,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
+void MacroAssembler::Cvttsd2siq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2siq(dst, src);
@@ -1315,115 +1311,115 @@ void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
namespace {
template <typename OperandOrXMMRegister, bool is_double>
-void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
+void ConvertFloatToUint64(MacroAssembler* masm, Register dst,
OperandOrXMMRegister src, Label* fail) {
Label success;
// There does not exist a native float-to-uint instruction, so we have to use
// a float-to-int, and postprocess the result.
if (is_double) {
- tasm->Cvttsd2siq(dst, src);
+ masm->Cvttsd2siq(dst, src);
} else {
- tasm->Cvttss2siq(dst, src);
+ masm->Cvttss2siq(dst, src);
}
// If the result of the conversion is positive, we are already done.
- tasm->testq(dst, dst);
- tasm->j(positive, &success);
+ masm->testq(dst, dst);
+ masm->j(positive, &success);
// The result of the first conversion was negative, which means that the
// input value was not within the positive int64 range. We subtract 2^63
// and convert it again to see if it is within the uint64 range.
if (is_double) {
- tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
- tasm->Addsd(kScratchDoubleReg, src);
- tasm->Cvttsd2siq(dst, kScratchDoubleReg);
+ masm->Move(kScratchDoubleReg, -9223372036854775808.0);
+ masm->Addsd(kScratchDoubleReg, src);
+ masm->Cvttsd2siq(dst, kScratchDoubleReg);
} else {
- tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
- tasm->Addss(kScratchDoubleReg, src);
- tasm->Cvttss2siq(dst, kScratchDoubleReg);
+ masm->Move(kScratchDoubleReg, -9223372036854775808.0f);
+ masm->Addss(kScratchDoubleReg, src);
+ masm->Cvttss2siq(dst, kScratchDoubleReg);
}
- tasm->testq(dst, dst);
+ masm->testq(dst, dst);
// The only possible negative value here is 0x8000000000000000, which is
// used on x64 to indicate an integer overflow.
- tasm->j(negative, fail ? fail : &success);
+ masm->j(negative, fail ? fail : &success);
// The input value is within uint64 range and the second conversion worked
// successfully, but we still have to undo the subtraction we did
// earlier.
- tasm->Move(kScratchRegister, 0x8000000000000000);
- tasm->orq(dst, kScratchRegister);
- tasm->bind(&success);
+ masm->Move(kScratchRegister, 0x8000000000000000);
+ masm->orq(dst, kScratchRegister);
+ masm->bind(&success);
}
template <typename OperandOrXMMRegister, bool is_double>
-void ConvertFloatToUint32(TurboAssembler* tasm, Register dst,
+void ConvertFloatToUint32(MacroAssembler* masm, Register dst,
OperandOrXMMRegister src, Label* fail) {
Label success;
// There does not exist a native float-to-uint instruction, so we have to use
// a float-to-int, and postprocess the result.
if (is_double) {
- tasm->Cvttsd2si(dst, src);
+ masm->Cvttsd2si(dst, src);
} else {
- tasm->Cvttss2si(dst, src);
+ masm->Cvttss2si(dst, src);
}
// If the result of the conversion is positive, we are already done.
- tasm->testl(dst, dst);
- tasm->j(positive, &success);
+ masm->testl(dst, dst);
+ masm->j(positive, &success);
// The result of the first conversion was negative, which means that the
// input value was not within the positive int32 range. We subtract 2^31
// and convert it again to see if it is within the uint32 range.
if (is_double) {
- tasm->Move(kScratchDoubleReg, -2147483648.0);
- tasm->Addsd(kScratchDoubleReg, src);
- tasm->Cvttsd2si(dst, kScratchDoubleReg);
+ masm->Move(kScratchDoubleReg, -2147483648.0);
+ masm->Addsd(kScratchDoubleReg, src);
+ masm->Cvttsd2si(dst, kScratchDoubleReg);
} else {
- tasm->Move(kScratchDoubleReg, -2147483648.0f);
- tasm->Addss(kScratchDoubleReg, src);
- tasm->Cvttss2si(dst, kScratchDoubleReg);
+ masm->Move(kScratchDoubleReg, -2147483648.0f);
+ masm->Addss(kScratchDoubleReg, src);
+ masm->Cvttss2si(dst, kScratchDoubleReg);
}
- tasm->testl(dst, dst);
+ masm->testl(dst, dst);
// The only possible negative value here is 0x80000000, which is
// used on x64 to indicate an integer overflow.
- tasm->j(negative, fail ? fail : &success);
+ masm->j(negative, fail ? fail : &success);
// The input value is within uint32 range and the second conversion worked
// successfully, but we still have to undo the subtraction we did
// earlier.
- tasm->Move(kScratchRegister, 0x80000000);
- tasm->orl(dst, kScratchRegister);
- tasm->bind(&success);
+ masm->Move(kScratchRegister, 0x80000000);
+ masm->orl(dst, kScratchRegister);
+ masm->bind(&success);
}
} // namespace
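
As the comments in ConvertFloatToUint64 explain, x64 only provides float-to-signed conversions, so the unsigned variants are built on top of them: convert once, and if the result comes back negative (the overflow sentinel), bias the input down by 2^63, convert again, and OR the high bit back in; ConvertFloatToUint32 is the same idea with 2^31. A scalar C++ sketch of the 64-bit case; the helper emulating cvttsd2siq's overflow sentinel is part of the sketch, not V8 code:

```cpp
#include <cstdint>

// Emulates cvttsd2siq: out-of-range inputs and NaN yield the x64 overflow
// sentinel 0x8000000000000000 instead of C++ undefined behaviour.
inline int64_t TruncateToInt64(double v) {
  if (!(v >= -9223372036854775808.0 && v < 9223372036854775808.0)) {
    return INT64_MIN;
  }
  return static_cast<int64_t>(v);
}

// Scalar version of ConvertFloatToUint64; returns false where the assembler
// jumps to the fail label.
inline bool FloatToUint64(double input, uint64_t* out) {
  int64_t result = TruncateToInt64(input);  // Cvttsd2siq(dst, src)
  if (result >= 0) {                        // non-negative: no overflow
    *out = static_cast<uint64_t>(result);
    return true;
  }
  // Input was above the positive int64 range: subtract 2^63 and retry.
  result = TruncateToInt64(input - 9223372036854775808.0);
  if (result < 0) return false;  // still out of range (or NaN)
  *out = static_cast<uint64_t>(result) | (uint64_t{1} << 63);  // undo the bias
  return true;
}
```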
-void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
+void MacroAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
ConvertFloatToUint64<Operand, true>(this, dst, src, fail);
}
-void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
+void MacroAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, true>(this, dst, src, fail);
}
-void TurboAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) {
+void MacroAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) {
ConvertFloatToUint32<Operand, true>(this, dst, src, fail);
}
-void TurboAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) {
+void MacroAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint32<XMMRegister, true>(this, dst, src, fail);
}
-void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
+void MacroAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
ConvertFloatToUint64<Operand, false>(this, dst, src, fail);
}
-void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
+void MacroAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
-void TurboAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) {
+void MacroAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) {
ConvertFloatToUint32<Operand, false>(this, dst, src, fail);
}
-void TurboAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) {
+void MacroAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint32<XMMRegister, false>(this, dst, src, fail);
}
-void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
+void MacroAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vcmpeqss(dst, src);
@@ -1432,7 +1428,7 @@ void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
+void MacroAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vcmpeqsd(dst, src);
@@ -1444,12 +1440,12 @@ void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
-Register TurboAssembler::GetSmiConstant(Smi source) {
+Register MacroAssembler::GetSmiConstant(Smi source) {
Move(kScratchRegister, source);
return kScratchRegister;
}
-void TurboAssembler::Cmp(Register dst, int32_t src) {
+void MacroAssembler::Cmp(Register dst, int32_t src) {
if (src == 0) {
testl(dst, dst);
} else {
@@ -1457,7 +1453,7 @@ void TurboAssembler::Cmp(Register dst, int32_t src) {
}
}
-void TurboAssembler::SmiTag(Register reg) {
+void MacroAssembler::SmiTag(Register reg) {
static_assert(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
@@ -1468,7 +1464,7 @@ void TurboAssembler::SmiTag(Register reg) {
}
}
-void TurboAssembler::SmiTag(Register dst, Register src) {
+void MacroAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst != src);
if (COMPRESS_POINTERS_BOOL) {
movl(dst, src);
@@ -1478,7 +1474,7 @@ void TurboAssembler::SmiTag(Register dst, Register src) {
SmiTag(dst);
}
-void TurboAssembler::SmiUntag(Register reg) {
+void MacroAssembler::SmiUntag(Register reg) {
static_assert(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
// TODO(v8:7703): Is there a way to avoid this sign extension when pointer
@@ -1489,7 +1485,7 @@ void TurboAssembler::SmiUntag(Register reg) {
sarq(reg, Immediate(kSmiShift));
}
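
The Smi helpers above are all shifts once kSmiTag == 0: tagging moves the payload into the upper bits (with pointer compression the shift is one, which is why SmiTag becomes a single addl(reg, reg)), and untagging is the arithmetic shift back down. A sketch of the 32-bit-payload layout used without pointer compression, with the constants assumed for illustration:

```cpp
#include <cstdint>

// Sketch of the Smi encoding with 32-bit payloads (kSmiTag == 0, shift 32);
// under pointer compression the payload is 31 bits and the shift is 1.
constexpr uint64_t kSmiTag = 0;
constexpr int kSmiShift = 32;

inline uint64_t SmiTag(int32_t value) {
  // shlq(reg, Immediate(kSmiShift)): payload in the upper bits, tag bits zero.
  return (static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift) |
         kSmiTag;
}

inline int32_t SmiUntag(uint64_t smi) {
  // sarq(reg, Immediate(kSmiShift)): arithmetic shift recovers the sign.
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> kSmiShift);
}
```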
-void TurboAssembler::SmiUntagUnsigned(Register reg) {
+void MacroAssembler::SmiUntagUnsigned(Register reg) {
static_assert(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
@@ -1500,7 +1496,7 @@ void TurboAssembler::SmiUntagUnsigned(Register reg) {
}
}
-void TurboAssembler::SmiUntag(Register dst, Register src) {
+void MacroAssembler::SmiUntag(Register dst, Register src) {
DCHECK(dst != src);
if (COMPRESS_POINTERS_BOOL) {
movsxlq(dst, src);
@@ -1514,7 +1510,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
sarq(dst, Immediate(kSmiShift));
}
-void TurboAssembler::SmiUntag(Register dst, Operand src) {
+void MacroAssembler::SmiUntag(Register dst, Operand src) {
if (SmiValuesAre32Bits()) {
// Sign extend to 64-bit.
movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
@@ -1529,7 +1525,7 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
}
}
-void TurboAssembler::SmiUntagUnsigned(Register dst, Operand src) {
+void MacroAssembler::SmiUntagUnsigned(Register dst, Operand src) {
if (SmiValuesAre32Bits()) {
// Zero extend to 64-bit.
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
@@ -1546,7 +1542,7 @@ void TurboAssembler::SmiUntagUnsigned(Register dst, Operand src) {
}
}
-void TurboAssembler::SmiToInt32(Register reg) {
+void MacroAssembler::SmiToInt32(Register reg) {
static_assert(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
@@ -1556,18 +1552,24 @@ void TurboAssembler::SmiToInt32(Register reg) {
}
}
-void TurboAssembler::SmiCompare(Register smi1, Register smi2) {
+void MacroAssembler::SmiToInt32(Register dst, Register src) {
+ DCHECK(dst != src);
+ mov_tagged(dst, src);
+ SmiToInt32(dst);
+}
+
+void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
cmp_tagged(smi1, smi2);
}
-void TurboAssembler::SmiCompare(Register dst, Smi src) {
+void MacroAssembler::SmiCompare(Register dst, Smi src) {
AssertSmi(dst);
Cmp(dst, src);
}
-void TurboAssembler::Cmp(Register dst, Smi src) {
+void MacroAssembler::Cmp(Register dst, Smi src) {
if (src.value() == 0) {
test_tagged(dst, dst);
} else if (COMPRESS_POINTERS_BOOL) {
@@ -1579,19 +1581,19 @@ void TurboAssembler::Cmp(Register dst, Smi src) {
}
}
-void TurboAssembler::SmiCompare(Register dst, Operand src) {
+void MacroAssembler::SmiCompare(Register dst, Operand src) {
AssertSmi(dst);
AssertSmi(src);
cmp_tagged(dst, src);
}
-void TurboAssembler::SmiCompare(Operand dst, Register src) {
+void MacroAssembler::SmiCompare(Operand dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
cmp_tagged(dst, src);
}
-void TurboAssembler::SmiCompare(Operand dst, Smi src) {
+void MacroAssembler::SmiCompare(Operand dst, Smi src) {
AssertSmi(dst);
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value()));
@@ -1601,44 +1603,44 @@ void TurboAssembler::SmiCompare(Operand dst, Smi src) {
}
}
-void TurboAssembler::Cmp(Operand dst, Smi src) {
+void MacroAssembler::Cmp(Operand dst, Smi src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
DCHECK(!dst.AddressUsesRegister(smi_reg));
cmp_tagged(dst, smi_reg);
}
-Condition TurboAssembler::CheckSmi(Register src) {
+Condition MacroAssembler::CheckSmi(Register src) {
static_assert(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
return zero;
}
-Condition TurboAssembler::CheckSmi(Operand src) {
+Condition MacroAssembler::CheckSmi(Operand src) {
static_assert(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
return zero;
}
-void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(smi, on_smi, near_jump);
}
-void TurboAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void TurboAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
+void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) {
+void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
if (constant.value() != 0) {
if (SmiValuesAre32Bits()) {
addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value()));
@@ -1658,7 +1660,7 @@ void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) {
}
}
-SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) {
+SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
if (SmiValuesAre32Bits()) {
DCHECK(is_uint6(shift));
// There is a possible optimization if shift is in the range 60-63, but that
@@ -1689,7 +1691,7 @@ SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) {
}
}
-void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base,
+void MacroAssembler::Switch(Register scratch, Register reg, int case_value_base,
Label** labels, int num_labels) {
Register table = scratch;
Label fallthrough, jump_table;
@@ -1709,7 +1711,7 @@ void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base,
bind(&fallthrough);
}
-void TurboAssembler::Push(Smi source) {
+void MacroAssembler::Push(Smi source) {
intptr_t smi = static_cast<intptr_t>(source.ptr());
if (is_int32(smi)) {
Push(Immediate(static_cast<int32_t>(smi)));
@@ -1730,7 +1732,7 @@ void TurboAssembler::Push(Smi source) {
// ----------------------------------------------------------------------------
-void TurboAssembler::Move(Register dst, Smi source) {
+void MacroAssembler::Move(Register dst, Smi source) {
static_assert(kSmiTag == 0);
int value = source.value();
if (value == 0) {
@@ -1743,7 +1745,7 @@ void TurboAssembler::Move(Register dst, Smi source) {
}
}
-void TurboAssembler::Move(Operand dst, intptr_t x) {
+void MacroAssembler::Move(Operand dst, intptr_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
@@ -1752,7 +1754,7 @@ void TurboAssembler::Move(Operand dst, intptr_t x) {
}
}
-void TurboAssembler::Move(Register dst, ExternalReference ext) {
+void MacroAssembler::Move(Register dst, ExternalReference ext) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -1763,14 +1765,14 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
-void TurboAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src) {
if (dst != src) {
movq(dst, src);
}
}
-void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
-void TurboAssembler::Move(Register dst, Immediate src) {
+void MacroAssembler::Move(Register dst, Operand src) { movq(dst, src); }
+void MacroAssembler::Move(Register dst, Immediate src) {
if (src.rmode() == RelocInfo::Mode::NO_INFO) {
Move(dst, src.value());
} else {
@@ -1778,13 +1780,13 @@ void TurboAssembler::Move(Register dst, Immediate src) {
}
}
-void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
+void MacroAssembler::Move(XMMRegister dst, XMMRegister src) {
if (dst != src) {
Movaps(dst, src);
}
}
-void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
+void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1,
Register src1) {
if (dst0 != src1) {
// Normal case: Writing to dst0 does not destroy src1.
@@ -1803,7 +1805,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
}
}
-void TurboAssembler::MoveNumber(Register dst, double value) {
+void MacroAssembler::MoveNumber(Register dst, double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) {
Move(dst, Smi::FromInt(smi));
@@ -1812,7 +1814,7 @@ void TurboAssembler::MoveNumber(Register dst, double value) {
}
}
-void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
Xorps(dst, dst);
} else {
@@ -1831,7 +1833,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
}
}
-void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
Xorpd(dst, dst);
} else {
@@ -1856,7 +1858,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
+void MacroAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
if (high == low) {
Move(dst, low);
Punpcklqdq(dst, dst);
@@ -1937,12 +1939,12 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
j(below_equal, on_in_range, near_jump);
}
-void TurboAssembler::Push(Handle<HeapObject> source) {
+void MacroAssembler::Push(Handle<HeapObject> source) {
Move(kScratchRegister, source);
Push(kScratchRegister);
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
PushArrayOrder order) {
DCHECK(!AreAliased(array, size, scratch));
Register counter = scratch;
@@ -1967,7 +1969,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void TurboAssembler::Move(Register result, Handle<HeapObject> object,
+void MacroAssembler::Move(Register result, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
@@ -1986,7 +1988,7 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
}
}
-void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
+void MacroAssembler::Move(Operand dst, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
Move(kScratchRegister, object, rmode);
movq(dst, kScratchRegister);
@@ -2011,7 +2013,7 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes =
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -2036,7 +2038,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArguments(Register count, Register scratch,
+void MacroAssembler::DropArguments(Register count, Register scratch,
ArgumentsCountType type,
ArgumentsCountMode mode) {
DCHECK(!AreAliased(count, scratch));
@@ -2045,7 +2047,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
Register scratch,
ArgumentsCountType type,
@@ -2057,7 +2059,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Operand receiver,
Register scratch,
ArgumentsCountType type,
@@ -2070,13 +2072,13 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::Push(Register src) { pushq(src); }
+void MacroAssembler::Push(Register src) { pushq(src); }
-void TurboAssembler::Push(Operand src) { pushq(src); }
+void MacroAssembler::Push(Operand src) { pushq(src); }
void MacroAssembler::PushQuad(Operand src) { pushq(src); }
-void TurboAssembler::Push(Immediate value) { pushq(value); }
+void MacroAssembler::Push(Immediate value) { pushq(value); }
void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
@@ -2086,27 +2088,27 @@ void MacroAssembler::Pop(Operand dst) { popq(dst); }
void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
DCHECK(root_array_available());
jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
isolate(), reference)));
}
-void TurboAssembler::Jump(Operand op) { jmp(op); }
+void MacroAssembler::Jump(Operand op) { jmp(op); }
-void TurboAssembler::Jump(Operand op, Condition cc) {
+void MacroAssembler::Jump(Operand op, Condition cc) {
Label skip;
j(NegateCondition(cc), &skip, Label::kNear);
Jump(op);
bind(&skip);
}
-void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
-void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode,
Condition cc) {
Label skip;
j(NegateCondition(cc), &skip, Label::kNear);
@@ -2114,7 +2116,7 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode,
bind(&skip);
}
-void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@@ -2126,7 +2128,7 @@ void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
@@ -2139,17 +2141,12 @@ void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
j(cc, code_object, rmode);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
- jmp(kOffHeapTrampolineRegister);
-}
-
-void TurboAssembler::Call(ExternalReference ext) {
+void MacroAssembler::Call(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
}
-void TurboAssembler::Call(Operand op) {
+void MacroAssembler::Call(Operand op) {
if (!CpuFeatures::IsSupported(INTEL_ATOM)) {
call(op);
} else {
@@ -2158,12 +2155,12 @@ void TurboAssembler::Call(Operand op) {
}
}
-void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
}
-void TurboAssembler::Call(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
Builtin builtin = Builtin::kNoBuiltinId;
@@ -2175,12 +2172,12 @@ void TurboAssembler::Call(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
}
-Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
+Operand MacroAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
if (SmiValuesAre32Bits()) {
// The builtin_index register contains the builtin index as a Smi.
SmiUntagUnsigned(builtin_index);
@@ -2197,11 +2194,11 @@ Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
}
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(EntryFromBuiltinIndexAsOperand(builtin_index));
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
switch (options().builtin_call_jump_mode) {
case BuiltinCallJumpMode::kAbsolute:
@@ -2214,14 +2211,14 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Call(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
switch (options().builtin_call_jump_mode) {
@@ -2235,14 +2232,14 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
Jump(EntryFromBuiltinAsOperand(builtin));
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jmp(code, RelocInfo::CODE_TARGET);
break;
}
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cc) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
switch (options().builtin_call_jump_mode) {
@@ -2256,70 +2253,25 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) {
Jump(EntryFromBuiltinAsOperand(builtin), cc);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
j(cc, code, RelocInfo::CODE_TARGET);
break;
}
}
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
ASM_CODE_COMMENT(this);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- movq(destination,
- FieldOperand(code_object, CodeDataContainer::kCodeEntryPointOffset));
- return;
- }
-
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- testl(FieldOperand(code_object, Code::kFlagsOffset),
- Immediate(Code::IsOffHeapTrampoline::kMask));
- j(not_equal, &if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- Move(destination, code_object);
- addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
- jmp(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
- movq(destination,
- Operand(kRootRegister, destination, times_system_pointer_size,
- IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- Move(destination, code_object);
- addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
- }
+ movq(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
}
-void TurboAssembler::CallCodeObject(Register code_object) {
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::CallCodeObject(Register code_object) {
+ LoadCodeEntry(code_object, code_object);
call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ LoadCodeEntry(code_object, code_object);
switch (jump_mode) {
case JumpMode::kJump:
jmp(code_object);
@@ -2331,82 +2283,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
}
}
-void TurboAssembler::LoadCodeDataContainerEntry(
- Register destination, Register code_data_container_object) {
- ASM_CODE_COMMENT(this);
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- movq(destination, FieldOperand(code_data_container_object,
- CodeDataContainer::kCodeEntryPointOffset));
-}
-
-void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
- Register destination, Register code_data_container_object) {
- ASM_CODE_COMMENT(this);
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Compute the Code object pointer from the code entry point.
- movq(destination, FieldOperand(code_data_container_object,
- CodeDataContainer::kCodeEntryPointOffset));
- subq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
-}
-
-void TurboAssembler::CallCodeDataContainerObject(
- Register code_data_container_object) {
- LoadCodeDataContainerEntry(code_data_container_object,
- code_data_container_object);
- call(code_data_container_object);
-}
-
-void TurboAssembler::JumpCodeDataContainerObject(
- Register code_data_container_object, JumpMode jump_mode) {
- LoadCodeDataContainerEntry(code_data_container_object,
- code_data_container_object);
- switch (jump_mode) {
- case JumpMode::kJump:
- jmp(code_data_container_object);
- return;
- case JumpMode::kPushAndReturn:
- pushq(code_data_container_object);
- Ret();
- return;
- }
-}
-
-void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
- ASM_CODE_COMMENT(this);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- LoadCodeDataContainerEntry(destination, code);
- } else {
- leaq(destination, Operand(code, Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-void TurboAssembler::CallCodeTObject(Register code) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CallCodeDataContainerObject(code);
- } else {
- CallCodeObject(code);
- }
-}
-
-void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- JumpCodeDataContainerObject(code, jump_mode);
- } else {
- JumpCodeObject(code, jump_mode);
- }
-}
-
-void TurboAssembler::CodeDataContainerFromCodeT(Register destination,
- Register codet) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- Move(destination, codet);
- } else {
- LoadTaggedPointerField(destination,
- FieldOperand(codet, Code::kCodeDataContainerOffset));
- }
-}
-
-void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src,
uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2419,42 +2296,42 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
namespace {
template <typename Op>
-void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src,
+void PinsrdPreSse41Helper(MacroAssembler* masm, XMMRegister dst, Op src,
uint8_t imm8, uint32_t* load_pc_offset) {
- tasm->Movd(kScratchDoubleReg, src);
- if (load_pc_offset) *load_pc_offset = tasm->pc_offset();
+ masm->Movd(kScratchDoubleReg, src);
+ if (load_pc_offset) *load_pc_offset = masm->pc_offset();
if (imm8 == 1) {
- tasm->punpckldq(dst, kScratchDoubleReg);
+ masm->punpckldq(dst, kScratchDoubleReg);
} else {
DCHECK_EQ(0, imm8);
- tasm->Movss(dst, kScratchDoubleReg);
+ masm->Movss(dst, kScratchDoubleReg);
}
}
} // namespace
-void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
+void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
uint32_t* load_pc_offset) {
PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
}
-void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
uint32_t* load_pc_offset) {
PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
}
-void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
+void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
imm8, load_pc_offset, {SSE4_1});
}
-void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
+void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
imm8, load_pc_offset, {SSE4_1});
}
-void TurboAssembler::Lzcntl(Register dst, Register src) {
+void MacroAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntl(dst, src);
@@ -2468,7 +2345,7 @@ void TurboAssembler::Lzcntl(Register dst, Register src) {
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
-void TurboAssembler::Lzcntl(Register dst, Operand src) {
+void MacroAssembler::Lzcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntl(dst, src);
@@ -2482,7 +2359,7 @@ void TurboAssembler::Lzcntl(Register dst, Operand src) {
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
-void TurboAssembler::Lzcntq(Register dst, Register src) {
+void MacroAssembler::Lzcntq(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntq(dst, src);
@@ -2496,7 +2373,7 @@ void TurboAssembler::Lzcntq(Register dst, Register src) {
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
-void TurboAssembler::Lzcntq(Register dst, Operand src) {
+void MacroAssembler::Lzcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntq(dst, src);
@@ -2510,7 +2387,7 @@ void TurboAssembler::Lzcntq(Register dst, Operand src) {
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
-void TurboAssembler::Tzcntq(Register dst, Register src) {
+void MacroAssembler::Tzcntq(Register dst, Register src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntq(dst, src);
@@ -2524,7 +2401,7 @@ void TurboAssembler::Tzcntq(Register dst, Register src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntq(Register dst, Operand src) {
+void MacroAssembler::Tzcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntq(dst, src);
@@ -2538,7 +2415,7 @@ void TurboAssembler::Tzcntq(Register dst, Operand src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntl(Register dst, Register src) {
+void MacroAssembler::Tzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntl(dst, src);
@@ -2551,7 +2428,7 @@ void TurboAssembler::Tzcntl(Register dst, Register src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntl(Register dst, Operand src) {
+void MacroAssembler::Tzcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntl(dst, src);
@@ -2564,7 +2441,7 @@ void TurboAssembler::Tzcntl(Register dst, Operand src) {
bind(&not_zero_src);
}
-void TurboAssembler::Popcntl(Register dst, Register src) {
+void MacroAssembler::Popcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntl(dst, src);
@@ -2573,7 +2450,7 @@ void TurboAssembler::Popcntl(Register dst, Register src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntl(Register dst, Operand src) {
+void MacroAssembler::Popcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntl(dst, src);
@@ -2582,7 +2459,7 @@ void TurboAssembler::Popcntl(Register dst, Operand src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntq(Register dst, Register src) {
+void MacroAssembler::Popcntq(Register dst, Register src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntq(dst, src);
@@ -2591,7 +2468,7 @@ void TurboAssembler::Popcntq(Register dst, Register src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntq(Register dst, Operand src) {
+void MacroAssembler::Popcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntq(dst, src);
@@ -2624,9 +2501,9 @@ void MacroAssembler::PopStackHandler() {
addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
-void TurboAssembler::Ret() { ret(0); }
+void MacroAssembler::Ret() { ret(0); }
-void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
@@ -2637,7 +2514,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
}
}
-void TurboAssembler::IncsspqIfSupported(Register number_of_words,
+void MacroAssembler::IncsspqIfSupported(Register number_of_words,
Register scratch) {
// Optimized code can validate at runtime whether the cpu supports the
// incsspq instruction, so it shouldn't use this method.
@@ -2654,6 +2531,52 @@ void TurboAssembler::IncsspqIfSupported(Register number_of_words,
bind(&not_supported);
}
+void MacroAssembler::IsObjectType(Register heap_object, InstanceType type,
+ Register map) {
+ if (V8_STATIC_ROOTS_BOOL) {
+ if (base::Optional<RootIndex> expected =
+ InstanceTypeChecker::UniqueMapOfInstanceType(type)) {
+ LoadCompressedMap(map, heap_object);
+ cmp_tagged(map, Immediate(ReadOnlyRootPtr(*expected)));
+ return;
+ }
+ }
+ CmpObjectType(heap_object, type, map);
+}
+
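A standalone sketch of the static-roots fast path used by IsObjectType above: when an instance type has exactly one map and that map is a read-only root, the whole type check collapses to a single compressed-pointer compare (the helper below is illustrative, not V8 API):

    // Hedged sketch, assuming a 32-bit compressed map word and a known
    // read-only root pointer for the type's unique map.
    inline bool MatchesUniqueMap(uint32_t compressed_map_word,
                                 uint32_t unique_map_root_ptr) {
      // Mirrors cmp_tagged(map, Immediate(ReadOnlyRootPtr(*expected))):
      // map-word equality implies an exact instance-type match.
      return compressed_map_word == unique_map_root_ptr;
    }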
+void MacroAssembler::JumpIfJSAnyIsNotPrimitive(Register heap_object,
+ Register scratch, Label* target,
+ Label::Distance distance,
+ Condition cc) {
+ CHECK(cc == Condition::kUnsignedLessThan ||
+ cc == Condition::kUnsignedGreaterThanEqual);
+ if (V8_STATIC_ROOTS_BOOL) {
+#ifdef DEBUG
+ Label ok;
+ LoadMap(scratch, heap_object);
+ CmpInstanceTypeRange(scratch, scratch, FIRST_JS_RECEIVER_TYPE,
+ LAST_JS_RECEIVER_TYPE);
+ j(Condition::kUnsignedLessThanEqual, &ok, Label::Distance::kNear);
+ LoadMap(scratch, heap_object);
+ CmpInstanceTypeRange(scratch, scratch, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
+ LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
+ j(Condition::kUnsignedLessThanEqual, &ok, Label::Distance::kNear);
+ Abort(AbortReason::kInvalidReceiver);
+ bind(&ok);
+#endif // DEBUG
+
+ // All primitive objects' maps are allocated at the start of the read-only
+ // heap. Thus JS_RECEIVERs must have maps with larger (compressed)
+ // addresses.
+ LoadCompressedMap(scratch, heap_object);
+ cmp_tagged(scratch, Immediate(InstanceTypeChecker::kNonJsReceiverMapLimit));
+ } else {
+ static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ CmpObjectType(heap_object, FIRST_JS_RECEIVER_TYPE, scratch);
+ }
+ j(cc, target, distance);
+}
+
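The static-roots branch above leans on read-only-heap layout: primitive maps are allocated before all JS receiver maps, so one unsigned compare of the compressed map word against InstanceTypeChecker::kNonJsReceiverMapLimit classifies the object. A minimal illustrative predicate (assumed helper, not V8 API):

    // Hedged sketch: classify by compressed map address, mirroring the
    // cmp_tagged + j(kUnsignedGreaterThanEqual, ...) sequence above.
    inline bool IsJSReceiverByMapAddress(uint32_t compressed_map_word,
                                         uint32_t non_js_receiver_map_limit) {
      // Primitive maps live below the limit; receiver maps at or above it.
      return compressed_map_word >= non_js_receiver_map_limit;
    }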
void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
Register map) {
LoadMap(map, heap_object);
@@ -2673,17 +2596,9 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit);
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
- Register scratch) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- testl(FieldOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- } else {
- LoadTaggedPointerField(scratch,
- FieldOperand(codet, Code::kCodeDataContainerOffset));
- testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- }
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code) {
+ testl(FieldOperand(code, Code::kKindSpecificFlagsOffset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
}
Immediate MacroAssembler::ClearedValue() const {
@@ -2692,37 +2607,37 @@ Immediate MacroAssembler::ClearedValue() const {
}
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::AssertNotSmi(Register object) {
+void MacroAssembler::AssertNotSmi(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
-void TurboAssembler::AssertSmi(Register object) {
+void MacroAssembler::AssertSmi(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
-void TurboAssembler::AssertSmi(Operand object) {
+void MacroAssembler::AssertSmi(Operand object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
-void TurboAssembler::AssertZeroExtended(Register int32_register) {
+void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
DCHECK_NE(int32_register, kScratchRegister);
- movq(kScratchRegister, int64_t{0x0000000100000000});
- cmpq(kScratchRegister, int32_register);
- Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
+ movl(kScratchRegister, Immediate(kMaxUInt32)); // zero-extended
+ cmpq(int32_register, kScratchRegister);
+ Check(below_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
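The reworked AssertZeroExtended loads kMaxUInt32 with movl (which itself zero-extends the scratch register) and checks below_equal, i.e. it asserts value <= 0xFFFFFFFF. A one-line statement of the invariant (sketch only):

    // Hedged sketch of the invariant AssertZeroExtended enforces on a
    // 64-bit register that should hold a zero-extended 32-bit value.
    inline bool IsZeroExtended32(uint64_t reg_value) {
      return reg_value <= 0xFFFFFFFFull;  // top 32 bits must be zero
    }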
-void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) {
+void MacroAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
DCHECK(COMPRESS_POINTERS_BOOL);
@@ -2730,16 +2645,28 @@ void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) {
Check(zero, AbortReason::kSignedBitOfSmiIsNotZero);
}
-void MacroAssembler::AssertCodeT(Register object) {
+void MacroAssembler::AssertMap(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, AbortReason::kOperandIsNotACodeT);
+ Check(not_equal, AbortReason::kOperandIsNotAMap);
Push(object);
LoadMap(object, object);
- CmpInstanceType(object, CODET_TYPE);
- Pop(object);
- Check(equal, AbortReason::kOperandIsNotACodeT);
+ CmpInstanceType(object, MAP_TYPE);
+ popq(object);
+ Check(equal, AbortReason::kOperandIsNotAMap);
+}
+
+void MacroAssembler::AssertCode(Register object) {
+ if (!v8_flags.debug_code) return;
+ ASM_CODE_COMMENT(this);
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsNotACode);
+ Push(object);
+ LoadMap(object, object);
+ CmpInstanceType(object, CODE_TYPE);
+ popq(object);
+ Check(equal, AbortReason::kOperandIsNotACode);
}
void MacroAssembler::AssertConstructor(Register object) {
@@ -2787,7 +2714,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
- CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ IsObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
@@ -2827,11 +2754,11 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
bind(&done_checking);
}
-void TurboAssembler::Assert(Condition cc, AbortReason reason) {
+void MacroAssembler::Assert(Condition cc, AbortReason reason) {
if (v8_flags.debug_code) Check(cc, reason);
}
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
#endif // V8_ENABLE_DEBUG_CODE
@@ -2881,7 +2808,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
ASM_CODE_COMMENT(this);
- LoadTaggedPointerField(
+ LoadTaggedField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2894,8 +2821,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
DCHECK_EQ(function, rdi);
- LoadTaggedPointerField(rsi,
- FieldOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(rsi, FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected_parameter_count,
actual_parameter_count, type);
}
@@ -2910,6 +2836,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_EQ(function, rdi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
+ AssertFunction(function);
+
// On function call, call into the debugger if necessary.
Label debug_hook, continue_after_hook;
{
@@ -2933,13 +2861,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
- CallCodeTObject(rcx);
+ CallCodeObject(rcx);
break;
case InvokeType::kJump:
- JumpCodeTObject(rcx);
+ JumpCodeObject(rcx);
break;
}
jmp(&done, Label::kNear);
@@ -2960,10 +2888,10 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
return Operand(kRootRegister, static_cast<int32_t>(offset));
}
@@ -3091,14 +3019,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
SmiUntag(expected_parameter_count);
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
-void TurboAssembler::Prologue() {
+void MacroAssembler::Prologue() {
ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp);
@@ -3107,11 +3035,13 @@ void TurboAssembler::Prologue() {
Push(kJavaScriptCallArgCountRegister); // Actual argument count.
}
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
pushq(rbp);
movq(rbp, rsp);
if (!StackFrame::IsJavaScript(type)) {
+ static_assert(CommonFrameConstants::kContextOrFrameTypeOffset ==
+ -kSystemPointerSize);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3119,7 +3049,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
// TODO(v8:11429): Consider passing BASELINE instead, and checking for
// IsJSFrame or similar. Could then unify with manual frame leaves in the
@@ -3134,7 +3064,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
}
#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS)
-void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
ASM_CODE_COMMENT(this);
// On Windows and on macOS, we cannot increment the stack size by more than
// one page (minimum page size is 4KB) without accessing at least one byte on
@@ -3156,7 +3086,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
subq(rsp, bytes_scratch);
}
-void TurboAssembler::AllocateStackSpace(int bytes) {
+void MacroAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
while (bytes >= kStackPageSize) {
@@ -3169,8 +3099,8 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
}
#endif
-void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
- StackFrame::Type frame_type) {
+void MacroAssembler::EnterExitFrame(int reserved_stack_slots,
+ StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -3184,53 +3114,24 @@ void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
pushq(rbp);
movq(rbp, rsp);
- // Reserve room for entry stack pointer.
Push(Immediate(StackFrame::TypeToMarker(frame_type)));
DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
- Push(Immediate(0)); // Saved entry sp, patched before call.
+ Push(Immediate(0)); // Saved entry sp, patched below.
- // Save the frame pointer and the context in top.
- if (saved_rax_reg != no_reg) {
- movq(saved_rax_reg, rax); // Backup rax in callee-save register.
- }
-
- Store(
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
- rbp);
- Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
- rsi);
- Store(
- ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
- rbx);
-}
+ using ER = ExternalReference;
+ Store(ER::Create(IsolateAddressId::kCEntryFPAddress, isolate()), rbp);
+ Store(ER::Create(IsolateAddressId::kContextAddress, isolate()), rsi);
+ Store(ER::Create(IsolateAddressId::kCFunctionAddress, isolate()), rbx);
#ifdef V8_TARGET_OS_WIN
-static const int kRegisterPassedArguments = 4;
-#else
-static const int kRegisterPassedArguments = 6;
+ // Note this is only correct under the assumption that the caller hasn't
+ // considered home stack slots already.
+ // TODO(jgruber): This is a bit hacky since the caller in most cases still
+ // needs to know about the home stack slots in order to address reserved
+ // slots. Consider moving this fully into caller code.
+ reserved_stack_slots += kWindowsHomeStackSlots;
#endif
-
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
- bool save_doubles) {
- ASM_CODE_COMMENT(this);
-#ifdef V8_TARGET_OS_WIN
- arg_stack_space += kRegisterPassedArguments;
-#endif
- // Optionally save all XMM registers.
- if (save_doubles) {
- int space = XMMRegister::kNumRegisters * kDoubleSize +
- arg_stack_space * kSystemPointerSize;
- AllocateStackSpace(space);
- int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- DoubleRegister reg =
- DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
- Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else if (arg_stack_space > 0) {
- AllocateStackSpace(arg_stack_space * kSystemPointerSize);
- }
+ AllocateStackSpace(reserved_stack_slots * kSystemPointerSize);
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
@@ -3244,69 +3145,12 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
-void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
- StackFrame::Type frame_type) {
- ASM_CODE_COMMENT(this);
- Register saved_rax_reg = r12;
- EnterExitFramePrologue(saved_rax_reg, frame_type);
-
- // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
- // so it must be retained across the C-call.
- int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
- leaq(r15, Operand(rbp, saved_rax_reg, times_system_pointer_size, offset));
-
- EnterExitFrameEpilogue(arg_stack_space, save_doubles);
-}
-
-void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
- ASM_CODE_COMMENT(this);
- EnterExitFramePrologue(no_reg, StackFrame::EXIT);
- EnterExitFrameEpilogue(arg_stack_space, false);
-}
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
- ASM_CODE_COMMENT(this);
- // Registers:
- // r15 : argv
- if (save_doubles) {
- int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- DoubleRegister reg =
- DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
- Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
- }
- }
-
- if (pop_arguments) {
- // Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, kFPOnStackSize));
- movq(rbp, Operand(rbp, 0 * kSystemPointerSize));
-
- // Drop everything up to and including the arguments and the receiver
- // from the caller stack.
- leaq(rsp, Operand(r15, 1 * kSystemPointerSize));
-
- PushReturnAddressFrom(rcx);
- } else {
- // Otherwise just leave the exit frame.
- leave();
- }
-
- LeaveExitFrameEpilogue();
-}
-
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveExitFrame() {
ASM_CODE_COMMENT(this);
- movq(rsp, rbp);
- popq(rbp);
- LeaveExitFrameEpilogue();
-}
+ leave();
-void MacroAssembler::LeaveExitFrameEpilogue() {
- ASM_CODE_COMMENT(this);
- // Restore current context from top and clear it in debug mode.
+ // Restore the current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
Operand context_operand = ExternalReferenceAsOperand(context_address);
@@ -3326,32 +3170,49 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
// Load native context.
LoadMap(dst, rsi);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst,
FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
// Load value from native context.
- LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(dst, Operand(dst, Context::SlotOffset(index)));
+}
+
+void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
+ Register feedback_vector,
+ FeedbackSlot slot,
+ Label* on_result,
+ Label::Distance distance) {
+ Label fallthrough;
+ LoadTaggedField(
+ scratch_and_result,
+ FieldOperand(feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt())));
+ LoadWeakValue(scratch_and_result, &fallthrough);
+
+ // Is it marked_for_deoptimization? If yes, clear the slot.
+ {
+ TestCodeIsMarkedForDeoptimization(scratch_and_result);
+ j(equal, on_result, distance);
+ StoreTaggedField(
+ FieldOperand(feedback_vector,
+ FeedbackVector::OffsetOfElementAt(slot.ToInt())),
+ ClearedValue());
+ }
+
+ bind(&fallthrough);
+ Move(scratch_and_result, 0);
}
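In rough pseudocode, the OSR cache probe above does the following (a hedged sketch of the control flow; CodeRef, Slot and the helpers are illustrative stand-ins, not V8 API):

    // Probe the feedback-vector slot for cached OSR code. Returns the code
    // on success, clears stale entries, and returns 0 to signal "no code".
    CodeRef TryLoadOptimizedOsrCodeSketch(Slot slot) {
      CodeRef maybe_code = LoadWeak(slot);           // weak reference in the slot
      if (IsCleared(maybe_code)) return CodeRef(0);  // fall through, result = 0
      if (IsMarkedForDeoptimization(maybe_code)) {
        StoreCleared(slot);                          // drop the stale entry
        return CodeRef(0);
      }
      return maybe_code;                             // the j(equal, on_result) path
    }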
-int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
- // On Windows 64 stack slots are reserved by the caller for all arguments
- // including the ones passed in registers, and space is always allocated for
- // the four register arguments even if the function takes fewer than four
- // arguments.
- // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
- // and the caller does not reserve stack slots for them.
+int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
DCHECK_GE(num_arguments, 0);
#ifdef V8_TARGET_OS_WIN
- const int kMinimumStackSlots = kRegisterPassedArguments;
- if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
- return num_arguments;
+ return std::max(num_arguments, kWindowsHomeStackSlots);
#else
- if (num_arguments < kRegisterPassedArguments) return 0;
- return num_arguments - kRegisterPassedArguments;
+ return std::max(num_arguments - kRegisterPassedArguments, 0);
#endif
}
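A couple of worked values for the simplified slot calculation above, following directly from the constants added in register-x64.h (the helper is illustration only):

    // Hedged sketch: stack slots reserved for a C call with n arguments.
    //   Windows x64:        max(n, 4)     -> 2 args => 4 slots (home space), 6 args => 6
    //   SysV (Linux/macOS): max(n - 6, 0) -> 2 args => 0 slots, 8 args => 2
    inline int ArgumentStackSlotsSketch(int n, bool is_windows) {
      return is_windows ? (n < 4 ? 4 : n) : (n > 6 ? n - 6 : 0);
    }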
-void TurboAssembler::PrepareCallCFunction(int num_arguments) {
+void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
DCHECK_NE(frame_alignment, 0);
@@ -3368,14 +3229,16 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
kScratchRegister);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
LoadAddress(rax, function);
- CallCFunction(rax, num_arguments);
+ CallCFunction(rax, num_arguments, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
@@ -3391,55 +3254,60 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
leaq(kScratchRegister, Operand(&get_pc, 0));
bind(&get_pc);
- // Addressing the following external references is tricky because we need
- // this to work in three situations:
- // 1. In wasm compilation, the isolate is nullptr and thus no
- // ExternalReference can be created, but we can construct the address
- // directly using the root register and a static offset.
- // 2. In normal JIT (and builtin) compilation, the external reference is
- // usually addressed through the root register, so we can use the direct
- // offset directly in most cases.
- // 3. In regexp compilation, the external reference is embedded into the reloc
- // info.
- // The solution here is to use root register offsets wherever possible in
- // which case we can construct it directly. When falling back to external
- // references we need to ensure that the scratch register does not get
- // accidentally overwritten. If we run into more such cases in the future, we
- // should implement a more general solution.
- if (root_array_available()) {
- movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()),
- kScratchRegister);
- movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
- rbp);
- } else {
- DCHECK_NOT_NULL(isolate());
- // Use alternative scratch register in order not to overwrite
- // kScratchRegister.
- Register scratch = r12;
- pushq(scratch);
-
- movq(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_pc_address(isolate()),
- scratch),
- kScratchRegister);
- movq(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_fp_address(isolate())),
- rbp);
-
- popq(scratch);
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Addressing the following external references is tricky because we need
+ // this to work in three situations:
+ // 1. In wasm compilation, the isolate is nullptr and thus no
+ // ExternalReference can be created, but we can construct the address
+ // directly using the root register and a static offset.
+ // 2. In normal JIT (and builtin) compilation, the external reference is
+ // usually addressed through the root register, so we can use the direct
+ // offset directly in most cases.
+ // 3. In regexp compilation, the external reference is embedded into the
+ //    reloc info.
+ // The solution here is to use root register offsets wherever possible in
+ // which case we can construct it directly. When falling back to external
+ // references we need to ensure that the scratch register does not get
+ // accidentally overwritten. If we run into more such cases in the future,
+ // we should implement a more general solution.
+ if (root_array_available()) {
+ movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()),
+ kScratchRegister);
+ movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
+ rbp);
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ // Use alternative scratch register in order not to overwrite
+ // kScratchRegister.
+ Register scratch = r12;
+ pushq(scratch);
+
+ movq(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_pc_address(isolate()),
+ scratch),
+ kScratchRegister);
+ movq(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate())),
+ rbp);
+
+ popq(scratch);
+ }
}
call(function);
- // We don't unset the PC; the FP is the source of truth.
- if (root_array_available()) {
- movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
- Immediate(0));
- } else {
- DCHECK_NOT_NULL(isolate());
- movq(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_fp_address(isolate())),
- Immediate(0));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
+ Immediate(0));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ movq(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate())),
+ Immediate(0));
+ }
}
DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
@@ -3449,7 +3317,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
}
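The new SetIsolateDataSlots parameter gates whether the fast-C-call caller PC/FP slots are recorded before the call (and the FP slot cleared afterwards); kNo skips that bookkeeping, presumably for calls that no stack walker needs to see through. A hedged usage sketch built only from the declarations added in macro-assembler-x64.h:

    // 'ref' stands for whatever ExternalReference the caller wants to invoke.
    void CallWithoutIsolateDataBookkeeping(MacroAssembler* masm,
                                           ExternalReference ref) {
      masm->PrepareCallCFunction(2);
      masm->CallCFunction(ref, /*num_arguments=*/2,
                          MacroAssembler::SetIsolateDataSlots::kNo);
    }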
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
ASM_CODE_COMMENT(this);
@@ -3469,7 +3337,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
Label current;
bind(&current);
int pc = pc_offset();
@@ -3481,25 +3349,24 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
-void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- LoadTaggedPointerField(scratch,
- Operand(kJavaScriptCallCodeStartRegister, offset));
- testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+void MacroAssembler::BailoutIfDeoptimized(Register scratch) {
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ LoadTaggedField(scratch, Operand(kJavaScriptCallCodeStartRegister, offset));
+ testl(FieldOperand(scratch, Code::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, not_zero);
}
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
// Note: Assembler::call is used here on purpose to guarantee fixed-size
- // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
+ // exits even on Atom CPUs; see MacroAssembler::Call for Atom-specific
// performance tuning which emits a different instruction sequence.
call(EntryFromBuiltinAsOperand(target));
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
@@ -3507,8 +3374,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::Trap() { int3(); }
-void TurboAssembler::DebugBreak() { int3(); }
+void MacroAssembler::Trap() { int3(); }
+void MacroAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 59a642e35a..9c84b4956c 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -55,10 +55,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler
- : public SharedTurboAssemblerBase<TurboAssembler> {
+class V8_EXPORT_PRIVATE MacroAssembler
+ : public SharedMacroAssembler<MacroAssembler> {
public:
- using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
+ using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -76,6 +76,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Operations on roots in the root-array.
Operand RootAsOperand(RootIndex index);
+ void LoadTaggedRoot(Register destination, RootIndex index);
void LoadRoot(Register destination, RootIndex index) final;
void LoadRoot(Operand destination, RootIndex index) {
LoadRoot(kScratchRegister, index);
@@ -111,12 +112,20 @@ class V8_EXPORT_PRIVATE TurboAssembler
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
// Calculate the number of stack slots to reserve for arguments when calling a
// C function.
- int ArgumentStackSlotsForCFunctionCall(int num_arguments);
+ static int ArgumentStackSlotsForCFunctionCall(int num_arguments);
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -204,6 +213,10 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Cmp(Operand dst, Smi src);
void Cmp(Register dst, int32_t src);
+ void CmpTagged(const Register& src1, const Register& src2) {
+ cmp_tagged(src1, src2);
+ }
+
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
@@ -226,11 +239,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
Condition CheckSmi(Operand src);
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
- void AssertSmi(Operand object) NOOP_UNLESS_DEBUG_CODE
+ void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
+ void AssertSmi(Operand object) NOOP_UNLESS_DEBUG_CODE;
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
@@ -278,6 +291,8 @@ class V8_EXPORT_PRIVATE TurboAssembler
j(less, dest);
}
+ // Caution: if {reg} is a 32-bit negative int, it should be sign-extended to
+ // 64-bit before calling this function.
void Switch(Register scratch, Register reg, int case_base_value,
Label** labels, int num_labels);
@@ -286,6 +301,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
#endif
void LoadMap(Register destination, Register object);
+ void LoadCompressedMap(Register destination, Register object);
void Move(Register dst, intptr_t x) {
if (x == 0) {
@@ -360,6 +376,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Convert smi to 32-bit value.
void SmiToInt32(Register reg);
+ void SmiToInt32(Register dst, Register src);
// Loads the address of the external reference into the destination
// register.
@@ -380,7 +397,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Call(Register reg) { call(reg); }
void Call(Operand op);
- void Call(Handle<CodeT> code_object, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
@@ -392,39 +409,19 @@ class V8_EXPORT_PRIVATE TurboAssembler
void TailCallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin, Condition cc);
- void LoadCodeObjectEntry(Register destination, Register code_object);
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
JumpMode jump_mode = JumpMode::kJump);
- // Load code entry point from the CodeDataContainer object.
- void LoadCodeDataContainerEntry(Register destination,
- Register code_data_container_object);
- // Load code entry point from the CodeDataContainer object and compute
- // Code object pointer out of it. Must not be used for CodeDataContainers
- // corresponding to builtins, because their entry points values point to
- // the embedded instruction stream in .text section.
- void LoadCodeDataContainerCodeNonBuiltin(Register destination,
- Register code_data_container_object);
- void CallCodeDataContainerObject(Register code_data_container_object);
- void JumpCodeDataContainerObject(Register code_data_container_object,
- JumpMode jump_mode = JumpMode::kJump);
-
- // Helper functions that dispatch either to Call/JumpCodeObject or to
- // Call/JumpCodeDataContainerObject.
- // TODO(v8:11880): remove since CodeT targets are now default.
- void LoadCodeTEntry(Register destination, Register code);
- void CallCodeTObject(Register code);
- void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
- void CodeDataContainerFromCodeT(Register destination, Register codet);
-
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(Address destination, RelocInfo::Mode rmode, Condition cc);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Operand op, Condition cc);
- void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode);
- void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode, Condition cc);
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode, Condition cc);
void BailoutIfDeoptimized(Register scratch);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
@@ -457,23 +454,35 @@ class V8_EXPORT_PRIVATE TurboAssembler
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void Assert(Condition cc, AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
+ void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if a 64 bit register containing a 32 bit payload does not
// have zeros in the top 32 bits, enabled via --debug-code.
- void AssertZeroExtended(Register reg) NOOP_UNLESS_DEBUG_CODE
+ void AssertZeroExtended(Register reg) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if the signed bit of smi register with pointer compression
// is not zero, enabled via --debug-code.
- void AssertSignedBitOfSmiIsZero(Register smi) NOOP_UNLESS_DEBUG_CODE
+ void AssertSignedBitOfSmiIsZero(Register smi) NOOP_UNLESS_DEBUG_CODE;
// Like Assert(), but always enabled.
void Check(Condition cc, AbortReason reason);
+ // Compare instance type for map.
+ // Always use unsigned comparisons: above and below, not less and greater.
+ void CmpInstanceType(Register map, InstanceType type);
+
+ // Abort execution if argument is not a Map, enabled via
+ // --debug-code.
+ void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE;
+
+ // Abort execution if argument is not a Code, enabled via
+ // --debug-code.
+ void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE;
+
// Print a message to stdout and abort execution.
void Abort(AbortReason msg);
@@ -508,7 +517,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, isolate_root);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
#endif
@@ -577,35 +586,23 @@ class V8_EXPORT_PRIVATE TurboAssembler
// ---------------------------------------------------------------------------
// Pointer compression support
- // Loads a field containing a HeapObject and decompresses it if pointer
- // compression is enabled.
- void LoadTaggedPointerField(Register destination, Operand field_operand);
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadTaggedField(Register destination, Operand field_operand);
- // Loads a field containing a HeapObject but does not decompress it when
+ // Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
- void LoadTaggedPointerField(TaggedRegister destination,
- Operand field_operand);
+ void LoadTaggedField(TaggedRegister destination, Operand field_operand);
+ void LoadTaggedFieldWithoutDecompressing(Register destination,
+ Operand field_operand);
// Loads a field containing a Smi and decompresses it if pointer compression
// is enabled.
void LoadTaggedSignedField(Register destination, Operand field_operand);
- // Loads a field containing any tagged value and decompresses it if necessary.
- void LoadAnyTaggedField(Register destination, Operand field_operand);
-
- // Loads a field containing any tagged value but does not decompress it when
- // pointer compression is enabled.
- void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);
-
- // Loads a field containing a HeapObject, decompresses it if necessary and
- // pushes full pointer to the stack. When pointer compression is enabled,
- // uses |scratch| to decompress the value.
- void PushTaggedPointerField(Operand field_operand, Register scratch);
-
// Loads a field containing any tagged value, decompresses it if necessary and
// pushes the full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
- void PushTaggedAnyField(Operand field_operand, Register scratch);
+ void PushTaggedField(Operand field_operand, Register scratch);
// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, Operand src);
@@ -620,9 +617,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
- void DecompressTaggedPointer(Register destination, Operand field_operand);
- void DecompressTaggedPointer(Register destination, Register source);
- void DecompressAnyTagged(Register destination, Operand field_operand);
+ void DecompressTagged(Register destination, Operand field_operand);
+ void DecompressTagged(Register destination, Register source);
+ void DecompressTagged(Register destination, Tagged_t immediate);
// ---------------------------------------------------------------------------
// V8 Sandbox support
@@ -646,23 +643,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
IsolateRootLocation isolateRootLocation =
IsolateRootLocation::kInRootRegister);
- protected:
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
- // Returns a register holding the smi value. The register MUST NOT be
- // modified. It may be the "smi 1 constant" register.
- Register GetSmiConstant(Smi value);
-
- // Drops arguments assuming that the return address was already popped.
- void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
- ArgumentsCountMode mode = kCountExcludesReceiver);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
// Loads and stores the value of an external reference.
// Special case code for load and store to take advantage of
// load_rax/store_rax if possible/necessary.
@@ -726,29 +706,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
SaveFPRegsMode save_fp,
SmiCheck smi_check = SmiCheck::kInline);
- // Enter specific kind of exit frame; either in normal or
- // debug mode. Expects the number of arguments in register rax and
- // sets up the number of arguments in register rdi and the pointer
- // to the first argument in register rsi.
- //
- // Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the
- // stack accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false,
- StackFrame::Type frame_type = StackFrame::EXIT);
-
- // Enter specific kind of exit frame. Allocates
- // (arg_stack_space * kSystemPointerSize) memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterApiExitFrame(int arg_stack_space);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi (if pop_arguments == true).
- void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax (untouched).
- void LeaveApiExitFrame();
+ void EnterExitFrame(int reserved_stack_slots, StackFrame::Type frame_type);
+ void LeaveExitFrame();
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -775,7 +734,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Macro instructions.
- using TurboAssembler::Cmp;
void Cmp(Register dst, Handle<Object> source);
void Cmp(Operand dst, Handle<Object> source);
@@ -802,18 +760,26 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Pop(Operand dst);
void PopQuad(Operand dst);
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
// They may be the same register, and may be kScratchRegister.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- // Always use unsigned comparisons: above and below, not less and greater.
- void CmpInstanceType(Register map, InstanceType type);
+ // Variant of the above, which only guarantees to set the correct
+ // equal/not_equal flag. Map might not be loaded.
+ void IsObjectType(Register heap_object, InstanceType type, Register scratch);
+ // Fast check if the object is a js receiver type. Assumes only primitive
+ // objects or js receivers are passed.
+ void JumpIfJSAnyIsNotPrimitive(
+ Register heap_object, Register scratch, Label* target,
+ Label::Distance distance = Label::kFar,
+ Condition condition = Condition::kUnsignedGreaterThanEqual);
+ void JumpIfJSAnyIsPrimitive(Register heap_object, Register scratch,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
+ return JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target, distance,
+ Condition::kUnsignedLessThan);
+ }
// Compare instance type ranges for a map (low and high inclusive)
// Always use unsigned comparisons: below_equal for a positive result.
@@ -830,11 +796,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
andq(reg, Immediate(mask));
}
- void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
+ void TestCodeIsMarkedForDeoptimization(Register code);
Immediate ClearedValue() const;
// Tiering support.
- void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure, Register scratch1,
Register slot_address);
@@ -847,30 +813,27 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register flags, Register feedback_vector, Register closure,
JumpMode jump_mode = JumpMode::kJump);
- // Abort execution if argument is not a CodeT, enabled via --debug-code.
- void AssertCodeT(Register object) NOOP_UNLESS_DEBUG_CODE
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a callable JSFunction, enabled via
// --debug-code.
- void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
- void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
- void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE
+ void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE;
// ---------------------------------------------------------------------------
// Exception handling
@@ -892,24 +855,27 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the native context slot with the current index.
void LoadNativeContextSlot(Register dst, int index);
+ // Falls through and sets scratch_and_result to 0 on failure, jumps to
+ // on_result on success.
+ void TryLoadOptimizedOsrCode(Register scratch_and_result,
+ Register feedback_vector, FeedbackSlot slot,
+ Label* on_result, Label::Distance distance);
+
// ---------------------------------------------------------------------------
// Runtime calls
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump)
@@ -943,20 +909,22 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
+ protected:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+ // Returns a register holding the smi value. The register MUST NOT be
+ // modified. It may be the "smi 1 constant" register.
+ Register GetSmiConstant(Smi value);
+
+ // Drops arguments assuming that the return address was already popped.
+ void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+ ArgumentsCountMode mode = kCountExcludesReceiver);
+
private:
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, InvokeType type);
- void EnterExitFramePrologue(Register saved_rax_reg,
- StackFrame::Type frame_type);
-
- // Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the
- // stack accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
@@ -968,6 +936,11 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
+// For compatibility with platform-independent code.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
// Generate an Operand for loading a field from an object. Object pointer is a
// compressed pointer when pointer compression is enabled.
inline Operand FieldOperand(TaggedRegister object, int offset) {
@@ -985,20 +958,6 @@ inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-// Provides access to exit frame stack space (not GCed).
-inline Operand StackSpaceOperand(int index) {
-#ifdef V8_TARGET_OS_WIN
- const int kShaddowSpace = 4;
- return Operand(rsp, (index + kShaddowSpace) * kSystemPointerSize);
-#else
- return Operand(rsp, index * kSystemPointerSize);
-#endif
-}
-
-inline Operand StackOperandForReturnAddress(int32_t disp) {
- return Operand(rsp, disp);
-}
-
struct MoveCycleState {
// Whether a move in the cycle needs the scratch or double scratch register.
bool pending_scratch_register_use = false;
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 4ab85275cc..6adf816dd3 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -41,7 +41,7 @@ namespace internal {
V(r12) \
V(r15)
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#else
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(r14)
@@ -102,12 +102,20 @@ constexpr Register arg_reg_1 = rcx;
constexpr Register arg_reg_2 = rdx;
constexpr Register arg_reg_3 = r8;
constexpr Register arg_reg_4 = r9;
+constexpr int kRegisterPassedArguments = 4;
+// The Windows 64 ABI always reserves spill slots on the stack for the four
+// register arguments even if the function takes fewer than four arguments.
+// These stack slots are sometimes called 'home space', sometimes 'shadow
+// store' in Microsoft documentation, see
+// https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention.
+constexpr int kWindowsHomeStackSlots = 4;
#else
// AMD64 calling convention
constexpr Register arg_reg_1 = rdi;
constexpr Register arg_reg_2 = rsi;
constexpr Register arg_reg_3 = rdx;
constexpr Register arg_reg_4 = rcx;
+constexpr int kRegisterPassedArguments = 6;
#endif // V8_TARGET_OS_WIN
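With the calling-convention constants centralized here, the Windows-only adjustment in EnterExitFrame reduces to simple arithmetic: a caller asking for 3 reserved slots ends up with 3 + kWindowsHomeStackSlots = 7 system-pointer-sized slots on Windows and 3 elsewhere, before frame alignment (illustrative numbers, per the code above):

    // Hedged sketch of the adjustment EnterExitFrame performs.
    constexpr int ReservedExitFrameSlots(int requested_slots, bool is_windows) {
      return is_windows ? requested_slots + 4 /* kWindowsHomeStackSlots */
                        : requested_slots;
    }
    static_assert(ReservedExitFrameSlots(3, /*is_windows=*/true) == 7, "");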
#define DOUBLE_REGISTERS(V) \
@@ -279,15 +287,14 @@ constexpr Register kWasmInstanceRegister = rsi;
// function calling convention.
constexpr Register kScratchRegister = r10;
constexpr XMMRegister kScratchDoubleReg = xmm15;
+constexpr YMMRegister kScratchSimd256Reg = ymm15;
constexpr Register kRootRegister = r13; // callee save
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
constexpr Register kPtrComprCageBaseRegister = r14; // callee save
#else
-constexpr Register kPtrComprCageBaseRegister = kRootRegister;
+constexpr Register kPtrComprCageBaseRegister = no_reg;
#endif
-constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
-
constexpr DoubleRegister kFPReturnRegister0 = xmm0;
} // namespace internal
diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index f638a2c778..1b996b14e4 100644
--- a/deps/v8/src/common/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -15,11 +15,15 @@ namespace internal {
namespace {
+// All asserts are allowed by default except for one.
+constexpr uint32_t kInitialValue =
+ ~(1 << HANDLE_DEREFERENCE_ALL_THREADS_ASSERT);
+
template <PerThreadAssertType kType>
using PerThreadDataBit = base::BitField<bool, kType, 1>;
-// Thread-local storage for assert data. Default all asserts to "allow".
-thread_local uint32_t current_per_thread_assert_data(~0);
+// Thread-local storage for assert data.
+thread_local uint32_t current_per_thread_assert_data(kInitialValue);
} // namespace
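The new default leaves every per-thread assert bit set ("allowed") except HANDLE_DEREFERENCE_ALL_THREADS_ASSERT, which now has to be enabled explicitly. A hedged sketch of how the default word decodes (illustrative; the real code goes through base::BitField):

    inline bool IsAllowedByDefault(PerThreadAssertType type) {
      constexpr uint32_t kDefault =
          ~(1u << HANDLE_DEREFERENCE_ALL_THREADS_ASSERT);
      return (kDefault >> type) & 1u;  // one bit per assert type
    }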
@@ -81,8 +85,10 @@ bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
enable) \
PER_ISOLATE_ASSERT_SCOPE_DEFINITION(DisableType, field, enable)
-PER_ISOLATE_ASSERT_TYPE(PER_ISOLATE_ASSERT_ENABLE_SCOPE_DEFINITION, true)
-PER_ISOLATE_ASSERT_TYPE(PER_ISOLATE_ASSERT_DISABLE_SCOPE_DEFINITION, false)
+PER_ISOLATE_DCHECK_TYPE(PER_ISOLATE_ASSERT_ENABLE_SCOPE_DEFINITION, true)
+PER_ISOLATE_CHECK_TYPE(PER_ISOLATE_ASSERT_ENABLE_SCOPE_DEFINITION, true)
+PER_ISOLATE_DCHECK_TYPE(PER_ISOLATE_ASSERT_DISABLE_SCOPE_DEFINITION, false)
+PER_ISOLATE_CHECK_TYPE(PER_ISOLATE_ASSERT_DISABLE_SCOPE_DEFINITION, false)
// -----------------------------------------------------------------------------
// Instantiations.
@@ -95,6 +101,8 @@ template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
+template class PerThreadAssertScope<HANDLE_DEREFERENCE_ALL_THREADS_ASSERT,
+ true>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, false>;
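
The thread-local word above packs one bit per PerThreadAssertType. A minimal sketch of that scheme, using a hypothetical enum and hand-rolled bit twiddling rather than the real base::BitField:

    #include <cstdint>

    enum AssertType : uint32_t {
      kHandleDereference = 2,
      kHandleDereferenceAllThreads = 3,
    };

    // Everything allowed by default except the "all threads" dereference bit,
    // mirroring kInitialValue above.
    thread_local uint32_t assert_bits = ~(1u << kHandleDereferenceAllThreads);

    bool IsAllowed(AssertType type) { return (assert_bits >> type) & 1u; }

    // An "allow" scope sets its bit on construction and restores the previous
    // state on destruction, like PerThreadAssertScope.
    class AllowScope {
     public:
      explicit AllowScope(AssertType type)
          : type_(type), was_allowed_(IsAllowed(type)) {
        assert_bits |= 1u << type_;
      }
      ~AllowScope() {
        if (!was_allowed_) assert_bits &= ~(1u << type_);
      }

     private:
      AssertType type_;
      bool was_allowed_;
    };
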
diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h
index 10e5e3907e..5eb04c59bb 100644
--- a/deps/v8/src/common/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -25,6 +25,7 @@ enum PerThreadAssertType {
HEAP_ALLOCATION_ASSERT,
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
+ HANDLE_DEREFERENCE_ALL_THREADS_ASSERT,
CODE_DEPENDENCY_CHANGE_ASSERT,
CODE_ALLOCATION_ASSERT,
// Dummy type for disabling GC mole.
@@ -50,7 +51,7 @@ class V8_NODISCARD PerThreadAssertScope {
// Per-isolate assert scopes.
-#define PER_ISOLATE_ASSERT_TYPE_DEBUG_ONLY(V, enable) \
+#define PER_ISOLATE_DCHECK_TYPE(V, enable) \
/* Scope to document where we do not expect javascript execution. */ \
/* Scope to introduce an exception to DisallowJavascriptExecution. */ \
V(AllowJavascriptExecution, DisallowJavascriptExecution, \
@@ -66,7 +67,7 @@ class V8_NODISCARD PerThreadAssertScope {
/* Scope to introduce an exception to DisallowExceptions. */ \
V(AllowExceptions, DisallowExceptions, no_exception_assert, enable)
-#define PER_ISOLATE_ASSERT_TYPE(V, enable) \
+#define PER_ISOLATE_CHECK_TYPE(V, enable) \
/* Scope in which javascript execution leads to exception being thrown. */ \
/* Scope to introduce an exception to ThrowOnJavascriptExecution. */ \
V(NoThrowOnJavascriptExecution, ThrowOnJavascriptExecution, \
@@ -74,8 +75,7 @@ class V8_NODISCARD PerThreadAssertScope {
/* Scope in which javascript execution causes dumps. */ \
/* Scope in which javascript execution doesn't cause dumps. */ \
V(NoDumpOnJavascriptExecution, DumpOnJavascriptExecution, \
- javascript_execution_dump, enable) \
- PER_ISOLATE_ASSERT_TYPE_DEBUG_ONLY(V, enable)
+ javascript_execution_dump, enable)
#define PER_ISOLATE_ASSERT_SCOPE_DECLARATION(ScopeType) \
class V8_NODISCARD ScopeType { \
@@ -103,46 +103,42 @@ class V8_NODISCARD PerThreadAssertScope {
#define PER_ISOLATE_ASSERT_DISABLE_SCOPE(_1, DisableType, _2, _3) \
PER_ISOLATE_ASSERT_SCOPE_DECLARATION(DisableType)
-PER_ISOLATE_ASSERT_TYPE(PER_ISOLATE_ASSERT_ENABLE_SCOPE, true)
-PER_ISOLATE_ASSERT_TYPE(PER_ISOLATE_ASSERT_DISABLE_SCOPE, false)
+PER_ISOLATE_DCHECK_TYPE(PER_ISOLATE_ASSERT_ENABLE_SCOPE, true)
+PER_ISOLATE_CHECK_TYPE(PER_ISOLATE_ASSERT_ENABLE_SCOPE, true)
+PER_ISOLATE_DCHECK_TYPE(PER_ISOLATE_ASSERT_DISABLE_SCOPE, false)
+PER_ISOLATE_CHECK_TYPE(PER_ISOLATE_ASSERT_DISABLE_SCOPE, false)
#ifdef DEBUG
-#define PER_ISOLATE_ASSERT_ENABLE_SCOPE_DEBUG_ONLY(EnableType, DisableType, \
- field, _) \
+#define PER_ISOLATE_DCHECK_ENABLE_SCOPE(EnableType, DisableType, field, _) \
class EnableType##DebugOnly : public EnableType { \
public: \
explicit EnableType##DebugOnly(Isolate* isolate) : EnableType(isolate) {} \
};
#else
-#define PER_ISOLATE_ASSERT_ENABLE_SCOPE_DEBUG_ONLY(EnableType, DisableType, \
- field, _) \
- class V8_NODISCARD EnableType##DebugOnly { \
- public: \
- explicit EnableType##DebugOnly(Isolate* isolate) {} \
+#define PER_ISOLATE_DCHECK_ENABLE_SCOPE(EnableType, DisableType, field, _) \
+ class V8_NODISCARD EnableType##DebugOnly { \
+ public: \
+ explicit EnableType##DebugOnly(Isolate* isolate) {} \
};
#endif
#ifdef DEBUG
-#define PER_ISOLATE_ASSERT_DISABLE_SCOPE_DEBUG_ONLY(EnableType, DisableType, \
- field, _) \
- class DisableType##DebugOnly : public DisableType { \
- public: \
- explicit DisableType##DebugOnly(Isolate* isolate) \
- : DisableType(isolate) {} \
+#define PER_ISOLATE_DCHECK_DISABLE_SCOPE(EnableType, DisableType, field, _) \
+ class DisableType##DebugOnly : public DisableType { \
+ public: \
+ explicit DisableType##DebugOnly(Isolate* isolate) \
+ : DisableType(isolate) {} \
};
#else
-#define PER_ISOLATE_ASSERT_DISABLE_SCOPE_DEBUG_ONLY(EnableType, DisableType, \
- field, _) \
- class V8_NODISCARD DisableType##DebugOnly { \
- public: \
- explicit DisableType##DebugOnly(Isolate* isolate) {} \
+#define PER_ISOLATE_DCHECK_DISABLE_SCOPE(EnableType, DisableType, field, _) \
+ class V8_NODISCARD DisableType##DebugOnly { \
+ public: \
+ explicit DisableType##DebugOnly(Isolate* isolate) {} \
};
#endif
-PER_ISOLATE_ASSERT_TYPE_DEBUG_ONLY(PER_ISOLATE_ASSERT_ENABLE_SCOPE_DEBUG_ONLY,
- true)
-PER_ISOLATE_ASSERT_TYPE_DEBUG_ONLY(PER_ISOLATE_ASSERT_DISABLE_SCOPE_DEBUG_ONLY,
- false)
+PER_ISOLATE_DCHECK_TYPE(PER_ISOLATE_DCHECK_ENABLE_SCOPE, true)
+PER_ISOLATE_DCHECK_TYPE(PER_ISOLATE_DCHECK_DISABLE_SCOPE, false)
template <typename... Scopes>
class CombinationAssertScope;
@@ -231,6 +227,11 @@ using DisallowHandleDereference =
using AllowHandleDereference =
PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>;
+// Explicitly allow handle dereference for all threads/isolates on one
+// particular thread.
+using AllowHandleDereferenceAllThreads =
+ PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ALL_THREADS_ASSERT, true>;
+
// Scope to document where we do not expect code dependencies to change.
using DisallowCodeDependencyChange =
PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
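
The DCHECK/CHECK split above hinges on the DebugOnly wrapper pattern: DCHECK-type scopes keep their behaviour in debug builds and compile down to empty objects in release builds. A sketch with a made-up DisallowFoo scope (not a real V8 class):

    class Isolate;

    class DisallowFoo {
     public:
      explicit DisallowFoo(Isolate*) { /* would record state on the isolate */ }
    };

    #ifdef DEBUG
    // Debug build: the DebugOnly wrapper inherits the real scope behaviour.
    class DisallowFooDebugOnly : public DisallowFoo {
     public:
      explicit DisallowFooDebugOnly(Isolate* isolate) : DisallowFoo(isolate) {}
    };
    #else
    // Release build: an empty object, so DCHECK-style scopes cost nothing.
    class DisallowFooDebugOnly {
     public:
      explicit DisallowFooDebugOnly(Isolate*) {}
    };
    #endif
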
diff --git a/deps/v8/src/common/code-memory-access-inl.h b/deps/v8/src/common/code-memory-access-inl.h
index ed570b131d..2b504b86f3 100644
--- a/deps/v8/src/common/code-memory-access-inl.h
+++ b/deps/v8/src/common/code-memory-access-inl.h
@@ -10,6 +10,9 @@
#if V8_HAS_PKU_JIT_WRITE_PROTECT
#include "src/base/platform/memory-protection-key.h"
#endif
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+#include "src/base/platform/platform.h"
+#endif
namespace v8 {
namespace internal {
@@ -29,18 +32,13 @@ RwxMemoryWriteScope::~RwxMemoryWriteScope() {
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
-// Ignoring this warning is considered better than relying on
-// __builtin_available.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability-new"
-
// static
bool RwxMemoryWriteScope::IsSupported() { return true; }
// static
void RwxMemoryWriteScope::SetWritable() {
if (code_space_write_nesting_level_ == 0) {
- pthread_jit_write_protect_np(0);
+ base::SetJitWriteProtected(0);
}
code_space_write_nesting_level_++;
}
@@ -49,10 +47,9 @@ void RwxMemoryWriteScope::SetWritable() {
void RwxMemoryWriteScope::SetExecutable() {
code_space_write_nesting_level_--;
if (code_space_write_nesting_level_ == 0) {
- pthread_jit_write_protect_np(1);
+ base::SetJitWriteProtected(1);
}
}
-#pragma clang diagnostic pop
#elif V8_HAS_PKU_JIT_WRITE_PROTECT
// static
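
The hunk above routes the Apple Silicon W^X toggle through base::SetJitWriteProtected instead of calling pthread_jit_write_protect_np directly. The nesting behaviour is the interesting part; here is a self-contained sketch in which the toggle function only records state and is not the real platform hook:

    static int jit_pages_writable = 0;

    // Stand-in for base::SetJitWriteProtected / pthread_jit_write_protect_np.
    void ToggleWriteProtection(int writable) { jit_pages_writable = writable; }

    thread_local int nesting_level = 0;

    class JitWriteScope {
     public:
      JitWriteScope() {
        // Only the outermost scope flips the protection; inner scopes just
        // bump the counter, matching RwxMemoryWriteScope::SetWritable.
        if (nesting_level++ == 0) ToggleWriteProtection(1);
      }
      ~JitWriteScope() {
        if (--nesting_level == 0) ToggleWriteProtection(0);
      }
    };
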
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 9288e13c5d..17057b4c5f 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -71,7 +71,9 @@ namespace internal {
// Determine whether the architecture uses an embedded constant pool
// (contiguous constant pool embedded in code object).
-#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// Need to temporarily disable the constant pool on PPC; more details can be
+// found at https://crrev.com/c/4341976.
+#if 0 && (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64)
#define V8_EMBEDDED_CONSTANT_POOL_BOOL true
#else
#define V8_EMBEDDED_CONSTANT_POOL_BOOL false
@@ -122,6 +124,12 @@ namespace internal {
#define V8_CAN_CREATE_SHARED_HEAP_BOOL false
#endif
+#ifdef V8_STATIC_ROOT_GENERATION
+#define V8_STATIC_ROOT_GENERATION_BOOL true
+#else
+#define V8_STATIC_ROOT_GENERATION_BOOL false
+#endif
+
#ifdef V8_ENABLE_SANDBOX
#define V8_ENABLE_SANDBOX_BOOL true
#else
@@ -225,14 +233,8 @@ const size_t kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB;
#ifdef V8_EXTERNAL_CODE_SPACE
#define V8_EXTERNAL_CODE_SPACE_BOOL true
-// This flag enables the mode when V8 does not create trampoline Code objects
-// for builtins. It should be enough to have only CodeDataContainer objects.
-class CodeDataContainer;
-using CodeT = CodeDataContainer;
#else
#define V8_EXTERNAL_CODE_SPACE_BOOL false
-class Code;
-using CodeT = Code;
#endif
// V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT controls how V8 sets permissions for
@@ -298,6 +300,12 @@ using CodeT = Code;
#define V8_SFI_HAS_UNIQUE_ID false
#endif
+#if V8_SFI_HAS_UNIQUE_ID && TAGGED_SIZE_8_BYTES
+#define V8_SFI_NEEDS_PADDING true
+#else
+#define V8_SFI_NEEDS_PADDING false
+#endif
+
#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_X64)
#define V8_OS_WIN_X64 true
#endif
@@ -380,9 +388,26 @@ constexpr int kMaxDoubleStringLength = 24;
// Total wasm code space per engine (i.e. per process) is limited to make
// certain attacks that rely on heap spraying harder.
+// Do not access directly, but via the {--wasm-max-committed-code-mb} flag.
// Just below 4GB, such that {kMaxWasmCodeMemory} fits in a 32-bit size_t.
-constexpr size_t kMaxWasmCodeMB = 4095;
-constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
+constexpr uint32_t kMaxCommittedWasmCodeMB = 4095;
+
+// The actual maximum code space size used can be configured with
+// --max-wasm-code-space-size. This constant is the default value, and at the
+// same time the maximum allowed value (checked by the WasmCodeManager).
+#if V8_TARGET_ARCH_ARM64
+// ARM64 only supports direct calls within a 128 MB range.
+constexpr uint32_t kDefaultMaxWasmCodeSpaceSizeMb = 128;
+#elif V8_TARGET_ARCH_PPC64
+// Branches only take 26 bits.
+constexpr uint32_t kDefaultMaxWasmCodeSpaceSizeMb = 32;
+#else
+// Use 1024 MB limit for code spaces on other platforms. This is smaller than
+// the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
+// big reservations, and to ensure that distances within a code space fit
+// within a 32-bit signed integer.
+constexpr uint32_t kDefaultMaxWasmCodeSpaceSizeMb = 1024;
+#endif
#if V8_HOST_ARCH_64_BIT
constexpr int kSystemPointerSizeLog2 = 3;
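
A quick sanity check on the wasm code-space constants introduced above; this is an editorial sketch assuming the 1024 MB "other platforms" default, not V8 code:

    #include <algorithm>
    #include <cstdint>

    constexpr uint32_t MB = 1024 * 1024;
    // Stand-in for kDefaultMaxWasmCodeSpaceSizeMb on non-arm64/ppc64 builds.
    constexpr uint32_t kMaxCodeSpaceSizeMb = 1024;

    // Offsets within a single code space must fit a 32-bit signed displacement.
    static_assert(uint64_t{kMaxCodeSpaceSizeMb} * MB <= uint64_t{INT32_MAX} + 1,
                  "code-space offsets fit in int32");

    // The configured size is capped at the per-architecture maximum.
    uint32_t ClampCodeSpaceSizeMb(uint32_t requested_mb) {
      return std::min(requested_mb, kMaxCodeSpaceSizeMb);
    }
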
@@ -652,8 +677,11 @@ enum class StoreOrigin { kMaybeKeyed, kNamed };
enum class TypeofMode { kInside, kNotInside };
-// Enums used by CEntry.
+// Whether floating point registers should be saved (and restored).
enum class SaveFPRegsMode { kIgnore, kSave };
+
+// Whether arguments are passed on a known stack location or through a
+// register.
enum class ArgvMode { kStack, kRegister };
// This constant is used as an undefined value when passing source positions.
@@ -847,8 +875,8 @@ using RuntimeArguments = Arguments<ArgumentsType::kRuntime>;
using JavaScriptArguments = Arguments<ArgumentsType::kJS>;
class Assembler;
class ClassScope;
+class InstructionStream;
class Code;
-class CodeDataContainer;
class CodeSpace;
class Context;
class DeclarationScope;
@@ -903,6 +931,7 @@ class CompressedMaybeObjectSlot;
class CompressedMapWordSlot;
class CompressedHeapObjectSlot;
class V8HeapCompressionScheme;
+class ExternalCodeCompressionScheme;
template <typename CompressionScheme>
class OffHeapCompressedObjectSlot;
class FullObjectSlot;
@@ -934,7 +963,12 @@ struct SlotTraits {
using THeapObjectSlot = CompressedHeapObjectSlot;
using TOffHeapObjectSlot =
OffHeapCompressedObjectSlot<V8HeapCompressionScheme>;
- using TCodeObjectSlot = OffHeapCompressedObjectSlot<V8HeapCompressionScheme>;
+#ifdef V8_EXTERNAL_CODE_SPACE
+ using TCodeObjectSlot =
+ OffHeapCompressedObjectSlot<ExternalCodeCompressionScheme>;
+#else
+ using TCodeObjectSlot = TObjectSlot;
+#endif // V8_EXTERNAL_CODE_SPACE
#else
using TObjectSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
@@ -963,9 +997,10 @@ using HeapObjectSlot = SlotTraits::THeapObjectSlot;
using OffHeapObjectSlot = SlotTraits::TOffHeapObjectSlot;
// A CodeObjectSlot instance describes a kTaggedSize-sized field ("slot")
-// holding a strong pointer to a Code object. The Code object slots might be
-// compressed and since code space might be allocated off the main heap
-// the load operations require explicit cage base value for code space.
+// holding a strong pointer to an InstructionStream object. The
+// InstructionStream object slots might be compressed, and since code space
+// might be allocated off the main heap, the load operations require an
+// explicit cage base value for code space.
using CodeObjectSlot = SlotTraits::TCodeObjectSlot;
using WeakSlotCallback = bool (*)(FullObjectSlot pointer);
@@ -1002,10 +1037,10 @@ constexpr int kSpaceTagSize = 4;
static_assert(FIRST_SPACE == 0);
enum class AllocationType : uint8_t {
- kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
- kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
- kCode, // Code object allocated in CODE_SPACE or CODE_LO_SPACE
- kMap, // Map object allocated in OLD_SPACE
+ kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
+ kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
+ kCode, // InstructionStream object allocated in CODE_SPACE or CODE_LO_SPACE
+ kMap, // Map object allocated in OLD_SPACE
kReadOnly, // Object allocated in RO_SPACE
kSharedOld, // Regular object allocated in OLD_SPACE in the shared heap
kSharedMap, // Map object in OLD_SPACE in the shared heap
@@ -1124,6 +1159,8 @@ enum class CodeFlushMode {
kStressFlushCode,
};
+enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
+
bool inline IsBaselineCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
return mode.contains(CodeFlushMode::kFlushBaselineCode);
}
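
The new ExternalBackingStoreType enum sits next to the existing EnumSet-based CodeFlushMode checks. For readers unfamiliar with base::EnumSet, a tiny stand-in that shows what mode.contains(...) does (the real class in src/base/enum-set.h is richer than this):

    #include <cstdint>

    enum class CodeFlushMode { kFlushBytecode, kFlushBaselineCode, kStressFlushCode };

    class CodeFlushModeSet {
     public:
      void Add(CodeFlushMode m) { bits_ |= Bit(m); }
      bool contains(CodeFlushMode m) const { return (bits_ & Bit(m)) != 0; }

     private:
      static uint32_t Bit(CodeFlushMode m) { return 1u << static_cast<int>(m); }
      uint32_t bits_ = 0;
    };

    bool IsBaselineCodeFlushingEnabled(CodeFlushModeSet mode) {
      return mode.contains(CodeFlushMode::kFlushBaselineCode);
    }
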
@@ -1166,6 +1203,15 @@ enum ParseRestriction : bool {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
+enum class ScriptEventType {
+ kReserveId,
+ kCreate,
+ kDeserialize,
+ kBackgroundCompile,
+ kStreamingCompileBackground,
+ kStreamingCompileForeground
+};
+
// State for inline cache call sites. Aliased as IC::State.
enum class InlineCacheState {
// No feedback will be collected.
@@ -1376,14 +1422,15 @@ inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
constexpr int kScopeInfoMaxInlinedLocalNamesSize = 75;
enum ScopeType : uint8_t {
- CLASS_SCOPE, // The scope introduced by a class.
- EVAL_SCOPE, // The top-level scope for an eval source.
- FUNCTION_SCOPE, // The top-level scope for a function.
- MODULE_SCOPE, // The scope introduced by a module literal
- SCRIPT_SCOPE, // The top-level scope for a script or a top-level eval.
- CATCH_SCOPE, // The scope introduced by catch.
- BLOCK_SCOPE, // The scope introduced by a new block.
- WITH_SCOPE // The scope introduced by with.
+ CLASS_SCOPE, // The scope introduced by a class.
+ EVAL_SCOPE, // The top-level scope for an eval source.
+ FUNCTION_SCOPE, // The top-level scope for a function.
+ MODULE_SCOPE, // The scope introduced by a module literal
+ SCRIPT_SCOPE, // The top-level scope for a script or a top-level eval.
+ CATCH_SCOPE, // The scope introduced by catch.
+ BLOCK_SCOPE, // The scope introduced by a new block.
+ WITH_SCOPE, // The scope introduced by with.
+ SHADOW_REALM_SCOPE // Synthetic scope for ShadowRealm NativeContexts.
};
inline std::ostream& operator<<(std::ostream& os, ScopeType type) {
@@ -1404,6 +1451,8 @@ inline std::ostream& operator<<(std::ostream& os, ScopeType type) {
return os << "CLASS_SCOPE";
case ScopeType::WITH_SCOPE:
return os << "WITH_SCOPE";
+ case ScopeType::SHADOW_REALM_SCOPE:
+ return os << "SHADOW_REALM_SCOPE";
}
UNREACHABLE();
}
@@ -1535,11 +1584,20 @@ inline bool IsDeclaredVariableMode(VariableMode mode) {
return mode <= VariableMode::kVar;
}
-inline bool IsPrivateMethodOrAccessorVariableMode(VariableMode mode) {
- return mode >= VariableMode::kPrivateMethod &&
+inline bool IsPrivateAccessorVariableMode(VariableMode mode) {
+ return mode >= VariableMode::kPrivateSetterOnly &&
mode <= VariableMode::kPrivateGetterAndSetter;
}
+inline bool IsPrivateMethodVariableMode(VariableMode mode) {
+ return mode == VariableMode::kPrivateMethod;
+}
+
+inline bool IsPrivateMethodOrAccessorVariableMode(VariableMode mode) {
+ return IsPrivateMethodVariableMode(mode) ||
+ IsPrivateAccessorVariableMode(mode);
+}
+
inline bool IsSerializableVariableMode(VariableMode mode) {
return IsDeclaredVariableMode(mode) ||
IsPrivateMethodOrAccessorVariableMode(mode);
@@ -1699,9 +1757,10 @@ class CompareOperationFeedback {
kInternalizedStringFlag = 1 << 4,
kOtherStringFlag = 1 << 5,
kSymbolFlag = 1 << 6,
- kBigIntFlag = 1 << 7,
- kReceiverFlag = 1 << 8,
- kAnyMask = 0x1FF,
+ kBigInt64Flag = 1 << 7,
+ kOtherBigIntFlag = 1 << 8,
+ kReceiverFlag = 1 << 9,
+ kAnyMask = 0x3FF,
};
public:
@@ -1723,7 +1782,8 @@ class CompareOperationFeedback {
kReceiver = kReceiverFlag,
kReceiverOrNullOrUndefined = kReceiver | kNullOrUndefined,
- kBigInt = kBigIntFlag,
+ kBigInt64 = kBigInt64Flag,
+ kBigInt = kBigInt64Flag | kOtherBigIntFlag,
kSymbol = kSymbolFlag,
kAny = kAnyMask,
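
The BigInt feedback split preserves the lattice property the feedback system relies on: kBigInt64 is a strict subset of kBigInt, so feedback can only widen. A small check mirroring the flag values above (illustrative, not V8 code):

    #include <cstdint>

    constexpr uint32_t kBigInt64Flag = 1 << 7;
    constexpr uint32_t kOtherBigIntFlag = 1 << 8;
    constexpr uint32_t kBigInt64 = kBigInt64Flag;
    constexpr uint32_t kBigInt = kBigInt64Flag | kOtherBigIntFlag;

    // a is a subset of b iff a has no bits outside b.
    constexpr bool IsSubset(uint32_t a, uint32_t b) { return (a & ~b) == 0; }

    static_assert(IsSubset(kBigInt64, kBigInt), "BigInt64 widens to BigInt");
    static_assert(!IsSubset(kBigInt, kBigInt64), "but not the other way around");
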
@@ -1796,18 +1856,23 @@ inline std::ostream& operator<<(std::ostream& os, CollectionKind kind) {
UNREACHABLE();
}
-// Flags for the runtime function kDefineKeyedOwnPropertyInLiteral. A property
-// can be enumerable or not, and, in case of functions, the function name can be
-// set or not.
+// Flags for the runtime function kDefineKeyedOwnPropertyInLiteral.
+// - Whether the function name should be set or not.
enum class DefineKeyedOwnPropertyInLiteralFlag {
kNoFlags = 0,
- kDontEnum = 1 << 0,
- kSetFunctionName = 1 << 1
+ kSetFunctionName = 1 << 0
};
using DefineKeyedOwnPropertyInLiteralFlags =
base::Flags<DefineKeyedOwnPropertyInLiteralFlag>;
DEFINE_OPERATORS_FOR_FLAGS(DefineKeyedOwnPropertyInLiteralFlags)
+enum class DefineKeyedOwnPropertyFlag {
+ kNoFlags = 0,
+ kSetFunctionName = 1 << 0
+};
+using DefineKeyedOwnPropertyFlags = base::Flags<DefineKeyedOwnPropertyFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(DefineKeyedOwnPropertyFlags)
+
enum ExternalArrayType {
kExternalInt8Array = 1,
kExternalUint8Array,
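
The DefineKeyedOwnPropertyFlags addition above uses the base::Flags pattern: an enum class plus overloaded bitwise operators so flag sets stay type-safe. A minimal sketch of the idea with a made-up enum (not the real base::Flags template):

    enum class DemoFlag : int { kNoFlags = 0, kSetFunctionName = 1 << 0 };

    constexpr DemoFlag operator|(DemoFlag a, DemoFlag b) {
      return static_cast<DemoFlag>(static_cast<int>(a) | static_cast<int>(b));
    }
    constexpr bool operator&(DemoFlag a, DemoFlag b) {
      return (static_cast<int>(a) & static_cast<int>(b)) != 0;
    }

    // Usage: combine and test flags without falling back to raw ints.
    constexpr DemoFlag flags = DemoFlag::kNoFlags | DemoFlag::kSetFunctionName;
    static_assert(flags & DemoFlag::kSetFunctionName, "flag round-trips");
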
@@ -1853,6 +1918,12 @@ enum class TieringState : int32_t {
kLastTieringState = kRequestTurbofan_Concurrent,
};
+// The state kInProgress (= an optimization request for this function is
+// currently being serviced) means that no other tiering action can
+// happen. Define this constant so we can static_assert it at related code
+// sites.
+static constexpr bool kTieringStateInProgressBlocksTierup = true;
+
// To efficiently check whether a marker is kNone or kInProgress using a single
// mask, we expect the kNone to be 0 and kInProgress to be 1 so that we can
// mask off the lsb for checking.
@@ -1979,7 +2050,8 @@ enum IsolateAddressId {
V(TrapNullDereference) \
V(TrapIllegalCast) \
V(TrapArrayOutOfBounds) \
- V(TrapArrayTooLarge)
+ V(TrapArrayTooLarge) \
+ V(TrapStringOffsetOutOfBounds)
enum KeyedAccessLoadMode {
STANDARD_LOAD,
@@ -2008,7 +2080,8 @@ enum class IcCheckType { kElement, kProperty };
// Helper stubs can be called in different ways depending on where the target
// code is located and how the call sequence is expected to look like:
-// - CodeObject: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
+// - CodeObject: Call on-heap {Code} object via
+// {RelocInfo::CODE_TARGET}.
// - WasmRuntimeStub: Call native {WasmCode} stub via
// {RelocInfo::WASM_STUB_CALL}.
// - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic
@@ -2038,6 +2111,11 @@ inline constexpr int JSParameterCount(int param_count_without_receiver) {
return param_count_without_receiver + kJSArgcReceiverSlots;
}
+// A special {Parameter} index for JSCalls that represents the closure.
+// The constant is defined here for accessibility (without having to include TF
+// internals), even though it is mostly relevant to Turbofan.
+constexpr int kJSCallClosureParameterIndex = -1;
+
// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
@@ -2065,7 +2143,7 @@ class PtrComprCageBase {
// NOLINTNEXTLINE
inline PtrComprCageBase(const LocalIsolate* isolate);
- inline Address address() const;
+ inline Address address() const { return address_; }
bool operator==(const PtrComprCageBase& other) const {
return address_ == other.address_;
@@ -2077,6 +2155,7 @@ class PtrComprCageBase {
#else
class PtrComprCageBase {
public:
+ explicit constexpr PtrComprCageBase(Address address) {}
PtrComprCageBase() = default;
// NOLINTNEXTLINE
PtrComprCageBase(const Isolate* isolate) {}
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 343adf65ec..99c9230107 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -13,6 +13,9 @@ namespace internal {
#define MESSAGE_TEMPLATES(T) \
/* Error */ \
T(None, "") \
+ T(ConflictingPrivateName, \
+    "Operation is ambiguous because there is more than one private name " \
+ "'%' on the object") \
T(CyclicProto, "Cyclic __proto__ value") \
T(Debugger, "Debugger: %") \
T(DebuggerLoading, "Error loading debugger") \
@@ -26,7 +29,7 @@ namespace internal {
T(IcuError, "Internal error. Icu error.") \
/* TypeError */ \
T(ApplyNonFunction, \
- "Function.prototype.apply was called on %, which is a % and not a " \
+ "Function.prototype.apply was called on %, which is % and not a " \
"function") \
T(ArgumentsDisallowedInInitializerAndStaticBlock, \
"'arguments' is not allowed in class field initializer or static " \
@@ -61,11 +64,14 @@ namespace internal {
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
- T(CallShadowRealmFunctionThrown, "Called throwing ShadowRealm function") \
+ T(CallShadowRealmEvaluateThrew, "ShadowRealm evaluate threw (%)") \
T(CallSiteExpectsFunction, \
"CallSite expects wasm object as first or function as second argument, " \
"got <%, %>") \
T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
+ T(CallSiteMethodUnsupportedInShadowRealm, \
+ "CallSite method % is unsupported inside ShadowRealms") \
+ T(CallWrappedFunctionThrew, "WrappedFunction threw (%)") \
T(CannotBeShared, "% cannot be shared") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
T(CannotPreventExt, "Cannot prevent extensions") \
@@ -73,7 +79,7 @@ namespace internal {
T(CannotFreezeArrayBufferView, \
"Cannot freeze array buffer views with elements") \
T(CannotSeal, "Cannot seal") \
- T(CannotWrap, "Cannot wrap target callable") \
+ T(CannotWrap, "Cannot wrap target callable (%)") \
T(CircularStructure, "Converting circular structure to JSON%") \
T(ConstructAbstractClass, "Abstract class % not directly constructable") \
T(ConstAssign, "Assignment to constant variable.") \
@@ -93,6 +99,8 @@ namespace internal {
T(DebuggerType, "Debugger: Parameters have wrong types.") \
T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
T(DefineDisallowed, "Cannot define property %, object is not extensible") \
+ T(DefineDisallowedFixedLayout, \
+ "Cannot define property %, object is fixed layout") \
T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
T(DoNotUse, "Do not use %; %") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
@@ -110,7 +118,7 @@ namespace internal {
T(ImportOutsideModule, "Cannot use import statement outside a module") \
T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
T(ImportMissingSpecifier, "import() requires a specifier") \
- T(ImportShadowRealmRejected, "Cannot import in the ShadowRealm") \
+ T(ImportShadowRealmRejected, "Cannot import in ShadowRealm (%)") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
T(InstanceofNonobjectProto, \
"Function has non-object prototype '%' in instanceof check") \
@@ -146,6 +154,7 @@ namespace internal {
T(NonObjectAssertOption, "The 'assert' option must be an object") \
T(NonObjectInInstanceOfCheck, \
"Right-hand side of 'instanceof' is not an object") \
+ T(NonObjectPrivateNameAccess, "Cannot access private name % from %") \
T(NonObjectPropertyLoad, "Cannot read properties of %") \
T(NonObjectPropertyLoadWithProperty, \
"Cannot read properties of % (reading '%')") \
@@ -350,7 +359,6 @@ namespace internal {
T(AccessedUnavailableVariable, "Cannot access '%' from debugger") \
/* RangeError */ \
T(BigIntDivZero, "Division by zero") \
- T(BigIntNegativeExponent, "Exponent must be positive") \
T(BigIntTooBig, "Maximum BigInt size exceeded") \
T(CantSetOptionXWhenYIsUsed, "Can't set option % when % is used") \
T(DateRange, "Provided date is not in valid range.") \
@@ -414,6 +422,7 @@ namespace internal {
"Custom comparefn not supported for huge TypedArrays") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
T(CollectionGrowFailed, "% maximum size exceeded") \
+ T(MustBePositive, "% must be positive") \
/* SyntaxError */ \
T(AmbiguousExport, \
"The requested module '%' contains conflicting star exports for name '%'") \
@@ -534,7 +543,7 @@ namespace internal {
"Labelled function declaration not allowed as the body of a control flow " \
"structure") \
T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
- T(MalformedRegExp, "Invalid regular expression: /%/: %") \
+ T(MalformedRegExp, "Invalid regular expression: /%/%: %") \
T(MalformedRegExpFlags, "Invalid regular expression flags") \
T(ModuleExportUndefined, "Export '%' is not defined in module") \
T(MissingFunctionName, "Function statements require a function name") \
@@ -703,8 +712,8 @@ namespace internal {
T(OptionalChainingNoTemplate, "Invalid tagged template on optional chain") \
/* AggregateError */ \
T(AllPromisesRejected, "All promises were rejected") \
- /* Web snapshots */ \
- T(WebSnapshotError, "Web snapshot failed: %")
+ T(CannotDeepFreezeObject, "Cannot DeepFreeze object of type %") \
+ T(CannotDeepFreezeValue, "Cannot DeepFreeze non-const value %")
enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
diff --git a/deps/v8/src/common/operation.h b/deps/v8/src/common/operation.h
index 74682d9046..dc243fe0a5 100644
--- a/deps/v8/src/common/operation.h
+++ b/deps/v8/src/common/operation.h
@@ -40,7 +40,7 @@
UNARY_OPERATION_LIST(V) \
COMPARISON_OPERATION_LIST(V)
-enum class Operation {
+enum class Operation : uint8_t {
#define DEFINE_OP(name) k##name,
OPERATION_LIST(DEFINE_OP)
#undef DEFINE_OP
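
Operation is generated from an X-macro list, which is why shrinking it to uint8_t is a one-line change. A self-contained sketch of the pattern with a made-up DEMO_OPERATION_LIST (the real OPERATION_LIST is much longer):

    #include <cstdint>

    #define DEMO_OPERATION_LIST(V) V(Add) V(Subtract) V(Equal)

    enum class DemoOperation : uint8_t {
    #define DEFINE_OP(name) k##name,
      DEMO_OPERATION_LIST(DEFINE_OP)
    #undef DEFINE_OP
    };

    // The same list drives a name table, so adding an operation cannot miss
    // either site.
    const char* DemoOperationName(DemoOperation op) {
      switch (op) {
    #define CASE(name)             \
      case DemoOperation::k##name: \
        return #name;
        DEMO_OPERATION_LIST(CASE)
    #undef CASE
      }
      return "unknown";
    }
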
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index 1322270e8f..10d984921a 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -19,17 +19,12 @@ PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
: address_(isolate->cage_base()) {}
-Address PtrComprCageBase::address() const {
- Address ret = address_;
- ret = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
- reinterpret_cast<void*>(ret), kPtrComprCageBaseAlignment));
- return ret;
-}
-
//
// V8HeapCompressionScheme
//
+constexpr Address kPtrComprCageBaseMask = ~(kPtrComprCageBaseAlignment - 1);
+
// static
Address V8HeapCompressionScheme::GetPtrComprCageBaseAddress(
Address on_heap_addr) {
@@ -39,33 +34,65 @@ Address V8HeapCompressionScheme::GetPtrComprCageBaseAddress(
// static
Address V8HeapCompressionScheme::GetPtrComprCageBaseAddress(
PtrComprCageBase cage_base) {
- return cage_base.address();
+ Address base = cage_base.address();
+ V8_ASSUME((base & kPtrComprCageBaseMask) == base);
+ base = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(base), kPtrComprCageBaseAlignment));
+ return base;
+}
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+// static
+void V8HeapCompressionScheme::InitBase(Address base) {
+ CHECK_EQ(base, GetPtrComprCageBaseAddress(base));
+ base_ = base;
+}
+
+// static
+V8_CONST Address V8HeapCompressionScheme::base() {
+ // V8_ASSUME_ALIGNED is often not preserved across ptr-to-int casts (i.e. when
+ // casting to an Address). To increase our chances we additionally encode the
+ // same information in this V8_ASSUME.
+ V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_);
+ return reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(base_), kPtrComprCageBaseAlignment));
}
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// static
-Tagged_t V8HeapCompressionScheme::CompressTagged(Address tagged) {
+Tagged_t V8HeapCompressionScheme::CompressObject(Address tagged) {
+ // This is used to help clang produce better code. Values which could be
+ // invalid pointers need to be compressed with CompressAny.
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ V8_ASSUME((tagged & kPtrComprCageBaseMask) == base_ || HAS_SMI_TAG(tagged));
+#endif
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
// static
-Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
- // For runtime code the upper 32-bits of the Smi value do not matter.
- return static_cast<Address>(raw_value);
+Tagged_t V8HeapCompressionScheme::CompressAny(Address tagged) {
+ return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
// static
-template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedPointer(
- TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
- return GetPtrComprCageBaseAddress(on_heap_addr) +
- static_cast<Address>(raw_value);
+Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
+ // For runtime code the upper 32-bits of the Smi value do not matter.
+ return static_cast<Address>(raw_value);
}
// static
template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedAny(
- TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
- return DecompressTaggedPointer(on_heap_addr, raw_value);
+Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
+ Tagged_t raw_value) {
+#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ Address cage_base = base();
+#else
+ Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
+#endif
+ Address result = cage_base + static_cast<Address>(raw_value);
+ V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
+ return result;
}
// static
@@ -76,15 +103,94 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers(
// If pointer compression is enabled, we may have random compressed pointers
// on the stack that may be used for subsequent operations.
// Extract, decompress and trace both halfwords.
- Address decompressed_low = V8HeapCompressionScheme::DecompressTaggedPointer(
+ Address decompressed_low = V8HeapCompressionScheme::DecompressTagged(
cage_base, static_cast<Tagged_t>(raw_value));
callback(decompressed_low);
- Address decompressed_high = V8HeapCompressionScheme::DecompressTaggedPointer(
+ Address decompressed_high = V8HeapCompressionScheme::DecompressTagged(
cage_base,
static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT)));
callback(decompressed_high);
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+
+//
+// ExternalCodeCompressionScheme
+//
+
+// static
+Address ExternalCodeCompressionScheme::PrepareCageBaseAddress(
+ Address on_heap_addr) {
+ return RoundDown<kPtrComprCageBaseAlignment>(on_heap_addr);
+}
+
+// static
+Address ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(
+ PtrComprCageBase cage_base) {
+ Address base = cage_base.address();
+ V8_ASSUME((base & kPtrComprCageBaseMask) == base);
+ base = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(base), kPtrComprCageBaseAlignment));
+ return base;
+}
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+// static
+void ExternalCodeCompressionScheme::InitBase(Address base) {
+ CHECK_EQ(base, PrepareCageBaseAddress(base));
+ base_ = base;
+}
+
+// static
+V8_CONST Address ExternalCodeCompressionScheme::base() {
+ // V8_ASSUME_ALIGNED is often not preserved across ptr-to-int casts (i.e. when
+ // casting to an Address). To increase our chances we additionally encode the
+ // same information in this V8_ASSUME.
+ V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_);
+ return reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(base_), kPtrComprCageBaseAlignment));
+}
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+// static
+Tagged_t ExternalCodeCompressionScheme::CompressObject(Address tagged) {
+ // This is used to help clang produce better code. Values which could be
+ // invalid pointers need to be compressed with CompressAny.
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ V8_ASSUME((tagged & kPtrComprCageBaseMask) == base_ || HAS_SMI_TAG(tagged));
+#endif
+ return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
+}
+
+// static
+Tagged_t ExternalCodeCompressionScheme::CompressAny(Address tagged) {
+ return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
+}
+
+// static
+Address ExternalCodeCompressionScheme::DecompressTaggedSigned(
+ Tagged_t raw_value) {
+ // For runtime code the upper 32-bits of the Smi value do not matter.
+ return static_cast<Address>(raw_value);
+}
+
+// static
+template <typename TOnHeapAddress>
+Address ExternalCodeCompressionScheme::DecompressTagged(
+ TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ Address cage_base = base();
+#else
+ Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
+#endif
+ Address result = cage_base + static_cast<Address>(raw_value);
+ V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
+ return result;
+}
+
+#endif // V8_EXTERNAL_CODE_SPACE
+
//
// Misc functions.
//
@@ -108,25 +214,22 @@ Address V8HeapCompressionScheme::GetPtrComprCageBaseAddress(
}
// static
-Tagged_t V8HeapCompressionScheme::CompressTagged(Address tagged) {
+Tagged_t V8HeapCompressionScheme::CompressObject(Address tagged) {
UNREACHABLE();
}
// static
-Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
- UNREACHABLE();
-}
+Tagged_t V8HeapCompressionScheme::CompressAny(Address tagged) { UNREACHABLE(); }
-template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedPointer(
- TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+// static
+Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
UNREACHABLE();
}
// static
template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedAny(
- TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
+ Tagged_t raw_value) {
UNREACHABLE();
}
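
The renamed CompressObject/DecompressTagged pair boils down to simple address arithmetic against a 4 GB-aligned cage base. A numeric sketch with stand-in typedefs (Address, Tagged_t and the base value are assumptions for illustration):

    #include <cstdint>

    using Address = uint64_t;
    using Tagged_t = uint32_t;

    // Example 4GB-aligned cage base.
    constexpr Address kCageBase = Address{0x12} << 32;

    constexpr Tagged_t Compress(Address tagged) {
      // Keeping only the low 32 bits is enough: every in-cage object lives
      // within 4GB of the base.
      return static_cast<Tagged_t>(tagged);
    }

    constexpr Address Decompress(Address cage_base, Tagged_t raw) {
      // Adding the base restores the full pointer; the V8_ASSUME in the real
      // code additionally tells the compiler the low 32 bits are unchanged.
      return cage_base + static_cast<Address>(raw);
    }

    static_assert(Decompress(kCageBase, Compress(kCageBase + 0x4000)) ==
                      kCageBase + 0x4000,
                  "round trip within the cage");

With V8_COMPRESS_POINTERS_IN_SHARED_CAGE the base comes from the process-wide static base(); otherwise it is recomputed from an on-heap address, which is what GetPtrComprCageBaseAddress does above.
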
diff --git a/deps/v8/src/common/ptr-compr.cc b/deps/v8/src/common/ptr-compr.cc
new file mode 100644
index 0000000000..899bb4798a
--- /dev/null
+++ b/deps/v8/src/common/ptr-compr.cc
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/common/ptr-compr.h"
+
+namespace v8::internal {
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+uintptr_t V8HeapCompressionScheme::base_ = kNullAddress;
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+uintptr_t ExternalCodeCompressionScheme::base_ = kNullAddress;
+#endif // V8_EXTERNAL_CODE_SPACE
+
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+} // namespace v8::internal
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index 5a47331a54..bfe99cea9e 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -13,7 +13,7 @@ namespace v8::internal {
// This is just a collection of compression scheme related functions. Having
// such a class allows plugging different decompression scheme in certain
// places by introducing another CompressionScheme class with a customized
-// implementation. This is useful, for example, for CodeDataContainer::code
+// implementation. This is useful, for example, for Code::code
// field (see CodeObjectSlot).
class V8HeapCompressionScheme {
public:
@@ -24,20 +24,20 @@ class V8HeapCompressionScheme {
// Compresses full-pointer representation of a tagged value to on-heap
// representation.
- V8_INLINE static Tagged_t CompressTagged(Address tagged);
+ // Must only be used for compressing object pointers since this function
+ // assumes that we deal with a valid address inside the pointer compression
+ // cage.
+ V8_INLINE static Tagged_t CompressObject(Address tagged);
+ // Compress a potentially invalid pointer.
+ V8_INLINE static Tagged_t CompressAny(Address tagged);
// Decompresses smi value.
V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
- // Decompresses weak or strong heap object pointer or forwarding pointer,
- // preserving both weak- and smi- tags.
- template <typename TOnHeapAddress>
- V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
- Tagged_t raw_value);
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <typename TOnHeapAddress>
- V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
- Tagged_t raw_value);
+ V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
+ Tagged_t raw_value);
// Given a 64bit raw value, found on the stack, calls the callback function
// with all possible pointers that may be "contained" in compressed form in
@@ -47,12 +47,61 @@ class V8HeapCompressionScheme {
V8_INLINE static void ProcessIntermediatePointers(
PtrComprCageBase cage_base, Address raw_value,
ProcessPointerCallback callback);
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ // Process-wide cage base value used for decompression.
+ V8_INLINE static void InitBase(Address base);
+ V8_INLINE static Address base();
+
+ private:
+ static V8_EXPORT_PRIVATE uintptr_t base_ V8_CONSTINIT;
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
};
#ifdef V8_EXTERNAL_CODE_SPACE
-// Compression scheme used for fields containing Code objects (namely for the
-// CodeDataContainer::code field).
-using ExternalCodeCompressionScheme = V8HeapCompressionScheme;
+
+// Compression scheme used for fields containing InstructionStream objects
+// (namely for the Code::code field). Same as
+// V8HeapCompressionScheme but with a different base value.
+class ExternalCodeCompressionScheme {
+ public:
+ V8_INLINE static Address PrepareCageBaseAddress(Address on_heap_addr);
+
+ // Note that this compression scheme doesn't allow reconstruction of the cage
+ // base value from any arbitrary value, thus the cage base has to be passed
+ // explicitly to the decompression functions.
+ static Address GetPtrComprCageBaseAddress(Address on_heap_addr) = delete;
+
+ V8_INLINE static Address GetPtrComprCageBaseAddress(
+ PtrComprCageBase cage_base);
+
+ // Compresses full-pointer representation of a tagged value to on-heap
+ // representation.
+ // Must only be used for compressing object pointers (incl. SMI) since this
+ // function assumes pointers to be inside the pointer compression cage.
+ V8_INLINE static Tagged_t CompressObject(Address tagged);
+ // Compress anything that does not follow the above requirements (e.g. a maybe
+ // object, or a marker bit pattern).
+ V8_INLINE static Tagged_t CompressAny(Address tagged);
+
+ // Decompresses smi value.
+ V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
+
+ // Decompresses any tagged value, preserving both weak- and smi- tags.
+ template <typename TOnHeapAddress>
+ V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
+ Tagged_t raw_value);
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ // Process-wide cage base value used for decompression.
+ V8_INLINE static void InitBase(Address base);
+ V8_INLINE static Address base();
+
+ private:
+ static V8_EXPORT_PRIVATE uintptr_t base_ V8_CONSTINIT;
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+};
+
#endif // V8_EXTERNAL_CODE_SPACE
// Accessors for fields that may be unaligned due to pointer compression.
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index d85832bdcd..73e181b408 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -126,7 +126,8 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
output_queue_.pop();
}
- Compiler::DisposeTurbofanCompilationJob(job.get(), restore_function_code);
+ Compiler::DisposeTurbofanCompilationJob(isolate_, job.get(),
+ restore_function_code);
}
}
@@ -138,7 +139,7 @@ void OptimizingCompileDispatcher::FlushInputQueue() {
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
- Compiler::DisposeTurbofanCompilationJob(job.get(), true);
+ Compiler::DisposeTurbofanCompilationJob(isolate_, job.get(), true);
}
}
@@ -205,7 +206,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
function->ShortPrint();
PrintF(" as it has already been optimized.\n");
}
- Compiler::DisposeTurbofanCompilationJob(job.get(), false);
+ Compiler::DisposeTurbofanCompilationJob(isolate_, job.get(), false);
continue;
}
diff --git a/deps/v8/src/compiler/DEPS b/deps/v8/src/compiler/DEPS
index efc635b308..78af8f08f8 100644
--- a/deps/v8/src/compiler/DEPS
+++ b/deps/v8/src/compiler/DEPS
@@ -13,5 +13,6 @@ specific_include_rules = {
],
"wasm-compiler.h": [
"-src/compiler",
+ "+src/compiler/wasm-compiler-definitions.h",
],
}
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index f8ce4bd0f2..be0785d50f 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -3,6 +3,7 @@ mslekova@chromium.org
nicohartmann@chromium.org
tebbi@chromium.org
thibaudm@chromium.org
+dmercadier@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=clemensb@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 3156b22a1e..376170cce7 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -156,6 +156,35 @@ FieldAccess AccessBuilder::ForJSCollectionIteratorIndex() {
}
// static
+FieldAccess AccessBuilder::ForJSExternalObjectValue() {
+ FieldAccess access = {
+ kTaggedBase,
+ JSExternalObject::kValueOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ "JSExternalObjectValue",
+ ConstFieldInfo::None(),
+ false,
+ kExternalObjectValueTag,
+ };
+ return access;
+}
+
+#ifdef V8_ENABLE_SANDBOX
+// static
+FieldAccess AccessBuilder::ForJSExternalObjectPointerHandle() {
+ FieldAccess access = {
+ kTaggedBase, JSExternalObject::kValueOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint32, MachineType::Uint32(),
+ kNoWriteBarrier, "JSExternalObjectPointerHandle"};
+ return access;
+}
+#endif
+
+// static
FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
FieldAccess access = {
kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
@@ -740,6 +769,15 @@ FieldAccess AccessBuilder::ForNameRawHashField() {
}
// static
+FieldAccess AccessBuilder::ForFreeSpaceSize() {
+ FieldAccess access = {kTaggedBase, FreeSpace::kSizeOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForStringLength() {
FieldAccess access = {kTaggedBase,
String::kLengthOffset,
@@ -1135,10 +1173,16 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
MachineType::Float64(), kNoWriteBarrier};
return access;
}
- case kExternalBigInt64Array:
- case kExternalBigUint64Array:
- // TODO(neis/jkummerow): Define appropriate types.
- UNIMPLEMENTED();
+ case kExternalBigInt64Array: {
+ ElementAccess access = {taggedness, header_size, Type::SignedBigInt64(),
+ MachineType::Int64(), kNoWriteBarrier};
+ return access;
+ }
+ case kExternalBigUint64Array: {
+ ElementAccess access = {taggedness, header_size, Type::UnsignedBigInt64(),
+ MachineType::Uint64(), kNoWriteBarrier};
+ return access;
+ }
}
UNREACHABLE();
}
@@ -1299,6 +1343,20 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
}
// static
+FieldAccess AccessBuilder::ForNameDictionaryFlagsIndex() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(NameDictionary::kFlagsIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier,
+ "NameDictionaryFlagsIndex"};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
FieldAccess access = {kTaggedBase,
FeedbackCell::kInterruptBudgetOffset,
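
The new accessors follow the FieldAccess descriptor style: a plain struct bundling everything the compiler needs to load or store one field (base taggedness, byte offset, type, machine representation, write-barrier kind). A simplified sketch; the field names and offset below are illustrative, not the exact FieldAccess layout:

    enum class BaseTaggedness { kTaggedBase, kUntaggedBase };
    enum class WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };

    struct DemoFieldAccess {
      BaseTaggedness base_is_tagged;
      int offset;                      // byte offset from the (tagged) base
      const char* creator_mnemonic;    // for tracing/debug output
      WriteBarrierKind write_barrier;  // Smi-valued fields can skip the barrier
    };

    // Factory in the AccessBuilder style: one canonical descriptor per field.
    DemoFieldAccess ForDemoFreeSpaceSize() {
      return {BaseTaggedness::kTaggedBase, /*offset=*/8, "DemoFreeSpaceSize",
              WriteBarrierKind::kNoWriteBarrier};
    }
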
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index f4735aee52..b800436615 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -74,6 +74,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSCollectionIterator::index() field.
static FieldAccess ForJSCollectionIteratorIndex();
+ // Provides access to an ExternalPointer through the JSExternalObject::value()
+ // field.
+ static FieldAccess ForJSExternalObjectValue();
+
+#ifdef V8_ENABLE_SANDBOX
+ // Provides access to JSExternalObject::value() field.
+ static FieldAccess ForJSExternalObjectPointerHandle();
+#endif
+
// Provides access to JSFunction::prototype_or_initial_map() field.
static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -233,6 +242,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to Name::raw_hash_field() field.
static FieldAccess ForNameRawHashField();
+ // Provides access to FreeSpace::size() field
+ static FieldAccess ForFreeSpaceSize();
+
// Provides access to String::length() field.
static FieldAccess ForStringLength();
@@ -348,6 +360,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryNextEnumerationIndex();
static FieldAccess ForDictionaryObjectHashIndex();
+ // Provides access to NameDictionary fields.
+ static FieldAccess ForNameDictionaryFlagsIndex();
+
// Provides access to FeedbackCell fields.
static FieldAccess ForFeedbackCellInterruptBudget();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 7c35df243a..3120077a77 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -9,6 +9,7 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/heap-refs.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
@@ -97,26 +98,25 @@ PropertyAccessInfo PropertyAccessInfo::Invalid(Zone* zone) {
}
// static
-PropertyAccessInfo PropertyAccessInfo::NotFound(
- Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder) {
+PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone, MapRef receiver_map,
+ OptionalJSObjectRef holder) {
return PropertyAccessInfo(zone, kNotFound, holder, {{receiver_map}, zone});
}
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- Zone* zone, MapRef receiver_map,
+ JSHeapBroker* broker, Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
- base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ Type field_type, MapRef field_owner_map, OptionalMapRef field_map,
+ OptionalJSObjectRef holder, OptionalMapRef transition_map) {
DCHECK(!field_representation.IsNone());
DCHECK_IMPLIES(
field_representation.IsDouble(),
HasFieldRepresentationDependenciesOnMap(
- dependencies, transition_map.has_value()
- ? transition_map->object()
- : holder.has_value() ? holder->map().object()
- : receiver_map.object()));
+ dependencies, transition_map.has_value() ? transition_map->object()
+ : holder.has_value() ? holder->map(broker).object()
+ : receiver_map.object()));
return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
field_representation, field_type, field_owner_map,
field_map, {{receiver_map}, zone},
@@ -128,8 +128,8 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
- base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ Type field_type, MapRef field_owner_map, OptionalMapRef field_map,
+ OptionalJSObjectRef holder, OptionalMapRef transition_map) {
DCHECK(!field_representation.IsNone());
return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
field_index, field_representation, field_type,
@@ -139,9 +139,8 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
// static
PropertyAccessInfo PropertyAccessInfo::FastAccessorConstant(
- Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
- base::Optional<ObjectRef> constant,
- base::Optional<JSObjectRef> api_holder) {
+ Zone* zone, MapRef receiver_map, OptionalJSObjectRef holder,
+ OptionalObjectRef constant, OptionalJSObjectRef api_holder) {
return PropertyAccessInfo(zone, kFastAccessorConstant, holder, constant,
api_holder, {} /* name */, {{receiver_map}, zone});
}
@@ -171,9 +170,8 @@ PropertyAccessInfo PropertyAccessInfo::DictionaryProtoDataConstant(
// static
PropertyAccessInfo PropertyAccessInfo::DictionaryProtoAccessorConstant(
- Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
- ObjectRef constant, base::Optional<JSObjectRef> api_holder,
- NameRef property_name) {
+ Zone* zone, MapRef receiver_map, OptionalJSObjectRef holder,
+ ObjectRef constant, OptionalJSObjectRef api_holder, NameRef property_name) {
return PropertyAccessInfo(zone, kDictionaryProtoAccessorConstant, holder,
constant, api_holder, property_name,
{{receiver_map}, zone});
@@ -188,7 +186,7 @@ PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
dictionary_index_(InternalIndex::NotFound()) {}
PropertyAccessInfo::PropertyAccessInfo(
- Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ Zone* zone, Kind kind, OptionalJSObjectRef holder,
ZoneVector<MapRef>&& lookup_start_object_maps)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
@@ -199,9 +197,9 @@ PropertyAccessInfo::PropertyAccessInfo(
dictionary_index_(InternalIndex::NotFound()) {}
PropertyAccessInfo::PropertyAccessInfo(
- Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
- base::Optional<ObjectRef> constant, base::Optional<JSObjectRef> api_holder,
- base::Optional<NameRef> name, ZoneVector<MapRef>&& lookup_start_object_maps)
+ Zone* zone, Kind kind, OptionalJSObjectRef holder,
+ OptionalObjectRef constant, OptionalJSObjectRef api_holder,
+ OptionalNameRef name, ZoneVector<MapRef>&& lookup_start_object_maps)
: kind_(kind),
lookup_start_object_maps_(lookup_start_object_maps),
constant_(constant),
@@ -216,10 +214,9 @@ PropertyAccessInfo::PropertyAccessInfo(
}
PropertyAccessInfo::PropertyAccessInfo(
- Kind kind, base::Optional<JSObjectRef> holder,
- base::Optional<MapRef> transition_map, FieldIndex field_index,
- Representation field_representation, Type field_type,
- MapRef field_owner_map, base::Optional<MapRef> field_map,
+ Kind kind, OptionalJSObjectRef holder, OptionalMapRef transition_map,
+ FieldIndex field_index, Representation field_representation,
+ Type field_type, MapRef field_owner_map, OptionalMapRef field_map,
ZoneVector<MapRef>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
@@ -238,7 +235,7 @@ PropertyAccessInfo::PropertyAccessInfo(
}
PropertyAccessInfo::PropertyAccessInfo(
- Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ Zone* zone, Kind kind, OptionalJSObjectRef holder,
ZoneVector<MapRef>&& lookup_start_object_maps,
InternalIndex dictionary_index, NameRef name)
: kind_(kind),
@@ -253,7 +250,7 @@ PropertyAccessInfo::PropertyAccessInfo(
namespace {
template <class RefT>
-bool OptionalRefEquals(base::Optional<RefT> lhs, base::Optional<RefT> rhs) {
+bool OptionalRefEquals(OptionalRef<RefT> lhs, OptionalRef<RefT> rhs) {
if (!lhs.has_value()) return !rhs.has_value();
if (!rhs.has_value()) return false;
return lhs->equals(rhs.value());
@@ -359,13 +356,8 @@ ConstFieldInfo PropertyAccessInfo::GetConstFieldInfo() const {
: ConstFieldInfo::None();
}
-AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
- CompilationDependencies* dependencies,
- Zone* zone)
- : broker_(broker),
- dependencies_(dependencies),
- type_cache_(TypeCache::Get()),
- zone_(zone) {}
+AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker, Zone* zone)
+ : broker_(broker), type_cache_(TypeCache::Get()), zone_(zone) {}
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
MapRef map, AccessMode access_mode) const {
@@ -392,15 +384,13 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
for (auto const& group : feedback.transition_groups()) {
DCHECK(!group.empty());
- base::Optional<MapRef> target =
- MakeRefAssumeMemoryFence(broker(), group.front());
+ OptionalMapRef target = group.front();
base::Optional<ElementAccessInfo> access_info =
ComputeElementAccessInfo(target.value(), access_mode);
if (!access_info.has_value()) return false;
for (size_t i = 1; i < group.size(); ++i) {
- base::Optional<MapRef> map_ref =
- MakeRefAssumeMemoryFence(broker(), group[i]);
+ OptionalMapRef map_ref = group[i];
if (!map_ref.has_value()) continue;
access_info->AddTransitionSource(map_ref.value());
}
@@ -410,12 +400,12 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
}
PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
- MapRef receiver_map, MapRef map, NameRef name,
- base::Optional<JSObjectRef> holder, InternalIndex descriptor,
- AccessMode access_mode) const {
+ MapRef receiver_map, MapRef map, NameRef name, OptionalJSObjectRef holder,
+ InternalIndex descriptor, AccessMode access_mode) const {
DCHECK(descriptor.is_found());
// TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
- Handle<DescriptorArray> descriptors = map.instance_descriptors().object();
+ Handle<DescriptorArray> descriptors =
+ map.instance_descriptors(broker()).object();
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
Representation details_representation = details.representation();
@@ -433,14 +423,14 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// BlockContext, an internal object.
Type field_type = name.object()->IsPrivateBrand() ? Type::OtherInternal()
: Type::NonInternal();
- base::Optional<MapRef> field_map;
+ OptionalMapRef field_map;
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
Handle<FieldType> descriptors_field_type =
broker()->CanonicalPersistentHandle(
descriptors->GetFieldType(descriptor));
- base::Optional<ObjectRef> descriptors_field_type_ref =
+ OptionalObjectRef descriptors_field_type_ref =
TryMakeRef<Object>(broker(), descriptors_field_type);
if (!descriptors_field_type_ref.has_value()) return Invalid();
@@ -475,10 +465,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
map, descriptor, details_representation));
if (descriptors_field_type->IsClass()) {
// Remember the field map, and try to infer a useful type.
- base::Optional<MapRef> maybe_field_map =
+ OptionalMapRef maybe_field_map =
TryMakeRef(broker(), descriptors_field_type->AsClass());
if (!maybe_field_map.has_value()) return Invalid();
- field_type = Type::For(maybe_field_map.value());
+ field_type = Type::For(maybe_field_map.value(), broker());
field_map = maybe_field_map;
}
} else {
@@ -496,14 +486,14 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// Note: FindFieldOwner may be called multiple times throughout one
// compilation. This is safe since its result is fixed for a given map and
// descriptor.
- MapRef field_owner_map = map.FindFieldOwner(descriptor);
+ MapRef field_owner_map = map.FindFieldOwner(broker(), descriptor);
switch (constness) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
- zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
- details_representation, field_type, field_owner_map, field_map,
- holder, {});
+ broker(), zone(), receiver_map, std::move(unrecorded_dependencies),
+ field_index, details_representation, field_type, field_owner_map,
+ field_map, holder, {});
case PropertyConstness::kConst:
return PropertyAccessInfo::FastDataConstant(
@@ -521,7 +511,7 @@ using AccessorsObjectGetter = std::function<Handle<Object>()>;
PropertyAccessInfo AccessorAccessInfoHelper(
Isolate* isolate, Zone* zone, JSHeapBroker* broker,
const AccessInfoFactory* ai_factory, MapRef receiver_map, NameRef name,
- MapRef map, base::Optional<JSObjectRef> holder, AccessMode access_mode,
+ MapRef map, OptionalJSObjectRef holder, AccessMode access_mode,
AccessorsObjectGetter get_accessors) {
if (map.instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map.object()->is_prototype_map());
@@ -537,7 +527,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
}
- base::Optional<CellRef> cell_ref = TryMakeRef(broker, cell);
+ OptionalCellRef cell_ref = TryMakeRef(broker, cell);
if (!cell_ref.has_value()) {
return PropertyAccessInfo::Invalid(zone);
}
@@ -561,10 +551,10 @@ PropertyAccessInfo AccessorAccessInfoHelper(
access_mode == AccessMode::kLoad ? accessors->getter(kAcquireLoad)
: accessors->setter(kAcquireLoad));
- base::Optional<ObjectRef> accessor_ref = TryMakeRef(broker, accessor);
+ OptionalObjectRef accessor_ref = TryMakeRef(broker, accessor);
if (!accessor_ref.has_value()) return PropertyAccessInfo::Invalid(zone);
- base::Optional<JSObjectRef> api_holder_ref;
+ OptionalJSObjectRef api_holder_ref;
if (!accessor->IsJSFunction()) {
CallOptimization optimization(broker->local_isolate_or_isolate(), accessor);
if (!optimization.is_simple_api_call() ||
@@ -595,7 +585,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
base::Optional<Name> cached_property_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate, *accessor);
if (cached_property_name.has_value()) {
- base::Optional<NameRef> cached_property_name_ref =
+ OptionalNameRef cached_property_name_ref =
TryMakeRef(broker, cached_property_name.value());
if (cached_property_name_ref.has_value()) {
PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
@@ -619,7 +609,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
MapRef receiver_map, NameRef name, MapRef holder_map,
- base::Optional<JSObjectRef> holder, InternalIndex descriptor,
+ OptionalJSObjectRef holder, InternalIndex descriptor,
AccessMode access_mode) const {
DCHECK(descriptor.is_found());
Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
@@ -641,7 +631,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
InternalIndex dictionary_index, AccessMode access_mode,
PropertyDetails details) const {
CHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
- DCHECK(holder.map().object()->is_prototype_map());
+ DCHECK(holder.map(broker()).object()->is_prototype_map());
DCHECK_EQ(access_mode, AccessMode::kLoad);
// We can only inline accesses to constant properties.
@@ -659,12 +649,12 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
dictionary_index);
};
return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
- receiver_map, name, holder.map(), holder,
- access_mode, get_accessors);
+ receiver_map, name, holder.map(broker()),
+ holder, access_mode, get_accessors);
}
bool AccessInfoFactory::TryLoadPropertyDetails(
- MapRef map, base::Optional<JSObjectRef> maybe_holder, NameRef name,
+ MapRef map, OptionalJSObjectRef maybe_holder, NameRef name,
InternalIndex* index_out, PropertyDetails* details_out) const {
if (map.is_dictionary_map()) {
DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
@@ -696,7 +686,7 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
}
}
} else {
- DescriptorArray descriptors = *map.instance_descriptors().object();
+ DescriptorArray descriptors = *map.instance_descriptors(broker()).object();
*index_out = descriptors.Search(*name.object(), *map.object(), true);
if (index_out->is_found()) {
*details_out = descriptors.GetDetails(*index_out);
@@ -736,17 +726,18 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Remember the receiver map. We use {map} as loop variable.
MapRef receiver_map = map;
- base::Optional<JSObjectRef> holder;
+ OptionalJSObjectRef holder;
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
// Note: Keep sync'd with
// CompilationDependencies::DependOnStablePrototypeChains.
if (receiver_map.IsPrimitiveMap()) {
- base::Optional<JSFunctionRef> constructor =
- broker()->target_native_context().GetConstructorFunction(receiver_map);
+ OptionalJSFunctionRef constructor =
+ broker()->target_native_context().GetConstructorFunction(broker(),
+ receiver_map);
if (!constructor.has_value()) return Invalid();
- map = constructor->initial_map(broker()->dependencies());
+ map = constructor->initial_map(broker());
DCHECK(!map.IsPrimitiveMap());
}
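For a primitive receiver the walk continues on the initial map of the corresponding wrapper constructor, which is what the ES6 GetV note above refers to. A toy illustration with invented stand-in types (not V8's Map or JSFunction):

// Sketch only: MapLike/ConstructorLike are invented; real lookups go through
// the native context's constructor functions.
#include <cstdio>
#include <optional>
#include <string>

struct MapLike { std::string name; bool is_primitive; };
struct ConstructorLike { MapLike initial_map; };

std::optional<ConstructorLike> GetConstructorFunction(const MapLike& receiver_map) {
  if (receiver_map.name == "String")
    return ConstructorLike{{"String wrapper map", false}};
  if (receiver_map.name == "Number")
    return ConstructorLike{{"Number wrapper map", false}};
  return std::nullopt;  // e.g. null/undefined: the access cannot be optimized
}

int main() {
  MapLike receiver_map{"String", true};
  MapLike map = receiver_map;
  if (map.is_primitive) {
    auto constructor = GetConstructorFunction(receiver_map);
    if (!constructor.has_value()) return 1;
    map = constructor->initial_map;  // continue the prototype walk here
  }
  std::printf("lookup continues on: %s\n", map.name.c_str());
  return 0;
}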
@@ -864,7 +855,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && holder.has_value()) {
// At this point, we are past the first loop iteration.
DCHECK(holder->object()->map().is_prototype_map());
- DCHECK(!holder->map().equals(receiver_map));
+ DCHECK(!holder->map(broker()).equals(receiver_map));
fast_mode_prototype_on_chain =
fast_mode_prototype_on_chain || !map.is_dictionary_map();
@@ -875,9 +866,9 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Walk up the prototype chain.
// Load the map's prototype's map to guarantee that every time we use it,
// we use the same Map.
- HeapObjectRef prototype = map.prototype();
+ HeapObjectRef prototype = map.prototype(broker());
- MapRef map_prototype_map = prototype.map();
+ MapRef map_prototype_map = prototype.map(broker());
if (!map_prototype_map.object()->IsJSObjectMap()) {
// Don't allow proxies on the prototype chain.
if (!prototype.IsNull()) {
@@ -982,6 +973,9 @@ void AccessInfoFactory::MergePropertyAccessInfos(
CHECK(!result->empty());
}
+CompilationDependencies* AccessInfoFactory::dependencies() const {
+ return broker()->dependencies();
+}
Isolate* AccessInfoFactory::isolate() const { return broker()->isolate(); }
namespace {
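Rather than caching a CompilationDependencies pointer next to the broker that already owns one, the factory now derives it on demand, so the two can never get out of sync. A minimal sketch of the delegation pattern with stand-in types:

// Sketch only: BrokerLike and DependenciesLike are invented stand-ins.
struct DependenciesLike {};

struct BrokerLike {
  DependenciesLike* dependencies() { return &deps_; }
  DependenciesLike deps_;
};

class FactoryLike {
 public:
  explicit FactoryLike(BrokerLike* broker) : broker_(broker) {}
  // Delegating accessor replaces a redundant dependencies_ member.
  DependenciesLike* dependencies() const { return broker_->dependencies(); }

 private:
  BrokerLike* const broker_;
};

int main() {
  BrokerLike broker;
  FactoryLike factory(&broker);
  return factory.dependencies() == broker.dependencies() ? 0 : 1;
}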
@@ -1012,26 +1006,22 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
if (feedback.transition_groups().empty()) return {};
DCHECK(!feedback.transition_groups().front().empty());
- Handle<Map> first_map = feedback.transition_groups().front().front();
- base::Optional<MapRef> first_map_ref = TryMakeRef(broker(), first_map);
- if (!first_map_ref.has_value()) return {};
- InstanceType instance_type = first_map_ref->instance_type();
- ElementsKind elements_kind = first_map_ref->elements_kind();
+ MapRef first_map = feedback.transition_groups().front().front();
+ InstanceType instance_type = first_map.instance_type();
+ ElementsKind elements_kind = first_map.elements_kind();
ZoneVector<MapRef> maps(zone());
for (auto const& group : feedback.transition_groups()) {
- for (Handle<Map> map_handle : group) {
- base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
- if (!map.has_value()) return {};
- if (map->instance_type() != instance_type ||
- !map->CanInlineElementAccess()) {
+ for (MapRef map : group) {
+ if (map.instance_type() != instance_type ||
+ !map.CanInlineElementAccess()) {
return {};
}
- if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
+ if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
.To(&elements_kind)) {
return {};
}
- maps.push_back(map.value());
+ maps.push_back(map);
}
}
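The consolidation loop folds every map in the feedback into one generalized elements kind and bails out (empty optional) as soon as two kinds cannot be unified. A toy version of that fold with an invented three-element lattice, not V8's ElementsKind rules:

// Sketch only: the lattice here is made up; the structure of the loop is the point.
#include <optional>
#include <vector>

enum class Kind { kSmi, kDouble, kAny, kNonNumeric };

std::optional<Kind> Generalize(Kind a, Kind b) {
  if (a == b) return a;
  if (a == Kind::kNonNumeric || b == Kind::kNonNumeric) return std::nullopt;
  if (a == Kind::kAny || b == Kind::kAny) return Kind::kAny;
  return Kind::kDouble;  // kSmi and kDouble generalize to kDouble
}

std::optional<Kind> Consolidate(const std::vector<Kind>& kinds) {
  if (kinds.empty()) return std::nullopt;
  Kind result = kinds.front();
  for (Kind k : kinds) {
    std::optional<Kind> generalized = Generalize(result, k);
    if (!generalized.has_value()) return std::nullopt;  // incompatible group
    result = *generalized;
  }
  return result;
}

int main() {
  std::vector<Kind> kinds = {Kind::kSmi, Kind::kDouble, Kind::kSmi};
  return Consolidate(kinds) == Kind::kDouble ? 0 : 1;
}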
@@ -1073,29 +1063,28 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
}
}
// Special fields are always mutable.
- return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
- field_representation, field_type, map,
- {}, {}, {});
+ return PropertyAccessInfo::DataField(broker(), zone(), map, {{}, zone()},
+ field_index, field_representation,
+ field_type, map, {}, {}, {});
}
return Invalid();
}
PropertyAccessInfo AccessInfoFactory::LookupTransition(
- MapRef map, NameRef name, base::Optional<JSObjectRef> holder,
+ MapRef map, NameRef name, OptionalJSObjectRef holder,
PropertyAttributes attrs) const {
// Check if the {map} has a data transition with the given {name}.
Map transition =
TransitionsAccessor(isolate(), *map.object(), true)
.SearchTransition(*name.object(), PropertyKind::kData, attrs);
if (transition.is_null()) return Invalid();
- base::Optional<MapRef> maybe_transition_map =
- TryMakeRef(broker(), transition);
+ OptionalMapRef maybe_transition_map = TryMakeRef(broker(), transition);
if (!maybe_transition_map.has_value()) return Invalid();
MapRef transition_map = maybe_transition_map.value();
InternalIndex const number = transition_map.object()->LastAdded();
Handle<DescriptorArray> descriptors =
- transition_map.instance_descriptors().object();
+ transition_map.instance_descriptors(broker()).object();
PropertyDetails const details = descriptors->GetDetails(number);
// Don't bother optimizing stores to read-only properties.
@@ -1111,7 +1100,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
FieldIndex field_index = FieldIndex::ForPropertyIndex(
*transition_map.object(), index, details_representation);
Type field_type = Type::NonInternal();
- base::Optional<MapRef> field_map;
+ OptionalMapRef field_map;
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
@@ -1130,7 +1119,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
// TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
Handle<FieldType> descriptors_field_type =
broker()->CanonicalPersistentHandle(descriptors->GetFieldType(number));
- base::Optional<ObjectRef> descriptors_field_type_ref =
+ OptionalObjectRef descriptors_field_type_ref =
TryMakeRef<Object>(broker(), descriptors_field_type);
if (!descriptors_field_type_ref.has_value()) return Invalid();
@@ -1146,10 +1135,10 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
dependencies()->FieldTypeDependencyOffTheRecord(
transition_map, number, *descriptors_field_type_ref));
// Remember the field map, and try to infer a useful type.
- base::Optional<MapRef> maybe_field_map =
+ OptionalMapRef maybe_field_map =
TryMakeRef(broker(), descriptors_field_type->AsClass());
if (!maybe_field_map.has_value()) return Invalid();
- field_type = Type::For(maybe_field_map.value());
+ field_type = Type::For(maybe_field_map.value(), broker());
field_map = maybe_field_map;
}
}
@@ -1162,9 +1151,9 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
switch (dependencies()->DependOnFieldConstness(transition_map, number)) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
- zone(), map, std::move(unrecorded_dependencies), field_index,
- details_representation, field_type, transition_map, field_map, holder,
- transition_map);
+ broker(), zone(), map, std::move(unrecorded_dependencies),
+ field_index, details_representation, field_type, transition_map,
+ field_map, holder, transition_map);
case PropertyConstness::kConst:
return PropertyAccessInfo::FastDataConstant(
zone(), map, std::move(unrecorded_dependencies), field_index,
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index d75e8d7b2b..0491583d33 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -69,25 +69,22 @@ class PropertyAccessInfo final {
};
static PropertyAccessInfo NotFound(Zone* zone, MapRef receiver_map,
- base::Optional<JSObjectRef> holder);
+ OptionalJSObjectRef holder);
static PropertyAccessInfo DataField(
- Zone* zone, MapRef receiver_map,
+ JSHeapBroker* broker, Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
- base::Optional<JSObjectRef> holder,
- base::Optional<MapRef> transition_map);
+ Type field_type, MapRef field_owner_map, OptionalMapRef field_map,
+ OptionalJSObjectRef holder, OptionalMapRef transition_map);
static PropertyAccessInfo FastDataConstant(
Zone* zone, MapRef receiver_map,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
- Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
- base::Optional<JSObjectRef> holder,
- base::Optional<MapRef> transition_map);
+ Type field_type, MapRef field_owner_map, OptionalMapRef field_map,
+ OptionalJSObjectRef holder, OptionalMapRef transition_map);
static PropertyAccessInfo FastAccessorConstant(
- Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
- base::Optional<ObjectRef> constant,
- base::Optional<JSObjectRef> api_holder);
+ Zone* zone, MapRef receiver_map, OptionalJSObjectRef holder,
+ OptionalObjectRef constant, OptionalJSObjectRef api_holder);
static PropertyAccessInfo ModuleExport(Zone* zone, MapRef receiver_map,
CellRef cell);
static PropertyAccessInfo StringLength(Zone* zone, MapRef receiver_map);
@@ -96,8 +93,8 @@ class PropertyAccessInfo final {
Zone* zone, MapRef receiver_map, JSObjectRef holder,
InternalIndex dict_index, NameRef name);
static PropertyAccessInfo DictionaryProtoAccessorConstant(
- Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
- ObjectRef constant, base::Optional<JSObjectRef> api_holder, NameRef name);
+ Zone* zone, MapRef receiver_map, OptionalJSObjectRef holder,
+ ObjectRef constant, OptionalJSObjectRef api_holder, NameRef name);
bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
Zone* zone) V8_WARN_UNUSED_RESULT;
@@ -130,7 +127,7 @@ class PropertyAccessInfo final {
Kind kind() const { return kind_; }
// The object where the property definition was found.
- base::Optional<JSObjectRef> holder() const {
+ OptionalJSObjectRef holder() const {
// TODO(neis): There was a CHECK here that tries to protect against
// using the access info without recording its dependencies first.
// Find a more suitable place for it.
@@ -141,12 +138,12 @@ class PropertyAccessInfo final {
// FunctionCallbackInfo::Holder().
// Don't mix it up with holder in a "object where the property was found"
// sense.
- base::Optional<JSObjectRef> api_holder() const { return api_holder_; }
- base::Optional<MapRef> transition_map() const {
+ OptionalJSObjectRef api_holder() const { return api_holder_; }
+ OptionalMapRef transition_map() const {
DCHECK(!HasDictionaryHolder());
return transition_map_;
}
- base::Optional<ObjectRef> constant() const {
+ OptionalObjectRef constant() const {
DCHECK_IMPLIES(constant_.has_value(),
IsModuleExport() || IsFastAccessorConstant() ||
IsDictionaryProtoAccessorConstant());
@@ -165,7 +162,7 @@ class PropertyAccessInfo final {
DCHECK(!HasDictionaryHolder());
return field_representation_;
}
- base::Optional<MapRef> field_map() const {
+ OptionalMapRef field_map() const {
DCHECK(!HasDictionaryHolder());
return field_map_;
}
@@ -185,50 +182,47 @@ class PropertyAccessInfo final {
private:
explicit PropertyAccessInfo(Zone* zone);
- PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ PropertyAccessInfo(Zone* zone, Kind kind, OptionalJSObjectRef holder,
ZoneVector<MapRef>&& lookup_start_object_maps);
- PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
- base::Optional<ObjectRef> constant,
- base::Optional<JSObjectRef> api_holder,
- base::Optional<NameRef> name,
+ PropertyAccessInfo(Zone* zone, Kind kind, OptionalJSObjectRef holder,
+ OptionalObjectRef constant, OptionalJSObjectRef api_holder,
+ OptionalNameRef name,
ZoneVector<MapRef>&& lookup_start_object_maps);
- PropertyAccessInfo(Kind kind, base::Optional<JSObjectRef> holder,
- base::Optional<MapRef> transition_map,
- FieldIndex field_index,
+ PropertyAccessInfo(Kind kind, OptionalJSObjectRef holder,
+ OptionalMapRef transition_map, FieldIndex field_index,
Representation field_representation, Type field_type,
- MapRef field_owner_map, base::Optional<MapRef> field_map,
+ MapRef field_owner_map, OptionalMapRef field_map,
ZoneVector<MapRef>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& dependencies);
- PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+ PropertyAccessInfo(Zone* zone, Kind kind, OptionalJSObjectRef holder,
ZoneVector<MapRef>&& lookup_start_object_maps,
InternalIndex dictionary_index, NameRef name);
// Members used for fast and dictionary mode holders:
Kind kind_;
ZoneVector<MapRef> lookup_start_object_maps_;
- base::Optional<ObjectRef> constant_;
- base::Optional<JSObjectRef> holder_;
- base::Optional<JSObjectRef> api_holder_;
+ OptionalObjectRef constant_;
+ OptionalJSObjectRef holder_;
+ OptionalJSObjectRef api_holder_;
// Members only used for fast mode holders:
ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
- base::Optional<MapRef> transition_map_;
+ OptionalMapRef transition_map_;
FieldIndex field_index_;
Representation field_representation_;
Type field_type_;
- base::Optional<MapRef> field_owner_map_;
- base::Optional<MapRef> field_map_;
+ OptionalMapRef field_owner_map_;
+ OptionalMapRef field_map_;
// Members only used for dictionary mode holders:
InternalIndex dictionary_index_;
- base::Optional<NameRef> name_;
+ OptionalNameRef name_;
};
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
- AccessInfoFactory(JSHeapBroker* broker, CompilationDependencies* dependencies,
- Zone* zone);
+ AccessInfoFactory(JSHeapBroker* broker, Zone* zone);
base::Optional<ElementAccessInfo> ComputeElementAccessInfo(
MapRef map, AccessMode access_mode) const;
@@ -263,16 +257,16 @@ class AccessInfoFactory final {
ElementAccessFeedback const& feedback) const;
PropertyAccessInfo LookupSpecialFieldAccessor(MapRef map, NameRef name) const;
PropertyAccessInfo LookupTransition(MapRef map, NameRef name,
- base::Optional<JSObjectRef> holder,
+ OptionalJSObjectRef holder,
PropertyAttributes attrs) const;
- PropertyAccessInfo ComputeDataFieldAccessInfo(
- MapRef receiver_map, MapRef map, NameRef name,
- base::Optional<JSObjectRef> holder, InternalIndex descriptor,
- AccessMode access_mode) const;
+ PropertyAccessInfo ComputeDataFieldAccessInfo(MapRef receiver_map, MapRef map,
+ NameRef name,
+ OptionalJSObjectRef holder,
+ InternalIndex descriptor,
+ AccessMode access_mode) const;
PropertyAccessInfo ComputeAccessorDescriptorAccessInfo(
- MapRef receiver_map, NameRef name, MapRef map,
- base::Optional<JSObjectRef> holder, InternalIndex descriptor,
- AccessMode access_mode) const;
+ MapRef receiver_map, NameRef name, MapRef map, OptionalJSObjectRef holder,
+ InternalIndex descriptor, AccessMode access_mode) const;
PropertyAccessInfo Invalid() const {
return PropertyAccessInfo::Invalid(zone());
@@ -282,18 +276,16 @@ class AccessInfoFactory final {
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* result) const;
- bool TryLoadPropertyDetails(MapRef map,
- base::Optional<JSObjectRef> maybe_holder,
+ bool TryLoadPropertyDetails(MapRef map, OptionalJSObjectRef maybe_holder,
NameRef name, InternalIndex* index_out,
PropertyDetails* details_out) const;
- CompilationDependencies* dependencies() const { return dependencies_; }
+ CompilationDependencies* dependencies() const;
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
Zone* zone() const { return zone_; }
JSHeapBroker* const broker_;
- CompilationDependencies* const dependencies_;
TypeCache const* const type_cache_;
Zone* const zone_;
diff --git a/deps/v8/src/compiler/all-nodes.cc b/deps/v8/src/compiler/all-nodes.cc
index eada0cff8c..78db63f904 100644
--- a/deps/v8/src/compiler/all-nodes.cc
+++ b/deps/v8/src/compiler/all-nodes.cc
@@ -12,7 +12,7 @@ namespace compiler {
AllNodes::AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs)
: reachable(local_zone),
- is_reachable_(graph->NodeCount(), false, local_zone),
+ is_reachable_(static_cast<int>(graph->NodeCount()), local_zone),
only_inputs_(only_inputs) {
Mark(local_zone, graph->end(), graph);
}
@@ -20,14 +20,14 @@ AllNodes::AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs)
AllNodes::AllNodes(Zone* local_zone, Node* end, const Graph* graph,
bool only_inputs)
: reachable(local_zone),
- is_reachable_(graph->NodeCount(), false, local_zone),
+ is_reachable_(static_cast<int>(graph->NodeCount()), local_zone),
only_inputs_(only_inputs) {
Mark(local_zone, end, graph);
}
void AllNodes::Mark(Zone* local_zone, Node* end, const Graph* graph) {
DCHECK_LT(end->id(), graph->NodeCount());
- is_reachable_[end->id()] = true;
+ is_reachable_.Add(end->id());
reachable.push_back(end);
// Find all nodes reachable from {end}.
for (size_t i = 0; i < reachable.size(); i++) {
@@ -36,8 +36,8 @@ void AllNodes::Mark(Zone* local_zone, Node* end, const Graph* graph) {
// TODO(titzer): print a warning.
continue;
}
- if (!is_reachable_[input->id()]) {
- is_reachable_[input->id()] = true;
+ if (!is_reachable_.Contains(input->id())) {
+ is_reachable_.Add(input->id());
reachable.push_back(input);
}
}
@@ -46,8 +46,8 @@ void AllNodes::Mark(Zone* local_zone, Node* end, const Graph* graph) {
if (use == nullptr || use->id() >= graph->NodeCount()) {
continue;
}
- if (!is_reachable_[use->id()]) {
- is_reachable_[use->id()] = true;
+ if (!is_reachable_.Contains(use->id())) {
+ is_reachable_.Add(use->id());
reachable.push_back(use);
}
}
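The marking above switches its backing store from a BoolVector to a BitVector, but the worklist algorithm is unchanged: seed with the end node, then mark and enqueue every unvisited input until the list stops growing. A standalone sketch of that loop, with std::vector<uint8_t> standing in for the bit set:

// Sketch only: Node and the graph layout are invented; the loop mirrors AllNodes::Mark.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<int> inputs;  // ids of input nodes
};

std::vector<int> ReachableFrom(const std::vector<Node>& graph, int end_id) {
  std::vector<uint8_t> is_reachable(graph.size(), 0);
  std::vector<int> reachable;
  is_reachable[end_id] = 1;
  reachable.push_back(end_id);
  for (size_t i = 0; i < reachable.size(); i++) {  // the worklist grows as we scan it
    for (int input : graph[reachable[i]].inputs) {
      if (!is_reachable[input]) {
        is_reachable[input] = 1;
        reachable.push_back(input);
      }
    }
  }
  return reachable;
}

int main() {
  // Node 3 is not an input of anything reachable from node 2.
  std::vector<Node> graph = {{0, {}}, {1, {0}}, {2, {1}}, {3, {}}};
  std::printf("reachable from node 2: %zu nodes\n", ReachableFrom(graph, 2).size());
  return 0;
}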
diff --git a/deps/v8/src/compiler/all-nodes.h b/deps/v8/src/compiler/all-nodes.h
index b86c8fa132..696aa6dd23 100644
--- a/deps/v8/src/compiler/all-nodes.h
+++ b/deps/v8/src/compiler/all-nodes.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_ALL_NODES_H_
#include "src/compiler/node.h"
-#include "src/zone/zone-containers.h"
+#include "src/utils/bit-vector.h"
namespace v8 {
namespace internal {
@@ -32,8 +32,8 @@ class AllNodes {
bool IsReachable(const Node* node) const {
if (!node) return false;
- size_t id = node->id();
- return id < is_reachable_.size() && is_reachable_[id];
+ int id = node->id();
+ return id < is_reachable_.length() && is_reachable_.Contains(id);
}
NodeVector reachable; // Nodes reachable from end.
@@ -41,7 +41,7 @@ class AllNodes {
private:
void Mark(Zone* local_zone, Node* end, const Graph* graph);
- BoolVector is_reachable_;
+ BitVector is_reachable_;
const bool only_inputs_;
};
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index 28d3fbbc43..cab3782c97 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -18,8 +18,10 @@ namespace compiler {
// allocated object and also provides helpers for commonly allocated objects.
class AllocationBuilder final {
public:
- AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
+ AllocationBuilder(JSGraph* jsgraph, JSHeapBroker* broker, Node* effect,
+ Node* control)
: jsgraph_(jsgraph),
+ broker_(broker),
allocation_(nullptr),
effect_(effect),
control_(control) {}
@@ -61,7 +63,7 @@ class AllocationBuilder final {
// Compound store of a constant into a field.
void Store(const FieldAccess& access, const ObjectRef& value) {
- Store(access, jsgraph()->Constant(value));
+ Store(access, jsgraph()->Constant(value, broker_));
}
void FinishAndChange(Node* node) {
@@ -85,6 +87,7 @@ class AllocationBuilder final {
private:
JSGraph* const jsgraph_;
+ JSHeapBroker* const broker_;
Node* allocation_;
Node* effect_;
Node* control_;
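The builder now carries the broker so that the compound Store of a constant can hand it to Constant() without changing any call sites. A minimal sketch of that threading with invented stand-in types:

// Sketch only: all types here are invented stand-ins.
#include <cstdio>

struct BrokerLike {};
struct NodeLike { int value; };

struct GraphLike {
  NodeLike Constant(int value, BrokerLike* /*broker*/) { return NodeLike{value}; }
};

class BuilderLike {
 public:
  BuilderLike(GraphLike* graph, BrokerLike* broker) : graph_(graph), broker_(broker) {}
  // The broker is threaded through implicitly; callers still pass just the value.
  void Store(int field_offset, int value) {
    NodeLike n = graph_->Constant(value, broker_);
    std::printf("store %d at offset %d\n", n.value, field_offset);
  }

 private:
  GraphLike* const graph_;
  BrokerLike* const broker_;
};

int main() {
  GraphLike graph;
  BrokerLike broker;
  BuilderLike(&graph, &broker).Store(0, 42);
  return 0;
}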
diff --git a/deps/v8/src/compiler/backend/OWNERS b/deps/v8/src/compiler/backend/OWNERS
index d55672b606..1ab495a38c 100644
--- a/deps/v8/src/compiler/backend/OWNERS
+++ b/deps/v8/src/compiler/backend/OWNERS
@@ -1,6 +1,4 @@
-bbudge@chromium.org
gdeepti@chromium.org
-zhin@chromium.org
# Plus src/compiler owners.
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 4c5accd7a8..43c94b3e9d 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -29,7 +29,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter {
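Only the accessor behind the `__` macro changes here (tasm() becomes masm()); the convention itself, where the macro hides the assembler so emit sequences read like an assembly listing, stays the same. A tiny self-contained illustration with stand-in types:

// Sketch only: AssemblerLike and GeneratorLike are invented stand-ins.
#include <cstdio>

struct AssemblerLike {
  void mov(int dst, int imm) { std::printf("mov r%d, #%d\n", dst, imm); }
  void add(int dst, int a, int b) { std::printf("add r%d, r%d, r%d\n", dst, a, b); }
};

struct GeneratorLike {
  AssemblerLike* masm() { return &masm_; }

  void Emit() {
#define __ masm()->
    __ mov(0, 1);
    __ add(0, 0, 0);
#undef __
  }

  AssemblerLike masm_;
};

int main() {
  GeneratorLike gen;
  gen.Emit();
  return 0;
}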
@@ -193,9 +193,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- __ CheckPageFlag(
- value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, exit());
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -415,7 +414,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -429,7 +428,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -473,7 +472,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
- UseScratchRegisterScope temps(tasm()); \
+ UseScratchRegisterScope temps(masm()); \
Simd128Register tmp = temps.AcquireQ(); \
Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
@@ -493,7 +492,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
- UseScratchRegisterScope temps(tasm()); \
+ UseScratchRegisterScope temps(masm()); \
Simd128Register tmp = temps.AcquireQ(); \
Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
@@ -518,20 +517,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- tasm->push((*pending_pushes)[0]);
+ masm->push((*pending_pushes)[0]);
break;
case 2:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -542,7 +541,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
}
void AdjustStackPointerForTailCall(
- TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -550,15 +549,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
+ masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+ masm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -601,7 +600,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- tasm(), frame_access_state(),
+ masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
// Pushes of non-register data types are not supported.
@@ -611,26 +610,26 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) {
- FlushPendingPushRegisters(tasm(), frame_access_state(),
+ FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes);
}
move->Eliminate();
}
- FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister);
@@ -641,16 +640,15 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ ldr(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne);
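A minimal scalar sketch of the three-step check the comment describes, with an invented flag layout (the real bit index and field offsets come from the Code object definition):

// Sketch only: the bit position and struct layout are assumptions.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kMarkedForDeoptimizationMask = 1u << 0;  // assumed bit

struct CodeFlagsLike {
  uint32_t kind_specific_flags;
};

bool ShouldBailOutToLazyDeopt(const CodeFlagsLike& code) {
  // 1. read the flags word, 2. test the bit, 3. the caller jumps to the builtin.
  return (code.kind_specific_flags & kMarkedForDeoptimizationMask) != 0;
}

int main() {
  CodeFlagsLike live{0};
  CodeFlagsLike deopted{kMarkedForDeoptimizationMask};
  std::printf("live: %d, deopted: %d\n", ShouldBailOutToLazyDeopt(live),
              ShouldBailOutToLazyDeopt(deopted));
  return 0;
}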
@@ -748,7 +746,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check the function's context matches the context argument.
__ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@@ -859,7 +857,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -931,7 +929,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode;
if (arch_opcode == kArchStoreWithWriteBarrier) {
- mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ mode = RecordWriteModeField::decode(instr->opcode());
} else {
mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
}
@@ -1070,7 +1068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(2), i.OutputSBit());
break;
case kArmMls: {
- CpuFeatureScope scope(tasm(), ARMv7);
+ CpuFeatureScope scope(masm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1094,13 +1092,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.OutputSBit());
break;
case kArmSdiv: {
- CpuFeatureScope scope(tasm(), SUDIV);
+ CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUdiv: {
- CpuFeatureScope scope(tasm(), SUDIV);
+ CpuFeatureScope scope(masm(), SUDIV);
__ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1128,20 +1126,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputSBit());
break;
case kArmBfc: {
- CpuFeatureScope scope(tasm(), ARMv7);
+ CpuFeatureScope scope(masm(), ARMv7);
__ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUbfx: {
- CpuFeatureScope scope(tasm(), ARMv7);
+ CpuFeatureScope scope(masm(), ARMv7);
__ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmSbfx: {
- CpuFeatureScope scope(tasm(), ARMv7);
+ CpuFeatureScope scope(masm(), ARMv7);
__ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1184,7 +1182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmRbit: {
- CpuFeatureScope scope(tasm(), ARMv7);
+ CpuFeatureScope scope(masm(), ARMv7);
__ rbit(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1379,7 +1377,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmVmodF64: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1399,7 +1397,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintm(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@@ -1409,12 +1407,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintmF64: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintpF32: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintp(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@@ -1424,12 +1422,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintpF64: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintzF32: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintz(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@@ -1439,17 +1437,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintzF64: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintaF64: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintnF32: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintn(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0));
@@ -1459,7 +1457,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVrintnF64: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
@@ -1474,7 +1472,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32S32: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
@@ -1482,7 +1480,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32U32: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
@@ -1490,7 +1488,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF64S32: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
@@ -1498,7 +1496,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF64U32: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
@@ -1506,7 +1504,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtS32F32: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
@@ -1521,7 +1519,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtU32F32: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
@@ -1536,7 +1534,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtS32F64: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
@@ -1544,7 +1542,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtU32F64: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
@@ -1763,7 +1761,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ add(scratch, fp, Operand(offset));
__ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()),
@@ -1900,7 +1898,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
case kArmF64x2Eq: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@@ -1916,7 +1914,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Ne: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@@ -1932,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Lt: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
@@ -1948,7 +1946,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Le: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
@@ -1989,8 +1987,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmov(dst.high(), rhs.high(), gt);
break;
}
+ case kArmF64x2Qfma: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ __ vmul(dst.low(), src0.low(), src1.low());
+ __ vmul(dst.high(), src0.high(), src1.high());
+ __ vadd(dst.low(), src2.low(), dst.low());
+ __ vadd(dst.high(), src2.high(), dst.high());
+ break;
+ }
+ case kArmF64x2Qfms: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ __ vmul(dst.low(), src0.low(), src1.low());
+ __ vmul(dst.high(), src0.high(), src1.high());
+ __ vsub(dst.low(), src2.low(), dst.low());
+ __ vsub(dst.high(), src2.high(), dst.high());
+ break;
+ }
case kArmF64x2Ceil: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintp(dst.low(), src.low());
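The new kArmF64x2Qfma/Qfms cases emulate the fused operations with a separate multiply followed by an add or subtract on each half. A scalar reference of the lane-wise semantics implemented above, assuming the unfused multiply-then-add interpretation:

// Sketch only: qfma = c + a*b and qfms = c - a*b per lane, without fusion.
#include <array>
#include <cstdio>

using F64x2 = std::array<double, 2>;

F64x2 Qfma(F64x2 a, F64x2 b, F64x2 c) {
  return {c[0] + a[0] * b[0], c[1] + a[1] * b[1]};
}

F64x2 Qfms(F64x2 a, F64x2 b, F64x2 c) {
  return {c[0] - a[0] * b[0], c[1] - a[1] * b[1]};
}

int main() {
  F64x2 r = Qfma({1.5, 2.0}, {2.0, 3.0}, {0.5, 1.0});  // expected {3.5, 7.0}
  std::printf("%f %f\n", r[0], r[1]);
  return 0;
}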
@@ -1998,7 +2018,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Floor: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintm(dst.low(), src.low());
@@ -2006,7 +2026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2Trunc: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintz(dst.low(), src.low());
@@ -2014,7 +2034,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF64x2NearestInt: {
- CpuFeatureScope scope(tasm(), ARMv8);
+ CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ vrintn(dst.low(), src.low());
@@ -2061,7 +2081,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Mul: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
QwNeonRegister dst = i.OutputSimd128Register();
QwNeonRegister left = i.InputSimd128Register(0);
QwNeonRegister right = i.InputSimd128Register(1);
@@ -2289,6 +2309,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vbsl(dst, rhs, lhs);
break;
}
+ case kArmF32x4Qfma: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vmul(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vadd(dst, i.InputSimd128Register(2), dst);
+ break;
+ }
+ case kArmF32x4Qfms: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vmul(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vsub(dst, i.InputSimd128Register(2), dst);
+ break;
+ }
case kArmF32x4DemoteF64x2Zero: {
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
@@ -2448,7 +2480,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4BitMask: {
Register dst = i.OutputRegister();
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ();
Simd128Register mask = i.TempSimd128Register(0);
@@ -2468,15 +2500,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register();
Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1);
- Simd128Register tmp1 = i.TempSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
- __ vmull(NeonS16, tmp1, lhs.low(), rhs.low());
+ __ vmull(NeonS16, scratch, lhs.low(), rhs.low());
+ __ vpadd(Neon32, dst.low(), scratch.low(), scratch.high());
__ vmull(NeonS16, scratch, lhs.high(), rhs.high());
- __ vpadd(Neon32, dst.low(), tmp1.low(), tmp1.high());
__ vpadd(Neon32, dst.high(), scratch.low(), scratch.high());
break;
}
+ case kArmI16x8DotI8x16S: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(masm());
+ Simd128Register scratch = temps.AcquireQ();
+ __ vmull(NeonS8, scratch, lhs.low(), rhs.low());
+ __ vpadd(Neon16, dst.low(), scratch.low(), scratch.high());
+ __ vmull(NeonS8, scratch, lhs.high(), rhs.high());
+ __ vpadd(Neon16, dst.high(), scratch.low(), scratch.high());
+ break;
+ }
+ case kArmI32x4DotI8x16AddS: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ Simd128Register tmp1 = i.TempSimd128Register(0);
+ DCHECK_EQ(dst, i.InputSimd128Register(2));
+ UseScratchRegisterScope temps(masm());
+ Simd128Register scratch = temps.AcquireQ();
+ __ vmull(NeonS8, scratch, lhs.low(), rhs.low());
+ __ vpadd(Neon16, tmp1.low(), scratch.low(), scratch.high());
+ __ vmull(NeonS8, scratch, lhs.high(), rhs.high());
+ __ vpadd(Neon16, tmp1.high(), scratch.low(), scratch.high());
+ __ vpadal(NeonS16, dst, tmp1);
+ break;
+ }
case kArmI32x4TruncSatF64x2SZero: {
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
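The added dot-product cases widen adjacent signed 8-bit products into 16-bit lanes, and the AddS variant then pairwise-accumulates those into the 32-bit lanes of the third operand. A scalar sketch of the lane semantics inferred from the instruction sequence above (lane overflow handling is ignored here):

// Sketch only: semantics inferred from the vmull/vpadd/vpadal sequence above.
#include <array>
#include <cstdint>
#include <cstdio>

using I8x16 = std::array<int8_t, 16>;
using I16x8 = std::array<int16_t, 8>;
using I32x4 = std::array<int32_t, 4>;

I16x8 DotI8x16S(const I8x16& a, const I8x16& b) {
  I16x8 r{};
  for (int i = 0; i < 8; i++) {
    // Two widened 8x8 products summed into each 16-bit lane.
    r[i] = static_cast<int16_t>(a[2 * i] * b[2 * i] + a[2 * i + 1] * b[2 * i + 1]);
  }
  return r;
}

I32x4 DotI8x16AddS(const I8x16& a, const I8x16& b, const I32x4& acc) {
  I16x8 dot = DotI8x16S(a, b);
  I32x4 r{};
  for (int i = 0; i < 4; i++) {
    // Pairwise-accumulate the 16-bit dot lanes into the 32-bit accumulator.
    r[i] = acc[i] + dot[2 * i] + dot[2 * i + 1];
  }
  return r;
}

int main() {
  I8x16 ones;
  ones.fill(1);
  I32x4 acc{10, 20, 30, 40};
  I32x4 r = DotI8x16AddS(ones, ones, acc);
  std::printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  // 14 24 34 44
  return 0;
}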
@@ -2651,7 +2709,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8BitMask: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ();
@@ -2806,7 +2864,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16BitMask: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ();
@@ -2907,7 +2965,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(scratch, src1);
@@ -2918,7 +2976,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
__ vmov(scratch, src1);
@@ -2929,7 +2987,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(scratch, src1);
@@ -2962,7 +3020,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
@@ -2991,7 +3049,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@@ -3002,7 +3060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@@ -3013,7 +3071,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@@ -3024,7 +3082,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@@ -3053,7 +3111,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
@@ -3064,7 +3122,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
@@ -3075,7 +3133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
@@ -3086,7 +3144,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
@@ -3113,7 +3171,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
DwVfpRegister table_base = src0.low();
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ();
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
// src1. They must be consecutive.
@@ -3164,7 +3222,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmV128AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmax(NeonU32, scratch, src.low(), src.high());
__ vpmax(NeonU32, scratch, scratch, scratch);
@@ -3179,7 +3237,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmin(NeonU32, scratch, src.low(), src.high());
__ vpmin(NeonU32, scratch, scratch, scratch);
@@ -3190,7 +3248,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI16x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmin(NeonU16, scratch, src.low(), src.high());
__ vpmin(NeonU16, scratch, scratch, scratch);
@@ -3202,7 +3260,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister scratch = temps.AcquireD();
__ vpmin(NeonU8, scratch, src.low(), src.high());
__ vpmin(NeonU8, scratch, scratch, scratch);
@@ -3699,6 +3757,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ Push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -3744,7 +3806,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
@@ -3870,8 +3932,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ cmp(argc_reg, Operand(parameter_slots));
__ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
}
- __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
@@ -3941,7 +4003,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsDoubleRegister()) {
__ vstr(g.ToDoubleRegister(source), dst);
} else {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
QwNeonRegister src = g.ToSimd128Register(source);
__ add(temp, dst.rn(), Operand(dst.offset()));
@@ -3962,7 +4024,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsDoubleStackSlot()) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
QwNeonRegister dst = g.ToSimd128Register(destination);
__ add(temp, src.rn(), Operand(src.offset()));
@@ -3973,7 +4035,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case MoveType::kStackToStack: {
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
if (source->IsStackSlot() || source->IsFloatStackSlot()) {
SwVfpRegister temp = temps.AcquireS();
__ vldr(temp, src);
@@ -4011,27 +4073,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination);
if (destination->IsStackSlot()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
        // Acquire an S register instead of a general-purpose register in case
        // `vstr` needs one to compute the address of `dst`.
SwVfpRegister s_temp = temps.AcquireS();
{
// TODO(arm): This sequence could be optimized further if necessary by
// writing the constant directly into `s_temp`.
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
MoveConstantToRegister(temp, src);
__ vmov(s_temp, temp);
}
__ vstr(s_temp, dst);
} else if (destination->IsFloatStackSlot()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister temp = temps.AcquireS();
__ vmov(temp, Float32::FromBits(src.ToFloat32AsInt()));
__ vstr(temp, dst);
} else {
DCHECK(destination->IsDoubleStackSlot());
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister temp = temps.AcquireD();
// TODO(arm): Look into optimizing this further if possible. Supporting
// the NEON version of VMOV may help.
@@ -4044,10 +4106,70 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
- // Must be kept in sync with {MoveTempLocationTo}.
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
auto rep = LocationOperand::cast(source)->representation();
- move_cycle_.temps.emplace(tasm());
+ int new_slots = ElementSizeInPointers(rep);
+ ArmOperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ push(g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ ldr(scratch, g.ToMemOperand(source));
+ __ push(scratch);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ sub(sp, sp, Operand(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ ArmOperandConverter g(this, nullptr);
+ if (dest->IsRegister()) {
+ __ pop(g.ToRegister(dest));
+ } else if (dest->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ pop(scratch);
+ __ str(scratch, g.ToMemOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ add(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ add(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
+ // Must be kept in sync with {MoveTempLocationTo}.
+ move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick a
// location to resolve the cycle. Re-include them immediately afterwards so
@@ -4083,27 +4205,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
} else {
// The scratch registers are blocked by pending moves. Use the stack
// instead.
- int new_slots = ElementSizeInPointers(rep);
- ArmOperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ push(g.ToRegister(source));
- } else if (source->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.Acquire();
- __ ldr(scratch, g.ToMemOperand(source));
- __ push(scratch);
- } else {
- // No push instruction for this operand type. Bump the stack pointer and
- // assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ sub(sp, sp, Operand(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
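
Push, Pop and PopTempStackSlots above factor the stack-spill path out of the cycle-resolution code: the id of the freshly pushed slot is derived from the frame's last slot plus the current sp delta, and temp_slots_ remembers how much has to be released later. A minimal stand-alone sketch of that bookkeeping, using hypothetical types rather than the V8 classes:

    #include <cassert>

    // Hypothetical mirror of the slot accounting done by Push/Pop above.
    struct FrameState {
      int total_frame_slots;  // what GetTotalFrameSlotCount() would return
      int sp_delta = 0;       // extra slots currently pushed below the frame
      int temp_slots = 0;     // slots owned by Push/Pop pairs
    };

    int PushTemp(FrameState& fs, int new_slots) {
      int last_frame_slot_id = fs.total_frame_slots - 1;
      int slot_id = last_frame_slot_id + fs.sp_delta + new_slots;
      fs.sp_delta += new_slots;
      fs.temp_slots += new_slots;
      return slot_id;  // index of the slot the value was pushed into
    }

    void PopTemp(FrameState& fs, int new_slots) {
      fs.sp_delta -= new_slots;
      fs.temp_slots -= new_slots;
      assert(fs.temp_slots >= 0);
    }
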
@@ -4129,25 +4231,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
}
}
} else {
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- ArmOperandConverter g(this, nullptr);
- if (dest->IsRegister()) {
- __ pop(g.ToRegister(dest));
- } else if (dest->IsStackSlot()) {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
- __ pop(scratch);
- __ str(scratch, g.ToMemOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ add(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
// Restore the default state to release the {UseScratchRegisterScope} and to
// prepare for the next cycle.
@@ -4159,7 +4243,7 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
InstructionOperand& destination = move->destination();
MoveType::Type move_type =
MoveType::InferMove(&move->source(), &move->destination());
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
if (move_type == MoveType::kStackToStack) {
if (source.IsStackSlot() || source.IsFloatStackSlot()) {
SwVfpRegister temp = temps.AcquireS();
@@ -4199,7 +4283,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
LowDwVfpRegister temp = temps.AcquireLowD();
int src_code = LocationOperand::cast(source)->register_code();
int dst_code = LocationOperand::cast(destination)->register_code();
@@ -4216,20 +4300,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand dst = g.ToMemOperand(destination);
if (source->IsRegister()) {
Register src = g.ToRegister(source);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister temp = temps.AcquireS();
__ vmov(temp, src);
__ ldr(src, dst);
__ vstr(temp, dst);
} else if (source->IsFloatRegister()) {
int src_code = LocationOperand::cast(source)->register_code();
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
LowDwVfpRegister temp = temps.AcquireLowD();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
} else if (source->IsDoubleRegister()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister temp = temps.AcquireD();
DwVfpRegister src = g.ToDoubleRegister(source);
__ Move(temp, src);
@@ -4237,7 +4321,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(temp, dst);
} else {
QwNeonRegister src = g.ToSimd128Register(source);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register temp = temps.Acquire();
QwNeonRegister temp_q = temps.AcquireQ();
__ Move(temp_q, src);
@@ -4251,7 +4335,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
if (source->IsStackSlot() || source->IsFloatStackSlot()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
SwVfpRegister temp_0 = temps.AcquireS();
SwVfpRegister temp_1 = temps.AcquireS();
__ vldr(temp_0, dst);
@@ -4259,7 +4343,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(temp_0, src);
__ vstr(temp_1, dst);
} else if (source->IsDoubleStackSlot()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
LowDwVfpRegister temp = temps.AcquireLowD();
if (temps.CanAcquireD()) {
DwVfpRegister temp_0 = temp;
@@ -4292,7 +4376,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand dst0 = dst;
MemOperand src1(src.rn(), src.offset() + kDoubleSize);
MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
DwVfpRegister temp_0 = temps.AcquireD();
DwVfpRegister temp_1 = temps.AcquireD();
__ vldr(temp_0, dst0);
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 75f6aee953..28aabaaa56 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -152,6 +152,8 @@ namespace compiler {
V(ArmF64x2Le) \
V(ArmF64x2Pmin) \
V(ArmF64x2Pmax) \
+ V(ArmF64x2Qfma) \
+ V(ArmF64x2Qfms) \
V(ArmF64x2Ceil) \
V(ArmF64x2Floor) \
V(ArmF64x2Trunc) \
@@ -179,6 +181,8 @@ namespace compiler {
V(ArmF32x4Le) \
V(ArmF32x4Pmin) \
V(ArmF32x4Pmax) \
+ V(ArmF32x4Qfma) \
+ V(ArmF32x4Qfms) \
V(ArmF32x4DemoteF64x2Zero) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
@@ -228,6 +232,8 @@ namespace compiler {
V(ArmI32x4Abs) \
V(ArmI32x4BitMask) \
V(ArmI32x4DotI16x8S) \
+ V(ArmI16x8DotI8x16S) \
+ V(ArmI32x4DotI8x16AddS) \
V(ArmI32x4TruncSatF64x2SZero) \
V(ArmI32x4TruncSatF64x2UZero) \
V(ArmI16x8Splat) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 8b4a623cc3..fab7f5b57b 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -129,6 +129,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF64x2Ne:
case kArmF64x2Lt:
case kArmF64x2Le:
+ case kArmF64x2Qfma:
+ case kArmF64x2Qfms:
case kArmF64x2Pmin:
case kArmF64x2Pmax:
case kArmF64x2Ceil:
@@ -156,6 +158,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4Ne:
case kArmF32x4Lt:
case kArmF32x4Le:
+ case kArmF32x4Qfma:
+ case kArmF32x4Qfms:
case kArmF32x4Pmin:
case kArmF32x4Pmax:
case kArmF32x4DemoteF64x2Zero:
@@ -207,6 +211,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4Abs:
case kArmI32x4BitMask:
case kArmI32x4DotI16x8S:
+ case kArmI16x8DotI8x16S:
+ case kArmI32x4DotI8x16AddS:
case kArmI32x4TruncSatF64x2SZero:
case kArmI32x4TruncSatF64x2UZero:
case kArmI16x8Splat:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 8733aff787..1a19523d02 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -397,7 +397,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
input_count = 1;
inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
@@ -407,6 +407,15 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
}
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ input_count = 1;
+ // This will only work if {index} is a constant.
+ inputs[0] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode, 1, output, input_count, inputs);
+ return;
+ }
+
inputs[0] = g.UseRegister(base);
if (g.CanBeImmediate(index, opcode)) {
inputs[1] = g.UseImmediate(index);
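
The new kLoadRootRegister branch above encodes a root-relative access: there is no base-register input at all, only an immediate byte offset, and kMode_Root tells the code generator to address relative to the root register. The comment's restriction follows directly, since a non-constant index could not be folded into that single immediate. Conceptually (an illustrative helper, not a V8 function):

    #include <cstdint>

    // Effective address of a kMode_Root operand: the root register is the
    // implicit base, the operand carries only a constant byte offset.
    uintptr_t RootRelativeAddress(uintptr_t root_register_value,
                                  int32_t constant_offset) {
      return root_register_value + constant_offset;
    }
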
@@ -726,7 +735,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
InstructionCode code;
if (!atomic_order) {
code = kArchStoreWithWriteBarrier;
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
} else {
code = kArchAtomicStoreWithWriteBarrier;
code |= AtomicMemoryOrderField::encode(*atomic_order);
@@ -753,7 +762,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
@@ -765,6 +774,16 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
}
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ int input_count = 2;
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(value);
+ inputs[1] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ return;
+ }
+
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseRegister(value);
@@ -2720,10 +2739,25 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
ArmOperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kArmI32x4DotI16x8S, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmI16x8DotI8x16S, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
+ ArmOperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kArmI32x4DotI8x16AddS, g.DefineSameAsInput(node, 2),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)), arraysize(temps), temps);
}
void InstructionSelector::VisitS128Const(Node* node) {
@@ -2918,6 +2952,20 @@ void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
VisitS128Select(node);
}
+#define VISIT_SIMD_QFMOP(op) \
+ void InstructionSelector::Visit##op(Node* node) { \
+ ArmOperandGenerator g(this); \
+ Emit(kArm##op, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseUniqueRegister(node->InputAt(2))); \
+ }
+VISIT_SIMD_QFMOP(F64x2Qfma)
+VISIT_SIMD_QFMOP(F64x2Qfms)
+VISIT_SIMD_QFMOP(F32x4Qfma)
+VISIT_SIMD_QFMOP(F32x4Qfms)
+#undef VISIT_SIMD_QFMOP
+
#if V8_ENABLE_WEBASSEMBLY
namespace {
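
VISIT_SIMD_QFMOP above wires each relaxed fused op to a single instruction with three distinct register inputs, where the third input is the addend. Per lane, Qfma evaluates a * b + c and Qfms evaluates -(a * b) + c; relaxed SIMD permits either the fused or the unfused rounding, which is why a plain fused multiply-accumulate (Fmla/Fmls on the arm64 side below) is acceptable. A scalar reference, purely for illustration:

    // Per-lane reference semantics of the relaxed QFMA/QFMS operations.
    float QfmaLane(float a, float b, float acc) { return a * b + acc; }
    float QfmsLane(float a, float b, float acc) { return -(a * b) + acc; }
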
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index daae9502ce..22f52dfcb6 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -23,7 +24,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
@@ -223,12 +224,25 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
#endif // V8_ENABLE_WEBASSEMBLY
return Operand(constant.ToInt64());
case Constant::kFloat32:
- return Operand(Operand::EmbeddedNumber(constant.ToFloat32()));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
- case Constant::kCompressedHeapObject: // Fall through.
+ case Constant::kCompressedHeapObject: {
+ RootIndex root_index;
+ if (gen_->isolate()->roots_table().IsRootHandle(constant.ToHeapObject(),
+ &root_index)) {
+ CHECK(COMPRESS_POINTERS_BOOL);
+ CHECK(V8_STATIC_ROOTS_BOOL || !gen_->isolate()->bootstrapper());
+ Tagged_t ptr =
+ MacroAssemblerBase::ReadOnlyRootPtr(root_index, gen_->isolate());
+ CHECK(Assembler::IsImmAddSub(ptr));
+ return Immediate(ptr);
+ }
+
+ return Operand(constant.ToHeapObject());
+ }
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kRpoNumber:
@@ -237,13 +251,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
- MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
+ MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
- return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
}
- MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
+ MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
if (offset.from_frame_pointer()) {
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
@@ -283,17 +297,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
- __ CheckPageFlag(
- value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- ne, exit());
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Push<TurboAssembler::kSignLR>(lr, padreg);
+ __ Push<MacroAssembler::kSignLR>(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
@@ -310,7 +323,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
}
if (must_save_lr_) {
- __ Pop<TurboAssembler::kAuthLR>(padreg, lr);
+ __ Pop<MacroAssembler::kAuthLR>(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
@@ -416,7 +429,15 @@ class WasmOutOfLineTrap : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ if (gen_->IsWasm() || PointerCompressionIsEnabled()) {
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ } else {
+        // For wasm traps inlined into JavaScript, force an indirect call if
+        // pointer compression is disabled, since the builtin's address is not
+        // guaranteed to be close enough for a near call.
+ __ IndirectCall(static_cast<Address>(trap_id),
+ RelocInfo::WASM_STUB_CALL);
+ }
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
gen_->RecordSafepoint(reference_map);
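
The indirect-call branch above exists because a direct arm64 call has limited reach: B/BL encode a signed 26-bit word offset, roughly +/-128 MB from the call site. When that proximity cannot be guaranteed (per the comment: wasm traps inlined into JavaScript with pointer compression disabled), the stub address is materialized in a register and called indirectly. A rough sketch of the distance constraint (illustrative, not the V8 predicate):

    #include <cstdint>

    constexpr int64_t kNearCallReach = int64_t{128} * 1024 * 1024;  // +/-128 MB

    bool FitsNearCall(uint64_t call_site, uint64_t target) {
      int64_t delta = static_cast<int64_t>(target - call_site);
      return delta >= -kNearCallReach && delta < kNearCallReach;
    }
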
@@ -429,43 +450,49 @@ class WasmOutOfLineTrap : public OutOfLineCode {
class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
public:
- WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
- : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr,
+ TrapId trap_id)
+ : WasmOutOfLineTrap(gen, instr), pc_(pc), trap_id_(trap_id) {}
void Generate() override {
DCHECK(v8_flags.wasm_bounds_checks && !v8_flags.wasm_enforce_bounds_checks);
gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
- GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
+ GenerateWithTrapId(trap_id_);
}
private:
int pc_;
+ TrapId trap_id_;
};
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessProtected) {
- zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
+ if (access_mode == kMemoryAccessProtectedMemOutOfBounds) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr,
+ TrapId::kTrapMemOutOfBounds);
+ } else if (access_mode == kMemoryAccessProtectedNullDereference) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr,
+ TrapId::kTrapNullDereference);
}
}
#else
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+ DCHECK_EQ(kMemoryAccessDirect, AccessModeField::decode(opcode));
}
#endif // V8_ENABLE_WEBASSEMBLY
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
-void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
+void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr,
Arm64OperandConverter i, VectorFormat scalar,
VectorFormat vector) {
VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;
VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
- (tasm->*fn)(output, input);
+ (masm->*fn)(output, input);
}
} // namespace
@@ -497,54 +524,124 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_instr(i.Input##reg(2), i.TempRegister(0)); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr, reg) \
- do { \
- Label exchange; \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Bind(&exchange); \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- __ load_instr(i.Output##reg(), i.TempRegister(0)); \
- __ store_instr(i.TempRegister32(1), i.Input##reg(2), i.TempRegister(0)); \
- __ Cbnz(i.TempRegister32(1), &exchange); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(suffix, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Swpal##suffix(i.Input##reg(2), i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label exchange; \
+ __ Bind(&exchange); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ stlxr##suffix(i.TempRegister32(1), i.Input##reg(2), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(1), &exchange); \
+ } \
} while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext, \
- reg) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Bind(&compareExchange); \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- __ load_instr(i.Output##reg(), i.TempRegister(0)); \
- __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \
- __ B(ne, &exit); \
- __ store_instr(i.TempRegister32(1), i.Input##reg(3), i.TempRegister(0)); \
- __ Cbnz(i.TempRegister32(1), &compareExchange); \
- __ Bind(&exit); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(suffix, ext, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ DCHECK_EQ(i.OutputRegister(), i.InputRegister(2)); \
+ CpuFeatureScope scope(masm(), LSE); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Casal##suffix(i.Output##reg(), i.Input##reg(3), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label compareExchange; \
+ Label exit; \
+ __ Bind(&compareExchange); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \
+ __ B(ne, &exit); \
+ __ stlxr##suffix(i.TempRegister32(1), i.Input##reg(3), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(1), &compareExchange); \
+ __ Bind(&exit); \
+ } \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr, reg) \
- do { \
- Label binop; \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Bind(&binop); \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- __ load_instr(i.Output##reg(), i.TempRegister(0)); \
- __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
- __ store_instr(i.TempRegister32(2), i.Temp##reg(1), i.TempRegister(0)); \
- __ Cbnz(i.TempRegister32(2), &binop); \
+#define ASSEMBLE_ATOMIC_SUB(suffix, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ UseScratchRegisterScope temps(masm()); \
+ Register scratch = temps.AcquireSameSizeAs(i.Input##reg(2)); \
+ __ Neg(scratch, i.Input##reg(2)); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Ldaddal##suffix(scratch, i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label binop; \
+ __ Bind(&binop); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ Sub(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+ __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_AND(suffix, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ UseScratchRegisterScope temps(masm()); \
+ Register scratch = temps.AcquireSameSizeAs(i.Input##reg(2)); \
+ __ Mvn(scratch, i.Input##reg(2)); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Ldclral##suffix(scratch, i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label binop; \
+ __ Bind(&binop); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ And(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+ __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_BINOP(suffix, bin_instr, lse_instr, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ lse_instr##suffix(i.Input##reg(2), i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label binop; \
+ __ Bind(&binop); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+ __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
+ } \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
} while (0)
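
Each of the rewritten atomic macros above now carries two code paths chosen at run time: with ARMv8.1 LSE available, one instruction performs the whole read-modify-write (Swpal for exchange, Casal for compare-exchange, Ldaddal/Ldsetal/Ldeoral for add/or/xor, while sub and and are expressed as Ldaddal of the negated value and Ldclral of the bitwise-complemented value); without LSE the familiar ldaxr/stlxr retry loop is kept. The fallback behaves, conceptually, like the loop below, written against std::atomic purely as an illustration of the retry structure rather than of the emitted instructions:

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-in for the non-LSE path of ASSEMBLE_ATOMIC_SUB:
    // keep retrying until the exclusive store succeeds, and return the
    // value observed before the update, as the macros do.
    uint32_t AtomicSubFallback(std::atomic<uint32_t>& cell, uint32_t value) {
      uint32_t old = cell.load(std::memory_order_acquire);
      while (!cell.compare_exchange_weak(old, old - value,
                                         std::memory_order_acq_rel)) {
        // 'old' is refreshed on failure; loop back, like Cbnz to the label.
      }
      return old;
    }
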
@@ -557,7 +654,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- UseScratchRegisterScope temps(tasm()); \
+ UseScratchRegisterScope temps(masm()); \
VRegister tmp = temps.AcquireQ(); \
Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
@@ -577,7 +674,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- UseScratchRegisterScope temps(tasm()); \
+ UseScratchRegisterScope temps(masm()); \
VRegister tmp = temps.AcquireQ(); \
Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
@@ -591,7 +688,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
void CodeGenerator::AssembleDeconstructFrame() {
__ Mov(sp, fp);
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+ __ Pop<MacroAssembler::kAuthLR>(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}
@@ -605,7 +702,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -614,10 +711,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
DCHECK_EQ(stack_slot_delta % 2, 0);
if (stack_slot_delta > 0) {
- tasm->Claim(stack_slot_delta);
+ masm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->Drop(-stack_slot_delta);
+ masm->Drop(-stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -626,14 +723,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
DCHECK_EQ(first_unused_slot_offset % 2, 0);
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
DCHECK(instr->IsTailCall());
InstructionOperandConverter g(this, instr);
@@ -645,34 +742,14 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
-// Check if the code object is marked for deoptimization. If it is, then it
-// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
-// to:
-// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
-// 2. test kMarkedForDeoptimizationBit in those flags; and
-// 3. if it is not zero then it jumps to the builtin.
-void CodeGenerator::BailoutIfDeoptimized() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(
- scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ Ldr(scratch.W(),
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
- Label not_deoptimized;
- __ Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
- __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
- RelocInfo::CODE_TARGET);
- __ Bind(&not_deoptimized);
-}
+void CodeGenerator::BailoutIfDeoptimized() { __ BailoutIfDeoptimized(); }
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
@@ -724,7 +801,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, constant.rmode());
} else {
Register target = i.InputRegister(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
temps.Exclude(x17);
__ Mov(x17, target);
__ Jump(x17);
@@ -756,7 +833,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
temps.Exclude(x17);
__ Mov(x17, reg);
__ Jump(x17);
@@ -769,17 +846,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
- __ LoadTaggedPointerField(
- temp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedField(temp,
+ FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ LoadTaggedPointerField(x2,
- FieldMemOperand(func, JSFunction::kCodeOffset));
- __ CallCodeTObject(x2);
+ __ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -879,7 +955,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -947,8 +1023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
@@ -971,19 +1046,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ StoreTaggedField(value, MemOperand(object, offset));
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
- eq, ool->entry());
+ ne, ool->entry());
__ Bind(ool->exit());
break;
}
case kArchAtomicStoreWithWriteBarrier: {
DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register offset = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -995,7 +1070,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ JumpIfSmi(value, ool->exit());
}
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
- eq, ool->entry());
+ ne, ool->entry());
__ Bind(ool->exit());
break;
}
@@ -1070,39 +1145,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kArm64Float32RoundDown:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundDown:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundUp:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundUp:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float64RoundTiesAway:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundTruncate:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundTruncate:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundTiesEven:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundTiesEven:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Add:
@@ -1333,14 +1408,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Imod: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Imod32: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireW();
__ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1348,14 +1423,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umod: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Umod32: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireW();
__ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1669,7 +1744,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64Mod: {
// TODO(turbofan): implement directly.
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
DCHECK_EQ(d0, i.InputDoubleRegister(0));
DCHECK_EQ(d1, i.InputDoubleRegister(1));
DCHECK_EQ(d0, i.OutputDoubleRegister());
@@ -1907,25 +1982,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldr(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdrDecompressTaggedSigned:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
break;
- case kArm64LdrDecompressTaggedPointer:
- __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- break;
- case kArm64LdrDecompressAnyTagged:
- __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ case kArm64LdrDecompressTagged:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdarDecompressTaggedSigned:
__ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
- case kArm64LdarDecompressTaggedPointer:
- __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1), i.TempRegister(0));
- break;
- case kArm64LdarDecompressAnyTagged:
- __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1), i.TempRegister(0));
+ case kArm64LdarDecompressTagged:
+ __ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdrDecodeSandboxedPointer:
__ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
@@ -1935,6 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StrCompressTagged:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StlrCompressTagged:
@@ -2013,75 +2084,109 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
break;
case kAtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(b, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(b, Register32);
break;
case kAtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(h, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(h, Register32);
break;
case kAtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(, Register32);
break;
case kArm64Word64AtomicExchangeUint64:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(, Register);
break;
case kAtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(b, UXTB, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(b, UXTB, Register32);
break;
case kAtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(h, UXTH, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(h, UXTH, Register32);
break;
case kAtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(, UXTW, Register32);
break;
case kArm64Word64AtomicCompareExchangeUint64:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
- break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
- __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
- break; \
- case kAtomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
- break; \
- case kAtomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
- __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
- break; \
- case kAtomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
- break; \
- case kAtomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
- break; \
- case kArm64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register); \
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(, UXTX, Register);
+ break;
+ case kAtomicSubInt8:
+ ASSEMBLE_ATOMIC_SUB(b, Register32);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicSubUint8:
+ ASSEMBLE_ATOMIC_SUB(b, Register32);
+ break;
+ case kAtomicSubInt16:
+ ASSEMBLE_ATOMIC_SUB(h, Register32);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicSubUint16:
+ ASSEMBLE_ATOMIC_SUB(h, Register32);
+ break;
+ case kAtomicSubWord32:
+ ASSEMBLE_ATOMIC_SUB(, Register32);
+ break;
+ case kArm64Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_SUB(, Register);
+ break;
+ case kAtomicAndInt8:
+ ASSEMBLE_ATOMIC_AND(b, Register32);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicAndUint8:
+ ASSEMBLE_ATOMIC_AND(b, Register32);
+ break;
+ case kAtomicAndInt16:
+ ASSEMBLE_ATOMIC_AND(h, Register32);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicAndUint16:
+ ASSEMBLE_ATOMIC_AND(h, Register32);
+ break;
+ case kAtomicAndWord32:
+ ASSEMBLE_ATOMIC_AND(, Register32);
+ break;
+ case kArm64Word64AtomicAndUint64:
+ ASSEMBLE_ATOMIC_AND(, Register);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst, lse_instr) \
+ case kAtomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(b, inst, lse_instr, Register32); \
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(b, inst, lse_instr, Register32); \
+ break; \
+ case kAtomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(h, inst, lse_instr, Register32); \
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(h, inst, lse_instr, Register32); \
+ break; \
+ case kAtomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(, inst, lse_instr, Register32); \
+ break; \
+ case kArm64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(, inst, lse_instr, Register); \
break;
- ATOMIC_BINOP_CASE(Add, Add)
- ATOMIC_BINOP_CASE(Sub, Sub)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Orr)
- ATOMIC_BINOP_CASE(Xor, Eor)
+ ATOMIC_BINOP_CASE(Add, Add, Ldaddal)
+ ATOMIC_BINOP_CASE(Or, Orr, Ldsetal)
+ ATOMIC_BINOP_CASE(Xor, Eor, Ldeoral)
#undef ATOMIC_BINOP_CASE
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
@@ -2178,6 +2283,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2).Format(f)); \
break; \
}
+#define SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(Op, Instr, FORMAT) \
+ case Op: { \
+ VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(2).V##FORMAT()); \
+ __ Instr(dst, i.InputSimd128Register(0).V##FORMAT(), \
+ i.InputSimd128Register(1).V##FORMAT()); \
+ break; \
+ }
SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
@@ -2293,8 +2406,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_FCM_L_CASE(kArm64FLe, le, ge);
SIMD_FCM_G_CASE(kArm64FGt, gt);
SIMD_FCM_G_CASE(kArm64FGe, ge);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F64x2Qfma, Fmla, 2D);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F64x2Qfms, Fmls, 2D);
case kArm64F64x2Pmin: {
VRegister dst = i.OutputSimd128Register().V2D();
VRegister lhs = i.InputSimd128Register(0).V2D();
@@ -2327,8 +2440,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
break;
}
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfms, Fmls, 4S);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F32x4Qfma, Fmla, 4S);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F32x4Qfms, Fmls, 4S);
case kArm64F32x4Pmin: {
VRegister dst = i.OutputSimd128Register().V4S();
VRegister lhs = i.InputSimd128Register(0).V4S();
@@ -2380,7 +2493,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister dst = i.OutputSimd128Register();
VRegister src1 = i.InputSimd128Register(0);
VRegister src2 = i.InputSimd128Register(1);
@@ -2481,7 +2594,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@@ -2497,7 +2610,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I32x4DotI16x8S: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat4S);
@@ -2508,7 +2621,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8DotI8x16S: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat8H);
@@ -2519,17 +2632,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I32x4DotI8x16AddS: {
- UseScratchRegisterScope scope(tasm());
- VRegister lhs = i.InputSimd128Register(0);
- VRegister rhs = i.InputSimd128Register(1);
- VRegister tmp1 = scope.AcquireV(kFormat8H);
- VRegister tmp2 = scope.AcquireV(kFormat8H);
- __ Smull(tmp1, lhs.V8B(), rhs.V8B());
- __ Smull2(tmp2, lhs.V16B(), rhs.V16B());
- __ Addp(tmp1, tmp1, tmp2);
- __ Saddlp(tmp1.V4S(), tmp1);
- __ Add(i.OutputSimd128Register().V4S(), tmp1.V4S(),
- i.InputSimd128Register(2).V4S());
+ if (CpuFeatures::IsSupported(DOTPROD)) {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(2));
+ __ Sdot(i.InputSimd128Register(2).V4S(),
+ i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B());
+
+ } else {
+ UseScratchRegisterScope scope(masm());
+ VRegister lhs = i.InputSimd128Register(0);
+ VRegister rhs = i.InputSimd128Register(1);
+ VRegister tmp1 = scope.AcquireV(kFormat8H);
+ VRegister tmp2 = scope.AcquireV(kFormat8H);
+ __ Smull(tmp1, lhs.V8B(), rhs.V8B());
+ __ Smull2(tmp2, lhs.V16B(), rhs.V16B());
+ __ Addp(tmp1, tmp1, tmp2);
+ __ Saddlp(tmp1.V4S(), tmp1);
+ __ Add(i.OutputSimd128Register().V4S(), tmp1.V4S(),
+ i.InputSimd128Register(2).V4S());
+ }
break;
}
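      // Reference semantics of kArm64I32x4DotI8x16AddS, for illustration only
      // (stand-alone sketch, not V8 code): every 32-bit output lane is the sum
      // of four adjacent signed-byte products plus the matching accumulator
      // lane. With DOTPROD this is a single Sdot into the accumulator;
      // otherwise the Smull/Smull2 + Addp + Saddlp sequence above computes the
      // same value.
      //
      //   int32_t DotI8x16AddLane(const int8_t a[4], const int8_t b[4],
      //                           int32_t acc) {
      //     int32_t sum = acc;
      //     for (int k = 0; k < 4; ++k) sum += int32_t{a[k]} * int32_t{b[k]};
      //     return sum;
      //   }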
case kArm64IExtractLaneU: {
@@ -2556,7 +2677,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src1) {
__ Mov(temp, src1.V4S());
@@ -2577,7 +2698,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src1) {
__ Mov(temp, src1.V4S());
@@ -2591,7 +2712,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
case kArm64I16x8BitMask: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@@ -2618,7 +2739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst == src1) {
__ Mov(temp, src1.V8H());
@@ -2636,7 +2757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst == src1) {
__ Mov(temp, src1.V8H());
@@ -2647,7 +2768,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I8x16BitMask: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@@ -2736,7 +2857,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1).V4S();
// Check for in-place shuffles.
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src0) {
__ Mov(temp, src0);
@@ -2802,7 +2923,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
: 0xE0E0E0E0E0E0E0E0));
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat16B);
__ Movi(temp, imm2, imm1);
@@ -2881,7 +3002,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64V128AnyTrue: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
// For AnyTrue, the format does not matter; also, we would like to avoid
// an expensive horizontal reduction.
VRegister temp = scope.AcquireV(kFormat4S);
@@ -2894,7 +3015,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
case Op: { \
- UseScratchRegisterScope scope(tasm()); \
+ UseScratchRegisterScope scope(masm()); \
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
@@ -2915,6 +3036,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
+#undef SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -3047,7 +3169,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register input = i.InputRegister32(0);
Register temp = scope.AcquireX();
size_t const case_count = instr->InputCount() - 2;
@@ -3058,12 +3180,18 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
+ constexpr int instructions_per_jump_target = 1;
+#else
+ constexpr int instructions_per_jump_target = 0;
#endif
+ constexpr int instructions_per_case = 1 + instructions_per_jump_target;
__ Add(temp, temp, Operand(input, UXTW, entry_size_log2));
__ Br(temp);
{
- TurboAssembler::BlockPoolsScope block_pools(tasm(),
- case_count * kInstrSize);
+ const size_t instruction_count =
+ case_count * instructions_per_case + instructions_per_jump_target;
+ MacroAssembler::BlockPoolsScope block_pools(masm(),
+ instruction_count * kInstrSize);
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
__ JumpTarget();
@@ -3121,10 +3249,10 @@ void CodeGenerator::AssembleConstructFrame() {
DCHECK_EQ(required_slots % 2, 1);
__ Prologue();
// Update required_slots count since we have just claimed one extra slot.
- static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
- required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue;
+ static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
+ required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue;
} else {
- __ Push<TurboAssembler::kSignLR>(lr, fp);
+ __ Push<MacroAssembler::kSignLR>(lr, fp);
__ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -3147,7 +3275,7 @@ void CodeGenerator::AssembleConstructFrame() {
// One unoptimized frame slot has already been claimed when the actual
// arguments count was pushed.
required_slots -=
- unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
+ unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue;
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3161,7 +3289,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register scratch = scope.AcquireX();
__ Ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
@@ -3174,7 +3302,7 @@ void CodeGenerator::AssembleConstructFrame() {
{
// Finish the frame that hasn't been fully built yet.
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3205,7 +3333,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Claim(required_slots);
break;
case CallDescriptor::kCallCodeObject: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3221,7 +3349,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmFunction: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3231,10 +3359,13 @@ void CodeGenerator::AssembleConstructFrame() {
}
case CallDescriptor::kCallWasmImportWrapper:
case CallDescriptor::kCallWasmCapiFunction: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
+ // This stack slot is only used for printing stack traces in V8. Also,
+ // it holds a WasmApiFunctionRef instead of the instance itself, which
+ // is taken care of in the frames accessors.
__ Push(scratch, kWasmInstanceRegister);
int extra_slots =
call_descriptor->kind() == CallDescriptor::kCallWasmImportWrapper
@@ -3247,7 +3378,7 @@ void CodeGenerator::AssembleConstructFrame() {
case CallDescriptor::kCallAddress:
#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
__ Push(scratch, padreg);
@@ -3377,15 +3508,15 @@ void CodeGenerator::PrepareForDeoptimizationExits(
false, false,
static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
- // Check which deopt kinds exist in this Code object, to avoid emitting jumps
- // to unused entries.
+ // Check which deopt kinds exist in this InstructionStream object, to avoid
+ // emitting jumps to unused entries.
bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
for (auto exit : *exits) {
saw_deopt_kind[static_cast<int>(exit->kind())] = true;
}
// Emit the jumps to deoptimization entries.
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register scratch = scope.AcquireX();
static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
for (int i = 0; i < kDeoptimizeKindCount; i++) {
@@ -3397,11 +3528,71 @@ void CodeGenerator::PrepareForDeoptimizationExits(
}
}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
+ Arm64OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ Push(padreg, g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.AcquireX();
+ __ Ldr(scratch, g.ToMemOperand(source, masm()));
+ __ Push(padreg, scratch);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ Sub(sp, sp, Operand(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ Arm64OperandConverter g(this, nullptr);
+ if (dest->IsRegister()) {
+ __ Pop(g.ToRegister(dest), padreg);
+ } else if (dest->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.AcquireX();
+ __ Pop(scratch, padreg);
+ __ Str(scratch, g.ToMemOperand(dest, masm()));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ Add(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ add(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
- move_cycle_.temps.emplace(tasm());
+ move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they
@@ -3439,7 +3630,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
scratch_reg.code());
Arm64OperandConverter g(this, nullptr);
if (source->IsStackSlot()) {
- __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm()));
+ __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm()));
} else {
DCHECK(source->IsRegister());
__ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
@@ -3452,27 +3643,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
} else {
// The scratch registers are blocked by pending moves. Use the stack
// instead.
- int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
- Arm64OperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ Push(g.ToRegister(source), padreg);
- } else if (source->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.AcquireX();
- __ Ldr(scratch, g.ToMemOperand(source, tasm()));
- __ Push(scratch, padreg);
- } else {
- // No push instruction for this operand type. Bump the stack pointer and
- // assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ Sub(sp, sp, Operand(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -3488,7 +3659,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
move_cycle_.scratch_reg->code());
Arm64OperandConverter g(this, nullptr);
if (dest->IsStackSlot()) {
- __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm()));
+ __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm()));
} else {
DCHECK(dest->IsRegister());
__ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
@@ -3499,25 +3670,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
AssembleMove(&scratch, dest);
}
} else {
- int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
- frame_access_state()->IncreaseSPDelta(-new_slots);
- Arm64OperandConverter g(this, nullptr);
- if (dest->IsRegister()) {
- __ Pop(padreg, g.ToRegister(dest));
- } else if (dest->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.AcquireX();
- __ Pop(padreg, scratch);
- __ Str(scratch, g.ToMemOperand(dest, tasm()));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ Add(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
// Restore the default state to release the {UseScratchRegisterScope} and to
// prepare for the next cycle.
@@ -3528,9 +3681,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
auto move_type = MoveType::InferMove(&move->source(), &move->destination());
if (move_type == MoveType::kStackToStack) {
Arm64OperandConverter g(this, nullptr);
- MemOperand src = g.ToMemOperand(&move->source(), tasm());
- MemOperand dst = g.ToMemOperand(&move->destination(), tasm());
- UseScratchRegisterScope temps(tasm());
+ MemOperand src = g.ToMemOperand(&move->source(), masm());
+ MemOperand dst = g.ToMemOperand(&move->destination(), masm());
+ UseScratchRegisterScope temps(masm());
if (move->source().IsSimd128StackSlot()) {
VRegister temp = temps.AcquireQ();
move_cycle_.scratch_fp_regs.set(temp);
@@ -3545,11 +3698,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
// Offset doesn't fit into the immediate field so the assembler will emit
// two instructions and use a second temp register.
if ((src.IsImmediateOffset() &&
- !tasm()->IsImmLSScaled(src_offset, src_size) &&
- !tasm()->IsImmLSUnscaled(src_offset)) ||
+ !masm()->IsImmLSScaled(src_offset, src_size) &&
+ !masm()->IsImmLSUnscaled(src_offset)) ||
(dst.IsImmediateOffset() &&
- !tasm()->IsImmLSScaled(dst_offset, dst_size) &&
- !tasm()->IsImmLSUnscaled(dst_offset))) {
+ !masm()->IsImmLSScaled(dst_offset, dst_size) &&
+ !masm()->IsImmLSUnscaled(dst_offset))) {
Register temp = temps.AcquireX();
move_cycle_.scratch_regs.set(temp);
}
@@ -3573,7 +3726,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ __ LoadTaggedRoot(dst, index);
} else {
// TODO(v8:8977): Even though this mov happens on 32 bits (Note the
// .W()) and we are passing along the RelocInfo, we still haven't made
@@ -3598,7 +3751,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
return;
case MoveType::kRegisterToStack: {
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsRegister()) {
__ Str(g.ToRegister(source), dst);
} else {
@@ -3613,7 +3766,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
return;
}
case MoveType::kStackToRegister: {
- MemOperand src = g.ToMemOperand(source, tasm());
+ MemOperand src = g.ToMemOperand(source, masm());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
@@ -3628,15 +3781,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
return;
}
case MoveType::kStackToStack: {
- MemOperand src = g.ToMemOperand(source, tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsSimd128StackSlot()) {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireQ();
__ Ldr(temp, src);
__ Str(temp, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
__ Str(temp, dst);
@@ -3660,9 +3813,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (destination->IsStackSlot()) {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
MoveConstantToRegister(temp, src);
__ Str(temp, dst);
@@ -3670,7 +3823,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ Str(wzr, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
__ Str(temp, dst);
@@ -3680,7 +3833,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (src.ToFloat64().AsUint64() == 0) {
__ Str(xzr, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64().value());
__ Str(temp, dst);
@@ -3711,8 +3864,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
return;
case MoveType::kRegisterToStack: {
- UseScratchRegisterScope scope(tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ UseScratchRegisterScope scope(masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsRegister()) {
Register temp = scope.AcquireX();
Register src = g.ToRegister(source);
@@ -3720,7 +3873,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Ldr(src, dst);
__ Str(temp, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister src = g.ToDoubleRegister(source);
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
VRegister temp = scope.AcquireD();
@@ -3738,9 +3891,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
return;
}
case MoveType::kStackToStack: {
- UseScratchRegisterScope scope(tasm());
- MemOperand src = g.ToMemOperand(source, tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ UseScratchRegisterScope scope(masm());
+ MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
VRegister temp_0 = scope.AcquireD();
VRegister temp_1 = scope.AcquireD();
if (source->IsSimd128StackSlot()) {
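
The Push/Pop/PopTempStackSlots helpers added to the arm64 code generator above always claim an even number of slots (RoundUp<2>) and pair a lone value with padreg, because the AArch64 stack pointer must stay 16-byte aligned. Below is a minimal standalone sketch of that bookkeeping only; TempStack, RoundUpToEven and main() are illustrative and are not V8 code or API.

// Standalone sketch of the temp-slot bookkeeping behind Push/Pop above.
// TempStack, RoundUpToEven and main() are illustrative, not V8 code.
#include <cassert>
#include <cstdio>

constexpr int kSystemPointerSize = 8;  // bytes per stack slot on AArch64

// Claim an even number of slots so sp stays 16-byte aligned.
constexpr int RoundUpToEven(int slots) { return (slots + 1) & ~1; }

struct TempStack {
  int sp_delta = 0;    // slots currently claimed below the frame
  int temp_slots = 0;  // slots owned by pending temp pushes

  int Push(int element_size_in_pointers) {
    int new_slots = RoundUpToEven(element_size_in_pointers);
    sp_delta += new_slots;   // mirrors IncreaseSPDelta(new_slots)
    temp_slots += new_slots;
    return new_slots;        // a lone value gets pushed alongside padding
  }

  void Pop(int element_size_in_pointers) {
    int new_slots = RoundUpToEven(element_size_in_pointers);
    sp_delta -= new_slots;
    temp_slots -= new_slots;
  }

  void PopTempStackSlots() {  // drop everything still outstanding
    sp_delta -= temp_slots;
    temp_slots = 0;
  }
};

int main() {
  TempStack stack;
  int claimed = stack.Push(1);  // one tagged word...
  assert(claimed == 2);         // ...still claims two slots (16 bytes)
  std::printf("%d bytes claimed\n", claimed * kSystemPointerSize);
  stack.PopTempStackSlots();
  assert(stack.sp_delta == 0 && stack.temp_slots == 0);
  return 0;
}

Pushing a single 8-byte value still costs two slots, which is why the generated code pushes padreg next to it rather than adjusting sp by an odd amount.
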
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 6c4eafa1d9..855a5cedcd 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -42,6 +42,9 @@ namespace compiler {
V(Arm64StrQ) \
V(Arm64StrS) \
V(Arm64StrW) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTagged) \
+ V(Arm64StrCompressTagged) \
V(Arm64Word64AtomicLoadUint64) \
V(Arm64Word64AtomicStoreWord64)
@@ -198,13 +201,8 @@ namespace compiler {
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
V(Arm64LdarDecompressTaggedSigned) \
- V(Arm64LdarDecompressTaggedPointer) \
- V(Arm64LdarDecompressAnyTagged) \
- V(Arm64StrCompressTagged) \
+ V(Arm64LdarDecompressTagged) \
V(Arm64StlrCompressTagged) \
V(Arm64LdrDecodeSandboxedPointer) \
V(Arm64StrEncodeSandboxedPointer) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index eba6cdf75e..dcfb0151f3 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -315,11 +315,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrW:
case kArm64Ldr:
case kArm64LdrDecompressTaggedSigned:
- case kArm64LdrDecompressTaggedPointer:
- case kArm64LdrDecompressAnyTagged:
+ case kArm64LdrDecompressTagged:
case kArm64LdarDecompressTaggedSigned:
- case kArm64LdarDecompressTaggedPointer:
- case kArm64LdarDecompressAnyTagged:
+ case kArm64LdarDecompressTagged:
case kArm64LdrDecodeSandboxedPointer:
case kArm64Peek:
case kArm64LoadSplat:
@@ -431,8 +429,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 1;
case kArm64LdrDecompressTaggedSigned:
- case kArm64LdrDecompressTaggedPointer:
- case kArm64LdrDecompressAnyTagged:
+ case kArm64LdrDecompressTagged:
case kArm64Ldr:
case kArm64LdrD:
case kArm64LdrS:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 875f60e0b5..f8cbca8b5e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -90,6 +90,25 @@ class Arm64OperandGenerator final : public OperandGenerator {
}
bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ if (node->opcode() == IrOpcode::kCompressedHeapConstant) {
+ if (!COMPRESS_POINTERS_BOOL) return false;
+ // For builtin code we need static roots
+ if (selector()->isolate()->bootstrapper() && !V8_STATIC_ROOTS_BOOL) {
+ return false;
+ }
+ const RootsTable& roots_table = selector()->isolate()->roots_table();
+ RootIndex root_index;
+ CompressedHeapObjectMatcher m(node);
+ if (m.HasResolvedValue() &&
+ roots_table.IsRootHandle(m.ResolvedValue(), &root_index)) {
+ if (!RootsTable::IsReadOnly(root_index)) return false;
+ return CanBeImmediate(MacroAssemblerBase::ReadOnlyRootPtr(
+ root_index, selector()->isolate()),
+ mode);
+ }
+ return false;
+ }
+
return IsIntegerConstant(node) &&
CanBeImmediate(GetIntegerConstantValue(node), mode);
}
@@ -286,15 +305,31 @@ bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
return false;
}
-template <typename Matcher>
bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
- Node* input_node, InstructionCode* opcode, bool try_ror) {
+ Node* input_node, InstructionCode* opcode, bool try_ror,
+ MachineRepresentation rep) {
Arm64OperandGenerator g(selector);
if (!selector->CanCover(node, input_node)) return false;
if (input_node->InputCount() != 2) return false;
- Matcher shift(input_node);
- if (!shift.right().HasResolvedValue()) return false;
+ if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
+
+ switch (input_node->opcode()) {
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord32Shr:
+ case IrOpcode::kWord32Sar:
+ case IrOpcode::kWord32Ror:
+ if (rep != MachineRepresentation::kWord32) return false;
+ break;
+ case IrOpcode::kWord64Shl:
+ case IrOpcode::kWord64Shr:
+ case IrOpcode::kWord64Sar:
+ case IrOpcode::kWord64Ror:
+ if (rep != MachineRepresentation::kWord64) return false;
+ break;
+ default:
+ return false;
+ }
switch (input_node->opcode()) {
case IrOpcode::kWord32Shl:
@@ -320,7 +355,7 @@ bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
}
return false;
default:
- return false;
+ UNREACHABLE();
}
}
@@ -490,23 +525,24 @@ void VisitBinop(InstructionSelector* selector, Node* node,
&inputs[0], &inputs[1], &opcode)) {
if (must_commute_cond) cont->Commute();
input_count += 2;
- } else if (TryMatchAnyShift<Matcher>(selector, node, right_node, &opcode,
- !is_add_sub)) {
+ } else if (TryMatchAnyShift(selector, node, right_node, &opcode, !is_add_sub,
+ Matcher::representation)) {
Matcher m_shift(right_node);
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
- inputs[input_count++] = g.UseImmediate(
- static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
- } else if (can_commute && TryMatchAnyShift<Matcher>(selector, node, left_node,
- &opcode, !is_add_sub)) {
+ inputs[input_count++] = g.UseImmediate(static_cast<int>(
+ g.GetIntegerConstantValue(m_shift.right().node()) & 0x3F));
+ } else if (can_commute &&
+ TryMatchAnyShift(selector, node, left_node, &opcode, !is_add_sub,
+ Matcher::representation)) {
if (must_commute_cond) cont->Commute();
Matcher m_shift(left_node);
inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
- inputs[input_count++] = g.UseImmediate(
- static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
+ inputs[input_count++] = g.UseImmediate(static_cast<int>(
+ g.GetIntegerConstantValue(m_shift.right().node()) & 0x3F));
} else {
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
@@ -606,7 +642,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
input_count = 1;
// Check that the delta is a 32-bit integer due to the limitations of
@@ -619,6 +655,14 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
}
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ input_count = 1;
+ inputs[0] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+ return;
+ }
+
inputs[0] = g.UseRegister(base);
if (g.CanBeImmediate(index, immediate_mode)) {
@@ -663,7 +707,7 @@ void InstructionSelector::VisitLoadLane(Node* node) {
InstructionCode opcode = kArm64LoadLane;
opcode |= LaneSizeField::encode(params.rep.MemSize() * kBitsPerByte);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
Arm64OperandGenerator g(this);
@@ -681,7 +725,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
opcode |=
LaneSizeField::encode(ElementSizeInBytes(params.rep) * kBitsPerByte);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
Arm64OperandGenerator g(this);
@@ -771,7 +815,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
opcode |= AddressingModeField::encode(kMode_MRR);
}
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
Emit(opcode, 1, outputs, 2, inputs);
}
@@ -826,11 +870,8 @@ void InstructionSelector::VisitLoad(Node* node) {
immediate_mode = kLoadStoreImm32;
break;
case MachineRepresentation::kTaggedPointer:
- opcode = kArm64LdrDecompressTaggedPointer;
- immediate_mode = kLoadStoreImm32;
- break;
case MachineRepresentation::kTagged:
- opcode = kArm64LdrDecompressAnyTagged;
+ opcode = kArm64LdrDecompressTagged;
immediate_mode = kLoadStoreImm32;
break;
#else
@@ -856,7 +897,9 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kProtectedLoad) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
+ } else if (node->opcode() == IrOpcode::kLoadTrapOnNull) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
}
EmitLoad(this, node, opcode, immediate_mode, rep);
@@ -903,7 +946,10 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
+ if (node->opcode() == IrOpcode::kStoreTrapOnNull) {
+ code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
+ }
Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionOperand inputs[4];
@@ -971,7 +1017,7 @@ void InstructionSelector::VisitStore(Node* node) {
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.ResolvedValue());
if (is_int32(delta)) {
input_count = 2;
@@ -985,6 +1031,16 @@ void InstructionSelector::VisitStore(Node* node) {
}
inputs[0] = g.UseRegisterOrImmediateZero(value);
+
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ input_count = 2;
+ // This will only work if {index} is a constant.
+ inputs[1] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Root);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+ return;
+ }
+
inputs[1] = g.UseRegister(base);
if (g.CanBeImmediate(index, immediate_mode)) {
@@ -1002,7 +1058,9 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (node->opcode() == IrOpcode::kProtectedStore) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
+ } else if (node->opcode() == IrOpcode::kStoreTrapOnNull) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
}
Emit(opcode, 0, nullptr, input_count, inputs);
@@ -2689,14 +2747,20 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
AtomicWidthField::encode(width);
if (access_kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
+ }
+ if (CpuFeatures::IsSupported(LSE)) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
}
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
@@ -2710,15 +2774,23 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(old_value),
g.UseUniqueRegister(new_value)};
- InstructionOperand outputs[] = {g.DefineAsRegister(node)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionOperand outputs[1];
InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
AtomicWidthField::encode(width);
if (access_kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
+ }
+ if (CpuFeatures::IsSupported(LSE)) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ outputs[0] = g.DefineSameAsInput(node, 2);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ outputs[0] = g.DefineAsRegister(node);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
}
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
@@ -2756,10 +2828,10 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
code = kArm64LdarDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
- code = kArm64LdarDecompressTaggedPointer;
+ code = kArm64LdarDecompressTagged;
break;
case MachineRepresentation::kTagged:
- code = kArm64LdarDecompressAnyTagged;
+ code = kArm64LdarDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
@@ -2782,7 +2854,7 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
}
if (atomic_load_params.kind() == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
code |=
@@ -2823,7 +2895,7 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
code = kArchAtomicStoreWithWriteBarrier;
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
} else {
switch (rep) {
case MachineRepresentation::kWord8:
@@ -2858,7 +2930,7 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
}
if (store_params.kind() == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
code |= AddressingModeField::encode(kMode_MRR);
@@ -2877,15 +2949,22 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AtomicWidthField::encode(width);
if (access_kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
+ }
+
+ if (CpuFeatures::IsSupported(LSE)) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
}
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
}
} // namespace
@@ -3156,6 +3235,43 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
return VisitWord32Test(this, value, &cont);
}
}
+
+ if (isolate() && (V8_STATIC_ROOTS_BOOL ||
+ (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) {
+ Arm64OperandGenerator g(this);
+ const RootsTable& roots_table = isolate()->roots_table();
+ RootIndex root_index;
+ Node* left = nullptr;
+ Handle<HeapObject> right;
+ // HeapConstants and CompressedHeapConstants can be treated the same when
+ // using them as an input to a 32-bit comparison. Check whether either is
+ // present.
+ {
+ CompressedHeapObjectBinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ left = m.left().node();
+ right = m.right().ResolvedValue();
+ } else {
+ HeapObjectBinopMatcher m2(node);
+ if (m2.right().HasResolvedValue()) {
+ left = m2.left().node();
+ right = m2.right().ResolvedValue();
+ }
+ }
+ }
+ if (!right.is_null() && roots_table.IsRootHandle(right, &root_index)) {
+ DCHECK_NE(left, nullptr);
+ if (RootsTable::IsReadOnly(root_index)) {
+ Tagged_t ptr =
+ MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate());
+ if (g.CanBeImmediate(ptr, ImmediateMode::kArithmeticImm)) {
+ return VisitCompare(this, kArm64Cmp32, g.UseRegister(left),
+ g.TempImmediate(ptr), &cont);
+ }
+ }
+ }
+ }
+
VisitWord32Compare(this, node, &cont);
}
@@ -3869,9 +3985,11 @@ void InstructionSelector::VisitS128Zero(Node* node) {
void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
Arm64OperandGenerator g(this);
- Emit(
- kArm64I32x4DotI8x16AddS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+ InstructionOperand output = CpuFeatures::IsSupported(DOTPROD)
+ ? g.DefineSameAsInput(node, 2)
+ : g.DefineAsRegister(node);
+ Emit(kArm64I32x4DotI8x16AddS, output, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
}
#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
@@ -3945,7 +4063,8 @@ SIMD_UNOP_LANE_SIZE_LIST(SIMD_VISIT_UNOP_LANE_SIZE)
using ShuffleMatcher =
ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
-using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher>;
+using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher,
+ MachineRepresentation::kSimd128>;
namespace {
// Struct holding the result of pattern-matching a mul+dup.
@@ -4285,7 +4404,7 @@ void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
#define VISIT_SIMD_QFMOP(op) \
void InstructionSelector::Visit##op(Node* node) { \
Arm64OperandGenerator g(this); \
- Emit(kArm64##op, g.DefineSameAsFirst(node), \
+ Emit(kArm64##op, g.DefineSameAsInput(node, 2), \
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
g.UseRegister(node->InputAt(2))); \
}
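
The VisitWord32Equal change above lets a comparison against a read-only root be emitted as a 32-bit compare with an immediate once pointer compression (and, for builtin code, static roots) is in use. The following standalone sketch only illustrates the underlying idea; the cage base, root offset and helper names are invented for illustration and are not V8's actual constants or API.

// Sketch of comparing a tagged value against a read-only root as a 32-bit
// immediate. All constants and names here are assumptions, not V8 values.
#include <cstdint>
#include <cstdio>

constexpr uint64_t kAssumedCageBase = 0x0000'1000'0000'0000ULL;
constexpr uint32_t kAssumedUndefinedOffset = 0x2d5;  // hypothetical

// Pointer compression keeps only the low 32 bits (the offset into the cage).
inline uint32_t Compress(uint64_t tagged_ptr) {
  return static_cast<uint32_t>(tagged_ptr);
}

// With read-only/static roots the offset is a compile-time constant, so the
// selector can emit the equivalent of `Cmp w<reg>, #imm` instead of first
// loading the root object.
inline bool IsUndefined(uint64_t tagged_ptr) {
  return Compress(tagged_ptr) == kAssumedUndefinedOffset;
}

int main() {
  uint64_t undefined_obj = kAssumedCageBase + kAssumedUndefinedOffset;
  uint64_t other_obj = kAssumedCageBase + 0x400;
  std::printf("%d %d\n", IsUndefined(undefined_obj), IsUndefined(other_obj));
  return 0;
}
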
diff --git a/deps/v8/src/compiler/backend/bitcast-elider.cc b/deps/v8/src/compiler/backend/bitcast-elider.cc
index 263e3d691b..403e6d318c 100644
--- a/deps/v8/src/compiler/backend/bitcast-elider.cc
+++ b/deps/v8/src/compiler/backend/bitcast-elider.cc
@@ -20,6 +20,37 @@ bool IsBitcast(Node* node) {
node->opcode() == IrOpcode::kBitcastWordToTaggedSigned;
}
+bool OwnedByWord32Op(Node* node) {
+#if V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_MIPS64
+ return false;
+#else
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ case IrOpcode::kChangeInt32ToInt64:
+#define Word32Op(Name) case IrOpcode::k##Name:
+ MACHINE_BINOP_32_LIST(Word32Op)
+#undef Word32Op
+ break;
+ default:
+ return false;
+ }
+ }
+ return true;
+#endif
+}
+
+void Replace(Node* node, Node* replacement) {
+ for (Edge edge : node->use_edges()) {
+ edge.UpdateTo(replacement);
+ }
+ node->Kill();
+}
+
} // namespace
void BitcastElider::Enqueue(Node* node) {
@@ -28,18 +59,21 @@ void BitcastElider::Enqueue(Node* node) {
to_visit_.push(node);
}
+void BitcastElider::Revisit(Node* node) { to_visit_.push(node); }
+
void BitcastElider::VisitNode(Node* node) {
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- bool should_replace_input = false;
- while (IsBitcast(input)) {
- input = input->InputAt(0);
- should_replace_input = true;
- }
- if (should_replace_input) {
- node->ReplaceInput(i, input);
+ if (input->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
+ OwnedByWord32Op(input)) {
+ Replace(input, input->InputAt(0));
+ Revisit(node);
+ } else if (is_builtin_ && IsBitcast(input)) {
+ Replace(input, input->InputAt(0));
+ Revisit(node);
+ } else {
+ Enqueue(input);
}
- Enqueue(input);
}
}
@@ -52,8 +86,11 @@ void BitcastElider::ProcessGraph() {
}
}
-BitcastElider::BitcastElider(Zone* zone, Graph* graph)
- : graph_(graph), to_visit_(zone), seen_(graph, 2) {}
+BitcastElider::BitcastElider(Zone* zone, Graph* graph, bool is_builtin)
+ : graph_(graph),
+ to_visit_(zone),
+ seen_(graph, 2),
+ is_builtin_(is_builtin) {}
void BitcastElider::Reduce() { ProcessGraph(); }
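
The new Replace() helper in the bitcast elider redirects every use edge of a node to its replacement and then kills the node, so a TruncateInt64ToInt32 consumed only by 32-bit operations disappears entirely. Here is a toy sketch of that rewrite step using a hand-rolled Node type rather than V8's graph classes; everything in it is illustrative.

// Toy sketch of Replace(): point every user of `node` at `replacement`,
// then drop `node` from the graph. Not V8's Node/Edge API.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> inputs;
};

void Replace(Node* node, Node* replacement, std::vector<Node*>& all_nodes) {
  for (Node* user : all_nodes) {
    for (Node*& input : user->inputs) {
      if (input == node) input = replacement;  // edge.UpdateTo(replacement)
    }
  }
  // node->Kill(): remove it from the graph.
  all_nodes.erase(std::remove(all_nodes.begin(), all_nodes.end(), node),
                  all_nodes.end());
}

int main() {
  Node word64{0, {}}, trunc{1, {&word64}}, use{2, {&trunc}};
  std::vector<Node*> graph{&word64, &trunc, &use};
  Replace(&trunc, &word64, graph);          // elide the truncation
  std::printf("%d\n", use.inputs[0]->id);   // prints 0
  return 0;
}

Re-enqueuing the user afterwards (Revisit) matters because eliding one input can expose another elidable node feeding the same user.
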
diff --git a/deps/v8/src/compiler/backend/bitcast-elider.h b/deps/v8/src/compiler/backend/bitcast-elider.h
index b20d127a98..cc6b3653e6 100644
--- a/deps/v8/src/compiler/backend/bitcast-elider.h
+++ b/deps/v8/src/compiler/backend/bitcast-elider.h
@@ -15,16 +15,18 @@ namespace compiler {
class Graph;
-// Elide all the Bitcast nodes which are required by MachineGraphVerifier. This
-// avoid generating redundant move instructions in instruction selection phase.
+// Elide all the Bitcast and TruncateInt64ToInt32 nodes which are required by
+// MachineGraphVerifier. This avoids generating redundant move instructions in
+// the instruction selection phase.
class BitcastElider {
public:
- BitcastElider(Zone* zone, Graph* graph);
+ BitcastElider(Zone* zone, Graph* graph, bool is_builtin);
~BitcastElider() = default;
void Reduce();
void Enqueue(Node* node);
+ void Revisit(Node* node);
void VisitNode(Node* node);
void ProcessGraph();
@@ -32,6 +34,7 @@ class BitcastElider {
Graph* const graph_;
ZoneQueue<Node*> to_visit_;
NodeMarker<bool> seen_;
+ bool is_builtin_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index df463396e5..9ece920f30 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -90,7 +90,7 @@ class InstructionOperandConverter {
return ToExternalReference(instr_->InputAt(index));
}
- Handle<CodeT> InputCode(size_t index) {
+ Handle<Code> InputCode(size_t index) {
return ToCode(instr_->InputAt(index));
}
@@ -128,6 +128,20 @@ class InstructionOperandConverter {
return ToSimd128Register(instr_->TempAt(index));
}
+#if defined(V8_TARGET_ARCH_X64)
+ Simd256Register InputSimd256Register(size_t index) {
+ return ToSimd256Register(instr_->InputAt(index));
+ }
+
+ Simd256Register OutputSimd256Register() {
+ return ToSimd256Register(instr_->Output());
+ }
+
+ Simd256Register TempSimd256Register(size_t index) {
+ return ToSimd256Register(instr_->TempAt(index));
+ }
+#endif
+
// -- Conversions for operands -----------------------------------------------
Label* ToLabel(InstructionOperand* op) {
@@ -154,6 +168,12 @@ class InstructionOperandConverter {
return LocationOperand::cast(op)->GetSimd128Register();
}
+#if defined(V8_TARGET_ARCH_X64)
+ Simd256Register ToSimd256Register(InstructionOperand* op) {
+ return LocationOperand::cast(op)->GetSimd256Register();
+ }
+#endif
+
Constant ToConstant(InstructionOperand* op) const {
if (op->IsImmediate()) {
return gen_->instructions()->GetImmediate(ImmediateOperand::cast(op));
@@ -172,7 +192,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToExternalReference();
}
- Handle<CodeT> ToCode(InstructionOperand* op) {
+ Handle<Code> ToCode(InstructionOperand* op) {
return ToConstant(op).ToCode();
}
@@ -266,14 +286,14 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
const Frame* frame() const { return frame_; }
- TurboAssembler* tasm() { return tasm_; }
+ MacroAssembler* masm() { return masm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
const Frame* const frame_;
- TurboAssembler* const tasm_;
+ MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 929fc7eb77..04826d22b1 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -64,7 +64,7 @@ CodeGenerator::CodeGenerator(
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
- tasm_(isolate, options, CodeObjectRequired::kNo,
+ masm_(isolate, options, CodeObjectRequired::kNo,
#if V8_ENABLE_WEBASSEMBLY
buffer_cache ? buffer_cache->GetAssemblerBuffer(
AssemblerBase::kDefaultBufferSize)
@@ -98,15 +98,15 @@ CodeGenerator::CodeGenerator(
}
CreateFrameAccessState(frame);
CHECK_EQ(info->is_osr(), osr_helper_.has_value());
- tasm_.set_jump_optimization_info(jump_opt);
+ masm_.set_jump_optimization_info(jump_opt);
CodeKind code_kind = info->code_kind();
if (code_kind == CodeKind::WASM_FUNCTION ||
code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
- tasm_.set_abort_hard(true);
+ masm_.set_abort_hard(true);
}
- tasm_.set_builtin(builtin);
+ masm_.set_builtin(builtin);
}
bool CodeGenerator::wasm_runtime_exception_support() const {
@@ -145,7 +145,11 @@ uint32_t CodeGenerator::GetStackCheckOffset() {
return 0;
}
+ size_t incoming_parameter_count =
+ linkage_->GetIncomingDescriptor()->ParameterSlotCount();
+ DCHECK(is_int32(incoming_parameter_count));
int32_t optimized_frame_height =
+ static_cast<int32_t>(incoming_parameter_count) * kSystemPointerSize +
frame()->GetTotalFrameSlotCount() * kSystemPointerSize;
DCHECK(is_int32(max_unoptimized_frame_height_));
int32_t signed_max_unoptimized_frame_height =
@@ -158,6 +162,11 @@ uint32_t CodeGenerator::GetStackCheckOffset() {
signed_max_unoptimized_frame_height - optimized_frame_height, 0));
uint32_t max_pushed_argument_bytes =
static_cast<uint32_t>(max_pushed_argument_count_ * kSystemPointerSize);
+ if (v8_flags.deopt_to_baseline) {
+ // If we deopt to baseline, we need to be sure that we have enough space
+    // to recreate the unoptimized frame plus arguments to the largest call.
+ return frame_height_delta + max_pushed_argument_bytes;
+ }
return std::max(frame_height_delta, max_pushed_argument_bytes);
}
@@ -173,19 +182,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Label* jump_deoptimization_entry_label =
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
- tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
+ masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
exit->pos(), deoptimization_id);
}
if (deopt_kind == DeoptimizeKind::kLazy) {
++lazy_deopt_count_;
- tasm()->BindExceptionHandler(exit->label());
+ masm()->BindExceptionHandler(exit->label());
} else {
++eager_deopt_count_;
- tasm()->bind(exit->label());
+ masm()->bind(exit->label());
}
Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
- tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
+ masm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
deopt_kind, exit->continue_label(),
jump_deoptimization_entry_label);
@@ -195,7 +204,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
- tasm()->MaybeEmitOutOfLineConstantPool();
+ masm()->MaybeEmitOutOfLineConstantPool();
}
void CodeGenerator::AssembleCode() {
@@ -204,27 +213,27 @@ void CodeGenerator::AssembleCode() {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in AssemblePrologue).
- FrameScope frame_scope(tasm(), StackFrame::MANUAL);
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
if (info->source_positions()) {
AssembleSourcePosition(start_source_position());
}
- offsets_info_.code_start_register_check = tasm()->pc_offset();
+ offsets_info_.code_start_register_check = masm()->pc_offset();
- tasm()->CodeEntry();
+ masm()->CodeEntry();
// Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
if (v8_flags.debug_code && info->called_with_code_start_register()) {
- tasm()->RecordComment("-- Prologue: check code start register --");
+ masm()->RecordComment("-- Prologue: check code start register --");
AssembleCodeStartRegisterCheck();
}
- offsets_info_.deopt_check = tasm()->pc_offset();
+ offsets_info_.deopt_check = masm()->pc_offset();
// We want to bailout only from JS functions, which are the only ones
// that are optimized.
if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
- tasm()->RecordComment("-- Prologue: check for deoptimization --");
+ masm()->RecordComment("-- Prologue: check for deoptimization --");
BailoutIfDeoptimized();
}
@@ -258,22 +267,22 @@ void CodeGenerator::AssembleCode() {
instr_starts_.assign(instructions()->instructions().size(), {});
}
// Assemble instructions in assembly order.
- offsets_info_.blocks_start = tasm()->pc_offset();
+ offsets_info_.blocks_start = masm()->pc_offset();
for (const InstructionBlock* block : instructions()->ao_blocks()) {
// Align loop headers on vendor recommended boundaries.
- if (!tasm()->jump_optimization_info()) {
+ if (!masm()->jump_optimization_info()) {
if (block->ShouldAlignLoopHeader()) {
- tasm()->LoopHeaderAlign();
+ masm()->LoopHeaderAlign();
} else if (block->ShouldAlignCodeTarget()) {
- tasm()->CodeTargetAlign();
+ masm()->CodeTargetAlign();
}
}
if (info->trace_turbo_json()) {
- block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
+ block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset();
}
// Bind a label for a block.
current_block_ = block->rpo_number();
- unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
+ unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
if (v8_flags.code_comments) {
std::ostringstream buffer;
buffer << "-- B" << block->rpo_number().ToInt() << " start";
@@ -289,12 +298,12 @@ void CodeGenerator::AssembleCode() {
buffer << " (in loop " << block->loop_header().ToInt() << ")";
}
buffer << " --";
- tasm()->RecordComment(buffer.str().c_str());
+ masm()->RecordComment(buffer.str().c_str());
}
frame_access_state()->MarkHasFrame(block->needs_frame());
- tasm()->bind(GetLabel(current_block_));
+ masm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) {
AssembleConstructFrame();
@@ -303,7 +312,7 @@ void CodeGenerator::AssembleCode() {
// using the roots.
// TODO(mtrofin): investigate how we can avoid doing this repeatedly.
if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
- tasm()->InitializeRootRegister();
+ masm()->InitializeRootRegister();
}
}
#ifdef V8_TARGET_ARCH_RISCV64
@@ -312,10 +321,10 @@ void CodeGenerator::AssembleCode() {
// back between blocks. the Rvv instruction may get an incorrect vtype. so
// here VectorUnit needs to be cleared to ensure that the vtype is correct
// within the block.
- tasm()->VU.clear();
+ masm()->VU.clear();
#endif
if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) {
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
result_ = AssembleBlock(block);
} else {
result_ = AssembleBlock(block);
@@ -325,29 +334,29 @@ void CodeGenerator::AssembleCode() {
}
// Assemble all out-of-line code.
- offsets_info_.out_of_line_code = tasm()->pc_offset();
+ offsets_info_.out_of_line_code = masm()->pc_offset();
if (ools_) {
- tasm()->RecordComment("-- Out of line code --");
+ masm()->RecordComment("-- Out of line code --");
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
- tasm()->bind(ool->entry());
+ masm()->bind(ool->entry());
ool->Generate();
- if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
+ if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
}
}
// This nop operation is needed to ensure that the trampoline is not
// confused with the pc of the call before deoptimization.
// The test regress/regress-259 is an example of where we need it.
- tasm()->nop();
+ masm()->nop();
// For some targets, we must make sure that constant and veneer pools are
// emitted before emitting the deoptimization exits.
PrepareForDeoptimizationExits(&deoptimization_exits_);
- deopt_exit_start_offset_ = tasm()->pc_offset();
+ deopt_exit_start_offset_ = masm()->pc_offset();
// Assemble deoptimization exits.
- offsets_info_.deoptimization_exits = tasm()->pc_offset();
+ offsets_info_.deoptimization_exits = masm()->pc_offset();
int last_updated = 0;
// We sort the deoptimization exits here so that the lazy ones will be visited
// last. We need this as lazy deopts might need additional instructions.
@@ -367,7 +376,7 @@ void CodeGenerator::AssembleCode() {
{
#ifdef V8_TARGET_ARCH_PPC64
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- tasm());
+ masm());
#endif
for (DeoptimizationExit* exit : deoptimization_exits_) {
if (exit->emitted()) continue;
@@ -388,19 +397,19 @@ void CodeGenerator::AssembleCode() {
}
}
- offsets_info_.pools = tasm()->pc_offset();
+ offsets_info_.pools = masm()->pc_offset();
// TODO(jgruber): Move all inlined metadata generation into a new,
// architecture-independent version of FinishCode. Currently, this includes
// the safepoint table, handler table, constant pool, and code comments, in
// that order.
FinishCode();
- offsets_info_.jump_tables = tasm()->pc_offset();
+ offsets_info_.jump_tables = masm()->pc_offset();
// Emit the jump tables.
if (jump_tables_) {
- tasm()->Align(kSystemPointerSize);
+ masm()->Align(kSystemPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) {
- tasm()->bind(table->label());
+ masm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count());
}
}
@@ -408,34 +417,35 @@ void CodeGenerator::AssembleCode() {
// The LinuxPerfJitLogger logs code up until here, excluding the safepoint
// table. Resolve the unwinding info now so it is aware of the same code
// size as reported by perf.
- unwinding_info_writer_.Finish(tasm()->pc_offset());
+ unwinding_info_writer_.Finish(masm()->pc_offset());
// Final alignment before starting on the metadata section.
- tasm()->Align(Code::kMetadataAlignment);
+ masm()->Align(InstructionStream::kMetadataAlignment);
- safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
+ safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
// Emit the exception handler table.
if (!handlers_.empty()) {
- handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
+ handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm());
for (size_t i = 0; i < handlers_.size(); ++i) {
- HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
+ HandlerTable::EmitReturnEntry(masm(), handlers_[i].pc_offset,
handlers_[i].handler->pos());
}
}
- tasm()->MaybeEmitOutOfLineConstantPool();
- tasm()->FinalizeJumpOptimizationInfo();
+ masm()->MaybeEmitOutOfLineConstantPool();
+ masm()->FinalizeJumpOptimizationInfo();
result_ = kSuccess;
}
+#ifndef V8_TARGET_ARCH_X64
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
if (end - begin < kBinarySearchSwitchMinimalCases) {
while (begin != end) {
- tasm()->JumpIfEqual(input, begin->first, begin->second);
+ masm()->JumpIfEqual(input, begin->first, begin->second);
++begin;
}
AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
@@ -443,11 +453,12 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
}
auto middle = begin + (end - begin) / 2;
Label less_label;
- tasm()->JumpIfLessThan(input, middle->first, &less_label);
+ masm()->JumpIfLessThan(input, middle->first, &less_label);
AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
- tasm()->bind(&less_label);
+ masm()->bind(&less_label);
AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
}
+#endif // V8_TARGET_ARCH_X64
void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target))
@@ -469,8 +480,8 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
MaybeHandle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) {
- tasm()->AbortedCodeGeneration();
- return MaybeHandle<Code>();
+ masm()->AbortedCodeGeneration();
+ return {};
}
// Allocate the source position table.
@@ -482,11 +493,11 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
// Allocate and install the code.
CodeDesc desc;
- tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
+ masm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
#if defined(V8_OS_WIN64)
if (Builtins::IsBuiltinId(info_->builtin())) {
- isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo());
+ isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo());
}
#endif // V8_OS_WIN64
@@ -508,13 +519,13 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
- tasm()->AbortedCodeGeneration();
- return MaybeHandle<Code>();
+ masm()->AbortedCodeGeneration();
+ return {};
}
- LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(
- code->raw_instruction_start(),
- *source_positions, JitCodeEvent::JIT_CODE));
+ LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(code->InstructionStart(),
+ *source_positions,
+ JitCodeEvent::JIT_CODE));
return code;
}
@@ -527,7 +538,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
}
void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
- auto safepoint = safepoints()->DefineSafepoint(tasm());
+ auto safepoint = safepoints()->DefineSafepoint(masm());
int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
@@ -558,7 +569,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
const InstructionBlock* block) {
if (block->IsHandler()) {
- tasm()->ExceptionHandler();
+ masm()->ExceptionHandler();
}
for (int i = block->code_start(); i < block->code_end(); ++i) {
CodeGenResult result = AssembleInstruction(i, block);
@@ -702,8 +713,11 @@ RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
return true_rpo;
}
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
- if (IsNextInAssemblyOrder(true_rpo)) {
+ if (IsNextInAssemblyOrder(true_rpo) || instructions()
+ ->InstructionBlockAt(false_rpo)
+ ->IsLoopHeaderInAssemblyOrder()) {
// true block is next, can fall through if condition negated.
+ // false block is loop header, can save one jump if condition negated.
std::swap(true_rpo, false_rpo);
condition = NegateFlagsCondition(condition);
}
@@ -718,7 +732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
int instruction_index, const InstructionBlock* block) {
Instruction* instr = instructions()->InstructionAt(instruction_index);
if (info()->trace_turbo_json()) {
- instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
+ instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset();
}
int first_unused_stack_slot;
FlagsMode mode = FlagsModeField::decode(instr->opcode());
@@ -738,14 +752,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleDeconstructFrame();
}
if (info()->trace_turbo_json()) {
- instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
+ instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset();
}
// Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result;
if (info()->trace_turbo_json()) {
- instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
+ instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset();
}
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
@@ -779,7 +793,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(exit->continue_label());
+ masm()->bind(exit->continue_label());
break;
}
case kFlags_set: {
@@ -818,7 +832,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
if (!source_position.IsKnown()) return;
- source_position_table_builder_.AddPosition(tasm()->pc_offset(),
+ source_position_table_builder_.AddPosition(masm()->pc_offset(),
source_position, false);
if (v8_flags.code_comments) {
OptimizedCompilationInfo* info = this->info();
@@ -833,17 +847,17 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
// the inlining stack from other information.
- if (info->trace_turbo_json() || !tasm()->isolate() ||
- tasm()->isolate()->concurrent_recompilation_enabled()) {
+ if (info->trace_turbo_json() || !masm()->isolate() ||
+ masm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
AllowGarbageCollection allocation;
AllowHandleAllocation handles;
AllowHandleDereference deref;
- buffer << source_position.InliningStack(info);
+ buffer << source_position.InliningStack(masm()->isolate(), info);
}
buffer << " --";
- tasm()->RecordComment(buffer.str().c_str());
+ masm()->RecordComment(buffer.str().c_str());
}
}
@@ -981,7 +995,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
handlers_.push_back(
- {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
+ {GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()});
}
if (needs_frame_state) {
@@ -991,7 +1005,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
- int pc_offset = tasm()->pc_offset_for_safepoint();
+ int pc_offset = masm()->pc_offset_for_safepoint();
BuildTranslation(instr, pc_offset, frame_state_offset, 0,
descriptor->state_combine());
}
@@ -1129,10 +1143,10 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
- const int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
const int translation_index = translations_.BeginTranslation(
static_cast<int>(descriptor->GetFrameCount()),
- static_cast<int>(descriptor->GetJSFrameCount()), update_feedback_count);
+ static_cast<int>(descriptor->GetJSFrameCount()),
+ entry.feedback().IsValid());
if (entry.feedback().IsValid()) {
DeoptimizationLiteral literal =
DeoptimizationLiteral(entry.feedback().vector);
@@ -1325,7 +1339,7 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr,
}
void CodeGenerator::MarkLazyDeoptSite() {
- last_lazy_deopt_pc_ = tasm()->pc_offset();
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
@@ -1336,7 +1350,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
}
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
- : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
+ : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
}
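
The ComputeBranchInfo hunk above adds a second reason to flip a branch: besides the true block being next in assembly order, a false block that is a loop header emitted in assembly order now also triggers the swap-and-negate, which per the comment saves one jump. A minimal sketch of that decision, using simplified stand-in types (ints instead of RpoNumbers, a tiny Cond enum instead of FlagsCondition); this is an illustration of the shape of the logic, not the V8 API.

    enum class Cond { kEqual, kNotEqual, kLessThan, kGreaterThanOrEqual };

    Cond Negate(Cond c) {
      switch (c) {
        case Cond::kEqual: return Cond::kNotEqual;
        case Cond::kNotEqual: return Cond::kEqual;
        case Cond::kLessThan: return Cond::kGreaterThanOrEqual;
        case Cond::kGreaterThanOrEqual: return Cond::kLessThan;
      }
      return c;  // unreachable
    }

    struct BranchLayout {
      int jump_block;      // target of the explicit conditional jump
      int fallthru_block;  // target reached by falling through
      Cond condition;
    };

    // If the true block is next in assembly order, or the false block is a loop
    // header emitted in assembly order, negate the condition so the common path
    // is a fallthrough and only one jump is emitted.
    BranchLayout ChooseBranchLayout(int true_block, int false_block, Cond cond,
                                    int next_block, bool false_is_loop_header) {
      if (true_block == next_block || false_is_loop_header) {
        return {false_block, true_block, Negate(cond)};
      }
      return {true_block, false_block, cond};
    }

    int main() {
      BranchLayout b = ChooseBranchLayout(/*true_block=*/5, /*false_block=*/9,
                                          Cond::kLessThan, /*next_block=*/5,
                                          /*false_is_loop_header=*/false);
      // The true block falls through; we jump to block 9 when the negated
      // condition (>=) holds.
      return (b.fallthru_block == 5 && b.jump_block == 9 &&
              b.condition == Cond::kGreaterThanOrEqual) ? 0 : 1;
    }
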
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index ee36b75dae..1709a398b8 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void RecordSafepoint(ReferenceMap* references);
Zone* zone() const { return zone_; }
- TurboAssembler* tasm() { return &tasm_; }
+ MacroAssembler* masm() { return &masm_; }
SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
size_t handler_table_offset() const { return handler_table_offset_; }
@@ -201,6 +201,10 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
return offsets_info_;
}
+#if V8_ENABLE_WEBASSEMBLY
+ bool IsWasm() const { return info()->IsWasm(); }
+#endif
+
static constexpr int kBinarySearchSwitchMinimalCases = 4;
// Returns true if an offset should be applied to the given stack check. There
@@ -278,9 +282,15 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
#if V8_ENABLE_WEBASSEMBLY
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_TARGET_ARCH_X64
+ void AssembleArchBinarySearchSwitchRange(
+ Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
+ std::pair<int32_t, Label*>* end, base::Optional<int32_t>& last_cmp_value);
+#else
void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end);
+#endif // V8_TARGET_ARCH_X64
void AssembleArchBinarySearchSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
@@ -375,7 +385,11 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
InstructionOperand* destination) final;
void AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) final;
- void MoveToTempLocation(InstructionOperand* src) final;
+ AllocatedOperand Push(InstructionOperand* src) final;
+ void Pop(InstructionOperand* src, MachineRepresentation rep) final;
+ void PopTempStackSlots() final;
+ void MoveToTempLocation(InstructionOperand* src,
+ MachineRepresentation rep) final;
void MoveTempLocationTo(InstructionOperand* dst,
MachineRepresentation rep) final;
void SetPendingMove(MoveOperands* move) final;
@@ -444,7 +458,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
RpoNumber current_block_;
SourcePosition start_source_position_;
SourcePosition current_source_position_;
- TurboAssembler tasm_;
+ MacroAssembler masm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_;
@@ -463,8 +477,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// with function size. {jump_deoptimization_entry_labels_} is an optimization
// to that effect, which extracts the (potentially large) instruction
// sequence for the final jump to the deoptimization entry into a single spot
- // per Code object. All deopt exits can then near-call to this label. Note:
- // not used on all architectures.
+ // per InstructionStream object. All deopt exits can then near-call to this
+ // label. Note: not used on all architectures.
Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
// The maximal combined height of all frames produced upon deoptimization, and
diff --git a/deps/v8/src/compiler/backend/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc
index 293fc9352c..5454996b38 100644
--- a/deps/v8/src/compiler/backend/frame-elider.cc
+++ b/deps/v8/src/compiler/backend/frame-elider.cc
@@ -29,6 +29,18 @@ void FrameElider::MarkBlocks() {
block->mark_needs_frame();
break;
}
+ if (instr->arch_opcode() == ArchOpcode::kArchStackSlot &&
+ instr->InputAt(0)->IsImmediate() &&
+ code_->GetImmediate(ImmediateOperand::cast(instr->InputAt(0)))
+ .ToInt32() > 0) {
+ // We shouldn't allow accesses to the stack below the current stack
+ // pointer (indicated by positive slot indices).
+ // This is in particular because signal handlers (which could, of
+ // course, be triggered at any point in time) will overwrite this
+ // memory.
+ block->mark_needs_frame();
+ break;
+ }
}
}
}
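
The FrameElider change above forces a frame for any block containing a kArchStackSlot whose immediate slot index is positive, i.e. an address below the current stack pointer that a signal handler could clobber at any time. A toy version of the predicate, with an invented struct standing in for the real Instruction class:

    #include <cassert>

    // Stand-in instruction record; the real check inspects arch_opcode() and
    // the first ImmediateOperand input.
    struct StackSlotInstr {
      bool is_arch_stack_slot;
      bool input0_is_immediate;
      int input0_value;  // slot index; positive means below the stack pointer
    };

    bool ForcesFrame(const StackSlotInstr& instr) {
      return instr.is_arch_stack_slot && instr.input0_is_immediate &&
             instr.input0_value > 0;
    }

    int main() {
      assert(ForcesFrame({true, true, 2}));    // access below sp: keep the frame
      assert(!ForcesFrame({true, true, -1}));  // not below sp: frame can be elided
      return 0;
    }
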
diff --git a/deps/v8/src/compiler/backend/gap-resolver.cc b/deps/v8/src/compiler/backend/gap-resolver.cc
index ad2dbf9048..e5d479e824 100644
--- a/deps/v8/src/compiler/backend/gap-resolver.cc
+++ b/deps/v8/src/compiler/backend/gap-resolver.cc
@@ -16,64 +16,6 @@ namespace compiler {
namespace {
-// Splits a FP move between two location operands into the equivalent series of
-// moves between smaller sub-operands, e.g. a double move to two single moves.
-// This helps reduce the number of cycles that would normally occur under FP
-// aliasing, and makes swaps much easier to implement.
-MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
- ParallelMove* moves) {
- DCHECK(kFPAliasing == AliasingKind::kCombine);
- // Splitting is only possible when the slot size is the same as float size.
- DCHECK_EQ(kSystemPointerSize, kFloatSize);
- const LocationOperand& src_loc = LocationOperand::cast(move->source());
- const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
- MachineRepresentation dst_rep = dst_loc.representation();
- DCHECK_NE(smaller_rep, dst_rep);
- auto src_kind = src_loc.location_kind();
- auto dst_kind = dst_loc.location_kind();
-
- int aliases =
- 1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
- int base = -1;
- USE(base);
- DCHECK_EQ(aliases, RegisterConfiguration::Default()->GetAliases(
- dst_rep, 0, smaller_rep, &base));
-
- int src_index = -1;
- int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kSystemPointerSize;
- int src_step = 1;
- if (src_kind == LocationOperand::REGISTER) {
- src_index = src_loc.register_code() * aliases;
- } else {
- src_index = src_loc.index();
- // For operands that occupy multiple slots, the index refers to the last
- // slot. On little-endian architectures, we start at the high slot and use a
- // negative step so that register-to-slot moves are in the correct order.
- src_step = -slot_size;
- }
- int dst_index = -1;
- int dst_step = 1;
- if (dst_kind == LocationOperand::REGISTER) {
- dst_index = dst_loc.register_code() * aliases;
- } else {
- dst_index = dst_loc.index();
- dst_step = -slot_size;
- }
-
- // Reuse 'move' for the first fragment. It is not pending.
- move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
- move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
- // Add the remaining fragment moves.
- for (int i = 1; i < aliases; ++i) {
- src_index += src_step;
- dst_index += dst_step;
- moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
- AllocatedOperand(dst_kind, smaller_rep, dst_index));
- }
- // Return the first fragment.
- return move;
-}
-
enum MoveOperandKind : uint8_t { kConstant, kGpReg, kFpReg, kStack };
MoveOperandKind GetKind(const InstructionOperand& move) {
@@ -92,7 +34,6 @@ void GapResolver::Resolve(ParallelMove* moves) {
// Remove redundant moves, collect source kinds and destination kinds to
// detect simple non-overlapping moves, and collect FP move representations if
// aliasing is non-simple.
- int fp_reps = 0;
size_t nmoves = moves->size();
for (size_t i = 0; i < nmoves;) {
MoveOperands* move = (*moves)[i];
@@ -104,11 +45,6 @@ void GapResolver::Resolve(ParallelMove* moves) {
i++;
source_kinds.Add(GetKind(move->source()));
destination_kinds.Add(GetKind(move->destination()));
- if (kFPAliasing == AliasingKind::kCombine &&
- move->destination().IsFPRegister()) {
- fp_reps |= RepresentationBit(
- LocationOperand::cast(move->destination()).representation());
- }
}
if (nmoves != moves->size()) moves->resize(nmoves);
@@ -120,66 +56,46 @@ void GapResolver::Resolve(ParallelMove* moves) {
return;
}
- if (kFPAliasing == AliasingKind::kCombine) {
- if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
- // Start with the smallest FP moves, so we never encounter smaller moves
- // in the middle of a cycle of larger moves.
- if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
- split_rep_ = MachineRepresentation::kFloat32;
- for (size_t i = 0; i < moves->size(); ++i) {
- auto move = (*moves)[i];
- if (!move->IsEliminated() && move->destination().IsFloatRegister())
- PerformMove(moves, move);
- }
- }
- if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
- split_rep_ = MachineRepresentation::kFloat64;
- for (size_t i = 0; i < moves->size(); ++i) {
- auto move = (*moves)[i];
- if (!move->IsEliminated() && move->destination().IsDoubleRegister())
- PerformMove(moves, move);
- }
- }
- }
- split_rep_ = MachineRepresentation::kSimd128;
- }
-
for (size_t i = 0; i < moves->size(); ++i) {
auto move = (*moves)[i];
if (!move->IsEliminated()) PerformMove(moves, move);
}
+ assembler_->PopTempStackSlots();
}
-void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
- // {PerformMoveHelper} assembles all the moves that block {move} but are not
- // blocked by {move} (directly or indirectly). By the assumptions described in
- // {PerformMoveHelper}, at most one cycle is left. If it exists, it is
- // returned and we assemble it below.
- auto cycle = PerformMoveHelper(moves, move);
- if (!cycle.has_value()) return;
- DCHECK_EQ(cycle->back(), move);
- if (cycle->size() == 2) {
- // A cycle of size two where the two moves have the same machine
- // representation is a swap. For this case, call {AssembleSwap} which can
- // generate better code than the generic algorithm below in some cases.
- MoveOperands* move2 = cycle->front();
- MachineRepresentation rep =
- LocationOperand::cast(move->source()).representation();
- MachineRepresentation rep2 =
- LocationOperand::cast(move2->source()).representation();
- if (rep == rep2) {
- InstructionOperand* source = &move->source();
- InstructionOperand* destination = &move->destination();
- // Ensure source is a register or both are stack slots, to limit swap
- // cases.
- if (source->IsAnyStackSlot()) {
- std::swap(source, destination);
- }
- assembler_->AssembleSwap(source, destination);
- move->Eliminate();
- move2->Eliminate();
- return;
+// Check if a 2-move cycle is a swap. This is not always the case, for instance:
+//
+// [fp_stack:-3|s128] = [xmm5|R|s128]
+// [xmm5|R|s128] = [fp_stack:-4|s128]
+//
+// The two stack operands conflict but start at a different stack offset, so a
+// swap would be incorrect.
+// In general, swapping is allowed if the conflicting operands:
+// - Have the same representation, and
+// - Are the same register, or are stack slots with the same index
+bool IsSwap(MoveOperands* move1, MoveOperands* move2) {
+ return move1->source() == move2->destination() &&
+ move2->source() == move1->destination();
+}
+
+void GapResolver::PerformCycle(const std::vector<MoveOperands*>& cycle) {
+ DCHECK(!cycle.empty());
+ MoveOperands* move1 = cycle.back();
+ if (cycle.size() == 2 && IsSwap(cycle.front(), cycle.back())) {
+ // Call {AssembleSwap} which can generate better code than the generic
+ // algorithm below in some cases.
+ MoveOperands* move2 = cycle.front();
+ InstructionOperand* source = &move1->source();
+ InstructionOperand* destination = &move1->destination();
+ // Ensure source is a register or both are stack slots, to limit swap
+ // cases.
+ if (source->IsAnyStackSlot()) {
+ std::swap(source, destination);
}
+ assembler_->AssembleSwap(source, destination);
+ move1->Eliminate();
+ move2->Eliminate();
+ return;
}
// Generic move-cycle algorithm. The cycle of size n is ordered such that the
// move at index i % n blocks the move at index (i + 1) % n.
@@ -194,47 +110,60 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
// {SetPendingMove}, which marks the registers needed for the given moves.
// {MoveToTempLocation} will then choose the location accordingly.
MachineRepresentation rep =
- LocationOperand::cast(move->source()).representation();
- cycle->pop_back();
- for (auto* pending_move : *cycle) {
- assembler_->SetPendingMove(pending_move);
+ LocationOperand::cast(move1->destination()).representation();
+ for (size_t i = 0; i < cycle.size() - 1; ++i) {
+ assembler_->SetPendingMove(cycle[i]);
}
- assembler_->MoveToTempLocation(&move->source());
- InstructionOperand destination = move->destination();
- move->Eliminate();
- for (auto* unblocked_move : *cycle) {
- assembler_->AssembleMove(&unblocked_move->source(),
- &unblocked_move->destination());
- unblocked_move->Eliminate();
+ assembler_->MoveToTempLocation(&move1->source(), rep);
+ InstructionOperand destination = move1->destination();
+ move1->Eliminate();
+ for (size_t i = 0; i < cycle.size() - 1; ++i) {
+ assembler_->AssembleMove(&cycle[i]->source(), &cycle[i]->destination());
+ cycle[i]->Eliminate();
}
assembler_->MoveTempLocationTo(&destination, rep);
+ // We do not need to update the sources of the remaining moves in the parallel
+ // move. If any of the remaining moves had the same source as one of the moves
+ // in the cycle, it would block the cycle and would have already been
+ // assembled by {PerformMoveHelper}.
+}
+
+void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
+ // Try to perform the move and its dependencies with {PerformMoveHelper}.
+ // This helper function will be able to solve most cases, including cycles.
+ // But for some rare cases, it will bail out and return one of the
+ // problematic moves. In this case, push the source to the stack to
+ // break the cycles that it belongs to, and try again.
+ std::vector<MoveOperands*> cycle;
+ while (MoveOperands* blocking_move = PerformMoveHelper(moves, move, &cycle)) {
+ // Push an arbitrary operand of the cycle to break it.
+ AllocatedOperand scratch = assembler_->Push(&blocking_move->source());
+ InstructionOperand source = blocking_move->source();
+ for (auto m : *moves) {
+ if (m->source() == source) {
+ m->set_source(scratch);
+ }
+ }
+ cycle.clear();
+ }
}
-base::Optional<std::vector<MoveOperands*>> GapResolver::PerformMoveHelper(
- ParallelMove* moves, MoveOperands* move) {
+MoveOperands* GapResolver::PerformMoveHelper(
+ ParallelMove* moves, MoveOperands* move,
+ std::vector<MoveOperands*>* cycle) {
// We interpret moves as nodes in a graph. x is a successor of y (x blocks y)
// if x.source() conflicts with y.destination(). We recursively assemble the
// moves in this graph in post-order using a DFS traversal, such that all
// blocking moves are assembled first.
// We also mark moves in the current DFS branch as pending. If a move is
- // blocked by a pending move, this is a cycle. In this case we just return the
- // cycle without assembling the moves, and the cycle is processed separately
- // by {PerformMove}.
- // We can show that there is at most one cycle in this connected component
- // with the two following assumptions:
- // - Two moves cannot have conflicting destinations (1)
- // - Operand conflict is transitive (2)
- // From this, it it follows that:
- // - A move cannot block two or more moves (or the destinations of the blocked
- // moves would conflict with each other by (2)).
- // - Therefore the graph is a tree, except possibly for one cycle that goes
- // back to the root
- // (1) must hold by construction of parallel moves. (2) is generally true,
- // except if this is a tail-call gap move and some operands span multiple
- // stack slots. In this case, slots can partially overlap and interference is
- // not transitive. In other cases, conflicting stack operands should have the
- // same base address and machine representation.
- // TODO(thibaudm): Fix the tail-call case.
+ // blocked by a pending move, this is a cycle. In this case we just
+ // reconstruct the cycle on the way back, and assemble it using {PerformCycle}
+ // when we reach the first move.
+ // This algorithm can only process one cycle at a time. If another cycle is
+ // found while the first one is still being processed, we bail out.
+ // The caller breaks the cycle using a temporary stack slot, and we try
+ // again.
+
DCHECK(!move->IsPending());
DCHECK(!move->IsRedundant());
@@ -244,11 +173,7 @@ base::Optional<std::vector<MoveOperands*>> GapResolver::PerformMoveHelper(
DCHECK(!source.IsInvalid()); // Or else it will look eliminated.
InstructionOperand destination = move->destination();
move->SetPending();
- base::Optional<std::vector<MoveOperands*>> cycle;
-
- // We may need to split moves between FP locations differently.
- const bool is_fp_loc_move = kFPAliasing == AliasingKind::kCombine &&
- destination.IsFPLocationOperand();
+ MoveOperands* blocking_move = nullptr;
for (size_t i = 0; i < moves->size(); ++i) {
auto other = (*moves)[i];
@@ -258,24 +183,24 @@ base::Optional<std::vector<MoveOperands*>> GapResolver::PerformMoveHelper(
if (other->IsPending()) {
// The conflicting move is pending, we found a cycle. Build the list of
// moves that belong to the cycle on the way back.
- cycle.emplace();
+ // If this move already belongs to a cycle, bail out.
+ if (!cycle->empty()) {
+ blocking_move = cycle->front();
+ break;
+ }
+ // Initialize the cycle with {other} and reconstruct the rest of the
+ // cycle on the way back.
+ cycle->push_back(other);
} else {
- // Recursively perform the conflicting move.
- if (is_fp_loc_move &&
- LocationOperand::cast(other->source()).representation() >
- split_rep_) {
- // 'other' must also be an FP location move. Break it into fragments
- // of the same size as 'move'. 'other' is set to one of the fragments,
- // and the rest are appended to 'moves'.
- other = Split(other, split_rep_, moves);
- // 'other' may not block destination now.
- if (!other->source().InterferesWith(destination)) continue;
+ std::vector<MoveOperands*> cycle_rec;
+ blocking_move = PerformMoveHelper(moves, other, &cycle_rec);
+ if (blocking_move) break;
+ if (!cycle->empty() && !cycle_rec.empty()) {
+ blocking_move = cycle_rec.front();
+ break;
}
- auto cycle_rec = PerformMoveHelper(moves, other);
- if (cycle_rec.has_value()) {
- // Check that our assumption that there is at most one cycle is true.
- DCHECK(!cycle.has_value());
- cycle = cycle_rec;
+ if (cycle->empty() && !cycle_rec.empty()) {
+ *cycle = std::move(cycle_rec);
}
}
}
@@ -285,15 +210,22 @@ base::Optional<std::vector<MoveOperands*>> GapResolver::PerformMoveHelper(
// marked as pending anymore, restore its destination.
move->set_destination(destination);
- if (cycle.has_value()) {
- // Do not assemble the moves in the cycle, just return it.
- cycle->push_back(move);
- return cycle;
- }
+ if (blocking_move != nullptr) return blocking_move;
- assembler_->AssembleMove(&source, &destination);
- move->Eliminate();
- return {};
+ if (!cycle->empty()) {
+ if (cycle->front() == move) {
+ // We returned to the topmost move in the cycle and assembled all the
+ // other dependencies. Assemble the cycle.
+ PerformCycle(*cycle);
+ cycle->clear();
+ } else {
+ cycle->push_back(move);
+ }
+ } else {
+ assembler_->AssembleMove(&source, &destination);
+ move->Eliminate();
+ }
+ return nullptr;
}
} // namespace compiler
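
The rewritten resolver interprets moves as a graph (x blocks y if x.source() conflicts with y.destination()), assembles blockers first via DFS, handles a single cycle with PerformCycle, and bails out when a second cycle appears so that PerformMove can break the knot by pushing one source to a fresh stack slot. The sketch below is a deliberately simplified scalar model of that idea (C++17), assuming each destination is written exactly once: locations are plain array slots and one spare slot plays the role of the temp/pushed operand. The real resolver additionally has the AssembleSwap fast path for genuine two-move swaps, which this model omits.

    #include <cstdio>
    #include <vector>

    // One pending move: copy the value currently held in location `src` into
    // location `dst`.
    struct Move {
      int src;
      int dst;
      bool done = false;
    };

    // Resolve a parallel move over integer "locations", breaking cycles with one
    // spare location (kTemp) -- the same role the pushed stack slot / temp
    // location plays above, minus real operands and representations.
    void ResolveParallelMove(std::vector<int>& loc, std::vector<Move> moves,
                             int kTemp) {
      size_t remaining = moves.size();
      while (remaining > 0) {
        bool progress = false;
        for (Move& m : moves) {
          if (m.done) continue;
          // A move is unblocked once no other pending move still reads its dst.
          bool blocked = false;
          for (const Move& other : moves) {
            if (!other.done && &other != &m && other.src == m.dst) blocked = true;
          }
          if (blocked) continue;
          loc[m.dst] = loc[m.src];  // AssembleMove
          m.done = true;
          --remaining;
          progress = true;
        }
        if (!progress) {
          // Everything left is blocked: we are looking at a cycle. Break it by
          // parking one source in the temp and redirecting its readers, much
          // like PerformMove does with the operand returned by Push().
          for (Move& m : moves) {
            if (m.done) continue;
            loc[kTemp] = loc[m.src];
            for (Move& other : moves) {
              if (!other.done && other.src == m.src) other.src = kTemp;
            }
            break;
          }
        }
      }
    }

    int main() {
      // Locations 0..2 form a rotation; location 3 is the scratch slot.
      std::vector<int> loc = {10, 20, 30, 0};
      ResolveParallelMove(loc, {{0, 1}, {1, 2}, {2, 0}}, /*kTemp=*/3);
      std::printf("%d %d %d\n", loc[0], loc[1], loc[2]);  // prints: 30 10 20
      return 0;
    }
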
diff --git a/deps/v8/src/compiler/backend/gap-resolver.h b/deps/v8/src/compiler/backend/gap-resolver.h
index 0d7ba02da2..7c17d0d977 100644
--- a/deps/v8/src/compiler/backend/gap-resolver.h
+++ b/deps/v8/src/compiler/backend/gap-resolver.h
@@ -25,7 +25,11 @@ class GapResolver final {
virtual void AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) = 0;
- // Assemble cycles.
+ // Helper functions to resolve cyclic dependencies.
+ // - {Push} pushes {src} and returns an operand that encodes the new stack
+ // slot.
+ // - {Pop} pops the topmost stack operand and moves it to {dest}.
+ // - {PopTempStackSlots} pops all remaining unpopped stack slots.
// - {SetPendingMove} reserves scratch registers needed to perform the moves
// in the cycle.
// - {MoveToTempLocation} moves an operand to a temporary location, either
@@ -33,34 +37,38 @@ class GapResolver final {
// reserved registers.
// - {MoveTempLocationTo} moves the temp location to the destination,
// thereby completing the cycle.
- virtual void MoveToTempLocation(InstructionOperand* src) = 0;
+ virtual AllocatedOperand Push(InstructionOperand* src) = 0;
+ virtual void Pop(InstructionOperand* dest, MachineRepresentation rep) = 0;
+ virtual void PopTempStackSlots() = 0;
+ virtual void MoveToTempLocation(InstructionOperand* src,
+ MachineRepresentation rep) = 0;
virtual void MoveTempLocationTo(InstructionOperand* dst,
MachineRepresentation rep) = 0;
virtual void SetPendingMove(MoveOperands* move) = 0;
+ int temp_slots_ = 0;
};
- explicit GapResolver(Assembler* assembler)
- : assembler_(assembler), split_rep_(MachineRepresentation::kSimd128) {}
+ explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
// Resolve a set of parallel moves, emitting assembler instructions.
V8_EXPORT_PRIVATE void Resolve(ParallelMove* parallel_move);
private:
+ // Take a vector of moves where each move blocks the next one, and the last
+ // one blocks the first one, and resolve it using a temporary location.
+ void PerformCycle(const std::vector<MoveOperands*>& cycle);
// Performs the given move, possibly performing other moves to unblock the
// destination operand.
void PerformMove(ParallelMove* moves, MoveOperands* move);
- // Perform the move and its non-cyclic dependencies. Return the cycle if one
- // is found.
- base::Optional<std::vector<MoveOperands*>> PerformMoveHelper(
- ParallelMove* moves, MoveOperands* move);
-
+ // Perform the move and its dependencies. Also performs simple cyclic
+ // dependencies. For more complex cases the method may bail out:
+ // in this case, it returns one of the problematic moves. The caller
+ // ({PerformMove}) will use a temporary stack slot to unblock the dependencies
+ // and try again.
+ MoveOperands* PerformMoveHelper(ParallelMove* moves, MoveOperands* move,
+ std::vector<MoveOperands*>* cycle);
// Assembler used to emit moves and save registers.
Assembler* const assembler_;
-
- // While resolving moves, the largest FP representation that can be moved.
- // Any larger moves must be split into an equivalent series of moves of this
- // representation.
- MachineRepresentation split_rep_;
};
} // namespace compiler
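
The new Push/Pop/PopTempStackSlots trio gives the resolver a LIFO scratch area on the machine stack, with temp_slots_ tracking how many slots are outstanding so Resolve() can discard the leftovers in one go. A toy model of that contract, using a plain vector in place of the real stack and ints in place of operands; the names here are invented for the sketch, not the V8 backend interface.

    #include <cassert>
    #include <vector>

    class ToyAssembler {
     public:
      // Push copies `value` into a fresh slot on top of the stack and returns
      // its index -- the analogue of the AllocatedOperand returned by Push().
      int Push(int value) {
        stack_.push_back(value);
        ++temp_slots_;
        return static_cast<int>(stack_.size()) - 1;
      }

      // Pop moves the topmost temp slot into `dest`.
      void Pop(int* dest) {
        assert(temp_slots_ > 0);
        *dest = stack_.back();
        stack_.pop_back();
        --temp_slots_;
      }

      // Called once at the end of Resolve() to discard any slots that were
      // pushed to break cycles but never popped individually.
      void PopTempStackSlots() {
        stack_.resize(stack_.size() - temp_slots_);
        temp_slots_ = 0;
      }

     private:
      std::vector<int> stack_;
      int temp_slots_ = 0;  // mirrors Assembler::temp_slots_
    };

As the ia32 hunks below suggest, Pop() backs the stack fallback of MoveTempLocationTo(), while PopTempStackSlots() covers slots whose contents were consumed by rewritten moves rather than by an explicit Pop().
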
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 5afd119ff5..fe1df09b98 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -29,7 +29,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
#define kScratchDoubleReg xmm0
@@ -202,11 +202,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
void MoveInstructionOperandToRegister(Register destination,
InstructionOperand* op) {
if (op->IsImmediate() || op->IsConstant()) {
- gen_->tasm()->mov(destination, ToImmediate(op));
+ gen_->masm()->mov(destination, ToImmediate(op));
} else if (op->IsRegister()) {
- gen_->tasm()->Move(destination, ToRegister(op));
+ gen_->masm()->Move(destination, ToRegister(op));
} else {
- gen_->tasm()->mov(destination, ToOperand(op));
+ gen_->masm()->mov(destination, ToOperand(op));
}
}
};
@@ -318,10 +318,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
__ lea(scratch1_, operand_);
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
@@ -475,7 +474,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
XMMRegister src0 = i.InputSimd128Register(0); \
Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), src0, src1); \
} else { \
DCHECK_EQ(i.OutputSimd128Register(), src0); \
@@ -485,11 +484,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputOperand(1), imm); \
} else { \
- CpuFeatureScope sse_scope(tasm(), SSELevel); \
+ CpuFeatureScope sse_scope(masm(), SSELevel); \
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
@@ -532,26 +531,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
int8_t laneidx = i.InputInt8(1); \
if (HasAddressingMode(instr)) { \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \
} else { \
DCHECK_EQ(dst, src); \
- CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \
+ CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \
__ OPCODE(dst, i.MemoryOperand(2), laneidx); \
} \
} else { \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##OPCODE(dst, src, i.InputOperand(2), laneidx); \
} else { \
DCHECK_EQ(dst, src); \
- CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \
+ CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \
__ OPCODE(dst, i.InputOperand(2), laneidx); \
} \
} \
} while (false)
-
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -566,7 +564,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -574,10 +572,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
+ masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
+ masm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -617,7 +615,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@@ -635,13 +633,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@@ -658,14 +656,14 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ push(eax); // Push eax so we can use it as a scratch register.
__ mov(eax, Operand(kJavaScriptCallCodeStartRegister, offset));
- __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
+ __ test(FieldOperand(eax, Code::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ pop(eax); // Restore eax.
@@ -693,8 +691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ LoadCodeObjectEntry(reg, reg);
- __ call(reg);
+ __ CallCodeObject(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -747,8 +744,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ LoadCodeObjectEntry(reg, reg);
- __ jmp(reg);
+ __ JumpCodeObject(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -829,7 +825,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ PushPC();
int pc = __ pc_offset();
__ pop(scratch);
- __ sub(scratch, Immediate(pc + Code::kHeaderSize - kHeapObjectTag));
+ __ sub(scratch,
+ Immediate(pc + InstructionStream::kHeaderSize - kHeapObjectTag));
__ add(scratch, Immediate::CodeRelativeOffset(&return_location));
__ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset),
scratch);
@@ -885,7 +882,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -953,8 +950,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchStoreWithWriteBarrier:  // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -1263,7 +1259,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kIA32Float32Round: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1679,15 +1675,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int slots = stack_decrement / kSystemPointerSize;
// Whenever codegen uses push, we need to check if stack_decrement
// contains any extra padding and adjust the stack before the push.
- if (HasImmediateInput(instr, 1)) {
- __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
- __ push(i.InputImmediate(1));
- } else if (HasAddressingMode(instr)) {
+ if (HasAddressingMode(instr)) {
// Only single slot pushes from memory are supported.
__ AllocateStackSpace(stack_decrement - kSystemPointerSize);
size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
+ } else if (HasImmediateInput(instr, 1)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputImmediate(1));
} else {
InstructionOperand* input = instr->InputAt(1);
if (input->IsRegister()) {
@@ -2095,6 +2091,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kIA32I32x4DotI8x16I7x16AddS: {
+ __ I32x4DotI8x16I7x16AddS(
+ i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ break;
+ }
case kIA32F32x4Splat: {
__ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));
break;
@@ -2106,12 +2109,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32Insertps: {
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
__ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1) << 4);
} else {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
__ insertps(i.OutputSimd128Register(), i.InputOperand(2),
i.InputInt8(1) << 4);
}
@@ -2309,12 +2312,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(0);
XMMRegister src2 = i.InputSimd128Register(1);
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
__ vpminsd(kScratchDoubleReg, src1, src2);
__ vpcmpeqd(dst, kScratchDoubleReg, src2);
} else {
DCHECK_EQ(dst, src1);
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminsd(dst, src2);
__ pcmpeqd(dst, src2);
}
@@ -2322,7 +2325,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0, negative->0
@@ -2350,7 +2353,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXI32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0, negative->0
@@ -2400,7 +2403,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI32x4GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pmaxud(dst, src);
@@ -2410,7 +2413,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI32x4GtU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
@@ -2422,7 +2425,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI32x4GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminud(dst, src);
@@ -2430,7 +2433,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI32x4GeU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminud(kScratchDoubleReg, src1, src2);
@@ -2546,7 +2549,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@@ -2568,7 +2571,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8GeS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsw(kScratchDoubleReg, src1, src2);
@@ -2615,7 +2618,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI16x8GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pmaxuw(dst, src);
@@ -2625,7 +2628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8GtU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
@@ -2637,7 +2640,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI16x8GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminuw(dst, src);
@@ -2645,7 +2648,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI16x8GeU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminuw(kScratchDoubleReg, src1, src2);
@@ -2838,7 +2841,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@@ -2853,7 +2856,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI8x16GeS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminsb(dst, src);
@@ -2861,7 +2864,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16GeS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsb(kScratchDoubleReg, src1, src2);
@@ -2919,7 +2922,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16GtU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
@@ -2938,7 +2941,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXI8x16GeU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminub(kScratchDoubleReg, src1, src2);
@@ -3177,7 +3180,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
uint8_t lane = i.InputUint8(1) & 0xf;
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
if (lane < 8) {
__ vpunpcklbw(dst, src, src);
} else {
@@ -3228,7 +3231,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
break;
case kSSES16x8UnzipHigh: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3242,7 +3245,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS16x8UnzipHigh: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
if (instr->InputCount() == 2) {
@@ -3254,7 +3257,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSES16x8UnzipLow: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3268,7 +3271,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS16x8UnzipLow: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
__ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@@ -3295,7 +3298,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16UnzipHigh: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
if (instr->InputCount() == 2) {
@@ -3322,7 +3325,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16UnzipLow: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
if (instr->InputCount() == 2) {
@@ -3351,7 +3354,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16TransposeLow: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputCount() == 1) {
__ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8);
@@ -3381,7 +3384,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXS8x16TransposeHigh: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputCount() == 1) {
__ vpsrlw(dst, i.InputSimd128Register(0), 8);
@@ -3417,7 +3420,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAVXS8x4Reverse:
case kAVXS8x8Reverse: {
DCHECK_EQ(1, instr->InputCount());
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = dst;
if (arch_opcode != kAVXS8x2Reverse) {
@@ -3461,6 +3464,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
+ case kIA32Blendvpd: {
+ __ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
+ case kIA32Blendvps: {
+ __ Blendvps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
case kIA32Pblendvb: {
__ Pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), i.InputSimd128Register(2));
@@ -4026,6 +4039,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -4185,8 +4202,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
- __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
@@ -4221,10 +4238,64 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ IA32OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ push(g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+ __ push(g.ToOperand(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ sub(esp, Immediate(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ IA32OperandConverter g(this, nullptr);
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ if (dest->IsRegister()) {
+ __ pop(g.ToRegister(dest));
+ } else if (dest->IsStackSlot() || dest->IsFloatStackSlot()) {
+ __ pop(g.ToOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ add(esp, Immediate(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ add(esp, Immediate(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
if ((IsFloatingPoint(rep) &&
!move_cycle_.pending_double_scratch_register_use)) {
// The scratch double register is available.
@@ -4233,24 +4304,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
AssembleMove(source, &scratch);
} else {
// The scratch register blocked by pending moves. Use the stack instead.
- int new_slots = ElementSizeInPointers(rep);
- IA32OperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ push(g.ToRegister(source));
- } else if (source->IsStackSlot() || source->IsFloatStackSlot()) {
- __ push(g.ToOperand(source));
- } else {
- // No push instruction for this operand type. Bump the stack pointer and
- // assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ sub(esp, Immediate(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -4262,22 +4316,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
kScratchDoubleReg.code());
AssembleMove(&scratch, dest);
} else {
- IA32OperandConverter g(this, nullptr);
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- if (dest->IsRegister()) {
- __ pop(g.ToRegister(dest));
- } else if (dest->IsStackSlot() || dest->IsFloatStackSlot()) {
- __ pop(g.ToOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ add(esp, Immediate(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
move_cycle_ = MoveCycleState();
}
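
One detail of the new ia32 Push()/Pop() worth spelling out is how the returned stack-slot operand is numbered: the fresh slot sits at last_frame_slot_id + sp_delta + new_slots, i.e. just past everything already on the stack. A small self-checking sketch of that arithmetic; the helper name and the frame numbers are illustrative only, not the real frame layout.

    #include <cassert>

    int PushedSlotId(int total_frame_slot_count, int sp_delta, int new_slots) {
      int last_frame_slot_id = total_frame_slot_count - 1;
      return last_frame_slot_id + sp_delta + new_slots;
    }

    int main() {
      // A frame with 8 slots and nothing extra on the stack: pushing one
      // pointer-sized value yields slot id 7 + 0 + 1 = 8, the first slot past
      // the frame. With one temp already pushed (sp_delta == 1), the next push
      // lands on slot 9.
      assert(PushedSlotId(8, 0, 1) == 8);
      assert(PushedSlotId(8, 1, 1) == 9);
      return 0;
    }
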
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 45f967b75d..c47fdf60b6 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -101,6 +101,8 @@ namespace compiler {
V(IA32Movhps) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
+ V(IA32Blendvpd) \
+ V(IA32Blendvps) \
V(IA32Lea) \
V(IA32Pblendvb) \
V(IA32Push) \
@@ -208,6 +210,7 @@ namespace compiler {
V(IA32I32x4Abs) \
V(IA32I32x4BitMask) \
V(IA32I32x4DotI16x8S) \
+ V(IA32I32x4DotI8x16I7x16AddS) \
V(IA32I32x4ExtMulLowI16x8S) \
V(IA32I32x4ExtMulHighI16x8S) \
V(IA32I32x4ExtMulLowI16x8U) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index a30b4fc6a3..f27ed723f4 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -89,6 +89,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kIA32Blendvpd:
+ case kIA32Blendvps:
case kIA32Pblendvb:
case kIA32Cvttps2dq:
case kIA32Cvttpd2dq:
@@ -192,6 +194,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4Abs:
case kIA32I32x4BitMask:
case kIA32I32x4DotI16x8S:
+ case kIA32I32x4DotI8x16I7x16AddS:
case kIA32I32x4ExtMulLowI16x8S:
case kIA32I32x4ExtMulHighI16x8S:
case kIA32I32x4ExtMulLowI16x8U:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index bea1475584..eb9195b0d1 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -18,7 +18,7 @@
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/machine-type.h"
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction-selector-impl.h"
@@ -208,7 +208,7 @@ class IA32OperandGenerator final : public OperandGenerator {
m.object().ResolvedValue())) {
ptrdiff_t const delta =
m.index().ResolvedValue() +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
@@ -219,7 +219,14 @@ class IA32OperandGenerator final : public OperandGenerator {
BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
DCHECK(m.matches());
- if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
+ if (m.base() != nullptr &&
+ m.base()->opcode() == IrOpcode::kLoadRootRegister) {
+ DCHECK_EQ(m.index(), nullptr);
+ DCHECK_EQ(m.scale(), 0);
+ inputs[(*input_count)++] = UseImmediate(m.displacement());
+ return kMode_Root;
+ } else if ((m.displacement() == nullptr ||
+ CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(),
m.displacement_mode(), inputs, input_count, register_mode);
@@ -714,7 +721,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
: kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
temps);
} else if (is_seqcst) {
@@ -3261,23 +3268,22 @@ void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
}
namespace {
-// pblendvb is a correct implementation for all the various relaxed lane select,
-// see https://github.com/WebAssembly/relaxed-simd/issues/17.
-void VisitRelaxedLaneSelect(InstructionSelector* selector, Node* node) {
+void VisitRelaxedLaneSelect(InstructionSelector* selector, Node* node,
+ InstructionCode code = kIA32Pblendvb) {
IA32OperandGenerator g(selector);
- // pblendvb copies src2 when mask is set, opposite from Wasm semantics.
- // node's inputs are: mask, lhs, rhs (determined in wasm-compiler.cc).
+  // pblendvb/blendvps/blendvpd copy src2 when the mask is set, which is the
+  // opposite of the Wasm semantics. The node's inputs are: mask, lhs, rhs
+  // (determined in wasm-compiler.cc).
if (selector->IsSupported(AVX)) {
- selector->Emit(kIA32Pblendvb, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(2)),
- g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)));
+ selector->Emit(
+ code, g.DefineAsRegister(node), g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
} else {
- // SSE4.1 pblendvb requires xmm0 to hold the mask as an implicit operand.
- selector->Emit(kIA32Pblendvb, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(2)),
- g.UseRegister(node->InputAt(1)),
- g.UseFixed(node->InputAt(0), xmm0));
+    // SSE4.1 pblendvb/blendvps/blendvpd require xmm0 to hold the mask as an
+ // implicit operand.
+ selector->Emit(
+ code, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(1)), g.UseFixed(node->InputAt(0), xmm0));
}
}
} // namespace
@@ -3289,10 +3295,10 @@ void InstructionSelector::VisitI16x8RelaxedLaneSelect(Node* node) {
VisitRelaxedLaneSelect(this, node);
}
void InstructionSelector::VisitI32x4RelaxedLaneSelect(Node* node) {
- VisitRelaxedLaneSelect(this, node);
+ VisitRelaxedLaneSelect(this, node, kIA32Blendvps);
}
void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
- VisitRelaxedLaneSelect(this, node);
+ VisitRelaxedLaneSelect(this, node, kIA32Blendvpd);
}
void InstructionSelector::VisitF64x2Qfma(Node* node) {
@@ -3317,6 +3323,15 @@ void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
+void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kIA32I32x4DotI8x16I7x16AddS, g.DefineSameAsInput(node, 2),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)), arraysize(temps), temps);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
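The operand order in VisitRelaxedLaneSelect above is swapped because the x86 blendv family selects the second source where the mask is set, while the Wasm lane-select picks the first. A minimal scalar model of one lane, purely illustrative and not V8 code:

  #include <cstdint>

  // Wasm relaxed lane select: take bits from lhs where the mask is set.
  uint32_t WasmLaneSelect(uint32_t lhs, uint32_t rhs, uint32_t mask) {
    return (lhs & mask) | (rhs & ~mask);
  }

  // x86 pblendvb/blendvps/blendvpd: take src2 where the lane's mask bit is
  // set, src1 otherwise. Hence the emitter passes InputAt(2), InputAt(1),
  // InputAt(0) in that order.
  uint32_t X86BlendvLane(uint32_t src1, uint32_t src2, bool mask_bit) {
    return mask_bit ? src2 : src1;
  }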
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 31dfc864b2..87adf068bb 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -263,7 +263,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum MemoryAccessMode {
kMemoryAccessDirect = 0,
- kMemoryAccessProtected = 1,
+ kMemoryAccessProtectedMemOutOfBounds = 1,
+ kMemoryAccessProtectedNullDereference = 2,
};
enum class AtomicWidth { kWord32, kWord64 };
@@ -278,36 +279,89 @@ inline size_t AtomicWidthSize(AtomicWidth width) {
UNREACHABLE();
}
-// The InstructionCode is an opaque, target-specific integer that encodes
-// what code to emit for an instruction in the code generator. It is not
-// interesting to the register allocator, as the inputs and flags on the
-// instructions specify everything of interest.
+// The InstructionCode is an opaque, target-specific integer that encodes what
+// code to emit for an instruction in the code generator. It is not interesting
+// to the register allocator, as the inputs and flags on the instructions
+// specify everything of interest.
using InstructionCode = uint32_t;
// Helpers for encoding / decoding InstructionCode into the fields needed
-// for code generation. We encode the instruction, addressing mode, and flags
-// continuation into a single InstructionCode which is stored as part of
-// the instruction.
+// for code generation. We encode the instruction, addressing mode, flags, and
+// other information into a single InstructionCode which is stored as part of
+// the instruction. Some fields in the layout of InstructionCode overlap as
+// follows:
+// ArchOpcodeField
+// AddressingModeField
+// FlagsModeField
+// FlagsConditionField
+// AtomicWidthField | RecordWriteModeField | LaneSizeField
+// AtomicMemoryOrderField | | VectorLengthField
+// AtomicStoreRecordWriteModeField | |
+// AccessModeField
+//
+// or,
+//
+// ArchOpcodeField
+// AddressingModeField
+// FlagsModeField
+// FlagsConditionField
+// DeoptImmedArgsCountField | ParamField | MiscField
+// DeoptFrameStateOffsetField | FPParamField |
+//
+// Notably, AccessModeField can follow any of several sequences of fields.
+
using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
"All opcodes must fit in the 9-bit ArchOpcodeField.");
-using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
+using AddressingModeField = ArchOpcodeField::Next<AddressingMode, 5>;
static_assert(
AddressingModeField::is_valid(kLastAddressingMode),
"All addressing modes must fit in the 5-bit AddressingModeField.");
-using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
-using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
-using MiscField = base::BitField<int, 22, 10>;
-
-// {MiscField} is used for a variety of things, depending on the opcode.
-// TODO(turbofan): There should be an abstraction that ensures safe encoding and
-// decoding. {HasMemoryAccessMode} and its uses are a small step in that
-// direction.
+using FlagsModeField = AddressingModeField::Next<FlagsMode, 3>;
+using FlagsConditionField = FlagsModeField::Next<FlagsCondition, 5>;
+
+// AtomicWidthField is used for the various Atomic opcodes. Only used on 64bit
+// architectures. All atomic instructions on 32bit architectures are assumed to
+// be 32bit wide.
+using AtomicWidthField = FlagsConditionField::Next<AtomicWidth, 2>;
+// AtomicMemoryOrderField is used for the various Atomic opcodes. This field is
+// not used on all architectures. It is used on architectures where the codegen
+// for kSeqCst and kAcqRel differ only by emitting fences.
+using AtomicMemoryOrderField = AtomicWidthField::Next<AtomicMemoryOrder, 2>;
+using AtomicStoreRecordWriteModeField =
+ AtomicMemoryOrderField::Next<RecordWriteMode, 4>;
+
+// Write modes for writes with barrier.
+using RecordWriteModeField = FlagsConditionField::Next<RecordWriteMode, 2>;
// LaneSizeField and AccessModeField are helper types to encode/decode a lane
// size, an access mode, or both inside the overlapping MiscField.
-using LaneSizeField = base::BitField<int, 22, 8>;
-using AccessModeField = base::BitField<MemoryAccessMode, 30, 1>;
+#ifdef V8_TARGET_ARCH_X64
+enum LaneSize { kL8 = 0, kL16 = 1, kL32 = 2, kL64 = 3 };
+enum VectorLength { kV128 = 0, kV256 = 1, kV512 = 3 };
+using LaneSizeField = FlagsConditionField::Next<LaneSize, 2>;
+using VectorLengthField = LaneSizeField::Next<VectorLength, 2>;
+#else
+using LaneSizeField = FlagsConditionField::Next<int, 8>;
+#endif // V8_TARGET_ARCH_X64
+
+// Denotes whether the instruction needs to emit an accompanying landing pad for
+// the trap handler.
+using AccessModeField =
+ AtomicStoreRecordWriteModeField::Next<MemoryAccessMode, 2>;
+
+// Since AccessModeField is defined in terms of atomics, this assert ensures it
+// does not overlap with other fields it is used with.
+static_assert(AtomicStoreRecordWriteModeField::kLastUsedBit >=
+ RecordWriteModeField::kLastUsedBit);
+#ifdef V8_TARGET_ARCH_X64
+static_assert(AtomicStoreRecordWriteModeField::kLastUsedBit >=
+ VectorLengthField::kLastUsedBit);
+#else
+static_assert(AtomicStoreRecordWriteModeField::kLastUsedBit >=
+ LaneSizeField::kLastUsedBit);
+#endif
+
// TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
// decoding (in CodeGenerator and InstructionScheduler). Encoding (in
// InstructionSelector) is not yet guarded. There are in fact instructions for
@@ -332,28 +386,21 @@ inline bool HasMemoryAccessMode(ArchOpcode opcode) {
#endif
}
-using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
-using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
-
-// AtomicWidthField overlaps with MiscField and is used for the various Atomic
-// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
-// architectures are assumed to be 32bit wide.
-using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
-
-// AtomicMemoryOrderField overlaps with MiscField and is used for the various
-// Atomic opcodes. This field is not used on all architectures. It is used on
-// architectures where the codegen for kSeqCst and kAcqRel differ only by
-// emitting fences.
-using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
-using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
-
-// ParamField and FPParamField overlap with MiscField, as the latter is never
-// used for Call instructions. These 2 fields represent the general purpose
-// and floating point parameter counts of a direct call into C and are given 5
-// bits each, which allow storing a number up to the current maximum parameter
-// count, which is 20 (see kMaxCParameters defined in macro-assembler.h).
-using ParamField = base::BitField<int, 22, 5>;
-using FPParamField = base::BitField<int, 27, 5>;
+using DeoptImmedArgsCountField = FlagsConditionField::Next<int, 2>;
+using DeoptFrameStateOffsetField = DeoptImmedArgsCountField::Next<int, 8>;
+
+// ParamField and FPParamField represent the general purpose and floating point
+// parameter counts of a direct call into C and are given 5 bits each, which
+// allow storing a number up to the current maximum parameter count, which is 20
+// (see kMaxCParameters defined in macro-assembler.h).
+using ParamField = FlagsConditionField::Next<int, 5>;
+using FPParamField = ParamField::Next<int, 5>;
+
+// {MiscField} is used for a variety of things, depending on the opcode.
+// TODO(turbofan): There should be an abstraction that ensures safe encoding and
+// decoding. {HasMemoryAccessMode} and its uses are a small step in that
+// direction.
+using MiscField = FlagsConditionField::Next<int, 10>;
// This static assertion serves as an early warning if we are about to exhaust
// the available opcode space. If we are about to exhaust it, we should start
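The rewritten field declarations above derive every offset from the previous field via BitField::Next instead of hard-coding bit positions. A small sketch of how the chained fields are then used to build and take apart an InstructionCode (kArchNop/kMode_None are just placeholder values):

  // AddressingModeField starts right after ArchOpcodeField's last bit.
  using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
  using AddressingModeField = ArchOpcodeField::Next<AddressingMode, 5>;

  InstructionCode code = ArchOpcodeField::encode(kArchNop) |
                         AddressingModeField::encode(kMode_None);
  ArchOpcode opcode = ArchOpcodeField::decode(code);        // kArchNop
  AddressingMode mode = AddressingModeField::decode(code);  // kMode_None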
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.h b/deps/v8/src/compiler/backend/instruction-scheduler.h
index d4c08a033d..d521149f7e 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.h
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.h
@@ -172,7 +172,7 @@ class InstructionScheduler final : public ZoneObject {
bool CanTrap(const Instruction* instr) const {
return instr->IsTrap() ||
(instr->HasMemoryAccessMode() &&
- instr->memory_access_mode() == kMemoryAccessProtected);
+ instr->memory_access_mode() != kMemoryAccessDirect);
}
// The scheduler will not move the following instructions before the last
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 7f1802f32e..8118e32f97 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -57,8 +57,8 @@ InstructionSelector::InstructionSelector(
continuation_inputs_(sequence->zone()),
continuation_outputs_(sequence->zone()),
continuation_temps_(sequence->zone()),
- defined_(node_count, false, zone),
- used_(node_count, false, zone),
+ defined_(static_cast<int>(node_count), zone),
+ used_(static_cast<int>(node_count), zone),
effect_level_(node_count, 0, zone),
virtual_registers_(node_count,
InstructionOperand::kInvalidVirtualRegister, zone),
@@ -389,16 +389,12 @@ const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
bool InstructionSelector::IsDefined(Node* node) const {
DCHECK_NOT_NULL(node);
- size_t const id = node->id();
- DCHECK_LT(id, defined_.size());
- return defined_[id];
+ return defined_.Contains(node->id());
}
void InstructionSelector::MarkAsDefined(Node* node) {
DCHECK_NOT_NULL(node);
- size_t const id = node->id();
- DCHECK_LT(id, defined_.size());
- defined_[id] = true;
+ defined_.Add(node->id());
}
bool InstructionSelector::IsUsed(Node* node) const {
@@ -407,16 +403,12 @@ bool InstructionSelector::IsUsed(Node* node) const {
// that the Retain is actually emitted, otherwise the GC will mess up.
if (node->opcode() == IrOpcode::kRetain) return true;
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
- size_t const id = node->id();
- DCHECK_LT(id, used_.size());
- return used_[id];
+ return used_.Contains(node->id());
}
void InstructionSelector::MarkAsUsed(Node* node) {
DCHECK_NOT_NULL(node);
- size_t const id = node->id();
- DCHECK_LT(id, used_.size());
- used_[id] = true;
+ used_.Add(node->id());
}
int InstructionSelector::GetEffectLevel(Node* node) const {
@@ -459,7 +451,7 @@ bool InstructionSelector::CanAddressRelativeToRootsRegister(
// 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
// have a fixed root-relative offset? If so, we can ignore 2.
const bool this_root_relative_offset_is_constant =
- TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
+ MacroAssemblerBase::IsAddressableThroughRootRegister(isolate(),
reference);
return this_root_relative_offset_is_constant;
}
@@ -1162,7 +1154,9 @@ bool InstructionSelector::IsSourcePositionUsed(Node* node) {
node->opcode() == IrOpcode::kTrapIf ||
node->opcode() == IrOpcode::kTrapUnless ||
node->opcode() == IrOpcode::kProtectedLoad ||
- node->opcode() == IrOpcode::kProtectedStore);
+ node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kLoadTrapOnNull ||
+ node->opcode() == IrOpcode::kStoreTrapOnNull);
}
void InstructionSelector::VisitBlock(BasicBlock* block) {
@@ -1182,6 +1176,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kStoreTrapOnNull ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
node->opcode() == IrOpcode::k##Opcode ||
MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
@@ -1473,7 +1468,13 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitLoad(node);
}
case IrOpcode::kLoadTransform: {
- MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ if (params.transformation == LoadTransformation::kS256Load32Splat ||
+ params.transformation == LoadTransformation::kS256Load64Splat) {
+ MarkAsRepresentation(MachineRepresentation::kSimd256, node);
+ } else {
+ MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ }
return VisitLoadTransform(node);
}
case IrOpcode::kLoadLane: {
@@ -1483,6 +1484,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
+ case IrOpcode::kStoreTrapOnNull:
return VisitProtectedStore(node);
case IrOpcode::kStoreLane: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
@@ -1836,6 +1838,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
+ case IrOpcode::kLoadRootRegister:
+ return VisitLoadRootRegister(node);
case IrOpcode::kUnalignedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1927,7 +1931,8 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
#undef ATOMIC_CASE
- case IrOpcode::kProtectedLoad: {
+ case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
@@ -2378,6 +2383,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8DotI8x16I7x16S(node);
case IrOpcode::kI32x4DotI8x16I7x16AddS:
return MarkAsSimd128(node), VisitI32x4DotI8x16I7x16AddS(node);
+
+ // SIMD256
+#if V8_TARGET_ARCH_X64
+ case IrOpcode::kF32x8Add:
+ return MarkAsSimd256(node), VisitF32x8Add(node);
+ case IrOpcode::kF32x8Sub:
+ return MarkAsSimd256(node), VisitF32x8Sub(node);
+#endif // V8_TARGET_ARCH_X64
default:
FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
node->op()->mnemonic(), node->id());
@@ -2405,6 +2418,11 @@ void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
+void InstructionSelector::VisitLoadRootRegister(Node* node) {
+ // Do nothing. Following loads/stores from this operator will use kMode_Root
+ // to load/store from an offset of the root register.
+}
+
void InstructionSelector::VisitFloat64Acos(Node* node) {
VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
}
@@ -2805,29 +2823,6 @@ void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 && \
- !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_RISCV32 && !V8_TARGET_ARCH_RISCV64
-void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
- // && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32 &&
- // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_RISCV32
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
-void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM6 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_ARM64
-void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM6
-
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
@@ -2919,7 +2914,7 @@ void InstructionSelector::VisitProjection(Node* node) {
case IrOpcode::kInt32AbsWithOverflow:
case IrOpcode::kInt64AbsWithOverflow:
if (ProjectionIndexOf(node->op()) == 0u) {
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
} else {
DCHECK_EQ(1u, ProjectionIndexOf(node->op()));
MarkAsUsed(value);
@@ -3125,12 +3120,109 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
+ TryPrepareScheduleFirstProjection(branch->InputAt(0));
+
FlagsContinuation cont =
FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
+// When a DeoptimizeIf/DeoptimizeUnless/Branch depends on a BinopOverflow, the
+// InstructionSelector can sometimes generate a fuse instruction covering both
+// the BinopOverflow and the DeoptIf/Branch, and the final emitted code will
+// look like:
+//
+// r = BinopOverflow
+// jo branch_target/deopt_target
+//
+// When this fusing fails, the final code looks like:
+//
+// r = BinopOverflow
+//    o = seto  // sets overflow bit
+// cmp o, 0
+// jnz branch_target/deopt_target
+//
+// To be able to fuse the BinopOverflow and the DeoptIf/Branch, the 1st
+// projection (Projection[0], which contains the actual result) must already be
+// scheduled (and a few other conditions must be satisfied, see
+// InstructionSelectorXXX::VisitWordCompareZero).
+// TryPrepareScheduleFirstProjection is thus called from
+// VisitDeoptimizeIf/VisitDeoptimizeUnless/VisitBranch and detects if the 1st
+// projection could be scheduled now, and, if so, defines it.
+void InstructionSelector::TryPrepareScheduleFirstProjection(
+ Node* const maybe_projection) {
+ if (maybe_projection->opcode() != IrOpcode::kProjection) {
+ // The DeoptimizeIf/DeoptimizeUnless/Branch condition is not a projection.
+ return;
+ }
+
+ if (ProjectionIndexOf(maybe_projection->op()) != 1u) {
+    // The DeoptimizeIf/DeoptimizeUnless/Branch isn't on Projection[1] (i.e.,
+ // not on the overflow bit of a BinopOverflow).
+ return;
+ }
+
+ Node* const node = maybe_projection->InputAt(0);
+ if (schedule_->block(node) != current_block_) {
+ // The projection input is not in the current block, so it shouldn't be
+    // emitted now, and we don't need to eagerly schedule its Projection[0].
+ return;
+ }
+
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt32MulWithOverflow:
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kInt64MulWithOverflow: {
+ Node* result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ // No Projection(0), or it's already defined.
+ return;
+ }
+
+ if (schedule_->block(result) != current_block_) {
+ // {result} wasn't planned to be scheduled in {current_block_}. To avoid
+ // adding checks to see if it can still be scheduled now, we just bail
+ // out.
+ return;
+ }
+
+      // Check that all uses of {result} that are in the current block have
+      // already been Defined.
+      // We also ignore Phi uses: if {result} is used in a Phi in the block in
+      // which it is defined, this means that this block is a loop header, and
+      // {result} flows back into it through the back edge. In this case, it's normal
+ // to schedule {result} before the Phi that uses it.
+ for (Node* use : result->uses()) {
+ if (IsUsed(use) && !IsDefined(use) &&
+ schedule_->block(use) == current_block_ &&
+ use->opcode() != IrOpcode::kPhi) {
+ return;
+ }
+ }
+
+      // Visit the projection now. Note that this relies on the fact that
+      // VisitProjection doesn't Emit anything: if it did, we could end up
+      // Emitting something after a Branch, which is invalid (a Branch can only
+      // be at the end of a block, and the end of a block must always be a block
+      // terminator). (Remember that we emit instructions in reverse order, so
+      // since TryPrepareScheduleFirstProjection runs before the Branch is
+      // actually emitted, anything it emitted would come after the Branch in
+      // the final instruction sequence, not before.)
+ VisitProjection(result);
+ return;
+ }
+
+ default:
+ return;
+ }
+}
+
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ TryPrepareScheduleFirstProjection(node->InputAt(0));
+
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, p.reason(), node->id(), p.feedback(),
@@ -3139,6 +3231,8 @@ void InstructionSelector::VisitDeoptimizeIf(Node* node) {
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ TryPrepareScheduleFirstProjection(node->InputAt(0));
+
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, p.reason(), node->id(), p.feedback(),
@@ -3165,6 +3259,7 @@ void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
void InstructionSelector::EmitIdentity(Node* node) {
MarkAsUsed(node->InputAt(0));
+ MarkAsDefined(node);
SetRename(node, node->InputAt(0));
}
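For context, a simplified sketch of the node shape that the new TryPrepareScheduleFirstProjection targets (illustrative only, not actual V8 test code):

  // binop = Int32AddWithOverflow(a, b)
  // value = Projection(0, binop)   // the arithmetic result
  // ovf   = Projection(1, binop)   // the overflow bit
  // Branch(ovf, if_true, if_false)
  //
  // Instructions are emitted in reverse order, so Projection(0) has to be
  // visited (and, via EmitIdentity, marked as defined) before the Branch is
  // emitted; only then can the backend fuse binop + branch into a single
  // "add; jo" pair instead of materializing the overflow flag.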
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 7cd22ee492..f6a333e864 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -15,6 +15,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -518,6 +519,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void MarkAsSimd128(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
}
+ void MarkAsSimd256(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kSimd256, node);
+ }
void MarkAsTagged(Node* node) {
MarkAsRepresentation(MachineRepresentation::kTagged, node);
}
@@ -588,7 +592,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
- MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
+ MACHINE_SIMD128_OP_LIST(DECLARE_GENERATOR)
+ MACHINE_SIMD256_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
// Visit the load node with a value and opcode to replace with.
@@ -621,6 +626,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitStaticAssert(Node* node);
void VisitDeadValue(Node* node);
+ void TryPrepareScheduleFirstProjection(Node* maybe_projection);
+
void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
@@ -736,8 +743,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionOperandVector continuation_inputs_;
InstructionOperandVector continuation_outputs_;
InstructionOperandVector continuation_temps_;
- BoolVector defined_;
- BoolVector used_;
+ BitVector defined_;
+ BitVector used_;
IntVector effect_level_;
int current_effect_level_;
IntVector virtual_registers_;
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 0ec6772947..a1db78f214 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -81,30 +81,38 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
- const bool kCombineFPAliasing = kFPAliasing == AliasingKind::kCombine &&
- this->IsFPLocationOperand() &&
- other.IsFPLocationOperand();
- const bool kComplexS128SlotAliasing =
- (this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
- (other.IsSimd128StackSlot() && this->IsAnyStackSlot());
- if (!kCombineFPAliasing && !kComplexS128SlotAliasing) {
+ const bool combine_fp_aliasing = kFPAliasing == AliasingKind::kCombine &&
+ this->IsFPLocationOperand() &&
+ other.IsFPLocationOperand();
+ const bool stack_slots = this->IsAnyStackSlot() && other.IsAnyStackSlot();
+ if (!combine_fp_aliasing && !stack_slots) {
return EqualsCanonicalized(other);
}
const LocationOperand& loc = *LocationOperand::cast(this);
const LocationOperand& other_loc = LocationOperand::cast(other);
+ MachineRepresentation rep = loc.representation();
+ MachineRepresentation other_rep = other_loc.representation();
LocationOperand::LocationKind kind = loc.location_kind();
LocationOperand::LocationKind other_kind = other_loc.location_kind();
if (kind != other_kind) return false;
- MachineRepresentation rep = loc.representation();
- MachineRepresentation other_rep = other_loc.representation();
- if (kCombineFPAliasing && !kComplexS128SlotAliasing) {
+ if (combine_fp_aliasing && !stack_slots) {
if (rep == other_rep) return EqualsCanonicalized(other);
- if (kind == LocationOperand::REGISTER) {
- // FP register-register interference.
- return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
- other_loc.register_code());
- }
+ DCHECK_EQ(kind, LocationOperand::REGISTER);
+ // FP register-register interference.
+ return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
+ other_loc.register_code());
+ }
+
+ DCHECK(stack_slots);
+ int num_slots =
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(rep));
+ int num_slots_other =
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(other_rep));
+ const bool complex_stack_slot_interference =
+ (num_slots > 1 || num_slots_other > 1);
+ if (!complex_stack_slot_interference) {
+ return EqualsCanonicalized(other);
}
// Complex multi-slot operand interference:
@@ -216,6 +224,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
} else if (op.IsFloatRegister()) {
os << "[" << FloatRegister::from_code(allocated.register_code())
<< "|R";
+#if V8_TARGET_ARCH_X64
+ } else if (op.IsSimd256Register()) {
+ os << "[" << Simd256Register::from_code(allocated.register_code())
+ << "|R";
+#endif // V8_TARGET_ARCH_X64
} else {
DCHECK(op.IsSimd128Register());
os << "[" << Simd128Register::from_code(allocated.register_code())
@@ -573,11 +586,10 @@ Handle<HeapObject> Constant::ToHeapObject() const {
return value;
}
-Handle<CodeT> Constant::ToCode() const {
+Handle<Code> Constant::ToCode() const {
DCHECK_EQ(kHeapObject, type());
- Handle<CodeT> value(
- reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
- DCHECK(value->IsCodeT(GetPtrComprCageBaseSlow(*value)));
+ Handle<Code> value(reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
+ DCHECK(value->IsCode(GetPtrComprCageBaseSlow(*value)));
return value;
}
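The rewritten InterferesWith above generalizes the old Simd128-only special case: two stack slots need the complex interference check whenever at least one of them spans more than one frame slot. A standalone model of the slot math, assuming a 64-bit target where one frame slot is 8 bytes:

  #include <cstdio>

  // Mirrors AlignedSlotAllocator::NumSlotsForWidth for an 8-byte slot size.
  constexpr int kSlotSizeBytes = 8;
  constexpr int NumSlotsForWidth(int width_in_bytes) {
    return (width_in_bytes + kSlotSizeBytes - 1) / kSlotSizeBytes;
  }

  int main() {
    // A Simd128 stack operand covers two frame slots, a Float64 covers one,
    // so the pair takes the multi-slot interference path rather than the
    // plain EqualsCanonicalized comparison.
    std::printf("Simd128 slots: %d\n", NumSlotsForWidth(16));  // 2
    std::printf("Float64 slots: %d\n", NumSlotsForWidth(8));   // 1
  }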
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index e9f0f9514b..cb889169c3 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -93,12 +93,14 @@ class V8_EXPORT_PRIVATE INSTRUCTION_OPERAND_ALIGN InstructionOperand {
inline bool IsFloatRegister() const;
inline bool IsDoubleRegister() const;
inline bool IsSimd128Register() const;
+ inline bool IsSimd256Register() const;
inline bool IsAnyStackSlot() const;
inline bool IsStackSlot() const;
inline bool IsFPStackSlot() const;
inline bool IsFloatStackSlot() const;
inline bool IsDoubleStackSlot() const;
inline bool IsSimd128StackSlot() const;
+ inline bool IsSimd256StackSlot() const;
template <typename SubKindOperand>
static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
@@ -536,6 +538,13 @@ class LocationOperand : public InstructionOperand {
return Simd128Register::from_code(register_code());
}
+#if defined(V8_TARGET_ARCH_X64)
+ Simd256Register GetSimd256Register() const {
+ DCHECK(IsSimd256Register());
+ return Simd256Register::from_code(register_code());
+ }
+#endif
+
LocationKind location_kind() const {
return LocationKindField::decode(value_);
}
@@ -654,6 +663,11 @@ bool InstructionOperand::IsSimd128Register() const {
MachineRepresentation::kSimd128;
}
+bool InstructionOperand::IsSimd256Register() const {
+ return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kSimd256;
+}
+
bool InstructionOperand::IsAnyStackSlot() const {
return IsAnyLocationOperand() &&
LocationOperand::cast(this)->location_kind() ==
@@ -694,6 +708,14 @@ bool InstructionOperand::IsSimd128StackSlot() const {
MachineRepresentation::kSimd128;
}
+bool InstructionOperand::IsSimd256StackSlot() const {
+ return IsAnyLocationOperand() &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kSimd256;
+}
+
uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAnyLocationOperand()) {
MachineRepresentation canonical = MachineRepresentation::kNone;
@@ -736,19 +758,38 @@ class V8_EXPORT_PRIVATE MoveOperands final
const InstructionOperand& destination)
: source_(source), destination_(destination) {
DCHECK(!source.IsInvalid() && !destination.IsInvalid());
+ CheckPointerCompressionConsistency();
}
MoveOperands(const MoveOperands&) = delete;
MoveOperands& operator=(const MoveOperands&) = delete;
+ void CheckPointerCompressionConsistency() {
+#if DEBUG && V8_COMPRESS_POINTERS
+ if (!source_.IsLocationOperand()) return;
+ if (!destination_.IsLocationOperand()) return;
+ using MR = MachineRepresentation;
+ MR dest_rep = LocationOperand::cast(&destination_)->representation();
+ if (dest_rep == MR::kTagged || dest_rep == MR::kTaggedPointer) {
+ MR src_rep = LocationOperand::cast(&source_)->representation();
+ DCHECK_NE(src_rep, MR::kCompressedPointer);
+ DCHECK_NE(src_rep, MR::kCompressed);
+ }
+#endif
+ }
+
const InstructionOperand& source() const { return source_; }
InstructionOperand& source() { return source_; }
- void set_source(const InstructionOperand& operand) { source_ = operand; }
+ void set_source(const InstructionOperand& operand) {
+ source_ = operand;
+ CheckPointerCompressionConsistency();
+ }
const InstructionOperand& destination() const { return destination_; }
InstructionOperand& destination() { return destination_; }
void set_destination(const InstructionOperand& operand) {
destination_ = operand;
+ CheckPointerCompressionConsistency();
}
// The gap resolver marks moves as "in-progress" by clearing the
@@ -798,8 +839,7 @@ class V8_EXPORT_PRIVATE ParallelMove final
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
- Zone* zone = get_allocator().zone();
- return AddMove(from, to, zone);
+ return AddMove(from, to, zone());
}
MoveOperands* AddMove(const InstructionOperand& from,
@@ -1192,7 +1232,7 @@ class V8_EXPORT_PRIVATE Constant final {
}
Handle<HeapObject> ToHeapObject() const;
- Handle<CodeT> ToCode() const;
+ Handle<Code> ToCode() const;
private:
Type type_;
@@ -1578,6 +1618,9 @@ class V8_EXPORT_PRIVATE InstructionBlock final
inline bool IsSwitchTarget() const { return switch_target_; }
inline bool ShouldAlignCodeTarget() const { return code_target_alignment_; }
inline bool ShouldAlignLoopHeader() const { return loop_header_alignment_; }
+ inline bool IsLoopHeaderInAssemblyOrder() const {
+ return loop_header_alignment_;
+ }
using Predecessors = ZoneVector<RpoNumber>;
Predecessors& predecessors() { return predecessors_; }
@@ -1712,7 +1755,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
constexpr int kFPRepMask =
RepresentationBit(MachineRepresentation::kFloat32) |
RepresentationBit(MachineRepresentation::kFloat64) |
- RepresentationBit(MachineRepresentation::kSimd128);
+ RepresentationBit(MachineRepresentation::kSimd128) |
+ RepresentationBit(MachineRepresentation::kSimd256);
return (representation_mask() & kFPRepMask) != 0;
}
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index 9984c3e2b3..a4b955f3a6 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -56,7 +56,7 @@ struct JumpThreadingState {
};
struct GapJumpRecord {
- GapJumpRecord(Zone* zone) : zone_(zone), gap_jump_records_(zone) {}
+ explicit GapJumpRecord(Zone* zone) : zone_(zone), gap_jump_records_(zone) {}
struct Record {
RpoNumber block;
@@ -269,7 +269,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
InstructionSequence* code) {
if (!v8_flags.turbo_jt) return;
- ZoneVector<bool> skip(static_cast<int>(result.size()), false, local_zone);
+ BitVector skip(static_cast<int>(result.size()), local_zone);
// Skip empty blocks when the previous block doesn't fall through.
bool prev_fallthru = true;
@@ -277,7 +277,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
RpoNumber block_rpo = block->rpo_number();
int block_num = block_rpo.ToInt();
RpoNumber result_rpo = result[block_num];
- skip[block_num] = !prev_fallthru && result_rpo != block_rpo;
+ if (!prev_fallthru && result_rpo != block_rpo) skip.Add(block_num);
if (result_rpo != block_rpo) {
// We need the handler information to be propagated, so that branch
@@ -296,7 +296,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp ||
instr->arch_opcode() == kArchRet) {
- if (skip[block_num]) {
+ if (skip.Contains(block_num)) {
// Overwrite a redundant jump with a nop.
TRACE("jt-fw nop @%d\n", i);
instr->OverwriteWithNop();
@@ -334,7 +334,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
int ao = 0;
for (auto const block : code->ao_blocks()) {
block->set_ao_number(RpoNumber::FromInt(ao));
- if (!skip[block->rpo_number().ToInt()]) ao++;
+ if (!skip.Contains(block->rpo_number().ToInt())) ao++;
}
}
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
index 74b25770d4..95cad37d7c 100644
--- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// TODO(LOONG_dev): consider renaming these macros.
#define TRACE_MSG(msg) \
@@ -159,9 +159,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- __ CheckPageFlag(
- value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, exit());
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTagged(value_, value_);
+ }
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
@@ -199,13 +201,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
-#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
class ool_name final : public OutOfLineCode { \
public: \
ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
: OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
\
- void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
+ void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
\
private: \
T const dst_; \
@@ -450,8 +452,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
- UseScratchRegisterScope temps(tasm()); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(masm()); \
Register scratch = temps.Acquire(); \
__ PrepareCallCFunction(0, 2, scratch); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
@@ -459,8 +461,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
- UseScratchRegisterScope temps(tasm()); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(masm()); \
Register scratch = temps.Acquire(); \
__ PrepareCallCFunction(0, 1, scratch); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -487,7 +489,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -495,10 +497,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
+ masm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ masm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -507,19 +509,19 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ ComputeCodeStartAddress(scratch);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
@@ -530,16 +532,16 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ Ld_w(scratch, FieldMemOperand(
- scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ __ LoadTaggedField(scratch,
+ MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Ld_hu(scratch, FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset));
__ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne, scratch, Operand(zero_reg));
@@ -628,21 +630,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check the function's context matches the context argument.
- __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(scratch));
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedField(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchPrepareCallCFunction: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
int const num_gp_parameters = ParamField::decode(instr->opcode());
int const num_fp_parameters = FPParamField::decode(instr->opcode());
@@ -749,7 +752,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -801,39 +804,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: // Fall through.
- case kArchAtomicStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+    case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
- Operand offset(zero_reg);
+ Register value = i.InputRegister(2);
+
if (addressing_mode == kMode_MRI) {
- offset = Operand(i.InputInt64(1));
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, Operand(i.InputInt64(1)), value, mode,
+ DetermineStubCallMode());
+ __ StoreTaggedField(value, MemOperand(object, i.InputInt64(1)));
+
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
} else {
DCHECK_EQ(addressing_mode, kMode_MRR);
- offset = Operand(i.InputRegister(1));
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, Operand(i.InputRegister(1)), value, mode,
+ DetermineStubCallMode());
+ __ StoreTaggedField(value, MemOperand(object, i.InputRegister(1)));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
}
+ break;
+ }
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
+ Register object = i.InputRegister(0);
+ int64_t offset = i.InputInt64(1);
Register value = i.InputRegister(2);
auto ool = zone()->New<OutOfLineRecordWrite>(
- this, object, offset, value, mode, DetermineStubCallMode());
- if (arch_opcode == kArchStoreWithWriteBarrier) {
- if (addressing_mode == kMode_MRI) {
- __ St_d(value, MemOperand(object, i.InputInt64(1)));
- } else {
- DCHECK_EQ(addressing_mode, kMode_MRR);
- __ St_d(value, MemOperand(object, i.InputRegister(1)));
- }
- } else {
- DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
- DCHECK_EQ(addressing_mode, kMode_MRI);
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
- __ Add_d(scratch, object, Operand(i.InputInt64(1)));
- __ amswap_db_d(zero_reg, value, scratch);
- }
+ this, object, Operand(offset), value, mode, DetermineStubCallMode());
+ __ AtomicStoreTaggedField(value, MemOperand(object, offset));
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -843,7 +858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchStackSlot: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1121,7 +1136,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ And(t8, i.InputRegister(0), i.InputOperand(1));
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
- case kLoong64Cmp:
+ case kLoong64Cmp32:
+ case kLoong64Cmp64:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kLoong64Mov:
@@ -1225,8 +1241,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kLoong64Float64Mod: {
// TODO(turbofan): implement directly.
- FrameScope scope(tasm(), StackFrame::MANUAL);
- UseScratchRegisterScope temps(tasm());
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ PrepareCallCFunction(0, 2, scratch);
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
@@ -1363,7 +1379,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
__ movfr2gr_s(i.OutputRegister(), scratch_d);
if (set_overflow_to_min_i32) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
@@ -1392,7 +1408,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kLoong64Float64ToInt64: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg;
@@ -1438,7 +1454,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
__ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
if (set_overflow_to_min_i32) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
@@ -1493,18 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kLoong64Ld_b:
__ Ld_b(i.OutputRegister(), i.MemoryOperand());
break;
- case kLoong64St_b:
- __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand());
+ case kLoong64St_b: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ St_b(i.InputOrZeroRegister(index), mem);
break;
+ }
case kLoong64Ld_hu:
__ Ld_hu(i.OutputRegister(), i.MemoryOperand());
break;
case kLoong64Ld_h:
__ Ld_h(i.OutputRegister(), i.MemoryOperand());
break;
- case kLoong64St_h:
- __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand());
+ case kLoong64St_h: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ St_h(i.InputOrZeroRegister(index), mem);
break;
+ }
case kLoong64Ld_w:
__ Ld_w(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1514,12 +1536,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kLoong64Ld_d:
__ Ld_d(i.OutputRegister(), i.MemoryOperand());
break;
- case kLoong64St_w:
- __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand());
+ case kLoong64St_w: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ St_w(i.InputOrZeroRegister(index), mem);
break;
- case kLoong64St_d:
- __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand());
+ }
+ case kLoong64St_d: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ St_d(i.InputOrZeroRegister(index), mem);
+ break;
+ }
+ case kLoong64LoadDecompressTaggedSigned:
+ __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64LoadDecompressTagged:
+ __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64StoreCompressTagged: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ StoreTaggedField(i.InputOrZeroRegister(index), mem);
+ break;
+ }
+ case kLoong64AtomicLoadDecompressTaggedSigned:
+ __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64AtomicLoadDecompressTagged:
+ __ AtomicDecompressTagged(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64AtomicStoreCompressTagged: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ AtomicStoreTaggedField(i.InputOrZeroRegister(index), mem);
break;
+ }
case kLoong64Fld_s: {
__ Fld_s(i.OutputSingleRegister(), i.MemoryOperand());
break;
@@ -1539,12 +1591,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kLoong64Fst_d: {
- FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroDoubleRegister(index);
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ Fst_d(ft, i.MemoryOperand());
+ __ Fst_d(ft, operand);
break;
}
case kLoong64Dbar: {
@@ -1634,7 +1688,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
break;
- case kLoong64StoreCompressTagged:
case kLoong64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
break;
@@ -1863,11 +1916,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
-void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+void SignExtend(MacroAssembler* masm, Instruction* instr, Register* left,
+ Operand* right, Register* temp0, Register* temp1) {
+ bool need_signed = false;
+ MachineRepresentation rep_left =
+ LocationOperand::cast(instr->InputAt(0))->representation();
+ need_signed = IsAnyTagged(rep_left) || IsAnyCompressed(rep_left) ||
+ rep_left == MachineRepresentation::kWord64;
+ if (need_signed) {
+ masm->slli_w(*temp0, *left, 0);
+ *left = *temp0;
+ }
+
+ if (instr->InputAt(1)->IsAnyLocationOperand()) {
+ MachineRepresentation rep_right =
+ LocationOperand::cast(instr->InputAt(1))->representation();
+ need_signed = IsAnyTagged(rep_right) || IsAnyCompressed(rep_right) ||
+ rep_right == MachineRepresentation::kWord64;
+ if (need_signed && right->is_reg()) {
+ masm->slli_w(*temp1, right->rm(), 0);
+ *right = Operand(*temp1);
+ }
+ }
+}
+
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ tasm->
+#define __ masm->
Loong64OperandConverter i(gen, instr);
// LOONG64 does not have condition code flags, so compare and branch are
@@ -1882,7 +1959,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
__ Branch(tlabel, cc, t8, Operand(zero_reg));
} else if (instr->arch_opcode() == kLoong64Add_d ||
instr->arch_opcode() == kLoong64Sub_d) {
- UseScratchRegisterScope temps(tasm);
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Condition cc = FlagsConditionToConditionOvf(condition);
@@ -1915,9 +1992,18 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
}
- } else if (instr->arch_opcode() == kLoong64Cmp) {
+ } else if (instr->arch_opcode() == kLoong64Cmp32 ||
+ instr->arch_opcode() == kLoong64Cmp64) {
Condition cc = FlagsConditionToConditionCmp(condition);
- __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ // Word32Compare has two temp registers.
+ if (COMPRESS_POINTERS_BOOL && (instr->arch_opcode() == kLoong64Cmp32)) {
+ Register temp0 = i.TempRegister(0);
+ Register temp1 = i.TempRegister(1);
+ SignExtend(masm, instr, &left, &right, &temp0, &temp1);
+ }
+ __ Branch(tlabel, cc, left, right);
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
Condition cc = FlagsConditionToConditionCmp(condition);
DCHECK((cc == ls) || (cc == hi));
@@ -1941,7 +2027,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ tasm()->
+#define __ masm()->
}
// Assembles branches after an instruction.
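The SignExtend helper introduced above exists because, with pointer compression, a 32-bit payload sitting in a 64-bit register is not guaranteed to be canonically sign-extended, so kLoong64Cmp32 must normalize both operands before performing a 64-bit compare (slli.w rd, rs, 0 on LoongArch64 writes the low 32 bits of rs into rd, sign-extended). A tiny illustrative model of the idea, not V8 code:

  #include <cstdint>

  int64_t SignExtendLow32(uint64_t reg) {
    return static_cast<int32_t>(static_cast<uint32_t>(reg));
  }

  bool Cmp32Equal(uint64_t left, uint64_t right) {
    // Without the normalization, stale upper bits of a compressed value
    // could make two equal 32-bit payloads compare unequal.
    return SignExtendLow32(left) == SignExtendLow32(right);
  }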
@@ -1949,7 +2035,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -2014,7 +2100,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
};
auto ool = zone()->New<OutOfLineTrap>(this, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2041,7 +2127,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kLoong64Add_d ||
instr->arch_opcode() == kLoong64Sub_d) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
Condition cc = FlagsConditionToConditionOvf(condition);
// Check for overflow creates 1 or 0 for result.
@@ -2059,13 +2145,19 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
instr->arch_opcode() == kLoong64MulOvf_d) {
// Overflow occurs if overflow register is not zero
__ Sgtu(result, t8, zero_reg);
- } else if (instr->arch_opcode() == kLoong64Cmp) {
+ } else if (instr->arch_opcode() == kLoong64Cmp32 ||
+ instr->arch_opcode() == kLoong64Cmp64) {
Condition cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
case eq:
case ne: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
+ if (COMPRESS_POINTERS_BOOL && (instr->arch_opcode() == kLoong64Cmp32)) {
+ Register temp0 = i.TempRegister(0);
+ Register temp1 = i.TempRegister(1);
+ SignExtend(masm(), instr, &left, &right, &temp0, &temp1);
+ }
if (instr->InputAt(1)->IsImmediate()) {
if (is_int12(-right.immediate())) {
if (right.immediate() == 0) {
@@ -2103,6 +2195,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case ge: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
+ if (COMPRESS_POINTERS_BOOL && (instr->arch_opcode() == kLoong64Cmp32)) {
+ Register temp0 = i.TempRegister(0);
+ Register temp1 = i.TempRegister(1);
+ SignExtend(masm(), instr, &left, &right, &temp0, &temp1);
+ }
__ Slt(result, left, right);
if (cc == ge) {
__ xori(result, result, 1);
@@ -2112,6 +2209,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case le: {
Register left = i.InputRegister(1);
Operand right = i.InputOperand(0);
+ if (COMPRESS_POINTERS_BOOL && (instr->arch_opcode() == kLoong64Cmp32)) {
+ Register temp0 = i.TempRegister(0);
+ Register temp1 = i.TempRegister(1);
+ SignExtend(masm(), instr, &left, &right, &temp0, &temp1);
+ }
__ Slt(result, left, right);
if (cc == le) {
__ xori(result, result, 1);
@@ -2121,6 +2223,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case hs: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
+ if (COMPRESS_POINTERS_BOOL && (instr->arch_opcode() == kLoong64Cmp32)) {
+ Register temp0 = i.TempRegister(0);
+ Register temp1 = i.TempRegister(1);
+ SignExtend(masm(), instr, &left, &right, &temp0, &temp1);
+ }
__ Sltu(result, left, right);
if (cc == hs) {
__ xori(result, result, 1);
@@ -2130,6 +2237,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case ls: {
Register left = i.InputRegister(1);
Operand right = i.InputOperand(0);
+ if (COMPRESS_POINTERS_BOOL && (instr->arch_opcode() == kLoong64Cmp32)) {
+ Register temp0 = i.TempRegister(0);
+ Register temp1 = i.TempRegister(1);
+ SignExtend(masm(), instr, &left, &right, &temp0, &temp1);
+ }
__ Sltu(result, left, right);
if (cc == ls) {
__ xori(result, result, 1);
@@ -2242,6 +2354,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ Push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -2285,7 +2401,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Ld_d(scratch, FieldMemOperand(
kWasmInstanceRegister,
@@ -2427,11 +2543,71 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
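+// Pushes {source} onto the stack and returns an operand for the new stack
+// slot. The extra slots are tracked in {temp_slots_} so they can later be
+// released via Pop() or PopTempStackSlots().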
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ Loong64OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ Push(g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, g.ToMemOperand(source));
+ __ Push(scratch);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ Sub_d(sp, sp, Operand(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
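+// Reverses a Push(): moves the value on top of the stack into {dest} and
+// releases the temporary slots again.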
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ Loong64OperandConverter g(this, nullptr);
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ if (dest->IsRegister()) {
+ __ Pop(g.ToRegister(dest));
+ } else if (dest->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ St_d(scratch, g.ToMemOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ Add_d(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
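+// Drops any temporary stack slots still outstanding from Push().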
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ Add_d(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
- move_cycle_.temps.emplace(tasm());
+ move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they
@@ -2481,27 +2657,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
} else {
// The scratch registers are blocked by pending moves. Use the stack
// instead.
- int new_slots = ElementSizeInPointers(rep);
- Loong64OperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ Push(g.ToRegister(source));
- } else if (source->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.Acquire();
- __ Ld_d(scratch, g.ToMemOperand(source));
- __ Push(scratch);
- } else {
- // No push instruction for this operand type. Bump the stack pointer and
- // assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ Sub_d(sp, sp, Operand(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -2531,25 +2687,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
AssembleMove(&scratch, dest);
}
} else {
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- Loong64OperandConverter g(this, nullptr);
- if (dest->IsRegister()) {
- __ Pop(g.ToRegister(dest));
- } else if (dest->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.Acquire();
- __ Pop(scratch);
- __ St_d(scratch, g.ToMemOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ Add_d(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
// Restore the default state to release the {UseScratchRegisterScope} and to
// prepare for the next cycle.
@@ -2559,7 +2697,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
void CodeGenerator::SetPendingMove(MoveOperands* move) {
InstructionOperand* src = &move->source();
InstructionOperand* dst = &move->destination();
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) {
Register temp = temps.Acquire();
move_cycle_.scratch_regs.set(temp);
@@ -2616,7 +2754,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ Ld_d(g.ToRegister(destination), src);
} else {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ Ld_d(scratch, src);
__ St_d(scratch, g.ToMemOperand(destination));
@@ -2624,7 +2762,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : scratch;
@@ -2659,8 +2797,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
- case Constant::kCompressedHeapObject:
- UNREACHABLE();
+ case Constant::kCompressedHeapObject: {
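+        // Compressed heap constants are materialized either from the roots
+        // table or as a compressed embedded object reference.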
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ }
+ break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on LOONG64.
}
@@ -2671,7 +2817,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ St_d(zero_reg, dst);
} else {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ li(scratch, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
__ St_d(scratch, dst);
@@ -2722,7 +2868,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Register-register.
Register src = g.ToRegister(source);
@@ -2744,7 +2890,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// Since the Ld instruction may need a scratch reg,
     // we should not use both scratch registers provided by
// UseScratchRegisterScope here.
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source);
@@ -2770,7 +2916,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source);
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
index 5ad8096509..e17572ba76 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -12,374 +12,380 @@ namespace compiler {
// LOONG64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Loong64Add_d) \
- V(Loong64Add_w) \
- V(Loong64AddOvf_d) \
- V(Loong64Sub_d) \
- V(Loong64Sub_w) \
- V(Loong64SubOvf_d) \
- V(Loong64Mul_d) \
- V(Loong64MulOvf_w) \
- V(Loong64MulOvf_d) \
- V(Loong64Mulh_d) \
- V(Loong64Mulh_w) \
- V(Loong64Mulh_du) \
- V(Loong64Mulh_wu) \
- V(Loong64Mul_w) \
- V(Loong64Div_d) \
- V(Loong64Div_w) \
- V(Loong64Div_du) \
- V(Loong64Div_wu) \
- V(Loong64Mod_d) \
- V(Loong64Mod_w) \
- V(Loong64Mod_du) \
- V(Loong64Mod_wu) \
- V(Loong64And) \
- V(Loong64And32) \
- V(Loong64Or) \
- V(Loong64Or32) \
- V(Loong64Nor) \
- V(Loong64Nor32) \
- V(Loong64Xor) \
- V(Loong64Xor32) \
- V(Loong64Alsl_d) \
- V(Loong64Alsl_w) \
- V(Loong64Sll_d) \
- V(Loong64Sll_w) \
- V(Loong64Srl_d) \
- V(Loong64Srl_w) \
- V(Loong64Sra_d) \
- V(Loong64Sra_w) \
- V(Loong64Rotr_d) \
- V(Loong64Rotr_w) \
- V(Loong64Bstrpick_d) \
- V(Loong64Bstrpick_w) \
- V(Loong64Bstrins_d) \
- V(Loong64Bstrins_w) \
- V(Loong64ByteSwap64) \
- V(Loong64ByteSwap32) \
- V(Loong64Clz_d) \
- V(Loong64Clz_w) \
- V(Loong64Mov) \
- V(Loong64Tst) \
- V(Loong64Cmp) \
- V(Loong64Float32Cmp) \
- V(Loong64Float32Add) \
- V(Loong64Float32Sub) \
- V(Loong64Float32Mul) \
- V(Loong64Float32Div) \
- V(Loong64Float32Abs) \
- V(Loong64Float32Neg) \
- V(Loong64Float32Sqrt) \
- V(Loong64Float32Max) \
- V(Loong64Float32Min) \
- V(Loong64Float32ToFloat64) \
- V(Loong64Float32RoundDown) \
- V(Loong64Float32RoundUp) \
- V(Loong64Float32RoundTruncate) \
- V(Loong64Float32RoundTiesEven) \
- V(Loong64Float32ToInt32) \
- V(Loong64Float32ToInt64) \
- V(Loong64Float32ToUint32) \
- V(Loong64Float32ToUint64) \
- V(Loong64Float64Cmp) \
- V(Loong64Float64Add) \
- V(Loong64Float64Sub) \
- V(Loong64Float64Mul) \
- V(Loong64Float64Div) \
- V(Loong64Float64Mod) \
- V(Loong64Float64Abs) \
- V(Loong64Float64Neg) \
- V(Loong64Float64Sqrt) \
- V(Loong64Float64Max) \
- V(Loong64Float64Min) \
- V(Loong64Float64ToFloat32) \
- V(Loong64Float64RoundDown) \
- V(Loong64Float64RoundUp) \
- V(Loong64Float64RoundTruncate) \
- V(Loong64Float64RoundTiesEven) \
- V(Loong64Float64ToInt32) \
- V(Loong64Float64ToInt64) \
- V(Loong64Float64ToUint32) \
- V(Loong64Float64ToUint64) \
- V(Loong64Int32ToFloat32) \
- V(Loong64Int32ToFloat64) \
- V(Loong64Int64ToFloat32) \
- V(Loong64Int64ToFloat64) \
- V(Loong64Uint32ToFloat32) \
- V(Loong64Uint32ToFloat64) \
- V(Loong64Uint64ToFloat32) \
- V(Loong64Uint64ToFloat64) \
- V(Loong64Float64ExtractLowWord32) \
- V(Loong64Float64ExtractHighWord32) \
- V(Loong64Float64InsertLowWord32) \
- V(Loong64Float64InsertHighWord32) \
- V(Loong64BitcastDL) \
- V(Loong64BitcastLD) \
- V(Loong64Float64SilenceNaN) \
- V(Loong64Ld_b) \
- V(Loong64Ld_bu) \
- V(Loong64St_b) \
- V(Loong64Ld_h) \
- V(Loong64Ld_hu) \
- V(Loong64St_h) \
- V(Loong64Ld_w) \
- V(Loong64Ld_wu) \
- V(Loong64St_w) \
- V(Loong64Ld_d) \
- V(Loong64St_d) \
- V(Loong64Fld_s) \
- V(Loong64Fst_s) \
- V(Loong64Fld_d) \
- V(Loong64Fst_d) \
- V(Loong64Push) \
- V(Loong64Peek) \
- V(Loong64Poke) \
- V(Loong64StackClaim) \
- V(Loong64Ext_w_b) \
- V(Loong64Ext_w_h) \
- V(Loong64Dbar) \
- V(Loong64S128Const) \
- V(Loong64S128Zero) \
- V(Loong64S128AllOnes) \
- V(Loong64I32x4Splat) \
- V(Loong64I32x4ExtractLane) \
- V(Loong64I32x4ReplaceLane) \
- V(Loong64I32x4Add) \
- V(Loong64I32x4Sub) \
- V(Loong64F64x2Abs) \
- V(Loong64F64x2Neg) \
- V(Loong64F32x4Splat) \
- V(Loong64F32x4ExtractLane) \
- V(Loong64F32x4ReplaceLane) \
- V(Loong64F32x4SConvertI32x4) \
- V(Loong64F32x4UConvertI32x4) \
- V(Loong64I32x4Mul) \
- V(Loong64I32x4MaxS) \
- V(Loong64I32x4MinS) \
- V(Loong64I32x4Eq) \
- V(Loong64I32x4Ne) \
- V(Loong64I32x4Shl) \
- V(Loong64I32x4ShrS) \
- V(Loong64I32x4ShrU) \
- V(Loong64I32x4MaxU) \
- V(Loong64I32x4MinU) \
- V(Loong64F64x2Sqrt) \
- V(Loong64F64x2Add) \
- V(Loong64F64x2Sub) \
- V(Loong64F64x2Mul) \
- V(Loong64F64x2Div) \
- V(Loong64F64x2Min) \
- V(Loong64F64x2Max) \
- V(Loong64F64x2Eq) \
- V(Loong64F64x2Ne) \
- V(Loong64F64x2Lt) \
- V(Loong64F64x2Le) \
- V(Loong64F64x2Splat) \
- V(Loong64F64x2ExtractLane) \
- V(Loong64F64x2ReplaceLane) \
- V(Loong64F64x2Pmin) \
- V(Loong64F64x2Pmax) \
- V(Loong64F64x2Ceil) \
- V(Loong64F64x2Floor) \
- V(Loong64F64x2Trunc) \
- V(Loong64F64x2NearestInt) \
- V(Loong64F64x2ConvertLowI32x4S) \
- V(Loong64F64x2ConvertLowI32x4U) \
- V(Loong64F64x2PromoteLowF32x4) \
- V(Loong64F64x2RelaxedMin) \
- V(Loong64F64x2RelaxedMax) \
- V(Loong64I64x2Splat) \
- V(Loong64I64x2ExtractLane) \
- V(Loong64I64x2ReplaceLane) \
- V(Loong64I64x2Add) \
- V(Loong64I64x2Sub) \
- V(Loong64I64x2Mul) \
- V(Loong64I64x2Neg) \
- V(Loong64I64x2Shl) \
- V(Loong64I64x2ShrS) \
- V(Loong64I64x2ShrU) \
- V(Loong64I64x2BitMask) \
- V(Loong64I64x2Eq) \
- V(Loong64I64x2Ne) \
- V(Loong64I64x2GtS) \
- V(Loong64I64x2GeS) \
- V(Loong64I64x2Abs) \
- V(Loong64I64x2SConvertI32x4Low) \
- V(Loong64I64x2SConvertI32x4High) \
- V(Loong64I64x2UConvertI32x4Low) \
- V(Loong64I64x2UConvertI32x4High) \
- V(Loong64ExtMulLow) \
- V(Loong64ExtMulHigh) \
- V(Loong64ExtAddPairwise) \
- V(Loong64F32x4Abs) \
- V(Loong64F32x4Neg) \
- V(Loong64F32x4Sqrt) \
- V(Loong64F32x4Add) \
- V(Loong64F32x4Sub) \
- V(Loong64F32x4Mul) \
- V(Loong64F32x4Div) \
- V(Loong64F32x4Max) \
- V(Loong64F32x4Min) \
- V(Loong64F32x4Eq) \
- V(Loong64F32x4Ne) \
- V(Loong64F32x4Lt) \
- V(Loong64F32x4Le) \
- V(Loong64F32x4Pmin) \
- V(Loong64F32x4Pmax) \
- V(Loong64F32x4Ceil) \
- V(Loong64F32x4Floor) \
- V(Loong64F32x4Trunc) \
- V(Loong64F32x4NearestInt) \
- V(Loong64F32x4DemoteF64x2Zero) \
- V(Loong64F32x4RelaxedMin) \
- V(Loong64F32x4RelaxedMax) \
- V(Loong64I32x4SConvertF32x4) \
- V(Loong64I32x4UConvertF32x4) \
- V(Loong64I32x4Neg) \
- V(Loong64I32x4GtS) \
- V(Loong64I32x4GeS) \
- V(Loong64I32x4GtU) \
- V(Loong64I32x4GeU) \
- V(Loong64I32x4Abs) \
- V(Loong64I32x4BitMask) \
- V(Loong64I32x4DotI16x8S) \
- V(Loong64I32x4TruncSatF64x2SZero) \
- V(Loong64I32x4TruncSatF64x2UZero) \
- V(Loong64I32x4RelaxedTruncF32x4S) \
- V(Loong64I32x4RelaxedTruncF32x4U) \
- V(Loong64I32x4RelaxedTruncF64x2SZero) \
- V(Loong64I32x4RelaxedTruncF64x2UZero) \
- V(Loong64I16x8Splat) \
- V(Loong64I16x8ExtractLaneU) \
- V(Loong64I16x8ExtractLaneS) \
- V(Loong64I16x8ReplaceLane) \
- V(Loong64I16x8Neg) \
- V(Loong64I16x8Shl) \
- V(Loong64I16x8ShrS) \
- V(Loong64I16x8ShrU) \
- V(Loong64I16x8Add) \
- V(Loong64I16x8AddSatS) \
- V(Loong64I16x8Sub) \
- V(Loong64I16x8SubSatS) \
- V(Loong64I16x8Mul) \
- V(Loong64I16x8MaxS) \
- V(Loong64I16x8MinS) \
- V(Loong64I16x8Eq) \
- V(Loong64I16x8Ne) \
- V(Loong64I16x8GtS) \
- V(Loong64I16x8GeS) \
- V(Loong64I16x8AddSatU) \
- V(Loong64I16x8SubSatU) \
- V(Loong64I16x8MaxU) \
- V(Loong64I16x8MinU) \
- V(Loong64I16x8GtU) \
- V(Loong64I16x8GeU) \
- V(Loong64I16x8RoundingAverageU) \
- V(Loong64I16x8Abs) \
- V(Loong64I16x8BitMask) \
- V(Loong64I16x8Q15MulRSatS) \
- V(Loong64I16x8RelaxedQ15MulRS) \
- V(Loong64I8x16Splat) \
- V(Loong64I8x16ExtractLaneU) \
- V(Loong64I8x16ExtractLaneS) \
- V(Loong64I8x16ReplaceLane) \
- V(Loong64I8x16Neg) \
- V(Loong64I8x16Shl) \
- V(Loong64I8x16ShrS) \
- V(Loong64I8x16Add) \
- V(Loong64I8x16AddSatS) \
- V(Loong64I8x16Sub) \
- V(Loong64I8x16SubSatS) \
- V(Loong64I8x16MaxS) \
- V(Loong64I8x16MinS) \
- V(Loong64I8x16Eq) \
- V(Loong64I8x16Ne) \
- V(Loong64I8x16GtS) \
- V(Loong64I8x16GeS) \
- V(Loong64I8x16ShrU) \
- V(Loong64I8x16AddSatU) \
- V(Loong64I8x16SubSatU) \
- V(Loong64I8x16MaxU) \
- V(Loong64I8x16MinU) \
- V(Loong64I8x16GtU) \
- V(Loong64I8x16GeU) \
- V(Loong64I8x16RoundingAverageU) \
- V(Loong64I8x16Abs) \
- V(Loong64I8x16Popcnt) \
- V(Loong64I8x16BitMask) \
- V(Loong64S128And) \
- V(Loong64S128Or) \
- V(Loong64S128Xor) \
- V(Loong64S128Not) \
- V(Loong64S128Select) \
- V(Loong64S128AndNot) \
- V(Loong64I64x2AllTrue) \
- V(Loong64I32x4AllTrue) \
- V(Loong64I16x8AllTrue) \
- V(Loong64I8x16AllTrue) \
- V(Loong64V128AnyTrue) \
- V(Loong64S32x4InterleaveRight) \
- V(Loong64S32x4InterleaveLeft) \
- V(Loong64S32x4PackEven) \
- V(Loong64S32x4PackOdd) \
- V(Loong64S32x4InterleaveEven) \
- V(Loong64S32x4InterleaveOdd) \
- V(Loong64S32x4Shuffle) \
- V(Loong64S16x8InterleaveRight) \
- V(Loong64S16x8InterleaveLeft) \
- V(Loong64S16x8PackEven) \
- V(Loong64S16x8PackOdd) \
- V(Loong64S16x8InterleaveEven) \
- V(Loong64S16x8InterleaveOdd) \
- V(Loong64S16x4Reverse) \
- V(Loong64S16x2Reverse) \
- V(Loong64S8x16InterleaveRight) \
- V(Loong64S8x16InterleaveLeft) \
- V(Loong64S8x16PackEven) \
- V(Loong64S8x16PackOdd) \
- V(Loong64S8x16InterleaveEven) \
- V(Loong64S8x16InterleaveOdd) \
- V(Loong64I8x16Shuffle) \
- V(Loong64I8x16Swizzle) \
- V(Loong64S8x16Concat) \
- V(Loong64S8x8Reverse) \
- V(Loong64S8x4Reverse) \
- V(Loong64S8x2Reverse) \
- V(Loong64S128LoadSplat) \
- V(Loong64S128Load8x8S) \
- V(Loong64S128Load8x8U) \
- V(Loong64S128Load16x4S) \
- V(Loong64S128Load16x4U) \
- V(Loong64S128Load32x2S) \
- V(Loong64S128Load32x2U) \
- V(Loong64S128Load32Zero) \
- V(Loong64S128Load64Zero) \
- V(Loong64LoadLane) \
- V(Loong64StoreLane) \
- V(Loong64I32x4SConvertI16x8Low) \
- V(Loong64I32x4SConvertI16x8High) \
- V(Loong64I32x4UConvertI16x8Low) \
- V(Loong64I32x4UConvertI16x8High) \
- V(Loong64I16x8SConvertI8x16Low) \
- V(Loong64I16x8SConvertI8x16High) \
- V(Loong64I16x8SConvertI32x4) \
- V(Loong64I16x8UConvertI32x4) \
- V(Loong64I16x8UConvertI8x16Low) \
- V(Loong64I16x8UConvertI8x16High) \
- V(Loong64I8x16SConvertI16x8) \
- V(Loong64I8x16UConvertI16x8) \
- V(Loong64StoreCompressTagged) \
- V(Loong64Word64AtomicLoadUint32) \
- V(Loong64Word64AtomicLoadUint64) \
- V(Loong64Word64AtomicStoreWord64) \
- V(Loong64Word64AtomicAddUint64) \
- V(Loong64Word64AtomicSubUint64) \
- V(Loong64Word64AtomicAndUint64) \
- V(Loong64Word64AtomicOrUint64) \
- V(Loong64Word64AtomicXorUint64) \
- V(Loong64Word64AtomicExchangeUint64) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64MulOvf_d) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_du) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp32) \
+ V(Loong64Cmp64) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64LoadDecompressTaggedSigned) \
+ V(Loong64LoadDecompressTagged) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64F64x2RelaxedMin) \
+ V(Loong64F64x2RelaxedMax) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64F32x4RelaxedMin) \
+ V(Loong64F32x4RelaxedMax) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I32x4RelaxedTruncF32x4S) \
+ V(Loong64I32x4RelaxedTruncF32x4U) \
+ V(Loong64I32x4RelaxedTruncF64x2SZero) \
+ V(Loong64I32x4RelaxedTruncF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I16x8RelaxedQ15MulRS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64AtomicLoadDecompressTaggedSigned) \
+ V(Loong64AtomicLoadDecompressTagged) \
+ V(Loong64AtomicStoreCompressTagged) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
V(Loong64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
index 091c7ad9bc..69b10e29a9 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -41,6 +41,10 @@ class Loong64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+ MachineRepresentation GetRepresentation(Node* node) {
+ return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
+ }
+
bool IsIntegerConstant(Node* node) {
return (node->opcode() == IrOpcode::kInt32Constant) ||
(node->opcode() == IrOpcode::kInt64Constant);
@@ -74,6 +78,9 @@ class Loong64OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(int64_t value, InstructionCode opcode) {
switch (ArchOpcodeField::decode(opcode)) {
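+      // Compare operands may be arbitrary immediates; values that do not fit
+      // an instruction immediate are materialized into a scratch register
+      // when the compare is assembled.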
+ case kLoong64Cmp32:
+ case kLoong64Cmp64:
+ return true;
case kLoong64Sll_w:
case kLoong64Srl_w:
case kLoong64Sra_w:
@@ -360,7 +367,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@@ -373,6 +380,13 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
}
}
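+  // Loads relative to the root register use kMode_Root directly. As with the
+  // store path, this relies on {index} being a constant.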
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Root),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(index));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(output == nullptr ? node : output),
@@ -458,14 +472,32 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kLoong64Ld_w;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kLoong64LoadDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ opcode = kLoong64LoadDecompressTagged;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kLoong64Ld_d;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kLoong64Ld_wu;
+ break;
+#endif
case MachineRepresentation::kCompressed: // Fall through.
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kLoong64Ld_wu;
+ break;
+#endif
case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone: // Fall through.
@@ -492,14 +524,15 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
- if (v8_flags.enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
+ if (v8_flags.enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
}
// TODO(loong64): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier &&
!v8_flags.disable_write_barriers) {
- DCHECK(CanBeTaggedPointer(rep));
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -518,7 +551,7 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
Emit(code, 0, nullptr, input_count, inputs);
} else {
ArchOpcode opcode;
@@ -539,14 +572,20 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kLoong64St_w;
break;
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64St_d;
+ break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64:
- opcode = kLoong64St_d;
+ opcode = kLoong64StoreCompressTagged;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kLoong64StoreCompressTagged;
+ break;
+#endif
case MachineRepresentation::kSandboxedPointer: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone: // Fall through.
@@ -560,18 +599,25 @@ void InstructionSelector::VisitStore(Node* node) {
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.ResolvedValue());
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
if (is_int32(delta)) {
Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
- g.UseImmediate(static_cast<int32_t>(delta)), g.UseImmediate(0),
+ g.UseImmediate(static_cast<int32_t>(delta)),
g.UseRegisterOrImmediateZero(value));
return;
}
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ // This will only work if {index} is a constant.
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseImmediate(index), g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index),
@@ -804,25 +850,32 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
- Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
- Loong64OperandGenerator g(this);
- uint32_t sar = m.right().ResolvedValue();
- uint32_t shl = mleft.right().ResolvedValue();
- if ((sar == shl) && (sar == 16)) {
- Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
- return;
- } else if ((sar == shl) && (sar == 24)) {
- Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
- return;
- } else if ((sar == shl) && (sar == 32)) {
- Emit(kLoong64Sll_w, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(0));
- return;
+ if (CanCover(node, m.left().node())) {
+ Loong64OperandGenerator g(this);
+ if (m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
}
+ } else if (m.left().IsTruncateInt64ToInt32()) {
+ Emit(kLoong64Sra_w, g.DefineAsRegister(node),
+ g.UseRegister(m.left().InputAt(0)),
+ g.UseOperand(node->InputAt(1), kLoong64Sra_w));
+ return;
}
}
VisitRRO(this, kLoong64Sra_w, node);
@@ -895,6 +948,21 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
void InstructionSelector::VisitWord64Sar(Node* node) {
if (TryEmitExtendingLoad(this, node, node)) return;
+
+ Int64BinopMatcher m(node);
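+  // (Word64Sar (ChangeInt32ToInt64 x) imm) with imm < 32 only needs the low
+  // word of x, so it can be lowered to a 32-bit arithmetic shift, unless the
+  // extension would instead be folded into a sign-extending load.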
+ if (m.left().IsChangeInt32ToInt64() && m.right().HasResolvedValue() &&
+ is_uint5(m.right().ResolvedValue()) && CanCover(node, m.left().node())) {
+ if ((m.left().InputAt(0)->opcode() != IrOpcode::kLoad &&
+ m.left().InputAt(0)->opcode() != IrOpcode::kLoadImmutable) ||
+ !CanCover(m.left().node(), m.left().InputAt(0))) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Sra_w, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+
VisitRRO(this, kLoong64Sra_d, node);
}
@@ -1394,25 +1462,42 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint32(Node* node) {
}
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
- UNIMPLEMENTED();
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ EmitIdentity(node);
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- // On LoongArch64, int32 values should all be sign-extended to 64-bit, so
- // no need to sign-extend them here.
- // But when call to a host function in simulator, if the function return an
- // int32 value, the simulator do not sign-extend to int64, because in
- // simulator we do not know the function whether return an int32 or int64.
-#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kCall) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
Loong64OperandGenerator g(this);
Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
return;
}
-#endif
- EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
@@ -1803,6 +1888,16 @@ namespace {
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
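+  // With pointer compression, kLoong64Cmp32 may have to sign-extend its
+  // inputs in the code generator, so reserve two temp registers for it.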
+#ifdef V8_COMPRESS_POINTERS
+ if (opcode == kLoong64Cmp32) {
+ Loong64OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionOperand inputs[] = {left, right};
+ selector->EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs),
+ inputs, arraysize(temps), temps, cont);
+ return;
+ }
+#endif
selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1857,23 +1952,23 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
case kEqual:
case kNotEqual:
if (cont->IsSet()) {
- VisitCompare(selector, opcode, g.UseRegister(left),
+ VisitCompare(selector, opcode, g.UseUniqueRegister(left),
g.UseImmediate(right), cont);
} else {
- VisitCompare(selector, opcode, g.UseRegister(left),
- g.UseRegister(right), cont);
+ VisitCompare(selector, opcode, g.UseUniqueRegister(left),
+ g.UseImmediate(right), cont);
}
break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
- VisitCompare(selector, opcode, g.UseRegister(left),
+ VisitCompare(selector, opcode, g.UseUniqueRegister(left),
g.UseImmediate(right), cont);
break;
default:
- VisitCompare(selector, opcode, g.UseRegister(left),
- g.UseRegister(right), cont);
+ VisitCompare(selector, opcode, g.UseUniqueRegister(left),
+ g.UseUniqueRegister(right), cont);
}
}
} else if (g.CanBeImmediate(left, opcode)) {
@@ -1886,28 +1981,28 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
case kEqual:
case kNotEqual:
if (cont->IsSet()) {
- VisitCompare(selector, opcode, g.UseRegister(right),
+ VisitCompare(selector, opcode, g.UseUniqueRegister(right),
g.UseImmediate(left), cont);
} else {
- VisitCompare(selector, opcode, g.UseRegister(right),
- g.UseRegister(left), cont);
+ VisitCompare(selector, opcode, g.UseUniqueRegister(right),
+ g.UseImmediate(left), cont);
}
break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
case kUnsignedGreaterThanOrEqual:
- VisitCompare(selector, opcode, g.UseRegister(right),
+ VisitCompare(selector, opcode, g.UseUniqueRegister(right),
g.UseImmediate(left), cont);
break;
default:
- VisitCompare(selector, opcode, g.UseRegister(right),
- g.UseRegister(left), cont);
+ VisitCompare(selector, opcode, g.UseUniqueRegister(right),
+ g.UseUniqueRegister(left), cont);
}
}
} else {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
- cont);
+ VisitCompare(selector, opcode, g.UseUniqueRegister(left),
+ g.UseUniqueRegister(right), cont);
}
}
@@ -1947,23 +2042,16 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
   // so we need to do a full word32 compare in this case.
if (node->InputAt(0)->opcode() == IrOpcode::kCall ||
node->InputAt(1)->opcode() == IrOpcode::kCall) {
- VisitFullWord32Compare(selector, node, kLoong64Cmp, cont);
+ VisitFullWord32Compare(selector, node, kLoong64Cmp64, cont);
return;
}
#endif
- VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont);
+ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp32, cont);
}
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kLoong64Cmp, cont, false);
-}
-
-void EmitWordCompareZero(InstructionSelector* selector, Node* value,
- FlagsContinuation* cont) {
- Loong64OperandGenerator g(selector);
- selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value),
- g.TempImmediate(0), cont);
+ VisitWordCompare(selector, node, kLoong64Cmp64, cont, false);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
@@ -1992,12 +2080,26 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
case MachineRepresentation::kWord64:
code = kLoong64Word64AtomicLoadUint64;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ code = kLoong64AtomicLoadDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ code = kLoong64AtomicLoadDecompressTagged;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
- DCHECK_EQ(kTaggedSize, 8);
code = kLoong64Word64AtomicLoadUint64;
break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ code = kLoong64Word64AtomicLoadUint32;
+ break;
default:
UNREACHABLE();
}
@@ -2045,7 +2147,7 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
code = kArchAtomicStoreWithWriteBarrier;
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
} else {
switch (rep) {
case MachineRepresentation::kWord8:
@@ -2064,8 +2166,14 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
- DCHECK_EQ(kTaggedSize, 8);
- code = kLoong64StoreCompressTagged;
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kLoong64AtomicStoreCompressTagged;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK_EQ(width, AtomicWidth::kWord32);
+ code = kLoong64AtomicStoreCompressTagged;
break;
default:
UNREACHABLE();
@@ -2199,22 +2307,13 @@ void InstructionSelector::VisitStackPointerGreaterThan(
// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
FlagsContinuation* cont) {
+ Loong64OperandGenerator g(this);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value)) {
- if (value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (!m.right().Is(0)) break;
- user = value;
- value = m.left().node();
- } else if (value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (!m.right().Is(0)) break;
- user = value;
- value = m.left().node();
- } else {
- break;
- }
-
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
cont->Negate();
}
@@ -2317,7 +2416,8 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Continuation could not be combined with a compare, emit compare against 0.
- EmitWordCompareZero(this, value, cont);
+ VisitCompare(this, kLoong64Cmp32, g.UseRegister(value), g.TempImmediate(0),
+ cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -2439,11 +2539,6 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int64BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWordCompareZero(m.node(), m.left().node(), &cont);
- }
-
VisitWord64Compare(this, node, &cont);
}
@@ -2967,6 +3062,21 @@ void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
VisitS128Select(node);
}
+#define SIMD_UNIMP_OP_LIST(V) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms) \
+ V(I16x8DotI8x16I7x16S) \
+ V(I32x4DotI8x16I7x16AddS)
+
+#define SIMD_VISIT_UNIMP_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { UNIMPLEMENTED(); }
+SIMD_UNIMP_OP_LIST(SIMD_VISIT_UNIMP_OP)
+
+#undef SIMD_VISIT_UNIMP_OP
+#undef SIMD_UNIMP_OP_LIST
+
#if V8_ENABLE_WEBASSEMBLY
namespace {
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index a31e8230cf..5b1ac3963a 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
@@ -110,6 +110,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
+ case kMode_Root:
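+        // Root-relative access: the single immediate input is the offset
+        // from kRootRegister.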
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -162,10 +165,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
__ Daddu(scratch1_, object_, index_);
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
@@ -464,7 +466,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -475,7 +477,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -504,7 +506,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -512,10 +514,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize);
+ masm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ masm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -524,13 +526,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@@ -545,15 +547,14 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ Lw(kScratchReg,
- FieldMemOperand(kScratchReg,
- CodeDataContainer::kKindSpecificFlagsOffset));
+ __ Lhu(kScratchReg,
+ FieldMemOperand(kScratchReg, Code::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
@@ -575,8 +576,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
- __ Call(reg);
+ __ CallCodeObject(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -626,8 +626,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(reg);
+ __ JumpCodeObject(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -654,8 +653,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(a2);
+ __ CallCodeObject(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -770,7 +768,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -824,8 +822,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -1294,7 +1291,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64ModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1630,9 +1627,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
break;
- case kMips64Sb:
- __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+ case kMips64Sb: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Sb(i.InputOrZeroRegister(index), mem);
break;
+ }
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1645,12 +1645,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
- case kMips64Sh:
- __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+ case kMips64Sh: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Sh(i.InputOrZeroRegister(index), mem);
break;
- case kMips64Ush:
- __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
+ }
+ case kMips64Ush: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Ush(i.InputOrZeroRegister(index), mem, kScratchReg);
break;
+ }
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1669,18 +1675,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
break;
- case kMips64Sw:
- __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ case kMips64Sw: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Sw(i.InputOrZeroRegister(index), mem);
break;
- case kMips64Usw:
- __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ }
+ case kMips64Usw: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Usw(i.InputOrZeroRegister(index), mem);
break;
- case kMips64Sd:
- __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ }
+ case kMips64Sd: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Sd(i.InputOrZeroRegister(index), mem);
break;
- case kMips64Usd:
- __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ }
+ case kMips64Usd: {
+ size_t index = 0;
+ MemOperand mem = i.MemoryOperand(&index);
+ __ Usd(i.InputOrZeroRegister(index), mem);
break;
+ }
case kMips64Lwc1: {
__ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
@@ -1716,19 +1734,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kMips64Sdc1: {
- FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroDoubleRegister(index);
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ Sdc1(ft, i.MemoryOperand());
+ __ Sdc1(ft, operand);
break;
}
case kMips64Usdc1: {
- FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroDoubleRegister(index);
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
+ __ Usdc1(ft, operand, kScratchReg);
break;
}
case kMips64Sync: {
@@ -1775,7 +1797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64StoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
if (instr->InputAt(0)->IsSimd128Register()) {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1794,13 +1816,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128LoadSplat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
__ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kMips64S128Load8x8S: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1810,7 +1832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load8x8U: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1819,7 +1841,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load16x4S: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1829,7 +1851,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load16x4U: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1838,7 +1860,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load32x2S: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1848,7 +1870,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load32x2U: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1857,7 +1879,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load32Zero: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(dst, dst, dst);
__ Lwu(kScratchReg, i.MemoryOperand());
@@ -1865,7 +1887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Load64Zero: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(dst, dst, dst);
__ Ld(kScratchReg, i.MemoryOperand());
@@ -1873,7 +1895,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128LoadLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
@@ -1881,7 +1903,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128StoreLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
__ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
@@ -2059,7 +2081,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kMips64S128Const: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
@@ -2070,30 +2092,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128Zero: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(dst, dst, dst);
break;
}
case kMips64S128AllOnes: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_d(dst, dst, dst);
break;
}
case kMips64I32x4Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I32x4ExtractLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I32x4ReplaceLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (src != dst) {
@@ -2103,54 +2125,54 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4Add: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Sub: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F64x2Abs: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
break;
}
case kMips64F64x2Neg: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
break;
}
case kMips64F64x2Sqrt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F64x2Add: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d);
break;
}
case kMips64F64x2Sub: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d);
break;
}
case kMips64F64x2Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d);
break;
}
case kMips64F64x2Div: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d);
break;
}
case kMips64F64x2Min: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2173,7 +2195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F64x2Max: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2196,43 +2218,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F64x2Eq: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F64x2Ne: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F64x2Lt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F64x2Le: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F64x2Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ Move(kScratchReg, i.InputDoubleRegister(0));
__ fill_d(i.OutputSimd128Register(), kScratchReg);
break;
}
case kMips64F64x2ExtractLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_s_d(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
__ Move(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kMips64F64x2ReplaceLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
__ Move(kScratchReg, i.InputDoubleRegister(2));
@@ -2243,18 +2265,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fill_d(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I64x2ExtractLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64F64x2Pmin: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1);
@@ -2264,7 +2286,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F64x2Pmax: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1);
@@ -2274,31 +2296,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F64x2Ceil: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToPlusInf);
break;
}
case kMips64F64x2Floor: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToMinusInf);
break;
}
case kMips64F64x2Trunc: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToZero);
break;
}
case kMips64F64x2NearestInt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToNearest);
break;
}
case kMips64F64x2ConvertLowI32x4S: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
__ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
@@ -2307,19 +2329,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F64x2ConvertLowI32x4U: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
__ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
break;
}
case kMips64F64x2PromoteLowF32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I64x2ReplaceLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (src != dst) {
@@ -2329,32 +2351,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2Add: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I64x2Sub: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I64x2Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I64x2Neg: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_d(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I64x2Shl: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_d(kSimd128ScratchReg, i.InputRegister(1));
__ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2366,7 +2388,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2ShrS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_d(kSimd128ScratchReg, i.InputRegister(1));
__ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2378,7 +2400,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2ShrU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_d(kSimd128ScratchReg, i.InputRegister(1));
__ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2390,7 +2412,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2BitMask: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register scratch0 = kSimd128RegZero;
@@ -2403,13 +2425,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2Eq: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I64x2Ne: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
__ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
@@ -2417,26 +2439,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2GtS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I64x2GeS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I64x2Abs: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
}
case kMips64I64x2SConvertI32x4Low: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_w(kSimd128ScratchReg, src, src);
@@ -2445,7 +2467,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2SConvertI32x4High: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_w(kSimd128ScratchReg, src, src);
@@ -2454,14 +2476,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2UConvertI32x4Low: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I64x2UConvertI32x4High: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
@@ -2486,19 +2508,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
case kMips64F32x4ExtractLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
__ FmoveLow(i.OutputSingleRegister(), kScratchReg);
break;
}
case kMips64F32x4ReplaceLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
__ FmoveLow(kScratchReg, i.InputSingleRegister(2));
@@ -2509,48 +2531,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4SConvertI32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4UConvertI32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MaxS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MinS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Eq: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Ne: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMips64I32x4Shl: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_w(kSimd128ScratchReg, i.InputRegister(1));
__ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2562,7 +2584,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4ShrS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_w(kSimd128ScratchReg, i.InputRegister(1));
__ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2574,7 +2596,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4ShrU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_w(kSimd128ScratchReg, i.InputRegister(1));
__ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2586,26 +2608,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4MaxU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MinU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64S128Select: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
case kMips64S128AndNot: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register scratch = kSimd128ScratchReg,
dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
@@ -2615,41 +2637,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4Abs: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4Neg: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4Add: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Sub: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Div: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Max: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2672,7 +2694,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4Min: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2695,31 +2717,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4Eq: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Ne: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Lt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Le: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Pmin: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1);
@@ -2729,7 +2751,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4Pmax: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1);
@@ -2739,91 +2761,91 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4Ceil: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToPlusInf);
break;
}
case kMips64F32x4Floor: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToMinusInf);
break;
}
case kMips64F32x4Trunc: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToZero);
break;
}
case kMips64F32x4NearestInt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
kRoundToNearest);
break;
}
case kMips64F32x4DemoteF64x2Zero: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I32x4SConvertF32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4UConvertF32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4Sqrt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4Neg: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I32x4GtS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I32x4GeS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I32x4GtU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I32x4GeU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I32x4Abs: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
}
case kMips64I32x4BitMask: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register scratch0 = kSimd128RegZero;
@@ -2838,13 +2860,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4DotI16x8S: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4TruncSatF64x2SZero: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
__ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
@@ -2853,7 +2875,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4TruncSatF64x2UZero: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
__ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
@@ -2862,24 +2884,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I16x8ExtractLaneU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I16x8ExtractLaneS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I16x8ReplaceLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (src != dst) {
@@ -2889,14 +2911,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8Neg: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8Shl: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_h(kSimd128ScratchReg, i.InputRegister(1));
__ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2908,7 +2930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8ShrS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_h(kSimd128ScratchReg, i.InputRegister(1));
__ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2920,7 +2942,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8ShrU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_h(kSimd128ScratchReg, i.InputRegister(1));
__ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2932,123 +2954,123 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8Add: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8AddSatS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Sub: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8SubSatS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Mul: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MaxS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MinS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Eq: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Ne: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMips64I16x8GtS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I16x8GeS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I16x8AddSatU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8SubSatU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MaxU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MinU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8GtU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I16x8GeU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I16x8RoundingAverageU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I16x8Abs: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
}
case kMips64I16x8BitMask: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register scratch0 = kSimd128RegZero;
@@ -3065,30 +3087,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8Q15MulRSatS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I8x16ExtractLaneU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I8x16ExtractLaneS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I8x16ReplaceLane: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (src != dst) {
@@ -3098,14 +3120,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16Neg: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I8x16Shl: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_b(kSimd128ScratchReg, i.InputRegister(1));
__ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3117,7 +3139,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16ShrS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_b(kSimd128ScratchReg, i.InputRegister(1));
__ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3129,68 +3151,68 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16Add: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16AddSatS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16Sub: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16SubSatS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16MaxS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16MinS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16Eq: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16Ne: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMips64I8x16GtS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I8x16GeS: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I8x16ShrU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (instr->InputAt(1)->IsRegister()) {
__ fill_b(kSimd128ScratchReg, i.InputRegister(1));
__ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3202,61 +3224,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16AddSatU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16SubSatU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16MaxU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16MinU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I8x16GtU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I8x16GeU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I8x16RoundingAverageU: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0));
break;
}
case kMips64I8x16Abs: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
}
case kMips64I8x16Popcnt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I8x16BitMask: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
Simd128Register scratch0 = kSimd128RegZero;
@@ -3274,31 +3296,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S128And: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64S128Or: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64S128Xor: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64S128Not: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(0));
break;
}
case kMips64V128AnyTrue: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
__ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
@@ -3309,7 +3331,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I64x2AllTrue: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
@@ -3320,7 +3342,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4AllTrue: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
@@ -3331,7 +3353,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8AllTrue: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
@@ -3342,7 +3364,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16AllTrue: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
@@ -3353,17 +3375,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64MsaLd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kMips64MsaSt: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ st_b(i.InputSimd128Register(2), i.MemoryOperand());
break;
}
case kMips64S32x4InterleaveRight: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3373,7 +3395,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S32x4InterleaveLeft: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3383,7 +3405,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S32x4PackEven: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3393,7 +3415,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S32x4PackOdd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3403,7 +3425,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S32x4InterleaveEven: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3413,7 +3435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S32x4InterleaveOdd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3423,7 +3445,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S32x4Shuffle: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3477,7 +3499,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x8InterleaveRight: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3487,7 +3509,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x8InterleaveLeft: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3497,7 +3519,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x8PackEven: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3507,7 +3529,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x8PackOdd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3517,7 +3539,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x8InterleaveEven: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3527,7 +3549,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x8InterleaveOdd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3537,21 +3559,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S16x4Reverse: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
// src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
// shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
__ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
break;
}
case kMips64S16x2Reverse: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
// src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
// shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
__ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
break;
}
case kMips64S8x16InterleaveRight: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3561,7 +3583,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x16InterleaveLeft: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3571,7 +3593,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x16PackEven: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3581,7 +3603,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x16PackOdd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3591,7 +3613,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x16InterleaveEven: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3601,7 +3623,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x16InterleaveOdd: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3611,14 +3633,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x16Concat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
DCHECK(dst == i.InputSimd128Register(0));
__ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
break;
}
case kMips64I8x16Shuffle: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3654,7 +3676,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x8Reverse: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
// src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
// dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
// [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
@@ -3664,21 +3686,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64S8x4Reverse: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
// src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
// shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
__ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
break;
}
case kMips64S8x2Reverse: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
// src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
// shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
__ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
break;
}
case kMips64I32x4SConvertI16x8Low: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_h(kSimd128ScratchReg, src, src);
@@ -3687,7 +3709,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4SConvertI16x8High: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_h(kSimd128ScratchReg, src, src);
@@ -3696,21 +3718,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4UConvertI16x8Low: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I32x4UConvertI16x8High: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8SConvertI8x16Low: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_b(kSimd128ScratchReg, src, src);
@@ -3719,7 +3741,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8SConvertI8x16High: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_b(kSimd128ScratchReg, src, src);
@@ -3728,7 +3750,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8SConvertI32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -3738,7 +3760,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8UConvertI32x4: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -3751,21 +3773,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8UConvertI8x16Low: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8UConvertI8x16High: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I8x16SConvertI16x8: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -3775,7 +3797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16UConvertI16x8: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -3796,11 +3818,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
-void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ tasm->
+#define __ masm->
MipsOperandConverter i(gen, instr);
// MIPS does not have condition code flags, so compare and branch are
@@ -3871,7 +3893,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ tasm()->
+#define __ masm()->
}
// Assembles branches after an instruction.
@@ -3879,7 +3901,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -3944,7 +3966,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
};
auto ool = zone()->New<OutOfLineTrap>(this, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -3984,7 +4006,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Overflow occurs if overflow register is negative
__ slt(result, kScratchReg, zero_reg);
} else if (instr->arch_opcode() == kMips64MulOvf ||
- instr->arch_opcode() == kMips64MulOvf) {
+ instr->arch_opcode() == kMips64DMulOvf) {
// Overflow occurs if overflow register is not zero
__ Sgtu(result, kScratchReg, zero_reg);
} else if (instr->arch_opcode() == kMips64Cmp) {
@@ -4189,6 +4211,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ Push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -4371,11 +4397,71 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ MipsOperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ Push(g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ Ld(scratch, g.ToMemOperand(source));
+ __ Push(scratch);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ Dsubu(sp, sp, Operand(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ MipsOperandConverter g(this, nullptr);
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ if (dest->IsRegister()) {
+ __ Pop(g.ToRegister(dest));
+ } else if (dest->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ Sd(scratch, g.ToMemOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ Daddu(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ Daddu(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
- move_cycle_.temps.emplace(tasm());
+ move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they
@@ -4399,27 +4485,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
} else {
// The scratch registers are blocked by pending moves. Use the stack
// instead.
- int new_slots = ElementSizeInPointers(rep);
- MipsOperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ Push(g.ToRegister(source));
- } else if (source->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.Acquire();
- __ Ld(scratch, g.ToMemOperand(source));
- __ Push(scratch);
- } else {
- // No push instruction for this operand type. Bump the stack pointer and
- // assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ Dsubu(sp, sp, Operand(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -4431,25 +4497,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
move_cycle_.scratch_reg->code());
AssembleMove(&scratch, dest);
} else {
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- MipsOperandConverter g(this, nullptr);
- if (dest->IsRegister()) {
- __ Pop(g.ToRegister(dest));
- } else if (dest->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.Acquire();
- __ Pop(scratch);
- __ Sd(scratch, g.ToMemOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ Daddu(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
// Restore the default state to release the {UseScratchRegisterScope} and to
// prepare for the next cycle.
@@ -4459,7 +4507,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
void CodeGenerator::SetPendingMove(MoveOperands* move) {
InstructionOperand* src = &move->source();
InstructionOperand* dst = &move->destination();
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
if (src->IsConstant() && dst->IsFPLocationOperand()) {
Register temp = temps.Acquire();
move_cycle_.scratch_regs.set(temp);
@@ -4578,7 +4626,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
MSARegister src = g.ToSimd128Register(source);
if (destination->IsSimd128Register()) {
MSARegister dst = g.ToSimd128Register(destination);
@@ -4602,7 +4650,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
if (destination->IsSimd128Register()) {
__ ld_b(g.ToSimd128Register(destination), src);
} else {
@@ -4660,7 +4708,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
MSARegister temp = kSimd128ScratchReg;
MSARegister src = g.ToSimd128Register(source);
if (destination->IsSimd128Register()) {
@@ -4700,7 +4748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand dst1(dst0.rm(), dst0.offset() + kInt64Size);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
MSARegister temp_1 = kSimd128ScratchReg;
__ ld_b(temp_1, dst0); // Save destination in temp_1.
__ Ld(temp_0, src0); // Then use temp_0 to copy source to destination.
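
The mips64 code-generator hunks above replace the inline push/pop logic in MoveToTempLocation/MoveTempLocationTo with the new Push/Pop/PopTempStackSlots helpers, which also keep sp_delta and temp_slots in sync. The following is a rough, standalone illustration of that bookkeeping only; the struct, method names, and the single-slot simplification are assumptions, not V8 code.

// Illustrative sketch, not part of the diff: models how the new helpers
// account for temporary stack slots used to resolve move cycles.
#include <cassert>
#include <cstdio>

constexpr int kSystemPointerSize = 8;

struct FrameState {
  int sp_delta = 0;    // extra slots currently pushed below the frame
  int temp_slots = 0;  // how many of those are move-cycle temporaries

  // Mirrors Push(): reserve one pointer-sized slot and record it.
  // Returns a byte offset as a loose stand-in for the returned stack-slot operand.
  int PushTemp() {
    ++sp_delta;
    ++temp_slots;
    return sp_delta * kSystemPointerSize;
  }

  // Mirrors Pop(): release the most recently reserved temporary slot.
  void PopTemp() {
    assert(temp_slots > 0);
    --sp_delta;
    --temp_slots;
  }

  // Mirrors PopTempStackSlots(): drop any remaining temporaries at once.
  void PopAllTemps() {
    sp_delta -= temp_slots;
    temp_slots = 0;
  }
};

int main() {
  FrameState frame;
  frame.PushTemp();
  frame.PushTemp();
  frame.PopTemp();
  frame.PopAllTemps();
  assert(frame.sp_delta == 0 && frame.temp_slots == 0);
  std::printf("temp slots balanced\n");
  return 0;
}
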
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 6b6181de59..81e7f77682 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -416,8 +416,9 @@ namespace compiler {
// MRR = [register + register]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [%rr + K] */
} // namespace compiler
} // namespace internal
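
For reference, the new kMode_Root entry above describes a memory operand formed from the root register plus a constant offset, alongside the existing register+immediate (MRI) and register+register (MRR) modes. Below is a minimal standalone sketch of how the three modes resolve to an effective address; the enum, function, and variable names are made up for illustration and are not the V8 API.

// Illustrative sketch, not part of the diff.
#include <cstdint>
#include <cstdio>

enum class AddressingMode { kMRI, kMRR, kRoot };

uint64_t EffectiveAddress(AddressingMode mode, uint64_t base, uint64_t index,
                          uint64_t root_register) {
  switch (mode) {
    case AddressingMode::kMRI:   // [%r0 + K]: register base plus constant
      return base + index;
    case AddressingMode::kMRR:   // [%r0 + %r1]: register base plus register
      return base + index;
    case AddressingMode::kRoot:  // [%rr + K]: root register plus constant
      return root_register + index;
  }
  return 0;
}

int main() {
  uint64_t root = 0x1000;
  std::printf("%llx\n", (unsigned long long)EffectiveAddress(
                            AddressingMode::kRoot, 0, 0x40, root));
  return 0;
}
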
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 1d17d4bd58..af0746622f 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -775,7 +775,7 @@ int PrepareForTailCallLatency() {
int AssertLatency() { return 1; }
int PrepareCallCFunctionLatency() {
- int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (frame_alignment > kSystemPointerSize) {
return 1 + DsubuLatency(false) + AndLatency(false) + 1;
} else {
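
The scheduler hunk itself only renames TurboAssembler to MacroAssembler, but the surrounding context shows that the latency estimate for preparing a C function call depends on whether the activation frame needs extra alignment. A toy sketch of that estimate follows; the latency constants and the fallback branch are placeholders, since the real values are not visible in this hunk.

// Illustrative sketch, not part of the diff.
#include <cstdio>

constexpr int kSystemPointerSize = 8;

int PrepareCallCFunctionLatencySketch(int frame_alignment) {
  const int kDsubuLatency = 1;  // placeholder cost of the stack adjustment
  const int kAndLatency = 1;    // placeholder cost of the alignment mask
  if (frame_alignment > kSystemPointerSize) {
    // Save sp, subtract, mask to the alignment, and later restore.
    return 1 + kDsubuLatency + kAndLatency + 1;
  }
  return kDsubuLatency;  // assumed fallback; the real else branch is not shown
}

int main() {
  std::printf("aligned=16: %d, aligned=8: %d\n",
              PrepareCallCFunctionLatencySketch(16),
              PrepareCallCFunctionLatencySketch(8));
  return 0;
}
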
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 8b4398eecb..70d19dc4c1 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -359,6 +359,13 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Root),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(index));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(output == nullptr ? node : output),
@@ -585,6 +592,13 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ // This will only work if {index} is a constant.
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseImmediate(index), g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index),
@@ -1481,21 +1495,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- // On MIPS64, int32 values should all be sign-extended to 64-bit, so
- // no need to sign-extend them here.
- // But when call to a host function in simulator, if the function return an
- // int32 value, the simulator do not sign-extend to int64, because in
- // simulator we do not know the function whether return an int32 or int64.
-#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kCall) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Lw;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
Mips64OperandGenerator g(this);
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
- return;
}
-#endif
- EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
@@ -2287,7 +2313,7 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
code = kArchAtomicStoreWithWriteBarrier;
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
switch (rep) {
@@ -3206,6 +3232,21 @@ void InstructionSelector::VisitS128Select(Node* node) {
VisitRRRR(this, kMips64S128Select, node);
}
+#define SIMD_UNIMP_OP_LIST(V) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms) \
+ V(I16x8DotI8x16I7x16S) \
+ V(I32x4DotI8x16I7x16AddS)
+
+#define SIMD_VISIT_UNIMP_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { UNIMPLEMENTED(); }
+SIMD_UNIMP_OP_LIST(SIMD_VISIT_UNIMP_OP)
+
+#undef SIMD_VISIT_UNIMP_OP
+#undef SIMD_UNIMP_OP_LIST
+
#if V8_ENABLE_WEBASSEMBLY
namespace {
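
The VisitChangeInt32ToInt64 hunk above folds a coverable load into a sign-extending load, and otherwise falls back to a shift-by-zero, which sign-extends the low 32 bits on MIPS64. The sketch below mirrors that opcode selection using stand-in enums and strings rather than the real MachineRepresentation and kMips64* instruction codes.

// Illustrative sketch, not part of the diff.
#include <cstdio>
#include <string>

enum class Rep { kBit, kWord8, kWord16, kWord32 };

std::string SignExtendingLoad(Rep rep, bool is_unsigned) {
  switch (rep) {
    case Rep::kBit:
    case Rep::kWord8:
      return is_unsigned ? "Lbu" : "Lb";  // byte load, zero- or sign-extending
    case Rep::kWord16:
      return is_unsigned ? "Lhu" : "Lh";  // halfword load
    case Rep::kWord32:
      return "Lw";                        // word load sign-extends on MIPS64
  }
  return "unreachable";
}

int main() {
  // When the int32 value is not a coverable load, the selector instead emits
  // "shift left by 0", which performs the sign extension.
  std::printf("%s\n",
              SignExtendingLoad(Rep::kWord16, /*is_unsigned=*/false).c_str());
  return 0;
}
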
diff --git a/deps/v8/src/compiler/backend/move-optimizer.cc b/deps/v8/src/compiler/backend/move-optimizer.cc
index 8544259027..e901c22819 100644
--- a/deps/v8/src/compiler/backend/move-optimizer.cc
+++ b/deps/v8/src/compiler/backend/move-optimizer.cc
@@ -500,10 +500,36 @@ bool IsSlot(const InstructionOperand& op) {
return op.IsStackSlot() || op.IsFPStackSlot();
}
+bool Is64BitsWide(const InstructionOperand& op) {
+ MachineRepresentation rep = LocationOperand::cast(&op)->representation();
+#if V8_COMPRESS_POINTERS
+ // We can't use {ElementSizeInBytes} because it's made for on-heap object
+ // slots and assumes that kTagged == kCompressed, whereas for the purpose
+ // here we specifically need to distinguish those cases.
+ return (rep == MachineRepresentation::kTagged ||
+ rep == MachineRepresentation::kTaggedPointer ||
+ rep == MachineRepresentation::kWord64);
+#else
+ return rep == MachineRepresentation::kWord64;
+#endif
+}
+
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
if (!a->source().EqualsCanonicalized(b->source())) {
return a->source().CompareCanonicalized(b->source());
}
+ // The replacements below are only safe if wider values are preferred.
+ // In particular, replacing an uncompressed pointer with a compressed
+ // pointer is disallowed.
+ if (a->destination().IsLocationOperand() &&
+ b->destination().IsLocationOperand()) {
+ if (Is64BitsWide(a->destination()) && !Is64BitsWide(b->destination())) {
+ return true;
+ }
+ if (!Is64BitsWide(a->destination()) && Is64BitsWide(b->destination())) {
+ return false;
+ }
+ }
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
return a->destination().CompareCanonicalized(b->destination());
@@ -538,8 +564,13 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
group_begin = load;
continue;
}
- // Nothing to be gained from splitting here.
- if (IsSlot(group_begin->destination())) continue;
+ // Nothing to be gained from splitting here. However, due to the sorting
+ // scheme, there could be optimizable groups of loads later in the group,
+ // so bump the {group_begin} along.
+ if (IsSlot(group_begin->destination())) {
+ group_begin = load;
+ continue;
+ }
// Insert new move into slot 1.
ParallelMove* slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
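
The move-optimizer hunk adds Is64BitsWide so that, when grouped loads share a source, a 64-bit-wide (uncompressed) destination sorts ahead of a narrower one and is therefore never replaced by it. Below is a self-contained sketch of that comparator over simplified move records; the field names and the plain-int source id are assumptions made for illustration.

// Illustrative sketch, not part of the diff.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Move {
  int source;         // canonicalized source id
  bool dest_is_64;    // destination holds a full 64-bit / uncompressed value
  bool dest_is_slot;  // destination is a stack slot rather than a register
};

bool LoadCompare(const Move& a, const Move& b) {
  if (a.source != b.source) return a.source < b.source;
  if (a.dest_is_64 != b.dest_is_64) return a.dest_is_64;        // wider first
  if (a.dest_is_slot != b.dest_is_slot) return b.dest_is_slot;  // registers first
  return false;
}

int main() {
  std::vector<Move> moves = {
      {1, false, false}, {1, true, false}, {0, false, true}};
  std::sort(moves.begin(), moves.end(), LoadCompare);
  for (const Move& m : moves)
    std::printf("src=%d wide=%d slot=%d\n", m.source, m.dest_is_64,
                m.dest_is_slot);
  return 0;
}
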
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 31b5a26d6f..a3492faf0f 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
#define kScratchReg r11
@@ -98,6 +98,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case kMode_MRR:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputRegister(index));
}
UNREACHABLE();
}
@@ -170,14 +173,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
if (offset_ == no_reg) {
__ addi(scratch1_, object_, Operand(offset_immediate_));
} else {
@@ -409,7 +411,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define ASSEMBLE_FLOAT_MODULO() \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -422,7 +424,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -435,7 +437,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -448,9 +450,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrp, asm_instrx) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- bool is_atomic = i.InputInt32(2); \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ bool is_atomic = i.InputInt32(index); \
if (mode == kMode_MRI) { \
intptr_t offset = operand.offset(); \
if (is_int16(offset)) { \
@@ -470,9 +473,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
must_be_aligned) \
do { \
Register result = i.OutputRegister(); \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- bool is_atomic = i.InputInt32(2); \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ bool is_atomic = i.InputInt32(index); \
if (mode == kMode_MRI) { \
intptr_t offset = operand.offset(); \
bool misaligned = offset & 3; \
@@ -489,16 +493,17 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_LOAD_INTEGER_RR(asm_instr) \
- do { \
- Register result = i.OutputRegister(); \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- DCHECK_EQ(mode, kMode_MRR); \
- bool is_atomic = i.InputInt32(2); \
- __ asm_instr(result, operand); \
- if (is_atomic) __ lwsync(); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_LOAD_INTEGER_RR(asm_instr) \
+ do { \
+ Register result = i.OutputRegister(); \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DCHECK_EQ(mode, kMode_MRR); \
+ bool is_atomic = i.InputInt32(index); \
+ __ asm_instr(result, operand); \
+ if (is_atomic) __ lwsync(); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrp, asm_instrx) \
@@ -533,7 +538,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
- bool is_atomic = i.InputInt32(3); \
+ bool is_atomic = i.InputInt32(index + 1); \
if (is_atomic) __ lwsync(); \
if (mode == kMode_MRI) { \
intptr_t offset = operand.offset(); \
@@ -558,7 +563,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, &index); \
DCHECK_EQ(mode, kMode_MRR); \
Register value = i.InputRegister(index); \
- bool is_atomic = i.InputInt32(3); \
+ bool is_atomic = i.InputInt32(index + 1); \
if (is_atomic) __ lwsync(); \
__ asm_instr(value, operand); \
if (is_atomic) __ sync(); \
@@ -680,20 +685,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- tasm->Push((*pending_pushes)[0]);
+ masm->Push((*pending_pushes)[0]);
break;
case 2:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -704,7 +709,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
}
void AdjustStackPointerForTailCall(
- TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -712,15 +717,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
+ masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
+ masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -742,7 +747,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- tasm(), frame_access_state(),
+ masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
// Pushes of non-register data types are not supported.
@@ -752,20 +757,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) {
- FlushPendingPushRegisters(tasm(), frame_access_state(),
+ FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes);
}
move->Eliminate();
}
- FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@@ -781,7 +786,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -792,12 +797,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(
- r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
- __ LoadS32(r11,
- FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset),
- r0);
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ __ LoadTaggedField(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+ r0);
+ __ LoadU16(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne, cr0);
@@ -812,7 +815,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
switch (opcode) {
case kArchCallCodeObject: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- tasm());
+ masm());
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -885,7 +888,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -906,18 +909,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- tasm());
+ masm());
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
__ CmpS64(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadTaggedPointerField(
- r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
+ __ LoadTaggedField(r5, FieldMemOperand(func, JSFunction::kCodeOffset),
+ r0);
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1005,10 +1008,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
+ MacroAssembler::SetIsolateDataSlots::kYes,
has_function_descriptor);
} else {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_gp_parameters, num_fp_parameters,
+ MacroAssembler::SetIsolateDataSlots::kYes,
has_function_descriptor);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -1060,7 +1065,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -1123,8 +1128,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
@@ -2249,6 +2253,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I16x8SubSatU) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
+ V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16MinS) \
@@ -2264,6 +2269,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I8x16SubSatU) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
+ V(I8x16RoundingAverageU) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
@@ -2299,6 +2305,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I32x4ExtMulHighI16x8S) \
V(I32x4ExtMulLowI16x8U) \
V(I32x4ExtMulHighI16x8U) \
+ V(I32x4DotI16x8S) \
V(I16x8Ne) \
V(I16x8GeS) \
V(I16x8GeU) \
@@ -2306,9 +2313,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I16x8ExtMulHighI8x16S) \
V(I16x8ExtMulLowI8x16U) \
V(I16x8ExtMulHighI8x16U) \
+ V(I16x8Q15MulRSatS) \
+ V(I16x8DotI8x16S) \
V(I8x16Ne) \
V(I8x16GeS) \
- V(I8x16GeU)
+ V(I8x16GeU) \
+ V(I8x16Swizzle)
#define EMIT_SIMD_BINOP_WITH_SCRATCH(name) \
case kPPC_##name: { \
@@ -2351,8 +2361,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(F64x2Ceil) \
V(F64x2Floor) \
V(F64x2Trunc) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
V(I64x2Neg) \
V(I32x4Neg) \
V(F32x4Sqrt) \
@@ -2364,6 +2377,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I64x2SConvertI32x4High) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertF32x4) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I8x16Popcnt) \
@@ -2379,8 +2393,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef SIMD_UNOP_LIST
#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
+ V(F32x4DemoteF64x2Zero) \
V(I64x2Abs) \
V(I32x4Abs) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
V(I16x8Abs) \
V(I16x8Neg) \
V(I8x16Abs) \
@@ -2411,6 +2429,96 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
+#define SIMD_QFM_LIST(V) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms)
+#define EMIT_SIMD_QFM(name) \
+ case kPPC_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1), i.InputSimd128Register(2), \
+ kScratchSimd128Reg); \
+ break; \
+ }
+ SIMD_QFM_LIST(EMIT_SIMD_QFM)
+#undef EMIT_SIMD_QFM
+#undef SIMD_QFM_LIST
+
+#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U)
+#define EMIT_SIMD_EXT_ADD_PAIRWISE(name) \
+ case kPPC_##name: { \
+ __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ kScratchSimd128Reg, kScratchSimd128Reg2); \
+ break; \
+ }
+ SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
+#undef EMIT_SIMD_EXT_ADD_PAIRWISE
+#undef SIMD_EXT_ADD_PAIRWISE_LIST
+
+#define SIMD_LOAD_LANE_LIST(V) \
+ V(S128Load64Lane, LoadLane64LE) \
+ V(S128Load32Lane, LoadLane32LE) \
+ V(S128Load16Lane, LoadLane16LE) \
+ V(S128Load8Lane, LoadLane8LE)
+
+#define EMIT_SIMD_LOAD_LANE(name, op) \
+ case kPPC_##name: { \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ AddressingMode mode = kMode_None; \
+ size_t index = 1; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DCHECK_EQ(mode, kMode_MRR); \
+ __ op(dst, operand, i.InputUint8(3), kScratchReg, kScratchSimd128Reg); \
+ break; \
+ }
+ SIMD_LOAD_LANE_LIST(EMIT_SIMD_LOAD_LANE)
+#undef EMIT_SIMD_LOAD_LANE
+#undef SIMD_LOAD_LANE_LIST
+
+#define SIMD_STORE_LANE_LIST(V) \
+ V(S128Store64Lane, StoreLane64LE) \
+ V(S128Store32Lane, StoreLane32LE) \
+ V(S128Store16Lane, StoreLane16LE) \
+ V(S128Store8Lane, StoreLane8LE)
+
+#define EMIT_SIMD_STORE_LANE(name, op) \
+ case kPPC_##name: { \
+ AddressingMode mode = kMode_None; \
+ size_t index = 1; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DCHECK_EQ(mode, kMode_MRR); \
+ __ op(i.InputSimd128Register(0), operand, i.InputUint8(3), kScratchReg, \
+ kScratchSimd128Reg); \
+ break; \
+ }
+ SIMD_STORE_LANE_LIST(EMIT_SIMD_STORE_LANE)
+#undef EMIT_SIMD_STORE_LANE
+#undef SIMD_STORE_LANE_LIST
+
+#define SIMD_LOAD_SPLAT(V) \
+ V(S128Load64Splat, LoadAndSplat64x2LE) \
+ V(S128Load32Splat, LoadAndSplat32x4LE) \
+ V(S128Load16Splat, LoadAndSplat16x8LE) \
+ V(S128Load8Splat, LoadAndSplat8x16LE)
+
+#define EMIT_SIMD_LOAD_SPLAT(name, op) \
+ case kPPC_##name: { \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ DCHECK_EQ(mode, kMode_MRR); \
+ __ op(i.OutputSimd128Register(), operand, kScratchReg); \
+ break; \
+ }
+ SIMD_LOAD_SPLAT(EMIT_SIMD_LOAD_SPLAT)
+#undef EMIT_SIMD_LOAD_SPLAT
+#undef SIMD_LOAD_SPLAT
+
case kPPC_F64x2Splat: {
__ F64x2Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0),
kScratchReg);
@@ -2540,9 +2648,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_S128Const: {
uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
- __ mov(r0, Operand(low));
- __ mov(ip, Operand(high));
- __ mtvsrdd(i.OutputSimd128Register(), ip, r0);
+ __ S128Const(i.OutputSimd128Register(), high, low, r0, ip);
break;
}
case kPPC_S128Zero: {
@@ -2560,7 +2666,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register mask = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register src2 = i.InputSimd128Register(2);
- __ vsel(dst, src2, src1, mask);
+ __ S128Select(dst, src1, src2, mask);
break;
}
case kPPC_V128AnyTrue: {
@@ -2568,27 +2674,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchSimd128Reg);
break;
}
- case kPPC_I32x4SConvertF32x4: {
- Simd128Register src = i.InputSimd128Register(0);
- // NaN to 0
- __ vor(kScratchSimd128Reg, src, src);
- __ xvcmpeqsp(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ vand(kScratchSimd128Reg, src, kScratchSimd128Reg);
- __ xvcvspsxws(i.OutputSimd128Register(), kScratchSimd128Reg);
- break;
- }
- case kPPC_I32x4UConvertF32x4: {
- __ xvcvspuxws(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
- case kPPC_F32x4SConvertI32x4: {
- __ xvcvsxwsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
- case kPPC_F32x4UConvertI32x4: {
- __ xvcvuxwsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
case kPPC_F64x2ConvertLowI32x4U: {
__ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
i.InputSimd128Register(0), kScratchReg,
@@ -2632,444 +2717,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I8x16Shuffle: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
- __ mov(r0, Operand(low));
- __ mov(ip, Operand(high));
- __ mtvsrdd(kScratchSimd128Reg, ip, r0);
- __ vperm(dst, src0, src1, kScratchSimd128Reg);
- break;
- }
- case kPPC_I8x16Swizzle: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Saturate the indices to 5 bits. Input indices more than 31 should
- // return 0.
- __ xxspltib(kScratchSimd128Reg, Operand(31));
- __ vminub(kScratchSimd128Reg, src1, kScratchSimd128Reg);
- // input needs to be reversed.
- __ xxbrq(dst, src0);
- __ vxor(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
- __ vperm(dst, dst, kSimd128RegZero, kScratchSimd128Reg);
- break;
- }
- case kPPC_F64x2Qfma: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vor(kScratchSimd128Reg, src1, src1);
- __ xvmaddmdp(kScratchSimd128Reg, src2, src0);
- __ vor(dst, kScratchSimd128Reg, kScratchSimd128Reg);
- break;
- }
- case kPPC_F64x2Qfms: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vor(kScratchSimd128Reg, src1, src1);
- __ xvnmsubmdp(kScratchSimd128Reg, src2, src0);
- __ vor(dst, kScratchSimd128Reg, kScratchSimd128Reg);
- break;
- }
- case kPPC_F32x4Qfma: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vor(kScratchSimd128Reg, src1, src1);
- __ xvmaddmsp(kScratchSimd128Reg, src2, src0);
- __ vor(dst, kScratchSimd128Reg, kScratchSimd128Reg);
- break;
- }
- case kPPC_F32x4Qfms: {
- Simd128Register src0 = i.InputSimd128Register(0);
- Simd128Register src1 = i.InputSimd128Register(1);
- Simd128Register src2 = i.InputSimd128Register(2);
- Simd128Register dst = i.OutputSimd128Register();
- __ vor(kScratchSimd128Reg, src1, src1);
- __ xvnmsubmsp(kScratchSimd128Reg, src2, src0);
- __ vor(dst, kScratchSimd128Reg, kScratchSimd128Reg);
- break;
- }
- case kPPC_I16x8RoundingAverageU: {
- __ vavguh(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
- case kPPC_I8x16RoundingAverageU: {
- __ vavgub(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ __ I8x16Shuffle(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), high, low, r0, ip,
+ kScratchSimd128Reg);
break;
}
case kPPC_I64x2BitMask: {
- if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
- __ vextractdm(i.OutputRegister(), i.InputSimd128Register(0));
- } else {
- __ mov(kScratchReg,
- Operand(0x8080808080800040)); // Select 0 for the high bits.
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
- }
+ __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg, kScratchSimd128Reg);
break;
}
case kPPC_I32x4BitMask: {
- if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
- __ vextractwm(i.OutputRegister(), i.InputSimd128Register(0));
- } else {
- __ mov(kScratchReg,
- Operand(0x8080808000204060)); // Select 0 for the high bits.
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
- }
+ __ I32x4BitMask(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg, kScratchSimd128Reg);
break;
}
case kPPC_I16x8BitMask: {
- if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
- __ vextracthm(i.OutputRegister(), i.InputSimd128Register(0));
- } else {
- __ mov(kScratchReg, Operand(0x10203040506070));
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
- }
+ __ I16x8BitMask(i.OutputRegister(), i.InputSimd128Register(0),
+ kScratchReg, kScratchSimd128Reg);
break;
}
case kPPC_I8x16BitMask: {
- if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
- __ vextractbm(i.OutputRegister(), i.InputSimd128Register(0));
- } else {
- __ mov(kScratchReg, Operand(0x8101820283038));
- __ mov(ip, Operand(0x4048505860687078));
- __ mtvsrdd(kScratchSimd128Reg, kScratchReg, ip);
- __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0),
- kScratchSimd128Reg);
- __ vextractuh(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6));
- __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg);
- }
- break;
- }
- case kPPC_I32x4DotI16x8S: {
- __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ vmsumshm(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchSimd128Reg);
- break;
- }
-#define ASSEMBLE_LOAD_TRANSFORM(scratch, load_instr) \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- DCHECK_EQ(mode, kMode_MRR); \
- __ load_instr(scratch, operand);
-#if V8_TARGET_BIG_ENDIAN
-#define MAYBE_REVERSE_BYTES(reg, instr) __ instr(reg, reg);
-#else
-#define MAYBE_REVERSE_BYTES(reg, instr)
-#endif
- case kPPC_S128Load8Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsibzx)
- __ vspltb(dst, kScratchSimd128Reg, Operand(7));
+ __ I8x16BitMask(i.OutputRegister(), i.InputSimd128Register(0), r0, ip,
+ kScratchSimd128Reg);
break;
}
- case kPPC_S128Load16Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsihzx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrh)
- __ vsplth(dst, kScratchSimd128Reg, Operand(3));
- break;
- }
- case kPPC_S128Load32Splat: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsiwzx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
- __ vspltw(dst, kScratchSimd128Reg, Operand(1));
- break;
- }
- case kPPC_S128Load64Splat: {
- constexpr int lane_width_in_bytes = 8;
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(dst, lxsdx)
- MAYBE_REVERSE_BYTES(dst, xxbrd)
- __ vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
+ case kPPC_I32x4DotI8x16AddS: {
+ __ I32x4DotI8x16AddS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
break;
}
+#define PREP_LOAD_EXTEND() \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ DCHECK_EQ(mode, kMode_MRR);
case kPPC_S128Load8x8S: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vupkhsb(dst, kScratchSimd128Reg);
+ PREP_LOAD_EXTEND()
+ __ LoadAndExtend8x8SLE(i.OutputSimd128Register(), operand, kScratchReg);
break;
}
case kPPC_S128Load8x8U: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vupkhsb(dst, kScratchSimd128Reg);
- // Zero extend.
- __ li(ip, Operand(0xFF));
- __ mtvsrd(kScratchSimd128Reg, ip);
- __ vsplth(kScratchSimd128Reg, kScratchSimd128Reg, Operand(3));
- __ vand(dst, kScratchSimd128Reg, dst);
+ PREP_LOAD_EXTEND()
+ __ LoadAndExtend8x8ULE(i.OutputSimd128Register(), operand, kScratchReg,
+ kScratchSimd128Reg);
break;
}
case kPPC_S128Load16x4S: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vupkhsh(dst, kScratchSimd128Reg);
+ PREP_LOAD_EXTEND()
+ __ LoadAndExtend16x4SLE(i.OutputSimd128Register(), operand, kScratchReg);
break;
}
case kPPC_S128Load16x4U: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vupkhsh(dst, kScratchSimd128Reg);
- // Zero extend.
- __ mov(ip, Operand(0xFFFF));
- __ mtvsrd(kScratchSimd128Reg, ip);
- __ vspltw(kScratchSimd128Reg, kScratchSimd128Reg, Operand(1));
- __ vand(dst, kScratchSimd128Reg, dst);
-
+ PREP_LOAD_EXTEND()
+ __ LoadAndExtend16x4ULE(i.OutputSimd128Register(), operand, kScratchReg,
+ kScratchSimd128Reg);
break;
}
case kPPC_S128Load32x2S: {
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vupkhsw(dst, kScratchSimd128Reg);
+ PREP_LOAD_EXTEND()
+ __ LoadAndExtend32x2SLE(i.OutputSimd128Register(), operand, kScratchReg);
break;
}
case kPPC_S128Load32x2U: {
- constexpr int lane_width_in_bytes = 8;
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vupkhsw(dst, kScratchSimd128Reg);
- // Zero extend.
- __ mov(ip, Operand(0xFFFFFFFF));
- __ mtvsrd(kScratchSimd128Reg, ip);
- __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
- Operand(1 * lane_width_in_bytes));
- __ vand(dst, kScratchSimd128Reg, dst);
+ PREP_LOAD_EXTEND()
+ __ LoadAndExtend32x2ULE(i.OutputSimd128Register(), operand, kScratchReg,
+ kScratchSimd128Reg);
break;
}
case kPPC_S128Load32Zero: {
- constexpr int lane_width_in_bytes = 4;
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsiwzx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
- __ vxor(dst, dst, dst);
- __ vinsertw(dst, kScratchSimd128Reg, Operand(3 * lane_width_in_bytes));
+ PREP_LOAD_EXTEND()
+ __ LoadV32ZeroLE(i.OutputSimd128Register(), operand, kScratchReg,
+ kScratchSimd128Reg);
break;
}
case kPPC_S128Load64Zero: {
- constexpr int lane_width_in_bytes = 8;
- Simd128Register dst = i.OutputSimd128Register();
- ASSEMBLE_LOAD_TRANSFORM(kScratchSimd128Reg, lxsdx)
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vxor(dst, dst, dst);
- __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
- break;
- }
-#undef ASSEMBLE_LOAD_TRANSFORM
- case kPPC_S128Load8Lane: {
- Simd128Register dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- AddressingMode mode = kMode_None;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ lxsibzx(kScratchSimd128Reg, operand);
- __ vinsertb(dst, kScratchSimd128Reg, Operand(15 - i.InputUint8(3)));
- break;
- }
- case kPPC_S128Load16Lane: {
- Simd128Register dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- constexpr int lane_width_in_bytes = 2;
- AddressingMode mode = kMode_None;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ lxsihzx(kScratchSimd128Reg, operand);
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrh)
- __ vinserth(dst, kScratchSimd128Reg,
- Operand((7 - i.InputUint8(3)) * lane_width_in_bytes));
- break;
- }
- case kPPC_S128Load32Lane: {
- Simd128Register dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- constexpr int lane_width_in_bytes = 4;
- AddressingMode mode = kMode_None;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ lxsiwzx(kScratchSimd128Reg, operand);
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
- __ vinsertw(dst, kScratchSimd128Reg,
- Operand((3 - i.InputUint8(3)) * lane_width_in_bytes));
- break;
- }
- case kPPC_S128Load64Lane: {
- Simd128Register dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- constexpr int lane_width_in_bytes = 8;
- AddressingMode mode = kMode_None;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ lxsdx(kScratchSimd128Reg, operand);
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ vinsertd(dst, kScratchSimd128Reg,
- Operand((1 - i.InputUint8(3)) * lane_width_in_bytes));
- break;
- }
- case kPPC_S128Store8Lane: {
- AddressingMode mode = kMode_None;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ vextractub(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand(15 - i.InputUint8(3)));
- __ stxsibx(kScratchSimd128Reg, operand);
- break;
- }
- case kPPC_S128Store16Lane: {
- AddressingMode mode = kMode_None;
- constexpr int lane_width_in_bytes = 2;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ vextractuh(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand((7 - i.InputUint8(3)) * lane_width_in_bytes));
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrh)
- __ stxsihx(kScratchSimd128Reg, operand);
- break;
- }
- case kPPC_S128Store32Lane: {
- AddressingMode mode = kMode_None;
- constexpr int lane_width_in_bytes = 4;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ vextractuw(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand((3 - i.InputUint8(3)) * lane_width_in_bytes));
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrw)
- __ stxsiwx(kScratchSimd128Reg, operand);
- break;
- }
- case kPPC_S128Store64Lane: {
- AddressingMode mode = kMode_None;
- constexpr int lane_width_in_bytes = 8;
- size_t index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &index);
- DCHECK_EQ(mode, kMode_MRR);
- __ vextractd(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand((1 - i.InputUint8(3)) * lane_width_in_bytes));
- MAYBE_REVERSE_BYTES(kScratchSimd128Reg, xxbrd)
- __ stxsdx(kScratchSimd128Reg, operand);
- break;
- }
-#undef MAYBE_REVERSE_BYTES
-#define EXT_ADD_PAIRWISE(mul_even, mul_odd, add) \
- __ mul_even(kScratchSimd128Reg2, src, kScratchSimd128Reg); \
- __ mul_odd(kScratchSimd128Reg, src, kScratchSimd128Reg); \
- __ add(dst, kScratchSimd128Reg2, kScratchSimd128Reg);
- case kPPC_I32x4ExtAddPairwiseI16x8S: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vspltish(kScratchSimd128Reg, Operand(1));
- EXT_ADD_PAIRWISE(vmulesh, vmulosh, vadduwm)
- break;
- }
- case kPPC_I32x4ExtAddPairwiseI16x8U: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vspltish(kScratchSimd128Reg, Operand(1));
- EXT_ADD_PAIRWISE(vmuleuh, vmulouh, vadduwm)
- break;
- }
- case kPPC_I16x8ExtAddPairwiseI8x16S: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ xxspltib(kScratchSimd128Reg, Operand(1));
- EXT_ADD_PAIRWISE(vmulesb, vmulosb, vadduhm)
- break;
- }
- case kPPC_I16x8ExtAddPairwiseI8x16U: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ xxspltib(kScratchSimd128Reg, Operand(1));
- EXT_ADD_PAIRWISE(vmuleub, vmuloub, vadduhm)
- break;
- }
-#undef EXT_ADD_PAIRWISE
- case kPPC_I16x8Q15MulRSatS: {
- __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ vmhraddshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchSimd128Reg);
- break;
- }
- case kPPC_F64x2PromoteLowF32x4: {
- constexpr int lane_number = 8;
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ vextractd(kScratchSimd128Reg, src, Operand(lane_number));
- __ vinsertw(kScratchSimd128Reg, kScratchSimd128Reg, Operand(lane_number));
- __ xvcvspdp(dst, kScratchSimd128Reg);
- break;
- }
- case kPPC_F32x4DemoteF64x2Zero: {
- constexpr int lane_number = 8;
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ xvcvdpsp(kScratchSimd128Reg, src);
- __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number));
- __ vinsertw(kScratchSimd128Reg, dst, Operand(4));
- __ vxor(dst, dst, dst);
- __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number));
- break;
- }
- case kPPC_I32x4TruncSatF64x2SZero: {
- constexpr int lane_number = 8;
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- // NaN to 0.
- __ vor(kScratchSimd128Reg, src, src);
- __ xvcmpeqdp(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
- __ vand(kScratchSimd128Reg, src, kScratchSimd128Reg);
- __ xvcvdpsxws(kScratchSimd128Reg, kScratchSimd128Reg);
- __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number));
- __ vinsertw(kScratchSimd128Reg, dst, Operand(4));
- __ vxor(dst, dst, dst);
- __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number));
- break;
- }
- case kPPC_I32x4TruncSatF64x2UZero: {
- constexpr int lane_number = 8;
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- __ xvcvdpuxws(kScratchSimd128Reg, src);
- __ vextractuw(dst, kScratchSimd128Reg, Operand(lane_number));
- __ vinsertw(kScratchSimd128Reg, dst, Operand(4));
- __ vxor(dst, dst, dst);
- __ vinsertd(dst, kScratchSimd128Reg, Operand(lane_number));
+ PREP_LOAD_EXTEND()
+ __ LoadV64ZeroLE(i.OutputSimd128Register(), operand, kScratchReg,
+ kScratchSimd128Reg);
break;
}
+#undef PREP_LOAD_EXTEND
case kPPC_StoreCompressTagged: {
size_t index = 0;
AddressingMode mode = kMode_None;
@@ -3087,16 +2817,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
break;
}
- case kPPC_LoadDecompressTaggedPointer: {
- CHECK(instr->HasOutput());
- ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
- __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
- break;
- }
- case kPPC_LoadDecompressAnyTagged: {
+ case kPPC_LoadDecompressTagged: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
- __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
+ __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
break;
}
default:
@@ -3358,6 +3082,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ Push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -3523,7 +3251,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
AssembleDeconstructFrame();
}
// Constant pool is unavailable since the frame has been destructed
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver).
// The number of arguments without the receiver is
@@ -3537,8 +3265,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
}
- __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
@@ -3566,37 +3294,79 @@ void CodeGenerator::PrepareForDeoptimizationExits(
__ CheckTrampolinePoolQuick(total_size);
}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
- // Must be kept in sync with {MoveTempLocationTo}.
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ PPCOperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
+ __ LoadU64(r0, g.ToMemOperand(source), r0);
+ __ Push(r0);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // Bump the stack pointer and assemble the move.
+ __ addi(sp, sp, Operand(-(new_slots * kSystemPointerSize)));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ PPCOperandConverter g(this, nullptr);
+ if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ StoreU64(scratch, g.ToMemOperand(dest), r0);
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ addi(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ addi(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
+ // Must be kept in sync with {MoveTempLocationTo}.
if (!IsFloatingPoint(rep) ||
((IsFloatingPoint(rep) &&
!move_cycle_.pending_double_scratch_register_use))) {
// The scratch register for this rep is available.
- int scratch_reg_code =
- !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
+ int scratch_reg_code;
+ if (IsSimd128(rep)) {
+ scratch_reg_code = kScratchSimd128Reg.code();
+ } else if (IsFloatingPoint(rep)) {
+ scratch_reg_code = kScratchDoubleReg.code();
+ } else {
+ scratch_reg_code = kScratchReg.code();
+ }
AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
DCHECK(!AreAliased(kScratchReg, r0, ip));
AssembleMove(source, &scratch);
} else {
- DCHECK(!source->IsRegister() && !source->IsStackSlot());
// The scratch register is blocked by pending moves. Use the stack instead.
- int new_slots = ElementSizeInPointers(rep);
- PPCOperandConverter g(this, nullptr);
- if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
- __ LoadU64(r0, g.ToMemOperand(source), r0);
- __ Push(r0);
- } else {
- // Bump the stack pointer and assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ addi(sp, sp, Operand(-(new_slots * kSystemPointerSize)));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -3611,34 +3381,14 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
DCHECK(!AreAliased(kScratchReg, r0, ip));
AssembleMove(&scratch, dest);
} else {
- DCHECK(!dest->IsRegister() && !dest->IsStackSlot());
- PPCOperandConverter g(this, nullptr);
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
- __ Pop(scratch);
- __ StoreU64(scratch, g.ToMemOperand(dest), r0);
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ addi(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
move_cycle_ = MoveCycleState();
}
void CodeGenerator::SetPendingMove(MoveOperands* move) {
- if (move->source().IsFPStackSlot() && !move->destination().IsFPRegister()) {
- move_cycle_.pending_double_scratch_register_use = true;
- } else if (move->source().IsConstant() &&
- (move->destination().IsDoubleStackSlot() ||
- move->destination().IsFloatStackSlot())) {
+ if ((move->source().IsConstant() || move->source().IsFPStackSlot()) &&
+ !move->destination().IsFPRegister()) {
move_cycle_.pending_double_scratch_register_use = true;
}
}
@@ -3714,7 +3464,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ __ LoadTaggedRoot(dst, index);
} else {
// TODO(v8:7703, jyan@ca.ibm.com): Turn into a
// COMPRESSED_EMBEDDED_OBJECT when the constant pool entry size is
@@ -3771,7 +3521,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
DCHECK(destination->IsSimd128StackSlot());
MemOperand dst = g.ToMemOperand(destination);
- __ StoreSimd128(g.ToSimd128Register(source), dst, kScratchReg);
+ __ StoreSimd128(g.ToSimd128Register(source), dst, r0);
}
} else {
DoubleRegister src = g.ToDoubleRegister(source);
@@ -3800,7 +3550,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
MemOperand src = g.ToMemOperand(source);
- __ LoadSimd128(g.ToSimd128Register(destination), src, kScratchReg);
+ __ LoadSimd128(g.ToSimd128Register(destination), src, r0);
}
} else {
LocationOperand* op = LocationOperand::cast(source);
@@ -3815,8 +3565,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
- __ LoadSimd128(kScratchSimd128Reg, src, kScratchReg);
- __ StoreSimd128(kScratchSimd128Reg, dst, kScratchReg);
+ __ LoadSimd128(kScratchSimd128Reg, src, r0);
+ __ StoreSimd128(kScratchSimd128Reg, dst, r0);
}
}
} else {
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 5710aa313d..a893678bb3 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -302,6 +302,7 @@ namespace compiler {
V(PPC_I32x4ExtMulHighI16x8U) \
V(PPC_I32x4TruncSatF64x2SZero) \
V(PPC_I32x4TruncSatF64x2UZero) \
+ V(PPC_I32x4DotI8x16AddS) \
V(PPC_I16x8Splat) \
V(PPC_I16x8ExtractLaneU) \
V(PPC_I16x8ExtractLaneS) \
@@ -343,6 +344,7 @@ namespace compiler {
V(PPC_I16x8ExtMulHighI8x16S) \
V(PPC_I16x8ExtMulLowI8x16U) \
V(PPC_I16x8ExtMulHighI8x16U) \
+ V(PPC_I16x8DotI8x16S) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
@@ -411,8 +413,7 @@ namespace compiler {
V(PPC_S128Store64Lane) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
- V(PPC_LoadDecompressTaggedPointer) \
- V(PPC_LoadDecompressAnyTagged)
+ V(PPC_LoadDecompressTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
@@ -428,8 +429,9 @@ namespace compiler {
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [%rr + K] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index e1d195f253..22f78af268 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -227,6 +227,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4ExtMulHighI16x8U:
case kPPC_I32x4TruncSatF64x2SZero:
case kPPC_I32x4TruncSatF64x2UZero:
+ case kPPC_I32x4DotI8x16AddS:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
@@ -268,6 +269,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8ExtMulHighI8x16S:
case kPPC_I16x8ExtMulLowI8x16U:
case kPPC_I16x8ExtMulHighI8x16U:
+ case kPPC_I16x8DotI8x16S:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
@@ -331,8 +333,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadSimd128:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
- case kPPC_LoadDecompressTaggedPointer:
- case kPPC_LoadDecompressAnyTagged:
+ case kPPC_LoadDecompressTagged:
case kPPC_S128Load8Splat:
case kPPC_S128Load16Splat:
case kPPC_S128Load32Splat:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index a5069d22fd..87be6d6c52 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -214,10 +214,10 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
opcode = kPPC_LoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
- opcode = kPPC_LoadDecompressTaggedPointer;
+ opcode = kPPC_LoadDecompressTagged;
break;
case MachineRepresentation::kTagged:
- opcode = kPPC_LoadDecompressAnyTagged;
+ opcode = kPPC_LoadDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
@@ -242,7 +242,11 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
node->opcode() == IrOpcode::kWord64AtomicLoad);
- if (g.CanBeImmediate(offset, mode)) {
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ selector->Emit(opcode |= AddressingModeField::encode(kMode_Root),
+ g.DefineAsRegister(node), g.UseRegister(offset),
+ g.UseImmediate(is_atomic));
+ } else if (g.CanBeImmediate(offset, mode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(offset), g.UseImmediate(is_atomic));
@@ -318,7 +322,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
CHECK_EQ(is_atomic, false);
selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
@@ -387,7 +391,11 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
UNREACHABLE();
}
- if (g.CanBeImmediate(offset, mode)) {
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Root),
+ g.NoOutput(), g.UseRegister(offset), g.UseRegister(value),
+ g.UseImmediate(is_atomic));
+ } else if (g.CanBeImmediate(offset, mode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
g.UseRegister(value), g.UseImmediate(is_atomic));
@@ -2613,6 +2621,21 @@ void InstructionSelector::VisitS128Const(Node* node) {
}
}
+void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_I16x8DotI8x16S, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_I32x4DotI8x16AddS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)));
+}
+
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index c0c9db2bb1..f643cec765 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -1424,7 +1424,9 @@ bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
PrintF("Register allocator error: live v%d reached first block.\n",
operand_index);
LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
- PrintF(" (first use is at %d)\n", range->first_pos()->pos().value());
+ PrintF(" (first use is at position %d in instruction %d)\n",
+ range->first_pos()->pos().value(),
+ range->first_pos()->pos().ToInstructionIndex());
if (debug_name() == nullptr) {
PrintF("\n");
} else {
@@ -2606,6 +2608,8 @@ void LiveRangeBuilder::Verify() const {
for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
     // Except for the first interval, the other intervals must start at
// a block boundary, otherwise data wouldn't flow to them.
+ // You might trigger this CHECK if your SSA is not valid. For instance,
+ // if the inputs of a Phi node are in the wrong order.
CHECK(IntervalStartsAtBlockBoundary(i));
// The last instruction of the predecessors of the block the interval
// starts must be covered by the range.
diff --git a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc
index 0f227a3fc6..351ac43302 100644
--- a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc
+++ b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc
@@ -19,7 +19,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
@@ -173,13 +173,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
#if V8_TARGET_ARCH_RISCV64
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
#endif
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
__ AddWord(scratch1_, object_, index_);
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
@@ -334,7 +333,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \
__ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
__ PrepareCallCFunction(3, 0, kScratchReg); \
@@ -344,7 +343,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \
__ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
__ PrepareCallCFunction(3, 0, kScratchReg); \
@@ -473,7 +472,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -484,7 +483,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -582,7 +581,7 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -590,10 +589,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize);
+ masm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ masm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -602,13 +601,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@@ -623,16 +622,15 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(
- kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ __ LoadTaggedField(kScratchReg,
+ MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(kScratchReg,
- FieldMemOperand(kScratchReg,
- CodeDataContainer::kKindSpecificFlagsOffset));
+ FieldMemOperand(kScratchReg, Code::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
@@ -723,14 +721,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputOrZeroRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
- __ LoadTaggedPointerField(
- kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, AbortReason::kWrongFunctionContext, cp,
Operand(kScratchReg));
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ LoadTaggedPointerField(a2,
- FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedField(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -830,7 +827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -876,8 +873,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -1296,7 +1292,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1426,7 +1422,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1721,7 +1717,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Lb(i.OutputRegister(), i.MemoryOperand());
break;
case kRiscvSb:
- __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sb(i.InputOrZeroRegister(0), i.MemoryOperand(1));
break;
case kRiscvLhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
@@ -1736,7 +1732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kRiscvSh:
- __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sh(i.InputOrZeroRegister(0), i.MemoryOperand(1));
break;
case kRiscvUsh:
__ Ush(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1761,14 +1757,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Uld(i.OutputRegister(), i.MemoryOperand());
break;
case kRiscvSd:
- __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sd(i.InputOrZeroRegister(0), i.MemoryOperand(1));
break;
case kRiscvUsd:
__ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
#endif
case kRiscvSw:
- __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sw(i.InputOrZeroRegister(0), i.MemoryOperand(1));
break;
case kRiscvUsw:
__ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1782,9 +1778,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvStoreFloat: {
- size_t index = 0;
- MemOperand operand = i.MemoryOperand(&index);
- FPURegister ft = i.InputOrZeroSingleRegister(index);
+ MemOperand operand = i.MemoryOperand(1);
+ FPURegister ft = i.InputOrZeroSingleRegister(0);
if (ft == kSingleRegZero && !__ IsSingleZeroRegSet()) {
__ LoadFPRImmediate(kSingleRegZero, 0.0f);
}
@@ -1808,11 +1803,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kRiscvStoreDouble: {
- FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ FPURegister ft = i.InputOrZeroDoubleRegister(0);
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0);
}
- __ StoreDouble(ft, i.MemoryOperand());
+ __ StoreDouble(ft, i.MemoryOperand(1));
break;
}
case kRiscvUStoreDouble: {
@@ -1941,7 +1936,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#if V8_TARGET_ARCH_RISCV32
case kRiscvWord32AtomicPairLoad: {
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ AddWord(a0, i.InputRegister(0), i.InputRegister(1));
__ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);
__ PrepareCallCFunction(1, 0, kScratchReg);
@@ -1950,7 +1945,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvWord32AtomicPairStore: {
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ AddWord(a0, i.InputRegister(0), i.InputRegister(1));
__ PushCallerSaved(SaveFPRegsMode::kIgnore);
__ PrepareCallCFunction(3, 0, kScratchReg);
@@ -1973,7 +1968,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
case kRiscvWord32AtomicPairExchange: {
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);
__ PrepareCallCFunction(3, 0, kScratchReg);
__ AddWord(a0, i.InputRegister(0), i.InputRegister(1));
@@ -1983,7 +1978,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvWord32AtomicPairCompareExchange: {
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
__ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);
__ PrepareCallCFunction(5, 0, kScratchReg);
__ add(a0, i.InputRegister(0), i.InputRegister(1));
@@ -2183,9 +2178,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#if V8_TARGET_ARCH_RISCV64
case kRiscvStoreCompressTagged: {
- size_t index = 0;
- MemOperand operand = i.MemoryOperand(&index);
- __ StoreTaggedField(i.InputOrZeroRegister(index), operand);
+ MemOperand operand = i.MemoryOperand(1);
+ __ StoreTaggedField(i.InputOrZeroRegister(0), operand);
break;
}
case kRiscvLoadDecompressTaggedSigned: {
@@ -2195,29 +2189,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressTaggedSigned(result, operand);
break;
}
- case kRiscvLoadDecompressTaggedPointer: {
- CHECK(instr->HasOutput());
- Register result = i.OutputRegister();
- MemOperand operand = i.MemoryOperand();
- __ DecompressTaggedPointer(result, operand);
- break;
- }
- case kRiscvLoadDecompressAnyTagged: {
+ case kRiscvLoadDecompressTagged: {
CHECK(instr->HasOutput());
Register result = i.OutputRegister();
MemOperand operand = i.MemoryOperand();
- __ DecompressAnyTagged(result, operand);
+ __ DecompressTagged(result, operand);
break;
}
#endif
case kRiscvRvvSt: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- Register dst = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
- : kScratchReg;
- if (i.MemoryOperand().offset() != 0) {
- __ AddWord(dst, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ auto memOperand = i.MemoryOperand(1);
+ Register dst = memOperand.offset() == 0 ? memOperand.rm() : kScratchReg;
+ if (memOperand.offset() != 0) {
+ __ AddWord(dst, memOperand.rm(), memOperand.offset());
}
- __ vs(i.InputSimd128Register(2), dst, 0, VSew::E8);
+ __ vs(i.InputSimd128Register(0), dst, 0, VSew::E8);
break;
}
case kRiscvRvvLd: {
@@ -2377,6 +2364,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ VU.set(kScratchReg, E64, m1);
__ li(kScratchReg, i.InputInt64(1));
+ __ vmv_vi(kSimd128ScratchReg3, -1);
__ vmv_sx(kSimd128ScratchReg3, kScratchReg);
index = kSimd128ScratchReg3;
}
@@ -2915,6 +2903,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src = i.InputSimd128Register(0);
__ VU.set(kScratchReg, E8, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
__ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
__ VU.set(kScratchReg, E32, m1);
__ vmv_xs(dst, kSimd128ScratchReg);
@@ -2925,6 +2914,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src = i.InputSimd128Register(0);
__ VU.set(kScratchReg, E16, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
__ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
__ VU.set(kScratchReg, E32, m1);
__ vmv_xs(dst, kSimd128ScratchReg);
@@ -2935,6 +2925,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src = i.InputSimd128Register(0);
__ VU.set(kScratchReg, E32, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
__ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
__ vmv_xs(dst, kSimd128ScratchReg);
break;
@@ -2944,6 +2935,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src = i.InputSimd128Register(0);
__ VU.set(kScratchReg, E64, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
__ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
__ VU.set(kScratchReg, E32, m1);
__ vmv_xs(dst, kSimd128ScratchReg);
@@ -3264,16 +3256,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kRiscvF64x2Qfma: {
__ VU.set(kScratchReg, E64, m1);
- __ vfmadd_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
- i.InputSimd128Register(0));
- __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vfmadd_vv(i.InputSimd128Register(0), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF64x2Qfms: {
__ VU.set(kScratchReg, E64, m1);
- __ vfnmsub_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
- i.InputSimd128Register(0));
- __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vfnmsub_vv(i.InputSimd128Register(0), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF32x4ExtractLane: {
@@ -3452,16 +3444,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kRiscvF32x4Qfma: {
__ VU.set(kScratchReg, E32, m1);
- __ vfmadd_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
- i.InputSimd128Register(0));
- __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vfmadd_vv(i.InputSimd128Register(0), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF32x4Qfms: {
__ VU.set(kScratchReg, E32, m1);
- __ vfnmsub_vv(i.InputSimd128Register(1), i.InputSimd128Register(2),
- i.InputSimd128Register(0));
- __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vfnmsub_vv(i.InputSimd128Register(0), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvI64x2SConvertI32x4Low: {
@@ -3712,11 +3704,11 @@ bool IsInludeEqual(Condition cc) {
}
}
-void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ tasm->
+#define __ masm->
RiscvOperandConverter i(gen, instr);
// RISC-V does not have condition code flags, so compare and branch are
@@ -3807,7 +3799,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ tasm()->
+#define __ masm()->
}
// Assembles branches after an instruction.
@@ -3815,7 +3807,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -3879,7 +3871,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
};
auto ool = zone()->New<OutOfLineTrap>(this, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
}
// Assembles boolean materializations after an instruction.
@@ -4370,98 +4362,79 @@ void CodeGenerator::PrepareForDeoptimizationExits(
__ CheckTrampolinePoolQuick(total_size);
}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
- if ((IsFloatingPoint(rep) &&
- !move_cycle_.pending_double_scratch_register_use) ||
- (!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) {
- // The scratch register for this rep is available.
- int scratch_reg_code =
- !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
- AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
+ move_cycle_.temps.emplace(masm());
+ auto& temps = *move_cycle_.temps;
+ // Temporarily exclude the reserved scratch registers while we pick one to
+ // resolve the move cycle. Re-include them immediately afterwards as they
+ // might be needed for the move to the temp location.
+ temps.Exclude(move_cycle_.scratch_regs);
+ if (!IsFloatingPoint(rep)) {
+ if (temps.hasAvailable()) {
+ Register scratch = move_cycle_.temps->Acquire();
+ move_cycle_.scratch_reg.emplace(scratch);
+ }
+ }
+
+ temps.Include(move_cycle_.scratch_regs);
+
+ if (move_cycle_.scratch_reg.has_value()) {
+ // A scratch register is available for this rep.
+ // auto& scratch_reg = *move_cycle_.scratch_reg;
+ AllocatedOperand scratch(LocationOperand::REGISTER, rep,
+ move_cycle_.scratch_reg->code());
AssembleMove(source, &scratch);
} else {
- // The scratch register is blocked by pending moves. Use the stack instead.
- int new_slots = ElementSizeInPointers(rep);
- RiscvOperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ Push(g.ToRegister(source));
-#if V8_TARGET_ARCH_RISCV64
- } else if (source->IsStackSlot() || source->IsFloatStackSlot() ||
- source->IsDoubleStackSlot()) {
-#elif V8_TARGET_ARCH_RISCV32
- } else if (source->IsStackSlot() || source->IsFloatStackSlot()) {
-#endif
- __ LoadWord(kScratchReg, g.ToMemOperand(source));
- __ Push(kScratchReg);
- } else {
- // Bump the stack pointer and assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ SubWord(sp, sp, Operand(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ // The scratch registers are blocked by pending moves. Use the stack
+ // instead.
+ Push(source);
}
}
void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
MachineRepresentation rep) {
- if ((IsFloatingPoint(rep) &&
- !move_cycle_.pending_double_scratch_register_use) ||
- (!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) {
- int scratch_reg_code =
- !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
- AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
+ if (move_cycle_.scratch_reg.has_value()) {
+ // auto& scratch_reg = *move_cycle_.scratch_reg;
+ AllocatedOperand scratch(LocationOperand::REGISTER, rep,
+ move_cycle_.scratch_reg->code());
AssembleMove(&scratch, dest);
} else {
- RiscvOperandConverter g(this, nullptr);
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- if (dest->IsRegister()) {
- __ Pop(g.ToRegister(dest));
-#if V8_TARGET_ARCH_RISCV64
- } else if (dest->IsStackSlot() || dest->IsFloatStackSlot() ||
- dest->IsDoubleStackSlot()) {
-#elif V8_TARGET_ARCH_RISCV32
- } else if (dest->IsStackSlot() || dest->IsFloatStackSlot()) {
-#endif
- __ Pop(kScratchReg);
- __ StoreWord(kScratchReg, g.ToMemOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ AddWord(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
+ // Restore the default state to release the {UseScratchRegisterScope} and to
+ // prepare for the next cycle.
move_cycle_ = MoveCycleState();
}
void CodeGenerator::SetPendingMove(MoveOperands* move) {
- MoveType::Type move_type =
- MoveType::InferMove(&move->source(), &move->destination());
- if (move_type == MoveType::kConstantToStack) {
+ InstructionOperand* src = &move->source();
+ InstructionOperand* dst = &move->destination();
+ UseScratchRegisterScope temps(masm());
+ if (src->IsConstant() && dst->IsFPLocationOperand()) {
+ Register temp = temps.Acquire();
+ move_cycle_.scratch_regs.set(temp);
+ } else if (src->IsAnyStackSlot() || dst->IsAnyStackSlot()) {
RiscvOperandConverter g(this, nullptr);
- Constant src = g.ToConstant(&move->source());
- if (move->destination().IsStackSlot() &&
- (RelocInfo::IsWasmReference(src.rmode()) ||
- (src.type() != Constant::kInt32 && src.type() != Constant::kInt64))) {
- move_cycle_.pending_scratch_register_use = true;
- }
- } else if (move_type == MoveType::kStackToStack) {
- if (move->source().IsFPLocationOperand()) {
- move_cycle_.pending_double_scratch_register_use = true;
- } else {
- move_cycle_.pending_scratch_register_use = true;
+ bool src_need_scratch = false;
+ bool dst_need_scratch = false;
+ if (src->IsAnyStackSlot()) {
+ MemOperand src_mem = g.ToMemOperand(src);
+ src_need_scratch =
+ (!is_int16(src_mem.offset())) || (((src_mem.offset() & 0b111) != 0) &&
+ !is_int16(src_mem.offset() + 4));
+ }
+ if (dst->IsAnyStackSlot()) {
+ MemOperand dst_mem = g.ToMemOperand(dst);
+ dst_need_scratch =
+ (!is_int16(dst_mem.offset())) || (((dst_mem.offset() & 0b111) != 0) &&
+ !is_int16(dst_mem.offset() + 4));
+ }
+ if (src_need_scratch || dst_need_scratch) {
+ Register temp = temps.Acquire();
+ move_cycle_.scratch_regs.set(temp);
}
}
}
@@ -4768,7 +4741,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
}
#endif
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp_0 = kScratchReg;
Register temp_1 = kScratchReg2;
__ LoadWord(temp_0, src);
@@ -4782,6 +4755,66 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
}
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ RiscvOperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ Push(g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ LoadWord(scratch, g.ToMemOperand(source));
+ __ Push(scratch);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ SubWord(sp, sp, Operand(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ RiscvOperandConverter g(this, nullptr);
+ if (dest->IsRegister()) {
+ __ Pop(g.ToRegister(dest));
+ } else if (dest->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ StoreWord(scratch, g.ToMemOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ AddWord(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ AddWord(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 64-bit RISC-V we emit the jump tables inline.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h b/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h
index efe7a23267..dd854aa22d 100644
--- a/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h
+++ b/deps/v8/src/compiler/backend/riscv/instruction-codes-riscv.h
@@ -64,8 +64,7 @@ namespace compiler {
V(RiscvWord64AtomicExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
- V(RiscvLoadDecompressTaggedPointer) \
- V(RiscvLoadDecompressAnyTagged) \
+ V(RiscvLoadDecompressTagged) \
V(RiscvWord64AtomicCompareExchangeUint64)
#elif V8_TARGET_ARCH_RISCV32
#define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \
diff --git a/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc b/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc
index ea9e603920..51663e2b6e 100644
--- a/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc
+++ b/deps/v8/src/compiler/backend/riscv/instruction-scheduler-riscv.cc
@@ -377,8 +377,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUlwu:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
- case kRiscvLoadDecompressTaggedPointer:
- case kRiscvLoadDecompressAnyTagged:
+ case kRiscvLoadDecompressTagged:
#elif V8_TARGET_ARCH_RISCV32
case kRiscvWord32AtomicPairLoad:
#endif
@@ -744,7 +743,7 @@ int AssemblePopArgumentsAdoptFrameLatency() {
int AssertLatency() { return 1; }
int PrepareCallCFunctionLatency() {
- int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (frame_alignment > kSystemPointerSize) {
return 1 + Sub64Latency(false) + AndLatency(false) + 1;
} else {
diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h
index 42cae64f21..28cedf9c44 100644
--- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h
+++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv.h
@@ -1072,6 +1072,8 @@ VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms)
#undef VISIT_SIMD_QFMOP
void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
+ constexpr int32_t FIRST_INDEX = 0b01010101;
+ constexpr int32_t SECOND_INDEX = 0b10101010;
RiscvOperandGenerator g(this);
InstructionOperand temp = g.TempFpRegister(v16);
InstructionOperand temp1 = g.TempFpRegister(v14);
@@ -1080,14 +1082,77 @@ void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), g.UseImmediate(E16),
g.UseImmediate(m1));
- this->Emit(kRiscvVcompress, temp2, temp, g.UseImmediate(0b01010101),
+ this->Emit(kRiscvVcompress, temp2, temp, g.UseImmediate(FIRST_INDEX),
g.UseImmediate(E32), g.UseImmediate(m2));
- this->Emit(kRiscvVcompress, temp1, temp, g.UseImmediate(0b10101010),
+ this->Emit(kRiscvVcompress, temp1, temp, g.UseImmediate(SECOND_INDEX),
g.UseImmediate(E32), g.UseImmediate(m2));
this->Emit(kRiscvVaddVv, dst, temp1, temp2, g.UseImmediate(E32),
g.UseImmediate(m1));
}
+void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
+ constexpr int32_t FIRST_INDEX = 0b0101010101010101;
+ constexpr int32_t SECOND_INDEX = 0b1010101010101010;
+ RiscvOperandGenerator g(this);
+ InstructionOperand temp = g.TempFpRegister(v16);
+ InstructionOperand temp1 = g.TempFpRegister(v14);
+ InstructionOperand temp2 = g.TempFpRegister(v30);
+ InstructionOperand dst = g.DefineAsRegister(node);
+ this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseImmediate(E8),
+ g.UseImmediate(m1));
+ this->Emit(kRiscvVcompress, temp2, temp, g.UseImmediate(FIRST_INDEX),
+ g.UseImmediate(E16), g.UseImmediate(m2));
+ this->Emit(kRiscvVcompress, temp1, temp, g.UseImmediate(SECOND_INDEX),
+ g.UseImmediate(E16), g.UseImmediate(m2));
+ this->Emit(kRiscvVaddVv, dst, temp1, temp2, g.UseImmediate(E16),
+ g.UseImmediate(m1));
+}
+
+void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
+ constexpr int32_t FIRST_INDEX = 0b0001000100010001;
+ constexpr int32_t SECOND_INDEX = 0b0010001000100010;
+ constexpr int32_t THIRD_INDEX = 0b0100010001000100;
+ constexpr int32_t FOURTH_INDEX = 0b1000100010001000;
+ RiscvOperandGenerator g(this);
+ InstructionOperand intermediate = g.TempFpRegister(v12);
+ this->Emit(kRiscvVwmul, intermediate, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseImmediate(E8),
+ g.UseImmediate(m1));
+
+ InstructionOperand compressedPart1 = g.TempFpRegister(v14);
+ InstructionOperand compressedPart2 = g.TempFpRegister(v16);
+ this->Emit(kRiscvVcompress, compressedPart2, intermediate,
+ g.UseImmediate(FIRST_INDEX), g.UseImmediate(E16),
+ g.UseImmediate(m2));
+ this->Emit(kRiscvVcompress, compressedPart1, intermediate,
+ g.UseImmediate(SECOND_INDEX), g.UseImmediate(E16),
+ g.UseImmediate(m2));
+
+ InstructionOperand compressedPart3 = g.TempFpRegister(v20);
+ InstructionOperand compressedPart4 = g.TempFpRegister(v26);
+ this->Emit(kRiscvVcompress, compressedPart3, intermediate,
+ g.UseImmediate(THIRD_INDEX), g.UseImmediate(E16),
+ g.UseImmediate(m2));
+ this->Emit(kRiscvVcompress, compressedPart4, intermediate,
+ g.UseImmediate(FOURTH_INDEX), g.UseImmediate(E16),
+ g.UseImmediate(m2));
+
+ InstructionOperand temp2 = g.TempFpRegister(v18);
+ InstructionOperand temp = g.TempFpRegister(kSimd128ScratchReg);
+ this->Emit(kRiscvVwadd, temp2, compressedPart1, compressedPart2,
+ g.UseImmediate(E16), g.UseImmediate(m1));
+ this->Emit(kRiscvVwadd, temp, compressedPart3, compressedPart4,
+ g.UseImmediate(E16), g.UseImmediate(m1));
+
+ InstructionOperand mul_result = g.TempFpRegister(v16);
+ InstructionOperand dst = g.DefineAsRegister(node);
+ this->Emit(kRiscvVaddVv, mul_result, temp2, temp, g.UseImmediate(E32),
+ g.UseImmediate(m1));
+ this->Emit(kRiscvVaddVv, dst, mul_result, g.UseRegister(node->InputAt(2)),
+ g.UseImmediate(E32), g.UseImmediate(m1));
+}
+
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
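Note on the dot-product lowerings above: kRiscvVwmul widens and multiplies the lanes element-wise, the vcompress steps use the even/odd bit masks (FIRST_INDEX/SECOND_INDEX) to gather the even- and odd-indexed products into separate vectors, and the final add sums each adjacent pair. A rough scalar sketch of the intended result for the 16-bit case, assuming the usual Wasm i32x4.dot_i16x8_s semantics (illustrative only, not part of the patch); the new 8-bit visitors follow the same pattern, with VisitI32x4DotI8x16I7x16AddS accumulating four products per 32-bit lane before adding the third input:

    #include <cstdint>

    // dst[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1], with signed 16-bit inputs
    // widened to 32 bits before multiplying.
    void DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t dst[4]) {
      for (int i = 0; i < 4; ++i) {
        dst[i] = int32_t{a[2 * i]} * b[2 * i] +
                 int32_t{a[2 * i + 1]} * b[2 * i + 1];
      }
    }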
diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc
index a8db8248b3..5e494aa3ab 100644
--- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc
+++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc
@@ -65,7 +65,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@@ -78,6 +78,13 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
}
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Root),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(index));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(output == nullptr ? node : output),
@@ -216,7 +223,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
@@ -254,17 +261,23 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseRegisterOrImmediateZero(value), g.UseImmediate(index));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
+ g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
+ g.UseImmediate(index));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0));
}
}
}
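Both RISC-V instruction selectors (and the S390 one later in this diff) gain a fast path for accesses whose base is the LoadRootRegister node: instead of materializing the address, the load or store is emitted with the new kMode_Root addressing mode and only an immediate offset, which the code generator resolves as a root-register-relative operand; the store paths also move the value operand in front of the address operands. A minimal sketch of the selection decision, with hypothetical names (not the V8 API):

    #include <cstdint>

    enum class AddressingMode { kMRI, kRoot };

    struct MemAccess {
      AddressingMode mode;
      int32_t offset;  // immediate displacement from the selected base
    };

    // Root-relative accesses need no address arithmetic: the code generator
    // resolves kRoot as a (root register + offset) memory operand.
    MemAccess SelectAccess(bool base_is_root_register, int32_t offset) {
      if (base_is_root_register) return {AddressingMode::kRoot, offset};
      return {AddressingMode::kMRI, offset};  // ordinary base register + immediate
    }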
diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc
index 83f5b5ecb4..130831675f 100644
--- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc
@@ -168,7 +168,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue());
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@@ -181,6 +181,13 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
}
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Root),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(index));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(output == nullptr ? node : output),
@@ -280,10 +287,10 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kRiscvLoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
- opcode = kRiscvLoadDecompressTaggedPointer;
+ opcode = kRiscvLoadDecompressTagged;
break;
case MachineRepresentation::kTagged:
- opcode = kRiscvLoadDecompressAnyTagged;
+ opcode = kRiscvLoadDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
@@ -338,7 +345,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
@@ -387,17 +394,23 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
}
+ if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) {
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseRegisterOrImmediateZero(value), g.UseImmediate(index));
+ return;
+ }
+
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
+ g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
+ g.UseImmediate(index));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0));
}
}
}
@@ -1938,10 +1951,10 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
opcode = kRiscv64LdDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
- opcode = kRiscv64LdDecompressTaggedPointer;
+ opcode = kRiscv64LdDecompressTagged;
break;
case MachineRepresentation::kTagged:
- opcode = kRiscv64LdDecompressAnyTagged;
+ opcode = kRiscv64LdDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 27b2a5a853..510616a82f 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
#define kScratchReg ip
@@ -99,6 +99,9 @@ class S390OperandConverter final : public InstructionOperandConverter {
*first_index += 3;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
InputInt32(index + 2));
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
}
UNREACHABLE();
}
@@ -209,12 +212,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, eq,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
if (offset_ == no_reg) {
__ AddS64(scratch1_, object_, Operand(offset_immediate_));
} else {
@@ -321,6 +323,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kS390_Abs64:
case kS390_Abs32:
case kS390_Mul32:
+ case kS390_Mul64WithOverflow:
return nooverflow;
default:
break;
@@ -618,7 +621,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
#define ASSEMBLE_FLOAT_MODULO() \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -630,7 +633,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -642,7 +645,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -1020,20 +1023,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- tasm->Push((*pending_pushes)[0]);
+ masm->Push((*pending_pushes)[0]);
break;
case 2:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -1044,7 +1047,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
}
void AdjustStackPointerForTailCall(
- TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -1052,15 +1055,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+ masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(tasm, state, pending_pushes);
+ FlushPendingPushRegisters(masm, state, pending_pushes);
}
- tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+ masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -1082,7 +1085,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- tasm(), frame_access_state(),
+ masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
// Pushes of non-register data types are not supported.
@@ -1092,20 +1095,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) {
- FlushPendingPushRegisters(tasm(), frame_access_state(),
+ FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes);
}
move->Eliminate();
}
- FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
}
@@ -1121,7 +1124,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
+// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
@@ -1132,11 +1135,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(
- ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
- __ LoadS32(ip,
- FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
+ int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ __ LoadTaggedField(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+ r0);
+ __ LoadU16(ip, FieldMemOperand(ip, Code::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne);
@@ -1218,7 +1220,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
frame_access_state()->ClearSPDelta();
@@ -1240,14 +1242,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
- __ LoadTaggedPointerField(
- kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedField(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpS64(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadTaggedPointerField(r4,
- FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedField(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1351,7 +1352,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -1410,8 +1411,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
@@ -1725,7 +1725,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_BIN_OP(RRRInstr(MulHighU64), nullInstr, nullInstr);
break;
case kS390_MulHighS64:
- ASSEMBLE_BIN_OP(RRRInstr(MulHighS64), nullInstr, nullInstr);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ ASSEMBLE_BIN_OP(RRRInstr(MulHighS64), nullInstr, nullInstr);
+ } else {
+ __ Push(r2, r3, i.InputRegister(0), i.InputRegister(1));
+ __ Pop(r2, r3);
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, 0, kScratchReg);
+ __ CallCFunction(ExternalReference::int64_mul_high_function(), 2, 0);
+ }
+ __ mov(kScratchReg, r2);
+ __ Pop(r2, r3);
+ __ mov(i.OutputRegister(), kScratchReg);
+ }
break;
case kS390_MulFloat:
ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
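The kS390_MulHighS64 fallback above handles processors without MISC_INSTR_EXT2 by saving r2/r3, loading the operands into them, calling the int64_mul_high C function inside an internal frame, and moving the result out of r2 into the output register. What that helper computes, expressed as a scalar sketch using the compiler's 128-bit integer extension (illustrative only):

    #include <cstdint>

    // High 64 bits of the full 128-bit signed product (GCC/Clang __int128).
    int64_t MulHighS64(int64_t a, int64_t b) {
      return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
    }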
@@ -3004,6 +3017,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
+
+ case kS390_I16x8DotI8x16S: {
+ __ I16x8DotI8x16S(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kS390_I32x4DotI8x16AddS: {
+ __ I32x4DotI8x16AddS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ break;
+ }
case kS390_I16x8Q15MulRSatS: {
__ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
@@ -3175,14 +3200,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
break;
}
- case kS390_LoadDecompressTaggedPointer: {
- CHECK(instr->HasOutput());
- __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- break;
- }
- case kS390_LoadDecompressAnyTagged: {
+ case kS390_LoadDecompressTagged: {
CHECK(instr->HasOutput());
- __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
break;
}
default:
@@ -3300,7 +3320,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Overflow checked for add/sub only.
DCHECK((condition != kOverflow && condition != kNotOverflow) ||
(op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
- op == kS390_Sub64 || op == kS390_Mul32));
+ op == kS390_Sub64 || op == kS390_Mul32 ||
+ op == kS390_Mul64WithOverflow));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
@@ -3404,6 +3425,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ Push(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -3575,9 +3600,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
}
- __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
@@ -3596,9 +3621,59 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
- // Must be kept in sync with {MoveTempLocationTo}.
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ S390OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
+ __ LoadU64(r1, g.ToMemOperand(source));
+ __ Push(r1);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // Bump the stack pointer and assemble the move.
+ __ lay(sp, MemOperand(sp, -(new_slots * kSystemPointerSize)));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ S390OperandConverter g(this, nullptr);
+ if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
+ __ Pop(r1);
+ __ StoreU64(r1, g.ToMemOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ lay(sp, MemOperand(sp, new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ lay(sp, MemOperand(sp, temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
+ // Must be kept in sync with {MoveTempLocationTo}.
if (!IsFloatingPoint(rep) ||
((IsFloatingPoint(rep) &&
!move_cycle_.pending_double_scratch_register_use))) {
@@ -3609,24 +3684,8 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
DCHECK(!AreAliased(kScratchReg, r0, r1));
AssembleMove(source, &scratch);
} else {
- DCHECK(!source->IsRegister() && !source->IsStackSlot());
// The scratch register is blocked by pending moves. Use the stack instead.
- int new_slots = ElementSizeInPointers(rep);
- S390OperandConverter g(this, nullptr);
- if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
- __ LoadU64(r1, g.ToMemOperand(source));
- __ Push(r1);
- } else {
- // Bump the stack pointer and assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ lay(sp, MemOperand(sp, -(new_slots * kSystemPointerSize)));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -3641,32 +3700,14 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
DCHECK(!AreAliased(kScratchReg, r0, r1));
AssembleMove(&scratch, dest);
} else {
- DCHECK(!dest->IsRegister() && !dest->IsStackSlot());
- S390OperandConverter g(this, nullptr);
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
- __ Pop(r1);
- __ StoreU64(r1, g.ToMemOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ lay(sp, MemOperand(sp, new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
move_cycle_ = MoveCycleState();
}
void CodeGenerator::SetPendingMove(MoveOperands* move) {
- if (move->source().IsFPStackSlot() && !move->destination().IsFPRegister()) {
- move_cycle_.pending_double_scratch_register_use = true;
- } else if (move->source().IsConstant() &&
- (move->destination().IsDoubleStackSlot() ||
- move->destination().IsFloatStackSlot())) {
+ if ((move->source().IsConstant() || move->source().IsFPStackSlot()) &&
+ !move->destination().IsFPRegister()) {
move_cycle_.pending_double_scratch_register_use = true;
}
}
@@ -3736,7 +3777,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ __ LoadTaggedRoot(dst, index);
} else {
__ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
}
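Note on the Push/Pop/PopTempStackSlots helpers introduced above: they factor out the stack-temp path that MoveToTempLocation/MoveTempLocationTo use when a gap-move cycle must be resolved while the scratch register is blocked by pending moves; Push bumps sp and tracks the slots in temp_slots_, Pop reads the value back and releases them. A minimal sketch of the general pattern, not V8 code:

    #include <vector>

    // Breaking a move cycle with a stack temp when no scratch register is free.
    template <typename T>
    void BreakCycleWithStackTemp(T& a, T& b, std::vector<T>& stack) {
      stack.push_back(a);  // CodeGenerator::Push: grow sp, track temp_slots_
      a = b;               // perform the remaining moves of the cycle
      b = stack.back();    // CodeGenerator::Pop: read the temp back...
      stack.pop_back();    // ...and release the slot
    }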
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index f362cddcf7..70216904fa 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -287,6 +287,7 @@ namespace compiler {
V(S390_I32x4ExtAddPairwiseI16x8U) \
V(S390_I32x4TruncSatF64x2SZero) \
V(S390_I32x4TruncSatF64x2UZero) \
+ V(S390_I32x4DotI8x16AddS) \
V(S390_I16x8Splat) \
V(S390_I16x8ExtractLaneU) \
V(S390_I16x8ExtractLaneS) \
@@ -328,6 +329,7 @@ namespace compiler {
V(S390_I16x8ExtAddPairwiseI8x16S) \
V(S390_I16x8ExtAddPairwiseI8x16U) \
V(S390_I16x8Q15MulRSatS) \
+ V(S390_I16x8DotI8x16S) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
@@ -398,8 +400,7 @@ namespace compiler {
V(S390_LoadSimd128) \
V(S390_StoreCompressTagged) \
V(S390_LoadDecompressTaggedSigned) \
- V(S390_LoadDecompressTaggedPointer) \
- V(S390_LoadDecompressAnyTagged)
+ V(S390_LoadDecompressTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
@@ -418,7 +419,8 @@ namespace compiler {
V(MR) /* [%r0 ] */ \
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1 ] */ \
- V(MRRI) /* [%r0 + %r1 + K] */
+ V(MRRI) /* [%r0 + %r1 + K] */ \
+ V(Root) /* [%r0 + K] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index fa0a60a019..6c72e1ad13 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -253,6 +253,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4ExtAddPairwiseI16x8U:
case kS390_I32x4TruncSatF64x2SZero:
case kS390_I32x4TruncSatF64x2UZero:
+ case kS390_I32x4DotI8x16AddS:
case kS390_I16x8Splat:
case kS390_I16x8ExtractLaneU:
case kS390_I16x8ExtractLaneS:
@@ -294,6 +295,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8ExtAddPairwiseI8x16S:
case kS390_I16x8ExtAddPairwiseI8x16U:
case kS390_I16x8Q15MulRSatS:
+ case kS390_I16x8DotI8x16S:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
@@ -358,8 +360,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverseSimd128:
case kS390_Peek:
case kS390_LoadDecompressTaggedSigned:
- case kS390_LoadDecompressTaggedPointer:
- case kS390_LoadDecompressAnyTagged:
+ case kS390_LoadDecompressTagged:
case kS390_S128Load8Splat:
case kS390_S128Load16Splat:
case kS390_S128Load32Splat:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index a0192b0022..52d844c82e 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -220,8 +220,14 @@ class S390OperandGenerator final : public OperandGenerator {
AddressOption::kAllowInputSwap);
#endif
DCHECK(m.matches());
- if ((m.displacement() == nullptr ||
- CanBeImmediate(m.displacement(), immediate_mode))) {
+ if (m.base() != nullptr &&
+ m.base()->opcode() == IrOpcode::kLoadRootRegister) {
+ DCHECK_EQ(m.index(), nullptr);
+ DCHECK_EQ(m.scale(), 0);
+ inputs[(*input_count)++] = UseImmediate(m.displacement());
+ return kMode_Root;
+ } else if ((m.displacement() == nullptr ||
+ CanBeImmediate(m.displacement(), immediate_mode))) {
DCHECK_EQ(0, m.scale());
return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
m.displacement_mode(), inputs,
@@ -303,10 +309,10 @@ ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
opcode = kS390_LoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
- opcode = kS390_LoadDecompressTaggedPointer;
+ opcode = kS390_LoadDecompressTagged;
break;
case MachineRepresentation::kTagged:
- opcode = kS390_LoadDecompressAnyTagged;
+ opcode = kS390_LoadDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
@@ -748,7 +754,7 @@ static void VisitGeneralStore(
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
@@ -2971,6 +2977,22 @@ void InstructionSelector::VisitStoreLane(Node* node) {
Emit(opcode, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_I16x8DotI8x16S, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
+ S390OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kS390_I32x4DotI8x16AddS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
S390OperandGenerator g(this);
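The two new visitors lower the relaxed integer dot products to the macro-assembler helpers I16x8DotI8x16S and I32x4DotI8x16AddS used by the new kS390_ cases in the code generator above. A scalar sketch of the accumulating variant, assuming the usual Wasm relaxed-SIMD semantics (illustrative only):

    #include <cstdint>

    // dst[i] = sum of four adjacent signed 8-bit products, plus acc[i].
    void DotI8x16AddS(const int8_t a[16], const int8_t b[16],
                      const int32_t acc[4], int32_t dst[4]) {
      for (int i = 0; i < 4; ++i) {
        int32_t sum = 0;
        for (int k = 0; k < 4; ++k) {
          sum += int32_t{a[4 * i + k]} * b[4 * i + k];
        }
        // The final accumulation wraps on overflow.
        dst[i] = static_cast<int32_t>(static_cast<uint32_t>(sum) +
                                      static_cast<uint32_t>(acc[i]));
      }
    }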
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index e3f759f570..b37032a741 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -4,6 +4,7 @@
#include <limits>
+#include "src/base/optional.h"
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
@@ -13,6 +14,7 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/codegen/x64/register-x64.h"
#include "src/common/globals.h"
+#include "src/common/ptr-compr-inl.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
@@ -33,7 +35,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
@@ -53,6 +55,15 @@ class X64OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kCompressedHeapObject) {
+ CHECK(COMPRESS_POINTERS_BOOL);
+ CHECK(V8_STATIC_ROOTS_BOOL || !gen_->isolate()->bootstrapper());
+ RootIndex root_index;
+ CHECK(gen_->isolate()->roots_table().IsRootHandle(constant.ToHeapObject(),
+ &root_index));
+ return Immediate(
+ MacroAssemblerBase::ReadOnlyRootPtr(root_index, gen_->isolate()));
+ }
if (constant.type() == Constant::kFloat64) {
DCHECK_EQ(0, constant.ToFloat64().AsUint64());
return Immediate(0);
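The new kCompressedHeapObject branch lets constants that are read-only roots be encoded as plain 32-bit immediates: under pointer compression a tagged pointer is just its low 32 bits relative to the cage base, and with static roots (or outside bootstrapping) a read-only root's compressed address is a known constant, which ReadOnlyRootPtr supplies. A sketch of the underlying idea, with a hypothetical helper name (not the V8 API):

    #include <cstdint>

    // Compression keeps only the low 32 bits of the tagged address (the
    // cage-relative offset, tag bit included), so a constant location
    // yields a constant immediate.
    uint32_t CompressTaggedPointer(uintptr_t full_tagged_address) {
      return static_cast<uint32_t>(full_tagged_address);
    }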
@@ -293,12 +304,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
- __ CheckPageFlag(
- value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- exit());
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
__ leaq(scratch1_, operand_);
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
@@ -334,29 +344,29 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
};
template <std::memory_order order>
-int EmitStore(TurboAssembler* tasm, Operand operand, Register value,
- MachineRepresentation rep) {
+int EmitStore(MacroAssembler* masm, Operand operand, Register value,
+ MachineRepresentation rep) {
int store_instr_offset;
if (order == std::memory_order_relaxed) {
- store_instr_offset = tasm->pc_offset();
+ store_instr_offset = masm->pc_offset();
switch (rep) {
case MachineRepresentation::kWord8:
- tasm->movb(operand, value);
+ masm->movb(operand, value);
break;
case MachineRepresentation::kWord16:
- tasm->movw(operand, value);
+ masm->movw(operand, value);
break;
case MachineRepresentation::kWord32:
- tasm->movl(operand, value);
+ masm->movl(operand, value);
break;
case MachineRepresentation::kWord64:
- tasm->movq(operand, value);
+ masm->movq(operand, value);
break;
case MachineRepresentation::kTagged:
- tasm->StoreTaggedField(operand, value);
+ masm->StoreTaggedField(operand, value);
break;
case MachineRepresentation::kSandboxedPointer:
- tasm->StoreSandboxedPointerField(operand, value);
+ masm->StoreSandboxedPointerField(operand, value);
break;
default:
UNREACHABLE();
@@ -367,28 +377,28 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value,
DCHECK_EQ(order, std::memory_order_seq_cst);
switch (rep) {
case MachineRepresentation::kWord8:
- tasm->movq(kScratchRegister, value);
- store_instr_offset = tasm->pc_offset();
- tasm->xchgb(kScratchRegister, operand);
+ masm->movq(kScratchRegister, value);
+ store_instr_offset = masm->pc_offset();
+ masm->xchgb(kScratchRegister, operand);
break;
case MachineRepresentation::kWord16:
- tasm->movq(kScratchRegister, value);
- store_instr_offset = tasm->pc_offset();
- tasm->xchgw(kScratchRegister, operand);
+ masm->movq(kScratchRegister, value);
+ store_instr_offset = masm->pc_offset();
+ masm->xchgw(kScratchRegister, operand);
break;
case MachineRepresentation::kWord32:
- tasm->movq(kScratchRegister, value);
- store_instr_offset = tasm->pc_offset();
- tasm->xchgl(kScratchRegister, operand);
+ masm->movq(kScratchRegister, value);
+ store_instr_offset = masm->pc_offset();
+ masm->xchgl(kScratchRegister, operand);
break;
case MachineRepresentation::kWord64:
- tasm->movq(kScratchRegister, value);
- store_instr_offset = tasm->pc_offset();
- tasm->xchgq(kScratchRegister, operand);
+ masm->movq(kScratchRegister, value);
+ store_instr_offset = masm->pc_offset();
+ masm->xchgq(kScratchRegister, operand);
break;
case MachineRepresentation::kTagged:
- store_instr_offset = tasm->pc_offset();
- tasm->AtomicStoreTaggedField(operand, value);
+ store_instr_offset = masm->pc_offset();
+ masm->AtomicStoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
@@ -397,29 +407,29 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value,
}
template <std::memory_order order>
-int EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
- MachineRepresentation rep);
+int EmitStore(MacroAssembler* masm, Operand operand, Immediate value,
+ MachineRepresentation rep);
template <>
-int EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
- Immediate value,
- MachineRepresentation rep) {
- int store_instr_offset = tasm->pc_offset();
+int EmitStore<std::memory_order_relaxed>(MacroAssembler* masm, Operand operand,
+ Immediate value,
+ MachineRepresentation rep) {
+ int store_instr_offset = masm->pc_offset();
switch (rep) {
case MachineRepresentation::kWord8:
- tasm->movb(operand, value);
+ masm->movb(operand, value);
break;
case MachineRepresentation::kWord16:
- tasm->movw(operand, value);
+ masm->movw(operand, value);
break;
case MachineRepresentation::kWord32:
- tasm->movl(operand, value);
+ masm->movl(operand, value);
break;
case MachineRepresentation::kWord64:
- tasm->movq(operand, value);
+ masm->movq(operand, value);
break;
case MachineRepresentation::kTagged:
- tasm->StoreTaggedField(operand, value);
+ masm->StoreTaggedField(operand, value);
break;
default:
UNREACHABLE();
@@ -477,25 +487,31 @@ class WasmOutOfLineTrap : public OutOfLineCode {
class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
public:
- WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
- : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr,
+ TrapId trap_id)
+ : WasmOutOfLineTrap(gen, instr), pc_(pc), trap_id_(trap_id) {}
void Generate() final {
DCHECK(v8_flags.wasm_bounds_checks && !v8_flags.wasm_enforce_bounds_checks);
gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
- GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
+ GenerateWithTrapId(trap_id_);
}
private:
int pc_;
+ TrapId trap_id_;
};
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
const MemoryAccessMode access_mode = instr->memory_access_mode();
- if (access_mode == kMemoryAccessProtected) {
- zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
+ if (access_mode == kMemoryAccessProtectedMemOutOfBounds) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr,
+ TrapId::kTrapMemOutOfBounds);
+ } else if (access_mode == kMemoryAccessProtectedNullDereference) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr,
+ TrapId::kTrapNullDereference);
}
}
@@ -503,13 +519,13 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
+ DCHECK_EQ(kMemoryAccessDirect, instr->memory_access_mode());
}
#endif // V8_ENABLE_WEBASSEMBLY
#ifdef V8_IS_TSAN
-void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
+void EmitMemoryProbeForTrapHandlerIfNeeded(MacroAssembler* masm,
Register scratch, Operand operand,
StubCallMode mode, int size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
@@ -522,16 +538,16 @@ void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
mode == StubCallMode::kCallWasmRuntimeStub) {
switch (size) {
case kInt8Size:
- tasm->movb(scratch, operand);
+ masm->movb(scratch, operand);
break;
case kInt16Size:
- tasm->movw(scratch, operand);
+ masm->movw(scratch, operand);
break;
case kInt32Size:
- tasm->movl(scratch, operand);
+ masm->movl(scratch, operand);
break;
case kInt64Size:
- tasm->movq(scratch, operand);
+ masm->movq(scratch, operand);
break;
default:
UNREACHABLE();
@@ -569,14 +585,14 @@ class OutOfLineTSANStore : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallWasmRuntimeStub,
memory_order_);
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
- tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallBuiltinPointer, memory_order_);
}
@@ -592,7 +608,7 @@ class OutOfLineTSANStore : public OutOfLineCode {
Zone* zone_;
};
-void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
+void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, MacroAssembler* masm,
Operand operand, Register value_reg,
X64OperandConverter& i, StubCallMode mode, int size,
std::memory_order order) {
@@ -606,45 +622,45 @@ void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
scratch0, mode, size, order);
- tasm->jmp(tsan_ool->entry());
- tasm->bind(tsan_ool->exit());
+ masm->jmp(tsan_ool->entry());
+ masm->bind(tsan_ool->exit());
}
template <std::memory_order order>
-Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
+Register GetTSANValueRegister(MacroAssembler* masm, Register value,
X64OperandConverter& i,
MachineRepresentation rep) {
if (rep == MachineRepresentation::kSandboxedPointer) {
// SandboxedPointers need to be encoded.
Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
- tasm->EncodeSandboxedPointer(value_reg);
+ masm->movq(value_reg, value);
+ masm->EncodeSandboxedPointer(value_reg);
return value_reg;
}
return value;
}
template <std::memory_order order>
-Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
+Register GetTSANValueRegister(MacroAssembler* masm, Immediate value,
X64OperandConverter& i,
MachineRepresentation rep);
template <>
Register GetTSANValueRegister<std::memory_order_relaxed>(
- TurboAssembler* tasm, Immediate value, X64OperandConverter& i,
+ MacroAssembler* masm, Immediate value, X64OperandConverter& i,
MachineRepresentation rep) {
Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
+ masm->movq(value_reg, value);
if (rep == MachineRepresentation::kSandboxedPointer) {
// SandboxedPointers need to be encoded.
- tasm->EncodeSandboxedPointer(value_reg);
+ masm->EncodeSandboxedPointer(value_reg);
}
return value_reg;
}
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand, ValueT value,
+ MacroAssembler* masm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep, Instruction* instr) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
@@ -654,17 +670,17 @@ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
// path. It is not crucial, but it would be nice to remove this restriction.
if (codegen->code_kind() != CodeKind::FOR_TESTING) {
if (instr->HasMemoryAccessMode()) {
- EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(),
- instr, tasm->pc_offset());
+ EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr,
+ masm->pc_offset());
}
int size = ElementSizeInBytes(rep);
- EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
+ EmitMemoryProbeForTrapHandlerIfNeeded(masm, i.TempRegister(0), operand,
stub_call_mode, size);
- Register value_reg = GetTSANValueRegister<order>(tasm, value, i, rep);
- EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
+ Register value_reg = GetTSANValueRegister<order>(masm, value, i, rep);
+ EmitTSANStoreOOL(zone, codegen, masm, operand, value_reg, i, stub_call_mode,
size, order);
} else {
- int store_instr_offset = EmitStore<order>(tasm, operand, value, rep);
+ int store_instr_offset = EmitStore<order>(masm, operand, value, rep);
if (instr->HasMemoryAccessMode()) {
EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(),
instr, store_instr_offset);
@@ -718,7 +734,7 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
};
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
+ MacroAssembler* masm, Operand operand,
X64OperandConverter& i, StubCallMode mode,
int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
@@ -731,26 +747,26 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANRelaxedLoad>(codegen, operand,
scratch0, mode, size);
- tasm->jmp(tsan_ool->entry());
- tasm->bind(tsan_ool->exit());
+ masm->jmp(tsan_ool->entry());
+ masm->bind(tsan_ool->exit());
}
#else
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand, ValueT value,
+ MacroAssembler* masm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep, Instruction* instr) {
DCHECK(order == std::memory_order_relaxed ||
order == std::memory_order_seq_cst);
- int store_instr_off = EmitStore<order>(tasm, operand, value, rep);
+ int store_instr_off = EmitStore<order>(masm, operand, value, rep);
if (instr->HasMemoryAccessMode()) {
EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_off);
}
}
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
+ MacroAssembler* masm, Operand operand,
X64OperandConverter& i, StubCallMode mode,
int size) {}
#endif // V8_IS_TSAN
@@ -789,38 +805,36 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_COMPARE(cmp_instr, test_instr) \
- do { \
- if (HasAddressingMode(instr)) { \
- size_t index = 0; \
- Operand left = i.MemoryOperand(&index); \
- if (HasImmediateInput(instr, index)) { \
- __ cmp_instr(left, i.InputImmediate(index)); \
- } else { \
- __ cmp_instr(left, i.InputRegister(index)); \
- } \
- } else { \
- if (HasImmediateInput(instr, 1)) { \
- Immediate right = i.InputImmediate(1); \
- if (HasRegisterInput(instr, 0)) { \
- if (right.value() == 0 && \
- (FlagsConditionField::decode(opcode) == kEqual || \
- FlagsConditionField::decode(opcode) == kNotEqual)) { \
- __ test_instr(i.InputRegister(0), i.InputRegister(0)); \
- } else { \
- __ cmp_instr(i.InputRegister(0), right); \
- } \
- } else { \
- __ cmp_instr(i.InputOperand(0), right); \
- } \
- } else { \
- if (HasRegisterInput(instr, 1)) { \
- __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
- } else { \
- __ cmp_instr(i.InputRegister(0), i.InputOperand(1)); \
- } \
- } \
- } \
+#define ASSEMBLE_COMPARE(cmp_instr, test_instr) \
+ do { \
+ if (HasAddressingMode(instr)) { \
+ size_t index = 0; \
+ Operand left = i.MemoryOperand(&index); \
+ if (HasImmediateInput(instr, index)) { \
+ __ cmp_instr(left, i.InputImmediate(index)); \
+ } else { \
+ __ cmp_instr(left, i.InputRegister(index)); \
+ } \
+ } else { \
+ if (HasImmediateInput(instr, 1)) { \
+ Immediate right = i.InputImmediate(1); \
+ if (HasRegisterInput(instr, 0)) { \
+ if (right.value() == 0) { \
+ __ test_instr(i.InputRegister(0), i.InputRegister(0)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), right); \
+ } \
+ } else { \
+ __ cmp_instr(i.InputOperand(0), right); \
+ } \
+ } else { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
+ } \
} while (false)
#define ASSEMBLE_TEST(asm_instr) \
@@ -923,7 +937,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
if (HasAddressingMode(instr)) { \
size_t index = 1; \
Operand right = i.MemoryOperand(&index); \
@@ -983,7 +997,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_SIMD_BINOP(opcode) \
do { \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputSimd128Register(1)); \
} else { \
@@ -992,6 +1006,13 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (false)
+#define ASSEMBLE_SIMD256_BINOP(opcode) \
+ do { \
+ CpuFeatureScope avx_scope(masm(), AVX); \
+ __ v##opcode(i.OutputSimd256Register(), i.InputSimd256Register(0), \
+ i.InputSimd256Register(1)); \
+ } while (false)
+
#define ASSEMBLE_SIMD_INSTR(opcode, dst_operand, index) \
do { \
if (instr->InputAt(index)->IsSimd128Register()) { \
@@ -1015,7 +1036,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
XMMRegister dst = i.OutputSimd128Register(); \
byte input_index = instr->InputCount() == 2 ? 1 : 0; \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
DCHECK(instr->InputAt(input_index)->IsSimd128Register()); \
__ v##opcode(dst, i.InputSimd128Register(0), \
i.InputSimd128Register(input_index)); \
@@ -1030,7 +1051,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
XMMRegister dst = i.OutputSimd128Register(); \
XMMRegister src = i.InputSimd128Register(0); \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
DCHECK(instr->InputAt(1)->IsSimd128Register()); \
__ v##opcode(dst, src, i.InputSimd128Register(1), imm); \
} else { \
@@ -1061,7 +1082,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
XMMRegister dst = i.OutputSimd128Register(); \
if (HasImmediateInput(instr, 1)) { \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(dst, i.InputSimd128Register(0), \
byte{i.InputInt##width(1)}); \
} else { \
@@ -1074,7 +1095,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
__ andq(kScratchRegister, Immediate(mask)); \
__ Movq(kScratchDoubleReg, kScratchRegister); \
if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
+ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg); \
} else { \
DCHECK_EQ(dst, i.InputSimd128Register(0)); \
@@ -1102,13 +1123,13 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \
} while (false)
-#define ASSEMBLE_SEQ_CST_STORE(rep) \
- do { \
- Register value = i.InputRegister(0); \
- Operand operand = i.MemoryOperand(1); \
- EmitTSANAwareStore<std::memory_order_seq_cst>( \
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
- rep, instr); \
+#define ASSEMBLE_SEQ_CST_STORE(rep) \
+ do { \
+ Register value = i.InputRegister(0); \
+ Operand operand = i.MemoryOperand(1); \
+ EmitTSANAwareStore<std::memory_order_seq_cst>( \
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(), rep, \
+ instr); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1127,7 +1148,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
void AdjustStackPointerForTailCall(Instruction* instr,
- TurboAssembler* assembler, Linkage* linkage,
+ MacroAssembler* assembler, Linkage* linkage,
OptimizedCompilationInfo* info,
FrameAccessState* state,
int new_slot_above_sp,
@@ -1163,7 +1184,7 @@ void AdjustStackPointerForTailCall(Instruction* instr,
}
}
-void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms,
+void SetupSimdImmediateInRegister(MacroAssembler* assembler, uint32_t* imms,
XMMRegister reg) {
assembler->Move(reg, make_uint64(imms[3], imms[2]),
make_uint64(imms[1], imms[0]));
@@ -1186,7 +1207,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
+ AdjustStackPointerForTailCall(instr, masm(), linkage(), info(),
frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
@@ -1205,14 +1226,14 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
+ AdjustStackPointerForTailCall(instr, masm(), linkage(), info(),
frame_access_state(), first_unused_slot_offset,
false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
+ AdjustStackPointerForTailCall(instr, masm(), linkage(), info(),
frame_access_state(), first_unused_slot_offset);
}
@@ -1263,14 +1284,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
- Handle<CodeT> code = i.InputCode(0);
+ Handle<Code> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ LoadCodeObjectEntry(reg, reg);
+ __ LoadCodeEntry(reg, reg);
__ call(reg);
}
RecordCallPosition(instr);
@@ -1323,14 +1344,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
- Handle<CodeT> code = i.InputCode(0);
+ Handle<Code> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ LoadCodeObjectEntry(reg, reg);
+ __ LoadCodeEntry(reg, reg);
__ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
@@ -1358,9 +1379,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx,
- FieldOperand(func, JSFunction::kCodeOffset));
- __ CallCodeTObject(rcx);
+ __ LoadTaggedField(rcx, FieldOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(rcx);
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
@@ -1464,7 +1484,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -1540,8 +1560,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchStoreWithWriteBarrier: // Fall through.
case kArchAtomicStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ // {EmitTSANAwareStore} calls EmitOOLTrapIfNeeded. No need to do it here.
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -1561,12 +1581,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode());
if (arch_opcode == kArchStoreWithWriteBarrier) {
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr);
} else {
DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
EmitTSANAwareStore<std::memory_order_seq_cst>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr);
}
if (mode > RecordWriteMode::kValueIsPointer) {
@@ -1873,7 +1893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Cvtss2sd);
break;
case kSSEFloat32Round: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1930,7 +1950,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// The following 2 instructions implicitly use rax.
__ fnstsw_ax();
if (CpuFeatures::IsSupported(SAHF)) {
- CpuFeatureScope sahf_scope(tasm(), SAHF);
+ CpuFeatureScope sahf_scope(masm(), SAHF);
__ sahf();
} else {
__ shrl(rax, Immediate(8));
@@ -2066,7 +2086,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -2389,7 +2409,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kAVXFloat32Cmp: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) {
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -2413,7 +2433,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat64Cmp: {
- CpuFeatureScope avx_scope(tasm(), AVX);
+ CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) {
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -2446,13 +2466,80 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchRegister);
break;
}
- case kX64F64x2Abs:
+ case kX64FAbs: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
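+ // The per-shape opcodes (kX64F32x4Abs, kX64F64x2Abs, ...) are folded into
+ // this generic opcode; the lane size and vector length decoded above pick
+ // the concrete instruction sequence below.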
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Abs
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrld(kScratchDoubleReg, byte{1});
+ __ Andps(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Psrld(dst, byte{1});
+ __ Andps(dst, src);
+ }
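+ // (pcmpeqd sets every bit, psrld by 1 leaves 0x7fffffff per lane, and the
+ // andps clears only the sign bits, so no memory constant is needed.)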
+ break;
+ }
+ case kL64: {
+ // F64x2Abs
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
case kX64Float64Abs: {
__ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
kScratchRegister);
break;
}
- case kX64F64x2Neg:
+ case kX64FNeg: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Neg
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pslld(kScratchDoubleReg, byte{31});
+ __ Xorps(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Pslld(dst, byte{31});
+ __ Xorps(dst, src);
+ }
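+ // (Likewise, pslld by 31 leaves only the sign bit 0x80000000 per lane, and
+ // the xorps flips each lane's sign.)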
+ break;
+ }
+ case kL64: {
+ // F64x2Neg
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
case kX64Float64Neg: {
__ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
kScratchRegister);
@@ -2487,12 +2574,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord8, instr);
} else {
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord8, instr);
}
break;
@@ -2522,12 +2609,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord16, instr);
} else {
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord16, instr);
}
break;
@@ -2538,7 +2625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
- EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
@@ -2554,12 +2641,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord32, instr);
} else {
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord32, instr);
}
}
@@ -2570,41 +2657,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
- EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- break;
- }
- case kX64MovqDecompressTaggedPointer: {
- CHECK(instr->HasOutput());
- Operand address(i.MemoryOperand());
- __ DecompressTaggedPointer(i.OutputRegister(), address);
- EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
- case kX64MovqDecompressAnyTagged: {
+ case kX64MovqDecompressTagged: {
CHECK(instr->HasOutput());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
Operand address(i.MemoryOperand());
- __ DecompressAnyTagged(i.OutputRegister(), address);
- EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ __ DecompressTagged(i.OutputRegister(), address);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqCompressTagged: {
+ // {EmitTSANAwareStore} calls EmitOOLTrapIfNeeded. No need to do it here.
CHECK(!instr->HasOutput());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr);
} else {
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr);
}
break;
@@ -2615,7 +2697,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
__ movq(dst, address);
__ DecodeSandboxedPointer(dst);
- EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(),
kSystemPointerSize);
break;
@@ -2627,7 +2709,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!HasImmediateInput(instr, index));
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kSandboxedPointer, instr);
break;
}
@@ -2636,7 +2718,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
- EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
@@ -2644,12 +2726,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord64, instr);
} else {
Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>(
- zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord64, instr);
}
}
@@ -2800,14 +2882,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int slots = stack_decrement / kSystemPointerSize;
// Whenever codegen uses pushq, we need to check if stack_decrement
// contains any extra padding and adjust the stack before the pushq.
- if (HasImmediateInput(instr, 1)) {
- __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
- __ pushq(i.InputImmediate(1));
- } else if (HasAddressingMode(instr)) {
+ if (HasAddressingMode(instr)) {
__ AllocateStackSpace(stack_decrement - kSystemPointerSize);
size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ pushq(operand);
+ } else if (HasImmediateInput(instr, 1)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputImmediate(1));
} else {
InstructionOperand* input = instr->InputAt(1);
if (input->IsRegister()) {
@@ -2878,73 +2960,374 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64F64x2Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (instr->InputAt(0)->IsFPRegister()) {
- __ Movddup(dst, i.InputDoubleRegister(0));
+ case kX64FSplat: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Splat
+ __ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kL64: {
+ // F64x2Splat
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Movddup(dst, i.InputDoubleRegister(0));
+ } else {
+ __ Movddup(dst, i.InputOperand(0));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
} else {
- __ Movddup(dst, i.InputOperand(0));
+ UNREACHABLE();
}
break;
}
- case kX64F64x2ExtractLane: {
- __ F64x2ExtractLane(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputUint8(1));
+ case kX64FExtractLane: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4ExtractLane
+ __ F32x4ExtractLane(i.OutputFloatRegister(),
+ i.InputSimd128Register(0), i.InputUint8(1));
+ break;
+ }
+ case kL64: {
+ // F64x2ExtractLane
+ __ F64x2ExtractLane(i.OutputDoubleRegister(),
+ i.InputDoubleRegister(0), i.InputUint8(1));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2ReplaceLane: {
- __ F64x2ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputDoubleRegister(2), i.InputInt8(1));
+ case kX64FReplaceLane: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4ReplaceLane
+ // The insertps instruction uses imm8[5:4] to indicate the lane
+ // that needs to be replaced.
+ byte select = i.InputInt8(1) << 4 & 0x30;
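+ // For example, replacing lane 2 gives (2 << 4) & 0x30 == 0x20, i.e.
+ // imm8[5:4] == 0b10.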
+ if (instr->InputAt(2)->IsFPRegister()) {
+ __ Insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2),
+ select);
+ } else {
+ __ Insertps(i.OutputSimd128Register(), i.InputOperand(2), select);
+ }
+ break;
+ }
+ case kL64: {
+ // F64x2ReplaceLane
+ __ F64x2ReplaceLane(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.InputDoubleRegister(2), i.InputInt8(1));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Sqrt: {
- __ Sqrtpd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ case kX64FSqrt: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Sqrt
+ __ Sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kL64: {
+ // F64x2Sqrt
+ __ Sqrtpd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Add: {
- ASSEMBLE_SIMD_BINOP(addpd);
+ case kX64FAdd: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Add
+ ASSEMBLE_SIMD_BINOP(addps);
+ break;
+ }
+ case kL64: {
+ // F64x2Add
+ ASSEMBLE_SIMD_BINOP(addpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else if (vec_len == kV256) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x8Add
+ ASSEMBLE_SIMD256_BINOP(addps);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Sub: {
- ASSEMBLE_SIMD_BINOP(subpd);
+ case kX64FSub: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Sub
+ ASSEMBLE_SIMD_BINOP(subps);
+ break;
+ }
+ case kL64: {
+ // F64x2Sub
+ ASSEMBLE_SIMD_BINOP(subpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else if (vec_len == kV256) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x8Sub
+ ASSEMBLE_SIMD256_BINOP(subps);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Mul: {
- ASSEMBLE_SIMD_BINOP(mulpd);
+ case kX64FMul: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Mul
+ ASSEMBLE_SIMD_BINOP(mulps);
+ break;
+ }
+ case kL64: {
+ // F64x2Mul
+ ASSEMBLE_SIMD_BINOP(mulpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Div: {
- ASSEMBLE_SIMD_BINOP(divpd);
+ case kX64FDiv: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Div
+ ASSEMBLE_SIMD_BINOP(divps);
+ break;
+ }
+ case kL64: {
+ // F64x2Div
+ ASSEMBLE_SIMD_BINOP(divpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Min: {
- // Avoids a move in no-AVX case if dst = src0.
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ F64x2Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
+ case kX64FMin: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Min
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kL64: {
+ // F64x2Min
+ // Avoids a move in no-AVX case if dst = src0.
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ F64x2Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Max: {
- // Avoids a move in no-AVX case if dst = src0.
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ F64x2Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
+ case kX64FMax: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Max
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kL64: {
+ // F64x2Max
+ // Avoids a move in no-AVX case if dst = src0.
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ F64x2Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Eq: {
- ASSEMBLE_SIMD_BINOP(cmpeqpd);
+ case kX64FEq: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Eq
+ ASSEMBLE_SIMD_BINOP(cmpeqps);
+ break;
+ }
+ case kL64: {
+ // F64x2Eq
+ ASSEMBLE_SIMD_BINOP(cmpeqpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Ne: {
- ASSEMBLE_SIMD_BINOP(cmpneqpd);
+ case kX64FNe: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Ne
+ ASSEMBLE_SIMD_BINOP(cmpneqps);
+ break;
+ }
+ case kL64: {
+ // F64x2Ne
+ ASSEMBLE_SIMD_BINOP(cmpneqpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Lt: {
- ASSEMBLE_SIMD_BINOP(cmpltpd);
+ case kX64FLt: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Lt
+ ASSEMBLE_SIMD_BINOP(cmpltps);
+ break;
+ }
+ case kL64: {
+ // F64x2Lt
+ ASSEMBLE_SIMD_BINOP(cmpltpd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64F64x2Le: {
- ASSEMBLE_SIMD_BINOP(cmplepd);
+ case kX64FLe: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // F32x4Le
+ ASSEMBLE_SIMD_BINOP(cmpleps);
+ break;
+ }
+ case kL64: {
+ // F64x2Le
+ ASSEMBLE_SIMD_BINOP(cmplepd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
case kX64F64x2Qfma: {
@@ -2993,27 +3376,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchRegister);
break;
}
- case kX64F32x4Splat: {
- __ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));
- break;
- }
- case kX64F32x4ExtractLane: {
- __ F32x4ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
- i.InputUint8(1));
- break;
- }
- case kX64F32x4ReplaceLane: {
- // The insertps instruction uses imm8[5:4] to indicate the lane
- // that needs to be replaced.
- byte select = i.InputInt8(1) << 4 & 0x30;
- if (instr->InputAt(2)->IsFPRegister()) {
- __ Insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2),
- select);
- } else {
- __ Insertps(i.OutputSimd128Register(), i.InputOperand(2), select);
- }
- break;
- }
case kX64F32x4SConvertI32x4: {
__ Cvtdq2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3032,80 +3394,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addps(dst, kScratchDoubleReg); // add hi and lo, may round.
break;
}
- case kX64F32x4Abs: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, byte{1});
- __ Andps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Psrld(dst, byte{1});
- __ Andps(dst, src);
- }
- break;
- }
- case kX64F32x4Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pslld(kScratchDoubleReg, byte{31});
- __ Xorps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Pslld(dst, byte{31});
- __ Xorps(dst, src);
- }
- break;
- }
- case kX64F32x4Sqrt: {
- __ Sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
- case kX64F32x4Add: {
- ASSEMBLE_SIMD_BINOP(addps);
- break;
- }
- case kX64F32x4Sub: {
- ASSEMBLE_SIMD_BINOP(subps);
- break;
- }
- case kX64F32x4Mul: {
- ASSEMBLE_SIMD_BINOP(mulps);
- break;
- }
- case kX64F32x4Div: {
- ASSEMBLE_SIMD_BINOP(divps);
- break;
- }
- case kX64F32x4Min: {
- __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
- break;
- }
- case kX64F32x4Max: {
- __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
- break;
- }
- case kX64F32x4Eq: {
- ASSEMBLE_SIMD_BINOP(cmpeqps);
- break;
- }
- case kX64F32x4Ne: {
- ASSEMBLE_SIMD_BINOP(cmpneqps);
- break;
- }
- case kX64F32x4Lt: {
- ASSEMBLE_SIMD_BINOP(cmpltps);
- break;
- }
- case kX64F32x4Le: {
- ASSEMBLE_SIMD_BINOP(cmpleps);
- break;
- }
case kX64F32x4Qfma: {
__ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), i.InputSimd128Register(2),
@@ -3146,90 +3434,608 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(maxpd);
break;
}
- case kX64I64x2Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (HasRegisterInput(instr, 0)) {
- __ Movq(dst, i.InputRegister(0));
- __ Movddup(dst, dst);
+ case kX64ISplat: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Splat
+ XMMRegister dst = i.OutputSimd128Register();
+ if (HasRegisterInput(instr, 0)) {
+ __ I8x16Splat(dst, i.InputRegister(0), kScratchDoubleReg);
+ } else {
+ __ I8x16Splat(dst, i.InputOperand(0), kScratchDoubleReg);
+ }
+ break;
+ }
+ case kL16: {
+ // I16x8Splat
+ XMMRegister dst = i.OutputSimd128Register();
+ if (HasRegisterInput(instr, 0)) {
+ __ I16x8Splat(dst, i.InputRegister(0));
+ } else {
+ __ I16x8Splat(dst, i.InputOperand(0));
+ }
+ break;
+ }
+ case kL32: {
+ // I32x4Splat
+ XMMRegister dst = i.OutputSimd128Register();
+ if (HasRegisterInput(instr, 0)) {
+ __ Movd(dst, i.InputRegister(0));
+ } else {
+ // TODO(v8:9198): Pshufd can load from aligned memory once
+ // supported.
+ __ Movd(dst, i.InputOperand(0));
+ }
+ __ Pshufd(dst, dst, uint8_t{0x0});
+ break;
+ }
+ case kL64: {
+ // I64x2Splat
+ XMMRegister dst = i.OutputSimd128Register();
+ if (HasRegisterInput(instr, 0)) {
+ __ Movq(dst, i.InputRegister(0));
+ __ Movddup(dst, dst);
+ } else {
+ __ Movddup(dst, i.InputOperand(0));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
} else {
- __ Movddup(dst, i.InputOperand(0));
+ UNREACHABLE();
}
break;
}
- case kX64I64x2ExtractLane: {
- __ Pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+ case kX64IExtractLane: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL32: {
+ // I32x4ExtractLane
+ __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kL64: {
+ // I64x2ExtractLane
+ __ Pextrq(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Abs: {
- __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0),
- kScratchDoubleReg);
+ case kX64IAbs: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Abs
+ __ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kL16: {
+ // I16x8Abs
+ __ Pabsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kL32: {
+ // I32x4Abs
+ __ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kL64: {
+ // I64x2Abs
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Neg: {
- __ I64x2Neg(i.OutputSimd128Register(), i.InputSimd128Register(0),
- kScratchDoubleReg);
+ case kX64INeg: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Neg
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignb(dst, kScratchDoubleReg);
+ } else {
+ __ Pxor(dst, dst);
+ __ Psubb(dst, src);
+ }
+ break;
+ }
+ case kL16: {
+ // I16x8Neg
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignw(dst, kScratchDoubleReg);
+ } else {
+ __ Pxor(dst, dst);
+ __ Psubw(dst, src);
+ }
+ break;
+ }
+ case kL32: {
+ // I32x4Neg
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignd(dst, kScratchDoubleReg);
+ } else {
+ __ Pxor(dst, dst);
+ __ Psubd(dst, src);
+ }
+ break;
+ }
+ case kL64: {
+ // I64x2Neg
+ __ I64x2Neg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2BitMask: {
- __ Movmskpd(i.OutputRegister(), i.InputSimd128Register(0));
+ case kX64IBitMask: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16BitMask
+ __ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
+ case kL16: {
+ // I16x8BitMask
+ Register dst = i.OutputRegister();
+ __ Packsswb(kScratchDoubleReg, i.InputSimd128Register(0));
+ __ Pmovmskb(dst, kScratchDoubleReg);
+ __ shrq(dst, Immediate(8));
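+ // packsswb put the input's eight word sign bits into the upper byte of the
+ // pmovmskb result, so the shift by 8 keeps exactly those bits.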
+ break;
+ }
+ case kL32: {
+ // I32x4BitMask
+ __ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
+ case kL64: {
+ // I64x2BitMask
+ __ Movmskpd(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Shl: {
- // Take shift value modulo 2^6.
- ASSEMBLE_SIMD_SHIFT(psllq, 6);
+ case kX64IShl: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Shl
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ if (HasImmediateInput(instr, 1)) {
+ __ I8x16Shl(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
+ } else {
+ __ I8x16Shl(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ }
+ break;
+ }
+ case kL16: {
+ // I16x8Shl
+ // Take shift value modulo 2^4.
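+ // (Wasm SIMD takes the shift count modulo the lane width in bits, hence
+ // mod 2^4 here and mod 2^5 / 2^6 for the wider lanes below.)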
+ ASSEMBLE_SIMD_SHIFT(psllw, 4);
+ break;
+ }
+ case kL32: {
+ // I32x4Shl
+ // Take shift value modulo 2^5.
+ ASSEMBLE_SIMD_SHIFT(pslld, 5);
+ break;
+ }
+ case kL64: {
+ // I64x2Shl
+ // Take shift value modulo 2^6.
+ ASSEMBLE_SIMD_SHIFT(psllq, 6);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2ShrS: {
- // TODO(zhin): there is vpsraq but requires AVX512
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (HasImmediateInput(instr, 1)) {
- __ I64x2ShrS(dst, src, i.InputInt6(1), kScratchDoubleReg);
+ case kX64IShrS: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16ShrS
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ if (HasImmediateInput(instr, 1)) {
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
+ } else {
+ __ I8x16ShrS(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ }
+ break;
+ }
+ case kL16: {
+ // I16x8ShrS
+ // Take shift value modulo 2^4.
+ ASSEMBLE_SIMD_SHIFT(psraw, 4);
+ break;
+ }
+ case kL32: {
+ // I32x4ShrS
+ // Take shift value modulo 2^5.
+ ASSEMBLE_SIMD_SHIFT(psrad, 5);
+ break;
+ }
+ case kL64: {
+ // I64x2ShrS
+ // TODO(zhin): there is vpsraq but requires AVX512
+ // TODO(zhin): there is vpsraq, but it requires AVX512
+ XMMRegister src = i.InputSimd128Register(0);
+ if (HasImmediateInput(instr, 1)) {
+ __ I64x2ShrS(dst, src, i.InputInt6(1), kScratchDoubleReg);
+ } else {
+ __ I64x2ShrS(dst, src, i.InputRegister(1), kScratchDoubleReg,
+ i.TempSimd128Register(0), kScratchRegister);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
} else {
- __ I64x2ShrS(dst, src, i.InputRegister(1), kScratchDoubleReg,
- i.TempSimd128Register(0), kScratchRegister);
+ UNREACHABLE();
}
break;
}
- case kX64I64x2Add: {
- ASSEMBLE_SIMD_BINOP(paddq);
+ case kX64IAdd: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Add
+ ASSEMBLE_SIMD_BINOP(paddb);
+ break;
+ }
+ case kL16: {
+ // I16x8Add
+ ASSEMBLE_SIMD_BINOP(paddw);
+ break;
+ }
+ case kL32: {
+ // I32x4Add
+ ASSEMBLE_SIMD_BINOP(paddd);
+ break;
+ }
+ case kL64: {
+ // I64x2Add
+ ASSEMBLE_SIMD_BINOP(paddq);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Sub: {
- ASSEMBLE_SIMD_BINOP(psubq);
+ case kX64ISub: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Sub
+ ASSEMBLE_SIMD_BINOP(psubb);
+ break;
+ }
+ case kL16: {
+ // I16x8Sub
+ ASSEMBLE_SIMD_BINOP(psubw);
+ break;
+ }
+ case kL32: {
+ // I32x4Sub
+ ASSEMBLE_SIMD_BINOP(psubd);
+ break;
+ }
+ case kL64: {
+ // I64x2Sub
+ ASSEMBLE_SIMD_BINOP(psubq);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Mul: {
- __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), i.TempSimd128Register(0),
- kScratchDoubleReg);
+ case kX64IMul: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL16: {
+ // I16x8Mul
+ ASSEMBLE_SIMD_BINOP(pmullw);
+ break;
+ }
+ case kL32: {
+ // I32x4Mul
+ ASSEMBLE_SIMD_BINOP(pmulld);
+ break;
+ }
+ case kL64: {
+ // I64x2Mul
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Eq: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- ASSEMBLE_SIMD_BINOP(pcmpeqq);
+ case kX64IEq: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16Eq
+ ASSEMBLE_SIMD_BINOP(pcmpeqb);
+ break;
+ }
+ case kL16: {
+ // I16x8Eq
+ ASSEMBLE_SIMD_BINOP(pcmpeqw);
+ break;
+ }
+ case kL32: {
+ // I32x4Eq
+ ASSEMBLE_SIMD_BINOP(pcmpeqd);
+ break;
+ }
+ case kL64: {
+ // I64x2Eq
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ ASSEMBLE_SIMD_BINOP(pcmpeqq);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ case kX64INe: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pcmpeqb(dst, i.InputSimd128Register(1));
+ __ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kL16: {
+ // I16x8Ne
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pcmpeqw(dst, i.InputSimd128Register(1));
+ __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kL32: {
+ // I32x4Ne
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kL64: {
+ // I64x2Ne
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2GtS: {
- __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
+ case kX64IGtS: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16GtS
+ ASSEMBLE_SIMD_BINOP(pcmpgtb);
+ break;
+ }
+ case kL16: {
+ // I16x8GtS
+ ASSEMBLE_SIMD_BINOP(pcmpgtw);
+ break;
+ }
+ case kL32: {
+ // I32x4GtS
+ ASSEMBLE_SIMD_BINOP(pcmpgtd);
+ break;
+ }
+ case kL64: {
+ // I64x2GtS
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2GeS: {
- __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), kScratchDoubleReg);
+ case kX64IGeS: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16GeS
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pminsb(dst, src);
+ __ Pcmpeqb(dst, src);
+ break;
+ }
+ case kL16: {
+ // I16x8GeS
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pminsw(dst, src);
+ __ Pcmpeqw(dst, src);
+ break;
+ }
+ case kL32: {
+ // I32x4GeS
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pminsd(dst, src);
+ __ Pcmpeqd(dst, src);
+ break;
+ }
+ case kL64: {
+ // I64x2GeS
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I64x2ShrU: {
- // Take shift value modulo 2^6.
- ASSEMBLE_SIMD_SHIFT(psrlq, 6);
+ case kX64IShrU: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16ShrU
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ if (HasImmediateInput(instr, 1)) {
+ __ I8x16ShrU(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
+ } else {
+ __ I8x16ShrU(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ }
+ break;
+ }
+ case kL16: {
+ // I16x8ShrU
+ // Take shift value modulo 2^4.
+ ASSEMBLE_SIMD_SHIFT(psrlw, 4);
+ break;
+ }
+ case kL32: {
+ // I32x4ShrU
+ // Take shift value modulo 2^5.
+ ASSEMBLE_SIMD_SHIFT(psrld, 5);
+ break;
+ }
+ case kL64: {
+ // I64x2ShrU
+ // Take shift value modulo 2^6.
+ ASSEMBLE_SIMD_SHIFT(psrlq, 6);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
case kX64I64x2ExtMulLowI32x4S: {
@@ -3276,21 +4082,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
- case kX64I32x4Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
- } else {
- // TODO(v8:9198): Pshufd can load from aligned memory once supported.
- __ Movd(dst, i.InputOperand(0));
- }
- __ Pshufd(dst, dst, uint8_t{0x0});
- break;
- }
- case kX64I32x4ExtractLane: {
- __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
- break;
- }
case kX64I32x4SConvertF32x4: {
__ I32x4SConvertF32x4(i.OutputSimd128Register(),
i.InputSimd128Register(0), kScratchDoubleReg,
@@ -3306,67 +4097,60 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kX64I32x4Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psignd(dst, kScratchDoubleReg);
+ case kX64IMinS: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16MinS
+ ASSEMBLE_SIMD_BINOP(pminsb);
+ break;
+ }
+ case kL16: {
+ // I16x8MinS
+ ASSEMBLE_SIMD_BINOP(pminsw);
+ break;
+ }
+ case kL32: {
+ // I32x4MinS
+ ASSEMBLE_SIMD_BINOP(pminsd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
} else {
- __ Pxor(dst, dst);
- __ Psubd(dst, src);
+ UNREACHABLE();
}
break;
}
- case kX64I32x4Shl: {
- // Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(pslld, 5);
- break;
- }
- case kX64I32x4ShrS: {
- // Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(psrad, 5);
- break;
- }
- case kX64I32x4Add: {
- ASSEMBLE_SIMD_BINOP(paddd);
- break;
- }
- case kX64I32x4Sub: {
- ASSEMBLE_SIMD_BINOP(psubd);
- break;
- }
- case kX64I32x4Mul: {
- ASSEMBLE_SIMD_BINOP(pmulld);
- break;
- }
- case kX64I32x4MinS: {
- ASSEMBLE_SIMD_BINOP(pminsd);
- break;
- }
- case kX64I32x4MaxS: {
- ASSEMBLE_SIMD_BINOP(pmaxsd);
- break;
- }
- case kX64I32x4Eq: {
- ASSEMBLE_SIMD_BINOP(pcmpeqd);
- break;
- }
- case kX64I32x4Ne: {
- __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
- break;
- }
- case kX64I32x4GtS: {
- ASSEMBLE_SIMD_BINOP(pcmpgtd);
- break;
- }
- case kX64I32x4GeS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pminsd(dst, src);
- __ Pcmpeqd(dst, src);
+ case kX64IMaxS: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16MaxS
+ ASSEMBLE_SIMD_BINOP(pmaxsb);
+ break;
+ }
+ case kL16: {
+ // I16x8MaxS
+ ASSEMBLE_SIMD_BINOP(pmaxsw);
+ break;
+ }
+ case kL32: {
+ // I32x4MaxS
+ ASSEMBLE_SIMD_BINOP(pmaxsd);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
case kX64I32x4UConvertF32x4: {
@@ -3406,47 +4190,154 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
- case kX64I32x4ShrU: {
- // Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(psrld, 5);
- break;
- }
- case kX64I32x4MinU: {
- ASSEMBLE_SIMD_BINOP(pminud);
- break;
- }
- case kX64I32x4MaxU: {
- ASSEMBLE_SIMD_BINOP(pmaxud);
- break;
- }
- case kX64I32x4GtU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pmaxud(dst, src);
- __ Pcmpeqd(dst, src);
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
+ case kX64IMinU: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16MinU
+ ASSEMBLE_SIMD_BINOP(pminub);
+ break;
+ }
+ case kL16: {
+ // I16x8MinU
+ ASSEMBLE_SIMD_BINOP(pminuw);
+ break;
+ }
+ case kL32: {
+ // I32x4MinU
+ ASSEMBLE_SIMD_BINOP(pminud);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I32x4GeU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pminud(dst, src);
- __ Pcmpeqd(dst, src);
+ case kX64IMaxU: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16MaxU
+ ASSEMBLE_SIMD_BINOP(pmaxub);
+ break;
+ }
+ case kL16: {
+ // I16x8MaxU
+ ASSEMBLE_SIMD_BINOP(pmaxuw);
+ break;
+ }
+ case kL32: {
+ // I32x4MaxU
+ ASSEMBLE_SIMD_BINOP(pmaxud);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I32x4Abs: {
- __ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ case kX64IGtU: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pmaxub(dst, src);
+ __ Pcmpeqb(dst, src);
+ __ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kL16: {
+ // I16x8GtU
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pmaxuw(dst, src);
+ __ Pcmpeqw(dst, src);
+ __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kL32: {
+ // I32x4GtU
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pmaxud(dst, src);
+ __ Pcmpeqd(dst, src);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kX64I32x4BitMask: {
- __ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
+ case kX64IGeU: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16GeU
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pminub(dst, src);
+ __ Pcmpeqb(dst, src);
+ break;
+ }
+ case kL16: {
+ // I16x8GeU
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pminuw(dst, src);
+ __ Pcmpeqw(dst, src);
+ break;
+ }
+ case kL32: {
+ // I32x4GeU
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ Pminud(dst, src);
+ __ Pcmpeqd(dst, src);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
case kX64I32x4DotI16x8S: {
ASSEMBLE_SIMD_BINOP(pmaddwd);
break;
}
+ case kX64I32x4DotI8x16I7x16AddS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(2));
+ __ I32x4DotI8x16I7x16AddS(
+ i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg, i.TempSimd128Register(0));
+ break;
+ }
case kX64I32x4ExtAddPairwiseI16x8S: {
__ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
i.InputSimd128Register(0), kScratchRegister);
@@ -3478,7 +4369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
for (int j = 0; j < 4; j++) {
imm[j] = i.InputUint32(j);
}
- SetupSimdImmediateInRegister(tasm(), imm, dst);
+ SetupSimdImmediateInRegister(masm(), imm, dst);
break;
}
case kX64S128Zero: {
@@ -3491,21 +4382,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pcmpeqd(dst, dst);
break;
}
- case kX64I16x8Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (HasRegisterInput(instr, 0)) {
- __ I16x8Splat(dst, i.InputRegister(0));
+ case kX64IExtractLaneS: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16ExtractLaneS
+ Register dst = i.OutputRegister();
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
+ __ movsxbl(dst, dst);
+ break;
+ }
+ case kL16: {
+ // I16x8ExtractLaneS
+ Register dst = i.OutputRegister();
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
+ __ movsxwl(dst, dst);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
} else {
- __ I16x8Splat(dst, i.InputOperand(0));
+ UNREACHABLE();
}
break;
}
- case kX64I16x8ExtractLaneS: {
- Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
- __ movsxwl(dst, dst);
- break;
- }
case kX64I16x8SConvertI8x16Low: {
__ Pmovsxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3515,82 +4420,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kX64I16x8Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psignw(dst, kScratchDoubleReg);
- } else {
- __ Pxor(dst, dst);
- __ Psubw(dst, src);
- }
- break;
- }
- case kX64I16x8Shl: {
- // Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(psllw, 4);
- break;
- }
- case kX64I16x8ShrS: {
- // Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(psraw, 4);
- break;
- }
case kX64I16x8SConvertI32x4: {
ASSEMBLE_SIMD_BINOP(packssdw);
break;
}
- case kX64I16x8Add: {
- ASSEMBLE_SIMD_BINOP(paddw);
- break;
- }
case kX64I16x8AddSatS: {
ASSEMBLE_SIMD_BINOP(paddsw);
break;
}
- case kX64I16x8Sub: {
- ASSEMBLE_SIMD_BINOP(psubw);
- break;
- }
case kX64I16x8SubSatS: {
ASSEMBLE_SIMD_BINOP(psubsw);
break;
}
- case kX64I16x8Mul: {
- ASSEMBLE_SIMD_BINOP(pmullw);
- break;
- }
- case kX64I16x8MinS: {
- ASSEMBLE_SIMD_BINOP(pminsw);
- break;
- }
- case kX64I16x8MaxS: {
- ASSEMBLE_SIMD_BINOP(pmaxsw);
- break;
- }
- case kX64I16x8Eq: {
- ASSEMBLE_SIMD_BINOP(pcmpeqw);
- break;
- }
- case kX64I16x8Ne: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Pcmpeqw(dst, i.InputSimd128Register(1));
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- break;
- }
- case kX64I16x8GtS: {
- ASSEMBLE_SIMD_BINOP(pcmpgtw);
- break;
- }
- case kX64I16x8GeS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pminsw(dst, src);
- __ Pcmpeqw(dst, src);
- break;
- }
case kX64I16x8UConvertI8x16Low: {
__ Pmovzxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3600,11 +4441,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
- case kX64I16x8ShrU: {
- // Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(psrlw, 4);
- break;
- }
case kX64I16x8UConvertI32x4: {
ASSEMBLE_SIMD_BINOP(packusdw);
break;
@@ -3617,45 +4453,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(psubusw);
break;
}
- case kX64I16x8MinU: {
- ASSEMBLE_SIMD_BINOP(pminuw);
- break;
- }
- case kX64I16x8MaxU: {
- ASSEMBLE_SIMD_BINOP(pmaxuw);
- break;
- }
- case kX64I16x8GtU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pmaxuw(dst, src);
- __ Pcmpeqw(dst, src);
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- break;
- }
- case kX64I16x8GeU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pminuw(dst, src);
- __ Pcmpeqw(dst, src);
- break;
- }
case kX64I16x8RoundingAverageU: {
ASSEMBLE_SIMD_BINOP(pavgw);
break;
}
- case kX64I16x8Abs: {
- __ Pabsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
- case kX64I16x8BitMask: {
- Register dst = i.OutputRegister();
- __ Packsswb(kScratchDoubleReg, i.InputSimd128Register(0));
- __ Pmovmskb(dst, kScratchDoubleReg);
- __ shrq(dst, Immediate(8));
- break;
- }
case kX64I16x8ExtMulLowI8x16S: {
__ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
@@ -3705,15 +4506,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kX64I8x16Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (HasRegisterInput(instr, 0)) {
- __ I8x16Splat(dst, i.InputRegister(0), kScratchDoubleReg);
- } else {
- __ I8x16Splat(dst, i.InputOperand(0), kScratchDoubleReg);
- }
- break;
- }
case kX64Pextrb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
size_t index = 0;
@@ -3740,12 +4532,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64I8x16ExtractLaneS: {
- Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
- __ movsxbl(dst, dst);
- break;
- }
case kX64Pinsrb: {
ASSEMBLE_PINSR(Pinsrb);
break;
@@ -3766,106 +4552,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(packsswb);
break;
}
- case kX64I8x16Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psignb(dst, kScratchDoubleReg);
- } else {
- __ Pxor(dst, dst);
- __ Psubb(dst, src);
- }
- break;
- }
- case kX64I8x16Shl: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
- if (HasImmediateInput(instr, 1)) {
- __ I8x16Shl(dst, src, i.InputInt3(1), kScratchRegister,
- kScratchDoubleReg);
- } else {
- __ I8x16Shl(dst, src, i.InputRegister(1), kScratchRegister,
- kScratchDoubleReg, i.TempSimd128Register(0));
- }
- break;
- }
- case kX64I8x16ShrS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
- if (HasImmediateInput(instr, 1)) {
- __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
- } else {
- __ I8x16ShrS(dst, src, i.InputRegister(1), kScratchRegister,
- kScratchDoubleReg, i.TempSimd128Register(0));
- }
- break;
- }
- case kX64I8x16Add: {
- ASSEMBLE_SIMD_BINOP(paddb);
- break;
- }
case kX64I8x16AddSatS: {
ASSEMBLE_SIMD_BINOP(paddsb);
break;
}
- case kX64I8x16Sub: {
- ASSEMBLE_SIMD_BINOP(psubb);
- break;
- }
case kX64I8x16SubSatS: {
ASSEMBLE_SIMD_BINOP(psubsb);
break;
}
- case kX64I8x16MinS: {
- ASSEMBLE_SIMD_BINOP(pminsb);
- break;
- }
- case kX64I8x16MaxS: {
- ASSEMBLE_SIMD_BINOP(pmaxsb);
- break;
- }
- case kX64I8x16Eq: {
- ASSEMBLE_SIMD_BINOP(pcmpeqb);
- break;
- }
- case kX64I8x16Ne: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Pcmpeqb(dst, i.InputSimd128Register(1));
- __ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- break;
- }
- case kX64I8x16GtS: {
- ASSEMBLE_SIMD_BINOP(pcmpgtb);
- break;
- }
- case kX64I8x16GeS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pminsb(dst, src);
- __ Pcmpeqb(dst, src);
- break;
- }
case kX64I8x16UConvertI16x8: {
ASSEMBLE_SIMD_BINOP(packuswb);
break;
}
- case kX64I8x16ShrU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
- if (HasImmediateInput(instr, 1)) {
- __ I8x16ShrU(dst, src, i.InputInt3(1), kScratchRegister,
- kScratchDoubleReg);
- } else {
- __ I8x16ShrU(dst, src, i.InputRegister(1), kScratchRegister,
- kScratchDoubleReg, i.TempSimd128Register(0));
- }
- break;
- }
case kX64I8x16AddSatU: {
ASSEMBLE_SIMD_BINOP(paddusb);
break;
@@ -3874,42 +4572,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_BINOP(psubusb);
break;
}
- case kX64I8x16MinU: {
- ASSEMBLE_SIMD_BINOP(pminub);
- break;
- }
- case kX64I8x16MaxU: {
- ASSEMBLE_SIMD_BINOP(pmaxub);
- break;
- }
- case kX64I8x16GtU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pmaxub(dst, src);
- __ Pcmpeqb(dst, src);
- __ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- break;
- }
- case kX64I8x16GeU: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- __ Pminub(dst, src);
- __ Pcmpeqb(dst, src);
- break;
- }
case kX64I8x16RoundingAverageU: {
ASSEMBLE_SIMD_BINOP(pavgb);
break;
}
- case kX64I8x16Abs: {
- __ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
- case kX64I8x16BitMask: {
- __ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
- break;
- }
case kX64I32x4ExtMulLowI16x8S: {
__ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
@@ -3986,7 +4652,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask[j - 1] = i.InputUint32(j);
}
- SetupSimdImmediateInRegister(tasm(), mask, tmp_simd);
+ SetupSimdImmediateInRegister(masm(), mask, tmp_simd);
__ Pshufb(dst, tmp_simd);
} else { // two input operands
DCHECK_NE(tmp_simd, i.InputSimd128Register(1));
@@ -4000,7 +4666,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask1[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k;
}
}
- SetupSimdImmediateInRegister(tasm(), mask1, tmp_simd);
+ SetupSimdImmediateInRegister(masm(), mask1, tmp_simd);
__ Pshufb(kScratchDoubleReg, tmp_simd);
uint32_t mask2[4] = {};
if (instr->InputAt(1)->IsSimd128Register()) {
@@ -4016,7 +4682,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask2[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k;
}
}
- SetupSimdImmediateInRegister(tasm(), mask2, tmp_simd);
+ SetupSimdImmediateInRegister(masm(), mask2, tmp_simd);
__ Pshufb(dst, tmp_simd);
__ Por(dst, kScratchDoubleReg);
}
@@ -4332,20 +4998,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
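// Put differently: each lane is compared against zero and ptest then checks
// that no lane compared equal, so the pcmpeq variant has to match the lane
// width being tested.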
- case kX64I64x2AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
- break;
- }
- case kX64I32x4AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
+ case kX64IAllTrue: {
+ LaneSize lane_size = LaneSizeField::decode(opcode);
+ VectorLength vec_len = VectorLengthField::decode(opcode);
+ if (vec_len == kV128) {
+ switch (lane_size) {
+ case kL8: {
+ // I8x16AllTrue
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
+ break;
+ }
+ case kL16: {
+ // I16x8AllTrue
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqw);
+ break;
+ }
+ case kL32: {
+ // I32x4AllTrue
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
+ break;
+ }
+ case kL64: {
+ // I64x2AllTrue
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
break;
}
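The consolidated kX64IAllTrue arm relies on the lane size and vector length being packed into the instruction word and decoded via LaneSizeField/VectorLengthField. Below is a minimal standalone sketch of that encode/decode pattern; the field widths and bit positions are assumptions for illustration only, not V8's actual field layout.

// Hypothetical packing: bits [0..7] opcode id, [8..9] lane size, [10] length.
#include <cstdint>
#include <cstdio>

enum LaneSize : uint32_t { kL8 = 0, kL16 = 1, kL32 = 2, kL64 = 3 };
enum VectorLength : uint32_t { kV128 = 0, kV256 = 1 };

constexpr uint32_t Encode(uint32_t opcode_id, LaneSize ls, VectorLength vl) {
  return opcode_id | (static_cast<uint32_t>(ls) << 8) |
         (static_cast<uint32_t>(vl) << 10);
}
constexpr LaneSize DecodeLaneSize(uint32_t word) {
  return static_cast<LaneSize>((word >> 8) & 0x3);
}
constexpr VectorLength DecodeVectorLength(uint32_t word) {
  return static_cast<VectorLength>((word >> 10) & 0x1);
}

int main() {
  // One opcode id, four lane sizes: the switch above selects Pcmpeqb/w/d/q
  // from the decoded lane size instead of needing four separate opcodes.
  uint32_t all_true_i32x4 = Encode(/*assumed kX64IAllTrue id*/ 42, kL32, kV128);
  std::printf("lane size %u, vector length %u\n",
              static_cast<unsigned>(DecodeLaneSize(all_true_i32x4)),
              static_cast<unsigned>(DecodeVectorLength(all_true_i32x4)));
}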
- case kX64I16x8AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqw);
+ case kX64Blendvpd: {
+ __ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
break;
}
- case kX64I8x16AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
+ case kX64Blendvps: {
+ __ Blendvps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
break;
}
case kX64Pblendvb: {
@@ -4559,6 +5252,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadUint16:
case kAtomicLoadWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
+
+ case kX64S256Load32Splat: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vbroadcastss(i.OutputSimd256Register(), i.MemoryOperand());
+ break;
+ }
+ case kX64S256Load64Splat: {
+ UNIMPLEMENTED();
+ }
+ case kX64Movdqu256: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ CpuFeatureScope avx_scope(masm(), AVX);
+ if (instr->HasOutput()) {
+ __ vmovdqu(i.OutputSimd256Register(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ vmovdqu(operand, i.InputSimd256Register(index));
+ }
+ break;
+ }
}
return kSuccess;
}  // NOLINT(readability/fn_size)
@@ -4729,6 +5444,34 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
+void CodeGenerator::AssembleArchBinarySearchSwitchRange(
+ Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
+ std::pair<int32_t, Label*>* end, base::Optional<int32_t>& last_cmp_value) {
+ if (end - begin < kBinarySearchSwitchMinimalCases) {
+ if (last_cmp_value && *last_cmp_value == begin->first) {
+ // No need to do another repeat cmp.
+ masm()->j(equal, begin->second);
+ ++begin;
+ }
+
+ while (begin != end) {
+ masm()->JumpIfEqual(input, begin->first, begin->second);
+ ++begin;
+ }
+ AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
+ return;
+ }
+ auto middle = begin + (end - begin) / 2;
+ Label less_label;
+ masm()->JumpIfLessThan(input, middle->first, &less_label);
+ last_cmp_value = middle->first;
+ AssembleArchBinarySearchSwitchRange(input, def_block, middle, end,
+ last_cmp_value);
+ masm()->bind(&less_label);
+ AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle,
+ last_cmp_value);
+}
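The extra last_cmp_value parameter threads the most recently compared case value through the recursion: after the parent emits its compare against middle->first and falls through into the upper half, the flags are still live, so that half's leaf can emit a bare je for its first case. The plain C++ sketch below makes the saving visible by counting compares; kMin and the printed pseudo-assembly are illustrative stand-ins, not the code generator's actual output.

#include <cstdio>
#include <optional>
#include <utility>
#include <vector>

constexpr int kMin = 4;  // stand-in for kBinarySearchSwitchMinimalCases
static int emitted_compares = 0;

void Lower(const std::vector<std::pair<int, const char*>>& cases, size_t begin,
           size_t end, std::optional<int>& last_cmp_value) {
  if (end - begin < kMin) {
    size_t i = begin;
    if (last_cmp_value && *last_cmp_value == cases[i].first) {
      // Flags from the parent's cmp are still valid: no repeat cmp needed.
      std::printf("  je %s            ; reuse flags, no cmp\n", cases[i].second);
      ++i;
    }
    for (; i < end; ++i) {
      ++emitted_compares;
      std::printf("  cmp input, %d ; je %s\n", cases[i].first, cases[i].second);
    }
    std::printf("  jmp default\n");
    return;
  }
  size_t middle = begin + (end - begin) / 2;
  ++emitted_compares;
  std::printf("  cmp input, %d ; jl below_%d\n", cases[middle].first,
              cases[middle].first);
  last_cmp_value = cases[middle].first;
  Lower(cases, middle, end, last_cmp_value);  // upper half first, as above
  std::printf("below_%d:\n", cases[middle].first);
  Lower(cases, begin, middle, last_cmp_value);
}

int main() {
  std::vector<std::pair<int, const char*>> cases = {
      {1, "L1"}, {2, "L2"}, {3, "L3"}, {5, "L5"}, {8, "L8"}, {13, "L13"}};
  std::optional<int> last;
  Lower(cases, 0, cases.size(), last);
  std::printf("compares emitted: %d\n", emitted_compares);  // 6 instead of 7
}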
+
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -4736,8 +5479,10 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
for (size_t index = 2; index < instr->InputCount(); index += 2) {
cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
}
+ base::Optional<int32_t> last_cmp_value;
AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
- cases.data() + cases.size());
+ cases.data() + cases.size(),
+ last_cmp_value);
}
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
@@ -4841,10 +5586,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsWasmFunctionCall() ||
call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // We do not use this stack value in import wrappers and capi functions.
- // We push it anyway to satisfy legacy assumptions about these frames'
- // size and order.
- // TODO(manoskouk): Consider fixing this.
+ // For import wrappers and C-API functions, this stack slot is only used
+ // for printing stack traces in V8. Also, it holds a WasmApiFunctionRef
+ // instead of the instance itself, which is taken care of in the frames
+ // accessors.
__ pushq(kWasmInstanceRegister);
}
if (call_descriptor->IsWasmCapiFunction()) {
@@ -5031,8 +5776,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
- __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
@@ -5056,7 +5801,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
}
-void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
+void CodeGenerator::FinishCode() { masm()->PatchConstPool(); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
@@ -5085,10 +5830,66 @@ void CodeGenerator::IncrementStackAccessCounter(
}
}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ X64OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ pushq(g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot() || source->IsFloatStackSlot() ||
+ source->IsDoubleStackSlot()) {
+ __ pushq(g.ToOperand(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for xmm registers / 128-bit memory operands. Bump
+ // the stack pointer and assemble the move.
+ __ subq(rsp, Immediate(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ X64OperandConverter g(this, nullptr);
+ int new_slots = ElementSizeInPointers(rep);
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ if (dest->IsRegister()) {
+ __ popq(g.ToRegister(dest));
+ } else if (dest->IsStackSlot() || dest->IsFloatStackSlot() ||
+ dest->IsDoubleStackSlot()) {
+ __ popq(g.ToOperand(dest));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ addq(rsp, Immediate(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ addq(rsp, Immediate(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
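Push and Pop above map a value placed just below the current stack pointer to a frame-relative slot id via last_frame_slot_id + sp_delta + new_slots, while temp_slots_ accumulates what PopTempStackSlots later rewinds with one addq. A small arithmetic sketch of that bookkeeping, using assumed frame sizes rather than V8's Frame/FrameAccessState types:

#include <cassert>

struct FrameState {
  int total_frame_slots;  // fixed when the frame is constructed
  int sp_delta = 0;       // extra slots currently pushed below the frame
  int temp_slots = 0;

  int Push(int new_slots) {
    int last_frame_slot_id = total_frame_slots - 1;
    int slot_id = last_frame_slot_id + sp_delta + new_slots;
    sp_delta += new_slots;
    temp_slots += new_slots;
    return slot_id;  // frame-relative index of the freshly pushed value
  }
  void Pop(int new_slots) {
    sp_delta -= new_slots;
    temp_slots -= new_slots;
  }
};

int main() {
  FrameState f{/*total_frame_slots=*/8};
  int a = f.Push(1);  // one pointer-sized slot -> id 8
  int b = f.Push(2);  // a two-slot (128-bit) value -> id 10
  assert(a == 8 && b == 10 && f.sp_delta == 3);
  f.Pop(2);
  f.Pop(1);
  assert(f.sp_delta == 0 && f.temp_slots == 0);
}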
+
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
if ((IsFloatingPoint(rep) &&
!move_cycle_.pending_double_scratch_register_use) ||
(!IsFloatingPoint(rep) && !move_cycle_.pending_scratch_register_use)) {
@@ -5099,25 +5900,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
AssembleMove(source, &scratch);
} else {
// The scratch register is blocked by pending moves. Use the stack instead.
- int new_slots = ElementSizeInPointers(rep);
- X64OperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ pushq(g.ToRegister(source));
- } else if (source->IsStackSlot() || source->IsFloatStackSlot() ||
- source->IsDoubleStackSlot()) {
- __ pushq(g.ToOperand(source));
- } else {
- // No push instruction for xmm registers / 128-bit memory operands. Bump
- // the stack pointer and assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ subq(rsp, Immediate(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -5131,23 +5914,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
AssembleMove(&scratch, dest);
} else {
- X64OperandConverter g(this, nullptr);
- int new_slots = ElementSizeInPointers(rep);
- frame_access_state()->IncreaseSPDelta(-new_slots);
- if (dest->IsRegister()) {
- __ popq(g.ToRegister(dest));
- } else if (dest->IsStackSlot() || dest->IsFloatStackSlot() ||
- dest->IsDoubleStackSlot()) {
- __ popq(g.ToOperand(dest));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ addq(rsp, Immediate(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
move_cycle_ = MoveCycleState();
}
@@ -5172,6 +5939,30 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
}
}
+namespace {
+
+bool Is32BitOperand(InstructionOperand* operand) {
+ DCHECK(operand->IsStackSlot() || operand->IsRegister());
+ MachineRepresentation mr = LocationOperand::cast(operand)->representation();
+ return mr == MachineRepresentation::kWord32 ||
+ mr == MachineRepresentation::kCompressed ||
+ mr == MachineRepresentation::kCompressedPointer;
+}
+
+// When we need only 32 bits, move only 32 bits. Benefits:
+// - Save a byte here and there (depending on the destination
+// register; "movl eax, ..." is smaller than "movq rax, ...").
+// - Safeguard against accidental decompression of compressed slots.
+// We must check both {source} and {destination} to be 32-bit values,
+// because treating 32-bit sources as 64-bit values can be perfectly
+// fine as a result of virtual register renaming (to avoid redundant
+// explicit zero-extensions that also happen implicitly).
+bool Use32BitMove(InstructionOperand* source, InstructionOperand* destination) {
+ return Is32BitOperand(source) && Is32BitOperand(destination);
+}
+
+} // namespace
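The requirement that both operands are 32-bit mirrors the x64 rule that a 32-bit register write zero-extends into the upper half: a movl of a compressed or Word32 slot leaves a clean 64-bit register, while a movq would also copy whatever stale bits sit in the slot's upper half. A tiny scalar simulation of the two widths, purely illustrative (MovQ/MovL are hypothetical helpers, not generated code):

#include <cassert>
#include <cstdint>

uint64_t MovQ(uint64_t src_slot) { return src_slot; }                  // movq
uint64_t MovL(uint64_t src_slot) { return src_slot & 0xFFFFFFFFull; }  // movl

int main() {
  // A 32-bit (compressed) value in a 64-bit stack slot whose upper half
  // still holds unrelated bits.
  uint64_t slot = 0xDEADBEEF00001234ull;
  assert(MovL(slot) == 0x0000000000001234ull);  // clean, zero-extended value
  assert(MovQ(slot) == 0xDEADBEEF00001234ull);  // drags the stale upper bits
}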
+
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, nullptr);
@@ -5223,7 +6014,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ __ LoadTaggedRoot(dst, index);
} else {
__ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
}
@@ -5259,20 +6050,24 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
- MachineRepresentation src_rep =
- LocationOperand::cast(source)->representation();
- MachineRepresentation dest_rep =
- LocationOperand::cast(destination)->representation();
- if (dest_rep == MachineRepresentation::kWord32 &&
- src_rep == MachineRepresentation::kWord32) {
- DCHECK(destination->IsRegister());
+ DCHECK(destination->IsRegister());
+ if (Use32BitMove(source, destination)) {
__ movl(g.ToRegister(destination), g.ToRegister(source));
} else {
__ movq(g.ToRegister(destination), g.ToRegister(source));
}
} else {
DCHECK(source->IsFPRegister());
- __ Movapd(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd256) {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmovapd(g.ToSimd256Register(destination),
+ g.ToSimd256Register(source));
+ } else {
+ __ Movapd(g.ToDoubleRegister(destination),
+ g.ToDoubleRegister(source));
+ }
}
return;
case MoveType::kRegisterToStack: {
@@ -5284,10 +6079,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
XMMRegister src = g.ToDoubleRegister(source);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(dst, src);
- } else {
+ if (rep == MachineRepresentation::kSimd128) {
__ Movups(dst, src);
+ } else if (rep == MachineRepresentation::kSimd256) {
+ YMMRegister src = g.ToSimd256Register(source);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmovups(dst, src);
+ } else {
+ __ Movsd(dst, src);
}
}
return;
@@ -5295,18 +6094,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case MoveType::kStackToRegister: {
Operand src = g.ToOperand(source);
if (source->IsStackSlot()) {
- MachineRepresentation mr =
- LocationOperand::cast(source)->representation();
- const bool is_32_bit = mr == MachineRepresentation::kWord32 ||
- mr == MachineRepresentation::kCompressed ||
- mr == MachineRepresentation::kCompressedPointer;
- // TODO(13581): Fix this for other code kinds (see
- // https://crbug.com/1356461).
- if (code_kind() == CodeKind::WASM_FUNCTION && is_32_bit) {
- // When we need only 32 bits, move only 32 bits. Benefits:
- // - Save a byte here and there (depending on the destination
- // register; "movl eax, ..." is smaller than "movq rax, ...").
- // - Safeguard against accidental decompression of compressed slots.
+ if (Use32BitMove(source, destination)) {
__ movl(g.ToRegister(destination), src);
} else {
__ movq(g.ToRegister(destination), src);
@@ -5316,10 +6104,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
XMMRegister dst = g.ToDoubleRegister(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(dst, src);
- } else {
+ if (rep == MachineRepresentation::kSimd128) {
__ Movups(dst, src);
+ } else if (rep == MachineRepresentation::kSimd256) {
+ YMMRegister dst = g.ToSimd256Register(destination);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmovups(dst, src);
+ } else {
+ __ Movsd(dst, src);
}
}
return;
@@ -5330,18 +6122,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (source->IsStackSlot()) {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
- __ movq(kScratchRegister, src);
+ if (Use32BitMove(source, destination)) {
+ __ movl(kScratchRegister, src);
+ } else {
+ __ movq(kScratchRegister, src);
+ }
+        // Always write the full 64 bits to avoid leaving stale bits in the
+        // upper 32 bits on the stack.
__ movq(dst, kScratchRegister);
} else {
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(dst, kScratchDoubleReg);
- } else {
- DCHECK(source->IsSimd128StackSlot());
+ if (rep == MachineRepresentation::kSimd128) {
__ Movups(kScratchDoubleReg, src);
__ Movups(dst, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kSimd256) {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmovups(kScratchSimd256Reg, src);
+ __ vmovups(dst, kScratchSimd256Reg);
+ } else {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(dst, kScratchDoubleReg);
}
}
return;
@@ -5398,16 +6199,34 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
if (source->IsRegister()) {
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
- __ movq(kScratchRegister, src);
- __ movq(src, dst);
- __ movq(dst, kScratchRegister);
+ if (Use32BitMove(source, destination)) {
+ __ movl(kScratchRegister, src);
+ __ movl(src, dst);
+ __ movl(dst, kScratchRegister);
+ } else {
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
+ }
} else {
DCHECK(source->IsFPRegister());
- XMMRegister src = g.ToDoubleRegister(source);
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(kScratchDoubleReg, src);
- __ Movapd(src, dst);
- __ Movapd(dst, kScratchDoubleReg);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd256) {
+ YMMRegister src = g.ToSimd256Register(source);
+ YMMRegister dst = g.ToSimd256Register(destination);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmovapd(kScratchSimd256Reg, src);
+ __ vmovapd(src, dst);
+ __ vmovapd(dst, kScratchSimd256Reg);
+
+ } else {
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ Movapd(kScratchDoubleReg, src);
+ __ Movapd(src, dst);
+ __ Movapd(dst, kScratchDoubleReg);
+ }
}
return;
}
@@ -5420,18 +6239,25 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ movq(dst, kScratchRegister);
} else {
DCHECK(source->IsFPRegister());
- XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(src, dst);
- __ Movsd(dst, kScratchDoubleReg);
- } else {
+ if (rep == MachineRepresentation::kSimd128) {
+ XMMRegister src = g.ToDoubleRegister(source);
__ Movups(kScratchDoubleReg, src);
__ Movups(src, dst);
__ Movups(dst, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kSimd256) {
+ YMMRegister src = g.ToSimd256Register(source);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmovups(kScratchSimd256Reg, src);
+ __ vmovups(src, dst);
+ __ vmovups(dst, kScratchSimd256Reg);
+ } else {
+ XMMRegister src = g.ToDoubleRegister(source);
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, kScratchDoubleReg);
}
}
return;
@@ -5441,19 +6267,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- Register tmp = kScratchRegister;
- __ movq(tmp, dst);
- __ pushq(src); // Then use stack to copy src to destination.
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kSystemPointerSize);
- __ movq(src, tmp);
- } else {
+ if (rep == MachineRepresentation::kSimd128) {
// Without AVX, misaligned reads and writes will trap. Move using the
// stack, in two parts.
+ // The XOR trick can be used if AVX is supported, but it needs more
+        // instructions, and may introduce a performance penalty if the
+        // memory reference splits a cache line.
__ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
__ pushq(src); // Then use stack to copy src to destination.
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
@@ -5468,6 +6287,30 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kSystemPointerSize);
__ movups(src, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kSimd256) {
+ // Use the XOR trick to swap without a temporary. The xorps may read
+ // from unaligned address, causing a slowdown, but swaps
+ // between slots should be rare.
+ __ vmovups(kScratchSimd256Reg, src);
+ __ vxorps(kScratchSimd256Reg, kScratchSimd256Reg,
+ dst); // scratch contains src ^ dst.
+ __ vmovups(src, kScratchSimd256Reg);
+ __ vxorps(kScratchSimd256Reg, kScratchSimd256Reg,
+ dst); // scratch contains src.
+ __ vmovups(dst, kScratchSimd256Reg);
+ __ vxorps(kScratchSimd256Reg, kScratchSimd256Reg,
+ src); // scratch contains dst.
+ __ vmovups(src, kScratchSimd256Reg);
+ } else {
+ Register tmp = kScratchRegister;
+ __ movq(tmp, dst);
+ __ pushq(src); // Then use stack to copy src to destination.
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSystemPointerSize);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kSystemPointerSize);
+ __ movq(src, tmp);
}
return;
}
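The 256-bit stack-to-stack swap above relies on the classic three-XOR exchange so that a single scratch YMM register suffices. The same algebra on scalars, with the corresponding assembly steps noted in comments (a scalar model of the idea, not the emitted vector code):

#include <cassert>
#include <cstdint>

void XorSwap(uint64_t* src, uint64_t* dst) {
  uint64_t scratch = *src;  // vmovups scratch, src   : scratch = S
  scratch ^= *dst;          // vxorps  scratch, dst   : scratch = S ^ D
  *src = scratch;           // vmovups src, scratch   : [src]  = S ^ D
  scratch ^= *dst;          // vxorps  scratch, dst   : scratch = S
  *dst = scratch;           // vmovups dst, scratch   : [dst]  = S
  scratch ^= *src;          // vxorps  scratch, src   : scratch = D
  *src = scratch;           // vmovups src, scratch   : [src]  = D
}

int main() {
  uint64_t a = 0x1111222233334444ull, b = 0xAAAABBBBCCCCDDDDull;
  XorSwap(&a, &b);
  assert(a == 0xAAAABBBBCCCCDDDDull && b == 0x1111222233334444ull);
}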
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 173a16316c..d1b7eb639e 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -56,7 +56,13 @@ namespace compiler {
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint64) \
V(X64Word64AtomicExchangeUint64) \
- V(X64Word64AtomicCompareExchangeUint64)
+ V(X64Word64AtomicCompareExchangeUint64) \
+ V(X64Movdqu256) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64S256Load32Splat) \
+ V(X64S256Load64Splat)
#define TARGET_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
@@ -169,10 +175,6 @@ namespace compiler {
V(X64Float64Neg) \
V(X64Float32Abs) \
V(X64Float32Neg) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
V(X64MovqEncodeSandboxedPointer) \
V(X64MovqDecodeSandboxedPointer) \
V(X64BitcastFI) \
@@ -190,22 +192,22 @@ namespace compiler {
V(X64Cvttpd2dq) \
V(X64I32x4TruncF64x2UZero) \
V(X64I32x4TruncF32x4U) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
+ V(X64FSplat) \
+ V(X64FExtractLane) \
+ V(X64FReplaceLane) \
+ V(X64FAbs) \
+ V(X64FNeg) \
+ V(X64FSqrt) \
+ V(X64FAdd) \
+ V(X64FSub) \
+ V(X64FMul) \
+ V(X64FDiv) \
+ V(X64FMin) \
+ V(X64FMax) \
+ V(X64FEq) \
+ V(X64FNe) \
+ V(X64FLt) \
+ V(X64FLe) \
V(X64F64x2Qfma) \
V(X64F64x2Qfms) \
V(X64Minpd) \
@@ -213,45 +215,29 @@ namespace compiler {
V(X64F64x2Round) \
V(X64F64x2ConvertLowI32x4S) \
V(X64F64x2ConvertLowI32x4U) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
V(X64F32x4SConvertI32x4) \
V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
V(X64F32x4Qfma) \
V(X64F32x4Qfms) \
V(X64Minps) \
V(X64Maxps) \
V(X64F32x4Round) \
V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
+ V(X64ISplat) \
+ V(X64IExtractLane) \
+ V(X64IAbs) \
+ V(X64INeg) \
+ V(X64IBitMask) \
+ V(X64IShl) \
+ V(X64IShrS) \
+ V(X64IAdd) \
+ V(X64ISub) \
+ V(X64IMul) \
+ V(X64IEq) \
+ V(X64IGtS) \
+ V(X64IGeS) \
+ V(X64INe) \
+ V(X64IShrU) \
V(X64I64x2ExtMulLowI32x4S) \
V(X64I64x2ExtMulHighI32x4S) \
V(X64I64x2ExtMulLowI32x4U) \
@@ -260,34 +246,20 @@ namespace compiler {
V(X64I64x2SConvertI32x4High) \
V(X64I64x2UConvertI32x4Low) \
V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
V(X64I32x4SConvertF32x4) \
V(X64I32x4SConvertI16x8Low) \
V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
+ V(X64IMinS) \
+ V(X64IMaxS) \
V(X64I32x4UConvertF32x4) \
V(X64I32x4UConvertI16x8Low) \
V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
+ V(X64IMinU) \
+ V(X64IMaxU) \
+ V(X64IGtU) \
+ V(X64IGeU) \
V(X64I32x4DotI16x8S) \
+ V(X64I32x4DotI8x16I7x16AddS) \
V(X64I32x4ExtMulLowI16x8S) \
V(X64I32x4ExtMulHighI16x8S) \
V(X64I32x4ExtMulLowI16x8U) \
@@ -297,38 +269,18 @@ namespace compiler {
V(X64I32x4TruncSatF64x2SZero) \
V(X64I32x4TruncSatF64x2UZero) \
V(X64I32X4ShiftZeroExtendI8x16) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
+ V(X64IExtractLaneS) \
V(X64I16x8SConvertI8x16Low) \
V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
V(X64I16x8UConvertI8x16Low) \
V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
V(X64I16x8UConvertI32x4) \
V(X64I16x8AddSatU) \
V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
V(X64I16x8ExtMulLowI8x16S) \
V(X64I16x8ExtMulHighI8x16S) \
V(X64I16x8ExtMulLowI8x16U) \
@@ -338,33 +290,13 @@ namespace compiler {
V(X64I16x8Q15MulRSatS) \
V(X64I16x8RelaxedQ15MulRS) \
V(X64I16x8DotI8x16I7x16S) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
V(X64I8x16UConvertI16x8) \
V(X64I8x16AddSatU) \
V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
V(X64S128Const) \
V(X64S128Zero) \
V(X64S128AllOnes) \
@@ -405,13 +337,11 @@ namespace compiler {
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
+ V(X64IAllTrue) \
+ V(X64Blendvpd) \
+ V(X64Blendvps) \
V(X64Pblendvb) \
V(X64TraceInstruction)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 8bbb7e2519..38a9a5f872 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -134,22 +134,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Cvttpd2dq:
case kX64I32x4TruncF64x2UZero:
case kX64I32x4TruncF32x4U:
- case kX64F64x2Splat:
- case kX64F64x2ExtractLane:
- case kX64F64x2ReplaceLane:
- case kX64F64x2Abs:
- case kX64F64x2Neg:
- case kX64F64x2Sqrt:
- case kX64F64x2Add:
- case kX64F64x2Sub:
- case kX64F64x2Mul:
- case kX64F64x2Div:
- case kX64F64x2Min:
- case kX64F64x2Max:
- case kX64F64x2Eq:
- case kX64F64x2Ne:
- case kX64F64x2Lt:
- case kX64F64x2Le:
+ case kX64FSplat:
+ case kX64FExtractLane:
+ case kX64FReplaceLane:
+ case kX64FAbs:
+ case kX64FNeg:
+ case kX64FSqrt:
+ case kX64FAdd:
+ case kX64FSub:
+ case kX64FMul:
+ case kX64FDiv:
+ case kX64FMin:
+ case kX64FMax:
+ case kX64FEq:
+ case kX64FNe:
+ case kX64FLt:
+ case kX64FLe:
case kX64F64x2Qfma:
case kX64F64x2Qfms:
case kX64Minpd:
@@ -158,45 +158,29 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2ConvertLowI32x4S:
case kX64F64x2ConvertLowI32x4U:
case kX64F64x2PromoteLowF32x4:
- case kX64F32x4Splat:
- case kX64F32x4ExtractLane:
- case kX64F32x4ReplaceLane:
case kX64F32x4SConvertI32x4:
case kX64F32x4UConvertI32x4:
- case kX64F32x4Abs:
- case kX64F32x4Neg:
- case kX64F32x4Sqrt:
- case kX64F32x4Add:
- case kX64F32x4Sub:
- case kX64F32x4Mul:
- case kX64F32x4Div:
- case kX64F32x4Min:
- case kX64F32x4Max:
- case kX64F32x4Eq:
- case kX64F32x4Ne:
- case kX64F32x4Lt:
- case kX64F32x4Le:
case kX64F32x4Qfma:
case kX64F32x4Qfms:
case kX64Minps:
case kX64Maxps:
case kX64F32x4Round:
case kX64F32x4DemoteF64x2Zero:
- case kX64I64x2Splat:
- case kX64I64x2ExtractLane:
- case kX64I64x2Abs:
- case kX64I64x2Neg:
- case kX64I64x2BitMask:
- case kX64I64x2Shl:
- case kX64I64x2ShrS:
- case kX64I64x2Add:
- case kX64I64x2Sub:
- case kX64I64x2Mul:
- case kX64I64x2Eq:
- case kX64I64x2GtS:
- case kX64I64x2GeS:
- case kX64I64x2Ne:
- case kX64I64x2ShrU:
+ case kX64ISplat:
+ case kX64IExtractLane:
+ case kX64IAbs:
+ case kX64INeg:
+ case kX64IBitMask:
+ case kX64IShl:
+ case kX64IShrS:
+ case kX64IAdd:
+ case kX64ISub:
+ case kX64IMul:
+ case kX64IEq:
+ case kX64IGtS:
+ case kX64IGeS:
+ case kX64INe:
+ case kX64IShrU:
case kX64I64x2ExtMulLowI32x4S:
case kX64I64x2ExtMulHighI32x4S:
case kX64I64x2ExtMulLowI32x4U:
@@ -205,34 +189,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I64x2SConvertI32x4High:
case kX64I64x2UConvertI32x4Low:
case kX64I64x2UConvertI32x4High:
- case kX64I32x4Splat:
- case kX64I32x4ExtractLane:
case kX64I32x4SConvertF32x4:
case kX64I32x4SConvertI16x8Low:
case kX64I32x4SConvertI16x8High:
- case kX64I32x4Neg:
- case kX64I32x4Shl:
- case kX64I32x4ShrS:
- case kX64I32x4Add:
- case kX64I32x4Sub:
- case kX64I32x4Mul:
- case kX64I32x4MinS:
- case kX64I32x4MaxS:
- case kX64I32x4Eq:
- case kX64I32x4Ne:
- case kX64I32x4GtS:
- case kX64I32x4GeS:
+ case kX64IMinS:
+ case kX64IMaxS:
case kX64I32x4UConvertF32x4:
case kX64I32x4UConvertI16x8Low:
case kX64I32x4UConvertI16x8High:
- case kX64I32x4ShrU:
- case kX64I32x4MinU:
- case kX64I32x4MaxU:
- case kX64I32x4GtU:
- case kX64I32x4GeU:
- case kX64I32x4Abs:
- case kX64I32x4BitMask:
+ case kX64IMinU:
+ case kX64IMaxU:
+ case kX64IGtU:
+ case kX64IGeU:
case kX64I32x4DotI16x8S:
+ case kX64I32x4DotI8x16I7x16AddS:
case kX64I32x4ExtMulLowI16x8S:
case kX64I32x4ExtMulHighI16x8S:
case kX64I32x4ExtMulLowI16x8U:
@@ -242,38 +212,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4TruncSatF64x2SZero:
case kX64I32x4TruncSatF64x2UZero:
case kX64I32X4ShiftZeroExtendI8x16:
- case kX64I16x8Splat:
- case kX64I16x8ExtractLaneS:
+ case kX64IExtractLaneS:
case kX64I16x8SConvertI8x16Low:
case kX64I16x8SConvertI8x16High:
- case kX64I16x8Neg:
- case kX64I16x8Shl:
- case kX64I16x8ShrS:
case kX64I16x8SConvertI32x4:
- case kX64I16x8Add:
case kX64I16x8AddSatS:
- case kX64I16x8Sub:
case kX64I16x8SubSatS:
- case kX64I16x8Mul:
- case kX64I16x8MinS:
- case kX64I16x8MaxS:
- case kX64I16x8Eq:
- case kX64I16x8Ne:
- case kX64I16x8GtS:
- case kX64I16x8GeS:
case kX64I16x8UConvertI8x16Low:
case kX64I16x8UConvertI8x16High:
case kX64I16x8UConvertI32x4:
- case kX64I16x8ShrU:
case kX64I16x8AddSatU:
case kX64I16x8SubSatU:
- case kX64I16x8MinU:
- case kX64I16x8MaxU:
- case kX64I16x8GtU:
- case kX64I16x8GeU:
case kX64I16x8RoundingAverageU:
- case kX64I16x8Abs:
- case kX64I16x8BitMask:
case kX64I16x8ExtMulLowI8x16S:
case kX64I16x8ExtMulHighI8x16S:
case kX64I16x8ExtMulLowI8x16U:
@@ -283,33 +233,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8Q15MulRSatS:
case kX64I16x8RelaxedQ15MulRS:
case kX64I16x8DotI8x16I7x16S:
- case kX64I8x16Splat:
- case kX64I8x16ExtractLaneS:
case kX64I8x16SConvertI16x8:
- case kX64I8x16Neg:
- case kX64I8x16Shl:
- case kX64I8x16ShrS:
- case kX64I8x16Add:
case kX64I8x16AddSatS:
- case kX64I8x16Sub:
case kX64I8x16SubSatS:
- case kX64I8x16MinS:
- case kX64I8x16MaxS:
- case kX64I8x16Eq:
- case kX64I8x16Ne:
- case kX64I8x16GtS:
- case kX64I8x16GeS:
case kX64I8x16UConvertI16x8:
case kX64I8x16AddSatU:
case kX64I8x16SubSatU:
- case kX64I8x16ShrU:
- case kX64I8x16MinU:
- case kX64I8x16MaxU:
- case kX64I8x16GtU:
- case kX64I8x16GeU:
case kX64I8x16RoundingAverageU:
- case kX64I8x16Abs:
- case kX64I8x16BitMask:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
@@ -319,9 +249,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Zero:
case kX64S128AllOnes:
case kX64S128AndNot:
- case kX64I64x2AllTrue:
- case kX64I32x4AllTrue:
- case kX64I16x8AllTrue:
+ case kX64IAllTrue:
case kX64I8x16Swizzle:
case kX64I8x16Shuffle:
case kX64I8x16Popcnt:
@@ -353,7 +281,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x4Reverse:
case kX64S8x2Reverse:
case kX64V128AnyTrue:
- case kX64I8x16AllTrue:
+ case kX64Blendvpd:
+ case kX64Blendvps:
case kX64Pblendvb:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
@@ -398,8 +327,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
case kX64MovqDecompressTaggedSigned:
- case kX64MovqDecompressTaggedPointer:
- case kX64MovqDecompressAnyTagged:
+ case kX64MovqDecompressTagged:
case kX64MovqCompressTagged:
case kX64MovqDecodeSandboxedPointer:
case kX64MovqEncodeSandboxedPointer:
@@ -407,10 +335,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movsd:
case kX64Movss:
case kX64Movdqu:
+ case kX64Movdqu256:
case kX64S128Load8Splat:
case kX64S128Load16Splat:
case kX64S128Load32Splat:
+ case kX64S256Load32Splat:
case kX64S128Load64Splat:
+ case kX64S256Load64Splat:
case kX64S128Load8x8S:
case kX64S128Load8x8U:
case kX64S128Load16x4S:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 0da7336254..9b987bc82b 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -32,6 +32,7 @@ bool IsCompressed(Node* const node) {
if (node == nullptr) return false;
const IrOpcode::Value opcode = node->opcode();
if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
+ opcode == IrOpcode::kLoadTrapOnNull ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -54,6 +55,21 @@ class X64OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kCompressedHeapConstant: {
+ if (!COMPRESS_POINTERS_BOOL) return false;
+ // For builtin code we need static roots
+ if (selector()->isolate()->bootstrapper() && !V8_STATIC_ROOTS_BOOL) {
+ return false;
+ }
+ const RootsTable& roots_table = selector()->isolate()->roots_table();
+ RootIndex root_index;
+ CompressedHeapObjectMatcher m(node);
+ if (m.HasResolvedValue() &&
+ roots_table.IsRootHandle(m.ResolvedValue(), &root_index)) {
+ return RootsTable::IsReadOnly(root_index);
+ }
+ return false;
+ }
case IrOpcode::kInt32Constant:
case IrOpcode::kRelocatableInt32Constant: {
const int32_t value = OpParameter<int32_t>(node->op());
@@ -149,9 +165,33 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count,
RegisterUseKind reg_kind = RegisterUseKind::kUseRegister) {
AddressingMode mode = kMode_MRI;
+ bool fold_base_into_displacement = false;
+ int64_t fold_value = 0;
if (base != nullptr && (index != nullptr || displacement != nullptr)) {
- if (base->opcode() == IrOpcode::kInt32Constant &&
- OpParameter<int32_t>(base->op()) == 0) {
+ if (index != nullptr && displacement != nullptr &&
+ (base->opcode() == IrOpcode::kInt32Constant ||
+ base->opcode() == IrOpcode::kInt64Constant) &&
+ CanBeImmediate(base) &&
+ (displacement->opcode() == IrOpcode::kInt32Constant ||
+ displacement->opcode() == IrOpcode::kInt64Constant) &&
+ CanBeImmediate(displacement)) {
+ fold_value = GetImmediateIntegerValue(base);
+ int64_t displacement_val = GetImmediateIntegerValue(displacement);
+ if (displacement_mode == kNegativeDisplacement) {
+ fold_value -= displacement_val;
+ } else {
+ fold_value += displacement_val;
+ }
+ if (fold_value == 0) {
+ base = nullptr;
+ displacement = nullptr;
+ } else if (std::numeric_limits<int32_t>::min() < fold_value &&
+ fold_value <= std::numeric_limits<int32_t>::max()) {
+ base = nullptr;
+ fold_base_into_displacement = true;
+ }
+ } else if (base->opcode() == IrOpcode::kInt32Constant &&
+ OpParameter<int32_t>(base->op()) == 0) {
base = nullptr;
} else if (base->opcode() == IrOpcode::kInt64Constant &&
OpParameter<int64_t>(base->op()) == 0) {
@@ -187,7 +227,16 @@ class X64OperandGenerator final : public OperandGenerator {
}
} else {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
- if (displacement != nullptr) {
+ if (fold_base_into_displacement) {
+ DCHECK(base == nullptr);
+ DCHECK(index != nullptr);
+ DCHECK(displacement != nullptr);
+ inputs[(*input_count)++] = UseRegister(index, reg_kind);
+ inputs[(*input_count)++] = UseImmediate(static_cast<int>(fold_value));
+ static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale_exponent];
+ } else if (displacement != nullptr) {
if (index == nullptr) {
inputs[(*input_count)++] = UseRegister(displacement, reg_kind);
mode = kMode_MR;
@@ -224,7 +273,7 @@ class X64OperandGenerator final : public OperandGenerator {
m.object().ResolvedValue())) {
ptrdiff_t const delta =
m.index().ResolvedValue() +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
@@ -249,7 +298,14 @@ class X64OperandGenerator final : public OperandGenerator {
}
return mode;
}
- if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
+ if (m.base() != nullptr &&
+ m.base()->opcode() == IrOpcode::kLoadRootRegister) {
+ DCHECK_EQ(m.index(), nullptr);
+ DCHECK_EQ(m.scale(), 0);
+ inputs[(*input_count)++] = UseImmediate(m.displacement());
+ return kMode_Root;
+ } else if (m.displacement() == nullptr ||
+ CanBeImmediate(m.displacement())) {
return GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(),
m.displacement_mode(), inputs, input_count, reg_kind);
@@ -318,10 +374,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64MovqDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
- opcode = kX64MovqDecompressTaggedPointer;
- break;
case MachineRepresentation::kTagged:
- opcode = kX64MovqDecompressAnyTagged;
+ opcode = kX64MovqDecompressTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
@@ -338,6 +392,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movdqu;
break;
case MachineRepresentation::kSimd256: // Fall through.
+ opcode = kX64Movdqu256;
+ break;
case MachineRepresentation::kNone: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
UNREACHABLE();
@@ -376,6 +432,7 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kSimd128:
return kX64Movdqu;
case MachineRepresentation::kSimd256: // Fall through.
+ return kX64Movdqu256;
case MachineRepresentation::kNone: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
UNREACHABLE();
@@ -464,7 +521,7 @@ void InstructionSelector::VisitLoadLane(Node* node) {
// x64 supports unaligned loads.
DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
Emit(opcode, 1, outputs, input_count, inputs);
}
@@ -509,6 +566,13 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
case LoadTransformation::kS128Load64Zero:
opcode = kX64Movsd;
break;
+ // Simd256
+ case LoadTransformation::kS256Load32Splat:
+ opcode = kX64S256Load32Splat;
+ break;
+ case LoadTransformation::kS256Load64Splat:
+ opcode = kX64S256Load64Splat;
+ break;
default:
UNREACHABLE();
}
@@ -516,7 +580,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
InstructionCode code = opcode;
if (params.kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
VisitLoad(node, node, code);
}
@@ -547,7 +611,9 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
node->opcode() == IrOpcode::kWord64AtomicLoad) &&
(AtomicLoadParametersOf(node->op()).kind() ==
MemoryAccessKind::kProtected))) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
+ } else if (node->opcode() == IrOpcode::kLoadTrapOnNull) {
+ code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
}
Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
@@ -578,7 +644,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AtomicWidthField::encode(width);
if (access_kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
@@ -602,9 +668,12 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
write_barrier_kind = kFullWriteBarrier;
}
- const auto access_mode = acs_kind == MemoryAccessKind::kProtected
- ? MemoryAccessMode::kMemoryAccessProtected
- : MemoryAccessMode::kMemoryAccessDirect;
+ const auto access_mode =
+ acs_kind == MemoryAccessKind::kProtected
+ ? (node->opcode() == IrOpcode::kStoreTrapOnNull
+ ? kMemoryAccessProtectedNullDereference
+ : MemoryAccessMode::kMemoryAccessProtectedMemOutOfBounds)
+ : MemoryAccessMode::kMemoryAccessDirect;
if (write_barrier_kind != kNoWriteBarrier &&
!v8_flags.disable_write_barriers) {
@@ -620,7 +689,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
: kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
+ code |= RecordWriteModeField::encode(record_write_mode);
code |= AccessModeField::encode(access_mode);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
arraysize(temps), temps);
@@ -688,36 +757,8 @@ void InstructionSelector::VisitStore(Node* node) {
}
void InstructionSelector::VisitProtectedStore(Node* node) {
- X64OperandGenerator g(this);
- Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
-
-#ifdef V8_IS_TSAN
- // On TSAN builds we require two scratch registers. Because of this we also
- // have to modify the inputs to take into account possible aliasing and use
- // UseUniqueRegister which is not required for non-TSAN builds.
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t temp_count = arraysize(temps);
- auto reg_kind = OperandGenerator::RegisterUseKind::kUseUniqueRegister;
-#else
- InstructionOperand* temps = nullptr;
- size_t temp_count = 0;
- auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
-#endif // V8_IS_TSAN
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count, reg_kind);
- InstructionOperand value_operand = g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : g.UseRegister(value, reg_kind);
- inputs[input_count++] = value_operand;
- ArchOpcode opcode = GetStoreOpcode(store_rep);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
- AccessModeField::encode(kMemoryAccessProtected);
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs,
- temp_count, temps);
+ return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt, MemoryAccessKind::kProtected);
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
@@ -750,7 +791,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
opcode |= AddressingModeField::encode(addressing_mode);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
@@ -1653,10 +1694,6 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
DCHECK_EQ(node->InputCount(), 1);
- Node* input = node->InputAt(0);
- if (input->opcode() == IrOpcode::kTruncateInt64ToInt32) {
- node->ReplaceInput(0, input->InputAt(0));
- }
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
@@ -1675,6 +1712,10 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
break;
case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ // While BitcastElider may remove nodes of
+ // IrOpcode::kTruncateInt64ToInt32 and directly use the inputs, values
+ // with kWord64 can also reach this line.
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTagged:
// ChangeInt32ToInt64 must interpret its input as a _signed_ 32-bit
@@ -1739,7 +1780,8 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kProtectedLoad: {
+ case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1858,7 +1900,7 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode opcode) {
+ InstructionCode opcode) {
X64OperandGenerator g(selector);
if (selector->IsSupported(AVX)) {
selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
@@ -2363,17 +2405,9 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
}
break;
}
- case MachineRepresentation::kWord16:
- if (opcode == kX64Test || opcode == kX64Test32) return kX64Test16;
- if (opcode == kX64Cmp || opcode == kX64Cmp32) {
- if (left_type.semantic() == MachineSemantic::kUint32) {
- cont->OverwriteUnsignedIfSigned();
- } else {
- CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
- }
- return kX64Cmp16;
- }
- break;
+    // Cmp16/Test16 may introduce an LCP (Length-Changing Prefix) stall; use
+    // Cmp32/Test32 instead.
+ case MachineRepresentation::kWord16: // Fall through.
case MachineRepresentation::kWord32:
if (opcode == kX64Test) return kX64Test32;
if (opcode == kX64Cmp) {
@@ -2528,7 +2562,7 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
return VisitCompare(
selector, opcode,
g.TempImmediate(
- TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
+ MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(m.left().node()), cont);
}
}
@@ -2537,7 +2571,7 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- if (COMPRESS_POINTERS_BOOL && selector->CanUseRootsRegister()) {
+ if (COMPRESS_POINTERS_BOOL && selector->isolate()) {
X64OperandGenerator g(selector);
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
@@ -2561,13 +2595,22 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
}
if (!right.is_null() && roots_table.IsRootHandle(right, &root_index)) {
DCHECK_NE(left, nullptr);
- InstructionCode opcode =
- kX64Cmp32 | AddressingModeField::encode(kMode_Root);
- return VisitCompare(
- selector, opcode,
- g.TempImmediate(
- TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
- g.UseRegister(left), cont);
+ if (RootsTable::IsReadOnly(root_index) &&
+ (V8_STATIC_ROOTS_BOOL || !selector->isolate()->bootstrapper())) {
+ return VisitCompare(selector, kX64Cmp32, g.UseRegister(left),
+ g.TempImmediate(MacroAssemblerBase::ReadOnlyRootPtr(
+ root_index, selector->isolate())),
+ cont);
+ }
+ if (selector->CanUseRootsRegister()) {
+ InstructionCode opcode =
+ kX64Cmp32 | AddressingModeField::encode(kMode_Root);
+ return VisitCompare(
+ selector, opcode,
+ g.TempImmediate(
+ MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
+ g.UseRegister(left), cont);
+ }
}
}
VisitWordCompare(selector, node, kX64Cmp32, cont);
@@ -2689,7 +2732,7 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AtomicWidthField::encode(width);
if (access_kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
@@ -2713,7 +2756,7 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AtomicWidthField::encode(width);
if (access_kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
@@ -3124,7 +3167,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 4);
VisitStoreCommon(this, node, params.store_representation(), params.order(),
- params.kind());
+ params.kind());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -3132,7 +3175,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
kTaggedSize == 8);
VisitStoreCommon(this, node, params.store_representation(), params.order(),
- params.kind());
+ params.kind());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -3279,38 +3322,10 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_BINOP_SSE_AVX_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Eq) \
V(I64x2ExtMulLowI32x4S) \
V(I64x2ExtMulHighI32x4S) \
V(I64x2ExtMulLowI32x4U) \
V(I64x2ExtMulHighI32x4U) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4GtS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4DotI16x8S) \
V(I32x4ExtMulLowI16x8S) \
V(I32x4ExtMulHighI16x8S) \
@@ -3318,19 +3333,10 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4ExtMulHighI16x8U) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
- V(I16x8Add) \
V(I16x8AddSatS) \
- V(I16x8Sub) \
V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
- V(I16x8GtS) \
V(I16x8AddSatU) \
V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8RoundingAverageU) \
V(I16x8ExtMulLowI8x16S) \
V(I16x8ExtMulHighI8x16S) \
@@ -3340,92 +3346,135 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8RelaxedQ15MulRS) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
- V(I8x16Add) \
V(I8x16AddSatS) \
- V(I8x16Sub) \
V(I8x16SubSatS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16Eq) \
- V(I8x16GtS) \
V(I8x16AddSatU) \
V(I8x16SubSatU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
V(I8x16RoundingAverageU) \
V(S128And) \
V(S128Or) \
V(S128Xor)
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(I64x2Ne) \
- V(I32x4Ne) \
- V(I32x4GtU) \
- V(I32x4GeS) \
- V(I32x4GeU) \
- V(I16x8Ne) \
- V(I16x8GtU) \
- V(I16x8GeS) \
- V(I16x8GeU) \
- V(I8x16Ne) \
- V(I8x16GtU) \
- V(I8x16GeS) \
- V(I8x16GeU)
+#define SIMD_BINOP_SSE_AVX_LANE_SIZE_VECTOR_LENGTH_LIST(V) \
+ V(F64x2Add, FAdd, kL64, kV128) \
+ V(F32x4Add, FAdd, kL32, kV128) \
+ V(F32x8Add, FAdd, kL32, kV256) \
+ V(I64x2Add, IAdd, kL64, kV128) \
+ V(I32x4Add, IAdd, kL32, kV128) \
+ V(I16x8Add, IAdd, kL16, kV128) \
+ V(I8x16Add, IAdd, kL8, kV128) \
+ V(F64x2Sub, FSub, kL64, kV128) \
+ V(F32x4Sub, FSub, kL32, kV128) \
+ V(F32x8Sub, FSub, kL32, kV256) \
+ V(I64x2Sub, ISub, kL64, kV128) \
+ V(I32x4Sub, ISub, kL32, kV128) \
+ V(I16x8Sub, ISub, kL16, kV128) \
+ V(I8x16Sub, ISub, kL8, kV128) \
+ V(F64x2Mul, FMul, kL64, kV128) \
+ V(F32x4Mul, FMul, kL32, kV128) \
+ V(I32x4Mul, IMul, kL32, kV128) \
+ V(I16x8Mul, IMul, kL16, kV128) \
+ V(F64x2Div, FDiv, kL64, kV128) \
+ V(F32x4Div, FDiv, kL32, kV128) \
+ V(F64x2Eq, FEq, kL64, kV128) \
+ V(F32x4Eq, FEq, kL32, kV128) \
+ V(I64x2Eq, IEq, kL64, kV128) \
+ V(I32x4Eq, IEq, kL32, kV128) \
+ V(I16x8Eq, IEq, kL16, kV128) \
+ V(I8x16Eq, IEq, kL8, kV128) \
+ V(F64x2Ne, FNe, kL64, kV128) \
+ V(F32x4Ne, FNe, kL32, kV128) \
+ V(I32x4GtS, IGtS, kL32, kV128) \
+ V(I16x8GtS, IGtS, kL16, kV128) \
+ V(I8x16GtS, IGtS, kL8, kV128) \
+ V(F64x2Lt, FLt, kL64, kV128) \
+ V(F32x4Lt, FLt, kL32, kV128) \
+ V(F64x2Le, FLe, kL64, kV128) \
+ V(F32x4Le, FLe, kL32, kV128) \
+ V(I32x4MinS, IMinS, kL32, kV128) \
+ V(I16x8MinS, IMinS, kL16, kV128) \
+ V(I8x16MinS, IMinS, kL8, kV128) \
+ V(I32x4MinU, IMinU, kL32, kV128) \
+ V(I16x8MinU, IMinU, kL16, kV128) \
+ V(I8x16MinU, IMinU, kL8, kV128) \
+ V(I32x4MaxS, IMaxS, kL32, kV128) \
+ V(I16x8MaxS, IMaxS, kL16, kV128) \
+ V(I8x16MaxS, IMaxS, kL8, kV128) \
+ V(I32x4MaxU, IMaxU, kL32, kV128) \
+ V(I16x8MaxU, IMaxU, kL16, kV128) \
+ V(I8x16MaxU, IMaxU, kL8, kV128)
+
+#define SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH_LIST(V) \
+ V(F64x2Min, FMin, kL64, kV128) \
+ V(F32x4Min, FMin, kL32, kV128) \
+ V(F64x2Max, FMax, kL64, kV128) \
+ V(F32x4Max, FMax, kL32, kV128) \
+ V(I64x2Ne, INe, kL64, kV128) \
+ V(I32x4Ne, INe, kL32, kV128) \
+ V(I16x8Ne, INe, kL16, kV128) \
+ V(I8x16Ne, INe, kL8, kV128) \
+ V(I32x4GtU, IGtU, kL32, kV128) \
+ V(I16x8GtU, IGtU, kL16, kV128) \
+ V(I8x16GtU, IGtU, kL8, kV128) \
+ V(I32x4GeS, IGeS, kL32, kV128) \
+ V(I16x8GeS, IGeS, kL16, kV128) \
+ V(I8x16GeS, IGeS, kL8, kV128) \
+ V(I32x4GeU, IGeU, kL32, kV128) \
+ V(I16x8GeU, IGeU, kL16, kV128) \
+ V(I8x16GeU, IGeU, kL8, kV128)
#define SIMD_UNOP_LIST(V) \
- V(F64x2Sqrt) \
V(F64x2ConvertLowI32x4S) \
V(F32x4SConvertI32x4) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4Sqrt) \
V(F32x4DemoteF64x2Zero) \
- V(I64x2BitMask) \
V(I64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High) \
V(I64x2UConvertI32x4Low) \
V(I64x2UConvertI32x4High) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
- V(I32x4Neg) \
V(I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High) \
- V(I32x4Abs) \
- V(I32x4BitMask) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
- V(I16x8Neg) \
V(I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High) \
- V(I16x8Abs) \
- V(I16x8BitMask) \
- V(I8x16Neg) \
- V(I8x16Abs) \
- V(I8x16BitMask) \
- V(I64x2AllTrue) \
- V(I32x4AllTrue) \
- V(I16x8AllTrue) \
- V(I8x16AllTrue) \
V(S128Not)
-#define SIMD_SHIFT_OPCODES(V) \
- V(I64x2Shl) \
- V(I64x2ShrU) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8ShrU)
-
-#define SIMD_NARROW_SHIFT_OPCODES(V) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
- V(I8x16ShrU)
+#define SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH_LIST(V) \
+ V(F32x4Abs, FAbs, kL32, kV128) \
+ V(I32x4Abs, IAbs, kL32, kV128) \
+ V(I16x8Abs, IAbs, kL16, kV128) \
+ V(I8x16Abs, IAbs, kL8, kV128) \
+ V(F32x4Neg, FNeg, kL32, kV128) \
+ V(I32x4Neg, INeg, kL32, kV128) \
+ V(I16x8Neg, INeg, kL16, kV128) \
+ V(I8x16Neg, INeg, kL8, kV128) \
+ V(F64x2Sqrt, FSqrt, kL64, kV128) \
+ V(F32x4Sqrt, FSqrt, kL32, kV128) \
+ V(I64x2BitMask, IBitMask, kL64, kV128) \
+ V(I32x4BitMask, IBitMask, kL32, kV128) \
+ V(I16x8BitMask, IBitMask, kL16, kV128) \
+ V(I8x16BitMask, IBitMask, kL8, kV128) \
+ V(I64x2AllTrue, IAllTrue, kL64, kV128) \
+ V(I32x4AllTrue, IAllTrue, kL32, kV128) \
+ V(I16x8AllTrue, IAllTrue, kL16, kV128) \
+ V(I8x16AllTrue, IAllTrue, kL8, kV128)
+
+#define SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(V) \
+ V(I64x2Shl, IShl, kL64, kV128) \
+ V(I32x4Shl, IShl, kL32, kV128) \
+ V(I16x8Shl, IShl, kL16, kV128) \
+ V(I32x4ShrS, IShrS, kL32, kV128) \
+ V(I16x8ShrS, IShrS, kL16, kV128) \
+ V(I64x2ShrU, IShrU, kL64, kV128) \
+ V(I32x4ShrU, IShrU, kL32, kV128) \
+ V(I16x8ShrU, IShrU, kL16, kV128)
+
+#define SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(V) \
+ V(I8x16Shl, IShl, kL8, kV128) \
+ V(I8x16ShrS, IShrS, kL8, kV128) \
+ V(I8x16ShrU, IShrU, kL8, kV128)
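The `*_LANE_SIZE_VECTOR_LENGTH` lists above are X-macro tables: each entry pairs a node name with a generic opcode plus its lane size and vector length, and a single visitor macro later expands the whole table into one function per entry. Below is a minimal, self-contained sketch of that pattern; the enum values, list contents, and `DEMO_*` names are illustrative stand-ins, not V8's real definitions.

```cpp
#include <cstdio>

// Hypothetical stand-ins for V8's lane-size/vector-length enums.
enum LaneSize { kL8, kL16, kL32, kL64 };
enum VectorLength { kV128 };

// Entry format mirrors the lists above: V(Name, Opcode, LaneSize, VectorLength).
#define DEMO_SIMD_BINOP_LIST(V)  \
  V(F64x2Add, FAdd, kL64, kV128) \
  V(I32x4Add, IAdd, kL32, kV128) \
  V(I8x16Sub, ISub, kL8, kV128)

// One macro body expands into one visitor per list entry.
#define DEMO_VISIT(Name, Opcode, LaneSize, VectorLength)             \
  void Visit##Name() {                                               \
    std::printf(#Name " -> " #Opcode " lane=%d vlen=%d\n", LaneSize, \
                VectorLength);                                       \
  }
DEMO_SIMD_BINOP_LIST(DEMO_VISIT)
#undef DEMO_VISIT

int main() {
  VisitF64x2Add();
  VisitI32x4Add();
  VisitI8x16Sub();
}
```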
void InstructionSelector::VisitS128Const(Node* node) {
X64OperandGenerator g(this);
@@ -3452,61 +3501,82 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kX64S128Zero, g.DefineAsRegister(node));
}
-#define SIMD_TYPES_FOR_SPLAT(V) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
+// Name, LaneSize, VectorLength
+#define SIMD_INT_TYPES_FOR_SPLAT(V) \
+ V(I64x2, kL64, kV128) \
+ V(I32x4, kL32, kV128) \
+ V(I16x8, kL16, kV128) \
+ V(I8x16, kL8, kV128)
// Splat with an optimization for const 0.
-#define VISIT_SIMD_SPLAT(Type) \
+#define VISIT_INT_SIMD_SPLAT(Type, LaneSize, VectorLength) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
X64OperandGenerator g(this); \
Node* input = node->InputAt(0); \
if (g.CanBeImmediate(input) && g.GetImmediateIntegerValue(input) == 0) { \
Emit(kX64S128Zero, g.DefineAsRegister(node)); \
} else { \
- Emit(kX64##Type##Splat, g.DefineAsRegister(node), g.Use(input)); \
+ Emit(kX64ISplat | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ g.DefineAsRegister(node), g.Use(input)); \
} \
}
-SIMD_TYPES_FOR_SPLAT(VISIT_SIMD_SPLAT)
-#undef VISIT_SIMD_SPLAT
-#undef SIMD_TYPES_FOR_SPLAT
+SIMD_INT_TYPES_FOR_SPLAT(VISIT_INT_SIMD_SPLAT)
+#undef VISIT_INT_SIMD_SPLAT
+#undef SIMD_INT_TYPES_FOR_SPLAT
void InstructionSelector::VisitF64x2Splat(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64F64x2Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Emit(kX64FSplat | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitF32x4Splat(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64F32x4Splat, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ Emit(kX64FSplat | LaneSizeField::encode(kL32) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+#define SIMD_VISIT_EXTRACT_LANE(IF, Type, Sign, LaneSize, VectorLength) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kX64##IF##ExtractLane##Sign | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(lane)); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F, F64x2, , kL64, kV128)
+SIMD_VISIT_EXTRACT_LANE(F, F32x4, , kL32, kV128)
+SIMD_VISIT_EXTRACT_LANE(I, I64x2, , kL64, kV128)
+SIMD_VISIT_EXTRACT_LANE(I, I32x4, , kL32, kV128)
+SIMD_VISIT_EXTRACT_LANE(I, I16x8, S, kL16, kV128)
+SIMD_VISIT_EXTRACT_LANE(I, I8x16, S, kL8, kV128)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+void InstructionSelector::VisitI16x8ExtractLaneU(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node->op());
+ Emit(kX64Pextrw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(lane));
}
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign, Op) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kX64##Op, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
- g.UseImmediate(lane)); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, , F64x2ExtractLane)
-SIMD_VISIT_EXTRACT_LANE(F32x4, , F32x4ExtractLane)
-SIMD_VISIT_EXTRACT_LANE(I64x2, , I64x2ExtractLane)
-SIMD_VISIT_EXTRACT_LANE(I32x4, , I32x4ExtractLane)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S, I16x8ExtractLaneS)
-SIMD_VISIT_EXTRACT_LANE(I16x8, U, Pextrw)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S, I8x16ExtractLaneS)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U, Pextrb)
-#undef SIMD_VISIT_EXTRACT_LANE
+void InstructionSelector::VisitI8x16ExtractLaneU(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node->op());
+ Emit(kX64Pextrb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(lane));
+}
void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
X64OperandGenerator g(this);
int32_t lane = OpParameter<int32_t>(node->op());
- Emit(kX64F32x4ReplaceLane, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
- g.Use(node->InputAt(1)));
+ Emit(kX64FReplaceLane | LaneSizeField::encode(kL32) |
+ VectorLengthField::encode(kV128),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(lane), g.Use(node->InputAt(1)));
}
void InstructionSelector::VisitF64x2ReplaceLane(Node* node) {
@@ -3515,8 +3585,10 @@ void InstructionSelector::VisitF64x2ReplaceLane(Node* node) {
// When no-AVX, define dst == src to save a move.
InstructionOperand dst =
IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
- Emit(kX64F64x2ReplaceLane, dst, g.UseRegister(node->InputAt(0)),
- g.UseImmediate(lane), g.UseRegister(node->InputAt(1)));
+ Emit(kX64FReplaceLane | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ dst, g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.UseRegister(node->InputAt(1)));
}
#define VISIT_SIMD_REPLACE_LANE(TYPE, OPCODE) \
@@ -3537,40 +3609,53 @@ SIMD_TYPES_FOR_REPLACE_LANE(VISIT_SIMD_REPLACE_LANE)
#undef SIMD_TYPES_FOR_REPLACE_LANE
#undef VISIT_SIMD_REPLACE_LANE
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
+#define VISIT_SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES( \
+ Name, Opcode, LaneSize, VectorLength) \
+ void InstructionSelector::Visit##Name(Node* node) { \
X64OperandGenerator g(this); \
InstructionOperand dst = IsSupported(AVX) ? g.DefineAsRegister(node) \
: g.DefineSameAsFirst(node); \
if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ dst, g.UseRegister(node->InputAt(0)), \
g.UseImmediate(node->InputAt(1))); \
} else { \
- Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ dst, g.UseRegister(node->InputAt(0)), \
g.UseRegister(node->InputAt(1))); \
} \
}
-SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
-#undef VISIT_SIMD_SHIFT
-#undef SIMD_SHIFT_OPCODES
+SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(
+ VISIT_SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES)
+
+#undef VISIT_SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
+#undef SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
-#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
+#define VISIT_SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES( \
+ Name, Opcode, LaneSize, VectorLength) \
+ void InstructionSelector::Visit##Name(Node* node) { \
X64OperandGenerator g(this); \
InstructionOperand output = \
IsSupported(AVX) ? g.UseRegister(node) : g.DefineSameAsFirst(node); \
if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, output, g.UseRegister(node->InputAt(0)), \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ output, g.UseRegister(node->InputAt(0)), \
g.UseImmediate(node->InputAt(1))); \
} else { \
InstructionOperand temps[] = {g.TempSimd128Register()}; \
- Emit(kX64##Opcode, output, g.UseUniqueRegister(node->InputAt(0)), \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ output, g.UseUniqueRegister(node->InputAt(0)), \
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
} \
}
-SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
-#undef VISIT_SIMD_NARROW_SHIFT
-#undef SIMD_NARROW_SHIFT_OPCODES
+SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(
+ VISIT_SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES)
+#undef VISIT_SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
+#undef SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
#define VISIT_SIMD_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3582,15 +3667,35 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
-#define VISIT_SIMD_BINOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+#define VISIT_SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH(Name, Opcode, LaneSize, \
+ VectorLength) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); \
}
-SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
-#undef VISIT_SIMD_BINOP
-#undef SIMD_BINOP_LIST
+
+SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH_LIST(VISIT_SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH)
+
+#undef VISIT_SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH
+#undef SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH_LIST
+
+#define VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH(Name, Opcode, LaneSize, \
+ VectorLength) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1))); \
+ }
+
+SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH_LIST(
+ VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH)
+
+#undef VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH
+#undef SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH_LIST
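Each `Emit` call above ORs `LaneSizeField::encode(...)` and `VectorLengthField::encode(...)` into a generic opcode such as `kX64IAdd`, so one opcode plus two small bit fields stands in for a whole family of per-shape opcodes. A rough sketch of that encoding follows; the field positions, masks, and the placeholder opcode value are assumptions for illustration, not V8's actual instruction-code layout.

```cpp
#include <cstdint>
#include <cassert>

using InstructionCode = uint32_t;

// Hypothetical field layout: low bits hold the opcode, then lane size,
// then vector length. The real offsets in V8's headers differ.
struct LaneSizeField {
  static constexpr int kShift = 22;
  static constexpr InstructionCode encode(uint32_t lane_size) {
    return lane_size << kShift;
  }
  static constexpr uint32_t decode(InstructionCode code) {
    return (code >> kShift) & 0x3;
  }
};

struct VectorLengthField {
  static constexpr int kShift = 24;
  static constexpr InstructionCode encode(uint32_t vector_length) {
    return vector_length << kShift;
  }
  static constexpr uint32_t decode(InstructionCode code) {
    return (code >> kShift) & 0x3;
  }
};

enum { kL8, kL16, kL32, kL64 };         // lane sizes
enum { kV128 };                         // vector lengths
constexpr InstructionCode kX64IAdd = 7; // placeholder opcode value

int main() {
  InstructionCode code =
      kX64IAdd | LaneSizeField::encode(kL32) | VectorLengthField::encode(kV128);
  // The code generator later recovers the operation shape from the same code.
  assert(LaneSizeField::decode(code) == kL32);
  assert(VectorLengthField::decode(code) == kV128);
  assert((code & 0x3FFFFF) == kX64IAdd);
}
```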
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3603,10 +3708,33 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
} \
}
+
SIMD_BINOP_SSE_AVX_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_SSE_AVX_LIST
+#define VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH(Name, Opcode, LaneSize, \
+ VectorLength) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ X64OperandGenerator g(this); \
+ if (IsSupported(AVX)) { \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1))); \
+ } else { \
+ Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
+ VectorLengthField::encode(VectorLength), \
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1))); \
+ } \
+ }
+
+SIMD_BINOP_SSE_AVX_LANE_SIZE_VECTOR_LENGTH_LIST(
+ VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH)
+#undef VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH
+#undef SIMD_BINOP_SSE_AVX_LANE_SIZE_VECTOR_LENGTH_LIST
+
void InstructionSelector::VisitV128AnyTrue(Node* node) {
X64OperandGenerator g(this);
Emit(kX64V128AnyTrue, g.DefineAsRegister(node),
@@ -3629,11 +3757,15 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
}
void InstructionSelector::VisitF64x2Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Abs);
+ VisitFloatUnop(this, node, node->InputAt(0),
+ kX64FAbs | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128));
}
void InstructionSelector::VisitF64x2Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Neg);
+ VisitFloatUnop(this, node, node->InputAt(0),
+ kX64FNeg | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128));
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
@@ -3660,7 +3792,9 @@ void InstructionSelector::VisitI64x2Neg(Node* node) {
InstructionOperand operand0 = IsSupported(AVX)
? g.UseRegister(node->InputAt(0))
: g.UseUnique(node->InputAt(0));
- Emit(kX64I64x2Neg, g.DefineAsRegister(node), operand0);
+ Emit(
+ kX64INeg | LaneSizeField::encode(kL64) | VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), operand0);
}
void InstructionSelector::VisitI64x2ShrS(Node* node) {
@@ -3669,11 +3803,15 @@ void InstructionSelector::VisitI64x2ShrS(Node* node) {
IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
if (g.CanBeImmediate(node->InputAt(1))) {
- Emit(kX64I64x2ShrS, dst, g.UseRegister(node->InputAt(0)),
+ Emit(kX64IShrS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ dst, g.UseRegister(node->InputAt(0)),
g.UseImmediate(node->InputAt(1)));
} else {
InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I64x2ShrS, dst, g.UseUniqueRegister(node->InputAt(0)),
+ Emit(kX64IShrS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ dst, g.UseUniqueRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
}
}
@@ -3681,9 +3819,10 @@ void InstructionSelector::VisitI64x2ShrS(Node* node) {
void InstructionSelector::VisitI64x2Mul(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I64x2Mul, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+ Emit(
+ kX64IMul | LaneSizeField::encode(kL64) | VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
@@ -4098,22 +4237,22 @@ void InstructionSelector::VisitI8x16Swizzle(Node* node) {
}
namespace {
-// pblendvb is a correct implementation for all the various relaxed lane select,
-// see https://github.com/WebAssembly/relaxed-simd/issues/17.
-void VisitRelaxedLaneSelect(InstructionSelector* selector, Node* node) {
+void VisitRelaxedLaneSelect(InstructionSelector* selector, Node* node,
+ InstructionCode code = kX64Pblendvb) {
X64OperandGenerator g(selector);
- // pblendvb copies src2 when mask is set, opposite from Wasm semantics.
- // node's inputs are: mask, lhs, rhs (determined in wasm-compiler.cc).
+ // pblendvb/blendvps/blendvpd copy src2 when the mask is set, which is the
+ // opposite of the Wasm semantics. The node's inputs are: mask, lhs, rhs
+ // (determined in wasm-compiler.cc).
if (selector->IsSupported(AVX)) {
selector->Emit(
- kX64Pblendvb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(2)),
+ code, g.DefineAsRegister(node), g.UseRegister(node->InputAt(2)),
g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
} else {
- // SSE4.1 pblendvb requires xmm0 to hold the mask as an implicit operand.
- selector->Emit(kX64Pblendvb, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(2)),
- g.UseRegister(node->InputAt(1)),
- g.UseFixed(node->InputAt(0), xmm0));
+ // SSE4.1 pblendvb/blendvps/blendvpd require xmm0 to hold the mask as an
+ // implicit operand.
+ selector->Emit(
+ code, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(1)), g.UseFixed(node->InputAt(0), xmm0));
}
}
} // namespace
@@ -4125,10 +4264,11 @@ void InstructionSelector::VisitI16x8RelaxedLaneSelect(Node* node) {
VisitRelaxedLaneSelect(this, node);
}
void InstructionSelector::VisitI32x4RelaxedLaneSelect(Node* node) {
- VisitRelaxedLaneSelect(this, node);
+ VisitRelaxedLaneSelect(this, node, kX64Blendvps);
}
+
void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
- VisitRelaxedLaneSelect(this, node);
+ VisitRelaxedLaneSelect(this, node, kX64Blendvpd);
}
#else
void InstructionSelector::VisitI8x16Swizzle(Node* node) { UNREACHABLE(); }
@@ -4282,14 +4422,19 @@ void InstructionSelector::VisitI32x4RelaxedTruncF32x4U(Node* node) {
void InstructionSelector::VisitI64x2GtS(Node* node) {
X64OperandGenerator g(this);
if (CpuFeatures::IsSupported(AVX)) {
- Emit(kX64I64x2GtS, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ Emit(kX64IGtS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
} else if (CpuFeatures::IsSupported(SSE4_2)) {
- Emit(kX64I64x2GtS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ Emit(kX64IGtS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
} else {
- Emit(kX64I64x2GtS, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
+ Emit(kX64IGtS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
}
}
@@ -4297,15 +4442,19 @@ void InstructionSelector::VisitI64x2GtS(Node* node) {
void InstructionSelector::VisitI64x2GeS(Node* node) {
X64OperandGenerator g(this);
if (CpuFeatures::IsSupported(AVX)) {
- Emit(kX64I64x2GeS, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ Emit(kX64IGeS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
} else if (CpuFeatures::IsSupported(SSE4_2)) {
- Emit(kX64I64x2GeS, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
+ Emit(kX64IGeS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseUniqueRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
} else {
- Emit(kX64I64x2GeS, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
+ Emit(kX64IGeS | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
}
}
@@ -4313,11 +4462,13 @@ void InstructionSelector::VisitI64x2GeS(Node* node) {
void InstructionSelector::VisitI64x2Abs(Node* node) {
X64OperandGenerator g(this);
if (CpuFeatures::IsSupported(AVX)) {
- Emit(kX64I64x2Abs, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)));
+ Emit(kX64IAbs | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineAsRegister(node), g.UseUniqueRegister(node->InputAt(0)));
} else {
- Emit(kX64I64x2Abs, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ Emit(kX64IAbs | LaneSizeField::encode(kL64) |
+ VectorLengthField::encode(kV128),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}
}
@@ -4329,7 +4480,7 @@ void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
if (m.ResolvedValue().kind == MemoryAccessKind::kProtected) {
- code |= AccessModeField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
}
// LoadTransforms cannot be eliminated, so they are visited even if
// unused. Mark it as defined so that we don't visit it.
@@ -4347,6 +4498,15 @@ void InstructionSelector::VisitI16x8DotI8x16I7x16S(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
+void InstructionSelector::VisitI32x4DotI8x16I7x16AddS(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64I32x4DotI8x16I7x16AddS, g.DefineSameAsInput(node, 2),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)), arraysize(temps), temps);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
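A choice that recurs throughout these visitors is `IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node)`: AVX provides three-operand encodings, so the result can land in any register, while the legacy SSE forms are destructive and the destination must alias the first input. The toy types below sketch how a selector might express that constraint; they are not V8's operand generator API.

```cpp
#include <string>
#include <iostream>

// Toy operand constraints, loosely modeled on the register-allocator hints
// an instruction selector emits.
enum class DstPolicy { kAnyRegister, kSameAsFirstInput };

struct SelectedInstr {
  std::string opcode;
  DstPolicy dst_policy;
};

// Destructive two-operand SSE forms overwrite their first source, so the
// destination must be constrained to alias it; AVX three-operand forms do not.
SelectedInstr SelectBinop(const std::string& opcode, bool avx_supported) {
  return {opcode, avx_supported ? DstPolicy::kAnyRegister
                                : DstPolicy::kSameAsFirstInput};
}

int main() {
  SelectedInstr with_avx = SelectBinop("IAdd.32x4", /*avx_supported=*/true);
  SelectedInstr sse_only = SelectBinop("IAdd.32x4", /*avx_supported=*/false);
  std::cout << with_avx.opcode << ": dst may be any register\n";
  std::cout << sse_only.opcode << ": dst must equal first input\n";
}
```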
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 1e9d0246b6..448bed84c9 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -59,7 +59,7 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
AllowHandleDereference allow_handle_dereference;
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
- size_t n_blocks = schedule->RpoBlockCount() - 1;
+ size_t n_blocks = schedule->RpoBlockCount();
BasicBlockProfilerData* data = BasicBlockProfiler::Get()->NewData(n_blocks);
// Set the function name.
data->SetFunctionName(info->GetDebugName());
@@ -84,7 +84,7 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
// PatchBasicBlockCountersReference). An important and subtle point: we
// cannot use the root handle basic_block_counters_marker_handle() and must
// create a new separate handle. Otherwise
- // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a
+ // MacroAssemblerBase::IndirectLoadConstant would helpfully emit a
// root-relative load rather than putting this value in the constants table
// where we expect it to be for patching.
counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(
@@ -99,6 +99,7 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
++it, ++block_number) {
BasicBlock* block = (*it);
+ if (block == schedule->end()) continue;
// Iteration is already in reverse post-order.
DCHECK_EQ(block->rpo_number(), block_number);
data->SetBlockId(block_number, block->id().ToInt());
@@ -146,10 +147,8 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
// The exit block is not instrumented and so we must ignore that block
// count.
if (block->control() == BasicBlock::kBranch &&
- block->successors()[0]->rpo_number() !=
- static_cast<int32_t>(n_blocks) &&
- block->successors()[1]->rpo_number() !=
- static_cast<int32_t>(n_blocks)) {
+ block->successors()[0] != schedule->end() &&
+ block->successors()[1] != schedule->end()) {
data->AddBranch(block->successors()[0]->id().ToInt(),
block->successors()[1]->id().ToInt());
}
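The instrumentor change above keeps the exit block in the profile data (`RpoBlockCount()` without the `- 1`) but still skips instrumenting it, now by comparing against `schedule->end()` directly instead of against an rpo index. A tiny sketch of that filter-by-identity loop, using toy block and schedule types that are not V8's:

```cpp
#include <vector>
#include <iostream>

struct BasicBlock {
  int id;
};

struct Schedule {
  std::vector<BasicBlock*> rpo_order;
  BasicBlock* end_block = nullptr;  // the exit block, part of rpo_order
  BasicBlock* end() const { return end_block; }
};

// Visit every block except the exit block, identified by pointer identity
// rather than by its position in the reverse-post-order numbering.
void InstrumentBlocks(const Schedule& schedule) {
  for (BasicBlock* block : schedule.rpo_order) {
    if (block == schedule.end()) continue;  // never instrument the exit block
    std::cout << "instrument block " << block->id << "\n";
  }
}

int main() {
  BasicBlock b0{0}, b1{1}, b2{2};
  Schedule schedule{{&b0, &b1, &b2}, &b2};
  InstrumentBlocks(schedule);  // prints blocks 0 and 1 only
}
```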
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 3102dcc57b..7a8d91cf89 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -7,6 +7,7 @@
#include "src/base/small-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
@@ -138,6 +139,86 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
NodeProperties::ReplaceValueInput(branch, new_phi, 0);
}
+bool BranchElimination::TryEliminateBranchWithPhiCondition(Node* branch,
+ Node* phi,
+ Node* merge) {
+ // If the condition of the branch comes from two constant values,
+ // then try to merge the branch's successors into its predecessors,
+ // and eliminate the (branch, phi, merge) nodes.
+ //
+ //  pred0   pred1
+ //     \    /
+ //      merge            0   1
+ //       |  \___________  |  /
+ //       |              \ | /           pred0     pred1
+ //       |               phi              |         |
+ //       |   _____________/        =>     |         |
+ //       |  /                             |         |
+ //      branch                          succ0     succ1
+ //      /    \
+ //   false   true
+ //     |      |
+ //   succ0  succ1
+ //
+
+ DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
+ DCHECK_EQ(phi->opcode(), IrOpcode::kPhi);
+ DCHECK_EQ(merge->opcode(), IrOpcode::kMerge);
+ DCHECK_EQ(NodeProperties::GetControlInput(branch, 0), merge);
+ if (!phi->OwnedBy(branch)) return false;
+ if (phi->InputCount() != 3) return false;
+ if (phi->InputAt(2) != merge) return false;
+ if (merge->UseCount() != 2) return false;
+
+ Node::Inputs phi_inputs = phi->inputs();
+ Node* first_value = phi_inputs[0];
+ Node* second_value = phi_inputs[1];
+ if (first_value->opcode() != IrOpcode::kInt32Constant ||
+ second_value->opcode() != IrOpcode::kInt32Constant) {
+ return false;
+ }
+ Node::Inputs merge_inputs = merge->inputs();
+ Node* predecessor0 = merge_inputs[0];
+ Node* predecessor1 = merge_inputs[1];
+ DCHECK_EQ(branch->op()->ControlOutputCount(), 2);
+ Node** projections = zone()->NewArray<Node*>(2);
+ NodeProperties::CollectControlProjections(branch, projections, 2);
+ Node* branch_true = projections[0];
+ Node* branch_false = projections[1];
+ DCHECK_EQ(branch_true->opcode(), IrOpcode::kIfTrue);
+ DCHECK_EQ(branch_false->opcode(), IrOpcode::kIfFalse);
+
+ // The input values of phi should be true(1) and false(0).
+ Int32Matcher mfirst_value(first_value);
+ Int32Matcher msecond_value(second_value);
+ Node* predecessor_true = nullptr;
+ Node* predecessor_false = nullptr;
+ if (mfirst_value.Is(1) && msecond_value.Is(0)) {
+ predecessor_true = predecessor0;
+ predecessor_false = predecessor1;
+ } else if (mfirst_value.Is(0) && msecond_value.Is(1)) {
+ predecessor_true = predecessor1;
+ predecessor_false = predecessor0;
+ } else {
+ return false;
+ }
+
+ // Merge the branch's successors into its predecessors.
+ for (Edge edge : branch_true->use_edges()) {
+ edge.UpdateTo(predecessor_true);
+ }
+ for (Edge edge : branch_false->use_edges()) {
+ edge.UpdateTo(predecessor_false);
+ }
+
+ branch_true->Kill();
+ branch_false->Kill();
+ branch->Kill();
+ phi->Kill();
+ merge->Kill();
+ return true;
+}
+
Reduction BranchElimination::ReduceBranch(Node* node) {
Node* condition = node->InputAt(0);
Node* control_input = NodeProperties::GetControlInput(node, 0);
@@ -162,6 +243,13 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return Replace(dead());
}
SimplifyBranchCondition(node);
+ // Try to reduce the pattern where the branch condition comes from a phi node.
+ if (condition->opcode() == IrOpcode::kPhi &&
+ control_input->opcode() == IrOpcode::kMerge) {
+ if (TryEliminateBranchWithPhiCondition(node, condition, control_input)) {
+ return Replace(dead());
+ }
+ }
// Trigger revisits of the IfTrue/IfFalse projections, since they depend on
// the branch condition.
for (Node* const use : node->uses()) {
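`TryEliminateBranchWithPhiCondition` above targets branches whose condition is a phi of the constants 1 and 0 over a two-way merge: the IfTrue/IfFalse projections are rewired to whichever merge predecessor produced the matching constant, and the branch, phi, and merge are killed. The reduced sketch below only models the rewiring step on toy nodes; it is not the real `Node`/`Edge` API.

```cpp
#include <vector>
#include <cassert>

// Toy control node: just an id and a list of users whose control input is
// this node.
struct ToyNode {
  int id;
  std::vector<ToyNode*> users;
};

// Redirect every user of `from` to use `to` instead, mirroring the
// Edge::UpdateTo loop over a projection's use edges.
void RedirectUsers(ToyNode* from, ToyNode* to) {
  for (ToyNode* user : from->users) to->users.push_back(user);
  from->users.clear();
}

int main() {
  ToyNode pred_true{0, {}}, pred_false{1, {}};
  ToyNode if_true{2, {}}, if_false{3, {}};
  ToyNode succ0{4, {}}, succ1{5, {}};
  if_true.users = {&succ1};   // succ1 was reached through the true projection
  if_false.users = {&succ0};  // succ0 through the false projection

  // The phi inputs were (1, 0), so predecessor 0 is the "true" predecessor.
  RedirectUsers(&if_true, &pred_true);
  RedirectUsers(&if_false, &pred_false);

  assert(pred_true.users.size() == 1 && pred_true.users[0] == &succ1);
  assert(pred_false.users.size() == 1 && pred_false.users[0] == &succ0);
}
```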
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 4c0f37ad7c..ca0a872e50 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -70,6 +70,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceStart(Node* node);
Reduction ReduceOtherControl(Node* node);
void SimplifyBranchCondition(Node* branch);
+ bool TryEliminateBranchWithPhiCondition(Node* branch, Node* phi, Node* merge);
Reduction UpdateStatesHelper(Node* node,
ControlPathConditions prev_conditions,
Node* current_condition, Node* current_branch,
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 419d4aa1e2..f84692bf18 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -212,6 +212,8 @@ void UpdateInLiveness(BytecodeLivenessState* in_liveness,
if (BytecodeOperands::WritesAccumulator(implicit_register_use)) {
in_liveness->MarkAccumulatorDead();
}
+ DCHECK_IMPLIES(BytecodeOperands::ClobbersAccumulator(implicit_register_use),
+ !in_liveness->AccumulatorIsLive());
(UpdateInLivenessForOutOperand<bytecode, operand_types, operand_index>(
in_liveness, iterator),
...);
@@ -237,6 +239,7 @@ void UpdateInLiveness(BytecodeLivenessState* in_liveness,
std::make_index_sequence<sizeof...(operand_types)>());
}
+#ifdef DEBUG
void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
const interpreter::BytecodeArrayIterator& iterator) {
switch (bytecode) {
@@ -248,6 +251,7 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
#undef BYTECODE_UPDATE_IN_LIVENESS
}
}
+#endif // DEBUG
template <bool IsFirstUpdate = false>
void EnsureOutLivenessIsNotAlias(
@@ -289,6 +293,25 @@ void UpdateOutLiveness(BytecodeLiveness& liveness,
return;
}
+ // Special case SwitchOnGeneratorState to ignore resume liveness, since that's
+ // a pass through. Instead, just consider the fallthrough live, plus the
+ // generator register itself for the resumes.
+ if (bytecode == Bytecode::kSwitchOnGeneratorState) {
+ DCHECK_NOT_NULL(next_bytecode_in_liveness);
+ if (IsFirstUpdate) {
+ // The generator register won't be live in the fallthrough, so copy the
+ // liveness and make it live here.
+ int generator_reg_index = iterator.GetRegisterOperand(0).index();
+ DCHECK(!next_bytecode_in_liveness->RegisterIsLive(generator_reg_index));
+ liveness.out =
+ zone->New<BytecodeLivenessState>(*next_bytecode_in_liveness, zone);
+ liveness.out->MarkRegisterLive(generator_reg_index);
+ } else {
+ liveness.out->Union(*next_bytecode_in_liveness);
+ }
+ return;
+ }
+
// Update from next bytecode (unless there isn't one or this is an
// unconditional jump).
if (next_bytecode_in_liveness != nullptr &&
@@ -464,7 +487,6 @@ void BytecodeAnalysis::Analyze() {
loop_stack_.push({-1, nullptr});
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
- int generator_switch_index = -1;
int osr_loop_end_offset = osr_bailout_id_.ToInt();
DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());
@@ -479,10 +501,7 @@ void BytecodeAnalysis::Analyze() {
Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
- if (bytecode == Bytecode::kSwitchOnGeneratorState) {
- DCHECK_EQ(generator_switch_index, -1);
- generator_switch_index = iterator.current_index();
- } else if (bytecode == Bytecode::kJumpLoop) {
+ if (bytecode == Bytecode::kJumpLoop) {
// Every byte up to and including the last byte within the backwards jump
// instruction is considered part of the loop, set loop end accordingly.
int loop_end = current_offset + iterator.current_bytecode_size();
@@ -669,48 +688,6 @@ void BytecodeAnalysis::Analyze() {
liveness_map(), zone());
}
- // Process the generator switch statement separately, once the loops are done.
- // This has to be a separate pass because the generator switch can jump into
- // the middle of loops (and is the only kind of jump that can jump across a
- // loop header).
- if (generator_switch_index != -1) {
- iterator.GoToIndex(generator_switch_index);
- DCHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnGeneratorState);
-
- int current_offset = iterator.current_offset();
- BytecodeLiveness& switch_liveness =
- liveness_map().GetLiveness(current_offset);
-
- bool any_changed = false;
- for (interpreter::JumpTableTargetOffset entry :
- iterator.GetJumpTableTargetOffsets()) {
- if (switch_liveness.out->UnionIsChanged(
- *liveness_map().GetInLiveness(entry.target_offset))) {
- any_changed = true;
- }
- }
-
- // If the switch liveness changed, we have to propagate it up the remaining
- // bytecodes before it.
- if (any_changed) {
- switch_liveness.in->CopyFrom(*switch_liveness.out);
- UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, switch_liveness.in,
- iterator);
- next_bytecode_in_liveness = switch_liveness.in;
- for (--iterator; iterator.IsValid(); --iterator) {
- Bytecode bytecode = iterator.current_bytecode();
- BytecodeLiveness& liveness =
- liveness_map().GetLiveness(iterator.current_offset());
-
- // There shouldn't be any more loops.
- DCHECK_NE(bytecode, Bytecode::kJumpLoop);
-
- UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- bytecode_array(), liveness_map(), zone());
- }
- }
- }
-
DCHECK(analyze_liveness_);
if (v8_flags.trace_environment_liveness) {
StdoutStream of;
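The new SwitchOnGeneratorState case builds the out-liveness from the fallthrough's in-liveness and then force-marks the generator register live, falling back to a plain union on later updates. A toy bitset version of those two operations follows; it is a sketch, not V8's BytecodeLivenessState.

```cpp
#include <bitset>
#include <cassert>

// Toy liveness state over a fixed register budget.
struct LivenessState {
  std::bitset<64> regs;
  void MarkRegisterLive(int index) { regs.set(index); }
  bool RegisterIsLive(int index) const { return regs.test(index); }
  // Union-in another state; returns true if anything changed.
  bool Union(const LivenessState& other) {
    auto before = regs;
    regs |= other.regs;
    return regs != before;
  }
};

int main() {
  LivenessState fallthrough;  // in-liveness of the next bytecode
  fallthrough.MarkRegisterLive(3);

  // First update: copy the fallthrough liveness, then add the generator
  // register so the resume paths can still read it.
  LivenessState out = fallthrough;
  int generator_reg = 0;
  assert(!fallthrough.RegisterIsLive(generator_reg));
  out.MarkRegisterLive(generator_reg);

  // Later (non-first) updates just union in the fallthrough state.
  bool changed = out.Union(fallthrough);
  assert(!changed);  // nothing new: the fallthrough was already included
  assert(out.RegisterIsLive(3) && out.RegisterIsLive(generator_reg));
}
```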
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index adf8b1d7a5..d032d0a7d1 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -293,7 +293,7 @@ class BytecodeGraphBuilder {
// Slow path taken when we cannot figure out the current scope info.
Environment* CheckContextExtensionsSlowPath(uint32_t depth);
// Helper function that tries to get the current scope info.
- base::Optional<ScopeInfoRef> TryGetScopeInfo();
+ OptionalScopeInfoRef TryGetScopeInfo();
// Helper function to create a context extension check.
Environment* CheckContextExtensionAtDepth(Environment* slow_environment,
uint32_t depth);
@@ -1056,9 +1056,9 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
jsgraph_(jsgraph),
native_context_(native_context),
shared_info_(shared_info),
- bytecode_array_(shared_info.GetBytecodeArray()),
+ bytecode_array_(shared_info.GetBytecodeArray(broker)),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.feedback_vector().value()),
+ feedback_vector_(feedback_cell.feedback_vector(broker).value()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -1070,7 +1070,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array().parameter_count(), bytecode_array().register_count(),
shared_info.object())),
source_position_iterator_(std::make_unique<SourcePositionTableIterator>(
- bytecode_array().SourcePositionTable())),
+ bytecode_array().SourcePositionTable(broker))),
bytecode_iterator_(bytecode_array().object()),
bytecode_analysis_(
bytecode_array().object(), local_zone, osr_offset,
@@ -1131,16 +1131,17 @@ Node* BytecodeGraphBuilder::GetParameter(int parameter_index,
void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
DCHECK_NULL(feedback_vector_node_);
- feedback_vector_node_ = jsgraph()->Constant(feedback_vector());
+ feedback_vector_node_ = jsgraph()->Constant(feedback_vector(), broker());
}
Node* BytecodeGraphBuilder::BuildLoadFeedbackCell(int index) {
- return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index));
+ return jsgraph()->Constant(
+ feedback_vector().GetClosureFeedbackCell(broker(), index), broker());
}
void BytecodeGraphBuilder::CreateNativeContextNode() {
DCHECK_NULL(native_context_node_);
- native_context_node_ = jsgraph()->Constant(native_context());
+ native_context_node_ = jsgraph()->Constant(native_context(), broker());
}
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
@@ -1517,7 +1518,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
void BytecodeGraphBuilder::VisitLdaConstant() {
ObjectRef object = MakeRefForConstantForIndexOperand(0);
- Node* node = jsgraph()->Constant(object);
+ Node* node = jsgraph()->Constant(object, broker());
environment()->BindAccumulator(node);
}
@@ -1728,7 +1729,8 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
- Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
+ Node* name =
+ jsgraph()->Constant(MakeRefForConstantForIndexOperand(0), broker());
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::kNotInside
? Runtime::kLoadLookupSlot
@@ -1774,20 +1776,19 @@ BytecodeGraphBuilder::CheckContextExtensionAtDepth(
return slow_environment;
}
-base::Optional<ScopeInfoRef> BytecodeGraphBuilder::TryGetScopeInfo() {
+OptionalScopeInfoRef BytecodeGraphBuilder::TryGetScopeInfo() {
Node* context = environment()->Context();
switch (context->opcode()) {
case IrOpcode::kJSCreateFunctionContext:
- return CreateFunctionContextParametersOf(context->op())
- .scope_info(broker());
+ return CreateFunctionContextParametersOf(context->op()).scope_info();
case IrOpcode::kJSCreateBlockContext:
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateWithContext:
- return ScopeInfoOf(broker(), context->op());
+ return ScopeInfoOf(context->op());
case IrOpcode::kParameter: {
- ScopeInfoRef scope_info = shared_info_.scope_info();
+ ScopeInfoRef scope_info = shared_info_.scope_info(broker());
if (scope_info.HasOuterScopeInfo()) {
- scope_info = scope_info.OuterScopeInfo();
+ scope_info = scope_info.OuterScopeInfo(broker());
}
return scope_info;
}
@@ -1798,7 +1799,7 @@ base::Optional<ScopeInfoRef> BytecodeGraphBuilder::TryGetScopeInfo() {
BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
uint32_t depth) {
- base::Optional<ScopeInfoRef> maybe_scope_info = TryGetScopeInfo();
+ OptionalScopeInfoRef maybe_scope_info = TryGetScopeInfo();
if (!maybe_scope_info.has_value()) {
return CheckContextExtensionsSlowPath(depth);
}
@@ -1813,7 +1814,7 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
}
DCHECK_IMPLIES(!scope_info.HasOuterScopeInfo(), d + 1 == depth);
if (scope_info.HasOuterScopeInfo()) {
- scope_info = scope_info.OuterScopeInfo();
+ scope_info = scope_info.OuterScopeInfo(broker());
}
}
@@ -1877,7 +1878,8 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
+ Node* name =
+ jsgraph()->Constant(MakeRefForConstantForIndexOperand(0), broker());
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::kNotInside
@@ -1924,8 +1926,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name =
- jsgraph()->Constant(MakeRefForConstantForIndexOperand<Name>(0));
+ Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand<Name>(0),
+ broker());
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::kNotInside
@@ -1953,7 +1955,8 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
- Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
+ Node* name =
+ jsgraph()->Constant(MakeRefForConstantForIndexOperand(0), broker());
int bytecode_flags = bytecode_iterator().GetFlag8Operand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -2135,8 +2138,9 @@ void BytecodeGraphBuilder::VisitDefineKeyedOwnProperty() {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* key =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ int flags = bytecode_iterator().GetFlag8Operand(2);
FeedbackSource source =
- CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(3));
LanguageMode language_mode =
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(source));
@@ -2155,9 +2159,11 @@ void BytecodeGraphBuilder::VisitDefineKeyedOwnProperty() {
static_assert(JSDefineKeyedOwnPropertyNode::ObjectIndex() == 0);
static_assert(JSDefineKeyedOwnPropertyNode::KeyIndex() == 1);
static_assert(JSDefineKeyedOwnPropertyNode::ValueIndex() == 2);
- static_assert(JSDefineKeyedOwnPropertyNode::FeedbackVectorIndex() == 3);
+ static_assert(JSDefineKeyedOwnPropertyNode::FlagsIndex() == 3);
+ static_assert(JSDefineKeyedOwnPropertyNode::FeedbackVectorIndex() == 4);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
- node = NewNode(op, object, key, value, feedback_vector_node());
+ node = NewNode(op, object, key, value, jsgraph()->Constant(flags),
+ feedback_vector_node());
}
environment()->RecordAfterState(node, Environment::kAttachFrameState);
@@ -2202,7 +2208,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
bytecode_iterator().GetFlag8Operand(2))
? AllocationType::kOld
: AllocationType::kYoung;
- CodeTRef compile_lazy =
+ CodeRef compile_lazy =
MakeRef(broker(), *BUILTIN_CODE(jsgraph()->isolate(), CompileLazy));
const Operator* op =
javascript()->CreateClosure(shared_info, compile_lazy, allocation);
@@ -2835,7 +2841,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* accumulator = environment()->LookupAccumulator();
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
- Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
+ Node* name =
+ jsgraph()->Constant(MakeRefForConstantForIndexOperand(0), broker());
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 7b69cea027..88e87308d5 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -230,6 +230,10 @@ bool CodeAssembler::IsWord64CtzSupported() const {
return raw_assembler()->machine()->Word64Ctz().IsSupported();
}
+TNode<Int32T> CodeAssembler::UniqueInt32Constant(int32_t value) {
+ return UncheckedCast<Int32T>(jsgraph()->UniqueInt32Constant(value));
+}
+
TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) {
return UncheckedCast<Int32T>(jsgraph()->Int32Constant(value));
}
@@ -996,7 +1000,7 @@ Node* CodeAssembler::CallRuntimeImpl(
Runtime::FunctionId function, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
- TNode<CodeT> centry =
+ TNode<Code> centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
constexpr size_t kMaxNumArgs = 6;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1029,7 +1033,7 @@ void CodeAssembler::TailCallRuntimeImpl(
Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
- TNode<CodeT> centry =
+ TNode<Code> centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), result_size));
constexpr size_t kMaxNumArgs = 6;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1085,7 +1089,7 @@ Node* CodeAssembler::CallStubN(StubCallMode call_mode,
}
void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
- TNode<CodeT> target, TNode<Object> context,
+ TNode<Code> target, TNode<Object> context,
std::initializer_list<Node*> args) {
constexpr size_t kMaxNumArgs = 11;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1190,7 +1194,7 @@ template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
TNode<ExternalReference>);
-void CodeAssembler::TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
+void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context> context,
TNode<JSFunction> function,
TNode<Object> new_target,
TNode<Int32T> arg_count) {
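`UniqueInt32Constant` forwards to `jsgraph()->UniqueInt32Constant`, which, unlike the plain `Int32Constant` path, does not hand back a cached node shared across uses. The toy graph below sketches that cached-versus-fresh distinction under that assumption; the caching details are deliberately simplified.

```cpp
#include <map>
#include <memory>
#include <vector>
#include <cstdint>
#include <cassert>

struct Node {
  int32_t value;
};

// Toy graph with a per-value cache for Int32 constants.
class Graph {
 public:
  Node* Int32Constant(int32_t value) {
    auto it = cache_.find(value);
    if (it != cache_.end()) return it->second;  // shared, deduplicated node
    Node* node = NewNode(value);
    cache_[value] = node;
    return node;
  }
  // Always materializes a fresh node, bypassing the cache, so the constant
  // is not shared with any other use.
  Node* UniqueInt32Constant(int32_t value) { return NewNode(value); }

 private:
  Node* NewNode(int32_t value) {
    nodes_.push_back(std::make_unique<Node>(Node{value}));
    return nodes_.back().get();
  }
  std::map<int32_t, Node*> cache_;
  std::vector<std::unique_ptr<Node>> nodes_;
};

int main() {
  Graph graph;
  assert(graph.Int32Constant(42) == graph.Int32Constant(42));        // same node
  assert(graph.UniqueInt32Constant(42) != graph.Int32Constant(42));  // fresh node
}
```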
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 06cf450431..11b0901f02 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -259,7 +259,9 @@ class CodeAssemblerParameterizedLabel;
V(Int32Mul, Word32T, Word32T, Word32T) \
V(Int32MulWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Div, Int32T, Int32T, Int32T) \
+ V(Uint32Div, Uint32T, Uint32T, Uint32T) \
V(Int32Mod, Int32T, Int32T, Int32T) \
+ V(Uint32Mod, Uint32T, Uint32T, Uint32T) \
V(Int64Add, Word64T, Word64T, Word64T) \
V(Int64Sub, Word64T, Word64T, Word64T) \
V(Int64SubWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T, Int64T) \
@@ -508,6 +510,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#endif
// Constants.
+ TNode<Int32T> UniqueInt32Constant(int32_t value);
TNode<Int32T> Int32Constant(int32_t value);
TNode<Int64T> Int64Constant(int64_t value);
TNode<Uint64T> Uint64Constant(uint64_t value) {
@@ -582,7 +585,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return UncheckedCast<UintPtrT>(x);
}
- static constexpr int kTargetParameterIndex = -1;
+ static constexpr int kTargetParameterIndex = kJSCallClosureParameterIndex;
+ static_assert(kTargetParameterIndex == -1);
template <class T>
TNode<T> Parameter(
@@ -979,6 +983,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<IntPtrT> WordNot(TNode<IntPtrT> a) {
return Signed(WordNot(static_cast<TNode<WordT>>(a)));
}
+ TNode<Int32T> Word32BitwiseNot(TNode<Int32T> a) {
+ return Signed(Word32BitwiseNot(static_cast<TNode<Word32T>>(a)));
+ }
TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
return UncheckedCast<BoolT>(Word32Or(static_cast<TNode<Word32T>>(left),
static_cast<TNode<Word32T>>(right)));
@@ -1013,6 +1020,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
static_cast<TNode<Word32T>>(right)));
}
+ TNode<Uint32T> Uint32Mul(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(Int32Mul(static_cast<TNode<Word32T>>(left),
+ static_cast<TNode<Word32T>>(right)));
+ }
+
TNode<Int64T> Int64Add(TNode<Int64T> left, TNode<Int64T> right) {
return Signed(Int64Add(static_cast<TNode<Word64T>>(left), right));
}
@@ -1168,13 +1180,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class T = Object, class... TArgs>
TNode<T> CallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
- TNode<CodeT> target = HeapConstant(callable.code());
+ TNode<Code> target = HeapConstant(callable.code());
return CallStub<T>(callable.descriptor(), target, context, args...);
}
template <class T = Object, class... TArgs>
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
- TNode<CodeT> target, TNode<Object> context, TArgs... args) {
+ TNode<Code> target, TNode<Object> context, TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
target, context, args...));
}
@@ -1190,13 +1202,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
void TailCallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
- TNode<CodeT> target = HeapConstant(callable.code());
+ TNode<Code> target = HeapConstant(callable.code());
TailCallStub(callable.descriptor(), target, context, args...);
}
template <class... TArgs>
void TailCallStub(const CallInterfaceDescriptor& descriptor,
- TNode<CodeT> target, TNode<Object> context, TArgs... args) {
+ TNode<Code> target, TNode<Object> context, TArgs... args) {
TailCallStubImpl(descriptor, target, context, {args...});
}
@@ -1219,7 +1231,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Note that no arguments adaption is going on here - all the JavaScript
// arguments are left on the stack unmodified. Therefore, this tail call can
// only be used after arguments adaptation has been performed already.
- void TailCallJSCode(TNode<CodeT> code, TNode<Context> context,
+ void TailCallJSCode(TNode<Code> code, TNode<Context> context,
TNode<JSFunction> function, TNode<Object> new_target,
TNode<Int32T> arg_count);
@@ -1228,7 +1240,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* receiver, TArgs... args) {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
- TNode<CodeT> target = HeapConstant(callable.code());
+ TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), {}, arity, {receiver, args...}));
}
@@ -1239,7 +1251,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
- TNode<CodeT> target = HeapConstant(callable.code());
+ TNode<Code> target = HeapConstant(callable.code());
return CallJSStubImpl(callable.descriptor(), target, CAST(context),
CAST(function), CAST(new_target), arity,
{receiver, args...});
@@ -1338,7 +1350,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
std::initializer_list<TNode<Object>> args);
void TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
- TNode<CodeT> target, TNode<Object> context,
+ TNode<Code> target, TNode<Object> context,
std::initializer_list<Node*> args);
void TailCallStubThenBytecodeDispatchImpl(
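The new `Uint32Div`, `Uint32Mod`, `Uint32Mul`, and `Word32BitwiseNot` entries follow the existing wrapper idiom: the raw word-width operation comes from the macro list, and a thin typed overload casts the operands to the word type and re-tags the result. A compressed sketch of the same idiom with stand-in types (real `TNode<>` values wrap graph nodes, not integers):

```cpp
#include <cstdint>
#include <cassert>

// Stand-ins for the tagged node types; real TNode<> carries a graph node.
struct Word32T { uint32_t bits; };
struct Uint32T : Word32T {};

// The "raw" word-level operation, analogous to the macro-generated binops.
Word32T Word32Mul(Word32T a, Word32T b) { return {a.bits * b.bits}; }

// Re-tagging helper, analogous to CodeAssembler::Unsigned().
Uint32T Unsigned(Word32T w) { return {w}; }

// Typed convenience overload: cast the operands to the word type, call the
// raw op, then tag the result as unsigned.
Uint32T Uint32Mul(Uint32T left, Uint32T right) {
  return Unsigned(Word32Mul(static_cast<Word32T>(left),
                            static_cast<Word32T>(right)));
}

int main() {
  Uint32T a{{6}}, b{{7}};
  assert(Uint32Mul(a, b).bits == 42u);
}
```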
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 0e34933c66..b7d6721f0f 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -19,12 +19,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
- JSHeapBroker* broker,
- CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine,
- Zone* temp_zone,
- BranchSemantics branch_semantics)
+CommonOperatorReducer::CommonOperatorReducer(
+ Editor* editor, Graph* graph, JSHeapBroker* broker,
+ CommonOperatorBuilder* common, MachineOperatorBuilder* machine,
+ Zone* temp_zone, BranchSemantics default_branch_semantics)
: AdvancedReducer(editor),
graph_(graph),
broker_(broker),
@@ -32,7 +30,7 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
machine_(machine),
dead_(graph->NewNode(common->Dead())),
zone_(temp_zone),
- branch_semantics_(branch_semantics) {
+ default_branch_semantics_(default_branch_semantics) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -67,20 +65,22 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return NoChange();
}
-Decision CommonOperatorReducer::DecideCondition(Node* const cond) {
+Decision CommonOperatorReducer::DecideCondition(
+ Node* const cond, BranchSemantics branch_semantics) {
Node* unwrapped = SkipValueIdentities(cond);
switch (unwrapped->opcode()) {
case IrOpcode::kInt32Constant: {
- DCHECK_EQ(branch_semantics_, BranchSemantics::kMachine);
+ DCHECK_EQ(branch_semantics, BranchSemantics::kMachine);
Int32Matcher m(unwrapped);
return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
}
case IrOpcode::kHeapConstant: {
- if (branch_semantics_ == BranchSemantics::kMachine) {
+ if (branch_semantics == BranchSemantics::kMachine) {
return Decision::kTrue;
}
HeapObjectMatcher m(unwrapped);
- base::Optional<bool> maybe_result = m.Ref(broker_).TryGetBooleanValue();
+ base::Optional<bool> maybe_result =
+ m.Ref(broker_).TryGetBooleanValue(broker());
if (!maybe_result.has_value()) return Decision::kUnknown;
return *maybe_result ? Decision::kTrue : Decision::kFalse;
}
@@ -91,6 +91,7 @@ Decision CommonOperatorReducer::DecideCondition(Node* const cond) {
Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ BranchSemantics branch_semantics = BranchSemanticsOf(node);
Node* const cond = node->InputAt(0);
// Swap IfTrue/IfFalse on {branch} if {cond} is a BooleanNot and use the input
// to BooleanNot as new condition for {branch}. Note we assume that {cond} was
@@ -99,8 +100,10 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
// not (i.e. true being returned in the false case and vice versa).
if (cond->opcode() == IrOpcode::kBooleanNot ||
(cond->opcode() == IrOpcode::kSelect &&
- DecideCondition(cond->InputAt(1)) == Decision::kFalse &&
- DecideCondition(cond->InputAt(2)) == Decision::kTrue)) {
+ DecideCondition(cond->InputAt(1), branch_semantics) ==
+ Decision::kFalse &&
+ DecideCondition(cond->InputAt(2), branch_semantics) ==
+ Decision::kTrue)) {
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -122,7 +125,7 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
return Changed(node);
}
- Decision const decision = DecideCondition(cond);
+ Decision const decision = DecideCondition(cond, branch_semantics);
if (decision == Decision::kUnknown) return NoChange();
Node* const control = node->InputAt(1);
for (Node* const use : node->uses()) {
@@ -161,7 +164,8 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
: common()->DeoptimizeUnless(p.reason(), p.feedback()));
return Changed(node);
}
- Decision const decision = DecideCondition(condition);
+ Decision const decision =
+ DecideCondition(condition, default_branch_semantics_);
if (decision == Decision::kUnknown) return NoChange();
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
@@ -305,6 +309,7 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(IrOpcode::kReturn, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
+ // TODO(mslekova): Port this to Turboshaft.
if (effect->opcode() == IrOpcode::kCheckpoint) {
// Any {Return} node can never be used to insert a deoptimization point,
// hence checkpoints can be cut out of the effect chain flowing into it.
@@ -392,7 +397,7 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
Node* const vtrue = node->InputAt(1);
Node* const vfalse = node->InputAt(2);
if (vtrue == vfalse) return Replace(vtrue);
- switch (DecideCondition(cond)) {
+ switch (DecideCondition(cond, default_branch_semantics_)) {
case Decision::kTrue:
return Replace(vtrue);
case Decision::kFalse:
@@ -469,7 +474,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
Reduction CommonOperatorReducer::ReduceStaticAssert(Node* node) {
DCHECK_EQ(IrOpcode::kStaticAssert, node->opcode());
Node* const cond = node->InputAt(0);
- Decision decision = DecideCondition(cond);
+ Decision decision = DecideCondition(cond, default_branch_semantics_);
if (decision == Decision::kTrue) {
RelaxEffectsAndControls(node);
return Changed(node);
@@ -483,7 +488,7 @@ Reduction CommonOperatorReducer::ReduceTrapConditional(Node* trap) {
trap->opcode() == IrOpcode::kTrapUnless);
bool trapping_condition = trap->opcode() == IrOpcode::kTrapIf;
Node* const cond = trap->InputAt(0);
- Decision decision = DecideCondition(cond);
+ Decision decision = DecideCondition(cond, default_branch_semantics_);
if (decision == Decision::kUnknown) {
return NoChange();
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index f4a2884fcf..07258174fd 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
CommonOperatorReducer(Editor* editor, Graph* graph, JSHeapBroker* broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* temp_zone,
- BranchSemantics branch_semantics);
+ BranchSemantics default_branch_semantics);
~CommonOperatorReducer() final = default;
const char* reducer_name() const override { return "CommonOperatorReducer"; }
@@ -50,7 +50,12 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
// Helper to determine if conditions are true or false.
- Decision DecideCondition(Node* const cond);
+ Decision DecideCondition(Node* const cond, BranchSemantics branch_semantics);
+ BranchSemantics BranchSemanticsOf(const Node* branch) {
+ BranchSemantics bs = BranchParametersOf(branch->op()).semantics();
+ if (bs != BranchSemantics::kUnspecified) return bs;
+ return default_branch_semantics_;
+ }
Graph* graph() const { return graph_; }
JSHeapBroker* broker() const { return broker_; }
@@ -64,7 +69,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
MachineOperatorBuilder* const machine_;
Node* const dead_;
Zone* zone_;
- BranchSemantics branch_semantics_;
+ BranchSemantics default_branch_semantics_;
};
} // namespace compiler
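After this change the reducer keeps `default_branch_semantics_` only as a fallback: a Branch node's own semantics win unless they are `kUnspecified`. A minimal sketch of that lookup order, with stand-in enum and node types rather than the real operator parameters:

```cpp
#include <cassert>

enum class BranchSemantics { kUnspecified, kMachine, kJS };

struct BranchNode {
  BranchSemantics semantics = BranchSemantics::kUnspecified;
};

class Reducer {
 public:
  explicit Reducer(BranchSemantics default_semantics)
      : default_branch_semantics_(default_semantics) {}

  // Per-node semantics win; the reducer-wide default is only a fallback.
  BranchSemantics BranchSemanticsOf(const BranchNode& branch) const {
    if (branch.semantics != BranchSemantics::kUnspecified)
      return branch.semantics;
    return default_branch_semantics_;
  }

 private:
  BranchSemantics default_branch_semantics_;
};

int main() {
  Reducer reducer(BranchSemantics::kJS);
  BranchNode unspecified;                         // uses the default
  BranchNode machine{BranchSemantics::kMachine};  // overrides it
  assert(reducer.BranchSemanticsOf(unspecified) == BranchSemantics::kJS);
  assert(reducer.BranchSemanticsOf(machine) == BranchSemantics::kMachine);
}
```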
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index c7c39d6fe8..a763595f7e 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1640,6 +1640,29 @@ CommonOperatorBuilder::CreateJSToWasmFrameStateFunctionInfo(
}
#endif // V8_ENABLE_WEBASSEMBLY
+const Operator* CommonOperatorBuilder::Chained(const Operator* op) {
+ // Use Chained only for operators that are not on the effect chain already.
+ DCHECK_EQ(op->EffectInputCount(), 0);
+ DCHECK_EQ(op->ControlInputCount(), 0);
+ const char* mnemonic;
+ switch (op->opcode()) {
+ case IrOpcode::kChangeInt64ToBigInt:
+ mnemonic = "Chained[ChangeInt64ToBigInt]";
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ mnemonic = "Chained[ChangeUint64ToBigInt]";
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // TODO(nicohartmann@): Need to store operator properties once we have to
+ // support Operator1 operators.
+ Operator::Properties properties = op->properties();
+ return zone()->New<Operator>(op->opcode(), properties, mnemonic,
+ op->ValueInputCount(), 1, 1,
+ op->ValueOutputCount(), 1, 0);
+}
+
const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
return zone()->New<Operator1<MachineRepresentation>>( // --
IrOpcode::kDeadValue, Operator::kPure, // opcode
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 895d297a13..c90e334fd3 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -513,6 +513,13 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
// expected to not survive dead code elimination.
const Operator* Plug();
+ // The Chained operator is a temporary solution for pinning allocating
+ // operators at a specific position in the effect and control chain during
+ // effect-control linearization, so that their position is non-floating and
+ // cannot interfere with other inlined allocations when a schedule is
+ // recomputed (e.g. in Turboshaft's graph builder) once regions are gone.
+ const Operator* Chained(const Operator* op);
+
const Operator* Dead();
const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
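A hedged sketch of how a lowering might instantiate such a chained node, based only on the operator shape defined in common-operator.cc above (same value inputs and outputs as the wrapped pure operator, plus one effect and one control input and one effect output); the helper name is illustrative.

// Anchors a pure, allocating conversion (e.g. ChangeInt64ToBigInt) on the
// effect chain so it cannot float when a schedule is recomputed.
Node* MakeChainedNode(Graph* graph, CommonOperatorBuilder* common,
                      const Operator* pure_op, Node* value, Node* effect,
                      Node* control) {
  DCHECK_EQ(pure_op->EffectInputCount(), 0);
  DCHECK_EQ(pure_op->ControlInputCount(), 0);
  return graph->NewNode(common->Chained(pure_op), value, effect, control);
}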
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 2c219f5d67..d2f34fdc7c 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -70,9 +70,10 @@ class CompilationDependency : public ZoneObject {
public:
explicit CompilationDependency(CompilationDependencyKind kind) : kind(kind) {}
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(PendingDependencies* deps) const = 0;
+ virtual bool IsValid(JSHeapBroker* broker) const = 0;
+ virtual void PrepareInstall(JSHeapBroker* broker) const {}
+ virtual void Install(JSHeapBroker* broker,
+ PendingDependencies* deps) const = 0;
#define V(Name) \
bool Is##Name() const { return kind == k##Name; } \
@@ -115,12 +116,12 @@ class PendingDependencies final {
void Register(Handle<HeapObject> object,
DependentCode::DependencyGroup group) {
- // Code, which are per-local Isolate, cannot depend on objects in the shared
- // heap. Shared heap dependencies are designed to never invalidate
- // assumptions. E.g., maps for shared structs do not have transitions or
- // change the shape of their fields. See
+    // InstructionStream objects, which are per-local-Isolate, cannot depend
+    // on objects in the shared heap. Shared heap dependencies are designed to
+    // never invalidate assumptions. E.g., maps for shared structs do not have
+    // transitions or change the shape of their fields. See
// DependentCode::DeoptimizeDependencyGroups for corresponding DCHECK.
- if (object->InSharedWritableHeap()) return;
+ if (object->InWritableSharedSpace()) return;
deps_[object] |= group;
}
@@ -186,14 +187,14 @@ class InitialMapDependency final : public CompilationDependency {
function_(function),
initial_map_(initial_map) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
Handle<JSFunction> function = function_.object();
return function->has_initial_map() &&
function->initial_map() == *initial_map_.object();
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(initial_map_.object(),
DependentCode::kInitialMapChangedGroup);
}
@@ -222,13 +223,12 @@ class PrototypePropertyDependency final : public CompilationDependency {
: CompilationDependency(kPrototypeProperty),
function_(function),
prototype_(prototype) {
- DCHECK(function_.has_instance_prototype(broker->dependencies()));
- DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies()));
- DCHECK(function_.instance_prototype(broker->dependencies())
- .equals(prototype_));
+ DCHECK(function_.has_instance_prototype(broker));
+ DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker));
+ DCHECK(function_.instance_prototype(broker).equals(prototype_));
}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
Handle<JSFunction> function = function_.object();
return function->has_prototype_slot() &&
function->has_instance_prototype() &&
@@ -236,17 +236,17 @@ class PrototypePropertyDependency final : public CompilationDependency {
function->instance_prototype() == *prototype_.object();
}
- void PrepareInstall() const override {
- SLOW_DCHECK(IsValid());
+ void PrepareInstall(JSHeapBroker* broker) const override {
+ SLOW_DCHECK(IsValid(broker));
Handle<JSFunction> function = function_.object();
if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
Handle<JSFunction> function = function_.object();
CHECK(function->has_initial_map());
- Handle<Map> initial_map(function->initial_map(), function_.isolate());
+ Handle<Map> initial_map(function->initial_map(), broker->isolate());
deps->Register(initial_map, DependentCode::kInitialMapChangedGroup);
}
@@ -271,14 +271,14 @@ class StableMapDependency final : public CompilationDependency {
explicit StableMapDependency(const MapRef& map)
: CompilationDependency(kStableMap), map_(map) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
// TODO(v8:11670): Consider turning this back into a CHECK inside the
// constructor and DependOnStableMap, if possible in light of concurrent
// heap state modifications.
return !map_.object()->is_dictionary_map() && map_.object()->is_stable();
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(map_.object(), DependentCode::kPrototypeCheckGroup);
}
@@ -312,12 +312,14 @@ class ConstantInDictionaryPrototypeChainDependency final
// Checks that |constant_| is still the value of accessing |property_name_|
// starting at |receiver_map_|.
- bool IsValid() const override { return !GetHolderIfValid().is_null(); }
+ bool IsValid(JSHeapBroker* broker) const override {
+ return !GetHolderIfValid(broker).is_null();
+ }
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
- Isolate* isolate = receiver_map_.isolate();
- Handle<JSObject> holder = GetHolderIfValid().ToHandleChecked();
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
+ Isolate* isolate = broker->isolate();
+ Handle<JSObject> holder = GetHolderIfValid(broker).ToHandleChecked();
Handle<Map> map = receiver_map_.object();
while (map->prototype() != *holder) {
@@ -337,9 +339,9 @@ class ConstantInDictionaryPrototypeChainDependency final
// TODO(neis) Currently, invoking IsValid and then Install duplicates the call
// to GetHolderIfValid. Instead, consider letting IsValid change the state
// (and store the holder), or merge IsValid and Install.
- MaybeHandle<JSObject> GetHolderIfValid() const {
+ MaybeHandle<JSObject> GetHolderIfValid(JSHeapBroker* broker) const {
DisallowGarbageCollection no_gc;
- Isolate* isolate = receiver_map_.isolate();
+ Isolate* isolate = broker->isolate();
Handle<Object> holder;
HeapObject prototype = receiver_map_.object()->prototype();
@@ -446,7 +448,7 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
index_(index),
value_(value) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
if (holder_.object()->map() != *map_.object()) {
TRACE_BROKER_MISSING(broker_,
"Map change detected in " << holder_.object());
@@ -475,7 +477,8 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
return true;
}
- void Install(PendingDependencies* deps) const override {}
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ }
private:
size_t Hash() const override {
@@ -508,9 +511,8 @@ class OwnConstantDictionaryPropertyDependency final
InternalIndex index,
const ObjectRef& value)
: CompilationDependency(kOwnConstantDictionaryProperty),
- broker_(broker),
holder_(holder),
- map_(holder.map()),
+ map_(holder.map(broker)),
index_(index),
value_(value) {
// We depend on map() being cached.
@@ -518,35 +520,36 @@ class OwnConstantDictionaryPropertyDependency final
RefSerializationKind::kNeverSerialized);
}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
if (holder_.object()->map() != *map_.object()) {
- TRACE_BROKER_MISSING(broker_,
+ TRACE_BROKER_MISSING(broker,
"Map change detected in " << holder_.object());
return false;
}
base::Optional<Object> maybe_value = JSObject::DictionaryPropertyAt(
- holder_.object(), index_, broker_->isolate()->heap());
+ holder_.object(), index_, broker->isolate()->heap());
if (!maybe_value) {
TRACE_BROKER_MISSING(
- broker_, holder_.object()
- << "has a value that might not safe to read at index "
- << index_.as_int());
+ broker, holder_.object()
+ << "has a value that might not be safe to read at index "
+ << index_.as_int());
return false;
}
if (*maybe_value != *value_.object()) {
- TRACE_BROKER_MISSING(broker_, "Constant property value changed in "
- << holder_.object()
- << " at InternalIndex "
- << index_.as_int());
+ TRACE_BROKER_MISSING(broker, "Constant property value changed in "
+ << holder_.object()
+ << " at InternalIndex "
+ << index_.as_int());
return false;
}
return true;
}
- void Install(PendingDependencies* deps) const override {}
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ }
private:
size_t Hash() const override {
@@ -562,7 +565,6 @@ class OwnConstantDictionaryPropertyDependency final
index_ == zat->index_ && value_.equals(zat->value_);
}
- JSHeapBroker* const broker_;
JSObjectRef const holder_;
MapRef const map_;
InternalIndex const index_;
@@ -574,11 +576,12 @@ class ConsistentJSFunctionViewDependency final : public CompilationDependency {
explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function)
: CompilationDependency(kConsistentJSFunctionView), function_(function) {}
- bool IsValid() const override {
- return function_.IsConsistentWithHeapState();
+ bool IsValid(JSHeapBroker* broker) const override {
+ return function_.IsConsistentWithHeapState(broker);
}
- void Install(PendingDependencies* deps) const override {}
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ }
private:
size_t Hash() const override {
@@ -602,10 +605,12 @@ class TransitionDependency final : public CompilationDependency {
DCHECK(map_.CanBeDeprecated());
}
- bool IsValid() const override { return !map_.object()->is_deprecated(); }
+ bool IsValid(JSHeapBroker* broker) const override {
+ return !map_.object()->is_deprecated();
+ }
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(map_.object(), DependentCode::kTransitionGroup);
}
@@ -631,11 +636,11 @@ class PretenureModeDependency final : public CompilationDependency {
site_(site),
allocation_(allocation) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
return allocation_ == site_.object()->GetAllocationType();
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(site_.object(),
DependentCode::kAllocationSiteTenuringChangedGroup);
}
@@ -664,18 +669,18 @@ class FieldRepresentationDependency final : public CompilationDependency {
descriptor_(descriptor),
representation_(representation) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
DisallowGarbageCollection no_heap_allocation;
if (map_.object()->is_deprecated()) return false;
return representation_.Equals(map_.object()
- ->instance_descriptors(map_.isolate())
+ ->instance_descriptors(broker->isolate())
.GetDetails(descriptor_)
.representation());
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
- Isolate* isolate = map_.isolate();
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
+ Isolate* isolate = broker->isolate();
Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
isolate);
CHECK(!owner->is_deprecated());
@@ -717,17 +722,17 @@ class FieldTypeDependency final : public CompilationDependency {
descriptor_(descriptor),
type_(type) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
DisallowGarbageCollection no_heap_allocation;
if (map_.object()->is_deprecated()) return false;
return *type_.object() == map_.object()
- ->instance_descriptors(map_.isolate())
+ ->instance_descriptors(broker->isolate())
.GetFieldType(descriptor_);
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
- Isolate* isolate = map_.isolate();
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
+ Isolate* isolate = broker->isolate();
Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
isolate);
CHECK(!owner->is_deprecated());
@@ -760,19 +765,19 @@ class FieldConstnessDependency final : public CompilationDependency {
map_(map),
descriptor_(descriptor) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
DisallowGarbageCollection no_heap_allocation;
if (map_.object()->is_deprecated()) return false;
return PropertyConstness::kConst ==
map_.object()
- ->instance_descriptors(map_.isolate())
+ ->instance_descriptors(broker->isolate())
.GetDetails(descriptor_)
.constness();
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
- Isolate* isolate = map_.isolate();
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
+ Isolate* isolate = broker->isolate();
Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
isolate);
CHECK(!owner->is_deprecated());
@@ -809,18 +814,18 @@ class GlobalPropertyDependency final : public CompilationDependency {
DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
Handle<PropertyCell> cell = cell_.object();
// The dependency is never valid if the cell is 'invalidated'. This is
// marked by setting the value to the hole.
- if (cell->value() == *(cell_.isolate()->factory()->the_hole_value())) {
+ if (cell->value() == *(broker->isolate()->factory()->the_hole_value())) {
return false;
}
return type_ == cell->property_details().cell_type() &&
read_only_ == cell->property_details().IsReadOnly();
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(cell_.object(), DependentCode::kPropertyCellChangedGroup);
}
@@ -846,12 +851,12 @@ class ProtectorDependency final : public CompilationDependency {
explicit ProtectorDependency(const PropertyCellRef& cell)
: CompilationDependency(kProtector), cell_(cell) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
Handle<PropertyCell> cell = cell_.object();
return cell->value() == Smi::FromInt(Protectors::kProtectorValid);
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(cell_.object(), DependentCode::kPropertyCellChangedGroup);
}
@@ -879,7 +884,7 @@ class ObjectSlotValueDependency final : public CompilationDependency {
offset_(offset),
value_(value.object()) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
PtrComprCageBase cage_base = GetPtrComprCageBase(*object_);
Object current_value =
offset_ == HeapObject::kMapOffset
@@ -887,7 +892,8 @@ class ObjectSlotValueDependency final : public CompilationDependency {
: TaggedField<Object>::Relaxed_Load(cage_base, *object_, offset_);
return *value_ == current_value;
}
- void Install(PendingDependencies* deps) const override {}
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ }
private:
size_t Hash() const override {
@@ -912,7 +918,7 @@ class ElementsKindDependency final : public CompilationDependency {
DCHECK(AllocationSite::ShouldTrack(kind_));
}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
Handle<AllocationSite> site = site_.object();
ElementsKind kind =
site->PointsToLiteral()
@@ -920,8 +926,8 @@ class ElementsKindDependency final : public CompilationDependency {
: site->GetElementsKind();
return kind_ == kind;
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
deps->Register(site_.object(),
DependentCode::kAllocationSiteTransitionChangedGroup);
}
@@ -952,17 +958,18 @@ class OwnConstantElementDependency final : public CompilationDependency {
index_(index),
element_(element) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
DisallowGarbageCollection no_gc;
JSObject holder = *holder_.object();
base::Optional<Object> maybe_element =
- holder_.GetOwnConstantElementFromHeap(holder.elements(),
+ holder_.GetOwnConstantElementFromHeap(broker, holder.elements(),
holder.GetElementsKind(), index_);
if (!maybe_element.has_value()) return false;
return maybe_element.value() == *element_.object();
}
- void Install(PendingDependencies* deps) const override {}
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ }
private:
size_t Hash() const override {
@@ -991,22 +998,22 @@ class InitialMapInstanceSizePredictionDependency final
function_(function),
instance_size_(instance_size) {}
- bool IsValid() const override {
+ bool IsValid(JSHeapBroker* broker) const override {
// The dependency is valid if the prediction is the same as the current
// slack tracking result.
if (!function_.object()->has_initial_map()) return false;
- int instance_size = function_.object()->ComputeInstanceSizeWithMinSlack(
- function_.isolate());
+ int instance_size =
+ function_.object()->ComputeInstanceSizeWithMinSlack(broker->isolate());
return instance_size == instance_size_;
}
- void PrepareInstall() const override {
- SLOW_DCHECK(IsValid());
+ void PrepareInstall(JSHeapBroker* broker) const override {
+ SLOW_DCHECK(IsValid(broker));
function_.object()->CompleteInobjectSlackTrackingIfActive();
}
- void Install(PendingDependencies* deps) const override {
- SLOW_DCHECK(IsValid());
+ void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
+ SLOW_DCHECK(IsValid(broker));
DCHECK(
!function_.object()->initial_map().IsInobjectSlackTrackingInProgress());
}
@@ -1037,14 +1044,14 @@ void CompilationDependencies::RecordDependency(
MapRef CompilationDependencies::DependOnInitialMap(
const JSFunctionRef& function) {
- MapRef map = function.initial_map(this);
+ MapRef map = function.initial_map(broker_);
RecordDependency(zone_->New<InitialMapDependency>(broker_, function, map));
return map;
}
ObjectRef CompilationDependencies::DependOnPrototypeProperty(
const JSFunctionRef& function) {
- ObjectRef prototype = function.instance_prototype(this);
+ ObjectRef prototype = function.instance_prototype(broker_);
RecordDependency(
zone_->New<PrototypePropertyDependency>(broker_, function, prototype));
return prototype;
@@ -1073,7 +1080,8 @@ AllocationType CompilationDependencies::DependOnPretenureMode(
PropertyConstness CompilationDependencies::DependOnFieldConstness(
const MapRef& map, InternalIndex descriptor) {
- PropertyConstness constness = map.GetPropertyDetails(descriptor).constness();
+ PropertyConstness constness =
+ map.GetPropertyDetails(broker_, descriptor).constness();
if (constness == PropertyConstness::kMutable) return constness;
// If the map can have fast elements transitions, then the field can be only
@@ -1100,8 +1108,8 @@ void CompilationDependencies::DependOnGlobalProperty(
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
- cell.CacheAsProtector();
- if (cell.value().AsSmi() != Protectors::kProtectorValid) return false;
+ cell.CacheAsProtector(broker_);
+ if (cell.value(broker_).AsSmi() != Protectors::kProtectorValid) return false;
RecordDependency(zone_->New<ProtectorDependency>(cell));
return true;
}
@@ -1149,9 +1157,10 @@ bool CompilationDependencies::DependOnPromiseThenProtector() {
void CompilationDependencies::DependOnElementsKind(
const AllocationSiteRef& site) {
- ElementsKind kind = site.PointsToLiteral()
- ? site.boilerplate().value().map().elements_kind()
- : site.GetElementsKind();
+ ElementsKind kind =
+ site.PointsToLiteral()
+ ? site.boilerplate(broker_).value().map(broker_).elements_kind()
+ : site.GetElementsKind();
if (AllocationSite::ShouldTrack(kind)) {
RecordDependency(zone_->New<ElementsKindDependency>(site, kind));
}
@@ -1183,9 +1192,9 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty(
}
V8_INLINE void TraceInvalidCompilationDependency(
- const CompilationDependency* d) {
+ compiler::JSHeapBroker* broker, const CompilationDependency* d) {
DCHECK(v8_flags.trace_compilation_dependencies);
- DCHECK(!d->IsValid());
+ DCHECK(!d->IsValid(broker));
PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
}
@@ -1201,14 +1210,14 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// dependencies. For example, PrototypePropertyDependency::PrepareInstall
// can call EnsureHasInitialMap, which can invalidate a
// StableMapDependency on the prototype object's map.
- if (!dep->IsValid()) {
+ if (!dep->IsValid(broker_)) {
if (v8_flags.trace_compilation_dependencies) {
- TraceInvalidCompilationDependency(dep);
+ TraceInvalidCompilationDependency(broker_, dep);
}
dependencies_.clear();
return false;
}
- dep->Install(&pending_deps);
+ dep->Install(broker_, &pending_deps);
}
pending_deps.InstallAll(broker_->isolate(), code);
}
@@ -1232,7 +1241,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
}
#ifdef DEBUG
for (auto dep : dependencies_) {
- CHECK_IMPLIES(!dep->IsValid(),
+ CHECK_IMPLIES(!dep->IsValid(broker_),
dep->IsPretenureMode() || dep->IsConsistentJSFunctionView());
}
#endif
@@ -1247,14 +1256,14 @@ bool CompilationDependencies::PrepareInstall() {
}
for (auto dep : dependencies_) {
- if (!dep->IsValid()) {
+ if (!dep->IsValid(broker_)) {
if (v8_flags.trace_compilation_dependencies) {
- TraceInvalidCompilationDependency(dep);
+ TraceInvalidCompilationDependency(broker_, dep);
}
dependencies_.clear();
return false;
}
- dep->PrepareInstall();
+ dep->PrepareInstall(broker_);
}
return true;
}
@@ -1267,14 +1276,14 @@ bool CompilationDependencies::PrepareInstallPredictable() {
std::sort(deps.begin(), deps.end());
for (auto dep : deps) {
- if (!dep->IsValid()) {
+ if (!dep->IsValid(broker_)) {
if (v8_flags.trace_compilation_dependencies) {
- TraceInvalidCompilationDependency(dep);
+ TraceInvalidCompilationDependency(broker_, dep);
}
dependencies_.clear();
return false;
}
- dep->PrepareInstall();
+ dep->PrepareInstall(broker_);
}
return true;
}
@@ -1289,7 +1298,7 @@ DEPENDENCY_LIST(V)
void CompilationDependencies::DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
- base::Optional<JSObjectRef> last_prototype) {
+ OptionalJSObjectRef last_prototype) {
for (MapRef receiver_map : receiver_maps) {
DependOnStablePrototypeChain(receiver_map, start, last_prototype);
}
@@ -1297,25 +1306,26 @@ void CompilationDependencies::DependOnStablePrototypeChains(
void CompilationDependencies::DependOnStablePrototypeChain(
MapRef receiver_map, WhereToStart start,
- base::Optional<JSObjectRef> last_prototype) {
+ OptionalJSObjectRef last_prototype) {
if (receiver_map.IsPrimitiveMap()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
// Note: Keep sync'd with AccessInfoFactory::ComputePropertyAccessInfo.
- base::Optional<JSFunctionRef> constructor =
- broker_->target_native_context().GetConstructorFunction(receiver_map);
- receiver_map = constructor.value().initial_map(this);
+ OptionalJSFunctionRef constructor =
+ broker_->target_native_context().GetConstructorFunction(broker_,
+ receiver_map);
+ receiver_map = constructor.value().initial_map(broker_);
}
if (start == kStartAtReceiver) DependOnStableMap(receiver_map);
MapRef map = receiver_map;
while (true) {
- HeapObjectRef proto = map.prototype();
+ HeapObjectRef proto = map.prototype(broker_);
if (!proto.IsJSObject()) {
- CHECK_EQ(proto.map().oddball_type(), OddballType::kNull);
+ CHECK_EQ(proto.map(broker_).oddball_type(broker_), OddballType::kNull);
break;
}
- map = proto.map();
+ map = proto.map(broker_);
DependOnStableMap(map);
if (last_prototype.has_value() && proto.equals(*last_prototype)) break;
}
@@ -1326,10 +1336,10 @@ void CompilationDependencies::DependOnElementsKinds(
AllocationSiteRef current = site;
while (true) {
DependOnElementsKind(current);
- if (!current.nested_site().IsAllocationSite()) break;
- current = current.nested_site().AsAllocationSite();
+ if (!current.nested_site(broker_).IsAllocationSite()) break;
+ current = current.nested_site(broker_).AsAllocationSite();
}
- CHECK_EQ(current.nested_site().AsSmi(), 0);
+ CHECK_EQ(current.nested_site(broker_).AsSmi(), 0);
}
void CompilationDependencies::DependOnConsistentJSFunctionView(
@@ -1348,13 +1358,13 @@ SlackTrackingPrediction
CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
const JSFunctionRef& function) {
MapRef initial_map = DependOnInitialMap(function);
- int instance_size = function.InitialMapInstanceSizeWithMinSlack(this);
+ int instance_size = function.InitialMapInstanceSizeWithMinSlack(broker_);
// Currently, we always install the prediction dependency. If this turns out
// to be too expensive, we can only install the dependency if slack
// tracking is active.
RecordDependency(zone_->New<InitialMapInstanceSizePredictionDependency>(
function, instance_size));
- CHECK_LE(instance_size, function.initial_map(this).instance_size());
+ CHECK_LE(instance_size, function.initial_map(broker_).instance_size());
return SlackTrackingPrediction(initial_map, instance_size);
}
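A minimal sketch of the refactored dependency shape, modeled on the StableMapDependency hunks above; the class name is hypothetical and the Hash()/Equals() overrides required by the real base class are elided. The point of the change is that the JSHeapBroker is no longer stored on each dependency but passed into IsValid/PrepareInstall/Install, so the isolate is obtained from the broker at validation time.

// Hypothetical dependency written against the broker-threaded interface.
class ExampleStableMapDependency final : public CompilationDependency {
 public:
  explicit ExampleStableMapDependency(const MapRef& map)
      : CompilationDependency(kStableMap), map_(map) {}

  bool IsValid(JSHeapBroker* broker) const override {
    // No stored broker_; any isolate access would go through {broker}.
    return !map_.object()->is_dictionary_map() && map_.object()->is_stable();
  }

  void Install(JSHeapBroker* broker, PendingDependencies* deps) const override {
    SLOW_DCHECK(IsValid(broker));
    deps->Register(map_.object(), DependentCode::kPrototypeCheckGroup);
  }

 private:
  // Hash() and Equals() overrides elided for brevity.
  MapRef const map_;
};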
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index b6799342d3..45b320ce60 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -118,14 +118,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// up to (and including) the {last_prototype}.
void DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
- base::Optional<JSObjectRef> last_prototype =
- base::Optional<JSObjectRef>());
+ OptionalJSObjectRef last_prototype = OptionalJSObjectRef());
// For the given map, depend on the stability of (the maps of) all prototypes
// up to (and including) the {last_prototype}.
- void DependOnStablePrototypeChain(MapRef receiver_maps, WhereToStart start,
- base::Optional<JSObjectRef> last_prototype =
- base::Optional<JSObjectRef>());
+ void DependOnStablePrototypeChain(
+ MapRef receiver_maps, WhereToStart start,
+ OptionalJSObjectRef last_prototype = OptionalJSObjectRef());
// Like DependOnElementsKind but also applies to all nested allocation sites.
void DependOnElementsKinds(const AllocationSiteRef& site);
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index 5e74ba7535..14279b0b13 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -13,7 +13,7 @@ namespace internal {
namespace compiler {
namespace {
-Node* TryGetConstant(JSGraph* jsgraph, Node* node) {
+Node* TryGetConstant(JSGraph* jsgraph, Node* node, JSHeapBroker* broker) {
Type type = NodeProperties::GetType(node);
Node* result;
if (type.IsNone()) {
@@ -29,7 +29,7 @@ Node* TryGetConstant(JSGraph* jsgraph, Node* node) {
} else if (type.Is(Type::Hole())) {
result = jsgraph->TheHoleConstant();
} else if (type.IsHeapConstant()) {
- result = jsgraph->Constant(type.AsHeapConstant()->Ref());
+ result = jsgraph->Constant(type.AsHeapConstant()->Ref(), broker);
} else if (type.Is(Type::PlainNumber()) && type.Min() == type.Max()) {
result = jsgraph->Constant(type.Min());
} else {
@@ -66,8 +66,9 @@ ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable) &&
- node->opcode() != IrOpcode::kFinishRegion) {
- Node* constant = TryGetConstant(jsgraph(), node);
+ node->opcode() != IrOpcode::kFinishRegion &&
+ node->opcode() != IrOpcode::kTypeGuard) {
+ Node* constant = TryGetConstant(jsgraph(), node, broker());
if (constant != nullptr) {
DCHECK(NodeProperties::IsTyped(constant));
if (!v8_flags.assert_types) {
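For readability, here is the reducer's gate condition as it stands after this change, restated as a sketch (the helper name is illustrative): TypeGuard now joins FinishRegion as an opcode that is never folded, and the broker is threaded through so heap-constant types can be materialized via jsgraph->Constant(ref, broker).

// Illustrative restatement of the folding guard from the hunk above.
bool ShouldTryConstantFold(Node* node) {
  return !NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
         node->op()->HasProperty(Operator::kEliminatable) &&
         node->opcode() != IrOpcode::kFinishRegion &&
         node->opcode() != IrOpcode::kTypeGuard;
}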
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index 43f5572e78..ac25df2392 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -354,7 +354,15 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
if (!(is_mutable ? &state->immutable_state : &state->mutable_state)
->Lookup(object, offset)
.IsEmpty()) {
- return AssertUnreachable(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* unreachable =
+ graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control);
+ auto rep = ObjectAccessOf(node->op()).machine_type.representation();
+ Node* dead_value =
+ graph()->NewNode(jsgraph()->common()->DeadValue(rep), unreachable);
+ ReplaceWithValue(node, dead_value, unreachable, control);
+ node->Kill();
+ return Replace(dead_value);
}
HalfState const* half_state =
is_mutable ? &state->mutable_state : &state->immutable_state;
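A short sketch of the construction above, kept to the nodes that appear in the hunk: instead of delegating to AssertUnreachable, the reducer builds the Unreachable node itself and pairs it with a DeadValue that carries the eliminated load's machine representation, so value uses keep a representation-correct placeholder. The helper name is illustrative.

// Sketch only; mirrors the replacement built in the hunk above.
Node* BuildDeadLoadReplacement(JSGraph* jsgraph, Graph* graph, Node* node,
                               Node* effect, Node* control) {
  Node* unreachable =
      graph->NewNode(jsgraph->common()->Unreachable(), effect, control);
  MachineRepresentation rep =
      ObjectAccessOf(node->op()).machine_type.representation();
  // DeadValue keeps the representation the eliminated load would have had.
  return graph->NewNode(jsgraph->common()->DeadValue(rep), unreachable);
}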
diff --git a/deps/v8/src/compiler/decompression-optimizer.cc b/deps/v8/src/compiler/decompression-optimizer.cc
index fb53d9f55b..18e682a4fd 100644
--- a/deps/v8/src/compiler/decompression-optimizer.cc
+++ b/deps/v8/src/compiler/decompression-optimizer.cc
@@ -16,6 +16,7 @@ namespace {
bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
+ opcode == IrOpcode::kLoadTrapOnNull ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable;
}
@@ -100,6 +101,7 @@ void DecompressionOptimizer::MarkNodeInputs(Node* node) {
// SPECIAL CASES - Load.
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kLoadImmutable:
DCHECK_EQ(node->op()->ValueInputCount(), 2);
@@ -119,6 +121,7 @@ void DecompressionOptimizer::MarkNodeInputs(Node* node) {
// SPECIAL CASES - Store.
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kStoreTrapOnNull:
case IrOpcode::kUnalignedStore: {
DCHECK_EQ(node->op()->ValueInputCount(), 3);
MaybeMarkAndQueueForRevisit(node->InputAt(0),
@@ -265,6 +268,10 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
NodeProperties::ChangeOp(node,
machine()->ProtectedLoad(compressed_load_rep));
break;
+ case IrOpcode::kLoadTrapOnNull:
+ NodeProperties::ChangeOp(node,
+ machine()->LoadTrapOnNull(compressed_load_rep));
+ break;
case IrOpcode::kUnalignedLoad:
NodeProperties::ChangeOp(node,
machine()->UnalignedLoad(compressed_load_rep));
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 66973abcbc..0b50f3e14f 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -4,11 +4,16 @@
#include "src/compiler/effect-control-linearizer.h"
+#include <cstdint>
+#include <type_traits>
+
#include "include/v8-fast-api-calls.h"
#include "src/base/bits.h"
+#include "src/base/small-vector.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -18,15 +23,21 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/compiler/string-builder-optimizer.h"
#include "src/heap/factory-inl.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/object-list-macros.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/string.h"
#include "src/objects/turbofan-types.h"
namespace v8 {
@@ -42,7 +53,8 @@ class EffectControlLinearizer {
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaintainSchedule maintain_schedule,
- JSHeapBroker* broker)
+ JSHeapBroker* broker,
+ StringBuilderOptimizer* string_builder_optimizer)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
@@ -51,6 +63,7 @@ class EffectControlLinearizer {
node_origins_(node_origins),
broker_(broker),
graph_assembler_(graph_assembler),
+ string_builder_optimizer_(string_builder_optimizer),
frame_state_zapper_(nullptr) {}
void Run();
@@ -83,7 +96,6 @@ class EffectControlLinearizer {
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
- Node* LowerCheckBigInt(Node* node, Node* frame_state);
Node* LowerCheckedBigIntToBigInt64(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
void LowerCheckIf(Node* node, Node* frame_state);
@@ -118,6 +130,7 @@ class EffectControlLinearizer {
Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerCheckBigInt(Node* node, Node* frame_state);
Node* LowerChangeInt64ToBigInt(Node* node);
Node* LowerChangeUint64ToBigInt(Node* node);
Node* LowerTruncateBigIntToWord64(Node* node);
@@ -185,6 +198,13 @@ class EffectControlLinearizer {
Node* LowerBigIntDivide(Node* node, Node* frame_state);
Node* LowerBigIntModulus(Node* node, Node* frame_state);
Node* LowerBigIntBitwiseAnd(Node* node, Node* frame_state);
+ Node* LowerBigIntBitwiseOr(Node* node);
+ Node* LowerBigIntBitwiseXor(Node* node, Node* frame_state);
+ Node* LowerBigIntShiftLeft(Node* node, Node* frame_state);
+ Node* LowerBigIntShiftRight(Node* node, Node* frame_state);
+ Node* LowerBigIntEqual(Node* node);
+ Node* LowerBigIntLessThan(Node* node);
+ Node* LowerBigIntLessThanOrEqual(Node* node);
Node* LowerBigIntNegate(Node* node);
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
@@ -302,6 +322,7 @@ class EffectControlLinearizer {
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
Node* ObjectIsSmi(Node* value);
+ Node* JSAnyIsNotPrimitiveHeapObject(Node* value, Node* map = nullptr);
Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
Node* TruncateWordToInt32(Node* value);
Node* MakeWeakForComparison(Node* heap_object);
@@ -312,9 +333,32 @@ class EffectControlLinearizer {
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
+ void IfThenElse(Node* condition, std::function<void(void)> then_body,
+ std::function<void(void)> else_body);
+ Node* SizeForString(Node* length, Node* is_two_byte);
+ Node* AllocateSeqString(Node* size, Node* one_byte);
+ Node* AllocateSeqString(Node* size, bool one_byte);
+ Node* AllocateOneByteSlicedString();
+ Node* AllocateTwoByteSlicedString();
+ void CopyString(Node* src, Node* dst, Node* len, Node* is_one_byte);
+ Node* StringIsOneByte(Node* node);
+ Node* StringIsTwoByte(Node* node);
+ Node* ConstStringIsOneByte(Node* node);
+ void StoreLiteralStringToBuffer(Node* buffer, Node* offset, Node* node,
+ Node* is_one_byte);
+ Node* ConvertOneByteStringToTwoByte(Node* orig, Node* total_len,
+ Node* initialized_len);
+ template <typename Char>
+ void StoreConstantLiteralStringToBuffer(Node* buffer, Node* offset,
+ Node* node, Node* is_one_byte);
+ void EndStringBuilderConcatForLoopPhi(Node* node, BasicBlock* block);
+ Node* EndStringBuilderConcat(Node* node);
+
// Pass {bitfield} = {digit} = nullptr to construct the canonical 0n BigInt.
Node* BuildAllocateBigInt(Node* bitfield, Node* digit);
+ Node* BuildAllocateJSExternalObject(Node* pointer);
+
void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
ElementsKind to);
@@ -328,6 +372,8 @@ class EffectControlLinearizer {
// deopt on failure.
void TryMigrateInstance(Node* value, Node* value_map);
+ Node* CallBuiltinForBigIntBinop(Node* left, Node* right, Builtin builtin);
+
bool should_maintain_schedule() const {
return maintain_schedule_ == MaintainSchedule::kMaintain;
}
@@ -356,6 +402,7 @@ class EffectControlLinearizer {
NodeOriginTable* node_origins_;
JSHeapBroker* broker_;
JSGraphAssembler* graph_assembler_;
+ StringBuilderOptimizer* string_builder_optimizer_;
Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
};
@@ -657,6 +704,13 @@ void EffectControlLinearizer::Run() {
}
instr++;
+  // We collect in {string_builder_end_phis} the non-loop Phis that end String
+  // Builders: before processing the regular nodes of the current block, we'll
+  // insert truncations of those Phis. (Note that when loop Phis end String
+  // Builders, the truncation is inserted in the next block rather than in the
+  // current block; see where BlockShouldFinalizeStringBuilders is used in
+  // this file.)
+ base::SmallVector<Node*, 8> string_builder_end_phis;
// Iterate over the phis and update the effect phis.
Node* effect_phi = nullptr;
Node* terminate = nullptr;
@@ -670,6 +724,9 @@ void EffectControlLinearizer::Run() {
DCHECK_NE(IrOpcode::kIfException, control->opcode());
effect_phi = node;
} else if (node->opcode() == IrOpcode::kPhi) {
+ if (string_builder_optimizer_->IsNonLoopPhiStringBuilderEnd(node)) {
+ string_builder_end_phis.push_back(node);
+ }
// Just skip phis.
} else if (node->opcode() == IrOpcode::kTerminate) {
DCHECK_NULL(terminate);
@@ -768,6 +825,34 @@ void EffectControlLinearizer::Run() {
gasm()->InitializeEffectControl(effect, control);
+ // Inserting trimmings for finished string builders that ended with a loop
+ // Phi.
+ if (string_builder_optimizer_->BlockShouldFinalizeStringBuilders(block)) {
+ for (Node* node :
+ string_builder_optimizer_->GetStringBuildersToFinalize(block)) {
+ EndStringBuilderConcatForLoopPhi(node, block);
+ }
+ }
+
+ // Finishing string builders that end with non-loop Phi nodes.
+ for (Node* phi : string_builder_end_phis) {
+ size_t node_count = graph()->NodeCount();
+
+ Node* trimmed = EndStringBuilderConcat(phi);
+
+ // Replacing uses of {phi} by {trimmed}.
+ for (Edge edge : phi->use_edges()) {
+ if (edge.from()->id() >= node_count) {
+ // This is an edge to a new node that was introduced to do the
+ // trimming; we certainly don't want to replace it.
+ continue;
+ }
+ DCHECK(!NodeProperties::IsControlEdge(edge) &&
+ !NodeProperties::IsEffectEdge(edge));
+ edge.UpdateTo(trimmed);
+ }
+ }
+
// Process the ordinary instructions.
for (; instr != end_instr; instr++) {
Node* node = *instr;
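A hedged illustration of the use-rewiring step above: the node count is captured before EndStringBuilderConcat creates the trimming code, so any edge coming from a node with an id at or above that count belongs to the freshly built trimming subgraph and must keep pointing at the original Phi. The helper name is illustrative.

// Illustrative helper; mirrors the edge filter in the hunk above.
void ReplaceOldUses(Node* phi, Node* trimmed, size_t old_node_count) {
  for (Edge edge : phi->use_edges()) {
    // Edges from nodes created while building the trimming code are skipped.
    if (edge.from()->id() >= old_node_count) continue;
    DCHECK(!NodeProperties::IsControlEdge(edge) &&
           !NodeProperties::IsEffectEdge(edge));
    edge.UpdateTo(trimmed);
  }
}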
@@ -875,7 +960,10 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
inside_region_ = false;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRenameNode(node);
+ if (!v8_flags.turboshaft) {
+ RemoveRenameNode(node);
+ return;
+ }
}
if (node->opcode() == IrOpcode::kBeginRegion) {
// Determine the observability for this region and use that for all
@@ -886,7 +974,10 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
inside_region_ = true;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRenameNode(node);
+ if (!v8_flags.turboshaft) {
+ RemoveRenameNode(node);
+ return;
+ }
}
if (node->opcode() == IrOpcode::kTypeGuard) {
return RemoveRenameNode(node);
@@ -898,7 +989,9 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
// effect that is passed. The frame state is preserved for lowering.
DCHECK_EQ(RegionObservability::kObservable, region_observability_);
*frame_state = NodeProperties::GetFrameStateInput(node);
- return;
+ if (!v8_flags.turboshaft) return;
+ // We keep checkpoints to allow Turboshaft's graph builder to recompute the
+ // correct FrameStates for nodes.
}
if (node->opcode() == IrOpcode::kStoreField) {
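Most of the opcode cases in the following hunks repeat one guard when the turboshaft flag is on: keep the frame state reachable by emitting a checkpoint, then leave the node un-lowered for Turboshaft's graph builder. A compact sketch of that pattern, with an illustrative helper name (the real code inlines it per case):

// Sketch of the recurring guard used throughout TryWireInStateEffect.
bool DeferLoweringToTurboshaft(JSGraphAssembler* gasm, Node* frame_state) {
  if (!v8_flags.turboshaft) return false;     // lower in TurboFan as before
  gasm->Checkpoint(FrameState{frame_state});  // preserve the frame state
  return true;                                // node is left for Turboshaft
}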
@@ -929,60 +1022,78 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
Node* result = nullptr;
switch (node->opcode()) {
case IrOpcode::kChangeBitToTagged:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeBitToTagged(node);
break;
case IrOpcode::kChangeInt31ToTaggedSigned:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeInt31ToTaggedSigned(node);
break;
case IrOpcode::kChangeInt32ToTagged:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeInt32ToTagged(node);
break;
case IrOpcode::kChangeInt64ToTagged:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeInt64ToTagged(node);
break;
case IrOpcode::kChangeUint32ToTagged:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeUint32ToTagged(node);
break;
case IrOpcode::kChangeUint64ToTagged:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeUint64ToTagged(node);
break;
case IrOpcode::kChangeFloat64ToTagged:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeFloat64ToTagged(node);
break;
case IrOpcode::kChangeFloat64ToTaggedPointer:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeFloat64ToTaggedPointer(node);
break;
case IrOpcode::kChangeTaggedSignedToInt32:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedSignedToInt32(node);
break;
case IrOpcode::kChangeTaggedSignedToInt64:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedSignedToInt64(node);
break;
case IrOpcode::kChangeTaggedToBit:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedToBit(node);
break;
case IrOpcode::kChangeTaggedToInt32:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedToInt32(node);
break;
case IrOpcode::kChangeTaggedToUint32:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedToUint32(node);
break;
case IrOpcode::kChangeTaggedToInt64:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedToInt64(node);
break;
case IrOpcode::kChangeTaggedToFloat64:
+ if (v8_flags.turboshaft) return false;
result = LowerChangeTaggedToFloat64(node);
break;
case IrOpcode::kChangeTaggedToTaggedSigned:
result = LowerChangeTaggedToTaggedSigned(node);
break;
case IrOpcode::kTruncateTaggedToBit:
+ if (v8_flags.turboshaft) return false;
result = LowerTruncateTaggedToBit(node);
break;
case IrOpcode::kTruncateTaggedPointerToBit:
+ if (v8_flags.turboshaft) return false;
result = LowerTruncateTaggedPointerToBit(node);
break;
case IrOpcode::kTruncateTaggedToFloat64:
+ if (v8_flags.turboshaft) return false;
result = LowerTruncateTaggedToFloat64(node);
break;
case IrOpcode::kCheckClosure:
@@ -995,102 +1106,215 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerCompareMaps(node);
break;
case IrOpcode::kCheckNumber:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckNumber(node, frame_state);
break;
case IrOpcode::kCheckReceiver:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckReceiver(node, frame_state);
break;
case IrOpcode::kCheckReceiverOrNullOrUndefined:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckReceiverOrNullOrUndefined(node, frame_state);
break;
case IrOpcode::kCheckSymbol:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckSymbol(node, frame_state);
break;
case IrOpcode::kCheckString:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckString(node, frame_state);
break;
case IrOpcode::kCheckedUint64ToInt64:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint64ToInt64(node, frame_state);
break;
- case IrOpcode::kCheckBigInt:
- result = LowerCheckBigInt(node, frame_state);
- break;
case IrOpcode::kCheckedBigIntToBigInt64:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedBigIntToBigInt64(node, frame_state);
break;
case IrOpcode::kCheckInternalizedString:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckInternalizedString(node, frame_state);
break;
case IrOpcode::kCheckIf:
LowerCheckIf(node, frame_state);
break;
case IrOpcode::kCheckedInt32Add:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt32Add(node, frame_state);
break;
case IrOpcode::kCheckedInt32Sub:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt32Sub(node, frame_state);
break;
case IrOpcode::kCheckedInt32Div:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt32Div(node, frame_state);
break;
case IrOpcode::kCheckedInt32Mod:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt32Mod(node, frame_state);
break;
case IrOpcode::kCheckedUint32Div:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint32Div(node, frame_state);
break;
case IrOpcode::kCheckedUint32Mod:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint32Mod(node, frame_state);
break;
case IrOpcode::kCheckedInt32Mul:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt32Mul(node, frame_state);
break;
case IrOpcode::kCheckedInt64Add:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64Add(node, frame_state);
break;
case IrOpcode::kCheckedInt64Sub:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64Sub(node, frame_state);
break;
case IrOpcode::kCheckedInt64Mul:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64Mul(node, frame_state);
break;
case IrOpcode::kCheckedInt64Div:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64Div(node, frame_state);
break;
case IrOpcode::kCheckedInt64Mod:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64Mod(node, frame_state);
break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedInt64ToInt32:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedInt64ToTaggedSigned:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedInt64ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedUint32Bounds:
result = LowerCheckedUint32Bounds(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToInt32:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint32ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToTaggedSigned:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedUint64Bounds:
result = LowerCheckedUint64Bounds(node, frame_state);
break;
case IrOpcode::kCheckedUint64ToInt32:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedUint64ToTaggedSigned:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedUint64ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedFloat64ToInt32:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedFloat64ToInt64:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedFloat64ToInt64(node, frame_state);
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
@@ -1098,18 +1322,38 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
FATAL("No frame state (zapped by #%d: %s)", frame_state_zapper_->id(),
frame_state_zapper_->op()->mnemonic());
}
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToArrayIndex:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedTaggedToArrayIndex(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToInt32:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedTaggedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToInt64:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedTaggedToInt64(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToFloat64:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerCheckedTaggedToFloat64(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToTaggedSigned:
@@ -1118,16 +1362,43 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToTaggedPointer:
result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
+ case IrOpcode::kCheckBigInt:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
+ result = LowerCheckBigInt(node, frame_state);
+ break;
case IrOpcode::kChangeInt64ToBigInt:
- result = LowerChangeInt64ToBigInt(node);
+ if (v8_flags.turboshaft) {
+ DCHECK(machine()->Is64());
+ // ChangeInt64ToBigInt is allocating when lowered, so we must fix its
+ // position in the effect chain such that it is non-floating after ECL
+ // and cannot mess up when rescheduling (e.g. in Turboshaft's graph
+ // builder).
+ result = gasm()->Chained(node->op(), node->InputAt(0));
+ } else {
+ result = LowerChangeInt64ToBigInt(node);
+ }
break;
case IrOpcode::kChangeUint64ToBigInt:
- result = LowerChangeUint64ToBigInt(node);
+ if (v8_flags.turboshaft) {
+ DCHECK(machine()->Is64());
+ // ChangeUint64ToBigInt is allocating when lowered, so we must fix its
+ // position in the effect chain such that it is non-floating after ECL
+ // and cannot mess up when rescheduling (e.g. in Turboshaft's graph
+ // builder).
+ result = gasm()->Chained(node->op(), node->InputAt(0));
+ } else {
+ result = LowerChangeUint64ToBigInt(node);
+ }
break;
case IrOpcode::kTruncateBigIntToWord64:
+ if (v8_flags.turboshaft) return false;
result = LowerTruncateBigIntToWord64(node);
break;
case IrOpcode::kTruncateTaggedToWord32:
+ if (v8_flags.turboshaft) return false;
result = LowerTruncateTaggedToWord32(node);
break;
case IrOpcode::kCheckedTruncateTaggedToWord32:
@@ -1137,18 +1408,23 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerNumberToString(node);
break;
case IrOpcode::kObjectIsArrayBufferView:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsArrayBufferView(node);
break;
case IrOpcode::kObjectIsBigInt:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsBigInt(node);
break;
case IrOpcode::kObjectIsCallable:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsCallable(node);
break;
case IrOpcode::kObjectIsConstructor:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsConstructor(node);
break;
case IrOpcode::kObjectIsDetectableCallable:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsDetectableCallable(node);
break;
case IrOpcode::kObjectIsMinusZero:
@@ -1161,27 +1437,35 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerObjectIsNaN(node);
break;
case IrOpcode::kNumberIsNaN:
+ if (v8_flags.turboshaft) return false;
result = LowerNumberIsNaN(node);
break;
case IrOpcode::kObjectIsNonCallable:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsNonCallable(node);
break;
case IrOpcode::kObjectIsNumber:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsNumber(node);
break;
case IrOpcode::kObjectIsReceiver:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsReceiver(node);
break;
case IrOpcode::kObjectIsSmi:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsSmi(node);
break;
case IrOpcode::kObjectIsString:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsString(node);
break;
case IrOpcode::kObjectIsSymbol:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsSymbol(node);
break;
case IrOpcode::kObjectIsUndetectable:
+ if (v8_flags.turboshaft) return false;
result = LowerObjectIsUndetectable(node);
break;
case IrOpcode::kArgumentsLength:
@@ -1197,15 +1481,18 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerTypeOf(node);
break;
case IrOpcode::kNewDoubleElements:
+ if (v8_flags.turboshaft) return false;
result = LowerNewDoubleElements(node);
break;
case IrOpcode::kNewSmiOrObjectElements:
+ if (v8_flags.turboshaft) return false;
result = LowerNewSmiOrObjectElements(node);
break;
case IrOpcode::kNewArgumentsElements:
result = LowerNewArgumentsElements(node);
break;
case IrOpcode::kNewConsString:
+ if (v8_flags.turboshaft) return false;
result = LowerNewConsString(node);
break;
case IrOpcode::kSameValue:
@@ -1224,66 +1511,147 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerStringConcat(node);
break;
case IrOpcode::kStringFromSingleCharCode:
+ if (v8_flags.turboshaft) return false;
result = LowerStringFromSingleCharCode(node);
break;
case IrOpcode::kStringFromSingleCodePoint:
+ if (v8_flags.turboshaft) return false;
result = LowerStringFromSingleCodePoint(node);
break;
case IrOpcode::kStringIndexOf:
+ if (v8_flags.turboshaft) return false;
result = LowerStringIndexOf(node);
break;
case IrOpcode::kStringFromCodePointAt:
+ if (v8_flags.turboshaft) return false;
result = LowerStringFromCodePointAt(node);
break;
case IrOpcode::kStringLength:
+ if (v8_flags.turboshaft) return false;
result = LowerStringLength(node);
break;
case IrOpcode::kStringToNumber:
result = LowerStringToNumber(node);
break;
case IrOpcode::kStringCharCodeAt:
+ if (v8_flags.turboshaft) return false;
result = LowerStringCharCodeAt(node);
break;
case IrOpcode::kStringCodePointAt:
+ if (v8_flags.turboshaft) return false;
result = LowerStringCodePointAt(node);
break;
case IrOpcode::kStringToLowerCaseIntl:
+ if (v8_flags.turboshaft) return false;
result = LowerStringToLowerCaseIntl(node);
break;
case IrOpcode::kStringToUpperCaseIntl:
+ if (v8_flags.turboshaft) return false;
result = LowerStringToUpperCaseIntl(node);
break;
case IrOpcode::kStringSubstring:
+ if (v8_flags.turboshaft) return false;
result = LowerStringSubstring(node);
break;
case IrOpcode::kStringEqual:
+ if (v8_flags.turboshaft) return false;
result = LowerStringEqual(node);
break;
case IrOpcode::kStringLessThan:
+ if (v8_flags.turboshaft) return false;
result = LowerStringLessThan(node);
break;
case IrOpcode::kStringLessThanOrEqual:
+ if (v8_flags.turboshaft) return false;
result = LowerStringLessThanOrEqual(node);
break;
case IrOpcode::kBigIntAdd:
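+ // When Turboshaft is enabled, BigInt binary operations are not lowered
+ // here. We still emit a Checkpoint so the FrameState is kept around for
+ // Turboshaft (see the comment on kBigIntBitwiseOr below), and leave the
+ // node itself for Turboshaft to lower.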
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerBigIntAdd(node, frame_state);
break;
case IrOpcode::kBigIntSubtract:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerBigIntSubtract(node, frame_state);
break;
case IrOpcode::kBigIntMultiply:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerBigIntMultiply(node, frame_state);
break;
case IrOpcode::kBigIntDivide:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerBigIntDivide(node, frame_state);
break;
case IrOpcode::kBigIntModulus:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerBigIntModulus(node, frame_state);
break;
case IrOpcode::kBigIntBitwiseAnd:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
result = LowerBigIntBitwiseAnd(node, frame_state);
break;
+ case IrOpcode::kBigIntBitwiseOr:
+ if (v8_flags.turboshaft) {
+ // Although bitwise or doesn't need a FrameState (because it cannot
+ // deopt), we keep it here for Turboshaft, because this allows us to
+ // handle `or` uniformly with all other binary operations.
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
+ result = LowerBigIntBitwiseOr(node);
+ break;
+ case IrOpcode::kBigIntBitwiseXor:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
+ result = LowerBigIntBitwiseXor(node, frame_state);
+ break;
+ case IrOpcode::kBigIntShiftLeft:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
+ result = LowerBigIntShiftLeft(node, frame_state);
+ break;
+ case IrOpcode::kBigIntShiftRight:
+ if (v8_flags.turboshaft) {
+ gasm()->Checkpoint(FrameState{frame_state});
+ return false;
+ }
+ result = LowerBigIntShiftRight(node, frame_state);
+ break;
+ case IrOpcode::kBigIntEqual:
+ if (v8_flags.turboshaft) return false;
+ result = LowerBigIntEqual(node);
+ break;
+ case IrOpcode::kBigIntLessThan:
+ if (v8_flags.turboshaft) return false;
+ result = LowerBigIntLessThan(node);
+ break;
+ case IrOpcode::kBigIntLessThanOrEqual:
+ if (v8_flags.turboshaft) return false;
+ result = LowerBigIntLessThanOrEqual(node);
+ break;
case IrOpcode::kBigIntNegate:
+ if (v8_flags.turboshaft) return false;
result = LowerBigIntNegate(node);
break;
case IrOpcode::kNumberIsFloat64Hole:
@@ -1320,6 +1688,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
LowerCheckEqualsInternalizedString(node, frame_state);
break;
case IrOpcode::kAllocate:
+ if (v8_flags.turboshaft) return false;
result = LowerAllocate(node);
break;
case IrOpcode::kCheckEqualsSymbol:
@@ -1353,6 +1722,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerFastApiCall(node);
break;
case IrOpcode::kLoadFieldByIndex:
+ if (v8_flags.turboshaft) return false;
result = LowerLoadFieldByIndex(node);
break;
case IrOpcode::kLoadTypedElement:
@@ -1428,6 +1798,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
break;
case IrOpcode::kDoubleArrayMax:
case IrOpcode::kDoubleArrayMin:
+ if (v8_flags.turboshaft) return false;
result = LowerDoubleArrayMinMax(node);
break;
default:
@@ -1449,6 +1820,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
#define __ gasm()->
Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
return ChangeFloat64ToTagged(value, mode);
@@ -1505,11 +1877,13 @@ Node* EffectControlLinearizer::ChangeFloat64ToTagged(
}
Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return AllocateHeapNumberWithValue(value);
}
Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ChangeBitToTagged(value);
}
@@ -1529,11 +1903,13 @@ Node* EffectControlLinearizer::ChangeBitToTagged(Node* value) {
}
Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ChangeInt32ToSmi(value);
}
Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ChangeInt32ToTagged(value);
}
@@ -1558,6 +1934,7 @@ Node* EffectControlLinearizer::ChangeInt32ToTagged(Node* value) {
}
Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_in_smi_range = __ MakeDeferredLabel();
@@ -1583,6 +1960,7 @@ Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ChangeUint32ToTagged(value);
}
@@ -1605,6 +1983,7 @@ Node* EffectControlLinearizer::ChangeUint32ToTagged(Node* value) {
}
Node* EffectControlLinearizer::LowerChangeUint64ToTagged(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_in_smi_range = __ MakeDeferredLabel();
@@ -1625,16 +2004,19 @@ Node* EffectControlLinearizer::LowerChangeUint64ToTagged(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ChangeSmiToInt32(value);
}
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ChangeSmiToInt64(value);
}
Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return __ TaggedEqual(value, __ TrueConstant());
}
@@ -1699,6 +2081,7 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
}
Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
auto done = __ MakeLabel(MachineRepresentation::kBit);
auto if_smi = __ MakeDeferredLabel();
@@ -1719,6 +2102,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
}
Node* EffectControlLinearizer::LowerTruncateTaggedPointerToBit(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
auto done = __ MakeLabel(MachineRepresentation::kBit);
TruncateTaggedPointerToBit(node, &done);
@@ -1728,6 +2112,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedPointerToBit(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1749,6 +2134,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1770,6 +2156,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeTaggedToInt64(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1791,6 +2178,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToInt64(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
return LowerTruncateTaggedToFloat64(node);
}
@@ -1798,7 +2186,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
Node* check = ObjectIsSmi(value);
__ GotoIfNot(check, &if_not_smi);
@@ -1817,6 +2205,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
}
Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1858,7 +2247,7 @@ Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
// we recorded before.
Node* value_cell =
__ LoadField(AccessBuilder::ForJSFunctionFeedbackCell(), value);
- Node* check_cell = __ WordEqual(value_cell, __ HeapConstant(feedback_cell));
+ Node* check_cell = __ TaggedEqual(value_cell, __ HeapConstant(feedback_cell));
__ DeoptimizeIfNot(DeoptimizeReason::kWrongFeedbackCell, FeedbackSource(),
check_cell, frame_state);
return value;
@@ -1981,6 +2370,18 @@ void EffectControlLinearizer::TryMigrateInstance(Node* value, Node* value_map) {
__ Bind(&done);
}
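+// Generates a call to {builtin}, which is expected to be one of the BigInt
+// binary-operation builtins, passing {left} and {right} and no context. The
+// call is marked foldable and non-throwing, matching the operator flags below.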
+Node* EffectControlLinearizer::CallBuiltinForBigIntBinop(Node* left,
+ Node* right,
+ Builtin builtin) {
+ Callable const callable = Builtins::CallableFor(isolate(), builtin);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), left, right,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
size_t const map_count = maps.size();
@@ -2011,6 +2412,7 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
}
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2034,15 +2436,9 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
-
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
-
- static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* check = __ Uint32LessThanOrEqual(
- __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+ Node* check = JSAnyIsNotPrimitiveHeapObject(value);
__ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, FeedbackSource(),
check, frame_state);
return value;
@@ -2050,8 +2446,20 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
+#if V8_STATIC_ROOTS_BOOL
+ Node* check0 = JSAnyIsNotPrimitiveHeapObject(value);
+ Node* check1 = __ TaggedEqual(value, __ UndefinedConstant());
+ Node* check2 = __ TaggedEqual(value, __ NullConstant());
+
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
+ FeedbackSource(),
+ __ Word32Or(check0, __ Word32Or(check1, check2)),
+ frame_state);
+
+#else
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
@@ -2068,10 +2476,12 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
Node* check1 = __ TaggedEqual(value_map, __ BooleanMapConstant());
__ DeoptimizeIf(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
FeedbackSource(), check1, frame_state);
+#endif
return value;
}
Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
@@ -2084,6 +2494,7 @@ Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
}
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2100,6 +2511,7 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
@@ -2122,25 +2534,732 @@ void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
__ DeoptimizeIfNot(p.reason(), p.feedback(), value, frame_state);
}
+namespace {
+
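+// Helpers for inspecting the literal strings (HeapConstants or
+// StringFromSingleCharCode nodes) that the string builder optimization
+// operates on.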
+int GetLiteralStringLen(Node* node, JSHeapBroker* broker) {
+ if (node->opcode() == IrOpcode::kStringFromSingleCharCode) {
+ return 1;
+ }
+ HeapObjectMatcher m(node);
+ DCHECK(m.HasResolvedValue() && m.Ref(broker).IsString());
+ StringRef string = m.Ref(broker).AsString();
+ return string.length();
+}
+
+bool IsTwoByteString(Node* node, JSHeapBroker* broker) {
+ HeapObjectMatcher m(node);
+ DCHECK(m.HasResolvedValue() && m.Ref(broker).IsString());
+ StringRef string = m.Ref(broker).AsString();
+ return string.object()->IsTwoByteRepresentation();
+}
+
+template <typename Char>
+const Char* GetLiteralString(Node* node, JSHeapBroker* broker) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kHeapConstant);
+ HeapObjectMatcher m(node);
+ DCHECK(m.HasResolvedValue() && m.Ref(broker).IsString());
+ StringRef string = m.Ref(broker).AsString();
+ DisallowGarbageCollection no_gc;
+ SharedStringAccessGuardIfNeeded access_guard(broker->isolate());
+ const Char* str = string.object()->template GetDirectStringChars<Char>(
+ broker->isolate(), no_gc, access_guard);
+ return str;
+}
+
+} // namespace
+
+// Generated-code equivalent of:
+// if (condition) { then_body } else { else_body }
+void EffectControlLinearizer::IfThenElse(Node* condition,
+ std::function<void(void)> then_body,
+ std::function<void(void)> else_body) {
+ auto if_true = __ MakeLabel(), if_false = __ MakeLabel(),
+ done = __ MakeLabel();
+ __ Branch(condition, &if_true, &if_false);
+ __ Bind(&if_true);
+ then_body();
+ __ Goto(&done);
+ __ Bind(&if_false);
+ else_body();
+ __ Goto(&done);
+ __ Bind(&done);
+}
+
+// This function is the generated-code equivalent of SeqOneByteString::SizeFor
+// and SeqTwoByteString::SizeFor: it calculates how many bytes should be
+// allocated if we want to allocate a string of length {length} (in particular,
+// it takes headers and alignment into account).
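+// For instance, assuming a 16-byte SeqString header and 8-byte object
+// alignment (illustrative values only; the real constants depend on the
+// build), a 5-character string needs align(16 + 5) = 24 bytes as 1-byte and
+// align(16 + 10) = 32 bytes as 2-byte.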
+Node* EffectControlLinearizer::SizeForString(Node* length, Node* is_two_byte) {
+ Node* size = __ Int32Constant(String::kHeaderSize);
+ size = __ Int32Add(size, __ Word32Shl(length, is_two_byte));
+
+ auto object_pointer_align = [&](Node* value) -> Node* {
+ // Generated-code equivalent of the OBJECT_POINTER_ALIGN macro.
+ return __ Word32And(
+ __ Int32Add(value, __ Int32Constant(kObjectAlignmentMask)),
+ __ Int32Constant(~kObjectAlignmentMask));
+ };
+ size = object_pointer_align(size);
+ size = ChangeInt32ToIntPtr(size);
+ return size;
+}
+
+// Allocates a new 1/2-byte (depending on {one_byte}) SeqString of length
+// {length}.
+Node* EffectControlLinearizer::AllocateSeqString(Node* length, bool one_byte) {
+ Node* size = SizeForString(length, __ Int32Constant(!one_byte));
+ Node* seq_string = __ Allocate(AllocationType::kYoung, size);
+ __ StoreField(AccessBuilder::ForMap(), seq_string,
+ __ HeapConstant(one_byte ? factory()->one_byte_string_map()
+ : factory()->string_map()));
+ __ StoreField(AccessBuilder::ForNameRawHashField(), seq_string,
+ __ Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), seq_string, length);
+ // Zero the padding bytes (the last word of the allocation).
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kTaggedSigned,
+ kNoWriteBarrier),
+ seq_string,
+ __ IntPtrAdd(size, __ IntPtrConstant(-kObjectAlignment - kHeapObjectTag)),
+ __ SmiConstant(0));
+
+ return seq_string;
+}
+
+// Allocates a new 1/2-byte (depending on {is_one_byte}) SeqString of length
+// {length}. This function is a dynamic version of the previous one for cases
+// where {is_one_byte} is not known at compile time.
+Node* EffectControlLinearizer::AllocateSeqString(Node* length,
+ Node* is_one_byte) {
+ Node* is_two_byte = __ Word32Xor(is_one_byte, __ Int32Constant(1));
+ Node* size = SizeForString(length, is_two_byte);
+ Node* seq_string = __ Allocate(AllocationType::kYoung, size);
+ __ StoreField(AccessBuilder::ForNameRawHashField(), seq_string,
+ __ Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), seq_string, length);
+ // Zero the padding bytes (the last word of the allocation).
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kTaggedSigned,
+ kNoWriteBarrier),
+ seq_string,
+ __ IntPtrAdd(size, __ IntPtrConstant(-kObjectAlignment - kHeapObjectTag)),
+ __ SmiConstant(0));
+
+ IfThenElse(
+ is_one_byte,
+ [&]() {
+ __ StoreField(AccessBuilder::ForMap(), seq_string,
+ __ HeapConstant(factory()->one_byte_string_map()));
+ },
+ [&]() {
+ __ StoreField(AccessBuilder::ForMap(), seq_string,
+ __ HeapConstant(factory()->string_map()));
+ });
+ return seq_string;
+}
+
+Node* EffectControlLinearizer::AllocateOneByteSlicedString() {
+ Node* sliced_string = __ Allocate(
+ AllocationType::kYoung,
+ __ IntPtrConstant(
+ jsgraph()->factory()->sliced_one_byte_string_map()->instance_size()));
+ __ StoreField(AccessBuilder::ForMap(), sliced_string,
+ __ HeapConstant(factory()->sliced_one_byte_string_map()));
+ return sliced_string;
+}
+
+Node* EffectControlLinearizer::AllocateTwoByteSlicedString() {
+ Node* sliced_string = __ Allocate(
+ AllocationType::kYoung,
+ __ IntPtrConstant(
+ jsgraph()->factory()->sliced_string_map()->instance_size()));
+ __ StoreField(AccessBuilder::ForMap(), sliced_string,
+ __ HeapConstant(factory()->sliced_string_map()));
+ return sliced_string;
+}
+
+// Copies the first {length} characters of {src} into {dst}, assuming that they
+// both have the same 1/2-byte representation described by {is_one_byte}.
+void EffectControlLinearizer::CopyString(Node* src, Node* dst, Node* length,
+ Node* is_one_byte) {
+ auto one_byte_lbl = __ MakeLabel(), two_byte_lbl = __ MakeLabel(),
+ done = __ MakeLabel();
+
+ auto copy_string_fun = [&](auto label, auto access) {
+ __ Bind(label);
+ auto loop = __ MakeLoopLabel(MachineRepresentation::kWord32);
+ __ Goto(&loop, __ Int32Constant(0));
+
+ __ Bind(&loop);
+ Node* index = loop.PhiAt(0);
+ __ GotoIf(__ Word32Equal(index, length), &done);
+ __ StoreElement(access, dst, index, __ LoadElement(access, src, index));
+ index = __ Int32Add(index, __ Int32Constant(1));
+ __ Goto(&loop, index);
+ };
+
+ __ Branch(is_one_byte, &one_byte_lbl, &two_byte_lbl);
+ copy_string_fun(&one_byte_lbl, AccessBuilder::ForSeqOneByteStringCharacter());
+ copy_string_fun(&two_byte_lbl, AccessBuilder::ForSeqTwoByteStringCharacter());
+
+ __ Bind(&done);
+}
+
+// For any string, returns true if its representation is 1-byte.
+Node* EffectControlLinearizer::StringIsOneByte(Node* node) {
+ Node* map = __ LoadField(AccessBuilder::ForMap(), node);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+ return is_one_byte;
+}
+
+// For any string, returns true if its representation is 2-byte. The result
+// should be equivalent to `!StringIsOneByte(node)`, but we avoid the negation
+// to make things a bit faster.
+Node* EffectControlLinearizer::StringIsTwoByte(Node* node) {
+ Node* map = __ LoadField(AccessBuilder::ForMap(), node);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_two_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kTwoByteStringTag));
+ return is_two_byte;
+}
+
+// For a literal-ish string (a HeapConstant or a StringFromSingleCharCode),
+// computes whether it's 1-byte or 2-byte.
+Node* EffectControlLinearizer::ConstStringIsOneByte(Node* node) {
+ if (node->opcode() == IrOpcode::kHeapConstant) {
+ HeapObjectMatcher m(node);
+ DCHECK(m.HasResolvedValue() && m.Ref(broker()).IsString());
+ StringRef string = m.Ref(broker()).AsString();
+ return __ Int32Constant(string.object()->IsOneByteRepresentation());
+ } else {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStringFromSingleCharCode);
+ Node* c = __ Word32And(node->InputAt(0), __ Uint32Constant(0xFFFF));
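+ // Char codes up to 0xFF fit in a 1-byte (Latin-1) string; anything above
+ // needs a 2-byte string.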
+ return __ Int32LessThan(c, __ Int32Constant(0xFF + 1));
+ }
+}
+
+// Assuming that {orig} is a 1-byte backing store for a string builder of total
+// length {total_length}, including {initialized_length} initialized
+// characters (i.e., the SlicedString has length {initialized_length}), this
+// function allocates a 2-byte backing store of {total_length} length, and
+// copies the first {initialized_length} characters of {orig} to this new
+// backing store.
+Node* EffectControlLinearizer::ConvertOneByteStringToTwoByte(
+ Node* orig, Node* total_length, Node* initialized_length) {
+ Node* new_string = AllocateSeqString(total_length, false);
+ auto loop = __ MakeLoopLabel(MachineRepresentation::kWord32);
+ auto done = __ MakeLabel();
+ __ Goto(&loop, __ Int32Constant(0));
+ __ Bind(&loop);
+
+ Node* index = loop.PhiAt(0);
+ __ GotoIf(__ Word32Equal(index, initialized_length), &done);
+ __ StoreElement(AccessBuilder::ForSeqTwoByteStringCharacter(), new_string,
+ index,
+ __ LoadElement(AccessBuilder::ForSeqOneByteStringCharacter(),
+ orig, index));
+ index = __ Int32Add(index, __ Int32Constant(1));
+ __ Goto(&loop, index);
+
+ __ Bind(&done);
+ return new_string;
+}
+
+// Stores {node} (which should be a kHeapConstant containing a SeqString) into
+// {buffer} at offset {offset}. {Char} should be {uint8_t} if {node} is a 1-byte
+// string, and {uint16_t} if {node} is a 2-byte string.
+// If {node}'s length is at least {kMaxUnrollSize}, we generate a loop that
+// inserts each character of {node} into {buffer} one by one. Otherwise, we
+// unroll the loop and generate one StoreElement per character.
+template <typename Char>
+void EffectControlLinearizer::StoreConstantLiteralStringToBuffer(
+ Node* buffer, Node* offset, Node* node, Node* is_one_byte) {
+ const int kMaxUnrollSize = 6;
+ int len = GetLiteralStringLen(node, broker());
+ if (len < kMaxUnrollSize) {
+ // For {len} below {kMaxUnrollSize}, we unroll the loop and generate one
+ // Store per character.
+ auto copy_constant = [&](auto access) {
+ const Char* chars = GetLiteralString<Char>(node, broker());
+ for (int i = 0; i < len; i++) {
+ __ StoreElement(access, buffer,
+ __ Int32Add(offset, __ Int32Constant(i)),
+ __ Int32Constant(chars[i]));
+ }
+ };
+ if (std::is_same<Char, uint16_t>()) {
+ DCHECK(IsTwoByteString(node, broker()));
+ // If {node} is a literal 2-byte string, then we can just generate the
+ // 2-byte case (because it's guaranteed that {is_one_byte} is false).
+ copy_constant(AccessBuilder::ForSeqTwoByteStringCharacter());
+ } else {
+ // On the other hand, if {node} is 1-byte, then we can't infer anything,
+ // because {node} can be a 1-byte string that we need to add into a 2-byte
+ // string. Thus, we generate both 1-byte and 2-byte cases, and we'll
+ // decide dynamically which one to execute based on the value of
+ // {is_one_byte}.
+ IfThenElse(
+ is_one_byte,
+ [&]() {
+ copy_constant(AccessBuilder::ForSeqOneByteStringCharacter());
+ },
+ [&]() {
+ copy_constant(AccessBuilder::ForSeqTwoByteStringCharacter());
+ });
+ }
+ } else {
+ // For {len} of {kMaxUnrollSize} or more, we generate a proper loop, to keep
+ // the generated code size reasonable.
+ auto copy_constant = [&](auto buffer_access, auto constant_access) {
+ auto loop = __ MakeLoopLabel(MachineRepresentation::kWord32);
+ auto done = __ MakeLabel();
+ __ Goto(&loop, __ Int32Constant(0));
+
+ __ Bind(&loop);
+ Node* index = loop.PhiAt(0);
+ __ GotoIf(__ Word32Equal(index, __ Int32Constant(len)), &done);
+ __ StoreElement(buffer_access, buffer, __ Int32Add(offset, index),
+ __ LoadElement(constant_access, node, index));
+ __ Goto(&loop, __ Int32Add(index, __ Int32Constant(1)));
+ __ Bind(&done);
+ };
+ auto constant_access = IsTwoByteString(node, broker())
+ ? AccessBuilder::ForSeqTwoByteStringCharacter()
+ : AccessBuilder::ForSeqOneByteStringCharacter();
+ IfThenElse(
+ is_one_byte,
+ [&]() {
+ copy_constant(AccessBuilder::ForSeqOneByteStringCharacter(),
+ constant_access);
+ },
+ [&]() {
+ copy_constant(AccessBuilder::ForSeqTwoByteStringCharacter(),
+ constant_access);
+ });
+ }
+}
+
+// Stores {node} (which should be a kHeapConstant or a StringFromSingleCharCode)
+// into {buffer} at offset {offset}.
+void EffectControlLinearizer::StoreLiteralStringToBuffer(Node* buffer,
+ Node* offset,
+ Node* node,
+ Node* is_one_byte) {
+ DCHECK(node->opcode() == IrOpcode::kHeapConstant ||
+ node->opcode() == IrOpcode::kStringFromSingleCharCode);
+
+ if (node->opcode() == IrOpcode::kHeapConstant) {
+ if (IsTwoByteString(node, broker())) {
+ StoreConstantLiteralStringToBuffer<uint16_t>(buffer, offset, node,
+ is_one_byte);
+ } else {
+ StoreConstantLiteralStringToBuffer<uint8_t>(buffer, offset, node,
+ is_one_byte);
+ }
+ } else {
+ IfThenElse(
+ is_one_byte,
+ [&]() {
+ __ StoreElement(
+ AccessBuilder::ForSeqOneByteStringCharacter(), buffer, offset,
+ __ Word32And(node->InputAt(0), __ Uint32Constant(0xFFFF)));
+ },
+ [&]() {
+ __ StoreElement(
+ AccessBuilder::ForSeqTwoByteStringCharacter(), buffer, offset,
+ __ Word32And(node->InputAt(0), __ Uint32Constant(0xFFFF)));
+ });
+ }
+}
+
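+// Ends the string builder whose SlicedString is the loop phi {node}: trims
+// the backing store, and redirects the uses of {node} that are outside of
+// the loop to the trimmed backing store.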
+void EffectControlLinearizer::EndStringBuilderConcatForLoopPhi(
+ Node* node, BasicBlock* block) {
+ Node* backing_store = EndStringBuilderConcat(node);
+
+ // Updating the uses of {node} to use {backing_store} instead.
+ BasicBlock* loop_header = schedule()->block(node);
+ DCHECK(loop_header->IsLoopHeader());
+ for (Edge edge : node->use_edges()) {
+ BasicBlock* user_block = schedule()->block(edge.from());
+ if (!user_block) {
+ // If {schedule_} doesn't have a block for |edge.from()|, then this is a new
+ // node, which means that it is within the Phi's loop (because
+ // EffectControlLinearize iterates the graph in RPO order).
+ continue;
+ }
+ if (loop_header->LoopContains(user_block)) {
+ // |edge.from()| is within the loop, so it's part of the string builder,
+ // and should use the regular Phi node.
+ continue;
+ }
+ if ((*user_block->nodes())[0]->opcode() == IrOpcode::kMerge) {
+ if (std::find(user_block->predecessors().begin(),
+ user_block->predecessors().end(),
+ block) != user_block->predecessors().end()) {
+ // {user_block} merges {block} (+ some other block), so we update the
+ // edge, even though {block} will not dominate {user_block}.
+ edge.UpdateTo(backing_store);
+ continue;
+ }
+ }
+
+ // If we reach this point:
+ // - {user_block} cannot be before {block}, otherwise it couldn't use
+ // {node} which is defined in {block} (that was true at the beginning of
+ // the loop already).
+ // - {user_block} cannot be inside the loop (we've checked that).
+ // So {user_block} has to be after the loop. Then, since {user_block} is
+ // after {block} and uses {node}:
+ // - either it's not dominated by {block}, in which case it has to be a
+ // merge so that it can receive another value for {node} from its other
+ // predecessor(s) (otherwise it couldn't use {node}). We've checked that
+ // right above, so we're not in this case.
+ // - it is dominated by {block}. This is the only case that remains; we
+ // DCHECK it just to be safe.
+ DCHECK_EQ(BasicBlock::GetCommonDominator(block, user_block), block);
+ // {user_block} is dominated by {block}, which means that we can safely
+ // update the edge.
+ DCHECK(!NodeProperties::IsControlEdge(edge) &&
+ !NodeProperties::IsEffectEdge(edge));
+ edge.UpdateTo(backing_store);
+ }
+}
+
+// Once the string builder is done building the string, we get rid of the
+// SlicedString and right-trim the backing store SeqString, to keep only the
+// SeqString.
+Node* EffectControlLinearizer::EndStringBuilderConcat(Node* node) {
+ Node* new_length = __ LoadField(AccessBuilder::ForStringLength(), node);
+ Node* backing_store =
+ __ LoadField(AccessBuilder::ForSlicedStringParent(), node);
+ Node* backing_store_length =
+ __ LoadField(AccessBuilder::ForStringLength(), backing_store);
+
+ Node* is_two_byte = StringIsTwoByte(backing_store);
+ Node* backing_store_real_size =
+ SizeForString(backing_store_length, is_two_byte);
+ Node* new_backing_store_real_size = SizeForString(new_length, is_two_byte);
+
+ Node* freed_size =
+ __ Int32Sub(backing_store_real_size, new_backing_store_real_size);
+
+ // Right-trimming code inspired by heap.cc:CreateFillerObjectAtImpl
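+ // The freed tail of the backing store must stay iterable by the GC, so it
+ // is overwritten with a filler object of the appropriate size.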
+ IfThenElse(
+ __ Word32Equal(freed_size, __ Int32Constant(0)),
+ []() {
+ // If the filler has size 0, do nothing.
+ },
+ [&]() {
+ Node* filler_map_location =
+ __ IntPtrAdd(backing_store, new_backing_store_real_size);
+ IfThenElse(
+ __ Word32Equal(freed_size, __ Int32Constant(kTaggedSize)),
+ [&]() {
+ // If the filler has size kTaggedSize, insert
+ // one_pointer_filler_map()
+ __ StoreField(
+ AccessBuilder::ForMap(kNoWriteBarrier), filler_map_location,
+ __ HeapConstant(factory()->one_pointer_filler_map()));
+ },
+ [&]() {
+ IfThenElse(
+ __ Word32Equal(freed_size, __ Int32Constant(2 * kTaggedSize)),
+ [&]() {
+ // If the filler has size kTaggedSize*2, insert
+ // two_pointer_filler_map()
+ __ StoreField(
+ AccessBuilder::ForMap(kNoWriteBarrier),
+ filler_map_location,
+ __ HeapConstant(factory()->two_pointer_filler_map()));
+ },
+ [&]() {
+ // Otherwise, insert free_space_map(), with the proper size.
+ __ StoreField(AccessBuilder::ForMap(kNoWriteBarrier),
+ filler_map_location,
+ __ HeapConstant(factory()->free_space_map()));
+ __ StoreField(AccessBuilder::ForFreeSpaceSize(),
+ filler_map_location,
+ ChangeIntPtrToSmi(freed_size));
+ });
+ });
+ });
+
+ // Updating backing store length after trimming
+ __ StoreField(AccessBuilder::ForStringLength(), backing_store, new_length);
+
+ // Setting the padding bytes to 0
+ {
+ Node* end =
+ __ IntPtrSub(__ IntPtrAdd(backing_store, new_backing_store_real_size),
+ __ IntPtrConstant(kHeapObjectTag));
+ Node* start = __ IntPtrSub(
+ end, __ IntPtrSub(new_backing_store_real_size,
+ __ IntPtrAdd(__ IntPtrConstant(String::kHeaderSize),
+ ChangeInt32ToIntPtr(__ Word32Shl(
+ new_length, is_two_byte)))));
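+ // {start} points just past the last character of the trimmed string and
+ // {end} at the end of the trimmed backing store, so this loop zeroes the
+ // trailing padding bytes one by one.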
+ auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
+ auto done = __ MakeLabel();
+ __ Goto(&loop, start);
+ __ Bind(&loop);
+ Node* addr = loop.PhiAt(0);
+ Node* check = __ UintLessThan(addr, end);
+ __ GotoIfNot(check, &done);
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+ addr, 0, __ Int32Constant(0));
+ __ Goto(&loop, __ IntPtrAdd(addr, __ IntPtrConstant(1)));
+
+ __ Bind(&done);
+ }
+
+ // Overwriting {node} with a filler object, so that we don't leave around a
+ // potentially-too-small SlicedString. Note that:
+ // - the GC can still see too-small SlicedStrings, but it shouldn't care
+ // except in debug builds, and even then, the isolate has a
+ // has_turbofan_string_builders method that tells the GC to ignore
+ // too-small SlicedStrings when verifying heap integrity.
+ // - overwriting {node} with a filler is not strictly needed, but is
+ // relatively cheap, and useful to catch bugs (e.g., if after a string
+ // builder, we get an exception saying "Expected String, got FreeSpace",
+ // we'll have an idea of what went wrong!).
+ __ StoreField(AccessBuilder::ForMap(kNoWriteBarrier), node,
+ __ HeapConstant(factory()->free_space_map()));
+ __ StoreField(AccessBuilder::ForFreeSpaceSize(), node,
+ ChangeInt32ToSmi(__ Int32Constant(SlicedString::kSize)));
+
+ return backing_store;
+}
+
Node* EffectControlLinearizer::LowerStringConcat(Node* node) {
- Node* lhs = node->InputAt(1);
- Node* rhs = node->InputAt(2);
+ if (string_builder_optimizer_->IsFirstConcatInStringBuilder(node)) {
+ // This is the first node of a string builder. We thus need to create and
+ // initialize the string builder's backing store and SlicedString.
+ OneOrTwoByteAnalysis::State one_or_two_byte =
+ string_builder_optimizer_->GetOneOrTwoByte(node);
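+ // {one_or_two_byte} is the result of the static 1-byte/2-byte analysis:
+ // kOneByte and kTwoByte mean the representation is statically known;
+ // otherwise it is computed dynamically below.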
+
+ int left_length = GetLiteralStringLen(node->InputAt(1), broker());
+ int right_length = GetLiteralStringLen(node->InputAt(2), broker());
+
+ int initial_length = left_length + right_length;
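+ // The backing store is over-allocated (4x the initial length) so that the
+ // following concatenations of the string builder usually do not need to
+ // reallocate it right away.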
+ int backing_store_initial_length = initial_length * 4;
+
+ // Creating the backing store.
+ Node* is_one_byte =
+ one_or_two_byte == OneOrTwoByteAnalysis::State::kOneByte
+ ? __ Int32Constant(1)
+ : one_or_two_byte == OneOrTwoByteAnalysis::State::kTwoByte
+ ? __ Int32Constant(0)
+ : __ Word32And(ConstStringIsOneByte(node->InputAt(1)),
+ ConstStringIsOneByte(node->InputAt(2)));
+
+ Node* length = __ Int32Constant(initial_length);
+ Node* backing_store_length = __ Int32Constant(backing_store_initial_length);
+ Node* backing_store = AllocateSeqString(backing_store_length, is_one_byte);
+
+ // Storing the first two strings into the backing store.
+ if (left_length != 0) {
+ StoreLiteralStringToBuffer(backing_store, __ Int32Constant(0),
+ node->InputAt(1), is_one_byte);
+ }
+ if (right_length != 0) {
+ StoreLiteralStringToBuffer(backing_store, __ Int32Constant(left_length),
+ node->InputAt(2), is_one_byte);
+ }
- Callable const callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);
+ // Creating the SlicedString view into the backing store
+ Node* sliced_string = nullptr;
+ if (one_or_two_byte == OneOrTwoByteAnalysis::State::kOneByte) {
+ // Allocating 1-byte sliced string
+ sliced_string = AllocateOneByteSlicedString();
+ } else if (one_or_two_byte == OneOrTwoByteAnalysis::State::kTwoByte) {
+ // Allocating 2-byte sliced string
+ sliced_string = AllocateTwoByteSlicedString();
+ } else {
+ // Dynamically choosing to allocate a 1-byte or 2-byte sliced string
+ auto if_true = __ MakeLabel(), if_false = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+ __ Branch(is_one_byte, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Goto(&done, AllocateOneByteSlicedString());
+ __ Bind(&if_false);
+ __ Goto(&done, AllocateTwoByteSlicedString());
+ __ Bind(&done);
+ sliced_string = done.PhiAt(0);
+ }
+ // The following StoreFields initialize the SlicedString (except for its map
+ // field, which was already initialized by AllocateOneByteSlicedString or
+ // AllocateTwoByteSlicedString).
+ __ StoreField(AccessBuilder::ForNameRawHashField(), sliced_string,
+ __ Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForSlicedStringParent(), sliced_string,
+ backing_store);
+ __ StoreField(AccessBuilder::ForSlicedStringOffset(), sliced_string,
+ __ Int32Constant(0));
+ __ StoreField(AccessBuilder::ForStringLength(), sliced_string, length);
+
+ return sliced_string;
+ } else if (string_builder_optimizer_->ConcatIsInStringBuilder(node)) {
+ OneOrTwoByteAnalysis::State one_or_two_byte =
+ string_builder_optimizer_->GetOneOrTwoByte(node);
+
+ int literal_length = GetLiteralStringLen(node->InputAt(2), broker());
+
+ Node* sliced_string = node->InputAt(1);
+ Node* current_length =
+ __ LoadField(AccessBuilder::ForStringLength(), sliced_string);
+ Node* init_backing_store =
+ __ LoadField(AccessBuilder::ForSlicedStringParent(), sliced_string);
+ Node* max_length =
+ __ LoadField(AccessBuilder::ForStringLength(), init_backing_store);
+
+ // Checking if we need to convert from 1-byte to 2-byte
+ Node* backing_store_is_onebyte =
+ one_or_two_byte == OneOrTwoByteAnalysis::State::kOneByte
+ ? __ Int32Constant(1)
+ : one_or_two_byte == OneOrTwoByteAnalysis::State::kTwoByte
+ ? __ Int32Constant(0)
+ : StringIsOneByte(init_backing_store);
+ Node* rhs_is_onebyte =
+ one_or_two_byte == OneOrTwoByteAnalysis::State::kOneByte
+ ? __ Int32Constant(1)
+ : one_or_two_byte == OneOrTwoByteAnalysis::State::kTwoByte
+ ? __ Int32Constant(0)
+ : ConstStringIsOneByte(node->InputAt(2));
+ auto has_correct_representation =
+ __ MakeLabel(MachineType::PointerRepresentation());
+ if (one_or_two_byte != OneOrTwoByteAnalysis::State::kOneByte &&
+ one_or_two_byte != OneOrTwoByteAnalysis::State::kTwoByte) {
+ Node* need_to_move_backing_store_to_2_bytes =
+ __ Word32And(backing_store_is_onebyte,
+ __ Word32Equal(rhs_is_onebyte, __ Int32Constant(0)));
+ auto move_backing_store_to_2_bytes = __ MakeDeferredLabel();
+ __ GotoIf(need_to_move_backing_store_to_2_bytes,
+ &move_backing_store_to_2_bytes);
+ __ Goto(&has_correct_representation, init_backing_store);
+
+ // Converting from 1-byte to 2-byte string.
+ __ Bind(&move_backing_store_to_2_bytes);
+ {
+ Node* new_backing_store = ConvertOneByteStringToTwoByte(
+ init_backing_store, max_length, current_length);
+ __ StoreField(AccessBuilder::ForSlicedStringParent(), sliced_string,
+ new_backing_store);
+ __ StoreField(AccessBuilder::ForMap(), sliced_string,
+ __ HeapConstant(factory()->sliced_string_map()));
+ __ Goto(&has_correct_representation, new_backing_store);
+ }
+ } else {
+ // We statically know whether the string is 1-byte or 2-byte, so we know
+ // that we don't need to convert it from 1-byte to 2-byte here.
+ __ Goto(&has_correct_representation, init_backing_store);
+ }
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ // At this point, the backing store has the correct 1-byte or 2-byte
+ // representation.
+ __ Bind(&has_correct_representation);
+ Node* backing_store = has_correct_representation.PhiAt(0);
+ Node* is_one_byte =
+ one_or_two_byte == OneOrTwoByteAnalysis::State::kOneByte
+ ? __ Int32Constant(1)
+ : one_or_two_byte == OneOrTwoByteAnalysis::State::kTwoByte
+ ? __ Int32Constant(0)
+ : __ Word32And(rhs_is_onebyte, backing_store_is_onebyte);
+
+ // Checking if reallocation is needed (if there is not enough free space
+ // left in the backing store).
+ Node* new_length =
+ __ Int32Add(current_length, __ Int32Constant(literal_length));
+ auto needs_realloc = __ MakeLabel();
+ auto add_to_backing_store =
+ __ MakeLabel(MachineType::PointerRepresentation());
+ Node* realloc_cond = __ Int32LessThan(max_length, new_length);
+
+ __ GotoIf(realloc_cond, &needs_realloc);
+ __ Goto(&add_to_backing_store, backing_store);
+
+ // Reallocating backing store.
+ __ Bind(&needs_realloc);
+ {
+ // The new backing store will have a length of min(2*new_length,
+ // String::kMaxLength).
+ // Note that since early in the pipeline, StringConcat is preceded by a
+ // check that the size of the resulting string does not exceed
+ // String::kMaxLength (which the String Builder optimizer does not
+ // remove). So, we can safely assume that `new_length*2` is less than or
+ // equal to `String::kMaxLength*2`. Thus, since `String::kMaxLength` is less
+ // than `max_uint/2` (checked by the static_assert right here),
+ // `new_length*2` never overflows, and we can safely check if it is more
+ // or less than `String::kMaxLength`: if it's more, then we use
+ // String::kMaxLength instead.
+ static_assert(String::kMaxLength <=
+ std::numeric_limits<unsigned int>::max() / 2);
+ Node* new_backing_store_size_maybe_too_large =
+ __ Word32Shl(new_length, __ Int32Constant(1));
+ auto size_computed_lbl = __ MakeLabel(MachineRepresentation::kWord32);
+ __ GotoIf(__ Int32LessThan(new_backing_store_size_maybe_too_large,
+ __ Int32Constant(String::kMaxLength)),
+ &size_computed_lbl, new_backing_store_size_maybe_too_large);
+ __ Goto(&size_computed_lbl, __ Int32Constant(String::kMaxLength));
+
+ __ Bind(&size_computed_lbl);
+ Node* new_backing_store_size = size_computed_lbl.PhiAt(0);
+ // We allocate a new SeqString, and copy the content of the old backing
+ // store into the new one.
+ Node* new_backing_store =
+ one_or_two_byte == OneOrTwoByteAnalysis::State::kOneByte
+ ? AllocateSeqString(new_backing_store_size, /*one_byte*/ true)
+ : one_or_two_byte == OneOrTwoByteAnalysis::State::kTwoByte
+ ? AllocateSeqString(new_backing_store_size, /*one_byte*/ false)
+ : AllocateSeqString(new_backing_store_size, is_one_byte);
+ CopyString(backing_store, new_backing_store, current_length, is_one_byte);
+ __ StoreField(AccessBuilder::ForSlicedStringParent(), sliced_string,
+ new_backing_store);
+ __ Goto(&add_to_backing_store, new_backing_store);
+ }
- return value;
+ // After the (possible) reallocation, simply add the rhs to the backing store.
+ __ Bind(&add_to_backing_store);
+ {
+ Node* real_backing_store = add_to_backing_store.PhiAt(0);
+ StoreLiteralStringToBuffer(real_backing_store, current_length,
+ node->InputAt(2), is_one_byte);
+ __ StoreField(AccessBuilder::ForStringLength(), sliced_string,
+ new_length);
+ }
+
+ if (string_builder_optimizer_->IsStringBuilderEnd(node)) {
+ // If the string builder ends on {node}, then we add the trimming code
+ // right now.
+ return EndStringBuilderConcat(sliced_string);
+ } else {
+ return sliced_string;
+ }
+ } else {
+ Node* lhs = node->InputAt(1);
+ Node* rhs = node->InputAt(2);
+
+ Callable const callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
+ CallDescriptor::kNoFlags,
+ Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);
+
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ lhs, rhs, __ NoContextConstant());
+
+ return value;
+ }
}
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
@@ -2153,6 +3272,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
@@ -2165,6 +3285,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
Node* zero = __ Int32Constant(0);
@@ -2306,6 +3427,7 @@ Node* EffectControlLinearizer::BuildUint32Mod(Node* lhs, Node* rhs) {
Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
// General case for signed integer modulus, with optimization for (unknown)
// power of 2 right hand side.
//
@@ -2313,7 +3435,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// rhs = -rhs
// deopt if rhs == 0
// if lhs < 0 then
- // let lhs_abs = -lsh in
+ // let lhs_abs = -lhs in
// let res = lhs_abs % rhs in
// deopt if res == 0
// -res
@@ -2381,6 +3503,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
Node* zero = __ Int32Constant(0);
@@ -2419,6 +3542,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
@@ -2435,6 +3559,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
@@ -2469,6 +3594,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(SmiValuesAre31Bits());
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2477,6 +3603,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* EffectControlLinearizer::LowerCheckedInt64ToInt32(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2489,6 +3616,7 @@ Node* EffectControlLinearizer::LowerCheckedInt64ToInt32(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt64ToTaggedSigned(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2522,7 +3650,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
__ Branch(check, &done, &if_abort);
__ Bind(&if_abort);
- __ Unreachable(&done);
+ __ Unreachable();
__ Bind(&done);
}
@@ -2532,6 +3660,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
@@ -2542,6 +3671,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
@@ -2568,7 +3698,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
__ Branch(check, &done, &if_abort);
__ Bind(&if_abort);
- __ Unreachable(&done);
+ __ Unreachable();
__ Bind(&done);
}
@@ -2577,6 +3707,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint64ToInt32(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2588,6 +3719,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64ToInt32(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint64ToInt64(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2600,6 +3732,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64ToInt64(Node* node,
Node* EffectControlLinearizer::LowerCheckedUint64ToTaggedSigned(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2672,6 +3805,7 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToIndex(
Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
const CheckMinusZeroParameters& params =
CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2712,6 +3846,7 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64(
Node* EffectControlLinearizer::LowerCheckedFloat64ToInt64(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
const CheckMinusZeroParameters& params =
CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2721,6 +3856,7 @@ Node* EffectControlLinearizer::LowerCheckedFloat64ToInt64(Node* node,
Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
@@ -2731,6 +3867,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
Node* EffectControlLinearizer::LowerCheckedTaggedToArrayIndex(
Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
CheckParameters const& params = CheckParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2783,6 +3920,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToArrayIndex(
Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
const CheckMinusZeroParameters& params =
CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2812,6 +3950,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
const CheckMinusZeroParameters& params =
CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2890,6 +4029,7 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
CheckTaggedInputParameters const& p =
CheckTaggedInputParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2939,6 +4079,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
}
Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
const CheckParameters& params = CheckParametersOf(node->op());
@@ -2958,6 +4099,7 @@ Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
Node* EffectControlLinearizer::LowerCheckedBigIntToBigInt64(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
auto done = __ MakeLabel();
@@ -3012,6 +4154,7 @@ Node* EffectControlLinearizer::LowerCheckedBigIntToBigInt64(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt64Add(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
Node* lhs = node->InputAt(0);
@@ -3027,6 +4170,7 @@ Node* EffectControlLinearizer::LowerCheckedInt64Add(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt64Sub(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
Node* lhs = node->InputAt(0);
@@ -3042,6 +4186,7 @@ Node* EffectControlLinearizer::LowerCheckedInt64Sub(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt64Mul(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
Node* lhs = node->InputAt(0);
@@ -3057,6 +4202,7 @@ Node* EffectControlLinearizer::LowerCheckedInt64Mul(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt64Div(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
auto division = __ MakeLabel();
@@ -3083,6 +4229,7 @@ Node* EffectControlLinearizer::LowerCheckedInt64Div(Node* node,
Node* EffectControlLinearizer::LowerCheckedInt64Mod(Node* node,
Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
auto modulo_op = __ MakeLabel();
@@ -3110,6 +4257,7 @@ Node* EffectControlLinearizer::LowerCheckedInt64Mod(Node* node,
}
Node* EffectControlLinearizer::LowerChangeInt64ToBigInt(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -3125,7 +4273,7 @@ Node* EffectControlLinearizer::LowerChangeInt64ToBigInt(Node* node) {
Node* bitfield =
__ Word32Or(__ Int32Constant(BigInt::LengthBits::encode(1)), sign);
- // We use (value XOR (value >>> 63)) - (value >>> 63) to compute the
+ // We use (value XOR (value >> 63)) - (value >> 63) to compute the
// absolute value, in a branchless fashion.
Node* sign_mask = __ Word64Sar(value, __ Int64Constant(63));
Node* absolute_value = __ Int64Sub(__ Word64Xor(value, sign_mask), sign_mask);
@@ -3136,6 +4284,7 @@ Node* EffectControlLinearizer::LowerChangeInt64ToBigInt(Node* node) {
}
Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -3153,6 +4302,7 @@ Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
}
Node* EffectControlLinearizer::LowerTruncateBigIntToWord64(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(machine()->Is64());
auto done = __ MakeLabel(MachineRepresentation::kWord64);
@@ -3183,6 +4333,7 @@ Node* EffectControlLinearizer::LowerTruncateBigIntToWord64(Node* node) {
}
Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -3230,6 +4381,7 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
}
Node* EffectControlLinearizer::LowerAllocate(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* size = node->InputAt(0);
AllocationType allocation = AllocationTypeOf(node->op());
Node* new_node = __ Allocate(allocation, size);
@@ -3251,6 +4403,7 @@ Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3277,6 +4430,7 @@ Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3284,6 +4438,7 @@ Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
Node* check = ObjectIsSmi(value);
__ GotoIf(check, &if_smi);
+
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* vfalse = __ TaggedEqual(value_map, __ BigIntMapConstant());
__ Goto(&done, vfalse);
@@ -3296,6 +4451,7 @@ Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3321,6 +4477,7 @@ Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3346,6 +4503,7 @@ Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3580,6 +4738,7 @@ Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) {
}
Node* EffectControlLinearizer::LowerNumberIsNaN(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* number = node->InputAt(0);
Node* diff = __ Float64Equal(number, number);
Node* check = __ Word32Equal(diff, __ Int32Constant(0));
@@ -3587,6 +4746,7 @@ Node* EffectControlLinearizer::LowerNumberIsNaN(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_primitive = __ MakeDeferredLabel();
@@ -3596,11 +4756,7 @@ Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
__ GotoIf(check0, &if_primitive);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* check1 = __ Uint32LessThanOrEqual(
- __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+ Node* check1 = JSAnyIsNotPrimitiveHeapObject(value, value_map);
__ GotoIfNot(check1, &if_primitive);
Node* value_bit_field =
@@ -3619,6 +4775,7 @@ Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeLabel();
@@ -3636,6 +4793,7 @@ Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3643,13 +4801,7 @@ Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
__ GotoIf(ObjectIsSmi(value), &if_smi);
- static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* result = __ Uint32LessThanOrEqual(
- __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
- __ Goto(&done, result);
+ __ Goto(&done, JSAnyIsNotPrimitiveHeapObject(value));
__ Bind(&if_smi);
__ Goto(&done, __ Int32Constant(0));
@@ -3659,11 +4811,13 @@ Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
return ObjectIsSmi(value);
}
Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3671,6 +4825,7 @@ Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
Node* check = ObjectIsSmi(value);
__ GotoIf(check, &if_smi);
+
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
@@ -3686,6 +4841,7 @@ Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsSymbol(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3693,6 +4849,7 @@ Node* EffectControlLinearizer::LowerObjectIsSymbol(Node* node) {
Node* check = ObjectIsSmi(value);
__ GotoIf(check, &if_smi);
+
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
@@ -3708,6 +4865,7 @@ Node* EffectControlLinearizer::LowerObjectIsSymbol(Node* node) {
}
Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
auto if_smi = __ MakeDeferredLabel();
@@ -3720,11 +4878,9 @@ Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Word32Equal(
- __ Int32Constant(0),
- __ Word32And(value_bit_field,
- __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask))),
- __ Int32Constant(0));
+ __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -3790,6 +4946,7 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
}
Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
AllocationType const allocation = AllocationTypeOf(node->op());
Node* length = node->InputAt(0);
@@ -3838,6 +4995,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
}
Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
AllocationType const allocation = AllocationTypeOf(node->op());
Node* length = node->InputAt(0);
@@ -3912,6 +5070,10 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
}
Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ if (string_builder_optimizer_->ConcatIsInStringBuilder(node)) {
+ return LowerStringConcat(node);
+ }
Node* length = node->InputAt(0);
Node* first = node->InputAt(1);
Node* second = node->InputAt(2);
@@ -3968,8 +5130,7 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
- return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs);
}
Node* EffectControlLinearizer::LowerSameValueNumbersOnly(Node* node) {
@@ -3983,8 +5144,7 @@ Node* EffectControlLinearizer::LowerSameValueNumbersOnly(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
- return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs);
}
Node* EffectControlLinearizer::LowerNumberSameValue(Node* node) {
@@ -4095,11 +5255,10 @@ Node* EffectControlLinearizer::StringCharCodeAt(Node* receiver,
__ Bind(&if_seqstring);
{
- Node* receiver_is_onebyte = __ Word32Equal(
+ Node* receiver_is_onebyte =
__ Word32Equal(__ Word32And(receiver_instance_type,
__ Int32Constant(kStringEncodingMask)),
- __ Int32Constant(kTwoByteStringTag)),
- __ Int32Constant(0));
+ __ Int32Constant(kOneByteStringTag));
Node* result = LoadFromSeqString(receiver, position, receiver_is_onebyte);
__ Goto(&loop_done, result);
}
@@ -4187,12 +5346,14 @@ Node* EffectControlLinearizer::StringCharCodeAt(Node* receiver,
}
Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
return StringCharCodeAt(receiver, position);
}
Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
@@ -4204,7 +5365,8 @@ Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
__ Int32Constant(0xD800)),
&return_result, BranchHint::kFalse, first_code_unit);
- auto length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
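+  // The string length is zero-extended to pointer width so it can be compared
+  // against the word-sized {next_index} below.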
+ auto length = __ ChangeUint32ToUintPtr(
+ __ LoadField(AccessBuilder::ForStringLength(), receiver));
auto next_index = __ IntAdd(position, __ IntPtrConstant(1));
__ GotoIfNot(__ IntLessThan(next_index, length), &return_result,
first_code_unit);
@@ -4242,6 +5404,10 @@ Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
}
Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ if (string_builder_optimizer_->IsStringBuilderConcatInput(node)) {
+ return node;
+ }
Node* value = node->InputAt(0);
Node* code = __ Word32And(value, __ Uint32Constant(0xFFFF));
@@ -4274,6 +5440,11 @@ Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
Node* vfalse1 =
__ Allocate(AllocationType::kYoung,
__ IntPtrConstant(SeqTwoByteString::SizeFor(1)));
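+    // Zero-initialize the last word of the new string so that the character
+    // slot and trailing padding are not left uninitialized.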
+ __ Store(StoreRepresentation(MachineRepresentation::kTaggedSigned,
+ kNoWriteBarrier),
+ vfalse1,
+ SeqTwoByteString::SizeFor(1) - kObjectAlignment - kHeapObjectTag,
+ __ SmiConstant(0));
__ StoreField(AccessBuilder::ForMap(), vfalse1,
__ HeapConstant(factory()->string_map()));
__ StoreField(AccessBuilder::ForNameRawHashField(), vfalse1,
@@ -4295,6 +5466,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
#ifdef V8_INTL_SUPPORT
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* receiver = node->InputAt(0);
Callable callable =
@@ -4309,6 +5481,7 @@ Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
}
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* receiver = node->InputAt(0);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
@@ -4332,6 +5505,7 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
#endif // V8_INTL_SUPPORT
Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* value = node->InputAt(0);
Node* code = value;
@@ -4370,6 +5544,11 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
Node* vfalse1 =
__ Allocate(AllocationType::kYoung,
__ IntPtrConstant(SeqTwoByteString::SizeFor(1)));
+ __ Store(StoreRepresentation(MachineRepresentation::kTaggedSigned,
+ kNoWriteBarrier),
+ vfalse1,
+ SeqTwoByteString::SizeFor(1) - kObjectAlignment - kHeapObjectTag,
+ __ SmiConstant(0));
__ StoreField(AccessBuilder::ForMap(), vfalse1,
__ HeapConstant(factory()->string_map()));
__ StoreField(AccessBuilder::ForNameRawHashField(), vfalse1,
@@ -4410,6 +5589,11 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
Node* vfalse0 =
__ Allocate(AllocationType::kYoung,
__ IntPtrConstant(SeqTwoByteString::SizeFor(2)));
+ __ Store(StoreRepresentation(MachineRepresentation::kTaggedSigned,
+ kNoWriteBarrier),
+ vfalse0,
+ SeqTwoByteString::SizeFor(2) - kObjectAlignment - kHeapObjectTag,
+ __ SmiConstant(0));
__ StoreField(AccessBuilder::ForMap(), vfalse0,
__ HeapConstant(factory()->string_map()));
__ StoreField(AccessBuilder::ForNameRawHashField(), vfalse0,
@@ -4429,6 +5613,7 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
}
Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* subject = node->InputAt(0);
Node* search_string = node->InputAt(1);
Node* position = node->InputAt(2);
@@ -4440,10 +5625,11 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
- search_string, position, __ NoContextConstant());
+ search_string, position);
}
Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* string = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -4455,15 +5641,44 @@ Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
- index, __ NoContextConstant());
+ index);
}
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* subject = node->InputAt(0);
return __ LoadField(AccessBuilder::ForStringLength(), subject);
}
+Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ Callable callable = Builtins::CallableFor(isolate(), Builtin::kStringEqual);
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+ Node* lhs_length = __ LoadField(AccessBuilder::ForStringLength(), lhs);
+ Node* rhs_length = __ LoadField(AccessBuilder::ForStringLength(), rhs);
+
+ auto if_length_equal = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
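+  // Strings of different lengths cannot be equal, so the StringEqual builtin
+  // is only called when the lengths match.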
+ __ GotoIf(__ Word32Equal(lhs_length, rhs_length), &if_length_equal);
+ __ Goto(&done, __ FalseConstant());
+
+ __ Bind(&if_length_equal);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
+ rhs, lhs_length);
+ __ Goto(&done, result);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Node* node) {
Node* lhs = node->InputAt(0);
@@ -4474,11 +5689,11 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
- return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs);
}
Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* receiver = node->InputAt(0);
Node* start = ChangeInt32ToIntPtr(node->InputAt(1));
Node* end = ChangeInt32ToIntPtr(node->InputAt(2));
@@ -4491,36 +5706,25 @@ Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
- start, end, __ NoContextConstant());
-}
-
-Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
- return LowerStringComparison(
- Builtins::CallableFor(isolate(), Builtin::kStringEqual), node);
+ start, end);
}
Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
return LowerStringComparison(
Builtins::CallableFor(isolate(), Builtin::kStringLessThan), node);
}
Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
return LowerStringComparison(
Builtins::CallableFor(isolate(), Builtin::kStringLessThanOrEqual), node);
}
Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
-
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kBigIntAddNoThrow);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kFoldable | Operator::kNoThrow);
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntAddNoThrow);
// Check for exception sentinel: Smi is returned to signal BigIntTooBig.
__ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
@@ -4531,17 +5735,9 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
Node* EffectControlLinearizer::LowerBigIntSubtract(Node* node,
Node* frame_state) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
-
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kBigIntSubtractNoThrow);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kFoldable | Operator::kNoThrow);
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntSubtractNoThrow);
// Check for exception sentinel: Smi is returned to signal BigIntTooBig.
__ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
@@ -4552,17 +5748,9 @@ Node* EffectControlLinearizer::LowerBigIntSubtract(Node* node,
Node* EffectControlLinearizer::LowerBigIntMultiply(Node* node,
Node* frame_state) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
-
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kBigIntMultiplyNoThrow);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kFoldable | Operator::kNoThrow);
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntMultiplyNoThrow);
auto if_termreq = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -4596,17 +5784,9 @@ Node* EffectControlLinearizer::LowerBigIntMultiply(Node* node,
Node* EffectControlLinearizer::LowerBigIntDivide(Node* node,
Node* frame_state) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
-
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kBigIntDivideNoThrow);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kFoldable | Operator::kNoThrow);
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntDivideNoThrow);
auto if_termreq = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -4640,17 +5820,9 @@ Node* EffectControlLinearizer::LowerBigIntDivide(Node* node,
Node* EffectControlLinearizer::LowerBigIntModulus(Node* node,
Node* frame_state) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
-
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kBigIntModulusNoThrow);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kFoldable | Operator::kNoThrow);
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntModulusNoThrow);
auto if_termreq = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -4684,17 +5856,54 @@ Node* EffectControlLinearizer::LowerBigIntModulus(Node* node,
Node* EffectControlLinearizer::LowerBigIntBitwiseAnd(Node* node,
Node* frame_state) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntBitwiseAndNoThrow);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtin::kBigIntBitwiseAndNoThrow);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kFoldable | Operator::kNoThrow);
- Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
+ ObjectIsSmi(value), frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntBitwiseOr(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ return CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntBitwiseOrNoThrow);
+}
+
+Node* EffectControlLinearizer::LowerBigIntBitwiseXor(Node* node,
+ Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntBitwiseXorNoThrow);
+
+ // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
+ ObjectIsSmi(value), frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntShiftLeft(Node* node,
+ Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntShiftLeftNoThrow);
+
+ // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
+ ObjectIsSmi(value), frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntShiftRight(Node* node,
+ Node* frame_state) {
+ DCHECK(!v8_flags.turboshaft);
+ Node* value = CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntShiftRightNoThrow);
// Check for exception sentinel: Smi is returned to signal BigIntTooBig.
__ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
@@ -4703,7 +5912,26 @@ Node* EffectControlLinearizer::LowerBigIntBitwiseAnd(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerBigIntEqual(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ return CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntEqual);
+}
+
+Node* EffectControlLinearizer::LowerBigIntLessThan(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ return CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntLessThan);
+}
+
+Node* EffectControlLinearizer::LowerBigIntLessThanOrEqual(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
+ return CallBuiltinForBigIntBinop(node->InputAt(0), node->InputAt(1),
+ Builtin::kBigIntLessThanOrEqual);
+}
+
Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Callable const callable =
Builtins::CallableFor(isolate(), Builtin::kBigIntUnaryMinus);
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -4794,10 +6022,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
Node* val_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), val_map);
- // Check for the common case of ThinString first.
- __ GotoIf(__ Word32Equal(val_instance_type,
- __ Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
- &if_thinstring);
+ // ThinString.
__ Branch(
__ Word32Equal(val_instance_type, __ Int32Constant(THIN_STRING_TYPE)),
&if_thinstring, &if_notthinstring);
@@ -4961,6 +6186,26 @@ Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
__ Int32Constant(kSmiTag));
}
+Node* EffectControlLinearizer::JSAnyIsNotPrimitiveHeapObject(Node* value,
+ Node* value_map) {
+ if (value_map == nullptr) {
+ value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ }
+#if V8_STATIC_ROOTS_BOOL
+  // Assumes that only primitive objects and JS_RECEIVERs are passed here.
+  // All primitive objects' maps are allocated at the start of the read-only
+  // heap, so JS_RECEIVER maps must have larger (compressed) addresses.
+ return __ Uint32LessThan(
+ __ Int32Constant(InstanceTypeChecker::kNonJsReceiverMapLimit), value_map);
+#else
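+  // Without static roots, fall back to an instance-type comparison: all
+  // JS_RECEIVER instance types come last in the instance type enumeration.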
+ static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ return __ Uint32LessThanOrEqual(__ Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ value_instance_type);
+#endif
+}
+
Node* EffectControlLinearizer::SmiMaxValueConstant() {
return __ Int32Constant(Smi::kMaxValue);
}
@@ -5388,13 +6633,45 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
- stack_slot, 0, node);
+ stack_slot, 0, __ BitcastTaggedToWord(node));
return stack_slot;
}
case CTypeInfo::Type::kFloat32: {
return __ TruncateFloat64ToFloat32(node);
}
+ case CTypeInfo::Type::kPointer: {
+ // Check that the value is a HeapObject.
+ Node* const value_is_smi = ObjectIsSmi(node);
+ __ GotoIf(value_is_smi, if_error);
+
+ auto if_null = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineType::PointerRepresentation());
+
+ // Check if the value is null
+ __ GotoIf(__ TaggedEqual(node, __ NullConstant()), &if_null);
+
+ {
+ // Check that the value is a JSExternalObject.
+ Node* const is_external =
+ __ TaggedEqual(__ LoadField(AccessBuilder::ForMap(), node),
+ __ ExternalObjectMapConstant());
+
+ __ GotoIfNot(is_external, if_error);
+
+ Node* external_pointer =
+ __ LoadField(AccessBuilder::ForJSExternalObjectValue(), node);
+
+ __ Goto(&done, external_pointer);
+ }
+
+ // Value is null, signifying a null pointer.
+ __ Bind(&if_null);
+ { __ Goto(&done, __ IntPtrConstant(0)); }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+ }
case CTypeInfo::Type::kSeqOneByteString: {
// Check that the value is a HeapObject.
Node* value_is_smi = ObjectIsSmi(node);
@@ -5414,9 +6691,10 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* length_in_bytes =
__ LoadField(AccessBuilder::ForStringLength(), node);
- Node* data_ptr = __ IntPtrAdd(
- node, __ IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag));
+ Node* data_ptr =
+ __ IntPtrAdd(__ BitcastTaggedToWord(node),
+ __ IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag));
constexpr int kAlign = alignof(FastOneByteString);
constexpr int kSize = sizeof(FastOneByteString);
@@ -5455,7 +6733,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
- stack_slot, 0, node);
+ stack_slot, 0, __ BitcastTaggedToWord(node));
// Check that the value is a JSArray.
Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
@@ -5521,7 +6799,7 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
- stack_slot, 0, node);
+ stack_slot, 0, __ BitcastTaggedToWord(node));
Node* target_address = __ ExternalConstant(ExternalReference::Create(
c_functions[func_index].address, ref_type));
@@ -5644,6 +6922,8 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
case CTypeInfo::Type::kFloat64:
return ChangeFloat64ToTagged(
c_call_result, CheckForMinusZeroMode::kCheckForMinusZero);
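+      // Raw pointers returned by the fast call are wrapped in a
+      // JSExternalObject (or mapped to JS null for a null pointer).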
+ case CTypeInfo::Type::kPointer:
+ return BuildAllocateJSExternalObject(c_call_result);
case CTypeInfo::Type::kSeqOneByteString:
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kApiObject:
@@ -5669,6 +6949,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
}
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
Node* zero = __ IntPtrConstant(0);
@@ -5681,8 +6962,6 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
auto if_double = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
- auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
- auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
// Check if field is a mutable double field.
__ GotoIfNot(__ IntPtrEqual(__ WordAnd(index, one), zero), &if_double);
@@ -5699,8 +6978,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* offset =
__ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2 - 1)),
__ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
- Node* field = __ Load(MachineType::AnyTagged(), object, offset);
- __ Goto(&loaded_field, field);
+ Node* result = __ Load(MachineType::AnyTagged(), object, offset);
+ __ Goto(&done, result);
}
// The field is located in the properties backing store of {object}.
@@ -5714,8 +6993,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ IntPtrConstant(kTaggedSizeLog2 - 1)),
__ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
- Node* field = __ Load(MachineType::AnyTagged(), properties, offset);
- __ Goto(&loaded_field, field);
+ Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
+ __ Goto(&done, result);
}
}
@@ -5723,6 +7002,9 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
// architectures, or a mutable HeapNumber.
__ Bind(&if_double);
{
+ auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
+ auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
+
index = __ WordSar(index, one);
// Check if field is in-object or out-of-object.
@@ -5750,27 +7032,27 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* field = __ Load(MachineType::AnyTagged(), properties, offset);
__ Goto(&loaded_field, field);
}
- }
- __ Bind(&loaded_field);
- {
- Node* field = loaded_field.PhiAt(0);
- // We may have transitioned in-place away from double, so check that
- // this is a HeapNumber -- otherwise the load is fine and we don't need
- // to copy anything anyway.
- __ GotoIf(ObjectIsSmi(field), &done, field);
- Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
- __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
- field);
-
- Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
- __ Goto(&done_double, value);
- }
+ __ Bind(&loaded_field);
+ {
+ Node* field = loaded_field.PhiAt(0);
+ // We may have transitioned in-place away from double, so check that
+ // this is a HeapNumber -- otherwise the load is fine and we don't need
+ // to copy anything anyway.
+ __ GotoIf(ObjectIsSmi(field), &done, field);
+ Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
+ __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
+ field);
+
+ Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
+ __ Goto(&done_double, value);
+ }
- __ Bind(&done_double);
- {
- Node* result = AllocateHeapNumberWithValue(done_double.PhiAt(0));
- __ Goto(&done, result);
+ __ Bind(&done_double);
+ {
+ Node* result = AllocateHeapNumberWithValue(done_double.PhiAt(0));
+ __ Goto(&done, result);
+ }
}
__ Bind(&done);
@@ -6194,7 +7476,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreNumberElement(Node* node) {
// loop peeling can break this assumption.
__ GotoIf(__ Word32Equal(kind, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
&do_store);
- __ Unreachable(&do_store);
+ __ Unreachable();
}
__ Bind(&transition_smi_array); // deferred code.
@@ -6392,6 +7674,7 @@ Node* EffectControlLinearizer::LowerFoldConstant(Node* node) {
}
Node* EffectControlLinearizer::LowerDoubleArrayMinMax(Node* node) {
+ DCHECK(!v8_flags.turboshaft);
DCHECK(node->opcode() == IrOpcode::kDoubleArrayMin ||
node->opcode() == IrOpcode::kDoubleArrayMax);
@@ -6448,13 +7731,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
// Check if {value} is already a JSReceiver.
__ GotoIf(ObjectIsSmi(value), &convert_to_object);
- static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check = __ Uint32LessThan(
- value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
- __ GotoIf(check, &convert_to_object);
+ __ GotoIfNot(JSAnyIsNotPrimitiveHeapObject(value), &convert_to_object);
__ Goto(&done_convert, value);
// Wrap the primitive {value} into a JSPrimitiveWrapper.
@@ -6481,13 +7758,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
// Check if {value} is already a JSReceiver, or null/undefined.
__ GotoIf(ObjectIsSmi(value), &convert_to_object);
- static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check = __ Uint32LessThan(
- value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
- __ GotoIf(check, &convert_to_object);
+ __ GotoIfNot(JSAnyIsNotPrimitiveHeapObject(value), &convert_to_object);
__ Goto(&done_convert, value);
// Wrap the primitive {value} into a JSPrimitiveWrapper.
@@ -7059,17 +8330,75 @@ Node* EffectControlLinearizer::BuildAllocateBigInt(Node* bitfield,
return result;
}
+Node* EffectControlLinearizer::BuildAllocateJSExternalObject(Node* pointer) {
+ auto if_null = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+  // Check whether the incoming pointer is null.
+ __ GotoIf(__ WordEqual(pointer, __ IntPtrConstant(0)), &if_null);
+
+ {
+ Node* external =
+ __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(JSExternalObject::kHeaderSize));
+ __ StoreField(AccessBuilder::ForMap(), external,
+ __ ExternalObjectMapConstant());
+ Node* empty_fixed_array = __ HeapConstant(factory()->empty_fixed_array());
+ __ StoreField(AccessBuilder::ForJSObjectPropertiesOrHash(), external,
+ empty_fixed_array);
+ __ StoreField(AccessBuilder::ForJSObjectElements(), external,
+ empty_fixed_array);
+
+#ifdef V8_ENABLE_SANDBOX
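+    // With the sandbox enabled, the raw pointer is not stored in the object.
+    // Instead, an external pointer table entry is allocated through a C call
+    // and only the resulting handle is stored in the JSExternalObject.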
+ Node* const isolate_ptr =
+ __ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ MachineSignature::Builder builder(graph()->zone(), 1, 2);
+ builder.AddReturn(MachineType::Uint32());
+ builder.AddParam(MachineType::Pointer());
+ builder.AddParam(MachineType::Pointer());
+ Node* allocate_and_initialize_external_pointer_table_entry =
+ __ ExternalConstant(
+ ExternalReference::
+ allocate_and_initialize_external_pointer_table_entry());
+ auto call_descriptor =
+ Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
+ Node* handle = __ Call(common()->Call(call_descriptor),
+ allocate_and_initialize_external_pointer_table_entry,
+ isolate_ptr, pointer);
+
+ __ StoreField(AccessBuilder::ForJSExternalObjectPointerHandle(), external,
+ handle);
+#else
+ __ StoreField(AccessBuilder::ForJSExternalObjectValue(), external, pointer);
+#endif // V8_ENABLE_SANDBOX
+ __ Goto(&done, external);
+ }
+
+  // The pointer is null; return JS null instead of wrapping it.
+ __ Bind(&if_null);
+ { __ Goto(&done, __ NullConstant()); }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
#undef __
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
JSHeapBroker* broker) {
- JSGraphAssembler graph_assembler_(graph, temp_zone,
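+  // When the string builder optimization is enabled (and Turboshaft is not),
+  // analyze the graph up front so that NewConsString and
+  // StringFromSingleCharCode nodes feeding a string builder can be lowered
+  // specially.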
+ StringBuilderOptimizer string_builder_optimizer(graph, schedule, temp_zone,
+ broker);
+ if (v8_flags.turbo_string_builder && !v8_flags.turboshaft) {
+ string_builder_optimizer.Run();
+ }
+ JSGraphAssembler graph_assembler_(broker, graph, temp_zone,
BranchSemantics::kMachine);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
- MaintainSchedule::kDiscard, broker);
+ MaintainSchedule::kDiscard, broker,
+ &string_builder_optimizer);
linearizer.Run();
}
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 9f760f2c0b..758a6188ee 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -125,19 +125,21 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
// occurrences of virtual objects.
class Deduplicator {
public:
- explicit Deduplicator(Zone* zone) : is_duplicate_(zone) {}
+ explicit Deduplicator(Zone* zone) : zone_(zone) {}
bool SeenBefore(const VirtualObject* vobject) {
- VirtualObject::Id id = vobject->id();
- if (id >= is_duplicate_.size()) {
- is_duplicate_.resize(id + 1);
+ DCHECK_LE(vobject->id(), std::numeric_limits<int>::max());
+ int id = static_cast<int>(vobject->id());
+ if (id >= is_duplicate_.length()) {
+ is_duplicate_.Resize(id + 1, zone_);
}
- bool is_duplicate = is_duplicate_[id];
- is_duplicate_[id] = true;
+ bool is_duplicate = is_duplicate_.Contains(id);
+ is_duplicate_.Add(id);
return is_duplicate;
}
private:
- ZoneVector<bool> is_duplicate_;
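+  // {is_duplicate_} records the ids of virtual objects seen so far as a bit
+  // vector; {zone_} is used to grow it on demand.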
+ Zone* zone_;
+ BitVector is_duplicate_;
};
void EscapeAnalysisReducer::ReduceFrameStateInputs(Node* node) {
diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc
index 4279048ee1..a2d05eedc3 100644
--- a/deps/v8/src/compiler/fast-api-calls.cc
+++ b/deps/v8/src/compiler/fast-api-calls.cc
@@ -31,6 +31,7 @@ ElementsKind GetTypedArrayElementsKind(CTypeInfo::Type type) {
case CTypeInfo::Type::kVoid:
case CTypeInfo::Type::kSeqOneByteString:
case CTypeInfo::Type::kBool:
+ case CTypeInfo::Type::kPointer:
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kApiObject:
case CTypeInfo::Type::kAny:
@@ -185,7 +186,7 @@ Node* FastApiCallBuilder::WrapFastCall(const CallDescriptor* call_descriptor,
ExternalReference::fast_api_call_target_address(isolate()));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
- target_address, 0, target);
+ target_address, 0, __ BitcastTaggedToWord(target));
// Disable JS execution
Node* javascript_execution_assert = __ ExternalConstant(
@@ -199,7 +200,7 @@ Node* FastApiCallBuilder::WrapFastCall(const CallDescriptor* call_descriptor,
__ GotoIf(__ Word32Equal(old_scope_value, __ Int32Constant(1)), &do_store);
// We expect that JS execution is enabled, otherwise assert.
- __ Unreachable(&do_store);
+ __ Unreachable();
__ Bind(&do_store);
}
__ Store(StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
@@ -328,10 +329,9 @@ Node* FastApiCallBuilder::Build(const FastApiCallFunctionVector& c_functions,
__ Int32Constant(0));
Node* data_stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t));
- __ Store(
- StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- data_stack_slot, 0, data_argument);
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ data_stack_slot, 0, __ BitcastTaggedToWord(data_argument));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index dd4abbd840..08c90bffab 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -60,6 +60,7 @@ FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
} else {
// No frame. Retrieve all parameters relative to stack pointer.
int sp_offset = frame_offset + GetSPToFPOffset();
+ DCHECK_GE(sp_offset, 0);
return FrameOffset::FromStackPointer(sp_offset);
}
}
diff --git a/deps/v8/src/compiler/globals.h b/deps/v8/src/compiler/globals.h
index a20a0044d9..5d715761b1 100644
--- a/deps/v8/src/compiler/globals.h
+++ b/deps/v8/src/compiler/globals.h
@@ -101,6 +101,8 @@ inline std::ostream& operator<<(std::ostream& os,
const int kMaxFastLiteralDepth = 3;
const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
+enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 54b8d15afb..f673feca22 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -105,7 +105,8 @@ TNode<HeapObject> JSGraphAssembler::HeapConstant(Handle<HeapObject> object) {
}
TNode<Object> JSGraphAssembler::Constant(const ObjectRef& ref) {
- return TNode<Object>::UncheckedCast(AddClonedNode(jsgraph()->Constant(ref)));
+ return TNode<Object>::UncheckedCast(
+ AddClonedNode(jsgraph()->Constant(ref, broker())));
}
TNode<Number> JSGraphAssembler::NumberConstant(double value) {
@@ -200,6 +201,13 @@ TNode<UintPtrT> GraphAssembler::UintPtrDiv(TNode<UintPtrT> left,
: TNode<UintPtrT>::UncheckedCast(Uint32Div(left, right));
}
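+// Zero-extends a 32-bit value to pointer width on 64-bit targets; on 32-bit
+// targets this is just a cast.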
+TNode<UintPtrT> GraphAssembler::ChangeUint32ToUintPtr(
+ SloppyTNode<Uint32T> value) {
+ return kSystemPointerSize == 8
+ ? TNode<UintPtrT>::UncheckedCast(ChangeUint32ToUint64(value))
+ : TNode<UintPtrT>::UncheckedCast(value);
+}
+
#define CHECKED_BINOP_DEF(Name) \
Node* GraphAssembler::Name(Node* left, Node* right) { \
return AddNode( \
@@ -222,7 +230,7 @@ Node* GraphAssembler::TaggedEqual(Node* left, Node* right) {
Node* GraphAssembler::SmiSub(Node* left, Node* right) {
if (COMPRESS_POINTERS_BOOL) {
- return Int32Sub(left, right);
+ return BitcastWord32ToWord64(Int32Sub(left, right));
} else {
return IntSub(left, right);
}
@@ -482,6 +490,7 @@ class ArrayBufferViewAccessBuilder {
instance_type_(instance_type),
candidates_(std::move(candidates)) {
DCHECK_NOT_NULL(assembler_);
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
DCHECK(instance_type_ == JS_DATA_VIEW_TYPE ||
instance_type_ == JS_TYPED_ARRAY_TYPE);
}
@@ -494,6 +503,7 @@ class ArrayBufferViewAccessBuilder {
}
base::Optional<int> TryComputeStaticElementShift() {
+ DCHECK(instance_type_ != JS_RAB_GSAB_DATA_VIEW_TYPE);
if (instance_type_ == JS_DATA_VIEW_TYPE) return 0;
if (candidates_.empty()) return base::nullopt;
int shift = ElementsKindToShiftSize(*candidates_.begin());
@@ -506,6 +516,7 @@ class ArrayBufferViewAccessBuilder {
}
base::Optional<int> TryComputeStaticElementSize() {
+ DCHECK(instance_type_ != JS_RAB_GSAB_DATA_VIEW_TYPE);
if (instance_type_ == JS_DATA_VIEW_TYPE) return 1;
if (candidates_.empty()) return base::nullopt;
int size = ElementsKindToByteSize(*candidates_.begin());
@@ -579,8 +590,7 @@ class ArrayBufferViewAccessBuilder {
.Then([&]() { return unchecked_byte_length; })
.Else([&]() { return a.UintPtrConstant(0); })
.Value();
- return a.UintPtrDiv(byte_length,
- TNode<UintPtrT>::UncheckedCast(element_size));
+ return a.UintPtrDiv(byte_length, a.ChangeUint32ToUintPtr(element_size));
};
// 3) Length-tracking backed by RAB (JSArrayBuffer stores the length)
@@ -597,7 +607,7 @@ class ArrayBufferViewAccessBuilder {
.Then([&]() {
// length = floor((byte_length - byte_offset) / element_size)
return a.UintPtrDiv(a.UintPtrSub(byte_length, byte_offset),
- TNode<UintPtrT>::UncheckedCast(element_size));
+ a.ChangeUint32ToUintPtr(element_size));
})
.Else([&]() { return a.UintPtrConstant(0); })
.ExpectTrue()
@@ -618,7 +628,7 @@ class ArrayBufferViewAccessBuilder {
UseInfo::Word());
return a.UintPtrDiv(a.UintPtrSub(byte_length, byte_offset),
- TNode<UintPtrT>::UncheckedCast(element_size));
+ a.ChangeUint32ToUintPtr(element_size));
};
return a.MachineSelectIf<UintPtrT>(length_tracking_bit)
@@ -793,7 +803,7 @@ TNode<Number> JSGraphAssembler::TypedArrayLength(
TNode<Uint32T> JSGraphAssembler::LookupByteShiftForElementsKind(
TNode<Uint32T> elements_kind) {
- TNode<Uint32T> index = TNode<Uint32T>::UncheckedCast(Int32Sub(
+ TNode<UintPtrT> index = ChangeUint32ToUintPtr(Int32Sub(
elements_kind, Uint32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)));
TNode<RawPtrT> shift_table = TNode<RawPtrT>::UncheckedCast(ExternalConstant(
ExternalReference::
@@ -804,7 +814,7 @@ TNode<Uint32T> JSGraphAssembler::LookupByteShiftForElementsKind(
TNode<Uint32T> JSGraphAssembler::LookupByteSizeForElementsKind(
TNode<Uint32T> elements_kind) {
- TNode<Uint32T> index = TNode<Uint32T>::UncheckedCast(Int32Sub(
+ TNode<UintPtrT> index = ChangeUint32ToUintPtr(Int32Sub(
elements_kind, Uint32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)));
TNode<RawPtrT> size_table = TNode<RawPtrT>::UncheckedCast(ExternalConstant(
ExternalReference::
@@ -841,6 +851,12 @@ TNode<Object> JSGraphAssembler::JSCallRuntime2(Runtime::FunctionId function_id,
});
}
+Node* JSGraphAssembler::Chained(const Operator* op, Node* input) {
+ DCHECK_EQ(op->ValueInputCount(), 1);
+ return AddNode(
+ graph()->NewNode(common()->Chained(op), input, effect(), control()));
+}
+
Node* GraphAssembler::TypeGuard(Type type, Node* value) {
return AddNode(
graph()->NewNode(common()->TypeGuard(type), value, effect(), control()));
@@ -856,8 +872,7 @@ Node* GraphAssembler::DebugBreak() {
graph()->NewNode(machine()->DebugBreak(), effect(), control()));
}
-Node* GraphAssembler::Unreachable(
- GraphAssemblerLabel<0u>* block_updater_successor) {
+Node* GraphAssembler::Unreachable() {
Node* result = UnreachableWithoutConnectToEnd();
ConnectUnreachableToEnd();
InitializeEffectControl(nullptr, nullptr);
@@ -882,7 +897,7 @@ Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, int offset,
Node* value) {
- return Store(rep, object, Int32Constant(offset), value);
+ return Store(rep, object, IntPtrConstant(offset), value);
}
Node* GraphAssembler::Load(MachineType type, Node* object, Node* offset) {
@@ -891,7 +906,7 @@ Node* GraphAssembler::Load(MachineType type, Node* object, Node* offset) {
}
Node* GraphAssembler::Load(MachineType type, Node* object, int offset) {
- return Load(type, object, Int32Constant(offset));
+ return Load(type, object, IntPtrConstant(offset));
}
Node* GraphAssembler::StoreUnaligned(MachineRepresentation rep, Node* object,
@@ -927,6 +942,18 @@ Node* GraphAssembler::ProtectedLoad(MachineType type, Node* object,
offset, effect(), control()));
}
+Node* GraphAssembler::LoadTrapOnNull(MachineType type, Node* object,
+ Node* offset) {
+ return AddNode(graph()->NewNode(machine()->LoadTrapOnNull(type), object,
+ offset, effect(), control()));
+}
+
+Node* GraphAssembler::StoreTrapOnNull(StoreRepresentation rep, Node* object,
+ Node* offset, Node* value) {
+ return AddNode(graph()->NewNode(machine()->StoreTrapOnNull(rep), object,
+ offset, value, effect(), control()));
+}
+
Node* GraphAssembler::Retain(Node* buffer) {
return AddNode(graph()->NewNode(common()->Retain(), buffer, effect()));
}
@@ -936,6 +963,11 @@ Node* GraphAssembler::IntPtrAdd(Node* a, Node* b) {
machine()->Is64() ? machine()->Int64Add() : machine()->Int32Add(), a, b));
}
+Node* GraphAssembler::IntPtrSub(Node* a, Node* b) {
+ return AddNode(graph()->NewNode(
+ machine()->Is64() ? machine()->Int64Sub() : machine()->Int32Sub(), a, b));
+}
+
TNode<Number> JSGraphAssembler::PlainPrimitiveToNumber(TNode<Object> value) {
return AddNode<Number>(graph()->NewNode(
PlainPrimitiveToNumberOperator(), PlainPrimitiveToNumberBuiltinConstant(),
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 454852cee8..e474b0f6d1 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -79,7 +79,7 @@ class Reducer;
V(Float64Sub) \
V(Int32Add) \
V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
+ T(Int32LessThanOrEqual, BoolT, Int32T, Int32T) \
V(Int32Mul) \
V(Int32Sub) \
V(Int64Add) \
@@ -88,7 +88,7 @@ class Reducer;
V(IntLessThan) \
V(IntMul) \
V(IntSub) \
- V(Uint32LessThan) \
+ T(Uint32LessThan, BoolT, Uint32T, Uint32T) \
T(Uint32LessThanOrEqual, BoolT, Uint32T, Uint32T) \
V(Uint64LessThan) \
T(Uint64LessThanOrEqual, BoolT, Uint64T, Uint64T) \
@@ -134,29 +134,30 @@ class Reducer;
V(Uint64Div) \
V(Uint64Mod)
-#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
- V(AllocateInOldGenerationStub, Code) \
- V(AllocateInYoungGenerationStub, Code) \
- V(AllocateRegularInOldGenerationStub, Code) \
- V(AllocateRegularInYoungGenerationStub, Code) \
- V(BigIntMap, Map) \
- V(BooleanMap, Map) \
- V(EmptyString, String) \
- V(False, Boolean) \
- V(FixedArrayMap, Map) \
- V(FixedDoubleArrayMap, Map) \
- V(WeakFixedArrayMap, Map) \
- V(HeapNumberMap, Map) \
- V(MinusOne, Number) \
- V(NaN, Number) \
- V(NoContext, Object) \
- V(Null, Oddball) \
- V(One, Number) \
- V(TheHole, Oddball) \
- V(ToNumberBuiltin, Code) \
- V(PlainPrimitiveToNumberBuiltin, Code) \
- V(True, Boolean) \
- V(Undefined, Oddball) \
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+ V(AllocateInOldGenerationStub, InstructionStream) \
+ V(AllocateInYoungGenerationStub, InstructionStream) \
+ V(AllocateRegularInOldGenerationStub, InstructionStream) \
+ V(AllocateRegularInYoungGenerationStub, InstructionStream) \
+ V(BigIntMap, Map) \
+ V(BooleanMap, Map) \
+ V(EmptyString, String) \
+ V(ExternalObjectMap, Map) \
+ V(False, Boolean) \
+ V(FixedArrayMap, Map) \
+ V(FixedDoubleArrayMap, Map) \
+ V(WeakFixedArrayMap, Map) \
+ V(HeapNumberMap, Map) \
+ V(MinusOne, Number) \
+ V(NaN, Number) \
+ V(NoContext, Object) \
+ V(Null, Oddball) \
+ V(One, Number) \
+ V(TheHole, Oddball) \
+ V(ToNumberBuiltin, InstructionStream) \
+ V(PlainPrimitiveToNumberBuiltin, InstructionStream) \
+ V(True, Boolean) \
+ V(Undefined, Oddball) \
V(Zero, Number)
class GraphAssembler;
@@ -360,6 +361,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
TNode<UintPtrT> UintPtrAdd(TNode<UintPtrT> left, TNode<UintPtrT> right);
TNode<UintPtrT> UintPtrSub(TNode<UintPtrT> left, TNode<UintPtrT> right);
TNode<UintPtrT> UintPtrDiv(TNode<UintPtrT> left, TNode<UintPtrT> right);
+ TNode<UintPtrT> ChangeUint32ToUintPtr(SloppyTNode<Uint32T> value);
#ifdef V8_MAP_PACKING
Node* PackMapWord(TNode<Map> map);
@@ -369,14 +371,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* DebugBreak();
- // Unreachable nodes are similar to Goto in that they reset effect/control to
- // nullptr and it's thus not possible to append other nodes without first
- // binding a new label.
- // The block_updater_successor label is a crutch to work around block updater
- // weaknesses (see the related comment in ConnectUnreachableToEnd); if the
- // block updater exists, we cannot connect unreachable to end, instead we
- // must preserve the Goto pattern.
- Node* Unreachable(GraphAssemblerLabel<0u>* block_updater_successor = nullptr);
+ Node* Unreachable();
// This special variant doesn't connect the Unreachable node to end, and does
// not reset current effect/control. Intended only for special use-cases like
// lowering DeadValue.
@@ -419,9 +414,13 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* ProtectedStore(MachineRepresentation rep, Node* object, Node* offset,
Node* value);
Node* ProtectedLoad(MachineType type, Node* object, Node* offset);
+ Node* LoadTrapOnNull(MachineType type, Node* object, Node* offset);
+ Node* StoreTrapOnNull(StoreRepresentation rep, Node* object, Node* offset,
+ Node* value);
Node* Retain(Node* buffer);
Node* IntPtrAdd(Node* a, Node* b);
+ Node* IntPtrSub(Node* a, Node* b);
Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
Node* condition, Node* frame_state);
@@ -944,11 +943,13 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
// Constructs a JSGraphAssembler. If {schedule} is not null, the graph
// assembler will maintain the schedule as it updates blocks.
JSGraphAssembler(
- JSGraph* jsgraph, Zone* zone, BranchSemantics branch_semantics,
+ JSHeapBroker* broker, JSGraph* jsgraph, Zone* zone,
+ BranchSemantics branch_semantics,
base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
bool mark_loop_exits = false)
: GraphAssembler(jsgraph, zone, branch_semantics, node_changed_callback,
mark_loop_exits),
+ broker_(broker),
jsgraph_(jsgraph),
outermost_catch_scope_(CatchScope::Outermost(zone)),
catch_scope_(&outermost_catch_scope_) {
@@ -1055,7 +1056,9 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
TNode<Object> JSCallRuntime2(Runtime::FunctionId function_id,
TNode<Object> arg0, TNode<Object> arg1,
TNode<Context> context, FrameState frame_state);
+ Node* Chained(const Operator* op, Node* input);
+ JSHeapBroker* broker() const { return broker_; }
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return jsgraph()->isolate(); }
SimplifiedOperatorBuilder* simplified() override {
@@ -1377,6 +1380,7 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
Operator const* PlainPrimitiveToNumberOperator();
private:
+ JSHeapBroker* broker_;
JSGraph* jsgraph_;
SetOncePointer<Operator const> to_number_operator_;
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index a5f7ed1d15..0877945bb5 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -86,6 +86,7 @@ class AdvancedReducer : public Reducer {
// Replace {node} with {replacement}.
virtual void Replace(Node* node, Node* replacement) = 0;
+ virtual void Replace(Node* node, Node* replacement, NodeId max_id) = 0;
// Revisit the {node} again later.
virtual void Revisit(Node* node) = 0;
// Replace value uses of {node} with {value} and effect uses of {node} with
@@ -106,6 +107,9 @@ class AdvancedReducer : public Reducer {
DCHECK_NOT_NULL(editor_);
editor_->Replace(node, replacement);
}
+ void Replace(Node* node, Node* replacement, NodeId max_id) {
+ return editor_->Replace(node, replacement, max_id);
+ }
void Revisit(Node* node) {
DCHECK_NOT_NULL(editor_);
editor_->Revisit(node);
@@ -179,7 +183,7 @@ class V8_EXPORT_PRIVATE GraphReducer
// Replace all uses of {node} with {replacement} if the id of {replacement} is
// less than or equal to {max_id}. Otherwise, replace all uses of {node} whose
// id is less than or equal to {max_id} with the {replacement}.
- void Replace(Node* node, Node* replacement, NodeId max_id);
+ void Replace(Node* node, Node* replacement, NodeId max_id) final;
// Node stack operations.
void Pop();
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 2cd220eb86..88dfbcab1f 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -235,6 +235,11 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
const char* file_prefix = v8_flags.trace_turbo_file_prefix.value();
int optimization_id = info->IsOptimizing() ? info->optimization_id() : 0;
if (strlen(debug_name.get()) > 0) {
+ if (strcmp(debug_name.get(), "WasmJSFastApiCall") == 0) {
+ // Don't clobber one wrapper's output with another's.
+ static int fast_call_wrappers_count = 0;
+ optimization_id = ++fast_call_wrappers_count;
+ }
SNPrintF(filename, "%s-%s-%i", file_prefix, debug_name.get(),
optimization_id);
} else if (info->has_shared_info()) {
@@ -541,9 +546,8 @@ void GraphC1Visualizer::PrintCompilation(const OptimizedCompilationInfo* info) {
PrintStringProperty("name", name.get());
PrintStringProperty("method", "stub");
}
- PrintLongProperty(
- "date",
- static_cast<int64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis()));
+ PrintLongProperty("date",
+ V8::GetCurrentPlatform()->CurrentClockTimeMilliseconds());
}
@@ -770,6 +774,10 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
os_ << " \"" << DoubleRegister::from_code(op.register_code()) << "\"";
} else if (op.IsFloatRegister()) {
os_ << " \"" << FloatRegister::from_code(op.register_code()) << "\"";
+#if defined(V8_TARGET_ARCH_X64)
+ } else if (op.IsSimd256Register()) {
+ os_ << " \"" << Simd256Register::from_code(op.register_code()) << "\"";
+#endif
} else {
DCHECK(op.IsSimd128Register());
os_ << " \"" << Simd128Register::from_code(op.register_code()) << "\"";
@@ -1047,6 +1055,7 @@ std::ostream& operator<<(
const TopLevelLiveRangeAsJSON& top_level_live_range_json) {
int vreg = top_level_live_range_json.range_.vreg();
bool first = true;
+ int instruction_range[2] = {INT32_MAX, -1};
os << "\"" << (vreg > 0 ? vreg : -vreg) << "\":{ \"child_ranges\":[";
for (const LiveRange* child = &(top_level_live_range_json.range_);
child != nullptr; child = child->next()) {
@@ -1057,6 +1066,15 @@ std::ostream& operator<<(
os << ",";
}
os << LiveRangeAsJSON{*child, top_level_live_range_json.code_};
+      // Record the minimum and maximum positions observed within this
+      // TopLevelLiveRange.
+ for (const UseInterval* interval = child->first_interval();
+ interval != nullptr; interval = interval->next()) {
+ if (interval->start().value() < instruction_range[0])
+ instruction_range[0] = interval->start().value();
+ if (interval->end().value() > instruction_range[1])
+ instruction_range[1] = interval->end().value();
+ }
}
}
os << "]";
@@ -1065,7 +1083,8 @@ std::ostream& operator<<(
<< (top_level_live_range_json.range_.IsDeferredFixed() ? "true"
: "false");
}
- os << "}";
+ os << ", \"instruction_range\": [" << instruction_range[0] << ","
+ << instruction_range[1] << "]}";
return os;
}
@@ -1224,6 +1243,10 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
os << DoubleRegister::from_code(allocated->register_code());
} else if (op->IsFloatRegister()) {
os << FloatRegister::from_code(allocated->register_code());
+#if defined(V8_TARGET_ARCH_X64)
+ } else if (op->IsSimd256Register()) {
+ os << Simd256Register::from_code(allocated->register_code());
+#endif
} else {
DCHECK(op->IsSimd128Register());
os << Simd128Register::from_code(allocated->register_code());
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index fa4aeb8031..3ef88e418b 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -20,7 +20,9 @@ Graph::Graph(Zone* zone)
end_(nullptr),
mark_max_(0),
next_node_id_(0),
- decorators_(zone) {
+ decorators_(zone),
+ has_simd_(false),
+ simd_stores_(zone) {
// Nodes use compressed pointers, so zone must support pointer compression.
// If the check fails, ensure the zone is created with kCompressGraphZone
// flag.
@@ -78,6 +80,10 @@ NodeId Graph::NextNodeId() {
void Graph::Print() const { StdoutStream{} << AsRPO(*this); }
+void Graph::RecordSimdStore(Node* store) { simd_stores_.push_back(store); }
+
+ZoneVector<Node*> const& Graph::GetSimdStoreNodes() { return simd_stores_; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index a51142d735..6851e67f53 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -95,6 +95,12 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
// Very simple print API usable in a debugger.
void Print() const;
+ bool HasSimd() const { return has_simd_; }
+ void SetSimd(bool has_simd) { has_simd_ = has_simd; }
+
+ void RecordSimdStore(Node* store);
+ ZoneVector<Node*> const& GetSimdStoreNodes();
+
private:
friend class NodeMarkerBase;
@@ -106,6 +112,8 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Mark mark_max_;
NodeId next_node_id_;
ZoneVector<GraphDecorator*> decorators_;
+ bool has_simd_;
+ ZoneVector<Node*> simd_stores_;
};
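Note: the new Graph members are a has_simd_ flag plus a ZoneVector of recorded store nodes, both allocated in the graph zone. A consumer would presumably gate on the flag before walking the vector; a sketch under that assumption:

    // Hypothetical consumer: skip the pass entirely unless SIMD was recorded.
    void RunSimdStorePass(Graph* graph) {
      if (!graph->HasSimd()) return;
      for (Node* store : graph->GetSimdStoreNodes()) {
        // inspect or transform each recorded Simd128 store here
      }
    }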
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index dac5cbfa26..058fa5fd4d 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -65,15 +65,7 @@ enum ObjectDataKind {
namespace {
-bool IsReadOnlyHeapObjectForCompiler(PtrComprCageBase cage_base,
- HeapObject object) {
- DisallowGarbageCollection no_gc;
- // TODO(jgruber): Remove this compiler-specific predicate and use the plain
- // heap predicate instead. This would involve removing the special cases for
- // builtins.
- return (object.IsCode(cage_base) && Code::cast(object).is_builtin()) ||
- ReadOnlyHeap::Contains(object);
-}
+bool Is64() { return kSystemPointerSize == 8; }
} // namespace
@@ -110,10 +102,9 @@ class ObjectData : public ZoneObject {
kind == kUnserializedReadOnlyHeapObject || kind == kSmi ||
kind == kNeverSerializedHeapObject ||
kind == kBackgroundSerializedHeapObject);
- CHECK_IMPLIES(
- kind == kUnserializedReadOnlyHeapObject,
- object->IsHeapObject() && IsReadOnlyHeapObjectForCompiler(
- isolate, HeapObject::cast(*object)));
+ CHECK_IMPLIES(kind == kUnserializedReadOnlyHeapObject,
+ object->IsHeapObject() &&
+ ReadOnlyHeap::Contains(HeapObject::cast(*object)));
}
#define DECLARE_IS(Name) bool Is##Name() const;
@@ -287,9 +278,10 @@ class JSObjectData : public JSReceiverData {
namespace {
-base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
- JSHeapBroker* broker, JSObjectRef holder, Representation representation,
- FieldIndex field_index) {
+OptionalObjectRef GetOwnFastDataPropertyFromHeap(JSHeapBroker* broker,
+ JSObjectRef holder,
+ Representation representation,
+ FieldIndex field_index) {
base::Optional<Object> constant;
{
DisallowGarbageCollection no_gc;
@@ -301,7 +293,7 @@ base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
// shrunk in size. It might end up at the edge of a heap boundary. If
// we see that the map is the same in this GC epoch, we are safe.
Map map = holder.object()->map(cage_base, kAcquireLoad);
- if (*holder.map().object() != map) {
+ if (*holder.map(broker).object() != map) {
TRACE_BROKER_MISSING(broker, "Map changed for " << holder);
return {};
}
@@ -365,8 +357,9 @@ base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
// Tries to get the property at {dict_index}. If we are within bounds of the
// object, we are guaranteed to see valid heap words even if the data is wrong.
-base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap(
- JSHeapBroker* broker, Handle<JSObject> receiver, InternalIndex dict_index) {
+OptionalObjectRef GetOwnDictionaryPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ InternalIndex dict_index) {
Handle<Object> constant;
{
DisallowGarbageCollection no_gc;
@@ -503,12 +496,19 @@ class BigIntData : public HeapObjectData {
BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object,
ObjectDataKind kind)
: HeapObjectData(broker, storage, object, kind),
- as_uint64_(object->AsUint64(nullptr)) {}
+ as_uint64_(object->AsUint64(nullptr)),
+ as_int64_(object->AsInt64(&lossless_)) {}
uint64_t AsUint64() const { return as_uint64_; }
+ int64_t AsInt64(bool* lossless) const {
+ *lossless = lossless_;
+ return as_int64_;
+ }
private:
const uint64_t as_uint64_;
+ const int64_t as_int64_;
+ bool lossless_;
};
struct PropertyDescriptor {
@@ -721,9 +721,9 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
return true;
}
-bool JSFunctionRef::IsConsistentWithHeapState() const {
- DCHECK(broker()->IsMainThread());
- return data()->AsJSFunction()->IsConsistentWithHeapState(broker());
+bool JSFunctionRef::IsConsistentWithHeapState(JSHeapBroker* broker) const {
+ DCHECK(broker->IsMainThread());
+ return data()->AsJSFunction()->IsConsistentWithHeapState(broker);
}
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -756,7 +756,7 @@ base::Optional<bool> HeapObjectData::TryGetBooleanValueImpl(
return false;
} else if (o.IsNullOrUndefined(isolate)) {
return false;
- } else if (MapRef{broker, map()}.is_undetectable()) {
+ } else if (MapRef{map()}.is_undetectable()) {
return false; // Undetectable object is false.
} else if (InstanceTypeChecker::IsString(t)) {
// TODO(jgruber): Implement in possible cases.
@@ -917,9 +917,7 @@ bool ObjectRef::equals(const ObjectRef& other) const {
return data_ == other.data_;
}
-Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
-
-ContextRef ContextRef::previous(size_t* depth) const {
+ContextRef ContextRef::previous(JSHeapBroker* broker, size_t* depth) const {
DCHECK_NOT_NULL(depth);
Context current = *object();
@@ -929,14 +927,14 @@ ContextRef ContextRef::previous(size_t* depth) const {
}
// The `previous` field is immutable after initialization and the
// context itself is read through an atomic load.
- return MakeRefAssumeMemoryFence(broker(), current);
+ return MakeRefAssumeMemoryFence(broker, current);
}
-base::Optional<ObjectRef> ContextRef::get(int index) const {
+OptionalObjectRef ContextRef::get(JSHeapBroker* broker, int index) const {
CHECK_LE(0, index);
// Length is immutable after initialization.
if (index >= object()->length(kRelaxedLoad)) return {};
- return TryMakeRef(broker(), object()->get(index));
+ return TryMakeRef(broker, object()->get(index));
}
void JSHeapBroker::InitializeAndStartSerializing() {
@@ -1005,7 +1003,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
return nullptr;
}
- if (IsReadOnlyHeapObjectForCompiler(isolate(), HeapObject::cast(*object))) {
+ if (ReadOnlyHeap::Contains(HeapObject::cast(*object))) {
entry = refs_->LookupOrInsert(object.address());
return zone()->New<ObjectData>(this, &entry->value, object,
kUnserializedReadOnlyHeapObject);
@@ -1034,27 +1032,11 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
Name##Ref ObjectRef::As##Name() const { \
DCHECK(Is##Name()); \
- return Name##Ref(broker(), data()); \
+ return Name##Ref(data()); \
}
HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
-bool ObjectRef::IsCodeT() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return IsCodeDataContainer();
-#else
- return IsCode();
-#endif
-}
-
-CodeTRef ObjectRef::AsCodeT() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return AsCodeDataContainer();
-#else
- return AsCode();
-#endif
-}
-
bool ObjectRef::IsSmi() const { return data()->is_smi(); }
int ObjectRef::AsSmi() const {
@@ -1076,9 +1058,8 @@ bool MapRef::CanInlineElementAccess() const {
if (has_indexed_interceptor()) return false;
ElementsKind kind = elements_kind();
if (IsFastElementsKind(kind)) return true;
- if (IsSharedArrayElementsKind(kind)) return true;
- if (IsTypedArrayElementsKind(kind) && kind != BIGUINT64_ELEMENTS &&
- kind != BIGINT64_ELEMENTS) {
+ if (IsTypedArrayElementsKind(kind) &&
+ (Is64() || (kind != BIGINT64_ELEMENTS && kind != BIGUINT64_ELEMENTS))) {
return true;
}
if (v8_flags.turbo_rab_gsab && IsRabGsabTypedArrayElementsKind(kind) &&
@@ -1089,53 +1070,54 @@ bool MapRef::CanInlineElementAccess() const {
return false;
}
-base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
+OptionalMapRef MapRef::AsElementsKind(JSHeapBroker* broker,
+ ElementsKind kind) const {
const ElementsKind current_kind = elements_kind();
if (kind == current_kind) return *this;
base::Optional<Map> maybe_result = Map::TryAsElementsKind(
- broker()->isolate(), object(), kind, ConcurrencyMode::kConcurrent);
+ broker->isolate(), object(), kind, ConcurrencyMode::kConcurrent);
#ifdef DEBUG
// If starting from an initial JSArray map, TryAsElementsKind must succeed
// and return the expected transitioned JSArray map.
- NativeContextRef native_context = broker()->target_native_context();
- if (equals(native_context.GetInitialJSArrayMap(current_kind))) {
- CHECK_EQ(Map::TryAsElementsKind(broker()->isolate(), object(), kind,
+ NativeContextRef native_context = broker->target_native_context();
+ if (equals(native_context.GetInitialJSArrayMap(broker, current_kind))) {
+ CHECK_EQ(Map::TryAsElementsKind(broker->isolate(), object(), kind,
ConcurrencyMode::kConcurrent)
.value(),
- *native_context.GetInitialJSArrayMap(kind).object());
+ *native_context.GetInitialJSArrayMap(broker, kind).object());
}
#endif // DEBUG
if (!maybe_result.has_value()) {
- TRACE_BROKER_MISSING(broker(), "MapRef::AsElementsKind " << *this);
+ TRACE_BROKER_MISSING(broker, "MapRef::AsElementsKind " << *this);
return {};
}
- return MakeRefAssumeMemoryFence(broker(), maybe_result.value());
+ return MakeRefAssumeMemoryFence(broker, maybe_result.value());
}
bool MapRef::HasOnlyStablePrototypesWithFastElements(
- ZoneVector<MapRef>* prototype_maps) {
+ JSHeapBroker* broker, ZoneVector<MapRef>* prototype_maps) {
DCHECK_NOT_NULL(prototype_maps);
- MapRef prototype_map = prototype().map();
- while (prototype_map.oddball_type() != OddballType::kNull) {
+ MapRef prototype_map = prototype(broker).map(broker);
+ while (prototype_map.oddball_type(broker) != OddballType::kNull) {
if (!prototype_map.IsJSObjectMap() || !prototype_map.is_stable() ||
!IsFastElementsKind(prototype_map.elements_kind())) {
return false;
}
prototype_maps->push_back(prototype_map);
- prototype_map = prototype_map.prototype().map();
+ prototype_map = prototype_map.prototype(broker).map(broker);
}
return true;
}
-bool MapRef::supports_fast_array_iteration() const {
- return SupportsFastArrayIteration(broker(), object());
+bool MapRef::supports_fast_array_iteration(JSHeapBroker* broker) const {
+ return SupportsFastArrayIteration(broker, object());
}
-bool MapRef::supports_fast_array_resize() const {
- return SupportsFastArrayResize(broker(), object());
+bool MapRef::supports_fast_array_resize(JSHeapBroker* broker) const {
+ return SupportsFastArrayResize(broker, object());
}
namespace {
@@ -1152,84 +1134,85 @@ void RecordConsistentJSFunctionViewDependencyIfNeeded(
} // namespace
-base::Optional<FeedbackVectorRef> JSFunctionRef::feedback_vector(
- CompilationDependencies* dependencies) const {
- return raw_feedback_cell(dependencies).feedback_vector();
+OptionalFeedbackVectorRef JSFunctionRef::feedback_vector(
+ JSHeapBroker* broker) const {
+ return raw_feedback_cell(broker).feedback_vector(broker);
}
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack(
- CompilationDependencies* dependencies) const {
+ JSHeapBroker* broker) const {
if (data_->should_access_heap()) {
- return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
+ return object()->ComputeInstanceSizeWithMinSlack(broker->isolate());
}
RecordConsistentJSFunctionViewDependencyIfNeeded(
- broker(), *this, data()->AsJSFunction(),
+ broker, *this, data()->AsJSFunction(),
JSFunctionData::kInitialMapInstanceSizeWithMinSlack);
return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
}
-OddballType MapRef::oddball_type() const {
+OddballType MapRef::oddball_type(JSHeapBroker* broker) const {
if (instance_type() != ODDBALL_TYPE) {
return OddballType::kNone;
}
- Factory* f = broker()->isolate()->factory();
- if (equals(MakeRef(broker(), f->undefined_map()))) {
+ if (equals(broker->undefined_map())) {
return OddballType::kUndefined;
}
- if (equals(MakeRef(broker(), f->null_map()))) {
+ if (equals(broker->null_map())) {
return OddballType::kNull;
}
- if (equals(MakeRef(broker(), f->boolean_map()))) {
+ if (equals(broker->boolean_map())) {
return OddballType::kBoolean;
}
- if (equals(MakeRef(broker(), f->the_hole_map()))) {
+ if (equals(broker->the_hole_map())) {
return OddballType::kHole;
}
- if (equals(MakeRef(broker(), f->uninitialized_map()))) {
+ if (equals(broker->uninitialized_map())) {
return OddballType::kUninitialized;
}
- DCHECK(equals(MakeRef(broker(), f->termination_exception_map())) ||
- equals(MakeRef(broker(), f->arguments_marker_map())) ||
- equals(MakeRef(broker(), f->optimized_out_map())) ||
- equals(MakeRef(broker(), f->stale_register_map())));
+ DCHECK(equals(broker->termination_exception_map()) ||
+ equals(broker->arguments_marker_map()) ||
+ equals(broker->optimized_out_map()) ||
+ equals(broker->stale_register_map()));
return OddballType::kOther;
}
-FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
- return MakeRefAssumeMemoryFence(broker(),
+FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(JSHeapBroker* broker,
+ int index) const {
+ return MakeRefAssumeMemoryFence(broker,
object()->closure_feedback_cell(index));
}
-base::Optional<ObjectRef> JSObjectRef::raw_properties_or_hash() const {
- return TryMakeRef(broker(), object()->raw_properties_or_hash());
+OptionalObjectRef JSObjectRef::raw_properties_or_hash(
+ JSHeapBroker* broker) const {
+ return TryMakeRef(broker, object()->raw_properties_or_hash());
}
-base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt(
- FieldIndex index) const {
+OptionalObjectRef JSObjectRef::RawInobjectPropertyAt(JSHeapBroker* broker,
+ FieldIndex index) const {
CHECK(index.is_inobject());
Handle<Object> value;
{
DisallowGarbageCollection no_gc;
- PtrComprCageBase cage_base = broker()->cage_base();
+ PtrComprCageBase cage_base = broker->cage_base();
Map current_map = object()->map(cage_base, kAcquireLoad);
// If the map changed in some prior GC epoch, our {index} could be
// outside the valid bounds of the cached map.
- if (*map().object() != current_map) {
- TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
+ if (*map(broker).object() != current_map) {
+ TRACE_BROKER_MISSING(broker, "Map change detected in " << *this);
return {};
}
base::Optional<Object> maybe_value =
object()->RawInobjectPropertyAt(cage_base, current_map, index);
if (!maybe_value.has_value()) {
- TRACE_BROKER_MISSING(broker(),
+ TRACE_BROKER_MISSING(broker,
"Unable to safely read property in " << *this);
return {};
}
- value = broker()->CanonicalPersistentHandle(maybe_value.value());
+ value = broker->CanonicalPersistentHandle(maybe_value.value());
}
- return TryMakeRef(broker(), value);
+ return TryMakeRef(broker, value);
}
bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) {
@@ -1248,51 +1231,52 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
}
PropertyDetails MapRef::GetPropertyDetails(
- InternalIndex descriptor_index) const {
+ JSHeapBroker* broker, InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetPropertyDetails(descriptor_index);
+ return instance_descriptors(broker).GetPropertyDetails(descriptor_index);
}
-NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
+NameRef MapRef::GetPropertyKey(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetPropertyKey(descriptor_index);
+ return instance_descriptors(broker).GetPropertyKey(broker, descriptor_index);
}
-bool MapRef::IsFixedCowArrayMap() const {
+bool MapRef::IsFixedCowArrayMap(JSHeapBroker* broker) const {
Handle<Map> fixed_cow_array_map =
- ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map_handle();
- return equals(MakeRef(broker(), fixed_cow_array_map));
+ ReadOnlyRoots(broker->isolate()).fixed_cow_array_map_handle();
+ return equals(MakeRef(broker, fixed_cow_array_map));
}
bool MapRef::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
}
-MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
+MapRef MapRef::FindFieldOwner(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
// TODO(solanes, v8:7790): Consider caching the result of the field owner on
// the descriptor array. It would be useful for same map as well as any
// other map sharing that descriptor array.
return MakeRefAssumeMemoryFence(
- broker(),
- object()->FindFieldOwner(broker()->isolate(), descriptor_index));
+ broker, object()->FindFieldOwner(broker->isolate(), descriptor_index));
}
-base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
- uint32_t index) const {
+OptionalObjectRef StringRef::GetCharAsStringOrUndefined(JSHeapBroker* broker,
+ uint32_t index) const {
String maybe_char;
auto result = ConcurrentLookupIterator::TryGetOwnChar(
- &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
+ &maybe_char, broker->isolate(), broker->local_isolate(), *object(),
index);
if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
- << *this << " at index " << index);
+ TRACE_BROKER_MISSING(broker, "StringRef::GetCharAsStringOrUndefined on "
+ << *this << " at index " << index);
return {};
}
DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
- return TryMakeRef(broker(), maybe_char);
+ return TryMakeRef(broker, maybe_char);
}
bool StringRef::SupportedStringKind() const {
@@ -1303,10 +1287,13 @@ bool StringRef::IsContentAccessible() const {
return data_->kind() != kNeverSerializedHeapObject || SupportedStringKind();
}
-base::Optional<Handle<String>> StringRef::ObjectIfContentAccessible() {
+// TODO(leszeks): The broker is only needed here for tracing, maybe we could get
+// it from a thread local instead.
+base::Optional<Handle<String>> StringRef::ObjectIfContentAccessible(
+ JSHeapBroker* broker) {
if (!IsContentAccessible()) {
TRACE_BROKER_MISSING(
- broker(),
+ broker,
"content for kNeverSerialized unsupported string kind " << *this);
return base::nullopt;
} else {
@@ -1316,18 +1303,21 @@ base::Optional<Handle<String>> StringRef::ObjectIfContentAccessible() {
int StringRef::length() const { return object()->length(kAcquireLoad); }
-base::Optional<uint16_t> StringRef::GetFirstChar() const { return GetChar(0); }
+base::Optional<uint16_t> StringRef::GetFirstChar(JSHeapBroker* broker) const {
+ return GetChar(broker, 0);
+}
-base::Optional<uint16_t> StringRef::GetChar(int index) const {
+base::Optional<uint16_t> StringRef::GetChar(JSHeapBroker* broker,
+ int index) const {
if (!IsContentAccessible()) {
TRACE_BROKER_MISSING(
- broker(),
+ broker,
"get char for kNeverSerialized unsupported string kind " << *this);
return base::nullopt;
}
- if (!broker()->IsMainThread()) {
- return object()->Get(index, broker()->local_isolate());
+ if (!broker->IsMainThread()) {
+ return object()->Get(index, broker->local_isolate());
} else {
// TODO(solanes, v8:7790): Remove this case once the inlining phase is
// done concurrently all the time.
@@ -1335,34 +1325,44 @@ base::Optional<uint16_t> StringRef::GetChar(int index) const {
}
}
-base::Optional<double> StringRef::ToNumber() {
+base::Optional<double> StringRef::ToNumber(JSHeapBroker* broker) {
if (!IsContentAccessible()) {
TRACE_BROKER_MISSING(
- broker(),
+ broker,
"number for kNeverSerialized unsupported string kind " << *this);
return base::nullopt;
}
- return TryStringToDouble(broker()->local_isolate(), object());
+ return TryStringToDouble(broker->local_isolate(), object());
+}
+
+base::Optional<double> StringRef::ToInt(JSHeapBroker* broker, int radix) {
+ if (!IsContentAccessible()) {
+ TRACE_BROKER_MISSING(
+ broker, "toInt for kNeverSerialized unsupported string kind " << *this);
+ return base::nullopt;
+ }
+
+ return TryStringToInt(broker->local_isolate(), object(), radix);
}
int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
return object()->constant_elements().length();
}
-base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
+OptionalObjectRef FixedArrayRef::TryGet(JSHeapBroker* broker, int i) const {
Handle<Object> value;
{
DisallowGarbageCollection no_gc;
CHECK_GE(i, 0);
- value = broker()->CanonicalPersistentHandle(object()->get(i, kAcquireLoad));
+ value = broker->CanonicalPersistentHandle(object()->get(i, kAcquireLoad));
if (i >= object()->length(kAcquireLoad)) {
// Right-trimming happened.
CHECK_LT(i, length());
return {};
}
}
- return TryMakeRef(broker(), value);
+ return TryMakeRef(broker, value);
}
Float64 FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(int i) const {
@@ -1372,8 +1372,9 @@ Float64 FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(int i) const {
return Float64::FromBits(object()->get_representation(i));
}
-Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
- return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
+Handle<ByteArray> BytecodeArrayRef::SourcePositionTable(
+ JSHeapBroker* broker) const {
+ return broker->CanonicalPersistentHandle(object()->SourcePositionTable());
}
Address BytecodeArrayRef::handler_table_address() const {
@@ -1390,17 +1391,17 @@ int BytecodeArrayRef::handler_table_size() const {
return object()->name(); \
}
-#define IF_ACCESS_FROM_HEAP(result, name) \
- if (data_->should_access_heap()) { \
- return MakeRef(broker(), result::cast(object()->name())); \
+#define IF_ACCESS_FROM_HEAP(result, name) \
+ if (data_->should_access_heap()) { \
+ return MakeRef(broker, result::cast(object()->name())); \
}
// Macros for definining a const getter that, depending on the data kind,
// either looks into the heap or into the serialized data.
-#define BIMODAL_ACCESSOR(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
+#define BIMODAL_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name(JSHeapBroker* broker) const { \
+ IF_ACCESS_FROM_HEAP(result, name); \
+ return result##Ref(ObjectRef::data()->As##holder()->name()); \
}
// Like above except that the result type is not an XYZRef.
@@ -1425,8 +1426,8 @@ int BytecodeArrayRef::handler_table_size() const {
return object()->name(); \
}
-ObjectRef AllocationSiteRef::nested_site() const {
- return MakeRefAssumeMemoryFence(broker(), object()->nested_site());
+ObjectRef AllocationSiteRef::nested_site(JSHeapBroker* broker) const {
+ return MakeRefAssumeMemoryFence(broker, object()->nested_site());
}
HEAP_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
@@ -1435,6 +1436,12 @@ HEAP_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
HEAP_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64)
+int64_t BigIntRef::AsInt64(bool* lossless) const {
+ if (data_->should_access_heap()) {
+ return object()->AsInt64(lossless);
+ }
+ return ObjectRef::data()->AsBigInt()->AsInt64(lossless);
+}
int BytecodeArrayRef::register_count() const {
return object()->register_count();
@@ -1455,19 +1462,20 @@ uint64_t HeapNumberRef::value_as_bits() const {
return object()->value_as_bits(kRelaxedLoad);
}
-JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
+JSReceiverRef JSBoundFunctionRef::bound_target_function(
+ JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
+ return MakeRefAssumeMemoryFence(broker, object()->bound_target_function());
}
-ObjectRef JSBoundFunctionRef::bound_this() const {
+ObjectRef JSBoundFunctionRef::bound_this(JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
+ return MakeRefAssumeMemoryFence(broker, object()->bound_this());
}
-FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
+FixedArrayRef JSBoundFunctionRef::bound_arguments(JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
+ return MakeRefAssumeMemoryFence(broker, object()->bound_arguments());
}
// Immutable after initialization.
@@ -1501,26 +1509,29 @@ int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-FixedArrayRef RegExpBoilerplateDescriptionRef::data() const {
+FixedArrayRef RegExpBoilerplateDescriptionRef::data(
+ JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->data());
+ return MakeRefAssumeMemoryFence(broker, object()->data());
}
-StringRef RegExpBoilerplateDescriptionRef::source() const {
+StringRef RegExpBoilerplateDescriptionRef::source(JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->source());
+ return MakeRefAssumeMemoryFence(broker, object()->source());
}
int RegExpBoilerplateDescriptionRef::flags() const { return object()->flags(); }
-base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
+OptionalCallHandlerInfoRef FunctionTemplateInfoRef::call_code(
+ JSHeapBroker* broker) const {
HeapObject call_code = object()->call_code(kAcquireLoad);
if (call_code.IsUndefined()) return base::nullopt;
- return TryMakeRef(broker(), CallHandlerInfo::cast(call_code));
+ return TryMakeRef(broker, CallHandlerInfo::cast(call_code));
}
-bool FunctionTemplateInfoRef::is_signature_undefined() const {
- return object()->signature().IsUndefined(broker()->isolate());
+bool FunctionTemplateInfoRef::is_signature_undefined(
+ JSHeapBroker* broker) const {
+ return object()->signature().IsUndefined(broker->isolate());
}
HEAP_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
@@ -1530,7 +1541,7 @@ HEAP_ACCESSOR_C(FunctionTemplateInfo, int16_t,
allowed_receiver_instance_type_range_end)
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
- MapRef receiver_map) {
+ JSHeapBroker* broker, MapRef receiver_map) {
const HolderLookupResult not_found;
if (!receiver_map.IsJSObjectMap() || (receiver_map.is_access_check_needed() &&
!object()->accept_any_receiver())) {
@@ -1544,7 +1555,7 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (signature.IsUndefined()) {
return HolderLookupResult(CallOptimization::kHolderIsReceiver);
}
- expected_receiver_type = broker()->CanonicalPersistentHandle(
+ expected_receiver_type = broker->CanonicalPersistentHandle(
FunctionTemplateInfo::cast(signature));
if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) {
return HolderLookupResult(CallOptimization::kHolderIsReceiver);
@@ -1552,7 +1563,7 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- HeapObjectRef prototype = receiver_map.prototype();
+ HeapObjectRef prototype = receiver_map.prototype(broker);
if (prototype.IsNull()) return not_found;
if (!expected_receiver_type->IsTemplateFor(prototype.object()->map())) {
return not_found;
@@ -1561,8 +1572,8 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
prototype.AsJSObject());
}
-ObjectRef CallHandlerInfoRef::data() const {
- return MakeRefAssumeMemoryFence(broker(), object()->data());
+ObjectRef CallHandlerInfoRef::data(JSHeapBroker* broker) const {
+ return MakeRefAssumeMemoryFence(broker, object()->data());
}
HEAP_ACCESSOR_C(ScopeInfo, int, ContextLength)
@@ -1570,21 +1581,22 @@ HEAP_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
HEAP_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
HEAP_ACCESSOR_C(ScopeInfo, bool, ClassScopeHasPrivateBrand)
-ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
- return MakeRefAssumeMemoryFence(broker(), object()->OuterScopeInfo());
+ScopeInfoRef ScopeInfoRef::OuterScopeInfo(JSHeapBroker* broker) const {
+ return MakeRefAssumeMemoryFence(broker, object()->OuterScopeInfo());
}
HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id)
-BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
+BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray(
+ JSHeapBroker* broker) const {
CHECK(HasBytecodeArray());
BytecodeArray bytecode_array;
- if (!broker()->IsMainThread()) {
- bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
+ if (!broker->IsMainThread()) {
+ bytecode_array = object()->GetBytecodeArray(broker->local_isolate());
} else {
- bytecode_array = object()->GetBytecodeArray(broker()->isolate());
+ bytecode_array = object()->GetBytecodeArray(broker->isolate());
}
- return MakeRefAssumeMemoryFence(broker(), bytecode_array);
+ return MakeRefAssumeMemoryFence(broker, bytecode_array);
}
#define DEF_SFI_ACCESSOR(type, name) \
@@ -1592,49 +1604,48 @@ BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
#undef DEF_SFI_ACCESSOR
-SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
- const {
- return broker()->IsMainThread()
- ? object()->GetInlineability(broker()->isolate())
- : object()->GetInlineability(broker()->local_isolate());
+SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability(
+ JSHeapBroker* broker) const {
+ return broker->IsMainThread()
+ ? object()->GetInlineability(broker->isolate())
+ : object()->GetInlineability(broker->local_isolate());
}
-ObjectRef FeedbackCellRef::value() const {
+ObjectRef FeedbackCellRef::value(JSHeapBroker* broker) const {
DCHECK(data_->should_access_heap());
- return MakeRefAssumeMemoryFence(broker(), object()->value(kAcquireLoad));
+ return MakeRefAssumeMemoryFence(broker, object()->value(kAcquireLoad));
}
-base::Optional<ObjectRef> MapRef::GetStrongValue(
- InternalIndex descriptor_index) const {
+OptionalObjectRef MapRef::GetStrongValue(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetStrongValue(descriptor_index);
+ return instance_descriptors(broker).GetStrongValue(broker, descriptor_index);
}
-DescriptorArrayRef MapRef::instance_descriptors() const {
+DescriptorArrayRef MapRef::instance_descriptors(JSHeapBroker* broker) const {
return MakeRefAssumeMemoryFence(
- broker(),
- object()->instance_descriptors(broker()->isolate(), kAcquireLoad));
+ broker, object()->instance_descriptors(broker->isolate(), kAcquireLoad));
}
-HeapObjectRef MapRef::prototype() const {
- return MakeRefAssumeMemoryFence(broker(),
+HeapObjectRef MapRef::prototype(JSHeapBroker* broker) const {
+ return MakeRefAssumeMemoryFence(broker,
HeapObject::cast(object()->prototype()));
}
-MapRef MapRef::FindRootMap() const {
+MapRef MapRef::FindRootMap(JSHeapBroker* broker) const {
// TODO(solanes, v8:7790): Consider caching the result of the root map.
- return MakeRefAssumeMemoryFence(broker(),
- object()->FindRootMap(broker()->isolate()));
+ return MakeRefAssumeMemoryFence(broker,
+ object()->FindRootMap(broker->isolate()));
}
-ObjectRef MapRef::GetConstructor() const {
+ObjectRef MapRef::GetConstructor(JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
+ return MakeRefAssumeMemoryFence(broker, object()->GetConstructor());
}
-HeapObjectRef MapRef::GetBackPointer() const {
+HeapObjectRef MapRef::GetBackPointer(JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(),
+ return MakeRefAssumeMemoryFence(broker,
HeapObject::cast(object()->GetBackPointer()));
}
@@ -1649,10 +1660,10 @@ size_t JSTypedArrayRef::length() const {
return object()->length();
}
-HeapObjectRef JSTypedArrayRef::buffer() const {
+HeapObjectRef JSTypedArrayRef::buffer(JSHeapBroker* broker) const {
CHECK(!is_on_heap());
// Immutable after initialization.
- return MakeRef<HeapObject>(broker(), object()->buffer());
+ return MakeRef<HeapObject>(broker, object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
@@ -1697,74 +1708,78 @@ bool StringRef::IsExternalString() const {
Address CallHandlerInfoRef::callback() const { return object()->callback(); }
-ZoneVector<Address> FunctionTemplateInfoRef::c_functions() const {
+ZoneVector<Address> FunctionTemplateInfoRef::c_functions(
+ JSHeapBroker* broker) const {
return GetCFunctions(FixedArray::cast(object()->GetCFunctionOverloads()),
- broker()->zone());
+ broker->zone());
}
-ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
+ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures(
+ JSHeapBroker* broker) const {
return GetCSignatures(FixedArray::cast(object()->GetCFunctionOverloads()),
- broker()->zone());
+ broker->zone());
}
bool StringRef::IsSeqString() const { return object()->IsSeqString(); }
-ScopeInfoRef ContextRef::scope_info() const {
+ScopeInfoRef ContextRef::scope_info(JSHeapBroker* broker) const {
// The scope_info is immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->scope_info());
+ return MakeRefAssumeMemoryFence(broker, object()->scope_info());
}
-MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
+MapRef NativeContextRef::GetFunctionMapFromIndex(JSHeapBroker* broker,
+ int index) const {
DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
CHECK_LT(index, object()->length());
return MakeRefAssumeMemoryFence(
- broker(), Map::cast(object()->get(index, kAcquireLoad)));
+ broker, Map::cast(object()->get(index, kAcquireLoad)));
}
-MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
+MapRef NativeContextRef::GetInitialJSArrayMap(JSHeapBroker* broker,
+ ElementsKind kind) const {
switch (kind) {
case PACKED_SMI_ELEMENTS:
- return js_array_packed_smi_elements_map();
+ return js_array_packed_smi_elements_map(broker);
case HOLEY_SMI_ELEMENTS:
- return js_array_holey_smi_elements_map();
+ return js_array_holey_smi_elements_map(broker);
case PACKED_DOUBLE_ELEMENTS:
- return js_array_packed_double_elements_map();
+ return js_array_packed_double_elements_map(broker);
case HOLEY_DOUBLE_ELEMENTS:
- return js_array_holey_double_elements_map();
+ return js_array_holey_double_elements_map(broker);
case PACKED_ELEMENTS:
- return js_array_packed_elements_map();
+ return js_array_packed_elements_map(broker);
case HOLEY_ELEMENTS:
- return js_array_holey_elements_map();
+ return js_array_holey_elements_map(broker);
default:
UNREACHABLE();
}
}
-#define DEF_NATIVE_CONTEXT_ACCESSOR(ResultType, Name) \
- ResultType##Ref NativeContextRef::Name() const { \
- return MakeRefAssumeMemoryFence( \
- broker(), ResultType::cast(object()->Name(kAcquireLoad))); \
+#define DEF_NATIVE_CONTEXT_ACCESSOR(ResultType, Name) \
+ ResultType##Ref NativeContextRef::Name(JSHeapBroker* broker) const { \
+ return MakeRefAssumeMemoryFence( \
+ broker, ResultType::cast(object()->Name(kAcquireLoad))); \
}
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
#undef DEF_NATIVE_CONTEXT_ACCESSOR
-base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
- const MapRef& map) const {
+OptionalJSFunctionRef NativeContextRef::GetConstructorFunction(
+ JSHeapBroker* broker, const MapRef& map) const {
CHECK(map.IsPrimitiveMap());
switch (map.constructor_function_index()) {
case Map::kNoConstructorFunctionIndex:
return base::nullopt;
case Context::BIGINT_FUNCTION_INDEX:
- return bigint_function();
+ return bigint_function(broker);
case Context::BOOLEAN_FUNCTION_INDEX:
- return boolean_function();
+ return boolean_function(broker);
case Context::NUMBER_FUNCTION_INDEX:
- return number_function();
+ return number_function(broker);
case Context::STRING_FUNCTION_INDEX:
- return string_function();
+ return string_function(broker);
case Context::SYMBOL_FUNCTION_INDEX:
- return symbol_function();
+ return symbol_function(broker);
default:
UNREACHABLE();
}
@@ -1772,32 +1787,31 @@ base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
bool ObjectRef::IsNull() const { return object()->IsNull(); }
-bool ObjectRef::IsNullOrUndefined() const {
+bool ObjectRef::IsNullOrUndefined(JSHeapBroker* broker) const {
if (IsSmi()) return false;
- OddballType type = AsHeapObject().map().oddball_type();
+ OddballType type = AsHeapObject().map(broker).oddball_type(broker);
return type == OddballType::kNull || type == OddballType::kUndefined;
}
-bool ObjectRef::IsTheHole() const {
+bool ObjectRef::IsTheHole(JSHeapBroker* broker) const {
return IsHeapObject() &&
- AsHeapObject().map().oddball_type() == OddballType::kHole;
+ AsHeapObject().map(broker).oddball_type(broker) == OddballType::kHole;
}
-base::Optional<bool> ObjectRef::TryGetBooleanValue() const {
+base::Optional<bool> ObjectRef::TryGetBooleanValue(JSHeapBroker* broker) const {
if (data_->should_access_heap()) {
- return object()->BooleanValue(broker()->isolate());
+ return object()->BooleanValue(broker->isolate());
}
if (IsSmi()) return AsSmi() != 0;
- return data()->AsHeapObject()->TryGetBooleanValue(broker());
+ return data()->AsHeapObject()->TryGetBooleanValue(broker);
}
-Maybe<double> ObjectRef::OddballToNumber() const {
- OddballType type = AsHeapObject().map().oddball_type();
+Maybe<double> ObjectRef::OddballToNumber(JSHeapBroker* broker) const {
+ OddballType type = AsHeapObject().map(broker).oddball_type(broker);
switch (type) {
case OddballType::kBoolean: {
- ObjectRef true_ref = MakeRef<Object>(
- broker(), broker()->isolate()->factory()->true_value());
+ ObjectRef true_ref = broker->true_value();
return this->equals(true_ref) ? Just(1.0) : Just(0.0);
}
case OddballType::kUndefined: {
@@ -1816,15 +1830,14 @@ bool ObjectRef::should_access_heap() const {
return data()->should_access_heap();
}
-base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
- const FixedArrayBaseRef& elements_ref, uint32_t index,
+OptionalObjectRef JSObjectRef::GetOwnConstantElement(
+ JSHeapBroker* broker, const FixedArrayBaseRef& elements_ref, uint32_t index,
CompilationDependencies* dependencies) const {
base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
- *elements_ref.object(), map().elements_kind(), index);
+ broker, *elements_ref.object(), map(broker).elements_kind(), index);
if (!maybe_element.has_value()) return {};
- base::Optional<ObjectRef> result =
- TryMakeRef(broker(), maybe_element.value());
+ OptionalObjectRef result = TryMakeRef(broker, maybe_element.value());
if (result.has_value()) {
dependencies->DependOnOwnConstantElement(*this, index, *result);
}
@@ -1832,7 +1845,8 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
}
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
- FixedArrayBase elements, ElementsKind elements_kind, uint32_t index) const {
+ JSHeapBroker* broker, FixedArrayBase elements, ElementsKind elements_kind,
+ uint32_t index) const {
DCHECK_LE(index, JSObject::kMaxElementIndex);
Handle<JSObject> holder = object();
@@ -1849,7 +1863,7 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
// of `length` below.
if (holder->IsJSArray()) {
Object array_length_obj =
- JSArray::cast(*holder).length(broker()->isolate(), kRelaxedLoad);
+ JSArray::cast(*holder).length(broker->isolate(), kRelaxedLoad);
if (!array_length_obj.IsSmi()) {
// Can't safely read into HeapNumber objects without atomic semantics
// (relaxed would be sufficient due to the guarantees above).
@@ -1865,12 +1879,12 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
Object maybe_element;
auto result = ConcurrentLookupIterator::TryGetOwnConstantElement(
- &maybe_element, broker()->isolate(), broker()->local_isolate(), *holder,
+ &maybe_element, broker->isolate(), broker->local_isolate(), *holder,
elements, elements_kind, index);
if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "JSObject::GetOwnConstantElement on "
- << *this << " at index " << index);
+ TRACE_BROKER_MISSING(broker, "JSObject::GetOwnConstantElement on "
+ << *this << " at index " << index);
return {};
} else if (result == ConcurrentLookupIterator::kNotPresent) {
return {};
@@ -1880,44 +1894,45 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
return maybe_element;
}
-base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
- Representation field_representation, FieldIndex index,
+OptionalObjectRef JSObjectRef::GetOwnFastDataProperty(
+ JSHeapBroker* broker, Representation field_representation, FieldIndex index,
CompilationDependencies* dependencies) const {
- base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
- broker(), *this, field_representation, index);
+ OptionalObjectRef result = GetOwnFastDataPropertyFromHeap(
+ broker, *this, field_representation, index);
if (result.has_value()) {
dependencies->DependOnOwnConstantDataProperty(
- *this, map(), field_representation, index, *result);
+ *this, map(broker), field_representation, index, *result);
}
return result;
}
-base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies) const {
+OptionalObjectRef JSObjectRef::GetOwnDictionaryProperty(
+ JSHeapBroker* broker, InternalIndex index,
+ CompilationDependencies* dependencies) const {
CHECK(index.is_found());
- base::Optional<ObjectRef> result =
- GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
+ OptionalObjectRef result =
+ GetOwnDictionaryPropertyFromHeap(broker, object(), index);
if (result.has_value()) {
dependencies->DependOnOwnConstantDictionaryProperty(*this, index, *result);
}
return result;
}
-ObjectRef JSArrayRef::GetBoilerplateLength() const {
+ObjectRef JSArrayRef::GetBoilerplateLength(JSHeapBroker* broker) const {
// Safe to read concurrently because:
// - boilerplates are immutable after initialization.
// - boilerplates are published into the feedback vector.
// These facts also mean we can expect a valid value.
- return length_unsafe().value();
+ return length_unsafe(broker).value();
}
-base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
- return TryMakeRef(broker(),
- object()->length(broker()->isolate(), kRelaxedLoad));
+OptionalObjectRef JSArrayRef::length_unsafe(JSHeapBroker* broker) const {
+ return TryMakeRef(broker, object()->length(broker->isolate(), kRelaxedLoad));
}
-base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index) const {
+OptionalObjectRef JSArrayRef::GetOwnCowElement(JSHeapBroker* broker,
+ FixedArrayBaseRef elements_ref,
+ uint32_t index) const {
// Note: we'd like to check `elements_ref == elements()` here, but due to
// concurrency this may not hold. The code below must be able to deal with
// concurrent `elements` modifications.
@@ -1926,19 +1941,19 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
// `elements_ref`. The caller has to guarantee consistency at runtime by
// other means (e.g. through a runtime equality check or a compilation
// dependency).
- ElementsKind elements_kind = map().elements_kind();
+ ElementsKind elements_kind = map(broker).elements_kind();
// We only inspect fixed COW arrays, which may only occur for fast
// smi/objects elements kinds.
if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
DCHECK(IsFastElementsKind(elements_kind));
- if (!elements_ref.map().IsFixedCowArrayMap()) return {};
+ if (!elements_ref.map(broker).IsFixedCowArrayMap(broker)) return {};
// As the name says, the `length` read here is unsafe and may not match
// `elements`. We rely on the invariant that any `length` change will
// also result in an `elements` change to make this safe. The `elements`
// consistency check in the caller thus also guards the value of `length`.
- base::Optional<ObjectRef> length_ref = length_unsafe();
+ OptionalObjectRef length_ref = length_unsafe(broker);
if (!length_ref.has_value()) return {};
@@ -1946,24 +1961,25 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
if (!length_ref->IsSmi()) return {};
base::Optional<Object> result = ConcurrentLookupIterator::TryGetOwnCowElement(
- broker()->isolate(), *elements_ref.AsFixedArray().object(), elements_kind,
+ broker->isolate(), *elements_ref.AsFixedArray().object(), elements_kind,
length_ref->AsSmi(), index);
if (!result.has_value()) return {};
- return TryMakeRef(broker(), result.value());
+ return TryMakeRef(broker, result.value());
}
-base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
- return TryMakeRef(broker(), object()->GetCell(cell_index));
+OptionalCellRef SourceTextModuleRef::GetCell(JSHeapBroker* broker,
+ int cell_index) const {
+ return TryMakeRef(broker, object()->GetCell(cell_index));
}
-base::Optional<ObjectRef> SourceTextModuleRef::import_meta() const {
- return TryMakeRef(broker(), object()->import_meta(kAcquireLoad));
+OptionalObjectRef SourceTextModuleRef::import_meta(JSHeapBroker* broker) const {
+ return TryMakeRef(broker, object()->import_meta(kAcquireLoad));
}
-base::Optional<MapRef> HeapObjectRef::map_direct_read() const {
- PtrComprCageBase cage_base = broker()->cage_base();
- return TryMakeRef(broker(), object()->map(cage_base, kAcquireLoad),
+OptionalMapRef HeapObjectRef::map_direct_read(JSHeapBroker* broker) const {
+ PtrComprCageBase cage_base = broker->cage_base();
+ return TryMakeRef(broker, object()->map(cage_base, kAcquireLoad),
kAssumeMemoryFence);
}
@@ -1997,30 +2013,31 @@ OddballType GetOddballType(Isolate* isolate, Map map) {
} // namespace
-HeapObjectType HeapObjectRef::GetHeapObjectType() const {
+HeapObjectType HeapObjectRef::GetHeapObjectType(JSHeapBroker* broker) const {
if (data_->should_access_heap()) {
- Map map = Handle<HeapObject>::cast(object())->map(broker()->cage_base());
+ Map map = Handle<HeapObject>::cast(object())->map(broker->cage_base());
HeapObjectType::Flags flags(0);
if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
if (map.is_callable()) flags |= HeapObjectType::kCallable;
return HeapObjectType(map.instance_type(), flags,
- GetOddballType(broker()->isolate(), map));
+ GetOddballType(broker->isolate(), map));
}
HeapObjectType::Flags flags(0);
- if (map().is_undetectable()) flags |= HeapObjectType::kUndetectable;
- if (map().is_callable()) flags |= HeapObjectType::kCallable;
- return HeapObjectType(map().instance_type(), flags, map().oddball_type());
+ if (map(broker).is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map(broker).is_callable()) flags |= HeapObjectType::kCallable;
+ return HeapObjectType(map(broker).instance_type(), flags,
+ map(broker).oddball_type(broker));
}
-base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
+OptionalJSObjectRef AllocationSiteRef::boilerplate(JSHeapBroker* broker) const {
if (!PointsToLiteral()) return {};
DCHECK(data_->should_access_heap());
- return TryMakeRef(broker(), object()->boilerplate(kAcquireLoad));
+ return TryMakeRef(broker, object()->boilerplate(kAcquireLoad));
}
-base::Optional<FixedArrayBaseRef> JSObjectRef::elements(
- RelaxedLoadTag tag) const {
- return TryMakeRef(broker(), object()->elements(tag));
+OptionalFixedArrayBaseRef JSObjectRef::elements(JSHeapBroker* broker,
+ RelaxedLoadTag tag) const {
+ return TryMakeRef(broker, object()->elements(tag));
}
int FixedArrayBaseRef::length() const {
@@ -2034,14 +2051,14 @@ PropertyDetails DescriptorArrayRef::GetPropertyDetails(
}
NameRef DescriptorArrayRef::GetPropertyKey(
- InternalIndex descriptor_index) const {
- NameRef result = MakeRef(broker(), object()->GetKey(descriptor_index));
+ JSHeapBroker* broker, InternalIndex descriptor_index) const {
+ NameRef result = MakeRef(broker, object()->GetKey(descriptor_index));
CHECK(result.IsUniqueName());
return result;
}
-base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
- InternalIndex descriptor_index) const {
+OptionalObjectRef DescriptorArrayRef::GetStrongValue(
+ JSHeapBroker* broker, InternalIndex descriptor_index) const {
HeapObject heap_object;
if (!object()
->GetValue(descriptor_index)
@@ -2051,25 +2068,27 @@ base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
// Since the descriptors in the descriptor array can be changed in-place
// via DescriptorArray::Replace, we might get a value that we haven't seen
// before.
- return TryMakeRef(broker(), heap_object);
+ return TryMakeRef(broker, heap_object);
}
-base::Optional<FeedbackVectorRef> FeedbackCellRef::feedback_vector() const {
- ObjectRef contents = value();
+OptionalFeedbackVectorRef FeedbackCellRef::feedback_vector(
+ JSHeapBroker* broker) const {
+ ObjectRef contents = value(broker);
if (!contents.IsFeedbackVector()) return {};
return contents.AsFeedbackVector();
}
-base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
- const {
- base::Optional<FeedbackVectorRef> vector = feedback_vector();
+OptionalSharedFunctionInfoRef FeedbackCellRef::shared_function_info(
+ JSHeapBroker* broker) const {
+ OptionalFeedbackVectorRef vector = feedback_vector(broker);
if (!vector.has_value()) return {};
- return vector->shared_function_info();
+ return vector->shared_function_info(broker);
}
-SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
+SharedFunctionInfoRef FeedbackVectorRef::shared_function_info(
+ JSHeapBroker* broker) const {
// Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->shared_function_info());
+ return MakeRefAssumeMemoryFence(broker, object()->shared_function_info());
}
bool NameRef::IsUniqueName() const {
@@ -2096,60 +2115,38 @@ Handle<Object> ObjectRef::object() const {
HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
-JSHeapBroker* ObjectRef::broker() const { return broker_; }
-
ObjectData* ObjectRef::data() const {
- switch (broker()->mode()) {
+#ifdef DEBUG
+ switch (JSHeapBroker::Current()->mode()) {
case JSHeapBroker::kDisabled:
- return data_;
+ break;
case JSHeapBroker::kSerializing:
CHECK_NE(data_->kind(), kUnserializedHeapObject);
- return data_;
+ break;
case JSHeapBroker::kSerialized:
case JSHeapBroker::kRetired:
CHECK_NE(data_->kind(), kUnserializedHeapObject);
- return data_;
+ break;
}
-}
-
-template <class T>
-typename TinyRef<T>::RefType TinyRef<T>::AsRef(JSHeapBroker* broker) const {
- if (data_->kind() == kUnserializedHeapObject &&
- broker->mode() != JSHeapBroker::kDisabled) {
- // Gotta reconstruct to avoid returning a stale unserialized ref.
- return MakeRefAssumeMemoryFence<T>(broker,
- Handle<T>::cast(data_->object()));
- }
- return TryMakeRef<T>(broker, data_).value();
-}
+#endif
-template <class T>
-Handle<T> TinyRef<T>::object() const {
- return Handle<T>::cast(data_->object());
+ return data_;
}
-#define V(Name) \
- template class TinyRef<Name>; \
- /* TinyRef should contain only one pointer. */ \
- static_assert(sizeof(TinyRef<Name>) == kSystemPointerSize);
-HEAP_BROKER_OBJECT_LIST(V)
-#undef V
-
-#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
- Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \
- const { \
- IF_ACCESS_FROM_HEAP(Result, Name); \
- RecordConsistentJSFunctionViewDependencyIfNeeded( \
- broker(), *this, data()->AsJSFunction(), UsedField); \
- return Result##Ref(broker(), data()->AsJSFunction()->Name()); \
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
+ Result##Ref JSFunctionRef::Name(JSHeapBroker* broker) const { \
+ IF_ACCESS_FROM_HEAP(Result, Name); \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker, *this, data()->AsJSFunction(), UsedField); \
+ return Result##Ref(data()->AsJSFunction()->Name()); \
}
-#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(Result, Name, UsedField) \
- Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
- IF_ACCESS_FROM_HEAP_C(Name); \
- RecordConsistentJSFunctionViewDependencyIfNeeded( \
- broker(), *this, data()->AsJSFunction(), UsedField); \
- return data()->AsJSFunction()->Name(); \
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(Result, Name, UsedField) \
+ Result JSFunctionRef::Name(JSHeapBroker* broker) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker, *this, data()->AsJSFunction(), UsedField); \
+ return data()->AsJSFunction()->Name(); \
}
// Like JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C but only depend on the
@@ -2157,16 +2154,16 @@ HEAP_BROKER_OBJECT_LIST(V)
// tolerate certain state changes during compilation, e.g. from "has no feedback
// vector" (in which case we would simply do less optimization) to "has feedback
// vector".
-#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C( \
- Result, Name, UsedField, RelevantValue) \
- Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
- IF_ACCESS_FROM_HEAP_C(Name); \
- Result const result = data()->AsJSFunction()->Name(); \
- if (result == RelevantValue) { \
- RecordConsistentJSFunctionViewDependencyIfNeeded( \
- broker(), *this, data()->AsJSFunction(), UsedField); \
- } \
- return result; \
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C( \
+ Result, Name, UsedField, RelevantValue) \
+ Result JSFunctionRef::Name(JSHeapBroker* broker) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ Result const result = data()->AsJSFunction()->Name(); \
+ if (result == RelevantValue) { \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker, *this, data()->AsJSFunction(), UsedField); \
+ } \
+ return result; \
}
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(bool, has_initial_map,
@@ -2191,21 +2188,20 @@ BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C
-CodeTRef JSFunctionRef::code() const {
- CodeT code = object()->code(kAcquireLoad);
- return MakeRefAssumeMemoryFence(broker(), code);
+OptionalCodeRef JSFunctionRef::code(JSHeapBroker* broker) const {
+ return TryMakeRef(broker, object()->code());
}
-NativeContextRef JSFunctionRef::native_context() const {
- return MakeRefAssumeMemoryFence(broker(),
- context().object()->native_context());
+NativeContextRef JSFunctionRef::native_context(JSHeapBroker* broker) const {
+ return MakeRefAssumeMemoryFence(broker,
+ context(broker).object()->native_context());
}
-base::Optional<FunctionTemplateInfoRef>
-SharedFunctionInfoRef::function_template_info() const {
+OptionalFunctionTemplateInfoRef SharedFunctionInfoRef::function_template_info(
+ JSHeapBroker* broker) const {
if (!object()->IsApiFunction()) return {};
- return TryMakeRef(broker(), FunctionTemplateInfo::cast(
- object()->function_data(kAcquireLoad)));
+ return TryMakeRef(broker, FunctionTemplateInfo::cast(
+ object()->function_data(kAcquireLoad)));
}
int SharedFunctionInfoRef::context_header_size() const {
@@ -2216,16 +2212,16 @@ int SharedFunctionInfoRef::context_parameters_start() const {
return object()->scope_info().ParametersStartIndex();
}
-ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
- return MakeRefAssumeMemoryFence(broker(), object()->scope_info(kAcquireLoad));
+ScopeInfoRef SharedFunctionInfoRef::scope_info(JSHeapBroker* broker) const {
+ return MakeRefAssumeMemoryFence(broker, object()->scope_info(kAcquireLoad));
}
-base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
- Handle<Map> map_handle = Handle<Map>::cast(map().object());
+OptionalMapRef JSObjectRef::GetObjectCreateMap(JSHeapBroker* broker) const {
+ Handle<Map> map_handle = Handle<Map>::cast(map(broker).object());
// Note: implemented as an acquire-load.
if (!map_handle->is_prototype_map()) return {};
- Handle<Object> maybe_proto_info = broker()->CanonicalPersistentHandle(
+ Handle<Object> maybe_proto_info = broker->CanonicalPersistentHandle(
map_handle->prototype_info(kAcquireLoad));
if (!maybe_proto_info->IsPrototypeInfo()) return {};
@@ -2234,33 +2230,32 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
->object_create_map(kAcquireLoad);
if (!maybe_object_create_map->IsWeak()) return {};
- return MapRef(broker(),
- broker()->GetOrCreateData(
- maybe_object_create_map->GetHeapObjectAssumeWeak(),
- kAssumeMemoryFence));
+ return MapRef(broker->GetOrCreateData(
+ maybe_object_create_map->GetHeapObjectAssumeWeak(), kAssumeMemoryFence));
}
-bool PropertyCellRef::Cache() const {
+bool PropertyCellRef::Cache(JSHeapBroker* broker) const {
if (data_->should_access_heap()) return true;
- CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
- broker()->mode() == JSHeapBroker::kSerialized);
- return data()->AsPropertyCell()->Cache(broker());
+ CHECK(broker->mode() == JSHeapBroker::kSerializing ||
+ broker->mode() == JSHeapBroker::kSerialized);
+ return data()->AsPropertyCell()->Cache(broker);
}
-bool NativeContextRef::GlobalIsDetached() const {
- ObjectRef proxy_proto = global_proxy_object().map().prototype();
- return !proxy_proto.equals(global_object());
+bool NativeContextRef::GlobalIsDetached(JSHeapBroker* broker) const {
+ ObjectRef proxy_proto =
+ global_proxy_object(broker).map(broker).prototype(broker);
+ return !proxy_proto.equals(global_object(broker));
}
-base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
- NameRef const& name) const {
+OptionalPropertyCellRef JSGlobalObjectRef::GetPropertyCell(
+ JSHeapBroker* broker, NameRef const& name) const {
base::Optional<PropertyCell> maybe_cell =
ConcurrentLookupIterator::TryGetPropertyCell(
- broker()->isolate(), broker()->local_isolate_or_isolate(),
- broker()->target_native_context().global_object().object(),
+ broker->isolate(), broker->local_isolate_or_isolate(),
+ broker->target_native_context().global_object(broker).object(),
name.object());
if (!maybe_cell.has_value()) return {};
- return TryMakeRef(broker(), *maybe_cell);
+ return TryMakeRef(broker, *maybe_cell);
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
@@ -2275,38 +2270,15 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
}
}
-namespace {
-
-unsigned GetInlinedBytecodeSizeImpl(Code code) {
- unsigned value = code.inlined_bytecode_size();
- if (value > 0) {
+unsigned CodeRef::GetInlinedBytecodeSize() const {
+ Code code = *object();
+ const unsigned value = code.inlined_bytecode_size();
+ if (value != 0 && code.marked_for_deoptimization()) {
// Don't report inlined bytecode size if the code object was already
// deoptimized.
- value = code.marked_for_deoptimization() ? 0 : value;
- }
- return value;
-}
-
-} // namespace
-
-unsigned CodeRef::GetInlinedBytecodeSize() const {
- return GetInlinedBytecodeSizeImpl(*object());
-}
-
-unsigned CodeDataContainerRef::GetInlinedBytecodeSize() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- CodeDataContainer codet = *object();
- if (codet.is_off_heap_trampoline()) {
return 0;
}
-
- // Safe to do a relaxed conversion to Code here since CodeT::code field is
- // modified only by GC and the CodeT was acquire-loaded.
- Code code = codet.code(kRelaxedLoad);
- return GetInlinedBytecodeSizeImpl(code);
-#else
- UNREACHABLE();
-#endif // V8_EXTERNAL_CODE_SPACE
+ return value;
}
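With CodeDataContainerRef gone, the two GetInlinedBytecodeSize implementations collapse into this single one, which reports the cached size unless the code has since been marked for deoptimization. A tiny sketch of that predicate with plain values in place of a Code object (ReportedInlinedBytecodeSize is a made-up name for illustration):

```cpp
#include <cassert>

// Report the cached inlined bytecode size, unless it is non-zero but the
// code object has already been marked for deoptimization.
unsigned ReportedInlinedBytecodeSize(unsigned cached_size,
                                     bool marked_for_deoptimization) {
  if (cached_size != 0 && marked_for_deoptimization) return 0;
  return cached_size;
}

int main() {
  assert(ReportedInlinedBytecodeSize(128, false) == 128);
  assert(ReportedInlinedBytecodeSize(128, true) == 0);
  assert(ReportedInlinedBytecodeSize(0, true) == 0);
  return 0;
}
```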
#undef BIMODAL_ACCESSOR
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index c3a502ab33..6695e684b7 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_HEAP_REFS_H_
#define V8_COMPILER_HEAP_REFS_H_
+#include <type_traits>
+
#include "src/base/optional.h"
#include "src/ic/call-optimization.h"
#include "src/objects/elements-kind.h"
@@ -112,7 +114,6 @@ enum class RefSerializationKind {
NEVER_SERIALIZED(CallHandlerInfo) \
NEVER_SERIALIZED(Cell) \
NEVER_SERIALIZED(Code) \
- NEVER_SERIALIZED(CodeDataContainer) \
NEVER_SERIALIZED(Context) \
NEVER_SERIALIZED(DescriptorArray) \
NEVER_SERIALIZED(FeedbackCell) \
@@ -183,43 +184,117 @@ struct ref_traits<Object> {
RefSerializationKind::kNeverSerialized;
};
-// A ref without the broker_ field, used when storage size is important.
-template <class T>
-class TinyRef {
- private:
- using RefType = typename ref_traits<T>::ref_type;
-
+// For types used in ReadOnlyRoots, but which don't have a corresponding Ref
+// type, use HeapObjectRef.
+template <>
+struct ref_traits<Oddball> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<EnumCache> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<PropertyArray> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<ByteArray> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<ClosureFeedbackCellArray> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<NumberDictionary> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<OrderedHashMap> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<OrderedHashSet> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<FeedbackMetadata> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<NameDictionary> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<OrderedNameDictionary> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<SwissNameDictionary> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<InterceptorInfo> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<ArrayList> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<WeakFixedArray> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<WeakArrayList> : public ref_traits<HeapObject> {};
+template <>
+struct ref_traits<RegisteredSymbolTable> : public ref_traits<HeapObject> {};
+#if V8_ENABLE_WEBASSEMBLY
+template <>
+struct ref_traits<WasmNull> : public ref_traits<HeapObject> {};
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Wrapper around heap refs which works roughly like a base::Optional, but
+// instead of spending extra storage on a boolean it uses a null data pointer
+// as the "no value" sentinel.
+template <typename TRef>
+class OptionalRef {
public:
- explicit TinyRef(const RefType& ref) : TinyRef(ref.data_) {}
- RefType AsRef(JSHeapBroker* broker) const;
- static base::Optional<RefType> AsOptionalRef(JSHeapBroker* broker,
- base::Optional<TinyRef<T>> ref) {
- if (!ref.has_value()) return {};
- return ref->AsRef(broker);
+ // {ArrowOperatorHelper} is returned by {OptionalRef::operator->}. It should
+ // never be stored anywhere or used in any other code; no one should ever have
+ // to spell out {ArrowOperatorHelper} in code. Its only purpose is to be
+ // dereferenced immediately by "operator-> chaining". Returning the address of
+  // the field is valid because this object's lifetime only ends at the end of
+ // the full statement.
+ class ArrowOperatorHelper {
+ public:
+ TRef* operator->() { return &object_; }
+
+ private:
+ friend class OptionalRef<TRef>;
+ explicit ArrowOperatorHelper(TRef object) : object_(object) {}
+
+ TRef object_;
+ };
+
+ OptionalRef() = default;
+ // NOLINTNEXTLINE
+ OptionalRef(base::nullopt_t) : OptionalRef() {}
+
+ // Allow implicit upcasting from OptionalRefs with compatible refs.
+ template <typename SRef, typename = typename std::enable_if<
+ std::is_convertible<SRef*, TRef*>::value>::type>
+ // NOLINTNEXTLINE
+ V8_INLINE OptionalRef(OptionalRef<SRef> ref) : data_(ref.data_) {}
+
+ // Allow implicit upcasting from compatible refs.
+ template <typename SRef, typename = typename std::enable_if<
+ std::is_convertible<SRef*, TRef*>::value>::type>
+ // NOLINTNEXTLINE
+ V8_INLINE OptionalRef(SRef ref) : data_(ref.data_) {}
+
+ constexpr bool has_value() const { return data_ != nullptr; }
+ constexpr explicit operator bool() const { return has_value(); }
+
+ TRef value() const {
+ DCHECK(has_value());
+ return TRef(data_, false);
+ }
+ TRef operator*() const { return value(); }
+ ArrowOperatorHelper operator->() const {
+ return ArrowOperatorHelper(value());
}
- Handle<T> object() const;
private:
- explicit TinyRef(ObjectData* data) : data_(data) { DCHECK_NOT_NULL(data); }
- ObjectData* const data_;
+ explicit OptionalRef(ObjectData* data) : data_(data) {
+ CHECK_NOT_NULL(data_);
+ }
+ ObjectData* data_ = nullptr;
+
+ template <typename SRef>
+ friend class OptionalRef;
};
-#define V(Name) using Name##TinyRef = TinyRef<Name>;
+// Define aliases for OptionalFooRef = OptionalRef<FooRef>.
+#define V(Name) using Optional##Name##Ref = OptionalRef<Name##Ref>;
+V(Object)
HEAP_BROKER_OBJECT_LIST(V)
#undef V
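The new OptionalRef replaces base::Optional<FooRef> for heap refs: the empty state is encoded as a null ObjectData pointer, so the wrapper stays exactly one pointer wide. A minimal standalone sketch of the same idea, using stand-in Data/Ref types rather than V8's real classes:

```cpp
#include <cassert>

struct Data {};  // stand-in for V8's ObjectData

class Ref {
 public:
  explicit Ref(Data* data) : data_(data) { assert(data_ != nullptr); }
  Data* data() const { return data_; }

 private:
  Data* data_;
};

class OptionalRefSketch {
 public:
  OptionalRefSketch() = default;                     // empty: data_ stays null
  OptionalRefSketch(Ref ref) : data_(ref.data()) {}  // implicit from a real ref
  bool has_value() const { return data_ != nullptr; }
  explicit operator bool() const { return has_value(); }
  Ref value() const {
    assert(has_value());
    return Ref(data_);
  }

 private:
  Data* data_ = nullptr;  // null doubles as the "no value" sentinel
};

static_assert(sizeof(OptionalRefSketch) == sizeof(void*),
              "no extra bool, unlike std::optional<Ref>");

int main() {
  Data d;
  OptionalRefSketch empty;
  OptionalRefSketch full = Ref(&d);
  assert(!empty && full);
  return 0;
}
```

The implicit constructors mirror the ones above, so call sites can keep returning `{}` or a plain ref wherever an optional ref is expected.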
-#ifdef V8_EXTERNAL_CODE_SPACE
-using CodeTRef = CodeDataContainerRef;
-using CodeTTinyRef = CodeDataContainerTinyRef;
-#else
-using CodeTRef = CodeRef;
-using CodeTTinyRef = CodeTinyRef;
-#endif
-
class V8_EXPORT_PRIVATE ObjectRef {
public:
- ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
- : data_(data), broker_(broker) {
+ explicit ObjectRef(ObjectData* data, bool check_type = true) : data_(data) {
CHECK_NOT_NULL(data_);
}
@@ -238,25 +313,15 @@ class V8_EXPORT_PRIVATE ObjectRef {
HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
- // CodeT is defined as an alias to either CodeDataContainer or Code, depending
- // on the architecture. We can't put it in HEAP_BROKER_OBJECT_LIST, because
- // this list already contains CodeDataContainer and Code. Still, defining
- // IsCodeT and AsCodeT is useful to write code that is independent of
- // V8_EXTERNAL_CODE_SPACE.
- bool IsCodeT() const;
- CodeTRef AsCodeT() const;
-
bool IsNull() const;
- bool IsNullOrUndefined() const;
- bool IsTheHole() const;
+ bool IsNullOrUndefined(JSHeapBroker* broker) const;
+ bool IsTheHole(JSHeapBroker* broker) const;
- base::Optional<bool> TryGetBooleanValue() const;
- Maybe<double> OddballToNumber() const;
+ base::Optional<bool> TryGetBooleanValue(JSHeapBroker* broker) const;
+ Maybe<double> OddballToNumber(JSHeapBroker* broker) const;
bool should_access_heap() const;
- Isolate* isolate() const;
-
struct Hash {
size_t operator()(const ObjectRef& ref) const {
return base::hash_combine(ref.object().address());
@@ -264,7 +329,6 @@ class V8_EXPORT_PRIVATE ObjectRef {
};
protected:
- JSHeapBroker* broker() const;
ObjectData* data() const;
ObjectData* data_; // Should be used only by object() getters.
@@ -277,13 +341,12 @@ class V8_EXPORT_PRIVATE ObjectRef {
friend class JSHeapBroker;
friend class JSObjectData;
friend class StringData;
- template <class T>
- friend class TinyRef;
+
+ template <typename TRef>
+ friend class OptionalRef;
friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
friend bool operator<(const ObjectRef& lhs, const ObjectRef& rhs);
-
- JSHeapBroker* broker_;
};
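The broader pattern in this hunk is that ObjectRef drops its broker_ field and every accessor takes JSHeapBroker* explicitly instead. A standalone sketch of the size effect behind that change, with stand-in Broker/ObjectData/FatRef/ThinRef types rather than V8's:

```cpp
#include <cstddef>

struct Broker {};      // stand-in for JSHeapBroker
struct ObjectData {};  // stand-in for ObjectData

// Old shape: every ref carried its own broker pointer.
struct FatRef {
  ObjectData* data;
  Broker* broker;
};

// New shape: refs hold only the data pointer; the broker is a call argument.
struct ThinRef {
  ObjectData* data;
  bool IsNull(Broker* /*broker*/) const { return data == nullptr; }
};

static_assert(sizeof(FatRef) == 2 * sizeof(void*), "two pointers per ref");
static_assert(sizeof(ThinRef) == sizeof(void*), "one pointer per ref");

int main() { return 0; }
```

The one-pointer layout is what the static_asserts added at the end of this header then lock in.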
inline bool operator==(const ObjectRef& lhs, const ObjectRef& rhs) {
@@ -327,6 +390,8 @@ class HeapObjectType {
}
OddballType oddball_type() const { return oddball_type_; }
+ // For compatibility with MapRef.
+ OddballType oddball_type(JSHeapBroker* broker) const { return oddball_type_; }
InstanceType instance_type() const { return instance_type_; }
Flags flags() const { return flags_; }
@@ -341,12 +406,12 @@ class HeapObjectType {
// Constructors are carefully defined such that we do a type check on
// the outermost Ref class in the inheritance chain only.
-#define DEFINE_REF_CONSTRUCTOR(Name, Base) \
- Name##Ref(JSHeapBroker* broker, ObjectData* data, bool check_type = true) \
- : Base(broker, data, false) { \
- if (check_type) { \
- CHECK(Is##Name()); \
- } \
+#define DEFINE_REF_CONSTRUCTOR(Name, Base) \
+ explicit Name##Ref(ObjectData* data, bool check_type = true) \
+ : Base(data, false) { \
+ if (check_type) { \
+ CHECK(Is##Name()); \
+ } \
}
class HeapObjectRef : public ObjectRef {
@@ -355,14 +420,14 @@ class HeapObjectRef : public ObjectRef {
Handle<HeapObject> object() const;
- MapRef map() const;
+ MapRef map(JSHeapBroker* broker) const;
// Only for use in special situations where we need to read the object's
// current map (instead of returning the cached map). Use with care.
- base::Optional<MapRef> map_direct_read() const;
+ OptionalMapRef map_direct_read(JSHeapBroker* broker) const;
// See the comment on the HeapObjectType class.
- HeapObjectType GetHeapObjectType() const;
+ HeapObjectType GetHeapObjectType(JSHeapBroker* broker) const;
};
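HeapObjectRef and the other ref classes below get their constructors from the reworked DEFINE_REF_CONSTRUCTOR macro above, which no longer threads a broker through. A rough standalone illustration of the pattern that macro stamps out, using stand-in BaseRef/DerivedRef types:

```cpp
#include <cassert>

struct ObjectData {};

class BaseRef {
 public:
  explicit BaseRef(ObjectData* data, bool /*check_type*/ = true)
      : data_(data) {}
  bool IsDerived() const { return true; }  // stand-in for the Is##Name() check

 protected:
  ObjectData* data_;
};

class DerivedRef : public BaseRef {
 public:
  // Roughly what DEFINE_REF_CONSTRUCTOR(Derived, BaseRef) would stamp out:
  // the base is constructed with check_type == false, so the type check runs
  // only once, at the outermost class in the inheritance chain.
  explicit DerivedRef(ObjectData* data, bool check_type = true)
      : BaseRef(data, false) {
    if (check_type) assert(IsDerived());
  }
};

int main() {
  ObjectData d;
  DerivedRef ref(&d);
  (void)ref;
  return 0;
}
```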
class PropertyCellRef : public HeapObjectRef {
@@ -371,16 +436,16 @@ class PropertyCellRef : public HeapObjectRef {
Handle<PropertyCell> object() const;
- V8_WARN_UNUSED_RESULT bool Cache() const;
- void CacheAsProtector() const {
- bool cached = Cache();
+ V8_WARN_UNUSED_RESULT bool Cache(JSHeapBroker* broker) const;
+ void CacheAsProtector(JSHeapBroker* broker) const {
+ bool cached = Cache(broker);
// A protector always holds a Smi value and its cell type never changes, so
// Cache can't fail.
CHECK(cached);
}
PropertyDetails property_details() const;
- ObjectRef value() const;
+ ObjectRef value(JSHeapBroker* broker) const;
};
class JSReceiverRef : public HeapObjectRef {
@@ -396,26 +461,27 @@ class JSObjectRef : public JSReceiverRef {
Handle<JSObject> object() const;
- base::Optional<ObjectRef> raw_properties_or_hash() const;
+ OptionalObjectRef raw_properties_or_hash(JSHeapBroker* broker) const;
// Usable only for in-object properties. Only use this if the underlying
// value can be an uninitialized-sentinel, or if HeapNumber construction must
// be avoided for some reason. Otherwise, use the higher-level
// GetOwnFastDataProperty.
- base::Optional<ObjectRef> RawInobjectPropertyAt(FieldIndex index) const;
+ OptionalObjectRef RawInobjectPropertyAt(JSHeapBroker* broker,
+ FieldIndex index) const;
// Return the element at key {index} if {index} is known to be an own data
// property of the object that is non-writable and non-configurable. If
// {dependencies} is non-null, a dependency will be taken to protect
// against inconsistency due to weak memory concurrency.
- base::Optional<ObjectRef> GetOwnConstantElement(
- const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies) const;
+ OptionalObjectRef GetOwnConstantElement(
+ JSHeapBroker* broker, const FixedArrayBaseRef& elements_ref,
+ uint32_t index, CompilationDependencies* dependencies) const;
// The direct-read implementation of the above, extracted into a helper since
// it's also called from compilation-dependency validation. This helper is
// guaranteed to not create new Ref instances.
base::Optional<Object> GetOwnConstantElementFromHeap(
- FixedArrayBase elements, ElementsKind elements_kind,
+ JSHeapBroker* broker, FixedArrayBase elements, ElementsKind elements_kind,
uint32_t index) const;
// Return the value of the property identified by the field {index}
@@ -423,22 +489,24 @@ class JSObjectRef : public JSReceiverRef {
// If {dependencies} is non-null, and a property was successfully read,
// then the function will take a dependency to check the value of the
// property at code finalization time.
- base::Optional<ObjectRef> GetOwnFastDataProperty(
- Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies) const;
+ OptionalObjectRef GetOwnFastDataProperty(
+ JSHeapBroker* broker, Representation field_representation,
+ FieldIndex index, CompilationDependencies* dependencies) const;
// Return the value of the dictionary property at {index} in the dictionary
// if {index} is known to be an own data property of the object.
- base::Optional<ObjectRef> GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies) const;
+ OptionalObjectRef GetOwnDictionaryProperty(
+ JSHeapBroker* broker, InternalIndex index,
+ CompilationDependencies* dependencies) const;
// When concurrent inlining is enabled, reads the elements through a direct
// relaxed read. This is to ease the transition to unserialized (or
// background-serialized) elements.
- base::Optional<FixedArrayBaseRef> elements(RelaxedLoadTag) const;
+ OptionalFixedArrayBaseRef elements(JSHeapBroker* broker,
+ RelaxedLoadTag) const;
bool IsElementsTenured(const FixedArrayBaseRef& elements);
- base::Optional<MapRef> GetObjectCreateMap() const;
+ OptionalMapRef GetObjectCreateMap(JSHeapBroker* broker) const;
};
class JSDataViewRef : public JSObjectRef {
@@ -456,9 +524,9 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- JSReceiverRef bound_target_function() const;
- ObjectRef bound_this() const;
- FixedArrayRef bound_arguments() const;
+ JSReceiverRef bound_target_function(JSHeapBroker* broker) const;
+ ObjectRef bound_this(JSHeapBroker* broker) const;
+ FixedArrayRef bound_arguments(JSHeapBroker* broker) const;
};
class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
@@ -470,25 +538,21 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
// Returns true, iff the serialized JSFunctionData contents are consistent
// with the state of the underlying JSFunction object. Must be called from
// the main thread.
- bool IsConsistentWithHeapState() const;
-
- ContextRef context() const;
- NativeContextRef native_context() const;
- SharedFunctionInfoRef shared() const;
- CodeTRef code() const;
-
- bool has_initial_map(CompilationDependencies* dependencies) const;
- bool PrototypeRequiresRuntimeLookup(
- CompilationDependencies* dependencies) const;
- bool has_instance_prototype(CompilationDependencies* dependencies) const;
- ObjectRef instance_prototype(CompilationDependencies* dependencies) const;
- MapRef initial_map(CompilationDependencies* dependencies) const;
- int InitialMapInstanceSizeWithMinSlack(
- CompilationDependencies* dependencies) const;
- FeedbackCellRef raw_feedback_cell(
- CompilationDependencies* dependencies) const;
- base::Optional<FeedbackVectorRef> feedback_vector(
- CompilationDependencies* dependencies) const;
+ bool IsConsistentWithHeapState(JSHeapBroker* broker) const;
+
+ ContextRef context(JSHeapBroker* broker) const;
+ NativeContextRef native_context(JSHeapBroker* broker) const;
+ SharedFunctionInfoRef shared(JSHeapBroker* broker) const;
+ OptionalCodeRef code(JSHeapBroker* broker) const;
+
+ bool has_initial_map(JSHeapBroker* broker) const;
+ bool PrototypeRequiresRuntimeLookup(JSHeapBroker* broker) const;
+ bool has_instance_prototype(JSHeapBroker* broker) const;
+ ObjectRef instance_prototype(JSHeapBroker* broker) const;
+ MapRef initial_map(JSHeapBroker* broker) const;
+ int InitialMapInstanceSizeWithMinSlack(JSHeapBroker* broker) const;
+ FeedbackCellRef raw_feedback_cell(JSHeapBroker* broker) const;
+ OptionalFeedbackVectorRef feedback_vector(JSHeapBroker* broker) const;
};
class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
@@ -497,8 +561,8 @@ class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
Handle<RegExpBoilerplateDescription> object() const;
- FixedArrayRef data() const;
- StringRef source() const;
+ FixedArrayRef data(JSHeapBroker* broker) const;
+ StringRef source(JSHeapBroker* broker) const;
int flags() const;
};
@@ -526,12 +590,12 @@ class ContextRef : public HeapObjectRef {
// {previous} decrements {depth} by 1 for each previous link successfully
// followed. If {depth} != 0 on function return, then it only got partway to
// the desired depth.
- ContextRef previous(size_t* depth) const;
+ ContextRef previous(JSHeapBroker* broker, size_t* depth) const;
// Only returns a value if the index is valid for this ContextRef.
- base::Optional<ObjectRef> get(int index) const;
+ OptionalObjectRef get(JSHeapBroker* broker, int index) const;
- ScopeInfoRef scope_info() const;
+ ScopeInfoRef scope_info(JSHeapBroker* broker) const;
};
#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
@@ -585,14 +649,15 @@ class NativeContextRef : public ContextRef {
Handle<NativeContext> object() const;
-#define DECL_ACCESSOR(type, name) type##Ref name() const;
+#define DECL_ACCESSOR(type, name) type##Ref name(JSHeapBroker* broker) const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
- MapRef GetFunctionMapFromIndex(int index) const;
- MapRef GetInitialJSArrayMap(ElementsKind kind) const;
- base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
- bool GlobalIsDetached() const;
+ MapRef GetFunctionMapFromIndex(JSHeapBroker* broker, int index) const;
+ MapRef GetInitialJSArrayMap(JSHeapBroker* broker, ElementsKind kind) const;
+ OptionalJSFunctionRef GetConstructorFunction(JSHeapBroker* broker,
+ const MapRef& map) const;
+ bool GlobalIsDetached(JSHeapBroker* broker) const;
};
class NameRef : public HeapObjectRef {
@@ -611,9 +676,10 @@ class DescriptorArrayRef : public HeapObjectRef {
Handle<DescriptorArray> object() const;
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
- NameRef GetPropertyKey(InternalIndex descriptor_index) const;
- base::Optional<ObjectRef> GetStrongValue(
- InternalIndex descriptor_index) const;
+ NameRef GetPropertyKey(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const;
+ OptionalObjectRef GetStrongValue(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const;
};
class FeedbackCellRef : public HeapObjectRef {
@@ -622,11 +688,12 @@ class FeedbackCellRef : public HeapObjectRef {
Handle<FeedbackCell> object() const;
- ObjectRef value() const;
+ ObjectRef value(JSHeapBroker* broker) const;
// Convenience wrappers around {value()}:
- base::Optional<FeedbackVectorRef> feedback_vector() const;
- base::Optional<SharedFunctionInfoRef> shared_function_info() const;
+ OptionalFeedbackVectorRef feedback_vector(JSHeapBroker* broker) const;
+ OptionalSharedFunctionInfoRef shared_function_info(
+ JSHeapBroker* broker) const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -635,9 +702,9 @@ class FeedbackVectorRef : public HeapObjectRef {
Handle<FeedbackVector> object() const;
- SharedFunctionInfoRef shared_function_info() const;
+ SharedFunctionInfoRef shared_function_info(JSHeapBroker* broker) const;
- FeedbackCellRef GetClosureFeedbackCell(int index) const;
+ FeedbackCellRef GetClosureFeedbackCell(JSHeapBroker* broker, int index) const;
};
class CallHandlerInfoRef : public HeapObjectRef {
@@ -647,7 +714,7 @@ class CallHandlerInfoRef : public HeapObjectRef {
Handle<CallHandlerInfo> object() const;
Address callback() const;
- ObjectRef data() const;
+ ObjectRef data(JSHeapBroker* broker) const;
};
class AccessorInfoRef : public HeapObjectRef {
@@ -665,9 +732,9 @@ class AllocationSiteRef : public HeapObjectRef {
bool PointsToLiteral() const;
AllocationType GetAllocationType() const;
- ObjectRef nested_site() const;
+ ObjectRef nested_site(JSHeapBroker* broker) const;
- base::Optional<JSObjectRef> boilerplate() const;
+ OptionalJSObjectRef boilerplate(JSHeapBroker* broker) const;
ElementsKind GetElementsKind() const;
bool CanInlineCall() const;
};
@@ -679,6 +746,7 @@ class BigIntRef : public HeapObjectRef {
Handle<BigInt> object() const;
uint64_t AsUint64() const;
+ int64_t AsInt64(bool* lossless) const;
};
class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
@@ -706,55 +774,58 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
bool CanTransition() const;
bool IsInobjectSlackTrackingInProgress() const;
bool is_dictionary_map() const;
- bool IsFixedCowArrayMap() const;
+ bool IsFixedCowArrayMap(JSHeapBroker* broker) const;
bool IsPrimitiveMap() const;
bool is_undetectable() const;
bool is_callable() const;
bool has_indexed_interceptor() const;
bool is_migration_target() const;
- bool supports_fast_array_iteration() const;
- bool supports_fast_array_resize() const;
+ bool supports_fast_array_iteration(JSHeapBroker* broker) const;
+ bool supports_fast_array_resize(JSHeapBroker* broker) const;
bool is_abandoned_prototype_map() const;
- OddballType oddball_type() const;
+ OddballType oddball_type(JSHeapBroker* broker) const;
bool CanInlineElementAccess() const;
// Note: Only returns a value if the requested elements kind matches the
// current kind, or if the current map is an unmodified JSArray initial map.
- base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
+ OptionalMapRef AsElementsKind(JSHeapBroker* broker, ElementsKind kind) const;
#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
- HeapObjectRef GetBackPointer() const;
+ HeapObjectRef GetBackPointer(JSHeapBroker* broker) const;
- HeapObjectRef prototype() const;
+ HeapObjectRef prototype(JSHeapBroker* broker) const;
bool HasOnlyStablePrototypesWithFastElements(
- ZoneVector<MapRef>* prototype_maps);
+ JSHeapBroker* broker, ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
- DescriptorArrayRef instance_descriptors() const;
- MapRef FindFieldOwner(InternalIndex descriptor_index) const;
- PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
- NameRef GetPropertyKey(InternalIndex descriptor_index) const;
+ DescriptorArrayRef instance_descriptors(JSHeapBroker* broker) const;
+ MapRef FindFieldOwner(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const;
+ PropertyDetails GetPropertyDetails(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const;
+ NameRef GetPropertyKey(JSHeapBroker* broker,
+ InternalIndex descriptor_index) const;
FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
- base::Optional<ObjectRef> GetStrongValue(
- InternalIndex descriptor_number) const;
+ OptionalObjectRef GetStrongValue(JSHeapBroker* broker,
+ InternalIndex descriptor_number) const;
- MapRef FindRootMap() const;
- ObjectRef GetConstructor() const;
+ MapRef FindRootMap(JSHeapBroker* broker) const;
+ ObjectRef GetConstructor(JSHeapBroker* broker) const;
};
struct HolderLookupResult {
HolderLookupResult(CallOptimization::HolderLookup lookup_ =
CallOptimization::kHolderNotFound,
- base::Optional<JSObjectRef> holder_ = base::nullopt)
+ OptionalJSObjectRef holder_ = base::nullopt)
: lookup(lookup_), holder(holder_) {}
CallOptimization::HolderLookup lookup;
- base::Optional<JSObjectRef> holder;
+ OptionalJSObjectRef holder;
};
class FunctionTemplateInfoRef : public HeapObjectRef {
@@ -763,15 +834,16 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
Handle<FunctionTemplateInfo> object() const;
- bool is_signature_undefined() const;
+ bool is_signature_undefined(JSHeapBroker* broker) const;
bool accept_any_receiver() const;
int16_t allowed_receiver_instance_type_range_start() const;
int16_t allowed_receiver_instance_type_range_end() const;
- base::Optional<CallHandlerInfoRef> call_code() const;
- ZoneVector<Address> c_functions() const;
- ZoneVector<const CFunctionInfo*> c_signatures() const;
- HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map);
+ OptionalCallHandlerInfoRef call_code(JSHeapBroker* broker) const;
+ ZoneVector<Address> c_functions(JSHeapBroker* broker) const;
+ ZoneVector<const CFunctionInfo*> c_signatures(JSHeapBroker* broker) const;
+ HolderLookupResult LookupHolderOfExpectedType(JSHeapBroker* broker,
+ MapRef receiver_map);
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -797,7 +869,7 @@ class FixedArrayRef : public FixedArrayBaseRef {
Handle<FixedArray> object() const;
- base::Optional<ObjectRef> TryGet(int i) const;
+ OptionalObjectRef TryGet(JSHeapBroker* broker, int i) const;
};
class FixedDoubleArrayRef : public FixedArrayBaseRef {
@@ -826,7 +898,7 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
int parameter_count() const;
interpreter::Register incoming_new_target_or_generator_register() const;
- Handle<ByteArray> SourcePositionTable() const;
+ Handle<ByteArray> SourcePositionTable(JSHeapBroker* broker) const;
// Exception handler table.
Address handler_table_address() const;
@@ -858,20 +930,21 @@ class JSArrayRef : public JSObjectRef {
// The `length` property of boilerplate JSArray objects. Boilerplates are
// immutable after initialization. Must not be used for non-boilerplate
// JSArrays.
- ObjectRef GetBoilerplateLength() const;
+ ObjectRef GetBoilerplateLength(JSHeapBroker* broker) const;
// Return the element at key {index} if the array has a copy-on-write elements
// storage and {index} is known to be an own data property.
// Note the value returned by this function is only valid if we ensure at
// runtime that the backing store has not changed.
- base::Optional<ObjectRef> GetOwnCowElement(FixedArrayBaseRef elements_ref,
- uint32_t index) const;
+ OptionalObjectRef GetOwnCowElement(JSHeapBroker* broker,
+ FixedArrayBaseRef elements_ref,
+ uint32_t index) const;
// The `JSArray::length` property; not safe to use in general, but can be
// used in some special cases that guarantee a valid `length` value despite
// concurrent reads. The result needs to be optional in case the
// return value was created too recently to pass the gc predicate.
- base::Optional<ObjectRef> length_unsafe() const;
+ OptionalObjectRef length_unsafe(JSHeapBroker* broker) const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -885,29 +958,30 @@ class ScopeInfoRef : public HeapObjectRef {
bool HasContextExtensionSlot() const;
bool ClassScopeHasPrivateBrand() const;
- ScopeInfoRef OuterScopeInfo() const;
+ ScopeInfoRef OuterScopeInfo(JSHeapBroker* broker) const;
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count_with_receiver) \
- V(int, internal_formal_parameter_count_without_receiver) \
- V(bool, IsDontAdaptArguments) \
- V(bool, has_simple_parameters) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript) \
- V(bool, requires_instance_members_initializer) \
- IF_WASM(V, const wasm::WasmModule*, wasm_module) \
- IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature)
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count_with_receiver) \
+ V(int, internal_formal_parameter_count_without_receiver) \
+ V(bool, IsDontAdaptArguments) \
+ V(bool, has_simple_parameters) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ V(bool, requires_instance_members_initializer) \
+ IF_WASM(V, const wasm::WasmModule*, wasm_module) \
+ IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature) \
+ IF_WASM(V, int, wasm_function_index)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -918,17 +992,19 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
Builtin builtin_id() const;
int context_header_size() const;
int context_parameters_start() const;
- BytecodeArrayRef GetBytecodeArray() const;
- SharedFunctionInfo::Inlineability GetInlineability() const;
- base::Optional<FunctionTemplateInfoRef> function_template_info() const;
- ScopeInfoRef scope_info() const;
+ BytecodeArrayRef GetBytecodeArray(JSHeapBroker* broker) const;
+ SharedFunctionInfo::Inlineability GetInlineability(
+ JSHeapBroker* broker) const;
+ OptionalFunctionTemplateInfoRef function_template_info(
+ JSHeapBroker* broker) const;
+ ScopeInfoRef scope_info(JSHeapBroker* broker) const;
#define DECL_ACCESSOR(type, name) type name() const;
BROKER_SFI_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
- bool IsInlineable() const {
- return GetInlineability() == SharedFunctionInfo::kIsInlineable;
+ bool IsInlineable(JSHeapBroker* broker) const {
+ return GetInlineability(broker) == SharedFunctionInfo::kIsInlineable;
}
};
@@ -940,15 +1016,18 @@ class StringRef : public NameRef {
// With concurrent inlining on, we return base::nullopt due to not being able
// to use LookupIterator in a thread-safe way.
- base::Optional<ObjectRef> GetCharAsStringOrUndefined(uint32_t index) const;
+ OptionalObjectRef GetCharAsStringOrUndefined(JSHeapBroker* broker,
+ uint32_t index) const;
// When concurrently accessing non-read-only non-supported strings, we return
// base::nullopt for these methods.
- base::Optional<Handle<String>> ObjectIfContentAccessible();
+ base::Optional<Handle<String>> ObjectIfContentAccessible(
+ JSHeapBroker* broker);
int length() const;
- base::Optional<uint16_t> GetFirstChar() const;
- base::Optional<uint16_t> GetChar(int index) const;
- base::Optional<double> ToNumber();
+ base::Optional<uint16_t> GetFirstChar(JSHeapBroker* broker) const;
+ base::Optional<uint16_t> GetChar(JSHeapBroker* broker, int index) const;
+ base::Optional<double> ToNumber(JSHeapBroker* broker);
+ base::Optional<double> ToInt(JSHeapBroker* broker, int radix);
bool IsSeqString() const;
bool IsExternalString() const;
@@ -978,7 +1057,7 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
void* data_ptr() const;
- HeapObjectRef buffer() const;
+ HeapObjectRef buffer(JSHeapBroker* broker) const;
};
class SourceTextModuleRef : public HeapObjectRef {
@@ -987,8 +1066,8 @@ class SourceTextModuleRef : public HeapObjectRef {
Handle<SourceTextModule> object() const;
- base::Optional<CellRef> GetCell(int cell_index) const;
- base::Optional<ObjectRef> import_meta() const;
+ OptionalCellRef GetCell(JSHeapBroker* broker, int cell_index) const;
+ OptionalObjectRef import_meta(JSHeapBroker* broker) const;
};
class TemplateObjectDescriptionRef : public HeapObjectRef {
@@ -1014,7 +1093,8 @@ class JSGlobalObjectRef : public JSObjectRef {
bool IsDetachedFrom(JSGlobalProxyRef const& proxy) const;
// Can be called even when there is no property cell for the given name.
- base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name) const;
+ OptionalPropertyCellRef GetPropertyCell(JSHeapBroker* broker,
+ NameRef const& name) const;
};
class JSGlobalProxyRef : public JSObjectRef {
@@ -1033,17 +1113,6 @@ class CodeRef : public HeapObjectRef {
unsigned GetInlinedBytecodeSize() const;
};
-// CodeDataContainerRef doesn't appear to be used directly, but it is used via
-// CodeTRef when V8_EXTERNAL_CODE_SPACE is enabled.
-class CodeDataContainerRef : public HeapObjectRef {
- public:
- DEFINE_REF_CONSTRUCTOR(CodeDataContainer, HeapObjectRef)
-
- Handle<CodeDataContainer> object() const;
-
- unsigned GetInlinedBytecodeSize() const;
-};
-
class InternalizedStringRef : public StringRef {
public:
DEFINE_REF_CONSTRUCTOR(InternalizedString, StringRef)
@@ -1053,6 +1122,20 @@ class InternalizedStringRef : public StringRef {
#undef DEFINE_REF_CONSTRUCTOR
+#define V(Name) \
+ /* Refs should contain only one pointer. */ \
+ static_assert(sizeof(Name##Ref) == kSystemPointerSize); \
+  static_assert(sizeof(Optional##Name##Ref) == kSystemPointerSize); \
+  /* Refs should be trivial to copy, move and destroy. */ \
+  static_assert(std::is_trivially_copyable_v<Name##Ref>); \
+  static_assert(std::is_trivially_copyable_v<Optional##Name##Ref>); \
+  static_assert(std::is_trivially_destructible_v<Name##Ref>); \
+  static_assert(std::is_trivially_destructible_v<Optional##Name##Ref>);
+
+V(Object)
+HEAP_BROKER_OBJECT_LIST(V)
+#undef V
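These X-macro static_asserts pin down the one-pointer, trivially-copyable layout for every ref and optional ref in the list. A small self-contained demo of the same X-macro technique with stand-in FooRef/BarRef types (DEMO_REF_LIST is made up for the example; the real list is HEAP_BROKER_OBJECT_LIST):

```cpp
#include <type_traits>

// Stand-in pointer-sized ref types.
struct FooRef { void* data; };
struct BarRef { void* data; };

#define DEMO_REF_LIST(V) V(Foo) V(Bar)

// One V(Name) entry stamps out the same block of asserts for every type.
#define V(Name)                                               \
  static_assert(sizeof(Name##Ref) == sizeof(void*));          \
  static_assert(std::is_trivially_copyable_v<Name##Ref>);     \
  static_assert(std::is_trivially_destructible_v<Name##Ref>);
DEMO_REF_LIST(V)
#undef V
#undef DEMO_REF_LIST

int main() { return 0; }
```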
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 9bfe1cb376..3c86b35347 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -13,35 +13,34 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/compiler/wasm-call-descriptors.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/wasm-engine.h"
// TODO(wasm): Remove this include.
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-subtyping.h"
#include "src/zone/zone.h"
+#if V8_TARGET_ARCH_32_BIT
+
namespace v8 {
namespace internal {
namespace compiler {
-Int64Lowering::Int64Lowering(
- Graph* graph, MachineOperatorBuilder* machine,
- CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified,
- Zone* zone, const wasm::WasmModule* module,
- Signature<MachineRepresentation>* signature,
- std::unique_ptr<Int64LoweringSpecialCase> special_case)
+Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common,
+ SimplifiedOperatorBuilder* simplified, Zone* zone,
+ Signature<MachineRepresentation>* signature)
: graph_(graph),
machine_(machine),
common_(common),
simplified_(simplified),
zone_(zone),
signature_(signature),
- special_case_(std::move(special_case)),
state_(graph->NodeCount(), State::kUnvisited),
stack_(zone),
replacements_(nullptr),
- placeholder_(graph->NewNode(common->Dead())),
- int32_type_(Type::Wasm({wasm::kWasmI32, module}, graph->zone())),
- float64_type_(Type::Wasm({wasm::kWasmF64, module}, graph->zone())) {
+ placeholder_(graph->NewNode(common->Dead())) {
DCHECK_NOT_NULL(graph);
DCHECK_NOT_NULL(graph->end());
replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
@@ -49,9 +48,6 @@ Int64Lowering::Int64Lowering(
}
void Int64Lowering::LowerGraph() {
- if (!machine()->Is32()) {
- return;
- }
stack_.push_back({graph()->end(), 0});
state_[graph()->end()->id()] = State::kOnStack;
@@ -128,16 +124,6 @@ int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {
} // namespace
-Node* Int64Lowering::SetInt32Type(Node* node) {
- NodeProperties::SetType(node, int32_type_);
- return node;
-}
-
-Node* Int64Lowering::SetFloat64Type(Node* node) {
- NodeProperties::SetType(node, float64_type_);
- return node;
-}
-
void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
DCHECK_EQ(5, node->InputCount());
LowerMemoryBaseAndIndex(node);
@@ -151,7 +137,6 @@ void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
DefaultLowering(node, true);
NodeProperties::ChangeOp(node, op);
- SetInt32Type(node);
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
@@ -206,7 +191,7 @@ void Int64Lowering::LowerLoadOperator(Node* node, MachineRepresentation rep,
}
node->ReplaceInput(1, index_low);
NodeProperties::ChangeOp(node, load_op);
- ReplaceNode(node, SetInt32Type(node), SetInt32Type(high_node));
+ ReplaceNode(node, node, high_node);
} else {
DefaultLowering(node);
}
@@ -362,9 +347,8 @@ void Int64Lowering::LowerNode(Node* node) {
if (signature()->GetParam(old_index) ==
MachineRepresentation::kWord64) {
- SetInt32Type(node);
- Node* high_node = SetInt32Type(graph()->NewNode(
- common()->Parameter(new_index + 1), graph()->start()));
+ Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1),
+ graph()->start());
ReplaceNode(node, node, high_node);
}
}
@@ -428,9 +412,9 @@ void Int64Lowering::LowerNode(Node* node) {
}
if (call_descriptor->GetReturnType(old_index).representation() ==
MachineRepresentation::kWord64) {
- Node* high_node = SetInt32Type(graph()->NewNode(
- common()->Projection(new_index + 1), node, graph()->start()));
- ReplaceNode(use_node, SetInt32Type(use_node), high_node);
+ Node* high_node = graph()->NewNode(
+ common()->Projection(new_index + 1), node, graph()->start());
+ ReplaceNode(use_node, use_node, high_node);
++new_index;
}
}
@@ -443,12 +427,12 @@ void Int64Lowering::LowerNode(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Node* low_node = SetInt32Type(graph()->NewNode(machine()->Word32And(),
- GetReplacementLow(left),
- GetReplacementLow(right)));
- Node* high_node = SetInt32Type(
+ Node* low_node =
+ graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
- GetReplacementHigh(right)));
+ GetReplacementHigh(right));
ReplaceNode(node, low_node, high_node);
break;
}
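The bitwise cases all follow one template: split each 64-bit operand into {low, high} words and apply the 32-bit operator to each half independently. A standalone sketch of that lowering for Word64And, using plain integers in place of compiler nodes (Split/Join/LowerWord64And are illustrative helpers, not V8 functions):

```cpp
#include <cassert>
#include <cstdint>
#include <utility>

using Word32Pair = std::pair<uint32_t, uint32_t>;  // {low, high}

Word32Pair Split(uint64_t v) {
  return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
}

uint64_t Join(Word32Pair p) {
  return (static_cast<uint64_t>(p.second) << 32) | p.first;
}

// A 64-bit AND becomes independent 32-bit ANDs of the low and high halves.
Word32Pair LowerWord64And(Word32Pair left, Word32Pair right) {
  return {left.first & right.first, left.second & right.second};
}

int main() {
  uint64_t a = 0x123456789abcdef0ull;
  uint64_t b = 0x0f0f0f0f0f0f0f0full;
  assert(Join(LowerWord64And(Split(a), Split(b))) == (a & b));
  return 0;
}
```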
@@ -512,12 +496,12 @@ void Int64Lowering::LowerNode(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Node* low_node = SetInt32Type(graph()->NewNode(machine()->Word32Or(),
- GetReplacementLow(left),
- GetReplacementLow(right)));
- Node* high_node = SetInt32Type(
+ Node* low_node =
+ graph()->NewNode(machine()->Word32Or(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
graph()->NewNode(machine()->Word32Or(), GetReplacementHigh(left),
- GetReplacementHigh(right)));
+ GetReplacementHigh(right));
ReplaceNode(node, low_node, high_node);
break;
}
@@ -526,12 +510,12 @@ void Int64Lowering::LowerNode(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Node* low_node = SetInt32Type(graph()->NewNode(machine()->Word32Xor(),
- GetReplacementLow(left),
- GetReplacementLow(right)));
- Node* high_node = SetInt32Type(
+ Node* low_node =
+ graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
- GetReplacementHigh(right)));
+ GetReplacementHigh(right));
ReplaceNode(node, low_node, high_node);
break;
}
@@ -610,7 +594,7 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
GetReplacementHigh(right))),
graph()->NewNode(common()->Int32Constant(0)));
- ReplaceNode(node, SetInt32Type(replacement), nullptr);
+ ReplaceNode(node, replacement, nullptr);
break;
}
case IrOpcode::kInt64LessThan: {
@@ -641,9 +625,9 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
// We use SAR to preserve the sign in the high word.
- Node* high_node = SetInt32Type(
+ Node* high_node =
graph()->NewNode(machine()->Word32Sar(), input,
- graph()->NewNode(common()->Int32Constant(31))));
+ graph()->NewNode(common()->Int32Constant(31)));
ReplaceNode(node, input, high_node);
node->NullAllInputs();
break;
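For sign extension, the high word is produced by arithmetically shifting the low word right by 31, which replicates the sign bit. A minimal worked check of that trick (HighWordViaSar is a made-up helper; it assumes arithmetic shift for signed values, which is what the Word32Sar machine operator provides):

```cpp
#include <cassert>
#include <cstdint>

uint32_t HighWordViaSar(int32_t low) {
  return static_cast<uint32_t>(low >> 31);  // all zeros or all ones
}

int main() {
  assert(HighWordViaSar(5) == 0x00000000u);
  assert(HighWordViaSar(-5) == 0xFFFFFFFFu);
  // Matches full sign extension of the 64-bit value.
  assert(HighWordViaSar(-5) ==
         static_cast<uint32_t>(static_cast<int64_t>(-5) >> 32));
  return 0;
}
```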
@@ -668,7 +652,6 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementHigh(input));
Node* result = graph()->NewNode(machine()->Float64InsertLowWord32(),
high_half, GetReplacementLow(input));
- SetFloat64Type(node);
ReplaceNode(node, result, nullptr);
break;
}
@@ -679,10 +662,10 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
- Node* low_node = SetInt32Type(
- graph()->NewNode(machine()->Float64ExtractLowWord32(), input));
- Node* high_node = SetInt32Type(
- graph()->NewNode(machine()->Float64ExtractHighWord32(), input));
+ Node* low_node =
+ graph()->NewNode(machine()->Float64ExtractLowWord32(), input);
+ Node* high_node =
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), input);
ReplaceNode(node, low_node, high_node);
break;
}
@@ -726,14 +709,14 @@ void Int64Lowering::LowerNode(Node* node) {
bool is_ror = node->opcode() == IrOpcode::kWord64RorLowerable;
if (!is_ror) std::swap(op1, op2);
- Node* low_node = SetInt32Type(
+ Node* low_node =
graph()->NewNode(machine()->Word32Or(),
graph()->NewNode(op1, low_input, masked_shift),
- graph()->NewNode(op2, high_input, inv_shift)));
- Node* high_node = SetInt32Type(
+ graph()->NewNode(op2, high_input, inv_shift));
+ Node* high_node =
graph()->NewNode(machine()->Word32Or(),
graph()->NewNode(op1, high_input, masked_shift),
- graph()->NewNode(op2, low_input, inv_shift)));
+ graph()->NewNode(op2, low_input, inv_shift));
ReplaceNode(node, low_node, high_node);
}
} else {
@@ -798,14 +781,14 @@ void Int64Lowering::LowerNode(Node* node) {
auto* mask2 = inv_mask;
if (!is_ror) std::swap(mask1, mask2);
- Node* low_node = SetInt32Type(graph()->NewNode(
+ Node* low_node = graph()->NewNode(
machine()->Word32Or(),
graph()->NewNode(machine()->Word32And(), rotate_low, mask1),
- graph()->NewNode(machine()->Word32And(), rotate_high, mask2)));
- Node* high_node = SetInt32Type(graph()->NewNode(
+ graph()->NewNode(machine()->Word32And(), rotate_high, mask2));
+ Node* high_node = graph()->NewNode(
machine()->Word32Or(),
graph()->NewNode(machine()->Word32And(), rotate_high, mask1),
- graph()->NewNode(machine()->Word32And(), rotate_low, mask2)));
+ graph()->NewNode(machine()->Word32And(), rotate_low, mask2));
ReplaceNode(node, low_node, high_node);
}
break;
@@ -826,8 +809,7 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementLow(input)),
graph()->NewNode(common()->Int32Constant(32))),
graph()->NewNode(machine()->Word32Clz(), GetReplacementHigh(input)));
- ReplaceNode(node, SetInt32Type(low_node),
- SetInt32Type(graph()->NewNode(common()->Int32Constant(0))));
+ ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
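The CLZ lowering above selects between `clz(low) + 32` (when the high word is zero) and `clz(high)`, and pins the lowered high result word to the constant 0. A standalone sketch of that selection with a naive Clz32 stand-in:

```cpp
#include <cassert>
#include <cstdint>

uint32_t Clz32(uint32_t v) {
  uint32_t n = 0;
  for (uint32_t mask = 0x80000000u; mask != 0 && (v & mask) == 0; mask >>= 1) {
    ++n;
  }
  return n;  // 32 when v == 0
}

// If the high word is zero, the count is 32 plus the low word's CLZ;
// otherwise it is just the high word's CLZ.
uint32_t LoweredClz64(uint32_t low, uint32_t high) {
  return high == 0 ? 32 + Clz32(low) : Clz32(high);
}

int main() {
  assert(LoweredClz64(0x00000001u, 0x00000000u) == 63);
  assert(LoweredClz64(0xFFFFFFFFu, 0x00000001u) == 31);
  assert(LoweredClz64(0, 0) == 64);
  return 0;
}
```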
case IrOpcode::kWord64CtzLowerable: {
@@ -848,8 +830,7 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(common()->Int32Constant(32))),
graph()->NewNode(machine()->Word32Ctz().op(),
GetReplacementLow(input)));
- ReplaceNode(node, SetInt32Type(low_node),
- SetInt32Type(graph()->NewNode(common()->Int32Constant(0))));
+ ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
case IrOpcode::kWord64Ror:
@@ -870,8 +851,7 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementLow(input)),
graph()->NewNode(machine()->Word32Popcnt().op(),
GetReplacementHigh(input)));
- ReplaceNode(node, SetInt32Type(low_node),
- SetInt32Type(graph()->NewNode(common()->Int32Constant(0))));
+ ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
case IrOpcode::kPhi: {
@@ -893,12 +873,12 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kLoopExitValue: {
MachineRepresentation rep = LoopExitValueRepresentationOf(node->op());
if (rep == MachineRepresentation::kWord64) {
- Node* low_node = SetInt32Type(graph()->NewNode(
+ Node* low_node = graph()->NewNode(
common()->LoopExitValue(MachineRepresentation::kWord32),
- GetReplacementLow(node->InputAt(0)), node->InputAt(1)));
- Node* high_node = SetInt32Type(graph()->NewNode(
+ GetReplacementLow(node->InputAt(0)), node->InputAt(1));
+ Node* high_node = graph()->NewNode(
common()->LoopExitValue(MachineRepresentation::kWord32),
- GetReplacementHigh(node->InputAt(0)), node->InputAt(1)));
+ GetReplacementHigh(node->InputAt(0)), node->InputAt(1));
ReplaceNode(node, low_node, high_node);
} else {
DefaultLowering(node);
@@ -907,10 +887,10 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
- Node* low_node = SetInt32Type(graph()->NewNode(
- machine()->Word32ReverseBytes(), GetReplacementHigh(input)));
- Node* high_node = SetInt32Type(graph()->NewNode(
- machine()->Word32ReverseBytes(), GetReplacementLow(input)));
+ Node* low_node = graph()->NewNode(machine()->Word32ReverseBytes(),
+ GetReplacementHigh(input));
+ Node* high_node = graph()->NewNode(machine()->Word32ReverseBytes(),
+ GetReplacementLow(input));
ReplaceNode(node, low_node, high_node);
break;
}
@@ -921,12 +901,12 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
// Sign extend low node to Int32
- Node* low_node = SetInt32Type(
- graph()->NewNode(machine()->SignExtendWord8ToInt32(), input));
+ Node* low_node =
+ graph()->NewNode(machine()->SignExtendWord8ToInt32(), input);
// We use SAR to preserve the sign in the high word.
- Node* high_node = SetInt32Type(
+ Node* high_node =
graph()->NewNode(machine()->Word32Sar(), low_node,
- graph()->NewNode(common()->Int32Constant(31))));
+ graph()->NewNode(common()->Int32Constant(31)));
ReplaceNode(node, low_node, high_node);
node->NullAllInputs();
break;
@@ -938,12 +918,12 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
// Sign extend low node to Int32
- Node* low_node = SetInt32Type(
- graph()->NewNode(machine()->SignExtendWord16ToInt32(), input));
+ Node* low_node =
+ graph()->NewNode(machine()->SignExtendWord16ToInt32(), input);
// We use SAR to preserve the sign in the high word.
- Node* high_node = SetInt32Type(
+ Node* high_node =
graph()->NewNode(machine()->Word32Sar(), low_node,
- graph()->NewNode(common()->Int32Constant(31))));
+ graph()->NewNode(common()->Int32Constant(31)));
ReplaceNode(node, low_node, high_node);
node->NullAllInputs();
break;
@@ -958,8 +938,7 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNodeWithProjections(node);
} else {
NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
- ReplaceNode(node, SetInt32Type(node),
- SetInt32Type(graph()->NewNode(common()->Int32Constant(0))));
+ ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
@@ -1015,8 +994,7 @@ void Int64Lowering::LowerNode(Node* node) {
DefaultLowering(node, true);
NodeProperties::ChangeOp(node,
machine()->Word32AtomicCompareExchange(type));
- ReplaceNode(node, SetInt32Type(node),
- SetInt32Type(graph()->NewNode(common()->Int32Constant(0))));
+ ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
@@ -1032,10 +1010,10 @@ void Int64Lowering::LowerNode(Node* node) {
DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
int32_t lane = OpParameter<int32_t>(node->op());
- Node* low_node = SetInt32Type(
- graph()->NewNode(machine()->I32x4ExtractLane(lane * 2), input));
- Node* high_node = SetInt32Type(
- graph()->NewNode(machine()->I32x4ExtractLane(lane * 2 + 1), input));
+ Node* low_node =
+ graph()->NewNode(machine()->I32x4ExtractLane(lane * 2), input);
+ Node* high_node =
+ graph()->NewNode(machine()->I32x4ExtractLane(lane * 2 + 1), input);
ReplaceNode(node, low_node, high_node);
break;
}
@@ -1068,7 +1046,7 @@ void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
GetReplacementHigh(right)),
graph()->NewNode(low_word_op, GetReplacementLow(left),
GetReplacementLow(right))));
- ReplaceNode(node, SetInt32Type(replacement), nullptr);
+ ReplaceNode(node, replacement, nullptr);
}
bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
@@ -1089,12 +1067,10 @@ bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
const CallDescriptor* Int64Lowering::LowerCallDescriptor(
const CallDescriptor* call_descriptor) {
- if (special_case_) {
- auto replacement = special_case_->replacements.find(call_descriptor);
- if (replacement != special_case_->replacements.end()) {
- return replacement->second;
- }
- }
+ CallDescriptor* maybe_special_replacement =
+ wasm::GetWasmEngine()->call_descriptors()->GetLoweredCallDescriptor(
+ call_descriptor);
+ if (maybe_special_replacement) return maybe_special_replacement;
return GetI32WasmCallDescriptor(zone(), call_descriptor);
}
@@ -1157,7 +1133,7 @@ void Int64Lowering::ReplaceNodeWithProjections(Node* node) {
graph()->NewNode(common()->Projection(0), node, graph()->start());
Node* high_node =
graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, SetInt32Type(low_node), SetInt32Type(high_node));
+ ReplaceNode(node, low_node, high_node);
}
void Int64Lowering::LowerMemoryBaseAndIndex(Node* node) {
@@ -1176,3 +1152,5 @@ void Int64Lowering::LowerMemoryBaseAndIndex(Node* node) {
} // namespace compiler
} // namespace internal
} // namespace v8
+
+#endif // V8_TARGET_ARCH_32_BIT
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 98d090b2d4..e170a3fb16 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -21,21 +21,24 @@ class Signature;
namespace compiler {
-// Struct for CallDescriptors that need special lowering.
-struct V8_EXPORT_PRIVATE Int64LoweringSpecialCase {
- // Map from CallDescriptors that should be replaced, to the replacement
- // CallDescriptors.
- std::unordered_map<const CallDescriptor*, const CallDescriptor*> replacements;
+#if !V8_TARGET_ARCH_32_BIT
+class Int64Lowering {
+ public:
+ Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common,
+ SimplifiedOperatorBuilder* simplified_, Zone* zone,
+ Signature<MachineRepresentation>* signature) {}
+
+ void LowerGraph() {}
};
+#else
class V8_EXPORT_PRIVATE Int64Lowering {
public:
- Int64Lowering(
- Graph* graph, MachineOperatorBuilder* machine,
- CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified_,
- Zone* zone, const wasm::WasmModule* module,
- Signature<MachineRepresentation>* signature,
- std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr);
+ Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common,
+ SimplifiedOperatorBuilder* simplified_, Zone* zone,
+ Signature<MachineRepresentation>* signature);
void LowerGraph();
@@ -71,8 +74,6 @@ class V8_EXPORT_PRIVATE Int64Lowering {
const CallDescriptor* LowerCallDescriptor(
const CallDescriptor* call_descriptor);
- Node* SetInt32Type(Node* node);
- Node* SetFloat64Type(Node* node);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
@@ -95,16 +96,14 @@ class V8_EXPORT_PRIVATE Int64Lowering {
SimplifiedOperatorBuilder* simplified_;
Zone* zone_;
Signature<MachineRepresentation>* signature_;
- std::unique_ptr<Int64LoweringSpecialCase> special_case_;
std::vector<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
Node* placeholder_;
- // Caches for node types, so we do not waste memory.
- Type int32_type_;
- Type float64_type_;
};
+#endif // V8_TARGET_ARCH_32_BIT
+
} // namespace compiler
} // namespace internal
} // namespace v8
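The header change above pairs the `#if V8_TARGET_ARCH_32_BIT` guard in the .cc file with a stub Int64Lowering class, so 64-bit builds get a no-op pass and call sites need no #ifdefs. A self-contained sketch of that pattern with a stand-in LoweringPassSketch type (the UINTPTR_MAX test is only an illustrative stand-in for V8_TARGET_ARCH_32_BIT):

```cpp
#include <cstdint>

struct Graph {};  // stand-in for the TurboFan graph

#if UINTPTR_MAX == 0xFFFFFFFFu
// 32-bit target: the pass does real work.
class LoweringPassSketch {
 public:
  explicit LoweringPassSketch(Graph* graph) : graph_(graph) {}
  void Run() { /* a real pass would walk graph_ and split int64 nodes here */ }

 private:
  Graph* graph_;
};
#else
// 64-bit target: a stub with the same interface, so callers stay unchanged.
class LoweringPassSketch {
 public:
  explicit LoweringPassSketch(Graph*) {}
  void Run() {}  // nothing to do
};
#endif

int main() {
  Graph g;
  LoweringPassSketch(&g).Run();  // safe to call unconditionally
  return 0;
}
```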
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 0894490d23..db77216b74 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -56,7 +56,7 @@ class JSCallReducerAssembler : public JSGraphAssembler {
JSCallReducerAssembler(JSCallReducer* reducer, Node* node,
Node* effect = nullptr, Node* control = nullptr)
: JSGraphAssembler(
- reducer->JSGraphForGraphAssembler(),
+ reducer->broker(), reducer->JSGraphForGraphAssembler(),
reducer->ZoneForGraphAssembler(), BranchSemantics::kJS,
[reducer](Node* n) { reducer->RevisitForGraphAssembler(n); },
kMarkLoopExits),
@@ -537,9 +537,8 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
public:
- PromiseBuiltinReducerAssembler(JSCallReducer* reducer, Node* node,
- JSHeapBroker* broker)
- : JSCallReducerAssembler(reducer, node), broker_(broker) {
+ PromiseBuiltinReducerAssembler(JSCallReducer* reducer, Node* node)
+ : JSCallReducerAssembler(reducer, node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
}
@@ -569,7 +568,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
int slot_count) {
return AddNode<Context>(graph()->NewNode(
javascript()->CreateFunctionContext(
- native_context.scope_info(),
+ native_context.scope_info(broker()),
slot_count - Context::MIN_CONTEXT_SLOTS, FUNCTION_SCOPE),
outer_context, effect(), control()));
}
@@ -586,7 +585,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
- CodeTRef code = MakeRef(broker_, *callable.code());
+ CodeRef code = MakeRef(broker(), *callable.code());
return AddNode<JSFunction>(graph()->NewNode(
javascript()->CreateClosure(shared, code), HeapConstant(feedback_cell),
context, effect(), control()));
@@ -623,8 +622,6 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
frame_state, effect(), control()));
});
}
-
- JSHeapBroker* const broker_;
};
class FastApiCallReducerAssembler : public JSCallReducerAssembler {
@@ -684,7 +681,8 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
// [fast callee, receiver, ... C arguments,
// call code, external constant for function, argc, call handler info data,
// holder, receiver, ... JS arguments, context, new frame state]
- CallHandlerInfoRef call_handler_info = *function_template_info_.call_code();
+ CallHandlerInfoRef call_handler_info =
+ *function_template_info_.call_code(broker());
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
CallDescriptor* call_descriptor =
@@ -693,9 +691,10 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
ApiFunction api_function(call_handler_info.callback());
ExternalReference function_reference = ExternalReference::Create(
isolate(), &api_function, ExternalReference::DIRECT_API_CALL,
- function_template_info_.c_functions().data(),
- function_template_info_.c_signatures().data(),
- static_cast<unsigned>(function_template_info_.c_functions().size()));
+ function_template_info_.c_functions(broker()).data(),
+ function_template_info_.c_signatures(broker()).data(),
+ static_cast<unsigned>(
+ function_template_info_.c_functions(broker()).size()));
Node* continuation_frame_state =
CreateGenericLazyDeoptContinuationFrameState(
@@ -705,7 +704,7 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
inputs[cursor++] = HeapConstant(call_api_callback.code());
inputs[cursor++] = ExternalConstant(function_reference);
inputs[cursor++] = NumberConstant(arity_);
- inputs[cursor++] = Constant(call_handler_info.data());
+ inputs[cursor++] = Constant(call_handler_info.data(broker()));
inputs[cursor++] = holder_;
inputs[cursor++] = receiver_;
for (int i = 0; i < arity_; ++i) {
@@ -860,9 +859,9 @@ TNode<JSArray> JSCallReducerAssembler::CreateArrayNoThrow(
TNode<JSArray> JSCallReducerAssembler::AllocateEmptyJSArray(
ElementsKind kind, const NativeContextRef& native_context) {
// TODO(jgruber): Port AllocationBuilder to JSGraphAssembler.
- MapRef map = native_context.GetInitialJSArrayMap(kind);
+ MapRef map = native_context.GetInitialJSArrayMap(broker(), kind);
- AllocationBuilder ab(jsgraph(), effect(), control());
+ AllocationBuilder ab(jsgraph(), broker(), effect(), control());
ab.Allocate(map.instance_size(), AllocationType::kYoung, Type::Array());
ab.Store(AccessBuilder::ForMap(), map);
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
@@ -961,7 +960,7 @@ TNode<Boolean> JSCallReducerAssembler::ReduceStringPrototypeStartsWith(
Node* receiver_string_char =
StringCharCodeAt(receiver_string, receiver_string_position);
Node* search_string_char =
- jsgraph()->Constant(search_element_string.GetChar(i).value());
+ jsgraph()->Constant(search_element_string.GetChar(broker(), i).value());
auto is_equal = graph()->NewNode(simplified()->NumberEqual(),
search_string_char, receiver_string_char);
GotoIfNot(is_equal, &out, FalseConstant());
@@ -1002,6 +1001,13 @@ TNode<Boolean> JSCallReducerAssembler::ReduceStringPrototypeStartsWith() {
TypeGuard(Type::UnsignedSmall(), NumberAdd(k, clamped_start)));
Node* receiver_string_char =
StringCharCodeAt(receiver_string, receiver_string_position);
+ if (!v8_flags.turbo_loop_variable) {
+ // Without loop variable analysis, Turbofan's typer is unable to derive a
+ // sufficiently precise type here. This is not a soundness problem, but
+ // triggers graph verification errors. So we only insert the TypeGuard if
+ // necessary.
+ k = TypeGuard(Type::Unsigned32(), k);
+ }
Node* search_string_char = StringCharCodeAt(search_string, k);
auto is_equal = graph()->NewNode(simplified()->NumberEqual(),
receiver_string_char, search_string_char);
@@ -1128,7 +1134,7 @@ TNode<Object> IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeAt(
auto out = MakeLabel(MachineRepresentation::kTagged);
for (const MapRef* map : maps) {
- DCHECK(map->supports_fast_array_iteration());
+ DCHECK(map->supports_fast_array_iteration(broker()));
auto correct_map_label = MakeLabel(), wrong_map_label = MakeLabel();
TNode<Boolean> is_map_equal = ReferenceEqual(receiver_map, Constant(*map));
Branch(is_map_equal, &correct_map_label, &wrong_map_label);
@@ -1648,7 +1654,8 @@ TNode<JSArray> IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeMap(
// exceptional projections because it cannot throw with the given
// parameters.
TNode<Object> array_ctor =
- Constant(native_context.GetInitialJSArrayMap(kind).GetConstructor());
+ Constant(native_context.GetInitialJSArrayMap(broker(), kind)
+ .GetConstructor(broker()));
MapFrameStateParams frame_state_params{
jsgraph(), shared, context, target, outer_frame_state,
@@ -1679,8 +1686,9 @@ TNode<JSArray> IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeMap(
// this loop if the input array length is non-zero, and "new Array({x > 0})"
// always produces a HOLEY array.
MapRef holey_double_map =
- native_context.GetInitialJSArrayMap(HOLEY_DOUBLE_ELEMENTS);
- MapRef holey_map = native_context.GetInitialJSArrayMap(HOLEY_ELEMENTS);
+ native_context.GetInitialJSArrayMap(broker(), HOLEY_DOUBLE_ELEMENTS);
+ MapRef holey_map =
+ native_context.GetInitialJSArrayMap(broker(), HOLEY_ELEMENTS);
TransitionAndStoreElement(holey_double_map, holey_map, a, k, v);
Goto(&continue_label);
@@ -2244,7 +2252,7 @@ TNode<Object> PromiseBuiltinReducerAssembler::ReducePromiseConstructor(
DCHECK_EQ(target, NewTargetInput());
SharedFunctionInfoRef promise_shared =
- native_context.promise_function().shared();
+ native_context.promise_function(broker()).shared(broker());
PromiseCtorFrameStateParams frame_state_params{jsgraph(), promise_shared,
node_ptr(), context,
@@ -2275,16 +2283,18 @@ TNode<Object> PromiseBuiltinReducerAssembler::ReducePromiseConstructor(
// Allocate closures for the resolve and reject cases.
SharedFunctionInfoRef resolve_sfi =
- MakeRef(broker_, broker_->isolate()
- ->factory()
- ->promise_capability_default_resolve_shared_fun());
+ MakeRef(broker(), broker()
+ ->isolate()
+ ->factory()
+ ->promise_capability_default_resolve_shared_fun());
TNode<JSFunction> resolve =
CreateClosureFromBuiltinSharedFunctionInfo(resolve_sfi, promise_context);
SharedFunctionInfoRef reject_sfi =
- MakeRef(broker_, broker_->isolate()
- ->factory()
- ->promise_capability_default_reject_shared_fun());
+ MakeRef(broker(), broker()
+ ->isolate()
+ ->factory()
+ ->promise_capability_default_reject_shared_fun());
TNode<JSFunction> reject =
CreateClosureFromBuiltinSharedFunctionInfo(reject_sfi, promise_context);
@@ -2507,11 +2517,13 @@ void JSCallReducer::Finalize() {
std::set<Node*> const waitlist = std::move(waitlist_);
for (Node* node : waitlist) {
if (!node->IsDead()) {
+ // Remember the max node id before reduction.
+ NodeId const max_id = static_cast<NodeId>(graph()->NodeCount() - 1);
Reduction const reduction = Reduce(node);
if (reduction.Changed()) {
Node* replacement = reduction.replacement();
if (replacement != node) {
- Replace(node, replacement);
+ Replace(node, replacement, max_id);
}
}
}
@@ -2727,10 +2739,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
MapRef first_receiver_map = receiver_maps[0];
bool const is_constructor = first_receiver_map.is_constructor();
- HeapObjectRef prototype = first_receiver_map.prototype();
+ HeapObjectRef prototype = first_receiver_map.prototype(broker());
for (const MapRef& receiver_map : receiver_maps) {
- HeapObjectRef map_prototype = receiver_map.prototype();
+ HeapObjectRef map_prototype = receiver_map.prototype(broker());
// Check for consistency among the {receiver_maps}.
if (!map_prototype.equals(prototype) ||
@@ -2762,22 +2774,23 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex);
const InternalIndex kNameIndex(
JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex);
- ReadOnlyRoots roots(isolate());
- StringRef length_string = MakeRef(broker(), roots.length_string_handle());
- StringRef name_string = MakeRef(broker(), roots.name_string_handle());
-
- base::Optional<ObjectRef> length_value(
- receiver_map.GetStrongValue(kLengthIndex));
- base::Optional<ObjectRef> name_value(
- receiver_map.GetStrongValue(kNameIndex));
+ StringRef length_string = broker()->length_string();
+ StringRef name_string = broker()->name_string();
+
+ OptionalObjectRef length_value(
+ receiver_map.GetStrongValue(broker(), kLengthIndex));
+ OptionalObjectRef name_value(
+ receiver_map.GetStrongValue(broker(), kNameIndex));
if (!length_value || !name_value) {
TRACE_BROKER_MISSING(
broker(), "name or length descriptors on map " << receiver_map);
return inference.NoChange();
}
- if (!receiver_map.GetPropertyKey(kLengthIndex).equals(length_string) ||
+ if (!receiver_map.GetPropertyKey(broker(), kLengthIndex)
+ .equals(length_string) ||
!length_value->IsAccessorInfo() ||
- !receiver_map.GetPropertyKey(kNameIndex).equals(name_string) ||
+ !receiver_map.GetPropertyKey(broker(), kNameIndex)
+ .equals(name_string) ||
!name_value->IsAccessorInfo()) {
return inference.NoChange();
}
@@ -2785,10 +2798,11 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// Choose the map for the resulting JSBoundFunction (but bail out in case of a
// custom prototype).
- MapRef map = is_constructor
- ? native_context().bound_function_with_constructor_map()
- : native_context().bound_function_without_constructor_map();
- if (!map.prototype().equals(prototype)) return inference.NoChange();
+ MapRef map =
+ is_constructor
+ ? native_context().bound_function_with_constructor_map(broker())
+ : native_context().bound_function_without_constructor_map(broker());
+ if (!map.prototype(broker()).equals(prototype)) return inference.NoChange();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -2799,8 +2813,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
int const arity = n.ArgumentCount();
if (arity > 0) {
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateArray(arity, fixed_array_map)) {
return NoChange();
}
@@ -2842,7 +2856,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
HeapObjectMatcher m(target);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- context = jsgraph()->Constant(function.context());
+ context = jsgraph()->Constant(function.context(broker()), broker());
} else {
context = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
@@ -2911,12 +2925,12 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
ZoneVector<MapRef> const& object_maps = inference.GetMaps();
MapRef candidate_map = object_maps[0];
- HeapObjectRef candidate_prototype = candidate_map.prototype();
+ HeapObjectRef candidate_prototype = candidate_map.prototype(broker());
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
MapRef object_map = object_maps[i];
- HeapObjectRef map_prototype = object_map.prototype();
+ HeapObjectRef map_prototype = object_map.prototype(broker());
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
!map_prototype.equals(candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
@@ -2931,7 +2945,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
if (!inference.RelyOnMapsViaStability(dependencies())) {
return inference.NoChange();
}
- Node* value = jsgraph()->Constant(candidate_prototype);
+ Node* value = jsgraph()->Constant(candidate_prototype, broker());
ReplaceWithValue(node, value);
return Replace(value);
}
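
The hunk above constant-folds Object.getPrototypeOf when every map recorded for the object has the same prototype and none of them is a special receiver (e.g. JSProxy or access-checked API objects). A stand-alone sketch of that decision, using toy stand-ins for MapRef/HeapObjectRef rather than V8's types:

#include <optional>
#include <vector>

// Toy stand-ins for MapRef / HeapObjectRef.
struct Map {
  const void* prototype;      // identity of the [[Prototype]]
  bool is_special_receiver;   // e.g. proxies or access-checked API objects
};

// Returns the common prototype if constant-folding is safe, nullopt to bail.
std::optional<const void*> CommonPrototypeForFolding(
    const std::vector<Map>& maps) {
  if (maps.empty()) return std::nullopt;
  const void* candidate = maps[0].prototype;
  for (const Map& map : maps) {
    if (map.is_special_receiver || map.prototype != candidate) {
      return std::nullopt;    // cannot constant-fold the prototype
    }
  }
  return candidate;
}

When this returns a value, the reducer additionally has to rely on map stability (RelyOnMapsViaStability above) before replacing the load with a constant.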
@@ -3295,7 +3309,7 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
DCHECK_NE(0, receiver_maps.size());
*kind_return = receiver_maps[0].elements_kind();
for (const MapRef& map : receiver_maps) {
- if (!map.supports_fast_array_iteration() ||
+ if (!map.supports_fast_array_iteration(broker) ||
!UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
return false;
}
@@ -3309,7 +3323,7 @@ bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
for (const MapRef& map : receiver_maps) {
- if (!map.supports_fast_array_resize()) return false;
+ if (!map.supports_fast_array_resize(broker)) return false;
// TODO(turbofan): We should also handle fast holey double elements once
// we got the hole NaN mess sorted out in TurboFan/V8.
if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS && !builtin_is_push) {
@@ -3552,12 +3566,14 @@ bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
return false;
}
+ wasm::ValueType externRefNonNull = wasm::kWasmExternRef.AsNonNull();
for (auto type : wasm_signature->all()) {
#if defined(V8_TARGET_ARCH_32_BIT)
if (type == wasm::kWasmI64) return false;
#endif
if (type != wasm::kWasmI32 && type != wasm::kWasmI64 &&
- type != wasm::kWasmF32 && type != wasm::kWasmF64) {
+ type != wasm::kWasmF32 && type != wasm::kWasmF64 &&
+ type != wasm::kWasmExternRef && type != externRefNonNull) {
return false;
}
}
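
The hunk above widens the set of signatures eligible for JS-to-Wasm call inlining to include externref, both nullable and non-null. A self-contained sketch of the same filter, with an illustrative enum standing in for wasm::ValueType:

#include <cstdint>
#include <vector>

// Stand-in for wasm::ValueType; the names are illustrative, not V8's.
enum class ValType : std::uint8_t {
  kI32, kI64, kF32, kF64, kExternRef, kExternRefNonNull, kOther
};

// Mirrors the shape of the updated check: every parameter and return type
// must be one of the supported kinds, and i64 is rejected on 32-bit targets.
bool CanInlineSignature(const std::vector<ValType>& sig,
                        bool target_is_32bit) {
  for (ValType type : sig) {
    if (target_is_32bit && type == ValType::kI64) return false;
    switch (type) {
      case ValType::kI32:
      case ValType::kI64:
      case ValType::kF32:
      case ValType::kF64:
      case ValType::kExternRef:
      case ValType::kExternRefNonNull:
        break;              // supported for inlining
      default:
        return false;       // any other type blocks inlining
    }
  }
  return true;
}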
@@ -3588,8 +3604,21 @@ Reduction JSCallReducer::ReduceCallWasmFunction(
has_wasm_calls_ = true;
const wasm::WasmModule* wasm_module = shared.wasm_module();
- const Operator* op =
- javascript()->CallWasm(wasm_module, wasm_signature, p.feedback());
+ if (wasm_module_for_inlining_ == nullptr) {
+ wasm_module_for_inlining_ = wasm_module;
+ }
+
+ wasm::NativeModule* native_module = nullptr;
+ if (shared.object()->HasWasmExportedFunctionData()) {
+ native_module = shared.object()
+ ->wasm_exported_function_data()
+ .instance()
+ .module_object()
+ .native_module();
+ }
+ const Operator* op = javascript()->CallWasm(wasm_module, wasm_signature,
+ shared.wasm_function_index(),
+ native_module, p.feedback());
// Remove additional inputs
size_t actual_arity = n.ArgumentCount();
@@ -3633,16 +3662,16 @@ Reduction JSCallReducer::ReduceCallWasmFunction(
// represents the set of "optimizable" function overloads.
FastApiCallFunctionVector CanOptimizeFastCall(
- Zone* zone, const FunctionTemplateInfoRef& function_template_info,
- size_t argc) {
+ JSHeapBroker* broker, Zone* zone,
+ const FunctionTemplateInfoRef& function_template_info, size_t argc) {
FastApiCallFunctionVector result(zone);
if (!v8_flags.turbo_fast_api_calls) return result;
static constexpr int kReceiver = 1;
- ZoneVector<Address> functions = function_template_info.c_functions();
+ ZoneVector<Address> functions = function_template_info.c_functions(broker);
ZoneVector<const CFunctionInfo*> signatures =
- function_template_info.c_signatures();
+ function_template_info.c_signatures(broker);
const size_t overloads_count = signatures.size();
// Calculates the length of the longest type list of the entries in
@@ -3681,8 +3710,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
CallParameters const& p = n.Parameters();
int const argc = p.arity_without_implicit_args();
Node* target = n.target();
- Node* global_proxy =
- jsgraph()->Constant(native_context().global_proxy_object());
+ Node* global_proxy = jsgraph()->Constant(
+ native_context().global_proxy_object(broker()), broker());
Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
? global_proxy
: n.receiver();
@@ -3692,7 +3721,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Control control = n.control();
FrameState frame_state = n.frame_state();
- if (!shared.function_template_info().has_value()) {
+ if (!shared.function_template_info(broker()).has_value()) {
TRACE_BROKER_MISSING(
broker(), "FunctionTemplateInfo for function with SFI " << shared);
return NoChange();
@@ -3700,10 +3729,10 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// See if we can optimize this API call to {shared}.
FunctionTemplateInfoRef function_template_info(
- shared.function_template_info().value());
+ shared.function_template_info(broker()).value());
if (function_template_info.accept_any_receiver() &&
- function_template_info.is_signature_undefined()) {
+ function_template_info.is_signature_undefined(broker())) {
// We might be able to
// optimize the API call depending on the {function_template_info}.
// If the API function accepts any kind of {receiver}, we only need to
@@ -3731,7 +3760,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// See if we can constant-fold the compatible receiver checks.
HolderLookupResult api_holder =
- function_template_info.LookupHolderOfExpectedType(first_receiver_map);
+ function_template_info.LookupHolderOfExpectedType(broker(),
+ first_receiver_map);
if (api_holder.lookup == CallOptimization::kHolderNotFound) {
return inference.NoChange();
}
@@ -3762,7 +3792,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
for (size_t i = 1; i < receiver_maps.size(); ++i) {
MapRef receiver_map = receiver_maps[i];
HolderLookupResult holder_i =
- function_template_info.LookupHolderOfExpectedType(receiver_map);
+ function_template_info.LookupHolderOfExpectedType(broker(),
+ receiver_map);
if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
DCHECK(holder_i.lookup == CallOptimization::kHolderFound ||
@@ -3793,7 +3824,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// Determine the appropriate holder for the {lookup}.
holder = api_holder.lookup == CallOptimization::kHolderFound
- ? jsgraph()->Constant(*api_holder.holder)
+ ? jsgraph()->Constant(*api_holder.holder, broker())
: receiver;
} else {
// We don't have enough information to eliminate the access check
@@ -3803,7 +3834,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Builtin builtin_name;
if (function_template_info.accept_any_receiver()) {
builtin_name = Builtin::kCallFunctionTemplate_CheckCompatibleReceiver;
- } else if (function_template_info.is_signature_undefined()) {
+ } else if (function_template_info.is_signature_undefined(broker())) {
builtin_name = Builtin::kCallFunctionTemplate_CheckAccess;
} else {
builtin_name =
@@ -3824,7 +3855,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
+ node->ReplaceInput(1,
+ jsgraph()->Constant(function_template_info, broker()));
node->InsertInput(graph()->zone(), 2,
jsgraph()->Constant(JSParameterCount(argc)));
node->ReplaceInput(3, receiver); // Update receiver input.
@@ -3837,7 +3869,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
// this and lower it during JSGenericLowering, and unify this with the
// JSNativeContextSpecialization::InlineApiCall method a bit.
- if (!function_template_info.call_code().has_value()) {
+ if (!function_template_info.call_code(broker()).has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
return NoChange();
@@ -3845,8 +3877,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// Handles overloaded functions.
- FastApiCallFunctionVector c_candidate_functions =
- CanOptimizeFastCall(graph()->zone(), function_template_info, argc);
+ FastApiCallFunctionVector c_candidate_functions = CanOptimizeFastCall(
+ broker(), graph()->zone(), function_template_info, argc);
DCHECK_LE(c_candidate_functions.size(), 2);
if (!c_candidate_functions.empty()) {
@@ -3861,7 +3893,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// Slow call
- CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
+ CallHandlerInfoRef call_handler_info =
+ *function_template_info.call_code(broker());
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor =
@@ -3879,8 +3912,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference));
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->InsertInput(graph()->zone(), 3,
- jsgraph()->Constant(call_handler_info.data()));
+ node->InsertInput(
+ graph()->zone(), 3,
+ jsgraph()->Constant(call_handler_info.data(broker()), broker()));
node->InsertInput(graph()->zone(), 4, holder);
node->ReplaceInput(5, receiver); // Update receiver input.
// 6 + argc is context input.
@@ -4255,10 +4289,11 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
if (feedback.IsInsufficient()) return NoChange();
AllocationSiteRef site = feedback.AsLiteral().value();
- if (!site.boilerplate().has_value()) return NoChange();
+ if (!site.boilerplate(broker()).has_value()) return NoChange();
- JSArrayRef boilerplate_array = site.boilerplate()->AsJSArray();
- int const array_length = boilerplate_array.GetBoilerplateLength().AsSmi();
+ JSArrayRef boilerplate_array = site.boilerplate(broker())->AsJSArray();
+ int const array_length =
+ boilerplate_array.GetBoilerplateLength(broker()).AsSmi();
// We'll replace the arguments_list input with {array_length} element loads.
new_argument_count = argument_count - 1 + array_length;
@@ -4271,8 +4306,8 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
}
// Determine the array's map.
- MapRef array_map = boilerplate_array.map();
- if (!array_map.supports_fast_array_iteration()) {
+ MapRef array_map = boilerplate_array.map(broker());
+ if (!array_map.supports_fast_array_iteration(broker())) {
return NoChange();
}
@@ -4343,8 +4378,8 @@ bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
// TODO(neis): Add a way to check if function template info isn't serialized
// and add a warning in such cases. Currently we can't tell if function
// template info doesn't exist or wasn't serialized.
- return function.shared().HasBuiltinId() ||
- function.shared().function_template_info().has_value();
+ return function.shared(broker()).HasBuiltinId() ||
+ function.shared(broker()).function_template_info(broker()).has_value();
}
Reduction JSCallReducer::ReduceJSCall(Node* node) {
@@ -4365,40 +4400,43 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
JSFunctionRef function = target_ref.AsJSFunction();
// Don't inline cross native context.
- if (!function.native_context().equals(native_context())) {
+ if (!function.native_context(broker()).equals(native_context())) {
return NoChange();
}
- return ReduceJSCall(node, function.shared());
+ return ReduceJSCall(node, function.shared(broker()));
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- ObjectRef bound_this = function.bound_this();
+ ObjectRef bound_this = function.bound_this(broker());
ConvertReceiverMode const convert_mode =
- bound_this.IsNullOrUndefined()
+ bound_this.IsNullOrUndefined(broker())
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
// TODO(jgruber): Inline this block below once TryGet is guaranteed to
// succeed.
- FixedArrayRef bound_arguments = function.bound_arguments();
+ FixedArrayRef bound_arguments = function.bound_arguments(broker());
const int bound_arguments_length = bound_arguments.length();
static constexpr int kInlineSize = 16; // Arbitrary.
base::SmallVector<Node*, kInlineSize> args;
for (int i = 0; i < bound_arguments_length; ++i) {
- base::Optional<ObjectRef> maybe_arg = bound_arguments.TryGet(i);
+ OptionalObjectRef maybe_arg = bound_arguments.TryGet(broker(), i);
if (!maybe_arg.has_value()) {
TRACE_BROKER_MISSING(broker(), "bound argument");
return NoChange();
}
- args.emplace_back(jsgraph()->Constant(maybe_arg.value()));
+ args.emplace_back(jsgraph()->Constant(maybe_arg.value(), broker()));
}
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(function.bound_target_function()),
+ node,
+ jsgraph()->Constant(function.bound_target_function(broker()),
+ broker()),
JSCallNode::TargetIndex());
- NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
- JSCallNode::ReceiverIndex());
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->Constant(bound_this, broker()),
+ JSCallNode::ReceiverIndex());
// Insert the [[BoundArguments]] for {node}.
for (int i = 0; i < bound_arguments_length; ++i) {
@@ -4429,10 +4467,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& params =
JSCreateClosureNode{target}.Parameters();
- return ReduceJSCall(node, params.shared_info(broker()));
+ return ReduceJSCall(node, params.shared_info());
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ OptionalSharedFunctionInfoRef shared = cell.shared_function_info(broker());
if (!shared.has_value()) {
TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
<< cell << " has no FeedbackVector");
@@ -4490,16 +4528,17 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
}
- base::Optional<HeapObjectRef> feedback_target;
+ OptionalHeapObjectRef feedback_target;
if (p.feedback_relation() == CallFeedbackRelation::kTarget) {
feedback_target = feedback.AsCall().target();
} else {
DCHECK_EQ(p.feedback_relation(), CallFeedbackRelation::kReceiver);
- feedback_target = native_context().function_prototype_apply();
+ feedback_target = native_context().function_prototype_apply(broker());
}
- if (feedback_target.has_value() && feedback_target->map().is_callable()) {
- Node* target_function = jsgraph()->Constant(*feedback_target);
+ if (feedback_target.has_value() &&
+ feedback_target->map(broker()).is_callable()) {
+ Node* target_function = jsgraph()->Constant(*feedback_target, broker());
// Check that the {target} is still the {target_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -4517,7 +4556,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell = feedback_target.value().AsFeedbackCell();
// TODO(neis): This check seems unnecessary.
- if (feedback_cell.feedback_vector().has_value()) {
+ if (feedback_cell.feedback_vector(broker()).has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
Node* target_closure = effect =
@@ -4645,8 +4684,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtin::kArrayBufferIsView:
return ReduceArrayBufferIsView(node);
case Builtin::kDataViewPrototypeGetByteLength:
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
return ReduceArrayBufferViewByteLengthAccessor(node, JS_DATA_VIEW_TYPE);
case Builtin::kDataViewPrototypeGetByteOffset:
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
AccessBuilder::ForJSArrayBufferViewByteOffset(), builtin);
@@ -4890,6 +4931,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDateNow(node);
case Builtin::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtin::kBigIntConstructor:
+ return ReduceBigIntConstructor(node);
case Builtin::kBigIntAsIntN:
case Builtin::kBigIntAsUintN:
return ReduceBigIntAsN(node, builtin);
@@ -4897,7 +4940,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
break;
}
- if (shared.function_template_info().has_value()) {
+ if (shared.function_template_info(broker()).has_value()) {
return ReduceCallApiFunction(node, shared);
}
@@ -4964,21 +5007,21 @@ namespace {
// skipping the instance type check.
bool TargetIsClassConstructor(Node* node, JSHeapBroker* broker) {
Node* target = NodeProperties::GetValueInput(node, 0);
- base::Optional<SharedFunctionInfoRef> shared;
+ OptionalSharedFunctionInfoRef shared;
HeapObjectMatcher m(target);
if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker);
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
- shared = function.shared();
+ shared = function.shared(broker);
}
} else if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& ccp =
JSCreateClosureNode{target}.Parameters();
- shared = ccp.shared_info(broker);
+ shared = ccp.shared_info();
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker, FeedbackCellOf(target->op()));
- shared = cell.shared_function_info();
+ shared = cell.shared_function_info(broker);
}
if (shared.has_value() && IsClassConstructor(shared->kind())) return true;
@@ -5044,15 +5087,15 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
}
- base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target();
+ OptionalHeapObjectRef feedback_target = feedback.AsCall().target();
if (feedback_target.has_value() && feedback_target->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
// for the resulting arrays. This has to be kept in sync with the
// implementation in Ignition.
- Node* array_function =
- jsgraph()->Constant(native_context().array_function());
+ Node* array_function = jsgraph()->Constant(
+ native_context().array_function(broker()), broker());
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -5072,8 +5115,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return Changed(node);
} else if (feedback_target.has_value() &&
!HeapObjectMatcher(new_target).HasResolvedValue() &&
- feedback_target->map().is_constructor()) {
- Node* new_target_feedback = jsgraph()->Constant(*feedback_target);
+ feedback_target->map(broker()).is_constructor()) {
+ Node* new_target_feedback =
+ jsgraph()->Constant(*feedback_target, broker());
// Check that the {new_target} is still the {new_target_feedback}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), new_target,
@@ -5100,7 +5144,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
HeapObjectRef target_ref = m.Ref(broker());
// Raise a TypeError if the {target} is not a constructor.
- if (!target_ref.map().is_constructor()) {
+ if (!target_ref.map(broker()).is_constructor()) {
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(node,
javascript()->CallRuntime(
@@ -5115,11 +5159,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// If this state changes during background compilation, the compilation
// job will be aborted from the main thread (see
// Debug::PrepareFunctionForDebugExecution()).
- SharedFunctionInfoRef sfi = function.shared();
+ SharedFunctionInfoRef sfi = function.shared(broker());
if (sfi.HasBreakInfo()) return NoChange();
// Don't inline cross native context.
- if (!function.native_context().equals(native_context())) {
+ if (!function.native_context(broker()).equals(native_context())) {
return NoChange();
}
@@ -5165,14 +5209,15 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
case Builtin::kPromiseConstructor:
return ReducePromiseConstructor(node);
case Builtin::kTypedArrayConstructor:
- return ReduceTypedArrayConstructor(node, function.shared());
+ return ReduceTypedArrayConstructor(node, function.shared(broker()));
default:
break;
}
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- JSReceiverRef bound_target_function = function.bound_target_function();
- FixedArrayRef bound_arguments = function.bound_arguments();
+ JSReceiverRef bound_target_function =
+ function.bound_target_function(broker());
+ FixedArrayRef bound_arguments = function.bound_arguments(broker());
const int bound_arguments_length = bound_arguments.length();
// TODO(jgruber): Inline this block below once TryGet is guaranteed to
@@ -5180,31 +5225,33 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
static constexpr int kInlineSize = 16; // Arbitrary.
base::SmallVector<Node*, kInlineSize> args;
for (int i = 0; i < bound_arguments_length; ++i) {
- base::Optional<ObjectRef> maybe_arg = bound_arguments.TryGet(i);
+ OptionalObjectRef maybe_arg = bound_arguments.TryGet(broker(), i);
if (!maybe_arg.has_value()) {
TRACE_BROKER_MISSING(broker(), "bound argument");
return NoChange();
}
- args.emplace_back(jsgraph()->Constant(maybe_arg.value()));
+ args.emplace_back(jsgraph()->Constant(maybe_arg.value(), broker()));
}
// Patch {node} to use [[BoundTargetFunction]].
node->ReplaceInput(n.TargetIndex(),
- jsgraph()->Constant(bound_target_function));
+ jsgraph()->Constant(bound_target_function, broker()));
// Patch {node} to use [[BoundTargetFunction]]
// as new.target if {new_target} equals {target}.
if (target == new_target) {
- node->ReplaceInput(n.NewTargetIndex(),
- jsgraph()->Constant(bound_target_function));
+ node->ReplaceInput(
+ n.NewTargetIndex(),
+ jsgraph()->Constant(bound_target_function, broker()));
} else {
node->ReplaceInput(
n.NewTargetIndex(),
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->ReferenceEqual(),
- target, new_target),
- jsgraph()->Constant(bound_target_function),
- new_target));
+ graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(), target,
+ new_target),
+ jsgraph()->Constant(bound_target_function, broker()),
+ new_target));
}
// Insert the [[BoundArguments]] for {node}.
@@ -5578,7 +5625,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeAt(Node* node) {
ZoneVector<const MapRef*> maps(broker()->zone());
bool needs_fallback_builtin_call = false;
for (const MapRef& map : inference.GetMaps()) {
- if (map.supports_fast_array_iteration()) {
+ if (map.supports_fast_array_iteration(broker())) {
maps.push_back(&map);
} else {
needs_fallback_builtin_call = true;
@@ -5968,8 +6015,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
Builtins::name(builtin), node->op()->properties(),
CallDescriptor::kNeedsFrameState);
- Node* stub_code = jsgraph()->CEntryStubConstant(
- 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
+ Node* stub_code =
+ jsgraph()->CEntryStubConstant(1, ArgvMode::kStack, true);
Address builtin_entry = Builtins::CppEntryOf(builtin);
Node* entry = jsgraph()->ExternalConstant(
ExternalReference::Create(builtin_entry));
@@ -6062,7 +6109,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
// `slice.call(arguments)`, for example jQuery makes heavy use of that.
bool can_be_holey = false;
for (const MapRef& receiver_map : receiver_maps) {
- if (!receiver_map.supports_fast_array_iteration()) {
+ if (!receiver_map.supports_fast_array_iteration(broker())) {
return inference.NoChange();
}
if (IsHoleyElementsKind(receiver_map.elements_kind())) {
@@ -6675,7 +6722,7 @@ Reduction JSCallReducer::ReduceStringPrototypeLocaleCompare(Node* node) {
if (!ref.IsString()) return NoChange();
StringRef sref = ref.AsString();
if (base::Optional<Handle<String>> maybe_locales =
- sref.ObjectIfContentAccessible()) {
+ sref.ObjectIfContentAccessible(broker())) {
locales = *maybe_locales;
} else {
return NoChange();
@@ -6830,7 +6877,7 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
- PromiseBuiltinReducerAssembler a(this, node, broker());
+ PromiseBuiltinReducerAssembler a(this, node);
// We only inline when we have the executor.
if (a.ConstructArity() < 1) return NoChange();
@@ -6850,8 +6897,8 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
// have the initial Promise.prototype as their [[Prototype]].
for (const MapRef& receiver_map : receiver_maps) {
if (!receiver_map.IsJSPromiseMap()) return false;
- HeapObjectRef prototype = receiver_map.prototype();
- if (!prototype.equals(native_context().promise_prototype())) {
+ HeapObjectRef prototype = receiver_map.prototype(broker());
+ if (!prototype.equals(native_context().promise_prototype(broker()))) {
return false;
}
}
@@ -6883,7 +6930,8 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
// Massage the {node} to call "then" instead by first removing all inputs
// following the onRejected parameter, and then filling up the parameters
// to two inputs from the left with undefined.
- Node* target = jsgraph()->Constant(native_context().promise_then());
+ Node* target =
+ jsgraph()->Constant(native_context().promise_then(broker()), broker());
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceEffectInput(node, effect);
for (; arity > 1; --arity) node->RemoveInput(3);
@@ -6905,7 +6953,7 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
isolate()->factory()->many_closures_cell();
Callable const callable =
Builtins::CallableFor(isolate(), shared.builtin_id());
- CodeTRef code = MakeRef(broker(), *callable.code());
+ CodeRef code = MakeRef(broker(), *callable.code());
return graph()->NewNode(javascript()->CreateClosure(shared, code),
jsgraph()->HeapConstant(feedback_cell), context,
effect, control);
@@ -6951,14 +6999,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
Node* catch_true;
Node* then_true;
{
- Node* context = jsgraph()->Constant(native_context());
- Node* constructor =
- jsgraph()->Constant(native_context().promise_function());
+ Node* context = jsgraph()->Constant(native_context(), broker());
+ Node* constructor = jsgraph()->Constant(
+ native_context().promise_function(broker()), broker());
// Allocate shared context for the closures below.
context = etrue = graph()->NewNode(
javascript()->CreateFunctionContext(
- native_context().scope_info(),
+ native_context().scope_info(broker()),
int{PromiseBuiltins::kPromiseFinallyContextLength} -
Context::MIN_CONTEXT_SLOTS,
FUNCTION_SCOPE),
@@ -7014,7 +7062,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Massage the {node} to call "then" instead by first removing all inputs
// following the onFinally parameter, and then replacing the only parameter
// input with the {on_finally} value.
- Node* target = jsgraph()->Constant(native_context().promise_then());
+ Node* target =
+ jsgraph()->Constant(native_context().promise_then(broker()), broker());
NodeProperties::ReplaceValueInput(node, target, n.TargetIndex());
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceControlInput(node, control);
@@ -7087,7 +7136,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// into the graph such that subsequent passes can use the
// information for further optimizations.
MapRef promise_map =
- native_context().promise_function().initial_map(dependencies());
+ native_context().promise_function(broker()).initial_map(broker());
effect = graph()->NewNode(
simplified()->MapGuard(ZoneHandleSet<Map>(promise_map.object())), promise,
effect, control);
@@ -7201,6 +7250,10 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
simplified()->NumberSubtract(), receiver_elements_kind,
jsgraph()->Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
+ // To be converted into a switch by the ControlFlowOptimizer, the below
+ // code requires that TYPED_ARRAYS and RAB_GSAB_TYPED_ARRAYS are consecutive.
+ static_assert(LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1 ==
+ FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
do { \
Node* check = graph()->NewNode( \
@@ -7209,12 +7262,13 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); \
control = graph()->NewNode(common()->Branch(), check, control); \
values.push_back(jsgraph()->Constant( \
- broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS))); \
+ broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS), broker())); \
effects.push_back(effect); \
controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \
control = graph()->NewNode(common()->IfFalse(), control); \
} while (false);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
values.push_back(jsgraph()->UndefinedConstant());
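
The static_assert in this hunk encodes the layout assumption that lets the ControlFlowOptimizer turn the cascaded branches into a switch: the regular and RAB/GSAB typed-array element kinds must form one contiguous range, so (kind - first) is a dense index. A minimal sketch of the same pattern with hypothetical enumerators, not V8's actual kinds:

#include <cstdint>

// Hypothetical element kinds; only the contiguity matters for the pattern.
enum ElementsKind : std::uint8_t {
  kInt8Elements,
  kFloat64Elements,        // last "regular" kind in this sketch
  kRabGsabInt8Elements,    // must follow immediately for the dense index
  kRabGsabFloat64Elements,
};

static_assert(kFloat64Elements + 1 == kRabGsabInt8Elements,
              "typed-array kinds must be consecutive");

const char* StringTagFor(ElementsKind kind) {
  switch (kind - kInt8Elements) {  // dense 0..N index thanks to contiguity
    case 0: return "Int8Array";
    case 1: return "Float64Array";
    case 2: return "Int8Array";      // RAB/GSAB variant keeps the same tag
    case 3: return "Float64Array";
    default: return nullptr;
  }
}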
@@ -7236,6 +7290,7 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
Reduction JSCallReducer::ReduceArrayBufferViewByteLengthAccessor(
Node* node, InstanceType instance_type) {
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
DCHECK(instance_type == JS_TYPED_ARRAY_TYPE ||
instance_type == JS_DATA_VIEW_TYPE);
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -7250,9 +7305,7 @@ Reduction JSCallReducer::ReduceArrayBufferViewByteLengthAccessor(
std::set<ElementsKind> elements_kinds;
bool maybe_rab_gsab = false;
- if (instance_type == JS_DATA_VIEW_TYPE) {
- maybe_rab_gsab = true;
- } else {
+ if (instance_type == JS_TYPED_ARRAY_TYPE) {
for (const auto& map : inference.GetMaps()) {
ElementsKind kind = map.elements_kind();
elements_kinds.insert(kind);
@@ -7328,9 +7381,9 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeLength(Node* node) {
return inference.NoChange();
}
- inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
- control,
- CallParametersOf(node->op()).feedback());
+ if (!inference.RelyOnMapsViaStability(dependencies())) {
+ return inference.NoChange();
+ }
JSCallReducerAssembler a(this, node);
TNode<JSTypedArray> typed_array =
@@ -7851,6 +7904,7 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access,
Builtin builtin) {
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Effect effect{NodeProperties::GetEffectInput(node)};
Control control{NodeProperties::GetControlInput(node)};
@@ -7933,6 +7987,7 @@ uint32_t ExternalArrayElementSize(const ExternalArrayType element_type) {
Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
ExternalArrayType element_type) {
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
JSCallNode n(node);
CallParameters const& p = n.Parameters();
size_t const element_size = ExternalArrayElementSize(element_type);
@@ -7965,6 +8020,7 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
// We only deal with DataViews here whose [[ByteLength]] is at least
// {element_size}, as for all other DataViews it'll be out-of-bounds.
JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
+
size_t length = dataview.byte_length();
if (length < element_size) return NoChange();
@@ -7973,21 +8029,11 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
offset = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
offset, byte_length, effect, control);
} else {
- Node* byte_length;
- if (!v8_flags.harmony_rab_gsab) {
- // We only deal with DataViews here that have Smi [[ByteLength]]s.
- byte_length = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, effect, control);
- } else {
- JSCallReducerAssembler a(this, node);
- byte_length = a.ArrayBufferViewByteLength(
- TNode<JSArrayBufferView>::UncheckedCast(receiver), JS_DATA_VIEW_TYPE,
- {}, a.ContextInput());
- std::tie(effect, control) = ReleaseEffectAndControlFromAssembler(&a);
- }
-
+ // We only deal with DataViews here that have Smi [[ByteLength]]s.
+ Node* byte_length = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, effect, control);
if (element_size > 1) {
// For non-byte accesses we also need to check that the {offset}
// plus the {element_size}-1 fits within the given {byte_length}.
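
For this non-constant-receiver path the reducer keeps the Smi [[ByteLength]] load and then bound-checks the access; for multi-byte element types the last byte of the access must also fit. A minimal sketch of that bounds condition, independent of the compiler machinery:

#include <cstddef>

// Returns true when an access of `element_size` bytes starting at `offset`
// fits entirely inside a view of `byte_length` bytes. For element_size == 1
// this degenerates to offset < byte_length, matching the simpler check.
bool DataViewAccessInBounds(std::size_t offset, std::size_t element_size,
                            std::size_t byte_length) {
  if (element_size == 0 || byte_length < element_size) return false;
  // Equivalent to offset + element_size - 1 < byte_length, written without
  // risking overflow in the addition.
  return offset <= byte_length - element_size;
}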
@@ -8170,6 +8216,34 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
FrameState frame_state = n.frame_state();
Node* object = n.Argument(0);
Node* radix = n.ArgumentOrUndefined(1, jsgraph());
+
+ // Try constant-folding when input is a string constant.
+ HeapObjectMatcher object_matcher(object);
+ HeapObjectMatcher radix_object_matcher(radix);
+ NumberMatcher radix_number_matcher(radix);
+ if (object_matcher.HasResolvedValue() &&
+ object_matcher.Ref(broker()).IsString() &&
+ (radix_object_matcher.Is(factory()->undefined_value()) ||
+ radix_number_matcher.HasResolvedValue())) {
+ StringRef input_value = object_matcher.Ref(broker()).AsString();
+ // {undefined} is treated same as 0.
+ int radix_value = radix_object_matcher.Is(factory()->undefined_value())
+ ? 0
+ : DoubleToInt32(radix_number_matcher.ResolvedValue());
+ if (radix_value != 0 && (radix_value < 2 || radix_value > 36)) {
+ Node* value = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+ base::Optional<double> number = input_value.ToInt(broker(), radix_value);
+ if (number.has_value()) {
+ Node* result = graph()->NewNode(common()->NumberConstant(number.value()));
+ ReplaceWithValue(node, result);
+ return Replace(result);
+ }
+ }
+
node->ReplaceInput(0, object);
node->ReplaceInput(1, radix);
node->ReplaceInput(2, context);
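
The new constant-folding path above treats an undefined radix as 0, folds any radix outside [2, 36] to NaN, and otherwise asks the broker (StringRef::ToInt) for the parsed value. A simplified stand-alone model of that radix handling and digit scan — no "0x" prefix or whitespace handling, and std::optional instead of the broker:

#include <cctype>
#include <cmath>
#include <optional>
#include <string>

std::optional<double> FoldParseInt(const std::string& input, int radix) {
  if (radix != 0 && (radix < 2 || radix > 36)) return std::nan("");
  if (radix == 0) radix = 10;  // undefined radix falls back to 10 here
  std::size_t i = 0;
  bool negative = false;
  if (i < input.size() && (input[i] == '+' || input[i] == '-')) {
    negative = input[i] == '-';
    ++i;
  }
  double value = 0;
  std::size_t digits = 0;
  for (; i < input.size(); ++i, ++digits) {
    char c = static_cast<char>(
        std::tolower(static_cast<unsigned char>(input[i])));
    int d = std::isdigit(static_cast<unsigned char>(c)) ? c - '0'
            : (c >= 'a' && c <= 'z')                    ? c - 'a' + 10
                                                        : -1;
    if (d < 0 || d >= radix) break;  // stop at the first invalid digit
    value = value * radix + d;
  }
  if (digits == 0) return std::nan("");  // nothing parsed -> NaN
  return negative ? -value : value;
}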
@@ -8199,20 +8273,18 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// check as well as the lowered builtin call rely on a known location of the
// lastIndex field.
MapRef regexp_initial_map =
- native_context().regexp_function().initial_map(dependencies());
+ native_context().regexp_function(broker()).initial_map(broker());
MapInference inference(broker(), regexp, effect);
if (!inference.Is(regexp_initial_map)) return inference.NoChange();
ZoneVector<MapRef> const& regexp_maps = inference.GetMaps();
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
for (const MapRef& map : regexp_maps) {
access_infos.push_back(broker()->GetPropertyAccessInfo(
- map, MakeRef(broker(), isolate()->factory()->exec_string()),
- AccessMode::kLoad, dependencies()));
+ map, broker()->exec_string(), AccessMode::kLoad));
}
PropertyAccessInfo ai_exec =
@@ -8222,14 +8294,15 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
if (!ai_exec.IsFastDataConstant()) return inference.NoChange();
// Do not reduce if the exec method is not on the prototype chain.
- base::Optional<JSObjectRef> holder = ai_exec.holder();
+ OptionalJSObjectRef holder = ai_exec.holder();
if (!holder.has_value()) return inference.NoChange();
// Bail out if the exec method is not the original one.
- base::Optional<ObjectRef> constant = holder->GetOwnFastDataProperty(
- ai_exec.field_representation(), ai_exec.field_index(), dependencies());
+ OptionalObjectRef constant =
+ holder->GetOwnFastDataProperty(broker(), ai_exec.field_representation(),
+ ai_exec.field_index(), dependencies());
if (!constant.has_value() ||
- !constant->equals(native_context().regexp_exec_function())) {
+ !constant->equals(native_context().regexp_exec_function(broker()))) {
return inference.NoChange();
}
@@ -8282,7 +8355,7 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
// Create the artificial frame state in the middle of the Number constructor.
SharedFunctionInfoRef shared_info =
- native_context().number_function().shared();
+ native_context().number_function(broker()).shared(broker());
Node* stack_parameters[] = {receiver};
int stack_parameter_count = arraysize(stack_parameters);
Node* continuation_frame_state =
@@ -8298,6 +8371,39 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
+// ES section #sec-bigint-constructor
+Reduction JSCallReducer::ReduceBigIntConstructor(Node* node) {
+ if (!jsgraph()->machine()->Is64()) return NoChange();
+
+ JSCallNode n(node);
+ if (n.ArgumentCount() < 1) {
+ return NoChange();
+ }
+
+ Node* target = n.target();
+ Node* receiver = n.receiver();
+ Node* value = n.Argument(0);
+ Node* context = n.context();
+ FrameState frame_state = n.frame_state();
+
+ // Create the artificial frame state in the middle of the BigInt constructor.
+ SharedFunctionInfoRef shared_info =
+ native_context().bigint_function(broker()).shared(broker());
+ Node* stack_parameters[] = {receiver};
+ int stack_parameter_count = arraysize(stack_parameters);
+ Node* continuation_frame_state =
+ CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared_info, Builtin::kGenericLazyDeoptContinuation,
+ target, context, stack_parameters, stack_parameter_count, frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ // Convert the {value} to a BigInt.
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToBigIntConvertNumber());
+ NodeProperties::ReplaceFrameStateInput(node, continuation_frame_state);
+ return Changed(node);
+}
+
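
ReduceBigIntConstructor above only fires on 64-bit targets and lowers BigInt(value) to a ToBigIntConvertNumber node guarded by a lazy-deopt continuation frame state. At the JavaScript level the numeric path it models only accepts integral, finite numbers; anything else throws a RangeError at runtime. A small sketch of that check (my own helper, not V8 code, with int64_t standing in for an arbitrary-precision BigInt):

#include <cmath>
#include <cstdint>
#include <optional>

// Empty optional stands in for the RangeError path.
std::optional<std::int64_t> NumberToBigIntSketch(double value) {
  if (!std::isfinite(value) || std::trunc(value) != value) return std::nullopt;
  // Real BigInts are arbitrary precision; int64_t is enough for the sketch.
  if (value < static_cast<double>(INT64_MIN) ||
      value >= static_cast<double>(INT64_MAX)) {
    return std::nullopt;
  }
  return static_cast<std::int64_t>(value);
}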
Reduction JSCallReducer::ReduceBigIntAsN(Node* node, Builtin builtin) {
DCHECK(builtin == Builtin::kBigIntAsIntN ||
builtin == Builtin::kBigIntAsUintN);
@@ -8372,11 +8478,11 @@ base::Optional<Reduction> JSCallReducer::TryReduceJSCallMathMinMaxWithArrayLike(
JSFunctionRef function = target_ref.AsJSFunction();
// Don't inline cross native context.
- if (!function.native_context().equals(native_context())) {
+ if (!function.native_context(broker()).equals(native_context())) {
return base::nullopt;
}
- SharedFunctionInfoRef shared = function.shared();
+ SharedFunctionInfoRef shared = function.shared(broker());
Builtin builtin =
shared.HasBuiltinId() ? shared.builtin_id() : Builtin::kNoBuiltinId;
if (builtin == Builtin::kMathMax || builtin == Builtin::kMathMin) {
@@ -8396,15 +8502,16 @@ base::Optional<Reduction> JSCallReducer::TryReduceJSCallMathMinMaxWithArrayLike(
if (feedback.IsInsufficient()) {
return base::nullopt;
}
- base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target();
- if (feedback_target.has_value() && feedback_target->map().is_callable()) {
- Node* target_function = jsgraph()->Constant(*feedback_target);
+ OptionalHeapObjectRef feedback_target = feedback.AsCall().target();
+ if (feedback_target.has_value() &&
+ feedback_target->map(broker()).is_callable()) {
+ Node* target_function = jsgraph()->Constant(*feedback_target, broker());
ObjectRef target_ref = feedback_target.value();
if (!target_ref.IsJSFunction()) {
return base::nullopt;
}
JSFunctionRef function = target_ref.AsJSFunction();
- SharedFunctionInfoRef shared = function.shared();
+ SharedFunctionInfoRef shared = function.shared(broker());
Builtin builtin =
shared.HasBuiltinId() ? shared.builtin_id() : Builtin::kNoBuiltinId;
if (builtin == Builtin::kMathMax || builtin == Builtin::kMathMin) {
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 072024dc69..58af159fd1 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -72,8 +72,12 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
JSGraph* JSGraphForGraphAssembler() const { return jsgraph(); }
bool has_wasm_calls() const { return has_wasm_calls_; }
+ const wasm::WasmModule* wasm_module_for_inlining() const {
+ return wasm_module_for_inlining_;
+ }
CompilationDependencies* dependencies() const;
+ JSHeapBroker* broker() const { return broker_; }
private:
Reduction ReduceBooleanConstructor(Node* node);
@@ -231,6 +235,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceBigIntConstructor(Node* node);
Reduction ReduceBigIntAsN(Node* node, Builtin builtin);
base::Optional<Reduction> TryReduceJSCallMathMinMaxWithArrayLike(Node* node);
@@ -267,7 +272,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* broker() const { return broker_; }
Zone* temp_zone() const { return temp_zone_; }
Isolate* isolate() const;
Factory* factory() const;
@@ -287,6 +291,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
std::unordered_set<Node*> generated_calls_with_array_like_or_spread_;
bool has_wasm_calls_ = false;
+ const wasm::WasmModule* wasm_module_for_inlining_ = nullptr;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 3e1ae072a5..4cae6927fb 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -39,7 +39,7 @@ Reduction JSContextSpecialization::ReduceParameter(Node* node) {
// Constant-fold the function parameter {node}.
Handle<JSFunction> function;
if (closure().ToHandle(&function)) {
- Node* value = jsgraph()->Constant(MakeRef(broker_, function));
+ Node* value = jsgraph()->Constant(MakeRef(broker_, function), broker());
return Replace(value);
}
}
@@ -97,9 +97,9 @@ bool IsContextParameter(Node* node) {
// context (which we want to read from or store to), try to return a
// specialization context. If successful, update {distance} to whatever
// distance remains from the specialization context.
-base::Optional<ContextRef> GetSpecializationContext(
- JSHeapBroker* broker, Node* node, size_t* distance,
- Maybe<OuterContext> maybe_outer) {
+OptionalContextRef GetSpecializationContext(JSHeapBroker* broker, Node* node,
+ size_t* distance,
+ Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
// TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
@@ -127,7 +127,7 @@ base::Optional<ContextRef> GetSpecializationContext(
default:
break;
}
- return base::Optional<ContextRef>();
+ return OptionalContextRef();
}
} // anonymous namespace
@@ -141,7 +141,7 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// First walk up the context chain in the graph as far as possible.
Node* context = NodeProperties::GetOuterContext(node, &depth);
- base::Optional<ContextRef> maybe_concrete =
+ OptionalContextRef maybe_concrete =
GetSpecializationContext(broker(), context, &depth, outer());
if (!maybe_concrete.has_value()) {
// We do not have a concrete context object, so we can only partially reduce
@@ -151,27 +151,30 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete = concrete.previous(&depth);
+ concrete = concrete.previous(broker(), &depth);
if (depth > 0) {
TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
- return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete, broker()),
+ depth);
}
if (!access.immutable()) {
// We found the requested context object but since the context slot is
// mutable we can only partially reduce the load.
- return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete, broker()),
+ depth);
}
// This will hold the final value, if we can figure it out.
- base::Optional<ObjectRef> maybe_value;
- maybe_value = concrete.get(static_cast<int>(access.index()));
+ OptionalObjectRef maybe_value;
+ maybe_value = concrete.get(broker(), static_cast<int>(access.index()));
if (!maybe_value.has_value()) {
TRACE_BROKER_MISSING(broker(), "slot value " << access.index()
<< " for context "
<< concrete);
- return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete, broker()),
+ depth);
}
if (!maybe_value->IsSmi()) {
@@ -180,15 +183,17 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// We must be conservative and check if the value in the slot is currently
// the hole or undefined. Only if it is neither of these, can we be sure
// that it won't change anymore.
- OddballType oddball_type = maybe_value->AsHeapObject().map().oddball_type();
+ OddballType oddball_type =
+ maybe_value->AsHeapObject().map(broker()).oddball_type(broker());
if (oddball_type == OddballType::kUndefined ||
oddball_type == OddballType::kHole) {
- return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
+ return SimplifyJSLoadContext(
+ node, jsgraph()->Constant(concrete, broker()), depth);
}
}
// Success. The context load can be replaced with the constant.
- Node* constant = jsgraph_->Constant(*maybe_value);
+ Node* constant = jsgraph_->Constant(*maybe_value, broker());
ReplaceWithValue(node, constant);
return Replace(constant);
}
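
ReduceJSLoadContext first walks the context chain in the graph, then walks the remaining depth through the concrete ContextRef via previous(broker, &depth); judging from the callers above, that helper stops when the chain runs out and leaves the unconsumed depth behind. A stand-alone sketch of that walk contract over a plain linked chain:

#include <cstddef>

// Toy stand-in for a Context chain; `previous` is the outer context.
struct Ctx {
  const Ctx* previous = nullptr;
};

// Walks up at most *depth links. On return, *depth holds how many links could
// not be consumed; 0 means the requested context was reached.
const Ctx* WalkPrevious(const Ctx* ctx, std::size_t* depth) {
  while (*depth > 0 && ctx->previous != nullptr) {
    ctx = ctx->previous;
    --*depth;
  }
  return ctx;
}

If *depth is still non-zero afterwards, the caller can only partially specialize the load or store, which is the TRACE_BROKER_MISSING path in the hunks above.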
@@ -204,7 +209,7 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// or hit a node that does not have a CreateXYZContext operator.
Node* context = NodeProperties::GetOuterContext(node, &depth);
- base::Optional<ContextRef> maybe_concrete =
+ OptionalContextRef maybe_concrete =
GetSpecializationContext(broker(), context, &depth, outer());
if (!maybe_concrete.has_value()) {
// We do not have a concrete context object, so we can only partially reduce
@@ -214,24 +219,26 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete = concrete.previous(&depth);
+ concrete = concrete.previous(broker(), &depth);
if (depth > 0) {
TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
- return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete, broker()),
+ depth);
}
- return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete, broker()),
+ depth);
}
-base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
- Maybe<OuterContext> maybe_context) {
+OptionalContextRef GetModuleContext(JSHeapBroker* broker, Node* node,
+ Maybe<OuterContext> maybe_context) {
size_t depth = std::numeric_limits<size_t>::max();
Node* context = NodeProperties::GetOuterContext(node, &depth);
- auto find_context = [](ContextRef c) {
- while (c.map().instance_type() != MODULE_CONTEXT_TYPE) {
+ auto find_context = [broker](ContextRef c) {
+ while (c.map(broker).instance_type() != MODULE_CONTEXT_TYPE) {
size_t depth = 1;
- c = c.previous(&depth);
+ c = c.previous(broker, &depth);
CHECK_EQ(depth, 0);
}
return c;
@@ -265,28 +272,27 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
break;
}
- return base::Optional<ContextRef>();
+ return OptionalContextRef();
}
Reduction JSContextSpecialization::ReduceJSGetImportMeta(Node* node) {
- base::Optional<ContextRef> maybe_context =
- GetModuleContext(broker(), node, outer());
+ OptionalContextRef maybe_context = GetModuleContext(broker(), node, outer());
if (!maybe_context.has_value()) return NoChange();
ContextRef context = maybe_context.value();
- base::Optional<ObjectRef> module = context.get(Context::EXTENSION_INDEX);
+ OptionalObjectRef module = context.get(broker(), Context::EXTENSION_INDEX);
if (!module.has_value()) return NoChange();
- base::Optional<ObjectRef> import_meta =
- module->AsSourceTextModule().import_meta();
+ OptionalObjectRef import_meta =
+ module->AsSourceTextModule().import_meta(broker());
if (!import_meta.has_value()) return NoChange();
if (!import_meta->IsJSObject()) {
- DCHECK(import_meta->IsTheHole());
+ DCHECK(import_meta->IsTheHole(broker()));
// The import.meta object has not yet been created. Let JSGenericLowering
// replace the operator with a runtime call.
return NoChange();
}
- Node* import_meta_const = jsgraph()->Constant(*import_meta);
+ Node* import_meta_const = jsgraph()->Constant(*import_meta, broker());
ReplaceWithValue(node, import_meta_const);
return Changed(import_meta_const);
}
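
Editorial note: the js-context-specialization.cc hunks above all apply the same mechanical pattern, so a compact sketch may help when reading the rest of the diff. The types below are hypothetical stand-ins, not V8's real headers; they only illustrate the shape of the change (the `OptionalFooRef` aliases replacing `base::Optional<FooRef>`, and heap-ref accessors now taking the `JSHeapBroker*` explicitly instead of relying on a broker bound at ref-creation time).

```cpp
// Standalone illustration with hypothetical stand-in types (not V8's API).
#include <cstddef>
#include <optional>

struct JSHeapBroker {};  // hypothetical stand-in

struct ContextRef {
  // Before: ContextRef previous(size_t* depth) const;
  ContextRef previous(JSHeapBroker*, size_t* depth) const {
    if (*depth > 0) --*depth;
    return *this;
  }
};

using OptionalContextRef = std::optional<ContextRef>;  // mirrors the new alias

// Walk up the context chain, threading the broker through each accessor,
// in the style of the rewritten ReduceJSLoadContext/ReduceJSStoreContext.
OptionalContextRef WalkUp(JSHeapBroker* broker, ContextRef c, size_t depth) {
  while (depth > 0) c = c.previous(broker, &depth);
  return c;
}
```

The same threading explains the lambda change in GetModuleContext, which must now capture `broker` so it can pass it to `map()` and `previous()`.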
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 85e9754337..8bb11635fd 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -111,8 +111,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
- base::Optional<MapRef> initial_map =
- NodeProperties::GetJSCreateMap(broker(), node);
+ OptionalMapRef initial_map = NodeProperties::GetJSCreateMap(broker(), node);
if (!initial_map.has_value()) return NoChange();
JSFunctionRef original_constructor =
@@ -123,7 +122,7 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
// Emit code to allocate the JSObject instance for the
// {original_constructor}.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(slack_tracking_prediction.instance_size());
a.Store(AccessBuilder::ForMap(), *initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -172,10 +171,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
has_aliased_arguments
- ? native_context().fast_aliased_arguments_map()
- : native_context().sloppy_arguments_map());
+ ? native_context().fast_aliased_arguments_map(broker())
+ : native_context().sloppy_arguments_map(broker()),
+ broker());
// Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize);
a.Allocate(JSSloppyArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
@@ -199,10 +199,10 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
shared.internal_formal_parameter_count_without_receiver()),
arguments_length, effect);
// Load the arguments object map.
- Node* const arguments_map =
- jsgraph()->Constant(native_context().strict_arguments_map());
+ Node* const arguments_map = jsgraph()->Constant(
+ native_context().strict_arguments_map(broker()), broker());
// Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(JSStrictArgumentsObject::kSize == 4 * kTaggedSize);
a.Allocate(JSStrictArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
@@ -228,9 +228,9 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
- native_context().js_array_packed_elements_map());
+ native_context().js_array_packed_elements_map(broker()), broker());
// Actually allocate and initialize the jsarray.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(JSArray::kHeaderSize == 4 * kTaggedSize);
a.Allocate(JSArray::kHeaderSize);
a.Store(AccessBuilder::ForMap(), jsarray_map);
@@ -256,7 +256,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// TODO(turbofan): Duplicate parameters are not handled yet.
if (shared.has_duplicate_parameters()) return NoChange();
// Choose the correct frame state and frame state info depending on
- // whether there conceptually is an arguments adaptor frame in the call
+ // whether there conceptually is an inlined arguments frame in the call
// chain.
FrameState args_state = GetArgumentsFrameState(frame_state);
if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
@@ -275,10 +275,12 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
- has_aliased_arguments ? native_context().fast_aliased_arguments_map()
- : native_context().sloppy_arguments_map());
+ has_aliased_arguments
+ ? native_context().fast_aliased_arguments_map(broker())
+ : native_context().sloppy_arguments_map(broker()),
+ broker());
// Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize);
a.Allocate(JSSloppyArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
@@ -296,7 +298,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
- // whether there conceptually is an arguments adaptor frame in the call
+ // whether there conceptually is an inlined arguments frame in the call
// chain.
FrameState args_state = GetArgumentsFrameState(frame_state);
if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
@@ -312,10 +314,10 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
- Node* const arguments_map =
- jsgraph()->Constant(native_context().strict_arguments_map());
+ Node* const arguments_map = jsgraph()->Constant(
+ native_context().strict_arguments_map(broker()), broker());
// Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(JSStrictArgumentsObject::kSize == 4 * kTaggedSize);
a.Allocate(JSStrictArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
@@ -334,7 +336,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
- // whether there conceptually is an arguments adaptor frame in the call
+ // whether there conceptually is an inlined arguments frame in the call
// chain.
FrameState args_state = GetArgumentsFrameState(frame_state);
if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
@@ -350,10 +352,10 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
- Node* const jsarray_map =
- jsgraph()->Constant(native_context().js_array_packed_elements_map());
+ Node* const jsarray_map = jsgraph()->Constant(
+ native_context().js_array_packed_elements_map(broker()), broker());
// Actually allocate and initialize the jsarray.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
// -1 to minus receiver
int argument_count = args_state_info.parameter_count() - 1;
@@ -386,24 +388,24 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
DCHECK(closure_type.AsHeapConstant()->Ref().IsJSFunction());
JSFunctionRef js_function =
closure_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!js_function.has_initial_map(dependencies())) return NoChange();
+ if (!js_function.has_initial_map(broker())) return NoChange();
SlackTrackingPrediction slack_tracking_prediction =
dependencies()->DependOnInitialMapInstanceSizePrediction(js_function);
- MapRef initial_map = js_function.initial_map(dependencies());
+ MapRef initial_map = js_function.initial_map(broker());
DCHECK(initial_map.instance_type() == JS_GENERATOR_OBJECT_TYPE ||
initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
// Allocate a register file.
- SharedFunctionInfoRef shared = js_function.shared();
+ SharedFunctionInfoRef shared = js_function.shared(broker());
DCHECK(shared.HasBytecodeArray());
int parameter_count_no_receiver =
shared.internal_formal_parameter_count_without_receiver();
int length = parameter_count_no_receiver +
- shared.GetBytecodeArray().register_count();
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ shared.GetBytecodeArray(broker()).register_count();
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateArray(length, fixed_array_map)) {
return NoChange();
}
@@ -415,7 +417,7 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
Node* parameters_and_registers = effect = ab.Finish();
// Emit code to allocate the JS[Async]GeneratorObject instance.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(slack_tracking_prediction.instance_size());
Node* undefined = jsgraph()->UndefinedConstant();
a.Store(AccessBuilder::ForMap(), initial_map);
@@ -464,8 +466,8 @@ Reduction JSCreateLowering::ReduceNewArray(
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- base::Optional<MapRef> maybe_initial_map =
- initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind));
+ OptionalMapRef maybe_initial_map =
+ initial_map.AsElementsKind(broker(), GetHoleyElementsKind(elements_kind));
if (!maybe_initial_map.has_value()) return NoChange();
initial_map = maybe_initial_map.value();
@@ -491,7 +493,7 @@ Reduction JSCreateLowering::ReduceNewArray(
length, effect, control);
// Perform the allocation of the actual JSArray object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(slack_tracking_prediction.instance_size(), allocation);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -525,8 +527,8 @@ Reduction JSCreateLowering::ReduceNewArray(
elements_kind = GetHoleyElementsKind(elements_kind);
}
- base::Optional<MapRef> maybe_initial_map =
- initial_map.AsElementsKind(elements_kind);
+ OptionalMapRef maybe_initial_map =
+ initial_map.AsElementsKind(broker(), elements_kind);
if (!maybe_initial_map.has_value()) return NoChange();
initial_map = maybe_initial_map.value();
@@ -542,7 +544,7 @@ Reduction JSCreateLowering::ReduceNewArray(
}
// Perform the allocation of the actual JSArray object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(slack_tracking_prediction.instance_size(), allocation);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -570,8 +572,8 @@ Reduction JSCreateLowering::ReduceNewArray(
// Determine the appropriate elements kind.
DCHECK(IsFastElementsKind(elements_kind));
- base::Optional<MapRef> maybe_initial_map =
- initial_map.AsElementsKind(elements_kind);
+ OptionalMapRef maybe_initial_map =
+ initial_map.AsElementsKind(broker(), elements_kind);
if (!maybe_initial_map.has_value()) return NoChange();
initial_map = maybe_initial_map.value();
@@ -603,7 +605,7 @@ Reduction JSCreateLowering::ReduceNewArray(
Node* length = jsgraph()->Constant(static_cast<int>(values.size()));
// Perform the allocation of the actual JSArray object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(slack_tracking_prediction.instance_size(), allocation);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -624,11 +626,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- base::Optional<AllocationSiteRef> site_ref = p.site(broker());
+ OptionalAllocationSiteRef site_ref = p.site();
AllocationType allocation = AllocationType::kYoung;
- base::Optional<MapRef> initial_map =
- NodeProperties::GetJSCreateMap(broker(), node);
+ OptionalMapRef initial_map = NodeProperties::GetJSCreateMap(broker(), node);
if (!initial_map.has_value()) return NoChange();
Node* new_target = NodeProperties::GetValueInput(node, 1);
@@ -652,8 +653,8 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else {
PropertyCellRef array_constructor_protector =
MakeRef(broker(), factory()->array_constructor_protector());
- array_constructor_protector.CacheAsProtector();
- can_inline_call = array_constructor_protector.value().AsSmi() ==
+ array_constructor_protector.CacheAsProtector(broker());
+ can_inline_call = array_constructor_protector.value(broker()).AsSmi() ==
Protectors::kProtectorValid;
}
@@ -745,11 +746,11 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Create the JSArrayIterator result.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(JSArrayIterator::kHeaderSize, AllocationType::kYoung,
Type::OtherObject());
a.Store(AccessBuilder::ForMap(),
- native_context().initial_array_iterator_map());
+ native_context().initial_array_iterator_map(broker()));
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -775,8 +776,8 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Create the register file.
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
CHECK(ab.CanAllocateArray(register_count, fixed_array_map));
ab.AllocateArray(register_count, fixed_array_map);
for (int i = 0; i < register_count; ++i) {
@@ -786,10 +787,10 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
Node* parameters_and_registers = effect = ab.Finish();
// Create the JSAsyncFunctionObject result.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(JSAsyncFunctionObject::kHeaderSize);
a.Store(AccessBuilder::ForMap(),
- native_context().async_function_object_map());
+ native_context().async_function_object_map(broker()));
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -812,7 +813,8 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
namespace {
-MapRef MapForCollectionIterationKind(const NativeContextRef& native_context,
+MapRef MapForCollectionIterationKind(JSHeapBroker* broker,
+ const NativeContextRef& native_context,
CollectionKind collection_kind,
IterationKind iteration_kind) {
switch (collection_kind) {
@@ -821,19 +823,19 @@ MapRef MapForCollectionIterationKind(const NativeContextRef& native_context,
case IterationKind::kKeys:
UNREACHABLE();
case IterationKind::kValues:
- return native_context.set_value_iterator_map();
+ return native_context.set_value_iterator_map(broker);
case IterationKind::kEntries:
- return native_context.set_key_value_iterator_map();
+ return native_context.set_key_value_iterator_map(broker);
}
break;
case CollectionKind::kMap:
switch (iteration_kind) {
case IterationKind::kKeys:
- return native_context.map_key_iterator_map();
+ return native_context.map_key_iterator_map(broker);
case IterationKind::kValues:
- return native_context.map_value_iterator_map();
+ return native_context.map_value_iterator_map(broker);
case IterationKind::kEntries:
- return native_context.map_key_value_iterator_map();
+ return native_context.map_key_value_iterator_map(broker);
}
break;
}
@@ -856,12 +858,13 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
iterated_object, effect, control);
// Create the JSCollectionIterator result.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(JSCollectionIterator::kHeaderSize, AllocationType::kYoung,
Type::OtherObject());
- a.Store(AccessBuilder::ForMap(),
- MapForCollectionIterationKind(native_context(), p.collection_kind(),
- p.iteration_kind()));
+ a.Store(
+ AccessBuilder::ForMap(),
+ MapForCollectionIterationKind(broker(), native_context(),
+ p.collection_kind(), p.iteration_kind()));
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
@@ -879,7 +882,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- MapRef const map = p.map(broker());
+ MapRef const map = p.map();
Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
Node* bound_this = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -888,8 +891,8 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
// Create the [[BoundArguments]] for the result.
Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
if (arity > 0) {
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
CHECK(ab.CanAllocateArray(arity, fixed_array_map));
ab.AllocateArray(arity, fixed_array_map);
for (int i = 0; i < arity; ++i) {
@@ -900,7 +903,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
}
// Create the JSBoundFunction result.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(JSBoundFunction::kHeaderSize, AllocationType::kYoung,
Type::BoundFunction());
a.Store(AccessBuilder::ForMap(), map);
@@ -920,9 +923,9 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
JSCreateClosureNode n(node);
CreateClosureParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared = p.shared_info(broker());
+ SharedFunctionInfoRef shared = p.shared_info();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- HeapObjectRef code = p.code(broker());
+ HeapObjectRef code = p.code();
Effect effect = n.effect();
Control control = n.control();
Node* context = n.context();
@@ -930,16 +933,15 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Use inline allocation of closures only for instantiation sites that have
// seen more than one instantiation, this simplifies the generated code and
// also serves as a heuristic of which allocation sites benefit from it.
- if (!feedback_cell.map().equals(
- MakeRef(broker(), factory()->many_closures_cell_map()))) {
+ if (!feedback_cell.map(broker()).equals(broker()->many_closures_cell_map())) {
return NoChange();
}
// Don't inline anything for class constructors.
if (IsClassConstructor(shared.kind())) return NoChange();
- MapRef function_map =
- native_context().GetFunctionMapFromIndex(shared.function_map_index());
+ MapRef function_map = native_context().GetFunctionMapFromIndex(
+ broker(), shared.function_map_index());
DCHECK(!function_map.IsInobjectSlackTrackingInProgress());
DCHECK(!function_map.is_dictionary_map());
@@ -956,7 +958,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Emit code to allocate the JSFunction instance.
static_assert(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(function_map.instance_size(), allocation,
Type::CallableFunction());
a.Store(AccessBuilder::ForMap(), function_map);
@@ -989,11 +991,11 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
Node* done = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* iterator_result_map =
- jsgraph()->Constant(native_context().iterator_result_map());
+ Node* iterator_result_map = jsgraph()->Constant(
+ native_context().iterator_result_map(broker()), broker());
// Emit code to allocate the JSIteratorResult instance.
- AllocationBuilder a(jsgraph(), effect, graph()->start());
+ AllocationBuilder a(jsgraph(), broker(), effect, graph()->start());
a.Allocate(JSIteratorResult::kSize);
a.Store(AccessBuilder::ForMap(), iterator_result_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -1012,10 +1014,10 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
Node* string = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* map =
- jsgraph()->Constant(native_context().initial_string_iterator_map());
+ Node* map = jsgraph()->Constant(
+ native_context().initial_string_iterator_map(broker()), broker());
// Allocate new iterator and attach the iterator to this string.
- AllocationBuilder a(jsgraph(), effect, graph()->start());
+ AllocationBuilder a(jsgraph(), broker(), effect, graph()->start());
a.Allocate(JSStringIterator::kHeaderSize, AllocationType::kYoung,
Type::OtherObject());
a.Store(AccessBuilder::ForMap(), map);
@@ -1036,19 +1038,19 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* array_map =
- jsgraph()->Constant(native_context().js_array_packed_elements_map());
+ Node* array_map = jsgraph()->Constant(
+ native_context().js_array_packed_elements_map(broker()), broker());
Node* length = jsgraph()->Constant(2);
- AllocationBuilder aa(jsgraph(), effect, graph()->start());
- aa.AllocateArray(2, MakeRef(broker(), factory()->fixed_array_map()));
+ AllocationBuilder aa(jsgraph(), broker(), effect, graph()->start());
+ aa.AllocateArray(2, broker()->fixed_array_map());
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->ZeroConstant(), key);
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->OneConstant(), value);
Node* elements = aa.Finish();
- AllocationBuilder a(jsgraph(), elements, graph()->start());
+ AllocationBuilder a(jsgraph(), broker(), elements, graph()->start());
a.Allocate(ALIGN_TO_ALLOCATION_ALIGNMENT(JSArray::kHeaderSize));
a.Store(AccessBuilder::ForMap(), array_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -1065,9 +1067,9 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
MapRef promise_map =
- native_context().promise_function().initial_map(dependencies());
+ native_context().promise_function(broker()).initial_map(broker());
- AllocationBuilder a(jsgraph(), effect, graph()->start());
+ AllocationBuilder a(jsgraph(), broker(), effect, graph()->start());
a.Allocate(promise_map.instance_size());
a.Store(AccessBuilder::ForMap(), promise_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -1100,12 +1102,12 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
if (!feedback.IsInsufficient()) {
AllocationSiteRef site = feedback.AsLiteral().value();
- if (!site.boilerplate().has_value()) return NoChange();
+ if (!site.boilerplate(broker()).has_value()) return NoChange();
AllocationType allocation = dependencies()->DependOnPretenureMode(site);
int max_properties = kMaxFastLiteralProperties;
- base::Optional<Node*> maybe_value =
- TryAllocateFastLiteral(effect, control, *site.boilerplate(), allocation,
- kMaxFastLiteralDepth, &max_properties);
+ base::Optional<Node*> maybe_value = TryAllocateFastLiteral(
+ effect, control, *site.boilerplate(broker()), allocation,
+ kMaxFastLiteralDepth, &max_properties);
if (!maybe_value.has_value()) return NoChange();
dependencies()->DependOnElementsKinds(site);
Node* value = effect = maybe_value.value();
@@ -1124,7 +1126,7 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
AllocationSiteRef site = feedback.AsLiteral().value();
DCHECK(!site.PointsToLiteral());
MapRef initial_map =
- native_context().GetInitialJSArrayMap(site.GetElementsKind());
+ native_context().GetInitialJSArrayMap(broker(), site.GetElementsKind());
AllocationType const allocation =
dependencies()->DependOnPretenureMode(site);
dependencies()->DependOnElementsKind(site);
@@ -1145,16 +1147,16 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Retrieve the initial map for the object.
- MapRef map = native_context().object_function().initial_map(dependencies());
+ MapRef map = native_context().object_function(broker()).initial_map(broker());
DCHECK(!map.is_dictionary_map());
DCHECK(!map.IsInobjectSlackTrackingInProgress());
- Node* js_object_map = jsgraph()->Constant(map);
+ Node* js_object_map = jsgraph()->Constant(map, broker());
// Setup elements and properties.
Node* elements = jsgraph()->EmptyFixedArrayConstant();
// Perform the allocation of the actual JSArray object.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(map.instance_size());
a.Store(AccessBuilder::ForMap(), js_object_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
@@ -1199,7 +1201,7 @@ Reduction JSCreateLowering::ReduceJSGetTemplateObject(Node* node) {
if (feedback.IsInsufficient()) return NoChange();
JSArrayRef template_object = feedback.AsTemplateObject().value();
- Node* value = jsgraph()->Constant(template_object);
+ Node* value = jsgraph()->Constant(template_object, broker());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -1208,7 +1210,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef scope_info = parameters.scope_info(broker());
+ ScopeInfoRef scope_info = parameters.scope_info();
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
@@ -1218,16 +1220,17 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(Context::MIN_CONTEXT_SLOTS == 2); // Ensure fully covered.
int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
switch (scope_type) {
case EVAL_SCOPE:
- a.AllocateContext(context_length, native_context().eval_context_map());
+ a.AllocateContext(context_length,
+ native_context().eval_context_map(broker()));
break;
case FUNCTION_SCOPE:
a.AllocateContext(context_length,
- native_context().function_context_map());
+ native_context().function_context_map(broker()));
break;
default:
UNREACHABLE();
@@ -1248,17 +1251,17 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
+ ScopeInfoRef scope_info = ScopeInfoOf(node->op());
Node* extension = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(Context::MIN_CONTEXT_EXTENDED_SLOTS ==
3); // Ensure fully covered.
a.AllocateContext(Context::MIN_CONTEXT_EXTENDED_SLOTS,
- native_context().with_context_map());
+ native_context().with_context_map(broker()));
a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1269,16 +1272,16 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
+ ScopeInfoRef scope_info = ScopeInfoOf(node->op());
Node* exception = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(Context::MIN_CONTEXT_SLOTS == 2); // Ensure fully covered.
a.AllocateContext(Context::MIN_CONTEXT_SLOTS + 1,
- native_context().catch_context_map());
+ native_context().catch_context_map(broker()));
a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX), scope_info);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
@@ -1290,7 +1293,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
+ ScopeInfoRef scope_info = ScopeInfoOf(node->op());
int const context_length = scope_info.ContextLength();
// Use inline allocation for block contexts up to a size limit.
@@ -1300,9 +1303,10 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
static_assert(Context::MIN_CONTEXT_SLOTS == 2); // Ensure fully covered.
- a.AllocateContext(context_length, native_context().block_context_map());
+ a.AllocateContext(context_length,
+ native_context().block_context_map(broker()));
a.Store(AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX),
scope_info);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
@@ -1319,22 +1323,22 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
namespace {
-base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
- HeapObjectRef prototype) {
+OptionalMapRef GetObjectCreateMap(JSHeapBroker* broker,
+ HeapObjectRef prototype) {
MapRef standard_map =
- broker->target_native_context().object_function().initial_map(
- broker->dependencies());
- if (prototype.equals(standard_map.prototype())) {
+ broker->target_native_context().object_function(broker).initial_map(
+ broker);
+ if (prototype.equals(standard_map.prototype(broker))) {
return standard_map;
}
- if (prototype.map().oddball_type() == OddballType::kNull) {
- return broker->target_native_context()
- .slow_object_with_null_prototype_map();
+ if (prototype.map(broker).oddball_type(broker) == OddballType::kNull) {
+ return broker->target_native_context().slow_object_with_null_prototype_map(
+ broker);
}
if (prototype.IsJSObject()) {
- return prototype.AsJSObject().GetObjectCreateMap();
+ return prototype.AsJSObject().GetObjectCreateMap(broker);
}
- return base::Optional<MapRef>();
+ return OptionalMapRef();
}
} // namespace
@@ -1354,16 +1358,17 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
Node* properties = jsgraph()->EmptyFixedArrayConstant();
if (instance_map.is_dictionary_map()) {
- DCHECK_EQ(prototype_const.map().oddball_type(), OddballType::kNull);
+ DCHECK_EQ(prototype_const.map(broker()).oddball_type(broker()),
+ OddballType::kNull);
// Allocate an empty NameDictionary as backing store for the properties.
- MapRef map = MakeRef(broker(), factory()->name_dictionary_map());
+ MapRef map = broker()->name_dictionary_map();
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
DCHECK(base::bits::IsPowerOfTwo(capacity));
int length = NameDictionary::EntryToIndex(InternalIndex(capacity));
int size = NameDictionary::SizeFor(length);
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(size, AllocationType::kYoung, Type::Any());
a.Store(AccessBuilder::ForMap(), map);
// Initialize FixedArray fields.
@@ -1381,10 +1386,13 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
jsgraph()->SmiConstant(PropertyDetails::kInitialIndex));
a.Store(AccessBuilder::ForDictionaryObjectHashIndex(),
jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel));
+ // Initialize NameDictionary fields.
+ a.Store(AccessBuilder::ForNameDictionaryFlagsIndex(),
+ jsgraph()->SmiConstant(NameDictionary::kFlagsDefault));
// Initialize the Properties fields.
Node* undefined = jsgraph()->UndefinedConstant();
static_assert(NameDictionary::kElementsStartIndex ==
- NameDictionary::kObjectHashIndex + 1);
+ NameDictionary::kFlagsIndex + 1);
for (int index = NameDictionary::kElementsStartIndex; index < length;
index++) {
a.Store(AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier),
@@ -1399,7 +1407,7 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
// Emit code to allocate the JSObject instance for the given
// {instance_map}.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(instance_size, AllocationType::kYoung, Type::Any());
a.Store(AccessBuilder::ForMap(), instance_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -1432,8 +1440,8 @@ Node* JSCreateLowering::TryAllocateArguments(Node* effect, Node* control,
auto parameters_it = parameters_access.begin_without_receiver();
// Actually allocate the backing store.
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateArray(argument_count, fixed_array_map)) {
return nullptr;
}
@@ -1463,8 +1471,8 @@ Node* JSCreateLowering::TryAllocateRestArguments(Node* effect, Node* control,
parameters_access.begin_without_receiver_and_skip(start_index);
// Actually allocate the backing store.
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateArray(num_elements, fixed_array_map)) {
return nullptr;
}
@@ -1500,15 +1508,15 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
*has_aliased_arguments = true;
MapRef sloppy_arguments_elements_map =
- MakeRef(broker(), factory()->sloppy_arguments_elements_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ broker()->sloppy_arguments_elements_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateSloppyArgumentElements(mapped_count,
sloppy_arguments_elements_map)) {
return nullptr;
}
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = broker()->fixed_array_map();
if (!ab.CanAllocateArray(argument_count, fixed_array_map)) {
return nullptr;
}
@@ -1535,7 +1543,7 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
Node* arguments = ab.Finish();
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), arguments, control);
+ AllocationBuilder a(jsgraph(), broker(), arguments, control);
a.AllocateSloppyArgumentElements(mapped_count, sloppy_arguments_elements_map);
a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
@@ -1567,10 +1575,10 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
int mapped_count = parameter_count;
MapRef sloppy_arguments_elements_map =
- MakeRef(broker(), factory()->sloppy_arguments_elements_map());
+ broker()->sloppy_arguments_elements_map();
{
- AllocationBuilder ab(jsgraph(), effect, control);
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateSloppyArgumentElements(mapped_count,
sloppy_arguments_elements_map)) {
return nullptr;
@@ -1592,7 +1600,7 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
arguments_length, effect);
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.AllocateSloppyArgumentElements(mapped_count, sloppy_arguments_elements_map);
a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
@@ -1625,7 +1633,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
Node* value = jsgraph()->TheHoleConstant();
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.AllocateArray(capacity, MakeRef(broker(), elements_map), allocation);
for (int i = 0; i < capacity; ++i) {
Node* index = jsgraph()->Constant(i);
@@ -1650,7 +1658,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
: AccessBuilder::ForFixedArrayElement();
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.AllocateArray(capacity, MakeRef(broker(), elements_map), allocation);
for (int i = 0; i < capacity; ++i) {
Node* index = jsgraph()->Constant(i);
@@ -1672,14 +1680,14 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
broker());
// Now that we hold the migration lock, get the current map.
- MapRef boilerplate_map = boilerplate.map();
+ MapRef boilerplate_map = boilerplate.map(broker());
// Protect against concurrent changes to the boilerplate object by checking
// for an identical value at the end of the compilation.
dependencies()->DependOnObjectSlotValue(boilerplate, HeapObject::kMapOffset,
boilerplate_map);
{
- base::Optional<MapRef> current_boilerplate_map =
- boilerplate.map_direct_read();
+ OptionalMapRef current_boilerplate_map =
+ boilerplate.map_direct_read(broker());
if (!current_boilerplate_map.has_value() ||
!current_boilerplate_map->equals(boilerplate_map)) {
return {};
@@ -1693,18 +1701,16 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
if (boilerplate_map.is_deprecated()) return {};
// We currently only support in-object properties.
- if (boilerplate.map().elements_kind() == DICTIONARY_ELEMENTS ||
- boilerplate.map().is_dictionary_map() ||
- !boilerplate.raw_properties_or_hash().has_value()) {
+ if (boilerplate.map(broker()).elements_kind() == DICTIONARY_ELEMENTS ||
+ boilerplate.map(broker()).is_dictionary_map() ||
+ !boilerplate.raw_properties_or_hash(broker()).has_value()) {
return {};
}
{
- ObjectRef properties = *boilerplate.raw_properties_or_hash();
+ ObjectRef properties = *boilerplate.raw_properties_or_hash(broker());
bool const empty = properties.IsSmi() ||
- properties.equals(MakeRef<Object>(
- broker(), factory()->empty_fixed_array())) ||
- properties.equals(MakeRef<Object>(
- broker(), factory()->empty_property_array()));
+ properties.equals(broker()->empty_fixed_array()) ||
+ properties.equals(broker()->empty_property_array());
if (!empty) return {};
}
@@ -1714,13 +1720,14 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors();
for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) {
PropertyDetails const property_details =
- boilerplate_map.GetPropertyDetails(i);
+ boilerplate_map.GetPropertyDetails(broker(), i);
if (property_details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(PropertyKind::kData, property_details.kind());
if ((*max_properties)-- == 0) return {};
- NameRef property_name = boilerplate_map.GetPropertyKey(i);
- FieldIndex index = boilerplate_map.GetFieldIndexFor(i);
+ NameRef property_name = boilerplate_map.GetPropertyKey(broker(), i);
+ FieldIndex index =
+ FieldIndex::ForDetails(*boilerplate_map.object(), property_details);
ConstFieldInfo const_field_info(boilerplate_map.object());
FieldAccess access = {kTaggedBase,
index.offset(),
@@ -1735,8 +1742,8 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
// Note: the use of RawInobjectPropertyAt (vs. the higher-level
// GetOwnFastDataProperty) here is necessary, since the underlying value
// may be `uninitialized`, which the latter explicitly does not support.
- base::Optional<ObjectRef> maybe_boilerplate_value =
- boilerplate.RawInobjectPropertyAt(index);
+ OptionalObjectRef maybe_boilerplate_value =
+ boilerplate.RawInobjectPropertyAt(broker(), index);
if (!maybe_boilerplate_value.has_value()) return {};
// Note: We don't need to take a compilation dependency verifying the value
@@ -1754,8 +1761,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
// Note that although we create nodes to write `uninitialized_value` into
// the object, the field should be overwritten immediately with a real
// value, and `uninitialized_value` should never be exposed to JS.
- ObjectRef uninitialized_oddball =
- MakeRef<HeapObject>(broker(), factory()->uninitialized_value());
+ ObjectRef uninitialized_oddball = broker()->uninitialized_value();
if (boilerplate_value.equals(uninitialized_oddball) ||
(boilerplate_value.IsHeapNumber() &&
boilerplate_value.AsHeapNumber().value_as_bits() == kHoleNanInt64)) {
@@ -1773,10 +1779,9 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
} else if (property_details.representation().IsDouble()) {
double number = boilerplate_value.AsHeapNumber().value();
// Allocate a mutable HeapNumber box and store the value into it.
- AllocationBuilder builder(jsgraph(), effect, control);
+ AllocationBuilder builder(jsgraph(), broker(), effect, control);
builder.Allocate(HeapNumber::kSize, allocation);
- builder.Store(AccessBuilder::ForMap(),
- MakeRef(broker(), factory()->heap_number_map()));
+ builder.Store(AccessBuilder::ForMap(), broker()->heap_number_map());
builder.Store(AccessBuilder::ForHeapNumberValue(),
jsgraph()->Constant(number));
value = effect = builder.Finish();
@@ -1787,7 +1792,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
DCHECK_IMPLIES(property_details.representation().IsSmi() &&
!boilerplate_value.IsSmi(),
boilerplate_value.equals(uninitialized_oddball));
- value = jsgraph()->Constant(boilerplate_value);
+ value = jsgraph()->Constant(boilerplate_value, broker());
}
inobject_fields.push_back(std::make_pair(access, value));
}
@@ -1813,9 +1818,9 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
if (elements->op()->EffectOutputCount() > 0) effect = elements;
// Actually allocate and initialize the object.
- AllocationBuilder builder(jsgraph(), effect, control);
+ AllocationBuilder builder(jsgraph(), broker(), effect, control);
builder.Allocate(boilerplate_map.instance_size(), allocation,
- Type::For(boilerplate_map));
+ Type::For(boilerplate_map, broker()));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(),
jsgraph()->EmptyFixedArrayConstant());
@@ -1823,8 +1828,8 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
if (boilerplate.IsJSArray()) {
JSArrayRef boilerplate_array = boilerplate.AsJSArray();
builder.Store(AccessBuilder::ForJSArrayLength(
- boilerplate_array.map().elements_kind()),
- boilerplate_array.GetBoilerplateLength());
+ boilerplate_array.map(broker()).elements_kind()),
+ boilerplate_array.GetBoilerplateLength(broker()));
}
for (auto const& inobject_field : inobject_fields) {
builder.Store(inobject_field.first, inobject_field.second);
@@ -1838,8 +1843,8 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
DCHECK_GT(max_depth, 0);
DCHECK_GE(*max_properties, 0);
- base::Optional<FixedArrayBaseRef> maybe_boilerplate_elements =
- boilerplate.elements(kRelaxedLoad);
+ OptionalFixedArrayBaseRef maybe_boilerplate_elements =
+ boilerplate.elements(broker(), kRelaxedLoad);
if (!maybe_boilerplate_elements.has_value()) return {};
FixedArrayBaseRef boilerplate_elements = maybe_boilerplate_elements.value();
// Protect against concurrent changes to the boilerplate object by checking
@@ -1849,17 +1854,18 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
// Empty or copy-on-write elements just store a constant.
int const elements_length = boilerplate_elements.length();
- MapRef elements_map = boilerplate_elements.map();
+ MapRef elements_map = boilerplate_elements.map(broker());
// Protect against concurrent changes to the boilerplate object by checking
// for an identical value at the end of the compilation.
dependencies()->DependOnObjectSlotValue(boilerplate_elements,
HeapObject::kMapOffset, elements_map);
- if (boilerplate_elements.length() == 0 || elements_map.IsFixedCowArrayMap()) {
+ if (boilerplate_elements.length() == 0 ||
+ elements_map.IsFixedCowArrayMap(broker())) {
if (allocation == AllocationType::kOld &&
!boilerplate.IsElementsTenured(boilerplate_elements)) {
return {};
}
- return jsgraph()->Constant(boilerplate_elements);
+ return jsgraph()->Constant(boilerplate_elements, broker());
}
// Compute the elements to store first (might have effects).
@@ -1879,7 +1885,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
FixedArrayRef elements = boilerplate_elements.AsFixedArray();
for (int i = 0; i < elements_length; ++i) {
if ((*max_properties)-- == 0) return {};
- base::Optional<ObjectRef> element_value = elements.TryGet(i);
+ OptionalObjectRef element_value = elements.TryGet(broker(), i);
if (!element_value.has_value()) return {};
if (element_value->IsJSObject()) {
base::Optional<Node*> object =
@@ -1888,13 +1894,13 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
if (!object.has_value()) return {};
elements_values[i] = effect = *object;
} else {
- elements_values[i] = jsgraph()->Constant(*element_value);
+ elements_values[i] = jsgraph()->Constant(*element_value, broker());
}
}
}
// Allocate the backing store array and store the elements.
- AllocationBuilder ab(jsgraph(), effect, control);
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
CHECK(ab.CanAllocateArray(elements_length, elements_map, allocation));
ab.AllocateArray(elements_length, elements_map, allocation);
ElementAccess const access = boilerplate_elements.IsFixedDoubleArray()
@@ -1909,7 +1915,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
Node* JSCreateLowering::AllocateLiteralRegExp(
Node* effect, Node* control, RegExpBoilerplateDescriptionRef boilerplate) {
MapRef initial_map =
- native_context().regexp_function().initial_map(dependencies());
+ native_context().regexp_function(broker()).initial_map(broker());
// Sanity check that JSRegExp object layout hasn't changed.
static_assert(JSRegExp::kDataOffset == JSObject::kHeaderSize);
@@ -1920,17 +1926,18 @@ Node* JSCreateLowering::AllocateLiteralRegExp(
static_assert(JSRegExp::kLastIndexOffset == JSRegExp::kHeaderSize);
DCHECK_EQ(JSRegExp::Size(), JSRegExp::kLastIndexOffset + kTaggedSize);
- AllocationBuilder builder(jsgraph(), effect, control);
+ AllocationBuilder builder(jsgraph(), broker(), effect, control);
builder.Allocate(JSRegExp::Size(), AllocationType::kYoung,
- Type::For(initial_map));
+ Type::For(initial_map, broker()));
builder.Store(AccessBuilder::ForMap(), initial_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
builder.Store(AccessBuilder::ForJSObjectElements(),
jsgraph()->EmptyFixedArrayConstant());
- builder.Store(AccessBuilder::ForJSRegExpData(), boilerplate.data());
- builder.Store(AccessBuilder::ForJSRegExpSource(), boilerplate.source());
+ builder.Store(AccessBuilder::ForJSRegExpData(), boilerplate.data(broker()));
+ builder.Store(AccessBuilder::ForJSRegExpSource(),
+ boilerplate.source(broker()));
builder.Store(AccessBuilder::ForJSRegExpFlags(),
jsgraph()->SmiConstant(boilerplate.flags()));
builder.Store(AccessBuilder::ForJSRegExpLastIndex(),
@@ -1949,6 +1956,10 @@ CommonOperatorBuilder* JSCreateLowering::common() const {
return jsgraph()->common();
}
+CompilationDependencies* JSCreateLowering::dependencies() const {
+ return broker()->dependencies();
+}
+
SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
return jsgraph()->simplified();
}
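
Editorial note: two patterns recur through the js-create-lowering.cc hunks above: `AllocationBuilder` gains a `JSHeapBroker*` constructor argument at every call site, and canonical read-only maps are fetched from the broker (`broker()->fixed_array_map()`) instead of being wrapped via `MakeRef(broker(), factory()->...)`. The sketch below uses illustrative-only class bodies (not the real declarations) to show that shape.

```cpp
// Illustrative-only types; member layout is assumed, not taken from V8.
struct JSGraph {};
struct Node {};
struct MapRef {};

struct JSHeapBroker {
  MapRef fixed_array_map() const { return MapRef{}; }  // canonical root ref
};

class AllocationBuilder {
 public:
  // Before: AllocationBuilder(JSGraph*, Node* effect, Node* control);
  AllocationBuilder(JSGraph* jsgraph, JSHeapBroker* broker, Node* effect,
                    Node* control)
      : jsgraph_(jsgraph), broker_(broker), effect_(effect), control_(control) {}

 private:
  JSGraph* jsgraph_;
  JSHeapBroker* broker_;  // newly threaded through every call site in the diff
  Node* effect_;
  Node* control_;
};
```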
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 85a9a7bb58..db7a5281fe 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -33,10 +33,9 @@ class SlackTrackingPrediction;
class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone)
+ JSCreateLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone)
: AdvancedReducer(editor),
- dependencies_(dependencies),
jsgraph_(jsgraph),
broker_(broker),
zone_(zone) {}
@@ -124,11 +123,10 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
NativeContextRef native_context() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
- CompilationDependencies* dependencies() const { return dependencies_; }
+ CompilationDependencies* dependencies() const;
JSHeapBroker* broker() const { return broker_; }
Zone* zone() const { return zone_; }
- CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
Zone* const zone_;
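
Editorial note: the header change pairs with the new out-of-line definition added near the end of js-create-lowering.cc above: the reducer no longer stores `CompilationDependencies*` itself, and `dependencies()` forwards to the broker. A minimal sketch of that ownership move, with hypothetical class bodies:

```cpp
// Minimal sketch; class bodies are assumptions, only the delegation mirrors
// the diff.
struct CompilationDependencies {};

struct JSHeapBroker {
  CompilationDependencies* dependencies() { return &deps_; }
  CompilationDependencies deps_;
};

class JSCreateLowering {
 public:
  explicit JSCreateLowering(JSHeapBroker* broker) : broker_(broker) {}
  JSHeapBroker* broker() const { return broker_; }
  CompilationDependencies* dependencies() const {  // was: return dependencies_;
    return broker_->dependencies();
  }

 private:
  JSHeapBroker* const broker_;  // dependencies_ member removed
};
```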
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 1cca549fbb..794884520c 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -63,6 +63,8 @@ Reduction JSGenericLowering::Reduce(Node* node) {
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToNumberConvertBigInt)
+REPLACE_STUB_CALL(ToBigInt)
+REPLACE_STUB_CALL(ToBigIntConvertNumber)
REPLACE_STUB_CALL(ToNumeric)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
@@ -236,7 +238,7 @@ namespace {
// some cases - unlike the full builtin, the megamorphic builtin does fewer
// checks and does not collect feedback.
bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
- base::Optional<NameRef> name,
+ OptionalNameRef name,
JSHeapBroker* broker) {
ProcessedFeedback const& feedback =
broker->GetFeedbackForPropertyAccess(source, AccessMode::kLoad, name);
@@ -267,6 +269,14 @@ void JSGenericLowering::LowerJSHasProperty(Node* node) {
}
}
+bool HasStringType(Node* key) {
+ if (key->opcode() == IrOpcode::kLoadElement) {
+ ElementAccess const& access = ElementAccessOf(key->op());
+ return access.type.Is(Type::String());
+ }
+ return false;
+}
+
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
JSLoadPropertyNode n(node);
const PropertyAccess& p = n.Parameters();
@@ -279,14 +289,18 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
- ? Builtin::kKeyedLoadICTrampoline_Megamorphic
+ ? (HasStringType(n->InputAt(1))
+ ? Builtin::kKeyedLoadICTrampoline_MegamorphicStringKey
+ : Builtin::kKeyedLoadICTrampoline_Megamorphic)
: Builtin::kKeyedLoadICTrampoline);
} else {
n->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
- ? Builtin::kKeyedLoadIC_Megamorphic
+ ? (HasStringType(n->InputAt(1))
+ ? Builtin::kKeyedLoadIC_MegamorphicStringKey
+ : Builtin::kKeyedLoadIC_Megamorphic)
: Builtin::kKeyedLoadIC);
}
}
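
Editorial note: the new `HasStringType` helper feeds the builtin choice in LowerJSLoadProperty: a megamorphic keyed load whose key node is a `LoadElement` typed as String is routed to the `*_MegamorphicStringKey` variant. The sketch below restates that selection logic with a hypothetical enum (the real `Builtin` list is generated elsewhere).

```cpp
// Standalone sketch; enum values name the builtins seen in the diff, but the
// enum itself is illustrative.
enum class Builtin {
  kKeyedLoadICTrampoline,
  kKeyedLoadICTrampoline_Megamorphic,
  kKeyedLoadICTrampoline_MegamorphicStringKey,
};

Builtin ChooseKeyedLoadTrampoline(bool use_megamorphic, bool key_is_string) {
  if (!use_megamorphic) return Builtin::kKeyedLoadICTrampoline;
  return key_is_string ? Builtin::kKeyedLoadICTrampoline_MegamorphicStringKey
                       : Builtin::kKeyedLoadICTrampoline_Megamorphic;
}
```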
@@ -299,25 +313,25 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
static_assert(n.FeedbackVectorIndex() == 1);
if (!p.feedback().IsValid()) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
ReplaceWithBuiltinCall(node, Builtin::kGetProperty);
} else if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithBuiltinCall(node, ShouldUseMegamorphicLoadBuiltin(
- p.feedback(), p.name(broker()), broker())
- ? Builtin::kLoadICTrampoline_Megamorphic
- : Builtin::kLoadICTrampoline);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), p.name(), broker())
+ ? Builtin::kLoadICTrampoline_Megamorphic
+ : Builtin::kLoadICTrampoline);
} else {
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- ReplaceWithBuiltinCall(node, ShouldUseMegamorphicLoadBuiltin(
- p.feedback(), p.name(broker()), broker())
- ? Builtin::kLoadIC_Megamorphic
- : Builtin::kLoadIC);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), p.name(), broker())
+ ? Builtin::kLoadIC_Megamorphic
+ : Builtin::kLoadIC);
}
}
@@ -342,7 +356,7 @@ void JSGenericLowering::LowerJSLoadNamedFromSuper(Node* node) {
// be double-checked that the FeedbackVector parameter will be the
// UndefinedConstant.
DCHECK(p.feedback().IsValid());
- node->InsertInput(zone(), 2, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kLoadSuperIC);
@@ -357,13 +371,13 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
static_assert(n.FeedbackVectorIndex() == 0);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
ReplaceWithBuiltinCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable =
@@ -425,14 +439,14 @@ void JSGenericLowering::LowerJSDefineKeyedOwnProperty(Node* node) {
const PropertyAccess& p = n.Parameters();
FrameState frame_state = n.frame_state();
Node* outer_state = frame_state.outer_frame_state();
- static_assert(n.FeedbackVectorIndex() == 3);
+ static_assert(n.FeedbackVectorIndex() == 4);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 3,
+ node->InsertInput(zone(), 4,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kDefineKeyedOwnICTrampoline);
} else {
- node->InsertInput(zone(), 3,
+ node->InsertInput(zone(), 4,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kDefineKeyedOwnIC);
}
@@ -446,11 +460,11 @@ void JSGenericLowering::LowerJSSetNamedProperty(Node* node) {
static_assert(n.FeedbackVectorIndex() == 2);
if (!p.feedback().IsValid()) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
} else if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
// StoreIC is currently a base class for multiple property store operations
@@ -460,7 +474,7 @@ void JSGenericLowering::LowerJSSetNamedProperty(Node* node) {
// be called here.
ReplaceWithBuiltinCall(node, Builtin::kStoreICTrampoline);
} else {
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreIC);
@@ -476,13 +490,13 @@ void JSGenericLowering::LowerJSDefineNamedOwnProperty(Node* node) {
static_assert(n.FeedbackVectorIndex() == 2);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable = CodeFactory::DefineNamedOwnIC(isolate());
ReplaceWithBuiltinCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
Callable callable = CodeFactory::DefineNamedOwnICInOptimizedCode(isolate());
@@ -498,12 +512,12 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
static_assert(n.FeedbackVectorIndex() == 1);
if (outer_state->opcode() != IrOpcode::kFrameState) {
n->RemoveInput(n.FeedbackVectorIndex());
- node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreGlobalICTrampoline);
} else {
- node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(), broker()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(node, Builtin::kStoreGlobalIC);
@@ -613,9 +627,10 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arity));
- base::Optional<AllocationSiteRef> const site = p.site(broker());
- Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
- : jsgraph()->UndefinedConstant();
+ OptionalAllocationSiteRef const site = p.site();
+ Node* type_info = site.has_value()
+ ? jsgraph()->Constant(site.value(), broker())
+ : jsgraph()->UndefinedConstant();
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
@@ -659,9 +674,9 @@ void JSGenericLowering::LowerJSRegExpTest(Node* node) {
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
JSCreateClosureNode n(node);
CreateClosureParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared_info = p.shared_info(broker());
+ SharedFunctionInfoRef shared_info = p.shared_info();
static_assert(n.FeedbackCellIndex() == 0);
- node->InsertInput(zone(), 0, jsgraph()->Constant(shared_info));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(shared_info, broker()));
node->RemoveInput(4); // control
// Use the FastNewClosure builtin only for functions allocated in new space.
@@ -675,7 +690,7 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef scope_info = parameters.scope_info(broker());
+ ScopeInfoRef scope_info = parameters.scope_info();
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -683,11 +698,11 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
Callable callable =
CodeFactory::FastNewFunctionContext(isolate(), scope_type);
- node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info, broker()));
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
ReplaceWithBuiltinCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info, broker()));
ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
}
}
@@ -723,7 +738,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
static_assert(n.FeedbackVectorIndex() == 0);
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker())));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(), broker()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
// Use the CreateShallowArrayLiteral builtin only for shallow boilerplates
@@ -739,15 +754,15 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
JSGetTemplateObjectNode n(node);
GetTemplateObjectParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared = p.shared(broker());
- TemplateObjectDescriptionRef description = p.description(broker());
+ SharedFunctionInfoRef shared = p.shared();
+ TemplateObjectDescriptionRef description = p.description();
DCHECK_EQ(node->op()->ControlInputCount(), 1);
node->RemoveInput(NodeProperties::FirstControlIndex(node));
static_assert(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0);
- node->InsertInput(zone(), 0, jsgraph()->Constant(shared));
- node->InsertInput(zone(), 1, jsgraph()->Constant(description));
+ node->InsertInput(zone(), 0, jsgraph()->Constant(shared, broker()));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(description, broker()));
node->InsertInput(zone(), 2,
jsgraph()->UintPtrConstant(p.feedback().index()));
@@ -774,7 +789,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
static_assert(n.FeedbackVectorIndex() == 0);
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker())));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(), broker()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
// Use the CreateShallowObjectLiteral builtin only for shallow boilerplates
@@ -808,27 +823,27 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
static_assert(n.FeedbackVectorIndex() == 0);
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
- node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker())));
+ node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(), broker()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithBuiltinCall(node, Builtin::kCreateRegExpLiteral);
}
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
- node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info));
+ ScopeInfoRef scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info, broker()));
ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
- ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
- node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info));
+ ScopeInfoRef scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info, broker()));
ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
- ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
- node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info));
+ ScopeInfoRef scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info, broker()));
ReplaceWithRuntimeCall(node, Runtime::kPushBlockContext);
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 302297a122..33ab2f4ddf 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -16,10 +16,9 @@ namespace compiler {
#define DEFINE_GETTER(name, expr) \
Node* JSGraph::name() { return GET_CACHED_FIELD(&name##_, expr); }
-Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
- ArgvMode argv_mode, bool builtin_exit_frame) {
- if (save_doubles == SaveFPRegsMode::kIgnore &&
- argv_mode == ArgvMode::kStack) {
+Node* JSGraph::CEntryStubConstant(int result_size, ArgvMode argv_mode,
+ bool builtin_exit_frame) {
+ if (argv_mode == ArgvMode::kStack) {
DCHECK(result_size >= 1 && result_size <= 3);
if (!builtin_exit_frame) {
Node** ptr = nullptr;
@@ -31,41 +30,42 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
DCHECK_EQ(3, result_size);
ptr = &CEntryStub3Constant_;
}
- return GET_CACHED_FIELD(ptr, HeapConstant(CodeFactory::CEntry(
- isolate(), result_size, save_doubles,
- argv_mode, builtin_exit_frame)));
+ return GET_CACHED_FIELD(
+ ptr, HeapConstant(CodeFactory::CEntry(
+ isolate(), result_size, argv_mode, builtin_exit_frame)));
}
Node** ptr = builtin_exit_frame ? &CEntryStub1WithBuiltinExitFrameConstant_
: &CEntryStub1Constant_;
- return GET_CACHED_FIELD(ptr, HeapConstant(CodeFactory::CEntry(
- isolate(), result_size, save_doubles,
- argv_mode, builtin_exit_frame)));
+ return GET_CACHED_FIELD(
+ ptr, HeapConstant(CodeFactory::CEntry(isolate(), result_size, argv_mode,
+ builtin_exit_frame)));
}
- return HeapConstant(CodeFactory::CEntry(isolate(), result_size, save_doubles,
- argv_mode, builtin_exit_frame));
+ return HeapConstant(CodeFactory::CEntry(isolate(), result_size, argv_mode,
+ builtin_exit_frame));
}
-Node* JSGraph::Constant(const ObjectRef& ref) {
+Node* JSGraph::Constant(const ObjectRef& ref, JSHeapBroker* broker) {
if (ref.IsSmi()) return Constant(ref.AsSmi());
if (ref.IsHeapNumber()) {
return Constant(ref.AsHeapNumber().value());
}
OddballType oddball_type =
- ref.AsHeapObject().GetHeapObjectType().oddball_type();
+ ref.AsHeapObject().GetHeapObjectType(broker).oddball_type();
+ ReadOnlyRoots roots(isolate());
if (oddball_type == OddballType::kUndefined) {
- DCHECK(ref.object().equals(isolate()->factory()->undefined_value()));
+ DCHECK(ref.object()->IsUndefined(roots));
return UndefinedConstant();
} else if (oddball_type == OddballType::kNull) {
- DCHECK(ref.object().equals(isolate()->factory()->null_value()));
+ DCHECK(ref.object()->IsNull(roots));
return NullConstant();
} else if (oddball_type == OddballType::kHole) {
- DCHECK(ref.object().equals(isolate()->factory()->the_hole_value()));
+ DCHECK(ref.object()->IsTheHole(roots));
return TheHoleConstant();
} else if (oddball_type == OddballType::kBoolean) {
- if (ref.object().equals(isolate()->factory()->true_value())) {
+ if (ref.object()->IsTrue(roots)) {
return TrueConstant();
} else {
- DCHECK(ref.object().equals(isolate()->factory()->false_value()));
+ DCHECK(ref.object()->IsFalse(roots));
return FalseConstant();
}
} else {
@@ -187,6 +187,9 @@ DEFINE_GETTER(
graph()->zone()->New<ZoneVector<MachineType>>(0, graph()->zone()),
SparseInputMask(SparseInputMask::kEndMarker << 1))))
+DEFINE_GETTER(ExternalObjectMapConstant,
+ HeapConstant(factory()->external_map()))
+
#undef DEFINE_GETTER
#undef GET_CACHED_FIELD
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 2b7d901822..471ad294ce 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -37,9 +37,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
JSGraph& operator=(const JSGraph&) = delete;
// CEntryStubs are cached depending on the result size and other flags.
- Node* CEntryStubConstant(
- int result_size, SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
- ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false);
+ Node* CEntryStubConstant(int result_size,
+ ArgvMode argv_mode = ArgvMode::kStack,
+ bool builtin_exit_frame = false);
// Used for padding frames. (alias: the hole)
Node* PaddingConstant() { return TheHoleConstant(); }
@@ -53,7 +53,7 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
// Creates a Constant node of the appropriate type for the given object.
// Inspect the (serialized) object and determine whether one of the
// canonicalized globals or a number constant should be returned.
- Node* Constant(const ObjectRef& value);
+ Node* Constant(const ObjectRef& value, JSHeapBroker* broker);
// Creates a NumberConstant node, usually canonicalized.
Node* Constant(double value);
@@ -107,7 +107,8 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
V(MinusOneConstant) \
V(NaNConstant) \
V(EmptyStateValues) \
- V(SingleDeadTypedStateValues)
+ V(SingleDeadTypedStateValues) \
+ V(ExternalObjectMapConstant)
// Cached global node accessor methods.
#define DECLARE_GETTER(name) Node* name();
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 48b21bea31..18f1d5a1c1 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -82,6 +82,24 @@ std::string JSHeapBroker::Trace() const {
return oss.str();
}
+#ifdef DEBUG
+static thread_local JSHeapBroker* current_broker = nullptr;
+
+CurrentHeapBrokerScope::CurrentHeapBrokerScope(JSHeapBroker* broker)
+ : prev_broker_(current_broker) {
+ current_broker = broker;
+}
+CurrentHeapBrokerScope::~CurrentHeapBrokerScope() {
+ current_broker = prev_broker_;
+}
+
+// static
+JSHeapBroker* JSHeapBroker::Current() {
+ DCHECK_NOT_NULL(current_broker);
+ return current_broker;
+}
+#endif
+
void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
LocalIsolate* local_isolate) {
set_canonical_handles(info->DetachCanonicalHandles());
@@ -142,12 +160,13 @@ void JSHeapBroker::CollectArrayAndObjectPrototypes() {
}
StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
- DCHECK(IsTypedArrayElementsKind(kind));
+ DCHECK(IsTypedArrayOrRabGsabTypedArrayElementsKind(kind));
switch (kind) {
#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype) \
case ElementsKind::TYPE##_ELEMENTS: \
- return MakeRef(this, isolate()->factory()->Type##Array_string());
+ return Type##Array_string();
TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG)
+ RAB_GSAB_TYPED_ARRAYS_WITH_TYPED_ARRAY_TYPE(TYPED_ARRAY_STRING_TAG)
#undef TYPED_ARRAY_STRING_TAG
default:
UNREACHABLE();
@@ -208,6 +227,14 @@ bool JSHeapBroker::ObjectMayBeUninitialized(HeapObject object) const {
return !IsMainThread() && isolate()->heap()->IsPendingAllocation(object);
}
+#define V(Type, name, Name) \
+ void JSHeapBroker::Init##Name() { \
+ DCHECK(!name##_); \
+ name##_ = MakeRefAssumeMemoryFence(this, isolate()->factory()->name()); \
+ }
+READ_ONLY_ROOT_LIST(V)
+#undef V
+
ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
: kind_(kind), slot_kind_(slot_kind) {}
@@ -234,24 +261,23 @@ ElementAccessFeedback const& ElementAccessFeedback::Refine(
DCHECK(!group.empty());
TransitionGroup new_group(broker->zone());
for (size_t i = 1; i < group.size(); ++i) {
- MapRef source = MakeRefAssumeMemoryFence(broker, *group[i]);
+ MapRef source = group[i];
if (inferred.find(source) != inferred.end()) {
- new_group.push_back(source.object());
+ new_group.push_back(source);
}
}
- MapRef target = MakeRefAssumeMemoryFence(broker, *group.front());
+ MapRef target = group.front();
bool const keep_target =
inferred.find(target) != inferred.end() || new_group.size() > 1;
if (keep_target) {
- new_group.push_back(target.object());
+ new_group.push_back(target);
// The target must be at the front, the order of sources doesn't matter.
std::swap(new_group[0], new_group[new_group.size() - 1]);
}
if (!new_group.empty()) {
- DCHECK(new_group.size() == 1 ||
- new_group.front().equals(target.object()));
+ DCHECK(new_group.size() == 1 || new_group.front().equals(target));
refined_feedback.transition_groups_.push_back(std::move(new_group));
}
}
@@ -313,13 +339,14 @@ bool GlobalAccessFeedback::immutable() const {
return FeedbackNexus::ImmutabilityBit::decode(index_and_immutable_);
}
-base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
+OptionalObjectRef GlobalAccessFeedback::GetConstantHint(
+ JSHeapBroker* broker) const {
if (IsPropertyCell()) {
- bool cell_cached = property_cell().Cache();
+ bool cell_cached = property_cell().Cache(broker);
CHECK(cell_cached); // Can't fail on the main thread.
- return property_cell().value();
+ return property_cell().value(broker);
} else if (IsScriptContextSlot() && immutable()) {
- return script_context().get(slot_index());
+ return script_context().get(broker, slot_index());
} else {
return base::nullopt;
}
@@ -402,11 +429,8 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
for (auto const& group : transition_groups()) {
- for (Handle<Map> map : group) {
- // We assume a memory fence because {map} was read earlier from
- // the feedback vector and was store ordered on insertion into the
- // vector.
- if (!MakeRefAssumeMemoryFence(broker, map).IsStringMap()) return false;
+ for (MapRef map : group) {
+ if (!map.IsStringMap()) return false;
}
}
return true;
@@ -470,7 +494,7 @@ const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
- base::Optional<NameRef> static_name) {
+ OptionalNameRef static_name) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
FeedbackSlotKind kind = nexus.kind();
if (nexus.IsUninitialized()) return NewInsufficientFeedback(kind);
@@ -500,7 +524,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
}
}
- base::Optional<NameRef> name =
+ OptionalNameRef name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
if (nexus.ic_state() == InlineCacheState::MEGADOM) {
@@ -543,7 +567,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
- FeedbackSource const& source) {
+ JSHeapBroker* broker, FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof ||
nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
@@ -566,12 +590,13 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
FeedbackNexus::ContextIndexBits::decode(number);
int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number);
ContextRef context = MakeRefAssumeMemoryFence(
- this,
- target_native_context().script_context_table().object()->get_context(
- script_context_index, kAcquireLoad));
+ this, target_native_context()
+ .script_context_table(broker)
+ .object()
+ ->get_context(script_context_index, kAcquireLoad));
- base::Optional<ObjectRef> contents = context.get(context_slot_index);
- if (contents.has_value()) CHECK(!contents->IsTheHole());
+ OptionalObjectRef contents = context.get(broker, context_slot_index);
+ if (contents.has_value()) CHECK(!contents->IsTheHole(broker));
return *zone()->New<GlobalAccessFeedback>(
context, context_slot_index,
@@ -619,7 +644,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
- base::Optional<JSObjectRef> optional_constructor;
+ OptionalJSObjectRef optional_constructor;
{
MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback();
Handle<JSObject> constructor;
@@ -679,7 +704,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
- base::Optional<HeapObjectRef> target_ref;
+ OptionalHeapObjectRef target_ref;
{
MaybeObject maybe_target = nexus.GetFeedback();
HeapObject target_object;
@@ -767,7 +792,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
- base::Optional<NameRef> static_name) {
+ OptionalNameRef static_name) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback =
ReadFeedbackForPropertyAccess(source, mode, static_name);
@@ -794,7 +819,7 @@ ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
- ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(source);
+ ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(this, source);
SetFeedback(source, &feedback);
return feedback;
}
@@ -816,12 +841,7 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
}
using TransitionGroup = ElementAccessFeedback::TransitionGroup;
- struct HandleLess {
- bool operator()(Handle<Map> x, Handle<Map> y) const {
- return x.address() < y.address();
- }
- };
- ZoneMap<Handle<Map>, TransitionGroup, HandleLess> transition_groups(zone());
+ ZoneRefMap<MapRef, TransitionGroup> transition_groups(zone());
// Separate the actual receiver maps and the possible transition sources.
for (const MapRef& map : maps) {
@@ -838,14 +858,14 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
}
if (transition_target.is_null()) {
- TransitionGroup group(1, map.object(), zone());
- transition_groups.insert({map.object(), group});
+ TransitionGroup group(1, map, zone());
+ transition_groups.insert({map, group});
} else {
- Handle<Map> target = CanonicalPersistentHandle(transition_target);
+ MapRef target = MakeRefAssumeMemoryFence(this, transition_target);
TransitionGroup new_group(1, target, zone());
TransitionGroup& actual_group =
transition_groups.insert({target, new_group}).first->second;
- actual_group.push_back(map.object());
+ actual_group.push_back(map);
}
}
@@ -866,35 +886,33 @@ void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
#ifdef ENABLE_SLOW_DCHECKS
// Check that each of the group's maps occurs exactly once in the whole
// feedback. This implies that "a source is not a target".
- for (Handle<Map> map : group) {
+ for (MapRef map : group) {
int count = 0;
for (TransitionGroup const& some_group : transition_groups()) {
- count += std::count_if(
- some_group.begin(), some_group.end(),
- [&](Handle<Map> some_map) { return some_map.equals(map); });
+ count +=
+ std::count_if(some_group.begin(), some_group.end(),
+ [&](MapRef some_map) { return some_map.equals(map); });
}
CHECK_EQ(count, 1);
}
#endif
}
-base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
- FeedbackNexus const& nexus) {
+OptionalNameRef JSHeapBroker::GetNameFeedback(FeedbackNexus const& nexus) {
Name raw_name = nexus.GetName();
if (raw_name.is_null()) return base::nullopt;
return MakeRefAssumeMemoryFence(this, raw_name);
}
-PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
- MapRef map, NameRef name, AccessMode access_mode,
- CompilationDependencies* dependencies) {
- DCHECK_NOT_NULL(dependencies);
+PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(MapRef map, NameRef name,
+ AccessMode access_mode) {
+ DCHECK_NOT_NULL(dependencies_);
PropertyAccessTarget target({map, name, access_mode});
auto it = property_access_infos_.find(target);
if (it != property_access_infos_.end()) return it->second;
- AccessInfoFactory factory(this, dependencies, zone());
+ AccessInfoFactory factory(this, zone());
PropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(map, name, access_mode);
TRACE(this, "Storing PropertyAccessInfo for "
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 1faa1004e9..b7c0cce8e1 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_JS_HEAP_BROKER_H_
#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
@@ -22,6 +23,7 @@
#include "src/objects/code-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/objects.h"
+#include "src/roots/roots.h"
#include "src/utils/address-map.h"
#include "src/utils/identity-map.h"
#include "src/utils/ostreams.h"
@@ -118,7 +120,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Isolate* isolate() const { return isolate_; }
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
@@ -143,6 +145,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void Retire();
bool SerializingAllowed() const;
+#ifdef DEBUG
+ // Get the current heap broker for this thread. Only to be used for DCHECKs.
+ static JSHeapBroker* Current();
+#endif
+
// Remember the local isolate and initialize its local heap with the
// persistent and canonical handles provided by {info}.
void AttachLocalIsolate(OptimizedCompilationInfo* info,
@@ -214,7 +221,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource const& source);
ProcessedFeedback const& GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
- base::Optional<NameRef> static_name);
+ OptionalNameRef static_name);
ProcessedFeedback const& ProcessFeedbackForBinaryOperation(
FeedbackSource const& source);
@@ -225,11 +232,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool FeedbackIsInsufficient(FeedbackSource const& source) const;
- base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
+ OptionalNameRef GetNameFeedback(FeedbackNexus const& nexus);
- PropertyAccessInfo GetPropertyAccessInfo(
- MapRef map, NameRef name, AccessMode access_mode,
- CompilationDependencies* dependencies);
+ PropertyAccessInfo GetPropertyAccessInfo(MapRef map, NameRef name,
+ AccessMode access_mode);
StringRef GetTypedArrayStringTag(ElementsKind kind);
@@ -247,6 +253,17 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
: isolate()->AsLocalIsolate();
}
+ base::Optional<RootIndex> FindRootIndex(const HeapObjectRef& object) {
+ // No root constant is a JSReceiver.
+ if (object.IsJSReceiver()) return {};
+ Address address = object.object()->ptr();
+ RootIndex root_index;
+ if (root_index_map_.Lookup(address, &root_index)) {
+ return root_index;
+ }
+ return {};
+ }
+
// Return the corresponding canonical persistent handle for {object}. Create
// one if it does not exist.
// If we have the canonical map, we can create the canonical & persistent
@@ -364,6 +381,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
return dependencies_;
}
+#define V(Type, name, Name) inline typename ref_traits<Type>::ref_type name();
+ READ_ONLY_ROOT_LIST(V)
+#undef V
+
private:
friend class HeapObjectRef;
friend class ObjectRef;
@@ -385,12 +406,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ReadFeedbackForForIn(
FeedbackSource const& source) const;
ProcessedFeedback const& ReadFeedbackForGlobalAccess(
- FeedbackSource const& source);
+ JSHeapBroker* broker, FeedbackSource const& source);
ProcessedFeedback const& ReadFeedbackForInstanceOf(
FeedbackSource const& source);
ProcessedFeedback const& ReadFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
- base::Optional<NameRef> static_name);
+ OptionalNameRef static_name);
ProcessedFeedback const& ReadFeedbackForRegExpLiteral(
FeedbackSource const& source);
ProcessedFeedback const& ReadFeedbackForTemplateObject(
@@ -425,12 +446,16 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void CopyCanonicalHandlesForTesting(
std::unique_ptr<CanonicalHandlesMap> canonical_handles);
+#define V(Type, name, Name) void Init##Name();
+ READ_ONLY_ROOT_LIST(V)
+#undef V
+
Isolate* const isolate_;
#if V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
#endif // V8_COMPRESS_POINTERS
Zone* const zone_;
- base::Optional<NativeContextRef> target_native_context_;
+ OptionalNativeContextRef target_native_context_;
RefsMap* refs_;
RootIndexMap root_index_map_;
ZoneUnorderedSet<Handle<JSObject>, Handle<JSObject>::hash,
@@ -450,6 +475,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
PropertyAccessTarget::Hash, PropertyAccessTarget::Equal>
property_access_infos_;
+ // Cache read only roots to avoid needing to look them up via the map.
+#define V(Type, name, Name) \
+ OptionalRef<typename ref_traits<Type>::ref_type> name##_;
+ READ_ONLY_ROOT_LIST(V)
+#undef V
+
CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
@@ -468,6 +499,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
static_assert(base::bits::IsPowerOfTwo(kInitialRefsBucketCount));
};
+#ifdef DEBUG
+// In debug builds, store the current heap broker on a thread local, for
+// DCHECKs to access it via JSHeapBroker::Current();
+class V8_NODISCARD V8_EXPORT_PRIVATE CurrentHeapBrokerScope {
+ public:
+ explicit CurrentHeapBrokerScope(JSHeapBroker* broker);
+ ~CurrentHeapBrokerScope();
+
+ private:
+ JSHeapBroker* const prev_broker_;
+};
+#else
+class V8_NODISCARD V8_EXPORT_PRIVATE CurrentHeapBrokerScope {
+ public:
+ explicit CurrentHeapBrokerScope(JSHeapBroker* broker) {}
+ ~CurrentHeapBrokerScope() {}
+};
+#endif
+
class V8_NODISCARD TraceScope {
public:
TraceScope(JSHeapBroker* broker, const char* label)
@@ -513,15 +563,15 @@ class V8_NODISCARD UnparkedScopeIfNeeded {
template <class T,
typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
-base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
- JSHeapBroker* broker, ObjectData* data) {
+OptionalRef<typename ref_traits<T>::ref_type> TryMakeRef(JSHeapBroker* broker,
+ ObjectData* data) {
if (data == nullptr) return {};
- return {typename ref_traits<T>::ref_type(broker, data)};
+ return {typename ref_traits<T>::ref_type(data)};
}
// Usage:
//
-// base::Optional<FooRef> ref = TryMakeRef(broker, o);
+// OptionalFooRef ref = TryMakeRef(broker, o);
// if (!ref.has_value()) return {}; // bailout
//
// or
@@ -529,7 +579,7 @@ base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
// FooRef ref = MakeRef(broker, o);
template <class T,
typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
-base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
+OptionalRef<typename ref_traits<T>::ref_type> TryMakeRef(
JSHeapBroker* broker, T object, GetOrCreateDataFlags flags = {}) {
ObjectData* data = broker->TryGetOrCreateData(object, flags);
if (data == nullptr) {
@@ -540,7 +590,7 @@ base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
template <class T,
typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
-base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
+OptionalRef<typename ref_traits<T>::ref_type> TryMakeRef(
JSHeapBroker* broker, Handle<T> object, GetOrCreateDataFlags flags = {}) {
ObjectData* data = broker->TryGetOrCreateData(object, flags);
if (data == nullptr) {
@@ -577,6 +627,16 @@ typename ref_traits<T>::ref_type MakeRefAssumeMemoryFence(JSHeapBroker* broker,
return TryMakeRef(broker, object, kAssumeMemoryFence | kCrashOnError).value();
}
+#define V(Type, name, Name) \
+ inline typename ref_traits<Type>::ref_type JSHeapBroker::name() { \
+ if (!name##_) { \
+ Init##Name(); \
+ } \
+ return name##_.value(); \
+ }
+READ_ONLY_ROOT_LIST(V)
+#undef V
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index e73a9c9f90..2672122694 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -27,14 +27,14 @@ bool IsSmall(int const size) {
bool CanConsiderForInlining(JSHeapBroker* broker,
FeedbackCellRef const& feedback_cell) {
- base::Optional<FeedbackVectorRef> feedback_vector =
- feedback_cell.feedback_vector();
+ OptionalFeedbackVectorRef feedback_vector =
+ feedback_cell.feedback_vector(broker);
if (!feedback_vector.has_value()) {
TRACE("Cannot consider " << feedback_cell
<< " for inlining (no feedback vector)");
return false;
}
- SharedFunctionInfoRef shared = feedback_vector->shared_function_info();
+ SharedFunctionInfoRef shared = feedback_vector->shared_function_info(broker);
if (!shared.HasBytecodeArray()) {
TRACE("Cannot consider " << shared << " for inlining (no bytecode)");
@@ -42,12 +42,12 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
}
// Ensure we have a persistent handle to the bytecode in order to avoid
// flushing it during the remaining compilation.
- shared.GetBytecodeArray();
+ shared.GetBytecodeArray(broker);
// Read feedback vector again in case it got flushed before we were able to
// prevent flushing above.
- base::Optional<FeedbackVectorRef> feedback_vector_again =
- feedback_cell.feedback_vector();
+ OptionalFeedbackVectorRef feedback_vector_again =
+ feedback_cell.feedback_vector(broker);
if (!feedback_vector_again.has_value()) {
TRACE("Cannot consider " << shared << " for inlining (no feedback vector)");
return false;
@@ -60,7 +60,8 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- SharedFunctionInfo::Inlineability inlineability = shared.GetInlineability();
+ SharedFunctionInfo::Inlineability inlineability =
+ shared.GetInlineability(broker);
if (inlineability != SharedFunctionInfo::kIsInlineable) {
TRACE("Cannot consider "
<< shared << " for inlining (reason: " << inlineability << ")");
@@ -73,12 +74,11 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
bool CanConsiderForInlining(JSHeapBroker* broker,
JSFunctionRef const& function) {
- FeedbackCellRef feedback_cell =
- function.raw_feedback_cell(broker->dependencies());
+ FeedbackCellRef feedback_cell = function.raw_feedback_cell(broker);
bool const result = CanConsiderForInlining(broker, feedback_cell);
if (result) {
- CHECK(
- function.shared().equals(feedback_cell.shared_function_info().value()));
+ CHECK(function.shared(broker).equals(
+ feedback_cell.shared_function_info(broker).value()));
}
return result;
}
@@ -97,7 +97,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
out.functions[0] = function;
if (CanConsiderForInlining(broker(), function)) {
- out.bytecode[0] = function.shared().GetBytecodeArray();
+ out.bytecode[0] = function.shared(broker()).GetBytecodeArray(broker());
out.num_functions = 1;
return out;
}
@@ -118,7 +118,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.functions[n] = m2.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
if (CanConsiderForInlining(broker(), function)) {
- out.bytecode[n] = function.shared().GetBytecodeArray();
+ out.bytecode[n] = function.shared(broker()).GetBytecodeArray(broker());
}
}
out.num_functions = value_input_count;
@@ -128,8 +128,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell = MakeRef(broker(), FeedbackCellOf(m.op()));
if (CanConsiderForInlining(broker(), feedback_cell)) {
- out.shared_info = feedback_cell.shared_function_info().value();
- out.bytecode[0] = out.shared_info->GetBytecodeArray();
+ out.shared_info = feedback_cell.shared_function_info(broker()).value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray(broker());
}
out.num_functions = 1;
return out;
@@ -139,9 +139,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
JSCreateClosureNode n(callee);
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
if (CanConsiderForInlining(broker(), feedback_cell)) {
- out.shared_info = feedback_cell.shared_function_info().value();
- out.bytecode[0] = out.shared_info->GetBytecodeArray();
- CHECK(out.shared_info->equals(n.Parameters().shared_info(broker())));
+ out.shared_info = feedback_cell.shared_function_info(broker()).value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray(broker());
+ CHECK(out.shared_info->equals(n.Parameters().shared_info()));
}
out.num_functions = 1;
return out;
@@ -192,17 +192,18 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
continue;
}
- SharedFunctionInfoRef shared = candidate.functions[i].has_value()
- ? candidate.functions[i].value().shared()
- : candidate.shared_info.value();
+ SharedFunctionInfoRef shared =
+ candidate.functions[i].has_value()
+ ? candidate.functions[i].value().shared(broker())
+ : candidate.shared_info.value();
candidate.can_inline_function[i] = candidate.bytecode[i].has_value();
// Because of concurrent optimization, optimization of the inlining
// candidate could have been disabled meanwhile.
// JSInliner will check this again and not actually inline the function in
// this case.
CHECK_IMPLIES(candidate.can_inline_function[i],
- shared.IsInlineable() ||
- shared.GetInlineability() ==
+ shared.IsInlineable(broker()) ||
+ shared.GetInlineability(broker()) ==
SharedFunctionInfo::kHasOptimizationDisabled);
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
// recursion like f() -> g() -> f(). The indirect recursion is helpful in
@@ -225,10 +226,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
BytecodeArrayRef bytecode = candidate.bytecode[i].value();
candidate.total_size += bytecode.length();
unsigned inlined_bytecode_size = 0;
- if (candidate.functions[i].has_value()) {
- JSFunctionRef function = candidate.functions[i].value();
- inlined_bytecode_size = function.code().GetInlinedBytecodeSize();
- candidate.total_size += inlined_bytecode_size;
+ if (OptionalJSFunctionRef function = candidate.functions[i]) {
+ if (OptionalCodeRef code = function->code(broker())) {
+ inlined_bytecode_size = code->GetInlinedBytecodeSize();
+ candidate.total_size += inlined_bytecode_size;
+ }
}
candidate_is_small = candidate_is_small &&
IsSmall(bytecode.length() + inlined_bytecode_size);
@@ -665,7 +667,8 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
for (int i = 0; i < num_calls; ++i) {
// TODO(2206): Make comparison be based on underlying SharedFunctionInfo
// instead of the target JSFunction reference directly.
- Node* target = jsgraph()->Constant(candidate.functions[i].value());
+ Node* target =
+ jsgraph()->Constant(candidate.functions[i].value(), broker());
if (i != (num_calls - 1)) {
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), callee, target);
@@ -814,19 +817,20 @@ void JSInliningHeuristic::PrintCandidates() {
<< candidate.node->id() << " with frequency " << candidate.frequency
<< ", " << candidate.num_functions << " target(s):" << std::endl;
for (int i = 0; i < candidate.num_functions; ++i) {
- SharedFunctionInfoRef shared = candidate.functions[i].has_value()
- ? candidate.functions[i]->shared()
- : candidate.shared_info.value();
+ SharedFunctionInfoRef shared =
+ candidate.functions[i].has_value()
+ ? candidate.functions[i]->shared(broker())
+ : candidate.shared_info.value();
os << " - target: " << shared;
if (candidate.bytecode[i].has_value()) {
os << ", bytecode size: " << candidate.bytecode[i]->length();
- if (candidate.functions[i].has_value()) {
- JSFunctionRef function = candidate.functions[i].value();
- unsigned inlined_bytecode_size =
- function.code().GetInlinedBytecodeSize();
- if (inlined_bytecode_size > 0) {
- os << ", existing opt code's inlined bytecode size: "
- << inlined_bytecode_size;
+ if (OptionalJSFunctionRef function = candidate.functions[i]) {
+ if (OptionalCodeRef code = function->code(broker())) {
+ unsigned inlined_bytecode_size = code->GetInlinedBytecodeSize();
+ if (inlined_bytecode_size > 0) {
+ os << ", existing opt code's inlined bytecode size: "
+ << inlined_bytecode_size;
+ }
}
}
} else {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 3a830943bd..02bcde64b1 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -19,10 +19,11 @@ class JSInliningHeuristic final : public AdvancedReducer {
OptimizedCompilationInfo* info, JSGraph* jsgraph,
JSHeapBroker* broker,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, Mode mode)
+ NodeOriginTable* node_origins, Mode mode,
+ const wasm::WasmModule* wasm_module = nullptr)
: AdvancedReducer(editor),
inliner_(editor, local_zone, info, jsgraph, broker, source_positions,
- node_origins),
+ node_origins, wasm_module),
candidates_(local_zone),
seen_(local_zone),
source_positions_(source_positions),
@@ -32,7 +33,9 @@ class JSInliningHeuristic final : public AdvancedReducer {
max_inlined_bytecode_size_cumulative_(
v8_flags.max_inlined_bytecode_size_cumulative),
max_inlined_bytecode_size_absolute_(
- v8_flags.max_inlined_bytecode_size_absolute) {}
+ v8_flags.max_inlined_bytecode_size_absolute) {
+ DCHECK_EQ(mode == kWasmOnly, wasm_module != nullptr);
+ }
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -52,18 +55,18 @@ class JSInliningHeuristic final : public AdvancedReducer {
static const int kMaxCallPolymorphism = 4;
struct Candidate {
- base::Optional<JSFunctionRef> functions[kMaxCallPolymorphism];
+ OptionalJSFunctionRef functions[kMaxCallPolymorphism];
// In the case of polymorphic inlining, this tells if each of the
// functions could be inlined.
bool can_inline_function[kMaxCallPolymorphism];
// Strong references to bytecode to ensure it is not flushed from SFI
// while choosing inlining candidates.
- base::Optional<BytecodeArrayRef> bytecode[kMaxCallPolymorphism];
+ OptionalBytecodeArrayRef bytecode[kMaxCallPolymorphism];
// TODO(2206): For now polymorphic inlining is treated orthogonally to
// inlining based on SharedFunctionInfo. This should be unified and the
// above array should be switched to SharedFunctionInfo instead. Currently
// we use {num_functions == 1 && functions[0].is_null()} as an indicator.
- base::Optional<SharedFunctionInfoRef> shared_info;
+ OptionalSharedFunctionInfoRef shared_info;
int num_functions;
Node* node = nullptr; // The call site at which to inline.
CallFrequency frequency; // Relative frequency of this call site.
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 4baabf3077..f0377c030d 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -287,8 +287,7 @@ bool NeedsImplicitReceiver(SharedFunctionInfoRef shared_info) {
// Determines whether the call target of the given call {node} is statically
// known and can be used as an inlining candidate. The {SharedFunctionInfo} of
// the call target is provided (the exact closure might be unknown).
-base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
- Node* node) {
+OptionalSharedFunctionInfoRef JSInliner::DetermineCallTarget(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
Node* target = node->InputAt(JSCallOrConstructNode::TargetIndex());
HeapObjectMatcher match(target);
@@ -301,7 +300,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
- if (!function.feedback_vector(broker()->dependencies()).has_value()) {
+ if (!function.feedback_vector(broker()).has_value()) {
return base::nullopt;
}
@@ -313,11 +312,12 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// TODO(turbofan): We might want to revisit this restriction later when we
// have a need for this, and we know how to model different native contexts
// in the same graph in a compositional way.
- if (!function.native_context().equals(broker()->target_native_context())) {
+ if (!function.native_context(broker()).equals(
+ broker()->target_native_context())) {
return base::nullopt;
}
- return function.shared();
+ return function.shared(broker());
}
// This reducer can also handle calls where the target is statically known to
@@ -328,10 +328,10 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
if (match.IsJSCreateClosure()) {
JSCreateClosureNode n(target);
FeedbackCellRef cell = n.GetFeedbackCellRefChecked(broker());
- return cell.shared_function_info();
+ return cell.shared_function_info(broker());
} else if (match.IsCheckClosure()) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(match.op()));
- return cell.shared_function_info();
+ return cell.shared_function_info(broker());
}
return base::nullopt;
@@ -351,11 +351,11 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
- CHECK(function.feedback_vector(broker()->dependencies()).has_value());
+ CHECK(function.feedback_vector(broker()).has_value());
// The inlinee specializes to the context from the JSFunction object.
- *context_out = jsgraph()->Constant(function.context());
- return function.raw_feedback_cell(broker()->dependencies());
+ *context_out = jsgraph()->Constant(function.context(broker()), broker());
+ return function.raw_feedback_cell(broker());
}
if (match.IsJSCreateClosure()) {
@@ -386,25 +386,46 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
#if V8_ENABLE_WEBASSEMBLY
Reduction JSInliner::ReduceJSWasmCall(Node* node) {
- // Create the subgraph for the inlinee.
- Node* start_node;
- Node* end;
+ JSWasmCallNode n(node);
+ const JSWasmCallParameters& wasm_call_params = n.Parameters();
+ int fct_index = wasm_call_params.function_index();
+ wasm::NativeModule* native_module = wasm_call_params.native_module();
+ const wasm::FunctionSig* sig = wasm_call_params.signature();
+
+ // Try "full" inlining of very simple WasmGC functions.
+ bool can_inline_body = false;
+ Node* inlinee_body_start = nullptr;
+ Node* inlinee_body_end = nullptr;
+ // TODO(7748): It would be useful to also support inlining of wasm functions
+ // that are surrounded by a try block; this requires further work so that
+ // the wasm trap gets forwarded to the corresponding catch block.
+ if (native_module->enabled_features().has_gc() &&
+ v8_flags.experimental_wasm_js_inlining && fct_index != -1 &&
+ native_module && native_module->module() == wasm_module_ &&
+ !NodeProperties::IsExceptionalCall(node)) {
+ Graph::SubgraphScope graph_scope(graph());
+ WasmGraphBuilder builder(nullptr, zone(), jsgraph(), sig, source_positions_,
+ WasmGraphBuilder::kNoSpecialParameterMode,
+ isolate(), native_module->enabled_features());
+ can_inline_body = builder.TryWasmInlining(fct_index, native_module);
+ inlinee_body_start = graph()->start();
+ inlinee_body_end = graph()->end();
+ }
+
+ // Create the subgraph for the wrapper inlinee.
+ Node* wrapper_start_node;
+ Node* wrapper_end_node;
size_t subgraph_min_node_id;
{
Graph::SubgraphScope scope(graph());
-
graph()->SetEnd(nullptr);
- JSWasmCallNode n(node);
- const JSWasmCallParameters& wasm_call_params = n.Parameters();
-
// Create a nested frame state inside the frame state attached to the
// call; this will ensure that lazy deoptimizations at this point will
// still return the result of the Wasm function call.
Node* continuation_frame_state =
- CreateJSWasmCallBuiltinContinuationFrameState(
- jsgraph(), n.context(), n.frame_state(),
- wasm_call_params.signature());
+ CreateJSWasmCallBuiltinContinuationFrameState(jsgraph(), n.context(),
+ n.frame_state(), sig);
// All the nodes inserted by the inlined subgraph will have
// id >= subgraph_min_node_id. We use this later to avoid wire nodes that
@@ -412,17 +433,17 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) {
// surrounding exception handler, if present.
subgraph_min_node_id = graph()->NodeCount();
+ bool set_in_wasm_flag = !can_inline_body;
BuildInlinedJSToWasmWrapper(
- graph()->zone(), jsgraph(), wasm_call_params.signature(),
- wasm_call_params.module(), isolate(), source_positions_,
- StubCallMode::kCallBuiltinPointer, wasm::WasmFeatures::FromFlags(),
- continuation_frame_state);
+ graph()->zone(), jsgraph(), sig, wasm_call_params.module(), isolate(),
+ source_positions_, wasm::WasmFeatures::FromFlags(),
+ continuation_frame_state, set_in_wasm_flag);
// Extract the inlinee start/end nodes.
- start_node = graph()->start();
- end = graph()->end();
+ wrapper_start_node = graph()->start();
+ wrapper_end_node = graph()->end();
}
- StartNode start{start_node};
+ StartNode start{wrapper_start_node};
Node* exception_target = nullptr;
NodeProperties::IsExceptionalCall(node, &exception_target);
@@ -433,7 +454,7 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) {
NodeVector uncaught_subcalls(local_zone_);
if (exception_target != nullptr) {
// Find all uncaught 'calls' in the inlinee.
- AllNodes inlined_nodes(local_zone_, end, graph());
+ AllNodes inlined_nodes(local_zone_, wrapper_end_node, graph());
for (Node* subnode : inlined_nodes.reachable) {
// Ignore nodes that are not part of the inlinee.
if (subnode->id() < subgraph_min_node_id) continue;
@@ -448,13 +469,111 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) {
}
}
+ // Search the inlined nodes for the wasm call to inline.
+ // Note: We can only inline wasm functions of a single wasm module into any
+ // given JavaScript function (due to the WasmGCLowering being dependent on
+ // module-specific type indices).
+ Node* wasm_fct_call = nullptr;
+ if (can_inline_body) {
+ AllNodes inlined_nodes(local_zone_, wrapper_end_node, graph());
+ for (Node* subnode : inlined_nodes.reachable) {
+ // Ignore nodes that are not part of the inlinee.
+ if (subnode->id() < subgraph_min_node_id) continue;
+
+ if (subnode->opcode() == IrOpcode::kCall &&
+ CallDescriptorOf(subnode->op())->kind() ==
+ CallDescriptor::kCallWasmFunction) {
+ wasm_fct_call = subnode;
+ break;
+ }
+ }
+ DCHECK(wasm_fct_call != nullptr);
+ }
+
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* new_target = jsgraph()->UndefinedConstant();
- return InlineJSWasmCall(node, new_target, context, frame_state, start, end,
- exception_target, uncaught_subcalls);
+ // Inline the wasm wrapper.
+ Reduction r =
+ InlineJSWasmCall(node, new_target, context, frame_state, start,
+ wrapper_end_node, exception_target, uncaught_subcalls);
+ // Inline the wrapped wasm body if supported.
+ if (can_inline_body) {
+ InlineWasmFunction(wasm_fct_call, inlinee_body_start, inlinee_body_end);
+ }
+ return r;
+}
+
+void JSInliner::InlineWasmFunction(Node* call, Node* inlinee_start,
+ Node* inlinee_end) {
+ // TODO(7748): This is very similar to what is done for wasm inlining inside
+ // another wasm function. Can we reuse some of its code?
+ // 1) Rewire function entry.
+ Node* control = NodeProperties::GetControlInput(call);
+ Node* effect = NodeProperties::GetEffectInput(call);
+
+ for (Edge edge : inlinee_start->use_edges()) {
+ Node* use = edge.from();
+ if (use == nullptr) continue;
+ switch (use->opcode()) {
+ case IrOpcode::kParameter: {
+ // Index 0 is the callee node.
+ int index = 1 + ParameterIndexOf(use->op());
+ Node* arg = NodeProperties::GetValueInput(call, index);
+ Replace(use, arg);
+ break;
+ }
+ default:
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ // Projections pointing to the inlinee start are floating
+ // control. They should point to the graph's start.
+ edge.UpdateTo(use->opcode() == IrOpcode::kProjection
+ ? graph()->start()
+ : control);
+ } else {
+ UNREACHABLE();
+ }
+ Revisit(edge.from());
+ break;
+ }
+ }
+
+ // 2) Handle all graph terminators for the callee.
+ // Special case here: There is only one call terminator.
+ DCHECK_EQ(inlinee_end->inputs().count(), 1);
+ Node* terminator = *inlinee_end->inputs().begin();
+ DCHECK_EQ(terminator->opcode(), IrOpcode::kReturn);
+ inlinee_end->Kill();
+
+ // 3) Rewire unhandled calls to the handler.
+ // This is not supported yet; as a result, exceptional calls are treated
+ // as non-inlineable.
+ DCHECK(!NodeProperties::IsExceptionalCall(call));
+
+ // 4) Handle return values.
+ int return_values = terminator->InputCount();
+ DCHECK_GE(return_values, 3);
+ DCHECK_LE(return_values, 4);
+ // Subtract effect, control and drop count.
+ int return_count = return_values - 3;
+ Node* effect_output = terminator->InputAt(return_count + 1);
+ Node* control_output = terminator->InputAt(return_count + 2);
+ for (Edge use_edge : call->use_edges()) {
+ if (NodeProperties::IsValueEdge(use_edge)) {
+ Node* use = use_edge.from();
+ // There is at most one value edge.
+ ReplaceWithValue(use, return_count == 1 ? terminator->InputAt(1)
+ : jsgraph()->UndefinedConstant());
+ }
+ }
+ // All value inputs are replaced by the above loop, so it is ok to use
+ // Dead() as a dummy for value replacement.
+ ReplaceWithValue(call, jsgraph()->Dead(), effect_output, control_output);
}
+
#endif // V8_ENABLE_WEBASSEMBLY
Reduction JSInliner::ReduceJSCall(Node* node) {
@@ -465,14 +584,14 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
JSCallAccessor call(node);
// Determine the call target.
- base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node));
+ OptionalSharedFunctionInfoRef shared_info(DetermineCallTarget(node));
if (!shared_info.has_value()) return NoChange();
SharedFunctionInfoRef outer_shared_info =
MakeRef(broker(), info_->shared_info());
SharedFunctionInfo::Inlineability inlineability =
- shared_info->GetInlineability();
+ shared_info->GetInlineability(broker());
if (inlineability != SharedFunctionInfo::kIsInlineable) {
// The function is no longer inlineable. The only way this can happen is if
// the function had its optimization disabled in the meantime, e.g. because
@@ -553,7 +672,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
- BytecodeArrayRef bytecode_array = shared_info->GetBytecodeArray();
+ BytecodeArrayRef bytecode_array = shared_info->GetBytecodeArray(broker());
// Remember that we inlined this function.
int inlining_id =
@@ -698,7 +817,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) {
CallParameters const& p = CallParametersOf(node->op());
Node* global_proxy = jsgraph()->Constant(
- broker()->target_native_context().global_proxy_object());
+ broker()->target_native_context().global_proxy_object(broker()),
+ broker());
Node* receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
call.receiver(), global_proxy, effect, start);
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 97699909ca..a860f15f43 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -27,14 +27,18 @@ class JSInliner final : public AdvancedReducer {
JSInliner(Editor* editor, Zone* local_zone, OptimizedCompilationInfo* info,
JSGraph* jsgraph, JSHeapBroker* broker,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins)
+ NodeOriginTable* node_origins, const wasm::WasmModule* wasm_module)
: AdvancedReducer(editor),
local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
broker_(broker),
source_positions_(source_positions),
- node_origins_(node_origins){}
+ node_origins_(node_origins),
+ wasm_module_(wasm_module) {
+ // Mark the field as used in case WebAssembly is disabled.
+ USE(wasm_module_);
+ }
const char* reducer_name() const override { return "JSInliner"; }
@@ -46,6 +50,7 @@ class JSInliner final : public AdvancedReducer {
#if V8_ENABLE_WEBASSEMBLY
Reduction ReduceJSWasmCall(Node* node);
+ void InlineWasmFunction(Node* call, Node* inlinee_start, Node* inlinee_end);
#endif // V8_ENABLE_WEBASSEMBLY
private:
@@ -65,8 +70,9 @@ class JSInliner final : public AdvancedReducer {
JSHeapBroker* const broker_;
SourcePositionTable* const source_positions_;
NodeOriginTable* const node_origins_;
+ const wasm::WasmModule* wasm_module_;
- base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
+ OptionalSharedFunctionInfoRef DetermineCallTarget(Node* node);
FeedbackCellRef DetermineCallContext(Node* node, Node** context_out);
FrameState CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 7ad3f5c78d..2fd69d1207 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -35,6 +35,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceTurbofanStaticAssert(node);
case Runtime::kVerifyType:
return ReduceVerifyType(node);
+ case Runtime::kCheckTurboshaftTypeOf:
+ return ReduceCheckTurboshaftTypeOf(node);
default:
break;
}
@@ -296,6 +298,23 @@ Reduction JSIntrinsicLowering::ReduceVerifyType(Node* node) {
return Change(node, simplified()->VerifyType());
}
+Reduction JSIntrinsicLowering::ReduceCheckTurboshaftTypeOf(Node* node) {
+ Node* value = node->InputAt(0);
+ if (!v8_flags.turboshaft) {
+ RelaxEffectsAndControls(node);
+ ReplaceWithValue(node, value);
+ return Changed(value);
+ }
+
+ Node* pattern = node->InputAt(1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* check = graph()->NewNode(simplified()->CheckTurboshaftTypeOf(), value,
+ pattern, effect, control);
+ ReplaceWithValue(node, value, check);
+ return Changed(value);
+}
+
Reduction JSIntrinsicLowering::ReduceIsBeingInterpreted(Node* node) {
RelaxEffectsAndControls(node);
return Changed(jsgraph_->FalseConstant());
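
ReduceCheckTurboshaftTypeOf above forwards the intrinsic's original value to its value uses while threading the newly created CheckTurboshaftTypeOf node into the effect chain via ReplaceWithValue(node, value, check). A toy sketch of that use-edge split, using hypothetical minimal structs rather than the TurboFan Node/Edge API:

#include <cassert>
#include <string>
#include <vector>

// Toy stand-ins for graph nodes and classified use edges; purely
// illustrative of the value/effect split performed by
// ReplaceWithValue(node, value, check) in the reducer above.
struct ToyNode {
  std::string name;
};

enum class EdgeKind { kValue, kEffect };

struct ToyEdge {
  EdgeKind kind;
  ToyNode* to;  // the node this use edge currently points at
};

int main() {
  ToyNode intrinsic{"CheckTurboshaftTypeOf (intrinsic)"};
  ToyNode value{"original value"};
  ToyNode check{"CheckTurboshaftTypeOf (simplified)"};

  // Two uses of the intrinsic: one consumes its value, one its effect.
  std::vector<ToyEdge> uses = {{EdgeKind::kValue, &intrinsic},
                               {EdgeKind::kEffect, &intrinsic}};

  // Value uses are rewired to the original {value}; effect uses are rewired
  // to the new {check}, which keeps the check on the effect chain.
  for (ToyEdge& edge : uses) {
    edge.to = (edge.kind == EdgeKind::kValue) ? &value : &check;
  }

  assert(uses[0].to == &value);
  assert(uses[1].to == &check);
  return 0;
}
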
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 0affbb184d..1871bfbb38 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -60,6 +60,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsBeingInterpreted(Node* node);
Reduction ReduceTurbofanStaticAssert(Node* node);
Reduction ReduceVerifyType(Node* node);
+ Reduction ReduceCheckTurboshaftTypeOf(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index e975867645..b6336828e5 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -13,6 +13,7 @@
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/frame-states.h"
#include "src/compiler/graph-assembler.h"
@@ -59,15 +60,15 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) {
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
- CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone)
+ Zone* zone, Zone* shared_zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
flags_(flags),
- global_object_(broker->target_native_context().global_object().object()),
+ global_object_(
+ broker->target_native_context().global_object(broker).object()),
global_proxy_(
- broker->target_native_context().global_proxy_object().object()),
- dependencies_(dependencies),
+ broker->target_native_context().global_proxy_object(broker).object()),
zone_(zone),
shared_zone_(shared_zone),
type_cache_(TypeCache::Get()),
@@ -262,9 +263,9 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
DCHECK(shared.is_compiled());
int register_count =
shared.internal_formal_parameter_count_without_receiver() +
- shared.GetBytecodeArray().register_count();
- MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
- AllocationBuilder ab(jsgraph(), effect, control);
+ shared.GetBytecodeArray(broker()).register_count();
+ MapRef fixed_array_map = broker()->fixed_array_map();
+ AllocationBuilder ab(jsgraph(), broker(), effect, control);
if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
return NoChange();
}
@@ -531,15 +532,15 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
return NoChange();
}
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- MapRef function_map = function.map();
- HeapObjectRef function_prototype = function_map.prototype();
+ MapRef function_map = function.map(broker());
+ HeapObjectRef function_prototype = function_map.prototype(broker());
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
if (function_map.is_stable()) {
dependencies()->DependOnStableMap(function_map);
- Node* value = jsgraph()->Constant(function_prototype);
+ Node* value = jsgraph()->Constant(function_prototype, broker());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -579,11 +580,11 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
}
JSFunctionRef this_function_ref = m.Ref(broker()).AsJSFunction();
- MapRef function_map = this_function_ref.map();
- HeapObjectRef current = function_map.prototype();
+ MapRef function_map = this_function_ref.map(broker());
+ HeapObjectRef current = function_map.prototype(broker());
// The uppermost JSFunction on the class hierarchy (above it, there can be
// other JSObjects, e.g., Proxies).
- base::Optional<JSObjectRef> last_function;
+ OptionalJSObjectRef last_function;
Node* return_value;
Node* ctor_or_instance;
@@ -597,16 +598,19 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
JSFunctionRef current_function = current.AsJSFunction();
// If there are class fields, bail out. TODO(v8:13091): Handle them here.
- if (current_function.shared().requires_instance_members_initializer()) {
+ if (current_function.shared(broker())
+ .requires_instance_members_initializer()) {
return NoChange();
}
// If there are private methods, bail out. TODO(v8:13091): Handle them here.
- if (current_function.context().scope_info().ClassScopeHasPrivateBrand()) {
+ if (current_function.context(broker())
+ .scope_info(broker())
+ .ClassScopeHasPrivateBrand()) {
return NoChange();
}
- FunctionKind kind = current_function.shared().kind();
+ FunctionKind kind = current_function.shared(broker()).kind();
if (kind != FunctionKind::kDefaultDerivedConstructor) {
      // The hierarchy walk will end here; this is the last chance to bail out
@@ -620,7 +624,7 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
return_value = jsgraph()->BooleanConstant(true);
// Generate a builtin call for creating the instance.
- Node* constructor = jsgraph()->Constant(current_function);
+ Node* constructor = jsgraph()->Constant(current_function, broker());
// In the current FrameState setup, the two outputs of this bytecode are
// poked at indices slot(index(reg_2)) (boolean_output) and
@@ -650,13 +654,13 @@ JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct(
n.context(), new_frame_state, effect, control);
} else {
return_value = jsgraph()->BooleanConstant(false);
- ctor_or_instance = jsgraph()->Constant(current_function);
+ ctor_or_instance = jsgraph()->Constant(current_function, broker());
}
break;
}
// Keep walking up the class tree.
- current = current_function.map().prototype();
+ current = current_function.map(broker()).prototype(broker());
}
dependencies()->DependOnStablePrototypeChain(
@@ -699,7 +703,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Check if the right hand side is a known {receiver}, or
// we have feedback from the InstanceOfIC.
- base::Optional<JSObjectRef> receiver;
+ OptionalJSObjectRef receiver;
HeapObjectMatcher m(constructor);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) {
receiver = m.Ref(broker()).AsJSObject();
@@ -714,10 +718,10 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (!receiver.has_value()) return NoChange();
- MapRef receiver_map = receiver->map();
- NameRef name = MakeRef(broker(), isolate()->factory()->has_instance_symbol());
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- receiver_map, name, AccessMode::kLoad, dependencies());
+ MapRef receiver_map = receiver->map(broker());
+ NameRef name = broker()->has_instance_symbol();
+ PropertyAccessInfo access_info =
+ broker()->GetPropertyAccessInfo(receiver_map, name, AccessMode::kLoad);
// TODO(v8:11457) Support dictionary mode holders here.
if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
@@ -725,7 +729,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
access_info.RecordDependencies(dependencies());
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
if (access_info.IsNotFound()) {
// If there's no @@hasInstance handler, the OrdinaryHasInstance operation
@@ -750,14 +754,14 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
if (access_info.IsFastDataConstant()) {
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
bool found_on_proto = holder.has_value();
JSObjectRef holder_ref = found_on_proto ? holder.value() : receiver.value();
- base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
- access_info.field_representation(), access_info.field_index(),
+ OptionalObjectRef constant = holder_ref.GetOwnFastDataProperty(
+ broker(), access_info.field_representation(), access_info.field_index(),
dependencies());
if (!constant.has_value() || !constant->IsHeapObject() ||
- !constant->AsHeapObject().map().is_callable()) {
+ !constant->AsHeapObject().map(broker()).is_callable()) {
return NoChange();
}
@@ -786,7 +790,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
0, frame_state, ContinuationFrameStateMode::LAZY);
// Call the @@hasInstance handler.
- Node* target = jsgraph()->Constant(*constant);
+ Node* target = jsgraph()->Constant(*constant, broker());
Node* feedback = jsgraph()->UndefinedConstant();
// Value inputs plus context, frame state, effect, control.
static_assert(JSCallNode::ArityForArgc(1) + 4 == 8);
@@ -846,17 +850,17 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
all = false;
break;
}
- HeapObjectRef map_prototype = map.prototype();
+ HeapObjectRef map_prototype = map.prototype(broker());
if (map_prototype.equals(prototype)) {
none = false;
break;
}
- map = map_prototype.map();
+ map = map_prototype.map(broker());
    // TODO(v8:11457) Support dictionary mode prototypes here.
if (!map.is_stable() || map.is_dictionary_map()) {
return kMayBeInPrototypeChain;
}
- if (map.oddball_type() == OddballType::kNull) {
+ if (map.oddball_type(broker()) == OddballType::kNull) {
all = false;
break;
}
@@ -866,7 +870,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
if (!all && !none) return kMayBeInPrototypeChain;
{
- base::Optional<JSObjectRef> last_prototype;
+ OptionalJSObjectRef last_prototype;
if (all) {
// We don't need to protect the full chain if we found the prototype, we
// can stop at {prototype}. In fact we could stop at the one before
@@ -874,7 +878,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
// might be a different object each time, so it's much simpler to include
// {prototype}. That does, however, mean that we must check {prototype}'s
// map stability.
- if (!prototype.map().is_stable()) return kMayBeInPrototypeChain;
+ if (!prototype.map(broker()).is_stable()) return kMayBeInPrototypeChain;
last_prototype = prototype.AsJSObject();
}
WhereToStart start = result == NodeProperties::kUnreliableMaps
@@ -930,7 +934,8 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
NodeProperties::ReplaceValueInput(node, object,
JSInstanceOfNode::LeftIndex());
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(function.bound_target_function()),
+ node,
+ jsgraph()->Constant(function.bound_target_function(broker()), broker()),
JSInstanceOfNode::RightIndex());
node->InsertInput(zone(), JSInstanceOfNode::FeedbackVectorIndex(),
feedback);
@@ -945,14 +950,14 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
- if (!function.map().has_prototype_slot() ||
- !function.has_instance_prototype(dependencies()) ||
- function.PrototypeRequiresRuntimeLookup(dependencies())) {
+ if (!function.map(broker()).has_prototype_slot() ||
+ !function.has_instance_prototype(broker()) ||
+ function.PrototypeRequiresRuntimeLookup(broker())) {
return NoChange();
}
ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
- Node* prototype_constant = jsgraph()->Constant(prototype);
+ Node* prototype_constant = jsgraph()->Constant(prototype, broker());
// Lower the {node} to JSHasInPrototypeChain.
NodeProperties::ReplaceValueInput(node, object, 0);
@@ -977,7 +982,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
if (!m.HasResolvedValue() ||
- !m.Ref(broker()).equals(native_context().promise_function())) {
+ !m.Ref(broker()).equals(native_context().promise_function(broker()))) {
return NoChange();
}
@@ -1015,13 +1020,11 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// Compute property access info for "then" on {resolution}.
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
for (const MapRef& map : resolution_maps) {
access_infos.push_back(broker()->GetPropertyAccessInfo(
- map, MakeRef(broker(), isolate()->factory()->then_string()),
- AccessMode::kLoad, dependencies()));
+ map, broker()->then_string(), AccessMode::kLoad));
}
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
@@ -1080,14 +1083,14 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* lookup_start_object, Node* receiver, Node* value,
NameRef const& name, AccessMode access_mode, Node* key,
PropertyCellRef const& property_cell, Node* effect) {
- if (!property_cell.Cache()) {
+ if (!property_cell.Cache(broker())) {
TRACE_BROKER_MISSING(broker(), "usable data for " << property_cell);
return NoChange();
}
- ObjectRef property_cell_value = property_cell.value();
+ ObjectRef property_cell_value = property_cell.value(broker());
if (property_cell_value.IsHeapObject() &&
- property_cell_value.AsHeapObject().map().oddball_type() ==
+ property_cell_value.AsHeapObject().map(broker()).oddball_type(broker()) ==
OddballType::kHole) {
// The property cell is no longer valid.
return NoChange();
@@ -1115,7 +1118,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
} else if (property_cell_type == PropertyCellType::kConstantType) {
// We rely on stability further below.
if (property_cell_value.IsHeapObject() &&
- !property_cell_value.AsHeapObject().map().is_stable()) {
+ !property_cell_value.AsHeapObject().map(broker()).is_stable()) {
return NoChange();
}
}
@@ -1143,8 +1146,10 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(
- native_context().global_proxy_object().map().object())),
+ ZoneHandleSet<Map>(native_context()
+ .global_proxy_object(broker())
+ .map(broker())
+ .object())),
lookup_start_object, effect, control);
}
@@ -1154,7 +1159,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
- : jsgraph()->Constant(property_cell_value);
+ : jsgraph()->Constant(property_cell_value, broker());
} else {
// Record a code dependency on the cell if we can benefit from the
// additional feedback, or the global property is configurable (i.e.
@@ -1169,10 +1174,10 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
property_details.cell_type() == PropertyCellType::kUndefined) {
value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
- : jsgraph()->Constant(property_cell_value);
+ : jsgraph()->Constant(property_cell_value, broker());
DCHECK(!property_cell_value.IsHeapObject() ||
- property_cell_value.AsHeapObject().map().oddball_type() !=
- OddballType::kHole);
+ property_cell_value.AsHeapObject().map(broker()).oddball_type(
+ broker()) != OddballType::kHole);
} else {
DCHECK_NE(AccessMode::kHas, access_mode);
@@ -1190,8 +1195,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
representation = MachineRepresentation::kTaggedPointer;
} else {
MapRef property_cell_value_map =
- property_cell_value.AsHeapObject().map();
- property_cell_value_type = Type::For(property_cell_value_map);
+ property_cell_value.AsHeapObject().map(broker());
+ property_cell_value_type =
+ Type::For(property_cell_value_map, broker());
representation = MachineRepresentation::kTaggedPointer;
// We can only use the property cell value map for map check
@@ -1206,7 +1212,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
value = effect = graph()->NewNode(
simplified()->LoadField(ForPropertyCellValue(
representation, property_cell_value_type, map, name)),
- jsgraph()->Constant(property_cell), effect, control);
+ jsgraph()->Constant(property_cell, broker()), effect, control);
}
}
} else if (access_mode == AccessMode::kStore) {
@@ -1217,9 +1223,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
dependencies()->DependOnGlobalProperty(property_cell);
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), value,
- jsgraph()->Constant(property_cell_value));
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(), value,
+ jsgraph()->Constant(property_cell_value, broker()));
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kValueMismatch), check,
effect, control);
@@ -1234,7 +1240,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_cell_value.IsHeapObject()) {
MapRef property_cell_value_map =
- property_cell_value.AsHeapObject().map();
+ property_cell_value.AsHeapObject().map(broker());
dependencies()->DependOnStableMap(property_cell_value_map);
// Check that the {value} is a HeapObject.
@@ -1258,8 +1264,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
representation, property_cell_value_type,
MaybeHandle<Map>(), name)),
- jsgraph()->Constant(property_cell), value,
- effect, control);
+ jsgraph()->Constant(property_cell, broker()),
+ value, effect, control);
break;
}
case PropertyCellType::kMutable: {
@@ -1270,7 +1276,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
simplified()->StoreField(ForPropertyCellValue(
MachineRepresentation::kTagged, Type::NonInternal(),
MaybeHandle<Map>(), name)),
- jsgraph()->Constant(property_cell), value, effect, control);
+ jsgraph()->Constant(property_cell, broker()), value, effect,
+ control);
break;
}
case PropertyCellType::kUndefined:
@@ -1297,7 +1304,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
GlobalAccessFeedback const& feedback = processed.AsGlobalAccess();
if (feedback.IsScriptContextSlot()) {
Effect effect = n.effect();
- Node* script_context = jsgraph()->Constant(feedback.script_context());
+ Node* script_context =
+ jsgraph()->Constant(feedback.script_context(), broker());
Node* value = effect =
graph()->NewNode(javascript()->LoadContext(0, feedback.slot_index(),
feedback.immutable()),
@@ -1305,7 +1313,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, p.name(broker()),
+ return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, p.name(),
AccessMode::kLoad, nullptr,
feedback.property_cell());
} else {
@@ -1329,14 +1337,15 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
if (feedback.immutable()) return NoChange();
Effect effect = n.effect();
Control control = n.control();
- Node* script_context = jsgraph()->Constant(feedback.script_context());
+ Node* script_context =
+ jsgraph()->Constant(feedback.script_context(), broker());
effect =
graph()->NewNode(javascript()->StoreContext(0, feedback.slot_index()),
value, script_context, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr, value, p.name(broker()),
+ return ReduceGlobalAccess(node, nullptr, nullptr, value, p.name(),
AccessMode::kStore, nullptr,
feedback.property_cell());
} else {
@@ -1397,7 +1406,7 @@ Reduction JSNativeContextSpecialization::ReduceMegaDOMPropertyAccess(
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kWrongInstanceType), check,
effect, control);
- } else if (function_template_info.is_signature_undefined()) {
+ } else if (function_template_info.is_signature_undefined(broker())) {
// Signature is undefined, enough to check if the receiver is a JSApiObject.
Node* check =
graph()->NewNode(simplified()->NumberEqual(), receiver_instance_type,
@@ -1417,10 +1426,10 @@ Reduction JSNativeContextSpecialization::ReduceMegaDOMPropertyAccess(
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
Node* inputs[8] = {jsgraph()->HeapConstant(callable.code()),
- jsgraph()->Constant(function_template_info),
+ jsgraph()->Constant(function_template_info, broker()),
jsgraph()->Constant(stack_arg_count),
lookup_start_object,
- jsgraph()->Constant(native_context()),
+ jsgraph()->Constant(native_context(), broker()),
frame_state,
effect,
control};
@@ -1494,10 +1503,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (inferred_maps.size() == 1) {
MapRef lookup_start_object_map = inferred_maps[0];
if (lookup_start_object_map.equals(
- native_context().global_proxy_object().map())) {
- if (!native_context().GlobalIsDetached()) {
- base::Optional<PropertyCellRef> cell =
- native_context().global_object().GetPropertyCell(feedback.name());
+ native_context().global_proxy_object(broker()).map(broker()))) {
+ if (!native_context().GlobalIsDetached(broker())) {
+ OptionalPropertyCellRef cell =
+ native_context().global_object(broker()).GetPropertyCell(
+ broker(), feedback.name());
if (!cell.has_value()) return NoChange();
// Note: The map check generated by ReduceGlobalAccesses ensures that we
// will deopt when/if GlobalIsDetached becomes true.
@@ -1514,20 +1524,20 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
for (const MapRef& map : inferred_maps) {
if (map.is_deprecated()) continue;
- // TODO(v8:12547): Support writing to shared structs, which needs a write
- // barrier that calls Object::Share to ensure the RHS is shared.
- if (InstanceTypeChecker::IsJSSharedStruct(map.instance_type()) &&
+ // TODO(v8:12547): Support writing to objects in shared space, which need
+ // a write barrier that calls Object::Share to ensure the RHS is shared.
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
+ map.instance_type()) &&
access_mode == AccessMode::kStore) {
return NoChange();
}
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map, feedback.name(), access_mode, dependencies());
+ PropertyAccessInfo access_info =
+ broker()->GetPropertyAccessInfo(map, feedback.name(), access_mode);
access_infos_for_feedback.push_back(access_info);
}
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
if (!access_info_factory.FinalizePropertyAccessInfos(
access_infos_for_feedback, access_mode, &access_infos)) {
return NoChange();
@@ -1547,7 +1557,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if_exceptions = &if_exception_nodes;
}
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
@@ -1744,6 +1754,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// trimming.
return NoChange();
}
+
values.push_back(continuation->value());
effects.push_back(continuation->effect());
controls.push_back(continuation->control());
@@ -1797,29 +1808,27 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
NamedAccess const& p = n.Parameters();
Node* const receiver = n.object();
- NameRef name = p.name(broker());
+ NameRef name = p.name();
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
if (m.HasResolvedValue()) {
ObjectRef object = m.Ref(broker());
- if (object.IsJSFunction() &&
- name.equals(MakeRef(broker(), factory()->prototype_string()))) {
+ if (object.IsJSFunction() && name.equals(broker()->prototype_string())) {
// Optimize "prototype" property of functions.
JSFunctionRef function = object.AsJSFunction();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
- if (!function.map().has_prototype_slot() ||
- !function.has_instance_prototype(dependencies()) ||
- function.PrototypeRequiresRuntimeLookup(dependencies())) {
+ if (!function.map(broker()).has_prototype_slot() ||
+ !function.has_instance_prototype(broker()) ||
+ function.PrototypeRequiresRuntimeLookup(broker())) {
return NoChange();
}
ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
- Node* value = jsgraph()->Constant(prototype);
+ Node* value = jsgraph()->Constant(prototype, broker());
ReplaceWithValue(node, value);
return Replace(value);
- } else if (object.IsString() &&
- name.equals(MakeRef(broker(), factory()->length_string()))) {
+ } else if (object.IsString() && name.equals(broker()->length_string())) {
// Constant-fold "length" property on constant strings.
Node* value = jsgraph()->Constant(object.AsString().length());
ReplaceWithValue(node, value);
@@ -1836,7 +1845,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper(
Node* node) {
JSLoadNamedFromSuperNode n(node);
NamedAccess const& p = n.Parameters();
- NameRef name = p.name(broker());
+ NameRef name = p.name();
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
@@ -1892,7 +1901,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
}
// Load iterator property operator
- NameRef iterator_symbol = MakeRef(broker(), factory()->iterator_symbol());
+ NameRef iterator_symbol = broker()->iterator_symbol();
const Operator* load_op =
javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
@@ -2014,7 +2023,7 @@ Reduction JSNativeContextSpecialization::ReduceJSSetNamedProperty(Node* node) {
JSSetNamedPropertyNode n(node);
NamedAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
+ return ReducePropertyAccess(node, nullptr, p.name(), n.value(),
FeedbackSource(p.feedback()), AccessMode::kStore);
}
@@ -2023,7 +2032,7 @@ Reduction JSNativeContextSpecialization::ReduceJSDefineNamedOwnProperty(
JSDefineNamedOwnPropertyNode n(node);
DefineNamedOwnPropertyParameters const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(),
+ return ReducePropertyAccess(node, nullptr, p.name(), n.value(),
FeedbackSource(p.feedback()),
AccessMode::kStoreInLiteral);
}
@@ -2058,8 +2067,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
namespace {
-base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
- Node* receiver) {
+OptionalJSTypedArrayRef GetTypedArrayConstant(JSHeapBroker* broker,
+ Node* receiver) {
HeapObjectMatcher m(receiver);
if (!m.HasResolvedValue()) return base::nullopt;
ObjectRef object = m.Ref(broker);
@@ -2073,14 +2082,15 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
void JSNativeContextSpecialization::RemoveImpossibleMaps(
Node* object, ZoneVector<MapRef>* maps) const {
- base::Optional<MapRef> root_map = InferRootMap(object);
+ OptionalMapRef root_map = InferRootMap(object);
if (root_map.has_value() && !root_map->is_abandoned_prototype_map()) {
- maps->erase(std::remove_if(maps->begin(), maps->end(),
- [root_map](const MapRef& map) {
- return map.is_abandoned_prototype_map() ||
- !map.FindRootMap().equals(*root_map);
- }),
- maps->end());
+ maps->erase(
+ std::remove_if(maps->begin(), maps->end(),
+ [root_map, this](const MapRef& map) {
+ return map.is_abandoned_prototype_map() ||
+ !map.FindRootMap(broker()).equals(*root_map);
+ }),
+ maps->end());
}
}
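
The rewritten RemoveImpossibleMaps body above is the standard C++ erase/remove_if filter. A minimal standalone illustration of the idiom, with plain ints and a made-up predicate standing in for ZoneVector<MapRef>, is_abandoned_prototype_map and FindRootMap:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> maps = {1, 2, 3, 4, 5, 6};
  int root = 2;  // stands in for the inferred root map

  // Shift every element the predicate rejects to the end in one pass, then
  // shrink the vector to the kept prefix (erase/remove_if idiom).
  maps.erase(std::remove_if(maps.begin(), maps.end(),
                            [root](int m) { return m % root != 0; }),
             maps.end());

  assert((maps == std::vector<int>{2, 4, 6}));
  return 0;
}
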
@@ -2146,8 +2156,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
refined_feedback.keyed_mode());
}
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
if (!access_info_factory.ComputeElementAccessInfos(refined_feedback,
&access_infos) ||
@@ -2155,6 +2164,16 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
return NoChange();
}
+ // Do not optimize AccessMode::kDefine for typed arrays.
+ if (access_mode == AccessMode::kDefine) {
+ for (const ElementAccessInfo& access_info : access_infos) {
+ if (IsTypedArrayOrRabGsabTypedArrayElementsKind(
+ access_info.elements_kind())) {
+ return NoChange();
+ }
+ }
+ }
+
// For holey stores or growing stores, we need to check that the prototype
// chain contains no setters for elements, and we need to guard those checks
// via code dependencies on the relevant prototype maps.
@@ -2172,13 +2191,14 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
IsGrowStoreMode(feedback.keyed_mode().store_mode())) &&
!receiver_map.HasOnlyStablePrototypesWithFastElements(
- &prototype_maps)) {
+ broker(), &prototype_maps)) {
return NoChange();
}
- // TODO(v8:12547): Support writing to shared structs, which needs a
- // write barrier that calls Object::Share to ensure the RHS is shared.
- if (InstanceTypeChecker::IsJSSharedStruct(
+ // TODO(v8:12547): Support writing to objects in shared space, which
+ // need a write barrier that calls Object::Share to ensure the RHS is
+ // shared.
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
receiver_map.instance_type())) {
return NoChange();
}
@@ -2199,7 +2219,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Check for the monomorphic case.
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
if (access_infos.size() == 1) {
ElementAccessInfo access_info = access_infos.front();
@@ -2344,9 +2364,10 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
HeapObjectMatcher mreceiver(receiver);
HeapObjectRef receiver_ref = mreceiver.Ref(broker());
- if (receiver_ref.map().oddball_type() == OddballType::kHole ||
- receiver_ref.map().oddball_type() == OddballType::kNull ||
- receiver_ref.map().oddball_type() == OddballType::kUndefined ||
+ if (receiver_ref.map(broker()).oddball_type(broker()) == OddballType::kHole ||
+ receiver_ref.map(broker()).oddball_type(broker()) == OddballType::kNull ||
+ receiver_ref.map(broker()).oddball_type(broker()) ==
+ OddballType::kUndefined ||
// The 'in' operator throws a TypeError on primitive values.
(receiver_ref.IsString() && access_mode == AccessMode::kHas)) {
return NoChange();
@@ -2359,27 +2380,28 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
mkey.IsInRange(0.0, static_cast<double>(JSObject::kMaxElementIndex))) {
static_assert(JSObject::kMaxElementIndex <= kMaxUInt32);
const uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
- base::Optional<ObjectRef> element;
+ OptionalObjectRef element;
if (receiver_ref.IsJSObject()) {
JSObjectRef jsobject_ref = receiver_ref.AsJSObject();
- base::Optional<FixedArrayBaseRef> elements =
- jsobject_ref.elements(kRelaxedLoad);
+ OptionalFixedArrayBaseRef elements =
+ jsobject_ref.elements(broker(), kRelaxedLoad);
if (elements.has_value()) {
- element = jsobject_ref.GetOwnConstantElement(*elements, index,
+ element = jsobject_ref.GetOwnConstantElement(broker(), *elements, index,
dependencies());
if (!element.has_value() && receiver_ref.IsJSArray()) {
// We didn't find a constant element, but if the receiver is a
// cow-array we can exploit the fact that any future write to the
// element will replace the whole elements storage.
- element = receiver_ref.AsJSArray().GetOwnCowElement(*elements, index);
+ element = receiver_ref.AsJSArray().GetOwnCowElement(broker(),
+ *elements, index);
if (element.has_value()) {
Node* actual_elements = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
receiver, effect, control);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
- actual_elements,
- jsgraph()->Constant(*elements));
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(), actual_elements,
+ jsgraph()->Constant(*elements, broker()));
effect = graph()->NewNode(
simplified()->CheckIf(
DeoptimizeReason::kCowArrayElementsChanged),
@@ -2388,13 +2410,14 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
}
}
} else if (receiver_ref.IsString()) {
- element = receiver_ref.AsString().GetCharAsStringOrUndefined(index);
+ element =
+ receiver_ref.AsString().GetCharAsStringOrUndefined(broker(), index);
}
if (element.has_value()) {
Node* value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
- : jsgraph()->Constant(*element);
+ : jsgraph()->Constant(*element, broker());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -2420,7 +2443,7 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
}
Reduction JSNativeContextSpecialization::ReducePropertyAccess(
- Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
+ Node* node, Node* key, OptionalNameRef static_name, Node* value,
FeedbackSource const& source, AccessMode access_mode) {
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -2642,7 +2665,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
}
}
- Node* target = jsgraph()->Constant(constant);
+ Node* target = jsgraph()->Constant(constant, broker());
// Introduce the call to the getter function.
Node* value;
if (constant.IsJSFunction()) {
@@ -2660,7 +2683,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
}
Node* api_holder =
access_info.api_holder().has_value()
- ? jsgraph()->Constant(access_info.api_holder().value())
+ ? jsgraph()->Constant(access_info.api_holder().value(), broker())
: receiver;
value = InlineApiCall(receiver, api_holder, frame_state, nullptr, effect,
control, constant.AsFunctionTemplateInfo());
@@ -2682,7 +2705,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
ObjectRef constant = access_info.constant().value();
- Node* target = jsgraph()->Constant(constant);
+ Node* target = jsgraph()->Constant(constant, broker());
// Introduce the call to the setter function.
if (constant.IsJSFunction()) {
Node* feedback = jsgraph()->UndefinedConstant();
@@ -2695,7 +2718,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
} else {
Node* api_holder =
access_info.api_holder().has_value()
- ? jsgraph()->Constant(access_info.api_holder().value())
+ ? jsgraph()->Constant(access_info.api_holder().value(), broker())
: receiver;
InlineApiCall(receiver, api_holder, frame_state, value, effect, control,
constant.AsFunctionTemplateInfo());
@@ -2715,12 +2738,13 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* api_holder, Node* frame_state, Node* value,
Node** effect, Node** control,
FunctionTemplateInfoRef const& function_template_info) {
- if (!function_template_info.call_code().has_value()) {
+ if (!function_template_info.call_code(broker()).has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
return nullptr;
}
- CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
+ CallHandlerInfoRef call_handler_info =
+ *function_template_info.call_code(broker());
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
@@ -2734,7 +2758,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState);
- Node* data = jsgraph()->Constant(call_handler_info.data());
+ Node* data = jsgraph()->Constant(call_handler_info.data(broker()), broker());
ApiFunction function(call_handler_info.callback());
Node* function_reference =
graph()->NewNode(common()->ExternalConstant(ExternalReference::Create(
@@ -2742,7 +2766,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* code = jsgraph()->HeapConstant(call_api_callback.code());
// Add CallApiCallbackStub's register argument as well.
- Node* context = jsgraph()->Constant(native_context());
+ Node* context = jsgraph()->Constant(native_context(), broker());
Node* inputs[11] = {code, function_reference, jsgraph()->Constant(argc),
data, api_holder, receiver};
int index = 6 + argc;
@@ -2766,7 +2790,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
Node* effect, Node* control, NameRef const& name,
ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value() && !access_info.HasDictionaryHolder()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
@@ -2787,7 +2811,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
receiver, receiver_mode, lookup_start_object, context, frame_state,
&effect, &control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
- Node* cell = jsgraph()->Constant(access_info.constant().value().AsCell());
+ Node* cell =
+ jsgraph()->Constant(access_info.constant().value().AsCell(), broker());
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -2797,7 +2822,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
} else {
DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant() ||
access_info.IsDictionaryProtoDataConstant());
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
if (access_info.IsDictionaryProtoDataConstant()) {
auto maybe_value =
access_builder.FoldLoadDictPrototypeConstant(access_info);
@@ -2821,7 +2846,7 @@ JSNativeContextSpecialization::BuildPropertyTest(
DCHECK(!access_info.HasDictionaryHolder());
// Determine actual holder and perform prototype chain checks.
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
@@ -2864,8 +2889,8 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info, AccessMode access_mode) {
// Determine actual holder and perform prototype chain checks.
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- base::Optional<JSObjectRef> holder = access_info.holder();
+ PropertyAccessBuilder access_builder(jsgraph(), broker());
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value()) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
DCHECK_NE(AccessMode::kDefine, access_mode);
@@ -2897,9 +2922,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer()),
storage, effect, control);
}
- bool store_to_existing_constant_field = access_info.IsFastDataConstant() &&
- access_mode == AccessMode::kStore &&
- !access_info.HasTransitionMap();
+ if (access_info.IsFastDataConstant() && access_mode == AccessMode::kStore &&
+ !access_info.HasTransitionMap()) {
+ Node* deoptimize = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kStoreToConstant),
+ jsgraph()->FalseConstant(), effect, control);
+ return ValueEffectControl(jsgraph()->UndefinedConstant(), deoptimize,
+ control);
+ }
FieldAccess field_access = {
kTaggedBase,
field_index.offset(),
@@ -2919,11 +2949,10 @@ JSNativeContextSpecialization::BuildPropertyStore(
effect, control);
if (access_info.HasTransitionMap()) {
// Allocate a HeapNumber for the new property.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
Type::OtherInternal());
- a.Store(AccessBuilder::ForMap(),
- MakeRef(broker(), factory()->heap_number_map()));
+ a.Store(AccessBuilder::ForMap(), broker()->heap_number_map());
FieldAccess value_field_access = AccessBuilder::ForHeapNumberValue();
value_field_access.const_field_info = field_access.const_field_info;
a.Store(value_field_access, value);
@@ -2952,40 +2981,11 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_access.name = MaybeHandle<Name>();
field_access.machine_type = MachineType::Float64();
}
- if (store_to_existing_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect = graph()->NewNode(
- simplified()->LoadField(field_access), storage, effect, control);
-
- Node* check =
- graph()->NewNode(simplified()->SameValue(), current_value, value);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
- effect, control);
- return ValueEffectControl(value, effect, control);
- }
break;
}
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- if (store_to_existing_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect = graph()->NewNode(
- simplified()->LoadField(field_access), storage, effect, control);
-
- Node* check = graph()->NewNode(simplified()->SameValueNumbersOnly(),
- current_value, value);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
- effect, control);
- return ValueEffectControl(value, effect, control);
- }
-
if (field_representation == MachineRepresentation::kTaggedSigned) {
value = effect = graph()->NewNode(
simplified()->CheckSmi(FeedbackSource()), value, effect, control);
@@ -2993,7 +2993,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
} else if (field_representation ==
MachineRepresentation::kTaggedPointer) {
- base::Optional<MapRef> field_map = access_info.field_map();
+ OptionalMapRef field_map = access_info.field_map();
if (field_map.has_value()) {
// Emit a map check for the value.
effect =
@@ -3028,12 +3028,12 @@ JSNativeContextSpecialization::BuildPropertyStore(
UNREACHABLE();
}
// Check if we need to perform a transitioning store.
- base::Optional<MapRef> transition_map = access_info.transition_map();
+ OptionalMapRef transition_map = access_info.transition_map();
if (transition_map.has_value()) {
// Check if we need to grow the properties backing store
// with this transitioning store.
MapRef transition_map_ref = transition_map.value();
- MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
+ MapRef original_map = transition_map_ref.GetBackPointer(broker()).AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -3054,7 +3054,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
common()->BeginRegion(RegionObservability::kObservable), effect);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map_ref), effect, control);
+ jsgraph()->Constant(transition_map_ref, broker()), effect, control);
effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
value, effect, control);
effect = graph()->NewNode(common()->FinishRegion(),
@@ -3079,7 +3079,6 @@ JSNativeContextSpecialization::ReduceJSDefineKeyedOwnPropertyInLiteral(
NumberMatcher mflags(n.flags());
CHECK(mflags.HasResolvedValue());
DefineKeyedOwnPropertyInLiteralFlags cflags(mflags.ResolvedValue());
- DCHECK(!(cflags & DefineKeyedOwnPropertyInLiteralFlag::kDontEnum));
if (cflags & DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName)
return NoChange();
@@ -3145,347 +3144,524 @@ JSNativeContextSpecialization::BuildElementAccess(
if (IsTypedArrayElementsKind(elements_kind) ||
IsRabGsabTypedArrayElementsKind(elements_kind)) {
- Node* buffer_or_receiver = receiver;
- Node* length;
- Node* base_pointer;
- Node* external_pointer;
-
- // Check if we can constant-fold information about the {receiver} (e.g.
- // for asm.js-like code patterns).
- base::Optional<JSTypedArrayRef> typed_array =
- GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value() &&
- !IsRabGsabTypedArrayElementsKind(elements_kind)) {
- // TODO(v8:11111): Add support for rab/gsab here.
- length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
+ return BuildElementAccessForTypedArrayOrRabGsabTypedArray(
+ receiver, index, value, effect, control, context, elements_kind,
+ keyed_mode);
+ }
- DCHECK(!typed_array->is_on_heap());
- // Load the (known) data pointer for the {receiver} and set {base_pointer}
- // and {external_pointer} to the values that will allow to generate typed
- // element accesses using the known data pointer.
- // The data pointer might be invalid if the {buffer} was detached,
- // so we need to make sure that any access is properly guarded.
- base_pointer = jsgraph()->ZeroConstant();
- external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
- } else {
- // Load the {receiver}s length.
- JSGraphAssembler assembler(jsgraph_, zone(), BranchSemantics::kJS,
- [this](Node* n) { this->Revisit(n); });
- assembler.InitializeEffectControl(effect, control);
- length = assembler.TypedArrayLength(
- TNode<JSTypedArray>::UncheckedCast(receiver), {elements_kind},
- TNode<Context>::UncheckedCast(context));
- std::tie(effect, control) =
- ReleaseEffectAndControlFromAssembler(&assembler);
-
- // Load the base pointer for the {receiver}. This will always be Smi
- // zero unless we allow on-heap TypedArrays, which is only the case
- // for Chrome. Node and Electron both set this limit to 0. Setting
- // the base to Smi zero here allows the EffectControlLinearizer to
- // optimize away the tricky part of the access later.
- if (JSTypedArray::kMaxSizeInHeap == 0) {
- base_pointer = jsgraph()->ZeroConstant();
- } else {
- base_pointer = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSTypedArrayBasePointer()),
- receiver, effect, control);
- }
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
- // Load the external pointer for the {receiver}.
- external_pointer = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSTypedArrayExternalPointer()),
- receiver, effect, control);
- }
-
- // See if we can skip the detaching check.
- if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
- // Load the buffer for the {receiver}.
- Node* buffer =
- typed_array.has_value()
- ? jsgraph()->Constant(typed_array->buffer())
- : (effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control));
-
- // Deopt if the {buffer} was detached.
- // Note: A detached buffer leads to megamorphic feedback.
- Node* buffer_bit_field = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
- buffer, effect, control);
- Node* check = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(
- simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
- jsgraph()->ZeroConstant());
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached),
- check, effect, control);
-
- // Retain the {buffer} instead of {receiver} to reduce live ranges.
- buffer_or_receiver = buffer;
- }
-
- enum Situation { kBoundsCheckDone, kHandleOOB_SmiCheckDone };
- Situation situation;
- if ((keyed_mode.IsLoad() &&
- keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
- (keyed_mode.IsStore() &&
- keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
- // Only check that the {index} is in SignedSmall range. We do the actual
- // bounds check below and just skip the property access if it's out of
- // bounds for the {receiver}.
- index = effect = graph()->NewNode(
- simplified()->CheckSmi(FeedbackSource()), index, effect, control);
+ // Don't try to store to a copy-on-write backing store (unless supported by
+ // the store mode).
+ if (IsAnyStore(keyed_mode.access_mode()) &&
+ IsSmiOrObjectElementsKind(elements_kind) &&
+ !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(factory()->fixed_array_map())),
+ elements, effect, control);
+ }
- // Cast the {index} to Unsigned32 range, so that the bounds checks
- // below are performed on unsigned values, which means that all the
- // Negative32 values are treated as out-of-bounds.
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- situation = kHandleOOB_SmiCheckDone;
- } else {
- // Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, length, effect, control);
- situation = kBoundsCheckDone;
+ // Check if the {receiver} is a JSArray.
+ bool receiver_is_jsarray = HasOnlyJSArrayMaps(broker(), receiver_maps);
+
+ // Load the length of the {receiver}.
+ Node* length = effect =
+ receiver_is_jsarray
+ ? graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ receiver, effect, control)
+ : graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ // Check if we might need to grow the {elements} backing store.
+ if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
+ // For growing stores we validate the {index} below.
+ } else if (keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ CanTreatHoleAsUndefined(receiver_maps)) {
+    // Check that the {index} is a valid array index; we do the actual
+    // bounds check below and just skip the load if it's out of
+    // bounds for the {receiver}.
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, jsgraph()->Constant(Smi::kMaxValue), effect, control);
+ } else {
+ // Check that the {index} is in the valid range for the {receiver}.
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, effect, control);
+ }
+
+ // Compute the element access.
+ Type element_type = Type::NonInternal();
+ MachineType element_machine_type = MachineType::AnyTagged();
+ if (IsDoubleElementsKind(elements_kind)) {
+ element_type = Type::Number();
+ element_machine_type = MachineType::Float64();
+ } else if (IsSmiElementsKind(elements_kind)) {
+ element_type = Type::SignedSmall();
+ element_machine_type = MachineType::TaggedSigned();
+ }
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
+
+ // Access the actual element.
+ if (keyed_mode.access_mode() == AccessMode::kLoad) {
+ // Compute the real element access type, which includes the hole in case
+ // of holey backing stores.
+ if (IsHoleyElementsKind(elements_kind)) {
+ element_access.type =
+ Type::Union(element_type, Type::Hole(), graph()->zone());
+ }
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ element_access.machine_type = MachineType::AnyTagged();
}
- // Access the actual element.
- ExternalArrayType external_array_type =
- GetArrayTypeFromElementsKind(elements_kind);
- switch (keyed_mode.access_mode()) {
- case AccessMode::kLoad: {
- // Check if we can return undefined for out-of-bounds loads.
- if (situation == kHandleOOB_SmiCheckDone) {
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- // Do a real bounds check against {length}. This is in order to
- // protect against a potential typer bug leading to the elimination
- // of the NumberLessThan above.
- index = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(),
- CheckBoundsFlag::kConvertStringAndMinusZero |
- CheckBoundsFlag::kAbortOnOutOfBounds),
- index, length, etrue, if_true);
-
- // Perform the actual load
- vtrue = etrue = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index,
- etrue, if_true);
- }
+ // Check if we can return undefined for out-of-bounds loads.
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ CanTreatHoleAsUndefined(receiver_maps)) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination of
+ // the NumberLessThan above.
+ index = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // Materialize undefined for out-of-bounds loads.
- vfalse = jsgraph()->UndefinedConstant();
- }
+ // Perform the actual load
+ vtrue = etrue =
+ graph()->NewNode(simplified()->LoadElement(element_access),
+ elements, index, etrue, if_true);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ // Handle loading from holey backing stores correctly, by either
+ // mapping the hole to undefined if possible, or deoptimizing
+ // otherwise.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Turn the hole into undefined.
+ vtrue = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+ vtrue);
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // Return the signaling NaN hole directly if all uses are
+ // truncating.
+ vtrue = etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()),
+ vtrue, etrue, if_true);
+ }
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // Materialize undefined for out-of-bounds loads.
+ vfalse = jsgraph()->UndefinedConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, control);
+ } else {
+ // Perform the actual load.
+ value = effect =
+ graph()->NewNode(simplified()->LoadElement(element_access), elements,
+ index, effect, control);
+
+ // Handle loading from holey backing stores correctly, by either mapping
+ // the hole to undefined if possible, or deoptimizing otherwise.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Check if we are allowed to turn the hole into undefined.
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ // Turn the hole into undefined.
+ value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+ value);
} else {
- // Perform the actual load.
- DCHECK_EQ(kBoundsCheckDone, situation);
- value = effect = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index, effect,
- control);
+        // Bail out if we see the hole.
+ value = effect = graph()->NewNode(simplified()->CheckNotTaggedHole(),
+ value, effect, control);
+ }
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
+ // Check if we are allowed to return the hole directly.
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ // Return the signaling NaN hole directly if all uses are
+ // truncating.
+ mode = CheckFloat64HoleMode::kAllowReturnHole;
}
- break;
- }
- case AccessMode::kStoreInLiteral:
- case AccessMode::kDefine:
- UNREACHABLE();
- case AccessMode::kStore: {
- // Ensure that the {value} is actually a Number or an Oddball,
- // and truncate it to a Number appropriately.
value = effect = graph()->NewNode(
- simplified()->SpeculativeToNumber(
- NumberOperationHint::kNumberOrOddball, FeedbackSource()),
- value, effect, control);
+ simplified()->CheckFloat64Hole(mode, FeedbackSource()), value,
+ effect, control);
+ }
+ }
+ } else if (keyed_mode.access_mode() == AccessMode::kHas) {
+    // For packed arrays with NoElementsProtector valid, a bounds check
+    // is equivalent to HasProperty.
+ value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ index, length, effect, control);
+ if (IsHoleyElementsKind(elements_kind)) {
+ // If the index is in bounds, do a load and hole check.
- // Introduce the appropriate truncation for {value}. Currently we
- // only need to do this for ClamedUint8Array {receiver}s, as the
- // other truncations are implicit in the StoreTypedElement, but we
- // might want to change that at some point.
- if (external_array_type == kExternalUint8ClampedArray) {
- value = graph()->NewNode(simplified()->NumberToUint8Clamped(), value);
- }
+ Node* branch = graph()->NewNode(common()->Branch(), value, control);
- if (situation == kHandleOOB_SmiCheckDone) {
- // We have to detect OOB stores and handle them without deopt (by
- // simply not performing them).
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- {
- // Do a real bounds check against {length}. This is in order to
- // protect against a potential typer bug leading to the elimination
- // of the NumberLessThan above.
- index = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(),
- CheckBoundsFlag::kConvertStringAndMinusZero |
- CheckBoundsFlag::kAbortOnOutOfBounds),
- index, length, etrue, if_true);
-
- // Perform the actual store.
- etrue = graph()->NewNode(
- simplified()->StoreTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index,
- value, etrue, if_true);
- }
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->FalseConstant();
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- // Just ignore the out-of-bounds write.
- }
+ element_access.type =
+ Type::Union(element_type, Type::Hole(), graph()->zone());
+
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ element_access.machine_type = MachineType::AnyTagged();
+ }
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* checked = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, etrue, if_true);
+
+ Node* element = etrue =
+ graph()->NewNode(simplified()->LoadElement(element_access), elements,
+ checked, etrue, if_true);
+
+ Node* vtrue;
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+        // The hole can be treated as undefined here, so {vtrue} only needs
+        // to record whether the {element} is the hole.
+ vtrue = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
} else {
- // Perform the actual store
- DCHECK_EQ(kBoundsCheckDone, situation);
- effect = graph()->NewNode(
- simplified()->StoreTypedElement(external_array_type),
- buffer_or_receiver, base_pointer, external_pointer, index, value,
- effect, control);
+ vtrue =
+ graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
}
- break;
- }
- case AccessMode::kHas:
- if (situation == kHandleOOB_SmiCheckDone) {
- value = effect =
- graph()->NewNode(simplified()->SpeculativeNumberLessThan(
- NumberOperationHint::kSignedSmall),
- index, length, effect, control);
+
+ // has == !IsHole
+ vtrue = graph()->NewNode(simplified()->BooleanNot(), vtrue);
+ } else {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+        // Bail out if we see the hole.
+ etrue = graph()->NewNode(simplified()->CheckNotTaggedHole(), element,
+ etrue, if_true);
} else {
- DCHECK_EQ(kBoundsCheckDone, situation);
- // For has-property on a typed array, all we need is a bounds check.
- value = jsgraph()->TrueConstant();
+ etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()),
+ element, etrue, if_true);
}
- break;
+
+ vtrue = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
}
} else {
- // Load the elements for the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
+ DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
+ keyed_mode.access_mode() == AccessMode::kStoreInLiteral ||
+ keyed_mode.access_mode() == AccessMode::kDefine);
+
+ if (IsSmiElementsKind(elements_kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(FeedbackSource()), value, effect, control);
+ } else if (IsDoubleElementsKind(elements_kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckNumber(FeedbackSource()), value, effect, control);
+ // Make sure we do not store signalling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
- // Don't try to store to a copy-on-write backing store (unless supported by
- // the store mode).
- if (IsAnyStore(keyed_mode.access_mode()) &&
- IsSmiOrObjectElementsKind(elements_kind) &&
- !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
- effect = graph()->NewNode(
- simplified()->CheckMaps(
- CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(factory()->fixed_array_map())),
+ // Ensure that copy-on-write backing store is writable.
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
+ keyed_mode.store_mode() == STORE_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+ elements, effect, control);
+ } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
+ // Determine the length of the {elements} backing store.
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
elements, effect, control);
- }
- // Check if the {receiver} is a JSArray.
- bool receiver_is_jsarray = HasOnlyJSArrayMaps(broker(), receiver_maps);
-
- // Load the length of the {receiver}.
- Node* length = effect =
- receiver_is_jsarray
- ? graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(elements_kind)),
- receiver, effect, control)
- : graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // Check if we might need to grow the {elements} backing store.
- if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
- // For growing stores we validate the {index} below.
- } else if (keyed_mode.IsLoad() &&
- keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
- CanTreatHoleAsUndefined(receiver_maps)) {
- // Check that the {index} is a valid array index, we do the actual
- // bounds check below and just skip the store below if it's out of
- // bounds for the {receiver}.
- index = effect = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, jsgraph()->Constant(Smi::kMaxValue), effect, control);
- } else {
- // Check that the {index} is in the valid range for the {receiver}.
+ // Validate the {index} depending on holeyness:
+ //
+ // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
+ // backing store capacity plus the maximum allowed gap, as otherwise
+ // the (potential) backing store growth would normalize and thus
+ // the elements kind of the {receiver} would change to slow mode.
+ //
+ // For PACKED_*_ELEMENTS the {index} must be within the range
+ // [0,length+1[ to be valid. In case {index} equals {length},
+ // the {receiver} will be extended, but kept packed.
+ Node* limit =
+ IsHoleyElementsKind(elements_kind)
+ ? graph()->NewNode(simplified()->NumberAdd(), elements_length,
+ jsgraph()->Constant(JSObject::kMaxGap))
+ : graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->OneConstant());
index = effect = graph()->NewNode(
simplified()->CheckBounds(
FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, length, effect, control);
+ index, limit, effect, control);
+
+ // Grow {elements} backing store if necessary.
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(elements_kind)
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, FeedbackSource()), receiver,
+ elements, index, elements_length, effect, control);
+
+ // If we didn't grow {elements}, it might still be COW, in which case we
+ // copy it now.
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
+ keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, effect, control);
+ }
+
+ // Also update the "length" property if {receiver} is a JSArray.
+ if (receiver_is_jsarray) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ {
+        // We don't need to do anything; the {index} is within
+ // the valid bounds for the JSArray {receiver}.
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ Node* new_length = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+ efalse = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ receiver, new_length, efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ }
}
- // Compute the element access.
- Type element_type = Type::NonInternal();
- MachineType element_machine_type = MachineType::AnyTagged();
- if (IsDoubleElementsKind(elements_kind)) {
- element_type = Type::Number();
- element_machine_type = MachineType::Float64();
- } else if (IsSmiElementsKind(elements_kind)) {
- element_type = Type::SignedSmall();
- element_machine_type = MachineType::TaggedSigned();
+ // Perform the actual element access.
+ effect = graph()->NewNode(simplified()->StoreElement(element_access),
+ elements, index, value, effect, control);
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::
+ BuildElementAccessForTypedArrayOrRabGsabTypedArray(
+ Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+ Node* context, ElementsKind elements_kind,
+ KeyedAccessMode const& keyed_mode) {
+ DCHECK(IsTypedArrayElementsKind(elements_kind) ||
+ IsRabGsabTypedArrayElementsKind(elements_kind));
+ DCHECK_IMPLIES(IsRabGsabTypedArrayElementsKind(elements_kind),
+ v8_flags.turbo_rab_gsab);
+  // AccessMode::kDefine is not handled here. The optimization should be
+  // skipped by the caller.
+ DCHECK(keyed_mode.access_mode() != AccessMode::kDefine);
+
+ Node* buffer_or_receiver = receiver;
+ Node* length;
+ Node* base_pointer;
+ Node* external_pointer;
+
+ // Check if we can constant-fold information about the {receiver} (e.g.
+ // for asm.js-like code patterns).
+ OptionalJSTypedArrayRef typed_array =
+ GetTypedArrayConstant(broker(), receiver);
+ if (typed_array.has_value() &&
+ // TODO(v8:11111): Add support for rab/gsab here.
+ !IsRabGsabTypedArrayElementsKind(elements_kind)) {
+ if (typed_array->map(broker()).elements_kind() != elements_kind) {
+ // This case should never be reachable at runtime.
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(),
+ BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ assembler.Unreachable();
+ ReleaseEffectAndControlFromAssembler(&assembler);
+ Node* dead = jsgraph_->Dead();
+ return ValueEffectControl{dead, dead, dead};
+ } else {
+ length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
+
+ DCHECK(!typed_array->is_on_heap());
+ // Load the (known) data pointer for the {receiver} and set
+      // {base_pointer} and {external_pointer} to the values that will allow
+      // us to generate typed element accesses using the known data pointer.
+      // The data pointer might be invalid if the {buffer} was detached, so we
+ // need to make sure that any access is properly guarded.
+ base_pointer = jsgraph()->ZeroConstant();
+ external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
+ }
+ } else {
+ // Load the {receiver}s length.
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(), BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ length = assembler.TypedArrayLength(
+ TNode<JSTypedArray>::UncheckedCast(receiver), {elements_kind},
+ TNode<Context>::UncheckedCast(context));
+ std::tie(effect, control) =
+ ReleaseEffectAndControlFromAssembler(&assembler);
+
+ // Load the base pointer for the {receiver}. This will always be Smi
+ // zero unless we allow on-heap TypedArrays, which is only the case
+ // for Chrome. Node and Electron both set this limit to 0. Setting
+ // the base to Smi zero here allows the EffectControlLinearizer to
+ // optimize away the tricky part of the access later.
+ if (JSTypedArray::kMaxSizeInHeap == 0) {
+ base_pointer = jsgraph()->ZeroConstant();
+ } else {
+ base_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSTypedArrayBasePointer()),
+ receiver, effect, control);
}
- ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type,
- kFullWriteBarrier};
- // Access the actual element.
- if (keyed_mode.access_mode() == AccessMode::kLoad) {
- // Compute the real element access type, which includes the hole in case
- // of holey backing stores.
- if (IsHoleyElementsKind(elements_kind)) {
- element_access.type =
- Type::Union(element_type, Type::Hole(), graph()->zone());
- }
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- element_access.machine_type = MachineType::AnyTagged();
- }
+ // Load the external pointer for the {receiver}.
+ external_pointer = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSTypedArrayExternalPointer()),
+ receiver, effect, control);
+ }
+ // See if we can skip the detaching check.
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
+ // Load the buffer for the {receiver}.
+ Node* buffer =
+ typed_array.has_value()
+ ? jsgraph()->Constant(typed_array->buffer(broker()), broker())
+ : (effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control));
+
+ // Deopt if the {buffer} was detached.
+ // Note: A detached buffer leads to megamorphic feedback.
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
+ jsgraph()->ZeroConstant());
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached), check,
+ effect, control);
+
+ // Retain the {buffer} instead of {receiver} to reduce live ranges.
+ buffer_or_receiver = buffer;
+ }
+
+ enum Situation { kBoundsCheckDone, kHandleOOB_SmiAndRangeCheckComputed };
+ Situation situation;
+ TNode<BoolT> check;
+ if ((keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
+ (keyed_mode.IsStore() &&
+ keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
+ // Only check that the {index} is in SignedSmall range. We do the actual
+ // bounds check below and just skip the property access if it's out of
+ // bounds for the {receiver}.
+ index = effect = graph()->NewNode(simplified()->CheckSmi(FeedbackSource()),
+ index, effect, control);
+ TNode<Boolean> compare_length = TNode<Boolean>::UncheckedCast(
+ graph()->NewNode(simplified()->NumberLessThan(), index, length));
+
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(), BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ TNode<BoolT> check_less_than_length =
+ assembler.EnterMachineGraph<BoolT>(compare_length, UseInfo::Bool());
+ TNode<Int32T> index_int32 = assembler.EnterMachineGraph<Int32T>(
+ TNode<Smi>::UncheckedCast(index), UseInfo::TruncatingWord32());
+ TNode<BoolT> check_non_negative =
+ assembler.Int32LessThanOrEqual(assembler.Int32Constant(0), index_int32);
+ check = TNode<BoolT>::UncheckedCast(
+ assembler.Word32And(check_less_than_length, check_non_negative));
+ std::tie(effect, control) =
+ ReleaseEffectAndControlFromAssembler(&assembler);
+
+ situation = kHandleOOB_SmiAndRangeCheckComputed;
+ } else {
+ // Check that the {index} is in the valid range for the {receiver}.
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, effect, control);
+ situation = kBoundsCheckDone;
+ }
+
+ // Access the actual element.
+ ExternalArrayType external_array_type =
+ GetArrayTypeFromElementsKind(elements_kind);
+ switch (keyed_mode.access_mode()) {
+ case AccessMode::kLoad: {
// Check if we can return undefined for out-of-bounds loads.
- if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
- CanTreatHoleAsUndefined(receiver_maps)) {
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
+ if (situation == kHandleOOB_SmiAndRangeCheckComputed) {
+ DCHECK_NE(check, nullptr);
+ Node* branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
Node* vtrue;
{
// Do a real bounds check against {length}. This is in order to
- // protect against a potential typer bug leading to the elimination of
- // the NumberLessThan above.
+ // protect against a potential typer bug leading to the elimination
+ // of the NumberLessThan above.
index = etrue =
graph()->NewNode(simplified()->CheckBounds(
FeedbackSource(),
@@ -3494,26 +3670,10 @@ JSNativeContextSpecialization::BuildElementAccess(
index, length, etrue, if_true);
// Perform the actual load
- vtrue = etrue =
- graph()->NewNode(simplified()->LoadElement(element_access),
- elements, index, etrue, if_true);
-
- // Handle loading from holey backing stores correctly, by either
- // mapping the hole to undefined if possible, or deoptimizing
- // otherwise.
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Turn the hole into undefined.
- vtrue = graph()->NewNode(
- simplified()->ConvertTaggedHoleToUndefined(), vtrue);
- } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
- // Return the signaling NaN hole directly if all uses are
- // truncating.
- vtrue = etrue = graph()->NewNode(
- simplified()->CheckFloat64Hole(
- CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()),
- vtrue, etrue, if_true);
- }
+ vtrue = etrue = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, etrue,
+ if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -3532,214 +3692,109 @@ JSNativeContextSpecialization::BuildElementAccess(
vtrue, vfalse, control);
} else {
// Perform the actual load.
- value = effect =
- graph()->NewNode(simplified()->LoadElement(element_access),
- elements, index, effect, control);
-
- // Handle loading from holey backing stores correctly, by either mapping
- // the hole to undefined if possible, or deoptimizing otherwise.
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Check if we are allowed to turn the hole into undefined.
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- // Turn the hole into undefined.
- value = graph()->NewNode(
- simplified()->ConvertTaggedHoleToUndefined(), value);
- } else {
- // Bailout if we see the hole.
- value = effect = graph()->NewNode(
- simplified()->CheckNotTaggedHole(), value, effect, control);
- }
- } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
- // Perform the hole check on the result.
- CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
- // Check if we are allowed to return the hole directly.
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- // Return the signaling NaN hole directly if all uses are
- // truncating.
- mode = CheckFloat64HoleMode::kAllowReturnHole;
- }
- value = effect = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode, FeedbackSource()), value,
- effect, control);
- }
+ DCHECK_EQ(kBoundsCheckDone, situation);
+ value = effect = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, effect,
+ control);
+ }
+ break;
+ }
+ case AccessMode::kStoreInLiteral:
+ case AccessMode::kDefine:
+ UNREACHABLE();
+ case AccessMode::kStore: {
+ if (external_array_type == kExternalBigInt64Array ||
+ external_array_type == kExternalBigUint64Array) {
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToBigInt(BigIntOperationHint::kBigInt,
+ FeedbackSource()),
+ value, effect, control);
+ } else {
+ // Ensure that the {value} is actually a Number or an Oddball,
+ // and truncate it to a Number appropriately.
+ // TODO(panq): Eliminate the deopt loop introduced by the speculation.
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, FeedbackSource()),
+ value, effect, control);
}
- } else if (keyed_mode.access_mode() == AccessMode::kHas) {
- // For packed arrays with NoElementsProctector valid, a bound check
- // is equivalent to HasProperty.
- value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
- NumberOperationHint::kSignedSmall),
- index, length, effect, control);
- if (IsHoleyElementsKind(elements_kind)) {
- // If the index is in bounds, do a load and hole check.
-
- Node* branch = graph()->NewNode(common()->Branch(), value, control);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse = jsgraph()->FalseConstant();
- element_access.type =
- Type::Union(element_type, Type::Hole(), graph()->zone());
+ // Introduce the appropriate truncation for {value}. Currently we
+      // only need to do this for Uint8ClampedArray {receiver}s, as the
+ // other truncations are implicit in the StoreTypedElement, but we
+ // might want to change that at some point.
+ if (external_array_type == kExternalUint8ClampedArray) {
+ value = graph()->NewNode(simplified()->NumberToUint8Clamped(), value);
+ }
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- element_access.machine_type = MachineType::AnyTagged();
- }
+ if (situation == kHandleOOB_SmiAndRangeCheckComputed) {
+ // We have to detect OOB stores and handle them without deopt (by
+ // simply not performing them).
+ DCHECK_NE(check, nullptr);
+ Node* branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
+ {
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination
+ // of the NumberLessThan above.
+ index = etrue =
+ graph()->NewNode(simplified()->CheckBounds(
+ FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
- Node* checked = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, length, etrue, if_true);
-
- Node* element = etrue =
- graph()->NewNode(simplified()->LoadElement(element_access),
- elements, checked, etrue, if_true);
-
- Node* vtrue;
- if (CanTreatHoleAsUndefined(receiver_maps)) {
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Check if we are allowed to turn the hole into undefined.
- // Turn the hole into undefined.
- vtrue = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- } else {
- vtrue =
- graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
- }
-
- // has == !IsHole
- vtrue = graph()->NewNode(simplified()->BooleanNot(), vtrue);
- } else {
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- // Bailout if we see the hole.
- etrue = graph()->NewNode(simplified()->CheckNotTaggedHole(),
- element, etrue, if_true);
- } else {
- etrue = graph()->NewNode(
- simplified()->CheckFloat64Hole(
- CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()),
- element, etrue, if_true);
- }
+ // Perform the actual store.
+ etrue = graph()->NewNode(
+ simplified()->StoreTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, value,
+ etrue, if_true);
+ }
- vtrue = jsgraph()->TrueConstant();
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Just ignore the out-of-bounds write.
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect =
graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- }
- } else {
- DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
- keyed_mode.access_mode() == AccessMode::kStoreInLiteral ||
- keyed_mode.access_mode() == AccessMode::kDefine);
-
- if (IsSmiElementsKind(elements_kind)) {
- value = effect = graph()->NewNode(
- simplified()->CheckSmi(FeedbackSource()), value, effect, control);
- } else if (IsDoubleElementsKind(elements_kind)) {
- value = effect =
- graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
- effect, control);
- // Make sure we do not store signalling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
- }
-
- // Ensure that copy-on-write backing store is writable.
- if (IsSmiOrObjectElementsKind(elements_kind) &&
- keyed_mode.store_mode() == STORE_HANDLE_COW) {
- elements = effect =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, effect, control);
- } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
- // Determine the length of the {elements} backing store.
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // Validate the {index} depending on holeyness:
- //
- // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
- // backing store capacity plus the maximum allowed gap, as otherwise
- // the (potential) backing store growth would normalize and thus
- // the elements kind of the {receiver} would change to slow mode.
- //
- // For PACKED_*_ELEMENTS the {index} must be within the range
- // [0,length+1[ to be valid. In case {index} equals {length},
- // the {receiver} will be extended, but kept packed.
- Node* limit =
- IsHoleyElementsKind(elements_kind)
- ? graph()->NewNode(simplified()->NumberAdd(), elements_length,
- jsgraph()->Constant(JSObject::kMaxGap))
- : graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->OneConstant());
- index = effect = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
- index, limit, effect, control);
-
- // Grow {elements} backing store if necessary.
- GrowFastElementsMode mode =
- IsDoubleElementsKind(elements_kind)
- ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode, FeedbackSource()),
- receiver, elements, index, elements_length, effect, control);
-
- // If we didn't grow {elements}, it might still be COW, in which case we
- // copy it now.
- if (IsSmiOrObjectElementsKind(elements_kind) &&
- keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
- elements = effect =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, effect, control);
- }
-
- // Also update the "length" property if {receiver} is a JSArray.
- if (receiver_is_jsarray) {
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- {
- // We don't need to do anything, the {index} is within
- // the valid bounds for the JSArray {receiver}.
- }
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- Node* new_length = graph()->NewNode(
- simplified()->NumberAdd(), index, jsgraph()->OneConstant());
- efalse = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(elements_kind)),
- receiver, new_length, efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect =
- graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- }
+ } else {
+ // Perform the actual store
+ DCHECK_EQ(kBoundsCheckDone, situation);
+ effect = graph()->NewNode(
+ simplified()->StoreTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, value,
+ effect, control);
}
-
- // Perform the actual element access.
- effect = graph()->NewNode(simplified()->StoreElement(element_access),
- elements, index, value, effect, control);
+ break;
}
+ case AccessMode::kHas:
+ if (situation == kHandleOOB_SmiAndRangeCheckComputed) {
+ DCHECK_NE(check, nullptr);
+ JSGraphAssembler assembler(broker(), jsgraph_, zone(),
+ BranchSemantics::kJS,
+ [this](Node* n) { this->Revisit(n); });
+ assembler.InitializeEffectControl(effect, control);
+ value = assembler.MachineSelectIf<Boolean>(check)
+ .Then([&]() { return assembler.TrueConstant(); })
+ .Else([&]() { return assembler.FalseConstant(); })
+ .ExpectTrue()
+ .Value();
+ std::tie(effect, control) =
+ ReleaseEffectAndControlFromAssembler(&assembler);
+ } else {
+ DCHECK_EQ(kBoundsCheckDone, situation);
+ // For has-property on a typed array, all we need is a bounds check.
+ value = jsgraph()->TrueConstant();
+ }
+ break;
}
return ValueEffectControl(value, effect, control);
@@ -3856,7 +3911,7 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
new_length_and_hash, effect, control);
// Allocate and initialize the new properties.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), broker(), effect, control);
a.Allocate(PropertyArray::SizeFor(new_length), AllocationType::kYoung,
Type::OtherInternal());
a.Store(AccessBuilder::ForMap(), jsgraph()->PropertyArrayMapConstant());
@@ -3875,8 +3930,8 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name,
Operator const* const op =
name.IsSymbol() ? simplified()->CheckEqualsSymbol()
: simplified()->CheckEqualsInternalizedString();
- return graph()->NewNode(op, jsgraph()->Constant(name), value, effect,
- control);
+ return graph()->NewNode(op, jsgraph()->Constant(name, broker()), value,
+ effect, control);
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
@@ -3885,7 +3940,7 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
for (MapRef receiver_map : receiver_maps) {
- ObjectRef receiver_prototype = receiver_map.prototype();
+ ObjectRef receiver_prototype = receiver_map.prototype(broker());
if (!receiver_prototype.IsJSObject() ||
!broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
return false;
@@ -3920,17 +3975,16 @@ bool JSNativeContextSpecialization::InferMaps(Node* object, Effect effect,
return false;
}
-base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
- Node* object) const {
+OptionalMapRef JSNativeContextSpecialization::InferRootMap(Node* object) const {
HeapObjectMatcher m(object);
if (m.HasResolvedValue()) {
- MapRef map = m.Ref(broker()).map();
- return map.FindRootMap();
+ MapRef map = m.Ref(broker()).map(broker());
+ return map.FindRootMap(broker());
} else if (m.IsJSCreate()) {
- base::Optional<MapRef> initial_map =
+ OptionalMapRef initial_map =
NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
- DCHECK(initial_map->equals(initial_map->FindRootMap()));
+ DCHECK(initial_map->equals(initial_map->FindRootMap(broker())));
return *initial_map;
}
}
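
The LOAD_IGNORE_OUT_OF_BOUNDS fast path added above bounds-checks the {index}, loads from the {elements} backing store, and merges the loaded value with undefined for holes and for out-of-bounds indices. The following standalone sketch (illustrative only, not V8 code; Element and LoadElementOrUndefined are made-up names) shows the JS-level behaviour that the diamond of Branch/Merge/Phi nodes encodes:

#include <cstddef>
#include <optional>
#include <vector>

struct Element {
  bool is_hole = false;  // marker for an unassigned slot in a holey store
  double number = 0.0;   // payload for the non-hole case
};

// Out-of-bounds indices and hole slots both surface as "undefined", modelled
// here as std::nullopt, mirroring the Phi that merges the loaded value with
// UndefinedConstant in the graph.
std::optional<double> LoadElementOrUndefined(const std::vector<Element>& elements,
                                             std::size_t index) {
  if (index >= elements.size()) return std::nullopt;  // OOB load -> undefined
  const Element& e = elements[index];
  if (e.is_hole) return std::nullopt;                 // the hole -> undefined
  return e.number;                                    // regular fast load
}

When the hole cannot be treated as undefined (CanTreatHoleAsUndefined fails), the lowering instead deoptimizes via CheckNotTaggedHole or CheckFloat64Hole, which has no counterpart in this sketch.
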
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 8e43cfb864..0d8b9904b1 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -51,9 +51,8 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
using Flags = base::Flags<Flag>;
JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* broker, Flags flags,
- CompilationDependencies* dependencies,
- Zone* zone, Zone* shared_zone);
+ JSHeapBroker* broker, Flags flags, Zone* zone,
+ Zone* shared_zone);
JSNativeContextSpecialization(const JSNativeContextSpecialization&) = delete;
JSNativeContextSpecialization& operator=(
const JSNativeContextSpecialization&) = delete;
@@ -102,8 +101,8 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// In the case of non-keyed (named) accesses, pass the name as {static_name}
// and use {nullptr} for {key} (load/store modes are irrelevant).
Reduction ReducePropertyAccess(Node* node, Node* key,
- base::Optional<NameRef> static_name,
- Node* value, FeedbackSource const& source,
+ OptionalNameRef static_name, Node* value,
+ FeedbackSource const& source,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
NamedAccessFeedback const& feedback,
@@ -193,6 +192,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Node* control, Node* context,
ElementAccessInfo const& access_info,
KeyedAccessMode const& keyed_mode);
+ ValueEffectControl BuildElementAccessForTypedArrayOrRabGsabTypedArray(
+ Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+ Node* context, ElementsKind elements_kind,
+ KeyedAccessMode const& keyed_mode);
// Construct appropriate subgraph to load from a String.
Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length,
@@ -235,7 +238,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// Try to infer a root map for the {object} independent of the current program
// location.
- base::Optional<MapRef> InferRootMap(Node* object) const;
+ OptionalMapRef InferRootMap(Node* object) const;
// Checks if we know at compile time that the {receiver} either definitely
// has the {prototype} in it's prototype chain, or the {receiver} definitely
@@ -268,7 +271,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
NativeContextRef native_context() const {
return broker()->target_native_context();
}
- CompilationDependencies* dependencies() const { return dependencies_; }
+ CompilationDependencies* dependencies() const {
+ return broker()->dependencies();
+ }
Zone* zone() const { return zone_; }
Zone* shared_zone() const { return shared_zone_; }
@@ -277,7 +282,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Flags const flags_;
Handle<JSGlobalObject> global_object_;
Handle<JSGlobalProxy> global_proxy_;
- CompilationDependencies* const dependencies_;
Zone* const zone_;
Zone* const shared_zone_;
TypeCache const* type_cache_;
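
The header now declares BuildElementAccessForTypedArrayOrRabGsabTypedArray, whose store path (see the .cc hunk above) silently drops writes whose index fails the machine-level 0 <= index < length check, clamping the value first for Uint8ClampedArray receivers. A minimal standalone sketch of that observable behaviour, with the caveat that the clamping shown only approximates NumberToUint8Clamped and that ClampToUint8 and StoreIgnoringOutOfBounds are invented names:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Rough stand-in for the NumberToUint8Clamped truncation: NaN maps to 0,
// everything else is clamped to [0, 255] and rounded.
uint8_t ClampToUint8(double v) {
  if (std::isnan(v)) return 0;
  v = std::clamp(v, 0.0, 255.0);
  return static_cast<uint8_t>(std::lround(v));
}

// Out-of-bounds stores are simply dropped: no exception and no deopt,
// matching the "just ignore the out-of-bounds write" branch of the lowering.
void StoreIgnoringOutOfBounds(std::vector<uint8_t>& data, int64_t index,
                              double value) {
  if (index < 0 || static_cast<uint64_t>(index) >= data.size()) return;
  data[static_cast<std::size_t>(index)] = ClampToUint8(value);
}
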
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 2cba89c278..34f6e9d7b0 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -29,7 +29,7 @@ constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) {
}
template <class T>
-Address AddressOrNull(base::Optional<T> ref) {
+Address AddressOrNull(OptionalRef<T> ref) {
if (!ref.has_value()) return kNullAddress;
return ref->object().address();
}
@@ -730,6 +730,10 @@ Type JSWasmCallNode::TypeForWasmReturnType(const wasm::ValueType& type) {
case wasm::kF32:
case wasm::kF64:
return Type::Number();
+ case wasm::kRef:
+ case wasm::kRefNull:
+ CHECK_EQ(type.heap_type(), wasm::HeapType::kExtern);
+ return Type::Any();
default:
UNREACHABLE();
}
@@ -741,6 +745,8 @@ Type JSWasmCallNode::TypeForWasmReturnType(const wasm::ValueType& type) {
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToNumberConvertBigInt, Operator::kNoProperties, 1, 1) \
+ V(ToBigInt, Operator::kNoProperties, 1, 1) \
+ V(ToBigIntConvertNumber, Operator::kNoProperties, 1, 1) \
V(ToNumeric, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
@@ -936,8 +942,10 @@ const Operator* JSOperatorBuilder::CallRuntime(
#if V8_ENABLE_WEBASSEMBLY
const Operator* JSOperatorBuilder::CallWasm(
const wasm::WasmModule* wasm_module,
- const wasm::FunctionSig* wasm_signature, FeedbackSource const& feedback) {
- JSWasmCallParameters parameters(wasm_module, wasm_signature, feedback);
+ const wasm::FunctionSig* wasm_signature, int function_index,
+ wasm::NativeModule* native_module, FeedbackSource const& feedback) {
+ JSWasmCallParameters parameters(wasm_module, wasm_signature, function_index,
+ native_module, feedback);
return zone()->New<Operator1<JSWasmCallParameters>>(
IrOpcode::kJSWasmCall, Operator::kNoProperties, // opcode
"JSWasmCall", // name
@@ -1131,7 +1139,7 @@ const Operator* JSOperatorBuilder::DefineKeyedOwnProperty(
return zone()->New<Operator1<PropertyAccess>>( // --
IrOpcode::kJSDefineKeyedOwnProperty, Operator::kNoProperties, // opcode
"JSDefineKeyedOwnProperty", // name
- 4, 1, 1, 0, 1, 2, // counts
+ 5, 1, 1, 0, 1, 2, // counts
access); // parameter
}
@@ -1255,8 +1263,8 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
type); // parameter
}
-const Operator* JSOperatorBuilder::CreateArray(
- size_t arity, base::Optional<AllocationSiteRef> site) {
+const Operator* JSOperatorBuilder::CreateArray(size_t arity,
+ OptionalAllocationSiteRef site) {
// constructor, new_target, arg1, ..., argN
int const value_input_count = static_cast<int>(arity) + 2;
CreateArrayParameters parameters(arity, site);
@@ -1308,7 +1316,7 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
}
const Operator* JSOperatorBuilder::CreateClosure(
- const SharedFunctionInfoRef& shared_info, const CodeTRef& code,
+ const SharedFunctionInfoRef& shared_info, const CodeRef& code,
AllocationType allocation) {
static constexpr int kFeedbackCell = 1;
static constexpr int kArity = kFeedbackCell;
@@ -1434,51 +1442,51 @@ const Operator* JSOperatorBuilder::CreateFunctionContext(
const Operator* JSOperatorBuilder::CreateCatchContext(
const ScopeInfoRef& scope_info) {
- return zone()->New<Operator1<ScopeInfoTinyRef>>(
+ return zone()->New<Operator1<ScopeInfoRef>>(
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
1, 1, 1, 1, 1, 2, // counts
- ScopeInfoTinyRef{scope_info}); // parameter
+ ScopeInfoRef{scope_info}); // parameter
}
const Operator* JSOperatorBuilder::CreateWithContext(
const ScopeInfoRef& scope_info) {
- return zone()->New<Operator1<ScopeInfoTinyRef>>(
+ return zone()->New<Operator1<ScopeInfoRef>>(
IrOpcode::kJSCreateWithContext, Operator::kNoProperties, // opcode
"JSCreateWithContext", // name
1, 1, 1, 1, 1, 2, // counts
- ScopeInfoTinyRef{scope_info}); // parameter
+ ScopeInfoRef{scope_info}); // parameter
}
const Operator* JSOperatorBuilder::CreateBlockContext(
const ScopeInfoRef& scope_info) {
- return zone()->New<Operator1<ScopeInfoTinyRef>>( // --
+ return zone()->New<Operator1<ScopeInfoRef>>( // --
IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
"JSCreateBlockContext", // name
0, 1, 1, 1, 1, 2, // counts
- ScopeInfoTinyRef{scope_info}); // parameter
+ ScopeInfoRef{scope_info}); // parameter
}
-ScopeInfoRef ScopeInfoOf(JSHeapBroker* broker, const Operator* op) {
+ScopeInfoRef ScopeInfoOf(const Operator* op) {
DCHECK(IrOpcode::kJSCreateBlockContext == op->opcode() ||
IrOpcode::kJSCreateWithContext == op->opcode() ||
IrOpcode::kJSCreateCatchContext == op->opcode());
- return OpParameter<ScopeInfoTinyRef>(op).AsRef(broker);
+ return OpParameter<ScopeInfoRef>(op);
}
-bool operator==(ScopeInfoTinyRef const& lhs, ScopeInfoTinyRef const& rhs) {
+bool operator==(ScopeInfoRef const& lhs, ScopeInfoRef const& rhs) {
return lhs.object().location() == rhs.object().location();
}
-bool operator!=(ScopeInfoTinyRef const& lhs, ScopeInfoTinyRef const& rhs) {
+bool operator!=(ScopeInfoRef const& lhs, ScopeInfoRef const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(ScopeInfoTinyRef const& ref) {
+size_t hash_value(ScopeInfoRef const& ref) {
return reinterpret_cast<size_t>(ref.object().location());
}
-std::ostream& operator<<(std::ostream& os, ScopeInfoTinyRef const& ref) {
+std::ostream& operator<<(std::ostream& os, ScopeInfoRef const& ref) {
return os << Brief(*ref.object());
}
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 7cbef46a58..53610ed52b 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -368,14 +368,12 @@ class CreateFunctionContextParameters final {
slot_count_(slot_count),
scope_type_(scope_type) {}
- ScopeInfoRef scope_info(JSHeapBroker* broker) const {
- return scope_info_.AsRef(broker);
- }
+ ScopeInfoRef scope_info() const { return scope_info_; }
int slot_count() const { return slot_count_; }
ScopeType scope_type() const { return scope_type_; }
private:
- const ScopeInfoTinyRef scope_info_;
+ const ScopeInfoRef scope_info_;
int const slot_count_;
ScopeType const scope_type_;
@@ -400,11 +398,11 @@ class DefineNamedOwnPropertyParameters final {
FeedbackSource const& feedback)
: name_(name), feedback_(feedback) {}
- NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
+ NameRef name() const { return name_; }
FeedbackSource const& feedback() const { return feedback_; }
private:
- const NameTinyRef name_;
+ const NameRef name_;
FeedbackSource const feedback_;
friend bool operator==(DefineNamedOwnPropertyParameters const&,
@@ -450,12 +448,12 @@ class NamedAccess final {
FeedbackSource const& feedback)
: name_(name), feedback_(feedback), language_mode_(language_mode) {}
- NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
+ NameRef name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
FeedbackSource const& feedback() const { return feedback_; }
private:
- const NameTinyRef name_;
+ const NameRef name_;
FeedbackSource const feedback_;
LanguageMode const language_mode_;
@@ -478,13 +476,13 @@ class LoadGlobalParameters final {
TypeofMode typeof_mode)
: name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {}
- NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
+ NameRef name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
const FeedbackSource& feedback() const { return feedback_; }
private:
- const NameTinyRef name_;
+ const NameRef name_;
const FeedbackSource feedback_;
const TypeofMode typeof_mode_;
@@ -511,11 +509,11 @@ class StoreGlobalParameters final {
LanguageMode language_mode() const { return language_mode_; }
FeedbackSource const& feedback() const { return feedback_; }
- NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }
+ NameRef name() const { return name_; }
private:
LanguageMode const language_mode_;
- const NameTinyRef name_;
+ const NameRef name_;
FeedbackSource const feedback_;
friend bool operator==(StoreGlobalParameters const&,
@@ -565,17 +563,15 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
// used as parameter by JSCreateArray operators.
class CreateArrayParameters final {
public:
- CreateArrayParameters(size_t arity, base::Optional<AllocationSiteRef> site)
+ CreateArrayParameters(size_t arity, OptionalAllocationSiteRef site)
: arity_(arity), site_(site) {}
size_t arity() const { return arity_; }
- base::Optional<AllocationSiteRef> site(JSHeapBroker* broker) const {
- return AllocationSiteTinyRef::AsOptionalRef(broker, site_);
- }
+ OptionalAllocationSiteRef site() const { return site_; }
private:
size_t const arity_;
- base::Optional<AllocationSiteTinyRef> const site_;
+ OptionalAllocationSiteRef const site_;
friend bool operator==(CreateArrayParameters const&,
CreateArrayParameters const&);
@@ -651,11 +647,11 @@ class CreateBoundFunctionParameters final {
: arity_(arity), map_(map) {}
size_t arity() const { return arity_; }
- MapRef map(JSHeapBroker* broker) const { return map_.AsRef(broker); }
+ MapRef map() const { return map_; }
private:
size_t const arity_;
- const MapTinyRef map_;
+ const MapRef map_;
friend bool operator==(CreateBoundFunctionParameters const&,
CreateBoundFunctionParameters const&);
@@ -676,18 +672,16 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
class CreateClosureParameters final {
public:
CreateClosureParameters(const SharedFunctionInfoRef& shared_info,
- const CodeTRef& code, AllocationType allocation)
+ const CodeRef& code, AllocationType allocation)
: shared_info_(shared_info), code_(code), allocation_(allocation) {}
- SharedFunctionInfoRef shared_info(JSHeapBroker* broker) const {
- return shared_info_.AsRef(broker);
- }
- CodeTRef code(JSHeapBroker* broker) const { return code_.AsRef(broker); }
+ SharedFunctionInfoRef shared_info() const { return shared_info_; }
+ CodeRef code() const { return code_; }
AllocationType allocation() const { return allocation_; }
private:
- const SharedFunctionInfoTinyRef shared_info_;
- const CodeTTinyRef code_;
+ const SharedFunctionInfoRef shared_info_;
+ const CodeRef code_;
AllocationType const allocation_;
friend bool operator==(CreateClosureParameters const&,
@@ -710,17 +704,13 @@ class GetTemplateObjectParameters final {
FeedbackSource const& feedback)
: description_(description), shared_(shared), feedback_(feedback) {}
- TemplateObjectDescriptionRef description(JSHeapBroker* broker) const {
- return description_.AsRef(broker);
- }
- SharedFunctionInfoRef shared(JSHeapBroker* broker) const {
- return shared_.AsRef(broker);
- }
+ TemplateObjectDescriptionRef description() const { return description_; }
+ SharedFunctionInfoRef shared() const { return shared_; }
FeedbackSource const& feedback() const { return feedback_; }
private:
- const TemplateObjectDescriptionTinyRef description_;
- const SharedFunctionInfoTinyRef shared_;
+ const TemplateObjectDescriptionRef description_;
+ const SharedFunctionInfoRef shared_;
FeedbackSource const feedback_;
friend bool operator==(GetTemplateObjectParameters const&,
@@ -749,15 +739,13 @@ class CreateLiteralParameters final {
length_(length),
flags_(flags) {}
- HeapObjectRef constant(JSHeapBroker* broker) const {
- return constant_.AsRef(broker);
- }
+ HeapObjectRef constant() const { return constant_; }
FeedbackSource const& feedback() const { return feedback_; }
int length() const { return length_; }
int flags() const { return flags_; }
private:
- const HeapObjectTinyRef constant_;
+ const HeapObjectRef constant_;
FeedbackSource const feedback_;
int const length_;
int const flags_;
@@ -854,14 +842,22 @@ class JSWasmCallParameters {
public:
explicit JSWasmCallParameters(const wasm::WasmModule* module,
const wasm::FunctionSig* signature,
+ int function_index,
+ wasm::NativeModule* native_module,
FeedbackSource const& feedback)
- : module_(module), signature_(signature), feedback_(feedback) {
+ : module_(module),
+ signature_(signature),
+ function_index_(function_index),
+ native_module_(native_module),
+ feedback_(feedback) {
DCHECK_NOT_NULL(module);
DCHECK_NOT_NULL(signature);
}
const wasm::WasmModule* module() const { return module_; }
const wasm::FunctionSig* signature() const { return signature_; }
+ int function_index() const { return function_index_; }
+ wasm::NativeModule* native_module() const { return native_module_; }
FeedbackSource const& feedback() const { return feedback_; }
int input_count() const;
int arity_without_implicit_args() const;
@@ -869,6 +865,8 @@ class JSWasmCallParameters {
private:
const wasm::WasmModule* const module_;
const wasm::FunctionSig* const signature_;
+ int function_index_;
+ wasm::NativeModule* native_module_;
const FeedbackSource feedback_;
};
@@ -885,16 +883,14 @@ int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
int RestoreRegisterIndexOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-ScopeInfoRef ScopeInfoOf(JSHeapBroker* broker,
- const Operator* op) V8_WARN_UNUSED_RESULT;
+ScopeInfoRef ScopeInfoOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-bool operator==(ScopeInfoTinyRef const&, ScopeInfoTinyRef const&);
-bool operator!=(ScopeInfoTinyRef const&, ScopeInfoTinyRef const&);
+bool operator==(ScopeInfoRef const&, ScopeInfoRef const&);
+bool operator!=(ScopeInfoRef const&, ScopeInfoRef const&);
-size_t hash_value(ScopeInfoTinyRef const&);
+size_t hash_value(ScopeInfoRef const&);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
- ScopeInfoTinyRef const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ScopeInfoRef const&);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
@@ -935,20 +931,21 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ToName();
const Operator* ToNumber();
const Operator* ToNumberConvertBigInt();
+ const Operator* ToBigInt();
+ const Operator* ToBigIntConvertNumber();
const Operator* ToNumeric();
const Operator* ToObject();
const Operator* ToString();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
- const Operator* CreateArray(size_t arity,
- base::Optional<AllocationSiteRef> site);
+ const Operator* CreateArray(size_t arity, OptionalAllocationSiteRef site);
const Operator* CreateArrayIterator(IterationKind);
const Operator* CreateAsyncFunctionObject(int register_count);
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, const MapRef& map);
const Operator* CreateClosure(
- const SharedFunctionInfoRef& shared_info, const CodeTRef& code,
+ const SharedFunctionInfoRef& shared_info, const CodeRef& code,
AllocationType allocation = AllocationType::kYoung);
const Operator* CreateIterResultObject();
const Operator* CreateStringIterator();
@@ -1006,6 +1003,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
#if V8_ENABLE_WEBASSEMBLY
const Operator* CallWasm(const wasm::WasmModule* wasm_module,
const wasm::FunctionSig* wasm_signature,
+ int function_index,
+ wasm::NativeModule* native_module,
FeedbackSource const& feedback);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1339,7 +1338,8 @@ class JSDefineKeyedOwnPropertyNode final : public JSNodeWrapperBase {
V(Object, object, 0, Object) \
V(Key, key, 1, Object) \
V(Value, value, 2, Object) \
- V(FeedbackVector, feedback_vector, 3, HeapObject)
+ V(Flags, flags, 3, Object) \
+ V(FeedbackVector, feedback_vector, 4, HeapObject)
INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS
};
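
The parameter classes above drop the broker-mediated *TinyRef accessors in favour of storing fully resolved refs, so callers such as NamedAccess::name() no longer take a JSHeapBroker*. The shape of that change, sketched with hypothetical Broker, TinyRef and Ref types rather than the real ones:

// Hypothetical stand-ins; only the accessor shape matters here.
struct Broker {};
struct Ref { int payload = 0; };
struct TinyRef {
  Ref resolved;
  Ref AsRef(Broker*) const { return resolved; }  // resolution step, now gone
};

// Old shape: every reader had to thread a broker through to resolve the ref.
class OldParams {
 public:
  explicit OldParams(TinyRef name) : name_(name) {}
  Ref name(Broker* broker) const { return name_.AsRef(broker); }
 private:
  TinyRef name_;
};

// New shape: the ref is stored pre-resolved, so accessors take no broker.
class NewParams {
 public:
  explicit NewParams(Ref name) : name_(name) {}
  Ref name() const { return name_; }
 private:
  Ref name_;
};
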
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 71c4aa0368..904dee0dc5 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -107,6 +107,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
case CompareOperationHint::kBigInt:
+ case CompareOperationHint::kBigInt64:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kReceiverOrNullOrUndefined:
case CompareOperationHint::kInternalizedString:
@@ -115,6 +116,29 @@ class JSSpeculativeBinopBuilder final {
return false;
}
+ bool GetCompareBigIntOperationHint(BigIntOperationHint* hint) {
+ switch (GetCompareOperationHint()) {
+ case CompareOperationHint::kSignedSmall:
+ case CompareOperationHint::kNumber:
+ case CompareOperationHint::kNumberOrBoolean:
+ case CompareOperationHint::kNumberOrOddball:
+ case CompareOperationHint::kAny:
+ case CompareOperationHint::kNone:
+ case CompareOperationHint::kString:
+ case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kReceiver:
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
+ case CompareOperationHint::kInternalizedString:
+ return false;
+ case CompareOperationHint::kBigInt:
+ *hint = BigIntOperationHint::kBigInt;
+ return true;
+ case CompareOperationHint::kBigInt64:
+ *hint = BigIntOperationHint::kBigInt64;
+ return true;
+ }
+ }
+
const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
switch (op_->opcode()) {
case IrOpcode::kJSAdd:
@@ -169,13 +193,21 @@ class JSSpeculativeBinopBuilder final {
return simplified()->SpeculativeBigIntModulus(hint);
case IrOpcode::kJSBitwiseAnd:
return simplified()->SpeculativeBigIntBitwiseAnd(hint);
+ case IrOpcode::kJSBitwiseOr:
+ return simplified()->SpeculativeBigIntBitwiseOr(hint);
+ case IrOpcode::kJSBitwiseXor:
+ return simplified()->SpeculativeBigIntBitwiseXor(hint);
+ case IrOpcode::kJSShiftLeft:
+ return simplified()->SpeculativeBigIntShiftLeft(hint);
+ case IrOpcode::kJSShiftRight:
+ return simplified()->SpeculativeBigIntShiftRight(hint);
default:
break;
}
UNREACHABLE();
}
- const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
+ const Operator* SpeculativeNumberCompareOp(NumberOperationHint hint) {
switch (op_->opcode()) {
case IrOpcode::kJSEqual:
return simplified()->SpeculativeNumberEqual(hint);
@@ -195,6 +227,26 @@ class JSSpeculativeBinopBuilder final {
UNREACHABLE();
}
+ const Operator* SpeculativeBigIntCompareOp(BigIntOperationHint hint) {
+ switch (op_->opcode()) {
+ case IrOpcode::kJSEqual:
+ return simplified()->SpeculativeBigIntEqual(hint);
+ case IrOpcode::kJSLessThan:
+ return simplified()->SpeculativeBigIntLessThan(hint);
+ case IrOpcode::kJSGreaterThan:
+ std::swap(left_, right_);
+ return simplified()->SpeculativeBigIntLessThan(hint);
+ case IrOpcode::kJSLessThanOrEqual:
+ return simplified()->SpeculativeBigIntLessThanOrEqual(hint);
+ case IrOpcode::kJSGreaterThanOrEqual:
+ std::swap(left_, right_);
+ return simplified()->SpeculativeBigIntLessThanOrEqual(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ }
+
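
The JSGreaterThan and JSGreaterThanOrEqual cases above reuse the LessThan operators after swapping left_ and right_. A minimal standalone sketch of the identity this relies on (illustrative C++, not part of the patch):

    #include <cassert>

    // a > b holds exactly when b < a, and a >= b exactly when b <= a, so only
    // the Less* simplified operators are needed once operands are swapped.
    int main() {
      for (int a : {-2, 0, 3}) {
        for (int b : {-2, 0, 3}) {
          assert((a > b) == (b < a));
          assert((a >= b) == (b <= a));
        }
      }
      return 0;
    }
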
Node* BuildSpeculativeOperation(const Operator* op) {
DCHECK_EQ(2, op->ValueInputCount());
DCHECK_EQ(1, op->EffectInputCount());
@@ -229,7 +281,17 @@ class JSSpeculativeBinopBuilder final {
Node* TryBuildNumberCompare() {
NumberOperationHint hint;
if (GetCompareNumberOperationHint(&hint)) {
- const Operator* op = SpeculativeCompareOp(hint);
+ const Operator* op = SpeculativeNumberCompareOp(hint);
+ Node* node = BuildSpeculativeOperation(op);
+ return node;
+ }
+ return nullptr;
+ }
+
+ Node* TryBuildBigIntCompare() {
+ BigIntOperationHint hint;
+ if (GetCompareBigIntOperationHint(&hint)) {
+ const Operator* op = SpeculativeBigIntCompareOp(hint);
Node* node = BuildSpeculativeOperation(op);
return node;
}
@@ -381,6 +443,9 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
if (Node* node = b.TryBuildNumberCompare()) {
return LoweringResult::SideEffectFree(node, node, control);
}
+ if (Node* node = b.TryBuildBigIntCompare()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
break;
}
case IrOpcode::kJSInstanceOf: {
@@ -414,12 +479,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
if (Node* node = b.TryBuildNumberBinop()) {
return LoweringResult::SideEffectFree(node, node, control);
}
- if (op->opcode() == IrOpcode::kJSAdd ||
- op->opcode() == IrOpcode::kJSSubtract ||
- op->opcode() == IrOpcode::kJSMultiply ||
- op->opcode() == IrOpcode::kJSDivide ||
- op->opcode() == IrOpcode::kJSModulus ||
- op->opcode() == IrOpcode::kJSBitwiseAnd) {
+ if (op->opcode() != IrOpcode::kJSShiftRightLogical &&
+ op->opcode() != IrOpcode::kJSExponentiate) {
if (Node* node = b.TryBuildBigIntBinop()) {
return LoweringResult::SideEffectFree(node, node, control);
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index fa6c559a4d..a26ee1a517 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -58,6 +58,7 @@ class JSBinopReduction final {
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
case CompareOperationHint::kBigInt:
+ case CompareOperationHint::kBigInt64:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kReceiverOrNullOrUndefined:
case CompareOperationHint::kInternalizedString:
@@ -66,6 +67,31 @@ class JSBinopReduction final {
return false;
}
+ bool GetCompareBigIntOperationHint(BigIntOperationHint* hint) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ switch (GetCompareOperationHint(node_)) {
+ case CompareOperationHint::kSignedSmall:
+ case CompareOperationHint::kNumber:
+ case CompareOperationHint::kNumberOrBoolean:
+ case CompareOperationHint::kNumberOrOddball:
+ case CompareOperationHint::kAny:
+ case CompareOperationHint::kNone:
+ case CompareOperationHint::kString:
+ case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kReceiver:
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
+ case CompareOperationHint::kInternalizedString:
+ return false;
+ case CompareOperationHint::kBigInt:
+ *hint = BigIntOperationHint::kBigInt;
+ return true;
+ case CompareOperationHint::kBigInt64:
+ *hint = BigIntOperationHint::kBigInt64;
+ return true;
+ }
+ UNREACHABLE();
+ }
+
bool IsInternalizedStringCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
return (GetCompareOperationHint(node_) ==
@@ -451,7 +477,7 @@ JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
jsgraph_(jsgraph),
broker_(broker),
empty_string_type_(
- Type::Constant(broker, factory()->empty_string(), graph()->zone())),
+ Type::Constant(broker, broker->empty_string(), graph()->zone())),
pointer_comparable_type_(
Type::Union(Type::Oddball(),
Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
@@ -593,9 +619,9 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
PropertyCellRef string_length_protector =
MakeRef(broker(), factory()->string_length_protector());
- string_length_protector.CacheAsProtector();
+ string_length_protector.CacheAsProtector(broker());
- if (string_length_protector.value().AsSmi() ==
+ if (string_length_protector.value(broker()).AsSmi() ==
Protectors::kProtectorValid) {
// We can just deoptimize if the {length} is out-of-bounds. Besides
// generating a shorter code sequence than the version below, this
@@ -842,7 +868,8 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
// then ObjectIsUndetectable(left)
// else ReferenceEqual(left, right)
#define __ gasm.
- JSGraphAssembler gasm(jsgraph(), jsgraph()->zone(), BranchSemantics::kJS);
+ JSGraphAssembler gasm(broker(), jsgraph(), jsgraph()->zone(),
+ BranchSemantics::kJS);
gasm.InitializeEffectControl(r.effect(), r.control());
auto lhs = TNode<Object>::UncheckedCast(r.left());
@@ -911,6 +938,7 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
}
NumberOperationHint hint;
+ BigIntOperationHint hint_bigint;
if (r.BothInputsAre(Type::Signed32()) ||
r.BothInputsAre(Type::Unsigned32())) {
return r.ChangeToPureOperator(simplified()->NumberEqual());
@@ -926,6 +954,11 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
simplified()->SpeculativeNumberEqual(hint), Type::Boolean());
} else if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual());
+ } else if (r.GetCompareBigIntOperationHint(&hint_bigint)) {
+ DCHECK(hint_bigint == BigIntOperationHint::kBigInt ||
+ hint_bigint == BigIntOperationHint::kBigInt64);
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeBigIntEqual(hint_bigint), Type::Boolean());
} else if (r.IsReceiverCompareOperation()) {
// For strict equality, it's enough to know that one input is a Receiver,
// as a strict equality comparison with a Receiver can only yield true if
@@ -995,7 +1028,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- base::Optional<double> number = input_value.ToNumber();
+ base::Optional<double> number = input_value.ToNumber(broker());
if (!number.has_value()) return NoChange();
return Replace(jsgraph()->Constant(number.value()));
}
@@ -1003,7 +1036,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (input_type.IsHeapConstant()) {
HeapObjectRef input_value = input_type.AsHeapConstant()->Ref();
double value;
- if (input_value.OddballToNumber().To(&value)) {
+ if (input_value.OddballToNumber(broker()).To(&value)) {
return Replace(jsgraph()->Constant(value));
}
}
@@ -1044,6 +1077,39 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSToBigInt(Node* node) {
+ // TODO(panq): Reduce constant inputs.
+ Node* const input = node->InputAt(0);
+ Type const input_type = NodeProperties::GetType(input);
+ if (input_type.Is(Type::BigInt())) {
+ ReplaceWithValue(node, input);
+ return Changed(input);
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSToBigIntConvertNumber(Node* node) {
+ // TODO(panq): Reduce constant inputs.
+ Node* const input = node->InputAt(0);
+ Type const input_type = NodeProperties::GetType(input);
+ if (input_type.Is(Type::BigInt())) {
+ ReplaceWithValue(node, input);
+ return Changed(input);
+ } else if (input_type.Is(Type::Signed32OrMinusZero()) ||
+ input_type.Is(Type::Unsigned32OrMinusZero())) {
+ RelaxEffectsAndControls(node);
+ node->TrimInputCount(1);
+ Type node_type = NodeProperties::GetType(node);
+ NodeProperties::SetType(
+ node,
+ Type::Intersect(node_type, Type::SignedBigInt64(), graph()->zone()));
+ NodeProperties::ChangeOp(node,
+ simplified()->Integral32OrMinusZeroToBigInt());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSToNumeric(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
@@ -1170,8 +1236,8 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
Node* receiver = n.object();
Type receiver_type = NodeProperties::GetType(receiver);
- NameRef name = NamedAccessOf(node->op()).name(broker());
- NameRef length_str = MakeRef(broker(), factory()->length_string());
+ NameRef name = NamedAccessOf(node->op()).name();
+ NameRef length_str = broker()->length_string();
// Optimize "length" property of strings.
if (name.equals(length_str) && receiver_type.Is(Type::String())) {
Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
@@ -1437,8 +1503,10 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
if (module_type.IsHeapConstant()) {
SourceTextModuleRef module_constant =
module_type.AsHeapConstant()->Ref().AsSourceTextModule();
- base::Optional<CellRef> cell_constant = module_constant.GetCell(cell_index);
- if (cell_constant.has_value()) return jsgraph()->Constant(*cell_constant);
+ OptionalCellRef cell_constant =
+ module_constant.GetCell(broker(), cell_index);
+ if (cell_constant.has_value())
+ return jsgraph()->Constant(*cell_constant, broker());
}
FieldAccess field_access;
@@ -1540,8 +1608,8 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, Builtin builtin, int arity,
DCHECK(Builtins::IsCpp(builtin));
const bool has_builtin_exit_frame = true;
- Node* stub = jsgraph->CEntryStubConstant(
- 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, has_builtin_exit_frame);
+ Node* stub =
+ jsgraph->CEntryStubConstant(1, ArgvMode::kStack, has_builtin_exit_frame);
node->ReplaceInput(0, stub);
const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
@@ -1587,7 +1655,7 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
target_type.AsHeapConstant()->Ref().IsJSFunction()) {
// Only optimize [[Construct]] here if {function} is a Constructor.
JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.map().is_constructor()) return NoChange();
+ if (!function.map(broker()).is_constructor()) return NoChange();
// Patch {node} to an indirect call via ConstructFunctionForwardVarargs.
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
@@ -1619,11 +1687,11 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
// Only optimize [[Construct]] here if {function} is a Constructor.
- if (!function.map().is_constructor()) return NoChange();
+ if (!function.map(broker()).is_constructor()) return NoChange();
// Patch {node} to an indirect call via the {function}s construct stub.
Callable callable = Builtins::CallableFor(
- isolate(), function.shared().construct_as_builtin()
+ isolate(), function.shared(broker()).construct_as_builtin()
? Builtin::kJSBuiltinsConstructStub
: Builtin::kJSConstructStubGeneric);
static_assert(JSConstructNode::TargetIndex() == 0);
@@ -1693,20 +1761,20 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
}
// Check if we know the SharedFunctionInfo of {target}.
- base::Optional<JSFunctionRef> function;
- base::Optional<SharedFunctionInfoRef> shared;
+ OptionalJSFunctionRef function;
+ OptionalSharedFunctionInfoRef shared;
if (target_type.IsHeapConstant() &&
target_type.AsHeapConstant()->Ref().IsJSFunction()) {
function = target_type.AsHeapConstant()->Ref().AsJSFunction();
- shared = function->shared();
+ shared = function->shared(broker());
} else if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& ccp =
JSCreateClosureNode{target}.Parameters();
- shared = ccp.shared_info(broker());
+ shared = ccp.shared_info();
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- shared = cell.shared_function_info();
+ shared = cell.shared_function_info(broker());
}
if (shared.has_value()) {
@@ -1726,12 +1794,13 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// require data from a foreign native context.
if (is_sloppy(shared->language_mode()) && !shared->native() &&
!receiver_type.Is(Type::Receiver())) {
- if (!function.has_value() || !function->native_context().equals(
+ if (!function.has_value() || !function->native_context(broker()).equals(
broker()->target_native_context())) {
return NoChange();
}
- Node* global_proxy =
- jsgraph()->Constant(function->native_context().global_proxy_object());
+ Node* global_proxy = jsgraph()->Constant(
+ function->native_context(broker()).global_proxy_object(broker()),
+ broker());
receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(convert_mode),
receiver, global_proxy, effect, control);
@@ -2395,6 +2464,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
return ReduceJSToNumber(node);
+ case IrOpcode::kJSToBigInt:
+ return ReduceJSToBigInt(node);
+ case IrOpcode::kJSToBigIntConvertNumber:
+ return ReduceJSToBigIntConvertNumber(node);
case IrOpcode::kJSToNumeric:
return ReduceJSToNumeric(node);
case IrOpcode::kJSToString:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index cd375fb607..c5504d47c0 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -61,6 +61,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSToName(Node* node);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToNumber(Node* node);
+ Reduction ReduceJSToBigInt(Node* node);
+ Reduction ReduceJSToBigIntConvertNumber(Node* node);
Reduction ReduceJSToNumeric(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
diff --git a/deps/v8/src/compiler/late-escape-analysis.cc b/deps/v8/src/compiler/late-escape-analysis.cc
index 9e004fad84..9e81da1868 100644
--- a/deps/v8/src/compiler/late-escape-analysis.cc
+++ b/deps/v8/src/compiler/late-escape-analysis.cc
@@ -111,6 +111,7 @@ void LateEscapeAnalysis::RemoveAllocation(Node* node) {
for (Edge edge : node->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
Node* use = edge.from();
+ if (use->IsDead()) continue;
// The value stored by this Store node might be another allocation which has
// no more uses. Affected allocations are revisited.
if (base::Optional<Node*> stored_value = TryGetStoredValue(use);
diff --git a/deps/v8/src/compiler/linear-scheduler.cc b/deps/v8/src/compiler/linear-scheduler.cc
index c6fe65b482..a8df8ac1e3 100644
--- a/deps/v8/src/compiler/linear-scheduler.cc
+++ b/deps/v8/src/compiler/linear-scheduler.cc
@@ -99,8 +99,8 @@ Node* LinearScheduler::GetEarlySchedulePosition(Node* node) {
NodeState& use = stack.top();
if (use.early_schedule_position == nullptr ||
GetControlLevel(use.early_schedule_position) <
- GetControlLevel(top.early_schedule_position)) {
- use.early_schedule_position = top.early_schedule_position;
+ GetControlLevel(early_schedule_position)) {
+ use.early_schedule_position = early_schedule_position;
}
}
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 3b41f3bc19..860a3f144d 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -241,7 +241,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// 3. JS runtime arguments are not attached as inputs to the TailCall node.
// 4. Prior to the tail call, frame and register state is torn down to just
// before the caller frame was constructed.
- // 5. Unlike normal tail calls, arguments adaptor frames (if present) are
+ // 5. Unlike normal tail calls, inlined arguments frames (if present) are
// *not* torn down.
//
// In other words, behavior is identical to a jmp instruction prior caller
@@ -587,7 +587,8 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
}
// A special {Parameter} index for JSCalls that represents the closure.
- static constexpr int kJSCallClosureParamIndex = -1;
+ static constexpr int kJSCallClosureParamIndex = kJSCallClosureParameterIndex;
+ static_assert(kJSCallClosureParamIndex == -1);
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index cc56abe493..1ad6e194aa 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -1348,6 +1348,7 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
state = state->KillElement(object, index, zone());
break;
}
+ case IrOpcode::kCheckMaps:
case IrOpcode::kStoreTypedElement: {
// Doesn't affect anything we track with the state currently.
break;
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 8595a8a806..a1ca9235ff 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -294,7 +294,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractState const* Get(Node* node) const;
void Set(Node* node, AbstractState const* state);
- Zone* zone() const { return info_for_node_.get_allocator().zone(); }
+ Zone* zone() const { return info_for_node_.zone(); }
private:
ZoneVector<AbstractState const*> info_for_node_;
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 71c82cd87d..0c4ed5a8c3 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-analysis.h"
+#include "src/base/v8-fallthrough.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
@@ -553,27 +554,31 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
// static
ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
Node* loop_header, AllNodes& all_nodes, Zone* zone, size_t max_size,
- bool calls_are_large) {
+ Purpose purpose) {
auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
std::vector<Node*> queue;
DCHECK_EQ(loop_header->opcode(), IrOpcode::kLoop);
queue.push_back(loop_header);
-
-#define ENQUEUE_USES(use_name, condition) \
- for (Node * use_name : node->uses()) { \
- if (condition && visited->count(use_name) == 0) queue.push_back(use_name); \
+ visited->insert(loop_header);
+
+#define ENQUEUE_USES(use_name, condition) \
+ for (Node * use_name : node->uses()) { \
+ if (condition && visited->count(use_name) == 0) { \
+ visited->insert(use_name); \
+ queue.push_back(use_name); \
+ } \
}
-
+ bool has_instruction_worth_peeling = false;
while (!queue.empty()) {
Node* node = queue.back();
queue.pop_back();
if (node->opcode() == IrOpcode::kEnd) {
// We reached the end of the graph. The end node is not part of the loop.
+ visited->erase(node);
continue;
}
- visited->insert(node);
if (visited->size() > max_size) return nullptr;
switch (node->opcode()) {
case IrOpcode::kLoop:
@@ -596,16 +601,16 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
}
// All uses are outside the loop, do nothing.
break;
- // If {calls_are_large}, call nodes are considered to have unbounded size,
+ // If unrolling, call nodes are considered to have unbounded size,
// i.e. >max_size, with the exception of certain wasm builtins.
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
- if (calls_are_large) return nullptr;
+ if (purpose == Purpose::kLoopUnrolling) return nullptr;
ENQUEUE_USES(use, true)
break;
case IrOpcode::kCall: {
- if (!calls_are_large) {
+ if (purpose == Purpose::kLoopPeeling) {
ENQUEUE_USES(use, true);
break;
}
@@ -631,15 +636,33 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
WasmCode::kWasmAllocateFixedArray, WasmCode::kWasmThrow,
WasmCode::kWasmRethrow, WasmCode::kWasmRethrowExplicitContext,
// Fast wasm-gc operations.
- WasmCode::kWasmRefFunc};
- if (std::count(unrollable_builtins,
- unrollable_builtins + arraysize(unrollable_builtins),
- info) == 0) {
+ WasmCode::kWasmRefFunc,
+ // Although this is a builtin call, it is the slow path, so it should
+ // not prevent loop unrolling for stringview_wtf16.get_codeunit.
+ WasmCode::kWasmStringViewWtf16GetCodeUnit};
+ if (std::count(std::begin(unrollable_builtins),
+ std::end(unrollable_builtins), info) == 0) {
return nullptr;
}
ENQUEUE_USES(use, true)
break;
}
+ case IrOpcode::kWasmStructGet: {
+ // When a chained load occurs in the loop, assume that peeling might
+ // help.
+ // Extending this idea to array.get/array.len has been found to hurt
+ // more than it helps (tested on Sheets, Feb 2023).
+ Node* object = node->InputAt(0);
+ if (object->opcode() == IrOpcode::kWasmStructGet &&
+ visited->find(object) != visited->end()) {
+ has_instruction_worth_peeling = true;
+ }
+ ENQUEUE_USES(use, true);
+ break;
+ }
+ case IrOpcode::kStringPrepareForGetCodeunit:
+ has_instruction_worth_peeling = true;
+ V8_FALLTHROUGH;
default:
ENQUEUE_USES(use, true)
break;
@@ -672,6 +695,12 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
}
}
+ // Only peel functions containing instructions for which loop peeling is known
+ // to be useful. TODO(7748): Add more instructions to get more benefits out of
+ // loop peeling.
+ if (purpose == Purpose::kLoopPeeling && !has_instruction_worth_peeling) {
+ return nullptr;
+ }
return visited;
}
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index d3c53b850b..07d30b8cda 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -178,9 +178,11 @@ class V8_EXPORT_PRIVATE LoopFinder {
static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter,
Zone* temp_zone);
- static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop);
+ static bool HasMarkedExits(LoopTree* loop_tree, const LoopTree::Loop* loop);
#if V8_ENABLE_WEBASSEMBLY
+ enum class Purpose { kLoopPeeling, kLoopUnrolling };
+
// Find all nodes in the loop headed by {loop_header} if it contains no nested
// loops.
// Assumption: *if* this loop has no nested loops, all exits from the loop are
@@ -192,7 +194,7 @@ class V8_EXPORT_PRIVATE LoopFinder {
// 3) a nested loop is found in the loop.
static ZoneUnorderedSet<Node*>* FindSmallInnermostLoopFromHeader(
Node* loop_header, AllNodes& all_nodes, Zone* zone, size_t max_size,
- bool calls_are_large);
+ Purpose purpose);
#endif
};
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index ee46d5e494..3a0ae8aba9 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -269,7 +269,7 @@ void LoopPeeler::PeelInnerLoopsOfTree() {
// static
void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* tmp_zone) {
ZoneQueue<Node*> queue(tmp_zone);
- ZoneVector<bool> visited(graph->NodeCount(), false, tmp_zone);
+ BitVector visited(static_cast<int>(graph->NodeCount()), tmp_zone);
queue.push(graph->end());
while (!queue.empty()) {
Node* node = queue.front();
@@ -278,15 +278,15 @@ void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* tmp_zone) {
if (node->opcode() == IrOpcode::kLoopExit) {
Node* control = NodeProperties::GetControlInput(node);
EliminateLoopExit(node);
- if (!visited[control->id()]) {
- visited[control->id()] = true;
+ if (!visited.Contains(control->id())) {
+ visited.Add(control->id());
queue.push(control);
}
} else {
for (int i = 0; i < node->op()->ControlInputCount(); i++) {
Node* control = NodeProperties::GetControlInput(node, i);
- if (!visited[control->id()]) {
- visited[control->id()] = true;
+ if (!visited.Contains(control->id())) {
+ visited.Add(control->id());
queue.push(control);
}
}
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 64f6a5a0c0..411f878a05 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -9,6 +9,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
#include "src/zone/zone.h"
@@ -39,52 +40,6 @@ class MachineRepresentationInferrer {
}
private:
- MachineRepresentation GetProjectionType(Node const* projection) {
- size_t index = ProjectionIndexOf(projection->op());
- Node* input = projection->InputAt(0);
- switch (input->opcode()) {
- case IrOpcode::kInt32AddWithOverflow:
- case IrOpcode::kInt32SubWithOverflow:
- case IrOpcode::kInt32MulWithOverflow:
- CHECK_LE(index, static_cast<size_t>(1));
- return index == 0 ? MachineRepresentation::kWord32
- : MachineRepresentation::kBit;
- case IrOpcode::kInt64AddWithOverflow:
- case IrOpcode::kInt64SubWithOverflow:
- case IrOpcode::kInt64MulWithOverflow:
- CHECK_LE(index, static_cast<size_t>(1));
- return index == 0 ? MachineRepresentation::kWord64
- : MachineRepresentation::kBit;
- case IrOpcode::kTryTruncateFloat64ToInt32:
- case IrOpcode::kTryTruncateFloat64ToUint32:
- CHECK_LE(index, static_cast<size_t>(1));
- return index == 0 ? MachineRepresentation::kWord32
- : MachineRepresentation::kBit;
- case IrOpcode::kTryTruncateFloat32ToInt64:
- case IrOpcode::kTryTruncateFloat64ToInt64:
- case IrOpcode::kTryTruncateFloat32ToUint64:
- CHECK_LE(index, static_cast<size_t>(1));
- return index == 0 ? MachineRepresentation::kWord64
- : MachineRepresentation::kBit;
- case IrOpcode::kCall: {
- auto call_descriptor = CallDescriptorOf(input->op());
- return call_descriptor->GetReturnType(index).representation();
- }
- case IrOpcode::kWord32AtomicPairLoad:
- case IrOpcode::kWord32AtomicPairAdd:
- case IrOpcode::kWord32AtomicPairSub:
- case IrOpcode::kWord32AtomicPairAnd:
- case IrOpcode::kWord32AtomicPairOr:
- case IrOpcode::kWord32AtomicPairXor:
- case IrOpcode::kWord32AtomicPairExchange:
- case IrOpcode::kWord32AtomicPairCompareExchange:
- CHECK_LE(index, static_cast<size_t>(1));
- return MachineRepresentation::kWord32;
- default:
- return MachineRepresentation::kNone;
- }
- }
-
MachineRepresentation PromoteRepresentation(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
@@ -124,7 +79,8 @@ class MachineRepresentationInferrer {
break;
}
case IrOpcode::kProjection: {
- representation_vector_[node->id()] = GetProjectionType(node);
+ representation_vector_[node->id()] =
+ NodeProperties::GetProjectionType(node);
} break;
case IrOpcode::kTypedStateValues:
representation_vector_[node->id()] = MachineRepresentation::kNone;
@@ -139,6 +95,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
@@ -201,6 +158,7 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kStoreTrapOnNull:
representation_vector_[node->id()] = PromoteRepresentation(
StoreRepresentationOf(node->op()).representation());
break;
@@ -849,6 +807,7 @@ class MachineRepresentationChecker {
switch (node->opcode()) {
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kLoadImmutable:
if (rep == MachineRepresentation::kCompressed ||
diff --git a/deps/v8/src/compiler/machine-graph-verifier.h b/deps/v8/src/compiler/machine-graph-verifier.h
index 656f7d575b..ddee458f51 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.h
+++ b/deps/v8/src/compiler/machine-graph-verifier.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
#define V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
+#include "src/codegen/machine-type.h"
namespace v8 {
namespace internal {
class Zone;
@@ -13,6 +14,7 @@ namespace compiler {
class Graph;
class Linkage;
class Schedule;
+class Node;
// Verifies properties of a scheduled graph, such as that the nodes' inputs are
// of the correct type.
diff --git a/deps/v8/src/compiler/machine-graph.cc b/deps/v8/src/compiler/machine-graph.cc
index 0822b27847..da5cb68741 100644
--- a/deps/v8/src/compiler/machine-graph.cc
+++ b/deps/v8/src/compiler/machine-graph.cc
@@ -10,10 +10,14 @@ namespace v8 {
namespace internal {
namespace compiler {
+Node* MachineGraph::UniqueInt32Constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+}
+
Node* MachineGraph::Int32Constant(int32_t value) {
Node** loc = cache_.FindInt32Constant(value);
if (*loc == nullptr) {
- *loc = graph()->NewNode(common()->Int32Constant(value));
+ *loc = UniqueInt32Constant(value);
}
return *loc;
}
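
Int32Constant still canonicalizes through the node cache, while the new UniqueInt32Constant bypasses it and always builds a fresh node. A minimal memoization sketch of the difference (FakeNode, the arena, and the lambdas are invented for illustration; they are not V8 types):

    #include <cassert>
    #include <deque>
    #include <unordered_map>

    struct FakeNode { int value; };

    int main() {
      std::deque<FakeNode> arena;                      // stable addresses
      std::unordered_map<int, FakeNode*> cache;
      // "Unique": materialize a fresh node on every call.
      auto unique = [&arena](int v) { arena.push_back(FakeNode{v}); return &arena.back(); };
      // "Canonical": look up the cache first, so equal constants share a node.
      auto canonical = [&](int v) {
        FakeNode*& slot = cache[v];
        if (slot == nullptr) slot = unique(v);
        return slot;
      };
      assert(canonical(42) == canonical(42));          // shared node
      assert(unique(42) != unique(42));                // new node every call
      return 0;
    }
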
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index 0ec61bb36c..05cbbad53e 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -33,6 +33,9 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
MachineGraph(const MachineGraph&) = delete;
MachineGraph& operator=(const MachineGraph&) = delete;
+ // Creates a new (unique) Int32Constant node.
+ Node* UniqueInt32Constant(int32_t value);
+
// Creates a Int32Constant node, usually canonicalized.
Node* Int32Constant(int32_t value);
Node* Uint32Constant(uint32_t value) {
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 7514f724ce..c538695770 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/machine-operator-reducer.h"
#include <cmath>
+#include <cstdint>
#include <limits>
#include "src/base/bits.h"
@@ -12,6 +13,7 @@
#include "src/base/ieee754.h"
#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
+#include "src/builtins/builtins.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
@@ -185,16 +187,15 @@ T SilenceNaN(T x) {
} // namespace
-MachineOperatorReducer::MachineOperatorReducer(Editor* editor,
- MachineGraph* mcgraph,
- bool allow_signalling_nan)
+MachineOperatorReducer::MachineOperatorReducer(
+ Editor* editor, MachineGraph* mcgraph,
+ SignallingNanPropagation signalling_nan_propagation)
: AdvancedReducer(editor),
mcgraph_(mcgraph),
- allow_signalling_nan_(allow_signalling_nan) {}
+ signalling_nan_propagation_(signalling_nan_propagation) {}
MachineOperatorReducer::~MachineOperatorReducer() = default;
-
Node* MachineOperatorReducer::Float32Constant(float value) {
return graph()->NewNode(common()->Float32Constant(value));
}
@@ -237,15 +238,29 @@ Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) {
return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs));
}
+Node* MachineOperatorReducer::Word64Sar(Node* lhs, uint32_t rhs) {
+ if (rhs == 0) return lhs;
+ return graph()->NewNode(machine()->Word64Sar(), lhs, Uint64Constant(rhs));
+}
+
Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) {
if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs));
}
+Node* MachineOperatorReducer::Word64Shr(Node* lhs, uint32_t rhs) {
+ if (rhs == 0) return lhs;
+ return graph()->NewNode(machine()->Word64Shr(), lhs, Uint64Constant(rhs));
+}
+
Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) {
return graph()->NewNode(machine()->Word32Equal(), lhs, rhs);
}
+Node* MachineOperatorReducer::Word64Equal(Node* lhs, Node* rhs) {
+ return graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
+}
+
Node* MachineOperatorReducer::Word64And(Node* lhs, Node* rhs) {
Node* const node = graph()->NewNode(machine()->Word64And(), lhs, rhs);
Reduction const reduction = ReduceWord64And(node);
@@ -258,16 +273,32 @@ Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
return reduction.Changed() ? reduction.replacement() : node;
}
+Node* MachineOperatorReducer::Int64Add(Node* lhs, Node* rhs) {
+ Node* const node = graph()->NewNode(machine()->Int64Add(), lhs, rhs);
+ Reduction const reduction = ReduceInt64Add(node);
+ return reduction.Changed() ? reduction.replacement() : node;
+}
+
Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) {
Node* const node = graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
Reduction const reduction = ReduceInt32Sub(node);
return reduction.Changed() ? reduction.replacement() : node;
}
+Node* MachineOperatorReducer::Int64Sub(Node* lhs, Node* rhs) {
+ Node* const node = graph()->NewNode(machine()->Int64Sub(), lhs, rhs);
+ Reduction const reduction = ReduceInt64Sub(node);
+ return reduction.Changed() ? reduction.replacement() : node;
+}
+
Node* MachineOperatorReducer::Int32Mul(Node* lhs, Node* rhs) {
return graph()->NewNode(machine()->Int32Mul(), lhs, rhs);
}
+Node* MachineOperatorReducer::Int64Mul(Node* lhs, Node* rhs) {
+ return graph()->NewNode(machine()->Int64Mul(), lhs, rhs);
+}
+
Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
DCHECK_NE(0, divisor);
DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor);
@@ -283,6 +314,21 @@ Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31));
}
+Node* MachineOperatorReducer::Int64Div(Node* dividend, int64_t divisor) {
+ DCHECK_NE(0, divisor);
+ DCHECK_NE(std::numeric_limits<int64_t>::min(), divisor);
+ base::MagicNumbersForDivision<uint64_t> const mag =
+ base::SignedDivisionByConstant(base::bit_cast<uint64_t>(divisor));
+ Node* quotient = graph()->NewNode(machine()->Int64MulHigh(), dividend,
+ Uint64Constant(mag.multiplier));
+ if (divisor > 0 && base::bit_cast<int64_t>(mag.multiplier) < 0) {
+ quotient = Int64Add(quotient, dividend);
+ } else if (divisor < 0 && base::bit_cast<int64_t>(mag.multiplier) > 0) {
+ quotient = Int64Sub(quotient, dividend);
+ }
+ return Int64Add(Word64Sar(quotient, mag.shift), Word64Shr(dividend, 63));
+}
+
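
The Int32Div and new Int64Div helpers strength-reduce division by a constant into a multiply-high, an optional add/sub fixup, a shift, and a sign-bit correction. A self-contained check of that shape for the 32-bit divide-by-3 case, using the classic magic constant 0x55555556 with shift 0 (DivBy3 is a made-up name; assumes arithmetic right shift of negative values, as on V8's supported targets):

    #include <cassert>
    #include <cstdint>

    int32_t DivBy3(int32_t x) {
      const int64_t kMagic = 0x55555556;  // classic magic multiplier for d = 3
      // Int32MulHigh: the upper 32 bits of the 64-bit product.
      int32_t hi = static_cast<int32_t>((x * kMagic) >> 32);
      // divisor > 0 and the multiplier is non-negative, so no add/sub fixup;
      // the shift is 0, and adding the sign bit rounds toward zero.
      return hi + static_cast<int32_t>(static_cast<uint32_t>(x) >> 31);
    }

    int main() {
      for (int32_t x : {0, 1, 2, 3, 7, -7, 100, -100, 2147483647, -2147483647}) {
        assert(DivBy3(x) == x / 3);
      }
      return 0;
    }
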
Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
DCHECK_LT(0u, divisor);
// If the divisor is even, we can avoid using the expensive fixup by shifting
@@ -306,12 +352,39 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
return quotient;
}
+Node* MachineOperatorReducer::Uint64Div(Node* dividend, uint64_t divisor) {
+ DCHECK_LT(0u, divisor);
+ // If the divisor is even, we can avoid using the expensive fixup by shifting
+ // the dividend upfront.
+ unsigned const shift = base::bits::CountTrailingZeros(divisor);
+ dividend = Word64Shr(dividend, shift);
+ divisor >>= shift;
+ // Compute the magic number for the (shifted) divisor.
+ base::MagicNumbersForDivision<uint64_t> const mag =
+ base::UnsignedDivisionByConstant(divisor, shift);
+ Node* quotient = graph()->NewNode(machine()->Uint64MulHigh(), dividend,
+ Uint64Constant(mag.multiplier));
+ if (mag.add) {
+ DCHECK_LE(1u, mag.shift);
+ quotient = Word64Shr(
+ Int64Add(Word64Shr(Int64Sub(dividend, quotient), 1), quotient),
+ mag.shift - 1);
+ } else {
+ quotient = Word64Shr(quotient, mag.shift);
+ }
+ return quotient;
+}
+
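
Uint64Div first shifts an even divisor's trailing zero bits off the dividend so the magic-number multiply only has to handle the odd part. A standalone check of the identity behind that pre-shift, for d = 12 = 3 * 2^2 (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      // For an unsigned divisor d = d0 * 2^k with d0 odd, shifting the dividend
      // right by k and dividing by d0 gives the same truncated quotient as
      // dividing by d directly.
      for (uint64_t x : {UINT64_C(0), UINT64_C(1), UINT64_C(11), UINT64_C(12),
                         UINT64_C(13), UINT64_C(123456789),
                         UINT64_C(0xffffffffffffffff)}) {
        assert((x >> 2) / 3 == x / 12);
      }
      return 0;
    }
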
Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) {
Node* const node = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
Reduction const reduction = ReduceTruncateInt64ToInt32(node);
return reduction.Changed() ? reduction.replacement() : node;
}
+Node* MachineOperatorReducer::ChangeInt32ToInt64(Node* value) {
+ return graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+}
+
// Perform constant folding and strength reduction on machine operators.
Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
@@ -414,12 +487,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceInt64Mul(node);
case IrOpcode::kInt32Div:
return ReduceInt32Div(node);
+ case IrOpcode::kInt64Div:
+ return ReduceInt64Div(node);
case IrOpcode::kUint32Div:
return ReduceUint32Div(node);
+ case IrOpcode::kUint64Div:
+ return ReduceUint64Div(node);
case IrOpcode::kInt32Mod:
return ReduceInt32Mod(node);
+ case IrOpcode::kInt64Mod:
+ return ReduceInt64Mod(node);
case IrOpcode::kUint32Mod:
return ReduceUint32Mod(node);
+ case IrOpcode::kUint64Mod:
+ return ReduceUint64Mod(node);
case IrOpcode::kInt32LessThan: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
@@ -477,7 +558,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
- if (allow_signalling_nan_ && m.right().Is(0) &&
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.right().Is(0) &&
(std::copysign(1.0, m.right().ResolvedValue()) > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
@@ -491,7 +573,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat32(m.left().ResolvedValue() -
m.right().ResolvedValue());
}
- if (allow_signalling_nan_ && m.left().IsMinusZero()) {
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
if (machine()->Float32RoundUp().IsSupported() &&
m.right().IsFloat32RoundDown()) {
@@ -526,7 +609,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Sub: {
Float64BinopMatcher m(node);
- if (allow_signalling_nan_ && m.right().Is(0) &&
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.right().Is(0) &&
(base::Double(m.right().ResolvedValue()).Sign() > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
@@ -540,7 +624,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat64(m.left().ResolvedValue() -
m.right().ResolvedValue());
}
- if (allow_signalling_nan_ && m.left().IsMinusZero()) {
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
if (machine()->Float64RoundUp().IsSupported() &&
m.right().IsFloat64RoundDown()) {
@@ -561,15 +646,16 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Mul: {
Float64BinopMatcher m(node);
- if (allow_signalling_nan_ && m.right().Is(1))
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.right().Is(1))
return Replace(m.left().node()); // x * 1.0 => x
- if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
+ if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
node->ReplaceInput(0, Float64Constant(-0.0));
node->ReplaceInput(1, m.left().node());
NodeProperties::ChangeOp(node, machine()->Float64Sub());
return Changed(node);
}
- if (m.right().IsNaN()) { // x * NaN => NaN
+ if (m.right().IsNaN()) { // x * NaN => NaN
return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
@@ -585,10 +671,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Div: {
Float64BinopMatcher m(node);
- if (allow_signalling_nan_ && m.right().Is(1))
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.right().Is(1))
return Replace(m.left().node()); // x / 1.0 => x
// TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
- if (m.right().IsNaN()) { // x / NaN => NaN
+ if (m.right().IsNaN()) { // x / NaN => NaN
return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN / x => NaN
@@ -598,7 +685,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat64(
base::Divide(m.left().ResolvedValue(), m.right().ResolvedValue()));
}
- if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.right().Is(-1)) { // x / -1.0 => -x
node->RemoveInput(1);
NodeProperties::ChangeOp(node, machine()->Float64Neg());
return Changed(node);
@@ -689,7 +777,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Cos: {
Float64Matcher m(node->InputAt(0));
if (m.HasResolvedValue())
- return ReplaceFloat64(base::ieee754::cos(m.ResolvedValue()));
+ return ReplaceFloat64(COS_IMPL(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Cosh: {
@@ -754,7 +842,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Sin: {
Float64Matcher m(node->InputAt(0));
if (m.HasResolvedValue())
- return ReplaceFloat64(base::ieee754::sin(m.ResolvedValue()));
+ return ReplaceFloat64(SIN_IMPL(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Sinh: {
@@ -778,7 +866,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64: {
Float32Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
+ if (signalling_nan_propagation_ == kSilenceSignallingNan &&
+ std::isnan(m.ResolvedValue())) {
return ReplaceFloat64(SilenceNaN(m.ResolvedValue()));
}
return ReplaceFloat64(m.ResolvedValue());
@@ -815,6 +904,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kBitcastWord32ToWord64: {
Int32Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
+ // No need to truncate the value, since top 32 bits are not important.
+ if (m.IsTruncateInt64ToInt32()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeInt32ToInt64: {
@@ -853,12 +944,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
- if (!allow_signalling_nan_ && m.IsNaN()) {
+ if (signalling_nan_propagation_ == kSilenceSignallingNan && m.IsNaN()) {
return ReplaceFloat32(DoubleToFloat32(SilenceNaN(m.ResolvedValue())));
}
return ReplaceFloat32(DoubleToFloat32(m.ResolvedValue()));
}
- if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
+ if (signalling_nan_propagation_ == kPropagateSignallingNan &&
+ m.IsChangeFloat32ToFloat64())
return Replace(m.node()->InputAt(0));
break;
}
@@ -939,6 +1031,23 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kLoad:
+ case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull: {
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ if (input0->opcode() == IrOpcode::kInt64Add) {
+ Int64BinopMatcher m(input0);
+ if (m.right().HasResolvedValue()) {
+ int64_t value = m.right().ResolvedValue();
+ node->ReplaceInput(0, m.left().node());
+ Node* new_node = Int64Add(input1, Int64Constant(value));
+ node->ReplaceInput(1, new_node);
+ return Changed(node);
+ }
+ }
+ break;
+ }
default:
break;
}
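
The new Load/ProtectedLoad/LoadTrapOnNull case folds a constant summand out of the base address and into the index input. The rewrite only relies on the associativity of wrapping address arithmetic; a tiny standalone check with arbitrary constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t index = 24, k = 8;
      // (base + k) + index and base + (index + k) name the same address, even
      // when an intermediate sum wraps around.
      for (uint64_t base : {UINT64_C(0x1000), ~UINT64_C(0) - 3}) {
        assert((base + k) + index == base + (index + k));
      }
      return 0;
    }
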
@@ -949,7 +1058,8 @@ Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
Int64Matcher m(node->InputAt(0));
if (m.HasResolvedValue())
return ReplaceInt32(static_cast<int32_t>(m.ResolvedValue()));
- if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeInt32ToInt64() || m.IsChangeUint32ToUint64())
+ return Replace(m.node()->InputAt(0));
// TruncateInt64ToInt32(BitcastTaggedToWordForTagAndSmiBits(Load(x))) =>
// Load(x)
// where the new Load uses Int32 rather than the tagged representation.
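
ReduceTruncateInt64ToInt32 now also strips ChangeUint32ToUint64: truncating either a sign extension or a zero extension of a 32-bit value returns the original bits. A quick standalone check (conversions of out-of-range values assume the usual two's-complement wrapping):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t u : {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu}) {
        int32_t s = static_cast<int32_t>(u);
        // Sign extension followed by truncation is the identity...
        assert(static_cast<int32_t>(static_cast<int64_t>(s)) == s);
        // ...and so is zero extension followed by truncation.
        assert(static_cast<uint32_t>(static_cast<uint64_t>(u)) == u);
      }
      return 0;
    }
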
@@ -1166,6 +1276,56 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceInt64Div(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
+ if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
+ if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
+ return ReplaceInt64(base::bits::SignedDiv64(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
+ }
+ if (m.LeftEqualsRight()) { // x / x => x != 0
+ Node* const zero = Int64Constant(0);
+ // {Word64Equal} can get reduced to a bool/int32, but we need this
+ // operation to produce an int64.
+ return Replace(ChangeInt32ToInt64(
+ Word64Equal(Word64Equal(m.left().node(), zero), zero)));
+ }
+ if (m.right().Is(-1)) { // x / -1 => 0 - x
+ node->ReplaceInput(0, Int64Constant(0));
+ node->ReplaceInput(1, m.left().node());
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int64Sub());
+ return Changed(node);
+ }
+ if (m.right().HasResolvedValue()) {
+ int64_t const divisor = m.right().ResolvedValue();
+ Node* const dividend = m.left().node();
+ Node* quotient = dividend;
+ if (base::bits::IsPowerOfTwo(Abs(divisor))) {
+ uint32_t const shift = base::bits::WhichPowerOfTwo(Abs(divisor));
+ DCHECK_NE(0u, shift);
+ if (shift > 1) {
+ quotient = Word64Sar(quotient, 63);
+ }
+ quotient = Int64Add(Word64Shr(quotient, 64u - shift), dividend);
+ quotient = Word64Sar(quotient, shift);
+ } else {
+ quotient = Int64Div(quotient, Abs(divisor));
+ }
+ if (divisor < 0) {
+ node->ReplaceInput(0, Int64Constant(0));
+ node->ReplaceInput(1, quotient);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int64Sub());
+ return Changed(node);
+ }
+ return Replace(quotient);
+ }
+ return NoChange();
+}
+
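
The power-of-two path in ReduceInt64Div biases a negative dividend by 2^shift - 1 (derived from the sign word) before the arithmetic shift, so the quotient truncates toward zero like JavaScript division. A standalone check for divisor 8 (DivBy8 is a made-up name; assumes arithmetic right shift of negative values):

    #include <cassert>
    #include <cstdint>

    int64_t DivBy8(int64_t x) {
      const unsigned n = 3;  // divisor 8 == 1 << 3
      // All ones if x is negative, zero otherwise.
      uint64_t sign = static_cast<uint64_t>(x >> 63);
      // Bias negative dividends by 2^n - 1 so the shift rounds toward zero.
      int64_t biased = x + static_cast<int64_t>(sign >> (64 - n));
      return biased >> n;
    }

    int main() {
      for (int64_t x : {0, 1, 7, 8, 9, -1, -7, -8, -9, -100}) {
        assert(DivBy8(x) == x / 8);
      }
      return 0;
    }
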
Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
Uint32BinopMatcher m(node);
if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
@@ -1195,6 +1355,38 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceUint64Div(Node* node) {
+ Uint64BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
+ if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
+ if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
+ return ReplaceUint64(base::bits::UnsignedDiv64(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
+ }
+ if (m.LeftEqualsRight()) { // x / x => x != 0
+ Node* const zero = Int64Constant(0);
+ // {Word64Equal} can get reduced to a bool/int32, but we need this
+ // operation to produce an int64.
+ return Replace(ChangeInt32ToInt64(
+ Word64Equal(Word64Equal(m.left().node(), zero), zero)));
+ }
+ if (m.right().HasResolvedValue()) {
+ Node* const dividend = m.left().node();
+ uint64_t const divisor = m.right().ResolvedValue();
+ if (base::bits::IsPowerOfTwo(divisor)) { // x / 2^n => x >> n
+ node->ReplaceInput(1, Uint64Constant(base::bits::WhichPowerOfTwo(
+ m.right().ResolvedValue())));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word64Shr());
+ return Changed(node);
+ } else {
+ return Replace(Uint64Div(dividend, divisor));
+ }
+ }
+ return NoChange();
+}
+
Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
Int32BinopMatcher m(node);
if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
@@ -1231,12 +1423,48 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceInt64Mod(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
+ if (m.right().Is(1)) return ReplaceInt64(0); // x % 1 => 0
+ if (m.right().Is(-1)) return ReplaceInt64(0); // x % -1 => 0
+ if (m.LeftEqualsRight()) return ReplaceInt64(0); // x % x => 0
+ if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
+ return ReplaceInt64(base::bits::SignedMod64(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
+ }
+ if (m.right().HasResolvedValue()) {
+ Node* const dividend = m.left().node();
+ uint64_t const divisor = Abs(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(divisor)) {
+ uint64_t const mask = divisor - 1;
+ Node* const zero = Int64Constant(0);
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Int64LessThan(), dividend, zero),
+ BranchHint::kFalse);
+ return Replace(
+ d.Phi(MachineRepresentation::kWord64,
+ Int64Sub(zero, Word64And(Int64Sub(zero, dividend), mask)),
+ Word64And(dividend, mask)));
+ } else {
+ Node* quotient = Int64Div(dividend, divisor);
+ DCHECK_EQ(dividend, node->InputAt(0));
+ node->ReplaceInput(1, Int64Mul(quotient, Int64Constant(divisor)));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int64Sub());
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
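
The Diamond built for the power-of-two case in ReduceInt64Mod encodes that x % 2^n keeps the sign of the dividend: negative inputs are negated, masked, and negated back, all in wrapping arithmetic. A standalone check for n = 3 (ModBy8 is a made-up name):

    #include <cassert>
    #include <cstdint>

    int64_t ModBy8(int64_t x) {
      const uint64_t mask = 7;  // 2^3 - 1
      uint64_t ux = static_cast<uint64_t>(x);
      // Negative dividend: mask the magnitude, then negate back, in wrapping
      // unsigned arithmetic (mirroring the 0 - x nodes built by the reducer).
      uint64_t r = x < 0 ? 0 - ((0 - ux) & mask) : ux & mask;
      return static_cast<int64_t>(r);
    }

    int main() {
      for (int64_t x : {0, 1, 7, 8, 9, -1, -7, -8, -9, -100}) {
        assert(ModBy8(x) == x % 8);
      }
      return 0;
    }
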
Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
Uint32BinopMatcher m(node);
if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
if (m.right().Is(1)) return ReplaceUint32(0); // x % 1 => 0
- if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
+ if (m.LeftEqualsRight()) return ReplaceUint32(0); // x % x => 0
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
return ReplaceUint32(base::bits::UnsignedMod32(m.left().ResolvedValue(),
m.right().ResolvedValue()));
@@ -1260,6 +1488,35 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceUint64Mod(Node* node) {
+ Uint64BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
+ if (m.right().Is(1)) return ReplaceUint64(0); // x % 1 => 0
+ if (m.LeftEqualsRight()) return ReplaceUint64(0); // x % x => 0
+ if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
+ return ReplaceUint64(base::bits::UnsignedMod64(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
+ }
+ if (m.right().HasResolvedValue()) {
+ Node* const dividend = m.left().node();
+ uint64_t const divisor = m.right().ResolvedValue();
+ if (base::bits::IsPowerOfTwo(divisor)) { // x % 2^n => x & 2^n-1
+ node->ReplaceInput(1, Uint64Constant(m.right().ResolvedValue() - 1));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word64And());
+ } else {
+ Node* quotient = Uint64Div(dividend, divisor);
+ DCHECK_EQ(dividend, node->InputAt(0));
+ node->ReplaceInput(1, Int64Mul(quotient, Uint64Constant(divisor)));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int64Sub());
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
DCHECK(nm.IsStore() || nm.IsUnalignedStore());
@@ -1542,6 +1799,132 @@ Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
}
}
+ /*
+ If Int64Constant(c) can be cast from an Int32Constant:
+ -------------------------------------------------
+ Int64LessThan(Int32ToInt64(a), Int64Constant(c))
+ ====>
+ Int32LessThan(a, Int32Constant(c))
+ -------------------------------------------------
+ */
+ if (node->opcode() == IrOpcode::kInt64LessThan ||
+ node->opcode() == IrOpcode::kInt64LessThanOrEqual) {
+ // Int64LessThan(Int32ToInt64(a), Int64Constant(c))
+ if (m.left().IsChangeInt32ToInt64() && m.right().HasResolvedValue()) {
+ int64_t right_value = static_cast<int64_t>(m.right().ResolvedValue());
+ // Int64Constant can be cast from an Int32Constant
+ if (right_value == static_cast<int32_t>(right_value)) {
+ const Operator* new_op;
+
+ if (node->opcode() == IrOpcode::kInt64LessThan) {
+ new_op = machine()->Int32LessThan();
+ } else {
+ new_op = machine()->Int32LessThanOrEqual();
+ }
+ NodeProperties::ChangeOp(node, new_op);
+ node->ReplaceInput(0, m.left().InputAt(0));
+ node->ReplaceInput(1, Int32Constant(static_cast<int32_t>(right_value)));
+ return Changed(node);
+ } else if (right_value < std::numeric_limits<int32_t>::min()) {
+ // left > right always
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(0));
+ return Changed(node);
+ } else if (right_value > std::numeric_limits<int32_t>::max()) {
+ // left < right always
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(1));
+ return Changed(node);
+ }
+ }
+ // Int64LessThan(Int64Constant(c), Int32ToInt64(a))
+ if (m.right().IsChangeInt32ToInt64() && m.left().HasResolvedValue()) {
+ int64_t left_value = static_cast<int64_t>(m.left().ResolvedValue());
+ // Int64Constant can be cast from an Int32Constant
+ if (left_value == static_cast<int32_t>(left_value)) {
+ const Operator* new_op;
+
+ if (node->opcode() == IrOpcode::kInt64LessThan) {
+ new_op = machine()->Int32LessThan();
+ } else {
+ new_op = machine()->Int32LessThanOrEqual();
+ }
+ NodeProperties::ChangeOp(node, new_op);
+ node->ReplaceInput(1, m.right().InputAt(0));
+ node->ReplaceInput(0, Int32Constant(static_cast<int32_t>(left_value)));
+ return Changed(node);
+ } else if (left_value < std::numeric_limits<int32_t>::min()) {
+ // left < right always
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(1));
+ return Changed(node);
+ } else if (left_value > std::numeric_limits<int32_t>::max()) {
+ // left > right always
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(0));
+ return Changed(node);
+ }
+ }
+ }
+
+ /*
+ If Uint64Constant(c) can be cast from a Uint32Constant:
+ -------------------------------------------------
+ Uint64LessThan(Uint32ToInt64(a), Uint64Constant(c))
+ ====>
+ Uint32LessThan(a, Uint32Constant(c))
+ -------------------------------------------------
+ */
+ if (node->opcode() == IrOpcode::kUint64LessThan ||
+ node->opcode() == IrOpcode::kUint64LessThanOrEqual) {
+ // Uint64LessThan(Uint32ToInt64(a), Uint64Constant(c))
+ if (m.left().IsChangeUint32ToUint64() && m.right().HasResolvedValue()) {
+ uint64_t right_value = static_cast<uint64_t>(m.right().ResolvedValue());
+ // Uint64Constant can be cast from a Uint32Constant
+ if (right_value == static_cast<uint32_t>(right_value)) {
+ const Operator* new_op;
+
+ if (node->opcode() == IrOpcode::kUint64LessThan) {
+ new_op = machine()->Uint32LessThan();
+ } else {
+ new_op = machine()->Uint32LessThanOrEqual();
+ }
+ NodeProperties::ChangeOp(node, new_op);
+ node->ReplaceInput(0, m.left().InputAt(0));
+ node->ReplaceInput(1,
+ Uint32Constant(static_cast<uint32_t>(right_value)));
+ return Changed(node);
+ } else {
+ // left < right always
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(1));
+ return Changed(node);
+ }
+ }
+ // Uint64LessThan(Uint64Constant(c), Uint32ToInt64(a))
+ if (m.right().IsChangeUint32ToUint64() && m.left().HasResolvedValue()) {
+ uint64_t left_value = static_cast<uint64_t>(m.left().ResolvedValue());
+      // Uint64Constant can be cast from a Uint32Constant
+ if (left_value == static_cast<uint32_t>(left_value)) {
+ const Operator* new_op;
+ if (node->opcode() == IrOpcode::kUint64LessThan) {
+ new_op = machine()->Uint32LessThan();
+ } else {
+ new_op = machine()->Uint32LessThanOrEqual();
+ }
+ NodeProperties::ChangeOp(node, new_op);
+ node->ReplaceInput(1, m.right().InputAt(0));
+ node->ReplaceInput(0,
+ Uint32Constant(static_cast<uint32_t>(left_value)));
+ return Changed(node);
+ } else {
+ // left > right always
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(0));
+ return Changed(node);
+ }
+ }
+ }
return NoChange();
}
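
The comparison narrowing above rests on one arithmetic fact: a sign-extended 32-bit value always lies in [INT32_MIN, INT32_MAX], so a 64-bit compare against a constant either narrows to a 32-bit compare (when the constant fits in int32) or folds to a constant outcome. A minimal standalone sketch of that rule, not V8 code, with illustrative names:

// Standalone check of the narrowing rule (not V8 code): for a 32-bit value a
// and a 64-bit constant c,
//   int64(a) < c  ==  a < int32(c)   if c fits in int32,
//   int64(a) < c  ==  false          if c < INT32_MIN (a > c always),
//   int64(a) < c  ==  true           if c > INT32_MAX (a < c always).
#include <cassert>
#include <cstdint>
#include <limits>

bool ReducedInt64LessThan(int32_t a, int64_t c) {
  if (c == static_cast<int32_t>(c)) return a < static_cast<int32_t>(c);
  if (c < std::numeric_limits<int32_t>::min()) return false;  // a > c always
  return true;                                                // a < c always
}

int main() {
  const int32_t as[] = {std::numeric_limits<int32_t>::min(), -1, 0, 7,
                        std::numeric_limits<int32_t>::max()};
  const int64_t cs[] = {-3000000000LL, -5LL, 0LL, 42LL, 3000000000LL};
  for (int32_t a : as) {
    for (int64_t c : cs) {
      // static_cast<int64_t>(a) is the sign extension ChangeInt32ToInt64 does.
      assert((static_cast<int64_t>(a) < c) == ReducedInt64LessThan(a, c));
    }
  }
}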
@@ -1767,7 +2150,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
return a.ReplaceIntN(0);
}
}
- if (m.left().IsComparison() && m.right().Is(1)) { // CMP & 1 => CMP
+ if (m.left().IsComparison() && m.right().Is(1)) { // CMP & 1 => CMP
return Replace(m.left().node());
}
if (m.IsFoldable()) { // K & K => K (K stands for arbitrary constants)
@@ -2128,7 +2511,7 @@ Reduction MachineOperatorReducer::ReduceWordNXor(Node* node) {
if (m.IsFoldable()) { // K ^ K => K (K stands for arbitrary constants)
return a.ReplaceIntN(m.left().ResolvedValue() ^ m.right().ResolvedValue());
}
- if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
+ if (m.LeftEqualsRight()) return Replace(a.IntNConstant(0)); // x ^ x => 0
if (A::IsWordNXor(m.left()) && m.right().Is(-1)) {
typename A::IntNBinopMatcher mleft(m.left().node());
if (mleft.right().Is(-1)) { // (x ^ -1) ^ -1 => x
@@ -2181,6 +2564,20 @@ Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
node->ReplaceInput(1, Uint32Constant(replacements->second));
return Changed(node);
}
+
+ // Simplifying (x+k1)==k2 into x==k2-k1.
+ if (m.left().IsInt32Add() && m.right().IsInt32Constant()) {
+ Int32AddMatcher m_add(m.left().node());
+ if (m_add.right().IsInt32Constant()) {
+ int32_t lte_right = m.right().ResolvedValue();
+ int32_t add_right = m_add.right().ResolvedValue();
+ // No need to consider overflow in this condition (==).
+ node->ReplaceInput(0, m_add.left().node());
+ node->ReplaceInput(1, Int32Constant(static_cast<uint32_t>(lte_right) -
+ static_cast<uint32_t>(add_right)));
+ return Changed(node);
+ }
+ }
}
return NoChange();
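
The (x + k1) == k2 rewrite above needs no overflow check because both the addition and the subtraction are computed modulo 2^32, and equality is preserved under that wrap-around. A standalone sketch, not V8 code:

// Standalone check (not V8 code): (x + k1) == k2 is equivalent to
// x == k2 - k1 when both sides wrap modulo 2^32.
#include <cassert>
#include <cstdint>

bool ReducedEqual(uint32_t x, uint32_t k1, uint32_t k2) {
  return x == k2 - k1;  // unsigned subtraction wraps, matching the addition
}

int main() {
  // A case where x + k1 wraps past 2^32: the identity still holds.
  const uint32_t x = 0xFFFFFFF0u, k1 = 0x20u;
  const uint32_t k2 = x + k1;  // wraps to 0x10
  assert((x + k1) == k2);
  assert(ReducedEqual(x, k1, k2));
  assert(!ReducedEqual(x + 1, k1, k2));
}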
@@ -2208,6 +2605,44 @@ Reduction MachineOperatorReducer::ReduceWord64Equal(Node* node) {
node->ReplaceInput(1, Uint64Constant(replacements->second));
return Changed(node);
}
+
+ // Simplifying (x+k1)==k2 into x==k2-k1.
+ if (m.left().IsInt64Add() && m.right().IsInt64Constant()) {
+ Int64AddMatcher m_add(m.left().node());
+ if (m_add.right().IsInt64Constant()) {
+ int64_t lte_right = m.right().ResolvedValue();
+ int64_t add_right = m_add.right().ResolvedValue();
+ // No need to consider overflow in this condition (==).
+ node->ReplaceInput(0, m_add.left().node());
+ node->ReplaceInput(1, Int64Constant(static_cast<uint64_t>(lte_right) -
+ static_cast<uint64_t>(add_right)));
+ return Changed(node);
+ }
+ }
+
+ /*
+  If Int64Constant(c) can be cast from an Int32Constant:
+  -------------------------------------------------
+  Word64Equal(Int32ToInt64(a), Int64Constant(c))
+  ====>
+  Word32Equal(a, Int32Constant(c))
+ -------------------------------------------------
+ */
+ if (m.left().IsChangeInt32ToInt64()) {
+ int64_t right_value = m.right().ResolvedValue();
+    // Int64Constant can be cast from an Int32Constant
+ if (right_value == static_cast<int32_t>(right_value)) {
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ node->ReplaceInput(0, m.left().InputAt(0));
+ node->ReplaceInput(1, Int32Constant(static_cast<int32_t>(right_value)));
+ return Changed(node);
+ } else {
+ // Always false, change node op to zero(false).
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Int32Constant(0));
+ return Changed(node);
+ }
+ }
}
return NoChange();
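
The Word64Equal narrowing follows the same reasoning as the comparison reductions earlier in this file: a sign-extended 32-bit value can only equal a 64-bit constant that itself fits in int32; otherwise the equality is statically false. A standalone sketch, not V8 code:

// Standalone check (not V8 code) of the equality narrowing above.
#include <cassert>
#include <cstdint>

bool ReducedWord64Equal(int32_t a, int64_t c) {
  if (c == static_cast<int32_t>(c)) return a == static_cast<int32_t>(c);
  return false;  // c cannot be the sign extension of any int32
}

int main() {
  assert(ReducedWord64Equal(-1, -1LL));
  assert(!ReducedWord64Equal(-1, 0xFFFFFFFFLL));  // 4294967295 != int64(-1)
  assert(!ReducedWord64Equal(7, 5000000000LL));
}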
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 7c4dc85b59..77ba737830 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -25,8 +25,14 @@ class Word64Adapter;
class V8_EXPORT_PRIVATE MachineOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- explicit MachineOperatorReducer(Editor* editor, MachineGraph* mcgraph,
- bool allow_signalling_nan = true);
+ enum SignallingNanPropagation {
+ kSilenceSignallingNan,
+ kPropagateSignallingNan
+ };
+
+ explicit MachineOperatorReducer(
+ Editor* editor, MachineGraph* mcgraph,
+ SignallingNanPropagation signalling_nan_propagation);
~MachineOperatorReducer() override;
const char* reducer_name() const override { return "MachineOperatorReducer"; }
@@ -54,18 +60,27 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
return Word32And(lhs, Uint32Constant(rhs));
}
Node* Word32Sar(Node* lhs, uint32_t rhs);
+ Node* Word64Sar(Node* lhs, uint32_t rhs);
Node* Word32Shr(Node* lhs, uint32_t rhs);
+ Node* Word64Shr(Node* lhs, uint32_t rhs);
Node* Word32Equal(Node* lhs, Node* rhs);
+ Node* Word64Equal(Node* lhs, Node* rhs);
Node* Word64And(Node* lhs, Node* rhs);
Node* Word64And(Node* lhs, uint64_t rhs) {
return Word64And(lhs, Uint64Constant(rhs));
}
Node* Int32Add(Node* lhs, Node* rhs);
+ Node* Int64Add(Node* lhs, Node* rhs);
Node* Int32Sub(Node* lhs, Node* rhs);
+ Node* Int64Sub(Node* lhs, Node* rhs);
Node* Int32Mul(Node* lhs, Node* rhs);
+ Node* Int64Mul(Node* lhs, Node* rhs);
Node* Int32Div(Node* dividend, int32_t divisor);
+ Node* Int64Div(Node* dividend, int64_t divisor);
Node* Uint32Div(Node* dividend, uint32_t divisor);
+ Node* Uint64Div(Node* dividend, uint64_t divisor);
Node* TruncateInt64ToInt32(Node* value);
+ Node* ChangeInt32ToInt64(Node* value);
Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
Reduction ReplaceFloat32(float value) {
@@ -83,6 +98,9 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReplaceInt64(int64_t value) {
return Replace(Int64Constant(value));
}
+ Reduction ReplaceUint64(uint64_t value) {
+ return Replace(Uint64Constant(value));
+ }
Reduction ReduceInt32Add(Node* node);
Reduction ReduceInt64Add(Node* node);
@@ -90,9 +108,13 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceInt64Sub(Node* node);
Reduction ReduceInt64Mul(Node* node);
Reduction ReduceInt32Div(Node* node);
+ Reduction ReduceInt64Div(Node* node);
Reduction ReduceUint32Div(Node* node);
+ Reduction ReduceUint64Div(Node* node);
Reduction ReduceInt32Mod(Node* node);
+ Reduction ReduceInt64Mod(Node* node);
Reduction ReduceUint32Mod(Node* node);
+ Reduction ReduceUint64Mod(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
const Operator* Map64To32Comparison(const Operator* op, bool sign_extended);
@@ -160,7 +182,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Node* lhs, uintN_t rhs);
MachineGraph* mcgraph_;
- bool allow_signalling_nan_;
+ SignallingNanPropagation signalling_nan_propagation_;
};
} // namespace compiler
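
The constructor change in this header replaces a defaulted bool with a two-value enum, so every caller has to spell out which NaN behaviour it wants instead of passing an opaque true/false. A standalone sketch of that idiom, not V8 code, with illustrative names:

// Standalone sketch (not V8 code): a named enum instead of a bool flag makes
// the behaviour readable at each construction site.
#include <iostream>

enum SignallingNanPropagation {
  kSilenceSignallingNan,
  kPropagateSignallingNan
};

struct ReducerSketch {
  explicit ReducerSketch(SignallingNanPropagation p) : propagation_(p) {}
  bool propagates() const { return propagation_ == kPropagateSignallingNan; }
  SignallingNanPropagation propagation_;
};

int main() {
  ReducerSketch reducer_a(kPropagateSignallingNan);  // intent visible here
  ReducerSketch reducer_b(kSilenceSignallingNan);
  std::cout << reducer_a.propagates() << reducer_b.propagates() << "\n";  // 10
}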
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 5a7ccfe3dc..a5a27418f8 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -126,6 +126,11 @@ std::ostream& operator<<(std::ostream& os, LoadTransformation rep) {
return os << "kS128Load32Zero";
case LoadTransformation::kS128Load64Zero:
return os << "kS128Load64Zero";
+ // Simd256
+ case LoadTransformation::kS256Load32Splat:
+ return os << "kS256Load32Splat";
+ case LoadTransformation::kS256Load64Splat:
+ return os << "kS256Load64Splat";
}
UNREACHABLE();
}
@@ -173,6 +178,7 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
+ IrOpcode::kLoadTrapOnNull == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
@@ -191,7 +197,8 @@ AtomicOpParameters AtomicOpParametersOf(Operator const* op) {
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
- IrOpcode::kProtectedStore == op->opcode());
+ IrOpcode::kProtectedStore == op->opcode() ||
+ IrOpcode::kStoreTrapOnNull == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
@@ -431,6 +438,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackCheckOffset, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadRootRegister, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
@@ -637,7 +645,18 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(I32x4RelaxedTruncF64x2UZero, Operator::kNoProperties, 1, 0, 1) \
V(I16x8RelaxedQ15MulRS, Operator::kCommutative, 2, 0, 1) \
V(I16x8DotI8x16I7x16S, Operator::kCommutative, 2, 0, 1) \
- V(I32x4DotI8x16I7x16AddS, Operator::kNoProperties, 3, 0, 1)
+ V(I32x4DotI8x16I7x16AddS, Operator::kNoProperties, 3, 0, 1) \
+ V(F32x8Add, Operator::kCommutative, 2, 0, 1) \
+ V(F32x8Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x8Mul, Operator::kCommutative, 2, 0, 1) \
+ V(F32x8Div, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x8Pmin, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x8Pmax, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x8Eq, Operator::kCommutative, 2, 0, 1) \
+ V(F32x8Ne, Operator::kCommutative, 2, 0, 1) \
+ V(F32x8Lt, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x8Le, Operator::kNoProperties, 2, 0, 1) \
+ V(S256Select, Operator::kNoProperties, 3, 0, 1)
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
@@ -729,7 +748,9 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Load32x2S) \
V(S128Load32x2U) \
V(S128Load32Zero) \
- V(S128Load64Zero)
+ V(S128Load64Zero) \
+ V(S256Load32Splat) \
+ V(S256Load64Splat)
#if TAGGED_SIZE_8_BYTES
@@ -951,6 +972,8 @@ struct MachineOperatorGlobalCache {
OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
+// ProtectedLoad and LoadTrapOnNull are not marked kNoWrite, so potentially
+// trapping loads are not eliminated if their result is unused.
#define LOAD(Type) \
struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
Load##Type##Operator() \
@@ -973,6 +996,14 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
+ struct LoadTrapOnNull##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ LoadTrapOnNull##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoadTrapOnNull, \
+ Operator::kNoDeopt | Operator::kNoThrow, "LoadTrapOnNull", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
struct LoadImmutable##Type##Operator final \
: public Operator1<LoadRepresentation> { \
LoadImmutable##Type##Operator() \
@@ -983,6 +1014,7 @@ struct MachineOperatorGlobalCache {
Load##Type##Operator kLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type; \
+ LoadTrapOnNull##Type##Operator kLoadTrapOnNull##Type; \
LoadImmutable##Type##Operator kLoadImmutable##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1081,6 +1113,26 @@ struct MachineOperatorGlobalCache {
StoreRepresentation(MachineRepresentation::Type, \
kNoWriteBarrier)) {} \
}; \
+ struct StoreTrapOnNull##Type##FullWriteBarrier##Operator \
+ : public Operator1<StoreRepresentation> { \
+ explicit StoreTrapOnNull##Type##FullWriteBarrier##Operator() \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kStoreTrapOnNull, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "StoreTrapOnNull", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ kFullWriteBarrier)) {} \
+ }; \
+ struct StoreTrapOnNull##Type##NoWriteBarrier##Operator \
+ : public Operator1<StoreRepresentation> { \
+ explicit StoreTrapOnNull##Type##NoWriteBarrier##Operator() \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kStoreTrapOnNull, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "StoreTrapOnNull", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ kNoWriteBarrier)) {} \
+ }; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##AssertNoWriteBarrier##Operator \
kStore##Type##AssertNoWriteBarrier; \
@@ -1091,7 +1143,11 @@ struct MachineOperatorGlobalCache {
kStore##Type##EphemeronKeyWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
- ProtectedStore##Type##Operator kProtectedStore##Type;
+ ProtectedStore##Type##Operator kProtectedStore##Type; \
+ StoreTrapOnNull##Type##FullWriteBarrier##Operator \
+ kStoreTrapOnNull##Type##FullWriteBarrier; \
+ StoreTrapOnNull##Type##NoWriteBarrier##Operator \
+ kStoreTrapOnNull##Type##NoWriteBarrier;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1542,6 +1598,16 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::LoadTrapOnNull(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoadTrapOnNull##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+}
+
const Operator* MachineOperatorBuilder::LoadTransform(
MemoryAccessKind kind, LoadTransformation transform) {
#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
@@ -1700,6 +1766,26 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::StoreTrapOnNull(
+ StoreRepresentation rep) {
+ switch (rep.representation()) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ if (rep.write_barrier_kind() == kNoWriteBarrier) { \
+ return &cache_.kStoreTrapOnNull##kRep##NoWriteBarrier; \
+ } else if (rep.write_barrier_kind() == kFullWriteBarrier) { \
+ return &cache_.kStoreTrapOnNull##kRep##FullWriteBarrier; \
+ } \
+ break;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+}
+
const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
StackCheckKind kind) {
switch (kind) {
@@ -2226,6 +2312,21 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
return OpParameter<StackCheckKind>(op);
}
+const Operator* MachineOperatorBuilder::ExtractF128(int32_t lane_index) {
+ DCHECK(0 <= lane_index && lane_index < 2);
+ class ExtractF128Operator final : public Operator1<int32_t> {
+ public:
+ explicit ExtractF128Operator(int32_t lane_index)
+ : Operator1<int32_t>(IrOpcode::kExtractF128, Operator::kPure,
+ "ExtractF128", 1, 0, 0, 1, 0, 0, lane_index) {
+ lane_index_ = lane_index;
+ }
+
+ int32_t lane_index_;
+ };
+ return zone_->New<ExtractF128Operator>(lane_index);
+}
+
#undef PURE_BINARY_OP_LIST_32
#undef PURE_BINARY_OP_LIST_64
#undef MACHINE_PURE_OP_LIST
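
The new LoadTrapOnNull and StoreTrapOnNull operators follow the existing global-cache pattern in this file: one statically allocated Operator object per (opcode, machine type, write barrier) combination, handed out by the builder so that identical operators compare pointer-equal. A simplified standalone sketch of the pattern, not V8 code:

// Standalone sketch (not V8 code) of the cached-singleton operator pattern.
#include <cassert>
#include <iostream>

enum class MachineType { kInt32, kInt64 };

struct Operator {
  const char* mnemonic;
  MachineType type;
};

struct OperatorGlobalCache {
  Operator kLoadTrapOnNullInt32{"LoadTrapOnNull", MachineType::kInt32};
  Operator kLoadTrapOnNullInt64{"LoadTrapOnNull", MachineType::kInt64};
};

class OperatorBuilder {
 public:
  const Operator* LoadTrapOnNull(MachineType type) const {
    static OperatorGlobalCache cache;  // shared by every builder instance
    switch (type) {
      case MachineType::kInt32: return &cache.kLoadTrapOnNullInt32;
      case MachineType::kInt64: return &cache.kLoadTrapOnNullInt64;
    }
    return nullptr;
  }
};

int main() {
  OperatorBuilder a, b;
  // Two builders return the same cached instance, so pointer equality works.
  assert(a.LoadTrapOnNull(MachineType::kInt32) ==
         b.LoadTrapOnNull(MachineType::kInt32));
  std::cout << a.LoadTrapOnNull(MachineType::kInt64)->mnemonic << "\n";
}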
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 5e28ea15f6..0853d9d77f 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -126,6 +126,8 @@ enum class LoadTransformation {
kS128Load32x2U,
kS128Load32Zero,
kS128Load64Zero,
+ kS256Load32Splat,
+ kS256Load64Splat,
};
size_t hash_value(LoadTransformation);
@@ -964,10 +966,27 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* TraceInstruction(uint32_t markid);
+ // SIMD256
+ const Operator* F32x8Add();
+ const Operator* F32x8Sub();
+ const Operator* F32x8Mul();
+ const Operator* F32x8Div();
+ const Operator* F32x8Min();
+ const Operator* F32x8Max();
+ const Operator* F32x8Pmin();
+ const Operator* F32x8Pmax();
+ const Operator* F32x8Eq();
+ const Operator* F32x8Ne();
+ const Operator* F32x8Lt();
+ const Operator* F32x8Le();
+ const Operator* S256Select();
+ const Operator* ExtractF128(int32_t lane_index);
+
// load [base + index]
const Operator* Load(LoadRepresentation rep);
const Operator* LoadImmutable(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
+ const Operator* LoadTrapOnNull(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
LoadTransformation transform);
@@ -979,6 +998,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
const Operator* ProtectedStore(MachineRepresentation rep);
+ const Operator* StoreTrapOnNull(StoreRepresentation rep);
// SIMD store: store a specified lane of value into [base + index].
const Operator* StoreLane(MemoryAccessKind kind, MachineRepresentation rep,
@@ -993,6 +1013,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
+ // Note: Only use this operator to:
+ // - Load from a constant offset.
+ // - Store to a constant offset with {kNoWriteBarrier}.
+ // These are the only usages supported by the instruction selector.
+ const Operator* LoadRootRegister();
+
// Access to the machine stack.
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
@@ -1113,6 +1139,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
return WordSar(ShiftKind::kShiftOutZeros);
}
+ const Operator* TaggedEqual() {
+ return COMPRESS_POINTERS_BOOL ? Word32Equal() : WordEqual();
+ }
+
private:
Zone* zone_;
MachineOperatorGlobalCache const& cache_;
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index f6f87cd62e..3cf62faabd 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -42,7 +42,8 @@ void MapInference::SetGuarded() { maps_state_ = kReliableOrGuarded; }
bool MapInference::HaveMaps() const { return !maps_.empty(); }
bool MapInference::AllOfInstanceTypesAreJSReceiver() const {
- return AllOfInstanceTypesUnsafe(InstanceTypeChecker::IsJSReceiver);
+ return AllOfInstanceTypesUnsafe(
+ static_cast<bool (*)(InstanceType)>(&InstanceTypeChecker::IsJSReceiver));
}
bool MapInference::AllOfInstanceTypesAre(InstanceType type) const {
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 306063fba9..3593c6e961 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -5,6 +5,7 @@
#include "src/compiler/memory-lowering.h"
#include "src/codegen/interface-descriptors-inl.h"
+#include "src/common/globals.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -166,9 +167,10 @@ Reduction MemoryLowering::ReduceAllocateRaw(
if (v8_flags.single_generation && allocation_type == AllocationType::kYoung) {
allocation_type = AllocationType::kOld;
}
- // Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
- // guard pages. If we need to support allocating code here we would need to
- // call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
+ // InstructionStream objects may have a maximum size smaller than
+ // kMaxHeapObjectSize due to guard pages. If we need to support allocating
+ // code here we would need to call
+ // MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
DCHECK_NE(allocation_type, AllocationType::kCode);
Node* value;
Node* size = node->InputAt(0);
@@ -440,56 +442,56 @@ Reduction MemoryLowering::ReduceLoadExternalPointerField(Node* node) {
#ifdef V8_ENABLE_SANDBOX
ExternalPointerTag tag = access.external_pointer_tag;
- if (IsSandboxedExternalPointerType(tag)) {
- // Fields for sandboxed external pointer contain a 32-bit handle, not a
- // 64-bit raw pointer.
- NodeProperties::ChangeOp(node, machine()->Load(MachineType::Uint32()));
-
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- __ InitializeEffectControl(effect, control);
-
- // Clone the load node and put it here.
- // TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
- // cloning nodes from arbitrary locations in effect/control chains.
- static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2);
- Node* handle = __ AddNode(graph()->CloneNode(node));
- Node* shift_amount =
- __ Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
- Node* offset = __ Word32Shr(handle, shift_amount);
-
- // Uncomment this to generate a breakpoint for debugging purposes.
- // __ DebugBreak();
-
- // Decode loaded external pointer.
- //
- // Here we access the external pointer table through an ExternalReference.
- // Alternatively, we could also hardcode the address of the table since it
- // is never reallocated. However, in that case we must be able to guarantee
- // that the generated code is never executed under a different Isolate, as
- // that would allow access to external objects from different Isolates. It
- // also would break if the code is serialized/deserialized at some point.
- Node* table_address =
- IsSharedExternalPointerType(tag)
- ? __
- Load(MachineType::Pointer(),
- __ ExternalConstant(
- ExternalReference::
- shared_external_pointer_table_address_address(
- isolate())),
- __ IntPtrConstant(0))
- : __ ExternalConstant(
- ExternalReference::external_pointer_table_address(isolate()));
- Node* table = __ Load(MachineType::Pointer(), table_address,
- Internals::kExternalPointerTableBufferOffset);
- Node* pointer =
- __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
- pointer = __ WordAnd(pointer, __ IntPtrConstant(~tag));
- return Replace(pointer);
- }
-#endif
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ // Fields for sandboxed external pointer contain a 32-bit handle, not a
+ // 64-bit raw pointer.
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::Uint32()));
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ __ InitializeEffectControl(effect, control);
+
+ // Clone the load node and put it here.
+ // TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
+ // cloning nodes from arbitrary locations in effect/control chains.
+ static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2);
+ Node* handle = __ AddNode(graph()->CloneNode(node));
+ Node* shift_amount =
+ __ Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
+ Node* offset = __ Word32Shr(handle, shift_amount);
+
+ // Uncomment this to generate a breakpoint for debugging purposes.
+ // __ DebugBreak();
+
+ // Decode loaded external pointer.
+ //
+ // Here we access the external pointer table through an ExternalReference.
+ // Alternatively, we could also hardcode the address of the table since it
+ // is never reallocated. However, in that case we must be able to guarantee
+ // that the generated code is never executed under a different Isolate, as
+ // that would allow access to external objects from different Isolates. It
+ // also would break if the code is serialized/deserialized at some point.
+ Node* table_address =
+ IsSharedExternalPointerType(tag)
+ ? __
+ Load(MachineType::Pointer(),
+ __ ExternalConstant(
+ ExternalReference::
+ shared_external_pointer_table_address_address(
+ isolate())),
+ __ IntPtrConstant(0))
+ : __ ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ Node* table = __ Load(MachineType::Pointer(), table_address,
+ Internals::kExternalPointerTableBufferOffset);
+ Node* pointer =
+ __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
+ pointer = __ WordAnd(pointer, __ IntPtrConstant(~tag));
+ return Replace(pointer);
+#else
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
return Changed(node);
+#endif // V8_ENABLE_SANDBOX
}
Reduction MemoryLowering::ReduceLoadBoundedSize(Node* node) {
@@ -595,8 +597,9 @@ Reduction MemoryLowering::ReduceStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
- // External pointer must never be stored by optimized code.
- DCHECK(!access.type.Is(Type::ExternalPointer()));
+  // External pointers must never be stored by optimized code when the
+  // sandbox is turned on.
+ DCHECK(!access.type.Is(Type::ExternalPointer()) || !V8_ENABLE_SANDBOX_BOOL);
// SandboxedPointers are not currently stored by optimized code.
DCHECK(!access.type.Is(Type::SandboxedPointer()));
// Bounded size fields are not currently stored by optimized code.
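
With the sandbox assumed in this path, the lowering always decodes a 32-bit external pointer handle: the handle's upper bits are a table index, shifting by (kExternalPointerIndexShift - kSystemPointerSizeLog2) turns the handle directly into a byte offset into the pointer-sized table entries, and the tag bits are masked off the loaded entry. A standalone sketch of that arithmetic, not V8 code, with made-up constants:

// Standalone sketch (not V8 code); the shift, tag and table layout below are
// illustrative stand-ins, not V8's real values.
#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kIndexShift = 6;               // assumption
constexpr uint32_t kPointerSizeLog2 = 3;          // 8-byte table entries
constexpr uint64_t kTag = 0x4000000000000000ULL;  // assumption

uint64_t DecodeExternalPointer(const std::vector<uint64_t>& table,
                               uint32_t handle) {
  // The shift converts the handle straight into a byte offset from the base.
  uint32_t byte_offset = handle >> (kIndexShift - kPointerSizeLog2);
  uint64_t entry = table[byte_offset >> kPointerSizeLog2];
  return entry & ~kTag;  // strip the type tag from the loaded entry
}

int main() {
  std::vector<uint64_t> table(16);
  const uint64_t raw = 0x0000123456789ABCULL;
  const uint32_t index = 3;
  table[index] = raw | kTag;  // tagged entry
  const uint32_t handle = index << kIndexShift;
  assert(DecodeExternalPointer(table, handle) == raw);
}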
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index d7b9adf345..507a871ac4 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -24,6 +24,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kAbortCSADcheck:
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kCheckTurboshaftTypeOf:
case IrOpcode::kComment:
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
@@ -40,7 +41,9 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kStoreTrapOnNull:
case IrOpcode::kRetain:
case IrOpcode::kStackPointerGreaterThan:
case IrOpcode::kStaticAssert:
@@ -181,10 +184,10 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
MemoryOptimizer::MemoryOptimizer(
- JSGraph* jsgraph, Zone* zone,
+ JSHeapBroker* broker, JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
- : graph_assembler_(jsgraph, zone, BranchSemantics::kMachine),
+ : graph_assembler_(broker, jsgraph, zone, BranchSemantics::kMachine),
memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 7d8bca44d4..55c1f8fd7d 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -29,7 +29,7 @@ using NodeId = uint32_t;
// operators.
class MemoryOptimizer final {
public:
- MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
+ MemoryOptimizer(JSHeapBroker* broker, JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index d145b0defa..5362501275 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -286,7 +286,7 @@ struct LoadMatcher : public NodeMatcher {
// For shorter pattern matching code, this struct matches both the left and
// right hand sides of a binary operation and can put constants on the right
// if they appear on the left hand side of a commutative operation.
-template <typename Left, typename Right>
+template <typename Left, typename Right, MachineRepresentation rep>
struct BinopMatcher : public NodeMatcher {
explicit BinopMatcher(Node* node)
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
@@ -300,6 +300,8 @@ struct BinopMatcher : public NodeMatcher {
using LeftMatcher = Left;
using RightMatcher = Right;
+ static constexpr MachineRepresentation representation = rep;
+
const Left& left() const { return left_; }
const Right& right() const { return right_; }
@@ -338,19 +340,30 @@ struct BinopMatcher : public NodeMatcher {
Right right_;
};
-using Int32BinopMatcher = BinopMatcher<Int32Matcher, Int32Matcher>;
-using Uint32BinopMatcher = BinopMatcher<Uint32Matcher, Uint32Matcher>;
-using Int64BinopMatcher = BinopMatcher<Int64Matcher, Int64Matcher>;
-using Uint64BinopMatcher = BinopMatcher<Uint64Matcher, Uint64Matcher>;
-using IntPtrBinopMatcher = BinopMatcher<IntPtrMatcher, IntPtrMatcher>;
-using UintPtrBinopMatcher = BinopMatcher<UintPtrMatcher, UintPtrMatcher>;
-using Float32BinopMatcher = BinopMatcher<Float32Matcher, Float32Matcher>;
-using Float64BinopMatcher = BinopMatcher<Float64Matcher, Float64Matcher>;
-using NumberBinopMatcher = BinopMatcher<NumberMatcher, NumberMatcher>;
+using Int32BinopMatcher =
+ BinopMatcher<Int32Matcher, Int32Matcher, MachineRepresentation::kWord32>;
+using Uint32BinopMatcher =
+ BinopMatcher<Uint32Matcher, Uint32Matcher, MachineRepresentation::kWord32>;
+using Int64BinopMatcher =
+ BinopMatcher<Int64Matcher, Int64Matcher, MachineRepresentation::kWord64>;
+using Uint64BinopMatcher =
+ BinopMatcher<Uint64Matcher, Uint64Matcher, MachineRepresentation::kWord64>;
+using IntPtrBinopMatcher = BinopMatcher<IntPtrMatcher, IntPtrMatcher,
+ MachineType::PointerRepresentation()>;
+using UintPtrBinopMatcher = BinopMatcher<UintPtrMatcher, UintPtrMatcher,
+ MachineType::PointerRepresentation()>;
+using Float32BinopMatcher = BinopMatcher<Float32Matcher, Float32Matcher,
+ MachineRepresentation::kFloat32>;
+using Float64BinopMatcher = BinopMatcher<Float64Matcher, Float64Matcher,
+ MachineRepresentation::kFloat64>;
+using NumberBinopMatcher =
+ BinopMatcher<NumberMatcher, NumberMatcher, MachineRepresentation::kTagged>;
using HeapObjectBinopMatcher =
- BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>;
+ BinopMatcher<HeapObjectMatcher, HeapObjectMatcher,
+ MachineRepresentation::kTagged>;
using CompressedHeapObjectBinopMatcher =
- BinopMatcher<CompressedHeapObjectMatcher, CompressedHeapObjectMatcher>;
+ BinopMatcher<CompressedHeapObjectMatcher, CompressedHeapObjectMatcher,
+ MachineRepresentation::kCompressed>;
template <class BinopMatcher, IrOpcode::Value kMulOpcode,
IrOpcode::Value kShiftOpcode>
@@ -749,6 +762,7 @@ struct BaseWithIndexAndDisplacementMatcher {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kLoadTrapOnNull:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
// Skip addressing uses.
@@ -765,6 +779,7 @@ struct BaseWithIndexAndDisplacementMatcher {
break;
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kStoreTrapOnNull:
// If the stored value is this node, it is not an addressing use.
if (from->InputAt(2) == node) return false;
// Otherwise it is used as an address and skipped.
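
BinopMatcher now carries the operands' MachineRepresentation as a template parameter, so reducers that are generic over word size can query the width at compile time instead of threading it through separately. A standalone sketch of the pattern, not the real matcher:

// Standalone sketch (not V8 code) of tagging a matcher template with a
// compile-time representation.
#include <iostream>

enum class MachineRepresentation { kWord32, kWord64 };

struct Int32Matcher {};
struct Int64Matcher {};

template <typename Left, typename Right, MachineRepresentation rep>
struct BinopMatcherSketch {
  static constexpr MachineRepresentation representation = rep;
};

using Int32BinopMatcherSketch =
    BinopMatcherSketch<Int32Matcher, Int32Matcher,
                       MachineRepresentation::kWord32>;
using Int64BinopMatcherSketch =
    BinopMatcherSketch<Int64Matcher, Int64Matcher,
                       MachineRepresentation::kWord64>;

template <typename Matcher>
const char* WidthOf() {
  return Matcher::representation == MachineRepresentation::kWord32 ? "32-bit"
                                                                   : "64-bit";
}

int main() {
  std::cout << WidthOf<Int32BinopMatcherSketch>() << "\n";  // 32-bit
  std::cout << WidthOf<Int64BinopMatcherSketch>() << "\n";  // 64-bit
}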
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 051eeeb5ef..6c72633e6d 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -304,6 +304,55 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
}
// static
+MachineRepresentation NodeProperties::GetProjectionType(
+ Node const* projection) {
+ size_t index = ProjectionIndexOf(projection->op());
+ Node* input = projection->InputAt(0);
+ switch (input->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt32MulWithOverflow:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord32
+ : MachineRepresentation::kBit;
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kInt64MulWithOverflow:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kBit;
+ case IrOpcode::kTryTruncateFloat64ToInt32:
+ case IrOpcode::kTryTruncateFloat64ToUint32:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord32
+ : MachineRepresentation::kBit;
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kBit;
+ case IrOpcode::kCall: {
+ auto call_descriptor = CallDescriptorOf(input->op());
+ return call_descriptor->GetReturnType(index).representation();
+ }
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicPairExchange:
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return MachineRepresentation::kWord32;
+ default:
+ return MachineRepresentation::kNone;
+ }
+}
+
+// static
bool NodeProperties::IsSame(Node* a, Node* b) {
for (;;) {
if (a->opcode() == IrOpcode::kCheckHeapObject) {
@@ -319,8 +368,8 @@ bool NodeProperties::IsSame(Node* a, Node* b) {
}
// static
-base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
- Node* receiver) {
+OptionalMapRef NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
+ Node* receiver) {
DCHECK(receiver->opcode() == IrOpcode::kJSCreate ||
receiver->opcode() == IrOpcode::kJSCreateArray);
HeapObjectMatcher mtarget(GetValueInput(receiver, 0));
@@ -329,12 +378,12 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
mnewtarget.Ref(broker).IsJSFunction()) {
ObjectRef target = mtarget.Ref(broker);
JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction();
- if (newtarget.map().has_prototype_slot() &&
- newtarget.has_initial_map(broker->dependencies())) {
- MapRef initial_map = newtarget.initial_map(broker->dependencies());
- if (initial_map.GetConstructor().equals(target)) {
- DCHECK(target.AsJSFunction().map().is_constructor());
- DCHECK(newtarget.map().is_constructor());
+ if (newtarget.map(broker).has_prototype_slot() &&
+ newtarget.has_initial_map(broker)) {
+ MapRef initial_map = newtarget.initial_map(broker);
+ if (initial_map.GetConstructor(broker).equals(target)) {
+ DCHECK(target.AsJSFunction().map(broker).is_constructor());
+ DCHECK(newtarget.map(broker).is_constructor());
return initial_map;
}
}
@@ -380,10 +429,10 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
// Object.prototype have NO_ELEMENTS elements kind.
if (!ref.IsJSObject() ||
!broker->IsArrayOrObjectPrototype(ref.AsJSObject())) {
- if (ref.map().is_stable()) {
+ if (ref.map(broker).is_stable()) {
// The {receiver_map} is only reliable when we install a stability
// code dependency.
- *maps_out = RefSetOf(broker, ref.map());
+ *maps_out = RefSetOf(broker, ref.map(broker));
return kUnreliableMaps;
}
}
@@ -410,7 +459,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
}
case IrOpcode::kJSCreate: {
if (IsSame(receiver, effect)) {
- base::Optional<MapRef> initial_map = GetJSCreateMap(broker, receiver);
+ OptionalMapRef initial_map = GetJSCreateMap(broker, receiver);
if (initial_map.has_value()) {
*maps_out = RefSetOf(broker, initial_map.value());
return result;
@@ -423,10 +472,9 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
}
case IrOpcode::kJSCreatePromise: {
if (IsSame(receiver, effect)) {
- *maps_out = RefSetOf(
- broker,
- broker->target_native_context().promise_function().initial_map(
- broker->dependencies()));
+ *maps_out = RefSetOf(broker, broker->target_native_context()
+ .promise_function(broker)
+ .initial_map(broker));
return result;
}
break;
@@ -533,7 +581,7 @@ bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver,
return false;
case IrOpcode::kHeapConstant: {
HeapObjectRef value = HeapObjectMatcher(receiver).Ref(broker);
- return value.map().IsPrimitiveMap();
+ return value.map(broker).IsPrimitiveMap();
}
default: {
MapInference inference(broker, receiver, effect);
@@ -563,7 +611,7 @@ bool NodeProperties::CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver,
return false;
case IrOpcode::kHeapConstant: {
HeapObjectRef value = HeapObjectMatcher(receiver).Ref(broker);
- OddballType type = value.map().oddball_type();
+ OddballType type = value.map(broker).oddball_type(broker);
return type == OddballType::kNull || type == OddballType::kUndefined;
}
default:
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index f56aa6ebfb..40a81d4278 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_NODE_PROPERTIES_H_
#define V8_COMPILER_NODE_PROPERTIES_H_
+#include "src/codegen/machine-type.h"
#include "src/common/globals.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/node.h"
@@ -116,6 +117,9 @@ class V8_EXPORT_PRIVATE NodeProperties {
static bool IsPhi(Node* node) {
return IrOpcode::IsPhiOpcode(node->opcode());
}
+ static bool IsSimd128Operation(Node* node) {
+ return IrOpcode::IsSimd128Opcode(node->opcode());
+ }
// Determines whether exceptions thrown by the given node are handled locally
// within the graph (i.e. an IfException projection is present). Optionally
@@ -196,6 +200,9 @@ class V8_EXPORT_PRIVATE NodeProperties {
// - Switch: [ IfValue, ..., IfDefault ]
static void CollectControlProjections(Node* node, Node** proj, size_t count);
+ // Return the MachineRepresentation of a Projection based on its input.
+ static MachineRepresentation GetProjectionType(Node const* projection);
+
// Checks if two nodes are the same, looking past {CheckHeapObject}.
static bool IsSame(Node* a, Node* b);
@@ -219,8 +226,7 @@ class V8_EXPORT_PRIVATE NodeProperties {
ZoneRefUnorderedSet<MapRef>* maps_out);
// Return the initial map of the new-target if the allocation can be inlined.
- static base::Optional<MapRef> GetJSCreateMap(JSHeapBroker* broker,
- Node* receiver);
+ static OptionalMapRef GetJSCreateMap(JSHeapBroker* broker, Node* receiver);
// Walks up the {effect} chain to check that there's no observable side-effect
// between the {effect} and it's {dominator}. Aborts the walk if there's join
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 617e8d41e5..3f48f1bf07 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -133,6 +133,8 @@
V(JSToName) \
V(JSToNumber) \
V(JSToNumberConvertBigInt) \
+ V(JSToBigInt) \
+ V(JSToBigIntConvertNumber) \
V(JSToNumeric) \
V(JSToObject) \
V(JSToString) \
@@ -327,7 +329,13 @@
V(NumberSameValue) \
V(StringEqual) \
V(StringLessThan) \
- V(StringLessThanOrEqual)
+ V(StringLessThanOrEqual) \
+ V(BigIntEqual) \
+ V(BigIntLessThan) \
+ V(BigIntLessThanOrEqual) \
+ V(SpeculativeBigIntEqual) \
+ V(SpeculativeBigIntLessThan) \
+ V(SpeculativeBigIntLessThanOrEqual)
#define SIMPLIFIED_NUMBER_BINOP_LIST(V) \
V(NumberAdd) \
@@ -353,7 +361,11 @@
V(BigIntMultiply) \
V(BigIntDivide) \
V(BigIntModulus) \
- V(BigIntBitwiseAnd)
+ V(BigIntBitwiseAnd) \
+ V(BigIntBitwiseOr) \
+ V(BigIntBitwiseXor) \
+ V(BigIntShiftLeft) \
+ V(BigIntShiftRight)
#define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
V(SpeculativeNumberAdd) \
@@ -405,6 +417,7 @@
V(NumberToString) \
V(NumberToUint32) \
V(NumberToUint8Clamped) \
+ V(Integral32OrMinusZeroToBigInt) \
V(NumberSilenceNaN)
#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \
@@ -436,6 +449,7 @@
V(CheckSmi) \
V(CheckString) \
V(CheckSymbol) \
+ V(CheckTurboshaftTypeOf) \
V(CompareMaps) \
V(ConvertReceiver) \
V(ConvertTaggedHoleToUndefined) \
@@ -524,12 +538,17 @@
V(SpeculativeBigIntMultiply) \
V(SpeculativeBigIntDivide) \
V(SpeculativeBigIntModulus) \
- V(SpeculativeBigIntBitwiseAnd)
+ V(SpeculativeBigIntBitwiseAnd) \
+ V(SpeculativeBigIntBitwiseOr) \
+ V(SpeculativeBigIntBitwiseXor) \
+ V(SpeculativeBigIntShiftLeft) \
+ V(SpeculativeBigIntShiftRight)
#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \
V(SpeculativeBigIntAsIntN) \
V(SpeculativeBigIntAsUintN) \
- V(SpeculativeBigIntNegate)
+ V(SpeculativeBigIntNegate) \
+ V(SpeculativeToBigInt)
#define SIMPLIFIED_WASM_OP_LIST(V) \
V(AssertNotNull) \
@@ -540,7 +559,15 @@
V(WasmTypeCast) \
V(WasmTypeCheck) \
V(WasmExternInternalize) \
- V(WasmExternExternalize)
+ V(WasmExternExternalize) \
+ V(WasmStructGet) \
+ V(WasmStructSet) \
+ V(WasmArrayGet) \
+ V(WasmArraySet) \
+ V(WasmArrayLength) \
+ V(WasmArrayInitializeLength) \
+ V(StringAsWtf16) \
+ V(StringPrepareForGetCodeunit)
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
@@ -793,6 +820,7 @@
V(LoadStackCheckOffset) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
+ V(LoadRootRegister) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
@@ -803,6 +831,8 @@
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
+ V(LoadTrapOnNull) \
+ V(StoreTrapOnNull) \
V(MemoryBarrier) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
@@ -812,248 +842,264 @@
V(StackPointerGreaterThan) \
V(TraceInstruction)
-#define MACHINE_SIMD_OP_LIST(V) \
- V(F64x2Splat) \
- V(F64x2ExtractLane) \
- V(F64x2ReplaceLane) \
- V(F64x2Abs) \
- V(F64x2Neg) \
- V(F64x2Sqrt) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F64x2Qfma) \
- V(F64x2Qfms) \
- V(F64x2Pmin) \
- V(F64x2Pmax) \
- V(F64x2Ceil) \
- V(F64x2Floor) \
- V(F64x2Trunc) \
- V(F64x2NearestInt) \
- V(F64x2ConvertLowI32x4S) \
- V(F64x2ConvertLowI32x4U) \
- V(F64x2PromoteLowF32x4) \
- V(F32x4Splat) \
- V(F32x4ExtractLane) \
- V(F32x4ReplaceLane) \
- V(F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4Sqrt) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(F32x4Gt) \
- V(F32x4Ge) \
- V(F32x4Qfma) \
- V(F32x4Qfms) \
- V(F32x4Pmin) \
- V(F32x4Pmax) \
- V(F32x4Ceil) \
- V(F32x4Floor) \
- V(F32x4Trunc) \
- V(F32x4NearestInt) \
- V(F32x4DemoteF64x2Zero) \
- V(I64x2Splat) \
- V(I64x2SplatI32Pair) \
- V(I64x2ExtractLane) \
- V(I64x2ReplaceLane) \
- V(I64x2ReplaceLaneI32Pair) \
- V(I64x2Abs) \
- V(I64x2Neg) \
- V(I64x2SConvertI32x4Low) \
- V(I64x2SConvertI32x4High) \
- V(I64x2UConvertI32x4Low) \
- V(I64x2UConvertI32x4High) \
- V(I64x2BitMask) \
- V(I64x2Shl) \
- V(I64x2ShrS) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I64x2ShrU) \
- V(I64x2ExtMulLowI32x4S) \
- V(I64x2ExtMulHighI32x4S) \
- V(I64x2ExtMulLowI32x4U) \
- V(I64x2ExtMulHighI32x4U) \
- V(I32x4Splat) \
- V(I32x4ExtractLane) \
- V(I32x4ReplaceLane) \
- V(I32x4SConvertF32x4) \
- V(I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High) \
- V(I32x4Neg) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4LtS) \
- V(I32x4LeS) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4UConvertF32x4) \
- V(I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High) \
- V(I32x4ShrU) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
- V(I32x4LtU) \
- V(I32x4LeU) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4Abs) \
- V(I32x4BitMask) \
- V(I32x4DotI16x8S) \
- V(I32x4ExtMulLowI16x8S) \
- V(I32x4ExtMulHighI16x8S) \
- V(I32x4ExtMulLowI16x8U) \
- V(I32x4ExtMulHighI16x8U) \
- V(I32x4ExtAddPairwiseI16x8S) \
- V(I32x4ExtAddPairwiseI16x8U) \
- V(I32x4TruncSatF64x2SZero) \
- V(I32x4TruncSatF64x2UZero) \
- V(I16x8Splat) \
- V(I16x8ExtractLaneU) \
- V(I16x8ExtractLaneS) \
- V(I16x8ReplaceLane) \
- V(I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High) \
- V(I16x8Neg) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSatS) \
- V(I16x8Sub) \
- V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8LtS) \
- V(I16x8LeS) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High) \
- V(I16x8ShrU) \
- V(I16x8UConvertI32x4) \
- V(I16x8AddSatU) \
- V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
- V(I16x8LtU) \
- V(I16x8LeU) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8RoundingAverageU) \
- V(I16x8Q15MulRSatS) \
- V(I16x8Abs) \
- V(I16x8BitMask) \
- V(I16x8ExtMulLowI8x16S) \
- V(I16x8ExtMulHighI8x16S) \
- V(I16x8ExtMulLowI8x16U) \
- V(I16x8ExtMulHighI8x16U) \
- V(I16x8ExtAddPairwiseI8x16S) \
- V(I16x8ExtAddPairwiseI8x16U) \
- V(I8x16Splat) \
- V(I8x16ExtractLaneU) \
- V(I8x16ExtractLaneS) \
- V(I8x16ReplaceLane) \
- V(I8x16SConvertI16x8) \
- V(I8x16Neg) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
- V(I8x16Add) \
- V(I8x16AddSatS) \
- V(I8x16Sub) \
- V(I8x16SubSatS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16LtS) \
- V(I8x16LeS) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16UConvertI16x8) \
- V(I8x16AddSatU) \
- V(I8x16SubSatU) \
- V(I8x16ShrU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
- V(I8x16LtU) \
- V(I8x16LeU) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16RoundingAverageU) \
- V(I8x16Popcnt) \
- V(I8x16Abs) \
- V(I8x16BitMask) \
- V(S128Zero) \
- V(S128Const) \
- V(S128Not) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor) \
- V(S128Select) \
- V(S128AndNot) \
- V(I8x16Swizzle) \
- V(I8x16RelaxedLaneSelect) \
- V(I16x8RelaxedLaneSelect) \
- V(I32x4RelaxedLaneSelect) \
- V(I64x2RelaxedLaneSelect) \
- V(F32x4RelaxedMin) \
- V(F32x4RelaxedMax) \
- V(F64x2RelaxedMin) \
- V(F64x2RelaxedMax) \
- V(I32x4RelaxedTruncF32x4S) \
- V(I32x4RelaxedTruncF32x4U) \
- V(I32x4RelaxedTruncF64x2SZero) \
- V(I32x4RelaxedTruncF64x2UZero) \
- V(I16x8RelaxedQ15MulRS) \
- V(I16x8DotI8x16I7x16S) \
- V(I32x4DotI8x16I7x16AddS) \
- V(I8x16Shuffle) \
- V(V128AnyTrue) \
- V(I64x2AllTrue) \
- V(I32x4AllTrue) \
- V(I16x8AllTrue) \
- V(I8x16AllTrue) \
- V(LoadTransform) \
- V(LoadLane) \
+#define MACHINE_SIMD128_OP_LIST(V) \
+ V(F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
+ V(F64x2Pmin) \
+ V(F64x2Pmax) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4) \
+ V(F32x4Splat) \
+ V(F32x4ExtractLane) \
+ V(F32x4ReplaceLane) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4Sqrt) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(F32x4Gt) \
+ V(F32x4Ge) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms) \
+ V(F32x4Pmin) \
+ V(F32x4Pmax) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero) \
+ V(I64x2Splat) \
+ V(I64x2SplatI32Pair) \
+ V(I64x2ExtractLane) \
+ V(I64x2ReplaceLane) \
+ V(I64x2ReplaceLaneI32Pair) \
+ V(I64x2Abs) \
+ V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I64x2BitMask) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I64x2ShrU) \
+ V(I64x2ExtMulLowI32x4S) \
+ V(I64x2ExtMulHighI32x4S) \
+ V(I64x2ExtMulLowI32x4U) \
+ V(I64x2ExtMulHighI32x4U) \
+ V(I32x4Splat) \
+ V(I32x4ExtractLane) \
+ V(I32x4ReplaceLane) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4Neg) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4LtS) \
+ V(I32x4LeS) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4UConvertF32x4) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4ShrU) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
+ V(I32x4LtU) \
+ V(I32x4LeU) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I32x4Abs) \
+ V(I32x4BitMask) \
+ V(I32x4DotI16x8S) \
+ V(I32x4ExtMulLowI16x8S) \
+ V(I32x4ExtMulHighI16x8S) \
+ V(I32x4ExtMulLowI16x8U) \
+ V(I32x4ExtMulHighI16x8U) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
+ V(I16x8Splat) \
+ V(I16x8ExtractLaneU) \
+ V(I16x8ExtractLaneS) \
+ V(I16x8ReplaceLane) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8Neg) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8LtS) \
+ V(I16x8LeS) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I16x8ShrU) \
+ V(I16x8UConvertI32x4) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8LtU) \
+ V(I16x8LeU) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS) \
+ V(I16x8Abs) \
+ V(I16x8BitMask) \
+ V(I16x8ExtMulLowI8x16S) \
+ V(I16x8ExtMulHighI8x16S) \
+ V(I16x8ExtMulLowI8x16U) \
+ V(I16x8ExtMulHighI8x16U) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
+ V(I8x16Splat) \
+ V(I8x16ExtractLaneU) \
+ V(I8x16ExtractLaneS) \
+ V(I8x16ReplaceLane) \
+ V(I8x16SConvertI16x8) \
+ V(I8x16Neg) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16Add) \
+ V(I8x16AddSatS) \
+ V(I8x16Sub) \
+ V(I8x16SubSatS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16LtS) \
+ V(I8x16LeS) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
+ V(I8x16ShrU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(I8x16LtU) \
+ V(I8x16LeU) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(I8x16RoundingAverageU) \
+ V(I8x16Popcnt) \
+ V(I8x16Abs) \
+ V(I8x16BitMask) \
+ V(S128Zero) \
+ V(S128Const) \
+ V(S128Not) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor) \
+ V(S128Select) \
+ V(S128AndNot) \
+ V(I8x16Swizzle) \
+ V(I8x16RelaxedLaneSelect) \
+ V(I16x8RelaxedLaneSelect) \
+ V(I32x4RelaxedLaneSelect) \
+ V(I64x2RelaxedLaneSelect) \
+ V(F32x4RelaxedMin) \
+ V(F32x4RelaxedMax) \
+ V(F64x2RelaxedMin) \
+ V(F64x2RelaxedMax) \
+ V(I32x4RelaxedTruncF32x4S) \
+ V(I32x4RelaxedTruncF32x4U) \
+ V(I32x4RelaxedTruncF64x2SZero) \
+ V(I32x4RelaxedTruncF64x2UZero) \
+ V(I16x8RelaxedQ15MulRS) \
+ V(I16x8DotI8x16I7x16S) \
+ V(I32x4DotI8x16I7x16AddS) \
+ V(I8x16Shuffle) \
+ V(V128AnyTrue) \
+ V(I64x2AllTrue) \
+ V(I32x4AllTrue) \
+ V(I16x8AllTrue) \
+ V(I8x16AllTrue) \
+ V(LoadTransform) \
+ V(LoadLane) \
V(StoreLane)
-#define VALUE_OP_LIST(V) \
- COMMON_OP_LIST(V) \
- SIMPLIFIED_OP_LIST(V) \
- MACHINE_OP_LIST(V) \
- MACHINE_SIMD_OP_LIST(V) \
+// SIMD256 for AVX
+#define MACHINE_SIMD256_OP_LIST(V) \
+ V(F32x8Add) \
+ V(F32x8Sub) \
+ V(F32x8Mul) \
+ V(F32x8Div) \
+ V(F32x8Pmin) \
+ V(F32x8Pmax) \
+ V(F32x8Eq) \
+ V(F32x8Ne) \
+ V(F32x8Lt) \
+ V(F32x8Le) \
+ V(S256Select) \
+ V(ExtractF128)
+
+#define VALUE_OP_LIST(V) \
+ COMMON_OP_LIST(V) \
+ SIMPLIFIED_OP_LIST(V) \
+ MACHINE_OP_LIST(V) \
+ MACHINE_SIMD128_OP_LIST(V) \
+ MACHINE_SIMD256_OP_LIST(V) \
JS_OP_LIST(V)
// The combination of all operators at all levels and the common operators.
@@ -1239,6 +1285,18 @@ class V8_EXPORT_PRIVATE IrOpcode {
}
UNREACHABLE();
}
+
+ static bool IsSimd128Opcode(Value value) {
+#define CASE(Name, ...) case k##Name:
+ switch (value) {
+ MACHINE_SIMD128_OP_LIST(CASE)
+ return true;
+ default:
+ return false;
+ }
+#undef CASE
+ UNREACHABLE();
+ }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
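
Splitting the SIMD list into MACHINE_SIMD128_OP_LIST and MACHINE_SIMD256_OP_LIST lets predicates such as IsSimd128Opcode be generated directly from the 128-bit list, so the switch stays in sync with the list by construction. A standalone sketch of that X-macro predicate pattern, not V8 code:

// Standalone sketch (not V8 code): the list macro is expanded once into enum
// members and once into case labels, so the predicate cannot drift from the
// list.
#include <cassert>

#define SIMD128_OP_LIST(V) V(F64x2Add) V(F32x4Mul) V(I32x4Splat)

enum Opcode {
#define DECLARE(Name) k##Name,
  SIMD128_OP_LIST(DECLARE)
#undef DECLARE
  kInt32Add,  // a non-SIMD opcode for contrast
};

bool IsSimd128Opcode(Opcode value) {
#define CASE(Name) case k##Name:
  switch (value) {
    SIMD128_OP_LIST(CASE)
    return true;
    default:
      return false;
  }
#undef CASE
}

int main() {
  assert(IsSimd128Opcode(kF32x4Mul));
  assert(!IsSimd128Opcode(kInt32Add));
}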
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index e64cc3d11f..bd57b79ed4 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -8,8 +8,6 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
-#include "src/execution/isolate.h"
-#include "src/heap/factory-inl.h"
#include "src/objects/oddball.h"
namespace v8 {
@@ -18,18 +16,17 @@ namespace compiler {
OperationTyper::OperationTyper(JSHeapBroker* broker, Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
- Factory* factory = broker->isolate()->factory();
infinity_ = Type::Constant(V8_INFINITY, zone);
minus_infinity_ = Type::Constant(-V8_INFINITY, zone);
Type truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero.Maybe(Type::Integral32()));
singleton_empty_string_ =
- Type::Constant(broker, factory->empty_string(), zone);
- singleton_NaN_string_ = Type::Constant(broker, factory->NaN_string(), zone);
- singleton_zero_string_ = Type::Constant(broker, factory->zero_string(), zone);
- singleton_false_ = Type::Constant(broker, factory->false_value(), zone);
- singleton_true_ = Type::Constant(broker, factory->true_value(), zone);
+ Type::Constant(broker, broker->empty_string(), zone);
+ singleton_NaN_string_ = Type::Constant(broker, broker->NaN_string(), zone);
+ singleton_zero_string_ = Type::Constant(broker, broker->zero_string(), zone);
+ singleton_false_ = Type::Constant(broker, broker->false_value(), zone);
+ singleton_true_ = Type::Constant(broker, broker->true_value(), zone);
singleton_the_hole_ = Type::Hole();
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
@@ -310,6 +307,27 @@ Type OperationTyper::ToNumberConvertBigInt(Type type) {
return maybe_bigint ? Type::Union(type, cache_->kInteger, zone()) : type;
}
+Type OperationTyper::ToBigInt(Type type) {
+ if (type.Is(Type::BigInt())) {
+ return type;
+ }
+
+ return Type::BigInt();
+}
+
+Type OperationTyper::ToBigIntConvertNumber(Type type) {
+ if (type.Is(Type::Unsigned32OrMinusZero())) {
+ return Type::UnsignedBigInt63();
+ } else if (type.Is(Type::Signed32OrMinusZero())) {
+ return Type::SignedBigInt64();
+ }
+
+ bool maybe_number =
+ type.Maybe(Type::Number()) || type.Maybe(Type::Receiver());
+ type = ToBigInt(Type::Intersect(type, Type::NonNumber(), zone()));
+ return maybe_number ? Type::Union(type, Type::BigInt(), zone()) : type;
+}
+
Type OperationTyper::ToNumeric(Type type) {
// If the {type} includes any receivers, then the callbacks
// might actually produce BigInt primitive values here.
@@ -566,6 +584,18 @@ Type OperationTyper::NumberToUint8Clamped(Type type) {
return cache_->kUint8;
}
+Type OperationTyper::Integral32OrMinusZeroToBigInt(Type type) {
+ DCHECK(type.Is(Type::Number()));
+
+ if (type.Is(Type::Unsigned32OrMinusZero())) {
+ return Type::UnsignedBigInt63();
+ }
+ if (type.Is(Type::Signed32OrMinusZero())) {
+ return Type::SignedBigInt64();
+ }
+ return Type::BigInt();
+}
+
Type OperationTyper::NumberSilenceNaN(Type type) {
DCHECK(type.Is(Type::Number()));
// TODO(jarin): This is a terrible hack; we definitely need a dedicated type
@@ -1137,53 +1167,19 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
TYPER_SUPPORTED_MACHINE_BINOP_LIST(MACHINE_BINOP)
#undef MACHINE_BINOP
-Type OperationTyper::BigIntAdd(Type lhs, Type rhs) {
- DCHECK(lhs.Is(Type::BigInt()));
- DCHECK(rhs.Is(Type::BigInt()));
-
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::BigIntSubtract(Type lhs, Type rhs) {
- DCHECK(lhs.Is(Type::BigInt()));
- DCHECK(rhs.Is(Type::BigInt()));
-
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::BigIntMultiply(Type lhs, Type rhs) {
- DCHECK(lhs.Is(Type::BigInt()));
- DCHECK(rhs.Is(Type::BigInt()));
-
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
+Type OperationTyper::ChangeUint32ToUint64(Type input) {
+ return Type::Machine();
}
-Type OperationTyper::BigIntDivide(Type lhs, Type rhs) {
- DCHECK(lhs.Is(Type::BigInt()));
- DCHECK(rhs.Is(Type::BigInt()));
-
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::BigIntModulus(Type lhs, Type rhs) {
- DCHECK(lhs.Is(Type::BigInt()));
- DCHECK(rhs.Is(Type::BigInt()));
-
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::BigIntBitwiseAnd(Type lhs, Type rhs) {
- DCHECK(lhs.Is(Type::BigInt()));
- DCHECK(rhs.Is(Type::BigInt()));
-
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
+#define BIGINT_BINOP(Name) \
+ Type OperationTyper::Name(Type lhs, Type rhs) { \
+ DCHECK(lhs.Is(Type::BigInt())); \
+ DCHECK(rhs.Is(Type::BigInt())); \
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None(); \
+ return Type::BigInt(); \
+ }
+SIMPLIFIED_BIGINT_BINOP_LIST(BIGINT_BINOP)
+#undef BIGINT_BINOP
Type OperationTyper::BigIntNegate(Type type) {
DCHECK(type.Is(Type::BigInt()));
@@ -1192,41 +1188,23 @@ Type OperationTyper::BigIntNegate(Type type) {
return Type::BigInt();
}
-Type OperationTyper::SpeculativeBigIntAdd(Type lhs, Type rhs) {
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::SpeculativeBigIntSubtract(Type lhs, Type rhs) {
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::SpeculativeBigIntMultiply(Type lhs, Type rhs) {
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::SpeculativeBigIntDivide(Type lhs, Type rhs) {
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::SpeculativeBigIntModulus(Type lhs, Type rhs) {
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
-
-Type OperationTyper::SpeculativeBigIntBitwiseAnd(Type lhs, Type rhs) {
- if (lhs.IsNone() || rhs.IsNone()) return Type::None();
- return Type::BigInt();
-}
+#define SPECULATIVE_BIGINT_BINOP(Name) \
+ Type OperationTyper::Name(Type lhs, Type rhs) { \
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None(); \
+ return Type::BigInt(); \
+ }
+SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(SPECULATIVE_BIGINT_BINOP)
+#undef SPECULATIVE_BIGINT_BINOP
Type OperationTyper::SpeculativeBigIntNegate(Type type) {
if (type.IsNone()) return type;
return Type::BigInt();
}
+Type OperationTyper::SpeculativeToBigInt(Type type) {
+ return ToBigInt(Type::Intersect(type, Type::BigInt(), zone()));
+}
+
Type OperationTyper::SpeculativeToNumber(Type type) {
return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone()));
}
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index dec1dc9da4..f657a45187 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -11,6 +11,7 @@
#define TYPER_SUPPORTED_MACHINE_BINOP_LIST(V) \
V(Int32Add) \
+ V(Int32LessThanOrEqual) \
V(Int64Add) \
V(Int32Sub) \
V(Int64Sub) \
@@ -54,6 +55,8 @@ class V8_EXPORT_PRIVATE OperationTyper {
Type ToPrimitive(Type type);
Type ToNumber(Type type);
Type ToNumberConvertBigInt(Type type);
+ Type ToBigInt(Type type);
+ Type ToBigIntConvertNumber(Type type);
Type ToNumeric(Type type);
Type ToBoolean(Type type);
@@ -77,6 +80,8 @@ class V8_EXPORT_PRIVATE OperationTyper {
TYPER_SUPPORTED_MACHINE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
+ Type ChangeUint32ToUint64(Type input);
+
// Comparison operators.
Type SameValue(Type lhs, Type rhs);
Type SameValueNumbersOnly(Type lhs, Type rhs);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 2b642fdee1..59e7c4b795 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -126,7 +126,8 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
COMMON_OP_LIST(CASE)
CONTROL_OP_LIST(CASE)
MACHINE_OP_LIST(CASE)
- MACHINE_SIMD_OP_LIST(CASE)
+ MACHINE_SIMD128_OP_LIST(CASE)
+ MACHINE_SIMD256_OP_LIST(CASE)
SIMPLIFIED_OP_LIST(CASE)
break;
#undef CASE
@@ -213,6 +214,8 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
+ case IrOpcode::kJSToBigInt:
+ case IrOpcode::kJSToBigIntConvertNumber:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
diff --git a/deps/v8/src/compiler/phase.h b/deps/v8/src/compiler/phase.h
new file mode 100644
index 0000000000..98e27da6c1
--- /dev/null
+++ b/deps/v8/src/compiler/phase.h
@@ -0,0 +1,40 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PHASE_H_
+#define V8_COMPILER_PHASE_H_
+
+#include "src/logging/runtime-call-stats.h"
+
+#ifdef V8_RUNTIME_CALL_STATS
+#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Kind, Mode) \
+ static constexpr PhaseKind kKind = Kind; \
+ static const char* phase_name() { return "V8.TF" #Name; } \
+ static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
+ RuntimeCallCounterId::kOptimize##Name; \
+ static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;
+#else // V8_RUNTIME_CALL_STATS
+#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Kind, Mode) \
+ static constexpr PhaseKind kKind = Kind; \
+ static const char* phase_name() { return "V8.TF" #Name; }
+#endif // V8_RUNTIME_CALL_STATS
+
+#define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
+ DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, PhaseKind::kTurbofan, \
+ RuntimeCallStats::kThreadSpecific)
+
+#define DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Name) \
+ DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, PhaseKind::kTurbofan, \
+ RuntimeCallStats::kExact)
+
+namespace v8::internal::compiler {
+
+enum class PhaseKind {
+ kTurbofan,
+ kTurboshaft,
+};
+
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_PHASE_H_
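A pipeline phase pulls these constants in via the macro; a rough usage sketch (the names here are illustrative, real examples such as GraphBuilderPhase appear further down in this patch):

  struct ExamplePhase {
    DECL_PIPELINE_PHASE_CONSTANTS(Example)  // sets kKind = PhaseKind::kTurbofan
    void Run(PipelineData* data, Zone* temp_zone) {
      // phase body
    }
  };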
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index f7dba9fdac..58d4b9ae95 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -70,6 +70,7 @@
#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
+#include "src/compiler/phase.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
@@ -80,16 +81,24 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/turboshaft/assembler.h"
-#include "src/compiler/turboshaft/decompression-optimization.h"
-#include "src/compiler/turboshaft/graph-builder.h"
+#include "src/compiler/turboshaft/build-graph-phase.h"
+#include "src/compiler/turboshaft/dead-code-elimination-phase.h"
+#include "src/compiler/turboshaft/decompression-optimization-phase.h"
#include "src/compiler/turboshaft/graph-visualizer.h"
#include "src/compiler/turboshaft/graph.h"
-#include "src/compiler/turboshaft/machine-optimization-reducer.h"
+#include "src/compiler/turboshaft/index.h"
+#include "src/compiler/turboshaft/late-optimization-phase.h"
+#include "src/compiler/turboshaft/machine-lowering-phase.h"
#include "src/compiler/turboshaft/optimization-phase.h"
-#include "src/compiler/turboshaft/recreate-schedule.h"
-#include "src/compiler/turboshaft/select-lowering-reducer.h"
+#include "src/compiler/turboshaft/optimize-phase.h"
+#include "src/compiler/turboshaft/phase.h"
+#include "src/compiler/turboshaft/recreate-schedule-phase.h"
#include "src/compiler/turboshaft/simplify-tf-loops.h"
-#include "src/compiler/turboshaft/value-numbering-reducer.h"
+#include "src/compiler/turboshaft/tag-untag-lowering-phase.h"
+#include "src/compiler/turboshaft/tracing.h"
+#include "src/compiler/turboshaft/type-assertions-phase.h"
+#include "src/compiler/turboshaft/typed-optimizations-phase.h"
+#include "src/compiler/turboshaft/types.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
@@ -104,17 +113,20 @@
#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
+#include "src/logging/runtime-call-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/tracing/trace-event.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#if V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/int64-lowering.h"
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/wasm-escape-analysis.h"
#include "src/compiler/wasm-gc-lowering.h"
#include "src/compiler/wasm-gc-operator-reducer.h"
#include "src/compiler/wasm-inlining.h"
+#include "src/compiler/wasm-load-elimination.h"
#include "src/compiler/wasm-loop-peeling.h"
#include "src/compiler/wasm-typer.h"
#include "src/wasm/function-body-decoder.h"
@@ -122,6 +134,10 @@
#include "src/wasm/wasm-engine.h"
#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_ENABLE_WASM_SIMD256_REVEC
+#include "src/compiler/revectorizer.h"
+#endif // V8_ENABLE_WASM_SIMD256_REVEC
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -357,15 +373,19 @@ class PipelineData {
Zone* graph_zone() const { return graph_zone_; }
Graph* graph() const { return graph_; }
void set_graph(Graph* graph) { graph_ = graph; }
- void CreateTurboshaftGraph() {
- DCHECK_NULL(turboshaft_graph_);
- turboshaft_graph_ = std::make_unique<turboshaft::Graph>(graph_zone_);
+ turboshaft::PipelineData* InitializeTurboshaftPipeline() {
+ DCHECK_EQ(turboshaft_data_, base::nullopt);
+ turboshaft_data_.emplace(info_, schedule_, graph_zone_, broker_, isolate_,
+ source_positions_, node_origins_);
+ return &turboshaft_data_.value();
+ }
+ turboshaft::PipelineData* turboshaft_data() {
+ return turboshaft_data_.has_value() ? &turboshaft_data_.value() : nullptr;
}
- bool HasTurboshaftGraph() const { return turboshaft_graph_ != nullptr; }
- turboshaft::Graph& turboshaft_graph() const { return *turboshaft_graph_; }
SourcePositionTable* source_positions() const { return source_positions_; }
NodeOriginTable* node_origins() const { return node_origins_; }
MachineOperatorBuilder* machine() const { return machine_; }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
@@ -482,7 +502,6 @@ class PipelineData {
if (graph_zone_ == nullptr) return;
graph_zone_ = nullptr;
graph_ = nullptr;
- turboshaft_graph_ = nullptr;
source_positions_ = nullptr;
node_origins_ = nullptr;
simplified_ = nullptr;
@@ -492,6 +511,7 @@ class PipelineData {
jsgraph_ = nullptr;
mcgraph_ = nullptr;
schedule_ = nullptr;
+ if (turboshaft_data_) turboshaft_data_->DeleteGraphZone();
graph_zone_scope_.Destroy();
}
@@ -622,11 +642,25 @@ class PipelineData {
has_js_wasm_calls_ = has_js_wasm_calls;
}
+#if V8_ENABLE_WEBASSEMBLY
+ const wasm::WasmModule* wasm_module_for_inlining() const {
+ return wasm_module_for_inlining_;
+ }
+ void set_wasm_module_for_inlining(const wasm::WasmModule* module) {
+ wasm_module_for_inlining_ = module;
+ }
+#endif
+
private:
Isolate* const isolate_;
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine* const wasm_engine_ = nullptr;
wasm::AssemblerBufferCache* assembler_buffer_cache_ = nullptr;
+ // The wasm module to be used for inlining wasm functions into JS.
+  // The first module wins, and inlining functions from different modules into
+  // the same JS function is not supported. This is necessary because the wasm
+ // instructions use module-specific (non-canonicalized) type indices.
+ const wasm::WasmModule* wasm_module_for_inlining_ = nullptr;
#endif // V8_ENABLE_WEBASSEMBLY
AccountingAllocator* const allocator_;
OptimizedCompilationInfo* const info_;
@@ -647,7 +681,6 @@ class PipelineData {
ZoneStats::Scope graph_zone_scope_;
Zone* graph_zone_ = nullptr;
Graph* graph_ = nullptr;
- std::unique_ptr<turboshaft::Graph> turboshaft_graph_ = nullptr;
SourcePositionTable* source_positions_ = nullptr;
NodeOriginTable* node_origins_ = nullptr;
SimplifiedOperatorBuilder* simplified_ = nullptr;
@@ -699,6 +732,8 @@ class PipelineData {
const ProfileDataFromFile* profile_data_ = nullptr;
bool has_js_wasm_calls_ = false;
+
+ base::Optional<turboshaft::PipelineData> turboshaft_data_ = base::nullopt;
};
class PipelineImpl final {
@@ -720,6 +755,11 @@ class PipelineImpl final {
// Substep B.1. Produce a scheduled graph.
void ComputeScheduledGraph();
+ void InitializeTurboshaftPipeline();
+
+#if V8_ENABLE_WASM_SIMD256_REVEC
+ void Revectorize();
+#endif // V8_ENABLE_WASM_SIMD256_REVEC
// Substep B.2. Select instructions from a scheduled graph.
bool SelectInstructions(Linkage* linkage);
@@ -730,7 +770,10 @@ class PipelineImpl final {
// Step D. Run the code finalization pass.
MaybeHandle<Code> FinalizeCode(bool retire_broker = true);
- // Step E. Install any code dependencies.
+ // Step E. Ensure all embedded maps are non-deprecated.
+ bool CheckNoDeprecatedMaps(Handle<Code> code);
+
+ // Step F. Install any code dependencies.
bool CommitDependencies(Handle<Code> code);
void VerifyGeneratedCodeIsIdempotent();
@@ -999,9 +1042,7 @@ void TraceSchedule(OptimizedCompilationInfo* info, PipelineData* data,
AllowHandleDereference allow_deref;
CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
- tracing_scope.stream()
- << "----- " << phase_name << " -----\n"
- << *schedule;
+ tracing_scope.stream() << "----- " << phase_name << " -----\n" << *schedule;
}
}
@@ -1166,7 +1207,8 @@ namespace {
// is running on a background or foreground thread.
class V8_NODISCARD PipelineJobScope {
public:
- PipelineJobScope(PipelineData* data, RuntimeCallStats* stats) : data_(data) {
+ PipelineJobScope(PipelineData* data, RuntimeCallStats* stats)
+ : data_(data), current_broker_(data_->broker()) {
data_->set_runtime_call_stats(stats);
}
@@ -1176,6 +1218,7 @@ class V8_NODISCARD PipelineJobScope {
HighAllocationThroughputScope high_throughput_scope_{
V8::GetCurrentPlatform()};
PipelineData* data_;
+ CurrentHeapBrokerScope current_broker_;
};
} // namespace
@@ -1274,15 +1317,15 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
}
return FAILED;
}
+ if (!pipeline_.CheckNoDeprecatedMaps(code)) {
+ return RetryOptimization(BailoutReason::kConcurrentMapDeprecation);
+ }
if (!pipeline_.CommitDependencies(code)) {
return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
}
compilation_info()->SetCode(code);
Handle<NativeContext> context(compilation_info()->native_context(), isolate);
- if (CodeKindCanDeoptimize(code->kind())) {
- context->AddOptimizedCode(ToCodeT(*code));
- }
RegisterWeakObjectsInOptimizedCode(isolate, context, code);
return SUCCEEDED;
}
@@ -1320,26 +1363,34 @@ auto PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
#endif
Phase phase;
- return phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
+ if constexpr (Phase::kKind == PhaseKind::kTurbofan) {
+ return phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
+ } else if constexpr (Phase::kKind == PhaseKind::kTurboshaft) {
+ turboshaft::PipelineData* data = this->data_->turboshaft_data();
+ using result_t =
+ decltype(phase.Run(data, scope.zone(), std::forward<Args>(args)...));
+ CodeTracer* code_tracer = nullptr;
+ if (data->info()->trace_turbo_graph()) {
+ // NOTE: We must not call `GetCodeTracer` if tracing is not enabled,
+        // because it may not yet be initialized at that point, and doing so
+        // from a background thread is not thread-safe.
+ code_tracer = this->data_->GetCodeTracer();
+ }
+ if constexpr (std::is_same_v<result_t, void>) {
+ phase.Run(data, scope.zone(), std::forward<Args>(args)...);
+ turboshaft::PrintTurboshaftGraph(data, scope.zone(), code_tracer,
+ Phase::phase_name());
+ return;
+ } else {
+ auto result = phase.Run(data, scope.zone(), std::forward<Args>(args)...);
+ turboshaft::PrintTurboshaftGraph(data, scope.zone(), code_tracer,
+ Phase::phase_name());
+ return result;
+ }
+ }
+ UNREACHABLE();
}
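Given this dispatch, a Turboshaft phase is expected to advertise kKind == PhaseKind::kTurboshaft and to take a turboshaft::PipelineData*. A rough sketch of that shape (names are illustrative; the actual declarations are presumably provided by the turboshaft headers included earlier in this file):

  struct ExampleTurboshaftPhase {
    static constexpr PhaseKind kKind = PhaseKind::kTurboshaft;
    static const char* phase_name() { return "V8.TSExample"; }
    void Run(turboshaft::PipelineData* data, Zone* temp_zone);
  };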
-#ifdef V8_RUNTIME_CALL_STATS
-#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode) \
- static const char* phase_name() { return "V8.TF" #Name; } \
- static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
- RuntimeCallCounterId::kOptimize##Name; \
- static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;
-#else // V8_RUNTIME_CALL_STATS
-#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode) \
- static const char* phase_name() { return "V8.TF" #Name; }
-#endif // V8_RUNTIME_CALL_STATS
-
-#define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
- DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kThreadSpecific)
-
-#define DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Name) \
- DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kExact)
-
struct GraphBuilderPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BytecodeGraphBuilder)
@@ -1352,15 +1403,16 @@ struct GraphBuilderPhase {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
- JSFunctionRef closure = MakeRef(data->broker(), data->info()->closure());
+ JSHeapBroker* broker = data->broker();
+ UnparkedScopeIfNeeded scope(broker);
+ JSFunctionRef closure = MakeRef(broker, data->info()->closure());
CallFrequency frequency(1.0f);
BuildGraphFromBytecode(
- data->broker(), temp_zone, closure.shared(),
- closure.raw_feedback_cell(data->dependencies()),
- data->info()->osr_offset(), data->jsgraph(), frequency,
- data->source_positions(), data->node_origins(),
- SourcePosition::kNotInlined, data->info()->code_kind(),
- flags, &data->info()->tick_counter(),
+ broker, temp_zone, closure.shared(broker),
+ closure.raw_feedback_cell(broker), data->info()->osr_offset(),
+ data->jsgraph(), frequency, data->source_positions(),
+ data->node_origins(), SourcePosition::kNotInlined,
+ data->info()->code_kind(), flags, &data->info()->tick_counter(),
ObserveNodeInfo{data->observe_node_manager(),
data->info()->node_observer()});
}
@@ -1404,12 +1456,12 @@ struct InliningPhase {
// JSNativeContextSpecialization allocates out-of-heap objects
// that need to live until code generation.
JSNativeContextSpecialization native_context_specialization(
- &graph_reducer, data->jsgraph(), data->broker(), flags,
- data->dependencies(), temp_zone, info->zone());
- JSInliningHeuristic inlining(
- &graph_reducer, temp_zone, data->info(), data->jsgraph(),
- data->broker(), data->source_positions(), data->node_origins(),
- JSInliningHeuristic::kJSOnly);
+ &graph_reducer, data->jsgraph(), data->broker(), flags, temp_zone,
+ info->zone());
+ JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
+ data->jsgraph(), data->broker(),
+ data->source_positions(), data->node_origins(),
+ JSInliningHeuristic::kJSOnly);
JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
data->broker());
@@ -1426,10 +1478,15 @@ struct InliningPhase {
graph_reducer.ReduceGraph();
info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
+#if V8_ENABLE_WEBASSEMBLY
// Skip the "wasm-inlining" phase if there are no Wasm function calls.
if (call_reducer.has_wasm_calls()) {
data->set_has_js_wasm_calls(true);
+ DCHECK(call_reducer.wasm_module_for_inlining() != nullptr);
+ data->set_wasm_module_for_inlining(
+ call_reducer.wasm_module_for_inlining());
}
+#endif
}
};
@@ -1438,8 +1495,10 @@ struct JSWasmInliningPhase {
DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->has_js_wasm_calls());
+ DCHECK(data->wasm_module_for_inlining() != nullptr);
OptimizedCompilationInfo* info = data->info();
+ info->set_wasm_runtime_exception_support();
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->broker(), data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -1447,14 +1506,19 @@ struct JSWasmInliningPhase {
CommonOperatorReducer common_reducer(
&graph_reducer, data->graph(), data->broker(), data->common(),
data->machine(), temp_zone, BranchSemantics::kMachine);
- JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
- data->jsgraph(), data->broker(),
- data->source_positions(),
- data->node_origins(),
- JSInliningHeuristic::kWasmOnly);
+ JSInliningHeuristic inlining(
+ &graph_reducer, temp_zone, data->info(), data->jsgraph(),
+ data->broker(), data->source_positions(), data->node_origins(),
+ JSInliningHeuristic::kWasmOnly, data->wasm_module_for_inlining());
+ // The Wasm trap handler is not supported in JavaScript.
+ const bool disable_trap_handler = true;
+ WasmGCLowering lowering(&graph_reducer, data->jsgraph(),
+ data->wasm_module_for_inlining(),
+ disable_trap_handler, data->source_positions());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &inlining);
+ AddReducer(data, &graph_reducer, &lowering);
graph_reducer.ReduceGraph();
}
};
@@ -1541,9 +1605,8 @@ struct TypedLoweringPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
- data->jsgraph(), data->broker(),
- temp_zone);
+ JSCreateLowering create_lowering(&graph_reducer, data->jsgraph(),
+ data->broker(), temp_zone);
JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
data->broker(), temp_zone);
ConstantFoldingReducer constant_folding_reducer(
@@ -1574,7 +1637,6 @@ struct TypedLoweringPhase {
}
};
-
struct EscapeAnalysisPhase {
DECL_PIPELINE_PHASE_CONSTANTS(EscapeAnalysis)
@@ -1661,8 +1723,8 @@ struct WasmInliningPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
- uint32_t function_index, const wasm::WireBytesStorage* wire_bytes,
- std::vector<compiler::WasmLoopInfo>* loop_info) {
+ WasmCompilationData& compilation_data,
+ ZoneVector<WasmInliningPosition>* inlining_positions) {
if (!WasmInliner::graph_size_allows_inlining(data->graph()->NodeCount())) {
return;
}
@@ -1672,10 +1734,8 @@ struct WasmInliningPhase {
DeadCodeElimination dead(&graph_reducer, data->graph(), data->common(),
temp_zone);
std::unique_ptr<char[]> debug_name = data->info()->GetDebugName();
- WasmInliner inliner(&graph_reducer, env, function_index,
- data->source_positions(), data->node_origins(),
- data->mcgraph(), wire_bytes, loop_info,
- debug_name.get());
+ WasmInliner inliner(&graph_reducer, env, compilation_data, data->mcgraph(),
+ debug_name.get(), inlining_positions);
AddReducer(data, &graph_reducer, &dead);
AddReducer(data, &graph_reducer, &inliner);
graph_reducer.ReduceGraph();
@@ -1715,7 +1775,8 @@ struct WasmLoopUnrollingPhase {
loop_info.header, all_nodes, temp_zone,
// Only discover the loop as long as its size stays within the maximum
// unrollable size for its depth.
- maximum_unrollable_size(loop_info.nesting_depth), true);
+ maximum_unrollable_size(loop_info.nesting_depth),
+ LoopFinder::Purpose::kLoopUnrolling);
if (loop == nullptr) continue;
UnrollLoop(loop_info.header, loop, loop_info.nesting_depth, data->graph(),
data->common(), temp_zone, data->source_positions(),
@@ -1737,8 +1798,15 @@ struct WasmLoopPeelingPhase {
ZoneUnorderedSet<Node*>* loop =
LoopFinder::FindSmallInnermostLoopFromHeader(
loop_info.header, all_nodes, temp_zone,
- v8_flags.wasm_loop_peeling_max_size, false);
+ v8_flags.wasm_loop_peeling_max_size,
+ LoopFinder::Purpose::kLoopPeeling);
if (loop == nullptr) continue;
+ if (v8_flags.trace_wasm_loop_peeling) {
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ auto& os = tracing_scope.stream();
+ os << "Peeling loop at " << loop_info.header->id() << ", size "
+ << loop->size() << std::endl;
+ }
PeelWasmLoop(loop_info.header, loop, data->graph(), data->common(),
temp_zone, data->source_positions(), data->node_origins());
}
@@ -1787,9 +1855,12 @@ struct EarlyOptimizationPhase {
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
data->broker(),
BranchSemantics::kMachine);
- RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
+ RedundancyElimination redundancy_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
+ MachineOperatorReducer machine_reducer(
+ &graph_reducer, data->jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan);
CommonOperatorReducer common_reducer(
&graph_reducer, data->graph(), data->broker(), data->common(),
data->machine(), temp_zone, BranchSemantics::kMachine);
@@ -1840,6 +1911,9 @@ struct EffectControlLinearizationPhase {
TraceScheduleAndVerify(data->info(), data, schedule,
"effect linearization schedule");
+ // LinearizeEffectControl accesses the heap for StringBuilderOptimizer.
+ UnparkedScopeIfNeeded scope(data->broker());
+
// Post-pass for wiring the control/effects
// - connect allocating representation changes into the control&effect
// chains and lower them,
@@ -1900,7 +1974,8 @@ struct LoadEliminationPhase {
&graph_reducer, data->jsgraph(), temp_zone, BranchElimination::kEARLY);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
+ RedundancyElimination redundancy_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
@@ -1949,7 +2024,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
- data->jsgraph(), temp_zone,
+ data->broker(), data->jsgraph(), temp_zone,
data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
@@ -1962,56 +2037,54 @@ struct LateOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- if (data->HasTurboshaftGraph()) {
- // TODO(dmercadier,tebbi): add missing reducers (LateEscapeAnalysis,
- // BranchElimination, MachineOperatorReducer and CommonOperatorReducer).
- turboshaft::OptimizationPhase<
- turboshaft::SelectLoweringReducer,
- turboshaft::ValueNumberingReducer>::Run(&data->turboshaft_graph(),
- temp_zone,
- data->node_origins());
- } else {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead(),
- data->observe_node_manager());
- LateEscapeAnalysis escape_analysis(&graph_reducer, data->graph(),
- data->common(), temp_zone);
- BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
- ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
- CommonOperatorReducer common_reducer(
- &graph_reducer, data->graph(), data->broker(), data->common(),
- data->machine(), temp_zone, BranchSemantics::kMachine);
- JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone,
- BranchSemantics::kMachine);
- SelectLowering select_lowering(&graph_assembler, data->graph());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
+ LateEscapeAnalysis escape_analysis(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ MachineOperatorReducer machine_reducer(
+ &graph_reducer, data->jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan);
+ CommonOperatorReducer common_reducer(
+ &graph_reducer, data->graph(), data->broker(), data->common(),
+ data->machine(), temp_zone, BranchSemantics::kMachine);
+ JSGraphAssembler graph_assembler(data->broker(), data->jsgraph(), temp_zone,
+ BranchSemantics::kMachine);
+ SelectLowering select_lowering(&graph_assembler, data->graph());
+ if (!v8_flags.turboshaft) {
AddReducer(data, &graph_reducer, &escape_analysis);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
- AddReducer(data, &graph_reducer, &dead_code_elimination);
+ }
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ if (!v8_flags.turboshaft) {
AddReducer(data, &graph_reducer, &machine_reducer);
- AddReducer(data, &graph_reducer, &common_reducer);
- if (!v8_flags.turboshaft) {
- AddReducer(data, &graph_reducer, &select_lowering);
- AddReducer(data, &graph_reducer, &value_numbering);
- }
- graph_reducer.ReduceGraph();
}
+ AddReducer(data, &graph_reducer, &common_reducer);
+ if (!v8_flags.turboshaft) {
+ AddReducer(data, &graph_reducer, &select_lowering);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ }
+ graph_reducer.ReduceGraph();
}
};
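In effect, with --turboshaft enabled this phase now only registers dead_code_elimination and common_reducer; with the flag off it additionally registers escape_analysis, branch_condition_elimination, machine_reducer, select_lowering and value_numbering, matching the previous non-Turboshaft branch.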
struct MachineOperatorOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone,
+ MachineOperatorReducer::SignallingNanPropagation
+ signalling_nan_propagation) {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
+ MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
+ signalling_nan_propagation);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
@@ -2037,14 +2110,9 @@ struct DecompressionOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
if (!COMPRESS_POINTERS_BOOL) return;
- if (data->HasTurboshaftGraph()) {
- turboshaft::RunDecompressionOptimization(data->turboshaft_graph(),
- temp_zone);
- } else {
- DecompressionOptimizer decompression_optimizer(
- temp_zone, data->graph(), data->common(), data->machine());
- decompression_optimizer.Reduce();
- }
+ DecompressionOptimizer decompression_optimizer(
+ temp_zone, data->graph(), data->common(), data->machine());
+ decompression_optimizer.Reduce();
}
};
@@ -2058,49 +2126,6 @@ struct BranchConditionDuplicationPhase {
}
};
-struct BuildTurboshaftPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(BuildTurboshaft)
-
- base::Optional<BailoutReason> Run(PipelineData* data, Zone* temp_zone) {
- Schedule* schedule = data->schedule();
- data->reset_schedule();
- data->CreateTurboshaftGraph();
- if (auto bailout = turboshaft::BuildGraph(
- schedule, data->graph_zone(), temp_zone, &data->turboshaft_graph(),
- data->source_positions(), data->node_origins())) {
- return bailout;
- }
- return {};
- }
-};
-
-struct OptimizeTurboshaftPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(OptimizeTurboshaft)
-
- void Run(PipelineData* data, Zone* temp_zone) {
- UnparkedScopeIfNeeded scope(data->broker(),
- v8_flags.turboshaft_trace_reduction);
- turboshaft::OptimizationPhase<
- turboshaft::MachineOptimizationReducerSignallingNanImpossible,
- turboshaft::ValueNumberingReducer>::Run(&data->turboshaft_graph(),
- temp_zone,
- data->node_origins());
- }
-};
-
-struct TurboshaftRecreateSchedulePhase {
- DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftRecreateSchedule)
-
- void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- auto result = turboshaft::RecreateSchedule(
- data->turboshaft_graph(), linkage->GetIncomingDescriptor(),
- data->graph_zone(), temp_zone, data->source_positions(),
- data->node_origins());
- data->set_graph(result.graph);
- data->set_schedule(result.schedule);
- }
-};
-
#if V8_ENABLE_WEBASSEMBLY
struct WasmTypingPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmTyping)
@@ -2123,8 +2148,13 @@ struct WasmGCOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
+ WasmLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+ temp_zone);
WasmGCOperatorReducer wasm_gc(&graph_reducer, temp_zone, data->mcgraph(),
module);
+ // Note: if we want to add DeadCodeElimination here, we'll have to update
+ // the existing reducers to handle kDead and kDeadValue nodes everywhere.
+ AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &wasm_gc);
graph_reducer.ReduceGraph();
}
@@ -2151,7 +2181,8 @@ struct WasmGCLoweringPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- WasmGCLowering lowering(&graph_reducer, data->mcgraph(), module);
+ WasmGCLowering lowering(&graph_reducer, data->mcgraph(), module, false,
+ data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
AddReducer(data, &graph_reducer, &lowering);
@@ -2163,18 +2194,21 @@ struct WasmGCLoweringPhase {
struct WasmOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)
- void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
+ void Run(PipelineData* data, Zone* temp_zone,
+ MachineOperatorReducer::SignallingNanPropagation
+ signalling_nan_propagation,
+ wasm::WasmFeatures features) {
// Run optimizations in two rounds: First one around load elimination and
// then one around branch elimination. This is because those two
// optimizations sometimes display quadratic complexity when run together.
// We only need load elimination for managed objects.
- if (v8_flags.experimental_wasm_gc) {
+ if (features.has_gc()) {
GraphReducer graph_reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(),
data->observe_node_manager());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
- allow_signalling_nan);
+ signalling_nan_propagation);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(
@@ -2198,7 +2232,7 @@ struct WasmOptimizationPhase {
data->jsgraph()->Dead(),
data->observe_node_manager());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
- allow_signalling_nan);
+ signalling_nan_propagation);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(
@@ -2230,8 +2264,9 @@ struct CsaEarlyOptimizationPhase {
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(),
data->observe_node_manager());
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
- true);
+ MachineOperatorReducer machine_reducer(
+ &graph_reducer, data->jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(
@@ -2252,8 +2287,9 @@ struct CsaEarlyOptimizationPhase {
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(),
data->observe_node_manager());
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
- true);
+ MachineOperatorReducer machine_reducer(
+ &graph_reducer, data->jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(
@@ -2275,7 +2311,7 @@ struct CsaEarlyOptimizationPhase {
struct CsaOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)
- void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
+ void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
@@ -2283,8 +2319,9 @@ struct CsaOptimizationPhase {
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
- allow_signalling_nan);
+ MachineOperatorReducer machine_reducer(
+ &graph_reducer, data->jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan);
CommonOperatorReducer common_reducer(
&graph_reducer, data->graph(), data->broker(), data->common(),
data->machine(), temp_zone, BranchSemantics::kMachine);
@@ -2311,6 +2348,17 @@ struct ComputeSchedulePhase {
}
};
+#if V8_ENABLE_WASM_SIMD256_REVEC
+struct RevectorizePhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(Revectorizer)
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ Revectorizer revec(temp_zone, data->graph(), data->mcgraph());
+ revec.TryRevectorize(data->info()->GetDebugName().get());
+ }
+};
+#endif // V8_ENABLE_WASM_SIMD256_REVEC
+
struct InstructionRangesAsJSON {
const InstructionSequence* sequence;
const ZoneVector<std::pair<int, int>>* instr_origins;
@@ -2388,8 +2436,8 @@ struct InstructionSelectionPhase {
struct BitcastElisionPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BitcastElision)
- void Run(PipelineData* data, Zone* temp_zone) {
- BitcastElider bitcast_optimizer(temp_zone, data->graph());
+ void Run(PipelineData* data, Zone* temp_zone, bool is_builtin) {
+ BitcastElider bitcast_optimizer(temp_zone, data->graph(), is_builtin);
bitcast_optimizer.Reduce();
}
};
@@ -2402,7 +2450,6 @@ struct MeetRegisterConstraintsPhase {
}
};
-
struct ResolvePhisPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ResolvePhis)
@@ -2412,7 +2459,6 @@ struct ResolvePhisPhase {
}
};
-
struct BuildLiveRangesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRanges)
@@ -2483,7 +2529,6 @@ struct AssignSpillSlotsPhase {
}
};
-
struct CommitAssignmentPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CommitAssignment)
@@ -2493,7 +2538,6 @@ struct CommitAssignmentPhase {
}
};
-
struct PopulateReferenceMapsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(PopulatePointerMaps)
@@ -2503,7 +2547,6 @@ struct PopulateReferenceMapsPhase {
}
};
-
struct ConnectRangesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ConnectRanges)
@@ -2513,7 +2556,6 @@ struct ConnectRangesPhase {
}
};
-
struct ResolveControlFlowPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ResolveControlFlow)
@@ -2643,44 +2685,6 @@ struct PrintGraphPhase {
}
};
-struct PrintTurboshaftGraphPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(PrintTurboshaftGraph)
-
- void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
- if (data->info()->trace_turbo_json()) {
- UnparkedScopeIfNeeded scope(data->broker());
- AllowHandleDereference allow_deref;
-
- {
- TurboJsonFile json_of(data->info(), std::ios_base::app);
- json_of << "{\"name\":\"" << phase
- << "\",\"type\":\"turboshaft_graph\",\"data\":"
- << AsJSON(data->turboshaft_graph(), data->node_origins(),
- temp_zone)
- << "},\n";
- }
- PrintTurboshaftCustomDataPerOperation(
- data->info(), "Properties", data->turboshaft_graph(),
- [](std::ostream& stream, const turboshaft::Graph& graph,
- turboshaft::OpIndex index) -> bool {
- const auto& op = graph.Get(index);
- op.PrintOptions(stream);
- return true;
- });
- }
-
- if (data->info()->trace_turbo_graph()) {
- UnparkedScopeIfNeeded scope(data->broker());
- AllowHandleDereference allow_deref;
-
- CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
- tracing_scope.stream()
- << "\n----- " << phase << " -----\n"
- << data->turboshaft_graph();
- }
- }
-};
-
struct VerifyGraphPhase {
DECL_PIPELINE_PHASE_CONSTANTS(VerifyGraph)
@@ -2714,8 +2718,7 @@ class WasmHeapStubCompilationJob final : public TurbofanCompilationJob {
WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph,
CodeKind kind, std::unique_ptr<char[]> debug_name,
- const AssemblerOptions& options,
- SourcePositionTable* source_positions)
+ const AssemblerOptions& options)
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
@@ -2727,7 +2730,7 @@ class WasmHeapStubCompilationJob final : public TurbofanCompilationJob {
zone_(std::move(zone)),
graph_(graph),
data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(),
- graph_, nullptr, nullptr, source_positions,
+ graph_, nullptr, nullptr, nullptr,
zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
pipeline_(&data_) {}
@@ -2756,11 +2759,10 @@ class WasmHeapStubCompilationJob final : public TurbofanCompilationJob {
std::unique_ptr<TurbofanCompilationJob> Pipeline::NewWasmHeapStubCompilationJob(
Isolate* isolate, CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph, CodeKind kind,
- std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
- SourcePositionTable* source_positions) {
+ std::unique_ptr<char[]> debug_name, const AssemblerOptions& options) {
return std::make_unique<WasmHeapStubCompilationJob>(
isolate, call_descriptor, std::move(zone), graph, kind,
- std::move(debug_name), options, source_positions);
+ std::move(debug_name), options);
}
CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
@@ -2997,15 +2999,16 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<LateOptimizationPhase>();
RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);
- // Optimize memory access and allocation operations.
- Run<MemoryOptimizationPhase>();
- RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
-
if (!v8_flags.turboshaft) {
+ // Optimize memory access and allocation operations.
+ Run<MemoryOptimizationPhase>();
+ RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
// Run value numbering and machine operator reducer to optimize load/store
// address computation (in particular, reuse the address computation
// whenever possible).
- Run<MachineOperatorOptimizationPhase>();
+ Run<MachineOperatorOptimizationPhase>(
+ MachineOperatorReducer::kPropagateSignallingNan);
RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);
Run<DecompressionOptimizationPhase>();
@@ -3023,26 +3026,43 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
ComputeScheduledGraph();
if (v8_flags.turboshaft) {
- if (base::Optional<BailoutReason> bailout = Run<BuildTurboshaftPhase>()) {
+ UnparkedScopeIfNeeded scope(data->broker(),
+ v8_flags.turboshaft_trace_reduction);
+
+ data->InitializeTurboshaftPipeline();
+ turboshaft::Tracing::Scope tracing_scope(data->info());
+
+ if (base::Optional<BailoutReason> bailout =
+ Run<turboshaft::BuildGraphPhase>(linkage)) {
info()->AbortOptimization(*bailout);
data->EndPhaseKind();
return false;
}
- Run<PrintTurboshaftGraphPhase>(BuildTurboshaftPhase::phase_name());
- Run<LateOptimizationPhase>();
- Run<PrintTurboshaftGraphPhase>(LateOptimizationPhase::phase_name());
+ Run<turboshaft::MachineLoweringPhase>();
- Run<OptimizeTurboshaftPhase>();
- Run<PrintTurboshaftGraphPhase>(OptimizeTurboshaftPhase::phase_name());
+ Run<turboshaft::LateOptimizationPhase>();
- Run<DecompressionOptimizationPhase>();
- Run<PrintTurboshaftGraphPhase>(
- DecompressionOptimizationPhase::phase_name());
+ Run<turboshaft::OptimizePhase>();
+
+ Run<turboshaft::DecompressionOptimizationPhase>();
+
+ Run<turboshaft::TypedOptimizationsPhase>();
- Run<TurboshaftRecreateSchedulePhase>(linkage);
+ if (v8_flags.turboshaft_assert_types) {
+ Run<turboshaft::TypeAssertionsPhase>();
+ }
+
+ Run<turboshaft::DeadCodeEliminationPhase>();
+
+ Run<turboshaft::TagUntagLoweringPhase>();
+
+ auto [new_graph, new_schedule] =
+ Run<turboshaft::RecreateSchedulePhase>(linkage);
+ data->set_graph(new_graph);
+ data->set_schedule(new_schedule);
TraceSchedule(data->info(), data, data->schedule(),
- TurboshaftRecreateSchedulePhase::phase_name());
+ turboshaft::RecreateSchedulePhase::phase_name());
}
return SelectInstructions(linkage);
@@ -3164,6 +3184,39 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
}
+ int initial_graph_hash = 0;
+ if (v8_flags.turbo_profiling || profile_data != nullptr) {
+ initial_graph_hash = HashGraphForPGO(data.graph());
+ }
+
+ if (profile_data != nullptr && profile_data->hash() != initial_graph_hash) {
+ if (v8_flags.abort_on_bad_builtin_profile_data ||
+ v8_flags.warn_about_builtin_profile_data) {
+ base::EmbeddedVector<char, 256> msg;
+ SNPrintF(msg,
+ "Rejected profile data for %s due to function change. "
+ "Please use tools/builtins-pgo/generate.py to refresh it.",
+ debug_name);
+ if (v8_flags.abort_on_bad_builtin_profile_data) {
+          // mksnapshot might fail here for the following reasons:
+          // * builtins were changed since the builtins profile was generated,
+ // * current build options affect builtins code and they don't match
+ // the options used for building the profile (for example, it might
+ // be because of gn argument 'dcheck_always_on=true').
+ // To fix the issue one must either update the builtins PGO profiles
+ // (see tools/builtins-pgo/generate.py) or disable builtins PGO by
+ // setting gn argument v8_builtins_profiling_log_file="".
+          // One might also need to update tools/builtins-pgo/generate.py if
+ // the set of default release arguments has changed.
+ FATAL("%s", msg.begin());
+ } else {
+ PrintF("%s\n", msg.begin());
+ }
+ }
+ profile_data = nullptr;
+ data.set_profile_data(profile_data);
+ }
+
pipeline.Run<CsaEarlyOptimizationPhase>();
pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
@@ -3171,7 +3224,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
- pipeline.Run<CsaOptimizationPhase>(true);
+ pipeline.Run<CsaOptimizationPhase>();
pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
pipeline.Run<DecompressionOptimizationPhase>();
@@ -3184,22 +3237,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<VerifyGraphPhase>(true);
- int graph_hash_before_scheduling = 0;
- if (v8_flags.turbo_profiling || profile_data != nullptr) {
- graph_hash_before_scheduling = HashGraphForPGO(data.graph());
- }
-
- if (profile_data != nullptr &&
- profile_data->hash() != graph_hash_before_scheduling) {
- if (v8_flags.warn_about_builtin_profile_data) {
- PrintF("Rejected profile data for %s due to function change\n",
- debug_name);
- PrintF("Please use tools/builtins-pgo/generate.py to refresh it.\n");
- }
- profile_data = nullptr;
- data.set_profile_data(profile_data);
- }
-
pipeline.ComputeScheduledGraph();
DCHECK_NOT_NULL(data.schedule());
@@ -3218,7 +3255,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
if (v8_flags.turbo_profiling) {
- info.profiler_data()->SetHash(graph_hash_before_scheduling);
+ info.profiler_data()->SetHash(initial_graph_hash);
}
if (jump_opt.is_optimizable()) {
@@ -3305,10 +3342,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
CodeGenerator* code_generator = pipeline.code_generator();
wasm::WasmCompilationResult result;
- code_generator->tasm()->GetCode(
+ code_generator->masm()->GetCode(
nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
static_cast<int>(code_generator->handler_table_offset()));
- result.instr_buffer = code_generator->tasm()->ReleaseBuffer();
+ result.instr_buffer = code_generator->masm()->ReleaseBuffer();
result.source_positions = code_generator->GetSourcePositionTable();
result.protected_instructions_data =
code_generator->GetProtectedInstructionsData();
@@ -3351,26 +3388,69 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
return result;
}
+namespace {
+
+void LowerInt64(const wasm::FunctionSig* sig, MachineGraph* mcgraph,
+ SimplifiedOperatorBuilder* simplified, PipelineImpl& pipeline) {
+ if (mcgraph->machine()->Is64()) return;
+
+ Signature<MachineRepresentation>::Builder builder(
+ mcgraph->zone(), sig->return_count(), sig->parameter_count());
+ for (auto ret : sig->returns()) {
+ builder.AddReturn(ret.machine_representation());
+ }
+ for (auto param : sig->parameters()) {
+ builder.AddParam(param.machine_representation());
+ }
+ Signature<MachineRepresentation>* signature = builder.Build();
+
+ Int64Lowering r(mcgraph->graph(), mcgraph->machine(), mcgraph->common(),
+ simplified, mcgraph->zone(), signature);
+ r.LowerGraph();
+ pipeline.RunPrintAndVerify("V8.Int64Lowering", true);
+}
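As a sketch of what this helper achieves (assuming the usual behaviour of Int64Lowering): for a wasm signature such as i64(i32, i64), the builder above produces machine representations returns = [kWord64] and params = [kWord32, kWord64]; on a 32-bit target the lowering then rewrites the graph so that each kWord64 value is carried as a pair of kWord32 values, while on 64-bit targets the early return skips the pass entirely.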
+
+base::OwnedVector<byte> SerializeInliningPositions(
+ const ZoneVector<WasmInliningPosition>& positions) {
+ const size_t entry_size =
+ sizeof positions[0].inlinee_func_index + sizeof positions[0].caller_pos;
+ auto result = base::OwnedVector<byte>::New(positions.size() * entry_size);
+ byte* iter = result.begin();
+ for (const auto& [func_index, caller_pos] : positions) {
+ size_t index_size = sizeof func_index;
+ std::memcpy(iter, &func_index, index_size);
+ iter += index_size;
+ size_t pos_size = sizeof caller_pos;
+ std::memcpy(iter, &caller_pos, pos_size);
+ iter += pos_size;
+ }
+ DCHECK_EQ(iter, result.end());
+ return result;
+}
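The serialized layout is simply the two fields of each position written back to back; e.g. (sketch) for positions {f0, p0} and {f1, p1} the result is [ f0 bytes | p0 bytes | f1 bytes | p1 bytes ], i.e. sizeof inlinee_func_index + sizeof caller_pos bytes per entry.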
+
+} // namespace
+
// static
void Pipeline::GenerateCodeForWasmFunction(
OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
- const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
- CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, wasm::FunctionBody function_body,
- const wasm::WasmModule* module, int function_index,
- std::vector<compiler::WasmLoopInfo>* loop_info,
- wasm::AssemblerBufferCache* buffer_cache) {
+ WasmCompilationData& compilation_data, MachineGraph* mcgraph,
+ CallDescriptor* call_descriptor,
+ ZoneVector<WasmInliningPosition>* inlining_positions) {
auto* wasm_engine = wasm::GetWasmEngine();
+ const wasm::WasmModule* module = env->module;
+ wasm::WasmFeatures features = env->enabled_features;
base::TimeTicks start_time;
if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
start_time = base::TimeTicks::Now();
}
ZoneStats zone_stats(wasm_engine->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
- CreatePipelineStatistics(function_body, module, info, &zone_stats));
- PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
- pipeline_statistics.get(), source_positions, node_origins,
- WasmAssemblerOptions(), buffer_cache);
+ CreatePipelineStatistics(compilation_data.func_body, module, info,
+ &zone_stats));
+ PipelineData data(
+ &zone_stats, wasm_engine, info, mcgraph, pipeline_statistics.get(),
+ compilation_data.source_positions, compilation_data.node_origins,
+ WasmAssemblerOptions(), compilation_data.buffer_cache);
PipelineImpl pipeline(&data);
@@ -3384,24 +3464,33 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
+#if V8_ENABLE_WASM_SIMD256_REVEC
+ if (v8_flags.experimental_wasm_revectorize) {
+ pipeline.Revectorize();
+ pipeline.RunPrintAndVerify("V8.WasmRevec", true);
+ }
+#endif // V8_ENABLE_WASM_SIMD256_REVEC
+
data.BeginPhaseKind("V8.WasmOptimization");
- if (v8_flags.wasm_inlining) {
- pipeline.Run<WasmInliningPhase>(env, function_index, wire_bytes_storage,
- loop_info);
+ if (features.has_inlining()) {
+ pipeline.Run<WasmInliningPhase>(env, compilation_data, inlining_positions);
pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
}
if (v8_flags.wasm_loop_peeling) {
- pipeline.Run<WasmLoopPeelingPhase>(loop_info);
+ pipeline.Run<WasmLoopPeelingPhase>(compilation_data.loop_infos);
pipeline.RunPrintAndVerify(WasmLoopPeelingPhase::phase_name(), true);
}
if (v8_flags.wasm_loop_unrolling) {
- pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
+ pipeline.Run<WasmLoopUnrollingPhase>(compilation_data.loop_infos);
pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
}
const bool is_asm_js = is_asmjs_module(module);
+ MachineOperatorReducer::SignallingNanPropagation signalling_nan_propagation =
+ is_asm_js ? MachineOperatorReducer::kPropagateSignallingNan
+ : MachineOperatorReducer::kSilenceSignallingNan;
- if (v8_flags.experimental_wasm_gc || v8_flags.experimental_wasm_stringref) {
- pipeline.Run<WasmTypingPhase>(function_index);
+ if (features.has_gc() || features.has_stringref()) {
+ pipeline.Run<WasmTypingPhase>(compilation_data.func_index);
pipeline.RunPrintAndVerify(WasmTypingPhase::phase_name(), true);
if (v8_flags.wasm_opt) {
pipeline.Run<WasmGCOptimizationPhase>(module);
@@ -3410,15 +3499,21 @@ void Pipeline::GenerateCodeForWasmFunction(
}
// These proposals use gc nodes.
- if (v8_flags.experimental_wasm_gc ||
- v8_flags.experimental_wasm_typed_funcref ||
- v8_flags.experimental_wasm_stringref) {
+ if (features.has_gc() || features.has_typed_funcref() ||
+ features.has_stringref()) {
pipeline.Run<WasmGCLoweringPhase>(module);
pipeline.RunPrintAndVerify(WasmGCLoweringPhase::phase_name(), true);
}
+ // Int64Lowering must happen after inlining (otherwise inlining would have
+ // to invoke it separately for the inlined function body).
+ // It must also happen after WasmGCLowering, otherwise it would have to
+ // add type annotations to nodes it creates, and handle wasm-gc nodes.
+ LowerInt64(compilation_data.func_body.sig, mcgraph, data.simplified(),
+ pipeline);
+
if (v8_flags.wasm_opt || is_asm_js) {
- pipeline.Run<WasmOptimizationPhase>(is_asm_js);
+ pipeline.Run<WasmOptimizationPhase>(signalling_nan_propagation, features);
pipeline.RunPrintAndVerify(WasmOptimizationPhase::phase_name(), true);
} else {
pipeline.Run<WasmBaseOptimizationPhase>();
@@ -3428,11 +3523,11 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
- if (v8_flags.experimental_wasm_gc && v8_flags.wasm_opt) {
+ if (features.has_gc() && v8_flags.wasm_opt) {
// Run value numbering and machine operator reducer to optimize load/store
// address computation (in particular, reuse the address computation
// whenever possible).
- pipeline.Run<MachineOperatorOptimizationPhase>();
+ pipeline.Run<MachineOperatorOptimizationPhase>(signalling_nan_propagation);
pipeline.RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(),
true);
if (!v8_flags.turboshaft_wasm) {
@@ -3467,26 +3562,26 @@ void Pipeline::GenerateCodeForWasmFunction(
Linkage linkage(call_descriptor);
if (v8_flags.turboshaft_wasm) {
+ pipeline.InitializeTurboshaftPipeline();
+
if (base::Optional<BailoutReason> bailout =
- pipeline.Run<BuildTurboshaftPhase>()) {
+ pipeline.Run<turboshaft::BuildGraphPhase>(&linkage)) {
pipeline.info()->AbortOptimization(*bailout);
data.EndPhaseKind();
info->SetWasmCompilationResult({});
return;
}
- pipeline.Run<PrintTurboshaftGraphPhase>(BuildTurboshaftPhase::phase_name());
- pipeline.Run<OptimizeTurboshaftPhase>();
- pipeline.Run<PrintTurboshaftGraphPhase>(
- OptimizeTurboshaftPhase::phase_name());
+ pipeline.Run<turboshaft::OptimizePhase>();
- pipeline.Run<DecompressionOptimizationPhase>();
- pipeline.Run<PrintTurboshaftGraphPhase>(
- DecompressionOptimizationPhase::phase_name());
+ pipeline.Run<turboshaft::DecompressionOptimizationPhase>();
- pipeline.Run<TurboshaftRecreateSchedulePhase>(&linkage);
+ auto [new_graph, new_schedule] =
+ pipeline.Run<turboshaft::RecreateSchedulePhase>(&linkage);
+ data.set_graph(new_graph);
+ data.set_schedule(new_schedule);
TraceSchedule(data.info(), &data, data.schedule(),
- TurboshaftRecreateSchedulePhase::phase_name());
+ turboshaft::RecreateSchedulePhase::phase_name());
}
if (!pipeline.SelectInstructions(&linkage)) return;
@@ -3494,14 +3589,15 @@ void Pipeline::GenerateCodeForWasmFunction(
auto result = std::make_unique<wasm::WasmCompilationResult>();
CodeGenerator* code_generator = pipeline.code_generator();
- code_generator->tasm()->GetCode(
+ code_generator->masm()->GetCode(
nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
static_cast<int>(code_generator->handler_table_offset()));
- result->instr_buffer = code_generator->tasm()->ReleaseBuffer();
+ result->instr_buffer = code_generator->masm()->ReleaseBuffer();
result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result->source_positions = code_generator->GetSourcePositionTable();
+ result->inlining_positions = SerializeInliningPositions(*inlining_positions);
result->protected_instructions_data =
code_generator->GetProtectedInstructionsData();
result->result_tier = wasm::ExecutionTier::kTurbofan;
@@ -3538,13 +3634,13 @@ void Pipeline::GenerateCodeForWasmFunction(
int codesize = result->code_desc.body_size();
StdoutStream{} << "Compiled function "
<< reinterpret_cast<const void*>(module) << "#"
- << function_index << " using TurboFan, took "
+ << compilation_data.func_index << " using TurboFan, took "
<< time.InMilliseconds() << " ms and "
<< zone_stats.GetMaxAllocatedBytes() << " / "
<< zone_stats.GetTotalAllocatedBytes()
<< " max/total bytes; bodysize "
- << function_body.end - function_body.start << " codesize "
- << codesize << " name " << data.info()->GetDebugName().get()
+ << compilation_data.body_size() << " codesize " << codesize
+ << " name " << data.info()->GetDebugName().get()
<< std::endl;
}
@@ -3563,6 +3659,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
&zone_stats));
PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
+ PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -3577,9 +3674,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
{
LocalIsolateScope local_isolate_scope(data.broker(), info,
isolate->main_thread_local_isolate());
- if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
+ if (!pipeline.CreateGraph()) return {};
// We selectively Unpark inside OptimizeGraph.
- if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
+ if (!pipeline.OptimizeGraph(&linkage)) return {};
pipeline.AssembleCode(&linkage);
}
@@ -3599,7 +3696,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
if (!will_retire_broker) *out_broker = data.ReleaseBroker();
return code;
}
- return MaybeHandle<Code>();
+ return {};
}
// static
@@ -3613,6 +3710,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
nullptr, schedule, nullptr, node_positions, nullptr,
options, nullptr);
+ PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(
@@ -3640,7 +3738,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
pipeline.CommitDependencies(code)) {
return code;
}
- return MaybeHandle<Code>();
+ return {};
}
// static
@@ -3686,6 +3784,14 @@ void PipelineImpl::ComputeScheduledGraph() {
TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
}
+void PipelineImpl::InitializeTurboshaftPipeline() {
+ this->data_->InitializeTurboshaftPipeline();
+}
+
+#if V8_ENABLE_WASM_SIMD256_REVEC
+void PipelineImpl::Revectorize() { Run<RevectorizePhase>(); }
+#endif // V8_ENABLE_WASM_SIMD256_REVEC
+
bool PipelineImpl::SelectInstructions(Linkage* linkage) {
auto call_descriptor = linkage->GetIncomingDescriptor();
PipelineData* data = this->data_;
@@ -3739,9 +3845,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
data->debug_name(), &temp_zone);
}
- if (Builtins::IsBuiltinId(data->info()->builtin())) {
- Run<BitcastElisionPhase>();
- }
+ Run<BitcastElisionPhase>(Builtins::IsBuiltinId(data->info()->builtin()));
data->InitializeInstructionSequence(call_descriptor);
@@ -3992,6 +4096,31 @@ MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
return FinalizeCode();
}
+// The CheckMaps node can migrate objects with deprecated maps. Afterwards, we
+// check the resulting object against a fixed list of maps known at compile
+// time. This is problematic if we made any assumptions about an object with
+// the deprecated map, as it has now changed shape. Therefore, we want to avoid
+// embedding deprecated maps, as objects with these maps can be changed by
+// CheckMaps.
+// The following code only checks for deprecated maps at the end of compilation,
+// but doesn't protect us against the embedded maps becoming deprecated later.
+// However, this is enough, since if the map becomes deprecated later, it will
+// migrate to a new map not yet known at compile time, so if we migrate to it as
+// part of a CheckMaps, this check will always fail afterwards and deoptimize.
+// This in turn relies on a runtime invariant that map migrations always target
+// newly allocated maps.
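+// As an illustration: if map M1 is deprecated in favor of M2 and we embed the
+// post-migration map M2, CheckMaps may first migrate an M1 object to M2 and
+// then pass the check. Should M2 itself become deprecated later, objects
+// migrate to a freshly allocated map M3, so the embedded check against M2
+// fails and the code deoptimizes, as described above.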
+bool PipelineImpl::CheckNoDeprecatedMaps(Handle<Code> code) {
+ int mode_mask = RelocInfo::EmbeddedObjectModeMask();
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
+ HeapObject obj = it.rinfo()->target_object(data_->isolate());
+ if (obj.IsMap() && Map::cast(obj).is_deprecated()) {
+ return false;
+ }
+ }
+ return true;
+}
+
bool PipelineImpl::CommitDependencies(Handle<Code> code) {
return data_->dependencies() == nullptr ||
data_->dependencies()->Commit(code);
@@ -4091,7 +4220,6 @@ void PipelineImpl::AllocateRegistersForTopTier(
verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
}
-
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index ba1decfafc..28c2046deb 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -20,14 +20,11 @@ class OptimizedCompilationInfo;
class TurbofanCompilationJob;
class ProfileDataFromFile;
class RegisterConfiguration;
+struct WasmInliningPosition;
namespace wasm {
-class AssemblerBufferCache;
struct CompilationEnv;
-struct FunctionBody;
struct WasmCompilationResult;
-struct WasmModule;
-class WireBytesStorage;
} // namespace wasm
namespace compiler {
@@ -38,10 +35,9 @@ class InstructionSequence;
class JSGraph;
class JSHeapBroker;
class MachineGraph;
-class NodeOriginTable;
class Schedule;
class SourcePositionTable;
-struct WasmLoopInfo;
+struct WasmCompilationData;
class Pipeline : public AllStatic {
public:
@@ -54,12 +50,9 @@ class Pipeline : public AllStatic {
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
- const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
- CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, wasm::FunctionBody function_body,
- const wasm::WasmModule* module, int function_index,
- std::vector<compiler::WasmLoopInfo>* loop_infos,
- wasm::AssemblerBufferCache* buffer_cache);
+ WasmCompilationData& compilation_data, MachineGraph* mcgraph,
+ CallDescriptor* call_descriptor,
+ ZoneVector<WasmInliningPosition>* inlining_positions);
// Run the pipeline on a machine graph and generate code.
static wasm::WasmCompilationResult GenerateCodeForWasmNativeStub(
@@ -71,8 +64,7 @@ class Pipeline : public AllStatic {
static std::unique_ptr<TurbofanCompilationJob> NewWasmHeapStubCompilationJob(
Isolate* isolate, CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph, CodeKind kind,
- std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
- SourcePositionTable* source_positions = nullptr);
+ std::unique_ptr<char[]> debug_name, const AssemblerOptions& options);
// Run the pipeline on a machine graph and generate code.
static MaybeHandle<Code> GenerateCodeForCodeStub(
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index c5555b0c49..af628897c6 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -89,10 +89,10 @@ class GlobalAccessFeedback : public ProcessedFeedback {
int slot_index() const;
bool immutable() const;
- base::Optional<ObjectRef> GetConstantHint() const;
+ OptionalObjectRef GetConstantHint(JSHeapBroker* broker) const;
private:
- base::Optional<ObjectRef> const cell_or_context_;
+ OptionalObjectRef const cell_or_context_;
int const index_and_immutable_;
};
@@ -129,7 +129,7 @@ class ElementAccessFeedback : public ProcessedFeedback {
// A transition group is a target and a possibly empty set of sources that can
// transition to the target. It is represented as a non-empty vector with the
// target at index 0.
- using TransitionGroup = ZoneVector<Handle<Map>>;
+ using TransitionGroup = ZoneVector<MapRef>;
ZoneVector<TransitionGroup> const& transition_groups() const;
bool HasOnlyStringMaps(JSHeapBroker* broker) const;
@@ -184,7 +184,7 @@ class MegaDOMPropertyAccessFeedback : public ProcessedFeedback {
class CallFeedback : public ProcessedFeedback {
public:
- CallFeedback(base::Optional<HeapObjectRef> target, float frequency,
+ CallFeedback(OptionalHeapObjectRef target, float frequency,
SpeculationMode mode, CallFeedbackContent call_feedback_content,
FeedbackSlotKind slot_kind)
: ProcessedFeedback(kCall, slot_kind),
@@ -193,13 +193,13 @@ class CallFeedback : public ProcessedFeedback {
mode_(mode),
content_(call_feedback_content) {}
- base::Optional<HeapObjectRef> target() const { return target_; }
+ OptionalHeapObjectRef target() const { return target_; }
float frequency() const { return frequency_; }
SpeculationMode speculation_mode() const { return mode_; }
CallFeedbackContent call_feedback_content() const { return content_; }
private:
- base::Optional<HeapObjectRef> const target_;
+ OptionalHeapObjectRef const target_;
float const frequency_;
SpeculationMode const mode_;
CallFeedbackContent const content_;
@@ -226,7 +226,7 @@ class SingleValueFeedback : public ProcessedFeedback {
};
class InstanceOfFeedback
- : public SingleValueFeedback<base::Optional<JSObjectRef>,
+ : public SingleValueFeedback<OptionalJSObjectRef,
ProcessedFeedback::kInstanceOf> {
using SingleValueFeedback::SingleValueFeedback;
};
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index db3635df62..050a1926f7 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -85,7 +85,7 @@ void PropertyAccessBuilder::BuildCheckMaps(Node* object, Effect* effect,
ZoneVector<MapRef> const& maps) {
HeapObjectMatcher m(object);
if (m.HasResolvedValue()) {
- MapRef object_map = m.Ref(broker()).map();
+ MapRef object_map = m.Ref(broker()).map(broker());
if (object_map.is_stable()) {
for (MapRef map : maps) {
if (map.equals(object_map)) {
@@ -123,9 +123,9 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Effect* effect,
Node* PropertyAccessBuilder::ResolveHolder(
PropertyAccessInfo const& access_info, Node* lookup_start_object) {
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
if (holder.has_value()) {
- return jsgraph()->Constant(holder.value());
+ return jsgraph()->Constant(holder.value(), broker());
}
return lookup_start_object;
}
@@ -152,8 +152,8 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
DCHECK(access_info.IsDictionaryProtoDataConstant());
InternalIndex index = access_info.dictionary_index();
- base::Optional<ObjectRef> value =
- access_info.holder()->GetOwnDictionaryProperty(index, dependencies());
+ OptionalObjectRef value = access_info.holder()->GetOwnDictionaryProperty(
+ broker(), index, dependencies());
if (!value) return {};
for (MapRef map : access_info.lookup_start_object_maps()) {
@@ -176,7 +176,7 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
map, access_info.name(), value.value(), PropertyKind::kData);
}
- return jsgraph()->Constant(value.value());
+ return jsgraph()->Constant(value.value(), broker());
}
Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
@@ -185,7 +185,7 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
if (!access_info.IsFastDataConstant()) return nullptr;
// First, determine if we have a constant holder to load from.
- base::Optional<JSObjectRef> holder = access_info.holder();
+ OptionalJSObjectRef holder = access_info.holder();
// If {access_info} has a holder, just use it.
if (!holder.has_value()) {
@@ -195,7 +195,7 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
// Let us make sure the actual map of the constant lookup_start_object is
// among the maps in {access_info}.
- MapRef lookup_start_object_map = m.Ref(broker()).map();
+ MapRef lookup_start_object_map = m.Ref(broker()).map(broker());
if (std::find_if(access_info.lookup_start_object_maps().begin(),
access_info.lookup_start_object_maps().end(),
[&](MapRef map) {
@@ -208,10 +208,10 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
holder = m.Ref(broker()).AsJSObject();
}
- base::Optional<ObjectRef> value =
- holder->GetOwnFastDataProperty(access_info.field_representation(),
- access_info.field_index(), dependencies());
- return value.has_value() ? jsgraph()->Constant(*value) : nullptr;
+ OptionalObjectRef value = holder->GetOwnFastDataProperty(
+ broker(), access_info.field_representation(), access_info.field_index(),
+ dependencies());
+ return value.has_value() ? jsgraph()->Constant(*value, broker()) : nullptr;
}
Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
@@ -305,7 +305,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
field_representation == MachineRepresentation::kCompressedPointer) {
// Remember the map of the field value, if its map is stable. This is
// used by the LoadElimination to eliminate map checks on the result.
- base::Optional<MapRef> field_map = access_info.field_map();
+ OptionalMapRef field_map = access_info.field_map();
if (field_map.has_value()) {
if (field_map->is_stable()) {
dependencies()->DependOnStableMap(field_map.value());
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 439514d7b3..6ce25f1752 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -27,9 +27,8 @@ struct FieldAccess;
class PropertyAccessBuilder {
public:
- PropertyAccessBuilder(JSGraph* jsgraph, JSHeapBroker* broker,
- CompilationDependencies* dependencies)
- : jsgraph_(jsgraph), broker_(broker), dependencies_(dependencies) {}
+ PropertyAccessBuilder(JSGraph* jsgraph, JSHeapBroker* broker)
+ : jsgraph_(jsgraph), broker_(broker) {}
// Builds the appropriate string check if the maps are only string
// maps.
@@ -64,7 +63,9 @@ class PropertyAccessBuilder {
private:
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
+ CompilationDependencies* dependencies() const {
+ return broker_->dependencies();
+ }
Graph* graph() const;
Isolate* isolate() const;
CommonOperatorBuilder* common() const;
@@ -84,7 +85,6 @@ class PropertyAccessBuilder {
JSGraph* jsgraph_;
JSHeapBroker* broker_;
- CompilationDependencies* dependencies_;
};
bool HasOnlyStringMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 44a9164637..c62ff7c193 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -876,19 +876,19 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Float32RoundDown().op(), a);
}
Node* Float64RoundDown(Node* a) {
- return AddNode(machine()->Float64RoundDown().op(), a);
+ return AddNode(machine()->Float64RoundDown().placeholder(), a);
}
Node* Float32RoundUp(Node* a) {
return AddNode(machine()->Float32RoundUp().op(), a);
}
Node* Float64RoundUp(Node* a) {
- return AddNode(machine()->Float64RoundUp().op(), a);
+ return AddNode(machine()->Float64RoundUp().placeholder(), a);
}
Node* Float32RoundTruncate(Node* a) {
return AddNode(machine()->Float32RoundTruncate().op(), a);
}
Node* Float64RoundTruncate(Node* a) {
- return AddNode(machine()->Float64RoundTruncate().op(), a);
+ return AddNode(machine()->Float64RoundTruncate().placeholder(), a);
}
Node* Float64RoundTiesAway(Node* a) {
return AddNode(machine()->Float64RoundTiesAway().op(), a);
@@ -897,7 +897,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Float32RoundTiesEven().op(), a);
}
Node* Float64RoundTiesEven(Node* a) {
- return AddNode(machine()->Float64RoundTiesEven().op(), a);
+ return AddNode(machine()->Float64RoundTiesEven().placeholder(), a);
}
Node* Word32ReverseBytes(Node* a) {
return AddNode(machine()->Word32ReverseBytes(), a);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 9cfb03c18c..784665cb94 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -4,6 +4,7 @@
#include "src/compiler/redundancy-elimination.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -11,8 +12,12 @@ namespace v8 {
namespace internal {
namespace compiler {
-RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
- : AdvancedReducer(editor), node_checks_(zone), zone_(zone) {}
+RedundancyElimination::RedundancyElimination(Editor* editor, JSGraph* jsgraph,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ node_checks_(zone),
+ jsgraph_(jsgraph),
+ zone_(zone) {}
RedundancyElimination::~RedundancyElimination() = default;
@@ -133,8 +138,43 @@ RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
namespace {
+struct Subsumption {
+ enum class Kind {
+ kNone,
+ kImplicit,
+ kWithConversion,
+ };
+
+ static Subsumption None() { return Subsumption(Kind::kNone, nullptr); }
+ static Subsumption Implicit() {
+ return Subsumption(Kind::kImplicit, nullptr);
+ }
+ static Subsumption WithConversion(const Operator* conversion_op) {
+ return Subsumption(Kind::kWithConversion, conversion_op);
+ }
+
+ bool IsNone() const { return kind_ == Kind::kNone; }
+ bool IsImplicit() const { return kind_ == Kind::kImplicit; }
+ bool IsWithConversion() const { return kind_ == Kind::kWithConversion; }
+ const Operator* conversion_operator() const {
+ DCHECK(IsWithConversion());
+ return conversion_op_;
+ }
+
+ private:
+ Subsumption(Kind kind, const Operator* conversion_op)
+ : kind_(kind), conversion_op_(conversion_op) {
+ DCHECK_EQ(kind_ == Kind::kWithConversion, conversion_op_ != nullptr);
+ }
+
+ Kind kind_;
+ const Operator* conversion_op_;
+};
+
// Does check {a} subsume check {b}?
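+// The answer can be "yes, implicitly" or "yes, after inserting a conversion":
+// e.g. on 64-bit targets CheckedTaggedSignedToInt32 subsumes
+// CheckedTaggedToArrayIndex, but its 32-bit result has to be widened with
+// ChangeInt32ToInt64 before it can stand in for the array index.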
-bool CheckSubsumes(Node const* a, Node const* b) {
+Subsumption CheckSubsumes(Node const* a, Node const* b,
+ MachineOperatorBuilder* machine) {
+ Subsumption subsumption = Subsumption::Implicit();
if (a->op() != b->op()) {
if (a->opcode() == IrOpcode::kCheckInternalizedString &&
b->opcode() == IrOpcode::kCheckString) {
@@ -149,14 +189,24 @@ bool CheckSubsumes(Node const* a, Node const* b) {
b->opcode() == IrOpcode::kCheckedTaggedToArrayIndex) {
// CheckedTaggedSignedToInt32(node) implies
// CheckedTaggedToArrayIndex(node)
+ if (machine->Is64()) {
+ // On 64 bit architectures, ArrayIndex is 64 bit.
+ subsumption =
+ Subsumption::WithConversion(machine->ChangeInt32ToInt64());
+ }
} else if (a->opcode() == IrOpcode::kCheckedTaggedToInt32 &&
b->opcode() == IrOpcode::kCheckedTaggedToArrayIndex) {
// CheckedTaggedToInt32(node) implies CheckedTaggedToArrayIndex(node)
+ if (machine->Is64()) {
+ // On 64 bit architectures, ArrayIndex is 64 bit.
+ subsumption =
+ Subsumption::WithConversion(machine->ChangeInt32ToInt64());
+ }
} else if (a->opcode() == IrOpcode::kCheckReceiver &&
b->opcode() == IrOpcode::kCheckReceiverOrNullOrUndefined) {
// CheckReceiver(node) implies CheckReceiverOrNullOrUndefined(node)
} else if (a->opcode() != b->opcode()) {
- return false;
+ return Subsumption::None();
} else {
switch (a->opcode()) {
case IrOpcode::kCheckBounds:
@@ -189,7 +239,7 @@ bool CheckSubsumes(Node const* a, Node const* b) {
const CheckMinusZeroParameters& bp =
CheckMinusZeroParametersOf(b->op());
if (ap.mode() != bp.mode()) {
- return false;
+ return Subsumption::None();
}
break;
}
@@ -203,20 +253,20 @@ bool CheckSubsumes(Node const* a, Node const* b) {
// for Number, in which case {b} will be subsumed no matter what.
if (ap.mode() != bp.mode() &&
ap.mode() != CheckTaggedInputMode::kNumber) {
- return false;
+ return Subsumption::None();
}
break;
}
default:
DCHECK(!IsCheckedWithFeedback(a->op()));
- return false;
+ return Subsumption::None();
}
}
}
for (int i = a->op()->ValueInputCount(); --i >= 0;) {
- if (a->InputAt(i) != b->InputAt(i)) return false;
+ if (a->InputAt(i) != b->InputAt(i)) return Subsumption::None();
}
- return true;
+ return subsumption;
}
bool TypeSubsumes(Node* node, Node* replacement) {
@@ -232,11 +282,19 @@ bool TypeSubsumes(Node* node, Node* replacement) {
} // namespace
-Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
+Node* RedundancyElimination::EffectPathChecks::LookupCheck(
+ Node* node, JSGraph* jsgraph) const {
for (Check const* check = head_; check != nullptr; check = check->next) {
- if (CheckSubsumes(check->node, node) && TypeSubsumes(node, check->node)) {
+ Subsumption subsumption =
+ CheckSubsumes(check->node, node, jsgraph->machine());
+ if (!subsumption.IsNone() && TypeSubsumes(node, check->node)) {
DCHECK(!check->node->IsDead());
- return check->node;
+ Node* result = check->node;
+ if (subsumption.IsWithConversion()) {
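+        // The dominating check produces a narrower value (e.g. Int32 rather
+        // than a 64-bit array index), so insert the required conversion.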
+ result = jsgraph->graph()->NewNode(subsumption.conversion_operator(),
+ result);
+ }
+ return result;
}
}
return nullptr;
@@ -276,7 +334,7 @@ Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
// because we will have to recompute anyway once we compute the predecessor.
if (checks == nullptr) return NoChange();
// See if we have another check that dominates us.
- if (Node* check = checks->LookupCheck(node)) {
+ if (Node* check = checks->LookupCheck(node, jsgraph_)) {
ReplaceWithValue(node, check);
return Replace(check);
}
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index cabdb1b41c..ffa02df7a2 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -6,14 +6,17 @@
#define V8_COMPILER_REDUNDANCY_ELIMINATION_H_
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/machine-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
+class JSGraph;
+
class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
public:
- RedundancyElimination(Editor* editor, Zone* zone);
+ RedundancyElimination(Editor* editor, JSGraph* jsgraph, Zone* zone);
~RedundancyElimination() final;
RedundancyElimination(const RedundancyElimination&) = delete;
RedundancyElimination& operator=(const RedundancyElimination&) = delete;
@@ -37,7 +40,7 @@ class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
void Merge(EffectPathChecks const* that);
EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
- Node* LookupCheck(Node* node) const;
+ Node* LookupCheck(Node* node, JSGraph* jsgraph) const;
Node* LookupBoundsCheckFor(Node* node) const;
private:
@@ -74,6 +77,7 @@ class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
Zone* zone() const { return zone_; }
PathChecksForEffectNodes node_checks_;
+ JSGraph* jsgraph_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 3eff911352..affa92024b 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -1044,12 +1044,11 @@ Node* RepresentationChanger::GetBitRepresentationFor(
HeapObjectMatcher m(node);
if (m.Is(factory()->false_value())) {
return InsertTypeOverrideForVerifier(
- Type::Constant(broker_, factory()->false_value(),
- jsgraph()->zone()),
+ Type::Constant(broker_, broker_->false_value(), jsgraph()->zone()),
jsgraph()->Int32Constant(0));
} else if (m.Is(factory()->true_value())) {
return InsertTypeOverrideForVerifier(
- Type::Constant(broker_, factory()->true_value(), jsgraph()->zone()),
+ Type::Constant(broker_, broker_->true_value(), jsgraph()->zone()),
jsgraph()->Int32Constant(1));
}
break;
@@ -1197,7 +1196,9 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
- if (output_type.Is(cache_->kDoubleRepresentableInt64)) {
+ if (output_type.Is(cache_->kDoubleRepresentableInt64) ||
+ (output_type.Is(cache_->kDoubleRepresentableInt64OrMinusZero) &&
+ use_info.truncation().IdentifiesZeroAndMinusZero())) {
// float32 -> float64 -> int64
node = InsertChangeFloat32ToFloat64(node);
op = machine()->ChangeFloat64ToInt64();
@@ -1219,7 +1220,9 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat64) {
- if (output_type.Is(cache_->kDoubleRepresentableInt64)) {
+ if (output_type.Is(cache_->kDoubleRepresentableInt64) ||
+ (output_type.Is(cache_->kDoubleRepresentableInt64OrMinusZero) &&
+ use_info.truncation().IdentifiesZeroAndMinusZero())) {
op = machine()->ChangeFloat64ToInt64();
} else if (output_type.Is(cache_->kDoubleRepresentableUint64)) {
op = machine()->ChangeFloat64ToUint64();
@@ -1250,7 +1253,9 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
use_node, use_info);
op = simplified()->TruncateBigIntToWord64();
} else if (CanBeTaggedPointer(output_rep)) {
- if (output_type.Is(cache_->kDoubleRepresentableInt64)) {
+ if (output_type.Is(cache_->kDoubleRepresentableInt64) ||
+ (output_type.Is(cache_->kDoubleRepresentableInt64OrMinusZero) &&
+ use_info.truncation().IdentifiesZeroAndMinusZero())) {
op = simplified()->ChangeTaggedToInt64();
} else if (use_info.type_check() == TypeCheckKind::kSigned64) {
op = simplified()->CheckedTaggedToInt64(
@@ -1371,6 +1376,18 @@ const Operator* RepresentationChanger::Int64OperatorFor(
return machine()->Int64Sub();
case IrOpcode::kSpeculativeBigIntMultiply:
return machine()->Int64Mul();
+ case IrOpcode::kSpeculativeBigIntBitwiseAnd:
+ return machine()->Word64And();
+ case IrOpcode::kSpeculativeBigIntBitwiseOr:
+ return machine()->Word64Or();
+ case IrOpcode::kSpeculativeBigIntBitwiseXor:
+ return machine()->Word64Xor();
+ case IrOpcode::kSpeculativeBigIntEqual:
+ return machine()->Word64Equal();
+ case IrOpcode::kSpeculativeBigIntLessThan:
+ return machine()->Int64LessThan();
+ case IrOpcode::kSpeculativeBigIntLessThanOrEqual:
+ return machine()->Int64LessThanOrEqual();
default:
UNREACHABLE();
}
@@ -1407,6 +1424,22 @@ const Operator* RepresentationChanger::BigIntOperatorFor(
return simplified()->BigIntDivide();
case IrOpcode::kSpeculativeBigIntModulus:
return simplified()->BigIntModulus();
+ case IrOpcode::kSpeculativeBigIntBitwiseAnd:
+ return simplified()->BigIntBitwiseAnd();
+ case IrOpcode::kSpeculativeBigIntBitwiseOr:
+ return simplified()->BigIntBitwiseOr();
+ case IrOpcode::kSpeculativeBigIntBitwiseXor:
+ return simplified()->BigIntBitwiseXor();
+ case IrOpcode::kSpeculativeBigIntShiftLeft:
+ return simplified()->BigIntShiftLeft();
+ case IrOpcode::kSpeculativeBigIntShiftRight:
+ return simplified()->BigIntShiftRight();
+ case IrOpcode::kSpeculativeBigIntEqual:
+ return simplified()->BigIntEqual();
+ case IrOpcode::kSpeculativeBigIntLessThan:
+ return simplified()->BigIntLessThan();
+ case IrOpcode::kSpeculativeBigIntLessThanOrEqual:
+ return simplified()->BigIntLessThanOrEqual();
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/revectorizer.cc b/deps/v8/src/compiler/revectorizer.cc
new file mode 100644
index 0000000000..0a1c4dd4ff
--- /dev/null
+++ b/deps/v8/src/compiler/revectorizer.cc
@@ -0,0 +1,964 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/revectorizer.h"
+
+#include "src/base/cpu.h"
+#include "src/base/logging.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/verifier.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(...) \
+ do { \
+ if (v8_flags.trace_wasm_revectorize) { \
+ PrintF("Revec: "); \
+ PrintF(__VA_ARGS__); \
+ } \
+ } while (false)
+
+namespace {
+
+// Currently, only Load/ProtectedLoad/LoadTransform are supported.
+// TODO(jiepan): add support for UnalignedLoad, LoadLane, LoadTrapOnNull
+bool IsSupportedLoad(const Node* node) {
+ if (node->opcode() == IrOpcode::kProtectedLoad ||
+ node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kLoadTransform) {
+ return true;
+ }
+ return false;
+}
+
+#ifdef DEBUG
+bool IsSupportedLoad(const ZoneVector<Node*>& node_group) {
+ for (auto node : node_group) {
+ if (!IsSupportedLoad(node)) return false;
+ }
+ return true;
+}
+#endif
+
+int64_t GetConstantValue(const Node* node) {
+ int64_t value = -1;
+ if (node->opcode() == IrOpcode::kInt64Constant) {
+ value = OpParameter<int64_t>(node->op());
+ }
+ return value;
+}
+
+int64_t GetMemoryOffsetValue(const Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kProtectedLoad ||
+ node->opcode() == IrOpcode::kStore ||
+ node->opcode() == IrOpcode::kProtectedStore);
+
+ Node* offset = node->InputAt(0);
+ if (offset->opcode() == IrOpcode::kLoadFromObject ||
+ offset->opcode() == IrOpcode::kLoad) {
+ return 0;
+ }
+
+ int64_t offset_value = -1;
+ if (offset->opcode() == IrOpcode::kInt64Add) {
+ if (NodeProperties::IsConstant(offset->InputAt(0))) {
+ offset_value = GetConstantValue(offset->InputAt(0));
+ } else if (NodeProperties::IsConstant(offset->InputAt(1))) {
+ offset_value = GetConstantValue(offset->InputAt(1));
+ }
+ }
+ return offset_value;
+}
+
+// We want to combine load/store nodes with continuous memory addresses. For a
+// load/store node, input(0) is memory_start + offset and input(1) is the
+// index. We currently use the index as the address of the node; nodes with the
+// same index and continuous offsets can be combined together.
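+// For example, two ProtectedStores that share the same index and whose
+// offsets differ by kSimd128Size (16 bytes) access adjacent memory and are
+// candidates for being packed into a single 256-bit store.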
+Node* GetNodeAddress(const Node* node) {
+ Node* address = node->InputAt(1);
+ // The index is changed to Uint64 for memory32
+ if (address->opcode() == IrOpcode::kChangeUint32ToUint64) {
+ address = address->InputAt(0);
+ }
+ return address;
+}
+
+bool IsContinuousAccess(const ZoneVector<Node*>& node_group) {
+ DCHECK_GT(node_group.size(), 0);
+ int64_t previous_offset = GetMemoryOffsetValue(node_group[0]);
+ for (size_t i = 1; i < node_group.size(); ++i) {
+ int64_t current_offset = GetMemoryOffsetValue(node_group[i]);
+ int64_t diff = current_offset - previous_offset;
+ if (diff != kSimd128Size) {
+ TRACE("Non-continuous store!");
+ return false;
+ }
+ previous_offset = current_offset;
+ }
+ return true;
+}
+
+// Returns true if all of the nodes in node_group are constants.
+bool AllConstant(const ZoneVector<Node*>& node_group) {
+ for (Node* node : node_group) {
+ if (!NodeProperties::IsConstant(node)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if all the addresses of the nodes in node_group are identical.
+bool AllSameAddress(const ZoneVector<Node*>& nodes) {
+ Node* address = GetNodeAddress(nodes[0]);
+ for (size_t i = 1; i < nodes.size(); i++) {
+ if (GetNodeAddress(nodes[i]) != address) {
+ TRACE("Diff address #%d,#%d!\n", address->id(),
+ GetNodeAddress(nodes[i])->id());
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if all of the nodes in node_group are identical.
+// The Splat opcode in Wasm SIMD creates a vector with identical lanes.
+template <typename T>
+bool IsSplat(const T& node_group) {
+ for (typename T::size_type i = 1; i < node_group.size(); ++i) {
+ if (node_group[i] != node_group[0]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if all of the nodes in node_group have the same operator.
+bool AllSameOperator(const ZoneVector<Node*>& node_group) {
+ auto op = node_group[0]->op();
+ for (ZoneVector<Node*>::size_type i = 1; i < node_group.size(); i++) {
+ if (node_group[i]->op() != op) {
+ return false;
+ }
+ }
+ return true;
+}
+
+class EffectChainIterator {
+ public:
+ explicit EffectChainIterator(Node* node) : node_(node) {}
+
+ Node* Advance() {
+ prev_ = node_;
+ node_ = EffectInputOf(node_);
+ return node_;
+ }
+
+ Node* Prev() { return prev_; }
+
+ Node* Next() { return EffectInputOf(node_); }
+
+ void Set(Node* node) {
+ DCHECK_NOT_NULL(prev_);
+ node_ = node;
+ prev_ = nullptr;
+ }
+
+ Node* operator*() { return node_; }
+
+ private:
+ Node* EffectInputOf(Node* node) {
+ DCHECK(IsSupportedLoad(node));
+ return node->InputAt(2);
+ }
+
+ Node* node_;
+ Node* prev_;
+};
+
+void ReplaceEffectInput(Node* target, Node* value) {
+ DCHECK(IsSupportedLoad(target));
+ DCHECK(IsSupportedLoad(value));
+ target->ReplaceInput(2, value);
+}
+
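+// Swaps {dest} and {src} within an effect chain by rewiring the effect inputs
+// of the two nodes and of their respective predecessors.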
+void Swap(EffectChainIterator& dest, EffectChainIterator& src) {
+ DCHECK_NE(dest.Prev(), nullptr);
+ DCHECK_NE(src.Prev(), nullptr);
+ ReplaceEffectInput(dest.Prev(), *src);
+ ReplaceEffectInput(src.Prev(), *dest);
+ Node* temp = dest.Next();
+ ReplaceEffectInput(*dest, src.Next());
+ ReplaceEffectInput(*src, temp);
+
+ temp = *dest;
+ dest.Set(*src);
+ src.Set(temp);
+}
+
+} // anonymous namespace
+
+// Comparator used to sort load/store nodes by offset.
+bool MemoryOffsetComparer::operator()(const Node* lhs, const Node* rhs) const {
+ return GetMemoryOffsetValue(lhs) < GetMemoryOffsetValue(rhs);
+}
+
+void PackNode::Print() const {
+ if (revectorized_node_ != nullptr) {
+ TRACE("0x%p #%d:%s(%d %d, %s)\n", this, revectorized_node_->id(),
+ revectorized_node_->op()->mnemonic(), nodes_[0]->id(),
+ nodes_[1]->id(), nodes_[0]->op()->mnemonic());
+ } else {
+ TRACE("0x%p null(%d %d, %s)\n", this, nodes_[0]->id(), nodes_[1]->id(),
+ nodes_[0]->op()->mnemonic());
+ }
+}
+
+bool SLPTree::CanBePacked(const ZoneVector<Node*>& node_group) {
+ DCHECK_EQ(node_group.size(), 2);
+ if (!SameBasicBlock(node_group[0], node_group[1])) {
+ TRACE("%s(#%d, #%d) not in same BB!\n", node_group[0]->op()->mnemonic(),
+ node_group[0]->id(), node_group[1]->id());
+ return false;
+ }
+ if (!AllSameOperator(node_group)) {
+ TRACE("%s(#%d, #%d) have different operator!\n",
+ node_group[0]->op()->mnemonic(), node_group[0]->id(),
+ node_group[1]->id());
+ return false;
+ }
+ // TODO(jiepan): add support for Constant
+ if (AllConstant(node_group)) {
+ TRACE("%s(#%d, #%d) are constantant, not supported yet!\n",
+ node_group[0]->op()->mnemonic(), node_group[0]->id(),
+ node_group[1]->id());
+ return false;
+ }
+
+  // Only support simd128 operators or common operators with simd128
+  // MachineRepresentation. The MachineRepresentation of the root has already
+  // been checked, and the leaf nodes will be checked later. Here we omit the
+  // MachineRepresentation check and only check the opcode itself.
+ IrOpcode::Value op = node_group[0]->opcode();
+ if (NodeProperties::IsSimd128Operation(node_group[0]) ||
+ (op == IrOpcode::kStore) || (op == IrOpcode::kProtectedStore) ||
+ (op == IrOpcode::kLoad) || (op == IrOpcode::kProtectedLoad) ||
+ (op == IrOpcode::kPhi) || (op == IrOpcode::kLoopExitValue) ||
+ (op == IrOpcode::kExtractF128)) {
+ return true;
+ }
+ return false;
+}
+
+PackNode* SLPTree::NewPackNode(const ZoneVector<Node*>& node_group) {
+ TRACE("PackNode %s(#%d:, #%d)\n", node_group[0]->op()->mnemonic(),
+ node_group[0]->id(), node_group[1]->id());
+ PackNode* pnode = zone_->New<PackNode>(zone_, node_group);
+ for (Node* node : node_group) {
+ node_to_packnode_[node] = pnode;
+ }
+ return pnode;
+}
+
+PackNode* SLPTree::NewPackNodeAndRecurs(const ZoneVector<Node*>& node_group,
+ int start_index, int count,
+ unsigned recursion_depth) {
+ PackNode* pnode = NewPackNode(node_group);
+ for (int i = start_index; i < start_index + count; ++i) {
+ ZoneVector<Node*> operands(zone_);
+ // Prepare the operand vector.
+ for (size_t j = 0; j < node_group.size(); j++) {
+ Node* node = node_group[j];
+ operands.push_back(NodeProperties::GetValueInput(node, i));
+ }
+
+ PackNode* child = BuildTreeRec(operands, recursion_depth + 1);
+ if (child) {
+ pnode->SetOperand(i, child);
+ } else {
+ return nullptr;
+ }
+ }
+ return pnode;
+}
+
+PackNode* SLPTree::GetPackNode(Node* node) {
+ auto I = node_to_packnode_.find(node);
+ if (I != node_to_packnode_.end()) {
+ return I->second;
+ }
+ return nullptr;
+}
+
+void SLPTree::PushStack(const ZoneVector<Node*>& node_group) {
+ TRACE("Stack Push (%d %s, %d %s)\n", node_group[0]->id(),
+ node_group[0]->op()->mnemonic(), node_group[1]->id(),
+ node_group[1]->op()->mnemonic());
+ for (auto node : node_group) {
+ on_stack_.insert(node);
+ }
+ stack_.push({node_group});
+}
+
+void SLPTree::PopStack() {
+ const ZoneVector<Node*>& node_group = stack_.top();
+ DCHECK_EQ(node_group.size(), 2);
+ TRACE("Stack Pop (%d %s, %d %s)\n", node_group[0]->id(),
+ node_group[0]->op()->mnemonic(), node_group[1]->id(),
+ node_group[1]->op()->mnemonic());
+ for (auto node : node_group) {
+ on_stack_.erase(node);
+ }
+ stack_.pop();
+}
+
+bool SLPTree::OnStack(Node* node) {
+ return on_stack_.find(node) != on_stack_.end();
+}
+
+bool SLPTree::AllOnStack(const ZoneVector<Node*>& node_group) {
+ for (auto node : node_group) {
+ if (OnStack(node)) return true;
+ }
+ return false;
+}
+
+bool SLPTree::StackTopIsPhi() {
+ const ZoneVector<Node*>& node_group = stack_.top();
+ DCHECK_EQ(node_group.size(), 2);
+ return NodeProperties::IsPhi(node_group[0]);
+}
+
+void SLPTree::ClearStack() {
+ stack_ = ZoneStack<ZoneVector<Node*>>(zone_);
+ on_stack_.clear();
+}
+
+// Try to connect the nodes in |loads| by effect edges. This allows us to
+// build a |PackNode| without breaking effect dependencies:
+// Before: [Load1]->...->[Load2]->...->[Load3]->...->[Load4]
+// After: [Load1]->[Load2]->[Load3]->[Load4]
+void SLPTree::TryReduceLoadChain(const ZoneVector<Node*>& loads) {
+ ZoneSet<Node*> visited(zone());
+ for (Node* load : loads) {
+ if (visited.find(load) != visited.end()) continue;
+ visited.insert(load);
+
+ EffectChainIterator dest(load);
+ EffectChainIterator it(dest.Next());
+ while (SameBasicBlock(*it, load) && IsSupportedLoad(*it)) {
+ if (std::find(loads.begin(), loads.end(), *it) != loads.end()) {
+ visited.insert(*it);
+ dest.Advance();
+ if (*dest != *it) {
+ Swap(dest, it);
+ }
+ }
+ it.Advance();
+ }
+ }
+}
+
+bool SLPTree::IsSideEffectFreeLoad(const ZoneVector<Node*>& node_group) {
+ DCHECK(IsSupportedLoad(node_group));
+ DCHECK_EQ(node_group.size(), 2);
+ TRACE("Enter IsSideEffectFreeLoad (%d %s, %d %s)\n", node_group[0]->id(),
+ node_group[0]->op()->mnemonic(), node_group[1]->id(),
+ node_group[1]->op()->mnemonic());
+
+ TryReduceLoadChain(node_group);
+
+ std::stack<Node*> to_visit;
+ std::unordered_set<Node*> visited;
+ // Visit all the inputs (except for control inputs) of Loads.
+ for (size_t i = 0, e = node_group.size(); i < e; i++) {
+ Node* load = node_group[i];
+ for (int j = 0; j < NodeProperties::FirstControlIndex(load); ++j) {
+ Node* input = load->InputAt(j);
+ if (std::find(node_group.begin(), node_group.end(), input) ==
+ node_group.end()) {
+ to_visit.push(input);
+ }
+ }
+ }
+
+  // Check the inputs of the loads to see whether they are connected to
+  // existing nodes in the SLPTree. If they are, merging the loads would
+  // introduce a side effect, so we cannot merge such loads.
+ while (!to_visit.empty()) {
+ Node* input = to_visit.top();
+ to_visit.pop();
+ TRACE("IsSideEffectFreeLoad visit (%d %s)\n", input->id(),
+ input->op()->mnemonic());
+ if (visited.find(input) == visited.end()) {
+ visited.insert(input);
+
+ if (OnStack(input)) {
+ TRACE("Has internal dependency because (%d %s) on stack\n", input->id(),
+ input->op()->mnemonic());
+ return false;
+ }
+
+      // If the input is not in the same basic block as the loads, it cannot
+      // be in the SLPTree. Otherwise, recursively visit all of the input's
+      // edges to see whether they are connected to the SLPTree.
+ if (SameBasicBlock(input, node_group[0])) {
+ for (int i = 0; i < NodeProperties::FirstControlIndex(input); ++i) {
+ to_visit.push(input->InputAt(i));
+ }
+ }
+ }
+ }
+ return true;
+}
+
+PackNode* SLPTree::BuildTree(const ZoneVector<Node*>& roots) {
+ TRACE("Enter %s\n", __func__);
+
+ DeleteTree();
+
+ root_ = BuildTreeRec(roots, 0);
+ return root_;
+}
+
+PackNode* SLPTree::BuildTreeRec(const ZoneVector<Node*>& node_group,
+ unsigned recursion_depth) {
+ TRACE("Enter %s\n", __func__);
+ DCHECK_EQ(node_group.size(), 2);
+
+ Node* node0 = node_group[0];
+ Node* node1 = node_group[1];
+
+ if (recursion_depth == RecursionMaxDepth) {
+ TRACE("Failed due to max recursion depth!\n");
+ return nullptr;
+ }
+
+ if (AllOnStack(node_group)) {
+ if (!StackTopIsPhi()) {
+ TRACE("Failed due to (%d %s, %d %s) on stack!\n", node0->id(),
+ node0->op()->mnemonic(), node1->id(), node1->op()->mnemonic());
+ return nullptr;
+ }
+ }
+ PushStack(node_group);
+
+ if (!CanBePacked(node_group)) {
+ return nullptr;
+ }
+
+ DCHECK(AllConstant(node_group) || AllSameOperator(node_group));
+
+ // Check if this is a duplicate of another entry.
+ for (Node* node : node_group) {
+ if (PackNode* p = GetPackNode(node)) {
+ if (!p->IsSame(node_group)) {
+ // TODO(jiepan): Gathering due to partial overlap
+ TRACE("Failed due to partial overlap at #%d,%s!\n", node->id(),
+ node->op()->mnemonic());
+ return nullptr;
+ }
+
+ PopStack();
+ TRACE("Perfect diamond merge at #%d,%s\n", node->id(),
+ node->op()->mnemonic());
+ return p;
+ }
+ }
+
+ if (node0->opcode() == IrOpcode::kExtractF128) {
+ Node* source = node0->InputAt(0);
+ TRACE("Extract leaf node from #%d,%s!\n", source->id(),
+ source->op()->mnemonic());
+    // For 256-bit revectorization, check whether both extracts come from the
+    // same source.
+ if (node0->InputAt(0) == node1->InputAt(0) &&
+ (node0->InputAt(0)->opcode() == IrOpcode::kLoadTransform
+ ? node0 == node1
+ : OpParameter<int32_t>(node0->op()) + 1 ==
+ OpParameter<int32_t>(node1->op()))) {
+ TRACE("Added a pair of Extract.\n");
+ PackNode* pnode = NewPackNode(node_group);
+ PopStack();
+ return pnode;
+ }
+ TRACE("Failed due to ExtractF128!\n");
+ return nullptr;
+ }
+
+ if (node0->opcode() == IrOpcode::kProtectedLoad ||
+ node0->opcode() == IrOpcode::kLoadTransform) {
+ TRACE("Load leaf node\n");
+ if (!AllSameAddress(node_group)) {
+ TRACE("Failed due to different load addr!\n");
+ return nullptr;
+ }
+ if (node0->opcode() == IrOpcode::kProtectedLoad) {
+ MachineRepresentation rep =
+ LoadRepresentationOf(node0->op()).representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ return nullptr;
+ }
+ // Sort loads by offset
+ ZoneVector<Node*> sorted_node_group(node_group.size(), zone_);
+ std::partial_sort_copy(node_group.begin(), node_group.end(),
+ sorted_node_group.begin(), sorted_node_group.end(),
+ MemoryOffsetComparer());
+ if (!IsContinuousAccess(sorted_node_group)) {
+ TRACE("Failed due to non-continuous load!\n");
+ return nullptr;
+ }
+ }
+
+ if (node0->opcode() == IrOpcode::kLoadTransform) {
+ if (!IsSplat(node_group)) {
+ TRACE("LoadTransform Failed due to IsSplat!\n");
+ return nullptr;
+ }
+ LoadTransformParameters params = LoadTransformParametersOf(node0->op());
+ // TODO(jiepan): Support more LoadTransformation types
+ if (params.transformation != LoadTransformation::kS128Load32Splat &&
+ params.transformation != LoadTransformation::kS128Load64Splat) {
+ TRACE("LoadTransform failed due to unsupported type #%d!\n",
+ node0->id());
+ return nullptr;
+ }
+ }
+
+ if (!IsSideEffectFreeLoad(node_group)) {
+ TRACE("Failed due to dependency check\n");
+ return nullptr;
+ }
+ PackNode* p = NewPackNode(node_group);
+ PopStack();
+ return p;
+ }
+
+ int value_in_count = node0->op()->ValueInputCount();
+ switch (node0->opcode()) {
+ case IrOpcode::kPhi: {
+ TRACE("Added a vector of PHI nodes.\n");
+ MachineRepresentation rep = PhiRepresentationOf(node0->op());
+ if (rep != MachineRepresentation::kSimd128) {
+ return nullptr;
+ }
+ PackNode* pnode =
+ NewPackNodeAndRecurs(node_group, 0, value_in_count, recursion_depth);
+ PopStack();
+ return pnode;
+ }
+ case IrOpcode::kLoopExitValue: {
+ MachineRepresentation rep = LoopExitValueRepresentationOf(node0->op());
+ if (rep != MachineRepresentation::kSimd128) {
+ return nullptr;
+ }
+ PackNode* pnode =
+ NewPackNodeAndRecurs(node_group, 0, value_in_count, recursion_depth);
+ PopStack();
+ return pnode;
+ }
+ case IrOpcode::kF32x4Add:
+ case IrOpcode::kF32x4Mul: {
+ TRACE("Added a vector of un/bin/ter op.\n");
+ PackNode* pnode =
+ NewPackNodeAndRecurs(node_group, 0, value_in_count, recursion_depth);
+ PopStack();
+ return pnode;
+ }
+
+ // TODO(jiepan): UnalignedStore, StoreTrapOnNull.
+ case IrOpcode::kStore:
+ case IrOpcode::kProtectedStore: {
+ TRACE("Added a vector of stores.\n");
+ if (!AllSameAddress(node_group)) {
+ TRACE("Failed due to different store addr!\n");
+ return nullptr;
+ }
+ PackNode* pnode = NewPackNodeAndRecurs(node_group, 2, 1, recursion_depth);
+ PopStack();
+ return pnode;
+ }
+ default:
+ TRACE("Default branch #%d:%s\n", node0->id(), node0->op()->mnemonic());
+ break;
+ }
+ return nullptr;
+}
+
+void SLPTree::DeleteTree() {
+ ClearStack();
+ node_to_packnode_.clear();
+}
+
+void SLPTree::Print(const char* info) {
+ TRACE("%s, Packed node:\n", info);
+ if (!v8_flags.trace_wasm_revectorize) {
+ return;
+ }
+
+ ForEach([](PackNode const* pnode) { pnode->Print(); });
+}
+
+template <typename FunctionType>
+void SLPTree::ForEach(FunctionType callback) {
+ std::unordered_set<PackNode const*> visited;
+
+ for (auto& entry : node_to_packnode_) {
+ PackNode const* pnode = entry.second;
+ if (!pnode || visited.find(pnode) != visited.end()) {
+ continue;
+ }
+ visited.insert(pnode);
+
+ callback(pnode);
+ }
+}
+
+//////////////////////////////////////////////////////
+bool Revectorizer::DecideVectorize() {
+ TRACE("Enter %s\n", __func__);
+
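+  // "save" counts pack nodes whose two 128-bit operations collapse into a
+  // single 256-bit operation; "cost" counts the ExtractF128 nodes needed to
+  // feed uses outside the packed tree. Vectorizing pays off only if the
+  // savings exceed the extraction cost.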
+ int save = 0, cost = 0;
+ slp_tree_->ForEach([&](PackNode const* pnode) {
+ const ZoneVector<Node*>& nodes = pnode->Nodes();
+ IrOpcode::Value op = nodes[0]->opcode();
+
+    // Skip LoopExitValue, as such auxiliary nodes are not issued in generated
+    // code. Skip ExtractF128, as we will reuse its revectorized input and no
+    // additional extract nodes will be generated.
+ if (op == IrOpcode::kLoopExitValue || op == IrOpcode::kExtractF128) {
+ return;
+ }
+    // Splat nodes do not yield a saving, since a splat simply extends itself.
+ if (!IsSplat(nodes)) {
+ save++;
+ }
+
+ for (size_t i = 0; i < nodes.size(); i++) {
+ if (i > 0 && nodes[i] == nodes[0]) continue;
+
+ for (auto edge : nodes[i]->use_edges()) {
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ Node* useNode = edge.from();
+ if (!GetPackNode(useNode) && !(useNode->uses().empty()) &&
+ useNode->opcode() != IrOpcode::kLoopExitValue) {
+ TRACE("External use edge: (%d:%s) -> (%d:%s)\n", useNode->id(),
+ useNode->op()->mnemonic(), nodes[i]->id(),
+ nodes[i]->op()->mnemonic());
+ cost++;
+
+ // We only need one Extract node and all other uses can share.
+ break;
+ }
+ }
+ }
+ });
+
+ TRACE("Save: %d, cost: %d\n", save, cost);
+ return save > cost;
+}
+
+void Revectorizer::SetEffectInput(PackNode* pnode, int index, Node*& input) {
+ const ZoneVector<Node*>& nodes = pnode->Nodes();
+
+  // We assume there is no effect edge to a third node in between the two
+  // packed nodes.
+ DCHECK(nodes[0] == nodes[1] ||
+ NodeProperties::GetEffectInput(nodes[0]) == nodes[1] ||
+ NodeProperties::GetEffectInput(nodes[1]) == nodes[0]);
+
+  // Scan until we find the effect input that comes from outside this
+  // PackNode.
+ for (size_t i = 0; i < nodes.size(); i++) {
+ Node* node128 = nodes[i];
+ PackNode* effect = GetPackNode(node128->InputAt(index));
+ if (effect == pnode) continue;
+ if (effect)
+ pnode->SetOperand(index, effect);
+ else
+ input = node128->InputAt(index);
+ break;
+ }
+}
+
+void Revectorizer::SetMemoryOpInputs(base::SmallVector<Node*, 2>& inputs,
+ PackNode* pnode, int effect_index) {
+ Node* node = pnode->Nodes()[0];
+ // Keep the addressing inputs
+ inputs[0] = node->InputAt(0);
+ inputs[1] = node->InputAt(1);
+  // Set the effect input; the value input will be set later.
+ SetEffectInput(pnode, effect_index, inputs[effect_index]);
+ // Set the control input
+ inputs[effect_index + 1] = node->InputAt(effect_index + 1);
+}
+
+Node* Revectorizer::VectorizeTree(PackNode* pnode) {
+ TRACE("Enter %s with PackNode\n", __func__);
+
+ Node* node0 = pnode->Nodes()[0];
+ if (pnode->RevectorizedNode()) {
+ TRACE("Diamond merged for #%d:%s\n", node0->id(), node0->op()->mnemonic());
+ return pnode->RevectorizedNode();
+ }
+
+ int input_count = node0->InputCount();
+ TRACE("Vectorize #%d:%s, input count: %d\n", node0->id(),
+ node0->op()->mnemonic(), input_count);
+
+ IrOpcode::Value op = node0->opcode();
+ const Operator* new_op = nullptr;
+ Node* dead = mcgraph()->Dead();
+ base::SmallVector<Node*, 2> inputs(input_count);
+ for (int i = 0; i < input_count; i++) inputs[i] = dead;
+
+ switch (op) {
+ case IrOpcode::kPhi: {
+ DCHECK_EQ(PhiRepresentationOf(node0->op()),
+ MachineRepresentation::kSimd128);
+ new_op = mcgraph_->common()->Phi(MachineRepresentation::kSimd256,
+ input_count - 1);
+ inputs[input_count - 1] = NodeProperties::GetControlInput(node0);
+ break;
+ }
+ case IrOpcode::kLoopExitValue: {
+ DCHECK_EQ(LoopExitValueRepresentationOf(node0->op()),
+ MachineRepresentation::kSimd128);
+ new_op =
+ mcgraph_->common()->LoopExitValue(MachineRepresentation::kSimd256);
+ inputs[input_count - 1] = NodeProperties::GetControlInput(node0);
+ break;
+ }
+ case IrOpcode::kF32x4Add:
+ new_op = mcgraph_->machine()->F32x8Add();
+ break;
+ case IrOpcode::kF32x4Mul:
+ new_op = mcgraph_->machine()->F32x8Mul();
+ break;
+ case IrOpcode::kProtectedLoad: {
+ DCHECK_EQ(LoadRepresentationOf(node0->op()).representation(),
+ MachineRepresentation::kSimd128);
+ new_op = mcgraph_->machine()->ProtectedLoad(MachineType::Simd256());
+ SetMemoryOpInputs(inputs, pnode, 2);
+ break;
+ }
+ case IrOpcode::kLoad: {
+ DCHECK_EQ(LoadRepresentationOf(node0->op()).representation(),
+ MachineRepresentation::kSimd128);
+ new_op = mcgraph_->machine()->Load(MachineType::Simd256());
+ SetMemoryOpInputs(inputs, pnode, 2);
+ break;
+ }
+ case IrOpcode::kProtectedStore: {
+ DCHECK_EQ(StoreRepresentationOf(node0->op()).representation(),
+ MachineRepresentation::kSimd128);
+ new_op =
+ mcgraph_->machine()->ProtectedStore(MachineRepresentation::kSimd256);
+ SetMemoryOpInputs(inputs, pnode, 3);
+ break;
+ }
+ case IrOpcode::kStore: {
+ DCHECK_EQ(StoreRepresentationOf(node0->op()).representation(),
+ MachineRepresentation::kSimd128);
+ WriteBarrierKind write_barrier_kind =
+ StoreRepresentationOf(node0->op()).write_barrier_kind();
+ new_op = mcgraph_->machine()->Store(StoreRepresentation(
+ MachineRepresentation::kSimd256, write_barrier_kind));
+ SetMemoryOpInputs(inputs, pnode, 3);
+ break;
+ }
+ case IrOpcode::kLoadTransform: {
+ LoadTransformParameters params = LoadTransformParametersOf(node0->op());
+ if (params.transformation == LoadTransformation::kS128Load32Splat) {
+ new_op = mcgraph_->machine()->LoadTransform(
+ params.kind, LoadTransformation::kS256Load32Splat);
+ SetMemoryOpInputs(inputs, pnode, 2);
+ } else if (params.transformation ==
+ LoadTransformation::kS128Load64Splat) {
+ new_op = mcgraph_->machine()->LoadTransform(
+ params.kind, LoadTransformation::kS256Load64Splat);
+ SetMemoryOpInputs(inputs, pnode, 2);
+ } else {
+ TRACE("Unsupported #%d:%s!\n", node0->id(), node0->op()->mnemonic());
+ }
+ break;
+ }
+ case IrOpcode::kExtractF128: {
+ pnode->SetRevectorizedNode(node0->InputAt(0));
+      // Uses of the extract other than its parent do not need to change.
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ DCHECK(pnode->RevectorizedNode() || new_op);
+ if (new_op != nullptr) {
+ Node* new_node =
+ graph()->NewNode(new_op, input_count, inputs.begin(), true);
+ pnode->SetRevectorizedNode(new_node);
+ for (int i = 0; i < input_count; i++) {
+ if (inputs[i] == dead) {
+ new_node->ReplaceInput(i, VectorizeTree(pnode->GetOperand(i)));
+ }
+ }
+
+    // Rewire external uses: value uses are routed through ExtractF128 nodes,
+    // and effect uses are moved to the revectorized node.
+ const ZoneVector<Node*>& nodes = pnode->Nodes();
+ for (size_t i = 0; i < nodes.size(); i++) {
+ if (i > 0 && nodes[i] == nodes[i - 1]) continue;
+ Node* input_128 = nullptr;
+ for (auto edge : nodes[i]->use_edges()) {
+ Node* useNode = edge.from();
+ if (!GetPackNode(useNode)) {
+ if (NodeProperties::IsValueEdge(edge)) {
+ // Extract use
+ TRACE("Replace Value Edge from %d:%s, to %d:%s\n", useNode->id(),
+ useNode->op()->mnemonic(), edge.to()->id(),
+ edge.to()->op()->mnemonic());
+
+ if (!input_128) {
+ TRACE("Create ExtractF128(%lu) node from #%d\n", i,
+ new_node->id());
+ input_128 = graph()->NewNode(
+ mcgraph()->machine()->ExtractF128(int32_t(i)), new_node);
+ }
+ edge.UpdateTo(input_128);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ TRACE("Replace Effect Edge from %d:%s, to %d:%s\n", useNode->id(),
+ useNode->op()->mnemonic(), edge.to()->id(),
+ edge.to()->op()->mnemonic());
+
+ edge.UpdateTo(new_node);
+ }
+ }
+ }
+ if (nodes[i]->uses().empty()) nodes[i]->Kill();
+ }
+ }
+
+ return pnode->RevectorizedNode();
+}
+
+void Revectorizer::DetectCPUFeatures() {
+ base::CPU cpu;
+ if (cpu.has_avx2()) {
+ support_simd256_ = true;
+ }
+}
+
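+// Top-level driver: collect groups of SIMD stores that share an early
+// schedule position and address, then try to pack each chain of stores and
+// the value trees feeding them.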
+bool Revectorizer::TryRevectorize(const char* function) {
+ bool success = false;
+ if (support_simd256_ && graph_->GetSimdStoreNodes().size()) {
+ TRACE("TryRevectorize %s\n", function);
+ CollectSeeds();
+ for (auto entry : group_of_stores_) {
+ ZoneMap<Node*, StoreNodeSet>* store_chains = entry.second;
+ if (store_chains != nullptr) {
+ PrintStores(store_chains);
+ if (ReduceStoreChains(store_chains)) {
+ TRACE("Successful revectorize %s\n", function);
+ success = true;
+ }
+ }
+ }
+ TRACE("Finish revectorize %s\n", function);
+ }
+ return success;
+}
+
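+// Collects SIMD store nodes whose offset is a multiple of kSimd128Size as
+// revectorization seeds, grouped first by their early schedule position and
+// then by their address node.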
+void Revectorizer::CollectSeeds() {
+ for (auto it = graph_->GetSimdStoreNodes().begin();
+ it != graph_->GetSimdStoreNodes().end(); ++it) {
+ Node* node = *it;
+ Node* dominator = slp_tree_->GetEarlySchedulePosition(node);
+
+ if ((GetMemoryOffsetValue(node) % kSimd128Size) != 0) {
+ continue;
+ }
+ Node* address = GetNodeAddress(node);
+ ZoneMap<Node*, StoreNodeSet>* store_nodes;
+ auto first_level_iter = group_of_stores_.find(dominator);
+ if (first_level_iter == group_of_stores_.end()) {
+ store_nodes = zone_->New<ZoneMap<Node*, StoreNodeSet>>(zone_);
+ group_of_stores_[dominator] = store_nodes;
+ } else {
+ store_nodes = first_level_iter->second;
+ }
+ auto second_level_iter = store_nodes->find(address);
+ if (second_level_iter == store_nodes->end()) {
+ second_level_iter =
+ store_nodes->insert({address, StoreNodeSet(zone())}).first;
+ }
+ second_level_iter->second.insert(node);
+ }
+}
+
+bool Revectorizer::ReduceStoreChains(
+ ZoneMap<Node*, StoreNodeSet>* store_chains) {
+ TRACE("Enter %s\n", __func__);
+ bool changed = false;
+ for (auto chain_iter = store_chains->cbegin();
+ chain_iter != store_chains->cend(); ++chain_iter) {
+ if (chain_iter->second.size() >= 2 && chain_iter->second.size() % 2 == 0) {
+ ZoneVector<Node*> store_chain(chain_iter->second.begin(),
+ chain_iter->second.end(), zone_);
+ for (auto it = store_chain.begin(); it < store_chain.end(); it = it + 2) {
+ ZoneVector<Node*> stores_unit(it, it + 2, zone_);
+ if (ReduceStoreChain(stores_unit)) {
+ changed = true;
+ }
+ }
+ }
+ }
+
+ return changed;
+}
+
+bool Revectorizer::ReduceStoreChain(const ZoneVector<Node*>& Stores) {
+ TRACE("Enter %s, root@ (#%d,#%d)\n", __func__, Stores[0]->id(),
+ Stores[1]->id());
+ if (!IsContinuousAccess(Stores)) {
+ return false;
+ }
+
+ PackNode* root = slp_tree_->BuildTree(Stores);
+ if (!root) {
+ TRACE("Build tree failed!\n");
+ return false;
+ }
+
+ slp_tree_->Print("After build tree");
+
+ if (DecideVectorize()) {
+ VectorizeTree(root);
+ slp_tree_->Print("After vectorize tree");
+ }
+
+ TRACE("\n");
+ return true;
+}
+
+void Revectorizer::PrintStores(ZoneMap<Node*, StoreNodeSet>* store_chains) {
+ if (!v8_flags.trace_wasm_revectorize) {
+ return;
+ }
+ TRACE("Enter %s\n", __func__);
+ for (auto it = store_chains->cbegin(); it != store_chains->cend(); ++it) {
+ if (it->second.size() > 0) {
+ TRACE("address = #%d:%s \n", it->first->id(),
+ it->first->op()->mnemonic());
+
+ for (auto node : it->second) {
+ TRACE("#%d:%s, ", node->id(), node->op()->mnemonic());
+ }
+
+ TRACE("\n");
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/revectorizer.h b/deps/v8/src/compiler/revectorizer.h
new file mode 100644
index 0000000000..f5e62b0bed
--- /dev/null
+++ b/deps/v8/src/compiler/revectorizer.h
@@ -0,0 +1,212 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REVECTORIZER_H_
+#define V8_COMPILER_REVECTORIZER_H_
+
+// The Revectorizer is an optimization that promotes pairs of simd128 nodes to
+// new simd256 nodes, taking advantage of the wider vectors available in
+// hardware (e.g. the YMM registers of the AVX2 instruction set) when this is
+// possible and beneficial. The main algorithm is based on the Superword Level
+// Parallelism (SLP) vectorization technique.
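+//
+// For example, two independent F32x4Add nodes whose inputs come from adjacent
+// 128-bit halves of the same memory region can be replaced by a single
+// F32x8Add operating on a 256-bit value.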
+
+#include <vector>
+
+#include "src/base/small-vector.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linear-scheduler.h"
+#include "src/compiler/machine-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct V8_EXPORT_PRIVATE MemoryOffsetComparer {
+ bool operator()(const Node* lhs, const Node* rhs) const;
+};
+
+using StoreNodeSet = ZoneSet<Node*, MemoryOffsetComparer>;
+
+// A PackNode consists of a fixed number of isomorphic simd128 nodes which can
+// execute in parallel and be converted to a single 256-bit simd node later.
+// The nodes in a PackNode must be schedulable in the same basic block and must
+// be mutually independent.
+class PackNode final : public NON_EXPORTED_BASE(ZoneObject) {
+ public:
+ explicit PackNode(Zone* zone, const ZoneVector<Node*>& node_group)
+ : nodes_(node_group.cbegin(), node_group.cend(), zone),
+ operands_(zone),
+ revectorized_node_(nullptr) {}
+ const ZoneVector<Node*>& Nodes() const { return nodes_; }
+ bool IsSame(const ZoneVector<Node*>& node_group) const {
+ return nodes_ == node_group;
+ }
+ Node* RevectorizedNode() { return revectorized_node_; }
+ void SetRevectorizedNode(Node* node) { revectorized_node_ = node; }
+  // Returns the operand at {index} of this PackNode.
+ PackNode* GetOperand(size_t index) {
+ DCHECK_LT(index, operands_.size());
+ return operands_[index];
+ }
+
+ ZoneVector<PackNode*>::size_type GetOperandsSize() const {
+ return operands_.size();
+ }
+
+ void SetOperand(size_t index, PackNode* pnode) {
+ if (operands_.size() < index + 1) operands_.resize(index + 1);
+ operands_[index] = pnode;
+ }
+
+ void Print() const;
+
+ private:
+ ZoneVector<Node*> nodes_;
+ ZoneVector<PackNode*> operands_;
+ Node* revectorized_node_;
+};
+
+// An auxiliary tree structure with a set of PackNodes based on the Superword
+// Level Parallelism (SLP) vectorization technique. The BuildTree method will
+// start from a selected root, e.g. a group of consecutive stores, and extend
+// through value inputs to create new PackNodes if the inputs are valid, or
+// conclude that the current PackNode is a leaf and terminate the tree.
+// Below is an example of SLPTree where loads and stores in each PackNode are
+// all consecutive.
+// [Load0, Load1] [Load2, Load3]
+// \ /
+// [Add0, Add1]
+// |
+// [Store0, Store1]
+class SLPTree : public NON_EXPORTED_BASE(ZoneObject) {
+ public:
+ explicit SLPTree(Zone* zone, Graph* graph)
+ : zone_(zone),
+ graph_(graph),
+ root_(nullptr),
+ on_stack_(zone),
+ stack_(zone),
+ node_to_packnode_(zone) {
+ scheduler_ = zone->New<LinearScheduler>(zone, graph);
+ }
+
+ PackNode* BuildTree(const ZoneVector<Node*>& roots);
+ void DeleteTree();
+
+ PackNode* GetPackNode(Node* node);
+
+ void Print(const char* info);
+
+ template <typename FunctionType>
+ void ForEach(FunctionType callback);
+
+ Node* GetEarlySchedulePosition(Node* node) {
+ return scheduler_->GetEarlySchedulePosition(node);
+ }
+
+ private:
+ friend class LinearScheduler;
+
+ // This is the recursive part of BuildTree.
+ PackNode* BuildTreeRec(const ZoneVector<Node*>& node_group, unsigned depth);
+
+ // Baseline: create a new PackNode, and return.
+ PackNode* NewPackNode(const ZoneVector<Node*>& node_group);
+
+  // Recursion: create a new PackNode and call BuildTreeRec recursively.
+ PackNode* NewPackNodeAndRecurs(const ZoneVector<Node*>& node_group,
+ int start_index, int count, unsigned depth);
+
+ bool CanBePacked(const ZoneVector<Node*>& node_group);
+
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
+
+ // Node stack operations.
+ void PopStack();
+ void PushStack(const ZoneVector<Node*>& node_group);
+ void ClearStack();
+ bool OnStack(Node* node);
+ bool AllOnStack(const ZoneVector<Node*>& node_group);
+ bool StackTopIsPhi();
+
+ void TryReduceLoadChain(const ZoneVector<Node*>& loads);
+ bool IsSideEffectFreeLoad(const ZoneVector<Node*>& node_group);
+ bool SameBasicBlock(Node* node0, Node* node1) {
+ return scheduler_->SameBasicBlock(node0, node1);
+ }
+
+ Zone* const zone_;
+ Graph* const graph_;
+ PackNode* root_;
+ LinearScheduler* scheduler_;
+ ZoneSet<Node*> on_stack_;
+ ZoneStack<ZoneVector<Node*>> stack_;
+ // Maps a specific node to PackNode.
+ ZoneUnorderedMap<Node*, PackNode*> node_to_packnode_;
+ static constexpr size_t RecursionMaxDepth = 1000;
+};
+
+// The Revectorizer pass will first collect seeds, i.e. valid groups of
+// consecutive stores, as roots to build the SLPTree. If the SLPTree is built
+// successfully, it will estimate the cost of the 256-bit transformation for
+// each PackNode and conduct the final revectorization if beneficial.
+class V8_EXPORT_PRIVATE Revectorizer final
+ : public NON_EXPORTED_BASE(ZoneObject) {
+ public:
+ Revectorizer(Zone* zone, Graph* graph, MachineGraph* mcgraph)
+ : zone_(zone),
+ graph_(graph),
+ mcgraph_(mcgraph),
+ group_of_stores_(zone),
+ support_simd256_(false) {
+ DetectCPUFeatures();
+ slp_tree_ = zone_->New<SLPTree>(zone, graph);
+ }
+
+ void DetectCPUFeatures();
+ bool TryRevectorize(const char* name);
+
+ private:
+ void CollectSeeds();
+
+ bool ReduceStoreChains(ZoneMap<Node*, StoreNodeSet>* store_chains);
+ bool ReduceStoreChain(const ZoneVector<Node*>& Stores);
+
+ void PrintStores(ZoneMap<Node*, StoreNodeSet>* store_chains);
+ Zone* zone() const { return zone_; }
+ Graph* graph() const { return graph_; }
+ MachineGraph* mcgraph() const { return mcgraph_; }
+
+ PackNode* GetPackNode(Node* node) const {
+ return slp_tree_->GetPackNode(node);
+ }
+
+ bool DecideVectorize();
+
+ void SetEffectInput(PackNode* pnode, int index, Node*& nput);
+ void SetMemoryOpInputs(base::SmallVector<Node*, 2>& inputs, PackNode* pnode,
+ int index);
+ Node* VectorizeTree(PackNode* pnode);
+
+ Zone* const zone_;
+ Graph* const graph_;
+ MachineGraph* const mcgraph_;
+ ZoneMap<Node*, ZoneMap<Node*, StoreNodeSet>*> group_of_stores_;
+ SLPTree* slp_tree_;
+
+ bool support_simd256_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_REVECTORIZER_H_
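As a hardware-level illustration of what the Revectorizer described in this header aims for, the sketch below shows two independent simd128 additions over adjacent memory being replaced by one simd256 addition. It is not V8 code; it assumes an AVX2-capable CPU and a compiler invoked with -mavx2 (e.g. g++ -O2 -mavx2 revec_demo.cc).

#include <immintrin.h>
#include <cstdio>

void add_simd128_pair(const float* a, const float* b, float* out) {
  // Before revectorization: two simd128 loads/adds/stores over consecutive
  // 16-byte slots.
  __m128 lo = _mm_add_ps(_mm_loadu_ps(a), _mm_loadu_ps(b));
  __m128 hi = _mm_add_ps(_mm_loadu_ps(a + 4), _mm_loadu_ps(b + 4));
  _mm_storeu_ps(out, lo);
  _mm_storeu_ps(out + 4, hi);
}

void add_simd256(const float* a, const float* b, float* out) {
  // After revectorization: the pair is packed into one simd256
  // load/add/store using a YMM register.
  _mm256_storeu_ps(out, _mm256_add_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b)));
}

int main() {
  float a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  float b[8] = {7, 6, 5, 4, 3, 2, 1, 0};
  float r128[8], r256[8];
  add_simd128_pair(a, b, r128);
  add_simd256(a, b, r256);
  for (int i = 0; i < 8; ++i) std::printf("%.0f %.0f\n", r128[i], r256[i]);
  return 0;
}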
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index c608dd63ad..d38f68543d 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -460,22 +460,14 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlock* block :
((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
if (block == nullptr) continue;
- if (block->rpo_number() == -1) {
- os << "--- BLOCK id:" << block->id();
- } else {
- os << "--- BLOCK B" << block->rpo_number();
- }
+ os << "--- BLOCK B" << block->rpo_number() << " id" << block->id();
if (block->deferred()) os << " (deferred)";
if (block->PredecessorCount() != 0) os << " <- ";
bool comma = false;
for (BasicBlock const* predecessor : block->predecessors()) {
if (comma) os << ", ";
comma = true;
- if (predecessor->rpo_number() == -1) {
- os << "id:" << predecessor->id();
- } else {
- os << "B" << predecessor->rpo_number();
- }
+ os << "B" << predecessor->rpo_number();
}
os << " ---\n";
for (Node* node : *block) {
@@ -498,11 +490,7 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlock const* successor : block->successors()) {
if (comma) os << ", ";
comma = true;
- if (successor->rpo_number() == -1) {
- os << "id:" << successor->id();
- } else {
- os << "B" << successor->rpo_number();
- }
+ os << "B" << successor->rpo_number();
}
os << "\n";
}
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 4da855cf6e..a7d6e56ac6 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -495,13 +495,6 @@ class CFGBuilder : public ZoneObject {
break;
}
- if (v8_flags.warn_about_builtin_profile_data &&
- hint_from_profile != BranchHint::kNone &&
- BranchHintOf(branch->op()) != BranchHint::kNone &&
- hint_from_profile != BranchHintOf(branch->op())) {
- PrintF("Warning: profiling data overrode manual branch hint.\n");
- }
-
if (branch == component_entry_) {
TraceConnect(branch, component_start_, successor_blocks[0]);
TraceConnect(branch, component_start_, successor_blocks[1]);
@@ -1307,7 +1300,7 @@ class PrepareUsesVisitor {
: scheduler_(scheduler),
schedule_(scheduler->schedule_),
graph_(graph),
- visited_(graph_->NodeCount(), false, zone),
+ visited_(static_cast<int>(graph_->NodeCount()), zone),
stack_(zone) {}
void Run() {
@@ -1340,7 +1333,7 @@ class PrepareUsesVisitor {
}
}
stack_.push(node);
- visited_[node->id()] = true;
+ visited_.Add(node->id());
}
void VisitInputs(Node* node) {
@@ -1363,12 +1356,12 @@ class PrepareUsesVisitor {
}
}
- bool Visited(Node* node) { return visited_[node->id()]; }
+ bool Visited(Node* node) { return visited_.Contains(node->id()); }
Scheduler* scheduler_;
Schedule* schedule_;
Graph* graph_;
- BoolVector visited_;
+ BitVector visited_;
ZoneStack<Node*> stack_;
};
@@ -1506,7 +1499,6 @@ class ScheduleLateNodeVisitor {
: zone_(zone),
scheduler_(scheduler),
schedule_(scheduler_->schedule_),
- marked_(scheduler->zone_),
marking_queue_(scheduler->zone_) {}
// Run the schedule late algorithm on a set of fixed root nodes.
@@ -1594,15 +1586,13 @@ class ScheduleLateNodeVisitor {
}
bool IsMarked(BasicBlock* block) const {
- DCHECK_LT(block->id().ToSize(), marked_.size());
- return marked_[block->id().ToSize()];
+ return marked_.Contains(block->id().ToInt());
}
- void Mark(BasicBlock* block) { marked_[block->id().ToSize()] = true; }
+ void Mark(BasicBlock* block) { marked_.Add(block->id().ToInt()); }
// Mark {block} and push its non-marked predecessor on the marking queue.
void MarkBlock(BasicBlock* block) {
- DCHECK_LT(block->id().ToSize(), marked_.size());
Mark(block);
for (BasicBlock* pred_block : block->predecessors()) {
if (IsMarked(pred_block)) continue;
@@ -1623,8 +1613,11 @@ class ScheduleLateNodeVisitor {
// Clear marking bits.
DCHECK(marking_queue_.empty());
- std::fill(marked_.begin(), marked_.end(), false);
- marked_.resize(schedule_->BasicBlockCount() + 1, false);
+ marked_.Clear();
+ int new_size = static_cast<int>(schedule_->BasicBlockCount() + 1);
+ if (marked_.length() < new_size) {
+ marked_.Resize(new_size, scheduler_->zone_);
+ }
// Check if the {node} has uses in {block}.
for (Edge edge : node->use_edges()) {
@@ -1647,10 +1640,12 @@ class ScheduleLateNodeVisitor {
marking_queue_.pop_front();
if (IsMarked(top_block)) continue;
bool marked = true;
- for (BasicBlock* successor : top_block->successors()) {
- if (!IsMarked(successor)) {
- marked = false;
- break;
+ if (top_block->loop_depth() == block->loop_depth()) {
+ for (BasicBlock* successor : top_block->successors()) {
+ if (!IsMarked(successor)) {
+ marked = false;
+ break;
+ }
}
}
if (marked) MarkBlock(top_block);
@@ -1840,7 +1835,7 @@ class ScheduleLateNodeVisitor {
Zone* zone_;
Scheduler* scheduler_;
Schedule* schedule_;
- BoolVector marked_;
+ BitVector marked_;
ZoneDeque<BasicBlock*> marking_queue_;
};
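The scheduler hunks above replace a BoolVector with a BitVector keyed by node or block id. The minimal stand-in below sketches only the Add/Contains/Clear/Resize surface being relied on; TinyBitVector is an illustrative simplification, not V8's BitVector.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

class TinyBitVector {
 public:
  explicit TinyBitVector(int length)
      : bits_((length + 63) / 64, 0), length_(length) {}
  int length() const { return length_; }
  void Add(int i) {
    assert(i >= 0 && i < length_);
    bits_[i / 64] |= uint64_t{1} << (i % 64);
  }
  bool Contains(int i) const {
    assert(i >= 0 && i < length_);
    return (bits_[i / 64] >> (i % 64)) & 1;
  }
  void Clear() { std::fill(bits_.begin(), bits_.end(), 0); }
  void Resize(int new_length) {
    bits_.resize((new_length + 63) / 64, 0);
    length_ = new_length;
  }

 private:
  std::vector<uint64_t> bits_;  // one bit per block/node id
  int length_;
};

int main() {
  TinyBitVector marked(4);
  marked.Add(2);
  assert(marked.Contains(2) && !marked.Contains(1));
  if (marked.length() < 9) marked.Resize(9);  // mirrors the scheduler's growth check
  marked.Add(8);
  marked.Clear();
  assert(!marked.Contains(2) && !marked.Contains(8));
  return 0;
}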
diff --git a/deps/v8/src/compiler/simplified-lowering-verifier.cc b/deps/v8/src/compiler/simplified-lowering-verifier.cc
index 9548edc43d..b1eaaed57c 100644
--- a/deps/v8/src/compiler/simplified-lowering-verifier.cc
+++ b/deps/v8/src/compiler/simplified-lowering-verifier.cc
@@ -668,6 +668,7 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
CASE(LoadStackCheckOffset)
CASE(LoadFramePointer)
CASE(LoadParentFramePointer)
+ CASE(LoadRootRegister)
CASE(UnalignedLoad)
CASE(UnalignedStore)
CASE(Int32PairAdd)
@@ -678,6 +679,8 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
CASE(Word32PairSar)
CASE(ProtectedLoad)
CASE(ProtectedStore)
+ CASE(LoadTrapOnNull)
+ CASE(StoreTrapOnNull)
CASE(MemoryBarrier)
CASE(SignExtendWord8ToInt32)
CASE(SignExtendWord16ToInt32)
@@ -720,7 +723,8 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
// TODO(nicohartmann@): These operators might need to be supported.
break;
}
- MACHINE_SIMD_OP_LIST(CASE)
+ MACHINE_SIMD128_OP_LIST(CASE)
+ MACHINE_SIMD256_OP_LIST(CASE)
IF_WASM(SIMPLIFIED_WASM_OP_LIST, CASE) {
// SIMD operators should not be in the graph, yet.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 2d47663809..190e8bfa00 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -96,7 +96,7 @@ MachineRepresentation MachineRepresentationFromArrayType(
return MachineRepresentation::kFloat64;
case kExternalBigInt64Array:
case kExternalBigUint64Array:
- UNIMPLEMENTED();
+ return MachineRepresentation::kWord64;
}
UNREACHABLE();
}
@@ -156,7 +156,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kWord32:
return UseInfo::TruncatingWord32();
case MachineRepresentation::kWord64:
- return UseInfo::Word64();
+ return UseInfo::TruncatingWord64();
case MachineRepresentation::kBit:
return UseInfo::Bool();
case MachineRepresentation::kCompressedPointer:
@@ -359,11 +359,10 @@ class RepresentationSelector {
linkage_(linkage),
observe_node_manager_(observe_node_manager),
verifier_(verifier) {
- Factory* factory = broker_->isolate()->factory();
singleton_true_ =
- Type::Constant(broker, factory->true_value(), graph_zone());
+ Type::Constant(broker, broker->true_value(), graph_zone());
singleton_false_ =
- Type::Constant(broker, factory->false_value(), graph_zone());
+ Type::Constant(broker, broker->false_value(), graph_zone());
}
bool verification_enabled() const { return verifier_ != nullptr; }
@@ -1960,6 +1959,7 @@ class RepresentationSelector {
case CTypeInfo::Type::kFloat32:
case CTypeInfo::Type::kFloat64:
return UseInfo::CheckedNumberAsFloat64(kDistinguishZeros, feedback);
+ case CTypeInfo::Type::kPointer:
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kSeqOneByteString:
case CTypeInfo::Type::kApiObject:
@@ -2032,6 +2032,9 @@ class RepresentationSelector {
return MachineType::Float32();
case wasm::kF64:
return MachineType::Float64();
+ case wasm::kRef:
+ case wasm::kRefNull:
+ return MachineType::AnyTagged();
default:
UNREACHABLE();
}
@@ -2054,6 +2057,9 @@ class RepresentationSelector {
// WasmWrapperGraphBuilder::BuildJSToWasmWrapper.
return UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
feedback);
+ case wasm::kRef:
+ case wasm::kRefNull:
+ return UseInfo::AnyTagged();
default:
UNREACHABLE();
}
@@ -2181,13 +2187,15 @@ class RepresentationSelector {
DCHECK_EQ(0, node->InputCount());
SetOutput<T>(node, MachineRepresentation::kWord32);
DCHECK(NodeProperties::GetType(node).Is(Type::Machine()));
- if (verification_enabled()) {
+ if (V8_UNLIKELY(verification_enabled())) {
// During lowering, SimplifiedLowering generates Int32Constants which
// need to be treated differently by the verifier than the
// Int32Constants introduced explicitly in machine graphs. To be able
// to distinguish them, we record those that are being visited here
// because they were generated before SimplifiedLowering.
- verifier_->RecordMachineUsesOfConstant(node, node->uses());
+ if (propagate<T>()) {
+ verifier_->RecordMachineUsesOfConstant(node, node->uses());
+ }
}
return;
case IrOpcode::kInt64Constant:
@@ -2273,6 +2281,12 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kJSToBigInt:
+ case IrOpcode::kJSToBigIntConvertNumber: {
+ VisitInputs<T>(node);
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
//------------------------------------------------------------------
// Simplified operators.
@@ -2296,9 +2310,9 @@ class RepresentationSelector {
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
ChangeOp(node, lowering->machine()->Word32Equal());
} else if (CanBeTaggedPointer(input_info->representation())) {
- // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
+ // BooleanNot(x: kRepTagged) => TaggedEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
- ChangeOp(node, lowering->machine()->WordEqual());
+ ChangeOp(node, lowering->machine()->TaggedEqual());
} else {
DCHECK(TypeOf(node->InputAt(0)).IsNone());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
@@ -2832,11 +2846,23 @@ class RepresentationSelector {
if (input_type.Is(Type::Unsigned32OrMinusZero())) {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) {
+ DeferReplacement(
+ node,
+ InsertTypeOverrideForVerifier(
+ Type::Intersect(input_type, Type::Unsigned32(), zone()),
+ node->InputAt(0)));
+ }
} else if (input_type.Is(Type::Signed32OrMinusZero())) {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) DeferReplacement(node, lowering->Int32Abs(node));
+ if (lower<T>()) {
+ DeferReplacement(
+ node,
+ InsertTypeOverrideForVerifier(
+ Type::Intersect(input_type, Type::Unsigned32(), zone()),
+ lowering->Int32Abs(node)));
+ }
} else if (input_type.Is(type_cache_->kPositiveIntegerOrNaN)) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kFloat64);
@@ -3198,6 +3224,16 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kIntegral32OrMinusZeroToBigInt: {
+ VisitUnop<T>(node, UseInfo::Word64(kIdentifyZeros),
+ MachineRepresentation::kWord64);
+ if (lower<T>()) {
+ DeferReplacement(
+ node, InsertTypeOverrideForVerifier(NodeProperties::GetType(node),
+ node->InputAt(0)));
+ }
+ return;
+ }
case IrOpcode::kReferenceEqual: {
VisitBinop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower<T>()) {
@@ -3305,31 +3341,186 @@ class RepresentationSelector {
}
}
}
- case IrOpcode::kSpeculativeBigIntBitwiseAnd: {
- if (truncation.IsUnused()) {
- Type left_type = GetUpperBound(node->InputAt(0));
- Type right_type = GetUpperBound(node->InputAt(1));
- if (left_type.Is(Type::BigInt()) && right_type.Is(Type::BigInt())) {
- VisitUnused<T>(node);
- return;
- }
+ case IrOpcode::kSpeculativeBigIntBitwiseAnd:
+ case IrOpcode::kSpeculativeBigIntBitwiseOr:
+ case IrOpcode::kSpeculativeBigIntBitwiseXor: {
+ if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
+ VisitUnused<T>(node);
+ return;
}
if (truncation.IsUsedAsWord64()) {
VisitBinop<T>(
node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
MachineRepresentation::kWord64);
if (lower<T>()) {
- ChangeToPureOp(node, lowering->machine()->Word64And());
+ ChangeToPureOp(node, Int64Op(node));
}
- } else {
- VisitBinop<T>(node,
- UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
- MachineRepresentation::kTaggedPointer);
- if (lower<T>()) {
- ChangeOp(node, lowering->simplified()->BigIntBitwiseAnd());
+ return;
+ }
+ BigIntOperationHint hint = BigIntOperationHintOf(node->op());
+ switch (hint) {
+ case BigIntOperationHint::kBigInt64: {
+ VisitBinop<T>(
+ node, UseInfo::CheckedBigInt64AsWord64(FeedbackSource{}),
+ MachineRepresentation::kWord64, Type::SignedBigInt64());
+ if (lower<T>()) {
+ ChangeToPureOp(node, Int64Op(node));
+ }
+ return;
+ }
+ case BigIntOperationHint::kBigInt: {
+ VisitBinop<T>(
+ node, UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
+ ChangeOp(node, BigIntOp(node));
+ }
+ return;
+ }
+ }
+ }
+ case IrOpcode::kSpeculativeBigIntShiftLeft:
+ case IrOpcode::kSpeculativeBigIntShiftRight: {
+ if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
+ VisitUnused<T>(node);
+ return;
+ }
+ if (truncation.IsUsedAsWord64()) {
+ Type input_type = GetUpperBound(node->InputAt(0));
+ Type shift_amount_type = GetUpperBound(node->InputAt(1));
+
+ if (shift_amount_type.IsHeapConstant()) {
+ HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref();
+ if (ref.IsBigInt()) {
+ BigIntRef bigint = ref.AsBigInt();
+ bool lossless = false;
+ int64_t shift_amount = bigint.AsInt64(&lossless);
+
+ // Canonicalize {shift_amount}.
+ bool is_shift_left =
+ node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft;
+ if (shift_amount < 0) {
+ is_shift_left = !is_shift_left;
+ shift_amount = -shift_amount;
+ }
+ DCHECK_GE(shift_amount, 0);
+
+ // If the operation is a *real* left shift, propagate truncation.
+ // If it is a *real* right shift, the output representation is
+ // word64 only if we know the input type is BigInt64.
+ // Otherwise, fall through to using BigIntOperationHint.
+ if (is_shift_left) {
+ VisitBinop<T>(
+ node,
+ UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
+ UseInfo::Any(), MachineRepresentation::kWord64);
+ if (lower<T>()) {
+ if (!lossless || shift_amount > 63) {
+ DeferReplacement(node, jsgraph_->Int64Constant(0));
+ } else if (shift_amount == 0) {
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ DCHECK_GE(shift_amount, 1);
+ DCHECK_LE(shift_amount, 63);
+ ReplaceWithPureNode(
+ node,
+ graph()->NewNode(
+ lowering->machine()->Word64Shl(), node->InputAt(0),
+ jsgraph_->Int64Constant(shift_amount)));
+ }
+ }
+ return;
+ } else if (input_type.Is(Type::SignedBigInt64())) {
+ VisitBinop<T>(node, UseInfo::Word64(), UseInfo::Any(),
+ MachineRepresentation::kWord64);
+ if (lower<T>()) {
+ if (!lossless || shift_amount > 63) {
+ ReplaceWithPureNode(
+ node, graph()->NewNode(lowering->machine()->Word64Sar(),
+ node->InputAt(0),
+ jsgraph_->Int64Constant(63)));
+ } else if (shift_amount == 0) {
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ DCHECK_GE(shift_amount, 1);
+ DCHECK_LE(shift_amount, 63);
+ ReplaceWithPureNode(
+ node,
+ graph()->NewNode(
+ lowering->machine()->Word64Sar(), node->InputAt(0),
+ jsgraph_->Int64Constant(shift_amount)));
+ }
+ }
+ return;
+ } else if (input_type.Is(Type::UnsignedBigInt64())) {
+ VisitBinop<T>(node, UseInfo::Word64(), UseInfo::Any(),
+ MachineRepresentation::kWord64);
+ if (lower<T>()) {
+ if (!lossless || shift_amount > 63) {
+ DeferReplacement(node, jsgraph_->Int64Constant(0));
+ } else if (shift_amount == 0) {
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ DCHECK_GE(shift_amount, 1);
+ DCHECK_LE(shift_amount, 63);
+ ReplaceWithPureNode(
+ node,
+ graph()->NewNode(
+ lowering->machine()->Word64Shr(), node->InputAt(0),
+ jsgraph_->Int64Constant(shift_amount)));
+ }
+ }
+ return;
+ }
+ }
+ }
+ }
+ BigIntOperationHint hint = BigIntOperationHintOf(node->op());
+ switch (hint) {
+ case BigIntOperationHint::kBigInt64:
+ // Do not collect or use BigInt64 feedback for shift operations.
+ UNREACHABLE();
+ case BigIntOperationHint::kBigInt: {
+ VisitBinop<T>(
+ node, UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
+ ChangeOp(node, BigIntOp(node));
+ }
+ return;
+ }
+ }
+ }
+ case IrOpcode::kSpeculativeBigIntEqual:
+ case IrOpcode::kSpeculativeBigIntLessThan:
+ case IrOpcode::kSpeculativeBigIntLessThanOrEqual: {
+ // Loose equality can throw a TypeError when failing to cast an object
+ // operand to primitive.
+ if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
+ VisitUnused<T>(node);
+ return;
+ }
+ BigIntOperationHint hint = BigIntOperationHintOf(node->op());
+ switch (hint) {
+ case BigIntOperationHint::kBigInt64: {
+ VisitBinop<T>(node,
+ UseInfo::CheckedBigInt64AsWord64(FeedbackSource{}),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
+ ChangeToPureOp(node, Int64Op(node));
+ }
+ return;
+ }
+ case BigIntOperationHint::kBigInt: {
+ VisitBinop<T>(
+ node, UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
+ ChangeToPureOp(node, BigIntOp(node));
+ }
+ return;
}
}
- return;
}
case IrOpcode::kSpeculativeBigIntNegate: {
if (truncation.IsUnused()) {
@@ -3776,6 +3967,34 @@ class RepresentationSelector {
if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
+ case IrOpcode::kSpeculativeToBigInt: {
+ if (truncation.IsUnused() && InputIs(node, Type::BigInt())) {
+ VisitUnused<T>(node);
+ return;
+ }
+ if (truncation.IsUsedAsWord64()) {
+ VisitUnop<T>(node,
+ UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
+ MachineRepresentation::kWord64);
+ } else {
+ BigIntOperationParameters const& p =
+ BigIntOperationParametersOf(node->op());
+ switch (p.hint()) {
+ case BigIntOperationHint::kBigInt64: {
+ VisitUnop<T>(node, UseInfo::CheckedBigInt64AsWord64(p.feedback()),
+ MachineRepresentation::kWord64);
+ break;
+ }
+ case BigIntOperationHint::kBigInt: {
+ VisitUnop<T>(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(p.feedback()),
+ MachineRepresentation::kTaggedPointer);
+ }
+ }
+ }
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
+ return;
+ }
case IrOpcode::kObjectIsArrayBufferView: {
// TODO(turbofan): Introduce a Type::ArrayBufferView?
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -4251,6 +4470,14 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kCheckTurboshaftTypeOf: {
+ NodeInfo* info = GetInfo(node->InputAt(0));
+ MachineRepresentation input_rep = info->representation();
+ ProcessInput<T>(node, 0, UseInfo{input_rep, Truncation::None()});
+ ProcessInput<T>(node, 1, UseInfo::Any());
+ SetOutput<T>(node, input_rep);
+ return;
+ }
case IrOpcode::kDebugBreak:
return;
@@ -4278,6 +4505,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kInt32Add:
+ case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kInt32Sub:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
@@ -4299,6 +4527,7 @@ class RepresentationSelector {
case IrOpcode::kWord64And:
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
+ case IrOpcode::kChangeUint32ToUint64:
for (int i = 0; i < node->InputCount(); ++i) {
ProcessInput<T>(node, i, UseInfo::Any());
}
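The constant-shift lowering added above canonicalizes the shift amount (a negative left shift becomes a right shift and vice versa) and saturates over-long shifts: to zero for left shifts and unsigned right shifts, and to the sign word for signed right shifts. The standalone helper below, LowerSignedShift, is a hypothetical illustration of that decision logic for the signed case, not V8 code.

#include <cstdint>
#include <cstdio>

int64_t LowerSignedShift(int64_t value, int64_t shift_amount, bool shift_left) {
  if (shift_amount < 0) {  // canonicalize the direction
    shift_left = !shift_left;
    shift_amount = -shift_amount;
  }
  if (shift_left) {
    // Over-shifting a left shift saturates to zero.
    return shift_amount > 63 ? 0 : value << shift_amount;
  }
  // Signed right shift: over-shifting leaves only the sign word.
  return shift_amount > 63 ? value >> 63 : value >> shift_amount;
}

int main() {
  std::printf("%lld\n", (long long)LowerSignedShift(-16, 2, /*shift_left=*/false));    // -4
  std::printf("%lld\n", (long long)LowerSignedShift(-16, -2, /*shift_left=*/true));    // -4 (direction flipped)
  std::printf("%lld\n", (long long)LowerSignedShift(-16, 100, /*shift_left=*/false));  // -1 (sign only)
  std::printf("%lld\n", (long long)LowerSignedShift(5, 100, /*shift_left=*/true));     // 0
  return 0;
}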
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index c0d5f0b4e4..b48fea68fd 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -65,7 +65,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
base::Optional<bool> maybe_result =
- m.Ref(broker()).TryGetBooleanValue();
+ m.Ref(broker()).TryGetBooleanValue(broker());
if (maybe_result.has_value()) return ReplaceInt32(*maybe_result);
}
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 42dacef032..996f47bcb2 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -153,6 +153,43 @@ std::ostream& operator<<(std::ostream& os, ObjectAccess const& access) {
return os;
}
+#if V8_ENABLE_WEBASSEMBLY
+
+V8_EXPORT_PRIVATE bool operator==(WasmFieldInfo const& lhs,
+ WasmFieldInfo const& rhs) {
+ return lhs.field_index == rhs.field_index && lhs.type == rhs.type &&
+ lhs.is_signed == rhs.is_signed && lhs.null_check == rhs.null_check;
+}
+
+size_t hash_value(WasmFieldInfo const& info) {
+ return base::hash_combine(info.field_index, info.type, info.is_signed,
+ info.null_check);
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ WasmFieldInfo const& info) {
+ return os << info.field_index << ", "
+ << (info.is_signed ? "signed" : "unsigned") << ", "
+ << (info.null_check == kWithNullCheck ? "null check"
+ : "no null check");
+}
+
+V8_EXPORT_PRIVATE bool operator==(WasmElementInfo const& lhs,
+ WasmElementInfo const& rhs) {
+ return lhs.type == rhs.type && lhs.is_signed == rhs.is_signed;
+}
+
+size_t hash_value(WasmElementInfo const& info) {
+ return base::hash_combine(info.type, info.is_signed);
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ WasmElementInfo const& info) {
+ return os << (info.is_signed ? "signed" : "unsigned");
+}
+
+#endif
+
const FieldAccess& FieldAccessOf(const Operator* op) {
DCHECK_NOT_NULL(op);
DCHECK(op->opcode() == IrOpcode::kLoadField ||
@@ -548,7 +585,15 @@ BigIntOperationHint BigIntOperationHintOf(const Operator* op) {
op->opcode() == IrOpcode::kSpeculativeBigIntSubtract ||
op->opcode() == IrOpcode::kSpeculativeBigIntMultiply ||
op->opcode() == IrOpcode::kSpeculativeBigIntDivide ||
- op->opcode() == IrOpcode::kSpeculativeBigIntModulus);
+ op->opcode() == IrOpcode::kSpeculativeBigIntModulus ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntBitwiseAnd ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntBitwiseOr ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntBitwiseXor ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntShiftRight ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntEqual ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntLessThan ||
+ op->opcode() == IrOpcode::kSpeculativeBigIntLessThanOrEqual);
return OpParameter<BigIntOperationHint>(op);
}
@@ -572,6 +617,26 @@ NumberOperationParameters const& NumberOperationParametersOf(
return OpParameter<NumberOperationParameters>(op);
}
+bool operator==(BigIntOperationParameters const& lhs,
+ BigIntOperationParameters const& rhs) {
+ return lhs.hint() == rhs.hint() && lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(BigIntOperationParameters const& p) {
+ FeedbackSource::Hash feedback_hash;
+ return base::hash_combine(p.hint(), feedback_hash(p.feedback()));
+}
+
+std::ostream& operator<<(std::ostream& os, BigIntOperationParameters const& p) {
+ return os << p.hint() << ", " << p.feedback();
+}
+
+BigIntOperationParameters const& BigIntOperationParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kSpeculativeToBigInt, op->opcode());
+ return OpParameter<BigIntOperationParameters>(op);
+}
+
bool operator==(SpeculativeBigIntAsNParameters const& lhs,
SpeculativeBigIntAsNParameters const& rhs) {
return lhs.bits() == rhs.bits() && lhs.feedback() == rhs.feedback();
@@ -678,129 +743,149 @@ bool operator==(CheckMinusZeroParameters const& lhs,
return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1, 0) \
- V(NumberEqual, Operator::kCommutative, 2, 0) \
- V(NumberLessThan, Operator::kNoProperties, 2, 0) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(NumberAdd, Operator::kCommutative, 2, 0) \
- V(NumberSubtract, Operator::kNoProperties, 2, 0) \
- V(NumberMultiply, Operator::kCommutative, 2, 0) \
- V(NumberDivide, Operator::kNoProperties, 2, 0) \
- V(NumberModulus, Operator::kNoProperties, 2, 0) \
- V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
- V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
- V(NumberImul, Operator::kCommutative, 2, 0) \
- V(NumberAbs, Operator::kNoProperties, 1, 0) \
- V(NumberClz32, Operator::kNoProperties, 1, 0) \
- V(NumberCeil, Operator::kNoProperties, 1, 0) \
- V(NumberFloor, Operator::kNoProperties, 1, 0) \
- V(NumberFround, Operator::kNoProperties, 1, 0) \
- V(NumberAcos, Operator::kNoProperties, 1, 0) \
- V(NumberAcosh, Operator::kNoProperties, 1, 0) \
- V(NumberAsin, Operator::kNoProperties, 1, 0) \
- V(NumberAsinh, Operator::kNoProperties, 1, 0) \
- V(NumberAtan, Operator::kNoProperties, 1, 0) \
- V(NumberAtan2, Operator::kNoProperties, 2, 0) \
- V(NumberAtanh, Operator::kNoProperties, 1, 0) \
- V(NumberCbrt, Operator::kNoProperties, 1, 0) \
- V(NumberCos, Operator::kNoProperties, 1, 0) \
- V(NumberCosh, Operator::kNoProperties, 1, 0) \
- V(NumberExp, Operator::kNoProperties, 1, 0) \
- V(NumberExpm1, Operator::kNoProperties, 1, 0) \
- V(NumberLog, Operator::kNoProperties, 1, 0) \
- V(NumberLog1p, Operator::kNoProperties, 1, 0) \
- V(NumberLog10, Operator::kNoProperties, 1, 0) \
- V(NumberLog2, Operator::kNoProperties, 1, 0) \
- V(NumberMax, Operator::kNoProperties, 2, 0) \
- V(NumberMin, Operator::kNoProperties, 2, 0) \
- V(NumberPow, Operator::kNoProperties, 2, 0) \
- V(NumberRound, Operator::kNoProperties, 1, 0) \
- V(NumberSign, Operator::kNoProperties, 1, 0) \
- V(NumberSin, Operator::kNoProperties, 1, 0) \
- V(NumberSinh, Operator::kNoProperties, 1, 0) \
- V(NumberSqrt, Operator::kNoProperties, 1, 0) \
- V(NumberTan, Operator::kNoProperties, 1, 0) \
- V(NumberTanh, Operator::kNoProperties, 1, 0) \
- V(NumberTrunc, Operator::kNoProperties, 1, 0) \
- V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
- V(NumberToInt32, Operator::kNoProperties, 1, 0) \
- V(NumberToString, Operator::kNoProperties, 1, 0) \
- V(NumberToUint32, Operator::kNoProperties, 1, 0) \
- V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
- V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
- V(BigIntNegate, Operator::kNoProperties, 1, 0) \
- V(StringConcat, Operator::kNoProperties, 3, 0) \
- V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
- V(StringIndexOf, Operator::kNoProperties, 3, 0) \
- V(StringLength, Operator::kNoProperties, 1, 0) \
- V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
- V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
- V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
- V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
- V(TruncateBigIntToWord64, Operator::kNoProperties, 1, 0) \
- V(ChangeInt64ToBigInt, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
- V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
- V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
- V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
- V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
- V(ObjectIsString, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
- V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
- V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
- V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
- V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
- V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
- V(SameValue, Operator::kCommutative, 2, 0) \
- V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
- V(NumberSameValue, Operator::kCommutative, 2, 0) \
- V(ReferenceEqual, Operator::kCommutative, 2, 0) \
- V(StringEqual, Operator::kCommutative, 2, 0) \
- V(StringLessThan, Operator::kNoProperties, 2, 0) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(ToBoolean, Operator::kNoProperties, 1, 0) \
- V(NewConsString, Operator::kNoProperties, 3, 0) \
+#if V8_ENABLE_WEBASSEMBLY
+V8_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream& os, AssertNotNullParameters const& params) {
+ return os << params.type << ", " << params.trap_id;
+}
+
+size_t hash_value(AssertNotNullParameters const& params) {
+ return base::hash_combine(params.type, params.trap_id);
+}
+
+bool operator==(AssertNotNullParameters const& lhs,
+ AssertNotNullParameters const& rhs) {
+ return lhs.type == rhs.type && lhs.trap_id == rhs.trap_id;
+}
+#endif
+
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1, 0) \
+ V(NumberEqual, Operator::kCommutative, 2, 0) \
+ V(NumberLessThan, Operator::kNoProperties, 2, 0) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(NumberAdd, Operator::kCommutative, 2, 0) \
+ V(NumberSubtract, Operator::kNoProperties, 2, 0) \
+ V(NumberMultiply, Operator::kCommutative, 2, 0) \
+ V(NumberDivide, Operator::kNoProperties, 2, 0) \
+ V(NumberModulus, Operator::kNoProperties, 2, 0) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
+ V(NumberImul, Operator::kCommutative, 2, 0) \
+ V(NumberAbs, Operator::kNoProperties, 1, 0) \
+ V(NumberClz32, Operator::kNoProperties, 1, 0) \
+ V(NumberCeil, Operator::kNoProperties, 1, 0) \
+ V(NumberFloor, Operator::kNoProperties, 1, 0) \
+ V(NumberFround, Operator::kNoProperties, 1, 0) \
+ V(NumberAcos, Operator::kNoProperties, 1, 0) \
+ V(NumberAcosh, Operator::kNoProperties, 1, 0) \
+ V(NumberAsin, Operator::kNoProperties, 1, 0) \
+ V(NumberAsinh, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan2, Operator::kNoProperties, 2, 0) \
+ V(NumberAtanh, Operator::kNoProperties, 1, 0) \
+ V(NumberCbrt, Operator::kNoProperties, 1, 0) \
+ V(NumberCos, Operator::kNoProperties, 1, 0) \
+ V(NumberCosh, Operator::kNoProperties, 1, 0) \
+ V(NumberExp, Operator::kNoProperties, 1, 0) \
+ V(NumberExpm1, Operator::kNoProperties, 1, 0) \
+ V(NumberLog, Operator::kNoProperties, 1, 0) \
+ V(NumberLog1p, Operator::kNoProperties, 1, 0) \
+ V(NumberLog10, Operator::kNoProperties, 1, 0) \
+ V(NumberLog2, Operator::kNoProperties, 1, 0) \
+ V(NumberMax, Operator::kNoProperties, 2, 0) \
+ V(NumberMin, Operator::kNoProperties, 2, 0) \
+ V(NumberPow, Operator::kNoProperties, 2, 0) \
+ V(NumberRound, Operator::kNoProperties, 1, 0) \
+ V(NumberSign, Operator::kNoProperties, 1, 0) \
+ V(NumberSin, Operator::kNoProperties, 1, 0) \
+ V(NumberSinh, Operator::kNoProperties, 1, 0) \
+ V(NumberSqrt, Operator::kNoProperties, 1, 0) \
+ V(NumberTan, Operator::kNoProperties, 1, 0) \
+ V(NumberTanh, Operator::kNoProperties, 1, 0) \
+ V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
+ V(Integral32OrMinusZeroToBigInt, Operator::kNoProperties, 1, 0) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntEqual, Operator::kNoProperties, 2, 0) \
+ V(BigIntLessThan, Operator::kNoProperties, 2, 0) \
+ V(BigIntLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
+ V(StringConcat, Operator::kNoProperties, 3, 0) \
+ V(StringToNumber, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
+ V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(TypeOf, Operator::kNoProperties, 1, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToWord64, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToBigInt, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
+ V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
+ V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(SameValue, Operator::kCommutative, 2, 0) \
+ V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
+ V(NumberSameValue, Operator::kCommutative, 2, 0) \
+ V(ReferenceEqual, Operator::kCommutative, 2, 0) \
+ V(StringEqual, Operator::kCommutative, 2, 0) \
+ V(StringLessThan, Operator::kNoProperties, 2, 0) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0) \
V(Unsigned32Divide, Operator::kNoProperties, 2, 0)
#define EFFECT_DEPENDENT_OP_LIST(V) \
@@ -810,6 +895,10 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(BigIntDivide, Operator::kNoProperties, 2, 1) \
V(BigIntModulus, Operator::kNoProperties, 2, 1) \
V(BigIntBitwiseAnd, Operator::kNoProperties, 2, 1) \
+ V(BigIntBitwiseOr, Operator::kNoProperties, 2, 1) \
+ V(BigIntBitwiseXor, Operator::kNoProperties, 2, 1) \
+ V(BigIntShiftLeft, Operator::kNoProperties, 2, 1) \
+ V(BigIntShiftRight, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
V(StringFromCodePointAt, Operator::kNoProperties, 2, 1) \
@@ -1168,37 +1257,37 @@ struct SimplifiedOperatorGlobalCache final {
LoadStackArgumentOperator kLoadStackArgument;
#if V8_ENABLE_WEBASSEMBLY
- // Note: The following two operators have a control input solely to find the
- // typing context from the control path in wasm-gc-operator-reducer.
- struct IsNullOperator final : public Operator {
- IsNullOperator()
- : Operator(IrOpcode::kIsNull, Operator::kPure, "IsNull", 1, 0, 1, 1, 0,
- 0) {}
+ struct WasmArrayLengthOperator final : public Operator1<bool> {
+ explicit WasmArrayLengthOperator(bool null_check)
+ : Operator1<bool>(IrOpcode::kWasmArrayLength, Operator::kEliminatable,
+ "WasmArrayLength", 1, 1, 1, 1, 1, 1, null_check) {}
};
- IsNullOperator kIsNull;
-
- struct IsNotNullOperator final : public Operator {
- IsNotNullOperator()
- : Operator(IrOpcode::kIsNotNull, Operator::kPure, "IsNotNull", 1, 0, 1,
- 1, 0, 0) {}
+ WasmArrayLengthOperator kWasmArrayLengthNullCheck{true};
+ WasmArrayLengthOperator kWasmArrayLengthNoNullCheck{false};
+
+ struct WasmArrayInitializeLengthOperator final : public Operator {
+ WasmArrayInitializeLengthOperator()
+ : Operator(IrOpcode::kWasmArrayInitializeLength,
+ Operator::kNoThrow | Operator::kNoRead | Operator::kNoDeopt,
+ "WasmArrayInitializeLength", 2, 1, 1, 0, 1, 0) {}
};
- IsNotNullOperator kIsNotNull;
+ WasmArrayInitializeLengthOperator kWasmArrayInitializeLength;
- struct NullOperator final : public Operator {
- NullOperator()
- : Operator(IrOpcode::kNull, Operator::kPure, "Null", 0, 0, 0, 1, 0, 0) {
- }
+ struct StringAsWtf16Operator final : public Operator {
+ StringAsWtf16Operator()
+ : Operator(IrOpcode::kStringAsWtf16, Operator::kEliminatable,
+ "StringAsWtf16", 1, 1, 1, 1, 1, 1) {}
};
- NullOperator kNull;
-
- struct AssertNotNullOperator final : public Operator {
- AssertNotNullOperator()
- : Operator(
- IrOpcode::kAssertNotNull,
- Operator::kNoWrite | Operator::kNoThrow | Operator::kIdempotent,
- "AssertNotNull", 1, 1, 1, 1, 1, 1) {}
+ StringAsWtf16Operator kStringAsWtf16;
+
+ struct StringPrepareForGetCodeunitOperator final : public Operator {
+ StringPrepareForGetCodeunitOperator()
+ : Operator(IrOpcode::kStringPrepareForGetCodeunit,
+ Operator::kEliminatable, "StringPrepareForGetCodeunit", 1, 1,
+ 1, 3, 1, 1) {}
};
- AssertNotNullOperator kAssertNotNull;
+ StringPrepareForGetCodeunitOperator kStringPrepareForGetCodeunit;
+
#endif
#define SPECULATIVE_NUMBER_BINOP(Name) \
@@ -1237,6 +1326,21 @@ struct SimplifiedOperatorGlobalCache final {
kSpeculativeToNumberNumberOperator;
SpeculativeToNumberOperator<NumberOperationHint::kNumberOrOddball>
kSpeculativeToNumberNumberOrOddballOperator;
+
+ template <BigIntOperationHint kHint>
+ struct SpeculativeToBigIntOperator final
+ : public Operator1<BigIntOperationParameters> {
+ SpeculativeToBigIntOperator()
+ : Operator1<BigIntOperationParameters>(
+ IrOpcode::kSpeculativeToBigInt,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeToBigInt",
+ 1, 1, 1, 1, 1, 0,
+ BigIntOperationParameters(kHint, FeedbackSource())) {}
+ };
+ SpeculativeToBigIntOperator<BigIntOperationHint::kBigInt64>
+ kSpeculativeToBigIntBigInt64Operator;
+ SpeculativeToBigIntOperator<BigIntOperationHint::kBigInt>
+ kSpeculativeToBigIntBigIntOperator;
};
namespace {
@@ -1372,6 +1476,12 @@ const Operator* SimplifiedOperatorBuilder::VerifyType() {
"VerifyType", 1, 0, 0, 1, 0, 0);
}
+const Operator* SimplifiedOperatorBuilder::CheckTurboshaftTypeOf() {
+ return zone()->New<Operator>(IrOpcode::kCheckTurboshaftTypeOf,
+ Operator::kNoThrow | Operator::kNoDeopt,
+ "CheckTurboshaftTypeOf", 2, 1, 1, 1, 1, 0);
+}
+
#if V8_ENABLE_WEBASSEMBLY
const Operator* SimplifiedOperatorBuilder::WasmTypeCheck(
WasmTypeCheckConfig config) {
@@ -1390,18 +1500,59 @@ const Operator* SimplifiedOperatorBuilder::WasmTypeCast(
const Operator* SimplifiedOperatorBuilder::RttCanon(int index) {
return zone()->New<Operator1<int>>(IrOpcode::kRttCanon, Operator::kPure,
- "RttCanon", 0, 0, 0, 1, 0, 0, index);
+ "RttCanon", 1, 0, 0, 1, 0, 0, index);
+}
+
+// Note: The following two operators have a control input solely to find the
+// typing context from the control path in wasm-gc-operator-reducer.
+struct IsNullOperator final : public Operator1<wasm::ValueType> {
+ explicit IsNullOperator(wasm::ValueType type)
+ : Operator1(IrOpcode::kIsNull, Operator::kPure, "IsNull", 1, 0, 1, 1, 0,
+ 0, type) {}
+};
+
+struct IsNotNullOperator final : public Operator1<wasm::ValueType> {
+ explicit IsNotNullOperator(wasm::ValueType type)
+ : Operator1(IrOpcode::kIsNotNull, Operator::kPure, "IsNotNull", 1, 0, 1,
+ 1, 0, 0, type) {}
+};
+
+struct NullOperator final : public Operator1<wasm::ValueType> {
+ explicit NullOperator(wasm::ValueType type)
+ : Operator1(IrOpcode::kNull, Operator::kPure, "Null", 0, 0, 0, 1, 0, 0,
+ type) {}
+};
+
+struct AssertNotNullOperator final : public Operator1<AssertNotNullParameters> {
+ explicit AssertNotNullOperator(wasm::ValueType type, TrapId trap_id)
+ : Operator1(
+ IrOpcode::kAssertNotNull,
+ Operator::kNoWrite | Operator::kNoThrow | Operator::kIdempotent,
+ "AssertNotNull", 1, 1, 1, 1, 1, 1, {type, trap_id}) {}
+};
+
+const Operator* SimplifiedOperatorBuilder::Null(wasm::ValueType type) {
+ return zone()->New<NullOperator>(type);
}
-const Operator* SimplifiedOperatorBuilder::Null() { return &cache_.kNull; }
+const Operator* SimplifiedOperatorBuilder::AssertNotNull(wasm::ValueType type,
+ TrapId trap_id) {
+ return zone()->New<AssertNotNullOperator>(type, trap_id);
+}
+
+const Operator* SimplifiedOperatorBuilder::IsNull(wasm::ValueType type) {
+ return zone()->New<IsNullOperator>(type);
+}
+const Operator* SimplifiedOperatorBuilder::IsNotNull(wasm::ValueType type) {
+ return zone()->New<IsNotNullOperator>(type);
+}
-const Operator* SimplifiedOperatorBuilder::AssertNotNull() {
- return &cache_.kAssertNotNull;
+const Operator* SimplifiedOperatorBuilder::StringAsWtf16() {
+ return &cache_.kStringAsWtf16;
}
-const Operator* SimplifiedOperatorBuilder::IsNull() { return &cache_.kIsNull; }
-const Operator* SimplifiedOperatorBuilder::IsNotNull() {
- return &cache_.kIsNotNull;
+const Operator* SimplifiedOperatorBuilder::StringPrepareForGetCodeunit() {
+ return &cache_.kStringPrepareForGetCodeunit;
}
const Operator* SimplifiedOperatorBuilder::WasmExternInternalize() {
@@ -1415,6 +1566,49 @@ const Operator* SimplifiedOperatorBuilder::WasmExternExternalize() {
Operator::kEliminatable, "WasmExternExternalize",
1, 1, 1, 1, 1, 1);
}
+
+const Operator* SimplifiedOperatorBuilder::WasmStructGet(
+ const wasm::StructType* type, int field_index, bool is_signed,
+ CheckForNull null_check) {
+ return zone()->New<Operator1<WasmFieldInfo>>(
+ IrOpcode::kWasmStructGet, Operator::kEliminatable, "WasmStructGet", 1, 1,
+ 1, 1, 1, 1, WasmFieldInfo{type, field_index, is_signed, null_check});
+}
+
+const Operator* SimplifiedOperatorBuilder::WasmStructSet(
+ const wasm::StructType* type, int field_index, CheckForNull null_check) {
+ return zone()->New<Operator1<WasmFieldInfo>>(
+ IrOpcode::kWasmStructSet,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoRead,
+ "WasmStructSet", 2, 1, 1, 0, 1, 1,
+ WasmFieldInfo{type, field_index, true /* unused */, null_check});
+}
+
+const Operator* SimplifiedOperatorBuilder::WasmArrayGet(
+ const wasm::ArrayType* type, bool is_signed) {
+ return zone()->New<Operator1<WasmElementInfo>>(
+ IrOpcode::kWasmArrayGet, Operator::kEliminatable, "WasmArrayGet", 2, 1, 1,
+ 1, 1, 0, WasmElementInfo{type, is_signed});
+}
+
+const Operator* SimplifiedOperatorBuilder::WasmArraySet(
+ const wasm::ArrayType* type) {
+ return zone()->New<Operator1<const wasm::ArrayType*>>(
+ IrOpcode::kWasmArraySet,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoRead,
+ "WasmArraySet", 3, 1, 1, 0, 1, 0, type);
+}
+
+const Operator* SimplifiedOperatorBuilder::WasmArrayLength(
+ CheckForNull null_check) {
+ return null_check == kWithNullCheck ? &cache_.kWasmArrayLengthNullCheck
+ : &cache_.kWasmArrayLengthNoNullCheck;
+}
+
+const Operator* SimplifiedOperatorBuilder::WasmArrayInitializeLength() {
+ return &cache_.kWasmArrayInitializeLength;
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
const Operator* SimplifiedOperatorBuilder::CheckIf(
@@ -1560,9 +1754,13 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
CheckMapsFlags flags, ZoneHandleSet<Map> maps,
const FeedbackSource& feedback) {
CheckMapsParameters const parameters(flags, maps, feedback);
+ Operator::Properties operator_props = Operator::kNoThrow;
+ if (!(flags & CheckMapsFlag::kTryMigrateInstance)) {
+ operator_props |= Operator::kNoWrite;
+ }
return zone()->New<Operator1<CheckMapsParameters>>( // --
IrOpcode::kCheckMaps, // opcode
- Operator::kNoThrow | Operator::kNoWrite, // flags
+ operator_props, // flags
"CheckMaps", // name
1, 1, 1, 0, 1, 0, // counts
parameters); // parameter
@@ -1618,52 +1816,18 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
CheckFloat64HoleParameters(mode, feedback));
}
-const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd(
- BigIntOperationHint hint) {
- return zone()->New<Operator1<BigIntOperationHint>>(
- IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow,
- "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntSubtract(
- BigIntOperationHint hint) {
- return zone()->New<Operator1<BigIntOperationHint>>(
- IrOpcode::kSpeculativeBigIntSubtract,
- Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntSubtract", 2,
- 1, 1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntMultiply(
- BigIntOperationHint hint) {
- return zone()->New<Operator1<BigIntOperationHint>>(
- IrOpcode::kSpeculativeBigIntMultiply,
- Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntMultiply", 2,
- 1, 1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntDivide(
- BigIntOperationHint hint) {
- return zone()->New<Operator1<BigIntOperationHint>>(
- IrOpcode::kSpeculativeBigIntDivide,
- Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntDivide", 2, 1,
- 1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntModulus(
- BigIntOperationHint hint) {
- return zone()->New<Operator1<BigIntOperationHint>>(
- IrOpcode::kSpeculativeBigIntModulus,
- Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntModulus", 2,
- 1, 1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntBitwiseAnd(
- BigIntOperationHint hint) {
- return zone()->New<Operator1<BigIntOperationHint>>(
- IrOpcode::kSpeculativeBigIntBitwiseAnd,
- Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntBitwiseAnd",
- 2, 1, 1, 1, 1, 0, hint);
-}
+// TODO(panq): Cache speculative bigint operators.
+#define SPECULATIVE_BIGINT_BINOP(Name) \
+ const Operator* SimplifiedOperatorBuilder::Name(BigIntOperationHint hint) { \
+ return zone()->New<Operator1<BigIntOperationHint>>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, 2, \
+ 1, 1, 1, 1, 0, hint); \
+ }
+SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(SPECULATIVE_BIGINT_BINOP)
+SPECULATIVE_BIGINT_BINOP(SpeculativeBigIntEqual)
+SPECULATIVE_BIGINT_BINOP(SpeculativeBigIntLessThan)
+SPECULATIVE_BIGINT_BINOP(SpeculativeBigIntLessThanOrEqual)
+#undef SPECULATIVE_BIGINT_BINOP
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
BigIntOperationHint hint) {
@@ -1673,6 +1837,22 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
1, 1, 1, 0, hint);
}
+const Operator* SimplifiedOperatorBuilder::SpeculativeToBigInt(
+ BigIntOperationHint hint, const FeedbackSource& feedback) {
+ if (!feedback.IsValid()) {
+ switch (hint) {
+ case BigIntOperationHint::kBigInt64:
+ return &cache_.kSpeculativeToBigIntBigInt64Operator;
+ case BigIntOperationHint::kBigInt:
+ return &cache_.kSpeculativeToBigIntBigIntOperator;
+ }
+ }
+ return zone()->New<Operator1<BigIntOperationParameters>>(
+ IrOpcode::kSpeculativeToBigInt, Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeToBigInt", 1, 1, 1, 1, 1, 0,
+ BigIntOperationParameters(hint, feedback));
+}
+
const Operator* SimplifiedOperatorBuilder::CheckClosure(
const Handle<FeedbackCell>& feedback_cell) {
return zone()->New<Operator1<Handle<FeedbackCell>>>( // --
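The SPECULATIVE_BIGINT_BINOP definition above is an instance of the X-macro pattern: a list macro names each operator once and a definition macro stamps out one builder per entry. The sketch below demonstrates the same pattern with hypothetical DEMO_* macros and a FakeOperator type; it is not the V8 operator machinery.

#include <cstdio>
#include <string>

struct FakeOperator {
  std::string name;
  int value_inputs;
};

// The list macro enumerates the operators once.
#define DEMO_BIGINT_BINOP_LIST(V) \
  V(SpeculativeBigIntAdd)         \
  V(SpeculativeBigIntBitwiseAnd)  \
  V(SpeculativeBigIntEqual)

// The definition macro generates one builder function per list entry.
#define DEMO_DEFINE_BUILDER(Name) \
  FakeOperator Name() { return FakeOperator{#Name, 2}; }
DEMO_BIGINT_BINOP_LIST(DEMO_DEFINE_BUILDER)
#undef DEMO_DEFINE_BUILDER

int main() {
  FakeOperator op = SpeculativeBigIntEqual();
  std::printf("%s takes %d value inputs\n", op.name.c_str(), op.value_inputs);
  return 0;
}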
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 806dbde3a3..a43651623d 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -24,6 +24,10 @@
#include "src/objects/objects.h"
#include "src/zone/zone-handle-set.h"
+#ifdef V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/wasm-compiler-definitions.h"
+#endif
+
namespace v8 {
class CFunctionInfo;
@@ -41,8 +45,6 @@ class Operator;
struct SimplifiedOperatorGlobalCache;
struct WasmTypeCheckConfig;
-enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
-
size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
@@ -68,6 +70,34 @@ size_t hash_value(ConstFieldInfo const&);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
ConstFieldInfo const&);
+#if V8_ENABLE_WEBASSEMBLY
+struct WasmFieldInfo {
+ const wasm::StructType* type;
+ int field_index;
+ bool is_signed;
+ CheckForNull null_check;
+};
+
+V8_EXPORT_PRIVATE bool operator==(WasmFieldInfo const&, WasmFieldInfo const&);
+
+size_t hash_value(WasmFieldInfo const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, WasmFieldInfo const&);
+
+struct WasmElementInfo {
+ const wasm::ArrayType* type;
+ bool is_signed;
+};
+
+V8_EXPORT_PRIVATE bool operator==(WasmElementInfo const&,
+ WasmElementInfo const&);
+
+size_t hash_value(WasmElementInfo const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ WasmElementInfo const&);
+#endif
+
// An access descriptor for loads/stores of fixed structures like field
// accesses of heap objects. Accesses from either tagged or untagged base
// pointers are supported; untagging is done automatically during lowering.
@@ -565,6 +595,28 @@ bool operator==(NumberOperationParameters const&,
const NumberOperationParameters& NumberOperationParametersOf(const Operator* op)
V8_WARN_UNUSED_RESULT;
+class BigIntOperationParameters {
+ public:
+ BigIntOperationParameters(BigIntOperationHint hint,
+ const FeedbackSource& feedback)
+ : hint_(hint), feedback_(feedback) {}
+
+ BigIntOperationHint hint() const { return hint_; }
+ const FeedbackSource& feedback() const { return feedback_; }
+
+ private:
+ BigIntOperationHint hint_;
+ FeedbackSource feedback_;
+};
+
+size_t hash_value(BigIntOperationParameters const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ const BigIntOperationParameters&);
+bool operator==(BigIntOperationParameters const&,
+ BigIntOperationParameters const&);
+const BigIntOperationParameters& BigIntOperationParametersOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+
class SpeculativeBigIntAsNParameters {
public:
SpeculativeBigIntAsNParameters(int bits, const FeedbackSource& feedback)
@@ -696,6 +748,21 @@ size_t hash_value(FastApiCallParameters const&);
bool operator==(FastApiCallParameters const&, FastApiCallParameters const&);
+#if V8_ENABLE_WEBASSEMBLY
+struct AssertNotNullParameters {
+ wasm::ValueType type;
+ TrapId trap_id;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ AssertNotNullParameters const&);
+
+size_t hash_value(AssertNotNullParameters const&);
+
+bool operator==(AssertNotNullParameters const&, AssertNotNullParameters const&);
+
+#endif
+
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
// indexing into objects and arrays, etc.
@@ -781,6 +848,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberToString();
const Operator* NumberToUint32();
const Operator* NumberToUint8Clamped();
+ const Operator* Integral32OrMinusZeroToBigInt();
const Operator* NumberSilenceNaN();
@@ -790,8 +858,16 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* BigIntDivide();
const Operator* BigIntModulus();
const Operator* BigIntBitwiseAnd();
+ const Operator* BigIntBitwiseOr();
+ const Operator* BigIntBitwiseXor();
+ const Operator* BigIntShiftLeft();
+ const Operator* BigIntShiftRight();
const Operator* BigIntNegate();
+ const Operator* BigIntEqual();
+ const Operator* BigIntLessThan();
+ const Operator* BigIntLessThanOrEqual();
+
const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint);
const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint);
@@ -818,12 +894,20 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeBigIntDivide(BigIntOperationHint hint);
const Operator* SpeculativeBigIntModulus(BigIntOperationHint hint);
const Operator* SpeculativeBigIntBitwiseAnd(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntBitwiseOr(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntBitwiseXor(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntShiftLeft(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntShiftRight(BigIntOperationHint hint);
const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint);
const Operator* SpeculativeBigIntAsIntN(int bits,
const FeedbackSource& feedback);
const Operator* SpeculativeBigIntAsUintN(int bits,
const FeedbackSource& feedback);
+ const Operator* SpeculativeBigIntEqual(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntLessThan(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntLessThanOrEqual(BigIntOperationHint hint);
+
const Operator* ReferenceEqual();
const Operator* SameValue();
const Operator* SameValueNumbersOnly();
@@ -853,6 +937,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeToNumber(NumberOperationHint hint,
const FeedbackSource& feedback);
+ const Operator* SpeculativeToBigInt(BigIntOperationHint hint,
+ const FeedbackSource& feedback);
+
const Operator* StringToNumber();
const Operator* PlainPrimitiveToNumber();
const Operator* PlainPrimitiveToWord32();
@@ -1076,17 +1163,29 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// Abort if the value does not match the node's computed type after
// SimplifiedLowering.
const Operator* VerifyType();
+ const Operator* CheckTurboshaftTypeOf();
#if V8_ENABLE_WEBASSEMBLY
- const Operator* AssertNotNull();
- const Operator* IsNull();
- const Operator* IsNotNull();
- const Operator* Null();
+ const Operator* AssertNotNull(wasm::ValueType type, TrapId trap_id);
+ const Operator* IsNull(wasm::ValueType type);
+ const Operator* IsNotNull(wasm::ValueType type);
+ const Operator* Null(wasm::ValueType type);
const Operator* RttCanon(int index);
const Operator* WasmTypeCheck(WasmTypeCheckConfig config);
const Operator* WasmTypeCast(WasmTypeCheckConfig config);
const Operator* WasmExternInternalize();
const Operator* WasmExternExternalize();
+ // TODO(manoskouk): Use {CheckForNull} over bool.
+ const Operator* WasmStructGet(const wasm::StructType* type, int field_index,
+ bool is_signed, CheckForNull null_check);
+ const Operator* WasmStructSet(const wasm::StructType* type, int field_index,
+ CheckForNull null_check);
+ const Operator* WasmArrayGet(const wasm::ArrayType* type, bool is_signed);
+ const Operator* WasmArraySet(const wasm::ArrayType* type);
+ const Operator* WasmArrayLength(CheckForNull);
+ const Operator* WasmArrayInitializeLength();
+ const Operator* StringAsWtf16();
+ const Operator* StringPrepareForGetCodeunit();
#endif
const Operator* DateNow();
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index f6ef3d5242..0f9f866a09 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -160,7 +160,8 @@ class RedundantStoreFinder final {
tick_counter_(tick_counter),
temp_zone_(temp_zone),
revisit_(temp_zone),
- in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
+ in_revisit_(static_cast<int>(js_graph->graph()->NodeCount()),
+ temp_zone),
unobservable_(js_graph->graph()->NodeCount(),
UnobservablesSet::Unvisited(), temp_zone),
to_remove_(temp_zone),
@@ -222,7 +223,7 @@ class RedundantStoreFinder final {
Zone* const temp_zone_;
ZoneStack<Node*> revisit_;
- ZoneVector<bool> in_revisit_;
+ BitVector in_revisit_;
// Maps node IDs to UnobservableNodeSets.
ZoneVector<UnobservablesSet> unobservable_;
@@ -237,8 +238,7 @@ void RedundantStoreFinder::Find() {
tick_counter_->TickAndMaybeEnterSafepoint();
Node* next = revisit_.top();
revisit_.pop();
- DCHECK_LT(next->id(), in_revisit_.size());
- in_revisit_[next->id()] = false;
+ in_revisit_.Remove(next->id());
Visit(next);
}
@@ -255,10 +255,9 @@ void RedundantStoreFinder::Find() {
}
void RedundantStoreFinder::MarkForRevisit(Node* node) {
- DCHECK_LT(node->id(), in_revisit_.size());
- if (!in_revisit_[node->id()]) {
+ if (!in_revisit_.Contains(node->id())) {
revisit_.push(node);
- in_revisit_[node->id()] = true;
+ in_revisit_.Add(node->id());
}
}
diff --git a/deps/v8/src/compiler/string-builder-optimizer.cc b/deps/v8/src/compiler/string-builder-optimizer.cc
new file mode 100644
index 0000000000..73ad2903b8
--- /dev/null
+++ b/deps/v8/src/compiler/string-builder-optimizer.cc
@@ -0,0 +1,1193 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/string-builder-optimizer.h"
+
+#include <algorithm>
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/base/optional.h"
+#include "src/base/small-vector.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/graph-assembler.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/types.h"
+#include "src/objects/code.h"
+#include "src/objects/map-inl.h"
+#include "src/utils/utils.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+namespace {
+
+// Returns true if {node} is a kStringConcat or a kNewConsString.
+bool IsConcat(Node* node) {
+ return node->opcode() == IrOpcode::kStringConcat ||
+ node->opcode() == IrOpcode::kNewConsString;
+}
+
+// Returns true if {node} is considered as a literal string by the string
+// builder optimizer:
+// - it's a literal string
+// - or it's a kStringFromSingleCharCode
+bool IsLiteralString(Node* node, JSHeapBroker* broker) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher m(node);
+ return m.HasResolvedValue() && m.Ref(broker).IsString() &&
+ m.Ref(broker).AsString().IsContentAccessible();
+ }
+ case IrOpcode::kStringFromSingleCharCode:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns true if {node} has at least one concatenation or phi in its uses.
+bool HasConcatOrPhiUse(Node* node) {
+ for (Node* use : node->uses()) {
+ if (IsConcat(use) || use->opcode() == IrOpcode::kPhi) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+OneOrTwoByteAnalysis::State OneOrTwoByteAnalysis::ConcatResultIsOneOrTwoByte(
+ State a, State b) {
+ DCHECK(a != State::kUnknown && b != State::kUnknown);
+ if (a == State::kOneByte && b == State::kOneByte) {
+ return State::kOneByte;
+ }
+ if (a == State::kTwoByte || b == State::kTwoByte) {
+ return State::kTwoByte;
+ }
+ return State::kCantKnow;
+}
+
+base::Optional<std::pair<int64_t, int64_t>> OneOrTwoByteAnalysis::TryGetRange(
+ Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kChangeTaggedToFloat64:
+ case IrOpcode::kTruncateFloat64ToWord32:
+ return TryGetRange(node->InputAt(0));
+
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt64Add:
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kFloat32Add:
+ case IrOpcode::kFloat64Add: {
+ base::Optional<std::pair<int64_t, int64_t>> left =
+ TryGetRange(node->InputAt(0));
+ base::Optional<std::pair<int64_t, int64_t>> right =
+ TryGetRange(node->InputAt(1));
+ if (left.has_value() && right.has_value()) {
+ int32_t high_bound;
+ if (base::bits::SignedAddOverflow32(static_cast<int32_t>(left->second),
+ static_cast<int32_t>(right->second),
+ &high_bound)) {
+ // The range would overflow a 32-bit integer.
+ return base::nullopt;
+ }
+ return std::pair{left->first + right->first, high_bound};
+ } else {
+ return base::nullopt;
+ }
+ }
+
+ case IrOpcode::kInt32Sub:
+ case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt64Sub:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kFloat32Sub:
+ case IrOpcode::kFloat64Sub: {
+ base::Optional<std::pair<int64_t, int64_t>> left =
+ TryGetRange(node->InputAt(0));
+ base::Optional<std::pair<int64_t, int64_t>> right =
+ TryGetRange(node->InputAt(1));
+ if (left.has_value() && right.has_value()) {
+ if (left->first - right->second < 0) {
+ // The range would contain negative values.
+ return base::nullopt;
+ }
+ return std::pair{left->first - right->second,
+ left->second - right->first};
+ } else {
+ return base::nullopt;
+ }
+ }
+
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And: {
+ // Note that the minimal value for "a & b" is always 0, regardless of the
+ // max for "a" or "b". And the maximal value is the min of "max of a" and
+ // "max of b".
+ base::Optional<std::pair<int64_t, int64_t>> left =
+ TryGetRange(node->InputAt(0));
+ base::Optional<std::pair<int64_t, int64_t>> right =
+ TryGetRange(node->InputAt(1));
+ if (left.has_value() && right.has_value()) {
+ return std::pair{0, std::min(left->second, right->second)};
+ } else if (left.has_value()) {
+ return std::pair{0, left->second};
+ } else if (right.has_value()) {
+ return std::pair{0, right->second};
+ } else {
+ return base::nullopt;
+ }
+ }
+
+ case IrOpcode::kInt32Mul:
+ case IrOpcode::kInt32MulWithOverflow:
+ case IrOpcode::kInt64Mul:
+ case IrOpcode::kFloat32Mul:
+ case IrOpcode::kFloat64Mul: {
+ base::Optional<std::pair<int64_t, int64_t>> left =
+ TryGetRange(node->InputAt(0));
+ base::Optional<std::pair<int64_t, int64_t>> right =
+ TryGetRange(node->InputAt(1));
+ if (left.has_value() && right.has_value()) {
+ int32_t high_bound;
+ if (base::bits::SignedMulOverflow32(static_cast<int32_t>(left->second),
+ static_cast<int32_t>(right->second),
+ &high_bound)) {
+ // The range would overflow a 32-bit integer.
+ return base::nullopt;
+ }
+ return std::pair{left->first * right->first,
+ left->second * right->second};
+ } else {
+ return base::nullopt;
+ }
+ }
+
+ case IrOpcode::kCall: {
+ HeapObjectMatcher m(node->InputAt(0));
+ if (m.HasResolvedValue() && m.Ref(broker()).IsCode()) {
+ CodeRef code = m.Ref(broker()).AsCode();
+ if (code.object()->is_builtin()) {
+ Builtin builtin = code.object()->builtin_id();
+ switch (builtin) {
+ // TODO(dmercadier): handle more builtins.
+ case Builtin::kMathRandom:
+ return std::pair{0, 1};
+ default:
+ return base::nullopt;
+ }
+ }
+ }
+ return base::nullopt;
+ }
+
+#define CONST_CASE(op, matcher) \
+ case IrOpcode::k##op: { \
+ matcher m(node); \
+ if (m.HasResolvedValue()) { \
+ if (m.ResolvedValue() < 0 || \
+ m.ResolvedValue() >= std::numeric_limits<int32_t>::max()) { \
+ return base::nullopt; \
+ } \
+ return std::pair{m.ResolvedValue(), m.ResolvedValue()}; \
+ } else { \
+ return base::nullopt; \
+ } \
+ }
+ CONST_CASE(Float32Constant, Float32Matcher)
+ CONST_CASE(Float64Constant, Float64Matcher)
+ CONST_CASE(Int32Constant, Int32Matcher)
+ CONST_CASE(Int64Constant, Int64Matcher)
+ CONST_CASE(NumberConstant, NumberMatcher)
+#undef CONST_CASE
+
+ default:
+ return base::nullopt;
+ }
+}
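+
+// Worked example (for illustration): for a node computing `(x & 0xff) + 1`,
+// TryGetRange combines the kWord32And case (which yields [0, 255], since the
+// right-hand constant is 255) with the kInt32Add case, giving the range
+// [1, 256].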
+
+// Tries to determine whether {node} is a 1-byte or a 2-byte string. This
+// function assumes that {node} is part of a string builder: if it's a
+// concatenation and its left-hand side is something other than a literal
+// string, it returns only whether the right-hand side is 1/2-byte: the string
+// builder analysis will take care of propagating the left-hand side's state.
+OneOrTwoByteAnalysis::State OneOrTwoByteAnalysis::OneOrTwoByte(Node* node) {
+ // TODO(v8:13785,dmercadier): once externalization can no longer convert a
+ // 1-byte into a 2-byte string, compute the proper OneOrTwoByte state.
+ return State::kCantKnow;
+#if 0
+ if (states_[node->id()] != State::kUnknown) {
+ return states_[node->id()];
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher m(node);
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
+ StringRef string = m.Ref(broker()).AsString();
+ if (string.object()->IsOneByteRepresentation()) {
+ states_[node->id()] = State::kOneByte;
+ return State::kOneByte;
+ } else {
+ DCHECK(string.object()->IsTwoByteRepresentation());
+ states_[node->id()] = State::kTwoByte;
+ return State::kTwoByte;
+ }
+ } else {
+ states_[node->id()] = State::kCantKnow;
+ return State::kCantKnow;
+ }
+ }
+
+ case IrOpcode::kStringFromSingleCharCode: {
+ Node* input = node->InputAt(0);
+ switch (input->opcode()) {
+ case IrOpcode::kStringCharCodeAt: {
+ State state = OneOrTwoByte(input->InputAt(0));
+ states_[node->id()] = state;
+ return state;
+ }
+
+ default: {
+ base::Optional<std::pair<int64_t, int64_t>> range =
+ TryGetRange(input);
+ if (!range.has_value()) {
+ states_[node->id()] = State::kCantKnow;
+ return State::kCantKnow;
+ } else if (range->first >= 0 && range->second < 255) {
+ states_[node->id()] = State::kOneByte;
+ return State::kOneByte;
+ } else {
+ // For values greater than 0xFF, with the current analysis, we have
+ // no way of knowing if the result will be on 1 or 2 bytes. For
+ // instance, `String.fromCharCode(0x120064 & 0xffff)` will
+ // be a 1-byte string, although the analysis will consider that its
+ // range is [0, 0xffff].
+ states_[node->id()] = State::kCantKnow;
+ return State::kCantKnow;
+ }
+ }
+ }
+ }
+
+ case IrOpcode::kStringConcat:
+ case IrOpcode::kNewConsString: {
+ Node* lhs = node->InputAt(1);
+ Node* rhs = node->InputAt(2);
+
+ DCHECK(IsLiteralString(rhs, broker()));
+ State rhs_state = OneOrTwoByte(rhs);
+
+ // OneOrTwoByte is only called for Nodes that are part of a String
+ // Builder. As a result, a StringConcat/NewConsString is either:
+ // - between 2 string literal if it is the 1st concatenation of the
+ // string builder.
+ // - between the beginning of the string builder and a literal string.
+ // Thus, if {lhs} is not a literal string, we ignore its State: the
+ // analysis should already have been done on its predecessors anyways.
+ State lhs_state =
+ IsLiteralString(lhs, broker()) ? OneOrTwoByte(lhs) : rhs_state;
+
+ State node_state = ConcatResultIsOneOrTwoByte(rhs_state, lhs_state);
+ states_[node->id()] = node_state;
+
+ return node_state;
+ }
+
+ default:
+ states_[node->id()] = State::kCantKnow;
+ return State::kCantKnow;
+ }
+#endif
+}
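+
+// For illustration (once the disabled analysis above is re-enabled): for
+// `"a" + String.fromCharCode(x & 0x7f)`, TryGetRange gives [0, 127] for the
+// char code, so the StringFromSingleCharCode input would be classified as
+// kOneByte, and concatenating it with the 1-byte literal "a" would keep the
+// result kOneByte.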
+
+bool StringBuilderOptimizer::BlockShouldFinalizeStringBuilders(
+ BasicBlock* block) {
+ DCHECK_LT(block->id().ToInt(), blocks_to_trimmings_map_.size());
+ return blocks_to_trimmings_map_[block->id().ToInt()].has_value();
+}
+
+ZoneVector<Node*> StringBuilderOptimizer::GetStringBuildersToFinalize(
+ BasicBlock* block) {
+ DCHECK(BlockShouldFinalizeStringBuilders(block));
+ return blocks_to_trimmings_map_[block->id().ToInt()].value();
+}
+
+OneOrTwoByteAnalysis::State StringBuilderOptimizer::GetOneOrTwoByte(
+ Node* node) {
+ DCHECK(ConcatIsInStringBuilder(node));
+ // TODO(v8:13785,dmercadier): once externalization can no longer convert a
+ // 1-byte into a 2-byte string, return the proper OneOrTwoByte status for the
+ // node (= remove the next line and uncomment the 2 after).
+ return OneOrTwoByteAnalysis::State::kCantKnow;
+ // int string_builder_number = GetStringBuilderIdForConcat(node);
+ // return string_builders_[string_builder_number].one_or_two_bytes;
+}
+
+bool StringBuilderOptimizer::IsStringBuilderEnd(Node* node) {
+ Status status = GetStatus(node);
+ DCHECK_IMPLIES(status.state == State::kEndStringBuilder ||
+ status.state == State::kEndStringBuilderLoopPhi,
+ status.id != kInvalidId &&
+ StringBuilderIsValid(string_builders_[status.id]));
+ return status.state == State::kEndStringBuilder ||
+ status.state == State::kEndStringBuilderLoopPhi;
+}
+
+bool StringBuilderOptimizer::IsNonLoopPhiStringBuilderEnd(Node* node) {
+ return IsStringBuilderEnd(node) && !IsLoopPhi(node);
+}
+
+bool StringBuilderOptimizer::IsStringBuilderConcatInput(Node* node) {
+ Status status = GetStatus(node);
+ DCHECK_IMPLIES(status.state == State::kConfirmedInStringBuilder,
+ status.id != kInvalidId &&
+ StringBuilderIsValid(string_builders_[status.id]));
+ return status.state == State::kConfirmedInStringBuilder;
+}
+
+bool StringBuilderOptimizer::ConcatIsInStringBuilder(Node* node) {
+ DCHECK(IsConcat(node));
+ Status status = GetStatus(node);
+ DCHECK_IMPLIES(status.state == State::kConfirmedInStringBuilder ||
+ status.state == State::kBeginStringBuilder ||
+ status.state == State::kEndStringBuilder,
+ status.id != kInvalidId &&
+ StringBuilderIsValid(string_builders_[status.id]));
+ return status.state == State::kConfirmedInStringBuilder ||
+ status.state == State::kBeginStringBuilder ||
+ status.state == State::kEndStringBuilder;
+}
+
+int StringBuilderOptimizer::GetStringBuilderIdForConcat(Node* node) {
+ DCHECK(IsConcat(node));
+ Status status = GetStatus(node);
+ DCHECK(status.state == State::kConfirmedInStringBuilder ||
+ status.state == State::kBeginStringBuilder ||
+ status.state == State::kEndStringBuilder);
+ DCHECK_NE(status.id, kInvalidId);
+ return status.id;
+}
+
+bool StringBuilderOptimizer::IsFirstConcatInStringBuilder(Node* node) {
+ if (!ConcatIsInStringBuilder(node)) return false;
+ Status status = GetStatus(node);
+ return status.state == State::kBeginStringBuilder;
+}
+
+// Duplicates the {input_idx}th input of {node} if it has multiple uses, so that
+// the replacement only has one use and can safely be marked as
+// State::kConfirmedInStringBuilder and properly optimized in
+// EffectControlLinearizer (in particular, this makes it possible to safely
+// remove StringFromSingleCharCode nodes that are only used by a StringConcat
+// that we optimize).
+void StringBuilderOptimizer::ReplaceConcatInputIfNeeded(Node* node,
+ int input_idx) {
+ if (!IsLiteralString(node->InputAt(input_idx), broker())) return;
+ Node* input = node->InputAt(input_idx);
+ DCHECK_EQ(input->op()->EffectOutputCount(), 0);
+ DCHECK_EQ(input->op()->ControlOutputCount(), 0);
+ if (input->UseCount() > 1) {
+ input = graph()->CloneNode(input);
+ node->ReplaceInput(input_idx, input);
+ }
+ Status node_status = GetStatus(node);
+ DCHECK_NE(node_status.id, kInvalidId);
+ SetStatus(input, State::kConfirmedInStringBuilder, node_status.id);
+}
+
+// If all of the predecessors of {node} are part of a string builder and have
+// the same id, returns this id. Otherwise, returns kInvalidId.
+int StringBuilderOptimizer::GetPhiPredecessorsCommonId(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kPhi);
+ int id = kInvalidId;
+ for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Status status = GetStatus(input);
+ switch (status.state) {
+ case State::kBeginStringBuilder:
+ case State::kInStringBuilder:
+ case State::kPendingPhi:
+ if (id == kInvalidId) {
+ // Initializing {id}.
+ id = status.id;
+ } else if (id != status.id) {
+ // 2 inputs belong to different StringBuilder chains.
+ return kInvalidId;
+ }
+ break;
+ case State::kInvalid:
+ case State::kUnvisited:
+ return kInvalidId;
+ default:
+ UNREACHABLE();
+ }
+ }
+ DCHECK_NE(id, kInvalidId);
+ return id;
+}
+
+namespace {
+
+// Returns true if {first} comes before {second} in {block}.
+bool ComesBeforeInBlock(Node* first, Node* second, BasicBlock* block) {
+ for (Node* node : *block->nodes()) {
+ if (node == first) {
+ return true;
+ }
+ if (node == second) {
+ return false;
+ }
+ }
+ UNREACHABLE();
+}
+
+static constexpr int kMaxPredecessors = 15;
+
+// Computes up to {kMaxPredecessors} predecessors of {start} that are not past
+// {end}, and stores them in {dst}. Returns true if there are at most
+// {kMaxPredecessors} such predecessors, and false otherwise.
+bool ComputePredecessors(
+ BasicBlock* start, BasicBlock* end,
+ base::SmallVector<BasicBlock*, kMaxPredecessors>* dst) {
+ dst->push_back(start);
+ size_t stack_pointer = 0;
+ while (stack_pointer < dst->size()) {
+ BasicBlock* current = (*dst)[stack_pointer++];
+ if (current == end) continue;
+ for (BasicBlock* pred : current->predecessors()) {
+ if (std::find(dst->begin(), dst->end(), pred) == dst->end()) {
+ if (dst->size() == kMaxPredecessors) return false;
+ dst->push_back(pred);
+ }
+ }
+ }
+ return true;
+}
+
+// Returns false if a node with opcode {op} makes its string input escape this
+// use. For instance, a Phi or a Store makes its input escape, whereas a
+// kStringLength consumes its input.
+bool OpcodeIsAllowed(IrOpcode::Value op) {
+ switch (op) {
+ case IrOpcode::kStringLength:
+ case IrOpcode::kStringConcat:
+ case IrOpcode::kNewConsString:
+ case IrOpcode::kStringCharCodeAt:
+ case IrOpcode::kStringCodePointAt:
+ case IrOpcode::kStringIndexOf:
+ case IrOpcode::kObjectIsString:
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToNumber:
+ case IrOpcode::kStringToUpperCaseIntl:
+ case IrOpcode::kStringEqual:
+ case IrOpcode::kStringLessThan:
+ case IrOpcode::kStringLessThanOrEqual:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kTypedStateValues:
+ return true;
+ default:
+ return false;
+ }
+}
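+
+// For illustration (a sketch): in JavaScript code like `s.length` or
+// `s < "foo"`, the string builder string {s} is merely read (kStringLength,
+// kStringLessThan), which is allowed above, whereas storing {s} into an object
+// field or merging it through an arbitrary Phi would let a later-mutated
+// string escape, so such uses are rejected.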
+
+// Returns true if {sb_child_block} can be a valid successor for
+// {previous_block} in the string builder, considering that {other_child_block}
+// is another successor of {previous_block} (which uses the string builder that
+// is in {previous_block}). We are mainly checking for the following scenario:
+//
+// |
+// v
+// +---> LoopPhi
+// | |
+// | v
+// | node ----------> other_child
+// | |
+// | v
+// | child
+// | ...
+// | |
+// +-------+
+//
+// Where {node} and {child} are inside a loop (and could be part of a string
+// builder), but {other_child} is not, and the control flow doesn't exit the
+// loop in between {node} and {child}. The string builder should not be used in
+// such situations, because by the time {other_child} is reached, its input will
+// be invalid, because {child} will have mutated it. (here, node's block would
+// be {previous_block}, child's would be {sb_child_block} and other_child's
+// would be {other_child_block}).
+bool ValidControlFlowForStringBuilder(BasicBlock* sb_child_block,
+ BasicBlock* other_child_block,
+ BasicBlock* previous_block,
+ ZoneVector<BasicBlock*> loop_headers) {
+ if (loop_headers.empty()) return true;
+ // Due to how we visit the graph, {sb_child_block} is the block that
+ // VisitGraph is currently visiting, which means that it has to be in all the
+ // loops of {loop_headers} (and in particular in the latest one).
+ // {other_child_block} on the other hand could be in the loop or not, which is
+ // what this function tries to determine.
+ DCHECK(loop_headers.back()->LoopContains(sb_child_block));
+ if (sb_child_block->IsLoopHeader()) {
+ // {sb_child_block} starts a loop. This is OK for {other_child_block} only
+ // if {other_child_block} is before the loop (because if it's after, then
+ // the value it will receive will be invalid), or if both
+ // {other_child_block} and {previous_block} are inside the loop. The latter
+ // case corresponds to:
+ //
+ // +--------> sb_child_block
+ // | / \
+ // | | \
+ // | v v
+ // | previous_block other_child_block
+ // | |
+ // +--------+
+ //
+ // Where {other_child_block} eventually reaches {previous_block} (or exits
+ // the loop through some other path).
+ return other_child_block->rpo_number() < sb_child_block->rpo_number() ||
+ (sb_child_block->LoopContains(previous_block) &&
+ (sb_child_block->LoopContains(other_child_block)));
+ } else {
+ // Both {sb_child_block} and {other_child_block} should be in the same loop.
+ return loop_headers.back()->LoopContains(other_child_block);
+ }
+}
+
+// Returns true if {maybe_dominator} dominates {maybe_dominee} and is less than
+// {kMaxDominatorSteps} steps away (to avoid going back too far if
+// {maybe_dominee} is much deeper in the graph than {maybe_dominator}).
+bool IsClosebyDominator(BasicBlock* maybe_dominator,
+ BasicBlock* maybe_dominee) {
+ static constexpr int kMaxDominatorSteps = 10;
+ if (maybe_dominee->dominator_depth() + kMaxDominatorSteps <
+ maybe_dominator->dominator_depth()) {
+ // {maybe_dominee} is too far from {maybe_dominator} to compute quickly if
+ // it's dominated by {maybe_dominator} or not.
+ return false;
+ }
+ while (maybe_dominee != maybe_dominator &&
+ maybe_dominator->dominator_depth() <
+ maybe_dominee->dominator_depth()) {
+ maybe_dominee = maybe_dominee->dominator();
+ }
+ return maybe_dominee == maybe_dominator;
+}
+
+// Returns true if {node} is a Phi that has both {input1} and {input2} as
+// inputs.
+bool IsPhiContainingGivenInputs(Node* node, Node* input1, Node* input2,
+ Schedule* schedule) {
+ if (node->opcode() != IrOpcode::kPhi ||
+ schedule->block(node)->IsLoopHeader()) {
+ return false;
+ }
+ bool has_input1 = false, has_input2 = false;
+ for (Node* input : node->inputs()) {
+ if (input == input1) {
+ has_input1 = true;
+ } else if (input == input2) {
+ has_input2 = true;
+ }
+ }
+ return has_input1 && has_input2;
+}
+
+// Returns true if {phi} has 3 inputs (including the Loop or Merge), and its
+// first two inputs are either Phis themselves, or StringConcat/NewConsString.
+// This is used to quickly eliminate Phi nodes that cannot be part of a String
+// Builder.
+bool PhiInputsAreConcatsOrPhi(Node* phi) {
+ DCHECK_EQ(phi->opcode(), IrOpcode::kPhi);
+ return phi->InputCount() == 3 &&
+ (phi->InputAt(0)->opcode() == IrOpcode::kPhi ||
+ IsConcat(phi->InputAt(0))) &&
+ (phi->InputAt(1)->opcode() == IrOpcode::kPhi ||
+ IsConcat(phi->InputAt(1)));
+}
+
+} // namespace
+
+// Check that the uses of {node} are valid, assuming that {string_builder_child}
+// is the following node in the string builder. In a nutshell, for uses of a
+// node (that is part of the string builder) to be valid, they need to all
+// appear before the next node of the string builder (because afterwards the
+// node is no longer valid, since we mutate the SlicedString and the backing
+// store in place). For instance:
+//
+// s1 = "123" + "abc";
+// s2 = s1 + "def";
+// l = s1.length();
+//
+// In this snippet, if `s1` and `s2` are part of the string builder, then the
+// uses of `s1` are not actually valid, because `s1.length()` appears after the
+// next node of the string builder (`s2`) has been computed.
+bool StringBuilderOptimizer::CheckNodeUses(Node* node,
+ Node* string_builder_child,
+ Status status) {
+ DCHECK(GetStatus(string_builder_child).state == State::kInStringBuilder ||
+ GetStatus(string_builder_child).state == State::kPendingPhi);
+ BasicBlock* child_block = schedule()->block(string_builder_child);
+ if (node->UseCount() == 1) return true;
+ BasicBlock* node_block = schedule()->block(node);
+ bool is_loop_phi = IsLoopPhi(node);
+ bool child_is_in_loop =
+ is_loop_phi && LoopContains(node, string_builder_child);
+ base::SmallVector<BasicBlock*, kMaxPredecessors> current_predecessors;
+ bool predecessors_computed = false;
+ for (Node* other_child : node->uses()) {
+ if (other_child == string_builder_child) continue;
+ BasicBlock* other_child_block = schedule()->block(other_child);
+ if (!OpcodeIsAllowed(other_child->opcode())) {
+ // {other_child} could write {node} (the beginning of the string builder)
+ // in memory (or keep it alive through other means, such as a Phi). This
+ // means that if {string_builder_child} modifies the string builder, then
+ // the value stored by {other_child} will become outdated (since
+ // {other_child} will probably just write a pointer to the string in
+ // memory, and the string pointed to by this pointer will be updated by
+ // the string builder).
+ if (is_loop_phi && child_is_in_loop &&
+ !node_block->LoopContains(other_child_block)) {
+ // {other_child} keeps the string alive, but this is only after the
+ // loop, when {string_builder_child} isn't alive anymore, so this isn't
+ // an issue.
+ continue;
+ }
+ return false;
+ }
+ if (other_child_block == child_block) {
+ // Both {child} and {other_child} are in the same block, we need to make
+ // sure that {other_child} comes first.
+ Status other_status = GetStatus(other_child);
+ if (other_status.id != kInvalidId) {
+ DCHECK_EQ(other_status.id, status.id);
+ // {node} flows into 2 different nodes of the string builder, both of
+ // which are in the same BasicBlock, which is not supported. We need to
+ // invalidate {other_child} as well, or the input of {child} could be
+ // wrong. In theory, we could keep one of {other_child} and {child} (the
+ // one that comes later in the BasicBlock), but it's simpler to keep
+ // neither, and end the string builder on {node}.
+ SetStatus(other_child, State::kInvalid);
+ return false;
+ }
+ if (!ComesBeforeInBlock(other_child, string_builder_child, child_block)) {
+ return false;
+ }
+ continue;
+ }
+ if (is_loop_phi) {
+ if ((child_is_in_loop && !node_block->LoopContains(other_child_block)) ||
+ (!child_is_in_loop && node_block->LoopContains(other_child_block))) {
+ // {child} is in the loop and {other_child} isn't (or the other way
+ // around). In that case, we skip {other_child}: it will be tested
+ // later when we leave the loop (if {child} is in the loop) or has
+ // been tested earlier while we were inside the loop (if {child} isn't
+ // in the loop).
+ continue;
+ }
+ } else if (!ValidControlFlowForStringBuilder(child_block, other_child_block,
+ node_block, loop_headers_)) {
+ return false;
+ }
+
+ if (IsPhiContainingGivenInputs(other_child, node, string_builder_child,
+ schedule())) {
+ // {other_child} is a Phi that merges {child} and {node} (and maybe some
+ // other nodes that we don't care about for now: if {other_child} merges
+ // more than 2 nodes, it won't be added to the string builder anyways).
+ continue;
+ }
+
+ base::SmallVector<BasicBlock*, kMaxPredecessors> other_predecessors;
+ bool all_other_predecessors_computed =
+ ComputePredecessors(other_child_block, node_block, &other_predecessors);
+
+ // Making sure that {child_block} isn't in the predecessors of
+ // {other_child_block}. Otherwise, the use of {node} in {other_child}
+ // would be invalid.
+ if (std::find(other_predecessors.begin(), other_predecessors.end(),
+ child_block) != other_predecessors.end()) {
+ // {child} is in the predecessors of {other_child}, which is definitely
+ // invalid (because it means that {other_child} uses an outdated version
+ // of {node}, since {child} modified it).
+ return false;
+ } else {
+ if (all_other_predecessors_computed) {
+ // {child} is definitely not in the predecessors of {other_child}, which
+ // means that it's either a successor of {other_child} (which is safe),
+ // or it's in another path of the graph altogether (which is also
+ // safe).
+ continue;
+ } else {
+ // We didn't compute all the predecessors of {other_child}, so it's
+ // possible that {child_block} is one of the predecessors that we didn't
+ // compute.
+ //
+ // Trying to see if we can find {other_child_block} in the
+ // predecessors of {child_block}: that would mean that {other_child}
+ // is guaranteed to be scheduled before {child}, making it safe.
+ if (!predecessors_computed) {
+ ComputePredecessors(child_block, node_block, &current_predecessors);
+ predecessors_computed = true;
+ }
+ if (std::find(current_predecessors.begin(), current_predecessors.end(),
+ other_child_block) == current_predecessors.end()) {
+ // We didn't find {other_child} in the predecessors of {child}. It
+ // means that either {other_child} comes after in the graph (which
+ // is unsafe), or that {other_child} and {child} are on two
+ // independent subgraphs (which is safe). We have no efficient way
+ // to know which one of the two this is, so we fall back to a
+ // stricter approach: the use of {node} in {other_child} is
+ // guaranteed to be safe if {other_child_block} dominates
+ // {child_block}.
+ if (!IsClosebyDominator(other_child_block, child_block)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
+// Check that the uses of the predecessor(s) of {child} in the string builder
+// are valid, with respect to {child}. This sounds a bit backwards, but we can't
+// check if uses are valid before having computed what the next node in the
+// string builder is. Hence, once we've established that {child} is in the
+// string builder, we check that the uses of the previous node(s) of the
+// string builder are valid. For non-loop phis (i.e., merge phis), we simply
+// check that the uses of their 2 predecessors are valid. For loop phis, this
+// function is called twice: once for the outside-the-loop input (with
+// {input_if_loop_phi} = 0), and once for the inside-the-loop input (with
+// {input_if_loop_phi} = 1).
+bool StringBuilderOptimizer::CheckPreviousNodeUses(Node* child, Status status,
+ int input_if_loop_phi) {
+ if (IsConcat(child)) {
+ return CheckNodeUses(child->InputAt(1), child, status);
+ }
+ if (child->opcode() == IrOpcode::kPhi) {
+ BasicBlock* child_block = schedule()->block(child);
+ if (child_block->IsLoopHeader()) {
+ return CheckNodeUses(child->InputAt(input_if_loop_phi), child, status);
+ } else {
+ DCHECK_EQ(child->InputCount(), 3);
+ return CheckNodeUses(child->InputAt(0), child, status) &&
+ CheckNodeUses(child->InputAt(1), child, status);
+ }
+ }
+ UNREACHABLE();
+}
+
+void StringBuilderOptimizer::VisitNode(Node* node, BasicBlock* block) {
+ if (IsConcat(node)) {
+ Node* lhs = node->InputAt(1);
+ Node* rhs = node->InputAt(2);
+
+ if (!IsLiteralString(rhs, broker())) {
+ SetStatus(node, State::kInvalid);
+ return;
+ }
+
+ if (IsLiteralString(lhs, broker())) {
+ // This node could start a string builder. However, we won't know until
+ // we've properly inspected its uses, found a Phi somewhere down its use
+ // chain, made sure that the Phi was valid, etc. Preemptively, we do a
+ // quick check (with HasConcatOrPhiUse) that this node has a
+ // StringConcat/NewConsString in its uses, and if so, we set its state to
+ // kBeginStringBuilder, and increment {string_builder_count_}. The goal of
+ // the HasConcatOrPhiUse check is mainly to avoid incrementing
+ // {string_builder_count_} too often for things that are obviously just
+ // regular concatenations of 2 constant strings and that can't be the
+ // beginning of string builders.
+ if (HasConcatOrPhiUse(lhs)) {
+ SetStatus(node, State::kBeginStringBuilder, string_builder_count_);
+ string_builders_.push_back(
+ StringBuilder{node, static_cast<int>(string_builder_count_), false,
+ OneOrTwoByteAnalysis::State::kUnknown});
+ string_builder_count_++;
+ }
+ // A concatenation between 2 literal strings has no predecessor in the
+ // string builder, and thus no further checks/bookkeeping are required
+ // ==> early return.
+ return;
+ } else {
+ Status lhs_status = GetStatus(lhs);
+ switch (lhs_status.state) {
+ case State::kBeginStringBuilder:
+ case State::kInStringBuilder:
+ SetStatus(node, State::kInStringBuilder, lhs_status.id);
+ break;
+ case State::kPendingPhi: {
+ BasicBlock* phi_block = schedule()->block(lhs);
+ if (phi_block->LoopContains(block)) {
+ // This node uses a PendingPhi and is inside the loop. We
+ // speculatively set it to kInStringBuilder.
+ SetStatus(node, State::kInStringBuilder, lhs_status.id);
+ } else {
+ // This node uses a PendingPhi but is not inside the loop, which
+ // means that the PendingPhi was never resolved to kInStringBuilder
+ // or kInvalid, and is therefore actually not valid (because we
+ // visit the graph in RPO order, we've already visited the whole
+ // loop). Thus, we set the Phi to kInvalid, and we also set the
+ // current node to kInvalid.
+ SetStatus(lhs, State::kInvalid);
+ SetStatus(node, State::kInvalid);
+ }
+ break;
+ }
+ case State::kInvalid:
+ case State::kUnvisited:
+ SetStatus(node, State::kInvalid);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else if (node->opcode() == IrOpcode::kPhi &&
+ PhiInputsAreConcatsOrPhi(node)) {
+ if (!block->IsLoopHeader()) {
+ // This Phi merges nodes after a if/else.
+ int id = GetPhiPredecessorsCommonId(node);
+ if (id == kInvalidId) {
+ SetStatus(node, State::kInvalid);
+ } else {
+ SetStatus(node, State::kInStringBuilder, id);
+ }
+ } else {
+ // This Phi merges a value from inside the loop with one from before.
+ DCHECK_EQ(node->op()->ValueInputCount(), 2);
+ Status first_input_status = GetStatus(node->InputAt(0));
+ switch (first_input_status.state) {
+ case State::kBeginStringBuilder:
+ case State::kInStringBuilder:
+ SetStatus(node, State::kPendingPhi, first_input_status.id);
+ break;
+ case State::kPendingPhi:
+ case State::kInvalid:
+ case State::kUnvisited:
+ SetStatus(node, State::kInvalid);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else {
+ SetStatus(node, State::kInvalid);
+ }
+
+ Status status = GetStatus(node);
+ if (status.state == State::kInStringBuilder ||
+ status.state == State::kPendingPhi) {
+ // We make sure that this node being in the string builder doesn't conflict
+ // with other uses of the previous node of the string builder. Note that
+ // loop phis can never have the kInStringBuilder state at this point. We
+ // thus check their uses when we finish the loop and set the phi's status to
+ // kInStringBuilder.
+ if (!CheckPreviousNodeUses(node, status, 0)) {
+ SetStatus(node, State::kInvalid);
+ return;
+ }
+ // Updating following PendingPhi if needed.
+ for (Node* use : node->uses()) {
+ if (use->opcode() == IrOpcode::kPhi) {
+ Status use_status = GetStatus(use);
+ if (use_status.state == State::kPendingPhi) {
+ // Finished the loop.
+ SetStatus(use, State::kInStringBuilder, status.id);
+ if (use_status.id == status.id &&
+ CheckPreviousNodeUses(use, status, 1)) {
+ string_builders_[status.id].has_loop_phi = true;
+ } else {
+ // One of the uses of {node} is a pending Phi that doesn't have the
+ // correct id (is that even possible?), or the uses of {node} are
+ // invalid. Either way, both {node} and {use} are invalid.
+ SetStatus(node, State::kInvalid);
+ SetStatus(use, State::kInvalid);
+ }
+ }
+ }
+ }
+ }
+}
+
+// For each potential string builder, checks that its beginning has status
+// kBeginStringBuilder, and that it contains at least one phi. Then, all of its
+// "valid" nodes are switched from status State::kInStringBuilder to status
+// State::kConfirmedInStringBuilder (and "valid" kBeginStringBuilder nodes are
+// left as kBeginStringBuilder, while invalid ones are switched to kInvalid).
+// Nodes are considered "valid" if they are before any kPendingPhi in the
+// string builder. Put otherwise, switching status from kInStringBuilder to
+// kConfirmedInStringBuilder is a cheap way of getting rid of kInStringBuilder
+// nodes that are invalid because one of their predecessors is a kPendingPhi
+// that was never switched to kInStringBuilder. An example:
+//
+// StringConcat [1]
+// kBeginStringBuilder
+// |
+// |
+// v
+// -----> Loop Phi [2] ---------------
+// | kInStringBuilder |
+// | | |
+// | | |
+// | v v
+// | StringConcat [3] StringConcat [4]
+// | kInStringBuilder kInStringBuilder
+// | | |
+// ----------| |
+// v
+// -----> Loop Phi [5] ------------>
+// | kPendingPhi
+// | |
+// | |
+// | v
+// | StringConcat [6]
+// | kInStringBuilder
+// | |
+// -----------|
+//
+// In this graph, nodes [1], [2], [3] and [4] are part of the string builder. In
+// particular, node [2] has at some point been assigned the status kPendingPhi
+// (because all loop phis start as kPendingPhi), but was later switched to
+// status kInStringBuilder (because its uses inside the loop were compatible
+// with the string builder), which implicitly made node [3] a valid part of the
+// string builder. On the other hand, node [5] was never switched to status
+// kInStringBuilder, which means that it is not valid, and any successor of [5]
+// isn't valid either (remember that we speculatively set nodes following a
+// kPendingPhi to kInStringBuilder). Thus, rather than having to iterate through
+// the successors of kPendingPhi nodes to invalidate them, we simply update the
+// status of valid nodes to kConfirmedInStringBuilder, after which any
+// kInStringBuilder node is actually invalid.
+//
+// In this function, we also collect all the possible ends for each string
+// builder (there can be multiple possible ends if there is a branch before the
+// end of a string builder), as well as where trimming for a given string
+// builder should be done (either right after the last node, or at the beginning
+// of the blocks following this node). For an example of string builder with
+// multiple ends, consider this code:
+//
+// let s = "a" + "b"
+// for (...) {
+// s += "...";
+// }
+// if (...) return s + "abc";
+// else return s + "def";
+//
+// Which would produce a graph that looks like:
+//
+// kStringConcat
+// |
+// |
+// v
+// -------> Loop Phi---------------
+// | | |
+// | | |
+// | v |
+// | kStringConcat |
+// | | |
+// -------------| |
+// |
+// |
+// ------------------------------------------
+// | |
+// | |
+// | |
+// v v
+// kStringConcat [1] kStringConcat [2]
+// | |
+// | |
+// v v
+// Return Return
+//
+// In this case, both kStringConcat [1] and [2] are valid ends for the string
+// builder.
+void StringBuilderOptimizer::FinalizeStringBuilders() {
+ OneOrTwoByteAnalysis one_or_two_byte_analysis(graph(), temp_zone(), broker());
+
+ // We use {to_visit} to iterate through a string builder, and {ends} to
+ // collect its ends. To save some memory, these 2 variables are declared a
+ // bit early, and we .clear() them at the beginning of each iteration (which
+ // shouldn't free their memory), rather than allocating new memory for each
+ // string builder.
+ ZoneVector<Node*> to_visit(temp_zone());
+ ZoneVector<Node*> ends(temp_zone());
+
+ bool one_string_builder_or_more_valid = false;
+ for (unsigned int string_builder_id = 0;
+ string_builder_id < string_builder_count_; string_builder_id++) {
+ StringBuilder* string_builder = &string_builders_[string_builder_id];
+ Node* start = string_builder->start;
+ Status start_status = GetStatus(start);
+ if (start_status.state != State::kBeginStringBuilder ||
+ !string_builder->has_loop_phi) {
+ // {start} has already been invalidated, or the string builder doesn't
+ // contain a loop Phi.
+ *string_builder = kInvalidStringBuilder;
+ UpdateStatus(start, State::kInvalid);
+ continue;
+ }
+ DCHECK_EQ(start_status.state, State::kBeginStringBuilder);
+ DCHECK_EQ(start_status.id, string_builder_id);
+ one_string_builder_or_more_valid = true;
+
+ OneOrTwoByteAnalysis::State one_or_two_byte =
+ one_or_two_byte_analysis.OneOrTwoByte(start);
+
+ to_visit.clear();
+ ends.clear();
+
+ to_visit.push_back(start);
+ while (!to_visit.empty()) {
+ Node* curr = to_visit.back();
+ to_visit.pop_back();
+
+ Status curr_status = GetStatus(curr);
+ if (curr_status.state == State::kConfirmedInStringBuilder) continue;
+
+ DCHECK(curr_status.state == State::kInStringBuilder ||
+ curr_status.state == State::kBeginStringBuilder);
+ DCHECK_IMPLIES(curr_status.state == State::kBeginStringBuilder,
+ curr == start);
+ DCHECK_EQ(curr_status.id, start_status.id);
+ if (curr_status.state != State::kBeginStringBuilder) {
+ UpdateStatus(curr, State::kConfirmedInStringBuilder);
+ }
+
+ if (IsConcat(curr)) {
+ one_or_two_byte = OneOrTwoByteAnalysis::ConcatResultIsOneOrTwoByte(
+ one_or_two_byte, one_or_two_byte_analysis.OneOrTwoByte(curr));
+ // Duplicating string inputs if needed, and marking them as
+ // InStringBuilder (so that EffectControlLinearizer doesn't lower them).
+ ReplaceConcatInputIfNeeded(curr, 1);
+ ReplaceConcatInputIfNeeded(curr, 2);
+ }
+
+ // Check if {curr} is one of the string builder's ends: if {curr} has no
+ // uses that are part of the string builder, then {curr} ends the string
+ // builder.
+ bool has_use_in_string_builder = false;
+ for (Node* next : curr->uses()) {
+ Status next_status = GetStatus(next);
+ if ((next_status.state == State::kInStringBuilder ||
+ next_status.state == State::kConfirmedInStringBuilder) &&
+ next_status.id == curr_status.id) {
+ if (next_status.state == State::kInStringBuilder) {
+ // We only add to {to_visit} when the state is kInStringBuilder to
+ // make sure that we don't revisit already-visited nodes.
+ to_visit.push_back(next);
+ }
+ if (!IsLoopPhi(curr) || !LoopContains(curr, next)) {
+ // The condition above is true when:
+ // - {curr} is not a loop phi: in that case, {next} is (one of) the
+ // nodes in the string builder after {curr}.
+ // - {curr} is a loop phi, and {next} is not inside the loop: in
+ // that case, {next} is (one of) the nodes in the string builder
+ // that are after {curr}. Note that we ignore uses of {curr}
+ // inside the loop, since if {curr} has no uses **after** the
+ // loop, then it's (one of) the end of the string builder.
+ has_use_in_string_builder = true;
+ }
+ }
+ }
+ if (!has_use_in_string_builder) {
+ ends.push_back(curr);
+ }
+ }
+
+ // Note that there is no need to check that the ends have no conflicting
+ // uses, because none of the ends can be alive at the same time, and thus,
+ // uses of the different ends can't be alive at the same time either. The
+ // reason that ends can't be alive at the same time is that if 2 ends were
+ // alive at the same time, then there would exist a node n that is a
+ // predecessor of both ends and that has 2 successors in the string builder
+ // (both alive at the same time), which is not possible because
+ // CheckNodeUses prevents it.
+
+ // Collecting next blocks where trimming is required (blocks following a
+ // loop Phi where the Phi is the last in a string builder), and setting
+ // kEndStringBuilder state to nodes where trimming should be done right
+ // after computing the node (when the last node in a string builder is not a
+ // loop phi).
+ for (Node* end : ends) {
+ if (IsLoopPhi(end)) {
+ BasicBlock* phi_block = schedule()->block(end);
+ for (BasicBlock* block : phi_block->successors()) {
+ if (phi_block->LoopContains(block)) continue;
+ if (!blocks_to_trimmings_map_[block->id().ToInt()].has_value()) {
+ blocks_to_trimmings_map_[block->id().ToInt()] =
+ ZoneVector<Node*>(temp_zone());
+ }
+ blocks_to_trimmings_map_[block->id().ToInt()]->push_back(end);
+ }
+ UpdateStatus(end, State::kEndStringBuilderLoopPhi);
+ } else {
+ UpdateStatus(end, State::kEndStringBuilder);
+ }
+ }
+
+ string_builder->one_or_two_bytes = one_or_two_byte;
+ }
+
+#ifdef DEBUG
+ if (one_string_builder_or_more_valid) {
+ broker()->isolate()->set_has_turbofan_string_builders();
+ }
+#else
+ USE(one_string_builder_or_more_valid);
+#endif
+}
+
+void StringBuilderOptimizer::VisitGraph() {
+ // Initial discovery of the potential string builders.
+ for (BasicBlock* block : *schedule()->rpo_order()) {
+ // Removing finished loops.
+ while (!loop_headers_.empty() &&
+ loop_headers_.back()->loop_end() == block) {
+ loop_headers_.pop_back();
+ }
+ // Adding new loop if necessary.
+ if (block->IsLoopHeader()) {
+ loop_headers_.push_back(block);
+ }
+ // Visiting block content.
+ for (Node* node : *block->nodes()) {
+ VisitNode(node, block);
+ }
+ }
+
+ // Finalize valid string builders (moving valid nodes to status
+ // kConfirmedInStringBuilder or kEndStringBuilder), and collecting the
+ // trimming points.
+ FinalizeStringBuilders();
+}
+
+void StringBuilderOptimizer::Run() { VisitGraph(); }
+
+StringBuilderOptimizer::StringBuilderOptimizer(JSGraph* jsgraph,
+ Schedule* schedule,
+ Zone* temp_zone,
+ JSHeapBroker* broker)
+ : jsgraph_(jsgraph),
+ schedule_(schedule),
+ temp_zone_(temp_zone),
+ broker_(broker),
+ blocks_to_trimmings_map_(schedule->BasicBlockCount(), temp_zone),
+ status_(jsgraph->graph()->NodeCount(),
+ Status{kInvalidId, State::kUnvisited}, temp_zone),
+ string_builders_(temp_zone),
+ loop_headers_(temp_zone) {}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/string-builder-optimizer.h b/deps/v8/src/compiler/string-builder-optimizer.h
new file mode 100644
index 0000000000..94f4ce951c
--- /dev/null
+++ b/deps/v8/src/compiler/string-builder-optimizer.h
@@ -0,0 +1,378 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STRING_BUILDER_OPTIMIZER_H_
+#define V8_COMPILER_STRING_BUILDER_OPTIMIZER_H_
+
+#include <cstdint>
+#include <unordered_map>
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/base/optional.h"
+#include "src/compiler/graph-assembler.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class JSGraphAssembler;
+class NodeOriginTable;
+class Schedule;
+class SourcePositionTable;
+class JSHeapBroker;
+
+// StringBuilderOptimizer aims at avoiding ConsStrings for some loops that
+// build strings, and instead updating a mutable, over-allocated backing store,
+// while keeping a (mutable) SlicedString pointing to the valid part of the
+// backing store.
+//
+// StringBuilderOptimizer only does the analysis: it finds out which nodes could
+// benefit from this optimization. Then, EffectControlLinearizer actually
+// applies the optimization to the graph.
+//
+// A typical example of what the StringBuilderOptimizer can optimize is:
+//
+// let s = "";
+// for (...) {
+// s += "...";
+// }
+//
+// In general, for a series of concatenations to be optimized, the following
+// must hold:
+// - They start from a single initial concatenation.
+// - All the concatenations in the string builder have constant strings or
+// String.fromCharCode on their right-hand side.
+// - At least one of the concatenations is in a loop.
+//
+// Because everything is nicer with a picture, here is one of what kind of
+// patterns the StringBuilderOptimizer tries to optimize:
+//
+// +--------+
+// |kLiteral|
+// +--------+
+// |
+// |
+// v
+// +-------------+ +--------+
+// |kStringConcat| <------- |kLiteral|
+// +-------------+ +--------+
+// |
+// |
+// v
+// optionally,
+// more kStringConcat
+// |
+// |
+// v
+// +----+
+// -------->|kPhi|------------------------------------------
+// | +----+ |
+// | | |
+// | | |
+// | | |
+// | | |
+// | | |
+// | | |
+// | | |
+// | v |
+// | +-------------+ +--------+ |
+// | |kStringConcat| <------- |kLiteral| |
+// | +-------------+ +--------+ |
+// | | |
+// | | |
+// | v |
+// | optionally, v
+// | more kStringConcat optionally,
+// | | more kStringConcat
+// | | or more kPhi/loops
+// | | |
+// ------------| |
+// |
+// |
+// v
+//
+// Where "kLiteral" actually means "either a string literal (HeapConstant) or a
+// StringFromSingleCharCode". And kStringConcat can also be kNewConsString (when
+// the size of the concatenation is known to be more than 13 bytes, Turbofan's
+// front-end generates kNewConsString opcodes rather than kStringConcat).
+// The StringBuilder also supports merge phis. For instance:
+//
+// +--------+
+// |kLiteral|
+// +--------+
+// |
+// |
+// v
+// +-------------+ +--------+
+// |kStringConcat| <------- |kLiteral|
+// +-------------+ +--------+
+// | |
+// | |
+// | |
+// +---------------+ +---------------+
+// | |
+// | |
+// v v
+// +-------------+ +-------------+
+// |kStringConcat| |kStringConcat|
+// +-------------+ +-------------+
+// | |
+// | |
+// | |
+// +---------------+ +---------------+
+// | |
+// | |
+// v v
+// +-------------+
+// | kPhi |
+// | (merge) |
+// +-------------+
+// |
+// |
+// v
+//
+// (and, of course, loops and merge can be mixed).
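+//
+// For illustration, such a merge phi typically comes from code like (a
+// sketch):
+//
+//   let s = "a" + "b";
+//   if (cond) { s += "c"; } else { s += "d"; }
+//   // ... {s} keeps being concatenated to ...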
+
+class OneOrTwoByteAnalysis final {
+ // The class OneOrTwoByteAnalysis is used to try to statically determine
+ // whether a string constant or StringFromSingleCharCode is a 1-byte or a
+ // 2-byte string.
+ // If this analysis succeeds for all of the nodes in a string builder, then
+ // we know statically whether this string builder is building a 1-byte or a
+ // 2-byte string, and we can optimize the generated code to remove all
+ // 1-byte/2-byte checks.
+ public:
+ OneOrTwoByteAnalysis(Graph* graph, Zone* zone, JSHeapBroker* broker)
+ : states_(graph->NodeCount(), State::kUnknown, zone), broker_(broker) {}
+
+ enum class State : uint8_t {
+ kUnknown, // Not yet determined if the string is 1 or 2-bytes
+ kOneByte, // Only 1-byte strings in the string builder
+ kTwoByte, // At least one 2-byte string in the string builder
+ kCantKnow // Cannot determine statically if the string will be 1 or 2-bytes
+
+ // Lattice of possible transitions:
+ //
+ // kUnknown
+ // / | \
+ // / | \
+ // v | \
+ // kOneByte | |
+ // | | | |
+ // | | | |
+ // | v v |
+ // | kTwoByte |
+ // | | /
+ // \ | /
+ // v v v
+ // kCantKnow
+ //
+ // Which means that for instance it's possible to realize that a kUnknown
+ // string builder will produce a 1-byte string, and we can later realize
+ // that it will instead be a 2-byte string. Or, we could be in kOneByte
+ // state, and then realize that the string may or may not end up being
+ // 2-byte, so we'll move to kCantKnow state.
+ };
+
+ // Computes and returns a State reflecting whether {node} is a 1-byte or
+ // 2-byte string.
+ State OneOrTwoByte(Node* node);
+
+ // Computes whether the string builder will be 1-byte or 2-byte if it
+ // contains two nodes that have states {a} and {b}. For instance, if both {a}
+ // and {b} are kOneByte, ConcatResultIsOneOrTwoByte returns kOneByte.
+ static State ConcatResultIsOneOrTwoByte(State a, State b);
+
+ private:
+  // Returns the positive integral range that {node} can take. If {node} can be
+  // negative, is not a number, or its range exceeds 2**32, returns nullopt.
+  // If this function returns a range within [0, 255], then we assume that
+  // calling StringFromSingleCharCode on {node} will produce a 1-byte string.
+  // The analysis is sound (it doesn't make mistakes), but it is not complete:
+  // it bails out (returns nullopt) on operators that it doesn't handle.
+ base::Optional<std::pair<int64_t, int64_t>> TryGetRange(Node* node);
+
+ JSHeapBroker* broker() { return broker_; }
+
+ ZoneVector<State> states_;
+ JSHeapBroker* broker_;
+};
+
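A standalone sketch (not the patch's implementation) of the state join that ConcatResultIsOneOrTwoByte describes, assuming the analysis never joins kUnknown inputs: both sides 1-byte gives a 1-byte result, any known 2-byte side gives a 2-byte result, and everything else collapses to kCantKnow.

#include <cassert>
#include <cstdint>

enum class State : uint8_t { kUnknown, kOneByte, kTwoByte, kCantKnow };

// Illustrative join over the lattice documented above.
State ConcatResultIsOneOrTwoByte(State a, State b) {
  assert(a != State::kUnknown && b != State::kUnknown);  // assumed precondition
  if (a == State::kOneByte && b == State::kOneByte) return State::kOneByte;
  if (a == State::kTwoByte || b == State::kTwoByte) return State::kTwoByte;
  return State::kCantKnow;
}

int main() {
  assert(ConcatResultIsOneOrTwoByte(State::kOneByte, State::kTwoByte) ==
         State::kTwoByte);
  assert(ConcatResultIsOneOrTwoByte(State::kOneByte, State::kCantKnow) ==
         State::kCantKnow);
}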
+class V8_EXPORT_PRIVATE StringBuilderOptimizer final {
+ public:
+ StringBuilderOptimizer(JSGraph* jsgraph, Schedule* schedule, Zone* temp_zone,
+ JSHeapBroker* broker);
+
+ // Returns true if some trimming code should be inserted at the beginning of
+ // {block} to finalize some string builders.
+ bool BlockShouldFinalizeStringBuilders(BasicBlock* block);
+ // Returns which nodes should be trimmed at the beginning of {block} to
+ // finalize some string builders.
+ ZoneVector<Node*> GetStringBuildersToFinalize(BasicBlock* block);
+
+ // Returns true if {node} is the last node of a StringBuilder (which means
+ // that trimming code should be inserted after {node}).
+  // Note that string builders cannot end in the middle of a loop (unless they
+  // were started in that same loop). This is enforced as follows: when we
+  // first visit a loop Phi that could be part of a string builder, we set its
+  // status to State::kPendingPhi. Only once we've visited the whole loop and
+  // the backedge, and the use chain following the loop phi up to and including
+  // the backedge is valid as part of a string builder, do we switch the loop
+  // phi's status to State::kInStringBuilder. Then, in the final step where we
+  // switch statuses to State::kConfirmedInStringBuilder, we ignore nodes whose
+  // status isn't kInStringBuilder, which means that we ignore loop phis that
+  // still have the kPendingPhi status (and their successors). String builders
+  // thus cannot end inside loops.
+ bool IsStringBuilderEnd(Node* node);
+  // Returns true if {node} is the last node of a StringBuilder and is not a
+  // loop phi. The "loop phi" distinction matters because trimming for loop
+  // phis is trickier: we don't want to trim at every iteration of the loop,
+  // but only once after the loop.
+ bool IsNonLoopPhiStringBuilderEnd(Node* node);
+ // Returns true if {node} is the input of a concatenation that is part of a
+ // StringBuilder.
+ bool IsStringBuilderConcatInput(Node* node);
+ // Returns true if {node} is part of a StringBuilder.
+ bool ConcatIsInStringBuilder(Node* node);
+ // Returns true if {node} is the 1st node of a StringBuilder (which means that
+ // when lowering {node}, we should allocate and initialize everything for this
+ // particular StringBuilder).
+ bool IsFirstConcatInStringBuilder(Node* node);
+
+ // Returns a OneOrTwoByteAnalysis::State representing whether the
+  // StringBuilder that contains {node} is building a 1-byte or a 2-byte
+  // string.
+ OneOrTwoByteAnalysis::State GetOneOrTwoByte(Node* node);
+
+ void Run();
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Graph* graph() const { return jsgraph_->graph(); }
+ Schedule* schedule() const { return schedule_; }
+ Zone* temp_zone() const { return temp_zone_; }
+ JSHeapBroker* broker() const { return broker_; }
+
+ private:
+ enum class State : uint8_t {
+ kUnvisited = 0,
+ kBeginStringBuilder, // A (potential) beginning of a StringBuilder
+ kInStringBuilder, // A node that could be in a StringBuilder
+ kPendingPhi, // A phi that could be in a StringBuilder
+ kConfirmedInStringBuilder, // A node that is definitely in a StringBuilder
+    kEndStringBuilder,  // A node that definitely ends a StringBuilder, and
+                        // that can be trimmed right away
+    kEndStringBuilderLoopPhi,  // A phi that ends a StringBuilder, and whose
+                               // trimming needs to be done at the beginning
+                               // of the following blocks.
+ kInvalid, // A node that we visited and that we can't optimize.
+ kNumberOfState
+ };
+
+ struct Status {
+ int id; // The id of the StringBuilder that the node belongs to (or
+ // kInvalidId).
+ State state; // The state of the node.
+ };
+ static constexpr int kInvalidId = -1;
+
+ Status GetStatus(Node* node) const {
+    if (node->id() >= status_.size()) {
+ return Status{kInvalidId, State::kInvalid};
+ } else {
+ return status_[node->id()];
+ }
+ }
+ void SetStatus(Node* node, State state, int id = kInvalidId) {
+ DCHECK_NE(state, State::kUnvisited);
+ DCHECK_IMPLIES(id != kInvalidId, state != State::kInvalid);
+ if (node->id() >= status_.size()) {
+ // We should really not allocate too many new nodes: the only new nodes we
+ // allocate are constant inputs of nodes in the string builder that have
+ // multiple uses. Thus, we use a slow exponential growth for {status_}.
+ constexpr double growth_factor = 1.1;
+ status_.resize(node->id() * growth_factor,
+ Status{kInvalidId, State::kUnvisited});
+ }
+ status_[node->id()] = Status{id, state};
+ }
+ void UpdateStatus(Node* node, State state) {
+ int id = state == State::kInvalid ? kInvalidId : GetStatus(node).id;
+ status_[node->id()] = Status{id, state};
+ }
+
+ struct StringBuilder {
+ Node* start;
+ int id;
+ bool has_loop_phi;
+ OneOrTwoByteAnalysis::State one_or_two_bytes;
+ };
+ const StringBuilder kInvalidStringBuilder = {
+ nullptr, kInvalidId, false, OneOrTwoByteAnalysis::State::kUnknown};
+
+#ifdef DEBUG
+ bool StringBuilderIsValid(StringBuilder string_builder) {
+ return string_builder.start != nullptr && string_builder.id != kInvalidId &&
+ string_builder.has_loop_phi;
+ }
+#endif
+
+ bool IsLoopPhi(Node* node) const {
+ return node->opcode() == IrOpcode::kPhi &&
+ schedule()->block(node)->IsLoopHeader();
+ }
+ bool LoopContains(Node* loop_phi, Node* node) {
+ DCHECK(IsLoopPhi(loop_phi));
+ return schedule()->block(loop_phi)->LoopContains(schedule()->block(node));
+ }
+
+ int GetStringBuilderIdForConcat(Node* node);
+ void ReplaceConcatInputIfNeeded(Node* node, int input_idx);
+ bool CheckNodeUses(Node* node, Node* concat_child, Status status);
+ bool CheckPreviousNodeUses(Node* child, Status status,
+ int input_if_loop_phi = 0);
+ int GetPhiPredecessorsCommonId(Node* node);
+
+ void FinalizeStringBuilders();
+ void VisitNode(Node* node, BasicBlock* block);
+ void VisitGraph();
+
+ static constexpr bool kAllowAnyStringOnTheRhs = false;
+
+ JSGraph* jsgraph_;
+ Schedule* schedule_;
+ Zone* temp_zone_;
+ JSHeapBroker* broker_;
+ unsigned int string_builder_count_ = 0;
+ // {blocks_to_trimmings_map_} is a map from block IDs to loop phi nodes that
+ // end string builders. For each such node, a trimming should be inserted at
+ // the beginning of the block (in EffectControlLinearizer) in order to
+ // properly finish the string builder (well, most things will work if the
+  // trimming is omitted, but adding this trimming saves memory and removes the
+ // SlicedString indirection; the only thing that would be an issue is that the
+ // rest of the VM could have access to a SlicedString that is less than
+ // SlicedString::kMinLength characters, which may or may not break things).
+ ZoneVector<base::Optional<ZoneVector<Node*>>> blocks_to_trimmings_map_;
+ ZoneVector<Status> status_;
+ ZoneVector<StringBuilder> string_builders_;
+  // {loop_headers_} is used to keep track of the start of each loop that the
+ // block currently being visited is part of.
+ ZoneVector<BasicBlock*> loop_headers_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_STRING_BUILDER_OPTIMIZER_H_
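The GetStatus/SetStatus pair above is essentially a node-id-indexed side table: reads past the current size return a default status, and writes grow the table slowly because few new nodes are expected. A standalone sketch of that idea with simplified types (not V8 code; the extra +1 on the resize is added here to guard against the multiplication rounding down):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct Status {
  int id;         // string builder id, or -1 when invalid
  uint8_t state;  // 0 stands for "unvisited"
};

class StatusTable {
 public:
  explicit StatusTable(std::size_t node_count)
      : status_(node_count, Status{-1, 0}) {}

  Status Get(uint32_t node_id) const {
    // Nodes created after the table was sized simply read as "unvisited".
    if (node_id >= status_.size()) return Status{-1, 0};
    return status_[node_id];
  }

  void Set(uint32_t node_id, Status s) {
    if (node_id >= status_.size()) {
      // Few new nodes are expected, so grow slowly (1.1x); the +1 keeps
      // node_id in range even when the multiplication rounds down.
      constexpr double kGrowthFactor = 1.1;
      status_.resize(static_cast<std::size_t>(node_id * kGrowthFactor) + 1,
                     Status{-1, 0});
    }
    status_[node_id] = s;
  }

 private:
  std::vector<Status> status_;
};

int main() {
  StatusTable table(4);
  table.Set(10, Status{0, 1});                     // grows the table
  std::cout << table.Get(10).id << "\n";           // 0
  std::cout << int(table.Get(100).state) << "\n";  // 0: default, "unvisited"
}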
diff --git a/deps/v8/src/compiler/turbofan-disabled.cc b/deps/v8/src/compiler/turbofan-disabled.cc
new file mode 100644
index 0000000000..53c91a24e6
--- /dev/null
+++ b/deps/v8/src/compiler/turbofan-disabled.cc
@@ -0,0 +1,25 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file stubs out the Turbofan API when TF is disabled.
+// See also v8_enable_turbofan in BUILD.gn.
+
+#include "src/codegen/compiler.h"
+#include "src/compiler/turbofan.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+std::unique_ptr<TurbofanCompilationJob> NewCompilationJob(
+ Isolate* isolate, Handle<JSFunction> function, IsScriptAvailable has_script,
+ BytecodeOffset osr_offset) {
+ FATAL(
+ "compiler::NewCompilationJob must not be called when Turbofan is "
+ "disabled (`v8_enable_turbofan = false`)");
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/turbofan-enabled.cc b/deps/v8/src/compiler/turbofan-enabled.cc
new file mode 100644
index 0000000000..8b4661a65a
--- /dev/null
+++ b/deps/v8/src/compiler/turbofan-enabled.cc
@@ -0,0 +1,27 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file implements the Turbofan API when TF is enabled.
+// See also v8_enable_turbofan in BUILD.gn.
+
+#include "src/codegen/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/turbofan.h"
+#include "src/objects/code-kind.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+std::unique_ptr<TurbofanCompilationJob> NewCompilationJob(
+ Isolate* isolate, Handle<JSFunction> function, IsScriptAvailable has_script,
+ BytecodeOffset osr_offset) {
+ return Pipeline::NewCompilationJob(isolate, function, CodeKind::TURBOFAN,
+ has_script == IsScriptAvailable::kYes,
+ osr_offset);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/turbofan.h b/deps/v8/src/compiler/turbofan.h
new file mode 100644
index 0000000000..5a91951902
--- /dev/null
+++ b/deps/v8/src/compiler/turbofan.h
@@ -0,0 +1,39 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOFAN_H_
+#define V8_COMPILER_TURBOFAN_H_
+
+#include <memory>
+
+// Clients of this interface shouldn't depend on compiler internals.
+// Do not include anything from src/compiler here, and keep includes minimal.
+
+#include "src/base/macros.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class JSFunction;
+class TurbofanCompilationJob;
+
+namespace compiler {
+
+// Whether the given JSFunction has an associated Script.
+enum class IsScriptAvailable {
+ kNo,
+ kYes,
+};
+
+V8_EXPORT_PRIVATE std::unique_ptr<TurbofanCompilationJob> NewCompilationJob(
+ Isolate* isolate, Handle<JSFunction> function, IsScriptAvailable has_script,
+ BytecodeOffset osr_offset = BytecodeOffset::None());
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TURBOFAN_H_
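turbofan.h declares a single NewCompilationJob entry point, and the two .cc files above give it alternative definitions: the real one, linked when v8_enable_turbofan is on, and a stub that fails loudly, linked when it is off. A generic sketch of the same build-time-selection pattern, with hypothetical file and function names (the build system compiles and links exactly one of the two implementation files):

// feature.h -- the stable interface; clients only ever include this.
#ifndef FEATURE_H_
#define FEATURE_H_
int DoOptimizedWork(int x);
#endif  // FEATURE_H_

// feature_enabled.cc -- compiled and linked when the feature is enabled.
#include "feature.h"
int DoOptimizedWork(int x) { return x * 2; }

// feature_disabled.cc -- compiled and linked when the feature is disabled;
// calling the function is then a programming error, so fail loudly.
#include "feature.h"
#include <cstdio>
#include <cstdlib>
int DoOptimizedWork(int) {
  std::fprintf(stderr, "DoOptimizedWork must not be called when disabled\n");
  std::abort();
}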
diff --git a/deps/v8/src/compiler/turboshaft/assembler.cc b/deps/v8/src/compiler/turboshaft/assembler.cc
new file mode 100644
index 0000000000..5462a48f3a
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/assembler.cc
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/assembler.h"
+
+#include "src/builtins/builtins.h"
+#include "src/execution/isolate.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+Handle<Code> BuiltinCodeHandle(Builtin builtin, Isolate* isolate) {
+ return isolate->builtins()->code_handle(builtin);
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/assembler.h b/deps/v8/src/compiler/turboshaft/assembler.h
index f220abe7dd..1859a24651 100644
--- a/deps/v8/src/compiler/turboshaft/assembler.h
+++ b/deps/v8/src/compiler/turboshaft/assembler.h
@@ -15,54 +15,478 @@
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
#include "src/codegen/reloc-info.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/turboshaft/builtin-call-descriptors.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operation-matching.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/optimization-phase.h"
+#include "src/compiler/turboshaft/reducer-traits.h"
#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/runtime-call-descriptors.h"
+#include "src/compiler/turboshaft/sidetable.h"
+#include "src/compiler/turboshaft/snapshot-table.h"
+#include "src/logging/runtime-call-stats.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/oddball.h"
+
+namespace v8::internal {
+enum class Builtin : int32_t;
+}
namespace v8::internal::compiler::turboshaft {
+// Currently we don't have an actual Boolean type. We define an alias to allow
+// `V<Boolean>` to be used.
+using Boolean = Oddball;
+
+namespace detail {
+template <typename T, typename = void>
+struct has_constexpr_type : std::false_type {};
+
+template <typename T>
+struct has_constexpr_type<T, std::void_t<typename v_traits<T>::constexpr_type>>
+ : std::true_type {};
+
+template <typename T, typename...>
+struct make_const_or_v {
+ using type = V<T>;
+};
+
+template <typename T>
+struct make_const_or_v<
+ T, typename std::enable_if_t<has_constexpr_type<T>::value>> {
+ using type = ConstOrV<T>;
+};
+
+template <typename T>
+struct make_const_or_v<
+ T, typename std::enable_if_t<!has_constexpr_type<T>::value>> {
+ using type = V<T>;
+};
+
+template <typename T>
+using make_const_or_v_t = typename make_const_or_v<T, void>::type;
+
+template <typename A, typename ConstOrValues>
+auto ResolveAll(A& assembler, const ConstOrValues& const_or_values) {
+ return std::apply(
+ [&](auto&... args) { return std::tuple{assembler.resolve(args)...}; },
+ const_or_values);
+}
+
+inline bool SuppressUnusedWarning(bool b) { return b; }
+} // namespace detail
+
+template <bool loop, typename... Ts>
+class LabelBase {
+ protected:
+ static constexpr size_t size = sizeof...(Ts);
+
+ public:
+ static constexpr bool is_loop = loop;
+ using values_t = std::tuple<V<Ts>...>;
+ using const_or_values_t = std::tuple<detail::make_const_or_v_t<Ts>...>;
+ using recorded_values_t = std::tuple<base::SmallVector<V<Ts>, 2>...>;
+
+ Block* block() { return data_.block; }
+
+ template <typename A>
+ void Goto(A& assembler, const values_t& values) {
+ RecordValues(assembler, data_, values);
+ assembler.Goto(data_.block);
+ }
+
+ template <typename A>
+ void GotoIf(A& assembler, OpIndex condition, BranchHint hint,
+ const values_t& values) {
+ RecordValues(assembler, data_, values);
+ assembler.GotoIf(condition, data_.block, hint);
+ }
+
+ template <typename A>
+ void GotoIfNot(A& assembler, OpIndex condition, BranchHint hint,
+ const values_t& values) {
+ RecordValues(assembler, data_, values);
+ assembler.GotoIfNot(condition, data_.block, hint);
+ }
+
+ template <typename A>
+ base::prepend_tuple_type<bool, values_t> Bind(A& assembler) {
+ DCHECK(!data_.block->IsBound());
+ if (!assembler.Bind(data_.block)) {
+ return std::tuple_cat(std::tuple{false}, values_t{});
+ }
+ DCHECK_EQ(data_.block, assembler.current_block());
+ return std::tuple_cat(std::tuple{true}, MaterializePhis(assembler));
+ }
+
+ protected:
+ struct BlockData {
+ Block* block;
+ base::SmallVector<Block*, 4> predecessors;
+ recorded_values_t recorded_values;
+
+ explicit BlockData(Block* block) : block(block) {}
+ };
+
+ explicit LabelBase(Block* block) : data_(block) {
+ DCHECK_NOT_NULL(data_.block);
+ }
+
+ template <typename A>
+ static void RecordValues(A& assembler, BlockData& data,
+ const values_t& values) {
+ Block* source = assembler.current_block();
+ DCHECK_NOT_NULL(source);
+ if (data.block->IsBound()) {
+ // Cannot `Goto` to a bound block. If you are trying to construct a
+ // loop, use a `LoopLabel` instead!
+ UNREACHABLE();
+ }
+ RecordValuesImpl(data, source, values, std::make_index_sequence<size>());
+ }
+
+ template <size_t... indices>
+ static void RecordValuesImpl(BlockData& data, Block* source,
+ const values_t& values,
+ std::index_sequence<indices...>) {
+#ifdef DEBUG
+ std::initializer_list<size_t> sizes{
+ std::get<indices>(data.recorded_values).size()...};
+ DCHECK(base::all_equal(
+ sizes, static_cast<size_t>(data.block->PredecessorCount())));
+ DCHECK_EQ(data.block->PredecessorCount(), data.predecessors.size());
+#endif
+ (std::get<indices>(data.recorded_values)
+ .push_back(std::get<indices>(values)),
+ ...);
+ data.predecessors.push_back(source);
+ }
+
+ template <typename A>
+ values_t MaterializePhis(A& assembler) {
+ return MaterializePhisImpl(assembler, data_,
+ std::make_index_sequence<size>());
+ }
+
+ template <typename A, size_t... indices>
+ static values_t MaterializePhisImpl(A& assembler, BlockData& data,
+ std::index_sequence<indices...>) {
+ size_t predecessor_count = data.block->PredecessorCount();
+ DCHECK_EQ(data.predecessors.size(), predecessor_count);
+ // If this label has no values, we don't need any Phis.
+ if constexpr (size == 0) return values_t{};
+
+ // If this block does not have any predecessors, we shouldn't call this.
+ DCHECK_LT(0, predecessor_count);
+ // With 1 predecessor, we don't need any Phis.
+ if (predecessor_count == 1) {
+ return values_t{std::get<indices>(data.recorded_values)[0]...};
+ }
+ DCHECK_LT(1, predecessor_count);
+
+ // Construct Phis.
+ return values_t{assembler.Phi(
+ base::VectorOf(std::get<indices>(data.recorded_values)))...};
+ }
+
+ BlockData data_;
+};
+
+template <typename... Ts>
+class Label : public LabelBase<false, Ts...> {
+ using super = LabelBase<false, Ts...>;
+
+ public:
+ template <typename Reducer>
+ explicit Label(Reducer* reducer) : super(reducer->Asm().NewBlock()) {}
+};
+
+template <typename... Ts>
+class LoopLabel : public LabelBase<true, Ts...> {
+ using super = LabelBase<true, Ts...>;
+ using BlockData = typename super::BlockData;
+
+ public:
+ using values_t = typename super::values_t;
+ template <typename Reducer>
+ explicit LoopLabel(Reducer* reducer)
+ : super(reducer->Asm().NewBlock()),
+ loop_header_data_{reducer->Asm().NewLoopHeader()} {}
+
+ Block* loop_header() const { return loop_header_data_.block; }
+
+ template <typename A>
+ void Goto(A& assembler, const values_t& values) {
+ if (!loop_header_data_.block->IsBound()) {
+ // If the loop header is not bound yet, we have the forward edge to the
+ // loop.
+ DCHECK_EQ(0, loop_header_data_.block->PredecessorCount());
+ super::RecordValues(assembler, loop_header_data_, values);
+ assembler.Goto(loop_header_data_.block);
+ } else {
+ // We have a jump back to the loop header and wire it to the single
+ // backedge block.
+ this->super::Goto(assembler, values);
+ }
+ }
+
+ template <typename A>
+ void GotoIf(A& assembler, OpIndex condition, BranchHint hint,
+ const values_t& values) {
+ if (!loop_header_data_.block->IsBound()) {
+ // If the loop header is not bound yet, we have the forward edge to the
+ // loop.
+ DCHECK_EQ(0, loop_header_data_.block->PredecessorCount());
+ super::RecordValues(assembler, loop_header_data_, values);
+ assembler.GotoIf(condition, loop_header_data_.block, hint);
+ } else {
+ // We have a jump back to the loop header and wire it to the single
+ // backedge block.
+ this->super::GotoIf(assembler, condition, hint, values);
+ }
+ }
+
+ template <typename A>
+ void GotoIfNot(A& assembler, OpIndex condition, BranchHint hint,
+ const values_t& values) {
+ if (!loop_header_data_.block->IsBound()) {
+ // If the loop header is not bound yet, we have the forward edge to the
+ // loop.
+ DCHECK_EQ(0, loop_header_data_.block->PredecessorCount());
+ super::RecordValues(assembler, loop_header_data_, values);
+ assembler.GotoIfNot(condition, loop_header_data_.block, hint);
+ } else {
+ // We have a jump back to the loop header and wire it to the single
+ // backedge block.
+ this->super::GotoIfNot(assembler, condition, hint, values);
+ }
+ }
+
+ template <typename A>
+ base::prepend_tuple_type<bool, values_t> Bind(A& assembler) {
+ // LoopLabels must not be bound using `Bind`, but with `Loop`.
+ UNREACHABLE();
+ }
+
+ template <typename A>
+ base::prepend_tuple_type<bool, values_t> BindLoop(A& assembler) {
+ DCHECK(!loop_header_data_.block->IsBound());
+ if (!assembler.Bind(loop_header_data_.block)) {
+ return std::tuple_cat(std::tuple{false}, values_t{});
+ }
+ DCHECK_EQ(loop_header_data_.block, assembler.current_block());
+ return std::tuple_cat(std::tuple{true},
+ MaterializeLoopPhis(assembler, loop_header_data_));
+ }
+
+ template <typename A>
+ void EndLoop(A& assembler) {
+ // First, we need to bind the backedge block.
+ auto bind_result = this->super::Bind(assembler);
+ // `Bind` returns a tuple with a `bool` as first entry that indicates
+ // whether the block was bound. The rest of the tuple contains the phi
+ // values. Check if this block was bound (aka is reachable).
+ if (std::get<0>(bind_result)) {
+ // The block is bound.
+ DCHECK_EQ(assembler.current_block(), this->super::block());
+ // Now we build a jump from this block to the loop header.
+ // Remove the "bound"-flag from the beginning of the tuple.
+ auto values = base::tuple_drop<1>(bind_result);
+ assembler.Goto(loop_header_data_.block);
+ // Finalize Phis in the loop header.
+ FixLoopPhis(assembler, loop_header_data_, values);
+ }
+ }
+
+ private:
+ template <typename A>
+ static values_t MaterializeLoopPhis(A& assembler, BlockData& data) {
+ return MaterializeLoopPhisImpl(assembler, data,
+ std::make_index_sequence<super::size>());
+ }
+
+ template <typename A, size_t... indices>
+ static values_t MaterializeLoopPhisImpl(A& assembler, BlockData& data,
+ std::index_sequence<indices...>) {
+ size_t predecessor_count = data.block->PredecessorCount();
+ USE(predecessor_count);
+ DCHECK_EQ(data.predecessors.size(), predecessor_count);
+ // If this label has no values, we don't need any Phis.
+ if constexpr (super::size == 0) return typename super::values_t{};
+
+ DCHECK_EQ(predecessor_count, 1);
+ auto phis = typename super::values_t{
+ assembler.PendingLoopPhi(std::get<indices>(data.recorded_values)[0],
+ PendingLoopPhiOp::PhiIndex{indices})...};
+ return phis;
+ }
+
+ template <typename A>
+ static void FixLoopPhis(A& assembler, BlockData& data,
+ const typename super::values_t& values) {
+ DCHECK(data.block->IsBound());
+ DCHECK(data.block->IsLoop());
+ DCHECK_LE(1, data.predecessors.size());
+ DCHECK_LE(data.predecessors.size(), 2);
+ auto op_range = assembler.output_graph().operations(*data.block);
+ FixLoopPhi<0>(assembler, data, values, op_range.begin(), op_range.end());
+ }
+
+ template <size_t I, typename A>
+ static void FixLoopPhi(A& assembler, BlockData& data,
+ const typename super::values_t& values,
+ Graph::MutableOperationIterator next,
+ Graph::MutableOperationIterator end) {
+ if constexpr (I == std::tuple_size_v<typename super::values_t>) {
+#ifdef DEBUG
+ for (; next != end; ++next) {
+ DCHECK(!(*next).Is<PendingLoopPhiOp>());
+ }
+#endif // DEBUG
+ } else {
+ // Find the next PendingLoopPhi.
+ for (; next != end; ++next) {
+ if (auto* pending_phi = (*next).TryCast<PendingLoopPhiOp>()) {
+ OpIndex phi_index = assembler.output_graph().Index(*pending_phi);
+ DCHECK_EQ(pending_phi->first(), std::get<I>(data.recorded_values)[0]);
+ DCHECK_EQ(I, pending_phi->data.phi_index.index);
+ assembler.output_graph().template Replace<PhiOp>(
+ phi_index,
+ base::VectorOf<OpIndex>(
+ {pending_phi->first(), std::get<I>(values)}),
+ pending_phi->rep);
+ break;
+ }
+ }
+ // Check that we found a PendingLoopPhi. Otherwise something is wrong.
+ // Did you `Goto` to a loop header more than twice?
+ DCHECK_NE(next, end);
+ FixLoopPhi<I + 1>(assembler, data, values, ++next, end);
+ }
+ }
+
+ BlockData loop_header_data_;
+};
+
+Handle<Code> BuiltinCodeHandle(Builtin builtin, Isolate* isolate);
+
+// Forward declarations
+template <class Assembler>
+class GraphVisitor;
+using Variable =
+ SnapshotTable<OpIndex, base::Optional<RegisterRepresentation>>::Key;
+
template <class Assembler, template <class> class... Reducers>
class ReducerStack {};
template <class Assembler, template <class> class FirstReducer,
template <class> class... Reducers>
class ReducerStack<Assembler, FirstReducer, Reducers...>
- : public FirstReducer<ReducerStack<Assembler, Reducers...>> {};
+ : public FirstReducer<ReducerStack<Assembler, Reducers...>> {
+ public:
+ using FirstReducer<ReducerStack<Assembler, Reducers...>>::FirstReducer;
+};
-template <class Assembler>
-class ReducerStack<Assembler> {
+template <class Reducers>
+class ReducerStack<Assembler<Reducers>> {
public:
- Assembler& Asm() { return *static_cast<Assembler*>(this); }
+ using AssemblerType = Assembler<Reducers>;
+ using ReducerList = Reducers;
+ Assembler<ReducerList>& Asm() {
+ return *static_cast<Assembler<ReducerList>*>(this);
+ }
};
+template <class Reducers>
+struct reducer_stack_type {};
+template <template <class> class... Reducers>
+struct reducer_stack_type<reducer_list<Reducers...>> {
+ using type = ReducerStack<Assembler<reducer_list<Reducers...>>, Reducers...,
+ v8::internal::compiler::turboshaft::ReducerBase>;
+};
+
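The templates above chain reducers by inheritance: each reducer's Next is the rest of the stack, and the bottom-most ReducerStack specialization recovers the concrete Assembler type so that any reducer can call Asm(). A minimal, self-contained sketch of that shape, with everything simplified to a single Reduce(int) method (not V8 code):

#include <iostream>

template <template <class> class... Reducers>
class Assembler;

// Primary template: a stack of reducers on top of a concrete assembler.
template <class Asm, template <class> class... Reducers>
class ReducerStack {};

// Peel off the first reducer; it derives from the rest of the stack.
template <class Asm, template <class> class First,
          template <class> class... Rest>
class ReducerStack<Asm, First, Rest...>
    : public First<ReducerStack<Asm, Rest...>> {};

// Bottom of the stack: knows the concrete assembler and exposes Asm().
template <template <class> class... Reducers>
class ReducerStack<Assembler<Reducers...>> {
 public:
  using AssemblerType = Assembler<Reducers...>;
  AssemblerType& Asm() { return *static_cast<AssemblerType*>(this); }
};

// A reducer that intercepts Reduce() and forwards to the next one, much like
// the forwarding described for ReducerBaseForwarder below.
template <class Next>
class LoggingReducer : public Next {
 public:
  int Reduce(int x) {
    std::cout << "reducing " << x << "\n";
    return Next::Reduce(x);
  }
};

// The bottom reducer actually "emits" something.
template <class Next>
class BaseReducer : public Next {
 public:
  int Reduce(int x) { return x + 1; }
};

template <template <class> class... Reducers>
class Assembler
    : public ReducerStack<Assembler<Reducers...>, Reducers..., BaseReducer> {};

int main() {
  Assembler<LoggingReducer> assembler;
  std::cout << assembler.Reduce(41) << "\n";  // prints "reducing 41", then 42
}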
+template <typename Next>
+class ReducerBase;
+
+#define TURBOSHAFT_REDUCER_BOILERPLATE() \
+ Assembler<typename Next::ReducerList>& Asm() { \
+ return *static_cast<Assembler<typename Next::ReducerList>*>(this); \
+ }
+
+// LABEL_BLOCK is used in Reducers to provide a single place that forwards the
+// call to the next reducer unchanged. A typical use would be:
+//
+// OpIndex ReduceFoo(OpIndex arg) {
+// LABEL_BLOCK(no_change) return Next::ReduceFoo(arg);
+// ...
+// if (...) goto no_change;
+// ...
+// if (...) goto no_change;
+// ...
+// }
+#define LABEL_BLOCK(label) \
+ for (; false; UNREACHABLE()) \
+ label:
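A standalone illustration of this trick (UNREACHABLE(), a V8 macro, is dropped here): the for loop's condition is false, so the labeled statement is skipped on the normal path and only runs when a goto jumps to the label; in V8 the UNREACHABLE() in the loop's increment additionally catches code that falls off the end of the labeled statement instead of returning.

#include <cstdio>

#define LABEL_BLOCK(label) \
  for (; false;)           \
  label:

int ReduceFoo(int arg) {
  LABEL_BLOCK(no_change) return arg;  // the "no change" fallback
  if (arg < 0) goto no_change;        // bail out: forward unchanged
  return arg * 2;                     // the actual "reduction"
}

int main() {
  std::printf("%d %d\n", ReduceFoo(21), ReduceFoo(-5));  // prints "42 -5"
}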
+
// This empty base-class is used to provide default-implementations of plain
// methods emitting operations.
template <class Next>
class ReducerBaseForwarder : public Next {
public:
-#define EMIT_OP(Name) \
- template <class... Args> \
- OpIndex Reduce##Name(Args... args) { \
- return this->Asm().template Emit<Name##Op>(args...); \
+#define EMIT_OP(Name) \
+ OpIndex ReduceInputGraph##Name(OpIndex ig_index, const Name##Op& op) { \
+ return this->Asm().AssembleOutputGraph##Name(op); \
+ } \
+ template <class... Args> \
+ OpIndex Reduce##Name(Args... args) { \
+ return this->Asm().template Emit<Name##Op>(args...); \
}
TURBOSHAFT_OPERATION_LIST(EMIT_OP)
#undef EMIT_OP
};
// ReducerBase provides default implementations of Branch-related Operations
-// (Goto, Branch, Switch, CatchException), and takes care of updating Block
-// predecessors (and calls the Assembler to maintain split-edge form).
+// (Goto, Branch, Switch, CallAndCatchException), and takes care of updating
+// Block predecessors (and calls the Assembler to maintain split-edge form).
// ReducerBase is always added by Assembler at the bottom of the reducer stack.
template <class Next>
class ReducerBase : public ReducerBaseForwarder<Next> {
public:
- using Next::Asm;
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
using Base = ReducerBaseForwarder<Next>;
+ using ArgT = std::tuple<>;
+
+ template <class... Args>
+ explicit ReducerBase(const std::tuple<Args...>&) {}
- void Bind(Block*, const Block*) {}
+ void Bind(Block* block) {}
+
+ void Analyze() {}
+
+#ifdef DEBUG
+ void Verify(OpIndex old_index, OpIndex new_index) {}
+#endif // DEBUG
+
+ void RemoveLast(OpIndex index_of_last_operation) {
+ Asm().output_graph().RemoveLast();
+ }
+
+  // Get, GetPredecessorValue, Set and NewFreshVariable should be overridden by
+ // the VariableReducer. If the reducer stack has no VariableReducer, then
+ // those methods should not be called.
+ OpIndex Get(Variable) { UNREACHABLE(); }
+ OpIndex GetPredecessorValue(Variable, int) { UNREACHABLE(); }
+ void Set(Variable, OpIndex) { UNREACHABLE(); }
+ Variable NewFreshVariable(base::Optional<RegisterRepresentation>) {
+ UNREACHABLE();
+ }
OpIndex ReducePhi(base::Vector<const OpIndex> inputs,
RegisterRepresentation rep) {
@@ -78,30 +502,70 @@ class ReducerBase : public ReducerBaseForwarder<Next> {
}
OpIndex ReduceGoto(Block* destination) {
- destination->AddPredecessor(Asm().current_block());
- return Base::ReduceGoto(destination);
- }
-
- OpIndex ReduceBranch(OpIndex condition, Block* if_true, Block* if_false) {
- if_true->AddPredecessor(Asm().current_block());
- if_false->AddPredecessor(Asm().current_block());
- return Base::ReduceBranch(condition, if_true, if_false);
- }
-
- OpIndex ReduceCatchException(OpIndex call, Block* if_success,
- Block* if_exception) {
- if_success->AddPredecessor(Asm().current_block());
- if_exception->AddPredecessor(Asm().current_block());
- return Base::ReduceCatchException(call, if_success, if_exception);
+ // Calling Base::Goto will call Emit<Goto>, which will call FinalizeBlock,
+ // which will reset {current_block_}. We thus save {current_block_} before
+ // calling Base::Goto, as we'll need it for AddPredecessor. Note also that
+ // AddPredecessor might introduce some new blocks/operations if it needs to
+ // split an edge, which means that it has to run after Base::Goto
+ // (otherwise, the current Goto could be inserted in the wrong block).
+ Block* saved_current_block = Asm().current_block();
+ OpIndex new_opindex = Base::ReduceGoto(destination);
+ Asm().AddPredecessor(saved_current_block, destination, false);
+ return new_opindex;
+ }
+
+ OpIndex ReduceBranch(OpIndex condition, Block* if_true, Block* if_false,
+ BranchHint hint) {
+ // There should never be a good reason to generate a Branch where both the
+ // {if_true} and {if_false} are the same Block. If we ever decide to lift
+ // this condition, then AddPredecessor and SplitEdge should be updated
+ // accordingly.
+ DCHECK_NE(if_true, if_false);
+ Block* saved_current_block = Asm().current_block();
+ OpIndex new_opindex =
+ Base::ReduceBranch(condition, if_true, if_false, hint);
+ Asm().AddPredecessor(saved_current_block, if_true, true);
+ Asm().AddPredecessor(saved_current_block, if_false, true);
+ return new_opindex;
+ }
+
+ OpIndex ReduceCallAndCatchException(OpIndex callee, OpIndex frame_state,
+ base::Vector<const OpIndex> arguments,
+ Block* if_success, Block* if_exception,
+ const TSCallDescriptor* descriptor) {
+ // {if_success} and {if_exception} should never be the same. If we ever
+ // decide to lift this condition, then AddPredecessor and SplitEdge should
+ // be updated accordingly.
+ DCHECK_NE(if_success, if_exception);
+ Block* saved_current_block = Asm().current_block();
+ OpIndex new_opindex = Base::ReduceCallAndCatchException(
+ callee, frame_state, arguments, if_success, if_exception, descriptor);
+ Asm().AddPredecessor(saved_current_block, if_success, true);
+ Asm().AddPredecessor(saved_current_block, if_exception, true);
+ return new_opindex;
}
OpIndex ReduceSwitch(OpIndex input, base::Vector<const SwitchOp::Case> cases,
- Block* default_case) {
+ Block* default_case, BranchHint default_hint) {
+#ifdef DEBUG
+ // Making sure that all cases and {default_case} are different. If we ever
+ // decide to lift this condition, then AddPredecessor and SplitEdge should
+ // be updated accordingly.
+ std::unordered_set<Block*> seen;
+ seen.insert(default_case);
+ for (auto switch_case : cases) {
+ DCHECK_EQ(seen.count(switch_case.destination), 0);
+ seen.insert(switch_case.destination);
+ }
+#endif
+ Block* saved_current_block = Asm().current_block();
+ OpIndex new_opindex =
+ Base::ReduceSwitch(input, cases, default_case, default_hint);
for (SwitchOp::Case c : cases) {
- c.destination->AddPredecessor(Asm().current_block());
+ Asm().AddPredecessor(saved_current_block, c.destination, true);
}
- default_case->AddPredecessor(Asm().current_block());
- return Base::ReduceSwitch(input, cases, default_case);
+ Asm().AddPredecessor(saved_current_block, default_case, true);
+ return new_opindex;
}
};
@@ -112,386 +576,412 @@ class AssemblerOpInterface {
// reducer stack.
#define DECL_MULTI_REP_BINOP(name, operation, rep_type, kind) \
OpIndex name(OpIndex left, OpIndex right, rep_type rep) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
return stack().Reduce##operation(left, right, \
operation##Op::Kind::k##kind, rep); \
}
#define DECL_SINGLE_REP_BINOP(name, operation, kind, rep) \
OpIndex name(OpIndex left, OpIndex right) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
return stack().Reduce##operation(left, right, \
operation##Op::Kind::k##kind, rep); \
}
-#define DECL_SINGLE_REP_BINOP_NO_KIND(name, operation, rep) \
- OpIndex name(OpIndex left, OpIndex right) { \
- return stack().Reduce##operation(left, right, rep); \
+#define DECL_SINGLE_REP_BINOP_V(name, operation, kind, tag) \
+ V<tag> name(ConstOrV<tag> left, ConstOrV<tag> right) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().Reduce##operation(resolve(left), resolve(right), \
+ operation##Op::Kind::k##kind, \
+ V<tag>::rep); \
+ }
+#define DECL_SINGLE_REP_BINOP_NO_KIND(name, operation, rep) \
+ OpIndex name(OpIndex left, OpIndex right) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().Reduce##operation(left, right, rep); \
}
DECL_MULTI_REP_BINOP(WordAdd, WordBinop, WordRepresentation, Add)
- DECL_SINGLE_REP_BINOP(Word32Add, WordBinop, Add, WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64Add, WordBinop, Add, WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32Add, WordBinop, Add, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64Add, WordBinop, Add, Word64)
+ DECL_SINGLE_REP_BINOP_V(WordPtrAdd, WordBinop, Add, WordPtr)
+ DECL_SINGLE_REP_BINOP(PointerAdd, WordBinop, Add,
+ WordRepresentation::PointerSized())
DECL_MULTI_REP_BINOP(WordMul, WordBinop, WordRepresentation, Mul)
- DECL_SINGLE_REP_BINOP(Word32Mul, WordBinop, Mul, WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64Mul, WordBinop, Mul, WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32Mul, WordBinop, Mul, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64Mul, WordBinop, Mul, Word64)
DECL_MULTI_REP_BINOP(WordBitwiseAnd, WordBinop, WordRepresentation,
BitwiseAnd)
- DECL_SINGLE_REP_BINOP(Word32BitwiseAnd, WordBinop, BitwiseAnd,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64BitwiseAnd, WordBinop, BitwiseAnd,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32BitwiseAnd, WordBinop, BitwiseAnd, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64BitwiseAnd, WordBinop, BitwiseAnd, Word64)
+ DECL_SINGLE_REP_BINOP_V(WordPtrBitwiseAnd, WordBinop, BitwiseAnd, WordPtr)
DECL_MULTI_REP_BINOP(WordBitwiseOr, WordBinop, WordRepresentation, BitwiseOr)
- DECL_SINGLE_REP_BINOP(Word32BitwiseOr, WordBinop, BitwiseOr,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64BitwiseOr, WordBinop, BitwiseOr,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32BitwiseOr, WordBinop, BitwiseOr, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64BitwiseOr, WordBinop, BitwiseOr, Word64)
DECL_MULTI_REP_BINOP(WordBitwiseXor, WordBinop, WordRepresentation,
BitwiseXor)
- DECL_SINGLE_REP_BINOP(Word32BitwiseXor, WordBinop, BitwiseXor,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64BitwiseXor, WordBinop, BitwiseXor,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32BitwiseXor, WordBinop, BitwiseXor, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64BitwiseXor, WordBinop, BitwiseXor, Word64)
DECL_MULTI_REP_BINOP(WordSub, WordBinop, WordRepresentation, Sub)
- DECL_SINGLE_REP_BINOP(Word32Sub, WordBinop, Sub, WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64Sub, WordBinop, Sub, WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32Sub, WordBinop, Sub, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64Sub, WordBinop, Sub, Word64)
+ DECL_SINGLE_REP_BINOP_V(WordPtrSub, WordBinop, Sub, WordPtr)
+ DECL_SINGLE_REP_BINOP(PointerSub, WordBinop, Sub,
+ WordRepresentation::PointerSized())
DECL_MULTI_REP_BINOP(IntDiv, WordBinop, WordRepresentation, SignedDiv)
- DECL_SINGLE_REP_BINOP(Int32Div, WordBinop, SignedDiv,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64Div, WordBinop, SignedDiv,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Int32Div, WordBinop, SignedDiv, Word32)
+ DECL_SINGLE_REP_BINOP_V(Int64Div, WordBinop, SignedDiv, Word64)
DECL_MULTI_REP_BINOP(UintDiv, WordBinop, WordRepresentation, UnsignedDiv)
- DECL_SINGLE_REP_BINOP(Uint32Div, WordBinop, UnsignedDiv,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Uint64Div, WordBinop, UnsignedDiv,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Uint32Div, WordBinop, UnsignedDiv, Word32)
+ DECL_SINGLE_REP_BINOP_V(Uint64Div, WordBinop, UnsignedDiv, Word64)
DECL_MULTI_REP_BINOP(IntMod, WordBinop, WordRepresentation, SignedMod)
- DECL_SINGLE_REP_BINOP(Int32Mod, WordBinop, SignedMod,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64Mod, WordBinop, SignedMod,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Int32Mod, WordBinop, SignedMod, Word32)
+ DECL_SINGLE_REP_BINOP_V(Int64Mod, WordBinop, SignedMod, Word64)
DECL_MULTI_REP_BINOP(UintMod, WordBinop, WordRepresentation, UnsignedMod)
- DECL_SINGLE_REP_BINOP(Uint32Mod, WordBinop, UnsignedMod,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Uint64Mod, WordBinop, UnsignedMod,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Uint32Mod, WordBinop, UnsignedMod, Word32)
+ DECL_SINGLE_REP_BINOP_V(Uint64Mod, WordBinop, UnsignedMod, Word64)
DECL_MULTI_REP_BINOP(IntMulOverflownBits, WordBinop, WordRepresentation,
SignedMulOverflownBits)
- DECL_SINGLE_REP_BINOP(Int32MulOverflownBits, WordBinop,
- SignedMulOverflownBits, WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64MulOverflownBits, WordBinop,
- SignedMulOverflownBits, WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Int32MulOverflownBits, WordBinop,
+ SignedMulOverflownBits, Word32)
+ DECL_SINGLE_REP_BINOP_V(Int64MulOverflownBits, WordBinop,
+ SignedMulOverflownBits, Word64)
DECL_MULTI_REP_BINOP(UintMulOverflownBits, WordBinop, WordRepresentation,
UnsignedMulOverflownBits)
- DECL_SINGLE_REP_BINOP(Uint32MulOverflownBits, WordBinop,
- UnsignedMulOverflownBits, WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Uint64MulOverflownBits, WordBinop,
- UnsignedMulOverflownBits, WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Uint32MulOverflownBits, WordBinop,
+ UnsignedMulOverflownBits, Word32)
+ DECL_SINGLE_REP_BINOP_V(Uint64MulOverflownBits, WordBinop,
+ UnsignedMulOverflownBits, Word64)
+ OpIndex OverflowCheckedBinop(OpIndex left, OpIndex right,
+ OverflowCheckedBinopOp::Kind kind,
+ WordRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceOverflowCheckedBinop(left, right, kind, rep);
+ }
DECL_MULTI_REP_BINOP(IntAddCheckOverflow, OverflowCheckedBinop,
WordRepresentation, SignedAdd)
- DECL_SINGLE_REP_BINOP(Int32AddCheckOverflow, OverflowCheckedBinop, SignedAdd,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64AddCheckOverflow, OverflowCheckedBinop, SignedAdd,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Int32AddCheckOverflow, OverflowCheckedBinop,
+ SignedAdd, Word32)
+ DECL_SINGLE_REP_BINOP_V(Int64AddCheckOverflow, OverflowCheckedBinop,
+ SignedAdd, Word64)
DECL_MULTI_REP_BINOP(IntSubCheckOverflow, OverflowCheckedBinop,
WordRepresentation, SignedSub)
- DECL_SINGLE_REP_BINOP(Int32SubCheckOverflow, OverflowCheckedBinop, SignedSub,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64SubCheckOverflow, OverflowCheckedBinop, SignedSub,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Int32SubCheckOverflow, OverflowCheckedBinop,
+ SignedSub, Word32)
+ DECL_SINGLE_REP_BINOP_V(Int64SubCheckOverflow, OverflowCheckedBinop,
+ SignedSub, Word64)
DECL_MULTI_REP_BINOP(IntMulCheckOverflow, OverflowCheckedBinop,
WordRepresentation, SignedMul)
- DECL_SINGLE_REP_BINOP(Int32MulCheckOverflow, OverflowCheckedBinop, SignedMul,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64MulCheckOverflow, OverflowCheckedBinop, SignedMul,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Int32MulCheckOverflow, OverflowCheckedBinop,
+ SignedMul, Word32)
+ DECL_SINGLE_REP_BINOP_V(Int64MulCheckOverflow, OverflowCheckedBinop,
+ SignedMul, Word64)
DECL_MULTI_REP_BINOP(FloatAdd, FloatBinop, FloatRepresentation, Add)
- DECL_SINGLE_REP_BINOP(Float32Add, FloatBinop, Add,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64Add, FloatBinop, Add,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_BINOP_V(Float32Add, FloatBinop, Add, Float32)
+ DECL_SINGLE_REP_BINOP_V(Float64Add, FloatBinop, Add, Float64)
DECL_MULTI_REP_BINOP(FloatMul, FloatBinop, FloatRepresentation, Mul)
- DECL_SINGLE_REP_BINOP(Float32Mul, FloatBinop, Mul,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64Mul, FloatBinop, Mul,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_BINOP_V(Float32Mul, FloatBinop, Mul, Float32)
+ DECL_SINGLE_REP_BINOP_V(Float64Mul, FloatBinop, Mul, Float64)
DECL_MULTI_REP_BINOP(FloatSub, FloatBinop, FloatRepresentation, Sub)
- DECL_SINGLE_REP_BINOP(Float32Sub, FloatBinop, Sub,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64Sub, FloatBinop, Sub,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_BINOP_V(Float32Sub, FloatBinop, Sub, Float32)
+ DECL_SINGLE_REP_BINOP_V(Float64Sub, FloatBinop, Sub, Float64)
DECL_MULTI_REP_BINOP(FloatDiv, FloatBinop, FloatRepresentation, Div)
- DECL_SINGLE_REP_BINOP(Float32Div, FloatBinop, Div,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64Div, FloatBinop, Div,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_BINOP_V(Float32Div, FloatBinop, Div, Float32)
+ DECL_SINGLE_REP_BINOP_V(Float64Div, FloatBinop, Div, Float64)
DECL_MULTI_REP_BINOP(FloatMin, FloatBinop, FloatRepresentation, Min)
- DECL_SINGLE_REP_BINOP(Float32Min, FloatBinop, Min,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64Min, FloatBinop, Min,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_BINOP_V(Float32Min, FloatBinop, Min, Float32)
+ DECL_SINGLE_REP_BINOP_V(Float64Min, FloatBinop, Min, Float64)
DECL_MULTI_REP_BINOP(FloatMax, FloatBinop, FloatRepresentation, Max)
- DECL_SINGLE_REP_BINOP(Float32Max, FloatBinop, Max,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64Max, FloatBinop, Max,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_BINOP(Float64Mod, FloatBinop, Mod,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_BINOP(Float64Power, FloatBinop, Power,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_BINOP(Float64Atan2, FloatBinop, Atan2,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_BINOP_V(Float32Max, FloatBinop, Max, Float32)
+ DECL_SINGLE_REP_BINOP_V(Float64Max, FloatBinop, Max, Float64)
+ DECL_SINGLE_REP_BINOP_V(Float64Mod, FloatBinop, Mod, Float64)
+ DECL_SINGLE_REP_BINOP_V(Float64Power, FloatBinop, Power, Float64)
+ DECL_SINGLE_REP_BINOP_V(Float64Atan2, FloatBinop, Atan2, Float64)
OpIndex Shift(OpIndex left, OpIndex right, ShiftOp::Kind kind,
WordRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceShift(left, right, kind, rep);
}
DECL_MULTI_REP_BINOP(ShiftRightArithmeticShiftOutZeros, Shift,
WordRepresentation, ShiftRightArithmeticShiftOutZeros)
- DECL_SINGLE_REP_BINOP(Word32ShiftRightArithmeticShiftOutZeros, Shift,
- ShiftRightArithmeticShiftOutZeros,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64ShiftRightArithmeticShiftOutZeros, Shift,
- ShiftRightArithmeticShiftOutZeros,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32ShiftRightArithmeticShiftOutZeros, Shift,
+ ShiftRightArithmeticShiftOutZeros, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64ShiftRightArithmeticShiftOutZeros, Shift,
+ ShiftRightArithmeticShiftOutZeros, Word64)
+ DECL_SINGLE_REP_BINOP_V(WordPtrShiftRightArithmeticShiftOutZeros, Shift,
+ ShiftRightArithmeticShiftOutZeros, WordPtr)
DECL_MULTI_REP_BINOP(ShiftRightArithmetic, Shift, WordRepresentation,
ShiftRightArithmetic)
- DECL_SINGLE_REP_BINOP(Word32ShiftRightArithmetic, Shift, ShiftRightArithmetic,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64ShiftRightArithmetic, Shift, ShiftRightArithmetic,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32ShiftRightArithmetic, Shift,
+ ShiftRightArithmetic, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64ShiftRightArithmetic, Shift,
+ ShiftRightArithmetic, Word64)
+ DECL_SINGLE_REP_BINOP_V(WordPtrShiftRightArithmetic, Shift,
+ ShiftRightArithmetic, WordPtr)
DECL_MULTI_REP_BINOP(ShiftRightLogical, Shift, WordRepresentation,
ShiftRightLogical)
- DECL_SINGLE_REP_BINOP(Word32ShiftRightLogical, Shift, ShiftRightLogical,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64ShiftRightLogical, Shift, ShiftRightLogical,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32ShiftRightLogical, Shift, ShiftRightLogical,
+ Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64ShiftRightLogical, Shift, ShiftRightLogical,
+ Word64)
DECL_MULTI_REP_BINOP(ShiftLeft, Shift, WordRepresentation, ShiftLeft)
- DECL_SINGLE_REP_BINOP(Word32ShiftLeft, Shift, ShiftLeft,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64ShiftLeft, Shift, ShiftLeft,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32ShiftLeft, Shift, ShiftLeft, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64ShiftLeft, Shift, ShiftLeft, Word64)
+ DECL_SINGLE_REP_BINOP_V(WordPtrShiftLeft, Shift, ShiftLeft, WordPtr)
DECL_MULTI_REP_BINOP(RotateRight, Shift, WordRepresentation, RotateRight)
- DECL_SINGLE_REP_BINOP(Word32RotateRight, Shift, RotateRight,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64RotateRight, Shift, RotateRight,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32RotateRight, Shift, RotateRight, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64RotateRight, Shift, RotateRight, Word64)
DECL_MULTI_REP_BINOP(RotateLeft, Shift, WordRepresentation, RotateLeft)
- DECL_SINGLE_REP_BINOP(Word32RotateLeft, Shift, RotateLeft,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Word64RotateLeft, Shift, RotateLeft,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_BINOP_V(Word32RotateLeft, Shift, RotateLeft, Word32)
+ DECL_SINGLE_REP_BINOP_V(Word64RotateLeft, Shift, RotateLeft, Word64)
OpIndex ShiftRightLogical(OpIndex left, uint32_t right,
WordRepresentation rep) {
DCHECK_GE(right, 0);
DCHECK_LT(right, rep.bit_width());
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return ShiftRightLogical(left, this->Word32Constant(right), rep);
}
OpIndex ShiftRightArithmetic(OpIndex left, uint32_t right,
WordRepresentation rep) {
DCHECK_GE(right, 0);
DCHECK_LT(right, rep.bit_width());
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return ShiftRightArithmetic(left, this->Word32Constant(right), rep);
}
+ OpIndex ShiftLeft(OpIndex left, uint32_t right, WordRepresentation rep) {
+ DCHECK_LT(right, rep.bit_width());
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return ShiftLeft(left, this->Word32Constant(right), rep);
+ }
- DECL_SINGLE_REP_BINOP_NO_KIND(Word32Equal, Equal,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP_NO_KIND(Word64Equal, Equal,
- WordRepresentation::Word64())
- DECL_SINGLE_REP_BINOP_NO_KIND(Float32Equal, Equal,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP_NO_KIND(Float64Equal, Equal,
- FloatRepresentation::Float64())
OpIndex Equal(OpIndex left, OpIndex right, RegisterRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceEqual(left, right, rep);
}
+#define DECL_SINGLE_REP_EQUAL_V(name, operation, tag) \
+ V<Word32> name(ConstOrV<tag> left, ConstOrV<tag> right) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().Reduce##operation(resolve(left), resolve(right), \
+ V<tag>::rep); \
+ }
+ DECL_SINGLE_REP_EQUAL_V(Word32Equal, Equal, Word32)
+ DECL_SINGLE_REP_EQUAL_V(Word64Equal, Equal, Word64)
+ DECL_SINGLE_REP_EQUAL_V(WordPtrEqual, Equal, WordPtr)
+ DECL_SINGLE_REP_EQUAL_V(Float32Equal, Equal, Float32)
+ DECL_SINGLE_REP_EQUAL_V(Float64Equal, Equal, Float64)
+#undef DECL_SINGLE_REP_EQUAL_V
+
+ DECL_SINGLE_REP_BINOP_NO_KIND(TaggedEqual, Equal,
+ RegisterRepresentation::Tagged())
+
+#define DECL_SINGLE_REP_COMPARISON_V(name, operation, kind, tag) \
+ V<Word32> name(ConstOrV<tag> left, ConstOrV<tag> right) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().Reduce##operation(resolve(left), resolve(right), \
+ operation##Op::Kind::k##kind, \
+ V<tag>::rep); \
+ }
+
DECL_MULTI_REP_BINOP(IntLessThan, Comparison, RegisterRepresentation,
SignedLessThan)
- DECL_SINGLE_REP_BINOP(Int32LessThan, Comparison, SignedLessThan,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64LessThan, Comparison, SignedLessThan,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_COMPARISON_V(Int32LessThan, Comparison, SignedLessThan,
+ Word32)
+ DECL_SINGLE_REP_COMPARISON_V(Int64LessThan, Comparison, SignedLessThan,
+ Word64)
+ DECL_SINGLE_REP_COMPARISON_V(IntPtrLessThan, Comparison, SignedLessThan,
+ WordPtr)
+
DECL_MULTI_REP_BINOP(UintLessThan, Comparison, RegisterRepresentation,
UnsignedLessThan)
- DECL_SINGLE_REP_BINOP(Uint32LessThan, Comparison, UnsignedLessThan,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Uint64LessThan, Comparison, UnsignedLessThan,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_COMPARISON_V(Uint32LessThan, Comparison, UnsignedLessThan,
+ Word32)
+ DECL_SINGLE_REP_COMPARISON_V(Uint64LessThan, Comparison, UnsignedLessThan,
+ Word64)
+ DECL_SINGLE_REP_BINOP(UintPtrLessThan, Comparison, UnsignedLessThan,
+ WordRepresentation::PointerSized())
DECL_MULTI_REP_BINOP(FloatLessThan, Comparison, RegisterRepresentation,
SignedLessThan)
- DECL_SINGLE_REP_BINOP(Float32LessThan, Comparison, SignedLessThan,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64LessThan, Comparison, SignedLessThan,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_COMPARISON_V(Float32LessThan, Comparison, SignedLessThan,
+ Float32)
+ DECL_SINGLE_REP_COMPARISON_V(Float64LessThan, Comparison, SignedLessThan,
+ Float64)
DECL_MULTI_REP_BINOP(IntLessThanOrEqual, Comparison, RegisterRepresentation,
SignedLessThanOrEqual)
- DECL_SINGLE_REP_BINOP(Int32LessThanOrEqual, Comparison, SignedLessThanOrEqual,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Int64LessThanOrEqual, Comparison, SignedLessThanOrEqual,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_COMPARISON_V(Int32LessThanOrEqual, Comparison,
+ SignedLessThanOrEqual, Word32)
+ DECL_SINGLE_REP_COMPARISON_V(Int64LessThanOrEqual, Comparison,
+ SignedLessThanOrEqual, Word64)
DECL_MULTI_REP_BINOP(UintLessThanOrEqual, Comparison, RegisterRepresentation,
UnsignedLessThanOrEqual)
- DECL_SINGLE_REP_BINOP(Uint32LessThanOrEqual, Comparison,
- UnsignedLessThanOrEqual, WordRepresentation::Word32())
- DECL_SINGLE_REP_BINOP(Uint64LessThanOrEqual, Comparison,
- UnsignedLessThanOrEqual, WordRepresentation::Word64())
+ DECL_SINGLE_REP_COMPARISON_V(Uint32LessThanOrEqual, Comparison,
+ UnsignedLessThanOrEqual, Word32)
+ DECL_SINGLE_REP_COMPARISON_V(Uint64LessThanOrEqual, Comparison,
+ UnsignedLessThanOrEqual, Word64)
+ DECL_SINGLE_REP_BINOP(UintPtrLessThanOrEqual, Comparison,
+ UnsignedLessThanOrEqual,
+ WordRepresentation::PointerSized())
DECL_MULTI_REP_BINOP(FloatLessThanOrEqual, Comparison, RegisterRepresentation,
SignedLessThanOrEqual)
- DECL_SINGLE_REP_BINOP(Float32LessThanOrEqual, Comparison,
- SignedLessThanOrEqual, FloatRepresentation::Float32())
- DECL_SINGLE_REP_BINOP(Float64LessThanOrEqual, Comparison,
- SignedLessThanOrEqual, FloatRepresentation::Float64())
+ DECL_SINGLE_REP_COMPARISON_V(Float32LessThanOrEqual, Comparison,
+ SignedLessThanOrEqual, Float32)
+ DECL_SINGLE_REP_COMPARISON_V(Float64LessThanOrEqual, Comparison,
+ SignedLessThanOrEqual, Float64)
+#undef DECL_SINGLE_REP_COMPARISON_V
+
OpIndex Comparison(OpIndex left, OpIndex right, ComparisonOp::Kind kind,
RegisterRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceComparison(left, right, kind, rep);
}
#undef DECL_SINGLE_REP_BINOP
+#undef DECL_SINGLE_REP_BINOP_V
#undef DECL_MULTI_REP_BINOP
#undef DECL_SINGLE_REP_BINOP_NO_KIND
#define DECL_MULTI_REP_UNARY(name, operation, rep_type, kind) \
OpIndex name(OpIndex input, rep_type rep) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
return stack().Reduce##operation(input, operation##Op::Kind::k##kind, \
rep); \
}
-#define DECL_SINGLE_REP_UNARY(name, operation, kind, rep) \
- OpIndex name(OpIndex input) { \
- return stack().Reduce##operation(input, operation##Op::Kind::k##kind, \
- rep); \
+#define DECL_SINGLE_REP_UNARY_V(name, operation, kind, tag) \
+ V<tag> name(ConstOrV<tag> input) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().Reduce##operation( \
+ resolve(input), operation##Op::Kind::k##kind, V<tag>::rep); \
}
DECL_MULTI_REP_UNARY(FloatAbs, FloatUnary, FloatRepresentation, Abs)
- DECL_SINGLE_REP_UNARY(Float32Abs, FloatUnary, Abs,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64Abs, FloatUnary, Abs,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32Abs, FloatUnary, Abs, Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64Abs, FloatUnary, Abs, Float64)
DECL_MULTI_REP_UNARY(FloatNegate, FloatUnary, FloatRepresentation, Negate)
- DECL_SINGLE_REP_UNARY(Float32Negate, FloatUnary, Negate,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64Negate, FloatUnary, Negate,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64SilenceNaN, FloatUnary, SilenceNaN,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32Negate, FloatUnary, Negate, Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64Negate, FloatUnary, Negate, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64SilenceNaN, FloatUnary, SilenceNaN, Float64)
DECL_MULTI_REP_UNARY(FloatRoundDown, FloatUnary, FloatRepresentation,
RoundDown)
- DECL_SINGLE_REP_UNARY(Float32RoundDown, FloatUnary, RoundDown,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64RoundDown, FloatUnary, RoundDown,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32RoundDown, FloatUnary, RoundDown, Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64RoundDown, FloatUnary, RoundDown, Float64)
DECL_MULTI_REP_UNARY(FloatRoundUp, FloatUnary, FloatRepresentation, RoundUp)
- DECL_SINGLE_REP_UNARY(Float32RoundUp, FloatUnary, RoundUp,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64RoundUp, FloatUnary, RoundUp,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32RoundUp, FloatUnary, RoundUp, Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64RoundUp, FloatUnary, RoundUp, Float64)
DECL_MULTI_REP_UNARY(FloatRoundToZero, FloatUnary, FloatRepresentation,
RoundToZero)
- DECL_SINGLE_REP_UNARY(Float32RoundToZero, FloatUnary, RoundToZero,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64RoundToZero, FloatUnary, RoundToZero,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32RoundToZero, FloatUnary, RoundToZero, Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64RoundToZero, FloatUnary, RoundToZero, Float64)
DECL_MULTI_REP_UNARY(FloatRoundTiesEven, FloatUnary, FloatRepresentation,
RoundTiesEven)
- DECL_SINGLE_REP_UNARY(Float32RoundTiesEven, FloatUnary, RoundTiesEven,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64RoundTiesEven, FloatUnary, RoundTiesEven,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Log, FloatUnary, Log,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32RoundTiesEven, FloatUnary, RoundTiesEven,
+ Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64RoundTiesEven, FloatUnary, RoundTiesEven,
+ Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Log, FloatUnary, Log, Float64)
DECL_MULTI_REP_UNARY(FloatSqrt, FloatUnary, FloatRepresentation, Sqrt)
- DECL_SINGLE_REP_UNARY(Float32Sqrt, FloatUnary, Sqrt,
- FloatRepresentation::Float32())
- DECL_SINGLE_REP_UNARY(Float64Sqrt, FloatUnary, Sqrt,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Exp, FloatUnary, Exp,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Expm1, FloatUnary, Expm1,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Sin, FloatUnary, Sin,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Cos, FloatUnary, Cos,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Sinh, FloatUnary, Sinh,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Cosh, FloatUnary, Cosh,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Asin, FloatUnary, Asin,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Acos, FloatUnary, Acos,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Asinh, FloatUnary, Asinh,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Acosh, FloatUnary, Acosh,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Tan, FloatUnary, Tan,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Tanh, FloatUnary, Tanh,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Log2, FloatUnary, Log2,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Log10, FloatUnary, Log10,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Log1p, FloatUnary, Log1p,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Atan, FloatUnary, Atan,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Atanh, FloatUnary, Atanh,
- FloatRepresentation::Float64())
- DECL_SINGLE_REP_UNARY(Float64Cbrt, FloatUnary, Cbrt,
- FloatRepresentation::Float64())
+ DECL_SINGLE_REP_UNARY_V(Float32Sqrt, FloatUnary, Sqrt, Float32)
+ DECL_SINGLE_REP_UNARY_V(Float64Sqrt, FloatUnary, Sqrt, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Exp, FloatUnary, Exp, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Expm1, FloatUnary, Expm1, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Sin, FloatUnary, Sin, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Cos, FloatUnary, Cos, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Sinh, FloatUnary, Sinh, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Cosh, FloatUnary, Cosh, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Asin, FloatUnary, Asin, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Acos, FloatUnary, Acos, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Asinh, FloatUnary, Asinh, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Acosh, FloatUnary, Acosh, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Tan, FloatUnary, Tan, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Tanh, FloatUnary, Tanh, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Log2, FloatUnary, Log2, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Log10, FloatUnary, Log10, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Log1p, FloatUnary, Log1p, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Atan, FloatUnary, Atan, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Atanh, FloatUnary, Atanh, Float64)
+ DECL_SINGLE_REP_UNARY_V(Float64Cbrt, FloatUnary, Cbrt, Float64)
DECL_MULTI_REP_UNARY(WordReverseBytes, WordUnary, WordRepresentation,
ReverseBytes)
- DECL_SINGLE_REP_UNARY(Word32ReverseBytes, WordUnary, ReverseBytes,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_UNARY(Word64ReverseBytes, WordUnary, ReverseBytes,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_UNARY_V(Word32ReverseBytes, WordUnary, ReverseBytes, Word32)
+ DECL_SINGLE_REP_UNARY_V(Word64ReverseBytes, WordUnary, ReverseBytes, Word64)
DECL_MULTI_REP_UNARY(WordCountLeadingZeros, WordUnary, WordRepresentation,
CountLeadingZeros)
- DECL_SINGLE_REP_UNARY(Word32CountLeadingZeros, WordUnary, CountLeadingZeros,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_UNARY(Word64CountLeadingZeros, WordUnary, CountLeadingZeros,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_UNARY_V(Word32CountLeadingZeros, WordUnary, CountLeadingZeros,
+ Word32)
+ DECL_SINGLE_REP_UNARY_V(Word64CountLeadingZeros, WordUnary, CountLeadingZeros,
+ Word64)
DECL_MULTI_REP_UNARY(WordCountTrailingZeros, WordUnary, WordRepresentation,
CountTrailingZeros)
- DECL_SINGLE_REP_UNARY(Word32CountTrailingZeros, WordUnary, CountTrailingZeros,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_UNARY(Word64CountTrailingZeros, WordUnary, CountTrailingZeros,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_UNARY_V(Word32CountTrailingZeros, WordUnary,
+ CountTrailingZeros, Word32)
+ DECL_SINGLE_REP_UNARY_V(Word64CountTrailingZeros, WordUnary,
+ CountTrailingZeros, Word64)
DECL_MULTI_REP_UNARY(WordPopCount, WordUnary, WordRepresentation, PopCount)
- DECL_SINGLE_REP_UNARY(Word32PopCount, WordUnary, PopCount,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_UNARY(Word64PopCount, WordUnary, PopCount,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_UNARY_V(Word32PopCount, WordUnary, PopCount, Word32)
+ DECL_SINGLE_REP_UNARY_V(Word64PopCount, WordUnary, PopCount, Word64)
DECL_MULTI_REP_UNARY(WordSignExtend8, WordUnary, WordRepresentation,
SignExtend8)
- DECL_SINGLE_REP_UNARY(Word32SignExtend8, WordUnary, SignExtend8,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_UNARY(Word64SignExtend8, WordUnary, SignExtend8,
- WordRepresentation::Word64())
+ DECL_SINGLE_REP_UNARY_V(Word32SignExtend8, WordUnary, SignExtend8, Word32)
+ DECL_SINGLE_REP_UNARY_V(Word64SignExtend8, WordUnary, SignExtend8, Word64)
DECL_MULTI_REP_UNARY(WordSignExtend16, WordUnary, WordRepresentation,
SignExtend16)
- DECL_SINGLE_REP_UNARY(Word32SignExtend16, WordUnary, SignExtend16,
- WordRepresentation::Word32())
- DECL_SINGLE_REP_UNARY(Word64SignExtend16, WordUnary, SignExtend16,
- WordRepresentation::Word64())
-#undef DECL_SINGLE_REP_UNARY
+ DECL_SINGLE_REP_UNARY_V(Word32SignExtend16, WordUnary, SignExtend16, Word32)
+ DECL_SINGLE_REP_UNARY_V(Word64SignExtend16, WordUnary, SignExtend16, Word64)
+#undef DECL_SINGLE_REP_UNARY_V
#undef DECL_MULTI_REP_UNARY
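For reference, a DECL_SINGLE_REP_UNARY_V instantiation such as Float64Sqrt expands to roughly the following sketch (assuming V<Float64>::rep yields FloatRepresentation::Float64()):

  V<Float64> Float64Sqrt(ConstOrV<Float64> input) {
    if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
      return OpIndex::Invalid();
    }
    // resolve() materializes a constant input as a Float64Constant first.
    return stack().ReduceFloatUnary(resolve(input), FloatUnaryOp::Kind::kSqrt,
                                    V<Float64>::rep);
  }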
OpIndex Float64InsertWord32(OpIndex float64, OpIndex word32,
Float64InsertWord32Op::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceFloat64InsertWord32(float64, word32, kind);
}
OpIndex TaggedBitcast(OpIndex input, RegisterRepresentation from,
RegisterRepresentation to) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceTaggedBitcast(input, from, to);
}
OpIndex BitcastTaggedToWord(OpIndex tagged) {
@@ -503,16 +993,104 @@ class AssemblerOpInterface {
RegisterRepresentation::Tagged());
}
- OpIndex Word32Constant(uint32_t value) {
+ OpIndex ObjectIs(OpIndex input, ObjectIsOp::Kind kind,
+ ObjectIsOp::InputAssumptions input_assumptions) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceObjectIs(input, kind, input_assumptions);
+ }
+ OpIndex FloatIs(OpIndex input, FloatIsOp::Kind kind,
+ FloatRepresentation input_rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceFloatIs(input, kind, input_rep);
+ }
+ V<Word32> ObjectIsSmi(V<Tagged> object) {
+ return ObjectIs(object, ObjectIsOp::Kind::kSmi,
+ ObjectIsOp::InputAssumptions::kNone);
+ }
+
+ OpIndex ConvertToObject(
+ OpIndex input, ConvertToObjectOp::Kind kind,
+ RegisterRepresentation input_rep,
+ ConvertToObjectOp::InputInterpretation input_interpretation,
+ CheckForMinusZeroMode minus_zero_mode) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceConvertToObject(input, kind, input_rep,
+ input_interpretation, minus_zero_mode);
+ }
+ V<Tagged> ConvertFloat64ToNumber(V<Float64> input,
+ CheckForMinusZeroMode minus_zero_mode) {
+ return ConvertToObject(input, ConvertToObjectOp::Kind::kNumber,
+ RegisterRepresentation::Float64(),
+ ConvertToObjectOp::InputInterpretation::kSigned,
+ minus_zero_mode);
+ }
+
+ OpIndex ConvertToObjectOrDeopt(
+ OpIndex input, OpIndex frame_state, ConvertToObjectOrDeoptOp::Kind kind,
+ RegisterRepresentation input_rep,
+ ConvertToObjectOrDeoptOp::InputInterpretation input_interpretation,
+ const FeedbackSource& feedback) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceConvertToObjectOrDeopt(
+ input, frame_state, kind, input_rep, input_interpretation, feedback);
+ }
+
+ OpIndex ConvertObjectToPrimitive(
+ V<Object> object, ConvertObjectToPrimitiveOp::Kind kind,
+ ConvertObjectToPrimitiveOp::InputAssumptions input_assumptions) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceConvertObjectToPrimitive(object, kind,
+ input_assumptions);
+ }
+
+ OpIndex ConvertObjectToPrimitiveOrDeopt(
+ V<Object> object, OpIndex frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind from_kind,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind to_kind,
+ CheckForMinusZeroMode minus_zero_mode, const FeedbackSource& feedback) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceConvertObjectToPrimitiveOrDeopt(
+ object, frame_state, from_kind, to_kind, minus_zero_mode, feedback);
+ }
+
+ OpIndex TruncateObjectToPrimitive(
+ V<Object> object, TruncateObjectToPrimitiveOp::Kind kind,
+ TruncateObjectToPrimitiveOp::InputAssumptions input_assumptions) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceTruncateObjectToPrimitive(object, kind,
+ input_assumptions);
+ }
+
+ V<Word32> Word32Constant(uint32_t value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kWord32, uint64_t{value});
}
- OpIndex Word32Constant(int32_t value) {
+ V<Word32> Word32Constant(int32_t value) {
return Word32Constant(static_cast<uint32_t>(value));
}
- OpIndex Word64Constant(uint64_t value) {
+ V<Word64> Word64Constant(uint64_t value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kWord64, value);
}
- OpIndex Word64Constant(int64_t value) {
+ V<Word64> Word64Constant(int64_t value) {
return Word64Constant(static_cast<uint64_t>(value));
}
OpIndex WordConstant(uint64_t value, WordRepresentation rep) {
@@ -523,10 +1101,23 @@ class AssemblerOpInterface {
return Word64Constant(value);
}
}
+ OpIndex IntPtrConstant(intptr_t value) {
+ return UintPtrConstant(static_cast<uintptr_t>(value));
+ }
+ OpIndex UintPtrConstant(uintptr_t value) {
+ return WordConstant(static_cast<uint64_t>(value),
+ WordRepresentation::PointerSized());
+ }
OpIndex Float32Constant(float value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kFloat32, value);
}
OpIndex Float64Constant(double value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kFloat64, value);
}
OpIndex FloatConstant(double value, FloatRepresentation rep) {
@@ -538,76 +1129,155 @@ class AssemblerOpInterface {
}
}
OpIndex NumberConstant(double value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kNumber, value);
}
OpIndex TaggedIndexConstant(int32_t value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kTaggedIndex,
uint64_t{static_cast<uint32_t>(value)});
}
OpIndex HeapConstant(Handle<HeapObject> value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kHeapObject, value);
}
+ OpIndex BuiltinCode(Builtin builtin, Isolate* isolate) {
+ return HeapConstant(BuiltinCodeHandle(builtin, isolate));
+ }
OpIndex CompressedHeapConstant(Handle<HeapObject> value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kHeapObject, value);
}
OpIndex ExternalConstant(ExternalReference value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(ConstantOp::Kind::kExternal, value);
}
OpIndex RelocatableConstant(int64_t value, RelocInfo::Mode mode) {
DCHECK_EQ(mode, any_of(RelocInfo::WASM_CALL, RelocInfo::WASM_STUB_CALL));
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceConstant(
mode == RelocInfo::WASM_CALL
? ConstantOp::Kind::kRelocatableWasmCall
: ConstantOp::Kind::kRelocatableWasmStubCall,
static_cast<uint64_t>(value));
}
+ V<Context> NoContextConstant() {
+ return V<Context>::Cast(SmiTag(Context::kNoContext));
+ }
+ // TODO(nicohartmann@): Might want to get rid of the isolate when supporting
+ // Wasm.
+ V<Tagged> CEntryStubConstant(Isolate* isolate, int result_size,
+ ArgvMode argv_mode = ArgvMode::kStack,
+ bool builtin_exit_frame = false) {
+ if (argv_mode != ArgvMode::kStack) {
+ return HeapConstant(CodeFactory::CEntry(isolate, result_size, argv_mode,
+ builtin_exit_frame));
+ }
+
+ DCHECK(result_size >= 1 && result_size <= 3);
+ DCHECK_IMPLIES(builtin_exit_frame, result_size == 1);
+ const int index = builtin_exit_frame ? 0 : result_size;
+ if (cached_centry_stub_constants_[index].is_null()) {
+ cached_centry_stub_constants_[index] = CodeFactory::CEntry(
+ isolate, result_size, argv_mode, builtin_exit_frame);
+ }
+ return HeapConstant(cached_centry_stub_constants_[index].ToHandleChecked());
+ }
#define DECL_CHANGE(name, kind, assumption, from, to) \
OpIndex name(OpIndex input) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
return stack().ReduceChange( \
input, ChangeOp::Kind::kind, ChangeOp::Assumption::assumption, \
RegisterRepresentation::from(), RegisterRepresentation::to()); \
}
-#define DECL_TRY_CHANGE(name, kind, from, to) \
- OpIndex name(OpIndex input) { \
- return stack().ReduceTryChange(input, TryChangeOp::Kind::kind, \
- FloatRepresentation::from(), \
- WordRepresentation::to()); \
- }
-
- DECL_CHANGE(BitcastWord32ToWord64, kBitcast, kNoAssumption, Word32, Word64)
- DECL_CHANGE(BitcastFloat32ToWord32, kBitcast, kNoAssumption, Float32, Word32)
- DECL_CHANGE(BitcastWord32ToFloat32, kBitcast, kNoAssumption, Word32, Float32)
- DECL_CHANGE(BitcastFloat64ToWord64, kBitcast, kNoAssumption, Float64, Word64)
- DECL_CHANGE(BitcastWord64ToFloat64, kBitcast, kNoAssumption, Word64, Float64)
- DECL_CHANGE(ChangeUint32ToUint64, kZeroExtend, kNoAssumption, Word32, Word64)
- DECL_CHANGE(ChangeInt32ToInt64, kSignExtend, kNoAssumption, Word32, Word64)
- DECL_CHANGE(ChangeInt32ToFloat64, kSignedToFloat, kNoAssumption, Word32,
- Float64)
- DECL_CHANGE(ChangeInt64ToFloat64, kSignedToFloat, kNoAssumption, Word64,
- Float64)
- DECL_CHANGE(ChangeInt32ToFloat32, kSignedToFloat, kNoAssumption, Word32,
- Float32)
- DECL_CHANGE(ChangeInt64ToFloat32, kSignedToFloat, kNoAssumption, Word64,
- Float32)
- DECL_CHANGE(ChangeUint32ToFloat32, kUnsignedToFloat, kNoAssumption, Word32,
- Float32)
- DECL_CHANGE(ChangeUint64ToFloat32, kUnsignedToFloat, kNoAssumption, Word64,
- Float32)
- DECL_CHANGE(ReversibleInt64ToFloat64, kSignedToFloat, kReversible, Word64,
- Float64)
- DECL_CHANGE(ChangeUint64ToFloat64, kUnsignedToFloat, kNoAssumption, Word64,
- Float64)
- DECL_CHANGE(ReversibleUint64ToFloat64, kUnsignedToFloat, kReversible, Word64,
- Float64)
- DECL_CHANGE(ChangeUint32ToFloat64, kUnsignedToFloat, kNoAssumption, Word32,
- Float64)
- DECL_CHANGE(ChangeFloat64ToFloat32, kFloatConversion, kNoAssumption, Float64,
- Float32)
- DECL_CHANGE(ChangeFloat32ToFloat64, kFloatConversion, kNoAssumption, Float32,
- Float64)
- DECL_CHANGE(JSTruncateFloat64ToWord32, kJSFloatTruncate, kNoAssumption,
- Float64, Word32)
+#define DECL_CHANGE_V(name, kind, assumption, from, to) \
+ V<to> name(ConstOrV<from> input) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().ReduceChange(resolve(input), ChangeOp::Kind::kind, \
+ ChangeOp::Assumption::assumption, \
+ V<from>::rep, V<to>::rep); \
+ }
+#define DECL_TRY_CHANGE(name, kind, from, to) \
+ OpIndex name(OpIndex input) { \
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) { \
+ return OpIndex::Invalid(); \
+ } \
+ return stack().ReduceTryChange(input, TryChangeOp::Kind::kind, \
+ FloatRepresentation::from(), \
+ WordRepresentation::to()); \
+ }
+
+ DECL_CHANGE_V(BitcastWord32ToWord64, kBitcast, kNoAssumption, Word32, Word64)
+ DECL_CHANGE_V(BitcastFloat32ToWord32, kBitcast, kNoAssumption, Float32,
+ Word32)
+ DECL_CHANGE_V(BitcastWord32ToFloat32, kBitcast, kNoAssumption, Word32,
+ Float32)
+ DECL_CHANGE_V(BitcastFloat64ToWord64, kBitcast, kNoAssumption, Float64,
+ Word64)
+ DECL_CHANGE_V(BitcastWord64ToFloat64, kBitcast, kNoAssumption, Word64,
+ Float64)
+ DECL_CHANGE_V(ChangeUint32ToUint64, kZeroExtend, kNoAssumption, Word32,
+ Word64)
+ DECL_CHANGE_V(ChangeInt32ToInt64, kSignExtend, kNoAssumption, Word32, Word64)
+ DECL_CHANGE_V(ChangeInt32ToFloat64, kSignedToFloat, kNoAssumption, Word32,
+ Float64)
+ DECL_CHANGE_V(ChangeInt64ToFloat64, kSignedToFloat, kNoAssumption, Word64,
+ Float64)
+ DECL_CHANGE_V(ChangeInt32ToFloat32, kSignedToFloat, kNoAssumption, Word32,
+ Float32)
+ DECL_CHANGE_V(ChangeInt64ToFloat32, kSignedToFloat, kNoAssumption, Word64,
+ Float32)
+ DECL_CHANGE_V(ChangeUint32ToFloat32, kUnsignedToFloat, kNoAssumption, Word32,
+ Float32)
+ DECL_CHANGE_V(ChangeUint64ToFloat32, kUnsignedToFloat, kNoAssumption, Word64,
+ Float32)
+ DECL_CHANGE_V(ReversibleInt64ToFloat64, kSignedToFloat, kReversible, Word64,
+ Float64)
+ DECL_CHANGE_V(ChangeUint64ToFloat64, kUnsignedToFloat, kNoAssumption, Word64,
+ Float64)
+ DECL_CHANGE_V(ReversibleUint64ToFloat64, kUnsignedToFloat, kReversible,
+ Word64, Float64)
+ DECL_CHANGE_V(ChangeUint32ToFloat64, kUnsignedToFloat, kNoAssumption, Word32,
+ Float64)
+ DECL_CHANGE_V(ChangeFloat64ToFloat32, kFloatConversion, kNoAssumption,
+ Float64, Float32)
+ DECL_CHANGE_V(ChangeFloat32ToFloat64, kFloatConversion, kNoAssumption,
+ Float32, Float64)
+ DECL_CHANGE_V(JSTruncateFloat64ToWord32, kJSFloatTruncate, kNoAssumption,
+ Float64, Word32)
+ V<WordPtr> ChangeInt32ToIntPtr(V<Word32> input) {
+ if constexpr (Is64()) {
+ return ChangeInt32ToInt64(input);
+ } else {
+ DCHECK_EQ(WordPtr::bits, Word32::bits);
+ return V<WordPtr>::Cast(input);
+ }
+ }
+ V<WordPtr> ChangeUint32ToUintPtr(V<Word32> input) {
+ if constexpr (Is64()) {
+ return ChangeUint32ToUint64(input);
+ } else {
+ DCHECK_EQ(WordPtr::bits, Word32::bits);
+ return V<WordPtr>::Cast(input);
+ }
+ }
#define DECL_SIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits) \
DECL_CHANGE(TruncateFloat##FloatBits##ToInt##ResultBits##OverflowUndefined, \
@@ -643,192 +1313,1089 @@ class AssemblerOpInterface {
DECL_UNSIGNED_FLOAT_TRUNCATE(32, 32)
#undef DECL_UNSIGNED_FLOAT_TRUNCATE
- DECL_CHANGE(ReversibleFloat64ToInt32, kSignedFloatTruncateOverflowToMin,
- kReversible, Float64, Word32)
- DECL_CHANGE(ReversibleFloat64ToUint32, kUnsignedFloatTruncateOverflowToMin,
- kReversible, Float64, Word32)
- DECL_CHANGE(ReversibleFloat64ToInt64, kSignedFloatTruncateOverflowToMin,
- kReversible, Float64, Word64)
- DECL_CHANGE(ReversibleFloat64ToUint64, kUnsignedFloatTruncateOverflowToMin,
- kReversible, Float64, Word64)
- DECL_CHANGE(Float64ExtractLowWord32, kExtractLowHalf, kNoAssumption, Float64,
- Word32)
- DECL_CHANGE(Float64ExtractHighWord32, kExtractHighHalf, kNoAssumption,
- Float64, Word32)
+ DECL_CHANGE_V(ReversibleFloat64ToInt32, kSignedFloatTruncateOverflowToMin,
+ kReversible, Float64, Word32)
+ DECL_CHANGE_V(ReversibleFloat64ToUint32, kUnsignedFloatTruncateOverflowToMin,
+ kReversible, Float64, Word32)
+ DECL_CHANGE_V(ReversibleFloat64ToInt64, kSignedFloatTruncateOverflowToMin,
+ kReversible, Float64, Word64)
+ DECL_CHANGE_V(ReversibleFloat64ToUint64, kUnsignedFloatTruncateOverflowToMin,
+ kReversible, Float64, Word64)
+ DECL_CHANGE_V(Float64ExtractLowWord32, kExtractLowHalf, kNoAssumption,
+ Float64, Word32)
+ DECL_CHANGE_V(Float64ExtractHighWord32, kExtractHighHalf, kNoAssumption,
+ Float64, Word32)
#undef DECL_CHANGE
+#undef DECL_CHANGE_V
#undef DECL_TRY_CHANGE
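Since the DECL_CHANGE_V methods take ConstOrV<from> and resolve it, callers may pass either an existing node or a plain constant; a hedged sketch (the `word32_node` name is illustrative):

  // An already-built V<Word32> node passes through resolve() unchanged; a
  // literal is wrapped by ConstOrV and emitted as a Word32Constant on demand.
  V<Word64> widened = ChangeUint32ToUint64(word32_node);
  V<Float64> as_double = ChangeInt32ToFloat64(42);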
- OpIndex Load(OpIndex base, LoadOp::Kind kind, MemoryRepresentation loaded_rep,
- int32_t offset = 0) {
- return Load(base, OpIndex::Invalid(), kind, loaded_rep, offset);
+ OpIndex ChangeOrDeopt(OpIndex input, OpIndex frame_state,
+ ChangeOrDeoptOp::Kind kind,
+ CheckForMinusZeroMode minus_zero_mode,
+ const FeedbackSource& feedback) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceChangeOrDeopt(input, frame_state, kind,
+ minus_zero_mode, feedback);
}
+
+ V<Word32> ChangeFloat64ToInt32OrDeopt(V<Float64> input, OpIndex frame_state,
+ CheckForMinusZeroMode minus_zero_mode,
+ const FeedbackSource& feedback) {
+ return ChangeOrDeopt(input, frame_state,
+ ChangeOrDeoptOp::Kind::kFloat64ToInt32,
+ minus_zero_mode, feedback);
+ }
+ V<Word64> ChangeFloat64ToInt64OrDeopt(V<Float64> input, OpIndex frame_state,
+ CheckForMinusZeroMode minus_zero_mode,
+ const FeedbackSource& feedback) {
+ return ChangeOrDeopt(input, frame_state,
+ ChangeOrDeoptOp::Kind::kFloat64ToInt64,
+ minus_zero_mode, feedback);
+ }
+
+ OpIndex Tag(OpIndex input, TagKind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceTag(input, kind);
+ }
+ V<Smi> SmiTag(ConstOrV<Word32> input) {
+ return Tag(resolve(input), TagKind::kSmiTag);
+ }
+
+ OpIndex Untag(OpIndex input, TagKind kind, RegisterRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceUntag(input, kind, rep);
+ }
+ V<Word32> SmiUntag(V<Tagged> input) {
+ return Untag(input, TagKind::kSmiTag, RegisterRepresentation::Word32());
+ }
+
OpIndex Load(OpIndex base, OpIndex index, LoadOp::Kind kind,
MemoryRepresentation loaded_rep, int32_t offset = 0,
uint8_t element_size_log2 = 0) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceLoad(base, index, kind, loaded_rep,
loaded_rep.ToRegisterRepresentation(), offset,
element_size_log2);
}
- void Store(OpIndex base, OpIndex value, StoreOp::Kind kind,
- MemoryRepresentation stored_rep, WriteBarrierKind write_barrier,
- int32_t offset = 0) {
- Store(base, OpIndex::Invalid(), value, kind, stored_rep, write_barrier,
- offset);
+ OpIndex Load(OpIndex base, LoadOp::Kind kind, MemoryRepresentation loaded_rep,
+ int32_t offset = 0) {
+ return Load(base, OpIndex::Invalid(), kind, loaded_rep, offset);
+ }
+ OpIndex LoadOffHeap(OpIndex address, MemoryRepresentation rep) {
+ return LoadOffHeap(address, 0, rep);
+ }
+ OpIndex LoadOffHeap(OpIndex address, int32_t offset,
+ MemoryRepresentation rep) {
+ return Load(address, LoadOp::Kind::RawAligned(), rep, offset);
+ }
+ OpIndex LoadOffHeap(OpIndex address, OpIndex index, int32_t offset,
+ MemoryRepresentation rep) {
+ return Load(address, index, LoadOp::Kind::RawAligned(), rep, offset,
+ rep.SizeInBytesLog2());
}
+
void Store(OpIndex base, OpIndex index, OpIndex value, StoreOp::Kind kind,
MemoryRepresentation stored_rep, WriteBarrierKind write_barrier,
int32_t offset = 0, uint8_t element_size_log2 = 0) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceStore(base, index, value, kind, stored_rep, write_barrier,
offset, element_size_log2);
}
+ void Store(OpIndex base, OpIndex value, StoreOp::Kind kind,
+ MemoryRepresentation stored_rep, WriteBarrierKind write_barrier,
+ int32_t offset = 0) {
+ Store(base, OpIndex::Invalid(), value, kind, stored_rep, write_barrier,
+ offset);
+ }
+ void StoreOffHeap(OpIndex address, OpIndex value, MemoryRepresentation rep,
+ int32_t offset = 0) {
+ Store(address, value, StoreOp::Kind::RawAligned(), rep,
+ WriteBarrierKind::kNoWriteBarrier, offset);
+ }
+ void StoreOffHeap(OpIndex address, OpIndex index, OpIndex value,
+ MemoryRepresentation rep, int32_t offset) {
+ Store(address, index, value, StoreOp::Kind::RawAligned(), rep,
+ WriteBarrierKind::kNoWriteBarrier, offset, rep.SizeInBytesLog2());
+ }
+
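A minimal usage sketch of the off-heap helpers (the `address` and `index` names are illustrative); they simply forward to Load/Store with LoadOp::Kind::RawAligned() / StoreOp::Kind::RawAligned() and no write barrier:

  // Read a raw 32-bit value at address + 8, then store a value at
  // address + index * 4 + 8 (the index is scaled by the representation size).
  OpIndex value = LoadOffHeap(address, 8, MemoryRepresentation::Int32());
  StoreOffHeap(address, index, value, MemoryRepresentation::Int32(), 8);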
+ template <typename Rep = Any>
+ V<Rep> LoadField(V<Tagged> object, const FieldAccess& access) {
+ MachineType machine_type = access.machine_type;
+ if (machine_type.IsMapWord()) {
+ machine_type = MachineType::TaggedPointer();
+#ifdef V8_MAP_PACKING
+ UNIMPLEMENTED();
+#endif
+ }
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(machine_type);
+#ifdef V8_ENABLE_SANDBOX
+ bool is_sandboxed_external =
+ access.type.Is(compiler::Type::ExternalPointer());
+ if (is_sandboxed_external) {
+      // Fields for sandboxed external pointers contain a 32-bit handle, not a
+      // 64-bit raw pointer.
+ rep = MemoryRepresentation::Uint32();
+ }
+#endif // V8_ENABLE_SANDBOX
+ V<Rep> value = Load(object, LoadOp::Kind::Aligned(access.base_is_tagged),
+ rep, access.offset);
+#ifdef V8_ENABLE_SANDBOX
+ if (is_sandboxed_external) {
+ value = DecodeExternalPointer(value, access.external_pointer_tag);
+ }
+ if (access.is_bounded_size_access) {
+ DCHECK(!is_sandboxed_external);
+ value = ShiftRightLogical(value, kBoundedSizeShift,
+ WordRepresentation::PointerSized());
+ }
+#endif // V8_ENABLE_SANDBOX
+ return value;
+ }
+
+ V<Map> LoadMapField(V<Object> object) {
+ return LoadField<Map>(object, AccessBuilder::ForMap());
+ }
+
+ void StoreField(V<Tagged> object, const FieldAccess& access, V<Any> value) {
+    // External pointers must never be stored by optimized code.
+ DCHECK(!access.type.Is(compiler::Type::ExternalPointer()) ||
+ !V8_ENABLE_SANDBOX_BOOL);
+ // SandboxedPointers are not currently stored by optimized code.
+ DCHECK(!access.type.Is(compiler::Type::SandboxedPointer()));
+
+#ifdef V8_ENABLE_SANDBOX
+ if (access.is_bounded_size_access) {
+ value = ShiftLeft(value, kBoundedSizeShift,
+ WordRepresentation::PointerSized());
+ }
+#endif // V8_ENABLE_SANDBOX
+
+ StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
+ MachineType machine_type = access.machine_type;
+ if (machine_type.IsMapWord()) {
+ machine_type = MachineType::TaggedPointer();
+#ifdef V8_MAP_PACKING
+ UNIMPLEMENTED();
+#endif
+ }
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(machine_type);
+ Store(object, value, kind, rep, access.write_barrier_kind, access.offset);
+ }
+
+ template <typename Rep = Any>
+ V<Rep> LoadElement(V<Tagged> object, const ElementAccess& access,
+ V<WordPtr> index) {
+ DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase);
+ LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(access.machine_type);
+ return Load(object, index, kind, rep, access.header_size,
+ rep.SizeInBytesLog2());
+ }
+
+ void StoreElement(V<Tagged> object, const ElementAccess& access,
+ V<WordPtr> index, V<Any> value) {
+ DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase);
+ LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(access.machine_type);
+ Store(object, index, value, kind, rep, access.write_barrier_kind,
+ access.header_size, rep.SizeInBytesLog2());
+ }
+
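As a usage sketch, these field/element helpers consume the same FieldAccess/ElementAccess descriptors that Turbofan's AccessBuilder produces (the `object` and `index` names are illustrative):

  // Load the map of a heap object, then the index-th tagged element of a
  // FixedArray, using AccessBuilder-provided access descriptors.
  V<Map> map = LoadMapField(object);
  V<Tagged> element =
      LoadElement<Tagged>(object, AccessBuilder::ForFixedArrayElement(), index);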
+ V<Tagged> Allocate(
+ V<WordPtr> size, AllocationType type,
+ AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceAllocate(size, type, allow_large_objects);
+ }
+
+ OpIndex DecodeExternalPointer(OpIndex handle, ExternalPointerTag tag) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceDecodeExternalPointer(handle, tag);
+ }
- void Retain(OpIndex value) { stack().ReduceRetain(value); }
+ void Retain(OpIndex value) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceRetain(value);
+ }
OpIndex StackPointerGreaterThan(OpIndex limit, StackCheckKind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceStackPointerGreaterThan(limit, kind);
}
OpIndex StackCheckOffset() {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceFrameConstant(
FrameConstantOp::Kind::kStackCheckOffset);
}
OpIndex FramePointer() {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceFrameConstant(FrameConstantOp::Kind::kFramePointer);
}
OpIndex ParentFramePointer() {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceFrameConstant(
FrameConstantOp::Kind::kParentFramePointer);
}
OpIndex StackSlot(int size, int alignment) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceStackSlot(size, alignment);
}
- void Goto(Block* destination) { stack().ReduceGoto(destination); }
- void Branch(OpIndex condition, Block* if_true, Block* if_false) {
- stack().ReduceBranch(condition, if_true, if_false);
+ void Goto(Block* destination) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceGoto(destination);
+ }
+ void Branch(V<Word32> condition, Block* if_true, Block* if_false,
+ BranchHint hint = BranchHint::kNone) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceBranch(condition, if_true, if_false, hint);
}
OpIndex Select(OpIndex cond, OpIndex vtrue, OpIndex vfalse,
RegisterRepresentation rep, BranchHint hint,
SelectOp::Implementation implem) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceSelect(cond, vtrue, vfalse, rep, hint, implem);
}
void Switch(OpIndex input, base::Vector<const SwitchOp::Case> cases,
- Block* default_case) {
- stack().ReduceSwitch(input, cases, default_case);
+ Block* default_case,
+ BranchHint default_hint = BranchHint::kNone) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceSwitch(input, cases, default_case, default_hint);
}
- OpIndex CatchException(OpIndex call, Block* if_success, Block* if_exception) {
- return stack().ReduceCatchException(call, if_success, if_exception);
+ void Unreachable() {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceUnreachable();
}
- void Unreachable() { stack().ReduceUnreachable(); }
- OpIndex Parameter(int index, const char* debug_name = nullptr) {
- return stack().ReduceParameter(index, debug_name);
+ OpIndex Parameter(int index, RegisterRepresentation rep,
+ const char* debug_name = nullptr) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceParameter(index, rep, debug_name);
+ }
+ OpIndex OsrValue(int index) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceOsrValue(index);
}
- OpIndex OsrValue(int index) { return stack().ReduceOsrValue(index); }
void Return(OpIndex pop_count, base::Vector<OpIndex> return_values) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceReturn(pop_count, return_values);
}
void Return(OpIndex result) {
Return(Word32Constant(0), base::VectorOf({result}));
}
- OpIndex Call(OpIndex callee, base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor) {
- return stack().ReduceCall(callee, arguments, descriptor);
+ OpIndex Call(OpIndex callee, OpIndex frame_state,
+ base::Vector<const OpIndex> arguments,
+ const TSCallDescriptor* descriptor) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceCall(callee, frame_state, arguments, descriptor);
+ }
+ OpIndex Call(OpIndex callee, std::initializer_list<OpIndex> arguments,
+ const TSCallDescriptor* descriptor) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return Call(callee, OpIndex::Invalid(), base::VectorOf(arguments),
+ descriptor);
+ }
+
+ template <typename Descriptor>
+ std::enable_if_t<Descriptor::NeedsFrameState && Descriptor::NeedsContext,
+ typename Descriptor::result_t>
+ CallBuiltin(Isolate* isolate, OpIndex frame_state, OpIndex context,
+ const typename Descriptor::arguments_t& args) {
+ DCHECK(frame_state.valid());
+ DCHECK(context.valid());
+ return CallBuiltinImpl<typename Descriptor::result_t>(
+ isolate, Descriptor::Function,
+ Descriptor::Create(isolate, stack().output_graph().graph_zone()),
+ frame_state, context, args);
+ }
+ template <typename Descriptor>
+ std::enable_if_t<!Descriptor::NeedsFrameState && Descriptor::NeedsContext,
+ typename Descriptor::result_t>
+ CallBuiltin(Isolate* isolate, OpIndex context,
+ const typename Descriptor::arguments_t& args) {
+ DCHECK(context.valid());
+ return CallBuiltinImpl<typename Descriptor::result_t>(
+ isolate, Descriptor::Function,
+ Descriptor::Create(isolate, stack().output_graph().graph_zone()), {},
+ context, args);
+ }
+ template <typename Descriptor>
+ std::enable_if_t<Descriptor::NeedsFrameState && !Descriptor::NeedsContext,
+ typename Descriptor::result_t>
+ CallBuiltin(Isolate* isolate, OpIndex frame_state,
+ const typename Descriptor::arguments_t& args) {
+ DCHECK(frame_state.valid());
+ return CallBuiltinImpl<typename Descriptor::result_t>(
+ isolate, Descriptor::Function,
+ Descriptor::Create(isolate, stack().output_graph().graph_zone()),
+ frame_state, {}, args);
+ }
+ template <typename Descriptor>
+ std::enable_if_t<!Descriptor::NeedsFrameState && !Descriptor::NeedsContext,
+ typename Descriptor::result_t>
+ CallBuiltin(Isolate* isolate, const typename Descriptor::arguments_t& args) {
+ return CallBuiltinImpl<typename Descriptor::result_t>(
+ isolate, Descriptor::Function,
+ Descriptor::Create(isolate, stack().output_graph().graph_zone()), {},
+ {}, args);
+ }
+
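The four overloads above select on two compile-time flags of the descriptor type. Judging from how the descriptors are used in this header, a BuiltinCallDescriptor entry is expected to expose roughly the following shape (an inferred sketch, not the actual definition):

  struct StringEqual {  // hypothetical shape, for illustration only
    static constexpr bool NeedsFrameState = false;
    static constexpr bool NeedsContext = false;
    static constexpr Builtin Function = Builtin::kStringEqual;
    using arguments_t = std::tuple<V<String>, V<String>, V<WordPtr>>;
    using result_t = V<Boolean>;
    static const TSCallDescriptor* Create(Isolate* isolate, Zone* graph_zone);
  };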
+ template <typename Ret, typename Args>
+ Ret CallBuiltinImpl(Isolate* isolate, Builtin function,
+ const TSCallDescriptor* desc, OpIndex frame_state,
+ V<Context> context, const Args& args) {
+ Callable callable = Builtins::CallableFor(isolate, function);
+    // Convert the arguments from the `args` tuple into a `SmallVector<OpIndex>`.
+ auto inputs = std::apply(
+ [](auto&&... as) {
+ return base::SmallVector<OpIndex, std::tuple_size_v<Args> + 1>{
+ std::forward<decltype(as)>(as)...};
+ },
+ args);
+ if (context.valid()) inputs.push_back(context);
+
+ if constexpr (std::is_same_v<Ret, void>) {
+ Call(HeapConstant(callable.code()), frame_state, base::VectorOf(inputs),
+ desc);
+ } else {
+ return Call(HeapConstant(callable.code()), frame_state,
+ base::VectorOf(inputs), desc);
+ }
+ }
+
+ V<Boolean> CallBuiltin_StringEqual(Isolate* isolate, V<String> left,
+ V<String> right, V<WordPtr> length) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringEqual>(
+ isolate, {left, right, length});
+ }
+ V<Boolean> CallBuiltin_StringLessThan(Isolate* isolate, V<String> left,
+ V<String> right) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringLessThan>(
+ isolate, {left, right});
+ }
+ V<Boolean> CallBuiltin_StringLessThanOrEqual(Isolate* isolate, V<String> left,
+ V<String> right) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringLessThanOrEqual>(
+ isolate, {left, right});
+ }
+ V<Smi> CallBuiltin_StringIndexOf(Isolate* isolate, V<String> string,
+ V<String> search, V<Smi> position) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringIndexOf>(
+ isolate, {string, search, position});
+ }
+ V<String> CallBuiltin_StringFromCodePointAt(Isolate* isolate,
+ V<String> string,
+ V<WordPtr> index) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringFromCodePointAt>(
+ isolate, {string, index});
+ }
+#ifdef V8_INTL_SUPPORT
+ V<String> CallBuiltin_StringToLowerCaseIntl(Isolate* isolate,
+ V<Context> context,
+ V<String> string) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringToLowerCaseIntl>(
+ isolate, context, {string});
+ }
+#endif // V8_INTL_SUPPORT
+ V<String> CallBuiltin_StringSubstring(Isolate* isolate, V<String> string,
+ V<WordPtr> start, V<WordPtr> end) {
+ return CallBuiltin<typename BuiltinCallDescriptor::StringSubstring>(
+ isolate, {string, start, end});
+ }
+
+ template <typename Descriptor>
+ std::enable_if_t<Descriptor::NeedsFrameState, typename Descriptor::result_t>
+ CallRuntime(Isolate* isolate, OpIndex frame_state, OpIndex context,
+ const typename Descriptor::arguments_t& args) {
+ DCHECK(frame_state.valid());
+ DCHECK(context.valid());
+ return CallRuntimeImpl<typename Descriptor::result_t>(
+ isolate, Descriptor::Function,
+ Descriptor::Create(stack().output_graph().graph_zone()), frame_state,
+ context, args);
+ }
+ template <typename Descriptor>
+ std::enable_if_t<!Descriptor::NeedsFrameState, typename Descriptor::result_t>
+ CallRuntime(Isolate* isolate, OpIndex context,
+ const typename Descriptor::arguments_t& args) {
+ DCHECK(context.valid());
+ return CallRuntimeImpl<typename Descriptor::result_t>(
+ isolate, Descriptor::Function,
+ Descriptor::Create(stack().output_graph().graph_zone()), {}, context,
+ args);
+ }
+
+ template <typename Ret, typename Args>
+ Ret CallRuntimeImpl(Isolate* isolate, Runtime::FunctionId function,
+ const TSCallDescriptor* desc, OpIndex frame_state,
+ OpIndex context, const Args& args) {
+ const int result_size = Runtime::FunctionForId(function)->result_size;
+ constexpr size_t kMaxNumArgs = 6;
+ const size_t argc = std::tuple_size_v<Args>;
+ static_assert(kMaxNumArgs >= argc);
+    // Convert the arguments from the `args` tuple into a `SmallVector<OpIndex>`.
+ using vector_t = base::SmallVector<OpIndex, argc + 4>;
+ auto inputs = std::apply(
+ [](auto&&... as) {
+ return vector_t{std::forward<decltype(as)>(as)...};
+ },
+ args);
+ DCHECK(context.valid());
+ inputs.push_back(ExternalConstant(ExternalReference::Create(function)));
+ inputs.push_back(Word32Constant(static_cast<int>(argc)));
+ inputs.push_back(context);
+
+ if constexpr (std::is_same_v<Ret, void>) {
+ Call(CEntryStubConstant(isolate, result_size), frame_state,
+ base::VectorOf(inputs), desc);
+ } else {
+ return Call(CEntryStubConstant(isolate, result_size), frame_state,
+ base::VectorOf(inputs), desc);
+ }
}
- OpIndex CallMaybeDeopt(OpIndex callee, base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor,
- OpIndex frame_state) {
- OpIndex call = stack().ReduceCall(callee, arguments, descriptor);
- stack().ReduceCheckLazyDeopt(call, frame_state);
- return call;
+
+ V<Tagged> CallRuntime_StringCharCodeAt(Isolate* isolate, V<Context> context,
+ V<String> string, V<Number> index) {
+ return CallRuntime<typename RuntimeCallDescriptor::StringCharCodeAt>(
+ isolate, context, {string, index});
+ }
+#ifdef V8_INTL_SUPPORT
+ V<String> CallRuntime_StringToUpperCaseIntl(Isolate* isolate,
+ V<Context> context,
+ V<String> string) {
+ return CallRuntime<typename RuntimeCallDescriptor::StringToUpperCaseIntl>(
+ isolate, context, {string});
+ }
+#endif // V8_INTL_SUPPORT
+ V<Tagged> CallRuntime_TerminateExecution(Isolate* isolate,
+ OpIndex frame_state,
+ V<Context> context) {
+ return CallRuntime<typename RuntimeCallDescriptor::TerminateExecution>(
+ isolate, frame_state, context, {});
+ }
+
+ OpIndex CallAndCatchException(OpIndex callee, OpIndex frame_state,
+ base::Vector<const OpIndex> arguments,
+ Block* if_success, Block* if_exception,
+ const TSCallDescriptor* descriptor) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceCallAndCatchException(
+ callee, frame_state, arguments, if_success, if_exception, descriptor);
}
void TailCall(OpIndex callee, base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor) {
+ const TSCallDescriptor* descriptor) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceTailCall(callee, arguments, descriptor);
}
OpIndex FrameState(base::Vector<const OpIndex> inputs, bool inlined,
const FrameStateData* data) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceFrameState(inputs, inlined, data);
}
void DeoptimizeIf(OpIndex condition, OpIndex frame_state,
const DeoptimizeParameters* parameters) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceDeoptimizeIf(condition, frame_state, false, parameters);
}
void DeoptimizeIfNot(OpIndex condition, OpIndex frame_state,
const DeoptimizeParameters* parameters) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceDeoptimizeIf(condition, frame_state, true, parameters);
}
+ void DeoptimizeIf(OpIndex condition, OpIndex frame_state,
+ DeoptimizeReason reason, const FeedbackSource& feedback) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ Zone* zone = stack().output_graph().graph_zone();
+ const DeoptimizeParameters* params =
+ zone->New<DeoptimizeParameters>(reason, feedback);
+ DeoptimizeIf(condition, frame_state, params);
+ }
+ void DeoptimizeIfNot(OpIndex condition, OpIndex frame_state,
+ DeoptimizeReason reason,
+ const FeedbackSource& feedback) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ Zone* zone = stack().output_graph().graph_zone();
+ const DeoptimizeParameters* params =
+ zone->New<DeoptimizeParameters>(reason, feedback);
+ DeoptimizeIfNot(condition, frame_state, params);
+ }
void Deoptimize(OpIndex frame_state, const DeoptimizeParameters* parameters) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceDeoptimize(frame_state, parameters);
}
void TrapIf(OpIndex condition, TrapId trap_id) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceTrapIf(condition, false, trap_id);
}
void TrapIfNot(OpIndex condition, TrapId trap_id) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
stack().ReduceTrapIf(condition, true, trap_id);
}
+ void StaticAssert(OpIndex condition, const char* source) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceStaticAssert(condition, source);
+ }
+
OpIndex Phi(base::Vector<const OpIndex> inputs, RegisterRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReducePhi(inputs, rep);
}
+ OpIndex Phi(std::initializer_list<OpIndex> inputs,
+ RegisterRepresentation rep) {
+ return Phi(base::VectorOf(inputs), rep);
+ }
+ template <typename T>
+ V<T> Phi(const base::Vector<V<T>>& inputs) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ std::vector<OpIndex> temp(inputs.size());
+ for (std::size_t i = 0; i < inputs.size(); ++i) temp[i] = inputs[i];
+ return Phi(base::VectorOf(temp), V<T>::rep);
+ }
+ OpIndex PendingLoopPhi(OpIndex first, RegisterRepresentation rep,
+ PendingLoopPhiOp::Data data) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReducePendingLoopPhi(first, rep, data);
+ }
OpIndex PendingLoopPhi(OpIndex first, RegisterRepresentation rep,
OpIndex old_backedge_index) {
- return stack().ReducePendingLoopPhi(first, rep, old_backedge_index);
+ return PendingLoopPhi(first, rep,
+ PendingLoopPhiOp::Data{old_backedge_index});
}
OpIndex PendingLoopPhi(OpIndex first, RegisterRepresentation rep,
Node* old_backedge_index) {
- return stack().ReducePendingLoopPhi(first, rep, old_backedge_index);
+ return PendingLoopPhi(first, rep,
+ PendingLoopPhiOp::Data{old_backedge_index});
+ }
+ template <typename T>
+ V<T> PendingLoopPhi(V<T> first, PendingLoopPhiOp::PhiIndex phi_index) {
+ return PendingLoopPhi(first, V<T>::rep, PendingLoopPhiOp::Data{phi_index});
}
+ OpIndex Tuple(base::Vector<OpIndex> indices) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceTuple(indices);
+ }
OpIndex Tuple(OpIndex a, OpIndex b) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
return stack().ReduceTuple(base::VectorOf({a, b}));
}
- OpIndex Projection(OpIndex tuple, uint16_t index) {
- return stack().ReduceProjection(tuple, index);
+ OpIndex Projection(OpIndex tuple, uint16_t index,
+ RegisterRepresentation rep) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceProjection(tuple, index, rep);
+ }
+ template <typename T>
+ V<T> Projection(OpIndex tuple, uint16_t index) {
+ return Projection(tuple, index, V<T>::rep);
+ }
+ OpIndex CheckTurboshaftTypeOf(OpIndex input, RegisterRepresentation rep,
+ Type expected_type, bool successful) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceCheckTurboshaftTypeOf(input, rep, expected_type,
+ successful);
+ }
+
+ OpIndex LoadException() {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceLoadException();
+ }
+
+ // Return `true` if the control flow after the conditional jump is reachable.
+ bool GotoIf(OpIndex condition, Block* if_true,
+ BranchHint hint = BranchHint::kNone) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return false;
+ }
+ Block* if_false = stack().NewBlock();
+ stack().Branch(condition, if_true, if_false, hint);
+ return stack().Bind(if_false);
+ }
+ // Return `true` if the control flow after the conditional jump is reachable.
+ bool GotoIfNot(OpIndex condition, Block* if_false,
+ BranchHint hint = BranchHint::kNone) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return false;
+ }
+ Block* if_true = stack().NewBlock();
+ stack().Branch(condition, if_true, if_false, hint);
+ return stack().Bind(if_true);
+ }
+
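GotoIf/GotoIfNot report whether the fall-through block could be bound, so lowering code can stop emitting once everything that follows is unreachable; a hedged usage sketch (the `condition` and `slow_path` names are illustrative):

  // If binding the fall-through block fails, the rest of this lowering is
  // unreachable and no further operations need to be emitted.
  if (!GotoIf(condition, slow_path)) return OpIndex::Invalid();
  // ... continue emitting the fast path here ...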
+ OpIndex CallBuiltin(Builtin builtin, OpIndex frame_state,
+ const base::Vector<OpIndex>& arguments,
+ Isolate* isolate) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ Callable const callable = Builtins::CallableFor(isolate, builtin);
+ Zone* graph_zone = stack().output_graph().graph_zone();
+
+ const CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ graph_zone, callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoThrow | Operator::kNoDeopt);
+ DCHECK_EQ(call_descriptor->NeedsFrameState(), frame_state.valid());
+
+ const TSCallDescriptor* ts_call_descriptor =
+ TSCallDescriptor::Create(call_descriptor, graph_zone);
+
+ OpIndex callee = stack().HeapConstant(callable.code());
+
+ return stack().Call(callee, frame_state, arguments, ts_call_descriptor);
+ }
+
+ V<Tagged> NewConsString(V<Word32> length, V<Tagged> first, V<Tagged> second) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceNewConsString(length, first, second);
+ }
+ V<Tagged> NewArray(V<WordPtr> length, NewArrayOp::Kind kind,
+ AllocationType allocation_type) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceNewArray(length, kind, allocation_type);
+ }
+ V<Tagged> NewDoubleArray(V<WordPtr> length, AllocationType allocation_type) {
+ return NewArray(length, NewArrayOp::Kind::kDouble, allocation_type);
+ }
+
+ V<Tagged> DoubleArrayMinMax(V<Tagged> array, DoubleArrayMinMaxOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceDoubleArrayMinMax(array, kind);
+ }
+ V<Tagged> DoubleArrayMin(V<Tagged> array) {
+ return DoubleArrayMinMax(array, DoubleArrayMinMaxOp::Kind::kMin);
+ }
+ V<Tagged> DoubleArrayMax(V<Tagged> array) {
+ return DoubleArrayMinMax(array, DoubleArrayMinMaxOp::Kind::kMax);
+ }
+
+ V<Any> LoadFieldByIndex(V<Tagged> object, V<Word32> index) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceLoadFieldByIndex(object, index);
+ }
+
+ void DebugBreak() {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return;
+ }
+ stack().ReduceDebugBreak();
+ }
+
+ V<Tagged> BigIntBinop(V<Tagged> left, V<Tagged> right, OpIndex frame_state,
+ BigIntBinopOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceBigIntBinop(left, right, frame_state, kind);
+ }
+#define BIGINT_BINOP(kind) \
+ V<Tagged> BigInt##kind(V<Tagged> left, V<Tagged> right, \
+ OpIndex frame_state) { \
+ return BigIntBinop(left, right, frame_state, \
+ BigIntBinopOp::Kind::k##kind); \
+ }
+ BIGINT_BINOP(Add)
+ BIGINT_BINOP(Sub)
+ BIGINT_BINOP(Mul)
+ BIGINT_BINOP(Div)
+ BIGINT_BINOP(Mod)
+ BIGINT_BINOP(BitwiseAnd)
+ BIGINT_BINOP(BitwiseOr)
+ BIGINT_BINOP(BitwiseXor)
+ BIGINT_BINOP(ShiftLeft)
+ BIGINT_BINOP(ShiftRightArithmetic)
+#undef BIGINT_BINOP
+
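Each BIGINT_BINOP line is a thin wrapper; for example, BIGINT_BINOP(Add) expands to:

  V<Tagged> BigIntAdd(V<Tagged> left, V<Tagged> right, OpIndex frame_state) {
    return BigIntBinop(left, right, frame_state, BigIntBinopOp::Kind::kAdd);
  }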
+ V<Word32> BigIntEqual(V<Tagged> left, V<Tagged> right) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceBigIntEqual(left, right);
+ }
+
+ V<Word32> BigIntComparison(V<Tagged> left, V<Tagged> right,
+ BigIntComparisonOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceBigIntComparison(left, right, kind);
+ }
+ V<Word32> BigIntLessThan(V<Tagged> left, V<Tagged> right) {
+ return BigIntComparison(left, right, BigIntComparisonOp::Kind::kLessThan);
+ }
+ V<Word32> BigIntLessThanOrEqual(V<Tagged> left, V<Tagged> right) {
+ return BigIntComparison(left, right,
+ BigIntComparisonOp::Kind::kLessThanOrEqual);
+ }
+
+ V<Tagged> BigIntUnary(V<Tagged> input, BigIntUnaryOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceBigIntUnary(input, kind);
+ }
+ V<Tagged> BigIntNegate(V<Tagged> input) {
+ return BigIntUnary(input, BigIntUnaryOp::Kind::kNegate);
+ }
+
+ V<Word32> StringAt(V<String> string, V<WordPtr> position,
+ StringAtOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringAt(string, position, kind);
+ }
+ V<Word32> StringCharCodeAt(V<String> string, V<WordPtr> position) {
+ return StringAt(string, position, StringAtOp::Kind::kCharCode);
+ }
+ V<Word32> StringCodePointAt(V<String> string, V<WordPtr> position) {
+ return StringAt(string, position, StringAtOp::Kind::kCodePoint);
+ }
+
+#ifdef V8_INTL_SUPPORT
+ V<String> StringToCaseIntl(V<String> string, StringToCaseIntlOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringToCaseIntl(string, kind);
+ }
+ V<String> StringToLowerCaseIntl(V<String> string) {
+ return StringToCaseIntl(string, StringToCaseIntlOp::Kind::kLower);
+ }
+ V<String> StringToUpperCaseIntl(V<String> string) {
+ return StringToCaseIntl(string, StringToCaseIntlOp::Kind::kUpper);
+ }
+#endif // V8_INTL_SUPPORT
+
+ V<Word32> StringLength(V<Tagged> string) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringLength(string);
+ }
+
+ V<Tagged> StringIndexOf(V<Tagged> string, V<Tagged> search,
+ V<Tagged> position) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringIndexOf(string, search, position);
+ }
+
+ V<Tagged> StringFromCodePointAt(V<Tagged> string, V<WordPtr> index) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringFromCodePointAt(string, index);
+ }
+
+ V<Tagged> StringSubstring(V<Tagged> string, V<Word32> start, V<Word32> end) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringSubstring(string, start, end);
+ }
+
+ V<Boolean> StringEqual(V<String> left, V<String> right) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringEqual(left, right);
+ }
+
+ V<Boolean> StringComparison(V<String> left, V<String> right,
+ StringComparisonOp::Kind kind) {
+ if (V8_UNLIKELY(stack().generating_unreachable_operations())) {
+ return OpIndex::Invalid();
+ }
+ return stack().ReduceStringComparison(left, right, kind);
+ }
+ V<Boolean> StringLessThan(V<String> left, V<String> right) {
+ return StringComparison(left, right, StringComparisonOp::Kind::kLessThan);
+ }
+ V<Boolean> StringLessThanOrEqual(V<String> left, V<String> right) {
+ return StringComparison(left, right,
+ StringComparisonOp::Kind::kLessThanOrEqual);
+ }
+
+ template <typename Rep>
+ V<Rep> resolve(const V<Rep>& v) {
+ return v;
+ }
+ V<Word32> resolve(const ConstOrV<Word32>& v) {
+ return v.is_constant() ? Word32Constant(v.constant_value()) : v.value();
+ }
+ V<Word64> resolve(const ConstOrV<Word64>& v) {
+ return v.is_constant() ? Word64Constant(v.constant_value()) : v.value();
+ }
+ V<Float32> resolve(const ConstOrV<Float32>& v) {
+ return v.is_constant() ? Float32Constant(v.constant_value()) : v.value();
+ }
+ V<Float64> resolve(const ConstOrV<Float64>& v) {
+ return v.is_constant() ? Float64Constant(v.constant_value()) : v.value();
+ }
+
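These resolve() overloads give the ConstOrV parameters used throughout this interface their meaning: a wrapped constant is materialized lazily as the matching *Constant operation, while an already-built node passes through unchanged. A small sketch (variable names illustrative, relying on ConstOrV's implicit construction from a constant as used by SmiTag above):

  ConstOrV<Word32> five = 5;            // holds the constant, no node yet
  V<Word32> five_node = resolve(five);  // emits Word32Constant(5) here
  V<Word32> same = resolve(five_node);  // template overload: returned as-is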
+ // These methods are used by the assembler macros (IF, ELSE, ELSE_IF, END_IF).
+ template <typename L>
+ auto ControlFlowHelper_Bind(L& label)
+ -> base::prepend_tuple_type<bool, typename L::values_t> {
+ // LoopLabels need to be bound with `LOOP` instead of `BIND`.
+ static_assert(!L::is_loop);
+ return label.Bind(stack());
+ }
+
+ template <typename L>
+ auto ControlFlowHelper_BindLoop(L& label)
+ -> base::prepend_tuple_type<bool, typename L::values_t> {
+ // Only LoopLabels can be bound with `LOOP`. Otherwise use `BIND`.
+ static_assert(L::is_loop);
+ return label.BindLoop(stack());
+ }
+
+ template <typename L>
+ void ControlFlowHelper_EndLoop(L& label) {
+ static_assert(L::is_loop);
+ label.EndLoop(stack());
+ }
+
+ template <typename L>
+ void ControlFlowHelper_Goto(L& label,
+ const typename L::const_or_values_t& values) {
+ auto resolved_values = detail::ResolveAll(stack(), values);
+ label.Goto(stack(), resolved_values);
+ }
+
+ template <typename L>
+ void ControlFlowHelper_GotoIf(V<Word32> condition, L& label,
+ const typename L::const_or_values_t& values,
+ BranchHint hint) {
+ auto resolved_values = detail::ResolveAll(stack(), values);
+ label.GotoIf(stack(), condition, hint, resolved_values);
+ }
+
+ template <typename L>
+ void ControlFlowHelper_GotoIfNot(V<Word32> condition, L& label,
+ const typename L::const_or_values_t& values,
+ BranchHint hint) {
+ auto resolved_values = detail::ResolveAll(stack(), values);
+ label.GotoIfNot(stack(), condition, hint, resolved_values);
+ }
+
+ bool ControlFlowHelper_If(V<Word32> condition, bool negate, BranchHint hint) {
+ Block* then_block = stack().NewBlock();
+ Block* else_block = stack().NewBlock();
+ Block* end_block = stack().NewBlock();
+ if (negate) {
+ this->Branch(condition, else_block, then_block, hint);
+ } else {
+ this->Branch(condition, then_block, else_block, hint);
+ }
+ if_scope_stack_.emplace_back(else_block, end_block);
+ return stack().Bind(then_block);
+ }
+
+ template <typename F>
+ bool ControlFlowHelper_ElseIf(F&& condition_builder, BranchHint hint) {
+ DCHECK_LT(0, if_scope_stack_.size());
+ auto& info = if_scope_stack_.back();
+ Block* else_block = info.else_block;
+ DCHECK_NOT_NULL(else_block);
+ if (!stack().Bind(else_block)) return false;
+ Block* then_block = stack().NewBlock();
+ info.else_block = stack().NewBlock();
+ stack().Branch(condition_builder(), then_block, info.else_block, hint);
+ return stack().Bind(then_block);
+ }
+
+ bool ControlFlowHelper_Else() {
+ DCHECK_LT(0, if_scope_stack_.size());
+ auto& info = if_scope_stack_.back();
+ Block* else_block = info.else_block;
+ DCHECK_NOT_NULL(else_block);
+ info.else_block = nullptr;
+ return stack().Bind(else_block);
+ }
+
+ void ControlFlowHelper_EndIf() {
+ DCHECK_LT(0, if_scope_stack_.size());
+ auto& info = if_scope_stack_.back();
+ // If the else block hasn't been bound yet (i.e., there was no final ELSE),
+ // bind it and let it jump to the end block.
+ if (info.else_block) {
+ if (stack().Bind(info.else_block)) {
+ stack().Goto(info.end_block);
+ }
+ }
+ stack().Bind(info.end_block);
+ if_scope_stack_.pop_back();
+ }
+
+ void ControlFlowHelper_GotoEnd() {
+ DCHECK_LT(0, if_scope_stack_.size());
+ auto& info = if_scope_stack_.back();
+
+ if (!stack().current_block()) {
+ // We had an unconditional goto inside the block, so we don't need to add
+ // a jump to the end block.
+ return;
+ }
+ // Generate a jump to the end block.
+ stack().Goto(info.end_block);
}
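+
+ // Roughly, the macros are expected to expand onto the helpers above along
+ // these lines (illustrative sketch only, not the exact macro definitions):
+ //
+ //   IF (cond) {          // ControlFlowHelper_If(cond, /*negate*/ false, hint)
+ //     /* then code */    // each arm ends with ControlFlowHelper_GotoEnd()
+ //   } ELSE_IF (cond2) {  // ControlFlowHelper_ElseIf([&] { return cond2; }, hint)
+ //     /* else-if code */
+ //   } ELSE {             // ControlFlowHelper_Else()
+ //     /* else code */
+ //   }
+ //   END_IF               // ControlFlowHelper_EndIf()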
private:
Assembler& stack() { return *static_cast<Assembler*>(this); }
+ struct IfScopeInfo {
+ Block* else_block;
+ Block* end_block;
+
+ IfScopeInfo(Block* else_block, Block* end_block)
+ : else_block(else_block), end_block(end_block) {}
+ };
+ base::SmallVector<IfScopeInfo, 16> if_scope_stack_;
+ // [0] contains the stub with exit frame.
+ MaybeHandle<Code> cached_centry_stub_constants_[4];
};
-template <template <class> class... Reducers>
-class Assembler
- : public GraphVisitor<Assembler<Reducers...>>,
- public ReducerStack<Assembler<Reducers...>, Reducers..., ReducerBase>,
- public OperationMatching<Assembler<Reducers...>>,
- public AssemblerOpInterface<Assembler<Reducers...>> {
- using Stack = ReducerStack<Assembler<Reducers...>, Reducers...,
- v8::internal::compiler::turboshaft::ReducerBase>;
+template <class Reducers>
+class Assembler : public GraphVisitor<Assembler<Reducers>>,
+ public reducer_stack_type<Reducers>::type,
+ public OperationMatching<Assembler<Reducers>>,
+ public AssemblerOpInterface<Assembler<Reducers>> {
+ using Stack = typename reducer_stack_type<Reducers>::type;
public:
+ template <class... ReducerArgs>
explicit Assembler(Graph& input_graph, Graph& output_graph, Zone* phase_zone,
- compiler::NodeOriginTable* origins = nullptr)
- : GraphVisitor<Assembler>(input_graph, output_graph, phase_zone,
- origins) {
+ compiler::NodeOriginTable* origins,
+ const typename Stack::ArgT& reducer_args)
+ : GraphVisitor<Assembler>(input_graph, output_graph, phase_zone, origins),
+ Stack(reducer_args) {
SupportedOperations::Initialize();
}
- Block* NewBlock(Block::Kind kind) {
- return this->output_graph().NewBlock(kind);
- }
+ Block* NewLoopHeader() { return this->output_graph().NewLoopHeader(); }
+ Block* NewBlock() { return this->output_graph().NewBlock(); }
- using OperationMatching<Assembler<Reducers...>>::Get;
+ using OperationMatching<Assembler<Reducers>>::Get;
+ using Stack::Get;
- V8_INLINE V8_WARN_UNUSED_RESULT bool Bind(Block* block,
- const Block* origin = nullptr) {
- if (!this->output_graph().Add(block)) return false;
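+ // Binds {block} as the current block. Returns false when {block} cannot be
+ // added to the output graph (i.e., it is unreachable); in that case the
+ // operations emitted until the next Bind are treated as unreachable.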
+ V8_INLINE bool Bind(Block* block) {
+ if (!this->output_graph().Add(block)) {
+ generating_unreachable_operations_ = true;
+ return false;
+ }
DCHECK_NULL(current_block_);
current_block_ = block;
- Stack::Bind(block, origin);
+ generating_unreachable_operations_ = false;
+ block->SetOrigin(this->current_input_block());
+ Stack::Bind(block);
return true;
}
- V8_INLINE void BindReachable(Block* block, const Block* origin = nullptr) {
- bool bound = Bind(block, origin);
+ // TODO(nicohartmann@): Remove this.
+ V8_INLINE void BindReachable(Block* block) {
+ bool bound = Bind(block);
DCHECK(bound);
USE(bound);
}
@@ -838,8 +2405,28 @@ class Assembler
}
Block* current_block() const { return current_block_; }
+ bool generating_unreachable_operations() const {
+ DCHECK_IMPLIES(generating_unreachable_operations_,
+ current_block_ == nullptr);
+ return generating_unreachable_operations_;
+ }
OpIndex current_operation_origin() const { return current_operation_origin_; }
+ // ReduceProjection eliminates projections to tuples and returns instead the
+ // corresponding tuple input. We do this at the top of the stack to avoid
+ // passing this Projection around needlessly. This is in particular important
+ // to ValueNumberingReducer, which assumes that it's at the bottom of the
+ // stack, and that the BaseReducer will actually emit an Operation. If we put
+ // this projection-to-tuple-simplification in the BaseReducer, then this
+ // assumption of the ValueNumberingReducer will break.
+ OpIndex ReduceProjection(OpIndex tuple, uint16_t index,
+ RegisterRepresentation rep) {
+ if (auto* tuple_op = this->template TryCast<TupleOp>(tuple)) {
+ return tuple_op->input(index);
+ }
+ return Stack::ReduceProjection(tuple, index, rep);
+ }
+
template <class Op, class... Args>
OpIndex Emit(Args... args) {
static_assert((std::is_base_of<Operation, Op>::value));
@@ -849,20 +2436,194 @@ class Assembler
Op& op = this->output_graph().template Add<Op>(args...);
this->output_graph().operation_origins()[result] =
current_operation_origin_;
+#ifdef DEBUG
+ op_to_block_[result] = current_block_;
+ DCHECK(ValidInputs(result));
+#endif // DEBUG
if (op.Properties().is_block_terminator) FinalizeBlock();
return result;
}
+ // Adds {source} to the predecessors of {destination}.
+ void AddPredecessor(Block* source, Block* destination, bool branch) {
+ DCHECK_IMPLIES(branch, source->EndsWithBranchingOp(this->output_graph()));
+ if (destination->LastPredecessor() == nullptr) {
+ // {destination} has currently no predecessors.
+ DCHECK(destination->IsLoopOrMerge());
+ if (branch && destination->IsLoop()) {
+ // We always split Branch edges that go to loop headers.
+ SplitEdge(source, destination);
+ } else {
+ destination->AddPredecessor(source);
+ if (branch) {
+ DCHECK(!destination->IsLoop());
+ destination->SetKind(Block::Kind::kBranchTarget);
+ }
+ }
+ return;
+ } else if (destination->IsBranchTarget()) {
+ // {destination} used to be a BranchTarget, but branch targets can only
+ // have one predecessor. We'll thus split its (single) incoming edge, and
+ // change its type to kMerge.
+ DCHECK_EQ(destination->PredecessorCount(), 1);
+ Block* pred = destination->LastPredecessor();
+ destination->ResetLastPredecessor();
+ destination->SetKind(Block::Kind::kMerge);
+ // We have to split `pred` first to preserve order of predecessors.
+ SplitEdge(pred, destination);
+ if (branch) {
+ // A branch always goes to a BranchTarget. We thus split the edge: we'll
+ // insert a new Block, to which {source} will branch, and which will
+ // "Goto" to {destination}.
+ SplitEdge(source, destination);
+ } else {
+ // {destination} is a Merge, and {source} just does a Goto; nothing
+ // special to do.
+ destination->AddPredecessor(source);
+ }
+ return;
+ }
+
+ DCHECK(destination->IsLoopOrMerge());
+
+ if (branch) {
+ // A branch always goes to a BranchTarget. We thus split the edge: we'll
+ // insert a new Block, to which {source} will branch, and which will
+ // "Goto" to {destination}.
+ SplitEdge(source, destination);
+ } else {
+ // {destination} is a Merge, and {source} just does a Goto; nothing
+ // special to do.
+ destination->AddPredecessor(source);
+ }
+ }
+
private:
void FinalizeBlock() {
this->output_graph().Finalize(current_block_);
current_block_ = nullptr;
}
+ // Insert a new Block between {source} and {destination}, in order to maintain
+ // the split-edge form.
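+ // (In split-edge form, a block that ends in a branching operation never
+ // jumps directly to a block with multiple predecessors; such edges get an
+ // intermediate block, which is what AddPredecessor relies on above.)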
+ void SplitEdge(Block* source, Block* destination) {
+ DCHECK(source->EndsWithBranchingOp(this->output_graph()));
+ // Creating the new intermediate block
+ Block* intermediate_block = NewBlock();
+ intermediate_block->SetKind(Block::Kind::kBranchTarget);
+ // Updating "predecessor" edge of {intermediate_block}. This needs to be
+ // done before calling Bind, because otherwise Bind will think that this
+ // block is not reachable.
+ intermediate_block->AddPredecessor(source);
+
+ // Updating {source}'s last Branch/Switch/CallAndCatchException. Note that
+ // this must be done before Binding {intermediate_block}, otherwise,
+ // Reducer::Bind methods will see an invalid block being bound (because its
+ // predecessor would be a branch, but none of its targets would be the block
+ // being bound).
+ Operation& op = this->output_graph().Get(
+ this->output_graph().PreviousIndex(source->end()));
+ switch (op.opcode) {
+ case Opcode::kBranch: {
+ BranchOp& branch = op.Cast<BranchOp>();
+ if (branch.if_true == destination) {
+ branch.if_true = intermediate_block;
+ // We enforce that Branches if_false and if_true can never be the same
+ // (there is a DCHECK in Assembler::Branch enforcing that).
+ DCHECK_NE(branch.if_false, destination);
+ } else {
+ DCHECK_EQ(branch.if_false, destination);
+ branch.if_false = intermediate_block;
+ }
+ break;
+ }
+ case Opcode::kCallAndCatchException: {
+ CallAndCatchExceptionOp& catch_exception =
+ op.Cast<CallAndCatchExceptionOp>();
+ if (catch_exception.if_success == destination) {
+ catch_exception.if_success = intermediate_block;
+ // We enforce that CallAndCatchException's if_success and if_exception
+ // can never be the same (there is a DCHECK in
+ // Assembler::CallAndCatchException enforcing that).
+ DCHECK_NE(catch_exception.if_exception, destination);
+ } else {
+ DCHECK_EQ(catch_exception.if_exception, destination);
+ catch_exception.if_exception = intermediate_block;
+ }
+ break;
+ }
+ case Opcode::kSwitch: {
+ SwitchOp& switch_op = op.Cast<SwitchOp>();
+ bool found = false;
+ for (auto case_block : switch_op.cases) {
+ if (case_block.destination == destination) {
+ case_block.destination = intermediate_block;
+ DCHECK(!found);
+ found = true;
+#ifndef DEBUG
+ break;
+#endif
+ }
+ }
+ DCHECK_IMPLIES(found, switch_op.default_case != destination);
+ if (!found) {
+ DCHECK_EQ(switch_op.default_case, destination);
+ switch_op.default_case = intermediate_block;
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ BindReachable(intermediate_block);
+ intermediate_block->SetOrigin(source->OriginForBlockEnd());
+ // Inserting a Goto in {intermediate_block} to {destination}. This will
+ // create the edge from {intermediate_block} to {destination}. Note that
+ // this will call AddPredecessor, but we've already removed the potential
+ // edge of {destination} that needed splitting, so there is no risk of
+ // infinite recursion here.
+ this->Goto(destination);
+ }
+
Block* current_block_ = nullptr;
+ bool generating_unreachable_operations_ = false;
// TODO(dmercadier,tebbi): remove {current_operation_origin_} and pass instead
// additional parameters to ReduceXXX methods.
OpIndex current_operation_origin_ = OpIndex::Invalid();
+#ifdef DEBUG
+ GrowingSidetable<Block*> op_to_block_{this->phase_zone()};
+
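+ // Debug check that every input of the operation at {op_idx} is defined in
+ // a block that dominates its use (checked against the matching predecessor
+ // block for Phi inputs).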
+ bool ValidInputs(OpIndex op_idx) {
+ const Operation& op = this->output_graph().Get(op_idx);
+ if (auto* phi = op.TryCast<PhiOp>()) {
+ auto pred_blocks = current_block_->Predecessors();
+ for (size_t i = 0; i < phi->input_count; ++i) {
+ Block* input_block = op_to_block_[phi->input(i)];
+ Block* pred_block = pred_blocks[i];
+ if (input_block->GetCommonDominator(pred_block) != input_block) {
+ std::cerr << "Input #" << phi->input(i).id()
+ << " does not dominate predecessor B"
+ << pred_block->index().id() << ".\n";
+ std::cerr << op_idx.id() << ": " << op << "\n";
+ return false;
+ }
+ }
+ } else {
+ for (OpIndex input : op.inputs()) {
+ Block* input_block = op_to_block_[input];
+ if (input_block->GetCommonDominator(current_block_) != input_block) {
+ std::cerr << "Input #" << input.id()
+ << " does not dominate its use.\n";
+ std::cerr << op_idx.id() << ": " << op << "\n";
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+#endif // DEBUG
};
} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/assert-types-reducer.h b/deps/v8/src/compiler/turboshaft/assert-types-reducer.h
new file mode 100644
index 0000000000..fcbe01daa3
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/assert-types-reducer.h
@@ -0,0 +1,153 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_ASSERT_TYPES_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_ASSERT_TYPES_REDUCER_H_
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/template-utils.h"
+#include "src/base/vector.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/sidetable.h"
+#include "src/compiler/turboshaft/type-inference-reducer.h"
+#include "src/compiler/turboshaft/types.h"
+#include "src/compiler/turboshaft/uniform-reducer-adapter.h"
+#include "src/heap/parked-scope.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct AssertTypesReducerArgs {
+ Isolate* isolate;
+};
+
+template <class Next>
+class AssertTypesReducer
+ : public UniformReducerAdapter<AssertTypesReducer, Next> {
+ // TODO(nicohartmann@): Reenable this in a way that compiles with msvc light.
+ // static_assert(next_contains_reducer<Next, TypeInferenceReducer>::value);
+
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ using Adapter = UniformReducerAdapter<AssertTypesReducer, Next>;
+ using ArgT =
+ base::append_tuple_type<typename Next::ArgT, AssertTypesReducerArgs>;
+
+ template <typename... Args>
+ explicit AssertTypesReducer(const std::tuple<Args...>& args)
+ : Adapter(args),
+ isolate_(std::get<AssertTypesReducerArgs>(args).isolate) {}
+
+ uint32_t NoContextConstant() { return IntToSmi(Context::kNoContext); }
+
+ template <typename Op, typename Continuation>
+ OpIndex ReduceInputGraphOperation(OpIndex ig_index, const Op& operation) {
+ OpIndex og_index = Continuation{this}.ReduceInputGraph(ig_index, operation);
+ if (!og_index.valid()) return og_index;
+ if (!CanBeTyped(operation)) return og_index;
+ // Unfortunately, we cannot insert assertions after block terminators, so we
+ // skip them here.
+ if (operation.Properties().is_block_terminator) return og_index;
+
+ auto reps = operation.outputs_rep();
+ DCHECK_GT(reps.size(), 0);
+ if (reps.size() == 1) {
+ Type type = Asm().GetInputGraphType(ig_index);
+ InsertTypeAssert(reps[0], og_index, type);
+ }
+ return og_index;
+ }
+
+ void InsertTypeAssert(RegisterRepresentation rep, OpIndex value,
+ const Type& type) {
+ DCHECK(!type.IsInvalid());
+ if (type.IsNone()) {
+ Asm().Unreachable();
+ return;
+ }
+
+ if (type.IsAny()) {
+ // Ignore Any-typed values for now.
+ return;
+ }
+
+ auto GenerateBuiltinCall =
+ [this](Builtin builtin, OpIndex original_value,
+ base::SmallVector<OpIndex, 6> actual_value_indices,
+ const Type& type) {
+ uint32_t op_id = static_cast<uint32_t>(IntToSmi(original_value.id()));
+ // Add expected type and operation id.
+ Handle<TurboshaftType> expected_type = type.AllocateOnHeap(factory());
+ actual_value_indices.push_back(Asm().HeapConstant(expected_type));
+ actual_value_indices.push_back(Asm().Word32Constant(op_id));
+ actual_value_indices.push_back(
+ Asm().Word32Constant(NoContextConstant()));
+ Asm().CallBuiltin(
+ builtin, OpIndex::Invalid(),
+ {actual_value_indices.data(), actual_value_indices.size()},
+ isolate_);
+#ifdef DEBUG
+ // Used for debugging
+ if (v8_flags.turboshaft_trace_typing) {
+ PrintF("Inserted assert for %3d:%-40s (%s)\n", original_value.id(),
+ Asm().output_graph().Get(original_value).ToString().c_str(),
+ type.ToString().c_str());
+ }
+#endif
+ };
+
+ switch (rep.value()) {
+ case RegisterRepresentation::Word32(): {
+ DCHECK(type.IsWord32());
+ base::SmallVector<OpIndex, 6> actual_value_indices = {value};
+ GenerateBuiltinCall(Builtin::kCheckTurboshaftWord32Type, value,
+ std::move(actual_value_indices), type);
+ break;
+ }
+ case RegisterRepresentation::Word64(): {
+ DCHECK(type.IsWord64());
+ OpIndex value_high = Asm().Word64ShiftRightLogical(
+ value, Asm().Word64Constant(static_cast<uint64_t>(32)));
+ OpIndex value_low = value; // Use implicit truncation to word32.
+ base::SmallVector<OpIndex, 6> actual_value_indices = {value_high,
+ value_low};
+ GenerateBuiltinCall(Builtin::kCheckTurboshaftWord64Type, value,
+ std::move(actual_value_indices), type);
+ break;
+ }
+ case RegisterRepresentation::Float32(): {
+ DCHECK(type.IsFloat32());
+ base::SmallVector<OpIndex, 6> actual_value_indices = {value};
+ GenerateBuiltinCall(Builtin::kCheckTurboshaftFloat32Type, value,
+ std::move(actual_value_indices), type);
+ break;
+ }
+ case RegisterRepresentation::Float64(): {
+ DCHECK(type.IsFloat64());
+ base::SmallVector<OpIndex, 6> actual_value_indices = {value};
+ GenerateBuiltinCall(Builtin::kCheckTurboshaftFloat64Type, value,
+ std::move(actual_value_indices), type);
+ break;
+ }
+ case RegisterRepresentation::Tagged():
+ case RegisterRepresentation::Compressed():
+ // TODO(nicohartmann@): Handle remaining cases.
+ break;
+ }
+ }
+
+ private:
+ Factory* factory() { return isolate_->factory(); }
+ Isolate* isolate_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_ASSERT_TYPES_REDUCER_H_
diff --git a/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h b/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h
new file mode 100644
index 0000000000..25f65ca091
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h
@@ -0,0 +1,476 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_BRANCH_ELIMINATION_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_BRANCH_ELIMINATION_REDUCER_H_
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/base/optional.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/index.h"
+#include "src/compiler/turboshaft/layered-hash-map.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/utils/utils.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+template <class Next>
+class BranchEliminationReducer : public Next {
+ // # General overview
+ //
+ // BranchEliminationAssembler optimizes branches in a few ways:
+ //
+ // 1- When a branch is nested in another branch and uses the same condition,
+ // then we can get rid of this branch and keep only the correct target.
+ // For instance:
+ //
+ // if (cond) {
+ // if (cond) print("B1");
+ // else print("B2");
+ // } else {
+ // if (cond) print("B3");
+ // else print("B4");
+ // }
+ //
+ // Will be simplified to:
+ //
+ // if (cond) {
+ // print("B1");
+ // } else {
+ // print("B4");
+ // }
+ //
+ // Because the 1st nested "if (cond)" is always true, and the 2nd is
+ // always false.
+ //
+ // Or, if you prefer a more graph-oriented visual representation:
+ //
+ // condition condition
+ // | | | |
+ // ----- | ------ |
+ // | | | |
+ // | v | v
+ // | branch | branch
+ // | / \ | / \
+ // | / \ | / \
+ // v / \ v becomes v v
+ // branch branch ======> B1 B4
+ // / \ / \
+ // / \ / \
+ // B1 B2 B3 B4
+ //
+ //
+ // 2- When 2 consecutive branches (where the 2nd one is after the merging of
+ // the 1st one) have the same condition, we can pull up the 2nd branch to
+ // get rid of the merge of the 1st branch and the branch of the 2nd
+ // branch. For instance:
+ //
+ // if (cond) {
+ // B1;
+ // } else {
+ // B2;
+ // }
+ // B3;
+ // if (cond) {
+ // B4;
+ // } else {
+ // B5;
+ // }
+ //
+ // Will be simplified to:
+ //
+ // if (cond) {
+ // B1;
+ // B3;
+ // B4;
+ // } else {
+ // B2;
+ // B3;
+ // B5;
+ // }
+ //
+ // Or, if you prefer a more graph-oriented visual representation:
+ //
+ // condition condition
+ // | | |
+ // ------- | |
+ // | v v
+ // | branch branch
+ // | / \ / \
+ // | / \ / \
+ // | B1 B2 B1 B2
+ // | \ / | |
+ // | \ / becomes | |
+ // | merge1 ======> B3 B3
+ // | B3 | |
+ // -------> branch | |
+ // / \ B4 B5
+ // / \ \ /
+ // B4 B5 \ /
+ // \ / merge
+ // \ /
+ // merge2
+ //
+ //
+ // 3- Optimizing {Return} nodes through merges. It checks that
+ // the return value is actually a {Phi} and the Return is dominated
+ // only by the Phi.
+ //
+ // if (c) { if (c) {
+ // v = 42; ====> v = 42;
+ // } else { return v;
+ // v = 5; } else {
+ // } v = 5;
+ // return v; return v;
+ // }
+ //
+ // And here's the graph representation:
+ //
+ // +----B1----+ <Some other +----B1'----+ +----B2'----+
+ // | p1 = ... | block(s): | p1 = ... | | p2 = ... |
+ // | <...> | B2,...> | <...> | | <...> |
+ // +----------+ / | return p1 | | return p2 |
+ // \ / +-----------+ +-----------+
+ // \ / =====>
+ // \ /
+ // \ |
+ // +--------B3-------+
+ // | p = Phi(p1,...) |
+ // | <...> |
+ // | return p |
+ // +-----------------+
+ //
+ //
+ // 4- Eliminating merges: if the 2 merged branches are empty and the merge
+ // block doesn't have a Phi (a Phi, if present, is either the first
+ // operation or is only preceded by FrameState operations), then we can
+ // remove the merge and instead Goto the new-graph block directly.
+ //
+ // # Technical overview of the implementation
+ //
+ // We iterate the graph in dominator order, and maintain a hash map of
+ // conditions with a resolved value along the current path. For instance, if
+ // we have:
+ // if (c) { B1 } else { B2 }
+ // when iterating B1, we'll know that |c| is true, while when iterating
+ // over B2, we'll know that |c| is false.
+ // When reaching a Branch, we'll insert the condition in the hash map, while
+ // when reaching a Merge, we'll remove it.
+ //
+ // Then, the 1st optimization (nested branches with the same condition) is
+ // trivial: we just check in the hash map whether the condition is known, and only
+ // generate the right branch target without generating the branch itself.
+ //
+ // For the 2nd optimization, when generating a Goto, we check if the
+ // destination block ends with a branch whose condition is already known. If
+ // that's the case, then we copy the destination block, and the 1st
+ // optimization will replace its final Branch by a Goto when reaching it.
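+ //
+ // As a minimal illustration: when visiting `if (c) { if (c) { A } else { B } }`,
+ // Bind() of the outer then-block records that `c` is true in
+ // {known_conditions_}, so ReduceBranch() for the inner branch finds the
+ // value and directly emits a Goto to the block containing A.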
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ template <class... Args>
+ explicit BranchEliminationReducer(const std::tuple<Args...>& args)
+ : Next(args),
+ dominator_path_(Asm().phase_zone()),
+ known_conditions_(Asm().phase_zone(),
+ Asm().input_graph().DominatorTreeDepth() * 2) {}
+
+ void Bind(Block* new_block) {
+ Next::Bind(new_block);
+
+ if (ShouldSkipOptimizationStep()) {
+ // It's important to have a ShouldSkipOptimizationStep here, because
+ // {known_conditions_} assumes that we perform all branch elimination
+ // possible (which implies that we don't ever insert twice the same thing
+ // in {known_conditions_}). If we stop doing ReduceBranch because of
+ // ShouldSkipOptimizationStep, then this assumption doesn't hold anymore,
+ // and we should thus stop updating {known_conditions_} to not trigger
+ // some DCHECKs.
+ return;
+ }
+
+ // Update {known_conditions_} based on where {new_block} is in the dominator
+ // tree.
+ ResetToBlock(new_block);
+ ReplayMissingPredecessors(new_block);
+ StartLayer(new_block);
+
+ if (new_block->IsBranchTarget()) {
+ // The current block is a branch target, so we add the branch condition
+ // along with its value in {known_conditions_}.
+ DCHECK_EQ(new_block->PredecessorCount(), 1);
+ const Operation& op =
+ new_block->LastPredecessor()->LastOperation(Asm().output_graph());
+ if (const BranchOp* branch = op.TryCast<BranchOp>()) {
+ DCHECK_EQ(new_block, any_of(branch->if_true, branch->if_false));
+ bool condition_value = branch->if_true == new_block;
+ if (!known_conditions_.Contains(branch->condition())) {
+ known_conditions_.InsertNewKey(branch->condition(), condition_value);
+ }
+ }
+ }
+ }
+
+ OpIndex ReduceBranch(OpIndex cond, Block* if_true, Block* if_false,
+ BranchHint hint) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceBranch(cond, if_true, if_false, hint);
+ }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+
+ if (const Block* if_true_origin = if_true->OriginForBlockStart()) {
+ if (const Block* if_false_origin = if_false->OriginForBlockStart()) {
+ const Operation& first_op_true =
+ if_true_origin->FirstOperation(Asm().input_graph());
+ const Operation& first_op_false =
+ if_false_origin->FirstOperation(Asm().input_graph());
+ const GotoOp* true_goto = first_op_true.template TryCast<GotoOp>();
+ const GotoOp* false_goto = first_op_false.template TryCast<GotoOp>();
+ // We apply the fourth optimization, replacing empty branches with a
+ // Goto to their common destination (if both target the same block).
+ if (true_goto && false_goto &&
+ true_goto->destination == false_goto->destination) {
+ Block* merge_block = true_goto->destination;
+ if (!merge_block->HasPhis(Asm().input_graph())) {
+ // Using `ReduceInputGraphGoto()` here enables more optimizations.
+ Asm().Goto(merge_block->MapToNextGraph());
+ return OpIndex::Invalid();
+ }
+ }
+ }
+ }
+
+ if (auto cond_value = known_conditions_.Get(cond)) {
+ // We already know the value of {cond}. We thus remove the branch (this is
+ // the "first" optimization in the documentation at the top of this
+ // module).
+ return Asm().ReduceGoto(*cond_value ? if_true : if_false);
+ }
+ // We can't optimize this branch.
+ goto no_change;
+ }
+
+ OpIndex ReduceSelect(OpIndex cond, OpIndex vtrue, OpIndex vfalse,
+ RegisterRepresentation rep, BranchHint hint,
+ SelectOp::Implementation implem) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceSelect(cond, vtrue, vfalse, rep, hint, implem);
+ }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+
+ if (auto cond_value = known_conditions_.Get(cond)) {
+ if (*cond_value) {
+ return vtrue;
+ } else {
+ return vfalse;
+ }
+ }
+ goto no_change;
+ }
+
+ OpIndex ReduceGoto(Block* destination) {
+ LABEL_BLOCK(no_change) { return Next::ReduceGoto(destination); }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+
+ if (const Block* destination_origin = destination->OriginForBlockStart()) {
+ if (!destination_origin->IsMerge()) goto no_change;
+ if (destination_origin->HasExactlyNPredecessors(1)) {
+ // There is no point in trying the 2nd optimization: this would remove
+ // neither Phi nor Branch.
+ // TODO(dmercadier, tebbi): this block has a single predecessor and a
+ // single successor, so we might want to inline it.
+ goto no_change;
+ }
+ const Operation& last_op =
+ destination_origin->LastOperation(Asm().input_graph());
+ if (const BranchOp* branch = last_op.template TryCast<BranchOp>()) {
+ OpIndex condition =
+ Asm().template MapToNewGraph<true>(branch->condition());
+ if (!condition.valid()) {
+ // The condition of the subsequent block's Branch hasn't been visited
+ // before, so we definitely don't know its value.
+ goto no_change;
+ }
+ base::Optional<bool> condition_value = known_conditions_.Get(condition);
+ if (!condition_value.has_value()) {
+ // We've already visited the subsequent block's Branch condition, but
+ // we don't know its value right now.
+ goto no_change;
+ }
+
+ // The destination block {destination_origin} is a Merge and ends with a
+ // Branch whose condition is already known. As per the 2nd optimization,
+ // we'll process it right away, and we'll end it with a Goto instead of
+ // its current Branch.
+ Asm().CloneAndInlineBlock(destination_origin);
+ return OpIndex::Invalid();
+ } else if (const ReturnOp* return_op =
+ last_op.template TryCast<ReturnOp>()) {
+ // The destination block in the old graph ends with a Return
+ // and the old destination is a merge block, so we can directly
+ // inline the destination block in place of the Goto.
+ // TODO(nicohartmann@): Temporarily disable this "optimization" because
+ // it prevents dead code elimination in some cases. Reevaluate this and
+ // reenable if phases have been reordered properly.
+ // Asm().CloneAndInlineBlock(destination_origin);
+ // return OpIndex::Invalid();
+ }
+ }
+
+ goto no_change;
+ }
+
+ OpIndex ReduceDeoptimizeIf(OpIndex condition, OpIndex frame_state,
+ bool negated,
+ const DeoptimizeParameters* parameters) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceDeoptimizeIf(condition, frame_state, negated,
+ parameters);
+ }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+
+ base::Optional<bool> condition_value = known_conditions_.Get(condition);
+ if (!condition_value.has_value()) goto no_change;
+
+ if ((*condition_value && !negated) || (!*condition_value && negated)) {
+ // The condition is true, so we always deoptimize.
+ return Next::ReduceDeoptimize(frame_state, parameters);
+ } else {
+ // The condition is false, so we never deoptimize.
+ return OpIndex::Invalid();
+ }
+ }
+
+ OpIndex ReduceTrapIf(OpIndex condition, bool negated, const TrapId trap_id) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceTrapIf(condition, negated, trap_id);
+ }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+
+ base::Optional<bool> condition_value = known_conditions_.Get(condition);
+ if (!condition_value.has_value()) goto no_change;
+
+ if ((*condition_value && !negated) || (!*condition_value && negated)) {
+ // The condition is true, so we always trap.
+ return Next::ReduceUnreachable();
+ } else {
+ // The condition is false, so we never trap.
+ return OpIndex::Invalid();
+ }
+ }
+
+ private:
+ // Resets {known_conditions_} and {dominator_path_} up to the 1st dominator of
+ // {block} that they contain.
+ void ResetToBlock(Block* block) {
+ Block* target = block->GetDominator();
+ while (!dominator_path_.empty() && target != nullptr &&
+ dominator_path_.back() != target) {
+ if (dominator_path_.back()->Depth() > target->Depth()) {
+ ClearCurrentEntries();
+ } else if (dominator_path_.back()->Depth() < target->Depth()) {
+ target = target->GetDominator();
+ } else {
+ // {target} and {dominator_path_.back()} have the same depth but are not
+ // equal, so we go one level up for both.
+ ClearCurrentEntries();
+ target = target->GetDominator();
+ }
+ }
+ }
+
+ // Removes the latest entry in {known_conditions_} and {dominator_path_}.
+ void ClearCurrentEntries() {
+ known_conditions_.DropLastLayer();
+ dominator_path_.pop_back();
+ }
+
+ void StartLayer(Block* block) {
+ known_conditions_.StartLayer();
+ dominator_path_.push_back(block);
+ }
+
+ // ReplayMissingPredecessors adds to {known_conditions_} and {dominator_path_}
+ // the conditions/blocks related to the dominators of {block} that are
+ // not already present. This can happen when control-flow changes during the
+ // OptimizationPhase, which results in a block being visited not right after
+ // its dominator. For instance, when optimizing a double-diamond like:
+ //
+ // B0
+ // / \
+ // / \
+ // B1 B2
+ // \ /
+ // \ /
+ // B3
+ // / \
+ // / \
+ // B4 B5
+ // \ /
+ // \ /
+ // B6
+ // / \
+ // / \
+ // B7 B8
+ // \ /
+ // \ /
+ // B9
+ //
+ // In this example, where B0, B3 and B6 branch on the same condition, the
+ // blocks are actually visited in the following order: B0 - B1 - B3/1 - B2 -
+ // B3/2 - B4 - B5 - ... (note how B3 is duplicated and visited twice because
+ // from B1/B2 its branch condition is already known; I've noted the duplicated
+ // blocks as B3/1 and B3/2). In the new graph, the dominator of B4 is B3/1 and
+ // the dominator of B5 is B3/2. Except that upon visiting B4, the last visited
+ // block is not B3/1 but rather B3/2, so we have to reset {known_conditions_}
+ // to B0, and thus miss that we actually know the branch condition of B0/B3/B6 and
+ // we thus won't optimize the 3rd diamond.
+ //
+ // To overcome this issue, ReplayMissingPredecessors will add the information
+ // of the missing predecessors of the current block to {known_conditions_}. In
+ // the example above, this means that when visiting B4,
+ // ReplayMissingPredecessors will add the information of B3/1 to
+ // {known_conditions_}.
+ void ReplayMissingPredecessors(Block* new_block) {
+ // Collect blocks that need to be replayed.
+ base::SmallVector<Block*, 32> missing_blocks;
+ for (Block* dom = new_block->GetDominator();
+ dom != nullptr && dom != dominator_path_.back();
+ dom = dom->GetDominator()) {
+ missing_blocks.push_back(dom);
+ }
+ // Actually does the replaying, starting from the oldest block and finishing
+ // with the newest one (so that they will later be removed in the correct
+ // order).
+ for (auto it = missing_blocks.rbegin(); it != missing_blocks.rend(); ++it) {
+ Block* block = *it;
+ StartLayer(block);
+
+ if (block->IsBranchTarget()) {
+ const Operation& op =
+ block->LastPredecessor()->LastOperation(Asm().output_graph());
+ if (const BranchOp* branch = op.TryCast<BranchOp>()) {
+ DCHECK(branch->if_true->index() == block->index() ||
+ branch->if_false->index() == block->index());
+ bool condition_value =
+ branch->if_true->index().valid()
+ ? branch->if_true->index() == block->index()
+ : branch->if_false->index() != block->index();
+ known_conditions_.InsertNewKey(branch->condition(), condition_value);
+ }
+ }
+ }
+ }
+
+ // TODO(dmercadier): use the SnapshotTable to replace {dominator_path_} and
+ // {known_conditions_}, and to reuse the existing merging/replay logic of the
+ // SnapshotTable.
+ ZoneVector<Block*> dominator_path_;
+ LayeredHashMap<OpIndex, bool> known_conditions_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_BRANCH_ELIMINATION_REDUCER_H_
diff --git a/deps/v8/src/compiler/turboshaft/build-graph-phase.cc b/deps/v8/src/compiler/turboshaft/build-graph-phase.cc
new file mode 100644
index 0000000000..70fc39a448
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/build-graph-phase.cc
@@ -0,0 +1,31 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/build-graph-phase.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/turboshaft/graph-builder.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+base::Optional<BailoutReason> BuildGraphPhase::Run(PipelineData* data,
+ Zone* temp_zone,
+ Linkage* linkage) {
+ Schedule* schedule = data->schedule();
+ data->reset_schedule();
+ DCHECK_NOT_NULL(schedule);
+ data->CreateTurboshaftGraph();
+
+ UnparkedScopeIfNeeded scope(data->broker());
+
+ if (auto bailout = turboshaft::BuildGraph(
+ data->broker(), schedule, data->isolate(), data->graph_zone(),
+ temp_zone, &data->graph(), linkage, data->source_positions(),
+ data->node_origins())) {
+ return bailout;
+ }
+ return {};
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/build-graph-phase.h b/deps/v8/src/compiler/turboshaft/build-graph-phase.h
new file mode 100644
index 0000000000..3c49de7aa9
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/build-graph-phase.h
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_BUILD_GRAPH_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_BUILD_GRAPH_PHASE_H_
+
+#include "src/codegen/bailout-reason.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct BuildGraphPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(BuildGraph)
+
+ base::Optional<BailoutReason> Run(PipelineData* data, Zone* temp_zone,
+ Linkage* linkage);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_BUILD_GRAPH_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h b/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h
new file mode 100644
index 0000000000..92969514e1
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h
@@ -0,0 +1,144 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_BUILTIN_CALL_DESCRIPTORS_H_
+#define V8_COMPILER_TURBOSHAFT_BUILTIN_CALL_DESCRIPTORS_H_
+
+#include "src/codegen/callable.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct BuiltinCallDescriptor {
+ private:
+ template <typename Derived>
+ struct Descriptor {
+ static const TSCallDescriptor* Create(Isolate* isolate, Zone* zone) {
+ Callable callable = Builtins::CallableFor(isolate, Derived::Function);
+ auto descriptor = Linkage::GetStubCallDescriptor(
+ zone, callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
+ Derived::NeedsFrameState ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags,
+ Derived::Properties);
+#ifdef DEBUG
+ Derived::Verify(descriptor);
+#endif // DEBUG
+ return TSCallDescriptor::Create(descriptor, zone);
+ }
+
+#ifdef DEBUG
+ static void Verify(const CallDescriptor* desc) {
+ using result_t = typename Derived::result_t;
+ using arguments_t = typename Derived::arguments_t;
+ if constexpr (std::is_same_v<result_t, void>) {
+ DCHECK_EQ(desc->ReturnCount(), 0);
+ } else {
+ DCHECK_EQ(desc->ReturnCount(), 1);
+ DCHECK(result_t::allows_representation(
+ RegisterRepresentation::FromMachineRepresentation(
+ desc->GetReturnType(0).representation())));
+ }
+ DCHECK_EQ(desc->NeedsFrameState(), Derived::NeedsFrameState);
+ DCHECK_EQ(desc->properties(), Derived::Properties);
+ DCHECK_EQ(desc->ParameterCount(),
+ std::tuple_size_v<arguments_t> + Derived::NeedsContext);
+ DCHECK(VerifyArguments<arguments_t>(desc));
+ }
+
+ template <typename Arguments>
+ static bool VerifyArguments(const CallDescriptor* desc) {
+ return VerifyArgumentsImpl<Arguments>(
+ desc, std::make_index_sequence<std::tuple_size_v<Arguments>>());
+ }
+
+ private:
+ template <typename Arguments, size_t... Indices>
+ static bool VerifyArgumentsImpl(const CallDescriptor* desc,
+ std::index_sequence<Indices...>) {
+ return (std::tuple_element_t<Indices, Arguments>::allows_representation(
+ RegisterRepresentation::FromMachineRepresentation(
+ desc->GetParameterType(Indices).representation())) &&
+ ...);
+ }
+#endif // DEBUG
+ };
+
+ using Boolean = Oddball;
+
+ public:
+ struct StringEqual : public Descriptor<StringEqual> {
+ static constexpr auto Function = Builtin::kStringEqual;
+ using arguments_t = std::tuple<V<String>, V<String>, V<WordPtr>>;
+ using result_t = V<Boolean>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr bool NeedsContext = false;
+ static constexpr Operator::Properties Properties = Operator::kEliminatable;
+ };
+
+ struct StringFromCodePointAt : public Descriptor<StringFromCodePointAt> {
+ static constexpr auto Function = Builtin::kStringFromCodePointAt;
+ using arguments_t = std::tuple<V<String>, V<WordPtr>>;
+ using result_t = V<String>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr bool NeedsContext = false;
+ static constexpr Operator::Properties Properties = Operator::kEliminatable;
+ };
+
+ struct StringIndexOf : public Descriptor<StringIndexOf> {
+ static constexpr auto Function = Builtin::kStringIndexOf;
+ using arguments_t = std::tuple<V<String>, V<String>, V<Smi>>;
+ using result_t = V<Smi>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr bool NeedsContext = false;
+ static constexpr Operator::Properties Properties = Operator::kEliminatable;
+ };
+
+ template <Builtin B>
+ struct StringComparison : public Descriptor<StringComparison<B>> {
+ static constexpr auto Function = B;
+ using arguments_t = std::tuple<V<String>, V<String>>;
+ using result_t = V<Boolean>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr bool NeedsContext = false;
+ static constexpr Operator::Properties Properties = Operator::kEliminatable;
+ };
+ using StringLessThan = StringComparison<Builtin::kStringLessThan>;
+ using StringLessThanOrEqual =
+ StringComparison<Builtin::kStringLessThanOrEqual>;
+
+ struct StringSubstring : public Descriptor<StringSubstring> {
+ static constexpr auto Function = Builtin::kStringSubstring;
+ using arguments_t = std::tuple<V<String>, V<WordPtr>, V<WordPtr>>;
+ using result_t = V<String>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr bool NeedsContext = false;
+ static constexpr Operator::Properties Properties = Operator::kEliminatable;
+ };
+
+#ifdef V8_INTL_SUPPORT
+ struct StringToLowerCaseIntl : public Descriptor<StringToLowerCaseIntl> {
+ static constexpr auto Function = Builtin::kStringToLowerCaseIntl;
+ using arguments_t = std::tuple<V<String>>;
+ using result_t = V<String>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr bool NeedsContext = true;
+ static constexpr Operator::Properties Properties =
+ Operator::kNoDeopt | Operator::kNoThrow;
+ };
+#endif // V8_INTL_SUPPORT
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_BUILTIN_CALL_DESCRIPTORS_H_
diff --git a/deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.cc b/deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.cc
new file mode 100644
index 0000000000..b9de41214d
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.cc
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/dead-code-elimination-phase.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/turboshaft/dead-code-elimination-reducer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void DeadCodeEliminationPhase::Run(PipelineData* data, Zone* temp_zone) {
+ UnparkedScopeIfNeeded scope(data->broker(), DEBUG_BOOL);
+
+ turboshaft::OptimizationPhase<turboshaft::DeadCodeEliminationReducer>::Run(
+ data->isolate(), &data->graph(), temp_zone, data->node_origins());
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.h b/deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.h
new file mode 100644
index 0000000000..edd95c4b99
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/dead-code-elimination-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct DeadCodeEliminationPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(DeadCodeElimination)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/dead-code-elimination-reducer.h b/deps/v8/src/compiler/turboshaft/dead-code-elimination-reducer.h
new file mode 100644
index 0000000000..eee11e4e41
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/dead-code-elimination-reducer.h
@@ -0,0 +1,465 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_
+
+#include <iomanip>
+
+#include "src/common/globals.h"
+#include "src/compiler/backend/instruction-codes.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/graph.h"
+#include "src/compiler/turboshaft/index.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/uniform-reducer-adapter.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// General overview
+//
+// DeadCodeAnalysis iterates the graph backwards to propagate liveness
+// information. This information consists of the ControlState and the
+// OperationState.
+//
+// OperationState reflects the liveness of operations. An operation is live if
+//
+// 1) The operation has the `is_required_when_unused` property
+// 2) Any of its outputs is live (is used in a live operation).
+//
+// If the operation is not live, it is dead and can be eliminated.
+//
+// ControlState describes to which block we could jump immediately without
+// changing the program semantics, that is, without skipping any side effects,
+// required control flow or live operations. This information is then used
+// at BranchOps to rewrite them to a GotoOp towards the corresponding block.
+// From the output control state(s) c after an operation, the control state c'
+// before the operation is computed as follows:
+//
+// | Bi if ct, cf are Bi or Unreachable
+// c' = [Branch](ct, cf) = {
+// | NotEliminatable otherwise
+//
+// And if c' = Bi, then the BranchOp can be rewritten into GotoOp(Bi).
+//
+// | NotEliminatable if Op is live
+// c' = [Op](c) = {
+// | c otherwise
+//
+// | Bk if c = Bk
+// c' = [Merge i](c) = { Bi if Merge i has no live phis
+// | NotEliminatable otherwise
+//
+// Where Merge is an imaginary operation at the start of every merge block. This
+// is the important part for the analysis. If block `Merge i` does not have any
+// live phi operations, then we don't necessarily need to distinguish the
+// control flow paths going into that block and if we further don't encounter
+// any live operations along any of the paths leading to `Merge i`
+// starting at some BranchOp, we can skip both branches and eliminate the
+// control flow entirely by rewriting the BranchOp into a GotoOp(Bi). Notice
+// that if the control state already describes a potential Goto-target Bk, then
+// we do not replace that in order to track the farthest block we can jump to.
+
+struct ControlState {
+ // Lattice:
+ //
+ // NotEliminatable
+ // / | \
+ // B1 ... Bn
+ // \ | /
+ // Unreachable
+ //
+ // We use ControlState to propagate information during the analysis about how
+ // branches can be rewritten. Read the values like this:
+ // - NotEliminatable: We cannot rewrite a branch, because we need the control
+ // flow (e.g. because we have seen live operations on either branch or need
+ // the phi at the merge).
+ // - Bj: Control can be rewritten to go directly to Block Bj, because all
+ // paths to that block are free of live operations.
+ // - Unreachable: This is the bottom element and it represents that we haven't
+ // seen anything live yet and are free to rewrite branches to any block
+ // reachable from the current block.
+ enum Kind {
+ kUnreachable,
+ kBlock,
+ kNotEliminatable,
+ };
+
+ static ControlState NotEliminatable() {
+ return ControlState{kNotEliminatable};
+ }
+ static ControlState Block(BlockIndex block) {
+ return ControlState{kBlock, block};
+ }
+ static ControlState Unreachable() { return ControlState{kUnreachable}; }
+
+ explicit ControlState(Kind kind, BlockIndex block = BlockIndex::Invalid())
+ : kind(kind), block(block) {}
+
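+ // Join on the lattice above; for instance, LeastUpperBound(Unreachable(),
+ // Block(b)) is Block(b), while LeastUpperBound(Block(b1), Block(b2)) with
+ // b1 != b2 is NotEliminatable().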
+ static ControlState LeastUpperBound(const ControlState& lhs,
+ const ControlState& rhs) {
+ switch (lhs.kind) {
+ case Kind::kUnreachable:
+ return rhs;
+ case Kind::kBlock: {
+ if (rhs.kind == Kind::kUnreachable) return lhs;
+ if (rhs.kind == Kind::kNotEliminatable) return rhs;
+ if (lhs.block == rhs.block) return lhs;
+ return NotEliminatable();
+ }
+ case Kind::kNotEliminatable:
+ return lhs;
+ }
+ }
+
+ Kind kind;
+ BlockIndex block;
+};
+
+inline std::ostream& operator<<(std::ostream& stream,
+ const ControlState& state) {
+ switch (state.kind) {
+ case ControlState::kNotEliminatable:
+ return stream << "NotEliminatable";
+ case ControlState::kBlock:
+ return stream << "Block(" << state.block << ")";
+ case ControlState::kUnreachable:
+ return stream << "Unreachable";
+ }
+}
+
+inline bool operator==(const ControlState& lhs, const ControlState& rhs) {
+ if (lhs.kind != rhs.kind) return false;
+ if (lhs.kind == ControlState::kBlock) {
+ DCHECK_EQ(rhs.kind, ControlState::kBlock);
+ return lhs.block == rhs.block;
+ }
+ return true;
+}
+
+inline bool operator!=(const ControlState& lhs, const ControlState& rhs) {
+ return !(lhs == rhs);
+}
+
+struct OperationState {
+ // Lattice:
+ //
+ // Live
+ // |
+ // Dead
+ //
+ // Describes the liveness state of an operation.
+ enum Liveness : uint8_t {
+ kDead,
+ kLive,
+ };
+
+ static Liveness LeastUpperBound(Liveness lhs, Liveness rhs) {
+ static_assert(kDead == 0 && kLive == 1);
+ return static_cast<Liveness>(lhs | rhs);
+ }
+};
+
+inline std::ostream& operator<<(std::ostream& stream,
+ OperationState::Liveness liveness) {
+ switch (liveness) {
+ case OperationState::kDead:
+ return stream << "Dead";
+ case OperationState::kLive:
+ return stream << "Live";
+ }
+ UNREACHABLE();
+}
+
+class DeadCodeAnalysis {
+ public:
+ explicit DeadCodeAnalysis(Graph& graph, Zone* phase_zone)
+ : graph_(graph),
+ liveness_(graph.op_id_count(), OperationState::kDead, phase_zone),
+ entry_control_state_(graph.block_count(), ControlState::Unreachable(),
+ phase_zone),
+ rewritable_branch_targets_(phase_zone) {}
+
+ template <bool trace_analysis>
+ std::pair<FixedSidetable<OperationState::Liveness>,
+ ZoneMap<uint32_t, BlockIndex>>
+ Run() {
+ if constexpr (trace_analysis) {
+ std::cout << "===== Running Dead Code Analysis =====\n";
+ }
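+ // Iterate the blocks backwards; ProcessBlock may bump {unprocessed_count}
+ // back up to the backedge of a loop to trigger a revisit of its body.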
+ for (uint32_t unprocessed_count = graph_.block_count();
+ unprocessed_count > 0;) {
+ BlockIndex block_index = static_cast<BlockIndex>(unprocessed_count - 1);
+ --unprocessed_count;
+
+ const Block& block = graph_.Get(block_index);
+ ProcessBlock<trace_analysis>(block, &unprocessed_count);
+ }
+
+ if constexpr (trace_analysis) {
+ std::cout << "===== Results =====\n== Operation State ==\n";
+ for (Block b : graph_.blocks()) {
+ std::cout << PrintAsBlockHeader{b} << ":\n";
+ for (OpIndex index : graph_.OperationIndices(b)) {
+ std::cout << " " << std::setw(8) << liveness_[index] << " "
+ << std::setw(3) << index.id() << ": " << graph_.Get(index)
+ << "\n";
+ }
+ }
+
+ std::cout << "== Rewritable Branches ==\n";
+ for (auto [branch_id, target] : rewritable_branch_targets_) {
+ DCHECK(target.valid());
+ std::cout << " " << std::setw(3) << branch_id << ": Branch ==> Goto "
+ << target.id() << "\n";
+ }
+ std::cout << "==========\n";
+ }
+
+ return {std::move(liveness_), std::move(rewritable_branch_targets_)};
+ }
+
+ template <bool trace_analysis>
+ void ProcessBlock(const Block& block, uint32_t* unprocessed_count) {
+ if constexpr (trace_analysis) {
+ std::cout << "\n==========\n=== Processing " << PrintAsBlockHeader{block}
+ << ":\n==========\nEXIT CONTROL STATE\n";
+ }
+ auto successors = SuccessorBlocks(block.LastOperation(graph_));
+ ControlState control_state = ControlState::Unreachable();
+ for (size_t i = 0; i < successors.size(); ++i) {
+ const auto& r = entry_control_state_[successors[i]->index()];
+ if constexpr (trace_analysis) {
+ std::cout << " Successor " << successors[i]->index() << ": " << r
+ << "\n";
+ }
+ control_state = ControlState::LeastUpperBound(control_state, r);
+ }
+ if constexpr (trace_analysis)
+ std::cout << "Combined: " << control_state << "\n";
+
+ // If control_state == ControlState::Block(b), then the merge block b is
+ // reachable through every path starting at the current block without any
+ // live operations.
+
+ if constexpr (trace_analysis) std::cout << "OPERATION STATE\n";
+ auto op_range = graph_.OperationIndices(block);
+ bool has_live_phis = false;
+ for (auto it = op_range.end(); it != op_range.begin();) {
+ --it;
+ OpIndex index = *it;
+ const Operation& op = graph_.Get(index);
+ if constexpr (trace_analysis) std::cout << index << ":" << op << "\n";
+ OperationState::Liveness op_state = liveness_[index];
+
+ if (op.Is<BranchOp>()) {
+ if (control_state != ControlState::NotEliminatable()) {
+ // Branch is still dead.
+ DCHECK_EQ(op_state, OperationState::kDead);
+ // If we know a target block we can rewrite into a goto.
+ if (control_state.kind == ControlState::kBlock) {
+ BlockIndex target = control_state.block;
+ DCHECK(target.valid());
+ rewritable_branch_targets_[index.id()] = target;
+ }
+ } else {
+ // Branch is live. We cannot rewrite it.
+ op_state = OperationState::kLive;
+ auto it = rewritable_branch_targets_.find(index.id());
+ if (it != rewritable_branch_targets_.end()) {
+ rewritable_branch_targets_.erase(it);
+ }
+ }
+ } else if (op.saturated_use_count == 0) {
+ // Operation is already recognized as dead by a previous analysis.
+ DCHECK_EQ(op_state, OperationState::kDead);
+ } else if (op.Is<GotoOp>()) {
+ // We mark Gotos as live, but they do not influence operation or control
+ // state, so we skip them here.
+ liveness_[index] = OperationState::kLive;
+ continue;
+ } else if (op.Properties().is_required_when_unused) {
+ op_state = OperationState::kLive;
+ } else if (op.Is<PhiOp>()) {
+ has_live_phis = has_live_phis || (op_state == OperationState::kLive);
+
+ if (block.IsLoop()) {
+ const PhiOp& phi = op.Cast<PhiOp>();
+ // Check if the operation state of the input coming from the backedge
+ // changes the liveness of the phi. In that case, trigger a revisit of
+ // the loop.
+ if (liveness_[phi.inputs()[PhiOp::kLoopPhiBackEdgeIndex]] <
+ op_state) {
+ if constexpr (trace_analysis) {
+ std::cout
+ << "Operation state has changed. Need to revisit loop.\n";
+ }
+ Block* backedge = block.LastPredecessor();
+ // Revisit the loop by increasing the {unprocessed_count} to include
+ // all blocks of the loop.
+ *unprocessed_count =
+ std::max(*unprocessed_count, backedge->index().id() + 1);
+ }
+ }
+ }
+
+ // TODO(nicohartmann@): Handle Stack Guards to allow elimination of
+ // otherwise empty loops.
+ //
+ // if(const CallOp* call = op.TryCast<CallOp>()) {
+ // if(std::string(call->descriptor->descriptor->debug_name())
+ // == "StackGuard") {
+ // DCHECK_EQ(op_state, OperationState::kLive);
+ // op_state = OperationState::kWeakLive;
+ // }
+ // }
+
+ DCHECK_LE(liveness_[index], op_state);
+ // If everything is still dead, we don't need to update anything.
+ if (op_state == OperationState::kDead) continue;
+
+ // We have a live operation.
+ if constexpr (trace_analysis) {
+ std::cout << " " << op_state << " <== " << liveness_[index] << "\n";
+ }
+ liveness_[index] = op_state;
+
+ if constexpr (trace_analysis) {
+ if (op.input_count > 0) std::cout << " Updating inputs:\n";
+ }
+ for (OpIndex input : op.inputs()) {
+ auto old_input_state = liveness_[input];
+ auto new_input_state =
+ OperationState::LeastUpperBound(old_input_state, op_state);
+ if constexpr (trace_analysis) {
+ std::cout << " " << input << ": " << new_input_state
+ << " <== " << old_input_state << " || " << op_state << "\n";
+ }
+ liveness_[input] = new_input_state;
+ }
+
+ if (op_state == OperationState::kLive &&
+ control_state != ControlState::NotEliminatable()) {
+ // This block has live operations, which means that we can't skip it.
+ // Reset the ControlState to NotEliminatable.
+ if constexpr (trace_analysis) {
+ std::cout << "Block has live operations. New control state: "
+ << ControlState::NotEliminatable() << "\n";
+ }
+ control_state = ControlState::NotEliminatable();
+ }
+ }
+
+ if constexpr (trace_analysis) {
+ std::cout << "ENTRY CONTROL STATE\nAfter operations: " << control_state
+ << "\n";
+ }
+
+ // If this block is a merge and we don't have any live phis, it is a
+ // potential target for branch redirection.
+ if (block.IsMerge()) {
+ if (!has_live_phis) {
+ if (control_state.kind != ControlState::kBlock) {
+ control_state = ControlState::Block(block.index());
+ if constexpr (trace_analysis) {
+ std::cout
+ << "Block is loop or merge and has no live phi operations.\n";
+ }
+ } else if constexpr (trace_analysis) {
+ std::cout << "Block is loop or merge and has no live phi "
+ "operations.\nControl state already has a goto block: "
+ << control_state << "\n";
+ }
+ }
+ } else if (block.IsLoop()) {
+ // If this is a loop, we reset the control state to avoid jumps into the
+ // middle of the loop. In particular, this is required to prevent
+ // introducing new backedges when blocks towards the end of the loop body
+ // want to jump to a block at the beginning (past the header).
+ control_state = ControlState::NotEliminatable();
+ if constexpr (trace_analysis) {
+ std::cout << "Block is loop header. Resetting control state: "
+ << control_state << "\n";
+ }
+
+ if (entry_control_state_[block.index()] != control_state) {
+ if constexpr (trace_analysis) {
+ std::cout << "Control state has changed. Need to revisit loop.\n";
+ }
+ Block* backedge = block.LastPredecessor();
+ DCHECK_NOT_NULL(backedge);
+ // Revisit the loop by increasing the {unprocessed_count} to include
+ // all blocks of the loop.
+ *unprocessed_count =
+ std::max(*unprocessed_count, backedge->index().id() + 1);
+ }
+ }
+
+ if constexpr (trace_analysis) {
+ std::cout << "Final: " << control_state << "\n";
+ }
+ entry_control_state_[block.index()] = control_state;
+ }
+
+ private:
+ Graph& graph_;
+ FixedSidetable<OperationState::Liveness> liveness_;
+ FixedBlockSidetable<ControlState> entry_control_state_;
+ ZoneMap<uint32_t, BlockIndex> rewritable_branch_targets_;
+};
+
+template <class Next>
+class DeadCodeEliminationReducer
+ : public UniformReducerAdapter<DeadCodeEliminationReducer, Next> {
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ using Adapter = UniformReducerAdapter<DeadCodeEliminationReducer, Next>;
+
+ template <class... Args>
+ explicit DeadCodeEliminationReducer(const std::tuple<Args...>& args)
+ : Adapter(args),
+ branch_rewrite_targets_(Asm().phase_zone()),
+ analyzer_(Asm().modifiable_input_graph(), Asm().phase_zone()) {}
+
+ void Analyze() {
+ // TODO(nicohartmann@): We might want to make this a flag.
+ constexpr bool trace_analysis = false;
+ std::tie(liveness_, branch_rewrite_targets_) =
+ analyzer_.Run<trace_analysis>();
+ Next::Analyze();
+ }
+
+ OpIndex ReduceInputGraphBranch(OpIndex ig_index, const BranchOp& branch) {
+ auto it = branch_rewrite_targets_.find(ig_index.id());
+ if (it != branch_rewrite_targets_.end()) {
+ BlockIndex goto_target = it->second;
+ Asm().Goto(Asm().input_graph().Get(goto_target).MapToNextGraph());
+ return OpIndex::Invalid();
+ }
+ return Next::ReduceInputGraphBranch(ig_index, branch);
+ }
+
+ template <typename Op, typename Continuation>
+ OpIndex ReduceInputGraphOperation(OpIndex ig_index, const Op& op) {
+ if ((*liveness_)[ig_index] == OperationState::kDead) {
+ return OpIndex::Invalid();
+ }
+ return Continuation{this}.ReduceInputGraph(ig_index, op);
+ }
+
+ template <Opcode opcode, typename Continuation, typename... Args>
+ OpIndex ReduceOperation(const Args&... args) {
+ return Continuation{this}.Reduce(args...);
+ }
+
+ private:
+ base::Optional<FixedSidetable<OperationState::Liveness>> liveness_;
+ ZoneMap<uint32_t, BlockIndex> branch_rewrite_targets_;
+ DeadCodeAnalysis analyzer_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_
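The analysis above reaches a fixed point by raising {unprocessed_count} again whenever a loop header's entry state changes, which forces the whole loop body to be revisited. A standalone, simplified sketch of that revisit scheme (illustrative names only, not the V8 API):

    // Blocks are processed from last to first. When a loop header's newly
    // computed state differs from the recorded entry state, the counter is
    // bumped back up to the loop's backedge so the body is analyzed again.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Block {
      bool is_loop_header = false;
      uint32_t backedge_id = 0;  // highest block id belonging to the loop
      int state = 0;             // stands in for the real transfer function
    };

    void RunToFixedPoint(std::vector<Block>& blocks) {
      std::vector<int> entry_state(blocks.size(), 0);
      uint32_t unprocessed_count = static_cast<uint32_t>(blocks.size());
      while (unprocessed_count > 0) {
        uint32_t index = --unprocessed_count;
        Block& block = blocks[index];
        int new_state = block.state;
        if (block.is_loop_header && entry_state[index] != new_state) {
          // Same trick as DeadCodeAnalysis::ProcessBlock: include the whole
          // loop in the unprocessed range again.
          unprocessed_count =
              std::max(unprocessed_count, block.backedge_id + 1);
        }
        entry_state[index] = new_state;
      }
    }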
diff --git a/deps/v8/src/compiler/turboshaft/decompression-optimization-phase.cc b/deps/v8/src/compiler/turboshaft/decompression-optimization-phase.cc
new file mode 100644
index 0000000000..ae5c5aa2e8
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/decompression-optimization-phase.cc
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/decompression-optimization-phase.h"
+
+#include "src/compiler/turboshaft/decompression-optimization.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void DecompressionOptimizationPhase::Run(PipelineData* data, Zone* temp_zone) {
+ if (!COMPRESS_POINTERS_BOOL) return;
+ turboshaft::RunDecompressionOptimization(data->graph(), temp_zone);
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/decompression-optimization-phase.h b/deps/v8/src/compiler/turboshaft/decompression-optimization-phase.h
new file mode 100644
index 0000000000..217be316aa
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/decompression-optimization-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_DECOMPRESSION_OPTIMIZATION_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_DECOMPRESSION_OPTIMIZATION_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct DecompressionOptimizationPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(DecompressionOptimization)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_DECOMPRESSION_OPTIMIZATION_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/decompression-optimization.cc b/deps/v8/src/compiler/turboshaft/decompression-optimization.cc
index e722feae9e..24dec586f4 100644
--- a/deps/v8/src/compiler/turboshaft/decompression-optimization.cc
+++ b/deps/v8/src/compiler/turboshaft/decompression-optimization.cc
@@ -140,6 +140,8 @@ void DecompressionAnalyzer::ProcessOperation(const Operation& op) {
auto& bitcast = op.Cast<TaggedBitcastOp>();
if (NeedsDecompression(op)) {
MarkAsNeedsDecompression(bitcast.input());
+ } else {
+ candidates.push_back(graph.Index(op));
}
break;
}
@@ -179,7 +181,7 @@ void RunDecompressionOptimization(Graph& graph, Zone* phase_zone) {
case Opcode::kPhi: {
auto& phi = op.Cast<PhiOp>();
if (phi.rep == RegisterRepresentation::Tagged()) {
- phi.rep = RegisterRepresentation::Tagged();
+ phi.rep = RegisterRepresentation::Compressed();
}
break;
}
@@ -193,6 +195,15 @@ void RunDecompressionOptimization(Graph& graph, Zone* phase_zone) {
}
break;
}
+ case Opcode::kTaggedBitcast: {
+ auto& bitcast = op.Cast<TaggedBitcastOp>();
+ if (bitcast.from == RegisterRepresentation::Tagged() &&
+ bitcast.to == RegisterRepresentation::PointerSized()) {
+ bitcast.from = RegisterRepresentation::Compressed();
+ bitcast.to = RegisterRepresentation::Word32();
+ }
+ break;
+ }
default:
break;
}
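Under pointer compression, a tagged value is held as a 32-bit offset from a cage base, and the changes above let phis and tagged-to-word bitcasts keep that 32-bit compressed representation whenever no use actually needs the full pointer. A standalone illustration of why this is sound for uses such as equality checks (the constant and names are made up, not V8's):

    #include <cstdint>

    constexpr uint64_t kCageBase = 0x0000400000000000;  // illustrative base

    uint64_t Decompress(uint32_t compressed) { return kCageBase + compressed; }

    // Equality depends only on the 32-bit offsets: comparing the compressed
    // values gives the same result as comparing the decompressed pointers,
    // so no decompression (and no 64-bit phi) is required.
    bool TaggedEqual(uint32_t a, uint32_t b) {
      return a == b;  // equivalent to Decompress(a) == Decompress(b)
    }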
diff --git a/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc b/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc
new file mode 100644
index 0000000000..dcdfdf57cd
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/define-assembler-macros.inc
@@ -0,0 +1,69 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+// This file defines Turboshaft's assembler macros. Include this file before
+// your reducers and don't forget to include 'undef-assembler-macros.inc'
+// afterwards.
+
+#ifdef V8_COMPILER_TURBOSHAFT_ASSEMBLER_MACROS_DEFINED
+#error \
+ "Assembler macros already defined. Did you forget to #include \"undef-assembler-macros.inc\" in a previous file?"
+#endif
+
+#define V8_COMPILER_TURBOSHAFT_ASSEMBLER_MACROS_DEFINED 1
+
+#define BIND(label, ...) \
+ auto [CONCAT(is_bound_, __LINE__), ##__VA_ARGS__] = \
+ Asm().ControlFlowHelper_Bind(label); \
+ (detail::SuppressUnusedWarning(CONCAT(is_bound_, __LINE__)))
+#define LOOP(loop_label, ...) \
+ for(auto [CONCAT(run_loop_, __LINE__), ##__VA_ARGS__] = \
+ Asm().ControlFlowHelper_BindLoop(loop_label); CONCAT(run_loop_, __LINE__); \
+ Asm().ControlFlowHelper_EndLoop(loop_label), \
+ CONCAT(run_loop_, __LINE__) = false)
+#define GOTO(label, ...) \
+ Asm().ControlFlowHelper_Goto(label, {__VA_ARGS__})
+#define GOTO_IF(cond, label, ...) \
+ Asm().ControlFlowHelper_GotoIf(cond, label, {__VA_ARGS__}, \
+ BranchHint::kNone)
+#define GOTO_IF_LIKELY(cond, label, ...) \
+ Asm().ControlFlowHelper_GotoIf(cond, label, {__VA_ARGS__}, \
+ BranchHint::kTrue)
+#define GOTO_IF_UNLIKELY(cond, label, ...) \
+ Asm().ControlFlowHelper_GotoIf(cond, label, {__VA_ARGS__}, \
+ BranchHint::kFalse)
+#define GOTO_IF_NOT(cond, label, ...) \
+ Asm().ControlFlowHelper_GotoIfNot(cond, label, {__VA_ARGS__}, \
+ BranchHint::kNone)
+#define GOTO_IF_NOT_LIKELY(cond, label, ...) \
+ Asm().ControlFlowHelper_GotoIfNot(cond, label, {__VA_ARGS__}, \
+ BranchHint::kFalse)
+#define GOTO_IF_NOT_UNLIKELY(cond, label, ...) \
+ Asm().ControlFlowHelper_GotoIfNot(cond, label, {__VA_ARGS__}, \
+ BranchHint::kTrue)
+
+#define IF_WITH_HINT(cond, if_not, hint) \
+ for (bool bound = Asm().ControlFlowHelper_If(cond, if_not, hint); bound; \
+ (bound = false), Asm().ControlFlowHelper_GotoEnd())
+#define IF(cond) IF_WITH_HINT(cond, false, BranchHint::kNone)
+#define IF_LIKELY(cond) IF_WITH_HINT(cond, false, BranchHint::kTrue)
+#define IF_UNLIKELY(cond) IF_WITH_HINT(cond, false, BranchHint::kFalse)
+#define IF_NOT(cond) IF_WITH_HINT(cond, true, BranchHint::kNone)
+#define IF_NOT_LIKELY(cond) IF_WITH_HINT(cond, true, BranchHint::kFalse)
+#define IF_NOT_UNLIKELY(cond) IF_WITH_HINT(cond, true, BranchHint::kTrue)
+#define ELSE_IF_WITH_HINT(cond, hint) \
+ for (bool bound = Asm().ControlFlowHelper_ElseIf( \
+ [&]() { return cond; }, hint); \
+ bound; (bound = false), Asm().ControlFlowHelper_GotoEnd())
+#define ELSE_IF(cond) ELSE_IF_WITH_HINT(cond, BranchHint::kNone)
+#define ELSE_IF_LIKELY(cond) ELSE_IF_WITH_HINT(cond, BranchHint::kTrue)
+#define ELSE_IF_UNLIKELY(cond) ELSE_IF_WITH_HINT(cond, BranchHint::kFalse)
+#define ELSE \
+ for (bool bound = Asm().ControlFlowHelper_Else(); bound; \
+ (bound = false), Asm().ControlFlowHelper_GotoEnd())
+#define END_IF Asm().ControlFlowHelper_EndIf();
+
+#define __ Asm().
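These macros are exercised by GraphBuilder::BuildUint32Mod further down in this diff; a condensed sketch of that usage pattern, inside a class whose Asm() returns the Turboshaft assembler:

    // Sketch only; mirrors the IF / ELSE / GOTO / BIND structure used below.
    V<Word32> BuildUint32Mod(V<Word32> lhs, V<Word32> rhs) {
      Label<Word32> done(this);
      V<Word32> msk = __ Word32Sub(rhs, 1);
      IF(__ Word32Equal(__ Word32BitwiseAnd(rhs, msk), 0)) {
        GOTO(done, __ Word32BitwiseAnd(lhs, msk));  // power-of-two fast path
      }
      ELSE { GOTO(done, __ Uint32Mod(lhs, rhs)); }  // generic modulus
      BIND(done, result);
      return result;
    }

The matching 'undef-assembler-macros.inc' has to be included at the end of any file that includes this one, so that the short names (IF, GOTO, __, ...) do not leak into other headers.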
diff --git a/deps/v8/src/compiler/turboshaft/deopt-data.h b/deps/v8/src/compiler/turboshaft/deopt-data.h
index def0bee47e..2b8d666682 100644
--- a/deps/v8/src/compiler/turboshaft/deopt-data.h
+++ b/deps/v8/src/compiler/turboshaft/deopt-data.h
@@ -7,7 +7,9 @@
#include "src/base/small-vector.h"
#include "src/common/globals.h"
-#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/frame-states.h"
+#include "src/compiler/turboshaft/index.h"
+#include "src/compiler/turboshaft/representations.h"
namespace v8::internal::compiler::turboshaft {
@@ -135,6 +137,13 @@ struct FrameStateData {
base::Vector<uint32_t> int_operands;
};
+inline bool operator==(const FrameStateData& lhs, const FrameStateData& rhs) {
+ return lhs.frame_state_info == rhs.frame_state_info &&
+ lhs.instructions == rhs.instructions &&
+ lhs.machine_types == rhs.machine_types &&
+ lhs.int_operands == rhs.int_operands;
+}
+
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
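The new operator== compares FrameStateData field by field, presumably so that operations carrying frame state data can be tested for equality (for example when hashing or deduplicating them). A generic, standalone sketch of that pattern with illustrative types, not V8's:

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    struct Data {
      std::vector<int> int_operands;
    };

    inline bool operator==(const Data& lhs, const Data& rhs) {
      return lhs.int_operands == rhs.int_operands;  // element-wise, as above
    }

    struct DataHash {
      size_t operator()(const Data& d) const {
        size_t h = 0;
        for (int v : d.int_operands) h = h * 31 + static_cast<size_t>(v);
        return h;
      }
    };

    // With equality plus a hash, equal values can be deduplicated:
    //   std::unordered_set<Data, DataHash> cache;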
diff --git a/deps/v8/src/compiler/turboshaft/fast-hash.h b/deps/v8/src/compiler/turboshaft/fast-hash.h
index 46525327de..9b50e5e8e3 100644
--- a/deps/v8/src/compiler/turboshaft/fast-hash.h
+++ b/deps/v8/src/compiler/turboshaft/fast-hash.h
@@ -26,7 +26,7 @@ V8_INLINE size_t fast_hash_combine(T const& v, Ts const&... vs);
template <class T>
struct fast_hash {
- size_t operator()(const T& v) {
+ size_t operator()(const T& v) const {
if constexpr (std::is_enum<T>::value) {
return static_cast<size_t>(v);
} else {
@@ -37,12 +37,13 @@ struct fast_hash {
template <class... Ts>
struct fast_hash<std::tuple<Ts...>> {
- size_t operator()(const std::tuple<Ts...>& v) {
+ size_t operator()(const std::tuple<Ts...>& v) const {
return impl(v, std::make_index_sequence<sizeof...(Ts)>());
}
template <size_t... I>
- V8_INLINE size_t impl(std::tuple<Ts...> const& v, std::index_sequence<I...>) {
+ V8_INLINE size_t impl(std::tuple<Ts...> const& v,
+ std::index_sequence<I...>) const {
return fast_hash_combine(std::get<I>(v)...);
}
};
@@ -63,7 +64,7 @@ V8_INLINE size_t fast_hash_range(Iterator first, Iterator last) {
template <typename T>
struct fast_hash<base::Vector<T>> {
- V8_INLINE size_t operator()(base::Vector<T> v) {
+ V8_INLINE size_t operator()(base::Vector<T> v) const {
return fast_hash_range(v.begin(), v.end());
}
};
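The only change here is adding const to the call operators. Hash functors are normally invoked on const objects (for instance when passed as the Hash parameter of a standard container), and a non-const operator() fails to compile in that position. A self-contained illustration using standard-library types rather than the V8 fast_hash itself:

    #include <cstddef>
    #include <string>
    #include <unordered_map>

    struct GoodHash {
      // const is what allows the container to call the hasher on a const object.
      size_t operator()(const std::string& s) const {
        return std::hash<std::string>{}(s);
      }
    };

    int main() {
      std::unordered_map<std::string, int, GoodHash> m;
      m["turboshaft"] = 1;
      return m.at("turboshaft");
    }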
diff --git a/deps/v8/src/compiler/turboshaft/graph-builder.cc b/deps/v8/src/compiler/turboshaft/graph-builder.cc
index ad218f393f..b33997e3b4 100644
--- a/deps/v8/src/compiler/turboshaft/graph-builder.cc
+++ b/deps/v8/src/compiler/turboshaft/graph-builder.cc
@@ -6,7 +6,9 @@
#include <limits>
#include <numeric>
+#include <string_view>
+#include "src/base/container-utils.h"
#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/safe_conversions.h"
@@ -16,56 +18,71 @@
#include "src/codegen/machine-type.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-aux-data.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-origin-table.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
+#include "src/compiler/turboshaft/define-assembler-macros.inc"
+
namespace {
struct GraphBuilder {
+ Isolate* isolate;
+ JSHeapBroker* broker;
Zone* graph_zone;
Zone* phase_zone;
Schedule& schedule;
- Assembler<> assembler;
+ Assembler<reducer_list<>> assembler;
+ Linkage* linkage;
SourcePositionTable* source_positions;
NodeOriginTable* origins;
+ struct BlockData {
+ Block* block;
+ };
NodeAuxData<OpIndex> op_mapping{phase_zone};
- ZoneVector<Block*> block_mapping{schedule.RpoBlockCount(), phase_zone};
+ ZoneVector<BlockData> block_mapping{schedule.RpoBlockCount(), phase_zone};
base::Optional<BailoutReason> Run();
+ Assembler<reducer_list<>>& Asm() { return assembler; }
private:
OpIndex Map(Node* old_node) {
OpIndex result = op_mapping.Get(old_node);
- DCHECK(assembler.output_graph().IsValid(result));
+ DCHECK(__ output_graph().IsValid(result));
return result;
}
Block* Map(BasicBlock* block) {
- Block* result = block_mapping[block->rpo_number()];
+ Block* result = block_mapping[block->rpo_number()].block;
DCHECK_NOT_NULL(result);
return result;
}
void FixLoopPhis(Block* loop, Block* backedge) {
DCHECK(loop->IsLoop());
- for (Operation& op : assembler.output_graph().operations(*loop)) {
+ for (Operation& op : __ output_graph().operations(*loop)) {
if (!op.Is<PendingLoopPhiOp>()) continue;
auto& pending_phi = op.Cast<PendingLoopPhiOp>();
- assembler.output_graph().Replace<PhiOp>(
- assembler.output_graph().Index(pending_phi),
+ __ output_graph().Replace<PhiOp>(
+ __ output_graph().Index(pending_phi),
base::VectorOf(
- {pending_phi.first(), Map(pending_phi.old_backedge_node)}),
+ {pending_phi.first(), Map(pending_phi.data.old_backedge_node)}),
pending_phi.rep);
}
}
@@ -140,18 +157,38 @@ struct GraphBuilder {
UNIMPLEMENTED();
}
}
+ V<Word32> BuildUint32Mod(V<Word32> lhs, V<Word32> rhs);
OpIndex Process(Node* node, BasicBlock* block,
- const base::SmallVector<int, 16>& predecessor_permutation);
+ const base::SmallVector<int, 16>& predecessor_permutation,
+ OpIndex& dominating_frame_state,
+ base::Optional<BailoutReason>* bailout,
+ bool is_final_control = false);
+
+ OpIndex EmitProjectionsAndTuple(OpIndex op_idx) {
+ Operation& op = __ output_graph().Get(op_idx);
+ base::Vector<const RegisterRepresentation> outputs_rep = op.outputs_rep();
+ if (outputs_rep.size() <= 1) {
+ // If {op} has a single output, there is no need to emit Projections or
+ // Tuple, so we just return it.
+ return op_idx;
+ }
+ base::SmallVector<OpIndex, 16> tuple_inputs;
+ for (size_t i = 0; i < outputs_rep.size(); i++) {
+ tuple_inputs.push_back(__ Projection(op_idx, i, outputs_rep[i]));
+ }
+ return __ Tuple(base::VectorOf(tuple_inputs));
+ }
};
base::Optional<BailoutReason> GraphBuilder::Run() {
for (BasicBlock* block : *schedule.rpo_order()) {
- block_mapping[block->rpo_number()] = assembler.NewBlock(BlockKind(block));
+ block_mapping[block->rpo_number()].block =
+ block->IsLoopHeader() ? __ NewLoopHeader() : __ NewBlock();
}
+
for (BasicBlock* block : *schedule.rpo_order()) {
Block* target_block = Map(block);
- if (!assembler.Bind(target_block)) continue;
- target_block->SetDeferred(block->deferred());
+ if (!__ Bind(target_block)) continue;
// Since we visit blocks in rpo-order, the new block predecessors are sorted
// in rpo order too. However, the input schedule does not order
@@ -167,13 +204,17 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
predecessors[j]->rpo_number();
});
+ OpIndex dominating_frame_state = OpIndex::Invalid();
+ base::Optional<BailoutReason> bailout = base::nullopt;
for (Node* node : *block->nodes()) {
if (V8_UNLIKELY(node->InputCount() >=
int{std::numeric_limits<
decltype(Operation::input_count)>::max()})) {
return BailoutReason::kTooManyArguments;
}
- OpIndex i = Process(node, block, predecessor_permutation);
+ OpIndex i = Process(node, block, predecessor_permutation,
+ dominating_frame_state, &bailout);
+ if (V8_UNLIKELY(bailout)) return bailout;
op_mapping.Set(node, i);
}
if (Node* node = block->control_input()) {
@@ -182,14 +223,16 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
decltype(Operation::input_count)>::max()})) {
return BailoutReason::kTooManyArguments;
}
- OpIndex i = Process(node, block, predecessor_permutation);
+ OpIndex i = Process(node, block, predecessor_permutation,
+ dominating_frame_state, &bailout, true);
+ if (V8_UNLIKELY(bailout)) return bailout;
op_mapping.Set(node, i);
}
switch (block->control()) {
case BasicBlock::kGoto: {
DCHECK_EQ(block->SuccessorCount(), 1);
Block* destination = Map(block->SuccessorAt(0));
- assembler.Goto(destination);
+ __ Goto(destination);
if (destination->IsBound()) {
DCHECK(destination->IsLoop());
FixLoopPhis(destination, target_block);
@@ -201,40 +244,27 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
case BasicBlock::kReturn:
case BasicBlock::kDeoptimize:
case BasicBlock::kThrow:
+ case BasicBlock::kCall:
case BasicBlock::kTailCall:
break;
- case BasicBlock::kCall: {
- Node* call = block->control_input();
- DCHECK_EQ(call->opcode(), IrOpcode::kCall);
- DCHECK_EQ(block->SuccessorCount(), 2);
- Block* if_success = Map(block->SuccessorAt(0));
- Block* if_exception = Map(block->SuccessorAt(1));
- OpIndex catch_exception =
- assembler.CatchException(Map(call), if_success, if_exception);
- Node* if_exception_node = block->SuccessorAt(1)->NodeAt(0);
- DCHECK_EQ(if_exception_node->opcode(), IrOpcode::kIfException);
- op_mapping.Set(if_exception_node, catch_exception);
- break;
- }
case BasicBlock::kNone:
UNREACHABLE();
}
- DCHECK_NULL(assembler.current_block());
+ DCHECK_NULL(__ current_block());
}
if (source_positions->IsEnabled()) {
- for (OpIndex index : assembler.output_graph().AllOperationIndices()) {
- compiler::NodeId origin = assembler.output_graph()
- .operation_origins()[index]
- .DecodeTurbofanNodeId();
- assembler.output_graph().source_positions()[index] =
+ for (OpIndex index : __ output_graph().AllOperationIndices()) {
+ compiler::NodeId origin =
+ __ output_graph().operation_origins()[index].DecodeTurbofanNodeId();
+ __ output_graph().source_positions()[index] =
source_positions->GetSourcePosition(origin);
}
}
if (origins) {
- for (OpIndex index : assembler.output_graph().AllOperationIndices()) {
- OpIndex origin = assembler.output_graph().operation_origins()[index];
+ for (OpIndex index : __ output_graph().AllOperationIndices()) {
+ OpIndex origin = __ output_graph().operation_origins()[index];
origins->SetNodeOrigin(index.id(), origin.DecodeTurbofanNodeId());
}
}
@@ -242,10 +272,35 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
return base::nullopt;
}
+V<Word32> GraphBuilder::BuildUint32Mod(V<Word32> lhs, V<Word32> rhs) {
+ Label<Word32> done(this);
+
+ // Compute the mask for the {rhs}.
+ V<Word32> msk = __ Word32Sub(rhs, 1);
+
+ // Check if the {rhs} is a power of two.
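+  // For example, rhs == 8 gives msk == 7; then (rhs & msk) == 0 and
+  // lhs % 8 == (lhs & 7), so the bitwise AND computes the modulus.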
+ IF(__ Word32Equal(__ Word32BitwiseAnd(rhs, msk), 0)) {
+    // The {rhs} is a power of two, so just do fast bit masking.
+ GOTO(done, __ Word32BitwiseAnd(lhs, msk));
+ }
+ ELSE {
+ // The {rhs} is not a power of two, do a generic Uint32Mod.
+ GOTO(done, __ Uint32Mod(lhs, rhs));
+ }
+
+ BIND(done, result);
+ return result;
+}
+
OpIndex GraphBuilder::Process(
Node* node, BasicBlock* block,
- const base::SmallVector<int, 16>& predecessor_permutation) {
- assembler.SetCurrentOrigin(OpIndex::EncodeTurbofanNodeId(node->id()));
+ const base::SmallVector<int, 16>& predecessor_permutation,
+ OpIndex& dominating_frame_state, base::Optional<BailoutReason>* bailout,
+ bool is_final_control) {
+ if (Asm().current_block() == nullptr) {
+ return OpIndex::Invalid();
+ }
+ __ SetCurrentOrigin(OpIndex::EncodeTurbofanNodeId(node->id()));
const Operator* op = node->op();
Operator::Opcode opcode = op->opcode();
switch (opcode) {
@@ -264,25 +319,59 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kArgumentsLengthState:
case IrOpcode::kEffectPhi:
case IrOpcode::kTerminate:
- case IrOpcode::kIfSuccess:
return OpIndex::Invalid();
+ case IrOpcode::kCheckpoint: {
+ // Preserve the frame state from this checkpoint for following nodes.
+ dominating_frame_state = Map(NodeProperties::GetFrameStateInput(node));
+ return OpIndex::Invalid();
+ }
+
case IrOpcode::kIfException: {
- // Use the `CatchExceptionOp` that has already been produced when
- // processing the call.
- OpIndex catch_exception = Map(node);
- DCHECK(
- assembler.output_graph().Get(catch_exception).Is<CatchExceptionOp>());
- return catch_exception;
+ return __ LoadException();
+ }
+
+ case IrOpcode::kIfSuccess: {
+      // Emit all of the call's value projections now, wrap them in a Tuple,
+      // and remap the old call to this new Tuple instead of the
+      // CallAndCatchExceptionOp.
+ Node* call = node->InputAt(0);
+ DCHECK_EQ(call->opcode(), IrOpcode::kCall);
+ OpIndex call_idx = Map(call);
+ CallAndCatchExceptionOp& op =
+ __ output_graph().Get(call_idx).Cast<CallAndCatchExceptionOp>();
+
+ size_t return_count = op.outputs_rep().size();
+ DCHECK_EQ(return_count, op.descriptor->descriptor->ReturnCount());
+ if (return_count <= 1) {
+        // Calls with zero or one output do not require Projections.
+ return OpIndex::Invalid();
+ }
+ base::Vector<OpIndex> projections =
+ graph_zone->NewVector<OpIndex>(return_count);
+ for (size_t i = 0; i < return_count; i++) {
+ projections[i] = __ Projection(call_idx, i, op.outputs_rep()[i]);
+ }
+ OpIndex tuple_idx = __ Tuple(projections);
+
+ // Re-mapping {call} to {tuple_idx} so that subsequent projections are not
+ // emitted.
+ op_mapping.Set(call, tuple_idx);
+
+ return OpIndex::Invalid();
}
case IrOpcode::kParameter: {
const ParameterInfo& info = ParameterInfoOf(op);
- return assembler.Parameter(info.index(), info.debug_name());
+ RegisterRepresentation rep =
+ RegisterRepresentation::FromMachineRepresentation(
+ linkage->GetParameterType(ParameterIndexOf(node->op()))
+ .representation());
+ return __ Parameter(info.index(), rep, info.debug_name());
}
case IrOpcode::kOsrValue: {
- return assembler.OsrValue(OsrValueIndexOf(op));
+ return __ OsrValue(OsrValueIndexOf(op));
}
case IrOpcode::kPhi: {
@@ -290,46 +379,43 @@ OpIndex GraphBuilder::Process(
RegisterRepresentation rep =
RegisterRepresentation::FromMachineRepresentation(
PhiRepresentationOf(op));
- if (assembler.current_block()->IsLoop()) {
+ if (__ current_block()->IsLoop()) {
DCHECK_EQ(input_count, 2);
- return assembler.PendingLoopPhi(Map(node->InputAt(0)), rep,
- node->InputAt(1));
+ return __ PendingLoopPhi(Map(node->InputAt(0)), rep, node->InputAt(1));
} else {
base::SmallVector<OpIndex, 16> inputs;
for (int i = 0; i < input_count; ++i) {
inputs.push_back(Map(node->InputAt(predecessor_permutation[i])));
}
- return assembler.Phi(base::VectorOf(inputs), rep);
+ return __ Phi(base::VectorOf(inputs), rep);
}
}
case IrOpcode::kInt64Constant:
- return assembler.Word64Constant(
- static_cast<uint64_t>(OpParameter<int64_t>(op)));
+ return __ Word64Constant(static_cast<uint64_t>(OpParameter<int64_t>(op)));
case IrOpcode::kInt32Constant:
- return assembler.Word32Constant(
- static_cast<uint32_t>(OpParameter<int32_t>(op)));
+ return __ Word32Constant(static_cast<uint32_t>(OpParameter<int32_t>(op)));
case IrOpcode::kFloat64Constant:
- return assembler.Float64Constant(OpParameter<double>(op));
+ return __ Float64Constant(OpParameter<double>(op));
case IrOpcode::kFloat32Constant:
- return assembler.Float32Constant(OpParameter<float>(op));
+ return __ Float32Constant(OpParameter<float>(op));
case IrOpcode::kNumberConstant:
- return assembler.NumberConstant(OpParameter<double>(op));
+ return __ NumberConstant(OpParameter<double>(op));
case IrOpcode::kTaggedIndexConstant:
- return assembler.TaggedIndexConstant(OpParameter<int32_t>(op));
+ return __ TaggedIndexConstant(OpParameter<int32_t>(op));
case IrOpcode::kHeapConstant:
- return assembler.HeapConstant(HeapConstantOf(op));
+ return __ HeapConstant(HeapConstantOf(op));
case IrOpcode::kCompressedHeapConstant:
- return assembler.CompressedHeapConstant(HeapConstantOf(op));
+ return __ CompressedHeapConstant(HeapConstantOf(op));
case IrOpcode::kExternalConstant:
- return assembler.ExternalConstant(OpParameter<ExternalReference>(op));
+ return __ ExternalConstant(OpParameter<ExternalReference>(op));
case IrOpcode::kRelocatableInt64Constant:
- return assembler.RelocatableConstant(
+ return __ RelocatableConstant(
OpParameter<RelocatablePtrConstantInfo>(op).value(),
OpParameter<RelocatablePtrConstantInfo>(op).rmode());
#define BINOP_CASE(opcode, assembler_op) \
case IrOpcode::k##opcode: \
- return assembler.assembler_op(Map(node->InputAt(0)), Map(node->InputAt(1)));
+ return __ assembler_op(Map(node->InputAt(0)), Map(node->InputAt(1)));
BINOP_CASE(Int32Add, Word32Add)
BINOP_CASE(Int64Add, Word64Add)
@@ -372,13 +458,6 @@ OpIndex GraphBuilder::Process(
BINOP_CASE(Float64Pow, Float64Power)
BINOP_CASE(Float64Atan2, Float64Atan2)
- BINOP_CASE(Int32AddWithOverflow, Int32AddCheckOverflow)
- BINOP_CASE(Int64AddWithOverflow, Int64AddCheckOverflow)
- BINOP_CASE(Int32MulWithOverflow, Int32MulCheckOverflow)
- BINOP_CASE(Int64MulWithOverflow, Int64MulCheckOverflow)
- BINOP_CASE(Int32SubWithOverflow, Int32SubCheckOverflow)
- BINOP_CASE(Int64SubWithOverflow, Int64SubCheckOverflow)
-
BINOP_CASE(Word32Shr, Word32ShiftRightLogical)
BINOP_CASE(Word64Shr, Word64ShiftRightLogical)
@@ -411,6 +490,20 @@ OpIndex GraphBuilder::Process(
BINOP_CASE(Float64LessThanOrEqual, Float64LessThanOrEqual)
#undef BINOP_CASE
+#define TUPLE_BINOP_CASE(opcode, assembler_op) \
+ case IrOpcode::k##opcode: { \
+ OpIndex idx = \
+ __ assembler_op(Map(node->InputAt(0)), Map(node->InputAt(1))); \
+ return EmitProjectionsAndTuple(idx); \
+ }
+ TUPLE_BINOP_CASE(Int32AddWithOverflow, Int32AddCheckOverflow)
+ TUPLE_BINOP_CASE(Int64AddWithOverflow, Int64AddCheckOverflow)
+ TUPLE_BINOP_CASE(Int32MulWithOverflow, Int32MulCheckOverflow)
+ TUPLE_BINOP_CASE(Int64MulWithOverflow, Int64MulCheckOverflow)
+ TUPLE_BINOP_CASE(Int32SubWithOverflow, Int32SubCheckOverflow)
+ TUPLE_BINOP_CASE(Int64SubWithOverflow, Int64SubCheckOverflow)
+#undef TUPLE_BINOP_CASE
+
case IrOpcode::kWord64Sar:
case IrOpcode::kWord32Sar: {
WordRepresentation rep = opcode == IrOpcode::kWord64Sar
@@ -425,13 +518,17 @@ OpIndex GraphBuilder::Process(
kind = ShiftOp::Kind::kShiftRightArithmetic;
break;
}
- return assembler.Shift(Map(node->InputAt(0)), Map(node->InputAt(1)), kind,
- rep);
+ return __ Shift(Map(node->InputAt(0)), Map(node->InputAt(1)), kind, rep);
}
#define UNARY_CASE(opcode, assembler_op) \
case IrOpcode::k##opcode: \
- return assembler.assembler_op(Map(node->InputAt(0)));
+ return __ assembler_op(Map(node->InputAt(0)));
+#define TUPLE_UNARY_CASE(opcode, assembler_op) \
+ case IrOpcode::k##opcode: { \
+ OpIndex idx = __ assembler_op(Map(node->InputAt(0))); \
+ return EmitProjectionsAndTuple(idx); \
+ }
UNARY_CASE(Word32ReverseBytes, Word32ReverseBytes)
UNARY_CASE(Word64ReverseBytes, Word64ReverseBytes)
@@ -512,84 +609,312 @@ OpIndex GraphBuilder::Process(
UNARY_CASE(TruncateFloat64ToUint32,
TruncateFloat64ToUint32OverflowUndefined)
UNARY_CASE(TruncateFloat64ToWord32, JSTruncateFloat64ToWord32)
- UNARY_CASE(TryTruncateFloat32ToInt64, TryTruncateFloat32ToInt64)
- UNARY_CASE(TryTruncateFloat32ToUint64, TryTruncateFloat32ToUint64)
- UNARY_CASE(TryTruncateFloat64ToInt32, TryTruncateFloat64ToInt32)
- UNARY_CASE(TryTruncateFloat64ToInt64, TryTruncateFloat64ToInt64)
- UNARY_CASE(TryTruncateFloat64ToUint32, TryTruncateFloat64ToUint32)
- UNARY_CASE(TryTruncateFloat64ToUint64, TryTruncateFloat64ToUint64)
+
+ TUPLE_UNARY_CASE(TryTruncateFloat32ToInt64, TryTruncateFloat32ToInt64)
+ TUPLE_UNARY_CASE(TryTruncateFloat32ToUint64, TryTruncateFloat32ToUint64)
+ TUPLE_UNARY_CASE(TryTruncateFloat64ToInt32, TryTruncateFloat64ToInt32)
+ TUPLE_UNARY_CASE(TryTruncateFloat64ToInt64, TryTruncateFloat64ToInt64)
+ TUPLE_UNARY_CASE(TryTruncateFloat64ToUint32, TryTruncateFloat64ToUint32)
+ TUPLE_UNARY_CASE(TryTruncateFloat64ToUint64, TryTruncateFloat64ToUint64)
UNARY_CASE(Float64ExtractLowWord32, Float64ExtractLowWord32)
UNARY_CASE(Float64ExtractHighWord32, Float64ExtractHighWord32)
#undef UNARY_CASE
+#undef TUPLE_UNARY_CASE
case IrOpcode::kTruncateInt64ToInt32:
// 64- to 32-bit truncation is implicit in Turboshaft.
return Map(node->InputAt(0));
case IrOpcode::kTruncateFloat32ToInt32:
switch (OpParameter<TruncateKind>(node->op())) {
case TruncateKind::kArchitectureDefault:
- return assembler.TruncateFloat32ToInt32OverflowUndefined(
+ return __ TruncateFloat32ToInt32OverflowUndefined(
Map(node->InputAt(0)));
case TruncateKind::kSetOverflowToMin:
- return assembler.TruncateFloat32ToInt32OverflowToMin(
- Map(node->InputAt(0)));
+ return __ TruncateFloat32ToInt32OverflowToMin(Map(node->InputAt(0)));
}
case IrOpcode::kTruncateFloat32ToUint32:
switch (OpParameter<TruncateKind>(node->op())) {
case TruncateKind::kArchitectureDefault:
- return assembler.TruncateFloat32ToUint32OverflowUndefined(
+ return __ TruncateFloat32ToUint32OverflowUndefined(
Map(node->InputAt(0)));
case TruncateKind::kSetOverflowToMin:
- return assembler.TruncateFloat32ToUint32OverflowToMin(
- Map(node->InputAt(0)));
+ return __ TruncateFloat32ToUint32OverflowToMin(Map(node->InputAt(0)));
}
case IrOpcode::kTruncateFloat64ToInt64:
switch (OpParameter<TruncateKind>(node->op())) {
case TruncateKind::kArchitectureDefault:
- return assembler.TruncateFloat64ToInt64OverflowUndefined(
+ return __ TruncateFloat64ToInt64OverflowUndefined(
Map(node->InputAt(0)));
case TruncateKind::kSetOverflowToMin:
- return assembler.TruncateFloat64ToInt64OverflowToMin(
- Map(node->InputAt(0)));
+ return __ TruncateFloat64ToInt64OverflowToMin(Map(node->InputAt(0)));
}
case IrOpcode::kFloat64InsertLowWord32:
- return assembler.Float64InsertWord32(
- Map(node->InputAt(0)), Map(node->InputAt(1)),
- Float64InsertWord32Op::Kind::kLowHalf);
+ return __ Float64InsertWord32(Map(node->InputAt(0)),
+ Map(node->InputAt(1)),
+ Float64InsertWord32Op::Kind::kLowHalf);
case IrOpcode::kFloat64InsertHighWord32:
- return assembler.Float64InsertWord32(
- Map(node->InputAt(0)), Map(node->InputAt(1)),
- Float64InsertWord32Op::Kind::kHighHalf);
+ return __ Float64InsertWord32(Map(node->InputAt(0)),
+ Map(node->InputAt(1)),
+ Float64InsertWord32Op::Kind::kHighHalf);
case IrOpcode::kBitcastTaggedToWord:
- return assembler.TaggedBitcast(Map(node->InputAt(0)),
- RegisterRepresentation::Tagged(),
- RegisterRepresentation::PointerSized());
+ return __ TaggedBitcast(Map(node->InputAt(0)),
+ RegisterRepresentation::Tagged(),
+ RegisterRepresentation::PointerSized());
case IrOpcode::kBitcastWordToTagged:
- return assembler.TaggedBitcast(Map(node->InputAt(0)),
- RegisterRepresentation::PointerSized(),
- RegisterRepresentation::Tagged());
+ return __ TaggedBitcast(Map(node->InputAt(0)),
+ RegisterRepresentation::PointerSized(),
+ RegisterRepresentation::Tagged());
+ case IrOpcode::kNumberIsNaN:
+ return __ FloatIs(Map(node->InputAt(0)), FloatIsOp::Kind::kNaN,
+ FloatRepresentation::Float64());
+
+#define OBJECT_IS_CASE(kind) \
+ case IrOpcode::kObjectIs##kind: { \
+ return __ ObjectIs(Map(node->InputAt(0)), ObjectIsOp::Kind::k##kind, \
+ ObjectIsOp::InputAssumptions::kNone); \
+ }
+ OBJECT_IS_CASE(ArrayBufferView)
+ OBJECT_IS_CASE(BigInt)
+ OBJECT_IS_CASE(Callable)
+ OBJECT_IS_CASE(Constructor)
+ OBJECT_IS_CASE(DetectableCallable)
+ OBJECT_IS_CASE(NonCallable)
+ OBJECT_IS_CASE(Number)
+ OBJECT_IS_CASE(Receiver)
+ OBJECT_IS_CASE(Smi)
+ OBJECT_IS_CASE(String)
+ OBJECT_IS_CASE(Symbol)
+ OBJECT_IS_CASE(Undetectable)
+#undef OBJECT_IS_CASE
+
+#define CHECK_OBJECT_IS_CASE(code, kind, input_assumptions, reason, feedback) \
+ case IrOpcode::k##code: { \
+ DCHECK(dominating_frame_state.valid()); \
+ V<Tagged> input = Map(node->InputAt(0)); \
+ V<Word32> check = \
+ __ ObjectIs(input, ObjectIsOp::Kind::k##kind, \
+ ObjectIsOp::InputAssumptions::k##input_assumptions); \
+ __ DeoptimizeIfNot(check, dominating_frame_state, \
+ DeoptimizeReason::k##reason, feedback); \
+ return input; \
+ }
+ CHECK_OBJECT_IS_CASE(CheckInternalizedString, InternalizedString,
+ HeapObject, WrongInstanceType, {})
+ CHECK_OBJECT_IS_CASE(CheckNumber, Number, None, NotANumber,
+ CheckParametersOf(op).feedback())
+ CHECK_OBJECT_IS_CASE(CheckReceiver, Receiver, HeapObject,
+ NotAJavaScriptObject, {})
+ CHECK_OBJECT_IS_CASE(CheckReceiverOrNullOrUndefined,
+ ReceiverOrNullOrUndefined, HeapObject,
+ NotAJavaScriptObjectOrNullOrUndefined, {})
+ CHECK_OBJECT_IS_CASE(CheckString, String, HeapObject, NotAString,
+ CheckParametersOf(op).feedback())
+ CHECK_OBJECT_IS_CASE(CheckSymbol, Symbol, HeapObject, NotASymbol, {})
+ CHECK_OBJECT_IS_CASE(CheckBigInt, BigInt, None, NotABigInt,
+ CheckParametersOf(op).feedback())
+ CHECK_OBJECT_IS_CASE(CheckedBigIntToBigInt64, BigInt64, BigInt,
+ NotABigInt64, CheckParametersOf(op).feedback())
+#undef CHECK_OBJECT_IS_CASE
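+      // For illustration, the CheckString entry above expands to roughly:
+      //   case IrOpcode::kCheckString: {
+      //     DCHECK(dominating_frame_state.valid());
+      //     V<Tagged> input = Map(node->InputAt(0));
+      //     V<Word32> check =
+      //         __ ObjectIs(input, ObjectIsOp::Kind::kString,
+      //                     ObjectIsOp::InputAssumptions::kHeapObject);
+      //     __ DeoptimizeIfNot(check, dominating_frame_state,
+      //                        DeoptimizeReason::kNotAString,
+      //                        CheckParametersOf(op).feedback());
+      //     return input;
+      //   }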
+
+#define CONVERT_TO_OBJECT_CASE(name, kind, input_type, input_interpretation) \
+ case IrOpcode::k##name: \
+ return __ ConvertToObject( \
+ Map(node->InputAt(0)), ConvertToObjectOp::Kind::k##kind, \
+ V<input_type>::rep, \
+ ConvertToObjectOp::InputInterpretation::k##input_interpretation, \
+ CheckForMinusZeroMode::kDontCheckForMinusZero);
+ CONVERT_TO_OBJECT_CASE(ChangeInt32ToTagged, Number, Word32, Signed)
+ CONVERT_TO_OBJECT_CASE(ChangeUint32ToTagged, Number, Word32, Unsigned)
+ CONVERT_TO_OBJECT_CASE(ChangeInt64ToTagged, Number, Word64, Signed)
+ CONVERT_TO_OBJECT_CASE(ChangeUint64ToTagged, Number, Word64, Unsigned)
+ CONVERT_TO_OBJECT_CASE(ChangeFloat64ToTaggedPointer, HeapNumber, Float64,
+ Signed)
+ CONVERT_TO_OBJECT_CASE(ChangeInt64ToBigInt, BigInt, Word64, Signed)
+ CONVERT_TO_OBJECT_CASE(ChangeUint64ToBigInt, BigInt, Word64, Unsigned)
+ CONVERT_TO_OBJECT_CASE(ChangeInt31ToTaggedSigned, Smi, Word32, Signed)
+ CONVERT_TO_OBJECT_CASE(ChangeBitToTagged, Boolean, Word32, Signed)
+ CONVERT_TO_OBJECT_CASE(StringFromSingleCharCode, String, Word32, CharCode)
+ CONVERT_TO_OBJECT_CASE(StringFromSingleCodePoint, String, Word32,
+ CodePoint)
+
+ case IrOpcode::kChangeFloat64ToTagged:
+ return __ ConvertToObject(Map(node->InputAt(0)),
+ ConvertToObjectOp::Kind::kNumber,
+ RegisterRepresentation::Float64(),
+ ConvertToObjectOp::InputInterpretation::kSigned,
+ CheckMinusZeroModeOf(node->op()));
+#undef CONVERT_TO_OBJECT_CASE
+
+#define CONVERT_TO_OBJECT_OR_DEOPT_CASE(name, kind, input_type, \
+ input_interpretation) \
+ case IrOpcode::k##name: { \
+ DCHECK(dominating_frame_state.valid()); \
+ const CheckParameters& params = CheckParametersOf(node->op()); \
+ return __ ConvertToObjectOrDeopt( \
+ Map(node->InputAt(0)), dominating_frame_state, \
+ ConvertToObjectOrDeoptOp::Kind::k##kind, V<input_type>::rep, \
+ ConvertToObjectOrDeoptOp::InputInterpretation:: \
+ k##input_interpretation, \
+ params.feedback()); \
+ }
+ CONVERT_TO_OBJECT_OR_DEOPT_CASE(CheckedInt32ToTaggedSigned, Smi, Word32,
+ Signed)
+ CONVERT_TO_OBJECT_OR_DEOPT_CASE(CheckedUint32ToTaggedSigned, Smi, Word32,
+ Unsigned)
+ CONVERT_TO_OBJECT_OR_DEOPT_CASE(CheckedInt64ToTaggedSigned, Smi, Word64,
+ Signed)
+ CONVERT_TO_OBJECT_OR_DEOPT_CASE(CheckedUint64ToTaggedSigned, Smi, Word64,
+ Unsigned)
+#undef CONVERT_TO_OBJECT_OR_DEOPT_CASE
+
+#define CONVERT_OBJECT_TO_PRIMITIVE_CASE(name, kind, input_assumptions) \
+ case IrOpcode::k##name: \
+ return __ ConvertObjectToPrimitive( \
+ Map(node->InputAt(0)), ConvertObjectToPrimitiveOp::Kind::k##kind, \
+ ConvertObjectToPrimitiveOp::InputAssumptions::k##input_assumptions);
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedSignedToInt32, Int32, Smi)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedSignedToInt64, Int64, Smi)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToBit, Bit, Object)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToInt32, Int32,
+ NumberOrOddball)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToUint32, Uint32,
+ NumberOrOddball)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToInt64, Int64,
+ NumberOrOddball)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToFloat64, Float64,
+ NumberOrOddball)
+ CONVERT_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedToFloat64, Float64,
+ NumberOrOddball)
+#undef CONVERT_OBJECT_TO_PRIMITIVE_CASE
+
+#define TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(name, kind, input_assumptions) \
+ case IrOpcode::k##name: \
+ return __ TruncateObjectToPrimitive( \
+ Map(node->InputAt(0)), TruncateObjectToPrimitiveOp::Kind::k##kind, \
+ TruncateObjectToPrimitiveOp::InputAssumptions::k##input_assumptions);
+ TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedToWord32, Int32,
+ NumberOrOddball)
+ TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateBigIntToWord64, Int64, BigInt)
+ TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedToBit, Bit, Object)
+ TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedPointerToBit, Bit,
+ HeapObject)
+#undef TRUNCATE_OBJECT_TO_PRIMITIVE_CASE
+
+#define CHANGE_OR_DEOPT_INT_CASE(kind) \
+ case IrOpcode::kChecked##kind: { \
+ DCHECK(dominating_frame_state.valid()); \
+ const CheckParameters& params = CheckParametersOf(node->op()); \
+ return __ ChangeOrDeopt(Map(node->InputAt(0)), dominating_frame_state, \
+ ChangeOrDeoptOp::Kind::k##kind, \
+ CheckForMinusZeroMode::kDontCheckForMinusZero, \
+ params.feedback()); \
+ }
+ CHANGE_OR_DEOPT_INT_CASE(Uint32ToInt32)
+ CHANGE_OR_DEOPT_INT_CASE(Int64ToInt32)
+ CHANGE_OR_DEOPT_INT_CASE(Uint64ToInt32)
+ CHANGE_OR_DEOPT_INT_CASE(Uint64ToInt64)
+#undef CHANGE_OR_DEOPT_INT_CASE
+
+ case IrOpcode::kCheckedFloat64ToInt32: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
+ return __ ChangeOrDeopt(Map(node->InputAt(0)), dominating_frame_state,
+ ChangeOrDeoptOp::Kind::kFloat64ToInt32,
+ params.mode(), params.feedback());
+ }
+
+ case IrOpcode::kCheckedFloat64ToInt64: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
+ return __ ChangeOrDeopt(Map(node->InputAt(0)), dominating_frame_state,
+ ChangeOrDeoptOp::Kind::kFloat64ToInt64,
+ params.mode(), params.feedback());
+ }
+
+ case IrOpcode::kCheckedTaggedToInt32: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
+ return __ ConvertObjectToPrimitiveOrDeopt(
+ Map(node->InputAt(0)), dominating_frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumber,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt32,
+ params.mode(), params.feedback());
+ }
+
+ case IrOpcode::kCheckedTaggedToInt64: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
+ return __ ConvertObjectToPrimitiveOrDeopt(
+ Map(node->InputAt(0)), dominating_frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumber,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt64,
+ params.mode(), params.feedback());
+ }
+
+ case IrOpcode::kCheckedTaggedToFloat64: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckTaggedInputParameters& params =
+ CheckTaggedInputParametersOf(node->op());
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind from_kind;
+ switch (params.mode()) {
+#define CASE(mode) \
+ case CheckTaggedInputMode::k##mode: \
+ from_kind = ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::k##mode; \
+ break;
+ CASE(Number)
+ CASE(NumberOrBoolean)
+ CASE(NumberOrOddball)
+#undef CASE
+ }
+ return __ ConvertObjectToPrimitiveOrDeopt(
+ Map(node->InputAt(0)), dominating_frame_state, from_kind,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kFloat64,
+ CheckForMinusZeroMode::kDontCheckForMinusZero, params.feedback());
+ }
+
+ case IrOpcode::kCheckedTaggedToArrayIndex: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckParameters& params = CheckParametersOf(node->op());
+ return __ ConvertObjectToPrimitiveOrDeopt(
+ Map(node->InputAt(0)), dominating_frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrString,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kArrayIndex,
+ CheckForMinusZeroMode::kCheckForMinusZero, params.feedback());
+ }
+
+ case IrOpcode::kCheckedTaggedSignedToInt32: {
+ DCHECK(dominating_frame_state.valid());
+ const CheckParameters& params = CheckParametersOf(node->op());
+ return __ ConvertObjectToPrimitiveOrDeopt(
+ Map(node->InputAt(0)), dominating_frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kSmi,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt32,
+ CheckForMinusZeroMode::kDontCheckForMinusZero, params.feedback());
+ }
case IrOpcode::kSelect: {
OpIndex cond = Map(node->InputAt(0));
OpIndex vtrue = Map(node->InputAt(1));
OpIndex vfalse = Map(node->InputAt(2));
const SelectParameters& params = SelectParametersOf(op);
- return assembler.Select(cond, vtrue, vfalse,
- RegisterRepresentation::FromMachineRepresentation(
- params.representation()),
- params.hint(), SelectOp::Implementation::kBranch);
+ return __ Select(cond, vtrue, vfalse,
+ RegisterRepresentation::FromMachineRepresentation(
+ params.representation()),
+ params.hint(), SelectOp::Implementation::kBranch);
}
case IrOpcode::kWord32Select:
- return assembler.Select(
- Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
- RegisterRepresentation::Word32(), BranchHint::kNone,
- SelectOp::Implementation::kCMove);
+ return __ Select(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ Map(node->InputAt(2)), RegisterRepresentation::Word32(),
+ BranchHint::kNone, SelectOp::Implementation::kCMove);
case IrOpcode::kWord64Select:
- return assembler.Select(
- Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
- RegisterRepresentation::Word64(), BranchHint::kNone,
- SelectOp::Implementation::kCMove);
+ return __ Select(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ Map(node->InputAt(2)), RegisterRepresentation::Word64(),
+ BranchHint::kNone, SelectOp::Implementation::kCMove);
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
@@ -604,25 +929,25 @@ OpIndex GraphBuilder::Process(
: LoadOp::Kind::RawAligned();
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
- return assembler.Load(Map(base), kind, loaded_rep, offset);
+ return __ Load(Map(base), kind, loaded_rep, offset);
}
if (index->opcode() == IrOpcode::kInt64Constant) {
int64_t offset = OpParameter<int64_t>(index->op());
if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
- return assembler.Load(Map(base), kind, loaded_rep,
- static_cast<int32_t>(offset));
+ return __ Load(Map(base), kind, loaded_rep,
+ static_cast<int32_t>(offset));
}
}
int32_t offset = 0;
uint8_t element_size_log2 = 0;
- return assembler.Load(Map(base), Map(index), kind, loaded_rep, offset,
- element_size_log2);
+ return __ Load(Map(base), Map(index), kind, loaded_rep, offset,
+ element_size_log2);
}
case IrOpcode::kProtectedLoad: {
MemoryRepresentation loaded_rep =
MemoryRepresentation::FromMachineType(LoadRepresentationOf(op));
- return assembler.Load(Map(node->InputAt(0)), Map(node->InputAt(1)),
- LoadOp::Kind::Protected(), loaded_rep);
+ return __ Load(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ LoadOp::Kind::Protected(), loaded_rep);
}
case IrOpcode::kStore:
@@ -641,61 +966,60 @@ OpIndex GraphBuilder::Process(
Node* value = node->InputAt(2);
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
- assembler.Store(Map(base), Map(value), kind,
- MemoryRepresentation::FromMachineRepresentation(
- store_rep.representation()),
- store_rep.write_barrier_kind(), offset);
+ __ Store(Map(base), Map(value), kind,
+ MemoryRepresentation::FromMachineRepresentation(
+ store_rep.representation()),
+ store_rep.write_barrier_kind(), offset);
return OpIndex::Invalid();
}
if (index->opcode() == IrOpcode::kInt64Constant) {
int64_t offset = OpParameter<int64_t>(index->op());
if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
- assembler.Store(Map(base), Map(value), kind,
- MemoryRepresentation::FromMachineRepresentation(
- store_rep.representation()),
- store_rep.write_barrier_kind(),
- static_cast<int32_t>(offset));
+ __ Store(Map(base), Map(value), kind,
+ MemoryRepresentation::FromMachineRepresentation(
+ store_rep.representation()),
+ store_rep.write_barrier_kind(),
+ static_cast<int32_t>(offset));
return OpIndex::Invalid();
}
}
int32_t offset = 0;
uint8_t element_size_log2 = 0;
- assembler.Store(Map(base), Map(index), Map(value), kind,
- MemoryRepresentation::FromMachineRepresentation(
- store_rep.representation()),
- store_rep.write_barrier_kind(), offset,
- element_size_log2);
+ __ Store(Map(base), Map(index), Map(value), kind,
+ MemoryRepresentation::FromMachineRepresentation(
+ store_rep.representation()),
+ store_rep.write_barrier_kind(), offset, element_size_log2);
return OpIndex::Invalid();
}
case IrOpcode::kProtectedStore:
- assembler.Store(Map(node->InputAt(0)), Map(node->InputAt(1)),
- Map(node->InputAt(2)), StoreOp::Kind::Protected(),
- MemoryRepresentation::FromMachineRepresentation(
- OpParameter<MachineRepresentation>(node->op())),
- WriteBarrierKind::kNoWriteBarrier);
+ __ Store(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ Map(node->InputAt(2)), StoreOp::Kind::Protected(),
+ MemoryRepresentation::FromMachineRepresentation(
+ OpParameter<MachineRepresentation>(node->op())),
+ WriteBarrierKind::kNoWriteBarrier);
return OpIndex::Invalid();
case IrOpcode::kRetain:
- assembler.Retain(Map(node->InputAt(0)));
+ __ Retain(Map(node->InputAt(0)));
return OpIndex::Invalid();
case IrOpcode::kStackPointerGreaterThan:
- return assembler.StackPointerGreaterThan(Map(node->InputAt(0)),
- StackCheckKindOf(op));
+ return __ StackPointerGreaterThan(Map(node->InputAt(0)),
+ StackCheckKindOf(op));
case IrOpcode::kLoadStackCheckOffset:
- return assembler.StackCheckOffset();
+ return __ StackCheckOffset();
case IrOpcode::kLoadFramePointer:
- return assembler.FramePointer();
+ return __ FramePointer();
case IrOpcode::kLoadParentFramePointer:
- return assembler.ParentFramePointer();
+ return __ ParentFramePointer();
case IrOpcode::kStackSlot:
- return assembler.StackSlot(StackSlotRepresentationOf(op).size(),
- StackSlotRepresentationOf(op).alignment());
+ return __ StackSlot(StackSlotRepresentationOf(op).size(),
+ StackSlotRepresentationOf(op).alignment());
case IrOpcode::kBranch:
DCHECK_EQ(block->SuccessorCount(), 2);
- assembler.Branch(Map(node->InputAt(0)), Map(block->SuccessorAt(0)),
- Map(block->SuccessorAt(1)));
+ __ Branch(Map(node->InputAt(0)), Map(block->SuccessorAt(0)),
+ Map(block->SuccessorAt(1)), BranchHintOf(node->op()));
return OpIndex::Invalid();
case IrOpcode::kSwitch: {
@@ -706,11 +1030,11 @@ OpIndex GraphBuilder::Process(
for (size_t i = 0; i < case_count; ++i) {
BasicBlock* branch = block->SuccessorAt(i);
const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
- cases.emplace_back(p.value(), Map(branch));
+ cases.emplace_back(p.value(), Map(branch), p.hint());
}
- assembler.Switch(Map(node->InputAt(0)),
- graph_zone->CloneVector(base::VectorOf(cases)),
- Map(default_branch));
+ __ Switch(
+ Map(node->InputAt(0)), graph_zone->CloneVector(base::VectorOf(cases)),
+ Map(default_branch), BranchHintOf(default_branch->front()->op()));
return OpIndex::Invalid();
}
@@ -725,13 +1049,32 @@ OpIndex GraphBuilder::Process(
++i) {
arguments.emplace_back(Map(node->InputAt(i)));
}
+
+ const TSCallDescriptor* ts_descriptor =
+ TSCallDescriptor::Create(call_descriptor, graph_zone);
+
+ OpIndex frame_state_idx = OpIndex::Invalid();
if (call_descriptor->NeedsFrameState()) {
FrameState frame_state{
node->InputAt(static_cast<int>(call_descriptor->InputCount()))};
- return assembler.CallMaybeDeopt(callee, base::VectorOf(arguments),
- call_descriptor, Map(frame_state));
+ frame_state_idx = Map(frame_state);
+ }
+
+ if (!is_final_control) {
+ return EmitProjectionsAndTuple(__ Call(
+ callee, frame_state_idx, base::VectorOf(arguments), ts_descriptor));
+ } else {
+ DCHECK_EQ(block->SuccessorCount(), 2);
+
+ Block* if_success = Map(block->SuccessorAt(0));
+ Block* if_exception = Map(block->SuccessorAt(1));
+ // CallAndCatchException is a block terminator, so we can't generate the
+ // projections right away. We'll generate them in the IfSuccess
+ // successor.
+ return __ CallAndCatchException(callee, frame_state_idx,
+ base::VectorOf(arguments), if_success,
+ if_exception, ts_descriptor);
}
- return assembler.Call(callee, base::VectorOf(arguments), call_descriptor);
}
case IrOpcode::kTailCall: {
@@ -745,7 +1088,11 @@ OpIndex GraphBuilder::Process(
++i) {
arguments.emplace_back(Map(node->InputAt(i)));
}
- assembler.TailCall(callee, base::VectorOf(arguments), call_descriptor);
+
+ const TSCallDescriptor* ts_descriptor =
+ TSCallDescriptor::Create(call_descriptor, graph_zone);
+
+ __ TailCall(callee, base::VectorOf(arguments), ts_descriptor);
return OpIndex::Invalid();
}
@@ -753,31 +1100,35 @@ OpIndex GraphBuilder::Process(
FrameState frame_state{node};
FrameStateData::Builder builder;
BuildFrameStateData(&builder, frame_state);
- return assembler.FrameState(
- builder.Inputs(), builder.inlined(),
- builder.AllocateFrameStateData(frame_state.frame_state_info(),
- graph_zone));
+ if (builder.Inputs().size() >
+ std::numeric_limits<decltype(Operation::input_count)>::max() - 1) {
+ *bailout = BailoutReason::kTooManyArguments;
+ return OpIndex::Invalid();
+ }
+ return __ FrameState(builder.Inputs(), builder.inlined(),
+ builder.AllocateFrameStateData(
+ frame_state.frame_state_info(), graph_zone));
}
case IrOpcode::kDeoptimizeIf:
- assembler.DeoptimizeIf(Map(node->InputAt(0)), Map(node->InputAt(1)),
- &DeoptimizeParametersOf(op));
+ __ DeoptimizeIf(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ &DeoptimizeParametersOf(op));
return OpIndex::Invalid();
case IrOpcode::kDeoptimizeUnless:
- assembler.DeoptimizeIfNot(Map(node->InputAt(0)), Map(node->InputAt(1)),
- &DeoptimizeParametersOf(op));
+ __ DeoptimizeIfNot(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ &DeoptimizeParametersOf(op));
return OpIndex::Invalid();
case IrOpcode::kTrapIf:
- assembler.TrapIf(Map(node->InputAt(0)), TrapIdOf(op));
+ __ TrapIf(Map(node->InputAt(0)), TrapIdOf(op));
return OpIndex::Invalid();
case IrOpcode::kTrapUnless:
- assembler.TrapIfNot(Map(node->InputAt(0)), TrapIdOf(op));
+ __ TrapIfNot(Map(node->InputAt(0)), TrapIdOf(op));
return OpIndex::Invalid();
case IrOpcode::kDeoptimize: {
OpIndex frame_state = Map(node->InputAt(0));
- assembler.Deoptimize(frame_state, &DeoptimizeParametersOf(op));
+ __ Deoptimize(frame_state, &DeoptimizeParametersOf(op));
return OpIndex::Invalid();
}
@@ -787,25 +1138,554 @@ OpIndex GraphBuilder::Process(
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
return_values.push_back(Map(node->InputAt(i)));
}
- assembler.Return(Map(pop_count), base::VectorOf(return_values));
+ __ Return(Map(pop_count), base::VectorOf(return_values));
return OpIndex::Invalid();
}
-
case IrOpcode::kUnreachable:
- for (Node* use : node->uses()) {
- CHECK_EQ(use->opcode(), IrOpcode::kThrow);
- }
- return OpIndex::Invalid();
case IrOpcode::kThrow:
- assembler.Unreachable();
+ __ Unreachable();
return OpIndex::Invalid();
case IrOpcode::kProjection: {
Node* input = node->InputAt(0);
size_t index = ProjectionIndexOf(op);
- return assembler.Projection(Map(input), index);
+ RegisterRepresentation rep =
+ RegisterRepresentation::FromMachineRepresentation(
+ NodeProperties::GetProjectionType(node));
+ return __ Projection(Map(input), index, rep);
}
+ case IrOpcode::kStaticAssert: {
+      // We currently ignore StaticAsserts in turboshaft, because evaluating
+      // some of them requires optimizations that have not been ported yet.
+ // TODO(turboshaft): once CommonOperatorReducer and MachineOperatorReducer
+ // have been ported, re-enable StaticAsserts.
+ // return __ ReduceStaticAssert(Map(node->InputAt(0)),
+ // StaticAssertSourceOf(node->op()));
+ return OpIndex::Invalid();
+ }
+
+ case IrOpcode::kAllocate: {
+ AllocationType allocation = AllocationTypeOf(node->op());
+ return __ Allocate(Map(node->InputAt(0)), allocation,
+ AllowLargeObjects::kFalse);
+ }
+ // TODO(nicohartmann@): We might not see AllocateRaw here anymore.
+ case IrOpcode::kAllocateRaw: {
+ Node* size = node->InputAt(0);
+ const AllocateParameters& params = AllocateParametersOf(node->op());
+ return __ Allocate(Map(size), params.allocation_type(),
+ params.allow_large_objects());
+ }
+ case IrOpcode::kStoreToObject: {
+ Node* object = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ __ Store(Map(object), Map(offset), Map(value),
+ StoreOp::Kind::TaggedBase(),
+ MemoryRepresentation::FromMachineType(access.machine_type),
+ access.write_barrier_kind, kHeapObjectTag);
+ return OpIndex::Invalid();
+ }
+ case IrOpcode::kStoreElement: {
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ElementAccess const& access = ElementAccessOf(node->op());
+ DCHECK(!access.machine_type.IsMapWord());
+ StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(access.machine_type);
+ __ Store(Map(object), Map(index), Map(value), kind, rep,
+ access.write_barrier_kind, access.header_size,
+ rep.SizeInBytesLog2());
+ return OpIndex::Invalid();
+ }
+ case IrOpcode::kStoreField: {
+ OpIndex object = Map(node->InputAt(0));
+ OpIndex value = Map(node->InputAt(1));
+ FieldAccess const& access = FieldAccessOf(node->op());
+      // External pointers must never be stored by optimized code.
+ DCHECK(!access.type.Is(compiler::Type::ExternalPointer()) ||
+ !V8_ENABLE_SANDBOX_BOOL);
+ // SandboxedPointers are not currently stored by optimized code.
+ DCHECK(!access.type.Is(compiler::Type::SandboxedPointer()));
+
+#ifdef V8_ENABLE_SANDBOX
+ if (access.is_bounded_size_access) {
+ value = __ ShiftLeft(value, kBoundedSizeShift,
+ WordRepresentation::PointerSized());
+ }
+#endif // V8_ENABLE_SANDBOX
+
+ StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
+ MachineType machine_type = access.machine_type;
+ if (machine_type.IsMapWord()) {
+ machine_type = MachineType::TaggedPointer();
+#ifdef V8_MAP_PACKING
+ UNIMPLEMENTED();
+#endif
+ }
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(machine_type);
+ __ Store(object, value, kind, rep, access.write_barrier_kind,
+ access.offset);
+ return OpIndex::Invalid();
+ }
+ case IrOpcode::kLoadFromObject:
+ case IrOpcode::kLoadImmutableFromObject: {
+ Node* object = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(access.machine_type);
+ return __ Load(Map(object), Map(offset), LoadOp::Kind::TaggedBase(), rep,
+ kHeapObjectTag);
+ }
+ case IrOpcode::kLoadField: {
+ Node* object = node->InputAt(0);
+ FieldAccess const& access = FieldAccessOf(node->op());
+ StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
+ MachineType machine_type = access.machine_type;
+ if (machine_type.IsMapWord()) {
+ machine_type = MachineType::TaggedPointer();
+#ifdef V8_MAP_PACKING
+ UNIMPLEMENTED();
+#endif
+ }
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(machine_type);
+#ifdef V8_ENABLE_SANDBOX
+ bool is_sandboxed_external =
+ access.type.Is(compiler::Type::ExternalPointer());
+ if (is_sandboxed_external) {
+        // Fields for sandboxed external pointers contain a 32-bit handle,
+        // not a 64-bit raw pointer.
+ rep = MemoryRepresentation::Uint32();
+ }
+#endif // V8_ENABLE_SANDBOX
+ OpIndex value = __ Load(Map(object), kind, rep, access.offset);
+#ifdef V8_ENABLE_SANDBOX
+ if (is_sandboxed_external) {
+ value = __ DecodeExternalPointer(value, access.external_pointer_tag);
+ }
+ if (access.is_bounded_size_access) {
+ DCHECK(!is_sandboxed_external);
+ value = __ ShiftRightLogical(value, kBoundedSizeShift,
+ WordRepresentation::PointerSized());
+ }
+#endif // V8_ENABLE_SANDBOX
+ return value;
+ }
+ case IrOpcode::kLoadElement: {
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ElementAccess const& access = ElementAccessOf(node->op());
+ LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
+ MemoryRepresentation rep =
+ MemoryRepresentation::FromMachineType(access.machine_type);
+ return __ Load(Map(object), Map(index), kind, rep, access.header_size,
+ rep.SizeInBytesLog2());
+ }
+ case IrOpcode::kCheckTurboshaftTypeOf: {
+ Node* input = node->InputAt(0);
+ Node* type_description = node->InputAt(1);
+
+ HeapObjectMatcher m(type_description);
+ CHECK(m.HasResolvedValue() && m.Ref(broker).IsString() &&
+ m.Ref(broker).AsString().IsContentAccessible());
+ StringRef type_string = m.Ref(broker).AsString();
+ Handle<String> pattern_string =
+ *type_string.ObjectIfContentAccessible(broker);
+ std::unique_ptr<char[]> pattern = pattern_string->ToCString();
+
+ auto type_opt =
+ Type::ParseFromString(std::string_view{pattern.get()}, graph_zone);
+ if (type_opt == base::nullopt) {
+ FATAL(
+ "String '%s' (of %d:CheckTurboshaftTypeOf) is not a valid type "
+ "description!",
+ pattern.get(), node->id());
+ }
+
+ OpIndex input_index = Map(input);
+ RegisterRepresentation rep =
+ __ output_graph().Get(input_index).outputs_rep()[0];
+ return __ CheckTurboshaftTypeOf(input_index, rep, *type_opt, false);
+ }
+
+ case IrOpcode::kNewConsString:
+ return __ NewConsString(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ Map(node->InputAt(2)));
+ case IrOpcode::kNewDoubleElements:
+ return __ NewArray(Map(node->InputAt(0)), NewArrayOp::Kind::kDouble,
+ AllocationTypeOf(node->op()));
+ case IrOpcode::kNewSmiOrObjectElements:
+ return __ NewArray(Map(node->InputAt(0)), NewArrayOp::Kind::kObject,
+ AllocationTypeOf(node->op()));
+
+ case IrOpcode::kDoubleArrayMin:
+ return __ DoubleArrayMinMax(Map(node->InputAt(0)),
+ DoubleArrayMinMaxOp::Kind::kMin);
+ case IrOpcode::kDoubleArrayMax:
+ return __ DoubleArrayMinMax(Map(node->InputAt(0)),
+ DoubleArrayMinMaxOp::Kind::kMax);
+
+ case IrOpcode::kLoadFieldByIndex:
+ return __ LoadFieldByIndex(Map(node->InputAt(0)), Map(node->InputAt(1)));
+
+ case IrOpcode::kCheckedInt64Add:
+ case IrOpcode::kCheckedInt64Sub:
+ DCHECK(Is64());
+ [[fallthrough]];
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub: {
+ DCHECK(dominating_frame_state.valid());
+ auto kind = (opcode == IrOpcode::kCheckedInt32Add ||
+ opcode == IrOpcode::kCheckedInt64Add)
+ ? OverflowCheckedBinopOp::Kind::kSignedAdd
+ : OverflowCheckedBinopOp::Kind::kSignedSub;
+ auto rep = (opcode == IrOpcode::kCheckedInt32Add ||
+ opcode == IrOpcode::kCheckedInt32Sub)
+ ? WordRepresentation::Word32()
+ : WordRepresentation::Word64();
+
+ OpIndex result = __ OverflowCheckedBinop(
+ Map(node->InputAt(0)), Map(node->InputAt(1)), kind, rep);
+
+ V<Word32> overflow =
+ __ Projection(result, 1, RegisterRepresentation::Word32());
+ __ DeoptimizeIf(overflow, dominating_frame_state,
+ DeoptimizeReason::kOverflow, FeedbackSource{});
+ return __ Projection(result, 0, rep);
+ }
+
+ case IrOpcode::kCheckedInt32Mul: {
+ DCHECK(dominating_frame_state.valid());
+ V<Word32> lhs = Map(node->InputAt(0));
+ V<Word32> rhs = Map(node->InputAt(1));
+
+ CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ OpIndex result = __ Int32MulCheckOverflow(lhs, rhs);
+ V<Word32> overflow =
+ __ Projection(result, 1, RegisterRepresentation::Word32());
+ __ DeoptimizeIf(overflow, dominating_frame_state,
+ DeoptimizeReason::kOverflow, FeedbackSource{});
+ V<Word32> value =
+ __ Projection(result, 0, RegisterRepresentation::Word32());
+
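+ // A zero result stands for -0 if at least one of the inputs is negative
+ // (e.g. -1 * 0 yields -0 in JS); Word32 cannot represent -0, so
+ // deoptimize in that case.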
+ if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ IF(__ Word32Equal(value, 0)) {
+ __ DeoptimizeIf(__ Int32LessThan(__ Word32BitwiseOr(lhs, rhs), 0),
+ dominating_frame_state, DeoptimizeReason::kMinusZero,
+ FeedbackSource{});
+ }
+ END_IF
+ }
+
+ return value;
+ }
+
+ case IrOpcode::kCheckedInt64Mul: {
+ DCHECK(Is64());
+ DCHECK(dominating_frame_state.valid());
+ OpIndex result = __ Int64MulCheckOverflow(Map(node->InputAt(0)),
+ Map(node->InputAt(1)));
+
+ V<Word32> overflow =
+ __ Projection(result, 1, RegisterRepresentation::Word32());
+ __ DeoptimizeIf(overflow, dominating_frame_state,
+ DeoptimizeReason::kOverflow, FeedbackSource{});
+ return __ Projection(result, 0, RegisterRepresentation::Word64());
+ }
+
+ case IrOpcode::kCheckedInt32Div: {
+ DCHECK(dominating_frame_state.valid());
+ V<Word32> lhs = Map(node->InputAt(0));
+ V<Word32> rhs = Map(node->InputAt(1));
+
+ // Check if the {rhs} is a known power of two.
+ Int32Matcher m(node->InputAt(1));
+ if (m.IsPowerOf2()) {
+ // Since we know that {rhs} is a power of two, we can perform a fast
+ // check to see if the relevant least significant bits of the {lhs}
+ // are all zero, and if so we know that we can perform a division
+ // safely (and fast by doing an arithmetic - aka sign preserving -
+ // right shift on {lhs}).
+ int32_t divisor = m.ResolvedValue();
+ V<Word32> check =
+ __ Word32Equal(__ Word32BitwiseAnd(lhs, divisor - 1), 0);
+ __ DeoptimizeIfNot(check, dominating_frame_state,
+ DeoptimizeReason::kLostPrecision, FeedbackSource{});
+ return __ Word32ShiftRightArithmeticShiftOutZeros(
+ lhs, base::bits::WhichPowerOfTwo(divisor));
+ } else {
+ Label<Word32> done(this);
+
+ // Check if {rhs} is positive (and not zero).
+ IF(__ Int32LessThan(0, rhs)) { GOTO(done, __ Int32Div(lhs, rhs)); }
+ ELSE {
+ // Check if {rhs} is zero.
+ __ DeoptimizeIf(__ Word32Equal(rhs, 0), dominating_frame_state,
+ DeoptimizeReason::kDivisionByZero, FeedbackSource{});
+
+ // Check if {lhs} is zero, as that would produce minus zero.
+ __ DeoptimizeIf(__ Word32Equal(lhs, 0), dominating_frame_state,
+ DeoptimizeReason::kMinusZero, FeedbackSource{});
+
+ // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
+ // to return -kMinInt, which is not representable as Word32.
+ IF_UNLIKELY(__ Word32Equal(lhs, kMinInt)) {
+ __ DeoptimizeIf(__ Word32Equal(rhs, -1), dominating_frame_state,
+ DeoptimizeReason::kOverflow, FeedbackSource{});
+ }
+ END_IF
+
+ GOTO(done, __ Int32Div(lhs, rhs));
+ }
+ END_IF
+
+ BIND(done, value);
+ V<Word32> lossless = __ Word32Equal(lhs, __ Word32Mul(value, rhs));
+ __ DeoptimizeIfNot(lossless, dominating_frame_state,
+ DeoptimizeReason::kLostPrecision, FeedbackSource{});
+ return value;
+ }
+ }
+
+ case IrOpcode::kCheckedInt64Div: {
+ DCHECK(Is64());
+ DCHECK(dominating_frame_state.valid());
+ V<Word64> lhs = Map(node->InputAt(0));
+ V<Word64> rhs = Map(node->InputAt(1));
+
+ __ DeoptimizeIf(__ Word64Equal(rhs, 0), dominating_frame_state,
+ DeoptimizeReason::kDivisionByZero, FeedbackSource{});
+ // Check if {lhs} is kMinInt64 and {rhs} is -1, in which case we'd have
+ // to return -kMinInt64, which is not representable as Word64.
+ IF_UNLIKELY(__ Word64Equal(lhs, std::numeric_limits<int64_t>::min())) {
+ __ DeoptimizeIf(__ Word64Equal(rhs, int64_t{-1}),
+ dominating_frame_state, DeoptimizeReason::kOverflow,
+ FeedbackSource{});
+ }
+ END_IF
+
+ return __ Int64Div(lhs, rhs);
+ }
+
+ case IrOpcode::kCheckedUint32Div: {
+ DCHECK(dominating_frame_state.valid());
+ V<Word32> lhs = Map(node->InputAt(0));
+ V<Word32> rhs = Map(node->InputAt(1));
+
+ // Check if the {rhs} is a known power of two.
+ Uint32Matcher m(node->InputAt(1));
+ if (m.IsPowerOf2()) {
+ // Since we know that {rhs} is a power of two, we can perform a fast
+ // check to see if the relevant least significant bits of the {lhs}
+ // are all zero, and if so we know that we can perform a division
+ // safely (and fast by doing a logical - aka zero extending - right
+ // shift on {lhs}).
+ uint32_t divisor = m.ResolvedValue();
+ V<Word32> check =
+ __ Word32Equal(__ Word32BitwiseAnd(lhs, divisor - 1), 0);
+ __ DeoptimizeIfNot(check, dominating_frame_state,
+ DeoptimizeReason::kLostPrecision, FeedbackSource{});
+ return __ Word32ShiftRightLogical(lhs,
+ base::bits::WhichPowerOfTwo(divisor));
+ } else {
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ __ DeoptimizeIf(__ Word32Equal(rhs, 0), dominating_frame_state,
+ DeoptimizeReason::kDivisionByZero, FeedbackSource{});
+
+ // Perform the actual unsigned integer division.
+ V<Word32> value = __ Uint32Div(lhs, rhs);
+
+ // Check if the remainder is non-zero.
+ V<Word32> lossless = __ Word32Equal(lhs, __ Word32Mul(rhs, value));
+ __ DeoptimizeIfNot(lossless, dominating_frame_state,
+ DeoptimizeReason::kLostPrecision, FeedbackSource{});
+ return value;
+ }
+ }
+
+ case IrOpcode::kCheckedInt32Mod: {
+ DCHECK(dominating_frame_state.valid());
+ // General case for signed integer modulus, with optimization for
+ // (unknown) power of 2 right hand side.
+ //
+ // if rhs <= 0 then
+ // rhs = -rhs
+ // deopt if rhs == 0
+ // if lhs < 0 then
+ // let lhs_abs = -lhs in
+ // let res = lhs_abs % rhs in
+ // deopt if res == 0
+ // -res
+ // else
+ // let msk = rhs - 1 in
+ // if rhs & msk == 0 then
+ // lhs & msk
+ // else
+ // lhs % rhs
+ //
+ V<Word32> lhs = Map(node->InputAt(0));
+ V<Word32> rhs = Map(node->InputAt(1));
+
+ Label<Word32> rhs_checked(this);
+ Label<Word32> done(this);
+
+ // Check if {rhs} is not strictly positive.
+ IF(__ Int32LessThanOrEqual(rhs, 0)) {
+ // Negate {rhs}, might still produce a negative result in case of
+ // -2^31, but that is handled safely below.
+ V<Word32> temp = __ Word32Sub(0, rhs);
+
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ __ DeoptimizeIfNot(temp, dominating_frame_state,
+ DeoptimizeReason::kDivisionByZero, FeedbackSource{});
+ GOTO(rhs_checked, temp);
+ }
+ ELSE { GOTO(rhs_checked, rhs); }
+ END_IF
+
+ BIND(rhs_checked, rhs_value);
+
+ IF(__ Int32LessThan(lhs, 0)) {
+ // The {lhs} is a negative integer. This is very unlikely and we
+ // intentionally don't use BuildUint32Mod() here, which would try to
+ // figure out whether {rhs} is a power of two, since this is intended
+ // to be a slow path.
+ V<Word32> temp = __ Uint32Mod(__ Word32Sub(0, lhs), rhs_value);
+
+ // Check if we would have to return -0.
+ __ DeoptimizeIf(__ Word32Equal(temp, 0), dominating_frame_state,
+ DeoptimizeReason::kMinusZero, FeedbackSource{});
+ GOTO(done, __ Word32Sub(0, temp));
+ }
+ ELSE {
+ // The {lhs} is a non-negative integer.
+ GOTO(done, BuildUint32Mod(lhs, rhs_value));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+
+ case IrOpcode::kCheckedInt64Mod: {
+ DCHECK(Is64());
+ DCHECK(dominating_frame_state.valid());
+ V<Word64> lhs = Map(node->InputAt(0));
+ V<Word64> rhs = Map(node->InputAt(1));
+
+ __ DeoptimizeIf(__ Word64Equal(rhs, 0), dominating_frame_state,
+ DeoptimizeReason::kDivisionByZero, FeedbackSource{});
+
+ // While the mod-result cannot overflow, the underlying instruction is
+ // `idiv` and will trap when the accompanying div-result overflows.
+ IF_UNLIKELY(__ Word64Equal(lhs, std::numeric_limits<int64_t>::min())) {
+ __ DeoptimizeIf(__ Word64Equal(rhs, int64_t{-1}),
+ dominating_frame_state, DeoptimizeReason::kOverflow,
+ FeedbackSource{});
+ }
+ END_IF
+
+ return __ Int64Mod(lhs, rhs);
+ }
+
+ case IrOpcode::kCheckedUint32Mod: {
+ DCHECK(dominating_frame_state.valid());
+ V<Word32> lhs = Map(node->InputAt(0));
+ V<Word32> rhs = Map(node->InputAt(1));
+
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ __ DeoptimizeIf(__ Word32Equal(rhs, 0), dominating_frame_state,
+ DeoptimizeReason::kDivisionByZero, FeedbackSource{});
+
+ // Perform the actual unsigned integer modulus.
+ return BuildUint32Mod(lhs, rhs);
+ }
+
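+ // The BigInt binary operations below all lower to a BigIntBinopOp; the
+ // dominating frame state is needed because these operations can trigger a
+ // deoptimization.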
+#define BIGINT_BINOP_CASE(op, kind) \
+ case IrOpcode::kBigInt##op: \
+ DCHECK(dominating_frame_state.valid()); \
+ return __ BigIntBinop(Map(node->InputAt(0)), Map(node->InputAt(1)), \
+ dominating_frame_state, \
+ BigIntBinopOp::Kind::k##kind);
+ BIGINT_BINOP_CASE(Add, Add)
+ BIGINT_BINOP_CASE(Subtract, Sub)
+ BIGINT_BINOP_CASE(Multiply, Mul)
+ BIGINT_BINOP_CASE(Divide, Div)
+ BIGINT_BINOP_CASE(Modulus, Mod)
+ BIGINT_BINOP_CASE(BitwiseAnd, BitwiseAnd)
+ BIGINT_BINOP_CASE(BitwiseOr, BitwiseOr)
+ BIGINT_BINOP_CASE(BitwiseXor, BitwiseXor)
+ BIGINT_BINOP_CASE(ShiftLeft, ShiftLeft)
+ BIGINT_BINOP_CASE(ShiftRight, ShiftRightArithmetic)
+#undef BIGINT_BINOP_CASE
+
+ case IrOpcode::kBigIntEqual:
+ return __ BigIntEqual(Map(node->InputAt(0)), Map(node->InputAt(1)));
+
+ case IrOpcode::kBigIntLessThan:
+ return __ BigIntLessThan(Map(node->InputAt(0)), Map(node->InputAt(1)));
+ case IrOpcode::kBigIntLessThanOrEqual:
+ return __ BigIntLessThanOrEqual(Map(node->InputAt(0)),
+ Map(node->InputAt(1)));
+
+ case IrOpcode::kBigIntNegate:
+ return __ BigIntNegate(Map(node->InputAt(0)));
+
+ case IrOpcode::kLoadRootRegister:
+ // Inlined usage of wasm root register operation in JS.
+ return assembler.ReduceLoadRootRegister();
+
+ case IrOpcode::kStringCharCodeAt:
+ return __ StringCharCodeAt(Map(node->InputAt(0)), Map(node->InputAt(1)));
+ case IrOpcode::kStringCodePointAt:
+ return __ StringCodePointAt(Map(node->InputAt(0)), Map(node->InputAt(1)));
+
+#ifdef V8_INTL_SUPPORT
+ case IrOpcode::kStringToLowerCaseIntl:
+ return __ StringToLowerCaseIntl(Map(node->InputAt(0)));
+ case IrOpcode::kStringToUpperCaseIntl:
+ return __ StringToUpperCaseIntl(Map(node->InputAt(0)));
+#else
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl:
+ UNREACHABLE();
+#endif // V8_INTL_SUPPORT
+
+ case IrOpcode::kStringLength:
+ return __ StringLength(Map(node->InputAt(0)));
+
+ case IrOpcode::kStringIndexOf:
+ return __ StringIndexOf(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ Map(node->InputAt(2)));
+
+ case IrOpcode::kStringFromCodePointAt:
+ return __ StringFromCodePointAt(Map(node->InputAt(0)),
+ Map(node->InputAt(1)));
+
+ case IrOpcode::kStringSubstring:
+ return __ StringSubstring(Map(node->InputAt(0)), Map(node->InputAt(1)),
+ Map(node->InputAt(2)));
+
+ case IrOpcode::kStringEqual:
+ return __ StringEqual(Map(node->InputAt(0)), Map(node->InputAt(1)));
+ case IrOpcode::kStringLessThan:
+ return __ StringLessThan(Map(node->InputAt(0)), Map(node->InputAt(1)));
+ case IrOpcode::kStringLessThanOrEqual:
+ return __ StringLessThanOrEqual(Map(node->InputAt(0)),
+ Map(node->InputAt(1)));
+
+ case IrOpcode::kBeginRegion:
+ return OpIndex::Invalid();
+ case IrOpcode::kFinishRegion:
+ return Map(node->InputAt(0));
+
default:
std::cerr << "unsupported node type: " << *node->op() << "\n";
node->Print(std::cerr);
@@ -815,15 +1695,25 @@ OpIndex GraphBuilder::Process(
} // namespace
-base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
- Zone* phase_zone, Graph* graph,
+base::Optional<BailoutReason> BuildGraph(JSHeapBroker* broker,
+ Schedule* schedule, Isolate* isolate,
+ Zone* graph_zone, Zone* phase_zone,
+ Graph* graph, Linkage* linkage,
SourcePositionTable* source_positions,
NodeOriginTable* origins) {
- GraphBuilder builder{
- graph_zone, phase_zone,
- *schedule, Assembler<>(*graph, *graph, phase_zone),
- source_positions, origins};
+ GraphBuilder builder{isolate,
+ broker,
+ graph_zone,
+ phase_zone,
+ *schedule,
+ Assembler<reducer_list<>>(*graph, *graph, phase_zone,
+ nullptr, std::tuple<>{}),
+ linkage,
+ source_positions,
+ origins};
return builder.Run();
}
+#include "src/compiler/turboshaft/undef-assembler-macros.inc"
+
} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/graph-builder.h b/deps/v8/src/compiler/turboshaft/graph-builder.h
index 520aaf168b..86cc857291 100644
--- a/deps/v8/src/compiler/turboshaft/graph-builder.h
+++ b/deps/v8/src/compiler/turboshaft/graph-builder.h
@@ -14,8 +14,10 @@ class Schedule;
class SourcePositionTable;
}
namespace v8::internal::compiler::turboshaft {
-base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
- Zone* phase_zone, Graph* graph,
+base::Optional<BailoutReason> BuildGraph(JSHeapBroker* broker,
+ Schedule* schedule, Isolate* isolate,
+ Zone* graph_zone, Zone* phase_zone,
+ Graph* graph, Linkage* linkage,
SourcePositionTable* source_positions,
NodeOriginTable* origins);
}
diff --git a/deps/v8/src/compiler/turboshaft/graph-visualizer.cc b/deps/v8/src/compiler/turboshaft/graph-visualizer.cc
index 8c142fbadb..edbbdfe601 100644
--- a/deps/v8/src/compiler/turboshaft/graph-visualizer.cc
+++ b/deps/v8/src/compiler/turboshaft/graph-visualizer.cc
@@ -85,7 +85,6 @@ void JSONTurboshaftGraphWriter::PrintBlocks() {
first_block = false;
os_ << "{\"id\":" << block.index().id() << ",";
os_ << "\"type\":\"" << block.kind() << "\",";
- os_ << "\"deferred\":" << std::boolalpha << block.IsDeferred() << ",";
os_ << "\"predecessors\":[";
bool first_predecessor = true;
for (const Block* pred : block.Predecessors()) {
@@ -125,4 +124,27 @@ void PrintTurboshaftCustomDataPerOperation(
json_of << "]},\n";
}
+void PrintTurboshaftCustomDataPerBlock(
+ OptimizedCompilationInfo* info, const char* data_name, const Graph& graph,
+ std::function<bool(std::ostream&, const Graph&, BlockIndex)> printer) {
+ DCHECK(printer);
+
+ TurboJsonFile json_of(info, std::ios_base::app);
+ json_of << "{\"name\":\"" << data_name
+ << "\", \"type\":\"turboshaft_custom_data\", "
+ "\"data_target\":\"blocks\", \"data\":[";
+ bool first = true;
+ for (const Block& block : graph.blocks()) {
+ std::stringstream stream;
+ BlockIndex index = block.index();
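+ // The printer returns false when it has no data for this block; such
+ // blocks are omitted from the JSON output.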
+ if (printer(stream, graph, index)) {
+ json_of << (first ? "\n" : ",\n") << "{\"key\":" << index.id()
+ << ", \"value\":\"" << stream.str() << "\"}";
+ first = false;
+ }
+ }
+
+ json_of << "]},\n";
+}
+
} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/graph-visualizer.h b/deps/v8/src/compiler/turboshaft/graph-visualizer.h
index 2e384e0fcb..6c2188469e 100644
--- a/deps/v8/src/compiler/turboshaft/graph-visualizer.h
+++ b/deps/v8/src/compiler/turboshaft/graph-visualizer.h
@@ -51,6 +51,9 @@ class JSONTurboshaftGraphWriter {
void PrintTurboshaftCustomDataPerOperation(
OptimizedCompilationInfo* info, const char* data_name, const Graph& graph,
std::function<bool(std::ostream&, const Graph&, OpIndex)> printer);
+void PrintTurboshaftCustomDataPerBlock(
+ OptimizedCompilationInfo* info, const char* data_name, const Graph& graph,
+ std::function<bool(std::ostream&, const Graph&, BlockIndex)> printer);
} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/graph.cc b/deps/v8/src/compiler/turboshaft/graph.cc
index c1744dd68f..aae0becb58 100644
--- a/deps/v8/src/compiler/turboshaft/graph.cc
+++ b/deps/v8/src/compiler/turboshaft/graph.cc
@@ -16,13 +16,14 @@ namespace v8::internal::compiler::turboshaft {
// 0
// ╠ 1
// ╠ 2
-// ╚ 3
-// ╠ 4
-// ║ ╠ 5
-// ║ ╚ 6
-// ╚ 7
-// ╠ 8
-// ╚ 16
+// ╠ 3
+// ║ ╠ 4
+// ║ ║ ╠ 5
+// ║ ║ ╚ 6
+// ║ ╚ 7
+// ║ ╠ 8
+// ║ ╚ 16
+// ╚ 17
//
// Where the numbers are the IDs of the Blocks.
// Doing so is mostly straightforward, with the subtlety that we need to know
@@ -37,7 +38,7 @@ void Block::PrintDominatorTree(std::vector<const char*> tree_symbols,
// Printing the current node.
if (tree_symbols.empty()) {
// This node is the root of the tree.
- PrintF("%d\n", index().id());
+ PrintF("B%d\n", index().id());
tree_symbols.push_back("");
} else {
// This node is not the root of the tree; we start by printing the
@@ -45,7 +46,7 @@ void Block::PrintDominatorTree(std::vector<const char*> tree_symbols,
for (const char* s : tree_symbols) PrintF("%s", s);
// Then, we print the node id, preceded by a ╠ or ╚ connector.
const char* tree_connector_symbol = has_next ? "╠" : "╚";
- PrintF("%s %d\n", tree_connector_symbol, index().id());
+ PrintF("%s B%d\n", tree_connector_symbol, index().id());
// And we add to the stack a connector to continue this path (if needed)
// while printing the current node's children.
const char* tree_cont_symbol = has_next ? "║ " : " ";
@@ -62,8 +63,7 @@ void Block::PrintDominatorTree(std::vector<const char*> tree_symbols,
std::ostream& operator<<(std::ostream& os, PrintAsBlockHeader block_header) {
const Block& block = block_header.block;
- os << "\n" << block.kind() << " " << block.index();
- if (block.IsDeferred()) os << " (deferred)";
+ os << block.kind() << " " << block_header.block_id;
if (!block.Predecessors().empty()) {
os << " <- ";
bool first = true;
@@ -78,7 +78,7 @@ std::ostream& operator<<(std::ostream& os, PrintAsBlockHeader block_header) {
std::ostream& operator<<(std::ostream& os, const Graph& graph) {
for (const Block& block : graph.blocks()) {
- os << PrintAsBlockHeader{block} << "\n";
+ os << "\n" << PrintAsBlockHeader{block} << "\n";
for (const Operation& op : graph.operations(block)) {
os << std::setw(5) << graph.Index(op).id() << ": " << op << "\n";
}
diff --git a/deps/v8/src/compiler/turboshaft/graph.h b/deps/v8/src/compiler/turboshaft/graph.h
index 4704646324..8f77f08ebe 100644
--- a/deps/v8/src/compiler/turboshaft/graph.h
+++ b/deps/v8/src/compiler/turboshaft/graph.h
@@ -9,19 +9,22 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <tuple>
#include <type_traits>
#include "src/base/iterator.h"
+#include "src/base/logging.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/sidetable.h"
+#include "src/compiler/turboshaft/types.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
-template <template <class> class... Reducers>
+template <class Reducers>
class Assembler;
// `OperationBuffer` is a growable, Zone-allocated buffer to store Turboshaft
@@ -68,6 +71,7 @@ class OperationBuffer {
};
explicit OperationBuffer(Zone* zone, size_t initial_capacity) : zone_(zone) {
+ DCHECK_NE(initial_capacity, 0);
begin_ = end_ = zone_->NewArray<OperationStorageSlot>(initial_capacity);
operation_sizes_ =
zone_->NewArray<uint16_t>((initial_capacity + 1) / kSlotsPerId);
@@ -274,28 +278,18 @@ class Block : public RandomAccessStackDominatorNode<Block> {
bool IsBranchTarget() const { return kind_ == Kind::kBranchTarget; }
bool IsHandler() const { return false; }
bool IsSwitchCase() const { return false; }
+
Kind kind() const { return kind_; }
void SetKind(Kind kind) { kind_ = kind; }
BlockIndex index() const { return index_; }
- bool IsDeferred() const { return deferred_; }
- void SetDeferred(bool deferred) { deferred_ = deferred; }
-
bool Contains(OpIndex op_idx) const {
return begin_ <= op_idx && op_idx < end_;
}
bool IsBound() const { return index_ != BlockIndex::Invalid(); }
- void AddPredecessor(Block* predecessor) {
- DCHECK(!IsBound() ||
- (Predecessors().size() == 1 && kind_ == Kind::kLoopHeader));
- DCHECK_EQ(predecessor->neighboring_predecessor_, nullptr);
- predecessor->neighboring_predecessor_ = last_predecessor_;
- last_predecessor_ = predecessor;
- }
-
base::SmallVector<Block*, 8> Predecessors() const {
base::SmallVector<Block*, 8> result;
for (Block* pred = last_predecessor_; pred != nullptr;
@@ -306,6 +300,9 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return result;
}
+ // TODO(dmercadier): we should store predecessor count in the Blocks directly
+ // (or in the Graph, or in the Assembler), to avoid this O(n) PredecessorCount
+ // method.
int PredecessorCount() const {
int count = 0;
for (Block* pred = last_predecessor_; pred != nullptr;
@@ -315,18 +312,79 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return count;
}
+ // Returns the index of {target} in the predecessors of the current Block.
+ // If {target} is not a direct predecessor, returns -1.
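+ // Note that predecessors are linked from last to first through
+ // {neighboring_predecessor_}, so the position found while walking the list
+ // is counted from the end and is mirrored before returning.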
+ int GetPredecessorIndex(const Block* target) const {
+ int pred_count = 0;
+ int pred_reverse_index = -1;
+ for (Block* pred = last_predecessor_; pred != nullptr;
+ pred = pred->neighboring_predecessor_) {
+ if (pred == target) {
+ DCHECK_EQ(pred_reverse_index, -1);
+ pred_reverse_index = pred_count;
+ }
+ pred_count++;
+ }
+ if (pred_reverse_index == -1) {
+ return -1;
+ }
+ return pred_count - pred_reverse_index - 1;
+ }
+
+ // HasExactlyNPredecessors(n) returns the same result as
+ // `PredecessorCount() == n`, but stops early and iterates at most the first
+ // {n} predecessors.
+ bool HasExactlyNPredecessors(unsigned int n) const {
+ Block* current_pred = last_predecessor_;
+ while (current_pred != nullptr && n != 0) {
+ current_pred = current_pred->neighboring_predecessor_;
+ n--;
+ }
+ return n == 0 && current_pred == nullptr;
+ }
+
Block* LastPredecessor() const { return last_predecessor_; }
Block* NeighboringPredecessor() const { return neighboring_predecessor_; }
bool HasPredecessors() const { return last_predecessor_ != nullptr; }
+ void ResetLastPredecessor() { last_predecessor_ = nullptr; }
- // The block from the previous graph which produced the current block. This is
- // used for translating phi nodes from the previous graph.
+ void SetMappingToNextGraph(Block* next_graph_block) {
+ DCHECK_NULL(next_graph_mapping_);
+ DCHECK_NOT_NULL(next_graph_block);
+ next_graph_mapping_ = next_graph_block;
+ next_graph_block->SetOrigin(this);
+ }
+ Block* MapToNextGraph() const {
+ DCHECK_NOT_NULL(next_graph_mapping_);
+ return next_graph_mapping_;
+ }
+ // The block from the previous graph which produced the current block. This
+ // has to be updated to be the last block that contributed operations to the
+ // current block to ensure that phi nodes are created correctly.
void SetOrigin(const Block* origin) {
- DCHECK_NULL(origin_);
- DCHECK_EQ(origin->graph_generation_ + 1, graph_generation_);
+ DCHECK_IMPLIES(origin != nullptr,
+ origin->graph_generation_ + 1 == graph_generation_);
origin_ = origin;
}
- const Block* Origin() const { return origin_; }
+ // The block from the input graph that is equivalent as a predecessor. It is
+ // only available for bound blocks and it does *not* refer to an equivalent
+ // block as a branch destination.
+ const Block* OriginForBlockEnd() const {
+ DCHECK(IsBound());
+ return origin_;
+ }
+ // The block from the input graph that corresponds to the current block as a
+ // branch destination. Such a block might not exist, and this function uses a
+ // trick to compute such a block in almost all cases, but might rarely fail
+ // and return `nullptr` instead.
+ const Block* OriginForBlockStart() const {
+ // Check that `origin_` is still valid as a block start and was not changed
+ // to a semantically different block when inlining blocks.
+ if (origin_ && origin_->MapToNextGraph() == this) {
+ return origin_;
+ }
+ return nullptr;
+ }
OpIndex begin() const {
DCHECK(begin_.valid());
@@ -337,9 +395,27 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return end_;
}
+ // Might return nullptr if the first operation is invalid.
+ const Operation& FirstOperation(const Graph& graph) const;
+ const Operation& LastOperation(const Graph& graph) const;
+
+ bool EndsWithBranchingOp(const Graph& graph) const {
+ switch (LastOperation(graph).opcode) {
+ case Opcode::kBranch:
+ case Opcode::kSwitch:
+ case Opcode::kCallAndCatchException:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool HasPhis(const Graph& graph) const;
+
// Computes the dominators of this block, assuming that the dominators of
- // its predecessors are already computed.
- void ComputeDominator();
+ // its predecessors are already computed. Returns the depth of the current
+ // block in the dominator tree.
+ uint32_t ComputeDominator();
void PrintDominatorTree(
std::vector<const char*> tree_symbols = std::vector<const char*>(),
@@ -347,22 +423,45 @@ class Block : public RandomAccessStackDominatorNode<Block> {
explicit Block(Kind kind) : kind_(kind) {}
+ uint32_t& custom_data() { return custom_data_; }
+ const uint32_t& custom_data() const { return custom_data_; }
+
private:
+ // AddPredecessor should never be called directly except from Assembler's
+ // AddPredecessor and SplitEdge methods, which take care of maintaining
+ // split-edge form.
+ void AddPredecessor(Block* predecessor) {
+ DCHECK(!IsBound() ||
+ (Predecessors().size() == 1 && kind_ == Kind::kLoopHeader));
+ DCHECK_EQ(predecessor->neighboring_predecessor_, nullptr);
+ predecessor->neighboring_predecessor_ = last_predecessor_;
+ last_predecessor_ = predecessor;
+ }
+
friend class Graph;
+ template <class Reducers>
+ friend class Assembler;
Kind kind_;
- bool deferred_ = false;
OpIndex begin_ = OpIndex::Invalid();
OpIndex end_ = OpIndex::Invalid();
BlockIndex index_ = BlockIndex::Invalid();
Block* last_predecessor_ = nullptr;
Block* neighboring_predecessor_ = nullptr;
const Block* origin_ = nullptr;
+ Block* next_graph_mapping_ = nullptr;
+ // The {custom_data_} field can be used by algorithms to temporarily store
+ // block-specific data. This field is not preserved when constructing a new
+ // output graph and algorithms cannot rely on this field being properly reset
+ // after previous uses.
+ uint32_t custom_data_ = 0;
#ifdef DEBUG
size_t graph_generation_ = 0;
#endif
};
+std::ostream& operator<<(std::ostream& os, const Block* b);
+
class Graph {
public:
// A big initial capacity prevents many growing steps. It also makes sense
@@ -373,7 +472,14 @@ class Graph {
all_blocks_(graph_zone),
graph_zone_(graph_zone),
source_positions_(graph_zone),
- operation_origins_(graph_zone) {}
+ operation_origins_(graph_zone),
+ operation_types_(graph_zone)
+#ifdef DEBUG
+ ,
+ block_type_refinement_(graph_zone)
+#endif
+ {
+ }
// Reset the graph to recycle its memory.
void Reset() {
@@ -381,7 +487,12 @@ class Graph {
bound_blocks_.clear();
source_positions_.Reset();
operation_origins_.Reset();
+ operation_types_.Reset();
next_block_ = 0;
+ dominator_tree_depth_ = 0;
+#ifdef DEBUG
+ block_type_refinement_.Reset();
+#endif
}
V8_INLINE const Operation& Get(OpIndex i) const {
@@ -403,6 +514,8 @@ class Graph {
return *ptr;
}
+ void MarkAsUnused(OpIndex i) { Get(i).saturated_use_count = 0; }
+
const Block& StartBlock() const { return Get(BlockIndex(0)); }
Block& Get(BlockIndex i) {
@@ -415,6 +528,19 @@ class Graph {
}
OpIndex Index(const Operation& op) const { return operations_.Index(op); }
+ BlockIndex BlockOf(OpIndex index) const {
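+ // {bound_blocks_} is sorted by begin index, so a binary search for the
+ // last block whose {begin_} does not exceed {index} yields the block
+ // containing {index}.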
+ auto it = std::upper_bound(
+ bound_blocks_.begin(), bound_blocks_.end(), index,
+ [](OpIndex value, const Block* b) { return value < b->begin_; });
+ DCHECK_NE(it, bound_blocks_.begin());
+ --it;
+ return (*it)->index();
+ }
+
+ OpIndex NextIndex(const OpIndex idx) const { return operations_.Next(idx); }
+ OpIndex PreviousIndex(const OpIndex idx) const {
+ return operations_.Previous(idx);
+ }
OperationStorageSlot* Allocate(size_t slot_count) {
return operations_.Allocate(slot_count);
@@ -432,6 +558,16 @@ class Graph {
#endif // DEBUG
Op& op = Op::New(this, args...);
IncrementInputUses(op);
+
+ if (op.Properties().is_required_when_unused) {
+ // Once the graph is built, an operation with a `saturated_use_count` of 0
+ // is guaranteed to be unused and can be removed. Thus, to avoid removing
+ // operations that never have uses (such as Goto or Branch), we set the
+ // `saturated_use_count` of Operations that are `required_when_unused`
+ // to 1.
+ op.saturated_use_count = 1;
+ }
+
DCHECK_EQ(result, Index(op));
#ifdef DEBUG
for (OpIndex input : op.inputs()) {
@@ -458,43 +594,29 @@ class Graph {
IncrementInputUses(*new_op);
}
- V8_INLINE Block* NewBlock(Block::Kind kind) {
- if (V8_UNLIKELY(next_block_ == all_blocks_.size())) {
- constexpr size_t new_block_count = 64;
- base::Vector<Block> blocks =
- graph_zone_->NewVector<Block>(new_block_count, Block(kind));
- for (size_t i = 0; i < new_block_count; ++i) {
- all_blocks_.push_back(&blocks[i]);
- }
- }
- Block* result = all_blocks_[next_block_++];
- *result = Block(kind);
-#ifdef DEBUG
- result->graph_generation_ = generation_;
-#endif
- return result;
+ V8_INLINE Block* NewLoopHeader() {
+ return NewBlock(Block::Kind::kLoopHeader);
+ }
+ V8_INLINE Block* NewBlock() { return NewBlock(Block::Kind::kMerge); }
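+ // Creates the output-graph counterpart of {origin} (a loop header if
+ // {origin} is a loop, otherwise a merge block) and records the mapping
+ // between the two blocks.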
+ V8_INLINE Block* NewMappedBlock(Block* origin) {
+ Block* new_block = NewBlock(origin->IsLoop() ? Block::Kind::kLoopHeader
+ : Block::Kind::kMerge);
+ origin->SetMappingToNextGraph(new_block);
+ return new_block;
}
V8_INLINE bool Add(Block* block) {
DCHECK_EQ(block->graph_generation_, generation_);
if (!bound_blocks_.empty() && !block->HasPredecessors()) return false;
- if (!block->IsDeferred()) {
- bool deferred = true;
- for (Block* pred = block->last_predecessor_; pred != nullptr;
- pred = pred->neighboring_predecessor_) {
- if (!pred->IsDeferred()) {
- deferred = false;
- break;
- }
- }
- block->SetDeferred(deferred);
- }
+
DCHECK(!block->begin_.valid());
block->begin_ = next_operation_index();
DCHECK_EQ(block->index_, BlockIndex::Invalid());
- block->index_ = BlockIndex(static_cast<uint32_t>(bound_blocks_.size()));
+ block->index_ = next_block_index();
bound_blocks_.push_back(block);
- block->ComputeDominator();
+ uint32_t depth = block->ComputeDominator();
+ dominator_tree_depth_ = std::max<uint32_t>(dominator_tree_depth_, depth);
+
return true;
}
@@ -517,6 +639,9 @@ class Graph {
}
OpIndex next_operation_index() const { return operations_.EndIndex(); }
+ BlockIndex next_block_index() const {
+ return BlockIndex(static_cast<uint32_t>(bound_blocks_.size()));
+ }
Zone* graph_zone() const { return graph_zone_; }
uint32_t block_count() const {
@@ -648,6 +773,11 @@ class Graph {
bound_blocks_.size())};
}
+ bool IsLoopBackedge(const GotoOp& op) const {
+ DCHECK(op.destination->IsBound());
+ return op.destination->begin() <= Index(op);
+ }
+
bool IsValid(OpIndex i) const { return i < next_operation_index(); }
const GrowingSidetable<SourcePosition>& source_positions() const {
@@ -662,6 +792,24 @@ class Graph {
}
GrowingSidetable<OpIndex>& operation_origins() { return operation_origins_; }
+ uint32_t DominatorTreeDepth() const { return dominator_tree_depth_; }
+ const GrowingSidetable<Type>& operation_types() const {
+ return operation_types_;
+ }
+ GrowingSidetable<Type>& operation_types() { return operation_types_; }
+#ifdef DEBUG
+ // Store refined types per block here for --trace-turbo printing.
+ // TODO(nicohartmann@): Remove this once we have a proper way to print
+ // type information inside the reducers.
+ using TypeRefinements = std::vector<std::pair<OpIndex, Type>>;
+ const GrowingBlockSidetable<TypeRefinements>& block_type_refinement() const {
+ return block_type_refinement_;
+ }
+ GrowingBlockSidetable<TypeRefinements>& block_type_refinement() {
+ return block_type_refinement_;
+ }
+#endif // DEBUG
+
Graph& GetOrCreateCompanion() {
if (!companion_) {
companion_ = std::make_unique<Graph>(graph_zone_, operations_.size());
@@ -683,13 +831,19 @@ class Graph {
std::swap(graph_zone_, companion.graph_zone_);
std::swap(source_positions_, companion.source_positions_);
std::swap(operation_origins_, companion.operation_origins_);
+ std::swap(operation_types_, companion.operation_types_);
#ifdef DEBUG
+ std::swap(block_type_refinement_, companion.block_type_refinement_);
// Update generation index.
DCHECK_EQ(generation_ + 1, companion.generation_);
generation_ = companion.generation_++;
#endif // DEBUG
}
+#ifdef DEBUG
+ size_t generation() const { return generation_; }
+#endif // DEBUG
+
private:
bool InputsValid(const Operation& op) const {
for (OpIndex i : op.inputs()) {
@@ -724,6 +878,23 @@ class Graph {
}
}
+ V8_INLINE Block* NewBlock(Block::Kind kind) {
+ if (V8_UNLIKELY(next_block_ == all_blocks_.size())) {
+ constexpr size_t new_block_count = 64;
+ base::Vector<Block> blocks =
+ graph_zone_->NewVector<Block>(new_block_count, Block(kind));
+ for (size_t i = 0; i < new_block_count; ++i) {
+ all_blocks_.push_back(&blocks[i]);
+ }
+ }
+ Block* result = all_blocks_[next_block_++];
+ *result = Block(kind);
+#ifdef DEBUG
+ result->graph_generation_ = generation_;
+#endif
+ return result;
+ }
+
OperationBuffer operations_;
ZoneVector<Block*> bound_blocks_;
ZoneVector<Block*> all_blocks_;
@@ -731,6 +902,11 @@ class Graph {
Zone* graph_zone_;
GrowingSidetable<SourcePosition> source_positions_;
GrowingSidetable<OpIndex> operation_origins_;
+ uint32_t dominator_tree_depth_ = 0;
+ GrowingSidetable<Type> operation_types_;
+#ifdef DEBUG
+ GrowingBlockSidetable<TypeRefinements> block_type_refinement_;
+#endif
std::unique_ptr<Graph> companion_ = {};
#ifdef DEBUG
@@ -743,14 +919,62 @@ V8_INLINE OperationStorageSlot* AllocateOpStorage(Graph* graph,
return graph->Allocate(slot_count);
}
+V8_INLINE const Operation& Get(const Graph& graph, OpIndex index) {
+ return graph.Get(index);
+}
+
+V8_INLINE const Operation& Block::FirstOperation(const Graph& graph) const {
+ DCHECK_EQ(graph_generation_, graph.generation());
+ DCHECK(begin_.valid());
+ DCHECK(end_.valid());
+ return graph.Get(begin_);
+}
+
+V8_INLINE const Operation& Block::LastOperation(const Graph& graph) const {
+ DCHECK_EQ(graph_generation_, graph.generation());
+ return graph.Get(graph.PreviousIndex(end()));
+}
+
+V8_INLINE bool Block::HasPhis(const Graph& graph) const {
+ DCHECK_EQ(graph_generation_, graph.generation());
+#ifdef DEBUG
+ // Verify that Phis only occur at the beginning of the block, i.e. that no
+ // Phi follows an operation other than a Phi or a FrameState.
+ bool starts_with_phi = false;
+ bool finished_phis = false;
+ for (const auto& op : graph.operations(*this)) {
+ if (op.Is<PhiOp>()) {
+ DCHECK(!finished_phis);
+ starts_with_phi = true;
+ }
+ if (!op.Is<PhiOp>() && !op.Is<FrameStateOp>()) {
+ finished_phis = true;
+ }
+ }
+ return starts_with_phi;
+#else // DEBUG
+ for (const auto& op : graph.operations(*this)) {
+ if (op.Is<PhiOp>()) {
+ return true;
+ } else if (op.Is<FrameStateOp>()) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+ return false;
+#endif // DEBUG
+}
+
struct PrintAsBlockHeader {
const Block& block;
+ BlockIndex block_id = block.index();
};
std::ostream& operator<<(std::ostream& os, PrintAsBlockHeader block);
std::ostream& operator<<(std::ostream& os, const Graph& graph);
std::ostream& operator<<(std::ostream& os, const Block::Kind& kind);
-inline void Block::ComputeDominator() {
+inline uint32_t Block::ComputeDominator() {
if (V8_UNLIKELY(LastPredecessor() == nullptr)) {
// If the block has no predecessors, then it's the start block. We create a
// jmp_ edge to itself, so that the SetDominator algorithm does not need a
@@ -778,6 +1002,7 @@ inline void Block::ComputeDominator() {
DCHECK_NE(jmp_, nullptr);
DCHECK_IMPLIES(nxt_ == nullptr, LastPredecessor() == nullptr);
DCHECK_IMPLIES(len_ == 0, LastPredecessor() == nullptr);
+ return Depth();
}
template <class Derived>
diff --git a/deps/v8/src/compiler/turboshaft/index.h b/deps/v8/src/compiler/turboshaft/index.h
new file mode 100644
index 0000000000..1c73a2cbee
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/index.h
@@ -0,0 +1,358 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_INDEX_H_
+#define V8_COMPILER_TURBOSHAFT_INDEX_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "src/base/logging.h"
+#include "src/codegen/tnode.h"
+#include "src/compiler/turboshaft/fast-hash.h"
+#include "src/compiler/turboshaft/representations.h"
+
+namespace v8::internal::compiler::turboshaft {
+// Operations are stored in possibly multiple sequential storage slots.
+using OperationStorageSlot = std::aligned_storage_t<8, 8>;
+// Operations occupy at least 2 slots; therefore, we assign one id per two slots.
+constexpr size_t kSlotsPerId = 2;
+
+// `OpIndex` is an offset from the beginning of the operations buffer.
+// Compared to `Operation*`, it is more memory efficient (32bit) and stable when
+// the operations buffer is re-allocated.
+class OpIndex {
+ public:
+ explicit constexpr OpIndex(uint32_t offset) : offset_(offset) {
+ DCHECK_EQ(offset % sizeof(OperationStorageSlot), 0);
+ }
+ constexpr OpIndex() : offset_(std::numeric_limits<uint32_t>::max()) {}
+
+ uint32_t id() const {
+ // Operations are stored at an offset that's a multiple of
+ // `sizeof(OperationStorageSlot)`. In addition, an operation occupies at
+ // least `kSlotsPerId` many `OperationStorageSlot`s. Therefore, we can assign ids
+ // by dividing by `kSlotsPerId`. A compact id space is important, because it
+ // makes side-tables smaller.
+ DCHECK_EQ(offset_ % sizeof(OperationStorageSlot), 0);
+ return offset_ / sizeof(OperationStorageSlot) / kSlotsPerId;
+ }
+ uint32_t offset() const {
+ DCHECK_EQ(offset_ % sizeof(OperationStorageSlot), 0);
+ return offset_;
+ }
+
+ bool valid() const { return *this != Invalid(); }
+
+ static constexpr OpIndex Invalid() { return OpIndex(); }
+
+ // Encode a sea-of-nodes node id in the `OpIndex` type.
+ // Only used for node origins that actually point to sea-of-nodes graph nodes.
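+ // The encoded offset is deliberately misaligned by {kTurbofanNodeIdFlag}
+ // so that it cannot be confused with a regular, slot-aligned OpIndex.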
+ static OpIndex EncodeTurbofanNodeId(uint32_t id) {
+ OpIndex result = OpIndex(id * sizeof(OperationStorageSlot));
+ result.offset_ += kTurbofanNodeIdFlag;
+ return result;
+ }
+ uint32_t DecodeTurbofanNodeId() const {
+ DCHECK(IsTurbofanNodeId());
+ return offset_ / sizeof(OperationStorageSlot);
+ }
+ bool IsTurbofanNodeId() const {
+ return offset_ % sizeof(OperationStorageSlot) == kTurbofanNodeIdFlag;
+ }
+
+ bool operator==(OpIndex other) const { return offset_ == other.offset_; }
+ bool operator!=(OpIndex other) const { return offset_ != other.offset_; }
+ bool operator<(OpIndex other) const { return offset_ < other.offset_; }
+ bool operator>(OpIndex other) const { return offset_ > other.offset_; }
+ bool operator<=(OpIndex other) const { return offset_ <= other.offset_; }
+ bool operator>=(OpIndex other) const { return offset_ >= other.offset_; }
+
+ uint32_t offset_;
+
+ static constexpr uint32_t kTurbofanNodeIdFlag = 1;
+};
+
+std::ostream& operator<<(std::ostream& os, OpIndex idx);
+
+// Dummy value for abstract representation classes that don't have a
+// RegisterRepresentation.
+struct nullrep_t {};
+constexpr nullrep_t nullrep;
+
+// Abstract tag classes for V<>.
+struct Any {};
+
+template <size_t Bits>
+struct WordWithBits : public Any {
+ static constexpr int bits = Bits;
+ static_assert(Bits == 32 || Bits == 64);
+};
+
+using Word32 = WordWithBits<32>;
+using Word64 = WordWithBits<64>;
+using WordPtr = std::conditional_t<Is64(), Word64, Word32>;
+
+template <size_t Bits>
+struct FloatWithBits : public Any {
+ static constexpr int bits = Bits;
+ static_assert(Bits == 32 || Bits == 64);
+};
+
+using Float32 = FloatWithBits<32>;
+using Float64 = FloatWithBits<64>;
+
+// TODO(nicohartmann@): Replace all uses of `V<Tagged>` by `V<Object>`.
+using Tagged = Object;
+
+struct Compressed : public Any {};
+
+// Traits classes `v_traits<T>` to provide additional T-specific information for
+// V<T> and ConstOrV<T>. If you need to provide non-default conversion behavior
+// for a specific type, specialize the corresponding v_traits<>.
+template <typename T, typename = void>
+struct v_traits;
+
+template <>
+struct v_traits<Any> {
+ static constexpr bool is_abstract_tag = true;
+ static constexpr auto rep = nullrep;
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return true;
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_same_v<U, Any>> {};
+};
+
+template <>
+struct v_traits<Compressed> {
+ static constexpr bool is_abstract_tag = true;
+ static constexpr auto rep = nullrep;
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Compressed();
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_base_of_v<U, Compressed>> {};
+};
+
+template <>
+struct v_traits<Word32> {
+ static constexpr bool is_abstract_tag = true;
+ static constexpr WordRepresentation rep = WordRepresentation::Word32();
+ using constexpr_type = uint32_t;
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Word32();
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_base_of_v<U, Word32>> {};
+};
+
+template <>
+struct v_traits<Word64> {
+ static constexpr bool is_abstract_tag = true;
+ static constexpr WordRepresentation rep = WordRepresentation::Word64();
+ using constexpr_type = uint64_t;
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Word64();
+ }
+
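+ // Note: V<Word64> is implicitly convertible to V<Word32> as well,
+ // reflecting the convention that 64-bit word values can be used where
+ // 32-bit values are expected (implicit truncation).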
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_base_of_v<U, Word64> ||
+ std::is_same_v<U, Word32>> {};
+};
+
+template <>
+struct v_traits<Float32> {
+ static constexpr bool is_abstract_tag = true;
+ static constexpr FloatRepresentation rep = FloatRepresentation::Float32();
+ using constexpr_type = float;
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Float32();
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_base_of_v<U, Float32>> {};
+};
+
+template <>
+struct v_traits<Float64> {
+ static constexpr bool is_abstract_tag = true;
+ static constexpr FloatRepresentation rep = FloatRepresentation::Float64();
+ using constexpr_type = double;
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Float64();
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_base_of_v<U, Float64>> {};
+};
+
+template <typename T>
+struct v_traits<T, typename std::enable_if_t<std::is_base_of_v<Object, T>>> {
+ static constexpr bool is_abstract_tag = false;
+ static constexpr auto rep = RegisterRepresentation::Tagged();
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Tagged();
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<std::is_base_of_v<U, T> || std::is_same_v<U, Any> ||
+ is_subtype<T, U>::value> {};
+};
+
+template <typename T1, typename T2>
+struct v_traits<UnionT<T1, T2>,
+ typename std::enable_if_t<std::is_base_of_v<Object, T1> &&
+ std::is_base_of_v<Object, T2>>> {
+ static constexpr bool is_abstract_tag = false;
+ static constexpr auto rep = RegisterRepresentation::Tagged();
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return rep == RegisterRepresentation::Tagged();
+ }
+
+ template <typename U>
+ struct implicitly_convertible_to
+ : std::bool_constant<
+ (std::is_base_of_v<U, T1> && std::is_base_of_v<U, T2>) ||
+ std::is_same_v<U, Any> || is_subtype<UnionT<T1, T2>, U>::value> {};
+};
+
+// V<> represents an SSA-value that is parameterized with the type of the value.
+// Types from the `Object` hierarchy can be provided as well as the abstract
+// representation classes (`Word32`, ...) defined above.
+// Prefer using V<> instead of a plain OpIndex where possible.
+template <typename T>
+class V : public OpIndex {
+ public:
+ using type = T;
+ static constexpr auto rep = v_traits<type>::rep;
+ constexpr V() : OpIndex() {}
+
+ // V<T> is implicitly constructible from plain OpIndex.
+ template <typename U, typename = std::enable_if_t<std::is_same_v<U, OpIndex>>>
+ V(U index) : OpIndex(index) {} // NOLINT(runtime/explicit)
+
+ // V<T> is implicitly constructible from V<U> iff
+ // `v_traits<U>::implicitly_convertible_to<T>::value`. This is typically the
+ // case if T == U or T is a subclass of U. Different types may specify
+ // different conversion rules in the corresponding `v_traits` when necessary.
+ template <typename U, typename = std::enable_if_t<v_traits<
+ U>::template implicitly_convertible_to<T>::value>>
+ V(V<U> index) : OpIndex(index) {} // NOLINT(runtime/explicit)
+
+ template <typename U>
+ static V<T> Cast(V<U> index) {
+ return V<T>(OpIndex{index});
+ }
+
+ static constexpr bool allows_representation(RegisterRepresentation rep) {
+ return v_traits<T>::allows_representation(rep);
+ }
+};
+
+// ConstOrV<> is a generalization of V<> that allows constexpr values
+// (constants) to be passed implicitly. This allows reducers to write things
+// like
+//
+// __ Word32Add(value, 1)
+//
+// instead of having to write
+//
+// __ Word32Add(value, __ Word32Constant(1))
+//
+// which makes overall code more compact and easier to read. Functions need to
+// call `resolve` on the assembler in order to convert to V<> (which will then
+// construct the corresponding ConstantOp if the given ConstOrV<> holds a
+// constexpr value).
+// NOTICE: `ConstOrV<T>` can only be used if `v_traits<T>` provides a
+// `constexpr_type`.
+template <typename T, typename C = typename v_traits<T>::constexpr_type>
+class ConstOrV {
+ public:
+ using type = T;
+ using constant_type = C;
+
+ ConstOrV(constant_type value) // NOLINT(runtime/explicit)
+ : constant_value_(value), value_() {}
+
+ // ConstOrV<T> is implicitly constructible from plain OpIndex.
+ template <typename U, typename = std::enable_if_t<std::is_same_v<U, OpIndex>>>
+ ConstOrV(U index) // NOLINT(runtime/explicit)
+ : constant_value_(), value_(index) {}
+
+ // ConstOrV<T> is implicitly constructible from V<U> iff V<T> is
+ // constructible from V<U>.
+ template <typename U,
+ typename = std::enable_if_t<std::is_constructible_v<V<T>, V<U>>>>
+ ConstOrV(V<U> index) // NOLINT(runtime/explicit)
+ : constant_value_(), value_(index) {}
+
+ bool is_constant() const { return constant_value_.has_value(); }
+ constant_type constant_value() const {
+ DCHECK(is_constant());
+ return *constant_value_;
+ }
+ V<type> value() const {
+ DCHECK(!is_constant());
+ return value_;
+ }
+
+ private:
+ base::Optional<constant_type> constant_value_;
+ V<type> value_;
+};
+
+template <>
+struct fast_hash<OpIndex> {
+ V8_INLINE size_t operator()(OpIndex op) const { return op.id(); }
+};
+
+V8_INLINE size_t hash_value(OpIndex op) { return base::hash_value(op.id()); }
+
+// `BlockIndex` is the index of a bound block.
+// A dominating block always has a smaller index.
+// It corresponds to the ordering of basic blocks in the operations buffer.
+class BlockIndex {
+ public:
+ explicit constexpr BlockIndex(uint32_t id) : id_(id) {}
+ constexpr BlockIndex() : id_(std::numeric_limits<uint32_t>::max()) {}
+
+ uint32_t id() const { return id_; }
+ bool valid() const { return *this != Invalid(); }
+
+ static constexpr BlockIndex Invalid() { return BlockIndex(); }
+
+ bool operator==(BlockIndex other) const { return id_ == other.id_; }
+ bool operator!=(BlockIndex other) const { return id_ != other.id_; }
+ bool operator<(BlockIndex other) const { return id_ < other.id_; }
+ bool operator>(BlockIndex other) const { return id_ > other.id_; }
+ bool operator<=(BlockIndex other) const { return id_ <= other.id_; }
+ bool operator>=(BlockIndex other) const { return id_ >= other.id_; }
+
+ private:
+ uint32_t id_;
+};
+
+template <>
+struct fast_hash<BlockIndex> {
+ V8_INLINE size_t operator()(BlockIndex op) const { return op.id(); }
+};
+
+V8_INLINE size_t hash_value(BlockIndex op) { return base::hash_value(op.id()); }
+
+std::ostream& operator<<(std::ostream& os, BlockIndex b);
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_INDEX_H_
diff --git a/deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.cc b/deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.cc
new file mode 100644
index 0000000000..234dab18e9
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.cc
@@ -0,0 +1,101 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/late-escape-analysis-reducer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void LateEscapeAnalysisAnalyzer::Run() {
+ CollectUsesAndAllocations();
+ FindRemovableAllocations();
+}
+
+void LateEscapeAnalysisAnalyzer::RecordAllocateUse(OpIndex alloc, OpIndex use) {
+ auto [it, new_entry] = alloc_uses_.try_emplace(alloc, phase_zone_);
+ auto& uses = it->second;
+ if (new_entry) {
+ uses.reserve(graph_.Get(alloc).saturated_use_count);
+ }
+ uses.push_back(use);
+}
+
+// Collects the Allocate Operations and their uses.
+void LateEscapeAnalysisAnalyzer::CollectUsesAndAllocations() {
+ for (auto& op : graph_.AllOperations()) {
+ if (ShouldSkipOperation(op)) continue;
+ OpIndex op_index = graph_.Index(op);
+ for (OpIndex input : op.inputs()) {
+ if (graph_.Get(input).Is<AllocateOp>()) {
+ RecordAllocateUse(input, op_index);
+ }
+ }
+ if (op.Is<AllocateOp>()) {
+ allocs_.push_back(op_index);
+ }
+ }
+}
+
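+// {allocs_} doubles as a worklist: MarkToRemove pushes allocations back onto
+// it when removing a store makes them potentially removable, so entries that
+// were already removed can be encountered again (hence the skip below).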
+void LateEscapeAnalysisAnalyzer::FindRemovableAllocations() {
+ while (!allocs_.empty()) {
+ OpIndex current_alloc = allocs_.back();
+ allocs_.pop_back();
+
+ if (ShouldSkipOperation(graph_.Get(current_alloc))) {
+ // We are re-visiting an allocation that we've actually already removed.
+ continue;
+ }
+
+ if (!AllocationIsEscaping(current_alloc)) {
+ MarkToRemove(current_alloc);
+ }
+ }
+}
+
+bool LateEscapeAnalysisAnalyzer::AllocationIsEscaping(OpIndex alloc) {
+ if (alloc_uses_.find(alloc) == alloc_uses_.end()) return false;
+ for (OpIndex use : alloc_uses_.at(alloc)) {
+ if (EscapesThroughUse(alloc, use)) return true;
+ }
+ // We haven't found any use through which {alloc} escapes.
+ return false;
+}
+
+// Returns true if {using_op_idx} is an operation that forces {alloc} to be
+// emitted.
+bool LateEscapeAnalysisAnalyzer::EscapesThroughUse(OpIndex alloc,
+ OpIndex using_op_idx) {
+ if (ShouldSkipOperation(graph_.Get(alloc))) {
+ // {using_op_idx} is an Allocate itself, which has been removed.
+ return false;
+ }
+ const Operation& op = graph_.Get(using_op_idx);
+ if (const StoreOp* store_op = op.TryCast<StoreOp>()) {
+ // A StoreOp only makes {alloc} escape if it uses {alloc} as the {value} or
+ // the {index}. Put otherwise, StoreOp makes {alloc} escape if it writes
+ // {alloc}, but not if it writes **to** {alloc}.
+ return store_op->value() == alloc;
+ }
+ return true;
+}
+
+void LateEscapeAnalysisAnalyzer::MarkToRemove(OpIndex alloc) {
+ graph_.MarkAsUnused(alloc);
+ if (alloc_uses_.find(alloc) == alloc_uses_.end()) {
+ return;
+ }
+
+ // The uses of {alloc} should also be skipped.
+ for (OpIndex use : alloc_uses_.at(alloc)) {
+ graph_.MarkAsUnused(use);
+ const StoreOp& store = graph_.Get(use).Cast<StoreOp>();
+ if (graph_.Get(store.value()).Is<AllocateOp>()) {
+ // This store was storing the result of an allocation. Because we now
+ // removed this store, we might be able to remove the other allocation
+ // as well.
+ allocs_.push_back(store.value());
+ }
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.h b/deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.h
new file mode 100644
index 0000000000..c0000c8b3b
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/late-escape-analysis-reducer.h
@@ -0,0 +1,67 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_LATE_ESCAPE_ANALYSIS_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_LATE_ESCAPE_ANALYSIS_REDUCER_H_
+
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/graph.h"
+#include "src/compiler/turboshaft/utils.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// LateEscapeAnalysis removes allocations that have no uses besides the stores
+// initializing the object.
+
+class LateEscapeAnalysisAnalyzer {
+ public:
+ LateEscapeAnalysisAnalyzer(Graph& graph, Zone* zone)
+ : graph_(graph), phase_zone_(zone), alloc_uses_(zone), allocs_(zone) {}
+
+ void Run();
+
+ private:
+ void RecordAllocateUse(OpIndex alloc, OpIndex use);
+
+ void CollectUsesAndAllocations();
+ void FindRemovableAllocations();
+ bool AllocationIsEscaping(OpIndex alloc);
+ bool EscapesThroughUse(OpIndex alloc, OpIndex using_op_idx);
+ void MarkToRemove(OpIndex alloc);
+
+ Graph& graph_;
+ Zone* phase_zone_;
+
+ // {alloc_uses_} records all the uses of each AllocateOp.
+ ZoneUnorderedMap<OpIndex, ZoneVector<OpIndex>> alloc_uses_;
+ // {allocs_} is filled with all of the AllocateOp of the graph, and then
+ // iterated upon to determine which allocations can be removed and which
+ // cannot.
+ ZoneVector<OpIndex> allocs_;
+};
+
+template <class Next>
+class LateEscapeAnalysisReducer : public Next {
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ template <class... Args>
+ explicit LateEscapeAnalysisReducer(const std::tuple<Args...>& args)
+ : Next(args),
+ analyzer_(Asm().modifiable_input_graph(), Asm().phase_zone()) {}
+
+ void Analyze() {
+ analyzer_.Run();
+ Next::Analyze();
+ }
+
+ private:
+ LateEscapeAnalysisAnalyzer analyzer_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_LATE_ESCAPE_ANALYSIS_REDUCER_H_
diff --git a/deps/v8/src/compiler/turboshaft/late-optimization-phase.cc b/deps/v8/src/compiler/turboshaft/late-optimization-phase.cc
new file mode 100644
index 0000000000..c915d04372
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/late-optimization-phase.cc
@@ -0,0 +1,26 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/late-optimization-phase.h"
+
+#include "src/compiler/turboshaft/branch-elimination-reducer.h"
+#include "src/compiler/turboshaft/machine-optimization-reducer.h"
+#include "src/compiler/turboshaft/select-lowering-reducer.h"
+#include "src/compiler/turboshaft/value-numbering-reducer.h"
+#include "src/compiler/turboshaft/variable-reducer.h"
+#include "src/numbers/conversions-inl.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void LateOptimizationPhase::Run(PipelineData* data, Zone* temp_zone) {
+ // TODO(dmercadier,tebbi): add missing CommonOperatorReducer.
+ turboshaft::OptimizationPhase<
+ turboshaft::VariableReducer, turboshaft::BranchEliminationReducer,
+ turboshaft::SelectLoweringReducer,
+ turboshaft::MachineOptimizationReducerSignallingNanImpossible,
+ turboshaft::ValueNumberingReducer>::Run(data->isolate(), &data->graph(),
+ temp_zone, data->node_origins());
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/late-optimization-phase.h b/deps/v8/src/compiler/turboshaft/late-optimization-phase.h
new file mode 100644
index 0000000000..a2e0fb2085
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/late-optimization-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_LATE_OPTIMIZATION_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_LATE_OPTIMIZATION_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct LateOptimizationPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(LateOptimization)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_LATE_OPTIMIZATION_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/layered-hash-map.h b/deps/v8/src/compiler/turboshaft/layered-hash-map.h
new file mode 100644
index 0000000000..a481a8ddc8
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/layered-hash-map.h
@@ -0,0 +1,194 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_LAYERED_HASH_MAP_H_
+#define V8_COMPILER_TURBOSHAFT_LAYERED_HASH_MAP_H_
+
+#include <cstddef>
+#include <iostream>
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/optional.h"
+#include "src/compiler/turboshaft/fast-hash.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// LayeredHashMap is a hash map whose elements are grouped into layers, such
+// that it's efficient to remove all of the items from the last inserted layer.
+// In addition to the regular Insert/Get/Contains functions of hash maps, it
+// thus provides two additional functions: StartLayer to indicate that future
+// insertions are part of a new layer, and DropLastLayer to remove all of the
+// items of the last layer.
+//
+// LayeredHashMap does not support inserting multiple values with the same key,
+// and does not support updating already-inserted items in the map. If you need
+// to update an existing key, you'll need to remove it (by calling DropLastLayer
+// as many times as needed), and then re-insert it.
+//
+// The implementation uses a regular ZoneVector for the main hash table, while
+// keeping a linked list of items per layer. When inserting an item in the
+// LayeredHashMap, we insert it into the ZoneVector and link it to the linked
+// list of the current (=latest) layer. In order to remove all of the items from
+// the last layer, we iterate its linked list, and remove the items one by one
+// from the ZoneVector, after which we drop the linked list altogether.
+
+template <class Key, class Value>
+class LayeredHashMap {
+ public:
+ explicit LayeredHashMap(Zone* zone, uint32_t initial_capacity = 64);
+
+ void StartLayer();
+ void DropLastLayer();
+
+ void InsertNewKey(Key key, Value value);
+ bool Contains(Key key);
+ base::Optional<Value> Get(Key key);
+
+ private:
+ struct Entry {
+ size_t hash = 0;
+ Key key = Key();
+ Value value = Value();
+ Entry* depth_neighboring_entry = nullptr;
+ };
+ void ResizeIfNeeded();
+ size_t NextEntryIndex(size_t index) { return (index + 1) & mask_; }
+ Entry* FindEntryForKey(Key key, size_t hash = 0);
+ Entry* InsertEntry(Entry entry);
+
+ size_t ComputeHash(Key key) {
+ size_t hash = fast_hash<Key>()(key);
+ return V8_UNLIKELY(hash == 0) ? 1 : hash;
+ }
+
+ size_t mask_;
+ size_t entry_count_;
+ base::Vector<Entry> table_;
+ ZoneVector<Entry*> depths_heads_;
+ Zone* zone_;
+
+ static constexpr double kNeedResizePercentage = 0.75;
+ static constexpr int kGrowthFactor = 2;
+};
+
+template <class Key, class Value>
+LayeredHashMap<Key, Value>::LayeredHashMap(Zone* zone,
+ uint32_t initial_capacity)
+ : entry_count_(0), depths_heads_(zone), zone_(zone) {
+  // Set the minimum capacity to 16.
+ initial_capacity = std::max<uint32_t>(initial_capacity, 16);
+ // {initial_capacity} should be a power of 2, so that we can compute offset
+ // in {table_} with a mask rather than a modulo.
+ initial_capacity = base::bits::RoundUpToPowerOfTwo32(initial_capacity);
+ mask_ = initial_capacity - 1;
+  // Allocate {table_}.
+ table_ = zone_->NewVector(initial_capacity, Entry());
+}
+
+template <class Key, class Value>
+void LayeredHashMap<Key, Value>::StartLayer() {
+ depths_heads_.push_back(nullptr);
+}
+
+template <class Key, class Value>
+void LayeredHashMap<Key, Value>::DropLastLayer() {
+ DCHECK_GT(depths_heads_.size(), 0);
+ for (Entry* entry = depths_heads_.back(); entry != nullptr;) {
+ Entry* next = entry->depth_neighboring_entry;
+ *entry = Entry();
+ entry = next;
+ }
+ depths_heads_.pop_back();
+}
+
+template <class Key, class Value>
+typename LayeredHashMap<Key, Value>::Entry*
+LayeredHashMap<Key, Value>::FindEntryForKey(Key key, size_t hash) {
+ for (size_t i = hash & mask_;; i = NextEntryIndex(i)) {
+ if (table_[i].hash == 0) return &table_[i];
+ if (table_[i].hash == hash && table_[i].key == key) return &table_[i];
+ }
+}
+
+template <class Key, class Value>
+void LayeredHashMap<Key, Value>::InsertNewKey(Key key, Value value) {
+ ResizeIfNeeded();
+ size_t hash = ComputeHash(key);
+ Entry* destination = FindEntryForKey(key, hash);
+ DCHECK_EQ(destination->hash, 0);
+ *destination = Entry{hash, key, value, depths_heads_.back()};
+ depths_heads_.back() = destination;
+}
+
+template <class Key, class Value>
+base::Optional<Value> LayeredHashMap<Key, Value>::Get(Key key) {
+ Entry* destination = FindEntryForKey(key, ComputeHash(key));
+ if (destination->hash == 0) return base::nullopt;
+ return destination->value;
+}
+
+template <class Key, class Value>
+bool LayeredHashMap<Key, Value>::Contains(Key key) {
+ return Get(key).has_value();
+}
+
+template <class Key, class Value>
+void LayeredHashMap<Key, Value>::ResizeIfNeeded() {
+ if (table_.size() * kNeedResizePercentage > entry_count_) return;
+ CHECK_LE(table_.size(), std::numeric_limits<size_t>::max() / kGrowthFactor);
+ table_ = zone_->NewVector<Entry>(table_.size() * kGrowthFactor, Entry());
+ mask_ = table_.size() - 1;
+ DCHECK_EQ(base::bits::CountPopulation(mask_),
+ sizeof(mask_) * 8 - base::bits::CountLeadingZeros(mask_));
+ for (size_t depth_idx = 0; depth_idx < depths_heads_.size(); depth_idx++) {
+    // It's important to refill the new hash table by inserting entries in
+    // increasing depth order, to avoid leaving holes when DropLastLayer is
+    // called later. Consider for instance:
+    //
+ //
+ // ---+------+------+------+----
+ // | a1 | a2 | a3 |
+ // ---+------+------+------+----
+ //
+ // Where a1, a2 and a3 have the same hash. By construction, we know that
+ // depth(a1) <= depth(a2) <= depth(a3). If, when re-hashing, we were to
+ // insert them in another order, say:
+ //
+ // ---+------+------+------+----
+ // | a3 | a1 | a2 |
+ // ---+------+------+------+----
+ //
+    // Then, when we call DropLastLayer to remove entries from a3's depth,
+ // we'll get this:
+ //
+ // ---+------+------+------+----
+ // | null | a1 | a2 |
+ // ---+------+------+------+----
+ //
+    // And, when checking whether a1 is in the hash table, we'd find a "null"
+    // where we expect it, and wrongly conclude that it's not present. If,
+    // instead, we always preserve the increasing depth order, then when
+    // removing a3, we'd get:
+ //
+ // ---+------+------+------+----
+ // | a1 | a2 | null |
+ // ---+------+------+------+----
+ //
+ // Where we can still find a1 and a2.
+ Entry* entry = depths_heads_[depth_idx];
+ depths_heads_[depth_idx] = nullptr;
+ while (entry != nullptr) {
+ Entry* new_entry_loc = FindEntryForKey(entry->key, entry->hash);
+ *new_entry_loc = *entry;
+ Entry* next_entry = entry->depth_neighboring_entry;
+ new_entry_loc->depth_neighboring_entry = depths_heads_[depth_idx];
+ depths_heads_[depth_idx] = new_entry_loc;
+ entry = next_entry;
+ }
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_LAYERED_HASH_MAP_H_
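As a quick illustration of the layered API described in the comment above, here is a hypothetical usage sketch (not part of this commit); it assumes a live Zone and uses DCHECK only to spell out the expected behaviour.

#include "src/base/logging.h"
#include "src/compiler/turboshaft/layered-hash-map.h"
#include "src/zone/zone.h"

namespace v8::internal::compiler::turboshaft {

// Keys inserted after StartLayer() disappear together when DropLastLayer()
// is called; keys from older layers stay intact.
void LayeredHashMapUsage(Zone* zone) {
  LayeredHashMap<int, int> map(zone);

  map.StartLayer();
  map.InsertNewKey(1, 10);

  map.StartLayer();
  map.InsertNewKey(2, 20);
  DCHECK(map.Contains(1) && map.Contains(2));

  map.DropLastLayer();         // removes key 2 only
  DCHECK(!map.Contains(2));
  DCHECK_EQ(*map.Get(1), 10);  // key 1, from the older layer, survives

  map.DropLastLayer();         // the map is now empty
  DCHECK(!map.Contains(1));
}

}  // namespace v8::internal::compiler::turboshaft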
diff --git a/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc b/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc
new file mode 100644
index 0000000000..5473164a31
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/machine-lowering-phase.cc
@@ -0,0 +1,21 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/machine-lowering-phase.h"
+
+#include "src/compiler/turboshaft/machine-lowering-reducer.h"
+#include "src/compiler/turboshaft/variable-reducer.h"
+#include "src/heap/factory-inl.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void MachineLoweringPhase::Run(PipelineData* data, Zone* temp_zone) {
+ turboshaft::OptimizationPhase<turboshaft::MachineLoweringReducer,
+ turboshaft::VariableReducer>::
+ Run(data->isolate(), &data->graph(), temp_zone, data->node_origins(),
+ std::tuple{turboshaft::MachineLoweringReducerArgs{
+ data->isolate()->factory(), data->isolate()}});
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/machine-lowering-phase.h b/deps/v8/src/compiler/turboshaft/machine-lowering-phase.h
new file mode 100644
index 0000000000..468e627f52
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/machine-lowering-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct MachineLoweringPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(MachineLowering)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/machine-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/machine-lowering-reducer.h
new file mode 100644
index 0000000000..41199bec37
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/machine-lowering-reducer.h
@@ -0,0 +1,1811 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_REDUCER_H_
+
+#include "src/base/v8-fallthrough.h"
+#include "src/common/globals.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/feedback-source.h"
+#include "src/compiler/globals.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/index.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/optimization-phase.h"
+#include "src/compiler/turboshaft/reducer-traits.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/objects/bigint.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/oddball.h"
+#include "src/runtime/runtime.h"
+#include "src/utils/utils.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+#include "src/compiler/turboshaft/define-assembler-macros.inc"
+
+struct MachineLoweringReducerArgs {
+ Factory* factory;
+ Isolate* isolate;
+};
+
+// MachineLoweringReducer, formerly known as EffectControlLinearizer, lowers
+// simplified operations to machine operations.
+template <typename Next>
+class MachineLoweringReducer : public Next {
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ using ArgT =
+ base::append_tuple_type<typename Next::ArgT, MachineLoweringReducerArgs>;
+
+ template <typename... Args>
+ explicit MachineLoweringReducer(const std::tuple<Args...>& args)
+ : Next(args),
+ factory_(std::get<MachineLoweringReducerArgs>(args).factory),
+ isolate_(std::get<MachineLoweringReducerArgs>(args).isolate) {}
+
+ bool NeedsHeapObjectCheck(ObjectIsOp::InputAssumptions input_assumptions) {
+ // TODO(nicohartmann@): Consider type information once we have that.
+ switch (input_assumptions) {
+ case ObjectIsOp::InputAssumptions::kNone:
+ return true;
+ case ObjectIsOp::InputAssumptions::kHeapObject:
+ case ObjectIsOp::InputAssumptions::kBigInt:
+ return false;
+ }
+ }
+
+ OpIndex ReduceChangeOrDeopt(OpIndex input, OpIndex frame_state,
+ ChangeOrDeoptOp::Kind kind,
+ CheckForMinusZeroMode minus_zero_mode,
+ const FeedbackSource& feedback) {
+ switch (kind) {
+ case ChangeOrDeoptOp::Kind::kUint32ToInt32: {
+ __ DeoptimizeIf(__ Int32LessThan(input, 0), frame_state,
+ DeoptimizeReason::kLostPrecision, feedback);
+ return input;
+ }
+ case ChangeOrDeoptOp::Kind::kInt64ToInt32: {
+ // Int64 is truncated to Int32 implicitly.
+ V<Word32> i32 = input;
+ __ DeoptimizeIfNot(__ Word64Equal(__ ChangeInt32ToInt64(i32), input),
+ frame_state, DeoptimizeReason::kLostPrecision,
+ feedback);
+ return i32;
+ }
+ case ChangeOrDeoptOp::Kind::kUint64ToInt32: {
+ __ DeoptimizeIfNot(
+ __ Uint64LessThanOrEqual(input, static_cast<uint64_t>(kMaxInt)),
+ frame_state, DeoptimizeReason::kLostPrecision, feedback);
+        // Uint64 is truncated to Int32 implicitly.
+ return input;
+ }
+ case ChangeOrDeoptOp::Kind::kUint64ToInt64: {
+ __ DeoptimizeIfNot(__ Uint64LessThanOrEqual(
+ input, std::numeric_limits<int64_t>::max()),
+ frame_state, DeoptimizeReason::kLostPrecision,
+ feedback);
+ return input;
+ }
+ case ChangeOrDeoptOp::Kind::kFloat64ToInt32: {
+ V<Word32> i32 = __ TruncateFloat64ToInt32OverflowUndefined(input);
+ __ DeoptimizeIfNot(__ Float64Equal(__ ChangeInt32ToFloat64(i32), input),
+ frame_state, DeoptimizeReason::kLostPrecisionOrNaN,
+ feedback);
+
+ if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // Check if {value} is -0.
+ IF_UNLIKELY(__ Word32Equal(i32, 0)) {
+ // In case of 0, we need to check the high bits for the IEEE -0
+ // pattern.
+ V<Word32> check_negative =
+ __ Int32LessThan(__ Float64ExtractHighWord32(input), 0);
+ __ DeoptimizeIf(check_negative, frame_state,
+ DeoptimizeReason::kMinusZero, feedback);
+ }
+ END_IF
+ }
+
+ return i32;
+ }
+ case ChangeOrDeoptOp::Kind::kFloat64ToInt64: {
+ V<Word64> i64 = __ TruncateFloat64ToInt64OverflowUndefined(input);
+ __ DeoptimizeIfNot(__ Float64Equal(__ ChangeInt64ToFloat64(i64), input),
+ frame_state, DeoptimizeReason::kLostPrecisionOrNaN,
+ feedback);
+
+ if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // Check if {value} is -0.
+ IF_UNLIKELY(__ Word64Equal(i64, 0)) {
+ // In case of 0, we need to check the high bits for the IEEE -0
+ // pattern.
+ V<Word32> check_negative =
+ __ Int32LessThan(__ Float64ExtractHighWord32(input), 0);
+ __ DeoptimizeIf(check_negative, frame_state,
+ DeoptimizeReason::kMinusZero, feedback);
+ }
+ END_IF
+ }
+
+ return i64;
+ }
+ }
+ UNREACHABLE();
+ }
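For reference, here is a plain-C++ reading of the kFloat64ToInt32 case above (an illustrative sketch, not the emitted code): truncate, verify that the value round-trips, and optionally reject -0 via the sign bit; returning false corresponds to a deopt. The explicit range guard is only there to keep the C++ cast well defined; the real lowering relies on the round-trip comparison instead.

#include <cmath>
#include <cstdint>

// Scalar semantics of the kFloat64ToInt32 lowering sketched above; a false
// return corresponds to DeoptimizeIf/DeoptimizeIfNot firing.
bool ChangeFloat64ToInt32Sketch(double input, bool check_minus_zero,
                                int32_t* out) {
  // Reject NaN and out-of-range inputs up front (keeps the cast defined).
  if (!(input > -2147483649.0 && input < 2147483648.0)) return false;
  int32_t i32 = static_cast<int32_t>(input);  // truncation toward zero
  if (static_cast<double>(i32) != input) return false;  // kLostPrecisionOrNaN
  if (check_minus_zero && i32 == 0 && std::signbit(input)) {
    return false;  // kMinusZero
  }
  *out = i32;
  return true;
}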
+
+ V<Word32> ReduceObjectIs(V<Tagged> input, ObjectIsOp::Kind kind,
+ ObjectIsOp::InputAssumptions input_assumptions) {
+ switch (kind) {
+ case ObjectIsOp::Kind::kBigInt:
+ case ObjectIsOp::Kind::kBigInt64: {
+ DCHECK_IMPLIES(kind == ObjectIsOp::Kind::kBigInt64, Is64());
+
+ Label<Word32> done(this);
+
+ if (input_assumptions != ObjectIsOp::InputAssumptions::kBigInt) {
+ if (NeedsHeapObjectCheck(input_assumptions)) {
+ // Check for Smi.
+ GOTO_IF(IsSmi(input), done, 0);
+ }
+
+ // Check for BigInt.
+ V<Tagged> map = __ LoadMapField(input);
+ V<Word32> is_bigint_map =
+ __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map()));
+ GOTO_IF_NOT(is_bigint_map, done, 0);
+ }
+
+ if (kind == ObjectIsOp::Kind::kBigInt) {
+ GOTO(done, 1);
+ } else {
+ DCHECK_EQ(kind, ObjectIsOp::Kind::kBigInt64);
+ // We have to perform check for BigInt64 range.
+ V<Word32> bitfield = __ template LoadField<Word32>(
+ input, AccessBuilder::ForBigIntBitfield());
+ GOTO_IF(__ Word32Equal(bitfield, 0), done, 1);
+
+ // Length must be 1.
+ V<Word32> length_field =
+ __ Word32BitwiseAnd(bitfield, BigInt::LengthBits::kMask);
+ GOTO_IF_NOT(__ Word32Equal(length_field,
+ uint32_t{1} << BigInt::LengthBits::kShift),
+ done, 0);
+
+ // Check if it fits in 64 bit signed int.
+ V<Word64> lsd = __ template LoadField<Word64>(
+ input, AccessBuilder::ForBigIntLeastSignificantDigit64());
+ V<Word32> magnitude_check = __ Uint64LessThanOrEqual(
+ lsd, std::numeric_limits<int64_t>::max());
+ GOTO_IF(magnitude_check, done, 1);
+
+        // At this point the BigInt does not fit into a signed int64 unless it
+        // is exactly int64_t::min; check for that case.
+ V<Word32> sign =
+ __ Word32BitwiseAnd(bitfield, BigInt::SignBits::kMask);
+ V<Word32> sign_check = __ Word32Equal(sign, BigInt::SignBits::kMask);
+ GOTO_IF_NOT(sign_check, done, 0);
+
+ V<Word32> min_check =
+ __ Word64Equal(lsd, std::numeric_limits<int64_t>::min());
+ GOTO_IF(min_check, done, 1);
+
+ GOTO(done, 0);
+ }
+
+ BIND(done, result);
+ return result;
+ }
+ case ObjectIsOp::Kind::kCallable:
+ case ObjectIsOp::Kind::kConstructor:
+ case ObjectIsOp::Kind::kDetectableCallable:
+ case ObjectIsOp::Kind::kNonCallable:
+ case ObjectIsOp::Kind::kReceiver:
+ case ObjectIsOp::Kind::kReceiverOrNullOrUndefined:
+ case ObjectIsOp::Kind::kUndetectable: {
+ Label<Word32> done(this);
+
+ // Check for Smi if necessary.
+ if (NeedsHeapObjectCheck(input_assumptions)) {
+ GOTO_IF(IsSmi(input), done, 0);
+ }
+
+ // Load bitfield from map.
+ V<Tagged> map = __ LoadMapField(input);
+ V<Word32> bitfield =
+ __ template LoadField<Word32>(map, AccessBuilder::ForMapBitField());
+
+ V<Word32> check;
+ switch (kind) {
+ case ObjectIsOp::Kind::kCallable:
+ check =
+ __ Word32Equal(Map::Bits1::IsCallableBit::kMask,
+ __ Word32BitwiseAnd(
+ bitfield, Map::Bits1::IsCallableBit::kMask));
+ break;
+ case ObjectIsOp::Kind::kConstructor:
+ check = __ Word32Equal(
+ Map::Bits1::IsConstructorBit::kMask,
+ __ Word32BitwiseAnd(bitfield,
+ Map::Bits1::IsConstructorBit::kMask));
+ break;
+ case ObjectIsOp::Kind::kDetectableCallable:
+ check = __ Word32Equal(
+ Map::Bits1::IsCallableBit::kMask,
+ __ Word32BitwiseAnd(
+ bitfield, (Map::Bits1::IsCallableBit::kMask) |
+ (Map::Bits1::IsUndetectableBit::kMask)));
+ break;
+ case ObjectIsOp::Kind::kNonCallable:
+ check = __ Word32Equal(
+ 0, __ Word32BitwiseAnd(bitfield,
+ Map::Bits1::IsCallableBit::kMask));
+ GOTO_IF_NOT(check, done, 0);
+ // Fallthrough into receiver check.
+ V8_FALLTHROUGH;
+ case ObjectIsOp::Kind::kReceiver: {
+ static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+ check =
+ __ Uint32LessThanOrEqual(FIRST_JS_RECEIVER_TYPE, instance_type);
+ break;
+ }
+ case ObjectIsOp::Kind::kReceiverOrNullOrUndefined: {
+ static_assert(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
+ static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ // Rule out all primitives except oddballs (true, false, undefined,
+ // null).
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+ GOTO_IF_NOT(__ Uint32LessThanOrEqual(ODDBALL_TYPE, instance_type),
+ done, 0);
+
+ // Rule out booleans.
+ check = __ Word32Equal(
+ 0,
+ __ TaggedEqual(map, __ HeapConstant(factory_->boolean_map())));
+ break;
+ }
+ case ObjectIsOp::Kind::kUndetectable:
+ check = __ Word32Equal(
+ Map::Bits1::IsUndetectableBit::kMask,
+ __ Word32BitwiseAnd(bitfield,
+ Map::Bits1::IsUndetectableBit::kMask));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ GOTO(done, check);
+
+ BIND(done, result);
+ return result;
+ }
+ case ObjectIsOp::Kind::kSmi: {
+ // If we statically know that this is a heap object, it cannot be a Smi.
+ if (!NeedsHeapObjectCheck(input_assumptions)) {
+ return __ Word32Constant(0);
+ }
+ return IsSmi(input);
+ }
+ case ObjectIsOp::Kind::kNumber: {
+ Label<Word32> done(this);
+
+ // Check for Smi if necessary.
+ if (NeedsHeapObjectCheck(input_assumptions)) {
+ GOTO_IF(IsSmi(input), done, 1);
+ }
+
+ V<Tagged> map = __ LoadMapField(input);
+ GOTO(done,
+ __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())));
+
+ BIND(done, result);
+ return result;
+ }
+ case ObjectIsOp::Kind::kSymbol:
+ case ObjectIsOp::Kind::kString:
+ case ObjectIsOp::Kind::kArrayBufferView: {
+ Label<Word32> done(this);
+
+ // Check for Smi if necessary.
+ if (NeedsHeapObjectCheck(input_assumptions)) {
+ GOTO_IF(IsSmi(input), done, 0);
+ }
+
+ // Load instance type from map.
+ V<Tagged> map = __ LoadMapField(input);
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+
+ V<Word32> check;
+ switch (kind) {
+ case ObjectIsOp::Kind::kSymbol:
+ check = __ Word32Equal(instance_type, SYMBOL_TYPE);
+ break;
+ case ObjectIsOp::Kind::kString:
+ check = __ Uint32LessThan(instance_type, FIRST_NONSTRING_TYPE);
+ break;
+ case ObjectIsOp::Kind::kArrayBufferView:
+ check = __ Uint32LessThan(
+ __ Word32Sub(instance_type, FIRST_JS_ARRAY_BUFFER_VIEW_TYPE),
+ LAST_JS_ARRAY_BUFFER_VIEW_TYPE -
+ FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ GOTO(done, check);
+
+ BIND(done, result);
+ return result;
+ }
+ case ObjectIsOp::Kind::kInternalizedString: {
+ DCHECK_EQ(input_assumptions, ObjectIsOp::InputAssumptions::kHeapObject);
+ // Load instance type from map.
+ V<Tagged> map = __ LoadMapField(input);
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+
+ return __ Word32Equal(
+ __ Word32BitwiseAnd(instance_type,
+ (kIsNotStringMask | kIsNotInternalizedMask)),
+ kInternalizedTag);
+ }
+ }
+
+ UNREACHABLE();
+ }
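A scalar sketch (illustrative only, with hypothetical names) of the kBigInt64 range check implemented above: once the length check has established a single 64-bit digit, the value fits in int64_t iff its magnitude is at most INT64_MAX, or it is negative with magnitude exactly 2^63 (i.e. int64_t::min).

#include <cstdint>
#include <limits>

// {lsd} is the single 64-bit digit (the magnitude), {is_negative} the sign
// bit; a zero-length BigInt (canonical zero) trivially fits.
bool BigIntFitsInt64Sketch(uint64_t lsd, bool is_negative, bool is_zero) {
  if (is_zero) return true;
  if (lsd <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
    return true;
  }
  return is_negative &&
         lsd == static_cast<uint64_t>(std::numeric_limits<int64_t>::min());
}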
+
+ V<Word32> ReduceFloatIs(OpIndex input, FloatIsOp::Kind kind,
+ FloatRepresentation input_rep) {
+ DCHECK_EQ(input_rep, FloatRepresentation::Float64());
+ switch (kind) {
+ case FloatIsOp::Kind::kNaN: {
+ OpIndex diff = __ Float64Equal(input, input);
+ return __ Word32Equal(diff, 0);
+ }
+ }
+
+ UNREACHABLE();
+ }
+
+ OpIndex ReduceConvertToObject(
+ OpIndex input, ConvertToObjectOp::Kind kind,
+ RegisterRepresentation input_rep,
+ ConvertToObjectOp::InputInterpretation input_interpretation,
+ CheckForMinusZeroMode minus_zero_mode) {
+ switch (kind) {
+ case ConvertToObjectOp::Kind::kBigInt: {
+ DCHECK(Is64());
+ DCHECK_EQ(input_rep, RegisterRepresentation::Word64());
+ Label<Tagged> done(this);
+
+ // BigInts with value 0 must be of size 0 (canonical form).
+ GOTO_IF(__ Word64Equal(input, int64_t{0}), done,
+ AllocateBigInt(OpIndex::Invalid(), OpIndex::Invalid()));
+
+ if (input_interpretation ==
+ ConvertToObjectOp::InputInterpretation::kSigned) {
+ // Shift sign bit into BigInt's sign bit position.
+ V<Word32> bitfield = __ Word32BitwiseOr(
+ BigInt::LengthBits::encode(1),
+ __ Word64ShiftRightLogical(
+ input, static_cast<int64_t>(63 - BigInt::SignBits::kShift)));
+
+ // We use (value XOR (value >> 63)) - (value >> 63) to compute the
+ // absolute value, in a branchless fashion.
+ V<Word64> sign_mask =
+ __ Word64ShiftRightArithmetic(input, int64_t{63});
+ V<Word64> absolute_value =
+ __ Word64Sub(__ Word64BitwiseXor(input, sign_mask), sign_mask);
+ GOTO(done, AllocateBigInt(bitfield, absolute_value));
+ } else {
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOp::InputInterpretation::kUnsigned);
+ const auto bitfield = BigInt::LengthBits::encode(1);
+ GOTO(done, AllocateBigInt(__ Word32Constant(bitfield), input));
+ }
+ BIND(done, result);
+ return result;
+ }
+ case ConvertToObjectOp::Kind::kNumber: {
+ if (input_rep == RegisterRepresentation::Word32()) {
+ switch (input_interpretation) {
+ case ConvertToObjectOp::InputInterpretation::kSigned: {
+ if (SmiValuesAre32Bits()) {
+ return __ SmiTag(input);
+ }
+ DCHECK(SmiValuesAre31Bits());
+
+ Label<Tagged> done(this);
+ Label<> overflow(this);
+
+ SmiTagOrOverflow(input, &overflow, &done);
+
+ if (BIND(overflow)) {
+ GOTO(done, AllocateHeapNumberWithValue(
+ __ ChangeInt32ToFloat64(input)));
+ }
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertToObjectOp::InputInterpretation::kUnsigned: {
+ Label<Tagged> done(this);
+
+ GOTO_IF(__ Uint32LessThanOrEqual(input, Smi::kMaxValue), done,
+ __ SmiTag(input));
+ GOTO(done, AllocateHeapNumberWithValue(
+ __ ChangeUint32ToFloat64(input)));
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertToObjectOp::InputInterpretation::kCharCode:
+ case ConvertToObjectOp::InputInterpretation::kCodePoint:
+ UNREACHABLE();
+ }
+ } else if (input_rep == RegisterRepresentation::Word64()) {
+ switch (input_interpretation) {
+ case ConvertToObjectOp::InputInterpretation::kSigned: {
+ Label<Tagged> done(this);
+ Label<> outside_smi_range(this);
+
+ V<Word32> v32 = input;
+ V<Word64> v64 = __ ChangeInt32ToInt64(v32);
+ GOTO_IF_NOT(__ Word64Equal(v64, input), outside_smi_range);
+
+ if constexpr (SmiValuesAre32Bits()) {
+ GOTO(done, __ SmiTag(input));
+ } else {
+ SmiTagOrOverflow(v32, &outside_smi_range, &done);
+ }
+
+ if (BIND(outside_smi_range)) {
+ GOTO(done, AllocateHeapNumberWithValue(
+ __ ChangeInt64ToFloat64(input)));
+ }
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertToObjectOp::InputInterpretation::kUnsigned: {
+ Label<Tagged> done(this);
+
+ GOTO_IF(__ Uint64LessThanOrEqual(input, Smi::kMaxValue), done,
+ __ SmiTag(input));
+ GOTO(done,
+ AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(input)));
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertToObjectOp::InputInterpretation::kCharCode:
+ case ConvertToObjectOp::InputInterpretation::kCodePoint:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(input_rep, RegisterRepresentation::Float64());
+ Label<Tagged> done(this);
+ Label<> outside_smi_range(this);
+
+ V<Word32> v32 = __ TruncateFloat64ToInt32OverflowUndefined(input);
+ GOTO_IF_NOT(__ Float64Equal(input, __ ChangeInt32ToFloat64(v32)),
+ outside_smi_range);
+
+ if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // In case of 0, we need to check the high bits for the IEEE -0
+ // pattern.
+ IF(__ Word32Equal(v32, 0)) {
+ GOTO_IF(__ Int32LessThan(__ Float64ExtractHighWord32(input), 0),
+ outside_smi_range);
+ }
+ END_IF
+ }
+
+ if constexpr (SmiValuesAre32Bits()) {
+ GOTO(done, __ SmiTag(v32));
+ } else {
+ SmiTagOrOverflow(v32, &outside_smi_range, &done);
+ }
+
+ if (BIND(outside_smi_range)) {
+ GOTO(done, AllocateHeapNumberWithValue(input));
+ }
+
+ BIND(done, result);
+ return result;
+ }
+ UNREACHABLE();
+ break;
+ }
+ case ConvertToObjectOp::Kind::kHeapNumber: {
+ DCHECK_EQ(input_rep, RegisterRepresentation::Float64());
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOp::InputInterpretation::kSigned);
+ return AllocateHeapNumberWithValue(input);
+ }
+ case ConvertToObjectOp::Kind::kSmi: {
+ DCHECK_EQ(input_rep, RegisterRepresentation::Word32());
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOp::InputInterpretation::kSigned);
+ return __ SmiTag(input);
+ }
+ case ConvertToObjectOp::Kind::kBoolean: {
+ DCHECK_EQ(input_rep, RegisterRepresentation::Word32());
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOp::InputInterpretation::kSigned);
+ Label<Tagged> done(this);
+
+ IF(input) { GOTO(done, __ HeapConstant(factory_->true_value())); }
+ ELSE { GOTO(done, __ HeapConstant(factory_->false_value())); }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertToObjectOp::Kind::kString: {
+ Label<Word32> single_code(this);
+ Label<Tagged> done(this);
+
+ if (input_interpretation ==
+ ConvertToObjectOp::InputInterpretation::kCharCode) {
+ GOTO(single_code, __ Word32BitwiseAnd(input, 0xFFFF));
+ } else {
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOp::InputInterpretation::kCodePoint);
+ // Check if the input is a single code unit.
+ GOTO_IF_LIKELY(__ Uint32LessThanOrEqual(input, 0xFFFF), single_code,
+ input);
+
+ // Generate surrogate pair string.
+
+        // Convert UTF-32 to UTF-16 code units and store them as a 32-bit word.
+ V<Word32> lead_offset = __ Word32Constant(0xD800 - (0x10000 >> 10));
+
+ // lead = (codepoint >> 10) + LEAD_OFFSET
+ V<Word32> lead =
+ __ Word32Add(__ Word32ShiftRightLogical(input, 10), lead_offset);
+
+ // trail = (codepoint & 0x3FF) + 0xDC00
+ V<Word32> trail =
+ __ Word32Add(__ Word32BitwiseAnd(input, 0x3FF), 0xDC00);
+
+ // codepoint = (trail << 16) | lead
+#if V8_TARGET_BIG_ENDIAN
+ V<Word32> code =
+ __ Word32BitwiseOr(__ Word32ShiftLeft(lead, 16), trail);
+#else
+ V<Word32> code =
+ __ Word32BitwiseOr(__ Word32ShiftLeft(trail, 16), lead);
+#endif
+
+ // Allocate a new SeqTwoByteString for {code}.
+ V<Tagged> string =
+ __ Allocate(__ IntPtrConstant(SeqTwoByteString::SizeFor(2)),
+ AllocationType::kYoung);
+ // Set padding to 0.
+ __ Store(string, __ IntPtrConstant(0),
+ StoreOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::TaggedSigned(), kNoWriteBarrier,
+ SeqTwoByteString::SizeFor(2) - kObjectAlignment);
+ __ StoreField(string, AccessBuilder::ForMap(),
+ __ HeapConstant(factory_->string_map()));
+ __ StoreField(string, AccessBuilder::ForNameRawHashField(),
+ __ Word32Constant(Name::kEmptyHashField));
+ __ StoreField(string, AccessBuilder::ForStringLength(),
+ __ Word32Constant(2));
+ __ Store(string, code,
+ StoreOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::Uint32(), kNoWriteBarrier,
+ SeqTwoByteString::kHeaderSize);
+ GOTO(done, string);
+ }
+
+ if (BIND(single_code, code)) {
+ // Check if the {code} is a one byte character.
+ IF_LIKELY(
+ __ Uint32LessThanOrEqual(code, String::kMaxOneByteCharCode)) {
+ // Load the isolate wide single character string table.
+ OpIndex table =
+ __ HeapConstant(factory_->single_character_string_table());
+
+ // Compute the {table} index for {code}.
+ V<WordPtr> index = __ ChangeUint32ToUintPtr(code);
+
+ // Load the string for the {code} from the single character string
+ // table.
+ OpIndex entry = __ LoadElement(
+ table, AccessBuilder::ForFixedArrayElement(), index);
+
+ // Use the {entry} from the {table}.
+ GOTO(done, entry);
+ }
+ ELSE {
+          // Allocate a new SeqTwoByteString for {code}.
+ V<Tagged> string =
+ __ Allocate(__ IntPtrConstant(SeqTwoByteString::SizeFor(1)),
+ AllocationType::kYoung);
+
+ // Set padding to 0.
+ __ Store(string, __ IntPtrConstant(0),
+ StoreOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::TaggedSigned(), kNoWriteBarrier,
+ SeqTwoByteString::SizeFor(1) - kObjectAlignment);
+ __ StoreField(string, AccessBuilder::ForMap(),
+ __ HeapConstant(factory_->string_map()));
+ __ StoreField(string, AccessBuilder::ForNameRawHashField(),
+ __ Word32Constant(Name::kEmptyHashField));
+ __ StoreField(string, AccessBuilder::ForStringLength(),
+ __ Word32Constant(1));
+ __ Store(string, code,
+ StoreOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::Uint16(), kNoWriteBarrier,
+ SeqTwoByteString::kHeaderSize);
+ GOTO(done, string);
+ }
+ END_IF
+ }
+
+ BIND(done, result);
+ return result;
+ }
+ }
+
+ UNREACHABLE();
+ }
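As a sanity check on the surrogate-pair arithmetic in the kCodePoint branch above, take the code point U+1F600: lead = (0x1F600 >> 10) + (0xD800 - (0x10000 >> 10)) = 0x7D + 0xD7C0 = 0xD83D, and trail = (0x1F600 & 0x3FF) + 0xDC00 = 0x200 + 0xDC00 = 0xDE00. On a little-endian target the stored 32-bit word is therefore (0xDE00 << 16) | 0xD83D, which lays the two UTF-16 code units out in memory as 0xD83D followed by 0xDE00.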
+
+ OpIndex ReduceConvertToObjectOrDeopt(
+ OpIndex input, OpIndex frame_state, ConvertToObjectOrDeoptOp::Kind kind,
+ RegisterRepresentation input_rep,
+ ConvertToObjectOrDeoptOp::InputInterpretation input_interpretation,
+ const FeedbackSource& feedback) {
+ DCHECK_EQ(kind, ConvertToObjectOrDeoptOp::Kind::kSmi);
+ if (input_rep == RegisterRepresentation::Word32()) {
+ if (input_interpretation ==
+ ConvertToObjectOrDeoptOp::InputInterpretation::kSigned) {
+ if constexpr (SmiValuesAre32Bits()) {
+ return __ SmiTag(input);
+ } else {
+ OpIndex test = __ Int32AddCheckOverflow(input, input);
+ __ DeoptimizeIf(__ template Projection<Word32>(test, 1), frame_state,
+ DeoptimizeReason::kLostPrecision, feedback);
+ return __ SmiTag(input);
+ }
+ } else {
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOrDeoptOp::InputInterpretation::kUnsigned);
+ V<Word32> check = __ Uint32LessThanOrEqual(input, Smi::kMaxValue);
+ __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kLostPrecision,
+ feedback);
+ return __ SmiTag(input);
+ }
+ } else {
+ DCHECK_EQ(input_rep, RegisterRepresentation::Word64());
+ if (input_interpretation ==
+ ConvertToObjectOrDeoptOp::InputInterpretation::kSigned) {
+ // Word32 truncation is implicit.
+ V<Word32> i32 = input;
+ V<Word32> check = __ Word64Equal(__ ChangeInt32ToInt64(i32), input);
+ __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kLostPrecision,
+ feedback);
+ if constexpr (SmiValuesAre32Bits()) {
+ return __ SmiTag(input);
+ } else {
+ OpIndex test = __ Int32AddCheckOverflow(i32, i32);
+ __ DeoptimizeIf(__ template Projection<Word32>(test, 1), frame_state,
+ DeoptimizeReason::kLostPrecision, feedback);
+ return __ SmiTag(i32);
+ }
+ } else {
+ DCHECK_EQ(input_interpretation,
+ ConvertToObjectOrDeoptOp::InputInterpretation::kUnsigned);
+ V<Word32> check = __ Uint64LessThanOrEqual(
+ input, static_cast<uint64_t>(Smi::kMaxValue));
+ __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kLostPrecision,
+ feedback);
+ return __ SmiTag(input);
+ }
+ }
+
+ UNREACHABLE();
+ }
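The 31-bit Smi path above relies on the identity that tagging is a left shift by one, so Int32AddCheckOverflow(input, input) both computes the tagged value and detects values that do not fit in 31 bits. A hypothetical sketch of the same idea using a compiler builtin (GCC/Clang only, not part of this commit):

#include <cstdint>

// Tagging a 31-bit Smi is value << 1; value + value overflows int32 exactly
// when the value does not fit in 31 bits.
bool TrySmiTag31(int32_t value, int32_t* tagged) {
  return !__builtin_add_overflow(value, value, tagged);
}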
+
+ OpIndex ReduceConvertObjectToPrimitive(
+ OpIndex object, ConvertObjectToPrimitiveOp::Kind kind,
+ ConvertObjectToPrimitiveOp::InputAssumptions input_assumptions) {
+ switch (kind) {
+ case ConvertObjectToPrimitiveOp::Kind::kInt32:
+ if (input_assumptions ==
+ ConvertObjectToPrimitiveOp::InputAssumptions::kSmi) {
+ return __ SmiUntag(object);
+ } else {
+ DCHECK_EQ(
+ input_assumptions,
+ ConvertObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball);
+ Label<Word32> done(this);
+
+ IF(__ ObjectIsSmi(object)) { GOTO(done, __ SmiUntag(object)); }
+ ELSE {
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ V<Float64> value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, __ ReversibleFloat64ToInt32(value));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ UNREACHABLE();
+ case ConvertObjectToPrimitiveOp::Kind::kInt64:
+ if (input_assumptions ==
+ ConvertObjectToPrimitiveOp::InputAssumptions::kSmi) {
+ return __ ChangeInt32ToInt64(__ SmiUntag(object));
+ } else {
+ DCHECK_EQ(
+ input_assumptions,
+ ConvertObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball);
+ Label<Word64> done(this);
+
+ IF(__ ObjectIsSmi(object)) {
+ GOTO(done, __ ChangeInt32ToInt64(__ SmiUntag(object)));
+ }
+ ELSE {
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ V<Float64> value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, __ ReversibleFloat64ToInt64(value));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ UNREACHABLE();
+ case ConvertObjectToPrimitiveOp::Kind::kUint32: {
+ DCHECK_EQ(
+ input_assumptions,
+ ConvertObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball);
+ Label<Word32> done(this);
+
+ IF(__ ObjectIsSmi(object)) { GOTO(done, __ SmiUntag(object)); }
+ ELSE {
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ V<Float64> value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, __ ReversibleFloat64ToUint32(value));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertObjectToPrimitiveOp::Kind::kBit:
+ DCHECK_EQ(input_assumptions,
+ ConvertObjectToPrimitiveOp::InputAssumptions::kObject);
+ return __ TaggedEqual(object, __ HeapConstant(factory_->true_value()));
+ case ConvertObjectToPrimitiveOp::Kind::kFloat64: {
+ DCHECK_EQ(
+ input_assumptions,
+ ConvertObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball);
+ Label<Float64> done(this);
+
+ IF(__ ObjectIsSmi(object)) {
+ GOTO(done, __ ChangeInt32ToFloat64(__ SmiUntag(object)));
+ }
+ ELSE {
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ V<Float64> value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, value);
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ }
+ }
+
+ OpIndex ReduceConvertObjectToPrimitiveOrDeopt(
+ V<Tagged> object, OpIndex frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind from_kind,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind to_kind,
+ CheckForMinusZeroMode minus_zero_mode, const FeedbackSource& feedback) {
+ switch (to_kind) {
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt32: {
+ if (from_kind == ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kSmi) {
+ __ DeoptimizeIfNot(__ ObjectIsSmi(object), frame_state,
+ DeoptimizeReason::kNotASmi, feedback);
+ return __ SmiUntag(object);
+ } else {
+ DCHECK_EQ(from_kind,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumber);
+ Label<Word32> done(this);
+
+ IF_LIKELY(__ ObjectIsSmi(object)) { GOTO(done, __ SmiUntag(object)); }
+ ELSE {
+ V<Tagged> map = __ LoadMapField(object);
+ __ DeoptimizeIfNot(
+ __ TaggedEqual(map,
+ __ HeapConstant(factory_->heap_number_map())),
+ frame_state, DeoptimizeReason::kNotAHeapNumber, feedback);
+ V<Float64> heap_number_value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+
+ GOTO(done,
+ __ ChangeFloat64ToInt32OrDeopt(heap_number_value, frame_state,
+ minus_zero_mode, feedback));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ }
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt64: {
+ DCHECK_EQ(from_kind,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumber);
+ Label<Word64> done(this);
+
+ IF_LIKELY(__ ObjectIsSmi(object)) {
+ GOTO(done, __ ChangeInt32ToInt64(__ SmiUntag(object)));
+ }
+ ELSE {
+ V<Tagged> map = __ LoadMapField(object);
+ __ DeoptimizeIfNot(
+ __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
+ frame_state, DeoptimizeReason::kNotAHeapNumber, feedback);
+ V<Float64> heap_number_value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done,
+ __ ChangeFloat64ToInt64OrDeopt(heap_number_value, frame_state,
+ minus_zero_mode, feedback));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kFloat64: {
+ Label<Float64> done(this);
+
+        // In the Smi case, just convert to int32 and then to float64.
+        // Otherwise, check that the object is a HeapNumber and load its value.
+ IF(__ ObjectIsSmi(object)) {
+ GOTO(done, __ ChangeInt32ToFloat64(__ SmiUntag(object)));
+ }
+ ELSE {
+ GOTO(done, ConvertHeapObjectToFloat64OrDeopt(object, frame_state,
+ from_kind, feedback));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kArrayIndex: {
+ DCHECK_EQ(
+ from_kind,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrString);
+ Label<WordPtr> done(this);
+
+ IF_LIKELY(__ ObjectIsSmi(object)) {
+ // In the Smi case, just convert to intptr_t.
+ GOTO(done, __ ChangeInt32ToIntPtr(__ SmiUntag(object)));
+ }
+ ELSE {
+ V<Tagged> map = __ LoadMapField(object);
+ IF_LIKELY(__ TaggedEqual(
+ map, __ HeapConstant(factory_->heap_number_map()))) {
+ V<Float64> heap_number_value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ // Perform Turbofan's "CheckedFloat64ToIndex"
+ {
+ if constexpr (Is64()) {
+ V<Word64> i64 = __ TruncateFloat64ToInt64OverflowUndefined(
+ heap_number_value);
+                // With this TruncateKind, an input of INT64_MAX loses
+                // precision without being detected by the first check below,
+                // so that check alone would not deoptimize. In that case,
+                // however, the subsequent range checks deoptimize anyway.
+ __ DeoptimizeIfNot(__ Float64Equal(__ ChangeInt64ToFloat64(i64),
+ heap_number_value),
+ frame_state,
+ DeoptimizeReason::kLostPrecisionOrNaN,
+ feedback);
+ __ DeoptimizeIfNot(
+ __ IntPtrLessThan(i64, kMaxSafeIntegerUint64), frame_state,
+ DeoptimizeReason::kNotAnArrayIndex, feedback);
+ __ DeoptimizeIfNot(
+ __ IntPtrLessThan(-kMaxSafeIntegerUint64, i64), frame_state,
+ DeoptimizeReason::kNotAnArrayIndex, feedback);
+ GOTO(done, i64);
+ } else {
+ V<Word32> i32 = __ TruncateFloat64ToInt32OverflowUndefined(
+ heap_number_value);
+ __ DeoptimizeIfNot(__ Float64Equal(__ ChangeInt32ToFloat64(i32),
+ heap_number_value),
+ frame_state,
+ DeoptimizeReason::kLostPrecisionOrNaN,
+ feedback);
+ GOTO(done, i32);
+ }
+ }
+ }
+ ELSE {
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+ __ DeoptimizeIfNot(
+ __ Uint32LessThan(instance_type, FIRST_NONSTRING_TYPE),
+ frame_state, DeoptimizeReason::kNotAString, feedback);
+
+ // TODO(nicohartmann@): We might introduce a Turboshaft way for
+ // constructing call descriptors.
+ MachineSignature::Builder builder(__ graph_zone(), 1, 1);
+ builder.AddReturn(MachineType::IntPtr());
+ builder.AddParam(MachineType::TaggedPointer());
+ auto desc = Linkage::GetSimplifiedCDescriptor(__ graph_zone(),
+ builder.Build());
+ auto ts_desc = TSCallDescriptor::Create(desc, __ graph_zone());
+ OpIndex callee = __ ExternalConstant(
+ ExternalReference::string_to_array_index_function());
+ // NOTE: String::ToArrayIndex() currently returns int32_t.
+ V<WordPtr> index =
+ __ ChangeInt32ToIntPtr(__ Call(callee, {object}, ts_desc));
+ __ DeoptimizeIf(__ WordPtrEqual(index, -1), frame_state,
+ DeoptimizeReason::kNotAnArrayIndex, feedback);
+ GOTO(done, index);
+ }
+ END_IF
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ }
+ UNREACHABLE();
+ }
+
+ OpIndex ReduceTruncateObjectToPrimitive(
+ V<Object> object, TruncateObjectToPrimitiveOp::Kind kind,
+ TruncateObjectToPrimitiveOp::InputAssumptions input_assumptions) {
+ switch (kind) {
+ case TruncateObjectToPrimitiveOp::Kind::kInt32: {
+ DCHECK_EQ(
+ input_assumptions,
+ TruncateObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball);
+ Label<Word32> done(this);
+
+ IF(__ ObjectIsSmi(object)) { GOTO(done, __ SmiUntag(object)); }
+ ELSE {
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ V<Float64> number_value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, __ JSTruncateFloat64ToWord32(number_value));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ case TruncateObjectToPrimitiveOp::Kind::kInt64: {
+ DCHECK_EQ(input_assumptions,
+ TruncateObjectToPrimitiveOp::InputAssumptions::kBigInt);
+ DCHECK(Is64());
+ Label<Word64> done(this);
+
+ V<Word32> bitfield = __ template LoadField<Word32>(
+ object, AccessBuilder::ForBigIntBitfield());
+ IF(__ Word32Equal(bitfield, 0)) { GOTO(done, 0); }
+ ELSE {
+ V<Word64> lsd = __ template LoadField<Word64>(
+ object, AccessBuilder::ForBigIntLeastSignificantDigit64());
+ V<Word32> sign =
+ __ Word32BitwiseAnd(bitfield, BigInt::SignBits::kMask);
+ IF(__ Word32Equal(sign, 1)) { GOTO(done, __ Word64Sub(0, lsd)); }
+ END_IF
+ GOTO(done, lsd);
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+ case TruncateObjectToPrimitiveOp::Kind::kBit: {
+ Label<Word32> done(this);
+
+ if (input_assumptions ==
+ TruncateObjectToPrimitiveOp::InputAssumptions::kObject) {
+ // Perform Smi check.
+ IF_UNLIKELY(__ ObjectIsSmi(object)) {
+ GOTO(done, __ Word32Equal(__ TaggedEqual(object, __ SmiTag(0)), 0));
+ }
+ END_IF
+          // Otherwise fall through into the HeapObject case.
+ } else {
+ DCHECK_EQ(input_assumptions,
+ TruncateObjectToPrimitiveOp::InputAssumptions::kHeapObject);
+ }
+
+ // Check if {object} is false.
+ GOTO_IF(
+ __ TaggedEqual(object, __ HeapConstant(factory_->false_value())),
+ done, 0);
+
+ // Check if {object} is the empty string.
+ GOTO_IF(
+ __ TaggedEqual(object, __ HeapConstant(factory_->empty_string())),
+ done, 0);
+
+ // Load the map of {object}.
+ V<Map> map = __ LoadMapField(object);
+
+ // Check if the {object} is undetectable and immediately return false.
+ // This includes undefined and null.
+ V<Word32> bitfield =
+ __ template LoadField<Word32>(map, AccessBuilder::ForMapBitField());
+ GOTO_IF(
+ __ Word32BitwiseAnd(bitfield, Map::Bits1::IsUndetectableBit::kMask),
+ done, 0);
+
+ // Check if {object} is a HeapNumber.
+ IF_UNLIKELY(
+ __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map()))) {
+ // For HeapNumber {object}, just check that its value is not 0.0, -0.0
+ // or NaN.
+ V<Float64> number_value = __ template LoadField<Float64>(
+ object, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, __ Float64LessThan(0.0, __ Float64Abs(number_value)));
+ }
+ END_IF
+
+ // Check if {object} is a BigInt.
+ IF_UNLIKELY(
+ __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map()))) {
+ V<Word32> bitfield = __ template LoadField<Word32>(
+ object, AccessBuilder::ForBigIntBitfield());
+ GOTO(done, IsNonZero(__ Word32BitwiseAnd(bitfield,
+ BigInt::LengthBits::kMask)));
+ }
+ END_IF
+
+ // All other values that reach here are true.
+ GOTO(done, 1);
+
+ BIND(done, result);
+ return result;
+ }
+ }
+ UNREACHABLE();
+ }
+
+ // `IsNonZero` converts any non-0 value into 1.
+ V<Word32> IsNonZero(V<Word32> value) {
+ return __ Word32Equal(__ Word32Equal(value, 0), 0);
+ }
+
+ OpIndex ReduceNewConsString(OpIndex length, OpIndex first, OpIndex second) {
+ // Determine the instance types of {first} and {second}.
+ V<Tagged> first_map = __ LoadMapField(first);
+ V<Word32> first_type = __ template LoadField<Word32>(
+ first_map, AccessBuilder::ForMapInstanceType());
+ V<Tagged> second_map = __ LoadMapField(second);
+ V<Word32> second_type = __ template LoadField<Word32>(
+ second_map, AccessBuilder::ForMapInstanceType());
+
+ Label<Tagged> allocate_string(this);
+ // Determine the proper map for the resulting ConsString.
+    // If both {first} and {second} are one-byte strings, we create a new
+    // ConsOneByteString; otherwise we create a new ConsString.
+ static_assert(kOneByteStringTag != 0);
+ static_assert(kTwoByteStringTag == 0);
+ V<Word32> instance_type = __ Word32BitwiseAnd(first_type, second_type);
+ V<Word32> encoding =
+ __ Word32BitwiseAnd(instance_type, kStringEncodingMask);
+ IF(__ Word32Equal(encoding, kTwoByteStringTag)) {
+ GOTO(allocate_string, __ HeapConstant(factory_->cons_string_map()));
+ }
+ ELSE {
+ GOTO(allocate_string,
+ __ HeapConstant(factory_->cons_one_byte_string_map()));
+ }
+
+ // Allocate the resulting ConsString.
+ BIND(allocate_string, map);
+ V<Tagged> string = __ Allocate(__ IntPtrConstant(ConsString::kSize),
+ AllocationType::kYoung);
+ __ StoreField(string, AccessBuilder::ForMap(), map);
+ __ StoreField(string, AccessBuilder::ForNameRawHashField(),
+ __ Word32Constant(Name::kEmptyHashField));
+ __ StoreField(string, AccessBuilder::ForStringLength(), length);
+ __ StoreField(string, AccessBuilder::ForConsStringFirst(), first);
+ __ StoreField(string, AccessBuilder::ForConsStringSecond(), second);
+ return string;
+ }
+
+ OpIndex ReduceNewArray(V<WordPtr> length, NewArrayOp::Kind kind,
+ AllocationType allocation_type) {
+ Label<Tagged> done(this);
+
+ GOTO_IF(__ WordPtrEqual(length, 0), done,
+ __ HeapConstant(factory_->empty_fixed_array()));
+
+ // Compute the effective size of the backing store.
+ intptr_t size_log2;
+ Handle<Map> array_map;
+ // TODO(nicohartmann@): Replace ElementAccess by a Turboshaft replacement.
+ ElementAccess access;
+ V<Any> the_hole_value;
+ switch (kind) {
+ case NewArrayOp::Kind::kDouble: {
+ size_log2 = kDoubleSizeLog2;
+ array_map = factory_->fixed_double_array_map();
+ access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
+ compiler::Type::NumberOrHole(), MachineType::Float64(),
+ kNoWriteBarrier};
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ the_hole_value = __ template LoadField<Float64>(
+ __ HeapConstant(factory_->the_hole_value()),
+ AccessBuilder::ForHeapNumberValue());
+ break;
+ }
+ case NewArrayOp::Kind::kObject: {
+ size_log2 = kTaggedSizeLog2;
+ array_map = factory_->fixed_array_map();
+ access = {kTaggedBase, FixedArray::kHeaderSize, compiler::Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+ the_hole_value = __ HeapConstant(factory_->the_hole_value());
+ break;
+ }
+ }
+ V<WordPtr> size = __ WordPtrAdd(__ WordPtrShiftLeft(length, size_log2),
+ access.header_size);
+
+ // Allocate the result and initialize the header.
+ V<Tagged> array = __ Allocate(size, allocation_type);
+ __ StoreField(array, AccessBuilder::ForMap(), __ HeapConstant(array_map));
+ __ StoreField(array, AccessBuilder::ForFixedArrayLength(),
+ __ SmiTag(length));
+
+ // Initialize the backing store with holes.
+ LoopLabel<WordPtr> loop(this);
+ GOTO(loop, intptr_t{0});
+
+ LOOP(loop, index) {
+ GOTO_IF_NOT_UNLIKELY(__ UintPtrLessThan(index, length), done, array);
+
+ __ StoreElement(array, access, index, the_hole_value);
+
+ // Advance the {index}.
+ GOTO(loop, __ WordPtrAdd(index, 1));
+ }
+
+ BIND(done, result);
+ return result;
+ }
+
+ OpIndex ReduceDoubleArrayMinMax(V<Tagged> array,
+ DoubleArrayMinMaxOp::Kind kind) {
+ DCHECK(kind == DoubleArrayMinMaxOp::Kind::kMin ||
+ kind == DoubleArrayMinMaxOp::Kind::kMax);
+ const bool is_max = kind == DoubleArrayMinMaxOp::Kind::kMax;
+
+ // Iterate the elements and find the result.
+ V<Float64> empty_value =
+ __ Float64Constant(is_max ? -V8_INFINITY : V8_INFINITY);
+ V<WordPtr> array_length =
+ __ ChangeInt32ToIntPtr(__ SmiUntag(__ template LoadField<Tagged>(
+ array, AccessBuilder::ForJSArrayLength(
+ ElementsKind::PACKED_DOUBLE_ELEMENTS))));
+ V<Tagged> elements = __ template LoadField<Tagged>(
+ array, AccessBuilder::ForJSObjectElements());
+
+ Label<Float64> done(this);
+ LoopLabel<WordPtr, Float64> loop(this);
+
+ GOTO(loop, intptr_t{0}, empty_value);
+
+ LOOP(loop, index, accumulator) {
+ GOTO_IF_NOT_UNLIKELY(__ UintPtrLessThan(index, array_length), done,
+ accumulator);
+
+ V<Float64> element = __ template LoadElement<Float64>(
+ elements, AccessBuilder::ForFixedDoubleArrayElement(), index);
+
+ V<Float64> new_accumulator = is_max ? __ Float64Max(accumulator, element)
+ : __ Float64Min(accumulator, element);
+ GOTO(loop, __ WordPtrAdd(index, 1), new_accumulator);
+ }
+
+ BIND(done, result);
+ return __ ConvertFloat64ToNumber(result,
+ CheckForMinusZeroMode::kCheckForMinusZero);
+ }
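A scalar equivalent of the min/max loop above (illustrative only); NaN propagation and -0 handling, which the real lowering delegates to Float64Min/Float64Max and ConvertFloat64ToNumber, are ignored here.

#include <cstddef>
#include <limits>

// Fold the packed double elements with min/max, starting from the identity
// element (+infinity for min, -infinity for max) so the empty array works.
double DoubleArrayMinMaxSketch(const double* elements, size_t length,
                               bool is_max) {
  double infinity = std::numeric_limits<double>::infinity();
  double acc = is_max ? -infinity : infinity;
  for (size_t i = 0; i < length; ++i) {
    double e = elements[i];
    acc = is_max ? (e > acc ? e : acc) : (e < acc ? e : acc);
  }
  return acc;
}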
+
+ OpIndex ReduceLoadFieldByIndex(V<Tagged> object, V<Word32> field_index) {
+ // Index encoding (see `src/objects/field-index-inl.h`):
+ // For efficiency, the LoadByFieldIndex instruction takes an index that is
+ // optimized for quick access. If the property is inline, the index is
+ // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
+ // disambiguate the zero out-of-line index from the zero inobject case.
+ // The index itself is shifted up by one bit, the lower-most bit
+ // signifying if the field is a mutable double box (1) or not (0).
+ V<WordPtr> index = __ ChangeInt32ToIntPtr(field_index);
+
+ Label<> double_field(this);
+ Label<Tagged> done(this);
+
+ // Check if field is a mutable double field.
+ GOTO_IF_UNLIKELY(__ WordPtrBitwiseAnd(index, 0x1), double_field);
+
+ {
+ // The field is a proper Tagged field on {object}. The {index} is
+ // shifted to the left by one in the code below.
+
+ // Check if field is in-object or out-of-object.
+ IF(__ IntPtrLessThan(index, 0)) {
+ // The field is located in the properties backing store of {object}.
+        // The {index} is equal to the negated out-of-object property index
+        // plus 1.
+ V<Tagged> properties = __ template LoadField<Tagged>(
+ object, AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer());
+
+ V<WordPtr> out_of_object_index = __ WordPtrSub(0, index);
+ V<Tagged> result =
+ __ Load(properties, out_of_object_index,
+ LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::AnyTagged(),
+ FixedArray::kHeaderSize - kTaggedSize, kTaggedSizeLog2 - 1);
+ GOTO(done, result);
+ }
+ ELSE {
+ // This field is located in the {object} itself.
+ V<Tagged> result = __ Load(
+ object, index, LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::AnyTagged(), JSObject::kHeaderSize,
+ kTaggedSizeLog2 - 1);
+ GOTO(done, result);
+ }
+ END_IF
+ }
+
+ if (BIND(double_field)) {
+      // The field is a Double field: either unboxed in the object (on 64-bit
+      // architectures) or stored as a mutable HeapNumber.
+ V<WordPtr> double_index = __ WordPtrShiftRightArithmetic(index, 1);
+ Label<Tagged> loaded_field(this);
+
+ // Check if field is in-object or out-of-object.
+ IF(__ IntPtrLessThan(double_index, 0)) {
+ V<Tagged> properties = __ template LoadField<Tagged>(
+ object, AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer());
+
+ V<WordPtr> out_of_object_index = __ WordPtrSub(0, double_index);
+ V<Tagged> result =
+ __ Load(properties, out_of_object_index,
+ LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::AnyTagged(),
+ FixedArray::kHeaderSize - kTaggedSize, kTaggedSizeLog2);
+ GOTO(loaded_field, result);
+ }
+ ELSE {
+ // The field is located in the {object} itself.
+ V<Tagged> result =
+ __ Load(object, double_index,
+ LoadOp::Kind::Aligned(BaseTaggedness::kTaggedBase),
+ MemoryRepresentation::AnyTagged(), JSObject::kHeaderSize,
+ kTaggedSizeLog2);
+ GOTO(loaded_field, result);
+ }
+ END_IF
+
+ if (BIND(loaded_field, field)) {
+ // We may have transitioned in-place away from double, so check that
+ // this is a HeapNumber -- otherwise the load is fine and we don't need
+ // to copy anything anyway.
+ GOTO_IF(__ ObjectIsSmi(field), done, field);
+ V<Tagged> map =
+ __ template LoadField<Tagged>(field, AccessBuilder::ForMap());
+ GOTO_IF_NOT(
+ __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
+ done, field);
+
+ V<Float64> value = __ template LoadField<Float64>(
+ field, AccessBuilder::ForHeapNumberValue());
+ GOTO(done, AllocateHeapNumberWithValue(value));
+ }
+ }
+
+ BIND(done, result);
+ return result;
+ }
+
+ OpIndex ReduceBigIntBinop(V<Tagged> left, V<Tagged> right,
+ OpIndex frame_state, BigIntBinopOp::Kind kind) {
+ const Builtin builtin = GetBuiltinForBigIntBinop(kind);
+ switch (kind) {
+ case BigIntBinopOp::Kind::kAdd:
+ case BigIntBinopOp::Kind::kSub:
+ case BigIntBinopOp::Kind::kBitwiseAnd:
+ case BigIntBinopOp::Kind::kBitwiseXor:
+ case BigIntBinopOp::Kind::kShiftLeft:
+ case BigIntBinopOp::Kind::kShiftRightArithmetic: {
+ V<Tagged> result = CallBuiltinForBigIntOp(builtin, {left, right});
+
+ // Check for exception sentinel: Smi 0 is returned to signal
+ // BigIntTooBig.
+ __ DeoptimizeIf(__ ObjectIsSmi(result), frame_state,
+ DeoptimizeReason::kBigIntTooBig, FeedbackSource{});
+ return result;
+ }
+ case BigIntBinopOp::Kind::kMul:
+ case BigIntBinopOp::Kind::kDiv:
+ case BigIntBinopOp::Kind::kMod: {
+ V<Tagged> result = CallBuiltinForBigIntOp(builtin, {left, right});
+
+ // Check for exception sentinel: Smi 1 is returned to signal
+ // TerminationRequested.
+ IF_UNLIKELY(__ TaggedEqual(result, __ SmiTag(1))) {
+ __ CallRuntime_TerminateExecution(isolate_, frame_state,
+ __ NoContextConstant());
+ }
+ END_IF
+
+ // Check for exception sentinel: Smi 0 is returned to signal
+ // BigIntTooBig or DivisionByZero.
+ __ DeoptimizeIf(__ ObjectIsSmi(result), frame_state,
+ kind == BigIntBinopOp::Kind::kMul
+ ? DeoptimizeReason::kBigIntTooBig
+ : DeoptimizeReason::kDivisionByZero,
+ FeedbackSource{});
+ return result;
+ }
+ case BigIntBinopOp::Kind::kBitwiseOr: {
+ return CallBuiltinForBigIntOp(builtin, {left, right});
+ }
+ default:
+ UNIMPLEMENTED();
+ }
+ UNREACHABLE();
+ }
+
+ V<Word32> ReduceBigIntEqual(V<Tagged> left, V<Tagged> right) {
+ return CallBuiltinForBigIntOp(Builtin::kBigIntEqual, {left, right});
+ }
+
+ V<Word32> ReduceBigIntComparison(V<Tagged> left, V<Tagged> right,
+ BigIntComparisonOp::Kind kind) {
+ if (kind == BigIntComparisonOp::Kind::kLessThan) {
+ return CallBuiltinForBigIntOp(Builtin::kBigIntLessThan, {left, right});
+ } else {
+ DCHECK_EQ(kind, BigIntComparisonOp::Kind::kLessThanOrEqual);
+ return CallBuiltinForBigIntOp(Builtin::kBigIntLessThanOrEqual,
+ {left, right});
+ }
+ }
+
+ V<Tagged> ReduceBigIntUnary(V<Tagged> input, BigIntUnaryOp::Kind kind) {
+ DCHECK_EQ(kind, BigIntUnaryOp::Kind::kNegate);
+ return CallBuiltinForBigIntOp(Builtin::kBigIntUnaryMinus, {input});
+ }
+
+ V<Word32> ReduceStringAt(V<String> string, V<WordPtr> pos,
+ StringAtOp::Kind kind) {
+ if (kind == StringAtOp::Kind::kCharCode) {
+ Label<Word32> done(this);
+ Label<> runtime(this);
+ // We need a loop here to properly deal with indirect strings
+ // (SlicedString, ConsString and ThinString).
+ LoopLabel<String, WordPtr> loop(this);
+ GOTO(loop, string, pos);
+
+ LOOP(loop, receiver, position) {
+ V<Tagged> map = __ LoadMapField(receiver);
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+ V<Word32> representation =
+ __ Word32BitwiseAnd(instance_type, kStringRepresentationMask);
+
+ IF(__ Int32LessThanOrEqual(representation, kConsStringTag)) {
+ {
+ // if_lessthanoreq_cons
+ IF(__ Word32Equal(representation, kConsStringTag)) {
+ // if_consstring
+ V<String> second = __ template LoadField<String>(
+ receiver, AccessBuilder::ForConsStringSecond());
+ GOTO_IF_NOT_UNLIKELY(
+ __ TaggedEqual(second,
+ __ HeapConstant(factory_->empty_string())),
+ runtime);
+ V<String> first = __ template LoadField<String>(
+ receiver, AccessBuilder::ForConsStringFirst());
+ GOTO(loop, first, position);
+ }
+ ELSE {
+ // if_seqstring
+ V<Word32> onebyte = __ Word32Equal(
+ __ Word32BitwiseAnd(instance_type, kStringEncodingMask),
+ kOneByteStringTag);
+ GOTO(done, LoadFromSeqString(receiver, position, onebyte));
+ }
+ END_IF
+ }
+ }
+ ELSE {
+ // if_greaterthan_cons
+ {
+ IF(__ Word32Equal(representation, kThinStringTag)) {
+ // if_thinstring
+ V<String> actual = __ template LoadField<String>(
+ receiver, AccessBuilder::ForThinStringActual());
+ GOTO(loop, actual, position);
+ }
+ ELSE_IF(__ Word32Equal(representation, kExternalStringTag)) {
+ // if_externalstring
+            // We need to bail out to the runtime for uncached external
+            // strings.
+ GOTO_IF_UNLIKELY(__ Word32Equal(__ Word32BitwiseAnd(
+ instance_type,
+ kUncachedExternalStringMask),
+ kUncachedExternalStringTag),
+ runtime);
+
+ OpIndex data = __ LoadField(
+ receiver, AccessBuilder::ForExternalStringResourceData());
+ IF(__ Word32Equal(
+ __ Word32BitwiseAnd(instance_type, kStringEncodingMask),
+ kTwoByteStringTag)) {
+ // if_twobyte
+ constexpr uint8_t twobyte_size_log2 = 1;
+ V<Word32> value = __ Load(
+ data, position,
+ LoadOp::Kind::Aligned(BaseTaggedness::kUntaggedBase),
+ MemoryRepresentation::Uint16(), 0, twobyte_size_log2);
+ GOTO(done, value);
+ }
+ ELSE {
+ // if_onebyte
+ constexpr uint8_t onebyte_size_log2 = 0;
+ V<Word32> value = __ Load(
+ data, position,
+ LoadOp::Kind::Aligned(BaseTaggedness::kUntaggedBase),
+ MemoryRepresentation::Uint8(), 0, onebyte_size_log2);
+ GOTO(done, value);
+ }
+ END_IF
+ }
+ ELSE_IF(__ Word32Equal(representation, kSlicedStringTag)) {
+ // if_slicedstring
+ V<Tagged> offset = __ template LoadField<Tagged>(
+ receiver, AccessBuilder::ForSlicedStringOffset());
+ V<String> parent = __ template LoadField<String>(
+ receiver, AccessBuilder::ForSlicedStringParent());
+ GOTO(loop, parent,
+ __ WordPtrAdd(position,
+ __ ChangeInt32ToIntPtr(__ SmiUntag(offset))));
+ }
+ ELSE { GOTO(runtime); }
+ END_IF
+ }
+ }
+ END_IF
+
+ if (BIND(runtime)) {
+ V<Word32> value = __ SmiUntag(__ CallRuntime_StringCharCodeAt(
+ isolate_, __ NoContextConstant(), receiver, __ SmiTag(position)));
+ GOTO(done, value);
+ }
+ }
+
+ BIND(done, result);
+ return result;
+ } else {
+ DCHECK_EQ(kind, StringAtOp::Kind::kCodePoint);
+ Label<Word32> done(this);
+
+ V<Word32> first_code_unit = __ StringCharCodeAt(string, pos);
+ GOTO_IF_NOT_LIKELY(
+ __ Word32Equal(__ Word32BitwiseAnd(first_code_unit, 0xFC00), 0xD800),
+ done, first_code_unit);
+ V<WordPtr> length =
+          __ ChangeUint32ToUintPtr(__ template LoadField<Word32>(
+ string, AccessBuilder::ForStringLength()));
+ V<WordPtr> next_index = __ WordPtrAdd(pos, 1);
+ GOTO_IF_NOT(__ IntPtrLessThan(next_index, length), done, first_code_unit);
+
+ V<Word32> second_code_unit = __ StringCharCodeAt(string, next_index);
+ GOTO_IF_NOT(
+ __ Word32Equal(__ Word32BitwiseAnd(second_code_unit, 0xFC00), 0xDC00),
+ done, first_code_unit);
+
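+      // Combine the surrogate pair into a code point:
+      //   code_point = ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000
+      // The three constant terms are folded into {surrogate_offset} below.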
+ const int32_t surrogate_offset = 0x10000 - (0xD800 << 10) - 0xDC00;
+ V<Word32> value =
+ __ Word32Add(__ Word32ShiftLeft(first_code_unit, 10),
+ __ Word32Add(second_code_unit, surrogate_offset));
+ GOTO(done, value);
+
+ BIND(done, result);
+ return result;
+ }
+
+ UNREACHABLE();
+ }
+
+ V<Word32> ReduceStringLength(V<String> string) {
+ return __ template LoadField<Word32>(string,
+ AccessBuilder::ForStringLength());
+ }
+
+ V<Smi> ReduceStringIndexOf(V<String> string, V<String> search,
+ V<Smi> position) {
+ return __ CallBuiltin_StringIndexOf(isolate_, string, search, position);
+ }
+
+ V<String> ReduceStringFromCodePointAt(V<String> string, V<WordPtr> index) {
+ return __ CallBuiltin_StringFromCodePointAt(isolate_, string, index);
+ }
+
+#ifdef V8_INTL_SUPPORT
+ V<String> ReduceStringToCaseIntl(V<String> string,
+ StringToCaseIntlOp::Kind kind) {
+ if (kind == StringToCaseIntlOp::Kind::kLower) {
+ return __ CallBuiltin_StringToLowerCaseIntl(
+ isolate_, __ NoContextConstant(), string);
+ } else {
+ DCHECK_EQ(kind, StringToCaseIntlOp::Kind::kUpper);
+ return __ CallRuntime_StringToUpperCaseIntl(
+ isolate_, __ NoContextConstant(), string);
+ }
+ }
+#endif // V8_INTL_SUPPORT
+
+ V<String> ReduceStringSubstring(V<String> string, V<Word32> start,
+ V<Word32> end) {
+ V<WordPtr> s = __ ChangeInt32ToIntPtr(start);
+ V<WordPtr> e = __ ChangeInt32ToIntPtr(end);
+ return __ CallBuiltin_StringSubstring(isolate_, string, s, e);
+ }
+
+ V<Boolean> ReduceStringEqual(V<String> left, V<String> right) {
+ V<Word32> left_length =
+ __ template LoadField<Word32>(left, AccessBuilder::ForStringLength());
+ V<Word32> right_length =
+ __ template LoadField<Word32>(right, AccessBuilder::ForStringLength());
+
+ Label<Boolean> done(this);
+ IF(__ Word32Equal(left_length, right_length)) {
+ GOTO(done,
+ __ CallBuiltin_StringEqual(isolate_, left, right,
+ __ ChangeInt32ToIntPtr(left_length)));
+ }
+ ELSE { GOTO(done, __ HeapConstant(factory_->false_value())); }
+
+ BIND(done, result);
+ return result;
+ }
+
+ V<Boolean> ReduceStringComparison(V<String> left, V<String> right,
+ StringComparisonOp::Kind kind) {
+ switch (kind) {
+ case StringComparisonOp::Kind::kLessThan:
+ return __ CallBuiltin_StringLessThan(isolate_, left, right);
+ case StringComparisonOp::Kind::kLessThanOrEqual:
+ return __ CallBuiltin_StringLessThanOrEqual(isolate_, left, right);
+ }
+ }
+
+ // TODO(nicohartmann@): Remove this once ECL has been fully ported.
+ // ECL: ChangeInt64ToSmi(input) ==> MLR: __ SmiTag(input)
+ // ECL: ChangeInt32ToSmi(input) ==> MLR: __ SmiTag(input)
+ // ECL: ChangeUint32ToSmi(input) ==> MLR: __ SmiTag(input)
+ // ECL: ChangeUint64ToSmi(input) ==> MLR: __ SmiTag(input)
+ // ECL: ChangeIntPtrToSmi(input) ==> MLR: __ SmiTag(input)
+ // ECL: ChangeFloat64ToTagged(i, m) ==> MLR: __ ConvertFloat64ToNumber(i, m)
+ // ECL: ChangeSmiToIntPtr(input)
+ // ==> MLR: __ ChangeInt32ToIntPtr(__ SmiUntag(input))
+ // ECL: ChangeSmiToInt32(input) ==> MLR: __ SmiUntag(input)
+ // ECL: ChangeSmiToInt64(input) ==> MLR: __ ChangeInt32ToInt64(__
+ // SmiUntag(input))
+ // ECL: BuildCheckedHeapNumberOrOddballToFloat64 ==> MLR:
+ // ConvertHeapObjectToFloat64OrDeopt
+
+ private:
+ // TODO(nicohartmann@): Might move some of those helpers into the assembler
+ // interface.
+ // Pass {bitfield} = {digit} = OpIndex::Invalid() to construct the canonical
+ // 0n BigInt.
+ V<BigInt> AllocateBigInt(V<Word32> bitfield, V<Word64> digit) {
+ DCHECK(Is64());
+ DCHECK_EQ(bitfield.valid(), digit.valid());
+ static constexpr auto zero_bitfield =
+ BigInt::SignBits::update(BigInt::LengthBits::encode(0), false);
+
+ V<Tagged> map = __ HeapConstant(factory_->bigint_map());
+ auto bigint = V<FreshlyAllocatedBigInt>::Cast(
+ __ Allocate(__ IntPtrConstant(BigInt::SizeFor(digit.valid() ? 1 : 0)),
+ AllocationType::kYoung));
+ __ StoreField(bigint, AccessBuilder::ForMap(), map);
+ __ StoreField(
+ bigint, AccessBuilder::ForBigIntBitfield(),
+ bitfield.valid() ? bitfield : __ Word32Constant(zero_bitfield));
+
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(bigint, AccessBuilder::ForBigIntOptionalPadding(),
+ __ IntPtrConstant(0));
+ }
+ if (digit.valid()) {
+ __ StoreField(bigint, AccessBuilder::ForBigIntLeastSignificantDigit64(),
+ digit);
+ }
+ return V<BigInt>::Cast(bigint);
+ }
+
+ // TODO(nicohartmann@): Should also make this an operation and lower in
+ // TagUntagLoweringReducer.
+ V<Word32> IsSmi(V<Tagged> input) {
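+    // Smis are tagged with kSmiTag (0) in the low bit(s), so masking with
+    // kSmiTagMask and comparing against kSmiTag identifies them.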
+ return __ Word32Equal(
+ __ Word32BitwiseAnd(V<Word32>::Cast(input),
+ static_cast<uint32_t>(kSmiTagMask)),
+ static_cast<uint32_t>(kSmiTag));
+ }
+
+ void SmiTagOrOverflow(V<Word32> input, Label<>* overflow,
+ Label<Tagged>* done) {
+ DCHECK(SmiValuesAre31Bits());
+
+ // Check for overflow at the same time that we are smi tagging.
+ // Since smi tagging shifts left by one, it's the same as adding value
+ // twice.
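+    // For example, with 31-bit Smis, tagging 0x40000000 computes
+    // 0x40000000 + 0x40000000, which overflows signed 32-bit addition and is
+    // caught by the overflow projection below.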
+ OpIndex add = __ Int32AddCheckOverflow(input, input);
+ V<Word32> check = __ Projection(add, 1, WordRepresentation::Word32());
+ GOTO_IF(check, *overflow);
+ GOTO(*done, __ SmiTag(input));
+ }
+
+ V<Tagged> AllocateHeapNumberWithValue(V<Float64> value) {
+ V<Tagged> result = __ Allocate(__ IntPtrConstant(HeapNumber::kSize),
+ AllocationType::kYoung);
+ __ StoreField(result, AccessBuilder::ForMap(),
+ __ HeapConstant(factory_->heap_number_map()));
+ __ StoreField(result, AccessBuilder::ForHeapNumberValue(), value);
+ return result;
+ }
+
+ V<Float64> ConvertHeapObjectToFloat64OrDeopt(
+ V<Tagged> heap_object, OpIndex frame_state,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind input_kind,
+ const FeedbackSource& feedback) {
+ V<Tagged> map = __ LoadMapField(heap_object);
+ V<Word32> check_number =
+ __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map()));
+ switch (input_kind) {
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kSmi:
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrString:
+ UNREACHABLE();
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumber: {
+ __ DeoptimizeIfNot(check_number, frame_state,
+ DeoptimizeReason::kNotAHeapNumber, feedback);
+ break;
+ }
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrBoolean: {
+ IF_NOT(check_number) {
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ __ DeoptimizeIfNot(
+ __ TaggedEqual(map, __ HeapConstant(factory_->boolean_map())),
+ frame_state, DeoptimizeReason::kNotANumberOrBoolean, feedback);
+ }
+ END_IF
+ break;
+ }
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrOddball: {
+ IF_NOT(check_number) {
+          // Oddballs also contain the numeric value, so just check that we
+          // have an oddball here.
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
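+          // The static assert above guarantees that an Oddball's ToNumberRaw
+          // value sits at the same offset as a HeapNumber's value, so the
+          // unconditional LoadField at the end of this method works for both.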
+ V<Word32> instance_type = __ template LoadField<Word32>(
+ map, AccessBuilder::ForMapInstanceType());
+ __ DeoptimizeIfNot(__ Word32Equal(instance_type, ODDBALL_TYPE),
+ frame_state,
+ DeoptimizeReason::kNotANumberOrOddball, feedback);
+ }
+ END_IF
+ break;
+ }
+ }
+ return __ template LoadField<Float64>(heap_object,
+ AccessBuilder::ForHeapNumberValue());
+ }
+
+ OpIndex LoadFromSeqString(V<Tagged> receiver, V<WordPtr> position,
+ V<Word32> onebyte) {
+ Label<Word32> done(this);
+
+ IF(onebyte) {
+ GOTO(done, __ template LoadElement<Word32>(
+ receiver, AccessBuilder::ForSeqOneByteStringCharacter(),
+ position));
+ }
+ ELSE {
+ GOTO(done, __ template LoadElement<Word32>(
+ receiver, AccessBuilder::ForSeqTwoByteStringCharacter(),
+ position));
+ }
+ END_IF
+
+ BIND(done, result);
+ return result;
+ }
+
+ // TODO(nicohartmann@): Might use the CallBuiltinDescriptors here.
+ OpIndex CallBuiltinForBigIntOp(Builtin builtin,
+ std::initializer_list<OpIndex> arguments) {
+ DCHECK_IMPLIES(builtin == Builtin::kBigIntUnaryMinus,
+ arguments.size() == 1);
+ DCHECK_IMPLIES(builtin != Builtin::kBigIntUnaryMinus,
+ arguments.size() == 2);
+ base::SmallVector<OpIndex, 4> args(arguments);
+ args.push_back(__ NoContextConstant());
+
+ Callable callable = Builtins::CallableFor(isolate_, builtin);
+ auto descriptor = Linkage::GetStubCallDescriptor(
+ __ graph_zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kFoldable | Operator::kNoThrow);
+ auto ts_descriptor = TSCallDescriptor::Create(descriptor, __ graph_zone());
+ return __ Call(__ HeapConstant(callable.code()), OpIndex::Invalid(),
+ base::VectorOf(args), ts_descriptor);
+ }
+
+ Builtin GetBuiltinForBigIntBinop(BigIntBinopOp::Kind kind) {
+ switch (kind) {
+ case BigIntBinopOp::Kind::kAdd:
+ return Builtin::kBigIntAddNoThrow;
+ case BigIntBinopOp::Kind::kSub:
+ return Builtin::kBigIntSubtractNoThrow;
+ case BigIntBinopOp::Kind::kMul:
+ return Builtin::kBigIntMultiplyNoThrow;
+ case BigIntBinopOp::Kind::kDiv:
+ return Builtin::kBigIntDivideNoThrow;
+ case BigIntBinopOp::Kind::kMod:
+ return Builtin::kBigIntModulusNoThrow;
+ case BigIntBinopOp::Kind::kBitwiseAnd:
+ return Builtin::kBigIntBitwiseAndNoThrow;
+ case BigIntBinopOp::Kind::kBitwiseOr:
+ return Builtin::kBigIntBitwiseOrNoThrow;
+ case BigIntBinopOp::Kind::kBitwiseXor:
+ return Builtin::kBigIntBitwiseXorNoThrow;
+ case BigIntBinopOp::Kind::kShiftLeft:
+ return Builtin::kBigIntShiftLeftNoThrow;
+ case BigIntBinopOp::Kind::kShiftRightArithmetic:
+ return Builtin::kBigIntShiftRightNoThrow;
+ }
+ }
+
+ Factory* factory_;
+ Isolate* isolate_;
+};
+
+#include "src/compiler/turboshaft/undef-assembler-macros.inc"
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_REDUCER_H_
diff --git a/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h b/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h
index da59a88431..b5539e06b3 100644
--- a/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h
+++ b/deps/v8/src/compiler/turboshaft/machine-optimization-reducer.h
@@ -20,8 +20,10 @@
#include "src/base/overflowing-math.h"
#include "src/base/template-utils.h"
#include "src/base/vector.h"
+#include "src/builtins/builtins.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/backend/instruction.h"
+#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/representations.h"
@@ -43,11 +45,24 @@ using MachineOptimizationReducerSignallingNanImpossible =
// operations that can be performed on-the-fly, without requiring type analysis
// or analyzing uses. It largely corresponds to MachineOperatorReducer in
// sea-of-nodes Turbofan.
+//
+// Additional optimizations include some of the control-flow reductions that
+// were previously done in CommonOperatorReducer, including:
+// 1- Reducing Phis whose inputs are all the same by replacing
+//    them with that input.
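+//    E.g. Phi(x, x, x) is reduced to x (see ReducePhi below).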
+
template <bool signalling_nan_possible, class Next>
class MachineOptimizationReducer : public Next {
public:
using Next::Asm;
+ template <class... Args>
+ explicit MachineOptimizationReducer(const std::tuple<Args...>& args)
+ : Next(args) {}
+
+ // TODO(mslekova): Implement ReduceSelect and ReducePhi,
+ // by reducing `(f > 0) ? f : -f` to `fabs(f)`.
+
OpIndex ReduceChange(OpIndex input, ChangeOp::Kind kind,
ChangeOp::Assumption assumption,
RegisterRepresentation from, RegisterRepresentation to) {
@@ -236,9 +251,9 @@ class MachineOptimizationReducer : public Next {
case FloatUnaryOp::Kind::kExpm1:
return Asm().Float32Constant(base::ieee754::expm1(k));
case FloatUnaryOp::Kind::kSin:
- return Asm().Float32Constant(base::ieee754::sin(k));
+ return Asm().Float32Constant(SIN_IMPL(k));
case FloatUnaryOp::Kind::kCos:
- return Asm().Float32Constant(base::ieee754::cos(k));
+ return Asm().Float32Constant(COS_IMPL(k));
case FloatUnaryOp::Kind::kSinh:
return Asm().Float32Constant(base::ieee754::sinh(k));
case FloatUnaryOp::Kind::kCosh:
@@ -300,9 +315,9 @@ class MachineOptimizationReducer : public Next {
case FloatUnaryOp::Kind::kExpm1:
return Asm().Float64Constant(base::ieee754::expm1(k));
case FloatUnaryOp::Kind::kSin:
- return Asm().Float64Constant(base::ieee754::sin(k));
+ return Asm().Float64Constant(SIN_IMPL(k));
case FloatUnaryOp::Kind::kCos:
- return Asm().Float64Constant(base::ieee754::cos(k));
+ return Asm().Float64Constant(COS_IMPL(k));
case FloatUnaryOp::Kind::kSinh:
return Asm().Float64Constant(base::ieee754::sinh(k));
case FloatUnaryOp::Kind::kCosh:
@@ -528,13 +543,12 @@ class MachineOptimizationReducer : public Next {
if (Asm().MatchFloat(rhs, 0.5)) {
// lhs ** 0.5 ==> sqrt(lhs)
          // (unless lhs is -infinity)
- Block* if_neg_infinity = Asm().NewBlock(Block::Kind::kBranchTarget);
- if_neg_infinity->SetDeferred(true);
- Block* otherwise = Asm().NewBlock(Block::Kind::kBranchTarget);
- Block* merge = Asm().NewBlock(Block::Kind::kMerge);
+ Block* if_neg_infinity = Asm().NewBlock();
+ Block* otherwise = Asm().NewBlock();
+ Block* merge = Asm().NewBlock();
Asm().Branch(Asm().FloatLessThanOrEqual(
lhs, Asm().FloatConstant(-V8_INFINITY, rep), rep),
- if_neg_infinity, otherwise);
+ if_neg_infinity, otherwise, BranchHint::kFalse);
         // TODO(dmercadier,tebbi): once the VariableAssembler has landed, use
         // only one AutoVariable for both {infty} and {sqrt} to avoid the
@@ -879,7 +893,8 @@ class MachineOptimizationReducer : public Next {
case WordBinopOp::Kind::kSignedDiv:
case WordBinopOp::Kind::kUnsignedDiv: {
OpIndex zero = Asm().WordConstant(0, rep);
- return Asm().Equal(Asm().Equal(left, zero, rep), zero, rep);
+ return Asm().ChangeUint32ToUintPtr(
+ Asm().Word32Equal(Asm().Equal(left, zero, rep), 0));
}
case WordBinopOp::Kind::kAdd:
case WordBinopOp::Kind::kMul:
@@ -964,13 +979,6 @@ class MachineOptimizationReducer : public Next {
}
}
- OpIndex ReduceProjection(OpIndex tuple, uint16_t index) {
- if (auto* tuple_op = Asm().template TryCast<TupleOp>(tuple)) {
- return tuple_op->input(index);
- }
- return Next::ReduceProjection(tuple, index);
- }
-
OpIndex ReduceOverflowCheckedBinop(OpIndex left, OpIndex right,
OverflowCheckedBinopOp::Kind kind,
WordRepresentation rep) {
@@ -1018,7 +1026,8 @@ class MachineOptimizationReducer : public Next {
overflow = base::bits::SignedAddOverflow64(k1, k2, &res);
break;
case OverflowCheckedBinopOp::Kind::kSignedMul:
- UNREACHABLE();
+ overflow = base::bits::SignedMulOverflow64(k1, k2, &res);
+ break;
case OverflowCheckedBinopOp::Kind::kSignedSub:
overflow = base::bits::SignedSubOverflow64(k1, k2, &res);
break;
@@ -1102,6 +1111,12 @@ class MachineOptimizationReducer : public Next {
}
break;
}
+ case RegisterRepresentation::Tagged(): {
+ // TODO(nicohartmann@): We might optimize comparison of
+ // HeapConstants here, but this requires that we are allowed to
+ // dereference handles.
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1148,6 +1163,19 @@ class MachineOptimizationReducer : public Next {
rep_w);
}
}
+ // Map 64bit to 32bit equals.
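+      // E.g. Equal(ChangeInt32ToInt64(a), ChangeInt32ToInt64(b), Word64) can
+      // be reduced to Equal(a, b, Word32) when both inputs were extended in
+      // the same way.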
+ if (rep_w == WordRepresentation::Word64()) {
+ base::Optional<bool> left_sign_extended;
+ base::Optional<bool> right_sign_extended;
+ if (IsWord32ConvertedToWord64(left, &left_sign_extended) &&
+ IsWord32ConvertedToWord64(right, &right_sign_extended)) {
+ if (left_sign_extended == right_sign_extended) {
+ return Asm().Equal(UndoWord32ToWord64Conversion(left),
+ UndoWord32ToWord64Conversion(right),
+ WordRepresentation::Word32());
+ }
+ }
+ }
}
}
return Next::ReduceEqual(left, right, rep);
@@ -1483,9 +1511,10 @@ class MachineOptimizationReducer : public Next {
return Next::ReduceShift(left, right, kind, rep);
}
- OpIndex ReduceBranch(OpIndex condition, Block* if_true, Block* if_false) {
+ OpIndex ReduceBranch(OpIndex condition, Block* if_true, Block* if_false,
+ BranchHint hint) {
if (ShouldSkipOptimizationStep()) {
- return Next::ReduceBranch(condition, if_true, if_false);
+ return Next::ReduceBranch(condition, if_true, if_false, hint);
}
if (base::Optional<bool> decision = DecideBranchCondition(condition)) {
Asm().Goto(*decision ? if_true : if_false);
@@ -1494,10 +1523,14 @@ class MachineOptimizationReducer : public Next {
bool negated = false;
if (base::Optional<OpIndex> new_condition =
ReduceBranchCondition(condition, &negated)) {
- if (negated) std::swap(if_true, if_false);
- return Asm().ReduceBranch(new_condition.value(), if_true, if_false);
+ if (negated) {
+ std::swap(if_true, if_false);
+ hint = NegateBranchHint(hint);
+ }
+
+ return Asm().ReduceBranch(new_condition.value(), if_true, if_false, hint);
} else {
- return Next::ReduceBranch(condition, if_true, if_false);
+ return Next::ReduceBranch(condition, if_true, if_false, hint);
}
}
@@ -1525,6 +1558,62 @@ class MachineOptimizationReducer : public Next {
}
}
+ OpIndex ReduceTrapIf(OpIndex condition, bool negated, TrapId trap_id) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceTrapIf(condition, negated, trap_id);
+ }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+ if (base::Optional<bool> decision = DecideBranchCondition(condition)) {
+ if (*decision != negated) {
+ Next::ReduceTrapIf(condition, negated, trap_id);
+ Asm().Unreachable();
+ }
+ // `TrapIf` doesn't produce a value.
+ return OpIndex::Invalid();
+ }
+ if (base::Optional<OpIndex> new_condition =
+ ReduceBranchCondition(condition, &negated)) {
+ return Asm().ReduceTrapIf(new_condition.value(), negated, trap_id);
+ } else {
+ goto no_change;
+ }
+ }
+
+ OpIndex ReduceStaticAssert(OpIndex condition, const char* source) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceStaticAssert(condition, source);
+ }
+ if (base::Optional<bool> decision = DecideBranchCondition(condition)) {
+      if (*decision) {
+ // Drop the assert, the condition holds true.
+ return OpIndex::Invalid();
+ } else {
+ // Leave the assert, as the condition is not true.
+ goto no_change;
+ }
+ }
+ goto no_change;
+ }
+
+ OpIndex ReduceSwitch(OpIndex input, base::Vector<const SwitchOp::Case> cases,
+ Block* default_case, BranchHint default_hint) {
+ LABEL_BLOCK(no_change) {
+ return Next::ReduceSwitch(input, cases, default_case, default_hint);
+ }
+ if (ShouldSkipOptimizationStep()) goto no_change;
+ if (int32_t value; Asm().MatchWord32Constant(input, &value)) {
+ for (const SwitchOp::Case& if_value : cases) {
+ if (if_value.value == value) {
+ Asm().Goto(if_value.destination);
+ return OpIndex::Invalid();
+ }
+ }
+ Asm().Goto(default_case);
+ return OpIndex::Invalid();
+ }
+ goto no_change;
+ }
+
OpIndex ReduceStore(OpIndex base, OpIndex index, OpIndex value,
StoreOp::Kind kind, MemoryRepresentation stored_rep,
WriteBarrierKind write_barrier, int32_t offset,
@@ -1583,6 +1672,17 @@ class MachineOptimizationReducer : public Next {
element_scale);
}
+ OpIndex ReducePhi(base::Vector<const OpIndex> inputs,
+ RegisterRepresentation rep) {
+ LABEL_BLOCK(no_change) { return Next::ReducePhi(inputs, rep); }
+ if (inputs.size() == 0) goto no_change;
+ OpIndex first = inputs.first();
+ for (const OpIndex& input : inputs) {
+ if (input != first) goto no_change;
+ }
+ return first;
+ }
+
private:
// Try to match a constant and add it to `offset`. Return `true` if
// successful.
@@ -1590,7 +1690,7 @@ class MachineOptimizationReducer : public Next {
uint8_t element_scale) {
if (!maybe_constant.Is<ConstantOp>()) return false;
const ConstantOp& constant = maybe_constant.Cast<ConstantOp>();
- if (constant.Representation() != WordRepresentation::PointerSized()) {
+ if (constant.rep != WordRepresentation::PointerSized()) {
// This can only happen in unreachable code. Ideally, we identify this
// situation and use `Asm().Unreachable()`. However, this is difficult to
// do from within this helper, so we just don't perform the reduction.
@@ -1851,7 +1951,7 @@ class MachineOptimizationReducer : public Next {
Asm().ShiftRightArithmetic(quotient, rep.bit_width() - 1, rep);
}
quotient =
- Asm().ShiftRightArithmetic(quotient, rep.bit_width() - shift, rep);
+ Asm().ShiftRightLogical(quotient, rep.bit_width() - shift, rep);
quotient = Asm().WordAdd(quotient, left, rep);
quotient = Asm().ShiftRightArithmetic(quotient, shift, rep);
return quotient;
diff --git a/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.cc b/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.cc
new file mode 100644
index 0000000000..db3731d79f
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.cc
@@ -0,0 +1,166 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/memory-optimization-reducer.h"
+
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/compiler/linkage.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+const TSCallDescriptor* CreateAllocateBuiltinDescriptor(Zone* zone) {
+ return TSCallDescriptor::Create(
+ Linkage::GetStubCallDescriptor(
+ zone, AllocateDescriptor{},
+ AllocateDescriptor{}.GetStackParameterCount(),
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow,
+ StubCallMode::kCallCodeObject),
+ zone);
+}
+
+void MemoryAnalyzer::Run() {
+ block_states[current_block] = BlockState{};
+ BlockIndex end = BlockIndex(input_graph.block_count());
+ while (current_block < end) {
+ state = *block_states[current_block];
+ auto operations_range =
+ input_graph.operations(input_graph.Get(current_block));
+ // Set the next block index here already, to allow it to be changed if
+ // needed.
+ current_block = BlockIndex(current_block.id() + 1);
+ for (const Operation& op : operations_range) {
+ Process(op);
+ }
+ }
+}
+
+void MemoryAnalyzer::Process(const Operation& op) {
+ if (ShouldSkipOperation(op)) {
+ return;
+ }
+
+ if (auto* alloc = op.TryCast<AllocateOp>()) {
+ ProcessAllocation(*alloc);
+ return;
+ }
+ if (auto* store = op.TryCast<StoreOp>()) {
+ ProcessStore(input_graph.Index(op), store->base());
+ return;
+ }
+ OpProperties properties = op.Properties();
+ if (properties.can_allocate) {
+ state = BlockState();
+ }
+ if (properties.is_block_terminator) {
+ ProcessBlockTerminator(op);
+ }
+}
+
+// Update the successor block states based on the state of the current block.
+// For loop backedges, we need to re-start the analysis from the loop header
+// unless the backedge state is unchanged.
+void MemoryAnalyzer::ProcessBlockTerminator(const Operation& op) {
+ if (auto* goto_op = op.TryCast<GotoOp>()) {
+ if (input_graph.IsLoopBackedge(*goto_op)) {
+ base::Optional<BlockState>& target_state =
+ block_states[goto_op->destination->index()];
+ BlockState old_state = *target_state;
+ MergeCurrentStateIntoSuccessor(goto_op->destination);
+ if (old_state != *target_state) {
+ // We can never fold allocations inside of the loop into an
+ // allocation before the loop, since this leads to unbounded
+        // allocation size. An unknown `reserved_size` prevents allocations
+        // inside of the loop from being folded into it.
+ target_state->reserved_size = base::nullopt;
+ // Redo the analysis from the beginning of the loop.
+ current_block = goto_op->destination->index();
+ }
+ return;
+ } else if (goto_op->destination->IsLoop()) {
+ // Look ahead to detect allocating loops earlier, avoiding a wrong
+ // speculation resulting in processing the loop twice.
+ for (const Operation& op :
+ input_graph.operations(*goto_op->destination)) {
+ if (op.Properties().can_allocate && !ShouldSkipOperation(op)) {
+ state = BlockState();
+ break;
+ }
+ }
+ }
+ }
+ for (Block* successor : SuccessorBlocks(op)) {
+ MergeCurrentStateIntoSuccessor(successor);
+ }
+}
+
+// We try to merge the new allocation into a previous dominating allocation.
+// We also allow folding allocations across blocks, as long as there is a
+// dominating relationship.
+void MemoryAnalyzer::ProcessAllocation(const AllocateOp& alloc) {
+ if (ShouldSkipOptimizationStep()) return;
+ base::Optional<uint64_t> new_size;
+ if (auto* size =
+ input_graph.Get(alloc.size()).template TryCast<ConstantOp>()) {
+ new_size = size->integral();
+ }
+ // If the new allocation has a static size and is of the same type, then we
+ // can fold it into the previous allocation unless the folded allocation would
+ // exceed `kMaxRegularHeapObjectSize`.
+ if (state.last_allocation && new_size.has_value() &&
+ state.reserved_size.has_value() &&
+ alloc.type == state.last_allocation->type &&
+ *new_size <= kMaxRegularHeapObjectSize - *state.reserved_size) {
+ state.reserved_size =
+ static_cast<uint32_t>(*state.reserved_size + *new_size);
+ folded_into[&alloc] = state.last_allocation;
+ uint32_t& max_reserved_size = reserved_size[state.last_allocation];
+ max_reserved_size = std::max(max_reserved_size, *state.reserved_size);
+ return;
+ }
+ state.last_allocation = &alloc;
+ state.reserved_size = base::nullopt;
+ if (new_size.has_value() && *new_size <= kMaxRegularHeapObjectSize) {
+ state.reserved_size = static_cast<uint32_t>(*new_size);
+ }
+ // We might be re-visiting the current block. In this case, we need to remove
+ // an allocation that can no longer be folded.
+ reserved_size.erase(&alloc);
+ folded_into.erase(&alloc);
+}
+
+void MemoryAnalyzer::ProcessStore(OpIndex store, OpIndex object) {
+ if (SkipWriteBarrier(input_graph.Get(object))) {
+ skipped_write_barriers.insert(store);
+ } else {
+ // We might be re-visiting the current block. In this case, we need to
+ // still update the information.
+ skipped_write_barriers.erase(store);
+ }
+}
+
+void MemoryAnalyzer::MergeCurrentStateIntoSuccessor(const Block* successor) {
+ base::Optional<BlockState>& target_state = block_states[successor->index()];
+ if (!target_state.has_value()) {
+ target_state = state;
+ return;
+ }
+ // All predecessors need to have the same last allocation for us to continue
+ // folding into it.
+ if (target_state->last_allocation != state.last_allocation) {
+ target_state = BlockState();
+ return;
+ }
+ // We take the maximum allocation size of all predecessors. If the size is
+ // unknown because it is dynamic, we remember the allocation to eliminate
+ // write barriers.
+ if (target_state->reserved_size.has_value() &&
+ state.reserved_size.has_value()) {
+ target_state->reserved_size =
+ std::max(*target_state->reserved_size, *state.reserved_size);
+ } else {
+ target_state->reserved_size = base::nullopt;
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h b/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h
new file mode 100644
index 0000000000..22231fce55
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/memory-optimization-reducer.h
@@ -0,0 +1,269 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_MEMORY_OPTIMIZATION_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_MEMORY_OPTIMIZATION_REDUCER_H_
+
+#include "src/base/template-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/codegen/external-reference.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/utils.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+const TSCallDescriptor* CreateAllocateBuiltinDescriptor(Zone* zone);
+
+// The main purpose of memory optimization is folding multiple allocations into
+// one. For this, the first allocation reserves additional space that is
+// consumed by subsequent allocations, which only move the allocation top
+// pointer and are therefore guaranteed to succeed. Another nice side-effect of
+// allocation folding is that more stores are performed on the most recent
+// allocation, which allows us to eliminate the write barrier for the store.
+//
+// This analysis works by keeping track of the most recent non-folded
+// allocation, as well as the number of bytes this allocation needs to reserve
+// to satisfy all subsequent allocations.
+// We can do write barrier elimination across loops if the loop does not contain
+// any potentially allocating operations.
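+//
+// A sketch of the folding itself (assuming two fixed-size young allocations
+// in the same basic block): the first allocation is widened to reserve
+// size1 + size2 bytes, and the second allocation is lowered to a plain bump
+// of the allocation top pointer, so it cannot fail and stores into it need
+// no write barrier.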
+struct MemoryAnalyzer {
+ Zone* phase_zone;
+ const Graph& input_graph;
+ MemoryAnalyzer(Zone* phase_zone, const Graph& input_graph)
+ : phase_zone(phase_zone), input_graph(input_graph) {}
+
+ struct BlockState {
+ const AllocateOp* last_allocation = nullptr;
+ base::Optional<uint32_t> reserved_size = base::nullopt;
+
+ bool operator!=(const BlockState& other) {
+ return last_allocation != other.last_allocation ||
+ reserved_size != other.reserved_size;
+ }
+ };
+ FixedSidetable<base::Optional<BlockState>, BlockIndex> block_states{
+ input_graph.block_count(), phase_zone};
+ ZoneUnorderedMap<const AllocateOp*, const AllocateOp*> folded_into{
+ phase_zone};
+ ZoneUnorderedSet<OpIndex> skipped_write_barriers{phase_zone};
+ ZoneUnorderedMap<const AllocateOp*, uint32_t> reserved_size{phase_zone};
+ BlockIndex current_block = BlockIndex(0);
+ BlockState state;
+
+ bool SkipWriteBarrier(const Operation& object) {
+ if (ShouldSkipOptimizationStep()) return false;
+ if (state.last_allocation == nullptr ||
+ state.last_allocation->type != AllocationType::kYoung) {
+ return false;
+ }
+ if (state.last_allocation == &object) {
+ return true;
+ }
+ if (!object.Is<AllocateOp>()) return false;
+ auto it = folded_into.find(&object.Cast<AllocateOp>());
+ return it != folded_into.end() && it->second == state.last_allocation;
+ }
+
+ bool IsFoldedAllocation(OpIndex op) {
+ return folded_into.count(
+ input_graph.Get(op).template TryCast<AllocateOp>());
+ }
+
+ base::Optional<uint32_t> ReservedSize(OpIndex alloc) {
+ if (auto it = reserved_size.find(
+ input_graph.Get(alloc).template TryCast<AllocateOp>());
+ it != reserved_size.end()) {
+ return it->second;
+ }
+ return base::nullopt;
+ }
+
+ void Run();
+
+ void Process(const Operation& op);
+ void ProcessBlockTerminator(const Operation& op);
+ void ProcessAllocation(const AllocateOp& alloc);
+ void ProcessStore(OpIndex store, OpIndex object);
+ void MergeCurrentStateIntoSuccessor(const Block* successor);
+};
+
+struct MemoryOptimizationReducerArgs {
+ Isolate* isolate;
+};
+
+template <class Next>
+class MemoryOptimizationReducer : public Next {
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ using ArgT = base::append_tuple_type<typename Next::ArgT,
+ MemoryOptimizationReducerArgs>;
+
+ template <class... Args>
+ explicit MemoryOptimizationReducer(const std::tuple<Args...>& args)
+ : Next(args),
+ isolate_(std::get<MemoryOptimizationReducerArgs>(args).isolate) {}
+
+ void Analyze() {
+ analyzer_.emplace(Asm().phase_zone(), Asm().input_graph());
+ analyzer_->Run();
+ Next::Analyze();
+ }
+
+ OpIndex ReduceStore(OpIndex base, OpIndex index, OpIndex value,
+ StoreOp::Kind kind, MemoryRepresentation stored_rep,
+ WriteBarrierKind write_barrier, int32_t offset,
+ uint8_t element_scale) {
+ if (!ShouldSkipOptimizationStep() &&
+ analyzer_->skipped_write_barriers.count(
+ Asm().current_operation_origin())) {
+ write_barrier = WriteBarrierKind::kNoWriteBarrier;
+ }
+ return Next::ReduceStore(base, index, value, kind, stored_rep,
+ write_barrier, offset, element_scale);
+ }
+
+ OpIndex ReduceAllocate(OpIndex size, AllocationType type,
+ AllowLargeObjects allow_large_objects) {
+ DCHECK_EQ(type, any_of(AllocationType::kYoung, AllocationType::kOld));
+
+ if (v8_flags.single_generation && type == AllocationType::kYoung) {
+ type = AllocationType::kOld;
+ }
+
+ OpIndex top_address = Asm().ExternalConstant(
+ type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_top_address(isolate_)
+ : ExternalReference::old_space_allocation_top_address(isolate_));
+ Variable top =
+ Asm().NewFreshVariable(RegisterRepresentation::PointerSized());
+ Asm().Set(top, Asm().LoadOffHeap(top_address,
+ MemoryRepresentation::PointerSized()));
+
+ if (analyzer_->IsFoldedAllocation(Asm().current_operation_origin())) {
+ Asm().StoreOffHeap(top_address, Asm().PointerAdd(Asm().Get(top), size),
+ MemoryRepresentation::PointerSized());
+ return Asm().BitcastWordToTagged(Asm().PointerAdd(
+ Asm().Get(top), Asm().IntPtrConstant(kHeapObjectTag)));
+ }
+
+ OpIndex allocate_builtin;
+ if (type == AllocationType::kYoung) {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin =
+ Asm().BuiltinCode(Builtin::kAllocateInYoungGeneration, isolate_);
+ } else {
+ allocate_builtin = Asm().BuiltinCode(
+ Builtin::kAllocateRegularInYoungGeneration, isolate_);
+ }
+ } else {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin =
+ Asm().BuiltinCode(Builtin::kAllocateInOldGeneration, isolate_);
+ } else {
+ allocate_builtin = Asm().BuiltinCode(
+ Builtin::kAllocateRegularInOldGeneration, isolate_);
+ }
+ }
+
+ Block* call_runtime = Asm().NewBlock();
+ Block* done = Asm().NewBlock();
+
+ OpIndex limit_address = Asm().ExternalConstant(
+ type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_limit_address(isolate_)
+ : ExternalReference::old_space_allocation_limit_address(isolate_));
+ OpIndex limit =
+ Asm().LoadOffHeap(limit_address, MemoryRepresentation::PointerSized());
+
+ OpIndex reservation_size;
+ if (auto c = analyzer_->ReservedSize(Asm().current_operation_origin())) {
+ reservation_size = Asm().UintPtrConstant(*c);
+ } else {
+ reservation_size = size;
+ }
+ // Check if we can do bump pointer allocation here.
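+    // In essence: if top + reservation_size < limit, the new object starts at
+    // the old top (tagged with kHeapObjectTag); otherwise we fall back to the
+    // allocate builtin in {call_runtime} below.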
+ bool reachable = true;
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ reachable = Asm().GotoIfNot(
+ Asm().UintPtrLessThan(
+ size, Asm().IntPtrConstant(kMaxRegularHeapObjectSize)),
+ call_runtime, BranchHint::kTrue);
+ }
+ if (reachable) {
+ Asm().Branch(
+ Asm().UintPtrLessThan(
+ Asm().PointerAdd(Asm().Get(top), reservation_size), limit),
+ done, call_runtime, BranchHint::kTrue);
+ }
+
+ // Call the runtime if bump pointer area exhausted.
+ if (Asm().Bind(call_runtime)) {
+ OpIndex allocated = Asm().Call(allocate_builtin, {reservation_size},
+ AllocateBuiltinDescriptor());
+ Asm().Set(top, Asm().PointerSub(Asm().BitcastTaggedToWord(allocated),
+ Asm().IntPtrConstant(kHeapObjectTag)));
+ Asm().Goto(done);
+ }
+
+ Asm().BindReachable(done);
+ // Compute the new top and write it back.
+ Asm().StoreOffHeap(top_address, Asm().PointerAdd(Asm().Get(top), size),
+ MemoryRepresentation::PointerSized());
+ return Asm().BitcastWordToTagged(
+ Asm().PointerAdd(Asm().Get(top), Asm().IntPtrConstant(kHeapObjectTag)));
+ }
+
+ OpIndex ReduceDecodeExternalPointer(OpIndex handle, ExternalPointerTag tag) {
+#ifdef V8_ENABLE_SANDBOX
+ // Decode loaded external pointer.
+ //
+ // Here we access the external pointer table through an ExternalReference.
+ // Alternatively, we could also hardcode the address of the table since it
+ // is never reallocated. However, in that case we must be able to guarantee
+ // that the generated code is never executed under a different Isolate, as
+ // that would allow access to external objects from different Isolates. It
+ // also would break if the code is serialized/deserialized at some point.
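+    // Roughly: entry = table[handle >> kExternalPointerIndexShift];
+    //          pointer = entry & ~tag;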
+ OpIndex table_address =
+ IsSharedExternalPointerType(tag)
+ ? Asm().LoadOffHeap(
+ Asm().ExternalConstant(
+ ExternalReference::
+ shared_external_pointer_table_address_address(
+ isolate_)),
+ MemoryRepresentation::PointerSized())
+ : Asm().ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate_));
+ OpIndex table = Asm().LoadOffHeap(
+ table_address, Internals::kExternalPointerTableBufferOffset,
+ MemoryRepresentation::PointerSized());
+ OpIndex index = Asm().ShiftRightLogical(handle, kExternalPointerIndexShift,
+ WordRepresentation::Word32());
+ OpIndex pointer =
+ Asm().LoadOffHeap(table, Asm().ChangeUint32ToUint64(index), 0,
+ MemoryRepresentation::PointerSized());
+ pointer = Asm().Word64BitwiseAnd(pointer, Asm().Word64Constant(~tag));
+ return pointer;
+#else // V8_ENABLE_SANDBOX
+ UNREACHABLE();
+#endif // V8_ENABLE_SANDBOX
+ }
+
+ private:
+ base::Optional<MemoryAnalyzer> analyzer_;
+ Isolate* isolate_;
+ const TSCallDescriptor* allocate_builtin_descriptor_ = nullptr;
+
+ const TSCallDescriptor* AllocateBuiltinDescriptor() {
+ if (allocate_builtin_descriptor_ == nullptr) {
+ allocate_builtin_descriptor_ =
+ CreateAllocateBuiltinDescriptor(Asm().graph_zone());
+ }
+ return allocate_builtin_descriptor_;
+ }
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_MEMORY_OPTIMIZATION_REDUCER_H_
diff --git a/deps/v8/src/compiler/turboshaft/operation-matching.h b/deps/v8/src/compiler/turboshaft/operation-matching.h
index 7082b8293c..36bec7663e 100644
--- a/deps/v8/src/compiler/turboshaft/operation-matching.h
+++ b/deps/v8/src/compiler/turboshaft/operation-matching.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_TURBOSHAFT_OPERATION_MATCHING_H_
#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
namespace v8 ::internal::compiler::turboshaft {
@@ -93,7 +94,7 @@ class OperationMatching {
int64_t* signed_constant = nullptr) {
const ConstantOp* op = TryCast<ConstantOp>(matched);
if (!op) return false;
- switch (op->Representation()) {
+ switch (op->rep) {
case RegisterRepresentation::Word32():
if (rep != WordRepresentation::Word32()) return false;
break;
@@ -106,8 +107,26 @@ class OperationMatching {
default:
return false;
}
- if (unsigned_constant) *unsigned_constant = op->integral();
- if (signed_constant) *signed_constant = op->signed_integral();
+ if (unsigned_constant) {
+ switch (rep.value()) {
+ case WordRepresentation::Word32():
+ *unsigned_constant = static_cast<uint32_t>(op->integral());
+ break;
+ case WordRepresentation::Word64():
+ *unsigned_constant = op->integral();
+ break;
+ }
+ }
+ if (signed_constant) {
+ switch (rep.value()) {
+ case WordRepresentation::Word32():
+ *signed_constant = static_cast<int32_t>(op->signed_integral());
+ break;
+ case WordRepresentation::Word64():
+ *signed_constant = op->signed_integral();
+ break;
+ }
+ }
return true;
}
diff --git a/deps/v8/src/compiler/turboshaft/operations.cc b/deps/v8/src/compiler/turboshaft/operations.cc
index 96d3c61c92..d9b01df05e 100644
--- a/deps/v8/src/compiler/turboshaft/operations.cc
+++ b/deps/v8/src/compiler/turboshaft/operations.cc
@@ -7,6 +7,8 @@
#include <atomic>
#include <sstream>
+#include "src/base/logging.h"
+#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/codegen/machine-type.h"
#include "src/common/globals.h"
@@ -19,6 +21,101 @@
namespace v8::internal::compiler::turboshaft {
+void Print(const Operation& op) { std::cout << op << "\n"; }
+
+bool AllowImplicitRepresentationChange(RegisterRepresentation actual_rep,
+ RegisterRepresentation expected_rep) {
+ if (actual_rep == expected_rep) {
+ return true;
+ }
+ switch (expected_rep.value()) {
+ case RegisterRepresentation::Word32():
+ // We allow implicit 64- to 32-bit truncation.
+ if (actual_rep == RegisterRepresentation::Word64()) {
+ return true;
+ }
+ // We allow implicit tagged -> untagged conversions.
+ // Even without pointer compression, we use `Word32And` for Smi-checks on
+ // tagged values.
+ if (actual_rep == any_of(RegisterRepresentation::Tagged(),
+ RegisterRepresentation::Compressed())) {
+ return true;
+ }
+ break;
+ case RegisterRepresentation::Word64():
+ // We allow implicit tagged -> untagged conversions.
+ if (kTaggedSize == kInt64Size &&
+ actual_rep == RegisterRepresentation::Tagged()) {
+ return true;
+ }
+ break;
+ case RegisterRepresentation::Tagged():
+ // We allow implicit untagged -> tagged conversions. This is only safe for
+ // Smi values.
+ if (actual_rep == RegisterRepresentation::PointerSized()) {
+ return true;
+ }
+ break;
+ case RegisterRepresentation::Compressed():
+ // Compression is a no-op.
+ if (actual_rep == any_of(RegisterRepresentation::Tagged(),
+ RegisterRepresentation::PointerSized(),
+ RegisterRepresentation::Word32())) {
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool ValidOpInputRep(
+ const Graph& graph, OpIndex input,
+ std::initializer_list<RegisterRepresentation> expected_reps,
+ base::Optional<size_t> projection_index) {
+ base::Vector<const RegisterRepresentation> input_reps =
+ graph.Get(input).outputs_rep();
+ RegisterRepresentation input_rep;
+ if (projection_index) {
+ if (*projection_index < input_reps.size()) {
+ input_rep = input_reps[*projection_index];
+ } else {
+ std::cerr << "Turboshaft operation has input #" << input
+ << " with wrong arity.\n";
+ std::cerr << "Input has results " << PrintCollection(input_reps)
+ << ", but expected at least " << *projection_index
+ << " results.\n";
+ return false;
+ }
+ } else if (input_reps.size() == 1) {
+ input_rep = input_reps[0];
+ } else {
+ std::cerr << "Turboshaft operation has input #" << input
+ << " with wrong arity.\n";
+ std::cerr << "Expected a single output but found " << input_reps.size()
+ << ".\n";
+ return false;
+ }
+ for (RegisterRepresentation expected_rep : expected_reps) {
+ if (AllowImplicitRepresentationChange(input_rep, expected_rep)) {
+ return true;
+ }
+ }
+ std::cerr << "Turboshaft operation has input #" << input
+ << " with wrong representation.\n";
+ std::cerr << "Expected " << (expected_reps.size() > 1 ? "one of " : "")
+ << PrintCollection(expected_reps).WithoutBrackets() << " but found "
+ << input_rep << ".\n";
+ return false;
+}
+
+bool ValidOpInputRep(const Graph& graph, OpIndex input,
+ RegisterRepresentation expected_rep,
+ base::Optional<size_t> projection_index) {
+ return ValidOpInputRep(graph, input, {expected_rep}, projection_index);
+}
+
const char* OpcodeName(Opcode opcode) {
#define OPCODE_NAME(Name) #Name,
const char* table[kNumberOfOpcodes] = {
@@ -219,6 +316,23 @@ std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
}
}
+std::ostream& operator<<(std::ostream& os, ChangeOrDeoptOp::Kind kind) {
+ switch (kind) {
+ case ChangeOrDeoptOp::Kind::kUint32ToInt32:
+ return os << "Uint32ToInt32";
+ case ChangeOrDeoptOp::Kind::kInt64ToInt32:
+ return os << "Int64ToInt32";
+ case ChangeOrDeoptOp::Kind::kUint64ToInt32:
+ return os << "Uint64ToInt32";
+ case ChangeOrDeoptOp::Kind::kUint64ToInt64:
+ return os << "Uint64ToInt64";
+ case ChangeOrDeoptOp::Kind::kFloat64ToInt32:
+ return os << "Float64ToInt32";
+ case ChangeOrDeoptOp::Kind::kFloat64ToInt64:
+ return os << "Float64ToInt64";
+ }
+}
+
std::ostream& operator<<(std::ostream& os, TryChangeOp::Kind kind) {
switch (kind) {
case TryChangeOp::Kind::kSignedFloatTruncateOverflowUndefined:
@@ -268,6 +382,13 @@ std::ostream& operator<<(std::ostream& os, FrameConstantOp::Kind kind) {
}
}
+std::ostream& operator<<(std::ostream& os, TagKind kind) {
+ switch (kind) {
+ case TagKind::kSmiTag:
+ return os << "SmiTag";
+ }
+}
+
void Operation::PrintInputs(std::ostream& os,
const std::string& op_index_prefix) const {
switch (opcode) {
@@ -292,7 +413,7 @@ void Operation::PrintOptions(std::ostream& os) const {
}
void PendingLoopPhiOp::PrintOptions(std::ostream& os) const {
- os << "[" << rep << ", #o" << old_backedge_index.id() << "]";
+ os << "[" << rep << ", #o" << data.old_backedge_index.id() << "]";
}
void ConstantOp::PrintOptions(std::ostream& os) const {
@@ -394,6 +515,20 @@ void StoreOp::PrintOptions(std::ostream& os) const {
os << "]";
}
+void AllocateOp::PrintOptions(std::ostream& os) const {
+ os << "[";
+ os << type << ", ";
+ os << (allow_large_objects == AllowLargeObjects::kTrue ? "allow large objects"
+ : "no large objects");
+ os << "]";
+}
+
+void DecodeExternalPointerOp::PrintOptions(std::ostream& os) const {
+ os << "[";
+ os << "tag: " << std::hex << tag << std::dec;
+ os << "]";
+}
+
void FrameStateOp::PrintOptions(std::ostream& os) const {
os << "[";
os << (inlined ? "inlined" : "not inlined");
@@ -540,6 +675,13 @@ void OverflowCheckedBinopOp::PrintOptions(std::ostream& os) const {
os << "]";
}
+std::ostream& operator<<(std::ostream& os, OpIndex idx) {
+ if (!idx.valid()) {
+ return os << "<invalid OpIndex>";
+ }
+ return os << idx.id();
+}
+
std::ostream& operator<<(std::ostream& os, BlockIndex b) {
if (!b.valid()) {
return os << "<invalid block>";
@@ -552,22 +694,16 @@ std::ostream& operator<<(std::ostream& os, const Block* b) {
}
std::ostream& operator<<(std::ostream& os, OpProperties opProperties) {
- if (opProperties == OpProperties::Pure()) {
- os << "Pure";
- } else if (opProperties == OpProperties::Reading()) {
- os << "Reading";
- } else if (opProperties == OpProperties::Writing()) {
- os << "Writing";
- } else if (opProperties == OpProperties::CanAbort()) {
- os << "CanAbort";
- } else if (opProperties == OpProperties::AnySideEffects()) {
- os << "AnySideEffects";
- } else if (opProperties == OpProperties::BlockTerminator()) {
- os << "BlockTerminator";
- } else {
- UNREACHABLE();
+#define PRINT_PROPERTY(Name, ...) \
+ if (opProperties == OpProperties::Name()) { \
+ return os << #Name; \
}
- return os;
+
+ ALL_OP_PROPERTIES(PRINT_PROPERTY)
+
+#undef PRINT_PROPERTY
+
+ UNREACHABLE();
}
void SwitchOp::PrintOptions(std::ostream& os) const {
@@ -578,13 +714,292 @@ void SwitchOp::PrintOptions(std::ostream& os) const {
os << " default: " << default_case << "]";
}
+std::ostream& operator<<(std::ostream& os, ObjectIsOp::Kind kind) {
+ switch (kind) {
+ case ObjectIsOp::Kind::kArrayBufferView:
+ return os << "ArrayBufferView";
+ case ObjectIsOp::Kind::kBigInt:
+ return os << "BigInt";
+ case ObjectIsOp::Kind::kBigInt64:
+ return os << "BigInt64";
+ case ObjectIsOp::Kind::kCallable:
+ return os << "Callable";
+ case ObjectIsOp::Kind::kConstructor:
+ return os << "Constructor";
+ case ObjectIsOp::Kind::kDetectableCallable:
+ return os << "DetectableCallable";
+ case ObjectIsOp::Kind::kInternalizedString:
+ return os << "InternalizedString";
+ case ObjectIsOp::Kind::kNonCallable:
+ return os << "NonCallable";
+ case ObjectIsOp::Kind::kNumber:
+ return os << "Number";
+ case ObjectIsOp::Kind::kReceiver:
+ return os << "Receiver";
+ case ObjectIsOp::Kind::kReceiverOrNullOrUndefined:
+ return os << "ReceiverOrNullOrUndefined";
+ case ObjectIsOp::Kind::kSmi:
+ return os << "Smi";
+ case ObjectIsOp::Kind::kString:
+ return os << "String";
+ case ObjectIsOp::Kind::kSymbol:
+ return os << "Symbol";
+ case ObjectIsOp::Kind::kUndetectable:
+ return os << "Undetectable";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os,
+ ObjectIsOp::InputAssumptions input_assumptions) {
+ switch (input_assumptions) {
+ case ObjectIsOp::InputAssumptions::kNone:
+ return os << "None";
+ case ObjectIsOp::InputAssumptions::kHeapObject:
+ return os << "HeapObject";
+ case ObjectIsOp::InputAssumptions::kBigInt:
+ return os << "BigInt";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, FloatIsOp::Kind kind) {
+ switch (kind) {
+ case FloatIsOp::Kind::kNaN:
+ return os << "NaN";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, ConvertToObjectOp::Kind kind) {
+ switch (kind) {
+ case ConvertToObjectOp::Kind::kBigInt:
+ return os << "BigInt";
+ case ConvertToObjectOp::Kind::kBoolean:
+ return os << "Boolean";
+ case ConvertToObjectOp::Kind::kHeapNumber:
+ return os << "HeapNumber";
+ case ConvertToObjectOp::Kind::kNumber:
+ return os << "Number";
+ case ConvertToObjectOp::Kind::kSmi:
+ return os << "Smi";
+ case ConvertToObjectOp::Kind::kString:
+ return os << "String";
+ }
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ ConvertToObjectOp::InputInterpretation input_interpretation) {
+ switch (input_interpretation) {
+ case ConvertToObjectOp::InputInterpretation::kSigned:
+ return os << "Signed";
+ case ConvertToObjectOp::InputInterpretation::kUnsigned:
+ return os << "Unsigned";
+ case ConvertToObjectOp::InputInterpretation::kCharCode:
+ return os << "CharCode";
+ case ConvertToObjectOp::InputInterpretation::kCodePoint:
+ return os << "CodePoint";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os,
+ ConvertToObjectOrDeoptOp::Kind kind) {
+ switch (kind) {
+ case ConvertToObjectOrDeoptOp::Kind::kSmi:
+ return os << "Smi";
+ }
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ ConvertToObjectOrDeoptOp::InputInterpretation input_interpretation) {
+ switch (input_interpretation) {
+ case ConvertToObjectOrDeoptOp::InputInterpretation::kSigned:
+ return os << "Signed";
+ case ConvertToObjectOrDeoptOp::InputInterpretation::kUnsigned:
+ return os << "Unsigned";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os,
+ ConvertObjectToPrimitiveOp::Kind kind) {
+ switch (kind) {
+ case ConvertObjectToPrimitiveOp::Kind::kInt32:
+ return os << "Int32";
+ case ConvertObjectToPrimitiveOp::Kind::kInt64:
+ return os << "Int64";
+ case ConvertObjectToPrimitiveOp::Kind::kUint32:
+ return os << "Uint32";
+ case ConvertObjectToPrimitiveOp::Kind::kBit:
+ return os << "Bit";
+ case ConvertObjectToPrimitiveOp::Kind::kFloat64:
+ return os << "Float64";
+ }
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ ConvertObjectToPrimitiveOp::InputAssumptions input_assumptions) {
+ switch (input_assumptions) {
+ case ConvertObjectToPrimitiveOp::InputAssumptions::kObject:
+ return os << "Object";
+ case ConvertObjectToPrimitiveOp::InputAssumptions::kSmi:
+ return os << "Smi";
+ case ConvertObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ }
+}
+
+std::ostream& operator<<(
+ std::ostream& os, ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind kind) {
+ switch (kind) {
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt32:
+ return os << "Int32";
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kInt64:
+ return os << "Int64";
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kFloat64:
+ return os << "Float64";
+ case ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind::kArrayIndex:
+ return os << "ArrayIndex";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind kind) {
+ switch (kind) {
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumber:
+ return os << "Number";
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kNumberOrString:
+ return os << "NumberOrString";
+ case ConvertObjectToPrimitiveOrDeoptOp::ObjectKind::kSmi:
+ return os << "Smi";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os,
+ TruncateObjectToPrimitiveOp::Kind kind) {
+ switch (kind) {
+ case TruncateObjectToPrimitiveOp::Kind::kInt32:
+ return os << "Int32";
+ case TruncateObjectToPrimitiveOp::Kind::kInt64:
+ return os << "Int64";
+ case TruncateObjectToPrimitiveOp::Kind::kBit:
+ return os << "Bit";
+ }
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ TruncateObjectToPrimitiveOp::InputAssumptions input_assumptions) {
+ switch (input_assumptions) {
+ case TruncateObjectToPrimitiveOp::InputAssumptions::kBigInt:
+ return os << "BigInt";
+ case TruncateObjectToPrimitiveOp::InputAssumptions::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ case TruncateObjectToPrimitiveOp::InputAssumptions::kHeapObject:
+ return os << "HeapObject";
+ case TruncateObjectToPrimitiveOp::InputAssumptions::kObject:
+ return os << "Object";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, NewArrayOp::Kind kind) {
+ switch (kind) {
+ case NewArrayOp::Kind::kDouble:
+ return os << "Double";
+ case NewArrayOp::Kind::kObject:
+ return os << "Object";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, DoubleArrayMinMaxOp::Kind kind) {
+ switch (kind) {
+ case DoubleArrayMinMaxOp::Kind::kMin:
+ return os << "Min";
+ case DoubleArrayMinMaxOp::Kind::kMax:
+ return os << "Max";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, BigIntBinopOp::Kind kind) {
+ switch (kind) {
+ case BigIntBinopOp::Kind::kAdd:
+ return os << "Add";
+ case BigIntBinopOp::Kind::kSub:
+ return os << "Sub";
+ case BigIntBinopOp::Kind::kMul:
+ return os << "Mul";
+ case BigIntBinopOp::Kind::kDiv:
+ return os << "Div";
+ case BigIntBinopOp::Kind::kMod:
+ return os << "Mod";
+ case BigIntBinopOp::Kind::kBitwiseAnd:
+ return os << "BitwiseAnd";
+ case BigIntBinopOp::Kind::kBitwiseOr:
+ return os << "BitwiseOr";
+ case BigIntBinopOp::Kind::kBitwiseXor:
+ return os << "BitwiseXor";
+ case BigIntBinopOp::Kind::kShiftLeft:
+ return os << "ShiftLeft";
+ case BigIntBinopOp::Kind::kShiftRightArithmetic:
+ return os << "ShiftRightArithmetic";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, BigIntComparisonOp::Kind kind) {
+ switch (kind) {
+ case BigIntComparisonOp::Kind::kLessThan:
+ return os << "LessThan";
+ case BigIntComparisonOp::Kind::kLessThanOrEqual:
+ return os << "LessThanOrEqual";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, BigIntUnaryOp::Kind kind) {
+ switch (kind) {
+ case BigIntUnaryOp::Kind::kNegate:
+ return os << "Negate";
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, StringAtOp::Kind kind) {
+ switch (kind) {
+ case StringAtOp::Kind::kCharCode:
+ return os << "CharCode";
+ case StringAtOp::Kind::kCodePoint:
+ return os << "CodePoint";
+ }
+}
+
+#ifdef V8_INTL_SUPPORT
+std::ostream& operator<<(std::ostream& os, StringToCaseIntlOp::Kind kind) {
+ switch (kind) {
+ case StringToCaseIntlOp::Kind::kLower:
+ return os << "Lower";
+ case StringToCaseIntlOp::Kind::kUpper:
+ return os << "Upper";
+ }
+}
+#endif // V8_INTL_SUPPORT
+
+std::ostream& operator<<(std::ostream& os, StringComparisonOp::Kind kind) {
+ switch (kind) {
+ case StringComparisonOp::Kind::kLessThan:
+ return os << "LessThan";
+ case StringComparisonOp::Kind::kLessThanOrEqual:
+ return os << "LessThanOrEqual";
+ }
+}
+
std::string Operation::ToString() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
-base::LazyMutex SupportedOperations::mutex_;
+base::LazyMutex SupportedOperations::mutex_ = LAZY_MUTEX_INITIALIZER;
SupportedOperations SupportedOperations::instance_;
bool SupportedOperations::initialized_;
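As a quick illustration (not part of the patch itself), the kind printers defined above compose with standard streams. The helper name below is made up; the sketch assumes the surrounding turboshaft headers and namespace are available.

  #include <sstream>
  #include <string>

  // Hypothetical helper relying only on the operator<< overloads above.
  std::string DescribeBigIntBinop(BigIntBinopOp::Kind kind) {
    std::ostringstream os;
    os << "BigIntBinop[" << kind << "]";  // e.g. "BigIntBinop[ShiftRightArithmetic]"
    return os.str();
  }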
diff --git a/deps/v8/src/compiler/turboshaft/operations.h b/deps/v8/src/compiler/turboshaft/operations.h
index e240c41115..8ef0f79b8e 100644
--- a/deps/v8/src/compiler/turboshaft/operations.h
+++ b/deps/v8/src/compiler/turboshaft/operations.h
@@ -17,14 +17,18 @@
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
+#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/base/vector.h"
#include "src/codegen/external-reference.h"
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/globals.h"
+#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/fast-hash.h"
+#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/types.h"
#include "src/compiler/turboshaft/utils.h"
#include "src/compiler/write-barrier-kind.h"
@@ -41,8 +45,8 @@ enum class TrapId : uint32_t;
namespace v8::internal::compiler::turboshaft {
class Block;
struct FrameStateData;
-class Variable;
class Graph;
+struct FrameStateOp;
// DEFINING NEW OPERATIONS
// =======================
@@ -65,7 +69,14 @@ class Graph;
// non-static method `Properties()` if the properties depend on the particular
// operation and not just the opcode.
+#ifdef V8_INTL_SUPPORT
+#define TURBOSHAFT_INTL_OPERATION_LIST(V) V(StringToCaseIntl)
+#else
+#define TURBOSHAFT_INTL_OPERATION_LIST(V)
+#endif // V8_INTL_SUPPORT
+
#define TURBOSHAFT_OPERATION_LIST(V) \
+ TURBOSHAFT_INTL_OPERATION_LIST(V) \
V(WordBinop) \
V(FloatBinop) \
V(OverflowCheckedBinop) \
@@ -75,6 +86,7 @@ class Graph;
V(Equal) \
V(Comparison) \
V(Change) \
+ V(ChangeOrDeopt) \
V(TryChange) \
V(Float64InsertWord32) \
V(TaggedBitcast) \
@@ -83,6 +95,8 @@ class Graph;
V(Constant) \
V(Load) \
V(Store) \
+ V(Allocate) \
+ V(DecodeExternalPointer) \
V(Retain) \
V(Parameter) \
V(OsrValue) \
@@ -90,21 +104,49 @@ class Graph;
V(StackPointerGreaterThan) \
V(StackSlot) \
V(FrameConstant) \
- V(CheckLazyDeopt) \
V(Deoptimize) \
V(DeoptimizeIf) \
V(TrapIf) \
V(Phi) \
V(FrameState) \
V(Call) \
+ V(CallAndCatchException) \
+ V(LoadException) \
V(TailCall) \
V(Unreachable) \
V(Return) \
V(Branch) \
- V(CatchException) \
V(Switch) \
V(Tuple) \
- V(Projection)
+ V(Projection) \
+ V(StaticAssert) \
+ V(CheckTurboshaftTypeOf) \
+ V(ObjectIs) \
+ V(FloatIs) \
+ V(ConvertToObject) \
+ V(ConvertToObjectOrDeopt) \
+ V(ConvertObjectToPrimitive) \
+ V(ConvertObjectToPrimitiveOrDeopt) \
+ V(TruncateObjectToPrimitive) \
+ V(Tag) \
+ V(Untag) \
+ V(NewConsString) \
+ V(NewArray) \
+ V(DoubleArrayMinMax) \
+ V(LoadFieldByIndex) \
+ V(DebugBreak) \
+ V(BigIntBinop) \
+ V(BigIntEqual) \
+ V(BigIntComparison) \
+ V(BigIntUnary) \
+ V(LoadRootRegister) \
+ V(StringAt) \
+ V(StringLength) \
+ V(StringIndexOf) \
+ V(StringFromCodePointAt) \
+ V(StringSubstring) \
+ V(StringEqual) \
+ V(StringComparison)
enum class Opcode : uint8_t {
#define ENUM_CONSTANT(Name) k##Name,
@@ -122,110 +164,17 @@ constexpr uint16_t kNumberOfOpcodes =
0 TURBOSHAFT_OPERATION_LIST(COUNT_OPCODES);
#undef COUNT_OPCODES
-// Operations are stored in possibly multiple sequential storage slots.
-using OperationStorageSlot = std::aligned_storage_t<8, 8>;
-// Operations occupy at least 2 slots, therefore we assign one id per two slots.
-constexpr size_t kSlotsPerId = 2;
-
-// `OpIndex` is an offset from the beginning of the operations buffer.
-// Compared to `Operation*`, it is more memory efficient (32bit) and stable when
-// the operations buffer is re-allocated.
-class OpIndex {
- public:
- explicit constexpr OpIndex(uint32_t offset) : offset_(offset) {
- DCHECK_EQ(offset % sizeof(OperationStorageSlot), 0);
- }
- constexpr OpIndex() : offset_(std::numeric_limits<uint32_t>::max()) {}
-
- uint32_t id() const {
- // Operations are stored at an offset that's a multiple of
- // `sizeof(OperationStorageSlot)`. In addition, an operation occupies at
- // least `kSlotsPerId` many `OperationSlot`s. Therefore, we can assign id's
- // by dividing by `kSlotsPerId`. A compact id space is important, because it
- // makes side-tables smaller.
- DCHECK_EQ(offset_ % sizeof(OperationStorageSlot), 0);
- return offset_ / sizeof(OperationStorageSlot) / kSlotsPerId;
- }
- uint32_t offset() const {
- DCHECK_EQ(offset_ % sizeof(OperationStorageSlot), 0);
- return offset_;
- }
-
- bool valid() const { return *this != Invalid(); }
-
- static constexpr OpIndex Invalid() { return OpIndex(); }
-
- // Encode a sea-of-nodes node id in the `OpIndex` type.
- // Only used for node origins that actually point to sea-of-nodes graph nodes.
- static OpIndex EncodeTurbofanNodeId(uint32_t id) {
- OpIndex result = OpIndex(id * sizeof(OperationStorageSlot));
- result.offset_ += kTurbofanNodeIdFlag;
- return result;
- }
- uint32_t DecodeTurbofanNodeId() const {
- DCHECK(IsTurbofanNodeId());
- return offset_ / sizeof(OperationStorageSlot);
- }
- bool IsTurbofanNodeId() const {
- return offset_ % sizeof(OperationStorageSlot) == kTurbofanNodeIdFlag;
- }
-
- bool operator==(OpIndex other) const { return offset_ == other.offset_; }
- bool operator!=(OpIndex other) const { return offset_ != other.offset_; }
- bool operator<(OpIndex other) const { return offset_ < other.offset_; }
- bool operator>(OpIndex other) const { return offset_ > other.offset_; }
- bool operator<=(OpIndex other) const { return offset_ <= other.offset_; }
- bool operator>=(OpIndex other) const { return offset_ >= other.offset_; }
-
- private:
- uint32_t offset_;
-
- static constexpr uint32_t kTurbofanNodeIdFlag = 1;
-};
-
-template <>
-struct fast_hash<OpIndex> {
- V8_INLINE size_t operator()(OpIndex op) { return op.id(); }
-};
-
-// `BlockIndex` is the index of a bound block.
-// A dominating block always has a smaller index.
-// It corresponds to the ordering of basic blocks in the operations buffer.
-class BlockIndex {
- public:
- explicit constexpr BlockIndex(uint32_t id) : id_(id) {}
- constexpr BlockIndex() : id_(std::numeric_limits<uint32_t>::max()) {}
-
- uint32_t id() const { return id_; }
- bool valid() const { return *this != Invalid(); }
-
- static constexpr BlockIndex Invalid() { return BlockIndex(); }
-
- bool operator==(BlockIndex other) const { return id_ == other.id_; }
- bool operator!=(BlockIndex other) const { return id_ != other.id_; }
- bool operator<(BlockIndex other) const { return id_ < other.id_; }
- bool operator>(BlockIndex other) const { return id_ > other.id_; }
- bool operator<=(BlockIndex other) const { return id_ <= other.id_; }
- bool operator>=(BlockIndex other) const { return id_ >= other.id_; }
-
- private:
- uint32_t id_;
-};
-
-template <>
-struct fast_hash<BlockIndex> {
- V8_INLINE size_t operator()(BlockIndex op) { return op.id(); }
-};
-
-std::ostream& operator<<(std::ostream& os, BlockIndex b);
-std::ostream& operator<<(std::ostream& os, const Block* b);
-
struct OpProperties {
// The operation may read memory or depend on other information beyond its
- // inputs.
+ // inputs. Generating random numbers or other nondeterministic behavior
+ // counts as reading.
const bool can_read;
// The operation may write memory or have other observable side-effects.
+ // Writing to memory allocated as part of the operation does not count, since
+ // it is not observable.
const bool can_write;
+ // The operation can allocate memory on the heap, which might also trigger GC.
+ const bool can_allocate;
// The operation can abort the current execution by throwing an exception or
// deoptimizing.
const bool can_abort;
@@ -233,47 +182,50 @@ struct OpProperties {
const bool is_block_terminator;
// By being const and not being set in the constructor, these properties are
// guaranteed to be derived.
- const bool is_pure =
- !(can_read || can_write || can_abort || is_block_terminator);
+ const bool is_pure_no_allocation = !(can_read || can_write || can_allocate ||
+ can_abort || is_block_terminator);
const bool is_required_when_unused =
can_write || can_abort || is_block_terminator;
- // Nodes that don't read, write and aren't block terminators can be eliminated
- // via value numbering.
+ // Operations that don't read, write, or allocate and aren't block
+ // terminators can be eliminated via value numbering: if there are two
+ // identical operations of which one dominates the other, the dominated one
+ // can be replaced with the dominating one. This is safe even for deopting or
+ // throwing operations, because the first instance would already have aborted
+ // execution, and `!can_read` guarantees deterministic behavior.
const bool can_be_eliminated =
- !(can_read || can_write || is_block_terminator);
+ !(can_read || can_write || can_allocate || is_block_terminator);
- constexpr OpProperties(bool can_read, bool can_write, bool can_abort,
- bool is_block_terminator)
+ constexpr OpProperties(bool can_read, bool can_write, bool can_allocate,
+ bool can_abort, bool is_block_terminator)
: can_read(can_read),
can_write(can_write),
+ can_allocate(can_allocate),
can_abort(can_abort),
is_block_terminator(is_block_terminator) {}
- static constexpr OpProperties Pure() { return {false, false, false, false}; }
- static constexpr OpProperties Reading() {
- return {true, false, false, false};
- }
- static constexpr OpProperties Writing() {
- return {false, true, false, false};
- }
- static constexpr OpProperties CanAbort() {
- return {false, false, true, false};
- }
- static constexpr OpProperties AnySideEffects() {
- return {true, true, true, false};
- }
- static constexpr OpProperties BlockTerminator() {
- return {false, false, false, true};
- }
- static constexpr OpProperties BlockTerminatorWithAnySideEffect() {
- return {true, true, true, true};
- }
- static constexpr OpProperties ReadingAndCanAbort() {
- return {true, false, true, false};
- }
- static constexpr OpProperties WritingAndCanAbort() {
- return {false, true, true, false};
+#define ALL_OP_PROPERTIES(V) \
+ V(PureNoAllocation, false, false, false, false, false) \
+ V(PureMayAllocate, false, false, true, false, false) \
+ V(Reading, true, false, false, false, false) \
+ V(Writing, false, true, false, false, false) \
+ V(CanAbort, false, false, false, true, false) \
+ V(AnySideEffects, true, true, true, true, false) \
+ V(BlockTerminator, false, false, false, false, true) \
+ V(BlockTerminatorWithAnySideEffect, true, true, true, true, true) \
+ V(ReadingAndCanAbort, true, false, false, true, false) \
+ V(WritingAndCanAbort, false, true, false, true, false)
+
+#define DEFINE_OP_PROPERTY(Name, can_read, can_write, can_allocate, can_abort, \
+ is_block_terminator) \
+ static constexpr OpProperties Name() { \
+ return {can_read, can_write, can_allocate, can_abort, \
+ is_block_terminator}; \
}
+
+ ALL_OP_PROPERTIES(DEFINE_OP_PROPERTY)
+#undef DEFINE_OP_PROPERTY
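To make the X-macro concrete, here is a sketch (illustration only, not additional patch content) of what one entry expands to and how the derived flags defined earlier in the struct fall out; the static_asserts assume the complete OpProperties definition is in scope.

  // DEFINE_OP_PROPERTY(Reading, true, false, false, false, false) expands to
  //   static constexpr OpProperties Reading() {
  //     return {true, false, false, false, false};
  //   }
  // The derived flags then follow from the definitions above, for example:
  static_assert(!OpProperties::Reading().can_be_eliminated,
                "reading ops are not removed by value numbering");
  static_assert(OpProperties::CanAbort().can_be_eliminated,
                "deopting/throwing ops may still be value-numbered");
  static_assert(OpProperties::BlockTerminator().is_required_when_unused,
                "block terminators are never treated as dead code");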
+
bool operator==(const OpProperties& other) const {
return can_read == other.can_read && can_write == other.can_write &&
can_abort == other.can_abort &&
@@ -310,6 +262,8 @@ struct alignas(OpIndex) Operation {
return StorageSlotCount(opcode, input_count);
}
+ base::Vector<const RegisterRepresentation> outputs_rep() const;
+
template <class Op>
bool Is() const {
return opcode == Op::opcode;
@@ -362,9 +316,10 @@ std::ostream& operator<<(std::ostream& os, OperationPrintStyle op);
inline std::ostream& operator<<(std::ostream& os, const Operation& op) {
return os << OperationPrintStyle{op};
}
-inline void Print(const Operation& op) { std::cout << op << "\n"; }
+void Print(const Operation& op);
OperationStorageSlot* AllocateOpStorage(Graph* graph, size_t slot_count);
+const Operation& Get(const Graph& graph, OpIndex index);
// Determine if an operation declares `properties`, which means that its
// properties are static and don't depend on inputs or options.
@@ -435,6 +390,9 @@ struct OperationT : Operation {
OperationStorageSlot* ptr =
AllocateOpStorage(graph, StorageSlotCount(input_count));
Derived* result = new (ptr) Derived(std::move(args)...);
+#ifdef DEBUG
+ result->Validate(*graph);
+#endif
// If this DCHECK fails, then the number of inputs specified in the
// operation constructor and in the static New function disagree.
DCHECK_EQ(input_count, result->Operation::input_count);
@@ -489,6 +447,11 @@ struct OperationT : Operation {
PrintOptionsHelper(os, options, std::make_index_sequence<options_count>());
}
+ // Check graph invariants for this operation. Will be invoked in debug mode
+ // immediately upon construction.
+ // Concrete Operation subclasses are expected to re-define it.
+ void Validate(const Graph& graph) const = delete;
+
private:
template <class... T, size_t... I>
static void PrintOptionsHelper(std::ostream& os,
@@ -502,6 +465,15 @@ struct OperationT : Operation {
...);
os << "]";
}
+
+ // All Operations have to define the outputs_rep function, to which
+ // Operation::outputs_rep() will forward, based on their opcode. If you forget
+ // to define it, then Operation::outputs_rep() would forward to itself,
+ // resulting in an infinite loop. To avoid this, we define here in OperationT
+ // a private version of outputs_rep (with no implementation): if an operation
+ // forgets to define outputs_rep, then Operation::outputs_rep() tries to call
+ // this private version, which fails at compile time.
+ base::Vector<const RegisterRepresentation> outputs_rep() const;
};
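The compile-time safety net described in the comment above can be shown with a small standalone sketch (hypothetical names BaseT, GoodOp, ForgetfulOp, OutputsOf and simplified types; not V8 code): a dispatcher outside the base class only compiles when the derived class shadows the private, never-defined declaration with its own definition.

  #include <vector>

  struct BaseT {
   private:
    std::vector<int> outputs_rep() const;  // declared, intentionally never defined
  };

  struct GoodOp : BaseT {
    std::vector<int> outputs_rep() const { return {1}; }  // shadows the base version
  };

  struct ForgetfulOp : BaseT {};  // provides no outputs_rep of its own

  template <class Op>
  std::vector<int> OutputsOf(const Op& op) {
    // Resolves to GoodOp::outputs_rep for GoodOp; for ForgetfulOp it resolves
    // to the private BaseT::outputs_rep and fails to compile, mirroring the
    // intent of the private declaration in OperationT above.
    return op.outputs_rep();
  }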
template <size_t InputCount, class Derived>
@@ -587,6 +559,20 @@ class SupportedOperations {
#undef DECLARE_GETTER
};
+template <RegisterRepresentation::Enum... reps>
+base::Vector<const RegisterRepresentation> RepVector() {
+ static const std::array<RegisterRepresentation, sizeof...(reps)> rep_array{
+ RegisterRepresentation{reps}...};
+ return base::VectorOf(rep_array);
+}
+
+bool ValidOpInputRep(const Graph& graph, OpIndex input,
+ std::initializer_list<RegisterRepresentation> expected_rep,
+ base::Optional<size_t> projection_index = {});
+bool ValidOpInputRep(const Graph& graph, OpIndex input,
+ RegisterRepresentation expected_rep,
+ base::Optional<size_t> projection_index = {});
+
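A usage note on the helpers above (illustrative; it mirrors the calls that appear in the operation structs further down): RepVector materializes a function-local static array per instantiation, so the returned base::Vector stays valid and outputs_rep() can hand it out without allocating.

  // Sketch: the two-output vector used by overflow-checking binops.
  base::Vector<const RegisterRepresentation> reps =
      RepVector<RegisterRepresentation::Word32(),
                RegisterRepresentation::Word32()>();
  DCHECK_EQ(reps.size(), 2u);
  DCHECK_EQ(reps[0], RegisterRepresentation::Word32());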
struct WordBinopOp : FixedArityOperationT<2, WordBinopOp> {
enum class Kind : uint8_t {
kAdd,
@@ -605,7 +591,10 @@ struct WordBinopOp : FixedArityOperationT<2, WordBinopOp> {
Kind kind;
WordRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
+ }
OpIndex left() const { return input(0); }
OpIndex right() const { return input(1); }
@@ -670,6 +659,11 @@ struct WordBinopOp : FixedArityOperationT<2, WordBinopOp> {
WordBinopOp(OpIndex left, OpIndex right, Kind kind, WordRepresentation rep)
: Base(left, right), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), rep));
+ DCHECK(ValidOpInputRep(graph, right(), rep));
+ }
auto options() const { return std::tuple{kind, rep}; }
void PrintOptions(std::ostream& os) const;
};
@@ -689,7 +683,10 @@ struct FloatBinopOp : FixedArityOperationT<2, FloatBinopOp> {
Kind kind;
FloatRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
+ }
OpIndex left() const { return input(0); }
OpIndex right() const { return input(1); }
@@ -711,9 +708,13 @@ struct FloatBinopOp : FixedArityOperationT<2, FloatBinopOp> {
}
FloatBinopOp(OpIndex left, OpIndex right, Kind kind, FloatRepresentation rep)
- : Base(left, right), kind(kind), rep(rep) {
+ : Base(left, right), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
DCHECK_IMPLIES(kind == any_of(Kind::kPower, Kind::kAtan2, Kind::kMod),
rep == FloatRepresentation::Float64());
+ DCHECK(ValidOpInputRep(graph, left(), rep));
+ DCHECK(ValidOpInputRep(graph, right(), rep));
}
auto options() const { return std::tuple{kind, rep}; }
void PrintOptions(std::ostream& os) const;
@@ -729,7 +730,17 @@ struct OverflowCheckedBinopOp
Kind kind;
WordRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (rep.value()) {
+ case WordRepresentation::Word32():
+ return RepVector<RegisterRepresentation::Word32(),
+ RegisterRepresentation::Word32()>();
+ case WordRepresentation::Word64():
+ return RepVector<RegisterRepresentation::Word64(),
+ RegisterRepresentation::Word32()>();
+ }
+ }
OpIndex left() const { return input(0); }
OpIndex right() const { return input(1); }
@@ -747,6 +758,11 @@ struct OverflowCheckedBinopOp
OverflowCheckedBinopOp(OpIndex left, OpIndex right, Kind kind,
WordRepresentation rep)
: Base(left, right), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), rep));
+ DCHECK(ValidOpInputRep(graph, right(), rep));
+ }
auto options() const { return std::tuple{kind, rep}; }
void PrintOptions(std::ostream& os) const;
};
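For intuition about the two-element outputs_rep() above, here is a host-side sketch only (made-up function name, GCC/Clang builtin; not V8 code): an overflow-checked binop conceptually produces the wrapped result together with a Word32 overflow flag, and the two parts are typically consumed via projections.

  #include <cstdint>
  #include <utility>

  // Illustration of the <value, overflow-bit> pair an Int32 checked add yields.
  std::pair<int32_t, uint32_t> Int32AddWithOverflow(int32_t a, int32_t b) {
    int32_t result;
    bool overflow = __builtin_add_overflow(a, b, &result);
    return {result, overflow ? 1u : 0u};
  }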
@@ -762,15 +778,21 @@ struct WordUnaryOp : FixedArityOperationT<1, WordUnaryOp> {
};
Kind kind;
WordRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
+ }
OpIndex input() const { return Base::input(0); }
static bool IsSupported(Kind kind, WordRepresentation rep);
explicit WordUnaryOp(OpIndex input, Kind kind, WordRepresentation rep)
- : Base(input), kind(kind), rep(rep) {
+ : Base(input), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
DCHECK(IsSupported(kind, rep));
+ DCHECK(ValidOpInputRep(graph, input(), rep));
}
auto options() const { return std::tuple{kind, rep}; }
};
@@ -808,15 +830,21 @@ struct FloatUnaryOp : FixedArityOperationT<1, FloatUnaryOp> {
};
Kind kind;
FloatRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
+ }
OpIndex input() const { return Base::input(0); }
static bool IsSupported(Kind kind, FloatRepresentation rep);
explicit FloatUnaryOp(OpIndex input, Kind kind, FloatRepresentation rep)
- : Base(input), kind(kind), rep(rep) {
+ : Base(input), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
DCHECK(IsSupported(kind, rep));
+ DCHECK(ValidOpInputRep(graph, input(), rep));
}
auto options() const { return std::tuple{kind, rep}; }
};
@@ -834,7 +862,10 @@ struct ShiftOp : FixedArityOperationT<2, ShiftOp> {
Kind kind;
WordRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
+ }
OpIndex left() const { return input(0); }
OpIndex right() const { return input(1); }
@@ -868,6 +899,11 @@ struct ShiftOp : FixedArityOperationT<2, ShiftOp> {
ShiftOp(OpIndex left, OpIndex right, Kind kind, WordRepresentation rep)
: Base(left, right), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), rep));
+ DCHECK(ValidOpInputRep(graph, right(), WordRepresentation::Word32()));
+ }
auto options() const { return std::tuple{kind, rep}; }
};
std::ostream& operator<<(std::ostream& os, ShiftOp::Kind kind);
@@ -875,17 +911,37 @@ std::ostream& operator<<(std::ostream& os, ShiftOp::Kind kind);
struct EqualOp : FixedArityOperationT<2, EqualOp> {
RegisterRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
OpIndex left() const { return input(0); }
OpIndex right() const { return input(1); }
+ bool ValidInputRep(
+ base::Vector<const RegisterRepresentation> input_reps) const;
+
EqualOp(OpIndex left, OpIndex right, RegisterRepresentation rep)
- : Base(left, right), rep(rep) {
+ : Base(left, right), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
+#ifdef DEBUG
DCHECK(rep == any_of(RegisterRepresentation::Word32(),
RegisterRepresentation::Word64(),
RegisterRepresentation::Float32(),
- RegisterRepresentation::Float64()));
+ RegisterRepresentation::Float64(),
+ RegisterRepresentation::Tagged()));
+ RegisterRepresentation input_rep = rep;
+#ifdef V8_COMPRESS_POINTERS
+ // In the presence of pointer compression, we only compare the lower 32bit.
+ if (input_rep == RegisterRepresentation::Tagged()) {
+ input_rep = RegisterRepresentation::Compressed();
+ }
+#endif // V8_COMPRESS_POINTERS
+ DCHECK(ValidOpInputRep(graph, left(), input_rep));
+ DCHECK(ValidOpInputRep(graph, right(), input_rep));
+#endif // DEBUG
}
auto options() const { return std::tuple{rep}; }
};
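The pointer-compression shortcut used in EqualOp::Validate above rests on a simple observation, sketched below in simplified form (made-up Decompress helper, ignores Smi tagging and other details; not V8 code): decompression adds a fixed per-isolate cage base, so equality of the 32-bit compressed values is equivalent to equality of the full tagged values.

  #include <cstdint>

  // Simplified model of pointer decompression.
  uint64_t Decompress(uint64_t cage_base, uint32_t compressed) {
    return cage_base + compressed;
  }
  // For a fixed cage_base, Decompress(cage_base, a) == Decompress(cage_base, b)
  // holds exactly when a == b, so comparing only the lower 32 bits suffices.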
@@ -900,14 +956,19 @@ struct ComparisonOp : FixedArityOperationT<2, ComparisonOp> {
Kind kind;
RegisterRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
OpIndex left() const { return input(0); }
OpIndex right() const { return input(1); }
ComparisonOp(OpIndex left, OpIndex right, Kind kind,
RegisterRepresentation rep)
- : Base(left, right), kind(kind), rep(rep) {
+ : Base(left, right), kind(kind), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
DCHECK_EQ(rep, any_of(RegisterRepresentation::Word32(),
RegisterRepresentation::Word64(),
RegisterRepresentation::Float32(),
@@ -916,9 +977,21 @@ struct ComparisonOp : FixedArityOperationT<2, ComparisonOp> {
rep == any_of(RegisterRepresentation::Float32(),
RegisterRepresentation::Float64()),
kind == any_of(Kind::kSignedLessThan, Kind::kSignedLessThanOrEqual));
+ DCHECK(ValidOpInputRep(graph, left(), rep));
+ DCHECK(ValidOpInputRep(graph, right(), rep));
}
auto options() const { return std::tuple{kind, rep}; }
+ static bool IsLessThan(Kind kind) {
+ switch (kind) {
+ case Kind::kSignedLessThan:
+ case Kind::kUnsignedLessThan:
+ return true;
+ case Kind::kSignedLessThanOrEqual:
+ case Kind::kUnsignedLessThanOrEqual:
+ return false;
+ }
+ }
static bool IsSigned(Kind kind) {
switch (kind) {
case Kind::kSignedLessThan:
@@ -1036,21 +1109,92 @@ struct ChangeOp : FixedArityOperationT<1, ChangeOp> {
signalling_nan_possible);
}
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&to, 1);
+ }
OpIndex input() const { return Base::input(0); }
ChangeOp(OpIndex input, Kind kind, Assumption assumption,
RegisterRepresentation from, RegisterRepresentation to)
: Base(input), kind(kind), assumption(assumption), from(from), to(to) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), from));
+ }
auto options() const { return std::tuple{kind, assumption, from, to}; }
};
std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind);
std::ostream& operator<<(std::ostream& os, ChangeOp::Assumption assumption);
+struct ChangeOrDeoptOp : FixedArityOperationT<2, ChangeOrDeoptOp> {
+ enum class Kind : uint8_t {
+ kUint32ToInt32,
+ kInt64ToInt32,
+ kUint64ToInt32,
+ kUint64ToInt64,
+ kFloat64ToInt32,
+ kFloat64ToInt64,
+ };
+ Kind kind;
+ CheckForMinusZeroMode minus_zero_mode;
+ FeedbackSource feedback;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (kind) {
+ case Kind::kUint32ToInt32:
+ case Kind::kInt64ToInt32:
+ case Kind::kUint64ToInt32:
+ case Kind::kFloat64ToInt32:
+ return RepVector<RegisterRepresentation::Word32()>();
+ case Kind::kUint64ToInt64:
+ case Kind::kFloat64ToInt64:
+ return RepVector<RegisterRepresentation::Word64()>();
+ }
+ }
+
+ OpIndex input() const { return Base::input(0); }
+ OpIndex frame_state() const { return Base::input(1); }
+
+ ChangeOrDeoptOp(OpIndex input, OpIndex frame_state, Kind kind,
+ CheckForMinusZeroMode minus_zero_mode,
+ const FeedbackSource& feedback)
+ : Base(input, frame_state),
+ kind(kind),
+ minus_zero_mode(minus_zero_mode),
+ feedback(feedback) {}
+
+ void Validate(const Graph& graph) const {
+ switch (kind) {
+ case Kind::kUint32ToInt32:
+ DCHECK(
+ ValidOpInputRep(graph, input(), RegisterRepresentation::Word32()));
+ break;
+ case Kind::kInt64ToInt32:
+ case Kind::kUint64ToInt32:
+ case Kind::kUint64ToInt64:
+ DCHECK(
+ ValidOpInputRep(graph, input(), RegisterRepresentation::Word64()));
+ break;
+ case Kind::kFloat64ToInt32:
+ case Kind::kFloat64ToInt64:
+ DCHECK(
+ ValidOpInputRep(graph, input(), RegisterRepresentation::Float64()));
+ break;
+ }
+ DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
+ }
+ auto options() const { return std::tuple{kind, minus_zero_mode, feedback}; }
+};
+std::ostream& operator<<(std::ostream& os, ChangeOrDeoptOp::Kind kind);
+
// Perform a conversion and return a pair of the result and a bit if it was
// successful.
struct TryChangeOp : FixedArityOperationT<1, TryChangeOp> {
+ static constexpr uint32_t kSuccessValue = 1;
+ static constexpr uint32_t kFailureValue = 0;
enum class Kind : uint8_t {
// The result of the truncation is undefined if the result is out of range.
kSignedFloatTruncateOverflowUndefined,
@@ -1060,13 +1204,27 @@ struct TryChangeOp : FixedArityOperationT<1, TryChangeOp> {
FloatRepresentation from;
WordRepresentation to;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (to.value()) {
+ case WordRepresentation::Word32():
+ return RepVector<RegisterRepresentation::Word32(),
+ RegisterRepresentation::Word32()>();
+ case WordRepresentation::Word64():
+ return RepVector<RegisterRepresentation::Word64(),
+ RegisterRepresentation::Word32()>();
+ }
+ }
OpIndex input() const { return Base::input(0); }
TryChangeOp(OpIndex input, Kind kind, FloatRepresentation from,
WordRepresentation to)
: Base(input), kind(kind), from(from), to(to) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), from));
+ }
auto options() const { return std::tuple{kind, from, to}; }
};
std::ostream& operator<<(std::ostream& os, TryChangeOp::Kind kind);
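For intuition, a host-side sketch (made-up function name; not V8 code) of the <result, success-bit> contract that kSuccessValue/kFailureValue encode, shown for a signed Float64-to-Word64 truncation. The real kSignedFloatTruncateOverflowUndefined variant leaves the failing result undefined, whereas this sketch simply returns 0.

  #include <cmath>
  #include <cstdint>
  #include <utility>

  std::pair<int64_t, uint32_t> TryTruncateFloat64ToInt64(double x) {
    constexpr double kTwoTo63 = 9223372036854775808.0;  // 2^63, exact as a double
    if (std::isnan(x) || x < -kTwoTo63 || x >= kTwoTo63) {
      return {0, 0u};  // cf. TryChangeOp::kFailureValue
    }
    return {static_cast<int64_t>(x), 1u};  // cf. TryChangeOp::kSuccessValue
  }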
@@ -1076,13 +1234,22 @@ struct Float64InsertWord32Op : FixedArityOperationT<2, Float64InsertWord32Op> {
enum class Kind { kLowHalf, kHighHalf };
Kind kind;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Float64()>();
+ }
OpIndex float64() const { return input(0); }
OpIndex word32() const { return input(1); }
Float64InsertWord32Op(OpIndex float64, OpIndex word32, Kind kind)
: Base(float64, word32), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, float64(), RegisterRepresentation::Float64()));
+ DCHECK(ValidOpInputRep(graph, word32(), RegisterRepresentation::Word32()));
+ }
auto options() const { return std::tuple{kind}; }
};
std::ostream& operator<<(std::ostream& os, Float64InsertWord32Op::Kind kind);
@@ -1093,16 +1260,24 @@ struct TaggedBitcastOp : FixedArityOperationT<1, TaggedBitcastOp> {
// Due to moving GC, converting from or to pointers doesn't commute with GC.
static constexpr OpProperties properties = OpProperties::Reading();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&to, 1);
+ }
OpIndex input() const { return Base::input(0); }
TaggedBitcastOp(OpIndex input, RegisterRepresentation from,
RegisterRepresentation to)
- : Base(input), from(from), to(to) {
+ : Base(input), from(from), to(to) {}
+
+ void Validate(const Graph& graph) const {
DCHECK((from == RegisterRepresentation::PointerSized() &&
to == RegisterRepresentation::Tagged()) ||
(from == RegisterRepresentation::Tagged() &&
- to == RegisterRepresentation::PointerSized()));
+ to == RegisterRepresentation::PointerSized()) ||
+ (from == RegisterRepresentation::Compressed() &&
+ to == RegisterRepresentation::Word32()));
+ DCHECK(ValidOpInputRep(graph, input(), from));
}
auto options() const { return std::tuple{from, to}; }
};
@@ -1110,22 +1285,28 @@ struct TaggedBitcastOp : FixedArityOperationT<1, TaggedBitcastOp> {
struct SelectOp : FixedArityOperationT<3, SelectOp> {
enum class Implementation : uint8_t { kBranch, kCMove };
- static constexpr OpProperties properties = OpProperties::Pure();
RegisterRepresentation rep;
BranchHint hint;
Implementation implem;
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&rep, 1);
+ }
+
SelectOp(OpIndex cond, OpIndex vtrue, OpIndex vfalse,
RegisterRepresentation rep, BranchHint hint, Implementation implem)
- : Base(cond, vtrue, vfalse), rep(rep), hint(hint), implem(implem) {
-#ifdef DEBUG
- if (implem == Implementation::kCMove) {
- DCHECK((rep == RegisterRepresentation::Word32() &&
- SupportedOperations::word32_select()) ||
- (rep == RegisterRepresentation::Word64() &&
- SupportedOperations::word64_select()));
- }
-#endif
+ : Base(cond, vtrue, vfalse), rep(rep), hint(hint), implem(implem) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK_IMPLIES(implem == Implementation::kCMove,
+ (rep == RegisterRepresentation::Word32() &&
+ SupportedOperations::word32_select()) ||
+ (rep == RegisterRepresentation::Word64() &&
+ SupportedOperations::word64_select()));
+ DCHECK(ValidOpInputRep(graph, cond(), RegisterRepresentation::Word32()));
+ DCHECK(ValidOpInputRep(graph, vtrue(), rep));
+ DCHECK(ValidOpInputRep(graph, vfalse(), rep));
}
OpIndex cond() const { return input(0); }
@@ -1139,39 +1320,64 @@ std::ostream& operator<<(std::ostream& os, SelectOp::Implementation kind);
struct PhiOp : OperationT<PhiOp> {
RegisterRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&rep, 1);
+ }
static constexpr size_t kLoopPhiBackEdgeIndex = 1;
explicit PhiOp(base::Vector<const OpIndex> inputs, RegisterRepresentation rep)
: Base(inputs), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
+#ifdef DEBUG
+ for (OpIndex input : inputs()) {
+ DCHECK(ValidOpInputRep(graph, input, rep));
+ }
+#endif
+ }
auto options() const { return std::tuple{rep}; }
};
// Only used when moving a loop phi to a new graph while the loop backedge has
// not been emitted yet.
struct PendingLoopPhiOp : FixedArityOperationT<1, PendingLoopPhiOp> {
- RegisterRepresentation rep;
- union {
- // Used when transforming a Turboshaft graph.
- // This is not an input because it refers to the old graph.
- OpIndex old_backedge_index = OpIndex::Invalid();
- // Used when translating from sea-of-nodes.
- Node* old_backedge_node;
+ struct PhiIndex {
+ int index;
+ };
+ struct Data {
+ union {
+ // Used when transforming a Turboshaft graph.
+ // This is not an input because it refers to the old graph.
+ OpIndex old_backedge_index = OpIndex::Invalid();
+ // Used when translating from sea-of-nodes.
+ Node* old_backedge_node;
+ // Used when building loops with the assembler macros.
+ PhiIndex phi_index;
+ };
+ explicit Data(OpIndex old_backedge_index)
+ : old_backedge_index(old_backedge_index) {}
+ explicit Data(Node* old_backedge_node)
+ : old_backedge_node(old_backedge_node) {}
+ explicit Data(PhiIndex phi_index) : phi_index(phi_index) {}
};
+ RegisterRepresentation rep;
+ Data data;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&rep, 1);
+ }
OpIndex first() const { return input(0); }
- PendingLoopPhiOp(OpIndex first, RegisterRepresentation rep,
- OpIndex old_backedge_index)
- : Base(first), rep(rep), old_backedge_index(old_backedge_index) {
- DCHECK(old_backedge_index.valid());
+ PendingLoopPhiOp(OpIndex first, RegisterRepresentation rep, Data data)
+ : Base(first), rep(rep), data(data) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, first(), rep));
}
- PendingLoopPhiOp(OpIndex first, RegisterRepresentation rep,
- Node* old_backedge_node)
- : Base(first), rep(rep), old_backedge_node(old_backedge_node) {}
std::tuple<> options() const { UNREACHABLE(); }
void PrintOptions(std::ostream& os) const;
};
@@ -1192,6 +1398,7 @@ struct ConstantOp : FixedArityOperationT<0, ConstantOp> {
};
Kind kind;
+ RegisterRepresentation rep = Representation(kind);
union Storage {
uint64_t integral;
float float32;
@@ -1206,9 +1413,12 @@ struct ConstantOp : FixedArityOperationT<0, ConstantOp> {
Storage(Handle<HeapObject> constant) : handle(constant) {}
} storage;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&rep, 1);
+ }
- RegisterRepresentation Representation() const {
+ static RegisterRepresentation Representation(Kind kind) {
switch (kind) {
case Kind::kWord32:
return RegisterRepresentation::Word32();
@@ -1232,7 +1442,9 @@ struct ConstantOp : FixedArityOperationT<0, ConstantOp> {
}
ConstantOp(Kind kind, Storage storage)
- : Base(), kind(kind), storage(storage) {
+ : Base(), kind(kind), storage(storage) {}
+
+ void Validate(const Graph& graph) const {
DCHECK_IMPLIES(
kind == Kind::kWord32,
storage.integral <= WordRepresentation::Word32().MaxUnsignedValue());
@@ -1418,6 +1630,14 @@ struct LoadOp : OperationT<LoadOp> {
// There is a Wasm trap handler for out-of-bounds accesses.
bool with_trap_handler : 1;
+ static constexpr Kind Aligned(BaseTaggedness base_is_tagged) {
+ switch (base_is_tagged) {
+ case BaseTaggedness::kTaggedBase:
+ return TaggedBase();
+ case BaseTaggedness::kUntaggedBase:
+ return RawAligned();
+ }
+ }
static constexpr Kind TaggedBase() { return Kind{true, false, false}; }
static constexpr Kind RawAligned() { return Kind{false, false, false}; }
static constexpr Kind RawUnaligned() { return Kind{false, true, false}; }
@@ -1439,6 +1659,9 @@ struct LoadOp : OperationT<LoadOp> {
return kind.with_trap_handler ? OpProperties::ReadingAndCanAbort()
: OpProperties::Reading();
}
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&result_rep, 1);
+ }
OpIndex base() const { return input(0); }
OpIndex index() const {
@@ -1454,12 +1677,27 @@ struct LoadOp : OperationT<LoadOp> {
result_rep(result_rep),
element_size_log2(element_size_log2),
offset(offset) {
+ input(0) = base;
+ if (index.valid()) {
+ input(1) = index;
+ }
+ }
+
+ void Validate(const Graph& graph) const {
DCHECK(loaded_rep.ToRegisterRepresentation() == result_rep ||
(loaded_rep.IsTagged() &&
result_rep == RegisterRepresentation::Compressed()));
- DCHECK_IMPLIES(element_size_log2 > 0, index.valid());
- input(0) = base;
- if (index.valid()) input(1) = index;
+ DCHECK_IMPLIES(element_size_log2 > 0, index().valid());
+ DCHECK(
+ kind.tagged_base
+ ? ValidOpInputRep(graph, base(), RegisterRepresentation::Tagged())
+ : ValidOpInputRep(graph, base(),
+ {RegisterRepresentation::PointerSized(),
+ RegisterRepresentation::Tagged()}));
+ if (index().valid()) {
+ DCHECK(ValidOpInputRep(graph, index(),
+ RegisterRepresentation::PointerSized()));
+ }
}
static LoadOp& New(Graph* graph, OpIndex base, OpIndex index, Kind kind,
MemoryRepresentation loaded_rep,
@@ -1496,6 +1734,7 @@ struct StoreOp : OperationT<StoreOp> {
return kind.with_trap_handler ? OpProperties::WritingAndCanAbort()
: OpProperties::Writing();
}
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex base() const { return input(0); }
OpIndex value() const { return input(1); }
@@ -1512,10 +1751,27 @@ struct StoreOp : OperationT<StoreOp> {
write_barrier(write_barrier),
element_size_log2(element_size_log2),
offset(offset) {
- DCHECK_IMPLIES(element_size_log2 > 0, index.valid());
input(0) = base;
input(1) = value;
- if (index.valid()) input(2) = index;
+ if (index.valid()) {
+ input(2) = index;
+ }
+ }
+
+ void Validate(const Graph& graph) const {
+ DCHECK_IMPLIES(element_size_log2 > 0, index().valid());
+ DCHECK(
+ kind.tagged_base
+ ? ValidOpInputRep(graph, base(), RegisterRepresentation::Tagged())
+ : ValidOpInputRep(graph, base(),
+ {RegisterRepresentation::PointerSized(),
+ RegisterRepresentation::Tagged()}));
+ DCHECK(ValidOpInputRep(graph, value(),
+ stored_rep.ToRegisterRepresentationForStore()));
+ if (index().valid()) {
+ DCHECK(ValidOpInputRep(graph, index(),
+ RegisterRepresentation::PointerSized()));
+ }
}
static StoreOp& New(Graph* graph, OpIndex base, OpIndex index, OpIndex value,
Kind kind, MemoryRepresentation stored_rep,
@@ -1533,6 +1789,51 @@ struct StoreOp : OperationT<StoreOp> {
}
};
+struct AllocateOp : FixedArityOperationT<1, AllocateOp> {
+ AllocationType type;
+ AllowLargeObjects allow_large_objects;
+
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex size() const { return input(0); }
+
+ AllocateOp(OpIndex size, AllocationType type,
+ AllowLargeObjects allow_large_objects)
+ : Base(size), type(type), allow_large_objects(allow_large_objects) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, size(), RegisterRepresentation::PointerSized()));
+ }
+ void PrintOptions(std::ostream& os) const;
+ auto options() const { return std::tuple{type, allow_large_objects}; }
+};
+
+struct DecodeExternalPointerOp
+ : FixedArityOperationT<1, DecodeExternalPointerOp> {
+ ExternalPointerTag tag;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::PointerSized()>();
+ }
+
+ OpIndex handle() const { return input(0); }
+
+ DecodeExternalPointerOp(OpIndex handle, ExternalPointerTag tag)
+ : Base(handle), tag(tag) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ DCHECK(ValidOpInputRep(graph, handle(), RegisterRepresentation::Word32()));
+ }
+ void PrintOptions(std::ostream& os) const;
+ auto options() const { return std::tuple{tag}; }
+};
+
// Retain a HeapObject to prevent it from being garbage collected too early.
struct RetainOp : FixedArityOperationT<1, RetainOp> {
OpIndex retained() const { return input(0); }
@@ -1541,8 +1842,14 @@ struct RetainOp : FixedArityOperationT<1, RetainOp> {
// this must not be reordered with operations reading from the heap, we mark
// it as writing to prevent such reorderings.
static constexpr OpProperties properties = OpProperties::Writing();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
explicit RetainOp(OpIndex retained) : Base(retained) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, retained(), RegisterRepresentation::Tagged()));
+ }
auto options() const { return std::tuple{}; }
};
@@ -1551,11 +1858,19 @@ struct StackPointerGreaterThanOp
StackCheckKind kind;
static constexpr OpProperties properties = OpProperties::Reading();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
OpIndex stack_limit() const { return input(0); }
StackPointerGreaterThanOp(OpIndex stack_limit, StackCheckKind kind)
: Base(stack_limit), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, stack_limit(),
+ RegisterRepresentation::PointerSized()));
+ }
auto options() const { return std::tuple{kind}; }
};
@@ -1567,8 +1882,12 @@ struct StackSlotOp : FixedArityOperationT<0, StackSlotOp> {
int alignment;
static constexpr OpProperties properties = OpProperties::Writing();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::PointerSized()>();
+ }
StackSlotOp(int size, int alignment) : size(size), alignment(alignment) {}
+ void Validate(const Graph& graph) const {}
auto options() const { return std::tuple{size, alignment}; }
};
@@ -1579,9 +1898,19 @@ struct FrameConstantOp : FixedArityOperationT<0, FrameConstantOp> {
enum class Kind { kStackCheckOffset, kFramePointer, kParentFramePointer };
Kind kind;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (kind) {
+ case Kind::kStackCheckOffset:
+ return RepVector<RegisterRepresentation::Tagged()>();
+ case Kind::kFramePointer:
+ case Kind::kParentFramePointer:
+ return RepVector<RegisterRepresentation::PointerSized()>();
+ }
+ }
explicit FrameConstantOp(Kind kind) : Base(), kind(kind) {}
+ void Validate(const Graph& graph) const {}
auto options() const { return std::tuple{kind}; }
};
std::ostream& operator<<(std::ostream& os, FrameConstantOp::Kind kind);
@@ -1590,7 +1919,8 @@ struct FrameStateOp : OperationT<FrameStateOp> {
bool inlined;
const FrameStateData* data;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex parent_frame_state() const {
DCHECK(inlined);
@@ -1601,37 +1931,44 @@ struct FrameStateOp : OperationT<FrameStateOp> {
if (inlined) result += 1;
return result;
}
+ uint16_t state_values_count() const {
+ DCHECK_EQ(input_count - inlined, state_values().size());
+ return input_count - inlined;
+ }
+ const OpIndex state_value(size_t idx) const { return state_values()[idx]; }
+
+ RegisterRepresentation state_value_rep(size_t idx) const {
+ return RegisterRepresentation::FromMachineRepresentation(
+ data->machine_types[idx].representation());
+ }
FrameStateOp(base::Vector<const OpIndex> inputs, bool inlined,
const FrameStateData* data)
: Base(inputs), inlined(inlined), data(data) {}
+
+ void Validate(const Graph& graph) const {
+ if (inlined) {
+ DCHECK(Get(graph, parent_frame_state()).Is<FrameStateOp>());
+ }
+ // TODO(tebbi): Check frame state inputs using `FrameStateData`.
+ }
void PrintOptions(std::ostream& os) const;
auto options() const { return std::tuple{inlined, data}; }
};
-// CheckLazyDeoptOp should always immediately follow a call.
-// Semantically, it deopts if the current code object has been
-// deoptimized. But this might also be implemented differently.
-struct CheckLazyDeoptOp : FixedArityOperationT<2, CheckLazyDeoptOp> {
- static constexpr OpProperties properties = OpProperties::CanAbort();
-
- OpIndex call() const { return input(0); }
- OpIndex frame_state() const { return input(1); }
-
- CheckLazyDeoptOp(OpIndex call, OpIndex frame_state)
- : Base(call, frame_state) {}
- auto options() const { return std::tuple{}; }
-};
-
struct DeoptimizeOp : FixedArityOperationT<1, DeoptimizeOp> {
const DeoptimizeParameters* parameters;
static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex frame_state() const { return input(0); }
DeoptimizeOp(OpIndex frame_state, const DeoptimizeParameters* parameters)
: Base(frame_state), parameters(parameters) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
+ }
auto options() const { return std::tuple{parameters}; }
};
@@ -1640,6 +1977,7 @@ struct DeoptimizeIfOp : FixedArityOperationT<2, DeoptimizeIfOp> {
const DeoptimizeParameters* parameters;
static constexpr OpProperties properties = OpProperties::CanAbort();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex condition() const { return input(0); }
OpIndex frame_state() const { return input(1); }
@@ -1649,6 +1987,11 @@ struct DeoptimizeIfOp : FixedArityOperationT<2, DeoptimizeIfOp> {
: Base(condition, frame_state),
negated(negated),
parameters(parameters) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, condition(), RegisterRepresentation::Word32()));
+ }
auto options() const { return std::tuple{negated, parameters}; }
};
@@ -1657,66 +2000,223 @@ struct TrapIfOp : FixedArityOperationT<1, TrapIfOp> {
const TrapId trap_id;
static constexpr OpProperties properties = OpProperties::CanAbort();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex condition() const { return input(0); }
TrapIfOp(OpIndex condition, bool negated, const TrapId trap_id)
: Base(condition), negated(negated), trap_id(trap_id) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, condition(), RegisterRepresentation::Word32()));
+ }
auto options() const { return std::tuple{negated, trap_id}; }
};
+struct StaticAssertOp : FixedArityOperationT<1, StaticAssertOp> {
+ static constexpr OpProperties properties = OpProperties::CanAbort();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
+ const char* source;
+
+ OpIndex condition() const { return Base::input(0); }
+
+ StaticAssertOp(OpIndex condition, const char* source)
+ : Base(condition), source(source) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, condition(), RegisterRepresentation::Word32()));
+ }
+ auto options() const { return std::tuple{source}; }
+};
+
struct ParameterOp : FixedArityOperationT<0, ParameterOp> {
int32_t parameter_index;
+ RegisterRepresentation rep;
const char* debug_name;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return {&rep, 1};
+ }
- explicit ParameterOp(int32_t parameter_index, const char* debug_name = "")
- : Base(), parameter_index(parameter_index), debug_name(debug_name) {}
- auto options() const { return std::tuple{parameter_index, debug_name}; }
+ explicit ParameterOp(int32_t parameter_index, RegisterRepresentation rep,
+ const char* debug_name = "")
+ : Base(),
+ parameter_index(parameter_index),
+ rep(rep),
+ debug_name(debug_name) {}
+ void Validate(const Graph& graph) const {}
+ auto options() const { return std::tuple{parameter_index, rep, debug_name}; }
void PrintOptions(std::ostream& os) const;
};
struct OsrValueOp : FixedArityOperationT<0, OsrValueOp> {
int32_t index;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
explicit OsrValueOp(int32_t index) : Base(), index(index) {}
+ void Validate(const Graph& graph) const {}
auto options() const { return std::tuple{index}; }
};
-struct CallOp : OperationT<CallOp> {
+struct TSCallDescriptor : public NON_EXPORTED_BASE(ZoneObject) {
const CallDescriptor* descriptor;
+ base::Vector<const RegisterRepresentation> out_reps;
+
+ TSCallDescriptor(const CallDescriptor* descriptor,
+ base::Vector<const RegisterRepresentation> out_reps)
+ : descriptor(descriptor), out_reps(out_reps) {}
+
+ static const TSCallDescriptor* Create(const CallDescriptor* descriptor,
+ Zone* graph_zone) {
+ base::Vector<RegisterRepresentation> out_reps =
+ graph_zone->NewVector<RegisterRepresentation>(
+ descriptor->ReturnCount());
+ for (size_t i = 0; i < descriptor->ReturnCount(); ++i) {
+ out_reps[i] = RegisterRepresentation::FromMachineRepresentation(
+ descriptor->GetReturnType(i).representation());
+ }
+ return graph_zone->New<TSCallDescriptor>(descriptor, out_reps);
+ }
+};
+
+struct CallOp : OperationT<CallOp> {
+ const TSCallDescriptor* descriptor;
static constexpr OpProperties properties = OpProperties::AnySideEffects();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return descriptor->out_reps;
+ }
+
+ bool HasFrameState() const {
+ return descriptor->descriptor->NeedsFrameState();
+ }
OpIndex callee() const { return input(0); }
+ OpIndex frame_state() const {
+ return HasFrameState() ? input(1) : OpIndex::Invalid();
+ }
base::Vector<const OpIndex> arguments() const {
- return inputs().SubVector(1, input_count);
+ return inputs().SubVector(1 + HasFrameState(), input_count);
}
- CallOp(OpIndex callee, base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor)
- : Base(1 + arguments.size()), descriptor(descriptor) {
+ CallOp(OpIndex callee, OpIndex frame_state,
+ base::Vector<const OpIndex> arguments,
+ const TSCallDescriptor* descriptor)
+ : Base(1 + frame_state.valid() + arguments.size()),
+ descriptor(descriptor) {
base::Vector<OpIndex> inputs = this->inputs();
inputs[0] = callee;
- inputs.SubVector(1, inputs.size()).OverwriteWith(arguments);
+ if (frame_state.valid()) {
+ inputs[1] = frame_state;
+ }
+ inputs.SubVector(1 + frame_state.valid(), inputs.size())
+ .OverwriteWith(arguments);
}
- static CallOp& New(Graph* graph, OpIndex callee,
+ void Validate(const Graph& graph) const {
+ if (frame_state().valid()) {
+ DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
+ }
+ // TODO(tebbi): Check call inputs based on `TSCallDescriptor`.
+ }
+
+ static CallOp& New(Graph* graph, OpIndex callee, OpIndex frame_state,
base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor) {
- return Base::New(graph, 1 + arguments.size(), callee, arguments,
- descriptor);
+ const TSCallDescriptor* descriptor) {
+ return Base::New(graph, 1 + frame_state.valid() + arguments.size(), callee,
+ frame_state, arguments, descriptor);
}
auto options() const { return std::tuple{descriptor}; }
};
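A worked example of the input layout handled above (illustrative only; the index constants are made up): a call with a frame state and three arguments has input_count == 1 + 1 + 3 == 5, so arguments() is inputs().SubVector(2, 5); without a frame state the same three arguments live at indices 1..3.

  // Index layout sketch for a CallOp with a frame state and three arguments
  // (input_count == 5).
  constexpr int kCalleeIndex = 0;         // input(0)
  constexpr int kFrameStateIndex = 1;     // input(1), present only when HasFrameState()
  constexpr int kFirstArgumentIndex = 2;  // inputs 2..4 are the arguments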
+struct CallAndCatchExceptionOp : OperationT<CallAndCatchExceptionOp> {
+ const TSCallDescriptor* descriptor;
+ Block* if_success;
+ Block* if_exception;
+
+ static constexpr OpProperties properties =
+ OpProperties::BlockTerminatorWithAnySideEffect();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return descriptor->out_reps;
+ }
+
+ bool HasFrameState() const {
+ return descriptor->descriptor->NeedsFrameState();
+ }
+
+ OpIndex callee() const { return input(0); }
+ OpIndex frame_state() const {
+ return HasFrameState() ? input(1) : OpIndex::Invalid();
+ }
+ base::Vector<const OpIndex> arguments() const {
+ return inputs().SubVector(1 + HasFrameState(), input_count);
+ }
+
+ CallAndCatchExceptionOp(OpIndex callee, OpIndex frame_state,
+ base::Vector<const OpIndex> arguments,
+ Block* if_success, Block* if_exception,
+ const TSCallDescriptor* descriptor)
+ : Base(1 + frame_state.valid() + arguments.size()),
+ descriptor(descriptor),
+ if_success(if_success),
+ if_exception(if_exception) {
+ base::Vector<OpIndex> inputs = this->inputs();
+ inputs[0] = callee;
+ if (frame_state.valid()) {
+ inputs[1] = frame_state;
+ }
+ inputs.SubVector(1 + frame_state.valid(), inputs.size())
+ .OverwriteWith(arguments);
+ }
+
+ void Validate(const Graph& graph) const {
+ if (frame_state().valid()) {
+ DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
+ }
+ }
+
+ static CallAndCatchExceptionOp& New(Graph* graph, OpIndex callee,
+ OpIndex frame_state,
+ base::Vector<const OpIndex> arguments,
+ Block* if_success, Block* if_exception,
+ const TSCallDescriptor* descriptor) {
+ return Base::New(graph, 1 + frame_state.valid() + arguments.size(), callee,
+ frame_state, arguments, if_success, if_exception,
+ descriptor);
+ }
+
+ auto options() const {
+ return std::tuple{descriptor, if_success, if_exception};
+ }
+};
+
+struct LoadExceptionOp : FixedArityOperationT<0, LoadExceptionOp> {
+ static constexpr OpProperties properties = OpProperties::Reading();
+
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ LoadExceptionOp() : Base() {}
+ void Validate(const Graph& graph) const {}
+
+ auto options() const { return std::tuple{}; }
+};
+
struct TailCallOp : OperationT<TailCallOp> {
- const CallDescriptor* descriptor;
+ const TSCallDescriptor* descriptor;
static constexpr OpProperties properties =
OpProperties::BlockTerminatorWithAnySideEffect();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return descriptor->out_reps;
+ }
OpIndex callee() const { return input(0); }
base::Vector<const OpIndex> arguments() const {
@@ -1724,15 +2224,16 @@ struct TailCallOp : OperationT<TailCallOp> {
}
TailCallOp(OpIndex callee, base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor)
+ const TSCallDescriptor* descriptor)
: Base(1 + arguments.size()), descriptor(descriptor) {
base::Vector<OpIndex> inputs = this->inputs();
inputs[0] = callee;
inputs.SubVector(1, inputs.size()).OverwriteWith(arguments);
}
+ void Validate(const Graph& graph) const {}
static TailCallOp& New(Graph* graph, OpIndex callee,
base::Vector<const OpIndex> arguments,
- const CallDescriptor* descriptor) {
+ const TSCallDescriptor* descriptor) {
return Base::New(graph, 1 + arguments.size(), callee, arguments,
descriptor);
}
@@ -1742,13 +2243,16 @@ struct TailCallOp : OperationT<TailCallOp> {
// Control-flow should never reach here.
struct UnreachableOp : FixedArityOperationT<0, UnreachableOp> {
static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
UnreachableOp() : Base() {}
+ void Validate(const Graph& graph) const {}
auto options() const { return std::tuple{}; }
};
struct ReturnOp : OperationT<ReturnOp> {
static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
// Number of additional stack slots to be removed.
OpIndex pop_count() const { return input(0); }
@@ -1763,6 +2267,11 @@ struct ReturnOp : OperationT<ReturnOp> {
inputs[0] = pop_count;
inputs.SubVector(1, inputs.size()).OverwriteWith(return_values);
}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, pop_count(), RegisterRepresentation::Word32()));
+ }
static ReturnOp& New(Graph* graph, OpIndex pop_count,
base::Vector<const OpIndex> return_values) {
return Base::New(graph, 1 + return_values.size(), pop_count, return_values);
@@ -1774,64 +2283,68 @@ struct GotoOp : FixedArityOperationT<0, GotoOp> {
Block* destination;
static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
explicit GotoOp(Block* destination) : Base(), destination(destination) {}
+ void Validate(const Graph& graph) const {}
auto options() const { return std::tuple{destination}; }
};
struct BranchOp : FixedArityOperationT<1, BranchOp> {
Block* if_true;
Block* if_false;
+ BranchHint hint;
static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex condition() const { return input(0); }
- BranchOp(OpIndex condition, Block* if_true, Block* if_false)
- : Base(condition), if_true(if_true), if_false(if_false) {}
- auto options() const { return std::tuple{if_true, if_false}; }
-};
-
-// `CatchExceptionOp` has to follow a `CallOp` with a subsequent
-// `CheckLazyDeoptOp`. It provides the exception value, which might only be used
-// from the `if_exception` successor.
-struct CatchExceptionOp : FixedArityOperationT<1, CatchExceptionOp> {
- Block* if_success;
- Block* if_exception;
-
- static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ BranchOp(OpIndex condition, Block* if_true, Block* if_false, BranchHint hint)
+ : Base(condition), if_true(if_true), if_false(if_false), hint(hint) {}
- OpIndex call() const { return input(0); }
-
- explicit CatchExceptionOp(OpIndex call, Block* if_success,
- Block* if_exception)
- : Base(call), if_success(if_success), if_exception(if_exception) {}
- auto options() const { return std::tuple{if_success, if_exception}; }
+ void Validate(const Graph& graph) const {
+ DCHECK(
+ ValidOpInputRep(graph, condition(), RegisterRepresentation::Word32()));
+ }
+ auto options() const { return std::tuple{if_true, if_false, hint}; }
};
struct SwitchOp : FixedArityOperationT<1, SwitchOp> {
struct Case {
int32_t value;
Block* destination;
+ BranchHint hint;
- Case(int32_t value, Block* destination)
- : value(value), destination(destination) {}
+ Case(int32_t value, Block* destination, BranchHint hint)
+ : value(value), destination(destination), hint(hint) {}
bool operator==(const Case& other) const {
- return value == other.value && destination == other.destination;
+ return value == other.value && destination == other.destination &&
+ hint == other.hint;
}
};
base::Vector<const Case> cases;
Block* default_case;
+ BranchHint default_hint;
static constexpr OpProperties properties = OpProperties::BlockTerminator();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
OpIndex input() const { return Base::input(0); }
- SwitchOp(OpIndex input, base::Vector<const Case> cases, Block* default_case)
- : Base(input), cases(cases), default_case(default_case) {}
+ SwitchOp(OpIndex input, base::Vector<const Case> cases, Block* default_case,
+ BranchHint default_hint)
+ : Base(input),
+ cases(cases),
+ default_case(default_case),
+ default_hint(default_hint) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Word32()));
+ }
void PrintOptions(std::ostream& os) const;
- auto options() const { return std::tuple{cases, default_case}; }
+ auto options() const { return std::tuple{cases, default_case, default_hint}; }
};
template <>
@@ -1841,12 +2354,47 @@ struct fast_hash<SwitchOp::Case> {
}
};
+inline base::SmallVector<Block*, 4> SuccessorBlocks(const Operation& op) {
+ DCHECK(op.Properties().is_block_terminator);
+ switch (op.opcode) {
+ case Opcode::kCallAndCatchException: {
+ auto& casted = op.Cast<CallAndCatchExceptionOp>();
+ return {casted.if_success, casted.if_exception};
+ }
+ case Opcode::kGoto: {
+ auto& casted = op.Cast<GotoOp>();
+ return {casted.destination};
+ }
+ case Opcode::kBranch: {
+ auto& casted = op.Cast<BranchOp>();
+ return {casted.if_true, casted.if_false};
+ }
+ case Opcode::kReturn:
+ case Opcode::kDeoptimize:
+ case Opcode::kUnreachable:
+ return base::SmallVector<Block*, 4>{};
+ case Opcode::kSwitch: {
+ auto& casted = op.Cast<SwitchOp>();
+ base::SmallVector<Block*, 4> result;
+ for (const SwitchOp::Case& c : casted.cases) {
+ result.push_back(c.destination);
+ }
+ result.push_back(casted.default_case);
+ return result;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
// Tuples are only used to lower operations with multiple outputs.
// `TupleOp` should be folded away by subsequent `ProjectionOp`s.
struct TupleOp : OperationT<TupleOp> {
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
explicit TupleOp(base::Vector<const OpIndex> inputs) : Base(inputs) {}
+ void Validate(const Graph& graph) const {}
auto options() const { return std::tuple{}; }
};
@@ -1854,15 +2402,845 @@ struct TupleOp : OperationT<TupleOp> {
// distinguish them.
struct ProjectionOp : FixedArityOperationT<1, ProjectionOp> {
uint16_t index;
+ RegisterRepresentation rep;
- static constexpr OpProperties properties = OpProperties::Pure();
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&rep, 1);
+ }
OpIndex input() const { return Base::input(0); }
- ProjectionOp(OpIndex input, uint16_t index) : Base(input), index(index) {}
+ ProjectionOp(OpIndex input, uint16_t index, RegisterRepresentation rep)
+ : Base(input), index(index), rep(rep) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), rep, index));
+ }
auto options() const { return std::tuple{index}; }
};
+struct CheckTurboshaftTypeOfOp
+ : FixedArityOperationT<1, CheckTurboshaftTypeOfOp> {
+ RegisterRepresentation rep;
+ Type type;
+ bool successful;
+
+ static constexpr OpProperties properties = OpProperties::AnySideEffects();
+ base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
+
+ OpIndex input() const { return Base::input(0); }
+
+ CheckTurboshaftTypeOfOp(OpIndex input, RegisterRepresentation rep, Type type,
+ bool successful)
+ : Base(input), rep(rep), type(std::move(type)), successful(successful) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), rep));
+ }
+ auto options() const { return std::tuple{rep, type, successful}; }
+};
+
+struct ObjectIsOp : FixedArityOperationT<1, ObjectIsOp> {
+ enum class Kind : uint8_t {
+ kArrayBufferView,
+ kBigInt,
+ kBigInt64,
+ kCallable,
+ kConstructor,
+ kDetectableCallable,
+ kInternalizedString,
+ kNonCallable,
+ kNumber,
+ kReceiver,
+ kReceiverOrNullOrUndefined,
+ kSmi,
+ kString,
+ kSymbol,
+ kUndetectable,
+ };
+ enum class InputAssumptions : uint8_t {
+ kNone,
+ kHeapObject,
+ kBigInt,
+ };
+ Kind kind;
+ InputAssumptions input_assumptions;
+
+ static constexpr OpProperties properties = OpProperties::Reading();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ ObjectIsOp(OpIndex input, Kind kind, InputAssumptions input_assumptions)
+ : Base(input), kind(kind), input_assumptions(input_assumptions) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Tagged()));
+ }
+ auto options() const { return std::tuple{kind, input_assumptions}; }
+};
+std::ostream& operator<<(std::ostream& os, ObjectIsOp::Kind kind);
+std::ostream& operator<<(std::ostream& os,
+ ObjectIsOp::InputAssumptions input_assumptions);
+
+struct FloatIsOp : FixedArityOperationT<1, FloatIsOp> {
+ enum class Kind : uint8_t {
+ kNaN,
+ };
+ Kind kind;
+ FloatRepresentation input_rep;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ FloatIsOp(OpIndex input, Kind kind, FloatRepresentation input_rep)
+ : Base(input), kind(kind), input_rep(input_rep) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ }
+ auto options() const { return std::tuple{kind, input_rep}; }
+};
+std::ostream& operator<<(std::ostream& os, FloatIsOp::Kind kind);
+
+struct ConvertToObjectOp : FixedArityOperationT<1, ConvertToObjectOp> {
+ enum class Kind : uint8_t {
+ kBigInt,
+ kBoolean,
+ kHeapNumber,
+ kNumber,
+ kSmi,
+ kString,
+ };
+ enum class InputInterpretation : uint8_t {
+ kSigned,
+ kUnsigned,
+ kCharCode,
+ kCodePoint,
+ };
+ Kind kind;
+ RegisterRepresentation input_rep;
+ InputInterpretation input_interpretation;
+ CheckForMinusZeroMode minus_zero_mode;
+
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ ConvertToObjectOp(OpIndex input, Kind kind, RegisterRepresentation input_rep,
+ InputInterpretation input_interpretation,
+ CheckForMinusZeroMode minus_zero_mode)
+ : Base(input),
+ kind(kind),
+ input_rep(input_rep),
+ input_interpretation(input_interpretation),
+ minus_zero_mode(minus_zero_mode) {}
+
+ void Validate(const Graph& graph) const {
+ switch (kind) {
+ case Kind::kBigInt:
+ DCHECK_EQ(input_rep, RegisterRepresentation::Word64());
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ DCHECK_EQ(minus_zero_mode,
+ CheckForMinusZeroMode::kDontCheckForMinusZero);
+ break;
+ case Kind::kBoolean:
+ DCHECK_EQ(input_rep, RegisterRepresentation::Word32());
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ DCHECK_EQ(minus_zero_mode,
+ CheckForMinusZeroMode::kDontCheckForMinusZero);
+ break;
+ case Kind::kNumber:
+ case Kind::kHeapNumber:
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ DCHECK_IMPLIES(
+ minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero,
+ input_rep == RegisterRepresentation::Float64());
+ break;
+ case Kind::kSmi:
+ DCHECK_EQ(input_rep, WordRepresentation::Word32());
+ DCHECK_EQ(minus_zero_mode,
+ CheckForMinusZeroMode::kDontCheckForMinusZero);
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ break;
+ case Kind::kString:
+ DCHECK_EQ(input_rep, WordRepresentation::Word32());
+ DCHECK_EQ(input_interpretation,
+ any_of(InputInterpretation::kCharCode,
+ InputInterpretation::kCodePoint));
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ break;
+ }
+ }
+
+ auto options() const {
+ return std::tuple{kind, input_rep, input_interpretation, minus_zero_mode};
+ }
+};
+std::ostream& operator<<(std::ostream& os, ConvertToObjectOp::Kind kind);
+
+struct ConvertToObjectOrDeoptOp
+ : FixedArityOperationT<2, ConvertToObjectOrDeoptOp> {
+ enum class Kind : uint8_t {
+ kSmi,
+ };
+ enum class InputInterpretation : uint8_t {
+ kSigned,
+ kUnsigned,
+ };
+ Kind kind;
+ RegisterRepresentation input_rep;
+ InputInterpretation input_interpretation;
+ FeedbackSource feedback;
+
+ static constexpr OpProperties properties = OpProperties::CanAbort();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex input() const { return Base::input(0); }
+ OpIndex frame_state() const { return Base::input(1); }
+
+ ConvertToObjectOrDeoptOp(OpIndex input, OpIndex frame_state, Kind kind,
+ RegisterRepresentation input_rep,
+ InputInterpretation input_interpretation,
+ const FeedbackSource& feedback)
+ : Base(input, frame_state),
+ kind(kind),
+ input_rep(input_rep),
+ input_interpretation(input_interpretation),
+ feedback(feedback) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), input_rep));
+ }
+
+ auto options() const {
+ return std::tuple{kind, input_rep, input_interpretation, feedback};
+ }
+};
+std::ostream& operator<<(std::ostream& os, ConvertToObjectOrDeoptOp::Kind kind);
+std::ostream& operator<<(
+ std::ostream& os,
+ ConvertToObjectOrDeoptOp::InputInterpretation input_interpretation);
+
+struct ConvertObjectToPrimitiveOp
+ : FixedArityOperationT<1, ConvertObjectToPrimitiveOp> {
+ enum class Kind : uint8_t {
+ kInt32,
+ kInt64,
+ kUint32,
+ kBit,
+ kFloat64,
+ };
+ enum class InputAssumptions : uint8_t {
+ kObject,
+ kSmi,
+ kNumberOrOddball,
+ };
+ Kind kind;
+ InputAssumptions input_assumptions;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (kind) {
+ case Kind::kInt32:
+ case Kind::kUint32:
+ case Kind::kBit:
+ return RepVector<RegisterRepresentation::Word32()>();
+ case Kind::kInt64:
+ return RepVector<RegisterRepresentation::Word64()>();
+ case Kind::kFloat64:
+ return RepVector<RegisterRepresentation::Float64()>();
+ }
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ ConvertObjectToPrimitiveOp(OpIndex input, Kind kind,
+ InputAssumptions input_assumptions)
+ : Base(input), kind(kind), input_assumptions(input_assumptions) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind, input_assumptions}; }
+};
+std::ostream& operator<<(std::ostream& os,
+ ConvertObjectToPrimitiveOp::Kind kind);
+std::ostream& operator<<(
+ std::ostream& os,
+ ConvertObjectToPrimitiveOp::InputAssumptions input_assumptions);
+
+struct ConvertObjectToPrimitiveOrDeoptOp
+ : FixedArityOperationT<2, ConvertObjectToPrimitiveOrDeoptOp> {
+ enum class PrimitiveKind : uint8_t {
+ kInt32,
+ kInt64,
+ kFloat64,
+ kArrayIndex,
+ };
+ enum class ObjectKind : uint8_t {
+ kNumber,
+ kNumberOrBoolean,
+ kNumberOrOddball,
+ kNumberOrString,
+ kSmi,
+ };
+ ObjectKind from_kind;
+ PrimitiveKind to_kind;
+ CheckForMinusZeroMode minus_zero_mode;
+ FeedbackSource feedback;
+
+ static constexpr OpProperties properties = OpProperties::ReadingAndCanAbort();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (to_kind) {
+ case PrimitiveKind::kInt32:
+ return RepVector<RegisterRepresentation::Word32()>();
+ case PrimitiveKind::kInt64:
+ return RepVector<RegisterRepresentation::Word64()>();
+ case PrimitiveKind::kFloat64:
+ return RepVector<RegisterRepresentation::Float64()>();
+ case PrimitiveKind::kArrayIndex:
+ return Is64() ? RepVector<RegisterRepresentation::Word64()>()
+ : RepVector<RegisterRepresentation::Word32()>();
+ }
+ }
+
+ OpIndex input() const { return Base::input(0); }
+ OpIndex frame_state() const { return Base::input(1); }
+
+ ConvertObjectToPrimitiveOrDeoptOp(OpIndex input, OpIndex frame_state,
+ ObjectKind from_kind, PrimitiveKind to_kind,
+ CheckForMinusZeroMode minus_zero_mode,
+ const FeedbackSource& feedback)
+ : Base(input, frame_state),
+ from_kind(from_kind),
+ to_kind(to_kind),
+ minus_zero_mode(minus_zero_mode),
+ feedback(feedback) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Tagged()));
+ DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
+ }
+
+ auto options() const {
+ return std::tuple{from_kind, to_kind, minus_zero_mode, feedback};
+ }
+};
+std::ostream& operator<<(std::ostream& os,
+ ConvertObjectToPrimitiveOrDeoptOp::ObjectKind kind);
+std::ostream& operator<<(std::ostream& os,
+ ConvertObjectToPrimitiveOrDeoptOp::PrimitiveKind kind);
+
+struct TruncateObjectToPrimitiveOp
+ : FixedArityOperationT<1, TruncateObjectToPrimitiveOp> {
+ enum class Kind : uint8_t {
+ kInt32,
+ kInt64,
+ kBit,
+ };
+ enum class InputAssumptions : uint8_t {
+ kBigInt,
+ kNumberOrOddball,
+ kHeapObject,
+ kObject,
+ };
+ Kind kind;
+ InputAssumptions input_assumptions;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ switch (kind) {
+ case Kind::kInt32:
+ case Kind::kBit:
+ return RepVector<RegisterRepresentation::Word32()>();
+ case Kind::kInt64:
+ return RepVector<RegisterRepresentation::Word64()>();
+ }
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ TruncateObjectToPrimitiveOp(OpIndex input, Kind kind,
+ InputAssumptions input_assumptions)
+ : Base(input), kind(kind), input_assumptions(input_assumptions) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind, input_assumptions}; }
+};
+std::ostream& operator<<(std::ostream& os,
+ TruncateObjectToPrimitiveOp::Kind kind);
+std::ostream& operator<<(
+ std::ostream& os,
+ TruncateObjectToPrimitiveOp::InputAssumptions input_assumptions);
+
+enum class TagKind {
+ kSmiTag,
+};
+std::ostream& operator<<(std::ostream& os, TagKind kind);
+
+struct TagOp : FixedArityOperationT<1, TagOp> {
+ TagKind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ TagOp(OpIndex input, TagKind kind) : Base(input), kind(kind) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Word32()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+
+struct UntagOp : FixedArityOperationT<1, UntagOp> {
+ TagKind kind;
+ RegisterRepresentation rep;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return base::VectorOf(&rep, 1);
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ UntagOp(OpIndex input, TagKind kind, RegisterRepresentation rep)
+ : Base(input), kind(kind), rep(rep) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind, rep}; }
+};
+
+struct NewConsStringOp : FixedArityOperationT<3, NewConsStringOp> {
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex length() const { return Base::input(0); }
+ OpIndex first() const { return Base::input(1); }
+ OpIndex second() const { return Base::input(2); }
+
+ NewConsStringOp(OpIndex length, OpIndex first, OpIndex second)
+ : Base(length, first, second) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, length(), RegisterRepresentation::Word32()));
+ DCHECK(ValidOpInputRep(graph, first(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, second(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct NewArrayOp : FixedArityOperationT<1, NewArrayOp> {
+ enum class Kind : uint8_t {
+ kDouble,
+ kObject,
+ };
+ Kind kind;
+ AllocationType allocation_type;
+
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex length() const { return Base::input(0); }
+
+ NewArrayOp(OpIndex length, Kind kind, AllocationType allocation_type)
+ : Base(length), kind(kind), allocation_type(allocation_type) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, length(),
+ RegisterRepresentation::PointerSized()));
+ }
+
+ auto options() const { return std::tuple{kind, allocation_type}; }
+};
+std::ostream& operator<<(std::ostream& os, NewArrayOp::Kind kind);
+
+struct DoubleArrayMinMaxOp : FixedArityOperationT<1, DoubleArrayMinMaxOp> {
+ enum class Kind : uint8_t {
+ kMin,
+ kMax,
+ };
+ Kind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex array() const { return Base::input(0); }
+
+ DoubleArrayMinMaxOp(OpIndex array, Kind kind) : Base(array), kind(kind) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, array(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, DoubleArrayMinMaxOp::Kind kind);
+
+// TODO(nicohartmann@): We should consider getting rid of the LoadFieldByIndex
+// operation.
+struct LoadFieldByIndexOp : FixedArityOperationT<2, LoadFieldByIndexOp> {
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex object() const { return Base::input(0); }
+ // Index encoding (see `src/objects/field-index-inl.h`):
+ // For efficiency, the LoadFieldByIndex operation takes an index that is
+ // optimized for quick access. If the property is inline, the index is
+ // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
+ // disambiguate the zero out-of-line index from the zero in-object case.
+ // The index itself is shifted up by one bit, with the lowest bit signifying
+ // whether the field is a mutable double box (1) or not (0). A decoding
+ // sketch follows this struct.
+ OpIndex index() const { return Base::input(1); }
+
+ LoadFieldByIndexOp(OpIndex object, OpIndex index) : Base(object, index) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, object(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, index(), RegisterRepresentation::Word32()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
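+
+// A decoding sketch for the index described above (illustrative only; the
+// authoritative encoding lives in `src/objects/field-index-inl.h`):
+//
+//   bool is_double_box = encoded & 1;      // lowest bit: mutable double box?
+//   int32_t signed_index = encoded >> 1;   // arithmetic shift drops that bit
+//   if (signed_index >= 0) {
+//     // inline (in-object) field at `signed_index`
+//   } else {
+//     int32_t out_of_line_index = -signed_index - 1;  // backing-store slot
+//   }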
+
+struct DebugBreakOp : FixedArityOperationT<0, DebugBreakOp> {
+ static constexpr OpProperties properties = OpProperties::AnySideEffects();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<>();
+ }
+
+ DebugBreakOp() : Base() {}
+ void Validate(const Graph& graph) const {}
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct BigIntBinopOp : FixedArityOperationT<3, BigIntBinopOp> {
+ enum class Kind : uint8_t {
+ kAdd,
+ kSub,
+ kMul,
+ kDiv,
+ kMod,
+ kBitwiseAnd,
+ kBitwiseOr,
+ kBitwiseXor,
+ kShiftLeft,
+ kShiftRightArithmetic,
+ };
+ Kind kind;
+
+ // TODO(nicohartmann@): Maybe we can specify more precise properties here.
+ // These operations can deopt (abort), allocate and read immutable data.
+ static constexpr OpProperties properties = OpProperties::AnySideEffects();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex left() const { return Base::input(0); }
+ OpIndex right() const { return Base::input(1); }
+ OpIndex frame_state() const { return Base::input(2); }
+
+ BigIntBinopOp(OpIndex left, OpIndex right, OpIndex frame_state, Kind kind)
+ : Base(left, right, frame_state), kind(kind) {}
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, right(), RegisterRepresentation::Tagged()));
+ DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, BigIntBinopOp::Kind kind);
+
+struct BigIntEqualOp : FixedArityOperationT<2, BigIntEqualOp> {
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex left() const { return Base::input(0); }
+ OpIndex right() const { return Base::input(1); }
+
+ BigIntEqualOp(OpIndex left, OpIndex right) : Base(left, right) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, right(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct BigIntComparisonOp : FixedArityOperationT<2, BigIntComparisonOp> {
+ enum class Kind : uint8_t {
+ kLessThan,
+ kLessThanOrEqual,
+ };
+ Kind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex left() const { return Base::input(0); }
+ OpIndex right() const { return Base::input(1); }
+
+ BigIntComparisonOp(OpIndex left, OpIndex right, Kind kind)
+ : Base(left, right), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, right(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, BigIntComparisonOp::Kind kind);
+
+struct BigIntUnaryOp : FixedArityOperationT<1, BigIntUnaryOp> {
+ enum class Kind : uint8_t {
+ kNegate,
+ };
+ Kind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex input() const { return Base::input(0); }
+
+ BigIntUnaryOp(OpIndex input, Kind kind) : Base(input), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, input(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, BigIntUnaryOp::Kind kind);
+
+struct LoadRootRegisterOp : FixedArityOperationT<0, LoadRootRegisterOp> {
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::PointerSized()>();
+ }
+
+ LoadRootRegisterOp() : Base() {}
+ void Validate(const Graph& graph) const {}
+ std::tuple<> options() const { return {}; }
+};
+
+struct StringAtOp : FixedArityOperationT<2, StringAtOp> {
+ enum class Kind : uint8_t {
+ kCharCode,
+ kCodePoint,
+ };
+ Kind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
+
+ OpIndex string() const { return Base::input(0); }
+ OpIndex position() const { return Base::input(1); }
+
+ StringAtOp(OpIndex string, OpIndex position, Kind kind)
+ : Base(string, position), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, string(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, position(),
+ RegisterRepresentation::PointerSized()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, StringAtOp::Kind kind);
+
+#ifdef V8_INTL_SUPPORT
+struct StringToCaseIntlOp : FixedArityOperationT<1, StringToCaseIntlOp> {
+ enum class Kind : uint8_t {
+ kLower,
+ kUpper,
+ };
+ Kind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex string() const { return Base::input(0); }
+
+ StringToCaseIntlOp(OpIndex string, Kind kind) : Base(string), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, string(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, StringToCaseIntlOp::Kind kind);
+#endif // V8_INTL_SUPPORT
+
+struct StringLengthOp : FixedArityOperationT<1, StringLengthOp> {
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Word32()>();
+ }
+
+ OpIndex string() const { return Base::input(0); }
+
+ explicit StringLengthOp(OpIndex string) : Base(string) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, string(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct StringIndexOfOp : FixedArityOperationT<3, StringIndexOfOp> {
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ // Searches for the string `search` within the string `string`, starting at
+ // `position`.
+ OpIndex string() const { return Base::input(0); }
+ OpIndex search() const { return Base::input(1); }
+ OpIndex position() const { return Base::input(2); }
+
+ StringIndexOfOp(OpIndex string, OpIndex search, OpIndex position)
+ : Base(string, search, position) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, string(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, search(), RegisterRepresentation::Tagged()));
+ DCHECK(
+ ValidOpInputRep(graph, position(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct StringFromCodePointAtOp
+ : FixedArityOperationT<2, StringFromCodePointAtOp> {
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex string() const { return Base::input(0); }
+ OpIndex index() const { return Base::input(1); }
+
+ StringFromCodePointAtOp(OpIndex string, OpIndex index)
+ : Base(string, index) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, string(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, index(),
+ RegisterRepresentation::PointerSized()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct StringSubstringOp : FixedArityOperationT<3, StringSubstringOp> {
+ static constexpr OpProperties properties = OpProperties::PureMayAllocate();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex string() const { return Base::input(0); }
+ OpIndex start() const { return Base::input(1); }
+ OpIndex end() const { return Base::input(2); }
+
+ StringSubstringOp(OpIndex string, OpIndex start, OpIndex end)
+ : Base(string, start, end) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, string(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, start(), RegisterRepresentation::Word32()));
+ DCHECK(ValidOpInputRep(graph, end(), RegisterRepresentation::Word32()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct StringEqualOp : FixedArityOperationT<2, StringEqualOp> {
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex left() const { return Base::input(0); }
+ OpIndex right() const { return Base::input(1); }
+
+ StringEqualOp(OpIndex left, OpIndex right) : Base(left, right) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, right(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{}; }
+};
+
+struct StringComparisonOp : FixedArityOperationT<2, StringComparisonOp> {
+ enum class Kind : uint8_t {
+ kLessThan,
+ kLessThanOrEqual,
+ };
+ Kind kind;
+
+ static constexpr OpProperties properties = OpProperties::PureNoAllocation();
+ base::Vector<const RegisterRepresentation> outputs_rep() const {
+ return RepVector<RegisterRepresentation::Tagged()>();
+ }
+
+ OpIndex left() const { return Base::input(0); }
+ OpIndex right() const { return Base::input(1); }
+
+ StringComparisonOp(OpIndex left, OpIndex right, Kind kind)
+ : Base(left, right), kind(kind) {}
+
+ void Validate(const Graph& graph) const {
+ DCHECK(ValidOpInputRep(graph, left(), RegisterRepresentation::Tagged()));
+ DCHECK(ValidOpInputRep(graph, right(), RegisterRepresentation::Tagged()));
+ }
+
+ auto options() const { return std::tuple{kind}; }
+};
+std::ostream& operator<<(std::ostream& os, StringComparisonOp::Kind kind);
+
#define OPERATION_PROPERTIES_CASE(Name) Name##Op::PropertiesIfStatic(),
static constexpr base::Optional<OpProperties>
kOperationPropertiesTable[kNumberOfOpcodes] = {
@@ -1929,6 +3307,31 @@ inline size_t Operation::StorageSlotCount(Opcode opcode, size_t input_count) {
return std::max<size_t>(2, (r - 1 + size + input_count) / r);
}
+template <class Op>
+V8_INLINE bool CanBeUsedAsInput(const Op& op) {
+ if (std::is_same<Op, FrameStateOp>::value) {
+ // FrameStateOp is the only Operation that can be used as an input but has
+ // empty `outputs_rep`.
+ return true;
+ }
+ // For all other Operations, they can only be used as an input if they have at
+ // least one output.
+ return op.outputs_rep().size() > 0;
+}
+
+inline base::Vector<const RegisterRepresentation> Operation::outputs_rep()
+ const {
+ switch (opcode) {
+#define CASE(type) \
+ case Opcode::k##type: { \
+ const type##Op& op = Cast<type##Op>(); \
+ return op.outputs_rep(); \
+ }
+ TURBOSHAFT_OPERATION_LIST(CASE)
+#undef CASE
+ }
+}
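+
+// Illustrative expansion of a single CASE above (assuming, for the sake of the
+// example, that `Goto` is one of the entries of TURBOSHAFT_OPERATION_LIST):
+//
+//   case Opcode::kGoto: {
+//     const GotoOp& op = Cast<GotoOp>();
+//     return op.outputs_rep();
+//   }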
+
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_OPERATIONS_H_
diff --git a/deps/v8/src/compiler/turboshaft/optimization-phase.h b/deps/v8/src/compiler/turboshaft/optimization-phase.h
index b7f3ed4a70..b75572a59e 100644
--- a/deps/v8/src/compiler/turboshaft/optimization-phase.h
+++ b/deps/v8/src/compiler/turboshaft/optimization-phase.h
@@ -17,10 +17,17 @@
#include "src/base/vector.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/turboshaft/graph.h"
+#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/reducer-traits.h"
+#include "src/compiler/turboshaft/snapshot-table.h"
namespace v8::internal::compiler::turboshaft {
+using Variable =
+ SnapshotTable<OpIndex, base::Optional<RegisterRepresentation>>::Key;
+using MaybeVariable = base::Optional<Variable>;
+
int CountDecimalDigits(uint32_t value);
struct PaddingSpace {
int spaces;
@@ -42,87 +49,53 @@ struct AnalyzerBase {
: phase_zone(phase_zone), graph(graph) {}
};
-// TODO(dmercadier, tebbi): transform this analyzer into a reducer, and plug in
-// into some reducer stacks.
-struct LivenessAnalyzer : AnalyzerBase {
- using Base = AnalyzerBase;
- // Using `uint8_t` instead of `bool` prevents `std::vector` from using a
- // bitvector, which has worse performance.
- std::vector<uint8_t> op_used;
-
- LivenessAnalyzer(const Graph& graph, Zone* phase_zone)
- : AnalyzerBase(graph, phase_zone), op_used(graph.op_id_count(), false) {}
-
- bool OpIsUsed(OpIndex i) { return op_used[i.id()]; }
-
- void Run() {
- for (uint32_t unprocessed_count = graph.block_count();
- unprocessed_count > 0;) {
- BlockIndex block_index = static_cast<BlockIndex>(unprocessed_count - 1);
- --unprocessed_count;
- const Block& block = graph.Get(block_index);
- if (V8_UNLIKELY(block.IsLoop())) {
- ProcessBlock<true>(block, &unprocessed_count);
- } else {
- ProcessBlock<false>(block, &unprocessed_count);
- }
- }
- }
-
- template <bool is_loop>
- void ProcessBlock(const Block& block, uint32_t* unprocessed_count) {
- auto op_range = graph.OperationIndices(block);
- for (auto it = op_range.end(); it != op_range.begin();) {
- --it;
- OpIndex index = *it;
- const Operation& op = graph.Get(index);
- if (op.Properties().is_required_when_unused) {
- op_used[index.id()] = true;
- } else if (!OpIsUsed(index)) {
- continue;
- }
- if constexpr (is_loop) {
- if (op.Is<PhiOp>()) {
- const PhiOp& phi = op.Cast<PhiOp>();
- // Mark the loop backedge as used. Trigger a revisit if it wasn't
- // marked as used already.
- if (!OpIsUsed(phi.inputs()[PhiOp::kLoopPhiBackEdgeIndex])) {
- Block* backedge = block.LastPredecessor();
- // Revisit the loop by increasing the `unprocessed_count` to include
- // all blocks of the loop.
- *unprocessed_count =
- std::max(*unprocessed_count, backedge->index().id() + 1);
- }
- }
- }
- for (OpIndex input : op.inputs()) {
- op_used[input.id()] = true;
- }
- }
- }
-};
+// All operations whose `saturated_use_count` is zero are unused and can be
+// skipped. Analyzers modify the input graph in-place when they want to mark
+// some operations as removable. In order to make that work for operations that
+// have no uses, such as Goto and Branch, all operations that have the property
+// `is_required_when_unused` have a non-zero `saturated_use_count`.
+V8_INLINE bool ShouldSkipOperation(const Operation& op) {
+ return op.saturated_use_count == 0;
+}
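+
+// Usage sketch (illustrative; the exact way analyzers clear the use count is
+// an assumption): an analyzer that proves an operation dead zeroes its use
+// count, and the graph visitor (see `VisitOp` below) consults
+// `ShouldSkipOperation` before re-emitting the operation into the new graph:
+//
+//   Operation& op = graph.Get(index);
+//   if (!op.Properties().is_required_when_unused) {
+//     op.saturated_use_count = 0;  // mark as removable
+//   }
+//   ...
+//   if (ShouldSkipOperation(graph.Get(index))) return true;  // nothing to emit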
template <template <class> class... Reducers>
-class OptimizationPhase {
+class OptimizationPhaseImpl {
public:
- static void Run(Graph* input, Zone* phase_zone, NodeOriginTable* origins) {
- Assembler<Reducers...> phase(*input, input->GetOrCreateCompanion(),
- phase_zone, origins);
+ static void Run(Graph* input, Zone* phase_zone, NodeOriginTable* origins,
+ const typename Assembler<reducer_list<Reducers...>>::ArgT&
+ reducer_args = std::tuple<>{}) {
+ Assembler<reducer_list<Reducers...>> phase(
+ *input, input->GetOrCreateCompanion(), phase_zone, origins,
+ reducer_args);
if (v8_flags.turboshaft_trace_reduction) {
phase.template VisitGraph<true>();
} else {
phase.template VisitGraph<false>();
}
}
- static void RunWithoutTracing(Graph* input, Zone* phase_zone) {
- Assembler<Reducers...> phase(input, input->GetOrCreateCompanion(),
- phase_zone);
- phase->template VisitGraph<false>();
+};
+
+template <template <typename> typename... Reducers>
+class OptimizationPhase {
+ using impl_t = OptimizationPhaseImpl<Reducers...>;
+
+ public:
+ static void Run(Isolate* isolate, Graph* input, Zone* phase_zone,
+ NodeOriginTable* origins,
+ const typename Assembler<reducer_list<Reducers...>>::ArgT&
+ reducer_args = std::tuple<>{}) {
+ impl_t::Run(input, phase_zone, origins, reducer_args);
}
};
+template <typename Next>
+class ReducerBaseForwarder;
+
template <class Assembler>
class GraphVisitor {
+ template <typename Next>
+ friend class ReducerBaseForwarder;
+
public:
GraphVisitor(Graph& input_graph, Graph& output_graph, Zone* phase_zone,
compiler::NodeOriginTable* origins = nullptr)
@@ -131,8 +104,9 @@ class GraphVisitor {
phase_zone_(phase_zone),
origins_(origins),
current_input_block_(nullptr),
- block_mapping_(input_graph.block_count(), nullptr, phase_zone),
- op_mapping_(input_graph.op_id_count(), OpIndex::Invalid(), phase_zone) {
+ op_mapping_(input_graph.op_id_count(), OpIndex::Invalid(), phase_zone),
+ blocks_needing_variables(phase_zone),
+ old_opindex_to_variables(input_graph.op_id_count(), phase_zone) {
output_graph_.Reset();
}
@@ -140,10 +114,11 @@ class GraphVisitor {
// runtime.
template <bool trace_reduction>
void VisitGraph() {
+ assembler().Analyze();
+
// Creating initial old-to-new Block mapping.
- for (const Block& input_block : input_graph().blocks()) {
- block_mapping_[input_block.index().id()] =
- assembler().NewBlock(input_block.kind());
+ for (Block& input_block : modifiable_input_graph().blocks()) {
+ output_graph().NewMappedBlock(&input_block);
}
// Visiting the graph.
@@ -172,6 +147,71 @@ class GraphVisitor {
const Graph& input_graph() const { return input_graph_; }
Graph& output_graph() const { return output_graph_; }
Zone* phase_zone() { return phase_zone_; }
+ const Block* current_input_block() { return current_input_block_; }
+
+ // Analyzers set an Operation's `saturated_use_count` to zero when it is
+ // unused, and thus need a non-const input graph.
+ Graph& modifiable_input_graph() const { return input_graph_; }
+
+ // Visits and emits {input_block} right now (i.e., in the current block). An
+ // illustrative example follows this function.
+ void CloneAndInlineBlock(const Block* input_block) {
+ // Computing which input of Phi operations to use when visiting
+ // {input_block} (since {input_block} doesn't really have predecessors
+ // anymore).
+ int added_block_phi_input = input_block->GetPredecessorIndex(
+ assembler().current_block()->OriginForBlockEnd());
+
+ // There is no guarantee that {input_block} will be entirely removed just
+ // because it's cloned/inlined, since it's possible that it has predecessors
+ // for which this optimization didn't apply. As a result, we add it to
+ // {blocks_needing_variables}, so that if it's ever generated normally,
+ // Variables are used when emitting its content; they can later be merged
+ // when control flow merges with the current version of {input_block} that
+ // we just cloned.
+ blocks_needing_variables.insert(input_block->index());
+
+ // Updating the origin of {current_block}, so that Phis can still be
+ // translated properly (in OptimizationPhase::ReducePhi).
+ assembler().current_block()->SetOrigin(input_block);
+
+ ScopedModification<bool> set_true(&current_block_needs_variables_, true);
+ for (OpIndex index : input_graph().OperationIndices(*input_block)) {
+ if (const PhiOp* phi =
+ input_graph().Get(index).template TryCast<PhiOp>()) {
+ // This Phi has been cloned/inlined and thus now has a single
+ // predecessor, so it shouldn't be a Phi anymore.
+ CreateOldToNewMapping(index,
+ MapToNewGraph(phi->input(added_block_phi_input)));
+ } else {
+ if (!VisitOp<false>(index, input_block)) break;
+ }
+ }
+ }
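+
+ // Illustrative example for CloneAndInlineBlock above (assumed input shape):
+ // if the input graph contains
+ //
+ //   B1: ...; Goto B3
+ //   B2: ...; Goto B3
+ //   B3: x = Phi(a [from B1], b [from B2]); ...
+ //
+ // and B3 is cloned/inlined while the current output block originates from B1,
+ // then `added_block_phi_input` selects the B1 input, so `x` is simply mapped
+ // to the new-graph copy of `a` and no Phi is emitted.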
+
+ template <bool can_be_invalid = false>
+ OpIndex MapToNewGraph(OpIndex old_index, int predecessor_index = -1) {
+ DCHECK(old_index.valid());
+ OpIndex result = op_mapping_[old_index.id()];
+ if (!result.valid()) {
+ // {op_mapping} doesn't have a mapping for {old_index}. The assembler
+ // should provide the mapping.
+ MaybeVariable var = GetVariableFor(old_index);
+ if constexpr (can_be_invalid) {
+ if (!var.has_value()) {
+ return OpIndex::Invalid();
+ }
+ }
+ DCHECK(var.has_value());
+ if (predecessor_index == -1) {
+ result = assembler().Get(var.value());
+ } else {
+ result =
+ assembler().GetPredecessorValue(var.value(), predecessor_index);
+ }
+ }
+ DCHECK(result.valid());
+ return result;
+ }
private:
template <bool trace_reduction>
@@ -194,69 +234,88 @@ class GraphVisitor {
template <bool trace_reduction>
void VisitBlock(const Block* input_block) {
current_input_block_ = input_block;
+ current_block_needs_variables_ =
+ blocks_needing_variables.find(input_block->index()) !=
+ blocks_needing_variables.end();
if constexpr (trace_reduction) {
- std::cout << PrintAsBlockHeader{*input_block} << "\n";
+ std::cout << "\nold " << PrintAsBlockHeader{*input_block} << "\n";
+ std::cout
+ << "new "
+ << PrintAsBlockHeader{*input_block->MapToNextGraph(),
+ assembler().output_graph().next_block_index()}
+ << "\n";
}
- if (!assembler().Bind(MapToNewGraph(input_block->index()))) {
+ Block* new_block = input_block->MapToNextGraph();
+ if (assembler().Bind(new_block)) {
+ for (OpIndex index : input_graph().OperationIndices(*input_block)) {
+ if (!VisitOp<trace_reduction>(index, input_block)) break;
+ }
+ if constexpr (trace_reduction) TraceBlockFinished();
+ } else {
if constexpr (trace_reduction) TraceBlockUnreachable();
- // If we eliminate a loop backedge, we need to turn the loop into a
- // single-predecessor merge block.
- const Operation& last_op =
- *base::Reversed(input_graph().operations(*input_block)).begin();
- if (auto* final_goto = last_op.TryCast<GotoOp>()) {
- if (final_goto->destination->IsLoop()) {
- Block* new_loop = MapToNewGraph(final_goto->destination->index());
+ }
+
+ // If we eliminate a loop backedge, we need to turn the loop into a
+ // single-predecessor merge block.
+ const Operation& last_op =
+ *base::Reversed(input_graph().operations(*input_block)).begin();
+ if (auto* final_goto = last_op.TryCast<GotoOp>()) {
+ if (final_goto->destination->IsLoop()) {
+ if (input_block->index() > final_goto->destination->index()) {
+ Block* new_loop = final_goto->destination->MapToNextGraph();
DCHECK(new_loop->IsLoop());
if (new_loop->IsLoop() && new_loop->PredecessorCount() == 1) {
output_graph_.TurnLoopIntoMerge(new_loop);
}
+ } else {
+ // We have a forward jump to a loop, rather than a backedge. We
+ // don't need to do anything.
}
}
- return;
}
- assembler().current_block()->SetDeferred(input_block->IsDeferred());
- for (OpIndex index : input_graph().OperationIndices(*input_block)) {
- if (!VisitOp<trace_reduction>(index, input_block)) break;
- }
- if constexpr (trace_reduction) TraceBlockFinished();
}
template <bool trace_reduction>
bool VisitOp(OpIndex index, const Block* input_block) {
- if (!assembler().current_block()) return false;
+ Block* current_block = assembler().current_block();
+ if (!current_block) return false;
assembler().SetCurrentOrigin(index);
OpIndex first_output_index =
assembler().output_graph().next_operation_index();
USE(first_output_index);
const Operation& op = input_graph().Get(index);
- if (op.saturated_use_count == 0 &&
- !op.Properties().is_required_when_unused) {
- if constexpr (trace_reduction) TraceOperationUnused();
+ if constexpr (trace_reduction) TraceReductionStart(index);
+ if (ShouldSkipOperation(op)) {
+ if constexpr (trace_reduction) TraceOperationSkipped();
return true;
}
- if constexpr (trace_reduction) TraceReductionStart(index);
OpIndex new_index;
- if (input_block->IsLoop() && op.Is<PhiOp>()) {
- const PhiOp& phi = op.Cast<PhiOp>();
- new_index = assembler().PendingLoopPhi(MapToNewGraph(phi.inputs()[0]),
- phi.rep, phi.inputs()[1]);
- if constexpr (trace_reduction) {
- TraceReductionResult(first_output_index, new_index);
- }
- } else {
- switch (op.opcode) {
-#define EMIT_INSTR_CASE(Name) \
- case Opcode::k##Name: \
- new_index = this->Visit##Name(op.Cast<Name##Op>()); \
+ switch (op.opcode) {
+#define EMIT_INSTR_CASE(Name) \
+ case Opcode::k##Name: \
+ new_index = \
+ assembler().ReduceInputGraph##Name(index, op.Cast<Name##Op>()); \
+ if (CanBeUsedAsInput(op.Cast<Name##Op>())) { \
+ if (!new_index.valid()) { \
+ if constexpr (trace_reduction) TraceOperationSkipped(); \
+ return true; \
+ } \
+ CreateOldToNewMapping(index, new_index); \
+ } \
break;
- TURBOSHAFT_OPERATION_LIST(EMIT_INSTR_CASE)
+ TURBOSHAFT_OPERATION_LIST(EMIT_INSTR_CASE)
#undef EMIT_INSTR_CASE
- }
+ }
if constexpr (trace_reduction) {
- TraceReductionResult(first_output_index, new_index);
+ TraceReductionResult(current_block, first_output_index, new_index);
+ }
+#ifdef DEBUG
+ if (V8_UNLIKELY(v8_flags.turboshaft_verify_reductions)) {
+ if (new_index.valid()) {
+ assembler().Verify(index, new_index);
}
}
- op_mapping_[index.id()] = new_index;
+#endif // DEBUG
return true;
}
@@ -265,9 +324,10 @@ class GraphVisitor {
<< PaddingSpace{5 - CountDecimalDigits(index.id())}
<< OperationPrintStyle{input_graph().Get(index), "#o"} << "\n";
}
- void TraceOperationUnused() { std::cout << "╰─> unused\n\n"; }
+ void TraceOperationSkipped() { std::cout << "╰─> skipped\n\n"; }
void TraceBlockUnreachable() { std::cout << "╰─> unreachable\n\n"; }
- void TraceReductionResult(OpIndex first_output_index, OpIndex new_index) {
+ void TraceReductionResult(Block* current_block, OpIndex first_output_index,
+ OpIndex new_index) {
if (new_index < first_output_index) {
// The operation was replaced with an already existing one.
std::cout << "╰─> #n" << new_index.id() << "\n";
@@ -288,6 +348,12 @@ class GraphVisitor {
std::cout << prefix << " n" << index.id() << ": "
<< PaddingSpace{5 - CountDecimalDigits(index.id())}
<< OperationPrintStyle{output_graph_.Get(index), "#n"} << "\n";
+ if (op.Properties().is_block_terminator && assembler().current_block() &&
+ assembler().current_block() != current_block) {
+ current_block = &assembler().output_graph().Get(
+ BlockIndex(current_block->index().id() + 1));
+ std::cout << "new " << PrintAsBlockHeader{*current_block} << "\n";
+ }
}
std::cout << "\n";
}
@@ -296,10 +362,8 @@ class GraphVisitor {
// These functions take an operation from the old graph and use the assembler
// to emit a corresponding operation in the new graph, translating inputs and
// blocks accordingly.
-
- V8_INLINE OpIndex VisitGoto(const GotoOp& op) {
- Block* destination = MapToNewGraph(op.destination->index());
- assembler().current_block()->SetOrigin(current_input_block_);
+ V8_INLINE OpIndex AssembleOutputGraphGoto(const GotoOp& op) {
+ Block* destination = op.destination->MapToNextGraph();
assembler().ReduceGoto(destination);
if (destination->IsBound()) {
DCHECK(destination->IsLoop());
@@ -307,31 +371,37 @@ class GraphVisitor {
}
return OpIndex::Invalid();
}
- V8_INLINE OpIndex VisitBranch(const BranchOp& op) {
- Block* if_true = MapToNewGraph(op.if_true->index());
- Block* if_false = MapToNewGraph(op.if_false->index());
+ V8_INLINE OpIndex AssembleOutputGraphBranch(const BranchOp& op) {
+ Block* if_true = op.if_true->MapToNextGraph();
+ Block* if_false = op.if_false->MapToNextGraph();
return assembler().ReduceBranch(MapToNewGraph(op.condition()), if_true,
- if_false);
- }
- OpIndex VisitCatchException(const CatchExceptionOp& op) {
- Block* if_success = MapToNewGraph(op.if_success->index());
- Block* if_exception = MapToNewGraph(op.if_exception->index());
- return assembler().ReduceCatchException(MapToNewGraph(op.call()),
- if_success, if_exception);
+ if_false, op.hint);
}
- OpIndex VisitSwitch(const SwitchOp& op) {
+ OpIndex AssembleOutputGraphSwitch(const SwitchOp& op) {
base::SmallVector<SwitchOp::Case, 16> cases;
for (SwitchOp::Case c : op.cases) {
- cases.emplace_back(c.value, MapToNewGraph(c.destination->index()));
+ cases.emplace_back(c.value, c.destination->MapToNextGraph(), c.hint);
}
return assembler().ReduceSwitch(
MapToNewGraph(op.input()),
graph_zone()->CloneVector(base::VectorOf(cases)),
- MapToNewGraph(op.default_case->index()));
- }
- OpIndex VisitPhi(const PhiOp& op) {
+ op.default_case->MapToNextGraph(), op.default_hint);
+ }
+ OpIndex AssembleOutputGraphPhi(const PhiOp& op) {
+ OpIndex ig_index = input_graph().Index(op);
+ if (current_input_block_->IsLoop()) {
+ if (ig_index == op.input(PhiOp::kLoopPhiBackEdgeIndex)) {
+ // Avoid emitting a Loop Phi that points to itself; instead,
+ // emit its 0th input.
+ return MapToNewGraph(op.input(0));
+ }
+ return assembler().PendingLoopPhi(MapToNewGraph(op.input(0)), op.rep,
+ op.input(PhiOp::kLoopPhiBackEdgeIndex));
+ }
+
base::Vector<const OpIndex> old_inputs = op.inputs();
base::SmallVector<OpIndex, 8> new_inputs;
+ int predecessor_count = assembler().current_block()->PredecessorCount();
Block* old_pred = current_input_block_->LastPredecessor();
Block* new_pred = assembler().current_block()->LastPredecessor();
// Control predecessors might be missing after the optimization phase. So we
@@ -342,10 +412,16 @@ class GraphVisitor {
// did not change. If it did, {new_pred} won't be nullptr at the end of this
// loop, and we'll instead fall back to the slower code below to compute the
// inputs of the Phi.
+ int predecessor_index = predecessor_count - 1;
for (OpIndex input : base::Reversed(old_inputs)) {
- if (new_pred && new_pred->Origin() == old_pred) {
- new_inputs.push_back(MapToNewGraph(input));
+ if (new_pred && new_pred->OriginForBlockEnd() == old_pred) {
+ // Phi inputs have to come from predecessors. We thus have to call
+ // MapToNewGraph with {predecessor_index} so that we get an OpIndex that
+ // comes from a predecessor rather than from a Variable merged in the
+ // current block.
+ new_inputs.push_back(MapToNewGraph(input, predecessor_index));
new_pred = new_pred->NeighboringPredecessor();
+ predecessor_index--;
}
old_pred = old_pred->NeighboringPredecessor();
}
@@ -370,33 +446,38 @@ class GraphVisitor {
// To account for this, we reorder the inputs of the Phi, and get rid of
// inputs from blocks that vanished.
- base::SmallVector<uint32_t, 16> old_pred_vec;
+#ifdef DEBUG
+ // To check that indices are set properly, we zap them in debug builds.
+ const uint32_t invalid_custom_data = std::numeric_limits<uint32_t>::max();
+ for (auto& block : assembler().modifiable_input_graph().blocks()) {
+ block.custom_data() = invalid_custom_data;
+ }
+#endif
+ uint32_t pos = current_input_block_->PredecessorCount() - 1;
for (old_pred = current_input_block_->LastPredecessor();
old_pred != nullptr; old_pred = old_pred->NeighboringPredecessor()) {
- old_pred_vec.push_back(old_pred->index().id());
- // Checking that predecessors are indeed sorted.
- DCHECK_IMPLIES(old_pred->NeighboringPredecessor() != nullptr,
- old_pred->index().id() >
- old_pred->NeighboringPredecessor()->index().id());
+ // Store the current index of the {old_pred}.
+ DCHECK_EQ(old_pred->custom_data(), invalid_custom_data);
+ old_pred->custom_data() = pos--;
}
- std::reverse(old_pred_vec.begin(), old_pred_vec.end());
// Filling {new_inputs}: we iterate the new predecessors, and, for each
// predecessor, we check the index of the input corresponding to the old
// predecessor, and we put it next in {new_inputs}.
new_inputs.clear();
+ int predecessor_index = predecessor_count - 1;
for (new_pred = assembler().current_block()->LastPredecessor();
new_pred != nullptr; new_pred = new_pred->NeighboringPredecessor()) {
- const Block* origin = new_pred->Origin();
+ const Block* origin = new_pred->OriginForBlockEnd();
DCHECK_NOT_NULL(origin);
- // {old_pred_vec} is sorted. We can thus use a binary search to find the
- // index of {origin} in {old_pred_vec}: the index is the index of the
- // old input corresponding to {new_pred}.
- auto lower = std::lower_bound(old_pred_vec.begin(), old_pred_vec.end(),
- origin->index().id());
- DCHECK_NE(lower, old_pred_vec.end());
- new_inputs.push_back(
- MapToNewGraph(old_inputs[lower - old_pred_vec.begin()]));
+ DCHECK_NE(origin->custom_data(), invalid_custom_data);
+ OpIndex input = old_inputs[origin->custom_data()];
+ // Phi inputs have to come from predecessors. We thus have to call
+ // MapToNewGraph with {predecessor_index} so that we get an OpIndex that
+ // comes from a predecessor rather than from a Variable merged in the
+ // current block.
+ new_inputs.push_back(MapToNewGraph(input, predecessor_index));
+ predecessor_index--;
}
}
@@ -413,157 +494,334 @@ class GraphVisitor {
std::reverse(new_inputs.begin(), new_inputs.end());
return assembler().ReducePhi(base::VectorOf(new_inputs), op.rep);
}
- OpIndex VisitPendingLoopPhi(const PendingLoopPhiOp& op) { UNREACHABLE(); }
- V8_INLINE OpIndex VisitFrameState(const FrameStateOp& op) {
+ OpIndex AssembleOutputGraphPendingLoopPhi(const PendingLoopPhiOp& op) {
+ UNREACHABLE();
+ }
+ V8_INLINE OpIndex AssembleOutputGraphFrameState(const FrameStateOp& op) {
auto inputs = MapToNewGraph<32>(op.inputs());
return assembler().ReduceFrameState(base::VectorOf(inputs), op.inlined,
op.data);
}
- OpIndex VisitCall(const CallOp& op) {
+ OpIndex AssembleOutputGraphCall(const CallOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
+ OpIndex frame_state = MapToNewGraphIfValid(op.frame_state());
auto arguments = MapToNewGraph<16>(op.arguments());
- return assembler().ReduceCall(callee, base::VectorOf(arguments),
- op.descriptor);
+ return assembler().ReduceCall(callee, frame_state,
+ base::VectorOf(arguments), op.descriptor);
}
- OpIndex VisitTailCall(const TailCallOp& op) {
+ OpIndex AssembleOutputGraphCallAndCatchException(
+ const CallAndCatchExceptionOp& op) {
+ OpIndex callee = MapToNewGraph(op.callee());
+ Block* if_success = op.if_success->MapToNextGraph();
+ Block* if_exception = op.if_exception->MapToNextGraph();
+ OpIndex frame_state = MapToNewGraphIfValid(op.frame_state());
+ auto arguments = MapToNewGraph<16>(op.arguments());
+ return assembler().ReduceCallAndCatchException(
+ callee, frame_state, base::VectorOf(arguments), if_success,
+ if_exception, op.descriptor);
+ }
+ OpIndex AssembleOutputGraphLoadException(const LoadExceptionOp& op) {
+ return assembler().ReduceLoadException();
+ }
+ OpIndex AssembleOutputGraphTailCall(const TailCallOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
auto arguments = MapToNewGraph<16>(op.arguments());
return assembler().ReduceTailCall(callee, base::VectorOf(arguments),
op.descriptor);
}
- OpIndex VisitReturn(const ReturnOp& op) {
+ OpIndex AssembleOutputGraphReturn(const ReturnOp& op) {
// We very rarely have tuples longer than 4.
auto return_values = MapToNewGraph<4>(op.return_values());
return assembler().ReduceReturn(MapToNewGraph(op.pop_count()),
base::VectorOf(return_values));
}
- OpIndex VisitOverflowCheckedBinop(const OverflowCheckedBinopOp& op) {
+ OpIndex AssembleOutputGraphOverflowCheckedBinop(
+ const OverflowCheckedBinopOp& op) {
return assembler().ReduceOverflowCheckedBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
- OpIndex VisitWordUnary(const WordUnaryOp& op) {
+ OpIndex AssembleOutputGraphWordUnary(const WordUnaryOp& op) {
return assembler().ReduceWordUnary(MapToNewGraph(op.input()), op.kind,
op.rep);
}
- OpIndex VisitFloatUnary(const FloatUnaryOp& op) {
+ OpIndex AssembleOutputGraphFloatUnary(const FloatUnaryOp& op) {
return assembler().ReduceFloatUnary(MapToNewGraph(op.input()), op.kind,
op.rep);
}
- OpIndex VisitShift(const ShiftOp& op) {
+ OpIndex AssembleOutputGraphShift(const ShiftOp& op) {
return assembler().ReduceShift(MapToNewGraph(op.left()),
MapToNewGraph(op.right()), op.kind, op.rep);
}
- OpIndex VisitEqual(const EqualOp& op) {
+ OpIndex AssembleOutputGraphEqual(const EqualOp& op) {
return assembler().ReduceEqual(MapToNewGraph(op.left()),
MapToNewGraph(op.right()), op.rep);
}
- OpIndex VisitComparison(const ComparisonOp& op) {
+ OpIndex AssembleOutputGraphComparison(const ComparisonOp& op) {
return assembler().ReduceComparison(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
- OpIndex VisitChange(const ChangeOp& op) {
+ OpIndex AssembleOutputGraphChange(const ChangeOp& op) {
return assembler().ReduceChange(MapToNewGraph(op.input()), op.kind,
op.assumption, op.from, op.to);
}
- OpIndex VisitTryChange(const TryChangeOp& op) {
+ OpIndex AssembleOutputGraphChangeOrDeopt(const ChangeOrDeoptOp& op) {
+ return assembler().ReduceChangeOrDeopt(
+ MapToNewGraph(op.input()), MapToNewGraph(op.frame_state()), op.kind,
+ op.minus_zero_mode, op.feedback);
+ }
+ OpIndex AssembleOutputGraphTryChange(const TryChangeOp& op) {
return assembler().ReduceTryChange(MapToNewGraph(op.input()), op.kind,
op.from, op.to);
}
+ OpIndex AssembleOutputGraphTag(const TagOp& op) {
+ return assembler().ReduceTag(MapToNewGraph(op.input()), op.kind);
+ }
+ OpIndex AssembleOutputGraphUntag(const UntagOp& op) {
+ return assembler().ReduceUntag(MapToNewGraph(op.input()), op.kind, op.rep);
+ }
- OpIndex VisitFloat64InsertWord32(const Float64InsertWord32Op& op) {
+ OpIndex AssembleOutputGraphFloat64InsertWord32(
+ const Float64InsertWord32Op& op) {
return assembler().ReduceFloat64InsertWord32(
MapToNewGraph(op.float64()), MapToNewGraph(op.word32()), op.kind);
}
- OpIndex VisitTaggedBitcast(const TaggedBitcastOp& op) {
+ OpIndex AssembleOutputGraphTaggedBitcast(const TaggedBitcastOp& op) {
return assembler().ReduceTaggedBitcast(MapToNewGraph(op.input()), op.from,
op.to);
}
- OpIndex VisitSelect(const SelectOp& op) {
+ OpIndex AssembleOutputGraphObjectIs(const ObjectIsOp& op) {
+ return assembler().ReduceObjectIs(MapToNewGraph(op.input()), op.kind,
+ op.input_assumptions);
+ }
+ OpIndex AssembleOutputGraphFloatIs(const FloatIsOp& op) {
+ return assembler().ReduceFloatIs(MapToNewGraph(op.input()), op.kind,
+ op.input_rep);
+ }
+ OpIndex AssembleOutputGraphConvertToObject(const ConvertToObjectOp& op) {
+ return assembler().ReduceConvertToObject(
+ MapToNewGraph(op.input()), op.kind, op.input_rep,
+ op.input_interpretation, op.minus_zero_mode);
+ }
+ OpIndex AssembleOutputGraphConvertToObjectOrDeopt(
+ const ConvertToObjectOrDeoptOp& op) {
+ return assembler().ReduceConvertToObjectOrDeopt(
+ MapToNewGraph(op.input()), MapToNewGraph(op.frame_state()), op.kind,
+ op.input_rep, op.input_interpretation, op.feedback);
+ }
+ OpIndex AssembleOutputGraphConvertObjectToPrimitive(
+ const ConvertObjectToPrimitiveOp& op) {
+ return assembler().ReduceConvertObjectToPrimitive(
+ MapToNewGraph(op.input()), op.kind, op.input_assumptions);
+ }
+ OpIndex AssembleOutputGraphConvertObjectToPrimitiveOrDeopt(
+ const ConvertObjectToPrimitiveOrDeoptOp& op) {
+ return assembler().ReduceConvertObjectToPrimitiveOrDeopt(
+ MapToNewGraph(op.input()), MapToNewGraph(op.frame_state()),
+ op.from_kind, op.to_kind, op.minus_zero_mode, op.feedback);
+ }
+ OpIndex AssembleOutputGraphTruncateObjectToPrimitive(
+ const TruncateObjectToPrimitiveOp& op) {
+ return assembler().ReduceTruncateObjectToPrimitive(
+ MapToNewGraph(op.input()), op.kind, op.input_assumptions);
+ }
+ OpIndex AssembleOutputGraphSelect(const SelectOp& op) {
return assembler().ReduceSelect(
MapToNewGraph(op.cond()), MapToNewGraph(op.vtrue()),
MapToNewGraph(op.vfalse()), op.rep, op.hint, op.implem);
}
- OpIndex VisitConstant(const ConstantOp& op) {
+ OpIndex AssembleOutputGraphConstant(const ConstantOp& op) {
return assembler().ReduceConstant(op.kind, op.storage);
}
- OpIndex VisitLoad(const LoadOp& op) {
+ OpIndex AssembleOutputGraphLoad(const LoadOp& op) {
return assembler().ReduceLoad(
- MapToNewGraph(op.base()),
- op.index().valid() ? MapToNewGraph(op.index()) : OpIndex::Invalid(),
- op.kind, op.loaded_rep, op.result_rep, op.offset, op.element_size_log2);
+ MapToNewGraph(op.base()), MapToNewGraphIfValid(op.index()), op.kind,
+ op.loaded_rep, op.result_rep, op.offset, op.element_size_log2);
}
- OpIndex VisitStore(const StoreOp& op) {
+ OpIndex AssembleOutputGraphStore(const StoreOp& op) {
return assembler().ReduceStore(
- MapToNewGraph(op.base()),
- op.index().valid() ? MapToNewGraph(op.index()) : OpIndex::Invalid(),
+ MapToNewGraph(op.base()), MapToNewGraphIfValid(op.index()),
MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier,
op.offset, op.element_size_log2);
}
- OpIndex VisitRetain(const RetainOp& op) {
+ OpIndex AssembleOutputGraphAllocate(const AllocateOp& op) {
+ return assembler().Allocate(MapToNewGraph(op.size()), op.type,
+ op.allow_large_objects);
+ }
+ OpIndex AssembleOutputGraphDecodeExternalPointer(
+ const DecodeExternalPointerOp& op) {
+ return assembler().DecodeExternalPointer(MapToNewGraph(op.handle()),
+ op.tag);
+ }
+ OpIndex AssembleOutputGraphRetain(const RetainOp& op) {
return assembler().ReduceRetain(MapToNewGraph(op.retained()));
}
- OpIndex VisitParameter(const ParameterOp& op) {
- return assembler().ReduceParameter(op.parameter_index, op.debug_name);
+ OpIndex AssembleOutputGraphParameter(const ParameterOp& op) {
+ return assembler().ReduceParameter(op.parameter_index, op.rep,
+ op.debug_name);
}
- OpIndex VisitOsrValue(const OsrValueOp& op) {
+ OpIndex AssembleOutputGraphOsrValue(const OsrValueOp& op) {
return assembler().ReduceOsrValue(op.index);
}
- OpIndex VisitStackPointerGreaterThan(const StackPointerGreaterThanOp& op) {
+ OpIndex AssembleOutputGraphStackPointerGreaterThan(
+ const StackPointerGreaterThanOp& op) {
return assembler().ReduceStackPointerGreaterThan(
MapToNewGraph(op.stack_limit()), op.kind);
}
- OpIndex VisitStackSlot(const StackSlotOp& op) {
+ OpIndex AssembleOutputGraphStackSlot(const StackSlotOp& op) {
return assembler().ReduceStackSlot(op.size, op.alignment);
}
- OpIndex VisitFrameConstant(const FrameConstantOp& op) {
+ OpIndex AssembleOutputGraphFrameConstant(const FrameConstantOp& op) {
return assembler().ReduceFrameConstant(op.kind);
}
- OpIndex VisitCheckLazyDeopt(const CheckLazyDeoptOp& op) {
- return assembler().ReduceCheckLazyDeopt(MapToNewGraph(op.call()),
- MapToNewGraph(op.frame_state()));
- }
- OpIndex VisitDeoptimize(const DeoptimizeOp& op) {
+ OpIndex AssembleOutputGraphDeoptimize(const DeoptimizeOp& op) {
return assembler().ReduceDeoptimize(MapToNewGraph(op.frame_state()),
op.parameters);
}
- OpIndex VisitDeoptimizeIf(const DeoptimizeIfOp& op) {
+ OpIndex AssembleOutputGraphDeoptimizeIf(const DeoptimizeIfOp& op) {
return assembler().ReduceDeoptimizeIf(MapToNewGraph(op.condition()),
MapToNewGraph(op.frame_state()),
op.negated, op.parameters);
}
- OpIndex VisitTrapIf(const TrapIfOp& op) {
+ OpIndex AssembleOutputGraphTrapIf(const TrapIfOp& op) {
return assembler().ReduceTrapIf(MapToNewGraph(op.condition()), op.negated,
op.trap_id);
}
- OpIndex VisitTuple(const TupleOp& op) {
+ OpIndex AssembleOutputGraphTuple(const TupleOp& op) {
return assembler().ReduceTuple(
base::VectorOf(MapToNewGraph<4>(op.inputs())));
}
- OpIndex VisitProjection(const ProjectionOp& op) {
- return assembler().ReduceProjection(MapToNewGraph(op.input()), op.index);
+ OpIndex AssembleOutputGraphProjection(const ProjectionOp& op) {
+ return assembler().ReduceProjection(MapToNewGraph(op.input()), op.index,
+ op.rep);
}
- OpIndex VisitWordBinop(const WordBinopOp& op) {
+ OpIndex AssembleOutputGraphWordBinop(const WordBinopOp& op) {
return assembler().ReduceWordBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
- OpIndex VisitFloatBinop(const FloatBinopOp& op) {
+ OpIndex AssembleOutputGraphFloatBinop(const FloatBinopOp& op) {
return assembler().ReduceFloatBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
- OpIndex VisitUnreachable(const UnreachableOp& op) {
+ OpIndex AssembleOutputGraphUnreachable(const UnreachableOp& op) {
return assembler().ReduceUnreachable();
}
+ OpIndex AssembleOutputGraphStaticAssert(const StaticAssertOp& op) {
+ return assembler().ReduceStaticAssert(op.condition(), op.source);
+ }
+ OpIndex AssembleOutputGraphCheckTurboshaftTypeOf(
+ const CheckTurboshaftTypeOfOp& op) {
+ return assembler().ReduceCheckTurboshaftTypeOf(
+ MapToNewGraph(op.input()), op.rep, op.type, op.successful);
+ }
+ OpIndex AssembleOutputGraphNewConsString(const NewConsStringOp& op) {
+ return assembler().ReduceNewConsString(MapToNewGraph(op.length()),
+ MapToNewGraph(op.first()),
+ MapToNewGraph(op.second()));
+ }
+ OpIndex AssembleOutputGraphNewArray(const NewArrayOp& op) {
+ return assembler().ReduceNewArray(MapToNewGraph(op.length()), op.kind,
+ op.allocation_type);
+ }
+ OpIndex AssembleOutputGraphDoubleArrayMinMax(const DoubleArrayMinMaxOp& op) {
+ return assembler().ReduceDoubleArrayMinMax(MapToNewGraph(op.array()),
+ op.kind);
+ }
+ OpIndex AssembleOutputGraphLoadFieldByIndex(const LoadFieldByIndexOp& op) {
+ return assembler().ReduceLoadFieldByIndex(MapToNewGraph(op.object()),
+ MapToNewGraph(op.index()));
+ }
+ OpIndex AssembleOutputGraphDebugBreak(const DebugBreakOp& op) {
+ return assembler().ReduceDebugBreak();
+ }
+ OpIndex AssembleOutputGraphBigIntBinop(const BigIntBinopOp& op) {
+ return assembler().ReduceBigIntBinop(
+ MapToNewGraph(op.left()), MapToNewGraph(op.right()),
+ MapToNewGraph(op.frame_state()), op.kind);
+ }
+ OpIndex AssembleOutputGraphBigIntEqual(const BigIntEqualOp& op) {
+ return assembler().ReduceBigIntEqual(MapToNewGraph(op.left()),
+ MapToNewGraph(op.right()));
+ }
+ OpIndex AssembleOutputGraphBigIntComparison(const BigIntComparisonOp& op) {
+ return assembler().ReduceBigIntComparison(
+ MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind);
+ }
+ OpIndex AssembleOutputGraphBigIntUnary(const BigIntUnaryOp& op) {
+ return assembler().ReduceBigIntUnary(MapToNewGraph(op.input()), op.kind);
+ }
+ OpIndex AssembleOutputGraphLoadRootRegister(const LoadRootRegisterOp& op) {
+ return assembler().ReduceLoadRootRegister();
+ }
+ OpIndex AssembleOutputGraphStringAt(const StringAtOp& op) {
+ return assembler().ReduceStringAt(MapToNewGraph(op.string()),
+ MapToNewGraph(op.position()), op.kind);
+ }
+#ifdef V8_INTL_SUPPORT
+ OpIndex AssembleOutputGraphStringToCaseIntl(const StringToCaseIntlOp& op) {
+ return assembler().ReduceStringToCaseIntl(MapToNewGraph(op.string()),
+ op.kind);
+ }
+#endif // V8_INTL_SUPPORT
+ OpIndex AssembleOutputGraphStringLength(const StringLengthOp& op) {
+ return assembler().ReduceStringLength(MapToNewGraph(op.string()));
+ }
+ OpIndex AssembleOutputGraphStringIndexOf(const StringIndexOfOp& op) {
+ return assembler().ReduceStringIndexOf(MapToNewGraph(op.string()),
+ MapToNewGraph(op.search()),
+ MapToNewGraph(op.position()));
+ }
+ OpIndex AssembleOutputGraphStringFromCodePointAt(
+ const StringFromCodePointAtOp& op) {
+ return assembler().ReduceStringFromCodePointAt(MapToNewGraph(op.string()),
+ MapToNewGraph(op.index()));
+ }
+ OpIndex AssembleOutputGraphStringSubstring(const StringSubstringOp& op) {
+ return assembler().ReduceStringSubstring(MapToNewGraph(op.string()),
+ MapToNewGraph(op.start()),
+ MapToNewGraph(op.end()));
+ }
+ OpIndex AssembleOutputGraphStringEqual(const StringEqualOp& op) {
+ return assembler().ReduceStringEqual(MapToNewGraph(op.left()),
+ MapToNewGraph(op.right()));
+ }
+ OpIndex AssembleOutputGraphStringComparison(const StringComparisonOp& op) {
+ return assembler().ReduceStringComparison(
+ MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind);
+ }
+
+ void CreateOldToNewMapping(OpIndex old_index, OpIndex new_index) {
+ if (current_block_needs_variables_) {
+ MaybeVariable var = GetVariableFor(old_index);
+ if (!var.has_value()) {
+ base::Optional<RegisterRepresentation> rep =
+ input_graph().Get(old_index).outputs_rep().size() == 1
+ ? base::Optional<RegisterRepresentation>{input_graph()
+ .Get(old_index)
+ .outputs_rep()[0]}
+ : base::nullopt;
+ var = assembler().NewFreshVariable(rep);
+ SetVariableFor(old_index, *var);
+ }
+ assembler().Set(*var, new_index);
+ return;
+ }
+ op_mapping_[old_index.id()] = new_index;
+ }
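
CreateOldToNewMapping picks between two strategies: the flat op_mapping_ array when the current block is a straight copy of its origin, and a per-old-index Variable when the block was cloned, since a cloned block can produce a different new value for the same old index on each copy. A simplified standalone illustration of that split, with plain std:: containers loosely standing in for op_mapping_ and the Variable machinery (not V8's types):

#include <unordered_map>
#include <vector>

struct MappingSketch {
  bool block_needs_variables = false;
  std::vector<int> direct_map;                       // old id -> new id
  std::unordered_map<int, std::vector<int>> values;  // "variable" per old id

  void Record(int old_id, int new_id) {
    if (block_needs_variables) {
      // Cloned blocks may map the same old id several times, so keep every
      // new value and let later lookups pick the one for the right path.
      values[old_id].push_back(new_id);
      return;
    }
    if (static_cast<std::size_t>(old_id) >= direct_map.size()) {
      direct_map.resize(old_id + 1, -1);
    }
    direct_map[old_id] = new_id;
  }
};
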
- Block* MapToNewGraph(BlockIndex old_index) const {
- Block* result = block_mapping_[old_index.id()];
- DCHECK_NOT_NULL(result);
- return result;
+ template <bool can_be_invalid = false>
+ OpIndex MapToNewGraphIfValid(OpIndex old_index, int predecessor_index = -1) {
+ return old_index.valid()
+ ? MapToNewGraph<can_be_invalid>(old_index, predecessor_index)
+ : OpIndex::Invalid();
}
- OpIndex MapToNewGraph(OpIndex old_index) {
- OpIndex result = op_mapping_[old_index.id()];
- DCHECK(result.valid());
- return result;
+ MaybeVariable GetVariableFor(OpIndex old_index) const {
+ return old_opindex_to_variables[old_index];
+ }
+
+ void SetVariableFor(OpIndex old_index, MaybeVariable var) {
+ DCHECK(!old_opindex_to_variables[old_index].has_value());
+ old_opindex_to_variables[old_index] = var;
}
template <size_t expected_size>
@@ -582,8 +840,9 @@ class GraphVisitor {
if (auto* pending_phi = op.TryCast<PendingLoopPhiOp>()) {
assembler().output_graph().template Replace<PhiOp>(
assembler().output_graph().Index(*pending_phi),
- base::VectorOf({pending_phi->first(),
- MapToNewGraph(pending_phi->old_backedge_index)}),
+ base::VectorOf(
+ {pending_phi->first(),
+ MapToNewGraph(pending_phi->data.old_backedge_index)}),
pending_phi->rep);
}
}
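
This fixup implements the two-pass treatment of loop phis: while visiting the loop header only the forward input is known, so a PendingLoopPhi records the old backedge index, and once the backedge has been visited the pending op is replaced by a real two-input Phi. A standalone sketch of that idea, with invented PendingPhi/LoopPhi types rather than V8's operations:

#include <cassert>
#include <vector>

struct PendingPhi {
  int first_input;         // forward input, already in new-graph terms
  int old_backedge_index;  // backedge input, still in old-graph terms
};

struct LoopPhi {
  int inputs[2];
};

LoopPhi FinalizeLoopPhi(const PendingPhi& pending,
                        const std::vector<int>& old_to_new) {
  assert(pending.old_backedge_index >= 0);
  // Same shape as Replace<PhiOp>(..., {first, MapToNewGraph(backedge)}, rep).
  return LoopPhi{{pending.first_input,
                  old_to_new.at(pending.old_backedge_index)}};
}
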
@@ -601,8 +860,19 @@ class GraphVisitor {
const Block* current_input_block_;
// Mappings from the old graph to the new graph.
- ZoneVector<Block*> block_mapping_;
ZoneVector<OpIndex> op_mapping_;
+
+ // {current_block_needs_variables_} is set to true if the current block should
+ // use Variables to map old to new OpIndex rather than just {op_mapping}. This
+ // is typically the case when the block has been cloned.
+ bool current_block_needs_variables_ = false;
+
+ // Set of Blocks for which Variables should be used rather than
+ // {op_mapping}.
+ ZoneSet<BlockIndex> blocks_needing_variables;
+
+ // Mapping from old OpIndex to Variables.
+ FixedSidetable<MaybeVariable> old_opindex_to_variables;
};
} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/optimize-phase.cc b/deps/v8/src/compiler/turboshaft/optimize-phase.cc
new file mode 100644
index 0000000000..c104f3b00e
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/optimize-phase.cc
@@ -0,0 +1,30 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/optimize-phase.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/turboshaft/late-escape-analysis-reducer.h"
+#include "src/compiler/turboshaft/machine-optimization-reducer.h"
+#include "src/compiler/turboshaft/memory-optimization-reducer.h"
+#include "src/compiler/turboshaft/value-numbering-reducer.h"
+#include "src/compiler/turboshaft/variable-reducer.h"
+#include "src/numbers/conversions-inl.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void OptimizePhase::Run(PipelineData* data, Zone* temp_zone) {
+ UnparkedScopeIfNeeded scope(data->broker(),
+ v8_flags.turboshaft_trace_reduction);
+ turboshaft::OptimizationPhase<
+ turboshaft::LateEscapeAnalysisReducer,
+ turboshaft::MemoryOptimizationReducer, turboshaft::VariableReducer,
+ turboshaft::MachineOptimizationReducerSignallingNanImpossible,
+ turboshaft::ValueNumberingReducer>::
+ Run(data->isolate(), &data->graph(), temp_zone, data->node_origins(),
+ std::tuple{
+ turboshaft::MemoryOptimizationReducerArgs{data->isolate()}});
+}
+
+} // namespace v8::internal::compiler::turboshaft
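
OptimizePhase::Run instantiates OptimizationPhase with a list of reducers; each reducer is a class template parameterized over the next one in the stack, so a Reduce* call falls through the layers until it reaches the bottom. A toy, self-contained illustration of that layering (all names here are invented for the sketch; V8's real ReducerStack and assembler are considerably more involved):

#include <iostream>

struct Base {
  int ReduceAdd(int a, int b) { return a + b; }
};

template <class Next>
struct ConstantFold : Next {
  int ReduceAdd(int a, int b) {
    std::cout << "ConstantFold sees " << a << " + " << b << "\n";
    return Next::ReduceAdd(a, b);
  }
};

template <class Next>
struct Trace : Next {
  int ReduceAdd(int a, int b) {
    std::cout << "Trace sees " << a << " + " << b << "\n";
    return Next::ReduceAdd(a, b);
  }
};

int main() {
  Trace<ConstantFold<Base>> stack;
  return stack.ReduceAdd(2, 3) == 5 ? 0 : 1;  // each layer runs, then Base
}
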
diff --git a/deps/v8/src/compiler/turboshaft/optimize-phase.h b/deps/v8/src/compiler/turboshaft/optimize-phase.h
new file mode 100644
index 0000000000..43fcd2471d
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/optimize-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_OPTIMIZE_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_OPTIMIZE_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct OptimizePhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(Optimize)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_OPTIMIZE_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/phase.cc b/deps/v8/src/compiler/turboshaft/phase.cc
new file mode 100644
index 0000000000..be26569e1a
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/phase.cc
@@ -0,0 +1,82 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/phase.h"
+
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/turboshaft/graph-visualizer.h"
+#include "src/diagnostics/code-tracer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void PrintTurboshaftGraph(PipelineData* data, Zone* temp_zone,
+ CodeTracer* code_tracer, const char* phase_name) {
+ if (data->info()->trace_turbo_json()) {
+ UnparkedScopeIfNeeded scope(data->broker());
+ AllowHandleDereference allow_deref;
+ turboshaft::Graph& graph = data->graph();
+
+ {
+ TurboJsonFile json_of(data->info(), std::ios_base::app);
+ json_of << "{\"name\":\"" << phase_name
+ << "\",\"type\":\"turboshaft_graph\",\"data\":"
+ << AsJSON(graph, data->node_origins(), temp_zone) << "},\n";
+ }
+ PrintTurboshaftCustomDataPerOperation(
+ data->info(), "Properties", graph,
+ [](std::ostream& stream, const turboshaft::Graph& graph,
+ turboshaft::OpIndex index) -> bool {
+ const auto& op = graph.Get(index);
+ op.PrintOptions(stream);
+ return true;
+ });
+ PrintTurboshaftCustomDataPerOperation(
+ data->info(), "Types", graph,
+ [](std::ostream& stream, const turboshaft::Graph& graph,
+ turboshaft::OpIndex index) -> bool {
+ turboshaft::Type type = graph.operation_types()[index];
+ if (!type.IsInvalid() && !type.IsNone()) {
+ type.PrintTo(stream);
+ return true;
+ }
+ return false;
+ });
+ PrintTurboshaftCustomDataPerOperation(
+ data->info(), "Use Count (saturated)", graph,
+ [](std::ostream& stream, const turboshaft::Graph& graph,
+ turboshaft::OpIndex index) -> bool {
+ stream << static_cast<int>(graph.Get(index).saturated_use_count);
+ return true;
+ });
+#ifdef DEBUG
+ PrintTurboshaftCustomDataPerBlock(
+ data->info(), "Type Refinements", graph,
+ [](std::ostream& stream, const turboshaft::Graph& graph,
+ turboshaft::BlockIndex index) -> bool {
+ const std::vector<std::pair<turboshaft::OpIndex, turboshaft::Type>>&
+ refinements = graph.block_type_refinement()[index];
+ if (refinements.empty()) return false;
+ stream << "\\n";
+ for (const auto& [op, type] : refinements) {
+ stream << op << " : " << type << "\\n";
+ }
+ return true;
+ });
+#endif // DEBUG
+ }
+
+ if (data->info()->trace_turbo_graph()) {
+ DCHECK(code_tracer);
+ UnparkedScopeIfNeeded scope(data->broker());
+ AllowHandleDereference allow_deref;
+
+ CodeTracer::StreamScope tracing_scope(code_tracer);
+ tracing_scope.stream() << "\n----- " << phase_name << " -----\n"
+ << data->graph();
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/phase.h b/deps/v8/src/compiler/turboshaft/phase.h
new file mode 100644
index 0000000000..8096ac4622
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/phase.h
@@ -0,0 +1,81 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_PHASE_H_
+
+#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/node-origin-table.h"
+#include "src/compiler/phase.h"
+#include "src/compiler/turboshaft/graph.h"
+
+#define DECL_TURBOSHAFT_PHASE_CONSTANTS(Name) \
+ DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Turboshaft##Name, \
+ PhaseKind::kTurboshaft, \
+ RuntimeCallStats::kThreadSpecific)
+
+namespace v8::internal::compiler {
+class Schedule;
+} // namespace v8::internal::compiler
+
+namespace v8::internal::compiler::turboshaft {
+
+class PipelineData {
+ public:
+ explicit PipelineData(OptimizedCompilationInfo* const& info,
+ Schedule*& schedule, Zone*& graph_zone,
+ JSHeapBroker*& broker, Isolate* const& isolate,
+ SourcePositionTable*& source_positions,
+ NodeOriginTable*& node_origins)
+ : info_(info),
+ schedule_(schedule),
+ graph_zone_(graph_zone),
+ broker_(broker),
+ isolate_(isolate),
+ source_positions_(source_positions),
+ node_origins_(node_origins) {}
+
+ bool has_graph() const { return graph_ != nullptr; }
+ turboshaft::Graph& graph() const { return *graph_; }
+
+ OptimizedCompilationInfo* info() const { return info_; }
+ Schedule* schedule() const { return schedule_; }
+ Zone* graph_zone() const { return graph_zone_; }
+ JSHeapBroker* broker() const { return broker_; }
+ Isolate* isolate() const { return isolate_; }
+ SourcePositionTable* source_positions() const { return source_positions_; }
+ NodeOriginTable* node_origins() const { return node_origins_; }
+
+ void CreateTurboshaftGraph() {
+ DCHECK_NULL(graph_);
+ DCHECK(graph_zone_);
+ graph_ = std::make_unique<turboshaft::Graph>(graph_zone_);
+ }
+
+ void reset_schedule() { schedule_ = nullptr; }
+
+ void DeleteGraphZone() { graph_ = nullptr; }
+
+ private:
+ // Turbofan's PipelineData owns most of these objects. We only hold references
+ // to them.
+ // TODO(v8:12783, nicohartmann@): Change this once Turbofan pipeline is fully
+ // replaced.
+ OptimizedCompilationInfo* const& info_;
+ Schedule*& schedule_;
+ Zone*& graph_zone_;
+ JSHeapBroker*& broker_;
+ Isolate* const& isolate_;
+ SourcePositionTable*& source_positions_;
+ NodeOriginTable*& node_origins_;
+
+ std::unique_ptr<turboshaft::Graph> graph_ = nullptr;
+};
+
+void PrintTurboshaftGraph(PipelineData* data, Zone* temp_zone,
+ CodeTracer* code_tracer, const char* phase_name);
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/recreate-schedule-phase.cc b/deps/v8/src/compiler/turboshaft/recreate-schedule-phase.cc
new file mode 100644
index 0000000000..e6935de65d
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/recreate-schedule-phase.cc
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/recreate-schedule-phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+RecreateScheduleResult RecreateSchedulePhase::Run(PipelineData* data,
+ Zone* temp_zone,
+ Linkage* linkage) {
+ return RecreateSchedule(data->graph(), data->broker(),
+ linkage->GetIncomingDescriptor(), data->graph_zone(),
+ temp_zone, data->source_positions(),
+ data->node_origins());
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/recreate-schedule-phase.h b/deps/v8/src/compiler/turboshaft/recreate-schedule-phase.h
new file mode 100644
index 0000000000..d1e980b4f9
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/recreate-schedule-phase.h
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+#include "src/compiler/turboshaft/recreate-schedule.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct RecreateSchedulePhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(RecreateSchedule)
+
+ RecreateScheduleResult Run(PipelineData* data, Zone* temp_zone,
+ Linkage* linkage);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/recreate-schedule.cc b/deps/v8/src/compiler/turboshaft/recreate-schedule.cc
index 23ae2899e1..4832cf415e 100644
--- a/deps/v8/src/compiler/turboshaft/recreate-schedule.cc
+++ b/deps/v8/src/compiler/turboshaft/recreate-schedule.cc
@@ -16,6 +16,7 @@
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-origin-table.h"
@@ -35,6 +36,7 @@ namespace {
struct ScheduleBuilder {
const Graph& input_graph;
+ JSHeapBroker* broker;
CallDescriptor* call_descriptor;
Zone* graph_zone;
Zone* phase_zone;
@@ -133,7 +135,6 @@ RecreateScheduleResult ScheduleBuilder::Run() {
for (const Block& block : input_graph.blocks()) {
current_input_block = &block;
current_block = GetBlock(block);
- current_block->set_deferred(current_input_block->IsDeferred());
for (OpIndex op : input_graph.OperationIndices(block)) {
DCHECK_NOT_NULL(current_block);
ProcessOperation(input_graph.Get(op));
@@ -146,6 +147,10 @@ RecreateScheduleResult ScheduleBuilder::Run() {
DCHECK(schedule->rpo_order()->empty());
Scheduler::ComputeSpecialRPO(phase_zone, schedule);
+ // Note that Scheduler::GenerateDominatorTree also infers which blocks are
+ // deferred, so we only need to set branch targets as deferred based on the
+ // hints, and we let Scheduler::GenerateDominatorTree propagate this
+ // information to other blocks.
Scheduler::GenerateDominatorTree(schedule);
return {tf_graph, schedule};
}
@@ -172,6 +177,41 @@ void ScheduleBuilder::ProcessOperation(const Operation& op) {
}
}
+#define SHOULD_HAVE_BEEN_LOWERED(op) \
+ Node* ScheduleBuilder::ProcessOperation(const op##Op&) { UNREACHABLE(); }
+// These operations should have been lowered in previous reducers already.
+SHOULD_HAVE_BEEN_LOWERED(Allocate)
+SHOULD_HAVE_BEEN_LOWERED(BigIntBinop)
+SHOULD_HAVE_BEEN_LOWERED(BigIntComparison)
+SHOULD_HAVE_BEEN_LOWERED(BigIntEqual)
+SHOULD_HAVE_BEEN_LOWERED(BigIntUnary)
+SHOULD_HAVE_BEEN_LOWERED(ChangeOrDeopt)
+SHOULD_HAVE_BEEN_LOWERED(ConvertToObject)
+SHOULD_HAVE_BEEN_LOWERED(ConvertToObjectOrDeopt)
+SHOULD_HAVE_BEEN_LOWERED(ConvertObjectToPrimitive)
+SHOULD_HAVE_BEEN_LOWERED(ConvertObjectToPrimitiveOrDeopt)
+SHOULD_HAVE_BEEN_LOWERED(DecodeExternalPointer)
+SHOULD_HAVE_BEEN_LOWERED(DoubleArrayMinMax)
+SHOULD_HAVE_BEEN_LOWERED(FloatIs)
+SHOULD_HAVE_BEEN_LOWERED(LoadFieldByIndex)
+SHOULD_HAVE_BEEN_LOWERED(NewArray)
+SHOULD_HAVE_BEEN_LOWERED(NewConsString)
+SHOULD_HAVE_BEEN_LOWERED(ObjectIs)
+SHOULD_HAVE_BEEN_LOWERED(StringAt)
+SHOULD_HAVE_BEEN_LOWERED(StringComparison)
+SHOULD_HAVE_BEEN_LOWERED(StringEqual)
+SHOULD_HAVE_BEEN_LOWERED(StringFromCodePointAt)
+SHOULD_HAVE_BEEN_LOWERED(StringIndexOf)
+SHOULD_HAVE_BEEN_LOWERED(StringLength)
+SHOULD_HAVE_BEEN_LOWERED(StringSubstring)
+#ifdef V8_INTL_SUPPORT
+SHOULD_HAVE_BEEN_LOWERED(StringToCaseIntl)
+#endif // V8_INTL_SUPPORT
+SHOULD_HAVE_BEEN_LOWERED(Tag)
+SHOULD_HAVE_BEEN_LOWERED(TruncateObjectToPrimitive)
+SHOULD_HAVE_BEEN_LOWERED(Untag)
+#undef SHOULD_HAVE_BEEN_LOWERED
+
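
SHOULD_HAVE_BEEN_LOWERED stamps out one ProcessOperation overload per high-level operation that earlier reducers are expected to have removed, each of which simply traps if it is ever reached. The same pattern in a tiny self-contained form, with invented FooOp/BarOp/Handler names rather than V8's operations:

#include <cstdlib>

struct FooOp {};
struct BarOp {};

struct Handler {
  void Process(const FooOp&);
  void Process(const BarOp&);
};

// Each expansion defines one overload that must never be reached at runtime.
#define MUST_NOT_REACH(T) \
  void Handler::Process(const T&) { std::abort(); }
MUST_NOT_REACH(FooOp)
MUST_NOT_REACH(BarOp)
#undef MUST_NOT_REACH
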
Node* ScheduleBuilder::ProcessOperation(const WordBinopOp& op) {
using Kind = WordBinopOp::Kind;
const Operator* o;
@@ -544,6 +584,9 @@ Node* ScheduleBuilder::ProcessOperation(const EqualOp& op) {
case RegisterRepresentation::Float64():
o = machine.Float64Equal();
break;
+ case RegisterRepresentation::Tagged():
+ o = machine.TaggedEqual();
+ break;
default:
UNREACHABLE();
}
@@ -830,6 +873,9 @@ Node* ScheduleBuilder::ProcessOperation(const TaggedBitcastOp& op) {
} else if (op.from == RegisterRepresentation::PointerSized() &&
op.to == RegisterRepresentation::Tagged()) {
o = machine.BitcastWordToTagged();
+ } else if (op.from == RegisterRepresentation::Compressed() &&
+ op.to == RegisterRepresentation::Word32()) {
+ o = machine.BitcastTaggedToWord();
} else {
UNIMPLEMENTED();
}
@@ -1016,12 +1062,6 @@ Node* ScheduleBuilder::ProcessOperation(const FrameConstantOp& op) {
return AddNode(machine.LoadParentFramePointer(), {});
}
}
-Node* ScheduleBuilder::ProcessOperation(const CheckLazyDeoptOp& op) {
- Node* call = GetNode(op.call());
- Node* frame_state = GetNode(op.frame_state());
- call->AppendInput(graph_zone, frame_state);
- return nullptr;
-}
Node* ScheduleBuilder::ProcessOperation(const DeoptimizeIfOp& op) {
Node* condition = GetNode(op.condition());
Node* frame_state = GetNode(op.frame_state());
@@ -1069,6 +1109,24 @@ Node* ScheduleBuilder::ProcessOperation(const PhiOp& op) {
Node* ScheduleBuilder::ProcessOperation(const ProjectionOp& op) {
return AddNode(common.Projection(op.index), {GetNode(op.input())});
}
+Node* ScheduleBuilder::ProcessOperation(const StaticAssertOp& op) {
+ // Static asserts should be (statically asserted and) removed by turboshaft.
+ UnparkedScopeIfNeeded scope(broker);
+ AllowHandleDereference allow_handle_dereference;
+ std::cout << input_graph.Get(op.condition());
+ FATAL(
+ "Expected Turbofan static assert to hold, but got non-true input:\n %s",
+ op.source);
+}
+Node* ScheduleBuilder::ProcessOperation(const CheckTurboshaftTypeOfOp& op) {
+ if (op.successful) return GetNode(op.input());
+
+ UnparkedScopeIfNeeded scope(broker);
+ AllowHandleDereference allow_handle_dereference;
+ FATAL("Checking type %s of operation %d:%s failed!",
+ op.type.ToString().c_str(), op.input().id(),
+ input_graph.Get(op.input()).ToString().c_str());
+}
std::pair<Node*, MachineType> ScheduleBuilder::BuildDeoptInput(
FrameStateData::Iterator* it) {
@@ -1078,6 +1136,15 @@ std::pair<Node*, MachineType> ScheduleBuilder::BuildDeoptInput(
MachineType type;
OpIndex input;
it->ConsumeInput(&type, &input);
+ const Operation& op = input_graph.Get(input);
+ if (op.outputs_rep()[0] == RegisterRepresentation::Word64() &&
+ type.representation() == MachineRepresentation::kWord32) {
+ // 64 to 32-bit conversion is implicit in turboshaft, but explicit in
+ // turbofan, so we insert this conversion.
+ Node* conversion =
+ AddNode(machine.TruncateInt64ToInt32(), {GetNode(input)});
+ return {conversion, type};
+ }
return {GetNode(input), type};
}
case Instr::kDematerializedObject: {
@@ -1196,7 +1263,45 @@ Node* ScheduleBuilder::ProcessOperation(const CallOp& op) {
for (OpIndex i : op.arguments()) {
inputs.push_back(GetNode(i));
}
- return AddNode(common.Call(op.descriptor), base::VectorOf(inputs));
+ if (op.HasFrameState()) {
+ DCHECK(op.frame_state().valid());
+ inputs.push_back(GetNode(op.frame_state()));
+ }
+ return AddNode(common.Call(op.descriptor->descriptor),
+ base::VectorOf(inputs));
+}
+Node* ScheduleBuilder::ProcessOperation(const CallAndCatchExceptionOp& op) {
+ // Re-building the call
+ base::SmallVector<Node*, 16> inputs;
+ inputs.push_back(GetNode(op.callee()));
+ for (OpIndex i : op.arguments()) {
+ inputs.push_back(GetNode(i));
+ }
+ if (op.HasFrameState()) {
+ DCHECK(op.frame_state().valid());
+ inputs.push_back(GetNode(op.frame_state()));
+ }
+ Node* call =
+ AddNode(common.Call(op.descriptor->descriptor), base::VectorOf(inputs));
+
+ // Re-building the IfSuccess/IfException mechanism.
+ BasicBlock* success_block = GetBlock(*op.if_success);
+ BasicBlock* exception_block = GetBlock(*op.if_exception);
+ schedule->AddCall(current_block, call, success_block, exception_block);
+ // Pass `call` as the control input of `IfSuccess` and as both the effect and
+ // control input of `IfException`.
+ Node* if_success = MakeNode(common.IfSuccess(), {call});
+ Node* if_exception = MakeNode(common.IfException(), {call, call});
+ schedule->AddNode(success_block, if_success);
+ schedule->AddNode(exception_block, if_exception);
+ current_block = nullptr;
+ return call;
+}
+Node* ScheduleBuilder::ProcessOperation(const LoadExceptionOp& op) {
+ Node* if_exception = current_block->NodeAt(0);
+ DCHECK(if_exception != nullptr &&
+ if_exception->opcode() == IrOpcode::kIfException);
+ return if_exception;
}
Node* ScheduleBuilder::ProcessOperation(const TailCallOp& op) {
base::SmallVector<Node*, 16> inputs;
@@ -1204,7 +1309,8 @@ Node* ScheduleBuilder::ProcessOperation(const TailCallOp& op) {
for (OpIndex i : op.arguments()) {
inputs.push_back(GetNode(i));
}
- Node* call = MakeNode(common.TailCall(op.descriptor), base::VectorOf(inputs));
+ Node* call = MakeNode(common.TailCall(op.descriptor->descriptor),
+ base::VectorOf(inputs));
schedule->AddTailCall(current_block, call);
current_block = nullptr;
return nullptr;
@@ -1228,29 +1334,25 @@ Node* ScheduleBuilder::ProcessOperation(const ReturnOp& op) {
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const BranchOp& op) {
- Node* branch =
- MakeNode(common.Branch(BranchHint::kNone), {GetNode(op.condition())});
+ Node* branch = MakeNode(common.Branch(op.hint), {GetNode(op.condition())});
BasicBlock* true_block = GetBlock(*op.if_true);
BasicBlock* false_block = GetBlock(*op.if_false);
schedule->AddBranch(current_block, branch, true_block, false_block);
schedule->AddNode(true_block, MakeNode(common.IfTrue(), {branch}));
schedule->AddNode(false_block, MakeNode(common.IfFalse(), {branch}));
+ switch (op.hint) {
+ case BranchHint::kNone:
+ break;
+ case BranchHint::kTrue:
+ false_block->set_deferred(true);
+ break;
+ case BranchHint::kFalse:
+ true_block->set_deferred(true);
+ break;
+ }
current_block = nullptr;
return nullptr;
}
-Node* ScheduleBuilder::ProcessOperation(const CatchExceptionOp& op) {
- Node* call = GetNode(op.call());
- BasicBlock* success_block = GetBlock(*op.if_success);
- BasicBlock* exception_block = GetBlock(*op.if_exception);
- schedule->AddCall(current_block, call, success_block, exception_block);
- Node* if_success = MakeNode(common.IfSuccess(), {call});
- Node* if_exception = MakeNode(common.IfException(), {call, call});
- schedule->AddNode(success_block, if_success);
- // Pass `call` as both the effect and control input of `IfException`.
- schedule->AddNode(exception_block, if_exception);
- current_block = nullptr;
- return if_exception;
-}
Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
size_t succ_count = op.cases.size() + 1;
Node* switch_node =
@@ -1260,12 +1362,20 @@ Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
for (SwitchOp::Case c : op.cases) {
BasicBlock* case_block = GetBlock(*c.destination);
successors.push_back(case_block);
- Node* case_node = MakeNode(common.IfValue(c.value), {switch_node});
+ Node* case_node =
+ MakeNode(common.IfValue(c.value, 0, c.hint), {switch_node});
schedule->AddNode(case_block, case_node);
+ if (c.hint == BranchHint::kFalse) {
+ case_block->set_deferred(true);
+ }
}
BasicBlock* default_block = GetBlock(*op.default_case);
successors.push_back(default_block);
- schedule->AddNode(default_block, MakeNode(common.IfDefault(), {switch_node}));
+ schedule->AddNode(default_block,
+ MakeNode(common.IfDefault(op.default_hint), {switch_node}));
+ if (op.default_hint == BranchHint::kFalse) {
+ default_block->set_deferred(true);
+ }
schedule->AddSwitch(current_block, switch_node, successors.data(),
successors.size());
@@ -1273,15 +1383,25 @@ Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
return nullptr;
}
+Node* ScheduleBuilder::ProcessOperation(const DebugBreakOp& op) {
+ return AddNode(machine.DebugBreak(), {});
+}
+
+Node* ScheduleBuilder::ProcessOperation(const LoadRootRegisterOp& op) {
+ return AddNode(machine.LoadRootRegister(), {});
+}
+
} // namespace
RecreateScheduleResult RecreateSchedule(const Graph& graph,
+ JSHeapBroker* broker,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone,
SourcePositionTable* source_positions,
NodeOriginTable* origins) {
- ScheduleBuilder builder{graph, call_descriptor, graph_zone,
- phase_zone, source_positions, origins};
+ ScheduleBuilder builder{graph, broker, call_descriptor,
+ graph_zone, phase_zone, source_positions,
+ origins};
return builder.Run();
}
diff --git a/deps/v8/src/compiler/turboshaft/recreate-schedule.h b/deps/v8/src/compiler/turboshaft/recreate-schedule.h
index 8fb3108775..43eac5497f 100644
--- a/deps/v8/src/compiler/turboshaft/recreate-schedule.h
+++ b/deps/v8/src/compiler/turboshaft/recreate-schedule.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-origin-table.h"
namespace v8::internal {
@@ -25,6 +26,7 @@ struct RecreateScheduleResult {
};
RecreateScheduleResult RecreateSchedule(const Graph& graph,
+ JSHeapBroker* broker,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone,
SourcePositionTable* source_positions,
diff --git a/deps/v8/src/compiler/turboshaft/reducer-traits.h b/deps/v8/src/compiler/turboshaft/reducer-traits.h
new file mode 100644
index 0000000000..edcbd2a0b7
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/reducer-traits.h
@@ -0,0 +1,81 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_REDUCER_TRAITS_H_
+#define V8_COMPILER_TURBOSHAFT_REDUCER_TRAITS_H_
+
+#include <type_traits>
+
+namespace v8::internal::compiler::turboshaft {
+
+template <class Assembler, template <class> class... Reducers>
+class ReducerStack;
+
+template <typename Next>
+class ReducerBase;
+
+// is_same_reducer compares two reducers.
+template <template <typename> typename T, template <typename> typename U>
+struct is_same_reducer : public std::bool_constant<false> {};
+
+template <template <typename> typename Reducer>
+struct is_same_reducer<Reducer, Reducer> : public std::bool_constant<true> {};
+
+template <template <typename> typename...>
+struct reducer_list {};
+// Converts a ReducerStack {Next} to a reducer_list<>;
+template <typename Next>
+struct reducer_stack_to_list;
+template <typename A, template <typename> typename... Reducers>
+struct reducer_stack_to_list<ReducerStack<A, Reducers...>> {
+ using type = reducer_list<Reducers...>;
+};
+
+// Checks if a reducer_list<> {RL} contains reducer {R}.
+template <typename RL, template <typename> typename R>
+struct reducer_list_contains;
+template <template <typename> typename R, template <typename> typename Head,
+ template <typename> typename... Tail>
+struct reducer_list_contains<reducer_list<Head, Tail...>, R> {
+ static constexpr bool value =
+ is_same_reducer<Head, R>::value ||
+ reducer_list_contains<reducer_list<Tail...>, R>::value;
+};
+template <template <typename> typename R>
+struct reducer_list_contains<reducer_list<>, R>
+ : public std::bool_constant<false> {};
+
+// Checks if a reducer_list<> {RL} starts with reducer {R}.
+template <typename RL, template <typename> typename R>
+struct reducer_list_starts_with;
+template <template <typename> typename R, template <typename> typename Head,
+ template <typename> typename... Tail>
+struct reducer_list_starts_with<reducer_list<Head, Tail...>, R>
+ : public std::bool_constant<is_same_reducer<Head, R>::value> {};
+template <template <typename> typename R>
+struct reducer_list_starts_with<reducer_list<>, R>
+ : public std::bool_constant<false> {};
+
+// Check if the {Next} ReducerStack contains {Reducer}.
+template <typename Next, template <typename> typename Reducer>
+struct next_contains_reducer {
+ using list = typename reducer_stack_to_list<Next>::type;
+ static constexpr bool value = reducer_list_contains<list, Reducer>::value;
+};
+
+// Check if in the {Next} ReducerStack, {Reducer} comes next.
+template <typename Next, template <typename> typename Reducer>
+struct next_reducer_is {
+ using list = typename reducer_stack_to_list<Next>::type;
+ static constexpr bool value = reducer_list_starts_with<list, Reducer>::value;
+};
+
+// Check if {Next} is the bottom of the ReducerStack.
+template <typename Next>
+struct next_is_bottom_of_assembler_stack
+ : public next_reducer_is<Next, ReducerBase> {};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_REDUCER_TRAITS_H_
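
These traits answer questions like "does this reducer stack contain reducer R?" purely at compile time, by converting the stack into a reducer_list and recursing over its head. The same idea in a self-contained form that matches plain types instead of template templates, so it is an analogue of reducer_list_contains rather than V8's actual API:

#include <type_traits>

template <class...>
struct type_list {};

template <class List, class T>
struct list_contains;
template <class T, class Head, class... Tail>
struct list_contains<type_list<Head, Tail...>, T>
    : std::bool_constant<std::is_same_v<Head, T> ||
                         list_contains<type_list<Tail...>, T>::value> {};
template <class T>
struct list_contains<type_list<>, T> : std::bool_constant<false> {};

struct A {};
struct B {};
struct C {};
static_assert(list_contains<type_list<A, B>, B>::value);
static_assert(!list_contains<type_list<A, B>, C>::value);
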
diff --git a/deps/v8/src/compiler/turboshaft/representations.h b/deps/v8/src/compiler/turboshaft/representations.h
index b99ffeec7c..ce6490aa01 100644
--- a/deps/v8/src/compiler/turboshaft/representations.h
+++ b/deps/v8/src/compiler/turboshaft/representations.h
@@ -83,7 +83,7 @@ class RegisterRepresentation {
}
}
- bool IsFloat() {
+ constexpr bool IsFloat() {
switch (*this) {
case Enum::kFloat32:
case Enum::kFloat64:
@@ -180,10 +180,12 @@ class RegisterRepresentation {
static constexpr Enum kInvalid = static_cast<Enum>(-1);
};
-V8_INLINE bool operator==(RegisterRepresentation a, RegisterRepresentation b) {
+V8_INLINE constexpr bool operator==(RegisterRepresentation a,
+ RegisterRepresentation b) {
return a.value() == b.value();
}
-V8_INLINE bool operator!=(RegisterRepresentation a, RegisterRepresentation b) {
+V8_INLINE constexpr bool operator!=(RegisterRepresentation a,
+ RegisterRepresentation b) {
return a.value() != b.value();
}
@@ -344,6 +346,14 @@ class MemoryRepresentation {
static constexpr MemoryRepresentation SandboxedPointer() {
return MemoryRepresentation(Enum::kSandboxedPointer);
}
+ static constexpr MemoryRepresentation PointerSized() {
+ if constexpr (kSystemPointerSize == 4) {
+ return Uint32();
+ } else {
+ DCHECK_EQ(kSystemPointerSize, 8);
+ return Uint64();
+ }
+ }
bool IsWord() const {
switch (*this) {
@@ -456,6 +466,19 @@ class MemoryRepresentation {
}
}
+ // The required register representation for storing a value. When pointer
+ // compression is enabled, we only store the lower 32bit of a tagged value,
+ // which we indicate as `RegisterRepresentation::Compressed()` here.
+ RegisterRepresentation ToRegisterRepresentationForStore() const {
+ RegisterRepresentation result = ToRegisterRepresentation();
+#ifdef V8_COMPRESS_POINTERS
+ if (result == RegisterRepresentation::Tagged()) {
+ result = RegisterRepresentation::Compressed();
+ }
+#endif
+ return result;
+ }
+
MachineType ToMachineType() const {
switch (*this) {
case Int8():
@@ -503,6 +526,10 @@ class MemoryRepresentation {
return TaggedSigned();
case MachineRepresentation::kTaggedPointer:
return TaggedPointer();
+ case MachineRepresentation::kMapWord:
+ // Turboshaft does not support map packing.
+ DCHECK(!V8_MAP_PACKING_BOOL);
+ return TaggedPointer();
case MachineRepresentation::kTagged:
return AnyTagged();
case MachineRepresentation::kFloat32:
@@ -512,7 +539,6 @@ class MemoryRepresentation {
case MachineRepresentation::kSandboxedPointer:
return SandboxedPointer();
case MachineRepresentation::kNone:
- case MachineRepresentation::kMapWord:
case MachineRepresentation::kBit:
case MachineRepresentation::kSimd128:
case MachineRepresentation::kSimd256:
@@ -556,27 +582,31 @@ class MemoryRepresentation {
}
}
- uint8_t SizeInBytes() const {
+ constexpr uint8_t SizeInBytes() const {
+ return uint8_t{1} << SizeInBytesLog2();
+ }
+
+ constexpr uint8_t SizeInBytesLog2() const {
switch (*this) {
case Int8():
case Uint8():
- return 1;
+ return 0;
case Int16():
case Uint16():
- return 2;
+ return 1;
case Int32():
case Uint32():
case Float32():
- return 4;
+ return 2;
case Int64():
case Uint64():
case Float64():
case SandboxedPointer():
- return 8;
+ return 3;
case AnyTagged():
case TaggedPointer():
case TaggedSigned():
- return kTaggedSize;
+ return kTaggedSizeLog2;
}
}
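
SizeInBytes() is now derived from SizeInBytesLog2(), so for example Int32 (log2 2) yields 4 bytes and Float64 (log2 3) yields 8, while the tagged cases follow kTaggedSizeLog2 (2 with pointer compression, 3 on uncompressed 64-bit builds). A trivial constexpr check of that arithmetic, mirroring the shift in SizeInBytes():

#include <cstdint>
static_assert((std::uint8_t{1} << 2) == 4);  // e.g. Int32 -> log2 2 -> 4 bytes
static_assert((std::uint8_t{1} << 3) == 8);  // e.g. Float64 -> log2 3 -> 8 bytes
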
diff --git a/deps/v8/src/compiler/turboshaft/runtime-call-descriptors.h b/deps/v8/src/compiler/turboshaft/runtime-call-descriptors.h
new file mode 100644
index 0000000000..d4c4a25d7b
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/runtime-call-descriptors.h
@@ -0,0 +1,104 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_RUNTIME_CALL_DESCRIPTORS_H_
+#define V8_COMPILER_TURBOSHAFT_RUNTIME_CALL_DESCRIPTORS_H_
+
+#include "src/compiler/turboshaft/operations.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct RuntimeCallDescriptor {
+ private:
+ template <typename Derived>
+ struct Descriptor {
+ static const TSCallDescriptor* Create(Zone* zone) {
+ auto descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone, Derived::Function,
+ std::tuple_size_v<typename Derived::arguments_t>, Derived::Properties,
+ Derived::NeedsFrameState ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags);
+#ifdef DEBUG
+ Derived::Verify(descriptor);
+#endif // DEBUG
+ return TSCallDescriptor::Create(descriptor, zone);
+ }
+
+#ifdef DEBUG
+ static void Verify(const CallDescriptor* desc) {
+ using result_t = typename Derived::result_t;
+ using arguments_t = typename Derived::arguments_t;
+ if constexpr (std::is_same_v<result_t, void>) {
+ DCHECK_EQ(desc->ReturnCount(), 0);
+ } else {
+ DCHECK_EQ(desc->ReturnCount(), 1);
+ DCHECK(result_t::allows_representation(
+ RegisterRepresentation::FromMachineRepresentation(
+ desc->GetReturnType(0).representation())));
+ }
+ DCHECK_EQ(desc->NeedsFrameState(), Derived::NeedsFrameState);
+ DCHECK_EQ(desc->properties(), Derived::Properties);
+ constexpr int additional_stub_arguments =
+ 3; // function id, argument count, context (or NoContextConstant)
+ DCHECK_EQ(desc->ParameterCount(),
+ std::tuple_size_v<arguments_t> + additional_stub_arguments);
+ DCHECK(VerifyArguments<arguments_t>(desc));
+ }
+
+ template <typename Arguments>
+ static bool VerifyArguments(const CallDescriptor* desc) {
+ return VerifyArgumentsImpl<Arguments>(
+ desc, std::make_index_sequence<std::tuple_size_v<Arguments>>());
+ }
+
+ private:
+ template <typename Arguments, size_t... Indices>
+ static bool VerifyArgumentsImpl(const CallDescriptor* desc,
+ std::index_sequence<Indices...>) {
+ return (std::tuple_element_t<Indices, Arguments>::allows_representation(
+ RegisterRepresentation::FromMachineRepresentation(
+ desc->GetParameterType(Indices).representation())) &&
+ ...);
+ }
+#endif // DEBUG
+ };
+
+ using Boolean = Oddball;
+
+ public:
+ struct StringCharCodeAt : public Descriptor<StringCharCodeAt> {
+ static constexpr auto Function = Runtime::kStringCharCodeAt;
+ using arguments_t = std::tuple<V<String>, V<Number>>;
+ using result_t = V<Smi>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr Operator::Properties Properties =
+ Operator::kNoDeopt | Operator::kNoThrow;
+ };
+
+#ifdef V8_INTL_SUPPORT
+ struct StringToUpperCaseIntl : public Descriptor<StringToUpperCaseIntl> {
+ static constexpr auto Function = Runtime::kStringToUpperCaseIntl;
+ using arguments_t = std::tuple<V<String>>;
+ using result_t = V<String>;
+
+ static constexpr bool NeedsFrameState = false;
+ static constexpr Operator::Properties Properties =
+ Operator::kNoDeopt | Operator::kNoThrow;
+ };
+#endif // V8_INTL_SUPPORT
+
+ struct TerminateExecution : public Descriptor<TerminateExecution> {
+ static constexpr auto Function = Runtime::kTerminateExecution;
+ using arguments_t = std::tuple<>;
+ using result_t = V<Object>;
+
+ static constexpr bool NeedsFrameState = true;
+ static constexpr Operator::Properties Properties = Operator::kNoDeopt;
+ };
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_RUNTIME_CALL_DESCRIPTORS_H_
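
The DEBUG-only Verify above checks every runtime-call argument type with a fold expression over an index sequence (VerifyArgumentsImpl). A stripped-down, self-contained version of that index_sequence + fold pattern, checking a plain tuple of ints instead of register representations:

#include <tuple>
#include <utility>

template <class Tuple, std::size_t... Is>
constexpr bool AllPositiveImpl(const Tuple& t, std::index_sequence<Is...>) {
  // Expands to (get<0>(t) > 0) && (get<1>(t) > 0) && ...
  return ((std::get<Is>(t) > 0) && ...);
}

template <class... Ts>
constexpr bool AllPositive(const std::tuple<Ts...>& t) {
  return AllPositiveImpl(t, std::make_index_sequence<sizeof...(Ts)>());
}

static_assert(AllPositive(std::tuple{1, 2, 3}));
static_assert(!AllPositive(std::tuple{1, 0, 3}));
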
diff --git a/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h
index 1035129a6f..71eb822b73 100644
--- a/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h
+++ b/deps/v8/src/compiler/turboshaft/select-lowering-reducer.h
@@ -33,7 +33,11 @@ namespace v8::internal::compiler::turboshaft {
template <class Next>
class SelectLoweringReducer : public Next {
public:
- using Next::Asm;
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ template <class... Args>
+ explicit SelectLoweringReducer(const std::tuple<Args...>& args)
+ : Next(args) {}
OpIndex ReduceSelect(OpIndex cond, OpIndex vtrue, OpIndex vfalse,
RegisterRepresentation rep, BranchHint hint,
@@ -43,17 +47,12 @@ class SelectLoweringReducer : public Next {
// CMove.
return Next::ReduceSelect(cond, vtrue, vfalse, rep, hint, implem);
}
- Block* true_block = Asm().NewBlock(Block::Kind::kBranchTarget);
- Block* false_block = Asm().NewBlock(Block::Kind::kBranchTarget);
- Block* merge_block = Asm().NewBlock(Block::Kind::kMerge);
-
- if (hint == BranchHint::kTrue) {
- false_block->SetDeferred(true);
- } else if (hint == BranchHint::kFalse) {
- true_block->SetDeferred(true);
- }
- Asm().Branch(cond, true_block, false_block);
+ Block* true_block = Asm().NewBlock();
+ Block* false_block = Asm().NewBlock();
+ Block* merge_block = Asm().NewBlock();
+
+ Asm().Branch(cond, true_block, false_block, hint);
// Note that it's possible that other reducers of the stack optimize the
// Branch that we just introduced into a Goto (if its condition is already
diff --git a/deps/v8/src/compiler/turboshaft/sidetable.h b/deps/v8/src/compiler/turboshaft/sidetable.h
index beabd33816..84b812f85e 100644
--- a/deps/v8/src/compiler/turboshaft/sidetable.h
+++ b/deps/v8/src/compiler/turboshaft/sidetable.h
@@ -22,13 +22,18 @@ namespace v8::internal::compiler::turboshaft {
// This sidetable is a conceptually infinite mapping from Turboshaft operation
// indices to values. It grows automatically and default-initializes the table
// when accessed out-of-bounds.
-template <class T>
+template <class T, class Key = OpIndex>
class GrowingSidetable {
public:
+ static_assert(std::is_same_v<Key, OpIndex> ||
+ std::is_same_v<Key, BlockIndex>);
explicit GrowingSidetable(Zone* zone) : table_(zone) {}
- T& operator[](OpIndex op) {
- size_t i = op.id();
+ GrowingSidetable(size_t size, const T& initial_value, Zone* zone)
+ : table_(size, initial_value, zone) {}
+
+ T& operator[](Key index) {
+ size_t i = index.id();
if (V8_UNLIKELY(i >= table_.size())) {
table_.resize(NextSize(i));
// Make sure we also get access to potential over-allocation by
@@ -38,8 +43,8 @@ class GrowingSidetable {
return table_[i];
}
- const T& operator[](OpIndex op) const {
- size_t i = op.id();
+ const T& operator[](Key index) const {
+ size_t i = index.id();
if (V8_UNLIKELY(i >= table_.size())) {
table_.resize(NextSize(i));
// Make sure we also get access to potential over-allocation by
@@ -66,19 +71,23 @@ class GrowingSidetable {
}
};
-// A fixed-size sidetable mapping from `OpIndex` to `T`.
+// A fixed-size sidetable mapping from `Key` to `T`.
// Elements are default-initialized.
-template <class T>
+template <class T, class Key = OpIndex>
class FixedSidetable {
public:
+ static_assert(std::is_same_v<Key, OpIndex> ||
+ std::is_same_v<Key, BlockIndex>);
explicit FixedSidetable(size_t size, Zone* zone) : table_(size, zone) {}
+ FixedSidetable(size_t size, const T& default_value, Zone* zone)
+ : table_(size, default_value, zone) {}
- T& operator[](OpIndex op) {
+ T& operator[](Key op) {
DCHECK_LT(op.id(), table_.size());
return table_[op.id()];
}
- const T& operator[](OpIndex op) const {
+ const T& operator[](Key op) const {
DCHECK_LT(op.id(), table_.size());
return table_[op.id()];
}
@@ -87,6 +96,11 @@ class FixedSidetable {
ZoneVector<T> table_;
};
+template <typename T>
+using GrowingBlockSidetable = GrowingSidetable<T, BlockIndex>;
+template <typename T>
+using FixedBlockSidetable = FixedSidetable<T, BlockIndex>;
+
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_SIDETABLE_H_
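
GrowingSidetable conceptually maps every OpIndex (or, with the new Key parameter, BlockIndex) to a default-initialized T and grows its backing storage the first time an out-of-range index is touched. A simplified standalone analogue of that grow-on-access behavior (std::vector instead of ZoneVector, without the over-allocation trick):

#include <cstddef>
#include <vector>

template <class T>
class GrowOnAccessTable {
 public:
  T& operator[](std::size_t i) {
    if (i >= table_.size()) table_.resize(i + 1);  // value-initializes new Ts
    return table_[i];
  }

 private:
  std::vector<T> table_;
};

// Usage: GrowOnAccessTable<int> t; t[100] = 7;  // slots 0..99 become 0
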
diff --git a/deps/v8/src/compiler/turboshaft/snapshot-table.h b/deps/v8/src/compiler/turboshaft/snapshot-table.h
index 2a4b8c3eb2..8a8447c07e 100644
--- a/deps/v8/src/compiler/turboshaft/snapshot-table.h
+++ b/deps/v8/src/compiler/turboshaft/snapshot-table.h
@@ -18,14 +18,16 @@
// similar snapshots with a closeby common ancestor.
//
// Complexity:
-// creating a scope linear in the number of `Set` operations between the
-// current state and the common ancestor of all
-// predecessors and the current state, plus the `Set`
-// operations from the common ancestor to all predecessors.
-// Scope::Get() O(1)
-// Scope::Set() O(1) + operator== for Value
-// Scope::Seal() O(1)
-// NewKey() O(1)
+// creating a snapshot linear in the number of `Set` operations between the
+// current state and the common ancestor of all
+// predecessors and the current state, plus the `Set`
+// operations from the common ancestor to all
+// predecessors.
+// Get() O(1)
+// Set() O(1) + operator== for Value
+// Seal() O(1)
+// NewKey() O(1)
+// GetPredecessorValue() O(1)
namespace v8::internal::compiler::turboshaft {
struct NoKeyData {};
@@ -55,8 +57,7 @@ class SnapshotTable {
explicit Key(TableEntry& entry) : entry_(&entry) {}
};
- // A `Snapshot` captures the state of the `SnapshotTable`. Using a `Scope`,
- // the state of the table can be reset to a given snapshot.
+ // A `Snapshot` captures the state of the `SnapshotTable`.
// A `Snapshot` is implemented as a pointer to internal data and is therefore
// cheap to copy.
class Snapshot {
@@ -69,77 +70,84 @@ class SnapshotTable {
explicit Snapshot(SnapshotData& data) : data_(&data) {}
};
- // All modifications to the table need to be performed through a `Scope`.
- // There can only be a single active scope for a table at a time. A new scope
- // is based on a list of predecessor snapshots. If no predecessor is given,
- // the scope is based on the initial state of the table. A single predecessor
- // snapshot resets the table to exactly this snapshot. In the case of multiple
- // snapshots, a merge function is used to unify values that were set since the
- // last common ancestor snapshot.
- // The previous scope needs to be closed using Seal() before another one can
- // be created.
- class Scope {
- public:
- // These overloads move to a new snapshot based on the common ancestor,
- // without merging different values from the predecessors.
- Scope(SnapshotTable& table, base::Vector<const Snapshot> predecessors)
- : snapshot_table_(table),
- snapshot_(&table.MoveToNewSnapshot(predecessors)) {}
- explicit Scope(SnapshotTable& table,
- std::initializer_list<Snapshot> predecessors = {})
- : Scope(table, base::VectorOf(predecessors)) {}
- Scope(SnapshotTable& table, Snapshot parent) : Scope(table, {parent}) {}
- // These overloads merge different values from the predecessors using the
- // given function.
- template <class F>
- Scope(SnapshotTable& table, base::Vector<const Snapshot> predecessors,
- F merge_fun)
- : Scope(table, predecessors) {
- table.MergePredecessors(predecessors, merge_fun);
- }
- template <class F>
- Scope(SnapshotTable& table, std::initializer_list<Snapshot> predecessors,
- F merge_fun)
- : Scope(table, base::VectorOf(predecessors), merge_fun) {}
-
- Scope(const Scope&) = delete;
- Scope& operator=(const Scope&) = delete;
-
- const Value& Get(Key key) {
- DCHECK_EQ(snapshot_, snapshot_table_.current_snapshot_);
- return key.entry_->value;
- }
+ // A new Snapshot is based on a list of predecessor Snapshots. If no
+ // predecessor is given, the new Snapshot is based on the initial state of the
+ // table. A single predecessor Snapshot resets the table to exactly this
+ // Snapshot. In the case of multiple Snapshots, a merge function is used to
+ // unify values that were set since the last common ancestor snapshot.
+ // The previous Snapshot needs to be closed using Seal() before another one
+ // can be created.
+ void StartNewSnapshot(base::Vector<const Snapshot> predecessors) {
+ DCHECK(current_snapshot_->IsSealed());
+ MoveToNewSnapshot(predecessors);
+#ifdef DEBUG
+ snapshot_was_created_with_merge = false;
+#endif
+ }
+ void StartNewSnapshot(std::initializer_list<Snapshot> predecessors = {}) {
+ StartNewSnapshot(base::VectorOf(predecessors));
+ }
+ void StartNewSnapshot(Snapshot parent) { StartNewSnapshot({parent}); }
+ template <class F>
+ void StartNewSnapshot(base::Vector<const Snapshot> predecessors,
+ F merge_fun) {
+ StartNewSnapshot(predecessors);
+ MergePredecessors(predecessors, merge_fun);
+#ifdef DEBUG
+ snapshot_was_created_with_merge = true;
+#endif
+ }
+ template <class F>
+ void StartNewSnapshot(std::initializer_list<Snapshot> predecessors,
+ F merge_fun) {
+ StartNewSnapshot(base::VectorOf(predecessors), merge_fun);
+ }
- void Set(Key key, Value new_value) {
- DCHECK(!snapshot_->IsSealed());
- snapshot_table_.Set(key, new_value);
+ Snapshot Seal() {
+ current_snapshot_->Seal(log_.size());
+    // Reset the entries' `merge_offset` and `last_merged_predecessor`
+    // fields so that they are cleared for the next merge.
+ for (TableEntry* entry : merging_entries_) {
+ entry->last_merged_predecessor = kNoMergedPredecessor;
+ entry->merge_offset = kNoMergeOffset;
}
-
- // Sealing the current scope means that no more modifications are possible.
- // Produces a new snapshot which represents the current state.
- Snapshot Seal() {
- snapshot_->Seal(snapshot_table_.log_.size());
- // Optimization: If nothing changed in the new snapshot, we discard it and
- // use its parent instead.
- if (snapshot_->log_begin == snapshot_->log_end) {
- SnapshotData* parent = snapshot_->parent;
- snapshot_table_.current_snapshot_ = parent;
- DCHECK_EQ(snapshot_, &snapshot_table_.snapshots_.back());
- snapshot_table_.snapshots_.pop_back();
- return Snapshot{*parent};
- }
- return Snapshot{*snapshot_};
+ merge_values_.clear();
+ merging_entries_.clear();
+
+ // Optimization: If nothing changed in the new snapshot, we discard it and
+ // use its parent instead.
+ if (current_snapshot_->log_begin == current_snapshot_->log_end) {
+ SnapshotData* parent = current_snapshot_->parent;
+ DCHECK_EQ(current_snapshot_, &snapshots_.back());
+ snapshots_.pop_back();
+ current_snapshot_ = parent;
+ return Snapshot{*parent};
}
+ return Snapshot{*current_snapshot_};
+ }
- ~Scope() {
- // Seal() should have been used to obtain the new snapshot.
- DCHECK(snapshot_->IsSealed());
- }
+ const Value& Get(Key key) { return key.entry_->value; }
+
+  // Returns the value associated with {key} in its {predecessor_index}th
+ // predecessor (where "predecessor" refers to the predecessors that were
+ // passed to StartNewSnapshot when creating the current snapshot).
+ // This function should only be used if the snapshot was started with a merge
+ // function.
+ // If {key} wasn't merged but was Set in the current snapshot, then
+ // the newly set value will be returned rather than the predecessor value.
+ const Value& GetPredecessorValue(Key key, int predecessor_index) {
+ DCHECK(!current_snapshot_->IsSealed());
+ DCHECK(snapshot_was_created_with_merge);
+ if (key.entry_->merge_offset == kNoMergeOffset) return Get(key);
+ return merge_values_[key.entry_->merge_offset + predecessor_index];
+ }
- private:
- SnapshotTable& snapshot_table_;
- SnapshotData* snapshot_;
- };
+ void Set(Key key, Value new_value) {
+ DCHECK(!current_snapshot_->IsSealed());
+ if (key.entry_->value == new_value) return;
+ log_.push_back(LogEntry{*key.entry_, key.entry_->value, new_value});
+ key.entry_->value = new_value;
+ }
explicit SnapshotTable(Zone* zone) : zone_(zone) {
root_snapshot_ = &NewSnapshot(nullptr);
@@ -159,6 +167,9 @@ class SnapshotTable {
return NewKey(KeyData{}, initial_value);
}
+ // Returns true if {current_snapshot_} is sealed.
+ bool IsSealed() { return current_snapshot_->IsSealed(); }
+
private:
Zone* zone_;
ZoneDeque<TableEntry> table_{zone_};
@@ -175,6 +186,10 @@ class SnapshotTable {
ZoneVector<TableEntry*> merging_entries_{zone_};
ZoneVector<Value> merge_values_{zone_};
+#ifdef DEBUG
+ bool snapshot_was_created_with_merge = false;
+#endif
+
SnapshotData& NewSnapshot(SnapshotData* parent) {
return snapshots_.emplace_back(parent, log_.size());
}
@@ -203,12 +218,6 @@ class SnapshotTable {
current_snapshot_ = snapshot;
}
- void Set(Key key, Value new_value) {
- if (key.entry_->value == new_value) return;
- log_.push_back(LogEntry{*key.entry_, key.entry_->value, new_value});
- key.entry_->value = new_value;
- }
-
void RecordMergeValue(TableEntry& entry, const Value& value,
uint32_t predecessor_index, uint32_t predecessor_count);
SnapshotData& MoveToNewSnapshot(base::Vector<const Snapshot> predecessors);
@@ -226,8 +235,9 @@ class SnapshotTable {
template <class Value, class KeyData>
struct SnapshotTable<Value, KeyData>::TableEntry : KeyData {
Value value;
- // Used during merging: the offset in `merge_values_` where we store the
- // merged values.
+ // `merge_offset` is the offset in `merge_values_` where we store the
+ // merged values. It is used during merging (to know what to merge) and when
+ // calling GetPredecessorValue.
uint32_t merge_offset = kNoMergeOffset;
// Used during merging: the index of the predecessor for which we last
// recorded a value. This allows us to only use the last value for a given
@@ -269,7 +279,7 @@ struct SnapshotTable<Value, KeyData>::SnapshotData {
return self;
}
void Seal(size_t log_end) {
- DCHECK_WITH_MSG(!IsSealed(), "A scope can only be sealed once.");
+ DCHECK_WITH_MSG(!IsSealed(), "A Snapshot can only be sealed once.");
this->log_end = log_end;
}
@@ -303,7 +313,7 @@ void SnapshotTable<Value, KeyData>::RecordMergeValue(
entry.last_merged_predecessor = predecessor_index;
}
-// This function prepares the SnapshotTable to start a new snapshot/scope whose
+// This function prepares the SnapshotTable to start a new snapshot whose
// predecessors are `predecessors`. To do this, it resets and replays snapshots
// in between the `current_snapshot_` and the position of the new snapshot. For
// instance:
@@ -330,7 +340,7 @@ SnapshotTable<Value, KeyData>::MoveToNewSnapshot(
base::Vector<const Snapshot> predecessors) {
DCHECK_WITH_MSG(
current_snapshot_->IsSealed(),
- "A new scope was opened before the previous scope was sealed.");
+ "A new Snapshot was opened before the previous Snapshot was sealed.");
SnapshotData* common_ancestor;
if (predecessors.empty()) {
@@ -363,7 +373,7 @@ SnapshotTable<Value, KeyData>::MoveToNewSnapshot(
}
// Merges all entries modified in `predecessors` since the last common ancestor
-// by adding them to the current scope.
+// by adding them to the current snapshot.
template <class Value, class KeyData>
template <class F>
void SnapshotTable<Value, KeyData>::MergePredecessors(
@@ -399,11 +409,7 @@ void SnapshotTable<Value, KeyData>::MergePredecessors(
Key key{*entry};
Set(key, merge_fun(key, base::VectorOf(&merge_values_[entry->merge_offset],
predecessor_count)));
- entry->last_merged_predecessor = kNoMergedPredecessor;
- entry->merge_offset = kNoMergeOffset;
}
- merge_values_.clear();
- merging_entries_.clear();
}
} // namespace v8::internal::compiler::turboshaft
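For illustration, a minimal usage sketch of the snapshot API above (not part of
the patch; it assumes an `int` value type, an existing `Zone* zone`, and that
the code runs inside the turboshaft namespace):

  SnapshotTable<int> table(zone);
  SnapshotTable<int>::Key key = table.NewKey(0);

  table.StartNewSnapshot();  // Based on the initial state of the table.
  table.Set(key, 1);
  SnapshotTable<int>::Snapshot a = table.Seal();

  table.StartNewSnapshot();  // Also based on the initial state.
  table.Set(key, 2);
  SnapshotTable<int>::Snapshot b = table.Seal();

  // Merge both predecessors: for every key that changed since the common
  // ancestor, the merge function receives the per-predecessor values.
  table.StartNewSnapshot(
      {a, b}, [](SnapshotTable<int>::Key, base::Vector<int> values) {
        return values[0] > values[1] ? values[0] : values[1];
      });
  int merged = table.Get(key);                     // 2 (the merged value)
  int from_a = table.GetPredecessorValue(key, 0);  // 1
  int from_b = table.GetPredecessorValue(key, 1);  // 2
  table.Seal();

GetPredecessorValue is only valid while the merge-based snapshot is still open,
which is why it is queried before the final Seal() here.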
diff --git a/deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.cc b/deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.cc
new file mode 100644
index 0000000000..b927e69dd9
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.cc
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/tag-untag-lowering-phase.h"
+
+#include "src/compiler/turboshaft/tag-untag-lowering-reducer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void TagUntagLoweringPhase::Run(PipelineData* data, Zone* temp_zone) {
+ turboshaft::OptimizationPhase<turboshaft::TagUntagLoweringReducer>::Run(
+ data->isolate(), &data->graph(), temp_zone, data->node_origins());
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.h b/deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.h
new file mode 100644
index 0000000000..c132cda4ac
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/tag-untag-lowering-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TAG_UNTAG_LOWERING_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_TAG_UNTAG_LOWERING_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct TagUntagLoweringPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(TagUntagLowering)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TAG_UNTAG_LOWERING_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/tag-untag-lowering-reducer.h b/deps/v8/src/compiler/turboshaft/tag-untag-lowering-reducer.h
new file mode 100644
index 0000000000..65b756aea7
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/tag-untag-lowering-reducer.h
@@ -0,0 +1,64 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TAG_UNTAG_LOWERING_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_TAG_UNTAG_LOWERING_REDUCER_H_
+
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/graph.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+#include "src/compiler/turboshaft/define-assembler-macros.inc"
+
+template <class Next>
+class TagUntagLoweringReducer : public Next {
+ static constexpr int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ template <class... Args>
+ explicit TagUntagLoweringReducer(const std::tuple<Args...>& args)
+ : Next(args) {}
+
+ OpIndex ReduceTag(OpIndex input, TagKind kind) {
+ DCHECK_EQ(kind, TagKind::kSmiTag);
+ // Do shift on 32bit values if Smis are stored in the lower word.
+ if constexpr (Is64() && SmiValuesAre31Bits()) {
+ return ChangeTaggedInt32ToSmi(__ Word32ShiftLeft(input, kSmiShiftBits));
+ } else {
+ return V<Tagged>::Cast(
+ __ WordPtrShiftLeft(__ ChangeInt32ToIntPtr(input), kSmiShiftBits));
+ }
+ }
+
+ OpIndex ReduceUntag(OpIndex input, TagKind kind, RegisterRepresentation rep) {
+ DCHECK_EQ(kind, TagKind::kSmiTag);
+ DCHECK_EQ(rep, RegisterRepresentation::Word32());
+ if constexpr (Is64() && SmiValuesAre31Bits()) {
+ return __ Word32ShiftRightArithmeticShiftOutZeros(input, kSmiShiftBits);
+ }
+ return V<Word32>::Cast(
+ __ WordPtrShiftRightArithmeticShiftOutZeros(input, kSmiShiftBits));
+ }
+
+ private:
+ V<Tagged> ChangeTaggedInt32ToSmi(V<Word32> input) {
+ DCHECK(SmiValuesAre31Bits());
+    // With pointer compression we can Smi-corrupt, since the upper bits are
+    // not important.
+ return COMPRESS_POINTERS_BOOL
+ ? V<Tagged>::Cast(__ BitcastWord32ToWord64(input))
+ : V<Tagged>::Cast(__ ChangeInt32ToIntPtr(input));
+ }
+};
+
+#include "src/compiler/turboshaft/undef-assembler-macros.inc"
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TAG_UNTAG_LOWERING_REDUCER_H_
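For illustration, a simplified stand-alone model of the Smi arithmetic that the
reducer above emits (not V8 code; the constant mirrors the 64-bit configuration
with 31-bit Smis, where kSmiTagSize is 1 and kSmiShiftSize is 0):

  #include <cstdint>

  constexpr int kSmiShiftBits = 1;  // kSmiShiftSize (0) + kSmiTagSize (1)

  // Tag: shift left; with pointer compression only the low 32 bits of the
  // tagged word carry information.
  uint32_t SmiTag(int32_t value) {
    return static_cast<uint32_t>(value) << kSmiShiftBits;
  }

  // Untag: arithmetic shift right restores the signed payload (the tag bit
  // that gets shifted out is known to be zero).
  int32_t SmiUntag(uint32_t smi) {
    return static_cast<int32_t>(smi) >> kSmiShiftBits;
  }

For example, SmiTag(-3) yields 0xFFFFFFFA and SmiUntag(0xFFFFFFFA) yields -3,
matching the Word32 shift sequence in ReduceTag/ReduceUntag above.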
diff --git a/deps/v8/src/compiler/turboshaft/tracing.h b/deps/v8/src/compiler/turboshaft/tracing.h
new file mode 100644
index 0000000000..17fcdd0cce
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/tracing.h
@@ -0,0 +1,47 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TRACING_H_
+#define V8_COMPILER_TURBOSHAFT_TRACING_H_
+
+#include "src/base/contextual.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/compiler/turboshaft/graph-visualizer.h"
+#include "src/compiler/turboshaft/graph.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+class Tracing : public base::ContextualClass<Tracing> {
+ public:
+ explicit Tracing(OptimizedCompilationInfo* info) : info_(info) {
+ DCHECK_NOT_NULL(info_);
+ }
+
+ using OperationDataPrinter =
+ std::function<bool(std::ostream&, const Graph&, OpIndex)>;
+ using BlockDataPrinter =
+ std::function<bool(std::ostream&, const Graph&, BlockIndex)>;
+
+ inline bool is_enabled() const { return info_->trace_turbo_json(); }
+
+ void PrintPerOperationData(const char* data_name, const Graph& graph,
+ OperationDataPrinter printer) {
+ DCHECK(printer);
+ if (!is_enabled()) return;
+ PrintTurboshaftCustomDataPerOperation(info_, data_name, graph, printer);
+ }
+ void PrintPerBlockData(const char* data_name, const Graph& graph,
+ BlockDataPrinter printer) {
+ DCHECK(printer);
+ if (!is_enabled()) return;
+ PrintTurboshaftCustomDataPerBlock(info_, data_name, graph, printer);
+ }
+
+ private:
+ OptimizedCompilationInfo* info_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TRACING_H_
diff --git a/deps/v8/src/compiler/turboshaft/type-assertions-phase.cc b/deps/v8/src/compiler/turboshaft/type-assertions-phase.cc
new file mode 100644
index 0000000000..76966088f1
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/type-assertions-phase.cc
@@ -0,0 +1,31 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/type-assertions-phase.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/turboshaft/assert-types-reducer.h"
+#include "src/compiler/turboshaft/type-inference-reducer.h"
+#include "src/compiler/turboshaft/value-numbering-reducer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void TypeAssertionsPhase::Run(PipelineData* data, Zone* temp_zone) {
+ UnparkedScopeIfNeeded scope(data->broker());
+
+ turboshaft::TypeInferenceReducerArgs typing_args{
+ data->isolate(),
+ turboshaft::TypeInferenceReducerArgs::InputGraphTyping::kPrecise,
+ turboshaft::TypeInferenceReducerArgs::OutputGraphTyping::
+ kPreserveFromInputGraph};
+
+ turboshaft::OptimizationPhase<turboshaft::AssertTypesReducer,
+ turboshaft::ValueNumberingReducer,
+ turboshaft::TypeInferenceReducer>::
+ Run(data->isolate(), &data->graph(), temp_zone, data->node_origins(),
+ std::tuple{typing_args,
+ turboshaft::AssertTypesReducerArgs{data->isolate()}});
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/type-assertions-phase.h b/deps/v8/src/compiler/turboshaft/type-assertions-phase.h
new file mode 100644
index 0000000000..2388013daa
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/type-assertions-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPE_ASSERTIONS_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_TYPE_ASSERTIONS_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct TypeAssertionsPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(TypeAssertions)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPE_ASSERTIONS_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/type-inference-analysis.h b/deps/v8/src/compiler/turboshaft/type-inference-analysis.h
new file mode 100644
index 0000000000..92c4637122
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/type-inference-analysis.h
@@ -0,0 +1,566 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_ANALYSIS_H_
+#define V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_ANALYSIS_H_
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/vector.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/sidetable.h"
+#include "src/compiler/turboshaft/snapshot-table.h"
+#include "src/compiler/turboshaft/typer.h"
+#include "src/compiler/turboshaft/types.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// This analysis infers types for all operations. It does so by running a
+// fixpoint analysis on the input graph in order to properly type PhiOps. The
+// analysis visits blocks in order and computes operation types using
+// Turboshaft's Typer. For Goto operations, the analysis checks if this is a
+// back edge (the Goto's target is a loop block with an index less than the
+// index of the current block). If this is the case, the analysis revisits the
+// loop block (this is when ProcessBlock<true> is called). During this revisit,
+// two things are different to the normal processing of a block:
+//
+// 1.) PhiOps are handled specially, which means applying proper
+//     widening/narrowing mechanics to accelerate termination while still
+//     computing somewhat precise types for Phis.
+// 2.) If the type of any of the loop's Phis grows, we reset the index of
+//     unprocessed blocks to the block after the loop header, such that the
+//     entire loop body is revisited with the new type information.
+class TypeInferenceAnalysis {
+ public:
+ explicit TypeInferenceAnalysis(const Graph& graph, Zone* phase_zone)
+ : graph_(graph),
+ // TODO(nicohartmann@): Might put types back into phase_zone once we
+ // don't store them in the graph anymore.
+ types_(graph.op_id_count(), Type{}, graph.graph_zone()),
+ table_(phase_zone),
+ op_to_key_mapping_(phase_zone),
+ block_to_snapshot_mapping_(graph.block_count(), base::nullopt,
+ phase_zone),
+ predecessors_(phase_zone),
+ graph_zone_(graph.graph_zone()) {}
+
+ GrowingSidetable<Type> Run(
+ GrowingBlockSidetable<std::vector<std::pair<OpIndex, Type>>>*
+ block_refinements = nullptr) {
+#ifdef DEBUG
+ block_refinements_ = block_refinements;
+#endif // DEBUG
+ TURBOSHAFT_TRACE_TYPING("=== Running Type Inference Analysis ===\n");
+ for (uint32_t unprocessed_index = 0;
+ unprocessed_index < graph_.block_count();) {
+ BlockIndex block_index = static_cast<BlockIndex>(unprocessed_index);
+ ++unprocessed_index;
+
+ const Block& block = graph_.Get(block_index);
+ ProcessBlock<false>(block, &unprocessed_index);
+ }
+ TURBOSHAFT_TRACE_TYPING("=== Completed Type Inference Analysis ===\n");
+
+ return std::move(types_);
+ }
+
+ template <bool revisit_loop_header>
+ void ProcessBlock(const Block& block, uint32_t* unprocessed_index) {
+ DCHECK_IMPLIES(revisit_loop_header, block.IsLoop());
+
+ // Seal the current block first.
+ if (table_.IsSealed()) {
+ DCHECK_NULL(current_block_);
+ } else {
+ // If we process a new block while the previous one is still unsealed, we
+ // finalize it.
+ DCHECK_NOT_NULL(current_block_);
+ DCHECK(current_block_->index().valid());
+ block_to_snapshot_mapping_[current_block_->index()] = table_.Seal();
+ current_block_ = nullptr;
+ }
+
+ // Collect the snapshots of all predecessors.
+ {
+ predecessors_.clear();
+ for (const Block* pred = block.LastPredecessor(); pred != nullptr;
+ pred = pred->NeighboringPredecessor()) {
+ base::Optional<table_t::Snapshot> pred_snapshot =
+ block_to_snapshot_mapping_[pred->index()];
+ if (pred_snapshot.has_value()) {
+ predecessors_.push_back(pred_snapshot.value());
+ } else {
+ // The only case where we might not have a snapshot for the
+ // predecessor is when we visit a loop header for the first time.
+ DCHECK(block.IsLoop() && pred == block.LastPredecessor() &&
+ !revisit_loop_header);
+ }
+ }
+ std::reverse(predecessors_.begin(), predecessors_.end());
+ }
+
+ // Start a new snapshot for this block by merging information from
+ // predecessors.
+ {
+ auto MergeTypes = [&](table_t::Key,
+ base::Vector<Type> predecessors) -> Type {
+ DCHECK_GT(predecessors.size(), 0);
+ Type result_type = predecessors[0];
+ for (size_t i = 1; i < predecessors.size(); ++i) {
+ result_type =
+ Type::LeastUpperBound(result_type, predecessors[i], graph_zone_);
+ }
+ return result_type;
+ };
+
+ table_.StartNewSnapshot(base::VectorOf(predecessors_), MergeTypes);
+ }
+
+ // Check if the predecessor is a branch that allows us to refine a few
+ // types.
+ DCHECK_IMPLIES(revisit_loop_header, block.HasExactlyNPredecessors(2));
+ if (block.HasExactlyNPredecessors(1)) {
+ Block* predecessor = block.LastPredecessor();
+ const Operation& terminator = predecessor->LastOperation(graph_);
+ if (const BranchOp* branch = terminator.TryCast<BranchOp>()) {
+ DCHECK(branch->if_true == &block || branch->if_false == &block);
+ RefineTypesAfterBranch(branch, &block, branch->if_true == &block);
+ }
+ }
+ current_block_ = &block;
+
+ bool loop_needs_revisit = false;
+ auto op_range = graph_.OperationIndices(block);
+ for (auto it = op_range.begin(); it != op_range.end(); ++it) {
+ OpIndex index = *it;
+ const Operation& op = graph_.Get(index);
+
+ switch (op.opcode) {
+ case Opcode::kBranch:
+ case Opcode::kDeoptimize:
+ case Opcode::kDeoptimizeIf:
+ case Opcode::kFrameState:
+ case Opcode::kReturn:
+ case Opcode::kStore:
+ case Opcode::kRetain:
+ case Opcode::kTrapIf:
+ case Opcode::kUnreachable:
+ case Opcode::kSwitch:
+ case Opcode::kTuple:
+ case Opcode::kStaticAssert:
+ case Opcode::kDebugBreak:
+ // These operations do not produce any output that needs to be typed.
+ DCHECK_EQ(0, op.outputs_rep().size());
+ break;
+ case Opcode::kCheckTurboshaftTypeOf:
+ ProcessCheckTurboshaftTypeOf(index,
+ op.Cast<CheckTurboshaftTypeOfOp>());
+ break;
+ case Opcode::kComparison:
+ ProcessComparison(index, op.Cast<ComparisonOp>());
+ break;
+ case Opcode::kConstant:
+ ProcessConstant(index, op.Cast<ConstantOp>());
+ break;
+ case Opcode::kFloatBinop:
+ ProcessFloatBinop(index, op.Cast<FloatBinopOp>());
+ break;
+ case Opcode::kOverflowCheckedBinop:
+ ProcessOverflowCheckedBinop(index, op.Cast<OverflowCheckedBinopOp>());
+ break;
+ case Opcode::kProjection:
+ ProcessProjection(index, op.Cast<ProjectionOp>());
+ break;
+ case Opcode::kWordBinop:
+ ProcessWordBinop(index, op.Cast<WordBinopOp>());
+ break;
+ case Opcode::kPendingLoopPhi:
+ // Input graph must not contain PendingLoopPhi.
+ UNREACHABLE();
+ case Opcode::kPhi:
+ if constexpr (revisit_loop_header) {
+ loop_needs_revisit =
+ ProcessLoopPhi(index, op.Cast<PhiOp>()) || loop_needs_revisit;
+ } else {
+ ProcessPhi(index, op.Cast<PhiOp>());
+ }
+ break;
+ case Opcode::kGoto: {
+ const GotoOp& gto = op.Cast<GotoOp>();
+ // Check if this is a backedge.
+ if (gto.destination->IsLoop() &&
+ gto.destination->index() < current_block_->index()) {
+ ProcessBlock<true>(*gto.destination, unprocessed_index);
+ }
+ break;
+ }
+
+ case Opcode::kWordUnary:
+ case Opcode::kFloatUnary:
+ case Opcode::kShift:
+ case Opcode::kEqual:
+ case Opcode::kChange:
+ case Opcode::kChangeOrDeopt:
+ case Opcode::kTryChange:
+ case Opcode::kFloat64InsertWord32:
+ case Opcode::kTaggedBitcast:
+ case Opcode::kSelect:
+ case Opcode::kLoad:
+ case Opcode::kAllocate:
+ case Opcode::kDecodeExternalPointer:
+ case Opcode::kParameter:
+ case Opcode::kOsrValue:
+ case Opcode::kStackPointerGreaterThan:
+ case Opcode::kStackSlot:
+ case Opcode::kFrameConstant:
+ case Opcode::kCall:
+ case Opcode::kCallAndCatchException:
+ case Opcode::kLoadException:
+ case Opcode::kTailCall:
+ case Opcode::kObjectIs:
+ case Opcode::kFloatIs:
+ case Opcode::kConvertToObject:
+ case Opcode::kConvertToObjectOrDeopt:
+ case Opcode::kConvertObjectToPrimitive:
+ case Opcode::kConvertObjectToPrimitiveOrDeopt:
+ case Opcode::kTruncateObjectToPrimitive:
+ case Opcode::kTag:
+ case Opcode::kUntag:
+ case Opcode::kNewConsString:
+ case Opcode::kNewArray:
+ case Opcode::kDoubleArrayMinMax:
+ case Opcode::kLoadFieldByIndex:
+ case Opcode::kBigIntBinop:
+ case Opcode::kBigIntEqual:
+ case Opcode::kBigIntComparison:
+ case Opcode::kBigIntUnary:
+ case Opcode::kStringAt:
+#ifdef V8_INTL_SUPPORT
+ case Opcode::kStringToCaseIntl:
+#endif // V8_INTL_SUPPORT
+ case Opcode::kStringLength:
+ case Opcode::kStringIndexOf:
+ case Opcode::kStringFromCodePointAt:
+ case Opcode::kStringSubstring:
+ case Opcode::kStringEqual:
+ case Opcode::kStringComparison:
+ // TODO(nicohartmann@): Support remaining operations. For now we
+ // compute fallback types.
+ if (op.outputs_rep().size() > 0) {
+ constexpr bool allow_narrowing = false;
+ constexpr bool is_fallback_for_unsupported_operation = true;
+ SetType(index,
+ Typer::TypeForRepresentation(op.outputs_rep(), graph_zone_),
+ allow_narrowing, is_fallback_for_unsupported_operation);
+ }
+ break;
+ case Opcode::kLoadRootRegister:
+ SetType(index,
+ Typer::TypeForRepresentation(op.outputs_rep(), graph_zone_));
+ break;
+ }
+ }
+
+ if constexpr (revisit_loop_header) {
+ if (loop_needs_revisit) {
+ // This is a loop header and the loop body needs to be revisited. Reset
+ // {unprocessed_index} to the loop header's successor.
+ *unprocessed_index =
+ std::min(*unprocessed_index, block.index().id() + 1);
+ }
+ }
+ }
+
+ void ProcessCheckTurboshaftTypeOf(OpIndex index,
+ const CheckTurboshaftTypeOfOp& check) {
+ Type input_type = GetType(check.input());
+
+ if (input_type.IsSubtypeOf(check.type)) {
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "CTOF %3d:%-40s\n P: %3d:%-40s ~~> %s\n", index.id(),
+ graph_.Get(index).ToString().substr(0, 40).c_str(),
+ check.input().id(),
+ graph_.Get(check.input()).ToString().substr(0, 40).c_str(),
+ input_type.ToString().c_str());
+ } else if (check.successful) {
+ FATAL(
+ "Checking type %s of operation %d:%s failed after it passed in a "
+ "previous phase",
+ check.type.ToString().c_str(), check.input().id(),
+ graph_.Get(check.input()).ToString().c_str());
+ } else {
+ TURBOSHAFT_TRACE_TYPING_FAIL(
+ "CTOF %3d:%-40s\n F: %3d:%-40s ~~> %s\n", index.id(),
+ graph_.Get(index).ToString().substr(0, 40).c_str(),
+ check.input().id(),
+ graph_.Get(check.input()).ToString().substr(0, 40).c_str(),
+ input_type.ToString().c_str());
+ }
+ }
+
+ void ProcessComparison(OpIndex index, const ComparisonOp& comparison) {
+ Type left_type = GetType(comparison.left());
+ Type right_type = GetType(comparison.right());
+
+ Type result_type = Typer::TypeComparison(
+ left_type, right_type, comparison.rep, comparison.kind, graph_zone_);
+ SetType(index, result_type);
+ }
+
+ void ProcessConstant(OpIndex index, const ConstantOp& constant) {
+ Type type = Typer::TypeConstant(constant.kind, constant.storage);
+ SetType(index, type);
+ }
+
+ void ProcessFloatBinop(OpIndex index, const FloatBinopOp& binop) {
+ Type left_type = GetType(binop.left());
+ Type right_type = GetType(binop.right());
+
+ Type result_type = Typer::TypeFloatBinop(left_type, right_type, binop.kind,
+ binop.rep, graph_zone_);
+ SetType(index, result_type);
+ }
+
+ bool ProcessLoopPhi(OpIndex index, const PhiOp& phi) {
+ Type old_type = GetTypeAtDefinition(index);
+ Type new_type = ComputeTypeForPhi(phi);
+
+ if (old_type.IsInvalid()) {
+ SetType(index, new_type);
+ return true;
+ }
+
+ // If the new type is smaller, we narrow it without revisiting the loop.
+ if (new_type.IsSubtypeOf(old_type)) {
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "LOOP %3d:%-40s (FIXPOINT)\n N: %-40s ~~> %-40s\n", index.id(),
+ graph_.Get(index).ToString().substr(0, 40).c_str(),
+ old_type.ToString().c_str(), new_type.ToString().c_str());
+
+ constexpr bool allow_narrowing = true;
+ SetType(index, new_type, allow_narrowing);
+ return false;
+ }
+
+ // Otherwise, the new type is larger and we widen and revisit the loop.
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "LOOP %3d:%-40s (REVISIT)\n W: %-40s ~~> %-40s\n", index.id(),
+ graph_.Get(index).ToString().substr(0, 40).c_str(),
+ old_type.ToString().c_str(), new_type.ToString().c_str());
+
+ if (!old_type.IsNone()) {
+ new_type = Widen(old_type, new_type);
+ }
+ SetType(index, new_type);
+ return true;
+ }
+
+ void ProcessOverflowCheckedBinop(OpIndex index,
+ const OverflowCheckedBinopOp& binop) {
+ Type left_type = GetType(binop.left());
+ Type right_type = GetType(binop.right());
+
+ Type result_type = Typer::TypeOverflowCheckedBinop(
+ left_type, right_type, binop.kind, binop.rep, graph_zone_);
+ SetType(index, result_type);
+ }
+
+ void ProcessPhi(OpIndex index, const PhiOp& phi) {
+ Type result_type = ComputeTypeForPhi(phi);
+ SetType(index, result_type);
+ }
+
+ void ProcessProjection(OpIndex index, const ProjectionOp& projection) {
+ Type input_type = GetType(projection.input());
+
+ Type result_type;
+ if (input_type.IsNone()) {
+ result_type = Type::None();
+ } else if (input_type.IsTuple()) {
+ const TupleType& tuple = input_type.AsTuple();
+ DCHECK_LT(projection.index, tuple.size());
+ result_type = tuple.element(projection.index);
+ DCHECK(result_type.IsSubtypeOf(
+ Typer::TypeForRepresentation(projection.rep)));
+ } else {
+ result_type = Typer::TypeForRepresentation(projection.rep);
+ }
+
+ SetType(index, result_type);
+ }
+
+ void ProcessWordBinop(OpIndex index, const WordBinopOp& binop) {
+ Type left_type = GetType(binop.left());
+ Type right_type = GetType(binop.right());
+
+ Type result_type = Typer::TypeWordBinop(left_type, right_type, binop.kind,
+ binop.rep, graph_zone_);
+ SetType(index, result_type);
+ }
+
+ Type ComputeTypeForPhi(const PhiOp& phi) {
+ Type result_type = GetTypeOrDefault(phi.inputs()[0], Type::None());
+ for (size_t i = 1; i < phi.inputs().size(); ++i) {
+ Type input_type = GetTypeOrDefault(phi.inputs()[i], Type::None());
+ result_type = Type::LeastUpperBound(result_type, input_type, graph_zone_);
+ }
+ return result_type;
+ }
+
+ void RefineTypesAfterBranch(const BranchOp* branch, const Block* new_block,
+ bool then_branch) {
+ TURBOSHAFT_TRACE_TYPING_OK("Br %3d:%-40s\n", graph_.Index(*branch).id(),
+ branch->ToString().substr(0, 40).c_str());
+
+ Typer::BranchRefinements refinements(
+ [this](OpIndex index) { return GetType(index); },
+ [&](OpIndex index, const Type& refined_type) {
+ RefineOperationType(new_block, index, refined_type,
+ then_branch ? 'T' : 'F');
+ });
+
+ // Inspect branch condition.
+ const Operation& condition = graph_.Get(branch->condition());
+ refinements.RefineTypes(condition, then_branch, graph_zone_);
+ }
+
+ void RefineOperationType(const Block* new_block, OpIndex op, const Type& type,
+ char case_for_tracing) {
+ DCHECK(op.valid());
+ DCHECK(!type.IsInvalid());
+
+ TURBOSHAFT_TRACE_TYPING_OK(" %c: %3d:%-40s ~~> %s\n", case_for_tracing,
+ op.id(),
+ graph_.Get(op).ToString().substr(0, 40).c_str(),
+ type.ToString().c_str());
+
+ auto key_opt = op_to_key_mapping_[op];
+ DCHECK(key_opt.has_value());
+ table_.Set(*key_opt, type);
+
+#ifdef DEBUG
+ if (block_refinements_) {
+ (*block_refinements_)[new_block->index()].emplace_back(op, type);
+ }
+#endif
+
+ // TODO(nicohartmann@): One could push the refined type deeper into the
+ // operations.
+ }
+
+ void SetType(OpIndex index, Type result_type, bool allow_narrowing = false,
+ bool is_fallback_for_unsupported_operation = false) {
+ DCHECK(!result_type.IsInvalid());
+
+ if (auto key_opt = op_to_key_mapping_[index]) {
+ table_.Set(*key_opt, result_type);
+ types_[index] = result_type;
+ } else {
+ auto key = table_.NewKey(Type::None());
+ op_to_key_mapping_[index] = key;
+ table_.Set(key, result_type);
+ types_[index] = result_type;
+ }
+
+ if (!is_fallback_for_unsupported_operation) {
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "Type %3d:%-40s ==> %s\n", index.id(),
+ graph_.Get(index).ToString().substr(0, 40).c_str(),
+ result_type.ToString().c_str());
+ } else {
+ // TODO(nicohartmann@): Remove the fallback case once all operations are
+ // supported.
+ TURBOSHAFT_TRACE_TYPING_FAIL(
+ "TODO %3d:%-40s ==> %s\n", index.id(),
+ graph_.Get(index).ToString().substr(0, 40).c_str(),
+ result_type.ToString().c_str());
+ }
+ }
+
+ Type GetTypeOrInvalid(const OpIndex index) {
+ if (auto key = op_to_key_mapping_[index]) return table_.Get(*key);
+ return Type::Invalid();
+ }
+
+ Type GetTypeOrDefault(OpIndex index, const Type& default_type) {
+ Type t = GetTypeOrInvalid(index);
+ if (t.IsInvalid()) return default_type;
+ return t;
+ }
+
+ Type GetType(OpIndex index) {
+ Type t = GetTypeOrInvalid(index);
+ if (t.IsInvalid()) {
+ // TODO(nicohartmann@): This is a fallback mechanism as long as not all
+ // operations are properly typed. Remove this once typing is complete.
+ const Operation& op = graph_.Get(index);
+ return Typer::TypeForRepresentation(op.outputs_rep(), graph_zone_);
+ }
+ return t;
+ }
+
+ Type GetTypeAtDefinition(OpIndex index) const { return types_[index]; }
+
+ Type Widen(const Type& old_type, const Type& new_type) {
+ if (new_type.IsAny()) return new_type;
+ // We might have to relax this eventually and widen different types.
+ DCHECK_EQ(old_type.kind(), new_type.kind());
+
+ switch (old_type.kind()) {
+ case Type::Kind::kWord32:
+ // TODO(nicohartmann@): Reevaluate whether exponential widening is
+ // better here.
+ //
+ // return WordOperationTyper<32>::WidenExponential(old_type.AsWord32(),
+ // new_type.AsWord32(), graph_zone_);
+ return WordOperationTyper<32>::WidenMaximal(
+ old_type.AsWord32(), new_type.AsWord32(), graph_zone_);
+ case Type::Kind::kWord64:
+ // TODO(nicohartmann@): Reevaluate whether exponential widening is
+ // better here.
+ //
+ // return WordOperationTyper<64>::WidenExponential(old_type.AsWord64(),
+ // new_type.AsWord64(), graph_zone_);
+ return WordOperationTyper<64>::WidenMaximal(
+ old_type.AsWord64(), new_type.AsWord64(), graph_zone_);
+ case Type::Kind::kFloat32:
+ // TODO(nicohartmann@): Implement proper widening.
+ return Float32Type::Any();
+ case Type::Kind::kFloat64:
+ // TODO(nicohartmann@): Implement proper widening.
+ return Float64Type::Any();
+ default:
+ // TODO(nicohartmann@): Handle remaining cases.
+ UNREACHABLE();
+ }
+ }
+
+ private:
+ const Graph& graph_;
+ GrowingSidetable<Type> types_;
+ using table_t = SnapshotTable<Type>;
+ table_t table_;
+ const Block* current_block_ = nullptr;
+ GrowingSidetable<base::Optional<table_t::Key>> op_to_key_mapping_;
+ GrowingBlockSidetable<base::Optional<table_t::Snapshot>>
+ block_to_snapshot_mapping_;
+ // {predecessors_} is used during merging, but we use an instance variable for
+ // it, in order to save memory and not reallocate it for each merge.
+ ZoneVector<table_t::Snapshot> predecessors_;
+ Zone* graph_zone_;
+
+#ifdef DEBUG
+ // {block_refinements_} are only stored for tracing in Debug builds.
+ GrowingBlockSidetable<std::vector<std::pair<OpIndex, Type>>>*
+ block_refinements_ = nullptr;
+#endif
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_ANALYSIS_H_
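For illustration, a simplified stand-alone model of the loop-phi widening and
revisit logic described above (not V8 code; closed integer ranges stand in for
Turboshaft's word types, and kMin/kMax are arbitrary model bounds):

  #include <algorithm>
  #include <cstdio>

  constexpr int kMin = -1000000;  // stand-ins for the representation bounds
  constexpr int kMax = 1000000;

  struct Range {
    int lo, hi;  // a closed integer range type
  };

  Range LeastUpperBound(Range a, Range b) {
    return {std::min(a.lo, b.lo), std::max(a.hi, b.hi)};
  }
  bool IsSubtypeOf(Range a, Range b) {
    return b.lo <= a.lo && a.hi <= b.hi;
  }

  // In the spirit of WidenMaximal: any bound that grew jumps straight to the
  // representation bound, which keeps the number of revisits small.
  Range Widen(Range old_type, Range new_type) {
    return {new_type.lo < old_type.lo ? kMin : old_type.lo,
            new_type.hi > old_type.hi ? kMax : old_type.hi};
  }

  // The loop body computes i + 1 (saturated at the model bounds).
  Range AddOne(Range r) {
    return {std::min(r.lo + 1, kMax), std::min(r.hi + 1, kMax)};
  }

  int main() {
    Range phi{0, 0};  // type contributed by the phi's forward input (i = 0)
    while (true) {
      // Recompute the phi's type from its inputs, as ProcessLoopPhi does when
      // the loop header is revisited.
      Range merged = LeastUpperBound(phi, AddOne(phi));
      if (IsSubtypeOf(merged, phi)) break;  // Fixpoint: no revisit needed.
      phi = Widen(phi, merged);             // Grow, then revisit the loop.
      std::printf("revisiting loop with phi type [%d, %d]\n", phi.lo, phi.hi);
    }
    return 0;
  }

With maximal widening the model reaches its fixpoint after a single revisit;
the exponential widening mentioned in the TODOs would trade a few more revisits
for tighter upper bounds.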
diff --git a/deps/v8/src/compiler/turboshaft/type-inference-reducer.h b/deps/v8/src/compiler/turboshaft/type-inference-reducer.h
new file mode 100644
index 0000000000..c6e039a5dc
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/type-inference-reducer.h
@@ -0,0 +1,557 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_REDUCER_H_
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/vector.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/optimization-phase.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/sidetable.h"
+#include "src/compiler/turboshaft/snapshot-table.h"
+#include "src/compiler/turboshaft/tracing.h"
+#include "src/compiler/turboshaft/type-inference-analysis.h"
+#include "src/compiler/turboshaft/typer.h"
+#include "src/compiler/turboshaft/types.h"
+#include "src/compiler/turboshaft/uniform-reducer-adapter.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+template <typename Op>
+V8_INLINE bool CanBeTyped(const Op& operation) {
+ return operation.outputs_rep().size() > 0;
+}
+
+struct TypeInferenceReducerArgs {
+ enum class InputGraphTyping {
+ kNone, // Do not compute types for the input graph.
+ kPrecise, // Run a complete fixpoint analysis on the input graph.
+ };
+ enum class OutputGraphTyping {
+ kNone, // Do not compute types for the output graph.
+ kPreserveFromInputGraph, // Reuse types of the input graph where
+ // possible.
+ kRefineFromInputGraph, // Reuse types of the input graph and compute types
+ // for new nodes and more precise types where
+ // possible.
+ };
+ Isolate* isolate;
+ InputGraphTyping input_graph_typing;
+ OutputGraphTyping output_graph_typing;
+};
+
+// TypeInferenceReducer is the central component to infer types for Turboshaft
+// graphs. It comes with different options for how the input and output graph
+// should be typed:
+//
+// - InputGraphTyping::kNone: No types are computed for the input graph.
+// - InputGraphTyping::kPrecise: We run a full fixpoint analysis on the input
+// graph to infer the most precise types possible (see TypeInferenceAnalysis).
+//
+// - OutputGraphTyping::kNone: No types will be set for the output graph.
+// - OutputGraphTyping::kPreserveFromInputGraph: Types from the input graph will
+// be preserved for the output graph. Where this is not possible (e.g. new
+// operations introduced during lowering), the output operation will be untyped.
+// - OutputGraphTyping::kRefineFromInputGraph: Types from the input graph will
+// be used where they provide additional precision (e.g. loop phis). For new
+// operations, the reducer reruns the typer to make sure that the output graph
+// is fully typed.
+//
+// NOTE: The TypeInferenceReducer has to be the last reducer in the stack!
+template <class Next>
+class TypeInferenceReducer
+ : public UniformReducerAdapter<TypeInferenceReducer, Next> {
+ static_assert(next_is_bottom_of_assembler_stack<Next>::value);
+ using table_t = SnapshotTable<Type>;
+
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ using Adapter = UniformReducerAdapter<TypeInferenceReducer, Next>;
+ using Args = TypeInferenceReducerArgs;
+ using ArgT = base::append_tuple_type<typename Next::ArgT, Args>;
+
+ template <typename... Ts>
+ explicit TypeInferenceReducer(const std::tuple<Ts...>& args)
+ : Adapter(args),
+ args_(std::get<Args>(args)),
+ input_graph_types_(Asm().graph_zone()),
+ output_graph_types_(Asm().output_graph().operation_types()),
+ table_(Asm().phase_zone()),
+ op_to_key_mapping_(Asm().phase_zone()),
+ block_to_snapshot_mapping_(Asm().input_graph().block_count(),
+ base::nullopt, Asm().phase_zone()),
+ predecessors_(Asm().phase_zone()),
+ analyzer_(Asm().modifiable_input_graph(), Asm().phase_zone()) {
+ // It is not reasonable to try to reuse input graph types if there are none.
+ DCHECK_IMPLIES(args_.output_graph_typing ==
+ Args::OutputGraphTyping::kPreserveFromInputGraph,
+ args_.input_graph_typing != Args::InputGraphTyping::kNone);
+ }
+
+ void Analyze() {
+ if (args_.input_graph_typing == Args::InputGraphTyping::kPrecise) {
+#ifdef DEBUG
+ GrowingBlockSidetable<std::vector<std::pair<OpIndex, Type>>>
+ block_refinements(Asm().input_graph().block_count(), {},
+ Asm().phase_zone());
+ input_graph_types_ = analyzer_.Run(&block_refinements);
+ Tracing::Get().PrintPerBlockData(
+ "Type Refinements", Asm().input_graph(),
+ [&](std::ostream& stream, const turboshaft::Graph& graph,
+ turboshaft::BlockIndex index) -> bool {
+ const std::vector<std::pair<turboshaft::OpIndex, turboshaft::Type>>&
+ refinements = block_refinements[index];
+ if (refinements.empty()) return false;
+ stream << "\\n";
+ for (const auto& [op, type] : refinements) {
+ stream << op << " : " << type << "\\n";
+ }
+ return true;
+ });
+#else
+ input_graph_types_ = analyzer_.Run(nullptr);
+#endif // DEBUG
+ Tracing::Get().PrintPerOperationData(
+ "Types", Asm().input_graph(),
+ [&](std::ostream& stream, const turboshaft::Graph& graph,
+ turboshaft::OpIndex index) -> bool {
+ turboshaft::Type type = input_graph_types_[index];
+ if (!type.IsInvalid() && !type.IsNone()) {
+ type.PrintTo(stream);
+ return true;
+ }
+ return false;
+ });
+ }
+ Next::Analyze();
+ }
+
+ Type GetInputGraphType(OpIndex ig_index) {
+ return input_graph_types_[ig_index];
+ }
+
+ Type GetOutputGraphType(OpIndex og_index) { return GetType(og_index); }
+
+ template <Opcode opcode, typename Continuation, typename... Ts>
+ OpIndex ReduceOperation(Ts... args) {
+ OpIndex index = Continuation{this}.Reduce(args...);
+ if (!NeedsTyping(index)) return index;
+
+ const Operation& op = Asm().output_graph().Get(index);
+ if (CanBeTyped(op)) {
+ Type type = Typer::TypeForRepresentation(
+ Asm().output_graph().Get(index).outputs_rep(), Asm().graph_zone());
+ SetType(index, type, true);
+ }
+ return index;
+ }
+
+ template <typename Op, typename Continuation>
+ OpIndex ReduceInputGraphOperation(OpIndex ig_index, const Op& operation) {
+ OpIndex og_index = Continuation{this}.ReduceInputGraph(ig_index, operation);
+ if (!og_index.valid()) return og_index;
+ if (args_.output_graph_typing == Args::OutputGraphTyping::kNone) {
+ return og_index;
+ }
+ if (!CanBeTyped(operation)) return og_index;
+
+ Type ig_type = GetInputGraphType(ig_index);
+ DCHECK_IMPLIES(args_.input_graph_typing != Args::InputGraphTyping::kNone,
+ !ig_type.IsInvalid());
+ if (!ig_type.IsInvalid()) {
+ Type og_type = GetType(og_index);
+ // If the type we have from the input graph is more precise, we keep it.
+ if (og_type.IsInvalid() ||
+ (ig_type.IsSubtypeOf(og_type) && !og_type.IsSubtypeOf(ig_type))) {
+ RefineTypeFromInputGraph(og_index, og_type, ig_type);
+ }
+ }
+ return og_index;
+ }
+
+ void Bind(Block* new_block) {
+ Next::Bind(new_block);
+
+ // Seal the current block first.
+ if (table_.IsSealed()) {
+ DCHECK_NULL(current_block_);
+ } else {
+ // If we bind a new block while the previous one is still unsealed, we
+ // finalize it.
+ DCHECK_NOT_NULL(current_block_);
+ DCHECK(current_block_->index().valid());
+ block_to_snapshot_mapping_[current_block_->index()] = table_.Seal();
+ current_block_ = nullptr;
+ }
+
+ // Collect the snapshots of all predecessors.
+ {
+ predecessors_.clear();
+ for (const Block* pred = new_block->LastPredecessor(); pred != nullptr;
+ pred = pred->NeighboringPredecessor()) {
+ base::Optional<table_t::Snapshot> pred_snapshot =
+ block_to_snapshot_mapping_[pred->index()];
+ DCHECK(pred_snapshot.has_value());
+ predecessors_.push_back(pred_snapshot.value());
+ }
+ std::reverse(predecessors_.begin(), predecessors_.end());
+ }
+
+ // Start a new snapshot for this block by merging information from
+ // predecessors.
+ {
+ auto MergeTypes = [&](table_t::Key,
+ base::Vector<Type> predecessors) -> Type {
+ DCHECK_GT(predecessors.size(), 0);
+ Type result_type = predecessors[0];
+ for (size_t i = 1; i < predecessors.size(); ++i) {
+ result_type = Type::LeastUpperBound(result_type, predecessors[i],
+ Asm().graph_zone());
+ }
+ return result_type;
+ };
+
+ table_.StartNewSnapshot(base::VectorOf(predecessors_), MergeTypes);
+ }
+
+ // Check if the predecessor is a branch that allows us to refine a few
+ // types.
+ if (args_.output_graph_typing ==
+ Args::OutputGraphTyping::kRefineFromInputGraph) {
+ if (new_block->HasExactlyNPredecessors(1)) {
+ Block* predecessor = new_block->LastPredecessor();
+ const Operation& terminator =
+ predecessor->LastOperation(Asm().output_graph());
+ if (const BranchOp* branch = terminator.TryCast<BranchOp>()) {
+ DCHECK(branch->if_true == new_block || branch->if_false == new_block);
+ RefineTypesAfterBranch(branch, new_block,
+ branch->if_true == new_block);
+ }
+ }
+ }
+ current_block_ = new_block;
+ }
+
+ void RefineTypesAfterBranch(const BranchOp* branch, Block* new_block,
+ bool then_branch) {
+ const std::string branch_str = branch->ToString().substr(0, 40);
+ USE(branch_str);
+ TURBOSHAFT_TRACE_TYPING_OK("Br %3d:%-40s\n",
+ Asm().output_graph().Index(*branch).id(),
+ branch_str.c_str());
+
+ Typer::BranchRefinements refinements(
+ [this](OpIndex index) { return GetType(index); },
+ [&](OpIndex index, const Type& refined_type) {
+ RefineOperationType(new_block, index, refined_type,
+ then_branch ? 'T' : 'F');
+ });
+
+ // Inspect branch condition.
+ const Operation& condition = Asm().output_graph().Get(branch->condition());
+ refinements.RefineTypes(condition, then_branch, Asm().graph_zone());
+ }
+
+ void RefineOperationType(Block* new_block, OpIndex op, const Type& type,
+ char case_for_tracing) {
+ DCHECK(op.valid());
+ DCHECK(!type.IsInvalid());
+
+ TURBOSHAFT_TRACE_TYPING_OK(
+ " %c: %3d:%-40s ~~> %s\n", case_for_tracing, op.id(),
+ Asm().output_graph().Get(op).ToString().substr(0, 40).c_str(),
+ type.ToString().c_str());
+
+ auto key_opt = op_to_key_mapping_[op];
+ // We might not have a key for this value, because we are running in a mode
+ // where we don't type all operations.
+ if (key_opt.has_value()) {
+ table_.Set(*key_opt, type);
+
+#ifdef DEBUG
+ std::vector<std::pair<OpIndex, Type>>& refinement =
+ Asm().output_graph().block_type_refinement()[new_block->index()];
+ refinement.push_back(std::make_pair(op, type));
+#endif
+
+ // TODO(nicohartmann@): One could push the refined type deeper into the
+ // operations.
+ }
+ }
+
+ OpIndex ReducePendingLoopPhi(OpIndex first, RegisterRepresentation rep,
+ PendingLoopPhiOp::Data data) {
+ OpIndex index = Next::ReducePendingLoopPhi(first, rep, data);
+ if (!NeedsTyping(index)) return index;
+
+ // There is not much we can do for pending loop phis, because we don't know
+ // the type of the backedge yet, so we have to assume maximal type. If we
+ // run with a typed input graph, we can refine this type using the input
+ // graph's type (see ReduceInputGraphOperation).
+ SetType(index, Typer::TypeForRepresentation(rep));
+ return index;
+ }
+
+ OpIndex ReducePhi(base::Vector<const OpIndex> inputs,
+ RegisterRepresentation rep) {
+ OpIndex index = Next::ReducePhi(inputs, rep);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Type::None();
+ for (const OpIndex input : inputs) {
+ type = Type::LeastUpperBound(type, GetType(input), Asm().graph_zone());
+ }
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceConstant(ConstantOp::Kind kind, ConstantOp::Storage value) {
+ OpIndex index = Next::ReduceConstant(kind, value);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Typer::TypeConstant(kind, value);
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceComparison(OpIndex left, OpIndex right, ComparisonOp::Kind kind,
+ RegisterRepresentation rep) {
+ OpIndex index = Next::ReduceComparison(left, right, kind, rep);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Typer::TypeComparison(GetType(left), GetType(right), rep, kind,
+ Asm().graph_zone());
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceProjection(OpIndex input, uint16_t idx,
+ RegisterRepresentation rep) {
+ OpIndex index = Next::ReduceProjection(input, idx, rep);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Typer::TypeProjection(GetType(input), idx);
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceWordBinop(OpIndex left, OpIndex right, WordBinopOp::Kind kind,
+ WordRepresentation rep) {
+ OpIndex index = Next::ReduceWordBinop(left, right, kind, rep);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Typer::TypeWordBinop(GetType(left), GetType(right), kind, rep,
+ Asm().graph_zone());
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceOverflowCheckedBinop(OpIndex left, OpIndex right,
+ OverflowCheckedBinopOp::Kind kind,
+ WordRepresentation rep) {
+ OpIndex index = Next::ReduceOverflowCheckedBinop(left, right, kind, rep);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Typer::TypeOverflowCheckedBinop(GetType(left), GetType(right),
+ kind, rep, Asm().graph_zone());
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceFloatBinop(OpIndex left, OpIndex right, FloatBinopOp::Kind kind,
+ FloatRepresentation rep) {
+ OpIndex index = Next::ReduceFloatBinop(left, right, kind, rep);
+ if (!NeedsTyping(index)) return index;
+
+ Type type = Typer::TypeFloatBinop(GetType(left), GetType(right), kind, rep,
+ Asm().graph_zone());
+ SetType(index, type);
+ return index;
+ }
+
+ OpIndex ReduceCheckTurboshaftTypeOf(OpIndex input, RegisterRepresentation rep,
+ Type type, bool successful) {
+ Type input_type = GetType(input);
+ if (input_type.IsSubtypeOf(type)) {
+ OpIndex index = Next::ReduceCheckTurboshaftTypeOf(input, rep, type, true);
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "CTOF %3d:%-40s\n P: %3d:%-40s ~~> %s\n", index.id(),
+ Asm().output_graph().Get(index).ToString().substr(0, 40).c_str(),
+ input.id(),
+ Asm().output_graph().Get(input).ToString().substr(0, 40).c_str(),
+ input_type.ToString().c_str());
+ return index;
+ }
+ if (successful) {
+ FATAL(
+ "Checking type %s of operation %d:%s failed after it passed in a "
+ "previous phase",
+ type.ToString().c_str(), input.id(),
+ Asm().output_graph().Get(input).ToString().c_str());
+ }
+ OpIndex index =
+ Next::ReduceCheckTurboshaftTypeOf(input, rep, type, successful);
+ TURBOSHAFT_TRACE_TYPING_FAIL(
+ "CTOF %3d:%-40s\n F: %3d:%-40s ~~> %s\n", index.id(),
+ Asm().output_graph().Get(index).ToString().substr(0, 40).c_str(),
+ input.id(),
+ Asm().output_graph().Get(input).ToString().substr(0, 40).c_str(),
+ input_type.ToString().c_str());
+ return index;
+ }
+
+ void RemoveLast(OpIndex index_of_last_operation) {
+ if (auto key_opt = op_to_key_mapping_[index_of_last_operation]) {
+ op_to_key_mapping_[index_of_last_operation] = base::nullopt;
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "REM %3d:%-40s %-40s\n", index_of_last_operation.id(),
+ Asm()
+ .output_graph()
+ .Get(index_of_last_operation)
+ .ToString()
+ .substr(0, 40)
+ .c_str(),
+ GetType(index_of_last_operation).ToString().substr(0, 40).c_str());
+ output_graph_types_[index_of_last_operation] = Type::Invalid();
+ }
+ Next::RemoveLast(index_of_last_operation);
+ }
+
+ private:
+ void RefineTypeFromInputGraph(OpIndex index, const Type& og_type,
+ const Type& ig_type) {
+ // Refinement should happen when we just lowered the corresponding
+ // operation, so we should be at the point where the operation is defined
+ // (e.g. not in a refinement after a branch). So the current block must
+ // contain the operation.
+ DCHECK(!ig_type.IsInvalid());
+
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "Refi %3d:%-40s\n I: %-40s ~~> %-40s\n", index.id(),
+ Asm().output_graph().Get(index).ToString().substr(0, 40).c_str(),
+ (og_type.IsInvalid() ? "invalid" : og_type.ToString().c_str()),
+ ig_type.ToString().c_str());
+
+ SetType(index, ig_type);
+ }
+
+ Type GetTypeOrInvalid(OpIndex index) {
+ if (auto key = op_to_key_mapping_[index]) return table_.Get(*key);
+ return Type::Invalid();
+ }
+
+ Type GetType(OpIndex index) {
+ Type type = GetTypeOrInvalid(index);
+ if (type.IsInvalid()) {
+ const Operation& op = Asm().output_graph().Get(index);
+ return Typer::TypeForRepresentation(op.outputs_rep(), Asm().graph_zone());
+ }
+ return type;
+ }
+
+ void SetType(OpIndex index, const Type& result_type,
+ bool is_fallback_for_unsupported_operation = false) {
+ DCHECK(!result_type.IsInvalid());
+
+ if (auto key_opt = op_to_key_mapping_[index]) {
+ table_.Set(*key_opt, result_type);
+ DCHECK(result_type.IsSubtypeOf(output_graph_types_[index]));
+ output_graph_types_[index] = result_type;
+ DCHECK(!output_graph_types_[index].IsInvalid());
+ } else {
+ auto key = table_.NewKey(Type::None());
+ op_to_key_mapping_[index] = key;
+ table_.Set(key, result_type);
+ output_graph_types_[index] = result_type;
+ }
+
+ if (!is_fallback_for_unsupported_operation) {
+ TURBOSHAFT_TRACE_TYPING_OK(
+ "Type %3d:%-40s ==> %s\n", index.id(),
+ Asm().output_graph().Get(index).ToString().substr(0, 40).c_str(),
+ result_type.ToString().c_str());
+ } else {
+ // TODO(nicohartmann@): Remove the fallback case once all operations are
+ // supported.
+ TURBOSHAFT_TRACE_TYPING_FAIL(
+ "TODO %3d:%-40s ==> %s\n", index.id(),
+ Asm().output_graph().Get(index).ToString().substr(0, 40).c_str(),
+ result_type.ToString().c_str());
+ }
+ }
+
+// Verification is more difficult, now that the output graph uses types from the
+// input graph. It is generally not possible to verify that the output graph's
+// type is a subtype of the input graph's type, because the typer might not
+// support a precise typing of the operations after the lowering.
+// TODO(nicohartmann@): Evaluate new strategies for verification.
+#if 0
+#ifdef DEBUG
+ void Verify(OpIndex input_index, OpIndex output_index) {
+ DCHECK(input_index.valid());
+ DCHECK(output_index.valid());
+
+ const auto& input_type = Asm().input_graph().operation_types()[input_index];
+ const auto& output_type = types_[output_index];
+
+ if (input_type.IsInvalid()) return;
+ DCHECK(!output_type.IsInvalid());
+
+ const bool is_okay = output_type.IsSubtypeOf(input_type);
+
+ TURBOSHAFT_TRACE_TYPING(
+ "\033[%s %3d:%-40s %-40s\n %3d:%-40s %-40s\033[0m\n",
+ is_okay ? "32mOK " : "31mFAIL", input_index.id(),
+ Asm().input_graph().Get(input_index).ToString().substr(0, 40).c_str(),
+ input_type.ToString().substr(0, 40).c_str(), output_index.id(),
+ Asm().output_graph().Get(output_index).ToString().substr(0, 40).c_str(),
+ output_type.ToString().substr(0, 40).c_str());
+
+ if (V8_UNLIKELY(!is_okay)) {
+ FATAL(
+ "\033[%s %3d:%-40s %-40s\n %3d:%-40s %-40s\033[0m\n",
+ is_okay ? "32mOK " : "31mFAIL", input_index.id(),
+ Asm().input_graph().Get(input_index).ToString().substr(0, 40).c_str(),
+ input_type.ToString().substr(0, 40).c_str(), output_index.id(),
+ Asm()
+ .output_graph()
+ .Get(output_index)
+ .ToString()
+ .substr(0, 40)
+ .c_str(),
+ output_type.ToString().substr(0, 40).c_str());
+ }
+ }
+#endif
+#endif
+
+ bool NeedsTyping(OpIndex index) const {
+ return index.valid() && args_.output_graph_typing ==
+ Args::OutputGraphTyping::kRefineFromInputGraph;
+ }
+
+ Args args_;
+ GrowingSidetable<Type> input_graph_types_;
+ GrowingSidetable<Type>& output_graph_types_;
+ table_t table_;
+ const Block* current_block_ = nullptr;
+ GrowingSidetable<base::Optional<table_t::Key>> op_to_key_mapping_;
+ GrowingBlockSidetable<base::Optional<table_t::Snapshot>>
+ block_to_snapshot_mapping_;
+ // {predecessors_} is used during merging, but we use an instance variable for
+ // it, in order to save memory and not reallocate it for each merge.
+ ZoneVector<table_t::Snapshot> predecessors_;
+ TypeInferenceAnalysis analyzer_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_REDUCER_H_
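For illustration, a sketch of how a hypothetical phase (not part of the patch)
could run this reducer so that the output graph ends up fully typed; it mirrors
the invocation style of TypeAssertionsPhase above, and the name MyTypingPhase
is an assumption:

  void MyTypingPhase(PipelineData* data, Zone* temp_zone) {
    turboshaft::TypeInferenceReducerArgs typing_args{
        data->isolate(),
        turboshaft::TypeInferenceReducerArgs::InputGraphTyping::kPrecise,
        turboshaft::TypeInferenceReducerArgs::OutputGraphTyping::
            kRefineFromInputGraph};

    // TypeInferenceReducer has to be the last reducer in the stack.
    turboshaft::OptimizationPhase<turboshaft::TypeInferenceReducer>::Run(
        data->isolate(), &data->graph(), temp_zone, data->node_origins(),
        std::tuple{typing_args});
  }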
diff --git a/deps/v8/src/compiler/turboshaft/type-parser.cc b/deps/v8/src/compiler/turboshaft/type-parser.cc
new file mode 100644
index 0000000000..15fe1ec926
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/type-parser.cc
@@ -0,0 +1,33 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/type-parser.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+base::Optional<Type> TypeParser::ParseType() {
+ if (ConsumeIf("Word32")) {
+ if (IsNext("{")) return ParseSet<Word32Type>();
+ if (IsNext("[")) return ParseRange<Word32Type>();
+ return Word32Type::Any();
+ } else if (ConsumeIf("Word64")) {
+ if (IsNext("{")) return ParseSet<Word64Type>();
+ if (IsNext("[")) return ParseRange<Word64Type>();
+ return Word64Type::Any();
+ } else if (ConsumeIf("Float32")) {
+ // TODO(nicohartmann@): Handle NaN.
+ if (IsNext("{")) return ParseSet<Float32Type>();
+ if (IsNext("[")) return ParseRange<Float32Type>();
+    return Float32Type::Any();
+ } else if (ConsumeIf("Float64")) {
+ // TODO(nicohartmann@): Handle NaN.
+ if (IsNext("{")) return ParseSet<Float64Type>();
+ if (IsNext("[")) return ParseRange<Float64Type>();
+ return Float64Type::Any();
+ } else {
+ return base::nullopt;
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/type-parser.h b/deps/v8/src/compiler/turboshaft/type-parser.h
new file mode 100644
index 0000000000..fa0a747b46
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/type-parser.h
@@ -0,0 +1,124 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPE_PARSER_H_
+#define V8_COMPILER_TURBOSHAFT_TYPE_PARSER_H_
+
+#include "src/compiler/turboshaft/types.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// TypeParser is used to construct a Type from a string literal.
+// Its primary use is the %CheckTurboshaftTypeOf intrinsic, which allows
+// mjsunit tests to check the static type of expressions. Typically the string
+// has to have the format that Type::ToString() would produce.
+//
+// Examples: "Word32", "Word64[30, 100]", "Float32{-1.02}", "Float64{3.2, 17.8}"
+class TypeParser {
+ public:
+ explicit TypeParser(const std::string_view& str, Zone* zone)
+ : str_(str), zone_(zone) {}
+
+ base::Optional<Type> Parse() {
+ base::Optional<Type> type = ParseType();
+ // Skip trailing whitespace.
+ while (pos_ < str_.length() && str_[pos_] == ' ') ++pos_;
+ if (pos_ < str_.length()) return base::nullopt;
+ return type;
+ }
+
+ private:
+ base::Optional<Type> ParseType();
+
+ template <typename T>
+ base::Optional<T> ParseRange() {
+ if (!ConsumeIf("[")) return base::nullopt;
+ auto from = ReadValue<typename T::value_type>();
+ if (!from) return base::nullopt;
+ if (!ConsumeIf(",")) return base::nullopt;
+ auto to = ReadValue<typename T::value_type>();
+ if (!to) return base::nullopt;
+ if (!ConsumeIf("]")) return base::nullopt;
+ if constexpr (!std::is_same_v<T, Word32Type> &&
+ !std::is_same_v<T, Word64Type>) {
+ CHECK_LE(*from, *to);
+ }
+ return T::Range(*from, *to, zone_);
+ }
+
+ template <typename T>
+ base::Optional<T> ParseSet() {
+ if (!ConsumeIf("{")) return base::nullopt;
+ auto elements = ParseSetElements<typename T::value_type>();
+ if (!elements) return base::nullopt;
+ if (!ConsumeIf("}")) return base::nullopt;
+ CHECK_LT(0, elements->size());
+ CHECK_LE(elements->size(), T::kMaxSetSize);
+ return T::Set(*elements, zone_);
+ }
+
+ template <typename T>
+ base::Optional<std::vector<T>> ParseSetElements() {
+ std::vector<T> elements;
+ if (IsNext("}")) return elements;
+ while (true) {
+ auto element_opt = ReadValue<T>();
+ if (!element_opt) return base::nullopt;
+ elements.push_back(*element_opt);
+
+ if (IsNext("}")) break;
+ if (!ConsumeIf(",")) return base::nullopt;
+ }
+ base::sort(elements);
+ elements.erase(std::unique(elements.begin(), elements.end()),
+ elements.end());
+ return elements;
+ }
+
+ bool ConsumeIf(const std::string_view& prefix) {
+ if (IsNext(prefix)) {
+ pos_ += prefix.length();
+ return true;
+ }
+ return false;
+ }
+
+ bool IsNext(const std::string_view& prefix) {
+ // Skip leading whitespace.
+ while (pos_ < str_.length() && str_[pos_] == ' ') ++pos_;
+ if (pos_ >= str_.length()) return false;
+ size_t remaining_length = str_.length() - pos_;
+ if (prefix.length() > remaining_length) return false;
+ return str_.compare(pos_, prefix.length(), prefix, 0, prefix.length()) == 0;
+ }
+
+ template <typename T>
+ base::Optional<T> ReadValue() {
+ T result;
+ size_t read = 0;
+ // TODO(nicohartmann@): Ideally we want to avoid this string construction
+ // (e.g. using std::from_chars).
+ std::string s(str_.cbegin() + pos_, str_.cend());
+ if constexpr (std::is_same_v<T, uint32_t>) {
+ result = static_cast<uint32_t>(std::stoul(s, &read));
+ } else if constexpr (std::is_same_v<T, uint64_t>) {
+ result = std::stoull(s, &read);
+ } else if constexpr (std::is_same_v<T, float>) {
+ result = std::stof(s, &read);
+ } else if constexpr (std::is_same_v<T, double>) {
+ result = std::stod(s, &read);
+ }
+ if (read == 0) return base::nullopt;
+ pos_ += read;
+ return result;
+ }
+
+ std::string_view str_;
+ Zone* zone_;
+ size_t pos_ = 0;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPE_PARSER_H_
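For orientation, a few example inputs and the results Parse() produces, written as a sketch (the `zone` argument stands for any valid Zone and is an assumption of the example, not part of the patch):

  // TypeParser("Word32", zone).Parse()              -> the full Word32 range (Any)
  // TypeParser("Word64[30, 100]", zone).Parse()     -> the Word64 range [30, 100]
  // TypeParser("Float64{3.2, 17.8}", zone).Parse()  -> the Float64 set {3.2, 17.8}
  // TypeParser("Word32[30", zone).Parse()           -> base::nullopt (malformed input)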
diff --git a/deps/v8/src/compiler/turboshaft/typed-optimizations-phase.cc b/deps/v8/src/compiler/turboshaft/typed-optimizations-phase.cc
new file mode 100644
index 0000000000..9b40e28369
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/typed-optimizations-phase.cc
@@ -0,0 +1,30 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/typed-optimizations-phase.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/turboshaft/type-inference-reducer.h"
+#include "src/compiler/turboshaft/typed-optimizations-reducer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void TypedOptimizationsPhase::Run(PipelineData* data, Zone* temp_zone) {
+#ifdef DEBUG
+ UnparkedScopeIfNeeded scope(data->broker(), v8_flags.turboshaft_trace_typing);
+#endif
+
+ turboshaft::TypeInferenceReducerArgs typing_args{
+ data->isolate(),
+ turboshaft::TypeInferenceReducerArgs::InputGraphTyping::kPrecise,
+ turboshaft::TypeInferenceReducerArgs::OutputGraphTyping::kNone};
+
+ turboshaft::OptimizationPhase<
+ turboshaft::TypedOptimizationsReducer,
+ turboshaft::TypeInferenceReducer>::Run(data->isolate(), &data->graph(),
+ temp_zone, data->node_origins(),
+ {typing_args});
+}
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/typed-optimizations-phase.h b/deps/v8/src/compiler/turboshaft/typed-optimizations-phase.h
new file mode 100644
index 0000000000..21440a53e9
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/typed-optimizations-phase.h
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_PHASE_H_
+#define V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_PHASE_H_
+
+#include "src/compiler/turboshaft/phase.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+struct TypedOptimizationsPhase {
+ DECL_TURBOSHAFT_PHASE_CONSTANTS(TypedOptimizations)
+
+ void Run(PipelineData* data, Zone* temp_zone);
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_PHASE_H_
diff --git a/deps/v8/src/compiler/turboshaft/typed-optimizations-reducer.h b/deps/v8/src/compiler/turboshaft/typed-optimizations-reducer.h
new file mode 100644
index 0000000000..be62b77f20
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/typed-optimizations-reducer.h
@@ -0,0 +1,129 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_REDUCER_H_
+
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/index.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/typer.h"
+#include "src/compiler/turboshaft/uniform-reducer-adapter.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+template <typename>
+class TypeInferenceReducer;
+
+template <typename Next>
+class TypedOptimizationsReducer
+ : public UniformReducerAdapter<TypedOptimizationsReducer, Next> {
+ // Typed optimizations require a typed graph.
+ // TODO(nicohartmann@): Reenable this in a way that compiles with msvc light.
+ // static_assert(next_contains_reducer<Next, TypeInferenceReducer>::value);
+
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ using Adapter = UniformReducerAdapter<TypedOptimizationsReducer, Next>;
+
+ template <typename... Args>
+ explicit TypedOptimizationsReducer(const std::tuple<Args...>& args)
+ : Adapter(args) {}
+
+ OpIndex ReduceInputGraphBranch(OpIndex ig_index, const BranchOp& operation) {
+ Type condition_type = GetType(operation.condition());
+ if (!condition_type.IsInvalid()) {
+ if (condition_type.IsNone()) {
+ Asm().Unreachable();
+ return OpIndex::Invalid();
+ }
+ condition_type =
+ Typer::TruncateWord32Input(condition_type, true, Asm().graph_zone());
+ DCHECK(condition_type.IsWord32());
+ if (auto c = condition_type.AsWord32().try_get_constant()) {
+ Block* goto_target = *c == 0 ? operation.if_false : operation.if_true;
+ Asm().Goto(goto_target->MapToNextGraph());
+ return OpIndex::Invalid();
+ }
+ }
+ return Adapter::ReduceInputGraphBranch(ig_index, operation);
+ }
+
+ template <typename Op, typename Continuation>
+ OpIndex ReduceInputGraphOperation(OpIndex ig_index, const Op& operation) {
+ Type type = GetType(ig_index);
+ if (type.IsNone()) {
+ // This operation is dead. Remove it.
+ DCHECK(CanBeTyped(operation));
+ return OpIndex::Invalid();
+ } else if (!type.IsInvalid()) {
+ // See if we can replace the operation by a constant.
+ if (OpIndex constant = TryAssembleConstantForType(type);
+ constant.valid()) {
+ return constant;
+ }
+ }
+
+ // Otherwise just continue with reduction.
+ return Continuation{this}.ReduceInputGraph(ig_index, operation);
+ }
+
+ private:
+ // If {type} is a single value that can be represented by a constant, this
+ // function returns the index for a corresponding ConstantOp. It returns
+ // OpIndex::Invalid otherwise.
+ OpIndex TryAssembleConstantForType(const Type& type) {
+ switch (type.kind()) {
+ case Type::Kind::kWord32: {
+ auto w32 = type.AsWord32();
+ if (auto c = w32.try_get_constant()) {
+ return Asm().Word32Constant(*c);
+ }
+ break;
+ }
+ case Type::Kind::kWord64: {
+ auto w64 = type.AsWord64();
+ if (auto c = w64.try_get_constant()) {
+ return Asm().Word64Constant(*c);
+ }
+ break;
+ }
+ case Type::Kind::kFloat32: {
+ auto f32 = type.AsFloat32();
+ if (f32.is_only_nan()) {
+ return Asm().Float32Constant(nan_v<32>);
+ } else if (f32.is_only_minus_zero()) {
+ return Asm().Float32Constant(-0.0f);
+ } else if (auto c = f32.try_get_constant()) {
+ return Asm().Float32Constant(*c);
+ }
+ break;
+ }
+ case Type::Kind::kFloat64: {
+ auto f64 = type.AsFloat64();
+ if (f64.is_only_nan()) {
+ return Asm().Float64Constant(nan_v<64>);
+ } else if (f64.is_only_minus_zero()) {
+ return Asm().Float64Constant(-0.0);
+ } else if (auto c = f64.try_get_constant()) {
+ return Asm().Float64Constant(*c);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return OpIndex::Invalid();
+ }
+
+ Type GetType(const OpIndex index) {
+ // Typed optimizations use the types of the input graph.
+ return Asm().GetInputGraphType(index);
+ }
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_REDUCER_H_
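To summarize, a sketch of the rewrites this reducer performs, with illustrative values (derived from the code above, not additional patch content):

  // input-graph type None       -> the operation is dead and is dropped
  //                                (a Branch with a None condition becomes Unreachable)
  // input-graph type Word32{42} -> the operation is replaced by Word32Constant(42)
  // Branch with constant-typed condition -> rewritten into a Goto to if_true
  //                                (non-zero constant) or if_false (zero)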
diff --git a/deps/v8/src/compiler/turboshaft/typer.cc b/deps/v8/src/compiler/turboshaft/typer.cc
new file mode 100644
index 0000000000..f007f26e35
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/typer.cc
@@ -0,0 +1,99 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/typer.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+void Typer::BranchRefinements::RefineTypes(const Operation& condition,
+ bool then_branch, Zone* zone) {
+ if (const ComparisonOp* comparison = condition.TryCast<ComparisonOp>()) {
+ Type lhs = type_getter_(comparison->left());
+ Type rhs = type_getter_(comparison->right());
+
+ const bool is_signed = ComparisonOp::IsSigned(comparison->kind);
+ const bool is_less_than = ComparisonOp::IsLessThan(comparison->kind);
+ Type l_refined;
+ Type r_refined;
+
+ if (lhs.IsNone() || rhs.IsNone()) {
+ type_refiner_(comparison->left(), Type::None());
+ type_refiner_(comparison->right(), Type::None());
+ return;
+ } else if (lhs.IsAny() || rhs.IsAny()) {
+ // If either side has any type, there is not much we can do.
+ return;
+ }
+
+ switch (comparison->rep.value()) {
+ case RegisterRepresentation::Word32(): {
+ if (is_signed) {
+ // TODO(nicohartmann@): Support signed comparison.
+ return;
+ }
+ Word32Type l = Typer::TruncateWord32Input(lhs, true, zone).AsWord32();
+ Word32Type r = Typer::TruncateWord32Input(rhs, true, zone).AsWord32();
+ Type l_restrict, r_restrict;
+ using OpTyper = WordOperationTyper<32>;
+ if (is_less_than) {
+ std::tie(l_restrict, r_restrict) =
+ then_branch
+ ? OpTyper::RestrictionForUnsignedLessThan_True(l, r, zone)
+ : OpTyper::RestrictionForUnsignedLessThan_False(l, r, zone);
+ } else {
+ std::tie(l_restrict, r_restrict) =
+ then_branch
+ ? OpTyper::RestrictionForUnsignedLessThanOrEqual_True(l, r,
+ zone)
+ : OpTyper::RestrictionForUnsignedLessThanOrEqual_False(l, r,
+ zone);
+ }
+
+ // Special handling for word32 restriction, because the inputs might
+ // have been truncated from word64 implicitly.
+ l_refined = RefineWord32Type<true>(lhs, l_restrict, zone);
+ r_refined = RefineWord32Type<true>(rhs, r_restrict, zone);
+ break;
+ }
+ case RegisterRepresentation::Float64(): {
+ Float64Type l = lhs.AsFloat64();
+ Float64Type r = rhs.AsFloat64();
+ Type l_restrict, r_restrict;
+ using OpTyper = FloatOperationTyper<64>;
+ if (is_less_than) {
+ std::tie(l_restrict, r_restrict) =
+ then_branch ? OpTyper::RestrictionForLessThan_True(l, r, zone)
+ : OpTyper::RestrictionForLessThan_False(l, r, zone);
+ } else {
+ std::tie(l_restrict, r_restrict) =
+ then_branch
+ ? OpTyper::RestrictionForLessThanOrEqual_True(l, r, zone)
+ : OpTyper::RestrictionForLessThanOrEqual_False(l, r, zone);
+ }
+
+ l_refined = l_restrict.IsNone() ? Type::None()
+ : Float64Type::Intersect(
+ l, l_restrict.AsFloat64(), zone);
+ r_refined = r_restrict.IsNone() ? Type::None()
+ : Float64Type::Intersect(
+ r, r_restrict.AsFloat64(), zone);
+ break;
+ }
+ default:
+ return;
+ }
+
+ // In some cases, the refined type is not a subtype of the old type,
+ // because it cannot be represented precisely. In that case we keep the
+ // old type so that typing remains stable.
+ if (l_refined.IsSubtypeOf(lhs)) {
+ type_refiner_(comparison->left(), l_refined);
+ }
+ if (r_refined.IsSubtypeOf(rhs)) {
+ type_refiner_(comparison->right(), r_refined);
+ }
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
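A worked example of the unsigned Word32 path above, with illustrative types: for a comparison `x < y` where x is typed Word32[0, 100] and y is typed Word32{10}:

  // then-branch: RestrictionForUnsignedLessThan_True  yields x in [0, 9], y in [1, max];
  //              intersecting with the old types refines x to [0, 9] and keeps y at {10}.
  // else-branch: RestrictionForUnsignedLessThan_False yields x in [10, max], y in [0, 100];
  //              x is refined to [10, 100].
  // Signed comparisons are currently left unrefined (see the TODO above).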
diff --git a/deps/v8/src/compiler/turboshaft/typer.h b/deps/v8/src/compiler/turboshaft/typer.h
new file mode 100644
index 0000000000..f65ffcdc41
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/typer.h
@@ -0,0 +1,1594 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPER_H_
+#define V8_COMPILER_TURBOSHAFT_TYPER_H_
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/vector.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/types.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// Returns the array's least element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+template <typename T, size_t N>
+T array_min(const std::array<T, N>& a) {
+ DCHECK_NE(0, N);
+ T x = +std::numeric_limits<T>::infinity();
+ for (size_t i = 0; i < N; ++i) {
+ if (!std::isnan(a[i])) {
+ x = std::min(a[i], x);
+ }
+ }
+ DCHECK(!std::isnan(x));
+ return x == T{0} ? T{0} : x; // -0 -> 0
+}
+
+// Returns the array's greatest element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+template <typename T, size_t N>
+T array_max(const std::array<T, N>& a) {
+ DCHECK_NE(0, N);
+ T x = -std::numeric_limits<T>::infinity();
+ for (size_t i = 0; i < N; ++i) {
+ if (!std::isnan(a[i])) {
+ x = std::max(a[i], x);
+ }
+ }
+ DCHECK(!std::isnan(x));
+ return x == T{0} ? T{0} : x; // -0 -> 0
+}
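For illustration, applied to a std::array<double, 4> holding {3.0, NaN, -0.0, 7.0}:

  // array_min(a) == 0.0   (NaN is ignored; the minimum -0.0 is normalized to 0.0)
  // array_max(a) == 7.0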
+
+template <size_t Bits>
+struct WordOperationTyper {
+ static_assert(Bits == 32 || Bits == 64);
+ using word_t = uint_type<Bits>;
+ using type_t = WordType<Bits>;
+ using ElementsVector = base::SmallVector<word_t, type_t::kMaxSetSize * 2>;
+ static constexpr word_t max = std::numeric_limits<word_t>::max();
+
+ static type_t FromElements(ElementsVector elements, Zone* zone) {
+ base::sort(elements);
+ auto it = std::unique(elements.begin(), elements.end());
+ elements.pop_back(std::distance(it, elements.end()));
+ DCHECK(!elements.empty());
+ if (elements.size() <= type_t::kMaxSetSize) {
+ return type_t::Set(elements, zone);
+ }
+
+ auto range =
+ MakeRange(base::Vector<const word_t>{elements.data(), elements.size()});
+ auto result = type_t::Range(range.first, range.second, zone);
+ DCHECK(
+ base::all_of(elements, [&](word_t e) { return result.Contains(e); }));
+ return result;
+ }
+
+ static std::pair<word_t, word_t> MakeRange(const type_t& t) {
+ if (t.is_range()) return t.range();
+ DCHECK(t.is_set());
+ return MakeRange(t.set_elements());
+ }
+
+ // This function tries to find a somewhat reasonable range for a given set of
+ // values. If the elements span no more than half of the range, we just
+ // construct the range from min(elements) to max(elements). Otherwise, we
+ // consider a wrapping range because it is likely that there is a larger gap
+ // in the middle of the elements. For that, we start with a wrapping range
+ // from max(elements) to min(elements) and then incrementally add another
+ // element either by increasing the 'to' or decreasing the 'from' of the
+ // range, whichever leads to a smaller range.
+ static std::pair<word_t, word_t> MakeRange(
+ const base::Vector<const word_t>& elements) {
+ DCHECK(!elements.empty());
+ DCHECK(detail::is_unique_and_sorted(elements));
+ if (elements[elements.size() - 1] - elements[0] <= max / 2) {
+ // Construct a non-wrapping range.
+ return {elements[0], elements[elements.size() - 1]};
+ }
+ // Construct a wrapping range.
+ size_t from_index = elements.size() - 1;
+ size_t to_index = 0;
+ while (to_index + 1 < from_index) {
+ if ((elements[to_index + 1] - elements[to_index]) <
+ (elements[from_index] - elements[from_index - 1])) {
+ ++to_index;
+ } else {
+ --from_index;
+ }
+ }
+ return {elements[from_index], elements[to_index]};
+ }
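A worked example of this heuristic (Word32, values chosen for illustration): for the sorted elements {1, 2, 0xffffff00}, the span 0xffffff00 - 1 exceeds max/2, so a wrapping range is constructed:

  // start:     from_index = 2 (0xffffff00), to_index = 0 (1)
  // iteration: the gap 2 - 1 is smaller than the gap 0xffffff00 - 2, so to_index -> 1
  // result:    the wrapping range [0xffffff00, 2], containing 1, 2 and 0xffffff00..max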
+
+ static word_t distance(const std::pair<word_t, word_t>& range) {
+ return distance(range.first, range.second);
+ }
+ static word_t distance(word_t from, word_t to) {
+ return is_wrapping(from, to) ? (max - from + to) : to - from;
+ }
+
+ static bool is_wrapping(const std::pair<word_t, word_t>& range) {
+ return is_wrapping(range.first, range.second);
+ }
+ static bool is_wrapping(word_t from, word_t to) { return from > to; }
+
+ static type_t Add(const type_t& lhs, const type_t& rhs, Zone* zone) {
+ if (lhs.is_any() || rhs.is_any()) return type_t::Any();
+
+ // If both sides are decently small sets, we produce the product set (which
+ // we convert to a range if it exceeds the set limit).
+ if (lhs.is_set() && rhs.is_set()) {
+ ElementsVector result_elements;
+ for (int i = 0; i < lhs.set_size(); ++i) {
+ for (int j = 0; j < rhs.set_size(); ++j) {
+ result_elements.push_back(lhs.set_element(i) + rhs.set_element(j));
+ }
+ }
+ return FromElements(std::move(result_elements), zone);
+ }
+
+ // Otherwise just construct a range.
+ std::pair<word_t, word_t> x = MakeRange(lhs);
+ std::pair<word_t, word_t> y = MakeRange(rhs);
+
+ // If the result would not be a complete range, we compute it.
+ // Check: (lhs.to + rhs.to + 1) - (lhs.from + rhs.from + 1) < max
+ // =====> (lhs.to - lhs.from) + (rhs.to - rhs.from) < max
+ // =====> (lhs.to - lhs.from) < max - (rhs.to - rhs.from)
+ if (distance(x) < max - distance(y)) {
+ return type_t::Range(x.first + y.first, x.second + y.second, zone);
+ }
+
+ return type_t::Any();
+ }
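A worked example of the range path (Word32, illustrative values): [10, 20] + [5, 7]:

  // distance(lhs) = 10, distance(rhs) = 2, and 10 < max - 2,
  // so the result is Range(10 + 5, 20 + 7) = [15, 27].
  // If the combined distances covered the whole word width, the result would be Any.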
+
+ static type_t Subtract(const type_t& lhs, const type_t& rhs, Zone* zone) {
+ if (lhs.is_any() || rhs.is_any()) return type_t::Any();
+
+ // If both sides are decently small sets, we produce the product set (which
+ // we convert to a range if it exceeds the set limit).
+ if (lhs.is_set() && rhs.is_set()) {
+ ElementsVector result_elements;
+ for (int i = 0; i < lhs.set_size(); ++i) {
+ for (int j = 0; j < rhs.set_size(); ++j) {
+ result_elements.push_back(lhs.set_element(i) - rhs.set_element(j));
+ }
+ }
+ return FromElements(std::move(result_elements), zone);
+ }
+
+ // Otherwise just construct a range.
+ std::pair<word_t, word_t> x = MakeRange(lhs);
+ std::pair<word_t, word_t> y = MakeRange(rhs);
+
+ if (is_wrapping(x) && is_wrapping(y)) {
+ return type_t::Range(x.first - y.second, x.second - y.first, zone);
+ }
+
+ // TODO(nicohartmann@): Improve the wrapping cases.
+ return type_t::Any();
+ }
+
+ static Word32Type UnsignedLessThan(const type_t& lhs, const type_t& rhs,
+ Zone* zone) {
+ bool can_be_true = lhs.unsigned_min() < rhs.unsigned_max();
+ bool can_be_false = lhs.unsigned_max() >= rhs.unsigned_min();
+
+ if (!can_be_true) return Word32Type::Constant(0);
+ if (!can_be_false) return Word32Type::Constant(1);
+ return Word32Type::Set({0, 1}, zone);
+ }
+
+ static Word32Type UnsignedLessThanOrEqual(const type_t& lhs,
+ const type_t& rhs, Zone* zone) {
+ bool can_be_true = lhs.unsigned_min() <= rhs.unsigned_max();
+ bool can_be_false = lhs.unsigned_max() > rhs.unsigned_min();
+
+ if (!can_be_true) return Word32Type::Constant(0);
+ if (!can_be_false) return Word32Type::Constant(1);
+ return Word32Type::Set({0, 1}, zone);
+ }
+
+ // Computes the ranges to which the sides of the unsigned comparison (lhs <
+ // rhs) can be restricted when the comparison is true. When the comparison is
+ // true, we learn: lhs cannot be >= rhs.max and rhs cannot be <= lhs.min.
+ static std::pair<Type, Type> RestrictionForUnsignedLessThan_True(
+ const type_t& lhs, const type_t& rhs, Zone* zone) {
+ Type restrict_lhs;
+ if (rhs.unsigned_max() == 0) {
+ // There is no value for lhs that could make (lhs < 0) true.
+ restrict_lhs = Type::None();
+ } else {
+ restrict_lhs = type_t::Range(0, next_smaller(rhs.unsigned_max()), zone);
+ }
+
+ Type restrict_rhs;
+ if (lhs.unsigned_min() == max) {
+ // There is no value for rhs that could make (max < rhs) true.
+ restrict_rhs = Type::None();
+ } else {
+ restrict_rhs = type_t::Range(next_larger(lhs.unsigned_min()), max, zone);
+ }
+
+ return {restrict_lhs, restrict_rhs};
+ }
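For example (illustrative values), with lhs in [0, 100] and rhs in [0, 50]:

  // restrict_lhs = [0, 49]   (lhs must be smaller than rhs.max = 50)
  // restrict_rhs = [1, max]  (rhs must be larger than lhs.min = 0)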
+
+ // Computes the ranges to which the sides of the unsigned comparison (lhs <
+ // rhs) can be restricted when the comparison is false. When the comparison is
+ // false, we learn: lhs cannot be < rhs.min and rhs cannot be > lhs.max.
+ static std::pair<Type, Type> RestrictionForUnsignedLessThan_False(
+ const type_t& lhs, const type_t& rhs, Zone* zone) {
+ return {type_t::Range(rhs.unsigned_min(), max, zone),
+ type_t::Range(0, lhs.unsigned_max(), zone)};
+ }
+
+ // Computes the ranges to which the sides of the unsigned comparison (lhs <=
+ // rhs) can be restricted when the comparison is true. When the comparison is
+ // true, we learn: lhs cannot be > rhs.max and rhs cannot be < lhs.min.
+ static std::pair<Type, Type> RestrictionForUnsignedLessThanOrEqual_True(
+ const type_t& lhs, const type_t& rhs, Zone* zone) {
+ return {type_t::Range(0, rhs.unsigned_max(), zone),
+ type_t::Range(lhs.unsigned_min(), max, zone)};
+ }
+
+ // Computes the ranges to which the sides of the unsigned comparison (lhs <=
+ // rhs) can be restricted when the comparison is false. When the comparison is
+ // false, we learn: lhs cannot be <= rhs.min and rhs cannot be >= lhs.max.
+ static std::pair<Type, Type> RestrictionForUnsignedLessThanOrEqual_False(
+ const type_t& lhs, const type_t& rhs, Zone* zone) {
+ Type restrict_lhs;
+ if (rhs.unsigned_min() == max) {
+ // There is no value for lhs that could make (lhs <= max) false.
+ restrict_lhs = Type::None();
+ } else {
+ restrict_lhs = type_t::Range(next_larger(rhs.unsigned_min()), max, zone);
+ }
+
+ Type restrict_rhs;
+ if (lhs.unsigned_max() == 0) {
+ // There is no value for rhs that could make (0 <= rhs) false.
+ restrict_rhs = Type::None();
+ } else {
+ restrict_rhs = type_t::Range(0, next_smaller(lhs.unsigned_max()), zone);
+ }
+
+ return {restrict_lhs, restrict_rhs};
+ }
+
+ // WidenMaximal immediately widens any boundary that grew beyond {old_type}
+ // to its extreme (0 or max, respectively).
+ static type_t WidenMaximal(const type_t& old_type, const type_t& new_type,
+ Zone* zone) {
+ if (new_type.is_any()) return new_type;
+
+ if (old_type.is_wrapping()) {
+ DCHECK(new_type.is_wrapping());
+ return type_t::Any();
+ } else if (new_type.is_wrapping()) {
+ return type_t::Any();
+ } else {
+ word_t result_from = new_type.unsigned_min();
+ if (result_from < old_type.unsigned_min()) result_from = 0;
+ word_t result_to = new_type.unsigned_max();
+ if (result_to > old_type.unsigned_max()) {
+ result_to = std::numeric_limits<word_t>::max();
+ }
+ return type_t::Range(result_from, result_to, zone);
+ }
+ }
+
+ // Performs exponential widening, which means that the number of values
+ // described by the resulting type is at least doubled with respect to the
+ // {old_type}. If {new_type} is already twice the size of {old_type},
+ // {new_type} may be returned directly.
+ static type_t WidenExponential(const type_t& old_type, type_t new_type,
+ Zone* zone) {
+ if (new_type.is_any()) return new_type;
+ word_t old_from, old_to, new_from, new_to;
+ if (old_type.is_set()) {
+ const word_t old_size = old_type.set_size();
+ if (new_type.is_set()) {
+ const word_t new_size = new_type.set_size();
+ if (new_size >= 2 * old_size) return new_type;
+ std::tie(new_from, new_to) = MakeRange(new_type);
+ } else {
+ DCHECK(new_type.is_range());
+ std::tie(new_from, new_to) = new_type.range();
+ }
+ if (distance(new_from, new_to) >= 2 * old_size) {
+ return type_t::Range(new_from, new_to, zone);
+ }
+ std::tie(old_from, old_to) = MakeRange(old_type);
+ } else {
+ DCHECK(old_type.is_range());
+ std::tie(old_from, old_to) = old_type.range();
+ if (new_type.is_set()) {
+ std::tie(new_from, new_to) = MakeRange(new_type);
+ } else {
+ DCHECK(new_type.is_range());
+ std::tie(new_from, new_to) = new_type.range();
+ }
+ }
+
+ // If the old type is already quite large, we go to full range.
+ if (distance(old_from, old_to) >= std::numeric_limits<word_t>::max() / 4) {
+ return type_t::Any();
+ }
+
+ const word_t min_size = 2 * (distance(old_from, old_to) + 1);
+ if (distance(new_from, new_to) >= min_size) {
+ return type_t::Range(new_from, new_to, zone);
+ }
+
+ // If old is wrapping (and so is new).
+ if (is_wrapping(old_from, old_to)) {
+ DCHECK(is_wrapping(new_from, new_to));
+ if (new_from < old_from) {
+ DCHECK_LE(old_to, new_to);
+ // We widen the `from` (although `to` might have grown, too).
+ DCHECK_LT(new_to, min_size);
+ word_t result_from =
+ std::numeric_limits<word_t>::max() - (min_size - new_to);
+ DCHECK_LT(result_from, new_from);
+ DCHECK_LE(min_size, distance(result_from, new_to));
+ return type_t::Range(result_from, new_to, zone);
+ } else {
+ DCHECK_EQ(old_from, new_from);
+ // We widen the `to`.
+ DCHECK_LT(std::numeric_limits<word_t>::max() - new_from, min_size);
+ word_t result_to =
+ min_size - (std::numeric_limits<word_t>::max() - new_from);
+ DCHECK_GT(result_to, new_to);
+ DCHECK_LE(min_size, distance(new_from, result_to));
+ return type_t::Range(new_from, result_to, zone);
+ }
+ }
+
+ // If old is not wrapping, but new is.
+ if (is_wrapping(new_from, new_to)) {
+ if (new_to < old_to) {
+ // If wrapping was caused by `to` growing over max, grow `to` further
+ // (although `from` might have grown, too).
+ DCHECK_LT(std::numeric_limits<word_t>::max() - new_from, min_size);
+ word_t result_to =
+ min_size - (std::numeric_limits<word_t>::max() - new_from);
+ DCHECK_LT(new_to, result_to);
+ return type_t::Range(new_from, result_to, zone);
+ } else {
+ DCHECK_LT(old_from, new_from);
+ // If wrapping was caused by `from` growing below 0, grow `from`
+ // further.
+ DCHECK_LT(new_to, min_size);
+ word_t result_from =
+ std::numeric_limits<word_t>::max() - (min_size - new_to);
+ DCHECK_LT(result_from, new_from);
+ return type_t::Range(result_from, new_to, zone);
+ }
+ }
+
+ // Neither old nor new is wrapping.
+ if (new_from < old_from) {
+ DCHECK_LE(old_to, new_to);
+ // Check if we can widen the `from`.
+ if (new_to >= min_size) {
+ // We can decrease `from` without going below 0.
+ word_t result_from = new_to - min_size;
+ DCHECK_LT(result_from, new_from);
+ return type_t::Range(result_from, new_to, zone);
+ } else {
+ // We cannot grow `from` enough, so we also have to grow `to`.
+ return type_t::Range(0, min_size, zone);
+ }
+ } else {
+ DCHECK_EQ(old_from, new_from);
+ // Check if we can widen the `to`.
+ if (new_from <= std::numeric_limits<word_t>::max() - min_size) {
+ // We can increase `to` without going above max.
+ word_t result_to = new_from + min_size;
+ DCHECK_GT(result_to, new_to);
+ return type_t::Range(new_from, result_to, zone);
+ } else {
+ // We cannot grow `to` enough, so we also have to grow `from`.
+ return type_t::Range(std::numeric_limits<word_t>::max() - min_size,
+ std::numeric_limits<word_t>::max(), zone);
+ }
+ }
+ }
+};
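A worked example of WidenExponential in the non-wrapping case (illustrative values): old = [10, 19], new = [8, 21]:

  // min_size = 2 * (distance(10, 19) + 1) = 20 and distance(8, 21) = 13 < 20;
  // since new_from (8) < old_from (10), the `from` side is widened:
  // result = [new_to - min_size, new_to] = [1, 21], i.e. 21 values versus the
  // 10 values of the old range, so the widened type is at least twice as large.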
+
+template <size_t Bits>
+struct FloatOperationTyper {
+ static_assert(Bits == 32 || Bits == 64);
+ using float_t = std::conditional_t<Bits == 32, float, double>;
+ using type_t = FloatType<Bits>;
+ static constexpr float_t inf = std::numeric_limits<float_t>::infinity();
+ static constexpr int kSetThreshold = type_t::kMaxSetSize;
+
+ static type_t Range(float_t min, float_t max, uint32_t special_values,
+ Zone* zone) {
+ DCHECK_LE(min, max);
+ if (min == max) return Set({min}, special_values, zone);
+ return type_t::Range(min, max, special_values, zone);
+ }
+
+ static type_t Set(std::vector<float_t> elements, uint32_t special_values,
+ Zone* zone) {
+ base::sort(elements);
+ elements.erase(std::unique(elements.begin(), elements.end()),
+ elements.end());
+ if (base::erase_if(elements, [](float_t v) { return std::isnan(v); }) > 0) {
+ special_values |= type_t::kNaN;
+ }
+ if (base::erase_if(elements, [](float_t v) { return IsMinusZero(v); }) >
+ 0) {
+ special_values |= type_t::kMinusZero;
+ }
+ return type_t::Set(elements, special_values, zone);
+ }
+
+ // Check if the elements in the set are all integers. This ignores special
+ // values (NaN, -0)!
+ static bool IsIntegerSet(const type_t& t) {
+ if (!t.is_set()) return false;
+ int size = t.set_size();
+ DCHECK_LT(0, size);
+
+ float_t unused_ipart;
+ float_t min = t.set_element(0);
+ if (std::modf(min, &unused_ipart) != 0.0) return false;
+ if (min == -inf) return false;
+ float_t max = t.set_element(size - 1);
+ if (std::modf(max, &unused_ipart) != 0.0) return false;
+ if (max == inf) return false;
+
+ for (int i = 1; i < size - 1; ++i) {
+ if (std::modf(t.set_element(i), &unused_ipart) != 0.0) return false;
+ }
+ return true;
+ }
+
+ static bool IsZeroish(const type_t& l) {
+ return l.has_nan() || l.has_minus_zero() || l.Contains(0);
+ }
+
+ // Tries to construct the product of two sets where values are generated using
+ // {combine}. Returns Type::Invalid() if a set cannot be constructed (e.g.
+ // because the result exceeds the maximal number of set elements).
+ static Type ProductSet(const type_t& l, const type_t& r,
+ uint32_t special_values, Zone* zone,
+ std::function<float_t(float_t, float_t)> combine) {
+ DCHECK(l.is_set());
+ DCHECK(r.is_set());
+
+ std::vector<float_t> results;
+ auto CombineWithLeft = [&](float_t left) {
+ for (int j = 0; j < r.set_size(); ++j) {
+ results.push_back(combine(left, r.set_element(j)));
+ }
+ if (r.has_minus_zero()) results.push_back(combine(left, -0.0));
+ if (r.has_nan()) results.push_back(combine(left, nan_v<Bits>));
+ };
+
+ for (int i = 0; i < l.set_size(); ++i) {
+ CombineWithLeft(l.set_element(i));
+ }
+ if (l.has_minus_zero()) CombineWithLeft(-0.0);
+ if (l.has_nan()) CombineWithLeft(nan_v<Bits>);
+
+ if (base::erase_if(results, [](float_t v) { return std::isnan(v); }) > 0) {
+ special_values |= type_t::kNaN;
+ }
+ if (base::erase_if(results, [](float_t v) { return IsMinusZero(v); }) > 0) {
+ special_values |= type_t::kMinusZero;
+ }
+ base::sort(results);
+ auto it = std::unique(results.begin(), results.end());
+ if (std::distance(results.begin(), it) > kSetThreshold)
+ return Type::Invalid();
+ results.erase(it, results.end());
+ if (results.empty()) return type_t::OnlySpecialValues(special_values);
+ return Set(std::move(results), special_values, zone);
+ }
+
+ static Type Add(type_t l, type_t r, Zone* zone) {
+ // Addition can return NaN if either input can be NaN or we try to compute
+ // the sum of two infinities of opposite sign.
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+ bool maybe_nan = l.has_nan() || r.has_nan();
+
+ // Addition can yield minus zero only if both inputs can be minus zero.
+ bool maybe_minuszero = true;
+ if (l.has_minus_zero()) {
+ l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
+ } else {
+ maybe_minuszero = false;
+ }
+ if (r.has_minus_zero()) {
+ r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
+ } else {
+ maybe_minuszero = false;
+ }
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) { return a + b; };
+ if (l.is_set() && r.is_set()) {
+ auto result = ProductSet(l, r, special_values, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ // Otherwise just construct a range.
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+
+ std::array<float_t, 4> results;
+ results[0] = l_min + r_min;
+ results[1] = l_min + r_max;
+ results[2] = l_max + r_min;
+ results[3] = l_max + r_max;
+
+ int nans = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) ++nans;
+ }
+ if (nans > 0) {
+ special_values |= type_t::kNaN;
+ if (nans >= 4) {
+ // All combinations of inputs produce NaN.
+ return type_t::OnlySpecialValues(special_values);
+ }
+ }
+ const float_t result_min = array_min(results);
+ const float_t result_max = array_max(results);
+ return Range(result_min, result_max, special_values, zone);
+ }
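A worked example of the range path (Float64, illustrative values): l = [1.0, 2.0] carrying the NaN special value, r = [10.0, 20.0]:

  // maybe_nan is true (l has NaN); neither input has -0, so maybe_minuszero is false.
  // The four corner sums are {11, 21, 12, 22}, none of which is NaN,
  // so the result is Float64[11, 22] with the kNaN special value.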
+
+ static Type Subtract(type_t l, type_t r, Zone* zone) {
+ // Subtraction can return NaN if either input can be NaN or we try to
+ // compute the difference of two infinities of the same sign.
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+ bool maybe_nan = l.has_nan() || r.has_nan();
+
+ // Subtraction can yield minus zero if {lhs} can be minus zero and {rhs}
+ // can be zero.
+ bool maybe_minuszero = false;
+ if (l.has_minus_zero()) {
+ l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
+ maybe_minuszero = r.Contains(0);
+ }
+ if (r.has_minus_zero()) {
+ r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
+ }
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) { return a - b; };
+ if (l.is_set() && r.is_set()) {
+ auto result = ProductSet(l, r, special_values, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ // Otherwise just construct a range.
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+
+ std::array<float_t, 4> results;
+ results[0] = l_min - r_min;
+ results[1] = l_min - r_max;
+ results[2] = l_max - r_min;
+ results[3] = l_max - r_max;
+
+ int nans = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) ++nans;
+ }
+ if (nans > 0) {
+ special_values |= type_t::kNaN;
+ if (nans >= 4) {
+ // All combinations of inputs produce NaN.
+ return type_t::NaN();
+ }
+ }
+ const float_t result_min = array_min(results);
+ const float_t result_max = array_max(results);
+ return Range(result_min, result_max, special_values, zone);
+ }
+
+ static Type Multiply(type_t l, type_t r, Zone* zone) {
+ // Multiplication propagates NaN:
+ // NaN * x = NaN (regardless of sign of x)
+ // 0 * Infinity = NaN (regardless of signs)
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+ bool maybe_nan = l.has_nan() || r.has_nan() ||
+ (IsZeroish(l) && (r.min() == -inf || r.max() == inf)) ||
+ (IsZeroish(r) && (l.min() == -inf || l.max() == inf));
+
+ // Try to rule out -0.
+ bool maybe_minuszero = l.has_minus_zero() || r.has_minus_zero() ||
+ (IsZeroish(l) && r.min() < 0.0) ||
+ (IsZeroish(r) && l.min() < 0.0);
+ if (l.has_minus_zero()) {
+ l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
+ }
+ if (r.has_minus_zero()) {
+ r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
+ }
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) { return a * b; };
+ if (l.is_set() && r.is_set()) {
+ auto result = ProductSet(l, r, special_values, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ // Otherwise just construct a range.
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+
+ std::array<float_t, 4> results;
+ results[0] = l_min * r_min;
+ results[1] = l_min * r_max;
+ results[2] = l_max * r_min;
+ results[3] = l_max * r_max;
+
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) {
+ return type_t::Any();
+ }
+ }
+
+ float_t result_min = array_min(results);
+ float_t result_max = array_max(results);
+ if (result_min <= 0.0 && 0.0 <= result_max &&
+ (l_min < 0.0 || r_min < 0.0)) {
+ special_values |= type_t::kMinusZero;
+ // Remove -0.
+ result_min += 0.0;
+ result_max += 0.0;
+ }
+ // 0 * V8_INFINITY is NaN, regardless of sign
+ if (((l_min == -inf || l_max == inf) && (r_min <= 0.0 && 0.0 <= r_max)) ||
+ ((r_min == -inf || r_max == inf) && (l_min <= 0.0 && 0.0 <= l_max))) {
+ special_values |= type_t::kNaN;
+ }
+
+ type_t type = Range(result_min, result_max, special_values, zone);
+ return type;
+ }
+
+ static Type Divide(const type_t& l, const type_t& r, Zone* zone) {
+ // Division is tricky, so all we do is try ruling out -0 and NaN.
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) {
+ if V8_UNLIKELY (!std::isfinite(a) && !std::isfinite(b)) {
+ return nan_v<Bits>;
+ }
+ if V8_UNLIKELY (IsMinusZero(b)) {
+ // +-0 / -0 ==> NaN
+ if (a == 0) return nan_v<Bits>;
+ return a > 0 ? -inf : inf;
+ }
+ if V8_UNLIKELY (b == 0) {
+ // +-0 / 0 ==> NaN
+ if (a == 0) return nan_v<Bits>;
+ return a > 0 ? inf : -inf;
+ }
+ return a / b;
+ };
+ if (l.is_set() && r.is_set()) {
+ auto result = ProductSet(l, r, 0, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+
+ bool maybe_nan =
+ l.has_nan() || IsZeroish(r) ||
+ ((l_min == -inf || l_max == inf) && (r_min == -inf || r_max == inf));
+
+ // Try to rule out -0.
+ // -0 / r (r > 0)
+ bool maybe_minuszero =
+ (l.has_minus_zero() && r_max > 0)
+ // 0 / r (r < 0 || r == -0)
+ || (l.Contains(0) && (r_min < 0 || r.has_minus_zero()))
+ // l / inf (l < 0 || l == -0)
+ || (r_max == inf && (l_min < 0 || l.has_minus_zero()))
+ // l / -inf (l >= 0)
+ || (r_min == -inf && l_max >= 0);
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+
+ const bool r_all_positive = r_min >= 0 && !r.has_minus_zero();
+ const bool r_all_negative = r_max < 0;
+
+ // If r doesn't span 0, we can try to compute a more precise type.
+ if (r_all_positive || r_all_negative) {
+ // If r does not contain 0 or -0, we can compute a range.
+ if (r_min > 0 && !r.has_minus_zero()) {
+ std::array<float_t, 4> results;
+ results[0] = l_min / r_min;
+ results[1] = l_min / r_max;
+ results[2] = l_max / r_min;
+ results[3] = l_max / r_max;
+
+ for (float_t r : results) {
+ if (std::isnan(r)) return type_t::Any();
+ }
+
+ const float_t result_min = array_min(results);
+ const float_t result_max = array_max(results);
+ return Range(result_min, result_max, special_values, zone);
+ }
+
+ // Otherwise we try to check for the sign of the result.
+ if (l_max < 0) {
+ if (r_all_positive) {
+ // All values are negative.
+ return Range(-inf, next_smaller(float_t{0}), special_values, zone);
+ } else {
+ DCHECK(r_all_negative);
+ // All values are positive.
+ return Range(0, inf, special_values, zone);
+ }
+ } else if (l_min >= 0 && !l.has_minus_zero()) {
+ if (r_all_positive) {
+ // All values are positive.
+ DCHECK_EQ(special_values & type_t::kMinusZero, 0);
+ return Range(0, inf, special_values, zone);
+ } else {
+ DCHECK(r_all_negative);
+ // All values are negative.
+ return Range(-inf, next_smaller(float_t{0}), special_values, zone);
+ }
+ }
+ }
+
+ // Otherwise we give up on a precise type.
+ return type_t::Any(special_values);
+ }
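A worked example of the precise-range branch (Float64, illustrative values): l = [1.0, 10.0], r = [2.0, 4.0], neither side containing NaN, 0 or -0:

  // maybe_nan and maybe_minuszero are both false; since r_min > 0, the four corner
  // quotients {0.5, 0.25, 5.0, 2.5} are computed, giving the result Float64[0.25, 5.0].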
+
+ static Type Modulus(type_t l, type_t r, Zone* zone) {
+ // Modulus can yield NaN if either {lhs} or {rhs} is NaN, if {lhs} is
+ // not finite, or if {rhs} is a zero value.
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+ bool maybe_nan =
+ l.has_nan() || IsZeroish(r) || l.min() == -inf || l.max() == inf;
+
+ // Deal with -0 inputs; only the sign bit of {lhs} matters for the result.
+ bool maybe_minuszero = false;
+ if (l.has_minus_zero()) {
+ maybe_minuszero = true;
+ l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
+ }
+ if (r.has_minus_zero()) {
+ r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
+ }
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+ // For integer inputs {l} and {r} we can infer a precise type.
+ if (IsIntegerSet(l) && IsIntegerSet(r)) {
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+ // l % r is:
+ // - never greater than abs(l)
+ // - never greater than abs(r) - 1
+ auto l_abs = std::max(std::abs(l_min), std::abs(l_max));
+ auto r_abs = std::max(std::abs(r_min), std::abs(r_max));
+ // If rhs is 0, we can only produce NaN.
+ if (r_abs == 0) return type_t::NaN();
+ r_abs -= 1;
+ auto abs = std::min(l_abs, r_abs);
+ float_t min = 0.0, max = 0.0;
+ if (l_min >= 0.0) {
+ // {l} positive.
+ max = abs;
+ } else if (l_max <= 0.0) {
+ // {l} negative.
+ min = 0.0 - abs;
+ } else {
+ // {l} positive or negative.
+ min = 0.0 - abs;
+ max = abs;
+ }
+ if (min == max) return Set({min}, special_values, zone);
+ return Range(min, max, special_values, zone);
+ }
+
+ // Otherwise, we give up.
+ return type_t::Any(special_values);
+ }
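A worked example of the integer case (Float64, illustrative values): l = {-7, 9}, r = {4}:

  // l_abs = 9, r_abs = 4 - 1 = 3, abs = min(9, 3) = 3; since l spans both signs,
  // the result is Float64[-3, 3] (no NaN possible: r cannot be zero and l is finite).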
+
+ static Type Min(type_t l, type_t r, Zone* zone) {
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+ bool maybe_nan = l.has_nan() || r.has_nan();
+
+ // In order to ensure monotonicity of the computation below, we additionally
+ // pretend +0 is present (for simplicity on both sides).
+ bool maybe_minuszero = false;
+ if (l.has_minus_zero() && !(r.max() < 0.0)) {
+ maybe_minuszero = true;
+ l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
+ }
+ if (r.has_minus_zero() && !(l.max() < 0.0)) {
+ maybe_minuszero = true;
+ r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
+ }
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) { return std::min(a, b); };
+ if (l.is_set() && r.is_set()) {
+ // TODO(nicohartmann@): There is a faster way to compute this set.
+ auto result = ProductSet(l, r, special_values, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ // Otherwise just construct a range.
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+
+ auto min = std::min(l_min, r_min);
+ auto max = std::min(l_max, r_max);
+ return Range(min, max, special_values, zone);
+ }
+
+ static Type Max(type_t l, type_t r, Zone* zone) {
+ if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
+ bool maybe_nan = l.has_nan() || r.has_nan();
+
+ // In order to ensure monotonicity of the computation below, we additionally
+ // pretend +0 is present (for simplicity on both sides).
+ bool maybe_minuszero = false;
+ if (l.has_minus_zero() && !(r.min() > 0.0)) {
+ maybe_minuszero = true;
+ l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
+ }
+ if (r.has_minus_zero() && !(l.min() > 0.0)) {
+ maybe_minuszero = true;
+ r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
+ }
+
+ uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
+ (maybe_minuszero ? type_t::kMinusZero : 0);
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) { return std::max(a, b); };
+ if (l.is_set() && r.is_set()) {
+ // TODO(nicohartmann@): There is a faster way to compute this set.
+ auto result = ProductSet(l, r, special_values, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ // Otherwise just construct a range.
+ auto [l_min, l_max] = l.minmax();
+ auto [r_min, r_max] = r.minmax();
+
+ auto min = std::max(l_min, r_min);
+ auto max = std::max(l_max, r_max);
+ return Range(min, max, special_values, zone);
+ }
+
+ static Type Power(const type_t& l, const type_t& r, Zone* zone) {
+ // x ** NaN => Nan.
+ if (r.is_only_nan()) return type_t::NaN();
+ // x ** +-0 => 1.
+ if (r.is_constant(0) || r.is_only_minus_zero()) return type_t::Constant(1);
+ if (l.is_only_nan()) {
+ // NaN ** 0 => 1.
+ if (r.Contains(0) || r.has_minus_zero()) {
+ return type_t::Set({1}, type_t::kNaN, zone);
+ }
+ // NaN ** x => NaN (x != +-0).
+ return type_t::NaN();
+ }
+ bool maybe_nan = l.has_nan() || r.has_nan();
+
+ // a ** b produces NaN if a < 0 and b is not an integer.
+ if (l.min() < 0.0 && !IsIntegerSet(r)) maybe_nan = true;
+
+ // a ** b produces -0 iff a == -0 and b is odd. Checking for all the cases
+ // where b only contains odd integer values seems not worth the
+ // additional information we get here. We accept this over-approximation for
+ // now. We could refine this whenever we see a benefit.
+ uint32_t special_values =
+ (maybe_nan ? type_t::kNaN : 0) | l.special_values();
+
+ // If both sides are decently small sets, we produce the product set.
+ auto combine = [](float_t a, float_t b) { return std::pow(a, b); };
+ if (l.is_set() && r.is_set()) {
+ auto result = ProductSet(l, r, special_values, zone, combine);
+ if (!result.IsInvalid()) return result;
+ }
+
+ // TODO(nicohartmann@): Maybe we can produce a more precise range here.
+ return type_t::Any(special_values);
+ }
+
+ static Type Atan2(const type_t& l, const type_t& r, Zone* zone) {
+ // TODO(nicohartmann@): Maybe we can produce a more precise range here.
+ return type_t::Any();
+ }
+
+ static Type LessThan(const type_t& lhs, const type_t& rhs, Zone* zone) {
+ bool can_be_true = false;
+ bool can_be_false = false;
+ if (lhs.is_only_special_values()) {
+ if (lhs.has_minus_zero()) {
+ can_be_true = !rhs.is_only_special_values() && rhs.max() > 0.0;
+ can_be_false = rhs.min() <= 0.0;
+ } else {
+ DCHECK(lhs.is_only_nan());
+ }
+ } else if (rhs.is_only_special_values()) {
+ if (rhs.has_minus_zero()) {
+ can_be_true = lhs.min() < 0.0;
+ can_be_false = lhs.max() >= 0.0;
+ } else {
+ DCHECK(rhs.is_only_nan());
+ }
+ } else {
+ // Both sides have at least one non-special value. We don't have to treat
+ // special values here, because NaN has been taken care of already and
+ // -0.0 is included in min/max.
+ can_be_true = lhs.min() < rhs.max();
+ can_be_false = lhs.max() >= rhs.min();
+ }
+
+ // Consider NaN.
+ can_be_false = can_be_false || lhs.has_nan() || rhs.has_nan();
+
+ if (!can_be_true) return Word32Type::Constant(0);
+ if (!can_be_false) return Word32Type::Constant(1);
+ return Word32Type::Set({0, 1}, zone);
+ }
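Two illustrative cases for this comparison (Float64, values chosen for illustration):

  // lhs = [1, 5], rhs = [3, 10]          -> both outcomes possible: Word32{0, 1}
  // lhs = [1, 5], rhs = [6, 10], no NaN  -> always true:            Word32{1}
  // Adding NaN to either side makes a false outcome possible again.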
+
+ static Type LessThanOrEqual(const type_t& lhs, const type_t& rhs,
+ Zone* zone) {
+ bool can_be_true = false;
+ bool can_be_false = false;
+ if (lhs.is_only_special_values()) {
+ if (lhs.has_minus_zero()) {
+ can_be_true = (!rhs.is_only_special_values() && rhs.max() >= 0.0) ||
+ rhs.has_minus_zero();
+ can_be_false = rhs.min() < 0.0;
+ } else {
+ DCHECK(lhs.is_only_nan());
+ }
+ } else if (rhs.is_only_special_values()) {
+ if (rhs.has_minus_zero()) {
+ can_be_true = (!lhs.is_only_special_values() && lhs.min() <= 0.0) ||
+ lhs.has_minus_zero();
+ can_be_false = lhs.max() > 0.0;
+ } else {
+ DCHECK(rhs.is_only_nan());
+ }
+ } else {
+ // Both sides have at least one non-special value. We don't have to treat
+ // special values here, because NaN has been taken care of already and
+ // -0.0 is included in min/max.
+ can_be_true = can_be_true || lhs.min() <= rhs.max();
+ can_be_false = can_be_false || lhs.max() > rhs.min();
+ }
+
+ // Consider NaN.
+ can_be_false = can_be_false || lhs.has_nan() || rhs.has_nan();
+
+ if (!can_be_true) return Word32Type::Constant(0);
+ if (!can_be_false) return Word32Type::Constant(1);
+ return Word32Type::Set({0, 1}, zone);
+ }
+
+ static Word32Type UnsignedLessThanOrEqual(const type_t& lhs,
+ const type_t& rhs, Zone* zone) {
+ bool can_be_true = lhs.unsigned_min() <= rhs.unsigned_max();
+ bool can_be_false = lhs.unsigned_max() > rhs.unsigned_min();
+
+ if (!can_be_true) return Word32Type::Constant(0);
+ if (!can_be_false) return Word32Type::Constant(1);
+ return Word32Type::Set({0, 1}, zone);
+ }
+
+ // Computes the ranges to which the sides of the comparison (lhs < rhs) can be
+ // restricted when the comparison is true. When the comparison is true, we
+ // learn: lhs cannot be >= rhs.max and rhs cannot be <= lhs.min and neither
+ // can be NaN.
+ static std::pair<Type, Type> RestrictionForLessThan_True(const type_t& lhs,
+ const type_t& rhs,
+ Zone* zone) {
+ // If either side is only NaN, this comparison can never be true.
+ if (lhs.is_only_nan() || rhs.is_only_nan()) {
+ return {Type::None(), Type::None()};
+ }
+
+ Type restrict_lhs;
+ if (rhs.max() == -inf) {
+ // There is no value for lhs that could make (lhs < -inf) true.
+ restrict_lhs = Type::None();
+ } else {
+ const auto max = next_smaller(rhs.max());
+ uint32_t sv = max >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
+ restrict_lhs = type_t::Range(-inf, max, sv, zone);
+ }
+
+ Type restrict_rhs;
+ if (lhs.min() == inf) {
+ // There is no value for rhs that could make (inf < rhs) true.
+ restrict_rhs = Type::None();
+ } else {
+ const auto min = next_larger(lhs.min());
+ uint32_t sv = min <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
+ restrict_rhs = type_t::Range(min, inf, sv, zone);
+ }
+
+ return {restrict_lhs, restrict_rhs};
+ }
+
+ // Computes the ranges to which the sides of the comparison (lhs < rhs) can be
+ // restricted when the comparison is false. When the comparison is false, we
+ // learn: lhs cannot be < rhs.min and rhs cannot be > lhs.max.
+ static std::pair<Type, Type> RestrictionForLessThan_False(const type_t& lhs,
+ const type_t& rhs,
+ Zone* zone) {
+ Type restrict_lhs;
+ if (rhs.has_nan()) {
+ restrict_lhs = type_t::Any();
+ } else {
+ uint32_t lhs_sv =
+ type_t::kNaN |
+ (rhs.min() <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
+ restrict_lhs = type_t::Range(rhs.min(), inf, lhs_sv, zone);
+ }
+
+ Type restrict_rhs;
+ if (lhs.has_nan()) {
+ restrict_rhs = type_t::Any();
+ } else {
+ uint32_t rhs_sv =
+ type_t::kNaN |
+ (lhs.max() >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
+ restrict_rhs = type_t::Range(-inf, lhs.max(), rhs_sv, zone);
+ }
+
+ return {restrict_lhs, restrict_rhs};
+ }
+
+ // Computes the ranges to which the sides of the comparison (lhs <= rhs) can
+ // be restricted when the comparison is true. When the comparison is true, we
+ // learn: lhs cannot be > rhs.max and rhs cannot be < lhs.min and neither can
+ // be NaN.
+ static std::pair<Type, Type> RestrictionForLessThanOrEqual_True(
+ const type_t& lhs, const type_t& rhs, Zone* zone) {
+ // If either side is only NaN, this comparison can never be true.
+ if (lhs.is_only_nan() || rhs.is_only_nan()) {
+ return {Type::None(), Type::None()};
+ }
+
+ uint32_t lhs_sv =
+ rhs.max() >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
+ uint32_t rhs_sv =
+ lhs.min() <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
+ return {type_t::Range(-inf, rhs.max(), lhs_sv, zone),
+ type_t::Range(lhs.min(), inf, rhs_sv, zone)};
+ }
+
+ // Computes the ranges to which the sides of the comparison (lhs <= rhs) can
+ // be restricted when the comparison is false. When the comparison is false,
+ // we learn: lhs cannot be <= rhs.min and rhs cannot be >= lhs.max.
+ static std::pair<Type, Type> RestrictionForLessThanOrEqual_False(
+ const type_t& lhs, const type_t& rhs, Zone* zone) {
+ Type restrict_lhs;
+ if (rhs.has_nan()) {
+ restrict_lhs = type_t::Any();
+ } else if (rhs.min() == inf) {
+ // The only value for lhs that could make (lhs <= inf) false is NaN.
+ restrict_lhs = type_t::NaN();
+ } else {
+ const auto min = next_larger(rhs.min());
+ uint32_t sv = type_t::kNaN |
+ (min <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
+ restrict_lhs = type_t::Range(min, inf, sv, zone);
+ }
+
+ Type restrict_rhs;
+ if (lhs.has_nan()) {
+ restrict_rhs = type_t::Any();
+ } else if (lhs.max() == -inf) {
+ // The only value for rhs that could make (-inf <= rhs) false is NaN.
+ restrict_rhs = type_t::NaN();
+ } else {
+ const auto max = next_smaller(lhs.max());
+ uint32_t sv = type_t::kNaN |
+ (max >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
+ restrict_rhs = type_t::Range(-inf, max, sv, zone);
+ }
+
+ return {restrict_lhs, restrict_rhs};
+ }
+};
+
+class Typer {
+ public:
+ static Type TypeForRepresentation(RegisterRepresentation rep) {
+ switch (rep.value()) {
+ case RegisterRepresentation::Word32():
+ return Word32Type::Any();
+ case RegisterRepresentation::Word64():
+ return Word64Type::Any();
+ case RegisterRepresentation::Float32():
+ return Float32Type::Any();
+ case RegisterRepresentation::Float64():
+ return Float64Type::Any();
+
+ case RegisterRepresentation::Tagged():
+ case RegisterRepresentation::Compressed():
+ // TODO(nicohartmann@): Support these representations.
+ return Type::Any();
+ }
+ }
+
+ static Type TypeForRepresentation(
+ base::Vector<const RegisterRepresentation> reps, Zone* zone) {
+ DCHECK_LT(0, reps.size());
+ if (reps.size() == 1) return TypeForRepresentation(reps[0]);
+ base::SmallVector<Type, 4> tuple_types;
+ for (auto rep : reps) tuple_types.push_back(TypeForRepresentation(rep));
+ return TupleType::Tuple(base::VectorOf(tuple_types), zone);
+ }
+
+ static Type TypeConstant(ConstantOp::Kind kind, ConstantOp::Storage value) {
+ switch (kind) {
+ case ConstantOp::Kind::kFloat32:
+ if (std::isnan(value.float32)) return Float32Type::NaN();
+ if (IsMinusZero(value.float32)) return Float32Type::MinusZero();
+ return Float32Type::Constant(value.float32);
+ case ConstantOp::Kind::kFloat64:
+ if (std::isnan(value.float64)) return Float64Type::NaN();
+ if (IsMinusZero(value.float64)) return Float64Type::MinusZero();
+ return Float64Type::Constant(value.float64);
+ case ConstantOp::Kind::kWord32:
+ return Word32Type::Constant(static_cast<uint32_t>(value.integral));
+ case ConstantOp::Kind::kWord64:
+ return Word64Type::Constant(static_cast<uint64_t>(value.integral));
+ default:
+ // TODO(nicohartmann@): Support remaining {kind}s.
+ return Type::Any();
+ }
+ }
+
+ static Type TypeProjection(const Type& input, uint16_t idx) {
+ if (input.IsNone()) return Type::None();
+ if (!input.IsTuple()) return Type::Any();
+ const TupleType& tuple = input.AsTuple();
+ DCHECK_LT(idx, tuple.size());
+ return tuple.element(idx);
+ }
+
+ static Type TypeWordBinop(Type left_type, Type right_type,
+ WordBinopOp::Kind kind, WordRepresentation rep,
+ Zone* zone) {
+ DCHECK(!left_type.IsInvalid());
+ DCHECK(!right_type.IsInvalid());
+
+ if (rep == WordRepresentation::Word32()) {
+ switch (kind) {
+ case WordBinopOp::Kind::kAdd:
+ return TypeWord32Add(left_type, right_type, zone);
+ case WordBinopOp::Kind::kSub:
+ return TypeWord32Sub(left_type, right_type, zone);
+ default:
+ // TODO(nicohartmann@): Support remaining {kind}s.
+ return Word32Type::Any();
+ }
+ } else {
+ DCHECK_EQ(rep, WordRepresentation::Word64());
+ switch (kind) {
+ case WordBinopOp::Kind::kAdd:
+ return TypeWord64Add(left_type, right_type, zone);
+ case WordBinopOp::Kind::kSub:
+ return TypeWord64Sub(left_type, right_type, zone);
+ default:
+ // TODO(nicohartmann@): Support remaining {kind}s.
+ return Word64Type::Any();
+ }
+ }
+ }
+
+ static Type TypeWord32Add(const Type& lhs, const Type& rhs, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ auto l = TruncateWord32Input(lhs, true, zone);
+ auto r = TruncateWord32Input(rhs, true, zone);
+ return WordOperationTyper<32>::Add(l, r, zone);
+ }
+
+ static Type TypeWord32Sub(const Type& lhs, const Type& rhs, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ auto l = TruncateWord32Input(lhs, true, zone);
+ auto r = TruncateWord32Input(rhs, true, zone);
+ return WordOperationTyper<32>::Subtract(l, r, zone);
+ }
+
+ static Type TypeWord64Add(const Type& lhs, const Type& rhs, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ if (!InputIs(lhs, Type::Kind::kWord64) ||
+ !InputIs(rhs, Type::Kind::kWord64)) {
+ return Word64Type::Any();
+ }
+ const auto& l = lhs.AsWord64();
+ const auto& r = rhs.AsWord64();
+
+ return WordOperationTyper<64>::Add(l, r, zone);
+ }
+
+ static Type TypeWord64Sub(const Type& lhs, const Type& rhs, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ if (!InputIs(lhs, Type::Kind::kWord64) ||
+ !InputIs(rhs, Type::Kind::kWord64)) {
+ return Word64Type::Any();
+ }
+
+ const auto& l = lhs.AsWord64();
+ const auto& r = rhs.AsWord64();
+
+ return WordOperationTyper<64>::Subtract(l, r, zone);
+ }
+
+ static Type TypeFloatBinop(Type left_type, Type right_type,
+ FloatBinopOp::Kind kind, FloatRepresentation rep,
+ Zone* zone) {
+ DCHECK(!left_type.IsInvalid());
+ DCHECK(!right_type.IsInvalid());
+
+#define FLOAT_BINOP(op, bits) \
+ case FloatBinopOp::Kind::k##op: \
+ return TypeFloat##bits##op(left_type, right_type, zone);
+
+ if (rep == FloatRepresentation::Float32()) {
+ switch (kind) {
+ FLOAT_BINOP(Add, 32)
+ FLOAT_BINOP(Sub, 32)
+ FLOAT_BINOP(Mul, 32)
+ FLOAT_BINOP(Div, 32)
+ FLOAT_BINOP(Mod, 32)
+ FLOAT_BINOP(Min, 32)
+ FLOAT_BINOP(Max, 32)
+ FLOAT_BINOP(Power, 32)
+ FLOAT_BINOP(Atan2, 32)
+ }
+ } else {
+ DCHECK_EQ(rep, FloatRepresentation::Float64());
+ switch (kind) {
+ FLOAT_BINOP(Add, 64)
+ FLOAT_BINOP(Sub, 64)
+ FLOAT_BINOP(Mul, 64)
+ FLOAT_BINOP(Div, 64)
+ FLOAT_BINOP(Mod, 64)
+ FLOAT_BINOP(Min, 64)
+ FLOAT_BINOP(Max, 64)
+ FLOAT_BINOP(Power, 64)
+ FLOAT_BINOP(Atan2, 64)
+ }
+ }
+
+#undef FLOAT_BINOP
+ }
+
+#define FLOAT_BINOP(op, bits, float_typer_handler) \
+ static Type TypeFloat##bits##op(const Type& lhs, const Type& rhs, \
+ Zone* zone) { \
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None(); \
+ if (!InputIs(lhs, Type::Kind::kFloat##bits) || \
+ !InputIs(rhs, Type::Kind::kFloat##bits)) { \
+ return Float##bits##Type::Any(); \
+ } \
+ const auto& l = lhs.AsFloat##bits(); \
+ const auto& r = rhs.AsFloat##bits(); \
+ return FloatOperationTyper<bits>::float_typer_handler(l, r, zone); \
+ }
+
+ // Float32 operations
+ FLOAT_BINOP(Add, 32, Add)
+ FLOAT_BINOP(Sub, 32, Subtract)
+ FLOAT_BINOP(Mul, 32, Multiply)
+ FLOAT_BINOP(Div, 32, Divide)
+ FLOAT_BINOP(Mod, 32, Modulus)
+ FLOAT_BINOP(Min, 32, Min)
+ FLOAT_BINOP(Max, 32, Max)
+ FLOAT_BINOP(Power, 32, Power)
+ FLOAT_BINOP(Atan2, 32, Atan2)
+ // Float64 operations
+ FLOAT_BINOP(Add, 64, Add)
+ FLOAT_BINOP(Sub, 64, Subtract)
+ FLOAT_BINOP(Mul, 64, Multiply)
+ FLOAT_BINOP(Div, 64, Divide)
+ FLOAT_BINOP(Mod, 64, Modulus)
+ FLOAT_BINOP(Min, 64, Min)
+ FLOAT_BINOP(Max, 64, Max)
+ FLOAT_BINOP(Power, 64, Power)
+ FLOAT_BINOP(Atan2, 64, Atan2)
+#undef FLOAT_BINOP
+
+ static Type TypeOverflowCheckedBinop(const Type& left_type,
+ const Type& right_type,
+ OverflowCheckedBinopOp::Kind kind,
+ WordRepresentation rep, Zone* zone) {
+ DCHECK(!left_type.IsInvalid());
+ DCHECK(!right_type.IsInvalid());
+
+ if (rep == WordRepresentation::Word32()) {
+ switch (kind) {
+ case OverflowCheckedBinopOp::Kind::kSignedAdd:
+ return TypeWord32OverflowCheckedAdd(left_type, right_type, zone);
+ case OverflowCheckedBinopOp::Kind::kSignedSub:
+ case OverflowCheckedBinopOp::Kind::kSignedMul:
+ // TODO(nicohartmann@): Support these.
+ return TupleType::Tuple(Word32Type::Any(),
+ Word32Type::Set({0, 1}, zone), zone);
+ }
+ } else {
+ DCHECK_EQ(rep, WordRepresentation::Word64());
+ switch (kind) {
+ case OverflowCheckedBinopOp::Kind::kSignedAdd:
+ case OverflowCheckedBinopOp::Kind::kSignedSub:
+ case OverflowCheckedBinopOp::Kind::kSignedMul:
+ // TODO(nicohartmann@): Support these.
+ return TupleType::Tuple(Word64Type::Any(),
+ Word32Type::Set({0, 1}, zone), zone);
+ }
+ }
+ }
+
+ static Type TypeWord32OverflowCheckedAdd(const Type& lhs, const Type& rhs,
+ Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ auto l = TruncateWord32Input(lhs, true, zone);
+ auto r = TruncateWord32Input(rhs, true, zone);
+
+ auto value = WordOperationTyper<32>::Add(l, r, zone);
+    // We check for signed overflow: if the topmost two bits of both operands
+    // are 0, the result cannot overflow.
+ if ((0xC0000000 & l.unsigned_max()) == 0 &&
+ (0xC0000000 & r.unsigned_max()) == 0) {
+ // Cannot overflow.
+ return TupleType::Tuple(value, Word32Type::Constant(0), zone);
+ }
+ // Special case for two constant inputs to figure out the overflow.
+ if (l.is_constant() && r.is_constant()) {
+ constexpr uint32_t msb_mask = 0x80000000;
+ DCHECK(value.is_constant());
+ uint32_t l_msb = (*l.try_get_constant()) & msb_mask;
+ uint32_t r_msb = (*r.try_get_constant()) & msb_mask;
+ if (l_msb != r_msb) {
+ // Different sign bits can never lead to an overflow.
+ return TupleType::Tuple(value, Word32Type::Constant(0), zone);
+ }
+ uint32_t value_msb = (*value.try_get_constant()) & msb_mask;
+ const uint32_t overflow = value_msb == l_msb ? 0 : 1;
+ return TupleType::Tuple(value, Word32Type::Constant(overflow), zone);
+ }
+ // Otherwise we accept some imprecision.
+ return TupleType::Tuple(value, Word32Type::Set({0, 1}, zone), zone);
+ }
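
// A minimal, self-contained sketch (plain C++, independent of the V8 classes
// in this CL) of the signed-overflow rule used by TypeWord32OverflowCheckedAdd
// above: operands with equal sign bits overflow exactly when the sum's sign
// bit differs, and operands with bits 31 and 30 clear can never overflow.
#include <cassert>
#include <cstdint>

bool SignedAdd32Overflows(uint32_t a, uint32_t b) {
  constexpr uint32_t kMsb = 0x80000000u;
  const uint32_t sum = a + b;  // wraps modulo 2^32
  return ((a & kMsb) == (b & kMsb)) && ((sum & kMsb) != (a & kMsb));
}

int main() {
  // Both operands pass the 0xC0000000 check above: the sum stays below
  // 0x80000000, so signed overflow is impossible.
  assert(!SignedAdd32Overflows(0x3FFFFFFFu, 0x3FFFFFFFu));
  // Equal sign bits, but the sign flips in the result: overflow.
  assert(SignedAdd32Overflows(0x7FFFFFFFu, 0x00000001u));
  // Different sign bits can never overflow.
  assert(!SignedAdd32Overflows(0x80000000u, 0x7FFFFFFFu));
  return 0;
}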
+
+ static Type TypeComparison(const Type& lhs, const Type& rhs,
+ RegisterRepresentation rep,
+ ComparisonOp::Kind kind, Zone* zone) {
+ switch (rep.value()) {
+ case RegisterRepresentation::Word32():
+ return TypeWord32Comparison(lhs, rhs, kind, zone);
+ case RegisterRepresentation::Word64():
+ return TypeWord64Comparison(lhs, rhs, kind, zone);
+ case RegisterRepresentation::Float32():
+ return TypeFloat32Comparison(lhs, rhs, kind, zone);
+ case RegisterRepresentation::Float64():
+ return TypeFloat64Comparison(lhs, rhs, kind, zone);
+ case RegisterRepresentation::Tagged():
+ case RegisterRepresentation::Compressed():
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ // TODO(nicohartmann@): Support those cases.
+ return Word32Type::Set({0, 1}, zone);
+ }
+ }
+
+ static Type TypeWord32Comparison(const Type& lhs, const Type& rhs,
+ ComparisonOp::Kind kind, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ auto l = TruncateWord32Input(lhs, true, zone);
+ auto r = TruncateWord32Input(rhs, true, zone);
+ switch (kind) {
+ case ComparisonOp::Kind::kSignedLessThan:
+ case ComparisonOp::Kind::kSignedLessThanOrEqual:
+ // TODO(nicohartmann@): Support this.
+ return Word32Type::Set({0, 1}, zone);
+ case ComparisonOp::Kind::kUnsignedLessThan:
+ return WordOperationTyper<32>::UnsignedLessThan(l, r, zone);
+ case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
+ return WordOperationTyper<32>::UnsignedLessThanOrEqual(l, r, zone);
+ }
+ UNREACHABLE();
+ }
+
+ static Type TypeWord64Comparison(const Type& lhs, const Type& rhs,
+ ComparisonOp::Kind kind, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ switch (kind) {
+ case ComparisonOp::Kind::kSignedLessThan:
+ case ComparisonOp::Kind::kSignedLessThanOrEqual:
+ // TODO(nicohartmann@): Support this.
+ return Word32Type::Set({0, 1}, zone);
+ case ComparisonOp::Kind::kUnsignedLessThan:
+ return WordOperationTyper<64>::UnsignedLessThan(lhs.AsWord64(),
+ rhs.AsWord64(), zone);
+ case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
+ return WordOperationTyper<64>::UnsignedLessThanOrEqual(
+ lhs.AsWord64(), rhs.AsWord64(), zone);
+ }
+ UNREACHABLE();
+ }
+
+ static Type TypeFloat32Comparison(const Type& lhs, const Type& rhs,
+ ComparisonOp::Kind kind, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ switch (kind) {
+ case ComparisonOp::Kind::kSignedLessThan:
+ return FloatOperationTyper<32>::LessThan(lhs.AsFloat32(),
+ rhs.AsFloat32(), zone);
+ case ComparisonOp::Kind::kSignedLessThanOrEqual:
+ return FloatOperationTyper<32>::LessThanOrEqual(lhs.AsFloat32(),
+ rhs.AsFloat32(), zone);
+ case ComparisonOp::Kind::kUnsignedLessThan:
+ case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
+ UNREACHABLE();
+ }
+ }
+
+ static Type TypeFloat64Comparison(const Type& lhs, const Type& rhs,
+ ComparisonOp::Kind kind, Zone* zone) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ switch (kind) {
+ case ComparisonOp::Kind::kSignedLessThan:
+ return FloatOperationTyper<64>::LessThan(lhs.AsFloat64(),
+ rhs.AsFloat64(), zone);
+ case ComparisonOp::Kind::kSignedLessThanOrEqual:
+ return FloatOperationTyper<64>::LessThanOrEqual(lhs.AsFloat64(),
+ rhs.AsFloat64(), zone);
+ case ComparisonOp::Kind::kUnsignedLessThan:
+ case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
+ UNREACHABLE();
+ }
+ }
+
+ static Word64Type ExtendWord32ToWord64(const Word32Type& t, Zone* zone) {
+ // We cannot infer much, but the lower bound of the word32 is also the lower
+ // bound of the word64 type.
+ if (t.is_wrapping()) return Word64Type::Any();
+ return Word64Type::Range(static_cast<uint64_t>(t.unsigned_min()),
+ std::numeric_limits<uint64_t>::max(), zone);
+ }
+
+ static Word32Type TruncateWord32Input(const Type& input,
+ bool implicit_word64_narrowing,
+ Zone* zone) {
+ DCHECK(!input.IsInvalid());
+ DCHECK(!input.IsNone());
+
+ if (input.IsAny()) {
+ if (allow_invalid_inputs()) return Word32Type::Any();
+ } else if (input.IsWord32()) {
+ return input.AsWord32();
+ } else if (input.IsWord64() && implicit_word64_narrowing) {
+ // The input is implicitly converted to word32.
+ const auto& w64 = input.AsWord64();
+ if (w64.is_set()) {
+ WordOperationTyper<32>::ElementsVector elements;
+ for (uint64_t e : w64.set_elements()) {
+ elements.push_back(static_cast<uint32_t>(e));
+ }
+ return WordOperationTyper<32>::FromElements(std::move(elements), zone);
+ }
+
+ if (w64.is_any() || w64.is_wrapping()) return Word32Type::Any();
+
+ if (w64.range_to() <= std::numeric_limits<uint32_t>::max()) {
+ DCHECK_LE(w64.range_from(), std::numeric_limits<uint32_t>::max());
+ return Word32Type::Range(static_cast<uint32_t>(w64.range_from()),
+ static_cast<uint32_t>(w64.range_to()), zone);
+ }
+
+ // TODO(nicohartmann@): Might compute a more precise range here.
+ return Word32Type::Any();
+ }
+
+ FATAL("Missing proper type for TruncateWord32Input. Type is: %s",
+ input.ToString().c_str());
+ }
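
// An illustrative sketch (plain C++ stand-ins, not the Word32Type/Word64Type
// classes above) of the implicit Word64 -> Word32 narrowing performed by
// TruncateWord32Input: a 64-bit range keeps its bounds only if both fit into
// 32 bits; otherwise the truncated values may wrap and precision is lost.
#include <cstdint>
#include <limits>
#include <optional>
#include <utility>

using Range32 = std::pair<uint32_t, uint32_t>;

std::optional<Range32> TruncateRange64To32(uint64_t from, uint64_t to) {
  if (from <= to && to <= std::numeric_limits<uint32_t>::max()) {
    // Both bounds are representable in 32 bits: the range carries over.
    return Range32{static_cast<uint32_t>(from), static_cast<uint32_t>(to)};
  }
  return std::nullopt;  // corresponds to Word32Type::Any() above
}
// E.g. [10, 4000000000] survives truncation unchanged, while [10, 2^32 + 5]
// does not: its truncated values cover both very small and very large words.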
+
+ class BranchRefinements {
+ public:
+ // type_getter_t has to provide the type for a given input index.
+ using type_getter_t = std::function<Type(OpIndex)>;
+    // type_refiner_t is called with the following arguments:
+ // - OpIndex: index of the operation whose type is refined by the branch.
+ // - Type: the refined type of the operation (after refinement, guaranteed
+ // to be a subtype of the original type).
+ using type_refiner_t = std::function<void(OpIndex, const Type&)>;
+
+ BranchRefinements(type_getter_t type_getter, type_refiner_t type_refiner)
+ : type_getter_(type_getter), type_refiner_(type_refiner) {
+ DCHECK(type_getter_);
+ DCHECK(type_refiner_);
+ }
+
+ void RefineTypes(const Operation& condition, bool then_branch, Zone* zone);
+
+ private:
+ template <bool allow_implicit_word64_truncation>
+ Type RefineWord32Type(const Type& type, const Type& refinement,
+ Zone* zone) {
+ // If refinement is Type::None(), the operation/branch is unreachable.
+ if (refinement.IsNone()) return Type::None();
+ DCHECK(refinement.IsWord32());
+ if constexpr (allow_implicit_word64_truncation) {
+      // Turboshaft allows implicit truncation of Word64 values to Word32. When
+      // an operation on Word32 representation computes a refinement type, the
+      // refinement is a Type::Word32() even if the actual {type} was Word64
+      // before truncation. To refine {type} correctly, we extend the
+      // {refinement} to Word64 so that it reflects the corresponding values in
+      // the original (pre-truncation) type before we intersect.
+ if (type.IsWord64()) {
+ return Word64Type::Intersect(
+ type.AsWord64(),
+ Typer::ExtendWord32ToWord64(refinement.AsWord32(), zone),
+ Type::ResolutionMode::kOverApproximate, zone);
+ }
+ }
+ // We limit the values of {type} to those in {refinement}.
+ return Word32Type::Intersect(type.AsWord32(), refinement.AsWord32(),
+ Type::ResolutionMode::kOverApproximate,
+ zone);
+ }
+
+ type_getter_t type_getter_;
+ type_refiner_t type_refiner_;
+ };
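
// A small standalone sketch (plain C++ pairs and a hypothetical helper, not
// the V8 types) of the extend-then-intersect idea in RefineWord32Type above:
// a refinement computed on the truncated Word32 value can, per
// ExtendWord32ToWord64, only raise the lower bound of the original Word64
// range, because x = high * 2^32 + low >= low.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <utility>

using Range64 = std::pair<uint64_t, uint64_t>;

Range64 RefineByWord32LowerBound(Range64 original, uint32_t refinement_min) {
  // Extend the 32-bit refinement to [refinement_min, uint64_max], then
  // intersect with the original 64-bit range.
  const Range64 extended{refinement_min,
                         std::numeric_limits<uint64_t>::max()};
  return {std::max(original.first, extended.first),
          std::min(original.second, extended.second)};
}

int main() {
  // Original type [100, 2^40]; the branch refines the truncated value to
  // [200, 300]; only the lower bound of the original range improves.
  const Range64 refined =
      RefineByWord32LowerBound({100, uint64_t{1} << 40}, 200);
  assert(refined.first == 200 && refined.second == (uint64_t{1} << 40));
  return 0;
}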
+
+ static bool InputIs(const Type& input, Type::Kind expected) {
+ if (input.IsInvalid()) {
+ if (allow_invalid_inputs()) return false;
+ } else if (input.kind() == expected) {
+ return true;
+ } else if (input.IsAny()) {
+ if (allow_invalid_inputs()) return false;
+ }
+
+ std::stringstream s;
+ s << expected;
+ FATAL("Missing proper type (%s). Type is: %s", s.str().c_str(),
+ input.ToString().c_str());
+ }
+
+ // For now we allow invalid inputs (which will then just lead to very generic
+ // typing). Once all operations are implemented, we are going to disable this.
+ static bool allow_invalid_inputs() { return true; }
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPER_H_
diff --git a/deps/v8/src/compiler/turboshaft/types.cc b/deps/v8/src/compiler/turboshaft/types.cc
new file mode 100644
index 0000000000..5cdef9eb20
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/types.cc
@@ -0,0 +1,715 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/types.h"
+
+#include <sstream>
+#include <string_view>
+
+#include "src/base/logging.h"
+#include "src/compiler/turboshaft/type-parser.h"
+#include "src/heap/factory.h"
+#include "src/objects/turboshaft-types-inl.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+namespace {
+
+std::pair<uint32_t, uint32_t> uint64_to_high_low(uint64_t value) {
+ return {static_cast<uint32_t>(value >> 32), static_cast<uint32_t>(value)};
+}
+
+} // namespace
+
+bool Type::Equals(const Type& other) const {
+ DCHECK(!IsInvalid());
+ DCHECK(!other.IsInvalid());
+
+ if (kind_ != other.kind_) return false;
+ switch (kind_) {
+ case Kind::kInvalid:
+ UNREACHABLE();
+ case Kind::kNone:
+ return true;
+ case Kind::kWord32:
+ return AsWord32().Equals(other.AsWord32());
+ case Kind::kWord64:
+ return AsWord64().Equals(other.AsWord64());
+ case Kind::kFloat32:
+ return AsFloat32().Equals(other.AsFloat32());
+ case Kind::kFloat64:
+ return AsFloat64().Equals(other.AsFloat64());
+ case Kind::kTuple:
+ return AsTuple().Equals(other.AsTuple());
+ case Kind::kAny:
+ return true;
+ }
+}
+
+bool Type::IsSubtypeOf(const Type& other) const {
+ DCHECK(!IsInvalid());
+ DCHECK(!other.IsInvalid());
+
+ if (other.IsAny() || IsNone()) return true;
+ if (kind_ != other.kind_) return false;
+
+ switch (kind_) {
+ case Kind::kInvalid:
+ case Kind::kNone:
+ UNREACHABLE();
+ case Kind::kWord32:
+ return AsWord32().IsSubtypeOf(other.AsWord32());
+ case Kind::kWord64:
+ return AsWord64().IsSubtypeOf(other.AsWord64());
+ case Kind::kFloat32:
+ return AsFloat32().IsSubtypeOf(other.AsFloat32());
+ case Kind::kFloat64:
+ return AsFloat64().IsSubtypeOf(other.AsFloat64());
+ case Kind::kTuple:
+ return AsTuple().IsSubtypeOf(other.AsTuple());
+ case Kind::kAny:
+ UNREACHABLE();
+ }
+}
+
+void Type::PrintTo(std::ostream& stream) const {
+ switch (kind_) {
+ case Kind::kInvalid:
+ UNREACHABLE();
+ case Kind::kNone:
+ stream << "None";
+ break;
+ case Kind::kWord32: {
+ AsWord32().PrintTo(stream);
+ break;
+ }
+ case Kind::kWord64: {
+ AsWord64().PrintTo(stream);
+ break;
+ }
+ case Kind::kFloat32: {
+ AsFloat32().PrintTo(stream);
+ break;
+ }
+ case Kind::kFloat64: {
+ AsFloat64().PrintTo(stream);
+ break;
+ }
+ case Kind::kTuple: {
+ AsTuple().PrintTo(stream);
+ break;
+ }
+ case Kind::kAny: {
+ stream << "Any";
+ break;
+ }
+ }
+}
+
+void Type::Print() const {
+ StdoutStream os;
+ PrintTo(os);
+ os << std::endl;
+}
+
+// static
+Type Type::LeastUpperBound(const Type& lhs, const Type& rhs, Zone* zone) {
+ if (lhs.IsAny() || rhs.IsAny()) return Type::Any();
+ if (lhs.IsNone()) return rhs;
+ if (rhs.IsNone()) return lhs;
+
+ // TODO(nicohartmann@): We might use more precise types here but currently
+ // there is not much benefit in that.
+ if (lhs.kind() != rhs.kind()) return Type::Any();
+
+ switch (lhs.kind()) {
+ case Type::Kind::kInvalid:
+ case Type::Kind::kNone:
+ case Type::Kind::kAny:
+ UNREACHABLE();
+ case Type::Kind::kWord32:
+ return Word32Type::LeastUpperBound(lhs.AsWord32(), rhs.AsWord32(), zone);
+ case Type::Kind::kWord64:
+ return Word64Type::LeastUpperBound(lhs.AsWord64(), rhs.AsWord64(), zone);
+ case Type::Kind::kFloat32:
+ return Float32Type::LeastUpperBound(lhs.AsFloat32(), rhs.AsFloat32(),
+ zone);
+ case Type::Kind::kFloat64:
+ return Float64Type::LeastUpperBound(lhs.AsFloat64(), rhs.AsFloat64(),
+ zone);
+ case Type::Kind::kTuple:
+ return TupleType::LeastUpperBound(lhs.AsTuple(), rhs.AsTuple(), zone);
+ }
+}
+
+base::Optional<Type> Type::ParseFromString(const std::string_view& str,
+ Zone* zone) {
+ TypeParser parser(str, zone);
+ return parser.Parse();
+}
+
+Handle<TurboshaftType> Type::AllocateOnHeap(Factory* factory) const {
+ DCHECK_NOT_NULL(factory);
+ switch (kind_) {
+ case Kind::kInvalid:
+ UNREACHABLE();
+ case Kind::kNone:
+ UNIMPLEMENTED();
+ case Kind::kWord32:
+ return AsWord32().AllocateOnHeap(factory);
+ case Kind::kWord64:
+ return AsWord64().AllocateOnHeap(factory);
+ case Kind::kFloat32:
+ return AsFloat32().AllocateOnHeap(factory);
+ case Kind::kFloat64:
+ return AsFloat64().AllocateOnHeap(factory);
+ case Kind::kTuple:
+ UNIMPLEMENTED();
+ case Kind::kAny:
+ UNIMPLEMENTED();
+ }
+}
+
+template <size_t Bits>
+bool WordType<Bits>::Contains(word_t value) const {
+ switch (sub_kind()) {
+ case SubKind::kRange: {
+ if (is_wrapping()) return range_to() >= value || range_from() <= value;
+ return range_from() <= value && value <= range_to();
+ }
+ case SubKind::kSet: {
+ for (int i = 0; i < set_size(); ++i) {
+ if (set_element(i) == value) return true;
+ }
+ return false;
+ }
+ }
+}
+
+template <size_t Bits>
+bool WordType<Bits>::Equals(const WordType<Bits>& other) const {
+ if (sub_kind() != other.sub_kind()) return false;
+ switch (sub_kind()) {
+ case SubKind::kRange:
+ return (range_from() == other.range_from() &&
+ range_to() == other.range_to()) ||
+ (is_any() && other.is_any());
+ case SubKind::kSet: {
+ if (set_size() != other.set_size()) return false;
+ for (int i = 0; i < set_size(); ++i) {
+ if (set_element(i) != other.set_element(i)) return false;
+ }
+ return true;
+ }
+ }
+}
+
+template <size_t Bits>
+bool WordType<Bits>::IsSubtypeOf(const WordType<Bits>& other) const {
+ if (other.is_any()) return true;
+ switch (sub_kind()) {
+ case SubKind::kRange: {
+ if (other.is_set()) return false;
+ DCHECK(other.is_range());
+ if (is_wrapping() == other.is_wrapping()) {
+ return range_from() >= other.range_from() &&
+ range_to() <= other.range_to();
+ }
+ return !is_wrapping() && (range_to() <= other.range_to() ||
+ range_from() >= other.range_from());
+ }
+ case SubKind::kSet: {
+ if (other.is_set() && set_size() > other.set_size()) return false;
+ for (int i = 0; i < set_size(); ++i) {
+ if (!other.Contains(set_element(i))) return false;
+ }
+ return true;
+ }
+ }
+}
+
+template <size_t Bits, typename word_t = typename WordType<Bits>::word_t>
+WordType<Bits> LeastUpperBoundFromRanges(word_t l_from, word_t l_to,
+ word_t r_from, word_t r_to,
+ Zone* zone) {
+ const bool lhs_wrapping = l_to < l_from;
+ const bool rhs_wrapping = r_to < r_from;
+ // Case 1: Both ranges non-wrapping
+ // lhs ---|XXX|-- --|XXX|--- -|XXXXXX|- ---|XX|--- -|XX|------
+ // rhs -|XXX|---- ----|XXX|- ---|XX|--- -|XXXXXX|- ------|XX|-
+ // ==> -|XXXXX|-- --|XXXXX|- -|XXXXXX|- -|XXXXXX|- -|XXXXXXX|-
+ if (!lhs_wrapping && !rhs_wrapping) {
+ return WordType<Bits>::Range(std::min(l_from, r_from), std::max(l_to, r_to),
+ zone);
+ }
+ // Case 2: Both ranges wrapping
+ // lhs XXX|----|XXX X|---|XXXXXX XXXXXX|---|X XX|--|XXXXXX
+ // rhs X|---|XXXXXX XXX|----|XXX XX|--|XXXXXX XXXXXX|--|XX
+ // ==> XXX|-|XXXXXX XXX|-|XXXXXX XXXXXXXXXXXX XXXXXXXXXXXX
+ if (lhs_wrapping && rhs_wrapping) {
+ const auto from = std::min(l_from, r_from);
+ const auto to = std::max(l_to, r_to);
+ if (to >= from) return WordType<Bits>::Any();
+ auto result = WordType<Bits>::Range(from, to, zone);
+ DCHECK(result.is_wrapping());
+ return result;
+ }
+
+ if (rhs_wrapping)
+ return LeastUpperBoundFromRanges<Bits>(r_from, r_to, l_from, l_to, zone);
+ DCHECK(lhs_wrapping);
+ DCHECK(!rhs_wrapping);
+ // Case 3 & 4: lhs is wrapping, rhs is not
+ // lhs XXX|----|XXX XXX|----|XXX XXXXX|--|XXX X|-------|XX
+ // rhs -------|XX|- -|XX|------- ----|XXXXX|- ---|XX|-----
+ // ==> XXX|---|XXXX XXXX|---|XXX XXXXXXXXXXXX XXXXXX|--|XX
+ if (r_from <= l_to) {
+ if (r_to <= l_to)
+      return WordType<Bits>::Range(l_from, l_to, zone);  // rhs covered by lhs
+    if (r_to >= l_from) return WordType<Bits>::Any();    // ex 3
+ auto result = WordType<Bits>::Range(l_from, r_to, zone); // ex 1
+ DCHECK(result.is_wrapping());
+ return result;
+ } else if (r_to >= l_from) {
+ if (r_from >= l_from)
+      return WordType<Bits>::Range(l_from, l_to, zone);  // rhs covered by lhs
+ DCHECK_GT(r_from, l_to); // handled above
+ auto result = WordType<Bits>::Range(r_from, l_to, zone); // ex 2
+ DCHECK(result.is_wrapping());
+ return result;
+ } else {
+ const auto df = r_from - l_to;
+ const auto dt = l_from - r_to;
+ WordType<Bits> result =
+ df > dt ? WordType<Bits>::Range(r_from, l_to, zone) // ex 4
+ : WordType<Bits>::Range(l_from, r_to, zone);
+ DCHECK(result.is_wrapping());
+ return result;
+ }
+}
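
// A minimal standalone illustration (uint8_t stand-in for word_t, not part of
// the classes above) of what a "wrapping" range denotes and why the case
// analysis above is needed: [from, to] with to < from wraps around the
// maximum value.
#include <cassert>
#include <cstdint>

bool RangeContains(uint8_t from, uint8_t to, uint8_t value) {
  if (from <= to) return from <= value && value <= to;  // ordinary range
  return value >= from || value <= to;                  // wrapping range
}

int main() {
  // The wrapping range [250, 5] covers {250..255} and {0..5}.
  assert(RangeContains(250, 5, 253));
  assert(RangeContains(250, 5, 3));
  assert(!RangeContains(250, 5, 100));
  // Its least upper bound with an ordinary range such as [10, 20] must again
  // be a single (possibly wrapping) interval; the Case 3 & 4 analysis above
  // picks whichever candidate interval adds fewer extra values.
  return 0;
}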
+
+template <size_t Bits>
+// static
+WordType<Bits> WordType<Bits>::LeastUpperBound(const WordType<Bits>& lhs,
+ const WordType<Bits>& rhs,
+ Zone* zone) {
+ if (lhs.is_set()) {
+ if (!rhs.is_set()) {
+ if (lhs.set_size() == 1) {
+ word_t e = lhs.set_element(0);
+ if (rhs.is_wrapping()) {
+ // If {rhs} already contains e, {rhs} is the upper bound.
+ if (e <= rhs.range_to() || rhs.range_from() <= e) return rhs;
+ return (e - rhs.range_to() < rhs.range_from() - e)
+ ? Range(rhs.range_from(), e, zone)
+ : Range(e, rhs.range_to(), zone);
+ }
+ return Range(std::min(e, rhs.range_from()), std::max(e, rhs.range_to()),
+ zone);
+ }
+
+ // TODO(nicohartmann@): A wrapping range may be a better fit in some
+ // cases.
+ return LeastUpperBoundFromRanges<Bits>(
+ lhs.unsigned_min(), lhs.unsigned_max(), rhs.range_from(),
+ rhs.range_to(), zone);
+ }
+
+ // Both sides are sets. We try to construct the combined set.
+ base::SmallVector<word_t, kMaxSetSize * 2> result_elements;
+ base::vector_append(result_elements, lhs.set_elements());
+ base::vector_append(result_elements, rhs.set_elements());
+ DCHECK(!result_elements.empty());
+ base::sort(result_elements);
+ auto it = std::unique(result_elements.begin(), result_elements.end());
+ result_elements.pop_back(std::distance(it, result_elements.end()));
+ if (result_elements.size() <= kMaxSetSize) {
+ return Set(result_elements, zone);
+ }
+ // We have to construct a range instead.
+ // TODO(nicohartmann@): A wrapping range may be a better fit in some cases.
+ return Range(result_elements.front(), result_elements.back(), zone);
+ } else if (rhs.is_set()) {
+ return LeastUpperBound(rhs, lhs, zone);
+ }
+
+ // Both sides are ranges.
+ return LeastUpperBoundFromRanges<Bits>(
+ lhs.range_from(), lhs.range_to(), rhs.range_from(), rhs.range_to(), zone);
+}
+
+template <size_t Bits>
+Type WordType<Bits>::Intersect(const WordType<Bits>& lhs,
+ const WordType<Bits>& rhs,
+ ResolutionMode resolution_mode, Zone* zone) {
+ if (lhs.is_any()) return rhs;
+ if (rhs.is_any()) return lhs;
+
+ if (lhs.is_set() || rhs.is_set()) {
+ const auto& x = lhs.is_set() ? lhs : rhs;
+ const auto& y = lhs.is_set() ? rhs : lhs;
+ base::SmallVector<word_t, kMaxSetSize * 2> result_elements;
+ for (int i = 0; i < x.set_size(); ++i) {
+ const word_t element = x.set_element(i);
+ if (y.Contains(element)) result_elements.push_back(element);
+ }
+ if (result_elements.empty()) return Type::None();
+ DCHECK(detail::is_unique_and_sorted(result_elements));
+ return Set(result_elements, zone);
+ }
+
+ DCHECK(lhs.is_range() && rhs.is_range());
+ const bool lhs_wrapping = lhs.is_wrapping();
+ if (!lhs_wrapping && !rhs.is_wrapping()) {
+ const auto result_from = std::max(lhs.range_from(), rhs.range_from());
+ const auto result_to = std::min(lhs.range_to(), rhs.range_to());
+ return result_to < result_from
+ ? Type::None()
+ : WordType::Range(result_from, result_to, zone);
+ }
+
+ if (lhs_wrapping && rhs.is_wrapping()) {
+ const auto result_from = std::max(lhs.range_from(), rhs.range_from());
+ const auto result_to = std::min(lhs.range_to(), rhs.range_to());
+ auto result = WordType::Range(result_from, result_to, zone);
+ DCHECK(result.is_wrapping());
+ return result;
+ }
+
+ const auto& x = lhs_wrapping ? lhs : rhs;
+ const auto& y = lhs_wrapping ? rhs : lhs;
+ DCHECK(x.is_wrapping());
+ DCHECK(!y.is_wrapping());
+ auto subrange_low = Intersect(y, Range(0, x.range_to(), zone),
+ ResolutionMode::kPreciseOrInvalid, zone);
+ DCHECK(!subrange_low.IsInvalid());
+ auto subrange_high = Intersect(
+ y, Range(x.range_from(), std::numeric_limits<word_t>::max(), zone),
+ ResolutionMode::kPreciseOrInvalid, zone);
+ DCHECK(!subrange_high.IsInvalid());
+
+ if (subrange_low.IsNone()) return subrange_high;
+ if (subrange_high.IsNone()) return subrange_low;
+ auto s_l = subrange_low.template AsWord<Bits>();
+ auto s_h = subrange_high.template AsWord<Bits>();
+
+ switch (resolution_mode) {
+ case ResolutionMode::kPreciseOrInvalid:
+ return Type::Invalid();
+ case ResolutionMode::kOverApproximate:
+ return LeastUpperBound(s_l, s_h, zone);
+ case ResolutionMode::kGreatestLowerBound:
+ return (s_l.unsigned_max() - s_l.unsigned_min() <
+ s_h.unsigned_max() - s_h.unsigned_min())
+ ? s_h
+ : s_l;
+ }
+}
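
// A concrete worked example (uint8_t stand-in, illustrative only) for the
// wrapping/non-wrapping case handled at the end of Intersect above. Take the
// wrapping range x = [200, 40] (i.e. {200..255} plus {0..40}) and the
// ordinary range y = [20, 250]:
//   subrange_low  = y intersected with [0, 40]    = [20, 40]
//   subrange_high = y intersected with [200, 255] = [200, 250]
// The precise intersection is the union of two disconnected intervals, which
// a single WordType cannot represent, so {resolution_mode} decides:
//   kPreciseOrInvalid   -> Type::Invalid()
//   kOverApproximate    -> LeastUpperBound([20, 40], [200, 250])
//   kGreatestLowerBound -> [200, 250], the wider of the two pieces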
+
+template <size_t Bits>
+void WordType<Bits>::PrintTo(std::ostream& stream) const {
+ stream << (Bits == 32 ? "Word32" : "Word64");
+ switch (sub_kind()) {
+ case SubKind::kRange:
+ stream << "[0x" << std::hex << range_from() << ", 0x" << range_to()
+ << std::dec << "]";
+ break;
+ case SubKind::kSet:
+ stream << "{" << std::hex;
+ for (int i = 0; i < set_size(); ++i) {
+ stream << (i == 0 ? "0x" : ", 0x");
+ stream << set_element(i);
+ }
+ stream << std::dec << "}";
+ break;
+ }
+}
+
+template <size_t Bits>
+Handle<TurboshaftType> WordType<Bits>::AllocateOnHeap(Factory* factory) const {
+ if constexpr (Bits == 32) {
+ if (is_range()) {
+ return factory->NewTurboshaftWord32RangeType(range_from(), range_to(),
+ AllocationType::kYoung);
+ } else {
+ DCHECK(is_set());
+ auto result = factory->NewTurboshaftWord32SetType(set_size(),
+ AllocationType::kYoung);
+ for (int i = 0; i < set_size(); ++i) {
+ result->set_elements(i, set_element(i));
+ }
+ return result;
+ }
+ } else {
+ if (is_range()) {
+ const auto [from_high, from_low] = uint64_to_high_low(range_from());
+ const auto [to_high, to_low] = uint64_to_high_low(range_to());
+ return factory->NewTurboshaftWord64RangeType(
+ from_high, from_low, to_high, to_low, AllocationType::kYoung);
+ } else {
+ DCHECK(is_set());
+ auto result = factory->NewTurboshaftWord64SetType(set_size(),
+ AllocationType::kYoung);
+ for (int i = 0; i < set_size(); ++i) {
+ const auto [high, low] = uint64_to_high_low(set_element(i));
+ result->set_elements_high(i, high);
+ result->set_elements_low(i, low);
+ }
+ return result;
+ }
+ }
+}
+
+template <size_t Bits>
+bool FloatType<Bits>::Contains(float_t value) const {
+ if (IsMinusZero(value)) return has_minus_zero();
+ if (std::isnan(value)) return has_nan();
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ return false;
+ case SubKind::kRange: {
+ return range_min() <= value && value <= range_max();
+ }
+ case SubKind::kSet: {
+ for (int i = 0; i < set_size(); ++i) {
+ if (set_element(i) == value) return true;
+ }
+ return false;
+ }
+ }
+}
+
+template <size_t Bits>
+bool FloatType<Bits>::Equals(const FloatType<Bits>& other) const {
+ if (sub_kind() != other.sub_kind()) return false;
+ if (special_values() != other.special_values()) return false;
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ return true;
+ case SubKind::kRange: {
+ return range() == other.range();
+ }
+ case SubKind::kSet: {
+ if (set_size() != other.set_size()) {
+ return false;
+ }
+ for (int i = 0; i < set_size(); ++i) {
+ if (set_element(i) != other.set_element(i)) return false;
+ }
+ return true;
+ }
+ }
+}
+
+template <size_t Bits>
+bool FloatType<Bits>::IsSubtypeOf(const FloatType<Bits>& other) const {
+ if (special_values() & ~other.special_values()) return false;
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ return true;
+ case SubKind::kRange:
+ if (!other.is_range()) {
+ // This relies on the fact that we don't have singleton ranges.
+ DCHECK_NE(range_min(), range_max());
+ return false;
+ }
+ return other.range_min() <= range_min() &&
+ range_max() <= other.range_max();
+ case SubKind::kSet: {
+ switch (other.sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ return false;
+ case SubKind::kRange:
+ return other.range_min() <= min() && max() <= other.range_max();
+ case SubKind::kSet:
+ for (int i = 0; i < set_size(); ++i) {
+ if (!other.Contains(set_element(i))) return false;
+ }
+ return true;
+ }
+ }
+ }
+}
+
+template <size_t Bits>
+// static
+FloatType<Bits> FloatType<Bits>::LeastUpperBound(const FloatType<Bits>& lhs,
+ const FloatType<Bits>& rhs,
+ Zone* zone) {
+ uint32_t special_values = lhs.special_values() | rhs.special_values();
+ if (lhs.is_any() || rhs.is_any()) {
+ return Any(special_values);
+ }
+
+ const bool lhs_finite = lhs.is_set() || lhs.is_only_special_values();
+ const bool rhs_finite = rhs.is_set() || rhs.is_only_special_values();
+
+ if (lhs_finite && rhs_finite) {
+ base::SmallVector<float_t, kMaxSetSize * 2> result_elements;
+ if (lhs.is_set()) base::vector_append(result_elements, lhs.set_elements());
+ if (rhs.is_set()) base::vector_append(result_elements, rhs.set_elements());
+ if (result_elements.empty()) {
+ return OnlySpecialValues(special_values);
+ }
+ base::sort(result_elements);
+ auto it = std::unique(result_elements.begin(), result_elements.end());
+ result_elements.pop_back(std::distance(it, result_elements.end()));
+ if (result_elements.size() <= kMaxSetSize) {
+ return Set(result_elements, special_values, zone);
+ }
+ return Range(result_elements.front(), result_elements.back(),
+ special_values, zone);
+ } else if (lhs.is_only_special_values()) {
+ return ReplacedSpecialValues(rhs, special_values);
+ } else if (rhs.is_only_special_values()) {
+ return ReplacedSpecialValues(lhs, special_values);
+ }
+
+ // We need to construct a range.
+ float_t result_min = std::min(lhs.range_or_set_min(), rhs.range_or_set_min());
+ float_t result_max = std::max(lhs.range_or_set_max(), rhs.range_or_set_max());
+ return Range(result_min, result_max, special_values, zone);
+}
+
+template <size_t Bits>
+// static
+Type FloatType<Bits>::Intersect(const FloatType<Bits>& lhs,
+ const FloatType<Bits>& rhs, Zone* zone) {
+ const uint32_t special_values = lhs.special_values() & rhs.special_values();
+ if (lhs.is_any()) return ReplacedSpecialValues(rhs, special_values);
+ if (rhs.is_any()) return ReplacedSpecialValues(lhs, special_values);
+ if (lhs.is_only_special_values() || rhs.is_only_special_values()) {
+ return special_values ? OnlySpecialValues(special_values) : Type::None();
+ }
+
+ if (lhs.is_set() || rhs.is_set()) {
+ const auto& x = lhs.is_set() ? lhs : rhs;
+ const auto& y = lhs.is_set() ? rhs : lhs;
+ base::SmallVector<float_t, kMaxSetSize * 2> result_elements;
+ for (int i = 0; i < x.set_size(); ++i) {
+ const float_t element = x.set_element(i);
+ if (y.Contains(element)) result_elements.push_back(element);
+ }
+ if (result_elements.empty()) {
+ return special_values ? OnlySpecialValues(special_values) : Type::None();
+ }
+ return Set(result_elements, special_values, zone);
+ }
+
+ DCHECK(lhs.is_range() && rhs.is_range());
+ const float_t result_min = std::max(lhs.min(), rhs.min());
+ const float_t result_max = std::min(lhs.max(), rhs.max());
+ if (result_min < result_max) {
+ return Range(result_min, result_max, special_values, zone);
+ } else if (result_min == result_max) {
+ return Set({result_min}, special_values, zone);
+ }
+ return special_values ? OnlySpecialValues(special_values) : Type::None();
+}
+
+template <size_t Bits>
+void FloatType<Bits>::PrintTo(std::ostream& stream) const {
+ auto PrintSpecials = [this](auto& stream) {
+ if (has_nan()) {
+ stream << "NaN" << (has_minus_zero() ? "|MinusZero" : "");
+ } else {
+ DCHECK(has_minus_zero());
+ stream << "MinusZero";
+ }
+ };
+ stream << (Bits == 32 ? "Float32" : "Float64");
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ PrintSpecials(stream);
+ break;
+ case SubKind::kRange:
+ stream << "[" << range_min() << ", " << range_max() << "]";
+ if (has_special_values()) {
+ stream << "|";
+ PrintSpecials(stream);
+ }
+ break;
+ case SubKind::kSet:
+ stream << "{";
+ for (int i = 0; i < set_size(); ++i) {
+ if (i != 0) stream << ", ";
+ stream << set_element(i);
+ }
+ if (has_special_values()) {
+ stream << "}|";
+ PrintSpecials(stream);
+ } else {
+ stream << "}";
+ }
+ break;
+ }
+}
+
+template <size_t Bits>
+Handle<TurboshaftType> FloatType<Bits>::AllocateOnHeap(Factory* factory) const {
+ float_t min = 0.0f, max = 0.0f;
+ constexpr uint32_t padding = 0;
+ if (is_only_special_values()) {
+ min = std::numeric_limits<float_t>::infinity();
+ max = -std::numeric_limits<float_t>::infinity();
+ return factory->NewTurboshaftFloat64RangeType(
+ special_values(), padding, min, max, AllocationType::kYoung);
+ } else if (is_range()) {
+ std::tie(min, max) = minmax();
+ return factory->NewTurboshaftFloat64RangeType(
+ special_values(), padding, min, max, AllocationType::kYoung);
+ } else {
+ DCHECK(is_set());
+ auto result = factory->NewTurboshaftFloat64SetType(
+ special_values(), set_size(), AllocationType::kYoung);
+ for (int i = 0; i < set_size(); ++i) {
+ result->set_elements(i, set_element(i));
+ }
+ return result;
+ }
+}
+
+bool TupleType::Equals(const TupleType& other) const {
+ if (size() != other.size()) return false;
+ for (int i = 0; i < size(); ++i) {
+ if (!element(i).Equals(other.element(i))) return false;
+ }
+ return true;
+}
+
+bool TupleType::IsSubtypeOf(const TupleType& other) const {
+ if (size() != other.size()) return false;
+ for (int i = 0; i < size(); ++i) {
+ if (!element(i).IsSubtypeOf(other.element(i))) return false;
+ }
+ return true;
+}
+
+// static
+Type TupleType::LeastUpperBound(const TupleType& lhs, const TupleType& rhs,
+ Zone* zone) {
+ if (lhs.size() != rhs.size()) return Type::Any();
+ Payload p;
+ p.array = zone->NewArray<Type>(lhs.size());
+ for (int i = 0; i < lhs.size(); ++i) {
+ p.array[i] = Type::LeastUpperBound(lhs.element(i), rhs.element(i), zone);
+ }
+ return TupleType{static_cast<uint8_t>(lhs.size()), p};
+}
+
+void TupleType::PrintTo(std::ostream& stream) const {
+ stream << "(";
+ for (int i = 0; i < size(); ++i) {
+ if (i != 0) stream << ", ";
+ element(i).PrintTo(stream);
+ }
+ stream << ")";
+}
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) WordType<32>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) WordType<64>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) FloatType<32>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) FloatType<64>;
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/src/compiler/turboshaft/types.h b/deps/v8/src/compiler/turboshaft/types.h
new file mode 100644
index 0000000000..3a6f4d192b
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/types.h
@@ -0,0 +1,919 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_TYPES_H_
+#define V8_COMPILER_TURBOSHAFT_TYPES_H_
+
+#include <cmath>
+#include <limits>
+
+#include "src/base/container-utils.h"
+#include "src/base/export-template.h"
+#include "src/base/logging.h"
+#include "src/base/small-vector.h"
+#include "src/common/globals.h"
+#include "src/compiler/turboshaft/fast-hash.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/turboshaft-types.h"
+#include "src/utils/ostreams.h"
+#include "src/zone/zone-containers.h"
+
+#ifdef DEBUG
+#define TURBOSHAFT_TRACE_TYPING(...) \
+ do { \
+ if (V8_UNLIKELY(v8_flags.turboshaft_trace_typing)) { \
+ PrintF(__VA_ARGS__); \
+ } \
+ } while (false)
+
+#define TURBOSHAFT_TRACE_TYPING_WITH_COLOR(colorcode, str, ...) \
+ TURBOSHAFT_TRACE_TYPING( \
+ (v8_flags.log_colour ? ("\033[" colorcode "m" str "\033[m") : str), \
+ __VA_ARGS__)
+#define TURBOSHAFT_TRACE_TYPING_OK(str, ...) \
+ TURBOSHAFT_TRACE_TYPING_WITH_COLOR("32", str, __VA_ARGS__)
+#define TURBOSHAFT_TRACE_TYPING_FAIL(str, ...) \
+ TURBOSHAFT_TRACE_TYPING_WITH_COLOR("31", str, __VA_ARGS__)
+#else
+#define TURBOSHAFT_TRACE_TYPING(...) ((void)0)
+#define TURBOSHAFT_TRACE_TYPING_WITH_COLOR(colorcode, str, ...) ((void)0)
+#define TURBOSHAFT_TRACE_TYPING_OK(str, ...) ((void)0)
+#define TURBOSHAFT_TRACE_TYPING_FAIL(str, ...) ((void)0)
+#endif // DEBUG
+
+namespace v8::internal {
+class Factory;
+}
+
+namespace v8::internal::compiler::turboshaft {
+
+namespace detail {
+
+template <typename T>
+inline bool is_unique_and_sorted(const T& container) {
+ if (std::size(container) <= 1) return true;
+ auto cur = std::begin(container);
+ auto next = cur;
+ for (++next; next != std::end(container); ++cur, ++next) {
+ if (!(*cur < *next)) return false;
+ }
+ return true;
+}
+
+template <typename T>
+inline bool is_minus_zero(T value) {
+ return IsMinusZero(value);
+}
+
+template <typename T>
+inline bool is_float_special_value(T value) {
+ return std::isnan(value) || is_minus_zero(value);
+}
+
+template <size_t Bits>
+struct TypeForBits;
+template <>
+struct TypeForBits<32> {
+ using uint_type = uint32_t;
+ using float_type = float;
+ static constexpr float_type nan =
+ std::numeric_limits<float_type>::quiet_NaN();
+};
+template <>
+struct TypeForBits<64> {
+ using uint_type = uint64_t;
+ using float_type = double;
+ static constexpr float_type nan =
+ std::numeric_limits<float_type>::quiet_NaN();
+};
+
+// gcc versions < 9 may produce the following compilation error:
+// > '<anonymous>' is used uninitialized in this function
+// if Payload_Empty is defined without any data members; see the relevant bug:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86465
+// A workaround is to add a dummy member that is zero-initialized by default.
+// More information, as well as sample code to reproduce the issue, can be
+// found in the comment section of this CL: crrev.com/c/4057111
+// TODO(nicohartmann@): Remove the dummy once all platforms use gcc >= 9.
+struct Payload_Empty {
+ uint8_t dummy = 0;
+};
+
+template <typename T>
+struct Payload_Range {
+ T min;
+ T max;
+};
+
+template <typename T>
+struct Payload_InlineSet {
+ T elements[2];
+};
+
+template <typename T>
+struct Payload_OutlineSet {
+ T* array;
+};
+
+} // namespace detail
+
+template <typename T>
+std::enable_if_t<std::is_floating_point<T>::value, T> next_smaller(T v) {
+ DCHECK(!std::isnan(v));
+ DCHECK_LT(-std::numeric_limits<T>::infinity(), v);
+ return std::nextafter(v, -std::numeric_limits<T>::infinity());
+}
+
+template <typename T>
+std::enable_if_t<std::is_floating_point<T>::value, T> next_larger(T v) {
+ DCHECK(!std::isnan(v));
+ DCHECK_LT(v, std::numeric_limits<T>::infinity());
+ return std::nextafter(v, std::numeric_limits<T>::infinity());
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, T> next_smaller(T v) {
+ DCHECK_LT(std::numeric_limits<T>::min(), v);
+ return v - 1;
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, T> next_larger(T v) {
+ DCHECK_LT(v, std::numeric_limits<T>::max());
+ return v + 1;
+}
+
+template <size_t Bits>
+using uint_type = typename detail::TypeForBits<Bits>::uint_type;
+template <size_t Bits>
+using float_type = typename detail::TypeForBits<Bits>::float_type;
+template <size_t Bits>
+constexpr float_type<Bits> nan_v = detail::TypeForBits<Bits>::nan;
+
+template <size_t Bits>
+class WordType;
+template <size_t Bits>
+class FloatType;
+class TupleType;
+
+using Word32Type = WordType<32>;
+using Word64Type = WordType<64>;
+using Float32Type = FloatType<32>;
+using Float64Type = FloatType<64>;
+
+class V8_EXPORT_PRIVATE Type {
+ public:
+ enum class Kind : uint8_t {
+ kInvalid,
+ kNone,
+ kWord32,
+ kWord64,
+ kFloat32,
+ kFloat64,
+ kTuple,
+ kAny,
+ };
+
+  // Some operations cannot express their result precisely as a type, e.g. the
+  // intersection with a wrapping range may produce two disconnected subranges,
+  // which cannot be represented. {ResolutionMode} specifies what the operation
+  // should do when the result cannot be represented precisely.
+ enum class ResolutionMode {
+ // Return Type::Invalid().
+ kPreciseOrInvalid,
+ // Return a safe over approximation.
+ kOverApproximate,
+ // Return the greatest lower bound that can be represented.
+ kGreatestLowerBound,
+ };
+
+ Type() : Type(Kind::kInvalid) {}
+
+ // Type constructors
+ static inline Type Invalid() { return Type(); }
+ static inline Type None() { return Type(Kind::kNone); }
+ static inline Type Any() { return Type(Kind::kAny); }
+
+ // Checks and casts
+ inline Kind kind() const { return kind_; }
+ inline bool IsInvalid() const { return kind_ == Kind::kInvalid; }
+ inline bool IsNone() const { return kind_ == Kind::kNone; }
+ inline bool IsWord32() const { return kind_ == Kind::kWord32; }
+ inline bool IsWord64() const { return kind_ == Kind::kWord64; }
+ inline bool IsFloat32() const { return kind_ == Kind::kFloat32; }
+ inline bool IsFloat64() const { return kind_ == Kind::kFloat64; }
+ inline bool IsTuple() const { return kind_ == Kind::kTuple; }
+ inline bool IsAny() const { return kind_ == Kind::kAny; }
+ template <size_t B>
+ inline bool IsWord() const {
+ if constexpr (B == 32)
+ return IsWord32();
+ else
+ return IsWord64();
+ }
+
+ // Casts
+ inline const Word32Type& AsWord32() const;
+ inline const Word64Type& AsWord64() const;
+ inline const Float32Type& AsFloat32() const;
+ inline const Float64Type& AsFloat64() const;
+ inline const TupleType& AsTuple() const;
+ template <size_t B>
+ inline const auto& AsWord() const {
+ if constexpr (B == 32)
+ return AsWord32();
+ else
+ return AsWord64();
+ }
+
+ // Comparison
+ bool Equals(const Type& other) const;
+ bool IsSubtypeOf(const Type& other) const;
+
+ // Printing
+ void PrintTo(std::ostream& stream) const;
+ void Print() const;
+ std::string ToString() const {
+ std::stringstream stream;
+ PrintTo(stream);
+ return stream.str();
+ }
+
+ // Other functions
+ static Type LeastUpperBound(const Type& lhs, const Type& rhs, Zone* zone);
+ static base::Optional<Type> ParseFromString(const std::string_view& str,
+ Zone* zone);
+ Handle<TurboshaftType> AllocateOnHeap(Factory* factory) const;
+
+ protected:
+ template <typename Payload>
+ Type(Kind kind, uint8_t sub_kind, uint8_t set_size, uint32_t bitfield,
+ uint8_t reserved, const Payload& payload)
+ : kind_(kind),
+ sub_kind_(sub_kind),
+ set_size_(set_size),
+ reserved_(reserved),
+ bitfield_(bitfield) {
+ static_assert(sizeof(Payload) <= sizeof(payload_));
+ memcpy(&payload_[0], &payload, sizeof(Payload));
+ if constexpr (sizeof(Payload) < sizeof(payload_)) {
+ memset(reinterpret_cast<uint8_t*>(&payload_[0]) + sizeof(Payload), 0x00,
+ sizeof(payload_) - sizeof(Payload));
+ }
+ }
+
+ template <typename Payload>
+ const Payload& get_payload() const {
+ static_assert(sizeof(Payload) <= sizeof(payload_));
+ return *reinterpret_cast<const Payload*>(&payload_[0]);
+ }
+
+ union {
+ struct {
+ Kind kind_;
+ uint8_t sub_kind_;
+ uint8_t set_size_;
+ uint8_t reserved_;
+ uint32_t bitfield_;
+ };
+ // {header_} can be used for faster hashing or comparison.
+ uint64_t header_;
+ };
+
+ private:
+ // Access through get_payload<>().
+ uint64_t payload_[2]; // Type specific data
+
+ friend struct fast_hash<Type>;
+ explicit Type(Kind kind) : Type(kind, 0, 0, 0, 0, detail::Payload_Empty{}) {
+ DCHECK(kind == Kind::kInvalid || kind == Kind::kNone || kind == Kind::kAny);
+ }
+};
+static_assert(sizeof(Type) == 24);
+
+template <size_t Bits>
+class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) WordType : public Type {
+ static_assert(Bits == 32 || Bits == 64);
+ friend class Type;
+ static constexpr int kMaxInlineSetSize = 2;
+
+ enum class SubKind : uint8_t {
+ kRange,
+ kSet,
+ };
+
+ public:
+ static constexpr int kMaxSetSize = 8;
+ using word_t = uint_type<Bits>;
+ using value_type = word_t;
+
+ // Constructors
+ static WordType Any() {
+ return Range(0, std::numeric_limits<word_t>::max(), nullptr);
+ }
+ static WordType Range(word_t from, word_t to, Zone* zone) {
+    // Normalize ranges with at most {kMaxSetSize} elements to sets.
+ if (to >= from) {
+ // (to - from + 1) <= kMaxSetSize
+ if (to - from <= kMaxSetSize - 1) {
+ // Normalizing non-wrapping ranges to a Set.
+ base::SmallVector<word_t, kMaxSetSize> elements;
+ for (word_t i = from; i < to; ++i) elements.push_back(i);
+ elements.push_back(to);
+ return Set(elements, zone);
+ }
+ } else {
+ // (max - from + 1) + (to + 1) <= kMaxSetSize
+ if ((std::numeric_limits<word_t>::max() - from + to) <= kMaxSetSize - 2) {
+ // Normalizing wrapping ranges to a Set.
+ base::SmallVector<word_t, kMaxSetSize> elements;
+ for (word_t i = from; i < std::numeric_limits<word_t>::max(); ++i) {
+ elements.push_back(i);
+ }
+ elements.push_back(std::numeric_limits<word_t>::max());
+ for (word_t i = 0; i < to; ++i) elements.push_back(i);
+ elements.push_back(to);
+ base::sort(elements);
+ return Set(elements, zone);
+ }
+ }
+ return WordType{SubKind::kRange, 0, Payload_Range{from, to}};
+ }
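
  // A short worked example (illustrative only) for the normalization above:
  // the wrapping Word32 range [0xFFFFFFFE, 0x1] has
  // (0xFFFFFFFF - 0xFFFFFFFE + 1) + (0x1 + 1) = 4 <= kMaxSetSize elements and
  // is therefore returned as the sorted Set {0x0, 0x1, 0xFFFFFFFE,
  // 0xFFFFFFFF}, while the non-wrapping range [5, 100] has 96 elements and
  // stays a range.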
+ template <size_t N>
+ static WordType Set(const base::SmallVector<word_t, N>& elements,
+ Zone* zone) {
+ return Set(base::Vector<const word_t>{elements.data(), elements.size()},
+ zone);
+ }
+ static WordType Set(const std::vector<word_t>& elements, Zone* zone) {
+ return Set(base::Vector<const word_t>{elements.data(), elements.size()},
+ zone);
+ }
+ static WordType Set(const std::initializer_list<word_t>& elements,
+ Zone* zone) {
+ return Set(base::Vector<const word_t>{elements.begin(), elements.size()},
+ zone);
+ }
+ static WordType Set(const base::Vector<const word_t>& elements, Zone* zone) {
+ DCHECK(detail::is_unique_and_sorted(elements));
+ DCHECK_IMPLIES(elements.size() > kMaxInlineSetSize, zone != nullptr);
+ DCHECK_GT(elements.size(), 0);
+ DCHECK_LE(elements.size(), kMaxSetSize);
+
+ if (elements.size() <= kMaxInlineSetSize) {
+ // Use inline storage.
+ Payload_InlineSet p;
+ DCHECK_LT(0, elements.size());
+ p.elements[0] = elements[0];
+ if (elements.size() > 1) p.elements[1] = elements[1];
+ return WordType{SubKind::kSet, static_cast<uint8_t>(elements.size()), p};
+ } else {
+ // Allocate storage in the zone.
+ Payload_OutlineSet p;
+ p.array = zone->NewArray<word_t>(elements.size());
+ DCHECK_NOT_NULL(p.array);
+ for (size_t i = 0; i < elements.size(); ++i) p.array[i] = elements[i];
+ return WordType{SubKind::kSet, static_cast<uint8_t>(elements.size()), p};
+ }
+ }
+ static WordType Constant(word_t constant) { return Set({constant}, nullptr); }
+
+ // Checks
+ bool is_range() const { return sub_kind() == SubKind::kRange; }
+ bool is_set() const { return sub_kind() == SubKind::kSet; }
+ bool is_any() const { return is_range() && range_to() + 1 == range_from(); }
+ bool is_constant() const {
+ DCHECK_EQ(set_size_ > 0, is_set());
+ return set_size_ == 1;
+ }
+ bool is_wrapping() const { return is_range() && range_from() > range_to(); }
+
+ // Accessors
+ word_t range_from() const {
+ DCHECK(is_range());
+ return get_payload<Payload_Range>().min;
+ }
+ word_t range_to() const {
+ DCHECK(is_range());
+ return get_payload<Payload_Range>().max;
+ }
+ std::pair<word_t, word_t> range() const {
+ DCHECK(is_range());
+ return {range_from(), range_to()};
+ }
+ int set_size() const {
+ DCHECK(is_set());
+ return static_cast<int>(set_size_);
+ }
+ word_t set_element(int index) const {
+ DCHECK(is_set());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, set_size());
+ return set_elements()[index];
+ }
+ base::Vector<const word_t> set_elements() const {
+ DCHECK(is_set());
+ if (set_size() <= kMaxInlineSetSize) {
+ return base::Vector<const word_t>(
+ get_payload<Payload_InlineSet>().elements, set_size());
+ } else {
+ return base::Vector<const word_t>(get_payload<Payload_OutlineSet>().array,
+ set_size());
+ }
+ }
+ base::Optional<word_t> try_get_constant() const {
+ if (!is_constant()) return base::nullopt;
+ DCHECK(is_set());
+ DCHECK_EQ(set_size(), 1);
+ return set_element(0);
+ }
+ bool is_constant(word_t value) const {
+ if (auto c = try_get_constant()) return *c == value;
+ return false;
+ }
+ word_t unsigned_min() const {
+ switch (sub_kind()) {
+ case SubKind::kRange:
+ return is_wrapping() ? word_t{0} : range_from();
+ case SubKind::kSet:
+ return set_element(0);
+ }
+ }
+ word_t unsigned_max() const {
+ switch (sub_kind()) {
+ case SubKind::kRange:
+ return is_wrapping() ? std::numeric_limits<word_t>::max() : range_to();
+ case SubKind::kSet:
+ DCHECK_GE(set_size(), 1);
+ return set_element(set_size() - 1);
+ }
+ }
+
+ // Misc
+ bool Contains(word_t value) const;
+ bool Equals(const WordType& other) const;
+ bool IsSubtypeOf(const WordType& other) const;
+ static WordType LeastUpperBound(const WordType& lhs, const WordType& rhs,
+ Zone* zone);
+ static Type Intersect(const WordType& lhs, const WordType& rhs,
+ ResolutionMode resolution_mode, Zone* zone);
+ void PrintTo(std::ostream& stream) const;
+ Handle<TurboshaftType> AllocateOnHeap(Factory* factory) const;
+
+ private:
+ static constexpr Kind KIND = Bits == 32 ? Kind::kWord32 : Kind::kWord64;
+ using Payload_Range = detail::Payload_Range<word_t>;
+ using Payload_InlineSet = detail::Payload_InlineSet<word_t>;
+ using Payload_OutlineSet = detail::Payload_OutlineSet<word_t>;
+
+ SubKind sub_kind() const { return static_cast<SubKind>(sub_kind_); }
+ template <typename Payload>
+ WordType(SubKind sub_kind, uint8_t set_size, const Payload& payload)
+ : Type(KIND, static_cast<uint8_t>(sub_kind), set_size, 0, 0, payload) {}
+};
+
+template <size_t Bits>
+class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
+ static_assert(Bits == 32 || Bits == 64);
+ friend class Type;
+ static constexpr int kMaxInlineSetSize = 2;
+
+ enum class SubKind : uint8_t {
+ kRange,
+ kSet,
+ kOnlySpecialValues,
+ };
+
+ public:
+ static constexpr int kMaxSetSize = 8;
+ using float_t = float_type<Bits>;
+ using value_type = float_t;
+
+ enum Special : uint32_t {
+ kNoSpecialValues = 0x0,
+ kNaN = 0x1,
+ kMinusZero = 0x2,
+ };
+
+ // Constructors
+ static FloatType OnlySpecialValues(uint32_t special_values) {
+ DCHECK_NE(0, special_values);
+ return FloatType{SubKind::kOnlySpecialValues, 0, special_values,
+ Payload_OnlySpecial{}};
+ }
+ static FloatType NaN() {
+ return FloatType{SubKind::kOnlySpecialValues, 0, Special::kNaN,
+ Payload_OnlySpecial{}};
+ }
+ static FloatType MinusZero() {
+ return FloatType{SubKind::kOnlySpecialValues, 0, Special::kMinusZero,
+ Payload_OnlySpecial{}};
+ }
+ static FloatType Any(uint32_t special_values = Special::kNaN |
+ Special::kMinusZero) {
+ return FloatType::Range(-std::numeric_limits<float_t>::infinity(),
+ std::numeric_limits<float_t>::infinity(),
+ special_values, nullptr);
+ }
+ static FloatType Range(float_t min, float_t max, Zone* zone) {
+ return Range(min, max, Special::kNoSpecialValues, zone);
+ }
+ static FloatType Range(float_t min, float_t max, uint32_t special_values,
+ Zone* zone) {
+ special_values |= IdentifyMinusZero(min);
+ special_values |= IdentifyMinusZero(max);
+ DCHECK(!detail::is_float_special_value(min));
+ DCHECK(!detail::is_float_special_value(max));
+ DCHECK_LE(min, max);
+ if (min == max) return Set({min}, zone);
+ return FloatType{SubKind::kRange, 0, special_values,
+ Payload_Range{min, max}};
+ }
+ template <size_t N>
+ static FloatType Set(const base::SmallVector<const float_t, N>& elements,
+ Zone* zone) {
+ return Set(elements, Special::kNoSpecialValues, zone);
+ }
+ template <size_t N>
+ static FloatType Set(const base::SmallVector<float_t, N>& elements,
+ uint32_t special_values, Zone* zone) {
+ return Set(base::Vector<const float_t>{elements.data(), elements.size()},
+ special_values, zone);
+ }
+ static FloatType Set(const std::initializer_list<float_t>& elements,
+ uint32_t special_values, Zone* zone) {
+ return Set(base::Vector<const float_t>{elements.begin(), elements.size()},
+ special_values, zone);
+ }
+ static FloatType Set(const std::vector<float_t>& elements, Zone* zone) {
+ return Set(elements, Special::kNoSpecialValues, zone);
+ }
+ static FloatType Set(const std::vector<float_t>& elements,
+ uint32_t special_values, Zone* zone) {
+ return Set(base::Vector<const float_t>{elements.data(), elements.size()},
+ special_values, zone);
+ }
+ static FloatType Set(const base::Vector<const float_t>& elements,
+ uint32_t special_values, Zone* zone) {
+ DCHECK(detail::is_unique_and_sorted(elements));
+ // NaN should be passed via {special_values} rather than {elements}.
+ DCHECK(base::none_of(elements, [](float_t f) { return std::isnan(f); }));
+ DCHECK_IMPLIES(elements.size() > kMaxInlineSetSize, zone != nullptr);
+ DCHECK_GT(elements.size(), 0);
+ DCHECK_LE(elements.size(), kMaxSetSize);
+
+ if (elements.size() <= kMaxInlineSetSize) {
+ // Use inline storage.
+ Payload_InlineSet p;
+ DCHECK_LT(0, elements.size());
+ p.elements[0] = elements[0];
+ special_values |= IdentifyMinusZero(p.elements[0]);
+ if (elements.size() > 1) {
+ p.elements[1] = elements[1];
+ special_values |= IdentifyMinusZero(p.elements[1]);
+ }
+ return FloatType{SubKind::kSet, static_cast<uint8_t>(elements.size()),
+ special_values, p};
+ } else {
+ // Allocate storage in the zone.
+ Payload_OutlineSet p;
+ p.array = zone->NewArray<float_t>(elements.size());
+ DCHECK_NOT_NULL(p.array);
+ for (size_t i = 0; i < elements.size(); ++i) {
+ p.array[i] = elements[i];
+ special_values |= IdentifyMinusZero(p.array[i]);
+ }
+ return FloatType{SubKind::kSet, static_cast<uint8_t>(elements.size()),
+ special_values, p};
+ }
+ }
+ static FloatType Constant(float_t constant) {
+ return Set({constant}, 0, nullptr);
+ }
+
+ // Checks
+ bool is_only_special_values() const {
+ return sub_kind() == SubKind::kOnlySpecialValues;
+ }
+ bool is_only_nan() const {
+ return is_only_special_values() && (special_values() == Special::kNaN);
+ }
+ bool is_only_minus_zero() const {
+ return is_only_special_values() &&
+ (special_values() == Special::kMinusZero);
+ }
+ bool is_range() const { return sub_kind() == SubKind::kRange; }
+ bool is_set() const { return sub_kind() == SubKind::kSet; }
+ bool is_any() const {
+ return is_range() &&
+ range_min() == -std::numeric_limits<float_t>::infinity() &&
+ range_max() == std::numeric_limits<float_t>::infinity();
+ }
+ bool is_constant() const {
+ DCHECK_EQ(set_size_ > 0, is_set());
+ return set_size_ == 1 && !has_special_values();
+ }
+ uint32_t special_values() const { return bitfield_; }
+ bool has_special_values() const { return special_values() != 0; }
+ bool has_nan() const { return (special_values() & Special::kNaN) != 0; }
+ bool has_minus_zero() const {
+ return (special_values() & Special::kMinusZero) != 0;
+ }
+
+ // Accessors
+ float_t range_min() const {
+ DCHECK(is_range());
+ return get_payload<Payload_Range>().min;
+ }
+ float_t range_max() const {
+ DCHECK(is_range());
+ return get_payload<Payload_Range>().max;
+ }
+ std::pair<float_t, float_t> range() const {
+ DCHECK(is_range());
+ return {range_min(), range_max()};
+ }
+ int set_size() const {
+ DCHECK(is_set());
+ return static_cast<int>(set_size_);
+ }
+ float_t set_element(int index) const {
+ DCHECK(is_set());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, set_size());
+ return set_elements()[index];
+ }
+ base::Vector<const float_t> set_elements() const {
+ DCHECK(is_set());
+ if (set_size() <= kMaxInlineSetSize) {
+ return base::Vector<const float_t>(
+ get_payload<Payload_InlineSet>().elements, set_size());
+ } else {
+ return base::Vector<const float_t>(
+ get_payload<Payload_OutlineSet>().array, set_size());
+ }
+ }
+ float_t min() const {
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ if (has_minus_zero()) return float_t{-0.0};
+ DCHECK(is_only_nan());
+ return nan_v<Bits>;
+ case SubKind::kRange:
+ if (has_minus_zero()) return std::min(float_t{-0.0}, range_min());
+ return range_min();
+ case SubKind::kSet:
+ if (has_minus_zero()) return std::min(float_t{-0.0}, set_element(0));
+ return set_element(0);
+ }
+ }
+ float_t max() const {
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ if (has_minus_zero()) return float_t{-0.0};
+ DCHECK(is_only_nan());
+ return nan_v<Bits>;
+ case SubKind::kRange:
+ if (has_minus_zero()) return std::max(float_t{-0.0}, range_max());
+ return range_max();
+ case SubKind::kSet:
+ if (has_minus_zero()) {
+ return std::max(float_t{-0.0}, set_element(set_size() - 1));
+ }
+ return set_element(set_size() - 1);
+ }
+ }
+ std::pair<float_t, float_t> minmax() const { return {min(), max()}; }
+ base::Optional<float_t> try_get_constant() const {
+ if (!is_constant()) return base::nullopt;
+ DCHECK(is_set());
+ DCHECK_EQ(set_size(), 1);
+ return set_element(0);
+ }
+ bool is_constant(float_t value) const {
+ if (V8_UNLIKELY(std::isnan(value))) return is_only_nan();
+ if (V8_UNLIKELY(IsMinusZero(value))) return is_only_minus_zero();
+ if (auto c = try_get_constant()) return *c == value;
+ return false;
+ }
+  // Returns the minimum value of a range or set, ignoring any special values
+ // (in contrast to min() above).
+ float_t range_or_set_min() const {
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ UNREACHABLE();
+ case SubKind::kRange:
+ return range_min();
+ case SubKind::kSet:
+ return set_element(0);
+ }
+ }
+ // Returns the maximum value of a range or set, ignoring any special values
+ // (in contrast to max() above).
+ float_t range_or_set_max() const {
+ switch (sub_kind()) {
+ case SubKind::kOnlySpecialValues:
+ UNREACHABLE();
+ case SubKind::kRange:
+ return range_max();
+ case SubKind::kSet:
+ return set_element(set_size() - 1);
+ }
+ }
+ std::pair<float_t, float_t> range_or_set_minmax() const {
+ return {range_or_set_min(), range_or_set_max()};
+ }
+
+ // Misc
+ bool Contains(float_t value) const;
+ bool Equals(const FloatType& other) const;
+ bool IsSubtypeOf(const FloatType& other) const;
+ static FloatType LeastUpperBound(const FloatType& lhs, const FloatType& rhs,
+ Zone* zone);
+ static Type Intersect(const FloatType& lhs, const FloatType& rhs, Zone* zone);
+ void PrintTo(std::ostream& stream) const;
+ Handle<TurboshaftType> AllocateOnHeap(Factory* factory) const;
+
+ private:
+ // This helper turns a -0 into a 0 in {value} and returns the
+ // Special::kMinusZero flag in that case. Otherwise the {value} is unchanged
+ // and Special::kNoSpecialValues is returned.
+ static uint32_t IdentifyMinusZero(float_t& value) {
+ if (V8_UNLIKELY(detail::is_minus_zero(value))) {
+ value = float_t{0};
+ return Special::kMinusZero;
+ }
+ return Special::kNoSpecialValues;
+ }
+ static FloatType ReplacedSpecialValues(const FloatType& t,
+ uint32_t special_values) {
+ auto result = t;
+ result.bitfield_ = special_values;
+ DCHECK_EQ(result.bitfield_, result.special_values());
+ return result;
+ }
+
+ static constexpr Kind KIND = Bits == 32 ? Kind::kFloat32 : Kind::kFloat64;
+ SubKind sub_kind() const { return static_cast<SubKind>(sub_kind_); }
+ using Payload_Range = detail::Payload_Range<float_t>;
+ using Payload_InlineSet = detail::Payload_InlineSet<float_t>;
+ using Payload_OutlineSet = detail::Payload_OutlineSet<float_t>;
+ using Payload_OnlySpecial = detail::Payload_Empty;
+
+ template <typename Payload>
+ FloatType(SubKind sub_kind, uint8_t set_size, uint32_t special_values,
+ const Payload& payload)
+ : Type(KIND, static_cast<uint8_t>(sub_kind), set_size, special_values, 0,
+ payload) {
+ DCHECK_EQ(special_values & ~(Special::kNaN | Special::kMinusZero), 0);
+ }
+};
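A minimal usage sketch of the API above, assuming a Zone* zone is in scope and using the FloatType<64> instantiation (aliased as Float64Type elsewhere in this header); the element values are illustrative:

  // A singleton constant type: is_constant() holds and try_get_constant() yields 1.5.
  Float64Type c = Float64Type::Constant(1.5);

  // A small set type without special values; min()/max() and
  // range_or_set_min()/range_or_set_max() then agree.
  Float64Type s = Float64Type::Set({-2.0, 0.0, 3.0}, /*special_values=*/0, zone);
  // s.minmax() == {-2.0, 3.0}; s.Contains(0.0) holds; s.has_nan() is false.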
+
+class TupleType : public Type {
+ public:
+ static constexpr int kMaxTupleSize = std::numeric_limits<uint8_t>::max();
+
+ // Constructors
+ static TupleType Tuple(const Type& element0, const Type& element1,
+ Zone* zone) {
+ Payload p;
+ p.array = zone->NewArray<Type>(2);
+ DCHECK_NOT_NULL(p.array);
+ p.array[0] = element0;
+ p.array[1] = element1;
+ return TupleType{2, p};
+ }
+
+ static TupleType Tuple(const base::Vector<Type>& elements, Zone* zone) {
+ DCHECK_LE(elements.size(), kMaxTupleSize);
+ Payload p;
+ p.array = zone->NewArray<Type>(elements.size());
+ DCHECK_NOT_NULL(p.array);
+ for (size_t i = 0; i < elements.size(); ++i) {
+ p.array[i] = elements[i];
+ }
+ return TupleType{static_cast<uint8_t>(elements.size()), p};
+ }
+
+ // Accessors
+ int size() const { return static_cast<int>(set_size_); }
+ const Type& element(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, size());
+ return get_payload<Payload>().array[index];
+ }
+ base::Vector<Type> elements() const {
+ return base::Vector<Type>{get_payload<Payload>().array,
+ static_cast<size_t>(size())};
+ }
+
+ // Misc
+ bool Equals(const TupleType& other) const;
+ bool IsSubtypeOf(const TupleType& other) const;
+ static Type LeastUpperBound(const TupleType& lhs, const TupleType& rhs,
+ Zone* zone);
+ void PrintTo(std::ostream& stream) const;
+
+ private:
+ static constexpr Kind KIND = Kind::kTuple;
+ using Payload = detail::Payload_OutlineSet<Type>;
+
+ TupleType(uint8_t tuple_size, const Payload& payload)
+ : Type(KIND, 0, tuple_size, 0, 0, payload) {}
+};
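For illustration, a minimal sketch of building a pair tuple, where word32_t and float64_t stand in for any two previously constructed Types and zone is a Zone*:

  TupleType pair = TupleType::Tuple(word32_t, float64_t, zone);
  // pair.size() == 2, pair.element(0) == word32_t, pair.element(1) == float64_t.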
+
+const Word32Type& Type::AsWord32() const {
+ DCHECK(IsWord32());
+ return *static_cast<const Word32Type*>(this);
+}
+
+const Word64Type& Type::AsWord64() const {
+ DCHECK(IsWord64());
+ return *static_cast<const Word64Type*>(this);
+}
+
+const Float32Type& Type::AsFloat32() const {
+ DCHECK(IsFloat32());
+ return *static_cast<const Float32Type*>(this);
+}
+
+const Float64Type& Type::AsFloat64() const {
+ DCHECK(IsFloat64());
+ return *static_cast<const Float64Type*>(this);
+}
+
+const TupleType& Type::AsTuple() const {
+ DCHECK(IsTuple());
+ return *static_cast<const TupleType*>(this);
+}
+
+inline std::ostream& operator<<(std::ostream& stream, Type::Kind kind) {
+ switch (kind) {
+ case Type::Kind::kInvalid:
+ return stream << "Invalid";
+ case Type::Kind::kNone:
+ return stream << "None";
+ case Type::Kind::kWord32:
+ return stream << "Word32";
+ case Type::Kind::kWord64:
+ return stream << "Word64";
+ case Type::Kind::kFloat32:
+ return stream << "Float32";
+ case Type::Kind::kFloat64:
+ return stream << "Float64";
+ case Type::Kind::kTuple:
+ return stream << "Tuple";
+ case Type::Kind::kAny:
+ return stream << "Any";
+ }
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const Type& type) {
+ type.PrintTo(stream);
+ return stream;
+}
+
+inline bool operator==(const Type& lhs, const Type& rhs) {
+ return lhs.Equals(rhs);
+}
+
+template <>
+struct fast_hash<Type> {
+ size_t operator()(const Type& v) const {
+ // TODO(nicohartmann@): Fix fast_hash for outline payload once this is
+ // required.
+ UNREACHABLE();
+ // return fast_hash_combine(v.header_, v.payload_[0], v.payload_[1]);
+ }
+};
+
+// The below exports of the explicitly instantiated template instances produce
+// build errors on v8_linux64_gcc_light_compile_dbg build with
+//
+// error: type attributes ignored after type is already defined
+// [-Werror=attributes] extern template class
+// EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) WordType<32>;
+//
+// No combination of export macros seems to be able to resolve this issue
+// although they seem to work for other classes. A temporary workaround is to
+// disable this warning here locally.
+// TODO(nicohartmann@): Ideally, we would find a better solution than to disable
+// the warning.
+#if V8_CC_GNU
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif // V8_CC_GNU
+
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) WordType<32>;
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) WordType<64>;
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType<32>;
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType<64>;
+
+#if V8_CC_GNU
+#pragma GCC diagnostic pop
+#endif // V8_CC_GNU
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_TYPES_H_
diff --git a/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc b/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc
new file mode 100644
index 0000000000..e133776754
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/undef-assembler-macros.inc
@@ -0,0 +1,40 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+// This file undefines Turboshaft's assembler macros. Include it after your
+// reducer, and don't forget to include 'define-assembler-macros.inc' beforehand.
+
+#ifndef V8_COMPILER_TURBOSHAFT_ASSEMBLER_MACROS_DEFINED
+#error \
+ "Assembler macros not defined. Did you forget to #include \"define-assembler-macros.inc\" in this file?"
+#endif
+
+#undef __
+
+#undef BIND
+#undef ELSE
+#undef ELSE_IF
+#undef ELSE_IF_LIKELY
+#undef ELSE_IF_UNLIKELY
+#undef ELSE_IF_WITH_HINT
+#undef END_IF
+#undef GOTO
+#undef GOTO_IF
+#undef GOTO_IF_LIKELY
+#undef GOTO_IF_NOT
+#undef GOTO_IF_NOT_LIKELY
+#undef GOTO_IF_NOT_UNLIKELY
+#undef GOTO_IF_UNLIKELY
+#undef IF
+#undef IF_LIKELY
+#undef IF_UNLIKELY
+#undef IF_WITH_HINT
+#undef IF_NOT
+#undef IF_NOT_LIKELY
+#undef IF_NOT_UNLIKELY
+#undef LOOP
+
+#undef V8_COMPILER_TURBOSHAFT_ASSEMBLER_MACROS_DEFINED
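A sketch of the intended include discipline in a reducer header (the file name and the reducer body are illustrative):

  // my-reducer.h (illustrative)
  #include "src/compiler/turboshaft/assembler.h"

  #include "src/compiler/turboshaft/define-assembler-macros.inc"

  // ... reducer definition using the assembler macros (IF, GOTO, BIND, ...) ...

  #include "src/compiler/turboshaft/undef-assembler-macros.inc"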
diff --git a/deps/v8/src/compiler/turboshaft/uniform-reducer-adapter.h b/deps/v8/src/compiler/turboshaft/uniform-reducer-adapter.h
new file mode 100644
index 0000000000..10047bba16
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/uniform-reducer-adapter.h
@@ -0,0 +1,160 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_UNIFORM_REDUCER_ADAPTER_H_
+#define V8_COMPILER_TURBOSHAFT_UNIFORM_REDUCER_ADAPTER_H_
+
+#include "src/compiler/turboshaft/operations.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// UniformReducerAdapter allows handling all operations uniformly during a
+// reduction by wiring all ReduceInputGraphXyz and ReduceXyz calls through
+// a single ReduceInputGraphOperation and ReduceOperation, respectively.
+//
+// This is how to use the adapter with your reducer MyReducer, which can then
+// be used in a ReducerStack like any other reducer:
+//
+// template <typename Next>
+// class MyReducer : public UniformReducerAdapter<MyReducer, Next> {
+// public:
+// TURBOSHAFT_REDUCER_BOILERPLATE()
+// using Adapter = UniformReducerAdapter<MyReducer, Next>;
+//
+// template <typename... Args>
+// explicit MyReducer(const std::tuple<Args...>& args)
+// : Adapter(args) { /* ... */ }
+//
+// OpIndex ReduceInputGraphConstant(OpIndex ig_index, const ConstantOp& op) {
+// /* Handle ConstantOps separately */
+// /* ... */
+//
+//     /* Call Adapter::ReduceInputGraphConstant(ig_index, op) instead to */
+//     /* also run through the generic handling in ReduceInputGraphOperation */
+//     return Next::ReduceInputGraphConstant(ig_index, op);
+// }
+//
+// template <typename Op, typename Continuation>
+// OpIndex ReduceInputGraphOperation(OpIndex ig_index, const Op& op) {
+// /* Handle all (other) operations uniformly */
+// /* ... */
+//
+// /* Forward to next reducer using the Continuation object */
+// return Continuation{this}.ReduceInputGraph(ig_index, op);
+// }
+//
+// OpIndex ReduceConstant(ConstantOp::Kind kind, ConstantOp::Storage st) {
+// /* Handle Constants separately */
+// /* ... */
+//
+//     /* Call Adapter::ReduceConstant(kind, st) instead to also run */
+//     /* through the generic handling in ReduceOperation */
+//     return Next::ReduceConstant(kind, st);
+// }
+//
+// template <Opcode opcode, typename Continuation, typename... Args>
+// OpIndex ReduceOperation(Args... args) {
+// /* Handle all (other) operations uniformly */
+// /* ... */
+//
+// /* Forward to next reducer using the Continuation object */
+// return Continuation{this}.Reduce(args...);
+// }
+//
+// private:
+// /* ... */
+// };
+//
+// NOTICE: Inside the ReduceOperation and ReduceInputGraphOperation callbacks,
+// you need to make a choice:
+//
+// A) Call Next::ReduceXyz (or Next::ReduceInputGraphXyz) to forward to the
+// next reducer in the stack. Then the uniform ReduceOperation (and
+// ReduceInputGraphOperation) of the current reducer is not visited for
+// OperationXyz.
+// B) Call Adapter::ReduceXyz (or Adapter::ReduceInputGraphXyz) to forward to
+// the uniform ReduceOperation (and ReduceInputGraphOperation) such that
+// OperationXyz is also processed by those (in addition to the special
+// handling in ReduceXyz and ReduceInputGraphXyz).
+//
+// For the above MyReducer, consider an OptimizationPhase<R1, MyReducer, R2>.
+// The ReduceInputGraph (RIG) and Reduce (R) implementations are then visited as
+// follows for operations OpA and OpB (and all other operations that are not
+// ConstantOp), when all reducers just forward to Next. For ConstantOp, the
+// reduction is equivalent to that of any "normal" reducer that does not use a
+// UniformReducerAdapter.
+//
+//
+// InputGraph OpA OpB ____________________________
+// | | | ___ |
+// | | | | | |
+// v v | | v v
+// R1 RIGOpA RIGOpB | | ROpA ROpB
+// | __ __ | | | | ___ ___ |
+// | | | | | | | | | | | | | |
+// | | v v | | | | | | v v | |
+// MyReducer | | RIGOperation | | | | | | ROperation | |
+// v | v | | | | v | v | v
+// (Adapter) RIGOpA | Continuation | RIGOpB | | ROpA | Continuation | ROpB
+// |____| | | |___| | | |___| | | |___|
+// | | | | | |
+// _______| |______ | | ______| |______
+// | | | | | |
+// | | | | | |
+// v v | | v v
+// R2 RIGOpA RIGOpB | | ROpA ROpB
+// | |_____| | | |
+// |_______________________________| | |
+// v v
+// OutputGraph OpA OpB
+//
+//
+template <template <typename> typename Reducer, typename Next>
+class UniformReducerAdapter : public Next {
+ public:
+ template <typename... Args>
+ explicit UniformReducerAdapter(const std::tuple<Args...>& args)
+ : Next(args) {}
+
+ template <Opcode opcode, typename Continuation, typename... Args>
+ OpIndex ReduceOperation(Args... args) {
+ return Continuation{this}.Reduce(args...);
+ }
+
+ template <typename Op, typename Continuation>
+ OpIndex ReduceInputGraphOperation(OpIndex ig_index, const Op& operation) {
+ return Continuation{this}.ReduceInputGraph(ig_index, operation);
+ }
+
+#define REDUCE(op) \
+ struct Reduce##op##Continuation final { \
+ explicit Reduce##op##Continuation(Next* _this) : this_(_this) {} \
+ OpIndex ReduceInputGraph(OpIndex ig_index, const op##Op& operation) { \
+ return this_->ReduceInputGraph##op(ig_index, operation); \
+ } \
+ template <typename... Args> \
+ OpIndex Reduce(Args... args) const { \
+ return this_->Reduce##op(args...); \
+ } \
+ Next* this_; \
+ }; \
+ OpIndex ReduceInputGraph##op(OpIndex ig_index, const op##Op& operation) { \
+ return static_cast<Reducer<Next>*>(this) \
+ ->template ReduceInputGraphOperation<op##Op, \
+ Reduce##op##Continuation>( \
+ ig_index, operation); \
+ } \
+ template <typename... Args> \
+ OpIndex Reduce##op(Args... args) { \
+ return static_cast<Reducer<Next>*>(this) \
+ ->template ReduceOperation<Opcode::k##op, Reduce##op##Continuation>( \
+ args...); \
+ }
+ TURBOSHAFT_OPERATION_LIST(REDUCE)
+#undef REDUCE
+};
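A minimal sketch of a reducer built on the adapter, here a hypothetical OpCountingReducer that observes every emitted operation through the uniform hook and forwards via the Continuation (it assumes the TURBOSHAFT_REDUCER_BOILERPLATE macro from the surrounding sources):

  template <typename Next>
  class OpCountingReducer
      : public UniformReducerAdapter<OpCountingReducer, Next> {
   public:
    TURBOSHAFT_REDUCER_BOILERPLATE()
    using Adapter = UniformReducerAdapter<OpCountingReducer, Next>;

    template <typename... Args>
    explicit OpCountingReducer(const std::tuple<Args...>& args)
        : Adapter(args) {}

    template <Opcode opcode, typename Continuation, typename... Args>
    OpIndex ReduceOperation(Args... args) {
      ++emitted_operations_;  // uniform handling for every operation
      return Continuation{this}.Reduce(args...);  // forward to the next reducer
    }

   private:
    size_t emitted_operations_ = 0;
  };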
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_UNIFORM_REDUCER_ADAPTER_H_
diff --git a/deps/v8/src/compiler/turboshaft/utils.h b/deps/v8/src/compiler/turboshaft/utils.h
index 5ef2a67ee8..62711415b6 100644
--- a/deps/v8/src/compiler/turboshaft/utils.h
+++ b/deps/v8/src/compiler/turboshaft/utils.h
@@ -82,6 +82,25 @@ bool ShouldSkipOptimizationStep();
inline bool ShouldSkipOptimizationStep() { return false; }
#endif
+// Sets `*ptr` to `new_value` while the scope is active and resets it to the
+// previous value upon destruction.
+template <class T>
+class ScopedModification {
+ public:
+ ScopedModification(T* ptr, T new_value)
+ : ptr_(ptr), old_value_(std::move(*ptr)) {
+ *ptr = std::move(new_value);
+ }
+
+ ~ScopedModification() { *ptr_ = std::move(old_value_); }
+
+ const T& old_value() const { return old_value_; }
+
+ private:
+ T* ptr_;
+ T old_value_;
+};
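A usage sketch (the flag and its name are illustrative):

  bool tracing = false;
  {
    ScopedModification<bool> enable_tracing(&tracing, true);
    // Here tracing == true and enable_tracing.old_value() == false.
  }
  // On scope exit, tracing is restored to false.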
+
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_UTILS_H_
diff --git a/deps/v8/src/compiler/turboshaft/value-numbering-reducer.h b/deps/v8/src/compiler/turboshaft/value-numbering-reducer.h
index c21c8fdec0..f2667f8e80 100644
--- a/deps/v8/src/compiler/turboshaft/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/turboshaft/value-numbering-reducer.h
@@ -71,9 +71,13 @@ namespace turboshaft {
template <class Next>
class ValueNumberingReducer : public Next {
public:
- using Next::Asm;
- ValueNumberingReducer()
- : dominator_path_(Asm().phase_zone()), depths_heads_(Asm().phase_zone()) {
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ template <class... Args>
+ explicit ValueNumberingReducer(const std::tuple<Args...>& args)
+ : Next(args),
+ dominator_path_(Asm().phase_zone()),
+ depths_heads_(Asm().phase_zone()) {
table_ = Asm().phase_zone()->template NewVector<Entry>(
base::bits::RoundUpToPowerOfTwo(
std::max<size_t>(128, Asm().input_graph().op_id_capacity() / 2)),
@@ -94,8 +98,8 @@ class ValueNumberingReducer : public Next {
TURBOSHAFT_OPERATION_LIST(EMIT_OP)
#undef EMIT_OP
- void Bind(Block* block, const Block* origin = nullptr) {
- Next::Bind(block, origin);
+ void Bind(Block* block) {
+ Next::Bind(block);
ResetToBlock(block);
dominator_path_.push_back(block);
depths_heads_.push_back(nullptr);
@@ -157,7 +161,7 @@ class ValueNumberingReducer : public Next {
(!same_block_only ||
entry.block == Asm().current_block()->index()) &&
entry_op.Cast<Op>() == op) {
- Asm().output_graph().RemoveLast();
+ Next::RemoveLast(op_idx);
return entry.value;
}
}
diff --git a/deps/v8/src/compiler/turboshaft/variable-reducer.h b/deps/v8/src/compiler/turboshaft/variable-reducer.h
new file mode 100644
index 0000000000..a5b2620750
--- /dev/null
+++ b/deps/v8/src/compiler/turboshaft/variable-reducer.h
@@ -0,0 +1,308 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TURBOSHAFT_VARIABLE_REDUCER_H_
+#define V8_COMPILER_TURBOSHAFT_VARIABLE_REDUCER_H_
+
+#include <algorithm>
+
+#include "src/base/logging.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/turboshaft/assembler.h"
+#include "src/compiler/turboshaft/graph.h"
+#include "src/compiler/turboshaft/operations.h"
+#include "src/compiler/turboshaft/representations.h"
+#include "src/compiler/turboshaft/snapshot-table.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+// When cloning a Block or duplicating an Operation, we end up with some
+// Operations of the old graph mapping to multiple Operations in the new graph.
+// When using those Operations in subsequent Operations, we need to know which
+// of the new Operations to use and, in particular, if a Block has 2
+// predecessors that have a mapping for the same old Operation, we need to
+// merge them in a Phi node. All of this is handled by the VariableReducer.
+//
+// The typical workflow when working with the VariableReducer is:
+//  - At some point, you need to introduce a Variable (for instance because
+//    you cloned a block or an Operation) and call NewFreshVariable to get a
+//    fresh Variable.
+//  - You can then Set the new-OpIndex associated with this Variable in the
+//    current Block with the Set method.
+//  - If you later need to set an OpIndex for this Variable in another Block,
+//    call Set again.
+//  - At any time, you can call Get to get the new-Operation associated with
+//    this Variable. Get will return:
+//     * if the current block is dominated by a block that did a Set on the
+//       Variable, the Operation that was Set there;
+//     * otherwise, the current block must be dominated by a Merge whose
+//       predecessors have all Set this Variable. In that case, the
+//       VariableReducer introduced a Phi in this merge and will return
+//       this Phi.
+//
+// Note that the VariableReducer does not do "old-OpIndex => Variable"
+// book-keeping: the users of the Variable should do that themselves (which
+// is what OptimizationPhase does, for instance).
+
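A rough sketch of that workflow from inside another reducer, assuming a VariableReducer is part of the assembler stack (the OpIndex names are illustrative):

  // Introduce a fresh Word32 variable for a duplicated operation.
  Variable var = Asm().NewFreshVariable(RegisterRepresentation::Word32());

  // In each predecessor block, record the copy that was emitted there.
  Asm().Set(var, copy_in_first_predecessor);   // while the first block is bound
  Asm().Set(var, copy_in_second_predecessor);  // while the second block is bound

  // At a merge point, Get returns either the dominating Set or a Phi inserted
  // by the VariableReducer over the predecessors' values.
  OpIndex merged = Asm().Get(var);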
+using Variable =
+ SnapshotTable<OpIndex, base::Optional<RegisterRepresentation>>::Key;
+
+template <class Next>
+class VariableReducer : public Next {
+ using Snapshot =
+ SnapshotTable<OpIndex, base::Optional<RegisterRepresentation>>::Snapshot;
+
+ public:
+ TURBOSHAFT_REDUCER_BOILERPLATE()
+
+ template <class... Args>
+ explicit VariableReducer(const std::tuple<Args...>& args)
+ : Next(args),
+ table_(Asm().phase_zone()),
+ block_to_snapshot_mapping_(Asm().input_graph().block_count(),
+ base::nullopt, Asm().phase_zone()),
+ predecessors_(Asm().phase_zone()) {}
+
+ void Bind(Block* new_block) {
+ Next::Bind(new_block);
+
+ SealAndSave();
+
+ predecessors_.clear();
+ for (const Block* pred = new_block->LastPredecessor(); pred != nullptr;
+ pred = pred->NeighboringPredecessor()) {
+ DCHECK_LT(pred->index().id(), block_to_snapshot_mapping_.size());
+ base::Optional<Snapshot> pred_snapshot =
+ block_to_snapshot_mapping_[pred->index().id()];
+ DCHECK(pred_snapshot.has_value());
+ predecessors_.push_back(pred_snapshot.value());
+ }
+ std::reverse(predecessors_.begin(), predecessors_.end());
+
+ auto merge_variables = [&](Variable var,
+ base::Vector<OpIndex> predecessors) -> OpIndex {
+ ConstantOp* first_constant = nullptr;
+ if (predecessors[0].valid()) {
+ first_constant = Asm()
+ .output_graph()
+ .Get(predecessors[0])
+ .template TryCast<ConstantOp>();
+ }
+ bool all_are_same_constant = first_constant != nullptr;
+
+ for (OpIndex idx : predecessors) {
+ if (!idx.valid()) {
+          // If any predecessor's value is Invalid, then we shouldn't
+          // merge {var}.
+ return OpIndex::Invalid();
+ }
+ if (all_are_same_constant) {
+ if (ConstantOp* other_constant =
+ Asm()
+ .output_graph()
+ .Get(idx)
+ .template TryCast<ConstantOp>()) {
+ all_are_same_constant = *first_constant == *other_constant;
+ } else {
+ all_are_same_constant = false;
+ }
+ }
+ }
+
+ if (all_are_same_constant) {
+ // If all of the predecessors are the same Constant, then we re-emit
+ // this Constant rather than emitting a Phi. This is a good idea in
+        // general, but is in particular needed for Constants that are used as
+        // call targets: if they were merged into a Phi, this would result in an
+ // indirect call rather than a direct one, which:
+ // - is probably slower than a direct call in general
+ // - is probably not supported for builtins on 32-bit architectures.
+ return Asm().ReduceConstant(first_constant->kind,
+ first_constant->storage);
+ }
+ return MergeOpIndices(predecessors, var.data());
+ };
+
+ table_.StartNewSnapshot(base::VectorOf(predecessors_), merge_variables);
+ current_block_ = new_block;
+ }
+
+ OpIndex Get(Variable var) { return table_.Get(var); }
+
+ OpIndex GetPredecessorValue(Variable var, int predecessor_index) {
+ return table_.GetPredecessorValue(var, predecessor_index);
+ }
+
+ void Set(Variable var, OpIndex new_index) {
+ if (V8_UNLIKELY(Asm().generating_unreachable_operations())) return;
+ table_.Set(var, new_index);
+ }
+ template <typename Rep>
+ void Set(Variable var, V<Rep> value) {
+ if (V8_UNLIKELY(Asm().generating_unreachable_operations())) return;
+ DCHECK(Rep::allows_representation(*var.data()));
+ table_.Set(var, value);
+ }
+
+ Variable NewFreshVariable(base::Optional<RegisterRepresentation> rep) {
+ return table_.NewKey(rep, OpIndex::Invalid());
+ }
+
+ private:
+ // SealAndSave seals the current snapshot, and stores it in
+ // {block_to_snapshot_mapping_}, so that it can be used for later merging.
+ void SealAndSave() {
+ if (table_.IsSealed()) {
+ DCHECK_EQ(current_block_, nullptr);
+ return;
+ }
+
+ DCHECK_NOT_NULL(current_block_);
+ Snapshot snapshot = table_.Seal();
+
+ DCHECK(current_block_->index().valid());
+ size_t id = current_block_->index().id();
+ if (id >= block_to_snapshot_mapping_.size()) {
+      // The table initially contains as many entries as there are blocks in
+      // the input graph. In most cases, the number of blocks shouldn't grow
+      // too much between the input and output graphs, so a growth factor of
+      // 1.5 should be reasonable.
+ static constexpr double kGrowthFactor = 1.5;
+ size_t new_size = std::max<size_t>(
+ id, kGrowthFactor * block_to_snapshot_mapping_.size());
+ block_to_snapshot_mapping_.resize(new_size);
+ }
+
+ block_to_snapshot_mapping_[id] = snapshot;
+ current_block_ = nullptr;
+ }
+
+ OpIndex MergeOpIndices(base::Vector<OpIndex> inputs,
+ base::Optional<RegisterRepresentation> maybe_rep) {
+ if (maybe_rep.has_value()) {
+ // Every Operation that has a RegisterRepresentation can be merged with a
+ // simple Phi.
+ return Asm().Phi(base::VectorOf(inputs), maybe_rep.value());
+ } else {
+ switch (Asm().output_graph().Get(inputs[0]).opcode) {
+ case Opcode::kStackPointerGreaterThan:
+ return Asm().Phi(base::VectorOf(inputs),
+ RegisterRepresentation::Word32());
+ case Opcode::kFrameConstant:
+ return Asm().Phi(base::VectorOf(inputs),
+ RegisterRepresentation::PointerSized());
+
+ case Opcode::kFrameState:
+          // Merge the inputs of the n FrameStates one by one.
+ return MergeFrameState(inputs);
+
+ case Opcode::kOverflowCheckedBinop:
+ case Opcode::kFloat64InsertWord32:
+ case Opcode::kStore:
+ case Opcode::kRetain:
+ case Opcode::kStackSlot:
+ case Opcode::kDeoptimize:
+ case Opcode::kDeoptimizeIf:
+ case Opcode::kTrapIf:
+ case Opcode::kParameter:
+ case Opcode::kOsrValue:
+ case Opcode::kCall:
+ case Opcode::kTailCall:
+ case Opcode::kUnreachable:
+ case Opcode::kReturn:
+ case Opcode::kGoto:
+ case Opcode::kBranch:
+ case Opcode::kSwitch:
+ case Opcode::kTuple:
+ case Opcode::kProjection:
+ case Opcode::kSelect:
+ return OpIndex::Invalid();
+
+ default:
+ // In all other cases, {maybe_rep} should have a value and we
+ // shouldn't end up here.
+ UNREACHABLE();
+ }
+ }
+ }
+
+ OpIndex MergeFrameState(base::Vector<OpIndex> frame_states_indices) {
+ base::SmallVector<const FrameStateOp*, 32> frame_states;
+ for (OpIndex idx : frame_states_indices) {
+ frame_states.push_back(
+ &Asm().output_graph().Get(idx).template Cast<FrameStateOp>());
+ }
+ const FrameStateOp* first_frame = frame_states[0];
+
+#if DEBUG
+ // Making sure that all frame states have the same number of inputs, the
+ // same "inlined" field, and the same data.
+ for (auto frame_state : frame_states) {
+ DCHECK_EQ(first_frame->input_count, frame_state->input_count);
+ DCHECK_EQ(first_frame->inlined, frame_state->inlined);
+ DCHECK_EQ(first_frame->data, frame_state->data);
+ }
+#endif
+
+ base::SmallVector<OpIndex, 32> new_inputs;
+
+ // Merging the parent frame states.
+ if (first_frame->inlined) {
+ ZoneVector<OpIndex> indices_to_merge(Asm().phase_zone());
+ bool all_parent_frame_states_are_the_same = true;
+ for (auto frame_state : frame_states) {
+ indices_to_merge.push_back(frame_state->parent_frame_state());
+ all_parent_frame_states_are_the_same =
+ all_parent_frame_states_are_the_same &&
+ first_frame->parent_frame_state() ==
+ frame_state->parent_frame_state();
+ }
+ if (all_parent_frame_states_are_the_same) {
+ new_inputs.push_back(first_frame->parent_frame_state());
+ } else {
+ OpIndex merged_parent_frame_state =
+ MergeFrameState(base::VectorOf(indices_to_merge));
+ new_inputs.push_back(merged_parent_frame_state);
+ }
+ }
+
+ // Merging the state values.
+ for (int i = 0; i < first_frame->state_values_count(); i++) {
+ ZoneVector<OpIndex> indices_to_merge(Asm().phase_zone());
+ bool all_inputs_are_the_same = true;
+ for (auto frame_state : frame_states) {
+ indices_to_merge.push_back(frame_state->state_value(i));
+ all_inputs_are_the_same =
+ all_inputs_are_the_same &&
+ first_frame->state_value(i) == frame_state->state_value(i);
+ }
+ if (all_inputs_are_the_same) {
+        // This input does not need to be merged, since it's identical for all
+        // of the frame states.
+ new_inputs.push_back(first_frame->state_value(i));
+ } else {
+ RegisterRepresentation rep = first_frame->state_value_rep(i);
+ OpIndex new_input =
+ MergeOpIndices(base::VectorOf(indices_to_merge), rep);
+ new_inputs.push_back(new_input);
+ }
+ }
+
+ return Asm().FrameState(base::VectorOf(new_inputs), first_frame->inlined,
+ first_frame->data);
+ }
+
+ SnapshotTable<OpIndex, base::Optional<RegisterRepresentation>> table_;
+ const Block* current_block_ = nullptr;
+ ZoneVector<base::Optional<Snapshot>> block_to_snapshot_mapping_;
+
+ // {predecessors_} is used during merging, but we use an instance variable for
+ // it, in order to save memory and not reallocate it for each merge.
+ ZoneVector<Snapshot> predecessors_;
+};
+
+} // namespace v8::internal::compiler::turboshaft
+
+#endif // V8_COMPILER_TURBOSHAFT_VARIABLE_REDUCER_H_
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 48638c0a63..f0769068a5 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -38,12 +38,14 @@ class V8_EXPORT_PRIVATE TypeCache final {
Type const kUint32 = Type::Unsigned32();
Type const kDoubleRepresentableInt64 = CreateRange(
std::numeric_limits<int64_t>::min(), kMaxDoubleRepresentableInt64);
+ Type const kDoubleRepresentableInt64OrMinusZero =
+ Type::Union(kDoubleRepresentableInt64, Type::MinusZero(), zone());
Type const kDoubleRepresentableUint64 = CreateRange(
std::numeric_limits<uint64_t>::min(), kMaxDoubleRepresentableUint64);
Type const kFloat32 = Type::Number();
Type const kFloat64 = Type::Number();
- Type const kBigInt64 = Type::BigInt();
- Type const kBigUint64 = Type::BigInt();
+ Type const kBigInt64 = Type::SignedBigInt64();
+ Type const kBigUint64 = Type::UnsignedBigInt64();
Type const kHoleySmi = Type::Union(Type::SignedSmall(), Type::Hole(), zone());
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index bdc94dc904..c40976fe06 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -25,10 +25,9 @@ TypedOptimization::TypedOptimization(Editor* editor,
dependencies_(dependencies),
jsgraph_(jsgraph),
broker_(broker),
- true_type_(
- Type::Constant(broker, factory()->true_value(), graph()->zone())),
+ true_type_(Type::Constant(broker, broker->true_value(), graph()->zone())),
false_type_(
- Type::Constant(broker, factory()->false_value(), graph()->zone())),
+ Type::Constant(broker, broker->false_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {}
TypedOptimization::~TypedOptimization() = default;
@@ -107,11 +106,11 @@ Reduction TypedOptimization::Reduce(Node* node) {
namespace {
-base::Optional<MapRef> GetStableMapFromObjectType(JSHeapBroker* broker,
- Type object_type) {
+OptionalMapRef GetStableMapFromObjectType(JSHeapBroker* broker,
+ Type object_type) {
if (object_type.IsHeapConstant()) {
HeapObjectRef object = object_type.AsHeapConstant()->Ref();
- MapRef object_map = object.map();
+ MapRef object_map = object.map(broker);
if (object_map.is_stable()) return object_map;
}
return {};
@@ -224,8 +223,7 @@ Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Type const object_type = NodeProperties::GetType(object);
Node* const effect = NodeProperties::GetEffectInput(node);
- base::Optional<MapRef> object_map =
- GetStableMapFromObjectType(broker(), object_type);
+ OptionalMapRef object_map = GetStableMapFromObjectType(broker(), object_type);
if (object_map.has_value()) {
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
Node* const map = NodeProperties::GetValueInput(node, i);
@@ -295,11 +293,11 @@ Reduction TypedOptimization::ReduceLoadField(Node* node) {
// (1) map cannot transition further, or
// (2) deoptimization is enabled and we can add a code dependency on the
// stability of map (to guard the Constant type information).
- base::Optional<MapRef> object_map =
+ OptionalMapRef object_map =
GetStableMapFromObjectType(broker(), object_type);
if (object_map.has_value()) {
dependencies()->DependOnStableMap(*object_map);
- Node* const value = jsgraph()->Constant(*object_map);
+ Node* const value = jsgraph()->Constant(*object_map, broker());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -411,7 +409,7 @@ Reduction TypedOptimization::ReduceReferenceEqual(Node* node) {
if (rhs_type.Is(Type::Boolean()) && rhs_type.IsHeapConstant() &&
lhs_type.Is(Type::Boolean())) {
base::Optional<bool> maybe_result =
- rhs_type.AsHeapConstant()->Ref().TryGetBooleanValue();
+ rhs_type.AsHeapConstant()->Ref().TryGetBooleanValue(broker());
if (maybe_result.has_value()) {
if (maybe_result.value()) {
return Replace(node->InputAt(0));
@@ -494,8 +492,9 @@ TypedOptimization::TryReduceStringComparisonOfStringFromSingleCharCode(
simplified()->NumberBitwiseAnd(), from_char_code_repl,
jsgraph()->Constant(std::numeric_limits<uint16_t>::max()));
}
- if (!string.GetFirstChar().has_value()) return NoChange();
- Node* constant_repl = jsgraph()->Constant(string.GetFirstChar().value());
+ if (!string.GetFirstChar(broker()).has_value()) return NoChange();
+ Node* constant_repl =
+ jsgraph()->Constant(string.GetFirstChar(broker()).value());
Node* number_comparison = nullptr;
if (inverted) {
@@ -693,25 +692,22 @@ Reduction TypedOptimization::ReduceSpeculativeToNumber(Node* node) {
Reduction TypedOptimization::ReduceTypeOf(Node* node) {
Node* const input = node->InputAt(0);
Type const type = NodeProperties::GetType(input);
- Factory* const f = factory();
if (type.Is(Type::Boolean())) {
- return Replace(jsgraph()->Constant(MakeRef(broker(), f->boolean_string())));
+ return Replace(jsgraph()->Constant(broker()->boolean_string(), broker()));
} else if (type.Is(Type::Number())) {
- return Replace(jsgraph()->Constant(MakeRef(broker(), f->number_string())));
+ return Replace(jsgraph()->Constant(broker()->number_string(), broker()));
} else if (type.Is(Type::String())) {
- return Replace(jsgraph()->Constant(MakeRef(broker(), f->string_string())));
+ return Replace(jsgraph()->Constant(broker()->string_string(), broker()));
} else if (type.Is(Type::BigInt())) {
- return Replace(jsgraph()->Constant(MakeRef(broker(), f->bigint_string())));
+ return Replace(jsgraph()->Constant(broker()->bigint_string(), broker()));
} else if (type.Is(Type::Symbol())) {
- return Replace(jsgraph()->Constant(MakeRef(broker(), f->symbol_string())));
+ return Replace(jsgraph()->Constant(broker()->symbol_string(), broker()));
} else if (type.Is(Type::OtherUndetectableOrUndefined())) {
- return Replace(
- jsgraph()->Constant(MakeRef(broker(), f->undefined_string())));
+ return Replace(jsgraph()->Constant(broker()->undefined_string(), broker()));
} else if (type.Is(Type::NonCallableOrNull())) {
- return Replace(jsgraph()->Constant(MakeRef(broker(), f->object_string())));
+ return Replace(jsgraph()->Constant(broker()->object_string(), broker()));
} else if (type.Is(Type::Function())) {
- return Replace(
- jsgraph()->Constant(MakeRef(broker(), f->function_string())));
+ return Replace(jsgraph()->Constant(broker()->function_string(), broker()));
}
return NoChange();
}
@@ -829,7 +825,7 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- base::Optional<double> number = input_value.ToNumber();
+ base::Optional<double> number = input_value.ToNumber(broker());
if (!number.has_value()) return NoChange();
return Replace(jsgraph()->Constant(number.value()));
}
@@ -837,7 +833,7 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
if (input_type.IsHeapConstant()) {
HeapObjectRef input_value = input_type.AsHeapConstant()->Ref();
double value;
- if (input_value.OddballToNumber().To(&value)) {
+ if (input_value.OddballToNumber(broker()).To(&value)) {
return Replace(jsgraph()->Constant(value));
}
}
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 5888a5cdab..09f1af2e0e 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -79,6 +79,7 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_UNARY_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_UNARY_CASE)
+ DECLARE_UNARY_CASE(ChangeUint32ToUint64)
#undef DECLARE_UNARY_CASE
#define DECLARE_BINARY_CASE(x, ...) \
case IrOpcode::k##x: \
@@ -125,7 +126,8 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_CHANGE_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
SIMPLIFIED_CHECKED_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
IF_WASM(SIMPLIFIED_WASM_OP_LIST, DECLARE_IMPOSSIBLE_CASE)
- MACHINE_SIMD_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
+ MACHINE_SIMD128_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
+ MACHINE_SIMD256_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
MACHINE_UNOP_32_LIST(DECLARE_IMPOSSIBLE_CASE)
DECLARE_IMPOSSIBLE_CASE(Word32Xor)
DECLARE_IMPOSSIBLE_CASE(Word32Sar)
@@ -158,7 +160,6 @@ class Typer::Visitor : public Reducer {
DECLARE_IMPOSSIBLE_CASE(Uint64MulHigh)
DECLARE_IMPOSSIBLE_CASE(Word64Equal)
DECLARE_IMPOSSIBLE_CASE(Int32LessThan)
- DECLARE_IMPOSSIBLE_CASE(Int32LessThanOrEqual)
DECLARE_IMPOSSIBLE_CASE(Int64LessThan)
DECLARE_IMPOSSIBLE_CASE(Int64LessThanOrEqual)
DECLARE_IMPOSSIBLE_CASE(Uint64LessThan)
@@ -215,7 +216,6 @@ class Typer::Visitor : public Reducer {
DECLARE_IMPOSSIBLE_CASE(ChangeInt32ToInt64)
DECLARE_IMPOSSIBLE_CASE(ChangeInt64ToFloat64)
DECLARE_IMPOSSIBLE_CASE(ChangeUint32ToFloat64)
- DECLARE_IMPOSSIBLE_CASE(ChangeUint32ToUint64)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat64ToFloat32)
DECLARE_IMPOSSIBLE_CASE(TruncateInt64ToInt32)
DECLARE_IMPOSSIBLE_CASE(RoundFloat64ToInt32)
@@ -240,6 +240,7 @@ class Typer::Visitor : public Reducer {
DECLARE_IMPOSSIBLE_CASE(LoadStackCheckOffset)
DECLARE_IMPOSSIBLE_CASE(LoadFramePointer)
DECLARE_IMPOSSIBLE_CASE(LoadParentFramePointer)
+ DECLARE_IMPOSSIBLE_CASE(LoadRootRegister)
DECLARE_IMPOSSIBLE_CASE(UnalignedLoad)
DECLARE_IMPOSSIBLE_CASE(UnalignedStore)
DECLARE_IMPOSSIBLE_CASE(Int32PairAdd)
@@ -250,6 +251,8 @@ class Typer::Visitor : public Reducer {
DECLARE_IMPOSSIBLE_CASE(Word32PairSar)
DECLARE_IMPOSSIBLE_CASE(ProtectedLoad)
DECLARE_IMPOSSIBLE_CASE(ProtectedStore)
+ DECLARE_IMPOSSIBLE_CASE(LoadTrapOnNull)
+ DECLARE_IMPOSSIBLE_CASE(StoreTrapOnNull)
DECLARE_IMPOSSIBLE_CASE(MemoryBarrier)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord8ToInt32)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord16ToInt32)
@@ -302,6 +305,7 @@ class Typer::Visitor : public Reducer {
Zone* zone() { return typer_->zone(); }
Graph* graph() { return typer_->graph(); }
+ JSHeapBroker* broker() { return typer_->broker(); }
void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
bool IsWeakened(NodeId node_id) {
@@ -341,6 +345,8 @@ class Typer::Visitor : public Reducer {
static Type ToName(Type, Typer*);
static Type ToNumber(Type, Typer*);
static Type ToNumberConvertBigInt(Type, Typer*);
+ static Type ToBigInt(Type, Typer*);
+ static Type ToBigIntConvertNumber(Type, Typer*);
static Type ToNumeric(Type, Typer*);
static Type ToObject(Type, Typer*);
static Type ToString(Type, Typer*);
@@ -352,6 +358,7 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
+ DECLARE_METHOD(ChangeUint32ToUint64)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name) \
static Type Name(Type lhs, Type rhs, Typer* t) { \
@@ -385,6 +392,7 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
+ DECLARE_METHOD(ChangeUint32ToUint64)
#undef DECLARE_METHOD
static Type ObjectIsArrayBufferView(Type, Typer*);
static Type ObjectIsBigInt(Type, Typer*);
@@ -415,6 +423,7 @@ class Typer::Visitor : public Reducer {
static Type NumberEqualTyper(Type, Type, Typer*);
static Type NumberLessThanTyper(Type, Type, Typer*);
static Type NumberLessThanOrEqualTyper(Type, Type, Typer*);
+ static Type BigIntCompareTyper(Type, Type, Typer*);
static Type ReferenceEqualTyper(Type, Type, Typer*);
static Type SameValueTyper(Type, Type, Typer*);
static Type SameValueNumbersOnlyTyper(Type, Type, Typer*);
@@ -675,6 +684,16 @@ Type Typer::Visitor::ToNumberConvertBigInt(Type type, Typer* t) {
}
// static
+Type Typer::Visitor::ToBigInt(Type type, Typer* t) {
+ return t->operation_typer_.ToBigInt(type);
+}
+
+// static
+Type Typer::Visitor::ToBigIntConvertNumber(Type type, Typer* t) {
+ return t->operation_typer_.ToBigIntConvertNumber(type);
+}
+
+// static
Type Typer::Visitor::ToNumeric(Type type, Typer* t) {
return t->operation_typer_.ToNumeric(type);
}
@@ -726,7 +745,7 @@ Type Typer::Visitor::ObjectIsConstructor(Type type, Typer* t) {
// TODO(turbofan): Introduce a Type::Constructor?
CHECK(!type.IsNone());
if (type.IsHeapConstant() &&
- type.AsHeapConstant()->Ref().map().is_constructor()) {
+ type.AsHeapConstant()->Ref().map(t->broker()).is_constructor()) {
return t->singleton_true_;
}
if (!type.Maybe(Type::Callable())) return t->singleton_false_;
@@ -1361,6 +1380,8 @@ DEFINE_METHOD(ToLength)
DEFINE_METHOD(ToName)
DEFINE_METHOD(ToNumber)
DEFINE_METHOD(ToNumberConvertBigInt)
+DEFINE_METHOD(ToBigInt)
+DEFINE_METHOD(ToBigIntConvertNumber)
DEFINE_METHOD(ToNumeric)
DEFINE_METHOD(ToObject)
DEFINE_METHOD(ToString)
@@ -1415,7 +1436,7 @@ Type Typer::Visitor::TypeJSCreateGeneratorObject(Node* node) {
Type Typer::Visitor::TypeJSCreateClosure(Node* node) {
SharedFunctionInfoRef shared =
- JSCreateClosureNode{node}.Parameters().shared_info(typer_->broker());
+ JSCreateClosureNode{node}.Parameters().shared_info();
if (IsClassConstructor(shared.kind())) {
return Type::ClassConstructor();
} else {
@@ -1489,7 +1510,7 @@ Type Typer::Visitor::TypeJSLoadNamed(Node* node) {
// is not a private brand here. Otherwise Type::NonInternal() is wrong.
JSLoadNamedNode n(node);
NamedAccess const& p = n.Parameters();
- DCHECK(!p.name(typer_->broker()).object()->IsPrivateBrand());
+ DCHECK(!p.name().object()->IsPrivateBrand());
#endif
return Type::NonInternal();
}
@@ -1697,10 +1718,10 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
return Type::NonInternal();
}
JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.shared().HasBuiltinId()) {
+ if (!function.shared(t->broker()).HasBuiltinId()) {
return Type::NonInternal();
}
- switch (function.shared().builtin_id()) {
+ switch (function.shared(t->broker()).builtin_id()) {
case Builtin::kMathRandom:
return Type::PlainNumber();
case Builtin::kMathFloor:
@@ -2124,6 +2145,14 @@ Type Typer::Visitor::NumberLessThanOrEqualTyper(Type lhs, Type rhs, Typer* t) {
Invert(JSCompareTyper(ToNumber(rhs, t), ToNumber(lhs, t), t), t), t);
}
+// static
+Type Typer::Visitor::BigIntCompareTyper(Type lhs, Type rhs, Typer* t) {
+ if (lhs.IsNone() || rhs.IsNone()) {
+ return Type::None();
+ }
+ return Type::Boolean();
+}
+
Type Typer::Visitor::TypeNumberEqual(Node* node) {
return TypeBinaryOp(node, NumberEqualTyper);
}
@@ -2148,6 +2177,18 @@ Type Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
return TypeBinaryOp(node, NumberLessThanOrEqualTyper);
}
+#define BIGINT_COMPARISON_BINOP(Name) \
+ Type Typer::Visitor::Type##Name(Node* node) { \
+ return TypeBinaryOp(node, BigIntCompareTyper); \
+ }
+BIGINT_COMPARISON_BINOP(BigIntEqual)
+BIGINT_COMPARISON_BINOP(BigIntLessThan)
+BIGINT_COMPARISON_BINOP(BigIntLessThanOrEqual)
+BIGINT_COMPARISON_BINOP(SpeculativeBigIntEqual)
+BIGINT_COMPARISON_BINOP(SpeculativeBigIntLessThan)
+BIGINT_COMPARISON_BINOP(SpeculativeBigIntLessThanOrEqual)
+#undef BIGINT_COMPARISON_BINOP
+
Type Typer::Visitor::TypeStringConcat(Node* node) { return Type::String(); }
Type Typer::Visitor::TypeStringToNumber(Node* node) {
@@ -2314,7 +2355,7 @@ Type Typer::Visitor::TypeCheckNotTaggedHole(Node* node) {
Type Typer::Visitor::TypeCheckClosure(Node* node) {
FeedbackCellRef cell = MakeRef(typer_->broker(), FeedbackCellOf(node->op()));
- base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ OptionalSharedFunctionInfoRef shared = cell.shared_function_info(broker());
if (!shared.has_value()) return Type::Function();
if (IsClassConstructor(shared->kind())) {
@@ -2544,6 +2585,10 @@ Type Typer::Visitor::TypeVerifyType(Node* node) {
return TypeOrNone(node->InputAt(0));
}
+Type Typer::Visitor::TypeCheckTurboshaftTypeOf(Node* node) {
+ return TypeOrNone(node->InputAt(0));
+}
+
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index c81a185da0..8b0ba9620f 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -141,12 +141,11 @@ Type::bitset Type::BitsetLub() const {
// TODO(neis): Once the broker mode kDisabled is gone, change the input type to
// MapRef and get rid of the HeapObjectType class.
template <typename MapRefLike>
-Type::bitset BitsetType::Lub(const MapRefLike& map) {
+Type::bitset BitsetType::Lub(const MapRefLike& map, JSHeapBroker* broker) {
switch (map.instance_type()) {
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
case THIN_STRING_TYPE:
- case THIN_ONE_BYTE_STRING_TYPE:
case SLICED_STRING_TYPE:
case SLICED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@@ -155,6 +154,12 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
+ case SHARED_STRING_TYPE:
+ case SHARED_EXTERNAL_STRING_TYPE:
+ case SHARED_ONE_BYTE_STRING_TYPE:
+ case SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SHARED_UNCACHED_EXTERNAL_STRING_TYPE:
+ case SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
return kString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
@@ -168,7 +173,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case BIGINT_TYPE:
return kBigInt;
case ODDBALL_TYPE:
- switch (map.oddball_type()) {
+ switch (map.oddball_type(broker)) {
case OddballType::kNone:
break;
case OddballType::kHole:
@@ -248,6 +253,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_REG_EXP_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
+ case JS_RAB_GSAB_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
@@ -257,6 +263,11 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_ITERATOR_MAP_HELPER_TYPE:
+ case JS_ITERATOR_FILTER_HELPER_TYPE:
+ case JS_ITERATOR_TAKE_HELPER_TYPE:
+ case JS_ITERATOR_DROP_HELPER_TYPE:
+ case JS_VALID_ITERATOR_WRAPPER_TYPE:
case JS_FINALIZATION_REGISTRY_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_REF_TYPE:
@@ -368,8 +379,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
case SCRIPT_TYPE:
+ case INSTRUCTION_STREAM_TYPE:
case CODE_TYPE:
- case CODE_DATA_CONTAINER_TYPE:
case PROPERTY_CELL_TYPE:
case SOURCE_TEXT_MODULE_TYPE:
case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE:
@@ -393,7 +404,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
}
// Explicit instantiation.
-template Type::bitset BitsetType::Lub<MapRef>(const MapRef& map);
+template Type::bitset BitsetType::Lub<MapRef>(const MapRef& map,
+ JSHeapBroker* broker);
Type::bitset BitsetType::Lub(double value) {
DisallowGarbageCollection no_gc;
@@ -889,6 +901,10 @@ Type Type::Constant(JSHeapBroker* broker, Handle<i::Object> value, Zone* zone) {
// consider having the graph store ObjectRefs or ObjectData pointer instead,
// which would make new ref construction here unnecessary.
ObjectRef ref = MakeRefAssumeMemoryFence(broker, value);
+ return Constant(broker, ref, zone);
+}
+
+Type Type::Constant(JSHeapBroker* broker, ObjectRef ref, Zone* zone) {
if (ref.IsSmi()) {
return Constant(static_cast<double>(ref.AsSmi()), zone);
}
@@ -898,7 +914,7 @@ Type Type::Constant(JSHeapBroker* broker, Handle<i::Object> value, Zone* zone) {
if (ref.IsString() && !ref.IsInternalizedString()) {
return Type::String();
}
- return HeapConstant(ref.AsHeapObject(), zone);
+ return HeapConstant(ref.AsHeapObject(), broker, zone);
}
Type Type::Union(Type type1, Type type2, Zone* zone) {
@@ -1136,10 +1152,12 @@ Type Type::OtherNumberConstant(double value, Zone* zone) {
}
// static
-Type Type::HeapConstant(const HeapObjectRef& value, Zone* zone) {
+Type Type::HeapConstant(const HeapObjectRef& value, JSHeapBroker* broker,
+ Zone* zone) {
DCHECK(!value.IsHeapNumber());
DCHECK_IMPLIES(value.IsString(), value.IsInternalizedString());
- BitsetType::bitset bitset = BitsetType::Lub(value.GetHeapObjectType());
+ BitsetType::bitset bitset =
+ BitsetType::Lub(value.GetHeapObjectType(broker), broker);
if (Type(bitset).IsSingleton()) return Type(bitset);
return HeapConstantType::New(value, bitset, zone);
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 015abd31c2..4d56b88477 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -205,7 +205,7 @@ namespace compiler {
V(NonCallableOrNull, kNonCallable | kNull) \
V(DetectableObject, kArray | kFunction | kBoundFunction | \
kOtherCallable | kOtherObject) \
- V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(DetectableReceiver, kDetectableObject | kProxy | kWasmObject) \
V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
V(Object, kDetectableObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy | kWasmObject) \
@@ -279,10 +279,12 @@ class V8_EXPORT_PRIVATE BitsetType {
static double Max(bitset);
static bitset Glb(double min, double max);
- static bitset Lub(HeapObjectType const& type) {
- return Lub<HeapObjectType>(type);
+ static bitset Lub(HeapObjectType const& type, JSHeapBroker* broker) {
+ return Lub<HeapObjectType>(type, broker);
+ }
+ static bitset Lub(MapRef const& map, JSHeapBroker* broker) {
+ return Lub<MapRef>(map, broker);
}
- static bitset Lub(MapRef const& map) { return Lub<MapRef>(map); }
static bitset Lub(double value);
static bitset Lub(double min, double max);
static bitset ExpandInternals(bitset bits);
@@ -306,7 +308,7 @@ class V8_EXPORT_PRIVATE BitsetType {
static inline size_t BoundariesSize();
template <typename MapRefLike>
- static bitset Lub(MapRefLike const& map);
+ static bitset Lub(MapRefLike const& map, JSHeapBroker* broker);
};
// -----------------------------------------------------------------------------
@@ -423,6 +425,7 @@ class V8_EXPORT_PRIVATE Type {
static Type Constant(JSHeapBroker* broker, Handle<i::Object> value,
Zone* zone);
+ static Type Constant(JSHeapBroker* broker, ObjectRef value, Zone* zone);
static Type Constant(double value, Zone* zone);
static Type Range(double min, double max, Zone* zone);
static Type Tuple(Type first, Type second, Type third, Zone* zone);
@@ -436,8 +439,9 @@ class V8_EXPORT_PRIVATE Type {
static Type Wasm(wasm::TypeInModule type_in_module, Zone* zone);
#endif
- static Type For(MapRef const& type) {
- return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
+ static Type For(MapRef const& type, JSHeapBroker* broker) {
+ return NewBitset(
+ BitsetType::ExpandInternals(BitsetType::Lub(type, broker)));
}
// Predicates.
@@ -555,7 +559,8 @@ class V8_EXPORT_PRIVATE Type {
static Type Range(RangeType::Limits lims, Zone* zone);
static Type OtherNumberConstant(double value, Zone* zone);
- static Type HeapConstant(const HeapObjectRef& value, Zone* zone);
+ static Type HeapConstant(const HeapObjectRef& value, JSHeapBroker* broker,
+ Zone* zone);
static bool Overlap(const RangeType* lhs, const RangeType* rhs);
static bool Contains(const RangeType* lhs, const RangeType* rhs);
diff --git a/deps/v8/src/compiler/use-info.h b/deps/v8/src/compiler/use-info.h
index 8a97ef0935..670a8d6e8b 100644
--- a/deps/v8/src/compiler/use-info.h
+++ b/deps/v8/src/compiler/use-info.h
@@ -189,6 +189,9 @@ class UseInfo {
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
static UseInfo CheckedBigIntTruncatingWord64(const FeedbackSource& feedback) {
// Note that Trunction::Word64() can safely use kIdentifyZero, because
// TypeCheckKind::kBigInt will make sure we deopt for anything other than
@@ -200,8 +203,9 @@ class UseInfo {
return UseInfo(MachineRepresentation::kWord64, Truncation::Any(),
TypeCheckKind::kBigInt64, feedback);
}
- static UseInfo Word64() {
- return UseInfo(MachineRepresentation::kWord64, Truncation::Any());
+ static UseInfo Word64(IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return UseInfo(MachineRepresentation::kWord64,
+ Truncation::Any(identify_zeros));
}
static UseInfo Word() {
return UseInfo(MachineType::PointerRepresentation(), Truncation::Any());
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 0f0bdcb1f1..b04f380f04 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -674,6 +674,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kJSToNumberConvertBigInt:
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kJSToBigInt:
+ case IrOpcode::kJSToBigIntConvertNumber:
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kJSToNumeric:
CheckTypeIs(node, Type::Numeric());
break;
@@ -990,14 +994,16 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kSpeculativeBigIntAdd:
- case IrOpcode::kSpeculativeBigIntSubtract:
- case IrOpcode::kSpeculativeBigIntMultiply:
- case IrOpcode::kSpeculativeBigIntDivide:
- case IrOpcode::kSpeculativeBigIntModulus:
- case IrOpcode::kSpeculativeBigIntBitwiseAnd:
+#define SPECULATIVE_BIGINT_BINOP(Name) case IrOpcode::k##Name:
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(SPECULATIVE_BIGINT_BINOP)
+#undef SPECULATIVE_BIGINT_BINOP
CheckTypeIs(node, Type::BigInt());
break;
+ case IrOpcode::kSpeculativeBigIntEqual:
+ case IrOpcode::kSpeculativeBigIntLessThan:
+ case IrOpcode::kSpeculativeBigIntLessThanOrEqual:
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kSpeculativeBigIntNegate:
CheckTypeIs(node, Type::BigInt());
break;
@@ -1006,20 +1012,28 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::BigInt());
break;
- case IrOpcode::kBigIntAdd:
- case IrOpcode::kBigIntSubtract:
- case IrOpcode::kBigIntMultiply:
- case IrOpcode::kBigIntDivide:
- case IrOpcode::kBigIntModulus:
- case IrOpcode::kBigIntBitwiseAnd:
+#define BIGINT_BINOP(Name) case IrOpcode::k##Name:
+ SIMPLIFIED_BIGINT_BINOP_LIST(BIGINT_BINOP)
+#undef BIGINT_BINOP
CheckValueInputIs(node, 0, Type::BigInt());
CheckValueInputIs(node, 1, Type::BigInt());
CheckTypeIs(node, Type::BigInt());
break;
+ case IrOpcode::kBigIntEqual:
+ case IrOpcode::kBigIntLessThan:
+ case IrOpcode::kBigIntLessThanOrEqual:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckValueInputIs(node, 1, Type::BigInt());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kBigIntNegate:
CheckValueInputIs(node, 0, Type::BigInt());
CheckTypeIs(node, Type::BigInt());
break;
+ case IrOpcode::kSpeculativeToBigInt:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
case IrOpcode::kNumberMultiply:
@@ -1127,6 +1141,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kIntegral32OrMinusZeroToBigInt:
+ CheckValueInputIs(node, 0, Type::Integral32OrMinusZero());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kUnsigned32Divide:
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckValueInputIs(node, 1, Type::Unsigned32());
@@ -1559,6 +1577,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedInt64Mod:
case IrOpcode::kAssertType:
case IrOpcode::kVerifyType:
+ case IrOpcode::kCheckTurboshaftTypeOf:
break;
case IrOpcode::kDoubleArrayMin:
case IrOpcode::kDoubleArrayMax:
@@ -1706,6 +1725,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kAssertNotNull:
case IrOpcode::kWasmExternInternalize:
case IrOpcode::kWasmExternExternalize:
+ case IrOpcode::kWasmStructGet:
+ case IrOpcode::kWasmStructSet:
+ case IrOpcode::kWasmArrayGet:
+ case IrOpcode::kWasmArraySet:
+ case IrOpcode::kWasmArrayLength:
+ case IrOpcode::kWasmArrayInitializeLength:
+ case IrOpcode::kStringAsWtf16:
+ case IrOpcode::kStringPrepareForGetCodeunit:
// TODO(manoskouk): What are the constraints here?
break;
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1716,6 +1743,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kLoadTrapOnNull:
+ case IrOpcode::kStoreTrapOnNull:
case IrOpcode::kStore:
case IrOpcode::kStackSlot:
case IrOpcode::kWord32And:
@@ -1897,6 +1926,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadStackCheckOffset:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
+ case IrOpcode::kLoadRootRegister:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kMemoryBarrier:
@@ -1937,7 +1967,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTraceInstruction:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
- MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
+ MACHINE_SIMD128_OP_LIST(SIMD_MACHINE_OP_CASE)
+ MACHINE_SIMD256_OP_LIST(SIMD_MACHINE_OP_CASE)
#undef SIMD_MACHINE_OP_CASE
// TODO(rossberg): Check.
@@ -2075,34 +2106,34 @@ void ScheduleVerifier::Run(Schedule* schedule) {
}
// Verify that all blocks reachable from start are in the RPO.
- BoolVector marked(static_cast<int>(count), false, zone);
+ BitVector marked(static_cast<int>(count), zone);
{
ZoneQueue<BasicBlock*> queue(zone);
queue.push(start);
- marked[start->id().ToSize()] = true;
+ marked.Add(start->id().ToInt());
while (!queue.empty()) {
BasicBlock* block = queue.front();
queue.pop();
for (size_t s = 0; s < block->SuccessorCount(); s++) {
BasicBlock* succ = block->SuccessorAt(s);
- if (!marked[succ->id().ToSize()]) {
- marked[succ->id().ToSize()] = true;
+ if (!marked.Contains(succ->id().ToInt())) {
+ marked.Add(succ->id().ToInt());
queue.push(succ);
}
}
}
}
// Verify marked blocks are in the RPO.
- for (size_t i = 0; i < count; i++) {
- BasicBlock* block = schedule->GetBlockById(BasicBlock::Id::FromSize(i));
- if (marked[i]) {
+ for (int i = 0; i < static_cast<int>(count); i++) {
+ BasicBlock* block = schedule->GetBlockById(BasicBlock::Id::FromInt(i));
+ if (marked.Contains(i)) {
CHECK_GE(block->rpo_number(), 0);
CHECK_EQ(block, rpo_order->at(block->rpo_number()));
}
}
// Verify RPO blocks are marked.
for (size_t b = 0; b < rpo_order->size(); b++) {
- CHECK(marked[rpo_order->at(b)->id().ToSize()]);
+ CHECK(marked.Contains(rpo_order->at(b)->id().ToInt()));
}
{
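
Annotation (editor's sketch, not part of the patch): the ScheduleVerifier change above swaps the BoolVector for a BitVector but keeps the same algorithm, a BFS from the start block that marks every reachable block and is then cross-checked against the RPO order. The marking loop in isolation, over a plain adjacency list:

#include <cstddef>
#include <queue>
#include <vector>

// Marks every block reachable from `start` in a successor adjacency list,
// mirroring the queue-plus-bitset walk in ScheduleVerifier::Run.
std::vector<bool> MarkReachable(const std::vector<std::vector<size_t>>& successors,
                                size_t start) {
  std::vector<bool> marked(successors.size(), false);
  std::queue<size_t> queue;
  queue.push(start);
  marked[start] = true;
  while (!queue.empty()) {
    size_t block = queue.front();
    queue.pop();
    for (size_t succ : successors[block]) {
      if (!marked[succ]) {
        marked[succ] = true;
        queue.push(succ);
      }
    }
  }
  return marked;
}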
diff --git a/deps/v8/src/compiler/wasm-call-descriptors.cc b/deps/v8/src/compiler/wasm-call-descriptors.cc
new file mode 100644
index 0000000000..14633ea9fe
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-call-descriptors.cc
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-call-descriptors.h"
+
+#include "src/common/globals.h"
+#include "src/compiler/wasm-graph-assembler.h"
+#include "src/zone/zone.h"
+
+namespace v8::internal::compiler {
+
+WasmCallDescriptors::WasmCallDescriptors(AccountingAllocator* allocator)
+ : zone_(new Zone(allocator, "wasm_call_descriptors")) {
+ for (int i = 0; i < kNumCallModes; i++) {
+ i64_to_bigint_descriptors_[i] = compiler::GetBuiltinCallDescriptor(
+ Builtin::kI64ToBigInt, zone_.get(), static_cast<StubCallMode>(i));
+ bigint_to_i64_descriptors_[i] = compiler::GetBuiltinCallDescriptor(
+ Builtin::kBigIntToI64, zone_.get(), static_cast<StubCallMode>(i));
+ bigint_to_i64_descriptor_with_framestate_ =
+ compiler::GetBuiltinCallDescriptor(Builtin::kBigIntToI64, zone_.get(),
+ StubCallMode::kCallBuiltinPointer,
+ true);
+#if V8_TARGET_ARCH_32_BIT
+ i32pair_to_bigint_descriptors_[i] = compiler::GetBuiltinCallDescriptor(
+ Builtin::kI32PairToBigInt, zone_.get(), static_cast<StubCallMode>(i));
+ bigint_to_i32pair_descriptors_[i] = compiler::GetBuiltinCallDescriptor(
+ Builtin::kBigIntToI32Pair, zone_.get(), static_cast<StubCallMode>(i));
+ bigint_to_i32pair_descriptor_with_framestate_ =
+ compiler::GetBuiltinCallDescriptor(
+ Builtin::kBigIntToI32Pair, zone_.get(),
+ StubCallMode::kCallBuiltinPointer, true);
+#endif // V8_TARGET_ARCH_32_BIT
+ }
+}
+
+#if V8_TARGET_ARCH_32_BIT
+compiler::CallDescriptor* WasmCallDescriptors::GetLoweredCallDescriptor(
+ const compiler::CallDescriptor* original) {
+ // As long as we only have six candidates, linear search is fine.
+ // If we ever support more cases, we could use a hash map or something.
+ for (int i = 0; i < kNumCallModes; i++) {
+ if (original == i64_to_bigint_descriptors_[i]) {
+ return i32pair_to_bigint_descriptors_[i];
+ }
+ if (original == bigint_to_i64_descriptors_[i]) {
+ return bigint_to_i32pair_descriptors_[i];
+ }
+ }
+ if (original == bigint_to_i64_descriptor_with_framestate_) {
+ return bigint_to_i32pair_descriptor_with_framestate_;
+ }
+ return nullptr;
+}
+#endif // V8_TARGET_ARCH_32_BIT
+
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/wasm-call-descriptors.h b/deps/v8/src/compiler/wasm-call-descriptors.h
new file mode 100644
index 0000000000..8f3fcdcbc8
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-call-descriptors.h
@@ -0,0 +1,67 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_CALL_DESCRIPTORS_H_
+#define V8_COMPILER_WASM_CALL_DESCRIPTORS_H_
+
+#include <memory>
+
+#include "src/common/globals.h"
+
+namespace v8::internal {
+
+class AccountingAllocator;
+class Zone;
+
+namespace compiler {
+class CallDescriptor;
+
+class WasmCallDescriptors {
+ public:
+ explicit WasmCallDescriptors(AccountingAllocator* allocator);
+
+ compiler::CallDescriptor* GetI64ToBigIntDescriptor(StubCallMode mode) {
+ return i64_to_bigint_descriptors_[static_cast<size_t>(mode)];
+ }
+ compiler::CallDescriptor* GetBigIntToI64Descriptor(StubCallMode mode,
+ bool needs_frame_state) {
+ if (needs_frame_state) {
+ DCHECK_EQ(mode, StubCallMode::kCallBuiltinPointer);
+ return bigint_to_i64_descriptor_with_framestate_;
+ }
+ return bigint_to_i64_descriptors_[static_cast<size_t>(mode)];
+ }
+
+#if V8_TARGET_ARCH_32_BIT
+ V8_EXPORT_PRIVATE compiler::CallDescriptor* GetLoweredCallDescriptor(
+ const compiler::CallDescriptor* original);
+#endif // V8_TARGET_ARCH_32_BIT
+
+ private:
+ static_assert(static_cast<int>(StubCallMode::kCallCodeObject) == 0);
+ static_assert(static_cast<int>(StubCallMode::kCallWasmRuntimeStub) == 1);
+ static_assert(static_cast<int>(StubCallMode::kCallBuiltinPointer) == 2);
+ static constexpr int kNumCallModes = 3;
+
+ std::unique_ptr<Zone> zone_;
+
+ compiler::CallDescriptor* i64_to_bigint_descriptors_[kNumCallModes];
+ compiler::CallDescriptor* bigint_to_i64_descriptors_[kNumCallModes];
+ compiler::CallDescriptor* bigint_to_i64_descriptor_with_framestate_;
+
+#if V8_TARGET_ARCH_32_BIT
+ compiler::CallDescriptor* i32pair_to_bigint_descriptors_[kNumCallModes];
+ compiler::CallDescriptor* bigint_to_i32pair_descriptors_[kNumCallModes];
+ compiler::CallDescriptor* bigint_to_i32pair_descriptor_with_framestate_;
+#endif // V8_TARGET_ARCH_32_BIT
+};
+
+} // namespace compiler
+} // namespace v8::internal
+
+#endif // V8_COMPILER_WASM_CALL_DESCRIPTORS_H_
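
Annotation (editor's sketch, not part of the patch): the two new files above introduce an engine-wide cache of BigInt-conversion call descriptors, one per StubCallMode, replacing the per-graph descriptors removed further down in wasm-compiler.cc. The sketch below models only the enum-indexed lookup and the 32-bit lowering search with placeholder Descriptor/StubCallMode types; it is not V8 code.

#include <array>
#include <cstddef>

enum class StubCallMode { kCallCodeObject, kCallWasmRuntimeStub, kCallBuiltinPointer };
constexpr int kNumCallModes = 3;

struct Descriptor { const char* name; };  // stand-in for compiler::CallDescriptor

class DescriptorCache {
 public:
  DescriptorCache(const std::array<Descriptor*, kNumCallModes>& i64_to_bigint,
                  const std::array<Descriptor*, kNumCallModes>& i32pair_to_bigint)
      : i64_to_bigint_(i64_to_bigint), i32pair_to_bigint_(i32pair_to_bigint) {}

  // Mirrors GetI64ToBigIntDescriptor: the call mode doubles as the array index.
  Descriptor* GetI64ToBigInt(StubCallMode mode) const {
    return i64_to_bigint_[static_cast<size_t>(mode)];
  }

  // Mirrors the 32-bit-only GetLoweredCallDescriptor: a linear search over the
  // handful of cached descriptors, mapping an i64 descriptor to its i32-pair
  // counterpart, or nullptr if the descriptor is not one of ours.
  Descriptor* GetLowered(const Descriptor* original) const {
    for (int i = 0; i < kNumCallModes; i++) {
      if (original == i64_to_bigint_[i]) return i32pair_to_bigint_[i];
    }
    return nullptr;
  }

 private:
  std::array<Descriptor*, kNumCallModes> i64_to_bigint_;
  std::array<Descriptor*, kNumCallModes> i32pair_to_bigint_;
};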
diff --git a/deps/v8/src/compiler/wasm-compiler-definitions.h b/deps/v8/src/compiler/wasm-compiler-definitions.h
index e5f9e6d6e6..34c5228d19 100644
--- a/deps/v8/src/compiler/wasm-compiler-definitions.h
+++ b/deps/v8/src/compiler/wasm-compiler-definitions.h
@@ -42,6 +42,14 @@ V8_INLINE bool operator==(const WasmTypeCheckConfig& p1,
return p1.from == p2.from && p1.to == p2.to;
}
+static constexpr int kCharWidthBailoutSentinel = 3;
+
+enum NullCheckStrategy { kExplicitNullChecks, kTrapHandler };
+
+// Static knowledge about whether a wasm-gc operation, such as struct.get, needs
+// a null check.
+enum CheckForNull { kWithoutNullCheck, kWithNullCheck };
+
} // namespace compiler
} // namespace internal
} // namespace v8
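
Annotation (editor's sketch, not part of the patch): the NullCheckStrategy and CheckForNull enums added above are combined later in this diff, in the WasmGraphBuilder constructor, which picks kTrapHandler only when the trap handler is enabled and V8_STATIC_ROOTS_BOOL holds, and otherwise falls back to explicit null checks. A minimal standalone model of that selection, with plain bools standing in for those two conditions:

enum NullCheckStrategy { kExplicitNullChecks, kTrapHandler };
enum CheckForNull { kWithoutNullCheck, kWithNullCheck };

// Mirrors the condition in the WasmGraphBuilder constructor; the booleans are
// stand-ins for trap_handler::IsTrapHandlerEnabled() and V8_STATIC_ROOTS_BOOL.
NullCheckStrategy ChooseStrategy(bool trap_handler_enabled, bool static_roots) {
  return trap_handler_enabled && static_roots ? kTrapHandler
                                              : kExplicitNullChecks;
}

// A per-operation decision then only needs the strategy and whether the value
// might be null.
bool NeedsExplicitNullCheck(NullCheckStrategy strategy, CheckForNull check) {
  return check == kWithNullCheck && strategy == kExplicitNullChecks;
}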
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index c8daa6743c..2d9f56143d 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -31,13 +31,17 @@
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
+#include "src/compiler/wasm-call-descriptors.h"
#include "src/compiler/wasm-compiler-definitions.h"
#include "src/compiler/wasm-graph-assembler.h"
+#include "src/compiler/wasm-inlining-into-js.h"
#include "src/execution/simulator-base.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/instance-type.h"
+#include "src/objects/name.h"
+#include "src/objects/string.h"
#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
@@ -93,13 +97,20 @@ MachineType assert_size(int expected_size, MachineType type) {
// Use MachineType::Pointer() over Tagged() to load root pointers because they
// do not get compressed.
-#define LOAD_ROOT(root_name, factory_name) \
+#define LOAD_ROOT(RootName, factory_name) \
(parameter_mode_ == kNoSpecialParameterMode \
? graph()->NewNode(mcgraph()->common()->HeapConstant( \
isolate_->factory()->factory_name())) \
: gasm_->LoadImmutable( \
MachineType::Pointer(), BuildLoadIsolateRoot(), \
- IsolateData::root_slot_offset(RootIndex::k##root_name)))
+ IsolateData::root_slot_offset(RootIndex::k##RootName)))
+
+#define LOAD_MUTABLE_ROOT(RootName, factory_name) \
+ (parameter_mode_ == kNoSpecialParameterMode \
+ ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
+ isolate_->factory()->factory_name())) \
+ : gasm_->Load(MachineType::Pointer(), BuildLoadIsolateRoot(), \
+ IsolateData::root_slot_offset(RootIndex::k##RootName)))
bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
@@ -121,16 +132,22 @@ WasmGraphBuilder::WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
- Parameter0Mode parameter_mode, Isolate* isolate)
+ Parameter0Mode parameter_mode, Isolate* isolate,
+ wasm::WasmFeatures enabled_features)
: gasm_(std::make_unique<WasmGraphAssembler>(mcgraph, zone)),
zone_(zone),
mcgraph_(mcgraph),
env_(env),
+ enabled_features_(enabled_features),
has_simd_(ContainsSimd(sig)),
sig_(sig),
source_position_table_(source_position_table),
parameter_mode_(parameter_mode),
- isolate_(isolate) {
+ isolate_(isolate),
+ null_check_strategy_(trap_handler::IsTrapHandlerEnabled() &&
+ V8_STATIC_ROOTS_BOOL
+ ? NullCheckStrategy::kTrapHandler
+ : NullCheckStrategy::kExplicitNullChecks) {
DCHECK_EQ(isolate == nullptr, parameter_mode_ != kNoSpecialParameterMode);
DCHECK_IMPLIES(env && env->bounds_checks == wasm::kTrapHandler,
trap_handler::IsTrapHandlerEnabled());
@@ -141,6 +158,44 @@ WasmGraphBuilder::WasmGraphBuilder(
// available.
WasmGraphBuilder::~WasmGraphBuilder() = default;
+bool WasmGraphBuilder::TryWasmInlining(int fct_index,
+ wasm::NativeModule* native_module) {
+ DCHECK(v8_flags.experimental_wasm_js_inlining);
+ DCHECK(native_module->enabled_features().has_gc());
+ DCHECK(native_module->HasWireBytes());
+ const wasm::WasmModule* module = native_module->module();
+ const wasm::WasmFunction& inlinee = module->functions[fct_index];
+ // TODO(mliedtke): What would be a proper maximum size?
+ const uint32_t kMaxWasmInlineeSize = 30;
+ if (inlinee.code.length() > kMaxWasmInlineeSize) {
+ return false;
+ }
+ if (inlinee.imported) {
+ // Inlining of imported functions is not supported.
+ return false;
+ }
+ base::Vector<const byte> bytes(native_module->wire_bytes().SubVector(
+ inlinee.code.offset(), inlinee.code.end_offset()));
+ const wasm::FunctionBody inlinee_body(inlinee.sig, inlinee.code.offset(),
+ bytes.begin(), bytes.end());
+ // If the inlinee was not validated before, do that now.
+ if (V8_UNLIKELY(!module->function_was_validated(fct_index))) {
+ wasm::WasmFeatures unused_detected_features;
+ if (ValidateFunctionBody(enabled_features_, module,
+ &unused_detected_features, inlinee_body)
+ .failed()) {
+ // At this point we cannot easily raise a compilation error any more.
+ // Since this situation is highly unlikely though, we just ignore this
+ // inlinee and move on. The same validation error will be triggered
+ // again when actually compiling the invalid function.
+ return false;
+ }
+ module->set_function_validated(fct_index);
+ }
+ return WasmIntoJSInliner::TryInlining(graph()->zone(), module, mcgraph_,
+ inlinee_body, bytes);
+}
+
void WasmGraphBuilder::Start(unsigned params) {
Node* start = graph()->NewNode(mcgraph()->common()->Start(params));
graph()->SetStart(start);
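
Annotation (editor's sketch, not part of the patch): TryWasmInlining above gates wasm-into-JS inlining on a small experimental size cap, rejects imported functions, and validates the inlinee lazily at most once, silently skipping inlining on validation failure. A standalone model of that gating, with simplified stand-in types:

#include <cstdint>

struct FunctionInfo {
  uint32_t body_size;
  bool imported;
  bool validated;
};

// Stand-in for wasm::ValidateFunctionBody(); always succeeds in this sketch.
bool ValidateBody(FunctionInfo& f) {
  f.validated = true;
  return true;
}

bool ShouldTryInlining(FunctionInfo& callee) {
  constexpr uint32_t kMaxWasmInlineeSize = 30;  // same experimental cap as the patch
  if (callee.body_size > kMaxWasmInlineeSize) return false;  // too big
  if (callee.imported) return false;  // imported functions are not inlined
  // Validate lazily, at most once; on failure just skip inlining and let the
  // regular compilation path report the error later.
  if (!callee.validated && !ValidateBody(callee)) return false;
  return true;
}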
@@ -275,10 +330,12 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
effects_and_control);
}
-Node* WasmGraphBuilder::RefNull() {
- return (v8_flags.experimental_wasm_gc && parameter_mode_ == kInstanceMode)
- ? gasm_->Null()
- : LOAD_ROOT(NullValue, null_value);
+Node* WasmGraphBuilder::RefNull(wasm::ValueType type) {
+ return (enabled_features_.has_gc() && parameter_mode_ == kInstanceMode)
+ ? gasm_->Null(type)
+ : (type == wasm::kWasmExternRef || type == wasm::kWasmNullExternRef)
+ ? LOAD_ROOT(NullValue, null_value)
+             : LOAD_ROOT(WasmNull, wasm_null);
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
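
Annotation (editor's sketch, not part of the patch): the RefNull change in the hunk above now has to pick between two distinct null constants, since externref null (the JS null value) and the wasm-internal WasmNull are separate roots in this patch. A minimal standalone model of the selection:

enum class NullConstant { kTypedNullNode, kNullValueRoot, kWasmNullRoot };

// With wasm-gc enabled in instance mode the assembler emits a typed Null node;
// otherwise the builder loads one of the two null roots depending on whether
// the requested type is an externref type.
NullConstant SelectNullConstant(bool gc_enabled, bool instance_mode,
                                bool is_extern_ref_type) {
  if (gc_enabled && instance_mode) return NullConstant::kTypedNullNode;
  return is_extern_ref_type ? NullConstant::kNullValueRoot
                            : NullConstant::kWasmNullRoot;
}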
@@ -287,12 +344,18 @@ Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
Node* maybe_function =
gasm_->LoadFixedArrayElementPtr(functions, function_index);
auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
- gasm_->GotoIfNot(gasm_->TaggedEqual(maybe_function, UndefinedValue()), &done,
- maybe_function);
+ auto create_funcref = gasm_->MakeDeferredLabel();
+  // We only need to distinguish between zero and a funcref; "IsI31" is close
+  // enough.
+ gasm_->GotoIf(gasm_->IsSmi(maybe_function), &create_funcref);
+ gasm_->Goto(&done, maybe_function);
+
+ gasm_->Bind(&create_funcref);
Node* function_from_builtin =
gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRefFunc, Operator::kNoThrow,
gasm_->Uint32Constant(function_index));
gasm_->Goto(&done, function_from_builtin);
+
gasm_->Bind(&done);
return done.PhiAt(0);
}
@@ -306,16 +369,8 @@ Node* WasmGraphBuilder::GetInstance() { return instance_node_.get(); }
Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
switch (parameter_mode_) {
case kInstanceMode:
- // For wasm functions, the IsolateRoot is loaded from the instance node so
- // that the generated code is Isolate independent.
- return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
case kWasmApiFunctionRefMode:
- // Note: Even if the sandbox is enabled, the pointer to the isolate root
- // is not encoded, much like the case above.
- // TODO(manoskouk): Decode the pointer here if that changes.
- return gasm_->Load(
- MachineType::Pointer(), Param(0),
- wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kIsolateRootOffset));
+ return gasm_->LoadRootRegister();
case kNoSpecialParameterMode:
return mcgraph()->IntPtrConstant(isolate_->isolate_root());
}
@@ -349,7 +404,9 @@ void WasmGraphBuilder::StackCheck(
Node* limit_address =
LOAD_INSTANCE_FIELD(StackLimitAddress, MachineType::Pointer());
- Node* limit = gasm_->LoadFromObject(MachineType::Pointer(), limit_address, 0);
+ // Since the limit can be mutated by a trap handler, we cannot use load
+ // elimination.
+ Node* limit = gasm_->Load(MachineType::Pointer(), limit_address, 0);
Node* check = SetEffect(graph()->NewNode(
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
@@ -741,6 +798,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
}
Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
+ wasm::ValueType type,
wasm::WasmCodePosition position) {
const Operator* op;
MachineOperatorBuilder* m = mcgraph()->machine();
@@ -1002,11 +1060,11 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
? BuildCcallConvertFloat(input, position, opcode)
: BuildIntConvertFloat(input, position, opcode);
case wasm::kExprRefIsNull:
- return IsNull(input);
+ return IsNull(input, type);
// We abuse ref.as_non_null, which isn't otherwise used in this switch, as
// a sentinel for the negation of ref.is_null.
case wasm::kExprRefAsNonNull:
- return gasm_->Int32Sub(gasm_->Int32Constant(1), IsNull(input));
+ return gasm_->Word32Equal(gasm_->Int32Constant(0), IsNull(input, type));
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
@@ -1021,8 +1079,9 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
return BuildAsmjsLoadMem(MachineType::Float32(), input);
case wasm::kExprF64AsmjsLoadMem:
return BuildAsmjsLoadMem(MachineType::Float64(), input);
- case wasm::kExprExternInternalize:
+ case wasm::kExprExternInternalize: {
return gasm_->WasmExternInternalize(input);
+ }
case wasm::kExprExternExternalize:
return gasm_->WasmExternExternalize(input);
default:
@@ -1127,9 +1186,11 @@ void WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
SetSourcePosition(control(), position);
}
-Node* WasmGraphBuilder::AssertNotNull(Node* object,
- wasm::WasmCodePosition position) {
- Node* result = gasm_->AssertNotNull(object);
+Node* WasmGraphBuilder::AssertNotNull(Node* object, wasm::ValueType type,
+ wasm::WasmCodePosition position,
+ wasm::TrapReason reason) {
+ TrapId trap_id = GetTrapIdForTrap(reason);
+ Node* result = gasm_->AssertNotNull(object, type, trap_id);
SetSourcePosition(result, position);
return result;
}
@@ -2589,10 +2650,10 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
return gasm_->Load(result_type, stack_slot, 0);
}
-Node* WasmGraphBuilder::IsNull(Node* object) {
- return (v8_flags.experimental_wasm_gc && parameter_mode_ == kInstanceMode)
- ? gasm_->IsNull(object)
- : gasm_->TaggedEqual(object, RefNull());
+Node* WasmGraphBuilder::IsNull(Node* object, wasm::ValueType type) {
+ return (enabled_features_.has_gc() && parameter_mode_ == kInstanceMode)
+ ? gasm_->IsNull(object, type)
+ : gasm_->TaggedEqual(object, RefNull(type));
}
template <typename... Args>
@@ -2832,33 +2893,98 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
&ift_instances);
- const wasm::FunctionSig* sig = env_->module->signature(sig_index);
-
Node* key = args[0];
// Bounds check against the table size.
Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
- // Check that the table entry is not null and that the type of the function is
- // **identical with** the function type declared at the call site (no
- // subtyping of functions is allowed).
- // Note: Since null entries are identified by having ift_sig_id (-1), we only
- // need one comparison.
- // TODO(9495): Change this if we should do full function subtyping instead.
- Node* isorecursive_canonical_types =
- LOAD_INSTANCE_FIELD(IsorecursiveCanonicalTypes, MachineType::Pointer());
- Node* expected_sig_id =
- gasm_->LoadImmutable(MachineType::Uint32(), isorecursive_canonical_types,
- gasm_->IntPtrConstant(sig_index * kInt32Size));
-
- Node* int32_scaled_key = gasm_->BuildChangeUint32ToUintPtr(
- gasm_->Word32Shl(key, Int32Constant(2)));
- Node* loaded_sig = gasm_->LoadFromObject(MachineType::Int32(), ift_sig_ids,
- int32_scaled_key);
- Node* sig_match = gasm_->Word32Equal(loaded_sig, expected_sig_id);
-
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ wasm::ValueType table_type = env_->module->tables[table_index].type;
+
+ bool needs_type_check = !wasm::EquivalentTypes(
+ table_type.AsNonNull(), wasm::ValueType::Ref(sig_index), env_->module,
+ env_->module);
+ bool needs_null_check = table_type.is_nullable();
+
+ // Skip check if table type matches declared signature.
+ if (needs_type_check) {
+ Node* isorecursive_canonical_types =
+ LOAD_INSTANCE_FIELD(IsorecursiveCanonicalTypes, MachineType::Pointer());
+ Node* expected_sig_id = gasm_->LoadImmutable(
+ MachineType::Uint32(), isorecursive_canonical_types,
+ gasm_->IntPtrConstant(sig_index * kInt32Size));
+
+ Node* int32_scaled_key = gasm_->BuildChangeUint32ToUintPtr(
+ gasm_->Word32Shl(key, Int32Constant(2)));
+ Node* loaded_sig = gasm_->LoadFromObject(MachineType::Int32(), ift_sig_ids,
+ int32_scaled_key);
+ Node* sig_match = gasm_->Word32Equal(loaded_sig, expected_sig_id);
+
+ if (enabled_features_.has_gc() &&
+ !env_->module->types[sig_index].is_final) {
+ // Do a full subtyping check.
+ auto end_label = gasm_->MakeLabel();
+ gasm_->GotoIf(sig_match, &end_label);
+
+ // Trap on null element.
+ if (needs_null_check) {
+ TrapIfTrue(wasm::kTrapFuncSigMismatch,
+ gasm_->Word32Equal(loaded_sig, Int32Constant(-1)), position);
+ }
+
+ Node* formal_rtt = RttCanon(sig_index);
+ int rtt_depth = wasm::GetSubtypingDepth(env_->module, sig_index);
+ DCHECK_GE(rtt_depth, 0);
+
+ // Since we have the canonical index of the real rtt, we have to load it
+ // from the isolate rtt-array (which is canonically indexed). Since this
+ // reference is weak, we have to promote it to a strong reference.
+ // Note: The reference cannot have been cleared: Since the loaded_sig
+ // corresponds to a function of the same canonical type, that function
+ // will have kept the type alive.
+ Node* rtts = LOAD_MUTABLE_ROOT(WasmCanonicalRtts, wasm_canonical_rtts);
+ Node* real_rtt =
+ gasm_->WordAnd(gasm_->LoadWeakArrayListElement(rtts, loaded_sig),
+ gasm_->IntPtrConstant(~kWeakHeapObjectMask));
+ Node* type_info = gasm_->LoadWasmTypeInfo(real_rtt);
+
+ // If the depth of the rtt is known to be less than the minimum supertype
+ // array length, we can access the supertype without bounds-checking the
+ // supertype array.
+ if (static_cast<uint32_t>(rtt_depth) >=
+ wasm::kMinimumSupertypeArraySize) {
+ Node* supertypes_length =
+ gasm_->BuildChangeSmiToIntPtr(gasm_->LoadImmutableFromObject(
+ MachineType::TaggedSigned(), type_info,
+ wasm::ObjectAccess::ToTagged(
+ WasmTypeInfo::kSupertypesLengthOffset)));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch,
+ gasm_->UintLessThan(gasm_->IntPtrConstant(rtt_depth),
+ supertypes_length),
+ position);
+ }
+
+ Node* maybe_match = gasm_->LoadImmutableFromObject(
+ MachineType::TaggedPointer(), type_info,
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
+ kTaggedSize * rtt_depth));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch,
+ gasm_->TaggedEqual(maybe_match, formal_rtt), position);
+ gasm_->Goto(&end_label);
+
+ gasm_->Bind(&end_label);
+ } else {
+ // In absence of subtyping, we just need to check for type equality.
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ }
+ } else if (needs_null_check) {
+ Node* int32_scaled_key = gasm_->BuildChangeUint32ToUintPtr(
+ gasm_->Word32Shl(key, Int32Constant(2)));
+ Node* loaded_sig = gasm_->LoadFromObject(MachineType::Int32(), ift_sig_ids,
+ int32_scaled_key);
+ TrapIfTrue(wasm::kTrapFuncSigMismatch,
+ gasm_->Word32Equal(loaded_sig, Int32Constant(-1)), position);
+ }
Node* key_intptr = gasm_->BuildChangeUint32ToUintPtr(key);
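
Annotation (editor's sketch, not part of the patch): the rewritten BuildIndirectCall above first decides which checks the table entry needs, then emits either a plain signature-equality comparison or a full rtt-based subtyping walk. A standalone model of that decision, with plain bools standing in for the type queries:

// Stand-in flags; the real code derives them from the table's wasm type, the
// call_indirect signature index, and the module's type definitions.
struct IndirectCallChecks {
  bool type_check;      // table may hold functions of other signatures
  bool null_check;      // table type is nullable, so entries may be null
  bool full_subtyping;  // declared signature is non-final and wasm-gc is on
};

IndirectCallChecks PlanIndirectCallChecks(bool table_type_matches_sig,
                                          bool table_is_nullable,
                                          bool gc_enabled, bool sig_is_final) {
  IndirectCallChecks c{};
  c.type_check = !table_type_matches_sig;
  c.null_check = table_is_nullable;
  // With subtyping enabled, a failed fast-path signature comparison falls
  // through to an rtt walk over the supertype array instead of trapping.
  c.full_subtyping = c.type_check && gc_enabled && !sig_is_final;
  return c;
}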
@@ -2873,6 +2999,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
args[0] = target;
+ const wasm::FunctionSig* sig = env_->module->signature(sig_index);
+
switch (continuation) {
case kCallContinues:
return BuildWasmCall(sig, args, rets, position, target_instance);
@@ -2881,39 +3009,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
}
}
-Node* WasmGraphBuilder::BuildLoadExternalPointerFromObject(
- Node* object, int offset, ExternalPointerTag tag) {
-#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- DCHECK(!IsSharedExternalPointerType(tag));
- Node* external_pointer = gasm_->LoadFromObject(
- MachineType::Uint32(), object, wasm::ObjectAccess::ToTagged(offset));
- static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2);
- Node* shift_amount = gasm_->Int32Constant(kExternalPointerIndexShift -
- kSystemPointerSizeLog2);
- Node* scaled_index = gasm_->Word32Shr(external_pointer, shift_amount);
- Node* isolate_root = BuildLoadIsolateRoot();
- Node* table =
- gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
- IsolateData::external_pointer_table_offset() +
- Internals::kExternalPointerTableBufferOffset);
- Node* decoded_ptr =
- gasm_->Load(MachineType::Pointer(), table, scaled_index);
- return gasm_->WordAnd(decoded_ptr, gasm_->IntPtrConstant(~tag));
- }
-#endif
- return gasm_->LoadFromObject(MachineType::Pointer(), object,
- wasm::ObjectAccess::ToTagged(offset));
-}
-
Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
Node* function) {
Node* internal = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmExportedFunctionData::kInternalOffset));
- return BuildLoadExternalPointerFromObject(
+ return gasm_->BuildLoadExternalPointerFromObject(
internal, WasmInternalFunction::kCallTargetOffset,
- kWasmInternalFunctionCallTargetTag);
+ kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot());
}
// TODO(9495): Support CAPI function refs.
@@ -2923,8 +3026,10 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
CheckForNull null_check,
IsReturnCall continuation,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) {
- args[0] = AssertNotNull(args[0], position);
+ if (null_check == kWithNullCheck &&
+ null_check_strategy_ == NullCheckStrategy::kExplicitNullChecks) {
+ args[0] =
+ AssertNotNull(args[0], wasm::kWasmFuncRef /* good enough */, position);
}
Node* function = args[0];
@@ -2932,13 +3037,21 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
auto load_target = gasm_->MakeLabel();
auto end_label = gasm_->MakeLabel(MachineType::PointerRepresentation());
- Node* ref_node = gasm_->LoadImmutableFromObject(
- MachineType::TaggedPointer(), function,
- wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
-
- Node* target = BuildLoadExternalPointerFromObject(
+ Node* ref_node =
+ null_check == kWithNullCheck &&
+ null_check_strategy_ == NullCheckStrategy::kTrapHandler
+ ? gasm_->LoadTrapOnNull(
+ MachineType::TaggedPointer(), function,
+ gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kRefOffset)))
+ : gasm_->LoadImmutableFromObject(
+ MachineType::TaggedPointer(), function,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
+ SetSourcePosition(ref_node, position);
+
+ Node* target = gasm_->BuildLoadExternalPointerFromObject(
function, WasmInternalFunction::kCallTargetOffset,
- kWasmInternalFunctionCallTargetTag);
+ kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot());
Node* is_null_target = gasm_->WordEqual(target, gasm_->IntPtrConstant(0));
gasm_->GotoIfNot(is_null_target, &end_label, target);
{
@@ -2947,17 +3060,9 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
Node* wrapper_code = gasm_->LoadImmutableFromObject(
MachineType::TaggedPointer(), function,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset));
- Node* call_target;
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- call_target =
- gasm_->LoadFromObject(MachineType::Pointer(), wrapper_code,
- wasm::ObjectAccess::ToTagged(
- CodeDataContainer::kCodeEntryPointOffset));
- } else {
- call_target = gasm_->IntAdd(
- wrapper_code, gasm_->IntPtrConstant(
- wasm::ObjectAccess::ToTagged(Code::kHeaderSize)));
- }
+ Node* call_target = gasm_->LoadFromObject(
+ MachineType::Pointer(), wrapper_code,
+ wasm::ObjectAccess::ToTagged(Code::kCodeEntryPointOffset));
gasm_->Goto(&end_label, call_target);
}
@@ -2997,7 +3102,7 @@ void WasmGraphBuilder::CompareToInternalFunctionAtIndex(Node* func_ref,
Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
- WasmGraphBuilder::CheckForNull null_check,
+ CheckForNull null_check,
wasm::WasmCodePosition position) {
return BuildCallRef(sig, args, rets, null_check, IsReturnCall::kCallContinues,
position);
@@ -3005,7 +3110,7 @@ Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* sig,
Node* WasmGraphBuilder::ReturnCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
- WasmGraphBuilder::CheckForNull null_check,
+ CheckForNull null_check,
wasm::WasmCodePosition position) {
return BuildCallRef(sig, args, {}, null_check, IsReturnCall::kReturnCall,
position);
@@ -3038,9 +3143,9 @@ Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
kReturnCall);
}
-void WasmGraphBuilder::BrOnNull(Node* ref_object, Node** null_node,
- Node** non_null_node) {
- BranchExpectFalse(IsNull(ref_object), null_node, non_null_node);
+void WasmGraphBuilder::BrOnNull(Node* ref_object, wasm::ValueType type,
+ Node** null_node, Node** non_null_node) {
+ BranchExpectFalse(IsNull(ref_object, type), null_node, non_null_node);
}
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -3112,7 +3217,7 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
}
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
- INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
+ INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation());
#undef INTRODUCE_PHI
}
@@ -3220,8 +3325,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
// Isolate independent. At the moment this is only done for CEntryStub(1).
Node* isolate_root = BuildLoadIsolateRoot();
DCHECK_EQ(1, fun->result_size);
- auto centry_id =
- Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
+ auto centry_id = Builtin::kCEntry_Return1_ArgvOnStack_NoBuiltinExit;
int builtin_slot_offset = IsolateData::BuiltinSlotOffset(centry_id);
Node* centry_stub = gasm_->LoadFromObject(MachineType::Pointer(),
isolate_root, builtin_slot_offset);
@@ -3396,9 +3500,9 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// machine.
if (offset > std::numeric_limits<uintptr_t>::max() ||
!base::IsInBounds<uintptr_t>(offset, access_size,
- env_->max_memory_size)) {
+ env_->module->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
- TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
+ Trap(wasm::TrapReason::kTrapMemOutOfBounds, position);
return {gasm_->UintPtrConstant(0), kOutOfBounds};
}
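
Annotation (editor's sketch, not part of the patch): the hunk above keeps BoundsCheckMem's static pre-check, now reading the limits from env_->module. If the constant offset plus access size cannot fit even in the largest possible memory, the builder emits an unconditional trap instead of a dynamic check. The same predicate over plain integers:

#include <cstdint>
#include <limits>

// True if a constant-offset access of `access_size` bytes can never be in
// bounds, even for a memory grown to `max_memory_size` bytes.
bool StaticallyOutOfBounds(uint64_t offset, uint64_t access_size,
                           uint64_t max_memory_size) {
  if (offset > std::numeric_limits<std::uintptr_t>::max()) return true;
  // Equivalent to !base::IsInBounds(offset, access_size, max_memory_size):
  // offset + access_size must not exceed the maximum size or overflow.
  return access_size > max_memory_size ||
         offset > max_memory_size - access_size;
}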
@@ -3434,8 +3538,8 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uintptr_t end_offset = offset + access_size - 1u;
UintPtrMatcher match(index);
- if (match.HasResolvedValue() && end_offset <= env_->min_memory_size &&
- match.ResolvedValue() < env_->min_memory_size - end_offset) {
+ if (match.HasResolvedValue() && end_offset <= env_->module->min_memory_size &&
+ match.ResolvedValue() < env_->module->min_memory_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
return {index, kInBounds};
@@ -3448,7 +3552,7 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
Node* mem_size = instance_cache_->mem_size;
Node* end_offset_node = mcgraph_->UintPtrConstant(end_offset);
- if (end_offset > env_->min_memory_size) {
+ if (end_offset > env_->module->min_memory_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the dynamic memory size.
Node* cond = gasm_->UintLessThan(end_offset_node, mem_size);
@@ -3788,15 +3892,23 @@ void WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
gasm_->StoreUnaligned(UnalignedStoreRepresentation{mem_rep},
MemBuffer(capped_offset), index, val);
break;
- case MemoryAccessKind::kProtected:
- SetSourcePosition(
- gasm_->ProtectedStore(mem_rep, MemBuffer(capped_offset), index, val),
- position);
+ case MemoryAccessKind::kProtected: {
+ Node* store =
+ gasm_->ProtectedStore(mem_rep, MemBuffer(capped_offset), index, val);
+ SetSourcePosition(store, position);
+ if (mem_rep == MachineRepresentation::kSimd128) {
+ graph()->RecordSimdStore(store);
+ }
break;
- case MemoryAccessKind::kNormal:
- gasm_->Store(StoreRepresentation{mem_rep, kNoWriteBarrier},
- MemBuffer(capped_offset), index, val);
+ }
+ case MemoryAccessKind::kNormal: {
+ Node* store = gasm_->Store(StoreRepresentation{mem_rep, kNoWriteBarrier},
+ MemBuffer(capped_offset), index, val);
+ if (mem_rep == MachineRepresentation::kSimd128) {
+ graph()->RecordSimdStore(store);
+ }
break;
+ }
}
if (v8_flags.trace_wasm_memory) {
@@ -3955,25 +4067,15 @@ Signature<MachineRepresentation>* CreateMachineSignature(
builder.AddParam(param.machine_representation());
}
}
- return builder.Build();
+ return builder.Get();
}
} // namespace
-void WasmGraphBuilder::AddInt64LoweringReplacement(
- CallDescriptor* original, CallDescriptor* replacement) {
- if (!lowering_special_case_) {
- lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
- }
- lowering_special_case_->replacements.insert({original, replacement});
-}
-
void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
- gasm_->simplified(), mcgraph()->zone(),
- env_ != nullptr ? env_->module : nullptr, sig,
- std::move(lowering_special_case_));
+ gasm_->simplified(), mcgraph()->zone(), sig);
r.LowerGraph();
}
@@ -3981,23 +4083,6 @@ void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
LowerInt64(CreateMachineSignature(mcgraph()->zone(), sig_, origin));
}
-CallDescriptor* WasmGraphBuilder::GetI64ToBigIntCallDescriptor(
- StubCallMode stub_mode) {
- CallDescriptor** i64_to_bigint_descriptor =
- stub_mode == StubCallMode::kCallCodeObject
- ? &i64_to_bigint_stub_descriptor_
- : &i64_to_bigint_builtin_descriptor_;
- if (*i64_to_bigint_descriptor) return *i64_to_bigint_descriptor;
-
- *i64_to_bigint_descriptor =
- GetBuiltinCallDescriptor(Builtin::kI64ToBigInt, zone_, stub_mode);
-
- AddInt64LoweringReplacement(
- *i64_to_bigint_descriptor,
- GetBuiltinCallDescriptor(Builtin::kI32PairToBigInt, zone_, stub_mode));
- return *i64_to_bigint_descriptor;
-}
-
Node* WasmGraphBuilder::BuildChangeInt64ToBigInt(Node* input,
StubCallMode stub_mode) {
Node* target;
@@ -4017,14 +4102,18 @@ Node* WasmGraphBuilder::BuildChangeInt64ToBigInt(Node* input,
wasm::WasmCode::kI32PairToBigInt, RelocInfo::WASM_STUB_CALL)
: gasm_->GetBuiltinPointerTarget(Builtin::kI32PairToBigInt);
}
- return gasm_->Call(GetI64ToBigIntCallDescriptor(stub_mode), target, input);
+ CallDescriptor* descriptor =
+ wasm::GetWasmEngine()->call_descriptors()->GetI64ToBigIntDescriptor(
+ stub_mode);
+ return gasm_->Call(descriptor, target, input);
}
void WasmGraphBuilder::SetSourcePosition(Node* node,
wasm::WasmCodePosition position) {
DCHECK_NE(position, wasm::kNoCodePosition);
if (source_position_table_) {
- source_position_table_->SetSourcePosition(node, SourcePosition(position));
+ source_position_table_->SetSourcePosition(
+ node, SourcePosition(position, inlining_id_));
}
}
@@ -4916,7 +5005,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
std::tie(index, bounds_check_result) =
CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0], offset,
position, enforce_bounds_check);
- // MemoryAccessKind::kUnalligned is impossible due to explicit aligment check.
+  // MemoryAccessKind::kUnaligned is impossible due to the explicit alignment check.
MemoryAccessKind access_kind =
bounds_check_result == WasmGraphBuilder::kTrapHandler
? MemoryAccessKind::kProtected
@@ -5160,14 +5249,14 @@ void WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
// validation.
DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
- Node* dropped_elem_segments =
- LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::TaggedPointer());
- auto store_rep =
- StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier);
- gasm_->Store(store_rep, dropped_elem_segments,
- wasm::ObjectAccess::ElementOffsetInTaggedFixedUInt8Array(
- elem_segment_index),
- Int32Constant(1));
+ Node* elem_segments =
+ LOAD_INSTANCE_FIELD(ElementSegments, MachineType::TaggedPointer());
+ auto store_rep = StoreRepresentation(MachineRepresentation::kTaggedPointer,
+ kFullWriteBarrier);
+ gasm_->Store(
+ store_rep, elem_segments,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(elem_segment_index),
+ LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
}
void WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
@@ -5223,7 +5312,7 @@ Node* WasmGraphBuilder::DefaultValue(wasm::ValueType type) {
case wasm::kS128:
return S128Zero();
case wasm::kRefNull:
- return RefNull();
+ return RefNull(type);
case wasm::kRtt:
case wasm::kVoid:
case wasm::kBottom:
@@ -5243,7 +5332,7 @@ Node* WasmGraphBuilder::StructNew(uint32_t struct_index,
wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
for (uint32_t i = 0; i < type->field_count(); i++) {
- gasm_->StoreStructField(s, type, i, fields[i]);
+ gasm_->StructSet(s, fields[i], type, i, kWithoutNullCheck);
}
// If this assert fails then initialization of padding field might be
// necessary.
@@ -5281,107 +5370,13 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
ObjectAccess(MachineType::TaggedPointer(), kNoWriteBarrier), a,
wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
- gasm_->InitializeImmutableInObject(
- ObjectAccess(MachineType::Uint32(), kNoWriteBarrier), a,
- wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset), length);
+ gasm_->ArrayInitializeLength(a, length);
- // Initialize the array. Use an external function for large arrays with
- // null/number initializer. Use a loop for small arrays and reference arrays
- // with a non-null initial value.
- auto done = gasm_->MakeLabel();
- // TODO(manoskouk): If the loop is ever removed here, we have to update
- // ArrayNew() in graph-builder-interface.cc to not mark the current
- // loop as non-innermost.
- auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
- Node* start_offset = gasm_->IntPtrConstant(
- wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
-
- if ((initial_value == nullptr && (element_type.kind() == wasm::kRefNull ||
- element_type.kind() == wasm::kS128)) ||
- (element_type.is_numeric() && element_type != wasm::kWasmS128)) {
- constexpr uint32_t kArrayNewMinimumSizeForMemSet = 16;
- gasm_->GotoIf(gasm_->Uint32LessThan(
- length, Int32Constant(kArrayNewMinimumSizeForMemSet)),
- &loop, BranchHint::kNone, start_offset);
- Node* function = gasm_->ExternalConstant(
- ExternalReference::wasm_array_fill_with_number_or_null());
-
- Node* initial_value_i64 = nullptr;
- if (initial_value == nullptr && element_type.is_numeric()) {
- initial_value_i64 = Int64Constant(0);
- } else {
- switch (element_type.kind()) {
- case wasm::kI32:
- case wasm::kI8:
- case wasm::kI16:
- initial_value_i64 = graph()->NewNode(
- mcgraph()->machine()->ChangeInt32ToInt64(), initial_value);
- break;
- case wasm::kI64:
- initial_value_i64 = initial_value;
- break;
- case wasm::kF32:
- initial_value_i64 = graph()->NewNode(
- mcgraph()->machine()->ChangeInt32ToInt64(),
- graph()->NewNode(mcgraph()->machine()->BitcastFloat32ToInt32(),
- initial_value));
- break;
- case wasm::kF64:
- initial_value_i64 = graph()->NewNode(
- mcgraph()->machine()->BitcastFloat64ToInt64(), initial_value);
- break;
- case wasm::kRefNull:
- initial_value_i64 =
- initial_value == nullptr ? gasm_->Null() : initial_value;
- if (kSystemPointerSize == 4) {
- initial_value_i64 = graph()->NewNode(
- mcgraph()->machine()->ChangeInt32ToInt64(), initial_value_i64);
- }
- break;
- case wasm::kS128:
- case wasm::kRtt:
- case wasm::kRef:
- case wasm::kVoid:
- case wasm::kBottom:
- UNREACHABLE();
- }
- }
+ ArrayFillImpl(
+ a, gasm_->Int32Constant(0),
+ initial_value != nullptr ? initial_value : DefaultValue(element_type),
+ length, type, false);
- Node* stack_slot = StoreArgsInStackSlot(
- {{MachineRepresentation::kWord64, initial_value_i64}});
-
- MachineType arg_types[]{MachineType::TaggedPointer(), MachineType::Uint32(),
- MachineType::Uint32(), MachineType::Pointer()};
- MachineSignature sig(0, 4, arg_types);
- BuildCCall(&sig, function, a, length,
- Int32Constant(element_type.raw_bit_field()), stack_slot);
- gasm_->Goto(&done);
- } else {
- gasm_->Goto(&loop, start_offset);
- }
- gasm_->Bind(&loop);
- auto object_access = ObjectAccessForGCStores(element_type);
- if (initial_value == nullptr) {
- initial_value = DefaultValue(element_type);
- object_access.write_barrier_kind = kNoWriteBarrier;
- }
- Node* element_size = gasm_->IntPtrConstant(element_type.value_kind_size());
- Node* end_offset =
- gasm_->IntAdd(start_offset, gasm_->IntMul(element_size, length));
- {
- Node* offset = loop.PhiAt(0);
- Node* check = gasm_->UintLessThan(offset, end_offset);
- gasm_->GotoIfNot(check, &done);
- if (type->mutability()) {
- gasm_->StoreToObject(object_access, a, offset, initial_value);
- } else {
- gasm_->InitializeImmutableInObject(object_access, a, offset,
- initial_value);
- }
- offset = gasm_->IntAdd(offset, element_size);
- gasm_->Goto(&loop, offset);
- }
- gasm_->Bind(&done);
return a;
}
@@ -5397,20 +5392,11 @@ Node* WasmGraphBuilder::ArrayNewFixed(const wasm::ArrayType* type, Node* rtt,
ObjectAccess(MachineType::TaggedPointer(), kNoWriteBarrier), array,
wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
- gasm_->InitializeImmutableInObject(
- ObjectAccess(MachineType::Uint32(), kNoWriteBarrier), array,
- wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset),
- Int32Constant(static_cast<int>(elements.size())));
+ gasm_->ArrayInitializeLength(
+ array, SetType(Int32Constant(static_cast<int>(elements.size())),
+ wasm::kWasmI32));
for (int i = 0; i < static_cast<int>(elements.size()); i++) {
- Node* offset =
- gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
- if (type->mutability()) {
- gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
- elements[i]);
- } else {
- gasm_->InitializeImmutableInObject(ObjectAccessForGCStores(element_type),
- array, offset, elements[i]);
- }
+ gasm_->ArraySet(array, gasm_->Int32Constant(i), elements[i], type);
}
return array;
}
@@ -5425,7 +5411,8 @@ Node* WasmGraphBuilder::ArrayNewSegment(const wasm::ArrayType* type,
}
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
- return graph()->NewNode(gasm_->simplified()->RttCanon(type_index));
+ return graph()->NewNode(gasm_->simplified()->RttCanon(type_index),
+ GetInstance());
}
WasmGraphBuilder::Callbacks WasmGraphBuilder::TestCallbacks(
@@ -5493,33 +5480,17 @@ WasmGraphBuilder::Callbacks WasmGraphBuilder::BranchCallbacks(
}};
}
-void WasmGraphBuilder::DataCheck(Node* object, bool object_can_be_null,
- Callbacks callbacks, bool null_succeeds) {
- // TODO(7748): Only used for backwards compatibility in combination with
- // v8_flags.wasm_gc_structref_as_dataref. Remove.
- if (object_can_be_null) {
- if (null_succeeds) {
- callbacks.succeed_if(IsNull(object), BranchHint::kFalse);
- } else {
- callbacks.fail_if(IsNull(object), BranchHint::kFalse);
- }
- }
- callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
- Node* map = gasm_->LoadMap(object);
- callbacks.fail_if_not(gasm_->IsDataRefMap(map), BranchHint::kTrue);
-}
-
void WasmGraphBuilder::EqCheck(Node* object, bool object_can_be_null,
Callbacks callbacks, bool null_succeeds) {
- // TODO(7748): Is the extra null check actually beneficial for performance?
if (object_can_be_null) {
if (null_succeeds) {
- callbacks.succeed_if(IsNull(object), BranchHint::kFalse);
+ callbacks.succeed_if(IsNull(object, wasm::kWasmAnyRef),
+ BranchHint::kFalse);
} else {
- callbacks.fail_if(IsNull(object), BranchHint::kFalse);
+ // The {IsDataRefMap} check below will fail for {null} anyway.
}
}
- callbacks.succeed_if(gasm_->IsI31(object), BranchHint::kFalse);
+ callbacks.succeed_if(gasm_->IsSmi(object), BranchHint::kFalse);
Node* map = gasm_->LoadMap(object);
callbacks.fail_if_not(gasm_->IsDataRefMap(map), BranchHint::kTrue);
}
@@ -5531,12 +5502,13 @@ void WasmGraphBuilder::ManagedObjectInstanceCheck(Node* object,
bool null_succeeds) {
if (object_can_be_null) {
if (null_succeeds) {
- callbacks.succeed_if(IsNull(object), BranchHint::kFalse);
+ callbacks.succeed_if(IsNull(object, wasm::kWasmAnyRef),
+ BranchHint::kFalse);
} else {
- callbacks.fail_if(IsNull(object), BranchHint::kFalse);
+      // The {HasInstanceType} check below will fail for {null} anyway.
}
}
- callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
+ callbacks.fail_if(gasm_->IsSmi(object), BranchHint::kFalse);
callbacks.fail_if_not(gasm_->HasInstanceType(object, instance_type),
BranchHint::kTrue);
}
@@ -5586,9 +5558,7 @@ Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
}
Node* WasmGraphBuilder::RefTestAbstract(Node* object, wasm::HeapType type,
- bool null_succeeds) {
- bool is_nullable =
- compiler::NodeProperties::GetType(object).AsWasm().type.is_nullable();
+ bool is_nullable, bool null_succeeds) {
switch (type.representation()) {
case wasm::HeapType::kEq:
return RefIsEq(object, is_nullable, null_succeeds);
@@ -5598,6 +5568,11 @@ Node* WasmGraphBuilder::RefTestAbstract(Node* object, wasm::HeapType type,
return RefIsStruct(object, is_nullable, null_succeeds);
case wasm::HeapType::kArray:
return RefIsArray(object, is_nullable, null_succeeds);
+ case wasm::HeapType::kNone:
+ case wasm::HeapType::kNoExtern:
+ case wasm::HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ return IsNull(object, wasm::ValueType::RefNull(type));
case wasm::HeapType::kAny:
// Any may never need a cast as it is either implicitly convertible or
// never convertible for any given type.
@@ -5609,14 +5584,14 @@ Node* WasmGraphBuilder::RefTestAbstract(Node* object, wasm::HeapType type,
Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
WasmTypeCheckConfig config,
wasm::WasmCodePosition position) {
- return gasm_->WasmTypeCast(object, rtt, config);
+ Node* cast = gasm_->WasmTypeCast(object, rtt, config);
+ SetSourcePosition(cast, position);
+ return cast;
}
Node* WasmGraphBuilder::RefCastAbstract(Node* object, wasm::HeapType type,
wasm::WasmCodePosition position,
- bool null_succeeds) {
- bool is_nullable =
- compiler::NodeProperties::GetType(object).AsWasm().type.is_nullable();
+ bool is_nullable, bool null_succeeds) {
switch (type.representation()) {
case wasm::HeapType::kEq:
return RefAsEq(object, is_nullable, position, null_succeeds);
@@ -5626,6 +5601,14 @@ Node* WasmGraphBuilder::RefCastAbstract(Node* object, wasm::HeapType type,
return RefAsStruct(object, is_nullable, position, null_succeeds);
case wasm::HeapType::kArray:
return RefAsArray(object, is_nullable, position, null_succeeds);
+ case wasm::HeapType::kNone:
+ case wasm::HeapType::kNoExtern:
+ case wasm::HeapType::kNoFunc: {
+ DCHECK(null_succeeds);
+ TrapIfFalse(wasm::kTrapIllegalCast,
+ IsNull(object, wasm::ValueType::RefNull(type)), position);
+ return object;
+ }
case wasm::HeapType::kAny:
// Any may never need a cast as it is either implicitly convertible or
// never convertible for any given type.
@@ -5669,15 +5652,32 @@ Node* WasmGraphBuilder::RefAsEq(Node* object, bool object_can_be_null,
return object;
}
+void WasmGraphBuilder::BrOnEq(Node* object, Node* /*rtt*/,
+ WasmTypeCheckConfig config, Node** match_control,
+ Node** match_effect, Node** no_match_control,
+ Node** no_match_effect) {
+ BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
+ [=](Callbacks callbacks) -> void {
+ if (config.from.is_nullable()) {
+ if (config.to.is_nullable()) {
+ callbacks.succeed_if(gasm_->IsNull(object, config.from),
+ BranchHint::kFalse);
+ } else {
+ // The {IsDataRefMap} check below will fail for {null}.
+ }
+ }
+ callbacks.succeed_if(gasm_->IsSmi(object), BranchHint::kFalse);
+ Node* map = gasm_->LoadMap(object);
+ callbacks.fail_if_not(gasm_->IsDataRefMap(map),
+ BranchHint::kTrue);
+ });
+}
+
Node* WasmGraphBuilder::RefIsStruct(Node* object, bool object_can_be_null,
bool null_succeeds) {
auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
- if (!v8_flags.wasm_gc_structref_as_dataref) {
- ManagedObjectInstanceCheck(object, object_can_be_null, WASM_STRUCT_TYPE,
- TestCallbacks(&done), null_succeeds);
- } else {
- DataCheck(object, object_can_be_null, TestCallbacks(&done), null_succeeds);
- }
+ ManagedObjectInstanceCheck(object, object_can_be_null, WASM_STRUCT_TYPE,
+ TestCallbacks(&done), null_succeeds);
gasm_->Goto(&done, Int32Constant(1));
gasm_->Bind(&done);
return done.PhiAt(0);
@@ -5687,13 +5687,8 @@ Node* WasmGraphBuilder::RefAsStruct(Node* object, bool object_can_be_null,
wasm::WasmCodePosition position,
bool null_succeeds) {
auto done = gasm_->MakeLabel();
- if (!v8_flags.wasm_gc_structref_as_dataref) {
- ManagedObjectInstanceCheck(object, object_can_be_null, WASM_STRUCT_TYPE,
- CastCallbacks(&done, position), null_succeeds);
- } else {
- DataCheck(object, object_can_be_null, CastCallbacks(&done, position),
- null_succeeds);
- }
+ ManagedObjectInstanceCheck(object, object_can_be_null, WASM_STRUCT_TYPE,
+ CastCallbacks(&done, position), null_succeeds);
gasm_->Goto(&done);
gasm_->Bind(&done);
return object;
@@ -5704,17 +5699,12 @@ void WasmGraphBuilder::BrOnStruct(Node* object, Node* /*rtt*/,
Node** match_control, Node** match_effect,
Node** no_match_control,
Node** no_match_effect) {
- bool null_succeeds = false;
+ bool null_succeeds = config.to.is_nullable();
BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
[=](Callbacks callbacks) -> void {
- if (!v8_flags.wasm_gc_structref_as_dataref) {
- return ManagedObjectInstanceCheck(
- object, config.from.is_nullable(), WASM_STRUCT_TYPE,
- callbacks, null_succeeds);
- } else {
- return DataCheck(object, config.from.is_nullable(), callbacks,
- null_succeeds);
- }
+ return ManagedObjectInstanceCheck(
+ object, config.from.is_nullable(), WASM_STRUCT_TYPE,
+ callbacks, null_succeeds);
});
}
@@ -5744,7 +5734,7 @@ void WasmGraphBuilder::BrOnArray(Node* object, Node* /*rtt*/,
Node** match_control, Node** match_effect,
Node** no_match_control,
Node** no_match_effect) {
- bool null_succeeds = false;
+ bool null_succeeds = config.to.is_nullable();
BrOnCastAbs(match_control, match_effect, no_match_control, no_match_effect,
[=](Callbacks callbacks) -> void {
return ManagedObjectInstanceCheck(
@@ -5756,39 +5746,46 @@ void WasmGraphBuilder::BrOnArray(Node* object, Node* /*rtt*/,
Node* WasmGraphBuilder::RefIsI31(Node* object, bool null_succeeds) {
if (null_succeeds) {
auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
- gasm_->GotoIf(gasm_->IsI31(object), &done, BranchHint::kTrue,
+ gasm_->GotoIf(gasm_->IsSmi(object), &done, BranchHint::kTrue,
Int32Constant(1));
- gasm_->Goto(&done, gasm_->IsNull(object));
+ gasm_->Goto(&done, gasm_->IsNull(object, wasm::kWasmAnyRef));
gasm_->Bind(&done);
return done.PhiAt(0);
}
- return gasm_->IsI31(object);
+ return gasm_->IsSmi(object);
}
Node* WasmGraphBuilder::RefAsI31(Node* object, wasm::WasmCodePosition position,
bool null_succeeds) {
if (null_succeeds) {
auto done = gasm_->MakeLabel();
- gasm_->GotoIf(gasm_->IsNull(object), &done);
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
+ gasm_->GotoIf(gasm_->IsNull(object, wasm::kWasmAnyRef), &done);
+ TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsSmi(object), position);
gasm_->Goto(&done);
gasm_->Bind(&done);
return object;
}
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
+ TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsSmi(object), position);
return object;
}
void WasmGraphBuilder::BrOnI31(Node* object, Node* /* rtt */,
- WasmTypeCheckConfig /* config */,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
+ WasmTypeCheckConfig config, Node** match_control,
+ Node** match_effect, Node** no_match_control,
Node** no_match_effect) {
- gasm_->Branch(gasm_->IsI31(object), match_control, no_match_control,
- BranchHint::kTrue);
- SetControl(*no_match_control);
- *match_effect = effect();
- *no_match_effect = effect();
+ BrOnCastAbs(
+ match_control, match_effect, no_match_control, no_match_effect,
+ [=](Callbacks callbacks) -> void {
+ if (config.from.is_nullable()) {
+ if (config.to.is_nullable()) {
+ callbacks.succeed_if(gasm_->IsNull(object, config.from),
+ BranchHint::kFalse);
+ } else {
+ // Covered by the {IsSmi} check below.
+ }
+ }
+ callbacks.fail_if_not(gasm_->IsSmi(object), BranchHint::kTrue);
+ });
}
Node* WasmGraphBuilder::TypeGuard(Node* value, wasm::ValueType type) {
@@ -5803,18 +5800,10 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object,
uint32_t field_index, CheckForNull null_check,
bool is_signed,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) {
- struct_object = AssertNotNull(struct_object, position);
- }
- // It is not enough to invoke ValueType::machine_type(), because the
- // signedness has to be determined by {is_signed}.
- MachineType machine_type = MachineType::TypeForRepresentation(
- struct_type->field(field_index).machine_representation(), is_signed);
- Node* offset = gasm_->FieldOffset(struct_type, field_index);
- return struct_type->mutability(field_index)
- ? gasm_->LoadFromObject(machine_type, struct_object, offset)
- : gasm_->LoadImmutableFromObject(machine_type, struct_object,
- offset);
+ Node* result = gasm_->StructGet(struct_object, struct_type, field_index,
+ is_signed, null_check);
+ SetSourcePosition(result, position);
+ return result;
}
void WasmGraphBuilder::StructSet(Node* struct_object,
@@ -5822,29 +5811,38 @@ void WasmGraphBuilder::StructSet(Node* struct_object,
uint32_t field_index, Node* field_value,
CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) {
- struct_object = AssertNotNull(struct_object, position);
- }
- gasm_->StoreStructField(struct_object, struct_type, field_index, field_value);
+ gasm_->StructSet(struct_object, field_value, struct_type, field_index,
+ null_check);
+ SetSourcePosition(effect(), position);
}
void WasmGraphBuilder::BoundsCheckArray(Node* array, Node* index,
+ CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (V8_UNLIKELY(v8_flags.experimental_wasm_skip_bounds_checks)) return;
- Node* length = gasm_->LoadWasmArrayLength(array);
- TrapIfFalse(wasm::kTrapArrayOutOfBounds, gasm_->Uint32LessThan(index, length),
- position);
+ if (V8_UNLIKELY(v8_flags.experimental_wasm_skip_bounds_checks)) {
+ if (null_check == kWithNullCheck) {
+ AssertNotNull(array, wasm::kWasmArrayRef, position);
+ }
+ } else {
+ Node* length = gasm_->ArrayLength(array, null_check);
+ SetSourcePosition(length, position);
+ TrapIfFalse(wasm::kTrapArrayOutOfBounds,
+ gasm_->Uint32LessThan(index, length), position);
+ }
}
-void WasmGraphBuilder::BoundsCheckArrayCopy(Node* array, Node* index,
- Node* length,
- wasm::WasmCodePosition position) {
+void WasmGraphBuilder::BoundsCheckArrayWithLength(
+ Node* array, Node* index, Node* length, CheckForNull null_check,
+ wasm::WasmCodePosition position) {
if (V8_UNLIKELY(v8_flags.experimental_wasm_skip_bounds_checks)) return;
- Node* array_length = gasm_->LoadWasmArrayLength(array);
+ Node* array_length = gasm_->ArrayLength(array, null_check);
+ SetSourcePosition(array_length, position);
Node* range_end = gasm_->Int32Add(index, length);
Node* range_valid = gasm_->Word32And(
+ // OOB if (index + length > array.len).
gasm_->Uint32LessThanOrEqual(range_end, array_length),
- gasm_->Uint32LessThanOrEqual(index, range_end)); // No overflow
+ // OOB if (index + length) overflows.
+ gasm_->Uint32LessThanOrEqual(index, range_end));
TrapIfFalse(wasm::kTrapArrayOutOfBounds, range_valid, position);
}
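
Annotation (editor's sketch, not part of the patch): BoundsCheckArrayWithLength above folds the overflow check into the range check with two unsigned comparisons. The same predicate over plain uint32_t values:

#include <cstdint>

// The two comparisons reject both "index + length exceeds the array length"
// and the case where the 32-bit addition wraps around.
bool RangeInBounds(uint32_t index, uint32_t length, uint32_t array_length) {
  uint32_t range_end = index + length;  // may wrap modulo 2^32
  return range_end <= array_length &&   // OOB if (index + length > array.len)
         index <= range_end;            // OOB if (index + length) overflowed
}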
@@ -5852,72 +5850,234 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object,
const wasm::ArrayType* type, Node* index,
CheckForNull null_check, bool is_signed,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) {
- array_object = AssertNotNull(array_object, position);
- }
- BoundsCheckArray(array_object, index, position);
- MachineType machine_type = MachineType::TypeForRepresentation(
- type->element_type().machine_representation(), is_signed);
- Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
- return type->mutability()
- ? gasm_->LoadFromObject(machine_type, array_object, offset)
- : gasm_->LoadImmutableFromObject(machine_type, array_object,
- offset);
+ BoundsCheckArray(array_object, index, null_check, position);
+ return gasm_->ArrayGet(array_object, index, type, is_signed);
}
void WasmGraphBuilder::ArraySet(Node* array_object, const wasm::ArrayType* type,
Node* index, Node* value,
CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) {
- array_object = AssertNotNull(array_object, position);
- }
- BoundsCheckArray(array_object, index, position);
- Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
- gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()),
- array_object, offset, value);
+ BoundsCheckArray(array_object, index, null_check, position);
+ gasm_->ArraySet(array_object, index, value, type);
}
Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) {
- array_object = AssertNotNull(array_object, position);
- }
- return gasm_->LoadWasmArrayLength(array_object);
+ Node* result = gasm_->ArrayLength(array_object, null_check);
+ SetSourcePosition(result, position);
+ return result;
}
-// TODO(7748): Add an option to copy in a loop for small array sizes. To find
-// the length limit, run test/mjsunit/wasm/array-copy-benchmark.js.
void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
CheckForNull dst_null_check, Node* src_array,
Node* src_index, CheckForNull src_null_check,
Node* length,
+ const wasm::ArrayType* array_type,
wasm::WasmCodePosition position) {
- if (dst_null_check == kWithNullCheck) {
- dst_array = AssertNotNull(dst_array, position);
+ BoundsCheckArrayWithLength(dst_array, dst_index, length, dst_null_check,
+ position);
+ BoundsCheckArrayWithLength(src_array, src_index, length, src_null_check,
+ position);
+
+ auto end = gasm_->MakeLabel();
+
+ gasm_->GotoIf(gasm_->Word32Equal(length, Int32Constant(0)), &end);
+
+ auto builtin = gasm_->MakeLabel();
+
+ // Values determined by test/mjsunit/wasm/array-copy-benchmark.js on x64.
+ int array_copy_max_loop_length;
+ switch (array_type->element_type().kind()) {
+ case wasm::kI32:
+ case wasm::kI64:
+ case wasm::kI8:
+ case wasm::kI16:
+ array_copy_max_loop_length = 20;
+ break;
+ case wasm::kF32:
+ case wasm::kF64:
+ array_copy_max_loop_length = 35;
+ break;
+ case wasm::kS128:
+ array_copy_max_loop_length = 100;
+ break;
+ case wasm::kRtt:
+ case wasm::kRef:
+ case wasm::kRefNull:
+ array_copy_max_loop_length = 15;
+ break;
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
}
- if (src_null_check == kWithNullCheck) {
- src_array = AssertNotNull(src_array, position);
+
+ gasm_->GotoIf(
+ gasm_->Uint32LessThan(Int32Constant(array_copy_max_loop_length), length),
+ &builtin);
+
+ auto reverse = gasm_->MakeLabel();
+
+ gasm_->GotoIf(gasm_->Uint32LessThan(src_index, dst_index), &reverse);
+
+ Node* src_end_index = gasm_->Int32Sub(gasm_->Int32Add(src_index, length),
+ gasm_->Int32Constant(1));
+ Node* dst_end_index = gasm_->Int32Sub(gasm_->Int32Add(dst_index, length),
+ gasm_->Int32Constant(1));
+
+ {
+ auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32,
+ MachineRepresentation::kWord32);
+
+ gasm_->Goto(&loop, src_index, dst_index);
+ gasm_->Bind(&loop);
+
+ Node* value = gasm_->ArrayGet(src_array, loop.PhiAt(0), array_type, false);
+ gasm_->ArraySet(dst_array, loop.PhiAt(1), value, array_type);
+
+ Node* condition = gasm_->Uint32LessThan(loop.PhiAt(0), src_end_index);
+ gasm_->GotoIfNot(condition, &end);
+ gasm_->Goto(&loop, gasm_->Int32Add(loop.PhiAt(0), Int32Constant(1)),
+ gasm_->Int32Add(loop.PhiAt(1), Int32Constant(1)));
}
- BoundsCheckArrayCopy(dst_array, dst_index, length, position);
- BoundsCheckArrayCopy(src_array, src_index, length, position);
- auto skip = gasm_->MakeLabel();
+ {
+ gasm_->Bind(&reverse);
+ auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32,
+ MachineRepresentation::kWord32);
- gasm_->GotoIf(gasm_->Word32Equal(length, Int32Constant(0)), &skip,
- BranchHint::kFalse);
+ gasm_->Goto(&loop, src_end_index, dst_end_index);
+ gasm_->Bind(&loop);
- Node* function =
- gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
- MachineType arg_types[]{
- MachineType::TaggedPointer(), MachineType::TaggedPointer(),
- MachineType::Uint32(), MachineType::TaggedPointer(),
- MachineType::Uint32(), MachineType::Uint32()};
- MachineSignature sig(0, 6, arg_types);
- BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
- src_index, length);
- gasm_->Goto(&skip);
- gasm_->Bind(&skip);
+ Node* value = gasm_->ArrayGet(src_array, loop.PhiAt(0), array_type, false);
+ gasm_->ArraySet(dst_array, loop.PhiAt(1), value, array_type);
+
+ Node* condition = gasm_->Uint32LessThan(src_index, loop.PhiAt(0));
+ gasm_->GotoIfNot(condition, &end);
+ gasm_->Goto(&loop, gasm_->Int32Sub(loop.PhiAt(0), Int32Constant(1)),
+ gasm_->Int32Sub(loop.PhiAt(1), Int32Constant(1)));
+ }
+
+ {
+ gasm_->Bind(&builtin);
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
+ MachineType arg_types[]{
+ MachineType::TaggedPointer(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::Uint32()};
+ MachineSignature sig(0, 6, arg_types);
+ BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
+ src_index, length);
+ gasm_->Goto(&end);
+ }
+
+ gasm_->Bind(&end);
+}
+
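
The inlined copy above distinguishes three cases: zero length (early exit), lengths above the per-element-type threshold (out-of-line call to wasm_array_copy), and short copies, which pick a forward or backward loop so overlapping ranges within the same array are handled like memmove. A simplified sketch of that direction choice, using standard containers purely for illustration:

    #include <cstdint>
    #include <vector>

    // Copies `length` elements from arr[src..] to arr[dst..], choosing the loop
    // direction so overlapping ranges are still copied correctly.
    void CopyWithinArray(std::vector<uint32_t>& arr, uint32_t dst, uint32_t src,
                         uint32_t length) {
      if (length == 0) return;
      if (src < dst) {
        // Backward loop: read each element before it can be overwritten.
        for (uint32_t i = length; i-- > 0;) arr[dst + i] = arr[src + i];
      } else {
        // Forward loop.
        for (uint32_t i = 0; i < length; ++i) arr[dst + i] = arr[src + i];
      }
    }
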
+Node* WasmGraphBuilder::StoreInInt64StackSlot(Node* value,
+ wasm::ValueType type) {
+ Node* value_int64;
+ switch (type.kind()) {
+ case wasm::kI32:
+ case wasm::kI8:
+ case wasm::kI16:
+ value_int64 =
+ graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
+ break;
+ case wasm::kI64:
+ value_int64 = value;
+ break;
+ case wasm::kS128:
+ // We can only get here if {value} is the constant 0.
+ DCHECK_EQ(value->opcode(), IrOpcode::kS128Zero);
+ value_int64 = Int64Constant(0);
+ break;
+ case wasm::kF32:
+ value_int64 = graph()->NewNode(
+ mcgraph()->machine()->ChangeInt32ToInt64(),
+ graph()->NewNode(mcgraph()->machine()->BitcastFloat32ToInt32(),
+ value));
+ break;
+ case wasm::kF64:
+ value_int64 = graph()->NewNode(
+ mcgraph()->machine()->BitcastFloat64ToInt64(), value);
+ break;
+ case wasm::kRefNull:
+ case wasm::kRef:
+ value_int64 = kSystemPointerSize == 4
+ ? graph()->NewNode(
+ mcgraph()->machine()->ChangeInt32ToInt64(), value)
+ : value;
+ break;
+ case wasm::kRtt:
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+
+ return StoreArgsInStackSlot({{MachineRepresentation::kWord64, value_int64}});
+}
+
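
StoreInInt64StackSlot widens every scalar kind into one 64-bit stack slot so that the C fallback can receive the fill value through a single pointer. A rough host-side analogue of the per-kind conversions (illustrative only; memcpy stands in for the graph-level bitcasts, and the reference/s128 cases are omitted):

    #include <cstdint>
    #include <cstring>

    // kF32: bitcast to i32, then sign-extend to i64.
    int64_t WidenToInt64(float value) {
      int32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<int64_t>(bits);
    }

    // kF64: bitcast straight to i64.
    int64_t WidenToInt64(double value) {
      int64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits;
    }

    // kI32 / kI8 / kI16: plain sign extension.
    int64_t WidenToInt64(int32_t value) { return static_cast<int64_t>(value); }
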
+void WasmGraphBuilder::ArrayFill(Node* array, Node* index, Node* value,
+ Node* length, const wasm::ArrayType* type,
+ CheckForNull null_check,
+ wasm::WasmCodePosition position) {
+ BoundsCheckArrayWithLength(array, index, length, null_check, position);
+ ArrayFillImpl(array, index, value, length, type,
+ type->element_type().is_reference());
+}
+
+void WasmGraphBuilder::ArrayFillImpl(Node* array, Node* index, Node* value,
+ Node* length, const wasm::ArrayType* type,
+ bool emit_write_barrier) {
+ DCHECK_NOT_NULL(value);
+ wasm::ValueType element_type = type->element_type();
+
+ // Initialize the array. Use an external function for large arrays with
+ // null/number initializer. Use a loop for small arrays and reference arrays
+ // with a non-null initial value.
+ auto done = gasm_->MakeLabel();
+ // TODO(manoskouk): If the loop is ever removed here, we have to update
+ // ArrayNew(), ArrayNewDefault(), and ArrayFill() in
+ // graph-builder-interface.cc to not mark the current loop as non-innermost.
+ auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
+
+ // The builtin cannot handle s128 values other than 0.
+ if (!(element_type == wasm::kWasmS128 &&
+ value->opcode() != IrOpcode::kS128Zero)) {
+ constexpr uint32_t kArrayNewMinimumSizeForMemSet = 16;
+ gasm_->GotoIf(gasm_->Uint32LessThan(
+ length, Int32Constant(kArrayNewMinimumSizeForMemSet)),
+ &loop, BranchHint::kNone, index);
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_array_fill());
+
+ Node* stack_slot = StoreInInt64StackSlot(value, element_type);
+
+ MachineType arg_types[]{
+ MachineType::TaggedPointer(), MachineType::Uint32(),
+ MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32(), MachineType::Pointer()};
+ MachineSignature sig(0, 6, arg_types);
+ BuildCCall(&sig, function, array, index, length,
+ Int32Constant(emit_write_barrier ? 1 : 0),
+ Int32Constant(element_type.raw_bit_field()), stack_slot);
+ gasm_->Goto(&done);
+ } else {
+ gasm_->Goto(&loop, index);
+ }
+ gasm_->Bind(&loop);
+ {
+ Node* current_index = loop.PhiAt(0);
+ Node* check =
+ gasm_->UintLessThan(current_index, gasm_->Int32Add(index, length));
+ gasm_->GotoIfNot(check, &done);
+ gasm_->ArraySet(array, current_index, value, type);
+ current_index = gasm_->Int32Add(current_index, Int32Constant(1));
+ gasm_->Goto(&loop, current_index);
+ }
+ gasm_->Bind(&done);
}
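
ArrayFillImpl thus dispatches on size: fills shorter than kArrayNewMinimumSizeForMemSet (and non-zero s128 fills, which the C helper cannot encode) take the inline loop, everything else goes through one out-of-line call. A compact sketch of that dispatch, with FillHelper as a hypothetical stand-in for the wasm_array_fill external function:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    constexpr uint32_t kMinimumSizeForHelper = 16;  // Mirrors kArrayNewMinimumSizeForMemSet.

    // Stand-in for the out-of-line wasm_array_fill C function.
    void FillHelper(std::vector<uint32_t>& arr, uint32_t index, uint32_t length,
                    uint32_t value) {
      std::fill(arr.begin() + index, arr.begin() + index + length, value);
    }

    void Fill(std::vector<uint32_t>& arr, uint32_t index, uint32_t length,
              uint32_t value) {
      if (length < kMinimumSizeForHelper) {
        // Short fill: inline loop, mirroring the graph's loop label.
        for (uint32_t i = index; i < index + length; ++i) arr[i] = value;
      } else {
        FillHelper(arr, index, length, value);
      }
    }
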
// General rules for operator properties for builtin calls:
@@ -5969,7 +6129,7 @@ Node* WasmGraphBuilder::StringConst(uint32_t index) {
Node* WasmGraphBuilder::StringMeasureUtf8(Node* string, CheckForNull null_check,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringMeasureUtf8,
Operator::kEliminatable, string);
@@ -5978,7 +6138,7 @@ Node* WasmGraphBuilder::StringMeasureUtf8(Node* string, CheckForNull null_check,
Node* WasmGraphBuilder::StringMeasureWtf8(Node* string, CheckForNull null_check,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringMeasureWtf8,
Operator::kEliminatable, string);
@@ -5988,7 +6148,7 @@ Node* WasmGraphBuilder::StringMeasureWtf16(Node* string,
CheckForNull null_check,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->LoadImmutableFromObject(
MachineType::Int32(), string,
@@ -6001,7 +6161,7 @@ Node* WasmGraphBuilder::StringEncodeWtf8(uint32_t memory,
Node* offset,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringEncodeWtf8,
Operator::kNoDeopt | Operator::kNoThrow, string,
@@ -6014,10 +6174,10 @@ Node* WasmGraphBuilder::StringEncodeWtf8Array(
Node* array, CheckForNull array_null_check, Node* start,
wasm::WasmCodePosition position) {
if (string_null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
if (array_null_check == kWithNullCheck) {
- array = AssertNotNull(array, position);
+ array = AssertNotNull(array, wasm::kWasmArrayRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringEncodeWtf8Array,
Operator::kNoDeopt | Operator::kNoThrow, string,
@@ -6029,22 +6189,30 @@ Node* WasmGraphBuilder::StringEncodeWtf16(uint32_t memory, Node* string,
CheckForNull null_check, Node* offset,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringEncodeWtf16,
Operator::kNoDeopt | Operator::kNoThrow, string,
offset, gasm_->SmiConstant(memory));
}
+Node* WasmGraphBuilder::StringAsWtf16(Node* string, CheckForNull null_check,
+ wasm::WasmCodePosition position) {
+ if (null_check == kWithNullCheck) {
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
+ }
+ return gasm_->StringAsWtf16(string);
+}
+
Node* WasmGraphBuilder::StringEncodeWtf16Array(
Node* string, CheckForNull string_null_check, Node* array,
CheckForNull array_null_check, Node* start,
wasm::WasmCodePosition position) {
if (string_null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
if (array_null_check == kWithNullCheck) {
- array = AssertNotNull(array, position);
+ array = AssertNotNull(array, wasm::kWasmArrayRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringEncodeWtf16Array,
Operator::kNoDeopt | Operator::kNoThrow, string,
@@ -6054,8 +6222,12 @@ Node* WasmGraphBuilder::StringEncodeWtf16Array(
Node* WasmGraphBuilder::StringConcat(Node* head, CheckForNull head_null_check,
Node* tail, CheckForNull tail_null_check,
wasm::WasmCodePosition position) {
- if (head_null_check == kWithNullCheck) head = AssertNotNull(head, position);
- if (tail_null_check == kWithNullCheck) tail = AssertNotNull(tail, position);
+ if (head_null_check == kWithNullCheck) {
+ head = AssertNotNull(head, wasm::kWasmStringRef, position);
+ }
+ if (tail_null_check == kWithNullCheck) {
+ tail = AssertNotNull(tail, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(
Builtin::kStringAdd_CheckNone, Operator::kNoDeopt | Operator::kNoThrow,
head, tail,
@@ -6069,10 +6241,12 @@ Node* WasmGraphBuilder::StringEqual(Node* a, CheckForNull a_null_check, Node* b,
// Covers "identical string pointer" and "both are null" cases.
gasm_->GotoIf(gasm_->TaggedEqual(a, b), &done, Int32Constant(1));
if (a_null_check == kWithNullCheck) {
- gasm_->GotoIf(gasm_->IsNull(a), &done, Int32Constant(0));
+ gasm_->GotoIf(gasm_->IsNull(a, wasm::kWasmStringRef), &done,
+ Int32Constant(0));
}
if (b_null_check == kWithNullCheck) {
- gasm_->GotoIf(gasm_->IsNull(b), &done, Int32Constant(0));
+ gasm_->GotoIf(gasm_->IsNull(b, wasm::kWasmStringRef), &done,
+ Int32Constant(0));
}
gasm_->Goto(&done, gasm_->CallBuiltin(Builtin::kWasmStringEqual,
Operator::kEliminatable, a, b));
@@ -6082,7 +6256,9 @@ Node* WasmGraphBuilder::StringEqual(Node* a, CheckForNull a_null_check, Node* b,
Node* WasmGraphBuilder::StringIsUSVSequence(Node* str, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) str = AssertNotNull(str, position);
+ if (null_check == kWithNullCheck) {
+ str = AssertNotNull(str, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringIsUSVSequence,
Operator::kEliminatable, str);
@@ -6090,7 +6266,9 @@ Node* WasmGraphBuilder::StringIsUSVSequence(Node* str, CheckForNull null_check,
Node* WasmGraphBuilder::StringAsWtf8(Node* str, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) str = AssertNotNull(str, position);
+ if (null_check == kWithNullCheck) {
+ str = AssertNotNull(str, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringAsWtf8, Operator::kEliminatable,
str);
@@ -6100,7 +6278,9 @@ Node* WasmGraphBuilder::StringViewWtf8Advance(Node* view,
CheckForNull null_check,
Node* pos, Node* bytes,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) view = AssertNotNull(view, position);
+ if (null_check == kWithNullCheck) {
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringViewWtf8Advance,
Operator::kEliminatable, view, pos, bytes);
@@ -6111,7 +6291,7 @@ void WasmGraphBuilder::StringViewWtf8Encode(
CheckForNull null_check, Node* addr, Node* pos, Node* bytes,
Node** next_pos, Node** bytes_written, wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- view = AssertNotNull(view, position);
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
}
Node* pair =
gasm_->CallBuiltin(Builtin::kWasmStringViewWtf8Encode,
@@ -6126,7 +6306,7 @@ Node* WasmGraphBuilder::StringViewWtf8Slice(Node* view, CheckForNull null_check,
Node* pos, Node* bytes,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- view = AssertNotNull(view, position);
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringViewWtf8Slice,
Operator::kEliminatable, view, pos, bytes);
@@ -6136,11 +6316,58 @@ Node* WasmGraphBuilder::StringViewWtf16GetCodeUnit(
Node* string, CheckForNull null_check, Node* offset,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
- return gasm_->CallBuiltin(Builtin::kWasmStringViewWtf16GetCodeUnit,
- Operator::kNoDeopt | Operator::kNoThrow, string,
- offset);
+ Node* prepare = gasm_->StringPrepareForGetCodeunit(string);
+ Node* base = gasm_->Projection(0, prepare);
+ Node* base_offset = gasm_->Projection(1, prepare);
+ Node* charwidth_shift = gasm_->Projection(2, prepare);
+
+ // Bounds check.
+ Node* length = gasm_->LoadImmutableFromObject(
+ MachineType::Int32(), string,
+ wasm::ObjectAccess::ToTagged(String::kLengthOffset));
+ TrapIfFalse(wasm::kTrapStringOffsetOutOfBounds,
+ gasm_->Uint32LessThan(offset, length), position);
+
+ auto onebyte = gasm_->MakeLabel();
+ auto bailout = gasm_->MakeDeferredLabel();
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ gasm_->GotoIf(
+ gasm_->Word32Equal(charwidth_shift,
+ gasm_->Int32Constant(kCharWidthBailoutSentinel)),
+ &bailout);
+ gasm_->GotoIf(gasm_->Word32Equal(charwidth_shift, gasm_->Int32Constant(0)),
+ &onebyte);
+
+ // Two-byte.
+ Node* object_offset =
+ gasm_->IntAdd(gasm_->IntMul(gasm_->BuildChangeInt32ToIntPtr(offset),
+ gasm_->IntPtrConstant(2)),
+ base_offset);
+ Node* result = gasm_->LoadImmutableFromObject(MachineType::Uint16(), base,
+ object_offset);
+ gasm_->Goto(&done, result);
+
+ // One-byte.
+ gasm_->Bind(&onebyte);
+ object_offset =
+ gasm_->IntAdd(gasm_->BuildChangeInt32ToIntPtr(offset), base_offset);
+ result =
+ gasm_->LoadImmutableFromObject(MachineType::Uint8(), base, object_offset);
+ gasm_->Goto(&done, result);
+
+ gasm_->Bind(&bailout);
+ gasm_->Goto(&done, gasm_->CallRuntimeStub(
+ wasm::WasmCode::kWasmStringViewWtf16GetCodeUnit,
+ Operator::kPure, string, offset));
+
+ gasm_->Bind(&done);
+ // Make sure the original string is kept alive as long as we're operating
+ // on pointers extracted from it (otherwise e.g. external strings' resources
+ // might get freed prematurely).
+ gasm_->Retain(string);
+ return done.PhiAt(0);
}
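
The inlined stringview_wtf16.get_codeunit path selects between a one-byte and a two-byte load based on the character-width shift returned by StringPrepareForGetCodeunit, bailing out to the runtime stub when the width equals the sentinel (and retaining the string so external payloads stay alive). A simplified sketch of just the width dispatch; the struct and field layout below are illustrative, not V8's actual representation:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct PreparedString {
      const uint8_t* base;   // Start of the character payload.
      int charwidth_shift;   // 0 for one-byte strings, 1 for two-byte strings.
    };

    uint16_t GetCodeUnit(const PreparedString& s, uint32_t offset) {
      if (s.charwidth_shift == 0) {
        return s.base[offset];  // One-byte: load a uint8.
      }
      uint16_t unit;            // Two-byte: load a uint16 at offset * 2.
      std::memcpy(&unit, s.base + 2 * static_cast<std::size_t>(offset), sizeof(unit));
      return unit;
    }
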
Node* WasmGraphBuilder::StringViewWtf16Encode(uint32_t memory, Node* string,
@@ -6149,7 +6376,7 @@ Node* WasmGraphBuilder::StringViewWtf16Encode(uint32_t memory, Node* string,
Node* codeunits,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringViewWtf16Encode,
Operator::kNoDeopt | Operator::kNoThrow, offset,
@@ -6162,7 +6389,7 @@ Node* WasmGraphBuilder::StringViewWtf16Slice(Node* string,
Node* start, Node* end,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- string = AssertNotNull(string, position);
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
}
return gasm_->CallBuiltin(Builtin::kWasmStringViewWtf16Slice,
Operator::kEliminatable, string, start, end);
@@ -6170,7 +6397,9 @@ Node* WasmGraphBuilder::StringViewWtf16Slice(Node* string,
Node* WasmGraphBuilder::StringAsIter(Node* str, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) str = AssertNotNull(str, position);
+ if (null_check == kWithNullCheck) {
+ str = AssertNotNull(str, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringAsIter, Operator::kEliminatable,
str);
@@ -6178,7 +6407,9 @@ Node* WasmGraphBuilder::StringAsIter(Node* str, CheckForNull null_check,
Node* WasmGraphBuilder::StringViewIterNext(Node* view, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) view = AssertNotNull(view, position);
+ if (null_check == kWithNullCheck) {
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringViewIterNext,
Operator::kEliminatable, view);
@@ -6188,7 +6419,9 @@ Node* WasmGraphBuilder::StringViewIterAdvance(Node* view,
CheckForNull null_check,
Node* codepoints,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) view = AssertNotNull(view, position);
+ if (null_check == kWithNullCheck) {
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringViewIterAdvance,
Operator::kEliminatable, view, codepoints);
@@ -6198,7 +6431,9 @@ Node* WasmGraphBuilder::StringViewIterRewind(Node* view,
CheckForNull null_check,
Node* codepoints,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) view = AssertNotNull(view, position);
+ if (null_check == kWithNullCheck) {
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringViewIterRewind,
Operator::kEliminatable, view, codepoints);
@@ -6207,47 +6442,178 @@ Node* WasmGraphBuilder::StringViewIterRewind(Node* view,
Node* WasmGraphBuilder::StringViewIterSlice(Node* view, CheckForNull null_check,
Node* codepoints,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) view = AssertNotNull(view, position);
+ if (null_check == kWithNullCheck) {
+ view = AssertNotNull(view, wasm::kWasmStringRef, position);
+ }
return gasm_->CallBuiltin(Builtin::kWasmStringViewIterSlice,
Operator::kEliminatable, view, codepoints);
}
-// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
-constexpr int kI31To32BitSmiShift = 33;
+Node* WasmGraphBuilder::StringCompare(Node* lhs, CheckForNull null_check_lhs,
+ Node* rhs, CheckForNull null_check_rhs,
+ wasm::WasmCodePosition position) {
+ if (null_check_lhs == kWithNullCheck) {
+ lhs = AssertNotNull(lhs, wasm::kWasmStringRef, position);
+ }
+ if (null_check_rhs == kWithNullCheck) {
+ rhs = AssertNotNull(rhs, wasm::kWasmStringRef, position);
+ }
+ return gasm_->BuildChangeSmiToInt32(gasm_->CallBuiltin(
+ Builtin::kStringCompare, Operator::kEliminatable, lhs, rhs));
+}
+
+Node* WasmGraphBuilder::StringFromCodePoint(Node* code_point) {
+ return gasm_->CallBuiltin(Builtin::kWasmStringFromCodePoint,
+ Operator::kEliminatable, code_point);
+}
+
+Node* WasmGraphBuilder::StringHash(Node* string, CheckForNull null_check,
+ wasm::WasmCodePosition position) {
+ if (null_check == kWithNullCheck) {
+ string = AssertNotNull(string, wasm::kWasmStringRef, position);
+ }
+
+ auto runtime_label = gasm_->MakeLabel();
+ auto end_label = gasm_->MakeLabel(MachineRepresentation::kWord32);
+
+ Node* raw_hash = gasm_->LoadFromObject(
+ MachineType::Int32(), string,
+ wasm::ObjectAccess::ToTagged(Name::kRawHashFieldOffset));
+ Node* hash_not_computed_mask =
+ gasm_->Int32Constant(static_cast<int32_t>(Name::kHashNotComputedMask));
+ static_assert(Name::HashFieldTypeBits::kShift == 0);
+ Node* hash_not_computed = gasm_->Word32And(raw_hash, hash_not_computed_mask);
+ gasm_->GotoIf(hash_not_computed, &runtime_label);
+
+ // Fast path if hash is already computed: Decode raw hash value.
+ static_assert(Name::HashBits::kLastUsedBit == kBitsPerInt - 1);
+ Node* hash = gasm_->Word32Shr(
+ raw_hash,
+ gasm_->Int32Constant(static_cast<int32_t>(Name::HashBits::kShift)));
+ gasm_->Goto(&end_label, hash);
+
+ gasm_->Bind(&runtime_label);
+ Node* hash_runtime = gasm_->CallBuiltin(Builtin::kWasmStringHash,
+ Operator::kEliminatable, string);
+ gasm_->Goto(&end_label, hash_runtime);
+
+ gasm_->Bind(&end_label);
+ return end_label.PhiAt(0);
+}
+
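
StringHash first tries to decode the hash from the raw hash field and only falls back to the builtin when the field says the hash has not been computed yet. A sketch of that fast/slow split; the mask and shift here are placeholders, not the real Name field layout:

    #include <cstdint>
    #include <optional>

    // Placeholder layout: low bits flag "not yet computed", upper bits hold the hash.
    constexpr uint32_t kHashNotComputedMask = 0x3;
    constexpr uint32_t kHashShift = 2;

    std::optional<uint32_t> TryFastHash(uint32_t raw_hash_field) {
      if ((raw_hash_field & kHashNotComputedMask) != 0) {
        return std::nullopt;  // Slow path: let the builtin compute the hash.
      }
      return raw_hash_field >> kHashShift;  // Fast path: decode the stored hash.
    }
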
+void WasmGraphBuilder::BuildModifyThreadInWasmFlagHelper(
+ Node* thread_in_wasm_flag_address, bool new_value) {
+ if (v8_flags.debug_code) {
+ Node* flag_value =
+ gasm_->Load(MachineType::Int32(), thread_in_wasm_flag_address, 0);
+ Node* check =
+ gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1));
+
+ Diamond flag_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
+ flag_check.Chain(control());
+ SetControl(flag_check.if_false);
+ Node* message_id = gasm_->NumberConstant(static_cast<int32_t>(
+ new_value ? AbortReason::kUnexpectedThreadInWasmSet
+ : AbortReason::kUnexpectedThreadInWasmUnset));
+
+ Node* old_effect = effect();
+ Node* call = BuildCallToRuntimeWithContext(
+ Runtime::kAbort, NoContextConstant(), &message_id, 1);
+ flag_check.merge->ReplaceInput(1, call);
+ SetEffectControl(flag_check.EffectPhi(old_effect, effect()),
+ flag_check.merge);
+ }
+
+ gasm_->Store({MachineRepresentation::kWord32, kNoWriteBarrier},
+ thread_in_wasm_flag_address, 0,
+ Int32Constant(new_value ? 1 : 0));
+}
+
+void WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* isolate_root = BuildLoadIsolateRoot();
+
+ Node* thread_in_wasm_flag_address =
+ gasm_->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
+
+ BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value);
+}
+
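
The flag itself is a per-isolate int32 that the trap handler consults to tell wasm faults from other faults; the debug_code path additionally checks that each toggle really changes the flag's state. A conceptual sketch, assuming a hypothetical IsolateData struct rather than the real isolate layout:

    #include <cassert>

    struct IsolateData {
      int thread_in_wasm_flag = 0;  // Hypothetical stand-in for the real field.
    };

    void ModifyThreadInWasmFlag(IsolateData* isolate, bool new_value) {
      // Mirrors the debug_code check: the flag must actually change state.
      assert(isolate->thread_in_wasm_flag == (new_value ? 0 : 1));
      isolate->thread_in_wasm_flag = new_value ? 1 : 0;
    }
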
+Node* WasmGraphBuilder::WellKnown_StringToLowerCaseStringref(
+ Node* string, CheckForNull null_check) {
+#if V8_INTL_SUPPORT
+ BuildModifyThreadInWasmFlag(false);
+ if (null_check == kWithNullCheck) {
+ auto if_not_null = gasm_->MakeLabel();
+ auto if_null = gasm_->MakeDeferredLabel();
+ gasm_->GotoIf(IsNull(string, wasm::kWasmStringRef), &if_null);
+ gasm_->Goto(&if_not_null);
+ gasm_->Bind(&if_null);
+ gasm_->CallBuiltin(Builtin::kThrowToLowerCaseCalledOnNull,
+ Operator::kNoWrite);
+ gasm_->Unreachable();
+ gasm_->Bind(&if_not_null);
+ }
+ Node* result =
+ gasm_->CallBuiltin(Builtin::kStringToLowerCaseIntl,
+ Operator::kEliminatable, string, NoContextConstant());
+ BuildModifyThreadInWasmFlag(true);
+ return result;
+#else
+ UNREACHABLE();
+#endif
+}
Node* WasmGraphBuilder::I31New(Node* input) {
- if (SmiValuesAre31Bits()) {
+ if constexpr (SmiValuesAre31Bits()) {
return gasm_->Word32Shl(input, gasm_->BuildSmiShiftBitsConstant32());
+ } else {
+ DCHECK(SmiValuesAre32Bits());
+ // Set the topmost bit to sign-extend the second bit. This way,
+ // interpretation in JS (if this value escapes there) will be the same as
+ // i31.get_s.
+ input = gasm_->BuildChangeInt32ToIntPtr(input);
+ return gasm_->WordSar(
+ gasm_->WordShl(input,
+ gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize + 1)),
+ gasm_->IntPtrConstant(1));
}
- DCHECK(SmiValuesAre32Bits());
- input = gasm_->BuildChangeInt32ToIntPtr(input);
- return gasm_->WordShl(input, gasm_->IntPtrConstant(kI31To32BitSmiShift));
}
Node* WasmGraphBuilder::I31GetS(Node* input, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) input = AssertNotNull(input, position);
- if (SmiValuesAre31Bits()) {
+ if (null_check == kWithNullCheck) {
+ input = AssertNotNull(input, wasm::kWasmI31Ref, position);
+ }
+ if constexpr (SmiValuesAre31Bits()) {
input = gasm_->BuildTruncateIntPtrToInt32(input);
return gasm_->Word32SarShiftOutZeros(input,
gasm_->BuildSmiShiftBitsConstant32());
+ } else {
+ DCHECK(SmiValuesAre32Bits());
+ // Topmost bit is already sign-extended.
+ return gasm_->BuildTruncateIntPtrToInt32(gasm_->WordSar(
+ input, gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize)));
}
- DCHECK(SmiValuesAre32Bits());
- return gasm_->BuildTruncateIntPtrToInt32(
- gasm_->WordSar(input, gasm_->IntPtrConstant(kI31To32BitSmiShift)));
}
Node* WasmGraphBuilder::I31GetU(Node* input, CheckForNull null_check,
wasm::WasmCodePosition position) {
- if (null_check == kWithNullCheck) input = AssertNotNull(input, position);
- if (SmiValuesAre31Bits()) {
+ if (null_check == kWithNullCheck) {
+ input = AssertNotNull(input, wasm::kWasmI31Ref, position);
+ }
+ if constexpr (SmiValuesAre31Bits()) {
input = gasm_->BuildTruncateIntPtrToInt32(input);
return gasm_->Word32Shr(input, gasm_->BuildSmiShiftBitsConstant32());
+ } else {
+ DCHECK(SmiValuesAre32Bits());
+ // We need to remove the topmost bit of the 32-bit Smi.
+ return gasm_->BuildTruncateIntPtrToInt32(
+ gasm_->WordShr(gasm_->WordShl(input, gasm_->IntPtrConstant(1)),
+ gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize + 1)));
}
- DCHECK(SmiValuesAre32Bits());
- return gasm_->BuildTruncateIntPtrToInt32(
- gasm_->WordShr(input, gasm_->IntPtrConstant(kI31To32BitSmiShift)));
}
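
In the 32-bit-Smi configuration (no pointer compression, kSmiShiftSize + kSmiTagSize = 32), i31.new places the 31-bit payload in the upper word and duplicates its sign bit into the topmost bit, so i31.get_s is a single arithmetic shift and i31.get_u strips that extra bit before shifting the payload back down. A host-side worked example of the same shift arithmetic (illustrative; relies on the usual two's-complement shift behavior of mainstream compilers):

    #include <cstdint>
    #include <cstdio>

    constexpr int kShift = 31 + 1;  // kSmiShiftSize + kSmiTagSize.

    int64_t I31New(int32_t value) {
      // Shift the payload up by kShift + 1, then arithmetic-shift right by one
      // so bit 63 duplicates the payload's sign bit (bit 30).
      uint64_t wide = static_cast<uint64_t>(static_cast<int64_t>(value));
      return static_cast<int64_t>(wide << (kShift + 1)) >> 1;
    }

    int32_t I31GetS(int64_t smi) {
      return static_cast<int32_t>(smi >> kShift);  // Sign bit already extended.
    }

    uint32_t I31GetU(int64_t smi) {
      // Drop the duplicated top bit, then shift the payload back down.
      return static_cast<uint32_t>((static_cast<uint64_t>(smi) << 1) >> (kShift + 1));
    }

    int main() {
      int64_t packed = I31New(-5);
      std::printf("%d %u\n", I31GetS(packed), I31GetU(packed));  // -5 2147483643
    }
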
Node* WasmGraphBuilder::SetType(Node* node, wasm::ValueType type) {
@@ -6295,7 +6661,7 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
namespace {
// A non-null {isolate} signifies that the generated code is treated as being in
-// a JS frame for functions like BuildIsolateRoot().
+// a JS frame for functions like BuildLoadIsolateRoot().
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, MachineGraph* mcgraph,
@@ -6305,21 +6671,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
compiler::SourcePositionTable* spt,
StubCallMode stub_mode, wasm::WasmFeatures features)
: WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt, parameter_mode,
- isolate),
+ isolate, features),
module_(module),
- stub_mode_(stub_mode),
- enabled_features_(features) {}
+ stub_mode_(stub_mode) {}
CallDescriptor* GetBigIntToI64CallDescriptor(bool needs_frame_state) {
- if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
-
- bigint_to_i64_descriptor_ = GetBuiltinCallDescriptor(
- Builtin::kBigIntToI64, zone_, stub_mode_, needs_frame_state);
-
- AddInt64LoweringReplacement(
- bigint_to_i64_descriptor_,
- GetBuiltinCallDescriptor(Builtin::kBigIntToI32Pair, zone_, stub_mode_));
- return bigint_to_i64_descriptor_;
+ return wasm::GetWasmEngine()->call_descriptors()->GetBigIntToI64Descriptor(
+ stub_mode_, needs_frame_state);
}
Node* GetTargetForBuiltinCall(wasm::WasmCode::RuntimeStubId wasm_stub,
@@ -6481,53 +6839,82 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kF64:
return BuildChangeFloat64ToNumber(node);
case wasm::kRef:
- case wasm::kRefNull:
switch (type.heap_representation()) {
- case wasm::HeapType::kFunc: {
- if (type.kind() == wasm::kRefNull) {
- auto done =
- gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
- // Do not wrap {null}.
- gasm_->GotoIf(IsNull(node), &done, node);
- gasm_->Goto(&done,
- gasm_->LoadFromObject(
- MachineType::TaggedPointer(), node,
- wasm::ObjectAccess::ToTagged(
- WasmInternalFunction::kExternalOffset)));
- gasm_->Bind(&done);
- return done.PhiAt(0);
- } else {
- return gasm_->LoadFromObject(
- MachineType::TaggedPointer(), node,
- wasm::ObjectAccess::ToTagged(
- WasmInternalFunction::kExternalOffset));
- }
- }
case wasm::HeapType::kEq:
+ case wasm::HeapType::kI31:
case wasm::HeapType::kStruct:
case wasm::HeapType::kArray:
- case wasm::HeapType::kString:
- case wasm::HeapType::kExtern:
case wasm::HeapType::kAny:
- return node;
+ case wasm::HeapType::kExtern:
+ case wasm::HeapType::kString:
case wasm::HeapType::kNone:
case wasm::HeapType::kNoFunc:
case wasm::HeapType::kNoExtern:
- case wasm::HeapType::kI31:
+ return node;
+ case wasm::HeapType::kBottom:
+ case wasm::HeapType::kStringViewWtf8:
+ case wasm::HeapType::kStringViewWtf16:
+ case wasm::HeapType::kStringViewIter:
UNREACHABLE();
+ case wasm::HeapType::kFunc:
default:
- DCHECK(type.has_index());
- if (module_->has_signature(type.ref_index())) {
+ if (type.heap_representation() == wasm::HeapType::kFunc ||
+ module_->has_signature(type.ref_index())) {
// Typed function. Extract the external function.
return gasm_->LoadFromObject(
MachineType::TaggedPointer(), node,
wasm::ObjectAccess::ToTagged(
WasmInternalFunction::kExternalOffset));
+ } else {
+ return node;
}
- // If this is reached, then IsJSCompatibleSignature() is too
- // permissive.
- // TODO(7748): Figure out a JS interop story for arrays and structs.
- UNREACHABLE();
+ }
+ case wasm::kRefNull:
+ switch (type.heap_representation()) {
+ case wasm::HeapType::kExtern:
+ case wasm::HeapType::kNoExtern:
+ return node;
+ case wasm::HeapType::kNone:
+ case wasm::HeapType::kNoFunc:
+ return LOAD_ROOT(NullValue, null_value);
+ case wasm::HeapType::kEq:
+ case wasm::HeapType::kStruct:
+ case wasm::HeapType::kArray:
+ case wasm::HeapType::kString:
+ case wasm::HeapType::kI31:
+ case wasm::HeapType::kAny: {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+ gasm_->GotoIfNot(IsNull(node, type), &done, node);
+ gasm_->Goto(&done, LOAD_ROOT(NullValue, null_value));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
+ case wasm::HeapType::kFunc:
+ default: {
+ if (type == wasm::kWasmFuncRef ||
+ module_->has_signature(type.ref_index())) {
+ auto done =
+ gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+ auto null_label = gasm_->MakeLabel();
+ gasm_->GotoIf(IsNull(node, type), &null_label);
+ gasm_->Goto(&done,
+ gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), node,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kExternalOffset)));
+ gasm_->Bind(&null_label);
+ gasm_->Goto(&done, LOAD_ROOT(NullValue, null_value));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ } else {
+ auto done =
+ gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+ gasm_->GotoIfNot(IsNull(node, type), &done, node);
+ gasm_->Goto(&done, LOAD_ROOT(NullValue, null_value));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
+ }
}
case wasm::kRtt:
case wasm::kI8:
@@ -6540,11 +6927,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
}
- enum UnwrapExternalFunctions : bool {
- kUnwrapWasmExternalFunctions = true,
- kLeaveFunctionsAlone = false
- };
-
Node* BuildChangeBigIntToInt64(Node* input, Node* context,
Node* frame_state) {
Node* target;
@@ -6568,9 +6950,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildCheckString(Node* input, Node* js_context, wasm::ValueType type) {
auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
- auto type_error = gasm_->MakeLabel();
+ auto type_error = gasm_->MakeDeferredLabel();
gasm_->GotoIf(IsSmi(input), &type_error, BranchHint::kFalse);
- if (type.is_nullable()) gasm_->GotoIf(IsNull(input), &done, input);
+ if (type.is_nullable()) {
+ auto not_null = gasm_->MakeLabel();
+ gasm_->GotoIfNot(IsNull(input, wasm::kWasmExternRef), &not_null);
+ gasm_->Goto(&done, LOAD_ROOT(WasmNull, wasm_null));
+ gasm_->Bind(&not_null);
+ }
Node* map = gasm_->LoadMap(input);
Node* instance_type = gasm_->LoadInstanceType(map);
Node* check = gasm_->Uint32LessThan(
@@ -6586,23 +6973,21 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* FromJS(Node* input, Node* js_context, wasm::ValueType type,
- Node* frame_state = nullptr) {
+ const wasm::WasmModule* module, Node* frame_state = nullptr) {
switch (type.kind()) {
case wasm::kRef:
case wasm::kRefNull: {
switch (type.heap_representation()) {
- // Fast paths for extern and string.
- // TODO(7748): Add more/all fast paths?
+ // TODO(7748): Add more fast paths?
case wasm::HeapType::kExtern:
+ case wasm::HeapType::kNoExtern:
return input;
case wasm::HeapType::kString:
return BuildCheckString(input, js_context, type);
case wasm::HeapType::kNone:
case wasm::HeapType::kNoFunc:
- case wasm::HeapType::kNoExtern:
- case wasm::HeapType::kAny:
case wasm::HeapType::kI31:
- UNREACHABLE();
+ case wasm::HeapType::kAny:
case wasm::HeapType::kFunc:
case wasm::HeapType::kStruct:
case wasm::HeapType::kArray:
@@ -6611,14 +6996,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Make sure ValueType fits in a Smi.
static_assert(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
- // The instance node is always defined: if an instance is not
- // available, it is the undefined value.
- Node* inputs[] = {GetInstance(), input,
- mcgraph()->IntPtrConstant(IntToSmi(
- static_cast<int>(type.raw_bit_field())))};
+ if (type.has_index()) {
+ DCHECK_NOT_NULL(module);
+ uint32_t canonical_index =
+ module->isorecursive_canonical_type_ids[type.ref_index()];
+ type = wasm::ValueType::RefMaybeNull(canonical_index,
+ type.nullability());
+ }
+
+ Node* inputs[] = {
+ input, mcgraph()->IntPtrConstant(
+ IntToSmi(static_cast<int>(type.raw_bit_field())))};
return BuildCallToRuntimeWithContext(Runtime::kWasmJSToWasmObject,
- js_context, inputs, 3);
+ js_context, inputs, 2);
}
}
}
@@ -6700,46 +7091,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
}
- void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address,
- bool new_value) {
- if (v8_flags.debug_code) {
- Node* flag_value = gasm_->LoadFromObject(MachineType::Pointer(),
- thread_in_wasm_flag_address, 0);
- Node* check =
- gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1));
-
- Diamond flag_check(graph(), mcgraph()->common(), check,
- BranchHint::kTrue);
- flag_check.Chain(control());
- SetControl(flag_check.if_false);
- Node* message_id = gasm_->NumberConstant(static_cast<int32_t>(
- new_value ? AbortReason::kUnexpectedThreadInWasmSet
- : AbortReason::kUnexpectedThreadInWasmUnset));
-
- Node* old_effect = effect();
- Node* call = BuildCallToRuntimeWithContext(
- Runtime::kAbort, NoContextConstant(), &message_id, 1);
- flag_check.merge->ReplaceInput(1, call);
- SetEffectControl(flag_check.EffectPhi(old_effect, effect()),
- flag_check.merge);
- }
-
- gasm_->StoreToObject(ObjectAccess(MachineType::Int32(), kNoWriteBarrier),
- thread_in_wasm_flag_address, 0,
- Int32Constant(new_value ? 1 : 0));
- }
-
- void BuildModifyThreadInWasmFlag(bool new_value) {
- if (!trap_handler::IsTrapHandlerEnabled()) return;
- Node* isolate_root = BuildLoadIsolateRoot();
-
- Node* thread_in_wasm_flag_address =
- gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
- Isolate::thread_in_wasm_flag_address_offset());
-
- BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value);
- }
-
class ModifyThreadInWasmFlagScope {
public:
ModifyThreadInWasmFlagScope(
@@ -6750,13 +7101,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* isolate_root = wasm_wrapper_graph_builder_->BuildLoadIsolateRoot();
thread_in_wasm_flag_address_ =
- gasm->LoadFromObject(MachineType::Pointer(), isolate_root,
- Isolate::thread_in_wasm_flag_address_offset());
+ gasm->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
thread_in_wasm_flag_address_, true);
}
+ ModifyThreadInWasmFlagScope(const ModifyThreadInWasmFlagScope&) = delete;
+
~ModifyThreadInWasmFlagScope() {
if (!trap_handler::IsTrapHandlerEnabled()) return;
@@ -6791,14 +7144,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildCallAndReturn(bool is_import, Node* js_context,
Node* function_data,
base::SmallVector<Node*, 16> args,
- bool do_conversion, Node* frame_state) {
+ bool do_conversion, Node* frame_state,
+ bool set_in_wasm_flag) {
const int rets_count = static_cast<int>(sig_->return_count());
base::SmallVector<Node*, 1> rets(rets_count);
// Set the ThreadInWasm flag before we do the actual call.
{
- ModifyThreadInWasmFlagScope modify_thread_in_wasm_flag_builder(
- this, gasm_.get());
+ base::Optional<ModifyThreadInWasmFlagScope>
+ modify_thread_in_wasm_flag_builder;
+ if (set_in_wasm_flag) {
+ modify_thread_in_wasm_flag_builder.emplace(this, gasm_.get());
+ }
if (is_import) {
// Call to an imported function.
@@ -6813,9 +7170,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* internal = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function_data,
wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset));
- args[0] = BuildLoadExternalPointerFromObject(
+ args[0] = gasm_->BuildLoadExternalPointerFromObject(
internal, WasmInternalFunction::kCallTargetOffset,
- kWasmInternalFunctionCallTargetTag);
+ kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot());
Node* instance_node = gasm_->LoadFromObject(
MachineType::TaggedPointer(), internal,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
@@ -6915,7 +7272,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
void BuildJSToWasmWrapper(bool is_import, bool do_conversion = true,
- Node* frame_state = nullptr) {
+ Node* frame_state = nullptr,
+ bool set_in_wasm_flag = true) {
const int wasm_param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
@@ -6927,7 +7285,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Linkage::GetJSCallContextParamIndex(wasm_param_count + 1), "%context");
Node* function_data = gasm_->LoadFunctionDataFromJSFunction(js_closure);
- if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
+ if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the js_context of the calling javascript
// function (passed as a parameter), such that the generated code is
// js_context independent.
@@ -6967,8 +7325,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* wasm_param = FromJSFast(params[i + 1], sig_->GetParam(i));
args[i + 1] = wasm_param;
}
- Node* jsval = BuildCallAndReturn(is_import, js_context, function_data,
- args, do_conversion, frame_state);
+ Node* jsval =
+ BuildCallAndReturn(is_import, js_context, function_data, args,
+ do_conversion, frame_state, set_in_wasm_flag);
gasm_->Goto(&done, jsval);
gasm_->Bind(&slow_path);
}
@@ -6977,8 +7336,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
base::SmallVector<Node*, 16> args(args_count);
for (int i = 0; i < wasm_param_count; ++i) {
if (do_conversion) {
- args[i + 1] =
- FromJS(params[i + 1], js_context, sig_->GetParam(i), frame_state);
+ args[i + 1] = FromJS(params[i + 1], js_context, sig_->GetParam(i),
+ module_, frame_state);
} else {
Node* wasm_param = params[i + 1];
@@ -6993,8 +7352,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[i + 1] = wasm_param;
}
}
- Node* jsval = BuildCallAndReturn(is_import, js_context, function_data, args,
- do_conversion, frame_state);
+
+ Node* jsval =
+ BuildCallAndReturn(is_import, js_context, function_data, args,
+ do_conversion, frame_state, set_in_wasm_flag);
// If both the default and a fast transformation paths are present,
// get the return value based on the path used.
if (include_fast_path) {
@@ -7042,7 +7403,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* native_context = gasm_->Load(
MachineType::TaggedPointer(), api_function_ref,
wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kNativeContextOffset));
- Node* active_suspender = LOAD_ROOT(ActiveSuspender, active_suspender);
+ Node* active_suspender =
+ LOAD_MUTABLE_ROOT(ActiveSuspender, active_suspender);
gasm_->GotoIf(gasm_->TaggedEqual(active_suspender, UndefinedValue()),
&bad_suspender, BranchHint::kFalse);
gasm_->GotoIfNot(gasm_->TaggedEqual(suspender, active_suspender),
@@ -7069,8 +7431,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
// For wasm-to-js wrappers, parameter 0 is a WasmApiFunctionRef.
- bool BuildWasmToJSWrapper(WasmImportCallKind kind, int expected_arity,
- wasm::Suspend suspend) {
+ bool BuildWasmToJSWrapper(wasm::ImportCallKind kind, int expected_arity,
+ wasm::Suspend suspend,
+ const wasm::WasmModule* module) {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -7080,7 +7443,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
MachineType::TaggedPointer(), Param(0),
wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kNativeContextOffset));
- if (kind == WasmImportCallKind::kRuntimeTypeError) {
+ if (kind == wasm::ImportCallKind::kRuntimeTypeError) {
// =======================================================================
// === Runtime TypeError =================================================
// =======================================================================
@@ -7105,7 +7468,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// =======================================================================
// === JS Functions with matching arity ==================================
// =======================================================================
- case WasmImportCallKind::kJSFunctionArityMatch: {
+ case wasm::ImportCallKind::kJSFunctionArityMatch: {
base::SmallVector<Node*, 16> args(wasm_count + 7 - suspend);
int pos = 0;
Node* function_context =
@@ -7141,7 +7504,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// =======================================================================
// === JS Functions with mismatching arity ===============================
// =======================================================================
- case WasmImportCallKind::kJSFunctionArityMismatch: {
+ case wasm::ImportCallKind::kJSFunctionArityMismatch: {
int pushed_count = std::max(expected_arity, wasm_count - suspend);
base::SmallVector<Node*, 16> args(pushed_count + 7);
int pos = 0;
@@ -7179,7 +7542,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// =======================================================================
// === General case of unknown callable ==================================
// =======================================================================
- case WasmImportCallKind::kUseCallBuiltin: {
+ case wasm::ImportCallKind::kUseCallBuiltin: {
base::SmallVector<Node*, 16> args(wasm_count + 7 - suspend);
int pos = 0;
args[pos++] =
@@ -7225,7 +7588,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (sig_->return_count() <= 1) {
Node* val = sig_->return_count() == 0
? Int32Constant(0)
- : FromJS(call, native_context, sig_->GetReturn());
+ : FromJS(call, native_context, sig_->GetReturn(), module);
BuildModifyThreadInWasmFlag(true);
Return(val);
} else {
@@ -7234,7 +7597,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
base::SmallVector<Node*, 8> wasm_values(sig_->return_count());
for (unsigned i = 0; i < sig_->return_count(); ++i) {
wasm_values[i] = FromJS(gasm_->LoadFixedArrayElementAny(fixed_array, i),
- native_context, sig_->GetReturn(i));
+ native_context, sig_->GetReturn(i), module);
}
BuildModifyThreadInWasmFlag(true);
Return(base::VectorOf(wasm_values));
@@ -7513,7 +7876,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* call = gasm_->Call(call_descriptor, pos, args.begin());
return sig_->return_count() == 0
? Int32Constant(0)
- : FromJS(call, native_context, sig_->GetReturn());
+ : FromJS(call, native_context, sig_->GetReturn(), nullptr);
});
BuildModifyThreadInWasmFlag(true);
@@ -7532,7 +7895,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* context = Param(Linkage::GetJSCallContextParamIndex(wasm_count + 1));
// Throw a TypeError if the signature is incompatible with JavaScript.
- if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
+ if (!wasm::IsJSCompatibleSignature(sig_)) {
BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, context,
nullptr, 0);
TerminateThrow(effect(), control());
@@ -7568,7 +7931,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert parameter JS values to wasm numbers and back to JS values.
for (int i = 0; i < wasm_count; ++i) {
Node* param = Param(i + 1); // Start from index 1 to skip receiver.
- args[pos++] = ToJS(FromJS(param, context, sig_->GetParam(i)),
+ args[pos++] = ToJS(FromJS(param, context, sig_->GetParam(i), nullptr),
sig_->GetParam(i), context);
}
@@ -7584,8 +7947,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (sig_->return_count() == 0) {
jsval = UndefinedValue();
} else if (sig_->return_count() == 1) {
- jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn(),
- context);
+ jsval = ToJS(FromJS(call, context, sig_->GetReturn(), nullptr),
+ sig_->GetReturn(), context);
} else {
Node* fixed_array =
BuildMultiReturnFixedArrayFromIterable(sig_, call, context);
@@ -7596,7 +7959,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (unsigned i = 0; i < sig_->return_count(); ++i) {
const auto& type = sig_->GetReturn(i);
Node* elem = gasm_->LoadFixedArrayElementAny(fixed_array, i);
- Node* cast = ToJS(FromJS(elem, context, type), type, context);
+ Node* cast = ToJS(FromJS(elem, context, type, nullptr), type, context);
gasm_->StoreFixedArrayElementAny(result_fixed_array, i, cast);
}
}
@@ -7670,9 +8033,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Return(mcgraph()->IntPtrConstant(0));
if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) {
- // No special lowering should be requested in the C entry.
- DCHECK_NULL(lowering_special_case_);
-
+ // These correspond to {sig_types[]} in {CompileCWasmEntry}.
MachineRepresentation sig_reps[] = {
MachineType::PointerRepresentation(), // return value
MachineType::PointerRepresentation(), // target
@@ -7683,7 +8044,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
mcgraph()->common(), gasm_->simplified(),
- mcgraph()->zone(), module_, &c_entry_sig);
+ mcgraph()->zone(), &c_entry_sig);
r.LowerGraph();
}
}
@@ -7696,21 +8057,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetOncePointer<const Operator> float32_to_number_operator_;
SetOncePointer<const Operator> float64_to_number_operator_;
SetOncePointer<const Operator> tagged_to_float64_operator_;
- wasm::WasmFeatures enabled_features_;
- CallDescriptor* bigint_to_i64_descriptor_ = nullptr;
};
} // namespace
-void BuildInlinedJSToWasmWrapper(
- Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
- const wasm::WasmModule* module, Isolate* isolate,
- compiler::SourcePositionTable* spt, StubCallMode stub_mode,
- wasm::WasmFeatures features, Node* frame_state) {
+void BuildInlinedJSToWasmWrapper(Zone* zone, MachineGraph* mcgraph,
+ const wasm::FunctionSig* signature,
+ const wasm::WasmModule* module,
+ Isolate* isolate,
+ compiler::SourcePositionTable* spt,
+ wasm::WasmFeatures features, Node* frame_state,
+ bool set_in_wasm_flag) {
WasmWrapperGraphBuilder builder(zone, mcgraph, signature, module,
WasmGraphBuilder::kNoSpecialParameterMode,
- isolate, spt, stub_mode, features);
- builder.BuildJSToWasmWrapper(false, false, frame_state);
+ isolate, spt,
+ StubCallMode::kCallBuiltinPointer, features);
+ builder.BuildJSToWasmWrapper(false, false, frame_state, set_in_wasm_flag);
}
std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
@@ -7750,258 +8112,12 @@ std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
std::move(debug_name), WasmAssemblerOptions());
}
-static MachineRepresentation NormalizeFastApiRepresentation(
- const CTypeInfo& info) {
- MachineType t = MachineType::TypeForCType(info);
- // Wasm representation of bool is i32 instead of i1.
- if (t.semantic() == MachineSemantic::kBool) {
- return MachineRepresentation::kWord32;
- }
- return t.representation();
-}
-
-static bool IsSupportedWasmFastApiFunction(
- const wasm::FunctionSig* expected_sig, Handle<SharedFunctionInfo> shared) {
- if (!shared->IsApiFunction()) {
- return false;
- }
- if (shared->get_api_func_data().GetCFunctionsCount() == 0) {
- return false;
- }
- if (!shared->get_api_func_data().accept_any_receiver()) {
- return false;
- }
- if (!shared->get_api_func_data().signature().IsUndefined()) {
- // TODO(wasm): CFunctionInfo* signature check.
- return false;
- }
- const CFunctionInfo* info = shared->get_api_func_data().GetCSignature(0);
- if (!fast_api_call::CanOptimizeFastSignature(info)) {
- return false;
- }
-
- const auto log_imported_function_mismatch = [&shared](const char* reason) {
- if (v8_flags.trace_opt) {
- CodeTracer::Scope scope(shared->GetIsolate()->GetCodeTracer());
- PrintF(scope.file(), "[disabled optimization for ");
- shared->ShortPrint(scope.file());
- PrintF(scope.file(),
- ", reason: the signature of the imported function in the Wasm "
- "module doesn't match that of the Fast API function (%s)]\n",
- reason);
- }
- };
-
- // C functions only have one return value.
- if (expected_sig->return_count() > 1) {
- // Here and below, we log when the function we call is declared as an Api
- // function but we cannot optimize the call, which might be unexpected. In
- // that case we use the "slow" path making a normal Wasm->JS call and
- // calling the "slow" callback specified in FunctionTemplate::New().
- log_imported_function_mismatch("too many return values");
- return false;
- }
- CTypeInfo return_info = info->ReturnInfo();
- // Unsupported if return type doesn't match.
- if (expected_sig->return_count() == 0 &&
- return_info.GetType() != CTypeInfo::Type::kVoid) {
- log_imported_function_mismatch("too few return values");
- return false;
- }
- // Unsupported if return type doesn't match.
- if (expected_sig->return_count() == 1) {
- if (return_info.GetType() == CTypeInfo::Type::kVoid) {
- log_imported_function_mismatch("too many return values");
- return false;
- }
- if (NormalizeFastApiRepresentation(return_info) !=
- expected_sig->GetReturn(0).machine_type().representation()) {
- log_imported_function_mismatch("mismatching return value");
- return false;
- }
- }
- // Unsupported if arity doesn't match.
- if (expected_sig->parameter_count() != info->ArgumentCount() - 1) {
- log_imported_function_mismatch("mismatched arity");
- return false;
- }
- // Unsupported if any argument types don't match.
- for (unsigned int i = 0; i < expected_sig->parameter_count(); i += 1) {
- // Arg 0 is the receiver, skip over it since wasm doesn't
- // have a concept of receivers.
- CTypeInfo arg = info->ArgumentInfo(i + 1);
- if (NormalizeFastApiRepresentation(arg) !=
- expected_sig->GetParam(i).machine_type().representation()) {
- log_imported_function_mismatch("parameter type mismatch");
- return false;
- }
- }
- return true;
-}
-
-bool ResolveBoundJSFastApiFunction(const wasm::FunctionSig* expected_sig,
- Handle<JSReceiver> callable) {
- Handle<JSFunction> target;
- if (callable->IsJSBoundFunction()) {
- Handle<JSBoundFunction> bound_target =
- Handle<JSBoundFunction>::cast(callable);
- // Nested bound functions and arguments not supported yet.
- if (bound_target->bound_arguments().length() > 0) {
- return false;
- }
- if (bound_target->bound_target_function().IsJSBoundFunction()) {
- return false;
- }
- Handle<JSReceiver> bound_target_function =
- handle(bound_target->bound_target_function(), callable->GetIsolate());
- if (!bound_target_function->IsJSFunction()) {
- return false;
- }
- target = Handle<JSFunction>::cast(bound_target_function);
- } else if (callable->IsJSFunction()) {
- target = Handle<JSFunction>::cast(callable);
- } else {
- return false;
- }
-
- Handle<SharedFunctionInfo> shared(target->shared(), target->GetIsolate());
- return IsSupportedWasmFastApiFunction(expected_sig, shared);
-}
-
-WasmImportData ResolveWasmImportCall(
- Handle<JSReceiver> callable, const wasm::FunctionSig* expected_sig,
- const wasm::WasmModule* module,
- const wasm::WasmFeatures& enabled_features) {
- Isolate* isolate = callable->GetIsolate();
- if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
- auto imported_function = Handle<WasmExportedFunction>::cast(callable);
- if (!imported_function->MatchesSignature(module, expected_sig)) {
- return {WasmImportCallKind::kLinkError, callable, wasm::kNoSuspend};
- }
- uint32_t func_index =
- static_cast<uint32_t>(imported_function->function_index());
- if (func_index >=
- imported_function->instance().module()->num_imported_functions) {
- return {WasmImportCallKind::kWasmToWasm, callable, wasm::kNoSuspend};
- }
- // Resolve the shortcut to the underlying callable and continue.
- Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
- ImportedFunctionEntry entry(instance, func_index);
- callable = handle(entry.callable(), isolate);
- }
- wasm::Suspend suspend = wasm::kNoSuspend;
- if (WasmJSFunction::IsWasmJSFunction(*callable)) {
- auto js_function = Handle<WasmJSFunction>::cast(callable);
- suspend = js_function->GetSuspend();
- if (!js_function->MatchesSignature(expected_sig)) {
- return {WasmImportCallKind::kLinkError, callable, wasm::kNoSuspend};
- }
- // Resolve the short-cut to the underlying callable and continue.
- callable = handle(js_function->GetCallable(), isolate);
- }
- if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
- auto capi_function = Handle<WasmCapiFunction>::cast(callable);
- if (!capi_function->MatchesSignature(expected_sig)) {
- return {WasmImportCallKind::kLinkError, callable, wasm::kNoSuspend};
- }
- return {WasmImportCallKind::kWasmToCapi, callable, wasm::kNoSuspend};
- }
- // Assuming we are calling to JS, check whether this would be a runtime error.
- if (!wasm::IsJSCompatibleSignature(expected_sig, module, enabled_features)) {
- return {WasmImportCallKind::kRuntimeTypeError, callable, wasm::kNoSuspend};
- }
- // Check if this can be a JS fast API call.
- if (v8_flags.turbo_fast_api_calls &&
- ResolveBoundJSFastApiFunction(expected_sig, callable)) {
- return {WasmImportCallKind::kWasmToJSFastApi, callable, wasm::kNoSuspend};
- }
- // For JavaScript calls, determine whether the target has an arity match.
- if (callable->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- Handle<SharedFunctionInfo> shared(function->shared(),
- function->GetIsolate());
-
-// Check for math intrinsics.
-#define COMPARE_SIG_FOR_BUILTIN(name) \
- { \
- const wasm::FunctionSig* sig = \
- wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
- if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
- DCHECK_NOT_NULL(sig); \
- if (*expected_sig == *sig) { \
- return {WasmImportCallKind::k##name, callable, wasm::kNoSuspend}; \
- } \
- }
-#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
- case Builtin::kMath##name: \
- COMPARE_SIG_FOR_BUILTIN(F64##name); \
- break;
-#define COMPARE_SIG_FOR_BUILTIN_F32_F64(name) \
- case Builtin::kMath##name: \
- COMPARE_SIG_FOR_BUILTIN(F64##name); \
- COMPARE_SIG_FOR_BUILTIN(F32##name); \
- break;
-
- if (v8_flags.wasm_math_intrinsics && shared->HasBuiltinId()) {
- switch (shared->builtin_id()) {
- COMPARE_SIG_FOR_BUILTIN_F64(Acos);
- COMPARE_SIG_FOR_BUILTIN_F64(Asin);
- COMPARE_SIG_FOR_BUILTIN_F64(Atan);
- COMPARE_SIG_FOR_BUILTIN_F64(Cos);
- COMPARE_SIG_FOR_BUILTIN_F64(Sin);
- COMPARE_SIG_FOR_BUILTIN_F64(Tan);
- COMPARE_SIG_FOR_BUILTIN_F64(Exp);
- COMPARE_SIG_FOR_BUILTIN_F64(Log);
- COMPARE_SIG_FOR_BUILTIN_F64(Atan2);
- COMPARE_SIG_FOR_BUILTIN_F64(Pow);
- COMPARE_SIG_FOR_BUILTIN_F32_F64(Min);
- COMPARE_SIG_FOR_BUILTIN_F32_F64(Max);
- COMPARE_SIG_FOR_BUILTIN_F32_F64(Abs);
- COMPARE_SIG_FOR_BUILTIN_F32_F64(Ceil);
- COMPARE_SIG_FOR_BUILTIN_F32_F64(Floor);
- COMPARE_SIG_FOR_BUILTIN_F32_F64(Sqrt);
- case Builtin::kMathFround:
- COMPARE_SIG_FOR_BUILTIN(F32ConvertF64);
- break;
- default:
- break;
- }
- }
-
-#undef COMPARE_SIG_FOR_BUILTIN
-#undef COMPARE_SIG_FOR_BUILTIN_F64
-#undef COMPARE_SIG_FOR_BUILTIN_F32_F64
-
- if (IsClassConstructor(shared->kind())) {
- // Class constructor will throw anyway.
- return {WasmImportCallKind::kUseCallBuiltin, callable, suspend};
- }
-
- if (shared->internal_formal_parameter_count_without_receiver() ==
- expected_sig->parameter_count() - suspend) {
- return {WasmImportCallKind::kJSFunctionArityMatch, callable, suspend};
- }
-
- // If function isn't compiled, compile it now.
- Isolate* isolate = callable->GetIsolate();
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
- if (!is_compiled_scope.is_compiled()) {
- Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope);
- }
-
- return {WasmImportCallKind::kJSFunctionArityMismatch, callable, suspend};
- }
- // Unknown case. Use the call builtin.
- return {WasmImportCallKind::kUseCallBuiltin, callable, suspend};
-}
-
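The function removed above is the classifier that decides how a Wasm import call is dispatched; judging from the header changes later in this diff (the declaration disappears and src/wasm/module-instantiate.h is now included), the logic and its enum move into the wasm namespace as wasm::ImportCallKind. The following is only a rough, self-contained sketch of the classification order it implemented; CallableInfo and every field below are invented stand-ins, not V8 types.

// Illustrative sketch only: hypothetical stand-ins, not V8's API.
#include <cstdint>
#include <cstdio>

enum class ImportCallKind : uint8_t {
  kLinkError,
  kWasmToWasm,
  kWasmToCapi,
  kRuntimeTypeError,
  kWasmToJSFastApi,
  kJSFunctionArityMatch,
  kJSFunctionArityMismatch,
  kUseCallBuiltin
};

// Hypothetical summary of the callable being imported.
struct CallableInfo {
  bool is_wasm_exported_function = false;
  bool is_capi_function = false;
  bool is_js_function = false;
  bool signature_matches = true;        // static signature check
  bool js_compatible_signature = true;  // can this signature legally reach JS?
  bool fast_api_eligible = false;       // bound JS Fast API C function?
  int formal_parameter_count = 0;
  int expected_parameter_count = 0;
};

// Mirrors the order of checks in the removed function: wasm-to-wasm and
// C-API targets are recognized first, then obvious runtime type errors,
// then the Fast API shortcut, and finally plain JS functions, where only
// the arity decides whether an adapter frame is needed.
ImportCallKind Classify(const CallableInfo& c) {
  if (c.is_wasm_exported_function) {
    return c.signature_matches ? ImportCallKind::kWasmToWasm
                               : ImportCallKind::kLinkError;
  }
  if (c.is_capi_function) {
    return c.signature_matches ? ImportCallKind::kWasmToCapi
                               : ImportCallKind::kLinkError;
  }
  if (!c.js_compatible_signature) return ImportCallKind::kRuntimeTypeError;
  if (c.fast_api_eligible) return ImportCallKind::kWasmToJSFastApi;
  if (c.is_js_function) {
    return c.formal_parameter_count == c.expected_parameter_count
               ? ImportCallKind::kJSFunctionArityMatch
               : ImportCallKind::kJSFunctionArityMismatch;
  }
  return ImportCallKind::kUseCallBuiltin;
}

int main() {
  CallableInfo plain_js;
  plain_js.is_js_function = true;
  plain_js.formal_parameter_count = 2;
  plain_js.expected_parameter_count = 3;
  // Prints the numeric value of kJSFunctionArityMismatch (6).
  std::printf("%d\n", static_cast<int>(Classify(plain_js)));
  return 0;
}

In the real code the exported-function and WasmJSFunction cases additionally unwrap the underlying callable before continuing, and JS functions are further matched against math-intrinsic signatures; the sketch keeps only the final dispatch decision.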
namespace {
-wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
+wasm::WasmOpcode GetMathIntrinsicOpcode(wasm::ImportCallKind kind,
const char** name_ptr) {
#define CASE(name) \
- case WasmImportCallKind::k##name: \
+ case wasm::ImportCallKind::k##name: \
*name_ptr = "WasmMathIntrinsic:" #name; \
return wasm::kExpr##name
switch (kind) {
@@ -8035,7 +8151,7 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
}
wasm::WasmCompilationResult CompileWasmMathIntrinsic(
- WasmImportCallKind kind, const wasm::FunctionSig* sig) {
+ wasm::ImportCallKind kind, const wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
@@ -8098,17 +8214,17 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
} // namespace
wasm::WasmCompilationResult CompileWasmImportCallWrapper(
- wasm::CompilationEnv* env, WasmImportCallKind kind,
+ wasm::CompilationEnv* env, wasm::ImportCallKind kind,
const wasm::FunctionSig* sig, bool source_positions, int expected_arity,
wasm::Suspend suspend) {
- DCHECK_NE(WasmImportCallKind::kLinkError, kind);
- DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
- DCHECK_NE(WasmImportCallKind::kWasmToJSFastApi, kind);
+ DCHECK_NE(wasm::ImportCallKind::kLinkError, kind);
+ DCHECK_NE(wasm::ImportCallKind::kWasmToWasm, kind);
+ DCHECK_NE(wasm::ImportCallKind::kWasmToJSFastApi, kind);
// Check for math intrinsics first.
if (v8_flags.wasm_math_intrinsics &&
- kind >= WasmImportCallKind::kFirstMathIntrinsic &&
- kind <= WasmImportCallKind::kLastMathIntrinsic) {
+ kind >= wasm::ImportCallKind::kFirstMathIntrinsic &&
+ kind <= wasm::ImportCallKind::kLastMathIntrinsic) {
return CompileWasmMathIntrinsic(kind, sig);
}
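The guard above relies on the math-intrinsic enumerators forming one contiguous block between kFirstMathIntrinsic and kLastMathIntrinsic, so two comparisons classify the whole group. A minimal sketch of that pattern, with an invented enum:

// Sketch of the contiguous-enum range check; the enum here is invented.
#include <cstdint>
#include <cstdio>

enum class Kind : uint8_t {
  kOther,
  kFirstMathIntrinsic,
  kF64Acos = kFirstMathIntrinsic,
  kF64Sin,
  kF32Sqrt,
  kLastMathIntrinsic = kF32Sqrt,
  kUseCallBuiltin
};

bool IsMathIntrinsic(Kind kind) {
  // Valid only because the intrinsic enumerators form one contiguous block.
  return kind >= Kind::kFirstMathIntrinsic && kind <= Kind::kLastMathIntrinsic;
}

int main() {
  std::printf("%d %d\n", IsMathIntrinsic(Kind::kF64Sin),
              IsMathIntrinsic(Kind::kUseCallBuiltin));  // prints "1 0"
}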
@@ -8138,7 +8254,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
&zone, mcgraph, sig, env->module,
WasmGraphBuilder::kWasmApiFunctionRefMode, nullptr, source_position_table,
StubCallMode::kCallWasmRuntimeStub, env->enabled_features);
- builder.BuildWasmToJSWrapper(kind, expected_arity, suspend);
+ builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, env->module);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
constexpr size_t kMaxNameLen = 128;
@@ -8211,12 +8327,16 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
published_code = native_module->PublishCode(std::move(wasm_code));
}
return published_code;
}
+bool IsFastCallSupportedSignature(const v8::CFunctionInfo* sig) {
+ return fast_api_call::CanOptimizeFastSignature(sig);
+}
+
wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
const wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
@@ -8264,14 +8384,14 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), wasm::WasmCode::kWasmToJsWrapper,
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
return native_module->PublishCode(std::move(wasm_code));
}
}
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
- WasmImportCallKind kind,
+ wasm::ImportCallKind kind,
int expected_arity,
wasm::Suspend suspend) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
@@ -8291,7 +8411,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
nullptr, nullptr,
StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
- builder.BuildWasmToJSWrapper(kind, expected_arity, suspend);
+ builder.BuildWasmToJSWrapper(kind, expected_arity, suspend, nullptr);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
constexpr size_t kMaxNameLen = 128;
@@ -8316,10 +8436,9 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
CompilationJob::FAILED ||
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
- return Handle<Code>();
+ return {};
}
- Handle<Code> code = job->compilation_info()->code();
- return code;
+ return job->compilation_info()->code();
}
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
@@ -8366,13 +8485,11 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
return {};
}
- Handle<Code> code = job->compilation_info()->code();
-
- return code;
+ return job->compilation_info()->code();
}
-Handle<CodeT> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
- const wasm::WasmModule* module) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
+ const wasm::WasmModule* module) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph* graph = zone->New<Graph>(zone.get());
@@ -8421,39 +8538,29 @@ Handle<CodeT> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
CompilationJob::FAILED);
CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
- return ToCodeT(job->compilation_info()->code(), isolate);
+ return job->compilation_info()->code();
}
namespace {
-bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
- const wasm::FunctionBody& func_body,
- int func_index, wasm::WasmFeatures* detected,
- MachineGraph* mcgraph,
- std::vector<compiler::WasmLoopInfo>* loop_infos,
- NodeOriginTable* node_origins,
- SourcePositionTable* source_positions) {
+void BuildGraphForWasmFunction(wasm::CompilationEnv* env,
+ WasmCompilationData& data,
+ wasm::WasmFeatures* detected,
+ MachineGraph* mcgraph) {
// Create a TF graph during decoding.
- WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
- source_positions);
+ WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, data.func_body.sig,
+ data.source_positions);
auto* allocator = wasm::GetWasmEngine()->allocator();
- wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
- allocator, env->enabled_features, env->module, &builder, detected,
- func_body, loop_infos, node_origins, func_index, wasm::kRegularFunction);
- if (graph_construction_result.failed()) {
- if (v8_flags.trace_wasm_compiler) {
- StdoutStream{} << "Compilation failed: "
- << graph_construction_result.error().message()
- << std::endl;
- }
- return false;
- }
+ wasm::BuildTFGraph(allocator, env->enabled_features, env->module, &builder,
+ detected, data.func_body, data.loop_infos, nullptr,
+ data.node_origins, data.func_index, data.assumptions,
+ wasm::kRegularFunction);
- auto sig = CreateMachineSignature(mcgraph->zone(), func_body.sig,
- WasmGraphBuilder::kCalledFromWasm);
- builder.LowerInt64(sig);
-
- return true;
+#ifdef V8_ENABLE_WASM_SIMD256_REVEC
+ if (v8_flags.experimental_wasm_revectorize && builder.has_simd()) {
+ mcgraph->graph()->SetSimd(true);
+ }
+#endif
}
base::Vector<const char> GetDebugName(Zone* zone,
@@ -8489,16 +8596,15 @@ base::Vector<const char> GetDebugName(Zone* zone,
} // namespace
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_byte_storage,
- const wasm::FunctionBody& func_body, int func_index, Counters* counters,
- wasm::AssemblerBufferCache* buffer_cache, wasm::WasmFeatures* detected) {
+ wasm::CompilationEnv* env, WasmCompilationData& data, Counters* counters,
+ wasm::WasmFeatures* detected) {
// Check that we do not accidentally compile a Wasm function to TurboFan if
// --liftoff-only is set.
DCHECK(!v8_flags.liftoff_only);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.CompileTopTier", "func_index", func_index, "body_size",
- func_body.end - func_body.start);
+ "wasm.CompileTopTier", "func_index", data.func_index,
+ "body_size", data.body_size());
Zone zone(wasm::GetWasmEngine()->allocator(), ZONE_NAME, kCompressGraphZone);
MachineGraph* mcgraph = zone.New<MachineGraph>(
zone.New<Graph>(&zone), zone.New<CommonOperatorBuilder>(&zone),
@@ -8508,59 +8614,59 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
InstructionSelector::AlignmentRequirements()));
OptimizedCompilationInfo info(
- GetDebugName(&zone, env->module, wire_byte_storage, func_index), &zone,
- CodeKind::WASM_FUNCTION);
+ GetDebugName(&zone, env->module, data.wire_bytes_storage,
+ data.func_index),
+ &zone, CodeKind::WASM_FUNCTION);
if (env->runtime_exception_support) {
info.set_wasm_runtime_exception_support();
}
- if (v8_flags.experimental_wasm_gc) info.set_allocation_folding();
+ if (env->enabled_features.has_gc()) info.set_allocation_folding();
if (info.trace_turbo_json()) {
TurboCfgFile tcf;
tcf << AsC1VCompilation(&info);
}
- NodeOriginTable* node_origins =
- info.trace_turbo_json() ? zone.New<NodeOriginTable>(mcgraph->graph())
- : nullptr;
- SourcePositionTable* source_positions =
+ if (info.trace_turbo_json()) {
+ data.node_origins = zone.New<NodeOriginTable>(mcgraph->graph());
+ }
+
+ data.source_positions =
mcgraph->zone()->New<SourcePositionTable>(mcgraph->graph());
+ ZoneVector<WasmInliningPosition> inlining_positions(&zone);
std::vector<WasmLoopInfo> loop_infos;
+ data.loop_infos = &loop_infos;
+ data.assumptions = new wasm::AssumptionsJournal();
wasm::WasmFeatures unused_detected_features;
if (!detected) detected = &unused_detected_features;
- if (!BuildGraphForWasmFunction(env, func_body, func_index, detected, mcgraph,
- &loop_infos, node_origins, source_positions)) {
- return wasm::WasmCompilationResult{};
- }
+ BuildGraphForWasmFunction(env, data, detected, mcgraph);
- if (node_origins) {
- node_origins->AddDecorator();
+ if (data.node_origins) {
+ data.node_origins->AddDecorator();
}
// Run the compiler pipeline to generate machine code.
- auto call_descriptor = GetWasmCallDescriptor(&zone, func_body.sig);
+ auto call_descriptor = GetWasmCallDescriptor(&zone, data.func_body.sig);
if (mcgraph->machine()->Is32()) {
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
- if (ContainsSimd(func_body.sig) && !CpuFeatures::SupportsWasmSimd128()) {
+ if (ContainsSimd(data.func_body.sig) && !CpuFeatures::SupportsWasmSimd128()) {
// Fail compilation if hardware does not support SIMD.
return wasm::WasmCompilationResult{};
}
- Pipeline::GenerateCodeForWasmFunction(&info, env, wire_byte_storage, mcgraph,
- call_descriptor, source_positions,
- node_origins, func_body, env->module,
- func_index, &loop_infos, buffer_cache);
+ Pipeline::GenerateCodeForWasmFunction(&info, env, data, mcgraph,
+ call_descriptor, &inlining_positions);
if (counters) {
int zone_bytes =
static_cast<int>(mcgraph->graph()->zone()->allocation_size());
counters->wasm_compile_function_peak_memory_bytes()->AddSample(zone_bytes);
- if (func_body.end - func_body.start >= 100 * KB) {
+ if (data.body_size() >= 100 * KB) {
counters->wasm_compile_huge_function_peak_memory_bytes()->AddSample(
zone_bytes);
}
@@ -8574,6 +8680,7 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
auto result = info.ReleaseWasmCompilationResult();
CHECK_NOT_NULL(result); // Compilation expected to succeed.
DCHECK_EQ(wasm::ExecutionTier::kTurbofan, result->result_tier);
+ result->assumptions.reset(data.assumptions);
return std::move(*result);
}
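Note the ownership pattern above: the assumptions journal is allocated with new before graph building and only handed to the compilation result at the very end via reset(). A small stand-alone sketch of that raw-pointer-to-unique_ptr handoff, with invented names:

// Sketch of the ownership handoff pattern (names are invented).
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct Journal {
  std::vector<std::string> entries;
};

struct Result {
  std::unique_ptr<Journal> assumptions;  // owns the journal after compilation
};

Result Compile() {
  Journal* journal = new Journal();   // raw pointer while compiling
  journal->entries.push_back("imported builtin has the expected shape");
  Result result;
  result.assumptions.reset(journal);  // ownership transferred here
  return result;
}

int main() {
  Result r = Compile();
  std::printf("%zu assumption(s)\n", r.assumptions->entries.size());
}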
@@ -8614,20 +8721,16 @@ class LinkageLocationAllocator {
int slot_offset_;
};
-const MachineSignature* FunctionSigToMachineSig(Zone* zone,
- const wasm::FunctionSig* fsig) {
- MachineSignature::Builder builder(zone, fsig->return_count(),
- fsig->parameter_count());
- for (wasm::ValueType ret : fsig->returns()) {
- builder.AddReturn(ret.machine_type());
- }
- for (wasm::ValueType param : fsig->parameters()) {
- builder.AddParam(param.machine_type());
- }
- return builder.Build();
+MachineRepresentation GetMachineRepresentation(wasm::ValueType type) {
+ return type.machine_representation();
}
-LocationSignature* BuildLocations(Zone* zone, const MachineSignature* sig,
+MachineRepresentation GetMachineRepresentation(MachineType type) {
+ return type.representation();
+}
+
+template <typename T>
+LocationSignature* BuildLocations(Zone* zone, const Signature<T>* sig,
bool extra_callable_param,
int* parameter_slots, int* return_slots) {
int extra_params = extra_callable_param ? 2 : 1;
@@ -8646,10 +8749,14 @@ LocationSignature* BuildLocations(Zone* zone, const MachineSignature* sig,
// tagged parameters). This allows for easy iteration of tagged parameters
// during frame iteration.
const size_t parameter_count = sig->parameter_count();
+ bool has_tagged_param = false;
for (size_t i = 0; i < parameter_count; i++) {
- MachineRepresentation param = sig->GetParam(i).representation();
+ MachineRepresentation param = GetMachineRepresentation(sig->GetParam(i));
// Skip tagged parameters (e.g. any-ref).
- if (IsAnyTagged(param)) continue;
+ if (IsAnyTagged(param)) {
+ has_tagged_param = true;
+ continue;
+ }
auto l = params.Next(param);
locations.AddParamAt(i + param_offset, l);
}
@@ -8657,12 +8764,14 @@ LocationSignature* BuildLocations(Zone* zone, const MachineSignature* sig,
// End the untagged area, so tagged slots come after.
params.EndSlotArea();
- for (size_t i = 0; i < parameter_count; i++) {
- MachineRepresentation param = sig->GetParam(i).representation();
- // Skip untagged parameters.
- if (!IsAnyTagged(param)) continue;
- auto l = params.Next(param);
- locations.AddParamAt(i + param_offset, l);
+ if (has_tagged_param) {
+ for (size_t i = 0; i < parameter_count; i++) {
+ MachineRepresentation param = GetMachineRepresentation(sig->GetParam(i));
+ // Skip untagged parameters.
+ if (!IsAnyTagged(param)) continue;
+ auto l = params.Next(param);
+ locations.AddParamAt(i + param_offset, l);
+ }
}
// Import call wrappers have an additional (implicit) parameter, the callable.
@@ -8680,20 +8789,13 @@ LocationSignature* BuildLocations(Zone* zone, const MachineSignature* sig,
const size_t return_count = locations.return_count_;
for (size_t i = 0; i < return_count; i++) {
- MachineRepresentation ret = sig->GetReturn(i).representation();
+ MachineRepresentation ret = GetMachineRepresentation(sig->GetReturn(i));
locations.AddReturn(rets.Next(ret));
}
*return_slots = rets.NumStackSlots();
- return locations.Build();
-}
-
-LocationSignature* BuildLocations(Zone* zone, const wasm::FunctionSig* fsig,
- bool extra_callable_param,
- int* parameter_slots, int* return_slots) {
- return BuildLocations(zone, FunctionSigToMachineSig(zone, fsig),
- extra_callable_param, parameter_slots, return_slots);
+ return locations.Get();
}
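BuildLocations is now a template over Signature<T>, so the same layout code serves both wasm and machine signatures, with the two GetMachineRepresentation overloads selecting the representation. The layout itself is two-pass: untagged parameters are assigned slots first and tagged parameters afterwards, so all tagged slots form one contiguous range that is easy to visit during frame iteration. A stand-alone sketch of both ideas, using toy types (Rep, WasmType, ToySig) that are only stand-ins:

// Sketch of the overload-plus-template pattern and the two-pass layout;
// ToySig and Rep are illustrative stand-ins, not the real compiler types.
#include <cstdio>
#include <vector>

enum class Rep { kWord32, kFloat64, kTagged };

// One overload per "value type" flavor, mirroring GetMachineRepresentation.
Rep GetRep(Rep r) { return r; }           // machine-level type
struct WasmType { Rep rep; };
Rep GetRep(WasmType t) { return t.rep; }  // wasm-level type

template <typename T>
struct ToySig {
  std::vector<T> params;
};

// Assigns parameter "slots" in two passes: untagged values first, so all
// tagged slots form one contiguous range at the end of the stack area.
template <typename T>
std::vector<int> AssignSlots(const ToySig<T>& sig) {
  std::vector<int> slot_of_param(sig.params.size(), -1);
  int next_slot = 0;
  bool has_tagged = false;
  for (size_t i = 0; i < sig.params.size(); ++i) {
    if (GetRep(sig.params[i]) == Rep::kTagged) {
      has_tagged = true;
      continue;  // skip tagged params in the first pass
    }
    slot_of_param[i] = next_slot++;
  }
  if (has_tagged) {
    for (size_t i = 0; i < sig.params.size(); ++i) {
      if (GetRep(sig.params[i]) != Rep::kTagged) continue;
      slot_of_param[i] = next_slot++;
    }
  }
  return slot_of_param;
}

int main() {
  ToySig<WasmType> sig{{{Rep::kTagged}, {Rep::kWord32}, {Rep::kFloat64}}};
  for (int slot : AssignSlots(sig)) std::printf("%d ", slot);
  // Prints "2 0 1": the tagged parameter is placed after the untagged ones.
  std::printf("\n");
}

The real helper additionally allocates registers before stack slots and reserves the implicit instance/callable parameter; the sketch shows only the overload dispatch and the two-pass ordering.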
} // namespace
@@ -8897,6 +8999,7 @@ AssemblerOptions WasmStubAssemblerOptions() {
#undef LOAD_INSTANCE_FIELD
#undef LOAD_MUTABLE_INSTANCE_FIELD
#undef LOAD_ROOT
+#undef LOAD_MUTABLE_ROOT
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index bcd6e0b53e..93bb637e16 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -13,17 +13,23 @@
#include <utility>
// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
+// Do not include anything else from src/compiler here!
#include "src/base/small-vector.h"
+#include "src/compiler/wasm-compiler-definitions.h"
#include "src/runtime/runtime.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/module-instantiate.h"
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone.h"
namespace v8 {
+
+class CFunctionInfo;
+
namespace internal {
struct AssemblerOptions;
class TurbofanCompilationJob;
@@ -38,6 +44,7 @@ class Node;
class NodeOriginTable;
class Operator;
class SourcePositionTable;
+struct WasmCompilationData;
class WasmDecorator;
class WasmGraphAssembler;
enum class TrapId : uint32_t;
@@ -51,7 +58,6 @@ namespace wasm {
class AssemblerBufferCache;
struct DecodeStruct;
class WasmCode;
-class WasmFeatures;
class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
enum Suspend : bool;
@@ -60,77 +66,19 @@ enum Suspend : bool;
namespace compiler {
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv*, const wasm::WireBytesStorage* wire_bytes_storage,
- const wasm::FunctionBody&, int func_index, Counters*,
- wasm::AssemblerBufferCache* buffer_cache, wasm::WasmFeatures* detected);
-
-// Calls to Wasm imports are handled in several different ways, depending on the
-// type of the target function/callable and whether the signature matches the
-// argument arity.
-enum class WasmImportCallKind : uint8_t {
- kLinkError, // static Wasm->Wasm type error
- kRuntimeTypeError, // runtime Wasm->JS type error
- kWasmToCapi, // fast Wasm->C-API call
- kWasmToJSFastApi, // fast Wasm->JS Fast API C call
- kWasmToWasm, // fast Wasm->Wasm call
- kJSFunctionArityMatch, // fast Wasm->JS call
- kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
- // Math functions imported from JavaScript that are intrinsified
- kFirstMathIntrinsic,
- kF64Acos = kFirstMathIntrinsic,
- kF64Asin,
- kF64Atan,
- kF64Cos,
- kF64Sin,
- kF64Tan,
- kF64Exp,
- kF64Log,
- kF64Atan2,
- kF64Pow,
- kF64Ceil,
- kF64Floor,
- kF64Sqrt,
- kF64Min,
- kF64Max,
- kF64Abs,
- kF32Min,
- kF32Max,
- kF32Abs,
- kF32Ceil,
- kF32Floor,
- kF32Sqrt,
- kF32ConvertF64,
- kLastMathIntrinsic = kF32ConvertF64,
- // For everything else, there's the call builtin.
- kUseCallBuiltin
-};
-
-constexpr WasmImportCallKind kDefaultImportCallKind =
- WasmImportCallKind::kJSFunctionArityMatch;
-
-struct WasmImportData {
- WasmImportCallKind kind;
- Handle<JSReceiver> callable;
- wasm::Suspend suspend;
-};
-// Resolves which import call wrapper is required for the given JS callable.
-// Returns the kind of wrapper needed, the ultimate target callable, and the
-// suspender object if applicable. Note that some callables (e.g. a
-// {WasmExportedFunction} or {WasmJSFunction}) just wrap another target, which
-// is why the ultimate target is returned as well.
-V8_EXPORT_PRIVATE WasmImportData ResolveWasmImportCall(
- Handle<JSReceiver> callable, const wasm::FunctionSig* sig,
- const wasm::WasmModule* module, const wasm::WasmFeatures& enabled_features);
+ wasm::CompilationEnv*, WasmCompilationData& compilation_data, Counters*,
+ wasm::WasmFeatures* detected);
// Compiles an import call wrapper, which allows Wasm to call imports.
V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
- wasm::CompilationEnv* env, WasmImportCallKind, const wasm::FunctionSig*,
+ wasm::CompilationEnv* env, wasm::ImportCallKind, const wasm::FunctionSig*,
bool source_positions, int expected_arity, wasm::Suspend);
// Compiles a host call wrapper, which allows Wasm to call host functions.
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule*,
const wasm::FunctionSig*);
+bool IsFastCallSupportedSignature(const v8::CFunctionInfo*);
// Compiles a wrapper to call a Fast API function from Wasm.
wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule*,
const wasm::FunctionSig*,
@@ -144,7 +92,7 @@ std::unique_ptr<TurbofanCompilationJob> NewJSToWasmCompilationJob(
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
- WasmImportCallKind kind,
+ wasm::ImportCallKind kind,
int expected_arity,
wasm::Suspend suspend);
@@ -165,7 +113,7 @@ enum CWasmEntryParameters {
// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
-V8_EXPORT_PRIVATE Handle<CodeT> CompileCWasmEntry(
+V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(
Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
// Values from the instance object are cached between Wasm-level function calls.
@@ -190,6 +138,22 @@ struct WasmLoopInfo {
can_be_innermost(can_be_innermost) {}
};
+struct WasmCompilationData {
+ explicit WasmCompilationData(const wasm::FunctionBody& func_body)
+ : func_body(func_body) {}
+
+ size_t body_size() { return func_body.end - func_body.start; }
+
+ const wasm::FunctionBody& func_body;
+ const wasm::WireBytesStorage* wire_bytes_storage;
+ wasm::AssemblerBufferCache* buffer_cache;
+ NodeOriginTable* node_origins{nullptr};
+ std::vector<WasmLoopInfo>* loop_infos{nullptr};
+ wasm::AssumptionsJournal* assumptions{nullptr};
+ SourcePositionTable* source_positions{nullptr};
+ int func_index;
+};
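WasmCompilationData bundles the per-function inputs and outputs that ExecuteTurbofanWasmCompilation and BuildGraphForWasmFunction previously took as separate parameters (wire bytes, function index, loop infos, node origins, source positions, assumptions). As a generic illustration of that parameter-object refactoring, with all names invented for the example:

// Generic sketch of the parameter-object refactor; all names are invented.
#include <cstddef>
#include <cstdio>
#include <vector>

struct FunctionBody {
  const unsigned char* start = nullptr;
  const unsigned char* end = nullptr;
};

// Before: CompileFunction(env, wire_bytes, body, func_index, counters, ...).
// After: one aggregate carries the per-function inputs and collects outputs.
struct CompilationData {
  explicit CompilationData(const FunctionBody& body) : func_body(body) {}

  size_t body_size() const { return func_body.end - func_body.start; }

  const FunctionBody& func_body;
  int func_index = -1;
  std::vector<int>* loop_infos = nullptr;  // filled in by the caller
};

size_t CompileFunction(const CompilationData& data) {
  // A real compiler would build a graph here; we just report the size.
  return data.body_size();
}

int main() {
  unsigned char bytes[16] = {};
  FunctionBody body{bytes, bytes + sizeof bytes};
  CompilationData data(body);
  data.func_index = 7;
  std::printf("body size: %zu\n", CompileFunction(data));  // prints 16
}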
+
// Abstracts details of building TurboFan graph nodes for wasm to separate
// the wasm decoder from the internal details of TurboFan.
class WasmGraphBuilder {
@@ -210,10 +174,6 @@ class WasmGraphBuilder {
kNeedsBoundsCheck = true,
kCanOmitBoundsCheck = false
};
- enum CheckForNull : bool { // --
- kWithNullCheck = true,
- kWithoutNullCheck = false
- };
enum BoundsCheckResult {
// Statically OOB.
kOutOfBounds,
@@ -229,11 +189,21 @@ class WasmGraphBuilder {
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr)
- : WasmGraphBuilder(env, zone, mcgraph, sig, spt, kInstanceMode, nullptr) {
- }
+ : WasmGraphBuilder(env, zone, mcgraph, sig, spt, kInstanceMode, nullptr,
+ env->enabled_features) {}
+
+ V8_EXPORT_PRIVATE WasmGraphBuilder(wasm::CompilationEnv* env, Zone* zone,
+ MachineGraph* mcgraph,
+ const wasm::FunctionSig* sig,
+ compiler::SourcePositionTable* spt,
+ Parameter0Mode parameter_mode,
+ Isolate* isolate,
+ wasm::WasmFeatures enabled_features);
V8_EXPORT_PRIVATE ~WasmGraphBuilder();
+ bool TryWasmInlining(int fct_index, wasm::NativeModule* native_module);
+
//-----------------------------------------------------------------------
// Operations independent of {control} or {effect}.
//-----------------------------------------------------------------------
@@ -253,9 +223,11 @@ class WasmGraphBuilder {
Node* tnode, Node* fnode);
Node* CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, Node* fnode);
Node* EffectPhi(unsigned count, Node** effects_and_control);
- Node* RefNull();
+ Node* RefNull(wasm::ValueType type);
Node* RefFunc(uint32_t function_index);
- Node* AssertNotNull(Node* object, wasm::WasmCodePosition position);
+ Node* AssertNotNull(
+ Node* object, wasm::ValueType type, wasm::WasmCodePosition position,
+ wasm::TrapReason reason = wasm::TrapReason::kTrapNullDereference);
Node* TraceInstruction(uint32_t mark_id);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
@@ -264,7 +236,9 @@ class WasmGraphBuilder {
Node* Simd128Constant(const uint8_t value[16]);
Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
+ // The {type} argument is only required for null-checking operations.
Node* Unop(wasm::WasmOpcode opcode, Node* input,
+ wasm::ValueType type = wasm::kWasmBottom,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* MemoryGrow(Node* input);
Node* Throw(uint32_t tag_index, const wasm::WasmTag* tag,
@@ -350,7 +324,8 @@ class WasmGraphBuilder {
Node** failure_control,
bool is_last_case);
- void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
+ void BrOnNull(Node* ref_object, wasm::ValueType type, Node** non_null_node,
+ Node** null_node);
Node* Invert(Node* node);
@@ -480,7 +455,11 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
Node* src_array, Node* src_index, CheckForNull src_null_check,
- Node* length, wasm::WasmCodePosition position);
+ Node* length, const wasm::ArrayType* type,
+ wasm::WasmCodePosition position);
+ void ArrayFill(Node* array, Node* index, Node* value, Node* length,
+ const wasm::ArrayType* type, CheckForNull null_check,
+ wasm::WasmCodePosition position);
Node* ArrayNewFixed(const wasm::ArrayType* type, Node* rtt,
base::Vector<Node*> elements);
Node* ArrayNewSegment(const wasm::ArrayType* type, uint32_t data_segment,
@@ -494,17 +473,22 @@ class WasmGraphBuilder {
Node* RttCanon(uint32_t type_index);
Node* RefTest(Node* object, Node* rtt, WasmTypeCheckConfig config);
- Node* RefTestAbstract(Node* object, wasm::HeapType type, bool null_succeeds);
+ Node* RefTestAbstract(Node* object, wasm::HeapType type, bool is_nullable,
+ bool null_succeeds);
Node* RefCast(Node* object, Node* rtt, WasmTypeCheckConfig config,
wasm::WasmCodePosition position);
Node* RefCastAbstract(Node* object, wasm::HeapType type,
- wasm::WasmCodePosition position, bool null_succeeds);
+ wasm::WasmCodePosition position, bool is_nullable,
+ bool null_succeeds);
void BrOnCast(Node* object, Node* rtt, WasmTypeCheckConfig config,
Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect);
Node* RefIsEq(Node* object, bool object_can_be_null, bool null_succeeds);
Node* RefAsEq(Node* object, bool object_can_be_null,
wasm::WasmCodePosition position, bool null_succeeds);
+ void BrOnEq(Node* object, Node* rtt, WasmTypeCheckConfig config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
Node* RefIsStruct(Node* object, bool object_can_be_null, bool null_succeeds);
Node* RefAsStruct(Node* object, bool object_can_be_null,
wasm::WasmCodePosition position, bool null_succeeds);
@@ -529,6 +513,8 @@ class WasmGraphBuilder {
Node* start, Node* end);
Node* StringNewWtf16(uint32_t memory, Node* offset, Node* size);
Node* StringNewWtf16Array(Node* array, Node* start, Node* end);
+ Node* StringAsWtf16(Node* string, CheckForNull null_check,
+ wasm::WasmCodePosition position);
Node* StringConst(uint32_t index);
Node* StringMeasureUtf8(Node* string, CheckForNull null_check,
wasm::WasmCodePosition position);
@@ -587,9 +573,19 @@ class WasmGraphBuilder {
Node* codepoints, wasm::WasmCodePosition position);
Node* StringViewIterSlice(Node* view, CheckForNull null_check,
Node* codepoints, wasm::WasmCodePosition position);
- Node* IsNull(Node* object);
+ Node* StringCompare(Node* lhs, CheckForNull null_check_lhs, Node* rhs,
+ CheckForNull null_check_rhs,
+ wasm::WasmCodePosition position);
+ Node* StringFromCodePoint(Node* code_point);
+ Node* StringHash(Node* string, CheckForNull null_check,
+ wasm::WasmCodePosition position);
+ Node* IsNull(Node* object, wasm::ValueType type);
Node* TypeGuard(Node* value, wasm::ValueType type);
+ // Support for well-known imports.
+ // See {CheckWellKnownImport} for signature and builtin ID definitions.
+ Node* WellKnown_StringToLowerCaseStringref(Node* string,
+ CheckForNull null_check);
bool has_simd() const { return has_simd_; }
wasm::BoundsCheckStrategy bounds_checks() const {
@@ -610,14 +606,12 @@ class WasmGraphBuilder {
void StoreCallCount(Node* call, int count);
void ReserveCallCounts(size_t num_call_instructions);
- protected:
- V8_EXPORT_PRIVATE WasmGraphBuilder(wasm::CompilationEnv* env, Zone* zone,
- MachineGraph* mcgraph,
- const wasm::FunctionSig* sig,
- compiler::SourcePositionTable* spt,
- Parameter0Mode parameter_mode,
- Isolate* isolate);
+ void set_inlining_id(int inlining_id) {
+ DCHECK_NE(inlining_id, -1);
+ inlining_id_ = inlining_id;
+ }
+ protected:
Node* NoContextConstant();
Node* GetInstance();
@@ -768,8 +762,6 @@ class WasmGraphBuilder {
SmallNodeVector& match_controls,
SmallNodeVector& match_effects);
- void DataCheck(Node* object, bool object_can_be_null, Callbacks callbacks,
- bool null_succeeds);
void EqCheck(Node* object, bool object_can_be_null, Callbacks callbacks,
bool null_succeeds);
void ManagedObjectInstanceCheck(Node* object, bool object_can_be_null,
@@ -779,10 +771,14 @@ class WasmGraphBuilder {
void BrOnCastAbs(Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect,
std::function<void(Callbacks)> type_checker);
- void BoundsCheckArray(Node* array, Node* index,
+ void BoundsCheckArray(Node* array, Node* index, CheckForNull null_check,
wasm::WasmCodePosition position);
- void BoundsCheckArrayCopy(Node* array, Node* index, Node* length,
- wasm::WasmCodePosition position);
+ void BoundsCheckArrayWithLength(Node* array, Node* index, Node* length,
+ CheckForNull null_check,
+ wasm::WasmCodePosition position);
+ Node* StoreInInt64StackSlot(Node* value, wasm::ValueType type);
+ void ArrayFillImpl(Node* array, Node* index, Node* value, Node* length,
+ const wasm::ArrayType* type, bool emit_write_barrier);
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
@@ -814,10 +810,6 @@ class WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context);
- Node* BuildLoadExternalPointerFromObject(
- Node* object, int offset,
- ExternalPointerTag tag = kForeignForeignAddressTag);
-
Node* BuildLoadCallTargetFromExportedFunctionData(Node* function_data);
//-----------------------------------------------------------------------
@@ -831,13 +823,12 @@ class WasmGraphBuilder {
Node** parameters, int parameter_count);
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
- void AddInt64LoweringReplacement(CallDescriptor* original,
- CallDescriptor* replacement);
+ void BuildModifyThreadInWasmFlag(bool new_value);
+ void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address,
+ bool new_value);
Node* BuildChangeInt64ToBigInt(Node* input, StubCallMode stub_mode);
- CallDescriptor* GetI64ToBigIntCallDescriptor(StubCallMode stub_mode);
-
Node* StoreArgsInStackSlot(
std::initializer_list<std::pair<MachineRepresentation, Node*>> args);
@@ -845,6 +836,10 @@ class WasmGraphBuilder {
Zone* const zone_;
MachineGraph* const mcgraph_;
wasm::CompilationEnv* const env_;
+ // For the main WasmGraphBuilder class, this is identical to the features
+ // field in {env_}, but the WasmWrapperGraphBuilder subclass doesn't have
+ // that, so common code should use this field instead.
+ wasm::WasmFeatures enabled_features_;
Node** parameters_;
@@ -861,13 +856,11 @@ class WasmGraphBuilder {
compiler::WasmDecorator* decorator_ = nullptr;
compiler::SourcePositionTable* const source_position_table_ = nullptr;
+ int inlining_id_ = -1;
Parameter0Mode parameter_mode_;
Isolate* const isolate_;
SetOncePointer<Node> instance_node_;
-
- std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
- CallDescriptor* i64_to_bigint_builtin_descriptor_ = nullptr;
- CallDescriptor* i64_to_bigint_stub_descriptor_ = nullptr;
+ NullCheckStrategy null_check_strategy_;
};
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
@@ -875,8 +868,8 @@ enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
const wasm::WasmModule* module, Isolate* isolate,
- compiler::SourcePositionTable* spt, StubCallMode stub_mode,
- wasm::WasmFeatures features, Node* frame_state);
+ compiler::SourcePositionTable* spt, wasm::WasmFeatures features,
+ Node* frame_state, bool set_in_wasm_flag);
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
diff --git a/deps/v8/src/compiler/wasm-gc-lowering.cc b/deps/v8/src/compiler/wasm-gc-lowering.cc
index 4c125f3c21..83b0d66faa 100644
--- a/deps/v8/src/compiler/wasm-gc-lowering.cc
+++ b/deps/v8/src/compiler/wasm-gc-lowering.cc
@@ -7,11 +7,12 @@
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/compiler/wasm-compiler-definitions.h"
#include "src/compiler/wasm-graph-assembler.h"
+#include "src/objects/heap-number.h"
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
@@ -23,22 +24,19 @@ namespace internal {
namespace compiler {
WasmGCLowering::WasmGCLowering(Editor* editor, MachineGraph* mcgraph,
- const wasm::WasmModule* module)
+ const wasm::WasmModule* module,
+ bool disable_trap_handler,
+ SourcePositionTable* source_position_table)
: AdvancedReducer(editor),
+ null_check_strategy_(trap_handler::IsTrapHandlerEnabled() &&
+ V8_STATIC_ROOTS_BOOL && !disable_trap_handler
+ ? kTrapHandler
+ : kExplicitNullChecks),
gasm_(mcgraph, mcgraph->zone()),
module_(module),
dead_(mcgraph->Dead()),
- instance_node_(nullptr) {
- // Find and store the instance node.
- for (Node* start_use : mcgraph->graph()->start()->uses()) {
- if (start_use->opcode() == IrOpcode::kParameter &&
- ParameterIndexOf(start_use->op()) == 0) {
- instance_node_ = start_use;
- break;
- }
- }
- DCHECK_NOT_NULL(instance_node_);
-}
+ mcgraph_(mcgraph),
+ source_position_table_(source_position_table) {}
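The rewritten constructor decides once, up front, whether null checks may be implemented implicitly (by letting a faulting access reach the trap handler) or must remain explicit comparisons. A compressed, self-contained sketch of that decision; the enum and flag names below are stand-ins for the real configuration:

// Illustrative sketch; the enum and flags are stand-ins, not V8's.
#include <cstdio>

enum NullCheckStrategy { kExplicitNullChecks, kTrapHandler };

NullCheckStrategy ChooseStrategy(bool trap_handler_enabled,
                                 bool static_roots,
                                 bool disable_trap_handler) {
  // Implicit (trap-handler) null checks are only sound when a faulting
  // access on the null sentinel is guaranteed to be caught, which here
  // requires both an installed trap handler and statically known roots.
  return trap_handler_enabled && static_roots && !disable_trap_handler
             ? kTrapHandler
             : kExplicitNullChecks;
}

int main() {
  std::printf("%s\n", ChooseStrategy(true, true, false) == kTrapHandler
                          ? "trap handler"
                          : "explicit checks");
}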
Reduction WasmGCLowering::Reduce(Node* node) {
switch (node->opcode()) {
@@ -62,26 +60,42 @@ Reduction WasmGCLowering::Reduce(Node* node) {
return ReduceWasmExternInternalize(node);
case IrOpcode::kWasmExternExternalize:
return ReduceWasmExternExternalize(node);
+ case IrOpcode::kWasmStructGet:
+ return ReduceWasmStructGet(node);
+ case IrOpcode::kWasmStructSet:
+ return ReduceWasmStructSet(node);
+ case IrOpcode::kWasmArrayGet:
+ return ReduceWasmArrayGet(node);
+ case IrOpcode::kWasmArraySet:
+ return ReduceWasmArraySet(node);
+ case IrOpcode::kWasmArrayLength:
+ return ReduceWasmArrayLength(node);
+ case IrOpcode::kWasmArrayInitializeLength:
+ return ReduceWasmArrayInitializeLength(node);
+ case IrOpcode::kStringAsWtf16:
+ return ReduceStringAsWtf16(node);
+ case IrOpcode::kStringPrepareForGetCodeunit:
+ return ReduceStringPrepareForGetCodeunit(node);
default:
return NoChange();
}
}
-Node* WasmGCLowering::RootNode(RootIndex index) {
- // TODO(13449): Use root register instead of isolate.
- Node* isolate_root = gasm_.LoadImmutable(
- MachineType::Pointer(), instance_node_,
- WasmInstanceObject::kIsolateRootOffset - kHeapObjectTag);
- return gasm_.LoadImmutable(MachineType::Pointer(), isolate_root,
+Node* WasmGCLowering::Null(wasm::ValueType type) {
+ RootIndex index = wasm::IsSubtypeOf(type, wasm::kWasmExternRef, module_)
+ ? RootIndex::kNullValue
+ : RootIndex::kWasmNull;
+ return gasm_.LoadImmutable(MachineType::Pointer(), gasm_.LoadRootRegister(),
IsolateData::root_slot_offset(index));
}
-Node* WasmGCLowering::Null() { return RootNode(RootIndex::kNullValue); }
-
-Node* WasmGCLowering::IsNull(Node* object) {
- Tagged_t static_null = wasm::GetWasmEngine()->compressed_null_value_or_zero();
- Node* null_value =
- static_null != 0 ? gasm_.UintPtrConstant(static_null) : Null();
+Node* WasmGCLowering::IsNull(Node* object, wasm::ValueType type) {
+ Tagged_t static_null =
+ wasm::GetWasmEngine()->compressed_wasm_null_value_or_zero();
+ Node* null_value = !wasm::IsSubtypeOf(type, wasm::kWasmExternRef, module_) &&
+ static_null != 0
+ ? gasm_.UintPtrConstant(static_null)
+ : Null(type);
return gasm_.TaggedEqual(object, null_value);
}
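Null and IsNull now take the static type because the lowering distinguishes two null sentinels: externref-typed values use the JavaScript null object, all other reference types use the dedicated wasm null, and the compressed-constant shortcut is only valid for the latter. A toy model of that selection, with invented constants:

// Toy model of per-type null sentinels; the values are invented.
#include <cstdint>
#include <cstdio>

enum class RefKind { kExternRef, kWasmRef };

constexpr uintptr_t kJsNull = 0x1111;    // stand-in for the JS null object
constexpr uintptr_t kWasmNull = 0x2222;  // stand-in for the wasm null object

uintptr_t NullSentinel(RefKind kind) {
  return kind == RefKind::kExternRef ? kJsNull : kWasmNull;
}

bool IsNull(uintptr_t object, RefKind kind) {
  // A null check must compare against the sentinel that matches the
  // static type, otherwise externref null and wasm null get confused.
  return object == NullSentinel(kind);
}

int main() {
  std::printf("%d %d\n", IsNull(kJsNull, RefKind::kExternRef),
              IsNull(kJsNull, RefKind::kWasmRef));  // prints "1 0"
}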
@@ -111,50 +125,54 @@ Reduction WasmGCLowering::ReduceWasmTypeCheck(Node* node) {
// being a wasm object and return 0 (failure).
if (object_can_be_null && (!is_cast_from_any || config.to.is_nullable())) {
const int kResult = config.to.is_nullable() ? 1 : 0;
- gasm_.GotoIf(IsNull(object), &end_label, BranchHint::kFalse,
- gasm_.Int32Constant(kResult));
+ gasm_.GotoIf(IsNull(object, wasm::kWasmAnyRef), &end_label,
+ BranchHint::kFalse, gasm_.Int32Constant(kResult));
}
if (object_can_be_i31) {
- gasm_.GotoIf(gasm_.IsI31(object), &end_label, gasm_.Int32Constant(0));
+ gasm_.GotoIf(gasm_.IsSmi(object), &end_label, gasm_.Int32Constant(0));
}
Node* map = gasm_.LoadMap(object);
- // First, check if types happen to be equal. This has been shown to give large
- // speedups.
- gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue,
- gasm_.Int32Constant(1));
-
- // Check if map instance type identifies a wasm object.
- if (is_cast_from_any) {
- Node* is_wasm_obj = gasm_.IsDataRefMap(map);
- gasm_.GotoIfNot(is_wasm_obj, &end_label, BranchHint::kTrue,
- gasm_.Int32Constant(0));
- }
+ if (module_->types[config.to.ref_index()].is_final) {
+ gasm_.Goto(&end_label, gasm_.TaggedEqual(map, rtt));
+ } else {
+ // First, check if types happen to be equal. This has been shown to give
+ // large speedups.
+ gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue,
+ gasm_.Int32Constant(1));
+
+ // Check if map instance type identifies a wasm object.
+ if (is_cast_from_any) {
+ Node* is_wasm_obj = gasm_.IsDataRefMap(map);
+ gasm_.GotoIfNot(is_wasm_obj, &end_label, BranchHint::kTrue,
+ gasm_.Int32Constant(0));
+ }
- Node* type_info = gasm_.LoadWasmTypeInfo(map);
- DCHECK_GE(rtt_depth, 0);
- // If the depth of the rtt is known to be less than the minimum supertype
- // array length, we can access the supertype without bounds-checking the
- // supertype array.
- if (static_cast<uint32_t>(rtt_depth) >= wasm::kMinimumSupertypeArraySize) {
- Node* supertypes_length =
- gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject(
- MachineType::TaggedSigned(), type_info,
- wasm::ObjectAccess::ToTagged(
- WasmTypeInfo::kSupertypesLengthOffset)));
- gasm_.GotoIfNot(
- gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth), supertypes_length),
- &end_label, BranchHint::kTrue, gasm_.Int32Constant(0));
- }
+ Node* type_info = gasm_.LoadWasmTypeInfo(map);
+ DCHECK_GE(rtt_depth, 0);
+ // If the depth of the rtt is known to be less than the minimum supertype
+ // array length, we can access the supertype without bounds-checking the
+ // supertype array.
+ if (static_cast<uint32_t>(rtt_depth) >= wasm::kMinimumSupertypeArraySize) {
+ Node* supertypes_length =
+ gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject(
+ MachineType::TaggedSigned(), type_info,
+ wasm::ObjectAccess::ToTagged(
+ WasmTypeInfo::kSupertypesLengthOffset)));
+ gasm_.GotoIfNot(gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth),
+ supertypes_length),
+ &end_label, BranchHint::kTrue, gasm_.Int32Constant(0));
+ }
- Node* maybe_match = gasm_.LoadImmutableFromObject(
- MachineType::TaggedPointer(), type_info,
- wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
- kTaggedSize * rtt_depth));
+ Node* maybe_match = gasm_.LoadImmutableFromObject(
+ MachineType::TaggedPointer(), type_info,
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
+ kTaggedSize * rtt_depth));
- gasm_.Goto(&end_label, gasm_.TaggedEqual(maybe_match, rtt));
+ gasm_.Goto(&end_label, gasm_.TaggedEqual(maybe_match, rtt));
+ }
gasm_.Bind(&end_label);
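The restructured check above adds a fast path for final types: a type declared final cannot have subtypes, so a single map comparison decides the whole check; only non-final targets need the supertype walk, and even there the bounds check is skipped when the rtt depth is statically below the minimum supertype-array length. A self-contained model of the algorithm (the data layout below is a simplification, not V8's object format):

// Simplified model of the subtype check; not V8's object layout.
#include <cstdio>
#include <vector>

struct TypeInfo {
  int map_id;                      // canonical rtt / map identity
  std::vector<int> supertype_ids;  // supertype maps, indexed by depth
};

bool TypeCheck(const TypeInfo& object_type, int target_map_id,
               size_t target_depth, bool target_is_final) {
  // An exact match is always sufficient, and for final types it is necessary.
  if (object_type.map_id == target_map_id) return true;
  if (target_is_final) return false;
  // Otherwise the target must appear in the supertype list at its depth.
  if (target_depth >= object_type.supertype_ids.size()) return false;
  return object_type.supertype_ids[target_depth] == target_map_id;
}

int main() {
  TypeInfo sub{/*map_id=*/42, /*supertype_ids=*/{7, 13}};
  std::printf("%d %d %d\n",
              TypeCheck(sub, 13, 1, /*target_is_final=*/false),   // 1: supertype hit
              TypeCheck(sub, 13, 1, /*target_is_final=*/true),    // 0: final, not exact
              TypeCheck(sub, 42, 2, /*target_is_final=*/false));  // 1: exact match
}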
@@ -185,54 +203,65 @@ Reduction WasmGCLowering::ReduceWasmTypeCast(Node* node) {
// failure. In that case the instance type check will identify null as not
// being a wasm object and trap.
if (object_can_be_null && (!is_cast_from_any || config.to.is_nullable())) {
- Node* is_null = IsNull(object);
+ Node* is_null = IsNull(object, wasm::kWasmAnyRef);
if (config.to.is_nullable()) {
gasm_.GotoIf(is_null, &end_label, BranchHint::kFalse);
} else if (!v8_flags.experimental_wasm_skip_null_checks) {
gasm_.TrapIf(is_null, TrapId::kTrapIllegalCast);
+ UpdateSourcePosition(gasm_.effect(), node);
}
}
if (object_can_be_i31) {
- gasm_.TrapIf(gasm_.IsI31(object), TrapId::kTrapIllegalCast);
+ gasm_.TrapIf(gasm_.IsSmi(object), TrapId::kTrapIllegalCast);
+ UpdateSourcePosition(gasm_.effect(), node);
}
Node* map = gasm_.LoadMap(object);
- // First, check if types happen to be equal. This has been shown to give large
- // speedups.
- gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue);
-
- // Check if map instance type identifies a wasm object.
- if (is_cast_from_any) {
- Node* is_wasm_obj = gasm_.IsDataRefMap(map);
- gasm_.TrapUnless(is_wasm_obj, TrapId::kTrapIllegalCast);
- }
+ if (module_->types[config.to.ref_index()].is_final) {
+ gasm_.TrapUnless(gasm_.TaggedEqual(map, rtt), TrapId::kTrapIllegalCast);
+ UpdateSourcePosition(gasm_.effect(), node);
+ gasm_.Goto(&end_label);
+ } else {
+ // First, check if types happen to be equal. This has been shown to give
+ // large speedups.
+ gasm_.GotoIf(gasm_.TaggedEqual(map, rtt), &end_label, BranchHint::kTrue);
+
+ // Check if map instance type identifies a wasm object.
+ if (is_cast_from_any) {
+ Node* is_wasm_obj = gasm_.IsDataRefMap(map);
+ gasm_.TrapUnless(is_wasm_obj, TrapId::kTrapIllegalCast);
+ UpdateSourcePosition(gasm_.effect(), node);
+ }
- Node* type_info = gasm_.LoadWasmTypeInfo(map);
- DCHECK_GE(rtt_depth, 0);
- // If the depth of the rtt is known to be less than the minimum supertype
- // array length, we can access the supertype without bounds-checking the
- // supertype array.
- if (static_cast<uint32_t>(rtt_depth) >= wasm::kMinimumSupertypeArraySize) {
- Node* supertypes_length =
- gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject(
- MachineType::TaggedSigned(), type_info,
- wasm::ObjectAccess::ToTagged(
- WasmTypeInfo::kSupertypesLengthOffset)));
- gasm_.TrapUnless(
- gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth), supertypes_length),
- TrapId::kTrapIllegalCast);
- }
+ Node* type_info = gasm_.LoadWasmTypeInfo(map);
+ DCHECK_GE(rtt_depth, 0);
+ // If the depth of the rtt is known to be less than the minimum supertype
+ // array length, we can access the supertype without bounds-checking the
+ // supertype array.
+ if (static_cast<uint32_t>(rtt_depth) >= wasm::kMinimumSupertypeArraySize) {
+ Node* supertypes_length =
+ gasm_.BuildChangeSmiToIntPtr(gasm_.LoadImmutableFromObject(
+ MachineType::TaggedSigned(), type_info,
+ wasm::ObjectAccess::ToTagged(
+ WasmTypeInfo::kSupertypesLengthOffset)));
+ gasm_.TrapUnless(gasm_.UintLessThan(gasm_.IntPtrConstant(rtt_depth),
+ supertypes_length),
+ TrapId::kTrapIllegalCast);
+ UpdateSourcePosition(gasm_.effect(), node);
+ }
- Node* maybe_match = gasm_.LoadImmutableFromObject(
- MachineType::TaggedPointer(), type_info,
- wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
- kTaggedSize * rtt_depth));
+ Node* maybe_match = gasm_.LoadImmutableFromObject(
+ MachineType::TaggedPointer(), type_info,
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
+ kTaggedSize * rtt_depth));
- gasm_.TrapUnless(gasm_.TaggedEqual(maybe_match, rtt),
- TrapId::kTrapIllegalCast);
- gasm_.Goto(&end_label);
+ gasm_.TrapUnless(gasm_.TaggedEqual(maybe_match, rtt),
+ TrapId::kTrapIllegalCast);
+ UpdateSourcePosition(gasm_.effect(), node);
+ gasm_.Goto(&end_label);
+ }
gasm_.Bind(&end_label);
@@ -247,8 +276,36 @@ Reduction WasmGCLowering::ReduceAssertNotNull(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* object = NodeProperties::GetValueInput(node, 0);
gasm_.InitializeEffectControl(effect, control);
- if (!v8_flags.experimental_wasm_skip_null_checks) {
- gasm_.TrapIf(IsNull(object), TrapId::kTrapNullDereference);
+ auto op_parameter = OpParameter<AssertNotNullParameters>(node->op());
+ // When able, implement a non-null assertion by loading from the object just
+ // after the map word. This will trap for null and be handled by the trap
+ // handler.
+ if (op_parameter.trap_id == TrapId::kTrapNullDereference) {
+ if (!v8_flags.experimental_wasm_skip_null_checks) {
+ // For supertypes of i31ref, we would need to check for i31ref anyway
+ // before loading from the object, so we might as well just check directly
+ // for null.
+ // For subtypes of externref, we use JS null, so we have to check
+ // explicitly.
+ if (null_check_strategy_ == kExplicitNullChecks ||
+ wasm::IsSubtypeOf(wasm::kWasmI31Ref.AsNonNull(), op_parameter.type,
+ module_) ||
+ wasm::IsSubtypeOf(op_parameter.type, wasm::kWasmExternRef, module_)) {
+ gasm_.TrapIf(IsNull(object, op_parameter.type), op_parameter.trap_id);
+ UpdateSourcePosition(gasm_.effect(), node);
+ } else {
+ static_assert(WasmStruct::kHeaderSize > kTaggedSize);
+ static_assert(WasmArray::kHeaderSize > kTaggedSize);
+ // TODO(manoskouk): JSFunction::kHeaderSize also has to be >kTaggedSize.
+ Node* trap_null = gasm_.LoadTrapOnNull(
+ MachineType::Int32(), object,
+ gasm_.IntPtrConstant(wasm::ObjectAccess::ToTagged(kTaggedSize)));
+ UpdateSourcePosition(trap_null, node);
+ }
+ }
+ } else {
+ gasm_.TrapIf(IsNull(object, op_parameter.type), op_parameter.trap_id);
+ UpdateSourcePosition(gasm_.effect(), node);
}
ReplaceWithValue(node, object, gasm_.effect(), gasm_.control());
@@ -258,26 +315,31 @@ Reduction WasmGCLowering::ReduceAssertNotNull(Node* node) {
Reduction WasmGCLowering::ReduceNull(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kNull);
- return Replace(Null());
+ auto type = OpParameter<wasm::ValueType>(node->op());
+ return Replace(Null(type));
}
Reduction WasmGCLowering::ReduceIsNull(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kIsNull);
Node* object = NodeProperties::GetValueInput(node, 0);
- return Replace(IsNull(object));
+ auto type = OpParameter<wasm::ValueType>(node->op());
+ return Replace(IsNull(object, type));
}
Reduction WasmGCLowering::ReduceIsNotNull(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kIsNotNull);
Node* object = NodeProperties::GetValueInput(node, 0);
- return Replace(gasm_.Word32Equal(IsNull(object), gasm_.Int32Constant(0)));
+ auto type = OpParameter<wasm::ValueType>(node->op());
+ return Replace(
+ gasm_.Word32Equal(IsNull(object, type), gasm_.Int32Constant(0)));
}
Reduction WasmGCLowering::ReduceRttCanon(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kRttCanon);
int type_index = OpParameter<int>(node->op());
+ Node* instance_node = node->InputAt(0);
Node* maps_list = gasm_.LoadImmutable(
- MachineType::TaggedPointer(), instance_node_,
+ MachineType::TaggedPointer(), instance_node,
WasmInstanceObject::kManagedObjectMapsOffset - kHeapObjectTag);
return Replace(gasm_.LoadImmutable(
MachineType::TaggedPointer(), maps_list,
@@ -292,22 +354,500 @@ Reduction WasmGCLowering::ReduceTypeGuard(Node* node) {
return Replace(alias);
}
+namespace {
+constexpr int32_t kInt31MaxValue = 0x3fffffff;
+constexpr int32_t kInt31MinValue = -kInt31MaxValue - 1;
+} // namespace
+
Reduction WasmGCLowering::ReduceWasmExternInternalize(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kWasmExternInternalize);
- Node* object = NodeProperties::GetValueInput(node, 0);
- // TODO(7748): Canonicalize HeapNumbers.
- ReplaceWithValue(node, object);
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ gasm_.InitializeEffectControl(effect, control);
+
+ auto end_label = gasm_.MakeLabel(MachineRepresentation::kTagged);
+ auto null_label = gasm_.MakeLabel();
+ auto smi_label = gasm_.MakeLabel();
+ auto int_to_smi_label = gasm_.MakeLabel();
+ auto heap_number_label = gasm_.MakeLabel();
+
+ gasm_.GotoIf(IsNull(input, wasm::kWasmExternRef), &null_label);
+ gasm_.GotoIf(gasm_.IsSmi(input), &smi_label);
+ Node* is_heap_number = gasm_.HasInstanceType(input, HEAP_NUMBER_TYPE);
+ gasm_.GotoIf(is_heap_number, &heap_number_label);
+ // For anything else, just pass through the value.
+ gasm_.Goto(&end_label, input);
+
+ gasm_.Bind(&null_label);
+ gasm_.Goto(&end_label, Null(wasm::kWasmNullRef));
+
+ // Canonicalize SMI.
+ gasm_.Bind(&smi_label);
+ if constexpr (SmiValuesAre31Bits()) {
+ gasm_.Goto(&end_label, input);
+ } else {
+ auto to_heap_number_label = gasm_.MakeLabel();
+ Node* int_value = gasm_.BuildChangeSmiToInt32(input);
+
+ // Convert to heap number if the int32 does not fit into an i31ref.
+ gasm_.GotoIf(
+ gasm_.Int32LessThan(gasm_.Int32Constant(kInt31MaxValue), int_value),
+ &to_heap_number_label);
+ gasm_.GotoIf(
+ gasm_.Int32LessThan(int_value, gasm_.Int32Constant(kInt31MinValue)),
+ &to_heap_number_label);
+ gasm_.Goto(&end_label, input);
+
+ gasm_.Bind(&to_heap_number_label);
+ Node* heap_number = gasm_.CallBuiltin(Builtin::kWasmInt32ToHeapNumber,
+ Operator::kPure, int_value);
+ gasm_.Goto(&end_label, heap_number);
+ }
+
+ // Convert HeapNumber to SMI if possible.
+ gasm_.Bind(&heap_number_label);
+ Node* float_value = gasm_.LoadFromObject(
+ MachineType::Float64(), input,
+ wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset));
+ // Check range of float value.
+ gasm_.GotoIf(
+ gasm_.Float64LessThan(float_value, gasm_.Float64Constant(kInt31MinValue)),
+ &end_label, input);
+ gasm_.GotoIf(
+ gasm_.Float64LessThan(gasm_.Float64Constant(kInt31MaxValue), float_value),
+ &end_label, input);
+ // Check if value is -0.
+ Node* is_minus_zero = nullptr;
+ if (mcgraph_->machine()->Is64()) {
+ Node* minus_zero = gasm_.Int64Constant(base::bit_cast<int64_t>(-0.0));
+ Node* float_bits = gasm_.BitcastFloat64ToInt64(float_value);
+ is_minus_zero = gasm_.Word64Equal(float_bits, minus_zero);
+ } else {
+ constexpr int32_t kMinusZeroLoBits = static_cast<int32_t>(0);
+ constexpr int32_t kMinusZeroHiBits = static_cast<int32_t>(1) << 31;
+ auto done = gasm_.MakeLabel(MachineRepresentation::kBit);
+
+ Node* value_lo = gasm_.Float64ExtractLowWord32(float_value);
+ gasm_.GotoIfNot(
+ gasm_.Word32Equal(value_lo, gasm_.Int32Constant(kMinusZeroLoBits)),
+ &done, gasm_.Int32Constant(0));
+ Node* value_hi = gasm_.Float64ExtractHighWord32(float_value);
+ gasm_.Goto(&done, gasm_.Word32Equal(value_hi,
+ gasm_.Int32Constant(kMinusZeroHiBits)));
+ gasm_.Bind(&done);
+ is_minus_zero = done.PhiAt(0);
+ }
+ gasm_.GotoIf(is_minus_zero, &end_label, input);
+ // Check if value is integral.
+ Node* int_value = gasm_.ChangeFloat64ToInt32(float_value);
+ gasm_.GotoIf(
+ gasm_.Float64Equal(float_value, gasm_.ChangeInt32ToFloat64(int_value)),
+ &int_to_smi_label);
+ gasm_.Goto(&end_label, input);
+
+ gasm_.Bind(&int_to_smi_label);
+ gasm_.Goto(&end_label, gasm_.BuildChangeInt32ToSmi(int_value));
+
+ gasm_.Bind(&end_label);
+ ReplaceWithValue(node, end_label.PhiAt(0), gasm_.effect(), gasm_.control());
node->Kill();
- return Replace(object);
+ return Replace(end_label.PhiAt(0));
}
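The expanded extern.internalize lowering canonicalizes numbers at the boundary: Smis whose value does not fit an i31 become heap numbers, and heap numbers holding a small, integral, non-minus-zero value become Smis, so the same mathematical value always has a single representation on the wasm side. The predicate below captures when a float64 qualifies; the i31 bounds match the kInt31MaxValue/kInt31MinValue constants introduced above, everything else is illustrative:

// Stand-alone check for "can this float64 be canonicalized to an i31 Smi?".
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr int32_t kInt31MaxValue = 0x3fffffff;
constexpr int32_t kInt31MinValue = -kInt31MaxValue - 1;

bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits == UINT64_C(0x8000000000000000);
}

bool CanCanonicalizeToI31(double value) {
  if (value < kInt31MinValue || value > kInt31MaxValue) return false;
  if (IsMinusZero(value)) return false;  // -0.0 must stay a heap number
  return value == std::trunc(value);     // only integral values qualify
}

int main() {
  std::printf("%d %d %d %d\n",
              CanCanonicalizeToI31(5.0),            // 1
              CanCanonicalizeToI31(-0.0),           // 0
              CanCanonicalizeToI31(5.5),            // 0
              CanCanonicalizeToI31(2147483647.0));  // 0: exceeds i31 range
}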
-// TODO(7748): WasmExternExternalize is a no-op. Consider removing it.
Reduction WasmGCLowering::ReduceWasmExternExternalize(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kWasmExternExternalize);
+ Node* object = node->InputAt(0);
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+ auto label = gasm_.MakeLabel(MachineRepresentation::kTagged);
+ gasm_.GotoIfNot(IsNull(object, wasm::kWasmAnyRef), &label, object);
+ gasm_.Goto(&label, Null(wasm::kWasmExternRef));
+ gasm_.Bind(&label);
+ ReplaceWithValue(node, label.PhiAt(0), gasm_.effect(), gasm_.control());
+ node->Kill();
+ return Replace(label.PhiAt(0));
+}
+
+Reduction WasmGCLowering::ReduceWasmStructGet(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmStructGet);
+ WasmFieldInfo info = OpParameter<WasmFieldInfo>(node->op());
+
+ Node* object = NodeProperties::GetValueInput(node, 0);
+
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+
+ MachineType type = MachineType::TypeForRepresentation(
+ info.type->field(info.field_index).machine_representation(),
+ info.is_signed);
+
+ Node* offset = gasm_.FieldOffset(info.type, info.field_index);
+
+ if (null_check_strategy_ == kExplicitNullChecks &&
+ info.null_check == kWithNullCheck) {
+ gasm_.TrapIf(IsNull(object, wasm::kWasmAnyRef),
+ TrapId::kTrapNullDereference);
+ UpdateSourcePosition(gasm_.effect(), node);
+ }
+
+ bool use_null_trap =
+ null_check_strategy_ == kTrapHandler && info.null_check == kWithNullCheck;
+ Node* load = use_null_trap ? gasm_.LoadTrapOnNull(type, object, offset)
+ : info.type->mutability(info.field_index)
+ ? gasm_.LoadFromObject(type, object, offset)
+ : gasm_.LoadImmutableFromObject(type, object, offset);
+ if (use_null_trap) {
+ UpdateSourcePosition(load, node);
+ }
+
+ ReplaceWithValue(node, load, gasm_.effect(), gasm_.control());
+ node->Kill();
+ return Replace(load);
+}
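ReduceWasmStructGet chooses between three load operators: a trap-on-null load when the implicit null-check strategy applies (the access itself doubles as the null check), a plain load for mutable fields, and an immutable load that the optimizer may hoist or eliminate. A tiny decision-table sketch of that selection, with stand-in enum names:

// Decision sketch for which load to emit; names are stand-ins, not V8's.
#include <cstdio>

enum NullCheckStrategy { kExplicitNullChecks, kTrapHandler };
enum NullCheck { kWithoutNullCheck, kWithNullCheck };
enum class LoadKind { kLoadTrapOnNull, kLoad, kLoadImmutable };

LoadKind SelectLoad(NullCheckStrategy strategy, NullCheck null_check,
                    bool field_is_mutable) {
  // The trap-on-null load both performs the access and implements the
  // null check; it is only valid when the trap-handler strategy is active.
  if (strategy == kTrapHandler && null_check == kWithNullCheck) {
    return LoadKind::kLoadTrapOnNull;
  }
  // Otherwise an explicit null check (if any) was already emitted before
  // the load, and only mutability decides the load operator.
  return field_is_mutable ? LoadKind::kLoad : LoadKind::kLoadImmutable;
}

int main() {
  std::printf("%d %d\n",
              static_cast<int>(SelectLoad(kTrapHandler, kWithNullCheck, true)),
              static_cast<int>(SelectLoad(kExplicitNullChecks, kWithNullCheck,
                                          false)));  // prints "0 2"
}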
+
+Reduction WasmGCLowering::ReduceWasmStructSet(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmStructSet);
+ WasmFieldInfo info = OpParameter<WasmFieldInfo>(node->op());
+
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+
Node* object = NodeProperties::GetValueInput(node, 0);
- ReplaceWithValue(node, object);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+
+ if (null_check_strategy_ == kExplicitNullChecks &&
+ info.null_check == kWithNullCheck) {
+ gasm_.TrapIf(IsNull(object, wasm::kWasmAnyRef),
+ TrapId::kTrapNullDereference);
+ UpdateSourcePosition(gasm_.effect(), node);
+ }
+
+ wasm::ValueType field_type = info.type->field(info.field_index);
+ Node* offset = gasm_.FieldOffset(info.type, info.field_index);
+
+ Node* store =
+ null_check_strategy_ == kTrapHandler && info.null_check == kWithNullCheck
+ ? gasm_.StoreTrapOnNull({field_type.machine_representation(),
+ field_type.is_reference() ? kFullWriteBarrier
+ : kNoWriteBarrier},
+ object, offset, value)
+ : info.type->mutability(info.field_index)
+ ? gasm_.StoreToObject(ObjectAccessForGCStores(field_type), object,
+ offset, value)
+ : gasm_.InitializeImmutableInObject(
+ ObjectAccessForGCStores(field_type), object, offset, value);
+ ReplaceWithValue(node, store, gasm_.effect(), gasm_.control());
node->Kill();
- return Replace(object);
+ return Replace(store);
+}
+
+Reduction WasmGCLowering::ReduceWasmArrayGet(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArrayGet);
+ WasmElementInfo info = OpParameter<WasmElementInfo>(node->op());
+
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* index = NodeProperties::GetValueInput(node, 1);
+
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+
+ Node* offset = gasm_.WasmArrayElementOffset(index, info.type->element_type());
+
+ MachineType type = MachineType::TypeForRepresentation(
+ info.type->element_type().machine_representation(), info.is_signed);
+
+ Node* value = info.type->mutability()
+ ? gasm_.LoadFromObject(type, object, offset)
+ : gasm_.LoadImmutableFromObject(type, object, offset);
+
+ return Replace(value);
+}
+
+Reduction WasmGCLowering::ReduceWasmArraySet(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArraySet);
+ const wasm::ArrayType* type = OpParameter<const wasm::ArrayType*>(node->op());
+
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* index = NodeProperties::GetValueInput(node, 1);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+
+ Node* offset = gasm_.WasmArrayElementOffset(index, type->element_type());
+
+ ObjectAccess access = ObjectAccessForGCStores(type->element_type());
+
+ Node* store =
+ type->mutability()
+ ? gasm_.StoreToObject(access, object, offset, value)
+ : gasm_.InitializeImmutableInObject(access, object, offset, value);
+
+ return Replace(store);
+}
+
+Reduction WasmGCLowering::ReduceWasmArrayLength(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArrayLength);
+ Node* object = NodeProperties::GetValueInput(node, 0);
+
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+
+ bool null_check = OpParameter<bool>(node->op());
+
+ if (null_check_strategy_ == kExplicitNullChecks &&
+ null_check == kWithNullCheck) {
+ gasm_.TrapIf(IsNull(object, wasm::kWasmAnyRef),
+ TrapId::kTrapNullDereference);
+ UpdateSourcePosition(gasm_.effect(), node);
+ }
+
+ bool use_null_trap =
+ null_check_strategy_ == kTrapHandler && null_check == kWithNullCheck;
+ Node* length =
+ use_null_trap
+ ? gasm_.LoadTrapOnNull(
+ MachineType::Uint32(), object,
+ gasm_.IntPtrConstant(
+ wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset)))
+ : gasm_.LoadImmutableFromObject(
+ MachineType::Uint32(), object,
+ wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
+ if (use_null_trap) {
+ UpdateSourcePosition(length, node);
+ }
+
+ ReplaceWithValue(node, length, gasm_.effect(), gasm_.control());
+ node->Kill();
+ return Replace(length);
+}
+
+Reduction WasmGCLowering::ReduceWasmArrayInitializeLength(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArrayInitializeLength);
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* length = NodeProperties::GetValueInput(node, 1);
+
+ gasm_.InitializeEffectControl(NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+
+ Node* set_length = gasm_.InitializeImmutableInObject(
+ ObjectAccess{MachineType::Uint32(), kNoWriteBarrier}, object,
+ wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset), length);
+
+ return Replace(set_length);
+}
+
+Reduction WasmGCLowering::ReduceStringAsWtf16(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStringAsWtf16);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* str = NodeProperties::GetValueInput(node, 0);
+
+ gasm_.InitializeEffectControl(effect, control);
+
+ auto done = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer);
+ Node* instance_type = gasm_.LoadInstanceType(gasm_.LoadMap(str));
+ Node* string_representation = gasm_.Word32And(
+ instance_type, gasm_.Int32Constant(kStringRepresentationMask));
+ gasm_.GotoIf(gasm_.Word32Equal(string_representation,
+ gasm_.Int32Constant(kSeqStringTag)),
+ &done, str);
+ gasm_.Goto(&done, gasm_.CallBuiltin(Builtin::kWasmStringAsWtf16,
+ Operator::kPure, str));
+ gasm_.Bind(&done);
+ ReplaceWithValue(node, done.PhiAt(0), gasm_.effect(), gasm_.control());
+ node->Kill();
+ return Replace(done.PhiAt(0));
+}
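
[Editor's note, not part of the diff: a standalone C++ sketch of the fast-path test in ReduceStringAsWtf16. Only sequential strings are used directly; everything else goes through the WasmStringAsWtf16 builtin. The mask and tag values are assumptions for illustration.]

#include <cstdint>

constexpr uint32_t kAssumedStringRepresentationMask = 0x07;  // placeholder
constexpr uint32_t kAssumedSeqStringTag = 0x00;              // placeholder

// True if the builtin call (flattening cons/sliced/thin/external strings) is
// required; false if the string can be indexed as a flat sequential string.
bool NeedsWtf16Builtin(uint32_t instance_type) {
  return (instance_type & kAssumedStringRepresentationMask) !=
         kAssumedSeqStringTag;
}
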
+
+Reduction WasmGCLowering::ReduceStringPrepareForGetCodeunit(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStringPrepareForGetCodeunit);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* original_string = NodeProperties::GetValueInput(node, 0);
+
+ gasm_.InitializeEffectControl(effect, control);
+
+ auto dispatch =
+ gasm_.MakeLoopLabel(MachineRepresentation::kTaggedPointer, // String.
+ MachineRepresentation::kWord32, // Instance type.
+ MachineRepresentation::kWord32); // Offset.
+ auto next = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer, // String.
+ MachineRepresentation::kWord32, // Instance type.
+ MachineRepresentation::kWord32); // Offset.
+ auto direct_string =
+ gasm_.MakeLabel(MachineRepresentation::kTaggedPointer, // String.
+ MachineRepresentation::kWord32, // Instance type.
+ MachineRepresentation::kWord32); // Offset.
+
+ // These values will be used to replace the original node's projections.
+ // The first, "string", is either a SeqString or Smi(0) (in case of external
+ // string). Notably this makes it GC-safe: if that string moves, this pointer
+ // will be updated accordingly.
+ // The second, "offset", has full register width so that it can be used to
+ // store external pointers: for external strings, we add up the character
+ // backing store's base address and any slice offset.
+ // The third, "character width", is a shift width, i.e. it is 0 for one-byte
+ // strings, 1 for two-byte strings, kCharWidthBailoutSentinel for uncached
+ // external strings (for which "string"/"offset" are invalid and unusable).
+ auto done =
+ gasm_.MakeLabel(MachineRepresentation::kTagged, // String.
+ MachineType::PointerRepresentation(), // Offset.
+ MachineRepresentation::kWord32); // Character width.
+
+ Node* original_type = gasm_.LoadInstanceType(gasm_.LoadMap(original_string));
+ gasm_.Goto(&dispatch, original_string, original_type, gasm_.Int32Constant(0));
+
+ gasm_.Bind(&dispatch);
+ {
+ auto thin_string = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer);
+ auto cons_string = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer);
+
+ Node* string = dispatch.PhiAt(0);
+ Node* instance_type = dispatch.PhiAt(1);
+ Node* offset = dispatch.PhiAt(2);
+ static_assert(kIsIndirectStringTag == 1);
+ static constexpr int kIsDirectStringTag = 0;
+ gasm_.GotoIf(gasm_.Word32Equal(
+ gasm_.Word32And(instance_type, gasm_.Int32Constant(
+ kIsIndirectStringMask)),
+ gasm_.Int32Constant(kIsDirectStringTag)),
+ &direct_string, string, instance_type, offset);
+
+ // Handle indirect strings.
+ Node* string_representation = gasm_.Word32And(
+ instance_type, gasm_.Int32Constant(kStringRepresentationMask));
+ gasm_.GotoIf(gasm_.Word32Equal(string_representation,
+ gasm_.Int32Constant(kThinStringTag)),
+ &thin_string, string);
+ gasm_.GotoIf(gasm_.Word32Equal(string_representation,
+ gasm_.Int32Constant(kConsStringTag)),
+ &cons_string, string);
+
+ // Sliced string.
+ Node* new_offset = gasm_.Int32Add(
+ offset,
+ gasm_.BuildChangeSmiToInt32(gasm_.LoadImmutableFromObject(
+ MachineType::TaggedSigned(), string,
+ wasm::ObjectAccess::ToTagged(SlicedString::kOffsetOffset))));
+ Node* parent = gasm_.LoadImmutableFromObject(
+ MachineType::TaggedPointer(), string,
+ wasm::ObjectAccess::ToTagged(SlicedString::kParentOffset));
+ Node* parent_type = gasm_.LoadInstanceType(gasm_.LoadMap(parent));
+ gasm_.Goto(&next, parent, parent_type, new_offset);
+
+ // Thin string.
+ gasm_.Bind(&thin_string);
+ Node* actual = gasm_.LoadImmutableFromObject(
+ MachineType::TaggedPointer(), string,
+ wasm::ObjectAccess::ToTagged(ThinString::kActualOffset));
+ Node* actual_type = gasm_.LoadInstanceType(gasm_.LoadMap(actual));
+ // ThinStrings always reference (internalized) direct strings.
+ gasm_.Goto(&direct_string, actual, actual_type, offset);
+
+ // Flat cons string. (Non-flat cons strings are ruled out by
+ // string.as_wtf16.)
+ gasm_.Bind(&cons_string);
+ Node* first = gasm_.LoadImmutableFromObject(
+ MachineType::TaggedPointer(), string,
+ wasm::ObjectAccess::ToTagged(ConsString::kFirstOffset));
+ Node* first_type = gasm_.LoadInstanceType(gasm_.LoadMap(first));
+ gasm_.Goto(&next, first, first_type, offset);
+
+ gasm_.Bind(&next);
+ gasm_.Goto(&dispatch, next.PhiAt(0), next.PhiAt(1), next.PhiAt(2));
+ }
+
+ gasm_.Bind(&direct_string);
+ {
+ Node* string = direct_string.PhiAt(0);
+ Node* instance_type = direct_string.PhiAt(1);
+ Node* offset = direct_string.PhiAt(2);
+
+ Node* is_onebyte = gasm_.Word32And(
+ instance_type, gasm_.Int32Constant(kStringEncodingMask));
+ // Char width shift is 1 - (is_onebyte).
+ static_assert(kStringEncodingMask == 1 << 3);
+ Node* charwidth_shift =
+ gasm_.Int32Sub(gasm_.Int32Constant(1),
+ gasm_.Word32Shr(is_onebyte, gasm_.Int32Constant(3)));
+
+ auto external = gasm_.MakeLabel();
+ Node* string_representation = gasm_.Word32And(
+ instance_type, gasm_.Int32Constant(kStringRepresentationMask));
+ gasm_.GotoIf(gasm_.Word32Equal(string_representation,
+ gasm_.Int32Constant(kExternalStringTag)),
+ &external);
+
+ // Sequential string.
+ static_assert(SeqOneByteString::kCharsOffset ==
+ SeqTwoByteString::kCharsOffset);
+ Node* final_offset = gasm_.Int32Add(
+ gasm_.Int32Constant(
+ wasm::ObjectAccess::ToTagged(SeqOneByteString::kCharsOffset)),
+ gasm_.Word32Shl(offset, charwidth_shift));
+ gasm_.Goto(&done, string, gasm_.BuildChangeInt32ToIntPtr(final_offset),
+ charwidth_shift);
+
+ // External string.
+ gasm_.Bind(&external);
+ gasm_.GotoIf(
+ gasm_.Word32And(instance_type,
+ gasm_.Int32Constant(kUncachedExternalStringMask)),
+ &done, string, gasm_.IntPtrConstant(0),
+ gasm_.Int32Constant(kCharWidthBailoutSentinel));
+ Node* resource = gasm_.BuildLoadExternalPointerFromObject(
+ string, ExternalString::kResourceDataOffset,
+ kExternalStringResourceDataTag, gasm_.LoadRootRegister());
+ Node* shifted_offset = gasm_.Word32Shl(offset, charwidth_shift);
+ final_offset = gasm_.IntPtrAdd(
+ resource, gasm_.BuildChangeInt32ToIntPtr(shifted_offset));
+ gasm_.Goto(&done, gasm_.SmiConstant(0), final_offset, charwidth_shift);
+ }
+
+ gasm_.Bind(&done);
+ Node* base = done.PhiAt(0);
+ Node* final_offset = done.PhiAt(1);
+ Node* charwidth_shift = done.PhiAt(2);
+
+ Node* base_proj = NodeProperties::FindProjection(node, 0);
+ Node* offset_proj = NodeProperties::FindProjection(node, 1);
+ Node* charwidth_proj = NodeProperties::FindProjection(node, 2);
+ if (base_proj) {
+ ReplaceWithValue(base_proj, base, gasm_.effect(), gasm_.control());
+ base_proj->Kill();
+ }
+ if (offset_proj) {
+ ReplaceWithValue(offset_proj, final_offset, gasm_.effect(),
+ gasm_.control());
+ offset_proj->Kill();
+ }
+ if (charwidth_proj) {
+ ReplaceWithValue(charwidth_proj, charwidth_shift, gasm_.effect(),
+ gasm_.control());
+ charwidth_proj->Kill();
+ }
+
+ // Wire up the dangling end of the new effect chain.
+ ReplaceWithValue(node, node, gasm_.effect(), gasm_.control());
+
+ node->Kill();
+ return Replace(base);
+}
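
[Editor's note, not part of the diff: a standalone C++ sketch of how the three projections described in the comment above could be consumed to read one code unit. In the real lowering the first projection is either a SeqString or Smi zero; here both cases are collapsed into a single data pointer, and the sentinel value is a placeholder.]

#include <cstdint>
#include <cstring>

constexpr int32_t kAssumedCharWidthBailoutSentinel = 3;  // placeholder

// base_or_data: start of the character data (SeqString payload or external
// resource); offset and charwidth_shift: the second and third projections.
// Returns the code unit at `index`, or -1 to signal the uncached-external
// slow path.
int32_t LoadCodeUnit(const uint8_t* base_or_data, intptr_t offset,
                     int32_t charwidth_shift, intptr_t index) {
  if (charwidth_shift == kAssumedCharWidthBailoutSentinel) return -1;
  const uint8_t* addr = base_or_data + offset + (index << charwidth_shift);
  if (charwidth_shift == 0) return addr[0];  // one-byte string
  uint16_t unit;                             // two-byte string
  std::memcpy(&unit, addr, sizeof(unit));
  return unit;
}
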
+
+void WasmGCLowering::UpdateSourcePosition(Node* new_node, Node* old_node) {
+ if (source_position_table_) {
+ SourcePosition position =
+ source_position_table_->GetSourcePosition(old_node);
+ if (position.ScriptOffset() != kNoSourcePosition) {
+ source_position_table_->SetSourcePosition(new_node, position);
+ } else {
+ // TODO(mliedtke): Source positions are not yet supported for inlining
+ // wasm into JS. Add support for it and replace the if with a DCHECK.
+ DCHECK_EQ(kExplicitNullChecks, null_check_strategy_);
+ }
+ }
}
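
[Editor's note, not part of the diff: a standalone C++ sketch of the position-propagation idea in UpdateSourcePosition: copy the recorded position of the node being replaced onto its replacement, if one is known. The table type is a plain map, not V8's SourcePositionTable.]

#include <unordered_map>

struct ToySourcePosition {
  int script_offset = -1;  // -1 plays the role of kNoSourcePosition
};

using ToyPositionTable = std::unordered_map<int /*node id*/, ToySourcePosition>;

void CopySourcePosition(ToyPositionTable& table, int new_node_id,
                        int old_node_id) {
  auto it = table.find(old_node_id);
  // Only propagate known positions; nodes without one are left untouched.
  if (it != table.end() && it->second.script_offset != -1) {
    table[new_node_id] = it->second;
  }
}
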
} // namespace compiler
diff --git a/deps/v8/src/compiler/wasm-gc-lowering.h b/deps/v8/src/compiler/wasm-gc-lowering.h
index 9269555102..56d89c927d 100644
--- a/deps/v8/src/compiler/wasm-gc-lowering.h
+++ b/deps/v8/src/compiler/wasm-gc-lowering.h
@@ -10,6 +10,7 @@
#define V8_COMPILER_WASM_GC_LOWERING_H_
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/wasm-compiler-definitions.h"
#include "src/compiler/wasm-graph-assembler.h"
namespace v8 {
@@ -17,12 +18,14 @@ namespace internal {
namespace compiler {
class MachineGraph;
+class SourcePositionTable;
class WasmGraphAssembler;
class WasmGCLowering final : public AdvancedReducer {
public:
WasmGCLowering(Editor* editor, MachineGraph* mcgraph,
- const wasm::WasmModule* module);
+ const wasm::WasmModule* module, bool disable_trap_handler,
+ SourcePositionTable* source_position_table);
const char* reducer_name() const override { return "WasmGCLowering"; }
@@ -39,13 +42,25 @@ class WasmGCLowering final : public AdvancedReducer {
Reduction ReduceTypeGuard(Node* node);
Reduction ReduceWasmExternInternalize(Node* node);
Reduction ReduceWasmExternExternalize(Node* node);
- Node* RootNode(RootIndex index);
- Node* Null();
- Node* IsNull(Node* object);
+ Reduction ReduceWasmStructGet(Node* node);
+ Reduction ReduceWasmStructSet(Node* node);
+ Reduction ReduceWasmArrayGet(Node* node);
+ Reduction ReduceWasmArraySet(Node* node);
+ Reduction ReduceWasmArrayLength(Node* node);
+ Reduction ReduceWasmArrayInitializeLength(Node* node);
+ Reduction ReduceStringAsWtf16(Node* node);
+ Reduction ReduceStringPrepareForGetCodeunit(Node* node);
+ Node* Null(wasm::ValueType type);
+ Node* IsNull(Node* object, wasm::ValueType type);
+ Node* BuildLoadExternalPointerFromObject(Node* object, int offset,
+ ExternalPointerTag tag);
+ void UpdateSourcePosition(Node* new_node, Node* old_node);
+ NullCheckStrategy null_check_strategy_;
WasmGraphAssembler gasm_;
const wasm::WasmModule* module_;
Node* dead_;
- Node* instance_node_;
+ const MachineGraph* mcgraph_;
+ SourcePositionTable* source_position_table_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/wasm-gc-operator-reducer.cc b/deps/v8/src/compiler/wasm-gc-operator-reducer.cc
index cbda5422e8..40aaa43bb3 100644
--- a/deps/v8/src/compiler/wasm-gc-operator-reducer.cc
+++ b/deps/v8/src/compiler/wasm-gc-operator-reducer.cc
@@ -25,6 +25,11 @@ Reduction WasmGCOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kStart:
return ReduceStart(node);
+ case IrOpcode::kWasmStructGet:
+ case IrOpcode::kWasmStructSet:
+ return ReduceWasmStructOperation(node);
+ case IrOpcode::kWasmArrayLength:
+ return ReduceWasmArrayLength(node);
case IrOpcode::kAssertNotNull:
return ReduceAssertNotNull(node);
case IrOpcode::kIsNull:
@@ -34,12 +39,16 @@ Reduction WasmGCOperatorReducer::Reduce(Node* node) {
return ReduceWasmTypeCheck(node);
case IrOpcode::kWasmTypeCast:
return ReduceWasmTypeCast(node);
+ case IrOpcode::kWasmExternInternalize:
+ return ReduceWasmExternInternalize(node);
case IrOpcode::kMerge:
return ReduceMerge(node);
case IrOpcode::kIfTrue:
return ReduceIf(node, true);
case IrOpcode::kIfFalse:
return ReduceIf(node, false);
+ case IrOpcode::kDead:
+ return NoChange();
case IrOpcode::kLoop:
return TakeStatesFromFirstControl(node);
default:
@@ -60,7 +69,7 @@ bool InDeadBranch(Node* node) {
Node* GetAlias(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kWasmTypeCheck:
+ case IrOpcode::kWasmTypeCast:
case IrOpcode::kTypeGuard:
case IrOpcode::kAssertNotNull:
return NodeProperties::GetValueInput(node, 0);
@@ -117,6 +126,58 @@ wasm::TypeInModule WasmGCOperatorReducer::ObjectTypeFromContext(Node* object,
: type_from_node;
}
+Reduction WasmGCOperatorReducer::ReduceWasmStructOperation(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kWasmStructGet ||
+ node->opcode() == IrOpcode::kWasmStructSet);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!IsReduced(control)) return NoChange();
+ Node* object = NodeProperties::GetValueInput(node, 0);
+
+ wasm::TypeInModule object_type = ObjectTypeFromContext(object, control);
+ if (object_type.type.is_bottom()) return NoChange();
+
+ if (object_type.type.is_non_nullable()) {
+ // If the object is known to be non-nullable in the context, remove the null
+ // check.
+ auto op_params = OpParameter<WasmFieldInfo>(node->op());
+ const Operator* new_op =
+ node->opcode() == IrOpcode::kWasmStructGet
+ ? simplified()->WasmStructGet(op_params.type, op_params.field_index,
+ op_params.is_signed,
+ kWithoutNullCheck)
+ : simplified()->WasmStructSet(op_params.type, op_params.field_index,
+ kWithoutNullCheck);
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
+ object_type.type = object_type.type.AsNonNull();
+
+ return UpdateNodeAndAliasesTypes(node, GetState(control), object, object_type,
+ false);
+}
+
+Reduction WasmGCOperatorReducer::ReduceWasmArrayLength(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArrayLength);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!IsReduced(control)) return NoChange();
+ Node* object = NodeProperties::GetValueInput(node, 0);
+
+ wasm::TypeInModule object_type = ObjectTypeFromContext(object, control);
+ if (object_type.type.is_bottom()) return NoChange();
+
+ if (object_type.type.is_non_nullable()) {
+ // If the object is known to be non-nullable in the context, remove the null
+ // check.
+ const Operator* new_op = simplified()->WasmArrayLength(kWithoutNullCheck);
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
+ object_type.type = object_type.type.AsNonNull();
+
+ return UpdateNodeAndAliasesTypes(node, GetState(control), object, object_type,
+ false);
+}
+
// If the condition of this node's branch is a type check or a null check,
// add the additional information about the type-checked node to the path
// state.
@@ -202,13 +263,15 @@ Reduction WasmGCOperatorReducer::ReduceAssertNotNull(Node* node) {
// Optimize the check away if the argument is known to be non-null.
if (object_type.type.is_non_nullable()) {
- ReplaceWithValue(node, object);
- node->Kill();
- return Replace(object);
+ // First, relax control.
+ ReplaceWithValue(node, node, node, control);
+ // Use a TypeGuard node to not lose any type information.
+ NodeProperties::ChangeOp(
+ node, common()->TypeGuard(NodeProperties::GetType(node)));
+ return Changed(node);
}
object_type.type = object_type.type.AsNonNull();
-
return UpdateNodeAndAliasesTypes(node, GetState(control), node, object_type,
false);
}
@@ -224,16 +287,20 @@ Reduction WasmGCOperatorReducer::ReduceCheckNull(Node* node) {
// Optimize the check away if the argument is known to be non-null.
if (object_type.type.is_non_nullable()) {
- ReplaceWithValue(
- node, gasm_.Int32Constant(node->opcode() == IrOpcode::kIsNull ? 0 : 1));
+ ReplaceWithValue(node,
+ SetType(gasm_.Int32Constant(
+ node->opcode() == IrOpcode::kIsNull ? 0 : 1),
+ wasm::kWasmI32));
node->Kill();
return Replace(object); // Irrelevant replacement.
}
// Optimize the check away if the argument is known to be null.
if (object->opcode() == IrOpcode::kNull) {
- ReplaceWithValue(
- node, gasm_.Int32Constant(node->opcode() == IrOpcode::kIsNull ? 1 : 0));
+ ReplaceWithValue(node,
+ SetType(gasm_.Int32Constant(
+ node->opcode() == IrOpcode::kIsNull ? 1 : 0),
+ wasm::kWasmI32));
node->Kill();
return Replace(object); // Irrelevant replacement.
}
@@ -241,6 +308,23 @@ Reduction WasmGCOperatorReducer::ReduceCheckNull(Node* node) {
return NoChange();
}
+Reduction WasmGCOperatorReducer::ReduceWasmExternInternalize(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmExternInternalize);
+ // Remove redundant extern.internalize(extern.externalize(...)) pattern.
+ // TODO(mliedtke): Currently this doesn't get fully removed, probably due to
+ // not running dead code elimination in this pipeline step. What would it cost
+ // us to run it here?
+ if (NodeProperties::GetValueInput(node, 0)->opcode() ==
+ IrOpcode::kWasmExternExternalize) {
+ Node* externalize = node->InputAt(0);
+ Node* input = externalize->InputAt(0);
+ ReplaceWithValue(node, input);
+ node->Kill();
+ return Replace(input);
+ }
+ return TakeStatesFromFirstControl(node);
+}
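
[Editor's note, not part of the diff: a standalone C++ sketch of the peephole pattern ReduceWasmExternInternalize applies, written against a toy expression node: internalize(externalize(x)) collapses to x. Names are illustrative.]

#include <string>

struct ToyNode {
  std::string opcode;
  ToyNode* input = nullptr;
};

// Returns the node that should replace `node`, or `node` itself if the
// redundant round-trip pattern does not match.
ToyNode* ReduceInternalize(ToyNode* node) {
  if (node->opcode == "WasmExternInternalize" && node->input != nullptr &&
      node->input->opcode == "WasmExternExternalize") {
    return node->input->input;  // skip both conversions
  }
  return node;
}
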
+
Reduction WasmGCOperatorReducer::ReduceWasmTypeCast(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kWasmTypeCast);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -259,13 +343,20 @@ Reduction WasmGCOperatorReducer::ReduceWasmTypeCast(Node* node) {
wasm::HeapType(rtt_type.type.ref_index()),
object_type.module, rtt_type.module)) {
if (to_nullable) {
- // Type cast will always succeed. Remove it.
- ReplaceWithValue(node, object);
- node->Kill();
- return Replace(object);
+ // Type cast will always succeed. Turn it into a TypeGuard to not lose any
+ // type information.
+ // First, relax control.
+ ReplaceWithValue(node, node, node, control);
+ // Remove rtt input.
+ node->RemoveInput(1);
+ NodeProperties::ChangeOp(
+ node, common()->TypeGuard(NodeProperties::GetType(node)));
+ return Changed(node);
} else {
gasm_.InitializeEffectControl(effect, control);
- return Replace(gasm_.AssertNotNull(object));
+ return Replace(SetType(gasm_.AssertNotNull(object, object_type.type,
+ TrapId::kTrapIllegalCast),
+ object_type.type.AsNonNull()));
}
}
@@ -276,11 +367,12 @@ Reduction WasmGCOperatorReducer::ReduceWasmTypeCast(Node* node) {
// A cast between unrelated types can only succeed if the argument is null.
// Otherwise, it always fails.
Node* non_trapping_condition = object_type.type.is_nullable() && to_nullable
- ? gasm_.IsNull(object)
+ ? gasm_.IsNull(object, object_type.type)
: gasm_.Int32Constant(0);
gasm_.TrapUnless(SetType(non_trapping_condition, wasm::kWasmI32),
TrapId::kTrapIllegalCast);
- Node* null_node = SetType(gasm_.Null(), wasm::ToNullSentinel(object_type));
+ Node* null_node = SetType(gasm_.Null(object_type.type),
+ wasm::ToNullSentinel(object_type));
ReplaceWithValue(node, null_node, gasm_.effect(), gasm_.control());
node->Kill();
return Replace(null_node);
@@ -322,7 +414,7 @@ Reduction WasmGCOperatorReducer::ReduceWasmTypeCheck(Node* node) {
// Type cast will fail only on null.
gasm_.InitializeEffectControl(effect, control);
Node* condition = SetType(object_type.type.is_nullable() && !null_succeeds
- ? gasm_.IsNotNull(object)
+ ? gasm_.IsNotNull(object, object_type.type)
: gasm_.Int32Constant(1),
wasm::kWasmI32);
ReplaceWithValue(node, condition);
@@ -339,7 +431,8 @@ Reduction WasmGCOperatorReducer::ReduceWasmTypeCheck(Node* node) {
if (null_succeeds && object_type.type.is_nullable()) {
// The cast only succeeds in case of null.
gasm_.InitializeEffectControl(effect, control);
- condition = SetType(gasm_.IsNull(object), wasm::kWasmI32);
+ condition =
+ SetType(gasm_.IsNull(object, object_type.type), wasm::kWasmI32);
} else {
// The cast never succeeds.
condition = SetType(gasm_.Int32Constant(0), wasm::kWasmI32);
diff --git a/deps/v8/src/compiler/wasm-gc-operator-reducer.h b/deps/v8/src/compiler/wasm-gc-operator-reducer.h
index d933609448..f2e6706a2a 100644
--- a/deps/v8/src/compiler/wasm-gc-operator-reducer.h
+++ b/deps/v8/src/compiler/wasm-gc-operator-reducer.h
@@ -54,10 +54,13 @@ class WasmGCOperatorReducer final
private:
using ControlPathTypes = ControlPathState<NodeWithType, kMultipleInstances>;
+ Reduction ReduceWasmStructOperation(Node* node);
+ Reduction ReduceWasmArrayLength(Node* node);
Reduction ReduceAssertNotNull(Node* node);
Reduction ReduceCheckNull(Node* node);
Reduction ReduceWasmTypeCheck(Node* node);
Reduction ReduceWasmTypeCast(Node* node);
+ Reduction ReduceWasmExternInternalize(Node* node);
Reduction ReduceMerge(Node* node);
Reduction ReduceIf(Node* node, bool condition);
Reduction ReduceStart(Node* node);
@@ -73,6 +76,7 @@ class WasmGCOperatorReducer final
Graph* graph() { return mcgraph_->graph(); }
CommonOperatorBuilder* common() { return mcgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return gasm_.simplified(); }
MachineGraph* mcgraph_;
WasmGraphAssembler gasm_;
diff --git a/deps/v8/src/compiler/wasm-graph-assembler.cc b/deps/v8/src/compiler/wasm-graph-assembler.cc
index 8e14362d9f..099871a0cd 100644
--- a/deps/v8/src/compiler/wasm-graph-assembler.cc
+++ b/deps/v8/src/compiler/wasm-graph-assembler.cc
@@ -84,10 +84,10 @@ Node* WasmGraphAssembler::BuildSmiShiftBitsConstant32() {
Node* WasmGraphAssembler::BuildChangeInt32ToSmi(Node* value) {
// With pointer compression, only the lower 32 bits are used.
- return COMPRESS_POINTERS_BOOL
- ? Word32Shl(value, BuildSmiShiftBitsConstant32())
- : WordShl(BuildChangeInt32ToIntPtr(value),
- BuildSmiShiftBitsConstant());
+ return COMPRESS_POINTERS_BOOL ? BitcastWord32ToWord64(Word32Shl(
+ value, BuildSmiShiftBitsConstant32()))
+ : WordShl(BuildChangeInt32ToIntPtr(value),
+ BuildSmiShiftBitsConstant());
}
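
[Editor's note, not part of the diff: a standalone C++ sketch of the two Smi encodings BuildChangeInt32ToSmi produces. The shift amounts (1 with pointer compression, 32 without, on 64-bit targets) are stated here as assumptions about the usual V8 configuration.]

#include <cstdint>

// With pointer compression only the low 32 bits carry the payload, which is
// why a 32-bit shift plus a bitcast to word64 is enough.
uint64_t Int32ToSmiCompressed(int32_t value) {
  return static_cast<uint32_t>(value) << 1;  // assumed tag shift of 1
}

// Without pointer compression the payload lives in the upper half word.
uint64_t Int32ToSmiFullPointer(int32_t value) {
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << 32;  // assumed shift
}
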
Node* WasmGraphAssembler::BuildChangeUint31ToSmi(Node* value) {
@@ -173,7 +173,38 @@ Node* WasmGraphAssembler::InitializeImmutableInObject(ObjectAccess access,
offset, value, effect(), control()));
}
-Node* WasmGraphAssembler::IsI31(Node* object) {
+Node* WasmGraphAssembler::BuildLoadExternalPointerFromObject(
+ Node* object, int offset, ExternalPointerTag tag, Node* isolate_root) {
+#ifdef V8_ENABLE_SANDBOX
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ Node* external_pointer = LoadFromObject(MachineType::Uint32(), object,
+ wasm::ObjectAccess::ToTagged(offset));
+ static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2);
+ Node* shift_amount =
+ Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
+ Node* scaled_index =
+ ChangeUint32ToUint64(Word32Shr(external_pointer, shift_amount));
+ Node* table;
+ if (IsSharedExternalPointerType(tag)) {
+ Node* table_address =
+ Load(MachineType::Pointer(), isolate_root,
+ IsolateData::shared_external_pointer_table_offset());
+ table = Load(MachineType::Pointer(), table_address,
+ Internals::kExternalPointerTableBufferOffset);
+ } else {
+ table = Load(MachineType::Pointer(), isolate_root,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset);
+ }
+ Node* decoded_ptr = Load(MachineType::Pointer(), table, scaled_index);
+ return WordAnd(decoded_ptr, IntPtrConstant(~tag));
+#else
+ return LoadFromObject(MachineType::Pointer(), object,
+ wasm::ObjectAccess::ToTagged(offset));
+#endif // V8_ENABLE_SANDBOX
+}
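
[Editor's note, not part of the diff: a standalone C++ sketch of the sandboxed decode in BuildLoadExternalPointerFromObject, with a plain array standing in for the external pointer table. The shift and tag constants are placeholders, not V8's real values.]

#include <cstdint>

constexpr uint32_t kAssumedIndexShift = 6;               // placeholder
constexpr uint64_t kAssumedTag = 0x00FF000000000000ULL;  // placeholder

// The 32-bit field stored in the object is a handle (table index), not a raw
// pointer; the entry is loaded from the table and the type tag is masked off.
uint64_t DecodeExternalPointer(uint32_t handle, const uint64_t* table) {
  uint32_t index = handle >> kAssumedIndexShift;
  return table[index] & ~kAssumedTag;
}
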
+
+Node* WasmGraphAssembler::IsSmi(Node* object) {
if (COMPRESS_POINTERS_BOOL) {
return Word32Equal(Word32And(object, Int32Constant(kSmiTagMask)),
Int32Constant(kSmiTag));
@@ -232,6 +263,15 @@ Node* WasmGraphAssembler::LoadFixedArrayElement(Node* fixed_array,
return LoadFromObject(type, fixed_array, offset);
}
+Node* WasmGraphAssembler::LoadWeakArrayListElement(Node* fixed_array,
+ Node* index_intptr,
+ MachineType type) {
+ Node* offset = IntAdd(
+ IntMul(index_intptr, IntPtrConstant(kTaggedSize)),
+ IntPtrConstant(wasm::ObjectAccess::ToTagged(WeakArrayList::kHeaderSize)));
+ return LoadFromObject(type, fixed_array, offset);
+}
+
Node* WasmGraphAssembler::LoadImmutableFixedArrayElement(Node* fixed_array,
Node* index_intptr,
MachineType type) {
@@ -304,18 +344,6 @@ Node* WasmGraphAssembler::FieldOffset(const wasm::StructType* type,
WasmStruct::kHeaderSize + type->field_offset(field_index)));
}
-Node* WasmGraphAssembler::StoreStructField(Node* struct_object,
- const wasm::StructType* type,
- uint32_t field_index, Node* value) {
- ObjectAccess access = ObjectAccessForGCStores(type->field(field_index));
- return type->mutability(field_index)
- ? StoreToObject(access, struct_object,
- FieldOffset(type, field_index), value)
- : InitializeImmutableInObject(access, struct_object,
- FieldOffset(type, field_index),
- value);
-}
-
Node* WasmGraphAssembler::WasmArrayElementOffset(Node* index,
wasm::ValueType element_type) {
Node* index_intptr =
@@ -325,12 +353,6 @@ Node* WasmGraphAssembler::WasmArrayElementOffset(Node* index,
IntMul(index_intptr, IntPtrConstant(element_type.value_kind_size())));
}
-Node* WasmGraphAssembler::LoadWasmArrayLength(Node* array) {
- return LoadImmutableFromObject(
- MachineType::Uint32(), array,
- wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
-}
-
Node* WasmGraphAssembler::IsDataRefMap(Node* map) {
Node* instance_type = LoadInstanceType(map);
// We're going to test a range of WasmObject instance types with a single
@@ -354,21 +376,23 @@ Node* WasmGraphAssembler::WasmTypeCast(Node* object, Node* rtt,
effect(), control()));
}
-Node* WasmGraphAssembler::Null() {
- return AddNode(graph()->NewNode(simplified_.Null()));
+Node* WasmGraphAssembler::Null(wasm::ValueType type) {
+ return AddNode(graph()->NewNode(simplified_.Null(type)));
}
-Node* WasmGraphAssembler::IsNull(Node* object) {
- return AddNode(graph()->NewNode(simplified_.IsNull(), object, control()));
+Node* WasmGraphAssembler::IsNull(Node* object, wasm::ValueType type) {
+ return AddNode(graph()->NewNode(simplified_.IsNull(type), object, control()));
}
-Node* WasmGraphAssembler::IsNotNull(Node* object) {
- return AddNode(graph()->NewNode(simplified_.IsNotNull(), object, control()));
+Node* WasmGraphAssembler::IsNotNull(Node* object, wasm::ValueType type) {
+ return AddNode(
+ graph()->NewNode(simplified_.IsNotNull(type), object, control()));
}
-Node* WasmGraphAssembler::AssertNotNull(Node* object) {
- return AddNode(graph()->NewNode(simplified_.AssertNotNull(), object, effect(),
- control()));
+Node* WasmGraphAssembler::AssertNotNull(Node* object, wasm::ValueType type,
+ TrapId trap_id) {
+ return AddNode(graph()->NewNode(simplified_.AssertNotNull(type, trap_id),
+ object, effect(), control()));
}
Node* WasmGraphAssembler::WasmExternInternalize(Node* object) {
@@ -381,6 +405,55 @@ Node* WasmGraphAssembler::WasmExternExternalize(Node* object) {
effect(), control()));
}
+Node* WasmGraphAssembler::StructGet(Node* object, const wasm::StructType* type,
+ int field_index, bool is_signed,
+ CheckForNull null_check) {
+ return AddNode(graph()->NewNode(
+ simplified_.WasmStructGet(type, field_index, is_signed, null_check),
+ object, effect(), control()));
+}
+
+void WasmGraphAssembler::StructSet(Node* object, Node* value,
+ const wasm::StructType* type,
+ int field_index, CheckForNull null_check) {
+ AddNode(
+ graph()->NewNode(simplified_.WasmStructSet(type, field_index, null_check),
+ object, value, effect(), control()));
+}
+
+Node* WasmGraphAssembler::ArrayGet(Node* array, Node* index,
+ const wasm::ArrayType* type,
+ bool is_signed) {
+ return AddNode(graph()->NewNode(simplified_.WasmArrayGet(type, is_signed),
+ array, index, effect(), control()));
+}
+
+void WasmGraphAssembler::ArraySet(Node* array, Node* index, Node* value,
+ const wasm::ArrayType* type) {
+ AddNode(graph()->NewNode(simplified_.WasmArraySet(type), array, index, value,
+ effect(), control()));
+}
+
+Node* WasmGraphAssembler::ArrayLength(Node* array, CheckForNull null_check) {
+ return AddNode(graph()->NewNode(simplified_.WasmArrayLength(null_check),
+ array, effect(), control()));
+}
+
+void WasmGraphAssembler::ArrayInitializeLength(Node* array, Node* length) {
+ AddNode(graph()->NewNode(simplified_.WasmArrayInitializeLength(), array,
+ length, effect(), control()));
+}
+
+Node* WasmGraphAssembler::StringAsWtf16(Node* string) {
+ return AddNode(graph()->NewNode(simplified_.StringAsWtf16(), string, effect(),
+ control()));
+}
+
+Node* WasmGraphAssembler::StringPrepareForGetCodeunit(Node* string) {
+ return AddNode(graph()->NewNode(simplified_.StringPrepareForGetCodeunit(),
+ string, effect(), control()));
+}
+
// Generic HeapObject helpers.
Node* WasmGraphAssembler::HasInstanceType(Node* heap_object,
diff --git a/deps/v8/src/compiler/wasm-graph-assembler.h b/deps/v8/src/compiler/wasm-graph-assembler.h
index 948a775fda..7a0a8f9622 100644
--- a/deps/v8/src/compiler/wasm-graph-assembler.h
+++ b/deps/v8/src/compiler/wasm-graph-assembler.h
@@ -159,7 +159,11 @@ class WasmGraphAssembler : public GraphAssembler {
value);
}
- Node* IsI31(Node* object);
+ Node* BuildLoadExternalPointerFromObject(Node* object, int offset,
+ ExternalPointerTag tag,
+ Node* isolate_root);
+
+ Node* IsSmi(Node* object);
// Maps and their contents.
@@ -211,6 +215,9 @@ class WasmGraphAssembler : public GraphAssembler {
ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier));
}
+ Node* LoadWeakArrayListElement(Node* fixed_array, Node* index_intptr,
+ MachineType type = MachineType::AnyTagged());
+
// Functions, SharedFunctionInfos, FunctionData.
Node* LoadSharedFunctionInfo(Node* js_function);
@@ -231,30 +238,46 @@ class WasmGraphAssembler : public GraphAssembler {
Node* FieldOffset(const wasm::StructType* type, uint32_t field_index);
- Node* StoreStructField(Node* struct_object, const wasm::StructType* type,
- uint32_t field_index, Node* value);
Node* WasmArrayElementOffset(Node* index, wasm::ValueType element_type);
- Node* LoadWasmArrayLength(Node* array);
-
Node* IsDataRefMap(Node* map);
Node* WasmTypeCheck(Node* object, Node* rtt, WasmTypeCheckConfig config);
Node* WasmTypeCast(Node* object, Node* rtt, WasmTypeCheckConfig config);
- Node* Null();
+ Node* Null(wasm::ValueType type);
- Node* IsNull(Node* object);
+ Node* IsNull(Node* object, wasm::ValueType type);
- Node* IsNotNull(Node* object);
+ Node* IsNotNull(Node* object, wasm::ValueType type);
- Node* AssertNotNull(Node* object);
+ Node* AssertNotNull(Node* object, wasm::ValueType type, TrapId trap_id);
Node* WasmExternInternalize(Node* object);
Node* WasmExternExternalize(Node* object);
+ Node* StructGet(Node* object, const wasm::StructType* type, int field_index,
+ bool is_signed, CheckForNull null_check);
+
+ void StructSet(Node* object, Node* value, const wasm::StructType* type,
+ int field_index, CheckForNull null_check);
+
+ Node* ArrayGet(Node* array, Node* index, const wasm::ArrayType* type,
+ bool is_signed);
+
+ void ArraySet(Node* array, Node* index, Node* value,
+ const wasm::ArrayType* type);
+
+ Node* ArrayLength(Node* array, CheckForNull null_check);
+
+ void ArrayInitializeLength(Node* array, Node* length);
+
+ Node* StringAsWtf16(Node* string);
+
+ Node* StringPrepareForGetCodeunit(Node* string);
+
// Generic helpers.
Node* HasInstanceType(Node* heap_object, InstanceType type);
@@ -269,6 +292,10 @@ class WasmGraphAssembler : public GraphAssembler {
effect(), control()));
}
+ Node* LoadRootRegister() {
+ return AddNode(graph()->NewNode(mcgraph()->machine()->LoadRootRegister()));
+ }
+
SimplifiedOperatorBuilder* simplified() override { return &simplified_; }
private:
diff --git a/deps/v8/src/compiler/wasm-inlining-into-js.cc b/deps/v8/src/compiler/wasm-inlining-into-js.cc
new file mode 100644
index 0000000000..ac52e9ee52
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining-into-js.cc
@@ -0,0 +1,346 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-inlining-into-js.h"
+
+#include "src/compiler/wasm-compiler-definitions.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/compiler/wasm-graph-assembler.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-opcodes-inl.h"
+#include "src/wasm/wasm-subtyping.h"
+
+namespace v8::internal::compiler {
+
+namespace {
+
+using wasm::WasmOpcode;
+using wasm::WasmOpcodes;
+
+class WasmIntoJSInlinerImpl : private wasm::Decoder {
+ using ValidationTag = NoValidationTag;
+
+ struct Value {
+ Node* node = nullptr;
+ wasm::ValueType type = wasm::kWasmBottom;
+ };
+
+ public:
+ WasmIntoJSInlinerImpl(Zone* zone, const wasm::WasmModule* module,
+ MachineGraph* mcgraph, const wasm::FunctionBody& body,
+ const base::Vector<const byte>& bytes)
+ : wasm::Decoder(bytes.begin(), bytes.end()),
+ module_(module),
+ mcgraph_(mcgraph),
+ body_(body),
+ graph_(mcgraph->graph()),
+ gasm_(mcgraph, zone) {
+ // +1 for instance node.
+ size_t params = body.sig->parameter_count() + 1;
+ Node* start =
+ graph_->NewNode(mcgraph->common()->Start(static_cast<int>(params)));
+ graph_->SetStart(start);
+ graph_->SetEnd(graph_->NewNode(mcgraph->common()->End(0)));
+ gasm_.InitializeEffectControl(start, start);
+
+ // Initialize parameter nodes.
+ // We have to add another +1 as the minimum parameter index is actually
+ // -1, not 0...
+ size_t params_extended = params + 1;
+ parameters_ = zone->NewArray<Node*>(params_extended);
+ for (unsigned i = 0; i < params_extended; i++) {
+ parameters_[i] = nullptr;
+ }
+ // Instance node at parameter 0.
+ instance_node_ = Param(wasm::kWasmInstanceParameterIndex);
+ }
+
+ Node* Param(int index, const char* debug_name = nullptr) {
+ DCHECK_NOT_NULL(graph_->start());
+ // Turbofan allows negative parameter indices.
+ static constexpr int kMinParameterIndex = -1;
+ DCHECK_GE(index, kMinParameterIndex);
+ int array_index = index - kMinParameterIndex;
+ if (parameters_[array_index] == nullptr) {
+ parameters_[array_index] = graph_->NewNode(
+ mcgraph_->common()->Parameter(index, debug_name), graph_->start());
+ }
+ return parameters_[array_index];
+ }
+
+ bool TryInlining() {
+ if (body_.sig->return_count() > 1) {
+ return false; // Multi-return is not supported.
+ }
+ // Parse locals.
+ if (consume_u32v() != 0) {
+ // Functions with locals are not supported.
+ return false;
+ }
+ // Parse body.
+ // TODO(mliedtke): Use zone vector?
+ base::SmallVector<Value, 4> stack;
+ while (is_inlineable_) {
+ WasmOpcode opcode = ReadOpcode();
+ switch (opcode) {
+ case wasm::kExprExternInternalize:
+ DCHECK(!stack.empty());
+ stack.back() = ParseExternInternalize(stack.back());
+ continue;
+ case wasm::kExprExternExternalize:
+ DCHECK(!stack.empty());
+ stack.back() = ParseExternExternalize(stack.back());
+ continue;
+ case wasm::kExprRefCast:
+ case wasm::kExprRefCastNull:
+ DCHECK(!stack.empty());
+ stack.back() =
+ ParseRefCast(stack.back(), opcode == wasm::kExprRefCastNull);
+ continue;
+ case wasm::kExprArrayLen:
+ DCHECK(!stack.empty());
+ stack.back() = ParseArrayLen(stack.back());
+ continue;
+ case wasm::kExprArrayGet:
+ case wasm::kExprArrayGetS:
+ case wasm::kExprArrayGetU: {
+ DCHECK_GE(stack.size(), 2);
+ Value index = stack.back();
+ stack.pop_back();
+ Value array = stack.back();
+ stack.back() = ParseArrayGet(array, index, opcode);
+ continue;
+ }
+ case wasm::kExprArraySet: {
+ DCHECK_GE(stack.size(), 3);
+ Value value = stack.back();
+ stack.pop_back();
+ Value index = stack.back();
+ stack.pop_back();
+ Value array = stack.back();
+ stack.pop_back();
+ ParseArraySet(array, index, value);
+ continue;
+ }
+ case wasm::kExprStructGet:
+ case wasm::kExprStructGetS:
+ case wasm::kExprStructGetU:
+ DCHECK(!stack.empty());
+ stack.back() = ParseStructGet(stack.back(), opcode);
+ continue;
+ case wasm::kExprStructSet: {
+ DCHECK_GE(stack.size(), 2);
+ Value value = stack.back();
+ stack.pop_back();
+ Value wasm_struct = stack.back();
+ stack.pop_back();
+ ParseStructSet(wasm_struct, value);
+ continue;
+ }
+ case wasm::kExprLocalGet:
+ stack.push_back(ParseLocalGet());
+ continue;
+ case wasm::kExprDrop:
+ DCHECK(!stack.empty());
+ stack.pop_back();
+ continue;
+ case wasm::kExprEnd: {
+ DCHECK_LT(stack.size(), 2);
+ int return_count = static_cast<int>(stack.size());
+ base::SmallVector<Node*, 8> buf(return_count + 3);
+ buf[0] = mcgraph_->Int32Constant(0);
+ if (return_count) {
+ buf[1] = stack.back().node;
+ }
+ buf[return_count + 1] = gasm_.effect();
+ buf[return_count + 2] = gasm_.control();
+ Node* ret = graph_->NewNode(mcgraph_->common()->Return(return_count),
+ return_count + 3, buf.data());
+
+ gasm_.MergeControlToEnd(ret);
+ return true;
+ }
+ default:
+ // Instruction not supported for inlining.
+ return false;
+ }
+ }
+ // The decoder found an instruction it couldn't inline successfully.
+ return false;
+ }
+
+ private:
+ Value ParseExternInternalize(Value input) {
+ DCHECK(input.type.is_reference_to(wasm::HeapType::kExtern) ||
+ input.type.is_reference_to(wasm::HeapType::kNoExtern));
+ wasm::ValueType result_type = wasm::ValueType::RefMaybeNull(
+ wasm::HeapType::kAny, input.type.is_nullable()
+ ? wasm::Nullability::kNullable
+ : wasm::Nullability::kNonNullable);
+ Node* internalized = gasm_.WasmExternInternalize(input.node);
+ return {internalized, result_type};
+ }
+
+ Value ParseExternExternalize(Value input) {
+ DCHECK(input.type.is_reference());
+ wasm::ValueType result_type = wasm::ValueType::RefMaybeNull(
+ wasm::HeapType::kExtern, input.type.is_nullable()
+ ? wasm::Nullability::kNullable
+ : wasm::Nullability::kNonNullable);
+ Node* internalized = gasm_.WasmExternExternalize(input.node);
+ return {internalized, result_type};
+ }
+
+ Value ParseLocalGet() {
+ uint32_t index = consume_u32v();
+ DCHECK_LT(index, body_.sig->parameter_count());
+ return {Param(index + 1), body_.sig->GetParam(index)};
+ }
+
+ Value ParseStructGet(Value struct_val, WasmOpcode opcode) {
+ uint32_t struct_index = consume_u32v();
+ DCHECK(module_->has_struct(struct_index));
+ const wasm::StructType* struct_type = module_->struct_type(struct_index);
+ uint32_t field_index = consume_u32v();
+ DCHECK_GT(struct_type->field_count(), field_index);
+ const bool is_signed = opcode == wasm::kExprStructGetS;
+ const CheckForNull null_check =
+ struct_val.type.is_nullable() ? kWithNullCheck : kWithoutNullCheck;
+ Node* member = gasm_.StructGet(struct_val.node, struct_type, field_index,
+ is_signed, null_check);
+ return {member, struct_type->field(field_index).Unpacked()};
+ }
+
+ void ParseStructSet(Value wasm_struct, Value value) {
+ uint32_t struct_index = consume_u32v();
+ DCHECK(module_->has_struct(struct_index));
+ const wasm::StructType* struct_type = module_->struct_type(struct_index);
+ uint32_t field_index = consume_u32v();
+ DCHECK_GT(struct_type->field_count(), field_index);
+ const CheckForNull null_check =
+ wasm_struct.type.is_nullable() ? kWithNullCheck : kWithoutNullCheck;
+ gasm_.StructSet(wasm_struct.node, value.node, struct_type, field_index,
+ null_check);
+ }
+
+ Value ParseRefCast(Value input, bool null_succeeds) {
+ auto [heap_index, length] = read_i33v<ValidationTag>(pc_);
+ pc_ += length;
+ if (heap_index < 0) {
+ if ((heap_index & 0x7f) != wasm::kArrayRefCode) {
+ // Abstract casts for non array type are not supported.
+ is_inlineable_ = false;
+ return {};
+ }
+ auto done = gasm_.MakeLabel();
+ // Abstract cast to array.
+ if (input.type.is_nullable() && null_succeeds) {
+ gasm_.GotoIf(gasm_.IsNull(input.node, input.type), &done);
+ }
+ gasm_.TrapIf(gasm_.IsSmi(input.node), TrapId::kTrapIllegalCast);
+ gasm_.TrapUnless(gasm_.HasInstanceType(input.node, WASM_ARRAY_TYPE),
+ TrapId::kTrapIllegalCast);
+ gasm_.Goto(&done);
+ gasm_.Bind(&done);
+ // Add TypeGuard for graph typing.
+ Graph* graph = mcgraph_->graph();
+ wasm::ValueType result_type = wasm::ValueType::RefMaybeNull(
+ wasm::HeapType::kArray,
+ null_succeeds ? wasm::kNullable : wasm::kNonNullable);
+ Node* type_guard =
+ graph->NewNode(mcgraph_->common()->TypeGuard(
+ Type::Wasm(result_type, module_, graph->zone())),
+ input.node, gasm_.effect(), gasm_.control());
+ gasm_.InitializeEffectControl(type_guard, gasm_.control());
+ return {type_guard, result_type};
+ }
+ if (module_->has_signature(static_cast<uint32_t>(heap_index))) {
+ is_inlineable_ = false;
+ return {};
+ }
+ wasm::ValueType target_type = wasm::ValueType::RefMaybeNull(
+ static_cast<uint32_t>(heap_index),
+ null_succeeds ? wasm::kNullable : wasm::kNonNullable);
+ Node* rtt = mcgraph_->graph()->NewNode(
+ gasm_.simplified()->RttCanon(target_type.ref_index()), instance_node_);
+ Node* cast = gasm_.WasmTypeCast(input.node, rtt, {input.type, target_type});
+ return {cast, target_type};
+ }
+
+ Value ParseArrayLen(Value input) {
+ DCHECK(wasm::IsHeapSubtypeOf(input.type.heap_type(),
+ wasm::HeapType(wasm::HeapType::kArray),
+ module_));
+ const CheckForNull null_check =
+ input.type.is_nullable() ? kWithNullCheck : kWithoutNullCheck;
+ Node* len = gasm_.ArrayLength(input.node, null_check);
+ return {len, wasm::kWasmI32};
+ }
+
+ Value ParseArrayGet(Value array, Value index, WasmOpcode opcode) {
+ uint32_t array_index = consume_u32v();
+ DCHECK(module_->has_array(array_index));
+ const wasm::ArrayType* array_type = module_->array_type(array_index);
+ const bool is_signed = opcode == WasmOpcode::kExprArrayGetS;
+ const CheckForNull null_check =
+ array.type.is_nullable() ? kWithNullCheck : kWithoutNullCheck;
+ // Perform bounds check.
+ Node* length = gasm_.ArrayLength(array.node, null_check);
+ gasm_.TrapUnless(gasm_.Uint32LessThan(index.node, length),
+ TrapId::kTrapArrayOutOfBounds);
+ // Perform array.get.
+ Node* element =
+ gasm_.ArrayGet(array.node, index.node, array_type, is_signed);
+ return {element, array_type->element_type().Unpacked()};
+ }
+
+ void ParseArraySet(Value array, Value index, Value value) {
+ uint32_t array_index = consume_u32v();
+ DCHECK(module_->has_array(array_index));
+ const wasm::ArrayType* array_type = module_->array_type(array_index);
+ const CheckForNull null_check =
+ array.type.is_nullable() ? kWithNullCheck : kWithoutNullCheck;
+ // Perform bounds check.
+ Node* length = gasm_.ArrayLength(array.node, null_check);
+ gasm_.TrapUnless(gasm_.Uint32LessThan(index.node, length),
+ TrapId::kTrapArrayOutOfBounds);
+ // Perform array.set.
+ gasm_.ArraySet(array.node, index.node, value.node, array_type);
+ }
+
+ WasmOpcode ReadOpcode() {
+ DCHECK_LT(pc_, end_);
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ ++pc_;
+ return opcode;
+ }
+ auto [opcode_with_prefix, length] =
+ read_prefixed_opcode<ValidationTag>(pc_);
+ pc_ += length;
+ return opcode_with_prefix;
+ }
+
+ const wasm::WasmModule* module_;
+ MachineGraph* mcgraph_;
+ const wasm::FunctionBody& body_;
+ Node** parameters_;
+ Graph* graph_;
+ Node* instance_node_;
+ WasmGraphAssembler gasm_;
+ bool is_inlineable_ = true;
+};
+
+} // anonymous namespace
+
+bool WasmIntoJSInliner::TryInlining(Zone* zone, const wasm::WasmModule* module,
+ MachineGraph* mcgraph,
+ const wasm::FunctionBody& body,
+ const base::Vector<const byte>& bytes) {
+ WasmIntoJSInlinerImpl inliner(zone, module, mcgraph, body, bytes);
+ return inliner.TryInlining();
+}
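
[Editor's note, not part of the diff: a standalone C++ sketch of the decode-and-bail-out shape used by TryInlining above, reduced to a toy stack machine over two opcodes. Any unsupported opcode aborts the whole attempt, mirroring is_inlineable_ = false; opcode values are invented for the sketch.]

#include <cstdint>
#include <optional>
#include <vector>

enum ToyOpcode : uint8_t { kLocalGet0 = 0x20, kDrop = 0x1a, kEnd = 0x0b };

// Returns the depth of the value stack at `end`, or nullopt if an unsupported
// opcode was encountered (the caller then falls back to a regular call).
std::optional<size_t> TryDecode(const std::vector<uint8_t>& body) {
  std::vector<int> stack;
  for (uint8_t op : body) {
    switch (op) {
      case kLocalGet0: stack.push_back(0); break;
      case kDrop:      if (!stack.empty()) stack.pop_back(); break;
      case kEnd:       return stack.size();
      default:         return std::nullopt;  // bail out of inlining
    }
  }
  return std::nullopt;  // ran off the end without kEnd
}
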
+
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/wasm-inlining-into-js.h b/deps/v8/src/compiler/wasm-inlining-into-js.h
new file mode 100644
index 0000000000..2f66e02b78
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining-into-js.h
@@ -0,0 +1,39 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_INLINING_INTO_JS_H_
+#define V8_COMPILER_WASM_INLINING_INTO_JS_H_
+
+#include "src/base/vector.h"
+#include "src/common/globals.h"
+
+namespace v8::internal {
+class Zone;
+
+namespace wasm {
+struct FunctionBody;
+struct WasmModule;
+} // namespace wasm
+
+namespace compiler {
+class MachineGraph;
+class Node;
+
+// The WasmIntoJSInliner inlines very small wasm functions, consisting only of
+// a limited set of supported instructions, directly into JS.
+class WasmIntoJSInliner {
+ public:
+ static bool TryInlining(Zone* zone, const wasm::WasmModule* module,
+ MachineGraph* mcgraph, const wasm::FunctionBody& body,
+ const base::Vector<const byte>& bytes);
+};
+
+} // namespace compiler
+} // namespace v8::internal
+
+#endif // V8_COMPILER_WASM_INLINING_INTO_JS_H_
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
index 3d7b4b3f68..9c04545da9 100644
--- a/deps/v8/src/compiler/wasm-inlining.cc
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -32,24 +32,23 @@ Reduction WasmInliner::Reduce(Node* node) {
if (v8_flags.trace_wasm_inlining) PrintF(__VA_ARGS__)
void WasmInliner::Trace(Node* call, int inlinee, const char* decision) {
- TRACE("[function %d: considering node %d, call to %d: %s]\n", function_index_,
- call->id(), inlinee, decision);
+ TRACE("[function %d: considering node %d, call to %d: %s]\n",
+ data_.func_index, call->id(), inlinee, decision);
}
int WasmInliner::GetCallCount(Node* call) {
- if (!v8_flags.wasm_speculative_inlining) return 0;
+ if (!env_->enabled_features.has_inlining()) return 0;
return mcgraph()->GetCallCount(call->id());
}
-// TODO(12166): Save inlined frames for trap/--trace-wasm purposes. Consider
-// tail calls.
+// TODO(12166): Save inlined frames for trap/--trace-wasm purposes.
Reduction WasmInliner::ReduceCall(Node* call) {
DCHECK(call->opcode() == IrOpcode::kCall ||
call->opcode() == IrOpcode::kTailCall);
if (seen_.find(call) != seen_.end()) {
- TRACE("function %d: have already seen node %d, skipping\n", function_index_,
- call->id());
+ TRACE("function %d: have already seen node %d, skipping\n",
+ data_.func_index, call->id());
return NoChange();
}
seen_.insert(call);
@@ -60,7 +59,7 @@ Reduction WasmInliner::ReduceCall(Node* call) {
: IrOpcode::kRelocatableInt64Constant;
if (callee->opcode() != reloc_opcode) {
TRACE("[function %d: considering node %d... not a relocatable constant]\n",
- function_index_, call->id());
+ data_.func_index, call->id());
return NoChange();
}
auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
@@ -88,7 +87,8 @@ Reduction WasmInliner::ReduceCall(Node* call) {
CHECK_LT(inlinee_index, module()->functions.size());
const wasm::WasmFunction* inlinee = &module()->functions[inlinee_index];
- base::Vector<const byte> function_bytes = wire_bytes_->GetCode(inlinee->code);
+ base::Vector<const byte> function_bytes =
+ data_.wire_bytes_storage->GetCode(inlinee->code);
int call_count = GetCallCount(call);
@@ -97,7 +97,7 @@ Reduction WasmInliner::ReduceCall(Node* call) {
// If liftoff ran and collected call counts, only inline calls that have been
// invoked often, except for truly tiny functions.
- if (v8_flags.liftoff && v8_flags.wasm_speculative_inlining &&
+ if (v8_flags.liftoff && env_->enabled_features.has_inlining() &&
wire_byte_size >= 12 && call_count < min_count_for_inlining) {
Trace(call, inlinee_index, "not called often enough");
return NoChange();
@@ -113,6 +113,9 @@ Reduction WasmInliner::ReduceCall(Node* call) {
}
bool SmallEnoughToInline(size_t current_graph_size, uint32_t candidate_size) {
+ if (candidate_size > v8_flags.wasm_inlining_max_size) {
+ return false;
+ }
if (WasmInliner::graph_size_allows_inlining(current_graph_size +
candidate_size)) {
return true;
@@ -126,13 +129,13 @@ void WasmInliner::Trace(const CandidateInfo& candidate, const char* decision) {
TRACE(
" [function %d: considering candidate {@%d, index=%d, count=%d, "
"size=%d}: %s]\n",
- function_index_, candidate.node->id(), candidate.inlinee_index,
+ data_.func_index, candidate.node->id(), candidate.inlinee_index,
candidate.call_count, candidate.wire_byte_size, decision);
}
void WasmInliner::Finalize() {
TRACE("function %d %s: going though inlining candidates...\n",
- function_index_, debug_name_);
+ data_.func_index, debug_name_);
if (inlining_candidates_.empty()) return;
while (!inlining_candidates_.empty()) {
CandidateInfo candidate = inlining_candidates_.top();
@@ -152,43 +155,33 @@ void WasmInliner::Finalize() {
const wasm::WasmFunction* inlinee =
&module()->functions[candidate.inlinee_index];
- const wasm::FunctionSig* lowered_sig =
- mcgraph_->machine()->Is64() ? inlinee->sig
- : GetI32Sig(zone(), inlinee->sig);
-
- DCHECK_EQ(lowered_sig->parameter_count(),
+ DCHECK_EQ(inlinee->sig->parameter_count(),
call->op()->ValueInputCount() - 2);
#if DEBUG
// The first two parameters in the call are the function and instance, and
// then come the wasm function parameters.
- for (uint32_t i = 0; i < lowered_sig->parameter_count(); i++) {
+ for (uint32_t i = 0; i < inlinee->sig->parameter_count(); i++) {
if (!NodeProperties::IsTyped(call->InputAt(i + 2))) continue;
wasm::TypeInModule param_type =
NodeProperties::GetType(call->InputAt(i + 2)).AsWasm();
- CHECK(IsSubtypeOf(param_type.type, lowered_sig->GetParam(i),
+ CHECK(IsSubtypeOf(param_type.type, inlinee->sig->GetParam(i),
param_type.module, module()));
}
#endif
base::Vector<const byte> function_bytes =
- wire_bytes_->GetCode(inlinee->code);
-
- wasm::WasmFeatures detected;
- std::vector<WasmLoopInfo> inlinee_loop_infos;
+ data_.wire_bytes_storage->GetCode(inlinee->code);
- size_t subgraph_min_node_id = graph()->NodeCount();
- Node* inlinee_start;
- Node* inlinee_end;
const wasm::FunctionBody inlinee_body{inlinee->sig, inlinee->code.offset(),
function_bytes.begin(),
function_bytes.end()};
// If the inlinee was not validated before, do that now.
- if (!module()->function_was_validated(candidate.inlinee_index)) {
+ if (V8_UNLIKELY(
+ !module()->function_was_validated(candidate.inlinee_index))) {
wasm::WasmFeatures unused_detected_features;
- if (ValidateFunctionBody(zone()->allocator(), env_->enabled_features,
- module(), &unused_detected_features,
- inlinee_body)
+ if (ValidateFunctionBody(env_->enabled_features, module(),
+ &unused_detected_features, inlinee_body)
.failed()) {
Trace(candidate, "function is invalid");
// At this point we cannot easily raise a compilation error any more.
@@ -200,19 +193,31 @@ void WasmInliner::Finalize() {
module()->set_function_validated(candidate.inlinee_index);
}
+ wasm::WasmFeatures detected;
+ std::vector<WasmLoopInfo> inlinee_loop_infos;
+ wasm::DanglingExceptions dangling_exceptions;
+
+ size_t subgraph_min_node_id = graph()->NodeCount();
+ Node* inlinee_start;
+ Node* inlinee_end;
+ SourcePosition caller_pos =
+ data_.source_positions->GetSourcePosition(candidate.node);
+ inlining_positions_->push_back(
+ {static_cast<int>(candidate.inlinee_index), caller_pos});
+ int inlining_position_id =
+ static_cast<int>(inlining_positions_->size()) - 1;
WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
- source_positions_);
+ data_.source_positions);
+ builder.set_inlining_id(inlining_position_id);
{
Graph::SubgraphScope scope(graph());
- wasm::DecodeResult result = wasm::BuildTFGraph(
- zone()->allocator(), env_->enabled_features, module(), &builder,
- &detected, inlinee_body, &inlinee_loop_infos, node_origins_,
- candidate.inlinee_index,
- NodeProperties::IsExceptionalCall(call)
- ? wasm::kInlinedHandledCall
- : wasm::kInlinedNonHandledCall);
- CHECK(result.ok());
- builder.LowerInt64(WasmGraphBuilder::kCalledFromWasm);
+ wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features, module(),
+ &builder, &detected, inlinee_body, &inlinee_loop_infos,
+ &dangling_exceptions, data_.node_origins,
+ candidate.inlinee_index, data_.assumptions,
+ NodeProperties::IsExceptionalCall(call)
+ ? wasm::kInlinedHandledCall
+ : wasm::kInlinedNonHandledCall);
inlinee_start = graph()->start();
inlinee_end = graph()->end();
}
@@ -224,14 +229,15 @@ void WasmInliner::Finalize() {
function_inlining_count_[candidate.inlinee_index]++;
if (call->opcode() == IrOpcode::kCall) {
- InlineCall(call, inlinee_start, inlinee_end, lowered_sig,
- subgraph_min_node_id);
+ InlineCall(call, inlinee_start, inlinee_end, inlinee->sig,
+ &dangling_exceptions);
} else {
InlineTailCall(call, inlinee_start, inlinee_end);
}
call->Kill();
- loop_infos_->insert(loop_infos_->end(), inlinee_loop_infos.begin(),
- inlinee_loop_infos.end());
+ data_.loop_infos->insert(data_.loop_infos->end(),
+ inlinee_loop_infos.begin(),
+ inlinee_loop_infos.end());
// Returning after only one inlining has been tried and found worse.
}
}
@@ -291,62 +297,13 @@ void WasmInliner::InlineTailCall(Node* call, Node* callee_start,
Revisit(graph()->end());
}
-namespace {
-// graph-builder-interface generates a dangling exception handler for each
-// throwing call in the inlinee. This might be followed by a LoopExit node.
-Node* DanglingHandler(Node* call) {
- Node* if_exception = nullptr;
- for (Node* use : call->uses()) {
- if (use->opcode() == IrOpcode::kIfException) {
- if_exception = use;
- break;
- }
- }
- DCHECK_NOT_NULL(if_exception);
-
- // If this handler is dangling, return it.
- if (if_exception->UseCount() == 0) return if_exception;
-
- for (Node* use : if_exception->uses()) {
- // Otherwise, look for a LoopExit use of this handler.
- if (use->opcode() == IrOpcode::kLoopExit) {
- for (Node* loop_exit_use : use->uses()) {
- if (loop_exit_use->opcode() != IrOpcode::kLoopExitEffect &&
- loop_exit_use->opcode() != IrOpcode::kLoopExitValue) {
- // This LoopExit has a use other than LoopExitEffect/Value, so it is
- // not dangling.
- return nullptr;
- }
- }
- return use;
- }
- }
-
- return nullptr;
-}
-} // namespace
-
void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
const wasm::FunctionSig* inlinee_sig,
- size_t subgraph_min_node_id) {
+ wasm::DanglingExceptions* dangling_exceptions) {
DCHECK_EQ(call->opcode(), IrOpcode::kCall);
- // 0) Before doing anything, if {call} has an exception handler, collect all
- // unhandled calls in the subgraph.
Node* handler = nullptr;
- std::vector<Node*> dangling_handlers;
- if (NodeProperties::IsExceptionalCall(call, &handler)) {
- AllNodes subgraph_nodes(zone(), callee_end, graph());
- for (Node* node : subgraph_nodes.reachable) {
- if (node->id() >= subgraph_min_node_id &&
- !node->op()->HasProperty(Operator::kNoThrow)) {
- Node* dangling_handler = DanglingHandler(node);
- if (dangling_handler != nullptr) {
- dangling_handlers.push_back(dangling_handler);
- }
- }
- }
- }
+ bool is_exceptional_call = NodeProperties::IsExceptionalCall(call, &handler);
// 1) Rewire function entry.
RewireFunctionEntry(call, callee_start);
@@ -378,6 +335,8 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
return_inputs.push_back(graph()->NewNode(common()->Int32Constant(0)));
if (return_arity == 1) {
// Tail calls are untyped; we have to type the node here.
+ // TODO(manoskouk): Try to compute a more precise type from the callee
+ // node.
NodeProperties::SetType(
input, Type::Wasm({inlinee_sig->GetReturn(0), module()},
graph()->zone()));
@@ -395,13 +354,19 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
}
}
+ Node* effect = input;
+ Node* control = input;
+ if (is_exceptional_call) {
+ // Remember dangling exception (will be connected later).
+ Node* if_exception = graph()->NewNode(
+ mcgraph()->common()->IfException(), input, input);
+ dangling_exceptions->Add(if_exception, if_exception, if_exception);
+ control = graph()->NewNode(mcgraph()->common()->IfSuccess(), input);
+ }
+
// Add effect and control inputs.
- return_inputs.push_back(input->op()->EffectOutputCount() > 0
- ? input
- : NodeProperties::GetEffectInput(input));
- return_inputs.push_back(input->op()->ControlOutputCount() > 0
- ? input
- : NodeProperties::GetControlInput(input));
+ return_inputs.push_back(effect);
+ return_inputs.push_back(control);
Node* ret = graph()->NewNode(common()->Return(return_arity),
static_cast<int>(return_inputs.size()),
@@ -416,42 +381,28 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
callee_end->Kill();
// 3) Rewire unhandled calls to the handler.
- int handler_count = static_cast<int>(dangling_handlers.size());
-
- if (handler_count > 0) {
- Node* control_output =
- graph()->NewNode(common()->Merge(handler_count), handler_count,
- dangling_handlers.data());
- std::vector<Node*> effects;
- std::vector<Node*> values;
- for (Node* control : dangling_handlers) {
- if (control->opcode() == IrOpcode::kIfException) {
- effects.push_back(control);
- values.push_back(control);
- } else {
- DCHECK_EQ(control->opcode(), IrOpcode::kLoopExit);
- Node* if_exception = control->InputAt(0);
- DCHECK_EQ(if_exception->opcode(), IrOpcode::kIfException);
- effects.push_back(graph()->NewNode(common()->LoopExitEffect(),
- if_exception, control));
- values.push_back(graph()->NewNode(
- common()->LoopExitValue(MachineRepresentation::kTagged),
- if_exception, control));
- }
+ if (is_exceptional_call) {
+ int handler_count = static_cast<int>(dangling_exceptions->Size());
+ if (handler_count > 0) {
+ Node* control_output =
+ graph()->NewNode(common()->Merge(handler_count), handler_count,
+ dangling_exceptions->controls.data());
+ std::vector<Node*>& effects(dangling_exceptions->effects);
+ std::vector<Node*>& values(dangling_exceptions->exception_values);
+
+ effects.push_back(control_output);
+ values.push_back(control_output);
+ Node* value_output = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, handler_count),
+ handler_count + 1, values.data());
+ Node* effect_output = graph()->NewNode(common()->EffectPhi(handler_count),
+ handler_count + 1, effects.data());
+ ReplaceWithValue(handler, value_output, effect_output, control_output);
+ } else {
+ // Nothing in the inlined function can throw. Remove the handler.
+ ReplaceWithValue(handler, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
}
-
- effects.push_back(control_output);
- values.push_back(control_output);
- Node* value_output = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, handler_count),
- handler_count + 1, values.data());
- Node* effect_output = graph()->NewNode(common()->EffectPhi(handler_count),
- handler_count + 1, effects.data());
- ReplaceWithValue(handler, value_output, effect_output, control_output);
- } else if (handler != nullptr) {
- // Nothing in the inlined function can throw. Remove the handler.
- ReplaceWithValue(handler, mcgraph()->Dead(), mcgraph()->Dead(),
- mcgraph()->Dead());
}
if (return_nodes.size() > 0) {
@@ -506,6 +457,8 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
} else if (return_arity == 1) {
// One return value. Just replace value uses of the call node with it.
+ // Note: This will automatically detect and replace the IfSuccess node
+ // correctly.
ReplaceWithValue(call, values[0], effect_output, control_output);
} else {
// Multiple returns. We have to find the projections of the call node and
@@ -513,12 +466,8 @@ void WasmInliner::InlineCall(Node* call, Node* callee_start, Node* callee_end,
for (Edge use_edge : call->use_edges()) {
if (NodeProperties::IsValueEdge(use_edge)) {
Node* use = use_edge.from();
- // Other nodes are unreachable leftovers from Int32Lowering.
- if (use->opcode() == IrOpcode::kProjection) {
- ReplaceWithValue(use, values[ProjectionIndexOf(use->op())]);
- } else {
- DCHECK(mcgraph()->machine()->Is32());
- }
+ DCHECK_EQ(use->opcode(), IrOpcode::kProjection);
+ ReplaceWithValue(use, values[ProjectionIndexOf(use->op())]);
}
}
// All value inputs are replaced by the above loop, so it is ok to use
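
The exception rewiring above replaces the old per-subgraph scan for dangling IfException nodes with a wasm::DanglingExceptions list that is filled while returns are rewired and then merged into the caller's handler in one pass. A minimal, self-contained sketch of that bookkeeping pattern, with Toy* stand-ins rather than the real V8 node types:

#include <cassert>
#include <string>
#include <vector>

// Toy stand-in for a TurboFan node; in the compiler these are Node*.
struct ToyNode {
  std::string label;
};

// Collects the exceptional (value, effect, control) projections of every
// throwing call encountered during inlining, so they can all be wired to the
// caller's exception handler afterwards.
struct ToyDanglingExceptions {
  std::vector<ToyNode*> exception_values;
  std::vector<ToyNode*> effects;
  std::vector<ToyNode*> controls;

  void Add(ToyNode* value, ToyNode* effect, ToyNode* control) {
    exception_values.push_back(value);
    effects.push_back(effect);
    controls.push_back(control);
  }
  size_t Size() const { return controls.size(); }
};

int main() {
  ToyNode if_exception_a{"IfException@call1"};
  ToyNode if_exception_b{"IfException@call2"};

  ToyDanglingExceptions dangling;
  // During inlining, every throwing call contributes one entry (here the same
  // node serves as value, effect, and control, as in the hunk above).
  dangling.Add(&if_exception_a, &if_exception_a, &if_exception_a);
  dangling.Add(&if_exception_b, &if_exception_b, &if_exception_b);

  // Afterwards, a Merge over `controls` plus a Phi/EffectPhi over
  // `exception_values`/`effects` would replace the caller's handler.
  assert(dangling.Size() == 2);
  return 0;
}
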
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
index e7ec4eb271..f1e0d126af 100644
--- a/deps/v8/src/compiler/wasm-inlining.h
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -15,43 +15,35 @@
namespace v8 {
namespace internal {
+struct WasmInliningPosition;
+
namespace wasm {
struct CompilationEnv;
+struct DanglingExceptions;
struct WasmModule;
-struct WasmFunction;
-class WireBytesStorage;
} // namespace wasm
-class BytecodeOffset;
-class OptimizedCompilationInfo;
-
namespace compiler {
-class NodeOriginTable;
-class SourcePositionTable;
-struct WasmLoopInfo;
+struct WasmCompilationData;
// The WasmInliner provides the core graph inlining machinery for WebAssembly
// graphs.
class WasmInliner final : public AdvancedReducer {
public:
WasmInliner(Editor* editor, wasm::CompilationEnv* env,
- uint32_t function_index, SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, MachineGraph* mcgraph,
- const wasm::WireBytesStorage* wire_bytes,
- std::vector<WasmLoopInfo>* loop_infos, const char* debug_name)
+ WasmCompilationData& data, MachineGraph* mcgraph,
+ const char* debug_name,
+ ZoneVector<WasmInliningPosition>* inlining_positions)
: AdvancedReducer(editor),
env_(env),
- function_index_(function_index),
- source_positions_(source_positions),
- node_origins_(node_origins),
+ data_(data),
mcgraph_(mcgraph),
- wire_bytes_(wire_bytes),
- loop_infos_(loop_infos),
debug_name_(debug_name),
initial_graph_size_(mcgraph->graph()->NodeCount()),
current_graph_size_(initial_graph_size_),
- inlining_candidates_() {}
+ inlining_candidates_(),
+ inlining_positions_(inlining_positions) {}
const char* reducer_name() const override { return "WasmInliner"; }
@@ -93,7 +85,7 @@ class WasmInliner final : public AdvancedReducer {
Reduction ReduceCall(Node* call);
void InlineCall(Node* call, Node* callee_start, Node* callee_end,
const wasm::FunctionSig* inlinee_sig,
- size_t subgraph_min_node_id);
+ wasm::DanglingExceptions* dangling_exceptions);
void InlineTailCall(Node* call, Node* callee_start, Node* callee_end);
void RewireFunctionEntry(Node* call, Node* callee_start);
@@ -103,12 +95,8 @@ class WasmInliner final : public AdvancedReducer {
void Trace(const CandidateInfo& candidate, const char* decision);
wasm::CompilationEnv* const env_;
- uint32_t function_index_;
- SourcePositionTable* const source_positions_;
- NodeOriginTable* const node_origins_;
+ WasmCompilationData& data_;
MachineGraph* const mcgraph_;
- const wasm::WireBytesStorage* const wire_bytes_;
- std::vector<WasmLoopInfo>* const loop_infos_;
const char* debug_name_;
const size_t initial_graph_size_;
size_t current_graph_size_;
@@ -117,6 +105,7 @@ class WasmInliner final : public AdvancedReducer {
inlining_candidates_;
std::unordered_set<Node*> seen_;
std::unordered_map<uint32_t, int> function_inlining_count_;
+ ZoneVector<WasmInliningPosition>* inlining_positions_;
};
} // namespace compiler
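
The constructor change above folds the source position table, node origins, wire bytes and loop infos into a single WasmCompilationData reference. The parameter-object shape, reduced to a neutral and purely illustrative sketch (none of these names are V8 types):

#include <cassert>
#include <vector>

// Stand-ins for the per-compilation tables the inliner used to take one by one.
struct SourcePositions {
  int entries;
};
struct LoopInfo {
  int header_id;
};

// Parameter object: everything a compilation pass may need travels together,
// so adding a field does not ripple through every reducer constructor.
struct ToyCompilationData {
  SourcePositions* source_positions;
  std::vector<LoopInfo>* loop_infos;
};

class ToyInliner {
 public:
  explicit ToyInliner(ToyCompilationData& data) : data_(data) {}
  bool HasLoops() const {
    return data_.loop_infos != nullptr && !data_.loop_infos->empty();
  }

 private:
  ToyCompilationData& data_;
};

int main() {
  SourcePositions positions{0};
  std::vector<LoopInfo> loops{{42}};
  ToyCompilationData data{&positions, &loops};
  ToyInliner inliner(data);
  assert(inliner.HasLoops());
  return 0;
}
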
diff --git a/deps/v8/src/compiler/wasm-load-elimination.cc b/deps/v8/src/compiler/wasm-load-elimination.cc
new file mode 100644
index 0000000000..6abd39ef3f
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-load-elimination.cc
@@ -0,0 +1,526 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-load-elimination.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/wasm/struct-types.h"
+#include "src/wasm/wasm-subtyping.h"
+
+namespace v8::internal::compiler {
+
+/**** Helpers ****/
+
+namespace {
+bool TypesUnrelated(Node* lhs, Node* rhs) {
+ wasm::TypeInModule type1 = NodeProperties::GetType(lhs).AsWasm();
+ wasm::TypeInModule type2 = NodeProperties::GetType(rhs).AsWasm();
+ return wasm::TypesUnrelated(type1.type, type2.type, type1.module,
+ type2.module);
+}
+
+bool IsFresh(Node* node) {
+ return node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kAllocateRaw;
+}
+
+bool IsConstant(Node* node) {
+ return node->opcode() == IrOpcode::kParameter ||
+ node->opcode() == IrOpcode::kHeapConstant;
+}
+
+bool MayAlias(Node* lhs, Node* rhs) {
+ if (lhs == rhs) return true;
+ if (TypesUnrelated(lhs, rhs) || (IsFresh(lhs) && IsFresh(rhs)) ||
+ (IsFresh(lhs) && IsConstant(rhs)) || (IsConstant(lhs) && IsFresh(rhs))) {
+ return false;
+ }
+ return true;
+}
+
+Node* ResolveAliases(Node* node) {
+ while (node->opcode() == IrOpcode::kWasmTypeCast ||
+ node->opcode() == IrOpcode::kAssertNotNull ||
+ node->opcode() == IrOpcode::kTypeGuard) {
+ node = NodeProperties::GetValueInput(node, 0);
+ }
+ return node;
+}
+
+// We model array length and string canonicalization as fields at negative
+// indices.
+constexpr int kArrayLengthFieldIndex = -1;
+constexpr int kStringPrepareForGetCodeunitIndex = -2;
+constexpr int kStringAsWtf16Index = -3;
+} // namespace
+
+Reduction WasmLoadElimination::UpdateState(Node* node,
+ AbstractState const* state) {
+ AbstractState const* original = node_states_.Get(node);
+ // Only signal that the {node} has Changed if the information about {state}
+ // has changed wrt. the {original}.
+ if (state != original) {
+ if (original == nullptr || !state->Equals(original)) {
+ node_states_.Set(node, state);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+std::tuple<Node*, Node*> WasmLoadElimination::TruncateAndExtendOrType(
+ Node* value, Node* effect, Node* control, wasm::ValueType field_type,
+ bool is_signed) {
+ if (field_type == wasm::kWasmI8 || field_type == wasm::kWasmI16) {
+ Node* ret = nullptr;
+ if (is_signed) {
+ int shift = 32 - 8 * field_type.value_kind_size();
+ ret = graph()->NewNode(machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), value,
+ jsgraph()->Int32Constant(shift)),
+ jsgraph()->Int32Constant(shift));
+ } else {
+ int mask = (1 << 8 * field_type.value_kind_size()) - 1;
+ ret = graph()->NewNode(machine()->Word32And(), value,
+ jsgraph()->Int32Constant(mask));
+ }
+
+ NodeProperties::SetType(ret, NodeProperties::GetType(value));
+ return {ret, effect};
+ }
+
+ wasm::TypeInModule node_type = NodeProperties::GetType(value).AsWasm();
+
+ // TODO(12166): Adapt this if cross-module inlining is allowed.
+ if (!wasm::IsSubtypeOf(node_type.type, field_type, node_type.module)) {
+ Type type = Type::Wasm({field_type, node_type.module}, graph()->zone());
+ Node* ret =
+ graph()->NewNode(common()->TypeGuard(type), value, effect, control);
+ NodeProperties::SetType(ret, type);
+ return {ret, ret};
+ }
+
+ return {value, effect};
+}
+
+/***** Reductions *****/
+
+Reduction WasmLoadElimination::Reduce(Node* node) {
+ if (v8_flags.trace_turbo_load_elimination) {
+ // TODO(manoskouk): Add some tracing.
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kWasmStructGet:
+ return ReduceWasmStructGet(node);
+ case IrOpcode::kWasmStructSet:
+ return ReduceWasmStructSet(node);
+ case IrOpcode::kWasmArrayLength:
+ return ReduceWasmArrayLength(node);
+ case IrOpcode::kWasmArrayInitializeLength:
+ return ReduceWasmArrayInitializeLength(node);
+ case IrOpcode::kStringPrepareForGetCodeunit:
+ return ReduceStringPrepareForGetCodeunit(node);
+ case IrOpcode::kStringAsWtf16:
+ return ReduceStringAsWtf16(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
+ return NoChange();
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
+ }
+}
+
+Reduction WasmLoadElimination::ReduceWasmStructGet(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmStructGet);
+ Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0));
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ const WasmFieldInfo& field_info = OpParameter<WasmFieldInfo>(node->op());
+ bool is_mutable = field_info.type->mutability(field_info.field_index);
+
+ // - The node can only be typed as bottom in unreachable code.
+ // - We can only find the field in the wrong half-state in unreachable code.
+ if (NodeProperties::GetType(node).AsWasm().type.is_bottom() ||
+ !(is_mutable ? &state->immutable_state : &state->mutable_state)
+ ->LookupField(field_info.field_index, object)
+ .IsEmpty()) {
+ Node* unreachable =
+ graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control);
+ MachineRepresentation rep =
+ field_info.type->field(field_info.field_index).machine_representation();
+ Node* dead_value =
+ graph()->NewNode(jsgraph()->common()->DeadValue(rep), unreachable);
+ NodeProperties::SetType(dead_value, NodeProperties::GetType(node));
+ ReplaceWithValue(node, dead_value, unreachable, control);
+ node->Kill();
+ return Replace(dead_value);
+ }
+
+ HalfState const* half_state =
+ is_mutable ? &state->mutable_state : &state->immutable_state;
+
+ FieldOrElementValue lookup_result =
+ half_state->LookupField(field_info.field_index, object);
+
+ if (!lookup_result.IsEmpty() && !lookup_result.value->IsDead()) {
+ std::tuple<Node*, Node*> replacement = TruncateAndExtendOrType(
+ lookup_result.value, effect, control,
+ field_info.type->field(field_info.field_index), field_info.is_signed);
+ ReplaceWithValue(node, std::get<0>(replacement), std::get<1>(replacement),
+ control);
+ node->Kill();
+ return Replace(std::get<0>(replacement));
+ }
+
+ half_state = half_state->AddField(field_info.field_index, object, node);
+
+ AbstractState const* new_state =
+ is_mutable
+ ? zone()->New<AbstractState>(*half_state, state->immutable_state)
+ : zone()->New<AbstractState>(state->mutable_state, *half_state);
+
+ return UpdateState(node, new_state);
+}
+
+Reduction WasmLoadElimination::ReduceWasmStructSet(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmStructSet);
+ Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0));
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ const WasmFieldInfo& field_info = OpParameter<WasmFieldInfo>(node->op());
+ bool is_mutable = field_info.type->mutability(field_info.field_index);
+
+ if (is_mutable) {
+ // We can find the field in the wrong half-state only in unreachable code.
+ if (!(state->immutable_state.LookupField(field_info.field_index, object)
+ .IsEmpty())) {
+ Node* unreachable =
+ graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control);
+ ReplaceWithValue(node, unreachable, unreachable, control);
+ node->Kill();
+ return Replace(unreachable);
+ }
+
+ HalfState const* mutable_state =
+ state->mutable_state.KillField(field_info.field_index, object);
+ mutable_state =
+ mutable_state->AddField(field_info.field_index, object, value);
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(*mutable_state, state->immutable_state);
+ return UpdateState(node, new_state);
+ } else {
+ // We can find the field in the wrong half-state only in unreachable code.
+ if (!(state->mutable_state.LookupField(field_info.field_index, object)
+ .IsEmpty())) {
+ Node* unreachable =
+ graph()->NewNode(jsgraph()->common()->Unreachable(), effect, control);
+ ReplaceWithValue(node, unreachable, unreachable, control);
+ node->Kill();
+ return Replace(unreachable);
+ }
+ // We should not initialize the same immutable field twice.
+ DCHECK(state->immutable_state.LookupField(field_info.field_index, object)
+ .IsEmpty());
+ HalfState const* immutable_state =
+ state->immutable_state.AddField(field_info.field_index, object, value);
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(state->mutable_state, *immutable_state);
+ return UpdateState(node, new_state);
+ }
+}
+
+Reduction WasmLoadElimination::ReduceWasmArrayLength(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArrayLength);
+ Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0));
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ HalfState const* immutable_state = &state->immutable_state;
+
+ FieldOrElementValue lookup_result =
+ immutable_state->LookupField(kArrayLengthFieldIndex, object);
+
+ if (!lookup_result.IsEmpty() && !lookup_result.value->IsDead()) {
+ ReplaceWithValue(node, lookup_result.value, effect, control);
+ node->Kill();
+ return Replace(lookup_result.value);
+ }
+
+ immutable_state =
+ immutable_state->AddField(kArrayLengthFieldIndex, object, node);
+
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(state->mutable_state, *immutable_state);
+
+ return UpdateState(node, new_state);
+}
+
+Reduction WasmLoadElimination::ReduceWasmArrayInitializeLength(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWasmArrayInitializeLength);
+ Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0));
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ // We should not initialize the length twice.
+ DCHECK(state->immutable_state.LookupField(kArrayLengthFieldIndex, object)
+ .IsEmpty());
+ HalfState const* immutable_state =
+ state->immutable_state.AddField(kArrayLengthFieldIndex, object, value);
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(state->mutable_state, *immutable_state);
+ return UpdateState(node, new_state);
+}
+
+Reduction WasmLoadElimination::ReduceStringPrepareForGetCodeunit(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStringPrepareForGetCodeunit);
+ Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0));
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ HalfState const* mutable_state = &state->mutable_state;
+
+ FieldOrElementValue lookup_result =
+ mutable_state->LookupField(kStringPrepareForGetCodeunitIndex, object);
+
+ if (!lookup_result.IsEmpty() && !lookup_result.value->IsDead()) {
+ for (size_t i : {0, 1, 2}) {
+ Node* proj_to_replace = NodeProperties::FindProjection(node, i);
+ ReplaceWithValue(proj_to_replace,
+ NodeProperties::FindProjection(lookup_result.value, i));
+ proj_to_replace->Kill();
+ }
+ ReplaceWithValue(node, lookup_result.value, effect, control);
+ node->Kill();
+ return Replace(lookup_result.value);
+ }
+
+ mutable_state =
+ mutable_state->AddField(kStringPrepareForGetCodeunitIndex, object, node);
+
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(*mutable_state, state->immutable_state);
+
+ return UpdateState(node, new_state);
+}
+
+Reduction WasmLoadElimination::ReduceStringAsWtf16(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStringAsWtf16);
+ Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0));
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ HalfState const* immutable_state = &state->immutable_state;
+
+ FieldOrElementValue lookup_result =
+ immutable_state->LookupField(kStringAsWtf16Index, object);
+
+ if (!lookup_result.IsEmpty() && !lookup_result.value->IsDead()) {
+ ReplaceWithValue(node, lookup_result.value, effect, control);
+ node->Kill();
+ return Replace(lookup_result.value);
+ }
+
+ immutable_state =
+ immutable_state->AddField(kStringAsWtf16Index, object, node);
+
+ AbstractState const* new_state =
+ zone()->New<AbstractState>(state->mutable_state, *immutable_state);
+
+ return UpdateState(node, new_state);
+}
+
+Reduction WasmLoadElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectOutputCount() == 0) return NoChange();
+ DCHECK_EQ(node->op()->EffectInputCount(), 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (state == nullptr) return NoChange();
+ // If this {node} has some uncontrolled side effects (i.e. it is a call
+ // without {kNoWrite}), set its state to the immutable half-state of its
+ // input state, otherwise to its input state.
+ // Any cached StringPrepareForGetCodeUnit nodes must be killed at any point
+ // that can cause internalization of strings (i.e. that can turn sequential
+ // strings into thin strings). Currently, that can only happen in JS, so
+ // from Wasm's point of view only in calls.
+ return UpdateState(node, node->opcode() == IrOpcode::kCall &&
+ !node->op()->HasProperty(Operator::kNoWrite)
+ ? zone()->New<AbstractState>(
+ HalfState(zone()), state->immutable_state)
+ : state);
+}
+
+Reduction WasmLoadElimination::ReduceStart(Node* node) {
+ return UpdateState(node, empty_state());
+}
+
+Reduction WasmLoadElimination::ReduceEffectPhi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ AbstractState const* state0 = node_states_.Get(effect0);
+ if (state0 == nullptr) return NoChange();
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just take
+ // the state from the first input, and compute the loop state based on it.
+ AbstractState const* state = ComputeLoopState(node, state0);
+ return UpdateState(node, state);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 1; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_states_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's state and intersect it with the state
+ // from other inputs.
+ // TODO(manoskouk): Consider computing phis for at least a subset of the
+ // state.
+ AbstractState* state = zone()->New<AbstractState>(*state0);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ state->IntersectWith(node_states_.Get(input));
+ }
+ return UpdateState(node, state);
+}
+
+/***** AbstractState implementation *****/
+
+WasmLoadElimination::FieldOrElementValue
+WasmLoadElimination::HalfState::LookupField(int field_index,
+ Node* object) const {
+ return fields_.Get(field_index).Get(object);
+}
+
+WasmLoadElimination::HalfState const* WasmLoadElimination::HalfState::AddField(
+ int field_index, Node* object, Node* value) const {
+ HalfState* new_state = zone_->New<HalfState>(*this);
+ Update(new_state->fields_, field_index, object, FieldOrElementValue(value));
+ return new_state;
+}
+
+WasmLoadElimination::HalfState const* WasmLoadElimination::HalfState::KillField(
+ int field_index, Node* object) const {
+ const InnerMap& same_index_map = fields_.Get(field_index);
+ InnerMap new_map(same_index_map);
+ for (std::pair<Node*, FieldOrElementValue> pair : same_index_map) {
+ if (MayAlias(pair.first, object)) {
+ new_map.Set(pair.first, FieldOrElementValue());
+ }
+ }
+ HalfState* result = zone_->New<HalfState>(*this);
+ result->fields_.Set(field_index, new_map);
+ return result;
+}
+
+WasmLoadElimination::AbstractState const* WasmLoadElimination::ComputeLoopState(
+ Node* node, AbstractState const* state) const {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ std::queue<Node*> queue;
+ std::unordered_set<Node*> visited;
+ visited.insert(node);
+ for (int i = 1; i < node->InputCount() - 1; ++i) {
+ queue.push(node->InputAt(i));
+ }
+ while (!queue.empty()) {
+ Node* const current = queue.front();
+ queue.pop();
+ if (visited.insert(current).second) {
+ if (current->opcode() == IrOpcode::kWasmStructSet) {
+ Node* object = NodeProperties::GetValueInput(current, 0);
+ WasmFieldInfo field_info = OpParameter<WasmFieldInfo>(current->op());
+ bool is_mutable = field_info.type->mutability(field_info.field_index);
+ if (is_mutable) {
+ const HalfState* new_mutable_state =
+ state->mutable_state.KillField(field_info.field_index, object);
+ state = zone()->New<AbstractState>(*new_mutable_state,
+ state->immutable_state);
+ } else {
+ // TODO(manoskouk): DCHECK
+ }
+ } else if (current->opcode() == IrOpcode::kCall &&
+ !current->op()->HasProperty(Operator::kNoWrite)) {
+ return zone()->New<AbstractState>(HalfState(zone()),
+ state->immutable_state);
+ }
+ for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
+ queue.push(NodeProperties::GetEffectInput(current, i));
+ }
+ }
+ }
+ return state;
+}
+
+void WasmLoadElimination::HalfState::IntersectWith(HalfState const* that) {
+ FieldOrElementValue empty;
+ for (const std::pair<int, InnerMap> to_map : fields_) {
+ InnerMap to_map_copy(to_map.second);
+ int key = to_map.first;
+ const InnerMap& current_map = that->fields_.Get(key);
+ for (std::pair<Node*, FieldOrElementValue> value : to_map.second) {
+ if (current_map.Get(value.first) != value.second) {
+ to_map_copy.Set(value.first, empty);
+ }
+ }
+ fields_.Set(key, to_map_copy);
+ }
+}
+
+/***** Constructor/ trivial accessors *****/
+WasmLoadElimination::WasmLoadElimination(Editor* editor, JSGraph* jsgraph,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ empty_state_(zone),
+ node_states_(jsgraph->graph()->NodeCount(), zone),
+ jsgraph_(jsgraph),
+ zone_(zone) {}
+
+CommonOperatorBuilder* WasmLoadElimination::common() const {
+ return jsgraph()->common();
+}
+
+MachineOperatorBuilder* WasmLoadElimination::machine() const {
+ return jsgraph()->machine();
+}
+
+Graph* WasmLoadElimination::graph() const { return jsgraph()->graph(); }
+
+Isolate* WasmLoadElimination::isolate() const { return jsgraph()->isolate(); }
+
+} // namespace v8::internal::compiler
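
TruncateAndExtendOrType above widens packed i8/i16 field values to i32, using a shift pair for signed loads and a mask for unsigned ones, before a cached value can stand in for a load. The same arithmetic in isolation, as a hedged illustration of what the emitted Word32Shl/Word32Sar and Word32And nodes compute:

#include <cassert>
#include <cstdint>

// Widen the low `bits` (8 or 16) of `value` as a signed quantity, mirroring
// the Word32Shl/Word32Sar pair. Assumes two's-complement arithmetic shifts,
// which holds on all platforms V8 targets.
int32_t SignExtend(int32_t value, int bits) {
  const int shift = 32 - bits;
  return static_cast<int32_t>(static_cast<uint32_t>(value) << shift) >> shift;
}

// Widen the low `bits` of `value` as an unsigned quantity, mirroring the
// Word32And with an all-ones mask of the field width.
int32_t ZeroExtend(int32_t value, int bits) {
  const int32_t mask = (1 << bits) - 1;
  return value & mask;
}

int main() {
  // 0xFF stored in an i8 field reads back as -1 signed and 255 unsigned.
  assert(SignExtend(0xFF, 8) == -1);
  assert(ZeroExtend(0xFF, 8) == 255);
  // 0x8000 in an i16 field: -32768 signed, 32768 unsigned.
  assert(SignExtend(0x8000, 16) == -32768);
  assert(ZeroExtend(0x8000, 16) == 32768);
  return 0;
}
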
diff --git a/deps/v8/src/compiler/wasm-load-elimination.h b/deps/v8/src/compiler/wasm-load-elimination.h
new file mode 100644
index 0000000000..76be09d27c
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-load-elimination.h
@@ -0,0 +1,155 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_WASM_LOAD_ELIMINATION_H_
+#define V8_COMPILER_WASM_LOAD_ELIMINATION_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/persistent-map.h"
+
+namespace v8::internal::compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+struct ObjectAccess;
+
+class V8_EXPORT_PRIVATE WasmLoadElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ WasmLoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ ~WasmLoadElimination() final = default;
+ WasmLoadElimination(const WasmLoadElimination&) = delete;
+ WasmLoadElimination& operator=(const WasmLoadElimination&) = delete;
+
+ const char* reducer_name() const override { return "WasmLoadElimination"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct FieldOrElementValue {
+ FieldOrElementValue() = default;
+ explicit FieldOrElementValue(Node* value) : value(value) {}
+
+ bool operator==(const FieldOrElementValue& other) const {
+ return value == other.value;
+ }
+
+ bool operator!=(const FieldOrElementValue& other) const {
+ return !(*this == other);
+ }
+
+ bool IsEmpty() const { return value == nullptr; }
+
+ Node* value = nullptr;
+ };
+
+ class HalfState final : public ZoneObject {
+ public:
+ explicit HalfState(Zone* zone)
+ : zone_(zone),
+ fields_(zone, InnerMap(zone)),
+ elements_(zone, InnerMap(zone)) {}
+
+ bool Equals(HalfState const* that) const {
+ return fields_ == that->fields_ && elements_ == that->elements_;
+ }
+ void IntersectWith(HalfState const* that);
+ HalfState const* KillField(int field_index, Node* object) const;
+ HalfState const* AddField(int field_index, Node* object, Node* value) const;
+ FieldOrElementValue LookupField(int field_index, Node* object) const;
+ void Print() const;
+
+ private:
+ using InnerMap = PersistentMap<Node*, FieldOrElementValue>;
+ template <typename OuterKey>
+ using OuterMap = PersistentMap<OuterKey, InnerMap>;
+ // offset -> object -> info
+ using FieldInfos = OuterMap<int>;
+ // object -> offset -> info
+ using ElementInfos = OuterMap<Node*>;
+
+ // Update {map} so that {map.Get(outer_key).Get(inner_key)} returns {info}.
+ template <typename OuterKey>
+ static void Update(OuterMap<OuterKey>& map, OuterKey outer_key,
+ Node* inner_key, FieldOrElementValue info) {
+ InnerMap map_copy(map.Get(outer_key));
+ map_copy.Set(inner_key, info);
+ map.Set(outer_key, map_copy);
+ }
+
+ static void Print(const FieldInfos& infos);
+ static void Print(const ElementInfos& infos);
+
+ Zone* zone_;
+ FieldInfos fields_;
+ ElementInfos elements_;
+ };
+
+ // An {AbstractState} consists of two {HalfState}s, representing the sets of
+ // known mutable and immutable struct fields, respectively. The two
+ // half-states should not overlap.
+ struct AbstractState : public ZoneObject {
+ explicit AbstractState(Zone* zone)
+ : mutable_state(zone), immutable_state(zone) {}
+ explicit AbstractState(HalfState mutable_state, HalfState immutable_state)
+ : mutable_state(mutable_state), immutable_state(immutable_state) {}
+
+ bool Equals(AbstractState const* that) const {
+ return this->immutable_state.Equals(&that->immutable_state) &&
+ this->mutable_state.Equals(&that->mutable_state);
+ }
+ void IntersectWith(AbstractState const* that) {
+ mutable_state.IntersectWith(&that->mutable_state);
+ immutable_state.IntersectWith(&that->immutable_state);
+ }
+
+ HalfState mutable_state;
+ HalfState immutable_state;
+ };
+
+ Reduction ReduceWasmStructGet(Node* node);
+ Reduction ReduceWasmStructSet(Node* node);
+ Reduction ReduceWasmArrayLength(Node* node);
+ Reduction ReduceWasmArrayInitializeLength(Node* node);
+ Reduction ReduceStringPrepareForGetCodeunit(Node* node);
+ Reduction ReduceStringAsWtf16(Node* node);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction UpdateState(Node* node, AbstractState const* state);
+
+ AbstractState const* ComputeLoopState(Node* node,
+ AbstractState const* state) const;
+ // Returns the replacement value and effect for a load given an initial value
+ // node, after optional {TypeGuard}ing and i8/i16 adaptation to i32.
+ std::tuple<Node*, Node*> TruncateAndExtendOrType(Node* value, Node* effect,
+ Node* control,
+ wasm::ValueType field_type,
+ bool is_signed);
+ Reduction AssertUnreachable(Node* node);
+
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ Isolate* isolate() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* zone() const { return zone_; }
+ AbstractState const* empty_state() const { return &empty_state_; }
+
+ AbstractState const empty_state_;
+ NodeAuxData<AbstractState const*> node_states_;
+ JSGraph* const jsgraph_;
+ Zone* zone_;
+};
+
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_WASM_LOAD_ELIMINATION_H_
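
The header above keeps one HalfState for mutable and one for immutable struct fields, each mapping (field index, object) to the last value known to hold that field; loads reuse cached entries, stores kill possibly-aliasing mutable entries, and control-flow merges intersect the incoming states. A stripped-down model of that bookkeeping, using std::map instead of PersistentMap, strings instead of nodes, and a deliberately coarser kill rule than MayAlias:

#include <cassert>
#include <map>
#include <string>
#include <utility>

// Key: (field index, object id). Value: id of the node last known to hold the
// field's value. This mirrors HalfState's outer/inner map layout.
using ToyHalfState = std::map<std::pair<int, std::string>, std::string>;

// A store to `field` invalidates every cached entry for that field. The real
// KillField only drops entries whose object may alias the stored-to object.
void KillField(ToyHalfState& state, int field) {
  for (auto it = state.begin(); it != state.end();) {
    if (it->first.first == field) {
      it = state.erase(it);
    } else {
      ++it;
    }
  }
}

// Keep only facts present and equal in both predecessors, which is what
// AbstractState::IntersectWith does at control-flow merges.
ToyHalfState Intersect(const ToyHalfState& a, const ToyHalfState& b) {
  ToyHalfState out;
  for (const auto& entry : a) {
    auto it = b.find(entry.first);
    if (it != b.end() && it->second == entry.second) out.insert(entry);
  }
  return out;
}

int main() {
  ToyHalfState mutable_state;
  mutable_state[{0, "objA"}] = "load1";           // struct.get objA.f0
  assert(mutable_state.count({0, "objA"}) == 1);  // a second load reuses load1

  KillField(mutable_state, 0);                    // struct.set <maybe objA>.f0
  assert(mutable_state.empty());                  // the cached value is gone

  ToyHalfState left{{{1, "objA"}, "v1"}};
  ToyHalfState right{{{1, "objA"}, "v2"}};
  assert(Intersect(left, right).empty());         // disagreeing facts are dropped
  return 0;
}
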
diff --git a/deps/v8/src/compiler/wasm-typer.cc b/deps/v8/src/compiler/wasm-typer.cc
index e0eb381e5b..abff295284 100644
--- a/deps/v8/src/compiler/wasm-typer.cc
+++ b/deps/v8/src/compiler/wasm-typer.cc
@@ -9,6 +9,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/wasm-compiler-definitions.h"
#include "src/utils/utils.h"
#include "src/wasm/object-access.h"
@@ -26,7 +27,6 @@ WasmTyper::WasmTyper(Editor* editor, MachineGraph* mcgraph,
uint32_t function_index)
: AdvancedReducer(editor),
function_index_(function_index),
- mcgraph_(mcgraph),
graph_zone_(mcgraph->graph()->zone()) {}
namespace {
@@ -38,32 +38,6 @@ bool AllInputsTyped(Node* node) {
}
return true;
}
-
-// Traverse the fields of a struct until we find one at offset equal to
-// {offset}, and return its type.
-// If we are in a 32-bit platform, the code has undergone int64 lowering:
-// loads from i64 fields have been transformed into a pair of i32 loads. The
-// first load has the offset of the original field, and the second one has
-// an offset which is greater by size of i32.
-// TODO(manoskouk): Improve this.
-wasm::ValueType StructFieldFromOffset(const wasm::StructType* type,
- uint32_t offset, bool is_32) {
- for (uint32_t index = 0; index < type->field_count(); index++) {
- uint32_t field_offset = wasm::ObjectAccess::ToTagged(
- WasmStruct::kHeaderSize + type->field_offset(index));
- if (is_32 && type->field(index) == wasm::kWasmI64 &&
- field_offset + wasm::kWasmI32.value_kind_size() == offset) {
- return wasm::kWasmI32;
- }
- if (field_offset == offset) {
- wasm::ValueType field_type = type->field(index);
- return is_32 && field_type == wasm::kWasmI64 ? wasm::kWasmI32
- : field_type.Unpacked();
- }
- }
- return wasm::kWasmBottom;
-}
-
} // namespace
Reduction WasmTyper::Reduce(Node* node) {
@@ -94,42 +68,6 @@ Reduction WasmTyper::Reduce(Node* node) {
break;
}
case IrOpcode::kAssertNotNull: {
- {
- Node* object = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Optimize the common pattern where a TypeCast is followed by an
- // AssertNotNull: Reverse the order of these operations, as this will
- // unlock more optimizations later.
- // We are implementing this in the typer so we can retype the nodes.
- while (control->opcode() == IrOpcode::kWasmTypeCast &&
- effect == object && control == object &&
- !NodeProperties::GetType(object).AsWasm().type.is_bottom()) {
- Node* initial_object = NodeProperties::GetValueInput(object, 0);
- Node* previous_control = NodeProperties::GetControlInput(object);
- Node* previous_effect = NodeProperties::GetEffectInput(object);
- ReplaceWithValue(node, object);
- node->ReplaceInput(NodeProperties::FirstValueIndex(node),
- initial_object);
- node->ReplaceInput(NodeProperties::FirstEffectIndex(node),
- previous_effect);
- node->ReplaceInput(NodeProperties::FirstControlIndex(node),
- previous_control);
- object->ReplaceInput(NodeProperties::FirstValueIndex(object), node);
- object->ReplaceInput(NodeProperties::FirstEffectIndex(object), node);
- object->ReplaceInput(NodeProperties::FirstControlIndex(object), node);
- Revisit(node);
- Revisit(object);
- object = initial_object;
- control = previous_control;
- effect = previous_effect;
- // We untype the node, because its new input might have a type not
- // compatible with its current type.
- NodeProperties::RemoveType(node);
- }
- }
-
if (!AllInputsTyped(node)) return NoChange();
TypeInModule object_type =
NodeProperties::GetType(NodeProperties::GetValueInput(node, 0))
@@ -152,14 +90,20 @@ Reduction WasmTyper::Reduce(Node* node) {
node->id(), computed_type.type.name().c_str());
break;
}
- computed_type =
+
+ computed_type = {
+ wasm::kWasmBottom,
NodeProperties::GetType(NodeProperties::GetValueInput(node, 0))
- .AsWasm();
- for (int i = 1; i < node->op()->ValueInputCount(); i++) {
+ .AsWasm()
+ .module};
+ for (int i = 0; i < node->op()->ValueInputCount(); i++) {
Node* input = NodeProperties::GetValueInput(node, i);
TypeInModule input_type = NodeProperties::GetType(input).AsWasm();
- // We do not want union of types from unreachable branches.
- if (!input_type.type.is_bottom()) {
+ if (computed_type.type.is_bottom()) {
+ // We have not found a non-bottom branch yet.
+ computed_type = input_type;
+ } else if (!input_type.type.is_bottom()) {
+ // We do not want union of types from unreachable branches.
computed_type = wasm::Union(computed_type, input_type);
}
}
@@ -182,10 +126,8 @@ Reduction WasmTyper::Reduce(Node* node) {
computed_type.type.name().c_str());
break;
}
- case IrOpcode::kLoadFromObject:
- case IrOpcode::kLoadImmutableFromObject: {
+ case IrOpcode::kWasmArrayGet: {
Node* object = NodeProperties::GetValueInput(node, 0);
- Node* offset = NodeProperties::GetValueInput(node, 1);
// This can happen either because the object has not been typed yet, or
// because it is an internal VM object (e.g. the instance).
if (!NodeProperties::IsTyped(object)) return NoChange();
@@ -195,66 +137,37 @@ Reduction WasmTyper::Reduce(Node* node) {
computed_type = {wasm::kWasmBottom, object_type.module};
break;
}
- if (object_type.type.is_rtt()) return NoChange();
-
- DCHECK(object_type.type.is_object_reference());
-
- IntPtrMatcher m(offset);
- // Do not modify if we are getting the map.
- if (m.Is(wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset))) {
- return NoChange();
- }
- // Do not modify if we are retrieving the array length.
- if (object_type.type.is_reference_to(wasm::HeapType::kArray) &&
- m.Is(wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset))) {
- return NoChange();
- }
- // Do not modify if we are retrieving anything from a string or a view on
- // a string.
- if (object_type.type.is_reference_to(wasm::HeapType::kString) ||
- object_type.type.is_reference_to(wasm::HeapType::kStringViewWtf8) ||
- object_type.type.is_reference_to(wasm::HeapType::kStringViewWtf16) ||
- object_type.type.is_reference_to(wasm::HeapType::kStringViewIter)) {
- return NoChange();
- }
uint32_t ref_index = object_type.type.ref_index();
- DCHECK(object_type.module->has_type(ref_index));
- wasm::TypeDefinition type_def = object_type.module->types[ref_index];
- switch (type_def.kind) {
- case wasm::TypeDefinition::kFunction:
- // This can happen for internal structures only.
- return NoChange();
- case wasm::TypeDefinition::kStruct: {
- wasm::ValueType field_type = StructFieldFromOffset(
- type_def.struct_type, static_cast<uint32_t>(m.ResolvedValue()),
- mcgraph_->machine()->Is32());
- if (field_type.is_bottom()) {
- FATAL(
- "Error - Bottom struct field. function: %d, node %d:%s, "
- "input0: %d, type: %s, offset %d\n",
- function_index_, node->id(), node->op()->mnemonic(),
- node->InputAt(0)->id(), object_type.type.name().c_str(),
- static_cast<int>(m.ResolvedValue()));
- }
- computed_type = {field_type, object_type.module};
- break;
- }
- case wasm::TypeDefinition::kArray: {
- // Do not modify if we are retrieving the array length.
- if (m.Is(wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset))) {
- return NoChange();
- }
- wasm::ValueType element_type = type_def.array_type->element_type();
- // We have to consider that, after int64 lowering in 32-bit platforms,
- // loads from i64 arrays get transformed into pairs of i32 loads.
- computed_type = {
- mcgraph_->machine()->Is32() && element_type == wasm::kWasmI64
- ? wasm::kWasmI32
- : element_type.Unpacked(),
- object_type.module};
- break;
- }
+ DCHECK(object_type.module->has_array(ref_index));
+ const wasm::ArrayType* type_from_object =
+ object_type.module->types[ref_index].array_type;
+ computed_type = {type_from_object->element_type().Unpacked(),
+ object_type.module};
+ break;
+ }
+ case IrOpcode::kWasmStructGet: {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ // This can happen if the object has not been typed yet.
+ if (!NodeProperties::IsTyped(object)) return NoChange();
+ TypeInModule object_type = NodeProperties::GetType(object).AsWasm();
+ // This can happen in unreachable branches.
+ if (object_type.type.is_bottom() || object_type.type.is_uninhabited() ||
+ object_type.type == wasm::kWasmNullRef) {
+ computed_type = {wasm::kWasmBottom, object_type.module};
+ break;
}
+ WasmFieldInfo info = OpParameter<WasmFieldInfo>(node->op());
+
+ uint32_t ref_index = object_type.type.ref_index();
+
+ DCHECK(object_type.module->has_struct(ref_index));
+
+ const wasm::StructType* struct_type_from_object =
+ object_type.module->types[ref_index].struct_type;
+
+ computed_type = {
+ struct_type_from_object->field(info.field_index).Unpacked(),
+ object_type.module};
break;
}
default:
diff --git a/deps/v8/src/compiler/wasm-typer.h b/deps/v8/src/compiler/wasm-typer.h
index de101f5708..ccfa1d7d3e 100644
--- a/deps/v8/src/compiler/wasm-typer.h
+++ b/deps/v8/src/compiler/wasm-typer.h
@@ -33,7 +33,6 @@ class WasmTyper final : public AdvancedReducer {
private:
uint32_t function_index_;
- MachineGraph* mcgraph_;
Zone* graph_zone_;
};
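
The phi retyping change above seeds the accumulated type with bottom and only unions in inputs from reachable (non-bottom-typed) branches, so a dead predecessor in any input position no longer needs special-casing. A toy lattice showing the effect of that loop structure; ToyType and Union are stand-ins, not wasm::ValueType and wasm::Union:

#include <cassert>
#include <vector>

// A toy chain lattice: kBottom <: kStructX <: kEq <: kAny.
enum ToyType { kBottom = 0, kStructX = 1, kEq = 2, kAny = 3 };

// Like the real type union in spirit: only meant to be called on real
// (non-bottom) types, so the caller must filter unreachable inputs.
ToyType Union(ToyType a, ToyType b) {
  assert(a != kBottom && b != kBottom);
  return a > b ? a : b;
}

// Mirrors the new phi-typing loop: start from bottom, take the first
// reachable input as-is, and union in every further non-bottom input.
ToyType TypePhi(const std::vector<ToyType>& inputs) {
  ToyType result = kBottom;
  for (ToyType input : inputs) {
    if (result == kBottom) {
      result = input;
    } else if (input != kBottom) {
      result = Union(result, input);
    }
  }
  return result;
}

int main() {
  assert(TypePhi({kBottom, kStructX, kEq}) == kEq);  // dead first input is skipped
  assert(TypePhi({kStructX, kBottom}) == kStructX);  // dead later input is skipped
  assert(TypePhi({kBottom, kBottom}) == kBottom);    // phi entirely in dead code
  return 0;
}
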
diff --git a/deps/v8/src/d8/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index dccc5e515b..8076448a0b 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -67,8 +67,8 @@ AsyncHooks::AsyncHooks(Isolate* isolate) : isolate_(isolate) {
async_hooks_templ.Get(isolate_)->Set(
isolate_, "disable", FunctionTemplate::New(isolate_, DisableHook));
- async_id_smb.Reset(isolate_, Private::New(isolate_));
- trigger_id_smb.Reset(isolate_, Private::New(isolate_));
+ async_id_symbol.Reset(isolate_, Private::New(isolate_));
+ trigger_id_symbol.Reset(isolate_, Private::New(isolate_));
isolate_->SetPromiseHook(ShellPromiseHook);
}
@@ -166,6 +166,7 @@ Local<Object> AsyncHooks::CreateHook(
void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent) {
v8::Isolate* isolate = promise->GetIsolate();
+ if (isolate->IsExecutionTerminating()) return;
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
@@ -188,34 +189,40 @@ void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
++hooks->current_async_id;
Local<Integer> async_id = Integer::New(isolate, hooks->current_async_id);
CHECK(
- !promise->HasPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ !promise
+ ->HasPrivate(currentContext, hooks->async_id_symbol.Get(isolate))
.ToChecked());
- promise->SetPrivate(currentContext, hooks->async_id_smb.Get(isolate),
+ promise->SetPrivate(currentContext, hooks->async_id_symbol.Get(isolate),
async_id);
if (parent->IsPromise()) {
Local<Promise> parent_promise = parent.As<Promise>();
Local<Value> parent_async_id =
parent_promise
- ->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ ->GetPrivate(currentContext,
+ hooks->async_id_symbol.Get(isolate))
.ToLocalChecked();
- promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ promise->SetPrivate(currentContext,
+ hooks->trigger_id_symbol.Get(isolate),
parent_async_id);
} else {
CHECK(parent->IsUndefined());
- promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ promise->SetPrivate(currentContext,
+ hooks->trigger_id_symbol.Get(isolate),
Integer::New(isolate, 0));
}
} else if (type == PromiseHookType::kBefore) {
AsyncContext ctx;
ctx.execution_async_id =
- promise->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ promise
+ ->GetPrivate(currentContext, hooks->async_id_symbol.Get(isolate))
.ToLocalChecked()
.As<Integer>()
->Value();
ctx.trigger_async_id =
promise
- ->GetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate))
+ ->GetPrivate(currentContext,
+ hooks->trigger_id_symbol.Get(isolate))
.ToLocalChecked()
.As<Integer>()
->Value();
@@ -244,12 +251,13 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
AsyncHooks* hooks) {
if (!wrap.IsEnabled()) return;
v8::Isolate* v8_isolate = hooks->isolate_;
+ if (v8_isolate->IsExecutionTerminating()) return;
HandleScope handle_scope(v8_isolate);
Local<Value> rcv = Undefined(v8_isolate);
Local<Context> context = v8_isolate->GetCurrentContext();
Local<Value> async_id =
- promise->GetPrivate(context, hooks->async_id_smb.Get(v8_isolate))
+ promise->GetPrivate(context, hooks->async_id_symbol.Get(v8_isolate))
.ToLocalChecked();
Local<Value> args[1] = {async_id};
@@ -258,7 +266,8 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
if (!wrap.init_function().IsEmpty()) {
Local<Value> initArgs[4] = {
async_id, String::NewFromUtf8Literal(v8_isolate, "PROMISE"),
- promise->GetPrivate(context, hooks->trigger_id_smb.Get(v8_isolate))
+ promise
+ ->GetPrivate(context, hooks->trigger_id_symbol.Get(v8_isolate))
.ToLocalChecked(),
promise};
USE(wrap.init_function()->Call(context, rcv, 4, initArgs));
diff --git a/deps/v8/src/d8/async-hooks-wrapper.h b/deps/v8/src/d8/async-hooks-wrapper.h
index 90a03a2656..3111c3033a 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.h
+++ b/deps/v8/src/d8/async-hooks-wrapper.h
@@ -72,8 +72,8 @@ class AsyncHooks {
std::vector<std::shared_ptr<AsyncHooksWrap>> async_wraps_;
Isolate* isolate_;
Persistent<ObjectTemplate> async_hooks_templ;
- Persistent<Private> async_id_smb;
- Persistent<Private> trigger_id_smb;
+ Persistent<Private> async_id_symbol;
+ Persistent<Private> trigger_id_symbol;
static void ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent);
diff --git a/deps/v8/src/d8/d8-console.cc b/deps/v8/src/d8/d8-console.cc
index f48095302a..a4d65ba61c 100644
--- a/deps/v8/src/d8/d8-console.cc
+++ b/deps/v8/src/d8/d8-console.cc
@@ -4,6 +4,12 @@
#include "src/d8/d8-console.h"
+#include <stdio.h>
+
+#include <fstream>
+
+#include "include/v8-profiler.h"
+#include "src/d8/d8.h"
#include "src/execution/isolate.h"
namespace v8 {
@@ -30,13 +36,62 @@ void WriteToFile(const char* prefix, FILE* file, Isolate* isolate,
}
}
fprintf(file, "\n");
+ // Flush the file to avoid output piling up in a buffer. Console output is
+ // often used for timing, so it should appear as soon as the code is executed.
+ fflush(file);
}
+
+class FileOutputStream : public v8::OutputStream {
+ public:
+ explicit FileOutputStream(const char* filename)
+ : os_(filename, std::ios_base::out | std::ios_base::trunc) {}
+
+ WriteResult WriteAsciiChunk(char* data, int size) override {
+ os_.write(data, size);
+ return kContinue;
+ }
+
+ void EndOfStream() override { os_.close(); }
+
+ private:
+ std::ofstream os_;
+};
+
+static constexpr const char* kCpuProfileOutputFilename = "v8.prof";
+
+class StringOutputStream : public v8::OutputStream {
+ public:
+ WriteResult WriteAsciiChunk(char* data, int size) override {
+ os_.write(data, size);
+ return kContinue;
+ }
+
+ void EndOfStream() override {}
+
+ std::string result() { return os_.str(); }
+
+ private:
+ std::ostringstream os_;
+};
} // anonymous namespace
D8Console::D8Console(Isolate* isolate) : isolate_(isolate) {
default_timer_ = base::TimeTicks::Now();
}
+D8Console::~D8Console() { DCHECK_NULL(profiler_); }
+
+void D8Console::DisposeProfiler() {
+ if (profiler_) {
+ if (profiler_active_) {
+ profiler_->StopProfiling(String::Empty(isolate_));
+ profiler_active_ = false;
+ }
+ profiler_->Dispose();
+ profiler_ = nullptr;
+ }
+}
+
void D8Console::Assert(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
// If no arguments given, the "first" argument is undefined which is
@@ -71,6 +126,32 @@ void D8Console::Debug(const debug::ConsoleCallArguments& args,
WriteToFile("console.debug", stdout, isolate_, args);
}
+void D8Console::Profile(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
+ if (!profiler_) {
+ profiler_ = CpuProfiler::New(isolate_);
+ }
+ profiler_active_ = true;
+ profiler_->StartProfiling(String::Empty(isolate_), CpuProfilingOptions{});
+}
+
+void D8Console::ProfileEnd(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
+ if (!profiler_) return;
+ CpuProfile* profile = profiler_->StopProfiling(String::Empty(isolate_));
+ profiler_active_ = false;
+ if (!profile) return;
+ if (Shell::HasOnProfileEndListener(isolate_)) {
+ StringOutputStream out;
+ profile->Serialize(&out);
+ Shell::TriggerOnProfileEndListener(isolate_, out.result());
+ } else {
+ FileOutputStream out(kCpuProfileOutputFilename);
+ profile->Serialize(&out);
+ }
+ profile->Delete();
+}
+
void D8Console::Time(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) {
if (i::v8_flags.correctness_fuzzer_suppressions) return;
diff --git a/deps/v8/src/d8/d8-console.h b/deps/v8/src/d8/d8-console.h
index 6af5190988..8b637c0930 100644
--- a/deps/v8/src/d8/d8-console.h
+++ b/deps/v8/src/d8/d8-console.h
@@ -12,9 +12,16 @@
namespace v8 {
+class CpuProfiler;
+
class D8Console : public debug::ConsoleDelegate {
public:
explicit D8Console(Isolate* isolate);
+ ~D8Console() override;
+
+ CpuProfiler* profiler() const { return profiler_; }
+
+ void DisposeProfiler();
private:
void Assert(const debug::ConsoleCallArguments& args,
@@ -29,6 +36,10 @@ class D8Console : public debug::ConsoleDelegate {
const v8::debug::ConsoleContext&) override;
void Debug(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) override;
+ void Profile(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext& context) override;
+ void ProfileEnd(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext& context) override;
void Time(const debug::ConsoleCallArguments& args,
const v8::debug::ConsoleContext&) override;
void TimeEnd(const debug::ConsoleCallArguments& args,
@@ -41,6 +52,8 @@ class D8Console : public debug::ConsoleDelegate {
Isolate* isolate_;
std::map<std::string, base::TimeTicks> timers_;
base::TimeTicks default_timer_;
+ CpuProfiler* profiler_{nullptr};
+ bool profiler_active_{false};
};
} // namespace v8
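
console.profile() / console.profileEnd() above serialize the CpuProfile through small v8::OutputStream adapters: a FileOutputStream that writes v8.prof, or a StringOutputStream whose buffered result is handed to an onProfileEnd listener. The chunk-accumulation pattern in isolation, with a stand-in interface so the sketch runs without V8:

#include <cassert>
#include <sstream>
#include <string>

// Stand-in for v8::OutputStream: a sink that receives the serialized profile
// in chunks and is told when the stream ends.
class ChunkSink {
 public:
  virtual ~ChunkSink() = default;
  virtual void WriteChunk(const char* data, int size) = 0;
  virtual void EndOfStream() = 0;
};

// Mirrors d8's StringOutputStream: buffer every chunk so the whole profile can
// be handed to a callback as a single string once serialization finishes.
class StringSink final : public ChunkSink {
 public:
  void WriteChunk(const char* data, int size) override { os_.write(data, size); }
  void EndOfStream() override {}
  std::string result() const { return os_.str(); }

 private:
  std::ostringstream os_;
};

int main() {
  StringSink sink;
  // The producer (CpuProfile::Serialize in d8) pushes arbitrarily sized chunks.
  sink.WriteChunk("{\"nodes\":", 9);
  sink.WriteChunk("[]}", 3);
  sink.EndOfStream();
  assert(sink.result() == "{\"nodes\":[]}");
  return 0;
}
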
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index a4d5abdc44..c7e790379e 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -922,7 +922,7 @@ class FastCApiObject {
args.GetReturnValue().Set(Number::New(isolate, result));
} else {
IntegerT clamped = std::numeric_limits<IntegerT>::max();
- if (std::isnan(checked_arg_dbl)) {
+ if (std::isnan(checked_arg_dbl) || std::isnan(real_arg)) {
clamped = 0;
} else {
IntegerT lower_bound = std::numeric_limits<IntegerT>::min();
@@ -1029,6 +1029,140 @@ class FastCApiObject {
args.GetIsolate()->ThrowError("should be unreachable from wasm");
}
+ static void AssertIsExternal(const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+
+ Local<Value> value = args[0];
+
+ if (!value->IsExternal()) {
+ args.GetIsolate()->ThrowError("Did not get an external.");
+ }
+ }
+
+ static void* GetPointerFastCallback(Local<Object> receiver,
+ FastApiCallbackOptions& options) {
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(nullptr);
+ self->fast_call_count_++;
+
+ return static_cast<void*>(self);
+ }
+
+ static void GetPointerSlowCallback(const FunctionCallbackInfo<Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ self->slow_call_count_++;
+
+ args.GetReturnValue().Set(External::New(isolate, static_cast<void*>(self)));
+ }
+
+ static void* GetNullPointerFastCallback(Local<Object> receiver,
+ FastApiCallbackOptions& options) {
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(nullptr);
+ self->fast_call_count_++;
+
+ return nullptr;
+ }
+
+ static void GetNullPointerSlowCallback(
+ const FunctionCallbackInfo<Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ self->slow_call_count_++;
+
+ args.GetReturnValue().Set(v8::Null(isolate));
+ }
+
+ static void* PassPointerFastCallback(Local<Object> receiver, void* pointer,
+ FastApiCallbackOptions& options) {
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(nullptr);
+ self->fast_call_count_++;
+
+ return pointer;
+ }
+
+ static void PassPointerSlowCallback(const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ self->slow_call_count_++;
+
+ if (args.Length() != 1) {
+ args.GetIsolate()->ThrowError(
+ "Invalid number of arguments, expected one.");
+ return;
+ }
+
+ Local<Value> maybe_external = args[0].As<Value>();
+
+ if (maybe_external->IsNull()) {
+ args.GetReturnValue().Set(maybe_external);
+ return;
+ }
+ if (!maybe_external->IsExternal()) {
+ args.GetIsolate()->ThrowError("Did not get an external.");
+ return;
+ }
+
+ Local<External> external = args[0].As<External>();
+
+ args.GetReturnValue().Set(external);
+ }
+
+ static bool ComparePointersFastCallback(Local<Object> receiver,
+ void* pointer_a, void* pointer_b,
+ FastApiCallbackOptions& options) {
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(false);
+ self->fast_call_count_++;
+
+ return pointer_a == pointer_b;
+ }
+
+ static void ComparePointersSlowCallback(
+ const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ self->slow_call_count_++;
+
+ if (args.Length() != 2) {
+ args.GetIsolate()->ThrowError(
+ "Invalid number of arguments, expected two.");
+ return;
+ }
+
+ Local<Value> value_a = args[0];
+ Local<Value> value_b = args[1];
+
+ void* pointer_a;
+ if (value_a->IsNull()) {
+ pointer_a = nullptr;
+ } else if (value_a->IsExternal()) {
+ pointer_a = value_a.As<External>()->Value();
+ } else {
+ args.GetIsolate()->ThrowError(
+ "Did not get an external as first parameter.");
+ return;
+ }
+
+ void* pointer_b;
+ if (value_b->IsNull()) {
+ pointer_b = nullptr;
+ } else if (value_b->IsExternal()) {
+ pointer_b = value_b.As<External>()->Value();
+ } else {
+ args.GetIsolate()->ThrowError(
+ "Did not get an external as second parameter.");
+ return;
+ }
+
+ args.GetReturnValue().Set(pointer_a == pointer_b);
+ }
+
static void FastCallCount(const FunctionCallbackInfo<Value>& args) {
FastCApiObject* self = UnwrapObject(args.This());
CHECK_SELF_OR_THROW();
@@ -1465,6 +1599,46 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect, &test_wasm_memory_c_func));
api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "assert_is_external",
+ FunctionTemplate::New(isolate, FastCApiObject::AssertIsExternal,
+ Local<Value>(), signature, 1,
+ ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, nullptr));
+
+ CFunction get_pointer_c_func =
+ CFunction::Make(FastCApiObject::GetPointerFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "get_pointer",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::GetPointerSlowCallback, Local<Value>(),
+ signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &get_pointer_c_func));
+ CFunction get_null_pointer_c_func =
+ CFunction::Make(FastCApiObject::GetNullPointerFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "get_null_pointer",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::GetNullPointerSlowCallback, Local<Value>(),
+ signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &get_null_pointer_c_func));
+ CFunction pass_pointer_c_func =
+ CFunction::Make(FastCApiObject::PassPointerFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "pass_pointer",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::PassPointerSlowCallback, Local<Value>(),
+ signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &pass_pointer_c_func));
+ CFunction compare_pointers_c_func =
+ CFunction::Make(FastCApiObject::ComparePointersFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "compare_pointers",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::ComparePointersSlowCallback,
+ Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &compare_pointers_c_func));
+
+ api_obj_ctor->PrototypeTemplate()->Set(
isolate, "fast_call_count",
FunctionTemplate::New(
isolate, FastCApiObject::FastCallCount, Local<Value>(), signature,
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 31ed6d7526..6a31685e46 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -18,6 +18,8 @@
#include <utility>
#include <vector>
+#include "v8-isolate.h"
+
#ifdef ENABLE_VTUNE_JIT_INTERFACE
#include "src/third_party/vtune/v8-vtune.h"
#endif
@@ -69,7 +71,6 @@
#include "src/tasks/cancelable-task.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
-#include "src/web-snapshot/web-snapshot.h"
#if V8_OS_POSIX
#include <signal.h>
@@ -80,7 +81,8 @@
#endif // V8_FUZZILLI
#ifdef V8_USE_PERFETTO
-#include "perfetto/tracing.h"
+#include "perfetto/tracing/track_event.h"
+#include "perfetto/tracing/track_event_legacy.h"
#endif // V8_USE_PERFETTO
#ifdef V8_INTL_SUPPORT
@@ -341,6 +343,19 @@ class MultiMappedAllocator : public ArrayBufferAllocatorBase {
v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
+template <int N>
+bool ThrowError(Isolate* isolate, const char (&message)[N]) {
+ if (isolate->IsExecutionTerminating()) return false;
+ isolate->ThrowError(message);
+ return true;
+}
+
+bool ThrowError(Isolate* isolate, Local<String> message) {
+ if (isolate->IsExecutionTerminating()) return false;
+ isolate->ThrowError(message);
+ return true;
+}
+
static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate,
Local<Context> context,
Local<v8::Object> object,
@@ -358,13 +373,13 @@ static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
std::shared_ptr<Worker> GetWorkerFromInternalField(Isolate* isolate,
Local<Object> object) {
if (object->InternalFieldCount() != 1) {
- isolate->ThrowError("this is not a Worker");
+ ThrowError(isolate, "this is not a Worker");
return nullptr;
}
i::Handle<i::Object> handle = Utils::OpenHandle(*object->GetInternalField(0));
if (handle->IsSmi()) {
- isolate->ThrowError("Worker is defunct because main thread is terminating");
+ ThrowError(isolate, "Worker is defunct because main thread is terminating");
return nullptr;
}
auto managed = i::Handle<i::Managed<Worker>>::cast(handle);
@@ -465,6 +480,9 @@ CounterCollection* Shell::counters_ = &local_counters_;
base::LazyMutex Shell::context_mutex_;
const base::TimeTicks Shell::kInitialTicks = base::TimeTicks::Now();
Global<Function> Shell::stringify_function_;
+base::Mutex Shell::profiler_end_callback_lock_;
+std::map<Isolate*, std::pair<Global<Function>, Global<Context>>>
+ Shell::profiler_end_callback_;
base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
std::unordered_set<std::shared_ptr<Worker>> Shell::running_workers_;
@@ -911,52 +929,6 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
return success;
}
-bool Shell::TakeWebSnapshot(Isolate* isolate) {
- PerIsolateData* data = PerIsolateData::Get(isolate);
- Local<Context> realm =
- Local<Context>::New(isolate, data->realms_[data->realm_current_]);
- Context::Scope context_scope(realm);
- Local<Context> context(isolate->GetCurrentContext());
-
- v8::TryCatch try_catch(isolate);
- try_catch.SetVerbose(true);
- const char* web_snapshot_output_file_name = "web.snap";
- if (options.web_snapshot_output) {
- web_snapshot_output_file_name = options.web_snapshot_output;
- }
-
- if (!options.web_snapshot_config) {
- isolate->ThrowError(
- "Web snapshots: --web-snapshot-config is needed when "
- "--web-snapshot-output is passed");
- CHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
- return false;
- }
-
- MaybeLocal<PrimitiveArray> maybe_exports =
- ReadLines(isolate, options.web_snapshot_config);
- Local<PrimitiveArray> exports;
- if (!maybe_exports.ToLocal(&exports)) {
- isolate->ThrowError("Web snapshots: unable to read config");
- CHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
- return false;
- }
-
- i::WebSnapshotSerializer serializer(isolate);
- i::WebSnapshotData snapshot_data;
- if (serializer.TakeSnapshot(context, exports, snapshot_data)) {
- DCHECK_NOT_NULL(snapshot_data.buffer);
- WriteChars(web_snapshot_output_file_name, snapshot_data.buffer,
- snapshot_data.buffer_size);
- } else {
- CHECK(try_catch.HasCaught());
- return false;
- }
- return true;
-}
-
namespace {
bool IsAbsolutePath(const std::string& path) {
@@ -1075,8 +1047,8 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
CHECK(specifier_it != module_data->module_to_specifier_map.end());
msg += "\n imported by " + specifier_it->second;
}
- isolate->ThrowError(
- v8::String::NewFromUtf8(isolate, msg.c_str()).ToLocalChecked());
+ ThrowError(isolate,
+ v8::String::NewFromUtf8(isolate, msg.c_str()).ToLocalChecked());
return MaybeLocal<Module>();
}
@@ -1139,7 +1111,7 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
context, import_assertions, true);
if (request_module_type == ModuleType::kInvalid) {
- isolate->ThrowError("Invalid module type was asserted");
+ ThrowError(isolate, "Invalid module type was asserted");
return MaybeLocal<Module>();
}
@@ -1317,6 +1289,10 @@ MaybeLocal<Context> Shell::HostCreateShadowRealmContext(
InitializeModuleEmbedderData(context);
std::shared_ptr<ModuleEmbedderData> initiator_data =
GetModuleDataFromContext(initiator_context);
+
+ // ShadowRealms are synchronously accessible and are always in the same origin
+ // as the initiator context.
+ context->SetSecurityToken(initiator_context->GetSecurityToken());
shadow_realm_data->origin = initiator_data->origin;
return context;
@@ -1348,7 +1324,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
try_catch.SetVerbose(true);
if (module_type == ModuleType::kInvalid) {
- isolate->ThrowError("Invalid module type was asserted");
+ ThrowError(isolate, "Invalid module type was asserted");
CHECK(try_catch.HasCaught());
resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
@@ -1492,44 +1468,6 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
return true;
}
-bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
- HandleScope handle_scope(isolate);
-
- PerIsolateData* data = PerIsolateData::Get(isolate);
- Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
- Context::Scope context_scope(realm);
-
- std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
-
- int length = 0;
- std::unique_ptr<uint8_t[]> snapshot_data(
- reinterpret_cast<uint8_t*>(ReadChars(absolute_path.c_str(), &length)));
- if (length == 0) {
- TryCatch try_catch(isolate);
- isolate->ThrowError("Could not read the web snapshot file");
- CHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
- return false;
- } else {
- for (int r = 0; r < DeserializationRunCount(); ++r) {
- bool skip_exports = r > 0;
- i::WebSnapshotDeserializer deserializer(isolate, snapshot_data.get(),
- static_cast<size_t>(length));
- if (!deserializer.Deserialize({}, skip_exports)) {
- // d8 is calling into the internal APIs which won't do
- // ReportPendingMessages in all error paths (it's supposed to be done at
- // the API boundary). Call it here.
- auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (i_isolate->has_pending_exception()) {
- i_isolate->ReportPendingMessages();
- }
- return false;
- }
- }
- }
- return true;
-}
-
// Treat every line as a JSON value and parse it.
bool Shell::LoadJSON(Isolate* isolate, const char* file_name) {
HandleScope handle_scope(isolate);
@@ -1572,10 +1510,6 @@ PerIsolateData::PerIsolateData(Isolate* isolate)
async_hooks_wrapper_ = new AsyncHooks(isolate);
}
ignore_unhandled_promises_ = false;
- // TODO(v8:11525): Use methods on global Snapshot objects with
- // signature checks.
- HandleScope scope(isolate);
- Shell::CreateSnapshotTemplate(isolate);
}
PerIsolateData::~PerIsolateData() {
@@ -1636,6 +1570,7 @@ void PerIsolateData::AddUnhandledPromise(Local<Promise> promise,
int PerIsolateData::HandleUnhandledPromiseRejections() {
// Avoid recursive calls to HandleUnhandledPromiseRejections.
if (ignore_unhandled_promises_) return 0;
+ if (isolate_->IsExecutionTerminating()) return 0;
ignore_unhandled_promises_ = true;
v8::HandleScope scope(isolate_);
// Ignore promises that get added during error reporting.
@@ -1671,14 +1606,6 @@ void PerIsolateData::SetTestApiObjectCtor(Local<FunctionTemplate> ctor) {
test_api_object_ctor_.Reset(isolate_, ctor);
}
-Local<FunctionTemplate> PerIsolateData::GetSnapshotObjectCtor() const {
- return snapshot_object_ctor_.Get(isolate_);
-}
-
-void PerIsolateData::SetSnapshotObjectCtor(Local<FunctionTemplate> ctor) {
- snapshot_object_ctor_.Reset(isolate_, ctor);
-}
-
Local<FunctionTemplate> PerIsolateData::GetDomNodeCtor() const {
return dom_node_ctor_.Get(isolate_);
}
@@ -1730,14 +1657,14 @@ int PerIsolateData::RealmFind(Local<Context> context) {
int PerIsolateData::RealmIndexOrThrow(
const v8::FunctionCallbackInfo<v8::Value>& args, int arg_offset) {
if (args.Length() < arg_offset || !args[arg_offset]->IsNumber()) {
- args.GetIsolate()->ThrowError("Invalid argument");
+ ThrowError(args.GetIsolate(), "Invalid argument");
return -1;
}
int index = args[arg_offset]
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (index < 0 || index >= realm_count_ || realms_[index].IsEmpty()) {
- args.GetIsolate()->ThrowError("Invalid realm index");
+ ThrowError(args.GetIsolate(), "Invalid realm index");
return -1;
}
return index;
@@ -1763,7 +1690,7 @@ uint64_t Shell::GetTracingTimestampFromPerformanceTimestamp(
base::TimeDelta::FromMillisecondsD(performance_timestamp);
// See TracingController::CurrentTimestampMicroseconds().
int64_t internal_value = (delta + kInitialTicks).ToInternalValue();
- DCHECK(internal_value >= 0);
+ DCHECK_GE(internal_value, 0);
return internal_value;
}
@@ -1779,7 +1706,7 @@ void Shell::PerformanceMark(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
if (args.Length() < 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowError("Invalid 'name' argument");
+ ThrowError(args.GetIsolate(), "Invalid 'name' argument");
return;
}
Local<String> name = args[0].As<String>();
@@ -1818,7 +1745,7 @@ void Shell::PerformanceMeasure(
Local<Context> context = isolate->GetCurrentContext();
if (args.Length() < 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowError("Invalid 'name' argument");
+ ThrowError(args.GetIsolate(), "Invalid 'name' argument");
return;
}
v8::Local<String> name = args[0].As<String>();
@@ -1827,8 +1754,8 @@ void Shell::PerformanceMeasure(
if (args.Length() >= 2) {
Local<Value> start_mark = args[1].As<Value>();
if (!start_mark->IsObject()) {
- args.GetIsolate()->ThrowError(
- "Invalid 'startMark' argument: Not an Object");
+ ThrowError(args.GetIsolate(),
+ "Invalid 'startMark' argument: Not an Object");
return;
}
Local<Value> start_time_field;
@@ -1838,14 +1765,14 @@ void Shell::PerformanceMeasure(
return;
}
if (!start_time_field->IsNumber()) {
- args.GetIsolate()->ThrowError(
- "Invalid 'startMark' argument: No numeric 'startTime' field");
+ ThrowError(args.GetIsolate(),
+ "Invalid 'startMark' argument: No numeric 'startTime' field");
return;
}
start_timestamp = start_time_field.As<Number>()->Value();
}
if (args.Length() > 2) {
- args.GetIsolate()->ThrowError("Too many arguments");
+ ThrowError(args.GetIsolate(), "Too many arguments");
return;
}
@@ -1931,7 +1858,7 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
- args.GetIsolate()->ThrowError("Invalid argument");
+ ThrowError(args.GetIsolate(), "Invalid argument");
return;
}
Local<Object> object =
@@ -1943,7 +1870,7 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> creation_context;
if (!object->GetCreationContext().ToLocal(&creation_context)) {
- args.GetIsolate()->ThrowError("object doesn't have creation context");
+ ThrowError(args.GetIsolate(), "object doesn't have creation context");
return;
}
int index = data->RealmFind(creation_context);
@@ -2017,7 +1944,6 @@ void Shell::DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
// ContextDisposedNotification expects the disposed context to be entered.
v8::Context::Scope scope(context);
isolate->ContextDisposedNotification();
- isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
}
// Realm.create() creates a new realm with a distinct security token
@@ -2046,7 +1972,7 @@ void Shell::RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (index == -1) return;
if (index == 0 || index == data->realm_current_ ||
index == data->realm_switch_) {
- args.GetIsolate()->ThrowError("Invalid realm index");
+ ThrowError(args.GetIsolate(), "Invalid realm index");
return;
}
@@ -2075,7 +2001,7 @@ void Shell::RealmDetachGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (index == -1) return;
if (index == 0 || index == data->realm_current_ ||
index == data->realm_switch_) {
- args.GetIsolate()->ThrowError("Invalid realm index");
+ ThrowError(args.GetIsolate(), "Invalid realm index");
return;
}
@@ -2092,7 +2018,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (index == -1) return;
if (index == 0 || index == data->realm_current_ ||
index == data->realm_switch_) {
- args.GetIsolate()->ThrowError("Invalid realm index");
+ ThrowError(args.GetIsolate(), "Invalid realm index");
return;
}
DisposeRealm(args, index);
@@ -2114,13 +2040,13 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
int index = data->RealmIndexOrThrow(args, 0);
if (index == -1) return;
if (args.Length() < 2) {
- isolate->ThrowError("Invalid argument");
+ ThrowError(isolate, "Invalid argument");
return;
}
Local<String> source;
if (!ReadSource(args, 1, CodeType::kString).ToLocal(&source)) {
- isolate->ThrowError("Invalid argument");
+ ThrowError(isolate, "Invalid argument");
return;
}
ScriptOrigin origin =
@@ -2161,100 +2087,6 @@ void Shell::RealmSharedSet(Local<String> property, Local<Value> value,
data->realm_shared_.Reset(isolate, value);
}
-// Realm.takeWebSnapshot(index, exports) takes a snapshot of the list of exports
-// in the realm with the specified index and returns the result.
-void Shell::RealmTakeWebSnapshot(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = args.GetIsolate();
- if (args.Length() < 2 || !args[1]->IsArray()) {
- isolate->ThrowError("Invalid argument");
- return;
- }
- PerIsolateData* data = PerIsolateData::Get(isolate);
- int index = data->RealmIndexOrThrow(args, 0);
- if (index == -1) return;
- // Create a Local<PrimitiveArray> from the exports array.
- Local<Context> current_context = isolate->GetCurrentContext();
- Local<Array> exports_array = args[1].As<Array>();
- int length = exports_array->Length();
- Local<PrimitiveArray> exports = PrimitiveArray::New(isolate, length);
- for (int i = 0; i < length; ++i) {
- Local<Value> value;
- Local<String> str;
- if (!exports_array->Get(current_context, i).ToLocal(&value) ||
- !value->ToString(current_context).ToLocal(&str) || str.IsEmpty()) {
- isolate->ThrowError("Invalid argument");
- return;
- }
- exports->Set(isolate, i, str);
- }
- // Take the snapshot in the specified Realm.
- auto snapshot_data_shared = std::make_shared<i::WebSnapshotData>();
- {
- TryCatch try_catch(isolate);
- try_catch.SetVerbose(true);
- PerIsolateData::ExplicitRealmScope realm_scope(data, index);
- i::WebSnapshotSerializer serializer(isolate);
- if (!serializer.TakeSnapshot(realm_scope.context(), exports,
- *snapshot_data_shared)) {
- CHECK(try_catch.HasCaught());
- args.GetReturnValue().Set(Undefined(isolate));
- return;
- }
- }
- // Create a snapshot object and store the WebSnapshotData as an embedder
- // field. TODO(v8:11525): Use methods on global Snapshot objects with
- // signature checks.
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::Object> snapshot_data_managed =
- i::Managed<i::WebSnapshotData>::FromSharedPtr(
- i_isolate, snapshot_data_shared->buffer_size, snapshot_data_shared);
- v8::Local<v8::Value> shapshot_data = Utils::ToLocal(snapshot_data_managed);
- Local<ObjectTemplate> snapshot_template =
- data->GetSnapshotObjectCtor()->InstanceTemplate();
- Local<Object> snapshot_instance =
- snapshot_template->NewInstance(isolate->GetCurrentContext())
- .ToLocalChecked();
- snapshot_instance->SetInternalField(0, shapshot_data);
- args.GetReturnValue().Set(snapshot_instance);
-}
-
-// Realm.useWebSnapshot(index, snapshot) deserializes the snapshot in the realm
-// with the specified index.
-void Shell::RealmUseWebSnapshot(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = args.GetIsolate();
- if (args.Length() < 2 || !args[1]->IsObject()) {
- isolate->ThrowError("Invalid argument");
- return;
- }
- PerIsolateData* data = PerIsolateData::Get(isolate);
- int index = data->RealmIndexOrThrow(args, 0);
- if (index == -1) return;
- // Restore the snapshot data from the snapshot object.
- Local<Object> snapshot_instance = args[1].As<Object>();
- Local<FunctionTemplate> snapshot_template = data->GetSnapshotObjectCtor();
- if (!snapshot_template->HasInstance(snapshot_instance)) {
- isolate->ThrowError("Invalid argument");
- return;
- }
- v8::Local<v8::Value> snapshot_data = snapshot_instance->GetInternalField(0);
- i::Handle<i::Object> snapshot_data_handle = Utils::OpenHandle(*snapshot_data);
- auto snapshot_data_managed =
- i::Handle<i::Managed<i::WebSnapshotData>>::cast(snapshot_data_handle);
- std::shared_ptr<i::WebSnapshotData> snapshot_data_shared =
- snapshot_data_managed->get();
- // Deserialize the snapshot in the specified Realm.
- {
- PerIsolateData::ExplicitRealmScope realm_scope(data, index);
- i::WebSnapshotDeserializer deserializer(isolate,
- snapshot_data_shared->buffer,
- snapshot_data_shared->buffer_size);
- bool success = deserializer.Deserialize();
- args.GetReturnValue().Set(success);
- }
-}
-
void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -2262,18 +2094,18 @@ void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
std::string file_name = i_isolate->v8_file_logger()->file_name();
if (!i::LogFile::IsLoggingToTemporaryFile(file_name)) {
- isolate->ThrowError("Only capturing from temporary files is supported.");
+ ThrowError(isolate, "Only capturing from temporary files is supported.");
return;
}
if (!i_isolate->v8_file_logger()->is_logging()) {
- isolate->ThrowError("Logging not enabled.");
+ ThrowError(isolate, "Logging not enabled.");
return;
}
std::string raw_log;
FILE* log_file = i_isolate->v8_file_logger()->TearDownAndGetLogFile();
if (!log_file) {
- isolate->ThrowError("Log file does not exist.");
+ ThrowError(isolate, "Log file does not exist.");
return;
}
@@ -2282,7 +2114,7 @@ void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
base::Fclose(log_file);
if (!exists) {
- isolate->ThrowError("Unable to read log file.");
+ ThrowError(isolate, "Unable to read log file.");
return;
}
Local<String> result =
@@ -2298,14 +2130,14 @@ void Shell::TestVerifySourcePositions(
Isolate* isolate = args.GetIsolate();
// Check if the argument is a valid function.
if (args.Length() != 1) {
- isolate->ThrowError("Expected function as single argument.");
+ ThrowError(isolate, "Expected function as single argument.");
return;
}
auto arg_handle = Utils::OpenHandle(*args[0]);
if (!arg_handle->IsHeapObject() ||
!i::Handle<i::HeapObject>::cast(arg_handle)
->IsJSFunctionOrBoundFunctionOrWrappedFunction()) {
- isolate->ThrowError("Expected function as single argument.");
+ ThrowError(isolate, "Expected function as single argument.");
return;
}
@@ -2321,7 +2153,7 @@ void Shell::TestVerifySourcePositions(
auto bound_target = bound_function->bound_target_function();
if (!bound_target.IsJSFunctionOrBoundFunctionOrWrappedFunction()) {
internal::AllowGarbageCollection allow_gc;
- isolate->ThrowError("Expected function as bound target.");
+ ThrowError(isolate, "Expected function as bound target.");
return;
}
callable = handle(
@@ -2331,7 +2163,7 @@ void Shell::TestVerifySourcePositions(
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(callable);
if (!function->shared().HasBytecodeArray()) {
- isolate->ThrowError("Function has no BytecodeArray attached.");
+ ThrowError(isolate, "Function has no BytecodeArray attached.");
return;
}
i::Handle<i::BytecodeArray> bytecodes =
@@ -2341,10 +2173,10 @@ void Shell::TestVerifySourcePositions(
i::Handle<i::ByteArray> bytecode_offsets;
std::unique_ptr<i::baseline::BytecodeOffsetIterator> offset_iterator;
if (has_baseline) {
- bytecode_offsets =
- handle(i::ByteArray::cast(
- function->shared().GetCode().bytecode_offset_table()),
- i_isolate);
+ bytecode_offsets = handle(
+ i::ByteArray::cast(
+ function->shared().GetCode(i_isolate).bytecode_offset_table()),
+ i_isolate);
offset_iterator = std::make_unique<i::baseline::BytecodeOffsetIterator>(
bytecode_offsets, bytecodes);
// A freshly initiated BytecodeOffsetIterator points to the prologue.
@@ -2357,7 +2189,7 @@ void Shell::TestVerifySourcePositions(
if (has_baseline) {
if (offset_iterator->current_bytecode_offset() !=
bytecode_iterator.current_offset()) {
- isolate->ThrowError("Baseline bytecode offset mismatch.");
+ ThrowError(isolate, "Baseline bytecode offset mismatch.");
return;
}
// Check that we map every address to this bytecode correctly.
@@ -2369,8 +2201,8 @@ void Shell::TestVerifySourcePositions(
pc_lookup.AdvanceToPCOffset(pc);
if (pc_lookup.current_bytecode_offset() !=
bytecode_iterator.current_offset()) {
- isolate->ThrowError(
- "Baseline bytecode offset mismatch for PC lookup.");
+ ThrowError(isolate,
+ "Baseline bytecode offset mismatch for PC lookup.");
return;
}
}
@@ -2378,14 +2210,14 @@ void Shell::TestVerifySourcePositions(
bytecode_iterator.Advance();
if (has_baseline && !bytecode_iterator.done()) {
if (offset_iterator->done()) {
- isolate->ThrowError("Missing bytecode(s) in baseline offset mapping.");
+ ThrowError(isolate, "Missing bytecode(s) in baseline offset mapping.");
return;
}
offset_iterator->Advance();
}
}
if (has_baseline && !offset_iterator->done()) {
- isolate->ThrowError("Excess offsets in baseline offset mapping.");
+ ThrowError(isolate, "Excess offsets in baseline offset mapping.");
return;
}
}
@@ -2442,9 +2274,9 @@ void Shell::SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args) {
// with certain promise optimizations. We might not get all callbacks for
// previously scheduled Promises or optimized code-paths that skip Promise
// creation.
- isolate->ThrowError(
- "d8.promise.setHooks is disabled with "
- "--correctness-fuzzer-suppressions");
+ ThrowError(isolate,
+ "d8.promise.setHooks is disabled with "
+ "--correctness-fuzzer-suppressions");
return;
}
#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
@@ -2459,9 +2291,9 @@ void Shell::SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(v8::Undefined(isolate));
#else // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
- isolate->ThrowError(
- "d8.promise.setHooks is disabled due to missing build flag "
- "v8_enabale_javascript_in_promise_hooks");
+ ThrowError(isolate,
+ "d8.promise.setHooks is disabled due to missing build flag "
+               "v8_enable_javascript_promise_hooks");
#endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
}
@@ -2494,7 +2326,7 @@ void Shell::SerializerDeserialize(
Local<Context> context = isolate->GetCurrentContext();
if (!args[0]->IsArrayBuffer()) {
- isolate->ThrowError("Can only deserialize from an ArrayBuffer");
+ ThrowError(isolate, "Can only deserialize from an ArrayBuffer");
return;
}
std::shared_ptr<BackingStore> backing_store =
@@ -2509,10 +2341,75 @@ void Shell::SerializerDeserialize(
args.GetReturnValue().Set(result);
}
-void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
- for (int i = 0; i < args.Length(); i++) {
+void Shell::ProfilerSetOnProfileEndListener(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ if (!args[0]->IsFunction()) {
+ ThrowError(isolate, "The OnProfileEnd listener has to be a function");
+ return;
+ }
+ base::MutexGuard lock_guard(&profiler_end_callback_lock_);
+ profiler_end_callback_[isolate] =
+ std::make_pair(Global<Function>(isolate, args[0].As<Function>()),
+ Global<Context>(isolate, isolate->GetCurrentContext()));
+}
+
+bool Shell::HasOnProfileEndListener(Isolate* isolate) {
+ base::MutexGuard lock_guard(&profiler_end_callback_lock_);
+ return profiler_end_callback_.find(isolate) != profiler_end_callback_.end();
+}
+
+void Shell::ResetOnProfileEndListener(Isolate* isolate) {
+ // If the inspector is enabled, then the installed console is not the
+ // D8Console.
+ if (options.enable_inspector) return;
+ {
+ base::MutexGuard lock_guard(&profiler_end_callback_lock_);
+ profiler_end_callback_.erase(isolate);
+ }
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ D8Console* console =
+ reinterpret_cast<D8Console*>(i_isolate->console_delegate());
+ if (console) {
+ console->DisposeProfiler();
+ }
+}
+
+void Shell::ProfilerTriggerSample(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ D8Console* console =
+ reinterpret_cast<D8Console*>(i_isolate->console_delegate());
+ if (console && console->profiler()) {
+ console->profiler()->CollectSample(isolate);
+ }
+}
+
+void Shell::TriggerOnProfileEndListener(Isolate* isolate, std::string profile) {
+ CHECK(HasOnProfileEndListener(isolate));
+ Local<Function> callback;
+ Local<Context> context;
+ Local<Value> argv[1] = {
+ String::NewFromUtf8(isolate, profile.c_str()).ToLocalChecked()};
+ {
+ base::MutexGuard lock_guard(&profiler_end_callback_lock_);
+ auto& callback_pair = profiler_end_callback_[isolate];
+ callback = callback_pair.first.Get(isolate);
+ context = callback_pair.second.Get(isolate);
+ }
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+ USE(callback->Call(context, Undefined(isolate), 1, argv));
+}
+
+void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args,
+ int first_arg_index = 0) {
+ for (int i = first_arg_index; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
- if (i != 0) {
+ if (i != first_arg_index) {
fprintf(file, " ");
}
@@ -2558,10 +2455,59 @@ void Shell::WriteStdout(const v8::FunctionCallbackInfo<v8::Value>& args) {
WriteToFile(stdout, args);
}
+// There are two overloads of writeFile().
+//
+// The first parameter is always the filename.
+//
+// If there are exactly 2 arguments, and the second argument is an ArrayBuffer
+// or an ArrayBufferView, write the binary contents into the file.
+//
+// Otherwise, convert arguments to UTF-8 strings, and write them to the file,
+// separated by space.
+void Shell::WriteFile(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ String::Utf8Value file_name(args.GetIsolate(), args[0]);
+ if (*file_name == nullptr) {
+ ThrowError(args.GetIsolate(), "Error converting filename to string");
+ return;
+ }
+ FILE* file;
+ if (args.Length() == 2 &&
+ (args[1]->IsArrayBuffer() || args[1]->IsArrayBufferView())) {
+ file = base::Fopen(*file_name, "wb");
+ if (file == nullptr) {
+ ThrowError(args.GetIsolate(), "Error opening file");
+ return;
+ }
+
+ void* data;
+ size_t length;
+ if (args[1]->IsArrayBuffer()) {
+ Local<v8::ArrayBuffer> buffer = Local<v8::ArrayBuffer>::Cast(args[1]);
+ length = buffer->ByteLength();
+ data = buffer->Data();
+ } else {
+ Local<v8::ArrayBufferView> buffer_view =
+ Local<v8::ArrayBufferView>::Cast(args[1]);
+ length = buffer_view->ByteLength();
+ data = static_cast<uint8_t*>(buffer_view->Buffer()->Data()) +
+ buffer_view->ByteOffset();
+ }
+ fwrite(data, 1, length, file);
+ } else {
+ file = base::Fopen(*file_name, "w");
+ if (file == nullptr) {
+ ThrowError(args.GetIsolate(), "Error opening file");
+ return;
+ }
+ WriteToFile(file, args, 1);
+ }
+ base::Fclose(file);
+}
+
void Shell::ReadFile(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file_name(args.GetIsolate(), args[0]);
if (*file_name == nullptr) {
- args.GetIsolate()->ThrowError("Error converting filename to string");
+ ThrowError(args.GetIsolate(), "Error converting filename to string");
return;
}
if (args.Length() == 2) {
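The WriteFile implementation above follows the two overloads described in its comment. A minimal sketch of the corresponding shell calls, assuming d8 is run without --fuzzing (the registration further down only installs writeFile in that case):

    // Hypothetical d8/JavaScript usage.
    writeFile("strings.txt", "hello", "world");        // text mode: "hello world"
    writeFile("data.bin", new Uint8Array([1, 2, 3]));  // binary mode: raw bytes
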
@@ -2624,7 +2570,8 @@ void Shell::ExecuteFile(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (*file_name == nullptr) {
std::ostringstream oss;
oss << "Cannot convert file[" << i << "] name to string.";
- isolate->ThrowError(
+ ThrowError(
+ isolate,
String::NewFromUtf8(isolate, oss.str().c_str()).ToLocalChecked());
return;
}
@@ -2638,7 +2585,8 @@ void Shell::ExecuteFile(const v8::FunctionCallbackInfo<v8::Value>& args) {
kNoProcessMessageQueue)) {
std::ostringstream oss;
oss << "Error executing file: \"" << *file_name << '"';
- isolate->ThrowError(
+ ThrowError(
+ isolate,
String::NewFromUtf8(isolate, oss.str().c_str()).ToLocalChecked());
return;
}
@@ -2701,7 +2649,7 @@ bool Shell::FunctionAndArgumentsToString(Local<Function> function,
function->FunctionProtoToString(context);
Local<String> function_string;
if (!maybe_function_string.ToLocal(&function_string)) {
- isolate->ThrowError("Failed to convert function to string");
+ ThrowError(isolate, "Failed to convert function to string");
return false;
}
*source = String::NewFromUtf8Literal(isolate, "(");
@@ -2710,7 +2658,7 @@ bool Shell::FunctionAndArgumentsToString(Local<Function> function,
*source = String::Concat(isolate, *source, middle);
if (!arguments.IsEmpty() && !arguments->IsUndefined()) {
if (!arguments->IsArray()) {
- isolate->ThrowError("'arguments' must be an array");
+ ThrowError(isolate, "'arguments' must be an array");
return false;
}
Local<String> comma = String::NewFromUtf8Literal(isolate, ",");
@@ -2722,12 +2670,12 @@ bool Shell::FunctionAndArgumentsToString(Local<Function> function,
MaybeLocal<Value> maybe_argument = array->Get(context, i);
Local<Value> argument;
if (!maybe_argument.ToLocal(&argument)) {
- isolate->ThrowError("Failed to get argument");
+ ThrowError(isolate, "Failed to get argument");
return false;
}
Local<String> argument_string;
if (!JSON::Stringify(context, argument).ToLocal(&argument_string)) {
- isolate->ThrowError("Failed to convert argument to string");
+ ThrowError(isolate, "Failed to convert argument to string");
return false;
}
*source = String::Concat(isolate, *source, argument_string);
@@ -2791,18 +2739,18 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
if (args.Length() < 1 || (!args[0]->IsString() && !args[0]->IsFunction())) {
- isolate->ThrowError("1st argument must be a string or a function");
+ ThrowError(isolate, "1st argument must be a string or a function");
return;
}
Local<String> source;
if (!ReadSource(args, 0, CodeType::kFileName).ToLocal(&source)) {
- isolate->ThrowError("Invalid argument");
+ ThrowError(isolate, "Invalid argument");
return;
}
if (!args.IsConstructCall()) {
- isolate->ThrowError("Worker must be constructed with new");
+ ThrowError(isolate, "Worker must be constructed with new");
return;
}
@@ -2821,7 +2769,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value script(isolate, source);
if (!*script) {
- isolate->ThrowError("Can't get worker script");
+ ThrowError(isolate, "Can't get worker script");
return;
}
@@ -2835,7 +2783,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
i_isolate, kWorkerSizeEstimate, worker);
args.Holder()->SetInternalField(0, Utils::ToLocal(managed));
if (!Worker::StartWorkerThread(isolate, std::move(worker))) {
- isolate->ThrowError("Can't start thread");
+ ThrowError(isolate, "Can't start thread");
return;
}
}
@@ -2846,7 +2794,7 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(isolate);
if (args.Length() < 1) {
- isolate->ThrowError("Invalid argument");
+ ThrowError(isolate, "Invalid argument");
return;
}
@@ -2913,6 +2861,7 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
->Int32Value(args->GetIsolate()->GetCurrentContext())
.FromMaybe(0);
Isolate* isolate = args->GetIsolate();
+ ResetOnProfileEndListener(isolate);
isolate->Exit();
// As we exit the process anyway, we do not dispose the platform and other
@@ -2923,10 +2872,30 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
i_isolate->thread_manager()->Unlock();
}
+ // When disposing the shared space isolate, the workers (client isolates) need
+ // to be terminated first.
+ if (i_isolate->is_shared_space_isolate()) {
+ i::ParkedScope parked(i_isolate->main_thread_local_isolate());
+ WaitForRunningWorkers(parked);
+ }
+
OnExit(isolate, false);
base::OS::ExitProcess(exit_code);
}
+void Shell::Terminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // Triggering termination from JS can cause some non-determinism thus we
+ // skip it for correctness fuzzing.
+ // Termination also currently breaks Fuzzilli's REPRL mechanism as the
+ // scheduled termination will prevent the next testcase sent by Fuzzilli from
+ // being processed. This will in turn desynchronize the communication
+ // between d8 and Fuzzilli, leading to a crash.
+ if (!i::v8_flags.correctness_fuzzer_suppressions && !fuzzilli_reprl) {
+ auto v8_isolate = args.GetIsolate();
+ if (!v8_isolate->IsExecutionTerminating()) v8_isolate->TerminateExecution();
+ }
+}
+
void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
base::CallOnce(&quit_once_, &QuitOnce,
const_cast<v8::FunctionCallbackInfo<v8::Value>*>(&args));
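Shell::Terminate above schedules TerminateExecution() on the current isolate unless correctness fuzzing or Fuzzilli REPRL is active; it is exposed as d8.terminate in CreateD8Template further down. A minimal sketch of the intended effect (how promptly the termination interrupt is observed is an assumption):

    // Hypothetical d8/JavaScript usage.
    d8.terminate();
    while (true) {}  // unwound once the scheduled termination is processed
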
@@ -2961,7 +2930,6 @@ void Shell::Fuzzilli(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (*operation == nullptr) {
return;
}
-
if (strcmp(*operation, "FUZZILLI_CRASH") == 0) {
auto arg = args[1]
->Int32Value(args.GetIsolate()->GetCurrentContext())
@@ -3273,6 +3241,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
FunctionTemplate::New(isolate, PrintErr));
global_template->Set(isolate, "write",
FunctionTemplate::New(isolate, WriteStdout));
+ if (!i::v8_flags.fuzzing) {
+ global_template->Set(isolate, "writeFile",
+ FunctionTemplate::New(isolate, WriteFile));
+ }
global_template->Set(isolate, "read",
FunctionTemplate::New(isolate, ReadFile));
global_template->Set(isolate, "readbuffer",
@@ -3436,21 +3408,9 @@ Local<ObjectTemplate> Shell::CreateRealmTemplate(Isolate* isolate) {
FunctionTemplate::New(isolate, RealmEval));
realm_template->SetAccessor(String::NewFromUtf8Literal(isolate, "shared"),
RealmSharedGet, RealmSharedSet);
- if (options.d8_web_snapshot_api) {
- realm_template->Set(isolate, "takeWebSnapshot",
- FunctionTemplate::New(isolate, RealmTakeWebSnapshot));
- realm_template->Set(isolate, "useWebSnapshot",
- FunctionTemplate::New(isolate, RealmUseWebSnapshot));
- }
return realm_template;
}
-Local<FunctionTemplate> Shell::CreateSnapshotTemplate(Isolate* isolate) {
- Local<FunctionTemplate> snapshot_template = FunctionTemplate::New(isolate);
- snapshot_template->InstanceTemplate()->SetInternalFieldCount(1);
- PerIsolateData::Get(isolate)->SetSnapshotObjectCtor(snapshot_template);
- return snapshot_template;
-}
Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
Local<ObjectTemplate> d8_template = ObjectTemplate::New(isolate);
{
@@ -3535,6 +3495,21 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
Local<Signature>(), 1));
d8_template->Set(isolate, "serializer", serializer_template);
}
+ {
+ Local<ObjectTemplate> profiler_template = ObjectTemplate::New(isolate);
+ profiler_template->Set(
+ isolate, "setOnProfileEndListener",
+ FunctionTemplate::New(isolate, ProfilerSetOnProfileEndListener));
+ profiler_template->Set(
+ isolate, "triggerSample",
+ FunctionTemplate::New(isolate, ProfilerTriggerSample));
+ d8_template->Set(isolate, "profiler", profiler_template);
+ }
+ d8_template->Set(isolate, "terminate",
+ FunctionTemplate::New(isolate, Terminate));
+ if (!options.omit_quit) {
+ d8_template->Set(isolate, "quit", FunctionTemplate::New(isolate, Quit));
+ }
return d8_template;
}
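CreateD8Template now also exposes d8.profiler.setOnProfileEndListener, d8.profiler.triggerSample, d8.terminate and, unless --omit-quit is passed, d8.quit. A minimal sketch of the profiler surface, assuming console.profile()/console.profileEnd() drive the D8Console profiler and that the listener receives the serialized profile as a string (as TriggerOnProfileEndListener above passes it):

    // Hypothetical d8/JavaScript session.
    d8.profiler.setOnProfileEndListener((profile) => print(profile.length));
    console.profile();
    d8.profiler.triggerSample();  // force one sample into the current profile
    console.profileEnd();         // listener receives the serialized profile
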
@@ -3645,25 +3620,6 @@ void Shell::Initialize(Isolate* isolate, D8Console* console,
[](Local<Object> host, v8::AccessType type, Local<Value> data) {});
}
-#ifdef V8_FUZZILLI
- // Let the parent process (Fuzzilli) know we are ready.
- if (options.fuzzilli_enable_builtins_coverage) {
- cov_init_builtins_edges(static_cast<uint32_t>(
- i::BasicBlockProfiler::Get()
- ->GetCoverageBitmap(reinterpret_cast<i::Isolate*>(isolate))
- .size()));
- }
- char helo[] = "HELO";
- if (write(REPRL_CWFD, helo, 4) != 4 || read(REPRL_CRFD, helo, 4) != 4) {
- fuzzilli_reprl = false;
- }
-
- if (memcmp(helo, "HELO", 4) != 0) {
- fprintf(stderr, "Invalid response from parent\n");
- _exit(-1);
- }
-#endif // V8_FUZZILLI
-
debug::SetConsoleDelegate(isolate, console);
}
@@ -3672,7 +3628,7 @@ Local<String> Shell::WasmLoadSourceMapCallback(Isolate* isolate,
return Shell::ReadFile(isolate, path, false).ToLocalChecked();
}
-Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
+MaybeLocal<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
// This needs to be a critical section since this is not thread-safe
i::ParkedMutexGuard lock_guard(
reinterpret_cast<i::Isolate*>(isolate)->main_thread_local_isolate(),
@@ -3681,8 +3637,10 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, nullptr, global_template);
- DCHECK_IMPLIES(context.IsEmpty(), isolate->IsExecutionTerminating());
- if (context.IsEmpty()) return {};
+ if (context.IsEmpty()) {
+ DCHECK(isolate->IsExecutionTerminating());
+ return {};
+ }
if (i::v8_flags.perf_prof_annotate_wasm ||
i::v8_flags.vtune_prof_annotate_wasm) {
isolate->SetWasmLoadSourceMapCallback(Shell::WasmLoadSourceMapCallback);
@@ -4033,13 +3991,13 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value filename(isolate, args[0]);
int length;
if (*filename == nullptr) {
- isolate->ThrowError("Error loading file");
+ ThrowError(isolate, "Error loading file");
return;
}
uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == nullptr) {
- isolate->ThrowError("Error reading file");
+ ThrowError(isolate, "Error reading file");
return;
}
Local<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, length);
@@ -4059,10 +4017,10 @@ MaybeLocal<String> Shell::ReadFile(Isolate* isolate, const char* name,
if (should_throw) {
std::ostringstream oss;
oss << "Error loading file: " << name;
- isolate->ThrowError(
- v8::String::NewFromUtf8(
- isolate, oss.str().substr(0, String::kMaxLength).c_str())
- .ToLocalChecked());
+ ThrowError(isolate,
+ v8::String::NewFromUtf8(
+ isolate, oss.str().substr(0, String::kMaxLength).c_str())
+ .ToLocalChecked());
}
return MaybeLocal<String>();
}
@@ -4175,7 +4133,8 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
session_ =
inspector_->connect(1, channel_.get(), v8_inspector::StringView(),
- v8_inspector::V8Inspector::kFullyTrusted);
+ v8_inspector::V8Inspector::kFullyTrusted,
+ v8_inspector::V8Inspector::kNotWaitingForDebugger);
context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
inspector_->contextCreated(v8_inspector::V8ContextInfo(
context, kContextGroupId, v8_inspector::StringView()));
@@ -4339,15 +4298,6 @@ bool SourceGroup::Execute(Isolate* isolate) {
break;
}
continue;
- } else if (strcmp(arg, "--web-snapshot") == 0 && i + 1 < end_offset_) {
- // Treat the next file as a web snapshot.
- arg = argv_[++i];
- Shell::set_script_executed();
- if (!Shell::ExecuteWebSnapshot(isolate, arg)) {
- success = false;
- break;
- }
- continue;
} else if (strcmp(arg, "--json") == 0 && i + 1 < end_offset_) {
// Treat the next file as a JSON file.
arg = argv_[++i];
@@ -4380,13 +4330,6 @@ bool SourceGroup::Execute(Isolate* isolate) {
break;
}
}
- if (!success) {
- return false;
- }
- if (Shell::options.web_snapshot_config ||
- Shell::options.web_snapshot_output) {
- success = Shell::TakeWebSnapshot(isolate);
- }
return success;
}
@@ -4398,32 +4341,42 @@ void SourceGroup::ExecuteInThread() {
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
Isolate* isolate = Isolate::New(create_params);
Shell::SetWaitUntilDone(isolate, false);
- D8Console console(isolate);
- Shell::Initialize(isolate, &console, false);
- for (int i = 0; i < Shell::options.stress_runs; ++i) {
- {
- next_semaphore_.ParkedWait(
- reinterpret_cast<i::Isolate*>(isolate)->main_thread_local_isolate());
- }
- {
- Isolate::Scope iscope(isolate);
- PerIsolateData data(isolate);
+ {
+ Isolate::Scope isolate_scope(isolate);
+ D8Console console(isolate);
+ Shell::Initialize(isolate, &console, false);
+ PerIsolateData data(isolate);
+
+ for (int i = 0; i < Shell::options.stress_runs; ++i) {
+ {
+ next_semaphore_.ParkedWait(reinterpret_cast<i::Isolate*>(isolate)
+ ->main_thread_local_isolate());
+ }
{
- HandleScope scope(isolate);
- Local<Context> context = Shell::CreateEvaluationContext(isolate);
{
- Context::Scope cscope(context);
- InspectorClient inspector_client(context,
- Shell::options.enable_inspector);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Execute(isolate);
- Shell::CompleteMessageLoop(isolate);
+ HandleScope scope(isolate);
+ Local<Context> context;
+ if (!Shell::CreateEvaluationContext(isolate).ToLocal(&context)) {
+ DCHECK(isolate->IsExecutionTerminating());
+ break;
+ }
+ {
+ Context::Scope context_scope(context);
+ InspectorClient inspector_client(context,
+ Shell::options.enable_inspector);
+ PerIsolateData::RealmScope realm_scope(
+ PerIsolateData::Get(isolate));
+ Execute(isolate);
+ Shell::CompleteMessageLoop(isolate);
+ }
}
+ Shell::CollectGarbage(isolate);
}
- Shell::CollectGarbage(isolate);
+ done_semaphore_.Signal();
}
- done_semaphore_.Signal();
+
+ Shell::ResetOnProfileEndListener(isolate);
}
isolate->Dispose();
@@ -4597,7 +4550,7 @@ void Worker::ProcessMessage(std::unique_ptr<SerializationData> data) {
DCHECK_NOT_NULL(isolate_);
HandleScope scope(isolate_);
Local<Context> context = context_.Get(isolate_);
- Context::Scope cscope(context);
+ Context::Scope context_scope(context);
Local<Object> global = context->Global();
// Get the message handler.
@@ -4612,6 +4565,7 @@ void Worker::ProcessMessage(std::unique_ptr<SerializationData> data) {
try_catch.SetVerbose(true);
Local<Value> value;
if (Shell::DeserializeValue(isolate_, std::move(data)).ToLocal(&value)) {
+ DCHECK(!isolate_->IsExecutionTerminating());
Local<Value> argv[] = {value};
MaybeLocal<Value> result = onmessage_fun->Call(context, global, 1, argv);
USE(result);
@@ -4646,72 +4600,81 @@ void Worker::ExecuteInThread() {
// The Worker is now ready to receive messages.
started_semaphore_.Signal();
- D8Console console(isolate_);
- Shell::Initialize(isolate_, &console, false);
- // This is not really a loop, but the loop allows us to break out of this
- // block easily.
- for (bool execute = true; execute; execute = false) {
- Isolate::Scope iscope(isolate_);
- {
- HandleScope scope(isolate_);
- PerIsolateData data(isolate_);
- Local<Context> context = Shell::CreateEvaluationContext(isolate_);
- if (context.IsEmpty()) break;
- context_.Reset(isolate_, context);
+ {
+ Isolate::Scope isolate_scope(isolate_);
+ D8Console console(isolate_);
+ Shell::Initialize(isolate_, &console, false);
+ PerIsolateData data(isolate_);
+ // This is not really a loop, but the loop allows us to break out of this
+ // block easily.
+ for (bool execute = true; execute; execute = false) {
{
- Context::Scope cscope(context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
-
- Local<Object> global = context->Global();
- Local<Value> this_value = External::New(isolate_, this);
- Local<FunctionTemplate> postmessage_fun_template =
- FunctionTemplate::New(isolate_, PostMessageOut, this_value);
-
- Local<Function> postmessage_fun;
- if (postmessage_fun_template->GetFunction(context).ToLocal(
- &postmessage_fun)) {
- global
- ->Set(context,
+ HandleScope scope(isolate_);
+ Local<Context> context;
+ if (!Shell::CreateEvaluationContext(isolate_).ToLocal(&context)) {
+ DCHECK(isolate_->IsExecutionTerminating());
+ break;
+ }
+ context_.Reset(isolate_, context);
+ {
+ Context::Scope context_scope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
+
+ Local<Object> global = context->Global();
+ Local<Value> this_value = External::New(isolate_, this);
+ Local<FunctionTemplate> postmessage_fun_template =
+ FunctionTemplate::New(isolate_, PostMessageOut, this_value);
+
+ Local<Function> postmessage_fun;
+ if (postmessage_fun_template->GetFunction(context).ToLocal(
+ &postmessage_fun)) {
+ global
+ ->Set(
+ context,
v8::String::NewFromUtf8Literal(
isolate_, "postMessage", NewStringType::kInternalized),
postmessage_fun)
- .FromJust();
- }
+ .FromJust();
+ }
- // First run the script
- Local<String> file_name =
- String::NewFromUtf8Literal(isolate_, "unnamed");
- Local<String> source =
- String::NewFromUtf8(isolate_, script_).ToLocalChecked();
- if (Shell::ExecuteString(
- isolate_, source, file_name, Shell::kNoPrintResult,
- Shell::kReportExceptions, Shell::kProcessMessageQueue)) {
- // Check that there's a message handler
- MaybeLocal<Value> maybe_onmessage = global->Get(
- context,
- String::NewFromUtf8Literal(isolate_, "onmessage",
- NewStringType::kInternalized));
- Local<Value> onmessage;
- if (maybe_onmessage.ToLocal(&onmessage) && onmessage->IsFunction()) {
- // Now wait for messages.
- ProcessMessages();
+ // First run the script
+ Local<String> file_name =
+ String::NewFromUtf8Literal(isolate_, "unnamed");
+ Local<String> source =
+ String::NewFromUtf8(isolate_, script_).ToLocalChecked();
+ if (Shell::ExecuteString(
+ isolate_, source, file_name, Shell::kNoPrintResult,
+ Shell::kReportExceptions, Shell::kProcessMessageQueue)) {
+ // Check that there's a message handler
+ MaybeLocal<Value> maybe_onmessage = global->Get(
+ context,
+ String::NewFromUtf8Literal(isolate_, "onmessage",
+ NewStringType::kInternalized));
+ Local<Value> onmessage;
+ if (maybe_onmessage.ToLocal(&onmessage) &&
+ onmessage->IsFunction()) {
+ // Now wait for messages.
+ ProcessMessages();
+ }
}
}
}
+ Shell::CollectGarbage(isolate_);
}
- Shell::CollectGarbage(isolate_);
- }
- {
- base::MutexGuard lock_guard(&worker_mutex_);
- state_.store(State::kTerminated);
- CHECK(!is_running());
- task_runner_.reset();
- task_manager_ = nullptr;
+ {
+ base::MutexGuard lock_guard(&worker_mutex_);
+ state_.store(State::kTerminated);
+ CHECK(!is_running());
+ task_runner_.reset();
+ task_manager_ = nullptr;
+ }
+
+ Shell::ResetOnProfileEndListener(isolate_);
+ context_.Reset();
+ platform::NotifyIsolateShutdown(g_default_platform, isolate_);
}
- context_.Reset();
- platform::NotifyIsolateShutdown(g_default_platform, isolate_);
isolate_->Dispose();
isolate_ = nullptr;
@@ -4725,7 +4688,7 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(isolate);
if (args.Length() < 1) {
- isolate->ThrowError("Invalid argument");
+ ThrowError(isolate, "Invalid argument");
return;
}
@@ -4940,15 +4903,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--stress-deserialize") == 0) {
options.stress_deserialize = true;
argv[i] = nullptr;
- } else if (strncmp(argv[i], "--web-snapshot-config=", 22) == 0) {
- options.web_snapshot_config = argv[i] + 22;
- argv[i] = nullptr;
- } else if (strncmp(argv[i], "--web-snapshot-output=", 22) == 0) {
- options.web_snapshot_output = argv[i] + 22;
- argv[i] = nullptr;
- } else if (strcmp(argv[i], "--experimental-d8-web-snapshot-api") == 0) {
- options.d8_web_snapshot_api = true;
- argv[i] = nullptr;
} else if (strcmp(argv[i], "--compile-only") == 0) {
options.compile_only = true;
argv[i] = nullptr;
@@ -4960,8 +4914,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.max_serializer_memory = atoi(argv[i] + 24) * i::MB;
argv[i] = nullptr;
#ifdef V8_FUZZILLI
- } else if (strcmp(argv[i], "--no-fuzzilli-enable-builtins-coverage") == 0) {
- options.fuzzilli_enable_builtins_coverage = false;
+ } else if (strcmp(argv[i], "--fuzzilli-enable-builtins-coverage") == 0) {
+ options.fuzzilli_enable_builtins_coverage = true;
argv[i] = nullptr;
} else if (strcmp(argv[i], "--fuzzilli-coverage-statistics") == 0) {
options.fuzzilli_coverage_statistics = true;
@@ -5027,12 +4981,11 @@ bool Shell::SetOptions(int argc, char* argv[]) {
const char* usage =
"Synopsis:\n"
" shell [options] [--shell] [<file>...]\n"
- " d8 [options] [-e <string>] [--shell] [[--module|--web-snapshot]"
+      " d8 [options] [-e <string>] [--shell] [[--module]"
" <file>...]\n\n"
" -e execute a string in V8\n"
" --shell run an interactive JavaScript shell\n"
- " --module execute a file as a JavaScript module\n"
- " --web-snapshot execute a file as a web snapshot\n\n";
+ " --module execute a file as a JavaScript module\n";
using HelpOptions = i::FlagList::HelpOptions;
i::v8_flags.abort_on_contradictory_flags = true;
i::FlagList::SetFlagsFromCommandLine(&argc, argv, true,
@@ -5059,9 +5012,7 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current->End(i);
current++;
current->Begin(argv, i + 1);
- } else if (strcmp(str, "--module") == 0 ||
- strcmp(str, "--web-snapshot") == 0 ||
- strcmp(str, "--json") == 0) {
+ } else if (strcmp(str, "--module") == 0 || strcmp(str, "--json") == 0) {
// Pass on to SourceGroup, which understands these options.
} else if (strncmp(str, "--", 2) == 0) {
if (!i::v8_flags.correctness_fuzzer_suppressions) {
@@ -5083,45 +5034,11 @@ bool Shell::SetOptions(int argc, char* argv[]) {
return true;
}
-int Shell::RunMain(Isolate* isolate, bool last_run) {
+int Shell::RunMain(v8::Isolate* isolate, bool last_run) {
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
- bool success = true;
- {
- SetWaitUntilDone(isolate, false);
- if (options.lcov_file) {
- debug::Coverage::SelectMode(isolate, debug::CoverageMode::kBlockCount);
- }
- HandleScope scope(isolate);
- Local<Context> context = CreateEvaluationContext(isolate);
- CreateSnapshotTemplate(isolate);
- bool use_existing_context = last_run && use_interactive_shell();
- if (use_existing_context) {
- // Keep using the same context in the interactive shell.
- evaluation_context_.Reset(isolate, context);
- }
- {
- Context::Scope cscope(context);
- InspectorClient inspector_client(context, options.enable_inspector);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- if (!options.isolate_sources[0].Execute(isolate)) success = false;
- if (!CompleteMessageLoop(isolate)) success = false;
- }
- WriteLcovData(isolate, options.lcov_file);
- if (last_run && i::v8_flags.stress_snapshot) {
- static constexpr bool kClearRecompilableData = true;
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- // TODO(jgruber,v8:10500): Don't deoptimize once we support serialization
- // of optimized code.
- i::Deoptimizer::DeoptimizeAll(i_isolate);
- i::Snapshot::ClearReconstructableDataForSerialization(
- i_isolate, kClearRecompilableData);
- i::Snapshot::SerializeDeserializeAndVerifyForTesting(i_isolate,
- i_context);
- }
- }
+ bool success = RunMainIsolate(isolate, last_run);
CollectGarbage(isolate);
// Park the main thread here to prevent deadlocks in shared GCs when waiting
@@ -5150,12 +5067,64 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
return (success == Shell::options.expected_to_throw ? 1 : 0);
}
+bool Shell::RunMainIsolate(v8::Isolate* isolate, bool last_run) {
+ Shell::SetWaitUntilDone(isolate, false);
+ if (options.lcov_file) {
+ debug::Coverage::SelectMode(isolate, debug::CoverageMode::kBlockCount);
+ }
+ HandleScope scope(isolate);
+ Local<Context> context;
+ if (!CreateEvaluationContext(isolate).ToLocal(&context)) {
+ DCHECK(isolate->IsExecutionTerminating());
+ // We must not exit early here in REPRL mode as that would cause the next
+ // testcase sent by Fuzzilli to be skipped, which will desynchronize the
+ // communication between d8 and Fuzzilli, leading to a crash.
+ DCHECK(!fuzzilli_reprl);
+ return false;
+ }
+ bool use_existing_context = last_run && use_interactive_shell();
+ if (use_existing_context) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_.Reset(isolate, context);
+ }
+ bool success = true;
+ {
+ Context::Scope context_scope(context);
+ InspectorClient inspector_client(context, options.enable_inspector);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ if (!options.isolate_sources[0].Execute(isolate)) success = false;
+ if (!CompleteMessageLoop(isolate)) success = false;
+ }
+ WriteLcovData(isolate, options.lcov_file);
+ if (last_run && i::v8_flags.stress_snapshot) {
+ {
+ // We can't run the serializer while workers are still active. Ideally,
+ // we'd terminate these properly (see WaitForRunningWorkers), but that's
+ // not easily possible due to ordering issues. It's not expected to be a
+ // common case, and it's unrelated to issues that stress_snapshot is
+ // intended to catch - simply bail out.
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
+ if (!running_workers_.empty()) {
+ printf("Warning: stress_snapshot disabled due to active workers\n");
+ return success;
+ }
+ }
+
+ static constexpr bool kClearRecompilableData = true;
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ // TODO(jgruber,v8:10500): Don't deoptimize once we support serialization
+ // of optimized code.
+ i::Deoptimizer::DeoptimizeAll(i_isolate);
+ i::Snapshot::ClearReconstructableDataForSerialization(
+ i_isolate, kClearRecompilableData);
+ i::Snapshot::SerializeDeserializeAndVerifyForTesting(i_isolate, i_context);
+ }
+ return success;
+}
void Shell::CollectGarbage(Isolate* isolate) {
if (options.send_idle_notification) {
- const double kLongIdlePauseInSeconds = 1.0;
isolate->ContextDisposedNotification();
- isolate->IdleNotificationDeadline(
- g_platform->MonotonicallyIncreasingTime() + kLongIdlePauseInSeconds);
}
if (options.invoke_weak_callbacks) {
// By sending a low memory notifications, we will try hard to collect all
@@ -5210,8 +5179,10 @@ bool ProcessMessages(
SealHandleScope shs(isolate);
for (bool ran_tasks = true; ran_tasks;) {
// Execute one foreground task (if one exists), then microtasks.
+ if (isolate->IsExecutionTerminating()) return false;
ran_tasks = v8::platform::PumpMessageLoop(g_default_platform, isolate,
behavior());
+ if (isolate->IsExecutionTerminating()) return false;
if (ran_tasks) MicrotasksScope::PerformCheckpoint(isolate);
// In predictable mode we push all background tasks into the foreground
@@ -5219,11 +5190,13 @@ bool ProcessMessages(
// isolate. We execute all background tasks after running one foreground
// task.
if (i::v8_flags.verify_predictable) {
+ if (isolate->IsExecutionTerminating()) return false;
while (v8::platform::PumpMessageLoop(
g_default_platform,
kProcessGlobalPredictablePlatformWorkerTaskQueue,
platform::MessageLoopBehavior::kDoNotWait)) {
ran_tasks = true;
+ if (isolate->IsExecutionTerminating()) return false;
}
}
}
@@ -5231,6 +5204,7 @@ bool ProcessMessages(
v8::platform::RunIdleTasks(g_default_platform, isolate,
50.0 / base::Time::kMillisecondsPerSecond);
}
+ if (isolate->IsExecutionTerminating()) return false;
bool ran_set_timeout = false;
if (!RunSetTimeoutCallback(isolate, &ran_set_timeout)) return false;
if (!ran_set_timeout) return true;
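The IsExecutionTerminating() checks added above make the embedder message loop stop pumping as soon as termination has been requested, rather than running further queued tasks. A minimal sketch of the observable effect, assuming d8's setTimeout simply queues foreground tasks:

    // Hypothetical d8/JavaScript snippet.
    setTimeout(() => d8.terminate());
    setTimeout(() => print("never reached"));  // the loop exits before this runs
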
@@ -5559,7 +5533,6 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
MaybeLocal<Value> Shell::DeserializeValue(
Isolate* isolate, std::unique_ptr<SerializationData> data) {
- Local<Value> value;
Local<Context> context = isolate->GetCurrentContext();
Deserializer deserializer(isolate, std::move(data));
return deserializer.ReadValue(context);
@@ -5790,11 +5763,37 @@ int Shell::Main(int argc, char* argv[]) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+ if (i::v8_flags.experimental) {
+ // This message is printed to stderr so that it is also visible in
+ // Clusterfuzz reports.
+ fprintf(stderr,
+ "V8 is running with experimental features enabled. Stability and "
+ "security will suffer.\n");
+ }
+
Isolate* isolate = Isolate::New(create_params);
+#ifdef V8_FUZZILLI
+ // Let the parent process (Fuzzilli) know we are ready.
+ if (options.fuzzilli_enable_builtins_coverage) {
+ cov_init_builtins_edges(static_cast<uint32_t>(
+ i::BasicBlockProfiler::Get()
+ ->GetCoverageBitmap(reinterpret_cast<i::Isolate*>(isolate))
+ .size()));
+ }
+ char helo[] = "HELO";
+ if (write(REPRL_CWFD, helo, 4) != 4 || read(REPRL_CRFD, helo, 4) != 4) {
+ fuzzilli_reprl = false;
+ }
+
+ if (memcmp(helo, "HELO", 4) != 0) {
+ FATAL("REPRL: Invalid response from parent");
+ }
+#endif // V8_FUZZILLI
+
{
- D8Console console(isolate);
Isolate::Scope scope(isolate);
+ D8Console console(isolate);
Initialize(isolate, &console);
PerIsolateData data(isolate);
@@ -5805,8 +5804,7 @@ int Shell::Main(int argc, char* argv[]) {
unsigned action = 0;
ssize_t nread = read(REPRL_CRFD, &action, 4);
if (nread != 4 || action != 'cexe') {
- fprintf(stderr, "Unknown action: %u\n", action);
- _exit(-1);
+ FATAL("REPRL: Unknown action: %u", action);
}
}
#endif // V8_FUZZILLI
@@ -5863,12 +5861,13 @@ int Shell::Main(int argc, char* argv[]) {
// Restore old hash seed.
i::v8_flags.hash_seed = i::v8_flags.hash_seed ^ 1337;
{
+ Isolate::Scope isolate_scope(isolate2);
D8Console console2(isolate2);
Initialize(isolate2, &console2);
PerIsolateData data2(isolate2);
- Isolate::Scope isolate_scope(isolate2);
result = RunMain(isolate2, false);
+ ResetOnProfileEndListener(isolate2);
}
isolate2->Dispose();
}
@@ -5914,11 +5913,6 @@ int Shell::Main(int argc, char* argv[]) {
cpu_profiler->Dispose();
}
- // Shut down contexts and collect garbage.
- cached_code_map_.clear();
- evaluation_context_.Reset();
- stringify_function_.Reset();
- CollectGarbage(isolate);
#ifdef V8_FUZZILLI
// Send result to parent (fuzzilli) and reset edge guards.
if (fuzzilli_reprl) {
@@ -5954,6 +5948,13 @@ int Shell::Main(int argc, char* argv[]) {
}
#endif // V8_FUZZILLI
} while (fuzzilli_reprl);
+
+ // Shut down contexts and collect garbage.
+ cached_code_map_.clear();
+ evaluation_context_.Reset();
+ stringify_function_.Reset();
+ ResetOnProfileEndListener(isolate);
+ CollectGarbage(isolate);
}
OnExit(isolate, true);
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 2c56ec82e3..2ba3b239b3 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -319,9 +319,6 @@ class PerIsolateData {
Local<FunctionTemplate> GetTestApiObjectCtor() const;
void SetTestApiObjectCtor(Local<FunctionTemplate> ctor);
- Local<FunctionTemplate> GetSnapshotObjectCtor() const;
- void SetSnapshotObjectCtor(Local<FunctionTemplate> ctor);
-
Local<FunctionTemplate> GetDomNodeCtor() const;
void SetDomNodeCtor(Local<FunctionTemplate> ctor);
@@ -344,7 +341,6 @@ class PerIsolateData {
std::unordered_set<DynamicImportData*> import_data_;
#endif
Global<FunctionTemplate> test_api_object_ctor_;
- Global<FunctionTemplate> snapshot_object_ctor_;
Global<FunctionTemplate> dom_node_ctor_;
int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
@@ -405,7 +401,7 @@ class ShellOptions {
DisallowReassignment<bool> fuzzilli_coverage_statistics = {
"fuzzilli-coverage-statistics", false};
DisallowReassignment<bool> fuzzilli_enable_builtins_coverage = {
- "fuzzilli-enable-builtins-coverage", true};
+ "fuzzilli-enable-builtins-coverage", false};
DisallowReassignment<bool> send_idle_notification = {"send-idle-notification",
false};
DisallowReassignment<bool> invoke_weak_callbacks = {"invoke-weak-callbacks",
@@ -464,13 +460,7 @@ class ShellOptions {
"enable-system-instrumentation", false};
DisallowReassignment<bool> enable_etw_stack_walking = {
"enable-etw-stack-walking", false};
- DisallowReassignment<const char*> web_snapshot_config = {
- "web-snapshot-config", nullptr};
- DisallowReassignment<const char*> web_snapshot_output = {
- "web-snapshot-output", nullptr};
- DisallowReassignment<bool> d8_web_snapshot_api = {
- "experimental-d8-web-snapshot-api", false};
- // Applies to web snapshot and JSON deserialization.
+ // Applies to JSON deserialization.
DisallowReassignment<bool> stress_deserialize = {"stress-deserialize", false};
DisallowReassignment<bool> compile_only = {"compile-only", false};
DisallowReassignment<int> repeat_compile = {"repeat-compile", 1};
@@ -508,8 +498,6 @@ class Shell : public i::AllStatic {
ReportExceptions report_exceptions,
ProcessMessageQueue process_message_queue);
static bool ExecuteModule(Isolate* isolate, const char* file_name);
- static bool TakeWebSnapshot(Isolate* isolate);
- static bool ExecuteWebSnapshot(Isolate* isolate, const char* file_name);
static bool LoadJSON(Isolate* isolate, const char* file_name);
static void ReportException(Isolate* isolate, Local<Message> message,
Local<Value> exception);
@@ -518,7 +506,7 @@ class Shell : public i::AllStatic {
bool should_throw = true);
static Local<String> WasmLoadSourceMapCallback(Isolate* isolate,
const char* name);
- static Local<Context> CreateEvaluationContext(Isolate* isolate);
+ static MaybeLocal<Context> CreateEvaluationContext(Isolate* isolate);
static int RunMain(Isolate* isolate, bool last_run);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
@@ -569,10 +557,6 @@ class Shell : public i::AllStatic {
const PropertyCallbackInfo<Value>& info);
static void RealmSharedSet(Local<String> property, Local<Value> value,
const PropertyCallbackInfo<void>& info);
- static void RealmTakeWebSnapshot(
- const v8::FunctionCallbackInfo<v8::Value>& args);
- static void RealmUseWebSnapshot(
- const v8::FunctionCallbackInfo<v8::Value>& args);
static void LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args);
static void TestVerifySourcePositions(
@@ -598,6 +582,18 @@ class Shell : public i::AllStatic {
static void SerializerDeserialize(
const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ProfilerSetOnProfileEndListener(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void ProfilerTriggerSample(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ static bool HasOnProfileEndListener(Isolate* isolate);
+
+ static void TriggerOnProfileEndListener(Isolate* isolate,
+ std::string profile);
+
+ static void ResetOnProfileEndListener(Isolate* isolate);
+
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
static void WriteStdout(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -605,7 +601,9 @@ class Shell : public i::AllStatic {
static void NotifyDone(const v8::FunctionCallbackInfo<v8::Value>& args);
static void QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args);
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Terminate(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void WriteFile(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ReadFile(const v8::FunctionCallbackInfo<v8::Value>& args);
static char* ReadChars(const char* name, int* size_out);
static MaybeLocal<PrimitiveArray> ReadLines(Isolate* isolate,
@@ -724,8 +722,6 @@ class Shell : public i::AllStatic {
static void PromiseRejectCallback(v8::PromiseRejectMessage reject_message);
- static Local<FunctionTemplate> CreateSnapshotTemplate(Isolate* isolate);
-
private:
static inline int DeserializationRunCount() {
return options.stress_deserialize ? 1000 : 1;
@@ -734,6 +730,11 @@ class Shell : public i::AllStatic {
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
static Global<Function> stringify_function_;
+
+ static base::Mutex profiler_end_callback_lock_;
+ static std::map<Isolate*, std::pair<Global<Function>, Global<Context>>>
+ profiler_end_callback_;
+
static const char* stringify_source_;
static CounterMap* counter_map_;
static base::SharedMutex counter_mutex_;
@@ -759,6 +760,7 @@ class Shell : public i::AllStatic {
static Counter* GetCounter(const char* name, bool is_histogram);
static Local<String> Stringify(Isolate* isolate, Local<Value> value);
static void RunShell(Isolate* isolate);
+ static bool RunMainIsolate(Isolate* isolate, bool last_run);
static bool SetOptions(int argc, char* argv[]);
static void NodeTypeCallback(const v8::FunctionCallbackInfo<v8::Value>& args);
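
The d8.h changes above add a mutex-guarded map from Isolate* to a (Global<Function>, Global<Context>) pair backing the new Profiler onProfileEnd listener, plus a ResetOnProfileEndListener hook that Main now calls before each isolate is disposed. A rough sketch of that storage shape using std::mutex in place of v8::base::Mutex; class and member names here are illustrative:

#include <map>
#include <mutex>
#include <utility>
#include <v8.h>

// Sketch: remember one "profile ended" callback per isolate, guarded by a
// mutex because profiles can finish on a different thread.
class ProfileEndListeners {
 public:
  void Set(v8::Isolate* isolate, v8::Local<v8::Function> callback,
           v8::Local<v8::Context> context) {
    std::lock_guard<std::mutex> lock(mutex_);
    listeners_[isolate] = {v8::Global<v8::Function>(isolate, callback),
                           v8::Global<v8::Context>(isolate, context)};
  }
  void Reset(v8::Isolate* isolate) {
    std::lock_guard<std::mutex> lock(mutex_);
    listeners_.erase(isolate);
  }
  bool Has(v8::Isolate* isolate) {
    std::lock_guard<std::mutex> lock(mutex_);
    return listeners_.count(isolate) > 0;
  }

 private:
  std::mutex mutex_;
  std::map<v8::Isolate*,
           std::pair<v8::Global<v8::Function>, v8::Global<v8::Context>>>
      listeners_;
};
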
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index c434cede65..fa598f4650 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -560,7 +560,7 @@ void CollectAndMaybeResetCounts(Isolate* isolate,
// feedback allocation we may miss counting functions if the feedback
// vector wasn't allocated yet and the function's interrupt budget wasn't
// updated (i.e. it didn't execute return / jump).
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ for (JavaScriptStackFrameIterator it(isolate); !it.done(); it.Advance()) {
SharedFunctionInfo shared = it.frame()->function().shared();
if (counter_map->Get(shared) != 0) continue;
counter_map->Add(shared, 1);
@@ -621,6 +621,10 @@ std::unique_ptr<Coverage> Coverage::CollectBestEffort(Isolate* isolate) {
std::unique_ptr<Coverage> Coverage::Collect(
Isolate* isolate, v8::debug::CoverageMode collectionMode) {
+ // Unsupported if jitless mode is enabled at build-time since related
+ // optimizations deactivate invocation count updates.
+ CHECK(!V8_JITLESS_BOOL);
+
// Collect call counts for all functions.
SharedToCounterMap counter_map;
CollectAndMaybeResetCounts(isolate, &counter_map, collectionMode);
@@ -709,7 +713,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
}
}
- Handle<String> name = SharedFunctionInfo::DebugName(info);
+ Handle<String> name = SharedFunctionInfo::DebugName(isolate, info);
CoverageFunction function(start, end, count, name);
if (IsBlockMode(collectionMode) && info->HasCoverageInfo()) {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 7dbb60f5cb..48349f193d 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -96,7 +96,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
DisableBreak disable_break_scope(isolate->debug());
// Get the frame where the debugging is performed.
- StackTraceFrameIterator it(isolate, frame_id);
+ DebuggableStackFrameIterator it(isolate, frame_id);
#if V8_ENABLE_WEBASSEMBLY
if (it.is_wasm()) {
WasmFrame* frame = WasmFrame::cast(it.frame());
@@ -137,7 +137,7 @@ MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
// Handle the processing of break.
DisableBreak disable_break_scope(isolate->debug());
Factory* factory = isolate->factory();
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
// Get context and receiver.
Handle<Context> native_context(
@@ -211,9 +211,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
: isolate_(isolate),
frame_inspector_(frame, inlined_jsframe_index, isolate),
scope_iterator_(isolate, &frame_inspector_,
- v8_flags.experimental_reuse_locals_blocklists
- ? ScopeIterator::ReparseStrategy::kScriptIfNeeded
- : ScopeIterator::ReparseStrategy::kScript) {
+ ScopeIterator::ReparseStrategy::kScriptIfNeeded) {
Handle<Context> outer_context(frame_inspector_.GetFunction()->context(),
isolate);
evaluation_context_ = outer_context;
@@ -226,40 +224,31 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
// - To make stack-allocated variables visible, we materialize them and
// use a debug-evaluate context to wrap both the materialized object and
// the original context.
- // - We also wrap all contexts on the chain between the original context
- // and the function context.
+ // - Each scope from the break position up to the function scope is wrapped
+ // in a debug-evaluate context.
// - Between the function scope and the native context, we only resolve
// variable names that are guaranteed to not be shadowed by stack-allocated
- // variables. Contexts between the function context and the original
+ // variables. ScopeInfos between the function scope and the native
// context have a blocklist attached to implement that.
+ // - The various block lists are calculated by the ScopeIterator during
+ // iteration.
// Context::Lookup has special handling for debug-evaluate contexts:
// - Look up in the materialized stack variables.
- // - Check the blocklist to find out whether to abort further lookup.
// - Look up in the original context.
- for (; !scope_iterator_.Done(); scope_iterator_.Next()) {
+ // - Once we have seen a debug-evaluate context we start to take the
+ // block lists into account before moving up the context chain.
+ for (; scope_iterator_.InInnerScope(); scope_iterator_.Next()) {
ScopeIterator::ScopeType scope_type = scope_iterator_.Type();
if (scope_type == ScopeIterator::ScopeTypeScript) break;
ContextChainElement context_chain_element;
- if (scope_iterator_.InInnerScope() &&
- (scope_type == ScopeIterator::ScopeTypeLocal ||
- scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK))) {
+ if (scope_type == ScopeIterator::ScopeTypeLocal ||
+ scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK)) {
context_chain_element.materialized_object =
scope_iterator_.ScopeObject(ScopeIterator::Mode::STACK);
}
if (scope_iterator_.HasContext()) {
context_chain_element.wrapped_context = scope_iterator_.CurrentContext();
}
- if (v8_flags.experimental_reuse_locals_blocklists) {
- // With the re-use experiment we only need `DebugEvaluateContexts` up
- // to (and including) the paused function scope so the evaluated
- // expression can access the materialized stack locals.
- if (!scope_iterator_.InInnerScope()) break;
- } else {
- CHECK(!v8_flags.experimental_reuse_locals_blocklists);
- if (!scope_iterator_.InInnerScope()) {
- context_chain_element.blocklist = scope_iterator_.GetLocals();
- }
- }
context_chain_.push_back(context_chain_element);
}
@@ -273,29 +262,23 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
scope_info = ScopeInfo::CreateForWithScope(isolate, scope_info);
scope_info->SetIsDebugEvaluateScope();
- if (v8_flags.experimental_reuse_locals_blocklists) {
- // In the case where the "paused function scope" is the script scope
- // itself, we don't need (and don't have) a blocklist.
- const bool paused_scope_is_script_scope =
- scope_iterator_.Done() || scope_iterator_.InInnerScope();
- if (rit == context_chain_.rbegin() && !paused_scope_is_script_scope) {
- // The DebugEvaluateContext we create for the closure scope is the only
- // DebugEvaluateContext with a block list. This means we'll retrieve
- // the existing block list from the paused function scope
- // and also associate the temporary scope_info we create here with that
- // blocklist.
- Handle<ScopeInfo> function_scope_info = handle(
- frame_inspector_.GetFunction()->shared().scope_info(), isolate_);
- Handle<Object> block_list = handle(
- isolate_->LocalsBlockListCacheGet(function_scope_info), isolate_);
- CHECK(block_list->IsStringSet());
- isolate_->LocalsBlockListCacheSet(scope_info, Handle<ScopeInfo>::null(),
- Handle<StringSet>::cast(block_list));
- }
- } else if (!element.blocklist.is_null()) {
- CHECK(!v8_flags.experimental_reuse_locals_blocklists);
- scope_info = ScopeInfo::RecreateWithBlockList(isolate, scope_info,
- element.blocklist);
+ // In the case where the "paused function scope" is the script scope
+ // itself, we don't need (and don't have) a blocklist.
+ const bool paused_scope_is_script_scope =
+ scope_iterator_.Done() || scope_iterator_.InInnerScope();
+ if (rit == context_chain_.rbegin() && !paused_scope_is_script_scope) {
+ // The DebugEvaluateContext we create for the closure scope is the only
+ // DebugEvaluateContext with a block list. This means we'll retrieve
+ // the existing block list from the paused function scope
+ // and also associate the temporary scope_info we create here with that
+ // blocklist.
+ Handle<ScopeInfo> function_scope_info = handle(
+ frame_inspector_.GetFunction()->shared().scope_info(), isolate_);
+ Handle<Object> block_list = handle(
+ isolate_->LocalsBlockListCacheGet(function_scope_info), isolate_);
+ CHECK(block_list->IsStringSet());
+ isolate_->LocalsBlockListCacheSet(scope_info, Handle<ScopeInfo>::null(),
+ Handle<StringSet>::cast(block_list));
}
evaluation_context_ = factory->NewDebugEvaluateContext(
@@ -612,7 +595,11 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kArrayPrototypeLastIndexOf:
case Builtin::kArrayPrototypeSlice:
case Builtin::kArrayPrototypeToLocaleString:
+ case Builtin::kArrayPrototypeToReversed:
+ case Builtin::kArrayPrototypeToSorted:
+ case Builtin::kArrayPrototypeToSpliced:
case Builtin::kArrayPrototypeToString:
+ case Builtin::kArrayPrototypeWith:
case Builtin::kArrayForEach:
case Builtin::kArrayEvery:
case Builtin::kArraySome:
@@ -653,6 +640,9 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kTypedArrayPrototypeReduce:
case Builtin::kTypedArrayPrototypeReduceRight:
case Builtin::kTypedArrayPrototypeForEach:
+ case Builtin::kTypedArrayPrototypeToReversed:
+ case Builtin::kTypedArrayPrototypeToSorted:
+ case Builtin::kTypedArrayPrototypeWith:
// ArrayBuffer builtins.
case Builtin::kArrayBufferConstructor:
case Builtin::kArrayBufferPrototypeGetByteLength:
@@ -812,6 +802,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kStringPrototypeFontsize:
case Builtin::kStringPrototypeIncludes:
case Builtin::kStringPrototypeIndexOf:
+ case Builtin::kStringPrototypeIsWellFormed:
case Builtin::kStringPrototypeItalics:
case Builtin::kStringPrototypeLastIndexOf:
case Builtin::kStringPrototypeLink:
@@ -833,6 +824,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kStringPrototypeToLowerCase:
case Builtin::kStringPrototypeToUpperCase:
#endif
+ case Builtin::kStringPrototypeToWellFormed:
case Builtin::kStringPrototypeTrim:
case Builtin::kStringPrototypeTrimEnd:
case Builtin::kStringPrototypeTrimStart:
@@ -883,6 +875,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kAllocateRegularInOldGeneration:
case Builtin::kConstructVarargs:
case Builtin::kConstructWithArrayLike:
+ case Builtin::kGetOwnPropertyDescriptor:
+ case Builtin::kOrdinaryGetOwnPropertyDescriptor:
return DebugInfo::kHasNoSideEffect;
#ifdef V8_INTL_SUPPORT
@@ -1073,8 +1067,9 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
return requires_runtime_checks ? DebugInfo::kRequiresRuntimeChecks
: DebugInfo::kHasNoSideEffect;
} else if (info->IsApiFunction()) {
- if (info->GetCode().is_builtin()) {
- return info->GetCode().builtin_id() == Builtin::kHandleApiCall
+ Code code = info->GetCode(isolate);
+ if (code.is_builtin()) {
+ return code.builtin_id() == Builtin::kHandleApiCall
? DebugInfo::kHasNoSideEffect
: DebugInfo::kHasSideEffects;
}
@@ -1119,20 +1114,17 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kArrayReduceRightLoopContinuation:
case Builtin::kArraySomeLoopContinuation:
case Builtin::kArrayTimSort:
+ case Builtin::kArrayTimSortIntoCopy:
case Builtin::kCall_ReceiverIsAny:
case Builtin::kCall_ReceiverIsNotNullOrUndefined:
case Builtin::kCall_ReceiverIsNullOrUndefined:
case Builtin::kCallWithArrayLike:
- case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
- case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit:
- case Builtin::kCEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case Builtin::kCEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit:
- case Builtin::kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case Builtin::kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
- case Builtin::kCEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit:
- case Builtin::kCEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case Builtin::kCEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit:
+ case Builtin::kCEntry_Return1_ArgvOnStack_NoBuiltinExit:
+ case Builtin::kCEntry_Return1_ArgvOnStack_BuiltinExit:
+ case Builtin::kCEntry_Return1_ArgvInRegister_NoBuiltinExit:
+ case Builtin::kCEntry_Return2_ArgvOnStack_NoBuiltinExit:
+ case Builtin::kCEntry_Return2_ArgvOnStack_BuiltinExit:
+ case Builtin::kCEntry_Return2_ArgvInRegister_NoBuiltinExit:
case Builtin::kCloneFastJSArray:
case Builtin::kConstruct:
case Builtin::kConvertToLocaleString:
@@ -1145,6 +1137,8 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kFindOrderedHashSetEntry:
case Builtin::kFlatMapIntoArray:
case Builtin::kFlattenIntoArray:
+ case Builtin::kGenericArrayToReversed:
+ case Builtin::kGenericArrayWith:
case Builtin::kGetProperty:
case Builtin::kHasProperty:
case Builtin::kCreateHTML:
@@ -1165,11 +1159,13 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kStringEqual:
case Builtin::kStringIndexOf:
case Builtin::kStringRepeat:
+ case Builtin::kBigIntEqual:
case Builtin::kToInteger:
case Builtin::kToLength:
case Builtin::kToName:
case Builtin::kToObject:
case Builtin::kToString:
+ case Builtin::kTypedArrayMergeSort:
#ifdef V8_IS_TSAN
case Builtin::kTSANRelaxedStore8IgnoreFP:
case Builtin::kTSANRelaxedStore8SaveFP:
@@ -1208,6 +1204,8 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kFastCreateDataProperty:
switch (caller) {
case Builtin::kArrayPrototypeSlice:
+ case Builtin::kArrayPrototypeToSpliced:
+ case Builtin::kArrayPrototypeWith:
case Builtin::kArrayFilter:
return true;
default:
@@ -1216,6 +1214,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kSetProperty:
switch (caller) {
case Builtin::kArrayPrototypeSlice:
+ case Builtin::kArrayPrototypeToSorted:
case Builtin::kTypedArrayPrototypeMap:
case Builtin::kStringPrototypeMatchAll:
return true;
@@ -1235,17 +1234,15 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
for (Builtin caller = Builtins::kFirst; caller <= Builtins::kLast; ++caller) {
DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
if (state != DebugInfo::kHasNoSideEffect) continue;
- Code code = FromCodeT(isolate->builtins()->code(caller));
+ Code code = isolate->builtins()->code(caller);
int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
for (RelocIterator it(code, mode); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- CodeLookupResult lookup_result =
- isolate->heap()->GcSafeFindCodeForInnerPointer(
- rinfo->target_address());
- CHECK(lookup_result.IsFound());
+ Code lookup_result =
+ isolate->heap()->FindCodeForInnerPointer(rinfo->target_address());
Builtin callee = lookup_result.builtin_id();
if (BuiltinGetSideEffectState(callee) == DebugInfo::kHasNoSideEffect) {
continue;
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 70d185c309..9d55442644 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -108,7 +108,8 @@ RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
void RedirectActiveFunctions::VisitThread(Isolate* isolate,
ThreadLocalTop* top) {
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ for (JavaScriptStackFrameIterator it(isolate, top); !it.done();
+ it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction function = frame->function();
if (!frame->is_interpreted()) continue;
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index c5bb3611e3..7a2a610eba 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -41,25 +41,24 @@ void SetContextId(Local<Context> context, int id) {
int GetContextId(Local<Context> context) {
auto v8_context = Utils::OpenHandle(*context);
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(v8_context->GetIsolate());
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(v8_context->GetIsolate());
i::Object value = v8_context->debug_context_id();
return (value.IsSmi()) ? i::Smi::ToInt(value) : 0;
}
void SetInspector(Isolate* isolate, v8_inspector::V8Inspector* inspector) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
if (inspector == nullptr) {
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate);
i_isolate->set_inspector(nullptr);
} else {
- DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->set_inspector(inspector);
}
}
v8_inspector::V8Inspector* GetInspector(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return i_isolate->inspector();
}
@@ -198,39 +197,62 @@ MaybeLocal<Array> GetInternalProperties(Isolate* v8_isolate,
namespace {
-void CollectPrivateMethodsAndAccessorsFromContext(
- i::Isolate* isolate, i::Handle<i::Context> context,
- i::IsStaticFlag is_static_flag, std::vector<Local<Value>>* names_out,
- std::vector<Local<Value>>* values_out) {
+using FlagFilter = std::function<bool(i::IsStaticFlag)>;
+using VariableModeFilter = std::function<bool(i::VariableMode)>;
+using ContextLocalIterator = std::function<void(
+ i::VariableMode, i::Handle<i::String>, i::Handle<i::Object>)>;
+
+void ForEachContextLocal(i::Isolate* isolate, i::Handle<i::Context> context,
+ const VariableModeFilter& var_mode_filter,
+ const FlagFilter& flag_filter,
+ const ContextLocalIterator& context_local_it) {
DCHECK_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
for (auto it : i::ScopeInfo::IterateLocalNames(scope_info)) {
i::Handle<i::String> name(it->name(), isolate);
i::VariableMode mode = scope_info->ContextLocalMode(it->index());
+ if (!var_mode_filter(mode)) {
+ continue;
+ }
i::IsStaticFlag flag = scope_info->ContextLocalIsStaticFlag(it->index());
- if (!i::IsPrivateMethodOrAccessorVariableMode(mode) ||
- flag != is_static_flag) {
+ if (!flag_filter(flag)) {
continue;
}
int context_index = scope_info->ContextHeaderLength() + it->index();
i::Handle<i::Object> slot_value(context->get(context_index), isolate);
- DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
- slot_value->IsJSFunction());
- DCHECK_IMPLIES(mode != i::VariableMode::kPrivateMethod,
- slot_value->IsAccessorPair());
- names_out->push_back(Utils::ToLocal(name));
- values_out->push_back(Utils::ToLocal(slot_value));
+ context_local_it(mode, name, slot_value);
}
}
} // namespace
-bool GetPrivateMembers(Local<Context> context, Local<Object> object,
+bool GetPrivateMembers(Local<Context> context, Local<Object> object, int filter,
std::vector<Local<Value>>* names_out,
std::vector<Local<Value>>* values_out) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
API_RCS_SCOPE(isolate, debug, GetPrivateMembers);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+
+ bool include_methods =
+ filter & static_cast<int>(PrivateMemberFilter::kPrivateMethods);
+ bool include_fields =
+ filter & static_cast<int>(PrivateMemberFilter::kPrivateFields);
+ bool include_accessors =
+ filter & static_cast<int>(PrivateMemberFilter::kPrivateAccessors);
+ bool include_methods_or_accessors = include_methods || include_accessors;
+
+ auto var_mode_filter =
+ include_methods
+ ? (include_accessors ? i::IsPrivateMethodOrAccessorVariableMode
+ : i::IsPrivateMethodVariableMode)
+ : i::IsPrivateAccessorVariableMode;
+ auto constexpr instance_filter = [](i::IsStaticFlag flag) {
+ return flag == i::IsStaticFlag::kNotStatic;
+ };
+ auto constexpr static_filter = [](i::IsStaticFlag flag) {
+ return flag == i::IsStaticFlag::kStatic;
+ };
+
i::Handle<i::JSReceiver> receiver = Utils::OpenHandle(*object);
i::Handle<i::JSArray> names;
i::Handle<i::FixedArray> values;
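
In debug-interface.cc the one-off CollectPrivateMethodsAndAccessorsFromContext helper becomes ForEachContextLocal, parameterized with a variable-mode filter, a static-flag filter, and a per-local callback, so the same walk can either count entries or collect them. A generic sketch of that filters-plus-visitor shape, with stand-in types since the internal ScopeInfo/Context iteration is not reproduced here:

#include <functional>
#include <string>
#include <vector>

// Stand-in types; V8's real iteration walks ScopeInfo/Context internals.
enum class Mode { kField, kMethod, kAccessor };
enum class StaticFlag { kNotStatic, kStatic };
struct ContextLocal { Mode mode; StaticFlag flag; std::string name; };

using ModeFilter = std::function<bool(Mode)>;
using FlagFilter = std::function<bool(StaticFlag)>;
using LocalVisitor = std::function<void(const ContextLocal&)>;

void ForEachLocal(const std::vector<ContextLocal>& locals,
                  const ModeFilter& mode_filter, const FlagFilter& flag_filter,
                  const LocalVisitor& visit) {
  for (const ContextLocal& local : locals) {
    if (!mode_filter(local.mode)) continue;
    if (!flag_filter(local.flag)) continue;
    visit(local);
  }
}

// Usage: the same walker counts entries or collects them depending on the
// callback, which is how the patch reuses ForEachContextLocal.
int CountInstanceMethods(const std::vector<ContextLocal>& locals) {
  int count = 0;
  ForEachLocal(
      locals, [](Mode m) { return m == Mode::kMethod; },
      [](StaticFlag f) { return f == StaticFlag::kNotStatic; },
      [&](const ContextLocal&) { ++count; });
  return count;
}
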
@@ -247,44 +269,41 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> object,
// Estimate number of private fields and private instance methods/accessors.
int private_entries_count = 0;
+ auto count_private_entry = [&](i::VariableMode mode, i::Handle<i::String>,
+ i::Handle<i::Object>) {
+ private_entries_count++;
+ };
for (int i = 0; i < keys->length(); ++i) {
// Exclude the private brand symbols.
i::Handle<i::Symbol> key(i::Symbol::cast(keys->get(i)), isolate);
if (key->is_private_brand()) {
- i::Handle<i::Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, i::Object::GetProperty(isolate, receiver, key),
- false);
-
- i::Handle<i::Context> value_context(i::Context::cast(*value), isolate);
- i::Handle<i::ScopeInfo> scope_info(value_context->scope_info(), isolate);
- // At least one slot contains the brand symbol so it does not count.
- private_entries_count += (scope_info->ContextLocalCount() - 1);
- } else {
+ if (include_methods_or_accessors) {
+ i::Handle<i::Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, i::Object::GetProperty(isolate, receiver, key),
+ false);
+
+ i::Handle<i::Context> value_context(i::Context::cast(*value), isolate);
+ ForEachContextLocal(isolate, value_context, var_mode_filter,
+ instance_filter, count_private_entry);
+ }
+ } else if (include_fields) {
private_entries_count++;
}
}
// Estimate number of static private methods/accessors for classes.
bool has_static_private_methods_or_accessors = false;
- if (receiver->IsJSFunction()) {
- i::Handle<i::JSFunction> func(i::JSFunction::cast(*receiver), isolate);
- i::Handle<i::SharedFunctionInfo> shared(func->shared(), isolate);
- if (shared->is_class_constructor() &&
- shared->has_static_private_methods_or_accessors()) {
- has_static_private_methods_or_accessors = true;
- i::Handle<i::Context> func_context(func->context(), isolate);
- i::Handle<i::ScopeInfo> scope_info(func_context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
- for (int j = 0; j < local_count; ++j) {
- i::VariableMode mode = scope_info->ContextLocalMode(j);
- i::IsStaticFlag is_static_flag =
- scope_info->ContextLocalIsStaticFlag(j);
- if (i::IsPrivateMethodOrAccessorVariableMode(mode) &&
- is_static_flag == i::IsStaticFlag::kStatic) {
- private_entries_count += local_count;
- break;
- }
+ if (include_methods_or_accessors) {
+ if (receiver->IsJSFunction()) {
+ i::Handle<i::JSFunction> func(i::JSFunction::cast(*receiver), isolate);
+ i::Handle<i::SharedFunctionInfo> shared(func->shared(), isolate);
+ if (shared->is_class_constructor() &&
+ shared->has_static_private_methods_or_accessors()) {
+ has_static_private_methods_or_accessors = true;
+ i::Handle<i::Context> func_context(func->context(), isolate);
+ ForEachContextLocal(isolate, func_context, var_mode_filter,
+ static_filter, count_private_entry);
}
}
}
@@ -294,12 +313,20 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> object,
DCHECK(values_out->empty());
values_out->reserve(private_entries_count);
+ auto add_private_entry = [&](i::VariableMode mode, i::Handle<i::String> name,
+ i::Handle<i::Object> value) {
+ DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
+ value->IsJSFunction());
+ DCHECK_IMPLIES(mode != i::VariableMode::kPrivateMethod,
+ value->IsAccessorPair());
+ names_out->push_back(Utils::ToLocal(name));
+ values_out->push_back(Utils::ToLocal(value));
+ };
if (has_static_private_methods_or_accessors) {
i::Handle<i::Context> recevier_context(
i::JSFunction::cast(*receiver).context(), isolate);
- CollectPrivateMethodsAndAccessorsFromContext(isolate, recevier_context,
- i::IsStaticFlag::kStatic,
- names_out, values_out);
+ ForEachContextLocal(isolate, recevier_context, var_mode_filter,
+ static_filter, add_private_entry);
}
for (int i = 0; i < keys->length(); ++i) {
@@ -309,14 +336,14 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> object,
i::Handle<i::Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, value, i::Object::GetProperty(isolate, receiver, key), false);
-
if (key->is_private_brand()) {
- DCHECK(value->IsContext());
- i::Handle<i::Context> value_context(i::Context::cast(*value), isolate);
- CollectPrivateMethodsAndAccessorsFromContext(isolate, value_context,
- i::IsStaticFlag::kNotStatic,
- names_out, values_out);
- } else { // Private fields
+ if (include_methods_or_accessors) {
+ DCHECK(value->IsContext());
+ i::Handle<i::Context> value_context(i::Context::cast(*value), isolate);
+ ForEachContextLocal(isolate, value_context, var_mode_filter,
+ instance_filter, add_private_entry);
+ }
+ } else if (include_fields) { // Private fields
i::Handle<i::String> name(
i::String::cast(i::Symbol::cast(*key).description()), isolate);
names_out->push_back(Utils::ToLocal(name));
@@ -958,14 +985,14 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
}
#if V8_ENABLE_WEBASSEMBLY
-void TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
+void EnterDebuggingForIsolate(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::wasm::GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
+ i::wasm::GetWasmEngine()->EnterDebuggingForIsolate(isolate);
}
-void TierUpAllModulesPerIsolate(Isolate* v8_isolate) {
+void LeaveDebuggingForIsolate(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::wasm::GetWasmEngine()->TierUpAllModulesPerIsolate(isolate);
+ i::wasm::GetWasmEngine()->LeaveDebuggingForIsolate(isolate);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1054,27 +1081,25 @@ Local<Function> GetBuiltin(Isolate* v8_isolate, Builtin requested_builtin) {
void SetConsoleDelegate(Isolate* v8_isolate, ConsoleDelegate* delegate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ DCHECK_NO_SCRIPT_NO_EXCEPTION(isolate);
if (delegate == nullptr) {
- DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(isolate);
isolate->set_console_delegate(nullptr);
} else {
- DCHECK_NO_SCRIPT_NO_EXCEPTION(isolate);
isolate->set_console_delegate(delegate);
}
}
ConsoleCallArguments::ConsoleCallArguments(
const v8::FunctionCallbackInfo<v8::Value>& info)
- : v8::FunctionCallbackInfo<v8::Value>(nullptr, info.values_, info.length_) {
-}
+ : isolate_(info.GetIsolate()),
+ values_(info.values_),
+ length_(info.length_) {}
ConsoleCallArguments::ConsoleCallArguments(
- const internal::BuiltinArguments& args)
- : v8::FunctionCallbackInfo<v8::Value>(
- nullptr,
- // Drop the first argument (receiver, i.e. the "console" object).
- args.length() > 1 ? args.address_of_first_argument() : nullptr,
- args.length() - 1) {}
+ internal::Isolate* isolate, const internal::BuiltinArguments& args)
+ : isolate_(reinterpret_cast<v8::Isolate*>(isolate)),
+ values_(args.length() > 1 ? args.address_of_first_argument() : nullptr),
+ length_(args.length() - 1) {}
v8::Local<v8::Message> CreateMessageFromException(
Isolate* v8_isolate, v8::Local<v8::Value> v8_error) {
@@ -1389,10 +1414,6 @@ MaybeLocal<Message> GetMessageFromPromise(Local<Promise> p) {
i::Handle<i::JSMessageObject>::cast(maybeMessage));
}
-bool isExperimentalRemoveInternalScopesPropertyEnabled() {
- return i::v8_flags.experimental_remove_internal_scopes_property;
-}
-
void RecordAsyncStackTaggingCreateTaskCall(v8::Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->CountUsage(v8::Isolate::kAsyncStackTaggingCreateTaskCall);
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 3d9f7c5c9f..95d6a8177b 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -75,19 +75,25 @@ V8_EXPORT_PRIVATE void ClearBreakOnNextFunctionCall(Isolate* isolate);
*/
MaybeLocal<Array> GetInternalProperties(Isolate* isolate, Local<Value> value);
+enum class PrivateMemberFilter {
+ kPrivateMethods = 1,
+ kPrivateFields = 1 << 1,
+ kPrivateAccessors = 1 << 2,
+};
+
/**
+ * Retrieve both instance and static private members on an object.
+ * filter should be a combination of PrivateMemberFilter.
* Returns through the out parameters names_out a vector of names
- * in v8::String for private members, including fields, methods,
- * accessors specific to the value type.
- * The values are returned through the out parameter values_out in the
- * corresponding indices. Private fields and methods are returned directly
- * while accessors are returned as v8::debug::AccessorPair. Missing components
- * in the accessor pairs are null.
+ * in v8::String and through values_out the corresponding values.
+ * Private fields and methods are returned directly while accessors are
+ * returned as v8::debug::AccessorPair. Missing components in the accessor
+ * pairs are null.
* If an exception occurs, false is returned. Otherwise true is returned.
* Results will be allocated in the current context and handle scope.
*/
V8_EXPORT_PRIVATE bool GetPrivateMembers(Local<Context> context,
- Local<Object> value,
+ Local<Object> value, int filter,
std::vector<Local<Value>>* names_out,
std::vector<Local<Value>>* values_out);
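
With the new int filter parameter, callers of GetPrivateMembers choose which kinds of private members to retrieve by OR-ing PrivateMemberFilter bits. A hedged usage sketch against the declaration above; this is V8-internal debug API (declared in src/debug/debug-interface.h), so the surrounding context/object setup is assumed:

#include <vector>
// Requires the V8-internal header "src/debug/debug-interface.h".

// Sketch: request only private fields and accessors, skipping methods.
// Assumes valid `context` and `object` handles inside a HandleScope.
bool CollectFieldsAndAccessors(v8::Local<v8::Context> context,
                               v8::Local<v8::Object> object,
                               std::vector<v8::Local<v8::Value>>* names,
                               std::vector<v8::Local<v8::Value>>* values) {
  int filter =
      static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateFields) |
      static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateAccessors);
  // Private fields come back as plain values; accessors come back as
  // v8::debug::AccessorPair objects with possibly-null components.
  return v8::debug::GetPrivateMembers(context, object, filter, names, values);
}
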
@@ -158,7 +164,8 @@ struct LiveEditResult {
OK,
COMPILE_ERROR,
BLOCKED_BY_RUNNING_GENERATOR,
- BLOCKED_BY_ACTIVE_FUNCTION
+ BLOCKED_BY_ACTIVE_FUNCTION,
+ BLOCKED_BY_TOP_LEVEL_ES_MODULE_CHANGE,
};
Status status = OK;
bool stack_changed = false;
@@ -287,14 +294,15 @@ class DebugDelegate {
v8::Local<v8::Context> paused_context,
const std::vector<debug::BreakpointId>& inspector_break_points_hit,
base::EnumSet<BreakReason> break_reasons = {}) {}
- enum PauseAfterInstrumentation {
- kPauseAfterInstrumentationRequested,
- kNoPauseAfterInstrumentationRequested
+ enum class ActionAfterInstrumentation {
+ kPause,
+ kPauseIfBreakpointsHit,
+ kContinue
};
- virtual PauseAfterInstrumentation BreakOnInstrumentation(
+ virtual ActionAfterInstrumentation BreakOnInstrumentation(
v8::Local<v8::Context> paused_context,
const debug::BreakpointId instrumentationId) {
- return kNoPauseAfterInstrumentationRequested;
+ return ActionAfterInstrumentation::kPauseIfBreakpointsHit;
}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
@@ -309,14 +317,21 @@ class DebugDelegate {
int column) {
return false;
}
+
+ // Called every time a breakpoint condition is evaluated. This method is
+ // called before `BreakProgramRequested` if the condition is truthy.
+ virtual void BreakpointConditionEvaluated(v8::Local<v8::Context> context,
+ debug::BreakpointId breakpoint_id,
+ bool exception_thrown,
+ v8::Local<v8::Value> exception) {}
};
V8_EXPORT_PRIVATE void SetDebugDelegate(Isolate* isolate,
DebugDelegate* listener);
#if V8_ENABLE_WEBASSEMBLY
-V8_EXPORT_PRIVATE void TierDownAllModulesPerIsolate(Isolate* isolate);
-V8_EXPORT_PRIVATE void TierUpAllModulesPerIsolate(Isolate* isolate);
+V8_EXPORT_PRIVATE void EnterDebuggingForIsolate(Isolate* isolate);
+V8_EXPORT_PRIVATE void LeaveDebuggingForIsolate(Isolate* isolate);
#endif // V8_ENABLE_WEBASSEMBLY
class AsyncEventDelegate {
@@ -499,6 +514,7 @@ class V8_EXPORT_PRIVATE StackTraceIterator {
virtual v8::Local<v8::String> GetFunctionDebugName() const = 0;
virtual v8::Local<v8::debug::Script> GetScript() const = 0;
virtual debug::Location GetSourceLocation() const = 0;
+ virtual debug::Location GetFunctionLocation() const = 0;
virtual v8::Local<v8::Function> GetFunction() const = 0;
virtual std::unique_ptr<ScopeIterator> GetScopeIterator() const = 0;
virtual bool CanBeRestarted() const = 0;
@@ -678,8 +694,6 @@ AccessorPair* AccessorPair::Cast(v8::Value* value) {
MaybeLocal<Message> GetMessageFromPromise(Local<Promise> promise);
-bool isExperimentalRemoveInternalScopesPropertyEnabled();
-
void RecordAsyncStackTaggingCreateTaskCall(v8::Isolate* isolate);
void NotifyDebuggerPausedEventSent(v8::Isolate* isolate);
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index da33c87b90..89dbdde1b4 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -225,10 +225,9 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
}
if (strategy == ReparseStrategy::kScriptIfNeeded) {
- CHECK(v8_flags.experimental_reuse_locals_blocklists);
Object maybe_block_list = isolate_->LocalsBlockListCacheGet(scope_info);
calculate_blocklists_ = maybe_block_list.IsTheHole();
- strategy = calculate_blocklists_ ? ReparseStrategy::kScript
+ strategy = calculate_blocklists_ ? ReparseStrategy::kScriptIfNeeded
: ReparseStrategy::kFunctionLiteral;
}
@@ -529,6 +528,10 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
case EVAL_SCOPE:
DCHECK_IMPLIES(NeedsContext(), context_->IsEvalContext());
return ScopeTypeEval;
+ case SHADOW_REALM_SCOPE:
+ DCHECK_IMPLIES(NeedsContext(), context_->IsNativeContext());
+ // TODO(v8:11989): New ScopeType for ShadowRealms?
+ return ScopeTypeScript;
}
UNREACHABLE();
}
@@ -921,7 +924,16 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode,
case VariableLocation::CONTEXT:
if (mode == Mode::STACK) continue;
DCHECK(var->IsContextSlot());
- value = handle(context_->get(index), isolate_);
+
+ // We know of at least one open bug where the context and scope chain
+ // don't match (https://crbug.com/753338).
+ // Return `undefined` if the context's ScopeInfo doesn't know anything
+ // about this variable.
+ if (context_->scope_info().ContextSlotIndex(var->name()) != index) {
+ value = isolate_->factory()->undefined_value();
+ } else {
+ value = handle(context_->get(index), isolate_);
+ }
break;
case VariableLocation::MODULE: {
@@ -1069,6 +1081,14 @@ bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
case VariableLocation::CONTEXT:
DCHECK(var->IsContextSlot());
+
+ // We know of at least one open bug where the context and scope chain
+ // don't match (https://crbug.com/753338).
+ // Skip the write if the context's ScopeInfo doesn't know anything
+ // about this variable.
+ if (context_->scope_info().ContextSlotIndex(variable_name) != index) {
+ return false;
+ }
context_->set(index, *new_value);
return true;
@@ -1308,7 +1328,6 @@ void ScopeIterator::MaybeCollectAndStoreLocalBlocklists() const {
return;
}
- CHECK(v8_flags.experimental_reuse_locals_blocklists);
DCHECK(isolate_
->LocalsBlockListCacheGet(
handle(function_->shared().scope_info(), isolate_))
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index f8638a2886..d11a60592d 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -41,12 +41,10 @@ class V8_EXPORT_PRIVATE ScopeIterator {
static const int kScopeDetailsSize = 6;
enum class ReparseStrategy {
- kScript,
kFunctionLiteral,
// Checks whether the paused function (and its scope chain) already has
// its blocklist calculated and re-parses the whole script if not.
// Otherwise only the function literal is re-parsed.
- // Only vaild with enabled "experimental_reuse_locals_blocklists" flag.
kScriptIfNeeded,
};
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 9f2097895f..0414fdac96 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -4,6 +4,7 @@
#include "src/debug/debug-stack-trace-iterator.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-scope-iterator.h"
@@ -147,6 +148,26 @@ debug::Location DebugStackTraceIterator::GetSourceLocation() const {
return script->GetSourceLocation(frame_inspector_->GetSourcePosition());
}
+debug::Location DebugStackTraceIterator::GetFunctionLocation() const {
+ DCHECK(!Done());
+
+ v8::Local<v8::Function> func = this->GetFunction();
+ if (!func.IsEmpty()) {
+ return v8::debug::Location(func->GetScriptLineNumber(),
+ func->GetScriptColumnNumber());
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (iterator_.frame()->is_wasm()) {
+ auto frame = WasmFrame::cast(iterator_.frame());
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate_);
+ auto offset =
+ instance->module()->functions[frame->function_index()].code.offset();
+ return v8::debug::Location(0, offset);
+ }
+#endif
+ return v8::debug::Location();
+}
+
v8::Local<v8::Function> DebugStackTraceIterator::GetFunction() const {
DCHECK(!Done());
if (!frame_inspector_->IsJavaScript()) return v8::Local<v8::Function>();
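
GetFunctionLocation above derives a debug::Location from the function's script line and column when a JS function is available, and falls back to the wasm function's code offset otherwise. The line/column accessors are public v8::Function API, so the JS half can be sketched standalone (error handling trimmed):

#include <utility>
#include <v8.h>

// Sketch: where was this function defined? Returns {-1, -1} when unknown.
// GetScriptLineNumber/GetScriptColumnNumber are public v8::Function API.
std::pair<int, int> FunctionDefinitionSite(v8::Local<v8::Function> func) {
  if (func.IsEmpty()) return {-1, -1};
  return {func->GetScriptLineNumber(), func->GetScriptColumnNumber()};
}
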
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.h b/deps/v8/src/debug/debug-stack-trace-iterator.h
index 8bddcf9a10..dd2d743caf 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.h
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.h
@@ -28,6 +28,7 @@ class DebugStackTraceIterator final : public debug::StackTraceIterator {
v8::Local<v8::String> GetFunctionDebugName() const override;
v8::Local<v8::debug::Script> GetScript() const override;
debug::Location GetSourceLocation() const override;
+ debug::Location GetFunctionLocation() const override;
v8::Local<v8::Function> GetFunction() const override;
std::unique_ptr<v8::debug::ScopeIterator> GetScopeIterator() const override;
bool CanBeRestarted() const override;
@@ -42,7 +43,7 @@ class DebugStackTraceIterator final : public debug::StackTraceIterator {
void UpdateInlineFrameIndexAndResumableFnOnStack();
Isolate* isolate_;
- StackTraceFrameIterator iterator_;
+ DebuggableStackFrameIterator iterator_;
std::unique_ptr<FrameInspector> frame_inspector_;
int inlined_frame_index_;
bool is_top_frame_;
diff --git a/deps/v8/src/debug/debug-wasm-objects.cc b/deps/v8/src/debug/debug-wasm-objects.cc
index 1a4eca61fc..22d7c26a49 100644
--- a/deps/v8/src/debug/debug-wasm-objects.cc
+++ b/deps/v8/src/debug/debug-wasm-objects.cc
@@ -948,6 +948,10 @@ Handle<WasmValueObject> WasmValueObject::New(
isolate);
}
t = GetRefTypeName(isolate, value.type(), module_object);
+ } else if (ref->IsWasmNull()) {
+ // TODO(manoskouk): Is this value correct?
+ v = isolate->factory()->null_value();
+ t = GetRefTypeName(isolate, value.type(), module_object);
} else if (ref->IsJSFunction() || ref->IsSmi() || ref->IsNull() ||
ref->IsString() ||
value.type().is_reference_to(wasm::HeapType::kExtern) ||
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 9c6c5f4ca5..ebbbe4d9a2 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -119,6 +119,11 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
return it.GetBreakLocation();
}
+bool BreakLocation::IsPausedInJsFunctionEntry(JavaScriptFrame* frame) {
+ auto summary = FrameSummary::GetTop(frame);
+ return summary.code_offset() == kFunctionEntryBytecodeOffset;
+}
+
MaybeHandle<FixedArray> Debug::CheckBreakPointsForLocations(
Handle<DebugInfo> debug_info, std::vector<BreakLocation>& break_locations,
bool* has_break_points) {
@@ -204,7 +209,10 @@ int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
bool BreakLocation::HasBreakPoint(Isolate* isolate,
Handle<DebugInfo> debug_info) const {
// First check whether there is a break point with the same source position.
- if (!debug_info->HasBreakPoint(isolate, position_)) return false;
+ if (!debug_info->HasBreakInfo() ||
+ !debug_info->HasBreakPoint(isolate, position_)) {
+ return false;
+ }
if (debug_info->CanBreakAtEntry()) {
DCHECK_EQ(Debug::kBreakAtEntryPosition, position_);
return debug_info->BreakAtEntry();
@@ -382,6 +390,7 @@ void Debug::ThreadInit() {
thread_local_.break_frame_id_ = StackFrameId::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
+ thread_local_.last_bytecode_offset_ = kFunctionEntryBytecodeOffset;
thread_local_.last_frame_count_ = -1;
thread_local_.fast_forward_to_return_ = false;
thread_local_.ignore_step_into_function_ = Smi::zero();
@@ -419,7 +428,7 @@ char* Debug::RestoreDebug(char* storage) {
int current_frame_count = CurrentFrameCount();
int target_frame_count = thread_local_.target_frame_count_;
DCHECK(current_frame_count >= target_frame_count);
- StackTraceFrameIterator frames_it(isolate_);
+ DebuggableStackFrameIterator frames_it(isolate_);
while (current_frame_count > target_frame_count) {
current_frame_count -= frames_it.FrameFunctionCount();
frames_it.Advance();
@@ -480,11 +489,12 @@ void Debug::Unload() {
debug_delegate_ = nullptr;
}
-debug::DebugDelegate::PauseAfterInstrumentation
+debug::DebugDelegate::ActionAfterInstrumentation
Debug::OnInstrumentationBreak() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
if (!debug_delegate_) {
- return debug::DebugDelegate::kNoPauseAfterInstrumentationRequested;
+ return debug::DebugDelegate::ActionAfterInstrumentation::
+ kPauseIfBreakpointsHit;
}
DCHECK(in_debug_scope());
HandleScope scope(isolate_);
@@ -517,11 +527,19 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
IsBreakOnInstrumentation(debug_info, location);
bool shouldPauseAfterInstrumentation = false;
if (hitInstrumentationBreak) {
- debug::DebugDelegate::PauseAfterInstrumentation pauseDuringInstrumentation =
+ debug::DebugDelegate::ActionAfterInstrumentation action =
OnInstrumentationBreak();
- shouldPauseAfterInstrumentation =
- pauseDuringInstrumentation ==
- debug::DebugDelegate::kPauseAfterInstrumentationRequested;
+ switch (action) {
+ case debug::DebugDelegate::ActionAfterInstrumentation::kPause:
+ shouldPauseAfterInstrumentation = true;
+ break;
+ case debug::DebugDelegate::ActionAfterInstrumentation::
+ kPauseIfBreakpointsHit:
+ shouldPauseAfterInstrumentation = false;
+ break;
+ case debug::DebugDelegate::ActionAfterInstrumentation::kContinue:
+ return;
+ }
}
// Find actual break points, if any, and trigger debug break event.
@@ -533,8 +551,6 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
if (!break_points_hit.is_null() || break_on_next_function_call() ||
scheduled_break) {
StepAction lastStepAction = last_step_action();
- DCHECK_IMPLIES(scheduled_break_on_function_call(),
- lastStepAction == StepNone);
debug::BreakReasons break_reasons;
if (scheduled_break) {
break_reasons.Add(debug::BreakReason::kScheduled);
@@ -606,10 +622,19 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
return;
}
FrameSummary summary = FrameSummary::GetTop(frame);
+ const bool frame_or_statement_changed =
+ current_frame_count != last_frame_count ||
+ thread_local_.last_statement_position_ !=
+ summary.SourceStatementPosition();
+ // If we stayed on the same frame and reached the same bytecode offset
+ // since the last step, we are in a loop and should pause. Otherwise
+ // we keep "stepping" through the loop without ever actually pausing.
+ const bool potential_single_statement_loop =
+ current_frame_count == last_frame_count &&
+ thread_local_.last_bytecode_offset_ == summary.code_offset();
step_break = step_break || location.IsReturn() ||
- current_frame_count != last_frame_count ||
- thread_local_.last_statement_position_ !=
- summary.SourceStatementPosition();
+ potential_single_statement_loop ||
+ frame_or_statement_changed;
break;
}
}
@@ -734,13 +759,27 @@ bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
condition, throw_on_side_effect);
}
- if (!maybe_result.ToHandle(&result)) {
- if (isolate_->has_pending_exception()) {
- isolate_->clear_pending_exception();
- }
- return false;
+ Handle<Object> maybe_exception;
+ bool exception_thrown = true;
+ if (maybe_result.ToHandle(&result)) {
+ exception_thrown = false;
+ } else if (isolate_->has_pending_exception()) {
+ maybe_exception = handle(isolate_->pending_exception(), isolate_);
+ isolate_->clear_pending_exception();
+ }
+
+ CHECK(in_debug_scope());
+ DisableBreak no_recursive_break(this);
+
+ {
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebuggerCallback);
+ Handle<Context> native_context(isolate_->native_context());
+ debug_delegate_->BreakpointConditionEvaluated(
+ v8::Utils::ToLocal(native_context), break_point->id(), exception_thrown,
+ v8::Utils::ToLocal(maybe_exception));
}
- return result->BooleanValue(isolate_);
+
+ return !result.is_null() ? result->BooleanValue(isolate_) : false;
}
bool Debug::SetBreakpoint(Handle<SharedFunctionInfo> shared,
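
CheckBreakPoint now reports every breakpoint-condition evaluation to the debug delegate, including whether the condition threw, before deciding whether to pause. A hedged sketch of a delegate consuming that hook; the virtual signature comes from the debug-interface.h hunk above (V8-internal API), and the logging is purely illustrative:

#include <cstdio>
// Requires the V8-internal header "src/debug/debug-interface.h".

// Sketch: a delegate that records failing breakpoint conditions.
class LoggingDebugDelegate : public v8::debug::DebugDelegate {
 public:
  void BreakpointConditionEvaluated(v8::Local<v8::Context> context,
                                    v8::debug::BreakpointId breakpoint_id,
                                    bool exception_thrown,
                                    v8::Local<v8::Value> exception) override {
    if (exception_thrown) {
      std::fprintf(stderr, "condition for breakpoint %d threw\n",
                   static_cast<int>(breakpoint_id));
    }
  }
};
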
@@ -1132,7 +1171,7 @@ void Debug::PrepareStepOnThrow() {
int current_frame_count = CurrentFrameCount();
// Iterate through the JavaScript stack looking for handlers.
- JavaScriptFrameIterator it(isolate_);
+ JavaScriptStackFrameIterator it(isolate_);
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) break;
@@ -1210,7 +1249,7 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.last_step_action_ = step_action;
- StackTraceFrameIterator frames_it(isolate_, frame_id);
+ DebuggableStackFrameIterator frames_it(isolate_, frame_id);
CommonFrame* frame = frames_it.frame();
BreakLocation location = BreakLocation::Invalid();
@@ -1253,9 +1292,8 @@ void Debug::PrepareStep(StepAction step_action) {
// A step-next in blackboxed function is a step-out.
if (step_action == StepOver && IsBlackboxed(shared)) step_action = StepOut;
- thread_local_.last_statement_position_ =
- summary.abstract_code()->SourceStatementPosition(isolate_,
- summary.code_offset());
+ thread_local_.last_statement_position_ = summary.SourceStatementPosition();
+ thread_local_.last_bytecode_offset_ = summary.code_offset();
thread_local_.last_frame_count_ = current_frame_count;
// No longer perform the current async step.
clear_suspended_generator();
@@ -1282,6 +1320,7 @@ void Debug::PrepareStep(StepAction step_action) {
case StepOut: {
// Clear last position info. For stepping out it does not matter.
thread_local_.last_statement_position_ = kNoSourcePosition;
+ thread_local_.last_bytecode_offset_ = kFunctionEntryBytecodeOffset;
thread_local_.last_frame_count_ = -1;
if (!shared.is_null()) {
if (!location.IsReturnOrSuspend() && !IsBlackboxed(shared)) {
@@ -1397,6 +1436,7 @@ void Debug::ClearStepping() {
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
+ thread_local_.last_bytecode_offset_ = kFunctionEntryBytecodeOffset;
thread_local_.ignore_step_into_function_ = Smi::zero();
thread_local_.fast_forward_to_return_ = false;
thread_local_.last_frame_count_ = -1;
@@ -1433,7 +1473,8 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
DisallowGarbageCollection diallow_gc;
bool deopt_all = shared_ == SharedFunctionInfo();
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ for (JavaScriptStackFrameIterator it(isolate, top); !it.done();
+ it.Advance()) {
if (!deopt_all && it.frame()->function().shared() != shared_) continue;
if (it.frame()->type() == StackFrame::BASELINE) {
BaselineFrame* frame = BaselineFrame::cast(it.frame());
@@ -1482,13 +1523,12 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
DCHECK(shared.HasBaselineCode());
- Isolate* isolate = shared.GetIsolate();
DiscardBaselineCodeVisitor visitor(shared);
- visitor.VisitThread(isolate, isolate->thread_local_top());
- isolate->thread_manager()->IterateArchivedThreads(&visitor);
+ visitor.VisitThread(isolate_, isolate_->thread_local_top());
+ isolate_->thread_manager()->IterateArchivedThreads(&visitor);
// TODO(v8:11429): Avoid this heap walk somehow.
- HeapObjectIterator iterator(isolate->heap());
- auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+ HeapObjectIterator iterator(isolate_->heap());
+ auto trampoline = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
shared.FlushBaselineCode();
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
@@ -1526,29 +1566,11 @@ void Debug::DiscardAllBaselineCode() {
void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- // Deoptimize all code compiled from this shared function info including
- // inlining.
- isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
if (shared->HasBaselineCode()) {
DiscardBaselineCode(*shared);
}
-
- bool found_something = false;
- Code::OptimizedCodeIterator iterator(isolate_);
- do {
- Code code = iterator.Next();
- if (code.is_null()) break;
- if (code.Inlines(*shared)) {
- code.set_marked_for_deoptimization(true);
- found_something = true;
- }
- } while (true);
-
- if (found_something) {
- // Only go through with the deoptimization if something was found.
- Deoptimizer::DeoptimizeMarkedCode(isolate_);
- }
+ Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction(isolate_, shared);
}
void Debug::PrepareFunctionForDebugExecution(
@@ -1628,7 +1650,7 @@ void Debug::InstallDebugBreakTrampoline() {
if (!needs_to_use_trampoline) return;
- Handle<CodeT> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
+ Handle<Code> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
std::vector<Handle<JSFunction>> needs_compile;
using AccessorPairWithContext =
std::pair<Handle<AccessorPair>, Handle<NativeContext>>;
@@ -1932,7 +1954,8 @@ bool Debug::FindSharedFunctionInfosIntersectingRange(
for (const auto& candidate : candidates) {
IsCompiledScope is_compiled_scope(candidate->is_compiled_scope(isolate_));
if (!is_compiled_scope.is_compiled()) {
- // Code that cannot be compiled lazily are internal and not debuggable.
+ // InstructionStream that cannot be compiled lazily are internal and not
+ // debuggable.
DCHECK(candidate->allows_lazy_compilation());
if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
@@ -1997,7 +2020,8 @@ Handle<Object> Debug::FindInnermostContainingFunctionInfo(Handle<Script> script,
}
// If not, compile to reveal inner functions.
HandleScope scope(isolate_);
- // Code that cannot be compiled lazily are internal and not debuggable.
+ // InstructionStream that cannot be compiled lazily are internal and not
+ // debuggable.
DCHECK(shared.allows_lazy_compilation());
if (!Compiler::Compile(isolate_, handle(shared, isolate_),
Compiler::CLEAR_EXCEPTION, &is_compiled_scope)) {
@@ -2236,7 +2260,7 @@ bool Debug::IsExceptionBlackboxed(bool uncaught) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
// Uncaught exception is blackboxed if all current frames are blackboxed,
// caught exception if top frame is blackboxed.
- StackTraceFrameIterator it(isolate_);
+ DebuggableStackFrameIterator it(isolate_);
#if V8_ENABLE_WEBASSEMBLY
while (!it.done() && it.is_wasm()) it.Advance();
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2305,7 +2329,7 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
}
{
- JavaScriptFrameIterator it(isolate_);
+ JavaScriptStackFrameIterator it(isolate_);
// Check whether the top frame is blackboxed or the break location is muted.
if (!it.done() && (IsMutedAtCurrentLocation(it.frame()) ||
IsExceptionBlackboxed(uncaught))) {
@@ -2419,7 +2443,7 @@ bool Debug::ShouldBeSkipped() {
PostponeInterruptsScope no_interrupts(isolate_);
DisableBreak no_recursive_break(this);
- StackTraceFrameIterator iterator(isolate_);
+ DebuggableStackFrameIterator iterator(isolate_);
FrameSummary summary = iterator.GetTopValidFrame();
Handle<Object> script_obj = summary.script();
if (!script_obj->IsScript()) return false;
@@ -2440,7 +2464,7 @@ bool Debug::ShouldBeSkipped() {
bool Debug::AllFramesOnStackAreBlackboxed() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
HandleScope scope(isolate_);
- for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
+ for (DebuggableStackFrameIterator it(isolate_); !it.done(); it.Advance()) {
if (!it.is_javascript()) continue;
if (!IsFrameBlackboxed(it.javascript_frame())) return false;
}
@@ -2490,13 +2514,7 @@ void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
// inspector to filter scripts by native context.
script->set_context_data(isolate_->native_context()->debug_context_id());
if (ignore_events()) return;
-#if V8_ENABLE_WEBASSEMBLY
- if (!script->IsUserJavaScript() && script->type() != i::Script::TYPE_WASM) {
- return;
- }
-#else
- if (!script->IsUserJavaScript()) return;
-#endif // V8_ENABLE_WEBASSEMBLY
+ if (!script->IsSubjectToDebugging()) return;
if (!debug_delegate_) return;
SuppressDebug while_processing(this);
DebugScope debug_scope(this);
@@ -2511,7 +2529,7 @@ void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
}
int Debug::CurrentFrameCount() {
- StackTraceFrameIterator it(isolate_);
+ DebuggableStackFrameIterator it(isolate_);
if (break_frame_id() != StackFrameId::NO_ID) {
// Skip to break frame.
DCHECK(in_debug_scope());
@@ -2572,7 +2590,7 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode,
HandleScope scope(isolate_);
MaybeHandle<FixedArray> break_points;
{
- StackTraceFrameIterator it(isolate_);
+ DebuggableStackFrameIterator it(isolate_);
DCHECK(!it.done());
JavaScriptFrame* frame = it.frame()->is_java_script()
? JavaScriptFrame::cast(it.frame())
@@ -2586,8 +2604,8 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode,
// it's context. Instead, we step into the function and pause at the
// first official breakable position.
// This behavior mirrors "BreakOnNextFunctionCall".
- if (break_reasons.contains(v8::debug::BreakReason::kScheduled)) {
- CHECK_EQ(last_step_action(), StepAction::StepNone);
+ if (break_reasons.contains(v8::debug::BreakReason::kScheduled) &&
+ BreakLocation::IsPausedInJsFunctionEntry(frame)) {
thread_local_.scheduled_break_on_next_function_call_ = true;
PrepareStepIn(function);
return;
@@ -2644,7 +2662,7 @@ void Debug::PrintBreakLocation() {
if (!v8_flags.print_break_location) return;
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
HandleScope scope(isolate_);
- StackTraceFrameIterator iterator(isolate_);
+ DebuggableStackFrameIterator iterator(isolate_);
if (iterator.done()) return;
CommonFrame* frame = iterator.frame();
std::vector<FrameSummary> frames;
@@ -2697,7 +2715,7 @@ DebugScope::DebugScope(Debug* debug)
// Create the new break info. If there is no proper frames there is no break
// frame id.
- StackTraceFrameIterator it(isolate());
+ DebuggableStackFrameIterator it(isolate());
bool has_frames = !it.done();
debug_->thread_local_.break_frame_id_ =
has_frames ? it.frame()->id() : StackFrameId::NO_ID;
@@ -2793,7 +2811,8 @@ void Debug::StopSideEffectCheckMode() {
DCHECK(isolate_->debug_execution_mode() == DebugInfo::kSideEffects);
if (side_effect_check_failed_) {
DCHECK(isolate_->has_pending_exception());
- DCHECK(isolate_->is_execution_termination_pending());
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ isolate_->is_execution_termination_pending());
// Convert the termination exception into a regular exception.
isolate_->CancelTerminateExecution();
isolate_->Throw(*isolate_->factory()->NewEvalError(
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index a47b693b5e..cf49ce66e8 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -69,6 +69,7 @@ class BreakLocation {
static BreakLocation Invalid() { return BreakLocation(-1, NOT_DEBUG_BREAK); }
static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame);
+ static bool IsPausedInJsFunctionEntry(JavaScriptFrame* frame);
static void AllAtCurrentStatement(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame,
@@ -222,7 +223,7 @@ class V8_EXPORT_PRIVATE Debug {
// Debug event triggers.
void OnDebugBreak(Handle<FixedArray> break_points_hit, StepAction stepAction,
debug::BreakReasons break_reasons = {});
- debug::DebugDelegate::PauseAfterInstrumentation OnInstrumentationBreak();
+ debug::DebugDelegate::ActionAfterInstrumentation OnInstrumentationBreak();
base::Optional<Object> OnThrow(Handle<Object> exception)
V8_WARN_UNUSED_RESULT;
@@ -576,6 +577,9 @@ class V8_EXPORT_PRIVATE Debug {
// Source statement position from last step next action.
int last_statement_position_;
+ // Bytecode offset from last step next action.
+ int last_bytecode_offset_;
+
// Frame pointer from last step next or step frame action.
int last_frame_count_;
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 71bf7fd187..76bd29651d 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -9,7 +9,9 @@
#include "include/v8-function-callback.h"
#include "include/v8-local-handle.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "v8-isolate.h"
namespace v8 {
@@ -89,15 +91,30 @@ class V8_EXPORT_PRIVATE BreakLocation : public Location {
BreakLocationType type_;
};
-class ConsoleCallArguments : private v8::FunctionCallbackInfo<v8::Value> {
+class ConsoleCallArguments {
public:
- int Length() const { return v8::FunctionCallbackInfo<v8::Value>::Length(); }
- V8_INLINE Local<Value> operator[](int i) const {
- return v8::FunctionCallbackInfo<v8::Value>::operator[](i);
+ int Length() const { return length_; }
+ /**
+ * Accessor for the available arguments. Returns `undefined` if the index
+ * is out of bounds.
+ */
+ V8_INLINE v8::Local<v8::Value> operator[](int i) const {
+ // values_ points to the first argument.
+ if (i < 0 || length_ <= i) return Undefined(GetIsolate());
+ DCHECK_NOT_NULL(values_);
+ return Local<Value>::FromSlot(values_ + i);
}
+ V8_INLINE v8::Isolate* GetIsolate() const { return isolate_; }
+
explicit ConsoleCallArguments(const v8::FunctionCallbackInfo<v8::Value>&);
- explicit ConsoleCallArguments(const internal::BuiltinArguments&);
+ explicit ConsoleCallArguments(internal::Isolate* isolate,
+ const internal::BuiltinArguments&);
+
+ private:
+ v8::Isolate* isolate_;
+ internal::Address* values_;
+ int length_;
};
class ConsoleContext {
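
The reworked ConsoleCallArguments above no longer derives from v8::FunctionCallbackInfo; it carries the isolate, a raw argument-slot pointer, and a length, and its operator[] returns undefined for out-of-range indices. Below is a minimal V8-internal sketch of walking arguments through this interface. The free function name is made up for illustration; only Length(), operator[], and GetIsolate() come from the class shown above, and the usual V8 build context is assumed.

#include <cstdio>

#include "include/v8.h"                 // Local, HandleScope, String::Utf8Value
#include "src/debug/interface-types.h"  // v8::debug::ConsoleCallArguments (V8-internal)

// Hypothetical helper: dumps every console argument as UTF-8 text.
void DumpConsoleArguments(const v8::debug::ConsoleCallArguments& args) {
  v8::Isolate* isolate = args.GetIsolate();
  v8::HandleScope scope(isolate);
  for (int i = 0; i < args.Length(); ++i) {
    v8::Local<v8::Value> value = args[i];
    v8::String::Utf8Value utf8(isolate, value);
    std::printf("arg[%d] = %s\n", i, *utf8 ? *utf8 : "<unprintable>");
  }
  // Indices at or past Length() now yield undefined instead of reading past
  // the end of the argument slots.
  bool out_of_range_is_undefined = args[args.Length()]->IsUndefined();
  (void)out_of_range_is_undefined;
}
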
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 83beb194d8..b7ae4aa3ce 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -654,7 +654,8 @@ class FunctionDataMap : public ThreadVisitor {
}
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ for (JavaScriptStackFrameIterator it(isolate, top); !it.done();
+ it.Advance()) {
std::vector<Handle<SharedFunctionInfo>> sfis;
it.frame()->GetFunctions(&sfis);
for (auto& sfi : sfis) {
@@ -701,6 +702,11 @@ bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
Handle<SharedFunctionInfo> sfi;
if (!data->shared.ToHandle(&sfi)) {
continue;
+ } else if (IsModule(sfi->kind())) {
+ DCHECK(script->origin_options().IsModule() && sfi->is_toplevel());
+ result->status =
+ debug::LiveEditResult::BLOCKED_BY_TOP_LEVEL_ES_MODULE_CHANGE;
+ return false;
} else if (data->stack_position == FunctionData::ON_STACK) {
result->status = debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION;
return false;
@@ -975,7 +981,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
isolate->compilation_cache()->Remove(sfi);
for (auto& js_function : data->js_functions) {
js_function->set_shared(*new_sfi);
- js_function->set_code(js_function->shared().GetCode(), kReleaseStore);
+ js_function->set_code(js_function->shared().GetCode(isolate));
js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell());
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
index f3f891c30b..c523f3d70a 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
@@ -395,7 +395,7 @@ GdbServer::DebugDelegate::DebugDelegate(Isolate* isolate, GdbServer* gdb_server)
// Register the delegate
isolate_->debug()->SetDebugDelegate(this);
- v8::debug::TierDownAllModulesPerIsolate((v8::Isolate*)isolate_);
+ v8::debug::EnterDebuggingForIsolate((v8::Isolate*)isolate_);
v8::debug::ChangeBreakOnException((v8::Isolate*)isolate_,
v8::debug::BreakOnUncaughtException);
}
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index a96c2520fd..8e0580028a 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -116,7 +116,7 @@ std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
FrameSummary::WasmFrameSummary const& wasm = summary.AsWasm();
offset = GetWasmFunctionOffset(wasm.wasm_instance()->module(),
wasm.function_index()) +
- wasm.byte_offset();
+ wasm.code_offset();
script = wasm.script();
bool zeroth_frame = call_stack.empty();
@@ -147,7 +147,7 @@ std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
// static
std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
- StackTraceFrameIterator* frame_it, uint32_t* frame_index) {
+ DebuggableStackFrameIterator* frame_it, uint32_t* frame_index) {
while (!frame_it->done()) {
StackFrame* const frame = frame_it->frame();
switch (frame->type()) {
@@ -189,7 +189,7 @@ std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
// static
Handle<WasmInstanceObject> WasmModuleDebug::GetWasmInstance(
Isolate* isolate, uint32_t frame_index) {
- StackTraceFrameIterator frame_it(isolate);
+ DebuggableStackFrameIterator frame_it(isolate);
std::vector<FrameSummary> frames = FindWasmFrame(&frame_it, &frame_index);
if (frames.empty()) {
return Handle<WasmInstanceObject>::null();
@@ -226,7 +226,7 @@ bool WasmModuleDebug::GetWasmLocal(Isolate* isolate, uint32_t frame_index,
uint32_t buffer_size, uint32_t* size) {
HandleScope handles(isolate);
- StackTraceFrameIterator frame_it(isolate);
+ DebuggableStackFrameIterator frame_it(isolate);
std::vector<FrameSummary> frames = FindWasmFrame(&frame_it, &frame_index);
if (frames.empty()) {
return false;
@@ -259,7 +259,7 @@ bool WasmModuleDebug::GetWasmStackValue(Isolate* isolate, uint32_t frame_index,
uint32_t buffer_size, uint32_t* size) {
HandleScope handles(isolate);
- StackTraceFrameIterator frame_it(isolate);
+ DebuggableStackFrameIterator frame_it(isolate);
std::vector<FrameSummary> frames = FindWasmFrame(&frame_it, &frame_index);
if (frames.empty()) {
return false;
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h
index 726e512a12..81dd1de469 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h
@@ -93,7 +93,7 @@ class WasmModuleDebug {
// Returns an empty array if the frame specified does not correspond to a Wasm
// stack frame.
static std::vector<FrameSummary> FindWasmFrame(
- StackTraceFrameIterator* frame_it, uint32_t* frame_index);
+ DebuggableStackFrameIterator* frame_it, uint32_t* frame_index);
// Converts a WasmValue into an array of bytes.
static bool GetWasmValue(const wasm::WasmValue& wasm_value, uint8_t* buffer,
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index 1eadb8700f..5e686659e0 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -53,9 +53,11 @@ namespace internal {
V(NotAString, "not a String") \
V(NotASymbol, "not a Symbol") \
V(NotInt32, "not int32") \
+ V(NotUint32, "not unsigned int32") \
V(OutOfBounds, "out of bounds") \
V(Overflow, "overflow") \
V(Smi, "Smi") \
+ V(StoreToConstant, "Storing to a constant field") \
V(SuspendGeneratorIsDead, "SuspendGenerator is in a dead branch") \
V(TransitionedToMonomorphicIC, "IC transitioned to monomorphic") \
V(TransitionedToMegamorphicIC, "IC transitioned to megamorphic") \
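
The two new entries above (NotUint32 and StoreToConstant) extend V8's deopt-reason X-macro list. As a reminder of how such a V(Name, "message") list is conventionally consumed, here is a small self-contained sketch that expands a stand-in list into an enum plus a parallel message table. The list contents and names below are invented for illustration; the real expansion lives elsewhere in deoptimize-reason.h/.cc.

#include <cstdio>

// Hypothetical stand-in for a V(Name, "message") list like the one above.
#define DEMO_DEOPT_REASON_LIST(V)           \
  V(NotInt32, "not int32")                  \
  V(NotUint32, "not unsigned int32")        \
  V(StoreToConstant, "Storing to a constant field")

// Expand once into an enum...
enum class DemoDeoptimizeReason {
#define DEMO_ENUM(Name, message) k##Name,
  DEMO_DEOPT_REASON_LIST(DEMO_ENUM)
#undef DEMO_ENUM
};

// ...and once into a parallel message table, kept in sync by construction.
static const char* const kDemoDeoptimizeReasonMessages[] = {
#define DEMO_MESSAGE(Name, message) message,
    DEMO_DEOPT_REASON_LIST(DEMO_MESSAGE)
#undef DEMO_MESSAGE
};

int main() {
  auto reason = DemoDeoptimizeReason::kStoreToConstant;
  std::printf("deopt reason: %s\n",
              kDemoDeoptimizeReasonMessages[static_cast<int>(reason)]);
  return 0;
}
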
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 199a76564a..ee5ee898a0 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -175,22 +175,6 @@ class FrameWriter {
unsigned top_offset_;
};
-Code Deoptimizer::FindDeoptimizingCode(Address addr) {
- if (function_.IsHeapObject()) {
- // Search all deoptimizing code in the native context of the function.
- Isolate* isolate = isolate_;
- NativeContext native_context = function_.native_context();
- Object element = native_context.DeoptimizedCodeListHead();
- while (!element.IsUndefined(isolate)) {
- CodeT code = CodeT::cast(element);
- CHECK(CodeKindCanDeoptimize(code.kind()));
- if (code.contains(isolate, addr)) return FromCodeT(code);
- element = code.next_code_link();
- }
- }
- return Code();
-}
-
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
@@ -245,10 +229,8 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
namespace {
class ActivationsFinder : public ThreadVisitor {
public:
- explicit ActivationsFinder(std::set<CodeT>* codes,
- CodeT topmost_optimized_code,
- bool safe_to_deopt_topmost_optimized_code)
- : codes_(codes) {
+ ActivationsFinder(GcSafeCode topmost_optimized_code,
+ bool safe_to_deopt_topmost_optimized_code) {
#ifdef DEBUG
topmost_ = topmost_optimized_code;
safe_to_deopt_ = safe_to_deopt_topmost_optimized_code;
@@ -261,19 +243,18 @@ class ActivationsFinder : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (it.frame()->is_optimized()) {
- CodeT code = it.frame()->LookupCodeT().ToCodeT();
+ GcSafeCode code = it.frame()->GcSafeLookupCode();
if (CodeKindCanDeoptimize(code.kind()) &&
code.marked_for_deoptimization()) {
- codes_->erase(code);
// Obtain the trampoline to the deoptimizer call.
int trampoline_pc;
if (code.is_maglevved()) {
- MaglevSafepointEntry safepoint =
- code.GetMaglevSafepointEntry(isolate, it.frame()->pc());
+ MaglevSafepointEntry safepoint = MaglevSafepointTable::FindEntry(
+ isolate, code, it.frame()->pc());
trampoline_pc = safepoint.trampoline_pc();
} else {
SafepointEntry safepoint =
- code.GetSafepointEntry(isolate, it.frame()->pc());
+ SafepointTable::FindEntry(isolate, code, it.frame()->pc());
trampoline_pc = safepoint.trampoline_pc();
}
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
@@ -282,7 +263,7 @@ class ActivationsFinder : public ThreadVisitor {
// Replace the current pc on the stack with the trampoline.
// TODO(v8:10026): avoid replacing a signed pointer.
Address* pc_addr = it.frame()->pc_address();
- Address new_pc = code.raw_instruction_start() + trampoline_pc;
+ Address new_pc = code.InstructionStart() + trampoline_pc;
PointerAuthentication::ReplacePC(pc_addr, new_pc, kSystemPointerSize);
}
}
@@ -290,23 +271,19 @@ class ActivationsFinder : public ThreadVisitor {
}
private:
- std::set<CodeT>* codes_;
-
#ifdef DEBUG
- CodeT topmost_;
+ GcSafeCode topmost_;
bool safe_to_deopt_;
#endif
};
} // namespace
-// Move marked code from the optimized code list to the deoptimized code list,
-// and replace pc on the stack for codes marked for deoptimization.
+// Replace pc on the stack for codes marked for deoptimization.
// static
-void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
+void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
DisallowGarbageCollection no_gc;
- Isolate* isolate = native_context.GetIsolate();
- CodeT topmost_optimized_code;
+ GcSafeCode topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
@@ -315,18 +292,18 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
it.Advance()) {
if (it.frame()->is_optimized()) {
- CodeT code = it.frame()->LookupCodeT().ToCodeT();
+ GcSafeCode code = it.frame()->GcSafeLookupCode();
JSFunction function =
static_cast<OptimizedFrame*>(it.frame())->function();
TraceFoundActivation(isolate, function);
bool safe_if_deopt_triggered;
if (code.is_maglevved()) {
MaglevSafepointEntry safepoint =
- code.GetMaglevSafepointEntry(isolate, it.frame()->pc());
+ MaglevSafepointTable::FindEntry(isolate, code, it.frame()->pc());
safe_if_deopt_triggered = safepoint.has_deoptimization_index();
} else {
SafepointEntry safepoint =
- code.GetSafepointEntry(isolate, it.frame()->pc());
+ SafepointTable::FindEntry(isolate, code, it.frame()->pc());
safe_if_deopt_triggered = safepoint.has_deoptimization_index();
}
@@ -342,44 +319,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
}
#endif
- // We will use this set to mark those Code objects that are marked for
- // deoptimization and have not been found in stack frames.
- std::set<CodeT> codes;
-
- // Move marked code from the optimized code list to the deoptimized code list.
- // Walk over all optimized code objects in this native context.
- CodeT prev;
- Object element = native_context.OptimizedCodeListHead();
- while (!element.IsUndefined(isolate)) {
- CodeT code = CodeT::cast(element);
- CHECK(CodeKindCanDeoptimize(code.kind()));
- Object next = code.next_code_link();
-
- if (code.marked_for_deoptimization()) {
- codes.insert(code);
-
- CodeTPageHeaderModificationScope rwx_write_scope(
- "Storing a CodeT object triggers marking barrier which requires "
- "write access to the CodeT page header");
- if (!prev.is_null()) {
- // Skip this code in the optimized code list.
- prev.set_next_code_link(next);
- } else {
- // There was no previous node, the next node is the new head.
- native_context.SetOptimizedCodeListHead(next);
- }
-
- // Move the code to the _deoptimized_ code list.
- code.set_next_code_link(native_context.DeoptimizedCodeListHead());
- native_context.SetDeoptimizedCodeListHead(code);
- } else {
- // Not marked; preserve this element.
- prev = code;
- }
- element = next;
- }
-
- ActivationsFinder visitor(&codes, topmost_optimized_code,
+ ActivationsFinder visitor(topmost_optimized_code,
safe_to_deopt_topmost_optimized_code);
// Iterate over the stack of this thread.
visitor.VisitThread(isolate, isolate->thread_local_top());
@@ -387,13 +327,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
// need to consider all the other threads as they may also use
// the code currently beings deoptimized.
isolate->thread_manager()->IterateArchivedThreads(&visitor);
-
- // If there's no activation of a code in any stack then we can remove its
- // deoptimization data. We do this to ensure that code objects that are
- // unlinked don't transitively keep objects alive unnecessarily.
- for (CodeT code : codes) {
- isolate->heap()->InvalidateCodeDeoptimizationData(FromCodeT(code));
- }
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
@@ -402,44 +335,20 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
TraceDeoptAll(isolate);
isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
- DisallowGarbageCollection no_gc;
- // For all contexts, mark all code, then deoptimize.
- Object context = isolate->heap()->native_contexts_list();
- while (!context.IsUndefined(isolate)) {
- NativeContext native_context = NativeContext::cast(context);
- MarkAllCodeForContext(native_context);
- DeoptimizeMarkedCodeForContext(native_context);
- context = native_context.next_context_link();
- }
-}
-void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
- RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
- TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.DeoptimizeCode");
- TraceDeoptMarked(isolate);
- DisallowGarbageCollection no_gc;
- // For all contexts, deoptimize code already marked.
- Object context = isolate->heap()->native_contexts_list();
- while (!context.IsUndefined(isolate)) {
- NativeContext native_context = NativeContext::cast(context);
- DeoptimizeMarkedCodeForContext(native_context);
- context = native_context.next_context_link();
+ // Mark all code, then deoptimize.
+ {
+ Code::OptimizedCodeIterator it(isolate);
+ for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
+ code.set_marked_for_deoptimization(true);
+ }
}
-}
-void Deoptimizer::MarkAllCodeForContext(NativeContext native_context) {
- Object element = native_context.OptimizedCodeListHead();
- Isolate* isolate = native_context.GetIsolate();
- while (!element.IsUndefined(isolate)) {
- CodeT code = CodeT::cast(element);
- CHECK(CodeKindCanDeoptimize(code.kind()));
- code.set_marked_for_deoptimization(true);
- element = code.next_code_link();
- }
+ DeoptimizeMarkedCode(isolate);
}
-void Deoptimizer::DeoptimizeFunction(JSFunction function, CodeT code) {
+// static
+void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
Isolate* isolate = function.GetIsolate();
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
@@ -455,8 +364,35 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, CodeT code) {
// The code in the function's optimized code feedback vector slot might
// be different from the code on the function - evict it if necessary.
function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function.shared(), "unlinking code marked for deopt");
- DeoptimizeMarkedCodeForContext(function.native_context());
+ isolate, function.shared(), "unlinking code marked for deopt");
+
+ DeoptimizeMarkedCode(isolate);
+ }
+}
+
+// static
+void Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction(
+ Isolate* isolate, Handle<SharedFunctionInfo> function) {
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.DeoptimizeAllOptimizedCodeWithFunction");
+
+ // Make sure no new code is compiled with the function.
+ isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
+
+ // Mark all code that inlines this function, then deoptimize.
+ bool any_marked = false;
+ {
+ Code::OptimizedCodeIterator it(isolate);
+ for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
+ if (code.Inlines(*function)) {
+ code.set_marked_for_deoptimization(true);
+ any_marked = true;
+ }
+ }
+ }
+ if (any_marked) {
+ DeoptimizeMarkedCode(isolate);
}
}
@@ -511,8 +447,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
}
DCHECK_NE(from, kNullAddress);
- compiled_code_ = FindOptimizedCode();
+ compiled_code_ = isolate_->heap()->FindCodeForInnerPointer(from);
DCHECK(!compiled_code_.is_null());
+ DCHECK(compiled_code_.IsCode());
DCHECK(function.IsJSFunction());
#ifdef DEBUG
@@ -536,8 +473,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DCHECK_GT(kLazyDeoptExitSize, 0);
DeoptimizationData deopt_data =
DeoptimizationData::cast(compiled_code_.deoptimization_data());
- Address deopt_start = compiled_code_.raw_instruction_start() +
- deopt_data.DeoptExitStart().value();
+ Address deopt_start =
+ compiled_code_.InstructionStart() + deopt_data.DeoptExitStart().value();
int eager_deopt_count = deopt_data.EagerDeoptCount().value();
Address lazy_deopt_start =
deopt_start + eager_deopt_count * kEagerDeoptExitSize;
@@ -562,13 +499,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
}
}
-Code Deoptimizer::FindOptimizedCode() {
- Code compiled_code = FindDeoptimizingCode(from_);
- if (!compiled_code.is_null()) return compiled_code;
- CodeLookupResult lookup_result = isolate_->FindCodeObject(from_);
- return lookup_result.code();
-}
-
Handle<JSFunction> Deoptimizer::function() const {
return Handle<JSFunction>(function_, isolate());
}
@@ -608,45 +538,6 @@ Builtin Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
}
}
-bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind* type_out) {
- Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate, addr);
- if (!Builtins::IsBuiltinId(builtin)) return false;
-
- switch (builtin) {
- case Builtin::kDeoptimizationEntry_Eager:
- *type_out = DeoptimizeKind::kEager;
- return true;
- case Builtin::kDeoptimizationEntry_Lazy:
- *type_out = DeoptimizeKind::kLazy;
- return true;
- default:
- return false;
- }
-
- UNREACHABLE();
-}
-
-int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
- int length = 0;
- // Count all entries in the deoptimizing code list of every context.
- Object context = isolate->heap()->native_contexts_list();
- while (!context.IsUndefined(isolate)) {
- NativeContext native_context = NativeContext::cast(context);
- Object element = native_context.DeoptimizedCodeListHead();
- while (!element.IsUndefined(isolate)) {
- CodeT code = CodeT::cast(element);
- DCHECK(CodeKindCanDeoptimize(code.kind()));
- if (!code.marked_for_deoptimization()) {
- length++;
- }
- element = code.next_code_link();
- }
- context = Context::cast(context).next_context_link();
- }
- return length;
-}
-
namespace {
int LookupCatchHandler(Isolate* isolate, TranslatedFrame* translated_frame,
@@ -710,11 +601,11 @@ void Deoptimizer::TraceDeoptEnd(double deopt_duration) {
}
// static
-void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
+void Deoptimizer::TraceMarkForDeoptimization(Isolate* isolate, Code code,
+ const char* reason) {
if (!v8_flags.trace_deopt && !v8_flags.log_deopt) return;
DisallowGarbageCollection no_gc;
- Isolate* isolate = code.GetIsolate();
Object maybe_data = code.deoptimization_data();
if (maybe_data == ReadOnlyRoots(isolate).empty_fixed_array()) return;
@@ -743,12 +634,13 @@ void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
}
// static
-void Deoptimizer::TraceEvictFromOptimizedCodeCache(SharedFunctionInfo sfi,
+void Deoptimizer::TraceEvictFromOptimizedCodeCache(Isolate* isolate,
+ SharedFunctionInfo sfi,
const char* reason) {
if (!v8_flags.trace_deopt_verbose) return;
DisallowGarbageCollection no_gc;
- CodeTracer::Scope scope(sfi.GetIsolate()->GetCodeTracer());
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(),
"[evicting optimized code marked for deoptimization (%s) for ",
reason);
@@ -774,19 +666,12 @@ void Deoptimizer::TraceDeoptAll(Isolate* isolate) {
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}
-// static
-void Deoptimizer::TraceDeoptMarked(Isolate* isolate) {
- if (!v8_flags.trace_deopt_verbose) return;
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
-}
-
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
// When we call this function, the return address of the previous frame has
// been removed from the stack by the DeoptimizationEntry builtin, so the
- // stack is not iterable by the SafeStackFrameIterator.
+ // stack is not iterable by the StackFrameIteratorForProfiler.
#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
#endif
@@ -922,7 +807,7 @@ void Deoptimizer::DoComputeOutputFrames() {
FrameDescription* topmost = output_[count - 1];
topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
isolate()->isolate_root());
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
topmost->GetRegisterValues()->SetRegister(kPtrComprCageBaseRegister.code(),
isolate()->cage_base());
#endif
@@ -1021,7 +906,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const bool deopt_to_baseline =
shared.HasBaselineCode() && v8_flags.deopt_to_baseline;
const bool restart_frame = goto_catch_handler && is_restart_frame();
- CodeT dispatch_builtin = builtins->code(
+ Code dispatch_builtin = builtins->code(
DispatchBuiltinFor(deopt_to_baseline, advance_bc, restart_frame));
if (verbose_tracing_enabled()) {
@@ -1262,7 +1147,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
// Set the continuation for the topmost frame.
- CodeT continuation = builtins->code(Builtin::kNotifyDeoptimized);
+ Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@@ -1295,7 +1180,7 @@ void Deoptimizer::DoComputeInlinedExtraArguments(
(std::max(0, extra_argument_count) + padding) * kSystemPointerSize;
if (verbose_tracing_enabled()) {
PrintF(trace_scope_->file(),
- " translating arguments adaptor => variable_size=%d\n",
+ " translating inlined arguments frame => variable_size=%d\n",
output_frame_size);
}
@@ -1342,7 +1227,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
Builtins* builtins = isolate_->builtins();
- CodeT construct_stub = builtins->code(Builtin::kJSConstructStubGeneric);
+ Code construct_stub = builtins->code(Builtin::kJSConstructStubGeneric);
BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();
const int parameters_count = translated_frame->height();
@@ -1496,7 +1381,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
- CodeT continuation = builtins->code(Builtin::kNotifyDeoptimized);
+ Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@@ -1920,7 +1805,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// For JSToWasmBuiltinContinuations use ContinueToCodeStubBuiltin, and not
// ContinueToCodeStubBuiltinWithResult because we don't want to overwrite the
// return value that we have already set.
- CodeT continue_to_builtin =
+ Code continue_to_builtin =
isolate()->builtins()->code(TrampolineForBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot() &&
!is_js_to_wasm_builtin_continuation));
@@ -1937,7 +1822,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()));
}
- CodeT continuation = isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
+ Code continuation = isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation.InstructionStart()));
}
@@ -2000,6 +1885,36 @@ unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
return fixed_size;
}
+namespace {
+
+// Get the actual deopt call PC from the return address of the deopt, which
+// points to immediately after the deopt call.
+//
+// See also the Deoptimizer constructor.
+Address GetDeoptCallPCFromReturnPC(Address return_pc, Code code) {
+ DCHECK_GT(Deoptimizer::kEagerDeoptExitSize, 0);
+ DCHECK_GT(Deoptimizer::kLazyDeoptExitSize, 0);
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code.deoptimization_data());
+ Address deopt_start =
+ code.InstructionStart() + deopt_data.DeoptExitStart().value();
+ int eager_deopt_count = deopt_data.EagerDeoptCount().value();
+ Address lazy_deopt_start =
+ deopt_start + eager_deopt_count * Deoptimizer::kEagerDeoptExitSize;
+ // The deoptimization exits are sorted so that lazy deopt exits appear
+ // after eager deopts.
+ static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
+ static_cast<int>(kLastDeoptimizeKind),
+ "lazy deopts are expected to be emitted last");
+ if (return_pc <= lazy_deopt_start) {
+ return return_pc - Deoptimizer::kEagerDeoptExitSize;
+ } else {
+ return return_pc - Deoptimizer::kLazyDeoptExitSize;
+ }
+}
+
+} // namespace
+
unsigned Deoptimizer::ComputeInputFrameSize() const {
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
@@ -2007,10 +1922,31 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
DCHECK(CodeKindCanDeoptimize(compiled_code_.kind()));
unsigned stack_slots = compiled_code_.stack_slots();
- unsigned outgoing_size = 0;
- CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
- CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
- result);
+ if (compiled_code_.is_maglevved()) {
+ // Maglev code can deopt in deferred code which has spilled registers across
+ // the call. These will be included in the fp_to_sp_delta, but the expected
+ // frame size won't include them, so we need to check for less-equal rather
+ // than equal.
+ CHECK_LE(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
+ CommonFrameConstants::kFixedFrameSizeAboveFp,
+ result);
+ // With slow asserts we can check this exactly, by looking up the safepoint.
+ if (v8_flags.enable_slow_asserts) {
+ Address deopt_call_pc = GetDeoptCallPCFromReturnPC(from_, compiled_code_);
+ MaglevSafepointTable table(isolate_, deopt_call_pc, compiled_code_);
+ MaglevSafepointEntry safepoint = table.FindEntry(deopt_call_pc);
+ unsigned extra_spills = safepoint.num_pushed_registers();
+ CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
+ CommonFrameConstants::kFixedFrameSizeAboveFp +
+ extra_spills * kSystemPointerSize,
+ result);
+ }
+ } else {
+ unsigned outgoing_size = 0;
+ CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
+ CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
+ result);
+ }
return result;
}
@@ -2051,14 +1987,5 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
return DeoptInfo(last_position, last_reason, last_node_id, last_deopt_id);
}
-// static
-int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
- Isolate* isolate, SharedFunctionInfo shared,
- BytecodeOffset bytecode_offset) {
- DCHECK(shared.HasBytecodeArray());
- return AbstractCode::cast(shared.GetBytecodeArray(isolate))
- .SourcePosition(isolate, bytecode_offset.ToInt());
-}
-
} // namespace internal
} // namespace v8
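
The GetDeoptCallPCFromReturnPC helper added in the hunk above maps a deopt return address back to the deopt call site: eager exits are emitted first, lazy exits after them, so the region the return PC falls in decides which exit size to subtract. The standalone sketch below reproduces that arithmetic with made-up addresses and exit sizes; in V8 the inputs come from DeoptimizationData and the architecture-specific Deoptimizer::k{Eager,Lazy}DeoptExitSize constants.

#include <cassert>
#include <cstdint>
#include <cstdio>

using Address = uint64_t;

constexpr int kEagerDeoptExitSize = 4;  // hypothetical exit sizes
constexpr int kLazyDeoptExitSize = 8;

Address DeoptCallPCFromReturnPC(Address return_pc, Address deopt_start,
                                int eager_deopt_count) {
  // Everything at or below lazy_deopt_start belongs to an eager exit.
  Address lazy_deopt_start =
      deopt_start + eager_deopt_count * kEagerDeoptExitSize;
  if (return_pc <= lazy_deopt_start) {
    return return_pc - kEagerDeoptExitSize;
  }
  return return_pc - kLazyDeoptExitSize;
}

int main() {
  Address deopt_start = 0x1000;
  int eager_count = 3;  // eager exits at 0x1000/0x1004/0x1008, lazy from 0x100c
  // Return address just past the second eager exit maps to its call at 0x1004.
  assert(DeoptCallPCFromReturnPC(0x1008, deopt_start, eager_count) == 0x1004);
  // Return address just past the first lazy exit maps to its call at 0x100c.
  assert(DeoptCallPCFromReturnPC(0x1014, deopt_start, eager_count) == 0x100c);
  std::printf("ok\n");
  return 0;
}
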
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 1de0a4e6e7..02eec6d87b 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -48,10 +48,6 @@ class Deoptimizer : public Malloced {
return Deoptimizer::GetDeoptInfo(compiled_code_, from_);
}
- static int ComputeSourcePositionFromBytecodeArray(
- Isolate* isolate, SharedFunctionInfo shared,
- BytecodeOffset bytecode_offset);
-
static const char* MessageFor(DeoptimizeKind kind);
Handle<JSFunction> function() const;
@@ -79,7 +75,7 @@ class Deoptimizer : public Malloced {
// again and any activations of the optimized code will get deoptimized when
// execution returns. If {code} is specified then the given code is targeted
// instead of the function code (e.g. OSR code not installed on function).
- static void DeoptimizeFunction(JSFunction function, CodeT code = {});
+ static void DeoptimizeFunction(JSFunction function, Code code = {});
// Deoptimize all code in the given isolate.
V8_EXPORT_PRIVATE static void DeoptimizeAll(Isolate* isolate);
@@ -89,6 +85,11 @@ class Deoptimizer : public Malloced {
// refer to that code.
static void DeoptimizeMarkedCode(Isolate* isolate);
+ // Deoptimizes all optimized code that implements the given function (whether
+ // directly or inlined).
+ static void DeoptimizeAllOptimizedCodeWithFunction(
+ Isolate* isolate, Handle<SharedFunctionInfo> function);
+
// Check the given address against a list of allowed addresses, to prevent a
// potential attacker from using the frame creation process in the
// deoptimizer, in particular the signing process, to gain control over the
@@ -104,12 +105,7 @@ class Deoptimizer : public Malloced {
V8_EXPORT_PRIVATE static Builtin GetDeoptimizationEntry(DeoptimizeKind kind);
- // Returns true if {addr} is a deoptimization entry and stores its type in
- // {type_out}. Returns false if {addr} is not a deoptimization entry.
- static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind* type_out);
-
- // Code generation support.
+ // InstructionStream generation support.
static int input_offset() { return offsetof(Deoptimizer, input_); }
static int output_count_offset() {
return offsetof(Deoptimizer, output_count_);
@@ -120,8 +116,6 @@ class Deoptimizer : public Malloced {
return offsetof(Deoptimizer, caller_frame_top_);
}
- V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);
-
Isolate* isolate() const { return isolate_; }
static constexpr int kMaxNumberOfEntries = 16384;
@@ -136,8 +130,10 @@ class Deoptimizer : public Malloced {
V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
// Tracing.
- static void TraceMarkForDeoptimization(Code code, const char* reason);
- static void TraceEvictFromOptimizedCodeCache(SharedFunctionInfo sfi,
+ static void TraceMarkForDeoptimization(Isolate* isolate, Code code,
+ const char* reason);
+ static void TraceEvictFromOptimizedCodeCache(Isolate* isolate,
+ SharedFunctionInfo sfi,
const char* reason);
private:
@@ -146,7 +142,6 @@ class Deoptimizer : public Malloced {
Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
Address from, int fp_to_sp_delta);
- Code FindOptimizedCode();
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@@ -174,13 +169,6 @@ class Deoptimizer : public Malloced {
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
- static void MarkAllCodeForContext(NativeContext native_context);
- static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
- // Searches the list of known deoptimizing code for a Code object
- // containing the given address (which is supposedly faster than
- // searching all code objects).
- Code FindDeoptimizingCode(Address addr);
-
// Tracing.
bool tracing_enabled() const { return trace_scope_ != nullptr; }
bool verbose_tracing_enabled() const {
@@ -196,7 +184,6 @@ class Deoptimizer : public Malloced {
static void TraceFoundActivation(Isolate* isolate, JSFunction function);
#endif
static void TraceDeoptAll(Isolate* isolate);
- static void TraceDeoptMarked(Isolate* isolate);
bool is_restart_frame() const { return restart_frame_index_ >= 0; }
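
The header above now exposes Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction, replacing the hand-rolled mark-and-deoptimize loop that Debug::DeoptimizeFunction previously carried. A minimal V8-internal sketch of a call site, mirroring the debug.cc change earlier in this diff; the wrapper name is made up and the surrounding internal context is assumed.

#include "src/deoptimizer/deoptimizer.h"

namespace v8 {
namespace internal {

// Hypothetical wrapper: drops every optimized Code object that inlines the
// given SharedFunctionInfo, directly or transitively.
void DropOptimizedCodeInlining(Isolate* isolate,
                               Handle<SharedFunctionInfo> shared) {
  // The helper aborts concurrent optimization, marks matching Code objects,
  // and deoptimizes the marked code (see deoptimizer.cc above).
  Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction(isolate, shared);
}

}  // namespace internal
}  // namespace v8
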
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
index 2bd0f2394e..a9c6693ca9 100644
--- a/deps/v8/src/deoptimizer/translated-state.cc
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/memory.h"
+#include "src/common/assert-scope.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/deoptimizer/materialized-object-store.h"
#include "src/deoptimizer/translation-opcode.h"
@@ -17,10 +18,12 @@
#include "src/numbers/conversions.h"
#include "src/objects/arguments.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/heap-object.h"
#include "src/objects/oddball.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
+#include "src/objects/string.h"
namespace v8 {
@@ -36,32 +39,41 @@ void TranslationArrayPrintSingleFrame(
TranslationArrayIterator iterator(translation_array, translation_index);
disasm::NameConverter converter;
- TranslationOpcode opcode = TranslationOpcodeFromInt(iterator.NextUnsigned());
- DCHECK_EQ(TranslationOpcode::BEGIN, opcode);
- int frame_count = iterator.Next();
- int jsframe_count = iterator.Next();
- int update_feedback_count = iterator.Next();
+ TranslationOpcode opcode = iterator.NextOpcode();
+ DCHECK(TranslationOpcodeIsBegin(opcode));
+ iterator.NextOperand(); // Skip the lookback distance.
+ int frame_count = iterator.NextOperand();
+ int jsframe_count = iterator.NextOperand();
os << " " << TranslationOpcodeToString(opcode)
<< " {frame count=" << frame_count << ", js frame count=" << jsframe_count
- << ", update_feedback_count=" << update_feedback_count << "}\n";
+ << "}\n";
- while (iterator.HasNext()) {
- opcode = TranslationOpcodeFromInt(iterator.NextUnsigned());
- if (opcode == TranslationOpcode::BEGIN) break;
+ while (iterator.HasNextOpcode()) {
+ opcode = iterator.NextOpcode();
+ if (TranslationOpcodeIsBegin(opcode)) break;
os << std::setw(31) << " " << TranslationOpcodeToString(opcode) << " ";
switch (opcode) {
- case TranslationOpcode::BEGIN:
+ case TranslationOpcode::BEGIN_WITH_FEEDBACK:
+ case TranslationOpcode::BEGIN_WITHOUT_FEEDBACK:
+ case TranslationOpcode::MATCH_PREVIOUS_TRANSLATION:
UNREACHABLE();
- case TranslationOpcode::INTERPRETED_FRAME: {
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 5);
- int bytecode_offset = iterator.Next();
- int shared_info_id = iterator.Next();
- unsigned height = iterator.Next();
- int return_value_offset = iterator.Next();
- int return_value_count = iterator.Next();
+ case TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN:
+ case TranslationOpcode::INTERPRETED_FRAME_WITHOUT_RETURN: {
+ int bytecode_offset = iterator.NextOperand();
+ int shared_info_id = iterator.NextOperand();
+ unsigned height = iterator.NextOperand();
+ int return_value_offset = 0;
+ int return_value_count = 0;
+ if (opcode == TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN) {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 5);
+ return_value_offset = iterator.NextOperand();
+ return_value_count = iterator.NextOperand();
+ } else {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ }
Object shared_info = literal_array.get(shared_info_id);
os << "{bytecode_offset=" << bytecode_offset << ", function="
<< SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
@@ -72,10 +84,10 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::CONSTRUCT_STUB_FRAME: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
+ int bailout_id = iterator.NextOperand();
+ int shared_info_id = iterator.NextOperand();
Object shared_info = literal_array.get(shared_info_id);
- unsigned height = iterator.Next();
+ unsigned height = iterator.NextOperand();
os << "{bailout_id=" << bailout_id << ", function="
<< SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
<< ", height=" << height << "}";
@@ -87,10 +99,10 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::
JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
+ int bailout_id = iterator.NextOperand();
+ int shared_info_id = iterator.NextOperand();
Object shared_info = literal_array.get(shared_info_id);
- unsigned height = iterator.Next();
+ unsigned height = iterator.NextOperand();
os << "{bailout_id=" << bailout_id << ", function="
<< SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
<< ", height=" << height << "}";
@@ -100,11 +112,11 @@ void TranslationArrayPrintSingleFrame(
#if V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
+ int bailout_id = iterator.NextOperand();
+ int shared_info_id = iterator.NextOperand();
Object shared_info = literal_array.get(shared_info_id);
- unsigned height = iterator.Next();
- int wasm_return_type = iterator.Next();
+ unsigned height = iterator.NextOperand();
+ int wasm_return_type = iterator.NextOperand();
os << "{bailout_id=" << bailout_id << ", function="
<< SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
<< ", height=" << height << ", wasm_return_type=" << wasm_return_type
@@ -115,9 +127,9 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::INLINED_EXTRA_ARGUMENTS: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
- int shared_info_id = iterator.Next();
+ int shared_info_id = iterator.NextOperand();
Object shared_info = literal_array.get(shared_info_id);
- unsigned height = iterator.Next();
+ unsigned height = iterator.NextOperand();
os << "{function="
<< SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
<< ", height=" << height << "}";
@@ -126,28 +138,28 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
break;
}
case TranslationOpcode::INT32_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code) << " (int32)}";
break;
}
case TranslationOpcode::INT64_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code) << " (int64)}";
break;
}
case TranslationOpcode::SIGNED_BIGINT64_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code)
<< " (signed bigint64)}";
break;
@@ -155,7 +167,7 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::UNSIGNED_BIGINT64_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code)
<< " (unsigned bigint64)}";
break;
@@ -163,7 +175,7 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::UINT32_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code)
<< " (uint32)}";
break;
@@ -171,70 +183,70 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::BOOL_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << converter.NameOfCPURegister(reg_code) << " (bool)}";
break;
}
case TranslationOpcode::FLOAT_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << FloatRegister::from_code(reg_code) << "}";
break;
}
case TranslationOpcode::DOUBLE_REGISTER: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int reg_code = iterator.NextUnsigned();
+ int reg_code = iterator.NextOperandUnsigned();
os << "{input=" << DoubleRegister::from_code(reg_code) << "}";
break;
}
case TranslationOpcode::STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << "}";
break;
}
case TranslationOpcode::INT32_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << " (int32)}";
break;
}
case TranslationOpcode::INT64_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << " (int64)}";
break;
}
case TranslationOpcode::SIGNED_BIGINT64_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << " (signed bigint64)}";
break;
}
case TranslationOpcode::UNSIGNED_BIGINT64_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << " (unsigned bigint64)}";
break;
}
case TranslationOpcode::UINT32_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << " (uint32)}";
break;
}
case TranslationOpcode::BOOL_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << " (bool)}";
break;
}
@@ -242,7 +254,7 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::FLOAT_STACK_SLOT:
case TranslationOpcode::DOUBLE_STACK_SLOT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int input_slot_index = iterator.Next();
+ int input_slot_index = iterator.NextOperand();
os << "{input=" << input_slot_index << "}";
break;
}
@@ -255,7 +267,7 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::LITERAL: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int literal_index = iterator.Next();
+ int literal_index = iterator.NextOperand();
Object literal_value = literal_array.get(literal_index);
os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
<< ")}";
@@ -264,7 +276,7 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::DUPLICATED_OBJECT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int object_index = iterator.Next();
+ int object_index = iterator.NextOperand();
os << "{object_index=" << object_index << "}";
break;
}
@@ -272,7 +284,7 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::ARGUMENTS_ELEMENTS: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
CreateArgumentsType arguments_type =
- static_cast<CreateArgumentsType>(iterator.Next());
+ static_cast<CreateArgumentsType>(iterator.NextOperand());
os << "{arguments_type=" << arguments_type << "}";
break;
}
@@ -284,15 +296,15 @@ void TranslationArrayPrintSingleFrame(
case TranslationOpcode::CAPTURED_OBJECT: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
- int args_length = iterator.Next();
+ int args_length = iterator.NextOperand();
os << "{length=" << args_length << "}";
break;
}
case TranslationOpcode::UPDATE_FEEDBACK: {
DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
- int literal_index = iterator.Next();
- FeedbackSlot slot(iterator.Next());
+ int literal_index = iterator.NextOperand();
+ FeedbackSlot slot(iterator.NextOperand());
os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
<< "}}";
break;
@@ -456,8 +468,55 @@ Object TranslatedValue::GetRawValue() const {
// Otherwise, do a best effort to get the value without allocation.
switch (kind()) {
- case kTagged:
- return raw_literal();
+ case kTagged: {
+ Object object = raw_literal();
+ if (object.IsSlicedString()) {
+ // If {object} is a sliced string of length smaller than
+ // SlicedString::kMinLength, then trim the underlying SeqString and
+ // return it. This assumes that such sliced strings are only built by
+ // the fast string builder optimization of Turbofan's
+ // StringBuilderOptimizer/EffectControlLinearizer.
+ SlicedString string = SlicedString::cast(object);
+ if (string.length() < SlicedString::kMinLength) {
+ String backing_store = string.parent();
+ CHECK(backing_store.IsSeqString());
+
+ // Creating filler at the end of the backing store if needed.
+ int string_size =
+ backing_store.IsSeqOneByteString()
+ ? SeqOneByteString::SizeFor(backing_store.length())
+ : SeqTwoByteString::SizeFor(backing_store.length());
+ int needed_size = backing_store.IsSeqOneByteString()
+ ? SeqOneByteString::SizeFor(string.length())
+ : SeqTwoByteString::SizeFor(string.length());
+ if (needed_size < string_size) {
+ Address new_end = backing_store.address() + needed_size;
+ isolate()->heap()->CreateFillerObjectAt(
+ new_end, (string_size - needed_size));
+ }
+
+ // Updating backing store's length, effectively trimming it.
+ backing_store.set_length(string.length());
+
+ // Zeroing the padding bytes of {backing_store}.
+ SeqString::DataAndPaddingSizes sz =
+ SeqString::cast(backing_store).GetDataAndPaddingSizes();
+ auto padding =
+ reinterpret_cast<char*>(backing_store.address() + sz.data_size);
+ for (int i = 0; i < sz.padding_size; ++i) {
+ padding[i] = 0;
+ }
+
+ // Overwriting {string} with a filler, so that we don't leave around a
+ // potentially-too-small SlicedString.
+ isolate()->heap()->CreateFillerObjectAt(string.address(),
+ SlicedString::kSize);
+
+ return backing_store;
+ }
+ }
+ return object;
+ }
case kInt32: {
bool is_smi = Smi::IsValid(int32_value());
@@ -767,10 +826,9 @@ int TranslatedFrame::GetValueCount() {
UNREACHABLE();
}
-void TranslatedFrame::Handlify() {
+void TranslatedFrame::Handlify(Isolate* isolate) {
if (!raw_shared_info_.is_null()) {
- shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
- raw_shared_info_.GetIsolate());
+ shared_info_ = handle(raw_shared_info_, isolate);
raw_shared_info_ = SharedFunctionInfo();
}
for (auto& value : values_) {
@@ -781,15 +839,20 @@ void TranslatedFrame::Handlify() {
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
TranslationArrayIterator* iterator,
DeoptimizationLiteralArray literal_array, Address fp, FILE* trace_file) {
- TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->NextUnsigned());
+ TranslationOpcode opcode = iterator->NextOpcode();
switch (opcode) {
- case TranslationOpcode::INTERPRETED_FRAME: {
- BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ case TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN:
+ case TranslationOpcode::INTERPRETED_FRAME_WITHOUT_RETURN: {
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->NextOperand());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- int return_value_offset = iterator->Next();
- int return_value_count = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
+ int return_value_offset = 0;
+ int return_value_count = 0;
+ if (opcode == TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN) {
+ return_value_offset = iterator->NextOperand();
+ return_value_count = iterator->NextOperand();
+ }
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading input frame %s", name.get());
@@ -808,21 +871,21 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case TranslationOpcode::INLINED_EXTRA_ARGUMENTS: {
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
+ PrintF(trace_file, " reading inlined arguments frame %s", name.get());
PrintF(trace_file, " => height=%d; inputs:\n", height);
}
return TranslatedFrame::InlinedExtraArguments(shared_info, height);
}
case TranslationOpcode::CONSTRUCT_STUB_FRAME: {
- BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->NextOperand());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading construct stub frame %s", name.get());
@@ -834,10 +897,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
}
case TranslationOpcode::BUILTIN_CONTINUATION_FRAME: {
- BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->NextOperand());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading builtin continuation frame %s",
@@ -851,11 +914,11 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
#if V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME: {
- BytecodeOffset bailout_id = BytecodeOffset(iterator->Next());
+ BytecodeOffset bailout_id = BytecodeOffset(iterator->NextOperand());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- int return_kind_code = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
+ int return_kind_code = iterator->NextOperand();
base::Optional<wasm::ValueKind> return_kind;
if (return_kind_code != kNoWasmReturnKind) {
return_kind = static_cast<wasm::ValueKind>(return_kind_code);
@@ -875,10 +938,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
#endif // V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
- BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->NextOperand());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading JavaScript builtin continuation frame %s",
@@ -891,10 +954,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
}
case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
- BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->NextOperand());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
+ SharedFunctionInfo::cast(literal_array.get(iterator->NextOperand()));
+ int height = iterator->NextOperand();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file,
@@ -907,7 +970,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
bytecode_offset, shared_info, height);
}
case TranslationOpcode::UPDATE_FEEDBACK:
- case TranslationOpcode::BEGIN:
+ case TranslationOpcode::BEGIN_WITH_FEEDBACK:
+ case TranslationOpcode::BEGIN_WITHOUT_FEEDBACK:
case TranslationOpcode::DUPLICATED_OBJECT:
case TranslationOpcode::ARGUMENTS_ELEMENTS:
case TranslationOpcode::ARGUMENTS_LENGTH:
@@ -932,6 +996,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case TranslationOpcode::DOUBLE_STACK_SLOT:
case TranslationOpcode::LITERAL:
case TranslationOpcode::OPTIMIZED_OUT:
+ case TranslationOpcode::MATCH_PREVIOUS_TRANSLATION:
break;
}
UNREACHABLE();
@@ -1025,10 +1090,12 @@ int TranslatedState::CreateNextTranslatedValue(
TranslatedFrame& frame = frames_[frame_index];
int value_index = static_cast<int>(frame.values_.size());
- TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->NextUnsigned());
+ TranslationOpcode opcode = iterator->NextOpcode();
switch (opcode) {
- case TranslationOpcode::BEGIN:
- case TranslationOpcode::INTERPRETED_FRAME:
+ case TranslationOpcode::BEGIN_WITH_FEEDBACK:
+ case TranslationOpcode::BEGIN_WITHOUT_FEEDBACK:
+ case TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN:
+ case TranslationOpcode::INTERPRETED_FRAME_WITHOUT_RETURN:
case TranslationOpcode::INLINED_EXTRA_ARGUMENTS:
case TranslationOpcode::CONSTRUCT_STUB_FRAME:
case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
@@ -1038,11 +1105,12 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME:
#endif // V8_ENABLE_WEBASSEMBLY
case TranslationOpcode::UPDATE_FEEDBACK:
+ case TranslationOpcode::MATCH_PREVIOUS_TRANSLATION:
// Peeled off before getting here.
break;
case TranslationOpcode::DUPLICATED_OBJECT: {
- int object_id = iterator->Next();
+ int object_id = iterator->NextOperand();
if (trace_file != nullptr) {
PrintF(trace_file, "duplicated object #%d", object_id);
}
@@ -1055,7 +1123,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::ARGUMENTS_ELEMENTS: {
CreateArgumentsType arguments_type =
- static_cast<CreateArgumentsType>(iterator->Next());
+ static_cast<CreateArgumentsType>(iterator->NextOperand());
CreateArgumentsElementsTranslatedValues(frame_index, fp, arguments_type,
trace_file);
return 0;
@@ -1071,7 +1139,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::CAPTURED_OBJECT: {
- int field_count = iterator->Next();
+ int field_count = iterator->NextOperand();
int object_index = static_cast<int>(object_positions_.size());
if (trace_file != nullptr) {
PrintF(trace_file, "captured object #%d (length = %d)", object_index,
@@ -1085,7 +1153,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1105,7 +1173,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::INT32_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1123,7 +1191,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::INT64_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1141,7 +1209,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::SIGNED_BIGINT64_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1159,7 +1227,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::UNSIGNED_BIGINT64_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1177,7 +1245,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::UINT32_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1195,7 +1263,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::BOOL_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1213,7 +1281,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::FLOAT_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1230,7 +1298,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::DOUBLE_REGISTER: {
- int input_reg = iterator->NextUnsigned();
+ int input_reg = iterator->NextOperandUnsigned();
if (registers == nullptr) {
TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
frame.Add(translated_value);
@@ -1249,7 +1317,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
Address uncompressed_value = DecompressIfNeeded(value);
if (trace_file != nullptr) {
@@ -1266,7 +1334,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::INT32_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%d ; (int32) [fp %c %3d] ",
@@ -1280,7 +1348,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::INT64_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
uint64_t value = GetUInt64Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%" V8PRIdPTR " ; (int64) [fp %c %3d] ",
@@ -1294,7 +1362,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::SIGNED_BIGINT64_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
uint64_t value = GetUInt64Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%" V8PRIdPTR " ; (signed bigint64) [fp %c %3d] ",
@@ -1309,7 +1377,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::UNSIGNED_BIGINT64_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
uint64_t value = GetUInt64Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%" V8PRIdPTR " ; (unsigned bigint64) [fp %c %3d] ",
@@ -1324,7 +1392,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::UINT32_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%u ; (uint32) [fp %c %3d] ", value,
@@ -1338,7 +1406,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::BOOL_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%u ; (bool) [fp %c %3d] ", value,
@@ -1351,7 +1419,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::FLOAT_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
Float32 value = GetFloatSlot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%e ; (float) [fp %c %3d] ", value.get_scalar(),
@@ -1364,7 +1432,7 @@ int TranslatedState::CreateNextTranslatedValue(
case TranslationOpcode::DOUBLE_STACK_SLOT: {
int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->NextOperand());
Float64 value = GetDoubleSlot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(),
@@ -1377,7 +1445,7 @@ int TranslatedState::CreateNextTranslatedValue(
}
case TranslationOpcode::LITERAL: {
- int literal_index = iterator->Next();
+ int literal_index = iterator->NextOperand();
Object value = literal_array.get(literal_index);
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value.ptr(),
@@ -1408,7 +1476,7 @@ int TranslatedState::CreateNextTranslatedValue(
Address TranslatedState::DecompressIfNeeded(intptr_t value) {
if (COMPRESS_POINTERS_BOOL) {
- return V8HeapCompressionScheme::DecompressTaggedAny(
+ return V8HeapCompressionScheme::DecompressTagged(
isolate(), static_cast<uint32_t>(value));
} else {
return value;
@@ -1448,17 +1516,14 @@ void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
isolate_ = isolate;
// Read out the 'header' translation.
- TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->NextUnsigned());
- CHECK_EQ(opcode, TranslationOpcode::BEGIN);
-
- int count = iterator->Next();
+ TranslationOpcode opcode = iterator->NextOpcode();
+ CHECK(TranslationOpcodeIsBegin(opcode));
+ iterator->NextOperand(); // Skip the lookback distance.
+ int count = iterator->NextOperand();
frames_.reserve(count);
- iterator->Next(); // Drop JS frames count.
- int update_feedback_count = iterator->Next();
- CHECK_GE(update_feedback_count, 0);
- CHECK_LE(update_feedback_count, 1);
+ iterator->NextOperand(); // Drop JS frames count.
- if (update_feedback_count == 1) {
+ if (opcode == TranslationOpcode::BEGIN_WITH_FEEDBACK) {
ReadUpdateFeedback(iterator, literal_array, trace_file);
}
@@ -1510,17 +1575,17 @@ void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
}
}
- CHECK(!iterator->HasNext() ||
- TranslationOpcodeFromInt(iterator->NextUnsigned()) ==
- TranslationOpcode::BEGIN);
+ CHECK(!iterator->HasNextOpcode() ||
+ TranslationOpcodeIsBegin(iterator->NextOpcode()));
}
void TranslatedState::Prepare(Address stack_frame_pointer) {
- for (auto& frame : frames_) frame.Handlify();
+ for (auto& frame : frames_) {
+ frame.Handlify(isolate());
+ }
if (!feedback_vector_.is_null()) {
- feedback_vector_handle_ =
- Handle<FeedbackVector>(feedback_vector_, isolate());
+ feedback_vector_handle_ = handle(feedback_vector_, isolate());
feedback_vector_ = FeedbackVector();
}
stack_frame_pointer_ = stack_frame_pointer;
@@ -1901,17 +1966,20 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
properties_slot->mark_allocated();
properties_slot->set_storage(object_storage);
+ DisallowGarbageCollection no_gc;
+ auto raw_map = *map;
+ auto raw_object_storage = *object_storage;
+
// Set markers for out-of-object properties.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate()),
- isolate());
+ DescriptorArray descriptors = map->instance_descriptors(isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Representation representation = descriptors->GetDetails(i).representation();
+ FieldIndex index = FieldIndex::ForDescriptor(raw_map, i);
+ Representation representation = descriptors.GetDetails(i).representation();
if (!index.is_inobject() &&
(representation.IsDouble() || representation.IsHeapObject())) {
int outobject_index = index.outobject_array_index();
int array_index = outobject_index * kTaggedSize;
- object_storage->set(array_index, kStoreHeapObject);
+ raw_object_storage.set(array_index, kStoreHeapObject);
}
}
}
@@ -1923,8 +1991,10 @@ Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
// does not visit them.
Handle<ByteArray> object_storage =
isolate()->factory()->NewByteArray(allocate_size, AllocationType::kOld);
+ DisallowGarbageCollection no_gc;
+ auto raw_object_storage = *object_storage;
for (int i = 0; i < object_storage->length(); i++) {
- object_storage->set(i, kStoreTagged);
+ raw_object_storage.set(i, kStoreTagged);
}
return object_storage;
}
@@ -1935,19 +2005,22 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize);
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
+
// Now we handle the interesting (JSObject) case.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate()),
- isolate());
+ DisallowGarbageCollection no_gc;
+ auto raw_map = *map;
+ auto raw_object_storage = *object_storage;
+ DescriptorArray descriptors = map->instance_descriptors(isolate());
// Set markers for in-object properties.
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Representation representation = descriptors->GetDetails(i).representation();
+ for (InternalIndex i : raw_map.IterateOwnDescriptors()) {
+ FieldIndex index = FieldIndex::ForDescriptor(raw_map, i);
+ Representation representation = descriptors.GetDetails(i).representation();
if (index.is_inobject() &&
(representation.IsDouble() || representation.IsHeapObject())) {
CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize);
int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize;
- object_storage->set(array_index, kStoreHeapObject);
+ raw_object_storage.set(array_index, kStoreHeapObject);
}
}
slot->set_storage(object_storage);
@@ -2213,8 +2286,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
previously_materialized_objects);
CHECK_EQ(frames_[0].kind(), TranslatedFrame::kUnoptimizedFunction);
CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
- Deoptimizer::DeoptimizeFunction(frame->function(),
- frame->LookupCodeT().ToCodeT());
+ Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
}
}
@@ -2253,6 +2325,7 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
void TranslatedState::VerifyMaterializedObjects() {
#if VERIFY_HEAP
+ if (!v8_flags.verify_heap) return;
int length = static_cast<int>(object_positions_.size());
for (int i = 0; i < length; i++) {
TranslatedValue* slot = GetValueByObjectIndex(i);
@@ -2283,10 +2356,10 @@ bool TranslatedState::DoUpdateFeedback() {
void TranslatedState::ReadUpdateFeedback(
TranslationArrayIterator* iterator,
DeoptimizationLiteralArray literal_array, FILE* trace_file) {
- CHECK_EQ(TranslationOpcode::UPDATE_FEEDBACK,
- TranslationOpcodeFromInt(iterator->NextUnsigned()));
- feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
- feedback_slot_ = FeedbackSlot(iterator->Next());
+ CHECK_EQ(TranslationOpcode::UPDATE_FEEDBACK, iterator->NextOpcode());
+ feedback_vector_ =
+ FeedbackVector::cast(literal_array.get(iterator->NextOperand()));
+ feedback_slot_ = FeedbackSlot(iterator->NextOperand());
if (trace_file != nullptr) {
PrintF(trace_file, " reading FeedbackVector (slot %d)\n",
feedback_slot_.ToInt());
diff --git a/deps/v8/src/deoptimizer/translated-state.h b/deps/v8/src/deoptimizer/translated-state.h
index cac57ec832..38230029df 100644
--- a/deps/v8/src/deoptimizer/translated-state.h
+++ b/deps/v8/src/deoptimizer/translated-state.h
@@ -317,7 +317,7 @@ class TranslatedFrame {
void Add(const TranslatedValue& value) { values_.push_back(value); }
TranslatedValue* ValueAt(int index) { return &(values_[index]); }
- void Handlify();
+ void Handlify(Isolate* isolate);
Kind kind_;
BytecodeOffset bytecode_offset_;
diff --git a/deps/v8/src/deoptimizer/translation-array.cc b/deps/v8/src/deoptimizer/translation-array.cc
index 2a8b3dd2bb..72f1004ddb 100644
--- a/deps/v8/src/deoptimizer/translation-array.cc
+++ b/deps/v8/src/deoptimizer/translation-array.cc
@@ -52,11 +52,20 @@ TranslationArrayIterator::TranslationArrayIterator(TranslationArray buffer,
#endif // V8_USE_ZLIB
DCHECK(!v8_flags.turbo_compress_translation_arrays);
DCHECK(index >= 0 && index < buffer.length());
+ // Starting at a location other than a BEGIN would make
+ // MATCH_PREVIOUS_TRANSLATION instructions not work.
+ DCHECK(TranslationOpcodeIsBegin(
+ static_cast<TranslationOpcode>(buffer_.GetDataStartAddress()[index])));
}
-int32_t TranslationArrayIterator::Next() {
+int32_t TranslationArrayIterator::NextOperand() {
if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
return uncompressed_contents_[index_++];
+ } else if (remaining_ops_to_use_from_previous_translation_) {
+ int32_t value =
+ base::VLQDecode(buffer_.GetDataStartAddress(), &previous_index_);
+ DCHECK_LT(previous_index_, index_);
+ return value;
} else {
int32_t value = base::VLQDecode(buffer_.GetDataStartAddress(), &index_);
DCHECK_LE(index_, buffer_.length());
@@ -64,9 +73,27 @@ int32_t TranslationArrayIterator::Next() {
}
}
-uint32_t TranslationArrayIterator::NextUnsigned() {
+TranslationOpcode TranslationArrayIterator::NextOpcodeAtPreviousIndex() {
+ TranslationOpcode opcode =
+ static_cast<TranslationOpcode>(buffer_.get(previous_index_++));
+ DCHECK_LT(static_cast<uint32_t>(opcode), kNumTranslationOpcodes);
+ DCHECK_NE(opcode, TranslationOpcode::MATCH_PREVIOUS_TRANSLATION);
+ DCHECK_LT(previous_index_, index_);
+ return opcode;
+}
+
+uint32_t TranslationArrayIterator::NextUnsignedOperandAtPreviousIndex() {
+ uint32_t value =
+ base::VLQDecodeUnsigned(buffer_.GetDataStartAddress(), &previous_index_);
+ DCHECK_LT(previous_index_, index_);
+ return value;
+}
+
+uint32_t TranslationArrayIterator::NextOperandUnsigned() {
if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
return uncompressed_contents_[index_++];
+ } else if (remaining_ops_to_use_from_previous_translation_) {
+ return NextUnsignedOperandAtPreviousIndex();
} else {
uint32_t value =
base::VLQDecodeUnsigned(buffer_.GetDataStartAddress(), &index_);
@@ -75,56 +102,264 @@ uint32_t TranslationArrayIterator::NextUnsigned() {
}
}
-bool TranslationArrayIterator::HasNext() const {
+TranslationOpcode TranslationArrayIterator::NextOpcode() {
if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
- return index_ < static_cast<int>(uncompressed_contents_.size());
+ return static_cast<TranslationOpcode>(NextOperandUnsigned());
+ }
+ if (remaining_ops_to_use_from_previous_translation_) {
+ --remaining_ops_to_use_from_previous_translation_;
+ }
+ if (remaining_ops_to_use_from_previous_translation_) {
+ return NextOpcodeAtPreviousIndex();
+ }
+ uint8_t opcode_byte = buffer_.get(index_++);
+
+ // If the opcode byte is greater than any valid opcode, then the opcode is
+ // implicitly MATCH_PREVIOUS_TRANSLATION and the operand is the opcode byte
+ // minus kNumTranslationOpcodes. This special-case encoding of the most common
+ // opcode saves some memory.
+ if (opcode_byte >= kNumTranslationOpcodes) {
+ remaining_ops_to_use_from_previous_translation_ =
+ opcode_byte - kNumTranslationOpcodes;
+ opcode_byte =
+ static_cast<uint8_t>(TranslationOpcode::MATCH_PREVIOUS_TRANSLATION);
+ } else if (opcode_byte ==
+ static_cast<uint8_t>(
+ TranslationOpcode::MATCH_PREVIOUS_TRANSLATION)) {
+ remaining_ops_to_use_from_previous_translation_ = NextOperandUnsigned();
+ }
+
+ TranslationOpcode opcode = static_cast<TranslationOpcode>(opcode_byte);
+ DCHECK_LE(index_, buffer_.length());
+ DCHECK_LT(static_cast<uint32_t>(opcode), kNumTranslationOpcodes);
+ if (TranslationOpcodeIsBegin(opcode)) {
+ int temp_index = index_;
+ // The first argument for BEGIN is the distance, in bytes, since the
+ // previous BEGIN, or zero to indicate that MATCH_PREVIOUS_TRANSLATION will
+ // not be used in this translation.
+ uint32_t lookback_distance =
+ base::VLQDecodeUnsigned(buffer_.GetDataStartAddress(), &temp_index);
+ if (lookback_distance) {
+ previous_index_ = index_ - 1 - lookback_distance;
+ DCHECK(TranslationOpcodeIsBegin(
+ static_cast<TranslationOpcode>(buffer_.get(previous_index_))));
+ // The previous BEGIN should specify zero as its lookback distance,
+ // meaning it won't use MATCH_PREVIOUS_TRANSLATION.
+ DCHECK_EQ(buffer_.get(previous_index_ + 1), 0);
+ }
+ ops_since_previous_index_was_updated_ = 1;
+ } else if (opcode == TranslationOpcode::MATCH_PREVIOUS_TRANSLATION) {
+ for (int i = 0; i < ops_since_previous_index_was_updated_; ++i) {
+ SkipOpcodeAndItsOperandsAtPreviousIndex();
+ }
+ ops_since_previous_index_was_updated_ = 0;
+ opcode = NextOpcodeAtPreviousIndex();
} else {
- return index_ < buffer_.length();
+ ++ops_since_previous_index_was_updated_;
}
+ return opcode;
}
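
A standalone sketch of the one-byte decoding rule implemented in NextOpcode() above: any byte too large to be a real opcode is an implicit MATCH_PREVIOUS_TRANSLATION whose repeat count is folded into the same byte, while the explicit opcode carries its count as a separate VLQ operand. The opcode count and the numeric id used for MATCH_PREVIOUS_TRANSLATION below are assumptions for illustration, not V8's actual values.

#include <cstdint>
#include <cstdio>
#include <utility>

constexpr uint8_t kNumOpcodes = 36;     // assumed number of real opcodes
constexpr uint8_t kMatchPrevious = 35;  // assumed MATCH_PREVIOUS_TRANSLATION id

// Returns {opcode, match_count}; match_count only matters when the opcode is
// kMatchPrevious.
std::pair<uint8_t, int> DecodeOpcodeByte(uint8_t byte, int next_vlq_operand) {
  if (byte >= kNumOpcodes) {
    // Short form: the repeat count is folded into the opcode byte itself.
    return {kMatchPrevious, byte - kNumOpcodes};
  }
  if (byte == kMatchPrevious) {
    // Long form: the count follows as a separate VLQ-encoded operand.
    return {kMatchPrevious, next_vlq_operand};
  }
  return {byte, 0};
}

int main() {
  auto short_form = DecodeOpcodeByte(/*byte=*/40, /*next_vlq_operand=*/0);
  auto long_form = DecodeOpcodeByte(kMatchPrevious, /*next_vlq_operand=*/300);
  std::printf("short: count=%d, long: count=%d\n", short_form.second,
              long_form.second);
}
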
-void TranslationArrayBuilder::Add(int32_t value) {
+bool TranslationArrayIterator::HasNextOpcode() const {
if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
- contents_for_compression_.push_back(value);
+ return index_ < static_cast<int>(uncompressed_contents_.size());
} else {
- base::VLQEncode(&contents_, value);
+ return index_ < buffer_.length() ||
+ remaining_ops_to_use_from_previous_translation_ > 1;
}
}
-void TranslationArrayBuilder::AddOpcode(TranslationOpcode opcode) {
- static_assert(kNumTranslationOpcodes - 1 <= base::kDataMask);
- if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
- contents_for_compression_.push_back(static_cast<byte>(opcode));
- } else {
- contents_.push_back(static_cast<byte>(opcode));
+void TranslationArrayIterator::SkipOpcodeAndItsOperandsAtPreviousIndex() {
+ TranslationOpcode opcode = NextOpcodeAtPreviousIndex();
+ for (int count = TranslationOpcodeOperandCount(opcode); count != 0; --count) {
+ NextUnsignedOperandAtPreviousIndex();
}
}
-void TranslationArrayBuilder::AddRegister(Register reg) {
- static_assert(Register::kNumRegisters - 1 <= base::kDataMask);
+namespace {
+
+class OperandBase {
+ public:
+ explicit OperandBase(uint32_t value) : value_(value) {}
+ uint32_t value() const { return value_; }
+
+ private:
+ uint32_t value_;
+};
+
+class SmallUnsignedOperand : public OperandBase {
+ public:
+ explicit SmallUnsignedOperand(uint32_t value) : OperandBase(value) {
+ DCHECK_LE(value, base::kDataMask);
+ }
+ void WriteVLQ(ZoneVector<uint8_t>* buffer) { buffer->push_back(value()); }
+ bool IsSigned() const { return false; }
+};
+
+class UnsignedOperand : public OperandBase {
+ public:
+ explicit UnsignedOperand(uint32_t value) : OperandBase(value) {}
+ void WriteVLQ(ZoneVector<uint8_t>* buffer) {
+ base::VLQEncodeUnsigned(
+ [buffer](byte value) {
+ buffer->push_back(value);
+ return &buffer->back();
+ },
+ value());
+ }
+ bool IsSigned() const { return false; }
+};
+
+class SignedOperand : public OperandBase {
+ public:
+ explicit SignedOperand(int32_t value) : OperandBase(value) {}
+ void WriteVLQ(ZoneVector<uint8_t>* buffer) {
+ base::VLQEncode(
+ [buffer](byte value) {
+ buffer->push_back(value);
+ return &buffer->back();
+ },
+ value());
+ }
+ bool IsSigned() const { return true; }
+};
+
+template <typename... T>
+inline bool OperandsEqual(uint32_t* expected_operands, T... operands) {
+ return (... && (*(expected_operands++) == operands.value()));
+}
+
+} // namespace
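
The operand classes above delegate to base::VLQEncode/VLQEncodeUnsigned, and SmallUnsignedOperand can skip the variable-length path because values up to base::kDataMask fit in a single byte with the continuation bit clear (assuming the usual 7-bit data mask). A standalone sketch of the general technique, seven payload bits per byte with the high bit as the continuation marker; it illustrates the idea rather than reproducing V8's exact signed encoding:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

void VLQEncodeUnsigned(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // continuation bit: more bytes follow
    out->push_back(byte);
  } while (value != 0);
}

uint32_t VLQDecodeUnsigned(const std::vector<uint8_t>& in, size_t* index) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = in[(*index)++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return result;
}

int main() {
  std::vector<uint8_t> buf;
  VLQEncodeUnsigned(&buf, 300);  // needs two bytes: 300 > 0x7f
  size_t index = 0;
  std::printf("bytes=%zu decoded=%u\n", buf.size(),
              VLQDecodeUnsigned(buf, &index));
}
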
+
+template <typename... T>
+void TranslationArrayBuilder::AddRawToContents(TranslationOpcode opcode,
+ T... operands) {
+ DCHECK_EQ(sizeof...(T), TranslationOpcodeOperandCount(opcode));
+ DCHECK(!v8_flags.turbo_compress_translation_arrays);
+ contents_.push_back(static_cast<byte>(opcode));
+ (..., operands.WriteVLQ(&contents_));
+}
+
+template <typename... T>
+void TranslationArrayBuilder::AddRawToContentsForCompression(
+ TranslationOpcode opcode, T... operands) {
+ DCHECK_EQ(sizeof...(T), TranslationOpcodeOperandCount(opcode));
+ DCHECK(v8_flags.turbo_compress_translation_arrays);
+ contents_for_compression_.push_back(static_cast<byte>(opcode));
+ (..., contents_for_compression_.push_back(operands.value()));
+}
+
+template <typename... T>
+void TranslationArrayBuilder::AddRawBegin(bool update_feedback, T... operands) {
+ auto opcode = update_feedback ? TranslationOpcode::BEGIN_WITH_FEEDBACK
+ : TranslationOpcode::BEGIN_WITHOUT_FEEDBACK;
if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
- contents_for_compression_.push_back(static_cast<byte>(reg.code()));
+ AddRawToContentsForCompression(opcode, operands...);
} else {
- contents_.push_back(static_cast<byte>(reg.code()));
+ AddRawToContents(opcode, operands...);
+#ifdef ENABLE_SLOW_DCHECKS
+ if (v8_flags.enable_slow_asserts) {
+ all_instructions_.emplace_back(opcode, operands...);
+ }
+#endif
}
}
-void TranslationArrayBuilder::AddFloatRegister(FloatRegister reg) {
- static_assert(FloatRegister::kNumRegisters - 1 <= base::kDataMask);
- if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
- contents_for_compression_.push_back(static_cast<byte>(reg.code()));
+int TranslationArrayBuilder::BeginTranslation(int frame_count,
+ int jsframe_count,
+ bool update_feedback) {
+ FinishPendingInstructionIfNeeded();
+ int start_index = Size();
+ int distance_from_last_start = 0;
+
+ // We should reuse an existing basis translation if:
+ // - we just finished writing the basis translation
+ // (match_previous_allowed_ is false), or
+ // - the translation we just finished was moderately successful at reusing
+ // instructions from the basis translation. We'll define "moderately
+ // successful" as reusing more than 3/4 of the basis instructions.
+ // Otherwise we should reset and write a new basis translation. At the
+ // beginning, match_previous_allowed_ is initialized to true so that this
+ // logic decides to start a new basis translation.
+ if (!match_previous_allowed_ ||
+ total_matching_instructions_in_current_translation_ >
+ instruction_index_within_translation_ / 4 * 3) {
+ // Use the existing basis translation.
+ distance_from_last_start = start_index - index_of_basis_translation_start_;
+ match_previous_allowed_ = true;
} else {
- contents_.push_back(static_cast<byte>(reg.code()));
+ // Abandon the existing basis translation and write a new one.
+ basis_instructions_.clear();
+ index_of_basis_translation_start_ = start_index;
+ match_previous_allowed_ = false;
+ }
+
+ total_matching_instructions_in_current_translation_ = 0;
+ instruction_index_within_translation_ = 0;
+
+ // BEGIN instructions can't be replaced by MATCH_PREVIOUS_TRANSLATION, so
+ // use a special helper function rather than calling Add().
+ AddRawBegin(update_feedback, UnsignedOperand(distance_from_last_start),
+ SignedOperand(frame_count), SignedOperand(jsframe_count));
+ return start_index;
+}
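
A standalone restatement of the keep-or-rebuild decision at the top of BeginTranslation(): the basis is kept either right after it was written (matching was not yet allowed) or when the translation that just ended reused instructions for more than roughly three quarters of its own length. The threshold uses the same integer arithmetic as the original, so it is really 3 * (n / 4); field names are replaced by local parameters here.

#include <cstdio>

bool ShouldKeepBasis(bool match_previous_allowed, int matched_in_last,
                     int instructions_in_last) {
  // Keep the basis if we just wrote it (matching was not allowed yet), or if
  // the previous translation matched more than 3/4 of its instructions.
  return !match_previous_allowed ||
         matched_in_last > instructions_in_last / 4 * 3;
}

int main() {
  // 90 of 100 instructions matched: 90 > 3 * (100 / 4) = 75 -> keep the basis.
  std::printf("%d\n", ShouldKeepBasis(true, 90, 100));
  // 60 of 100 matched: 60 <= 75 -> abandon the basis and write a fresh one.
  std::printf("%d\n", ShouldKeepBasis(true, 60, 100));
  // Right after writing a fresh basis, matching was not yet allowed -> keep.
  std::printf("%d\n", ShouldKeepBasis(false, 0, 0));
}
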
+
+void TranslationArrayBuilder::FinishPendingInstructionIfNeeded() {
+ if (matching_instructions_count_) {
+ total_matching_instructions_in_current_translation_ +=
+ matching_instructions_count_;
+
+ // There is a short form for the MATCH_PREVIOUS_TRANSLATION instruction
+ // because it's the most common opcode: rather than spending a byte on the
+ // opcode and a second byte on the operand, we can use only a single byte
+ // which doesn't match any valid opcode.
+ const int kMaxShortenableOperand =
+ std::numeric_limits<uint8_t>::max() - kNumTranslationOpcodes;
+ if (matching_instructions_count_ <= kMaxShortenableOperand) {
+ contents_.push_back(kNumTranslationOpcodes +
+ matching_instructions_count_);
+ } else {
+ // The operand didn't fit in the opcode byte, so encode it normally.
+ AddRawToContents(
+ TranslationOpcode::MATCH_PREVIOUS_TRANSLATION,
+ UnsignedOperand(static_cast<uint32_t>(matching_instructions_count_)));
+ }
+ matching_instructions_count_ = 0;
}
}
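
The writer side of the compact encoding sketched after NextOpcode(): a pending run of matching instructions is flushed either as a single byte above the valid opcode range or, when the count is too large for that, as an explicit MATCH_PREVIOUS_TRANSLATION opcode followed by a VLQ operand. The opcode count below is again an assumed stand-in for kNumTranslationOpcodes.

#include <cstdio>

constexpr int kNumOpcodes = 36;  // assumed, as in the earlier sketches

// Returns true when a run of `count` matching instructions fits the one-byte
// short form used by FinishPendingInstructionIfNeeded() above; otherwise the
// builder emits the explicit opcode and writes the count as a VLQ operand.
bool FitsShortForm(int count) {
  const int kMaxShortenableOperand = 255 - kNumOpcodes;
  return count <= kMaxShortenableOperand;
}

int main() {
  // With the assumed 36 opcodes the boundary sits at 255 - 36 = 219.
  std::printf("%d %d\n", FitsShortForm(219), FitsShortForm(220));  // 1 0
}
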
-void TranslationArrayBuilder::AddDoubleRegister(DoubleRegister reg) {
- static_assert(DoubleRegister::kNumRegisters - 1 <= base::kDataMask);
+template <typename... T>
+void TranslationArrayBuilder::Add(TranslationOpcode opcode, T... operands) {
+ DCHECK_EQ(sizeof...(T), TranslationOpcodeOperandCount(opcode));
if (V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)) {
- contents_for_compression_.push_back(static_cast<byte>(reg.code()));
+ AddRawToContentsForCompression(opcode, operands...);
+ return;
+ }
+#ifdef ENABLE_SLOW_DCHECKS
+ if (v8_flags.enable_slow_asserts) {
+ all_instructions_.emplace_back(opcode, operands...);
+ }
+#endif
+ if (match_previous_allowed_ &&
+ instruction_index_within_translation_ < basis_instructions_.size() &&
+ opcode ==
+ basis_instructions_[instruction_index_within_translation_].opcode &&
+ OperandsEqual(
+ basis_instructions_[instruction_index_within_translation_].operands,
+ operands...)) {
+ ++matching_instructions_count_;
} else {
- contents_.push_back(static_cast<byte>(reg.code()));
+ FinishPendingInstructionIfNeeded();
+ AddRawToContents(opcode, operands...);
+ if (!match_previous_allowed_) {
+ // Include this instruction in basis_instructions_ so that future
+ // translations can check whether they match with it.
+ DCHECK_EQ(basis_instructions_.size(),
+ instruction_index_within_translation_);
+ basis_instructions_.emplace_back(opcode, operands...);
+ }
}
+ ++instruction_index_within_translation_;
}
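
Putting Add() and FinishPendingInstructionIfNeeded() together, a toy model of the matching machinery: the first translation is written in full and doubles as the basis, and a later, mostly identical translation collapses its matching prefix into a pending count that is flushed as one token. The token values and container layout are invented for the sketch and are not V8's encoding.

#include <cstddef>
#include <cstdio>
#include <initializer_list>
#include <vector>

struct Instr {
  int opcode;
  int operand;
  bool operator==(const Instr& other) const {
    return opcode == other.opcode && operand == other.operand;
  }
};

struct ToyBuilder {
  std::vector<Instr> basis;  // instructions of the basis translation
  std::vector<int> out;      // emitted stream (toy tokens)
  size_t index = 0;          // position within the current translation
  int pending_matches = 0;

  void Flush() {
    if (pending_matches > 0) {
      out.push_back(1000 + pending_matches);  // toy "match previous" token
      pending_matches = 0;
    }
  }

  void Add(Instr instr, bool match_allowed) {
    if (match_allowed && index < basis.size() && basis[index] == instr) {
      ++pending_matches;  // identical to the basis: count it, emit nothing
    } else {
      Flush();
      out.push_back(instr.opcode);
      out.push_back(instr.operand);
      if (!match_allowed) basis.push_back(instr);  // we are writing the basis
    }
    ++index;
  }
};

int main() {
  ToyBuilder b;
  // First translation: becomes the basis and is written out in full.
  for (Instr i : {Instr{7, 1}, Instr{8, 2}, Instr{9, 3}}) b.Add(i, false);
  // Second translation: identical except for the last instruction.
  b.index = 0;
  for (Instr i : {Instr{7, 1}, Instr{8, 2}, Instr{9, 99}}) b.Add(i, true);
  b.Flush();
  // 6 tokens from the basis pass + 1 match token + 2 for the new tail = 9.
  std::printf("emitted %zu tokens\n", b.out.size());
}
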
Handle<TranslationArray> TranslationArrayBuilder::ToTranslationArray(
@@ -156,21 +391,37 @@ Handle<TranslationArray> TranslationArrayBuilder::ToTranslationArray(
}
#endif
DCHECK(!v8_flags.turbo_compress_translation_arrays);
+ FinishPendingInstructionIfNeeded();
Handle<TranslationArray> result =
factory->NewByteArray(SizeInBytes(), AllocationType::kOld);
memcpy(result->GetDataStartAddress(), contents_.data(),
contents_.size() * sizeof(uint8_t));
+#ifdef ENABLE_SLOW_DCHECKS
+ if (v8_flags.enable_slow_asserts) {
+ // Check that we can read back all of the same content we intended to write.
+ TranslationArrayIterator it(*result, 0);
+ for (size_t i = 0; i < all_instructions_.size(); ++i) {
+ CHECK(it.HasNextOpcode());
+ const Instruction& instruction = all_instructions_[i];
+ CHECK_EQ(instruction.opcode, it.NextOpcode());
+ for (int j = 0; j < TranslationOpcodeOperandCount(instruction.opcode);
+ ++j) {
+ uint32_t operand = instruction.is_operand_signed[j]
+ ? it.NextOperand()
+ : it.NextOperandUnsigned();
+ CHECK_EQ(instruction.operands[j], operand);
+ }
+ }
+ }
+#endif
return result;
}
void TranslationArrayBuilder::BeginBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
auto opcode = TranslationOpcode::BUILTIN_CONTINUATION_FRAME;
- AddOpcode(opcode);
- Add(bytecode_offset.ToInt());
- Add(literal_id);
- Add(height);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()), SignedOperand(literal_id),
+ SignedOperand(height));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -178,237 +429,188 @@ void TranslationArrayBuilder::BeginJSToWasmBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height,
base::Optional<wasm::ValueKind> return_kind) {
auto opcode = TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME;
- AddOpcode(opcode);
- Add(bytecode_offset.ToInt());
- Add(literal_id);
- Add(height);
- Add(return_kind ? static_cast<int>(return_kind.value()) : kNoWasmReturnKind);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()), SignedOperand(literal_id),
+ SignedOperand(height),
+ SignedOperand(return_kind ? static_cast<int>(return_kind.value())
+ : kNoWasmReturnKind));
}
#endif // V8_ENABLE_WEBASSEMBLY
void TranslationArrayBuilder::BeginJavaScriptBuiltinContinuationFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
auto opcode = TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME;
- AddOpcode(opcode);
- Add(bytecode_offset.ToInt());
- Add(literal_id);
- Add(height);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()), SignedOperand(literal_id),
+ SignedOperand(height));
}
void TranslationArrayBuilder::BeginJavaScriptBuiltinContinuationWithCatchFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
auto opcode =
TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME;
- AddOpcode(opcode);
- Add(bytecode_offset.ToInt());
- Add(literal_id);
- Add(height);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()), SignedOperand(literal_id),
+ SignedOperand(height));
}
void TranslationArrayBuilder::BeginConstructStubFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
auto opcode = TranslationOpcode::CONSTRUCT_STUB_FRAME;
- AddOpcode(opcode);
- Add(bytecode_offset.ToInt());
- Add(literal_id);
- Add(height);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()), SignedOperand(literal_id),
+ SignedOperand(height));
}
void TranslationArrayBuilder::BeginInlinedExtraArguments(int literal_id,
unsigned height) {
auto opcode = TranslationOpcode::INLINED_EXTRA_ARGUMENTS;
- AddOpcode(opcode);
- Add(literal_id);
- Add(height);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
+ Add(opcode, SignedOperand(literal_id), SignedOperand(height));
}
void TranslationArrayBuilder::BeginInterpretedFrame(
BytecodeOffset bytecode_offset, int literal_id, unsigned height,
int return_value_offset, int return_value_count) {
- auto opcode = TranslationOpcode::INTERPRETED_FRAME;
- AddOpcode(opcode);
- Add(bytecode_offset.ToInt());
- Add(literal_id);
- Add(height);
- Add(return_value_offset);
- Add(return_value_count);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 5);
+ if (return_value_count == 0) {
+ auto opcode = TranslationOpcode::INTERPRETED_FRAME_WITHOUT_RETURN;
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()),
+ SignedOperand(literal_id), SignedOperand(height));
+ } else {
+ auto opcode = TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN;
+ Add(opcode, SignedOperand(bytecode_offset.ToInt()),
+ SignedOperand(literal_id), SignedOperand(height),
+ SignedOperand(return_value_offset), SignedOperand(return_value_count));
+ }
}
void TranslationArrayBuilder::ArgumentsElements(CreateArgumentsType type) {
auto opcode = TranslationOpcode::ARGUMENTS_ELEMENTS;
- AddOpcode(opcode);
- Add(static_cast<uint8_t>(type));
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(static_cast<uint8_t>(type)));
}
void TranslationArrayBuilder::ArgumentsLength() {
auto opcode = TranslationOpcode::ARGUMENTS_LENGTH;
- AddOpcode(opcode);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 0);
+ Add(opcode);
}
void TranslationArrayBuilder::BeginCapturedObject(int length) {
auto opcode = TranslationOpcode::CAPTURED_OBJECT;
- AddOpcode(opcode);
- Add(length);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(length));
}
void TranslationArrayBuilder::DuplicateObject(int object_index) {
auto opcode = TranslationOpcode::DUPLICATED_OBJECT;
- AddOpcode(opcode);
- Add(object_index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(object_index));
+}
+
+void TranslationArrayBuilder::StoreRegister(TranslationOpcode opcode,
+ Register reg) {
+ static_assert(Register::kNumRegisters - 1 <= base::kDataMask);
+ Add(opcode, SmallUnsignedOperand(static_cast<byte>(reg.code())));
}
void TranslationArrayBuilder::StoreRegister(Register reg) {
auto opcode = TranslationOpcode::REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreInt32Register(Register reg) {
auto opcode = TranslationOpcode::INT32_REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreInt64Register(Register reg) {
auto opcode = TranslationOpcode::INT64_REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreSignedBigInt64Register(Register reg) {
auto opcode = TranslationOpcode::SIGNED_BIGINT64_REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreUnsignedBigInt64Register(Register reg) {
auto opcode = TranslationOpcode::UNSIGNED_BIGINT64_REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreUint32Register(Register reg) {
auto opcode = TranslationOpcode::UINT32_REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreBoolRegister(Register reg) {
auto opcode = TranslationOpcode::BOOL_REGISTER;
- AddOpcode(opcode);
- AddRegister(reg);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ StoreRegister(opcode, reg);
}
void TranslationArrayBuilder::StoreFloatRegister(FloatRegister reg) {
+ static_assert(FloatRegister::kNumRegisters - 1 <= base::kDataMask);
auto opcode = TranslationOpcode::FLOAT_REGISTER;
- AddOpcode(opcode);
- AddFloatRegister(reg);
+ Add(opcode, SmallUnsignedOperand(static_cast<byte>(reg.code())));
}
void TranslationArrayBuilder::StoreDoubleRegister(DoubleRegister reg) {
+ static_assert(DoubleRegister::kNumRegisters - 1 <= base::kDataMask);
auto opcode = TranslationOpcode::DOUBLE_REGISTER;
- AddOpcode(opcode);
- AddDoubleRegister(reg);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SmallUnsignedOperand(static_cast<byte>(reg.code())));
}
void TranslationArrayBuilder::StoreStackSlot(int index) {
auto opcode = TranslationOpcode::STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreInt32StackSlot(int index) {
auto opcode = TranslationOpcode::INT32_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreInt64StackSlot(int index) {
auto opcode = TranslationOpcode::INT64_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreSignedBigInt64StackSlot(int index) {
auto opcode = TranslationOpcode::SIGNED_BIGINT64_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreUnsignedBigInt64StackSlot(int index) {
auto opcode = TranslationOpcode::UNSIGNED_BIGINT64_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreUint32StackSlot(int index) {
auto opcode = TranslationOpcode::UINT32_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreBoolStackSlot(int index) {
auto opcode = TranslationOpcode::BOOL_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreFloatStackSlot(int index) {
auto opcode = TranslationOpcode::FLOAT_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreDoubleStackSlot(int index) {
auto opcode = TranslationOpcode::DOUBLE_STACK_SLOT;
- AddOpcode(opcode);
- Add(index);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ Add(opcode, SignedOperand(index));
}
void TranslationArrayBuilder::StoreLiteral(int literal_id) {
auto opcode = TranslationOpcode::LITERAL;
- AddOpcode(opcode);
- Add(literal_id);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ DCHECK_GE(literal_id, 0);
+ Add(opcode, SignedOperand(literal_id));
}
void TranslationArrayBuilder::StoreOptimizedOut() {
auto opcode = TranslationOpcode::OPTIMIZED_OUT;
- AddOpcode(opcode);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 0);
+ Add(opcode);
}
void TranslationArrayBuilder::AddUpdateFeedback(int vector_literal, int slot) {
auto opcode = TranslationOpcode::UPDATE_FEEDBACK;
- AddOpcode(opcode);
- Add(vector_literal);
- Add(slot);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
+ Add(opcode, SignedOperand(vector_literal), SignedOperand(slot));
}
void TranslationArrayBuilder::StoreJSFrameFunction() {
diff --git a/deps/v8/src/deoptimizer/translation-array.h b/deps/v8/src/deoptimizer/translation-array.h
index 2dee8e86c2..e6279c266a 100644
--- a/deps/v8/src/deoptimizer/translation-array.h
+++ b/deps/v8/src/deoptimizer/translation-array.h
@@ -30,40 +30,52 @@ class TranslationArrayIterator {
public:
TranslationArrayIterator(TranslationArray buffer, int index);
- int32_t Next();
+ int32_t NextOperand();
- uint32_t NextUnsigned();
+ uint32_t NextOperandUnsigned();
- bool HasNext() const;
+ TranslationOpcode NextOpcode();
- void Skip(int n) {
- for (int i = 0; i < n; i++) Next();
+ bool HasNextOpcode() const;
+
+ void SkipOperands(int n) {
+ for (int i = 0; i < n; i++) NextOperand();
}
private:
+ TranslationOpcode NextOpcodeAtPreviousIndex();
+ uint32_t NextUnsignedOperandAtPreviousIndex();
+ void SkipOpcodeAndItsOperandsAtPreviousIndex();
+
std::vector<int32_t> uncompressed_contents_;
TranslationArray buffer_;
int index_;
+
+ // This decrementing counter indicates how many more times to read operations
+ // from the previous translation before continuing to move the index forward.
+ int remaining_ops_to_use_from_previous_translation_ = 0;
+
+ // An index into buffer_ for operations starting at a previous BEGIN, which
+ // can be used to read operations referred to by MATCH_PREVIOUS_TRANSLATION.
+ int previous_index_ = 0;
+
+ // When starting a new MATCH_PREVIOUS_TRANSLATION operation, we'll need to
+ // advance the previous_index_ by this many steps.
+ int ops_since_previous_index_was_updated_ = 0;
};
class TranslationArrayBuilder {
public:
explicit TranslationArrayBuilder(Zone* zone)
- : contents_(zone), contents_for_compression_(zone), zone_(zone) {}
+ : contents_(zone),
+ contents_for_compression_(zone),
+ basis_instructions_(zone),
+ zone_(zone) {}
Handle<TranslationArray> ToTranslationArray(Factory* factory);
int BeginTranslation(int frame_count, int jsframe_count,
- int update_feedback_count) {
- int start_index = Size();
- auto opcode = TranslationOpcode::BEGIN;
- AddOpcode(opcode);
- Add(frame_count);
- Add(jsframe_count);
- Add(update_feedback_count);
- DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
- return start_index;
- }
+ bool update_feedback);
void BeginInterpretedFrame(BytecodeOffset bytecode_offset, int literal_id,
unsigned height, int return_value_offset,
@@ -87,6 +99,7 @@ class TranslationArrayBuilder {
void BeginCapturedObject(int length);
void AddUpdateFeedback(int vector_literal, int slot);
void DuplicateObject(int object_index);
+ void StoreRegister(TranslationOpcode opcode, Register reg);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreInt64Register(Register reg);
@@ -110,11 +123,46 @@ class TranslationArrayBuilder {
void StoreJSFrameFunction();
private:
- void Add(int32_t value);
- void AddOpcode(TranslationOpcode opcode);
- void AddRegister(Register reg);
- void AddFloatRegister(FloatRegister reg);
- void AddDoubleRegister(DoubleRegister reg);
+ struct Instruction {
+ template <typename... T>
+ Instruction(TranslationOpcode opcode, T... operands)
+ : opcode(opcode),
+ operands{operands.value()...}
+#ifdef ENABLE_SLOW_DCHECKS
+ ,
+ is_operand_signed{operands.IsSigned()...}
+#endif
+ {
+ }
+ TranslationOpcode opcode;
+ // The operands for the instruction. Signed values were static_casted to
+ // unsigned.
+ uint32_t operands[kMaxTranslationOperandCount];
+#ifdef ENABLE_SLOW_DCHECKS
+ bool is_operand_signed[kMaxTranslationOperandCount];
+#endif
+ };
+
+ // Either adds the instruction or increments matching_instructions_count_,
+ // depending on whether the instruction matches the corresponding instruction
+ // from the previous translation.
+ template <typename... T>
+ void Add(TranslationOpcode opcode, T... operands);
+
+ // Adds the instruction to contents_, without performing the other steps of
+ // Add(). Requires !v8_flags.turbo_compress_translation_arrays.
+ template <typename... T>
+ void AddRawToContents(TranslationOpcode opcode, T... operands);
+
+ // Adds the instruction to contents_for_compression_, without performing the
+ // other steps of Add(). Requires v8_flags.turbo_compress_translation_arrays.
+ template <typename... T>
+ void AddRawToContentsForCompression(TranslationOpcode opcode, T... operands);
+
+ // Adds a BEGIN instruction to contents_ or contents_for_compression_, but
+ // does not update other state. Used by BeginTranslation.
+ template <typename... T>
+ void AddRawBegin(bool update_feedback, T... operands);
int Size() const {
return V8_UNLIKELY(v8_flags.turbo_compress_translation_arrays)
@@ -129,9 +177,35 @@ class TranslationArrayBuilder {
Zone* zone() const { return zone_; }
+ void FinishPendingInstructionIfNeeded();
+
ZoneVector<uint8_t> contents_;
ZoneVector<int32_t> contents_for_compression_;
+ // If match_previous_allowed_ is false, then this vector contains the
+ // instructions written so far in the current translation (since the last
+ // BEGIN). If match_previous_allowed_ is true, then this vector contains the
+ // instructions from the basis translation (the one written with
+ // !match_previous_allowed_). This allows Add() to easily check whether a
+ // newly added instruction matches the corresponding one from the basis
+ // translation.
+ ZoneVector<Instruction> basis_instructions_;
+#ifdef ENABLE_SLOW_DCHECKS
+ std::vector<Instruction> all_instructions_;
+#endif
Zone* const zone_;
+ // How many consecutive instructions we've skipped writing because they match
+ // the basis translation.
+ size_t matching_instructions_count_ = 0;
+ size_t total_matching_instructions_in_current_translation_ = 0;
+ // The current index within basis_instructions_.
+ size_t instruction_index_within_translation_ = 0;
+ // The byte index within the contents_ array of the BEGIN instruction for the
+ // basis translation (the most recent translation which was fully written out,
+ // not using MATCH_PREVIOUS_TRANSLATION instructions).
+ int index_of_basis_translation_start_ = 0;
+ // Whether the builder can use MATCH_PREVIOUS_TRANSLATION in the current
+ // translation.
+ bool match_previous_allowed_ = true;
};
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/translation-opcode.h b/deps/v8/src/deoptimizer/translation-opcode.h
index 1f83738ec4..31e8e54203 100644
--- a/deps/v8/src/deoptimizer/translation-opcode.h
+++ b/deps/v8/src/deoptimizer/translation-opcode.h
@@ -14,7 +14,8 @@ namespace internal {
#define TRANSLATION_OPCODE_LIST(V) \
V(ARGUMENTS_ELEMENTS, 1) \
V(ARGUMENTS_LENGTH, 0) \
- V(BEGIN, 3) \
+ V(BEGIN_WITHOUT_FEEDBACK, 3) \
+ V(BEGIN_WITH_FEEDBACK, 3) \
V(BOOL_REGISTER, 1) \
V(BOOL_STACK_SLOT, 1) \
V(BUILTIN_CONTINUATION_FRAME, 3) \
@@ -34,7 +35,8 @@ namespace internal {
V(SIGNED_BIGINT64_STACK_SLOT, 1) \
V(UNSIGNED_BIGINT64_REGISTER, 1) \
V(UNSIGNED_BIGINT64_STACK_SLOT, 1) \
- V(INTERPRETED_FRAME, 5) \
+ V(INTERPRETED_FRAME_WITH_RETURN, 5) \
+ V(INTERPRETED_FRAME_WITHOUT_RETURN, 3) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME, 3) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME, 3) \
IF_WASM(V, JS_TO_WASM_BUILTIN_CONTINUATION_FRAME, 4) \
@@ -44,7 +46,8 @@ namespace internal {
V(STACK_SLOT, 1) \
V(UINT32_REGISTER, 1) \
V(UINT32_STACK_SLOT, 1) \
- V(UPDATE_FEEDBACK, 2)
+ V(UPDATE_FEEDBACK, 2) \
+ V(MATCH_PREVIOUS_TRANSLATION, 1)
enum class TranslationOpcode {
#define CASE(name, ...) name,
@@ -57,10 +60,6 @@ static constexpr int kNumTranslationOpcodes =
0 TRANSLATION_OPCODE_LIST(PLUS_ONE);
#undef PLUS_ONE
-constexpr TranslationOpcode TranslationOpcodeFromInt(uint32_t i) {
- return static_cast<TranslationOpcode>(i);
-}
-
inline int TranslationOpcodeOperandCount(TranslationOpcode o) {
#define CASE(name, operand_count) operand_count,
static const int counts[] = {TRANSLATION_OPCODE_LIST(CASE)};
@@ -75,6 +74,17 @@ inline const char* TranslationOpcodeToString(TranslationOpcode o) {
return names[static_cast<int>(o)];
}
+constexpr int kMaxTranslationOperandCount = 5;
+#define CASE(name, operand_count) \
+ static_assert(operand_count <= kMaxTranslationOperandCount);
+TRANSLATION_OPCODE_LIST(CASE)
+#undef CASE
+
+inline bool TranslationOpcodeIsBegin(TranslationOpcode o) {
+ return o == TranslationOpcode::BEGIN_WITH_FEEDBACK ||
+ o == TranslationOpcode::BEGIN_WITHOUT_FEEDBACK;
+}
+
#undef TRANSLATION_OPCODE_LIST
} // namespace internal
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index af6e7f5441..94471fa2c7 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -1032,72 +1032,212 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
#undef LOAD_STORE_PAIR_LIST
+#define LOAD_STORE_ACQUIRE_RELEASE_LIST(V) \
+ V(STLXR_b, "stlxrb", "'Ws, 'Wt") \
+ V(STLXR_h, "stlxrh", "'Ws, 'Wt") \
+ V(STLXR_w, "stlxr", "'Ws, 'Wt") \
+ V(STLXR_x, "stlxr", "'Ws, 'Xt") \
+ V(LDAXR_b, "ldaxrb", "'Wt") \
+ V(LDAXR_h, "ldaxrh", "'Wt") \
+ V(LDAXR_w, "ldaxr", "'Wt") \
+ V(LDAXR_x, "ldaxr", "'Xt") \
+ V(STLR_b, "stlrb", "'Wt") \
+ V(STLR_h, "stlrh", "'Wt") \
+ V(STLR_w, "stlr", "'Wt") \
+ V(STLR_x, "stlr", "'Xt") \
+ V(LDAR_b, "ldarb", "'Wt") \
+ V(LDAR_h, "ldarh", "'Wt") \
+ V(LDAR_w, "ldar", "'Wt") \
+ V(LDAR_x, "ldar", "'Xt") \
+ V(CAS_w, "cas", "'Ws, 'Wt") \
+ V(CAS_x, "cas", "'Xs, 'Xt") \
+ V(CASA_w, "casa", "'Ws, 'Wt") \
+ V(CASA_x, "casa", "'Xs, 'Xt") \
+ V(CASL_w, "casl", "'Ws, 'Wt") \
+ V(CASL_x, "casl", "'Xs, 'Xt") \
+ V(CASAL_w, "casal", "'Ws, 'Wt") \
+ V(CASAL_x, "casal", "'Xs, 'Xt") \
+ V(CASB, "casb", "'Ws, 'Wt") \
+ V(CASAB, "casab", "'Ws, 'Wt") \
+ V(CASLB, "caslb", "'Ws, 'Wt") \
+ V(CASALB, "casalb", "'Ws, 'Wt") \
+ V(CASH, "cash", "'Ws, 'Wt") \
+ V(CASAH, "casah", "'Ws, 'Wt") \
+ V(CASLH, "caslh", "'Ws, 'Wt") \
+ V(CASALH, "casalh", "'Ws, 'Wt") \
+ V(CASP_w, "casp", "'Ws, 'Ws+, 'Wt, 'Wt+") \
+ V(CASP_x, "casp", "'Xs, 'Xs+, 'Xt, 'Xt+") \
+ V(CASPA_w, "caspa", "'Ws, 'Ws+, 'Wt, 'Wt+") \
+ V(CASPA_x, "caspa", "'Xs, 'Xs+, 'Xt, 'Xt+") \
+ V(CASPL_w, "caspl", "'Ws, 'Ws+, 'Wt, 'Wt+") \
+ V(CASPL_x, "caspl", "'Xs, 'Xs+, 'Xt, 'Xt+") \
+ V(CASPAL_w, "caspal", "'Ws, 'Ws+, 'Wt, 'Wt+") \
+ V(CASPAL_x, "caspal", "'Xs, 'Xs+, 'Xt, 'Xt+")
+
void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction* instr) {
const char* mnemonic = "unimplemented";
- const char* form = "'Wt, ['Xns]";
- const char* form_x = "'Xt, ['Xns]";
- const char* form_stlx = "'Ws, 'Wt, ['Xns]";
- const char* form_stlx_x = "'Ws, 'Xt, ['Xns]";
+ const char* form;
switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
- case LDAXR_b:
- mnemonic = "ldaxrb";
- break;
- case STLR_b:
- mnemonic = "stlrb";
- break;
- case LDAR_b:
- mnemonic = "ldarb";
- break;
- case LDAXR_h:
- mnemonic = "ldaxrh";
- break;
- case STLR_h:
- mnemonic = "stlrh";
- break;
- case LDAR_h:
- mnemonic = "ldarh";
- break;
- case LDAXR_w:
- mnemonic = "ldaxr";
- break;
- case STLR_w:
- mnemonic = "stlr";
- break;
- case LDAR_w:
- mnemonic = "ldar";
- break;
- case LDAXR_x:
- mnemonic = "ldaxr";
- form = form_x;
- break;
- case STLR_x:
- mnemonic = "stlr";
- form = form_x;
- break;
- case LDAR_x:
- mnemonic = "ldar";
- form = form_x;
- break;
- case STLXR_h:
- mnemonic = "stlxrh";
- form = form_stlx;
- break;
- case STLXR_b:
- mnemonic = "stlxrb";
- form = form_stlx;
- break;
- case STLXR_w:
- mnemonic = "stlxr";
- form = form_stlx;
- break;
- case STLXR_x:
- mnemonic = "stlxr";
- form = form_stlx_x;
- break;
+#define LSAR(A, B, C) \
+ case A: \
+ mnemonic = B; \
+ form = C ", ['Xns]"; \
+ break;
+ LOAD_STORE_ACQUIRE_RELEASE_LIST(LSAR)
+#undef LSAR
default:
form = "(LoadStoreAcquireRelease)";
}
+
+ switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
+ case CASP_w:
+ case CASP_x:
+ case CASPA_w:
+ case CASPA_x:
+ case CASPL_w:
+ case CASPL_x:
+ case CASPAL_w:
+ case CASPAL_x:
+ if ((instr->Rs() % 2 == 1) || (instr->Rt() % 2 == 1)) {
+ mnemonic = "unallocated";
+ form = "(LoadStoreExclusive)";
+ }
+ break;
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+#undef LOAD_STORE_ACQUIRE_RELEASE_LIST
+
+#define ATOMIC_MEMORY_SIMPLE_LIST(V) \
+ V(LDADD, "add") \
+ V(LDCLR, "clr") \
+ V(LDEOR, "eor") \
+ V(LDSET, "set") \
+ V(LDSMAX, "smax") \
+ V(LDSMIN, "smin") \
+ V(LDUMAX, "umax") \
+ V(LDUMIN, "umin")
+
+void DisassemblingDecoder::VisitAtomicMemory(Instruction* instr) {
+ const int kMaxAtomicOpMnemonicLength = 16;
+ const char* mnemonic;
+ const char* form = "'Ws, 'Wt, ['Xns]";
+
+ switch (instr->Mask(AtomicMemoryMask)) {
+#define AMS(A, MN) \
+ case A##B: \
+ mnemonic = MN "b"; \
+ break; \
+ case A##AB: \
+ mnemonic = MN "ab"; \
+ break; \
+ case A##LB: \
+ mnemonic = MN "lb"; \
+ break; \
+ case A##ALB: \
+ mnemonic = MN "alb"; \
+ break; \
+ case A##H: \
+ mnemonic = MN "h"; \
+ break; \
+ case A##AH: \
+ mnemonic = MN "ah"; \
+ break; \
+ case A##LH: \
+ mnemonic = MN "lh"; \
+ break; \
+ case A##ALH: \
+ mnemonic = MN "alh"; \
+ break; \
+ case A##_w: \
+ mnemonic = MN; \
+ break; \
+ case A##A_w: \
+ mnemonic = MN "a"; \
+ break; \
+ case A##L_w: \
+ mnemonic = MN "l"; \
+ break; \
+ case A##AL_w: \
+ mnemonic = MN "al"; \
+ break; \
+ case A##_x: \
+ mnemonic = MN; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break; \
+ case A##A_x: \
+ mnemonic = MN "a"; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break; \
+ case A##L_x: \
+ mnemonic = MN "l"; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break; \
+ case A##AL_x: \
+ mnemonic = MN "al"; \
+ form = "'Xs, 'Xt, ['Xns]"; \
+ break;
+ ATOMIC_MEMORY_SIMPLE_LIST(AMS)
+
+ // SWP has the same semantics as ldadd etc but without the store aliases.
+ AMS(SWP, "swp")
+#undef AMS
+
+ default:
+ mnemonic = "unimplemented";
+ form = "(AtomicMemory)";
+ }
+
+ const char* prefix = "";
+ switch (instr->Mask(AtomicMemoryMask)) {
+#define AMS(A, MN) \
+ case A##AB: \
+ case A##ALB: \
+ case A##AH: \
+ case A##ALH: \
+ case A##A_w: \
+ case A##AL_w: \
+ case A##A_x: \
+ case A##AL_x: \
+ prefix = "ld"; \
+ break; \
+ case A##B: \
+ case A##LB: \
+ case A##H: \
+ case A##LH: \
+ case A##_w: \
+ case A##L_w: { \
+ prefix = "ld"; \
+ unsigned rt = instr->Rt(); \
+ if (rt == kZeroRegCode) { \
+ prefix = "st"; \
+ form = "'Ws, ['Xns]"; \
+ } \
+ break; \
+ } \
+ case A##_x: \
+ case A##L_x: { \
+ prefix = "ld"; \
+ unsigned rt = instr->Rt(); \
+ if (rt == kZeroRegCode) { \
+ prefix = "st"; \
+ form = "'Xs, ['Xns]"; \
+ } \
+ break; \
+ }
+ ATOMIC_MEMORY_SIMPLE_LIST(AMS)
+#undef AMS
+ }
+
+ char buffer[kMaxAtomicOpMnemonicLength];
+ if (strlen(prefix) > 0) {
+ snprintf(buffer, kMaxAtomicOpMnemonicLength, "%s%s", prefix, mnemonic);
+ mnemonic = buffer;
+ }
+
Format(instr, mnemonic, form);
}
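
For readers unfamiliar with the X-macro pattern used by LOAD_STORE_ACQUIRE_RELEASE_LIST/LSAR and ATOMIC_MEMORY_SIMPLE_LIST/AMS above, here is a minimal standalone sketch (editor's illustration, not part of the patch; the DEMO_* names are invented): each list entry expands into a switch case that selects the mnemonic.

#include <cstdio>

// Hypothetical two-entry list standing in for LOAD_STORE_ACQUIRE_RELEASE_LIST.
#define DEMO_LIST(V)   \
  V(1, "ldar", "'Wt")  \
  V(2, "stlr", "'Wt")

const char* MnemonicFor(int op) {
  switch (op) {
    // Mirrors the LSAR(A, B, C) expansion: one case per list entry.
#define DEMO_CASE(code, mnemonic, operands) \
  case code:                                \
    return mnemonic;
    DEMO_LIST(DEMO_CASE)
#undef DEMO_CASE
    default:
      return "unimplemented";
  }
}

int main() { std::printf("%s\n", MnemonicFor(1)); }  // prints "ldar"
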
@@ -3782,6 +3922,14 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
field_len = 3;
}
+ // W or X registers tagged with '+' have their number incremented, to support
+ // instructions such as CASP.
+ if (format[2] == '+') {
+ DCHECK((reg_prefix == 'W') || (reg_prefix == 'X'));
+ reg_num++;
+ field_len++;
+ }
+
CPURegister::RegisterType reg_type;
unsigned reg_size;
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index dad22ba046..6f0b60f41a 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -257,10 +257,8 @@ static void PrintRelocInfo(std::ostringstream& out, Isolate* isolate,
out << " ;; external reference (" << reference_name << ")";
} else if (RelocInfo::IsCodeTargetMode(rmode)) {
out << " ;; code:";
- CodeT code =
- isolate->heap()
- ->GcSafeFindCodeForInnerPointer(relocinfo->target_address())
- .ToCodeT();
+ Code code =
+ isolate->heap()->FindCodeForInnerPointer(relocinfo->target_address());
CodeKind kind = code.kind();
if (code.is_builtin()) {
out << " Builtin::" << Builtins::name(code.builtin_id());
@@ -378,21 +376,21 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// Print all the reloc info for this instruction which are not comments.
for (size_t i = 0; i < pcs.size(); i++) {
- // Put together the reloc info
+ // Put together the reloc info.
const CodeReference& host = code;
Address constant_pool =
host.is_null() ? kNullAddress : host.constant_pool();
- Code code_pointer;
+ Handle<Code> code_handle;
if (host.is_code()) {
- code_pointer = *host.as_code();
- }
+ code_handle = host.as_code();
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], code_pointer,
- constant_pool);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], *code_handle,
+ code_handle->instruction_stream(), constant_pool);
- bool first_reloc_info = (i == 0);
- PrintRelocInfo(out, isolate, ref_encoder, os, code, &relocinfo,
- first_reloc_info);
+ bool first_reloc_info = (i == 0);
+ PrintRelocInfo(out, isolate, ref_encoder, os, code, &relocinfo,
+ first_reloc_info);
+ }
}
// If this is a constant pool load and we haven't found any RelocInfo
@@ -404,7 +402,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// by IsInConstantPool() below.
if (pcs.empty() && !code.is_null() && !decoding_constant_pool) {
RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
- RelocInfo::NO_INFO, 0, Code());
+ RelocInfo::NO_INFO, 0, Code(), InstructionStream());
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();
diff --git a/deps/v8/src/diagnostics/etw-jit-win.cc b/deps/v8/src/diagnostics/etw-jit-win.cc
index 39395f3ea1..9f3820220c 100644
--- a/deps/v8/src/diagnostics/etw-jit-win.cc
+++ b/deps/v8/src/diagnostics/etw-jit-win.cc
@@ -13,6 +13,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/diagnostics/etw-jit-metadata-win.h"
+#include "src/logging/log.h"
#include "src/objects/shared-function-info.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"
diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index ee087fac17..83d69ae72d 100644
--- a/deps/v8/src/diagnostics/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -2064,9 +2064,7 @@ void EventHandler(const v8::JitCodeEvent* event) {
// use event->code_type here instead of finding the Code.
// TODO(zhin): Rename is_function to be more accurate.
if (event->code_type == v8::JitCodeEvent::JIT_CODE) {
- CodeLookupResult lookup_result =
- isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
- CHECK(lookup_result.IsFound());
+ Code lookup_result = isolate->heap()->FindCodeForInnerPointer(addr);
is_function = CodeKindIsOptimizedJSFunction(lookup_result.kind());
}
AddCode(event_name.c_str(), {addr, event->code_len}, shared, lineinfo,
diff --git a/deps/v8/src/diagnostics/gdb-jit.h b/deps/v8/src/diagnostics/gdb-jit.h
index eb4d515a81..522c5a2ca4 100644
--- a/deps/v8/src/diagnostics/gdb-jit.h
+++ b/deps/v8/src/diagnostics/gdb-jit.h
@@ -37,8 +37,8 @@ namespace GDBJITInterface {
void EventHandler(const v8::JitCodeEvent* event);
// Expose some functions for unittests. These only exercise the logic to add
-// AddressRegion to CodeMap, and checking for overlap. It does not touch the
-// actual JITCodeEntry at all.
+// AddressRegion to InstructionStreamMap, and checking for overlap. It does not
+// touch the actual JITCodeEntry at all.
V8_EXPORT_PRIVATE void AddRegionForTesting(const base::AddressRegion region);
V8_EXPORT_PRIVATE void ClearCodeMapForTesting();
V8_EXPORT_PRIVATE size_t
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index a122312a1f..17578d7855 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -37,6 +37,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/objects/turbofan-types-inl.h"
+#include "src/objects/turboshaft-types-inl.h"
#include "src/roots/roots.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
@@ -49,6 +50,7 @@
#include "src/objects/js-duration-format-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-iterator-helpers-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
@@ -242,8 +244,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
TransitionArray::cast(*this).TransitionArrayVerify(isolate);
break;
- case CODE_TYPE:
- Code::cast(*this).CodeVerify(isolate);
+ case INSTRUCTION_STREAM_TYPE:
+ InstructionStream::cast(*this).InstructionStreamVerify(isolate);
break;
case JS_API_OBJECT_TYPE:
case JS_ARRAY_ITERATOR_PROTOTYPE_TYPE:
@@ -283,8 +285,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
break;
case FILLER_TYPE:
break;
- case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
+ case CODE_TYPE:
+ Code::cast(*this).CodeVerify(isolate);
break;
#define MAKE_TORQUE_CASE(Name, TYPE) \
@@ -334,7 +336,7 @@ void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
// If you crashed here and {isolate->is_shared()}, there is a bug causing the
// host of {p} to point to a non-shared object.
CHECK(IsValidHeapObject(isolate->heap(), HeapObject::cast(p)));
- CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !p.IsCode());
+ CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !p.IsInstructionStream());
}
// static
@@ -342,7 +344,7 @@ void HeapObject::VerifyCodePointer(Isolate* isolate, Object p) {
CHECK(p.IsHeapObject());
CHECK(IsValidCodeObject(isolate->heap(), HeapObject::cast(p)));
PtrComprCageBase cage_base(isolate);
- CHECK(HeapObject::cast(p).IsCode(cage_base));
+ CHECK(HeapObject::cast(p).IsInstructionStream(cage_base));
}
void Symbol::SymbolVerify(Isolate* isolate) {
@@ -446,7 +448,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(PropertyKind::kData, details.kind());
Representation r = details.representation();
- FieldIndex index = FieldIndex::ForDescriptor(map(), i);
+ FieldIndex index = FieldIndex::ForDetails(map(), details);
if (COMPRESS_POINTERS_BOOL && index.is_inobject()) {
VerifyObjectField(isolate, index.offset());
}
@@ -588,8 +590,8 @@ void Map::MapVerify(Isolate* isolate) {
IsSharedArrayElementsKind(elements_kind()));
CHECK_IMPLIES(is_deprecated(), !is_stable());
if (is_prototype_map()) {
- DCHECK(prototype_info() == Smi::zero() ||
- prototype_info().IsPrototypeInfo());
+ CHECK(prototype_info() == Smi::zero() ||
+ prototype_info().IsPrototypeInfo());
}
}
@@ -890,7 +892,15 @@ void SlicedString::SlicedStringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::SlicedStringVerify(*this, isolate);
CHECK(!parent().IsConsString());
CHECK(!parent().IsSlicedString());
- CHECK_GE(length(), SlicedString::kMinLength);
+#ifdef DEBUG
+ if (!isolate->has_turbofan_string_builders()) {
+    // Turbofan's string builder optimization can introduce SlicedStrings that
+    // are shorter than SlicedString::kMinLength characters. Their live range
+    // and scope are pretty limited, but they can be visible to the GC, which
+    // shouldn't treat them as invalid.
+ CHECK_GE(length(), SlicedString::kMinLength);
+ }
+#endif
}
USE_TORQUE_VERIFIER(ExternalString)
@@ -920,7 +930,7 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
VerifyPointer(isolate, raw_feedback_cell(isolate));
CHECK(raw_feedback_cell(isolate).IsFeedbackCell());
VerifyPointer(isolate, code(isolate));
- CHECK(code(isolate).IsCodeT());
+ CHECK(code(isolate).IsCode());
CHECK(map(isolate).is_callable());
Handle<JSFunction> function(*this, isolate);
LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
@@ -1088,90 +1098,62 @@ void PropertyCell::PropertyCellVerify(Isolate* isolate) {
CheckDataIsCompatible(property_details(), value());
}
-void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
- CHECK(IsCodeDataContainer());
- VerifyObjectField(isolate, kNextCodeLinkOffset);
- CHECK(next_code_link().IsCodeT() || next_code_link().IsUndefined(isolate));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- if (raw_code() != Smi::zero()) {
- Code code = this->code();
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind() and builtin_id() getters are not available on CodeDataContainer
- // when external code space is not enabled.
- CHECK_EQ(code.kind(), kind());
- CHECK_EQ(code.builtin_id(), builtin_id());
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // When v8_flags.interpreted_frames_native_stack is enabled each
- // interpreted function gets its own copy of the
- // InterpreterEntryTrampoline. Thus, there could be Code'ful builtins.
- CHECK_IMPLIES(isolate->embedded_blob_code() && is_off_heap_trampoline(),
- builtin_id() == Builtin::kInterpreterEntryTrampoline);
- }
-#endif // V8_EXTERNAL_CODE_SPACE
- CHECK_EQ(code.code_data_container(kAcquireLoad), *this);
-
- // Ensure the cached code entry point corresponds to the Code object
- // associated with this CodeDataContainer.
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- if (V8_SHORT_BUILTIN_CALLS_BOOL) {
- if (code.InstructionStart() == code_entry_point()) {
- // Most common case, all good.
- } else {
- // When shared pointer compression cage is enabled and it has the
- // embedded code blob copy then the Code::InstructionStart() might
- // return address of the remapped builtin regardless of whether the
- // builtins copy exsisted when the code_entry_point value was cached
- // in the CodeDataContainer (see Code::OffHeapInstructionStart()).
- // So, do a reverse Code object lookup via code_entry_point value to
- // ensure it corresponds to the same Code object associated with this
- // CodeDataContainer.
- CodeLookupResult lookup_result =
- isolate->heap()->GcSafeFindCodeForInnerPointer(
- code_entry_point());
- CHECK(lookup_result.IsFound());
- CHECK_EQ(lookup_result.ToCode(), code);
- }
- } else {
- CHECK_EQ(code.InstructionStart(), code_entry_point());
- }
-#else
- CHECK_EQ(code.InstructionStart(), code_entry_point());
-#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+void Code::CodeVerify(Isolate* isolate) {
+ CHECK(IsCode());
+ if (has_instruction_stream()) {
+ InstructionStream istream = instruction_stream();
+ CHECK_EQ(istream.code(kAcquireLoad), *this);
+ CHECK_EQ(safepoint_table_offset(), 0);
+ CHECK_LE(safepoint_table_offset(), handler_table_offset());
+ CHECK_LE(handler_table_offset(), constant_pool_offset());
+ CHECK_LE(constant_pool_offset(), code_comments_offset());
+ CHECK_LE(code_comments_offset(), unwinding_info_offset());
+ CHECK_LE(unwinding_info_offset(), metadata_size());
+
+ relocation_info().ObjectVerify(isolate);
+
+ // Ensure the cached code entry point corresponds to the InstructionStream
+ // object associated with this Code.
+#if defined(V8_COMPRESS_POINTERS) && defined(V8_SHORT_BUILTIN_CALLS)
+ if (istream.instruction_start() == code_entry_point()) {
+ // Most common case, all good.
+ } else {
+      // When the shared pointer compression cage is enabled and it has an
+      // embedded code blob copy, InstructionStream::instruction_start() might
+      // return the address of the remapped builtin regardless of whether the
+      // builtins copy existed when the code_entry_point value was cached in
+      // the Code (see InstructionStream::OffHeapInstructionStart()). So, do
+      // a reverse Code object lookup via the code_entry_point value to ensure
+      // it corresponds to this current Code object.
+ Code lookup_result =
+ isolate->heap()->FindCodeForInnerPointer(code_entry_point());
+ CHECK_EQ(lookup_result, *this);
}
+#else
+ CHECK_EQ(istream.instruction_start(), code_entry_point());
+#endif // V8_COMPRESS_POINTERS && V8_SHORT_BUILTIN_CALLS
}
}
-void Code::CodeVerify(Isolate* isolate) {
- CHECK(IsAligned(InstructionSize(),
- static_cast<unsigned>(Code::kMetadataAlignment)));
- CHECK_EQ(safepoint_table_offset(), 0);
- CHECK_LE(safepoint_table_offset(), handler_table_offset());
- CHECK_LE(handler_table_offset(), constant_pool_offset());
- CHECK_LE(constant_pool_offset(), code_comments_offset());
- CHECK_LE(code_comments_offset(), unwinding_info_offset());
- CHECK_LE(unwinding_info_offset(), MetadataSize());
+void InstructionStream::InstructionStreamVerify(Isolate* isolate) {
+ CHECK(
+ IsAligned(code(kAcquireLoad).instruction_size(),
+ static_cast<unsigned>(InstructionStream::kMetadataAlignment)));
#if !defined(_MSC_VER) || defined(__clang__)
// See also: PlatformEmbeddedFileWriterWin::AlignToCodeAlignment.
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
- IsAligned(InstructionStart(), kCodeAlignment));
+ IsAligned(instruction_start(), kCodeAlignment));
#endif // !defined(_MSC_VER) || defined(__clang__)
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
- IsAligned(raw_instruction_start(), kCodeAlignment));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CHECK_EQ(*this, code_data_container(kAcquireLoad).code());
- }
- // TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
- // following CHECK works builtin trampolines. It currently fails because
- // CodeVerify is called halfway through constructing the trampoline and so not
- // everything is set up.
- // CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
- relocation_info().ObjectVerify(isolate);
+ IsAligned(instruction_start(), kCodeAlignment));
+ CHECK_EQ(*this, code(kAcquireLoad).instruction_stream());
CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
CodeSize() <= MemoryChunkLayout::MaxRegularCodeObjectSize() ||
isolate->heap()->InSpace(*this, CODE_LO_SPACE));
Address last_gc_pc = kNullAddress;
- for (RelocIterator it(*this); !it.done(); it.next()) {
+ for (RelocIterator it(code(kAcquireLoad)); !it.done(); it.next()) {
it.rinfo()->Verify(isolate);
// Ensure that GC will not iterate twice over the same pointer.
if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) {
@@ -1252,9 +1234,28 @@ void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(JSShadowRealm)
USE_TORQUE_VERIFIER(JSWrappedFunction)
+namespace {
+
+void VerifyElementIsShared(Object element) {
+ // Exception for ThinStrings:
+ // When storing a ThinString in a shared object, we want to store the actual
+ // string, which is shared when sharing the string table.
+ // It is possible that a stored shared string migrates to a ThinString later
+ // on, which is fine as the ThinString resides in shared space if the original
+ // string was in shared space.
+ if (element.IsThinString()) {
+ CHECK(v8_flags.shared_string_table);
+ CHECK(element.InWritableSharedSpace());
+ } else {
+ CHECK(element.IsShared());
+ }
+}
+
+} // namespace
+
void JSSharedStruct::JSSharedStructVerify(Isolate* isolate) {
CHECK(IsJSSharedStruct());
- CHECK(InSharedWritableHeap());
+ CHECK(InWritableSharedSpace());
JSObjectVerify(isolate);
CHECK(HasFastProperties());
// Shared structs can only point to primitives or other shared HeapObjects,
@@ -1267,14 +1268,14 @@ void JSSharedStruct::JSSharedStructVerify(Isolate* isolate) {
CHECK_EQ(PropertyKind::kData, details.kind());
CHECK_EQ(PropertyLocation::kField, details.location());
CHECK(details.representation().IsTagged());
- FieldIndex field_index = FieldIndex::ForDescriptor(struct_map, i);
- CHECK(RawFastPropertyAt(field_index).IsShared());
+ FieldIndex field_index = FieldIndex::ForDetails(struct_map, details);
+ VerifyElementIsShared(RawFastPropertyAt(field_index));
}
}
void JSAtomicsMutex::JSAtomicsMutexVerify(Isolate* isolate) {
CHECK(IsJSAtomicsMutex());
- CHECK(InSharedWritableHeap());
+ CHECK(InWritableSharedSpace());
JSObjectVerify(isolate);
}
@@ -1294,10 +1295,32 @@ void JSSharedArray::JSSharedArrayVerify(Isolate* isolate) {
uint32_t length = storage.length();
for (uint32_t j = 0; j < length; j++) {
Object element_value = storage.get(j);
- CHECK(element_value.IsShared());
+ VerifyElementIsShared(element_value);
}
}
+void JSIteratorMapHelper::JSIteratorMapHelperVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSIteratorMapHelperVerify(*this, isolate);
+ CHECK(mapper().IsCallable());
+ CHECK_GE(counter().Number(), 0);
+}
+
+void JSIteratorFilterHelper::JSIteratorFilterHelperVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSIteratorFilterHelperVerify(*this, isolate);
+ CHECK(predicate().IsCallable());
+ CHECK_GE(counter().Number(), 0);
+}
+
+void JSIteratorTakeHelper::JSIteratorTakeHelperVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSIteratorTakeHelperVerify(*this, isolate);
+ CHECK_GE(remaining().Number(), 0);
+}
+
+void JSIteratorDropHelper::JSIteratorDropHelperVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSIteratorDropHelperVerify(*this, isolate);
+ CHECK_GE(remaining().Number(), 0);
+}
+
void WeakCell::WeakCellVerify(Isolate* isolate) {
CHECK(IsWeakCell());
@@ -1558,9 +1581,9 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
Object latin1_bytecode = arr.get(JSRegExp::kIrregexpLatin1BytecodeIndex);
Object uc16_bytecode = arr.get(JSRegExp::kIrregexpUC16BytecodeIndex);
- bool is_compiled = latin1_code.IsCodeT();
+ bool is_compiled = latin1_code.IsCode();
if (is_compiled) {
- CHECK_EQ(CodeT::cast(latin1_code).builtin_id(),
+ CHECK_EQ(Code::cast(latin1_code).builtin_id(),
Builtin::kRegExpExperimentalTrampoline);
CHECK_EQ(uc16_code, latin1_code);
@@ -1589,14 +1612,15 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
FixedArray arr = FixedArray::cast(data());
Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
// Smi : Not compiled yet (-1).
- // Code: Compiled irregexp code or trampoline to the interpreter.
+ // InstructionStream: Compiled irregexp code or trampoline to the
+ // interpreter.
CHECK((one_byte_data.IsSmi() &&
Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
- one_byte_data.IsCodeT());
+ one_byte_data.IsCode());
Object uc16_data = arr.get(JSRegExp::kIrregexpUC16CodeIndex);
CHECK((uc16_data.IsSmi() &&
Smi::ToInt(uc16_data) == JSRegExp::kUninitializedValue) ||
- uc16_data.IsCodeT());
+ uc16_data.IsCode());
Object one_byte_bytecode =
arr.get(JSRegExp::kIrregexpLatin1BytecodeIndex);
@@ -1661,6 +1685,18 @@ void JSTypedArray::JSTypedArrayVerify(Isolate* isolate) {
void JSDataView::JSDataViewVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSDataViewVerify(*this, isolate);
+ CHECK(!IsVariableLength());
+ if (!WasDetached()) {
+ CHECK_EQ(reinterpret_cast<uint8_t*>(
+ JSArrayBuffer::cast(buffer()).backing_store()) +
+ byte_offset(),
+ data_pointer());
+ }
+}
+
+void JSRabGsabDataView::JSRabGsabDataViewVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSRabGsabDataViewVerify(*this, isolate);
+ CHECK(IsVariableLength());
if (!WasDetached()) {
CHECK_EQ(reinterpret_cast<uint8_t*>(
JSArrayBuffer::cast(buffer()).backing_store()) +
@@ -1867,7 +1903,7 @@ void DataHandler::DataHandlerVerify(Isolate* isolate) {
CHECK(IsDataHandler());
VerifyPointer(isolate, smi_handler(isolate));
CHECK_IMPLIES(!smi_handler().IsSmi(),
- IsStoreHandler() && smi_handler().IsCodeT());
+ IsStoreHandler() && smi_handler().IsCode());
VerifyPointer(isolate, validity_cell(isolate));
CHECK(validity_cell().IsSmi() || validity_cell().IsCell());
int data_count = data_field_count();
@@ -1911,12 +1947,6 @@ void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
void Script::ScriptVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ScriptVerify(*this, isolate);
- if (V8_UNLIKELY(type() == Script::TYPE_WEB_SNAPSHOT)) {
- CHECK_LE(shared_function_info_count(), shared_function_infos().length());
- } else {
- // No overallocating shared_function_infos.
- CHECK_EQ(shared_function_info_count(), shared_function_infos().length());
- }
for (int i = 0; i < shared_function_info_count(); ++i) {
MaybeObject maybe_object = shared_function_infos().Get(i);
HeapObject heap_object;
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index d020765155..30d8095f62 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -210,12 +210,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
WasmExceptionPackage::cast(*this).WasmExceptionPackagePrint(os);
break;
#endif // V8_ENABLE_WEBASSEMBLY
+ case INSTRUCTION_STREAM_TYPE:
+ InstructionStream::cast(*this).InstructionStreamPrint(os);
+ break;
case CODE_TYPE:
Code::cast(*this).CodePrint(os);
break;
- case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(*this).CodeDataContainerPrint(os);
- break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
JSSetIterator::cast(*this).JSSetIteratorPrint(os);
@@ -275,7 +275,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case CONS_ONE_BYTE_STRING_TYPE:
case EXTERNAL_ONE_BYTE_STRING_TYPE:
case SLICED_ONE_BYTE_STRING_TYPE:
- case THIN_ONE_BYTE_STRING_TYPE:
case UNCACHED_EXTERNAL_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHARED_STRING_TYPE:
@@ -284,8 +283,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHARED_UNCACHED_EXTERNAL_STRING_TYPE:
case SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case SHARED_THIN_STRING_TYPE:
- case SHARED_THIN_ONE_BYTE_STRING_TYPE:
case JS_LAST_DUMMY_API_OBJECT_TYPE:
// TODO(all): Handle these types too.
os << "UNKNOWN TYPE " << map().instance_type();
@@ -321,7 +318,7 @@ bool JSObject::PrintProperties(std::ostream& os) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
case PropertyLocation::kField: {
- FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
+ FieldIndex field_index = FieldIndex::ForDetails(map(), details);
os << Brief(RawFastPropertyAt(field_index));
break;
}
@@ -347,7 +344,7 @@ bool JSObject::PrintProperties(std::ostream& os) {
} else if (IsJSGlobalObject()) {
PrintDictionaryContents(
os, JSGlobalObject::cast(*this).global_dictionary(kAcquireLoad));
- } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ } else if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
PrintDictionaryContents(os, property_dictionary_swiss());
} else {
PrintDictionaryContents(os, property_dictionary());
@@ -760,10 +757,11 @@ void DescriptorArray::DescriptorArrayPrint(std::ostream& os) {
}
os << "\n - nof slack descriptors: " << number_of_slack_descriptors();
os << "\n - nof descriptors: " << number_of_descriptors();
- int16_t raw_marked = raw_number_of_marked_descriptors();
- os << "\n - raw marked descriptors: mc epoch "
- << NumberOfMarkedDescriptors::Epoch::decode(raw_marked) << ", marked "
- << NumberOfMarkedDescriptors::Marked::decode(raw_marked);
+ const auto raw = raw_gc_state(kRelaxedLoad);
+ os << "\n - raw gc state: mc epoch "
+ << DescriptorArrayMarkingState::Epoch::decode(raw) << ", marked "
+ << DescriptorArrayMarkingState::Marked::decode(raw) << ", delta "
+ << DescriptorArrayMarkingState::Delta::decode(raw);
PrintDescriptors(os);
}
@@ -920,6 +918,12 @@ void PrintTableContentsGeneric(std::ostream& os, T dict,
}
}
+void PrintNameDictionaryFlags(std::ostream& os, NameDictionary dict) {
+ if (dict.may_have_interesting_symbols()) {
+ os << "\n - may_have_interesting_symbols";
+ }
+}
+
// Used for ordered and unordered dictionaries.
template <typename T>
void PrintDictionaryContentsFull(std::ostream& os, T dict) {
@@ -1010,6 +1014,7 @@ void EphemeronHashTable::EphemeronHashTablePrint(std::ostream& os) {
void NameDictionary::NameDictionaryPrint(std::ostream& os) {
PrintHashTableHeader(os, *this, "NameDictionary");
+ PrintNameDictionaryFlags(os, *this);
PrintDictionaryContentsFull(os, *this);
}
@@ -1259,29 +1264,45 @@ void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot) {
}
void FeedbackNexus::Print(std::ostream& os) {
- switch (kind()) {
+ auto slot_kind = kind();
+ switch (slot_kind) {
case FeedbackSlotKind::kCall:
case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kDefineKeyedOwn:
case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
- case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kDefineNamedOwn: {
os << InlineCacheState2String(ic_state());
break;
}
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
- case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict: {
os << InlineCacheState2String(ic_state());
if (ic_state() == InlineCacheState::MONOMORPHIC) {
os << "\n ";
- if (GetFeedback().GetHeapObjectOrSmi().IsPropertyCell()) {
+ if (GetFeedback().IsCleared()) {
+ // Handler mode: feedback is the cleared value, extra is the handler.
+ if (IsLoadGlobalICKind(slot_kind)) {
+ LoadHandler::PrintHandler(GetFeedbackExtra().GetHeapObjectOrSmi(),
+ os);
+ } else {
+ StoreHandler::PrintHandler(GetFeedbackExtra().GetHeapObjectOrSmi(),
+ os);
+ }
+ } else if (GetFeedback().GetHeapObjectOrSmi().IsPropertyCell()) {
os << Brief(GetFeedback());
} else {
- LoadHandler::PrintHandler(GetFeedback().GetHeapObjectOrSmi(), os);
+ // Lexical variable mode: the variable location is encoded in the SMI.
+ int handler = GetFeedback().GetHeapObjectOrSmi().ToSmi().value();
+ os << (IsLoadGlobalICKind(slot_kind) ? "Load" : "Store");
+ os << "Handler(Lexical variable mode)(context ix = "
+ << FeedbackNexus::ContextIndexBits::decode(handler)
+ << ", slot ix = " << FeedbackNexus::SlotIndexBits::decode(handler)
+ << ")";
}
}
break;
@@ -1291,10 +1312,20 @@ void FeedbackNexus::Print(std::ostream& os) {
os << InlineCacheState2String(ic_state());
if (ic_state() == InlineCacheState::MONOMORPHIC) {
os << "\n " << Brief(GetFeedback()) << ": ";
- LoadHandler::PrintHandler(GetFeedbackExtra().GetHeapObjectOrSmi(), os);
+ Object handler = GetFeedbackExtra().GetHeapObjectOrSmi();
+ if (handler.IsWeakFixedArray()) {
+ handler = WeakFixedArray::cast(handler).Get(0).GetHeapObjectOrSmi();
+ }
+ LoadHandler::PrintHandler(handler, os);
} else if (ic_state() == InlineCacheState::POLYMORPHIC) {
- WeakFixedArray array =
- WeakFixedArray::cast(GetFeedback().GetHeapObject());
+ HeapObject feedback = GetFeedback().GetHeapObject();
+ WeakFixedArray array;
+ if (feedback.IsName()) {
+ os << " with name " << Brief(feedback);
+ array = WeakFixedArray::cast(GetFeedbackExtra().GetHeapObject());
+ } else {
+ array = WeakFixedArray::cast(feedback);
+ }
for (int i = 0; i < array.length(); i += 2) {
os << "\n " << Brief(array.Get(i)) << ": ";
LoadHandler::PrintHandler(array.Get(i + 1).GetHeapObjectOrSmi(), os);
@@ -1309,10 +1340,20 @@ void FeedbackNexus::Print(std::ostream& os) {
os << InlineCacheState2String(ic_state());
if (ic_state() == InlineCacheState::MONOMORPHIC) {
os << "\n " << Brief(GetFeedback()) << ": ";
- StoreHandler::PrintHandler(GetFeedbackExtra().GetHeapObjectOrSmi(), os);
+ Object handler = GetFeedbackExtra().GetHeapObjectOrSmi();
+ if (handler.IsWeakFixedArray()) {
+ handler = WeakFixedArray::cast(handler).Get(0).GetHeapObjectOrSmi();
+ }
+ StoreHandler::PrintHandler(handler, os);
} else if (ic_state() == InlineCacheState::POLYMORPHIC) {
- WeakFixedArray array =
- WeakFixedArray::cast(GetFeedback().GetHeapObject());
+ HeapObject feedback = GetFeedback().GetHeapObject();
+ WeakFixedArray array;
+ if (feedback.IsName()) {
+ os << " with name " << Brief(feedback);
+ array = WeakFixedArray::cast(GetFeedbackExtra().GetHeapObject());
+ } else {
+ array = WeakFixedArray::cast(feedback);
+ }
for (int i = 0; i < array.length(); i += 2) {
os << "\n " << Brief(array.Get(i)) << ": ";
StoreHandler::PrintHandler(array.Get(i + 1).GetHeapObjectOrSmi(), os);
@@ -1378,6 +1419,13 @@ void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
+void JSValidIteratorWrapper::JSValidIteratorWrapperPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSValidIteratorWrapper");
+ os << "\n - underlying.object: " << Brief(underlying_object());
+ os << "\n - underlying.next: " << Brief(underlying_next());
+ JSObjectPrintBody(os, *this);
+}
+
void JSPrimitiveWrapper::JSPrimitiveWrapperPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSPrimitiveWrapper");
os << "\n - value: " << Brief(value());
@@ -1505,7 +1553,7 @@ void JSSharedArray::JSSharedArrayPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSharedArray");
Isolate* isolate = GetIsolateFromWritableObject(*this);
os << "\n - isolate: " << isolate;
- if (isolate->is_shared()) os << " (shared)";
+ if (InWritableSharedSpace()) os << " (shared)";
JSObjectPrintBody(os, *this);
}
@@ -1513,7 +1561,7 @@ void JSSharedStruct::JSSharedStructPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSharedStruct");
Isolate* isolate = GetIsolateFromWritableObject(*this);
os << "\n - isolate: " << isolate;
- if (isolate->is_shared()) os << " (shared)";
+ if (InWritableSharedSpace()) os << " (shared)";
JSObjectPrintBody(os, *this);
}
@@ -1521,7 +1569,7 @@ void JSAtomicsMutex::JSAtomicsMutexPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSAtomicsMutex");
Isolate* isolate = GetIsolateFromWritableObject(*this);
os << "\n - isolate: " << isolate;
- if (isolate->is_shared()) os << " (shared)";
+ if (InWritableSharedSpace()) os << " (shared)";
os << "\n - state: " << this->state();
os << "\n - owner_thread_id: " << this->owner_thread_id();
JSObjectPrintBody(os, *this);
@@ -1531,11 +1579,44 @@ void JSAtomicsCondition::JSAtomicsConditionPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSAtomicsCondition");
Isolate* isolate = GetIsolateFromWritableObject(*this);
os << "\n - isolate: " << isolate;
- if (isolate->is_shared()) os << " (shared)";
+ if (InWritableSharedSpace()) os << " (shared)";
os << "\n - state: " << this->state();
JSObjectPrintBody(os, *this);
}
+void JSIteratorHelper::JSIteratorHelperPrintHeader(std::ostream& os,
+ const char* helper_name) {
+ JSObjectPrintHeader(os, *this, helper_name);
+ os << "\n - underlying.object: " << Brief(underlying_object());
+ os << "\n - underlying.next: " << Brief(underlying_next());
+}
+
+void JSIteratorMapHelper::JSIteratorMapHelperPrint(std::ostream& os) {
+ JSIteratorHelperPrintHeader(os, "JSIteratorMapHelper");
+ os << "\n - mapper: " << Brief(mapper());
+ os << "\n - counter: " << counter();
+ JSObjectPrintBody(os, *this);
+}
+
+void JSIteratorFilterHelper::JSIteratorFilterHelperPrint(std::ostream& os) {
+ JSIteratorHelperPrintHeader(os, "JSIteratorFilterHelper");
+ os << "\n - predicate: " << Brief(predicate());
+ os << "\n - counter: " << counter();
+ JSObjectPrintBody(os, *this);
+}
+
+void JSIteratorTakeHelper::JSIteratorTakeHelperPrint(std::ostream& os) {
+ JSIteratorHelperPrintHeader(os, "JSIteratorTakeHelper");
+ os << "\n - remaining: " << remaining();
+ JSObjectPrintBody(os, *this);
+}
+
+void JSIteratorDropHelper::JSIteratorDropHelperPrint(std::ostream& os) {
+ JSIteratorHelperPrintHeader(os, "JSIteratorDropHelper");
+ os << "\n - remaining: " << remaining();
+ JSObjectPrintBody(os, *this);
+}
+
void JSWeakMap::JSWeakMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());
@@ -1605,6 +1686,21 @@ void JSDataView::JSDataViewPrint(std::ostream& os) {
JSObjectPrintBody(os, *this, !WasDetached());
}
+void JSRabGsabDataView::JSRabGsabDataViewPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSRabGsabDataView");
+ os << "\n - buffer =" << Brief(buffer());
+ os << "\n - byte_offset: " << byte_offset();
+ os << "\n - byte_length: " << byte_length();
+ if (is_length_tracking()) os << "\n - length-tracking";
+ if (is_backed_by_rab()) os << "\n - backed-by-rab";
+ if (!buffer().IsJSArrayBuffer()) {
+ os << "\n <invalid buffer>";
+ return;
+ }
+ if (WasDetached()) os << "\n - detached";
+ JSObjectPrintBody(os, *this, !WasDetached());
+}
+
void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSBoundFunction");
os << "\n - bound_target_function: " << Brief(bound_target_function());
@@ -1719,7 +1815,12 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data(kAcquireLoad));
os << "\n - code (from data): ";
- os << Brief(GetCode());
+ Isolate* isolate;
+ if (GetIsolateFromHeapObject(*this, &isolate)) {
+ os << Brief(GetCode(isolate));
+ } else {
+ os << "<unavailable>";
+ }
PrintSourceCode(os);
// Script files are often large, thus only print their {Brief} representation.
os << "\n - script: " << Brief(script());
@@ -1774,33 +1875,31 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) {
os << "\n";
}
-void Code::CodePrint(std::ostream& os) {
- PrintHeader(os, "Code");
- os << "\n - code_data_container: "
- << Brief(code_data_container(kAcquireLoad));
- if (is_builtin()) {
- os << "\n - builtin_id: " << Builtins::name(builtin_id());
- }
- os << "\n";
+void InstructionStream::InstructionStreamPrint(std::ostream& os) {
+ PrintHeader(os, "InstructionStream");
+ Code the_code = code(kAcquireLoad);
+ os << "\n - code: " << Brief(the_code);
#ifdef ENABLE_DISASSEMBLER
- Disassemble(nullptr, os, GetIsolate());
+ the_code.Disassemble(nullptr, os, GetIsolate());
#endif
}
-void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
- PrintHeader(os, "CodeDataContainer");
-#ifdef V8_EXTERNAL_CODE_SPACE
+void Code::CodePrint(std::ostream& os) {
+ PrintHeader(os, "Code");
os << "\n - kind: " << CodeKindToString(kind());
if (is_builtin()) {
os << "\n - builtin: " << Builtins::name(builtin_id());
}
- os << "\n - is_off_heap_trampoline: " << is_off_heap_trampoline();
- os << "\n - code: " << Brief(raw_code());
+ if (has_instruction_stream()) {
+ os << "\n - instruction_stream: " << Brief(raw_instruction_stream());
+ }
os << "\n - code_entry_point: "
<< reinterpret_cast<void*>(code_entry_point());
-#endif // V8_EXTERNAL_CODE_SPACE
os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
os << "\n";
+ if (has_instruction_stream()) {
+ instruction_stream().Print(os);
+ }
}
void Foreign::ForeignPrint(std::ostream& os) {
@@ -1935,8 +2034,7 @@ void WasmStruct::WasmStructPrint(std::ostream& os) {
case wasm::kRtt: {
Tagged_t raw = base::ReadUnalignedValue<Tagged_t>(field_address);
#if V8_COMPRESS_POINTERS
- Address obj =
- V8HeapCompressionScheme::DecompressTaggedPointer(address(), raw);
+ Address obj = V8HeapCompressionScheme::DecompressTagged(address(), raw);
#else
Address obj = raw;
#endif
@@ -2071,7 +2169,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
PRINT_WASM_INSTANCE_FIELD(feedback_vectors, Brief);
PRINT_WASM_INSTANCE_FIELD(memory_start, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(memory_size, +);
- PRINT_WASM_INSTANCE_FIELD(isolate_root, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(stack_limit_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(real_stack_limit_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(new_allocation_limit_address, to_void_ptr);
@@ -2089,7 +2186,7 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
PRINT_WASM_INSTANCE_FIELD(jump_table_start, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(data_segment_starts, Brief);
PRINT_WASM_INSTANCE_FIELD(data_segment_sizes, Brief);
- PRINT_WASM_INSTANCE_FIELD(dropped_elem_segments, Brief);
+ PRINT_WASM_INSTANCE_FIELD(element_segments, Brief);
PRINT_WASM_INSTANCE_FIELD(hook_on_function_call_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(tiering_budget_array, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(break_on_entry, static_cast<int>);
@@ -2135,9 +2232,9 @@ void WasmResumeData::WasmResumeDataPrint(std::ostream& os) {
void WasmApiFunctionRef::WasmApiFunctionRefPrint(std::ostream& os) {
PrintHeader(os, "WasmApiFunctionRef");
- os << "\n - isolate_root: " << reinterpret_cast<void*>(isolate_root());
os << "\n - native_context: " << Brief(native_context());
os << "\n - callable: " << Brief(callable());
+ os << "\n - instance: " << Brief(instance());
os << "\n - suspend: " << suspend();
os << "\n";
}
@@ -2340,6 +2437,8 @@ void Script::ScriptPrint(std::ostream& os) {
os << "\n - source_mapping_url: " << Brief(source_mapping_url());
os << "\n - host_defined_options: " << Brief(host_defined_options());
os << "\n - compilation type: " << compilation_type();
+ os << "\n - compiled lazy function positions: "
+ << compiled_lazy_function_positions();
bool is_wasm = false;
#if V8_ENABLE_WEBASSEMBLY
if ((is_wasm = (type() == TYPE_WASM))) {
@@ -2353,9 +2452,6 @@ void Script::ScriptPrint(std::ostream& os) {
os << "\n - eval from shared: " << Brief(eval_from_shared());
} else if (is_wrapped()) {
os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
- } else if (type() == TYPE_WEB_SNAPSHOT) {
- os << "\n - shared function info table: "
- << Brief(shared_function_info_table());
}
os << "\n - eval from position: " << eval_from_position();
}
@@ -2642,9 +2738,13 @@ void HeapNumber::HeapNumberShortPrint(std::ostream& os) {
static constexpr int64_t kMaxSafeInteger = -(kMinSafeInteger + 1);
double val = value();
- if (val == DoubleToInteger(val) &&
- val >= static_cast<double>(kMinSafeInteger) &&
- val <= static_cast<double>(kMaxSafeInteger)) {
+ if (i::IsMinusZero(val)) {
+ os << "-0.0";
+ } else if (val == DoubleToInteger(val) &&
+ val >= static_cast<double>(kMinSafeInteger) &&
+ val <= static_cast<double>(kMaxSafeInteger)) {
+ // Print integer HeapNumbers in safe integer range with max precision: as
+ // 9007199254740991.0 instead of 9.0072e+15
int64_t i = static_cast<int64_t>(val);
os << i << ".0";
} else {
@@ -2958,7 +3058,7 @@ inline i::Object GetObjectFromRaw(void* object) {
if (RoundDown<i::kPtrComprCageBaseAlignment>(object_ptr) == i::kNullAddress) {
// Try to decompress pointer.
i::Isolate* isolate = i::Isolate::Current();
- object_ptr = i::V8HeapCompressionScheme::DecompressTaggedAny(
+ object_ptr = i::V8HeapCompressionScheme::DecompressTagged(
isolate, static_cast<i::Tagged_t>(object_ptr));
}
#endif
@@ -3014,30 +3114,20 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
}
#endif // V8_ENABLE_WEBASSEMBLY
- i::CodeLookupResult lookup_result =
- isolate->heap()->GcSafeFindCodeForInnerPointerForPrinting(address);
- if (!lookup_result.IsFound()) {
+ v8::base::Optional<i::Code> lookup_result =
+ isolate->heap()->TryFindCodeForInnerPointerForPrinting(address);
+ if (!lookup_result.has_value()) {
i::PrintF(
- "%p is not within the current isolate's code, read_only or embedded "
- "spaces\n",
+ "%p is not within the current isolate's code or embedded spaces\n",
object);
return;
}
#ifdef ENABLE_DISASSEMBLER
i::StdoutStream os;
- if (lookup_result.IsCodeDataContainer()) {
- i::CodeT code = i::CodeT::cast(lookup_result.code_data_container());
- code.Disassemble(nullptr, os, isolate, address);
- } else {
- lookup_result.code().Disassemble(nullptr, os, isolate, address);
- }
+ lookup_result->Disassemble(nullptr, os, isolate, address);
#else // ENABLE_DISASSEMBLER
- if (lookup_result.IsCodeDataContainer()) {
- lookup_result.code_data_container().Print();
- } else {
- lookup_result.code().Print();
- }
+ lookup_result->Print();
#endif // ENABLE_DISASSEMBLER
}
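
As a side note on the HeapNumberShortPrint change above (printing -0.0 explicitly, and printing integral values in the safe-integer range with full precision, e.g. 9007199254740991.0 rather than 9.0072e+15), here is a minimal standalone sketch of the same formatting decision. It is an editor's example using standard-library calls instead of V8's DoubleToInteger/IsMinusZero helpers, not the patch's code.

#include <cmath>
#include <cstdint>
#include <iostream>

void PrintHeapNumberStyle(double val, std::ostream& os) {
  // 2^53 - 1, the largest integer exactly representable without loss.
  constexpr int64_t kMaxSafeInteger = 9007199254740991;
  if (val == 0.0 && std::signbit(val)) {
    os << "-0.0";  // distinguish negative zero
  } else if (val == std::trunc(val) &&
             val >= static_cast<double>(-kMaxSafeInteger) &&
             val <= static_cast<double>(kMaxSafeInteger)) {
    // Full precision for integral values: 9007199254740991.0, not 9.0072e+15.
    os << static_cast<int64_t>(val) << ".0";
  } else {
    os << val;  // default formatting for everything else (NaN, huge, fractional)
  }
}
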
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index ca5fc54a4b..b3758680d1 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -54,6 +54,11 @@
namespace v8 {
namespace internal {
+base::LazyRecursiveMutex& GetFileMutex() {
+ static base::LazyRecursiveMutex file_mutex;
+ return file_mutex;
+}
+
struct PerfJitHeader {
uint32_t magic_;
uint32_t version_;
@@ -117,11 +122,9 @@ const char LinuxPerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump";
const int LinuxPerfJitLogger::kFilenameBufferPadding = 16;
static const char kStringTerminator[] = {'\0'};
-static const char kRepeatedNameMarker[] = {'\xff', '\0'};
-base::LazyRecursiveMutex LinuxPerfJitLogger::file_mutex_;
// The following static variables are protected by
-// LinuxPerfJitLogger::file_mutex_.
+// GetFileMutex().
int LinuxPerfJitLogger::process_id_ = 0;
uint64_t LinuxPerfJitLogger::reference_count_ = 0;
void* LinuxPerfJitLogger::marker_address_ = nullptr;
@@ -182,7 +185,7 @@ void LinuxPerfJitLogger::CloseMarkerFile(void* marker_address) {
LinuxPerfJitLogger::LinuxPerfJitLogger(Isolate* isolate)
: CodeEventLogger(isolate) {
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ base::LockGuard<base::RecursiveMutex> guard_file(GetFileMutex().Pointer());
process_id_ = base::OS::GetCurrentProcessId();
reference_count_++;
@@ -195,7 +198,7 @@ LinuxPerfJitLogger::LinuxPerfJitLogger(Isolate* isolate)
}
LinuxPerfJitLogger::~LinuxPerfJitLogger() {
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ base::LockGuard<base::RecursiveMutex> guard_file(GetFileMutex().Pointer());
reference_count_--;
// If this was the last logger, close the file.
@@ -214,11 +217,11 @@ uint64_t LinuxPerfJitLogger::GetTimestamp() {
}
void LinuxPerfJitLogger::LogRecordedBuffer(
- Handle<AbstractCode> abstract_code,
- MaybeHandle<SharedFunctionInfo> maybe_shared, const char* name,
- int length) {
+ AbstractCode abstract_code, MaybeHandle<SharedFunctionInfo> maybe_shared,
+ const char* name, int length) {
+ DisallowGarbageCollection no_gc;
if (v8_flags.perf_basic_prof_only_functions) {
- CodeKind code_kind = abstract_code->kind(isolate_);
+ CodeKind code_kind = abstract_code.kind(isolate_);
if (code_kind != CodeKind::INTERPRETED_FUNCTION &&
code_kind != CodeKind::TURBOFAN && code_kind != CodeKind::MAGLEV &&
code_kind != CodeKind::BASELINE) {
@@ -226,39 +229,39 @@ void LinuxPerfJitLogger::LogRecordedBuffer(
}
}
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ base::LockGuard<base::RecursiveMutex> guard_file(GetFileMutex().Pointer());
if (perf_output_handle_ == nullptr) return;
// We only support non-interpreted functions.
- if (!abstract_code->IsCode(isolate_)) return;
- Handle<Code> code = Handle<Code>::cast(abstract_code);
- DCHECK(code->raw_instruction_start() == code->address() + Code::kHeaderSize);
+ if (!abstract_code.IsCode(isolate_)) return;
+ Code code = Code::cast(abstract_code);
// Debug info has to be emitted first.
Handle<SharedFunctionInfo> shared;
if (v8_flags.perf_prof && maybe_shared.ToHandle(&shared)) {
// TODO(herhut): This currently breaks for js2wasm/wasm2js functions.
- if (code->kind() != CodeKind::JS_TO_WASM_FUNCTION &&
- code->kind() != CodeKind::WASM_TO_JS_FUNCTION) {
+ CodeKind kind = code.kind();
+ if (kind != CodeKind::JS_TO_WASM_FUNCTION &&
+ kind != CodeKind::WASM_TO_JS_FUNCTION) {
LogWriteDebugInfo(code, shared);
}
}
const char* code_name = name;
- uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->InstructionStart());
+ uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code.InstructionStart());
// Unwinding info comes right after debug info.
- if (v8_flags.perf_prof_unwinding_info) LogWriteUnwindingInfo(*code);
+ if (v8_flags.perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
- WriteJitCodeLoadEntry(code_pointer, code->InstructionSize(), code_name,
+ WriteJitCodeLoadEntry(code_pointer, code.InstructionSize(), code_name,
length);
}
#if V8_ENABLE_WEBASSEMBLY
void LinuxPerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
const char* name, int length) {
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ base::LockGuard<base::RecursiveMutex> guard_file(GetFileMutex().Pointer());
if (perf_output_handle_ == nullptr) return;
@@ -320,51 +323,55 @@ base::Vector<const char> GetScriptName(Object maybeScript,
} // namespace
-SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
+SourcePositionInfo GetSourcePositionInfo(Isolate* isolate, Code code,
Handle<SharedFunctionInfo> function,
SourcePosition pos) {
DisallowGarbageCollection disallow;
- if (code->is_turbofanned()) {
- return pos.FirstInfo(code);
+ if (code.is_turbofanned()) {
+ return pos.FirstInfo(isolate, code);
} else {
- return SourcePositionInfo(pos, function);
+ return SourcePositionInfo(isolate, pos, function);
}
}
} // namespace
-void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
+void LinuxPerfJitLogger::LogWriteDebugInfo(Code code,
Handle<SharedFunctionInfo> shared) {
// Line ends of all scripts have been initialized prior to this.
DisallowGarbageCollection no_gc;
// The WasmToJS wrapper stubs have source position entries.
- if (!shared->HasSourceCode()) return;
+ SharedFunctionInfo raw_shared = *shared;
+ if (!raw_shared.HasSourceCode()) return;
PerfJitCodeDebugInfo debug_info;
uint32_t size = sizeof(debug_info);
ByteArray source_position_table =
- code->SourcePositionTable(isolate_, *shared);
+ code.SourcePositionTable(isolate_, raw_shared);
// Compute the entry count and get the names of all scripts.
// Avoid additional work if the script name is repeated. Multiple script
// names only occur for cross-script inlining.
uint32_t entry_count = 0;
Object last_script = Smi::zero();
+ size_t last_script_name_size = 0;
std::vector<base::Vector<const char>> script_names;
for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
- SourcePositionInfo info(
- GetSourcePositionInfo(code, shared, iterator.source_position()));
+ SourcePositionInfo info(GetSourcePositionInfo(isolate_, code, shared,
+ iterator.source_position()));
Object current_script = *info.script;
if (current_script != last_script) {
std::unique_ptr<char[]> name_storage;
- auto name = GetScriptName(shared->script(), &name_storage, no_gc);
+ auto name = GetScriptName(raw_shared.script(), &name_storage, no_gc);
script_names.push_back(name);
// Add the size of the name after each entry.
- size += name.size() + sizeof(kStringTerminator);
+ last_script_name_size = name.size() + sizeof(kStringTerminator);
+ size += last_script_name_size;
last_script = current_script;
} else {
- size += sizeof(kRepeatedNameMarker);
+ DCHECK_LT(0, last_script_name_size);
+ size += last_script_name_size;
}
entry_count++;
}
@@ -372,7 +379,7 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
debug_info.time_stamp_ = GetTimestamp();
- debug_info.address_ = code->InstructionStart();
+ debug_info.address_ = code.InstructionStart();
debug_info.entry_count_ = entry_count;
// Add the sizes of fixed parts of entries.
@@ -382,14 +389,14 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
debug_info.size_ = size + padding;
LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
- Address code_start = code->InstructionStart();
+ Address code_start = code.InstructionStart();
last_script = Smi::zero();
int script_names_index = 0;
for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
- SourcePositionInfo info(
- GetSourcePositionInfo(code, shared, iterator.source_position()));
+ SourcePositionInfo info(GetSourcePositionInfo(isolate_, code, shared,
+ iterator.source_position()));
PerfJitDebugEntry entry;
// The entry point of the function will be placed straight after the ELF
// header when processed by "perf inject". Adjust the position addresses
@@ -399,16 +406,13 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
entry.column_ = info.column + 1;
LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
Object current_script = *info.script;
+ auto name_string = script_names[script_names_index];
+ LogWriteBytes(name_string.begin(),
+ static_cast<uint32_t>(name_string.size()));
+ LogWriteBytes(kStringTerminator, sizeof(kStringTerminator));
if (current_script != last_script) {
- auto name_string = script_names[script_names_index];
- LogWriteBytes(name_string.begin(),
- static_cast<uint32_t>(name_string.size()));
- LogWriteBytes(kStringTerminator, sizeof(kStringTerminator));
- script_names_index++;
+ if (last_script != Smi::zero()) script_names_index++;
last_script = current_script;
- } else {
- // Use the much shorter kRepeatedNameMarker for repeated names.
- LogWriteBytes(kRepeatedNameMarker, sizeof(kRepeatedNameMarker));
}
}
char padding_bytes[8] = {0};
@@ -519,12 +523,6 @@ void LinuxPerfJitLogger::LogWriteUnwindingInfo(Code code) {
LogWriteBytes(padding_bytes, static_cast<int>(padding_size));
}
-void LinuxPerfJitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
- // We may receive a CodeMove event if a BytecodeArray object moves. Otherwise
- // code relocation is not supported.
- CHECK(from.IsBytecodeArray(isolate_));
-}
-
void LinuxPerfJitLogger::LogWriteBytes(const char* bytes, int size) {
size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
DCHECK(static_cast<size_t>(size) == rv);
@@ -541,9 +539,9 @@ void LinuxPerfJitLogger::LogWriteHeader() {
header.elf_mach_target_ = GetElfMach();
header.reserved_ = 0xDEADBEEF;
header.process_id_ = process_id_;
- header.time_stamp_ =
- static_cast<uint64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis() *
- base::Time::kMicrosecondsPerMillisecond);
+ header.time_stamp_ = static_cast<uint64_t>(
+ V8::GetCurrentPlatform()->CurrentClockTimeMillisecondsHighResolution() *
+ base::Time::kMicrosecondsPerMillisecond);
header.flags_ = 0;
LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 0211b1baf6..64161d0722 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -44,7 +44,10 @@ class LinuxPerfJitLogger : public CodeEventLogger {
explicit LinuxPerfJitLogger(Isolate* isolate);
~LinuxPerfJitLogger() override;
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override {
+ UNREACHABLE(); // Unsupported.
+ }
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
@@ -55,7 +58,7 @@ class LinuxPerfJitLogger : public CodeEventLogger {
void CloseMarkerFile(void* marker_address);
uint64_t GetTimestamp();
- void LogRecordedBuffer(Handle<AbstractCode> code,
+ void LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
#if V8_ENABLE_WEBASSEMBLY
@@ -76,7 +79,7 @@ class LinuxPerfJitLogger : public CodeEventLogger {
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
- void LogWriteDebugInfo(Handle<Code> code, Handle<SharedFunctionInfo> shared);
+ void LogWriteDebugInfo(Code code, Handle<SharedFunctionInfo> shared);
#if V8_ENABLE_WEBASSEMBLY
void LogWriteDebugInfo(const wasm::WasmCode* code);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -127,7 +130,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
// Per-process singleton file. We assume that there is one main isolate;
// to determine when it goes away, we keep reference count.
- static base::LazyRecursiveMutex file_mutex_;
static FILE* perf_output_handle_;
static uint64_t reference_count_;
static void* marker_address_;
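
The perf-jit change above replaces the static file_mutex_ member with a GetFileMutex() accessor. A short sketch of that function-local-static pattern follows; V8 itself uses base::LazyRecursiveMutex, so the std:: types and names here are only stand-ins for illustration.

#include <mutex>

// Lazily constructed on first use; initialization is thread-safe since C++11.
std::recursive_mutex& GetFileMutexDemo() {
  static std::recursive_mutex mutex;
  return mutex;
}

void WriteLogEntry(const char* bytes, int size) {
  // All writers of the shared dump file serialize on the same mutex.
  std::lock_guard<std::recursive_mutex> guard(GetFileMutexDemo());
  // ... append bytes to the shared perf dump file under the lock ...
  (void)bytes;
  (void)size;
}
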
diff --git a/deps/v8/src/diagnostics/riscv/disasm-riscv.cc b/deps/v8/src/diagnostics/riscv/disasm-riscv.cc
index 1119a38014..57931af574 100644
--- a/deps/v8/src/diagnostics/riscv/disasm-riscv.cc
+++ b/deps/v8/src/diagnostics/riscv/disasm-riscv.cc
@@ -1269,7 +1269,7 @@ void Decoder::DecodeRFPType(Instruction* instr) {
}
case (RO_FCVT_S_D & kRFPTypeMask): {
if (instr->Rs2Value() == 0b00001) {
- Format(instr, "fcvt.s.d ['frm] 'fd, 'rs1");
+ Format(instr, "fcvt.s.d ['frm] 'fd, 'fs1");
} else {
UNSUPPORTED_RISCV();
}
@@ -1302,7 +1302,6 @@ void Decoder::DecodeRFPType(Instruction* instr) {
case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
if (instr->Rs2Value() != 0b00000) {
UNSUPPORTED_RISCV();
- break;
}
switch (instr->Funct3Value()) {
case 0b001: // RO_FCLASS_D
@@ -1736,23 +1735,28 @@ void Decoder::DecodeJType(Instruction* instr) {
void Decoder::DecodeCRType(Instruction* instr) {
switch (instr->RvcFunct4Value()) {
case 0b1000:
- if (instr->RvcRs1Value() != 0 && instr->RvcRs2Value() == 0)
+ if (instr->RvcRs1Value() != 0 && instr->RvcRs2Value() == 0) {
Format(instr, "jr 'Crs1");
- else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+ break;
+ } else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0) {
Format(instr, "mv 'Crd, 'Crs2");
- else
+ break;
+ } else {
UNSUPPORTED_RISCV();
- break;
+ }
case 0b1001:
- if (instr->RvcRs1Value() == 0 && instr->RvcRs2Value() == 0)
+ if (instr->RvcRs1Value() == 0 && instr->RvcRs2Value() == 0) {
Format(instr, "ebreak");
- else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() == 0)
+ break;
+ } else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() == 0) {
Format(instr, "jalr 'Crs1");
- else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+ break;
+ } else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0) {
Format(instr, "add 'Crd, 'Crd, 'Crs2");
- else
+ break;
+ } else {
UNSUPPORTED_RISCV();
- break;
+ }
default:
UNSUPPORTED_RISCV();
}
@@ -1802,13 +1806,15 @@ void Decoder::DecodeCIType(Instruction* instr) {
Format(instr, "li 'Crd, 'Cimm6");
break;
case RO_C_LUI_ADD:
- if (instr->RvcRdValue() == 2)
+ if (instr->RvcRdValue() == 2) {
Format(instr, "addi sp, sp, 'Cimm6Addi16sp");
- else if (instr->RvcRdValue() != 0 && instr->RvcRdValue() != 2)
+ break;
+ } else if (instr->RvcRdValue() != 0 && instr->RvcRdValue() != 2) {
Format(instr, "lui 'Crd, 'Cimm6U");
- else
+ break;
+ } else {
UNSUPPORTED_RISCV();
- break;
+ }
case RO_C_SLLI:
Format(instr, "slli 'Crd, 'Crd, 'Cshamt");
break;
@@ -1928,15 +1934,18 @@ void Decoder::DecodeCBType(Instruction* instr) {
Format(instr, "beqz 'Crs1s, x0, 'Cimm8B");
break;
case RO_C_MISC_ALU:
- if (instr->RvcFunct2BValue() == 0b00)
+ if (instr->RvcFunct2BValue() == 0b00) {
Format(instr, "srli 'Crs1s, 'Crs1s, 'Cshamt");
- else if (instr->RvcFunct2BValue() == 0b01)
+ break;
+ } else if (instr->RvcFunct2BValue() == 0b01) {
Format(instr, "srai 'Crs1s, 'Crs1s, 'Cshamt");
- else if (instr->RvcFunct2BValue() == 0b10)
+ break;
+ } else if (instr->RvcFunct2BValue() == 0b10) {
Format(instr, "andi 'Crs1s, 'Crs1s, 'Cimm6");
- else
+ break;
+ } else {
UNSUPPORTED_RISCV();
- break;
+ }
default:
UNSUPPORTED_RISCV();
}
@@ -2046,7 +2055,6 @@ void Decoder::DecodeRvvIVV(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2139,7 +2147,6 @@ void Decoder::DecodeRvvIVI(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2256,7 +2263,6 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2340,7 +2346,6 @@ void Decoder::DecodeRvvMVV(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2383,7 +2388,6 @@ void Decoder::DecodeRvvMVX(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2430,7 +2434,6 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
break;
case RO_V_VFUNARY1:
@@ -2567,7 +2570,6 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2637,9 +2639,11 @@ void Decoder::DecodeRvvFVF(Instruction* instr) {
case RO_V_VFWNMSAC_VF:
Format(instr, "vfwnmsac.vf 'vd, 'fs1, 'vs2'vm");
break;
+ case RO_V_VFADD_VF:
+ Format(instr, "vfadd.vf 'vd, 'vs2, 'fs1'vm");
+ break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -2681,7 +2685,6 @@ void Decoder::DecodeVType(Instruction* instr) {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
int Decoder::switch_nf(Instruction* instr) {
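
The DecodeCRType/DecodeCIType/DecodeCBType rewrites above move the break into each reachable branch and drop the break that used to follow UNSUPPORTED_RISCV(). A minimal sketch of that pattern, assuming (as the removed trailing breaks suggest) that the unsupported-instruction macro never returns:

#include <cstdio>
#include <cstdlib>

// Stand-in for UNSUPPORTED_RISCV(); assumed not to return.
[[noreturn]] void UnsupportedInstruction() {
  std::fprintf(stderr, "unsupported instruction\n");
  std::abort();
}

void DecodeCRSketch(int funct4, int rs1, int rs2) {
  switch (funct4) {
    case 0b1000:
      if (rs1 != 0 && rs2 == 0) {
        std::puts("jr");
        break;  // each reachable branch breaks for itself
      } else if (rs1 != 0 && rs2 != 0) {
        std::puts("mv");
        break;
      } else {
        UnsupportedInstruction();  // no trailing break: this never returns
      }
    default:
      UnsupportedInstruction();
  }
}
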
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 767eb015ab..a71b866135 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -447,7 +447,7 @@ void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) {
// Hardcoded thunk.
AssemblerOptions options;
options.record_reloc_info_for_serialization = false;
- TurboAssembler masm(nullptr, options, CodeObjectRequired::kNo,
+ MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo,
NewAssemblerBuffer(64));
masm.Mov(x16,
Operand(reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME)));
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 64a339c25b..69a315d494 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -767,7 +767,8 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
AppendToBuffer("%s%c %s", mnem, operand_size_code(),
NameOfCPURegister(rm));
return 2;
- } else if (mod == 1) {
+ } else if (mod == 1 ||
+ mod == 2) { // Byte displacement or 32-bit displacement
AppendToBuffer("%s%c ", mnem, operand_size_code());
int count = PrintRightOperand(data + 1); // Use name of 64-bit register.
return 1 + count;
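
For the F6/F7 change above, which lets byte (mod == 1) and 32-bit (mod == 2) displacements share the memory-operand path, here is a small stand-alone sketch of how the ModR/M mod field selects the operand form. It ignores the SIB and RIP-relative special cases and is not V8's decoder.

#include <cstdint>
#include <cstdio>

void DescribeModRM(uint8_t modrm) {
  const int mod = modrm >> 6;          // top two bits
  const int reg = (modrm >> 3) & 0x7;  // middle three bits
  const int rm = modrm & 0x7;          // low three bits
  switch (mod) {
    case 0: std::printf("[r%d] (no displacement), reg=%d\n", rm, reg); break;
    case 1: std::printf("[r%d + disp8], reg=%d\n", rm, reg); break;
    case 2: std::printf("[r%d + disp32], reg=%d\n", rm, reg); break;
    case 3: std::printf("r%d (register direct), reg=%d\n", rm, reg); break;
  }
}
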
@@ -1161,7 +1162,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
break;
case 0xE6:
AppendToBuffer("vcvtdq2pd %s,", NameOfAVXRegister(regop));
- current += PrintRightAVXOperand(current);
+ current += PrintRightXMMOperand(current);
break;
case 0xC2:
AppendToBuffer("vcmpss %s,%s,", NameOfAVXRegister(regop),
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.cc b/deps/v8/src/execution/arm/frame-constants-arm.cc
index 7a72dab870..bcf26f489a 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.cc
+++ b/deps/v8/src/execution/arm/frame-constants-arm.cc
@@ -26,6 +26,12 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index 2145711b0f..96faad0d79 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -33,7 +33,7 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize;
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgcOffset = +0 * kSystemPointerSize;
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 5879fd0cdb..1c67a84a0a 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -185,6 +185,10 @@ void ArmDebugger::RedoBreakpoint() {
}
void ArmDebugger::Debug() {
+ if (v8_flags.correctness_fuzzer_suppressions) {
+ PrintF("Debugger disabled for differential fuzzing.\n");
+ return;
+ }
intptr_t last_pc = -1;
bool done = false;
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.cc b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
index 96f6f25e75..c7f61b3fc4 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
@@ -35,6 +35,16 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return rounded_slot_count - slot_count;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ // Include any paddings from kFixedFrameSizeFromFp, an extra slot + padding
+ // for the single argument into StackGuardWithGap and finally padded register
+ // input count.
+ int slot_count = RoundUp(StandardFrameConstants::kFixedSlotCountFromFp, 2) +
+ 2 /* argument */ + RoundUp(register_input_count, 2);
+ return slot_count * kSystemPointerSize;
+}
+
} // namespace internal
} // namespace v8
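
The arm64 MaglevFrame::StackGuardFrameSize added above rounds both the fixed slots and the register inputs up to slot pairs to keep the stack 16-byte aligned. A worked sketch of that arithmetic, with the fixed slot count passed in as an assumed parameter rather than taken from StandardFrameConstants:

#include <cstdint>

// Round a slot count up to an even number (slot pairs keep sp 16-byte aligned).
constexpr intptr_t RoundUp2(intptr_t n) { return (n + 1) & ~intptr_t{1}; }

// fixed_slots_from_fp stands in for StandardFrameConstants::kFixedSlotCountFromFp,
// whose actual value is not assumed here.
constexpr intptr_t StackGuardFrameSizeSketch(int register_input_count,
                                             int fixed_slots_from_fp,
                                             int pointer_size = 8) {
  const intptr_t slots = RoundUp2(fixed_slots_from_fp) +
                         2 /* padded slot pair for the single argument */ +
                         RoundUp2(register_input_count);
  return slots * pointer_size;
}

// Worked example: 3 register inputs and an assumed 2 fixed slots give
// (2 + 2 + 4) * 8 = 64 bytes.
static_assert(StackGuardFrameSizeSketch(3, 2) == 64, "worked example");
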
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index d69a90c112..baeb08501f 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -49,7 +49,7 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize;
static constexpr int kFixedFrameSize = 4 * kSystemPointerSize;
// The following constants are defined so we can static-assert their values
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index adb1ef1041..16cf4f3b84 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -2390,6 +2390,48 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
unsigned rn = instr->Rn();
LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
instr->Mask(LoadStoreAcquireReleaseMask));
+
+ switch (op) {
+ case CAS_w:
+ case CASA_w:
+ case CASL_w:
+ case CASAL_w:
+ CompareAndSwapHelper<uint32_t>(instr);
+ return;
+ case CAS_x:
+ case CASA_x:
+ case CASL_x:
+ case CASAL_x:
+ CompareAndSwapHelper<uint64_t>(instr);
+ return;
+ case CASB:
+ case CASAB:
+ case CASLB:
+ case CASALB:
+ CompareAndSwapHelper<uint8_t>(instr);
+ return;
+ case CASH:
+ case CASAH:
+ case CASLH:
+ case CASALH:
+ CompareAndSwapHelper<uint16_t>(instr);
+ return;
+ case CASP_w:
+ case CASPA_w:
+ case CASPL_w:
+ case CASPAL_w:
+ CompareAndSwapPairHelper<uint32_t>(instr);
+ return;
+ case CASP_x:
+ case CASPA_x:
+ case CASPL_x:
+ case CASPAL_x:
+ CompareAndSwapPairHelper<uint64_t>(instr);
+ return;
+ default:
+ break;
+ }
+
int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
int32_t is_exclusive = (instr->LoadStoreXNotExclusive() == 0);
int32_t is_load = instr->LoadStoreXLoad();
@@ -2486,6 +2528,319 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
}
}
+template <typename T>
+void Simulator::CompareAndSwapHelper(const Instruction* instr) {
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // First, check whether the memory is accessible (for wasm trap handling).
+ if (!ProbeMemory(address, element_size)) return;
+
+ bool is_acquire = instr->Bit(22) == 1;
+ bool is_release = instr->Bit(15) == 1;
+
+ T comparevalue = reg<T>(rs);
+ T newvalue = reg<T>(rt);
+
+ // The architecture permits that the data read clears any exclusive monitors
+ // associated with that location, even if the compare subsequently fails.
+ local_monitor_.NotifyLoad();
+
+ T data = MemoryRead<T>(address);
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ if (data == comparevalue) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+
+ if (is_release) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+
+ MemoryWrite<T>(address, newvalue);
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ }
+
+ set_reg<T>(rs, data);
+ LogRead(address, rs, GetPrintRegisterFormatForSize(element_size));
+}
+
+template <typename T>
+void Simulator::CompareAndSwapPairHelper(const Instruction* instr) {
+ DCHECK((sizeof(T) == 4) || (sizeof(T) == 8));
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ DCHECK((rs % 2 == 0) && (rt % 2 == 0));
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ uint64_t address2 = address + element_size;
+
+ // First, check whether the memory is accessible (for wasm trap handling).
+ if (!ProbeMemory(address, element_size)) return;
+ if (!ProbeMemory(address2, element_size)) return;
+
+ bool is_acquire = instr->Bit(22) == 1;
+ bool is_release = instr->Bit(15) == 1;
+
+ T comparevalue_high = reg<T>(rs + 1);
+ T comparevalue_low = reg<T>(rs);
+ T newvalue_high = reg<T>(rt + 1);
+ T newvalue_low = reg<T>(rt);
+
+ // The architecture permits that the data read clears any exclusive monitors
+ // associated with that location, even if the compare subsequently fails.
+ local_monitor_.NotifyLoad();
+
+ T data_low = MemoryRead<T>(address);
+ T data_high = MemoryRead<T>(address2);
+
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ bool same =
+ (data_high == comparevalue_high) && (data_low == comparevalue_low);
+ if (same) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+
+ if (is_release) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+
+ MemoryWrite<T>(address, newvalue_low);
+ MemoryWrite<T>(address2, newvalue_high);
+ }
+
+ set_reg<T>(rs + 1, data_high);
+ set_reg<T>(rs, data_low);
+
+ PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
+ LogRead(address, rs, format);
+ LogRead(address2, rs + 1, format);
+
+ if (same) {
+ LogWrite(address, rt, format);
+ LogWrite(address2, rt + 1, format);
+ }
+}
+
+template <typename T>
+void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) {
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ bool is_acquire = (instr->Bit(23) == 1) && (rt != kZeroRegCode);
+ bool is_release = instr->Bit(22) == 1;
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = xreg(rn, Reg31IsStackPointer);
+ DCHECK_EQ(address % element_size, 0);
+
+ // First, check whether the memory is accessible (for wasm trap handling).
+ if (!ProbeMemory(address, element_size)) return;
+
+ local_monitor_.NotifyLoad();
+
+ T value = reg<T>(rs);
+
+ T data = MemoryRead<T>(address);
+
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ T result = 0;
+ switch (instr->Mask(AtomicMemorySimpleOpMask)) {
+ case LDADDOp:
+ result = data + value;
+ break;
+ case LDCLROp:
+ DCHECK(!std::numeric_limits<T>::is_signed);
+ result = data & ~value;
+ break;
+ case LDEOROp:
+ DCHECK(!std::numeric_limits<T>::is_signed);
+ result = data ^ value;
+ break;
+ case LDSETOp:
+ DCHECK(!std::numeric_limits<T>::is_signed);
+ result = data | value;
+ break;
+
+ // Signed/Unsigned difference is done via the templated type T.
+ case LDSMAXOp:
+ case LDUMAXOp:
+ result = (data > value) ? data : value;
+ break;
+ case LDSMINOp:
+ case LDUMINOp:
+ result = (data > value) ? value : data;
+ break;
+ }
+
+ if (is_release) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+
+ MemoryWrite<T>(address, result);
+ set_reg<T>(rt, data);
+
+ PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
+ LogRead(address, rt, format);
+ LogWrite(address, rs, format);
+}
+
+template <typename T>
+void Simulator::AtomicMemorySwapHelper(const Instruction* instr) {
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rn = instr->Rn();
+
+ bool is_acquire = (instr->Bit(23) == 1) && (rt != kZeroRegCode);
+ bool is_release = instr->Bit(22) == 1;
+
+ unsigned element_size = sizeof(T);
+ uint64_t address = xreg(rn, Reg31IsStackPointer);
+
+ // First, check whether the memory is accessible (for wasm trap handling).
+ if (!ProbeMemory(address, element_size)) return;
+
+ local_monitor_.NotifyLoad();
+
+ T data = MemoryRead<T>(address);
+ if (is_acquire) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ __sync_synchronize();
+ }
+
+ if (is_release) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
+ // Approximate store-release by issuing a full barrier before the store.
+ __sync_synchronize();
+ }
+ MemoryWrite<T>(address, reg<T>(rs));
+
+ set_reg<T>(rt, data);
+
+ PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
+ LogRead(address, rt, format);
+ LogWrite(address, rs, format);
+}
+
+#define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \
+ V(LDADD) \
+ V(LDCLR) \
+ V(LDEOR) \
+ V(LDSET) \
+ V(LDUMAX) \
+ V(LDUMIN)
+
+#define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \
+ V(LDSMAX) \
+ V(LDSMIN)
+
+void Simulator::VisitAtomicMemory(Instruction* instr) {
+ switch (instr->Mask(AtomicMemoryMask)) {
+// clang-format off
+#define SIM_FUNC_B(A) \
+ case A##B: \
+ case A##AB: \
+ case A##LB: \
+ case A##ALB:
+#define SIM_FUNC_H(A) \
+ case A##H: \
+ case A##AH: \
+ case A##LH: \
+ case A##ALH:
+#define SIM_FUNC_w(A) \
+ case A##_w: \
+ case A##A_w: \
+ case A##L_w: \
+ case A##AL_w:
+#define SIM_FUNC_x(A) \
+ case A##_x: \
+ case A##A_x: \
+ case A##L_x: \
+ case A##AL_x:
+
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B)
+ AtomicMemorySimpleHelper<uint8_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B)
+ AtomicMemorySimpleHelper<int8_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H)
+ AtomicMemorySimpleHelper<uint16_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H)
+ AtomicMemorySimpleHelper<int16_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w)
+ AtomicMemorySimpleHelper<uint32_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w)
+ AtomicMemorySimpleHelper<int32_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x)
+ AtomicMemorySimpleHelper<uint64_t>(instr);
+ break;
+ ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x)
+ AtomicMemorySimpleHelper<int64_t>(instr);
+ break;
+ // clang-format on
+
+ case SWPB:
+ case SWPAB:
+ case SWPLB:
+ case SWPALB:
+ AtomicMemorySwapHelper<uint8_t>(instr);
+ break;
+ case SWPH:
+ case SWPAH:
+ case SWPLH:
+ case SWPALH:
+ AtomicMemorySwapHelper<uint16_t>(instr);
+ break;
+ case SWP_w:
+ case SWPA_w:
+ case SWPL_w:
+ case SWPAL_w:
+ AtomicMemorySwapHelper<uint32_t>(instr);
+ break;
+ case SWP_x:
+ case SWPA_x:
+ case SWPL_x:
+ case SWPAL_x:
+ AtomicMemorySwapHelper<uint64_t>(instr);
+ break;
+ }
+}
+
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) {
fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
@@ -3466,6 +3821,10 @@ bool Simulator::PrintValue(const char* desc) {
}
void Simulator::Debug() {
+ if (v8_flags.correctness_fuzzer_suppressions) {
+ PrintF("Debugger disabled for differential fuzzing.\n");
+ return;
+ }
bool done = false;
while (!done) {
// Disassemble the next instruction to execute before doing anything else.
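
The compare-and-swap, LD<op> and SWP helpers added to simulator-arm64.cc above model the instruction's acquire and release bits with full barriers around a plain read and write. As a rough portable analogue (not the simulator's implementation), the same compare-and-swap semantics can be expressed with std::atomic memory orders:

#include <atomic>

// Returns the value that was in memory before the operation; the caller
// writes it back into the status register (Rs) whether or not the swap
// happened, just like the simulator helper does.
template <typename T>
T SimulatedCompareAndSwap(std::atomic<T>* location, T compare_value,
                          T new_value, bool is_acquire, bool is_release) {
  const std::memory_order success =
      is_acquire && is_release ? std::memory_order_acq_rel
      : is_acquire             ? std::memory_order_acquire
      : is_release             ? std::memory_order_release
                               : std::memory_order_relaxed;
  // The failure order may not be a release order.
  const std::memory_order failure =
      is_acquire ? std::memory_order_acquire : std::memory_order_relaxed;
  T observed = compare_value;
  location->compare_exchange_strong(observed, new_value, success, failure);
  return observed;
}

The LD<op> and SWP helpers follow the same acquire/release mapping, with an unconditional read-modify-write or exchange in place of the compare.
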
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index 4a7b81f8a3..b3c5eda2a7 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -1492,6 +1492,14 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
void ConditionalCompareHelper(Instruction* instr, T op2);
void LoadStoreHelper(Instruction* instr, int64_t offset, AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
+ template <typename T>
+ void CompareAndSwapHelper(const Instruction* instr);
+ template <typename T>
+ void CompareAndSwapPairHelper(const Instruction* instr);
+ template <typename T>
+ void AtomicMemorySimpleHelper(const Instruction* instr);
+ template <typename T>
+ void AtomicMemorySwapHelper(const Instruction* instr);
uintptr_t LoadStoreAddress(unsigned addr_reg, int64_t offset,
AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg, int64_t offset, AddrMode addrmode);
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index b76a601b8f..00da2d38a7 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -168,8 +168,8 @@ InvokeParams InvokeParams::SetUpForRunMicrotasks(
return params;
}
-Handle<CodeT> JSEntry(Isolate* isolate, Execution::Target execution_target,
- bool is_construct) {
+Handle<Code> JSEntry(Isolate* isolate, Execution::Target execution_target,
+ bool is_construct) {
if (is_construct) {
DCHECK_EQ(Execution::Target::kCallable, execution_target);
return BUILTIN_CODE(isolate, JSConstructEntry);
@@ -397,7 +397,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
// Placeholder for return value.
Object value;
- Handle<CodeT> code =
+ Handle<Code> code =
JSEntry(isolate, params.execution_target, params.is_construct);
{
// Save and restore context around invocation and block the
@@ -468,6 +468,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
MaybeHandle<Object> InvokeWithTryCatch(Isolate* isolate,
const InvokeParams& params) {
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ !isolate->is_execution_terminating());
bool is_termination = false;
MaybeHandle<Object> maybe_result;
if (params.exception_out != nullptr) {
@@ -611,7 +613,7 @@ static_assert(offsetof(StackHandlerMarker, padding) ==
static_assert(sizeof(StackHandlerMarker) == StackHandlerConstants::kSize);
#if V8_ENABLE_WEBASSEMBLY
-void Execution::CallWasm(Isolate* isolate, Handle<CodeT> wrapper_code,
+void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
Address wasm_call_target, Handle<Object> object_ref,
Address packed_args) {
using WasmEntryStub = GeneratedCode<Address(
diff --git a/deps/v8/src/execution/execution.h b/deps/v8/src/execution/execution.h
index 7fb97a7745..cca98f5568 100644
--- a/deps/v8/src/execution/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -77,7 +77,7 @@ class Execution final : public AllStatic {
// Upon return, either isolate->has_pending_exception() is true, or
// the function's return values are in {packed_args}.
V8_EXPORT_PRIVATE static void CallWasm(Isolate* isolate,
- Handle<CodeT> wrapper_code,
+ Handle<Code> wrapper_code,
Address wasm_call_target,
Handle<Object> object_ref,
Address packed_args);
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index de1306e72a..22e9719ff2 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -15,11 +15,11 @@
namespace v8 {
namespace internal {
-class InnerPointerToCodeCache {
+class InnerPointerToCodeCache final {
public:
struct InnerPointerToCodeCacheEntry {
Address inner_pointer;
- CodeLookupResult code;
+ base::Optional<GcSafeCode> code;
union {
SafepointEntry safepoint_entry;
MaglevSafepointEntry maglev_safepoint_entry;
@@ -27,20 +27,8 @@ class InnerPointerToCodeCache {
InnerPointerToCodeCacheEntry() : safepoint_entry() {}
};
- static void FlushCallback(LocalIsolate*, GCType, GCCallbackFlags,
- void* data) {
- static_cast<InnerPointerToCodeCache*>(data)->Flush();
- }
-
explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
Flush();
- isolate_->main_thread_local_heap()->AddGCEpilogueCallback(
- FlushCallback, this, GCType::kGCTypeMarkSweepCompact);
- }
-
- ~InnerPointerToCodeCache() {
- isolate_->main_thread_local_heap()->RemoveGCEpilogueCallback(FlushCallback,
- this);
}
InnerPointerToCodeCache(const InnerPointerToCodeCache&) = delete;
@@ -53,7 +41,7 @@ class InnerPointerToCodeCache {
private:
InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
- Isolate* isolate_;
+ Isolate* const isolate_;
static const int kInnerPointerToCodeCacheSize = 1024;
InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
@@ -91,7 +79,12 @@ inline Address StackFrame::callee_pc() const {
inline Address StackFrame::pc() const { return ReadPC(pc_address()); }
inline Address StackFrame::unauthenticated_pc() const {
- return PointerAuthentication::StripPAC(*pc_address());
+ return unauthenticated_pc(pc_address());
+}
+
+// static
+inline Address StackFrame::unauthenticated_pc(Address* pc_address) {
+ return PointerAuthentication::StripPAC(*pc_address);
}
inline Address StackFrame::ReadPC(Address* pc_address) {
@@ -179,15 +172,8 @@ inline Address CommonFrame::caller_fp() const {
}
inline Address CommonFrame::caller_pc() const {
- return ReadPC(reinterpret_cast<Address*>(ComputePCAddress(fp())));
-}
-
-inline Address CommonFrame::ComputePCAddress(Address fp) {
- return fp + StandardFrameConstants::kCallerPCOffset;
-}
-
-inline Address CommonFrame::ComputeConstantPoolAddress(Address fp) {
- return fp + StandardFrameConstants::kConstantPoolOffset;
+ return ReadPC(reinterpret_cast<Address*>(
+ fp() + StandardFrameConstants::kCallerPCOffset));
}
inline bool CommonFrameWithJSLinkage::IsConstructFrame(Address fp) {
@@ -299,28 +285,10 @@ inline JavaScriptBuiltinContinuationWithCatchFrame::
StackFrameIteratorBase* iterator)
: JavaScriptBuiltinContinuationFrame(iterator) {}
-inline JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate)
- : iterator_(isolate) {
- if (!done()) Advance();
-}
-
-inline JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate,
- ThreadLocalTop* top)
- : iterator_(isolate, top) {
- if (!done()) Advance();
-}
-
-inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
- StackFrame* frame = iterator_.frame();
- return JavaScriptFrame::cast(frame);
-}
-
-inline JavaScriptFrame* JavaScriptFrameIterator::Reframe() {
- StackFrame* frame = iterator_.Reframe();
- return JavaScriptFrame::cast(frame);
-}
+inline IrregexpFrame::IrregexpFrame(StackFrameIteratorBase* iterator)
+ : TypedFrame(iterator) {}
-inline CommonFrame* StackTraceFrameIterator::frame() const {
+inline CommonFrame* DebuggableStackFrameIterator::frame() const {
StackFrame* frame = iterator_.frame();
#if V8_ENABLE_WEBASSEMBLY
DCHECK(frame->is_java_script() || frame->is_wasm());
@@ -330,33 +298,41 @@ inline CommonFrame* StackTraceFrameIterator::frame() const {
return static_cast<CommonFrame*>(frame);
}
-inline CommonFrame* StackTraceFrameIterator::Reframe() {
+inline CommonFrame* DebuggableStackFrameIterator::Reframe() {
iterator_.Reframe();
return frame();
}
-bool StackTraceFrameIterator::is_javascript() const {
+bool DebuggableStackFrameIterator::is_javascript() const {
return frame()->is_java_script();
}
#if V8_ENABLE_WEBASSEMBLY
-bool StackTraceFrameIterator::is_wasm() const { return frame()->is_wasm(); }
+bool DebuggableStackFrameIterator::is_wasm() const {
+ return frame()->is_wasm();
+}
#endif // V8_ENABLE_WEBASSEMBLY
-JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
+JavaScriptFrame* DebuggableStackFrameIterator::javascript_frame() const {
return JavaScriptFrame::cast(frame());
}
-inline StackFrame* SafeStackFrameIterator::frame() const {
- DCHECK(!done());
+// static
+inline bool StackFrameIteratorForProfiler::IsValidFrameType(
+ StackFrame::Type type) {
#if V8_ENABLE_WEBASSEMBLY
- DCHECK(frame_->is_java_script() || frame_->is_exit() ||
- frame_->is_builtin_exit() || frame_->is_wasm() ||
- frame_->is_wasm_to_js() || frame_->is_js_to_wasm());
+ return StackFrame::IsJavaScript(type) || type == StackFrame::EXIT ||
+ type == StackFrame::BUILTIN_EXIT || type == StackFrame::WASM ||
+ type == StackFrame::WASM_TO_JS || type == StackFrame::JS_TO_WASM;
#else
- DCHECK(frame_->is_java_script() || frame_->is_exit() ||
- frame_->is_builtin_exit());
+ return StackFrame::IsJavaScript(type) || type == StackFrame::EXIT ||
+ type == StackFrame::BUILTIN_EXIT;
#endif // V8_ENABLE_WEBASSEMBLY
+}
+
+inline StackFrame* StackFrameIteratorForProfiler::frame() const {
+ DCHECK(!done());
+ DCHECK(IsValidFrameType(frame_->type()));
return frame_;
}
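
The trimmed InnerPointerToCodeCache above is, in essence, a small direct-mapped cache from a code-space address to the code object that contains it. A self-contained sketch of that shape, with SlowLookup standing in for the heap walk a miss would trigger (all names here are illustrative, not V8 API):

#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>

struct CodeRef {  // illustrative stand-in for the cached code handle
  uintptr_t instruction_start = 0;
  size_t instruction_size = 0;
};

// Stub for the expensive lookup a cache miss would trigger; always empty here
// just to keep the sketch self-contained.
std::optional<CodeRef> SlowLookup(uintptr_t inner_pointer) {
  (void)inner_pointer;
  return std::nullopt;
}

class InnerPointerCacheSketch {
 public:
  std::optional<CodeRef> Get(uintptr_t inner_pointer) {
    Entry& entry = entries_[Hash(inner_pointer)];
    if (!entry.code.has_value() || entry.inner_pointer != inner_pointer) {
      entry.inner_pointer = inner_pointer;
      entry.code = SlowLookup(inner_pointer);  // miss: do the slow walk once
    }
    return entry.code;
  }
  // Cleared when cached code addresses may have become stale.
  void Flush() { entries_.fill(Entry{}); }

 private:
  static constexpr size_t kSize = 1024;  // same order of magnitude as V8's
  struct Entry {
    uintptr_t inner_pointer = 0;
    std::optional<CodeRef> code;
  };
  static size_t Hash(uintptr_t p) { return (p >> 4) & (kSize - 1); }
  std::array<Entry, kSize> entries_;
};
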
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 27439c2607..b5def7fa47 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -99,25 +99,23 @@ class StackHandlerIterator {
// -------------------------------------------------------------------------
#define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
- bool can_access_heap_objects)
+StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate)
: isolate_(isolate),
STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON) frame_(nullptr),
- handler_(nullptr),
- can_access_heap_objects_(can_access_heap_objects) {}
+ handler_(nullptr) {}
#undef INITIALIZE_SINGLETON
StackFrameIterator::StackFrameIterator(Isolate* isolate)
: StackFrameIterator(isolate, isolate->thread_local_top()) {}
StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
- : StackFrameIteratorBase(isolate, true) {
+ : StackFrameIteratorBase(isolate) {
Reset(t);
}
#if V8_ENABLE_WEBASSEMBLY
StackFrameIterator::StackFrameIterator(Isolate* isolate,
wasm::StackMemory* stack)
- : StackFrameIteratorBase(isolate, true) {
+ : StackFrameIteratorBase(isolate) {
Reset(isolate->thread_local_top(), stack);
}
#endif
@@ -151,7 +149,7 @@ void StackFrameIterator::Advance() {
}
StackFrame* StackFrameIterator::Reframe() {
- StackFrame::Type type = frame_->ComputeType(this, &frame_->state_);
+ StackFrame::Type type = ComputeStackFrameType(&frame_->state_);
frame_ = SingletonFor(type, &frame_->state_);
return frame();
}
@@ -205,12 +203,12 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
void TypedFrameWithJSLinkage::Iterate(RootVisitor* v) const {
IterateExpressions(v);
- IteratePc(v, pc_address(), constant_pool_address(), LookupCodeT());
+ IteratePc(v, pc_address(), constant_pool_address(), GcSafeLookupCode());
}
// -------------------------------------------------------------------------
-void JavaScriptFrameIterator::Advance() {
+void JavaScriptStackFrameIterator::Advance() {
do {
iterator_.Advance();
} while (!iterator_.done() && !iterator_.frame()->is_java_script());
@@ -218,24 +216,24 @@ void JavaScriptFrameIterator::Advance() {
// -------------------------------------------------------------------------
-StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
+DebuggableStackFrameIterator::DebuggableStackFrameIterator(Isolate* isolate)
: iterator_(isolate) {
if (!done() && !IsValidFrame(iterator_.frame())) Advance();
}
-StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate,
- StackFrameId id)
- : StackTraceFrameIterator(isolate) {
+DebuggableStackFrameIterator::DebuggableStackFrameIterator(Isolate* isolate,
+ StackFrameId id)
+ : DebuggableStackFrameIterator(isolate) {
while (!done() && frame()->id() != id) Advance();
}
-void StackTraceFrameIterator::Advance() {
+void DebuggableStackFrameIterator::Advance() {
do {
iterator_.Advance();
} while (!done() && !IsValidFrame(iterator_.frame()));
}
-int StackTraceFrameIterator::FrameFunctionCount() const {
+int DebuggableStackFrameIterator::FrameFunctionCount() const {
DCHECK(!done());
if (!iterator_.frame()->is_optimized()) return 1;
std::vector<SharedFunctionInfo> infos;
@@ -243,15 +241,16 @@ int StackTraceFrameIterator::FrameFunctionCount() const {
return static_cast<int>(infos.size());
}
-FrameSummary StackTraceFrameIterator::GetTopValidFrame() const {
+FrameSummary DebuggableStackFrameIterator::GetTopValidFrame() const {
DCHECK(!done());
// Like FrameSummary::GetTop, but additionally observes
- // StackTraceFrameIterator filtering semantics.
+ // DebuggableStackFrameIterator filtering semantics.
std::vector<FrameSummary> frames;
frame()->Summarize(&frames);
if (is_javascript()) {
for (int i = static_cast<int>(frames.size()) - 1; i >= 0; i--) {
- if (!IsValidJSFunction(*frames[i].AsJavaScript().function())) continue;
+ JSFunction function = *frames[i].AsJavaScript().function();
+ if (!function.shared().IsSubjectToDebugging()) continue;
return frames[i];
}
UNREACHABLE();
@@ -263,9 +262,10 @@ FrameSummary StackTraceFrameIterator::GetTopValidFrame() const {
}
// static
-bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) {
+bool DebuggableStackFrameIterator::IsValidFrame(StackFrame* frame) {
if (frame->is_java_script()) {
- return IsValidJSFunction(static_cast<JavaScriptFrame*>(frame)->function());
+ JSFunction function = static_cast<JavaScriptFrame*>(frame)->function();
+ return function.shared().IsSubjectToDebugging();
}
#if V8_ENABLE_WEBASSEMBLY
if (frame->is_wasm()) return true;
@@ -273,12 +273,6 @@ bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) {
return false;
}
-// static
-bool StackTraceFrameIterator::IsValidJSFunction(JSFunction f) {
- if (!f.IsJSFunction()) return false;
- return f.shared().IsSubjectToDebugging();
-}
-
// -------------------------------------------------------------------------
namespace {
@@ -308,10 +302,9 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
} else if (!isolate->heap()->InSpaceSlow(pc, CODE_SPACE)) {
return false;
}
- CodeLookupResult interpreter_entry_trampoline =
- isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
- return interpreter_entry_trampoline.code()
- .is_interpreter_trampoline_builtin();
+ Code interpreter_entry_trampoline =
+ isolate->heap()->FindCodeForInnerPointer(pc);
+ return interpreter_entry_trampoline.is_interpreter_trampoline_builtin();
} else {
return false;
}
@@ -319,29 +312,23 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
} // namespace
-bool SafeStackFrameIterator::IsNoFrameBytecodeHandlerPc(Isolate* isolate,
- Address pc,
- Address fp) const {
- // Return false for builds with non-embedded bytecode handlers.
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr) return false;
-
+bool StackFrameIteratorForProfiler::IsNoFrameBytecodeHandlerPc(
+ Isolate* isolate, Address pc, Address fp) const {
EmbeddedData d = EmbeddedData::FromBlob(isolate);
if (pc < d.InstructionStartOfBytecodeHandlers() ||
pc >= d.InstructionEndOfBytecodeHandlers()) {
- // Not a bytecode handler pc address.
return false;
}
- if (!IsValidStackAddress(fp +
- CommonFrameConstants::kContextOrFrameTypeOffset)) {
+ Address frame_type_address =
+ fp + CommonFrameConstants::kContextOrFrameTypeOffset;
+ if (!IsValidStackAddress(frame_type_address)) {
return false;
}
// Check if top stack frame is a bytecode handler stub frame.
- MSAN_MEMORY_IS_INITIALIZED(
- fp + CommonFrameConstants::kContextOrFrameTypeOffset, kSystemPointerSize);
- intptr_t marker =
- Memory<intptr_t>(fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+ MSAN_MEMORY_IS_INITIALIZED(frame_type_address, kSystemPointerSize);
+ intptr_t marker = Memory<intptr_t>(frame_type_address);
if (StackFrame::IsTypeMarker(marker) &&
StackFrame::MarkerToType(marker) == StackFrame::STUB) {
// Bytecode handler built a frame.
@@ -350,43 +337,63 @@ bool SafeStackFrameIterator::IsNoFrameBytecodeHandlerPc(Isolate* isolate,
return true;
}
-SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
- Address fp, Address sp,
- Address lr, Address js_entry_sp)
- : StackFrameIteratorBase(isolate, false),
+StackFrameIteratorForProfiler::StackFrameIteratorForProfiler(
+ Isolate* isolate, Address pc, Address fp, Address sp, Address lr,
+ Address js_entry_sp)
+ : StackFrameIteratorBase(isolate),
low_bound_(sp),
high_bound_(js_entry_sp),
top_frame_type_(StackFrame::NO_FRAME_TYPE),
- top_context_address_(kNullAddress),
external_callback_scope_(isolate->external_callback_scope()),
top_link_register_(lr) {
+ if (!isolate->isolate_data()->stack_is_iterable()) {
+ // The stack is not iterable in a short time interval during deoptimization.
+ // See also: ExternalReference::stack_is_iterable_address.
+ DCHECK(done());
+ return;
+ }
+
+ // For Advance below, we need frame_ to be set; and that only happens if the
+ // type is not NO_FRAME_TYPE.
+ // TODO(jgruber): Clean this up.
+ static constexpr StackFrame::Type kTypeForAdvance = StackFrame::TURBOFAN;
+
StackFrame::State state;
StackFrame::Type type;
- ThreadLocalTop* top = isolate->thread_local_top();
+ ThreadLocalTop* const top = isolate->thread_local_top();
bool advance_frame = true;
-
- Address fast_c_fp = isolate->isolate_data()->fast_c_call_caller_fp();
- uint8_t stack_is_iterable = isolate->isolate_data()->stack_is_iterable();
- if (!stack_is_iterable) {
- frame_ = nullptr;
- return;
- }
- // 'Fast C calls' are a special type of C call where we call directly from
- // JS to C without an exit frame inbetween. The CEntryStub is responsible
- // for setting Isolate::c_entry_fp, meaning that it won't be set for fast C
- // calls. To keep the stack iterable, we store the FP and PC of the caller
- // of the fast C call on the isolate. This is guaranteed to be the topmost
- // JS frame, because fast C calls cannot call back into JS. We start
- // iterating the stack from this topmost JS frame.
- if (fast_c_fp) {
+ const Address fast_c_fp = isolate->isolate_data()->fast_c_call_caller_fp();
+ if (fast_c_fp != kNullAddress) {
+ // 'Fast C calls' are a special type of C call where we call directly from
+ // JS to C without an exit frame inbetween. The CEntryStub is responsible
+ // for setting Isolate::c_entry_fp, meaning that it won't be set for fast C
+ // calls. To keep the stack iterable, we store the FP and PC of the caller
+ // of the fast C call on the isolate. This is guaranteed to be the topmost
+ // JS frame, because fast C calls cannot call back into JS. We start
+ // iterating the stack from this topmost JS frame.
DCHECK_NE(kNullAddress, isolate->isolate_data()->fast_c_call_caller_pc());
- type = StackFrame::Type::TURBOFAN;
- top_frame_type_ = type;
state.fp = fast_c_fp;
state.sp = sp;
state.pc_address = reinterpret_cast<Address*>(
isolate->isolate_data()->fast_c_call_caller_pc_address());
- advance_frame = false;
+
+ // ComputeStackFrameType will read both kContextOffset and
+ // kFunctionOffset, we check only that kFunctionOffset is within the stack
+ // bounds and do a compile time check that kContextOffset slot is pushed on
+ // the stack before kFunctionOffset.
+ static_assert(StandardFrameConstants::kFunctionOffset <
+ StandardFrameConstants::kContextOffset);
+ if (IsValidStackAddress(state.fp +
+ StandardFrameConstants::kFunctionOffset)) {
+ type = ComputeStackFrameType(&state);
+ if (IsValidFrameType(type)) {
+ top_frame_type_ = type;
+ advance_frame = false;
+ }
+ } else {
+ // Cannot determine the actual type; the frame will be skipped below.
+ type = kTypeForAdvance;
+ }
} else if (IsValidTop(top)) {
type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
top_frame_type_ = type;
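
The fast-C-call branch above relies on the caller's FP and PC having been stashed on the isolate before JS called straight into C without an exit frame. A hypothetical RAII sketch of that bookkeeping (FastCCallScratch and FastCCallScope are illustrative names, not V8 types):

#include <cstdint>

struct FastCCallScratch {               // illustrative stand-in for per-isolate data
  uintptr_t fast_c_call_caller_fp = 0;  // 0 means "not inside a fast C call"
  uintptr_t fast_c_call_caller_pc = 0;
};

class FastCCallScope {  // hypothetical RAII helper
 public:
  FastCCallScope(FastCCallScratch* data, uintptr_t caller_fp,
                 uintptr_t caller_pc)
      : data_(data) {
    // Record the topmost JS frame so a sampling profiler that interrupts the
    // C code knows where to start unwinding.
    data_->fast_c_call_caller_fp = caller_fp;
    data_->fast_c_call_caller_pc = caller_pc;
  }
  ~FastCCallScope() {
    data_->fast_c_call_caller_fp = 0;
    data_->fast_c_call_caller_pc = 0;
  }

 private:
  FastCCallScratch* data_;
};
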
@@ -394,8 +401,9 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
DCHECK_NE(fp, kNullAddress);
state.fp = fp;
state.sp = sp;
- state.pc_address = StackFrame::ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(CommonFrame::ComputePCAddress(fp)));
+ state.pc_address =
+ StackFrame::ResolveReturnAddressLocation(reinterpret_cast<Address*>(
+ fp + StandardFrameConstants::kCallerPCOffset));
// If the current PC is in a bytecode handler, the top stack frame isn't
// the bytecode handler's frame and the top of stack or link register is a
@@ -404,62 +412,52 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
// properly and make sure we do not drop the frame.
bool is_no_frame_bytecode_handler = false;
if (IsNoFrameBytecodeHandlerPc(isolate, pc, fp)) {
- Address* tos_location = nullptr;
+ Address* top_location = nullptr;
if (top_link_register_) {
- tos_location = &top_link_register_;
+ top_location = &top_link_register_;
} else if (IsValidStackAddress(sp)) {
MSAN_MEMORY_IS_INITIALIZED(sp, kSystemPointerSize);
- tos_location = reinterpret_cast<Address*>(sp);
+ top_location = reinterpret_cast<Address*>(sp);
}
- if (IsInterpreterFramePc(isolate, *tos_location, &state)) {
- state.pc_address = tos_location;
+ if (IsInterpreterFramePc(isolate, *top_location, &state)) {
+ state.pc_address = top_location;
is_no_frame_bytecode_handler = true;
advance_frame = false;
}
}
- // StackFrame::ComputeType will read both kContextOffset and kMarkerOffset,
- // we check only that kMarkerOffset is within the stack bounds and do
- // compile time check that kContextOffset slot is pushed on the stack before
- // kMarkerOffset.
+ // ComputeStackFrameType will read both kContextOffset and
+ // kFunctionOffset, we check only that kFunctionOffset is within the stack
+ // bounds and do a compile time check that kContextOffset slot is pushed on
+ // the stack before kFunctionOffset.
static_assert(StandardFrameConstants::kFunctionOffset <
StandardFrameConstants::kContextOffset);
- Address frame_marker = fp + StandardFrameConstants::kFunctionOffset;
- if (IsValidStackAddress(frame_marker)) {
+ Address function_slot = fp + StandardFrameConstants::kFunctionOffset;
+ if (IsValidStackAddress(function_slot)) {
if (is_no_frame_bytecode_handler) {
type = StackFrame::INTERPRETED;
} else {
- type = StackFrame::ComputeType(this, &state);
+ type = ComputeStackFrameType(&state);
}
top_frame_type_ = type;
- MSAN_MEMORY_IS_INITIALIZED(
- fp + CommonFrameConstants::kContextOrFrameTypeOffset,
- kSystemPointerSize);
- Address type_or_context_address =
- Memory<Address>(fp + CommonFrameConstants::kContextOrFrameTypeOffset);
- if (!StackFrame::IsTypeMarker(type_or_context_address))
- top_context_address_ = type_or_context_address;
} else {
- // Mark the frame as TURBOFAN if we cannot determine its type.
- // We chose TURBOFAN rather than INTERPRETED because it's closer to
- // the original value of StackFrame::JAVA_SCRIPT here, in that JAVA_SCRIPT
- // referred to full-codegen frames (now removed from the tree), and
- // TURBOFAN refers to turbofan frames, both of which are generated
- // code. INTERPRETED frames refer to bytecode.
- // The frame anyways will be skipped.
- type = StackFrame::TURBOFAN;
- // Top frame is incomplete so we cannot reliably determine its type.
- top_frame_type_ = StackFrame::NO_FRAME_TYPE;
+ // Cannot determine the actual type; the frame will be skipped below.
+ type = kTypeForAdvance;
}
} else {
+ // Not iterable.
+ DCHECK(done());
return;
}
+
frame_ = SingletonFor(type, &state);
- if (advance_frame && frame_) Advance();
+ if (advance_frame && !done()) {
+ Advance();
+ }
}
-bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
+bool StackFrameIteratorForProfiler::IsValidTop(ThreadLocalTop* top) const {
Address c_entry_fp = Isolate::c_entry_fp(top);
if (!IsValidExitFrame(c_entry_fp)) return false;
// There should be at least one JS_ENTRY stack handler.
@@ -469,7 +467,7 @@ bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
return c_entry_fp < handler;
}
-void SafeStackFrameIterator::AdvanceOneFrame() {
+void StackFrameIteratorForProfiler::AdvanceOneFrame() {
DCHECK(!done());
StackFrame* last_frame = frame_;
Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
@@ -492,26 +490,26 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
}
}
-bool SafeStackFrameIterator::IsValidFrame(StackFrame* frame) const {
+bool StackFrameIteratorForProfiler::IsValidFrame(StackFrame* frame) const {
return IsValidStackAddress(frame->sp()) && IsValidStackAddress(frame->fp());
}
-bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
+bool StackFrameIteratorForProfiler::IsValidCaller(StackFrame* frame) {
StackFrame::State state;
if (frame->is_entry() || frame->is_construct_entry()) {
// See EntryFrame::GetCallerState. It computes the caller FP address
// and calls ExitFrame::GetStateForFramePointer on it. We need to be
// sure that caller FP address is valid.
- Address caller_fp =
- Memory<Address>(frame->fp() + EntryFrameConstants::kCallerFPOffset);
- if (!IsValidExitFrame(caller_fp)) return false;
+ Address next_exit_frame_fp = Memory<Address>(
+ frame->fp() + EntryFrameConstants::kNextExitFrameFPOffset);
+ if (!IsValidExitFrame(next_exit_frame_fp)) return false;
}
frame->ComputeCallerState(&state);
return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
SingletonFor(frame->GetCallerState(&state)) != nullptr;
}
-bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
+bool StackFrameIteratorForProfiler::IsValidExitFrame(Address fp) const {
if (!IsValidStackAddress(fp)) return false;
Address sp = ExitFrame::ComputeStackPointer(fp);
if (!IsValidStackAddress(sp)) return false;
@@ -521,7 +519,7 @@ bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
return *state.pc_address != kNullAddress;
}
-void SafeStackFrameIterator::Advance() {
+void StackFrameIteratorForProfiler::Advance() {
while (true) {
AdvanceOneFrame();
if (done()) break;
@@ -561,54 +559,59 @@ void SafeStackFrameIterator::Advance() {
// -------------------------------------------------------------------------
namespace {
-CodeLookupResult GetContainingCode(Isolate* isolate, Address pc) {
+
+base::Optional<GcSafeCode> GetContainingCode(Isolate* isolate, Address pc) {
return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
}
+
} // namespace
-CodeLookupResult StackFrame::LookupCodeT() const {
- CodeLookupResult result = GetContainingCode(isolate(), pc());
- if (DEBUG_BOOL) {
- CHECK(result.IsFound());
- if (result.IsCode()) {
- Code code = result.code();
- CHECK_GE(pc(), code.InstructionStart(isolate(), pc()));
- CHECK_LT(pc(), code.InstructionEnd(isolate(), pc()));
- } else {
-#ifdef V8_EXTERNAL_CODE_SPACE
- CodeDataContainer code = result.code_data_container();
- CHECK_GE(pc(), code.InstructionStart(isolate(), pc()));
- CHECK_LT(pc(), code.InstructionEnd(isolate(), pc()));
-#endif
- }
- }
- return result;
+GcSafeCode StackFrame::GcSafeLookupCode() const {
+ base::Optional<GcSafeCode> result = GetContainingCode(isolate(), pc());
+ DCHECK_GE(pc(), result->InstructionStart(isolate(), pc()));
+ DCHECK_LT(pc(), result->InstructionEnd(isolate(), pc()));
+ return result.value();
+}
+
+Code StackFrame::LookupCode() const {
+ DCHECK_NE(isolate()->heap()->gc_state(), Heap::MARK_COMPACT);
+ return GcSafeLookupCode().UnsafeCastToCode();
}
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address,
- CodeLookupResult lookup_result) const {
- if (lookup_result.IsCodeDataContainer()) {
- // The embeded builtins are immovable, so there's no need to update PCs on
- // the stack, just visit the CodeT object.
- Object code = lookup_result.code_data_container();
- v->VisitRunningCode(FullObjectSlot(&code));
+ GcSafeCode holder) const {
+ const Address old_pc = ReadPC(pc_address);
+ DCHECK_GE(old_pc, holder.InstructionStart(isolate(), old_pc));
+ DCHECK_LT(old_pc, holder.InstructionEnd(isolate(), old_pc));
+
+ // Keep the old pc offset before visiting the code since we need it to
+ // calculate the new pc after a potential InstructionStream move.
+ const uintptr_t pc_offset_from_start = old_pc - holder.InstructionStart();
+
+ // Visit.
+ GcSafeCode visited_holder = holder;
+ PtrComprCageBase code_cage_base{isolate()->code_cage_base()};
+ const Object old_istream = holder.raw_instruction_stream(code_cage_base);
+ Object visited_istream = old_istream;
+ v->VisitRunningCode(FullObjectSlot{&visited_holder},
+ FullObjectSlot{&visited_istream});
+ if (visited_istream == old_istream) {
+ // Note this covers two important cases:
+ // 1. the associated InstructionStream object did not move, and
+ // 2. `holder` is an embedded builtin and has no InstructionStream.
return;
}
- Code holder = lookup_result.code();
- Address old_pc = ReadPC(pc_address);
- DCHECK(ReadOnlyHeap::Contains(holder) ||
- holder.GetHeap()->GcSafeCodeContains(holder, old_pc));
- unsigned pc_offset = holder.GetOffsetFromInstructionStart(isolate_, old_pc);
- Object code = holder;
- v->VisitRunningCode(FullObjectSlot(&code));
- if (code == holder) return;
- holder = Code::unchecked_cast(code);
- Address pc = holder.InstructionStart(isolate_, old_pc) + pc_offset;
+
+ DCHECK(visited_holder.has_instruction_stream());
+
+ InstructionStream istream =
+ InstructionStream::unchecked_cast(visited_istream);
+ const Address new_pc = istream.instruction_start() + pc_offset_from_start;
// TODO(v8:10026): avoid replacing a signed pointer.
- PointerAuthentication::ReplacePC(pc_address, pc, kSystemPointerSize);
- if (V8_EMBEDDED_CONSTANT_POOL_BOOL && constant_pool_address) {
- *constant_pool_address = holder.constant_pool();
+ PointerAuthentication::ReplacePC(pc_address, new_pc, kSystemPointerSize);
+ if (V8_EMBEDDED_CONSTANT_POOL_BOOL && constant_pool_address != nullptr) {
+ *constant_pool_address = visited_holder.constant_pool();
}
}
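
The rewritten IteratePc above records the pc's offset from the old instruction start before visiting the code, then rebases it onto the new InstructionStream if the object moved. The fixup reduces to a one-line computation, sketched here outside V8:

#include <cstdint>

// Rebase a return address from the old to the new code start, preserving its
// offset into the instructions.
constexpr uintptr_t RebaseReturnAddress(uintptr_t old_pc,
                                        uintptr_t old_instruction_start,
                                        uintptr_t new_instruction_start) {
  return new_instruction_start + (old_pc - old_instruction_start);
}

// Worked example: a pc 0x30 bytes into code that moved from 0x1000 to 0x5000
// becomes 0x5030.
static_assert(RebaseReturnAddress(0x1030, 0x1000, 0x5000) == 0x5030, "");
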
@@ -620,180 +623,211 @@ void StackFrame::SetReturnAddressLocationResolver(
namespace {
-template <typename CodeOrCodeT>
-inline StackFrame::Type ComputeBuiltinFrameType(CodeOrCodeT code) {
+StackFrame::Type ComputeBuiltinFrameType(GcSafeCode code) {
if (code.is_interpreter_trampoline_builtin() ||
- // Frames for baseline entry trampolines on the stack are still
- // interpreted frames.
code.is_baseline_trampoline_builtin()) {
+ // Frames for baseline entry trampolines on the stack are still interpreted
+ // frames.
return StackFrame::INTERPRETED;
- }
- if (code.is_baseline_leave_frame_builtin()) {
+ } else if (code.is_baseline_leave_frame_builtin()) {
return StackFrame::BASELINE;
- }
- if (code.is_turbofanned()) {
+ } else if (code.is_turbofanned()) {
// TODO(bmeurer): We treat frames for BUILTIN Code objects as
- // OptimizedFrame for now (all the builtins with JavaScript
- // linkage are actually generated with TurboFan currently, so
- // this is sound).
+ // OptimizedFrame for now (all the builtins with JavaScript linkage are
+ // actually generated with TurboFan currently, so this is sound).
return StackFrame::TURBOFAN;
}
return StackFrame::BUILTIN;
}
+StackFrame::Type SafeStackFrameType(StackFrame::Type candidate) {
+ DCHECK_LE(static_cast<uintptr_t>(candidate), StackFrame::NUMBER_OF_TYPES);
+ switch (candidate) {
+ case StackFrame::BUILTIN_CONTINUATION:
+ case StackFrame::BUILTIN_EXIT:
+ case StackFrame::CONSTRUCT:
+ case StackFrame::CONSTRUCT_ENTRY:
+ case StackFrame::ENTRY:
+ case StackFrame::EXIT:
+ case StackFrame::INTERNAL:
+ case StackFrame::IRREGEXP:
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
+ case StackFrame::STUB:
+ return candidate;
+
+#if V8_ENABLE_WEBASSEMBLY
+ case StackFrame::JS_TO_WASM:
+ case StackFrame::STACK_SWITCH:
+ case StackFrame::WASM:
+ case StackFrame::WASM_DEBUG_BREAK:
+ case StackFrame::WASM_EXIT:
+ case StackFrame::WASM_LIFTOFF_SETUP:
+ case StackFrame::WASM_TO_JS:
+ return candidate;
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ // Any other marker value is likely to be a bogus stack frame when being
+ // called from the profiler (in particular, JavaScript frames, including
+ // interpreted frames, should never have a StackFrame::Type marker).
+ // Consider these frames "native".
+ // TODO(jgruber): For the StackFrameIterator, I'm not sure this fallback
+ // makes sense. Shouldn't we know how to handle all frames we encounter
+ // there?
+ case StackFrame::BASELINE:
+ case StackFrame::BUILTIN:
+ case StackFrame::INTERPRETED:
+ case StackFrame::MAGLEV:
+ case StackFrame::MANUAL:
+ case StackFrame::NATIVE:
+ case StackFrame::NO_FRAME_TYPE:
+ case StackFrame::NUMBER_OF_TYPES:
+ case StackFrame::TURBOFAN:
+ case StackFrame::TURBOFAN_STUB_WITH_CONTEXT:
+#if V8_ENABLE_WEBASSEMBLY
+ case StackFrame::C_WASM_ENTRY:
+ case StackFrame::WASM_TO_JS_FUNCTION:
+#endif // V8_ENABLE_WEBASSEMBLY
+ return StackFrame::NATIVE;
+ }
+ UNREACHABLE();
+}
+
} // namespace
-StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
- State* state) {
+StackFrame::Type StackFrameIterator::ComputeStackFrameType(
+ StackFrame::State* state) const {
#if V8_ENABLE_WEBASSEMBLY
if (state->fp == kNullAddress) {
DCHECK(v8_flags.experimental_wasm_stack_switching);
- return NO_FRAME_TYPE;
+ return StackFrame::NO_FRAME_TYPE;
}
#endif
+ const Address pc = StackFrame::ReadPC(state->pc_address);
+
+#if V8_ENABLE_WEBASSEMBLY
+ // If the {pc} does not point into WebAssembly code we can rely on the
+ // returned {wasm_code} to be null and fall back to {GetContainingCode}.
+ wasm::WasmCodeRefScope code_ref_scope;
+ if (wasm::WasmCode* wasm_code = wasm::GetWasmCodeManager()->LookupCode(pc)) {
+ switch (wasm_code->kind()) {
+ case wasm::WasmCode::kWasmFunction:
+ return StackFrame::WASM;
+ case wasm::WasmCode::kWasmToCapiWrapper:
+ return StackFrame::WASM_EXIT;
+ case wasm::WasmCode::kWasmToJsWrapper:
+ return StackFrame::WASM_TO_JS;
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ // Look up the code object to figure out the type of the stack frame.
+ base::Optional<GcSafeCode> lookup_result = GetContainingCode(isolate(), pc);
+ if (!lookup_result.has_value()) return StackFrame::NATIVE;
+
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
kSystemPointerSize);
- intptr_t marker = Memory<intptr_t>(
+ const intptr_t marker = Memory<intptr_t>(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
- Address pc = StackFrame::ReadPC(state->pc_address);
- if (!iterator->can_access_heap_objects_) {
- // TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
- // means that we are being called from the profiler, which can interrupt
- // the VM with a signal at any arbitrary instruction, with essentially
- // anything on the stack. So basically none of these checks are 100%
- // reliable.
- MSAN_MEMORY_IS_INITIALIZED(
- state->fp + StandardFrameConstants::kFunctionOffset,
- kSystemPointerSize);
- Object maybe_function = Object(
- Memory<Address>(state->fp + StandardFrameConstants::kFunctionOffset));
- if (!StackFrame::IsTypeMarker(marker)) {
- if (maybe_function.IsSmi()) {
- return NATIVE;
- } else if (IsInterpreterFramePc(iterator->isolate(), pc, state)) {
- return INTERPRETED;
- } else {
- return TURBOFAN;
- }
+ switch (lookup_result->kind()) {
+ case CodeKind::BUILTIN: {
+ if (StackFrame::IsTypeMarker(marker)) break;
+ return ComputeBuiltinFrameType(lookup_result.value());
}
- } else {
+ case CodeKind::BASELINE:
+ return StackFrame::BASELINE;
+ case CodeKind::MAGLEV:
+ if (StackFrame::IsTypeMarker(marker)) {
+ // An INTERNAL frame can be set up with an associated Maglev code
+ // object when calling into runtime to handle tiering. In this case,
+ // all stack slots are tagged pointers and should be visited through
+ // the usual logic.
+ DCHECK_EQ(StackFrame::MarkerToType(marker), StackFrame::INTERNAL);
+ return StackFrame::INTERNAL;
+ }
+ return StackFrame::MAGLEV;
+ case CodeKind::TURBOFAN:
+ return StackFrame::TURBOFAN;
#if V8_ENABLE_WEBASSEMBLY
- // If the {pc} does not point into WebAssembly code we can rely on the
- // returned {wasm_code} to be null and fall back to {GetContainingCode}.
- wasm::WasmCodeRefScope code_ref_scope;
- if (wasm::WasmCode* wasm_code =
- wasm::GetWasmCodeManager()->LookupCode(pc)) {
- switch (wasm_code->kind()) {
- case wasm::WasmCode::kWasmFunction:
- return WASM;
- case wasm::WasmCode::kWasmToCapiWrapper:
- return WASM_EXIT;
- case wasm::WasmCode::kWasmToJsWrapper:
- return WASM_TO_JS;
- default:
- UNREACHABLE();
+ case CodeKind::JS_TO_WASM_FUNCTION:
+ if (lookup_result->builtin_id() == Builtin::kGenericJSToWasmWrapper) {
+ return StackFrame::JS_TO_WASM;
}
- }
+ return StackFrame::TURBOFAN_STUB_WITH_CONTEXT;
+ case CodeKind::JS_TO_JS_FUNCTION:
+ return StackFrame::TURBOFAN_STUB_WITH_CONTEXT;
+ case CodeKind::C_WASM_ENTRY:
+ return StackFrame::C_WASM_ENTRY;
+ case CodeKind::WASM_TO_JS_FUNCTION:
+ return StackFrame::WASM_TO_JS_FUNCTION;
+ case CodeKind::WASM_FUNCTION:
+ case CodeKind::WASM_TO_CAPI_FUNCTION:
+ // These never appear as on-heap Code objects.
+ UNREACHABLE();
+#else
+ case CodeKind::C_WASM_ENTRY:
+ case CodeKind::JS_TO_JS_FUNCTION:
+ case CodeKind::JS_TO_WASM_FUNCTION:
+ case CodeKind::WASM_FUNCTION:
+ case CodeKind::WASM_TO_CAPI_FUNCTION:
+ case CodeKind::WASM_TO_JS_FUNCTION:
+ UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
+ case CodeKind::BYTECODE_HANDLER:
+ case CodeKind::FOR_TESTING:
+ case CodeKind::REGEXP:
+ case CodeKind::INTERPRETED_FUNCTION:
+ // Fall back to the marker.
+ break;
+ }
- // Look up the code object to figure out the type of the stack frame.
- CodeLookupResult lookup_result = GetContainingCode(iterator->isolate(), pc);
- if (lookup_result.IsFound()) {
- switch (lookup_result.kind()) {
- case CodeKind::BUILTIN: {
- if (StackFrame::IsTypeMarker(marker)) break;
- // We can't use lookup_result.ToCodeT() because we might in the
- // middle of GC.
- if (lookup_result.IsCodeDataContainer()) {
- return ComputeBuiltinFrameType(
- CodeT::cast(lookup_result.code_data_container()));
- }
- return ComputeBuiltinFrameType(lookup_result.code());
- }
- case CodeKind::BASELINE:
- return BASELINE;
- case CodeKind::MAGLEV:
- if (IsTypeMarker(marker)) {
- // An INTERNAL frame can be set up with an associated Maglev code
- // object when calling into runtime to handle tiering. In this case,
- // all stack slots are tagged pointers and should be visited through
- // the usual logic.
- DCHECK_EQ(MarkerToType(marker), StackFrame::INTERNAL);
- return StackFrame::INTERNAL;
- }
- return MAGLEV;
- case CodeKind::TURBOFAN:
- return TURBOFAN;
+ return SafeStackFrameType(StackFrame::MarkerToType(marker));
+}
+
+StackFrame::Type StackFrameIteratorForProfiler::ComputeStackFrameType(
+ StackFrame::State* state) const {
#if V8_ENABLE_WEBASSEMBLY
- case CodeKind::JS_TO_WASM_FUNCTION:
- if (lookup_result.builtin_id() == Builtin::kGenericJSToWasmWrapper) {
- return JS_TO_WASM;
- } else {
- return TURBOFAN_STUB_WITH_CONTEXT;
- }
- case CodeKind::JS_TO_JS_FUNCTION:
- return TURBOFAN_STUB_WITH_CONTEXT;
- case CodeKind::C_WASM_ENTRY:
- return C_WASM_ENTRY;
- case CodeKind::WASM_TO_JS_FUNCTION:
- return WASM_TO_JS_FUNCTION;
- case CodeKind::WASM_FUNCTION:
- case CodeKind::WASM_TO_CAPI_FUNCTION:
- // Never appear as on-heap {Code} objects.
- UNREACHABLE();
-#endif // V8_ENABLE_WEBASSEMBLY
- default:
- // All other types should have an explicit marker
- break;
- }
- } else {
- return NATIVE;
- }
+ if (state->fp == kNullAddress) {
+ DCHECK(v8_flags.experimental_wasm_stack_switching);
+ return StackFrame::NO_FRAME_TYPE;
}
- DCHECK(StackFrame::IsTypeMarker(marker));
- StackFrame::Type candidate = StackFrame::MarkerToType(marker);
- switch (candidate) {
- case ENTRY:
- case CONSTRUCT_ENTRY:
- case EXIT:
- case BUILTIN_CONTINUATION:
- case JAVA_SCRIPT_BUILTIN_CONTINUATION:
- case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
- case BUILTIN_EXIT:
- case STUB:
- case INTERNAL:
- case CONSTRUCT:
-#if V8_ENABLE_WEBASSEMBLY
- case WASM_TO_JS:
- case WASM:
- case WASM_LIFTOFF_SETUP:
- case WASM_EXIT:
- case WASM_DEBUG_BREAK:
- case JS_TO_WASM:
- case STACK_SWITCH:
-#endif // V8_ENABLE_WEBASSEMBLY
- return candidate;
+#endif
- // Any other marker value is likely to be a bogus stack frame when being
- // called from the profiler (in particular, JavaScript frames, including
- // interpreted frames, should never have a StackFrame::Type
- // marker). Consider these frames "native".
- default:
- return NATIVE;
+ MSAN_MEMORY_IS_INITIALIZED(
+ state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
+ kSystemPointerSize);
+ const intptr_t marker = Memory<intptr_t>(
+ state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+ if (StackFrame::IsTypeMarker(marker)) {
+ if (static_cast<uintptr_t>(marker) > StackFrame::NUMBER_OF_TYPES) {
+ // We've read some bogus value from the stack.
+ return StackFrame::NATIVE;
+ }
+ return SafeStackFrameType(StackFrame::MarkerToType(marker));
}
-}
-#ifdef DEBUG
-bool StackFrame::can_access_heap_objects() const {
- return iterator_->can_access_heap_objects_;
+ // We use unauthenticated_pc because it may come from
+ // fast_c_call_caller_pc_address, for which authentication does not work.
+ const Address pc = StackFrame::unauthenticated_pc(state->pc_address);
+ MSAN_MEMORY_IS_INITIALIZED(
+ state->fp + StandardFrameConstants::kFunctionOffset, kSystemPointerSize);
+ Object maybe_function = Object(
+ Memory<Address>(state->fp + StandardFrameConstants::kFunctionOffset));
+ if (maybe_function.IsSmi()) {
+ return StackFrame::NATIVE;
+ } else if (IsInterpreterFramePc(isolate(), pc, state)) {
+ return StackFrame::INTERPRETED;
+ }
+ return StackFrame::TURBOFAN;
}
-#endif
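The profiler variant above decides the frame type by reading the "context or frame type" slot at a fixed offset from fp and asking whether it holds a small-integer type marker or a tagged pointer. A minimal standalone sketch of that idea follows; the enum, tag constants, and encoding are illustrative assumptions, not V8's actual scheme.
// Illustrative sketch only: markers are even small integers, tagged pointers
// are assumed to carry a nonzero low tag bit, so the two can never collide.
#include <cassert>
#include <cstdint>
#include <iostream>
enum class FrameType : intptr_t { kEntry = 0, kExit = 1, kInternal = 2 };
constexpr intptr_t kTagMask = 1;     // assumed low-bit tag on heap pointers
constexpr intptr_t kPointerTag = 1;
intptr_t TypeToMarker(FrameType type) {
  return static_cast<intptr_t>(type) << 1;  // shifted left, so always even
}
bool IsTypeMarker(intptr_t slot_value) {
  return (slot_value & kTagMask) != kPointerTag;
}
FrameType MarkerToType(intptr_t marker) {
  assert(IsTypeMarker(marker));
  return static_cast<FrameType>(marker >> 1);
}
int main() {
  intptr_t slot = TypeToMarker(FrameType::kInternal);
  if (IsTypeMarker(slot)) {
    std::cout << "marker decodes to type "
              << static_cast<intptr_t>(MarkerToType(slot)) << "\n";
  }
}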
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
- return ComputeType(iterator_, state);
+ return iterator_->ComputeStackFrameType(state);
}
Address CommonFrame::GetCallerStackPointer() const {
@@ -818,9 +852,9 @@ void EntryFrame::ComputeCallerState(State* state) const {
}
StackFrame::Type EntryFrame::GetCallerState(State* state) const {
- const int offset = EntryFrameConstants::kCallerFPOffset;
- Address fp = Memory<Address>(this->fp() + offset);
- return ExitFrame::GetStateForFramePointer(fp, state);
+ Address next_exit_frame_fp =
+ Memory<Address>(fp() + EntryFrameConstants::kNextExitFrameFPOffset);
+ return ExitFrame::GetStateForFramePointer(next_exit_frame_fp, state);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -851,7 +885,7 @@ void ExitFrame::ComputeCallerState(State* state) const {
void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
- IteratePc(v, pc_address(), constant_pool_address(), LookupCodeT());
+ IteratePc(v, pc_address(), constant_pool_address(), GcSafeLookupCode());
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
@@ -927,10 +961,10 @@ void BuiltinExitFrame::Summarize(std::vector<FrameSummary>* frames) const {
DCHECK(frames->empty());
Handle<FixedArray> parameters = GetParameters();
DisallowGarbageCollection no_gc;
- CodeLookupResult code = LookupCodeT();
+ Code code = LookupCode();
int code_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
FrameSummary::JavaScriptFrameSummary summary(
- isolate(), receiver(), function(), code.ToAbstractCode(), code_offset,
+ isolate(), receiver(), function(), AbstractCode::cast(code), code_offset,
IsConstructor(), *parameters);
frames->push_back(summary);
}
@@ -1039,9 +1073,9 @@ Object CommonFrame::context() const {
}
int CommonFrame::position() const {
- CodeLookupResult code = LookupCodeT();
+ Code code = LookupCode();
int code_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
- return code.ToAbstractCode().SourcePosition(isolate(), code_offset);
+ return AbstractCode::cast(code).SourcePosition(isolate(), code_offset);
}
int CommonFrame::ComputeExpressionsCount() const {
@@ -1063,12 +1097,12 @@ void CommonFrame::ComputeCallerState(State* state) const {
}
#endif
state->sp = caller_sp();
- state->pc_address = ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(ComputePCAddress(fp())));
+ state->pc_address = ResolveReturnAddressLocation(reinterpret_cast<Address*>(
+ fp() + StandardFrameConstants::kCallerPCOffset));
state->callee_fp = fp();
state->callee_pc_address = pc_address();
- state->constant_pool_address =
- reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
+ state->constant_pool_address = reinterpret_cast<Address*>(
+ fp() + StandardFrameConstants::kConstantPoolOffset);
}
void CommonFrame::Summarize(std::vector<FrameSummary>* functions) const {
@@ -1092,10 +1126,10 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
// FullMaybeObjectSlots here.
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// When external code space is enabled the spill slot could contain both
- // Code and non-Code references, which have different cage bases. So
- // unconditional decompression of the value might corrupt Code pointers.
- // However, given that
- // 1) the Code pointers are never compressed by design (because
+ // InstructionStream and non-InstructionStream references, which have
+ // different cage bases. So unconditional decompression of the value might
+ // corrupt InstructionStream pointers. However, given that 1) the
+ // InstructionStream pointers are never compressed by design (because
// otherwise we wouldn't know which cage base to apply for
// decompression, see respective DCHECKs in
// RelocInfo::target_object()),
@@ -1104,23 +1138,23 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
// we can avoid updating upper part of the spill slot if it already
// contains full value.
// TODO(v8:11880): Remove this special handling by enforcing builtins
- // to use CodeTs instead of Code objects.
+ // to use CodeTs instead of InstructionStream objects.
Address value = *spill_slot.location();
if (!HAS_SMI_TAG(value) && value <= 0xffffffff) {
// We don't need to update smi values or full pointers.
was_compressed = true;
- *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer(
+ *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged(
cage_base, static_cast<Tagged_t>(value));
if (DEBUG_BOOL) {
// Ensure that the spill slot contains a valid heap object.
HeapObject raw = HeapObject::cast(Object(*spill_slot.location()));
MapWord map_word = raw.map_word(cage_base, kRelaxedLoad);
HeapObject forwarded = map_word.IsForwardingAddress()
- ? map_word.ToForwardingAddress()
+ ? map_word.ToForwardingAddress(raw)
: raw;
bool is_self_forwarded =
- forwarded.map_word(cage_base, kRelaxedLoad).ptr() ==
- forwarded.address();
+ forwarded.map_word(cage_base, kRelaxedLoad) ==
+ MapWord::FromForwardingAddress(forwarded, forwarded);
if (is_self_forwarded) {
// The object might be in a self-forwarding state if it's located
// in new large object space. GC will fix this at a later stage.
@@ -1132,7 +1166,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
MapWord fwd_map_map_word =
forwarded_map.map_word(cage_base, kRelaxedLoad);
if (fwd_map_map_word.IsForwardingAddress()) {
- forwarded_map = fwd_map_map_word.ToForwardingAddress();
+ forwarded_map = fwd_map_map_word.ToForwardingAddress(forwarded_map);
}
CHECK(forwarded_map.IsMap(cage_base));
}
@@ -1144,7 +1178,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
if (!HAS_SMI_TAG(compressed_value)) {
was_compressed = slot_contents <= 0xFFFFFFFF;
// We don't need to update smi values.
- *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer(
+ *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged(
cage_base, compressed_value);
}
}
@@ -1155,7 +1189,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v,
// Restore compression. Generated code should be able to trust that
// compressed spill slots remain compressed.
*spill_slot.location() =
- V8HeapCompressionScheme::CompressTagged(*spill_slot.location());
+ V8HeapCompressionScheme::CompressObject(*spill_slot.location());
}
#endif
}
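The spill-slot visiting code above only decompresses a slot when its upper half is empty and restores the compressed form afterwards, so full pointers are never corrupted. Here is a small standalone sketch of that pattern; the cage base, compression scheme, and helper names are assumptions for illustration, not V8's implementation.
// Sketch: a slot may hold a full pointer or a 32-bit compressed value.
#include <cstdint>
#include <iostream>
constexpr uint64_t kCageBase = 0x00007f0000000000ULL;  // assumed cage base
uint64_t Decompress(uint64_t base, uint32_t compressed) { return base + compressed; }
uint32_t Compress(uint64_t full) { return static_cast<uint32_t>(full); }
void VisitSpillSlot(uint64_t* slot) {
  uint64_t value = *slot;
  bool was_compressed = value <= 0xffffffffULL;  // upper half empty => compressed
  if (was_compressed) {
    *slot = Decompress(kCageBase, static_cast<uint32_t>(value));
  }
  // ... a visitor would inspect or update *slot here ...
  if (was_compressed) {
    // Generated code expects compressed slots to stay compressed.
    *slot = Compress(*slot);
  }
}
int main() {
  uint64_t slot = 0x12345678ULL;
  VisitSpillSlot(&slot);
  std::cout << std::hex << slot << "\n";  // still 0x12345678
}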
@@ -1180,11 +1214,12 @@ SafepointEntry GetSafepointEntryFromCodeCache(
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry) {
if (!entry->safepoint_entry.is_initialized()) {
entry->safepoint_entry =
- entry->code.GetSafepointEntry(isolate, inner_pointer);
+ SafepointTable::FindEntry(isolate, entry->code.value(), inner_pointer);
DCHECK(entry->safepoint_entry.is_initialized());
} else {
- DCHECK_EQ(entry->safepoint_entry,
- entry->code.GetSafepointEntry(isolate, inner_pointer));
+ DCHECK_EQ(
+ entry->safepoint_entry,
+ SafepointTable::FindEntry(isolate, entry->code.value(), inner_pointer));
}
return entry->safepoint_entry;
}
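The helper above memoizes the safepoint lookup per cache entry and, in debug builds, re-derives it to cross-check the cached value. A generic sketch of that pattern, with placeholder types standing in for the safepoint machinery:
// Sketch: compute a derived value at most once per entry; verify in debug.
#include <cassert>
#include <iostream>
#include <optional>
struct Entry {
  std::optional<int> safepoint;  // stands in for the cached SafepointEntry
};
int ExpensiveFind(int inner_pointer) { return inner_pointer / 16; }  // placeholder
int GetOrCompute(Entry& entry, int inner_pointer) {
  if (!entry.safepoint.has_value()) {
    entry.safepoint = ExpensiveFind(inner_pointer);
  } else {
    assert(*entry.safepoint == ExpensiveFind(inner_pointer));
  }
  return *entry.safepoint;
}
int main() {
  Entry entry;
  std::cout << GetOrCompute(entry, 64) << "\n";  // computed once
  std::cout << GetOrCompute(entry, 64) << "\n";  // served from the entry
}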
@@ -1193,12 +1228,13 @@ MaglevSafepointEntry GetMaglevSafepointEntryFromCodeCache(
Isolate* isolate, Address inner_pointer,
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry) {
if (!entry->maglev_safepoint_entry.is_initialized()) {
- entry->maglev_safepoint_entry =
- entry->code.GetMaglevSafepointEntry(isolate, inner_pointer);
+ entry->maglev_safepoint_entry = MaglevSafepointTable::FindEntry(
+ isolate, entry->code.value(), inner_pointer);
DCHECK(entry->maglev_safepoint_entry.is_initialized());
} else {
DCHECK_EQ(entry->maglev_safepoint_entry,
- entry->code.GetMaglevSafepointEntry(isolate, inner_pointer));
+ MaglevSafepointTable::FindEntry(isolate, entry->code.value(),
+ inner_pointer));
}
return entry->maglev_safepoint_entry;
}
@@ -1207,9 +1243,7 @@ MaglevSafepointEntry GetMaglevSafepointEntryFromCodeCache(
#ifdef V8_ENABLE_WEBASSEMBLY
void WasmFrame::Iterate(RootVisitor* v) const {
- // Make sure that we're not doing "safe" stack frame iteration. We cannot
- // possibly find pointers in optimized frames in that state.
- DCHECK(can_access_heap_objects());
+ DCHECK(!iterator_->IsStackFrameIteratorForProfiler());
// === WasmFrame ===
// +-----------------+-----------------------------------------
@@ -1261,7 +1295,7 @@ void WasmFrame::Iterate(RootVisitor* v) const {
"WasmExitFrame has one slot more than WasmFrame");
int frame_header_size = WasmFrameConstants::kFixedFrameSizeFromFp;
- if (wasm_code->is_liftoff() && v8_flags.wasm_speculative_inlining) {
+ if (wasm_code->is_liftoff() && wasm_code->frame_has_feedback_slot()) {
// Frame has Wasm feedback slot.
frame_header_size += kSystemPointerSize;
}
@@ -1314,9 +1348,7 @@ void WasmFrame::Iterate(RootVisitor* v) const {
#endif // V8_ENABLE_WEBASSEMBLY
void TypedFrame::Iterate(RootVisitor* v) const {
- // Make sure that we're not doing "safe" stack frame iteration. We cannot
- // possibly find pointers in optimized frames in that state.
- DCHECK(can_access_heap_objects());
+ DCHECK(!iterator_->IsStackFrameIteratorForProfiler());
// === TypedFrame ===
// +-----------------+-----------------------------------------
@@ -1341,8 +1373,9 @@ void TypedFrame::Iterate(RootVisitor* v) const {
Address inner_pointer = pc();
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
- CHECK(entry->code.IsFound());
- DCHECK(entry->code.is_turbofanned());
+ CHECK(entry->code.has_value());
+ GcSafeCode code = entry->code.value();
+ DCHECK(code.is_turbofanned());
SafepointEntry safepoint_entry =
GetSafepointEntryFromCodeCache(isolate(), inner_pointer, entry);
@@ -1355,7 +1388,7 @@ void TypedFrame::Iterate(RootVisitor* v) const {
// Determine the fixed header and spill slot area size.
int frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
int spill_slots_size =
- entry->code.stack_slots() * kSystemPointerSize -
+ code.stack_slots() * kSystemPointerSize -
(frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
// Fixed frame slots.
@@ -1368,13 +1401,13 @@ void TypedFrame::Iterate(RootVisitor* v) const {
spill_slots_size);
// Visit the rest of the parameters.
- if (HasTaggedOutgoingParams(entry->code)) {
+ if (HasTaggedOutgoingParams(code)) {
v->VisitRootPointers(Root::kStackRoots, nullptr, parameters_base,
parameters_limit);
}
// Visit pointer spill slots and locals.
- DCHECK_GE((entry->code.stack_slots() + kBitsPerByte) / kBitsPerByte,
+ DCHECK_GE((code.stack_slots() + kBitsPerByte) / kBitsPerByte,
safepoint_entry.tagged_slots().size());
VisitSpillSlots(isolate(), v, parameters_limit,
safepoint_entry.tagged_slots());
@@ -1384,13 +1417,11 @@ void TypedFrame::Iterate(RootVisitor* v) const {
frame_header_limit);
// Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), constant_pool_address(), entry->code);
+ IteratePc(v, pc_address(), constant_pool_address(), code);
}
void MaglevFrame::Iterate(RootVisitor* v) const {
- // Make sure that we're not doing "safe" stack frame iteration. We cannot
- // possibly find pointers in optimized frames in that state.
- DCHECK(can_access_heap_objects());
+ DCHECK(!iterator_->IsStackFrameIteratorForProfiler());
// === MaglevFrame ===
// +-----------------+-----------------------------------------
@@ -1431,8 +1462,9 @@ void MaglevFrame::Iterate(RootVisitor* v) const {
Address inner_pointer = pc();
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
- CHECK(entry->code.IsFound());
- DCHECK(entry->code.is_maglevved());
+ CHECK(entry->code.has_value());
+ GcSafeCode code = entry->code.value();
+ DCHECK(code.is_maglevved());
MaglevSafepointEntry maglev_safepoint_entry =
GetMaglevSafepointEntryFromCodeCache(isolate(), inner_pointer, entry);
@@ -1453,50 +1485,13 @@ void MaglevFrame::Iterate(RootVisitor* v) const {
uint32_t tagged_slot_count = maglev_safepoint_entry.num_tagged_slots();
uint32_t spill_slot_count =
tagged_slot_count + maglev_safepoint_entry.num_untagged_slots();
- DCHECK_EQ(entry->code.stack_slots(),
+ DCHECK_EQ(code.stack_slots(),
StandardFrameConstants::kFixedSlotCount +
maglev_safepoint_entry.num_tagged_slots() +
maglev_safepoint_entry.num_untagged_slots());
- // Check that our frame size is big enough for our spill slots and pushed
- // registers.
- intptr_t actual_frame_size = static_cast<intptr_t>(fp() - sp());
- intptr_t expected_frame_size_excl_outgoing_params =
- StandardFrameConstants::kFixedFrameSizeFromFp +
- (spill_slot_count + maglev_safepoint_entry.num_pushed_registers()) *
- kSystemPointerSize;
- if (actual_frame_size < expected_frame_size_excl_outgoing_params) {
- // If the frame size is smaller than the expected size, then we must be in
- // the stack guard in the prologue of the maglev function. This means that
- // we've set up the frame header, but not the spill slots yet.
-
- if (v8_flags.maglev_ool_prologue) {
- // DCHECK the frame setup under the above assumption. The
- // MaglevOutOfLinePrologue builtin creates an INTERNAL frame for the
- // StackGuardWithGap call (where extra slots and args are), so the MAGLEV
- // frame itself is exactly kFixedFrameSizeFromFp.
- DCHECK_EQ(actual_frame_size,
- StandardFrameConstants::kFixedFrameSizeFromFp);
- DCHECK_EQ(isolate()->c_function(),
- Runtime::FunctionForId(Runtime::kStackGuardWithGap)->entry);
- DCHECK_EQ(maglev_safepoint_entry.num_pushed_registers(), 0);
- } else {
- // DCHECK the frame setup under the above assumption. Include one extra
- // slot for the single argument into StackGuardWithGap, and another for
- // the saved new.target register.
- DCHECK_EQ(actual_frame_size,
- StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kSystemPointerSize);
- DCHECK_EQ(isolate()->c_function(),
- Runtime::FunctionForId(Runtime::kStackGuardWithGap)->entry);
- DCHECK_EQ(maglev_safepoint_entry.num_pushed_registers(), 0);
- }
- spill_slot_count = 0;
- tagged_slot_count = 0;
- }
-
// Visit the outgoing parameters if they are tagged.
- DCHECK(entry->code.has_tagged_outgoing_params());
+ DCHECK(code.has_tagged_outgoing_params());
FullObjectSlot parameters_base(&Memory<Address>(sp()));
FullObjectSlot parameters_limit =
frame_header_base - spill_slot_count -
@@ -1533,7 +1528,7 @@ void MaglevFrame::Iterate(RootVisitor* v) const {
frame_header_limit);
// Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), constant_pool_address(), entry->code);
+ IteratePc(v, pc_address(), constant_pool_address(), code);
}
BytecodeOffset MaglevFrame::GetBytecodeOffsetForOSR() const {
@@ -1547,7 +1542,7 @@ BytecodeOffset MaglevFrame::GetBytecodeOffsetForOSR() const {
return data.GetBytecodeOffset(deopt_index);
}
-bool CommonFrame::HasTaggedOutgoingParams(CodeLookupResult& code_lookup) const {
+bool CommonFrame::HasTaggedOutgoingParams(GcSafeCode code_lookup) const {
#if V8_ENABLE_WEBASSEMBLY
// With inlined JS-to-Wasm calls, we can be in an OptimizedFrame and
// directly call a Wasm function from JavaScript. In this case the
@@ -1561,20 +1556,14 @@ bool CommonFrame::HasTaggedOutgoingParams(CodeLookupResult& code_lookup) const {
}
HeapObject TurbofanStubWithContextFrame::unchecked_code() const {
- CodeLookupResult code_lookup = isolate()->FindCodeObject(pc());
- if (code_lookup.IsCodeDataContainer()) {
- return code_lookup.code_data_container();
- }
- if (code_lookup.IsCode()) {
- return code_lookup.code();
- }
- return {};
+ base::Optional<GcSafeCode> code_lookup =
+ isolate()->heap()->GcSafeTryFindCodeForInnerPointer(pc());
+ if (!code_lookup.has_value()) return {};
+ return code_lookup.value();
}
void CommonFrame::IterateTurbofanOptimizedFrame(RootVisitor* v) const {
- // Make sure that we're not doing "safe" stack frame iteration. We cannot
- // possibly find pointers in optimized frames in that state.
- DCHECK(can_access_heap_objects());
+ DCHECK(!iterator_->IsStackFrameIteratorForProfiler());
// === TurbofanFrame ===
// +-----------------+-----------------------------------------
@@ -1603,8 +1592,9 @@ void CommonFrame::IterateTurbofanOptimizedFrame(RootVisitor* v) const {
Address inner_pointer = pc();
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
- CHECK(entry->code.IsFound());
- DCHECK(entry->code.is_turbofanned());
+ CHECK(entry->code.has_value());
+ GcSafeCode code = entry->code.value();
+ DCHECK(code.is_turbofanned());
SafepointEntry safepoint_entry =
GetSafepointEntryFromCodeCache(isolate(), inner_pointer, entry);
@@ -1618,7 +1608,7 @@ void CommonFrame::IterateTurbofanOptimizedFrame(RootVisitor* v) const {
// Determine the fixed header and spill slot area size.
int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
int spill_slot_count =
- entry->code.stack_slots() - StandardFrameConstants::kFixedSlotCount;
+ code.stack_slots() - StandardFrameConstants::kFixedSlotCount;
// Fixed frame slots.
FullObjectSlot frame_header_base(&Memory<Address>(fp() - frame_header_size));
@@ -1629,14 +1619,14 @@ void CommonFrame::IterateTurbofanOptimizedFrame(RootVisitor* v) const {
FullObjectSlot parameters_limit = frame_header_base - spill_slot_count;
// Visit the outgoing parameters if they are tagged.
- if (HasTaggedOutgoingParams(entry->code)) {
+ if (HasTaggedOutgoingParams(code)) {
v->VisitRootPointers(Root::kStackRoots, nullptr, parameters_base,
parameters_limit);
}
// Spill slots are in the region ]frame_header_base, parameters_limit];
// Visit pointer spill slots and locals.
- DCHECK_GE((entry->code.stack_slots() + kBitsPerByte) / kBitsPerByte,
+ DCHECK_GE((code.stack_slots() + kBitsPerByte) / kBitsPerByte,
safepoint_entry.tagged_slots().size());
VisitSpillSlots(isolate(), v, parameters_limit,
safepoint_entry.tagged_slots());
@@ -1647,7 +1637,7 @@ void CommonFrame::IterateTurbofanOptimizedFrame(RootVisitor* v) const {
frame_header_limit);
// Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), constant_pool_address(), entry->code);
+ IteratePc(v, pc_address(), constant_pool_address(), code);
}
void TurbofanStubWithContextFrame::Iterate(RootVisitor* v) const {
@@ -1659,21 +1649,17 @@ void TurbofanFrame::Iterate(RootVisitor* v) const {
}
HeapObject StubFrame::unchecked_code() const {
- CodeLookupResult code_lookup = isolate()->FindCodeObject(pc());
- if (code_lookup.IsCodeDataContainer()) {
- return code_lookup.code_data_container();
- }
- if (code_lookup.IsCode()) {
- return code_lookup.code();
- }
- return {};
+ base::Optional<GcSafeCode> code_lookup =
+ isolate()->heap()->GcSafeTryFindCodeForInnerPointer(pc());
+ if (!code_lookup.has_value()) return {};
+ return code_lookup.value();
}
int StubFrame::LookupExceptionHandlerInTable() {
- CodeLookupResult code = LookupCodeT();
+ Code code = LookupCode();
DCHECK(code.is_turbofanned());
DCHECK_EQ(code.kind(), CodeKind::BUILTIN);
- HandlerTable table(code.codet());
+ HandlerTable table(code);
int pc_offset = code.GetOffsetFromInstructionStart(isolate(), pc());
return table.LookupReturn(pc_offset);
}
@@ -1686,19 +1672,12 @@ bool JavaScriptFrame::IsConstructor() const {
return IsConstructFrame(caller_fp());
}
-bool JavaScriptFrame::HasInlinedFrames() const {
- std::vector<SharedFunctionInfo> functions;
- GetFunctions(&functions);
- return functions.size() > 1;
-}
-
HeapObject CommonFrameWithJSLinkage::unchecked_code() const {
return function().code();
}
int TurbofanFrame::ComputeParametersCount() const {
- CodeLookupResult code = LookupCodeT();
- if (code.kind() == CodeKind::BUILTIN) {
+ if (GcSafeLookupCode().kind() == CodeKind::BUILTIN) {
return static_cast<int>(
Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
kJSArgcReceiverSlots;
@@ -1735,9 +1714,10 @@ bool CommonFrameWithJSLinkage::IsConstructor() const {
void CommonFrameWithJSLinkage::Summarize(
std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- CodeLookupResult code = LookupCodeT();
+ GcSafeCode code = GcSafeLookupCode();
int offset = code.GetOffsetFromInstructionStart(isolate(), pc());
- Handle<AbstractCode> abstract_code(code.ToAbstractCode(), isolate());
+ Handle<AbstractCode> abstract_code(
+ AbstractCode::cast(code.UnsafeCastToCode()), isolate());
Handle<FixedArray> params = GetParameters();
FrameSummary::JavaScriptFrameSummary summary(
isolate(), receiver(), function(), *abstract_code, offset,
@@ -1774,7 +1754,7 @@ Script JavaScriptFrame::script() const {
int CommonFrameWithJSLinkage::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
if (DEBUG_BOOL) {
- CodeLookupResult code_lookup_result = LookupCodeT();
+ Code code_lookup_result = LookupCode();
CHECK(!code_lookup_result.has_handler_table());
CHECK(!code_lookup_result.is_optimized_code() ||
code_lookup_result.kind() == CodeKind::BASELINE);
@@ -1816,7 +1796,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
bool print_line_number) {
// constructor calls
DisallowGarbageCollection no_gc;
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
@@ -1833,8 +1813,8 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
code_offset = baseline_frame->GetBytecodeOffset();
abstract_code = AbstractCode::cast(baseline_frame->GetBytecodeArray());
} else {
- CodeLookupResult code = frame->LookupCodeT();
- code_offset = code.GetOffsetFromInstructionStart(isolate, frame->pc());
+ code_offset = frame->LookupCode().GetOffsetFromInstructionStart(
+ isolate, frame->pc());
}
PrintFunctionAndOffset(function, abstract_code, code_offset, file,
print_line_number);
@@ -1884,7 +1864,7 @@ Object CommonFrameWithJSLinkage::GetParameter(int index) const {
}
int CommonFrameWithJSLinkage::ComputeParametersCount() const {
- DCHECK(can_access_heap_objects() &&
+ DCHECK(!iterator_->IsStackFrameIteratorForProfiler() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
return function().shared().internal_formal_parameter_count_without_receiver();
}
@@ -2038,28 +2018,25 @@ FrameSummary::JavaScriptFrameSummary::CreateStackFrameInfo() const {
#if V8_ENABLE_WEBASSEMBLY
FrameSummary::WasmFrameSummary::WasmFrameSummary(
Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::WasmCode* code,
- int code_offset, bool at_to_number_conversion)
+ int byte_offset, int function_index, bool at_to_number_conversion)
: FrameSummaryBase(isolate, WASM),
wasm_instance_(instance),
at_to_number_conversion_(at_to_number_conversion),
code_(code),
- code_offset_(code_offset) {}
+ byte_offset_(byte_offset),
+ function_index_(function_index) {}
Handle<Object> FrameSummary::WasmFrameSummary::receiver() const {
return wasm_instance_->GetIsolate()->global_proxy();
}
uint32_t FrameSummary::WasmFrameSummary::function_index() const {
- return code()->index();
-}
-
-int FrameSummary::WasmFrameSummary::byte_offset() const {
- return code_->GetSourcePositionBefore(code_offset());
+ return function_index_;
}
int FrameSummary::WasmFrameSummary::SourcePosition() const {
const wasm::WasmModule* module = wasm_instance()->module_object().module();
- return GetSourcePosition(module, function_index(), byte_offset(),
+ return GetSourcePosition(module, function_index(), code_offset(),
at_to_number_conversion());
}
@@ -2158,7 +2135,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// Delegate to JS frame in absence of deoptimization info.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- CodeLookupResult code = LookupCodeT();
+ GcSafeCode code = GcSafeLookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return JavaScriptFrame::Summarize(frames);
}
@@ -2173,7 +2150,17 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// summary which is a bit more aware of maglev behaviour and can e.g. handle
// more compact safepointed frame information for both function entry and
// loop stack checks.
- if (code.is_maglevved()) {
+ //
+ // TODO(7748): For JS functions containing inlined wasm, we need support for
+ // creating a frame summary for the wasm function as well, which is needed
+ // for wasm trap stack traces. Also, the current hack does not preserve the
+ // code position in the JavaScript frame.
+ if (code.is_maglevved()
+#if V8_ENABLE_WEBASSEMBLY
+ || ((code.kind() == CodeKind::TURBOFAN) &&
+ v8_flags.experimental_wasm_js_inlining)
+#endif
+ ) {
DCHECK(frames->empty());
Handle<AbstractCode> abstract_code(
AbstractCode::cast(function().shared().GetBytecodeArray(isolate())),
@@ -2228,10 +2215,9 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
it->kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
code_offset = 0;
- abstract_code = ToAbstractCode(
- isolate()->builtins()->code_handle(
- Builtins::GetBuiltinFromBytecodeOffset(it->bytecode_offset())),
- isolate());
+ abstract_code =
+ Handle<AbstractCode>::cast(isolate()->builtins()->code_handle(
+ Builtins::GetBuiltinFromBytecodeOffset(it->bytecode_offset())));
} else {
DCHECK_EQ(it->kind(), TranslatedFrame::kUnoptimizedFunction);
code_offset = it->bytecode_offset().ToInt();
@@ -2260,7 +2246,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
// to use FrameSummary to find the corresponding code offset in unoptimized
// code to perform prediction there.
DCHECK_NULL(prediction);
- CodeT code = LookupCodeT().ToCodeT();
+ Code code = LookupCode();
HandlerTable table(code);
if (table.NumberOfReturnEntries() == 0) return -1;
@@ -2277,15 +2263,14 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
return table.LookupReturn(pc_offset);
}
-int MaglevFrame::FindReturnPCForTrampoline(CodeT code,
- int trampoline_pc) const {
+int MaglevFrame::FindReturnPCForTrampoline(Code code, int trampoline_pc) const {
DCHECK_EQ(code.kind(), CodeKind::MAGLEV);
DCHECK(code.marked_for_deoptimization());
MaglevSafepointTable safepoints(isolate(), pc(), code);
return safepoints.find_return_pc(trampoline_pc);
}
-int TurbofanFrame::FindReturnPCForTrampoline(CodeT code,
+int TurbofanFrame::FindReturnPCForTrampoline(Code code,
int trampoline_pc) const {
DCHECK_EQ(code.kind(), CodeKind::TURBOFAN);
DCHECK(code.marked_for_deoptimization());
@@ -2298,16 +2283,15 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
DCHECK(is_optimized());
JSFunction opt_function = function();
- CodeT code = opt_function.code();
+ Code code = opt_function.code();
- // The code object may have been replaced by lazy deoptimization. Fall
- // back to a slow search in this case to find the original optimized
- // code object.
+ // The code object may have been replaced by lazy deoptimization. Fall back
+ // to a slow search in this case to find the original optimized code object.
if (!code.contains(isolate(), pc())) {
- CodeLookupResult lookup_result =
- isolate()->heap()->GcSafeFindCodeForInnerPointer(pc());
- CHECK(lookup_result.IsFound());
- code = lookup_result.ToCodeT();
+ code = isolate()
+ ->heap()
+ ->GcSafeFindCodeForInnerPointer(pc())
+ .UnsafeCastToCode();
}
DCHECK(!code.is_null());
DCHECK(CodeKindCanDeoptimize(code.kind()));
@@ -2337,7 +2321,7 @@ void OptimizedFrame::GetFunctions(
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- CodeLookupResult code = LookupCodeT();
+ Code code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return JavaScriptFrame::GetFunctions(functions);
}
@@ -2351,32 +2335,33 @@ void OptimizedFrame::GetFunctions(
TranslationArrayIterator it(data.TranslationByteArray(),
data.TranslationIndex(deopt_index).value());
- TranslationOpcode opcode = TranslationOpcodeFromInt(it.NextUnsigned());
- DCHECK_EQ(TranslationOpcode::BEGIN, opcode);
- it.Next(); // Skip frame count.
- int jsframe_count = it.Next();
- it.Next(); // Skip update feedback count.
+ TranslationOpcode opcode = it.NextOpcode();
+ DCHECK(TranslationOpcodeIsBegin(opcode));
+ it.NextOperand(); // Skip lookback distance.
+ it.NextOperand(); // Skip frame count.
+ int jsframe_count = it.NextOperand();
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
while (jsframe_count != 0) {
- opcode = TranslationOpcodeFromInt(it.NextUnsigned());
- if (opcode == TranslationOpcode::INTERPRETED_FRAME ||
+ opcode = it.NextOpcode();
+ if (opcode == TranslationOpcode::INTERPRETED_FRAME_WITH_RETURN ||
+ opcode == TranslationOpcode::INTERPRETED_FRAME_WITHOUT_RETURN ||
opcode == TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME ||
opcode == TranslationOpcode::
JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) {
- it.Next(); // Skip bailout id.
+ it.NextOperand(); // Skip bailout id.
jsframe_count--;
// The second operand of the frame points to the function.
- Object shared = literal_array.get(it.Next());
+ Object shared = literal_array.get(it.NextOperand());
functions->push_back(SharedFunctionInfo::cast(shared));
// Skip over remaining operands to advance to the next opcode.
- it.Skip(TranslationOpcodeOperandCount(opcode) - 2);
+ it.SkipOperands(TranslationOpcodeOperandCount(opcode) - 2);
} else {
// Skip over operands to advance to the next opcode.
- it.Skip(TranslationOpcodeOperandCount(opcode));
+ it.SkipOperands(TranslationOpcodeOperandCount(opcode));
}
}
}
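The loop above walks a stream of translation opcodes, extracts the operand it cares about for JavaScript frames, and skips the remaining operands to reach the next opcode. A simplified, self-contained sketch of that decoding pattern; the opcodes, operand counts, and stream layout are invented for illustration and do not match the real translation format.
#include <cstddef>
#include <iostream>
#include <vector>
enum Op { kBegin = 0, kJSFrame = 1, kOtherFrame = 2 };
size_t OperandCount(Op op) {
  switch (op) {
    case kBegin: return 2;       // e.g. frame count, js frame count
    case kJSFrame: return 3;     // e.g. bailout id, function index, height
    case kOtherFrame: return 1;
  }
  return 0;
}
int main() {
  // Stream: BEGIN(2, 1), OTHER_FRAME(7), JS_FRAME(bailout=4, function=42, height=3)
  std::vector<int> stream = {kBegin, 2, 1, kOtherFrame, 7, kJSFrame, 4, 42, 3};
  size_t i = 0;
  while (i < stream.size()) {
    Op op = static_cast<Op>(stream[i++]);
    if (op == kJSFrame) {
      int function_index = stream[i + 1];  // second operand names the function
      std::cout << "JS frame for function " << function_index << "\n";
    }
    i += OperandCount(op);  // skip all operands of this opcode
  }
}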
@@ -2464,12 +2449,12 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
}
int BaselineFrame::GetBytecodeOffset() const {
- Code code = LookupCodeT().code();
+ Code code = LookupCode();
return code.GetBytecodeOffsetForBaselinePC(this->pc(), GetBytecodeArray());
}
intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
- Code code = LookupCodeT().code();
+ Code code = LookupCode();
return code.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
GetBytecodeArray());
}
@@ -2500,7 +2485,7 @@ void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
return;
}
wasm::WasmCodeRefScope code_ref_scope;
- accumulator->Add("Wasm [");
+ accumulator->Add(is_wasm_to_js() ? "Wasm-to-JS [" : "Wasm [");
accumulator->PrintName(script().name());
Address instruction_start = wasm_code()->instruction_start();
base::Vector<const uint8_t> raw_func_name =
@@ -2549,14 +2534,14 @@ Script WasmFrame::script() const { return module_object().script(); }
int WasmFrame::position() const {
wasm::WasmCodeRefScope code_ref_scope;
const wasm::WasmModule* module = wasm_instance().module_object().module();
- return GetSourcePosition(module, function_index(), byte_offset(),
+ return GetSourcePosition(module, function_index(), generated_code_offset(),
at_to_number_conversion());
}
-int WasmFrame::byte_offset() const {
+int WasmFrame::generated_code_offset() const {
wasm::WasmCode* code = wasm_code();
int offset = static_cast<int>(pc() - code->instruction_start());
- return code->GetSourcePositionBefore(offset);
+ return code->GetSourceOffsetBefore(offset);
}
bool WasmFrame::is_inspectable() const {
@@ -2574,9 +2559,33 @@ void WasmFrame::Summarize(std::vector<FrameSummary>* functions) const {
wasm::WasmCode* code = wasm_code();
int offset = static_cast<int>(pc() - code->instruction_start());
Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
- FrameSummary::WasmFrameSummary summary(isolate(), instance, code, offset,
- at_to_number_conversion());
+ // Push regular non-inlined summary.
+ SourcePosition pos = code->GetSourcePositionBefore(offset);
+ bool at_conversion = at_to_number_conversion();
+ // Add summaries for each inlined function at the current location.
+ while (pos.isInlined()) {
+ // Use current pc offset as the code offset for inlined functions.
+ // This is not fully correct, but there is no real code offset for the stack
+ // frame of an inlined function, since an inlined function is not a true
+ // function with a defined start and end in the generated code.
+ //
+ const auto [func_index, caller_pos] =
+ code->GetInliningPosition(pos.InliningId());
+ FrameSummary::WasmFrameSummary summary(isolate(), instance, code,
+ pos.ScriptOffset(), func_index,
+ at_conversion);
+ functions->push_back(summary);
+ pos = caller_pos;
+ at_conversion = false;
+ }
+
+ int func_index = code->index();
+ FrameSummary::WasmFrameSummary summary(
+ isolate(), instance, code, pos.ScriptOffset(), func_index, at_conversion);
functions->push_back(summary);
+
+ // The caller has to be on top.
+ std::reverse(functions->begin(), functions->end());
}
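The summarization above collects frame summaries innermost-first while walking the inlining chain and then reverses them so the outer caller ends up first. A standalone sketch of that ordering, with an invented chain representation rather than the real wasm inlining metadata:
#include <algorithm>
#include <iostream>
#include <vector>
struct InlinedPosition {
  int function_index;
  int caller;  // index of the inlining caller, -1 for the outermost function
};
int main() {
  // f2 inlined into f1, which is inlined into f0.
  std::vector<InlinedPosition> chain = {{0, -1}, {1, 0}, {2, 1}};
  std::vector<int> summaries;
  int current = 2;  // the pc maps to the innermost inlined function
  while (current != -1) {
    summaries.push_back(chain[current].function_index);
    current = chain[current].caller;
  }
  std::reverse(summaries.begin(), summaries.end());  // caller ends up on top
  for (int f : summaries) std::cout << "function " << f << "\n";  // 0, 1, 2
}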
bool WasmFrame::at_to_number_conversion() const {
@@ -2588,7 +2597,7 @@ bool WasmFrame::at_to_number_conversion() const {
: nullptr;
if (!code || code->kind() != wasm::WasmCode::kWasmToJsWrapper) return false;
int offset = static_cast<int>(callee_pc() - code->instruction_start());
- int pos = code->GetSourcePositionBefore(offset);
+ int pos = code->GetSourceOffsetBefore(offset);
// The imported call has position 0, ToNumber has position 1.
// If there is no source position available, this is also not a ToNumber call.
DCHECK(pos == wasm::kNoCodePosition || pos == 0 || pos == 1);
@@ -2631,13 +2640,19 @@ void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
+WasmInstanceObject WasmToJsFrame::wasm_instance() const {
+ // WasmToJsFrames hold the {WasmApiFunctionRef} object in the instance slot.
+ // Load the instance from there.
+ const int offset = WasmFrameConstants::kWasmInstanceOffset;
+ Object func_ref_obj(Memory<Address>(fp() + offset));
+ WasmApiFunctionRef func_ref = WasmApiFunctionRef::cast(func_ref_obj);
+ return WasmInstanceObject::cast(func_ref.instance());
+}
+
void JsToWasmFrame::Iterate(RootVisitor* v) const {
- CodeLookupResult lookup_result = GetContainingCode(isolate(), pc());
- CHECK(lookup_result.IsFound());
-#ifdef DEBUG
- Builtin builtin = lookup_result.builtin_id();
- DCHECK_EQ(builtin, Builtin::kGenericJSToWasmWrapper);
-#endif // DEBUG
+ DCHECK_EQ(GetContainingCode(isolate(), pc())->builtin_id(),
+ Builtin::kGenericJSToWasmWrapper);
+
// GenericJSToWasmWrapper stack layout
// ------+-----------------+----------------------
// | return addr |
@@ -2910,7 +2925,7 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
}
void EntryFrame::Iterate(RootVisitor* v) const {
- IteratePc(v, pc_address(), constant_pool_address(), LookupCodeT());
+ IteratePc(v, pc_address(), constant_pool_address(), GcSafeLookupCode());
}
void CommonFrame::IterateExpressions(RootVisitor* v) const {
@@ -2933,11 +2948,11 @@ void CommonFrame::IterateExpressions(RootVisitor* v) const {
void JavaScriptFrame::Iterate(RootVisitor* v) const {
IterateExpressions(v);
- IteratePc(v, pc_address(), constant_pool_address(), LookupCodeT());
+ IteratePc(v, pc_address(), constant_pool_address(), GcSafeLookupCode());
}
void InternalFrame::Iterate(RootVisitor* v) const {
- CodeLookupResult code = LookupCodeT();
+ GcSafeCode code = GcSafeLookupCode();
IteratePc(v, pc_address(), constant_pool_address(), code);
// Internal frames typically do not receive any arguments, hence their stack
// only contains tagged pointers.
@@ -2973,12 +2988,18 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
- if (DEBUG_BOOL) {
- CodeLookupResult lookup_result =
- isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer);
- CHECK(lookup_result.IsFound());
- CHECK_EQ(entry->code, lookup_result);
- }
+ // Why this DCHECK holds is nontrivial:
+ //
+ // - the cache is filled lazily on calls to this function.
+ // - this function may be called while GC, and in particular
+ // MarkCompactCollector::UpdatePointersAfterEvacuation, is in progress.
+ // - the cache is cleared at the end of UpdatePointersAfterEvacuation.
+ // - now, why does pointer equality hold even during moving GC?
+ // - .. because GcSafeFindCodeForInnerPointer does not follow forwarding
+ // pointers and always returns the old object (which is still valid,
+ // *except* for the map_word).
+ DCHECK_EQ(entry->code,
+ isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer));
} else {
// Because this code may be interrupted by a profiling signal that
// also queries the cache, we cannot update inner_pointer before the code
@@ -2986,7 +3007,7 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
// the code has been computed.
entry->code =
isolate_->heap()->GcSafeFindCodeForInnerPointer(inner_pointer);
- if (entry->code.IsCode() && entry->code.code().is_maglevved()) {
+ if (entry->code->is_maglevved()) {
entry->maglev_safepoint_entry.Reset();
} else {
entry->safepoint_entry.Reset();
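The cache update above deliberately writes the code value before the key, so a profiling signal that races with the fill never observes a key paired with a stale value. A minimal sketch of such a direct-mapped inner-pointer cache; the size, hash, and slow lookup are placeholders, not V8's.
#include <array>
#include <cstdint>
#include <iostream>
constexpr size_t kCacheSize = 256;  // must be a power of two
struct CacheEntry {
  uintptr_t inner_pointer = 0;
  uintptr_t code_start = 0;  // stands in for the looked-up code object
};
std::array<CacheEntry, kCacheSize> cache;
uintptr_t SlowLookupCodeStart(uintptr_t pc) {
  return pc & ~uintptr_t{0xfff};  // placeholder for the heap walk
}
uintptr_t GetCodeStart(uintptr_t pc) {
  CacheEntry& entry = cache[(pc >> 4) & (kCacheSize - 1)];
  if (entry.inner_pointer != pc) {
    entry.code_start = SlowLookupCodeStart(pc);  // fill the value first ...
    entry.inner_pointer = pc;                    // ... then publish the key
  }
  return entry.code_start;
}
int main() {
  std::cout << std::hex << GetCodeStart(0x7f001234) << "\n";
  std::cout << std::hex << GetCodeStart(0x7f001234) << "\n";  // cache hit
}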
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 536b044100..03ab9d0e8d 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -46,7 +46,7 @@
// - WasmToJsFrame
// - WasmDebugBreakFrame
// - WasmLiftoffSetupFrame
-//
+// - IrregexpFrame
namespace v8 {
namespace internal {
@@ -126,7 +126,8 @@ class StackHandler {
V(CONSTRUCT, ConstructFrame) \
V(BUILTIN, BuiltinFrame) \
V(BUILTIN_EXIT, BuiltinExitFrame) \
- V(NATIVE, NativeFrame)
+ V(NATIVE, NativeFrame) \
+ V(IRREGEXP, IrregexpFrame)
// Abstract base class for all stack frames.
class StackFrame {
@@ -210,11 +211,10 @@ class StackFrame {
// Copy constructor; it breaks the connection to host iterator
// (as an iterator usually lives on stack).
- StackFrame(const StackFrame& original) V8_NOEXCEPT {
- this->state_ = original.state_;
- this->iterator_ = nullptr;
- this->isolate_ = original.isolate_;
- }
+ StackFrame(const StackFrame& original) V8_NOEXCEPT
+ : iterator_(nullptr),
+ isolate_(original.isolate_),
+ state_(original.state_) {}
// Type testers.
bool is_entry() const { return type() == ENTRY; }
@@ -255,6 +255,7 @@ class StackFrame {
}
bool is_construct() const { return type() == CONSTRUCT; }
bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
+ bool is_irregexp() const { return type() == IRREGEXP; }
static bool IsJavaScript(Type t) {
static_assert(INTERPRETED + 1 == BASELINE);
@@ -276,6 +277,7 @@ class StackFrame {
// in certain corner-cases we do not use an address on the stack, which would
// be signed, as the PC of the frame.
inline Address unauthenticated_pc() const;
+ static inline Address unauthenticated_pc(Address* pc_address);
Address constant_pool() const { return *constant_pool_address(); }
void set_constant_pool(Address constant_pool) {
@@ -297,20 +299,20 @@ class StackFrame {
// Get the type of this frame.
virtual Type type() const = 0;
- // Get the code associated with this frame. The result might be a Code object,
- // a CodeT object or an empty value.
+ // Get the code associated with this frame. The result might be a Code object
+ // or an empty value.
// This method is used by Isolate::PushStackTraceAndDie() for collecting a
// stack trace on fatal error and thus it might be called in the middle of GC
// and should be as safe as possible.
virtual HeapObject unchecked_code() const = 0;
// Search for the code associated with this frame.
- // TODO(v8:11880): rename to LookupCode()
- V8_EXPORT_PRIVATE CodeLookupResult LookupCodeT() const;
+ V8_EXPORT_PRIVATE Code LookupCode() const;
+ V8_EXPORT_PRIVATE GcSafeCode GcSafeLookupCode() const;
virtual void Iterate(RootVisitor* v) const = 0;
void IteratePc(RootVisitor* v, Address* pc_address,
- Address* constant_pool_address, CodeLookupResult holder) const;
+ Address* constant_pool_address, GcSafeCode holder) const;
// Sets a callback function for return-address rewriting profilers
// to resolve the location of a return address to the location of the
@@ -339,16 +341,10 @@ class StackFrame {
// Compute the stack pointer for the calling frame.
virtual Address GetCallerStackPointer() const = 0;
- // Compute the stack frame type for the given state.
- static Type ComputeType(const StackFrameIteratorBase* iterator, State* state);
-
-#ifdef DEBUG
- bool can_access_heap_objects() const;
-#endif
+ const StackFrameIteratorBase* const iterator_;
private:
- const StackFrameIteratorBase* iterator_;
- Isolate* isolate_;
+ Isolate* const isolate_;
State state_;
static ReturnAddressLocationResolver return_address_location_resolver_;
@@ -364,7 +360,7 @@ class StackFrame {
friend class StackFrameIterator;
friend class StackFrameIteratorBase;
friend class StackHandlerIterator;
- friend class SafeStackFrameIterator;
+ friend class StackFrameIteratorForProfiler;
};
class CommonFrame;
@@ -428,13 +424,14 @@ class V8_EXPORT_PRIVATE FrameSummary {
class WasmFrameSummary : public FrameSummaryBase {
public:
WasmFrameSummary(Isolate*, Handle<WasmInstanceObject>, wasm::WasmCode*,
- int code_offset, bool at_to_number_conversion);
+ int byte_offset, int function_index,
+ bool at_to_number_conversion);
Handle<Object> receiver() const;
uint32_t function_index() const;
wasm::WasmCode* code() const { return code_; }
- int code_offset() const { return code_offset_; }
- V8_EXPORT_PRIVATE int byte_offset() const;
+ // Returns the wire bytes offset relative to the function entry.
+ int code_offset() const { return byte_offset_; }
bool is_constructor() const { return false; }
bool is_subject_to_debugging() const { return true; }
int SourcePosition() const;
@@ -448,8 +445,9 @@ class V8_EXPORT_PRIVATE FrameSummary {
private:
Handle<WasmInstanceObject> wasm_instance_;
bool at_to_number_conversion_;
- wasm::WasmCode* const code_;
- int code_offset_;
+ wasm::WasmCode* code_;
+ int byte_offset_;
+ int function_index_;
};
#endif // V8_ENABLE_WEBASSEMBLY
@@ -509,8 +507,6 @@ class CommonFrame : public StackFrame {
inline void SetExpression(int index, Object value);
int ComputeExpressionsCount() const;
- bool HasTaggedOutgoingParams(CodeLookupResult& code_lookup) const;
-
Address GetCallerStackPointer() const override;
// Build a list with summaries for this frame including all inlined frames.
@@ -526,20 +522,14 @@ class CommonFrame : public StackFrame {
protected:
inline explicit CommonFrame(StackFrameIteratorBase* iterator);
+ bool HasTaggedOutgoingParams(GcSafeCode code_lookup) const;
+
void ComputeCallerState(State* state) const override;
// Accessors.
inline Address caller_fp() const;
inline Address caller_pc() const;
- // Computes the address of the PC field in the standard frame given
- // by the provided frame pointer.
- static inline Address ComputePCAddress(Address fp);
-
- // Computes the address of the constant pool field in the standard
- // frame given by the provided frame pointer.
- static inline Address ComputeConstantPoolAddress(Address fp);
-
// Iterate over expression stack including stack handlers, locals,
// and parts of the fixed part including context and code fields.
void IterateExpressions(RootVisitor* v) const;
@@ -548,10 +538,6 @@ class CommonFrame : public StackFrame {
// Returns the address of the n'th expression stack element.
virtual Address GetExpressionAddress(int n) const;
-
- private:
- friend class StackFrame;
- friend class SafeStackFrameIterator;
};
// This frame is used for TF-optimized code without JS linkage, but
@@ -643,10 +629,6 @@ class JavaScriptFrame : public CommonFrameWithJSLinkage {
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const override;
- // Determines whether this frame includes inlined activations. To get details
- // about the inlined frames use {GetFunctions} and {Summarize}.
- bool HasInlinedFrames() const;
-
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
@@ -857,8 +839,7 @@ class OptimizedFrame : public JavaScriptFrame {
int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction) override;
- virtual int FindReturnPCForTrampoline(CodeT code,
- int trampoline_pc) const = 0;
+ virtual int FindReturnPCForTrampoline(Code code, int trampoline_pc) const = 0;
protected:
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
@@ -969,10 +950,12 @@ class MaglevFrame : public OptimizedFrame {
void Iterate(RootVisitor* v) const override;
- int FindReturnPCForTrampoline(CodeT code, int trampoline_pc) const override;
+ int FindReturnPCForTrampoline(Code code, int trampoline_pc) const override;
BytecodeOffset GetBytecodeOffsetForOSR() const;
+ static intptr_t StackGuardFrameSize(int register_input_count);
+
protected:
inline explicit MaglevFrame(StackFrameIteratorBase* iterator);
@@ -988,7 +971,7 @@ class TurbofanFrame : public OptimizedFrame {
void Iterate(RootVisitor* v) const override;
- int FindReturnPCForTrampoline(CodeT code, int trampoline_pc) const override;
+ int FindReturnPCForTrampoline(Code code, int trampoline_pc) const override;
protected:
inline explicit TurbofanFrame(StackFrameIteratorBase* iterator);
@@ -1035,7 +1018,7 @@ class WasmFrame : public TypedFrame {
void Iterate(RootVisitor* v) const override;
// Accessors.
- V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const;
+ virtual V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const;
V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const;
wasm::WasmCode* wasm_code() const;
int function_index() const;
@@ -1044,8 +1027,8 @@ class WasmFrame : public TypedFrame {
int position() const override;
Object context() const override;
bool at_to_number_conversion() const;
- // Byte offset in the function.
- int byte_offset() const;
+ // Generated code byte offset in the function.
+ int generated_code_offset() const;
bool is_inspectable() const;
void Summarize(std::vector<FrameSummary>* frames) const override;
@@ -1101,6 +1084,9 @@ class WasmToJsFrame : public WasmFrame {
public:
Type type() const override { return WASM_TO_JS; }
+ int position() const override { return 0; }
+ WasmInstanceObject wasm_instance() const override;
+
protected:
inline explicit WasmToJsFrame(StackFrameIteratorBase* iterator);
@@ -1283,6 +1269,29 @@ class JavaScriptBuiltinContinuationWithCatchFrame
friend class StackFrameIteratorBase;
};
+class IrregexpFrame : public TypedFrame {
+ public:
+ Type type() const override { return IRREGEXP; }
+
+ void Iterate(RootVisitor* v) const override {
+ // Irregexp frames should not be visited by GC because they are not visible
+ // to any stack iterator except StackFrameIteratorForProfiler, which is not
+ // used by GC.
+ UNREACHABLE();
+ }
+
+ static IrregexpFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_irregexp());
+ return static_cast<IrregexpFrame*>(frame);
+ }
+
+ protected:
+ inline explicit IrregexpFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class StackFrameIteratorBase {
public:
StackFrameIteratorBase(const StackFrameIteratorBase&) = delete;
@@ -1292,17 +1301,25 @@ class StackFrameIteratorBase {
bool done() const { return frame_ == nullptr; }
+#ifdef DEBUG
+ // The StackFrameIteratorForProfiler is limited in functionality because it
+ // may run at an arbitrary point in time where stack contents are not
+ // guaranteed to be in a consistent state and heap accesses may be limited.
+ virtual bool IsStackFrameIteratorForProfiler() const = 0;
+#endif // DEBUG
+ virtual StackFrame::Type ComputeStackFrameType(
+ StackFrame::State* state) const = 0;
+
protected:
// An iterator that iterates over a given thread's stack.
- StackFrameIteratorBase(Isolate* isolate, bool can_access_heap_objects);
+ explicit StackFrameIteratorBase(Isolate* isolate);
- Isolate* isolate_;
+ Isolate* const isolate_;
#define DECLARE_SINGLETON(ignore, type) type type##_;
STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
#undef DECLARE_SINGLETON
StackFrame* frame_;
StackHandler* handler_;
- const bool can_access_heap_objects_;
StackHandler* handler() const {
DCHECK(!done());
@@ -1345,36 +1362,50 @@ class StackFrameIterator : public StackFrameIteratorBase {
void Reset(ThreadLocalTop* top, wasm::StackMemory* stack);
#endif
+#ifdef DEBUG
+ bool IsStackFrameIteratorForProfiler() const override { return false; }
+#endif // DEBUG
+ StackFrame::Type ComputeStackFrameType(
+ StackFrame::State* state) const override;
+
private:
// Go back to the first frame.
void Reset(ThreadLocalTop* top);
};
-// Iterator that supports iterating through all JavaScript frames.
-class JavaScriptFrameIterator {
+// A wrapper around StackFrameIterator that skips over all non-JS frames.
+class JavaScriptStackFrameIterator final {
public:
- inline explicit JavaScriptFrameIterator(Isolate* isolate);
- inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
-
- inline JavaScriptFrame* frame() const;
+ explicit JavaScriptStackFrameIterator(Isolate* isolate) : iterator_(isolate) {
+ if (!done()) Advance();
+ }
+ JavaScriptStackFrameIterator(Isolate* isolate, ThreadLocalTop* top)
+ : iterator_(isolate, top) {
+ if (!done()) Advance();
+ }
+ JavaScriptFrame* frame() const {
+ return JavaScriptFrame::cast(iterator_.frame());
+ }
+ JavaScriptFrame* Reframe() {
+ return JavaScriptFrame::cast(iterator_.Reframe());
+ }
bool done() const { return iterator_.done(); }
+
V8_EXPORT_PRIVATE void Advance();
- void AdvanceOneFrame() { iterator_.Advance(); }
- inline JavaScriptFrame* Reframe();
private:
StackFrameIterator iterator_;
};
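The class above is a thin wrapper that keeps advancing the underlying iterator until the current frame passes a filter. A generic, self-contained sketch of that wrapper pattern, using stand-in types rather than the V8 classes:
#include <iostream>
#include <vector>
struct Frame { bool is_js; int id; };
class AllFramesIterator {
 public:
  explicit AllFramesIterator(const std::vector<Frame>& frames) : frames_(frames) {}
  bool done() const { return index_ >= frames_.size(); }
  const Frame& frame() const { return frames_[index_]; }
  void Advance() { ++index_; }
 private:
  const std::vector<Frame>& frames_;
  size_t index_ = 0;
};
class JsFramesIterator {
 public:
  explicit JsFramesIterator(const std::vector<Frame>& frames) : it_(frames) {
    SkipNonJs();  // position on the first matching frame
  }
  bool done() const { return it_.done(); }
  const Frame& frame() const { return it_.frame(); }
  void Advance() { it_.Advance(); SkipNonJs(); }
 private:
  void SkipNonJs() { while (!it_.done() && !it_.frame().is_js) it_.Advance(); }
  AllFramesIterator it_;
};
int main() {
  std::vector<Frame> stack = {{false, 0}, {true, 1}, {false, 2}, {true, 3}};
  for (JsFramesIterator it(stack); !it.done(); it.Advance()) {
    std::cout << "JS frame " << it.frame().id << "\n";  // prints 1 and 3
  }
}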
-// NOTE: The stack trace frame iterator is an iterator that only traverse proper
-// JavaScript frames that have proper JavaScript functions and WebAssembly
-// frames.
-class V8_EXPORT_PRIVATE StackTraceFrameIterator {
+// A wrapper around StackFrameIterator that skips over all non-debuggable
+// frames (i.e. it iterates over Wasm and debuggable JS frames).
+class V8_EXPORT_PRIVATE DebuggableStackFrameIterator {
public:
- explicit StackTraceFrameIterator(Isolate* isolate);
+ explicit DebuggableStackFrameIterator(Isolate* isolate);
// Skip frames until the frame with the given id is reached.
- StackTraceFrameIterator(Isolate* isolate, StackFrameId id);
+ DebuggableStackFrameIterator(Isolate* isolate, StackFrameId id);
+
bool done() const { return iterator_.done(); }
void Advance();
void AdvanceOneFrame() { iterator_.Advance(); }
@@ -1390,25 +1421,34 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
inline JavaScriptFrame* javascript_frame() const;
// Use this instead of FrameSummary::GetTop(javascript_frame) to keep
- // filtering behavior consistent with the rest of StackTraceFrameIterator.
+ // filtering behavior consistent with the rest of
+ // DebuggableStackFrameIterator.
FrameSummary GetTopValidFrame() const;
private:
StackFrameIterator iterator_;
static bool IsValidFrame(StackFrame* frame);
- static bool IsValidJSFunction(JSFunction f);
};
-class SafeStackFrameIterator : public StackFrameIteratorBase {
+// Similar to StackFrameIterator, but can be created and used at any time and
+// any stack state. Currently, the only user is the profiler; if this ever
+// changes, find another name for this class.
+class V8_EXPORT_PRIVATE StackFrameIteratorForProfiler
+ : public StackFrameIteratorBase {
public:
- SafeStackFrameIterator(Isolate* isolate, Address pc, Address fp, Address sp,
- Address lr, Address js_entry_sp);
+ StackFrameIteratorForProfiler(Isolate* isolate, Address pc, Address fp,
+ Address sp, Address lr, Address js_entry_sp);
inline StackFrame* frame() const;
void Advance();
StackFrame::Type top_frame_type() const { return top_frame_type_; }
- Address top_context_address() const { return top_context_address_; }
+
+#ifdef DEBUG
+ bool IsStackFrameIteratorForProfiler() const override { return true; }
+#endif // DEBUG
+ StackFrame::Type ComputeStackFrameType(
+ StackFrame::State* state) const override;
private:
void AdvanceOneFrame();
@@ -1420,6 +1460,7 @@ class SafeStackFrameIterator : public StackFrameIteratorBase {
bool IsValidCaller(StackFrame* frame);
bool IsValidExitFrame(Address fp) const;
bool IsValidTop(ThreadLocalTop* top) const;
+ static bool IsValidFrameType(StackFrame::Type type);
// Returns true if the pc points to a bytecode handler and the frame pointer
// doesn't seem to be a bytecode handler's frame, which implies that the
@@ -1432,7 +1473,6 @@ class SafeStackFrameIterator : public StackFrameIteratorBase {
const Address low_bound_;
const Address high_bound_;
StackFrame::Type top_frame_type_;
- Address top_context_address_;
ExternalCallbackScope* external_callback_scope_;
Address top_link_register_;
};
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 2c29b34db2..cae89d616f 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -253,7 +253,7 @@ void AtomicsWaitWakeHandle::Wake() {
isolate_->futex_wait_list_node()->NotifyWake();
}
-enum WaitReturnValue : int { kOk = 0, kNotEqual = 1, kTimedOut = 2 };
+enum WaitReturnValue : int { kOk = 0, kNotEqualValue = 1, kTimedOut = 2 };
namespace {
@@ -263,7 +263,7 @@ Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
switch (val) {
case WaitReturnValue::kOk:
return ReadOnlyRoots(isolate).ok_string();
- case WaitReturnValue::kNotEqual:
+ case WaitReturnValue::kNotEqualValue:
return ReadOnlyRoots(isolate).not_equal_string();
case WaitReturnValue::kTimedOut:
return ReadOnlyRoots(isolate).timed_out_string();
@@ -408,7 +408,7 @@ Object FutexEmulation::WaitSync(Isolate* isolate,
}
#endif
if (loaded_value != value) {
- result = handle(Smi::FromInt(WaitReturnValue::kNotEqual), isolate);
+ result = handle(Smi::FromInt(WaitReturnValue::kNotEqualValue), isolate);
callback_result = AtomicsWaitEvent::kNotEqual;
break;
}
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.cc b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
index 16e9e75a44..bdf48e2b8f 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
@@ -27,6 +27,12 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.h b/deps/v8/src/execution/ia32/frame-constants-ia32.h
index fec118d612..57ac1268ee 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.h
@@ -17,7 +17,7 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -6 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -6 * kSystemPointerSize;
// EntryFrame is used by JSEntry, JSConstructEntry and JSRunMicrotasksEntry.
// All of them take |root_register_value| as the first parameter.
diff --git a/deps/v8/src/execution/interrupts-scope.cc b/deps/v8/src/execution/interrupts-scope.cc
index 7bf9821685..799b8a6ecd 100644
--- a/deps/v8/src/execution/interrupts-scope.cc
+++ b/deps/v8/src/execution/interrupts-scope.cc
@@ -9,15 +9,6 @@
namespace v8 {
namespace internal {
-InterruptsScope::InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
- Mode mode)
- : stack_guard_(isolate->stack_guard()),
- intercept_mask_(intercept_mask),
- intercepted_flags_(0),
- mode_(mode) {
- if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
-}
-
bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
InterruptsScope* last_postpone_scope = nullptr;
for (InterruptsScope* current = this; current; current = current->prev_) {
diff --git a/deps/v8/src/execution/interrupts-scope.h b/deps/v8/src/execution/interrupts-scope.h
index 19611142b0..29de448ad6 100644
--- a/deps/v8/src/execution/interrupts-scope.h
+++ b/deps/v8/src/execution/interrupts-scope.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_INTERRUPTS_SCOPE_H_
#define V8_EXECUTION_INTERRUPTS_SCOPE_H_
+#include "src/execution/isolate.h"
#include "src/execution/stack-guard.h"
namespace v8 {
@@ -16,13 +17,24 @@ class Isolate;
// not affect other interrupts.
class V8_NODISCARD InterruptsScope {
public:
- enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
+ enum Mode : byte { kPostponeInterrupts, kRunInterrupts, kNoop };
- V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
- Mode mode);
+ V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, uint32_t intercept_mask,
+ Mode mode)
+ : stack_guard_(nullptr),
+ intercept_mask_(intercept_mask),
+ intercepted_flags_(0),
+ mode_(mode) {
+ if (mode_ != kNoop) {
+ stack_guard_ = isolate->stack_guard();
+ stack_guard_->PushInterruptsScope(this);
+ }
+ }
- virtual ~InterruptsScope() {
- if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
+ ~InterruptsScope() {
+ if (mode_ != kNoop) {
+ stack_guard_->PopInterruptsScope();
+ }
}
// Find the scope that intercepts this interrupt.
@@ -33,10 +45,10 @@ class V8_NODISCARD InterruptsScope {
private:
StackGuard* stack_guard_;
- intptr_t intercept_mask_;
- intptr_t intercepted_flags_;
- Mode mode_;
InterruptsScope* prev_;
+ const uint32_t intercept_mask_;
+ uint32_t intercepted_flags_;
+ const Mode mode_;
friend class StackGuard;
};
@@ -47,11 +59,10 @@ class V8_NODISCARD InterruptsScope {
// account.
class V8_NODISCARD PostponeInterruptsScope : public InterruptsScope {
public:
- PostponeInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ explicit PostponeInterruptsScope(
+ Isolate* isolate, uint32_t intercept_mask = StackGuard::ALL_INTERRUPTS)
: InterruptsScope(isolate, intercept_mask,
InterruptsScope::kPostponeInterrupts) {}
- ~PostponeInterruptsScope() override = default;
};
// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
@@ -59,11 +70,10 @@ class V8_NODISCARD PostponeInterruptsScope : public InterruptsScope {
// PostponeInterruptsScopes.
class V8_NODISCARD SafeForInterruptsScope : public InterruptsScope {
public:
- SafeForInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ explicit SafeForInterruptsScope(
+ Isolate* isolate, uint32_t intercept_mask = StackGuard::ALL_INTERRUPTS)
: InterruptsScope(isolate, intercept_mask,
InterruptsScope::kRunInterrupts) {}
- ~SafeForInterruptsScope() override = default;
};
} // namespace internal
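
The interrupts-scope changes above inline the constructor, drop the virtual destructor, and keep the push-on-construct / pop-on-destruct discipline, skipping registration entirely in kNoop mode. A simplified, self-contained sketch of that RAII pattern (illustrative types only, not the V8 classes):

#include <cassert>
#include <cstdint>
#include <vector>

class Guard {
 public:
  void Push(void* scope) { scopes_.push_back(scope); }
  void Pop() { assert(!scopes_.empty()); scopes_.pop_back(); }
 private:
  std::vector<void*> scopes_;
};

class ScopedInterceptor {
 public:
  enum Mode : uint8_t { kActive, kNoop };
  ScopedInterceptor(Guard* guard, Mode mode) : guard_(nullptr), mode_(mode) {
    if (mode_ != kNoop) {
      guard_ = guard;     // Only touch the guard when the scope is active.
      guard_->Push(this);
    }
  }
  ~ScopedInterceptor() {
    if (mode_ != kNoop) guard_->Pop();
  }
 private:
  Guard* guard_;
  const Mode mode_;
};

int main() {
  Guard guard;
  { ScopedInterceptor active(&guard, ScopedInterceptor::kActive); }
  { ScopedInterceptor noop(&guard, ScopedInterceptor::kNoop); }  // Never registers.
}
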
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index 9886287fe0..9961be3ff5 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -28,10 +28,11 @@ class Isolate;
V(kStackGuardOffset, StackGuard::kSizeInBytes, stack_guard) \
V(kIsMarkingFlag, kUInt8Size, is_marking_flag) \
V(kIsMinorMarkingFlag, kUInt8Size, is_minor_marking_flag) \
+ V(kIsSharedSpaceIsolateFlag, kUInt8Size, is_shared_space_isolate_flag) \
+ V(kUsesSharedHeapFlag, kUInt8Size, uses_shared_heap_flag) \
V(kIsProfilingOffset, kUInt8Size, is_profiling) \
V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable) \
- IF_TARGET_ARCH_64_BIT(V, kTablesAlignmentPaddingOffset, \
- kSystemPointerSize - 4, tables_alignment_padding) \
+ V(kTablesAlignmentPaddingOffset, 2, tables_alignment_padding) \
/* Tier 0 tables (small but fast access). */ \
V(kBuiltinTier0EntryTableOffset, \
Builtins::kBuiltinTier0Count* kSystemPointerSize, \
@@ -39,26 +40,27 @@ class Isolate;
V(kBuiltinsTier0TableOffset, \
Builtins::kBuiltinTier0Count* kSystemPointerSize, builtin_tier0_table) \
/* Misc. fields. */ \
- V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize, \
- embedder_data) \
+ V(kNewAllocationInfoOffset, LinearAllocationArea::kSize, \
+ new_allocation_info) \
+ V(kOldAllocationInfoOffset, LinearAllocationArea::kSize, \
+ old_allocation_info) \
V(kFastCCallCallerFPOffset, kSystemPointerSize, fast_c_call_caller_fp) \
V(kFastCCallCallerPCOffset, kSystemPointerSize, fast_c_call_caller_pc) \
V(kFastApiCallTargetOffset, kSystemPointerSize, fast_api_call_target) \
V(kLongTaskStatsCounterOffset, kSizetSize, long_task_stats_counter) \
+ V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes, thread_local_top) \
+ V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize, \
+ embedder_data) \
ISOLATE_DATA_FIELDS_POINTER_COMPRESSION(V) \
/* Full tables (arbitrary size, potentially slower access). */ \
V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize, \
roots_table) \
V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes, \
external_reference_table) \
- V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes, thread_local_top) \
V(kBuiltinEntryTableOffset, Builtins::kBuiltinCount* kSystemPointerSize, \
builtin_entry_table) \
V(kBuiltinTableOffset, Builtins::kBuiltinCount* kSystemPointerSize, \
- builtin_table) \
- /* Linear allocation areas for the heap's new and old space */ \
- V(kNewAllocationInfo, LinearAllocationArea::kSize, new_allocation_info) \
- V(kOldAllocationInfo, LinearAllocationArea::kSize, old_allocation_info)
+ builtin_table)
#ifdef V8_COMPRESS_POINTERS
#define ISOLATE_DATA_FIELDS_POINTER_COMPRESSION(V) \
@@ -138,7 +140,10 @@ class IsolateData final {
ThreadLocalTop const& thread_local_top() const { return thread_local_top_; }
Address* builtin_entry_table() { return builtin_entry_table_; }
Address* builtin_table() { return builtin_table_; }
- uint8_t stack_is_iterable() const { return stack_is_iterable_; }
+ bool stack_is_iterable() const {
+ DCHECK(stack_is_iterable_ == 0 || stack_is_iterable_ == 1);
+ return stack_is_iterable_ != 0;
+ }
// Returns true if this address points to data stored in this instance. If
// it's the case then the value can be accessed indirectly through the root
@@ -182,6 +187,8 @@ class IsolateData final {
// Only valid values are 0 or 1.
uint8_t is_marking_flag_ = false;
uint8_t is_minor_marking_flag_ = false;
+ uint8_t is_shared_space_isolate_flag_ = false;
+ uint8_t uses_shared_heap_flag_ = false;
// true if the Isolate is being profiled. Causes collection of extra compile
// info.
@@ -194,33 +201,27 @@ class IsolateData final {
// builtin entry table to kSystemPointerSize anyway.
//
- // Whether the SafeStackFrameIterator can successfully iterate the current
- // stack. Only valid values are 0 or 1.
+ // Whether the StackFrameIteratorForProfiler can successfully iterate the
+ // current stack. The only valid values are 0 or 1.
uint8_t stack_is_iterable_ = 1;
-#if V8_TARGET_ARCH_64_BIT
// Ensure the following tables are kSystemPointerSize-byte aligned.
- // 32-bit architectures currently don't require the alignment.
static_assert(FIELD_SIZE(kTablesAlignmentPaddingOffset) > 0);
uint8_t tables_alignment_padding_[FIELD_SIZE(kTablesAlignmentPaddingOffset)];
-#endif // V8_TARGET_ARCH_64_BIT
// Tier 0 tables. See also builtin_entry_table_ and builtin_table_.
Address builtin_tier0_entry_table_[Builtins::kBuiltinTier0Count] = {};
Address builtin_tier0_table_[Builtins::kBuiltinTier0Count] = {};
- // These fields are accessed through the API, offsets must be kept in sync
- // with v8::internal::Internals (in include/v8-internal.h) constants. The
- // layout consistency is verified in Isolate::CheckIsolateLayout() using
- // runtime checks.
- void* embedder_data_[Internals::kNumIsolateDataSlots] = {};
+ LinearAllocationArea new_allocation_info_;
+ LinearAllocationArea old_allocation_info_;
- // Stores the state of the caller for TurboAssembler::CallCFunction so that
+ // Stores the state of the caller for MacroAssembler::CallCFunction so that
// the sampling CPU profiler can iterate the stack during such calls. These
// are stored on IsolateData so that they can be stored to with only one move
// instruction in compiled code.
//
- // The FP and PC that are saved right before TurboAssembler::CallCFunction.
+ // The FP and PC that are saved right before MacroAssembler::CallCFunction.
Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress;
// The address of the fast API callback right before it's executed from
@@ -231,6 +232,14 @@ class IsolateData final {
// long tasks.
size_t long_task_stats_counter_ = 0;
+ ThreadLocalTop thread_local_top_;
+
+ // These fields are accessed through the API, offsets must be kept in sync
+ // with v8::internal::Internals (in include/v8-internal.h) constants. The
+ // layout consistency is verified in Isolate::CheckIsolateLayout() using
+ // runtime checks.
+ void* embedder_data_[Internals::kNumIsolateDataSlots] = {};
+
// Table containing pointers to external objects.
#ifdef V8_COMPRESS_POINTERS
ExternalPointerTable external_pointer_table_;
@@ -240,19 +249,15 @@ class IsolateData final {
RootsTable roots_table_;
ExternalReferenceTable external_reference_table_;
- ThreadLocalTop thread_local_top_;
-
// The entry points for builtins. This corresponds to
- // Code::InstructionStart() for each Code object in the builtins table below.
- // The entry table is in IsolateData for easy access through kRootRegister.
+ // InstructionStream::InstructionStart() for each InstructionStream object in
+ // the builtins table below. The entry table is in IsolateData for easy access
+ // through kRootRegister.
Address builtin_entry_table_[Builtins::kBuiltinCount] = {};
// The entries in this array are tagged pointers to Code objects.
Address builtin_table_[Builtins::kBuiltinCount] = {};
- LinearAllocationArea new_allocation_info_;
- LinearAllocationArea old_allocation_info_;
-
// Ensure the size is 8-byte aligned in order to make alignment of the field
// following the IsolateData field predictable. This solves the issue with
// C++ compilers for 32-bit platforms which are not consistent at aligning
@@ -279,6 +284,7 @@ void IsolateData::AssertPredictableLayout() {
static_assert(std::is_standard_layout<ThreadLocalTop>::value);
static_assert(std::is_standard_layout<ExternalReferenceTable>::value);
static_assert(std::is_standard_layout<IsolateData>::value);
+ static_assert(std::is_standard_layout<LinearAllocationArea>::value);
#define V(Offset, Size, Name) \
static_assert(offsetof(IsolateData, Name##_) == Offset);
ISOLATE_DATA_FIELDS(V)
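
The isolate-data.h hunks above reorder fields and then rely on offsetof-based static_asserts (plus std::is_standard_layout) to keep hand-maintained offset constants in sync with the real layout. A standalone sketch of that layout-pinning idiom, with made-up fields and constants:

#include <cstddef>
#include <cstdint>
#include <type_traits>

struct ExampleData {
  uint8_t flag_a = 0;
  uint8_t flag_b = 0;
  uint8_t padding[6] = {};
  void* slots[4] = {};
};

// Offsets that some other component (e.g. generated code) hard-codes.
static constexpr std::size_t kFlagAOffset = 0;
static constexpr std::size_t kFlagBOffset = 1;
static constexpr std::size_t kSlotsOffset = 8;

// offsetof is only well-defined for standard-layout types, so pin that too.
static_assert(std::is_standard_layout<ExampleData>::value);
static_assert(offsetof(ExampleData, flag_a) == kFlagAOffset);
static_assert(offsetof(ExampleData, flag_b) == kFlagBOffset);
static_assert(offsetof(ExampleData, slots) == kSlotsOffset);

int main() { return 0; }
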
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index 026e7cfd71..8edeb5dfb8 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -133,7 +133,7 @@ Object Isolate::VerifyBuiltinsResult(Object result) {
// because that's the assumption in generated code (which might call this
// builtin).
if (!result.IsSmi()) {
- DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTaggedPointer(
+ DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTagged(
this, static_cast<Tagged_t>(result.ptr())));
}
#endif
@@ -149,11 +149,11 @@ ObjectPair Isolate::VerifyBuiltinsResult(ObjectPair pair) {
// because that's the assumption in generated code (which might call this
// builtin).
if (!HAS_SMI_TAG(pair.x)) {
- DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTaggedPointer(
+ DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTagged(
this, static_cast<Tagged_t>(pair.x)));
}
if (!HAS_SMI_TAG(pair.y)) {
- DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTaggedPointer(
+ DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTagged(
this, static_cast<Tagged_t>(pair.y)));
}
#endif // V8_COMPRESS_POINTERS
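
The DCHECKs above compare a full pointer against the result of re-decompressing its low 32 bits. A hedged sketch of that compress/decompress round trip with an illustrative cage base (not V8's actual compression scheme):

#include <cassert>
#include <cstdint>

using Address = uint64_t;    // Full (uncompressed) tagged pointer.
using Tagged_t = uint32_t;   // Compressed on-heap representation.

constexpr Address kCageBase = 0x5500'0000'0000;  // Illustrative 4GB-aligned base.

Tagged_t Compress(Address full) { return static_cast<Tagged_t>(full); }

Address Decompress(Address cage_base, Tagged_t compressed) {
  return cage_base + compressed;
}

int main() {
  Address object = kCageBase + 0x1234'5678;
  // Compressing and decompressing must reproduce the original address.
  assert(Decompress(kCageBase, Compress(object)) == object);
}
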
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 161edbe2aa..bbefedff41 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -13,69 +13,31 @@
namespace v8 {
namespace internal {
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-
-// Aliases for GetPtrComprCageBase when
-// V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE. Each Isolate has its own cage, whose
-// base address is also the Isolate root.
-V8_INLINE Address GetIsolateRootAddress(Address on_heap_addr) {
- return V8HeapCompressionScheme::GetPtrComprCageBaseAddress(on_heap_addr);
-}
-
-V8_INLINE Address GetIsolateRootAddress(PtrComprCageBase cage_base) {
- return cage_base.address();
-}
-
-#else
-
-V8_INLINE Address GetIsolateRootAddress(Address on_heap_addr) { UNREACHABLE(); }
-
-V8_INLINE Address GetIsolateRootAddress(PtrComprCageBase cage_base) {
- UNREACHABLE();
-}
-
-#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
// Avoid using the below GetIsolateFromWritableObject because we want to be
// able to get the heap, but not the isolate, for off-thread objects.
#if defined V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object)->heap();
-#elif defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE) && \
- !defined(V8_EXTERNAL_CODE_SPACE)
- Isolate* isolate =
- Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
- DCHECK_NOT_NULL(isolate);
- return isolate->heap();
#else
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->GetHeap();
-#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
+#endif // V8_ENABLE_THIRD_PARTY_HEAP
}
V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object);
-#elif defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE) && \
- !defined(V8_EXTERNAL_CODE_SPACE)
- Isolate* isolate =
- Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
- DCHECK_NOT_NULL(isolate);
- return isolate;
#else
return Isolate::FromHeap(GetHeapFromWritableObject(object));
-#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
+#endif // V8_ENABLE_THIRD_PARTY_HEAP
}
V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
*isolate = Heap::GetIsolateFromWritableObject(object);
return true;
-#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- *isolate = GetIsolateFromWritableObject(object);
- return true;
#else
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -85,7 +47,7 @@ V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
}
*isolate = Isolate::FromHeap(chunk->GetHeap());
return true;
-#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
+#endif // V8_ENABLE_THIRD_PARTY_HEAP
}
// Use this function instead of Internals::GetIsolateForSandbox for internal
@@ -115,8 +77,8 @@ V8_INLINE PtrComprCageBase GetPtrComprCageBaseSlow(HeapObject object) {
return PtrComprCageBase{isolate};
}
// If the Isolate can't be obtained then the heap object is a read-only
- // one and therefore not a Code object, so fallback to auto-computing cage
- // base value.
+ // one and therefore not an InstructionStream object, so fall back to
+ // auto-computing the cage base value.
}
return GetPtrComprCageBase(object);
}
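
The surviving path in isolate-utils-inl.h reaches the heap through MemoryChunk::FromHeapObject. A hedged sketch of the underlying address-masking trick, with an illustrative chunk size and a stand-in owner field:

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

constexpr uintptr_t kChunkSize = uintptr_t{1} << 18;  // 256 KB, power of two.

struct Chunk {
  int owner_id;  // Stands in for the Heap*/Isolate* back-pointer.
};

// Objects live inside aligned chunks, so masking any interior address down to
// the chunk alignment recovers the chunk header.
Chunk* ChunkFromAddress(uintptr_t addr) {
  return reinterpret_cast<Chunk*>(addr & ~(kChunkSize - 1));
}

int main() {
  void* block = std::aligned_alloc(kChunkSize, kChunkSize);
  Chunk* chunk = new (block) Chunk{42};
  // Pretend an object lives somewhere inside the chunk.
  uintptr_t object = reinterpret_cast<uintptr_t>(block) + 0x1234;
  assert(ChunkFromAddress(object)->owner_id == chunk->owner_id);
  std::free(block);
}
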
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 7e1a67da96..06c707da96 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -34,15 +34,12 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/flush-instruction-cache.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/date/date.h"
#include "src/debug/debug-frames.h"
-#if V8_ENABLE_WEBASSEMBLY
-#include "src/debug/debug-wasm-objects.h"
-#include "src/wasm/stacks.h"
-#endif // V8_ENABLE_WEBASSEMBLY
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/deoptimizer/materialized-object-store.h"
@@ -83,6 +80,7 @@
#include "src/objects/elements.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
@@ -99,6 +97,7 @@
#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/roots/static-roots.h"
#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/snapshot/embedded/embedded-file-writer-interface.h"
#include "src/snapshot/read-only-deserializer.h"
@@ -125,7 +124,9 @@
#endif // V8_ENABLE_MAGLEV
#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/stacks.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
@@ -179,12 +180,13 @@ uint32_t DefaultEmbeddedBlobDataSize() {
namespace {
// These variables provide access to the current embedded blob without requiring
-// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
-// not have access to an isolate but still needs to access the embedded blob.
-// The variables are initialized by each isolate in Init(). Writes and reads are
-// relaxed since we can guarantee that the current thread has initialized these
-// variables before accessing them. Different threads may race, but this is fine
-// since they all attempt to set the same values of the blob pointer and size.
+// an isolate instance. This is needed e.g. by
+// InstructionStream::InstructionStart, which may not have access to an isolate
+// but still needs to access the embedded blob. The variables are initialized by
+// each isolate in Init(). Writes and reads are relaxed since we can guarantee
+// that the current thread has initialized these variables before accessing
+// them. Different threads may race, but this is fine since they all attempt to
+// set the same values of the blob pointer and size.
std::atomic<const uint8_t*> current_embedded_blob_code_(nullptr);
std::atomic<uint32_t> current_embedded_blob_code_size_(0);
@@ -423,71 +425,58 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
static constexpr size_t kSeed = 0;
size_t hash = kSeed;
+ // Hash static entries of the roots table.
+ hash = base::hash_combine(hash, V8_STATIC_ROOTS_BOOL);
+#if V8_STATIC_ROOTS_BOOL
+ hash = base::hash_combine(hash,
+ static_cast<int>(RootIndex::kReadOnlyRootsCount));
+ RootIndex i = RootIndex::kFirstReadOnlyRoot;
+ for (auto ptr : StaticReadOnlyRootsPointerTable) {
+ hash = base::hash_combine(ptr, hash);
+ ++i;
+ }
+#endif // V8_STATIC_ROOTS_BOOL
+
// Hash data sections of builtin code objects.
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- CodeT codet = builtins()->code(builtin);
-
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK(Internals::HasHeapObjectTag(codet.ptr()));
- uint8_t* const code_ptr = reinterpret_cast<uint8_t*>(codet.address());
-
- // These static asserts ensure we don't miss relevant fields. We don't
- // hash code cage base and code entry point. Other data fields must
- // remain the same.
- static_assert(CodeDataContainer::kCodePointerFieldsStrongEndOffset ==
- CodeDataContainer::kCodeEntryPointOffset);
-
- static_assert(CodeDataContainer::kCodeEntryPointOffsetEnd + 1 ==
- CodeDataContainer::kFlagsOffset);
- static_assert(CodeDataContainer::kFlagsOffsetEnd + 1 ==
- CodeDataContainer::kBuiltinIdOffset);
- static_assert(CodeDataContainer::kBuiltinIdOffsetEnd + 1 ==
- CodeDataContainer::kKindSpecificFlagsOffset);
- static_assert(CodeDataContainer::kKindSpecificFlagsOffsetEnd + 1 ==
- CodeDataContainer::kUnalignedSize);
- constexpr int kStartOffset = CodeDataContainer::kFlagsOffset;
-
- // |is_off_heap_trampoline| is false during builtins compilation (since
- // the builtins are not trampolines yet) but it's true for off-heap
- // builtin trampolines. The rest of the data fields should be the same.
- // So we temporarily set |is_off_heap_trampoline| to true during hash
- // computation.
- bool is_off_heap_trampoline_sav = codet.is_off_heap_trampoline();
- codet.set_is_off_heap_trampoline_for_hash(true);
-
- for (int j = kStartOffset; j < CodeDataContainer::kUnalignedSize; j++) {
- hash = base::hash_combine(hash, size_t{code_ptr[j]});
- }
- codet.set_is_off_heap_trampoline_for_hash(is_off_heap_trampoline_sav);
-#endif // V8_EXTERNAL_CODE_SPACE
- } else {
- Code code = FromCodeT(codet);
-
- DCHECK(Internals::HasHeapObjectTag(code.ptr()));
- uint8_t* const code_ptr = reinterpret_cast<uint8_t*>(code.address());
-
- // These static asserts ensure we don't miss relevant fields. We don't
- // hash pointer compression base, instruction/metadata size value and
- // flags since they change when creating the off-heap trampolines. Other
- // data fields must remain the same.
-#ifdef V8_EXTERNAL_CODE_SPACE
- static_assert(Code::kMainCageBaseUpper32BitsOffset == Code::kDataStart);
- static_assert(Code::kInstructionSizeOffset ==
- Code::kMainCageBaseUpper32BitsOffsetEnd + 1);
-#else
- static_assert(Code::kInstructionSizeOffset == Code::kDataStart);
-#endif // V8_EXTERNAL_CODE_SPACE
- static_assert(Code::kMetadataSizeOffset ==
- Code::kInstructionSizeOffsetEnd + 1);
- static_assert(Code::kFlagsOffset == Code::kMetadataSizeOffsetEnd + 1);
- static_assert(Code::kBuiltinIndexOffset == Code::kFlagsOffsetEnd + 1);
- static constexpr int kStartOffset = Code::kBuiltinIndexOffset;
-
- for (int j = kStartOffset; j < Code::kUnalignedHeaderSize; j++) {
- hash = base::hash_combine(hash, size_t{code_ptr[j]});
- }
+ Code code = builtins()->code(builtin);
+
+ DCHECK(Internals::HasHeapObjectTag(code.ptr()));
+ uint8_t* const code_ptr = reinterpret_cast<uint8_t*>(code.address());
+
+ // These static asserts ensure we don't miss relevant fields. We don't hash
+ // code cage base and code entry point. Other data fields must remain the
+ // same.
+ static_assert(Code::kCodePointerFieldsStrongEndOffset ==
+ Code::kCodeEntryPointOffset);
+
+ static_assert(Code::kCodeEntryPointOffsetEnd + 1 == Code::kFlagsOffset);
+ static_assert(Code::kFlagsOffsetEnd + 1 == Code::kBuiltinIdOffset);
+ static_assert(Code::kBuiltinIdOffsetEnd + 1 ==
+ Code::kKindSpecificFlagsOffset);
+ static_assert(Code::kKindSpecificFlagsOffsetEnd + 1 ==
+ Code::kInstructionSizeOffset);
+ static_assert(Code::kInstructionSizeOffsetEnd + 1 ==
+ Code::kMetadataSizeOffset);
+ static_assert(Code::kMetadataSizeOffsetEnd + 1 ==
+ Code::kInlinedBytecodeSizeOffset);
+ static_assert(Code::kInlinedBytecodeSizeOffsetEnd + 1 ==
+ Code::kOsrOffsetOffset);
+ static_assert(Code::kOsrOffsetOffsetEnd + 1 ==
+ Code::kHandlerTableOffsetOffset);
+ static_assert(Code::kHandlerTableOffsetOffsetEnd + 1 ==
+ Code::kUnwindingInfoOffsetOffset);
+ static_assert(Code::kUnwindingInfoOffsetOffsetEnd + 1 ==
+ Code::kConstantPoolOffsetOffset);
+ static_assert(Code::kConstantPoolOffsetOffsetEnd + 1 ==
+ Code::kCodeCommentsOffsetOffset);
+ static_assert(Code::kCodeCommentsOffsetOffsetEnd + 1 ==
+ Code::kUnalignedSize);
+ constexpr int kStartOffset = Code::kFlagsOffset;
+
+ for (int j = kStartOffset; j < Code::kUnalignedSize; j++) {
+ hash = base::hash_combine(hash, size_t{code_ptr[j]});
}
}
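
HashIsolateForEmbeddedBlob now folds the data fields of every builtin Code object into a single hash via base::hash_combine. A minimal standalone sketch of that byte-range folding (the combine step here is a generic Boost-style mix, not V8's own implementation):

#include <cstddef>
#include <cstdint>
#include <cstdio>

std::size_t HashCombine(std::size_t seed, std::size_t value) {
  // Boost-style combine; the constant is a 64-bit golden-ratio fraction.
  return seed ^ (value + 0x9e3779b97f4a7c15ull + (seed << 6) + (seed >> 2));
}

std::size_t HashBytes(const uint8_t* data, std::size_t length,
                      std::size_t seed = 0) {
  std::size_t hash = seed;
  for (std::size_t i = 0; i < length; ++i) {
    hash = HashCombine(hash, static_cast<std::size_t>(data[i]));
  }
  return hash;
}

int main() {
  const uint8_t blob[] = {0xde, 0xad, 0xbe, 0xef};
  std::printf("%zx\n", HashBytes(blob, sizeof(blob)));
}
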
@@ -498,10 +487,6 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
return hash;
}
-base::LazyMutex Isolate::process_wide_shared_isolate_mutex_ =
- LAZY_MUTEX_INITIALIZER;
-Isolate* Isolate::process_wide_shared_isolate_{nullptr};
-
Isolate* Isolate::process_wide_shared_space_isolate_{nullptr};
thread_local Isolate::PerIsolateThreadData* g_current_per_isolate_thread_data_
@@ -709,7 +694,7 @@ StackTraceFailureMessage::StackTraceFailureMessage(
FixedStringAllocator fixed(&js_stack_trace_[0], buffer_length - 1);
StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
isolate->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
- // Keeping a reference to the last code objects to increase likelyhood that
+ // Keeping a reference to the last code objects to increase likelihood that
// they get included in the minidump.
const size_t code_objects_length = arraysize(code_objects_);
size_t i = 0;
@@ -787,7 +772,7 @@ class CallSiteBuilder {
Handle<Object> receiver(combinator->native_context().promise_function(),
isolate_);
- Handle<CodeT> code(combinator->code(), isolate_);
+ Handle<Code> code(combinator->code(), isolate_);
// TODO(mmarchini) save Promises list from the Promise combinator
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
@@ -1309,7 +1294,7 @@ Handle<FixedArray> Isolate::GetSimpleStackTrace(
}
Address Isolate::GetAbstractPC(int* line, int* column) {
- JavaScriptFrameIterator it(this);
+ JavaScriptStackFrameIterator it(this);
if (it.done()) {
*line = -1;
@@ -1742,14 +1727,18 @@ Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
// embedder didn't specify a custom uncaught exception callback,
// or if the custom callback determined that V8 should abort, then
// abort.
- if (v8_flags.abort_on_uncaught_exception) {
+ // Cache the flag on a static so that we can modify the value looked up below
+ // in the presence of read-only flags.
+ static bool abort_on_uncaught_exception =
+ v8_flags.abort_on_uncaught_exception;
+ if (abort_on_uncaught_exception) {
CatchType prediction = PredictExceptionCatcher();
if ((prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) &&
(!abort_on_uncaught_exception_callback_ ||
abort_on_uncaught_exception_callback_(
reinterpret_cast<v8::Isolate*>(this)))) {
// Prevent endless recursion.
- v8_flags.abort_on_uncaught_exception = false;
+ abort_on_uncaught_exception = false;
// This flag is intended for use by JavaScript developers, so
// print a user-friendly stack trace (not an internal one).
PrintF(stderr, "%s\n\nFROM\n",
@@ -2013,7 +2002,7 @@ Object Isolate::UnwindAndFindHandler() {
CHECK(frame->is_java_script());
if (frame->is_turbofan()) {
- Code code = frame->LookupCodeT().code();
+ Code code = frame->LookupCode();
// The debugger triggers lazy deopt for the "to-be-restarted" frame
// immediately when the CDP event arrives while paused.
CHECK(code.marked_for_deoptimization());
@@ -2021,7 +2010,8 @@ Object Isolate::UnwindAndFindHandler() {
// Jump directly to the optimized frames return, to immediately fall
// into the deoptimizer.
- int offset = code.GetOffsetFromInstructionStart(this, frame->pc());
+ const int offset =
+ static_cast<int>(frame->pc() - code.InstructionStart());
// Compute the stack pointer from the frame pointer. This ensures that
// argument slots on the stack are dropped as returning would.
@@ -2029,14 +2019,14 @@ Object Isolate::UnwindAndFindHandler() {
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
code.stack_slots() * kSystemPointerSize;
- return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
- offset, code.constant_pool(), return_sp,
- frame->fp(), visited_frames);
+ return FoundHandler(Context(), code.InstructionStart(), offset,
+ code.constant_pool(), return_sp, frame->fp(),
+ visited_frames);
}
DCHECK(!frame->is_maglev());
debug()->clear_restart_frame();
- CodeT code = *BUILTIN_CODE(this, RestartFrameTrampoline);
+ Code code = *BUILTIN_CODE(this, RestartFrameTrampoline);
return FoundHandler(Context(), code.InstructionStart(), 0,
code.constant_pool(), kNullAddress, frame->fp(),
visited_frames);
@@ -2052,7 +2042,7 @@ Object Isolate::UnwindAndFindHandler() {
thread_local_top()->handler_ = handler->next_address();
// Gather information from the handler.
- CodeT code = frame->LookupCodeT().codet();
+ Code code = frame->LookupCode();
HandlerTable table(code);
return FoundHandler(Context(), code.InstructionStart(this, frame->pc()),
table.LookupReturn(0), code.constant_pool(),
@@ -2064,9 +2054,9 @@ Object Isolate::UnwindAndFindHandler() {
case StackFrame::C_WASM_ENTRY: {
StackHandler* handler = frame->top_handler();
thread_local_top()->handler_ = handler->next_address();
- Code code = frame->LookupCodeT().code();
+ Code code = frame->LookupCode();
HandlerTable table(code);
- Address instruction_start = code.InstructionStart(this, frame->pc());
+ Address instruction_start = code.InstructionStart();
int return_offset = static_cast<int>(frame->pc() - instruction_start);
int handler_offset = table.LookupReturn(return_offset);
DCHECK_NE(-1, handler_offset);
@@ -2124,7 +2114,7 @@ Object Isolate::UnwindAndFindHandler() {
int offset = opt_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
if (offset < 0) break;
// The code might be an optimized code or a turbofanned builtin.
- CodeT code = frame->LookupCodeT().ToCodeT();
+ Code code = frame->LookupCode();
// Compute the stack pointer from the frame pointer. This ensures
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
@@ -2138,7 +2128,7 @@ Object Isolate::UnwindAndFindHandler() {
// If the target code is lazy deoptimized, we jump to the original
// return address, but we make a note that we are throwing, so
// that the deoptimizer can do the right thing.
- offset = static_cast<int>(frame->pc() - code.entry());
+ offset = static_cast<int>(frame->pc() - code.InstructionStart());
set_deoptimizer_lazy_throw(true);
}
@@ -2158,7 +2148,7 @@ Object Isolate::UnwindAndFindHandler() {
// The code might be a dynamically generated stub or a turbofanned
// embedded builtin.
- CodeT code = stub_frame->LookupCodeT().ToCodeT();
+ Code code = stub_frame->LookupCode();
if (code.kind() != CodeKind::BUILTIN || !code.is_turbofanned() ||
!code.has_handler_table()) {
break;
@@ -2208,8 +2198,7 @@ Object Isolate::UnwindAndFindHandler() {
if (frame->is_baseline()) {
BaselineFrame* sp_frame = BaselineFrame::cast(js_frame);
- Code code = sp_frame->LookupCodeT().code();
- DCHECK(!code.is_off_heap_trampoline());
+ Code code = sp_frame->LookupCode();
intptr_t pc_offset = sp_frame->GetPCForBytecodeOffset(offset);
// Patch the context register directly on the frame, so that we don't
// need to have a context read + write in the baseline code.
@@ -2221,7 +2210,7 @@ Object Isolate::UnwindAndFindHandler() {
InterpretedFrame::cast(js_frame)->PatchBytecodeOffset(
static_cast<int>(offset));
- CodeT code = *BUILTIN_CODE(this, InterpreterEnterAtBytecode);
+ Code code = *BUILTIN_CODE(this, InterpreterEnterAtBytecode);
// We subtract a frame from visited_frames because otherwise the
// shadow stack will drop the underlying interpreter entry trampoline
// in which the handler runs.
@@ -2253,7 +2242,7 @@ Object Isolate::UnwindAndFindHandler() {
// Reconstruct the stack pointer from the frame pointer.
Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
- CodeT code = js_frame->LookupCodeT().codet();
+ Code code = js_frame->LookupCode();
return FoundHandler(Context(), code.InstructionStart(), 0,
code.constant_pool(), return_sp, frame->fp(),
visited_frames);
@@ -2270,8 +2259,7 @@ Object Isolate::UnwindAndFindHandler() {
USE(removed);
// If there were any materialized objects, the code should be
// marked for deopt.
- DCHECK_IMPLIES(
- removed, frame->LookupCodeT().ToCodeT().marked_for_deoptimization());
+ DCHECK_IMPLIES(removed, frame->LookupCode().marked_for_deoptimization());
}
}
@@ -2370,20 +2358,20 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
}
case StackFrame::STUB: {
- CodeLookupResult code = frame->LookupCodeT();
- if (code.kind() != CodeKind::BUILTIN || !code.has_handler_table() ||
- !code.is_turbofanned()) {
+ base::Optional<Code> code = frame->LookupCode();
+ if (code->kind() != CodeKind::BUILTIN || !code->has_handler_table() ||
+ !code->is_turbofanned()) {
break;
}
- CatchType prediction = ToCatchType(code.GetBuiltinCatchPrediction());
+ CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
if (prediction != NOT_CAUGHT) return prediction;
break;
}
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
- CodeLookupResult code = frame->LookupCodeT();
- CatchType prediction = ToCatchType(code.GetBuiltinCatchPrediction());
+ base::Optional<Code> code = frame->LookupCode();
+ CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
if (prediction != NOT_CAUGHT) return prediction;
break;
}
@@ -2430,10 +2418,12 @@ void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
DCHECK(has_scheduled_exception());
if (reinterpret_cast<void*>(scheduled_exception().ptr()) ==
handler->exception_) {
- DCHECK(!is_execution_terminating());
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ !is_execution_terminating());
clear_scheduled_exception();
} else {
- DCHECK(is_execution_terminating());
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ is_execution_terminating());
// Clear termination once we returned from all V8 frames.
if (thread_local_top()->CallDepthIsZero()) {
thread_local_top()->external_caught_exception_ = false;
@@ -2468,7 +2458,7 @@ void Isolate::PrintCurrentStackTrace(std::ostream& out) {
}
bool Isolate::ComputeLocation(MessageLocation* target) {
- StackTraceFrameIterator it(this);
+ DebuggableStackFrameIterator it(this);
if (it.done()) return false;
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
@@ -2724,7 +2714,7 @@ bool Isolate::OptionalRescheduleException(bool clear_exception) {
DCHECK_NE(thread_local_top()->try_catch_handler_address(), kNullAddress);
Address external_handler_address =
thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it(this);
+ JavaScriptStackFrameIterator it(this);
if (it.done() || (it.frame()->sp() > external_handler_address)) {
clear_exception = true;
}
@@ -2849,12 +2839,12 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
if (frame->is_java_script()) {
catch_prediction = PredictException(JavaScriptFrame::cast(frame));
} else if (frame->type() == StackFrame::STUB) {
- CodeLookupResult code = frame->LookupCodeT();
- if (code.kind() != CodeKind::BUILTIN || !code.has_handler_table() ||
- !code.is_turbofanned()) {
+ base::Optional<Code> code = frame->LookupCode();
+ if (code->kind() != CodeKind::BUILTIN || !code->has_handler_table() ||
+ !code->is_turbofanned()) {
continue;
}
- catch_prediction = code.GetBuiltinCatchPrediction();
+ catch_prediction = code->GetBuiltinCatchPrediction();
} else {
continue;
}
@@ -2896,9 +2886,10 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
}
Handle<PromiseOnStack> promise_on_stack =
Handle<PromiseOnStack>::cast(promise_stack);
- if (!PromiseOnStack::GetPromise(promise_on_stack).ToHandle(&retval)) {
- return retval;
- }
+ MaybeHandle<JSObject> maybe_promise =
+ PromiseOnStack::GetPromise(promise_on_stack);
+ if (maybe_promise.is_null()) return retval;
+ retval = maybe_promise.ToHandleChecked();
if (retval->IsJSPromise()) {
if (PromiseHasUserDefinedRejectHandler(
Handle<JSPromise>::cast(retval))) {
@@ -2931,6 +2922,9 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
void Isolate::InstallConditionalFeatures(Handle<Context> context) {
Handle<JSGlobalObject> global = handle(context->global_object(), this);
+ // If some fuzzer decided to make the global object non-extensible, then
+ // we can't install any features (and would CHECK-fail if we tried).
+ if (!global->map().is_extensible()) return;
Handle<String> sab_name = factory()->SharedArrayBuffer_string();
if (IsSharedArrayBufferConstructorEnabled(context)) {
if (!JSObject::HasRealNamedProperty(this, global, sab_name)
@@ -2953,8 +2947,49 @@ bool Isolate::IsSharedArrayBufferConstructorEnabled(Handle<Context> context) {
return false;
}
+bool Isolate::IsWasmGCEnabled(Handle<Context> context) {
+#ifdef V8_ENABLE_WEBASSEMBLY
+ v8::WasmGCEnabledCallback callback = wasm_gc_enabled_callback();
+ if (callback) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ if (callback(api_context)) return true;
+ }
+ return v8_flags.experimental_wasm_gc;
+#else
+ return false;
+#endif
+}
+
+bool Isolate::IsWasmStringRefEnabled(Handle<Context> context) {
+ // If Wasm GC is explicitly enabled via a callback, also enable stringref.
+#ifdef V8_ENABLE_WEBASSEMBLY
+ v8::WasmGCEnabledCallback callback = wasm_gc_enabled_callback();
+ if (callback) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ if (callback(api_context)) return true;
+ }
+ return v8_flags.experimental_wasm_stringref;
+#else
+ return false;
+#endif
+}
+
+bool Isolate::IsWasmInliningEnabled(Handle<Context> context) {
+ // If Wasm GC is explicitly enabled via a callback, also enable inlining.
+#ifdef V8_ENABLE_WEBASSEMBLY
+ v8::WasmGCEnabledCallback callback = wasm_gc_enabled_callback();
+ if (callback) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ if (callback(api_context)) return true;
+ }
+ return v8_flags.experimental_wasm_inlining;
+#else
+ return false;
+#endif
+}
+
Handle<Context> Isolate::GetIncumbentContext() {
- JavaScriptFrameIterator it(this);
+ JavaScriptStackFrameIterator it(this);
// 1st candidate: most-recently-entered author function's context
// if it's newer than the last Context::BackupIncumbentScope entry.
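
The three IsWasm*Enabled helpers added above all follow the same shape: ask an embedder callback first and fall back to a command-line flag. A hedged sketch of that gating pattern (the callback type and flag are stand-ins, not the real V8 API):

#include <cstdio>

using FeatureCallback = bool (*)(int context_id);

struct FeatureGate {
  FeatureCallback callback = nullptr;
  bool flag_default = false;

  bool IsEnabled(int context_id) const {
    // An embedder callback can force-enable the feature per context;
    // otherwise fall back to the flag's value.
    if (callback && callback(context_id)) return true;
    return flag_default;
  }
};

int main() {
  FeatureGate gate{[](int id) { return id == 7; }, false};
  std::printf("%d %d\n", gate.IsEnabled(7), gate.IsEnabled(3));  // Prints: 1 0
}
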
@@ -3054,6 +3089,31 @@ void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object));
heap()->set_shared_wasm_memories(*shared_wasm_memories);
}
+
+void Isolate::RecordStackSwitchForScanning() {
+ Object current = root(RootIndex::kActiveContinuation);
+ DCHECK(!current.IsUndefined());
+ stack().ClearStackSegments();
+ wasm::StackMemory* wasm_stack =
+ Managed<wasm::StackMemory>::cast(
+ WasmContinuationObject::cast(current).stack())
+ .get()
+ .get();
+ current = WasmContinuationObject::cast(current).parent();
+ heap()->SetStackStart(reinterpret_cast<void*>(wasm_stack->base()));
+ // We don't need to add all inactive stacks. Only the ones in the active chain
+ // may contain cpp heap pointers.
+ while (!current.IsUndefined()) {
+ auto cont = WasmContinuationObject::cast(current);
+ auto* wasm_stack =
+ Managed<wasm::StackMemory>::cast(cont.stack()).get().get();
+ stack().AddStackSegment(
+ reinterpret_cast<const void*>(wasm_stack->base()),
+ reinterpret_cast<const void*>(wasm_stack->jmpbuf()->sp));
+ current = cont.parent();
+ }
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
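
RecordStackSwitchForScanning above walks the chain of active wasm stack segments and records each one's range so the scanner knows which memory may hold live pointers. A simplified, self-contained sketch of that chain walk (all types here are stand-ins):

#include <cstdio>
#include <utility>
#include <vector>

struct StackSegment {
  const void* base;      // High address (stacks grow down).
  const void* sp;        // Current stack pointer.
  StackSegment* parent;  // Next segment in the active chain.
};

std::vector<std::pair<const void*, const void*>> CollectScanRanges(
    StackSegment* active) {
  std::vector<std::pair<const void*, const void*>> ranges;
  // Only the active chain matters; inactive stacks are not scanned.
  for (StackSegment* s = active; s != nullptr; s = s->parent) {
    ranges.emplace_back(s->base, s->sp);
  }
  return ranges;
}

int main() {
  StackSegment main_stack{&main_stack, &main_stack, nullptr};
  StackSegment wasm_stack{&wasm_stack, &wasm_stack, &main_stack};
  std::printf("%zu segments to scan\n", CollectScanRanges(&wasm_stack).size());
}
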
Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
@@ -3233,75 +3293,10 @@ bool HasFlagThatRequiresSharedHeap() {
} // namespace
// static
-Isolate* Isolate::GetProcessWideSharedIsolate(bool* created_shared_isolate) {
- if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) {
- DCHECK(HasFlagThatRequiresSharedHeap());
- FATAL(
- "Build configuration does not support creating shared heap. The RO "
- "heap must be shared, pointer compression must either be off or "
- "use a shared cage, and write barriers must not be disabled. V8 is "
- "compiled with RO heap %s, pointers %s and write barriers %s.",
- V8_SHARED_RO_HEAP_BOOL ? "SHARED" : "NOT SHARED",
- !COMPRESS_POINTERS_BOOL ? "NOT COMPRESSED"
- : (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
- ? "COMPRESSED IN SHARED CAGE"
- : "COMPRESSED IN PER-ISOLATE CAGE"),
- V8_DISABLE_WRITE_BARRIERS_BOOL ? "DISABLED" : "ENABLED");
- }
-
- base::MutexGuard guard(process_wide_shared_isolate_mutex_.Pointer());
- if (process_wide_shared_isolate_ == nullptr) {
- process_wide_shared_isolate_ = Allocate(true);
- // TODO(v8:12547): Make shared heap constraints programmatically
- // configurable and tailored for the shared heap.
- v8::Isolate::CreateParams params;
- size_t initial_shared_heap_size =
- static_cast<size_t>(v8_flags.initial_shared_heap_size) * MB;
- size_t max_shared_heap_size =
- static_cast<size_t>(v8_flags.max_shared_heap_size) * MB;
- if (initial_shared_heap_size != 0 && max_shared_heap_size != 0) {
- params.constraints.ConfigureDefaultsFromHeapSize(initial_shared_heap_size,
- max_shared_heap_size);
- } else {
- params.constraints.ConfigureDefaults(
- base::SysInfo::AmountOfPhysicalMemory(),
- base::SysInfo::AmountOfVirtualMemory());
- }
- params.array_buffer_allocator =
- v8::ArrayBuffer::Allocator::NewDefaultAllocator();
- v8::Isolate::Initialize(
- reinterpret_cast<v8::Isolate*>(process_wide_shared_isolate_), params);
- *created_shared_isolate = true;
- } else {
- *created_shared_isolate = false;
- }
- return process_wide_shared_isolate_;
-}
-
-// static
-void Isolate::DeleteProcessWideSharedIsolate() {
- base::MutexGuard guard(process_wide_shared_isolate_mutex_.Pointer());
- DCHECK_NOT_NULL(process_wide_shared_isolate_);
- delete process_wide_shared_isolate_->array_buffer_allocator();
- Delete(process_wide_shared_isolate_);
- process_wide_shared_isolate_ = nullptr;
-}
+Isolate* Isolate::New() { return Allocate(); }
// static
-Isolate* Isolate::New() {
- Isolate* isolate = Allocate(false);
- if (HasFlagThatRequiresSharedHeap() && !v8_flags.shared_space) {
- // The Isolate that creates the shared Isolate, which is usually the main
- // thread Isolate, owns the lifetime of shared heap.
- bool created;
- isolate->set_shared_isolate(GetProcessWideSharedIsolate(&created));
- isolate->owns_shared_isolate_ = created;
- }
- return isolate;
-}
-
-// static
-Isolate* Isolate::Allocate(bool is_shared) {
+Isolate* Isolate::Allocate() {
// v8::V8::Initialize() must be called before creating any isolates.
DCHECK_NOT_NULL(V8::GetCurrentPlatform());
// IsolateAllocator allocates the memory for the Isolate object according to
@@ -3310,12 +3305,7 @@ Isolate* Isolate::Allocate(bool is_shared) {
std::make_unique<IsolateAllocator>();
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
- Isolate* isolate =
- new (isolate_ptr) Isolate(std::move(isolate_allocator), is_shared);
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- DCHECK(IsAligned(isolate->isolate_root(), kPtrComprCageBaseAlignment));
- DCHECK_EQ(isolate->isolate_root(), isolate->cage_base());
-#endif
+ Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
#ifdef DEBUG
non_disposed_isolates_++;
@@ -3337,12 +3327,7 @@ void Isolate::Delete(Isolate* isolate) {
Isolate* saved_isolate = isolate->TryGetCurrent();
SetIsolateThreadLocals(isolate, nullptr);
isolate->set_thread_id(ThreadId::Current());
- isolate->thread_local_top()->stack_ =
- saved_isolate ? saved_isolate->thread_local_top()->stack_
- : ::heap::base::Stack(base::Stack::GetStackStart());
-
- bool owns_shared_isolate = isolate->owns_shared_isolate_;
- Isolate* maybe_shared_isolate = isolate->shared_isolate_;
+ isolate->heap()->SetStackStart(base::Stack::GetStackStart());
isolate->Deinit();
@@ -3360,14 +3345,6 @@ void Isolate::Delete(Isolate* isolate) {
// Restore the previous current isolate.
SetIsolateThreadLocals(saved_isolate, saved_data);
-
- // The first isolate, which is usually the main thread isolate, owns the
- // lifetime of the shared isolate.
- if (owns_shared_isolate) {
- DCHECK_NOT_NULL(maybe_shared_isolate);
- USE(maybe_shared_isolate);
- DeleteProcessWideSharedIsolate();
- }
}
void Isolate::SetUpFromReadOnlyArtifacts(
@@ -3388,10 +3365,8 @@ v8::PageAllocator* Isolate::page_allocator() const {
return isolate_allocator_->page_allocator();
}
-Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
- bool is_shared)
+Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_data_(this, isolate_allocator->GetPtrComprCageBase()),
- is_shared_(is_shared),
isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
allocator_(new TracingAccountingAllocator(this)),
@@ -3420,8 +3395,6 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
handle_scope_data_.Initialize();
- CHECK_IMPLIES(is_shared_, V8_CAN_CREATE_SHARED_HEAP_BOOL);
-
#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
name##_ = (initial_value);
ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
@@ -3461,8 +3434,12 @@ void Isolate::CheckIsolateLayout() {
CHECK_EQ(
static_cast<int>(OFFSET_OF(Isolate, isolate_data_.builtin_tier0_table_)),
Internals::kBuiltinTier0TableOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
- Internals::kIsolateEmbedderDataOffset);
+ CHECK_EQ(
+ static_cast<int>(OFFSET_OF(Isolate, isolate_data_.new_allocation_info_)),
+ Internals::kNewAllocationInfoOffset);
+ CHECK_EQ(
+ static_cast<int>(OFFSET_OF(Isolate, isolate_data_.old_allocation_info_)),
+ Internals::kOldAllocationInfoOffset);
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_fp_)),
Internals::kIsolateFastCCallCallerFpOffset);
@@ -3476,6 +3453,8 @@ void Isolate::CheckIsolateLayout() {
Internals::kIsolateLongTaskStatsCounterOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
Internals::kIsolateStackGuardOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
+ Internals::kIsolateEmbedderDataOffset);
#ifdef V8_COMPRESS_POINTERS
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, isolate_data_.external_pointer_table_)),
@@ -3509,7 +3488,20 @@ void Isolate::UpdateLogObjectRelocation() {
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
- DisallowHeapAllocation no_allocation;
+
+ // All client isolates should already be detached when the shared heap isolate
+ // tears down.
+ if (is_shared_space_isolate()) {
+ global_safepoint()->AssertNoClientsOnTearDown();
+ }
+
+ if (has_shared_space() && !is_shared_space_isolate()) {
+ IgnoreLocalGCRequests ignore_gc_requests(heap());
+ ParkedScope parked_scope(main_thread_local_heap());
+ shared_space_isolate()->global_safepoint()->clients_mutex_.Lock();
+ }
+
+ DisallowGarbageCollection no_gc;
tracing_cpu_profiler_.reset();
if (v8_flags.stress_sampling_allocation_profiler > 0) {
@@ -3519,22 +3511,12 @@ void Isolate::Deinit() {
metrics_recorder_->NotifyIsolateDisposal();
recorder_context_id_map_.clear();
-#if defined(V8_OS_WIN64)
- if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- heap()->memory_allocator() && RequiresCodeRange() &&
- heap()->code_range()->AtomicDecrementUnwindInfoUseCount() == 1) {
- const base::AddressRegion& code_region = heap()->code_region();
- void* start = reinterpret_cast<void*>(code_region.begin());
- win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
- }
-#endif // V8_OS_WIN64
-
FutexEmulation::IsolateDeinit(this);
debug()->Unload();
#if V8_ENABLE_WEBASSEMBLY
- if (!is_shared()) wasm::GetWasmEngine()->DeleteCompileJobsOnIsolate(this);
+ wasm::GetWasmEngine()->DeleteCompileJobsOnIsolate(this);
BackingStore::RemoveSharedWasmMemoryObjects(this);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -3545,11 +3527,6 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
- // All client isolates should already be detached.
- if (is_shared()) {
- global_safepoint()->AssertNoClientsOnTearDown();
- }
-
if (v8_flags.print_deopt_stress) {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
}
@@ -3567,11 +3544,7 @@ void Isolate::Deinit() {
// Stop concurrent tasks before destroying resources since they might still
// use those.
- {
- IgnoreLocalGCRequests ignore_gc_requests(heap());
- ParkedScope parked_scope(main_thread_local_heap());
- cancelable_task_manager()->CancelAndWait();
- }
+ cancelable_task_manager()->CancelAndWait();
// Cancel all compiler tasks.
delete baseline_batch_compiler_;
@@ -3590,21 +3563,18 @@ void Isolate::Deinit() {
// At this point there are no more background threads left in this isolate.
heap_.safepoint()->AssertMainThreadIsOnlyThread();
- // Tear down data using the shared heap before detaching.
+ // Tear down data that requires the shared heap before detaching.
heap_.TearDownWithSharedHeap();
- {
- // This isolate might have to park for a shared GC initiated by another
- // client isolate before it can actually detach from the shared isolate.
- AllowGarbageCollection allow_shared_gc;
- DetachFromSharedIsolate();
- DetachFromSharedSpaceIsolate();
+ // Detach from the shared heap isolate and then unlock the mutex.
+ if (has_shared_space() && !is_shared_space_isolate()) {
+ GlobalSafepoint* global_safepoint =
+ this->shared_space_isolate()->global_safepoint();
+ global_safepoint->RemoveClient(this);
+ global_safepoint->clients_mutex_.Unlock();
}
- // All client isolates should already be detached.
- if (is_shared_space_isolate()) {
- global_safepoint()->AssertNoClientsOnTearDown();
- }
+ shared_space_isolate_.reset();
// Since there are no other threads left, we can lock this mutex without any
// ceremony. This signals to the tear down code that we are in a safepoint.
@@ -3651,7 +3621,7 @@ void Isolate::Deinit() {
#endif // defined(V8_OS_WIN)
#if V8_ENABLE_WEBASSEMBLY
- if (!is_shared()) wasm::GetWasmEngine()->RemoveIsolate(this);
+ wasm::GetWasmEngine()->RemoveIsolate(this);
#endif // V8_ENABLE_WEBASSEMBLY
TearDownEmbeddedBlob();
@@ -3699,6 +3669,13 @@ void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
g_current_isolate_ = isolate;
g_current_per_isolate_thread_data_ = data;
+
+ if (isolate && isolate->main_thread_local_isolate()) {
+ WriteBarrier::SetForThread(
+ isolate->main_thread_local_heap()->marking_barrier());
+ } else {
+ WriteBarrier::SetForThread(nullptr);
+ }
}
Isolate::~Isolate() {
@@ -3791,7 +3768,9 @@ void Isolate::InitializeThreadLocal() {
}
void Isolate::SetTerminationOnExternalTryCatch() {
- DCHECK(is_execution_termination_pending() || is_execution_terminating());
+ DCHECK_IMPLIES(
+ v8_flags.strict_termination_checks,
+ is_execution_termination_pending() || is_execution_terminating());
if (try_catch_handler() == nullptr) return;
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
@@ -3861,7 +3840,7 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
Address instruction_start = d.InstructionStartOfBuiltin(builtin);
- Handle<CodeT> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
+ Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
builtins->code_handle(builtin), instruction_start);
// From this point onwards, the old builtin code object is unreachable and
@@ -3940,6 +3919,36 @@ void Isolate::CreateAndSetEmbeddedBlob() {
CreateOffHeapTrampolines(this);
}
+void Isolate::InitializeIsShortBuiltinCallsEnabled() {
+ if (V8_SHORT_BUILTIN_CALLS_BOOL && v8_flags.short_builtin_calls) {
+#if defined(V8_OS_ANDROID)
+ // On Android, the check is not operative to detect memory, and re-embedded
+ // builtins don't have a memory cost.
+ is_short_builtin_calls_enabled_ = true;
+#else
+ // Check if the system has more than 4GB of physical memory by comparing the
+ // old space size with the respective threshold value.
+ is_short_builtin_calls_enabled_ = (heap_.MaxOldGenerationSize() >=
+ kShortBuiltinCallsOldSpaceSizeThreshold);
+#endif // defined(V8_OS_ANDROID)
+ // Additionally, enable if there is already a process-wide CodeRange that
+ // has re-embedded builtins.
+ if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange();
+ if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
+ is_short_builtin_calls_enabled_ = true;
+ }
+ }
+ if (V8_ENABLE_NEAR_CODE_RANGE_BOOL) {
+ // The short builtin calls could still be enabled if allocated code range
+ // is close enough to embedded builtins so that the latter could be
+ // reached using pc-relative (short) calls/jumps.
+ is_short_builtin_calls_enabled_ |=
+ GetShortBuiltinsCallRegion().contains(heap_.code_region());
+ }
+ }
+}
+
void Isolate::MaybeRemapEmbeddedBuiltinsIntoCodeRange() {
if (!is_short_builtin_calls_enabled() || !RequiresCodeRange()) {
return;
@@ -4032,7 +4041,7 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
ToHexString(code_range_base_address));
}
- if (!V8_EXTERNAL_CODE_SPACE_BOOL || heap()->code_space()->first_page()) {
+ if (heap()->code_space()->first_page()) {
const uintptr_t code_space_firstpage_address =
heap()->code_space()->FirstPageAddress();
add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
@@ -4040,13 +4049,13 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
}
const v8::StartupData* data = Snapshot::DefaultSnapshotBlob();
// TODO(cbruni): Implement strategy to infrequently collect this.
- const uint32_t v8_snapshot_checkum_calculated = 0;
+ const uint32_t v8_snapshot_checksum_calculated = 0;
add_crash_key_callback_(v8::CrashKeyId::kSnapshotChecksumCalculated,
- ToHexString(v8_snapshot_checkum_calculated));
- const uint32_t v8_snapshot_checkum_expected =
+ ToHexString(v8_snapshot_checksum_calculated));
+ const uint32_t v8_snapshot_checksum_expected =
Snapshot::GetExpectedChecksum(data);
add_crash_key_callback_(v8::CrashKeyId::kSnapshotChecksumExpected,
- ToHexString(v8_snapshot_checkum_expected));
+ ToHexString(v8_snapshot_checksum_expected));
}
void Isolate::InitializeCodeRanges() {
@@ -4093,14 +4102,116 @@ VirtualMemoryCage* Isolate::GetPtrComprCodeCageForTesting() {
return V8_EXTERNAL_CODE_SPACE_BOOL ? heap_.code_range() : GetPtrComprCage();
}
+void Isolate::VerifyStaticRoots() {
+#if V8_STATIC_ROOTS_BOOL
+ static_assert(ReadOnlyHeap::IsReadOnlySpaceShared(),
+ "Static read only roots are only supported when there is one "
+ "shared read only space per cage");
+#define STATIC_ROOTS_FAILED_MSG \
+ "Read-only heap layout changed. Run `tools/dev/gen-static-roots.py` to " \
+ "update static-roots.h."
+ static_assert(static_cast<int>(RootIndex::kReadOnlyRootsCount) ==
+ StaticReadOnlyRootsPointerTable.size(),
+ STATIC_ROOTS_FAILED_MSG);
+ auto& roots = roots_table();
+ RootIndex idx = RootIndex::kFirstReadOnlyRoot;
+ ReadOnlyPage* first_page = read_only_heap()->read_only_space()->pages()[0];
+ for (Tagged_t cmp_ptr : StaticReadOnlyRootsPointerTable) {
+ Address the_root = roots[idx];
+ Address ptr =
+ V8HeapCompressionScheme::DecompressTagged(cage_base(), cmp_ptr);
+ CHECK_WITH_MSG(the_root == ptr, STATIC_ROOTS_FAILED_MSG);
+ // All roots must fit on first page, since only this page is guaranteed to
+ // have a stable offset from the cage base. If this ever changes we need
+ // to load more pages with predictable offset at
+ // ReadOnlySpace::InitFromMemoryDump.
+ CHECK(first_page->Contains(the_root));
+ ++idx;
+ }
+
+ idx = RootIndex::kFirstReadOnlyRoot;
+#define CHECK_NAME(_1, _2, CamelName) \
+ CHECK_WITH_MSG(StaticReadOnlyRoot::k##CamelName == \
+ V8HeapCompressionScheme::CompressObject(roots[idx]), \
+ STATIC_ROOTS_FAILED_MSG); \
+ ++idx;
+ STRONG_READ_ONLY_ROOT_LIST(CHECK_NAME)
+#undef CHECK_NAME
+
+ // Check if instance types to map range mappings are still valid.
+ //
+ // Is##type(map) may be computed by checking if the map pointer lies in a
+ // statically known range of addresses, whereas Is##type(instance_type) is the
+ // definitive source of truth. If they disagree it means that a particular
+ // entry in InstanceTypeChecker::kUniqueMapRangeOfInstanceTypeRangeList is out
+ // of date. This can also happen if an instance type is starting to be used by
+ // more maps.
+ //
+ // If this check fails either re-arrange allocations in the read-only heap
+ // such that the static map range is restored (consult static-roots.h for a
+ // sorted list of addresses) or remove the offending entry from the list.
+ for (auto idx = RootIndex::kFirstRoot; idx <= RootIndex::kLastRoot; ++idx) {
+ Object obj = roots_table().slot(idx).load(this);
+ if (obj.ptr() == kNullAddress || !obj.IsMap()) continue;
+ Map map = Map::cast(obj);
+
+#define INSTANCE_TYPE_CHECKER_SINGLE(type, _) \
+ CHECK_EQ(InstanceTypeChecker::Is##type(map), \
+ InstanceTypeChecker::Is##type(map.instance_type()));
+ INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER_SINGLE)
+#undef INSTANCE_TYPE_CHECKER_SINGLE
+
+#define INSTANCE_TYPE_CHECKER_RANGE(type, _1, _2) \
+ CHECK_EQ(InstanceTypeChecker::Is##type(map), \
+ InstanceTypeChecker::Is##type(map.instance_type()));
+ INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE)
+#undef INSTANCE_TYPE_CHECKER_RANGE
+
+ // This limit is used in various places as a fast IsJSReceiver check.
+ CHECK_IMPLIES(
+ InstanceTypeChecker::IsPrimitiveHeapObject(map.instance_type()),
+ V8HeapCompressionScheme::CompressObject(map.ptr()) <
+ InstanceTypeChecker::kNonJsReceiverMapLimit);
+ CHECK_IMPLIES(InstanceTypeChecker::IsJSReceiver(map.instance_type()),
+ V8HeapCompressionScheme::CompressObject(map.ptr()) >
+ InstanceTypeChecker::kNonJsReceiverMapLimit);
+ CHECK(InstanceTypeChecker::kNonJsReceiverMapLimit <
+ read_only_heap()->read_only_space()->Size());
+
+ if (InstanceTypeChecker::IsString(map.instance_type())) {
+ CHECK_EQ(InstanceTypeChecker::IsString(map),
+ InstanceTypeChecker::IsString(map.instance_type()));
+ CHECK_EQ(InstanceTypeChecker::IsExternalString(map),
+ InstanceTypeChecker::IsExternalString(map.instance_type()));
+ CHECK_EQ(InstanceTypeChecker::IsInternalizedString(map),
+ InstanceTypeChecker::IsInternalizedString(map.instance_type()));
+ CHECK_EQ(InstanceTypeChecker::IsThinString(map),
+ InstanceTypeChecker::IsThinString(map.instance_type()));
+ }
+ }
+
+ // Sanity check the API
+ CHECK_EQ(
+ v8::internal::Internals::GetRoot(reinterpret_cast<v8::Isolate*>(this),
+ static_cast<int>(RootIndex::kNullValue)),
+ ReadOnlyRoots(this).null_value().ptr());
+#undef STATIC_ROOTS_FAILED_MSG
+#endif // V8_STATIC_ROOTS_BOOL
+}
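+
+// Editorial sketch, not part of this change: with V8_STATIC_ROOTS_BOOL the
+// read-only roots live at build-time-constant offsets from the cage base, so
+// a root identity check can reduce to a single 32-bit comparison against a
+// generated constant (constant name assumed from static-roots.h), roughly:
+//
+//   bool IsUndefinedSketch(Tagged_t compressed_obj) {
+//     return compressed_obj == StaticReadOnlyRoot::kUndefinedValue;
+//   }
+//
+// VerifyStaticRoots() asserts that the generated table still matches the heap
+// actually produced from the snapshot.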
+
bool Isolate::Init(SnapshotData* startup_snapshot_data,
SnapshotData* read_only_snapshot_data,
SnapshotData* shared_heap_snapshot_data, bool can_rehash) {
TRACE_ISOLATE(init);
- const bool create_heap_objects = (read_only_snapshot_data == nullptr);
- // We either have all or none.
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ CHECK_EQ(V8HeapCompressionScheme::base(), cage_base());
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+ const bool create_heap_objects = (shared_heap_snapshot_data == nullptr);
+ // We either have both or none.
DCHECK_EQ(create_heap_objects, startup_snapshot_data == nullptr);
- DCHECK_EQ(create_heap_objects, shared_heap_snapshot_data == nullptr);
+ DCHECK_EQ(create_heap_objects, read_only_snapshot_data == nullptr);
// Code space setup requires the permissions to be set to default state.
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
@@ -4109,18 +4220,18 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
- Isolate* attach_to_shared_space_isolate = nullptr;
+ Isolate* use_shared_space_isolate = nullptr;
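+ // Summary added for clarity, inferred from the code below: the first isolate
+ // that is initialized with a flag requiring the shared heap becomes the
+ // process-wide shared space isolate; every isolate initialized afterwards
+ // records it in use_shared_space_isolate and attaches to its global
+ // safepoint as a client further down in this function.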
- if (HasFlagThatRequiresSharedHeap() && v8_flags.shared_space) {
+ if (HasFlagThatRequiresSharedHeap()) {
if (process_wide_shared_space_isolate_) {
owns_shareable_data_ = false;
+ use_shared_space_isolate = process_wide_shared_space_isolate_;
} else {
process_wide_shared_space_isolate_ = this;
+ use_shared_space_isolate = this;
is_shared_space_isolate_ = true;
DCHECK(owns_shareable_data_);
}
-
- attach_to_shared_space_isolate = process_wide_shared_space_isolate_;
}
CHECK_IMPLIES(is_shared_space_isolate_, V8_CAN_CREATE_SHARED_HEAP_BOOL);
@@ -4158,7 +4269,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
interpreter_ = new interpreter::Interpreter(this);
bigint_processor_ = bigint::Processor::New(new BigIntPlatform(this));
- if (is_shared_ || is_shared_space_isolate_) {
+ if (is_shared_space_isolate_) {
global_safepoint_ = std::make_unique<GlobalSafepoint>(this);
}
@@ -4201,81 +4312,82 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// Lock clients_mutex_ in order to prevent shared GCs from other clients
// during deserialization.
- base::Optional<base::MutexGuard> clients_guard;
+ base::Optional<base::RecursiveMutexGuard> clients_guard;
- if (Isolate* isolate =
- shared_isolate_ ? shared_isolate_ : attach_to_shared_space_isolate) {
- clients_guard.emplace(&isolate->global_safepoint()->clients_mutex_);
+ if (use_shared_space_isolate && !is_shared_space_isolate()) {
+ clients_guard.emplace(
+ &use_shared_space_isolate->global_safepoint()->clients_mutex_);
+ use_shared_space_isolate->global_safepoint()->AppendClient(this);
}
- // The main thread LocalHeap needs to be set up when attaching to the shared
- // isolate. Otherwise a global safepoint would find an isolate without
- // LocalHeaps and not wait until this thread is ready for a GC.
- AttachToSharedIsolate();
- AttachToSharedSpaceIsolate(attach_to_shared_space_isolate);
+ shared_space_isolate_ = use_shared_space_isolate;
+
+ isolate_data_.is_shared_space_isolate_flag_ = is_shared_space_isolate();
+ isolate_data_.uses_shared_heap_flag_ = has_shared_space();
- // Ensure that we use at most one of shared_isolate() and
- // shared_space_isolate().
- DCHECK_IMPLIES(shared_isolate(), !shared_space_isolate());
- DCHECK_IMPLIES(shared_space_isolate(), !shared_isolate());
+ if (use_shared_space_isolate && !is_shared_space_isolate() &&
+ use_shared_space_isolate->heap()
+ ->incremental_marking()
+ ->IsMajorMarking()) {
+ heap_.SetIsMarkingFlag(true);
+ }
- // SetUp the object heap.
+ // Set up the object heap.
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp(main_thread_local_heap());
+ InitializeIsShortBuiltinCallsEnabled();
+ if (!create_heap_objects) {
+ // Must be done before deserializing RO space, since RO space may contain
+ // builtin Code objects which point into the (potentially remapped)
+ // embedded blob.
+ MaybeRemapEmbeddedBuiltinsIntoCodeRange();
+ }
ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
heap_.SetUpSpaces(isolate_data_.new_allocation_info_,
isolate_data_.old_allocation_info_);
+ DCHECK_EQ(this, Isolate::Current());
+ PerIsolateThreadData* const current_data = CurrentPerIsolateThreadData();
+ DCHECK_EQ(current_data->isolate(), this);
+ SetIsolateThreadLocals(this, current_data);
+
if (OwnsStringTables()) {
string_table_ = std::make_shared<StringTable>(this);
string_forwarding_table_ = std::make_shared<StringForwardingTable>(this);
} else {
// Only refer to shared string table after attaching to the shared isolate.
- DCHECK(has_shared_heap());
- DCHECK(!is_shared());
+ DCHECK(has_shared_space());
DCHECK(!is_shared_space_isolate());
- string_table_ = shared_heap_isolate()->string_table_;
- string_forwarding_table_ = shared_heap_isolate()->string_forwarding_table_;
- }
-
- if (V8_SHORT_BUILTIN_CALLS_BOOL && v8_flags.short_builtin_calls) {
-#if defined(V8_OS_ANDROID)
- // On Android, the check is not operative to detect memory, and re-embedded
- // builtins don't have a memory cost.
- is_short_builtin_calls_enabled_ = true;
-#else
- // Check if the system has more than 4GB of physical memory by comparing the
- // old space size with respective threshold value.
- is_short_builtin_calls_enabled_ = (heap_.MaxOldGenerationSize() >=
- kShortBuiltinCallsOldSpaceSizeThreshold);
-#endif // defined(V8_OS_ANDROID)
- // Additionally, enable if there is already a process-wide CodeRange that
- // has re-embedded builtins.
- if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
- std::shared_ptr<CodeRange> code_range =
- CodeRange::GetProcessWideCodeRange();
- if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
- is_short_builtin_calls_enabled_ = true;
- }
- }
- if (V8_ENABLE_NEAR_CODE_RANGE_BOOL) {
- // The short builtin calls could still be enabled if allocated code range
- // is close enough to embedded builtins so that the latter could be
- // reached using pc-relative (short) calls/jumps.
- is_short_builtin_calls_enabled_ |=
- GetShortBuiltinsCallRegion().contains(heap_.code_region());
- }
+ string_table_ = shared_space_isolate()->string_table_;
+ string_forwarding_table_ = shared_space_isolate()->string_forwarding_table_;
}
#ifdef V8_EXTERNAL_CODE_SPACE
- if (heap_.code_range()) {
- code_cage_base_ = ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(
- heap_.code_range()->base());
- } else {
- CHECK(jitless_);
- // In jitless mode the code space pages will be allocated in the main
- // pointer compression cage.
- code_cage_base_ =
- ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(cage_base());
+ {
+ VirtualMemoryCage* code_cage;
+ if (heap_.code_range()) {
+ code_cage = heap_.code_range();
+ } else {
+ CHECK(jitless_);
+ // In jitless mode the code space pages will be allocated in the main
+ // pointer compression cage.
+ code_cage = GetPtrComprCage();
+ }
+ code_cage_base_ = ExternalCodeCompressionScheme::PrepareCageBaseAddress(
+ code_cage->base());
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ CHECK_EQ(ExternalCodeCompressionScheme::base(), code_cage_base_);
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+ // Ensure that ExternalCodeCompressionScheme is applicable to all objects
+ // stored in the code cage.
+ using ComprScheme = ExternalCodeCompressionScheme;
+ Address base = code_cage->base();
+ Address last = base + code_cage->size() - 1;
+ PtrComprCageBase code_cage_base{code_cage_base_};
+ CHECK_EQ(base, ComprScheme::DecompressTagged(
+ code_cage_base, ComprScheme::CompressObject(base)));
+ CHECK_EQ(last, ComprScheme::DecompressTagged(
+ code_cage_base, ComprScheme::CompressObject(last)));
}
#endif // V8_EXTERNAL_CODE_SPACE
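+
+ // Editorial sketch of the invariant checked above, under the simplifying
+ // assumption that compression keeps the low 32 bits of an address and
+ // decompression adds them back onto the code cage base:
+ //
+ //   Tagged_t compressed = ComprScheme::CompressObject(addr);  // low bits
+ //   Address roundtrip = code_cage_base_ + compressed;         // decompress
+ //   CHECK_EQ(addr, roundtrip);  // holds for both ends of the code cage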
@@ -4287,14 +4399,14 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
isolate_data_.shared_external_pointer_table_ = new ExternalPointerTable();
shared_external_pointer_table().Init(this);
} else {
- DCHECK(has_shared_heap());
+ DCHECK(has_shared_space());
isolate_data_.shared_external_pointer_table_ =
- shared_heap_isolate()->isolate_data_.shared_external_pointer_table_;
+ shared_space_isolate()->isolate_data_.shared_external_pointer_table_;
}
#endif // V8_COMPRESS_POINTERS
#if V8_ENABLE_WEBASSEMBLY
- if (!is_shared()) wasm::GetWasmEngine()->AddIsolate(this);
+ wasm::GetWasmEngine()->AddIsolate(this);
#endif // V8_ENABLE_WEBASSEMBLY
#if defined(V8_OS_WIN) && defined(V8_ENABLE_ETW_STACK_WALKING)
@@ -4304,12 +4416,12 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
#endif // defined(V8_OS_WIN)
if (setup_delegate_ == nullptr) {
- setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
+ setup_delegate_ = new SetupIsolateDelegate;
}
if (!v8_flags.inline_new) heap_.DisableInlineAllocation();
- if (!setup_delegate_->SetupHeap(&heap_)) {
+ if (!setup_delegate_->SetupHeap(this, create_heap_objects)) {
V8::FatalProcessOutOfMemory(this, "heap object creation");
}
@@ -4330,7 +4442,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
if (create_heap_objects) {
builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
- setup_delegate_->SetupBuiltins(this);
+ setup_delegate_->SetupBuiltins(this, true);
builtins_constants_table_builder_->Finalize();
delete builtins_constants_table_builder_;
@@ -4338,8 +4450,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
CreateAndSetEmbeddedBlob();
} else {
- setup_delegate_->SetupBuiltins(this);
- MaybeRemapEmbeddedBuiltinsIntoCodeRange();
+ setup_delegate_->SetupBuiltins(this, false);
}
// Initialize custom memcopy and memmove functions (must happen after
@@ -4376,6 +4487,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
can_rehash);
startup_deserializer.DeserializeIntoIsolate();
}
+ if (DEBUG_BOOL) VerifyStaticRoots();
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->Initialize();
@@ -4443,16 +4555,6 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
sampling_flags);
}
-#if defined(V8_OS_WIN64)
- if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- heap()->code_range()->AtomicIncrementUnwindInfoUseCount() == 0) {
- const base::AddressRegion& code_region = heap()->code_region();
- void* start = reinterpret_cast<void*>(code_region.begin());
- size_t size_in_bytes = code_region.size();
- win64_unwindinfo::RegisterNonABICompliantCodeRange(start, size_in_bytes);
- }
-#endif // V8_OS_WIN64
-
if (create_heap_objects && v8_flags.profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
@@ -4475,15 +4577,24 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
.slot(RootIndex::kActiveContinuation)
.store(*continuation);
}
-#endif
+#if V8_STATIC_ROOTS_BOOL
+ if (!create_heap_objects) {
+ // Protect the payload of wasm null.
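+ // Editorial note, inferred from this code: decommitting the payload pages
+ // (everything past the map word) marks them inaccessible, so any field load
+ // from the wasm null object faults and wasm null checks can lean on the
+ // memory protection trap instead of explicit branches.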
+ page_allocator()->DecommitPages(
+ reinterpret_cast<void*>(factory()->wasm_null()->payload()),
+ WasmNull::kSize - kTaggedSize);
+ }
+#endif // V8_STATIC_ROOTS_BOOL
+#endif // V8_ENABLE_WEBASSEMBLY
heap()->AddGCPrologueCallback(ResetBeforeGC, kGCTypeMarkSweepCompact,
nullptr);
// Isolate initialization allocates long living objects that should be
- // pretentured to old space.
- DCHECK_IMPLIES(heap()->new_space(), (heap()->new_space()->Size() == 0) &&
- (heap()->gc_count() == 0));
+ // pretenured to old space.
+ DCHECK_IMPLIES(heap()->new_space(), heap()->new_space()->Size() == 0);
+ DCHECK_IMPLIES(heap()->new_lo_space(), heap()->new_lo_space()->Size() == 0);
+ DCHECK_EQ(heap()->gc_count(), 0);
initialized_ = true;
@@ -4493,6 +4604,10 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
void Isolate::Enter() {
Isolate* current_isolate = nullptr;
PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
+
+ // Set the stack start for the main thread that enters the isolate.
+ heap()->SetStackStart(base::Stack::GetStackStart());
+
if (current_data != nullptr) {
current_isolate = current_data->isolate_;
DCHECK_NOT_NULL(current_isolate);
@@ -4775,6 +4890,24 @@ void Isolate::UpdateTypedArraySpeciesLookupChainProtectorOnSetPrototype(
}
}
+void Isolate::UpdateNumberStringPrototypeNoReplaceProtectorOnSetPrototype(
+ Handle<JSObject> object) {
+ if (!Protectors::IsNumberStringPrototypeNoReplaceIntact(this)) {
+ return;
+ }
+ // We need to protect the prototype chain of `Number.prototype` and
+ // `String.prototype`.
+ // Since `Object.prototype.__proto__` is not writable, we can assume it
+ // doesn't occur here. We detect `Number.prototype` and `String.prototype` by
+ // checking for a prototype that is a JSPrimitiveWrapper. This is a safe
+ // approximation. Using JSPrimitiveWrapper as prototype should be
+ // sufficiently rare.
+ DCHECK(!object->IsJSObjectPrototype());
+ if (object->map().is_prototype_map() && (object->IsJSPrimitiveWrapper())) {
+ Protectors::InvalidateNumberStringPrototypeNoReplace(this);
+ }
+}
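+
+// Editorial example (assumed, not taken from the patch): JavaScript such as
+//
+//   Object.setPrototypeOf(Number.prototype, someProto);
+//
+// reaches this hook with `object` being Number.prototype, whose map is a
+// prototype map and which is a JSPrimitiveWrapper, so the protector is
+// invalidated and fast paths relying on the original Number.prototype /
+// String.prototype chains are disabled.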
+
static base::RandomNumberGenerator* ensure_rng_exists(
base::RandomNumberGenerator** rng, int seed) {
if (*rng == nullptr) {
@@ -4815,10 +4948,6 @@ int Isolate::GenerateIdentityHash(uint32_t mask) {
return hash != 0 ? hash : 1;
}
-CodeLookupResult Isolate::FindCodeObject(Address a) {
- return heap()->GcSafeFindCodeForInnerPointer(a);
-}
-
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
@@ -4895,7 +5024,8 @@ void Isolate::FireCallCompletedCallbackInternal(
bool perform_checkpoint =
microtask_queue &&
- microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kAuto;
+ microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kAuto &&
+ !is_execution_terminating();
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
if (perform_checkpoint) microtask_queue->PerformCheckpoint(isolate);
@@ -4999,7 +5129,7 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
ToApiHandle<v8::FixedArray>(import_assertions_array)),
MaybeHandle<JSPromise>());
} else {
- // TODO(cbruni, v8:12302): Avoid creating tempory ScriptOrModule objects.
+ // TODO(cbruni, v8:12302): Avoid creating temporary ScriptOrModule objects.
auto script_or_module = i::Handle<i::ScriptOrModule>::cast(
this->factory()->NewStruct(i::SCRIPT_OR_MODULE_TYPE));
script_or_module->set_resource_name(*resource_name);
@@ -5160,6 +5290,8 @@ MaybeHandle<NativeContext> Isolate::RunHostCreateShadowRealmContextCallback() {
Handle<Context> shadow_realm_context_handle =
v8::Utils::OpenHandle(*shadow_realm_context);
DCHECK(shadow_realm_context_handle->IsNativeContext());
+ shadow_realm_context_handle->set_scope_info(
+ ReadOnlyRoots(this).shadow_realm_scope_info());
return Handle<NativeContext>::cast(shadow_realm_context_handle);
}
@@ -5306,7 +5438,7 @@ void Isolate::OnPromiseThen(Handle<JSPromise> promise) {
if (!HasAsyncEventDelegate()) return;
Maybe<debug::DebugAsyncActionType> action_type =
Nothing<debug::DebugAsyncActionType>();
- for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
+ for (JavaScriptStackFrameIterator it(this); !it.done(); it.Advance()) {
std::vector<Handle<SharedFunctionInfo>> infos;
it.frame()->GetFunctions(&infos);
for (auto it = infos.rbegin(); it != infos.rend(); ++it) {
@@ -5682,18 +5814,19 @@ void Isolate::clear_cached_icu_objects() {
#endif // V8_INTL_SUPPORT
-bool StackLimitCheck::HandleInterrupt(Isolate* isolate) {
+bool StackLimitCheck::HandleStackOverflowAndTerminationRequest() {
DCHECK(InterruptRequested());
- if (HasOverflowed()) {
- isolate->StackOverflow();
+ if (V8_UNLIKELY(HasOverflowed())) {
+ isolate_->StackOverflow();
return true;
}
- if (isolate->stack_guard()->HasTerminationRequest()) {
- isolate->TerminateExecution();
+ if (V8_UNLIKELY(isolate_->stack_guard()->HasTerminationRequest())) {
+ isolate_->TerminateExecution();
return true;
}
return false;
}
+
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
@@ -5709,18 +5842,12 @@ SaveContext::SaveContext(Isolate* isolate) : isolate_(isolate) {
if (!isolate->context().is_null()) {
context_ = Handle<Context>(isolate->context(), isolate);
}
-
- c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
}
SaveContext::~SaveContext() {
isolate_->set_context(context_.is_null() ? Context() : *context_);
}
-bool SaveContext::IsBelowFrame(CommonFrame* frame) {
- return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
-}
-
SaveAndSwitchContext::SaveAndSwitchContext(Isolate* isolate,
Context new_context)
: SaveContext(isolate) {
@@ -5928,53 +6055,6 @@ Address Isolate::store_to_stack_count_address(const char* function_name) {
return reinterpret_cast<Address>(&map[name].second);
}
-void Isolate::AttachToSharedIsolate() {
- DCHECK(!attached_to_shared_isolate_);
-
- if (shared_isolate_) {
- DCHECK(shared_isolate_->is_shared());
- DCHECK(!v8_flags.shared_space);
- shared_isolate_->global_safepoint()->AppendClient(this);
- }
-
-#if DEBUG
- attached_to_shared_isolate_ = true;
-#endif // DEBUG
-}
-
-void Isolate::DetachFromSharedIsolate() {
- DCHECK(attached_to_shared_isolate_);
-
- if (shared_isolate_) {
- DCHECK(!v8_flags.shared_space);
- shared_isolate_->global_safepoint()->RemoveClient(this);
- shared_isolate_ = nullptr;
- }
-
-#if DEBUG
- attached_to_shared_isolate_ = false;
-#endif // DEBUG
-}
-
-void Isolate::AttachToSharedSpaceIsolate(Isolate* shared_space_isolate) {
- DCHECK(!shared_space_isolate_.has_value());
- shared_space_isolate_ = shared_space_isolate;
- if (shared_space_isolate) {
- DCHECK(v8_flags.shared_space);
- shared_space_isolate->global_safepoint()->AppendClient(this);
- }
-}
-
-void Isolate::DetachFromSharedSpaceIsolate() {
- DCHECK(shared_space_isolate_.has_value());
- Isolate* shared_space_isolate = shared_space_isolate_.value();
- if (shared_space_isolate) {
- DCHECK(v8_flags.shared_space);
- shared_space_isolate->global_safepoint()->RemoveClient(this);
- }
- shared_space_isolate_.reset();
-}
-
#ifdef V8_COMPRESS_POINTERS
ExternalPointerHandle Isolate::GetOrCreateWaiterQueueNodeExternalPointer() {
ExternalPointerHandle handle;
@@ -6056,11 +6136,12 @@ class DefaultWasmAsyncResolvePromiseTask : public v8::Task {
v8::Local<v8::Promise::Resolver> resolver = resolver_.Get(isolate_);
v8::Local<v8::Value> result = result_.Get(isolate_);
- if (success_ == WasmAsyncSuccess::kSuccess) {
- CHECK(resolver->Resolve(context, result).FromJust());
- } else {
- CHECK(resolver->Reject(context, result).FromJust());
- }
+ Maybe<bool> ret = success_ == WasmAsyncSuccess::kSuccess
+ ? resolver->Resolve(context, result)
+ : resolver->Reject(context, result);
+ // It's guaranteed that no exceptions will be thrown by these
+ // operations, but execution might be terminating.
+ CHECK(ret.IsJust() ? ret.FromJust() : isolate_->IsExecutionTerminating());
}
private:
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 1f374165b6..929435239f 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -32,6 +32,7 @@
#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/handles/traced-handles.h"
+#include "src/heap/base/stack.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
@@ -510,6 +511,7 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
V(WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback, nullptr) \
+ V(WasmGCEnabledCallback, wasm_gc_enabled_callback, nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -538,8 +540,6 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(int, embedder_wrapper_type_index, -1) \
V(int, embedder_wrapper_object_index, -1) \
V(compiler::NodeObserver*, node_observer, nullptr) \
- /* Used in combination with --script-run-delay-once */ \
- V(bool, did_run_script_delay, false) \
V(bool, javascript_execution_assert, true) \
V(bool, javascript_execution_throws, true) \
V(bool, javascript_execution_dump, true) \
@@ -553,7 +553,7 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
inline type name() const { return thread_local_top()->name##_; }
#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
- type* name##_address() { return &thread_local_top()->name##_; }
+ inline type* name##_address() { return &thread_local_top()->name##_; }
// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
@@ -680,7 +680,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// at the same time, this should be prevented using external locking.
void Enter();
- // Exits the current thread. The previosuly entered Isolate is restored
+ // Exits the current thread. The previously entered Isolate is restored
// for the thread.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
@@ -768,6 +768,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsSharedArrayBufferConstructorEnabled(Handle<Context> context);
+ bool IsWasmGCEnabled(Handle<Context> context);
+ bool IsWasmStringRefEnabled(Handle<Context> context);
+ bool IsWasmInliningEnabled(Handle<Context> context);
+
THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
@@ -1155,8 +1159,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// compression cage, and the kPtrComprCageBaseRegister is set to this
// value. When pointer compression is off, this is always kNullAddress.
Address cage_base() const {
- DCHECK_IMPLIES(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL &&
- !COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL,
+ DCHECK_IMPLIES(!COMPRESS_POINTERS_BOOL,
isolate_data()->cage_base() == kNullAddress);
return isolate_data()->cage_base();
}
@@ -1343,6 +1346,19 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
#ifdef DEBUG
static size_t non_disposed_isolates() { return non_disposed_isolates_; }
+
+ // Turbofan's string builder optimization can introduce SlicedStrings that
+ // are shorter than SlicedString::kMinLength characters. Their live range and
+ // scope are pretty limited, but they can be visible to the GC, which
+ // shouldn't treat them as invalid. When such short SlicedStrings are
+ // introduced, Turbofan sets has_turbofan_string_builders_ to true, which
+ // SlicedString::SlicedStringVerify checks when verifying a SlicedString to
+ // decide whether a too-short SlicedString is an issue or not.
+ // See the compiler's StringBuilderOptimizer class for more details.
+ bool has_turbofan_string_builders() { return has_turbofan_string_builders_; }
+ void set_has_turbofan_string_builders() {
+ has_turbofan_string_builders_ = true;
+ }
#endif
v8::internal::Factory* factory() {
@@ -1478,6 +1494,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
void UpdateTypedArraySpeciesLookupChainProtectorOnSetPrototype(
Handle<JSObject> object);
+ void UpdateNumberStringPrototypeNoReplaceProtectorOnSetPrototype(
+ Handle<JSObject> object);
void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
UpdateNoElementsProtectorOnSetElement(object);
}
@@ -1518,7 +1536,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
DCHECK_NOT_NULL(optimizing_compile_dispatcher_);
return optimizing_compile_dispatcher_;
}
- // Flushes all pending concurrent optimzation jobs from the optimizing
+ // Flushes all pending concurrent optimization jobs from the optimizing
// compile dispatcher's queue.
void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);
@@ -1556,9 +1574,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// with the provided mask.
int GenerateIdentityHash(uint32_t mask);
- // Given an address occupied by a live code object, return that object.
- CodeLookupResult FindCodeObject(Address a);
-
int NextOptimizationId() {
int id = next_optimization_id_++;
if (!Smi::IsValid(next_optimization_id_)) {
@@ -1695,8 +1710,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// shared heap object cache holds objects shared among Isolates. Otherwise
// this object cache is per-Isolate like the startup object cache.
std::vector<Object>* shared_heap_object_cache() {
- if (has_shared_heap()) {
- return &shared_heap_isolate()->shared_heap_object_cache_;
+ if (has_shared_space()) {
+ return &shared_space_isolate()->shared_heap_object_cache_;
}
return &shared_heap_object_cache_;
}
@@ -1710,8 +1725,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
// Hashes bits of the Isolate that are relevant for embedded builtins. In
- // particular, the embedded blob requires builtin Code object layout and the
- // builtins constants table to remain unchanged from build-time.
+ // particular, the embedded blob requires builtin InstructionStream object
+ // layout and the builtins constants table to remain unchanged from
+ // build-time.
size_t HashIsolateForEmbeddedBlob();
static const uint8_t* CurrentEmbeddedBlobCode();
@@ -1727,7 +1743,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
const uint8_t* embedded_blob_data() const;
uint32_t embedded_blob_data_size() const;
- // Returns true if short bultin calls optimization is enabled for the Isolate.
+ // Returns true if short builtin calls optimization is enabled for the
+ // Isolate.
bool is_short_builtin_calls_enabled() const {
return V8_SHORT_BUILTIN_CALLS_BOOL && is_short_builtin_calls_enabled_;
}
@@ -1966,43 +1983,19 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
using IsDebugActive = HasAsyncEventDelegate::Next<bool, 1>;
};
- bool is_shared() const { return is_shared_; }
- Isolate* shared_isolate() const {
- DCHECK(attached_to_shared_isolate_);
- return shared_isolate_;
- }
-
+ // Returns true when this isolate contains the shared spaces.
bool is_shared_space_isolate() const { return is_shared_space_isolate_; }
- Isolate* shared_space_isolate() const {
- return shared_space_isolate_.value();
- }
-
- void set_shared_isolate(Isolate* shared_isolate) {
- DCHECK(shared_isolate->is_shared());
- DCHECK_NULL(shared_isolate_);
- DCHECK(!attached_to_shared_isolate_);
- DCHECK(!v8_flags.shared_space);
- shared_isolate_ = shared_isolate;
- owns_shareable_data_ = false;
- }
-
- // Returns true when this isolate supports allocation in shared spaces.
- bool has_shared_heap() const {
- return v8_flags.shared_space ? shared_space_isolate() : shared_isolate();
- }
// Returns the isolate that owns the shared spaces.
- Isolate* shared_heap_isolate() const {
- DCHECK(has_shared_heap());
- Isolate* isolate =
- v8_flags.shared_space ? shared_space_isolate() : shared_isolate();
- DCHECK_NOT_NULL(isolate);
+ Isolate* shared_space_isolate() const {
+ DCHECK(has_shared_space());
+ Isolate* isolate = shared_space_isolate_.value();
+ DCHECK_NOT_NULL(isolate);
return isolate;
}
- bool is_shared_heap_isolate() const {
- return is_shared() || is_shared_space_isolate();
- }
+ // Returns true when this isolate supports allocation in shared spaces.
+ bool has_shared_space() const { return shared_space_isolate_.value(); }
GlobalSafepoint* global_safepoint() const { return global_safepoint_.get(); }
@@ -2013,16 +2006,20 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// TODO(pthier): Unify with owns_shareable_data() once the flag
// --shared-string-table is removed.
bool OwnsStringTables() {
- return !v8_flags.shared_string_table || is_shared() ||
- is_shared_space_isolate();
+ return !v8_flags.shared_string_table || is_shared_space_isolate();
}
#if USE_SIMULATOR
SimulatorData* simulator_data() { return simulator_data_; }
#endif
+ ::heap::base::Stack& stack() { return stack_; }
+
#ifdef V8_ENABLE_WEBASSEMBLY
wasm::StackMemory*& wasm_stacks() { return wasm_stacks_; }
+ // Update the thread local's Stack object so that it is aware of the new stack
+ // start and the inactive stacks.
+ void RecordStackSwitchForScanning();
#endif
// Access to the global "locals block list cache". Caches outer-stack
@@ -2035,12 +2032,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Returns either `TheHole` or `StringSet`.
Object LocalsBlockListCacheGet(Handle<ScopeInfo> scope_info);
+ void VerifyStaticRoots();
+
private:
- explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
- bool is_shared);
+ explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
- static Isolate* Allocate(bool is_shared);
+ static Isolate* Allocate();
bool Init(SnapshotData* startup_snapshot_data,
SnapshotData* read_only_snapshot_data,
@@ -2097,18 +2095,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
EntryStackItem* previous_item;
};
- // When a feature flag that requires the shared heap is passed, a shared
- // isolate is created to hold the shared allocations. The shared isolate is
- // created by the first isolate to be created in the process, which is
- // considered the main isolate and owns the lifetime of the shared
- // isolate. The main isolate deletes the shared isolate when it itself is
- // deleted.
- static base::LazyMutex process_wide_shared_isolate_mutex_;
- static Isolate* process_wide_shared_isolate_;
-
- static Isolate* GetProcessWideSharedIsolate(bool* created_shared_isolate);
- static void DeleteProcessWideSharedIsolate();
-
static Isolate* process_wide_shared_space_isolate_;
void Deinit();
@@ -2153,24 +2139,11 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Returns the Exception sentinel.
Object ThrowInternal(Object exception, MessageLocation* location);
- // These methods add/remove the isolate to/from the list of clients in the
- // shared isolate. Isolates in the client list need to participate in a global
- // safepoint.
- void AttachToSharedIsolate();
- void DetachFromSharedIsolate();
-
- void AttachToSharedSpaceIsolate(Isolate* shared_space_isolate);
- void DetachFromSharedSpaceIsolate();
-
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
IsolateData isolate_data_;
- // Set to true if this isolate is used as shared heap. This field must be set
- // before Heap is constructed, as Heap's constructor consults it.
- const bool is_shared_;
-
// Set to true if this isolate is used as main isolate with a shared space.
bool is_shared_space_isolate_{false};
@@ -2291,7 +2264,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_ = false;
- // True if short bultin calls optimization is enabled.
+ // True if short builtin calls optimization is enabled.
bool is_short_builtin_calls_enabled_ = false;
// True if the isolate is in background. This flag is used
@@ -2306,11 +2279,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Only false for client isolates attached to a shared isolate.
bool owns_shareable_data_ = true;
- // True if this isolate is attached to a shared isolate, and this isolate is
- // the main isolate in the process and owns the lifetime of the shared
- // isolate.
- bool owns_shared_isolate_ = false;
-
bool log_object_relocation_ = false;
#ifdef V8_EXTERNAL_CODE_SPACE
@@ -2326,6 +2294,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static std::atomic<size_t> non_disposed_isolates_;
JSObject::SpillInformation js_spill_information_;
+
+ std::atomic<bool> has_turbofan_string_builders_ = false;
#endif
Debug* debug_ = nullptr;
@@ -2422,6 +2392,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void InitializeDefaultEmbeddedBlob();
void CreateAndSetEmbeddedBlob();
+ void InitializeIsShortBuiltinCallsEnabled();
void MaybeRemapEmbeddedBuiltinsIntoCodeRange();
void TearDownEmbeddedBlob();
void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
@@ -2476,12 +2447,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
base::Mutex thread_data_table_mutex_;
ThreadDataTable thread_data_table_;
- // Stores the shared isolate for this client isolate. nullptr for shared
- // isolates or when no shared isolate is used.
- //
- // When non-null, it is identical to process_wide_shared_isolate_.
- Isolate* shared_isolate_ = nullptr;
-
// Stores the isolate containing the shared space.
base::Optional<Isolate*> shared_space_isolate_;
@@ -2492,13 +2457,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
kNullExternalPointerHandle;
#endif
-#if DEBUG
- // Set to true once during isolate initialization right when attaching to the
- // shared isolate. If there was no shared isolate given it will still be set
- // to true. After this point invocations of shared_isolate() are valid.
- bool attached_to_shared_isolate_ = false;
-#endif // DEBUG
-
// Used to track and safepoint all client isolates attached to this shared
// isolate.
std::unique_ptr<GlobalSafepoint> global_safepoint_;
@@ -2514,6 +2472,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// The mutex only guards adding pages; the retrieval is signal safe.
base::Mutex code_pages_mutex_;
+ // Stack information for the main thread.
+ ::heap::base::Stack stack_;
+
#ifdef V8_ENABLE_WEBASSEMBLY
wasm::StackMemory* wasm_stacks_;
#endif
@@ -2549,6 +2510,7 @@ extern thread_local Isolate* g_current_isolate_ V8_CONSTINIT;
#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR
+#undef THREAD_LOCAL_TOP_ADDRESS
// SaveContext scopes save the current context on the Isolate on creation, and
// restore it on destruction.
@@ -2558,15 +2520,9 @@ class V8_EXPORT_PRIVATE SaveContext {
~SaveContext();
- Handle<Context> context() { return context_; }
-
- // Returns true if this save context is below a given JavaScript frame.
- bool IsBelowFrame(CommonFrame* frame);
-
private:
Isolate* const isolate_;
Handle<Context> context_;
- Address c_entry_fp_;
};
// Like SaveContext, but also switches the Context to a new one in the
@@ -2629,34 +2585,33 @@ class StackLimitCheck {
}
static bool HasOverflowed(LocalIsolate* local_isolate);
+ // Use this to check for stack-overflow when entering runtime from JS code.
+ bool JsHasOverflowed(uintptr_t gap = 0) const;
+
// Use this to check for interrupt request in C++ code.
V8_INLINE bool InterruptRequested() {
StackGuard* stack_guard = isolate_->stack_guard();
return GetCurrentStackPosition() < stack_guard->climit();
}
- // Handle interripts if InterruptRequested was true.
+ // Precondition: InterruptRequested == true.
// Returns true if any interrupt (overflow or termination) was handled, in
- // which case the caller should prevent further JS execution.
- V8_EXPORT_PRIVATE bool HandleInterrupt(Isolate* isolate);
-
- // Use this to check for stack-overflow when entering runtime from JS code.
- bool JsHasOverflowed(uintptr_t gap = 0) const;
+ // which case the caller must prevent further JS execution.
+ V8_EXPORT_PRIVATE bool HandleStackOverflowAndTerminationRequest();
private:
- Isolate* isolate_;
+ Isolate* const isolate_;
};
// This macro may be used in a context that disallows JS execution.
// That is why it checks only for a stack overflow and termination.
-#define STACK_CHECK(isolate, result_value) \
- do { \
- StackLimitCheck stack_check(isolate); \
- if (stack_check.InterruptRequested()) { \
- if (stack_check.HandleInterrupt(isolate)) { \
- return result_value; \
- } \
- } \
+#define STACK_CHECK(isolate, result_value) \
+ do { \
+ StackLimitCheck stack_check(isolate); \
+ if (V8_UNLIKELY(stack_check.InterruptRequested()) && \
+ V8_UNLIKELY(stack_check.HandleStackOverflowAndTerminationRequest())) { \
+ return result_value; \
+ } \
} while (false)
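+
+// Editorial usage sketch (hypothetical caller, names assumed): a runtime path
+// that may recurse deeply bails out early when an interrupt is pending:
+//
+//   MaybeHandle<Object> SketchRecurse(Isolate* isolate, int depth) {
+//     STACK_CHECK(isolate, MaybeHandle<Object>());
+//     // ... recurse with depth + 1 ...
+//   }
+//
+// By the time STACK_CHECK returns result_value, the stack overflow exception
+// has been thrown or termination has been scheduled on the isolate.
+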
class StackTraceFailureMessage {
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index f67bc78452..2cddb1567e 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -67,6 +67,9 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
base::SharedMutex* internalized_string_access() {
return isolate_->internalized_string_access();
}
+ base::SharedMutex* shared_function_info_access() {
+ return isolate_->shared_function_info_access();
+ }
const AstStringConstants* ast_string_constants() {
return isolate_->ast_string_constants();
}
@@ -79,6 +82,10 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
return isolate_->v8_file_logger();
}
+ bool is_precise_binary_code_coverage() const {
+ return isolate_->is_precise_binary_code_coverage();
+ }
+
v8::internal::LocalFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
// undefined behavior (as static_cast cannot cast across private bases).
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.cc b/deps/v8/src/execution/loong64/frame-constants-loong64.cc
index 4bd809266c..86d271d207 100644
--- a/deps/v8/src/execution/loong64/frame-constants-loong64.cc
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.cc
@@ -26,6 +26,12 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.h b/deps/v8/src/execution/loong64/frame-constants-loong64.h
index a88758d256..9047d728e0 100644
--- a/deps/v8/src/execution/loong64/frame-constants-loong64.h
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.h
@@ -17,7 +17,7 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize;
};
class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
@@ -51,10 +51,10 @@ class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
- // {a0 ... a7, t0 ... t5, s0, s1, s2, s5, s7, s8}
+ // {a0 ... a7, t0 ... t5, s0, s1, s2, s5, s7}
static constexpr RegList kPushedGpRegs = {a0, a1, a2, a3, a4, a5, a6,
a7, t0, t1, t2, t3, t4, t5,
- s0, s1, s2, s5, s7, s8};
+ s0, s1, s2, s5, s7};
// {f0, f1, f2, ... f27, f28}
static constexpr DoubleRegList kPushedFpRegs = {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc
index 9e5f13b1e1..7c1e4b1d8f 100644
--- a/deps/v8/src/execution/loong64/simulator-loong64.cc
+++ b/deps/v8/src/execution/loong64/simulator-loong64.cc
@@ -284,6 +284,10 @@ void Loong64Debugger::PrintAllRegsIncludingFPU() {
}
void Loong64Debugger::Debug() {
+ if (v8_flags.correctness_fuzzer_suppressions) {
+ PrintF("Debugger disabled for differential fuzzing.\n");
+ return;
+ }
intptr_t last_pc = -1;
bool done = false;
@@ -1723,7 +1727,7 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
}
void Simulator::WriteConditionalW(int64_t addr, int32_t value,
- Instruction* instr, int32_t rk_reg) {
+ Instruction* instr, int32_t* done) {
if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
@@ -1741,9 +1745,9 @@ void Simulator::WriteConditionalW(int64_t addr, int32_t value,
TraceMemWr(addr, value, WORD);
int* ptr = reinterpret_cast<int*>(addr);
*ptr = value;
- set_register(rk_reg, 1);
+ *done = 1;
} else {
- set_register(rk_reg, 0);
+ *done = 0;
}
return;
}
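+
+// Editorial note on the signature change above: reporting the
+// store-conditional outcome through *done instead of writing it into a
+// destination register lets the AM*_DB_* read-modify-write loops in
+// DecodeTypeOp17 retry on failure without clobbering the register that holds
+// the loaded value, which previously had to be saved and restored around
+// every attempt.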
@@ -1797,7 +1801,7 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
}
void Simulator::WriteConditional2W(int64_t addr, int64_t value,
- Instruction* instr, int32_t rk_reg) {
+ Instruction* instr, int32_t* done) {
if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
@@ -1816,9 +1820,9 @@ void Simulator::WriteConditional2W(int64_t addr, int64_t value,
TraceMemWr(addr, value, DWORD);
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
*ptr = value;
- set_register(rk_reg, 1);
+ *done = 1;
} else {
- set_register(rk_reg, 0);
+ *done = 0;
}
return;
}
@@ -2867,8 +2871,10 @@ void Simulator::DecodeTypeOp8() {
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
rj(), si14_se);
addr = si14_se + rj();
+ int32_t LLbit = 0;
WriteConditionalW(addr, static_cast<int32_t>(rd()), instr_.instr(),
- rd_reg());
+ &LLbit);
+ set_register(rd_reg(), LLbit);
break;
}
case LL_D: {
@@ -2888,7 +2894,9 @@ void Simulator::DecodeTypeOp8() {
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
rj(), si14_se);
addr = si14_se + rj();
- WriteConditional2W(addr, rd(), instr_.instr(), rd_reg());
+ int32_t LLbit = 0;
+ WriteConditional2W(addr, rd(), instr_.instr(), &LLbit);
+ set_register(rd_reg(), LLbit);
break;
}
default:
@@ -4174,7 +4182,7 @@ void Simulator::DecodeTypeOp17() {
printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int32_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4183,17 +4191,15 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
WriteConditionalW(rj(), static_cast<int32_t>(rk()), instr_.instr(),
- rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ &success);
+ } while (!success);
} break;
case AMSWAP_DB_D: {
printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int64_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4202,16 +4208,14 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
- WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ WriteConditional2W(rj(), rk(), instr_.instr(), &success);
+ } while (!success);
} break;
case AMADD_DB_W: {
printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int32_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4220,19 +4224,17 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
WriteConditionalW(rj(),
static_cast<int32_t>(static_cast<int32_t>(rk()) +
static_cast<int32_t>(rd())),
- instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ instr_.instr(), &success);
+ } while (!success);
} break;
case AMADD_DB_D: {
printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int64_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4241,16 +4243,14 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
- WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), &success);
+ } while (!success);
} break;
case AMAND_DB_W: {
printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int32_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4259,19 +4259,17 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
WriteConditionalW(rj(),
static_cast<int32_t>(static_cast<int32_t>(rk()) &
static_cast<int32_t>(rd())),
- instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ instr_.instr(), &success);
+ } while (!success);
} break;
case AMAND_DB_D: {
printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int64_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4280,16 +4278,14 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
- WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), &success);
+ } while (!success);
} break;
case AMOR_DB_W: {
printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int32_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4298,19 +4294,17 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
WriteConditionalW(rj(),
static_cast<int32_t>(static_cast<int32_t>(rk()) |
static_cast<int32_t>(rd())),
- instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ instr_.instr(), &success);
+ } while (!success);
} break;
case AMOR_DB_D: {
printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int64_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4319,16 +4313,14 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
- WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), &success);
+ } while (!success);
} break;
case AMXOR_DB_W: {
printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int32_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4337,19 +4329,17 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
WriteConditionalW(rj(),
static_cast<int32_t>(static_cast<int32_t>(rk()) ^
static_cast<int32_t>(rd())),
- instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ instr_.instr(), &success);
+ } while (!success);
} break;
case AMXOR_DB_D: {
printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
rk(), Registers::Name(rj_reg()), rj());
- int64_t rdvalue;
+ int32_t success = 0;
do {
{
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -4358,10 +4348,8 @@ void Simulator::DecodeTypeOp17() {
GlobalMonitor::Get()->NotifyLoadLinked_Locked(
rj(), &global_monitor_thread_);
}
- rdvalue = get_register(rd_reg());
- WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg());
- } while (!get_register(rd_reg()));
- set_register(rd_reg(), rdvalue);
+ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), &success);
+ } while (!success);
} break;
case AMMAX_DB_W:
printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n");
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.h b/deps/v8/src/execution/loong64/simulator-loong64.h
index a95b6cb4af..70e53d97d2 100644
--- a/deps/v8/src/execution/loong64/simulator-loong64.h
+++ b/deps/v8/src/execution/loong64/simulator-loong64.h
@@ -410,11 +410,11 @@ class Simulator : public SimulatorBase {
inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
- int32_t rt_reg);
+ int32_t* done);
inline int64_t Read2W(int64_t addr, Instruction* instr);
inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
inline void WriteConditional2W(int64_t addr, int64_t value,
- Instruction* instr, int32_t rt_reg);
+ Instruction* instr, int32_t* done);
inline double ReadD(int64_t addr, Instruction* instr);
inline void WriteD(int64_t addr, double value, Instruction* instr);
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index c8ab777730..bceca2180f 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -346,7 +346,13 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
const int argc = 2;
base::ScopedVector<Handle<Object>> argv(argc);
- argv[0] = error;
+ if (V8_UNLIKELY(error->IsJSGlobalObject())) {
+ // Pass global proxy instead of global object.
+ argv[0] =
+ handle(JSGlobalObject::cast(*error).global_proxy(), isolate);
+ } else {
+ argv[0] = error;
+ }
argv[1] = sites;
Handle<Object> result;
@@ -419,7 +425,7 @@ Handle<String> MessageFormatter::Format(Isolate* isolate, MessageTemplate index,
if (!arg2.is_null()) {
arg2_string = Object::NoSideEffectsToString(isolate, arg2);
}
- MaybeHandle<String> maybe_result_string = MessageFormatter::Format(
+ MaybeHandle<String> maybe_result_string = MessageFormatter::TryFormat(
isolate, index, arg0_string, arg1_string, arg2_string);
Handle<String> result_string;
if (!maybe_result_string.ToHandle(&result_string)) {
@@ -448,11 +454,11 @@ const char* MessageFormatter::TemplateString(MessageTemplate index) {
}
}
-MaybeHandle<String> MessageFormatter::Format(Isolate* isolate,
- MessageTemplate index,
- Handle<String> arg0,
- Handle<String> arg1,
- Handle<String> arg2) {
+MaybeHandle<String> MessageFormatter::TryFormat(Isolate* isolate,
+ MessageTemplate index,
+ Handle<String> arg0,
+ Handle<String> arg1,
+ Handle<String> arg2) {
const char* template_string = TemplateString(index);
if (template_string == nullptr) {
isolate->ThrowIllegalOperation();
@@ -675,7 +681,7 @@ Handle<String> DoFormatMessage(Isolate* isolate, MessageTemplate index,
isolate->native_context()->IncrementErrorsThrown();
Handle<String> msg;
- if (!MessageFormatter::Format(isolate, index, arg0_str, arg1_str, arg2_str)
+ if (!MessageFormatter::TryFormat(isolate, index, arg0_str, arg1_str, arg2_str)
.ToHandle(&msg)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
@@ -715,7 +721,7 @@ Handle<JSObject> ErrorUtils::MakeGenericError(
namespace {
bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
if (!it.done()) {
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 7e617ab88f..a6bbb30ba6 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -122,11 +122,11 @@ class MessageFormatter {
public:
V8_EXPORT_PRIVATE static const char* TemplateString(MessageTemplate index);
- V8_EXPORT_PRIVATE static MaybeHandle<String> Format(Isolate* isolate,
- MessageTemplate index,
- Handle<String> arg0,
- Handle<String> arg1,
- Handle<String> arg2);
+ V8_EXPORT_PRIVATE static MaybeHandle<String> TryFormat(Isolate* isolate,
+ MessageTemplate index,
+ Handle<String> arg0,
+ Handle<String> arg1,
+ Handle<String> arg2);
static Handle<String> Format(Isolate* isolate, MessageTemplate index,
Handle<Object> arg0,
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index fa189770da..64c853e302 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -153,10 +153,12 @@ int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
return 0;
}
- intptr_t base_count = finished_microtask_count_;
+ // We should not enter V8 if it's marked for termination.
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ !isolate->is_execution_terminating());
+ intptr_t base_count = finished_microtask_count_;
HandleScope handle_scope(isolate);
-
MaybeHandle<Object> maybe_result;
int processed_microtask_count;
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.cc b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
index cfe899730c..66f14f8516 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
@@ -26,6 +26,12 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.h b/deps/v8/src/execution/mips64/frame-constants-mips64.h
index e65710d5c1..c7103174a1 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.h
@@ -17,7 +17,7 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize;
};
class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 2dce655aed..6be43e6ecd 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -271,6 +271,10 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
}
void MipsDebugger::Debug() {
+ if (v8_flags.correctness_fuzzer_suppressions) {
+ PrintF("Debugger disabled for differential fuzzing.\n");
+ return;
+ }
intptr_t last_pc = -1;
bool done = false;
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.cc b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
index 45a53b07b6..f4256a0e63 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
@@ -29,6 +29,12 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index f3d5c99f12..11f56d73a9 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -16,9 +16,9 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
// Need to take constant pool into account.
- static constexpr int kCallerFPOffset = V8_EMBEDDED_CONSTANT_POOL_BOOL
- ? -4 * kSystemPointerSize
- : -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = V8_EMBEDDED_CONSTANT_POOL_BOOL
+ ? -4 * kSystemPointerSize
+ : -3 * kSystemPointerSize;
};
class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
@@ -31,10 +31,11 @@ class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
static constexpr int kInstanceSpillOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ // Spilled registers are implicitly sorted backwards by number.
static constexpr int kParameterSpillsOffset[] = {
- TYPED_FRAME_PUSHED_VALUE_OFFSET(2), TYPED_FRAME_PUSHED_VALUE_OFFSET(3),
- TYPED_FRAME_PUSHED_VALUE_OFFSET(4), TYPED_FRAME_PUSHED_VALUE_OFFSET(5),
- TYPED_FRAME_PUSHED_VALUE_OFFSET(6), TYPED_FRAME_PUSHED_VALUE_OFFSET(7)};
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(7), TYPED_FRAME_PUSHED_VALUE_OFFSET(6),
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(5), TYPED_FRAME_PUSHED_VALUE_OFFSET(4),
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(3), TYPED_FRAME_PUSHED_VALUE_OFFSET(2)};
// SP-relative.
static constexpr int kWasmInstanceOffset = 2 * kSystemPointerSize;
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index cf4c11d6cb..b2d57fba1a 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -155,6 +155,10 @@ void PPCDebugger::RedoBreakpoint() {
}
void PPCDebugger::Debug() {
+ if (v8_flags.correctness_fuzzer_suppressions) {
+ PrintF("Debugger disabled for differential fuzzing.\n");
+ return;
+ }
intptr_t last_pc = -1;
bool done = false;
@@ -5251,6 +5255,31 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
+ case VMSUMMBM: {
+ int vrt = instr->RTValue();
+ int vra = instr->RAValue();
+ int vrb = instr->RBValue();
+ int vrc = instr->RCValue();
+ FOR_EACH_LANE(i, int32_t) {
+ int8_t vra_1_val = get_simd_register_by_lane<int8_t>(vra, 4 * i),
+ vra_2_val = get_simd_register_by_lane<int8_t>(vra, (4 * i) + 1),
+ vra_3_val = get_simd_register_by_lane<int8_t>(vra, (4 * i) + 2),
+ vra_4_val = get_simd_register_by_lane<int8_t>(vra, (4 * i) + 3);
+ uint8_t vrb_1_val = get_simd_register_by_lane<uint8_t>(vrb, 4 * i),
+ vrb_2_val =
+ get_simd_register_by_lane<uint8_t>(vrb, (4 * i) + 1),
+ vrb_3_val =
+ get_simd_register_by_lane<uint8_t>(vrb, (4 * i) + 2),
+ vrb_4_val =
+ get_simd_register_by_lane<uint8_t>(vrb, (4 * i) + 3);
+ int32_t vrc_val = get_simd_register_by_lane<int32_t>(vrc, i);
+ int32_t temp1 = vra_1_val * vrb_1_val, temp2 = vra_2_val * vrb_2_val,
+ temp3 = vra_3_val * vrb_3_val, temp4 = vra_4_val * vrb_4_val;
+ temp1 = temp1 + temp2 + temp3 + temp4 + vrc_val;
+ set_simd_register_by_lane<int32_t>(vrt, i, temp1);
+ }
+ break;
+ }
case VMSUMSHM: {
int vrt = instr->RTValue();
int vra = instr->RAValue();
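The VMSUMMBM case above computes, for each 32-bit lane, the sum of four signed-byte-by-unsigned-byte products plus the matching lane of vrc, with wrapping (modulo) accumulation. A standalone sketch of that per-lane arithmetic, with plain arrays standing in for the simulator's SIMD registers:

    // Sketch of the per-lane vmsummbm arithmetic; arrays replace SIMD registers.
    #include <cstdint>

    void VmsummbmLanes(const int8_t va[16], const uint8_t vb[16],
                       const int32_t vc[4], int32_t vt[4]) {
      for (int i = 0; i < 4; ++i) {
        int64_t acc = vc[i];
        for (int j = 0; j < 4; ++j) {
          // Signed byte times unsigned byte; each product fits easily in int.
          acc += static_cast<int32_t>(va[4 * i + j]) *
                 static_cast<int32_t>(vb[4 * i + j]);
        }
        // Wrap to 32 bits, matching the modulo semantics of the instruction.
        vt[i] = static_cast<int32_t>(static_cast<uint32_t>(acc));
      }
    }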
diff --git a/deps/v8/src/execution/protectors.h b/deps/v8/src/execution/protectors.h
index aa89275c11..271661dddf 100644
--- a/deps/v8/src/execution/protectors.h
+++ b/deps/v8/src/execution/protectors.h
@@ -40,6 +40,15 @@ class Protectors : public AllStatic {
/* property holder is the %IteratorPrototype%. Note that this also */ \
/* invalidates the SetIterator protector (see below). */ \
V(MapIteratorLookupChain, MapIteratorProtector, map_iterator_protector) \
+ /* String.prototype.replace looks up Symbol.replace (aka @@replace) on */ \
+ /* the search term to check if it is regexp-like. */ \
+ /* This protector ensures the prototype chain of String.prototype and */ \
+ /* Number.prototype does not contain Symbol.replace. */ \
+ /* It enables a fast-path for String.prototype.replace by ensuring that */ \
+ /* the implicit wrapper objects for strings and numbers do not contain */ \
+ /* the property Symbol.replace. */ \
+ V(NumberStringPrototypeNoReplace, NumberStringPrototypeNoReplaceProtector, \
+ number_string_prototype_no_replace_protector) \
V(RegExpSpeciesLookupChain, RegExpSpeciesProtector, \
regexp_species_protector) \
V(PromiseHook, PromiseHookProtector, promise_hook_protector) \
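As a rough illustration of the pattern behind the new protector (the names below are placeholders, not the accessors V8 generates from this macro list): a protector is a one-way boolean that, while intact, lets builtins skip a lookup; once the guarded invariant is broken it is invalidated and later calls take the generic path.

    // Illustrative only.
    struct Protector {
      bool intact = true;
      void Invalidate() { intact = false; }
    };

    bool CanSkipSymbolReplaceLookup(const Protector& no_replace_protector) {
      // While intact, String.prototype.replace may assume a string or number
      // search value is not regexp-like without probing for Symbol.replace.
      return no_replace_protector.intact;
    }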
diff --git a/deps/v8/src/execution/riscv/frame-constants-riscv.cc b/deps/v8/src/execution/riscv/frame-constants-riscv.cc
index 833af91e7e..3fbbc7ab64 100644
--- a/deps/v8/src/execution/riscv/frame-constants-riscv.cc
+++ b/deps/v8/src/execution/riscv/frame-constants-riscv.cc
@@ -23,5 +23,11 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/riscv/frame-constants-riscv.h b/deps/v8/src/execution/riscv/frame-constants-riscv.h
index 5873b4e16b..e73e43629f 100644
--- a/deps/v8/src/execution/riscv/frame-constants-riscv.h
+++ b/deps/v8/src/execution/riscv/frame-constants-riscv.h
@@ -18,7 +18,7 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize;
};
class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/execution/riscv/simulator-riscv.cc b/deps/v8/src/execution/riscv/simulator-riscv.cc
index 334ce9fcf5..9582db4896 100644
--- a/deps/v8/src/execution/riscv/simulator-riscv.cc
+++ b/deps/v8/src/execution/riscv/simulator-riscv.cc
@@ -3208,6 +3208,31 @@ void Simulator::SoftwareInterrupt() {
if (code != -1 && static_cast<uint32_t>(code) <= kMaxStopCode) {
if (IsWatchpoint(code)) {
PrintWatchpoint(code);
+ } else if (IsTracepoint(code)) {
+ if (!v8_flags.debug_sim) {
+ PrintF("Add --debug-sim when tracepoint instruction is used.\n");
+ abort();
+ }
+ printf("%d %d %d %d\n", code, code & LOG_TRACE, code & LOG_REGS,
+ code & kDebuggerTracingDirectivesMask);
+ switch (code & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ if (code & LOG_TRACE) {
+ v8_flags.trace_sim = true;
+ }
+ if (code & LOG_REGS) {
+ RiscvDebugger dbg(this);
+ dbg.PrintAllRegs();
+ }
+ break;
+ case TRACE_DISABLE:
+ if (code & LOG_TRACE) {
+ v8_flags.trace_sim = false;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
} else {
IncreaseStopCounter(code);
HandleStop(code);
@@ -3227,6 +3252,10 @@ bool Simulator::IsWatchpoint(reg_t code) {
return (code <= kMaxWatchpointCode);
}
+bool Simulator::IsTracepoint(reg_t code) {
+ return (code <= kMaxTracepointCode && code > kMaxWatchpointCode);
+}
+
void Simulator::PrintWatchpoint(reg_t code) {
RiscvDebugger dbg(this);
++break_count_;
@@ -3636,14 +3665,14 @@ I_TYPE Simulator::RoundF2IHelper(F_TYPE original, int rmode) {
// so use its float representation directly
: static_cast<float>(static_cast<uint64_t>(max_i) + 1);
if (rounded >= max_i_plus_1) {
- set_fflags(kOverflow | kInvalidOperation);
+ set_fflags(kFPUOverflow | kInvalidOperation);
return max_i;
}
// Since min_i (either 0 for unsigned, or the minimum integer for signed) is represented
// precisely in floating-point, comparing rounded directly against min_i
if (rounded <= min_i) {
- if (rounded < min_i) set_fflags(kOverflow | kInvalidOperation);
+ if (rounded < min_i) set_fflags(kFPUOverflow | kInvalidOperation);
return min_i;
}
@@ -4105,7 +4134,7 @@ void Simulator::DecodeRVRFPType() {
case 0b000: {
if (instr_.Rs2Value() == 0b00000) {
// RO_FMV_X_W
- set_rd(sext_xlen(get_fpu_register_word(rs1_reg())));
+ set_rd(sext32(get_fpu_register_word(rs1_reg())));
} else {
UNSUPPORTED();
}
@@ -6288,6 +6317,7 @@ void Simulator::DecodeRvvMVX() {
DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
switch (instr_.InstructionBits() & kVTypeMask) {
case RO_V_VRXUNARY0:
+ // vmv.s.x
if (instr_.Vs2Value() == 0x0) {
if (rvv_vl() > 0 && rvv_vstart() < rvv_vl()) {
switch (rvv_vsew()) {
@@ -6310,7 +6340,6 @@ void Simulator::DecodeRvvMVX() {
default:
UNREACHABLE();
}
- // set_rvv_vl(0);
}
set_rvv_vstart(0);
rvv_trace_vd();
@@ -6607,7 +6636,6 @@ void Simulator::DecodeRvvFVV() {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
break;
case RO_V_VFUNARY1:
@@ -6958,7 +6986,6 @@ void Simulator::DecodeRvvFVV() {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
@@ -6992,6 +7019,48 @@ void Simulator::DecodeRvvFVF() {
USE(vs2);
})
break;
+ case RO_V_VFADD_VF:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ auto alu_out = fn(fs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(fs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(fs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ auto fn = [this](double frs1, double frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ auto alu_out = fn(fs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(fs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(fs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
case RO_V_VFWADD_VF:
RVV_VI_CHECK_DSS(true);
RVV_VI_VFP_VF_LOOP_WIDEN(
@@ -7087,7 +7156,6 @@ void Simulator::DecodeRvvFVF() {
break;
default:
UNSUPPORTED_RISCV();
- break;
}
}
void Simulator::DecodeVType() {
@@ -7185,7 +7253,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
v8::base::EmbeddedVector<char, 256> buffer;
- if (v8_flags.trace_sim) {
+ if (v8_flags.trace_sim || v8_flags.debug_sim) {
SNPrintF(trace_buf_, " ");
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -7195,7 +7263,6 @@ void Simulator::InstructionDecode(Instruction* instr) {
// PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
// reinterpret_cast<intptr_t>(instr), buffer.begin());
}
-
instr_ = instr;
switch (instr_.InstructionType()) {
case Instruction::kRType:
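The tracepoint handling above relies on stop codes being partitioned into consecutive ranges. A sketch of that classification with assumed bounds (the real kMaxWatchpointCode and kMaxTracepointCode values live in the RISC-V simulator headers):

    #include <cstdint>

    // Assumed values for illustration only.
    constexpr uint64_t kMaxWatchpointCode = 31;
    constexpr uint64_t kMaxTracepointCode = 63;

    bool IsWatchpoint(uint64_t code) { return code <= kMaxWatchpointCode; }

    bool IsTracepoint(uint64_t code) {
      // Tracepoints occupy the code range immediately after watchpoints.
      return code > kMaxWatchpointCode && code <= kMaxTracepointCode;
    }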
diff --git a/deps/v8/src/execution/riscv/simulator-riscv.h b/deps/v8/src/execution/riscv/simulator-riscv.h
index f3b022b36f..c744bf9921 100644
--- a/deps/v8/src/execution/riscv/simulator-riscv.h
+++ b/deps/v8/src/execution/riscv/simulator-riscv.h
@@ -200,7 +200,7 @@ inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
}
inline Float64 fsgnj64(Float64 rs1, Float64 rs2, bool n, bool x) {
- u64_f64 a = {.d = rs1.get_scalar()}, b = {.d = rs2.get_scalar()};
+ u64_f64 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
u64_f64 res;
if (x) { // RO_FSQNJX_D
res.u = (a.u & ~F64_SIGN) | ((a.u ^ b.u) & F64_SIGN);
@@ -1035,6 +1035,7 @@ class Simulator : public SimulatorBase {
// Stop helper functions.
bool IsWatchpoint(reg_t code);
+ bool IsTracepoint(reg_t code);
void PrintWatchpoint(reg_t code);
void HandleStop(reg_t code);
bool IsStopInstruction(Instruction* instr);
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.cc b/deps/v8/src/execution/s390/frame-constants-s390.cc
index 50f3445556..8607145961 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.cc
+++ b/deps/v8/src/execution/s390/frame-constants-s390.cc
@@ -26,6 +26,12 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ USE(register_input_count);
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index 7643a25d2b..3b586dfcbf 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -15,7 +15,7 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize;
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgvOffset = 20 * kSystemPointerSize;
@@ -23,6 +23,7 @@ class EntryFrameConstants : public AllStatic {
class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
public:
+ // Number of gp parameters, without the instance.
static constexpr int kNumberOfSavedGpParamRegs = 3;
#ifdef V8_TARGET_ARCH_S390X
static constexpr int kNumberOfSavedFpParamRegs = 4;
@@ -34,9 +35,10 @@ class WasmLiftoffSetupFrameConstants : public TypedFrameConstants {
static constexpr int kInstanceSpillOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ // Spilled registers are implicitly sorted backwards by number.
static constexpr int kParameterSpillsOffset[] = {
- TYPED_FRAME_PUSHED_VALUE_OFFSET(2), TYPED_FRAME_PUSHED_VALUE_OFFSET(3),
- TYPED_FRAME_PUSHED_VALUE_OFFSET(4)};
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(4), TYPED_FRAME_PUSHED_VALUE_OFFSET(3),
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(2)};
// SP-relative.
static constexpr int kWasmInstanceOffset = 2 * kSystemPointerSize;
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 4d4a0bbf39..84df3a2bbd 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -165,6 +165,10 @@ void S390Debugger::RedoBreakpoint() {
}
void S390Debugger::Debug() {
+ if (v8_flags.correctness_fuzzer_suppressions) {
+ PrintF("Debugger disabled for differential fuzzing.\n");
+ return;
+ }
intptr_t last_pc = -1;
bool done = false;
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 11887e6d9a..c685609e3d 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -110,9 +110,8 @@ class GeneratedCode {
return GeneratedCode(isolate, reinterpret_cast<Signature*>(buffer));
}
- template <typename CodeOrCodeT>
- static GeneratedCode FromCode(CodeOrCodeT code) {
- return FromAddress(code.GetIsolate(), code.entry());
+ static GeneratedCode FromCode(Isolate* isolate, Code code) {
+ return FromAddress(isolate, code.InstructionStart());
}
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index 2b4af70bc0..ff64beb8b2 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -81,14 +81,14 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
// Intercept already requested interrupts.
- intptr_t intercepted =
+ uint32_t intercepted =
thread_local_.interrupt_flags_ & scope->intercept_mask_;
scope->intercepted_flags_ = intercepted;
thread_local_.interrupt_flags_ &= ~intercepted;
} else {
DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
// Restore postponed interrupts.
- int restored_flags = 0;
+ uint32_t restored_flags = 0;
for (InterruptsScope* current = thread_local_.interrupt_scopes_;
current != nullptr; current = current->prev_) {
restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
@@ -116,7 +116,7 @@ void StackGuard::PopInterruptsScope() {
DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
// Postpone existing interrupts if needed.
if (top->prev_) {
- for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
+ for (uint32_t interrupt = 1; interrupt < ALL_INTERRUPTS;
interrupt = interrupt << 1) {
InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
if ((thread_local_.interrupt_flags_ & flag) &&
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
index e39a053f3a..8cdf755c0d 100644
--- a/deps/v8/src/execution/stack-guard.h
+++ b/deps/v8/src/execution/stack-guard.h
@@ -66,7 +66,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
#undef V
// Flag used to set the interrupt causes.
- enum InterruptFlag {
+ enum InterruptFlag : uint32_t {
#define V(NAME, Name, id) NAME = (1 << id),
INTERRUPT_LIST(V)
#undef V
@@ -74,6 +74,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
#undef V
};
+ static_assert(InterruptFlag::ALL_INTERRUPTS <
+ std::numeric_limits<uint32_t>::max());
uintptr_t climit() { return thread_local_.climit(); }
uintptr_t jslimit() { return thread_local_.jslimit(); }
@@ -179,7 +181,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
}
InterruptsScope* interrupt_scopes_ = nullptr;
- intptr_t interrupt_flags_ = 0;
+ uint32_t interrupt_flags_ = 0;
};
// TODO(isolates): Technically this could be calculated directly from a
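The switch to uint32_t works because each interrupt cause is a distinct bit and ALL_INTERRUPTS is their union; the new static_assert guards that the union still fits the 32-bit flag word. A minimal sketch with an illustrative subset of causes:

    #include <cstdint>
    #include <limits>

    // Illustrative subset of interrupt causes; the real list is generated from
    // INTERRUPT_LIST.
    enum InterruptFlag : uint32_t {
      GC_REQUEST = 1u << 0,
      TERMINATE_EXECUTION = 1u << 1,
      API_INTERRUPT = 1u << 2,
      ALL_INTERRUPTS = GC_REQUEST | TERMINATE_EXECUTION | API_INTERRUPT
    };

    static_assert(ALL_INTERRUPTS < std::numeric_limits<uint32_t>::max(),
                  "all interrupt bits must fit in the 32-bit flag word");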
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 0d7071ddda..05cc20b8e4 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -37,14 +37,12 @@ void ThreadLocalTop::Clear() {
current_embedder_state_ = nullptr;
failed_access_check_callback_ = nullptr;
thread_in_wasm_flag_address_ = kNullAddress;
- stack_ = ::heap::base::Stack();
}
void ThreadLocalTop::Initialize(Isolate* isolate) {
Clear();
isolate_ = isolate;
thread_id_ = ThreadId::Current();
- stack_.SetStackStart(base::Stack::GetStackStart());
#if V8_ENABLE_WEBASSEMBLY
thread_in_wasm_flag_address_ = reinterpret_cast<Address>(
trap_handler::GetThreadInWasmThreadLocalAddress());
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index 0815f856ad..989c817f31 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -10,7 +10,6 @@
#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/execution/thread-id.h"
-#include "src/heap/base/stack.h"
#include "src/objects/contexts.h"
#include "src/utils/utils.h"
@@ -30,7 +29,7 @@ class ThreadLocalTop {
// TODO(all): This is not particularly beautiful. We should probably
// refactor this to really consist of just Addresses and 32-bit
// integer fields.
- static constexpr uint32_t kSizeInBytes = 27 * kSystemPointerSize;
+ static constexpr uint32_t kSizeInBytes = 25 * kSystemPointerSize;
// Does early low-level initialization that does not depend on the
// isolate being present.
@@ -147,9 +146,6 @@ class ThreadLocalTop {
// Address of the thread-local "thread in wasm" flag.
Address thread_in_wasm_flag_address_;
-
- // Stack information.
- ::heap::base::Stack stack_;
};
} // namespace internal
diff --git a/deps/v8/src/execution/tiering-manager.cc b/deps/v8/src/execution/tiering-manager.cc
index d34777f74c..ce51a184f4 100644
--- a/deps/v8/src/execution/tiering-manager.cc
+++ b/deps/v8/src/execution/tiering-manager.cc
@@ -15,6 +15,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
+#include "src/flags/flags.h"
#include "src/handles/global-handles.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
@@ -96,10 +97,10 @@ static_assert(sizeof(OptimizationDecision) <= kInt32Size);
namespace {
-void TraceInOptimizationQueue(JSFunction function) {
+void TraceInOptimizationQueue(JSFunction function, CodeKind current_code_kind) {
if (v8_flags.trace_opt_verbose) {
- PrintF("[not marking function %s for optimization: already queued]\n",
- function.DebugNameCStr().get());
+ PrintF("[not marking function %s (%s) for optimization: already queued]\n",
+ function.DebugNameCStr().get(), CodeKindToString(current_code_kind));
}
}
@@ -156,9 +157,13 @@ bool TiersUpToMaglev(base::Optional<CodeKind> code_kind) {
}
int InterruptBudgetFor(base::Optional<CodeKind> code_kind,
- TieringState tiering_state) {
+ TieringState tiering_state, int bytecode_length) {
+ if (IsRequestTurbofan(tiering_state) ||
+ (code_kind.has_value() && code_kind.value() == CodeKind::TURBOFAN)) {
+ return v8_flags.invocation_count_for_osr * bytecode_length;
+ }
return TiersUpToMaglev(code_kind) && tiering_state == TieringState::kNone
- ? v8_flags.interrupt_budget_for_maglev
+ ? v8_flags.invocation_count_for_maglev * bytecode_length
: v8_flags.interrupt_budget;
}
@@ -166,21 +171,22 @@ int InterruptBudgetFor(base::Optional<CodeKind> code_kind,
// static
int TieringManager::InterruptBudgetFor(Isolate* isolate, JSFunction function) {
+ DCHECK(function.shared().is_compiled());
+ const int bytecode_length =
+ function.shared().GetBytecodeArray(isolate).length();
if (function.has_feedback_vector()) {
- if (function.shared().GetBytecodeArray(isolate).length() >
- v8_flags.max_optimized_bytecode_size) {
+ if (bytecode_length > v8_flags.max_optimized_bytecode_size) {
// Reduce how often the interrupt budget underflows; we do not set it to
// INT_MAX because the budget may overflow when the bonus for a forward
// jump is added.
return INT_MAX / 2;
}
return ::i::InterruptBudgetFor(function.GetActiveTier(),
- function.tiering_state());
+ function.tiering_state(), bytecode_length);
}
DCHECK(!function.has_feedback_vector());
- DCHECK(function.shared().is_compiled());
- return function.shared().GetBytecodeArray(isolate).length() *
+ return bytecode_length *
v8_flags.interrupt_budget_factor_for_feedback_allocation;
}
@@ -193,45 +199,8 @@ int TieringManager::InitialInterruptBudget() {
namespace {
-bool SmallEnoughForOSR(Isolate* isolate, JSFunction function,
- CodeKind code_kind) {
- // "The answer to life the universe and everything.. 42? Or was it 44?"
- //
- // Note the OSR allowance's origin is somewhat accidental - with the advent
- // of Ignition it started at 48 and through several rounds of micro-tuning
- // ended up at 42. See
- // https://chromium-review.googlesource.com/649149.
- //
- // The allowance was originally chosen based on the Ignition-to-Turbofan
- // interrupt budget. In the presence of multiple tiers and multiple budgets
- // (which control how often ticks are incremented), it must be scaled to the
- // currently active budget to somewhat preserve old behavior.
- //
- // TODO(all): Since the origins of this constant are so arbitrary, this is
- // worth another re-evaluation. For now, we stick with 44 to preserve
- // behavior for comparability, but feel free to change this in the future.
- static const int kOSRBytecodeSizeAllowanceBase = 119;
- static const int kOSRBytecodeSizeAllowancePerTick = 44;
- const double scale_factor_for_active_tier =
- InterruptBudgetFor(code_kind, TieringState::kNone) /
- static_cast<double>(v8_flags.interrupt_budget);
-
- const double raw_limit = kOSRBytecodeSizeAllowanceBase +
- scale_factor_for_active_tier *
- kOSRBytecodeSizeAllowancePerTick *
- function.feedback_vector().profiler_ticks();
- const int limit = raw_limit < BytecodeArray::kMaxLength
- ? static_cast<int>(raw_limit)
- : BytecodeArray::kMaxLength;
- DCHECK_GT(limit, 0);
- return function.shared().GetBytecodeArray(isolate).length() <= limit;
-}
-
void TrySetOsrUrgency(Isolate* isolate, JSFunction function, int osr_urgency) {
SharedFunctionInfo shared = function.shared();
- // Guaranteed since we've got a feedback vector.
- DCHECK(shared.IsUserJavaScript());
-
if (V8_UNLIKELY(!v8_flags.use_osr)) return;
if (V8_UNLIKELY(shared.optimization_disabled())) return;
@@ -273,15 +242,17 @@ void TieringManager::RequestOsrAtNextOpportunity(JSFunction function) {
}
void TieringManager::MaybeOptimizeFrame(JSFunction function,
- CodeKind calling_code_kind) {
+ CodeKind current_code_kind) {
const TieringState tiering_state = function.feedback_vector().tiering_state();
const TieringState osr_tiering_state =
function.feedback_vector().osr_tiering_state();
+ // Attention! Update this constant in case the condition below changes.
+ static_assert(kTieringStateInProgressBlocksTierup);
if (V8_UNLIKELY(IsInProgress(tiering_state)) ||
V8_UNLIKELY(IsInProgress(osr_tiering_state))) {
- // Note: This effectively disables OSR for the function while it is being
- // compiled.
- TraceInOptimizationQueue(function);
+ // Note: This effectively disables further tiering actions (e.g. OSR, or
+ // tiering up into Maglev) for the function while it is being compiled.
+ TraceInOptimizationQueue(function, current_code_kind);
return;
}
@@ -308,9 +279,7 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
function.HasAvailableCodeKind(CodeKind::TURBOFAN)) {
// OSR kicks in only once we've previously decided to tier up, but we are
// still in a lower-tier frame (this implies a long-running loop).
- if (SmallEnoughForOSR(isolate_, function, calling_code_kind)) {
- TryIncrementOsrUrgency(isolate_, function);
- }
+ TryIncrementOsrUrgency(isolate_, function);
// Return unconditionally and don't run through the optimization decision
// again; we've already decided to tier up previously.
@@ -319,7 +288,8 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
DCHECK(!IsRequestTurbofan(tiering_state));
DCHECK(!function.HasAvailableCodeKind(CodeKind::TURBOFAN));
- OptimizationDecision d = ShouldOptimize(function, calling_code_kind);
+ OptimizationDecision d =
+ ShouldOptimize(function.feedback_vector(), current_code_kind);
// We might be stuck in a baseline frame that wants to tier up to Maglev, but
// is in a loop, and can't OSR, because Maglev doesn't have OSR. Allow it to
// skip over Maglev by re-checking ShouldOptimize as if we were in Maglev.
@@ -330,7 +300,7 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
IsRequestMaglev(tiering_state) ||
function.HasAvailableCodeKind(CodeKind::MAGLEV);
if (is_marked_for_maglev_optimization) {
- d = ShouldOptimize(function, CodeKind::MAGLEV);
+ d = ShouldOptimize(function.feedback_vector(), CodeKind::MAGLEV);
}
}
@@ -338,26 +308,29 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
}
OptimizationDecision TieringManager::ShouldOptimize(
- JSFunction function, CodeKind calling_code_kind) {
- if (TiersUpToMaglev(calling_code_kind) &&
- function.shared().PassesFilter(v8_flags.maglev_filter) &&
- !function.shared(isolate_).maglev_compilation_failed()) {
+ FeedbackVector feedback_vector, CodeKind current_code_kind,
+ bool after_next_tick) {
+ SharedFunctionInfo shared = feedback_vector.shared_function_info();
+ if (TiersUpToMaglev(current_code_kind) &&
+ shared.PassesFilter(v8_flags.maglev_filter) &&
+ !shared.maglev_compilation_failed()) {
+ if (any_ic_changed_) return OptimizationDecision::DoNotOptimize();
return OptimizationDecision::Maglev();
- } else if (calling_code_kind == CodeKind::TURBOFAN) {
+ } else if (current_code_kind == CodeKind::TURBOFAN) {
// Already in the top tier.
return OptimizationDecision::DoNotOptimize();
}
- if (!v8_flags.turbofan ||
- !function.shared().PassesFilter(v8_flags.turbo_filter)) {
+ if (!v8_flags.turbofan || !shared.PassesFilter(v8_flags.turbo_filter)) {
return OptimizationDecision::DoNotOptimize();
}
- BytecodeArray bytecode = function.shared().GetBytecodeArray(isolate_);
+ BytecodeArray bytecode = shared.GetBytecodeArray(isolate_);
if (bytecode.length() > v8_flags.max_optimized_bytecode_size) {
return OptimizationDecision::DoNotOptimize();
}
- const int ticks = function.feedback_vector().profiler_ticks();
+ const int ticks =
+ feedback_vector.profiler_ticks() + (after_next_tick ? 1 : 0);
const int ticks_for_optimization =
v8_flags.ticks_before_optimization +
(bytecode.length() / v8_flags.bytecode_size_allowance_per_tick);
@@ -368,9 +341,9 @@ OptimizationDecision TieringManager::ShouldOptimize(
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationDecision::TurbofanSmallFunction();
- } else if (v8_flags.trace_opt_verbose) {
+ } else if (!after_next_tick && v8_flags.trace_opt_verbose) {
PrintF("[not yet optimizing %s, not enough ticks: %d/%d and ",
- function.DebugNameCStr().get(), ticks, ticks_for_optimization);
+ shared.DebugNameCStr().get(), ticks, ticks_for_optimization);
if (any_ic_changed_) {
PrintF("ICs changed]\n");
} else {
@@ -383,6 +356,35 @@ OptimizationDecision TieringManager::ShouldOptimize(
return OptimizationDecision::DoNotOptimize();
}
+void TieringManager::NotifyICChanged(FeedbackVector vector) {
+ if (v8_flags.global_ic_updated_flag) {
+ any_ic_changed_ = true;
+ }
+ if (v8_flags.reset_interrupt_on_ic_update) {
+ CodeKind code_kind = vector.has_optimized_code()
+ ? vector.optimized_code().kind()
+ : vector.shared_function_info().HasBaselineCode()
+ ? CodeKind::BASELINE
+ : CodeKind::INTERPRETED_FUNCTION;
+ OptimizationDecision decision = ShouldOptimize(vector, code_kind, true);
+ if (decision.should_optimize()) {
+ SharedFunctionInfo shared = vector.shared_function_info();
+ int bytecode_length = shared.GetBytecodeArray(isolate_).length();
+ FeedbackCell cell = vector.parent_feedback_cell();
+ int minimum = v8_flags.minimum_invocations_after_ic_update;
+ int new_budget = minimum * bytecode_length;
+ int current_budget = cell.interrupt_budget();
+ if (new_budget > current_budget) {
+ if (v8_flags.trace_opt_verbose) {
+ PrintF("[delaying optimization of %s, IC changed]\n",
+ shared.DebugNameCStr().get());
+ }
+ cell.set_interrupt_budget(new_budget);
+ }
+ }
+ }
+}
+
TieringManager::OnInterruptTickScope::OnInterruptTickScope(
TieringManager* profiler)
: profiler_(profiler) {
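The reworked InterruptBudgetFor above scales the budget with bytecode length and per-tier invocation counts instead of a fixed Maglev budget. A simplified sketch of the selection (the flag defaults from this patch, 500 invocations for OSR and 100 for Maglev, are hard-coded for illustration, and the in-flight tiering-state check is omitted):

    enum class Tier { kInterpreted, kBaseline, kMaglev, kTurbofan };

    int InterruptBudgetFor(Tier active_tier, bool turbofan_requested,
                           int bytecode_length, int default_budget) {
      if (turbofan_requested || active_tier == Tier::kTurbofan) {
        // Top tier (or about to be): the budget only drives OSR urgency.
        return 500 * bytecode_length;
      }
      if (active_tier == Tier::kInterpreted || active_tier == Tier::kBaseline) {
        // Still tiering up to Maglev: budget scales with function size.
        return 100 * bytecode_length;
      }
      return default_budget;
    }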
diff --git a/deps/v8/src/execution/tiering-manager.h b/deps/v8/src/execution/tiering-manager.h
index 83a39eaee0..f7645b1993 100644
--- a/deps/v8/src/execution/tiering-manager.h
+++ b/deps/v8/src/execution/tiering-manager.h
@@ -28,7 +28,7 @@ class TieringManager {
void OnInterruptTick(Handle<JSFunction> function, CodeKind code_kind);
- void NotifyICChanged() { any_ic_changed_ = true; }
+ void NotifyICChanged(FeedbackVector vector);
// After this request, the next JumpLoop will perform OSR.
void RequestOsrAtNextOpportunity(JSFunction function);
@@ -44,7 +44,12 @@ class TieringManager {
// This function is also responsible for bumping the OSR urgency.
void MaybeOptimizeFrame(JSFunction function, CodeKind code_kind);
- OptimizationDecision ShouldOptimize(JSFunction function, CodeKind code_kind);
+ // `after_next_tick` indicates whether the decision should be made as if the
+ // next profiler tick had already happened, i.e. whether the ticks were
+ // pre-incremented before calling this function.
+ OptimizationDecision ShouldOptimize(FeedbackVector feedback_vector,
+ CodeKind code_kind,
+ bool after_next_tick = false);
void Optimize(JSFunction function, OptimizationDecision decision);
void Baseline(JSFunction function, OptimizationReason reason);
diff --git a/deps/v8/src/execution/vm-state-inl.h b/deps/v8/src/execution/vm-state-inl.h
index d4de5abf8c..58429adde6 100644
--- a/deps/v8/src/execution/vm-state-inl.h
+++ b/deps/v8/src/execution/vm-state-inl.h
@@ -52,21 +52,20 @@ VMState<Tag>::~VMState() {
}
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
- : isolate_(isolate),
- callback_(callback),
+ : callback_(callback),
previous_scope_(isolate->external_callback_scope()),
vm_state_(isolate),
pause_timed_histogram_scope_(isolate->counters()->execute()) {
#ifdef USE_SIMULATOR
scope_address_ = Simulator::current(isolate)->get_sp();
#endif
- isolate_->set_external_callback_scope(this);
+ vm_state_.isolate_->set_external_callback_scope(this);
TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
"V8.ExternalCallback");
}
ExternalCallbackScope::~ExternalCallbackScope() {
- isolate_->set_external_callback_scope(previous_scope_);
+ vm_state_.isolate_->set_external_callback_scope(previous_scope_);
TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
"V8.ExternalCallback");
}
diff --git a/deps/v8/src/execution/vm-state.h b/deps/v8/src/execution/vm-state.h
index d903b222ee..70ecbb3bce 100644
--- a/deps/v8/src/execution/vm-state.h
+++ b/deps/v8/src/execution/vm-state.h
@@ -8,6 +8,7 @@
#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/logging/counters-scopes.h"
+#include "v8-internal.h"
namespace v8 {
namespace internal {
@@ -23,8 +24,10 @@ class VMState {
inline ~VMState();
private:
- Isolate* isolate_;
- StateTag previous_tag_;
+ Isolate* const isolate_;
+ StateTag const previous_tag_;
+
+ friend ExternalCallbackScope;
};
class V8_NODISCARD ExternalCallbackScope {
@@ -37,18 +40,17 @@ class V8_NODISCARD ExternalCallbackScope {
#if USES_FUNCTION_DESCRIPTORS
return FUNCTION_ENTRYPOINT_ADDRESS(callback_);
#else
- return &callback_;
+ return const_cast<Address*>(&callback_);
#endif
}
ExternalCallbackScope* previous() { return previous_scope_; }
inline Address scope_address();
private:
- Isolate* isolate_;
- Address callback_;
- ExternalCallbackScope* previous_scope_;
- VMState<EXTERNAL> vm_state_;
- PauseNestedTimedHistogramScope pause_timed_histogram_scope_;
+ Address const callback_;
+ ExternalCallbackScope* const previous_scope_;
+ VMState<EXTERNAL> const vm_state_;
+ PauseNestedTimedHistogramScope const pause_timed_histogram_scope_;
#ifdef USE_SIMULATOR
Address scope_address_;
#endif
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.cc b/deps/v8/src/execution/x64/frame-constants-x64.cc
index fb242505dd..df612a08cf 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.cc
+++ b/deps/v8/src/execution/x64/frame-constants-x64.cc
@@ -26,6 +26,14 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
return 0;
}
+// static
+intptr_t MaglevFrame::StackGuardFrameSize(int register_input_count) {
+ // Include one extra slot for the single argument passed to StackGuardWithGap,
+ // plus one slot per register input.
+ return StandardFrameConstants::kFixedFrameSizeFromFp +
+ (1 + register_input_count) * kSystemPointerSize;
+}
+
} // namespace internal
} // namespace v8
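A worked form of the x64 StackGuardFrameSize formula above, with the fixed frame size passed in as a parameter rather than read from V8's frame constants:

    #include <cstdint>

    constexpr intptr_t kSystemPointerSize = 8;  // x64

    intptr_t StackGuardFrameSize(intptr_t fixed_frame_size_from_fp,
                                 int register_input_count) {
      // One slot for the StackGuardWithGap argument plus one per register input.
      return fixed_frame_size_from_fp +
             (1 + register_input_count) * kSystemPointerSize;
    }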
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index d1ba413c8a..deb66d05e6 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -26,9 +26,9 @@ class EntryFrameConstants : public AllStatic {
// On x64, there are 7 pushq() and 3 Push() calls between setting up rbp and
// pushing the c_entry_fp, plus we manually allocate kXMMRegistersBlockSize
// bytes on the stack.
- static constexpr int kCallerFPOffset = -3 * kSystemPointerSize +
- -7 * kSystemPointerSize -
- kXMMRegistersBlockSize;
+ static constexpr int kNextExitFrameFPOffset = -3 * kSystemPointerSize +
+ -7 * kSystemPointerSize -
+ kXMMRegistersBlockSize;
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgcOffset = 6 * kSystemPointerSize;
@@ -38,7 +38,7 @@ class EntryFrameConstants : public AllStatic {
// Isolate::c_entry_fp onto the stack.
// On x64, there are 5 pushq() and 3 Push() calls between setting up rbp and
// pushing the c_entry_fp.
- static constexpr int kCallerFPOffset =
+ static constexpr int kNextExitFrameFPOffset =
-3 * kSystemPointerSize + -5 * kSystemPointerSize;
#endif
};
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 98cfe7f14f..16fdeb457b 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -5,6 +5,7 @@
#include "src/extensions/gc-extension.h"
#include "include/v8-isolate.h"
+#include "include/v8-microtask-queue.h"
#include "include/v8-object.h"
#include "include/v8-persistent-handle.h"
#include "include/v8-primitive.h"
@@ -121,6 +122,8 @@ class AsyncGC final : public CancelableTask {
InvokeGC(isolate_, ExecutionType::kAsync, type_);
auto resolver = v8::Local<v8::Promise::Resolver>::New(isolate_, resolver_);
auto ctx = Local<v8::Context>::New(isolate_, ctx_);
+ v8::MicrotasksScope microtasks_scope(
+ ctx, v8::MicrotasksScope::kDoNotRunMicrotasks);
resolver->Resolve(ctx, v8::Undefined(isolate_)).ToChecked();
}
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 0127d2a805..ea333762c9 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -82,8 +82,7 @@ void StatisticsExtension::GetCounters(
// clang-format off
const StatisticsCounter counter_list[] = {
#define ADD_COUNTER(name, caption) {counters->name(), #name},
- STATS_COUNTER_LIST_1(ADD_COUNTER)
- STATS_COUNTER_LIST_2(ADD_COUNTER)
+ STATS_COUNTER_LIST(ADD_COUNTER)
STATS_COUNTER_NATIVE_CODE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
}; // End counter_list array.
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index dae03199f1..fcfff23a83 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -42,12 +42,18 @@
#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
static constexpr ctype FLAGDEFAULT_##nam{def};
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
+ static constexpr ctype FLAGDEFAULT_##nam{def};
// We want to write entries into our meta data table, for internal parsing and
-// printing / etc in the flag parser code. We only do this for writable flags.
+// printing / etc in the flag parser code.
#elif defined(FLAG_MODE_META)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
{Flag::TYPE_##ftype, #nam, &v8_flags.nam, &FLAGDEFAULT_##nam, cmt, false},
+// Readonly flags don't pass the value pointer since the struct expects a
+// mutable value. That's okay since the value always equals the default.
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
+ {Flag::TYPE_##ftype, #nam, nullptr, &FLAGDEFAULT_##nam, cmt, false},
#define FLAG_ALIAS(ftype, ctype, alias, nam) \
{Flag::TYPE_##ftype, #alias, &v8_flags.nam, &FLAGDEFAULT_##nam, \
"alias for --" #nam, false}, // NOLINT(whitespace/indent)
@@ -56,20 +62,20 @@
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
#define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value) \
changed |= TriggerImplication(v8_flags.whenflag, #whenflag, \
- &v8_flags.thenflag, value, false);
+ &v8_flags.thenflag, #thenflag, value, false);
// A weak implication will be overwritten by a normal implication or by an
// explicit flag.
#define DEFINE_WEAK_VALUE_IMPLICATION(whenflag, thenflag, value) \
changed |= TriggerImplication(v8_flags.whenflag, #whenflag, \
- &v8_flags.thenflag, value, true);
+ &v8_flags.thenflag, #thenflag, value, true);
#define DEFINE_GENERIC_IMPLICATION(whenflag, statement) \
if (v8_flags.whenflag) statement;
#define DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, value) \
changed |= TriggerImplication(!v8_flags.whenflag, "!" #whenflag, \
- &v8_flags.thenflag, value, false);
+ &v8_flags.thenflag, #thenflag, value, false);
// We apply a generic macro to the flags.
#elif defined(FLAG_MODE_APPLY)
@@ -181,6 +187,22 @@
//
#define FLAG FLAG_FULL
+// Experimental features.
+// Features that are still considered experimental and which are not ready for
+// fuzz testing should be defined using this macro. The feature will then imply
+// --experimental, which will indicate to the user that they are running an
+// experimental configuration of V8. Experimental features are always disabled
+// by default. When these features mature, the flag should first turn into a
+// regular feature flag (still disabled by default) and then ideally be staged
+// behind (for example) --future before being enabled by default.
+DEFINE_BOOL(experimental, false,
+ "Indicates that V8 is running with experimental features enabled. "
+ "This flag is typically not set explicitly but instead enabled as "
+ "an implication of other flags which enable experimental features.")
+#define DEFINE_EXPERIMENTAL_FEATURE(nam, cmt) \
+ FLAG(BOOL, bool, nam, false, cmt " (experimental)") \
+ DEFINE_IMPLICATION(nam, experimental)
+
// ATTENTION: This is set to true by default in d8. But for API compatibility,
// it generally defaults to false.
DEFINE_BOOL(abort_on_contradictory_flags, false,
@@ -218,8 +240,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_temporal, "Temporal") \
V(harmony_shadow_realm, "harmony ShadowRealm") \
V(harmony_struct, "harmony structs, shared structs, and shared arrays") \
- V(harmony_regexp_unicode_sets, "harmony RegExp Unicode Sets") \
- V(harmony_json_parse_with_source, "harmony json parse with source")
+ V(harmony_array_from_async, "harmony Array.fromAsync") \
+ V(harmony_iterator_helpers, "JavaScript iterator helpers")
#ifdef V8_INTL_SUPPORT
#define HARMONY_INPROGRESS(V) \
@@ -231,11 +253,12 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#endif
// Features that are complete (but still behind the --harmony flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_rab_gsab, \
- "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") \
- V(harmony_array_grouping, "harmony array grouping") \
- V(harmony_change_array_by_copy, "harmony change-Array-by-copy")
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_rab_gsab_transfer, "harmony ArrayBuffer.transfer") \
+ V(harmony_array_grouping, "harmony array grouping") \
+ V(harmony_json_parse_with_source, "harmony json parse with source")
+
+DEFINE_IMPLICATION(harmony_rab_gsab_transfer, harmony_rab_gsab)
#ifdef V8_INTL_SUPPORT
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -244,13 +267,16 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_atomics, "harmony atomics") \
- V(harmony_class_static_blocks, "harmony static initializer blocks") \
- V(harmony_array_find_last, "harmony array find last helpers") \
- V(harmony_import_assertions, "harmony import assertions") \
- V(harmony_symbol_as_weakmap_key, "harmony symbols as weakmap keys")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_atomics, "harmony atomics") \
+ V(harmony_import_assertions, "harmony import assertions") \
+ V(harmony_symbol_as_weakmap_key, "harmony symbols as weakmap keys") \
+ V(harmony_change_array_by_copy, "harmony change-Array-by-copy") \
+ V(harmony_string_is_well_formed, "harmony String#{is,to}WellFormed") \
+ V(harmony_rab_gsab, \
+ "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") \
+ V(harmony_regexp_unicode_sets, "harmony RegExp Unicode Sets")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) \
@@ -264,9 +290,14 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
// and associated tests are moved from the harmony directory to the appropriate
// esN directory.
+//
+// In-progress features are not code complete and are considered experimental,
+// i.e. not ready for fuzz testing.
-#define FLAG_INPROGRESS_FEATURES(id, description) \
- DEFINE_BOOL(id, false, "enable " #description " (in progress)")
+#define FLAG_INPROGRESS_FEATURES(id, description) \
+ DEFINE_BOOL(id, false, \
+ "enable " #description " (in progress / experimental)") \
+ DEFINE_IMPLICATION(id, experimental)
HARMONY_INPROGRESS(FLAG_INPROGRESS_FEATURES)
#undef FLAG_INPROGRESS_FEATURES
@@ -302,12 +333,6 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#define V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL false
#endif
-#ifdef V8_LITE_MODE
-#define V8_LITE_BOOL true
-#else
-#define V8_LITE_BOOL false
-#endif
-
#ifdef V8_ENABLE_LAZY_SOURCE_POSITIONS
#define V8_LAZY_SOURCE_POSITIONS_BOOL true
#else
@@ -331,12 +356,17 @@ DEFINE_BOOL(stress_snapshot, false,
// there (that only happens in mksnapshot and in --stress-snapshot mode).
DEFINE_NEG_IMPLICATION(stress_snapshot, incremental_marking)
-DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
+#ifdef V8_LITE_MODE
+#define V8_LITE_MODE_BOOL true
+#else
+#define V8_LITE_MODE_BOOL false
+#endif
+
+DEFINE_BOOL(lite_mode, V8_LITE_MODE_BOOL,
"enables trade-off of performance for memory savings")
// Lite mode implies other flags to trade-off performance for memory.
DEFINE_IMPLICATION(lite_mode, jitless)
-DEFINE_IMPLICATION(lite_mode, lazy_feedback_allocation)
DEFINE_IMPLICATION(lite_mode, optimize_for_size)
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
@@ -413,30 +443,14 @@ DEFINE_BOOL_READONLY(
DEFINE_BOOL_READONLY(conservative_stack_scanning,
V8_ENABLE_CONSERVATIVE_STACK_SCANNING_BOOL,
"use conservative stack scanning")
+DEFINE_IMPLICATION(conservative_stack_scanning, minor_mc)
+DEFINE_NEG_IMPLICATION(conservative_stack_scanning, compact_with_stack)
#if V8_ENABLE_WEBASSEMBLY
DEFINE_NEG_IMPLICATION(conservative_stack_scanning,
experimental_wasm_stack_switching)
#endif // V8_ENABLE_WEBASSEMBLY
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-#define V8_ENABLE_INNER_POINTER_RESOLUTION_OSB_BOOL true
-#else
-#define V8_ENABLE_INNER_POINTER_RESOLUTION_OSB_BOOL false
-#endif
-DEFINE_BOOL_READONLY(inner_pointer_resolution_osb,
- V8_ENABLE_INNER_POINTER_RESOLUTION_OSB_BOOL,
- "use object start bitmap for IPR")
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_MB
-#define V8_ENABLE_INNER_POINTER_RESOLUTION_MB_BOOL true
-#else
-#define V8_ENABLE_INNER_POINTER_RESOLUTION_MB_BOOL false
-#endif
-DEFINE_BOOL_READONLY(inner_pointer_resolution_mb,
- V8_ENABLE_INNER_POINTER_RESOLUTION_MB_BOOL,
- "use marking bitmap for IPR")
-
#ifdef V8_ENABLE_FUTURE
#define FUTURE_BOOL true
#else
@@ -446,49 +460,85 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_BOOL(lower_tier_as_toptier, false,
- "remove tier-up logic from the top tier")
-
+DEFINE_BOOL(force_emit_interrupt_budget_checks, false,
+ "force emit tier-up logic from all non-turbofan code, even if it "
+ "is the top enabled tier")
#ifdef V8_ENABLE_MAGLEV
#define V8_ENABLE_MAGLEV_BOOL true
DEFINE_BOOL(maglev, false, "enable the maglev optimizing compiler")
-DEFINE_BOOL(maglev_inlining, false,
- "enable inlining in the maglev optimizing compiler")
+DEFINE_WEAK_IMPLICATION(future, maglev)
+DEFINE_EXPERIMENTAL_FEATURE(
+ maglev_future,
+ "enable maglev features that we want to ship in the not-too-far future")
+DEFINE_IMPLICATION(maglev_future, maglev)
+DEFINE_EXPERIMENTAL_FEATURE(maglev_inlining,
+ "enable inlining in the maglev optimizing compiler")
+DEFINE_EXPERIMENTAL_FEATURE(
+ maglev_untagged_phis,
+ "enable phi untagging in the maglev optimizing compiler")
+DEFINE_WEAK_IMPLICATION(maglev_future, maglev_inlining)
+DEFINE_WEAK_IMPLICATION(maglev_future, maglev_untagged_phis)
+
+DEFINE_INT(max_maglev_inline_depth, 1,
+ "max depth of functions that Maglev will inline")
+DEFINE_INT(max_maglev_inlined_bytecode_size, 460,
+ "maximum size of bytecode for a single inlining")
+DEFINE_INT(max_maglev_inlined_bytecode_size_cumulative, 920,
+ "maximum cumulative size of bytecode considered for inlining")
+DEFINE_INT(max_maglev_inlined_bytecode_size_small, 27,
+ "maximum size of bytecode considered for small function inlining")
+DEFINE_FLOAT(min_maglev_inlining_frequency, 0.10,
+ "minimum frequency for inlining")
DEFINE_BOOL(maglev_reuse_stack_slots, true,
"reuse stack slots in the maglev optimizing compiler")
+DEFINE_BOOL(
+ optimize_on_next_call_optimizes_to_maglev, false,
+ "make OptimizeFunctionOnNextCall optimize to maglev instead of turbofan")
+
// We stress maglev by setting a very low interrupt budget for maglev. This
// way, we still gather *some* feedback before compiling optimized code.
DEFINE_BOOL(stress_maglev, false, "trigger maglev compilation earlier")
DEFINE_IMPLICATION(stress_maglev, maglev)
-DEFINE_VALUE_IMPLICATION(stress_maglev, interrupt_budget_for_maglev, 128)
+DEFINE_WEAK_VALUE_IMPLICATION(stress_maglev, invocation_count_for_maglev, 4)
#else
#define V8_ENABLE_MAGLEV_BOOL false
DEFINE_BOOL_READONLY(maglev, false, "enable the maglev optimizing compiler")
+DEFINE_BOOL_READONLY(
+ maglev_future, false,
+ "enable maglev features that we want to ship in the not-too-far future")
+DEFINE_BOOL_READONLY(maglev_inlining, false,
+ "enable inlining in the maglev optimizing compiler")
+DEFINE_BOOL_READONLY(maglev_untagged_phis, false,
+ "enable phi untagging in the maglev optimizing compiler")
DEFINE_BOOL_READONLY(stress_maglev, false, "trigger maglev compilation earlier")
+DEFINE_BOOL_READONLY(
+ optimize_on_next_call_optimizes_to_maglev, false,
+ "make OptimizeFunctionOnNextCall optimize to maglev instead of turbofan")
#endif // V8_ENABLE_MAGLEV
DEFINE_STRING(maglev_filter, "*", "optimization filter for the maglev compiler")
DEFINE_BOOL(maglev_assert, false, "insert extra assertion in maglev code")
+DEFINE_DEBUG_BOOL(maglev_assert_stack_size, true,
+ "insert stack size checks before every IR node")
DEFINE_BOOL(maglev_break_on_entry, false, "insert an int3 on maglev entries")
DEFINE_BOOL(print_maglev_graph, false, "print maglev graph")
+DEFINE_BOOL(print_maglev_deopt_verbose, false, "print verbose deopt info")
DEFINE_BOOL(print_maglev_code, false, "print maglev code")
DEFINE_BOOL(trace_maglev_graph_building, false, "trace maglev graph building")
DEFINE_BOOL(trace_maglev_regalloc, false, "trace maglev register allocation")
+DEFINE_BOOL(trace_maglev_inlining, false, "trace maglev inlining")
+DEFINE_BOOL(trace_maglev_inlining_verbose, false,
+ "trace maglev inlining (verbose)")
+DEFINE_IMPLICATION(trace_maglev_inlining_verbose, trace_maglev_inlining)
// TODO(v8:7700): Remove once stable.
DEFINE_BOOL(maglev_function_context_specialization, true,
"enable function context specialization in maglev")
-DEFINE_BOOL(maglev_ool_prologue, false, "use the Maglev out of line prologue")
#if ENABLE_SPARKPLUG
-DEFINE_WEAK_IMPLICATION(future, sparkplug)
DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
#endif
-#if V8_SHORT_BUILTIN_CALLS
-DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
-#endif
-DEFINE_WEAK_NEG_IMPLICATION(future, write_protect_code_memory)
DEFINE_BOOL_READONLY(dict_property_const_tracking,
V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
@@ -499,14 +549,15 @@ DEFINE_UINT(max_opt, 999,
"> 3 == any, 0 == ignition/interpreter, 1 == sparkplug/baseline, "
"2 == maglev, 3 == turbofan")
+#ifdef V8_ENABLE_TURBOFAN
DEFINE_WEAK_VALUE_IMPLICATION(max_opt < 3, turbofan, false)
+#endif // V8_ENABLE_TURBOFAN
#ifdef V8_ENABLE_MAGLEV
DEFINE_WEAK_VALUE_IMPLICATION(max_opt < 2, maglev, false)
#endif // V8_ENABLE_MAGLEV
#if ENABLE_SPARKPLUG
DEFINE_WEAK_VALUE_IMPLICATION(max_opt < 1, sparkplug, false)
#endif // ENABLE_SPARKPLUG
- //
// Flag to select wasm trace mark type
DEFINE_STRING(
@@ -514,34 +565,40 @@ DEFINE_STRING(
"Select which native code sequence to use for wasm trace instruction: "
"default or cpuid")
-// Flags for jitless
-DEFINE_BOOL(jitless, V8_LITE_BOOL,
+#ifdef V8_JITLESS
+#define V8_JITLESS_BOOL true
+DEFINE_BOOL_READONLY(jitless, true,
+ "Disable runtime allocation of executable memory.")
+#else
+#define V8_JITLESS_BOOL false
+DEFINE_BOOL(jitless, V8_LITE_MODE_BOOL,
"Disable runtime allocation of executable memory.")
-
-DEFINE_WEAK_IMPLICATION(jitless, lower_tier_as_toptier)
+#endif // V8_JITLESS
// Jitless V8 has a few implications:
-DEFINE_NEG_IMPLICATION(jitless, turbofan)
// Field type tracking is only used by TurboFan.
DEFINE_NEG_IMPLICATION(jitless, track_field_types)
-// Regexps are interpreted.
+// No code generation at runtime.
DEFINE_IMPLICATION(jitless, regexp_interpret_all)
+DEFINE_NEG_IMPLICATION(jitless, turbofan)
#if ENABLE_SPARKPLUG
-// No Sparkplug compilation.
DEFINE_NEG_IMPLICATION(jitless, sparkplug)
DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
#endif // ENABLE_SPARKPLUG
#ifdef V8_ENABLE_MAGLEV
-// No Maglev compilation.
DEFINE_NEG_IMPLICATION(jitless, maglev)
#endif // V8_ENABLE_MAGLEV
-
+// Doesn't work without an executable code space.
DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
// TODO(tebbi): Support allocating types from background thread.
DEFINE_NEG_IMPLICATION(assert_types, concurrent_recompilation)
+DEFINE_BOOL(
+ turboshaft_assert_types, false,
+ "generate runtime type assertions to test the turboshaft type system")
+DEFINE_NEG_IMPLICATION(turboshaft_assert_types, concurrent_recompilation)
// Enable verification of SimplifiedLowering in debug builds.
DEFINE_BOOL(verify_simplified_lowering, DEBUG_BOOL,
@@ -577,7 +634,6 @@ DEFINE_BOOL(trace_block_coverage, false,
"trace collected block coverage information")
DEFINE_BOOL(trace_protector_invalidation, false,
"trace protector cell invalidations")
-DEFINE_BOOL(trace_web_snapshot, false, "trace web snapshot deserialization")
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
@@ -602,9 +658,7 @@ DEFINE_INT(interrupt_budget_factor_for_feedback_allocation, 8,
"allocating feedback vectors, used when bytecode size is known")
// Tiering: Maglev.
-// The Maglev interrupt budget is chosen to be roughly 1/10th of Turbofan's
-// overall budget (including the multiple required ticks).
-DEFINE_INT(interrupt_budget_for_maglev, 30 * KB,
+DEFINE_INT(invocation_count_for_maglev, 100,
"interrupt budget which should be used for the profiler counter")
// Tiering: Turbofan.
@@ -616,9 +670,27 @@ DEFINE_INT(ticks_before_optimization, 3,
DEFINE_INT(bytecode_size_allowance_per_tick, 150,
"increases the number of ticks required for optimization by "
"bytecode.length/X")
+DEFINE_INT(invocation_count_for_osr, 500,
+ "number of invocations we want to see after requesting previous "
+ "tier up to increase the OSR urgency")
DEFINE_INT(
max_bytecode_size_for_early_opt, 81,
"Maximum bytecode length for a function to be optimized on the first tick")
+DEFINE_BOOL(global_ic_updated_flag, false,
+ "Track, globally, whether any IC changed, and use this in tierup "
+ "heuristics.")
+DEFINE_INT(minimum_invocations_after_ic_update, 500,
+ "How long to minimally wait after IC update before tier up")
+DEFINE_BOOL(reset_interrupt_on_ic_update, true,
+ "On IC change, reset the interrupt budget for just that function.")
+DEFINE_BOOL(reset_ticks_on_ic_update, true,
+ "On IC change, reset the ticks for just that function.")
+DEFINE_BOOL(maglev_increase_budget_forward_jump, false,
+ "Increase interrupt budget on forward jumps in maglev code")
+DEFINE_WEAK_VALUE_IMPLICATION(maglev, max_bytecode_size_for_early_opt, 0)
+DEFINE_WEAK_VALUE_IMPLICATION(maglev, ticks_before_optimization, 1)
+DEFINE_WEAK_VALUE_IMPLICATION(maglev, bytecode_size_allowance_per_tick, 10000)
+DEFINE_WEAK_VALUE_IMPLICATION(maglev, reset_ticks_on_ic_update, false)
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
@@ -689,7 +761,7 @@ DEFINE_BOOL(baseline_batch_compilation, true, "batch compile Sparkplug code")
DEFINE_BOOL_READONLY(concurrent_sparkplug, false,
"compile Sparkplug code in a background thread")
#else
-DEFINE_BOOL(concurrent_sparkplug, false,
+DEFINE_BOOL(concurrent_sparkplug, ENABLE_SPARKPLUG_BY_DEFAULT,
"compile Sparkplug code in a background thread")
DEFINE_WEAK_IMPLICATION(future, concurrent_sparkplug)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sparkplug)
@@ -697,7 +769,7 @@ DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sparkplug)
DEFINE_NEG_IMPLICATION(jitless, concurrent_sparkplug)
#endif
DEFINE_UINT(
- concurrent_sparkplug_max_threads, 0,
+ concurrent_sparkplug_max_threads, 1,
"max number of threads that concurrent Sparkplug can use (0 for unbounded)")
DEFINE_BOOL(concurrent_sparkplug_high_priority_threads, false,
"use high priority compiler threads for concurrent Sparkplug")
@@ -733,6 +805,9 @@ DEFINE_BOOL(
// forwarding table.
DEFINE_NEG_IMPLICATION(shared_string_table, always_use_string_forwarding_table)
+DEFINE_BOOL(transition_strings_during_gc_with_stack, false,
+ "Transition strings during a full GC with stack")
+
DEFINE_SIZE_T(initial_shared_heap_size, 0,
"initial size of the shared heap (in Mbytes); "
"other heap size flags (e.g. initial_heap_size) take precedence")
@@ -758,6 +833,7 @@ DEFINE_BOOL(
stress_concurrent_inlining, false,
"create additional concurrent optimization jobs but throw away result")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_recompilation)
+DEFINE_IMPLICATION(stress_concurrent_inlining, turbofan)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
@@ -766,7 +842,7 @@ DEFINE_BOOL(maglev_overwrite_budget, false,
DEFINE_WEAK_IMPLICATION(maglev, maglev_overwrite_budget)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, maglev_overwrite_budget)
DEFINE_WEAK_VALUE_IMPLICATION(maglev_overwrite_budget, interrupt_budget,
- 80 * KB)
+ 200 * KB)
DEFINE_BOOL(stress_concurrent_inlining_attach_code, false,
"create additional concurrent optimization jobs")
DEFINE_IMPLICATION(stress_concurrent_inlining_attach_code,
@@ -790,10 +866,17 @@ DEFINE_INT(deopt_every_n_times, 0,
DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points")
// Flags for TurboFan.
+#ifdef V8_ENABLE_TURBOFAN
+#define V8_ENABLE_TURBOFAN_BOOL true
DEFINE_BOOL(turbofan, true, "use the Turbofan optimizing compiler")
// TODO(leszeks): Temporary alias until we make sure all our infra is passing
// --turbofan instead of --opt.
DEFINE_ALIAS_BOOL(opt, turbofan)
+#else
+#define V8_ENABLE_TURBOFAN_BOOL false
+DEFINE_BOOL_READONLY(turbofan, false, "use the Turbofan optimizing compiler")
+DEFINE_BOOL_READONLY(opt, false, "use the Turbofan optimizing compiler")
+#endif // V8_ENABLE_TURBOFAN
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
@@ -883,7 +966,11 @@ DEFINE_BOOL(turbo_inline_array_builtins, true,
"inline array builtins in TurboFan code")
DEFINE_BOOL(use_osr, true, "use on-stack replacement")
DEFINE_BOOL(concurrent_osr, true, "enable concurrent OSR")
-DEFINE_WEAK_IMPLICATION(future, concurrent_osr)
+
+// TODO(dmercadier): re-enable Turbofan's string builder once it's fixed.
+DEFINE_BOOL_READONLY(turbo_string_builder, false,
+ "use TurboFan fast string builder")
+// DEFINE_WEAK_IMPLICATION(future, turbo_string_builder)
DEFINE_BOOL(trace_osr, false, "trace on-stack replacement")
DEFINE_BOOL(log_or_trace_osr, false,
@@ -908,6 +995,8 @@ DEFINE_STRING(
"emit data about basic block usage in builtins to this file "
"(requires that V8 was built with v8_enable_builtins_profiling=true)")
+DEFINE_BOOL(abort_on_bad_builtin_profile_data, false,
+ "flag for mksnapshot, abort if builtins profile can't be applied")
DEFINE_BOOL(
warn_about_builtin_profile_data, false,
"flag for mksnapshot, emit warnings when applying builtin profile data")
@@ -963,23 +1052,17 @@ DEFINE_BOOL(turbo_optimize_apply, true, "optimize Function.prototype.apply")
DEFINE_BOOL(turbo_optimize_math_minmax, true,
"optimize call math.min/max with double array")
-DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
+DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, false,
"enable experimental feedback collection in generic lowering.")
DEFINE_BOOL(isolate_script_cache_ageing, true,
"enable ageing of the isolate script cache.")
-DEFINE_FLOAT(script_delay, 0, "busy wait [ms] on every Script::Run")
-DEFINE_FLOAT(script_delay_once, 0, "busy wait [ms] on the first Script::Run")
-DEFINE_FLOAT(script_delay_fraction, 0.0,
- "busy wait after each Script::Run by the given fraction of the "
- "run's duration")
-
-DEFINE_BOOL(turboshaft, false, "enable TurboFan's Turboshaft phases for JS")
-DEFINE_WEAK_IMPLICATION(future, turboshaft)
+DEFINE_EXPERIMENTAL_FEATURE(turboshaft,
+ "enable TurboFan's Turboshaft phases for JS")
DEFINE_BOOL(turboshaft_trace_reduction, false,
"trace individual Turboshaft reduction steps")
-DEFINE_BOOL(turboshaft_wasm, false,
- "enable TurboFan's Turboshaft phases for wasm")
+DEFINE_EXPERIMENTAL_FEATURE(turboshaft_wasm,
+ "enable TurboFan's Turboshaft phases for wasm")
#ifdef DEBUG
DEFINE_UINT64(turboshaft_opt_bisect_limit, std::numeric_limits<uint64_t>::max(),
"stop applying optional optimizations after a specified number "
@@ -987,6 +1070,11 @@ DEFINE_UINT64(turboshaft_opt_bisect_limit, std::numeric_limits<uint64_t>::max(),
DEFINE_UINT64(turboshaft_opt_bisect_break, std::numeric_limits<uint64_t>::max(),
"abort after a specified number of steps, useful for bisecting "
"optimization bugs")
+DEFINE_BOOL(turboshaft_verify_reductions, false,
+ "check that turboshaft reductions are correct with respect to "
+ "inferred types")
+DEFINE_BOOL(turboshaft_trace_typing, false,
+ "print typing steps of turboshaft type inference")
#endif // DEBUG
// Favor memory over execution speed.
@@ -1010,11 +1098,8 @@ DEFINE_INT(wasm_num_compilation_tasks, 128,
DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
"trace wasm native heap events")
-DEFINE_BOOL(wasm_write_protect_code_memory, true,
- "write protect code memory on the wasm native heap with mprotect")
DEFINE_BOOL(wasm_memory_protection_keys, true,
- "protect wasm code memory with PKU if available (takes precedence "
- "over --wasm-write-protect-code-memory)")
+ "protect wasm code memory with PKU if available")
DEFINE_DEBUG_BOOL(trace_wasm_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
@@ -1029,8 +1114,10 @@ DEFINE_UINT(wasm_max_mem_pages, kMaxUInt32,
"maximum number of 64KiB memory pages per wasm memory")
DEFINE_UINT(wasm_max_table_size, wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
-DEFINE_UINT(wasm_max_code_space, kMaxWasmCodeMB,
+DEFINE_UINT(wasm_max_committed_code_mb, kMaxCommittedWasmCodeMB,
"maximum committed code space for wasm (in MB)")
+DEFINE_UINT(wasm_max_code_space_size_mb, kDefaultMaxWasmCodeSpaceSizeMb,
+ "maximum size of a single wasm code space")
DEFINE_BOOL(wasm_tier_up, true,
"enable tier up to the optimizing compiler (requires --liftoff to "
"have an effect)")
@@ -1039,6 +1126,8 @@ DEFINE_BOOL(wasm_dynamic_tiering, true,
DEFINE_NEG_NEG_IMPLICATION(liftoff, wasm_dynamic_tiering)
DEFINE_INT(wasm_tiering_budget, 1800000,
"budget for dynamic tiering (rough approximation of bytes executed")
+DEFINE_INT(max_wasm_functions, wasm::kV8MaxWasmFunctions,
+ "maximum number of wasm functions supported in a module")
DEFINE_INT(
wasm_caching_threshold, 1000000,
"the amount of wasm top tier code that triggers the next caching event")
@@ -1111,20 +1200,25 @@ DEFINE_STRING(dump_wasm_module_path, nullptr,
// for configurability.
#include "src/wasm/wasm-feature-flags.h"
-#define DECL_WASM_FLAG(feat, desc, val) \
- DEFINE_BOOL(experimental_wasm_##feat, val, \
- "enable prototype " desc " for wasm")
-FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG)
+#define DECL_WASM_FLAG(feat, desc, val) \
+ DEFINE_BOOL(experimental_wasm_##feat, val, "enable " desc " for Wasm")
+#define DECL_EXPERIMENTAL_WASM_FLAG(feat, desc, val) \
+ DEFINE_EXPERIMENTAL_FEATURE(experimental_wasm_##feat, \
+ "enable " desc " for Wasm")
+// Experimental wasm features imply --experimental and get the " (experimental)"
+// suffix.
+FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(DECL_EXPERIMENTAL_WASM_FLAG)
+// Staging and shipped features do not imply --experimental.
+FOREACH_WASM_STAGING_FEATURE_FLAG(DECL_WASM_FLAG)
+FOREACH_WASM_SHIPPED_FEATURE_FLAG(DECL_WASM_FLAG)
#undef DECL_WASM_FLAG
+#undef DECL_EXPERIMENTAL_WASM_FLAG
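
These wasm feature flags are produced by X-macro expansion: every FOREACH_WASM_*_FEATURE_FLAG list invokes the matching DECL_* macro once per entry of wasm-feature-flags.h, and only the experimental list now goes through DEFINE_EXPERIMENTAL_FEATURE. A self-contained sketch of the pattern, with a made-up feature list rather than the real one:

    #include <cstdio>

    // Hypothetical feature list; the real one lives in wasm-feature-flags.h.
    #define FOREACH_DEMO_FEATURE(V)  \
      V(foo, "foo feature", false)   \
      V(bar, "bar feature", true)

    // One expansion declares a default per feature...
    #define DECL_DEFAULT(name, desc, val) bool flag_##name = val;
    FOREACH_DEMO_FEATURE(DECL_DEFAULT)
    #undef DECL_DEFAULT

    // ...another expansion of the same list generates code that uses them.
    #define PRINT_FLAG(name, desc, val) \
      std::printf("--experimental-%s (%s): %d\n", #name, desc, flag_##name);

    int main() {
      FOREACH_DEMO_FEATURE(PRINT_FLAG)
    }
    #undef PRINT_FLAG
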
DEFINE_IMPLICATION(experimental_wasm_gc, experimental_wasm_typed_funcref)
DEFINE_IMPLICATION(experimental_wasm_stack_switching,
experimental_wasm_type_reflection)
-DEFINE_BOOL(wasm_gc_structref_as_dataref, true,
- "compatibility mode: Treat structref as dataref")
-
DEFINE_BOOL(wasm_staging, false, "enable staged wasm features")
#define WASM_STAGING_IMPLICATION(feat, desc, val) \
@@ -1149,21 +1243,30 @@ DEFINE_BOOL(wasm_math_intrinsics, true,
DEFINE_BOOL(
wasm_inlining, false,
"enable inlining of wasm functions into wasm functions (experimental)")
-DEFINE_SIZE_T(wasm_inlining_budget, 9000,
+DEFINE_SIZE_T(wasm_inlining_budget, 5000,
"maximum graph size (in TF nodes) that allows inlining more")
+DEFINE_SIZE_T(wasm_inlining_max_size, 500,
+ "maximum function size (in wire bytes) that may be inlined")
DEFINE_BOOL(wasm_speculative_inlining, false,
"enable speculative inlining of call_ref targets (experimental)")
DEFINE_BOOL(trace_wasm_inlining, false, "trace wasm inlining")
DEFINE_BOOL(trace_wasm_speculative_inlining, false,
"trace wasm speculative inlining")
DEFINE_BOOL(trace_wasm_typer, false, "trace wasm typer")
+DEFINE_BOOL(wasm_final_types, false,
+ "enable final types as default for wasm-gc")
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining)
DEFINE_WEAK_IMPLICATION(experimental_wasm_gc, wasm_speculative_inlining)
+// For historical reasons, both --wasm-inlining and --wasm-speculative-inlining
+// are aliases for --experimental-wasm-inlining.
+DEFINE_IMPLICATION(wasm_inlining, experimental_wasm_inlining)
+DEFINE_IMPLICATION(wasm_speculative_inlining, experimental_wasm_inlining)
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
-DEFINE_BOOL(wasm_loop_peeling, false, "enable loop peeling for wasm functions")
+DEFINE_BOOL(wasm_loop_peeling, true, "enable loop peeling for wasm functions")
DEFINE_SIZE_T(wasm_loop_peeling_max_size, 1000, "maximum size for peeling")
+DEFINE_BOOL(trace_wasm_loop_peeling, false, "trace wasm loop peeling")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
@@ -1174,12 +1277,8 @@ DEFINE_BOOL(print_wasm_stub_code, false, "print WebAssembly stub code")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
"enable lazy compilation for asm-wasm modules")
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
-DEFINE_BOOL(wasm_lazy_compilation, false,
+DEFINE_BOOL(wasm_lazy_compilation, true,
"enable lazy compilation for all wasm modules")
-DEFINE_WEAK_IMPLICATION(future, wasm_lazy_compilation)
-// Write protect code causes too much overhead for lazy compilation.
-DEFINE_WEAK_NEG_IMPLICATION(wasm_lazy_compilation,
- wasm_write_protect_code_memory)
DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
DEFINE_BOOL(wasm_lazy_validation, false,
@@ -1200,6 +1299,8 @@ DEFINE_SIZE_T(wasm_disassembly_max_mb, 1000,
"maximum size of produced disassembly (in MB, approximate)")
DEFINE_BOOL(trace_wasm, false, "trace wasm function calls")
+// Inlining breaks --trace-wasm, hence disable that if --trace-wasm is enabled.
+DEFINE_NEG_IMPLICATION(trace_wasm, experimental_wasm_inlining)
// Flags for Wasm GDB remote debugging.
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
@@ -1219,6 +1320,13 @@ DEFINE_BOOL(trace_wasm_gdb_remote, false, "trace Webassembly GDB-remote server")
DEFINE_DEBUG_BOOL(trace_wasm_instances, false,
"trace creation and collection of wasm instances")
+// Flags for WASM SIMD256 revectorize
+#ifdef V8_ENABLE_WASM_SIMD256_REVEC
+DEFINE_BOOL(experimental_wasm_revectorize, false,
+ "enable 128 to 256 bit revectorization for Webassembly SIMD")
+DEFINE_BOOL(trace_wasm_revectorize, false, "trace wasm revectorize")
+#endif // V8_ENABLE_WASM_SIMD256_REVEC
+
#endif // V8_ENABLE_WEBASSEMBLY
DEFINE_INT(stress_sampling_allocation_profiler, 0,
@@ -1247,11 +1355,7 @@ DEFINE_BOOL(huge_max_old_generation_size, true,
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
DEFINE_BOOL(separate_gc_phases, false,
"young and full garbage collection phases are not overlapping")
-DEFINE_BOOL(global_gc_scheduling, true,
- "enable GC scheduling based on global memory")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
-DEFINE_BOOL(shared_space, false,
- "Implement shared heap as shared space on a main isolate.")
// TODO(12950): The next two flags only have an effect if
// V8_ENABLE_ALLOCATION_TIMEOUT is set, so we should only define them in that
@@ -1325,25 +1429,29 @@ DEFINE_INT(incremental_marking_hard_trigger, 0,
"threshold for starting incremental marking immediately in percent "
"of available space: limit - size")
DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
-DEFINE_INT(minor_mc_task_trigger, 80,
- "minormc task trigger in percent of the current heap limit")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
-DEFINE_BOOL(scavenge_task, true, "schedule scavenge tasks")
-DEFINE_INT(scavenge_task_trigger, 80,
- "scavenge task trigger in percent of the current heap limit")
+DEFINE_BOOL(minor_gc_task, true, "schedule scavenge tasks")
+DEFINE_INT(minor_gc_task_trigger, 80,
+ "minor GC task trigger in percent of the current heap limit")
DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
-DEFINE_BOOL(cppgc_young_generation, false,
- "run young generation garbage collections in Oilpan")
-DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
+DEFINE_EXPERIMENTAL_FEATURE(
+ cppgc_young_generation,
+ "run young generation garbage collections in Oilpan")
+// CppGC young generation (enables unified young heap) is based on Minor MC.
+DEFINE_IMPLICATION(cppgc_young_generation, minor_mc)
+// Unified young generation disables the unmodified wrapper reclamation
+// optimization.
+DEFINE_NEG_IMPLICATION(cppgc_young_generation, reclaim_unmodified_wrappers)
+DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
#if defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
-#define V8_CONCURRENT_MARKING_BOOL true
+DEFINE_BOOL(concurrent_marking, true, "use concurrent marking")
#else
-#define V8_CONCURRENT_MARKING_BOOL false
+// Concurrent marking cannot be used without atomic object field loads and
+// stores.
+DEFINE_BOOL(concurrent_marking, false, "use concurrent marking")
#endif
-DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
- "use concurrent marking")
DEFINE_INT(
concurrent_marking_max_worker_num, 7,
"max worker number of concurrent marking, 0 for NumberOfWorkerThreads")
@@ -1351,13 +1459,14 @@ DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
"concurrently sweep array buffers")
DEFINE_BOOL(stress_concurrent_allocation, false,
"start background threads that allocate memory")
-DEFINE_BOOL(parallel_marking, V8_CONCURRENT_MARKING_BOOL,
- "use parallel marking in atomic pause")
+DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"number of fixpoint iterations it takes to switch to linear "
"ephemeron algorithm")
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
+DEFINE_NEG_NEG_IMPLICATION(concurrent_sweeping,
+ concurrent_array_buffer_sweeping)
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, true,
"use parallel pointer update during compaction")
@@ -1419,6 +1528,8 @@ DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
DEFINE_BOOL(memory_reducer, true, "use memory reducer")
DEFINE_BOOL(memory_reducer_for_small_heaps, true,
"use memory reducer for small heaps")
+DEFINE_BOOL(memory_reducer_single_gc, false,
+ "only schedule a single GC from memory reducer")
DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
@@ -1434,6 +1545,8 @@ DEFINE_BOOL(compact_with_stack, true,
DEFINE_BOOL(
compact_code_space_with_stack, true,
"Perform code space compaction when finalizing a full GC with stack")
+DEFINE_BOOL(shortcut_strings_with_stack, true,
+ "Shortcut Strings during GC with stack")
DEFINE_BOOL(stress_compaction, false,
"Stress GC compaction to flush out bugs (implies "
"--force_marking_deque_overflows)")
@@ -1500,6 +1613,7 @@ DEFINE_BOOL(crash_on_aborted_evacuation, false,
DEFINE_BOOL(cppheap_incremental_marking, false,
"use incremental marking for CppHeap")
DEFINE_NEG_NEG_IMPLICATION(incremental_marking, cppheap_incremental_marking)
+DEFINE_NEG_NEG_IMPLICATION(incremental_marking, memory_reducer)
DEFINE_WEAK_IMPLICATION(incremental_marking, cppheap_incremental_marking)
DEFINE_BOOL(cppheap_concurrent_marking, false,
"use concurrent marking for CppHeap")
@@ -1670,7 +1784,10 @@ DEFINE_BOOL(always_turbofan, false, "always try to optimize functions")
DEFINE_IMPLICATION(always_turbofan, turbofan)
DEFINE_BOOL(always_osr, false, "always try to OSR functions")
DEFINE_BOOL(prepare_always_turbofan, false, "prepare for turning on always opt")
-DEFINE_BOOL(deopt_to_baseline, ENABLE_SPARKPLUG,
+// On Arm64, every entry point in a function needs a BTI landing pad
+// instruction. Deopting to baseline means every bytecode is a potential entry
+// point, which increases code size significantly.
+DEFINE_BOOL(deopt_to_baseline, false,
"deoptimize to baseline code when available")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
@@ -1712,11 +1829,6 @@ DEFINE_NEG_IMPLICATION(fuzzing, hard_abort)
DEFINE_BOOL(experimental_value_unavailable, false,
"enable experimental <value unavailable> in scopes")
-DEFINE_BOOL(experimental_reuse_locals_blocklists, true,
- "enable reuse of local blocklists across multiple debug-evaluates")
-
-DEFINE_BOOL(experimental_remove_internal_scopes_property, false,
- "don't report the artificial [[Scopes]] property for functions")
// disassembler
DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
@@ -1781,7 +1893,7 @@ DEFINE_BOOL(native_code_counters, DEBUG_BOOL,
DEFINE_BOOL(super_ic, true, "use an IC for super property loads")
-DEFINE_BOOL(enable_mega_dom_ic, false, "use MegaDOM IC state for API objects")
+DEFINE_BOOL(mega_dom_ic, false, "use MegaDOM IC state for API objects")
// objects.cc
DEFINE_BOOL(trace_prototype_users, false,
@@ -1875,6 +1987,26 @@ DEFINE_BOOL(experimental_flush_embedded_blob_icache, true,
DEFINE_BOOL(short_builtin_calls, V8_SHORT_BUILTIN_CALLS_BOOL,
"Put embedded builtins code into the code range for shorter "
"builtin calls/jumps if system has >=4GB memory")
+DEFINE_BOOL(trace_code_range_allocation, false,
+ "Trace code range allocation process.")
+
+#ifdef V8_TARGET_OS_CHROMEOS
+#define V8_TARGET_OS_CHROMEOS_BOOL true
+#else
+#define V8_TARGET_OS_CHROMEOS_BOOL false
+#endif // V8_TARGET_OS_CHROMEOS
+
+// TODO(1417652): Enable on ChromeOS once the issue is fixed.
+DEFINE_BOOL(
+ better_code_range_allocation,
+ V8_EXTERNAL_CODE_SPACE_BOOL&& COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL &&
+ !V8_TARGET_OS_CHROMEOS_BOOL,
+ "This mode tries harder to allocate code range near .text section. "
+ "Works only for configurations with external code space and "
+ "shared pointer compression cage.")
+DEFINE_BOOL(abort_on_far_code_range, false,
+ "Abort if code range is allocated further away than 4GB from the"
+ ".text section")
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1955,11 +2087,22 @@ DEFINE_BOOL(
"test runner turns on this flag to enable a check that the function was "
"prepared for optimization before marking it for optimization")
+DEFINE_EXPERIMENTAL_FEATURE(
+ strict_termination_checks,
+ "Enable strict terminating DCHECKs to prevent accidentally "
+ "keeping on executing JS after terminating V8.")
+
DEFINE_BOOL(
fuzzing, false,
"Fuzzers use this flag to signal that they are ... fuzzing. This causes "
"intrinsics to fail silently (e.g. return undefined) on invalid usage.")
+// When fuzzing, always compile functions twice and ensure that the generated
+// bytecode is the same. This can help find bugs such as crbug.com/1394403 as it
+// avoids the need for bytecode aging to kick in to trigger the recompilation.
+DEFINE_WEAK_NEG_IMPLICATION(fuzzing, lazy)
+DEFINE_WEAK_IMPLICATION(fuzzing, stress_lazy_source_positions)
+
#if defined(V8_OS_AIX) && defined(COMPONENT_BUILD)
// FreezeFlags relies on mprotect() method, which does not work by default on
// shared mem: https://www.ibm.com/docs/en/aix/7.2?topic=m-mprotect-subroutine
@@ -1976,6 +2119,9 @@ DEFINE_STRING(embedded_src, nullptr,
DEFINE_STRING(
embedded_variant, nullptr,
"Label to disambiguate symbols in embedded data file. (mksnapshot only)")
+DEFINE_STRING(static_roots_src, nullptr,
+ "Path for writing a fresh static-roots.h. (mksnapshot only, "
+ "build without static roots only)")
DEFINE_STRING(startup_src, nullptr,
"Write V8 startup as C++ src. (mksnapshot only)")
DEFINE_STRING(startup_blob, nullptr,
@@ -2001,11 +2147,12 @@ DEFINE_NEG_NEG_IMPLICATION(text_is_readable, partial_constant_pool)
//
DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
-DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+DEFINE_EXPERIMENTAL_FEATURE(minor_mc,
+ "perform young generation mark compact GCs")
DEFINE_IMPLICATION(minor_mc, separate_gc_phases)
-DEFINE_BOOL(concurrent_minor_mc_marking, false,
- "perform young generation marking concurrently")
+DEFINE_EXPERIMENTAL_FEATURE(concurrent_minor_mc_marking,
+ "perform young generation marking concurrently")
DEFINE_NEG_NEG_IMPLICATION(concurrent_marking, concurrent_minor_mc_marking)
//
@@ -2200,23 +2347,19 @@ DEFINE_PERF_PROF_BOOL(
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
// TODO(v8:8462) Remove implication once perf supports remapping.
DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
-#if V8_ENABLE_WEBASSEMBLY
-DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
-#endif // V8_ENABLE_WEBASSEMBLY
// --perf-prof-unwinding-info is available only on selected architectures.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
-#undef DEFINE_PERF_PROF_BOOL
-#define DEFINE_PERF_PROF_BOOL(nam, cmt) DEFINE_BOOL_READONLY(nam, false, cmt)
-#undef DEFINE_PERF_PROF_IMPLICATION
-#define DEFINE_PERF_PROF_IMPLICATION(...)
-#endif
-
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
+ V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_PPC64
DEFINE_PERF_PROF_BOOL(
perf_prof_unwinding_info,
"Enable unwinding info for perf linux profiler (experimental).")
DEFINE_PERF_PROF_IMPLICATION(perf_prof, perf_prof_unwinding_info)
+#else
+DEFINE_BOOL_READONLY(
+ perf_prof_unwinding_info, false,
+ "Enable unwinding info for perf linux profiler (experimental).")
+#endif
#undef DEFINE_PERF_PROF_BOOL
#undef DEFINE_PERF_PROF_IMPLICATION
@@ -2347,11 +2490,9 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_array_buffer_sweeping)
DEFINE_NEG_IMPLICATION(single_threaded_gc, stress_concurrent_allocation)
DEFINE_NEG_IMPLICATION(single_threaded_gc, cppheap_concurrent_marking)
-// Web snapshots: 1) expose WebSnapshot.* API 2) interpret scripts as web
-// snapshots if they start with a magic number.
-// TODO(v8:11525): Remove this flag once proper embedder integration is done.
-DEFINE_BOOL(experimental_web_snapshots, false, "enable Web Snapshots")
-DEFINE_NEG_IMPLICATION(experimental_web_snapshots, script_streaming)
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+DEFINE_BOOL(use_libm_trig_functions, true, "use libm trig functions")
+#endif
#undef FLAG
diff --git a/deps/v8/src/flags/flags.cc b/deps/v8/src/flags/flags.cc
index ab66eca43b..e41b71f85e 100644
--- a/deps/v8/src/flags/flags.cc
+++ b/deps/v8/src/flags/flags.cc
@@ -44,6 +44,7 @@ static_assert(sizeof(FlagValues) % kMinimumOSPageSize == 0);
// Define all of our flags default values.
#define FLAG_MODE_DEFINE_DEFAULTS
#include "src/flags/flag-definitions.h" // NOLINT(build/include)
+#undef FLAG_MODE_DEFINE_DEFAULTS
namespace {
@@ -91,6 +92,10 @@ struct Flag {
enum class SetBy { kDefault, kWeakImplication, kImplication, kCommandLine };
+ constexpr bool IsAnyImplication(Flag::SetBy set_by) {
+ return set_by == SetBy::kWeakImplication || set_by == SetBy::kImplication;
+ }
+
FlagType type_; // What type of flag, bool, int, or string.
const char* name_; // Name of the flag, ex "my_flag".
void* valptr_; // Pointer to the global flag variable.
@@ -178,39 +183,44 @@ struct Flag {
}
}
+ template <typename T>
+ T GetDefaultValue() const {
+ return *reinterpret_cast<const T*>(defptr_);
+ }
+
bool bool_default() const {
DCHECK_EQ(TYPE_BOOL, type_);
- return *reinterpret_cast<const bool*>(defptr_);
+ return GetDefaultValue<bool>();
}
int int_default() const {
DCHECK_EQ(TYPE_INT, type_);
- return *reinterpret_cast<const int*>(defptr_);
+ return GetDefaultValue<int>();
}
unsigned int uint_default() const {
DCHECK_EQ(TYPE_UINT, type_);
- return *reinterpret_cast<const unsigned int*>(defptr_);
+ return GetDefaultValue<unsigned int>();
}
uint64_t uint64_default() const {
DCHECK_EQ(TYPE_UINT64, type_);
- return *reinterpret_cast<const uint64_t*>(defptr_);
+ return GetDefaultValue<uint64_t>();
}
double float_default() const {
DCHECK_EQ(TYPE_FLOAT, type_);
- return *reinterpret_cast<const double*>(defptr_);
+ return GetDefaultValue<double>();
}
size_t size_t_default() const {
DCHECK_EQ(TYPE_SIZE_T, type_);
- return *reinterpret_cast<const size_t*>(defptr_);
+ return GetDefaultValue<size_t>();
}
const char* string_default() const {
DCHECK_EQ(TYPE_STRING, type_);
- return *reinterpret_cast<const char* const*>(defptr_);
+ return GetDefaultValue<const char*>();
}
static bool ShouldCheckFlagContradictions() {
@@ -244,6 +254,19 @@ struct Flag {
MSVC_SUPPRESS_WARNING(4722)
~FatalError() { FATAL("%s.\n%s", str().c_str(), kHint); }
};
+ // Readonly flags cannot change value.
+ if (change_flag && IsReadOnly()) {
+ // Exit instead of abort for certain testing situations.
+ if (v8_flags.exit_on_contradictory_flags) base::OS::ExitProcess(0);
+ if (implied_by == nullptr) {
+ FatalError{} << "Contradictory value for readonly flag "
+ << FlagName{name()};
+ } else {
+ DCHECK(IsAnyImplication(new_set_by));
+ FatalError{} << "Contradictory value for readonly flag "
+ << FlagName{name()} << " implied by " << implied_by;
+ }
+ }
// For bool flags, we only check for a conflict if the value actually
// changes. So specifying the same flag with the same value multiple times
// is allowed.
@@ -302,28 +325,39 @@ struct Flag {
break;
}
}
+ if (change_flag && IsReadOnly()) {
+ // Readonly flags must never change value.
+ return false;
+ }
set_by_ = new_set_by;
- if (new_set_by == SetBy::kImplication ||
- new_set_by == SetBy::kWeakImplication) {
+ if (IsAnyImplication(new_set_by)) {
DCHECK_NOT_NULL(implied_by);
implied_by_ = implied_by;
}
return change_flag;
}
+ bool IsReadOnly() const {
+ // See the FLAG_READONLY definition for FLAG_MODE_META.
+ return valptr_ == nullptr;
+ }
+
template <FlagType flag_type, typename T>
T GetValue() const {
DCHECK_EQ(flag_type, type_);
+ if (IsReadOnly()) return GetDefaultValue<T>();
return *reinterpret_cast<const FlagValue<T>*>(valptr_);
}
template <FlagType flag_type, typename T>
void SetValue(T new_value, SetBy set_by) {
DCHECK_EQ(flag_type, type_);
- auto* flag_value = reinterpret_cast<FlagValue<T>*>(valptr_);
- bool change_flag = flag_value->value() != new_value;
+ bool change_flag = GetValue<flag_type, T>() != new_value;
change_flag = CheckFlagChange(set_by, change_flag);
- if (change_flag) *flag_value = new_value;
+ if (change_flag) {
+ DCHECK(!IsReadOnly());
+ *reinterpret_cast<FlagValue<T>*>(valptr_) = new_value;
+ }
}
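
With IsReadOnly() and the GetValue()/SetValue() changes above, read-only flags (declared with FLAG_READONLY, so valptr_ is null) always report their compile-time default and silently refuse value changes. A compact standalone model of that behaviour, not the real Flag struct:

    #include <cassert>

    struct BoolFlag {
      bool* valptr;        // null for a read-only flag (mirrors FLAG_READONLY)
      bool default_value;

      bool IsReadOnly() const { return valptr == nullptr; }

      // Read-only flags always report their default.
      bool Get() const { return IsReadOnly() ? default_value : *valptr; }

      // Returns whether the value actually changed.
      bool Set(bool new_value) {
        if (Get() == new_value) return false;
        if (IsReadOnly()) return false;  // never mutate a read-only flag
        *valptr = new_value;
        return true;
      }
    };

    int main() {
      bool storage = true;
      BoolFlag writable{&storage, true};
      BoolFlag readonly{nullptr, false};
      assert(writable.Set(false) && writable.Get() == false);
      assert(!readonly.Set(true) && readonly.Get() == false);
    }
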
// Compare this flag's current value against the default.
@@ -395,6 +429,7 @@ struct Flag {
Flag flags[] = {
#define FLAG_MODE_META
#include "src/flags/flag-definitions.h" // NOLINT(build/include)
+#undef FLAG_MODE_META
};
constexpr size_t kNumFlags = arraysize(flags);
@@ -851,10 +886,11 @@ class ImplicationProcessor {
// Called from {DEFINE_*_IMPLICATION} in flag-definitions.h.
template <class T>
bool TriggerImplication(bool premise, const char* premise_name,
- FlagValue<T>* conclusion_value, T value,
+ FlagValue<T>* conclusion_value,
+ const char* conclusion_name, T value,
bool weak_implication) {
if (!premise) return false;
- Flag* conclusion_flag = FindFlagByPointer(conclusion_value);
+ Flag* conclusion_flag = FindFlagByName(conclusion_name);
if (!conclusion_flag->CheckFlagChange(
weak_implication ? Flag::SetBy::kWeakImplication
: Flag::SetBy::kImplication,
@@ -873,6 +909,30 @@ class ImplicationProcessor {
return true;
}
+ // Called from {DEFINE_*_IMPLICATION} in flag-definitions.h, when the
+ // conclusion flag is read-only (note this is the const overload of the
+ // function just above).
+ template <class T>
+ bool TriggerImplication(bool premise, const char* premise_name,
+ const FlagValue<T>* conclusion_value,
+ const char* conclusion_name, T value,
+ bool weak_implication) {
+ if (!premise) return false;
+ Flag* conclusion_flag = FindFlagByName(conclusion_name);
+ // Because this is the `const FlagValue*` overload:
+ DCHECK(conclusion_flag->IsReadOnly());
+ if (!conclusion_flag->CheckFlagChange(
+ weak_implication ? Flag::SetBy::kWeakImplication
+ : Flag::SetBy::kImplication,
+ conclusion_value->value() != value, premise_name)) {
+ return false;
+ }
+ // Must equal the default value, otherwise CheckFlagChange should've
+ // returned false.
+ DCHECK_EQ(value, conclusion_flag->GetDefaultValue<T>());
+ return true;
+ }
+
void CheckForCycle() {
// Make sure flag implications reach a fixed point within
// {kMaxNumIterations} iterations.
diff --git a/deps/v8/src/flags/flags.h b/deps/v8/src/flags/flags.h
index 690492f078..18446c78bf 100644
--- a/deps/v8/src/flags/flags.h
+++ b/deps/v8/src/flags/flags.h
@@ -66,6 +66,7 @@ struct alignas(kMinimumOSPageSize) FlagValues {
#define FLAG_MODE_DECLARE
#include "src/flags/flag-definitions.h" // NOLINT(build/include)
+#undef FLAG_MODE_DECLARE
};
V8_EXPORT_PRIVATE extern FlagValues v8_flags;
diff --git a/deps/v8/src/handles/global-handles-inl.h b/deps/v8/src/handles/global-handles-inl.h
index 1f86e2dcb4..1017d9437a 100644
--- a/deps/v8/src/handles/global-handles-inl.h
+++ b/deps/v8/src/handles/global-handles-inl.h
@@ -27,6 +27,14 @@ T GlobalHandleVector<T>::Pop() {
return obj;
}
+template <typename T>
+GlobalHandleVector<T>::GlobalHandleVector(LocalHeap* local_heap)
+ : GlobalHandleVector(local_heap->AsHeap()) {}
+
+template <typename T>
+GlobalHandleVector<T>::GlobalHandleVector(Heap* heap)
+ : locations_(StrongRootBlockAllocator(heap)) {}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 66b9cff80e..3cacf0a3a7 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -14,13 +14,16 @@
#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
#include "src/base/sanitizer/asan.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/base/stack.h"
-#include "src/heap/embedder-tracing.h"
+#include "src/heap/gc-tracer-inl.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap-write-barrier.h"
+#include "src/heap/local-heap.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
@@ -698,7 +701,7 @@ V8_INLINE bool GlobalHandles::ResetWeakNodeIfDead(
case WeaknessType::kCallback:
V8_FALLTHROUGH;
case WeaknessType::kCallbackWithTwoEmbedderFields:
- node->CollectPhantomCallbackData(&regular_pending_phantom_callbacks_);
+ node->CollectPhantomCallbackData(&pending_phantom_callbacks_);
break;
}
return true;
@@ -728,27 +731,35 @@ void GlobalHandles::ProcessWeakYoungObjects(
if (node->IsWeakRetainer() &&
!ResetWeakNodeIfDead(node, should_reset_handle)) {
- // Node is weak and alive, so it should be passed onto the visitor.
- v->VisitRootPointer(Root::kGlobalHandles, node->label(),
- node->location());
+ // Node is weak and alive, so it should be passed onto the visitor if
+ // present.
+ if (v) {
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
+ }
}
}
}
void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
+ DCHECK(AllowJavascriptExecution::IsAllowed(isolate()));
+ DCHECK(AllowGarbageCollection::IsAllowed());
+
if (second_pass_callbacks_.empty()) return;
- GCCallbacksScope scope(isolate()->heap());
// The callbacks may execute JS, which in turn may lead to another GC run.
// If we are already processing the callbacks, we do not want to start over
// from within the inner GC. Newly added callbacks will always be run by the
// outermost GC run only.
+ GCCallbacksScope scope(isolate()->heap());
if (scope.CheckReenter()) {
TRACE_EVENT0("v8", "V8.GCPhantomHandleProcessingCallback");
isolate()->heap()->CallGCPrologueCallbacks(
- GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags,
+ GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
{
- AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(isolate_->heap()->tracer(),
+ GCTracer::Scope::HEAP_EXTERNAL_SECOND_PASS_CALLBACKS);
while (!second_pass_callbacks_.empty()) {
auto callback = second_pass_callbacks_.back();
second_pass_callbacks_.pop_back();
@@ -756,7 +767,8 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
}
}
isolate()->heap()->CallGCEpilogueCallbacks(
- GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags,
+ GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
}
}
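
The GCCallbacksScope re-entry check above ensures that callbacks which trigger another GC do not start draining the queue again; newly added callbacks are left for the outermost run. A small illustrative model of that guard, not the real GCCallbacksScope:

    #include <cassert>
    #include <functional>
    #include <vector>

    class ReentrancyScope {
     public:
      explicit ReentrancyScope(int* depth) : depth_(depth) { ++*depth_; }
      ~ReentrancyScope() { --*depth_; }
      bool IsOutermost() const { return *depth_ == 1; }
     private:
      int* depth_;
    };

    int depth = 0;
    std::vector<std::function<void()>> callbacks;

    void DrainCallbacks() {
      ReentrancyScope scope(&depth);
      if (!scope.IsOutermost()) return;  // inner call: leave work to the outer one
      while (!callbacks.empty()) {
        auto cb = callbacks.back();
        callbacks.pop_back();
        cb();  // may push new callbacks or call DrainCallbacks() again
      }
    }

    int main() {
      callbacks.push_back([] { callbacks.push_back([] {}); DrainCallbacks(); });
      DrainCallbacks();
      assert(callbacks.empty());
    }
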
@@ -809,35 +821,35 @@ void GlobalHandles::ClearListOfYoungNodes() {
ClearListOfYoungNodesImpl(isolate_, &young_nodes_);
}
-template <typename T>
-size_t GlobalHandles::InvokeFirstPassWeakCallbacks(
- std::vector<std::pair<T*, PendingPhantomCallback>>* pending) {
- size_t freed_nodes = 0;
- std::vector<std::pair<T*, PendingPhantomCallback>> pending_phantom_callbacks;
- pending_phantom_callbacks.swap(*pending);
- {
- // The initial pass callbacks must simply clear the nodes.
- for (auto& pair : pending_phantom_callbacks) {
- T* node = pair.first;
- DCHECK_EQ(T::NEAR_DEATH, node->state());
- pair.second.Invoke(isolate(), PendingPhantomCallback::kFirstPass);
-
- // Transition to second pass. It is required that the first pass callback
- // resets the handle using |v8::PersistentBase::Reset|. Also see comments
- // on |v8::WeakCallbackInfo|.
- CHECK_WITH_MSG(T::FREE == node->state(),
- "Handle not reset in first callback. See comments on "
- "|v8::WeakCallbackInfo|.");
-
- if (pair.second.callback()) second_pass_callbacks_.push_back(pair.second);
- freed_nodes++;
- }
- }
- return freed_nodes;
-}
-
size_t GlobalHandles::InvokeFirstPassWeakCallbacks() {
- return InvokeFirstPassWeakCallbacks(&regular_pending_phantom_callbacks_);
+ last_gc_custom_callbacks_ = 0;
+ if (pending_phantom_callbacks_.empty()) return 0;
+
+ TRACE_GC(isolate()->heap()->tracer(),
+ GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+
+ size_t freed_nodes = 0;
+ std::vector<std::pair<Node*, PendingPhantomCallback>>
+ pending_phantom_callbacks;
+ pending_phantom_callbacks.swap(pending_phantom_callbacks_);
+ // The initial pass callbacks must simply clear the nodes.
+ for (auto& pair : pending_phantom_callbacks) {
+ Node* node = pair.first;
+ DCHECK_EQ(Node::NEAR_DEATH, node->state());
+ pair.second.Invoke(isolate(), PendingPhantomCallback::kFirstPass);
+
+ // Transition to second pass. It is required that the first pass callback
+ // resets the handle using |v8::PersistentBase::Reset|. Also see comments
+ // on |v8::WeakCallbackInfo|.
+ CHECK_WITH_MSG(Node::FREE == node->state(),
+ "Handle not reset in first callback. See comments on "
+ "|v8::WeakCallbackInfo|.");
+
+ if (pair.second.callback()) second_pass_callbacks_.push_back(pair.second);
+ freed_nodes++;
+ }
+ last_gc_custom_callbacks_ = freed_nodes;
+ return 0;
}
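
For context, the embedder-visible contract that the CHECK_WITH_MSG above enforces: a first-pass weak callback may only reset the handle, and anything that re-enters V8 belongs in a second-pass callback. A hedged sketch using the public API; the Wrapper type and function names are illustrative, not part of V8:

    #include <v8.h>

    struct Wrapper {
      v8::Global<v8::Object> handle;
      void* native_resource = nullptr;
    };

    static void SecondPass(const v8::WeakCallbackInfo<Wrapper>& info) {
      // Second pass: may interact with V8 again; release native state here.
      delete info.GetParameter();
    }

    static void FirstPass(const v8::WeakCallbackInfo<Wrapper>& info) {
      // First pass: must only reset the handle (the CHECK above fires otherwise).
      info.GetParameter()->handle.Reset();
      info.SetSecondPassCallback(SecondPass);
    }

    void MakeWeak(Wrapper* wrapper) {
      wrapper->handle.SetWeak(wrapper, FirstPass,
                              v8::WeakCallbackType::kParameter);
    }
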
void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate,
@@ -854,35 +866,35 @@ void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate,
}
void GlobalHandles::PostGarbageCollectionProcessing(
- GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
+ v8::GCCallbackFlags gc_callback_flags) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
DCHECK_EQ(Heap::NOT_IN_GC, isolate_->heap()->gc_state());
+ if (second_pass_callbacks_.empty()) return;
+
const bool synchronous_second_pass =
v8_flags.optimize_for_size || v8_flags.predictable ||
isolate_->heap()->IsTearingDown() ||
(gc_callback_flags &
(kGCCallbackFlagForced | kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagSynchronousPhantomCallbackProcessing)) != 0;
-
if (synchronous_second_pass) {
InvokeSecondPassPhantomCallbacks();
return;
}
- if (second_pass_callbacks_.empty() || second_pass_callbacks_task_posted_)
- return;
-
- second_pass_callbacks_task_posted_ = true;
- V8::GetCurrentPlatform()
- ->GetForegroundTaskRunner(reinterpret_cast<v8::Isolate*>(isolate()))
- ->PostTask(MakeCancelableTask(isolate(), [this] {
- DCHECK(second_pass_callbacks_task_posted_);
- second_pass_callbacks_task_posted_ = false;
- InvokeSecondPassPhantomCallbacks();
- }));
+ if (!second_pass_callbacks_task_posted_) {
+ second_pass_callbacks_task_posted_ = true;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(reinterpret_cast<v8::Isolate*>(isolate()))
+ ->PostTask(MakeCancelableTask(isolate(), [this] {
+ DCHECK(second_pass_callbacks_task_posted_);
+ second_pass_callbacks_task_posted_ = false;
+ InvokeSecondPassPhantomCallbacks();
+ }));
+ }
}
void GlobalHandles::IterateStrongRoots(RootVisitor* v) {
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index e9fb843692..5026e75b93 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -83,8 +83,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
void InvokeSecondPassPhantomCallbacks();
// Schedule or invoke second pass weak callbacks.
- void PostGarbageCollectionProcessing(
- GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
+ void PostGarbageCollectionProcessing(v8::GCCallbackFlags gc_callback_flags);
void IterateStrongRoots(RootVisitor* v);
void IterateWeakRoots(RootVisitor* v);
@@ -104,9 +103,9 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
// Iterates over strong and dependent handles. See the note above.
void IterateYoungStrongAndDependentRoots(RootVisitor* v);
- // Processes all young weak objects. Weak objects for which
- // `should_reset_handle()` returns true are reset and others are passed to the
- // visitor `v`.
+ // Processes all young weak objects:
+ // - Weak objects for which `should_reset_handle()` returns true are reset;
+ // - Others are passed to `v` iff `v` is not null.
void ProcessWeakYoungObjects(RootVisitor* v,
WeakSlotCallbackWithHeap should_reset_handle);
@@ -122,6 +121,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
size_t UsedSize() const;
// Number of global handles.
size_t handles_count() const;
+ size_t last_gc_custom_callbacks() const { return last_gc_custom_callbacks_; }
void IterateAllRootsForTesting(v8::PersistentHandleVisitor* v);
@@ -130,6 +130,8 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
void Print();
#endif // DEBUG
+ bool HasYoung() const { return !young_nodes_.empty(); }
+
private:
// Internal node structures.
class Node;
@@ -139,10 +141,6 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
class NodeSpace;
class PendingPhantomCallback;
- template <typename T>
- size_t InvokeFirstPassWeakCallbacks(
- std::vector<std::pair<T*, PendingPhantomCallback>>* pending);
-
void ApplyPersistentHandleVisitor(v8::PersistentHandleVisitor* visitor,
Node* node);
@@ -160,9 +158,10 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
// is accessed, some of the objects may have been promoted already.
std::vector<Node*> young_nodes_;
std::vector<std::pair<Node*, PendingPhantomCallback>>
- regular_pending_phantom_callbacks_;
+ pending_phantom_callbacks_;
std::vector<PendingPhantomCallback> second_pass_callbacks_;
bool second_pass_callbacks_task_posted_ = false;
+ size_t last_gc_custom_callbacks_ = 0;
};
class GlobalHandles::PendingPhantomCallback final {
@@ -248,14 +247,18 @@ class GlobalHandleVector {
return *this;
}
Handle<T> operator*() { return Handle<T>(&*it_); }
- bool operator!=(Iterator& that) { return it_ != that.it_; }
+ bool operator==(const Iterator& that) const { return it_ == that.it_; }
+ bool operator!=(const Iterator& that) const { return it_ != that.it_; }
+
+ T raw() { return T::cast(Object(*it_)); }
private:
std::vector<Address, StrongRootBlockAllocator>::iterator it_;
};
- explicit GlobalHandleVector(Heap* heap)
- : locations_(StrongRootBlockAllocator(heap)) {}
+ explicit inline GlobalHandleVector(Heap* heap);
+ // Usage with LocalHeap is safe.
+ explicit inline GlobalHandleVector(LocalHeap* local_heap);
Handle<T> operator[](size_t i) { return Handle<T>(&locations_[i]); }
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 332dc723b0..8cfe353f9a 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -58,7 +58,11 @@ bool HandleBase::IsDereferenceAllowed() const {
if (!AllowHandleDereference::IsAllowed()) return false;
// Allocations in the shared heap may be dereferenced by multiple threads.
- if (heap_object.InSharedWritableHeap()) return true;
+ if (heap_object.InWritableSharedSpace()) return true;
+
+ // Deref is explicitly allowed from any thread. Used for running internal GC
+ // epilogue callbacks in the safepoint after a GC.
+ if (AllowHandleDereferenceAllThreads::IsAllowed()) return true;
LocalHeap* local_heap = isolate->CurrentLocalHeap();
@@ -99,7 +103,7 @@ bool DirectHandle<T>::IsDereferenceAllowed() const {
if (!AllowHandleDereference::IsAllowed()) return false;
// Allocations in the shared heap may be dereferenced by multiple threads.
- if (isolate->is_shared()) return true;
+ if (heap_object.InWritableSharedSpace()) return true;
LocalHeap* local_heap = isolate->CurrentLocalHeap();
diff --git a/deps/v8/src/handles/shared-object-conveyor-handles.cc b/deps/v8/src/handles/shared-object-conveyor-handles.cc
index 37b4a2672a..f3e9e4ef3b 100644
--- a/deps/v8/src/handles/shared-object-conveyor-handles.cc
+++ b/deps/v8/src/handles/shared-object-conveyor-handles.cc
@@ -13,7 +13,7 @@ namespace internal {
// the owner to the main isolate once the shared isolate is removed.
SharedObjectConveyorHandles::SharedObjectConveyorHandles(Isolate* isolate)
: persistent_handles_(
- isolate->shared_heap_isolate()->NewPersistentHandles()) {}
+ isolate->shared_space_isolate()->NewPersistentHandles()) {}
uint32_t SharedObjectConveyorHandles::Persist(HeapObject shared_object) {
DCHECK(shared_object.IsShared());
diff --git a/deps/v8/src/handles/traced-handles.cc b/deps/v8/src/handles/traced-handles.cc
index 2931228e8b..d9e2a0e23a 100644
--- a/deps/v8/src/handles/traced-handles.cc
+++ b/deps/v8/src/handles/traced-handles.cc
@@ -97,14 +97,25 @@ class TracedNode final {
void clear_markbit() { flags_ = Markbit::update(flags_, false); }
- void set_raw_object(Address value) { object_ = value; }
+ bool has_old_host() const { return HasOldHost::decode(flags_); }
+ void set_has_old_host(bool v) { flags_ = HasOldHost::update(flags_, v); }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ void set_raw_object(Address value) {
+ if constexpr (access_mode == AccessMode::NON_ATOMIC) {
+ object_ = value;
+ } else {
+ reinterpret_cast<std::atomic<Address>*>(&object_)->store(
+ value, std::memory_order_relaxed);
+ }
+ }
Address raw_object() const { return object_; }
Object object() const { return Object(object_); }
Handle<Object> handle() { return Handle<Object>(&object_); }
FullObjectSlot location() { return FullObjectSlot(&object_); }
Handle<Object> Publish(Object object, bool needs_young_bit_update,
- bool needs_black_allocation);
+ bool needs_black_allocation, bool has_old_host);
void Release();
private:
@@ -114,6 +125,7 @@ class TracedNode final {
// The markbit is the exception as it can be set from the main and marker
// threads at the same time.
using Markbit = IsRoot::Next<bool, 1>;
+ using HasOldHost = Markbit::Next<bool, 1>;
Address object_ = kNullAddress;
union {
@@ -136,11 +148,13 @@ TracedNode::TracedNode(IndexType index, IndexType next_free_index)
DCHECK(!is_in_young_list());
DCHECK(!is_root());
DCHECK(!markbit());
+ DCHECK(!has_old_host());
}
// Publishes all internal state to be consumed by other threads.
Handle<Object> TracedNode::Publish(Object object, bool needs_young_bit_update,
- bool needs_black_allocation) {
+ bool needs_black_allocation,
+ bool has_old_host) {
DCHECK(!is_in_use());
DCHECK(!is_root());
DCHECK(!markbit());
@@ -151,6 +165,10 @@ Handle<Object> TracedNode::Publish(Object object, bool needs_young_bit_update,
if (needs_black_allocation) {
set_markbit();
}
+ if (has_old_host) {
+ DCHECK(is_in_young_list());
+ set_has_old_host(true);
+ }
set_root(true);
set_is_in_use(true);
reinterpret_cast<std::atomic<Address>*>(&object_)->store(
@@ -166,6 +184,7 @@ void TracedNode::Release() {
DCHECK(!is_in_use());
DCHECK(!is_root());
DCHECK(!markbit());
+ DCHECK(!has_old_host());
set_raw_object(kGlobalHandleZapValue);
}
@@ -455,8 +474,24 @@ void TracedNodeBlock::FreeNode(TracedNode* node) {
used_--;
}
-bool NeedsTrackingInYoungNodes(Object value, TracedNode* node) {
- return ObjectInYoungGeneration(value) && !node->is_in_young_list();
+CppHeap* GetCppHeapIfUnifiedYoungGC(Isolate* isolate) {
+ // TODO(v8:13475) Consider removing this check when unified-young-gen becomes
+ // default.
+ if (!v8_flags.cppgc_young_generation) return nullptr;
+ auto* cpp_heap = CppHeap::From(isolate->heap()->cpp_heap());
+ if (cpp_heap && cpp_heap->generational_gc_supported()) return cpp_heap;
+ return nullptr;
+}
+
+bool IsCppGCHostOld(CppHeap& cpp_heap, Address host) {
+ DCHECK(host);
+ DCHECK(cpp_heap.generational_gc_supported());
+ auto* host_ptr = reinterpret_cast<void*>(host);
+ auto* page = cppgc::internal::BasePage::FromInnerAddress(&cpp_heap, host_ptr);
+ // TracedReference may be created on stack, in which case assume it's young
+ // and doesn't need to be remembered, since it'll anyway be scanned.
+ if (!page) return false;
+ return !page->ObjectHeaderFromInnerAddress(host_ptr).IsYoung();
}
void SetSlotThreadSafe(Address** slot, Address* val) {
@@ -488,6 +523,7 @@ class TracedHandlesImpl final {
void DeleteEmptyBlocks();
void ResetDeadNodes(WeakSlotCallbackWithHeap should_reset_handle);
+ void ResetYoungDeadNodes(WeakSlotCallbackWithHeap should_reset_handle);
void ComputeWeaknessForYoungObjects(WeakSlotCallback is_unmodified);
void ProcessYoungObjects(RootVisitor* visitor,
@@ -496,21 +532,22 @@ class TracedHandlesImpl final {
void Iterate(RootVisitor* visitor);
void IterateYoung(RootVisitor* visitor);
void IterateYoungRoots(RootVisitor* visitor);
+ void IterateAndMarkYoungRootsWithOldHosts(RootVisitor* visitor);
+ void IterateYoungRootsWithOldHostsForTesting(RootVisitor* visitor);
size_t used_node_count() const { return used_nodes_; }
size_t used_size_bytes() const { return sizeof(TracedNode) * used_nodes_; }
size_t total_size_bytes() const { return block_size_bytes_; }
- START_ALLOW_USE_DEPRECATED()
-
- void Iterate(v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor);
-
- END_ALLOW_USE_DEPRECATED()
+ bool HasYoung() const { return !young_nodes_.empty(); }
private:
TracedNode* AllocateNode();
void FreeNode(TracedNode*);
+ bool NeedsToBeRemembered(Object value, TracedNode* node, Address* slot,
+ GlobalHandleStoreMode store_mode) const;
+
TracedNodeBlock::OverallList blocks_;
TracedNodeBlock::UsableList usable_blocks_;
// List of young nodes. May refer to nodes in `blocks_`, `usable_blocks_`, and
@@ -594,6 +631,31 @@ TracedHandlesImpl::~TracedHandlesImpl() {
DCHECK_EQ(block_size_bytes, block_size_bytes_);
}
+namespace {
+bool NeedsTrackingInYoungNodes(Object object, TracedNode* node) {
+ return ObjectInYoungGeneration(object) && !node->is_in_young_list();
+}
+} // namespace
+
+bool TracedHandlesImpl::NeedsToBeRemembered(
+ Object object, TracedNode* node, Address* slot,
+ GlobalHandleStoreMode store_mode) const {
+ DCHECK(!node->has_old_host());
+ if (store_mode == GlobalHandleStoreMode::kInitializingStore) {
+ // Don't record initializing stores.
+ return false;
+ }
+ if (is_marking_) {
+ // If marking is in progress, the marking barrier will be issued later.
+ return false;
+ }
+ auto* cpp_heap = GetCppHeapIfUnifiedYoungGC(isolate_);
+ if (!cpp_heap) return false;
+
+ if (!ObjectInYoungGeneration(object)) return false;
+ return IsCppGCHostOld(*cpp_heap, reinterpret_cast<Address>(slot));
+}
+
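+
The has_old_host bit computed by NeedsToBeRemembered() acts like a remembered-set entry for the unified young GC: a slot in an old cppgc host that points at a young V8 object must be treated as a root by the next minor GC. A generic plain-C++ model of that invariant, assuming nothing about V8 internals:

    #include <cassert>
    #include <vector>

    struct Object { bool young = true; };

    struct Slot {
      Object* host;    // the cppgc object embedding the reference
      Object* target;  // the V8 object being referenced
      bool has_old_host = false;
    };

    // Mirrors NeedsToBeRemembered(): only old-host -> young-target edges are kept.
    void MaybeRemember(Slot& slot) {
      if (!slot.host->young && slot.target->young) slot.has_old_host = true;
    }

    // A young GC then visits exactly the remembered slots as extra roots.
    std::vector<Object*> YoungRoots(std::vector<Slot>& slots) {
      std::vector<Object*> roots;
      for (Slot& s : slots) {
        if (s.has_old_host) roots.push_back(s.target);
      }
      return roots;
    }

    int main() {
      Object old_host{false}, young_target{true};
      std::vector<Slot> slots = {{&old_host, &young_target}};
      MaybeRemember(slots[0]);
      assert(YoungRoots(slots).size() == 1);
    }
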
Handle<Object> TracedHandlesImpl::Create(Address value, Address* slot,
GlobalHandleStoreMode store_mode) {
Object object(value);
@@ -603,12 +665,15 @@ Handle<Object> TracedHandlesImpl::Create(Address value, Address* slot,
needs_young_bit_update = true;
young_nodes_.push_back(node);
}
+
+ const bool has_old_host = NeedsToBeRemembered(object, node, slot, store_mode);
bool needs_black_allocation = false;
if (is_marking_ && store_mode != GlobalHandleStoreMode::kInitializingStore) {
needs_black_allocation = true;
WriteBarrier::MarkingFromGlobalHandle(object);
}
- return node->Publish(object, needs_young_bit_update, needs_black_allocation);
+ return node->Publish(object, needs_young_bit_update, needs_black_allocation,
+ has_old_host);
}
void TracedHandlesImpl::Destroy(TracedNodeBlock& node_block, TracedNode& node) {
@@ -626,14 +691,15 @@ void TracedHandlesImpl::Destroy(TracedNodeBlock& node_block, TracedNode& node) {
}
if (is_marking_) {
- // Incremental marking is on. This also covers the scavenge case which
- // prohibits eagerly reclaiming nodes when marking is on during a scavenge.
+ // Incremental/concurrent marking is running. This also covers the scavenge
+ // case which prohibits eagerly reclaiming nodes when marking is on during a
+ // scavenge.
//
// On-heap traced nodes are released in the atomic pause in
// `IterateWeakRootsForPhantomHandles()` when they are discovered as not
// marked. Eagerly clear out the object here to avoid needlessly marking it
// from this point on. The node will be reclaimed on the next cycle.
- node.set_raw_object(kNullAddress);
+ node.set_raw_object<AccessMode::ATOMIC>(kNullAddress);
return;
}
@@ -681,6 +747,15 @@ void TracedHandlesImpl::Move(TracedNode& from_node, Address** from,
// Write barrier needs to cover node as well as object.
to_node->set_markbit<AccessMode::ATOMIC>();
WriteBarrier::MarkingFromGlobalHandle(to_node->object());
+ } else if (auto* cpp_heap = GetCppHeapIfUnifiedYoungGC(isolate_)) {
+ const bool object_is_young_and_not_yet_recorded =
+ !from_node.has_old_host() &&
+ ObjectInYoungGeneration(from_node.object());
+ if (object_is_young_and_not_yet_recorded &&
+ IsCppGCHostOld(*cpp_heap, reinterpret_cast<Address>(to))) {
+ DCHECK(from_node.is_in_young_list());
+ from_node.set_has_old_host(true);
+ }
}
SetSlotThreadSafe(from, nullptr);
}
@@ -711,16 +786,18 @@ const TracedHandles::NodeBounds TracedHandlesImpl::GetNodeBounds() const {
void TracedHandlesImpl::UpdateListOfYoungNodes() {
size_t last = 0;
+ const bool needs_to_mark_as_old =
+ static_cast<bool>(GetCppHeapIfUnifiedYoungGC(isolate_));
for (auto* node : young_nodes_) {
DCHECK(node->is_in_young_list());
- if (node->is_in_use()) {
- if (ObjectInYoungGeneration(node->object())) {
- young_nodes_[last++] = node;
- } else {
- node->set_is_in_young_list(false);
- }
+ if (node->is_in_use() && ObjectInYoungGeneration(node->object())) {
+ young_nodes_[last++] = node;
+ // The node was discovered through a cppgc object, which will be
+ // immediately promoted. Remember the object.
+ if (needs_to_mark_as_old) node->set_has_old_host(true);
} else {
node->set_is_in_young_list(false);
+ node->set_has_old_host(false);
}
}
DCHECK_LE(last, young_nodes_.size());
@@ -737,6 +814,7 @@ void TracedHandlesImpl::ClearListOfYoungNodes() {
DCHECK(node->is_in_young_list());
// Nodes in use and not in use can have this bit set to false.
node->set_is_in_young_list(false);
+ node->set_has_old_host(false);
}
young_nodes_.clear();
young_nodes_.shrink_to_fit();
@@ -783,6 +861,26 @@ void TracedHandlesImpl::ResetDeadNodes(
}
}
+void TracedHandlesImpl::ResetYoungDeadNodes(
+ WeakSlotCallbackWithHeap should_reset_handle) {
+ for (auto* node : young_nodes_) {
+ DCHECK(node->is_in_young_list());
+ DCHECK_IMPLIES(node->has_old_host(), node->markbit());
+
+ if (!node->is_in_use()) continue;
+
+ if (!node->markbit()) {
+ FreeNode(node);
+ continue;
+ }
+
+ // Node was reachable. Clear the markbit for the next GC.
+ node->clear_markbit();
+ // TODO(v8:13141): Turn into a DCHECK after some time.
+ CHECK(!should_reset_handle(isolate_->heap(), node->location()));
+ }
+}
+
void TracedHandlesImpl::ComputeWeaknessForYoungObjects(
WeakSlotCallback is_unmodified) {
if (!v8_flags.reclaim_unmodified_wrappers) return;
@@ -792,6 +890,8 @@ void TracedHandlesImpl::ComputeWeaknessForYoungObjects(
if (is_marking_) return;
auto* const handler = isolate_->heap()->GetEmbedderRootsHandler();
+ if (!handler) return;
+
for (TracedNode* node : young_nodes_) {
if (node->is_in_use()) {
DCHECK(node->is_root());
@@ -810,12 +910,15 @@ void TracedHandlesImpl::ProcessYoungObjects(
if (!v8_flags.reclaim_unmodified_wrappers) return;
auto* const handler = isolate_->heap()->GetEmbedderRootsHandler();
+ if (!handler) return;
+
for (TracedNode* node : young_nodes_) {
if (!node->is_in_use()) continue;
- DCHECK_IMPLIES(node->is_root(),
- !should_reset_handle(isolate_->heap(), node->location()));
- if (should_reset_handle(isolate_->heap(), node->location())) {
+ bool should_reset = should_reset_handle(isolate_->heap(), node->location());
+ CHECK_IMPLIES(node->is_root(), !should_reset);
+ if (should_reset) {
+ CHECK(!is_marking_);
v8::Value* value = ToApi<v8::Value>(node->handle());
handler->ResetRoot(
*reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
@@ -825,8 +928,10 @@ void TracedHandlesImpl::ProcessYoungObjects(
} else {
if (!node->is_root()) {
node->set_root(true);
- visitor->VisitRootPointer(Root::kGlobalHandles, nullptr,
- node->location());
+ if (visitor) {
+ visitor->VisitRootPointer(Root::kGlobalHandles, nullptr,
+ node->location());
+ }
}
}
}
@@ -855,28 +960,43 @@ void TracedHandlesImpl::IterateYoungRoots(RootVisitor* visitor) {
for (auto* node : young_nodes_) {
if (!node->is_in_use()) continue;
+ CHECK_IMPLIES(is_marking_, node->is_root());
+
if (!node->is_root()) continue;
visitor->VisitRootPointer(Root::kTracedHandles, nullptr, node->location());
}
}
-START_ALLOW_USE_DEPRECATED()
+void TracedHandlesImpl::IterateAndMarkYoungRootsWithOldHosts(
+ RootVisitor* visitor) {
+ for (auto* node : young_nodes_) {
+ if (!node->is_in_use()) continue;
+ if (!node->has_old_host()) continue;
-void TracedHandlesImpl::Iterate(
- v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor) {
- for (auto* block : blocks_) {
- for (auto* node : *block) {
- if (node->is_in_use()) {
- v8::Value* value = ToApi<v8::Value>(node->handle());
- visitor->VisitTracedReference(
- *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
- }
- }
+ CHECK_IMPLIES(is_marking_, node->is_root());
+
+ if (!node->is_root()) continue;
+
+ node->set_markbit();
+ CHECK(ObjectInYoungGeneration(node->object()));
+ visitor->VisitRootPointer(Root::kTracedHandles, nullptr, node->location());
}
}
-END_ALLOW_USE_DEPRECATED()
+void TracedHandlesImpl::IterateYoungRootsWithOldHostsForTesting(
+ RootVisitor* visitor) {
+ for (auto* node : young_nodes_) {
+ if (!node->is_in_use()) continue;
+ if (!node->has_old_host()) continue;
+
+ CHECK_IMPLIES(is_marking_, node->is_root());
+
+ if (!node->is_root()) continue;
+
+ visitor->VisitRootPointer(Root::kTracedHandles, nullptr, node->location());
+ }
+}
TracedHandles::TracedHandles(Isolate* isolate)
: impl_(std::make_unique<TracedHandlesImpl>(isolate)) {}
@@ -911,6 +1031,11 @@ void TracedHandles::ResetDeadNodes(
impl_->ResetDeadNodes(should_reset_handle);
}
+void TracedHandles::ResetYoungDeadNodes(
+ WeakSlotCallbackWithHeap should_reset_handle) {
+ impl_->ResetYoungDeadNodes(should_reset_handle);
+}
+
void TracedHandles::ComputeWeaknessForYoungObjects(
WeakSlotCallback is_unmodified) {
impl_->ComputeWeaknessForYoungObjects(is_unmodified);
@@ -931,6 +1056,15 @@ void TracedHandles::IterateYoungRoots(RootVisitor* visitor) {
impl_->IterateYoungRoots(visitor);
}
+void TracedHandles::IterateAndMarkYoungRootsWithOldHosts(RootVisitor* visitor) {
+ impl_->IterateAndMarkYoungRootsWithOldHosts(visitor);
+}
+
+void TracedHandles::IterateYoungRootsWithOldHostsForTesting(
+ RootVisitor* visitor) {
+ impl_->IterateYoungRootsWithOldHostsForTesting(visitor);
+}
+
size_t TracedHandles::used_node_count() const {
return impl_->used_node_count();
}
@@ -943,15 +1077,6 @@ size_t TracedHandles::used_size_bytes() const {
return impl_->used_size_bytes();
}
-START_ALLOW_USE_DEPRECATED()
-
-void TracedHandles::Iterate(
- v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor) {
- impl_->Iterate(visitor);
-}
-
-END_ALLOW_USE_DEPRECATED()
-
// static
void TracedHandles::Destroy(Address* location) {
if (!location) return;
@@ -988,16 +1113,38 @@ void TracedHandles::Move(Address** from, Address** to) {
traced_handles.Move(*from_node, from, to);
}
+namespace {
+Object MarkObject(Object obj, TracedNode& node,
+ TracedHandles::MarkMode mark_mode) {
+ if (mark_mode == TracedHandles::MarkMode::kOnlyYoung &&
+ !node.is_in_young_list())
+ return Smi::zero();
+ node.set_markbit<AccessMode::ATOMIC>();
+ // Being in the young list, the node may still point to an old object, in
+ // which case we want to keep the node marked, but not follow the reference.
+ if (mark_mode == TracedHandles::MarkMode::kOnlyYoung &&
+ !ObjectInYoungGeneration(obj))
+ return Smi::zero();
+ return obj;
+}
+} // namespace
+
// static
-void TracedHandles::Mark(Address* location) {
+Object TracedHandles::Mark(Address* location, MarkMode mark_mode) {
+ // The load synchronizes internal bitfields that are also read atomically
+ // from the concurrent marker. The counterpart is `TracedNode::Publish()`.
+ Object object =
+ Object(reinterpret_cast<std::atomic<Address>*>(location)->load(
+ std::memory_order_acquire));
auto* node = TracedNode::FromLocation(location);
- DCHECK(node->is_in_use());
- node->set_markbit<AccessMode::ATOMIC>();
+ DCHECK(node->is_in_use<AccessMode::ATOMIC>());
+ return MarkObject(object, *node, mark_mode);
}
// static
Object TracedHandles::MarkConservatively(Address* inner_location,
- Address* traced_node_block_base) {
+ Address* traced_node_block_base,
+ MarkMode mark_mode) {
// Compute the `TracedNode` address based on its inner pointer.
const ptrdiff_t delta = reinterpret_cast<uintptr_t>(inner_location) -
reinterpret_cast<uintptr_t>(traced_node_block_base);
@@ -1007,8 +1154,9 @@ Object TracedHandles::MarkConservatively(Address* inner_location,
// `MarkConservatively()` runs concurrently with marking code. Reading
// state concurrently to setting the markbit is safe.
if (!node.is_in_use<AccessMode::ATOMIC>()) return Smi::zero();
- node.set_markbit<AccessMode::ATOMIC>();
- return node.object();
+ return MarkObject(node.object(), node, mark_mode);
}
+bool TracedHandles::HasYoung() const { return impl_->HasYoung(); }
+
} // namespace v8::internal
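
Editor's note: the new MarkMode split above boils down to one decision. In young-only marking the node's markbit is set as soon as the node is known to be relevant, but the referenced object is only handed back to the marker when the object itself is young. A minimal standalone sketch of that decision, using invented stand-in types rather than the real V8 classes:

#include <iostream>

// Stand-ins for V8-internal types; illustrative only.
enum class MarkMode { kOnlyYoung, kAll };

struct NodeSketch {
  bool in_young_list;
  bool markbit;
};

// Returns the object the marker should follow, or nullptr (the analogue of
// returning Smi::zero()) when the reference must not be traced.
const void* MarkObjectSketch(const void* object, bool object_is_young,
                             NodeSketch& node, MarkMode mode) {
  if (mode == MarkMode::kOnlyYoung && !node.in_young_list) return nullptr;
  node.markbit = true;  // The handle itself stays alive.
  // A node on the young list may still reference an old object: keep the node
  // marked but do not report the old object to the young-generation marker.
  if (mode == MarkMode::kOnlyYoung && !object_is_young) return nullptr;
  return object;
}

int main() {
  NodeSketch node{true, false};
  int old_object = 42;
  const void* follow = MarkObjectSketch(&old_object, /*object_is_young=*/false,
                                        node, MarkMode::kOnlyYoung);
  std::cout << "marked=" << node.markbit
            << " follow=" << (follow != nullptr) << "\n";  // marked=1 follow=0
}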
diff --git a/deps/v8/src/handles/traced-handles.h b/deps/v8/src/handles/traced-handles.h
index e423cb0399..b1817e148c 100644
--- a/deps/v8/src/handles/traced-handles.h
+++ b/deps/v8/src/handles/traced-handles.h
@@ -22,18 +22,16 @@ class TracedHandlesImpl;
// handles do otherwise not keep their pointees alive.
class V8_EXPORT_PRIVATE TracedHandles final {
public:
+ enum class MarkMode : uint8_t { kOnlyYoung, kAll };
+
static void Destroy(Address* location);
static void Copy(const Address* const* from, Address** to);
static void Move(Address** from, Address** to);
- static void Mark(Address* location);
+ static Object Mark(Address* location, MarkMode mark_mode);
static Object MarkConservatively(Address* inner_location,
- Address* traced_node_block_base);
-
- V8_INLINE static Object Acquire(Address* location) {
- return Object(reinterpret_cast<std::atomic<Address>*>(location)->load(
- std::memory_order_acquire));
- }
+ Address* traced_node_block_base,
+ MarkMode mark_mode);
explicit TracedHandles(Isolate*);
~TracedHandles();
@@ -60,6 +58,7 @@ class V8_EXPORT_PRIVATE TracedHandles final {
void DeleteEmptyBlocks();
void ResetDeadNodes(WeakSlotCallbackWithHeap should_reset_handle);
+ void ResetYoungDeadNodes(WeakSlotCallbackWithHeap should_reset_handle);
// Computes whether young weak objects should be considered roots for young
// generation garbage collections or just be treated weakly. Per default
@@ -74,19 +73,15 @@ class V8_EXPORT_PRIVATE TracedHandles final {
void Iterate(RootVisitor*);
void IterateYoung(RootVisitor*);
void IterateYoungRoots(RootVisitor*);
-
- START_ALLOW_USE_DEPRECATED()
-
- // Iterates over all traces handles represented by
- // `v8::TracedReferenceBase`.
- void Iterate(v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor);
-
- END_ALLOW_USE_DEPRECATED()
+ void IterateAndMarkYoungRootsWithOldHosts(RootVisitor*);
+ void IterateYoungRootsWithOldHostsForTesting(RootVisitor*);
size_t used_node_count() const;
size_t total_size_bytes() const;
size_t used_size_bytes() const;
+ bool HasYoung() const;
+
private:
std::unique_ptr<TracedHandlesImpl> impl_;
};
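
Editor's note: MarkConservatively() above turns an arbitrary interior pointer into the base address of the enclosing TracedNode by dividing the byte offset from the block base by the node size. The same rounding, shown standalone with an invented element type:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Element { std::uint64_t a; std::uint64_t b; };  // Fixed-size stand-in.

// Round an interior pointer down to the base of the element it points into,
// given the base address of the containing block.
Element* ElementFromInnerPointer(void* inner, Element* block_base) {
  const std::ptrdiff_t delta = reinterpret_cast<std::uintptr_t>(inner) -
                               reinterpret_cast<std::uintptr_t>(block_base);
  const std::size_t index = static_cast<std::size_t>(delta) / sizeof(Element);
  return block_base + index;
}

int main() {
  Element block[4] = {};
  void* inner = reinterpret_cast<char*>(&block[2]) + 5;  // Points into block[2].
  assert(ElementFromInnerPointer(inner, block) == &block[2]);
}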
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index de698441f9..bcdce0ea89 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -6,3 +6,4 @@ nikolaos@chromium.org
omerkatz@chromium.org
per-file *factory*=file:../objects/OWNERS
+per-file static-roots.h=file:../../COMMON_OWNERS
diff --git a/deps/v8/src/heap/allocation-observer.cc b/deps/v8/src/heap/allocation-observer.cc
index d25734e349..651b0add2a 100644
--- a/deps/v8/src/heap/allocation-observer.cc
+++ b/deps/v8/src/heap/allocation-observer.cc
@@ -71,10 +71,7 @@ void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
}
void AllocationCounter::AdvanceAllocationObservers(size_t allocated) {
- if (!IsActive()) {
- return;
- }
-
+ if (observers_.empty()) return;
DCHECK(!step_in_progress_);
DCHECK_LT(allocated, next_counter_ - current_counter_);
current_counter_ += allocated;
@@ -83,10 +80,7 @@ void AllocationCounter::AdvanceAllocationObservers(size_t allocated) {
void AllocationCounter::InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size) {
- if (!IsActive()) {
- return;
- }
-
+ if (observers_.empty()) return;
DCHECK(!step_in_progress_);
DCHECK_GE(aligned_object_size, next_counter_ - current_counter_);
DCHECK(soon_object);
@@ -98,6 +92,7 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
DCHECK(pending_removed_.empty());
for (AllocationObserverCounter& aoc : observers_) {
+ DCHECK_LT(current_counter_, aoc.next_counter_);
if (aoc.next_counter_ - current_counter_ <= aligned_object_size) {
{
DisallowGarbageCollection no_gc;
@@ -121,6 +116,7 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
// Now process newly added allocation observers.
for (AllocationObserverCounter& aoc : pending_added_) {
+ DCHECK_EQ(0, aoc.next_counter_);
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
@@ -163,13 +159,15 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
-
for (SpaceIterator it(heap_); it.HasNext();) {
it.Next()->PauseAllocationObservers();
}
+
+ heap_->pause_allocation_observers_depth_++;
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
+ heap_->pause_allocation_observers_depth_--;
for (SpaceIterator it(heap_); it.HasNext();) {
it.Next()->ResumeAllocationObservers();
}
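
Editor's note: the new pause_allocation_observers_depth_ counter replaces the per-counter paused_ flag; pausing is now tracked on the heap by the RAII scope itself. A simplified, self-contained sketch of that pattern (invented types, not the real Heap/Space classes):

#include <cassert>
#include <vector>

class SpaceSketch {
 public:
  void PauseAllocationObservers() { ++pause_count_; }
  void ResumeAllocationObservers() { --pause_count_; }
  bool observers_paused() const { return pause_count_ > 0; }
 private:
  int pause_count_ = 0;
};

struct HeapSketch {
  std::vector<SpaceSketch> spaces = std::vector<SpaceSketch>(2);
  int pause_allocation_observers_depth = 0;
};

// RAII scope: pause on construction, bump the depth counter, undo both on
// destruction. Nesting works because both counts are reference-counted.
class PauseAllocationObserversScopeSketch {
 public:
  explicit PauseAllocationObserversScopeSketch(HeapSketch& heap) : heap_(heap) {
    for (SpaceSketch& space : heap_.spaces) space.PauseAllocationObservers();
    ++heap_.pause_allocation_observers_depth;
  }
  ~PauseAllocationObserversScopeSketch() {
    --heap_.pause_allocation_observers_depth;
    for (SpaceSketch& space : heap_.spaces) space.ResumeAllocationObservers();
  }
 private:
  HeapSketch& heap_;
};

int main() {
  HeapSketch heap;
  {
    PauseAllocationObserversScopeSketch outer(heap);
    PauseAllocationObserversScopeSketch inner(heap);
    assert(heap.pause_allocation_observers_depth == 2);
    assert(heap.spaces[0].observers_paused());
  }
  assert(heap.pause_allocation_observers_depth == 0);
  assert(!heap.spaces[0].observers_paused());
}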
diff --git a/deps/v8/src/heap/allocation-observer.h b/deps/v8/src/heap/allocation-observer.h
index 26559ed16a..5a6ba5b1ab 100644
--- a/deps/v8/src/heap/allocation-observer.h
+++ b/deps/v8/src/heap/allocation-observer.h
@@ -17,9 +17,8 @@ namespace internal {
// Observer for allocations that is aware of LAB-based allocation.
class AllocationObserver {
public:
- explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
- DCHECK_LE(kTaggedSize, step_size);
- }
+ static constexpr intptr_t kNotUsingFixedStepSize = -1;
+ explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {}
virtual ~AllocationObserver() = default;
AllocationObserver(const AllocationObserver&) = delete;
AllocationObserver& operator=(const AllocationObserver&) = delete;
@@ -30,7 +29,7 @@ class AllocationObserver {
// result for a request of `size` bytes.
//
// Some caveats:
- // 1. `soon_object` will be nullptr in cases where the allocation returns a
+ // 1. `soon_object` will be nullptr in cases where the allocation returns a

// filler object, which is e.g. needed at page boundaries.
// 2. `soon_object` may actually be the first object in an
// allocation-folding group. In such a case size is the size of the group
@@ -40,7 +39,10 @@ class AllocationObserver {
virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
// Subclasses can override this method to make step size dynamic.
- virtual intptr_t GetNextStepSize() { return step_size_; }
+ virtual intptr_t GetNextStepSize() {
+ DCHECK_NE(kNotUsingFixedStepSize, step_size_);
+ return step_size_;
+ }
private:
const intptr_t step_size_;
@@ -68,29 +70,14 @@ class AllocationCounter final {
size_t object_size,
size_t aligned_object_size);
- bool IsActive() const { return !IsPaused() && observers_.size() > 0; }
-
bool IsStepInProgress() const { return step_in_progress_; }
size_t NextBytes() const {
- DCHECK(IsActive());
+ if (observers_.empty()) return SIZE_MAX;
return next_counter_ - current_counter_;
}
- void Pause() {
- DCHECK(!step_in_progress_);
- paused_++;
- }
-
- void Resume() {
- DCHECK_NE(0, paused_);
- DCHECK(!step_in_progress_);
- paused_--;
- }
-
private:
- bool IsPaused() const { return paused_; }
-
struct AllocationObserverCounter final {
AllocationObserverCounter(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
@@ -110,7 +97,6 @@ class AllocationCounter final {
size_t current_counter_ = 0;
size_t next_counter_ = 0;
bool step_in_progress_ = false;
- int paused_ = 0;
};
class V8_EXPORT_PRIVATE V8_NODISCARD PauseAllocationObserversScope {
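
Editor's note: kNotUsingFixedStepSize above lets observers skip the constructor-time step size entirely and compute it on demand via GetNextStepSize(). A hedged sketch of the two styles side by side (simplified interface, not the real AllocationObserver):

#include <cstdint>
#include <iostream>

// Simplified observer base: either construct with a fixed step size, or pass
// the sentinel and override GetNextStepSize().
class ObserverSketch {
 public:
  static constexpr std::intptr_t kNotUsingFixedStepSize = -1;
  explicit ObserverSketch(std::intptr_t step_size) : step_size_(step_size) {}
  virtual ~ObserverSketch() = default;
  virtual std::intptr_t GetNextStepSize() {
    // Only meaningful for observers constructed with a real step size.
    return step_size_;
  }
 private:
  const std::intptr_t step_size_;
};

class FixedStepObserver final : public ObserverSketch {
 public:
  FixedStepObserver() : ObserverSketch(4096) {}
};

class DynamicStepObserver final : public ObserverSketch {
 public:
  DynamicStepObserver() : ObserverSketch(kNotUsingFixedStepSize) {}
  std::intptr_t GetNextStepSize() override {
    next_ *= 2;  // E.g. back off between samples.
    return next_;
  }
 private:
  std::intptr_t next_ = 512;
};

int main() {
  FixedStepObserver fixed;
  DynamicStepObserver dynamic;
  std::cout << fixed.GetNextStepSize() << " "      // 4096
            << dynamic.GetNextStepSize() << " "    // 1024
            << dynamic.GetNextStepSize() << "\n";  // 2048
}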
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index 088e9e4ac5..370ed0aa81 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -7,10 +7,12 @@
#include <atomic>
#include <memory>
+#include "src/base/logging.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/remembered-set.h"
#include "src/objects/js-array-buffer.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"
@@ -76,11 +78,13 @@ bool ArrayBufferList::IsEmpty() const {
}
struct ArrayBufferSweeper::SweepingJob final {
- SweepingJob(ArrayBufferList young, ArrayBufferList old, SweepingType type)
+ SweepingJob(ArrayBufferList young, ArrayBufferList old, SweepingType type,
+ TreatAllYoungAsPromoted treat_all_young_as_promoted)
: state_(SweepingState::kInProgress),
young_(std::move(young)),
old_(std::move(old)),
- type_(type) {}
+ type_(type),
+ treat_all_young_as_promoted_(treat_all_young_as_promoted) {}
void Sweep();
void SweepYoung();
@@ -93,12 +97,14 @@ struct ArrayBufferSweeper::SweepingJob final {
ArrayBufferList young_;
ArrayBufferList old_;
const SweepingType type_;
- std::atomic<size_t> freed_bytes_{0};
+ size_t freed_bytes_{0};
+ TreatAllYoungAsPromoted treat_all_young_as_promoted_;
friend class ArrayBufferSweeper;
};
-ArrayBufferSweeper::ArrayBufferSweeper(Heap* heap) : heap_(heap) {}
+ArrayBufferSweeper::ArrayBufferSweeper(Heap* heap)
+ : heap_(heap), local_sweeper_(heap_->sweeper()) {}
ArrayBufferSweeper::~ArrayBufferSweeper() {
EnsureFinished();
@@ -115,7 +121,7 @@ void ArrayBufferSweeper::EnsureFinished() {
switch (abort_result) {
case TryAbortResult::kTaskAborted:
// Task has not run, so we need to run it synchronously here.
- job_->Sweep();
+ DoSweep();
break;
case TryAbortResult::kTaskRemoved:
// Task was removed, but did actually run, just ensure we are in the right
@@ -146,13 +152,15 @@ void ArrayBufferSweeper::FinishIfDone() {
}
}
-void ArrayBufferSweeper::RequestSweep(SweepingType type) {
+void ArrayBufferSweeper::RequestSweep(
+ SweepingType type, TreatAllYoungAsPromoted treat_all_young_as_promoted) {
DCHECK(!sweeping_in_progress());
+ DCHECK(local_sweeper_.IsEmpty());
if (young_.IsEmpty() && (old_.IsEmpty() || type == SweepingType::kYoung))
return;
- Prepare(type);
+ Prepare(type, treat_all_young_as_promoted);
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
v8_flags.concurrent_array_buffer_sweeping) {
auto task = MakeCancelableTask(heap_->isolate(), [this, type] {
@@ -162,28 +170,46 @@ void ArrayBufferSweeper::RequestSweep(SweepingType type) {
: GCTracer::Scope::BACKGROUND_FULL_ARRAY_BUFFER_SWEEP;
TRACE_GC_EPOCH(heap_->tracer(), scope_id, ThreadKind::kBackground);
base::MutexGuard guard(&sweeping_mutex_);
- job_->Sweep();
+ DoSweep();
job_finished_.NotifyAll();
});
job_->id_ = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
- job_->Sweep();
+ GCTracer::Scope::ScopeId scope_id =
+ type == SweepingType::kYoung ? GCTracer::Scope::YOUNG_ARRAY_BUFFER_SWEEP
+ : GCTracer::Scope::FULL_ARRAY_BUFFER_SWEEP;
+ TRACE_GC_EPOCH(heap_->tracer(), scope_id, ThreadKind::kMain);
+ DoSweep();
Finalize();
}
}
-void ArrayBufferSweeper::Prepare(SweepingType type) {
+void ArrayBufferSweeper::DoSweep() {
+ DCHECK_NOT_NULL(job_);
+ if (job_->treat_all_young_as_promoted_ == TreatAllYoungAsPromoted::kNo) {
+ // Waiting for promoted page iteration is only needed when not all young
+ // array buffers are promoted.
+ local_sweeper_.ContributeAndWaitForPromotedPagesIteration();
+ DCHECK(!heap_->sweeper()->IsIteratingPromotedPages());
+ }
+ job_->Sweep();
+}
+
+void ArrayBufferSweeper::Prepare(
+ SweepingType type, TreatAllYoungAsPromoted treat_all_young_as_promoted) {
DCHECK(!sweeping_in_progress());
+ DCHECK_IMPLIES(type == SweepingType::kFull,
+ treat_all_young_as_promoted == TreatAllYoungAsPromoted::kYes);
switch (type) {
case SweepingType::kYoung: {
job_ = std::make_unique<SweepingJob>(std::move(young_), ArrayBufferList(),
- type);
+ type, treat_all_young_as_promoted);
young_ = ArrayBufferList();
} break;
case SweepingType::kFull: {
job_ = std::make_unique<SweepingJob>(std::move(young_), std::move(old_),
- type);
+ type, treat_all_young_as_promoted);
young_ = ArrayBufferList();
old_ = ArrayBufferList();
} break;
@@ -196,9 +222,10 @@ void ArrayBufferSweeper::Finalize() {
CHECK_EQ(job_->state_, SweepingState::kDone);
young_.Append(&job_->young_);
old_.Append(&job_->old_);
- const size_t freed_bytes =
- job_->freed_bytes_.exchange(0, std::memory_order_relaxed);
- DecrementExternalMemoryCounters(freed_bytes);
+ DecrementExternalMemoryCounters(job_->freed_bytes_);
+
+ local_sweeper_.Finalize();
+
job_.reset();
DCHECK(!sweeping_in_progress());
}
@@ -230,16 +257,18 @@ void ArrayBufferSweeper::Append(JSArrayBuffer object,
void ArrayBufferSweeper::Detach(JSArrayBuffer object,
ArrayBufferExtension* extension) {
+ // Finish sweeping here first such that the code below is guaranteed to
+ // observe the same sweeping state.
+ FinishIfDone();
+
size_t bytes = extension->ClearAccountingLength();
// We cannot free the extension eagerly here, since extensions are tracked in
// a singly linked list. The next GC will remove it automatically.
- FinishIfDone();
-
if (!sweeping_in_progress()) {
// If concurrent sweeping isn't running at the moment, we can also adjust
- // the respective bytes in the corresponding ArraybufferLists as they are
+ // the respective bytes in the corresponding ArrayBufferLists as they are
// only approximate.
if (Heap::InYoungGeneration(object)) {
DCHECK_GE(young_.bytes_, bytes);
@@ -303,7 +332,7 @@ ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
if (!current->IsMarked()) {
const size_t bytes = current->accounting_length();
delete current;
- if (bytes) freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
+ if (bytes) freed_bytes_ += bytes;
} else {
current->Unmark();
survivor_list.Append(current);
@@ -329,8 +358,10 @@ void ArrayBufferSweeper::SweepingJob::SweepYoung() {
if (!current->IsYoungMarked()) {
size_t bytes = current->accounting_length();
delete current;
- if (bytes) freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
- } else if (current->IsYoungPromoted()) {
+ if (bytes) freed_bytes_ += bytes;
+ } else if ((treat_all_young_as_promoted_ ==
+ TreatAllYoungAsPromoted::kYes) ||
+ current->IsYoungPromoted()) {
current->YoungUnmark();
new_old.Append(current);
} else {
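
Editor's note: TreatAllYoungAsPromoted above changes how marked young array buffers are classified after sweeping, and the decision is small enough to show in isolation. A standalone sketch with invented types (not the real ArrayBufferExtension/ArrayBufferList):

#include <cstddef>
#include <iostream>
#include <vector>

enum class TreatAllYoungAsPromoted { kNo, kYes };

struct ExtensionSketch {
  bool young_marked;
  bool young_promoted;
  std::size_t bytes;
};

struct SweepResult {
  std::size_t freed_bytes = 0;
  std::vector<ExtensionSketch> new_young;
  std::vector<ExtensionSketch> new_old;
};

// Unmarked buffers are freed; marked buffers are promoted either when they
// were individually promoted or when the caller asked to treat all young
// buffers as promoted; the rest stay in the young list.
SweepResult SweepYoungSketch(const std::vector<ExtensionSketch>& young,
                             TreatAllYoungAsPromoted treat_all_as_promoted) {
  SweepResult result;
  for (const ExtensionSketch& extension : young) {
    if (!extension.young_marked) {
      result.freed_bytes += extension.bytes;
    } else if (treat_all_as_promoted == TreatAllYoungAsPromoted::kYes ||
               extension.young_promoted) {
      result.new_old.push_back(extension);
    } else {
      result.new_young.push_back(extension);
    }
  }
  return result;
}

int main() {
  std::vector<ExtensionSketch> young = {{false, false, 64}, {true, false, 32}};
  SweepResult r = SweepYoungSketch(young, TreatAllYoungAsPromoted::kYes);
  std::cout << r.freed_bytes << " bytes freed, " << r.new_old.size()
            << " promoted\n";  // 64 bytes freed, 1 promoted
}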
diff --git a/deps/v8/src/heap/array-buffer-sweeper.h b/deps/v8/src/heap/array-buffer-sweeper.h
index 14360dd67f..b49e54b994 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.h
+++ b/deps/v8/src/heap/array-buffer-sweeper.h
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/mutex.h"
+#include "src/heap/sweeper.h"
#include "src/objects/js-array-buffer.h"
#include "src/tasks/cancelable-task.h"
@@ -46,11 +47,13 @@ struct ArrayBufferList final {
class ArrayBufferSweeper final {
public:
enum class SweepingType { kYoung, kFull };
+ enum class TreatAllYoungAsPromoted { kNo, kYes };
explicit ArrayBufferSweeper(Heap* heap);
~ArrayBufferSweeper();
- void RequestSweep(SweepingType sweeping_type);
+ void RequestSweep(SweepingType sweeping_type,
+ TreatAllYoungAsPromoted treat_young_as_promoted);
void EnsureFinished();
// Track the given ArrayBufferExtension for the given JSArrayBuffer.
@@ -67,13 +70,16 @@ class ArrayBufferSweeper final {
// Bytes accounted in the old generation. Rebuilt during sweeping.
size_t OldBytes() const { return old().ApproximateBytes(); }
+ bool sweeping_in_progress() const {
+ DCHECK_IMPLIES(!job_, local_sweeper_.IsEmpty());
+ return job_.get();
+ }
+
private:
struct SweepingJob;
enum class SweepingState { kInProgress, kDone };
- bool sweeping_in_progress() const { return job_.get(); }
-
// Finishes sweeping if it is already done.
void FinishIfDone();
@@ -82,17 +88,21 @@ class ArrayBufferSweeper final {
void IncrementExternalMemoryCounters(size_t bytes);
void DecrementExternalMemoryCounters(size_t bytes);
- void Prepare(SweepingType type);
+ void Prepare(SweepingType type,
+ TreatAllYoungAsPromoted treat_all_young_as_promoted);
void Finalize();
void ReleaseAll(ArrayBufferList* extension);
+ void DoSweep();
+
Heap* const heap_;
std::unique_ptr<SweepingJob> job_;
base::Mutex sweeping_mutex_;
base::ConditionVariable job_finished_;
ArrayBufferList young_;
ArrayBufferList old_;
+ Sweeper::LocalSweeper local_sweeper_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/base-space.h b/deps/v8/src/heap/base-space.h
index a992d75d5c..fe456bb86c 100644
--- a/deps/v8/src/heap/base-space.h
+++ b/deps/v8/src/heap/base-space.h
@@ -9,7 +9,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
-#include "src/logging/log.h"
+#include "src/heap/heap-verifier.h"
#include "src/utils/allocation.h"
namespace v8 {
@@ -61,6 +61,11 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
// Returns allocated size.
virtual size_t Size() const = 0;
+#ifdef VERIFY_HEAP
+ virtual void Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const = 0;
+#endif // VERIFY_HEAP
+
protected:
BaseSpace(Heap* heap, AllocationSpace id)
: heap_(heap), id_(id), committed_(0), max_committed_(0) {}
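
Editor's note: the virtual Verify() hook added to BaseSpace above moves heap verification to a visitor interface: each space walks its own objects and reports them to a caller-supplied visitor. The sketch below shows the shape of that contract; the visitor methods are invented for illustration and are not the real SpaceVerificationVisitor API:

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

class VerificationVisitorSketch {
 public:
  virtual ~VerificationVisitorSketch() = default;
  virtual void VerifyObject(const void* object, std::size_t size_in_bytes) = 0;
};

// Invented space: owns a list of (address, size) pairs and reports each one.
class SpaceSketch {
 public:
  void Add(const void* object, std::size_t size) {
    objects_.push_back({object, size});
  }
  void Verify(VerificationVisitorSketch* visitor) const {
    for (const auto& entry : objects_)
      visitor->VerifyObject(entry.first, entry.second);
  }
 private:
  std::vector<std::pair<const void*, std::size_t>> objects_;
};

class CountingVerifier final : public VerificationVisitorSketch {
 public:
  void VerifyObject(const void*, std::size_t size_in_bytes) override {
    ++count_;
    bytes_ += size_in_bytes;
  }
  std::size_t count() const { return count_; }
  std::size_t bytes() const { return bytes_; }
 private:
  std::size_t count_ = 0;
  std::size_t bytes_ = 0;
};

int main() {
  int a = 0, b = 0;
  SpaceSketch space;
  space.Add(&a, sizeof(a));
  space.Add(&b, sizeof(b));
  CountingVerifier verifier;
  space.Verify(&verifier);
  std::cout << verifier.count() << " objects, " << verifier.bytes() << " bytes\n";
}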
diff --git a/deps/v8/src/heap/base/asm/arm/push_registers_asm.cc b/deps/v8/src/heap/base/asm/arm/push_registers_asm.cc
new file mode 100644
index 0000000000..5246c3f6c3
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/arm/push_registers_asm.cc
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 8-byte alignment at calls by pushing an additional
+// non-callee-saved register (r3).
+//
+// Calling convention source:
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A32)
+// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html
+asm(".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ // Only {r4-r11} are callee-saved registers. Push r3 in addition to align
+ // the stack back to 8 bytes.
+ " push {r3-r11, lr} \n"
+ // Pass 1st parameter (r0) unchanged (Stack*).
+ // Pass 2nd parameter (r1) unchanged (StackVisitor*).
+ // Save 3rd parameter (r2; IterateStackCallback).
+ " mov r3, r2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mov r2, sp \n"
+ // Call the callback.
+ " blx r3 \n"
+ // Discard all the registers.
+ " add sp, sp, #36 \n"
+ // Pop lr into pc which returns and switches mode if needed.
+ " pop {pc} \n");
diff --git a/deps/v8/src/heap/base/asm/arm/save_registers_asm.cc b/deps/v8/src/heap/base/asm/arm/save_registers_asm.cc
deleted file mode 100644
index e29babfce8..0000000000
--- a/deps/v8/src/heap/base/asm/arm/save_registers_asm.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-//
-// We maintain 8-byte alignment at calls by pushing an additional
-// non-callee-saved register (r3).
-//
-// Calling convention source:
-// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A32)
-// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html
-
-// 8 32-bit registers = 8 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 8,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 4, "Mismatch in word size");
-
-asm(".globl SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // r0: [ intptr_t* buffer ]
- // Save the callee-saved registers: {r4-r11}.
- " stm r0, {r4-r11} \n"
- // Return.
- " bx lr \n");
diff --git a/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc
new file mode 100644
index 0000000000..1efcc3430b
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/arm64/push_registers_asm.cc
@@ -0,0 +1,62 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 16-byte alignment.
+//
+// Calling convention source:
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
+
+asm(
+#if defined(__APPLE__)
+ ".globl _PushAllRegistersAndIterateStack \n"
+ ".private_extern _PushAllRegistersAndIterateStack \n"
+ ".p2align 2 \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !defined(__APPLE__)
+ ".globl PushAllRegistersAndIterateStack \n"
+#if !defined(_WIN64)
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+#endif // !defined(_WIN64)
+ ".p2align 2 \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !defined(__APPLE__)
+ // x19-x29 are callee-saved.
+ " stp x19, x20, [sp, #-16]! \n"
+ " stp x21, x22, [sp, #-16]! \n"
+ " stp x23, x24, [sp, #-16]! \n"
+ " stp x25, x26, [sp, #-16]! \n"
+ " stp x27, x28, [sp, #-16]! \n"
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // Sign return address.
+ " paciasp \n"
+#endif
+ " stp fp, lr, [sp, #-16]! \n"
+ // Maintain frame pointer.
+ " mov fp, sp \n"
+ // Pass 1st parameter (x0) unchanged (Stack*).
+ // Pass 2nd parameter (x1) unchanged (StackVisitor*).
+ // Save 3rd parameter (x2; IterateStackCallback)
+ " mov x7, x2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mov x2, sp \n"
+ " blr x7 \n"
+ // Load return address and frame pointer.
+ " ldp fp, lr, [sp], #16 \n"
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // Authenticate return address.
+ " autiasp \n"
+#endif
+ // Drop all callee-saved registers.
+ " add sp, sp, #80 \n"
+ " ret \n");
diff --git a/deps/v8/src/heap/base/asm/arm64/push_registers_masm.S b/deps/v8/src/heap/base/asm/arm64/push_registers_masm.S
new file mode 100644
index 0000000000..888523a8f1
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/arm64/push_registers_masm.S
@@ -0,0 +1,32 @@
+; Copyright 2020 the V8 project authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+; This file is exactly the same as push_registers_asm.cc, just formatted for
+; the Microsoft Arm Assembler.
+
+ AREA |.text|, CODE, ALIGN=4, READONLY
+ EXPORT PushAllRegistersAndIterateStack
+PushAllRegistersAndIterateStack
+ ; x19-x29 are callee-saved
+ STP x19, x20, [sp, #-16]!
+ STP x21, x22, [sp, #-16]!
+ STP x23, x24, [sp, #-16]!
+ STP x25, x26, [sp, #-16]!
+ STP x27, x28, [sp, #-16]!
+ STP fp, lr, [sp, #-16]!
+ ; Maintain frame pointer
+ MOV fp, sp
+ ; Pass 1st parameter (x0) unchanged (Stack*).
+ ; Pass 2nd parameter (x1) unchanged (StackVisitor*).
+ ; Save 3rd parameter (x2; IterateStackCallback)
+ MOV x7, x2
+ ; Pass 3rd parameter as sp (stack pointer)
+ MOV x2, sp
+ BLR x7
+ ; Load return address
+ LDR lr, [sp, #8]
+ ; Restore frame pointer and pop all callee-saved registers.
+ LDR fp, [sp], #96
+ RET
+ END
diff --git a/deps/v8/src/heap/base/asm/arm64/save_registers_asm.cc b/deps/v8/src/heap/base/asm/arm64/save_registers_asm.cc
deleted file mode 100644
index 5bb9e23056..0000000000
--- a/deps/v8/src/heap/base/asm/arm64/save_registers_asm.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-//
-// We maintain 16-byte alignment.
-//
-// Calling convention source:
-// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
-
-// 11 64-bit registers = 11 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 11,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(
-#if defined(__APPLE__)
- ".globl _SaveCalleeSavedRegisters \n"
- ".private_extern _SaveCalleeSavedRegisters \n"
- ".p2align 2 \n"
- "_SaveCalleeSavedRegisters: \n"
-#else // !defined(__APPLE__)
- ".globl SaveCalleeSavedRegisters \n"
-#if !defined(_WIN64)
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
-#endif // !defined(_WIN64)
- ".p2align 2 \n"
- "SaveCalleeSavedRegisters: \n"
-#endif // !defined(__APPLE__)
- // $x0: [ intptr_t* buffer ]
- // Save the callee-saved registers: x19-x29.
- " stp x19, x20, [x0], #16 \n"
- " stp x21, x22, [x0], #16 \n"
- " stp x23, x24, [x0], #16 \n"
- " stp x25, x26, [x0], #16 \n"
- " stp x27, x28, [x0], #16 \n"
- " str x29, [x0] \n"
- // Return.
- " ret \n");
diff --git a/deps/v8/src/heap/base/asm/arm64/save_registers_masm.S b/deps/v8/src/heap/base/asm/arm64/save_registers_masm.S
deleted file mode 100644
index ab79055250..0000000000
--- a/deps/v8/src/heap/base/asm/arm64/save_registers_masm.S
+++ /dev/null
@@ -1,24 +0,0 @@
-; Copyright 2020 the V8 project authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-; This file is exactly the same as save_registers_asm.cc, just formatted for
-; the Microsoft Arm Assembler.
-
-; Save all callee-saved registers in the specified buffer.
-; extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
- AREA |.text|, CODE, ALIGN=4, READONLY
- EXPORT SaveCalleeSavedRegisters
-SaveCalleeSavedRegisters
- ; x0: [ intptr_t* buffer ]
- ; x19-x29 are callee-saved
- STP x19, x20, [x0], #16
- STP x21, x22, [x0], #16
- STP x23, x24, [x0], #16
- STP x25, x26, [x0], #16
- STP x27, x28, [x0], #16
- STR x29, [x0]
- ; Return.
- RET
- END
diff --git a/deps/v8/src/heap/base/asm/ia32/push_registers_asm.cc b/deps/v8/src/heap/base/asm/ia32/push_registers_asm.cc
new file mode 100644
index 0000000000..ed9c14a50e
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/ia32/push_registers_asm.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 16-byte alignment at calls. There is a 4-byte return address
+// on the stack and we push 28 bytes which maintains 16-byte stack alignment
+// at the call.
+//
+// The following assumes cdecl calling convention.
+// Source: https://en.wikipedia.org/wiki/X86_calling_conventions#cdecl
+asm(
+#ifdef _WIN32
+ ".globl _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !_WIN32
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !_WIN32
+ // [ IterateStackCallback ]
+ // [ StackVisitor* ]
+ // [ Stack* ]
+ // [ ret ]
+ // ebp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %ebp \n"
+ " movl %esp, %ebp \n"
+ " push %ebx \n"
+ " push %esi \n"
+ " push %edi \n"
+ // Save 3rd parameter (IterateStackCallback).
+ " movl 28(%esp), %ecx \n"
+ // Pass 3rd parameter as esp (stack pointer).
+ " push %esp \n"
+ // Pass 2nd parameter (StackVisitor*).
+ " push 28(%esp) \n"
+ // Pass 1st parameter (Stack*).
+ " push 28(%esp) \n"
+ " call *%ecx \n"
+ // Pop the callee-saved registers.
+ " addl $24, %esp \n"
+ // Restore ebp as it was used as frame pointer.
+ " pop %ebp \n"
+ " ret \n");
diff --git a/deps/v8/src/heap/base/asm/ia32/push_registers_masm.asm b/deps/v8/src/heap/base/asm/ia32/push_registers_masm.asm
new file mode 100644
index 0000000000..a35fd6e527
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/ia32/push_registers_masm.asm
@@ -0,0 +1,48 @@
+;; Copyright 2020 the V8 project authors. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; MASM syntax
+;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
+
+.model flat, C
+
+public PushAllRegistersAndIterateStack
+
+.code
+PushAllRegistersAndIterateStack:
+ ;; Push all callee-saved registers to get them on the stack for conservative
+ ;; stack scanning.
+ ;;
+ ;; We maintain 16-byte alignment at calls. There is a 4-byte return address
+ ;; on the stack and we push 28 bytes which maintains 16-byte stack alignment
+ ;; at the call.
+ ;;
+ ;; The following assumes cdecl calling convention.
+ ;; Source: https://docs.microsoft.com/en-us/cpp/cpp/cdecl?view=vs-2019
+ ;;
+ ;; [ IterateStackCallback ]
+ ;; [ StackVisitor* ]
+ ;; [ Stack* ]
+ ;; [ ret ]
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+ ;; Save 3rd parameter (IterateStackCallback).
+ mov ecx, [ esp + 28 ]
+ ;; Pass 3rd parameter as esp (stack pointer).
+ push esp
+ ;; Pass 2nd parameter (StackVisitor*).
+ push [ esp + 28 ]
+ ;; Pass 1st parameter (Stack*).
+ push [ esp + 28 ]
+ call ecx
+ ;; Pop the callee-saved registers.
+ add esp, 24
+ ;; Restore ebp as it was used as frame pointer.
+ pop ebp
+ ret
+
+end
diff --git a/deps/v8/src/heap/base/asm/ia32/save_registers_asm.cc b/deps/v8/src/heap/base/asm/ia32/save_registers_asm.cc
deleted file mode 100644
index db8b0e9886..0000000000
--- a/deps/v8/src/heap/base/asm/ia32/save_registers_asm.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-//
-// The following assumes cdecl calling convention.
-// Source: https://en.wikipedia.org/wiki/X86_calling_conventions#cdecl
-
-// 3 32-bit registers = 3 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 3,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 4, "Mismatch in word size");
-
-asm(
-#ifdef _WIN32
- ".globl _SaveCalleeSavedRegisters \n"
- "_SaveCalleeSavedRegisters: \n"
-#else // !_WIN32
- ".globl SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
-#endif // !_WIN32
- // 8: [ intptr_t* buffer ]
- // 4: [ ret ]
- // 0: [ saved %ebp ]
- // %ebp is callee-saved. Maintain proper frame pointer for debugging.
- " push %ebp \n"
- " movl %esp, %ebp \n"
- // Load the buffer's address in %ecx.
- " movl 8(%ebp), %ecx \n"
- // Save the callee-saved registers.
- " movl %ebx, 0(%ecx) \n"
- " movl %esi, 4(%ecx) \n"
- " movl %edi, 8(%ecx) \n"
- // Restore %ebp as it was used as frame pointer and return.
- " pop %ebp \n"
- " ret \n");
diff --git a/deps/v8/src/heap/base/asm/ia32/save_registers_masm.asm b/deps/v8/src/heap/base/asm/ia32/save_registers_masm.asm
deleted file mode 100644
index 0892b02046..0000000000
--- a/deps/v8/src/heap/base/asm/ia32/save_registers_masm.asm
+++ /dev/null
@@ -1,36 +0,0 @@
-;; Copyright 2020 the V8 project authors. All rights reserved.
-;; Use of this source code is governed by a BSD-style license that can be
-;; found in the LICENSE file.
-
-;; MASM syntax
-;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
-
-.model flat, C
-
-public SaveCalleeSavedRegisters
-
-.code
- ;; Save all callee-saved registers in the specified buffer.
- ;; extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
- ;;
- ;; The following assumes cdecl calling convention.
- ;; Source: https://docs.microsoft.com/en-us/cpp/cpp/cdecl?view=vs-2019
-
-SaveCalleeSavedRegisters:
- ;; 8: [ intptr_t* buffer ]
- ;; 4: [ ret ]
- ;; 0: [ saved %ebp ]
- ;; %ebp is callee-saved. Maintain proper frame pointer for debugging.
- push ebp
- mov ebp, esp
- ;; Load the buffer's address in %ecx.
- mov ecx, [ebp + 8]
- ;; Save the callee-saved registers.
- mov [ecx], ebx
- mov [ecx + 4], esi
- mov [ecx + 8], edi
- ;; Restore %ebp as it was used as frame pointer and return.
- pop ebp
- ret
-
-end
diff --git a/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc
new file mode 100644
index 0000000000..aa8dcd356b
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".text \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi.d $sp, $sp, -96 \n"
+ " st.d $ra, $sp, 88 \n"
+ " st.d $s8, $sp, 80 \n"
+ " st.d $sp, $sp, 72 \n"
+ " st.d $fp, $sp, 64 \n"
+ " st.d $s7, $sp, 56 \n"
+ " st.d $s6, $sp, 48 \n"
+ " st.d $s5, $sp, 40 \n"
+ " st.d $s4, $sp, 32 \n"
+ " st.d $s3, $sp, 24 \n"
+ " st.d $s2, $sp, 16 \n"
+ " st.d $s1, $sp, 8 \n"
+ " st.d $s0, $sp, 0 \n"
+ // Maintain frame pointer.
+ " addi.d $s8, $sp, 0 \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " addi.d $a3, $a2, 0 \n"
+ // Call the callback.
+ // Pass 3rd parameter as sp (stack pointer).
+ " addi.d $a2, $sp, 0 \n"
+ " jirl $ra, $a3, 0 \n"
+ // Load return address.
+ " ld.d $ra, $sp, 88 \n"
+ // Restore frame pointer.
+ " ld.d $s8, $sp, 80 \n"
+ // Discard all callee-saved registers.
+ " addi.d $sp, $sp, 96 \n"
+ " jirl $zero, $ra, 0 \n");
diff --git a/deps/v8/src/heap/base/asm/loong64/save_registers_asm.cc b/deps/v8/src/heap/base/asm/loong64/save_registers_asm.cc
deleted file mode 100644
index e01cd3eecc..0000000000
--- a/deps/v8/src/heap/base/asm/loong64/save_registers_asm.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// 11 64-bit registers = 11 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 11,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(".text \n"
- ".global SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // $a0: [ intptr_t* buffer ]
- // Save the callee-saved registers.
- " st.d $s8, $a0, 0 \n"
- " st.d $sp, $a0, 8 \n"
- " st.d $fp, $a0, 16 \n"
- " st.d $s7, $a0, 24 \n"
- " st.d $s6, $a0, 32 \n"
- " st.d $s5, $a0, 40 \n"
- " st.d $s4, $a0, 48 \n"
- " st.d $s3, $a0, 56 \n"
- " st.d $s2, $a0, 64 \n"
- " st.d $s1, $a0, 72 \n"
- " st.d $s0, $a0, 80 \n"
- // Return.
- " jirl $zero, $ra, 0 \n");
diff --git a/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
new file mode 100644
index 0000000000..47779e0736
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
@@ -0,0 +1,49 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".text \n"
+ ".set noreorder \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " daddiu $sp, $sp, -96 \n"
+ " sd $ra, 88($sp) \n"
+ " sd $s8, 80($sp) \n"
+ " sd $sp, 72($sp) \n"
+ " sd $gp, 64($sp) \n"
+ " sd $s7, 56($sp) \n"
+ " sd $s6, 48($sp) \n"
+ " sd $s5, 40($sp) \n"
+ " sd $s4, 32($sp) \n"
+ " sd $s3, 24($sp) \n"
+ " sd $s2, 16($sp) \n"
+ " sd $s1, 8($sp) \n"
+ " sd $s0, 0($sp) \n"
+ // Maintain frame pointer.
+ " move $s8, $sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " move $a3, $a2 \n"
+ // Call the callback.
+ " jalr $a3 \n"
+ // Delay slot: Pass 3rd parameter as sp (stack pointer).
+ " move $a2, $sp \n"
+ // Load return address.
+ " ld $ra, 88($sp) \n"
+ // Restore frame pointer.
+ " ld $s8, 80($sp) \n"
+ " jr $ra \n"
+ // Delay slot: Discard all callee-saved registers.
+ " daddiu $sp, $sp, 96 \n");
diff --git a/deps/v8/src/heap/base/asm/mips64/save_registers_asm.cc b/deps/v8/src/heap/base/asm/mips64/save_registers_asm.cc
deleted file mode 100644
index b454e181ab..0000000000
--- a/deps/v8/src/heap/base/asm/mips64/save_registers_asm.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// 9 64-bit registers = 9 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 9,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(".text \n"
- ".set noreorder \n"
- ".global SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // $a0: [ intptr_t* buffer ]
- // Save the callee-saved registers.
- " sd $gp, 64($a0) \n"
- " sd $s7, 56($a0) \n"
- " sd $s6, 48($a0) \n"
- " sd $s5, 40($a0) \n"
- " sd $s4, 32($a0) \n"
- " sd $s3, 24($a0) \n"
- " sd $s2, 16($a0) \n"
- " sd $s1, 8($a0) \n"
- // ... one more in the delay slot!
- // Return.
- " jr $ra \n"
- // Delay slot:
- " sd $s0, 0($a0) \n");
diff --git a/deps/v8/src/heap/base/asm/ppc/push_registers_asm.cc b/deps/v8/src/heap/base/asm/ppc/push_registers_asm.cc
new file mode 100644
index 0000000000..f879980556
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/ppc/push_registers_asm.cc
@@ -0,0 +1,97 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// PPC ABI source:
+// http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html
+
+// AIX Runtime process stack:
+// https://www.ibm.com/support/knowledgecenter/ssw_aix_71/assembler/idalangref_runtime_process.html
+asm(
+#if defined(_AIX)
+ ".csect .text[PR] \n"
+ ".align 2 \n"
+ ".globl .PushAllRegistersAndIterateStack, hidden \n"
+ ".PushAllRegistersAndIterateStack: \n"
+#else
+ ".text \n"
+ ".align 2 \n"
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif
+ // Push all callee-saved registers.
+ // lr, TOC pointer, r16 to r31. 160 bytes.
+ // The parameter save area shall be allocated by the caller. 112 bytes.
+ // At any time, SP (r1) needs to be a multiple of 16 (i.e. 16-aligned).
+ " mflr 0 \n"
+ " std 0, 16(1) \n"
+#if defined(_AIX)
+ " std 2, 40(1) \n"
+#else
+ " std 2, 24(1) \n"
+#endif
+ " stdu 1, -256(1) \n"
+ " std 14, 112(1) \n"
+ " std 15, 120(1) \n"
+ " std 16, 128(1) \n"
+ " std 17, 136(1) \n"
+ " std 18, 144(1) \n"
+ " std 19, 152(1) \n"
+ " std 20, 160(1) \n"
+ " std 21, 168(1) \n"
+ " std 22, 176(1) \n"
+ " std 23, 184(1) \n"
+ " std 24, 192(1) \n"
+ " std 25, 200(1) \n"
+ " std 26, 208(1) \n"
+ " std 27, 216(1) \n"
+ " std 28, 224(1) \n"
+ " std 29, 232(1) \n"
+ " std 30, 240(1) \n"
+ " std 31, 248(1) \n"
+ // Pass 1st parameter (r3) unchanged (Stack*).
+ // Pass 2nd parameter (r4) unchanged (StackVisitor*).
+ // Save 3rd parameter (r5; IterateStackCallback).
+ " mr 6, 5 \n"
+#if defined(_AIX)
+ // Set up TOC for callee.
+ " ld 2,8(5) \n"
+ // AIX uses function descriptors, which means that
+ // pointers to functions do not point to code, but
+ // instead point to metadata about them, hence
+ // need to dereference.
+ " ld 6,0(6) \n"
+#endif
+ // Pass 3rd parameter as sp (stack pointer).
+ " mr 5, 1 \n"
+#if !defined(_AIX)
+ // Set up r12 to be equal to the callee address (in order for TOC
+ // relocation). Only needed on LE Linux.
+ " mr 12, 6 \n"
+#endif
+ // Call the callback.
+ " mtctr 6 \n"
+ " bctrl \n"
+ // Discard all the registers.
+ " addi 1, 1, 256 \n"
+ // Restore lr.
+ " ld 0, 16(1) \n"
+ " mtlr 0 \n"
+#if defined(_AIX)
+ // Restore TOC pointer.
+ " ld 2, 40(1) \n"
+#else
+ " ld 2, 24(1) \n"
+#endif
+ " blr \n");
diff --git a/deps/v8/src/heap/base/asm/ppc/save_registers_asm.cc b/deps/v8/src/heap/base/asm/ppc/save_registers_asm.cc
deleted file mode 100644
index 4280c083a9..0000000000
--- a/deps/v8/src/heap/base/asm/ppc/save_registers_asm.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-//
-// PPC ABI source:
-// http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html
-
-// AIX Runtime process stack:
-// https://www.ibm.com/support/knowledgecenter/ssw_aix_71/assembler/idalangref_runtime_process.html
-
-#ifdef __PPC64__
-
-// 20 64-bit registers = 20 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 20,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(
-#if defined(_AIX)
- ".globl .SaveCalleeSavedRegisters, hidden \n"
- ".csect .text[PR] \n"
- ".SaveCalleeSavedRegisters: \n"
-#else
- ".globl SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
-#endif
- // r3: [ intptr_t* buffer ]
- // Save the callee-saved registers: lr, TOC pointer (r2), r14-r31.
- " mflr 0 \n"
- " std 0, 8(3) \n"
- " std 2, 16(3) \n"
- " std 14, 24(3) \n"
- " std 15, 32(3) \n"
- " std 16, 40(3) \n"
- " std 17, 48(3) \n"
- " std 18, 56(3) \n"
- " std 19, 64(3) \n"
- " std 20, 72(3) \n"
- " std 21, 80(3) \n"
- " std 22, 88(3) \n"
- " std 23, 96(3) \n"
- " std 24, 104(3) \n"
- " std 25, 112(3) \n"
- " std 26, 120(3) \n"
- " std 27, 128(3) \n"
- " std 28, 136(3) \n"
- " std 29, 144(3) \n"
- " std 30, 152(3) \n"
- " std 31, 160(3) \n"
- // Return.
- " blr \n");
-
-#else // !__PPC64__
-
-// 20 32-bit registers = 20 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 20,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 4, "Mismatch in word size");
-
-asm(
-#if defined(_AIX)
- ".globl .SaveCalleeSavedRegisters, hidden \n"
- ".csect .text[PR] \n"
- ".SaveCalleeSavedRegisters: \n"
-#else
- ".globl SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
-#endif
- // r3: [ intptr_t* buffer ]
- // Save the callee-saved registers: lr, TOC pointer (r2), r14-r31.
- " mflr 0 \n"
- " st 0, 4(3) \n"
- " st 2, 8(3) \n"
- " st 14, 12(3) \n"
- " st 15, 16(3) \n"
- " st 16, 20(3) \n"
- " st 17, 24(3) \n"
- " st 18, 28(3) \n"
- " st 19, 32(3) \n"
- " st 20, 36(3) \n"
- " st 21, 40(3) \n"
- " st 22, 44(3) \n"
- " st 23, 48(3) \n"
- " st 24, 52(3) \n"
- " st 25, 56(3) \n"
- " st 26, 60(3) \n"
- " st 27, 64(3) \n"
- " st 28, 68(3) \n"
- " st 29, 72(3) \n"
- " st 30, 76(3) \n"
- " st 31, 80(3) \n"
- // Return.
- " blr \n");
-
-#endif // __PPC64__
diff --git a/deps/v8/src/heap/base/asm/riscv/push_registers_asm.cc b/deps/v8/src/heap/base/asm/riscv/push_registers_asm.cc
new file mode 100644
index 0000000000..7cc13ea3f3
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/riscv/push_registers_asm.cc
@@ -0,0 +1,93 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_asm.cc for why the function is not generated
+// using clang.
+//
+// Calling convention source:
+// https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf Table 18.2
+#ifdef V8_TARGET_ARCH_RISCV64
+asm(".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi sp, sp, -112 \n"
+ // Save return address.
+ " sd ra, 104(sp) \n"
+ // sp is callee-saved.
+ " sd sp, 96(sp) \n"
+ // s0-s11 are callee-saved.
+ " sd s11, 88(sp) \n"
+ " sd s10, 80(sp) \n"
+ " sd s9, 72(sp) \n"
+ " sd s8, 64(sp) \n"
+ " sd s7, 56(sp) \n"
+ " sd s6, 48(sp) \n"
+ " sd s5, 40(sp) \n"
+ " sd s4, 32(sp) \n"
+ " sd s3, 24(sp) \n"
+ " sd s2, 16(sp) \n"
+ " sd s1, 8(sp) \n"
+ " sd s0, 0(sp) \n"
+ // Maintain frame pointer(fp is s0).
+ " mv s0, sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback) to a3.
+ " mv a3, a2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mv a2, sp \n"
+ // Call the callback.
+ " jalr a3 \n"
+ // Load return address.
+ " ld ra, 104(sp) \n"
+ // Restore frame pointer.
+ " ld s0, 0(sp) \n"
+ " addi sp, sp, 112 \n"
+ " jr ra \n");
+#elif V8_TARGET_ARCH_RISCV32
+asm(".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi sp, sp, -56 \n"
+ // Save return address.
+ " sw ra, 52(sp) \n"
+ // sp is callee-saved.
+ " sw sp, 48(sp) \n"
+ // s0-s11 are callee-saved.
+ " sw s11, 44(sp) \n"
+ " sw s10, 40(sp) \n"
+ " sw s9, 36(sp) \n"
+ " sw s8, 32(sp) \n"
+ " sw s7, 28(sp) \n"
+ " sw s6, 24(sp) \n"
+ " sw s5, 20(sp) \n"
+ " sw s4, 16(sp) \n"
+ " sw s3, 12(sp) \n"
+ " sw s2, 8(sp) \n"
+ " sw s1, 4(sp) \n"
+ " sw s0, 0(sp) \n"
+ // Maintain frame pointer(fp is s0).
+ " mv s0, sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback) to a3.
+ " mv a3, a2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mv a2, sp \n"
+ // Call the callback.
+ " jalr a3 \n"
+ // Load return address.
+ " lw ra, 52(sp) \n"
+ // Restore frame pointer.
+ " lw s0, 0(sp) \n"
+ " addi sp, sp, 56 \n"
+ " jr ra \n");
+#endif
diff --git a/deps/v8/src/heap/base/asm/riscv/save_registers_asm.cc b/deps/v8/src/heap/base/asm/riscv/save_registers_asm.cc
deleted file mode 100644
index d46d8d7062..0000000000
--- a/deps/v8/src/heap/base/asm/riscv/save_registers_asm.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Calling convention source:
-// https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf Table 18.2
-
-#if V8_HOST_ARCH_RISCV64
-// 12 64-bit registers = 12 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 12,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(".global SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // a0: [ intptr_t* buffer ]
- // Save the callee-saved registers: s0-s11.
- " sd s11, 88(a0) \n"
- " sd s10, 80(a0) \n"
- " sd s9, 72(a0) \n"
- " sd s8, 64(a0) \n"
- " sd s7, 56(a0) \n"
- " sd s6, 48(a0) \n"
- " sd s5, 40(a0) \n"
- " sd s4, 32(a0) \n"
- " sd s3, 24(a0) \n"
- " sd s2, 16(a0) \n"
- " sd s1, 8(a0) \n"
- " sd s0, 0(a0) \n"
- // Return.
- " jr ra \n");
-#elif V8_HOST_ARCH_RISCV32
-// 12 32-bit registers = 12 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 12,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 4, "Mismatch in word size");
-
-asm(".global SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // a0: [ intptr_t* buffer ]
- // Save the callee-saved registers: s0-s11.
- " sw s11, 44(a0) \n"
- " sw s10, 40(a0) \n"
- " sw s9, 36(a0) \n"
- " sw s8, 32(a0) \n"
- " sw s7, 28(a0) \n"
- " sw s6, 24(a0) \n"
- " sw s5, 20(a0) \n"
- " sw s4, 16(a0) \n"
- " sw s3, 12(a0) \n"
- " sw s2, 8(a0) \n"
- " sw s1, 4(a0) \n"
- " sw s0, 0(a0) \n"
- // Return.
- " jr ra \n");
-#endif
diff --git a/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc b/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc
new file mode 100644
index 0000000000..ef954fa03a
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// S390 ABI source:
+// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
+asm(".text \n"
+ ".align 8 \n"
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers.
+ // r6-r13, r14 and sp(r15)
+ " stmg %r6, %sp, 48(%sp) \n"
+ // Allocate frame.
+ " lay %sp, -160(%sp) \n"
+ // Pass 1st parameter (r2) unchanged (Stack*).
+ // Pass 2nd parameter (r3) unchanged (StackVisitor*).
+ // Save 3rd parameter (r4; IterateStackCallback).
+ " lgr %r5, %r4 \n"
+ // Pass sp as 3rd parameter. 160+48 to point
+ // to callee saved region stored above.
+ " lay %r4, 208(%sp) \n"
+ // Call the callback.
+ " basr %r14, %r5 \n"
+ " lmg %r14,%sp, 272(%sp) \n"
+ " br %r14 \n");
diff --git a/deps/v8/src/heap/base/asm/s390/save_registers_asm.cc b/deps/v8/src/heap/base/asm/s390/save_registers_asm.cc
deleted file mode 100644
index 57831883a5..0000000000
--- a/deps/v8/src/heap/base/asm/s390/save_registers_asm.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-// See asm/x64/save_registers_asm.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// S390 ABI source:
-// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
-
-// 10 64-bit registers = 10 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 10,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(".globl SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // r2: [ intptr_t* buffer ]
- // Save the callee-saved registers: r6-r13, r14 and sp(r15).
- " stmg %r6, %sp, 0(%r2) \n"
- // Return.
- " br %r14 \n");
diff --git a/deps/v8/src/heap/base/asm/x64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/x64/push_registers_asm.cc
new file mode 100644
index 0000000000..1781a5816a
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/x64/push_registers_asm.cc
@@ -0,0 +1,106 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// We cannot rely on clang generating the function and right symbol mangling
+// as `__attribute__((naked))` does not prevent clang from generating TSAN
+// function entry stubs (`__tsan_func_entry`). Even with
+// `__attribute__((no_sanitize_thread))` annotation clang generates the entry
+// stub.
+// See https://bugs.llvm.org/show_bug.cgi?id=45400.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+// _WIN64 Defined as 1 when the compilation target is 64-bit ARM or x64.
+// Otherwise, undefined.
+#ifdef _WIN64
+
+// We maintain 16-byte alignment at calls. There is an 8-byte return address
+// on the stack and we push 232 bytes which maintains 16-byte stack alignment
+// at the call.
+// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
+asm(".globl PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %rbp \n"
+ " mov %rsp, %rbp \n"
+ // Dummy for alignment.
+ " push $0xCDCDCD \n"
+ " push %rsi \n"
+ " push %rdi \n"
+ " push %rbx \n"
+ " push %r12 \n"
+ " push %r13 \n"
+ " push %r14 \n"
+ " push %r15 \n"
+ " sub $160, %rsp \n"
+ // Use aligned instrs as we are certain that the stack is properly aligned.
+ " movdqa %xmm6, 144(%rsp) \n"
+ " movdqa %xmm7, 128(%rsp) \n"
+ " movdqa %xmm8, 112(%rsp) \n"
+ " movdqa %xmm9, 96(%rsp) \n"
+ " movdqa %xmm10, 80(%rsp) \n"
+ " movdqa %xmm11, 64(%rsp) \n"
+ " movdqa %xmm12, 48(%rsp) \n"
+ " movdqa %xmm13, 32(%rsp) \n"
+ " movdqa %xmm14, 16(%rsp) \n"
+ " movdqa %xmm15, (%rsp) \n"
+ // Pass 1st parameter (rcx) unchanged (Stack*).
+ // Pass 2nd parameter (rdx) unchanged (StackVisitor*).
+ // Save 3rd parameter (r8; IterateStackCallback)
+ " mov %r8, %r9 \n"
+ // Pass 3rd parameter as rsp (stack pointer).
+ " mov %rsp, %r8 \n"
+ // Call the callback.
+ " call *%r9 \n"
+ // Pop the callee-saved registers.
+ " add $224, %rsp \n"
+ // Restore rbp as it was used as frame pointer.
+ " pop %rbp \n"
+ " ret \n");
+
+#else // !_WIN64
+
+// We maintain 16-byte alignment at calls. There is an 8-byte return address
+// on the stack and we push 56 bytes which maintains 16-byte stack alignment
+// at the call.
+// Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
+asm(
+#ifdef __APPLE__
+ ".globl _PushAllRegistersAndIterateStack \n"
+ ".private_extern _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !__APPLE__
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !__APPLE__
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %rbp \n"
+ " mov %rsp, %rbp \n"
+ // Dummy for alignment.
+ " push $0xCDCDCD \n"
+ " push %rbx \n"
+ " push %r12 \n"
+ " push %r13 \n"
+ " push %r14 \n"
+ " push %r15 \n"
+ // Pass 1st parameter (rdi) unchanged (Stack*).
+ // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
+ // Save 3rd parameter (rdx; IterateStackCallback)
+ " mov %rdx, %r8 \n"
+ // Pass 3rd parameter as rsp (stack pointer).
+ " mov %rsp, %rdx \n"
+ // Call the callback.
+ " call *%r8 \n"
+ // Pop the callee-saved registers.
+ " add $48, %rsp \n"
+ // Restore rbp as it was used as frame pointer.
+ " pop %rbp \n"
+ " ret \n");
+
+#endif // !_WIN64
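As a quick sanity check on the byte counts quoted in the comments above, the following standalone C++ sketch (not part of the diff; the register widths are the standard x64 ones) redoes the alignment arithmetic for both variants:

    // Win64: 9 GPR pushes (rbp, the alignment dummy, rsi, rdi, rbx, r12-r15)
    // plus a 160-byte spill area for xmm6-xmm15.
    constexpr int kReturnAddressBytes = 8;
    constexpr int kWin64PushedBytes = 9 * 8 + 160;
    static_assert(kWin64PushedBytes == 232, "matches the '232 bytes' comment");
    static_assert((kReturnAddressBytes + kWin64PushedBytes) % 16 == 0,
                  "16-byte alignment holds at the callback call");

    // System V: 7 GPR pushes (rbp, the alignment dummy, rbx, r12-r15).
    constexpr int kSysVPushedBytes = 7 * 8;
    static_assert(kSysVPushedBytes == 56, "matches the '56 bytes' comment");
    static_assert((kReturnAddressBytes + kSysVPushedBytes) % 16 == 0,
                  "16-byte alignment holds at the callback call");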
diff --git a/deps/v8/src/heap/base/asm/x64/push_registers_masm.asm b/deps/v8/src/heap/base/asm/x64/push_registers_masm.asm
new file mode 100644
index 0000000000..a32e193c2f
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/x64/push_registers_masm.asm
@@ -0,0 +1,57 @@
+;; Copyright 2020 the V8 project authors. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; MASM syntax
+;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
+
+public PushAllRegistersAndIterateStack
+
+.code
+PushAllRegistersAndIterateStack:
+ ;; Push all callee-saved registers to get them on the stack for conservative
+ ;; stack scanning.
+ ;;
+ ;; We maintain 16-byte alignment at calls. There is an 8-byte return address
+ ;; on the stack and we push 232 bytes which maintains 16-byte stack
+ ;; alignment at the call.
+ ;; Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
+ ;;
+ ;; rbp is callee-saved. Maintain proper frame pointer for debugging.
+ push rbp
+ mov rbp, rsp
+ push 0CDCDCDh ;; Dummy for alignment.
+ push rsi
+ push rdi
+ push rbx
+ push r12
+ push r13
+ push r14
+ push r15
+ sub rsp, 160
+ ;; Use aligned instrs as we are certain that the stack is properly aligned.
+ movdqa xmmword ptr [rsp + 144], xmm6
+ movdqa xmmword ptr [rsp + 128], xmm7
+ movdqa xmmword ptr [rsp + 112], xmm8
+ movdqa xmmword ptr [rsp + 96], xmm9
+ movdqa xmmword ptr [rsp + 80], xmm10
+ movdqa xmmword ptr [rsp + 64], xmm11
+ movdqa xmmword ptr [rsp + 48], xmm12
+ movdqa xmmword ptr [rsp + 32], xmm13
+ movdqa xmmword ptr [rsp + 16], xmm14
+ movdqa xmmword ptr [rsp], xmm15
+ ;; Pass 1st parameter (rcx) unchanged (Stack*).
+ ;; Pass 2nd parameter (rdx) unchanged (StackVisitor*).
+ ;; Save 3rd parameter (r8; IterateStackCallback)
+ mov r9, r8
+ ;; Pass 3rd parameter as rsp (stack pointer).
+ mov r8, rsp
+ ;; Call the callback.
+ call r9
+ ;; Pop the callee-saved registers.
+ add rsp, 224
+ ;; Restore rbp as it was used as frame pointer.
+ pop rbp
+ ret
+
+end
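For context, the C++ side reaches this trampoline through a plain extern "C" declaration, which the stack.cc hunk further down adds. A minimal linkage sketch, with stand-in forward declarations instead of the real V8 types, looks roughly like this:

    // Stand-ins; the real types live in src/heap/base/stack.h.
    struct Stack;
    struct StackVisitor;

    using IterateStackCallback = void (*)(const Stack*, StackVisitor*,
                                          const void*);

    // Resolved at link time against push_registers_asm.cc on most toolchains,
    // or against this MASM file in MSVC builds.
    extern "C" void PushAllRegistersAndIterateStack(
        const Stack* stack, StackVisitor* visitor,
        IterateStackCallback callback);

    // The callback receives the stack pointer taken after all callee-saved
    // registers were pushed, so scanning [sp, stack start) also covers
    // whatever the registers held, e.g.:
    //   PushAllRegistersAndIterateStack(stack, visitor, &ScanConservatively);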
diff --git a/deps/v8/src/heap/base/asm/x64/save_registers_asm.cc b/deps/v8/src/heap/base/asm/x64/save_registers_asm.cc
deleted file mode 100644
index 5ff39fccd3..0000000000
--- a/deps/v8/src/heap/base/asm/x64/save_registers_asm.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <src/heap/base/stack.h>
-
-// Save all callee-saved registers in the specified buffer.
-// extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-//
-// We cannot rely on clang generating the function and right symbol mangling
-// as `__attribute__((naked))` does not prevent clang from generating TSAN
-// function entry stubs (`__tsan_func_entry`). Even with
-// `__attribute__((no_sanitize_thread)` annotation clang generates the entry
-// stub.
-// See https://bugs.llvm.org/show_bug.cgi?id=45400.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-// _WIN64 Defined as 1 when the compilation target is 64-bit ARM or x64.
-// Otherwise, undefined.
-
-#ifdef _WIN64
-// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
-
-// 7 64-bit registers + 1 for alignment purposes = 8 * 1 = 8 intprt_t
-// 10 128-bit registers = 10 * 2 = 20 intptr_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 28,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(".globl SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
- // %rcx: [ intptr_t* buffer ]
- // %rbp is callee-saved. Maintain proper frame pointer for debugging.
- " push %rbp \n"
- " mov %rsp, %rbp \n"
- // Save the callee-saved registers.
- " mov %rsi, 0(%rcx) \n"
- " mov %rdi, 8(%rcx) \n"
- " mov %rbx, 16(%rcx) \n"
- " mov %r12, 24(%rcx) \n"
- " mov %r13, 32(%rcx) \n"
- " mov %r14, 40(%rcx) \n"
- " mov %r15, 48(%rcx) \n"
- // Skip one slot to achieve proper alignment and use aligned instructions,
- // as we are sure that the buffer is properly aligned.
- " movdqa %xmm6, 64(%rcx) \n"
- " movdqa %xmm7, 80(%rcx) \n"
- " movdqa %xmm8, 96(%rcx) \n"
- " movdqa %xmm9, 112(%rcx) \n"
- " movdqa %xmm10, 128(%rcx) \n"
- " movdqa %xmm11, 144(%rcx) \n"
- " movdqa %xmm12, 160(%rcx) \n"
- " movdqa %xmm13, 176(%rcx) \n"
- " movdqa %xmm14, 192(%rcx) \n"
- " movdqa %xmm15, 208(%rcx) \n"
- // Return.
- " pop %rbp \n"
- " ret \n");
-
-#else // !_WIN64
-// Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
-
-// 5 64-bit registers = 5 intprt_t
-static_assert(heap::base::Stack::NumberOfCalleeSavedRegisters == 5,
- "Mismatch in the number of callee-saved registers");
-static_assert(sizeof(intptr_t) == 8, "Mismatch in word size");
-
-asm(
-#ifdef __APPLE__
- ".globl _SaveCalleeSavedRegisters \n"
- ".private_extern _SaveCalleeSavedRegisters \n"
- "_SaveCalleeSavedRegisters: \n"
-#else // !__APPLE__
- ".globl SaveCalleeSavedRegisters \n"
- ".type SaveCalleeSavedRegisters, %function \n"
- ".hidden SaveCalleeSavedRegisters \n"
- "SaveCalleeSavedRegisters: \n"
-#endif // !__APPLE__
- // %rdi: [ intptr_t* buffer ]
- // %rbp is callee-saved. Maintain proper frame pointer for debugging.
- " push %rbp \n"
- " mov %rsp, %rbp \n"
- // Save the callee-saved registers.
- " mov %rbx, 0(%rdi) \n"
- " mov %r12, 8(%rdi) \n"
- " mov %r13, 16(%rdi) \n"
- " mov %r14, 24(%rdi) \n"
- " mov %r15, 32(%rdi) \n"
- // Restore %rbp as it was used as frame pointer and return.
- " pop %rbp \n"
- " ret \n");
-
-#endif // !_WIN64
diff --git a/deps/v8/src/heap/base/asm/x64/save_registers_masm.asm b/deps/v8/src/heap/base/asm/x64/save_registers_masm.asm
deleted file mode 100644
index 29946a47ac..0000000000
--- a/deps/v8/src/heap/base/asm/x64/save_registers_masm.asm
+++ /dev/null
@@ -1,43 +0,0 @@
-;; Copyright 2020 the V8 project authors. All rights reserved.
-;; Use of this source code is governed by a BSD-style license that can be
-;; found in the LICENSE file.
-
-;; MASM syntax
-;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
-
-public SaveCalleeSavedRegisters
-
-.code
- ;; Save all callee-saved registers in the specified buffer.
- ;; extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-
-SaveCalleeSavedRegisters:
- ;; %rcx: [ intptr_t* buffer ]
- ;; %rbp is callee-saved. Maintain proper frame pointer for debugging.
- push rbp
- mov rbp, rsp
- ;; Save the callee-saved registers.
- mov qword ptr [rcx], rsi
- mov qword ptr [rcx + 8], rdi
- mov qword ptr [rcx + 16], rbx
- mov qword ptr [rcx + 24], r12
- mov qword ptr [rcx + 32], r13
- mov qword ptr [rcx + 40], r14
- mov qword ptr [rcx + 48], r15
- ;; Skip one slot to achieve proper alignment and use aligned instructions,
- ;; as we are sure that the buffer is properly aligned.
- movdqa xmmword ptr [rcx + 64], xmm6
- movdqa xmmword ptr [rcx + 80], xmm7
- movdqa xmmword ptr [rcx + 96], xmm8
- movdqa xmmword ptr [rcx + 112], xmm9
- movdqa xmmword ptr [rcx + 128], xmm10
- movdqa xmmword ptr [rcx + 144], xmm11
- movdqa xmmword ptr [rcx + 160], xmm12
- movdqa xmmword ptr [rcx + 176], xmm13
- movdqa xmmword ptr [rcx + 192], xmm14
- movdqa xmmword ptr [rcx + 208], xmm15
- ;; Restore %rbp as it was used as frame pointer and return.
- pop rbp
- ret
-
-end
diff --git a/deps/v8/src/heap/base/basic-slot-set.h b/deps/v8/src/heap/base/basic-slot-set.h
index 2f0bc1c872..87faceec44 100644
--- a/deps/v8/src/heap/base/basic-slot-set.h
+++ b/deps/v8/src/heap/base/basic-slot-set.h
@@ -101,6 +101,11 @@ class BasicSlotSet {
return slot_offset / (SlotGranularity * kBitsPerBucket);
}
+ // Converts bucket index into slot offset.
+ constexpr static size_t OffsetForBucket(size_t bucket_index) {
+ return bucket_index * SlotGranularity * kBitsPerBucket;
+ }
+
// The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
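The new OffsetForBucket is simply the inverse of the existing BucketForSlot for bucket-aligned offsets. A small self-contained sketch (the two constants are assumptions for illustration, not the real template parameters) makes the round-trip explicit:

    #include <cstddef>

    constexpr size_t kSlotGranularity = 8;     // assumed: one slot per 8 bytes
    constexpr size_t kBitsPerBucket = 32 * 8;  // assumed: 32 bytes of bits

    constexpr size_t BucketForSlot(size_t slot_offset) {
      return slot_offset / (kSlotGranularity * kBitsPerBucket);
    }
    constexpr size_t OffsetForBucket(size_t bucket_index) {
      return bucket_index * kSlotGranularity * kBitsPerBucket;
    }

    static_assert(BucketForSlot(OffsetForBucket(3)) == 3,
                  "offset -> bucket recovers the bucket index");
    static_assert(OffsetForBucket(BucketForSlot(
                      5 * kSlotGranularity * kBitsPerBucket)) ==
                      5 * kSlotGranularity * kBitsPerBucket,
                  "bucket -> offset recovers bucket-aligned offsets");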
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index 337206e817..3a0ec5f352 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -12,11 +12,14 @@
namespace heap::base {
-Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
-
-void Stack::SetStackStart(const void* stack_start) {
- stack_start_ = stack_start;
-}
+// Function with architecture-specific implementation:
+// Pushes all callee-saved registers to the stack and invokes the callback,
+// passing the supplied pointers (stack and argument) and the intended stack
+// marker.
+using IterateStackCallback = void (*)(const Stack*, StackVisitor*, const void*);
+extern "C" void PushAllRegistersAndIterateStack(const Stack* stack,
+ StackVisitor* visitor,
+ IterateStackCallback callback);
bool Stack::IsOnStack(const void* slot) const {
DCHECK_NOT_NULL(stack_start_);
@@ -61,29 +64,33 @@ void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
// native frame. In case |addr| points to a fake frame of the current stack
// iterate the fake frame. Frame layout see
// https://github.com/google/sanitizers/wiki/AddressSanitizerUseAfterReturn
- if (asan_fake_stack) {
- void* fake_frame_begin;
- void* fake_frame_end;
- void* real_stack_frame = __asan_addr_is_in_fake_stack(
- const_cast<void*>(asan_fake_stack), const_cast<void*>(address),
- &fake_frame_begin, &fake_frame_end);
- if (real_stack_frame) {
- // |address| points to a fake frame. Check that the fake frame is part
- // of this stack.
- if (stack_start >= real_stack_frame && real_stack_frame >= stack_end) {
- // Iterate the fake frame.
- for (const void* const* current =
- reinterpret_cast<const void* const*>(fake_frame_begin);
- current < fake_frame_end; ++current) {
- const void* address = *current;
- if (address == nullptr) continue;
- visitor->VisitPointer(address);
- }
+ if (!asan_fake_stack) return;
+ void* fake_frame_begin;
+ void* fake_frame_end;
+ void* real_stack_frame = __asan_addr_is_in_fake_stack(
+ const_cast<void*>(asan_fake_stack), const_cast<void*>(address),
+ &fake_frame_begin, &fake_frame_end);
+ if (real_stack_frame) {
+ // |address| points to a fake frame. Check that the fake frame is part
+ // of this stack.
+ if (stack_start >= real_stack_frame && real_stack_frame >= stack_end) {
+ // Iterate the fake frame.
+ for (const void* const* current =
+ reinterpret_cast<const void* const*>(fake_frame_begin);
+ current < fake_frame_end; ++current) {
+ const void* address = *current;
+ if (address == nullptr) continue;
+ visitor->VisitPointer(address);
}
}
}
}
-
+#else
+void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
+ const void* asan_fake_stack,
+ const void* stack_start,
+ const void* stack_end,
+ const void* address) {}
#endif // V8_USE_ADDRESS_SANITIZER
void IterateUnsafeStackIfNecessary(StackVisitor* visitor) {
@@ -110,9 +117,8 @@ void IterateUnsafeStackIfNecessary(StackVisitor* visitor) {
#endif // defined(__has_feature)
}
-// Called by the trampoline that pushes registers on the stack. This method
-// should never be inlined to ensure that a possible redzone cannot contain
-// any data that needs to be scanned.
+// This method should never be inlined to ensure that a possible redzone cannot
+// contain any data that needs to be scanned.
V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
DISABLE_ASAN
@@ -120,26 +126,31 @@ DISABLE_ASAN
// thread, e.g., for interrupt handling. Atomic reads are not enough as the
// other thread may use a lock to synchronize the access.
DISABLE_TSAN
-void IteratePointersImpl(StackVisitor* visitor, const void* stack_start,
- const void* stack_end,
- const Stack::CalleeSavedRegisters* registers) {
-#ifdef V8_USE_ADDRESS_SANITIZER
- const void* asan_fake_stack = __asan_get_current_fake_stack();
-#endif // V8_USE_ADDRESS_SANITIZER
+void IteratePointersInStack(StackVisitor* visitor, const void* top,
+ const void* start, const void* asan_fake_stack) {
+ for (const void* const* current = reinterpret_cast<const void* const*>(top);
+ current < start; ++current) {
+ // MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
+ // into a local which is unpoisoned.
+ const void* address = *current;
+ MSAN_MEMORY_IS_INITIALIZED(&address, sizeof(address));
+ if (address == nullptr) continue;
+ visitor->VisitPointer(address);
+ IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack, start, top,
+ address);
+ }
+}
+
+} // namespace
- // Iterate through the registers.
- if (registers != nullptr) {
- for (intptr_t value : registers->buffer) {
- const void* address = reinterpret_cast<const void*>(value);
- MSAN_MEMORY_IS_INITIALIZED(&address, sizeof(address));
- if (address == nullptr) continue;
- visitor->VisitPointer(address);
+// static
+void Stack::IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
+ const void* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
- IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack, stack_start,
- stack_end, address);
+ const void* asan_fake_stack = __asan_get_current_fake_stack();
+#else
+ const void* asan_fake_stack = nullptr;
#endif // V8_USE_ADDRESS_SANITIZER
- }
- }
// Iterate through the stack.
// All supported platforms should have their stack aligned to at least
@@ -147,92 +158,51 @@ void IteratePointersImpl(StackVisitor* visitor, const void* stack_start,
constexpr size_t kMinStackAlignment = sizeof(void*);
CHECK_EQ(0u,
reinterpret_cast<uintptr_t>(stack_end) & (kMinStackAlignment - 1));
- for (const void* const* current =
- reinterpret_cast<const void* const*>(stack_end);
- current < stack_start; ++current) {
- // MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
- // into a local which is unpoisoned.
- const void* address = *current;
- MSAN_MEMORY_IS_INITIALIZED(&address, sizeof(address));
- if (address == nullptr) continue;
- visitor->VisitPointer(address);
-#ifdef V8_USE_ADDRESS_SANITIZER
- IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack, stack_start,
- stack_end, address);
-#endif // V8_USE_ADDRESS_SANITIZER
+ IteratePointersInStack(visitor,
+ reinterpret_cast<const void* const*>(stack_end),
+ stack->stack_start_, asan_fake_stack);
+
+ for (const auto& segment : stack->inactive_stacks_) {
+ IteratePointersInStack(visitor, segment.top, segment.start,
+ asan_fake_stack);
}
-}
-} // namespace
+ IterateUnsafeStackIfNecessary(visitor);
+}
void Stack::IteratePointers(StackVisitor* visitor) const {
- DCHECK_NOT_NULL(stack_start_);
- PushAllRegistersAndInvokeCallback(visitor, stack_start_,
- &IteratePointersImpl);
+ // TODO(v8:13493): Remove the implication as soon as IsOnCurrentStack is
+ // compatible with stack switching.
+ DCHECK_IMPLIES(!wasm_stack_switching_, IsOnCurrentStack(stack_start_));
+ PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
// TODO(chromium:1056170): Add support for SIMD and/or filtering.
IterateUnsafeStackIfNecessary(visitor);
}
-void Stack::IteratePointersUnsafe(StackVisitor* visitor,
- const void* stack_end) const {
- IteratePointersImpl(visitor, stack_start_, stack_end, nullptr);
-}
-
-namespace {
-// Function with architecture-specific implementation:
-// Saves all callee-saved registers in the specified buffer.
-extern "C" void SaveCalleeSavedRegisters(intptr_t* buffer);
-} // namespace
-
-V8_NOINLINE void Stack::PushAllRegistersAndInvokeCallback(
- StackVisitor* visitor, const void* stack_start, Callback callback) {
- Stack::CalleeSavedRegisters registers;
- SaveCalleeSavedRegisters(registers.buffer.data());
- callback(visitor, stack_start, v8::base::Stack::GetCurrentStackPosition(),
- &registers);
+void Stack::IteratePointersUntilMarker(StackVisitor* visitor) const {
+ DCHECK_NOT_NULL(stack_start_);
+ DCHECK_NOT_NULL(stack_marker_);
+ DCHECK_GE(stack_start_, stack_marker_);
+ IteratePointersImpl(this, visitor, stack_marker_);
}
-namespace {
-
#ifdef DEBUG
-
-bool IsOnCurrentStack(const void* ptr) {
+// static
+bool Stack::IsOnCurrentStack(const void* ptr) {
DCHECK_NOT_NULL(ptr);
const void* current_stack_start = v8::base::Stack::GetStackStart();
const void* current_stack_top = v8::base::Stack::GetCurrentStackPosition();
return ptr <= current_stack_start && ptr >= current_stack_top;
}
-
-bool IsValidMarker(const void* stack_start, const void* stack_marker) {
- const void* current_stack_top = v8::base::Stack::GetCurrentStackPosition();
- return stack_marker <= stack_start && stack_marker >= current_stack_top;
-}
-
#endif // DEBUG
-} // namespace
-
-// In the following three methods, the stored stack start needs not coincide
-// with the current (actual) stack start (e.g., in case it was explicitly set to
-// a lower address, in tests) but has to be inside the current stack.
-
-void Stack::set_marker(const void* stack_marker) {
- DCHECK(IsOnCurrentStack(stack_start_));
- DCHECK_NOT_NULL(stack_marker);
- DCHECK(IsValidMarker(stack_start_, stack_marker));
- stack_marker_ = stack_marker;
+void Stack::AddStackSegment(const void* start, const void* top) {
+ DCHECK_LE(top, start);
+ inactive_stacks_.push_back({start, top});
}
-void Stack::clear_marker() {
- DCHECK(IsOnCurrentStack(stack_start_));
- stack_marker_ = nullptr;
-}
-
-const void* Stack::get_marker() const {
- DCHECK_NOT_NULL(stack_marker_);
- return stack_marker_;
-}
+void Stack::ClearStackSegments() { inactive_stacks_.clear(); }
} // namespace heap::base
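The net effect of the stack.cc changes is that the conservative scan now walks the active stack and then every registered inactive segment with the same word-by-word loop. A simplified, self-contained sketch of that shape (stand-in types, not the real StackVisitor):

    #include <cstdio>
    #include <vector>

    // Stacks grow down, so `start` is the highest address and `top` the lowest.
    struct Segment { const void* start; const void* top; };

    void ScanRange(const void* top, const void* start) {
      for (const void* const* p = static_cast<const void* const*>(top);
           p < static_cast<const void* const*>(start); ++p) {
        if (*p != nullptr) std::printf("candidate pointer: %p\n", *p);
      }
    }

    void ScanAll(const void* stack_top, const void* stack_start,
                 const std::vector<Segment>& inactive_stacks) {
      ScanRange(stack_top, stack_start);
      for (const Segment& s : inactive_stacks) ScanRange(s.top, s.start);
    }

    int main() {
      void* frame[4] = {};
      int probe = 0;
      frame[1] = &probe;  // a slot that happens to hold a pointer
      ScanAll(&frame[0], &frame[4], {});
    }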
diff --git a/deps/v8/src/heap/base/stack.h b/deps/v8/src/heap/base/stack.h
index f8fca75153..1d3aaab3db 100644
--- a/deps/v8/src/heap/base/stack.h
+++ b/deps/v8/src/heap/base/stack.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_BASE_STACK_H_
#define V8_HEAP_BASE_STACK_H_
+#include <vector>
+
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -20,58 +22,24 @@ class StackVisitor {
// - native stack;
// - ASAN/MSAN;
// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
+//
+// Stacks grow down, so throughout this class "start" refers to the highest
+// address of the stack, and top/marker the lowest.
+//
+// TODO(chromium:1056170): Consider adding a component that keeps track
+// of relevant GC stack regions where interesting pointers can be found.
class V8_EXPORT_PRIVATE Stack final {
public:
- // The following constant is architecture-specific. The size of the buffer
- // for storing the callee-saved registers is going to be equal to
- // NumberOfCalleeSavedRegisters * sizeof(intptr_t).
-
-#if V8_HOST_ARCH_IA32
- // Must be consistent with heap/base/asm/ia32/.
- static constexpr int NumberOfCalleeSavedRegisters = 3;
-#elif V8_HOST_ARCH_X64
-#ifdef _WIN64
- // Must be consistent with heap/base/asm/x64/.
- static constexpr int NumberOfCalleeSavedRegisters = 28;
-#else // !_WIN64
- // Must be consistent with heap/base/asm/x64/.
- static constexpr int NumberOfCalleeSavedRegisters = 5;
-#endif // !_WIN64
-#elif V8_HOST_ARCH_ARM64
- // Must be consistent with heap/base/asm/arm64/.
- static constexpr int NumberOfCalleeSavedRegisters = 11;
-#elif V8_HOST_ARCH_ARM
- // Must be consistent with heap/base/asm/arm/.
- static constexpr int NumberOfCalleeSavedRegisters = 8;
-#elif V8_HOST_ARCH_PPC64
- // Must be consistent with heap/base/asm/ppc/.
- static constexpr int NumberOfCalleeSavedRegisters = 20;
-#elif V8_HOST_ARCH_PPC
- // Must be consistent with heap/base/asm/ppc/.
- static constexpr int NumberOfCalleeSavedRegisters = 20;
-#elif V8_HOST_ARCH_MIPS64
- // Must be consistent with heap/base/asm/mips64el/.
- static constexpr int NumberOfCalleeSavedRegisters = 9;
-#elif V8_HOST_ARCH_LOONG64
- // Must be consistent with heap/base/asm/loong64/.
- static constexpr int NumberOfCalleeSavedRegisters = 11;
-#elif V8_HOST_ARCH_S390
- // Must be consistent with heap/base/asm/s390/.
- static constexpr int NumberOfCalleeSavedRegisters = 10;
-#elif V8_HOST_ARCH_RISCV32
- // Must be consistent with heap/base/asm/riscv/.
- static constexpr int NumberOfCalleeSavedRegisters = 12;
-#elif V8_HOST_ARCH_RISCV64
- // Must be consistent with heap/base/asm/riscv/.
- static constexpr int NumberOfCalleeSavedRegisters = 12;
-#else
-#error Unknown architecture.
-#endif
-
- explicit Stack(const void* stack_start = nullptr);
+ explicit Stack(const void* stack_start = nullptr,
+ bool wasm_stack_switching = false)
+ : stack_start_(stack_start),
+ wasm_stack_switching_(wasm_stack_switching) {}
// Sets the start of the stack.
- void SetStackStart(const void* stack_start);
+ void SetStackStart(const void* stack_start, bool wasm_stack_switching) {
+ stack_start_ = stack_start;
+ wasm_stack_switching_ = wasm_stack_switching;
+ }
// Returns true if |slot| is part of the stack and false otherwise.
bool IsOnStack(const void* slot) const;
@@ -81,43 +49,49 @@ class V8_EXPORT_PRIVATE Stack final {
// `visitor`.
void IteratePointers(StackVisitor* visitor) const;
- // Word-aligned iteration of the stack, starting at `stack_end`. Slot values
- // are passed on to `visitor`. This is intended to be used with verifiers that
- // only visit a subset of the stack of IteratePointers().
+ // Word-aligned iteration of the stack, starting at `stack_marker_`. Slot
+ // values are passed on to `visitor`. This is intended to be used with
+ // verifiers that only visit a subset of what IteratePointers() visits.
//
// **Ignores:**
// - Callee-saved registers.
// - SafeStack.
- void IteratePointersUnsafe(StackVisitor* visitor,
- const void* stack_end) const;
+ void IteratePointersUntilMarker(StackVisitor* visitor) const;
- // Returns the start of the stack.
- const void* stack_start() const { return stack_start_; }
+ void AddStackSegment(const void* start, const void* top);
+ void ClearStackSegments();
- // Sets, clears and gets the stack marker.
- void set_marker(const void* stack_marker);
- void clear_marker();
- const void* get_marker() const;
+ // This method should be inlined, to set the marker at the current frame's
+ // stack top.
+ V8_INLINE void SetMarkerToCurrentStackPosition() {
+ stack_marker_ = v8::base::Stack::GetCurrentStackPosition();
+ }
- // Mechanism for saving the callee-saved registers, required for conservative
- // stack scanning.
+ private:
+#ifdef DEBUG
+ static bool IsOnCurrentStack(const void* ptr);
+#endif
- struct CalleeSavedRegisters {
- // We always double-align this buffer, to support for longer registers,
- // e.g., 128-bit registers in WIN64.
- alignas(2 * sizeof(intptr_t))
- std::array<intptr_t, NumberOfCalleeSavedRegisters> buffer;
- };
+ static void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
+ const void* stack_end);
- using Callback = void (*)(StackVisitor*, const void*, const void*,
- const CalleeSavedRegisters* registers);
+ const void* stack_start_;
- static V8_NOINLINE void PushAllRegistersAndInvokeCallback(
- StackVisitor* visitor, const void* stack_start, Callback callback);
+ // Marker that signals end of the interesting stack region in which on-heap
+ // pointers can be found.
+ const void* stack_marker_;
- private:
- const void* stack_start_;
- const void* stack_marker_ = nullptr;
+ // TODO(v8:13493): This is for suppressing the check that we are in the
+ // correct stack, in the case of WASM stack switching. It will be removed as
+ // soon as context saving becomes compatible with stack switching.
+ bool wasm_stack_switching_;
+
+ // Stack segments that may also contain pointers and should be scanned.
+ struct StackSegments {
+ const void* start;
+ const void* top;
+ };
+ std::vector<StackSegments> inactive_stacks_;
};
} // namespace heap::base
diff --git a/deps/v8/src/heap/base/worklist.cc b/deps/v8/src/heap/base/worklist.cc
index effca7fbc7..ab42a8af5b 100644
--- a/deps/v8/src/heap/base/worklist.cc
+++ b/deps/v8/src/heap/base/worklist.cc
@@ -4,7 +4,15 @@
#include "src/heap/base/worklist.h"
-namespace heap::base::internal {
+namespace heap::base {
+
+// static
+bool WorklistBase::predictable_order_ = false;
+
+// static
+void WorklistBase::EnforcePredictableOrder() { predictable_order_ = true; }
+
+namespace internal {
// static
SegmentBase* SegmentBase::GetSentinelSegmentAddress() {
@@ -12,4 +20,5 @@ SegmentBase* SegmentBase::GetSentinelSegmentAddress() {
return &sentinel_segment;
}
-} // namespace heap::base::internal
+} // namespace internal
+} // namespace heap::base
diff --git a/deps/v8/src/heap/base/worklist.h b/deps/v8/src/heap/base/worklist.h
index 8d07a7d747..0fc798e321 100644
--- a/deps/v8/src/heap/base/worklist.h
+++ b/deps/v8/src/heap/base/worklist.h
@@ -34,6 +34,16 @@ class V8_EXPORT_PRIVATE SegmentBase {
};
} // namespace internal
+class V8_EXPORT_PRIVATE WorklistBase final {
+ public:
+ // Enforces predictable order of push/pop sequences in single-threaded mode.
+ static void EnforcePredictableOrder();
+ static bool PredictableOrder() { return predictable_order_; }
+
+ private:
+ static bool predictable_order_;
+};
+
// A global worklist based on segments which allows for a thread-local
// producer/consumer pattern with global work stealing.
//
@@ -206,13 +216,19 @@ class Worklist<EntryType, MinSegmentSize>::Segment final
: public internal::SegmentBase {
public:
static Segment* Create(uint16_t min_segment_size) {
- auto result = v8::base::AllocateAtLeast<char>(
- MallocSizeForCapacity(min_segment_size));
+ const auto wanted_bytes = MallocSizeForCapacity(min_segment_size);
+ v8::base::AllocationResult<char*> result;
+ if (WorklistBase::PredictableOrder()) {
+ result.ptr = static_cast<char*>(v8::base::Malloc(wanted_bytes));
+ result.count = wanted_bytes;
+ } else {
+ result = v8::base::AllocateAtLeast<char>(wanted_bytes);
+ }
return new (result.ptr)
Segment(CapacityForMallocSize(result.count * sizeof(char)));
}
- static void Delete(Segment* segment) { free(segment); }
+ static void Delete(Segment* segment) { v8::base::Free(segment); }
V8_INLINE void Push(EntryType entry);
V8_INLINE void Pop(EntryType* entry);
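The Segment::Create change above boils down to one allocation-policy switch: with predictable order enforced, request exactly the wanted size so segment capacities (and hence push/pop order) are deterministic; otherwise keep using the size-returning allocator. A hedged standalone sketch of that policy (AllocateSegmentMemory and the 64-byte rounding are hypothetical stand-ins, not V8 APIs):

    #include <cstddef>
    #include <cstdlib>

    struct Allocation {
      char* ptr;
      size_t count;  // number of usable bytes actually obtained
    };

    Allocation AllocateSegmentMemory(size_t wanted_bytes,
                                     bool predictable_order) {
      if (predictable_order) {
        // Deterministic capacity: exactly what was asked for.
        return {static_cast<char*>(std::malloc(wanted_bytes)), wanted_bytes};
      }
      // Stand-in for an AllocateAtLeast-style allocator that may round up.
      size_t rounded = (wanted_bytes + 63) / 64 * 64;
      return {static_cast<char*>(std::malloc(rounded)), rounded};
    }

    int main() {
      Allocation a = AllocateSegmentMemory(100, /*predictable_order=*/true);
      Allocation b = AllocateSegmentMemory(100, /*predictable_order=*/false);
      std::free(a.ptr);
      std::free(b.ptr);
    }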
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
index 60b65bc300..152786f19d 100644
--- a/deps/v8/src/heap/basic-memory-chunk.cc
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -17,8 +17,8 @@ namespace internal {
// Verify write barrier offsets match the real offsets.
static_assert(BasicMemoryChunk::Flag::IS_EXECUTABLE ==
heap_internals::MemoryChunk::kIsExecutableBit);
-static_assert(BasicMemoryChunk::Flag::IN_SHARED_HEAP ==
- heap_internals::MemoryChunk::kInSharedHeapBit);
+static_assert(BasicMemoryChunk::Flag::IN_WRITABLE_SHARED_SPACE ==
+ heap_internals::MemoryChunk::kInWritableSharedSpaceBit);
static_assert(BasicMemoryChunk::Flag::INCREMENTAL_MARKING ==
heap_internals::MemoryChunk::kMarkingBit);
static_assert(BasicMemoryChunk::Flag::FROM_PAGE ==
@@ -64,7 +64,9 @@ BasicMemoryChunk::BasicMemoryChunk(Heap* heap, BaseSpace* space,
high_water_mark_(area_start - reinterpret_cast<Address>(this)),
owner_(space),
reservation_(std::move(reservation)) {
- marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
+ if (space->identity() != RO_SPACE) {
+ marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
+ }
}
bool BasicMemoryChunk::InOldSpace() const {
@@ -84,6 +86,19 @@ void BasicMemoryChunk::SynchronizedHeapLoad() const {
}
#endif
+// static
+MarkBit BasicMemoryChunk::ComputeMarkBit(HeapObject object) {
+ return BasicMemoryChunk::ComputeMarkBit(object.address());
+}
+
+// static
+MarkBit BasicMemoryChunk::ComputeMarkBit(Address address) {
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
+ int index = chunk->AddressToMarkbitIndex(address);
+ return chunk->marking_bitmap<AccessMode::NON_ATOMIC>()->MarkBitFromIndex(
+ index);
+}
+
class BasicMemoryChunkValidator {
// Computed offsets should match the compiler generated ones.
static_assert(BasicMemoryChunk::kSizeOffset ==
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index 60a711b622..2b14498f84 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -38,7 +38,7 @@ class BasicMemoryChunk {
NO_FLAGS = 0u,
// This page belongs to a shared heap.
- IN_SHARED_HEAP = 1u << 0,
+ IN_WRITABLE_SHARED_SPACE = 1u << 0,
// These two flags are used in the write barrier to catch "interesting"
// references.
@@ -119,11 +119,6 @@ class BasicMemoryChunk {
static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
POINTERS_TO_HERE_ARE_INTERESTING;
- static constexpr MainThreadFlags
- kPointersToHereAreInterestingOrInSharedHeapMask =
- MainThreadFlags(POINTERS_TO_HERE_ARE_INTERESTING) |
- MainThreadFlags(IN_SHARED_HEAP);
-
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
POINTERS_FROM_HERE_ARE_INTERESTING;
@@ -135,7 +130,9 @@ class BasicMemoryChunk {
static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;
- static constexpr MainThreadFlags kInSharedHeap = IN_SHARED_HEAP;
+ static constexpr MainThreadFlags kInSharedHeap = IN_WRITABLE_SHARED_SPACE;
+
+ static constexpr MainThreadFlags kIncrementalMarking = INCREMENTAL_MARKING;
static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
MainThreadFlags(kEvacuationCandidateMask) |
@@ -242,6 +239,7 @@ class BasicMemoryChunk {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
+ bool IsMarking() const { return IsFlagSet(INCREMENTAL_MARKING); }
bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
bool IsToPage() const { return IsFlagSet(TO_PAGE); }
bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
@@ -255,7 +253,9 @@ class BasicMemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
- bool InSharedHeap() const { return IsFlagSet(IN_SHARED_HEAP); }
+ bool InWritableSharedSpace() const {
+ return IsFlagSet(IN_WRITABLE_SHARED_SPACE);
+ }
bool IsWritable() const {
// If this is a read-only space chunk but heap_ is non-null, it has not yet
@@ -303,6 +303,7 @@ class BasicMemoryChunk {
template <AccessMode mode>
ConcurrentBitmap<mode>* marking_bitmap() const {
+ DCHECK(!InReadOnlySpace());
return static_cast<ConcurrentBitmap<mode>*>(
Bitmap::FromAddress(address() + kMarkingBitmapOffset));
}
@@ -348,6 +349,10 @@ class BasicMemoryChunk {
void SynchronizedHeapLoad() const;
#endif
+ // Computes position of object in marking bitmap. Useful for debugging.
+ V8_ALLOW_UNUSED static MarkBit ComputeMarkBit(HeapObject object);
+ V8_ALLOW_UNUSED static MarkBit ComputeMarkBit(Address address);
+
protected:
// Overall size of the chunk, including the header and guards.
size_t size_;
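The new ComputeMarkBit helpers follow the usual address-to-bitmap mapping: mask the address down to its chunk, then use the word offset inside the chunk as the bit index. The constants below are assumptions for illustration only (they are not taken from this header):

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kAssumedChunkSize = 256 * 1024;   // assumption
    constexpr uintptr_t kChunkMask = ~(kAssumedChunkSize - 1);
    constexpr int kAssumedTaggedSizeLog2 = 3;             // assumption: 8-byte slots

    constexpr uintptr_t ChunkBase(uintptr_t address) {
      return address & kChunkMask;
    }
    constexpr size_t MarkBitIndex(uintptr_t address) {
      return (address - ChunkBase(address)) >> kAssumedTaggedSizeLog2;
    }

    // 64 bytes into a chunk corresponds to the 8th mark bit.
    static_assert(MarkBitIndex(ChunkBase(0x42f00010u) + 64) == 8,
                  "offset 64 -> bit index 8");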
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index ae240d0f1c..34bf431458 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -6,30 +6,25 @@
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
+#include "src/base/once.h"
#include "src/codegen/constants-arch.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap-inl.h"
#include "src/utils/allocation.h"
+#if defined(V8_OS_WIN64)
+#include "src/diagnostics/unwinding-info-win64.h"
+#endif // V8_OS_WIN64
namespace v8 {
namespace internal {
namespace {
-// Mutex for creating process_wide_code_range_.
-base::LazyMutex process_wide_code_range_creation_mutex_ =
- LAZY_MUTEX_INITIALIZER;
-
-// Weak pointer holding the process-wide CodeRange, if one has been created. All
-// Heaps hold a std::shared_ptr to this, so this is destroyed when no Heaps
-// remain.
-base::LazyInstance<std::weak_ptr<CodeRange>>::type process_wide_code_range_ =
- LAZY_INSTANCE_INITIALIZER;
-
DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)
void FunctionInStaticBinaryForAddressHint() {}
+
} // anonymous namespace
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
@@ -97,6 +92,9 @@ size_t CodeRange::GetWritableReservedAreaSize() {
return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}
+#define TRACE(...) \
+ if (v8_flags.trace_code_range_allocation) PrintF(__VA_ARGS__)
+
bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
size_t requested) {
DCHECK_NE(requested, 0);
@@ -108,67 +106,131 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
requested = kMinimumCodeRangeSize;
}
+ const size_t kPageSize = MemoryChunk::kPageSize;
+ CHECK(IsAligned(kPageSize, page_allocator->AllocatePageSize()));
+
// When V8_EXTERNAL_CODE_SPACE_BOOL is enabled the allocatable region must
// not cross the 4Gb boundary and thus the default compression scheme of
- // truncating the Code pointers to 32-bits still works. It's achieved by
- // specifying base_alignment parameter.
- // Note that the alignment is calculated before adjusting the requested size
- // for GetWritableReservedAreaSize(). The reasons are:
- // - this extra page is used by breakpad on Windows and it's allowed to cross
- // the 4Gb boundary,
- // - rounding up the adjusted size would result in requresting unnecessarily
- // big aligment.
- const size_t base_alignment =
- V8_EXTERNAL_CODE_SPACE_BOOL
- ? base::bits::RoundUpToPowerOfTwo(requested)
- : VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
+ // truncating the InstructionStream pointers to 32-bits still works. It's
+ // achieved by specifying base_alignment parameter.
+ const size_t base_alignment = V8_EXTERNAL_CODE_SPACE_BOOL
+ ? base::bits::RoundUpToPowerOfTwo(requested)
+ : kPageSize;
- const size_t reserved_area = GetWritableReservedAreaSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area)) {
- requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
- // Fulfilling both reserved pages requirement and huge code area
- // alignments is not supported (requires re-implementation).
- DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
- }
DCHECK_IMPLIES(kPlatformRequiresCodeRange,
requested <= kMaximalCodeRangeSize);
VirtualMemoryCage::ReservationParams params;
params.page_allocator = page_allocator;
params.reservation_size = requested;
- const size_t allocate_page_size = page_allocator->AllocatePageSize();
- params.base_alignment = base_alignment;
- params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
- params.page_size = MemoryChunk::kPageSize;
- params.requested_start_hint =
- GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
+ params.page_size = kPageSize;
params.jit =
v8_flags.jitless ? JitPermission::kNoJit : JitPermission::kMapAsJittable;
- if (!VirtualMemoryCage::InitReservation(params)) return false;
+ const size_t allocate_page_size = page_allocator->AllocatePageSize();
+ // TODO(v8:11880): Use base_alignment here once ChromeOS issue is fixed.
+ Address the_hint =
+ GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
+ the_hint = RoundDown(the_hint, base_alignment);
+
+ constexpr size_t kRadiusInMB =
+ kMaxPCRelativeCodeRangeInMB > 1024 ? kMaxPCRelativeCodeRangeInMB : 4096;
+ auto preferred_region = GetPreferredRegion(kRadiusInMB, kPageSize);
+
+ TRACE("=== Preferred region: [%p, %p)\n",
+ reinterpret_cast<void*>(preferred_region.begin()),
+ reinterpret_cast<void*>(preferred_region.end()));
+
+ // For configurations with enabled pointer compression and shared external
+ // code range we can afford trying harder to allocate code range near .text
+ // section.
+ const bool kShouldTryHarder = V8_EXTERNAL_CODE_SPACE_BOOL &&
+ COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL &&
+ v8_flags.better_code_range_allocation;
+
+ if (kShouldTryHarder) {
+ // Relax alignment requirement while trying to allocate code range inside
+ // preferred region.
+ params.base_alignment = kPageSize;
+
+ // TODO(v8:11880): consider using base::OS::GetFreeMemoryRangesWithin()
+ // to avoid attempts that are going to fail anyway.
+
+ VirtualMemoryCage candidate_cage;
+
+ // Try to allocate code range at the end of preferred region, by going
+ // towards the start in steps.
+ const int kAllocationTries = 16;
+ params.requested_start_hint =
+ RoundDown(preferred_region.end() - requested, kPageSize);
+ Address step =
+ RoundDown(preferred_region.size() / kAllocationTries, kPageSize);
+ for (int i = 0; i < kAllocationTries; i++) {
+ TRACE("=== Attempt #%d, hint=%p\n", i,
+ reinterpret_cast<void*>(params.requested_start_hint));
+ if (candidate_cage.InitReservation(params)) {
+ TRACE("=== Attempt #%d (%p): [%p, %p)\n", i,
+ reinterpret_cast<void*>(params.requested_start_hint),
+ reinterpret_cast<void*>(candidate_cage.region().begin()),
+ reinterpret_cast<void*>(candidate_cage.region().end()));
+ // Allocation succeeded, check if it's in the preferred range.
+ if (preferred_region.contains(candidate_cage.region())) break;
+ // This allocation is not the one we are searching for.
+ candidate_cage.Free();
+ }
+ if (step == 0) break;
+ params.requested_start_hint -= step;
+ }
+ if (candidate_cage.IsReserved()) {
+ *static_cast<VirtualMemoryCage*>(this) = std::move(candidate_cage);
+ }
+ }
+ if (!IsReserved()) {
+ // Last resort, use whatever region we get.
+ params.base_alignment = base_alignment;
+ params.requested_start_hint = the_hint;
+ if (!VirtualMemoryCage::InitReservation(params)) return false;
+ TRACE("=== Fallback attempt, hint=%p: [%p, %p)\n",
+ reinterpret_cast<void*>(params.requested_start_hint),
+ reinterpret_cast<void*>(region().begin()),
+ reinterpret_cast<void*>(region().end()));
+ }
-#ifdef V8_EXTERNAL_CODE_SPACE
- // Ensure that ExternalCodeCompressionScheme is applicable to all objects
- // stored in the code range.
- Address base = page_allocator_->begin();
- Address last = base + page_allocator_->size() - 1;
- CHECK_EQ(ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(base),
- ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(last));
-#endif // V8_EXTERNAL_CODE_SPACE
+ if (v8_flags.abort_on_far_code_range &&
+ !preferred_region.contains(region())) {
+ // We didn't manage to allocate the code range close enough.
+ FATAL("Failed to allocate code range close to the .text section");
+ }
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space. See
// https://cs.chromium.org/chromium/src/components/crash/content/
// app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
// for details.
+ const size_t reserved_area = GetWritableReservedAreaSize();
if (reserved_area > 0) {
- if (!reservation()->SetPermissions(reservation()->address(), reserved_area,
+ CHECK_LE(reserved_area, kPageSize);
+ // Exclude the reserved area from further allocations.
+ CHECK(page_allocator_->AllocatePagesAt(base(), kPageSize,
+ PageAllocator::kNoAccess));
+ // Commit required amount of writable memory.
+ if (!reservation()->SetPermissions(base(), reserved_area,
PageAllocator::kReadWrite)) {
return false;
}
+#if defined(V8_OS_WIN64)
+ if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+ win64_unwindinfo::RegisterNonABICompliantCodeRange(
+ reinterpret_cast<void*>(base()), size());
+ }
+#endif // V8_OS_WIN64
}
+
if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT &&
params.jit == JitPermission::kMapAsJittable) {
+ // Should the reserved area ever become non-empty we shouldn't mark it as
+ // RWX below.
+ CHECK_EQ(reserved_area, 0);
void* base = reinterpret_cast<void*>(page_allocator_->begin());
size_t size = page_allocator_->size();
CHECK(params.page_allocator->SetPermissions(
@@ -178,8 +240,83 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
return true;
}
+// Preferred region for the code range is an intersection of the following
+// regions:
+// a) [builtins - kMaxPCRelativeDistance, builtins + kMaxPCRelativeDistance)
+// b) [RoundDown(builtins, 4GB), RoundUp(builtins, 4GB)).
+// Requirement (a) is there to avoid remapping of embedded builtins into
+// the code range on architectures where the PC-relative jump/call distance
+// is big enough.
+// Requirement (b) aims to help CPU branch predictors in general and, in case
+// V8_EXTERNAL_CODE_SPACE is enabled, it ensures that
+// ExternalCodeCompressionScheme works for all pointers in the code range.
+// static
+base::AddressRegion CodeRange::GetPreferredRegion(size_t radius_in_megabytes,
+ size_t allocate_page_size) {
+#ifdef V8_TARGET_ARCH_64_BIT
+ // Compute builtins location.
+ Address embedded_blob_code_start =
+ reinterpret_cast<Address>(Isolate::CurrentEmbeddedBlobCode());
+ Address embedded_blob_code_end;
+ if (embedded_blob_code_start == kNullAddress) {
+ // When there's no embedded blob use address of a function from the binary
+ // as an approximation.
+ embedded_blob_code_start =
+ FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
+ embedded_blob_code_end = embedded_blob_code_start + 1;
+ } else {
+ embedded_blob_code_end =
+ embedded_blob_code_start + Isolate::CurrentEmbeddedBlobCodeSize();
+ }
+
+ // Fulfil requirement (a).
+ constexpr size_t max_size = std::numeric_limits<size_t>::max();
+ size_t radius = radius_in_megabytes * MB;
+
+ Address region_start =
+ RoundUp(embedded_blob_code_end - radius, allocate_page_size);
+ if (region_start > embedded_blob_code_end) {
+ // |region_start| underflowed.
+ region_start = 0;
+ }
+ Address region_end =
+ RoundDown(embedded_blob_code_start + radius, allocate_page_size);
+ if (region_end < embedded_blob_code_start) {
+ // |region_end| overflowed.
+ region_end = RoundDown(max_size, allocate_page_size);
+ }
+
+ // Fulfil requirement (b).
+ constexpr size_t k4GB = size_t{4} * GB;
+ Address four_gb_cage_start = RoundDown(embedded_blob_code_start, k4GB);
+ Address four_gb_cage_end = four_gb_cage_start + k4GB;
+
+ region_start = std::max(region_start, four_gb_cage_start);
+ region_end = std::min(region_end, four_gb_cage_end);
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // If ExternalCodeCompressionScheme ever changes then the requirements might
+ // need to be updated.
+ static_assert(k4GB <= kPtrComprCageReservationSize);
+ DCHECK_EQ(four_gb_cage_start,
+ ExternalCodeCompressionScheme::PrepareCageBaseAddress(
+ embedded_blob_code_start));
+#endif // V8_EXTERNAL_CODE_SPACE
+
+ return base::AddressRegion(region_start, region_end - region_start);
+#else
+ return {};
+#endif // V8_TARGET_ARCH_64_BIT
+}
+
void CodeRange::Free() {
if (IsReserved()) {
+#if defined(V8_OS_WIN64)
+ if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+ win64_unwindinfo::UnregisterNonABICompliantCodeRange(
+ reinterpret_cast<void*>(base()));
+ }
+#endif // V8_OS_WIN64
GetCodeRangeAddressHint()->NotifyFreedCodeRange(
reservation()->region().begin(), reservation()->region().size());
VirtualMemoryCage::Free();
@@ -307,25 +444,40 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
return embedded_blob_code_copy;
}
+namespace {
+
+CodeRange* process_wide_code_range_ = nullptr;
+
+V8_DECLARE_ONCE(init_code_range_once);
+void InitProcessWideCodeRange(v8::PageAllocator* page_allocator,
+ size_t requested_size) {
+ CodeRange* code_range = new CodeRange();
+ if (!code_range->InitReservation(page_allocator, requested_size)) {
+ V8::FatalProcessOutOfMemory(
+ nullptr, "Failed to reserve virtual memory for CodeRange");
+ }
+ process_wide_code_range_ = code_range;
+#ifdef V8_EXTERNAL_CODE_SPACE
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ ExternalCodeCompressionScheme::InitBase(
+ ExternalCodeCompressionScheme::PrepareCageBaseAddress(
+ code_range->base()));
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#endif // V8_EXTERNAL_CODE_SPACE
+}
+} // namespace
+
// static
-std::shared_ptr<CodeRange> CodeRange::EnsureProcessWideCodeRange(
+CodeRange* CodeRange::EnsureProcessWideCodeRange(
v8::PageAllocator* page_allocator, size_t requested_size) {
- base::MutexGuard guard(process_wide_code_range_creation_mutex_.Pointer());
- std::shared_ptr<CodeRange> code_range = process_wide_code_range_.Get().lock();
- if (!code_range) {
- code_range = std::make_shared<CodeRange>();
- if (!code_range->InitReservation(page_allocator, requested_size)) {
- V8::FatalProcessOutOfMemory(
- nullptr, "Failed to reserve virtual memory for CodeRange");
- }
- *process_wide_code_range_.Pointer() = code_range;
- }
- return code_range;
+ base::CallOnce(&init_code_range_once, InitProcessWideCodeRange,
+ page_allocator, requested_size);
+ return process_wide_code_range_;
}
// static
-std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
- return process_wide_code_range_.Get().lock();
+CodeRange* CodeRange::GetProcessWideCodeRange() {
+ return process_wide_code_range_;
}
} // namespace internal
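GetPreferredRegion in the hunk above is essentially a range intersection: take the page-rounded band within PC-relative reach of the embedded blob and clamp it to the 4GB-aligned cage that contains the blob. A self-contained 64-bit sketch of that computation (simplified types, overflow handling omitted; not the real implementation):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    struct Region { uintptr_t begin; uintptr_t end; };

    Region PreferredRegion(uintptr_t blob_start, uintptr_t blob_end,
                           uintptr_t radius, uintptr_t page) {
      auto round_down = [](uintptr_t v, uintptr_t a) { return v - v % a; };
      // Requirement (a): stay within PC-relative reach of the blob.
      uintptr_t begin =
          round_down(blob_end > radius ? blob_end - radius : 0, page);
      uintptr_t end = round_down(blob_start + radius, page);
      // Requirement (b): stay within the 4GB cage containing the blob.
      const uintptr_t k4GB = uintptr_t{4} << 30;
      uintptr_t cage_begin = round_down(blob_start, k4GB);
      return {std::max(begin, cage_begin), std::min(end, cage_begin + k4GB)};
    }

    int main() {
      // Hypothetical numbers: a 1 MB blob at 4 GB, 128 MB radius, 4 KB pages.
      Region r = PreferredRegion(uintptr_t{1} << 32,
                                 (uintptr_t{1} << 32) + (1u << 20),
                                 uintptr_t{128} << 20, 4096);
      std::printf("[%#llx, %#llx)\n",
                  static_cast<unsigned long long>(r.begin),
                  static_cast<unsigned long long>(r.end));
    }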
diff --git a/deps/v8/src/heap/code-range.h b/deps/v8/src/heap/code-range.h
index 4fcea5f26f..e6db177833 100644
--- a/deps/v8/src/heap/code-range.h
+++ b/deps/v8/src/heap/code-range.h
@@ -11,6 +11,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"
+#include "v8-internal.h"
namespace v8 {
namespace internal {
@@ -44,33 +45,32 @@ class CodeRangeAddressHint {
// A code range is a virtual memory cage that may contain executable code. It
// has the following layout.
//
-// +------------+-----+---------------- ~~~ -+
-// | RW | ... | ... |
-// +------------+-----+----------------- ~~~ -+
-// ^ ^ ^
-// start base allocatable base
+// +---------+-----+----------------- ~~~ -+
+// | RW | ... | ... |
+// +---------+-----+------------------ ~~~ -+
+// ^ ^
+// base allocatable base
//
-// <------------> <------------------------>
-// reserved allocatable region
-// <------------------------------------------->
-// code region
+// <--------> <------------------------->
+// reserved allocatable region
+// <----------------------------------------->
+// CodeRange
//
// The start of the reservation may include reserved page with read-write access
-// as required by some platforms (Win64). The cage's page allocator does not
-// control the optional reserved page in the beginning of the code region.
+// as required by some platforms (Win64), followed by an unmapped region that
+// makes the allocatable base MemoryChunk::kAlignment-aligned. The cage's page
+// allocator explicitly marks the optional reserved page as occupied, so it's
+// excluded from further allocations.
//
// The following conditions hold:
-// 1) |reservation()->region()| >= |optional RW pages| +
-// |reservation()->page_allocator()|
-// 2) |reservation()| is AllocatePageSize()-aligned
-// 3) |reservation()->page_allocator()| (i.e. allocatable base) is
-// MemoryChunk::kAlignment-aligned
-// 4) |base()| is CommitPageSize()-aligned
+// 1) |reservation()->region()| == [base(), base() + size()[,
+// 2) if optional RW pages are not necessary, then |base| == |allocatable base|,
+// 3) both |base| and |allocatable base| are MemoryChunk::kAlignment-aligned.
class CodeRange final : public VirtualMemoryCage {
public:
V8_EXPORT_PRIVATE ~CodeRange() override;
- // Returns the size of the initial area of a code-range, which is marked
+ // Returns the size of the initial area of a code range, which is marked
// writable and reserved to contain unwind information.
static size_t GetWritableReservedAreaSize();
@@ -97,21 +97,6 @@ class CodeRange final : public VirtualMemoryCage {
return embedded_blob_code_copy_.load(std::memory_order_acquire);
}
-#ifdef V8_OS_WIN64
- // 64-bit Windows needs to track how many Isolates are using the CodeRange for
- // registering and unregistering of unwind info. Note that even though
- // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count should
- // not be used for synchronization as it's usually implemented with a relaxed
- // read.
- uint32_t AtomicIncrementUnwindInfoUseCount() {
- return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
- }
-
- uint32_t AtomicDecrementUnwindInfoUseCount() {
- return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
- }
-#endif // V8_OS_WIN64
-
bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
void Free();
@@ -129,14 +114,17 @@ class CodeRange final : public VirtualMemoryCage {
const uint8_t* embedded_blob_code,
size_t embedded_blob_code_size);
- static std::shared_ptr<CodeRange> EnsureProcessWideCodeRange(
+ static CodeRange* EnsureProcessWideCodeRange(
v8::PageAllocator* page_allocator, size_t requested_size);
// If InitializeProcessWideCodeRangeOnce has been called, returns the
- // initialized CodeRange. Otherwise returns an empty std::shared_ptr.
- V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
+ // initialized CodeRange. Otherwise returns a null pointer.
+ V8_EXPORT_PRIVATE static CodeRange* GetProcessWideCodeRange();
private:
+ static base::AddressRegion GetPreferredRegion(size_t radius_in_megabytes,
+ size_t allocate_page_size);
+
// Used when short builtin calls are enabled, where embedded builtins are
// copied into the CodeRange so calls can be nearer.
std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};
@@ -144,10 +132,6 @@ class CodeRange final : public VirtualMemoryCage {
// When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
// race during Isolate::Init.
base::Mutex remap_embedded_builtins_mutex_;
-
-#ifdef V8_OS_WIN64
- std::atomic<uint32_t> unwindinfo_use_count_{0};
-#endif
};
} // namespace internal
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index 002aa44784..66d1791c26 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -199,14 +199,19 @@ void CodeStatistics::CollectCommentStatistics(Isolate* isolate,
// Collects code comment statistics.
void CodeStatistics::CollectCodeCommentStatistics(AbstractCode obj,
Isolate* isolate) {
- // Bytecode objects do not contain RelocInfo. Off-heap builtins might contain
- // comments but they are a part of binary so it doesn't make sense to account
- // them in the stats.
- // Only process code objects for code comment statistics.
- PtrComprCageBase cage_base(isolate);
+ // Bytecode objects do not contain RelocInfo.
+ PtrComprCageBase cage_base{isolate};
if (!obj.IsCode(cage_base)) return;
Code code = Code::cast(obj);
+
+ // Off-heap builtins might contain comments, but they are part of the binary,
+ // so it doesn't make sense to account for them in the stats.
+ // TODO(jgruber): We can change this to `IsBuiltin` once it's guaranteed that
+ // non-builtin Code objects have an instruction_stream at all times (even
+ // during initialization).
+ if (!obj.has_instruction_stream(cage_base)) return;
+
CodeCommentsIterator cit(code.code_comments(), code.code_comments_size());
int delta = 0;
int prev_pc_offset = 0;
diff --git a/deps/v8/src/heap/combined-heap.cc b/deps/v8/src/heap/combined-heap.cc
index 3079e600f2..0416bb62a4 100644
--- a/deps/v8/src/heap/combined-heap.cc
+++ b/deps/v8/src/heap/combined-heap.cc
@@ -10,8 +10,7 @@ namespace internal {
CombinedHeapObjectIterator::CombinedHeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
- : safepoint_scope_(heap),
- heap_iterator_(heap, filtering),
+ : heap_iterator_(heap, filtering),
ro_heap_iterator_(heap->isolate()->read_only_heap()) {}
HeapObject CombinedHeapObjectIterator::Next() {
diff --git a/deps/v8/src/heap/combined-heap.h b/deps/v8/src/heap/combined-heap.h
index 7a7e30a8a8..b8627686a2 100644
--- a/deps/v8/src/heap/combined-heap.h
+++ b/deps/v8/src/heap/combined-heap.h
@@ -26,7 +26,6 @@ class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
HeapObject Next();
private:
- IsolateSafepointScope safepoint_scope_;
HeapObjectIterator heap_iterator_;
ReadOnlyHeapObjectIterator ro_heap_iterator_;
};
diff --git a/deps/v8/src/heap/concurrent-allocator-inl.h b/deps/v8/src/heap/concurrent-allocator-inl.h
index efce44d363..9b9ab114e6 100644
--- a/deps/v8/src/heap/concurrent-allocator-inl.h
+++ b/deps/v8/src/heap/concurrent-allocator-inl.h
@@ -23,6 +23,7 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
AllocationOrigin origin) {
size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
DCHECK(!v8_flags.enable_third_party_heap);
+ DCHECK_EQ(origin == AllocationOrigin::kGC, context_ == Context::kGC);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
if (local_heap_) local_heap_->VerifyCurrent();
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 6125d18f19..fac728b454 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -80,8 +80,13 @@ void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
}
ConcurrentAllocator::ConcurrentAllocator(LocalHeap* local_heap,
- PagedSpace* space)
- : local_heap_(local_heap), space_(space), owning_heap_(space_->heap()) {}
+ PagedSpace* space, Context context)
+ : local_heap_(local_heap),
+ space_(space),
+ owning_heap_(space_->heap()),
+ context_(context) {
+ DCHECK_IMPLIES(!local_heap_, context_ == Context::kGC);
+}
void ConcurrentAllocator::FreeLinearAllocationArea() {
// The code page of the linear allocation area needs to be unprotected
@@ -90,8 +95,7 @@ void ConcurrentAllocator::FreeLinearAllocationArea() {
if (IsLabValid() && space_->identity() == CODE_SPACE) {
optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
}
- if (lab_.top() != lab_.limit() &&
- owning_heap()->incremental_marking()->black_allocation()) {
+ if (lab_.top() != lab_.limit() && IsBlackAllocationEnabled()) {
Page::FromAddress(lab_.top())
->DestroyBlackAreaBackground(lab_.top(), lab_.limit());
}
@@ -118,7 +122,8 @@ void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
if (space_->identity() == CODE_SPACE) {
optional_rwx_write_scope.emplace(
- "Marking Code objects requires write access to the Code page header");
+ "Marking InstructionStream objects requires write access to the "
+ "Code page header");
}
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
@@ -132,7 +137,8 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
if (space_->identity() == CODE_SPACE) {
optional_rwx_write_scope.emplace(
- "Marking Code objects requires write access to the Code page header");
+ "Marking InstructionStream objects requires write access to the "
+ "Code page header");
}
Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
limit);
@@ -225,7 +231,8 @@ ConcurrentAllocator::AllocateFromSpaceFreeList(size_t min_size_in_bytes,
}
}
- if (owning_heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap_) &&
+ if (owning_heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap_,
+ origin) &&
owning_heap()->CanExpandOldGenerationBackground(local_heap_,
space_->AreaSize())) {
result = space_->TryExpandBackground(max_size_in_bytes);
@@ -277,7 +284,8 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
}
bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
- return owning_heap()->incremental_marking()->black_allocation();
+ return context_ == Context::kNotGC &&
+ owning_heap()->incremental_marking()->black_allocation();
}
void ConcurrentAllocator::MakeLabIterable() {
diff --git a/deps/v8/src/heap/concurrent-allocator.h b/deps/v8/src/heap/concurrent-allocator.h
index b9e003e6cb..f35e9cc989 100644
--- a/deps/v8/src/heap/concurrent-allocator.h
+++ b/deps/v8/src/heap/concurrent-allocator.h
@@ -35,11 +35,17 @@ class StressConcurrentAllocatorTask : public CancelableTask {
// Allocations are served from a TLAB if possible.
class ConcurrentAllocator {
public:
+ enum class Context {
+ kGC,
+ kNotGC,
+ };
+
static constexpr int kMinLabSize = 4 * KB;
static constexpr int kMaxLabSize = 32 * KB;
static constexpr int kMaxLabObjectSize = 2 * KB;
- ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space);
+ ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space,
+ Context context);
inline AllocationResult AllocateRaw(int object_size,
AllocationAlignment alignment,
@@ -96,6 +102,7 @@ class ConcurrentAllocator {
PagedSpace* const space_;
Heap* const owning_heap_;
LinearAllocationArea lab_;
+ const Context context_;
};
} // namespace internal
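The new Context parameter threads through to IsBlackAllocationEnabled(): allocations made on behalf of the GC itself must never be black-allocated, even while incremental marking is active, and the constructor DCHECK pins down that a missing local heap implies a GC-context allocator. A reduced sketch of that gating, under the assumption of a simple allocator wrapper (class and field names are illustrative, not the real V8 types):

#include <cassert>

namespace sketch {

enum class Context { kGC, kNotGC };

struct HeapState {
  bool black_allocation_active = false;  // set while incremental marking runs
};

class ConcurrentAllocatorSketch {
 public:
  ConcurrentAllocatorSketch(HeapState* heap, Context context)
      : heap_(heap), context_(context) {}

  // Mirrors the diff: GC-context allocators ignore black allocation so that
  // objects allocated by the collector itself are not pre-marked black.
  bool IsBlackAllocationEnabled() const {
    return context_ == Context::kNotGC && heap_->black_allocation_active;
  }

 private:
  HeapState* const heap_;
  const Context context_;
};

}  // namespace sketch

int main() {
  sketch::HeapState heap;
  heap.black_allocation_active = true;
  sketch::ConcurrentAllocatorSketch mutator_alloc(&heap, sketch::Context::kNotGC);
  sketch::ConcurrentAllocatorSketch gc_alloc(&heap, sketch::Context::kGC);
  assert(mutator_alloc.IsBlackAllocationEnabled());
  assert(!gc_alloc.IsBlackAllocationEnabled());
  return 0;
}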
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 652037e2e4..bb9f1ae917 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -23,6 +23,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
+#include "src/heap/object-lock.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/weak-object-worklists.h"
@@ -37,6 +38,29 @@
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"
+// These strings can be sources of safe string transitions. Transitions are safe
+// if they don't result in invalidated slots. It's safe to read the length field
+// on such strings, as that field is common to all string shapes.
+//
+// No special visitors are generated for such strings.
+// V(VisitorId, Typename)
+#define SAFE_STRING_TRANSITION_SOURCES(V) \
+ V(SeqOneByteString, SeqOneByteString) \
+ V(SeqTwoByteString, SeqTwoByteString)
+
+// These strings can be sources of unsafe string transitions.
+// V(VisitorId, TypeName)
+#define UNSAFE_STRING_TRANSITION_SOURCES(V) \
+ V(ExternalString, ExternalString) \
+ V(ConsString, ConsString) \
+ V(SlicedString, SlicedString)
+
+// V(VisitorId, TypeName)
+#define UNSAFE_STRING_TRANSITION_TARGETS(V) \
+ UNSAFE_STRING_TRANSITION_SOURCES(V) \
+ V(ShortcutCandidate, ConsString) \
+ V(ThinString, ThinString)
+
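The three lists above are X-macros: each visitor below instantiates them with a different macro body to stamp out per-type cases (casts, Visit* methods, switch cases) without repeating the type names. A minimal, self-contained illustration of the pattern (the example types and handler are invented):

#include <cstdio>

// List of (Enumerator, Name) pairs, analogous to the string transition lists.
#define EXAMPLE_TYPES(V) \
  V(ConsLike, "cons")    \
  V(SlicedLike, "sliced")

// First instantiation: generate an enum of tags.
enum class Tag {
#define DEFINE_TAG(Enumerator, Name) Enumerator,
  EXAMPLE_TYPES(DEFINE_TAG)
#undef DEFINE_TAG
};

// Second instantiation: generate a switch with one case per listed type.
const char* Describe(Tag tag) {
  switch (tag) {
#define TAG_CASE(Enumerator, Name) \
  case Tag::Enumerator:            \
    return Name;
    EXAMPLE_TYPES(TAG_CASE)
#undef TAG_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", Describe(Tag::ConsLike));  // prints "cons"
  return 0;
}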
namespace v8 {
namespace internal {
@@ -65,190 +89,33 @@ class ConcurrentMarkingState final
MemoryChunkDataMap* memory_chunk_data_;
};
-// Helper class for storing in-object slot addresses and values.
-class SlotSnapshot {
- public:
- SlotSnapshot()
- : number_of_object_slots_(0), number_of_external_pointer_slots_(0) {}
- SlotSnapshot(const SlotSnapshot&) = delete;
- SlotSnapshot& operator=(const SlotSnapshot&) = delete;
- int number_of_object_slots() const { return number_of_object_slots_; }
- int number_of_external_pointer_slots() const {
- return number_of_external_pointer_slots_;
- }
- ObjectSlot object_slot(int i) const { return object_snapshot_[i].first; }
- Object object_value(int i) const { return object_snapshot_[i].second; }
- ExternalPointerSlot external_pointer_slot(int i) const {
- return external_pointer_snapshot_[i].first;
- }
- ExternalPointerTag external_pointer_tag(int i) const {
- return external_pointer_snapshot_[i].second;
- }
- void clear() {
- number_of_object_slots_ = 0;
- number_of_external_pointer_slots_ = 0;
- }
- void add(ObjectSlot slot, Object value) {
- DCHECK_LT(number_of_object_slots_, kMaxObjectSlots);
- object_snapshot_[number_of_object_slots_++] = {slot, value};
- }
- void add(ExternalPointerSlot slot, ExternalPointerTag tag) {
- DCHECK_LT(number_of_external_pointer_slots_, kMaxExternalPointerSlots);
- external_pointer_snapshot_[number_of_external_pointer_slots_++] = {slot,
- tag};
- }
-
- private:
- // Maximum number of pointer slots of objects we use snapshotting for.
- // ConsStrings can have 3 (Map + Left + Right) pointers.
- static constexpr int kMaxObjectSlots = 3;
- // Maximum number of external pointer slots of objects we use snapshotting
- // for. ExternalStrings can have 2 (resource + cached data) external pointers.
- static constexpr int kMaxExternalPointerSlots = 2;
- int number_of_object_slots_;
- int number_of_external_pointer_slots_;
- std::pair<ObjectSlot, Object> object_snapshot_[kMaxObjectSlots];
- std::pair<ExternalPointerSlot, ExternalPointerTag>
- external_pointer_snapshot_[kMaxExternalPointerSlots];
-};
-
class ConcurrentMarkingVisitorUtility {
public:
- template <typename Visitor, typename T,
- typename TBodyDescriptor = typename T::BodyDescriptor>
- static int VisitJSObjectSubclass(Visitor* visitor, Map map, T object) {
- if (!visitor->ShouldVisit(object)) return 0;
- int size = TBodyDescriptor::SizeOf(map, object);
- int used_size = map.UsedInstanceSize();
- DCHECK_LE(used_size, size);
- DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
- // It is important to visit only the used field and ignore the slack fields
- // because the slack fields may be trimmed concurrently.
- TBodyDescriptor::IterateBody(map, object, used_size, visitor);
- return size;
- }
-
template <typename Visitor, typename T>
- static int VisitJSObjectSubclassFast(Visitor* visitor, Map map, T object) {
- using TBodyDescriptor = typename T::FastBodyDescriptor;
- return VisitJSObjectSubclass<Visitor, T, TBodyDescriptor>(visitor, map,
- object);
- }
-
- template <typename Visitor>
- static void VisitPointersInSnapshot(Visitor* visitor, HeapObject host,
- const SlotSnapshot& snapshot) {
- for (int i = 0; i < snapshot.number_of_object_slots(); i++) {
- ObjectSlot slot = snapshot.object_slot(i);
- Object object = snapshot.object_value(i);
- DCHECK(!HasWeakHeapObjectTag(object));
- if (!object.IsHeapObject()) continue;
- HeapObject heap_object = HeapObject::cast(object);
- visitor->SynchronizePageAccess(heap_object);
- if (!visitor->ShouldMarkObject(heap_object)) continue;
- visitor->MarkObject(host, heap_object);
- visitor->RecordSlot(host, slot, heap_object);
- }
- }
-
- template <typename Visitor>
- static void VisitExternalPointersInSnapshot(Visitor* visitor, HeapObject host,
- const SlotSnapshot& snapshot) {
- for (int i = 0; i < snapshot.number_of_external_pointer_slots(); i++) {
- ExternalPointerSlot slot = snapshot.external_pointer_slot(i);
- ExternalPointerTag tag = snapshot.external_pointer_tag(i);
- visitor->VisitExternalPointer(host, slot, tag);
+ static int VisitStringLocked(Visitor* visitor, T object) {
+ SharedObjectLockGuard guard(object);
+ CHECK(visitor->ShouldVisit(object));
+ visitor->VisitMapPointerIfNeeded(object);
+ // The object has been locked. At this point exclusive access is guaranteed
+ // but we must re-read the map and check whether the string has
+ // transitioned.
+ Map map = object.map();
+ int size;
+ switch (map.visitor_id()) {
+#define UNSAFE_STRING_TRANSITION_TARGET_CASE(VisitorId, TypeName) \
+ case kVisit##VisitorId: \
+ size = TypeName::BodyDescriptor::SizeOf(map, object); \
+ TypeName::BodyDescriptor::IterateBody( \
+ map, TypeName::unchecked_cast(object), size, visitor); \
+ break;
+
+ UNSAFE_STRING_TRANSITION_TARGETS(UNSAFE_STRING_TRANSITION_TARGET_CASE)
+#undef UNSAFE_STRING_TRANSITION_TARGET_CASE
+ default:
+ UNREACHABLE();
}
- }
-
- template <typename Visitor, typename T>
- static int VisitFullyWithSnapshot(Visitor* visitor, Map map, T object) {
- using TBodyDescriptor = typename T::BodyDescriptor;
- int size = TBodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot =
- MakeSlotSnapshot<Visitor, T, TBodyDescriptor>(visitor, map, object,
- size);
- if (!visitor->ShouldVisit(object)) return 0;
- ConcurrentMarkingVisitorUtility::VisitPointersInSnapshot(visitor, object,
- snapshot);
- ConcurrentMarkingVisitorUtility::VisitExternalPointersInSnapshot(
- visitor, object, snapshot);
return size;
}
-
- template <typename Visitor, typename T, typename TBodyDescriptor>
- static const SlotSnapshot& MakeSlotSnapshot(Visitor* visitor, Map map,
- T object, int size) {
- SlotSnapshottingVisitor slot_snaphotting_visitor(visitor->slot_snapshot(),
- visitor->cage_base(),
- visitor->code_cage_base());
- slot_snaphotting_visitor.VisitPointer(object, object.map_slot());
- TBodyDescriptor::IterateBody(map, object, size, &slot_snaphotting_visitor);
- return *(visitor->slot_snapshot());
- }
-
- // Helper class for collecting in-object slot addresses and values.
- class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
- public:
- explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot,
- PtrComprCageBase cage_base,
- PtrComprCageBase code_cage_base)
- : ObjectVisitorWithCageBases(cage_base, code_cage_base),
- slot_snapshot_(slot_snapshot) {
- slot_snapshot_->clear();
- }
-
- void VisitPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) override {
- for (ObjectSlot p = start; p < end; ++p) {
- Object object = p.Relaxed_Load(cage_base());
- slot_snapshot_->add(p, object);
- }
- }
-
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Object code = slot.Relaxed_Load(code_cage_base());
- slot_snapshot_->add(ObjectSlot(slot.address()), code);
- }
-
- void VisitPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- // This should never happen, because we don't use snapshotting for objects
- // which contain weak references.
- UNREACHABLE();
- }
-
- void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
- ExternalPointerTag tag) override {
- slot_snapshot_->add(slot, tag);
- }
-
- void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
- // This should never happen, because snapshotting is performed only on
- // some String subclasses.
- UNREACHABLE();
- }
-
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
- // This should never happen, because snapshotting is performed only on
- // some String subclasses.
- UNREACHABLE();
- }
-
- void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) override {
- // This should never happen, because snapshotting is performed only on
- // some String subclasses.
- UNREACHABLE();
- }
-
- private:
- SlotSnapshot* slot_snapshot_;
- };
};
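VisitStringLocked replaces the old slot-snapshotting protocol: instead of copying slots up front, it takes a per-object lock and then re-reads the map, because the string may have transitioned (for example to a ThinString) between dispatch and acquiring the lock. A simplified standalone model of that re-check-under-lock pattern (the Kind enum and Transitionable struct are invented for illustration, not V8 types):

#include <cstdio>
#include <mutex>

namespace sketch {

enum class Kind { kCons, kThin };

// Stands in for a string whose representation can change concurrently.
struct Transitionable {
  std::mutex lock;   // per-object lock, like SharedObjectLockGuard
  Kind kind = Kind::kCons;
};

// The caller dispatched on a kind read *before* locking; inside the lock the
// kind is read again and the visit is routed based on the fresh value.
int VisitLocked(Transitionable& object) {
  std::lock_guard<std::mutex> guard(object.lock);
  switch (object.kind) {  // re-read under exclusive access
    case Kind::kCons:
      std::puts("visiting as cons");
      return 3;  // e.g. number of slots visited
    case Kind::kThin:
      std::puts("visiting as thin");
      return 1;
  }
  return 0;
}

}  // namespace sketch

int main() {
  sketch::Transitionable s;
  s.kind = sketch::Kind::kThin;  // pretend a concurrent transition happened
  return sketch::VisitLocked(s) == 1 ? 0 : 1;
}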
class YoungGenerationConcurrentMarkingVisitor final
@@ -263,148 +130,49 @@ class YoungGenerationConcurrentMarkingVisitor final
heap->isolate(), worklists_local),
marking_state_(heap->isolate(), memory_chunk_data) {}
- bool ShouldMarkObject(HeapObject object) const {
- return !object.InSharedHeap();
- }
-
- void SynchronizePageAccess(HeapObject heap_object) {
-#ifdef THREAD_SANITIZER
- // This is needed because TSAN does not process the memory fence
- // emitted after page initialization.
- BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
-#endif
- }
+ using YoungGenerationMarkingVisitorBase<
+ YoungGenerationConcurrentMarkingVisitor,
+ ConcurrentMarkingState>::VisitMapPointerIfNeeded;
template <typename T>
static V8_INLINE T Cast(HeapObject object) {
return T::cast(object);
}
- // Used by utility functions
- void MarkObject(HeapObject host, HeapObject object) {
- if (Heap::InYoungGeneration(object)) {
- SynchronizePageAccess(object);
- MarkObjectViaMarkingWorklist(object);
- }
- }
-
- // HeapVisitor overrides to implement the snapshotting protocol.
-
- bool AllowDefaultJSObjectVisit() { return false; }
-
- int VisitJSObject(Map map, JSObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSObjectFast(Map map, JSObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclassFast(this, map,
- object);
- }
-
- int VisitJSExternalObject(Map map, JSExternalObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
-#if V8_ENABLE_WEBASSEMBLY
- int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
- int VisitWasmSuspenderObject(Map map, WasmSuspenderObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-#endif // V8_ENABLE_WEBASSEMBLY
-
- int VisitJSWeakCollection(Map map, JSWeakCollection object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSFinalizationRegistry(Map map, JSFinalizationRegistry object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSDataView(Map map, JSDataView object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSFunction(Map map, JSFunction object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSTypedArray(Map map, JSTypedArray object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitConsString(Map map, ConsString object) {
- return ConcurrentMarkingVisitorUtility::VisitFullyWithSnapshot(this, map,
- object);
- }
-
- int VisitSlicedString(Map map, SlicedString object) {
- return ConcurrentMarkingVisitorUtility::VisitFullyWithSnapshot(this, map,
- object);
- }
-
- int VisitSeqOneByteString(Map map, SeqOneByteString object) {
- if (!ShouldVisit(object)) return 0;
- return SeqOneByteString::SizeFor(object.length(kAcquireLoad));
- }
-
- int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
- if (!ShouldVisit(object)) return 0;
- return SeqTwoByteString::SizeFor(object.length(kAcquireLoad));
- }
-
- void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
-
- // HeapVisitor override.
-
bool ShouldVisit(HeapObject object) {
- return marking_state_.GreyToBlack(object);
+ CHECK(marking_state_.GreyToBlack(object));
+ return true;
}
- bool ShouldVisitUnaccounted(HeapObject object) {
- return marking_state_.GreyToBlackUnaccounted(object);
+#define VISIT_AS_LOCKED_STRING(VisitorId, TypeName) \
+ int Visit##TypeName(Map map, TypeName object) { \
+ return ConcurrentMarkingVisitorUtility::VisitStringLocked(this, object); \
}
+ UNSAFE_STRING_TRANSITION_SOURCES(VISIT_AS_LOCKED_STRING)
+#undef VISIT_AS_LOCKED_STRING
+
+ void VisitMapPointer(HeapObject host) final { UNREACHABLE(); }
template <typename TSlot>
void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {}
- SlotSnapshot* slot_snapshot() { return &slot_snapshot_; }
-
ConcurrentMarkingState* marking_state() { return &marking_state_; }
private:
- template <typename T>
- int VisitLeftTrimmableArray(Map map, T object) {
- // The length() function checks that the length is a Smi.
- // This is not necessarily the case if the array is being left-trimmed.
- Object length = object.unchecked_length(kAcquireLoad);
- // No accounting here to avoid re-reading the length which could already
- // contain a non-SMI value when left-trimming happens concurrently.
- if (!ShouldVisitUnaccounted(object)) return 0;
- // The cached length must be the actual length as the array is not black.
- // Left trimming marks the array black before over-writing the length.
- DCHECK(length.IsSmi());
- int size = T::SizeFor(Smi::ToInt(length));
- marking_state_.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
- size);
- T::BodyDescriptor::IterateBody(map, object, size, this);
- return size;
- }
-
ConcurrentMarkingState marking_state_;
- SlotSnapshot slot_snapshot_;
};
+#define UNCHECKED_CAST(VisitorId, TypeName) \
+ template <> \
+ TypeName YoungGenerationConcurrentMarkingVisitor::Cast(HeapObject object) { \
+ return TypeName::unchecked_cast(object); \
+ }
+SAFE_STRING_TRANSITION_SOURCES(UNCHECKED_CAST)
+// Casts are also needed for the unsafe sources, since they are used for the
+// initial dispatch in HeapVisitor.
+UNSAFE_STRING_TRANSITION_SOURCES(UNCHECKED_CAST)
+#undef UNCHECKED_CAST
+
class ConcurrentMarkingVisitor final
: public MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState> {
@@ -424,152 +192,54 @@ class ConcurrentMarkingVisitor final
marking_state_(heap->isolate(), memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
+ using MarkingVisitorBase<ConcurrentMarkingVisitor,
+ ConcurrentMarkingState>::VisitMapPointerIfNeeded;
+
template <typename T>
static V8_INLINE T Cast(HeapObject object) {
return T::cast(object);
}
- // HeapVisitor overrides to implement the snapshotting protocol.
-
- bool AllowDefaultJSObjectVisit() { return false; }
-
- int VisitJSObject(Map map, JSObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSObjectFast(Map map, JSObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclassFast(this, map,
- object);
- }
-
- int VisitJSExternalObject(Map map, JSExternalObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
-#if V8_ENABLE_WEBASSEMBLY
- int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
- int VisitWasmSuspenderObject(Map map, WasmSuspenderObject object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-#endif // V8_ENABLE_WEBASSEMBLY
-
- int VisitJSWeakCollection(Map map, JSWeakCollection object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSFinalizationRegistry(Map map, JSFinalizationRegistry object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitJSSynchronizationPrimitive(Map map,
- JSSynchronizationPrimitive object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass(this, map,
- object);
- }
-
- int VisitConsString(Map map, ConsString object) {
- return ConcurrentMarkingVisitorUtility::VisitFullyWithSnapshot(this, map,
- object);
- }
-
- int VisitSlicedString(Map map, SlicedString object) {
- return ConcurrentMarkingVisitorUtility::VisitFullyWithSnapshot(this, map,
- object);
- }
-
- int VisitSeqOneByteString(Map map, SeqOneByteString object) {
- if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object);
- return SeqOneByteString::SizeFor(object.length(kAcquireLoad));
- }
-
- int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
- if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object);
- return SeqTwoByteString::SizeFor(object.length(kAcquireLoad));
- }
-
- int VisitExternalOneByteString(Map map, ExternalOneByteString object) {
- return ConcurrentMarkingVisitorUtility::VisitFullyWithSnapshot(this, map,
- object);
+ bool ShouldVisit(HeapObject object) {
+ CHECK(marking_state_.GreyToBlack(object));
+ return true;
}
- int VisitExternalTwoByteString(Map map, ExternalTwoByteString object) {
- return ConcurrentMarkingVisitorUtility::VisitFullyWithSnapshot(this, map,
- object);
+#define VISIT_AS_LOCKED_STRING(VisitorId, TypeName) \
+ int Visit##TypeName(Map map, TypeName object) { \
+ return ConcurrentMarkingVisitorUtility::VisitStringLocked(this, object); \
}
+ UNSAFE_STRING_TRANSITION_SOURCES(VISIT_AS_LOCKED_STRING)
+#undef VISIT_AS_LOCKED_STRING
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
bool ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
- if (marking_state_.WhiteToGrey(value)) {
+ if (marking_state_.TryMark(value)) {
local_marking_worklists_->Push(value);
return true;
}
- } else if (marking_state_.IsWhite(value)) {
+ } else if (marking_state_.IsUnmarked(value)) {
local_weak_objects_->next_ephemerons_local.Push(Ephemeron{key, value});
}
return false;
}
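ProcessEphemeron captures the standard ephemeron rule: the value is marked only once the key is known reachable; otherwise the pair is parked and retried later. A small standalone version of the same control flow using plain sets (the worklists and marking bits are simplified stand-ins for the real data structures):

#include <cstdio>
#include <set>
#include <vector>

using ObjectId = int;

struct Ephemeron {
  ObjectId key;
  ObjectId value;
};

// Marks value if key is already marked; otherwise defers the ephemeron so it
// can be reprocessed after more objects have been marked.
bool ProcessEphemeron(const Ephemeron& e, std::set<ObjectId>& marked,
                      std::vector<ObjectId>& worklist,
                      std::vector<Ephemeron>& deferred) {
  if (marked.count(e.key)) {
    if (marked.insert(e.value).second) {
      worklist.push_back(e.value);  // newly marked: visit its fields later
      return true;
    }
  } else if (!marked.count(e.value)) {
    deferred.push_back(e);  // retry once the key might have become reachable
  }
  return false;
}

int main() {
  std::set<ObjectId> marked = {1};
  std::vector<ObjectId> worklist;
  std::vector<Ephemeron> deferred;
  ProcessEphemeron({1, 2}, marked, worklist, deferred);  // key 1 marked -> marks 2
  ProcessEphemeron({3, 4}, marked, worklist, deferred);  // key 3 unmarked -> deferred
  std::printf("marked=%zu deferred=%zu\n", marked.size(), deferred.size());
  return 0;
}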
- // HeapVisitor override.
- bool ShouldVisit(HeapObject object) {
- return marking_state_.GreyToBlack(object);
- }
-
- bool ShouldVisitUnaccounted(HeapObject object) {
- return marking_state_.GreyToBlackUnaccounted(object);
- }
-
template <typename TSlot>
void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {
MarkCompactCollector::RecordSlot(object, slot, target);
}
- SlotSnapshot* slot_snapshot() { return &slot_snapshot_; }
+ ConcurrentMarkingState* marking_state() { return &marking_state_; }
private:
- template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
- int VisitJSObjectSubclass(Map map, T object) {
- return ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass<
- ConcurrentMarkingVisitor, T, TBodyDescriptor>(this, map, object);
- }
-
- template <typename T>
- int VisitLeftTrimmableArray(Map map, T object) {
- // The length() function checks that the length is a Smi.
- // This is not necessarily the case if the array is being left-trimmed.
- Object length = object.unchecked_length(kAcquireLoad);
- // No accounting here to avoid re-reading the length which could already
- // contain a non-SMI value when left-trimming happens concurrently.
- if (!ShouldVisitUnaccounted(object)) return 0;
- // The cached length must be the actual length as the array is not black.
- // Left trimming marks the array black before over-writing the length.
- DCHECK(length.IsSmi());
- int size = T::SizeFor(Smi::ToInt(length));
- marking_state_.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
- ALIGN_TO_ALLOCATION_ALIGNMENT(size));
- VisitMapPointer(object);
- T::BodyDescriptor::IterateBody(map, object, size, this);
- return size;
- }
-
- void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
- if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target))
- return;
+ void RecordRelocSlot(RelocInfo* rinfo, HeapObject target) {
+ if (!MarkCompactCollector::ShouldRecordRelocSlot(rinfo, target)) return;
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
+ MarkCompactCollector::ProcessRelocInfo(rinfo, target);
MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
if (!data.typed_slots) {
@@ -578,52 +248,27 @@ class ConcurrentMarkingVisitor final
data.typed_slots->Insert(info.slot_type, info.offset);
}
- ConcurrentMarkingState* marking_state() { return &marking_state_; }
-
TraceRetainingPathMode retaining_path_mode() {
return TraceRetainingPathMode::kDisabled;
}
ConcurrentMarkingState marking_state_;
MemoryChunkDataMap* memory_chunk_data_;
- SlotSnapshot slot_snapshot_;
friend class MarkingVisitorBase<ConcurrentMarkingVisitor,
ConcurrentMarkingState>;
};
-// Strings can change maps due to conversion to thin string or external strings.
-// Use unchecked cast to avoid data race in slow dchecks.
-template <>
-ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
- return ConsString::unchecked_cast(object);
-}
-
-template <>
-SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
- return SlicedString::unchecked_cast(object);
-}
-
-template <>
-ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
- return ThinString::unchecked_cast(object);
-}
-
-template <>
-SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
- return SeqOneByteString::unchecked_cast(object);
-}
-
-template <>
-SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
- return SeqTwoByteString::unchecked_cast(object);
-}
-
-// Fixed array can become a free space during left trimming.
-template <>
-FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
- return FixedArray::unchecked_cast(object);
-}
+#define UNCHECKED_CAST(VisitorId, TypeName) \
+ template <> \
+ TypeName ConcurrentMarkingVisitor::Cast(HeapObject object) { \
+ return TypeName::unchecked_cast(object); \
+ }
+SAFE_STRING_TRANSITION_SOURCES(UNCHECKED_CAST)
+// Casts are also needed for the unsafe sources, since they are used for the
+// initial dispatch in HeapVisitor.
+UNSAFE_STRING_TRANSITION_SOURCES(UNCHECKED_CAST)
+#undef UNCHECKED_CAST
// The Deserializer changes the map from StrongDescriptorArray to
// DescriptorArray
@@ -741,9 +386,8 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate,
WeakObjects::Local local_weak_objects(weak_objects_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, &local_weak_objects, heap_,
- mark_compact_epoch, code_flush_mode,
- heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
- &task_state->memory_chunk_data);
+ mark_compact_epoch, code_flush_mode, heap_->cpp_heap(),
+ should_keep_ages_unchanged, &task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
task_state->native_context_inferrer;
NativeContextStats& native_context_stats = task_state->native_context_stats;
@@ -770,7 +414,8 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate,
bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
bool done = false;
CodePageHeaderModificationScope rwx_write_scope(
- "Marking a Code object requires write access to the Code page header");
+ "Marking a InstructionStream object requires write access to the "
+ "Code page header");
while (!done) {
size_t current_marked_bytes = 0;
int objects_processed = 0;
@@ -781,6 +426,7 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate,
done = true;
break;
}
+ DCHECK(!object.InReadOnlySpace());
objects_processed++;
Address new_space_top = kNullAddress;
@@ -810,7 +456,10 @@ void ConcurrentMarking::RunMajor(JobDelegate* delegate,
local_marking_worklists.SwitchToContext(context);
}
}
- size_t visited_size = visitor.Visit(map, object);
+ const auto visited_size = visitor.Visit(map, object);
+ visitor.marking_state()->IncrementLiveBytes(
+ MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(object)),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size));
if (is_per_context_mode) {
native_context_stats.IncrementSize(
local_marking_worklists.Context(), map, object, visited_size);
@@ -876,7 +525,8 @@ void ConcurrentMarking::RunMinor(JobDelegate* delegate) {
TimedScope scope(&time_ms);
bool done = false;
CodePageHeaderModificationScope rwx_write_scope(
- "Marking a Code object requires write access to the Code page header");
+ "Marking a InstructionStream object requires write access to the "
+ "Code page header");
while (!done) {
size_t current_marked_bytes = 0;
int objects_processed = 0;
@@ -910,7 +560,13 @@ void ConcurrentMarking::RunMinor(JobDelegate* delegate) {
local_marking_worklists.PushOnHold(object);
} else {
Map map = object.map(isolate, kAcquireLoad);
- current_marked_bytes += visitor.Visit(map, object);
+ const auto visited_size = visitor.Visit(map, object);
+ current_marked_bytes += visited_size;
+ if (visited_size) {
+ visitor.marking_state()->IncrementLiveBytes(
+ MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(object)),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size));
+ }
}
}
marked_bytes += current_marked_bytes;
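A recurring change in this file is that the visitors now just return the visited size, and the RunMajor/RunMinor loops account live bytes centrally, aligned to the allocation alignment. A condensed sketch of that caller-side accounting loop (Visit, the chunk lookup, and the alignment constant are placeholders for the real machinery):

#include <cstddef>
#include <unordered_map>
#include <vector>

namespace sketch {

constexpr std::size_t kAlignment = 8;

// Rounds a visited size up to the allocation alignment, like
// ALIGN_TO_ALLOCATION_ALIGNMENT in the diff.
constexpr std::size_t AlignToAllocation(std::size_t size) {
  return (size + kAlignment - 1) & ~(kAlignment - 1);
}

struct Object {
  int chunk_id;      // which memory chunk the object lives on
  std::size_t size;  // what the visitor would report back
};

// Placeholder for visitor.Visit(map, object): returns bytes visited.
std::size_t Visit(const Object& o) { return o.size; }

// The marking loop: accounting happens once, here, instead of inside every
// individual Visit* method.
void DrainWorklist(const std::vector<Object>& worklist,
                   std::unordered_map<int, std::size_t>& live_bytes_per_chunk) {
  for (const Object& object : worklist) {
    const std::size_t visited_size = Visit(object);
    if (visited_size == 0) continue;  // already visited / skipped
    live_bytes_per_chunk[object.chunk_id] += AlignToAllocation(visited_size);
  }
}

}  // namespace sketch

int main() {
  std::unordered_map<int, std::size_t> live_bytes;
  sketch::DrainWorklist({{0, 13}, {0, 24}, {1, 7}}, live_bytes);
  return live_bytes[0] == 16 + 24 && live_bytes[1] == 8 ? 0 : 1;
}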
diff --git a/deps/v8/src/heap/conservative-stack-visitor.cc b/deps/v8/src/heap/conservative-stack-visitor.cc
index a9785fb284..6c50b7b9a7 100644
--- a/deps/v8/src/heap/conservative-stack-visitor.cc
+++ b/deps/v8/src/heap/conservative-stack-visitor.cc
@@ -5,7 +5,6 @@
#include "src/heap/conservative-stack-visitor.h"
#include "src/execution/isolate-inl.h"
-#include "src/heap/mark-compact.h"
#include "src/objects/visitors.h"
#ifdef V8_COMPRESS_POINTERS
@@ -17,26 +16,157 @@ namespace internal {
ConservativeStackVisitor::ConservativeStackVisitor(Isolate* isolate,
RootVisitor* delegate)
- : isolate_(isolate), delegate_(delegate) {}
+ : cage_base_(isolate),
+ delegate_(delegate),
+ allocator_(isolate->heap()->memory_allocator()),
+ collector_(delegate->collector()) {}
+
+namespace {
+
+// This utility function returns the highest address in the page that is lower
+// than maybe_inner_ptr, has its markbit set, and whose previous address (if it
+// exists) does not have its markbit set. This address is guaranteed to be the
+// start of a valid object in the page. In case the markbit corresponding to
+// maybe_inner_ptr is set, the function bails out and returns kNullAddress.
+Address FindPreviousObjectForConservativeMarking(const Page* page,
+ Address maybe_inner_ptr) {
+ auto* bitmap = page->marking_bitmap<AccessMode::NON_ATOMIC>();
+ const MarkBit::CellType* cells = bitmap->cells();
+
+ // The first actual bit of the bitmap, corresponding to page->area_start(),
+ // is at start_index which is somewhere in (not necessarily at the start of)
+ // start_cell_index.
+ const uint32_t start_index = page->AddressToMarkbitIndex(page->area_start());
+ const uint32_t start_cell_index = Bitmap::IndexToCell(start_index);
+ // We assume that all markbits before start_index are clear:
+ // SLOW_DCHECK(bitmap->AllBitsClearInRange(0, start_index));
+ // This has already been checked for the entire bitmap before starting marking
+ // by MarkCompactCollector::VerifyMarkbitsAreClean.
+
+ const uint32_t index = page->AddressToMarkbitIndex(maybe_inner_ptr);
+ uint32_t cell_index = Bitmap::IndexToCell(index);
+ const MarkBit::CellType mask = 1u << Bitmap::IndexInCell(index);
+ MarkBit::CellType cell = cells[cell_index];
+
+ // If the markbit is already set, bail out.
+ if ((cell & mask) != 0) return kNullAddress;
+
+ // Clear the bits corresponding to higher addresses in the cell.
+ cell &= ((~static_cast<MarkBit::CellType>(0)) >>
+ (Bitmap::kBitsPerCell - Bitmap::IndexInCell(index) - 1));
+
+ // Traverse the bitmap backwards, until we find a markbit that is set and
+ // whose previous markbit (if it exists) is unset.
+ // First, iterate backwards to find a cell with any set markbit.
+ while (cell == 0 && cell_index > start_cell_index) cell = cells[--cell_index];
+ if (cell == 0) {
+ DCHECK_EQ(start_cell_index, cell_index);
+ // We have reached the start of the page.
+ return page->area_start();
+ }
+
+ // We have found such a cell.
+ const uint32_t leading_zeros = base::bits::CountLeadingZeros(cell);
+ const uint32_t leftmost_ones =
+ base::bits::CountLeadingZeros(~(cell << leading_zeros));
+ const uint32_t index_of_last_leftmost_one =
+ Bitmap::kBitsPerCell - leading_zeros - leftmost_ones;
+
+ // If the leftmost sequence of set bits does not reach the start of the cell,
+ // we found it.
+ if (index_of_last_leftmost_one > 0) {
+ return page->MarkbitIndexToAddress(cell_index * Bitmap::kBitsPerCell +
+ index_of_last_leftmost_one);
+ }
+
+ // The leftmost sequence of set bits reaches the start of the cell. We must
+ // keep traversing backwards until we find the first unset markbit.
+ if (cell_index == start_cell_index) {
+ // We have reached the start of the page.
+ return page->area_start();
+ }
+
+ // Iterate backwards to find a cell with any unset markbit.
+ do {
+ cell = cells[--cell_index];
+ } while (~cell == 0 && cell_index > start_cell_index);
+ if (~cell == 0) {
+ DCHECK_EQ(start_cell_index, cell_index);
+ // We have reached the start of the page.
+ return page->area_start();
+ }
+
+ // We have found such a cell.
+ const uint32_t leading_ones = base::bits::CountLeadingZeros(~cell);
+ const uint32_t index_of_last_leading_one =
+ Bitmap::kBitsPerCell - leading_ones;
+ DCHECK_LT(0, index_of_last_leading_one);
+ return page->MarkbitIndexToAddress(cell_index * Bitmap::kBitsPerCell +
+ index_of_last_leading_one);
+}
+
+} // namespace
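The bitmap walk above can be hard to follow in cell-and-mask form. The following standalone sketch performs the same search on a toy bitmap: starting from a bit index, it bails out if that bit is already set, and otherwise returns the highest lower index whose bit is set while the bit before it is clear (the start of the closest preceding marked run), falling back to 0 when no lower bit is set. It deliberately uses a plain bool vector, so the cell arithmetic and CountLeadingZeros tricks of the real code are elided:

#include <cstdio>
#include <vector>

// Returns -1 if bits[index] is already set (nothing to do, mirroring the
// kNullAddress early return). Otherwise returns the index of the first bit of
// the closest marked run at a lower index, or 0 if no lower bit is set
// (mirroring "we have reached the start of the page").
int FindPreviousMarkedObjectStart(const std::vector<bool>& bits, int index) {
  if (bits[index]) return -1;
  for (int i = index - 1; i >= 0; --i) {
    if (bits[i] && (i == 0 || !bits[i - 1])) return i;  // start of a marked run
  }
  return 0;  // no set bit below index: fall back to the start of the area
}

int main() {
  //                       0  1  2  3  4  5  6  7
  std::vector<bool> bits = {0, 1, 1, 0, 0, 1, 0, 0};
  std::printf("%d\n", FindPreviousMarkedObjectStart(bits, 7));  // 5
  std::printf("%d\n", FindPreviousMarkedObjectStart(bits, 4));  // 1
  std::printf("%d\n", FindPreviousMarkedObjectStart(bits, 3));  // 1
  std::printf("%d\n", FindPreviousMarkedObjectStart(bits, 0));  // 0
  return 0;
}

FindBasePtrForMarking then walks forward from the returned start address, object by object, until it reaches the object containing maybe_inner_ptr, which is why the helper only needs to deliver a conservative lower bound rather than the exact base pointer.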
+
+// static
+Address ConservativeStackVisitor::FindBasePtrForMarking(
+ Address maybe_inner_ptr, MemoryAllocator* allocator,
+ GarbageCollector collector) {
+ // Check if the pointer is contained by a normal or large page owned by this
+ // heap. Bail out if it is not.
+ const MemoryChunk* chunk =
+ allocator->LookupChunkContainingAddress(maybe_inner_ptr);
+ if (chunk == nullptr) return kNullAddress;
+ DCHECK(chunk->Contains(maybe_inner_ptr));
+ // If it is contained in a large page, we want to mark the only object on it.
+ if (chunk->IsLargePage()) {
+ // This could be simplified if we could guarantee that there are no free
+ // space or filler objects in large pages. A few cctests violate this now.
+ HeapObject obj(static_cast<const LargePage*>(chunk)->GetObject());
+ PtrComprCageBase cage_base{chunk->heap()->isolate()};
+ return obj.IsFreeSpaceOrFiller(cage_base) ? kNullAddress : obj.address();
+ }
+ // Otherwise, we have a pointer inside a normal page.
+ const Page* page = static_cast<const Page*>(chunk);
+ // If it is not in the young generation and we're only interested in young
+ // generation pointers, we must ignore it.
+ if (Heap::IsYoungGenerationCollector(collector) && !page->InYoungGeneration())
+ return kNullAddress;
+ // If it is in the young generation "from" semispace, it is not used and we
+ // must ignore it, as its markbits may not be clean.
+ if (page->IsFromPage()) return kNullAddress;
+ // Try to find the address of a previous valid object on this page.
+ Address base_ptr =
+ FindPreviousObjectForConservativeMarking(page, maybe_inner_ptr);
+ // If the markbit is set, then we have an object that does not need to be
+ // marked.
+ if (base_ptr == kNullAddress) return kNullAddress;
+ // Iterate through the objects in the page forwards, until we find the object
+ // containing maybe_inner_ptr.
+ DCHECK_LE(base_ptr, maybe_inner_ptr);
+ PtrComprCageBase cage_base{page->heap()->isolate()};
+ while (true) {
+ HeapObject obj(HeapObject::FromAddress(base_ptr));
+ const int size = obj.Size(cage_base);
+ DCHECK_LT(0, size);
+ if (maybe_inner_ptr < base_ptr + size)
+ return obj.IsFreeSpaceOrFiller(cage_base) ? kNullAddress : base_ptr;
+ base_ptr += size;
+ DCHECK_LT(base_ptr, page->area_end());
+ }
+}
void ConservativeStackVisitor::VisitPointer(const void* pointer) {
auto address = reinterpret_cast<Address>(const_cast<void*>(pointer));
VisitConservativelyIfPointer(address);
#ifdef V8_COMPRESS_POINTERS
V8HeapCompressionScheme::ProcessIntermediatePointers(
- isolate_, address,
+ cage_base_, address,
[this](Address ptr) { VisitConservativelyIfPointer(ptr); });
#endif // V8_COMPRESS_POINTERS
}
void ConservativeStackVisitor::VisitConservativelyIfPointer(Address address) {
- Address base_ptr;
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_MB
- base_ptr = isolate_->heap()->mark_compact_collector()->FindBasePtrForMarking(
- address);
-#else
-#error "Some inner pointer resolution mechanism is needed"
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_MB
+ Address base_ptr = FindBasePtrForMarking(address, allocator_, collector_);
if (base_ptr == kNullAddress) return;
HeapObject obj = HeapObject::FromAddress(base_ptr);
Object root = obj;
diff --git a/deps/v8/src/heap/conservative-stack-visitor.h b/deps/v8/src/heap/conservative-stack-visitor.h
index e63ba5b673..432af420c1 100644
--- a/deps/v8/src/heap/conservative-stack-visitor.h
+++ b/deps/v8/src/heap/conservative-stack-visitor.h
@@ -6,11 +6,13 @@
#define V8_HEAP_CONSERVATIVE_STACK_VISITOR_H_
#include "include/v8-internal.h"
+#include "src/common/globals.h"
#include "src/heap/base/stack.h"
namespace v8 {
namespace internal {
+class MemoryAllocator;
class RootVisitor;
class V8_EXPORT_PRIVATE ConservativeStackVisitor
@@ -20,11 +22,25 @@ class V8_EXPORT_PRIVATE ConservativeStackVisitor
void VisitPointer(const void* pointer) final;
+ // This method finds an object header based on a `maybe_inner_ptr`. It returns
+ // `kNullAddress` if the parameter does not point to (the interior of) a valid
+ // heap object, or if it points to (the interior of) some object that is
+ // already marked as live (black or grey).
+ // The GarbageCollector parameter is only used to determine which kind of
+ // heap objects we are interested in. For MARK_COMPACTOR all heap objects are
+ // considered, whereas for young generation collectors we only consider
+ // objects in the young generation.
+ static Address FindBasePtrForMarking(Address maybe_inner_ptr,
+ MemoryAllocator* allocator,
+ GarbageCollector collector);
+
private:
void VisitConservativelyIfPointer(Address address);
- Isolate* isolate_ = nullptr;
- RootVisitor* delegate_ = nullptr;
+ const PtrComprCageBase cage_base_;
+ RootVisitor* const delegate_;
+ MemoryAllocator* const allocator_;
+ const GarbageCollector collector_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 6cd829bebc..7481a81c4a 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -16,7 +16,6 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
-#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate-inl.h"
#include "src/flags/flags.h"
@@ -38,19 +37,18 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/cppgc/unmarker.h"
#include "src/heap/cppgc/visitor.h"
-#include "src/heap/embedder-tracing-inl.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-tracer.h"
-#include "src/heap/global-handle-marking-visitor.h"
#include "src/heap/heap.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/sweeper.h"
+#include "src/heap/traced-handles-marking-visitor.h"
#include "src/init/v8.h"
#include "src/profiler/heap-profiler.h"
@@ -99,64 +97,6 @@ class MinorGCHeapGrowing
} // namespace internal
-namespace {
-
-START_ALLOW_USE_DEPRECATED()
-
-class V8ToCppGCReferencesVisitor final
- : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
- public:
- V8ToCppGCReferencesVisitor(
- cppgc::internal::MutatorMarkingState& marking_state,
- v8::internal::Isolate* isolate,
- const v8::WrapperDescriptor& wrapper_descriptor)
- : marking_state_(marking_state),
- isolate_(isolate),
- wrapper_descriptor_(wrapper_descriptor) {}
-
- void VisitTracedReference(const v8::TracedReference<v8::Value>& value) final {
- VisitHandle(value, value.WrapperClassId());
- }
-
- private:
- void VisitHandle(const v8::TracedReference<v8::Value>& value,
- uint16_t class_id) {
- DCHECK(!value.IsEmpty());
-
- const internal::JSObject js_object =
- *reinterpret_cast<const internal::JSObject* const&>(value);
- if (!js_object.ptr() || js_object.IsSmi() ||
- !js_object.MayHaveEmbedderFields())
- return;
-
- internal::LocalEmbedderHeapTracer::WrapperInfo info;
- if (!internal::LocalEmbedderHeapTracer::ExtractWrappableInfo(
- isolate_, js_object, wrapper_descriptor_, &info))
- return;
-
- marking_state_.MarkAndPush(
- cppgc::internal::HeapObjectHeader::FromObject(info.second));
- }
-
- cppgc::internal::MutatorMarkingState& marking_state_;
- v8::internal::Isolate* isolate_;
- const v8::WrapperDescriptor& wrapper_descriptor_;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-void TraceV8ToCppGCReferences(
- v8::internal::Isolate* isolate,
- cppgc::internal::MutatorMarkingState& marking_state,
- const v8::WrapperDescriptor& wrapper_descriptor) {
- DCHECK(isolate);
- V8ToCppGCReferencesVisitor forwarding_visitor(marking_state, isolate,
- wrapper_descriptor);
- isolate->traced_handles()->Iterate(&forwarding_visitor);
-}
-
-} // namespace
-
// static
constexpr uint16_t WrapperDescriptor::kUnknownEmbedderId;
@@ -213,14 +153,16 @@ namespace {
class CppgcPlatformAdapter final : public cppgc::Platform {
public:
- explicit CppgcPlatformAdapter(v8::Platform* platform) : platform_(platform) {}
+ explicit CppgcPlatformAdapter(v8::Platform* platform)
+ : platform_(platform),
+ page_allocator_(platform->GetPageAllocator()
+ ? platform->GetPageAllocator()
+ : &cppgc::internal::GetGlobalPageAllocator()) {}
CppgcPlatformAdapter(const CppgcPlatformAdapter&) = delete;
CppgcPlatformAdapter& operator=(const CppgcPlatformAdapter&) = delete;
- PageAllocator* GetPageAllocator() final {
- return platform_->GetPageAllocator();
- }
+ PageAllocator* GetPageAllocator() final { return page_allocator_; }
double MonotonicallyIncreasingTime() final {
return platform_->MonotonicallyIncreasingTime();
@@ -249,6 +191,7 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
private:
v8::Platform* platform_;
+ cppgc::PageAllocator* page_allocator_;
v8::Isolate* isolate_ = nullptr;
bool is_in_detached_mode_ = false;
};
@@ -279,12 +222,8 @@ class UnifiedHeapConcurrentMarker
std::unique_ptr<cppgc::Visitor>
UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
cppgc::internal::ConcurrentMarkingState& marking_state) const {
- if (collection_type_ == CppHeap::CollectionType::kMajor)
- return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
- heap(), v8_heap_, marking_state);
- else
- return std::make_unique<ConcurrentMinorGCMarkingVisitor>(heap(), v8_heap_,
- marking_state);
+ return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
+ heap(), v8_heap_, marking_state, collection_type_);
}
void FatalOutOfMemoryHandlerImpl(const std::string& reason,
@@ -307,22 +246,21 @@ class UnifiedHeapConservativeMarkingVisitor final
: ConservativeMarkingVisitor(heap, mutator_marking_state, visitor) {}
~UnifiedHeapConservativeMarkingVisitor() override = default;
- void SetGlobalHandlesMarkingVisitor(
- std::unique_ptr<GlobalHandleMarkingVisitor>
+ void SetConservativeTracedHandlesMarkingVisitor(
+ std::unique_ptr<ConservativeTracedHandlesMarkingVisitor>
global_handle_marking_visitor) {
- global_handle_marking_visitor_ = std::move(global_handle_marking_visitor);
+ marking_visitor_ = std::move(global_handle_marking_visitor);
}
void TraceConservativelyIfNeeded(const void* address) override {
ConservativeMarkingVisitor::TraceConservativelyIfNeeded(address);
- if (global_handle_marking_visitor_) {
- global_handle_marking_visitor_->VisitPointer(address);
+ if (marking_visitor_) {
+ marking_visitor_->VisitPointer(address);
}
}
private:
- std::unique_ptr<GlobalHandleMarkingVisitor> global_handle_marking_visitor_ =
- nullptr;
+ std::unique_ptr<ConservativeTracedHandlesMarkingVisitor> marking_visitor_;
};
} // namespace
@@ -371,14 +309,10 @@ UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::Platform* platform,
cppgc::internal::MarkingConfig config)
: cppgc::internal::MarkerBase(heap, platform, config),
- mutator_unified_heap_marking_state_(v8_heap, nullptr),
- marking_visitor_(config.collection_type == CppHeap::CollectionType::kMajor
- ? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
- heap, mutator_marking_state_,
- mutator_unified_heap_marking_state_)
- : std::make_unique<MutatorMinorGCMarkingVisitor>(
- heap, mutator_marking_state_,
- mutator_unified_heap_marking_state_)),
+ mutator_unified_heap_marking_state_(v8_heap, nullptr,
+ config.collection_type),
+ marking_visitor_(std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
+ heap, mutator_marking_state_, mutator_unified_heap_marking_state_)),
conservative_marking_visitor_(heap, mutator_marking_state_,
*marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
@@ -534,6 +468,7 @@ CppHeap::CppHeap(
marking_support, sweeping_support, *this),
minor_gc_heap_growing_(
std::make_unique<MinorGCHeapGrowing>(*stats_collector())),
+ cross_heap_remembered_set_(*this),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -595,6 +530,7 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
CHECK(!in_detached_testing_mode_);
CHECK_NULL(isolate_);
isolate_ = isolate;
+ heap_ = isolate->heap();
static_cast<CppgcPlatformAdapter*>(platform())
->SetIsolate(reinterpret_cast<v8::Isolate*>(isolate_));
if (isolate_->heap_profiler()) {
@@ -603,7 +539,7 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
}
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
- ReduceGCCapabilititesFromFlags();
+ ReduceGCCapabilitiesFromFlags();
sweeping_on_mutator_thread_observer_ =
std::make_unique<SweepingOnMutatorThreadForGlobalHandlesObserver>(
*this, *isolate_->traced_handles());
@@ -615,8 +551,7 @@ void CppHeap::DetachIsolate() {
// CHECK across all relevant embedders and setups.
if (!isolate_) return;
- // Delegate to existing EmbedderHeapTracer API to finish any ongoing garbage
- // collection.
+ // Finish any ongoing garbage collection.
if (isolate_->heap()->incremental_marking()->IsMarking()) {
isolate_->heap()->FinalizeIncrementalMarkingAtomically(
i::GarbageCollectionReason::kExternalFinalize);
@@ -632,6 +567,7 @@ void CppHeap::DetachIsolate() {
}
SetMetricRecorder(nullptr);
isolate_ = nullptr;
+ heap_ = nullptr;
// Any future garbage collections will ignore the V8->C++ references.
oom_handler().SetCustomHandler(nullptr);
// Enter no GC scope.
@@ -656,6 +592,8 @@ bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) {
return IsMemoryReducingGC(flags) || IsForceGC(flags);
}
+constexpr size_t kIncrementalMarkingCheckInterval = 128 * KB;
+
} // namespace
CppHeap::MarkingType CppHeap::SelectMarkingType() const {
@@ -674,7 +612,7 @@ CppHeap::SweepingType CppHeap::SelectSweepingType() const {
return sweeping_support();
}
-void CppHeap::ReduceGCCapabilititesFromFlags() {
+void CppHeap::ReduceGCCapabilitiesFromFlags() {
CHECK_IMPLIES(v8_flags.cppheap_concurrent_marking,
v8_flags.cppheap_incremental_marking);
if (v8_flags.cppheap_concurrent_marking) {
@@ -694,6 +632,16 @@ void CppHeap::ReduceGCCapabilititesFromFlags() {
void CppHeap::InitializeTracing(CollectionType collection_type,
GarbageCollectionFlags gc_flags) {
+ DCHECK(!collection_type_);
+
+ if (collection_type == CollectionType::kMinor) {
+ if (!generational_gc_supported()) return;
+ // Notify GC tracer that CppGC started young GC cycle.
+ isolate_->heap()->tracer()->NotifyYoungCppGCRunning();
+ }
+
+ collection_type_ = collection_type;
+
CHECK(!sweeper_.IsSweepingInProgress());
// Check that previous cycle metrics for the same collection type have been
@@ -705,18 +653,24 @@ void CppHeap::InitializeTracing(CollectionType collection_type,
DCHECK(!GetMetricRecorder()->YoungGCMetricsReportPending());
}
- DCHECK(!collection_type_);
- collection_type_ = collection_type;
-
#if defined(CPPGC_YOUNG_GENERATION)
if (generational_gc_supported() &&
*collection_type_ == CollectionType::kMajor) {
+ stats_collector()->NotifyUnmarkingStarted(*collection_type_);
cppgc::internal::StatsCollector::EnabledScope stats_scope(
stats_collector(), cppgc::internal::StatsCollector::kUnmark);
cppgc::internal::SequentialUnmarker unmarker(raw_heap());
}
#endif // defined(CPPGC_YOUNG_GENERATION)
+ if (gc_flags == GarbageCollectionFlagValues::kNoFlags) {
+ if (heap()->is_current_gc_forced()) {
+ gc_flags |= CppHeap::GarbageCollectionFlagValues::kForced;
+ }
+ if (heap()->ShouldReduceMemory()) {
+ gc_flags |= CppHeap::GarbageCollectionFlagValues::kReduceMemory;
+ }
+ }
current_gc_flags_ = gc_flags;
const cppgc::internal::MarkingConfig marking_config{
@@ -738,7 +692,21 @@ void CppHeap::InitializeTracing(CollectionType collection_type,
marking_config);
}
+namespace {
+MarkingWorklists::Local* GetV8MarkingWorklists(
+ Isolate* isolate, cppgc::internal::CollectionType collection_type) {
+ auto* heap = isolate->heap();
+ if (collection_type == cppgc::internal::CollectionType::kMajor) {
+ return heap->mark_compact_collector()->local_marking_worklists();
+ } else {
+ return heap->minor_mark_compact_collector()->local_marking_worklists();
+ }
+}
+} // namespace
+
void CppHeap::StartTracing() {
+ CHECK(marking_done_);
+ if (!TracingInitialized()) return;
if (isolate_) {
// Reuse the same local worklist for the mutator marking state which results
// in directly processing the objects by the JS logic. Also avoids
@@ -746,15 +714,14 @@ void CppHeap::StartTracing() {
marker_.get()
->To<UnifiedHeapMarker>()
.GetMutatorUnifiedHeapMarkingState()
- .Update(isolate_->heap()
- ->mark_compact_collector()
- ->local_marking_worklists());
+ .Update(GetV8MarkingWorklists(isolate_, *collection_type_));
}
marker_->StartMarking();
marking_done_ = false;
}
bool CppHeap::AdvanceTracing(double max_duration) {
+ if (!TracingInitialized()) return true;
is_in_v8_marking_step_ = true;
cppgc::internal::StatsCollector::EnabledScope stats_scope(
stats_collector(),
@@ -778,33 +745,72 @@ bool CppHeap::AdvanceTracing(double max_duration) {
return marking_done_;
}
-bool CppHeap::IsTracingDone() { return marking_done_; }
+bool CppHeap::IsTracingDone() const {
+ return !TracingInitialized() || marking_done_;
+}
+
+bool CppHeap::ShouldFinalizeIncrementalMarking() const {
+ return !incremental_marking_supported() || IsTracingDone();
+}
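Several CppHeap entry points now early-return when no tracing cycle has been initialized (for example, a minor GC request on a heap without generational support). The shape is a simple "not started means trivially done" guard; a compact illustration, with invented class and method names standing in for the real ones:

#include <cassert>
#include <optional>

namespace sketch {

enum class CollectionType { kMajor, kMinor };

class TracedHeap {
 public:
  void InitializeTracing(CollectionType type, bool generational_supported) {
    // Minor cycles are silently dropped when young generation support is off;
    // collection_type_ stays empty and later calls treat tracing as done.
    if (type == CollectionType::kMinor && !generational_supported) return;
    collection_type_ = type;
    marking_done_ = false;
  }

  bool TracingInitialized() const { return collection_type_.has_value(); }

  bool AdvanceTracing() {
    if (!TracingInitialized()) return true;  // nothing to do, report "done"
    marking_done_ = true;                    // pretend one step finished marking
    return marking_done_;
  }

  bool IsTracingDone() const { return !TracingInitialized() || marking_done_; }

 private:
  std::optional<CollectionType> collection_type_;
  bool marking_done_ = true;
};

}  // namespace sketch

int main() {
  sketch::TracedHeap heap;
  heap.InitializeTracing(sketch::CollectionType::kMinor,
                         /*generational_supported=*/false);
  assert(heap.IsTracingDone());   // cycle was never actually started
  assert(heap.AdvanceTracing());  // trivially done
  return 0;
}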
void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
+ // Enter atomic pause even if tracing is not initialized. This is needed to
+ // make sure that we always enable young generation from the atomic pause.
in_atomic_pause_ = true;
+ if (!TracingInitialized()) return;
auto& marker = marker_.get()->To<UnifiedHeapMarker>();
// Scan global handles conservatively in case we are attached to an Isolate.
// TODO(1029379): Support global handle marking visitors with minor GC.
- if (isolate_ && !generational_gc_supported()) {
+ if (isolate_) {
auto& heap = *isolate()->heap();
- marker.conservative_visitor().SetGlobalHandlesMarkingVisitor(
- std::make_unique<GlobalHandleMarkingVisitor>(
- heap, *heap.mark_compact_collector()->local_marking_worklists()));
+ marker.conservative_visitor().SetConservativeTracedHandlesMarkingVisitor(
+ std::make_unique<ConservativeTracedHandlesMarkingVisitor>(
+ heap, *GetV8MarkingWorklists(isolate_, *collection_type_),
+ *collection_type_));
}
marker.EnterAtomicPause(stack_state);
- if (isolate_ && *collection_type_ == CollectionType::kMinor) {
- // Visit V8 -> cppgc references.
- TraceV8ToCppGCReferences(isolate_, marker.GetMutatorMarkingState(),
- wrapper_descriptor_);
- }
compactor_.CancelIfShouldNotCompact(MarkingType::kAtomic, stack_state);
}
bool CppHeap::FinishConcurrentMarkingIfNeeded() {
+ if (!TracingInitialized()) return true;
return marker_->JoinConcurrentMarkingIfNeeded();
}
+void CppHeap::WriteBarrier(JSObject js_object) {
+ DCHECK(js_object.MayHaveEmbedderFields());
+ DCHECK_NOT_NULL(isolate()->heap()->mark_compact_collector());
+
+ const auto descriptor = wrapper_descriptor();
+ const auto min_field_count =
+ 1 + std::max(descriptor.wrappable_type_index,
+ descriptor.wrappable_instance_index);
+ if (js_object.GetEmbedderFieldCount() < min_field_count) return;
+
+ const EmbedderDataSlot type_slot(js_object, descriptor.wrappable_type_index);
+ const EmbedderDataSlot instance_slot(js_object,
+ descriptor.wrappable_instance_index);
+ isolate()
+ ->heap()
+ ->mark_compact_collector()
+ ->local_marking_worklists()
+ ->cpp_marking_state()
+ ->MarkAndPushForWriteBarrier(type_slot, instance_slot);
+}
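The new CppHeap::WriteBarrier reads the wrappable out of the JS object's embedder fields and pushes it onto the C++ marking worklist, skipping objects that carry too few embedder fields to hold a wrappable at all. A schematic, non-V8 version of that flow (the field layout and worklist are stand-ins; only the shape of the check is taken from the diff):

#include <algorithm>
#include <cstdio>
#include <vector>

namespace sketch {

struct WrapperDescriptor {
  int wrappable_type_index;
  int wrappable_instance_index;
};

struct JSObjectLike {
  std::vector<void*> embedder_fields;  // slots shared with the embedder
};

// Pushes the wrapped C++ object for marking if the JS object actually carries
// one, mirroring the min_field_count check in the diff.
void WriteBarrier(const JSObjectLike& js_object, const WrapperDescriptor& desc,
                  std::vector<void*>& cpp_marking_worklist) {
  const int min_field_count =
      1 + std::max(desc.wrappable_type_index, desc.wrappable_instance_index);
  if (static_cast<int>(js_object.embedder_fields.size()) < min_field_count)
    return;  // no wrappable stored on this object
  void* instance = js_object.embedder_fields[desc.wrappable_instance_index];
  if (instance != nullptr) cpp_marking_worklist.push_back(instance);
}

}  // namespace sketch

int main() {
  int wrapped = 42;
  sketch::JSObjectLike obj{{nullptr, &wrapped}};
  std::vector<void*> worklist;
  sketch::WriteBarrier(obj, {0, 1}, worklist);
  std::printf("pushed %zu object(s)\n", worklist.size());  // 1
  return 0;
}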
+
+namespace {
+
+void RecordEmbedderSpeed(GCTracer* tracer, base::TimeDelta marking_time,
+ size_t marked_bytes) {
+ constexpr auto kMinReportingTime = base::TimeDelta::FromMillisecondsD(0.5);
+ if (marking_time > kMinReportingTime) {
+ tracer->RecordEmbedderSpeed(marked_bytes, marking_time.InMillisecondsF());
+ }
+}
+
+} // namespace
+
void CppHeap::TraceEpilogue() {
CHECK(in_atomic_pause_);
CHECK(marking_done_);
@@ -817,17 +823,25 @@ void CppHeap::TraceEpilogue() {
EnableGenerationalGC();
}
#endif // defined(CPPGC_YOUNG_GENERATION)
+
+ if (!TracingInitialized()) {
+ in_atomic_pause_ = false;
+ return;
+ }
+
{
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
marker_->LeaveAtomicPause();
}
marker_.reset();
if (isolate_) {
- auto* tracer = isolate_->heap()->local_embedder_heap_tracer();
- DCHECK_NOT_NULL(tracer);
- tracer->UpdateRemoteStats(
- stats_collector_->marked_bytes(),
- stats_collector_->marking_time().InMillisecondsF());
+ used_size_ = stats_collector_->marked_bytes();
+ // Force a check next time increased memory is reported. This allows for
+ // setting limits close to actual heap sizes.
+ allocated_size_limit_for_check_ = 0;
+
+ RecordEmbedderSpeed(isolate_->heap()->tracer(),
+ stats_collector_->marking_time(), used_size_);
}
// The allocated bytes counter in v8 was reset to the current marked bytes, so
// any pending allocated bytes updates should be discarded.
@@ -835,7 +849,7 @@ void CppHeap::TraceEpilogue() {
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
UnifiedHeapMarkingVerifier verifier(*this, *collection_type_);
- verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ verifier.Run(stack_state_of_prev_gc(),
stats_collector()->marked_bytes_on_current_cycle() +
bytes_allocated_in_prefinalizers);
#endif // CPPGC_VERIFY_HEAP
@@ -843,6 +857,9 @@ void CppHeap::TraceEpilogue() {
#if defined(CPPGC_YOUNG_GENERATION)
ResetRememberedSet();
+ // We can reset the remembered set on each GC because surviving Oilpan objects
+ // are immediately considered old.
+ ResetCrossHeapRememberedSet();
#endif // defined(CPPGC_YOUNG_GENERATION)
{
@@ -874,35 +891,6 @@ void CppHeap::TraceEpilogue() {
sweeper().NotifyDoneIfNeeded();
}
-void CppHeap::RunMinorGCIfNeeded() {
- if (!generational_gc_supported()) return;
- if (in_no_gc_scope()) return;
- // Minor GC does not support nesting in full GCs.
- if (IsMarking()) return;
- // Run only when the limit is reached.
- if (!minor_gc_heap_growing_->LimitReached()) return;
-
- DCHECK(!sweeper_.IsSweepingInProgress());
-
- // Notify GC tracer that CppGC started young GC cycle.
- isolate_->heap()->tracer()->NotifyYoungCppGCRunning();
-
- SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
-
- // Perform an atomic GC, with starting incremental/concurrent marking and
- // immediately finalizing the garbage collection.
- InitializeTracing(CollectionType::kMinor,
- GarbageCollectionFlagValues::kNoFlags);
- StartTracing();
- // TODO(chromium:1029379): Should be safe to run without stack.
- EnterFinalPause(cppgc::EmbedderStackState::kMayContainHeapPointers);
- CHECK(AdvanceTracing(std::numeric_limits<double>::infinity()));
- if (FinishConcurrentMarkingIfNeeded()) {
- CHECK(AdvanceTracing(std::numeric_limits<double>::infinity()));
- }
- TraceEpilogue();
-}
-
void CppHeap::AllocatedObjectSizeIncreased(size_t bytes) {
buffered_allocated_bytes_ += static_cast<int64_t>(bytes);
ReportBufferedAllocationSizeIfPossible();
@@ -922,18 +910,38 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
return;
}
+ // We are in attached state.
+ DCHECK_NOT_NULL(isolate_);
+
// The calls below may trigger full GCs that are synchronous and also execute
// epilogue callbacks. Since such callbacks may allocate, the counter must
// already be zeroed by that time.
const int64_t bytes_to_report = buffered_allocated_bytes_;
buffered_allocated_bytes_ = 0;
- auto* const tracer = isolate_->heap()->local_embedder_heap_tracer();
- DCHECK_NOT_NULL(tracer);
if (bytes_to_report < 0) {
- tracer->DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
+ DCHECK_GE(used_size_.load(std::memory_order_relaxed), bytes_to_report);
+ used_size_.fetch_sub(static_cast<size_t>(-bytes_to_report),
+ std::memory_order_relaxed);
} else {
- tracer->IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
+ used_size_.fetch_add(static_cast<size_t>(bytes_to_report),
+ std::memory_order_relaxed);
+ allocated_size_ += bytes_to_report;
+
+ if (v8_flags.incremental_marking) {
+ if (allocated_size_ > allocated_size_limit_for_check_) {
+ Heap* heap = isolate_->heap();
+ heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ if (heap->AllocationLimitOvershotByLargeMargin()) {
+ heap->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
+ allocated_size_limit_for_check_ =
+ allocated_size_ + kIncrementalMarkingCheckInterval;
+ }
+ }
}
}
@@ -944,7 +952,7 @@ void CppHeap::CollectGarbageForTesting(CollectionType collection_type,
// Finish sweeping in case it is still running.
sweeper().FinishIfRunning();
- SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+ stack()->SetMarkerToCurrentStackPosition();
if (isolate_) {
reinterpret_cast<v8::Isolate*>(isolate_)
@@ -1089,6 +1097,7 @@ void CppHeap::FinishSweepingIfRunning() {
void CppHeap::FinishSweepingIfOutOfWork() { sweeper_.FinishIfOutOfWork(); }
std::unique_ptr<CppMarkingState> CppHeap::CreateCppMarkingState() {
+ if (!TracingInitialized()) return {};
DCHECK(IsMarking());
return std::make_unique<CppMarkingState>(
isolate(), wrapper_descriptor_,
@@ -1098,6 +1107,7 @@ std::unique_ptr<CppMarkingState> CppHeap::CreateCppMarkingState() {
std::unique_ptr<CppMarkingState>
CppHeap::CreateCppMarkingStateForMutatorThread() {
+ if (!TracingInitialized()) return {};
DCHECK(IsMarking());
return std::make_unique<CppMarkingState>(
isolate(), wrapper_descriptor_,
@@ -1131,7 +1141,21 @@ const cppgc::EmbedderStackState* CppHeap::override_stack_state() const {
void CppHeap::StartIncrementalGarbageCollection(cppgc::internal::GCConfig) {
UNIMPLEMENTED();
}
+
size_t CppHeap::epoch() const { UNIMPLEMENTED(); }
+void CppHeap::ResetCrossHeapRememberedSet() {
+ if (!generational_gc_supported()) {
+ DCHECK(cross_heap_remembered_set_.IsEmpty());
+ return;
+ }
+ DCHECK(isolate_);
+ cross_heap_remembered_set_.Reset(*isolate_);
+}
+
+void CppHeap::ReduceGCCapabilitiesFromFlagsForTesting() {
+ ReduceGCCapabilitiesFromFlags();
+}
+
} // namespace internal
} // namespace v8
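
The cpp-heap.cc hunks above drop the LocalEmbedderHeapTracer round-trip: CppHeap now owns the counters itself, feeding used_size_ into V8's heap-growing heuristics and only running the incremental-marking limit check once allocated_size_ crosses allocated_size_limit_for_check_, which is then pushed out by a fixed interval. A minimal standalone sketch of that check-interval throttling; all names are stand-ins and the 128 KiB interval is made up, not the actual kIncrementalMarkingCheckInterval:

#include <atomic>
#include <cstddef>
#include <iostream>

class AllocationThrottleSketch {
 public:
  void AllocatedObjectSizeIncreased(size_t bytes) {
    used_size_.fetch_add(bytes, std::memory_order_relaxed);
    allocated_size_ += bytes;
    if (allocated_size_ > allocated_size_limit_for_check_) {
      MaybeStartIncrementalMarking();
      // Push the threshold out so the potentially expensive limit check only
      // repeats after another interval's worth of allocation.
      allocated_size_limit_for_check_ = allocated_size_ + kCheckInterval;
    }
  }

  void NotifyGCFinished(size_t marked_bytes) {
    used_size_.store(marked_bytes, std::memory_order_relaxed);
    // Force a check on the next report so limits set close to the actual heap
    // size still trigger promptly.
    allocated_size_limit_for_check_ = 0;
  }

 private:
  void MaybeStartIncrementalMarking() {
    std::cout << "limit check at " << allocated_size_ << " bytes\n";
  }

  static constexpr size_t kCheckInterval = 128 * 1024;  // Illustrative only.
  std::atomic<size_t> used_size_{0};
  size_t allocated_size_ = 0;
  size_t allocated_size_limit_for_check_ = 0;
};

int main() {
  AllocationThrottleSketch throttle;
  for (int i = 0; i < 10; ++i) throttle.AllocatedObjectSizeIncreased(64 * 1024);
  throttle.NotifyGCFinished(/*marked_bytes=*/96 * 1024);
}
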
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index fc0923c390..5cc53037f6 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -16,10 +16,12 @@ static_assert(
#include "src/base/flags.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
+#include "src/heap/cppgc-js/cross-heap-remembered-set.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/logging/metrics.h"
+#include "src/objects/js-objects.h"
namespace v8 {
@@ -128,10 +130,6 @@ class V8_EXPORT_PRIVATE CppHeap final
void Terminate();
- void EnableDetachedGarbageCollectionsForTesting();
-
- void CollectGarbageForTesting(CollectionType, StackState);
-
void CollectCustomSpaceStatisticsAtLastGC(
std::vector<cppgc::CustomSpaceIndex>,
std::unique_ptr<CustomSpaceStatisticsReceiver>);
@@ -139,15 +137,18 @@ class V8_EXPORT_PRIVATE CppHeap final
void FinishSweepingIfRunning();
void FinishSweepingIfOutOfWork();
- void InitializeTracing(CollectionType, GarbageCollectionFlags);
+ void InitializeTracing(
+ CollectionType,
+ GarbageCollectionFlags = GarbageCollectionFlagValues::kNoFlags);
void StartTracing();
bool AdvanceTracing(double max_duration);
- bool IsTracingDone();
+ bool IsTracingDone() const;
void TraceEpilogue();
void EnterFinalPause(cppgc::EmbedderStackState stack_state);
bool FinishConcurrentMarkingIfNeeded();
+ void WriteBarrier(JSObject);
- void RunMinorGCIfNeeded();
+ bool ShouldFinalizeIncrementalMarking() const;
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
@@ -162,6 +163,11 @@ class V8_EXPORT_PRIVATE CppHeap final
Isolate* isolate() const { return isolate_; }
+ size_t used_size() const {
+ return used_size_.load(std::memory_order_relaxed);
+ }
+ size_t allocated_size() const { return allocated_size_; }
+
::heap::base::Stack* stack() final;
std::unique_ptr<CppMarkingState> CreateCppMarkingState();
@@ -173,8 +179,19 @@ class V8_EXPORT_PRIVATE CppHeap final
void StartIncrementalGarbageCollection(cppgc::internal::GCConfig) override;
size_t epoch() const override;
+ V8_INLINE void RememberCrossHeapReferenceIfNeeded(
+ v8::internal::JSObject host_obj, void* value);
+ template <typename F>
+ inline void VisitCrossHeapRememberedSetIfNeeded(F f);
+ void ResetCrossHeapRememberedSet();
+
+ // Testing-only APIs.
+ void EnableDetachedGarbageCollectionsForTesting();
+ void CollectGarbageForTesting(CollectionType, StackState);
+ void ReduceGCCapabilitiesFromFlagsForTesting();
+
private:
- void ReduceGCCapabilititesFromFlags();
+ void ReduceGCCapabilitiesFromFlags();
void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) final {
@@ -191,13 +208,19 @@ class V8_EXPORT_PRIVATE CppHeap final
MarkingType SelectMarkingType() const;
SweepingType SelectSweepingType() const;
+ bool TracingInitialized() const { return collection_type_.has_value(); }
+
+ Heap* heap() const { return heap_; }
+
Isolate* isolate_ = nullptr;
- bool marking_done_ = false;
+ Heap* heap_ = nullptr;
+ bool marking_done_ = true;
// |collection_type_| is initialized when marking is in progress.
base::Optional<CollectionType> collection_type_;
GarbageCollectionFlags current_gc_flags_;
std::unique_ptr<MinorGCHeapGrowing> minor_gc_heap_growing_;
+ CrossHeapRememberedSet cross_heap_remembered_set_;
std::unique_ptr<cppgc::internal::Sweeper::SweepingOnMutatorThreadObserver>
sweeping_on_mutator_thread_observer_;
@@ -213,9 +236,33 @@ class V8_EXPORT_PRIVATE CppHeap final
bool force_incremental_marking_for_testing_ = false;
bool is_in_v8_marking_step_ = false;
+ // Used size of objects. Reported to V8's regular heap growing strategy.
+ std::atomic<size_t> used_size_{0};
+ // Total bytes allocated since the last GC. Monotonically increasing value.
+ // Used to approximate allocation rate.
+ size_t allocated_size_ = 0;
+ // Limit for |allocated_size| in bytes to avoid checking for starting a GC
+ // on each increment.
+ size_t allocated_size_limit_for_check_ = 0;
+
friend class MetricRecorderAdapter;
};
+void CppHeap::RememberCrossHeapReferenceIfNeeded(
+ v8::internal::JSObject host_obj, void* value) {
+ if (!generational_gc_supported()) return;
+ DCHECK(isolate_);
+ cross_heap_remembered_set_.RememberReferenceIfNeeded(*isolate_, host_obj,
+ value);
+}
+
+template <typename F>
+void CppHeap::VisitCrossHeapRememberedSetIfNeeded(F f) {
+ if (!generational_gc_supported()) return;
+ DCHECK(isolate_);
+ cross_heap_remembered_set_.Visit(*isolate_, std::move(f));
+}
+
DEFINE_OPERATORS_FOR_FLAGS(CppHeap::GarbageCollectionFlags)
} // namespace internal
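
In the header diff above, TracingInitialized() is just collection_type_.has_value() and marking_done_ now defaults to true, so a detached or idle CppHeap answers the tracing queries without a marker. A small sketch of that optional-as-cycle-state idea, using std::optional in place of V8's base::Optional:

#include <cassert>
#include <optional>

enum class CollectionType { kMinor, kMajor };

class CycleStateSketch {
 public:
  bool TracingInitialized() const { return collection_type_.has_value(); }
  bool IsTracingDone() const { return marking_done_; }

  void InitializeTracing(CollectionType type) {
    assert(!TracingInitialized());
    collection_type_ = type;
    marking_done_ = false;
  }

  void TraceEpilogue() {
    marking_done_ = true;
    collection_type_.reset();  // Back to "no cycle in flight".
  }

 private:
  // Engaged exactly while a marking cycle is in progress.
  std::optional<CollectionType> collection_type_;
  // Defaults to true so IsTracingDone() is trivially satisfied when idle.
  bool marking_done_ = true;
};

int main() {
  CycleStateSketch state;
  assert(!state.TracingInitialized() && state.IsTracingDone());
  state.InitializeTracing(CollectionType::kMajor);
  assert(state.TracingInitialized() && !state.IsTracingDone());
  state.TraceEpilogue();
  assert(state.IsTracingDone());
}
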
diff --git a/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h b/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
index 23294b4dca..3e52ae3c43 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
@@ -6,7 +6,9 @@
#define V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
#include "src/heap/cppgc-js/cpp-marking-state.h"
-#include "src/heap/embedder-tracing-inl.h"
+#include "src/heap/cppgc-js/wrappable-info-inl.h"
+#include "src/heap/cppgc-js/wrappable-info.h"
+#include "src/heap/cppgc/heap-page.h"
#include "src/objects/embedder-data-slot.h"
#include "src/objects/js-objects.h"
@@ -33,11 +35,25 @@ void CppMarkingState::MarkAndPush(const EmbedderDataSnapshot& snapshot) {
void CppMarkingState::MarkAndPush(const EmbedderDataSlot type_slot,
const EmbedderDataSlot instance_slot) {
- LocalEmbedderHeapTracer::WrapperInfo info;
- if (LocalEmbedderHeapTracer::ExtractWrappableInfo(
- isolate_, wrapper_descriptor_, type_slot, instance_slot, &info)) {
+ const auto maybe_info = WrappableInfo::From(
+ isolate_, type_slot, instance_slot, wrapper_descriptor_);
+ if (maybe_info.has_value()) {
marking_state_.MarkAndPush(
- cppgc::internal::HeapObjectHeader::FromObject(info.second));
+ cppgc::internal::HeapObjectHeader::FromObject(maybe_info->instance));
+ }
+}
+
+// TODO(v8:13796): Remove this if it doesn't flush out any issues.
+void CppMarkingState::MarkAndPushForWriteBarrier(
+ const EmbedderDataSlot type_slot, const EmbedderDataSlot instance_slot) {
+ const auto maybe_info = WrappableInfo::From(
+ isolate_, type_slot, instance_slot, wrapper_descriptor_);
+ if (maybe_info.has_value()) {
+ cppgc::internal::HeapObjectHeader& header =
+ cppgc::internal::HeapObjectHeader::FromObject(maybe_info->instance);
+ CHECK_EQ(&header, &cppgc::internal::BasePage::FromPayload(&header)
+ ->ObjectHeaderFromInnerAddress(&header));
+ marking_state_.MarkAndPush(header);
}
}
diff --git a/deps/v8/src/heap/cppgc-js/cpp-marking-state.h b/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
index ad8ef3b680..3710ad4c19 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
@@ -18,7 +18,7 @@ namespace internal {
class JSObject;
class EmbedderDataSlot;
-class CppMarkingState {
+class CppMarkingState final {
public:
using EmbedderDataSnapshot =
std::pair<EmbedderDataSlot::EmbedderDataSlotSnapshot,
@@ -48,6 +48,8 @@ class CppMarkingState {
inline void MarkAndPush(const EmbedderDataSnapshot&);
inline void MarkAndPush(const EmbedderDataSlot type_slot,
const EmbedderDataSlot instance_slot);
+ inline void MarkAndPushForWriteBarrier(const EmbedderDataSlot type_slot,
+ const EmbedderDataSlot instance_slot);
bool IsLocalEmpty() {
return marking_state_.marking_worklist().IsLocalEmpty();
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index 1424f97618..61d635e3bf 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -14,10 +14,11 @@
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc-js/wrappable-info-inl.h"
+#include "src/heap/cppgc-js/wrappable-info.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/visitor.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
#include "src/objects/js-objects.h"
#include "src/profiler/heap-profiler.h"
@@ -352,10 +353,8 @@ class StateStorage final {
size_t state_count_ = 0;
};
-void* ExtractEmbedderDataBackref(Isolate* isolate,
+void* ExtractEmbedderDataBackref(Isolate* isolate, CppHeap& cpp_heap,
v8::Local<v8::Value> v8_value) {
- // See LocalEmbedderHeapTracer::VerboseWrapperTypeInfo for details on how
- // wrapper objects are set up.
if (!v8_value->IsObject()) return nullptr;
Handle<Object> v8_object = Utils::OpenHandle(*v8_value);
@@ -364,10 +363,10 @@ void* ExtractEmbedderDataBackref(Isolate* isolate,
return nullptr;
JSObject js_object = JSObject::cast(*v8_object);
- return LocalEmbedderHeapTracer::VerboseWrapperInfo(
- isolate->heap()->local_embedder_heap_tracer()->ExtractWrapperInfo(
- isolate, js_object))
- .instance();
+
+ const auto maybe_info =
+ WrappableInfo::From(isolate, js_object, cpp_heap.wrapper_descriptor());
+ return maybe_info.has_value() ? maybe_info->instance : nullptr;
}
// The following implements a snapshotting algorithm for C++ objects that also
@@ -488,7 +487,7 @@ class CppGraphBuilderImpl final {
void* back_reference_object = ExtractEmbedderDataBackref(
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
- v8_value);
+ cpp_heap_, v8_value);
if (back_reference_object) {
auto& back_header = HeapObjectHeader::FromObject(back_reference_object);
auto& back_state = states_.GetExistingState(back_header);
diff --git a/deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.cc b/deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.cc
new file mode 100644
index 0000000000..5c70fdeb22
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.cc
@@ -0,0 +1,36 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/cross-heap-remembered-set.h"
+
+#include "src/api/api-inl.h"
+#include "src/handles/global-handles-inl.h"
+#include "src/heap/cppgc/heap-page.h"
+
+namespace v8::internal {
+
+void CrossHeapRememberedSet::RememberReferenceIfNeeded(Isolate& isolate,
+ JSObject host_obj,
+ void* cppgc_object) {
+ DCHECK_NOT_NULL(cppgc_object);
+ // Any in-cage pointer must point to a valid, not freed cppgc object.
+ auto* page =
+ cppgc::internal::BasePage::FromInnerAddress(&heap_base_, cppgc_object);
+ // TODO(v8:13475): Better filter with on-cage check.
+ if (!page) return;
+ auto& value_hoh = page->ObjectHeaderFromInnerAddress(cppgc_object);
+ if (!value_hoh.IsYoung()) return;
+ remembered_v8_to_cppgc_references_.push_back(
+ isolate.global_handles()->Create(host_obj));
+}
+
+void CrossHeapRememberedSet::Reset(Isolate& isolate) {
+ for (auto& h : remembered_v8_to_cppgc_references_) {
+ isolate.global_handles()->Destroy(h.location());
+ }
+ remembered_v8_to_cppgc_references_.clear();
+ remembered_v8_to_cppgc_references_.shrink_to_fit();
+}
+
+} // namespace v8::internal
diff --git a/deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.h b/deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.h
new file mode 100644
index 0000000000..ba28c377f5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cross-heap-remembered-set.h
@@ -0,0 +1,54 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CROSS_HEAP_REMEMBERED_SET_H_
+#define V8_HEAP_CPPGC_JS_CROSS_HEAP_REMEMBERED_SET_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/handles/handles.h"
+#include "src/objects/js-objects.h"
+
+namespace cppgc::internal {
+class HeapBase;
+}
+
+namespace v8::internal {
+
+// The class is used to remember V8 to Oilpan references.
+class V8_EXPORT_PRIVATE CrossHeapRememberedSet final {
+ public:
+ explicit CrossHeapRememberedSet(cppgc::internal::HeapBase& heap_base)
+ : heap_base_(heap_base) {}
+
+ CrossHeapRememberedSet(const CrossHeapRememberedSet&) = delete;
+ CrossHeapRememberedSet(CrossHeapRememberedSet&&) = delete;
+
+ void RememberReferenceIfNeeded(Isolate& isolate, JSObject host_obj,
+ void* cppgc_object);
+ void Reset(Isolate& isolate);
+
+ template <typename F>
+ void Visit(Isolate&, F);
+
+ bool IsEmpty() const { return remembered_v8_to_cppgc_references_.empty(); }
+
+ private:
+ cppgc::internal::HeapBase& heap_base_;
+ // The vector keeps handles to remembered V8 objects that have outgoing
+ // references to the cppgc heap. Please note that the handles are global.
+ std::vector<Handle<JSObject>> remembered_v8_to_cppgc_references_;
+};
+
+template <typename F>
+void CrossHeapRememberedSet::Visit(Isolate& isolate, F f) {
+ for (auto& obj : remembered_v8_to_cppgc_references_) {
+ f(*obj);
+ }
+}
+
+} // namespace v8::internal
+
+#endif // V8_HEAP_CPPGC_JS_CROSS_HEAP_REMEMBERED_SET_H_
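
The new CrossHeapRememberedSet records V8 objects whose embedder fields point at still-young cppgc objects, keeps them reachable via global handles, lets a minor GC revisit them through Visit(), and is cleared wholesale by Reset(). A simplified lifecycle sketch with plain stand-in types (no global handles, no cage check):

#include <functional>
#include <iostream>
#include <vector>

struct CppgcObjectStub { bool is_young = true; };
struct JSHostStub { const char* name; };

class CrossHeapRememberedSetSketch {
 public:
  void RememberReferenceIfNeeded(const JSHostStub& host,
                                 const CppgcObjectStub* target) {
    // Old targets need no remembering; only young ones can be missed by a
    // minor GC that does not trace the V8 side.
    if (target == nullptr || !target->is_young) return;
    remembered_hosts_.push_back(host);
  }

  void Visit(const std::function<void(const JSHostStub&)>& f) const {
    for (const JSHostStub& host : remembered_hosts_) f(host);
  }

  void Reset() {
    remembered_hosts_.clear();
    remembered_hosts_.shrink_to_fit();
  }

  bool IsEmpty() const { return remembered_hosts_.empty(); }

 private:
  std::vector<JSHostStub> remembered_hosts_;
};

int main() {
  CppgcObjectStub young{true}, old{false};
  CrossHeapRememberedSetSketch set;
  set.RememberReferenceIfNeeded({"wrapper_a"}, &young);
  set.RememberReferenceIfNeeded({"wrapper_b"}, &old);  // Filtered out.
  set.Visit([](const JSHostStub& h) { std::cout << h.name << " revisited\n"; });
  set.Reset();
}
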
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
index beaaa9c407..719cb95de1 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
@@ -15,25 +15,16 @@
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-class BasicTracedReferenceExtractor {
+class BasicTracedReferenceExtractor final {
public:
- static Object GetObjectForMarking(const TracedReferenceBase& ref) {
- Address* traced_handle_location = const_cast<Address*>(
+ static Address* GetObjectSlotForMarking(const TracedReferenceBase& ref) {
+ return const_cast<Address*>(
reinterpret_cast<const Address*>(ref.GetSlotThreadSafe()));
- // We cannot assume that the reference is non-null as we may get here by
- // tracing an ephemeron which doesn't have early bailouts, see
- // `cppgc::Visitor::TraceEphemeron()` for non-Member values.
- if (!traced_handle_location) return Object();
-
- // The load synchronizes internal bitfields that are also read atomically
- // from the concurrent marker.
- Object object = TracedHandles::Acquire(traced_handle_location);
- TracedHandles::Mark(traced_handle_location);
- return object;
}
};
@@ -41,17 +32,23 @@ void UnifiedHeapMarkingState::MarkAndPush(
const TracedReferenceBase& reference) {
// The following code will crash with null pointer derefs when finding a
// non-empty `TracedReferenceBase` when `CppHeap` is in detached mode.
-
- Object object = BasicTracedReferenceExtractor::GetObjectForMarking(reference);
+ Address* traced_handle_location =
+ BasicTracedReferenceExtractor::GetObjectSlotForMarking(reference);
+ // We cannot assume that the reference is non-null as we may get here by
+ // tracing an ephemeron which doesn't have early bailouts, see
+ // `cppgc::Visitor::TraceEphemeron()` for non-Member values.
+ if (!traced_handle_location) {
+ return;
+ }
+ Object object = TracedHandles::Mark(traced_handle_location, mark_mode_);
if (!object.IsHeapObject()) {
// The embedder is not aware of whether numbers are materialized as heap
- // objects are just passed around as Smis. This branch also filters out
- // intentionally passed `Smi::zero()` that indicate that there's no object
- // to mark.
+ // objects or just passed around as Smis.
return;
}
HeapObject heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
+ if (heap_object.InReadOnlySpace()) return;
+ if (marking_state_->TryMark(heap_object)) {
local_marking_worklist_->Push(heap_object);
}
if (V8_UNLIKELY(track_retaining_path_)) {
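
The rewritten MarkAndPush above resolves the traced slot first, bails out on a null location (ephemeron tracing may pass empty references), marks the traced handle, and then filters Smis and read-only objects before pushing newly marked objects onto the worklist. A reduced sketch of that filter-then-push pipeline with stand-in types:

#include <deque>
#include <iostream>

struct HeapObjectStub {
  bool is_smi = false;
  bool in_read_only_space = false;
  bool marked = false;
  // Returns true only on the transition from unmarked to marked.
  bool TryMark() { return marked ? false : (marked = true); }
};

void MarkAndPush(HeapObjectStub** slot, std::deque<HeapObjectStub*>& worklist) {
  if (slot == nullptr || *slot == nullptr) return;  // Empty traced reference.
  HeapObjectStub* object = *slot;
  if (object->is_smi) return;              // Numbers may just be Smis.
  if (object->in_read_only_space) return;  // Never needs marking.
  if (object->TryMark()) worklist.push_back(object);
}

int main() {
  std::deque<HeapObjectStub*> worklist;
  HeapObjectStub obj;
  HeapObjectStub* slot = &obj;
  MarkAndPush(&slot, worklist);
  MarkAndPush(&slot, worklist);  // Second call is a no-op: already marked.
  std::cout << "worklist size: " << worklist.size() << "\n";
}
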
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
index 116563769f..83c4e17eea 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
@@ -11,11 +11,15 @@ namespace v8 {
namespace internal {
UnifiedHeapMarkingState::UnifiedHeapMarkingState(
- Heap* heap, MarkingWorklists::Local* local_marking_worklist)
+ Heap* heap, MarkingWorklists::Local* local_marking_worklist,
+ cppgc::internal::CollectionType collection_type)
: heap_(heap),
marking_state_(heap_ ? heap_->marking_state() : nullptr),
local_marking_worklist_(local_marking_worklist),
- track_retaining_path_(v8_flags.track_retaining_path) {
+ track_retaining_path_(v8_flags.track_retaining_path),
+ mark_mode_(collection_type == cppgc::internal::CollectionType::kMinor
+ ? TracedHandles::MarkMode::kOnlyYoung
+ : TracedHandles::MarkMode::kAll) {
DCHECK_IMPLIES(v8_flags.track_retaining_path,
!v8_flags.concurrent_marking && !v8_flags.parallel_marking);
DCHECK_IMPLIES(heap_, marking_state_);
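
The constructor above derives the traced-handle mark mode from the collection type: a minor cppgc cycle only needs to mark young handles, while a major cycle marks all of them. A tiny illustrative mapping:

enum class CollectionType { kMinor, kMajor };
enum class MarkMode { kOnlyYoung, kAll };

// Mirrors the ternary in the initializer list above; illustrative only.
constexpr MarkMode MarkModeFor(CollectionType type) {
  return type == CollectionType::kMinor ? MarkMode::kOnlyYoung : MarkMode::kAll;
}

static_assert(MarkModeFor(CollectionType::kMinor) == MarkMode::kOnlyYoung);
static_assert(MarkModeFor(CollectionType::kMajor) == MarkMode::kAll);

int main() {}
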
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index af1320721b..21ac09d81c 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,6 +6,7 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
+#include "src/handles/traced-handles.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist.h"
@@ -17,7 +18,8 @@ namespace internal {
// mode, the expectation is that no non-null `TracedReferenceBase` is found.
class UnifiedHeapMarkingState final {
public:
- UnifiedHeapMarkingState(Heap*, MarkingWorklists::Local*);
+ UnifiedHeapMarkingState(Heap*, MarkingWorklists::Local*,
+ cppgc::internal::CollectionType);
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
@@ -31,6 +33,7 @@ class UnifiedHeapMarkingState final {
MarkingState* const marking_state_;
MarkingWorklists::Local* local_marking_worklist_ = nullptr;
const bool track_retaining_path_;
+ const TracedHandles::MarkMode mark_mode_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index 677a897a93..c44ea62054 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -14,6 +14,18 @@
namespace v8 {
namespace internal {
+namespace {
+std::unique_ptr<MarkingWorklists::Local> GetV8MarkingWorklists(
+ Heap* heap, cppgc::internal::CollectionType collection_type) {
+ if (!heap) return {};
+ auto* worklist =
+ (collection_type == cppgc::internal::CollectionType::kMajor)
+ ? heap->mark_compact_collector()->marking_worklists()
+ : heap->minor_mark_compact_collector()->marking_worklists();
+ return std::make_unique<MarkingWorklists::Local>(worklist);
+}
+} // namespace
+
UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
HeapBase& heap, cppgc::internal::BasicMarkingState& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
@@ -48,7 +60,7 @@ void UnifiedHeapMarkingVisitorBase::VisitWeakContainer(
void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
const void* object) {
- marking_state_.RegisterWeakCallback(callback, object);
+ marking_state_.RegisterWeakCustomCallback(callback, object);
}
void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
@@ -67,15 +79,13 @@ MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
HeapBase& heap, Heap* v8_heap,
- cppgc::internal::ConcurrentMarkingState& marking_state)
+ cppgc::internal::ConcurrentMarkingState& marking_state,
+ CppHeap::CollectionType collection_type)
: UnifiedHeapMarkingVisitorBase(heap, marking_state,
concurrent_unified_heap_marking_state_),
- local_marking_worklist_(
- v8_heap ? std::make_unique<MarkingWorklists::Local>(
- v8_heap->mark_compact_collector()->marking_worklists())
- : nullptr),
- concurrent_unified_heap_marking_state_(v8_heap,
- local_marking_worklist_.get()) {}
+ local_marking_worklist_(GetV8MarkingWorklists(v8_heap, collection_type)),
+ concurrent_unified_heap_marking_state_(
+ v8_heap, local_marking_worklist_.get(), collection_type) {}
ConcurrentUnifiedHeapMarkingVisitor::~ConcurrentUnifiedHeapMarkingVisitor() {
if (local_marking_worklist_) {
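
GetV8MarkingWorklists above routes a minor cppgc cycle to the minor mark-compact collector's worklists and everything else to the full collector's, and returns an empty pointer when no V8 heap is attached. A compact stand-in version of that selection (simplified types, not the V8 classes):

#include <memory>
#include <string>

enum class CollectionType { kMinor, kMajor };
struct WorklistsStub { std::string owner; };

struct HeapStub {
  WorklistsStub major{"mark-compact"};
  WorklistsStub minor{"minor mark-compact"};
};

// Returns null in detached mode (no V8 heap), mirroring the helper above.
std::unique_ptr<WorklistsStub> SelectWorklists(HeapStub* heap,
                                               CollectionType type) {
  if (!heap) return {};
  const WorklistsStub& source =
      (type == CollectionType::kMajor) ? heap->major : heap->minor;
  return std::make_unique<WorklistsStub>(source);
}

int main() {
  HeapStub heap;
  auto worklists = SelectWorklists(&heap, CollectionType::kMinor);
  return worklists && worklists->owner == "minor mark-compact" ? 0 : 1;
}
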
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index b90dccc7a3..9031fa4f9a 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -67,23 +67,12 @@ class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
~MutatorUnifiedHeapMarkingVisitor() override = default;
};
-class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final
- : public MutatorUnifiedHeapMarkingVisitor {
- public:
- using MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor;
- ~MutatorMinorGCMarkingVisitor() override = default;
-
- protected:
- // Override and make the function empty, since we don't want to trace V8
- // reference during cppgc's minor GC.
- void Visit(const TracedReferenceBase&) final {}
-};
-
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
: public UnifiedHeapMarkingVisitorBase {
public:
ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, Heap*,
- cppgc::internal::ConcurrentMarkingState&);
+ cppgc::internal::ConcurrentMarkingState&,
+ CppHeap::CollectionType);
~ConcurrentUnifiedHeapMarkingVisitor() override;
protected:
@@ -100,20 +89,6 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
UnifiedHeapMarkingState concurrent_unified_heap_marking_state_;
};
-// Same visitor as for full GCs unified heap, but avoids visiting
-// TracedReferences.
-class V8_EXPORT_PRIVATE ConcurrentMinorGCMarkingVisitor final
- : public ConcurrentUnifiedHeapMarkingVisitor {
- public:
- using ConcurrentUnifiedHeapMarkingVisitor::
- ConcurrentUnifiedHeapMarkingVisitor;
-
- private:
- // Override and make the function empty, since we don't want to trace V8
- // reference during cppgc's minor GC.
- void Visit(const TracedReferenceBase&) final {}
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/wrappable-info-inl.h b/deps/v8/src/heap/cppgc-js/wrappable-info-inl.h
new file mode 100644
index 0000000000..e9c65fd2d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/wrappable-info-inl.h
@@ -0,0 +1,50 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_INL_H_
+#define V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_INL_H_
+
+#include "src/base/optional.h"
+#include "src/heap/cppgc-js/wrappable-info.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
+
+namespace v8::internal {
+
+// static
+base::Optional<WrappableInfo> WrappableInfo::From(
+ Isolate* isolate, JSObject wrapper,
+ const WrapperDescriptor& wrapper_descriptor) {
+ DCHECK(wrapper.MayHaveEmbedderFields());
+ return wrapper.GetEmbedderFieldCount() < 2
+ ? base::Optional<WrappableInfo>()
+ : From(isolate,
+ EmbedderDataSlot(wrapper,
+ wrapper_descriptor.wrappable_type_index),
+ EmbedderDataSlot(
+ wrapper, wrapper_descriptor.wrappable_instance_index),
+ wrapper_descriptor);
+}
+
+// static
+base::Optional<WrappableInfo> WrappableInfo::From(
+ Isolate* isolate, const EmbedderDataSlot& type_slot,
+ const EmbedderDataSlot& instance_slot,
+ const WrapperDescriptor& wrapper_descriptor) {
+ void* type;
+ void* instance;
+ if (type_slot.ToAlignedPointer(isolate, &type) && type &&
+ instance_slot.ToAlignedPointer(isolate, &instance) && instance &&
+ (wrapper_descriptor.embedder_id_for_garbage_collected ==
+ WrapperDescriptor::kUnknownEmbedderId ||
+ (*static_cast<uint16_t*>(type) ==
+ wrapper_descriptor.embedder_id_for_garbage_collected))) {
+ return base::Optional<WrappableInfo>(base::in_place, type, instance);
+ }
+ return {};
+}
+
+} // namespace v8::internal
+
+#endif // V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_INL_H_
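
WrappableInfo::From above only produces a value when both embedder slots decode to non-null aligned pointers and the type slot's embedder id matches the wrapper descriptor (or the descriptor leaves the id unknown). A standalone sketch of that validation using std::optional and raw pointers instead of EmbedderDataSlot:

#include <cstdint>
#include <optional>

struct WrapperDescriptorSketch {
  static constexpr uint16_t kUnknownEmbedderId = UINT16_MAX;
  uint16_t embedder_id_for_garbage_collected = kUnknownEmbedderId;
};

struct WrappableInfoSketch {
  void* type;
  void* instance;
};

std::optional<WrappableInfoSketch> FromSlots(
    void* type, void* instance, const WrapperDescriptorSketch& desc) {
  if (!type || !instance) return std::nullopt;  // Both slots must be populated.
  const bool id_matches =
      desc.embedder_id_for_garbage_collected ==
          WrapperDescriptorSketch::kUnknownEmbedderId ||
      *static_cast<uint16_t*>(type) == desc.embedder_id_for_garbage_collected;
  if (!id_matches) return std::nullopt;  // Some other embedder's wrapper.
  return WrappableInfoSketch{type, instance};
}

int main() {
  uint16_t type_tag = 7;
  int payload = 0;
  WrapperDescriptorSketch desc{/*embedder_id_for_garbage_collected=*/7};
  auto info = FromSlots(&type_tag, &payload, desc);
  return info.has_value() ? 0 : 1;
}
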
diff --git a/deps/v8/src/heap/cppgc-js/wrappable-info.h b/deps/v8/src/heap/cppgc-js/wrappable-info.h
new file mode 100644
index 0000000000..7a11daa10a
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/wrappable-info.h
@@ -0,0 +1,34 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_H_
+#define V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_H_
+
+#include "include/v8-cppgc.h"
+#include "src/base/optional.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects.h"
+
+namespace v8::internal {
+
+class Isolate;
+
+struct WrappableInfo final {
+ public:
+ static V8_INLINE base::Optional<WrappableInfo> From(Isolate*, JSObject,
+ const WrapperDescriptor&);
+ static V8_INLINE base::Optional<WrappableInfo> From(
+ Isolate*, const EmbedderDataSlot& type_slot,
+ const EmbedderDataSlot& instance_slot, const WrapperDescriptor&);
+
+ constexpr WrappableInfo(void* type, void* instance)
+ : type(type), instance(instance) {}
+
+ void* type = nullptr;
+ void* instance = nullptr;
+};
+
+} // namespace v8::internal
+
+#endif // V8_HEAP_CPPGC_JS_WRAPPABLE_INFO_H_
diff --git a/deps/v8/src/heap/cppgc/DEPS b/deps/v8/src/heap/cppgc/DEPS
index 37049928d5..d5c1d108b8 100644
--- a/deps/v8/src/heap/cppgc/DEPS
+++ b/deps/v8/src/heap/cppgc/DEPS
@@ -1,3 +1,8 @@
include_rules = [
"+include/cppgc",
+ "-src",
+ "+src/base",
+ "+src/heap/base",
+ "+src/heap/cppgc",
+ "+src/tracing/trace-event.h",
]
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
index 326b35d1f7..0a02ddf61f 100644
--- a/deps/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -148,20 +148,6 @@ void ConcurrentMarkingTask::ProcessWorklists(
return;
}
- if (!DrainWorklistWithYielding(
- job_delegate, concurrent_marking_state,
- concurrent_marker_.incremental_marking_schedule(),
- concurrent_marking_state.retrace_marked_objects_worklist(),
- [&concurrent_marking_visitor](HeapObjectHeader* header) {
- BasePage::FromPayload(header)->SynchronizedLoad();
- // Retracing does not increment marked bytes as the object has
- // already been processed before.
- DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
- concurrent_marking_visitor, *header);
- })) {
- return;
- }
-
{
StatsCollector::DisabledConcurrentScope stats_scope(
concurrent_marker_.heap().stats_collector(),
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index ddb294cb5c..fcbf6ac356 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -20,68 +20,68 @@ HeapObjectName GetHiddenName(const void*, HeapObjectNameForUnnamedObject) {
} // namespace
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback, NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, name_callback, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, GetHiddenName, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, name_callback, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, GetHiddenName, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback, NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, name_callback, false});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, GetHiddenName, false});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, name_callback, false});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, GetHiddenName, false});
}
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 84fb389a7e..67ccd37e25 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -43,7 +43,7 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
-#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
+#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_DARWIN)
// No guard pages on ARM64 macOS. This target has 16 kiB pages, meaning that
// the guard pages do not protect anything, since there is no inaccessible
// region surrounding the allocation.
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index f399665b48..bf76376939 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -94,41 +94,6 @@ class AgeTableResetter final : protected HeapVisitor<AgeTableResetter> {
};
#endif // defined(CPPGC_YOUNG_GENERATION)
-class PlatformWithPageAllocator final : public cppgc::Platform {
- public:
- explicit PlatformWithPageAllocator(std::shared_ptr<cppgc::Platform> delegate)
- : delegate_(std::move(delegate)),
- page_allocator_(GetGlobalPageAllocator()) {
- // This platform wrapper should only be used if the platform doesn't provide
- // a `PageAllocator`.
- CHECK_NULL(delegate->GetPageAllocator());
- }
- ~PlatformWithPageAllocator() override = default;
-
- PageAllocator* GetPageAllocator() final { return &page_allocator_; }
-
- double MonotonicallyIncreasingTime() final {
- return delegate_->MonotonicallyIncreasingTime();
- }
-
- std::shared_ptr<TaskRunner> GetForegroundTaskRunner() final {
- return delegate_->GetForegroundTaskRunner();
- }
-
- std::unique_ptr<JobHandle> PostJob(TaskPriority priority,
- std::unique_ptr<JobTask> job_task) final {
- return delegate_->PostJob(std::move(priority), std::move(job_task));
- }
-
- TracingController* GetTracingController() final {
- return delegate_->GetTracingController();
- }
-
- private:
- std::shared_ptr<cppgc::Platform> delegate_;
- cppgc::PageAllocator& page_allocator_;
-};
-
} // namespace
HeapBase::HeapBase(
@@ -137,11 +102,7 @@ HeapBase::HeapBase(
StackSupport stack_support, MarkingType marking_support,
SweepingType sweeping_support, GarbageCollector& garbage_collector)
: raw_heap_(this, custom_spaces),
- platform_(platform->GetPageAllocator()
- ? std::move(platform)
- : std::static_pointer_cast<cppgc::Platform>(
- std::make_shared<PlatformWithPageAllocator>(
- std::move(platform)))),
+ platform_(std::move(platform)),
oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
#if defined(LEAK_SANITIZER)
lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
@@ -212,10 +173,13 @@ size_t HeapBase::ExecutePreFinalizers() {
#if defined(CPPGC_YOUNG_GENERATION)
void HeapBase::EnableGenerationalGC() {
DCHECK(in_atomic_pause());
+ if (HeapHandle::is_young_generation_enabled_) return;
// Notify the global flag that the write barrier must always be enabled.
YoungGenerationEnabler::Enable();
// Enable young generation for the current heap.
HeapHandle::is_young_generation_enabled_ = true;
+ // Assume everything that has so far been allocated is young.
+ object_allocator_.MarkAllPagesAsYoung();
}
void HeapBase::ResetRememberedSet() {
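
EnableGenerationalGC above is now idempotent and, on first activation, marks every existing page as young so objects allocated before the switch are treated as freshly allocated. A tiny sketch of that guard:

#include <cassert>

class GenerationalSwitchSketch {
 public:
  void Enable() {
    if (enabled_) return;  // Second call is a no-op.
    enabled_ = true;
    ++pages_marked_young_;  // Stand-in for MarkAllPagesAsYoung().
  }
  int pages_marked_young() const { return pages_marked_young_; }

 private:
  bool enabled_ = false;
  int pages_marked_young_ = 0;
};

int main() {
  GenerationalSwitchSketch s;
  s.Enable();
  s.Enable();
  assert(s.pages_marked_young() == 1);
}
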
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 3e4f24cad4..ac1dd3ff5b 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -183,11 +183,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
stack_state_of_prev_gc_ = stack_state;
}
- uintptr_t stack_end_of_current_gc() const { return stack_end_of_current_gc_; }
- void SetStackEndOfCurrentGC(uintptr_t stack_end) {
- stack_end_of_current_gc_ = stack_end;
- }
-
void SetInAtomicPauseForTesting(bool value) { in_atomic_pause_ = value; }
virtual void StartIncrementalGarbageCollectionForTesting() = 0;
@@ -203,6 +198,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
MarkingType marking_support() const { return marking_support_; }
SweepingType sweeping_support() const { return sweeping_support_; }
+ bool incremental_marking_supported() const {
+ return marking_support_ != MarkingType::kAtomic;
+ }
+
bool generational_gc_supported() const {
const bool supported = is_young_generation_enabled();
#if defined(CPPGC_YOUNG_GENERATION)
@@ -289,10 +288,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
EmbedderStackState::kNoHeapPointers;
std::unique_ptr<EmbedderStackState> override_stack_state_;
- // Marker that signals end of the interesting stack region in which on-heap
- // pointers can be found.
- uintptr_t stack_end_of_current_gc_ = 0;
-
bool in_atomic_pause_ = false;
int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 7e85eeca47..51c2e5b7c6 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -202,8 +202,7 @@ void NormalPage::Destroy(NormalPage* page) {
}
NormalPage::NormalPage(HeapBase& heap, BaseSpace& space)
- : BasePage(heap, space, PageType::kNormal),
- object_start_bitmap_(PayloadStart()) {
+ : BasePage(heap, space, PageType::kNormal), object_start_bitmap_() {
DCHECK_LT(kLargeObjectSizeThreshold,
static_cast<size_t>(PayloadEnd() - PayloadStart()));
}
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 13e2fe1993..1f3e70440d 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -146,8 +146,13 @@ void Heap::StartGarbageCollection(GCConfig config) {
epoch_++;
#if defined(CPPGC_YOUNG_GENERATION)
- if (config.collection_type == CollectionType::kMajor)
+ if (config.collection_type == CollectionType::kMajor &&
+ generational_gc_supported()) {
+ stats_collector()->NotifyUnmarkingStarted(config.collection_type);
+ cppgc::internal::StatsCollector::EnabledScope stats_scope(
+ stats_collector(), cppgc::internal::StatsCollector::kUnmark);
SequentialUnmarker unmarker(raw_heap());
+ }
#endif // defined(CPPGC_YOUNG_GENERATION)
const MarkingConfig marking_config{config.collection_type, config.stack_state,
@@ -161,7 +166,7 @@ void Heap::FinalizeGarbageCollection(StackState stack_state) {
DCHECK(!in_no_gc_scope());
CHECK(!in_disallow_gc_scope());
config_.stack_state = stack_state;
- SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+ stack()->SetMarkerToCurrentStackPosition();
in_atomic_pause_ = true;
#if defined(CPPGC_YOUNG_GENERATION)
@@ -182,7 +187,7 @@ void Heap::FinalizeGarbageCollection(StackState stack_state) {
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
MarkingVerifier verifier(*this, config_.collection_type);
- verifier.Run(config_.stack_state, stack_end_of_current_gc(),
+ verifier.Run(config_.stack_state,
stats_collector()->marked_bytes_on_current_cycle() +
bytes_allocated_in_prefinalizers);
#endif // CPPGC_VERIFY_HEAP
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 306b880857..bc715e1917 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -395,15 +395,32 @@ void MarkerBase::ProcessWeakness() {
}
#endif // defined(CPPGC_YOUNG_GENERATION)
- MarkingWorklists::WeakCallbackItem item;
- MarkingWorklists::WeakCallbackWorklist::Local& local =
- mutator_marking_state_.weak_callback_worklist();
- while (local.Pop(&item)) {
- item.callback(broker, item.parameter);
+ {
+ // First, process weak container callbacks.
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(),
+ StatsCollector::kWeakContainerCallbacksProcessing);
+ MarkingWorklists::WeakCallbackItem item;
+ MarkingWorklists::WeakCallbackWorklist::Local& collections_local =
+ mutator_marking_state_.weak_container_callback_worklist();
+ while (collections_local.Pop(&item)) {
+ item.callback(broker, item.parameter);
+ }
+ }
+ {
+ // Then, process custom weak callbacks.
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(), StatsCollector::kCustomCallbacksProcessing);
+ MarkingWorklists::WeakCallbackItem item;
+ MarkingWorklists::WeakCustomCallbackWorklist::Local& custom_callbacks =
+ mutator_marking_state_.weak_custom_callback_worklist();
+ while (custom_callbacks.Pop(&item)) {
+ item.callback(broker, item.parameter);
#if defined(CPPGC_YOUNG_GENERATION)
- if (heap().generational_gc_supported())
- heap().remembered_set().AddWeakCallback(item);
+ if (heap().generational_gc_supported())
+ heap().remembered_set().AddWeakCallback(item);
#endif // defined(CPPGC_YOUNG_GENERATION)
+ }
}
if (job_handle) {
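
ProcessWeakness above now drains two distinct worklists in order: weak-container callbacks first, then custom weak callbacks, each under its own stats scope, and only the custom ones are re-registered with the remembered set under generational GC. A reduced sketch of the two-phase drain using std::queue stand-ins:

#include <iostream>
#include <queue>

struct WeakCallbackItem {
  void (*callback)(const void* parameter);
  const void* parameter;
};

using WorklistStub = std::queue<WeakCallbackItem>;

void ProcessWeakness(WorklistStub& container_callbacks,
                     WorklistStub& custom_callbacks) {
  // Phase 1: weak-container callbacks (backings of weak collections).
  while (!container_callbacks.empty()) {
    WeakCallbackItem item = container_callbacks.front();
    container_callbacks.pop();
    item.callback(item.parameter);
  }
  // Phase 2: custom weak callbacks registered by user code. Under generational
  // GC the real code also records these in the remembered set here.
  while (!custom_callbacks.empty()) {
    WeakCallbackItem item = custom_callbacks.front();
    custom_callbacks.pop();
    item.callback(item.parameter);
  }
}

int main() {
  WorklistStub containers, custom;
  containers.push({[](const void*) { std::cout << "container callback\n"; }, nullptr});
  custom.push({[](const void*) { std::cout << "custom callback\n"; }, nullptr});
  ProcessWeakness(containers, custom);
}
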
diff --git a/deps/v8/src/heap/cppgc/marking-state.cc b/deps/v8/src/heap/cppgc/marking-state.cc
index c64173e1ec..660e6b2aec 100644
--- a/deps/v8/src/heap/cppgc/marking-state.cc
+++ b/deps/v8/src/heap/cppgc/marking-state.cc
@@ -12,6 +12,48 @@
namespace cppgc {
namespace internal {
+void MarkingStateBase::Publish() { marking_worklist_.Publish(); }
+
+BasicMarkingState::BasicMarkingState(HeapBase& heap,
+ MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists),
+ previously_not_fully_constructed_worklist_(
+ *marking_worklists.previously_not_fully_constructed_worklist()),
+ weak_container_callback_worklist_(
+ *marking_worklists.weak_container_callback_worklist()),
+ parallel_weak_callback_worklist_(
+ *marking_worklists.parallel_weak_callback_worklist()),
+ weak_custom_callback_worklist_(
+ *marking_worklists.weak_custom_callback_worklist()),
+ write_barrier_worklist_(*marking_worklists.write_barrier_worklist()),
+ concurrent_marking_bailout_worklist_(
+ *marking_worklists.concurrent_marking_bailout_worklist()),
+ discovered_ephemeron_pairs_worklist_(
+ *marking_worklists.discovered_ephemeron_pairs_worklist()),
+ ephemeron_pairs_for_processing_worklist_(
+ *marking_worklists.ephemeron_pairs_for_processing_worklist()),
+ weak_containers_worklist_(*marking_worklists.weak_containers_worklist()) {
+ if (compaction_worklists) {
+ movable_slots_worklist_ =
+ std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
+ *compaction_worklists->movable_slots_worklist());
+ }
+}
+
+void BasicMarkingState::Publish() {
+ MarkingStateBase::Publish();
+ previously_not_fully_constructed_worklist_.Publish();
+ weak_container_callback_worklist_.Publish();
+ parallel_weak_callback_worklist_.Publish();
+ weak_custom_callback_worklist_.Publish();
+ write_barrier_worklist_.Publish();
+ concurrent_marking_bailout_worklist_.Publish();
+ discovered_ephemeron_pairs_worklist_.Publish();
+ ephemeron_pairs_for_processing_worklist_.Publish();
+ if (movable_slots_worklist_) movable_slots_worklist_->Publish();
+}
+
void MutatorMarkingState::FlushNotFullyConstructedObjects() {
std::unordered_set<HeapObjectHeader*> objects =
not_fully_constructed_worklist_.Extract<AccessMode::kAtomic>();
@@ -31,5 +73,10 @@ void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
}
}
+void MutatorMarkingState::Publish() {
+ BasicMarkingState::Publish();
+ retrace_marked_objects_worklist_.Publish();
+}
+
} // namespace internal
} // namespace cppgc
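
With Publish() virtual and moved out of line, each marking-state class publishes its own local worklists and then delegates to its base, so the whole hierarchy is flushed exactly once. A minimal sketch of that chaining:

#include <iostream>

class MarkingStateBaseSketch {
 public:
  virtual ~MarkingStateBaseSketch() = default;
  virtual void Publish() { std::cout << "publish marking worklist\n"; }
};

class BasicMarkingStateSketch : public MarkingStateBaseSketch {
 public:
  void Publish() override {
    MarkingStateBaseSketch::Publish();  // Base worklists first.
    std::cout << "publish weak/write-barrier/ephemeron worklists\n";
  }
};

class MutatorMarkingStateSketch final : public BasicMarkingStateSketch {
 public:
  void Publish() override {
    BasicMarkingStateSketch::Publish();
    std::cout << "publish retrace-marked-objects worklist\n";
  }
};

int main() {
  MutatorMarkingStateSketch state;
  state.Publish();  // Publishes every level of the hierarchy exactly once.
}
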
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index ca3656a8d1..4ce1ce4074 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -24,6 +24,7 @@ namespace internal {
class MarkingStateBase {
public:
inline MarkingStateBase(HeapBase&, MarkingWorklists&);
+ virtual ~MarkingStateBase() = default;
MarkingStateBase(const MarkingStateBase&) = delete;
MarkingStateBase& operator=(const MarkingStateBase&) = delete;
@@ -33,7 +34,7 @@ class MarkingStateBase {
inline void PushMarked(HeapObjectHeader&, TraceDescriptor desc);
- void Publish() { marking_worklist_.Publish(); }
+ V8_EXPORT_PRIVATE virtual void Publish();
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
return marking_worklist_;
@@ -107,15 +108,16 @@ void MarkingStateBase::PushMarked(HeapObjectHeader& header,
class BasicMarkingState : public MarkingStateBase {
public:
- inline BasicMarkingState(HeapBase& heap, MarkingWorklists&,
- CompactionWorklists*);
+ BasicMarkingState(HeapBase& heap, MarkingWorklists&, CompactionWorklists*);
+ ~BasicMarkingState() override = default;
BasicMarkingState(const BasicMarkingState&) = delete;
BasicMarkingState& operator=(const BasicMarkingState&) = delete;
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
- inline void RegisterWeakCallback(WeakCallback, const void*);
+ inline void RegisterWeakContainerCallback(WeakCallback, const void*);
+ inline void RegisterWeakCustomCallback(WeakCallback, const void*);
void RegisterMovableReference(const void** slot) {
if (!movable_slots_worklist_) return;
@@ -136,29 +138,24 @@ class BasicMarkingState : public MarkingStateBase {
inline void AccountMarkedBytes(size_t);
size_t marked_bytes() const { return marked_bytes_; }
- void Publish() {
- MarkingStateBase::Publish();
- previously_not_fully_constructed_worklist_.Publish();
- weak_callback_worklist_.Publish();
- parallel_weak_callback_worklist_.Publish();
- write_barrier_worklist_.Publish();
- concurrent_marking_bailout_worklist_.Publish();
- discovered_ephemeron_pairs_worklist_.Publish();
- ephemeron_pairs_for_processing_worklist_.Publish();
- if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
- }
+ V8_EXPORT_PRIVATE void Publish() override;
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
}
- MarkingWorklists::WeakCallbackWorklist::Local& weak_callback_worklist() {
- return weak_callback_worklist_;
+ MarkingWorklists::WeakCallbackWorklist::Local&
+ weak_container_callback_worklist() {
+ return weak_container_callback_worklist_;
}
MarkingWorklists::WeakCallbackWorklist::Local&
parallel_weak_callback_worklist() {
return parallel_weak_callback_worklist_;
}
+ MarkingWorklists::WeakCustomCallbackWorklist::Local&
+ weak_custom_callback_worklist() {
+ return weak_custom_callback_worklist_;
+ }
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
return write_barrier_worklist_;
}
@@ -177,10 +174,6 @@ class BasicMarkingState : public MarkingStateBase {
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist() {
return weak_containers_worklist_;
}
- MarkingWorklists::RetraceMarkedObjectsWorklist::Local&
- retrace_marked_objects_worklist() {
- return retrace_marked_objects_worklist_;
- }
CompactionWorklists::MovableReferencesWorklist::Local*
movable_slots_worklist() {
@@ -200,15 +193,14 @@ class BasicMarkingState : public MarkingStateBase {
protected:
inline void RegisterWeakContainer(HeapObjectHeader&);
- inline bool IsCompactionEnabled() const {
- return movable_slots_worklist_.get();
- }
-
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
- MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
+ MarkingWorklists::WeakCallbackWorklist::Local
+ weak_container_callback_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local
parallel_weak_callback_worklist_;
+ MarkingWorklists::WeakCustomCallbackWorklist::Local
+ weak_custom_callback_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local
concurrent_marking_bailout_worklist_;
@@ -217,8 +209,6 @@ class BasicMarkingState : public MarkingStateBase {
MarkingWorklists::EphemeronPairsWorklist::Local
ephemeron_pairs_for_processing_worklist_;
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist_;
- MarkingWorklists::RetraceMarkedObjectsWorklist::Local
- retrace_marked_objects_worklist_;
// Existence of the worklist (|movable_slot_worklist_| != nullptr) denotes
// that compaction is currently enabled and slots must be recorded.
std::unique_ptr<CompactionWorklists::MovableReferencesWorklist::Local>
@@ -230,32 +220,6 @@ class BasicMarkingState : public MarkingStateBase {
bool in_atomic_pause_ = false;
};
-BasicMarkingState::BasicMarkingState(HeapBase& heap,
- MarkingWorklists& marking_worklists,
- CompactionWorklists* compaction_worklists)
- : MarkingStateBase(heap, marking_worklists),
- previously_not_fully_constructed_worklist_(
- *marking_worklists.previously_not_fully_constructed_worklist()),
- weak_callback_worklist_(*marking_worklists.weak_callback_worklist()),
- parallel_weak_callback_worklist_(
- *marking_worklists.parallel_weak_callback_worklist()),
- write_barrier_worklist_(*marking_worklists.write_barrier_worklist()),
- concurrent_marking_bailout_worklist_(
- *marking_worklists.concurrent_marking_bailout_worklist()),
- discovered_ephemeron_pairs_worklist_(
- *marking_worklists.discovered_ephemeron_pairs_worklist()),
- ephemeron_pairs_for_processing_worklist_(
- *marking_worklists.ephemeron_pairs_for_processing_worklist()),
- weak_containers_worklist_(*marking_worklists.weak_containers_worklist()),
- retrace_marked_objects_worklist_(
- *marking_worklists.retrace_marked_objects_worklist()) {
- if (compaction_worklists) {
- movable_slots_worklist_ =
- std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
- *compaction_worklists->movable_slots_worklist());
- }
-}
-
void BasicMarkingState::RegisterWeakReferenceIfNeeded(
const void* object, TraceDescriptor desc, WeakCallback weak_callback,
const void* parameter) {
@@ -270,10 +234,16 @@ void BasicMarkingState::RegisterWeakReferenceIfNeeded(
parallel_weak_callback_worklist_.Push({weak_callback, parameter});
}
-void BasicMarkingState::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void BasicMarkingState::RegisterWeakContainerCallback(WeakCallback callback,
+ const void* object) {
DCHECK_NOT_NULL(callback);
- weak_callback_worklist_.Push({callback, object});
+ weak_container_callback_worklist_.Push({callback, object});
+}
+
+void BasicMarkingState::RegisterWeakCustomCallback(WeakCallback callback,
+ const void* object) {
+ DCHECK_NOT_NULL(callback);
+ weak_custom_callback_worklist_.Push({callback, object});
}
void BasicMarkingState::RegisterWeakContainer(HeapObjectHeader& header) {
@@ -301,7 +271,7 @@ void BasicMarkingState::ProcessWeakContainer(const void* object,
if (!MarkNoPush(header)) return;
// Register final weak processing of the backing store.
- RegisterWeakCallback(callback, data);
+ RegisterWeakContainerCallback(callback, data);
// Weak containers might not require tracing. In such cases the callback in
// the TraceDescriptor will be nullptr. For ephemerons the callback will be
@@ -362,11 +332,14 @@ void BasicMarkingState::AccountMarkedBytes(size_t marked_bytes) {
marked_bytes_ += marked_bytes;
}
-class MutatorMarkingState : public BasicMarkingState {
+class MutatorMarkingState final : public BasicMarkingState {
public:
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- : BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
+ : BasicMarkingState(heap, marking_worklists, compaction_worklists),
+ retrace_marked_objects_worklist_(
+ *marking_worklists.retrace_marked_objects_worklist()) {}
+ ~MutatorMarkingState() override = default;
inline bool MarkNoPush(HeapObjectHeader& header) {
return MutatorMarkingState::BasicMarkingState::MarkNoPush(header);
@@ -389,6 +362,13 @@ class MutatorMarkingState : public BasicMarkingState {
inline bool IsMarkedWeakContainer(HeapObjectHeader&);
+ MarkingWorklists::RetraceMarkedObjectsWorklist::Local&
+ retrace_marked_objects_worklist() {
+ return retrace_marked_objects_worklist_;
+ }
+
+ V8_EXPORT_PRIVATE void Publish() override;
+
private:
// Weak containers are strongly retraced during conservative stack scanning.
// Stack scanning happens once per GC at the start of the atomic pause.
@@ -398,13 +378,16 @@ class MutatorMarkingState : public BasicMarkingState {
static constexpr size_t kMaxCacheSize = 8;
public:
- inline bool Contains(const HeapObjectHeader*);
+ inline bool Contains(const HeapObjectHeader*) const;
inline void Insert(const HeapObjectHeader*);
private:
std::vector<const HeapObjectHeader*> recently_retraced_cache_;
size_t last_used_index_ = -1;
} recently_retraced_weak_containers_;
+
+ MarkingWorklists::RetraceMarkedObjectsWorklist::Local
+ retrace_marked_objects_worklist_;
};
void MutatorMarkingState::ReTraceMarkedWeakContainer(cppgc::Visitor& visitor,
@@ -450,7 +433,7 @@ bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
}
bool MutatorMarkingState::RecentlyRetracedWeakContainers::Contains(
- const HeapObjectHeader* header) {
+ const HeapObjectHeader* header) const {
return std::find(recently_retraced_cache_.begin(),
recently_retraced_cache_.end(),
header) != recently_retraced_cache_.end();
@@ -465,13 +448,15 @@ void MutatorMarkingState::RecentlyRetracedWeakContainers::Insert(
recently_retraced_cache_[last_used_index_] = header;
}
-class ConcurrentMarkingState : public BasicMarkingState {
+class ConcurrentMarkingState final : public BasicMarkingState {
public:
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
: BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
- ~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
+ ~ConcurrentMarkingState() override {
+ DCHECK_EQ(last_marked_bytes_, marked_bytes_);
+ }
size_t RecentlyMarkedBytes() {
return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 0b649c7d3f..fbd7de335f 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -45,8 +45,7 @@ MarkingVerifierBase::MarkingVerifierBase(
collection_type_(collection_type) {}
void MarkingVerifierBase::Run(
- StackState stack_state, uintptr_t stack_end,
- v8::base::Optional<size_t> expected_marked_bytes) {
+ StackState stack_state, v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
// Avoid verifying the stack when running with TSAN as the TSAN runtime changes
// stack contents when e.g. working with locks. Specifically, the marker uses
@@ -63,8 +62,7 @@ void MarkingVerifierBase::Run(
#if !defined(THREAD_SANITIZER) && !defined(CPPGC_POINTER_COMPRESSION)
if (stack_state == StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
- heap_.stack()->IteratePointersUnsafe(
- this, reinterpret_cast<const void*>(stack_end));
+ heap_.stack()->IteratePointersUntilMarker(this);
// The objects found through the unsafe iteration are only a subset of the
// regular iteration as they miss objects held alive only from callee-saved
// registers that are never pushed on the stack and SafeStack.
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index c966aea51f..5132b3af9f 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -41,7 +41,7 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(StackState, uintptr_t, v8::base::Optional<size_t>);
+ void Run(StackState, v8::base::Optional<size_t>);
protected:
MarkingVerifierBase(HeapBase&, CollectionType, VerificationState&,
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index e479f7f6b4..544b6f8100 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -41,7 +41,7 @@ void MarkingVisitorBase::VisitWeakContainer(const void* object,
void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
const void* object) {
- marking_state_.RegisterWeakCallback(callback, object);
+ marking_state_.RegisterWeakCustomCallback(callback, object);
}
void MarkingVisitorBase::HandleMovableReference(const void** slot) {
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.cc b/deps/v8/src/heap/cppgc/marking-worklists.cc
index 8307f0ed0a..2f9afeddc9 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.cc
+++ b/deps/v8/src/heap/cppgc/marking-worklists.cc
@@ -15,8 +15,9 @@ void MarkingWorklists::ClearForTesting() {
not_fully_constructed_worklist_.Clear();
previously_not_fully_constructed_worklist_.Clear();
write_barrier_worklist_.Clear();
- weak_callback_worklist_.Clear();
+ weak_container_callback_worklist_.Clear();
parallel_weak_callback_worklist_.Clear();
+ weak_custom_callback_worklist_.Clear();
concurrent_marking_bailout_worklist_.Clear();
discovered_ephemeron_pairs_worklist_.Clear();
ephemeron_pairs_for_processing_worklist_.Clear();
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index f2cde89c4c..7ec2e8daf2 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -76,6 +76,8 @@ class MarkingWorklists {
heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
+ using WeakCustomCallbackWorklist =
+ heap::base::Worklist<WeakCallbackItem, 16 /* local entries */>;
using WriteBarrierWorklist =
heap::base::Worklist<HeapObjectHeader*, 64 /*local entries */>;
using ConcurrentMarkingBailoutWorklist =
@@ -98,12 +100,15 @@ class MarkingWorklists {
WriteBarrierWorklist* write_barrier_worklist() {
return &write_barrier_worklist_;
}
- WeakCallbackWorklist* weak_callback_worklist() {
- return &weak_callback_worklist_;
+ WeakCallbackWorklist* weak_container_callback_worklist() {
+ return &weak_container_callback_worklist_;
}
WeakCallbackWorklist* parallel_weak_callback_worklist() {
return &parallel_weak_callback_worklist_;
}
+ WeakCustomCallbackWorklist* weak_custom_callback_worklist() {
+ return &weak_custom_callback_worklist_;
+ }
ConcurrentMarkingBailoutWorklist* concurrent_marking_bailout_worklist() {
return &concurrent_marking_bailout_worklist_;
}
@@ -128,9 +133,12 @@ class MarkingWorklists {
PreviouslyNotFullyConstructedWorklist
previously_not_fully_constructed_worklist_;
WriteBarrierWorklist write_barrier_worklist_;
- // Hold weak callbacks which can only invoke on main thread.
- WeakCallbackWorklist weak_callback_worklist_;
- // Hold weak callbacks which can invoke on main or worker thread.
+ // Hold weak callbacks for weak containers (e.g. containers with WeakMembers).
+ WeakCallbackWorklist weak_container_callback_worklist_;
+ // Hold weak custom callbacks (e.g. for containers with UntracedMembers).
+ WeakCustomCallbackWorklist weak_custom_callback_worklist_;
+ // Hold weak callbacks which can be invoked on the main or a worker thread
+ // (used for regular WeakMember).
WeakCallbackWorklist parallel_weak_callback_worklist_;
ConcurrentMarkingBailoutWorklist concurrent_marking_bailout_worklist_;
EphemeronPairsWorklist discovered_ephemeron_pairs_worklist_;
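The worklist aliases above all carry a "local entries" capacity parameter. As a rough, hedged sketch of what such a parameter typically controls (this is an illustration, not V8's heap::base::Worklist; all names are invented), here is a toy segmented worklist where producers batch pushes into a fixed-size local segment and publish it to a shared global list when the segment fills up:

#include <array>
#include <cstddef>
#include <mutex>
#include <vector>

template <typename T, size_t kLocalEntries>
class SegmentedWorklist {
 public:
  class Local {
   public:
    explicit Local(SegmentedWorklist* shared) : shared_(shared) {}
    ~Local() { Publish(); }
    void Push(const T& value) {
      segment_[size_++] = value;
      if (size_ == kLocalEntries) Publish();  // segment full: hand it over
    }
    void Publish() {
      if (size_ == 0) return;
      std::lock_guard<std::mutex> guard(shared_->mutex_);
      shared_->global_.insert(shared_->global_.end(), segment_.begin(),
                              segment_.begin() + size_);
      size_ = 0;
    }

   private:
    SegmentedWorklist* const shared_;
    std::array<T, kLocalEntries> segment_{};
    size_t size_ = 0;
  };

  size_t GlobalSize() {
    std::lock_guard<std::mutex> guard(mutex_);
    return global_.size();
  }

 private:
  std::mutex mutex_;
  std::vector<T> global_;
};

int main() {
  SegmentedWorklist<int, 16> worklist;  // 16 "local entries" per segment
  {
    SegmentedWorklist<int, 16>::Local local(&worklist);
    for (int i = 0; i < 40; ++i) local.Push(i);  // publishes two full segments
  }  // destructor publishes the remaining 8 entries
  return worklist.GlobalSize() == 40 ? 0 : 1;
}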
diff --git a/deps/v8/src/heap/cppgc/member-storage.cc b/deps/v8/src/heap/cppgc/member-storage.cc
index c457c60ba4..b315ecfda2 100644
--- a/deps/v8/src/heap/cppgc/member-storage.cc
+++ b/deps/v8/src/heap/cppgc/member-storage.cc
@@ -13,7 +13,8 @@ namespace cppgc {
namespace internal {
#if defined(CPPGC_POINTER_COMPRESSION)
-uintptr_t CageBaseGlobal::g_base_ = CageBaseGlobal::kLowerHalfWordMask;
+alignas(api_constants::kCachelineSize) CageBaseGlobal::Base
+ CageBaseGlobal::g_base_ = {CageBaseGlobal::kLowerHalfWordMask};
#endif // defined(CPPGC_POINTER_COMPRESSION)
// Debugging helpers.
@@ -21,20 +22,21 @@ uintptr_t CageBaseGlobal::g_base_ = CageBaseGlobal::kLowerHalfWordMask;
#if defined(CPPGC_POINTER_COMPRESSION)
extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
_cppgc_internal_Decompress_Compressed_Pointer(uint32_t cmprsd) {
- return MemberStorage::Decompress(cmprsd);
+ return CompressedPointer::Decompress(cmprsd);
}
#endif // !defined(CPPGC_POINTER_COMPRESSION)
class MemberDebugHelper final {
public:
- static void* PrintUncompressed(MemberBase* m) {
+ static void* Uncompress(MemberBase<DefaultMemberStorage>* m) {
return const_cast<void*>(m->GetRaw());
}
};
extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
-_cppgc_internal_Print_Member(MemberBase* m) {
- return MemberDebugHelper::PrintUncompressed(m);
+_cppgc_internal_Uncompress_Member(void* m) {
+ return MemberDebugHelper::Uncompress(
+ static_cast<MemberBase<DefaultMemberStorage>*>(m));
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/member-storage.h b/deps/v8/src/heap/cppgc/member-storage.h
index 829bea28d5..168c79caf1 100644
--- a/deps/v8/src/heap/cppgc/member-storage.h
+++ b/deps/v8/src/heap/cppgc/member-storage.h
@@ -17,12 +17,13 @@ class CageBaseGlobalUpdater final {
static void UpdateCageBase(uintptr_t cage_base) {
CPPGC_DCHECK(CageBaseGlobal::IsBaseConsistent());
CPPGC_DCHECK(0u == (cage_base & CageBaseGlobal::kLowerHalfWordMask));
- CageBaseGlobal::g_base_ = cage_base | CageBaseGlobal::kLowerHalfWordMask;
+ CageBaseGlobal::g_base_.base =
+ cage_base | CageBaseGlobal::kLowerHalfWordMask;
}
static uintptr_t GetCageBase() {
CPPGC_DCHECK(CageBaseGlobal::IsBaseConsistent());
- return CageBaseGlobal::g_base_ & ~CageBaseGlobal::kLowerHalfWordMask;
+ return CageBaseGlobal::g_base_.base & ~CageBaseGlobal::kLowerHalfWordMask;
}
};
#endif // defined(CPPGC_POINTER_COMPRESSION)
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index b88ba5c200..f0a394a72d 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -113,22 +113,23 @@ ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
oom_handler_(oom_handler),
garbage_collector_(garbage_collector) {}
-void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
- AlignVal alignment,
- GCInfoIndex gcinfo) {
- void* memory = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
+void ObjectAllocator::OutOfLineAllocateGCSafePoint(NormalPageSpace& space,
+ size_t size,
+ AlignVal alignment,
+ GCInfoIndex gcinfo,
+ void** object) {
+ *object = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
stats_collector_.NotifySafePointForConservativeCollection();
if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
// Objects allocated during pre finalizers should be allocated as black
// since marking is already done. Atomics are not needed because there is
// no concurrent marking in the background.
- HeapObjectHeader::FromObject(memory).MarkNonAtomic();
+ HeapObjectHeader::FromObject(*object).MarkNonAtomic();
// Resetting the allocation buffer forces all further allocations in pre
// finalizers to go through this slow path.
ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
}
- return memory;
}
void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
@@ -283,6 +284,26 @@ void ObjectAllocator::ResetLinearAllocationBuffers() {
visitor.Traverse(raw_heap_);
}
+void ObjectAllocator::MarkAllPagesAsYoung() {
+ class YoungMarker : public HeapVisitor<YoungMarker> {
+ public:
+ bool VisitNormalPage(NormalPage& page) {
+ MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
+ return true;
+ }
+
+ bool VisitLargePage(LargePage& page) {
+ MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
+ return true;
+ }
+ } visitor;
+ USE(visitor);
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ visitor.Traverse(raw_heap_);
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
bool ObjectAllocator::in_disallow_gc_scope() const {
return raw_heap_.heap()->in_disallow_gc_scope();
}
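The prefinalizer comment in the hunk above relies on the "allocate black" rule: objects created at a point where the marker will not visit them again (here, during prefinalizers after marking has finished) start out marked so the collector never treats them as unreachable. A toy sketch of that rule only, with invented names and none of V8's header layout:

#include <cstdio>

struct ObjectHeader {
  bool marked = false;
};

bool g_allocate_black = false;  // e.g. set while invoking prefinalizers

ObjectHeader* AllocateObject() {
  ObjectHeader* header = new ObjectHeader();
  if (g_allocate_black) header->marked = true;  // allocate black
  return header;
}

int main() {
  g_allocate_black = true;
  ObjectHeader* obj = AllocateObject();
  std::printf("allocated %s\n", obj->marked ? "black" : "white");
  delete obj;
  return 0;
}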
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index 77f26ce3b5..82d1441af1 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -52,6 +52,7 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
GCInfoIndex gcinfo, CustomSpaceIndex space_index);
void ResetLinearAllocationBuffers();
+ void MarkAllPagesAsYoung();
private:
bool in_disallow_gc_scope() const;
@@ -61,11 +62,21 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
- inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
- GCInfoIndex gcinfo);
- inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
- AlignVal alignment, GCInfoIndex gcinfo);
- void* OutOfLineAllocate(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
+ inline void* AllocateObjectOnSpace(NormalPageSpace&, size_t, GCInfoIndex);
+ inline void* AllocateObjectOnSpace(NormalPageSpace&, size_t, AlignVal,
+ GCInfoIndex);
+ inline void* OutOfLineAllocate(NormalPageSpace&, size_t, AlignVal,
+ GCInfoIndex);
+
+ // Called from the fast-path LAB allocation when the LAB capacity cannot fit
+ // the allocation or a large object is requested. Uses an out parameter
+ // because `V8_PRESERVE_MOST` cannot handle non-void return values.
+ //
+ // Prefer using `OutOfLineAllocate()`.
+ void V8_PRESERVE_MOST OutOfLineAllocateGCSafePoint(NormalPageSpace&, size_t,
+ AlignVal, GCInfoIndex,
+ void**);
+ // Raw allocation, does not emit safepoint for conservative GC.
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
bool TryRefillLinearAllocationBuffer(NormalPageSpace&, size_t);
@@ -135,6 +146,14 @@ RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
return RawHeap::RegularSpaceType::kNormal4;
}
+void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
+ AlignVal alignment,
+ GCInfoIndex gcinfo) {
+ void* object;
+ OutOfLineAllocateGCSafePoint(space, size, alignment, gcinfo, &object);
+ return object;
+}
+
void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
size_t size, AlignVal alignment,
GCInfoIndex gcinfo) {
@@ -174,13 +193,13 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
.SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(&filler));
lab_allocation_will_succeed = true;
}
- if (lab_allocation_will_succeed) {
- void* object = AllocateObjectOnSpace(space, size, gcinfo);
- DCHECK_NOT_NULL(object);
- DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(object) & kAlignmentMask);
- return object;
+ if (V8_UNLIKELY(!lab_allocation_will_succeed)) {
+ return OutOfLineAllocate(space, size, alignment, gcinfo);
}
- return OutOfLineAllocate(space, size, alignment, gcinfo);
+ void* object = AllocateObjectOnSpace(space, size, gcinfo);
+ DCHECK_NOT_NULL(object);
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(object) & kAlignmentMask);
+ return object;
}
void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
@@ -189,7 +208,7 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
NormalPageSpace::LinearAllocationBuffer& current_lab =
space.linear_allocation_buffer();
- if (current_lab.size() < size) {
+ if (V8_UNLIKELY(current_lab.size() < size)) {
return OutOfLineAllocate(
space, size, static_cast<AlignVal>(kAllocationGranularity), gcinfo);
}
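The comment on OutOfLineAllocateGCSafePoint() above motivates routing the slow path's result through an out parameter and recovering a value-returning API with a thin inline wrapper, as the new OutOfLineAllocate() does. A hedged, standalone sketch of that shape with invented names and a guarded attribute macro (assuming only that the attribute, where supported, is applied to the slow path):

#include <cstddef>
#include <new>

#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(preserve_most)
#define PRESERVE_MOST __attribute__((preserve_most))
#endif
#endif
#ifndef PRESERVE_MOST
#define PRESERVE_MOST
#endif

// Slow path: kept void-returning and hands its result back via `result`.
PRESERVE_MOST void AllocateSlow(size_t size, void** result) {
  *result = ::operator new(size);
}

// Thin inline wrapper restores the convenient value-returning signature.
inline void* Allocate(size_t size) {
  void* result = nullptr;
  AllocateSlow(size, &result);
  return result;
}

int main() {
  void* p = Allocate(64);
  ::operator delete(p);
  return 0;
}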
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
index dff8b6eae3..cf45d29190 100644
--- a/deps/v8/src/heap/cppgc/object-start-bitmap.h
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -28,7 +28,8 @@ namespace internal {
// - kAllocationGranularity
//
// ObjectStartBitmap supports concurrent reads from multiple threads but
-// only a single mutator thread can write to it.
+// only a single mutator thread can write to it. ObjectStartBitmap relies on
+// being allocated inside the same normal page as the objects it covers.
class V8_EXPORT_PRIVATE ObjectStartBitmap {
public:
// Granularity of addresses added to the bitmap.
@@ -39,7 +40,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
return kReservedForBitmap * kBitsPerCell;
}
- explicit inline ObjectStartBitmap(Address offset);
+ inline ObjectStartBitmap();
// Finds an object header based on a
// address_maybe_pointing_to_the_middle_of_object. Will search for an object
@@ -87,7 +88,6 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
- const Address offset_;
// `fully_populated_` is used to denote that the bitmap is populated with all
// currently allocated objects on the page and is in a consistent state. It is
// used to guard against using the bitmap for finding headers during
@@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
};
-ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
+ObjectStartBitmap::ObjectStartBitmap() {
Clear();
MarkAsFullyPopulated();
}
@@ -113,9 +113,13 @@ template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
DCHECK(fully_populated_);
- DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
- size_t object_offset =
- address_maybe_pointing_to_the_middle_of_object - offset_;
+ const size_t page_base = reinterpret_cast<uintptr_t>(
+ address_maybe_pointing_to_the_middle_of_object) &
+ kPageBaseMask;
+ DCHECK_EQ(page_base, reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
+ size_t object_offset = reinterpret_cast<uintptr_t>(
+ address_maybe_pointing_to_the_middle_of_object) &
+ kPageOffsetMask;
size_t object_start_number = object_offset / kAllocationGranularity;
size_t cell_index = object_start_number / kBitsPerCell;
DCHECK_GT(object_start_bit_map_.size(), cell_index);
@@ -129,7 +133,7 @@ HeapObjectHeader* ObjectStartBitmap::FindHeader(
object_start_number =
(cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
object_offset = object_start_number * kAllocationGranularity;
- return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
+ return reinterpret_cast<HeapObjectHeader*>(page_base + object_offset);
}
template <AccessMode mode>
@@ -178,7 +182,8 @@ uint8_t ObjectStartBitmap::load(size_t cell_index) const {
void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
size_t* cell_index,
size_t* bit) const {
- const size_t object_offset = header_address - offset_;
+ const size_t object_offset =
+ reinterpret_cast<size_t>(header_address) & kPageOffsetMask;
DCHECK(!(object_offset & kAllocationMask));
const size_t object_start_number = object_offset / kAllocationGranularity;
*cell_index = object_start_number / kBitsPerCell;
@@ -188,6 +193,8 @@ void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
template <typename Callback>
inline void ObjectStartBitmap::Iterate(Callback callback) const {
+ const Address page_base = reinterpret_cast<Address>(
+ reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
if (!object_start_bit_map_[cell_index]) continue;
@@ -197,7 +204,7 @@ inline void ObjectStartBitmap::Iterate(Callback callback) const {
const size_t object_start_number =
(cell_index * kBitsPerCell) + trailing_zeroes;
const Address object_address =
- offset_ + (kAllocationGranularity * object_start_number);
+ page_base + (kAllocationGranularity * object_start_number);
callback(object_address);
// Clear current object bit in temporary value to advance iteration.
value &= ~(1 << (object_start_number & kCellMask));
@@ -220,8 +227,6 @@ void ObjectStartBitmap::Clear() {
class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
: public ObjectStartBitmap {
public:
- explicit inline PlatformAwareObjectStartBitmap(Address offset);
-
template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
template <AccessMode = AccessMode::kNonAtomic>
@@ -232,9 +237,6 @@ class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
static bool ShouldForceNonAtomic();
};
-PlatformAwareObjectStartBitmap::PlatformAwareObjectStartBitmap(Address offset)
- : ObjectStartBitmap(offset) {}
-
// static
template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
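The changes above drop the stored offset_ because, per the updated comment, the bitmap lives inside the page it describes and can recover the page base by masking its own address. A standalone sketch of that masking trick; the constants and names are assumptions for illustration only (4 KiB pages here, not V8's actual page size):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>

// Assumed purely for illustration: pages are 4 KiB and allocated at
// 4 KiB-aligned addresses, so the page base of any interior pointer can be
// recovered with a mask.
constexpr uintptr_t kPageSize = 4096;
constexpr uintptr_t kPageBaseMask = ~(kPageSize - 1);
constexpr uintptr_t kPageOffsetMask = kPageSize - 1;

struct PageLocalBitmap {
  // Valid only because the bitmap object itself lives inside the page it
  // describes, mirroring the constraint stated in the comment above.
  uintptr_t PageBase() const {
    return reinterpret_cast<uintptr_t>(this) & kPageBaseMask;
  }
  static uintptr_t OffsetInPage(const void* interior) {
    return reinterpret_cast<uintptr_t>(interior) & kPageOffsetMask;
  }
};

int main() {
  alignas(kPageSize) static unsigned char page[kPageSize];
  auto* bitmap = new (page + 64) PageLocalBitmap();
  std::printf("page base %p, bitmap offset %zu\n",
              reinterpret_cast<void*>(bitmap->PageBase()),
              static_cast<size_t>(PageLocalBitmap::OffsetInPage(bitmap)));
  return 0;
}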
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 9087b14d21..0cbde5ede3 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -68,7 +68,7 @@ void SameThreadEnabledCheckingPolicyBase::CheckPointerImpl(
// in progress.
header = &base_page->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(ptr);
DCHECK_LE(header->ObjectStart(), ptr);
- DCHECK_GT(header->ObjectEnd(), ptr);
+ DCHECK_GT(header->ObjectEnd<AccessMode::kAtomic>(), ptr);
}
if (header) {
DCHECK(!header->IsFree());
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index f65309b6f4..ccad82c81d 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -107,10 +107,18 @@ StatsCollector::Event::Event() {
epoch = epoch_counter.fetch_add(1);
}
+void StatsCollector::NotifyUnmarkingStarted(CollectionType collection_type) {
+ DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ DCHECK_EQ(CollectionType::kMajor, collection_type);
+ gc_state_ = GarbageCollectionState::kUnmarking;
+}
+
void StatsCollector::NotifyMarkingStarted(CollectionType collection_type,
MarkingType marking_type,
IsForcedGC is_forced_gc) {
- DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ DCHECK_IMPLIES(gc_state_ != GarbageCollectionState::kNotRunning,
+ (gc_state_ == GarbageCollectionState::kUnmarking &&
+ collection_type == CollectionType::kMajor));
current_.collection_type = collection_type;
current_.is_forced_gc = is_forced_gc;
current_.marking_type = marking_type;
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index ff040a3dcc..2cf728489d 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -53,6 +53,8 @@ namespace internal {
V(MarkVisitCrossThreadPersistents) \
V(MarkVisitStack) \
V(MarkVisitRememberedSets) \
+ V(WeakContainerCallbacksProcessing) \
+ V(CustomCallbacksProcessing) \
V(SweepFinishIfOutOfWork) \
V(SweepInvokePreFinalizers) \
V(SweepInTask) \
@@ -274,7 +276,11 @@ class V8_EXPORT_PRIVATE StatsCollector final {
void NotifySafePointForTesting();
- // Indicates a new garbage collection cycle.
+ // Indicates a new garbage collection cycle. The unmarking phase is optional
+ // and only used for major GCs when generational GC is enabled.
+ void NotifyUnmarkingStarted(CollectionType);
+ // Indicates a new minor garbage collection cycle, or a major one if
+ // generational GC is not enabled.
void NotifyMarkingStarted(CollectionType, MarkingType, IsForcedGC);
// Indicates that marking of the current garbage collection cycle is
// completed.
@@ -323,6 +329,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
private:
enum class GarbageCollectionState : uint8_t {
kNotRunning,
+ kUnmarking,
kMarking,
kSweeping
};
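The new kUnmarking state and the comments above describe an optional phase that may precede marking for major GCs under generational GC. A minimal sketch of that phase bookkeeping, with illustrative names and plain asserts standing in for V8's DCHECKs:

#include <cassert>

enum class Phase { kNotRunning, kUnmarking, kMarking, kSweeping };

class CycleTracker {
 public:
  // Only major GCs with generational GC enabled go through unmarking.
  void NotifyUnmarkingStarted() {
    assert(phase_ == Phase::kNotRunning);
    phase_ = Phase::kUnmarking;
  }
  // Marking either begins a fresh cycle or follows an unmarking phase.
  void NotifyMarkingStarted() {
    assert(phase_ == Phase::kNotRunning || phase_ == Phase::kUnmarking);
    phase_ = Phase::kMarking;
  }
  void NotifyMarkingCompleted() {
    assert(phase_ == Phase::kMarking);
    phase_ = Phase::kSweeping;
  }
  void NotifySweepingCompleted() {
    assert(phase_ == Phase::kSweeping);
    phase_ = Phase::kNotRunning;
  }

 private:
  Phase phase_ = Phase::kNotRunning;
};

int main() {
  CycleTracker tracker;
  tracker.NotifyUnmarkingStarted();  // optional: major GC + generational GC
  tracker.NotifyMarkingStarted();
  tracker.NotifyMarkingCompleted();
  tracker.NotifySweepingCompleted();
  return 0;
}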
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 3cb96f8baa..953bb0aeab 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -413,9 +413,17 @@ class SweepFinalizer final {
using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
+ enum class EmptyPageHandling {
+ kDestroy,
+ kReturn,
+ };
+
SweepFinalizer(cppgc::Platform* platform,
- FreeMemoryHandling free_memory_handling)
- : platform_(platform), free_memory_handling_(free_memory_handling) {}
+ FreeMemoryHandling free_memory_handling,
+ EmptyPageHandling empty_page_handling_type)
+ : platform_(platform),
+ free_memory_handling_(free_memory_handling),
+ empty_page_handling_(empty_page_handling_type) {}
void FinalizeHeap(SpaceStates* space_states) {
for (SpaceState& space_state : *space_states) {
@@ -471,8 +479,22 @@ class SweepFinalizer final {
// Unmap page if empty.
if (page_state->is_empty) {
- BasePage::Destroy(page);
- return;
+ if (empty_page_handling_ == EmptyPageHandling::kDestroy ||
+ page->is_large()) {
+ BasePage::Destroy(page);
+ return;
+ }
+
+ // Otherwise, we currently sweep on allocation. Reinitialize the empty
+ // page and return it right away.
+ auto* normal_page = NormalPage::From(page);
+
+ page_state->cached_free_list.Clear();
+ page_state->cached_free_list.Add(
+ {normal_page->PayloadStart(), normal_page->PayloadSize()});
+
+ page_state->unfinalized_free_list.clear();
+ page_state->largest_new_free_list_entry = normal_page->PayloadSize();
}
DCHECK(!page->is_large());
@@ -482,13 +504,15 @@ class SweepFinalizer final {
space_freelist.Append(std::move(page_state->cached_free_list));
// Merge freelist with finalizers.
- std::unique_ptr<FreeHandlerBase> handler =
- (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
- ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
- *platform_->GetPageAllocator(), space_freelist, *page))
- : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
- *platform_->GetPageAllocator(), space_freelist, *page));
- handler->FreeFreeList(page_state->unfinalized_free_list);
+ if (!page_state->unfinalized_free_list.empty()) {
+ std::unique_ptr<FreeHandlerBase> handler =
+ (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
+ ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
+ *platform_->GetPageAllocator(), space_freelist, *page))
+ : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
+ *platform_->GetPageAllocator(), space_freelist, *page));
+ handler->FreeFreeList(page_state->unfinalized_free_list);
+ }
largest_new_free_list_entry_ = std::max(
page_state->largest_new_free_list_entry, largest_new_free_list_entry_);
@@ -509,6 +533,7 @@ class SweepFinalizer final {
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
const FreeMemoryHandling free_memory_handling_;
+ const EmptyPageHandling empty_page_handling_;
};
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
@@ -544,7 +569,8 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
const auto deadline = v8::base::TimeTicks::Now() + max_duration;
// First, prioritize finalization of pages that were swept concurrently.
- SweepFinalizer finalizer(platform_, free_memory_handling_);
+ SweepFinalizer finalizer(platform_, free_memory_handling_,
+ SweepFinalizer::EmptyPageHandling::kDestroy);
if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline)) {
return false;
}
@@ -831,7 +857,8 @@ class Sweeper::SweeperImpl final {
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
- SweepFinalizer finalizer(platform_, config_.free_memory_handling);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling,
+ SweepFinalizer::EmptyPageHandling::kReturn);
while (auto page = space_state.swept_unfinalized_pages.Pop()) {
finalizer.FinalizePage(&*page);
if (size <= finalizer.largest_new_free_list_entry()) {
@@ -924,7 +951,8 @@ class Sweeper::SweeperImpl final {
MutatorThreadSweepingScope sweeping_in_progress(*this);
// First, call finalizers on the mutator thread.
- SweepFinalizer finalizer(platform_, config_.free_memory_handling);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling,
+ SweepFinalizer::EmptyPageHandling::kDestroy);
finalizer.FinalizeHeap(&space_states_);
// Then, help out the concurrent thread.
@@ -1108,7 +1136,8 @@ class Sweeper::SweeperImpl final {
void SynchronizeAndFinalizeConcurrentSweeping() {
CancelSweepers();
- SweepFinalizer finalizer(platform_, config_.free_memory_handling);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling,
+ SweepFinalizer::EmptyPageHandling::kDestroy);
finalizer.FinalizeHeap(&space_states_);
}
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 5cbec656a9..a2f1eb4ab4 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -5,6 +5,7 @@
#include "src/heap/cppgc/write-barrier.h"
#include "include/cppgc/heap-consistency.h"
+#include "include/cppgc/internal/member-storage.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -222,5 +223,53 @@ bool YoungGenerationEnabler::IsEnabled() {
#endif // defined(CPPGC_YOUNG_GENERATION)
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+
+// static
+template <WriteBarrierSlotType SlotType>
+void WriteBarrier::CombinedWriteBarrierSlow(const void* slot) {
+ DCHECK_NOT_NULL(slot);
+
+ const void* value = nullptr;
+#if defined(CPPGC_POINTER_COMPRESSION)
+ if constexpr (SlotType == WriteBarrierSlotType::kCompressed) {
+ value = CompressedPointer::Decompress(
+ *static_cast<const CompressedPointer::IntegralType*>(slot));
+ } else {
+ value = *reinterpret_cast<const void* const*>(slot);
+ }
+#else
+ static_assert(SlotType == WriteBarrierSlotType::kUncompressed);
+ value = *reinterpret_cast<const void* const*>(slot);
+#endif
+
+ WriteBarrier::Params params;
+ const WriteBarrier::Type type =
+ WriteBarrier::GetWriteBarrierType(slot, value, params);
+ switch (type) {
+ case WriteBarrier::Type::kGenerational:
+ WriteBarrier::GenerationalBarrier<
+ WriteBarrier::GenerationalBarrierType::kPreciseSlot>(params, slot);
+ break;
+ case WriteBarrier::Type::kMarking:
+ WriteBarrier::DijkstraMarkingBarrier(params, value);
+ break;
+ case WriteBarrier::Type::kNone:
+ // The fast checks are approximate and may trigger spuriously if any heap
+ // has marking in progress. `GetWriteBarrierType()` above is exact which
+ // is the reason we could still observe a bailout here.
+ break;
+ }
+}
+
+template V8_EXPORT_PRIVATE void WriteBarrier::CombinedWriteBarrierSlow<
+ WriteBarrierSlotType::kUncompressed>(const void* slot);
+#if defined(CPPGC_POINTER_COMPRESSION)
+template V8_EXPORT_PRIVATE void WriteBarrier::CombinedWriteBarrierSlow<
+ WriteBarrierSlotType::kCompressed>(const void* slot);
+#endif // defined(CPPGC_POINTER_COMPRESSION)
+
+#endif // CPPGC_SLIM_WRITE_BARRIER
+
} // namespace internal
} // namespace cppgc
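The kNone case and its comment above reflect a common two-tier barrier design: a cheap, approximate check decides whether to call the slow path at all, and the slow path performs the exact classification and may legitimately conclude that nothing needs to be done. A standalone sketch of that shape only; the names and the trivial exact check are invented, not the cppgc API:

#include <atomic>
#include <cstdio>

// Approximate, process-wide hint: true if *any* heap might be marking.
std::atomic<bool> g_any_heap_marking{false};

enum class BarrierType { kNone, kMarking };

// Exact but more expensive classification done only on the slow path.
BarrierType ExactBarrierTypeFor(const void* slot) {
  (void)slot;                  // placeholder for a precise per-heap check
  return BarrierType::kNone;   // here: the barrier turns out to be unneeded
}

void BarrierSlow(const void* slot) {
  switch (ExactBarrierTypeFor(slot)) {
    case BarrierType::kMarking:
      std::puts("record value for the marker");
      break;
    case BarrierType::kNone:
      // Spurious fast-path trigger; the approximate check was conservative.
      break;
  }
}

inline void WriteBarrier(const void* slot) {
  // Fast path: one relaxed load; may be stale or refer to another heap.
  if (!g_any_heap_marking.load(std::memory_order_relaxed)) return;
  BarrierSlow(slot);
}

int main() {
  int object = 0;
  g_any_heap_marking.store(true, std::memory_order_relaxed);
  WriteBarrier(&object);  // takes the slow path, which may still do nothing
  return 0;
}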
diff --git a/deps/v8/src/heap/embedder-tracing-inl.h b/deps/v8/src/heap/embedder-tracing-inl.h
deleted file mode 100644
index 9a1c201f41..0000000000
--- a/deps/v8/src/heap/embedder-tracing-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_HEAP_EMBEDDER_TRACING_INL_H_
-#define V8_HEAP_EMBEDDER_TRACING_INL_H_
-
-#include "src/heap/embedder-tracing.h"
-#include "src/objects/embedder-data-slot.h"
-#include "src/objects/js-objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-bool LocalEmbedderHeapTracer::ExtractWrappableInfo(
- Isolate* isolate, JSObject js_object,
- const WrapperDescriptor& wrapper_descriptor, WrapperInfo* info) {
- DCHECK(js_object.MayHaveEmbedderFields());
- if (js_object.GetEmbedderFieldCount() < 2) return false;
-
- return ExtractWrappableInfo(
- isolate, wrapper_descriptor,
- EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index),
- EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index),
- info);
-}
-
-// static
-bool LocalEmbedderHeapTracer::ExtractWrappableInfo(
- Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
- const EmbedderDataSlot& type_slot, const EmbedderDataSlot& instance_slot,
- WrapperInfo* info) {
- if (type_slot.ToAlignedPointer(isolate, &info->first) && info->first &&
- instance_slot.ToAlignedPointer(isolate, &info->second) && info->second) {
- return (wrapper_descriptor.embedder_id_for_garbage_collected ==
- WrapperDescriptor::kUnknownEmbedderId) ||
- (*static_cast<uint16_t*>(info->first) ==
- wrapper_descriptor.embedder_id_for_garbage_collected);
- }
- return false;
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_EMBEDDER_TRACING_INL_H_
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
deleted file mode 100644
index ceac516f9c..0000000000
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/embedder-tracing.h"
-
-#include "include/v8-cppgc.h"
-#include "src/base/logging.h"
-#include "src/handles/global-handles.h"
-#include "src/heap/embedder-tracing-inl.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/marking-worklist-inl.h"
-
-namespace v8::internal {
-
-START_ALLOW_USE_DEPRECATED()
-
-void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
- CHECK_NULL(cpp_heap_);
- if (remote_tracer_) remote_tracer_->v8_isolate_ = nullptr;
-
- remote_tracer_ = tracer;
- default_embedder_roots_handler_.SetTracer(tracer);
- if (remote_tracer_)
- remote_tracer_->v8_isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
-}
-
-void LocalEmbedderHeapTracer::SetCppHeap(CppHeap* cpp_heap) {
- CHECK_NULL(remote_tracer_);
- cpp_heap_ = cpp_heap;
-}
-
-namespace {
-CppHeap::GarbageCollectionFlags ConvertTraceFlags(
- EmbedderHeapTracer::TraceFlags flags) {
- CppHeap::GarbageCollectionFlags result;
- if (flags & EmbedderHeapTracer::TraceFlags::kForced)
- result |= CppHeap::GarbageCollectionFlagValues::kForced;
- if (flags & EmbedderHeapTracer::TraceFlags::kReduceMemory)
- result |= CppHeap::GarbageCollectionFlagValues::kReduceMemory;
- return result;
-}
-} // namespace
-
-void LocalEmbedderHeapTracer::PrepareForTrace(
- EmbedderHeapTracer::TraceFlags flags) {
- if (cpp_heap_)
- cpp_heap()->InitializeTracing(cppgc::internal::CollectionType::kMajor,
- ConvertTraceFlags(flags));
-}
-
-void LocalEmbedderHeapTracer::TracePrologue(
- EmbedderHeapTracer::TraceFlags flags) {
- if (!InUse()) return;
-
- embedder_worklist_empty_ = false;
- if (cpp_heap_)
- cpp_heap()->StartTracing();
- else
- remote_tracer_->TracePrologue(flags);
-}
-
-void LocalEmbedderHeapTracer::TraceEpilogue() {
- if (!InUse()) return;
-
- // Resetting to state unknown as there may be follow up garbage collections
- // triggered from callbacks that have a different stack state.
- embedder_stack_state_ =
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
-
- if (cpp_heap_) {
- cpp_heap()->TraceEpilogue();
- } else {
- EmbedderHeapTracer::TraceSummary summary;
- remote_tracer_->TraceEpilogue(&summary);
- UpdateRemoteStats(summary.allocated_size, summary.time);
- }
-}
-
-void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size,
- double time) {
- remote_stats_.used_size = allocated_size;
- // Force a check next time increased memory is reported. This allows for
- // setting limits close to actual heap sizes.
- remote_stats_.allocated_size_limit_for_check = 0;
- constexpr double kMinReportingTimeMs = 0.5;
- if (time > kMinReportingTimeMs) {
- isolate_->heap()->tracer()->RecordEmbedderSpeed(allocated_size, time);
- }
-}
-
-void LocalEmbedderHeapTracer::EnterFinalPause() {
- if (!InUse()) return;
-
- if (cpp_heap_)
- cpp_heap()->EnterFinalPause(embedder_stack_state_);
- else
- remote_tracer_->EnterFinalPause(embedder_stack_state_);
-}
-
-bool LocalEmbedderHeapTracer::Trace(double max_duration) {
- if (!InUse()) return true;
-
- return cpp_heap_ ? cpp_heap_->AdvanceTracing(max_duration)
- : remote_tracer_->AdvanceTracing(max_duration);
-}
-
-bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
- return !InUse() || (cpp_heap_ ? cpp_heap()->IsTracingDone()
- : remote_tracer_->IsTracingDone());
-}
-
-LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
- LocalEmbedderHeapTracer* tracer)
- : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) {
- DCHECK(!tracer_->cpp_heap_);
- wrapper_cache_.reserve(kWrapperCacheSize);
-}
-
-LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
- DCHECK(!tracer_->cpp_heap_);
- if (!wrapper_cache_.empty()) {
- tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
- }
-}
-
-LocalEmbedderHeapTracer::WrapperInfo
-LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
- JSObject js_object) {
- WrapperInfo info;
- if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor(), &info)) {
- return info;
- }
- return {nullptr, nullptr};
-}
-
-void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
- JSObject js_object) {
- DCHECK(js_object.MayHaveEmbedderFields());
- WrapperInfo info;
- if (ExtractWrappableInfo(tracer_->isolate_, js_object, wrapper_descriptor_,
- &info)) {
- wrapper_cache_.push_back(std::move(info));
- FlushWrapperCacheIfFull();
- }
-}
-
-void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
- DCHECK(!tracer_->cpp_heap_);
- if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
- tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
- wrapper_cache_.clear();
- wrapper_cache_.reserve(kWrapperCacheSize);
- }
-}
-
-void LocalEmbedderHeapTracer::ProcessingScope::AddWrapperInfoForTesting(
- WrapperInfo info) {
- wrapper_cache_.push_back(info);
- FlushWrapperCacheIfFull();
-}
-
-void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
- if (!v8_flags.global_gc_scheduling || !v8_flags.incremental_marking) return;
-
- Heap* heap = isolate_->heap();
- heap->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
- if (heap->AllocationLimitOvershotByLargeMargin()) {
- heap->FinalizeIncrementalMarkingAtomically(
- i::GarbageCollectionReason::kExternalFinalize);
- }
-}
-
-void LocalEmbedderHeapTracer::EmbedderWriteBarrier(Heap* heap,
- JSObject js_object) {
- DCHECK(InUse());
- DCHECK(js_object.MayHaveEmbedderFields());
- if (cpp_heap_) {
- DCHECK_NOT_NULL(heap->mark_compact_collector());
- const EmbedderDataSlot type_slot(js_object,
- wrapper_descriptor_.wrappable_type_index);
- const EmbedderDataSlot instance_slot(
- js_object, wrapper_descriptor_.wrappable_instance_index);
- heap->mark_compact_collector()
- ->local_marking_worklists()
- ->cpp_marking_state()
- ->MarkAndPush(type_slot, instance_slot);
- return;
- }
- LocalEmbedderHeapTracer::ProcessingScope scope(this);
- scope.TracePossibleWrapper(js_object);
-}
-
-bool DefaultEmbedderRootsHandler::IsRoot(
- const v8::TracedReference<v8::Value>& handle) {
- return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
-}
-
-void DefaultEmbedderRootsHandler::ResetRoot(
- const v8::TracedReference<v8::Value>& handle) {
- // Resetting is only called when IsRoot() returns false which
- // can only happen the EmbedderHeapTracer is set on API level.
- DCHECK(tracer_);
- tracer_->ResetHandleInNonTracingGC(handle);
-}
-
-END_ALLOW_USE_DEPRECATED()
-
-} // namespace v8::internal
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
deleted file mode 100644
index a32e5bf3a4..0000000000
--- a/deps/v8/src/heap/embedder-tracing.h
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_EMBEDDER_TRACING_H_
-#define V8_HEAP_EMBEDDER_TRACING_H_
-
-#include <atomic>
-
-#include "include/v8-cppgc.h"
-#include "include/v8-embedder-heap.h"
-#include "include/v8-traced-handle.h"
-#include "src/common/globals.h"
-#include "src/execution/isolate.h"
-#include "src/flags/flags.h"
-#include "src/heap/cppgc-js/cpp-heap.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class JSObject;
-
-START_ALLOW_USE_DEPRECATED()
-
-class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler final
- : public EmbedderRootsHandler {
- public:
- bool IsRoot(const v8::TracedReference<v8::Value>& handle) final;
-
- void ResetRoot(const v8::TracedReference<v8::Value>& handle) final;
-
- void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; }
-
- private:
- EmbedderHeapTracer* tracer_ = nullptr;
-};
-
-class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
- public:
- using WrapperInfo = std::pair<void*, void*>;
- using WrapperCache = std::vector<WrapperInfo>;
-
- // WrapperInfo is passed over the API. Use VerboseWrapperInfo to access pair
- // internals in a named way. See ProcessingScope::TracePossibleJSWrapper()
- // below on how a V8 object is parsed to gather the information.
- struct VerboseWrapperInfo {
- constexpr explicit VerboseWrapperInfo(const WrapperInfo& raw_info)
- : raw_info(raw_info) {}
-
- // Information describing the type pointed to via instance().
- void* type_info() const { return raw_info.first; }
- // Direct pointer to an instance described by type_info().
- void* instance() const { return raw_info.second; }
- // Returns whether the info is empty and thus does not keep a C++ object
- // alive.
- bool is_empty() const { return !type_info() || !instance(); }
-
- const WrapperInfo& raw_info;
- };
-
- class V8_EXPORT_PRIVATE V8_NODISCARD ProcessingScope {
- public:
- explicit ProcessingScope(LocalEmbedderHeapTracer* tracer);
- ~ProcessingScope();
-
- void TracePossibleWrapper(JSObject js_object);
-
- void AddWrapperInfoForTesting(WrapperInfo info);
-
- private:
- static constexpr size_t kWrapperCacheSize = 1000;
-
- void FlushWrapperCacheIfFull();
-
- LocalEmbedderHeapTracer* const tracer_;
- const WrapperDescriptor wrapper_descriptor_;
- WrapperCache wrapper_cache_;
- };
-
- static V8_INLINE bool ExtractWrappableInfo(Isolate*, JSObject,
- const WrapperDescriptor&,
- WrapperInfo*);
- static V8_INLINE bool ExtractWrappableInfo(
- Isolate*, const WrapperDescriptor&, const EmbedderDataSlot& type_slot,
- const EmbedderDataSlot& instance_slot, WrapperInfo*);
-
- explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
-
- ~LocalEmbedderHeapTracer() {
- if (remote_tracer_) remote_tracer_->v8_isolate_ = nullptr;
- // CppHeap is not detached from Isolate here. Detaching is done explicitly
- // on Isolate/Heap/CppHeap destruction.
- }
-
- bool InUse() const { return cpp_heap_ || (remote_tracer_ != nullptr); }
- // This method doesn't take CppHeap into account.
- EmbedderHeapTracer* remote_tracer() const {
- DCHECK_NULL(cpp_heap_);
- return remote_tracer_;
- }
-
- void SetRemoteTracer(EmbedderHeapTracer* tracer);
- void SetCppHeap(CppHeap* cpp_heap);
- void PrepareForTrace(EmbedderHeapTracer::TraceFlags flags);
- void TracePrologue(EmbedderHeapTracer::TraceFlags flags);
- void TraceEpilogue();
- void EnterFinalPause();
- bool Trace(double deadline);
- bool IsRemoteTracingDone();
-
- bool ShouldFinalizeIncrementalMarking() {
- // Covers cases where no remote tracer is in use or the flags for
- // incremental marking have been disabled.
- if (!SupportsIncrementalEmbedderSteps()) return true;
-
- return IsRemoteTracingDone() && embedder_worklist_empty_;
- }
-
- bool SupportsIncrementalEmbedderSteps() const {
- if (!InUse()) return false;
-
- return cpp_heap_ ? v8_flags.cppheap_incremental_marking
- : v8_flags.incremental_marking_wrappers;
- }
-
- void SetEmbedderWorklistEmpty(bool is_empty) {
- embedder_worklist_empty_ = is_empty;
- }
-
- void IncreaseAllocatedSize(size_t bytes) {
- remote_stats_.used_size.fetch_add(bytes, std::memory_order_relaxed);
- remote_stats_.allocated_size += bytes;
- if (remote_stats_.allocated_size >
- remote_stats_.allocated_size_limit_for_check) {
- StartIncrementalMarkingIfNeeded();
- remote_stats_.allocated_size_limit_for_check =
- remote_stats_.allocated_size + kEmbedderAllocatedThreshold;
- }
- }
-
- void DecreaseAllocatedSize(size_t bytes) {
- DCHECK_GE(remote_stats_.used_size.load(std::memory_order_relaxed), bytes);
- remote_stats_.used_size.fetch_sub(bytes, std::memory_order_relaxed);
- }
-
- void StartIncrementalMarkingIfNeeded();
-
- size_t used_size() const {
- return remote_stats_.used_size.load(std::memory_order_relaxed);
- }
- size_t allocated_size() const { return remote_stats_.allocated_size; }
-
- WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
-
- void SetWrapperDescriptor(const WrapperDescriptor& wrapper_descriptor) {
- DCHECK_NULL(cpp_heap_);
- wrapper_descriptor_ = wrapper_descriptor;
- }
-
- void UpdateRemoteStats(size_t, double);
-
- DefaultEmbedderRootsHandler& default_embedder_roots_handler() {
- return default_embedder_roots_handler_;
- }
-
- EmbedderHeapTracer::EmbedderStackState embedder_stack_state() const {
- return embedder_stack_state_;
- }
-
- void EmbedderWriteBarrier(Heap*, JSObject);
-
- private:
- static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
-
- static constexpr WrapperDescriptor::InternalFieldIndex
- kDefaultWrapperTypeEmbedderIndex = 0;
- static constexpr WrapperDescriptor::InternalFieldIndex
- kDefaultWrapperInstanceEmbedderIndex = 1;
-
- static constexpr WrapperDescriptor GetDefaultWrapperDescriptor() {
- // The default descriptor assumes the indices that known embedders use.
- return WrapperDescriptor(kDefaultWrapperTypeEmbedderIndex,
- kDefaultWrapperInstanceEmbedderIndex,
- WrapperDescriptor::kUnknownEmbedderId);
- }
-
- CppHeap* cpp_heap() {
- DCHECK_NOT_NULL(cpp_heap_);
- DCHECK_NULL(remote_tracer_);
- DCHECK_IMPLIES(isolate_, cpp_heap_ == isolate_->heap()->cpp_heap());
- return cpp_heap_;
- }
-
- WrapperDescriptor wrapper_descriptor() {
- if (cpp_heap_)
- return cpp_heap()->wrapper_descriptor();
- else
- return wrapper_descriptor_;
- }
-
- Isolate* const isolate_;
- EmbedderHeapTracer* remote_tracer_ = nullptr;
- CppHeap* cpp_heap_ = nullptr;
- DefaultEmbedderRootsHandler default_embedder_roots_handler_;
-
- EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
- // Indicates whether the embedder worklist was observed empty on the main
- // thread. This is opportunistic as concurrent marking tasks may hold local
- // segments of potential embedder fields to move to the main thread.
- bool embedder_worklist_empty_ = false;
-
- struct RemoteStatistics {
- // Used size of objects in bytes reported by the embedder. Updated via
- // TraceSummary at the end of tracing and incrementally when the GC is not
- // in progress.
- std::atomic<size_t> used_size{0};
- // Totally bytes allocated by the embedder. Monotonically
- // increasing value. Used to approximate allocation rate.
- size_t allocated_size = 0;
- // Limit for |allocated_size| in bytes to avoid checking for starting a GC
- // on each increment.
- size_t allocated_size_limit_for_check = 0;
- } remote_stats_;
-
- // Default descriptor only used when the embedder is using EmbedderHeapTracer.
- // The value is overriden by CppHeap with values that the embedder provided
- // upon initialization.
- WrapperDescriptor wrapper_descriptor_ = GetDefaultWrapperDescriptor();
-
- friend class EmbedderStackStateScope;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_EMBEDDER_TRACING_H_
diff --git a/deps/v8/src/heap/evacuation-verifier-inl.h b/deps/v8/src/heap/evacuation-verifier-inl.h
index cf1eee1351..476176306c 100644
--- a/deps/v8/src/heap/evacuation-verifier-inl.h
+++ b/deps/v8/src/heap/evacuation-verifier-inl.h
@@ -22,9 +22,9 @@ void FullEvacuationVerifier::VerifyHeapObjectImpl(HeapObject heap_object) {
}
bool FullEvacuationVerifier::ShouldVerifyObject(HeapObject heap_object) {
- const bool in_shared_heap = heap_object.InSharedWritableHeap();
- return heap_->isolate()->is_shared_heap_isolate() ? in_shared_heap
- : !in_shared_heap;
+ const bool in_shared_heap = heap_object.InWritableSharedSpace();
+ return heap_->isolate()->is_shared_space_isolate() ? in_shared_heap
+ : !in_shared_heap;
}
template <typename TSlot>
diff --git a/deps/v8/src/heap/evacuation-verifier.cc b/deps/v8/src/heap/evacuation-verifier.cc
index f7fcbccc2f..5134b7f02f 100644
--- a/deps/v8/src/heap/evacuation-verifier.cc
+++ b/deps/v8/src/heap/evacuation-verifier.cc
@@ -25,9 +25,7 @@ void EvacuationVerifier::VisitPointers(HeapObject host, MaybeObjectSlot start,
VerifyPointers(start, end);
}
-void EvacuationVerifier::VisitCodePointer(HeapObject host,
- CodeObjectSlot slot) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+void EvacuationVerifier::VisitCodePointer(Code host, CodeObjectSlot slot) {
VerifyCodePointer(slot);
}
@@ -106,19 +104,19 @@ void FullEvacuationVerifier::VerifyPointers(MaybeObjectSlot start,
VerifyPointersImpl(start, end);
}
void FullEvacuationVerifier::VerifyCodePointer(CodeObjectSlot slot) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
- // The slot might contain smi during CodeDataContainer creation, so skip it.
+ // The slot might contain smi during Code creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
}
-void FullEvacuationVerifier::VisitCodeTarget(Code host, RelocInfo* rinfo) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+void FullEvacuationVerifier::VisitCodeTarget(RelocInfo* rinfo) {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
-void FullEvacuationVerifier::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
+void FullEvacuationVerifier::VisitEmbeddedPointer(RelocInfo* rinfo) {
VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
}
void FullEvacuationVerifier::VerifyRootPointers(FullObjectSlot start,
@@ -150,21 +148,19 @@ void YoungGenerationEvacuationVerifier::VerifyPointers(MaybeObjectSlot start,
VerifyPointersImpl(start, end);
}
void YoungGenerationEvacuationVerifier::VerifyCodePointer(CodeObjectSlot slot) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
- // The slot might contain smi during CodeDataContainer creation, so skip it.
+ // The slot might contain smi during Code creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
}
-void YoungGenerationEvacuationVerifier::VisitCodeTarget(Code host,
- RelocInfo* rinfo) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+void YoungGenerationEvacuationVerifier::VisitCodeTarget(RelocInfo* rinfo) {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
-void YoungGenerationEvacuationVerifier::VisitEmbeddedPointer(Code host,
- RelocInfo* rinfo) {
+void YoungGenerationEvacuationVerifier::VisitEmbeddedPointer(RelocInfo* rinfo) {
VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
}
void YoungGenerationEvacuationVerifier::VerifyRootPointers(FullObjectSlot start,
diff --git a/deps/v8/src/heap/evacuation-verifier.h b/deps/v8/src/heap/evacuation-verifier.h
index 3aa4702eaa..039d793f33 100644
--- a/deps/v8/src/heap/evacuation-verifier.h
+++ b/deps/v8/src/heap/evacuation-verifier.h
@@ -26,7 +26,7 @@ class EvacuationVerifier : public ObjectVisitorWithCageBases,
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override;
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override;
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;
@@ -70,8 +70,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
void VerifyPointers(ObjectSlot start, ObjectSlot end) override;
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override;
void VerifyCodePointer(CodeObjectSlot slot) override;
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
+ void VisitCodeTarget(RelocInfo* rinfo) override;
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override;
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override;
};
@@ -91,8 +91,8 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
void VerifyPointers(ObjectSlot start, ObjectSlot end) override;
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override;
void VerifyCodePointer(CodeObjectSlot slot) override;
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
+ void VisitCodeTarget(RelocInfo* rinfo) override;
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override;
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override;
};
diff --git a/deps/v8/src/heap/factory-base-inl.h b/deps/v8/src/heap/factory-base-inl.h
index 1274477c49..c02d7fb410 100644
--- a/deps/v8/src/heap/factory-base-inl.h
+++ b/deps/v8/src/heap/factory-base-inl.h
@@ -107,7 +107,7 @@ template <typename StructType>
StructType FactoryBase<Impl>::NewStructInternal(InstanceType type,
AllocationType allocation) {
ReadOnlyRoots roots = read_only_roots();
- Map map = Map::GetInstanceTypeMap(roots, type);
+ Map map = Map::GetMapFor(roots, type);
int size = StructType::kSize;
return StructType::cast(NewStructInternal(roots, map, size, allocation));
}
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 7e0af7d3b9..13b712bf94 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -58,7 +58,7 @@ template <typename Impl>
Handle<Struct> FactoryBase<Impl>::NewStruct(InstanceType type,
AllocationType allocation) {
ReadOnlyRoots roots = read_only_roots();
- Map map = Map::GetInstanceTypeMap(roots, type);
+ Map map = Map::GetMapFor(roots, type);
int size = map.instance_size();
return handle(NewStructInternal(roots, map, size, allocation), isolate());
}
@@ -74,24 +74,43 @@ Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
}
template <typename Impl>
-Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
- int flags, AllocationType allocation) {
- Map map = read_only_roots().code_data_container_map();
+Handle<Code> FactoryBase<Impl>::NewCode(const NewCodeOptions& options) {
+ Map map = read_only_roots().code_map();
int size = map.instance_size();
- DCHECK_NE(allocation, AllocationType::kYoung);
- CodeDataContainer data_container = CodeDataContainer::cast(
- AllocateRawWithImmortalMap(size, allocation, map));
+ DCHECK_NE(options.allocation, AllocationType::kYoung);
+ Code code =
+ Code::cast(AllocateRawWithImmortalMap(size, options.allocation, map));
DisallowGarbageCollection no_gc;
- data_container.set_next_code_link(read_only_roots().undefined_value(),
- SKIP_WRITE_BARRIER);
- data_container.set_kind_specific_flags(flags, kRelaxedStore);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- Isolate* isolate_for_sandbox = impl()->isolate_for_sandbox();
- data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
- data_container.init_code_entry_point(isolate_for_sandbox, kNullAddress);
+ code.initialize_flags(options.kind, options.builtin, options.is_turbofanned,
+ options.stack_slots);
+ code.set_kind_specific_flags(options.kind_specific_flags, kRelaxedStore);
+ Isolate* isolate_for_sandbox = impl()->isolate_for_sandbox();
+ code.set_raw_instruction_stream(Smi::zero(), SKIP_WRITE_BARRIER);
+ code.init_code_entry_point(isolate_for_sandbox, kNullAddress);
+ code.set_instruction_size(options.instruction_size);
+ code.set_metadata_size(options.metadata_size);
+ code.set_relocation_info(*options.reloc_info);
+ code.set_inlined_bytecode_size(options.inlined_bytecode_size);
+ code.set_osr_offset(options.osr_offset);
+ code.set_handler_table_offset(options.handler_table_offset);
+ code.set_constant_pool_offset(options.constant_pool_offset);
+ code.set_code_comments_offset(options.code_comments_offset);
+ code.set_unwinding_info_offset(options.unwinding_info_offset);
+
+ if (options.kind == CodeKind::BASELINE) {
+ code.set_bytecode_or_interpreter_data(
+ *options.bytecode_or_deoptimization_data);
+ code.set_bytecode_offset_table(
+ *options.bytecode_offsets_or_source_position_table);
+ } else {
+ code.set_deoptimization_data(
+ FixedArray::cast(*options.bytecode_or_deoptimization_data));
+ code.set_source_position_table(
+ *options.bytecode_offsets_or_source_position_table);
}
- data_container.clear_padding();
- return handle(data_container, isolate());
+
+ code.clear_padding();
+ return handle(code, isolate());
}
template <typename Impl>
@@ -99,7 +118,8 @@ Handle<FixedArray> FactoryBase<Impl>::NewFixedArray(int length,
AllocationType allocation) {
if (length == 0) return impl()->empty_fixed_array();
if (length < 0 || length > FixedArray::kMaxLength) {
- FATAL("Fatal JavaScript invalid size error %d", length);
+ FATAL("Fatal JavaScript invalid size error %d (see crbug.com/1201626)",
+ length);
UNREACHABLE();
}
return NewFixedArrayWithFiller(
@@ -165,7 +185,8 @@ Handle<FixedArrayBase> FactoryBase<Impl>::NewFixedDoubleArray(
int length, AllocationType allocation) {
if (length == 0) return impl()->empty_fixed_array();
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- FATAL("Fatal JavaScript invalid size error %d", length);
+ FATAL("Fatal JavaScript invalid size error %d (see crbug.com/1201626)",
+ length);
UNREACHABLE();
}
int size = FixedDoubleArray::SizeFor(length);
@@ -259,14 +280,16 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
}
template <typename Impl>
-Handle<Script> FactoryBase<Impl>::NewScript(
- Handle<PrimitiveHeapObject> source) {
- return NewScriptWithId(source, isolate()->GetNextScriptId());
+Handle<Script> FactoryBase<Impl>::NewScript(Handle<PrimitiveHeapObject> source,
+ ScriptEventType script_event_type) {
+ return NewScriptWithId(source, isolate()->GetNextScriptId(),
+ script_event_type);
}
template <typename Impl>
Handle<Script> FactoryBase<Impl>::NewScriptWithId(
- Handle<PrimitiveHeapObject> source, int script_id) {
+ Handle<PrimitiveHeapObject> source, int script_id,
+ ScriptEventType script_event_type) {
DCHECK(source->IsString() || source->IsUndefined());
// Create and initialize script object.
ReadOnlyRoots roots = read_only_roots();
@@ -283,14 +306,16 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
raw.set_context_data(roots.undefined_value(), SKIP_WRITE_BARRIER);
raw.set_type(Script::TYPE_NORMAL);
raw.set_line_ends(roots.undefined_value(), SKIP_WRITE_BARRIER);
- raw.set_eval_from_shared_or_wrapped_arguments_or_sfi_table(
- roots.undefined_value(), SKIP_WRITE_BARRIER);
+ raw.set_eval_from_shared_or_wrapped_arguments(roots.undefined_value(),
+ SKIP_WRITE_BARRIER);
raw.set_eval_from_position(0);
raw.set_shared_function_infos(roots.empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
raw.set_flags(0);
raw.set_host_defined_options(roots.empty_fixed_array(), SKIP_WRITE_BARRIER);
raw.set_source_hash(roots.undefined_value(), SKIP_WRITE_BARRIER);
+ raw.set_compiled_lazy_function_positions(roots.undefined_value(),
+ SKIP_WRITE_BARRIER);
#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
raw.set_script_or_modules(roots.empty_array_list());
#endif
@@ -300,8 +325,7 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
impl()->AddToScriptList(script);
}
- LOG(isolate(),
- ScriptEvent(V8FileLogger::ScriptEventType::kCreate, script_id));
+ LOG(isolate(), ScriptEvent(script_event_type, script_id));
return script;
}
@@ -324,9 +348,9 @@ template <typename Impl>
Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script, bool is_toplevel) {
FunctionKind kind = literal->kind();
- Handle<SharedFunctionInfo> shared =
- NewSharedFunctionInfo(literal->GetName(isolate()), MaybeHandle<Code>(),
- Builtin::kCompileLazy, kind);
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ literal->GetName(isolate()), MaybeHandle<HeapObject>(),
+ Builtin::kCompileLazy, kind);
SharedFunctionInfo::InitFromFunctionLiteral(isolate(), shared, literal,
is_toplevel);
shared->SetScript(read_only_roots(), *script, literal->function_literal_id(),
@@ -343,8 +367,8 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::CloneSharedFunctionInfo(
SharedFunctionInfo::cast(NewWithImmortalMap(map, AllocationType::kOld));
DisallowGarbageCollection no_gc;
- shared.CopyFrom(*other);
shared.clear_padding();
+ shared.CopyFrom(*other);
return handle(shared, isolate());
}
@@ -429,8 +453,7 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
// If we pass function_data then we shouldn't pass a builtin index, and
// the function_data should not be code with a builtin.
DCHECK(!Builtins::IsBuiltinId(builtin));
- DCHECK_IMPLIES(function_data->IsCode(),
- !Code::cast(*function_data).is_builtin());
+ DCHECK(!function_data->IsInstructionStream());
raw.set_function_data(*function_data, kReleaseStore);
} else if (Builtins::IsBuiltinId(builtin)) {
raw.set_builtin_id(builtin);
@@ -675,6 +698,7 @@ MaybeHandle<SeqStringT> FactoryBase<Impl>::NewRawStringWithMap(
SeqStringT string =
SeqStringT::cast(AllocateRawWithImmortalMap(size, allocation, map));
DisallowGarbageCollection no_gc;
+ string.clear_padding_destructively(length);
string.set_length(length);
string.set_raw_hash_field(String::kEmptyHashField);
DCHECK_EQ(size, string.Size());
@@ -763,14 +787,14 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
uint8_t* dest = result->GetChars(no_gc, access_guard);
// Copy left part.
{
- const uint8_t* src =
- left->template GetChars<uint8_t>(isolate(), no_gc, access_guard);
+ const uint8_t* src = left->template GetDirectStringChars<uint8_t>(
+ isolate(), no_gc, access_guard);
CopyChars(dest, src, left_length);
}
// Copy right part.
{
- const uint8_t* src =
- right->template GetChars<uint8_t>(isolate(), no_gc, access_guard);
+ const uint8_t* src = right->template GetDirectStringChars<uint8_t>(
+ isolate(), no_gc, access_guard);
CopyChars(dest + left_length, src, right_length);
}
return result;
@@ -1020,9 +1044,22 @@ Handle<DescriptorArray> FactoryBase<Impl>::NewDescriptorArray(
HeapObject obj = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().descriptor_array_map());
DescriptorArray array = DescriptorArray::cast(obj);
+
+ auto raw_gc_state = DescriptorArrayMarkingState::kInitialGCState;
+ if (allocation != AllocationType::kYoung &&
+ allocation != AllocationType::kReadOnly) {
+ auto* heap = allocation == AllocationType::kSharedOld
+ ? isolate()->AsIsolate()->shared_space_isolate()->heap()
+ : isolate()->heap()->AsHeap();
+ if (heap->incremental_marking()->IsMajorMarking()) {
+      // Black allocation: We must create a fully marked state.
+ raw_gc_state = DescriptorArrayMarkingState::GetFullyMarkedState(
+ heap->mark_compact_collector()->epoch(), number_of_descriptors);
+ }
+ }
array.Initialize(read_only_roots().empty_enum_cache(),
read_only_roots().undefined_value(), number_of_descriptors,
- slack);
+ slack, raw_gc_state);
return handle(array, isolate());
}
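A simplified, stand-alone model of the black-allocation rule added above (hypothetical types, not V8's DescriptorArrayMarkingState machinery): descriptor arrays created while a major incremental marking cycle is running start out fully marked, so the concurrent marker never observes them in a partially processed state.

#include <cstdint>

enum class MarkState : uint8_t { kInitial, kFullyMarked };

struct HeapLike {
  bool major_incremental_marking = false;
};

struct DescriptorArrayLike {
  MarkState gc_state = MarkState::kInitial;
};

DescriptorArrayLike Allocate(const HeapLike& heap) {
  DescriptorArrayLike array;
  if (heap.major_incremental_marking) {
    // Analogue of GetFullyMarkedState() in the hunk above: pre-mark objects
    // born during the cycle instead of letting the marker discover them.
    array.gc_state = MarkState::kFullyMarked;
  }
  return array;
}

int main() {
  HeapLike heap{/*major_incremental_marking=*/true};
  return Allocate(heap).gc_state == MarkState::kFullyMarked ? 0 : 1;
}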
@@ -1055,6 +1092,7 @@ FactoryBase<Impl>::AllocateRawOneByteInternalizedString(
map);
SeqOneByteString answer = SeqOneByteString::cast(result);
DisallowGarbageCollection no_gc;
+ answer.clear_padding_destructively(length);
answer.set_length(length);
answer.set_raw_hash_field(raw_hash_field);
DCHECK_EQ(size, answer.Size());
@@ -1076,6 +1114,7 @@ FactoryBase<Impl>::AllocateRawTwoByteInternalizedString(
map),
map));
DisallowGarbageCollection no_gc;
+ answer.clear_padding_destructively(length);
answer.set_length(length);
answer.set_raw_hash_field(raw_hash_field);
DCHECK_EQ(size, answer.Size());
@@ -1148,8 +1187,9 @@ FactoryBase<Impl>::NewSwissNameDictionaryWithCapacity(
DCHECK(SwissNameDictionary::IsValidCapacity(capacity));
if (capacity == 0) {
- DCHECK_NE(read_only_roots().at(RootIndex::kEmptySwissPropertyDictionary),
- kNullAddress);
+ DCHECK_NE(
+ read_only_roots().address_at(RootIndex::kEmptySwissPropertyDictionary),
+ kNullAddress);
return read_only_roots().empty_swiss_property_dictionary_handle();
}
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index aea50e6cdb..9739d52e40 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -8,6 +8,7 @@
#include "src/base/export-template.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
+#include "src/objects/code-kind.h"
#include "src/objects/fixed-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/instance-type.h"
@@ -60,6 +61,26 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) TorqueGeneratedFactory {
#include "torque-generated/factory.inc"
};
+struct NewCodeOptions {
+ CodeKind kind;
+ Builtin builtin;
+ bool is_turbofanned;
+ int stack_slots;
+ int kind_specific_flags;
+ AllocationType allocation;
+ int instruction_size;
+ int metadata_size;
+ unsigned int inlined_bytecode_size;
+ BytecodeOffset osr_offset;
+ int handler_table_offset;
+ int constant_pool_offset;
+ int code_comments_offset;
+ int32_t unwinding_info_offset;
+ Handle<ByteArray> reloc_info;
+ Handle<HeapObject> bytecode_or_deoptimization_data;
+ Handle<ByteArray> bytecode_offsets_or_source_position_table;
+};
+
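The NewCodeOptions aggregate above replaces the long positional parameter lists previously threaded through code creation; the factory.cc hunk further down fills it with /*field=*/-annotated initializers. A minimal, self-contained sketch of that options-struct pattern, using generic placeholder names rather than V8's types:

#include <cstdint>

struct BuildOptions {
  int kind;
  int builtin;
  bool is_turbofanned;
  int stack_slots;
  int32_t unwinding_info_offset;
};

int Build(const BuildOptions& options) {
  // A real builder would consume every field; this stub reads just one.
  return options.stack_slots;
}

int main() {
  // Annotating each positional initializer at the call site keeps the call
  // readable without designated initializers, mirroring BuildInternal below.
  BuildOptions options = {
      /*kind=*/0,
      /*builtin=*/-1,
      /*is_turbofanned=*/false,
      /*stack_slots=*/4,
      /*unwinding_info_offset=*/0,
  };
  return Build(options);
}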
template <typename Impl>
class FactoryBase : public TorqueGeneratedFactory<Impl> {
public:
@@ -98,9 +119,8 @@ class FactoryBase : public TorqueGeneratedFactory<Impl> {
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
- // Creates a new CodeDataContainer for a Code object.
- Handle<CodeDataContainer> NewCodeDataContainer(int flags,
- AllocationType allocation);
+  // Creates a new Code for an InstructionStream object.
+ Handle<Code> NewCode(const NewCodeOptions& options);
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
@@ -161,9 +181,12 @@ class FactoryBase : public TorqueGeneratedFactory<Impl> {
Handle<TemplateObjectDescription> NewTemplateObjectDescription(
Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
- Handle<Script> NewScript(Handle<PrimitiveHeapObject> source);
- Handle<Script> NewScriptWithId(Handle<PrimitiveHeapObject> source,
- int script_id);
+ Handle<Script> NewScript(
+ Handle<PrimitiveHeapObject> source,
+ ScriptEventType event_type = ScriptEventType::kCreate);
+ Handle<Script> NewScriptWithId(
+ Handle<PrimitiveHeapObject> source, int script_id,
+ ScriptEventType event_type = ScriptEventType::kCreate);
Handle<ArrayList> NewArrayList(
int size, AllocationType allocation = AllocationType::kYoung);
@@ -336,7 +359,6 @@ class FactoryBase : public TorqueGeneratedFactory<Impl> {
AllocationType allocation);
private:
- friend class WebSnapshotDeserializer;
Impl* impl() { return static_cast<Impl*>(this); }
auto isolate() { return impl()->isolate(); }
ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index a50ec11ccc..c952862340 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -20,6 +20,7 @@
#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
+#include "src/flags/flags.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-inl.h"
@@ -47,7 +48,9 @@
#include "src/objects/fixed-array-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/instance-type-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -61,6 +64,7 @@
#include "src/objects/megadom-handler-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/promise-inl.h"
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/scope-info.h"
@@ -72,7 +76,9 @@
#include "src/roots/roots.h"
#include "src/strings/unicode-inl.h"
#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/module-decoder-impl.h"
#include "src/wasm/module-instantiate.h"
+#include "src/wasm/wasm-opcodes-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-value.h"
#endif
@@ -108,38 +114,34 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
? local_isolate_->factory()->NewByteArray(code_desc_.reloc_size,
AllocationType::kOld)
: factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld);
- Handle<CodeDataContainer> data_container;
-
- // Use a canonical off-heap trampoline CodeDataContainer if possible.
- const int32_t promise_rejection_flag =
- Code::IsPromiseRejectionField::encode(true);
- if (read_only_data_container_ &&
- (kind_specific_flags_ == 0 ||
- kind_specific_flags_ == promise_rejection_flag)) {
- const ReadOnlyRoots roots(isolate_);
- const auto canonical_code_data_container = Handle<CodeDataContainer>::cast(
- kind_specific_flags_ == 0
- ? roots.trampoline_trivial_code_data_container_handle()
- : roots.trampoline_promise_rejection_code_data_container_handle());
- DCHECK_EQ(canonical_code_data_container->kind_specific_flags(kRelaxedLoad),
- kind_specific_flags_);
- data_container = canonical_code_data_container;
+
+ Handle<Code> code;
+
+ NewCodeOptions new_code_options = {
+ /*kind=*/kind_,
+ /*builtin=*/builtin_,
+ /*is_turbofanned=*/is_turbofanned_,
+ /*stack_slots=*/stack_slots_,
+ /*kind_specific_flags=*/kind_specific_flags_,
+ /*allocation=*/AllocationType::kOld,
+ /*instruction_size=*/code_desc_.instruction_size(),
+ /*metadata_size=*/code_desc_.metadata_size(),
+ /*inlined_bytecode_size=*/inlined_bytecode_size_,
+ /*osr_offset=*/osr_offset_,
+ /*handler_table_offset=*/code_desc_.handler_table_offset_relative(),
+ /*constant_pool_offset=*/code_desc_.constant_pool_offset_relative(),
+ /*code_comments_offset=*/code_desc_.code_comments_offset_relative(),
+ /*unwinding_info_offset=*/code_desc_.unwinding_info_offset_relative(),
+ /*reloc_info=*/reloc_info,
+ /*bytecode_or_deoptimization_data=*/kind_ == CodeKind::BASELINE
+ ? interpreter_data_
+ : deoptimization_data_,
+ /*bytecode_offsets_or_source_position_table=*/position_table_};
+
+ if (CompiledWithConcurrentBaseline()) {
+ code = local_isolate_->factory()->NewCode(new_code_options);
} else {
- if (CompiledWithConcurrentBaseline()) {
- data_container = local_isolate_->factory()->NewCodeDataContainer(
- 0, AllocationType::kOld);
- } else {
- data_container = factory->NewCodeDataContainer(
- 0, read_only_data_container_ ? AllocationType::kReadOnly
- : AllocationType::kOld);
- }
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- const bool set_is_off_heap_trampoline = read_only_data_container_;
- data_container->initialize_flags(kind_, builtin_, is_turbofanned_,
- set_is_off_heap_trampoline);
- }
- data_container->set_kind_specific_flags(kind_specific_flags_,
- kRelaxedStore);
+ code = factory->NewCode(new_code_options);
}
// Basic block profiling data for builtins is stored in the JS heap rather
@@ -158,55 +160,32 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
isolate_->heap()->SetBasicBlockProfilingData(new_list);
}
- static_assert(Code::kOnHeapBodyIsContiguous);
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
Heap* heap = isolate_->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- Handle<Code> code;
+ Handle<InstructionStream> instruction_stream;
if (CompiledWithConcurrentBaseline()) {
- if (!AllocateConcurrentSparkplugCode(retry_allocation_or_fail)
- .ToHandle(&code)) {
- return MaybeHandle<Code>();
+ if (!AllocateConcurrentSparkplugInstructionStream(retry_allocation_or_fail)
+ .ToHandle(&instruction_stream)) {
+ return {};
}
- } else if (!AllocateCode(retry_allocation_or_fail).ToHandle(&code)) {
- return MaybeHandle<Code>();
+ } else if (!AllocateInstructionStream(retry_allocation_or_fail)
+ .ToHandle(&instruction_stream)) {
+ return {};
}
{
- Code raw_code = *code;
- constexpr bool kIsNotOffHeapTrampoline = false;
+ InstructionStream raw_istream = *instruction_stream;
DisallowGarbageCollection no_gc;
- raw_code.set_raw_instruction_size(code_desc_.instruction_size());
- raw_code.set_raw_metadata_size(code_desc_.metadata_size());
- raw_code.set_relocation_info(*reloc_info);
- raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
- kIsNotOffHeapTrampoline);
- raw_code.set_builtin_id(builtin_);
// This might impact direct concurrent reads from TF if we are resetting
// this field. We currently assume it's immutable thus a relaxed read (after
// passing IsPendingAllocation).
- raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
- raw_code.set_osr_offset(osr_offset_);
- raw_code.set_code_data_container(*data_container, kReleaseStore);
- if (kind_ == CodeKind::BASELINE) {
- raw_code.set_bytecode_or_interpreter_data(*interpreter_data_);
- raw_code.set_bytecode_offset_table(*position_table_);
- } else {
- raw_code.set_deoptimization_data(*deoptimization_data_);
- raw_code.set_source_position_table(*position_table_);
- }
- raw_code.set_handler_table_offset(
- code_desc_.handler_table_offset_relative());
- raw_code.set_constant_pool_offset(
- code_desc_.constant_pool_offset_relative());
- raw_code.set_code_comments_offset(
- code_desc_.code_comments_offset_relative());
- raw_code.set_unwinding_info_offset(
- code_desc_.unwinding_info_offset_relative());
+ raw_istream.set_code(*code, kReleaseStore);
// Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
+ // point to the newly allocated InstructionStream object.
Handle<Object> self_reference;
if (self_reference_.ToHandle(&self_reference)) {
DCHECK(self_reference->IsOddball());
@@ -215,9 +194,9 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
DCHECK_NE(kind_, CodeKind::BASELINE);
if (isolate_->IsGeneratingEmbeddedBuiltins()) {
isolate_->builtins_constants_table_builder()->PatchSelfReference(
- self_reference, code);
+ self_reference, instruction_stream);
}
- self_reference.PatchValue(*code);
+ self_reference.PatchValue(*instruction_stream);
}
// Likewise, any references to the basic block counters marker need to be
@@ -228,22 +207,25 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
handle(on_heap_profiler_data->counts(), isolate_));
}
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ raw_istream.set_main_cage_base(isolate_->cage_base(), kRelaxedStore);
+ }
+ code->SetInstructionStreamAndEntryPoint(isolate_, raw_istream);
+
// Migrate generated code.
// The generated code can contain embedded objects (typically from
// handles) in a pointer-to-tagged-value format (i.e. with indirection
// like a handle) that are dereferenced during the copy to point directly
// to the actual heap objects. These pointers can include references to
// the code object itself, through the self_reference parameter.
- raw_code.CopyFromNoFlush(*reloc_info, heap, code_desc_);
+ code->CopyFromNoFlush(*reloc_info, heap, code_desc_);
- raw_code.clear_padding();
+ code->ClearInstructionStreamPadding();
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- raw_code.set_main_cage_base(isolate_->cage_base(), kRelaxedStore);
- data_container->SetCodeAndEntryPoint(isolate_, raw_code);
- }
#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap) HeapObject::VerifyCodePointer(isolate_, raw_code);
+ if (v8_flags.verify_heap) {
+ HeapObject::VerifyCodePointer(isolate_, raw_istream);
+ }
#endif
// Flush the instruction cache before changing the permissions.
@@ -251,10 +233,10 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// some older ARM kernels there is a bug which causes an access error on
// cache flush instructions to trigger access error on non-writable memory.
// See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- raw_code.FlushICache();
+ code->FlushICache();
}
- if (profiler_data_ && v8_flags.turbo_profiling_verbose) {
+ if (V8_UNLIKELY(profiler_data_ && v8_flags.turbo_profiling_verbose)) {
#ifdef ENABLE_DISASSEMBLER
std::ostringstream os;
code->Disassemble(nullptr, os, isolate_);
@@ -273,15 +255,13 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
}
// TODO(victorgomes): Unify the two AllocateCodes
-MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
+MaybeHandle<InstructionStream> Factory::CodeBuilder::AllocateInstructionStream(
bool retry_allocation_or_fail) {
Heap* heap = isolate_->heap();
HeapAllocator* allocator = heap->allocator();
HeapObject result;
- AllocationType allocation_type = V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
- ? AllocationType::kCode
- : AllocationType::kReadOnly;
- const int object_size = Code::SizeFor(code_desc_.body_size());
+ const AllocationType allocation_type = AllocationType::kCode;
+ const int object_size = InstructionStream::SizeFor(code_desc_.body_size());
if (retry_allocation_or_fail) {
result = allocator->AllocateRawWith<HeapAllocator::kRetryOrFail>(
object_size, allocation_type, AllocationOrigin::kRuntime);
@@ -289,45 +269,43 @@ MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
result = allocator->AllocateRawWith<HeapAllocator::kLightRetry>(
object_size, allocation_type, AllocationOrigin::kRuntime);
// Return an empty handle if we cannot allocate the code object.
- if (result.is_null()) return MaybeHandle<Code>();
+ if (result.is_null()) return MaybeHandle<InstructionStream>();
}
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowGarbageCollection no_gc;
- result.set_map_after_allocation(*isolate_->factory()->code_map(),
- SKIP_WRITE_BARRIER);
- Handle<Code> code = handle(Code::cast(result), isolate_);
- if (is_executable_) {
- DCHECK(IsAligned(code->address(), kCodeAlignment));
- DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
- heap->code_region().contains(code->address()));
- }
- return code;
-}
-
-MaybeHandle<Code> Factory::CodeBuilder::AllocateConcurrentSparkplugCode(
+ result.set_map_after_allocation(
+ *isolate_->factory()->instruction_stream_map(), SKIP_WRITE_BARRIER);
+ Handle<InstructionStream> istream =
+ handle(InstructionStream::cast(result), isolate_);
+ DCHECK(IsAligned(istream->instruction_start(), kCodeAlignment));
+ DCHECK_IMPLIES(
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+ heap->code_region().contains(istream->address()));
+ return istream;
+}
+
+MaybeHandle<InstructionStream>
+Factory::CodeBuilder::AllocateConcurrentSparkplugInstructionStream(
bool retry_allocation_or_fail) {
LocalHeap* heap = local_isolate_->heap();
- AllocationType allocation_type = V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
- ? AllocationType::kCode
- : AllocationType::kReadOnly;
- const int object_size = Code::SizeFor(code_desc_.body_size());
+ const int object_size = InstructionStream::SizeFor(code_desc_.body_size());
HeapObject result;
- if (!heap->AllocateRaw(object_size, allocation_type).To(&result)) {
- return MaybeHandle<Code>();
+ if (!heap->AllocateRaw(object_size, AllocationType::kCode).To(&result)) {
+ return MaybeHandle<InstructionStream>();
}
CHECK(!result.is_null());
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowGarbageCollection no_gc;
- result.set_map_after_allocation(*local_isolate_->factory()->code_map(),
- SKIP_WRITE_BARRIER);
- Handle<Code> code = handle(Code::cast(result), local_isolate_);
- DCHECK_IMPLIES(is_executable_, IsAligned(code->address(), kCodeAlignment));
- return code;
+ result.set_map_after_allocation(
+ *local_isolate_->factory()->instruction_stream_map(), SKIP_WRITE_BARRIER);
+ Handle<InstructionStream> istream =
+ handle(InstructionStream::cast(result), local_isolate_);
+ DCHECK(IsAligned(istream->instruction_start(), kCodeAlignment));
+ return istream;
}
MaybeHandle<Code> Factory::CodeBuilder::TryBuild() {
@@ -508,7 +486,8 @@ Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
Handle<FeedbackVector> Factory::NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) {
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ Handle<FeedbackCell> parent_feedback_cell) {
int length = shared->feedback_metadata().slot_count();
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
@@ -517,8 +496,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
size, AllocationType::kOld, *feedback_vector_map()));
DisallowGarbageCollection no_gc;
vector.set_shared_function_info(*shared);
- vector.set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()),
- kReleaseStore);
+ vector.set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()));
vector.set_length(length);
vector.set_invocation_count(0);
vector.set_profiler_ticks(0);
@@ -527,6 +505,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector.reset_flags();
vector.set_log_next_execution(v8_flags.log_function_events);
vector.set_closure_feedback_cell_array(*closure_feedback_cell_array);
+ vector.set_parent_feedback_cell(*parent_feedback_cell);
// TODO(leszeks): Initialize based on the feedback metadata.
MemsetTagged(ObjectSlot(vector.slots_start()), *undefined_value(), length);
@@ -640,8 +619,8 @@ Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
Handle<SwissNameDictionary> Factory::CreateCanonicalEmptySwissNameDictionary() {
// This function is only supposed to be used to create the canonical empty
// version and should not be used afterwards.
- DCHECK_EQ(kNullAddress, ReadOnlyRoots(isolate()).at(
- RootIndex::kEmptySwissPropertyDictionary));
+ DCHECK(!ReadOnlyRoots(isolate()).is_initialized(
+ RootIndex::kEmptySwissPropertyDictionary));
ReadOnlyRoots roots(isolate());
@@ -716,7 +695,9 @@ MaybeHandle<String> NewStringFromBytes(Isolate* isolate, PeekBytes peek_bytes,
MessageTemplate message) {
Decoder decoder(peek_bytes());
if (decoder.is_invalid()) {
- ThrowInvalidEncodedStringBytes(isolate, message);
+ if (message != MessageTemplate::kNone) {
+ ThrowInvalidEncodedStringBytes(isolate, message);
+ }
return MaybeHandle<String>();
}
@@ -766,6 +747,9 @@ MaybeHandle<String> NewStringFromUtf8Variant(Isolate* isolate,
return NewStringFromBytes<StrictUtf8Decoder>(
isolate, peek_bytes, allocation,
MessageTemplate::kWasmTrapStringInvalidUtf8);
+ case unibrow::Utf8Variant::kUtf8NoTrap:
+ return NewStringFromBytes<StrictUtf8Decoder>(
+ isolate, peek_bytes, allocation, MessageTemplate::kNone);
case unibrow::Utf8Variant::kWtf8:
return NewStringFromBytes<Wtf8Decoder>(
isolate, peek_bytes, allocation,
@@ -1008,7 +992,7 @@ StringTransitionStrategy Factory::ComputeInternalizationStrategyForString(
if (!internalized_map->is_null()) {
return StringTransitionStrategy::kInPlace;
}
- if (InstanceTypeChecker::IsInternalizedString(map.instance_type())) {
+ if (InstanceTypeChecker::IsInternalizedString(map)) {
return StringTransitionStrategy::kAlreadyTransitioned;
}
return StringTransitionStrategy::kCopy;
@@ -1038,8 +1022,8 @@ template Handle<ExternalTwoByteString>
StringTransitionStrategy Factory::ComputeSharingStrategyForString(
Handle<String> string, MaybeHandle<Map>* shared_map) {
DCHECK(v8_flags.shared_string_table);
- // Do not share young strings in-place: there is no shared young space.
- if (Heap::InYoungGeneration(*string)) {
+ // TODO(pthier): Avoid copying LO-space strings. Update page flags instead.
+ if (!string->InSharedHeap()) {
return StringTransitionStrategy::kCopy;
}
DCHECK_NOT_NULL(shared_map);
@@ -1229,8 +1213,15 @@ Symbol Factory::NewSymbolInternal(AllocationType allocation) {
int hash = isolate()->GenerateIdentityHash(Name::HashBits::kMax);
symbol.set_raw_hash_field(
Name::CreateHashFieldValue(hash, Name::HashFieldType::kHash));
- symbol.set_description(read_only_roots().undefined_value(),
- SKIP_WRITE_BARRIER);
+ if (isolate()->read_only_heap()->roots_init_complete()) {
+ symbol.set_description(read_only_roots().undefined_value(),
+ SKIP_WRITE_BARRIER);
+ } else {
+ // Can't use setter during bootstrapping as its typecheck tries to access
+ // the roots table before it is initialized.
+ TaggedField<Object>::store(symbol, Symbol::kDescriptionOffset,
+ read_only_roots().undefined_value());
+ }
symbol.set_flags(0);
DCHECK(!symbol.is_private());
return symbol;
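The branch above is a bootstrapping-order workaround: the checked description setter consults the roots table, which does not exist yet while the read-only heap is being set up, so early callers fall back to a raw field store. A hypothetical, stand-alone model of that fallback:

#include <cassert>

struct RootsTable {
  bool initialized = false;
};

struct SymbolLike {
  const void* description = nullptr;

  // Checked setter: its validation needs the roots table.
  void set_description(const void* value, const RootsTable& roots) {
    assert(roots.initialized);
    description = value;
  }

  // Raw store used only before the roots table is ready.
  void raw_store_description(const void* value) { description = value; }
};

void InitDescription(SymbolLike& symbol, const RootsTable& roots,
                     const void* undefined_value) {
  if (roots.initialized) {
    symbol.set_description(undefined_value, roots);
  } else {
    symbol.raw_store_description(undefined_value);
  }
}

int main() {
  RootsTable roots;  // Not yet initialized: the bootstrap path is taken.
  SymbolLike symbol;
  int sentinel = 0;
  InitDescription(symbol, roots, &sentinel);
  return symbol.description == &sentinel ? 0 : 1;
}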
@@ -1520,14 +1511,16 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
new_script.set_context_data(old_script.context_data());
new_script.set_type(old_script.type());
new_script.set_line_ends(*undefined_value(), SKIP_WRITE_BARRIER);
- new_script.set_eval_from_shared_or_wrapped_arguments_or_sfi_table(
- script->eval_from_shared_or_wrapped_arguments_or_sfi_table());
+ new_script.set_eval_from_shared_or_wrapped_arguments(
+ script->eval_from_shared_or_wrapped_arguments());
new_script.set_shared_function_infos(*empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
new_script.set_eval_from_position(old_script.eval_from_position());
new_script.set_flags(old_script.flags());
new_script.set_host_defined_options(old_script.host_defined_options());
new_script.set_source_hash(*undefined_value(), SKIP_WRITE_BARRIER);
+ new_script.set_compiled_lazy_function_positions(*undefined_value(),
+ SKIP_WRITE_BARRIER);
#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
new_script.set_script_or_modules(*list);
#endif
@@ -1537,8 +1530,7 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
scripts = WeakArrayList::AddToEnd(isolate(), scripts,
MaybeObjectHandle::Weak(new_script_handle));
heap->set_script_list(*scripts);
- LOG(isolate(),
- ScriptEvent(V8FileLogger::ScriptEventType::kCreate, script_id));
+ LOG(isolate(), ScriptEvent(ScriptEventType::kCreate, script_id));
return new_script_handle;
}
@@ -1642,7 +1634,6 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
auto result = WasmApiFunctionRef::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.set_isolate_root(isolate()->isolate_root());
result.set_native_context(*isolate()->native_context());
if (!callable.is_null()) {
result.set_callable(*callable);
@@ -1675,7 +1666,7 @@ Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<CodeT> wrapper_code, Handle<Map> rtt, wasm::Suspend suspend,
+ Handle<Code> wrapper_code, Handle<Map> rtt, wasm::Suspend suspend,
wasm::Promise promise) {
Handle<WasmApiFunctionRef> ref =
NewWasmApiFunctionRef(callable, suspend, Handle<WasmInstanceObject>());
@@ -1708,10 +1699,10 @@ Handle<WasmResumeData> Factory::NewWasmResumeData(
}
Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
- Handle<CodeT> export_wrapper, Handle<WasmInstanceObject> instance,
+ Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
- const wasm::FunctionSig* sig, int wrapper_budget, Handle<Map> rtt,
- wasm::Promise promise) {
+ const wasm::FunctionSig* sig, uint32_t canonical_type_index,
+ int wrapper_budget, Handle<Map> rtt, wasm::Promise promise) {
Handle<WasmInternalFunction> internal =
NewWasmInternalFunction(call_target, Handle<HeapObject>::cast(ref), rtt);
Map map = *wasm_exported_function_data_map();
@@ -1725,13 +1716,11 @@ Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
result.set_instance(*instance);
result.set_function_index(func_index);
result.init_sig(isolate(), sig);
+ result.set_canonical_type_index(canonical_type_index);
result.set_wrapper_budget(wrapper_budget);
- // We can't skip the write barrier when V8_EXTERNAL_CODE_SPACE is enabled
- // because in this case the CodeT (CodeDataContainer) objects are not
- // immovable.
- result.set_c_wrapper_code(
- *BUILTIN_CODE(isolate(), Illegal),
- V8_EXTERNAL_CODE_SPACE_BOOL ? UPDATE_WRITE_BARRIER : SKIP_WRITE_BARRIER);
+ // We can't skip the write barrier because Code objects are not immovable.
+ result.set_c_wrapper_code(*BUILTIN_CODE(isolate(), Illegal),
+ UPDATE_WRITE_BARRIER);
result.set_packed_args_size(0);
result.set_js_promise_flags(
WasmFunctionData::SuspendField::encode(wasm::kNoSuspend) |
@@ -1741,7 +1730,7 @@ Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<CodeT> wrapper_code, Handle<Map> rtt,
+ Handle<Code> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig) {
Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(
Handle<JSReceiver>(), wasm::kNoSuspend, Handle<WasmInstanceObject>());
@@ -1840,10 +1829,9 @@ Handle<WasmArray> Factory::NewWasmArrayFromMemory(uint32_t length,
}
Handle<Object> Factory::NewWasmArrayFromElementSegment(
- Handle<WasmInstanceObject> instance, const wasm::WasmElemSegment* segment,
+ Handle<WasmInstanceObject> instance, uint32_t segment_index,
uint32_t start_offset, uint32_t length, Handle<Map> map) {
- wasm::ValueType element_type = WasmArray::type(*map)->element_type();
- DCHECK(element_type.is_reference());
+ DCHECK(WasmArray::type(*map)->element_type().is_reference());
HeapObject raw =
AllocateRaw(WasmArray::SizeFor(*map, length), AllocationType::kYoung);
{
@@ -1861,17 +1849,24 @@ Handle<Object> Factory::NewWasmArrayFromElementSegment(
Handle<WasmArray> result = handle(WasmArray::cast(raw), isolate());
+ // Lazily initialize the element segment if needed.
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
+ base::Optional<MessageTemplate> opt_error =
+ wasm::InitializeElementSegment(&zone, isolate(), instance, segment_index);
+ if (opt_error.has_value()) {
+ return handle(Smi::FromEnum(opt_error.value()), isolate());
+ }
+
+ Handle<FixedArray> elements =
+ handle(FixedArray::cast(instance->element_segments().get(segment_index)),
+ isolate());
+
for (uint32_t i = 0; i < length; i++) {
- wasm::ValueOrError maybe_element = wasm::EvaluateConstantExpression(
- &zone, segment->entries[start_offset + i], element_type, isolate(),
- instance);
- if (wasm::is_error(maybe_element)) {
- return handle(Smi::FromEnum(wasm::to_error(maybe_element)), isolate());
- }
- result->SetTaggedElement(i, wasm::to_value(maybe_element).to_ref());
+ result->SetTaggedElement(
+ i, handle(elements->get(start_offset + i), isolate()));
}
+
return result;
}
@@ -1931,12 +1926,21 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
}
#endif // V8_ENABLE_WEBASSEMBLY
-Handle<Cell> Factory::NewCell(Handle<Object> value) {
+Handle<Cell> Factory::NewCell(Smi value) {
static_assert(Cell::kSize <= kMaxRegularHeapObjectSize);
Cell result = Cell::cast(AllocateRawWithImmortalMap(
Cell::kSize, AllocationType::kOld, *cell_map()));
DisallowGarbageCollection no_gc;
- result.set_value(*value);
+ result.set_value(value, WriteBarrierMode::SKIP_WRITE_BARRIER);
+ return handle(result, isolate());
+}
+
+Handle<Cell> Factory::NewCell() {
+ static_assert(Cell::kSize <= kMaxRegularHeapObjectSize);
+ Cell result = Cell::cast(AllocateRawWithImmortalMap(
+ Cell::kSize, AllocationType::kOld, *cell_map()));
+ result.set_value(read_only_roots().undefined_value(),
+ WriteBarrierMode::SKIP_WRITE_BARRIER);
return handle(result, isolate());
}
@@ -2040,6 +2044,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
ElementsKind elements_kind, int inobject_properties,
AllocationType allocation_type) {
static_assert(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK(!InstanceTypeChecker::MayHaveMapCheckFastCase(type));
DCHECK_IMPLIES(InstanceTypeChecker::IsJSObject(type) &&
!Map::CanHaveFastTransitionableElementsKind(type),
IsDictionaryElementsKind(elements_kind) ||
@@ -2052,9 +2057,14 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
DisallowGarbageCollection no_gc;
Heap* roots = allocation_type == AllocationType::kMap
? isolate()->heap()
- : isolate()->shared_heap_isolate()->heap();
+ : isolate()->shared_space_isolate()->heap();
result.set_map_after_allocation(ReadOnlyRoots(roots).meta_map(),
SKIP_WRITE_BARRIER);
+#if V8_STATIC_ROOTS_BOOL
+ CHECK_IMPLIES(InstanceTypeChecker::IsJSReceiver(type),
+ V8HeapCompressionScheme::CompressObject(result.ptr()) >
+ InstanceTypeChecker::kNonJsReceiverMapLimit);
+#endif
return handle(InitializeMap(Map::cast(result), type, instance_size,
elements_kind, inobject_properties, roots),
isolate());
@@ -2074,16 +2084,14 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.set_bit_field3(bit_field3);
map.set_instance_type(type);
ReadOnlyRoots ro_roots(roots);
- HeapObject raw_null_value = ro_roots.null_value();
- map.set_prototype(raw_null_value, SKIP_WRITE_BARRIER);
- map.set_constructor_or_back_pointer(raw_null_value, SKIP_WRITE_BARRIER);
+ map.init_prototype_and_constructor_or_back_pointer(ro_roots);
map.set_instance_size(instance_size);
if (map.IsJSObjectMap()) {
DCHECK(!ReadOnlyHeap::Contains(map));
map.SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
inobject_properties);
DCHECK_EQ(map.GetInObjectProperties(), inobject_properties);
- map.set_prototype_validity_cell(roots->invalid_prototype_validity_cell(),
+ map.set_prototype_validity_cell(ro_roots.invalid_prototype_validity_cell(),
kRelaxedStore);
} else {
DCHECK_EQ(inobject_properties, 0);
@@ -2381,9 +2389,13 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
}
Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
- return CanAllocateInReadOnlySpace()
- ? NewHeapNumber<AllocationType::kReadOnly>(value)
- : NewHeapNumber<AllocationType::kOld>(value);
+ ReadOnlyRoots roots(isolate());
+ auto num = roots.FindHeapNumber(value);
+ if (!num.is_null()) return num;
+  // Add known HeapNumber constants to the read-only roots. This ensures
+  // that read-only snapshots are deterministic.
+ DCHECK(!CanAllocateInReadOnlySpace());
+ return NewHeapNumber<AllocationType::kOld>(value);
}
Handle<JSObject> Factory::NewError(Handle<JSFunction> constructor,
@@ -2487,92 +2499,58 @@ Handle<DeoptimizationLiteralArray> Factory::NewDeoptimizationLiteralArray(
NewWeakFixedArray(length, AllocationType::kOld));
}
-Handle<CodeT> Factory::NewOffHeapTrampolineFor(Handle<CodeT> code,
- Address off_heap_entry) {
+Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
+ Address off_heap_entry) {
CHECK_NOT_NULL(isolate()->embedded_blob_code());
CHECK_NE(0, isolate()->embedded_blob_code_size());
CHECK(Builtins::IsIsolateIndependentBuiltin(*code));
-#ifdef V8_EXTERNAL_CODE_SPACE
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- const int no_flags = 0;
- Handle<CodeDataContainer> code_data_container =
- NewCodeDataContainer(no_flags, AllocationType::kOld);
-
- const bool set_is_off_heap_trampoline = true;
- code_data_container->initialize_flags(code->kind(), code->builtin_id(),
- code->is_turbofanned(),
- set_is_off_heap_trampoline);
- code_data_container->set_kind_specific_flags(
- code->kind_specific_flags(kRelaxedLoad), kRelaxedStore);
- code_data_container->set_code_entry_point(isolate(),
- code->code_entry_point());
- return Handle<CodeT>::cast(code_data_container);
- }
-#endif // V8_EXTERNAL_CODE_SPACE
-
- bool generate_jump_to_instruction_stream =
- Builtins::CodeObjectIsExecutable(code->builtin_id());
- Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
- isolate(), off_heap_entry,
- CodeDataContainerFromCodeT(*code).kind_specific_flags(kRelaxedLoad),
- generate_jump_to_instruction_stream);
-
- // Trampolines may not contain any metadata since all metadata offsets,
- // stored on the Code object, refer to the off-heap metadata area.
- CHECK_EQ(result->raw_metadata_size(), 0);
-
- // The CodeDataContainer should not be modified beyond this point since it's
- // now possibly canonicalized.
-
- // The trampoline code object must inherit specific flags from the original
- // builtin (e.g. the safepoint-table offset). We set them manually here.
- {
- DisallowGarbageCollection no_gc;
- CodePageMemoryModificationScope code_allocation(*result);
- Code raw_code = FromCodeT(*code);
- Code raw_result = *result;
-
- const bool set_is_off_heap_trampoline = true;
- raw_result.initialize_flags(raw_code.kind(), raw_code.is_turbofanned(),
- raw_code.stack_slots(),
- set_is_off_heap_trampoline);
- raw_result.set_builtin_id(raw_code.builtin_id());
- raw_result.set_handler_table_offset(raw_code.handler_table_offset());
- raw_result.set_constant_pool_offset(raw_code.constant_pool_offset());
- raw_result.set_code_comments_offset(raw_code.code_comments_offset());
- raw_result.set_unwinding_info_offset(raw_code.unwinding_info_offset());
-
- // Replace the newly generated trampoline's RelocInfo ByteArray with the
- // canonical one stored in the roots to avoid duplicating it for every
- // single builtin.
- ByteArray canonical_reloc_info =
- generate_jump_to_instruction_stream
- ? read_only_roots().off_heap_trampoline_relocation_info()
- : read_only_roots().empty_byte_array();
-#ifdef DEBUG
- // Verify that the contents are the same.
- ByteArray reloc_info = raw_result.relocation_info();
- DCHECK_EQ(reloc_info.length(), canonical_reloc_info.length());
- for (int i = 0; i < reloc_info.length(); ++i) {
- DCHECK_EQ(reloc_info.get(i), canonical_reloc_info.get(i));
- }
-#endif
- raw_result.set_relocation_info(canonical_reloc_info);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CodeDataContainer code_data_container =
- raw_result.code_data_container(kAcquireLoad);
- // Updating flags (in particular is_off_heap_trampoline one) might change
- // the value of the instruction start, so update it here.
- code_data_container.UpdateCodeEntryPoint(isolate(), raw_result);
- // Also update flag values cached on the code data container.
- code_data_container.initialize_flags(
- raw_code.kind(), raw_code.builtin_id(), raw_code.is_turbofanned(),
- set_is_off_heap_trampoline);
- }
- }
-
- return ToCodeT(result, isolate());
+#if !defined(V8_SHORT_BUILTIN_CALLS) || \
+ defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ // Builtins have a single unique shared entry point per process. The
+ // embedded builtins region may be remapped into the process-wide code
+ // range, but that happens before RO space is deserialized. Their Code
+ // objects can be shared in RO space.
+ const AllocationType allocation_type = AllocationType::kReadOnly;
+#else
+ // Builtins may be remapped more than once per process and thus their
+ // Code objects cannot be shared.
+ const AllocationType allocation_type = AllocationType::kOld;
+#endif // !defined(V8_SHORT_BUILTIN_CALLS) ||
+ // defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+
+ NewCodeOptions new_code_options = {
+ /*kind=*/code->kind(),
+ /*builtin=*/code->builtin_id(),
+ /*is_turbofanned=*/code->is_turbofanned(),
+ /*stack_slots=*/code->stack_slots(),
+ /*kind_specific_flags=*/code->kind_specific_flags(kRelaxedLoad),
+ /*allocation=*/allocation_type,
+ /*instruction_size=*/code->instruction_size(),
+ /*metadata_size=*/code->metadata_size(),
+ /*inlined_bytecode_size=*/code->inlined_bytecode_size(),
+ /*osr_offset=*/code->osr_offset(),
+ /*handler_table_offset=*/code->handler_table_offset(),
+ /*constant_pool_offset=*/code->constant_pool_offset(),
+ /*code_comments_offset=*/code->code_comments_offset(),
+ /*unwinding_info_offset=*/code->unwinding_info_offset(),
+ /*reloc_info=*/
+ Handle<ByteArray>(read_only_roots().empty_byte_array(), isolate()),
+ /*bytecode_or_deoptimization_data=*/
+ Handle<FixedArray>(read_only_roots().empty_fixed_array(), isolate()),
+ /*bytecode_offsets_or_source_position_table=*/
+ Handle<ByteArray>(read_only_roots().empty_byte_array(), isolate())};
+
+ Handle<Code> off_heap_trampoline = NewCode(new_code_options);
+ off_heap_trampoline->set_code_entry_point(isolate(),
+ code->code_entry_point());
+
+ DCHECK_EQ(code->instruction_size(), code->OffHeapInstructionSize());
+ DCHECK_EQ(code->metadata_size(), code->OffHeapMetadataSize());
+ DCHECK_EQ(code->inlined_bytecode_size(), 0);
+ DCHECK_EQ(code->osr_offset(), BytecodeOffset::None());
+
+ return off_heap_trampoline;
}
Handle<BytecodeArray> Factory::CopyBytecodeArray(Handle<BytecodeArray> source) {
@@ -2722,7 +2700,7 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
Handle<AllocationSite> allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
- DCHECK(!InstanceTypeChecker::IsJSFunction((map->instance_type())));
+ DCHECK(!InstanceTypeChecker::IsJSFunction(*map));
// Both types of global objects should be allocated using
// AllocateGlobalObject to be properly initialized.
@@ -2847,7 +2825,8 @@ Handle<JSArray> Factory::NewJSArrayForTemplateLiteralArray(
Handle<JSArray> raw_object =
NewJSArrayWithElements(raw_strings, PACKED_ELEMENTS,
raw_strings->length(), AllocationType::kOld);
- JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
+ JSObject::SetIntegrityLevel(isolate(), raw_object, FROZEN, kThrowOnError)
+ .ToChecked();
Handle<NativeContext> native_context = isolate()->native_context();
Handle<TemplateLiteralObject> template_object =
@@ -3037,9 +3016,13 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
std::shared_ptr<BackingStore> backing_store, AllocationType allocation) {
Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
isolate());
+ ResizableFlag resizable_by_js = ResizableFlag::kNotResizable;
+ if (v8_flags.harmony_rab_gsab && backing_store->is_resizable_by_js()) {
+ resizable_by_js = ResizableFlag::kResizable;
+ }
auto result =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
- result->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ result->Setup(SharedFlag::kNotShared, resizable_by_js,
std::move(backing_store), isolate());
return result;
}
@@ -3047,18 +3030,42 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
size_t byte_length, InitializedFlag initialized,
AllocationType allocation) {
+ return NewJSArrayBufferAndBackingStore(byte_length, byte_length, initialized,
+ ResizableFlag::kNotResizable,
+ allocation);
+}
+
+MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
+ size_t byte_length, size_t max_byte_length, InitializedFlag initialized,
+ ResizableFlag resizable, AllocationType allocation) {
+ DCHECK_LE(byte_length, max_byte_length);
std::unique_ptr<BackingStore> backing_store = nullptr;
- if (byte_length > 0) {
- backing_store = BackingStore::Allocate(isolate(), byte_length,
- SharedFlag::kNotShared, initialized);
+ if (resizable == ResizableFlag::kResizable) {
+ size_t page_size, initial_pages, max_pages;
+ if (JSArrayBuffer::GetResizableBackingStorePageConfiguration(
+ isolate(), byte_length, max_byte_length, kDontThrow, &page_size,
+ &initial_pages, &max_pages)
+ .IsNothing()) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+
+ backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
+ isolate(), byte_length, max_byte_length, page_size, initial_pages,
+ max_pages, WasmMemoryFlag::kNotWasm, SharedFlag::kNotShared);
if (!backing_store) return MaybeHandle<JSArrayBuffer>();
+ } else {
+ if (byte_length > 0) {
+ backing_store = BackingStore::Allocate(
+ isolate(), byte_length, SharedFlag::kNotShared, initialized);
+ if (!backing_store) return MaybeHandle<JSArrayBuffer>();
+ }
}
Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
isolate());
auto array_buffer =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
- array_buffer->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ array_buffer->Setup(SharedFlag::kNotShared, resizable,
std::move(backing_store), isolate());
return array_buffer;
}
@@ -3166,21 +3173,41 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
- size_t byte_offset,
- size_t length) {
+ size_t byte_offset, size_t length,
+ bool is_length_tracking) {
size_t element_size;
ElementsKind elements_kind;
JSTypedArray::ForFixedTypedArray(type, &element_size, &elements_kind);
+
+ CHECK_IMPLIES(is_length_tracking, v8_flags.harmony_rab_gsab);
+ const bool is_backed_by_rab =
+ buffer->is_resizable_by_js() && !buffer->is_shared();
+
+ Handle<Map> map;
+ if (is_backed_by_rab || is_length_tracking) {
+ map = handle(
+ isolate()->raw_native_context().TypedArrayElementsKindToRabGsabCtorMap(
+ elements_kind),
+ isolate());
+ } else {
+ map =
+ handle(isolate()->raw_native_context().TypedArrayElementsKindToCtorMap(
+ elements_kind),
+ isolate());
+ }
+
+ if (is_length_tracking) {
+ // Security: enforce the invariant that length-tracking TypedArrays have
+ // their length and byte_length set to 0.
+ length = 0;
+ }
+
size_t byte_length = length * element_size;
CHECK_LE(length, JSTypedArray::kMaxLength);
CHECK_EQ(length, byte_length / element_size);
CHECK_EQ(0, byte_offset % ElementsKindToByteSize(elements_kind));
- Handle<Map> map(
- isolate()->raw_native_context().TypedArrayElementsKindToCtorMap(
- elements_kind),
- isolate());
Handle<JSTypedArray> typed_array =
Handle<JSTypedArray>::cast(NewJSArrayBufferView(
map, empty_byte_array(), buffer, byte_offset, byte_length));
@@ -3188,25 +3215,36 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
DisallowGarbageCollection no_gc;
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
- raw.set_is_length_tracking(false);
- raw.set_is_backed_by_rab(!buffer->is_shared() &&
- buffer->is_resizable_by_js());
+ raw.set_is_length_tracking(is_length_tracking);
+ raw.set_is_backed_by_rab(is_backed_by_rab);
return typed_array;
}
-Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
- size_t byte_offset,
- size_t byte_length) {
- Handle<Map> map(isolate()->native_context()->data_view_fun().initial_map(),
- isolate());
- Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
- map, empty_fixed_array(), buffer, byte_offset, byte_length));
+Handle<JSDataViewOrRabGsabDataView> Factory::NewJSDataViewOrRabGsabDataView(
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
+ bool is_length_tracking) {
+ CHECK_IMPLIES(is_length_tracking, v8_flags.harmony_rab_gsab);
+ if (is_length_tracking) {
+ // Security: enforce the invariant that length-tracking DataViews have their
+ // byte_length set to 0.
+ byte_length = 0;
+ }
+ bool is_backed_by_rab = !buffer->is_shared() && buffer->is_resizable_by_js();
+ Handle<Map> map;
+ if (is_backed_by_rab || is_length_tracking) {
+ map = handle(isolate()->native_context()->js_rab_gsab_data_view_map(),
+ isolate());
+ } else {
+ map = handle(isolate()->native_context()->data_view_fun().initial_map(),
+ isolate());
+ }
+ Handle<JSDataViewOrRabGsabDataView> obj =
+ Handle<JSDataViewOrRabGsabDataView>::cast(NewJSArrayBufferView(
+ map, empty_fixed_array(), buffer, byte_offset, byte_length));
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- // TODO(v8:11111): Support creating length tracking DataViews via the API.
- obj->set_is_length_tracking(false);
- obj->set_is_backed_by_rab(!buffer->is_shared() &&
- buffer->is_resizable_by_js());
+ obj->set_is_length_tracking(is_length_tracking);
+ obj->set_is_backed_by_rab(is_backed_by_rab);
return obj;
}
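Both the TypedArray and DataView paths above enforce the same invariant: a length-tracking view stores a length (and byte_length) of 0 and derives its effective length from the current size of its resizable buffer on every access. A stand-alone sketch of that invariant with hypothetical types, not V8's JSArrayBufferView layout:

#include <cstddef>

struct BufferLike {
  size_t byte_length;  // Changes when the resizable buffer is resized.
};

struct ViewLike {
  const BufferLike* buffer;
  size_t byte_offset;
  size_t stored_byte_length;  // Forced to 0 for length-tracking views.
  bool is_length_tracking;

  size_t EffectiveByteLength() const {
    if (!is_length_tracking) return stored_byte_length;
    // Recomputed on every access so buffer resizes are observed immediately.
    return buffer->byte_length > byte_offset
               ? buffer->byte_length - byte_offset
               : 0;
  }
};

int main() {
  BufferLike buffer{/*byte_length=*/16};
  ViewLike view{&buffer, /*byte_offset=*/8, /*stored_byte_length=*/0,
                /*is_length_tracking=*/true};
  buffer.byte_length = 32;  // Simulates ArrayBuffer.prototype.resize.
  return view.EffectiveByteLength() == 24 ? 0 : 1;
}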
@@ -3214,8 +3252,8 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
Handle<JSReceiver> target_function, Handle<Object> bound_this,
base::Vector<Handle<Object>> bound_args) {
DCHECK(target_function->IsCallable());
- static_assert(Code::kMaxArguments <= FixedArray::kMaxLength);
- if (bound_args.length() >= Code::kMaxArguments) {
+ static_assert(InstructionStream::kMaxArguments <= FixedArray::kMaxLength);
+ if (bound_args.length() >= InstructionStream::kMaxArguments) {
THROW_NEW_ERROR(isolate(),
NewRangeError(MessageTemplate::kTooManyArguments),
JSBoundFunction);
@@ -3375,22 +3413,14 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
MaybeHandle<String> maybe_name,
Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind) {
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
- maybe_name, function_template_info, Builtin::kNoBuiltinId, kind);
- return shared;
+ return NewSharedFunctionInfo(maybe_name, function_template_info,
+ Builtin::kNoBuiltinId, kind);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> maybe_name, Builtin builtin, FunctionKind kind) {
- Handle<SharedFunctionInfo> shared =
- NewSharedFunctionInfo(maybe_name, MaybeHandle<Code>(), builtin, kind);
- return shared;
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWebSnapshot() {
- return NewSharedFunctionInfo(empty_string(), MaybeHandle<Code>(),
- Builtin::kNoBuiltinId,
- FunctionKind::kNormalFunction);
+ return NewSharedFunctionInfo(maybe_name, MaybeHandle<HeapObject>(), builtin,
+ kind);
}
int Factory::NumberToStringCacheHash(Smi number) {
@@ -3994,19 +4024,25 @@ Handle<JSFunction> Factory::NewFunctionForTesting(Handle<String> name) {
Handle<JSSharedStruct> Factory::NewJSSharedStruct(
Handle<JSFunction> constructor) {
SharedObjectSafePublishGuard publish_guard;
- Handle<JSSharedStruct> instance = Handle<JSSharedStruct>::cast(
- NewJSObject(constructor, AllocationType::kSharedOld));
- Handle<Map> instance_map(instance->map(), isolate());
- if (instance_map->HasOutOfObjectProperties()) {
- int num_oob_fields =
- instance_map->NumberOfFields(ConcurrencyMode::kSynchronous) -
- instance_map->GetInObjectProperties();
- Handle<PropertyArray> property_array =
+ Handle<Map> instance_map(constructor->initial_map(), isolate());
+ Handle<PropertyArray> property_array;
+ const int num_oob_fields =
+ instance_map->NumberOfFields(ConcurrencyMode::kSynchronous) -
+ instance_map->GetInObjectProperties();
+ if (num_oob_fields > 0) {
+ property_array =
NewPropertyArray(num_oob_fields, AllocationType::kSharedOld);
- instance->SetProperties(*property_array);
}
+ Handle<JSSharedStruct> instance = Handle<JSSharedStruct>::cast(
+ NewJSObject(constructor, AllocationType::kSharedOld));
+
+ // The struct object has not been fully initialized yet. Disallow allocation
+ // from this point on.
+ DisallowGarbageCollection no_gc;
+ if (!property_array.is_null()) instance->SetProperties(*property_array);
+
return instance;
}
@@ -4018,6 +4054,10 @@ Handle<JSSharedArray> Factory::NewJSSharedArray(Handle<JSFunction> constructor,
Handle<JSSharedArray> instance = Handle<JSSharedArray>::cast(
NewJSObject(constructor, AllocationType::kSharedOld));
instance->set_elements(*storage);
+ FieldIndex index = FieldIndex::ForDescriptor(
+ constructor->initial_map(),
+ InternalIndex(JSSharedArray::kLengthFieldIndex));
+ instance->FastPropertyAtPut(index, Smi::FromInt(length), SKIP_WRITE_BARRIER);
return instance;
}
@@ -4049,7 +4089,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
PrepareMap();
PrepareFeedbackCell();
- Handle<CodeT> code = handle(sfi_->GetCode(), isolate_);
+ Handle<Code> code = handle(sfi_->GetCode(isolate_), isolate_);
Handle<JSFunction> result = BuildRaw(code);
if (code->kind() == CodeKind::BASELINE) {
@@ -4061,14 +4101,14 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
return result;
}
-Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<CodeT> code) {
+Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
Isolate* isolate = isolate_;
Factory* factory = isolate_->factory();
Handle<Map> map = maybe_map_.ToHandleChecked();
Handle<FeedbackCell> feedback_cell = maybe_feedback_cell_.ToHandleChecked();
- DCHECK(InstanceTypeChecker::IsJSFunction(map->instance_type()));
+ DCHECK(InstanceTypeChecker::IsJSFunction(*map));
// Allocation.
JSFunction function = JSFunction::cast(factory->New(map, allocation_type_));
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 8b7738cad8..3890807e1a 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -148,7 +148,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// values.
Handle<FeedbackVector> NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array);
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ Handle<FeedbackCell> parent_feedback_cell);
// Allocates a clean embedder data array with given capacity.
Handle<EmbedderDataArray> NewEmbedderDataArray(int length);
@@ -449,7 +450,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Foreign> NewForeign(
Address addr, AllocationType allocation_type = AllocationType::kYoung);
- Handle<Cell> NewCell(Handle<Object> value);
+ Handle<Cell> NewCell(Smi value);
+ Handle<Cell> NewCell();
Handle<PropertyCell> NewPropertyCell(
Handle<Name> name, PropertyDetails details, Handle<Object> value,
@@ -635,13 +637,13 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> rtt);
Handle<WasmCapiFunctionData> NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<CodeT> wrapper_code, Handle<Map> rtt,
+ Handle<Code> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig);
Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
- Handle<CodeT> export_wrapper, Handle<WasmInstanceObject> instance,
+ Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
- const wasm::FunctionSig* sig, int wrapper_budget, Handle<Map> rtt,
- wasm::Promise promise);
+ const wasm::FunctionSig* sig, uint32_t canonical_type_index,
+ int wrapper_budget, Handle<Map> rtt, wasm::Promise promise);
Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(
Handle<JSReceiver> callable, wasm::Suspend suspend,
Handle<WasmInstanceObject> instance);
@@ -650,7 +652,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<WasmJSFunctionData> NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<CodeT> wrapper_code, Handle<Map> rtt, wasm::Suspend suspend,
+ Handle<Code> wrapper_code, Handle<Map> rtt, wasm::Suspend suspend,
wasm::Promise promise);
Handle<WasmResumeData> NewWasmResumeData(
Handle<WasmSuspenderObject> suspender, wasm::OnResume on_resume);
@@ -667,7 +669,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Returns a handle to a WasmArray if successful, or a Smi containing a
// {MessageTemplate} if computing the array's elements leads to an error.
Handle<Object> NewWasmArrayFromElementSegment(
- Handle<WasmInstanceObject> instance, const wasm::WasmElemSegment* segment,
+ Handle<WasmInstanceObject> instance, uint32_t segment_index,
uint32_t start_offset, uint32_t length, Handle<Map> map);
Handle<WasmContinuationObject> NewWasmContinuationObject(
Address jmpbuf, Handle<Foreign> managed_stack, Handle<HeapObject> parent,
@@ -696,6 +698,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
size_t byte_length, InitializedFlag initialized,
AllocationType allocation = AllocationType::kYoung);
+ MaybeHandle<JSArrayBuffer> NewJSArrayBufferAndBackingStore(
+ size_t byte_length, size_t max_byte_length, InitializedFlag initialized,
+ ResizableFlag resizable = ResizableFlag::kNotResizable,
+ AllocationType allocation = AllocationType::kYoung);
+
Handle<JSArrayBuffer> NewJSSharedArrayBuffer(
std::shared_ptr<BackingStore> backing_store);
@@ -706,10 +713,12 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Creates a new JSTypedArray with the specified buffer.
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t length);
+ size_t byte_offset, size_t length,
+ bool is_length_tracking = false);
- Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t byte_length);
+ Handle<JSDataViewOrRabGsabDataView> NewJSDataViewOrRabGsabDataView(
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
+ bool is_length_tracking = false);
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
@@ -746,8 +755,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
- Handle<CodeT> NewOffHeapTrampolineFor(Handle<CodeT> code,
- Address off_heap_entry);
+ Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
+ Address off_heap_entry);
Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
@@ -801,8 +810,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<String> name, Builtin builtin,
FunctionKind kind = FunctionKind::kNormalFunction);
- Handle<SharedFunctionInfo> NewSharedFunctionInfoForWebSnapshot();
-
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
return (function_mode & kWithPrototypeBits) != 0;
}
@@ -910,7 +917,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
void PrepareMap();
void PrepareFeedbackCell();
- V8_WARN_UNUSED_RESULT Handle<JSFunction> BuildRaw(Handle<CodeT> code);
+ V8_WARN_UNUSED_RESULT Handle<JSFunction> BuildRaw(Handle<Code> code);
Isolate* const isolate_;
Handle<SharedFunctionInfo> sfi_;
@@ -922,8 +929,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
friend class Factory;
};
- // Allows creation of Code objects. It provides two build methods, one of
- // which tries to gracefully handle allocation failure.
+ // Allows creation of InstructionStream objects. It provides two build
+ // methods, one of which tries to gracefully handle allocation failure.
class V8_EXPORT_PRIVATE CodeBuilder final {
public:
CodeBuilder(Isolate* isolate, const CodeDesc& desc, CodeKind kind);
@@ -933,15 +940,16 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
CodeKind kind);
// Builds a new code object (fully initialized). All header fields of the
- // returned object are immutable and the code object is write protected.
+ // associated InstructionStream are immutable and the InstructionStream
+ // object is write protected.
V8_WARN_UNUSED_RESULT Handle<Code> Build();
// Like Build, builds a new code object. May return an empty handle if the
// allocation fails.
V8_WARN_UNUSED_RESULT MaybeHandle<Code> TryBuild();
// Sets the self-reference object in which a reference to the code object is
- // stored. This allows generated code to reference its own Code object by
- // using this handle.
+ // stored. This allows generated code to reference its own InstructionStream
+ // object by using this handle.
CodeBuilder& set_self_reference(Handle<Object> self_reference) {
DCHECK(!self_reference.is_null());
self_reference_ = self_reference;
@@ -998,21 +1006,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
- CodeBuilder& set_is_executable(bool executable) {
- DCHECK_EQ(kind_, CodeKind::BUILTIN);
- is_executable_ = executable;
- return *this;
- }
-
- // Indicates the CodeDataContainer should be allocated in read-only space.
- // As an optimization, if the kind-specific flags match that of a canonical
- // container, it will be used instead.
- CodeBuilder& set_read_only_data_container(bool read_only) {
- CHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !read_only);
- read_only_data_container_ = read_only;
- return *this;
- }
-
CodeBuilder& set_kind_specific_flags(int32_t flags) {
kind_specific_flags_ = flags;
return *this;
@@ -1032,8 +1025,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
private:
MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
- MaybeHandle<Code> AllocateCode(bool retry_allocation_or_fail);
- MaybeHandle<Code> AllocateConcurrentSparkplugCode(
+ MaybeHandle<InstructionStream> AllocateInstructionStream(
+ bool retry_allocation_or_fail);
+ MaybeHandle<InstructionStream> AllocateConcurrentSparkplugInstructionStream(
bool retry_allocation_or_fail);
Isolate* const isolate_;
@@ -1053,15 +1047,12 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
DeoptimizationData::Empty(isolate_);
Handle<HeapObject> interpreter_data_;
BasicBlockProfilerData* profiler_data_ = nullptr;
- bool is_executable_ = true;
- bool read_only_data_container_ = false;
bool is_turbofanned_ = false;
int stack_slots_ = 0;
};
private:
friend class FactoryBase<Factory>;
- friend class WebSnapshotDeserializer;
// ------
// Customization points for FactoryBase
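To make the CodeBuilder interface above concrete, a hypothetical build sequence (the CodeDesc `desc` and `self_reference` handle are assumed to exist; CodeKind::FOR_TESTING is only an example kind):

    Factory::CodeBuilder builder(isolate, desc, CodeKind::FOR_TESTING);
    Handle<Code> code =
        builder.set_self_reference(self_reference)  // optional, see setter above
            .Build();  // Build() does not return an empty handle; TryBuild() may.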
diff --git a/deps/v8/src/heap/free-list.h b/deps/v8/src/heap/free-list.h
index 8e205203f0..147c93e56c 100644
--- a/deps/v8/src/heap/free-list.h
+++ b/deps/v8/src/heap/free-list.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
+#include "src/heap/allocation-result.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/free-space.h"
#include "src/objects/map.h"
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 2d1e0dafad..db450fc373 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -409,6 +409,7 @@ void GCTracer::UpdateStatistics(GarbageCollector collector) {
if (V8_UNLIKELY(TracingFlags::gc.load(std::memory_order_relaxed) &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, "V8.GC_HEAP_DUMP_STATISTICS");
std::stringstream heap_stats;
heap_->DumpJSONHeapStatistics(heap_stats);
@@ -517,20 +518,26 @@ void GCTracer::StopYoungCycleIfNeeded() {
}
void GCTracer::NotifyFullSweepingCompleted() {
+ // Notifying twice that V8 sweeping is finished for the same cycle is possible
+ // only if Oilpan sweeping is still in progress.
+ DCHECK_IMPLIES(notified_full_sweeping_completed_,
+ !notified_full_cppgc_completed_);
+
if (Event::IsYoungGenerationEvent(current_.type)) {
- bool was_young_gc_while_full_gc_ = young_gc_while_full_gc_;
+ bool was_young_gc_while_full_gc = young_gc_while_full_gc_;
+ bool was_full_sweeping_notified = notified_full_sweeping_completed_;
NotifyYoungSweepingCompleted();
- if (!was_young_gc_while_full_gc_) return;
+ // NotifyYoungSweepingCompleted checks if the full cycle needs to be stopped
+ // as well. If full sweeping was already notified, nothing more needs to be
+ // done here.
+ if (!was_young_gc_while_full_gc || was_full_sweeping_notified) return;
}
+
DCHECK(!Event::IsYoungGenerationEvent(current_.type));
- if (v8_flags.verify_heap) {
- // If heap verification is enabled, sweeping finalization can also be
- // triggered from inside a full GC cycle's atomic pause.
- DCHECK(current_.state == Event::State::SWEEPING ||
- current_.state == Event::State::ATOMIC);
- } else {
- DCHECK(IsSweepingInProgress());
- }
+ // Sweeping finalization can also be triggered from inside a full GC cycle's
+ // atomic pause.
+ DCHECK(current_.state == Event::State::SWEEPING ||
+ current_.state == Event::State::ATOMIC);
// Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
// finished sweeping. This method is invoked by v8.
@@ -544,10 +551,6 @@ void GCTracer::NotifyFullSweepingCompleted() {
heap_->old_space()->PrintAllocationsOrigins();
heap_->code_space()->PrintAllocationsOrigins();
}
- // Notifying twice that V8 sweeping is finished for the same cycle is possible
- // only if Oilpan sweeping is still in progress.
- DCHECK_IMPLIES(notified_full_sweeping_completed_,
- notified_full_cppgc_completed_);
notified_full_sweeping_completed_ = true;
StopFullCycleIfNeeded();
}
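Expressed as a single boolean condition, the relocated DCHECK at the top of NotifyFullSweepingCompleted is equivalent to this sketch:

    // Never both at once: a repeated "V8 full sweeping done" notification is
    // only legal while cppgc (Oilpan) sweeping for the same cycle is pending.
    DCHECK(!(notified_full_sweeping_completed_ && notified_full_cppgc_completed_));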
@@ -727,12 +730,11 @@ void GCTracer::Print() const {
Output(
"[%d:%p] "
"%8.0f ms: "
- "%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
- "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s; %s\n",
+ "%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
+ "%.2f / %.2f ms %s (average mu = %.3f, current mu = %.3f) %s; %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
- heap_->isolate()->time_millis_since_init(),
- heap_->IsShared() ? "Shared " : "", current_.TypeName(false),
+ heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
current_.reduce_memory ? " (reduce)" : "",
static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB,
@@ -812,7 +814,7 @@ void GCTracer::PrintNVP() const {
current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
current_scope(Scope::HEAP_PROLOGUE),
current_scope(Scope::HEAP_EPILOGUE),
- current_scope(Scope::HEAP_EPILOGUE_ADJUST_NEW_SPACE),
+ current_scope(Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE),
current_scope(Scope::HEAP_EXTERNAL_PROLOGUE),
current_scope(Scope::HEAP_EXTERNAL_EPILOGUE),
current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES),
@@ -858,28 +860,24 @@ void GCTracer::PrintNVP() const {
"mark.seed=%.2f "
"mark.closure_parallel=%.2f "
"mark.closure=%.2f "
- "mark.global_handles=%.2f "
"clear=%.2f "
+ "clear.string_forwarding_table=%.2f "
"clear.string_table=%.2f "
+ "clear.global_handles=%.2f "
"complete.sweep_array_buffers=%.2f "
- "evacuate=%.2f "
- "evacuate.clean_up=%.2f "
- "evacuate.copy=%.2f "
- "evacuate.prologue=%.2f "
- "evacuate.epilogue=%.2f "
- "evacuate.rebalance=%.2f "
- "evacuate.update_pointers=%.2f "
- "evacuate.update_pointers.slots=%.2f "
- "evacuate.update_pointers.weak=%.2f "
+ "complete.sweeping=%.2f "
"sweep=%.2f "
"sweep.new=%.2f "
"sweep.new_lo=%.2f "
+ "sweep.update_string_table=%.2f "
+ "sweep.start_jobs=%.2f "
+ "sweep.array_buffers=%.2f "
"finish=%.2f "
- "finish.sweep_array_buffers=%.2f "
+ "finish.ensure_capacity=%.2f "
"background.mark=%.2f "
"background.sweep=%.2f "
+ "background.sweep.array_buffers=%.2f "
"background.evacuate.copy=%.2f "
- "background.evacuate.update_pointers=%.2f "
"background.unmapper=%.2f "
"unmapper=%.2f "
"total_size_before=%zu "
@@ -906,28 +904,24 @@ void GCTracer::PrintNVP() const {
current_scope(Scope::MINOR_MC_MARK_SEED),
current_scope(Scope::MINOR_MC_MARK_CLOSURE_PARALLEL),
current_scope(Scope::MINOR_MC_MARK_CLOSURE),
- current_scope(Scope::MINOR_MC_MARK_GLOBAL_HANDLES),
current_scope(Scope::MINOR_MC_CLEAR),
+ current_scope(Scope::MINOR_MC_CLEAR_STRING_FORWARDING_TABLE),
current_scope(Scope::MINOR_MC_CLEAR_STRING_TABLE),
+ current_scope(Scope::MINOR_MC_CLEAR_WEAK_GLOBAL_HANDLES),
current_scope(Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS),
- current_scope(Scope::MINOR_MC_EVACUATE),
- current_scope(Scope::MINOR_MC_EVACUATE_CLEAN_UP),
- current_scope(Scope::MINOR_MC_EVACUATE_COPY),
- current_scope(Scope::MINOR_MC_EVACUATE_PROLOGUE),
- current_scope(Scope::MINOR_MC_EVACUATE_EPILOGUE),
- current_scope(Scope::MINOR_MC_EVACUATE_REBALANCE),
- current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS),
- current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS),
- current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK),
+ current_scope(Scope::MINOR_MC_COMPLETE_SWEEPING),
current_scope(Scope::MINOR_MC_SWEEP),
current_scope(Scope::MINOR_MC_SWEEP_NEW),
current_scope(Scope::MINOR_MC_SWEEP_NEW_LO),
+ current_scope(Scope::MINOR_MC_SWEEP_UPDATE_STRING_TABLE),
+ current_scope(Scope::MINOR_MC_SWEEP_START_JOBS),
+ current_scope(Scope::YOUNG_ARRAY_BUFFER_SWEEP),
current_scope(Scope::MINOR_MC_FINISH),
- current_scope(Scope::MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS),
+ current_scope(Scope::MINOR_MC_FINISH_ENSURE_CAPACITY),
current_scope(Scope::MINOR_MC_BACKGROUND_MARKING),
current_scope(Scope::MINOR_MC_BACKGROUND_SWEEPING),
+ current_scope(Scope::BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP),
current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY),
- current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS),
current_scope(Scope::BACKGROUND_UNMAPPER),
current_scope(Scope::UNMAPPER), current_.start_object_size,
current_.end_object_size, current_.start_holes_size,
@@ -967,6 +961,7 @@ void GCTracer::PrintNVP() const {
"clear.weak_references=%.1f "
"clear.join_job=%.1f "
"complete.sweep_array_buffers=%.1f "
+ "complete.sweeping=%.1f "
"epilogue=%.1f "
"evacuate=%.1f "
"evacuate.candidates=%.1f "
@@ -1039,7 +1034,7 @@ void GCTracer::PrintNVP() const {
current_scope(Scope::HEAP_PROLOGUE),
current_scope(Scope::HEAP_EMBEDDER_TRACING_EPILOGUE),
current_scope(Scope::HEAP_EPILOGUE),
- current_scope(Scope::HEAP_EPILOGUE_ADJUST_NEW_SPACE),
+ current_scope(Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE),
current_scope(Scope::HEAP_EXTERNAL_PROLOGUE),
current_scope(Scope::HEAP_EXTERNAL_EPILOGUE),
current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES),
@@ -1055,6 +1050,7 @@ void GCTracer::PrintNVP() const {
current_scope(Scope::MC_CLEAR_WEAK_REFERENCES),
current_scope(Scope::MC_CLEAR_JOIN_JOB),
current_scope(Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS),
+ current_scope(Scope::MC_COMPLETE_SWEEPING),
current_scope(Scope::MC_EPILOGUE), current_scope(Scope::MC_EVACUATE),
current_scope(Scope::MC_EVACUATE_CANDIDATES),
current_scope(Scope::MC_EVACUATE_CLEAN_UP),
@@ -1355,10 +1351,6 @@ void GCTracer::NotifyIncrementalMarkingStart() {
void GCTracer::FetchBackgroundMarkCompactCounters() {
FetchBackgroundCounters(Scope::FIRST_MC_BACKGROUND_SCOPE,
Scope::LAST_MC_BACKGROUND_SCOPE);
- heap_->isolate()->counters()->background_marking()->AddSample(
- static_cast<int>(current_.scopes[Scope::MC_BACKGROUND_MARKING]));
- heap_->isolate()->counters()->background_sweeping()->AddSample(
- static_cast<int>(current_.scopes[Scope::MC_BACKGROUND_SWEEPING]));
}
void GCTracer::FetchBackgroundMinorGCCounters() {
@@ -1765,8 +1757,7 @@ void GCTracer::ReportYoungCycleToRecorder() {
current_.scopes[Scope::MINOR_MARK_COMPACTOR] +
current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL] +
current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY] +
- current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING] +
- current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]) *
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING]) *
base::Time::kMicrosecondsPerMillisecond;
// TODO(chromium:1154636): Consider adding BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
// (both for the case of the scavenger and the minor mark-compactor), and
@@ -1811,5 +1802,20 @@ void GCTracer::ReportYoungCycleToRecorder() {
recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
}
+GarbageCollector GCTracer::GetCurrentCollector() const {
+ switch (current_.type) {
+ case Event::Type::SCAVENGER:
+ return GarbageCollector::SCAVENGER;
+ case Event::Type::MARK_COMPACTOR:
+ case Event::Type::INCREMENTAL_MARK_COMPACTOR:
+ return GarbageCollector::MARK_COMPACTOR;
+ case Event::Type::MINOR_MARK_COMPACTOR:
+ case Event::Type::INCREMENTAL_MINOR_MARK_COMPACTOR:
+ return GarbageCollector::MINOR_MARK_COMPACTOR;
+ case Event::Type::START:
+ UNREACHABLE();
+ }
+}
+
} // namespace internal
} // namespace v8
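A hypothetical call site for the new accessor, assuming the usual heap->tracer() accessor and an active GC cycle (the accessor is declared in gc-tracer.h below):

    if (heap->tracer()->GetCurrentCollector() ==
        GarbageCollector::MARK_COMPACTOR) {
      // Bookkeeping that only applies to full (mark-compact) cycles.
    }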
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 753e5d820b..e6d34cb367 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -347,7 +347,7 @@ class V8_EXPORT_PRIVATE GCTracer {
double time_ms = 0) const;
// Allocation throughput in the embedder in bytes/millisecond in the
- // last time_ms milliseconds. Reported through v8::EmbedderHeapTracer.
+ // last time_ms milliseconds.
// Returns 0 if no allocation events have been recorded.
double EmbedderAllocationThroughputInBytesPerMillisecond(
double time_ms = 0) const;
@@ -368,7 +368,7 @@ class V8_EXPORT_PRIVATE GCTracer {
double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
// Allocation throughput in the embedder in bytes/milliseconds in the last
- // kThroughputTimeFrameMs seconds. Reported through v8::EmbedderHeapTracer.
+ // kThroughputTimeFrameMs seconds.
// Returns 0 if no allocation events have been recorded.
double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const;
@@ -409,6 +409,8 @@ class V8_EXPORT_PRIVATE GCTracer {
return current_.gc_reason == GarbageCollectionReason::kAllocationFailure;
}
+ GarbageCollector GetCurrentCollector() const;
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
diff --git a/deps/v8/src/heap/global-handle-marking-visitor.h b/deps/v8/src/heap/global-handle-marking-visitor.h
deleted file mode 100644
index 9c1b1a1b34..0000000000
--- a/deps/v8/src/heap/global-handle-marking-visitor.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_GLOBAL_HANDLE_MARKING_VISITOR_H_
-#define V8_HEAP_GLOBAL_HANDLE_MARKING_VISITOR_H_
-
-#include "src/handles/traced-handles.h"
-#include "src/heap/base/stack.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-
-namespace v8 {
-namespace internal {
-
-// Root marking visitor for conservatively marking traced global handles.
-// The visitor assumes that on-stack pointers may point into global handle nodes
-// which requires them to be kept alive.
-class GlobalHandleMarkingVisitor final : public ::heap::base::StackVisitor {
- public:
- GlobalHandleMarkingVisitor(Heap&, MarkingWorklists::Local&);
- ~GlobalHandleMarkingVisitor() override = default;
-
- void VisitPointer(const void*) override;
-
- private:
- Heap& heap_;
- MarkingState& marking_state_;
- MarkingWorklists::Local& local_marking_worklist_;
- const TracedHandles::NodeBounds traced_node_bounds_;
-};
-
-#endif // V8_HEAP_GLOBAL_HANDLE_MARKING_VISITOR_H_
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/heap-allocator-inl.h b/deps/v8/src/heap/heap-allocator-inl.h
index 77b08f98fd..ad49b902a2 100644
--- a/deps/v8/src/heap/heap-allocator-inl.h
+++ b/deps/v8/src/heap/heap-allocator-inl.h
@@ -15,10 +15,6 @@
#include "src/heap/read-only-spaces.h"
#include "src/heap/third-party/heap-api.h"
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-#include "src/heap/object-start-bitmap-inl.h"
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
namespace v8 {
namespace internal {
@@ -143,14 +139,6 @@ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult HeapAllocator::AllocateRaw(
}
}
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- if (AllocationType::kReadOnly != type) {
- DCHECK_TAG_ALIGNED(object.address());
- Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
- object.address());
- }
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
for (auto& tracker : heap_->allocation_trackers_) {
tracker->AllocationEvent(object.address(), size_in_bytes);
}
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
index a6c97324eb..8cdb4f1fd0 100644
--- a/deps/v8/src/heap/heap-allocator.cc
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -87,11 +87,6 @@ AllocationResult HeapAllocator::AllocateRawWithLightRetrySlowPath(
GarbageCollectionReason::kAllocationFailure);
} else {
AllocationSpace space_to_gc = AllocationTypeToGCSpace(allocation);
- if (v8_flags.minor_mc && i > 0) {
- // Repeated young gen GCs won't have any additional effect. Do a full GC
- // instead.
- space_to_gc = AllocationSpace::OLD_SPACE;
- }
heap_->CollectGarbage(space_to_gc,
GarbageCollectionReason::kAllocationFailure);
}
@@ -117,7 +112,7 @@ AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
// We need always_allocate() to be true both on the client- and
// server-isolate. It is used in both code paths.
AlwaysAllocateScope shared_scope(
- heap_->isolate()->shared_heap_isolate()->heap());
+ heap_->isolate()->shared_space_isolate()->heap());
AlwaysAllocateScope client_scope(heap_);
result = AllocateRaw(size, allocation, origin, alignment);
} else {
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index e232d2c3cf..28ca220af1 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -49,6 +49,7 @@
#include "src/objects/struct-inl.h"
#include "src/objects/visitors-inl.h"
#include "src/profiler/heap-profiler.h"
+#include "src/roots/static-roots.h"
#include "src/strings/string-hasher.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-list-inl.h"
@@ -61,7 +62,7 @@ T ForwardingAddress(T heap_obj) {
MapWord map_word = heap_obj.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
- return T::cast(map_word.ToForwardingAddress());
+ return T::cast(map_word.ToForwardingAddress(heap_obj));
} else if (Heap::InFromPage(heap_obj)) {
DCHECK(!v8_flags.minor_mc);
return T();
@@ -104,9 +105,9 @@ bool Heap::IsMainThread() const {
}
bool Heap::IsSharedMainThread() const {
- if (!isolate()->has_shared_heap()) return false;
- Isolate* shared_heap_isolate = isolate()->shared_heap_isolate();
- return shared_heap_isolate->thread_id() == ThreadId::Current();
+ if (!isolate()->has_shared_space()) return false;
+ Isolate* shared_space_isolate = isolate()->shared_space_isolate();
+ return shared_space_isolate->thread_id() == ThreadId::Current();
}
int64_t Heap::external_memory() { return external_memory_.total(); }
@@ -129,6 +130,24 @@ FixedArray Heap::single_character_string_table() {
Object(roots_table()[RootIndex::kSingleCharacterStringTable]));
}
+#define STATIC_ROOTS_FAILED_MSG \
+ "Read-only heap layout changed. Run `tools/dev/gen-static-roots.py` to " \
+ "update static-roots.h."
+#if V8_STATIC_ROOTS_BOOL
+// Check all read-only roots are allocated where we expect them. Skip `Exception`
+// which changes during setup-heap-internal.
+#define DCHECK_STATIC_ROOT(obj, name) \
+ if constexpr (RootsTable::IsReadOnly(RootIndex::k##name) && \
+ RootIndex::k##name != RootIndex::kException) { \
+ DCHECK_WITH_MSG(V8HeapCompressionScheme::CompressObject(obj.ptr()) == \
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>( \
+ RootIndex::k##name)], \
+ STATIC_ROOTS_FAILED_MSG); \
+ }
+#else
+#define DCHECK_STATIC_ROOT(obj, name)
+#endif
+
#define ROOT_ACCESSOR(type, name, CamelName) \
void Heap::set_##name(type value) { \
/* The deserializer makes use of the fact that these common roots are */ \
@@ -137,10 +156,13 @@ FixedArray Heap::single_character_string_table() {
!RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName), \
IsImmovable(HeapObject::cast(value))); \
+ DCHECK_STATIC_ROOT(value, CamelName); \
roots_table()[RootIndex::k##CamelName] = value.ptr(); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+#undef DCHECK_STATIC_ROOT
+#undef STATIC_ROOTS_FAILED_MSG
void Heap::SetRootMaterializedObjects(FixedArray objects) {
roots_table()[RootIndex::kMaterializedObjects] = objects.ptr();
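For a concrete read-only root such as kEmptyFixedArray (chosen here purely as an example), the DCHECK_STATIC_ROOT use above expands, when V8_STATIC_ROOTS_BOOL is true, to roughly:

    if constexpr (RootsTable::IsReadOnly(RootIndex::kEmptyFixedArray) &&
                  RootIndex::kEmptyFixedArray != RootIndex::kException) {
      DCHECK_WITH_MSG(V8HeapCompressionScheme::CompressObject(value.ptr()) ==
                          StaticReadOnlyRootsPointerTable[static_cast<size_t>(
                              RootIndex::kEmptyFixedArray)],
                      STATIC_ROOTS_FAILED_MSG);
    }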
@@ -160,12 +182,12 @@ void Heap::SetFunctionsMarkedForManualOptimization(Object hash_table) {
hash_table.ptr();
}
-PagedSpace* Heap::paged_space(int idx) {
+PagedSpace* Heap::paged_space(int idx) const {
DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == SHARED_SPACE);
return static_cast<PagedSpace*>(space_[idx].get());
}
-Space* Heap::space(int idx) { return space_[idx].get(); }
+Space* Heap::space(int idx) const { return space_[idx].get(); }
Address* Heap::NewSpaceAllocationTopAddress() {
return new_space_ ? new_space_->allocation_top_address() : nullptr;
@@ -275,10 +297,10 @@ bool Heap::InYoungGeneration(HeapObject heap_object) {
}
// static
-bool Heap::InSharedWritableHeap(MaybeObject object) {
+bool Heap::InWritableSharedSpace(MaybeObject object) {
HeapObject heap_object;
return object->GetHeapObject(&heap_object) &&
- heap_object.InSharedWritableHeap();
+ heap_object.InWritableSharedSpace();
}
// static
@@ -389,6 +411,10 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
case SHARED_SPACE:
case SHARED_LO_SPACE:
+ // TODO(v8:13267): Ensure that all shared space objects have a memory
+ // barrier after initialization.
+ return false;
+
case RO_SPACE:
UNREACHABLE();
}
@@ -496,9 +522,6 @@ bool Heap::HasDirtyJSFinalizationRegistries() {
return !dirty_js_finalization_registries_list().IsUndefined(isolate());
}
-VerifyPointersVisitor::VerifyPointersVisitor(Heap* heap)
- : ObjectVisitorWithCageBases(heap), heap_(heap) {}
-
AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
heap_->always_allocate_scope_count_++;
}
@@ -507,15 +530,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_--;
}
-OptionalAlwaysAllocateScope::OptionalAlwaysAllocateScope(Heap* heap)
- : heap_(heap) {
- if (heap_) heap_->always_allocate_scope_count_++;
-}
-
-OptionalAlwaysAllocateScope::~OptionalAlwaysAllocateScope() {
- if (heap_) heap_->always_allocate_scope_count_--;
-}
-
AlwaysAllocateScopeForTesting::AlwaysAllocateScopeForTesting(Heap* heap)
: scope_(heap) {}
@@ -603,7 +617,8 @@ CodePageCollectionMemoryModificationScope::
}
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
-CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
+CodePageMemoryModificationScope::CodePageMemoryModificationScope(
+ InstructionStream code)
:
#if V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT || V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
rwx_write_scope_("A part of CodePageMemoryModificationScope"),
@@ -612,7 +627,8 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
scope_active_(false) {
}
#else
-CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
+CodePageMemoryModificationScope::CodePageMemoryModificationScope(
+ InstructionStream code)
: CodePageMemoryModificationScope(BasicMemoryChunk::FromHeapObject(code)) {}
#endif
diff --git a/deps/v8/src/heap/heap-verifier.cc b/deps/v8/src/heap/heap-verifier.cc
index cedc531241..b550d621e7 100644
--- a/deps/v8/src/heap/heap-verifier.cc
+++ b/deps/v8/src/heap/heap-verifier.cc
@@ -7,8 +7,11 @@
#include "include/v8-locker.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/reloc-info.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
@@ -44,42 +47,252 @@ class VerifySmisVisitor final : public RootVisitor {
}
};
-class HeapVerification final {
+// Visitor class to verify interior pointers in spaces that do not contain
+// or care about inter-generational references. All heap object pointers have to
+// point into the heap to a location that has a map pointer at its first word.
+// Caveat: Heap::Contains is an approximation because it can return true for
+// objects in a heap space but above the allocation pointer.
+class VerifyPointersVisitor : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
- explicit HeapVerification(Heap* heap) : heap_(heap) {}
+ V8_INLINE explicit VerifyPointersVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
+
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override;
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override;
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override;
+ void VisitCodeTarget(RelocInfo* rinfo) override;
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override;
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
+ void VisitRootPointers(Root root, const char* description,
+ OffHeapObjectSlot start,
+ OffHeapObjectSlot end) override;
+ void VisitMapPointer(HeapObject host) override;
+
+ protected:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
+ V8_INLINE void VerifyCodeObjectImpl(HeapObject heap_object);
+
+ template <typename TSlot>
+ V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);
+
+ virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end);
+
+ Heap* heap_;
+};
+
+void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) {
+ VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
+}
+
+void VerifyPointersVisitor::VisitPointers(HeapObject host,
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ VerifyPointers(host, start, end);
+}
+
+void VerifyPointersVisitor::VisitCodePointer(Code host, CodeObjectSlot slot) {
+ Object maybe_code = slot.load(code_cage_base());
+ HeapObject code;
+ // The slot might contain a smi during Code creation.
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyCodeObjectImpl(code);
+ } else {
+ CHECK(maybe_code.IsSmi());
+ }
+}
+
+void VerifyPointersVisitor::VisitRootPointers(Root root,
+ const char* description,
+ FullObjectSlot start,
+ FullObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+
+void VerifyPointersVisitor::VisitRootPointers(Root root,
+ const char* description,
+ OffHeapObjectSlot start,
+ OffHeapObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+
+void VerifyPointersVisitor::VisitMapPointer(HeapObject host) {
+ VerifyHeapObjectImpl(host.map(cage_base()));
+}
+
+void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK(IsValidHeapObject(heap_, heap_object));
+ CHECK(heap_object.map(cage_base()).IsMap());
+}
+
+void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
+ CHECK(IsValidCodeObject(heap_, heap_object));
+ CHECK(heap_object.map(cage_base()).IsMap());
+ CHECK(heap_object.map(cage_base()).instance_type() ==
+ INSTRUCTION_STREAM_TYPE);
+}
+
+template <typename TSlot>
+void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = slot.load(cage_base());
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
+ } else {
+ CHECK(object.IsSmi() || object.IsCleared() ||
+ MapWord::IsPacked(object.ptr()));
+ }
+ }
+}
+
+void VerifyPointersVisitor::VerifyPointers(HeapObject host,
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ // If this DCHECK fires then you probably added a pointer field
+ // to one of objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
+ // this by moving that object to POINTER_VISITOR_ID_LIST.
+ DCHECK_EQ(ObjectFields::kMaybePointers,
+ Map::ObjectFieldsFrom(host.map(cage_base()).visitor_id()));
+ VerifyPointersImpl(start, end);
+}
+
+void VerifyPointersVisitor::VisitCodeTarget(RelocInfo* rinfo) {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+}
+
+void VerifyPointersVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
+}
+
+class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
+ public:
+ explicit VerifyReadOnlyPointersVisitor(Heap* heap)
+ : VerifyPointersVisitor(heap) {}
+
+ protected:
+ void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ if (!host.is_null()) {
+ CHECK(ReadOnlyHeap::Contains(host.map()));
+ }
+ VerifyPointersVisitor::VerifyPointers(host, start, end);
+
+ for (MaybeObjectSlot current = start; current < end; ++current) {
+ HeapObject heap_object;
+ if ((*current)->GetHeapObject(&heap_object)) {
+ CHECK(ReadOnlyHeap::Contains(heap_object));
+ }
+ }
+ }
+};
+
+class VerifySharedHeapObjectVisitor : public VerifyPointersVisitor {
+ public:
+ explicit VerifySharedHeapObjectVisitor(Heap* heap)
+ : VerifyPointersVisitor(heap),
+ shared_space_(heap->shared_space()),
+ shared_lo_space_(heap->shared_lo_space()) {
+ DCHECK_NOT_NULL(shared_space_);
+ DCHECK_NOT_NULL(shared_lo_space_);
+ }
+
+ protected:
+ void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ if (!host.is_null()) {
+ Map map = host.map();
+ CHECK(ReadOnlyHeap::Contains(map) || shared_space_->Contains(map));
+ }
+ VerifyPointersVisitor::VerifyPointers(host, start, end);
+
+ for (MaybeObjectSlot current = start; current < end; ++current) {
+ HeapObject heap_object;
+ if ((*current)->GetHeapObject(&heap_object)) {
+ CHECK(ReadOnlyHeap::Contains(heap_object) ||
+ shared_space_->Contains(heap_object) ||
+ shared_lo_space_->Contains(heap_object));
+ }
+ }
+ }
+
+ private:
+ SharedSpace* shared_space_;
+ SharedLargeObjectSpace* shared_lo_space_;
+};
+
+class HeapVerification final : public SpaceVerificationVisitor {
+ public:
+ explicit HeapVerification(Heap* heap)
+ : heap_(heap), isolate_(heap->isolate()), cage_base_(isolate_) {}
void Verify();
void VerifyReadOnlyHeap();
void VerifySharedHeap(Isolate* initiator);
private:
+ void VerifySpace(BaseSpace* space);
+
+ void VerifyPage(const BasicMemoryChunk* chunk) final;
+ void VerifyPageDone(const BasicMemoryChunk* chunk) final;
+
+ void VerifyObject(HeapObject object) final;
+ void VerifyObjectMap(HeapObject object);
+ void VerifyOutgoingPointers(HeapObject object);
+ // Verifies OLD_TO_NEW and OLD_TO_SHARED remembered sets for this object.
+ void VerifyRememberedSetFor(HeapObject object);
+
void VerifyInvalidatedObjectSize();
ReadOnlySpace* read_only_space() const { return heap_->read_only_space(); }
NewSpace* new_space() const { return heap_->new_space(); }
OldSpace* old_space() const { return heap_->old_space(); }
+ SharedSpace* shared_space() const { return heap_->shared_space(); }
+
CodeSpace* code_space() const { return heap_->code_space(); }
LargeObjectSpace* lo_space() const { return heap_->lo_space(); }
+ SharedLargeObjectSpace* shared_lo_space() const {
+ return heap_->shared_lo_space();
+ }
CodeLargeObjectSpace* code_lo_space() const { return heap_->code_lo_space(); }
NewLargeObjectSpace* new_lo_space() const { return heap_->new_lo_space(); }
- Isolate* isolate() const { return heap_->isolate(); }
+ Isolate* isolate() const { return isolate_; }
Heap* heap() const { return heap_; }
- Heap* heap_;
+ AllocationSpace current_space_identity() const {
+ return *current_space_identity_;
+ }
+
+ Heap* const heap_;
+ Isolate* const isolate_;
+ const PtrComprCageBase cage_base_;
+ base::Optional<AllocationSpace> current_space_identity_;
+ base::Optional<const BasicMemoryChunk*> current_chunk_;
};
void HeapVerification::Verify() {
CHECK(heap()->HasBeenSetUp());
AllowGarbageCollection allow_gc;
IgnoreLocalGCRequests ignore_gc_requests(heap());
- IsolateSafepointScope safepoint_scope(heap());
+ SafepointKind safepoint_kind = isolate()->is_shared_space_isolate()
+ ? SafepointKind::kGlobal
+ : SafepointKind::kIsolate;
+ SafepointScope safepoint_scope(isolate(), safepoint_kind);
HandleScope scope(isolate());
heap()->MakeHeapIterable();
- heap()->array_buffer_sweeper()->EnsureFinished();
-
+ // TODO(v8:13257): Currently we don't iterate through the stack conservatively
+ // when verifying the heap.
VerifyPointersVisitor visitor(heap());
heap()->IterateRoots(&visitor,
base::EnumSet<SkipRoot>{SkipRoot::kConservativeStack});
@@ -104,16 +317,17 @@ void HeapVerification::Verify() {
VerifySmisVisitor smis_visitor;
heap()->IterateSmiRoots(&smis_visitor);
- if (new_space()) new_space()->Verify(isolate());
+ VerifySpace(new_space());
- old_space()->Verify(isolate(), &visitor);
+ VerifySpace(old_space());
+ VerifySpace(shared_space());
+ VerifySpace(code_space());
- VerifyPointersVisitor no_dirty_regions_visitor(heap());
- code_space()->Verify(isolate(), &no_dirty_regions_visitor);
+ VerifySpace(lo_space());
+ VerifySpace(new_lo_space());
+ VerifySpace(shared_lo_space());
+ VerifySpace(code_lo_space());
- lo_space()->Verify(isolate());
- code_lo_space()->Verify(isolate());
- if (new_lo_space()) new_lo_space()->Verify(isolate());
isolate()->string_table()->VerifyIfOwnedBy(isolate());
VerifyInvalidatedObjectSize();
@@ -123,6 +337,88 @@ void HeapVerification::Verify() {
#endif // DEBUG
}
+void HeapVerification::VerifySpace(BaseSpace* space) {
+ if (!space) return;
+ current_space_identity_ = space->identity();
+ space->Verify(isolate(), this);
+ current_space_identity_.reset();
+}
+
+void HeapVerification::VerifyPage(const BasicMemoryChunk* chunk) {
+ CHECK(!current_chunk_.has_value());
+ CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ if (V8_SHARED_RO_HEAP_BOOL && chunk->InReadOnlySpace()) {
+ CHECK_NULL(chunk->owner());
+ } else {
+ CHECK_EQ(chunk->heap(), heap());
+ CHECK_EQ(chunk->owner()->identity(), current_space_identity());
+ }
+ current_chunk_ = chunk;
+}
+
+void HeapVerification::VerifyPageDone(const BasicMemoryChunk* chunk) {
+ CHECK_EQ(chunk, *current_chunk_);
+ current_chunk_.reset();
+}
+
+void HeapVerification::VerifyObject(HeapObject object) {
+ CHECK_EQ(BasicMemoryChunk::FromHeapObject(object), *current_chunk_);
+
+ // Verify object map.
+ VerifyObjectMap(object);
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate_);
+
+ // Verify outgoing references.
+ VerifyOutgoingPointers(object);
+
+ // Verify remembered set.
+ VerifyRememberedSetFor(object);
+}
+
+void HeapVerification::VerifyOutgoingPointers(HeapObject object) {
+ switch (current_space_identity()) {
+ case RO_SPACE: {
+ VerifyReadOnlyPointersVisitor visitor(heap());
+ object.Iterate(cage_base_, &visitor);
+ break;
+ }
+
+ case SHARED_SPACE:
+ case SHARED_LO_SPACE: {
+ VerifySharedHeapObjectVisitor visitor(heap());
+ object.Iterate(cage_base_, &visitor);
+ break;
+ }
+
+ default: {
+ VerifyPointersVisitor visitor(heap());
+ object.Iterate(cage_base_, &visitor);
+ break;
+ }
+ }
+}
+
+void HeapVerification::VerifyObjectMap(HeapObject object) {
+ // The first word should be a map, and we expect all map pointers to be
+ // in old space, shared space, or read-only space.
+ Map map = object.map(cage_base_);
+ CHECK(map.IsMap(cage_base_));
+ CHECK(ReadOnlyHeap::Contains(map) || old_space()->Contains(map) ||
+ (shared_space() && shared_space()->Contains(map)));
+
+ if (Heap::InYoungGeneration(object)) {
+ // The object should not be code or a map.
+ CHECK(!object.IsMap(cage_base_));
+ CHECK(!object.IsAbstractCode(cage_base_));
+ } else if (current_space_identity() == RO_SPACE) {
+ CHECK(!object.IsExternalString());
+ CHECK(!object.IsJSArrayBuffer());
+ }
+}
+
namespace {
void VerifyInvalidatedSlots(InvalidatedSlots* invalidated_slots) {
if (!invalidated_slots) return;
@@ -147,7 +443,7 @@ void HeapVerification::VerifyInvalidatedObjectSize() {
void HeapVerification::VerifyReadOnlyHeap() {
CHECK(!read_only_space()->writable());
- read_only_space()->Verify(isolate());
+ VerifySpace(read_only_space());
}
class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
@@ -178,17 +474,18 @@ class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
}
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
if (ShouldHaveBeenRecorded(
host, MaybeObject::FromObject(slot.load(code_cage_base())))) {
CHECK_GT(untyped_->count(slot.address()), 0);
}
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ Object target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
+ if (ShouldHaveBeenRecorded(rinfo->instruction_stream(),
+ MaybeObject::FromObject(target))) {
CHECK(InTypedSet(SlotType::kCodeEntry, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(SlotType::kConstPoolCodeEntry,
@@ -196,9 +493,10 @@ class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
}
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
Object target = rinfo->target_object(cage_base());
- if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
+ if (ShouldHaveBeenRecorded(rinfo->instruction_stream(),
+ MaybeObject::FromObject(target))) {
CHECK(InTypedSet(SlotType::kEmbeddedObjectFull, rinfo->pc()) ||
InTypedSet(SlotType::kEmbeddedObjectCompressed, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
@@ -267,8 +565,8 @@ class OldToSharedSlotVerifyingVisitor : public SlotVerifyingVisitor {
: SlotVerifyingVisitor(isolate, untyped, typed) {}
bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
- return target->IsStrongOrWeak() && Heap::InSharedWritableHeap(target) &&
- !Heap::InYoungGeneration(host) && !host.InSharedWritableHeap();
+ return target->IsStrongOrWeak() && Heap::InWritableSharedSpace(target) &&
+ !Heap::InYoungGeneration(host) && !host.InWritableSharedSpace();
}
};
@@ -308,18 +606,16 @@ class SlotCollectingVisitor final : public ObjectVisitor {
}
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
#ifdef V8_EXTERNAL_CODE_SPACE
code_slots_.push_back(slot);
#endif
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitCodeTarget(RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- UNREACHABLE();
- }
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override { UNREACHABLE(); }
void VisitMapPointer(HeapObject object) override {} // do nothing by default
@@ -338,93 +634,81 @@ class SlotCollectingVisitor final : public ObjectVisitor {
#endif
};
-// static
-void HeapVerifier::VerifyHeap(Heap* heap) {
- HeapVerification verifier(heap);
- verifier.Verify();
-}
-
-// static
-void HeapVerifier::VerifyReadOnlyHeap(Heap* heap) {
- HeapVerification verifier(heap);
- verifier.VerifyReadOnlyHeap();
-}
-
-// static
-void HeapVerifier::VerifySharedHeap(Heap* heap, Isolate* initiator) {
- DCHECK(heap->IsShared());
- Isolate* isolate = heap->isolate();
-
- // Stop all client isolates attached to this isolate.
- GlobalSafepointScope global_safepoint(initiator);
-
- // Migrate shared isolate to the main thread of the initiator isolate.
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate));
- v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate));
-
- DCHECK_NOT_NULL(isolate->global_safepoint());
-
- // Free all shared LABs to make the shared heap iterable.
- isolate->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- client->heap()->FreeSharedLinearAllocationAreas();
- });
-
- HeapVerifier::VerifyHeap(heap);
-}
+void HeapVerification::VerifyRememberedSetFor(HeapObject object) {
+ if (current_space_identity() == RO_SPACE ||
+ v8_flags.verify_heap_skip_remembered_set) {
+ return;
+ }
-// static
-void HeapVerifier::VerifyRememberedSetFor(Heap* heap, HeapObject object) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
- // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
- base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
- chunk->mutex());
- PtrComprCageBase cage_base(heap->isolate());
+
Address start = object.address();
- Address end = start + object.Size(cage_base);
+ Address end = start + object.Size(cage_base_);
+
+ std::set<Address> old_to_new;
+ std::set<std::pair<SlotType, Address>> typed_old_to_new;
+ CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
+ OldToNewSlotVerifyingVisitor old_to_new_visitor(
+ isolate(), &old_to_new, &typed_old_to_new,
+ &heap()->ephemeron_remembered_set_);
+ object.IterateBody(cage_base_, &old_to_new_visitor);
+
+ std::set<Address> old_to_shared;
+ std::set<std::pair<SlotType, Address>> typed_old_to_shared;
+ CollectSlots<OLD_TO_SHARED>(chunk, start, end, &old_to_shared,
+ &typed_old_to_shared);
+ OldToSharedSlotVerifyingVisitor old_to_shared_visitor(
+ isolate(), &old_to_shared, &typed_old_to_shared);
+ object.IterateBody(cage_base_, &old_to_shared_visitor);
+
+ if (object.InWritableSharedSpace()) {
+ CHECK_NULL(chunk->slot_set<OLD_TO_SHARED>());
+ CHECK_NULL(chunk->typed_slot_set<OLD_TO_SHARED>());
+
+ CHECK_NULL(chunk->slot_set<OLD_TO_NEW>());
+ CHECK_NULL(chunk->typed_slot_set<OLD_TO_NEW>());
+ }
- if (chunk->InSharedHeap() || Heap::InYoungGeneration(object)) {
+ if (Heap::InYoungGeneration(object)) {
CHECK_NULL(chunk->slot_set<OLD_TO_NEW>());
CHECK_NULL(chunk->typed_slot_set<OLD_TO_NEW>());
CHECK_NULL(chunk->slot_set<OLD_TO_OLD>());
CHECK_NULL(chunk->typed_slot_set<OLD_TO_OLD>());
- }
-
- if (!Heap::InYoungGeneration(object)) {
- std::set<Address> old_to_new;
- std::set<std::pair<SlotType, Address>> typed_old_to_new;
- CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
- OldToNewSlotVerifyingVisitor old_to_new_visitor(
- heap->isolate(), &old_to_new, &typed_old_to_new,
- &heap->ephemeron_remembered_set_);
- object.IterateBody(cage_base, &old_to_new_visitor);
- std::set<Address> old_to_shared;
- std::set<std::pair<SlotType, Address>> typed_old_to_shared;
- CollectSlots<OLD_TO_SHARED>(chunk, start, end, &old_to_shared,
- &typed_old_to_shared);
- OldToSharedSlotVerifyingVisitor old_to_shared_visitor(
- heap->isolate(), &old_to_shared, &typed_old_to_shared);
- object.IterateBody(cage_base, &old_to_shared_visitor);
+ CHECK_NULL(chunk->slot_set<OLD_TO_SHARED>());
+ CHECK_NULL(chunk->typed_slot_set<OLD_TO_SHARED>());
}
+
// TODO(v8:11797): Add old to old slot set verification once all weak objects
// have their own instance types and slots are recorded for all weak fields.
}
// static
+void HeapVerifier::VerifyHeap(Heap* heap) {
+ HeapVerification verifier(heap);
+ verifier.Verify();
+}
+
+// static
+void HeapVerifier::VerifyReadOnlyHeap(Heap* heap) {
+ HeapVerification verifier(heap);
+ verifier.VerifyReadOnlyHeap();
+}
+
+// static
void HeapVerifier::VerifyObjectLayoutChangeIsAllowed(Heap* heap,
HeapObject object) {
- if (object.InSharedWritableHeap()) {
+ if (object.InWritableSharedSpace()) {
// Out of objects in the shared heap, only strings can change layout.
DCHECK(object.IsString());
// Shared strings only change layout under GC, never concurrently.
if (object.IsShared()) {
Isolate* isolate = heap->isolate();
- Isolate* shared_heap_isolate = isolate->is_shared_heap_isolate()
- ? isolate
- : isolate->shared_heap_isolate();
- shared_heap_isolate->global_safepoint()->AssertActive();
+ Isolate* shared_space_isolate = isolate->is_shared_space_isolate()
+ ? isolate
+ : isolate->shared_space_isolate();
+ shared_space_isolate->global_safepoint()->AssertActive();
}
// Non-shared strings in the shared heap are allowed to change layout
// outside of GC like strings in non-shared heaps.
@@ -473,10 +757,7 @@ void HeapVerifier::VerifySafeMapTransition(Heap* heap, HeapObject object,
}
if (object.IsString(cage_base) &&
- (new_map == ReadOnlyRoots(heap).thin_string_map() ||
- new_map == ReadOnlyRoots(heap).thin_one_byte_string_map() ||
- new_map == ReadOnlyRoots(heap).shared_thin_string_map() ||
- new_map == ReadOnlyRoots(heap).shared_thin_one_byte_string_map())) {
+ new_map == ReadOnlyRoots(heap).thin_string_map()) {
// When transitioning a string to ThinString,
// Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
// tagged fields are introduced.
@@ -498,11 +779,11 @@ void HeapVerifier::VerifySafeMapTransition(Heap* heap, HeapObject object,
object.IterateFast(cage_base, &old_visitor);
MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
// Temporarily set the new map to iterate new slots.
- object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
+ object.set_map_word(new_map, kRelaxedStore);
SlotCollectingVisitor new_visitor;
object.IterateFast(cage_base, &new_visitor);
// Restore the old map.
- object.set_map_word(old_map_word, kRelaxedStore);
+ object.set_map_word(old_map_word.ToMap(), kRelaxedStore);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
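Call sites typically gate the static entry points above on heap verification being compiled in and enabled; an illustrative (not verbatim) guard:

    #ifdef VERIFY_HEAP
    if (v8_flags.verify_heap) HeapVerifier::VerifyHeap(heap);
    #endif  // VERIFY_HEAP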
diff --git a/deps/v8/src/heap/heap-verifier.h b/deps/v8/src/heap/heap-verifier.h
index 320297d2cf..d64c6f9c3f 100644
--- a/deps/v8/src/heap/heap-verifier.h
+++ b/deps/v8/src/heap/heap-verifier.h
@@ -7,6 +7,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/map.h"
@@ -16,6 +17,22 @@ namespace internal {
class Heap;
class ReadOnlyHeap;
+// Interface for verifying spaces in the heap.
+class SpaceVerificationVisitor {
+ public:
+ virtual ~SpaceVerificationVisitor() = default;
+
+ // This method will be invoked for every object in the space.
+ virtual void VerifyObject(HeapObject object) = 0;
+
+ // This method will be invoked for each page in the space before verifying an
+ // object on it.
+ virtual void VerifyPage(const BasicMemoryChunk* chunk) = 0;
+
+ // This method will be invoked after verifying all objects on that page.
+ virtual void VerifyPageDone(const BasicMemoryChunk* chunk) = 0;
+};
+
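The interface above is driven by each space's Verify(Isolate*, SpaceVerificationVisitor*) implementation, which is not part of this diff. A rough sketch of the expected call pattern; the page iteration style and HeapObjectRange helper are hypothetical:

    // Sketch only: how a space is expected to feed the visitor.
    for (const Page* page : *this) {
      visitor->VerifyPage(page);
      for (HeapObject object : HeapObjectRange(page)) {  // hypothetical helper
        visitor->VerifyObject(object);
      }
      visitor->VerifyPageDone(page);
    }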
class HeapVerifier final {
public:
#ifdef VERIFY_HEAP
@@ -26,13 +43,6 @@ class HeapVerifier final {
// created.
static void VerifyReadOnlyHeap(Heap* heap);
- // Verify the shared heap, initiating from a client heap. This performs a
- // global safepoint, then the normal heap verification.
- static void VerifySharedHeap(Heap* heap, Isolate* initiator);
-
- // Verifies OLD_TO_NEW and OLD_TO_SHARED remembered sets for this object.
- static void VerifyRememberedSetFor(Heap* heap, HeapObject object);
-
// Checks that this is a safe map transition.
V8_EXPORT_PRIVATE static void VerifySafeMapTransition(Heap* heap,
HeapObject object,
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index e56924cb9c..dd850bbf01 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -10,6 +10,7 @@
#include "src/common/code-memory-access-inl.h"
#include "src/common/globals.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/marking-barrier.h"
#include "src/objects/code.h"
@@ -29,8 +30,7 @@ V8_EXPORT_PRIVATE void Heap_CombinedGenerationalAndSharedBarrierSlow(
V8_EXPORT_PRIVATE void Heap_CombinedGenerationalAndSharedEphemeronBarrierSlow(
EphemeronHashTable table, Address slot, HeapObject value);
-V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForCodeSlow(Code host,
- RelocInfo* rinfo,
+V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForCodeSlow(RelocInfo* rinfo,
HeapObject object);
V8_EXPORT_PRIVATE void Heap_GenerationalEphemeronKeyBarrierSlow(
@@ -45,7 +45,7 @@ namespace heap_internals {
struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
- static constexpr uintptr_t kInSharedHeapBit = uintptr_t{1} << 0;
+ static constexpr uintptr_t kInWritableSharedSpaceBit = uintptr_t{1} << 0;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 5;
@@ -60,7 +60,9 @@ struct MemoryChunk {
V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
- V8_INLINE bool InSharedHeap() const { return GetFlags() & kInSharedHeapBit; }
+ V8_INLINE bool InWritableSharedSpace() const {
+ return GetFlags() & kInWritableSharedSpaceBit;
+ }
V8_INLINE bool InYoungGeneration() const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
@@ -72,7 +74,7 @@ struct MemoryChunk {
V8_INLINE bool IsYoungOrSharedChunk() const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
constexpr uintptr_t kYoungOrSharedChunkMask =
- kFromPageBit | kToPageBit | kInSharedHeapBit;
+ kFromPageBit | kToPageBit | kInWritableSharedSpaceBit;
return GetFlags() & kYoungOrSharedChunkMask;
}
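With the bit constants above, the combined fast-path mask works out to:

    // kFromPageBit | kToPageBit | kInWritableSharedSpaceBit
    //   = (1 << 3) | (1 << 4) | (1 << 0) = 0x19
    constexpr uintptr_t kYoungOrSharedChunkMask = uintptr_t{0x19};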
@@ -106,48 +108,45 @@ inline void CombinedWriteBarrierInternal(HeapObject host, HeapObjectSlot slot,
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(value);
- const bool host_in_young_gen = host_chunk->InYoungGeneration();
+ const bool pointers_from_here_are_interesting =
+ !host_chunk->IsYoungOrSharedChunk();
const bool is_marking = host_chunk->IsMarking();
- if (!host_in_young_gen && value_chunk->IsYoungOrSharedChunk()) {
+ if (pointers_from_here_are_interesting &&
+ value_chunk->IsYoungOrSharedChunk()) {
// Generational or shared heap write barrier (old-to-new or old-to-shared).
Heap_CombinedGenerationalAndSharedBarrierSlow(host, slot.address(), value);
}
// Marking barrier: mark value & record slots when marking is on.
if (V8_UNLIKELY(is_marking)) {
-#ifdef V8_EXTERNAL_CODE_SPACE
// CodePageHeaderModificationScope is not required because the only case
- // when a Code value is stored somewhere is during creation of a new Code
- // object which is then stored to CodeDataContainer's code field and this
- // case is already guarded by CodePageMemoryModificationScope.
-#else
- CodePageHeaderModificationScope rwx_write_scope(
- "Marking a Code object requires write access to the Code page header");
-#endif
- WriteBarrier::MarkingSlow(host_chunk->GetHeap(), host, HeapObjectSlot(slot),
- value);
+ // when an InstructionStream value is stored somewhere is during creation of
+ // a new InstructionStream object, which is then stored to Code's code field,
+ // and this case is already guarded by
+ // CodePageMemoryModificationScope.
+ WriteBarrier::MarkingSlow(host, HeapObjectSlot(slot), value);
}
}
} // namespace heap_internals
-inline void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value,
- WriteBarrierMode mode) {
+inline void WriteBarrierForCode(InstructionStream host, RelocInfo* rinfo,
+ Object value, WriteBarrierMode mode) {
DCHECK(!HasWeakHeapObjectTag(value));
if (!value.IsHeapObject()) return;
WriteBarrierForCode(host, rinfo, HeapObject::cast(value));
}
-inline void WriteBarrierForCode(Code host, RelocInfo* rinfo, HeapObject value,
- WriteBarrierMode mode) {
+inline void WriteBarrierForCode(InstructionStream host, RelocInfo* rinfo,
+ HeapObject value, WriteBarrierMode mode) {
if (mode == SKIP_WRITE_BARRIER) {
SLOW_DCHECK(!WriteBarrier::IsRequired(host, value));
return;
}
DCHECK_EQ(mode, UPDATE_WRITE_BARRIER);
- GenerationalBarrierForCode(host, rinfo, value);
+ GenerationalBarrierForCode(rinfo, value);
WriteBarrier::Shared(host, rinfo, value);
WriteBarrier::Marking(host, rinfo, value);
}
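Summarized, the rewritten fast path above takes the combined generational/shared slow path exactly when the following holds (an informal restatement of the code, not additional logic):

    // Host lives outside new space and outside writable shared space, and the
    // value lives in new space or in writable shared space.
    const bool needs_generational_or_shared_barrier =
        !host_chunk->IsYoungOrSharedChunk() &&  // pointers from here are interesting
        value_chunk->IsYoungOrSharedChunk();    // value is young or shared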
@@ -195,31 +194,32 @@ inline void CombinedEphemeronWriteBarrier(EphemeronHashTable host,
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(heap_object_value);
- const bool host_in_young_gen = host_chunk->InYoungGeneration();
+ const bool pointers_from_here_are_interesting =
+ !host_chunk->IsYoungOrSharedChunk();
const bool is_marking = host_chunk->IsMarking();
- if (!host_in_young_gen && value_chunk->IsYoungOrSharedChunk()) {
+ if (pointers_from_here_are_interesting &&
+ value_chunk->IsYoungOrSharedChunk()) {
Heap_CombinedGenerationalAndSharedEphemeronBarrierSlow(host, slot.address(),
heap_object_value);
}
// Marking barrier: mark value & record slots when marking is on.
if (is_marking) {
- // Currently Code values are never stored in EphemeronTables. If this ever
- // changes then the CodePageHeaderModificationScope might be required here.
+ // Currently InstructionStream values are never stored in EphemeronTables.
+ // If this ever changes then the CodePageHeaderModificationScope might be
+ // required here.
DCHECK(!IsCodeSpaceObject(heap_object_value));
- WriteBarrier::MarkingSlow(host_chunk->GetHeap(), host, HeapObjectSlot(slot),
- heap_object_value);
+ WriteBarrier::MarkingSlow(host, HeapObjectSlot(slot), heap_object_value);
}
}
-inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
- HeapObject object) {
+inline void GenerationalBarrierForCode(RelocInfo* rinfo, HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->InYoungGeneration()) return;
- Heap_GenerationalBarrierForCodeSlow(host, rinfo, object);
+ Heap_GenerationalBarrierForCodeSlow(rinfo, object);
}
inline WriteBarrierMode GetWriteBarrierModeForObject(
@@ -255,28 +255,20 @@ inline bool IsCodeSpaceObject(HeapObject object) {
return chunk->InCodeSpace();
}
-base::Optional<Heap*> WriteBarrier::GetHeapIfMarking(HeapObject object) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return {};
- heap_internals::MemoryChunk* chunk =
- heap_internals::MemoryChunk::FromHeapObject(object);
- if (V8_LIKELY(!chunk->IsMarking())) return {};
- return chunk->GetHeap();
-}
-
-Heap* WriteBarrier::GetHeap(HeapObject object) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+bool WriteBarrier::IsMarking(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- DCHECK(!chunk->InReadOnlySpace());
- return chunk->GetHeap();
+ return chunk->IsMarking();
}
void WriteBarrier::Marking(HeapObject host, ObjectSlot slot, Object value) {
DCHECK(!HasWeakHeapObjectTag(value));
if (!value.IsHeapObject()) return;
HeapObject value_heap_object = HeapObject::cast(value);
- // Currently this marking barrier is never used for Code values. If this ever
- // changes then the CodePageHeaderModificationScope might be required here.
+ // Currently this marking barrier is never used for InstructionStream values.
+ // If this ever changes then the CodePageHeaderModificationScope might be
+ // required here.
DCHECK(!IsCodeSpaceObject(value_heap_object));
Marking(host, HeapObjectSlot(slot), value_heap_object);
}
@@ -285,83 +277,99 @@ void WriteBarrier::Marking(HeapObject host, MaybeObjectSlot slot,
MaybeObject value) {
HeapObject value_heap_object;
if (!value->GetHeapObject(&value_heap_object)) return;
-#ifdef V8_EXTERNAL_CODE_SPACE
// This barrier is called from generated code and from C++ code.
- // There must be no stores of Code values from generated code and all stores
- // of Code values in C++ must be handled by CombinedWriteBarrierInternal().
+ // There must be no stores of InstructionStream values from generated code and
+ // all stores of InstructionStream values in C++ must be handled by
+ // CombinedWriteBarrierInternal().
DCHECK(!IsCodeSpaceObject(value_heap_object));
-#else
- CodePageHeaderModificationScope rwx_write_scope(
- "Marking a Code object requires write access to the Code page header");
-#endif
Marking(host, HeapObjectSlot(slot), value_heap_object);
}
void WriteBarrier::Marking(HeapObject host, HeapObjectSlot slot,
HeapObject value) {
- auto heap = GetHeapIfMarking(host);
- if (!heap) return;
- MarkingSlow(*heap, host, slot, value);
+ if (!IsMarking(host)) return;
+ MarkingSlow(host, slot, value);
}
-void WriteBarrier::Marking(Code host, RelocInfo* reloc_info, HeapObject value) {
- auto heap = GetHeapIfMarking(host);
- if (!heap) return;
- MarkingSlow(*heap, host, reloc_info, value);
+void WriteBarrier::Marking(InstructionStream host, RelocInfo* reloc_info,
+ HeapObject value) {
+ if (!IsMarking(host)) return;
+ MarkingSlow(host, reloc_info, value);
}
-void WriteBarrier::Shared(Code host, RelocInfo* reloc_info, HeapObject value) {
+void WriteBarrier::Shared(InstructionStream host, RelocInfo* reloc_info,
+ HeapObject value) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(value);
- if (!value_chunk->InSharedHeap()) return;
+ if (!value_chunk->InWritableSharedSpace()) return;
- Heap* heap = GetHeap(host);
- DCHECK_NOT_NULL(heap);
- SharedSlow(heap, host, reloc_info, value);
+ SharedSlow(reloc_info, value);
}
void WriteBarrier::Marking(JSArrayBuffer host,
ArrayBufferExtension* extension) {
- if (!extension) return;
- auto heap = GetHeapIfMarking(host);
- if (!heap) return;
- MarkingSlow(*heap, host, extension);
+ if (!extension || !IsMarking(host)) return;
+ MarkingSlow(host, extension);
}
void WriteBarrier::Marking(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
- auto heap = GetHeapIfMarking(descriptor_array);
- if (!heap) return;
- MarkingSlow(*heap, descriptor_array, number_of_own_descriptors);
+ if (!IsMarking(descriptor_array)) return;
+ MarkingSlow(descriptor_array, number_of_own_descriptors);
}
// static
void WriteBarrier::MarkingFromGlobalHandle(Object value) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
if (!value.IsHeapObject()) return;
+ MarkingSlowFromGlobalHandle(HeapObject::cast(value));
+}
- HeapObject heap_value = HeapObject::cast(value);
- // Value may be in read only space but the chunk should never be marked
- // as marking which would result in a bail out.
- auto heap = GetHeapIfMarking(heap_value);
- if (!heap) return;
- MarkingSlowFromGlobalHandle(*heap, heap_value);
+// static
+void WriteBarrier::CombinedBarrierFromInternalFields(JSObject host,
+ void* value) {
+ CombinedBarrierFromInternalFields(host, 1, &value);
}
// static
-void WriteBarrier::MarkingFromInternalFields(JSObject host) {
+void WriteBarrier::CombinedBarrierFromInternalFields(JSObject host, size_t argc,
+ void** values) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
- auto heap = GetHeapIfMarking(host);
- if (!heap) return;
- if (CurrentMarkingBarrier(heap.value())->is_minor()) {
+ if (V8_LIKELY(!IsMarking(host))) {
+ GenerationalBarrierFromInternalFields(host, argc, values);
+ return;
+ }
+ MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
+ if (marking_barrier->is_minor()) {
// TODO(v8:13012): We do not currently mark Oilpan objects while MinorMC is
// active. Once Oilpan uses a generational GC with incremental marking and
// unified heap, this barrier will be needed again.
return;
}
- MarkingSlowFromInternalFields(*heap, host);
+ MarkingSlowFromInternalFields(marking_barrier->heap(), host);
+}
+
+// static
+void WriteBarrier::GenerationalBarrierFromInternalFields(JSObject host,
+ void* value) {
+ GenerationalBarrierFromInternalFields(host, 1, &value);
+}
+
+// static
+void WriteBarrier::GenerationalBarrierFromInternalFields(JSObject host,
+ size_t argc,
+ void** values) {
+ auto* memory_chunk = MemoryChunk::FromHeapObject(host);
+ if (V8_LIKELY(memory_chunk->InYoungGeneration())) return;
+ auto* cpp_heap = memory_chunk->heap()->cpp_heap();
+ if (!cpp_heap) return;
+ for (size_t i = 0; i < argc; ++i) {
+ if (!values[i]) continue;
+ v8::internal::CppHeap::From(cpp_heap)->RememberCrossHeapReferenceIfNeeded(
+ host, values[i]);
+ }
}
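
The two internal-field barriers above combine into a single dispatch: when no marker is active, old-generation hosts with an attached C++ heap record old-to-embedder references; otherwise the marking path takes over. Below is a simplified, self-contained model of that dispatch; HostStub, CppHeapStub, and RememberCrossHeapReference are illustrative stand-ins, not the real V8/cppgc API.

#include <cstddef>
#include <utility>
#include <vector>

// Placeholder embedder heap that only records cross-heap references.
struct CppHeapStub {
  std::vector<std::pair<void*, void*>> remembered;
  void RememberCrossHeapReference(void* host, void* value) {
    remembered.emplace_back(host, value);
  }
};

struct HostStub {
  bool is_marking = false;           // chunk "incremental marking" flag
  bool in_young_generation = false;  // chunk "young generation" flag
  CppHeapStub* cpp_heap = nullptr;   // attached embedder heap, if any
};

void MarkingSlowFromInternalFields(HostStub&) { /* hand off to the marker */ }

// Combined barrier: take the marking path while a marker is active, otherwise
// remember old-to-embedder references so a young GC can find them.
void CombinedBarrierFromInternalFields(HostStub& host, std::size_t argc,
                                       void** values) {
  if (!host.is_marking) {
    if (host.in_young_generation || host.cpp_heap == nullptr) return;
    for (std::size_t i = 0; i < argc; ++i) {
      if (values[i] != nullptr) {
        host.cpp_heap->RememberCrossHeapReference(&host, values[i]);
      }
    }
    return;
  }
  MarkingSlowFromInternalFields(host);
}
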
#ifdef ENABLE_SLOW_DCHECKS
diff --git a/deps/v8/src/heap/heap-write-barrier.cc b/deps/v8/src/heap/heap-write-barrier.cc
index 4e9916974b..b730ce261e 100644
--- a/deps/v8/src/heap/heap-write-barrier.cc
+++ b/deps/v8/src/heap/heap-write-barrier.cc
@@ -4,7 +4,6 @@
#include "src/heap/heap-write-barrier.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/remembered-set.h"
@@ -21,66 +20,72 @@ namespace {
thread_local MarkingBarrier* current_marking_barrier = nullptr;
} // namespace
-MarkingBarrier* WriteBarrier::CurrentMarkingBarrier(Heap* heap) {
- return current_marking_barrier
- ? current_marking_barrier
- : heap->main_thread_local_heap()->marking_barrier();
+MarkingBarrier* WriteBarrier::CurrentMarkingBarrier(
+ HeapObject verification_candidate) {
+ MarkingBarrier* marking_barrier = current_marking_barrier;
+ DCHECK_NOT_NULL(marking_barrier);
+#if DEBUG
+ if (!verification_candidate.is_null() &&
+ !verification_candidate.InAnySharedSpace()) {
+ Heap* host_heap =
+ MemoryChunk::FromHeapObject(verification_candidate)->heap();
+ LocalHeap* local_heap = LocalHeap::Current();
+ if (!local_heap) local_heap = host_heap->main_thread_local_heap();
+ DCHECK_EQ(marking_barrier, local_heap->marking_barrier());
+ }
+#endif // DEBUG
+ return marking_barrier;
}
-void WriteBarrier::SetForThread(MarkingBarrier* marking_barrier) {
- DCHECK_NULL(current_marking_barrier);
+MarkingBarrier* WriteBarrier::SetForThread(MarkingBarrier* marking_barrier) {
+ MarkingBarrier* existing = current_marking_barrier;
current_marking_barrier = marking_barrier;
+ return existing;
}
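
SetForThread now returns whatever barrier was previously installed instead of asserting that the slot was empty, which lets callers nest installations and restore the old value afterwards (ClearForThread below is removed). A minimal sketch of that save/restore idiom follows; the RAII wrapper is an assumption about how call sites might use the return value, not code from this patch.

// MarkingBarrierStub and BarrierScope are illustrative, not V8 types.
struct MarkingBarrierStub {};

thread_local MarkingBarrierStub* current_barrier = nullptr;

MarkingBarrierStub* SetForThread(MarkingBarrierStub* barrier) {
  MarkingBarrierStub* previous = current_barrier;
  current_barrier = barrier;
  return previous;
}

// Installs a barrier for the current thread and restores the previous one on
// destruction, so nested installations compose instead of asserting.
class BarrierScope {
 public:
  explicit BarrierScope(MarkingBarrierStub* barrier)
      : previous_(SetForThread(barrier)) {}
  ~BarrierScope() { SetForThread(previous_); }
  BarrierScope(const BarrierScope&) = delete;
  BarrierScope& operator=(const BarrierScope&) = delete;

 private:
  MarkingBarrierStub* previous_;
};
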
-void WriteBarrier::ClearForThread(MarkingBarrier* marking_barrier) {
- DCHECK_EQ(current_marking_barrier, marking_barrier);
- current_marking_barrier = nullptr;
-}
-
-void WriteBarrier::MarkingSlow(Heap* heap, HeapObject host, HeapObjectSlot slot,
+void WriteBarrier::MarkingSlow(HeapObject host, HeapObjectSlot slot,
HeapObject value) {
- MarkingBarrier* marking_barrier = CurrentMarkingBarrier(heap);
+ MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
marking_barrier->Write(host, slot, value);
}
// static
-void WriteBarrier::MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value) {
- heap->main_thread_local_heap()->marking_barrier()->WriteWithoutHost(value);
+void WriteBarrier::MarkingSlowFromGlobalHandle(HeapObject value) {
+ MarkingBarrier* marking_barrier = CurrentMarkingBarrier(value);
+ marking_barrier->WriteWithoutHost(value);
}
// static
void WriteBarrier::MarkingSlowFromInternalFields(Heap* heap, JSObject host) {
- auto* local_embedder_heap_tracer = heap->local_embedder_heap_tracer();
- if (!local_embedder_heap_tracer->InUse()) return;
-
- local_embedder_heap_tracer->EmbedderWriteBarrier(heap, host);
+ if (auto* cpp_heap = heap->cpp_heap()) {
+ CppHeap::From(cpp_heap)->WriteBarrier(host);
+ }
}
-void WriteBarrier::MarkingSlow(Heap* heap, Code host, RelocInfo* reloc_info,
+void WriteBarrier::MarkingSlow(InstructionStream host, RelocInfo* reloc_info,
HeapObject value) {
- MarkingBarrier* marking_barrier = CurrentMarkingBarrier(heap);
+ MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
marking_barrier->Write(host, reloc_info, value);
}
-void WriteBarrier::SharedSlow(Heap* heap, Code host, RelocInfo* reloc_info,
- HeapObject value) {
+void WriteBarrier::SharedSlow(RelocInfo* reloc_info, HeapObject value) {
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::ProcessRelocInfo(host, reloc_info, value);
+ MarkCompactCollector::ProcessRelocInfo(reloc_info, value);
base::MutexGuard write_scope(info.memory_chunk->mutex());
RememberedSet<OLD_TO_SHARED>::InsertTyped(info.memory_chunk, info.slot_type,
info.offset);
}
-void WriteBarrier::MarkingSlow(Heap* heap, JSArrayBuffer host,
+void WriteBarrier::MarkingSlow(JSArrayBuffer host,
ArrayBufferExtension* extension) {
- MarkingBarrier* marking_barrier = CurrentMarkingBarrier(heap);
+ MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
marking_barrier->Write(host, extension);
}
-void WriteBarrier::MarkingSlow(Heap* heap, DescriptorArray descriptor_array,
+void WriteBarrier::MarkingSlow(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
- MarkingBarrier* marking_barrier = CurrentMarkingBarrier(heap);
+ MarkingBarrier* marking_barrier = CurrentMarkingBarrier(descriptor_array);
marking_barrier->Write(descriptor_array, number_of_own_descriptors);
}
@@ -88,6 +93,7 @@ int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
HeapObject host = HeapObject::cast(Object(raw_host));
MaybeObjectSlot slot(raw_slot);
Address value = (*slot).ptr();
+
#ifdef V8_MAP_PACKING
if (slot.address() == host.address()) {
// Clear metadata bits and fix object tag.
@@ -96,15 +102,56 @@ int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
(uint64_t)kHeapObjectTag;
}
#endif
+
+#if DEBUG
+ Heap* heap = MemoryChunk::FromHeapObject(host)->heap();
+ DCHECK(heap->incremental_marking()->IsMarking());
+
+ // We will only reach local objects here while incremental marking in the
+ // current isolate is enabled. However, we might still reach objects in the
+ // shared space but only from the shared space isolate (= the main isolate).
+ MarkingBarrier* barrier = CurrentMarkingBarrier(host);
+ DCHECK_IMPLIES(host.InWritableSharedSpace(),
+ barrier->heap()->isolate()->is_shared_space_isolate());
+ barrier->AssertMarkingIsActivated();
+#endif // DEBUG
+
WriteBarrier::Marking(host, slot, MaybeObject(value));
// Called by WriteBarrierCodeStubAssembler, which doesn't accept void type
return 0;
}
+int WriteBarrier::SharedMarkingFromCode(Address raw_host, Address raw_slot) {
+ HeapObject host = HeapObject::cast(Object(raw_host));
+ MaybeObjectSlot slot(raw_slot);
+ Address raw_value = (*slot).ptr();
+ MaybeObject value(raw_value);
+
+ DCHECK(host.InWritableSharedSpace());
+
+#if DEBUG
+ Heap* heap = MemoryChunk::FromHeapObject(host)->heap();
+ DCHECK(heap->incremental_marking()->IsMarking());
+ Isolate* isolate = heap->isolate();
+ DCHECK(isolate->is_shared_space_isolate());
+
+ // The shared marking barrier will only be reached from client isolates (=
+ // worker isolates).
+ MarkingBarrier* barrier = CurrentMarkingBarrier(host);
+ DCHECK(!barrier->heap()->isolate()->is_shared_space_isolate());
+ barrier->AssertSharedMarkingIsActivated();
+#endif // DEBUG
+
+ WriteBarrier::Marking(host, slot, MaybeObject(value));
+
+ // Called by WriteBarrierCodeStubAssembler, which doesn't accept void type
+ return 0;
+}
+
int WriteBarrier::SharedFromCode(Address raw_host, Address raw_slot) {
HeapObject host = HeapObject::cast(Object(raw_host));
- if (!host.InSharedWritableHeap()) {
+ if (!host.InWritableSharedSpace()) {
Heap::SharedHeapBarrierSlow(host, raw_slot);
}
@@ -116,15 +163,7 @@ int WriteBarrier::SharedFromCode(Address raw_host, Address raw_slot) {
bool WriteBarrier::IsImmortalImmovableHeapObject(HeapObject object) {
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
// All objects in readonly space are immortal and immovable.
- if (basic_chunk->InReadOnlySpace()) return true;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- // There are also objects in "regular" spaces which are immortal and
- // immovable. Objects on a page that can get compacted are movable and can be
- // filtered out.
- if (!chunk->IsFlagSet(MemoryChunk::NEVER_EVACUATE)) return false;
- // Now we know the object is immovable, check whether it is also immortal.
- // Builtins are roots and therefore always kept alive by the GC.
- return object.IsCode() && Code::cast(object).is_builtin();
+ return basic_chunk->InReadOnlySpace();
}
#endif
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index 4e1ddce414..f44af9a9f1 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
class ArrayBufferExtension;
-class Code;
+class InstructionStream;
class DescriptorArray;
class EphemeronHashTable;
class FixedArray;
@@ -29,9 +29,10 @@ class RelocInfo;
// object-macros.h.
// Combined write barriers.
-void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value,
+void WriteBarrierForCode(InstructionStream host, RelocInfo* rinfo, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-void WriteBarrierForCode(Code host, RelocInfo* rinfo, HeapObject value,
+void WriteBarrierForCode(InstructionStream host, RelocInfo* rinfo,
+ HeapObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
void CombinedWriteBarrier(HeapObject object, ObjectSlot slot, Object value,
@@ -43,7 +44,7 @@ void CombinedEphemeronWriteBarrier(EphemeronHashTable object, ObjectSlot slot,
Object value, WriteBarrierMode mode);
// Generational write barrier.
-void GenerationalBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
+void GenerationalBarrierForCode(RelocInfo* rinfo, HeapObject object);
inline bool IsReadOnlyHeapObject(HeapObject object);
@@ -53,24 +54,32 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static inline void Marking(HeapObject host, HeapObjectSlot, HeapObject value);
static inline void Marking(HeapObject host, MaybeObjectSlot,
MaybeObject value);
- static inline void Marking(Code host, RelocInfo*, HeapObject value);
+ static inline void Marking(InstructionStream host, RelocInfo*,
+ HeapObject value);
static inline void Marking(JSArrayBuffer host, ArrayBufferExtension*);
static inline void Marking(DescriptorArray, int number_of_own_descriptors);
- static inline void Shared(Code host, RelocInfo*, HeapObject value);
+ static inline void Shared(InstructionStream host, RelocInfo*,
+ HeapObject value);
// It is invoked from generated code and has to take raw addresses.
static int MarkingFromCode(Address raw_host, Address raw_slot);
+ static int SharedMarkingFromCode(Address raw_host, Address raw_slot);
static int SharedFromCode(Address raw_host, Address raw_slot);
// Invoked from global handles where no host object is available.
static inline void MarkingFromGlobalHandle(Object value);
- static inline void MarkingFromInternalFields(JSObject host);
- static void SetForThread(MarkingBarrier*);
- static void ClearForThread(MarkingBarrier*);
+ static inline void CombinedBarrierFromInternalFields(JSObject host,
+ void* value);
+ static inline void CombinedBarrierFromInternalFields(JSObject host,
+ size_t argc,
+ void** values);
- static MarkingBarrier* CurrentMarkingBarrier(Heap* heap);
+ static MarkingBarrier* SetForThread(MarkingBarrier*);
+
+ static MarkingBarrier* CurrentMarkingBarrier(
+ HeapObject verification_candidate);
#ifdef ENABLE_SLOW_DCHECKS
template <typename T>
@@ -78,22 +87,24 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static bool IsImmortalImmovableHeapObject(HeapObject object);
#endif
- static void MarkingSlow(Heap* heap, HeapObject host, HeapObjectSlot,
- HeapObject value);
+ static void MarkingSlow(HeapObject host, HeapObjectSlot, HeapObject value);
private:
- static inline base::Optional<Heap*> GetHeapIfMarking(HeapObject object);
- static inline Heap* GetHeap(HeapObject object);
-
- static void MarkingSlow(Heap* heap, Code host, RelocInfo*, HeapObject value);
- static void MarkingSlow(Heap* heap, JSArrayBuffer host,
- ArrayBufferExtension*);
- static void MarkingSlow(Heap* heap, DescriptorArray,
- int number_of_own_descriptors);
- static void MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value);
+ static inline bool IsMarking(HeapObject object);
+
+ static void MarkingSlow(InstructionStream host, RelocInfo*, HeapObject value);
+ static void MarkingSlow(JSArrayBuffer host, ArrayBufferExtension*);
+ static void MarkingSlow(DescriptorArray, int number_of_own_descriptors);
+ static void MarkingSlowFromGlobalHandle(HeapObject value);
static void MarkingSlowFromInternalFields(Heap* heap, JSObject host);
- static void SharedSlow(Heap* heap, Code host, RelocInfo*, HeapObject value);
+ static inline void GenerationalBarrierFromInternalFields(JSObject host,
+ void* value);
+ static inline void GenerationalBarrierFromInternalFields(JSObject host,
+ size_t argc,
+ void** values);
+
+ static void SharedSlow(RelocInfo*, HeapObject value);
friend class Heap;
};
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index eb6883fac1..90aa3360b4 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -37,8 +37,10 @@
#include "src/flags/flags.h"
#include "src/handles/global-handles-inl.h"
#include "src/handles/traced-handles.h"
+#include "src/heap/allocation-observer.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/base/stack.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-range.h"
@@ -48,7 +50,6 @@
#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/cppgc-js/cpp-heap.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/evacuation-verifier-inl.h"
#include "src/heap/finalization-registry-cleanup-task.h"
#include "src/heap/gc-idle-time-handler.h"
@@ -72,7 +73,9 @@
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
+#include "src/heap/minor-gc-job.h"
#include "src/heap/new-spaces.h"
+#include "src/heap/object-lock.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
@@ -82,9 +85,7 @@
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
-#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
-#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
#include "src/init/bootstrapper.h"
@@ -157,9 +158,8 @@ void Heap_CombinedGenerationalAndSharedEphemeronBarrierSlow(
Heap::CombinedGenerationalAndSharedEphemeronBarrierSlow(table, slot, value);
}
-void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
- HeapObject object) {
- Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
+void Heap_GenerationalBarrierForCodeSlow(RelocInfo* rinfo, HeapObject object) {
+ Heap::GenerationalBarrierForCodeSlow(rinfo, object);
}
void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
@@ -190,27 +190,77 @@ void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
set_basic_block_profiling_data(*list);
}
-class ScavengeTaskObserver final : public AllocationObserver {
+
+class ScheduleMinorGCTaskObserver : public AllocationObserver {
public:
- ScavengeTaskObserver(Heap* heap, intptr_t step_size)
- : AllocationObserver(step_size), heap_(heap) {}
+ explicit ScheduleMinorGCTaskObserver(Heap* heap)
+ : AllocationObserver(kNotUsingFixedStepSize), heap_(heap) {
+ // Register GC callback for all atomic pause types.
+ heap_->main_thread_local_heap()->AddGCEpilogueCallback(
+ &GCEpilogueCallback, this,
+ static_cast<GCType>(GCType::kGCTypeScavenge |
+ GCType::kGCTypeMinorMarkCompact |
+ GCType::kGCTypeMarkSweepCompact));
+ AddToNewSpace();
+ }
+ ~ScheduleMinorGCTaskObserver() override {
+ RemoveFromNewSpace();
+ heap_->main_thread_local_heap()->RemoveGCEpilogueCallback(
+ &GCEpilogueCallback, this);
+ }
- void Step(int bytes_allocated, Address, size_t) override {
- heap_->ScheduleScavengeTaskIfNeeded();
+ intptr_t GetNextStepSize() final {
+ size_t new_space_threshold =
+ MinorGCJob::YoungGenerationTaskTriggerSize(heap_);
+ size_t new_space_size = heap_->new_space()->Size();
+ if (new_space_size < new_space_threshold) {
+ return new_space_threshold - new_space_size;
+ }
+ // Force a step on next allocation.
+ return 1;
}
- private:
+ void Step(int, Address, size_t) final {
+ StepImpl();
+ // Remove this observer. It will be re-added after a GC.
+ DCHECK(was_added_to_space_);
+ heap_->new_space()->RemoveAllocationObserver(this);
+ was_added_to_space_ = false;
+ }
+
+ protected:
+ static void GCEpilogueCallback(LocalIsolate*, GCType, GCCallbackFlags,
+ void* observer) {
+ reinterpret_cast<ScheduleMinorGCTaskObserver*>(observer)
+ ->RemoveFromNewSpace();
+ reinterpret_cast<ScheduleMinorGCTaskObserver*>(observer)->AddToNewSpace();
+ }
+
+ void AddToNewSpace() {
+ DCHECK(!was_added_to_space_);
+ heap_->new_space()->AddAllocationObserver(this);
+ was_added_to_space_ = true;
+ }
+
+ void RemoveFromNewSpace() {
+ if (!was_added_to_space_) return;
+ heap_->new_space()->RemoveAllocationObserver(this);
+ was_added_to_space_ = false;
+ }
+
+ virtual void StepImpl() { heap_->ScheduleMinorGCTaskIfNeeded(); }
Heap* heap_;
+ bool was_added_to_space_ = false;
};
-class MinorMCTaskObserver final : public AllocationObserver {
+class MinorMCIncrementalMarkingTaskObserver final
+ : public ScheduleMinorGCTaskObserver {
public:
- static constexpr size_t kStepSize = 64 * KB;
-
- MinorMCTaskObserver(Heap* heap, intptr_t step_size)
- : AllocationObserver(step_size), heap_(heap) {}
+ explicit MinorMCIncrementalMarkingTaskObserver(Heap* heap)
+ : ScheduleMinorGCTaskObserver(heap) {}
- void Step(int bytes_allocated, Address, size_t) override {
+ protected:
+ void StepImpl() final {
if (v8_flags.concurrent_minor_mc_marking) {
if (heap_->incremental_marking()->IsMinorMarking()) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
@@ -219,10 +269,9 @@ class MinorMCTaskObserver final : public AllocationObserver {
}
heap_->StartMinorMCIncrementalMarkingIfNeeded();
- }
- private:
- Heap* heap_;
+ ScheduleMinorGCTaskObserver::StepImpl();
+ }
};
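
The observer above replaces a fixed 64 KB step with a computed one: GetNextStepSize returns the distance from the current new-space size to the task trigger (or 1 once the trigger has been passed), and the observer detaches after firing until a GC epilogue callback re-adds it. The toy model below demonstrates only the step-size arithmetic; the trigger and allocation sizes are invented.

#include <cstdint>
#include <cstdio>

// Simplified model of a single space with a task trigger.
struct NewSpaceModel {
  std::uint64_t size = 0;
  std::uint64_t task_trigger = 4u * 1024 * 1024;
};

// Mirrors GetNextStepSize(): step exactly when the trigger is crossed, or on
// the very next allocation if the space is already past it.
std::uint64_t NextStepSize(const NewSpaceModel& space) {
  return space.size < space.task_trigger ? space.task_trigger - space.size : 1;
}

int main() {
  NewSpaceModel space;
  bool observer_attached = true;
  std::uint64_t budget = NextStepSize(space);
  while (observer_attached) {
    const std::uint64_t allocated = 256 * 1024;  // pretend allocation
    space.size += allocated;
    if (allocated >= budget) {
      std::printf("schedule minor GC task at %llu bytes\n",
                  static_cast<unsigned long long>(space.size));
      observer_attached = false;  // detach until a GC epilogue re-adds it
    } else {
      budget -= allocated;
    }
  }
  return 0;
}
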
Heap::Heap()
@@ -256,7 +305,7 @@ Heap::Heap()
Heap::~Heap() = default;
-size_t Heap::MaxReserved() {
+size_t Heap::MaxReserved() const {
const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
return static_cast<size_t>(2 * max_semi_space_size_ +
kMaxNewLargeObjectSpaceSize +
@@ -368,7 +417,7 @@ size_t Heap::Capacity() {
return NewSpaceCapacity() + OldGenerationCapacity();
}
-size_t Heap::OldGenerationCapacity() {
+size_t Heap::OldGenerationCapacity() const {
if (!HasBeenSetUp()) return 0;
PagedSpaceIterator spaces(this);
size_t total = 0;
@@ -376,6 +425,9 @@ size_t Heap::OldGenerationCapacity() {
space = spaces.Next()) {
total += space->Capacity();
}
+ if (shared_lo_space_) {
+ total += shared_lo_space_->SizeOfObjects();
+ }
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}
@@ -388,6 +440,9 @@ size_t Heap::CommittedOldGenerationMemory() {
space = spaces.Next()) {
total += space->CommittedMemory();
}
+ if (shared_lo_space_) {
+ total += shared_lo_space_->Size();
+ }
return total + lo_space_->Size() + code_lo_space_->Size();
}
@@ -446,7 +501,7 @@ size_t Heap::Available() {
return total;
}
-bool Heap::CanExpandOldGeneration(size_t size) {
+bool Heap::CanExpandOldGeneration(size_t size) const {
if (force_oom_ || force_gc_on_next_allocation_) return false;
if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
// The OldGenerationCapacity does not account compaction spaces used
@@ -472,7 +527,7 @@ bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
memory_allocator()->Size() + size <= MaxReserved();
}
-bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
+bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) const {
size_t new_space_capacity = NewSpaceCapacity();
size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;
@@ -488,7 +543,7 @@ bool Heap::HasBeenSetUp() const {
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
GarbageCollectionReason gc_reason,
- const char** reason) {
+ const char** reason) const {
if (gc_reason == GarbageCollectionReason::kFinalizeMinorMC) {
DCHECK(new_space());
*reason = "finalize MinorMC";
@@ -540,7 +595,7 @@ void Heap::SetGCState(HeapState state) {
}
bool Heap::IsGCWithStack() const {
- return local_embedder_heap_tracer()->embedder_stack_state() ==
+ return embedder_stack_state_ ==
cppgc::EmbedderStackState::kMayContainHeapPointers;
}
@@ -973,13 +1028,13 @@ void UpdateRetainersMapAfterScavenge(UnorderedHeapObjectMap<HeapObject>* map) {
if (Heap::InFromPage(object)) {
MapWord map_word = object.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) continue;
- object = map_word.ToForwardingAddress();
+ object = map_word.ToForwardingAddress(object);
}
if (Heap::InFromPage(retainer)) {
MapWord map_word = retainer.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) continue;
- retainer = map_word.ToForwardingAddress();
+ retainer = map_word.ToForwardingAddress(retainer);
}
updated_map[object] = retainer;
@@ -1005,7 +1060,7 @@ void Heap::UpdateRetainersAfterScavenge() {
if (Heap::InFromPage(object)) {
MapWord map_word = object.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) continue;
- object = map_word.ToForwardingAddress();
+ object = map_word.ToForwardingAddress(object);
}
updated_retaining_root[object] = pair.second;
@@ -1105,9 +1160,18 @@ void Heap::GarbageCollectionPrologueInSafepoint() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
gc_count_++;
+ DCHECK_EQ(ResizeNewSpaceMode::kNone, resize_new_space_mode_);
if (new_space_) {
UpdateNewSpaceAllocationCounter();
- new_space_->ResetParkedAllocationBuffers();
+ if (!v8_flags.minor_mc) {
+ resize_new_space_mode_ = ShouldResizeNewSpace();
+ // Pretenuring heuristics require that new space grows before pretenuring
+ // feedback is processed.
+ if (resize_new_space_mode_ == ResizeNewSpaceMode::kGrow) {
+ ExpandNewSpaceSize();
+ }
+ SemiSpaceNewSpace::From(new_space_)->ResetParkedAllocationBuffers();
+ }
}
}
@@ -1180,19 +1244,14 @@ void Heap::PublishPendingAllocations() {
code_lo_space_->ResetPendingObject();
}
-void Heap::InvalidateCodeDeoptimizationData(Code code) {
- CodePageMemoryModificationScope modification_scope(code);
- code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
-}
-
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache data structure in heap instead.
- ForeachAllocationSite(allocation_sites_list(), [](AllocationSite site) {
+ ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
if (site.deopt_dependent_code()) {
DependentCode::MarkCodeForDeoptimization(
- site, DependentCode::kAllocationSiteTenuringChangedGroup);
+ isolate_, site, DependentCode::kAllocationSiteTenuringChangedGroup);
site.set_deopt_dependent_code(false);
}
});
@@ -1217,14 +1276,34 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
if (collector == GarbageCollector::MARK_COMPACTOR) {
memory_pressure_level_.store(MemoryPressureLevel::kNone,
std::memory_order_relaxed);
+
+ if (v8_flags.stress_marking > 0) {
+ stress_marking_percentage_ = NextStressMarkingLimit();
+ }
}
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
- safepoint()->IterateLocalHeaps([this, collector](LocalHeap* local_heap) {
- local_heap->InvokeGCEpilogueCallbacksInSafepoint(
- GetGCTypeFromGarbageCollector(collector), current_gc_callback_flags_);
- });
+ {
+ // Allows handle derefs for all threads/isolates from this thread.
+ AllowHandleDereferenceAllThreads allow_all_handle_derefs;
+ safepoint()->IterateLocalHeaps([this, collector](LocalHeap* local_heap) {
+ local_heap->InvokeGCEpilogueCallbacksInSafepoint(
+ GetGCTypeFromGarbageCollector(collector), current_gc_callback_flags_);
+ });
+
+ if (isolate()->is_shared_space_isolate()) {
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [this, collector](Isolate* client) {
+ client->heap()->safepoint()->IterateLocalHeaps(
+ [this, collector](LocalHeap* local_heap) {
+ local_heap->InvokeGCEpilogueCallbacksInSafepoint(
+ GetGCTypeFromGarbageCollector(collector),
+ current_gc_callback_flags_);
+ });
+ });
+ }
+ }
#define UPDATE_COUNTERS_FOR_SPACE(space) \
isolate_->counters()->space##_bytes_available()->Set( \
@@ -1267,34 +1346,25 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
if (v8_flags.print_global_handles) isolate_->global_handles()->Print();
if (v8_flags.print_handles) PrintHandles();
- if (v8_flags.code_stats) ReportCodeStatistics("After GC");
if (v8_flags.check_handle_count) CheckHandleCount();
#endif
- if (new_space()) {
+ if (new_space() && !v8_flags.minor_mc) {
+ SemiSpaceNewSpace* semi_space_new_space =
+ SemiSpaceNewSpace::From(new_space());
if (Heap::ShouldZapGarbage() || v8_flags.clear_free_memory) {
- new_space()->ZapUnusedMemory();
+ semi_space_new_space->ZapUnusedMemory();
}
- if (!v8_flags.minor_mc) {
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_ADJUST_NEW_SPACE);
- ResizeNewSpaceMode resize_new_space = ShouldResizeNewSpace();
- if (resize_new_space == ResizeNewSpaceMode::kGrow) {
- ExpandNewSpaceSize();
- }
-
- if (resize_new_space == ResizeNewSpaceMode::kShrink) {
- ReduceNewSpaceSize();
- }
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
+ if (resize_new_space_mode_ == ResizeNewSpaceMode::kShrink) {
+ ReduceNewSpaceSize();
}
-
- SemiSpaceNewSpace::From(new_space())->MakeAllPagesInFromSpaceIterable();
}
+ resize_new_space_mode_ = ResizeNewSpaceMode::kNone;
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- new_space()->ClearUnusedObjectStartBitmaps();
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ semi_space_new_space->MakeAllPagesInFromSpaceIterable();
}
// Ensure that unmapper task isn't running during full GC. We need access to
@@ -1350,6 +1420,7 @@ void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
#ifdef DEBUG
ReportStatisticsAfterGC();
+ if (v8_flags.code_stats) ReportCodeStatistics("After GC");
#endif // DEBUG
last_gc_time_ = MonotonicallyIncreasingTimeInMs();
@@ -1380,20 +1451,17 @@ void Heap::HandleGCRequest() {
}
}
-void Heap::ScheduleScavengeTaskIfNeeded() {
- DCHECK_NOT_NULL(scavenge_job_);
- scavenge_job_->ScheduleTaskIfNeeded(this);
-}
-
-size_t Heap::MinorMCTaskTriggerSize() const {
- return new_space()->Capacity() * v8_flags.minor_mc_task_trigger / 100;
+void Heap::ScheduleMinorGCTaskIfNeeded() {
+ DCHECK_NOT_NULL(minor_gc_job_);
+ minor_gc_job_->ScheduleTaskIfNeeded(this);
}
void Heap::StartMinorMCIncrementalMarkingIfNeeded() {
if (v8_flags.concurrent_minor_mc_marking && !IsTearingDown() &&
!incremental_marking()->IsMarking() &&
incremental_marking()->CanBeStarted() && V8_LIKELY(!v8_flags.gc_global) &&
- (new_space()->Size() >= MinorMCTaskTriggerSize())) {
+ (new_space()->Size() >=
+ MinorGCJob::YoungGenerationTaskTriggerSize(this))) {
StartIncrementalMarking(Heap::kNoGCFlags, GarbageCollectionReason::kTask,
kNoGCCallbackFlags,
GarbageCollector::MINOR_MARK_COMPACTOR);
@@ -1490,21 +1558,23 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
+ isolate()->compilation_cache()->Clear();
+
set_current_gc_flags(
kReduceMemoryFootprintMask |
(gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
: 0));
- isolate_->compilation_cache()->Clear();
- const int kMaxNumberOfAttempts = 7;
- const int kMinNumberOfAttempts = 2;
+ constexpr int kMaxNumberOfAttempts = 7;
+ constexpr int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
- attempt + 1 >= kMinNumberOfAttempts) {
+ CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags);
+ if ((isolate()->global_handles()->last_gc_custom_callbacks() == 0) &&
+ (attempt + 1 >= kMinNumberOfAttempts)) {
break;
}
}
-
set_current_gc_flags(kNoGCFlags);
+
EagerlyFreeExternalMemory();
if (v8_flags.trace_duplicate_threshold_kb) {
@@ -1592,7 +1662,25 @@ Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
heap_->SizeOfObjects());
}
-bool Heap::CollectGarbage(AllocationSpace space,
+namespace {
+
+template <typename Callback>
+void InvokeExternalCallbacks(Isolate* isolate, Callback callback) {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate);
+ // Temporarily override any embedder stack state, as callbacks may create
+ // their own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ isolate->heap(), EmbedderStackStateScope::kExplicitInvocation,
+ StackState::kMayContainHeapPointers);
+ VMState<EXTERNAL> callback_state(isolate);
+
+ callback();
+}
+
+} // namespace
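
InvokeExternalCallbacks gathers the scopes that must bracket any embedder-visible callback (allow GC, allow JS, a conservative stack-state override, and an external VM state) so the prologue and epilogue call sites share them. The sketch below mimics that wrapper with placeholder RAII types; it is not the V8 implementation.

#include <iostream>

// Placeholder RAII scopes standing in for AllowGarbageCollection,
// AllowJavascriptExecution, EmbedderStackStateScope and VMState<EXTERNAL>.
struct ScopedState {
  explicit ScopedState(const char* name) : name_(name) {
    std::cout << "enter " << name_ << "\n";
  }
  ~ScopedState() { std::cout << "leave " << name_ << "\n"; }
  const char* name_;
};

// Same shape as the helper above: every external callback runs under the same
// set of scopes, no matter which call site invokes it.
template <typename Callback>
void InvokeExternalCallbacks(Callback callback) {
  ScopedState allow_gc("allow GC");
  ScopedState allow_js("allow JS");
  ScopedState stack_state("stack may contain heap pointers");
  ScopedState external_state("external VM state");
  callback();  // prologue/epilogue callbacks; they may trigger a nested GC
}

int main() {
  InvokeExternalCallbacks([] { std::cout << "run GC prologue callbacks\n"; });
}
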
+
+void Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
if (V8_UNLIKELY(!deserialization_complete_)) {
@@ -1617,11 +1705,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
DCHECK(AllowGarbageCollection::IsAllowed());
- GarbageCollector collector;
const char* collector_reason = nullptr;
-
- collector = SelectGarbageCollector(space, gc_reason, &collector_reason);
-
+ const GarbageCollector collector =
+ SelectGarbageCollector(space, gc_reason, &collector_reason);
current_or_last_garbage_collector_ = collector;
if (collector == GarbageCollector::MARK_COMPACTOR &&
@@ -1629,31 +1715,28 @@ bool Heap::CollectGarbage(AllocationSpace space,
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kFinalizeMinorMC);
}
- // Ensure that all pending phantom callbacks are invoked.
- isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
+ const GCType gc_type = GetGCTypeFromGarbageCollector(collector);
- GCType gc_type = GetGCTypeFromGarbageCollector(collector);
- {
- GCCallbacksScope scope(this);
- // Temporary override any embedder stack state as callbacks may create
- // their own state on the stack and recursively trigger GC.
- EmbedderStackStateScope embedder_scope(
- this, EmbedderStackStateScope::kExplicitInvocation,
- StackState::kMayContainHeapPointers);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> callback_state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
- }
- }
+ // Prologue callbacks. These callbacks may trigger GC themselves and thus
+ // cannot be related exactly to garbage collection cycles.
+ //
+ // GCTracer scopes are managed by callees.
+ InvokeExternalCallbacks(isolate(), [this, gc_callback_flags, gc_type]() {
+ // Ensure that all pending phantom callbacks are invoked.
+ isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
+
+ // Prologue callbacks registered with Heap.
+ CallGCPrologueCallbacks(gc_type, gc_callback_flags,
+ GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
+ });
- // Part 2: The main garbage collection phase.
+ // The main garbage collection phase.
DisallowGarbageCollection no_gc_during_gc;
- size_t freed_global_handles = 0;
+ if (force_shared_gc_with_empty_stack_for_testing_) {
+ embedder_stack_state_ = StackState::kNoHeapPointers;
+ }
+
size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
? CommittedOldGenerationMemory()
: 0;
@@ -1664,20 +1747,13 @@ bool Heap::CollectGarbage(AllocationSpace space,
this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
GarbageCollectionReasonToString(gc_reason));
- auto stack_marker = v8::base::Stack::GetCurrentStackPosition();
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- stack().set_marker(stack_marker);
-#endif
- if (collector == GarbageCollector::MARK_COMPACTOR && cpp_heap()) {
- // CppHeap needs a stack marker at the top of all entry points to allow
- // deterministic passes over the stack. E.g., a verifier that should only
- // find a subset of references of the marker.
- //
- // TODO(chromium:1056170): Consider adding a component that keeps track
- // of relevant GC stack regions where interesting pointers can be found.
- static_cast<v8::internal::CppHeap*>(cpp_heap())
- ->SetStackEndOfCurrentGC(stack_marker);
- }
+ // We need a stack marker at the top of all entry points to allow
+ // deterministic passes over the stack. E.g., a verifier that should only
+ // find a subset of references of the marker.
+ //
+ // TODO(chromium:1056170): Consider adding a component that keeps track
+ // of relevant GC stack regions where interesting pointers can be found.
+ stack().SetMarkerToCurrentStackPosition();
GarbageCollectionPrologue(gc_reason, gc_callback_flags);
{
@@ -1702,8 +1778,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
tp_heap_->CollectGarbage();
} else {
- freed_global_handles +=
- PerformGarbageCollection(collector, gc_reason, collector_reason);
+ PerformGarbageCollection(collector, gc_reason, collector_reason);
}
// Clear flags describing the current GC now that the current GC is
// complete. Do this before GarbageCollectionEpilogue() since that could
@@ -1724,27 +1799,12 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == GarbageCollector::MARK_COMPACTOR) {
- // Calculate used memory first, then committed memory. Following code
- // assumes that committed >= used, which might not hold when this is
- // calculated in the wrong order and background threads allocate
- // in-between.
- size_t used_memory_after = OldGenerationSizeOfObjects();
- size_t committed_memory_after = CommittedOldGenerationMemory();
if (memory_reducer_ != nullptr) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kMarkCompact;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- // Trigger one more GC if
- // - this GC decreased committed memory,
- // - there is high fragmentation,
- event.next_gc_likely_to_collect_more =
- (committed_memory_before > committed_memory_after + MB) ||
- HasHighFragmentation(used_memory_after, committed_memory_after);
- event.committed_memory = committed_memory_after;
- memory_reducer_->NotifyMarkCompact(event);
+ memory_reducer_->NotifyMarkCompact(committed_memory_before);
}
if (initial_max_old_generation_size_ < max_old_generation_size() &&
- used_memory_after < initial_max_old_generation_size_threshold_) {
+ OldGenerationSizeOfObjects() <
+ initial_max_old_generation_size_threshold_) {
set_max_old_generation_size(initial_max_old_generation_size_);
}
}
@@ -1761,39 +1821,20 @@ bool Heap::CollectGarbage(AllocationSpace space,
} else {
tracer()->StopFullCycleIfNeeded();
}
-
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- stack().clear_marker();
-#endif
- }
-
- // Part 3: Invoke all callbacks which should happen after the actual garbage
- // collection is triggered. Note that these callbacks may trigger another
- // garbage collection since they may allocate.
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
- gc_post_processing_depth_++;
- {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
- }
- gc_post_processing_depth_--;
}
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> callback_state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
- }
- }
+ // Epilogue callbacks. These callbacks may trigger GC themselves and thus
+ // cannot be related exactly to garbage collection cycles.
+ //
+ // GCTracer scopes are managed by callees.
+ InvokeExternalCallbacks(isolate(), [this, gc_callback_flags, gc_type]() {
+ // Epilogue callbacks registered with Heap.
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags,
+ GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
+
+ isolate()->global_handles()->PostGarbageCollectionProcessing(
+ gc_callback_flags);
+ });
if (collector == GarbageCollector::MARK_COMPACTOR &&
(gc_callback_flags & (kGCCallbackFlagForced |
@@ -1803,7 +1844,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
- if (IsYoungGenerationCollector(collector)) {
+ if (collector == GarbageCollector::SCAVENGER) {
+ DCHECK(!v8_flags.minor_mc);
StartIncrementalMarkingIfAllocationLimitIsReached(
GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
@@ -1815,8 +1857,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
FatalProcessOutOfMemory("Reached heap limit");
}
}
-
- return freed_global_handles > 0;
}
int Heap::NotifyContextDisposed(bool dependant_context) {
@@ -1825,10 +1865,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
old_generation_size_configured_ = false;
set_old_generation_allocation_limit(initial_old_generation_size_);
if (memory_reducer_ != nullptr) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyPossibleGarbage(event);
+ memory_reducer_->NotifyPossibleGarbage();
}
}
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
@@ -1846,8 +1883,16 @@ void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollector collector) {
DCHECK(incremental_marking()->IsStopped());
+ // Delay starting incremental marking while concurrent sweeping still has
+ // work to do. This avoids long, blocking CompleteSweeping calls on the main
+ // thread when major incremental marking is scheduled right after a minor GC.
+ if (sweeper()->AreSweeperTasksRunning() &&
+ (!sweeper()->IsSweepingDoneForSpace(NEW_SPACE) ||
+ sweeper()->IsIteratingPromotedPages()))
+ return;
+
if (IsYoungGenerationCollector(collector)) {
- CompleteSweepingYoung(collector);
+ CompleteSweepingYoung();
} else {
// Sweeping needs to be completed such that markbits are all cleared before
// starting marking again.
@@ -1860,7 +1905,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
- SafepointKind safepoint_kind = isolate()->is_shared_heap_isolate()
+ SafepointKind safepoint_kind = isolate()->is_shared_space_isolate()
? SafepointKind::kGlobal
: SafepointKind::kIsolate;
safepoint_scope.emplace(isolate(), safepoint_kind);
@@ -1870,10 +1915,8 @@ void Heap::StartIncrementalMarking(int gc_flags,
VerifyCountersAfterSweeping();
#endif
- if (isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- if (client->is_shared_heap_isolate()) return;
-
if (v8_flags.concurrent_marking) {
client->heap()->concurrent_marking()->Pause();
}
@@ -1889,10 +1932,8 @@ void Heap::StartIncrementalMarking(int gc_flags,
incremental_marking()->Start(collector, gc_reason);
- if (isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- if (client->is_shared_heap_isolate()) return;
-
if (v8_flags.concurrent_marking &&
client->heap()->incremental_marking()->IsMarking()) {
client->heap()->concurrent_marking()->Resume();
@@ -1901,11 +1942,31 @@ void Heap::StartIncrementalMarking(int gc_flags,
}
}
-void Heap::CompleteSweepingFull() {
- {
- TRACE_GC(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
- array_buffer_sweeper()->EnsureFinished();
+namespace {
+void CompleteArrayBufferSweeping(Heap* heap) {
+ auto* array_buffer_sweeper = heap->array_buffer_sweeper();
+ if (array_buffer_sweeper->sweeping_in_progress()) {
+ auto* tracer = heap->tracer();
+ GCTracer::Scope::ScopeId scope_id;
+
+ switch (tracer->GetCurrentCollector()) {
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
+ scope_id = GCTracer::Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
+ break;
+ case GarbageCollector::SCAVENGER:
+ scope_id = GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS;
+ break;
+ case GarbageCollector::MARK_COMPACTOR:
+ scope_id = GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
+ }
+
+ TRACE_GC_EPOCH(tracer, scope_id, ThreadKind::kMain);
+ array_buffer_sweeper->EnsureFinished();
}
+}
+} // namespace
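
CompleteArrayBufferSweeping derives the tracer scope from the collector reported by the GCTracer and only pays for the TRACE_GC_EPOCH when sweeping is actually in progress. The switch deliberately has no default, so adding a collector kind surfaces a -Wswitch diagnostic instead of silently reusing a wrong scope. A compilable illustration of that idiom, with a stand-in enum and plain strings, follows.

#include <cstdio>

enum class CollectorKind { kScavenger, kMinorMarkCompact, kMarkCompact };

// Switch without a default: a newly added collector kind fails to compile
// cleanly rather than falling through to the wrong scope name.
const char* SweepScopeName(CollectorKind collector) {
  switch (collector) {
    case CollectorKind::kScavenger:
      return "SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS";
    case CollectorKind::kMinorMarkCompact:
      return "MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS";
    case CollectorKind::kMarkCompact:
      return "MC_COMPLETE_SWEEP_ARRAY_BUFFERS";
  }
  return "";  // unreachable for valid enum values
}

int main() {
  std::printf("%s\n", SweepScopeName(CollectorKind::kMinorMarkCompact));
}
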
+
+void Heap::CompleteSweepingFull() {
EnsureSweepingCompleted(SweepingForcedFinalizationMode::kUnifiedHeap);
DCHECK(!sweeping_in_progress());
@@ -1940,10 +2001,7 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
// This is a fallback case where no appropriate limits have been
// configured yet.
if (memory_reducer() != nullptr) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer()->NotifyPossibleGarbage(event);
+ memory_reducer()->NotifyPossibleGarbage();
}
break;
case IncrementalMarkingLimit::kNoLimit:
@@ -1965,13 +2023,6 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
}
}
-void Heap::StartIdleIncrementalMarking(
- GarbageCollectionReason gc_reason,
- const GCCallbackFlags gc_callback_flags) {
- StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
- gc_callback_flags);
-}
-
void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
const ObjectSlot src_slot, int len,
WriteBarrierMode mode) {
@@ -1982,7 +2033,8 @@ void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
DCHECK(dst_slot < dst_end);
DCHECK(src_slot < src_slot + len);
- if (v8_flags.concurrent_marking && incremental_marking()->IsMarking()) {
+ if ((v8_flags.concurrent_marking && incremental_marking()->IsMarking()) ||
+ (v8_flags.minor_mc && sweeper()->IsIteratingPromotedPages())) {
if (dst_slot < src_slot) {
// Copy tagged values forward using relaxed load/stores that do not
// involve value decompression.
@@ -2033,7 +2085,8 @@ void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
// Ensure ranges do not overlap.
DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
- if (v8_flags.concurrent_marking && incremental_marking()->IsMarking()) {
+ if ((v8_flags.concurrent_marking && incremental_marking()->IsMarking()) ||
+ (v8_flags.minor_mc && sweeper()->IsIteratingPromotedPages())) {
// Copy tagged values using relaxed load/stores that do not involve value
// decompression.
const AtomicSlot atomic_dst_end(dst_end);
@@ -2118,6 +2171,7 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
}
namespace {
+
GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
switch (collector) {
case GarbageCollector::MARK_COMPACTOR:
@@ -2129,15 +2183,28 @@ GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
}
UNREACHABLE();
}
+
+void ClearStubCaches(Isolate* isolate) {
+ isolate->load_stub_cache()->Clear();
+ isolate->store_stub_cache()->Clear();
+
+ if (isolate->is_shared_space_isolate()) {
+ isolate->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ client->load_stub_cache()->Clear();
+ client->store_stub_cache()->Clear();
+ });
+ }
+}
+
} // namespace
-size_t Heap::PerformGarbageCollection(GarbageCollector collector,
- GarbageCollectionReason gc_reason,
- const char* collector_reason) {
+void Heap::PerformGarbageCollection(GarbageCollector collector,
+ GarbageCollectionReason gc_reason,
+ const char* collector_reason) {
DisallowJavascriptExecution no_js(isolate());
if (IsYoungGenerationCollector(collector)) {
- CompleteSweepingYoung(collector);
+ CompleteSweepingYoung();
if (v8_flags.verify_heap) {
// If heap verification is enabled, we want to ensure that sweeping is
// completed here, as it will be triggered from Heap::Verify anyway.
@@ -2145,27 +2212,29 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
// full GC cycle.
CompleteSweepingFull();
}
- if (!v8_flags.minor_mc || incremental_marking_->IsStopped()) {
- // If v8_flags.minor_mc is false, then the young GC is Scavenger, which
- // may interrupt an incremental full GC. If MinorMC incremental marking
- // was running before, there is already an active GCTracer cycle.
- tracer()->StartCycle(collector, gc_reason, collector_reason,
- GCTracer::MarkingType::kAtomic);
- }
} else {
DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
CompleteSweepingFull();
memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+ }
- // If incremental marking has been activated, the full GC cycle has already
- // started, so don't start a new one.
- if (!incremental_marking_->IsMarking()) {
- tracer()->StartCycle(collector, gc_reason, collector_reason,
- GCTracer::MarkingType::kAtomic);
- }
+ base::Optional<SafepointScope> safepoint_scope;
+ {
+ AllowGarbageCollection allow_shared_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+
+ SafepointKind safepoint_kind = isolate()->is_shared_space_isolate()
+ ? SafepointKind::kGlobal
+ : SafepointKind::kIsolate;
+ safepoint_scope.emplace(isolate(), safepoint_kind);
+ }
+
+ if (!incremental_marking_->IsMarking() ||
+ (collector == GarbageCollector::SCAVENGER)) {
+ tracer()->StartCycle(collector, gc_reason, collector_reason,
+ GCTracer::MarkingType::kAtomic);
}
- if (v8_flags.minor_mc) pretenuring_handler_.ProcessPretenuringFeedback();
tracer()->StartAtomicPause();
if (!Heap::IsYoungGenerationCollector(collector) &&
@@ -2176,33 +2245,25 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
DCHECK(tracer()->IsConsistentWithCollector(collector));
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
- base::Optional<SafepointScope> safepoint_scope;
-
- {
- AllowGarbageCollection allow_shared_gc;
- IgnoreLocalGCRequests ignore_gc_requests(this);
-
- SafepointKind safepoint_kind =
- v8_flags.shared_space && isolate()->is_shared_heap_isolate()
- ? SafepointKind::kGlobal
- : SafepointKind::kIsolate;
- safepoint_scope.emplace(isolate(), safepoint_kind);
- }
-
collection_barrier_->StopTimeToCollectionTimer();
HeapVerifier::VerifyHeapIfEnabled(this);
- if (isolate()->is_shared_heap_isolate()) {
- isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- if (client->is_shared_heap_isolate()) return;
-
- if (v8_flags.concurrent_marking) {
- client->heap()->concurrent_marking()->Pause();
- }
-
- HeapVerifier::VerifyHeapIfEnabled(client->heap());
- });
+ if (isolate()->is_shared_space_isolate()) {
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [collector](Isolate* client) {
+ CHECK(client->heap()->deserialization_complete());
+
+ if (v8_flags.concurrent_marking) {
+ client->heap()->concurrent_marking()->Pause();
+ }
+
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
+ Sweeper* const client_sweeper = client->heap()->sweeper();
+ client_sweeper->ContributeAndWaitForPromotedPagesIteration();
+ }
+ HeapVerifier::VerifyHeapIfEnabled(client->heap());
+ });
}
tracer()->StartInSafepoint();
@@ -2214,6 +2275,10 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
size_t start_young_generation_size =
NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
+ // Make sure allocation observers are disabled until the new new-space
+ // capacity is set in the epilogue.
+ PauseAllocationObserversScope pause_observers(this);
+
if (collector == GarbageCollector::MARK_COMPACTOR) {
MarkCompact();
} else if (collector == GarbageCollector::MINOR_MARK_COMPACTOR) {
@@ -2228,7 +2293,7 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
- if (collector != GarbageCollector::MARK_COMPACTOR) {
+ if (collector == GarbageCollector::SCAVENGER) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
@@ -2242,76 +2307,56 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
- size_t freed_global_handles;
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
- // First round weak callbacks are not supposed to allocate and trigger
- // nested GCs.
- freed_global_handles =
- isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
- }
+ // First-pass weak callbacks are not supposed to allocate or trigger
+ // nested GCs.
+ isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
- if (collector == GarbageCollector::MARK_COMPACTOR) {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
+ if (cpp_heap() && (collector == GarbageCollector::MARK_COMPACTOR ||
+ collector == GarbageCollector::MINOR_MARK_COMPACTOR)) {
// TraceEpilogue may trigger operations that invalidate global handles. It
- // has to be called *after* all other operations that potentially touch and
- // reset global handles. It is also still part of the main garbage
- // collection pause and thus needs to be called *before* any operation that
- // can potentially trigger recursive garbage
- local_embedder_heap_tracer()->TraceEpilogue();
- }
-
-#if defined(CPPGC_YOUNG_GENERATION)
- // Schedule Oilpan's Minor GC. Since the minor GC doesn't support conservative
- // stack scanning, do it only when Scavenger runs from task, which is
- // non-nestable.
- if (cpp_heap() && IsYoungGenerationCollector(collector)) {
- CppHeap::From(cpp_heap())->RunMinorGCIfNeeded();
+ // has to be called *after* all other operations that potentially touch
+ // and reset global handles. It is also still part of the main garbage
+ // collection pause and thus needs to be called *before* any operation
+ // that can potentially trigger recursive garbage collection.
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
+ // Reset the stack state to the conservative default, as follow-up garbage
+ // collections triggered from callbacks may run with a different stack state.
+ embedder_stack_state_ = cppgc::EmbedderStackState::kMayContainHeapPointers;
+ CppHeap::From(cpp_heap())->TraceEpilogue();
}
-#endif // defined(CPPGC_YOUNG_GENERATION)
RecomputeLimits(collector);
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
+ ClearStubCaches(isolate());
+ }
+
GarbageCollectionEpilogueInSafepoint(collector);
tracer()->StopInSafepoint();
HeapVerifier::VerifyHeapIfEnabled(this);
- if (isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- if (client->is_shared_heap_isolate()) return;
+ HeapVerifier::VerifyHeapIfEnabled(client->heap());
if (v8_flags.concurrent_marking &&
client->heap()->incremental_marking()->IsMarking()) {
client->heap()->concurrent_marking()->Resume();
}
-
- HeapVerifier::VerifyHeapIfEnabled(client->heap());
});
}
-
- return freed_global_handles;
}
bool Heap::CollectGarbageShared(LocalHeap* local_heap,
GarbageCollectionReason gc_reason) {
CHECK(deserialization_complete());
- DCHECK(isolate()->has_shared_heap());
-
- if (v8_flags.shared_space) {
- Isolate* shared_space_isolate = isolate()->shared_space_isolate();
- return shared_space_isolate->heap()->CollectGarbageFromAnyThread(local_heap,
- gc_reason);
- } else {
- DCHECK(!IsShared());
- DCHECK_NOT_NULL(isolate()->shared_isolate());
+ DCHECK(isolate()->has_shared_space());
- isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
- isolate(), gc_reason);
- return true;
- }
+ Isolate* shared_space_isolate = isolate()->shared_space_isolate();
+ return shared_space_isolate->heap()->CollectGarbageFromAnyThread(local_heap,
+ gc_reason);
}
bool Heap::CollectGarbageFromAnyThread(LocalHeap* local_heap,
@@ -2339,79 +2384,8 @@ bool Heap::CollectGarbageFromAnyThread(LocalHeap* local_heap,
}
}
-void Heap::PerformSharedGarbageCollection(Isolate* initiator,
- GarbageCollectionReason gc_reason) {
- DCHECK(IsShared());
-
- // Stop all client isolates attached to this isolate
- GlobalSafepointScope global_safepoint(initiator);
-
- // Migrate shared isolate to the main thread of the initiator isolate.
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
- v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
-
- tracer()->StartObservablePause();
- DCHECK(incremental_marking_->IsStopped());
- DCHECK_NOT_NULL(isolate()->global_safepoint());
-
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- stack().set_marker(v8::base::Stack::GetCurrentStackPosition());
-#endif
-
- isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- client->heap()->FreeSharedLinearAllocationAreas();
-
- // As long as we need to iterate the client heap to find references into the
- // shared heap, all client heaps need to be iterable.
- client->heap()->MakeHeapIterable();
-
- if (v8_flags.concurrent_marking) {
- client->heap()->concurrent_marking()->Pause();
- }
-
- HeapVerifier::VerifyHeapIfEnabled(client->heap());
- });
-
- const GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
- PerformGarbageCollection(collector, gc_reason, nullptr);
-
- isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- HeapVerifier::VerifyHeapIfEnabled(client->heap());
-
- if (v8_flags.concurrent_marking &&
- client->heap()->incremental_marking()->IsMarking()) {
- client->heap()->concurrent_marking()->Resume();
- }
- });
-
- tracer()->StopAtomicPause();
- tracer()->StopObservablePause();
- tracer()->UpdateStatistics(collector);
- tracer()->StopFullCycleIfNeeded();
-
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- stack().clear_marker();
-#endif
-}
-
-void Heap::CompleteSweepingYoung(GarbageCollector collector) {
- GCTracer::Scope::ScopeId scope_id;
-
- switch (collector) {
- case GarbageCollector::MINOR_MARK_COMPACTOR:
- scope_id = GCTracer::Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
- break;
- case GarbageCollector::SCAVENGER:
- scope_id = GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS;
- break;
- default:
- UNREACHABLE();
- }
-
- {
- TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
- array_buffer_sweeper()->EnsureFinished();
- }
+void Heap::CompleteSweepingYoung() {
+ CompleteArrayBufferSweeping(this);
// If sweeping is in progress and there are no sweeper tasks running, finish
// the sweeping here, to avoid having to pause and resume during the young
@@ -2462,20 +2436,16 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
this, max_old_generation_size(), v8_gc_speed, v8_mutator_speed);
double global_growing_factor = 0;
- if (UseGlobalMemoryScheduling()) {
- DCHECK_NOT_NULL(local_embedder_heap_tracer());
- double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
- double embedder_speed =
- tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
- double embedder_growing_factor =
- (embedder_gc_speed > 0 && embedder_speed > 0)
- ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
- this, max_global_memory_size_, embedder_gc_speed,
- embedder_speed)
- : 0;
- global_growing_factor =
- std::max(v8_growing_factor, embedder_growing_factor);
- }
+ double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
+ double embedder_speed =
+ tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
+ double embedder_growing_factor =
+ (embedder_gc_speed > 0 && embedder_speed > 0)
+ ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
+ this, max_global_memory_size_, embedder_gc_speed,
+ embedder_speed)
+ : 0;
+ global_growing_factor = std::max(v8_growing_factor, embedder_growing_factor);
size_t old_gen_size = OldGenerationSizeOfObjects();
size_t new_space_capacity = NewSpaceCapacity();
@@ -2489,14 +2459,12 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
mode));
- if (UseGlobalMemoryScheduling()) {
- DCHECK_GT(global_growing_factor, 0);
- global_allocation_limit_ =
- MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), min_global_memory_size_,
- max_global_memory_size_, new_space_capacity,
- global_growing_factor, mode);
- }
+ DCHECK_GT(global_growing_factor, 0);
+ global_allocation_limit_ =
+ MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity, global_growing_factor,
+ mode);
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
@@ -2509,44 +2477,50 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
if (new_old_generation_limit < old_generation_allocation_limit()) {
set_old_generation_allocation_limit(new_old_generation_limit);
}
- if (UseGlobalMemoryScheduling()) {
- DCHECK_GT(global_growing_factor, 0);
- size_t new_global_limit =
- MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), min_global_memory_size_,
- max_global_memory_size_, new_space_capacity,
- global_growing_factor, mode);
- if (new_global_limit < global_allocation_limit_) {
- global_allocation_limit_ = new_global_limit;
- }
+ DCHECK_GT(global_growing_factor, 0);
+ size_t new_global_limit =
+ MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity, global_growing_factor,
+ mode);
+ if (new_global_limit < global_allocation_limit_) {
+ global_allocation_limit_ = new_global_limit;
}
}
}
-void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
- gc_prologue_callbacks_.Invoke(gc_type, flags);
+void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags,
+ GCTracer::Scope::ScopeId scope_id) {
+ if (gc_prologue_callbacks_.IsEmpty()) return;
+
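+ // GCCallbacksScope guards against re-entering the embedder callbacks if one
+ // of them triggers another GC (assuming CheckReenter() only holds for the
+ // outermost scope).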
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
+ TRACE_GC(tracer(), scope_id);
+ HandleScope handle_scope(isolate());
+ gc_prologue_callbacks_.Invoke(gc_type, flags);
+ }
}
-void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
- gc_epilogue_callbacks_.Invoke(gc_type, flags);
+void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags,
+ GCTracer::Scope::ScopeId scope_id) {
+ if (gc_epilogue_callbacks_.IsEmpty()) return;
+
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
+ TRACE_GC(tracer(), scope_id);
+ HandleScope handle_scope(isolate());
+ gc_epilogue_callbacks_.Invoke(gc_type, flags);
+ }
}
void Heap::MarkCompact() {
- PauseAllocationObserversScope pause_observers(this);
-
SetGCState(MARK_COMPACT);
PROFILE(isolate_, CodeMovingGCEvent());
CodeSpaceMemoryModificationScope code_modification(this);
- // Disable soft allocation limits in the shared heap, if one exists, as
- // promotions into the shared heap should always succeed.
- OptionalAlwaysAllocateScope always_allocate_shared_heap(
- isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
- : nullptr);
-
UpdateOldGenerationAllocationCounter();
uint64_t size_of_objects_before_gc = SizeOfObjects();
@@ -2580,20 +2554,12 @@ void Heap::MinorMarkCompact() {
DCHECK(new_space());
DCHECK(!incremental_marking()->IsMajorMarking());
- PauseAllocationObserversScope pause_observers(this);
- SetGCState(MINOR_MARK_COMPACT);
-
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
+
AlwaysAllocateScope always_allocate(this);
- // Disable soft allocation limits in the shared heap, if one exists, as
- // promotions into the shared heap should always succeed.
- OptionalAlwaysAllocateScope always_allocate_shared_heap(
- isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
- : nullptr);
- minor_mark_compact_collector_->Prepare();
+ SetGCState(MINOR_MARK_COMPACT);
minor_mark_compact_collector_->CollectGarbage();
-
SetGCState(NOT_IN_GC);
}
@@ -2639,15 +2605,8 @@ void Heap::Scavenge() {
// trigger one during scavenge: scavenges allocation should always succeed.
AlwaysAllocateScope scope(this);
- // Disable soft allocation limits in the shared heap, if one exists, as
- // promotions into the shared heap should always succeed.
- OptionalAlwaysAllocateScope always_allocate_shared_heap(
- isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
- : nullptr);
-
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
- PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
@@ -2751,7 +2710,7 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
heap->FinalizeExternalString(string);
return String();
}
- new_string = String::cast(first_word.ToForwardingAddress());
+ new_string = String::cast(first_word.ToForwardingAddress(obj));
} else {
new_string = String::cast(obj);
}
@@ -3129,14 +3088,12 @@ void Heap::ConfigureInitialOldGenerationSize() {
} else {
old_generation_size_configured_ = true;
}
- if (UseGlobalMemoryScheduling()) {
- const size_t new_global_memory_limit = std::max(
- GlobalSizeOfObjects() + minimum_growing_step,
- static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
- (tracer()->AverageSurvivalRatio() / 100)));
- if (new_global_memory_limit < global_allocation_limit_) {
- global_allocation_limit_ = new_global_memory_limit;
- }
+ const size_t new_global_memory_limit = std::max(
+ GlobalSizeOfObjects() + minimum_growing_step,
+ static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
+ (tracer()->AverageSurvivalRatio() / 100)));
+ if (new_global_memory_limit < global_allocation_limit_) {
+ global_allocation_limit_ = new_global_memory_limit;
}
}
}
@@ -3165,6 +3122,8 @@ void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size,
if (size == kTaggedSize) {
filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
SKIP_WRITE_BARRIER);
+ // Ensure the filler map is properly initialized.
+ DCHECK(filler.map(heap->isolate()).IsMap());
} else if (size == 2 * kTaggedSize) {
filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
SKIP_WRITE_BARRIER);
@@ -3172,6 +3131,8 @@ void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size,
AtomicSlot slot(ObjectSlot(addr) + 1);
*slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
}
+ // Ensure the filler map is properly initialized.
+ DCHECK(filler.map(heap->isolate()).IsMap());
} else {
DCHECK_GT(size, 2 * kTaggedSize);
filler.set_map_after_allocation(roots.unchecked_free_space_map(),
@@ -3181,13 +3142,13 @@ void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size,
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
}
- }
- // At this point, we may be deserializing the heap from a snapshot, and
- // none of the maps have been created yet and are nullptr.
- DCHECK((filler.map_slot().contains_map_value(kNullAddress) &&
- !heap->deserialization_complete()) ||
- filler.map(heap->isolate()).IsMap());
+ // During bootstrapping we need to create a free space object before its
+ // map is initialized. In this case we cannot access the map yet, as it
+ // might be null or not yet set up properly.
+ DCHECK_IMPLIES(roots.is_initialized(RootIndex::kFreeSpaceMap),
+ filler.map(heap->isolate()).IsMap());
+ }
}
#ifdef DEBUG
@@ -3222,9 +3183,9 @@ void Heap::CreateFillerObjectAtSweeper(Address addr, int size) {
ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo);
}
-void Heap::CreateFillerObjectAt(Address addr, int size) {
- CreateFillerObjectAtRaw(addr, size,
- ClearFreedMemoryMode::kDontClearFreedMemory,
+void Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode) {
+ CreateFillerObjectAtRaw(addr, size, clear_memory_mode,
ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kYes);
}
@@ -3261,8 +3222,18 @@ bool Heap::CanMoveObjectStart(HeapObject object) {
return false;
}
- // We can move the object start if the page was already swept.
- return Page::FromHeapObject(object)->SweepingDone();
+ // Concurrent marking does not support moving object starts without a
+ // snapshot protocol.
+ //
+ // TODO(v8:13726): This can be improved via concurrently reading the contents
+ // in the marker at the cost of some complexity.
+ if (incremental_marking()->IsMarking()) return false;
+
+ // Concurrent sweeper does not support moving object starts. It assumes that
+ // markbits (black regions) and object starts match up.
+ if (!Page::FromHeapObject(object)->SweepingDone()) return false;
+
+ return true;
}
bool Heap::IsImmovable(HeapObject object) {
@@ -3296,10 +3267,10 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
- // V8_EXTERNAL_CODE_SPACE specific: we might be comparing Code object
- // with non-Code object here and it might produce false positives because
- // operator== for tagged values compares only lower 32 bits when pointer
- // compression is enabled.
+ // V8_EXTERNAL_CODE_SPACE specific: we might be comparing
+ // InstructionStream object with non-InstructionStream object here and it
+ // might produce false positives because operator== for tagged values
+ // compares only lower 32 bits when pointer compression is enabled.
DCHECK_NE((*p).ptr(), to_check_.ptr());
}
}
@@ -3386,9 +3357,6 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
Address old_start = object.address();
Address new_start = old_start + bytes_to_trim;
- incremental_marking()->NotifyLeftTrimming(object,
- HeapObject::FromAddress(new_start));
-
#ifdef DEBUG
if (MayContainRecordedSlots(object)) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
@@ -3554,9 +3522,10 @@ void Heap::MakeHeapIterable() {
});
if (isolate()->is_shared_space_isolate()) {
- isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- client->heap()->MakeSharedLinearAllocationAreasIterable();
- });
+ isolate()->global_safepoint()->IterateSharedSpaceAndClientIsolates(
+ [](Isolate* client) {
+ client->heap()->MakeSharedLinearAllocationAreasIterable();
+ });
}
PagedSpaceIterator spaces(this);
@@ -3565,36 +3534,42 @@ void Heap::MakeHeapIterable() {
space->MakeLinearAllocationAreaIterable();
}
- if (v8_flags.shared_space && shared_space_allocator_) {
+ if (shared_space_allocator_) {
shared_space_allocator_->MakeLinearAllocationAreaIterable();
}
if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
}
void Heap::FreeLinearAllocationAreas() {
+ FreeMainThreadLinearAllocationAreas();
+
safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
if (isolate()->is_shared_space_isolate()) {
- isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
- client->heap()->FreeSharedLinearAllocationAreas();
- });
+ isolate()->global_safepoint()->IterateSharedSpaceAndClientIsolates(
+ [](Isolate* client) {
+ client->heap()->FreeSharedLinearAllocationAreas();
+ });
}
+}
+void Heap::FreeMainThreadLinearAllocationAreas() {
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
+ base::MutexGuard guard(space->mutex());
space->FreeLinearAllocationArea();
}
- if (v8_flags.shared_space && shared_space_allocator_) {
+ if (shared_space_allocator_) {
shared_space_allocator_->FreeLinearAllocationArea();
}
if (new_space()) new_space()->FreeLinearAllocationArea();
}
void Heap::FreeSharedLinearAllocationAreas() {
- if (!isolate()->has_shared_heap()) return;
+ if (!isolate()->has_shared_space()) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->FreeSharedLinearAllocationArea();
});
@@ -3602,19 +3577,19 @@ void Heap::FreeSharedLinearAllocationAreas() {
}
void Heap::FreeMainThreadSharedLinearAllocationAreas() {
- if (!isolate()->has_shared_heap()) return;
+ if (!isolate()->has_shared_space()) return;
shared_space_allocator_->FreeLinearAllocationArea();
main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
void Heap::MakeSharedLinearAllocationAreasIterable() {
- if (!isolate()->has_shared_heap()) return;
+ if (!isolate()->has_shared_space()) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeSharedLinearAllocationAreaIterable();
});
- if (v8_flags.shared_space && shared_space_allocator_) {
+ if (shared_space_allocator_) {
shared_space_allocator_->MakeLinearAllocationAreaIterable();
}
@@ -3622,7 +3597,6 @@ void Heap::MakeSharedLinearAllocationAreasIterable() {
}
void Heap::MarkSharedLinearAllocationAreasBlack() {
- DCHECK(v8_flags.shared_space);
if (shared_space_allocator_) {
shared_space_allocator_->MarkLinearAllocationAreaBlack();
}
@@ -3633,14 +3607,13 @@ void Heap::MarkSharedLinearAllocationAreasBlack() {
}
void Heap::UnmarkSharedLinearAllocationAreas() {
- DCHECK(v8_flags.shared_space);
if (shared_space_allocator_) {
shared_space_allocator_->UnmarkLinearAllocationArea();
}
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MarkSharedLinearAllocationAreaBlack();
+ local_heap->UnmarkSharedLinearAllocationArea();
});
- main_thread_local_heap()->MarkSharedLinearAllocationAreaBlack();
+ main_thread_local_heap()->UnmarkSharedLinearAllocationArea();
}
namespace {
@@ -3693,9 +3666,6 @@ bool Heap::HasLowOldGenerationAllocationRate() {
}
bool Heap::HasLowEmbedderAllocationRate() {
- if (!UseGlobalMemoryScheduling()) return true;
-
- DCHECK_NOT_NULL(local_embedder_heap_tracer());
double mu = ComputeMutatorUtilization(
"Embedder",
tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond(),
@@ -3739,16 +3709,17 @@ void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
}
bool Heap::HasHighFragmentation() {
- size_t used = OldGenerationSizeOfObjects();
- size_t committed = CommittedOldGenerationMemory();
- return HasHighFragmentation(used, committed);
-}
+ const size_t used = OldGenerationSizeOfObjects();
+ const size_t committed = CommittedOldGenerationMemory();
+
+ // Background thread allocation could result in committed memory being less
+ // than used memory in some situations.
+ if (committed < used) return false;
+
+ constexpr size_t kSlack = 16 * MB;
-bool Heap::HasHighFragmentation(size_t used, size_t committed) {
- const size_t kSlack = 16 * MB;
// Fragmentation is high if committed > 2 * used + kSlack.
// Rewrite the expression to avoid overflow.
- DCHECK_GE(committed, used);
return committed - used > used + kSlack;
}
@@ -3768,10 +3739,7 @@ void Heap::ActivateMemoryReducerIfNeeded() {
const int kMinCommittedMemory = 7 * Page::kPageSize;
if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
isolate()->IsIsolateInBackground()) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyPossibleGarbage(event);
+ memory_reducer_->NotifyPossibleGarbage();
}
}
@@ -3808,7 +3776,7 @@ void Heap::ExpandNewSpaceSize() {
void Heap::ReduceNewSpaceSize() {
// MinorMC shrinks new space as part of sweeping.
if (!v8_flags.minor_mc) {
- new_space()->Shrink();
+ SemiSpaceNewSpace::From(new_space())->Shrink();
} else {
paged_new_space()->FinishShrinking();
}
@@ -3817,7 +3785,7 @@ void Heap::ReduceNewSpaceSize() {
size_t Heap::NewSpaceSize() { return new_space() ? new_space()->Size() : 0; }
-size_t Heap::NewSpaceCapacity() {
+size_t Heap::NewSpaceCapacity() const {
return new_space() ? new_space()->Capacity() : 0;
}
@@ -3835,27 +3803,23 @@ void Heap::FinalizeIncrementalMarkingAtomically(
}
void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
+ AllowGarbageCollection allow_allocation;
+ VMState<EXTERNAL> state(isolate_);
+ CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags,
+ GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
}
void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
+ AllowGarbageCollection allow_allocation;
+ VMState<EXTERNAL> state(isolate_);
+ CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags,
+ GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
}
+namespace {
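+// Address of the object whose layout change is currently in progress on this
+// thread. NotifyObjectLayoutChangeDone() below uses it to unlock the object
+// and reset the state.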
+thread_local Address pending_layout_change_object_address = kNullAddress;
+} // namespace
+
void Heap::NotifyObjectLayoutChange(
HeapObject object, const DisallowGarbageCollection&,
InvalidateRecordedSlots invalidate_recorded_slots, int new_size) {
@@ -3863,7 +3827,9 @@ void Heap::NotifyObjectLayoutChange(
const bool may_contain_recorded_slots = MayContainRecordedSlots(object);
if (incremental_marking()->IsMarking()) {
- incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
+ ExclusiveObjectLock::Lock(object);
+ DCHECK_EQ(pending_layout_change_object_address, kNullAddress);
+ pending_layout_change_object_address = object.address();
if (may_contain_recorded_slots && incremental_marking()->IsCompacting()) {
MemoryChunk::FromHeapObject(object)
->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, new_size);
@@ -3884,6 +3850,15 @@ void Heap::NotifyObjectLayoutChange(
#endif
}
+// static
+void Heap::NotifyObjectLayoutChangeDone(HeapObject object) {
+ if (pending_layout_change_object_address != kNullAddress) {
+ DCHECK_EQ(pending_layout_change_object_address, object.address());
+ ExclusiveObjectLock::Unlock(object);
+ pending_layout_change_object_address = kNullAddress;
+ }
+}
+
void Heap::NotifyObjectSizeChange(
HeapObject object, int old_size, int new_size,
ClearRecordedSlots clear_recorded_slots,
@@ -3891,6 +3866,7 @@ void Heap::NotifyObjectSizeChange(
old_size = ALIGN_TO_ALLOCATION_ALIGNMENT(old_size);
new_size = ALIGN_TO_ALLOCATION_ALIGNMENT(new_size);
DCHECK_LE(new_size, old_size);
+ DCHECK(!IsLargeObject(object));
if (new_size == old_size) return;
const bool is_main_thread = LocalHeap::Current() == nullptr;
@@ -3934,8 +3910,8 @@ void Heap::NotifyObjectSizeChange(
void Heap::UpdateInvalidatedObjectSize(HeapObject object, int new_size) {
if (!MayContainRecordedSlots(object)) return;
- // Updating invalidated_slots is unsychronized and thus needs to happen on the
- // main thread.
+ // Updating invalidated_slots is unsynchronized and thus needs to happen on
+ // the main thread.
DCHECK_NULL(LocalHeap::Current());
DCHECK_EQ(isolate()->thread_id(), ThreadId::Current());
@@ -4146,7 +4122,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
void Heap::EagerlyFreeExternalMemory() {
- array_buffer_sweeper()->EnsureFinished();
+ CompleteArrayBufferSweeping(this);
memory_allocator()->unmapper()->EnsureUnmappingCompleted();
}
@@ -4258,7 +4234,7 @@ void Heap::Print() {
}
void Heap::ReportCodeStatistics(const char* title) {
- PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
+ PrintF("###### Code Stats (%s) ######\n", title);
CollectCodeStatistics();
CodeStatistics::ReportCodeStatistics(isolate());
}
@@ -4409,12 +4385,6 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
UNREACHABLE();
}
-bool Heap::IsShared() const { return isolate()->is_shared(); }
-
-bool Heap::ShouldMarkSharedHeap() const {
- return isolate()->is_shared() || isolate()->is_shared_space_isolate();
-}
-
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
@@ -4511,9 +4481,10 @@ void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
}
void Heap::RegisterCodeObject(Handle<Code> code) {
- Address addr = code->address();
+ InstructionStream istream = code->instruction_stream();
+ Address addr = istream.address();
if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && code_space()->Contains(addr)) {
- MemoryChunk::FromHeapObject(*code)
+ MemoryChunk::FromHeapObject(istream)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject(addr);
}
@@ -4582,7 +4553,7 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
@@ -4696,11 +4667,13 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
v->Synchronize(VisitorSynchronization::kGlobalHandles);
if (!options.contains(SkipRoot::kStack)) {
- ScanStackMode mode =
- options.contains(SkipRoot::kConservativeStack) ? ScanStackMode::kNone
- : options.contains(SkipRoot::kTopOfStack) ? ScanStackMode::kFromMarker
- : ScanStackMode::kComplete;
- IterateStackRoots(v, mode);
+ IterateStackRoots(v);
+ if (!options.contains(SkipRoot::kConservativeStack)) {
+ ScanStackMode stack_mode = options.contains(SkipRoot::kTopOfStack)
+ ? ScanStackMode::kFromMarker
+ : ScanStackMode::kComplete;
+ IterateConservativeStackRoots(v, stack_mode);
+ }
v->Synchronize(VisitorSynchronization::kStackRoots);
}
@@ -4762,7 +4735,7 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
//
// However, worker/client isolates do not own the shared heap object cache
// and should not iterate it.
- if (isolate_->is_shared_heap_isolate() || !isolate_->has_shared_heap()) {
+ if (isolate_->is_shared_space_isolate() || !isolate_->has_shared_space()) {
SerializerDeserializer::IterateSharedHeapObjectCache(isolate_, v);
v->Synchronize(VisitorSynchronization::kSharedHeapObjectCache);
}
@@ -4790,10 +4763,13 @@ class ClientRootVisitor : public RootVisitor {
actual_visitor_->VisitRootPointers(root, description, start, end);
}
- void VisitRunningCode(FullObjectSlot slot) final {
+ void VisitRunningCode(FullObjectSlot code_slot,
+ FullObjectSlot maybe_istream_slot) final {
#if DEBUG
- HeapObject object = HeapObject::cast(*slot);
- DCHECK(!object.InSharedWritableHeap());
+ DCHECK(!HeapObject::cast(*code_slot).InWritableSharedSpace());
+ Object maybe_istream = *maybe_istream_slot;
+ DCHECK(maybe_istream == Smi::zero() ||
+ !HeapObject::cast(maybe_istream).InWritableSharedSpace());
#endif
}
@@ -4807,7 +4783,7 @@ class ClientRootVisitor : public RootVisitor {
Object object = *slot;
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- if (heap_object.InSharedWritableHeap()) {
+ if (heap_object.InWritableSharedSpace()) {
actual_visitor_->VisitRootPointer(root, description, slot);
}
}
@@ -4819,7 +4795,7 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options) {
IterateRoots(v, options);
- if (isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
ClientRootVisitor client_root_visitor(v);
// TODO(v8:13257): We cannot run CSS on client isolates now, as the
// stack markers will not be correct.
@@ -4831,19 +4807,11 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v,
}
}
-void Heap::IterateRootsFromStackIncludingClient(RootVisitor* v,
- ScanStackMode mode) {
- IterateStackRoots(v, mode);
+void Heap::IterateConservativeStackRootsIncludingClients(
+ RootVisitor* v, ScanStackMode stack_mode) {
+ IterateConservativeStackRoots(v, stack_mode);
- if (isolate()->is_shared_heap_isolate()) {
- ClientRootVisitor client_root_visitor(v);
- isolate()->global_safepoint()->IterateClientIsolates(
- [v = &client_root_visitor](Isolate* client) {
- // TODO(v8:13257): We cannot run CSS on client isolates now, as the
- // stack markers will not be correct.
- client->heap()->IterateStackRoots(v, ScanStackMode::kNone);
- });
- }
+ // TODO(v8:13257): Iterate over client isolates for CSS once supported.
}
void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
@@ -4869,24 +4837,18 @@ void Heap::IterateBuiltins(RootVisitor* v) {
static_assert(Builtins::AllBuiltinsAreIsolateIndependent());
}
-void Heap::IterateStackRoots(RootVisitor* v, ScanStackMode mode) {
- isolate_->Iterate(v);
+void Heap::IterateStackRoots(RootVisitor* v) { isolate_->Iterate(v); }
+void Heap::IterateConservativeStackRoots(RootVisitor* v,
+ ScanStackMode stack_mode) {
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- switch (std::min(mode, scan_stack_mode_for_testing_)) {
- case ScanStackMode::kNone: {
- break;
- }
- case ScanStackMode::kComplete: {
- ConservativeStackVisitor stack_visitor(isolate(), v);
- stack().IteratePointers(&stack_visitor);
- break;
- }
- case ScanStackMode::kFromMarker: {
- ConservativeStackVisitor stack_visitor(isolate(), v);
- stack().IteratePointersUnsafe(&stack_visitor, stack().get_marker());
- break;
- }
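+ // Skip conservative scanning when this GC runs without a stack that may
+ // contain heap pointers (which is what IsGCWithStack() is assumed to report).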
+ if (!IsGCWithStack()) return;
+
+ ConservativeStackVisitor stack_visitor(isolate_, v);
+ if (stack_mode == ScanStackMode::kComplete) {
+ stack().IteratePointers(&stack_visitor);
+ } else {
+ stack().IteratePointersUntilMarker(&stack_visitor);
}
#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
}
@@ -4926,6 +4888,13 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
max_semi_space_size_ =
SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
}
+ if (v8_flags.minor_mc) {
+ // The conditions above this one assume a new space implementation
+ // consisting of two equally sized semi spaces. If MinorMC is used, new
+ // space contains only a single space. Thus max size can be doubled
+ // without regressing memory.
+ max_semi_space_size_ *= 2;
+ }
if (v8_flags.stress_compaction) {
// This will cause more frequent GCs when stressing.
max_semi_space_size_ = MB;
@@ -5127,31 +5096,32 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
GetFromRingBuffer(stats->last_few_messages);
}
-size_t Heap::OldGenerationSizeOfObjects() {
+size_t Heap::OldGenerationSizeOfObjects() const {
PagedSpaceIterator spaces(this);
size_t total = 0;
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
total += space->SizeOfObjects();
}
+ if (shared_lo_space_) {
+ total += shared_lo_space_->SizeOfObjects();
+ }
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}
size_t Heap::EmbedderSizeOfObjects() const {
- return local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->used_size()
- : 0;
+ return cpp_heap_ ? CppHeap::From(cpp_heap_)->used_size() : 0;
}
-size_t Heap::GlobalSizeOfObjects() {
+size_t Heap::GlobalSizeOfObjects() const {
return OldGenerationSizeOfObjects() + EmbedderSizeOfObjects();
}
-uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() {
+uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() const {
return external_memory_.AllocatedSinceMarkCompact();
}
-bool Heap::AllocationLimitOvershotByLargeMargin() {
+bool Heap::AllocationLimitOvershotByLargeMargin() const {
// This guards against too eager finalization in small heaps.
// The number is chosen based on v8.browsing_mobile on Nexus 7v2.
constexpr size_t kMarginForSmallHeaps = 32u * MB;
@@ -5197,10 +5167,14 @@ bool Heap::ShouldOptimizeForLoadTime() {
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap,
+ AllocationOrigin origin) {
if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
// We reached the old generation allocation limit.
+ // Allocations in the GC should always succeed if possible.
+ if (origin == AllocationOrigin::kGC) return true;
+
// Background threads need to be allowed to allocate without GC after teardown
// was initiated.
if (gc_state() == TEAR_DOWN) return true;
@@ -5267,8 +5241,6 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
}
base::Optional<size_t> Heap::GlobalMemoryAvailable() {
- if (!UseGlobalMemoryScheduling()) return {};
-
size_t global_size = GlobalSizeOfObjects();
if (global_size < global_allocation_limit_)
@@ -5303,8 +5275,8 @@ double Heap::PercentToGlobalMemoryLimit() {
// started as soon as the embedder does not allocate with high throughput
// anymore.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
- // Code using an AlwaysAllocateScope assumes that the GC state does not
- // change; that implies that no marking steps must be performed.
+ // Code using an AlwaysAllocateScope assumes that the GC state does not
+ // change; that implies that no marking steps must be performed.
if (!incremental_marking()->CanBeStarted() || always_allocate()) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
@@ -5337,10 +5309,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
max_marking_limit_reached_ =
std::max<double>(max_marking_limit_reached_, current_percent);
}
- } else if (current_percent >=
- stress_marking_percentage_.load(std::memory_order_relaxed)) {
- stress_marking_percentage_.store(NextStressMarkingLimit(),
- std::memory_order_relaxed);
+ } else if (current_percent >= stress_marking_percentage_) {
return IncrementalMarkingLimit::kHardLimit;
}
}
@@ -5368,8 +5337,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available > NewSpaceCapacity() &&
(!global_memory_available ||
global_memory_available > NewSpaceCapacity())) {
- if (local_embedder_heap_tracer()->InUse() &&
- !old_generation_size_configured_ && gc_count_ == 0) {
+ if (cpp_heap() && !old_generation_size_configured_ && gc_count_ == 0) {
// At this point the embedder memory is above the activation
// threshold. No GC happened so far and it's thus unlikely to get a
// configured heap any time soon. Start a memory reducer in this case
@@ -5398,33 +5366,11 @@ bool Heap::ShouldStressCompaction() const {
return v8_flags.stress_compaction && (gc_count_ & 1) != 0;
}
-void Heap::EnableInlineAllocation() {
- // Update inline allocation limit for new space.
- if (new_space()) {
- new_space()->EnableInlineAllocation();
- }
- // Update inline allocation limit for old spaces.
- PagedSpaceIterator spaces(this);
- for (PagedSpace* space = spaces.Next(); space != nullptr;
- space = spaces.Next()) {
- base::MutexGuard guard(space->mutex());
- space->EnableInlineAllocation();
- }
-}
+void Heap::EnableInlineAllocation() { inline_allocation_enabled_ = true; }
void Heap::DisableInlineAllocation() {
- // Update inline allocation limit for new space.
- if (new_space()) {
- new_space()->DisableInlineAllocation();
- }
- // Update inline allocation limit for old spaces.
- PagedSpaceIterator spaces(this);
- CodePageCollectionMemoryModificationScope modification_scope(this);
- for (PagedSpace* space = spaces.Next(); space != nullptr;
- space = spaces.Next()) {
- base::MutexGuard guard(space->mutex());
- space->DisableInlineAllocation();
- }
+ inline_allocation_enabled_ = false;
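+ // Dropping the main-thread LABs here presumably forces subsequent
+ // allocations onto the slow path, where the flag above is consulted.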
+ FreeMainThreadLinearAllocationAreas();
}
void Heap::SetUp(LocalHeap* main_thread_local_heap) {
@@ -5458,20 +5404,20 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
// When a target requires the code range feature, we put all code objects in
// a contiguous range of virtual address space, so that they can call each
// other with near calls.
- if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
- // When sharing a pointer cage among Isolates, also share the
- // CodeRange. isolate_->page_allocator() is the process-wide pointer
- // compression cage's PageAllocator.
- code_range_ = CodeRange::EnsureProcessWideCodeRange(
- isolate_->page_allocator(), requested_size);
- } else {
- code_range_ = std::make_shared<CodeRange>();
- if (!code_range_->InitReservation(isolate_->page_allocator(),
- requested_size)) {
- V8::FatalProcessOutOfMemory(
- isolate_, "Failed to reserve virtual memory for CodeRange");
- }
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ // When sharing a pointer cage among Isolates, also share the
+ // CodeRange. isolate_->page_allocator() is the process-wide pointer
+ // compression cage's PageAllocator.
+ code_range_ = CodeRange::EnsureProcessWideCodeRange(
+ isolate_->page_allocator(), requested_size);
+#else
+ code_range_ = std::make_unique<CodeRange>();
+ if (!code_range_->InitReservation(isolate_->page_allocator(),
+ requested_size)) {
+ V8::FatalProcessOutOfMemory(
+ isolate_, "Failed to reserve virtual memory for CodeRange");
}
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
LOG(isolate_,
NewEvent("CodeRange",
@@ -5564,8 +5510,7 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
LinearAllocationArea& old_allocation_info) {
// Ensure SetUpFromReadOnlySpace has been ran.
DCHECK_NOT_NULL(read_only_space_);
- const bool has_young_gen = !v8_flags.single_generation && !IsShared();
- if (has_young_gen) {
+ if (!v8_flags.single_generation) {
if (v8_flags.minor_mc) {
space_[NEW_SPACE] = std::make_unique<PagedNewSpace>(
this, initial_semispace_size_, max_semi_space_size_,
@@ -5616,14 +5561,11 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
array_buffer_sweeper_.reset(new ArrayBufferSweeper(this));
gc_idle_time_handler_.reset(new GCIdleTimeHandler());
memory_measurement_.reset(new MemoryMeasurement(isolate()));
- if (!IsShared()) memory_reducer_.reset(new MemoryReducer(this));
+ if (v8_flags.memory_reducer) memory_reducer_.reset(new MemoryReducer(this));
if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
live_object_stats_.reset(new ObjectStats(this));
dead_object_stats_.reset(new ObjectStats(this));
}
- local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
- embedder_roots_handler_ =
- &local_embedder_heap_tracer()->default_embedder_roots_handler();
if (Heap::AllocationTrackerForDebugging::IsNeeded()) {
allocation_tracker_for_debugging_ =
std::make_unique<Heap::AllocationTrackerForDebugging>(this);
@@ -5638,21 +5580,18 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
}
if (new_space()) {
+ minor_gc_job_.reset(new MinorGCJob());
if (v8_flags.concurrent_minor_mc_marking) {
// TODO(v8:13012): Atomic MinorMC should not use ScavengeJob. Instead, we
// should schedule MinorMC tasks at a soft limit, which are used by atomic
// MinorMC, and to finalize concurrent MinorMC. The condition
// v8_flags.concurrent_minor_mc_marking can then be changed to
// v8_flags.minor_mc (here and at the RemoveAllocationObserver call site).
- minor_mc_task_observer_.reset(
- new MinorMCTaskObserver(this, MinorMCTaskObserver::kStepSize));
- new_space()->AddAllocationObserver(minor_mc_task_observer_.get());
+ minor_gc_task_observer_.reset(
+ new MinorMCIncrementalMarkingTaskObserver(this));
} else {
// ScavengeJob is used by atomic MinorMC and Scavenger.
- scavenge_job_.reset(new ScavengeJob());
- scavenge_task_observer_.reset(
- new ScavengeTaskObserver(this, ScavengeJob::kStepSize));
- new_space()->AddAllocationObserver(scavenge_task_observer_.get());
+ minor_gc_task_observer_.reset(new ScheduleMinorGCTaskObserver(this));
}
}
@@ -5661,9 +5600,6 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
if (v8_flags.stress_marking > 0) {
stress_marking_percentage_ = NextStressMarkingLimit();
- stress_marking_observer_ = new StressMarkingObserver(this);
- AddAllocationObserversToAllSpaces(stress_marking_observer_,
- stress_marking_observer_);
}
if (IsStressingScavenge()) {
stress_scavenge_observer_ = new StressScavengeObserver(this);
@@ -5679,23 +5615,15 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
}
#endif // V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
- if (isolate()->shared_space_isolate()) {
+ if (isolate()->has_shared_space()) {
Heap* heap = isolate()->shared_space_isolate()->heap();
shared_space_allocator_ = std::make_unique<ConcurrentAllocator>(
- main_thread_local_heap(), heap->shared_space_);
+ main_thread_local_heap(), heap->shared_space_,
+ ConcurrentAllocator::Context::kNotGC);
shared_allocation_space_ = heap->shared_space_;
shared_lo_allocation_space_ = heap->shared_lo_space_;
-
- } else if (isolate()->shared_isolate()) {
- Heap* shared_heap = isolate()->shared_isolate()->heap();
-
- shared_space_allocator_ = std::make_unique<ConcurrentAllocator>(
- main_thread_local_heap(), shared_heap->old_space());
-
- shared_allocation_space_ = shared_heap->old_space();
- shared_lo_allocation_space_ = shared_heap->lo_space();
}
main_thread_local_heap()->SetUpMainThread();
@@ -5721,6 +5649,9 @@ void Heap::InitializeOncePerProcess() {
HeapAllocator::InitializeOncePerProcess();
#endif
MemoryAllocator::InitializeOncePerProcess();
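+ // With --predictable, worklist order is made deterministic, presumably so
+ // that GC behavior is reproducible across runs.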
+ if (v8_flags.predictable) {
+ ::heap::base::WorklistBase::EnforcePredictableOrder();
+ }
}
void Heap::PrintMaxMarkingLimitReached() {
@@ -5734,12 +5665,34 @@ void Heap::PrintMaxNewSpaceSizeReached() {
}
int Heap::NextStressMarkingLimit() {
- // Reuse Heap-global mutex as this getter is called from different threads on
- // allocation slow paths.
- base::MutexGuard guard(relocation_mutex());
return isolate()->fuzzer_rng()->NextInt(v8_flags.stress_marking + 1);
}
+void Heap::WeakenDescriptorArrays(
+ GlobalHandleVector<DescriptorArray> strong_descriptor_arrays) {
+ if (incremental_marking()->IsMajorMarking()) {
+ // During incremental/concurrent marking regular DescriptorArray objects are
+ // treated with custom weakness. This weakness depends on
+ // DescriptorArray::raw_gc_state() which is not set up properly upon
+ // deserialization. The strong arrays are transitioned to weak ones at the
+ // end of the GC.
+ mark_compact_collector()->RecordStrongDescriptorArraysForWeakening(
+ std::move(strong_descriptor_arrays));
+ return;
+ }
+
+ // No GC is running, weaken the arrays right away.
+ DisallowGarbageCollection no_gc;
+ Map descriptor_array_map = ReadOnlyRoots(isolate()).descriptor_array_map();
+ for (auto it = strong_descriptor_arrays.begin();
+ it != strong_descriptor_arrays.end(); ++it) {
+ DescriptorArray array = it.raw();
+ DCHECK(array.IsStrongDescriptorArray());
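+ // Swapping in the regular DescriptorArray map turns the strong array back
+ // into one with the custom weakness described above.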
+ array.set_map_safe_transition_no_write_barrier(descriptor_array_map);
+ DCHECK_EQ(array.raw_gc_state(kRelaxedLoad), 0);
+ }
+}
+
void Heap::NotifyDeserializationComplete() {
PagedSpaceIterator spaces(this);
for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
@@ -5763,6 +5716,10 @@ void Heap::NotifyDeserializationComplete() {
need_to_remove_stress_concurrent_allocation_observer_ = true;
}
+ // Deserialization will never create objects in new space.
+ DCHECK_IMPLIES(new_space(), new_space()->Size() == 0);
+ DCHECK_IMPLIES(new_lo_space(), new_lo_space()->Size() == 0);
+
deserialization_complete_ = true;
}
@@ -5790,37 +5747,10 @@ void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
kMemoryReducerActivationThreshold &&
v8_flags.memory_reducer_for_small_heaps) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer()->NotifyPossibleGarbage(event);
- }
-}
-
-START_ALLOW_USE_DEPRECATED()
-
-void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- DCHECK_EQ(gc_state(), HeapState::NOT_IN_GC);
- // Setting a tracer is only supported when CppHeap is not used.
- DCHECK_IMPLIES(tracer, !cpp_heap_);
- local_embedder_heap_tracer()->SetRemoteTracer(tracer);
-}
-
-EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
- return local_embedder_heap_tracer()->remote_tracer();
-}
-
-EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
- if (is_current_gc_forced()) {
- return EmbedderHeapTracer::TraceFlags::kForced;
- } else if (ShouldReduceMemory()) {
- return EmbedderHeapTracer::TraceFlags::kReduceMemory;
+ memory_reducer()->NotifyPossibleGarbage();
}
- return EmbedderHeapTracer::TraceFlags::kNoFlags;
}
-END_ALLOW_USE_DEPRECATED()
-
void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
embedder_roots_handler_ = handler;
}
@@ -5830,14 +5760,13 @@ EmbedderRootsHandler* Heap::GetEmbedderRootsHandler() const {
}
void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
+ CHECK(!incremental_marking()->IsMarking());
CppHeap::From(cpp_heap)->AttachIsolate(isolate());
cpp_heap_ = cpp_heap;
- local_embedder_heap_tracer()->SetCppHeap(CppHeap::From(cpp_heap));
}
void Heap::DetachCppHeap() {
CppHeap::From(cpp_heap_)->DetachIsolate();
- local_embedder_heap_tracer()->SetCppHeap(nullptr);
cpp_heap_ = nullptr;
}
@@ -5847,27 +5776,15 @@ const cppgc::EmbedderStackState* Heap::overriden_stack_state() const {
}
void Heap::SetStackStart(void* stack_start) {
- stack().SetStackStart(stack_start);
-}
-
-::heap::base::Stack& Heap::stack() {
- return isolate_->thread_local_top()->stack_;
+#if V8_ENABLE_WEBASSEMBLY
+ stack().SetStackStart(stack_start,
+ v8_flags.experimental_wasm_stack_switching);
+#else
+ stack().SetStackStart(stack_start, false);
+#endif // V8_ENABLE_WEBASSEMBLY
}
-void Heap::RegisterExternallyReferencedObject(Address* location) {
- TracedHandles::Mark(location);
- Object object(*location);
- if (!object.IsHeapObject()) {
- // The embedder is not aware of whether numbers are materialized as heap
- // objects are just passed around as Smis.
- return;
- }
- HeapObject heap_object = HeapObject::cast(object);
- DCHECK(IsValidHeapObject(this, heap_object));
- DCHECK(incremental_marking()->IsMarking() ||
- mark_compact_collector()->in_use());
- mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
-}
+::heap::base::Stack& Heap::stack() { return isolate_->stack(); }
void Heap::StartTearDown() {
// Finish any ongoing sweeping to avoid stray background tasks still accessing
@@ -5893,20 +5810,13 @@ void Heap::StartTearDown() {
main_thread_local_heap()->FreeLinearAllocationArea();
FreeMainThreadSharedLinearAllocationAreas();
+}
- // {StartTearDown} is called fairly early during Isolate teardown, so it's
- // a good time to run heap verification (if requested), before starting to
- // tear down parts of the Isolate.
- if (v8_flags.verify_heap) {
- HeapVerifier::VerifyHeap(this);
-
- // If this is a client Isolate of a shared Isolate, verify that there are no
- // shared-to-local pointers before tearing down the client Isolate and
- // creating dangling pointers.
- if (Isolate* shared_isolate = isolate()->shared_isolate()) {
- HeapVerifier::VerifySharedHeap(shared_isolate->heap(), isolate());
- }
- }
+void Heap::ForceSharedGCWithEmptyStackForTesting() {
+ // No mutex or atomics as this variable is always set from only a single
+ // thread before invoking a shared GC. The shared GC then resets the flag
+ // while the initiating thread is guaranteed to wait on a condition variable.
+ force_shared_gc_with_empty_stack_for_testing_ = true;
}
void Heap::TearDownWithSharedHeap() {
@@ -5916,8 +5826,16 @@ void Heap::TearDownWithSharedHeap() {
// chunks are unprotected.
safepoint()->AssertMainThreadIsOnlyThread();
+ // Now that all threads are stopped, verify the heap before tearing down the
+ // heap/isolate.
+ HeapVerifier::VerifyHeapIfEnabled(this);
+
// Might use the external pointer which might be in the shared heap.
external_string_table_.TearDown();
+
+ // Publish shared object worklist for the main thread if incremental marking
+ // is enabled for the shared heap.
+ main_thread_local_heap()->marking_barrier()->PublishSharedIfNeeded();
}
void Heap::TearDown() {
@@ -5944,20 +5862,8 @@ void Heap::TearDown() {
}
}
- if (new_space()) {
- if (minor_mc_task_observer_) {
- DCHECK_NULL(scavenge_task_observer_);
- new_space()->RemoveAllocationObserver(minor_mc_task_observer_.get());
- } else {
- DCHECK_NOT_NULL(scavenge_task_observer_);
- new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
- }
- }
-
- scavenge_task_observer_.reset();
- scavenge_job_.reset();
-
- minor_mc_task_observer_.reset();
+ minor_gc_task_observer_.reset();
+ minor_gc_job_.reset();
if (need_to_remove_stress_concurrent_allocation_observer_) {
RemoveAllocationObserversFromAllSpaces(
@@ -5966,12 +5872,6 @@ void Heap::TearDown() {
}
stress_concurrent_allocation_observer_.reset();
- if (v8_flags.stress_marking > 0) {
- RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
- stress_marking_observer_);
- delete stress_marking_observer_;
- stress_marking_observer_ = nullptr;
- }
if (IsStressingScavenge()) {
new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
delete stress_scavenge_observer_;
@@ -6008,7 +5908,6 @@ void Heap::TearDown() {
live_object_stats_.reset();
dead_object_stats_.reset();
- local_embedder_heap_tracer_.reset();
embedder_roots_handler_ = nullptr;
if (cpp_heap_) {
@@ -6133,7 +6032,7 @@ void Heap::CompactWeakArrayLists() {
}
void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
- if (map->is_in_retained_map_list() || map->InSharedWritableHeap()) {
+ if (map->is_in_retained_map_list() || map->InWritableSharedSpace()) {
return;
}
@@ -6304,29 +6203,6 @@ PagedSpace* PagedSpaceIterator::Next() {
return nullptr;
}
-SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
-
-SpaceIterator::~SpaceIterator() = default;
-
-bool SpaceIterator::HasNext() {
- while (current_space_ <= LAST_MUTABLE_SPACE) {
- Space* space = heap_->space(current_space_);
- if (space) return true;
- ++current_space_;
- }
-
- // No more spaces left.
- return false;
-}
-
-Space* SpaceIterator::Next() {
- DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
- Space* space = heap_->space(current_space_++);
- DCHECK_NOT_NULL(space);
- return space;
-}
-
class HeapObjectsFilter {
public:
virtual ~HeapObjectsFilter() = default;
@@ -6388,8 +6264,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkPointers(start, end);
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
Object maybe_code = slot.load(code_cage_base());
HeapObject heap_object;
if (maybe_code.GetHeapObject(&heap_object)) {
@@ -6397,11 +6272,12 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ void VisitCodeTarget(RelocInfo* rinfo) final {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
MarkHeapObject(target);
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) final {
MarkHeapObject(rinfo->target_object(cage_base()));
}
@@ -6468,7 +6344,7 @@ HeapObjectIterator::HeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
safepoint_scope_(std::make_unique<SafepointScope>(
- heap->isolate(), heap->isolate()->is_shared_heap_isolate()
+ heap->isolate(), heap->isolate()->is_shared_space_isolate()
? SafepointKind::kGlobal
: SafepointKind::kIsolate)),
filtering_(filtering),
@@ -6854,90 +6730,6 @@ size_t Heap::NumberOfDetachedContexts() {
return detached_contexts().length() / 2;
}
-void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) {
- VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
-}
-
-void VerifyPointersVisitor::VisitPointers(HeapObject host,
- MaybeObjectSlot start,
- MaybeObjectSlot end) {
- VerifyPointers(host, start, end);
-}
-
-void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
- CodeObjectSlot slot) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Object maybe_code = slot.load(code_cage_base());
- HeapObject code;
- // The slot might contain smi during CodeDataContainer creation.
- if (maybe_code.GetHeapObject(&code)) {
- VerifyCodeObjectImpl(code);
- } else {
- CHECK(maybe_code.IsSmi());
- }
-}
-
-void VerifyPointersVisitor::VisitRootPointers(Root root,
- const char* description,
- FullObjectSlot start,
- FullObjectSlot end) {
- VerifyPointersImpl(start, end);
-}
-
-void VerifyPointersVisitor::VisitRootPointers(Root root,
- const char* description,
- OffHeapObjectSlot start,
- OffHeapObjectSlot end) {
- VerifyPointersImpl(start, end);
-}
-
-void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK(IsValidHeapObject(heap_, heap_object));
- CHECK(heap_object.map(cage_base()).IsMap());
-}
-
-void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- CHECK(IsValidCodeObject(heap_, heap_object));
- CHECK(heap_object.map(cage_base()).IsMap());
- CHECK(heap_object.map(cage_base()).instance_type() == CODE_TYPE);
-}
-
-template <typename TSlot>
-void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
- for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.load(cage_base());
- HeapObject heap_object;
- if (object.GetHeapObject(&heap_object)) {
- VerifyHeapObjectImpl(heap_object);
- } else {
- CHECK(object.IsSmi() || object.IsCleared() ||
- MapWord::IsPacked(object.ptr()));
- }
- }
-}
-
-void VerifyPointersVisitor::VerifyPointers(HeapObject host,
- MaybeObjectSlot start,
- MaybeObjectSlot end) {
- // If this DCHECK fires then you probably added a pointer field
- // to one of objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
- // this by moving that object to POINTER_VISITOR_ID_LIST.
- DCHECK_EQ(ObjectFields::kMaybePointers,
- Map::ObjectFieldsFrom(host.map(cage_base()).visitor_id()));
- VerifyPointersImpl(start, end);
-}
-
-void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- VerifyHeapObjectImpl(target);
-}
-
-void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
- VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
-}
-
bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
// Object migration is governed by the following rules:
//
@@ -6961,7 +6753,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
case OLD_SPACE:
return dst == OLD_SPACE;
case CODE_SPACE:
- return dst == CODE_SPACE && type == CODE_TYPE;
+ return dst == CODE_SPACE && type == INSTRUCTION_STREAM_TYPE;
case SHARED_SPACE:
return dst == SHARED_SPACE;
case LO_SPACE:
@@ -6975,9 +6767,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
}
size_t Heap::EmbedderAllocationCounter() const {
- return local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->allocated_size()
- : 0;
+ return cpp_heap_ ? CppHeap::From(cpp_heap_)->allocated_size() : 0;
}
void Heap::CreateObjectStats() {
@@ -6990,59 +6780,53 @@ void Heap::CreateObjectStats() {
}
}
-Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
+Map Heap::GcSafeMapOfHeapObject(HeapObject object) {
PtrComprCageBase cage_base(isolate());
MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- PtrComprCageBase code_cage_base(isolate()->code_cage_base());
-#else
- PtrComprCageBase code_cage_base = cage_base;
-#endif
- return map_word.ToForwardingAddress(code_cage_base).map(cage_base);
+ return map_word.ToForwardingAddress(object).map(cage_base);
}
return map_word.ToMap();
}
-CodeLookupResult Heap::GcSafeCastToCode(HeapObject object,
- Address inner_pointer) {
- Code code = Code::unchecked_cast(object);
- DCHECK(!code.is_null());
- DCHECK(GcSafeCodeContains(code, inner_pointer));
- return CodeLookupResult{code};
+GcSafeCode Heap::GcSafeGetCodeFromInstructionStream(
+ HeapObject instruction_stream, Address inner_pointer) {
+ InstructionStream istream =
+ InstructionStream::unchecked_cast(instruction_stream);
+ DCHECK(!istream.is_null());
+ DCHECK(GcSafeInstructionStreamContains(istream, inner_pointer));
+ return GcSafeCode::unchecked_cast(istream.raw_code(kAcquireLoad));
}
-bool Heap::GcSafeCodeContains(Code code, Address addr) {
- Map map = GcSafeMapOfCodeSpaceObject(code);
- DCHECK(map == ReadOnlyRoots(this).code_map());
- Builtin maybe_builtin =
+bool Heap::GcSafeInstructionStreamContains(InstructionStream istream,
+ Address addr) {
+ Map map = GcSafeMapOfHeapObject(istream);
+ DCHECK_EQ(map, ReadOnlyRoots(this).instruction_stream_map());
+
+ Builtin builtin_lookup_result =
OffHeapInstructionStream::TryLookupCode(isolate(), addr);
- if (Builtins::IsBuiltinId(maybe_builtin) &&
- code.builtin_id() == maybe_builtin) {
- return true;
+ if (Builtins::IsBuiltinId(builtin_lookup_result)) {
+ // Builtins don't have InstructionStream objects.
+ DCHECK(!Builtins::IsBuiltinId(istream.code(kAcquireLoad).builtin_id()));
+ return false;
}
- Address start = code.address();
- Address end = code.address() + code.SizeFromMap(map);
+
+ Address start = istream.address();
+ Address end = start + istream.SizeFromMap(map);
return start <= addr && addr < end;
}
-CodeLookupResult Heap::GcSafeFindCodeForInnerPointer(
- Address inner_pointer, bool die_on_unsuccessful_lookup) {
- Builtin maybe_builtin =
- OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
- if (Builtins::IsBuiltinId(maybe_builtin)) {
- return CodeLookupResult{isolate()->builtins()->code(maybe_builtin)};
- }
-
+base::Optional<InstructionStream>
+Heap::GcSafeTryFindInstructionStreamForInnerPointer(Address inner_pointer) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
Address start = tp_heap_->GetObjectFromInnerPointer(inner_pointer);
- return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
+ return InstructionStream::unchecked_cast(HeapObject::FromAddress(start));
}
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
- return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
+ return InstructionStream::unchecked_cast(large_page->GetObject());
}
if (V8_LIKELY(code_space()->Contains(inner_pointer))) {
@@ -7053,59 +6837,48 @@ CodeLookupResult Heap::GcSafeFindCodeForInnerPointer(
Address start =
page->GetCodeObjectRegistry()->GetCodeObjectStartFromInnerAddress(
inner_pointer);
- return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
+ return InstructionStream::unchecked_cast(HeapObject::FromAddress(start));
}
- if (!die_on_unsuccessful_lookup) return {};
+ return {};
+}
- // Put useful info on the stack for debugging and crash the process.
+base::Optional<GcSafeCode> Heap::GcSafeTryFindCodeForInnerPointer(
+ Address inner_pointer) {
+ Builtin maybe_builtin =
+ OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (Builtins::IsBuiltinId(maybe_builtin)) {
+ return GcSafeCode::cast(isolate()->builtins()->code(maybe_builtin));
+ }
- // TODO(1241665): Remove once the issue is solved.
- std::shared_ptr<CodeRange> code_range = CodeRange::GetProcessWideCodeRange();
- void* code_range_embedded_blob_code_copy =
- code_range ? code_range->embedded_blob_code_copy() : nullptr;
- Address flags = (isolate()->is_short_builtin_calls_enabled() ? 1 : 0) |
- (code_range ? 2 : 0) |
- static_cast<Address>(max_old_generation_size());
+ base::Optional<InstructionStream> maybe_istream =
+ GcSafeTryFindInstructionStreamForInnerPointer(inner_pointer);
+ if (!maybe_istream) return {};
- isolate()->PushParamsAndDie(
- reinterpret_cast<void*>(inner_pointer),
- const_cast<uint8_t*>(isolate()->embedded_blob_code()),
- const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
- code_range_embedded_blob_code_copy,
- reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()),
- reinterpret_cast<void*>(flags));
+ return GcSafeGetCodeFromInstructionStream(*maybe_istream, inner_pointer);
+}
- UNREACHABLE();
+Code Heap::FindCodeForInnerPointer(Address inner_pointer) {
+ return GcSafeFindCodeForInnerPointer(inner_pointer).UnsafeCastToCode();
}
-CodeLookupResult Heap::GcSafeFindCodeForInnerPointerForPrinting(
+GcSafeCode Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+ base::Optional<GcSafeCode> maybe_code =
+ GcSafeTryFindCodeForInnerPointer(inner_pointer);
+ // Callers expect that the code object is found.
+ CHECK(maybe_code.has_value());
+ return GcSafeCode::unchecked_cast(maybe_code.value());
+}
+
+base::Optional<Code> Heap::TryFindCodeForInnerPointerForPrinting(
Address inner_pointer) {
if (InSpaceSlow(inner_pointer, i::CODE_SPACE) ||
InSpaceSlow(inner_pointer, i::CODE_LO_SPACE) ||
i::OffHeapInstructionStream::PcIsOffHeap(isolate(), inner_pointer)) {
- CodeLookupResult result =
- GcSafeFindCodeForInnerPointer(inner_pointer, false);
- if (result.IsFound()) return result;
- }
-
- // During normal execution builtins from RO_SPACE can't appear on the stack
- // as instruction address because RO_SPACE is not executable. However during
- // debugging "jco" macro might be called with an address from a readonly
- // builtin trampoline.
-
- if (read_only_space()->ContainsSlow(inner_pointer)) {
- // TODO(delphick): Possibly optimize this as it iterates over all pages in
- // RO_SPACE instead of just the one containing the address.
- ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
- for (HeapObject object = iterator.Next(); !object.is_null();
- object = iterator.Next()) {
- if (!object.IsCode()) continue;
- Code code = Code::cast(object);
- if (inner_pointer >= code.address() &&
- inner_pointer < code.address() + code.Size()) {
- return CodeLookupResult{code};
- }
+ base::Optional<GcSafeCode> maybe_code =
+ GcSafeTryFindCodeForInnerPointer(inner_pointer);
+ if (maybe_code.has_value()) {
+ return maybe_code->UnsafeCastToCode();
}
}
return {};
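
[Editor's note] The hunks above split the inner-pointer lookup into a Try variant returning base::Optional<GcSafeCode> and a checked variant that CHECKs success, with the printing helper (used by the gdb `jco` macro) built on the Try variant. A minimal stand-alone sketch of that pattern, using std::optional and a registry keyed by object start address; the names and the registry are illustrative, not V8 code.

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <optional>

    struct CodeObject {
      uintptr_t start;
      uintptr_t size;
    };

    // Hypothetical registry of code objects keyed by start address.
    static std::map<uintptr_t, CodeObject> code_registry;

    // Returns the code object containing inner_pointer, if any.
    std::optional<CodeObject> TryFindCodeForInnerPointer(
        uintptr_t inner_pointer) {
      auto it = code_registry.upper_bound(inner_pointer);
      if (it == code_registry.begin()) return std::nullopt;
      --it;  // Last object whose start is <= inner_pointer.
      const CodeObject& code = it->second;
      if (inner_pointer < code.start + code.size) return code;
      return std::nullopt;
    }

    // Checked variant: callers (e.g. the GC walking the stack) expect a hit.
    CodeObject FindCodeForInnerPointer(uintptr_t inner_pointer) {
      std::optional<CodeObject> maybe_code =
          TryFindCodeForInnerPointer(inner_pointer);
      assert(maybe_code.has_value());
      return *maybe_code;
    }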
@@ -7121,12 +6894,9 @@ void Heap::CombinedGenerationalAndSharedBarrierSlow(HeapObject object,
Heap::GenerationalBarrierSlow(object, slot, value);
} else {
- DCHECK(value_chunk->InSharedHeap());
-
- heap_internals::MemoryChunk* object_chunk =
- heap_internals::MemoryChunk::FromHeapObject(object);
- if (!object_chunk->InSharedHeap())
- Heap::SharedHeapBarrierSlow(object, slot);
+ DCHECK(value_chunk->InWritableSharedSpace());
+ DCHECK(!object.InWritableSharedSpace());
+ Heap::SharedHeapBarrierSlow(object, slot);
}
}
@@ -7139,13 +6909,9 @@ void Heap::CombinedGenerationalAndSharedEphemeronBarrierSlow(
table_chunk->heap()->RecordEphemeronKeyWrite(table, slot);
} else {
- DCHECK(value_chunk->InSharedHeap());
-
- heap_internals::MemoryChunk* table_chunk =
- heap_internals::MemoryChunk::FromHeapObject(table);
- if (!table_chunk->InSharedHeap()) {
- Heap::SharedHeapBarrierSlow(table, slot);
- }
+ DCHECK(value_chunk->InWritableSharedSpace());
+ DCHECK(!table.InWritableSharedSpace());
+ Heap::SharedHeapBarrierSlow(table, slot);
}
}
@@ -7157,7 +6923,7 @@ void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
void Heap::SharedHeapBarrierSlow(HeapObject object, Address slot) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- DCHECK(!chunk->InSharedHeap());
+ DCHECK(!chunk->InWritableSharedSpace());
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(chunk, slot);
}
@@ -7166,6 +6932,7 @@ void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
if (v8_flags.minor_mc) {
// Minor MC lacks support for specialized generational ephemeron barriers.
// The regular write barrier works as well but keeps more memory alive.
+ // TODO(v8:12612): Add support to MinorMC.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(table);
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else {
@@ -7201,12 +6968,13 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
static_assert(!(kModeMask & kDoEvacuationSlotRecording) ||
(kModeMask & kDoMarking));
- MarkingBarrier* marking_barrier = WriteBarrier::CurrentMarkingBarrier(this);
- MarkCompactCollector* collector = this->mark_compact_collector();
+ MarkingBarrier* marking_barrier = nullptr;
- CodeTPageHeaderModificationScope rwx_write_scope(
- "Marking CodeT objects might require write access to the CodeT page "
- "header");
+ if (kModeMask & kDoMarking) {
+ marking_barrier = WriteBarrier::CurrentMarkingBarrier(object);
+ }
+
+ MarkCompactCollector* collector = this->mark_compact_collector();
for (TSlot slot = start_slot; slot < end_slot; ++slot) {
typename TSlot::TObject value = *slot;
@@ -7217,14 +6985,14 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
if (Heap::InYoungGeneration(value_heap_object)) {
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
source_page, slot.address());
- } else if (value_heap_object.InSharedWritableHeap()) {
+ } else if (value_heap_object.InWritableSharedSpace()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(
source_page, slot.address());
}
}
- if ((kModeMask & kDoMarking) &&
- marking_barrier->MarkValue(object, value_heap_object)) {
+ if (kModeMask & kDoMarking) {
+ marking_barrier->MarkValue(object, value_heap_object);
if (kModeMask & kDoEvacuationSlotRecording) {
collector->RecordSlot(source_page, HeapObjectSlot(slot),
value_heap_object);
@@ -7292,11 +7060,10 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
}
}
-void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
- HeapObject object) {
+void Heap::GenerationalBarrierForCodeSlow(RelocInfo* rinfo, HeapObject object) {
DCHECK(InYoungGeneration(object));
const MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::ProcessRelocInfo(host, rinfo, object);
+ MarkCompactCollector::ProcessRelocInfo(rinfo, object);
RememberedSet<OLD_TO_NEW>::InsertTyped(info.memory_chunk, info.slot_type,
info.offset);
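
[Editor's note] In the range write barrier above, the remembered-set target now depends only on where the referenced value lives: young-generation values are recorded in OLD_TO_NEW, values in the writable shared space in OLD_TO_SHARED. A compact stand-alone model of that dispatch; the enum names are illustrative, not the V8 types.

    enum class ValueLocation { kYoungGeneration, kWritableSharedSpace, kOther };
    enum class RememberedSetKind { kOldToNew, kOldToShared, kNone };

    // Mirrors the dispatch in WriteBarrierForRangeImpl: record the slot in the
    // remembered set matching the space the referenced value lives in.
    RememberedSetKind SelectRememberedSet(ValueLocation value_location) {
      switch (value_location) {
        case ValueLocation::kYoungGeneration:
          // An old-to-young reference must be remembered for minor GCs.
          return RememberedSetKind::kOldToNew;
        case ValueLocation::kWritableSharedSpace:
          // An old-to-shared reference must be remembered for shared GCs.
          return RememberedSetKind::kOldToShared;
        case ValueLocation::kOther:
          return RememberedSetKind::kNone;
      }
      return RememberedSetKind::kNone;
    }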
@@ -7422,11 +7189,23 @@ void Heap::FinishSweepingIfOutOfWork() {
}
void Heap::EnsureSweepingCompleted(SweepingForcedFinalizationMode mode) {
+ CompleteArrayBufferSweeping(this);
+
if (sweeper()->sweeping_in_progress()) {
- TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
+ // Get the scope id before finishing sweeping since it will be reset
+ // afterwards.
+ const auto new_space_sweeping_scope_id =
+ sweeper()->GetTracingScopeForCompleteYoungSweep();
sweeper()->EnsureCompleted();
+
+ if (v8_flags.minor_mc && new_space()) {
+ TRACE_GC_EPOCH(tracer(), new_space_sweeping_scope_id, ThreadKind::kMain);
+ paged_new_space()->paged_space()->RefillFreeList();
+ }
+
+ TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
old_space()->RefillFreeList();
{
CodePageHeaderModificationScope rwx_write_scope(
@@ -7438,14 +7217,10 @@ void Heap::EnsureSweepingCompleted(SweepingForcedFinalizationMode mode) {
shared_space()->RefillFreeList();
}
- if (v8_flags.minor_mc && new_space()) {
- paged_new_space()->paged_space()->RefillFreeList();
- }
-
tracer()->NotifyFullSweepingCompleted();
#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap && !evacuation()) {
+ if (v8_flags.verify_heap) {
FullEvacuationVerifier verifier(this);
verifier.Run();
}
@@ -7465,22 +7240,21 @@ void Heap::EnsureSweepingCompleted(SweepingForcedFinalizationMode mode) {
}
void Heap::PauseSweepingAndEnsureYoungSweepingCompleted() {
- if (sweeper()->sweeping_in_progress()) {
- TRACE_GC_EPOCH(tracer(), sweeper()->GetTracingScopeForCompleteYoungSweep(),
- ThreadKind::kMain);
+ if (!sweeper()->sweeping_in_progress()) return;
+ TRACE_GC_EPOCH(tracer(), sweeper()->GetTracingScopeForCompleteYoungSweep(),
+ ThreadKind::kMain);
- sweeper()->PauseAndEnsureNewSpaceCompleted();
- paged_new_space()->paged_space()->RefillFreeList();
+ sweeper()->PauseAndEnsureNewSpaceCompleted();
+ paged_new_space()->paged_space()->RefillFreeList();
- tracer()->NotifyYoungSweepingCompleted();
+ tracer()->NotifyYoungSweepingCompleted();
#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap && !evacuation()) {
- YoungGenerationEvacuationVerifier verifier(this);
- verifier.Run();
- }
-#endif
+ if (v8_flags.verify_heap) {
+ YoungGenerationEvacuationVerifier verifier(this);
+ verifier.Run();
}
+#endif
}
void Heap::DrainSweepingWorklistForSpace(AllocationSpace space) {
@@ -7490,30 +7264,23 @@ void Heap::DrainSweepingWorklistForSpace(AllocationSpace space) {
EmbedderStackStateScope::EmbedderStackStateScope(Heap* heap, Origin origin,
StackState stack_state)
- : local_tracer_(heap->local_embedder_heap_tracer()),
- old_stack_state_(local_tracer_->embedder_stack_state_) {
+ : heap_(heap), old_stack_state_(heap_->embedder_stack_state_) {
if (origin == kImplicitThroughTask && heap->overriden_stack_state()) {
stack_state = *heap->overriden_stack_state();
}
- local_tracer_->embedder_stack_state_ = stack_state;
+ heap_->embedder_stack_state_ = stack_state;
}
// static
EmbedderStackStateScope EmbedderStackStateScope::ExplicitScopeForTesting(
- LocalEmbedderHeapTracer* local_tracer, StackState stack_state) {
- return EmbedderStackStateScope(local_tracer, stack_state);
-}
-
-EmbedderStackStateScope::EmbedderStackStateScope(
- LocalEmbedderHeapTracer* local_tracer, StackState stack_state)
- : local_tracer_(local_tracer),
- old_stack_state_(local_tracer_->embedder_stack_state_) {
- local_tracer_->embedder_stack_state_ = stack_state;
+ Heap* heap, StackState stack_state) {
+ return EmbedderStackStateScope(heap, Origin::kExplicitInvocation,
+ stack_state);
}
EmbedderStackStateScope::~EmbedderStackStateScope() {
- local_tracer_->embedder_stack_state_ = old_stack_state_;
+ heap_->embedder_stack_state_ = old_stack_state_;
}
CppClassNamesAsHeapObjectNameScope::CppClassNamesAsHeapObjectNameScope(
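
[Editor's note] The scope rework above drops LocalEmbedderHeapTracer as the holder of the embedder stack state; the scope now saves the Heap's current state in its constructor and restores it in its destructor. A self-contained RAII sketch of that save/restore pattern follows; the Heap stand-in and enum are illustrative, not V8 code.

    enum class StackState { kMayContainHeapPointers, kNoHeapPointers };

    struct HeapLike {
      StackState embedder_stack_state = StackState::kMayContainHeapPointers;
    };

    class StackStateScope {
     public:
      StackStateScope(HeapLike* heap, StackState state)
          : heap_(heap), old_state_(heap->embedder_stack_state) {
        heap_->embedder_stack_state = state;
      }
      ~StackStateScope() { heap_->embedder_stack_state = old_state_; }

      StackStateScope(const StackStateScope&) = delete;
      StackStateScope& operator=(const StackStateScope&) = delete;

     private:
      HeapLike* const heap_;
      const StackState old_state_;
    };

    // Usage: while the scope is alive, GCs may assume the stack holds no heap
    // pointers; the previous state is restored automatically on scope exit.
    void Example(HeapLike* heap) {
      StackStateScope scope(heap, StackState::kNoHeapPointers);
      // ... trigger a garbage collection here ...
    }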
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 2485d78671..f4b7d40362 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -107,12 +107,12 @@ class HeapStats;
class Isolate;
class JSFinalizationRegistry;
class LinearAllocationArea;
-class LocalEmbedderHeapTracer;
class LocalHeap;
class MemoryAllocator;
class MemoryChunk;
class MemoryMeasurement;
class MemoryReducer;
+class MinorGCJob;
class MinorMarkCompactCollector;
class NopRwxMemoryWriteScope;
class ObjectIterator;
@@ -124,7 +124,6 @@ class ReadOnlyHeap;
class RootVisitor;
class RwxMemoryWriteScope;
class SafepointScope;
-class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class SharedLargeObjectSpace;
@@ -148,8 +147,6 @@ enum class InvalidateRecordedSlots { kYes, kNo };
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
-enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
-
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
enum class YoungGenerationHandling {
@@ -173,8 +170,8 @@ enum class SkipRoot {
kMainThreadHandles,
kUnserializable,
kWeak,
- kTopOfStack,
kConservativeStack,
+ kTopOfStack,
};
enum UnprotectMemoryOrigin {
@@ -251,9 +248,9 @@ class Heap {
class ExternalMemoryAccounting {
public:
- int64_t total() { return total_.load(std::memory_order_relaxed); }
- int64_t limit() { return limit_.load(std::memory_order_relaxed); }
- int64_t low_since_mark_compact() {
+ int64_t total() const { return total_.load(std::memory_order_relaxed); }
+ int64_t limit() const { return limit_.load(std::memory_order_relaxed); }
+ int64_t low_since_mark_compact() const {
return low_since_mark_compact_.load(std::memory_order_relaxed);
}
@@ -272,7 +269,7 @@ class Heap {
return amount;
}
- int64_t AllocatedSinceMarkCompact() {
+ int64_t AllocatedSinceMarkCompact() const {
int64_t total_bytes = total();
int64_t low_since_mark_compact_bytes = low_since_mark_compact();
@@ -488,13 +485,19 @@ class Heap {
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
- Code host, RelocInfo* rinfo, HeapObject value);
+ RelocInfo* rinfo, HeapObject value);
V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);
// Notifies the heap that is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
+ // Weakens StrongDescriptorArray objects into regular DescriptorArray objects.
+ //
+ // Thread-safe.
+ void WeakenDescriptorArrays(
+ GlobalHandleVector<DescriptorArray> strong_descriptor_arrays);
+
void NotifyBootstrapComplete();
void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
@@ -505,7 +508,7 @@ class Heap {
inline Address* OldSpaceAllocationLimitAddress();
size_t NewSpaceSize();
- size_t NewSpaceCapacity();
+ size_t NewSpaceCapacity() const;
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
@@ -522,13 +525,21 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. This method will verify that no slots
// are recorded in this free memory.
- V8_EXPORT_PRIVATE void CreateFillerObjectAt(Address addr, int size);
+ V8_EXPORT_PRIVATE void CreateFillerObjectAt(
+ Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode =
+ ClearFreedMemoryMode::kDontClearFreedMemory);
// Initialize a filler object at a specific address. Unlike
// `CreateFillerObjectAt` this method will not perform slot verification since
// this would race on background threads.
void CreateFillerObjectAtBackground(Address addr, int size);
+ // This method is used by the sweeper on free memory ranges to make the page
+ // iterable again. Unlike `CreateFillerObjectAt` this method will not verify
+ // slots since the sweeper can run concurrently.
+ void CreateFillerObjectAtSweeper(Address addr, int size);
+
template <typename T>
void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
@@ -653,15 +664,21 @@ class Heap {
}
void SetGCState(HeapState state);
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
+ bool IsInGC() const {
+ return gc_state() != NOT_IN_GC && gc_state() != TEAR_DOWN;
+ }
bool force_oom() const { return force_oom_; }
bool ignore_local_gc_requests() const {
return ignore_local_gc_requests_depth_ > 0;
}
- inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+ bool IsAllocationObserverActive() const {
+ return pause_allocation_observers_depth_ == 0;
+ }
bool IsGCWithStack() const;
+ V8_EXPORT_PRIVATE void ForceSharedGCWithEmptyStackForTesting();
// Performs GC after background allocation failure.
void CollectGarbageForBackground(LocalHeap* local_heap);
@@ -670,7 +687,8 @@ class Heap {
// Support for the API.
//
- void CreateApiObjects();
+ void CreateReadOnlyApiObjects();
+ void CreateMutableApiObjects();
// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
@@ -755,7 +773,6 @@ class Heap {
bool HasLowAllocationRate();
bool HasHighFragmentation();
- bool HasHighFragmentation(size_t used, size_t committed);
void ActivateMemoryReducerIfNeeded();
@@ -813,7 +830,8 @@ class Heap {
// Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
- bool CreateHeapObjects();
+ bool CreateReadOnlyHeapObjects();
+ bool CreateMutableHeapObjects();
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
@@ -854,8 +872,8 @@ class Heap {
return shared_lo_allocation_space_;
}
- inline PagedSpace* paged_space(int idx);
- inline Space* space(int idx);
+ inline PagedSpace* paged_space(int idx) const;
+ inline Space* space(int idx) const;
// ===========================================================================
// Getters to other components. ==============================================
@@ -894,7 +912,13 @@ class Heap {
// range if it exists or empty region otherwise.
const base::AddressRegion& code_region();
- CodeRange* code_range() { return code_range_.get(); }
+ CodeRange* code_range() {
+#if V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ return code_range_;
+#else
+ return code_range_.get();
+#endif
+ }
// The base of the code range if it exists or null address.
inline Address code_range_base();
@@ -975,7 +999,7 @@ class Heap {
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- V8_EXPORT_PRIVATE bool CollectGarbage(
+ V8_EXPORT_PRIVATE void CollectGarbage(
AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -1029,16 +1053,10 @@ class Heap {
// garbage collection and is usually only performed as part of
// (de)serialization or heap verification.
- // The order of this enumeration's elements is important: they should go from
- // more precise to more conservative modes for stack scanning, so that we can
- // use std::min to override for testing purposes.
- enum class ScanStackMode { kNone, kFromMarker, kComplete };
-
// Iterates over the strong roots and the weak roots.
void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options);
- void IterateRootsFromStackIncludingClient(RootVisitor* v, ScanStackMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
@@ -1047,7 +1065,13 @@ class Heap {
void IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateWeakGlobalHandles(RootVisitor* v);
void IterateBuiltins(RootVisitor* v);
- void IterateStackRoots(RootVisitor* v, ScanStackMode mode);
+
+ void IterateStackRoots(RootVisitor* v);
+
+ enum class ScanStackMode { kFromMarker, kComplete };
+ void IterateConservativeStackRoots(RootVisitor* v, ScanStackMode stack_mode);
+ void IterateConservativeStackRootsIncludingClients(RootVisitor* v,
+ ScanStackMode stack_mode);
// ===========================================================================
// Remembered set API. =======================================================
@@ -1075,12 +1099,6 @@ class Heap {
: kNoGCFlags;
}
- // Start incremental marking and ensure that idle time handler can perform
- // incremental steps.
- V8_EXPORT_PRIVATE void StartIdleIncrementalMarking(
- GarbageCollectionReason gc_reason,
- GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
-
// Starts incremental marking assuming incremental marking is currently
// stopped.
V8_EXPORT_PRIVATE void StartIncrementalMarking(
@@ -1099,7 +1117,7 @@ class Heap {
GarbageCollectionReason gc_reason);
V8_EXPORT_PRIVATE void CompleteSweepingFull();
- void CompleteSweepingYoung(GarbageCollector collector);
+ void CompleteSweepingYoung();
// Ensures that sweeping is finished for that object's page.
void EnsureSweepingCompletedForObject(HeapObject object);
@@ -1124,6 +1142,7 @@ class Heap {
void NotifyObjectLayoutChange(
HeapObject object, const DisallowGarbageCollection&,
InvalidateRecordedSlots invalidate_recorded_slots, int new_size = 0);
+ V8_EXPORT_PRIVATE static void NotifyObjectLayoutChangeDone(HeapObject object);
// The runtime uses this function to inform the GC of object size changes. The
// GC will fill this area with a filler object and might clear recorded slots
@@ -1143,31 +1162,9 @@ class Heap {
void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
void SetInterpreterEntryReturnPCOffset(int pc_offset);
- // Invalidates references in the given {code} object that are referenced
- // transitively from the deoptimization data. Mutates write-protected code.
- void InvalidateCodeDeoptimizationData(Code code);
-
void DeoptMarkedAllocationSites();
// ===========================================================================
- // Embedder heap tracer support. =============================================
- // ===========================================================================
-
- LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
- return local_embedder_heap_tracer_.get();
- }
-
- START_ALLOW_USE_DEPRECATED()
-
- V8_EXPORT_PRIVATE void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
- EmbedderHeapTracer* GetEmbedderHeapTracer() const;
- EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
-
- END_ALLOW_USE_DEPRECATED()
-
- void RegisterExternallyReferencedObject(Address* location);
-
- // ===========================================================================
// Unified heap (C++) support. ===============================================
// ===========================================================================
@@ -1237,7 +1234,7 @@ class Heap {
V8_EXPORT_PRIVATE bool ContainsCode(HeapObject value) const;
// Checks whether object resides in the non-read-only shared heap.
- static inline bool InSharedWritableHeap(MaybeObject object);
+ static inline bool InWritableSharedSpace(MaybeObject object);
// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
@@ -1251,10 +1248,6 @@ class Heap {
// Currently used by tests, serialization and heap verification only.
V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;
- // Returns true when this heap is shared.
- V8_EXPORT_PRIVATE bool IsShared() const;
- V8_EXPORT_PRIVATE bool ShouldMarkSharedHeap() const;
-
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;
@@ -1287,7 +1280,8 @@ class Heap {
size_t NumberOfDetachedContexts();
// ===========================================================================
- // Code statistics. ==========================================================
+ // Code statistics.
+ // ==========================================================
// ===========================================================================
// Collect code (Code and BytecodeArray objects) statistics.
@@ -1298,7 +1292,7 @@ class Heap {
// ===========================================================================
// Returns the maximum amount of memory reserved for the heap.
- V8_EXPORT_PRIVATE size_t MaxReserved();
+ V8_EXPORT_PRIVATE size_t MaxReserved() const;
size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size(); }
@@ -1327,7 +1321,7 @@ class Heap {
size_t Capacity();
// Returns the capacity of the old generation.
- V8_EXPORT_PRIVATE size_t OldGenerationCapacity();
+ V8_EXPORT_PRIVATE size_t OldGenerationCapacity() const;
// Returns the amount of memory currently held alive by the unmapper.
size_t CommittedMemoryOfUnmapper();
@@ -1445,18 +1439,18 @@ class Heap {
// Returns the size of objects residing in non-new spaces.
// Excludes external memory held by those objects.
- V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
+ V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects() const;
// Returns the size of objects held by the EmbedderHeapTracer.
V8_EXPORT_PRIVATE size_t EmbedderSizeOfObjects() const;
// Returns the global size of objects (embedder + V8 non-new spaces).
- V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
+ V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects() const;
// We allow incremental marking to overshoot the V8 and global allocation
// limit for performance reasons. If the overshoot is too large then we are
// more eager to finalize incremental marking.
- bool AllocationLimitOvershotByLargeMargin();
+ bool AllocationLimitOvershotByLargeMargin() const;
// Return the maximum size objects can be before having to allocate them as
// large objects. This takes into account allocating in the code space for
@@ -1480,8 +1474,10 @@ class Heap {
void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
void* data);
- void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
- void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
+ void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags,
+ GCTracer::Scope::ScopeId scope_id);
+ void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags,
+ GCTracer::Scope::ScopeId scope_id);
// ===========================================================================
// Allocation methods. =======================================================
@@ -1559,36 +1555,30 @@ class Heap {
// Stack frame support. ======================================================
// ===========================================================================
- // Searches for compiled code or embedded builtins code object by given
- // interior pointer.
- // Crashes process on unsuccessful lookup if {die_on_unsuccessful_lookup}
- // is true. All code lookups made by GC must succeed.
- CodeLookupResult GcSafeFindCodeForInnerPointer(
- Address inner_pointer, bool die_on_unsuccessful_lookup = true);
-
- // Same as GcSafeFindCodeForInnerPointer() but it doesn't crash the process
- // on unsuccessful lookup.
- // It's intended to be used only from gdb's 'jco' macro.
- CodeLookupResult GcSafeFindCodeForInnerPointerForPrinting(
+ // Searches for a Code object by the given interior pointer.
+ V8_EXPORT_PRIVATE Code FindCodeForInnerPointer(Address inner_pointer);
+ // Use the GcSafe family of functions if called while GC is in progress.
+ GcSafeCode GcSafeFindCodeForInnerPointer(Address inner_pointer);
+ base::Optional<GcSafeCode> GcSafeTryFindCodeForInnerPointer(
+ Address inner_pointer);
+ base::Optional<InstructionStream>
+ GcSafeTryFindInstructionStreamForInnerPointer(Address inner_pointer);
+ // Only intended for use from the `jco` gdb macro.
+ base::Optional<Code> TryFindCodeForInnerPointerForPrinting(
Address inner_pointer);
- // Returns true if {addr} is contained within {code} and false otherwise.
- // Mostly useful for debugging.
- bool GcSafeCodeContains(Code code, Address addr);
-
- // Casts a heap object to a code object and checks if the inner_pointer is
- // within the object.
- CodeLookupResult GcSafeCastToCode(HeapObject object, Address inner_pointer);
-
- // Returns the map of an object. Can be used during garbage collection, i.e.
- // it supports a forwarded map. Fails if the map is not the code map.
- Map GcSafeMapOfCodeSpaceObject(HeapObject object);
+ // Returns true if {addr} is contained within {instruction_stream} and false
+ // otherwise. Mostly useful for debugging.
+ bool GcSafeInstructionStreamContains(InstructionStream instruction_stream,
+ Address addr);
// ===========================================================================
// Sweeping. =================================================================
// ===========================================================================
- bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
+ bool sweeping_in_progress() const {
+ return sweeper_ && sweeper_->sweeping_in_progress();
+ }
void FinishSweepingIfOutOfWork();
@@ -1603,10 +1593,6 @@ class Heap {
void DrainSweepingWorklistForSpace(AllocationSpace space);
- void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
-
- bool evacuation() const { return evacuation_; }
-
// =============================================================================
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
@@ -1656,8 +1642,9 @@ class Heap {
// over all objects.
V8_EXPORT_PRIVATE void MakeHeapIterable();
- V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);
- V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
+ V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(
+ size_t size) const;
+ V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size) const;
inline bool ShouldReduceMemory() const {
return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
@@ -1671,7 +1658,9 @@ class Heap {
AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
- PretenturingHandler* pretenuring_handler() { return &pretenuring_handler_; }
+ PretenuringHandler* pretenuring_handler() { return &pretenuring_handler_; }
+
+ bool IsInlineAllocationEnabled() const { return inline_allocation_enabled_; }
private:
class AllocationTrackerForDebugging;
@@ -1709,6 +1698,8 @@ class Heap {
void UpdateReferences(
Heap::ExternalStringTableUpdaterCallback updater_func);
+ bool HasYoung() const { return !young_strings_.empty(); }
+
private:
void Verify();
void VerifyYoung();
@@ -1721,32 +1712,11 @@ class Heap {
std::vector<Object> old_strings_;
};
- struct StringTypeTable {
- InstanceType type;
- int size;
- RootIndex index;
- };
-
- struct ConstantStringTable {
- const char* contents;
- RootIndex index;
- };
-
- struct StructTable {
- InstanceType type;
- int size;
- RootIndex index;
- };
-
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
static const int kRememberedUnmappedPages = 128;
- static const StringTypeTable string_type_table[];
- static const ConstantStringTable constant_string_table[];
- static const StructTable struct_table[];
-
static const int kYoungSurvivalRateHighThreshold = 90;
static const int kYoungSurvivalRateAllowedDeviation = 15;
static const int kOldSurvivalRateLowThreshold = 10;
@@ -1779,11 +1749,14 @@ class Heap {
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
GarbageCollectionReason gc_reason,
- const char** reason);
+ const char** reason) const;
// Free all LABs in the heap.
void FreeLinearAllocationAreas();
+ // Frees all LABs owned by the main thread.
+ void FreeMainThreadLinearAllocationAreas();
+
// Free all shared LABs.
void FreeSharedLinearAllocationAreas();
@@ -1798,20 +1771,19 @@ class Heap {
void UnmarkSharedLinearAllocationAreas();
// Performs garbage collection in a safepoint.
- // Returns the number of freed global handles.
- size_t PerformGarbageCollection(GarbageCollector collector,
- GarbageCollectionReason gc_reason,
- const char* collector_reason);
-
- // Performs garbage collection in the shared heap.
- void PerformSharedGarbageCollection(Isolate* initiator,
- GarbageCollectionReason gc_reason);
+ void PerformGarbageCollection(GarbageCollector collector,
+ GarbageCollectionReason gc_reason,
+ const char* collector_reason);
inline void UpdateOldSpaceLimits();
- bool CreateInitialMaps();
+ bool CreateEarlyReadOnlyMaps();
+ bool CreateImportantReadOnlyObjects();
+ bool CreateLateReadOnlyMaps();
+ bool CreateReadOnlyObjects();
+
void CreateInternalAccessorInfoObjects();
- void CreateInitialObjects();
+ void CreateInitialMutableObjects();
// Zaps the memory of a code object.
V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
@@ -1822,11 +1794,6 @@ class Heap {
enum class VerifyNoSlotsRecorded { kYes, kNo };
- // This method is used by the sweeper on free memory ranges to make the page
- // iterable again. Unlike `CreateFillerObjectAt` this method will not verify
- // slots since the sweeper can run concurrently.
- void CreateFillerObjectAtSweeper(Address addr, int size);
-
// Creates a filler object in the specified memory area. This method is the
// internal method used by all CreateFillerObjectAtXXX-methods.
void CreateFillerObjectAtRaw(Address addr, int size,
@@ -1897,12 +1864,20 @@ class Heap {
void InvokeIncrementalMarkingPrologueCallbacks();
void InvokeIncrementalMarkingEpilogueCallbacks();
+ // Casts a heap object to an InstructionStream, DCHECKs that the
+ // inner_pointer is within the object, and returns the attached Code object.
+ GcSafeCode GcSafeGetCodeFromInstructionStream(HeapObject instruction_stream,
+ Address inner_pointer);
+ // Returns the map of a HeapObject. Can be used during garbage collection,
+ // i.e. it supports a forwarded map.
+ Map GcSafeMapOfHeapObject(HeapObject object);
+
// ===========================================================================
// Actual GC. ================================================================
// ===========================================================================
- // Code that should be run before and after each GC. Includes some
- // reporting/verification activities when compiled with DEBUG set.
+ // Code that should be run before and after each GC. Includes
+ // some reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags);
void GarbageCollectionPrologueInSafepoint();
@@ -1947,7 +1922,10 @@ class Heap {
void UpdateTotalGCTime(double duration);
- bool MaximumSizeMinorGC() { return maximum_size_minor_gcs_ > 0; }
+ bool MaximumSizeMinorGC() const { return maximum_size_minor_gcs_ > 0; }
+ bool IsFirstMaximumSizeMinorGC() const {
+ return maximum_size_minor_gcs_ == 1;
+ }
bool IsIneffectiveMarkCompact(size_t old_generation_size,
double mutator_utilization);
@@ -1984,7 +1962,7 @@ class Heap {
size_t global_allocation_limit() const { return global_allocation_limit_; }
- size_t max_old_generation_size() {
+ size_t max_old_generation_size() const {
return max_old_generation_size_.load(std::memory_order_relaxed);
}
@@ -1997,8 +1975,8 @@ class Heap {
V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(LocalHeap* local_heap,
size_t size);
- bool ShouldExpandOldGenerationOnSlowAllocation(
- LocalHeap* local_heap = nullptr);
+ bool ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap,
+ AllocationOrigin origin);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
bool IsMainThreadParked(LocalHeap* local_heap);
bool IsMajorMarkingComplete(LocalHeap* local_heap);
@@ -2017,10 +1995,6 @@ class Heap {
bool ShouldStressCompaction() const;
- bool UseGlobalMemoryScheduling() const {
- return v8_flags.global_gc_scheduling && local_embedder_heap_tracer();
- }
-
base::Optional<size_t> GlobalMemoryAvailable();
void RecomputeLimits(GarbageCollector collector);
@@ -2029,9 +2003,8 @@ class Heap {
// GC Tasks. =================================================================
// ===========================================================================
- void ScheduleScavengeTaskIfNeeded();
+ void ScheduleMinorGCTaskIfNeeded();
void StartMinorMCIncrementalMarkingIfNeeded();
- size_t MinorMCTaskTriggerSize() const;
bool MinorMCSizeTaskTriggerReached() const;
// ===========================================================================
@@ -2042,7 +2015,8 @@ class Heap {
// Allocates a JS Map in the heap.
V8_WARN_UNUSED_RESULT AllocationResult
- AllocateMap(InstanceType instance_type, int instance_size,
+ AllocateMap(AllocationType allocation_type, InstanceType instance_type,
+ int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
int inobject_properties = 0);
@@ -2205,18 +2179,12 @@ class Heap {
std::atomic<HeapState> gc_state_{NOT_IN_GC};
- int gc_post_processing_depth_ = 0;
-
// Returns the amount of external memory registered since last global gc.
- V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();
+ V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact() const;
// Starts marking when stress_marking_percentage_% of the marking start limit
// is reached.
- std::atomic<int> stress_marking_percentage_{0};
-
- // Observer that causes more frequent checks for reached incremental
- // marking limit.
- AllocationObserver* stress_marking_observer_ = nullptr;
+ int stress_marking_percentage_ = 0;
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;
@@ -2303,11 +2271,9 @@ class Heap {
std::unique_ptr<MemoryReducer> memory_reducer_;
std::unique_ptr<ObjectStats> live_object_stats_;
std::unique_ptr<ObjectStats> dead_object_stats_;
- std::unique_ptr<ScavengeJob> scavenge_job_;
- std::unique_ptr<AllocationObserver> scavenge_task_observer_;
- std::unique_ptr<AllocationObserver> minor_mc_task_observer_;
+ std::unique_ptr<MinorGCJob> minor_gc_job_;
+ std::unique_ptr<AllocationObserver> minor_gc_task_observer_;
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
- std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<AllocationTrackerForDebugging>
allocation_tracker_for_debugging_;
@@ -2316,13 +2282,20 @@ class Heap {
//
// Owned by the heap when !V8_COMPRESS_POINTERS_IN_SHARED_CAGE, otherwise is
// process-wide.
- std::shared_ptr<CodeRange> code_range_;
+#if V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ CodeRange* code_range_ = nullptr;
+#else
+ std::unique_ptr<CodeRange> code_range_;
+#endif
// The embedder owns the C++ heap.
v8::CppHeap* cpp_heap_ = nullptr;
EmbedderRootsHandler* embedder_roots_handler_ = nullptr;
+ cppgc::EmbedderStackState embedder_stack_state_ =
+ cppgc::EmbedderStackState::kMayContainHeapPointers;
+
StrongRootsEntry* strong_roots_head_ = nullptr;
base::Mutex strong_roots_mutex_;
@@ -2387,11 +2360,15 @@ class Heap {
int max_regular_code_object_size_ = 0;
+ bool inline_allocation_enabled_ = true;
+
+ int pause_allocation_observers_depth_ = 0;
+
// Used for testing purposes.
bool force_oom_ = false;
bool force_gc_on_next_allocation_ = false;
bool delay_sweeper_tasks_for_testing_ = false;
- ScanStackMode scan_stack_mode_for_testing_ = ScanStackMode::kComplete;
+ bool force_shared_gc_with_empty_stack_for_testing_ = false;
UnorderedHeapObjectMap<HeapObject> retainer_;
UnorderedHeapObjectMap<Root> retaining_root_;
@@ -2406,15 +2383,16 @@ class Heap {
bool is_finalization_registry_cleanup_task_posted_ = false;
- bool evacuation_ = false;
-
std::unique_ptr<third_party_heap::Heap> tp_heap_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
AtomicMarkingState atomic_marking_state_;
- PretenturingHandler pretenuring_handler_;
+ PretenuringHandler pretenuring_handler_;
+
+ // This field is used only when not running with MinorMC.
+ ResizeNewSpaceMode resize_new_space_mode_ = ResizeNewSpaceMode::kNone;
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
@@ -2423,14 +2401,14 @@ class Heap {
friend class CollectorBase;
friend class ConcurrentAllocator;
friend class ConcurrentMarking;
+ friend class ConservativeTracedHandlesMarkingVisitor;
+ friend class EmbedderStackStateScope;
friend class EvacuateVisitorBase;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class GlobalHandleMarkingVisitor;
friend class HeapAllocator;
friend class HeapObjectIterator;
friend class HeapVerifier;
- friend class ScavengeTaskObserver;
friend class IgnoreLocalGCRequests;
friend class IncrementalMarking;
friend class IncrementalMarkingRootMarkingVisitor;
@@ -2439,25 +2417,28 @@ class Heap {
friend class LocalHeap;
friend class MarkingBarrier;
friend class OldLargeObjectSpace;
- friend class OptionalAlwaysAllocateScope;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
friend class MarkCompactCollector;
friend class MarkCompactCollectorBase;
+ friend class MinorGCTaskObserver;
friend class MinorMarkCompactCollector;
- friend class MinorMCTaskObserver;
+ friend class MinorMCIncrementalMarkingTaskObserver;
friend class NewLargeObjectSpace;
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpaceBase;
- friend class PretenturingHandler;
+ friend class PauseAllocationObserversScope;
+ friend class PretenuringHandler;
friend class ReadOnlyRoots;
- friend class ScanStackModeScopeForTesting;
+ friend class DisableConservativeStackScanningScopeForTesting;
friend class Scavenger;
friend class ScavengerCollector;
+ friend class ScheduleMinorGCTaskObserver;
friend class StressConcurrentAllocationObserver;
friend class Space;
+ friend class SpaceWithLinearArea;
friend class Sweeper;
friend class UnifiedHeapMarkingState;
friend class heap::TestMemoryAllocatorScope;
@@ -2541,24 +2522,6 @@ class V8_NODISCARD GCCallbacksScope final {
Heap* const heap_;
};
-// Like AlwaysAllocateScope if the heap argument to the constructor is
-// non-null. No-op otherwise.
-//
-// This class exists because AlwaysAllocateScope doesn't compose with
-// base::Optional, since supporting that composition requires making
-// base::Optional a friend class, defeating the purpose of hiding its
-// constructor.
-class V8_NODISCARD OptionalAlwaysAllocateScope {
- public:
- inline ~OptionalAlwaysAllocateScope();
-
- private:
- friend class Heap;
-
- explicit inline OptionalAlwaysAllocateScope(Heap* heap);
- Heap* heap_;
-};
-
class V8_NODISCARD AlwaysAllocateScopeForTesting {
public:
explicit inline AlwaysAllocateScopeForTesting(Heap* heap);
@@ -2606,21 +2569,20 @@ class V8_EXPORT_PRIVATE V8_NODISCARD
V8_NOINLINE ~CodePageCollectionMemoryModificationScopeForTesting();
};
-// The CodePageHeaderModificationScope enables write access to Code space page
-// headers.
-// On most of the configurations it's a no-op because Code space page headers
-// are configured as writable and permissions are never changed.
-// However, on MacOS on ARM64 ("Apple M1"/Apple Silicon) the situation is
-// different. In order to be able to use fast W^X permissions switching
-// machinery (APRR/MAP_JIT) it's necessary to configure executable memory as
-// readable writable executable (RWX). Also, on MacOS on ARM64 reconfiguration
-// of RWX page permissions to anything else is prohibited.
+// The CodePageHeaderModificationScope enables write access to Code
+// space page headers. On most of the configurations it's a no-op because
+// Code space page headers are configured as writable and
+// permissions are never changed. However, on MacOS on ARM64 ("Apple M1"/Apple
+// Silicon) the situation is different. In order to be able to use fast W^X
+// permissions switching machinery (APRR/MAP_JIT) it's necessary to configure
+// executable memory as readable writable executable (RWX). Also, on MacOS on
+// ARM64 reconfiguration of RWX page permissions to anything else is prohibited.
// So, in order to be able to allocate large code pages over freed regular
-// code pages and vice versa we have to allocate Code page headers as RWX too
-// and switch them to writable mode when it's necessary to modify the code page
-// header.
-// The scope can be used from any thread and affects only current thread, see
-// RwxMemoryWriteScope for details about semantics of the scope.
+// code pages and vice versa we have to allocate Code page headers
+// as RWX too and switch them to writable mode when it's necessary to modify the
+// code page header. The scope can be used from any thread and affects only
+// current thread, see RwxMemoryWriteScope for details about semantics of the
+// scope.
#if V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT
using CodePageHeaderModificationScope = RwxMemoryWriteScope;
#else
@@ -2629,27 +2591,13 @@ using CodePageHeaderModificationScope = RwxMemoryWriteScope;
using CodePageHeaderModificationScope = NopRwxMemoryWriteScope;
#endif // V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT
-// The CodeTPageHeaderModificationScope enables write access to CodeT objects
-// page headers.
-#if V8_EXTERNAL_CODE_SPACE
-// When V8_EXTERNAL_CODE_SPACE is enabled this scope is no-op because CodeT
-// objects are data objects and thus the page header is always in writable
-// state.
-using CodeTPageHeaderModificationScope = NopRwxMemoryWriteScope;
-#else
-// When V8_EXTERNAL_CODE_SPACE is disabled this scope is an alias to
-// CodePageHeaderModificationScope because in CodeT is a Code object and thus
-// write access to the page headers might be required.
-using CodeTPageHeaderModificationScope = CodePageHeaderModificationScope;
-#endif // V8_EXTERNAL_CODE_SPACE
-
// The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
class V8_NODISCARD CodePageMemoryModificationScope {
public:
explicit inline CodePageMemoryModificationScope(BasicMemoryChunk* chunk);
- explicit inline CodePageMemoryModificationScope(Code object);
+ explicit inline CodePageMemoryModificationScope(InstructionStream object);
inline ~CodePageMemoryModificationScope();
private:
@@ -2673,86 +2621,20 @@ class V8_NODISCARD IgnoreLocalGCRequests {
Heap* heap_;
};
-class V8_NODISCARD ScanStackModeScopeForTesting {
- public:
- explicit inline ScanStackModeScopeForTesting(Heap* heap,
- Heap::ScanStackMode mode)
- : heap_(heap), old_value_(heap_->scan_stack_mode_for_testing_) {
- heap_->scan_stack_mode_for_testing_ = mode;
- }
-
- inline ~ScanStackModeScopeForTesting() {
- heap_->scan_stack_mode_for_testing_ = old_value_;
- }
-
- protected:
- Heap* heap_;
- Heap::ScanStackMode old_value_;
-};
-
-// Visitor class to verify interior pointers in spaces that do not contain
-// or care about inter-generational references. All heap object pointers have to
-// point into the heap to a location that has a map pointer at its first word.
-// Caveat: Heap::Contains is an approximation because it can return true for
-// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor : public ObjectVisitorWithCageBases,
- public RootVisitor {
- public:
- V8_INLINE explicit VerifyPointersVisitor(Heap* heap);
- void VisitPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) override;
- void VisitPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override;
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
-
- void VisitRootPointers(Root root, const char* description,
- FullObjectSlot start, FullObjectSlot end) override;
- void VisitRootPointers(Root root, const char* description,
- OffHeapObjectSlot start,
- OffHeapObjectSlot end) override;
-
- protected:
- V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
- V8_INLINE void VerifyCodeObjectImpl(HeapObject heap_object);
-
- template <typename TSlot>
- V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);
-
- virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end);
-
- Heap* heap_;
-};
-
// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space and code space. Returns each space in turn, and null when it
// is done.
class V8_EXPORT_PRIVATE PagedSpaceIterator {
public:
- explicit PagedSpaceIterator(Heap* heap)
+ explicit PagedSpaceIterator(const Heap* heap)
: heap_(heap), counter_(FIRST_GROWABLE_PAGED_SPACE) {}
PagedSpace* Next();
private:
- Heap* heap_;
+ const Heap* const heap_;
int counter_;
};
-class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
- public:
- explicit SpaceIterator(Heap* heap);
- virtual ~SpaceIterator();
-
- bool HasNext();
- Space* Next();
-
- private:
- Heap* heap_;
- int current_space_; // from enum AllocationSpace.
-};
-
// A HeapObjectIterator provides iteration over the entire non-read-only heap.
// It aggregates the specific iterators for the different spaces as these can
// only iterate over one space only.
@@ -2867,38 +2749,33 @@ class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
// Only used for testing where the Origin is always an explicit invocation.
static EmbedderStackStateScope ExplicitScopeForTesting(
- LocalEmbedderHeapTracer* local_tracer, StackState stack_state);
+ Heap* heap, StackState stack_state);
EmbedderStackStateScope(Heap* heap, Origin origin, StackState stack_state);
~EmbedderStackStateScope();
private:
- EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
- StackState stack_state);
-
- LocalEmbedderHeapTracer* const local_tracer_;
+ Heap* const heap_;
const StackState old_stack_state_;
};
-class V8_NODISCARD CppClassNamesAsHeapObjectNameScope final {
+class V8_NODISCARD DisableConservativeStackScanningScopeForTesting {
public:
- explicit CppClassNamesAsHeapObjectNameScope(v8::CppHeap* heap);
- ~CppClassNamesAsHeapObjectNameScope();
+ explicit inline DisableConservativeStackScanningScopeForTesting(Heap* heap)
+ : embedder_scope_(EmbedderStackStateScope::ExplicitScopeForTesting(
+ heap, cppgc::EmbedderStackState::kNoHeapPointers)) {}
private:
- std::unique_ptr<cppgc::internal::ClassNameAsHeapObjectNameScope> scope_;
+ EmbedderStackStateScope embedder_scope_;
};
-class V8_NODISCARD EvacuationScope {
+class V8_NODISCARD CppClassNamesAsHeapObjectNameScope final {
public:
- explicit EvacuationScope(Heap* heap) : heap_(heap) {
- heap_->set_evacuation(true);
- }
-
- ~EvacuationScope() { heap_->set_evacuation(false); }
+ explicit CppClassNamesAsHeapObjectNameScope(v8::CppHeap* heap);
+ ~CppClassNamesAsHeapObjectNameScope();
private:
- Heap* const heap_;
+ std::unique_ptr<cppgc::internal::ClassNameAsHeapObjectNameScope> scope_;
};
} // namespace internal
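
[Editor's note] One detail from the heap.h changes above: code_range_ is now a raw pointer to the process-wide CodeRange when V8_COMPRESS_POINTERS_IN_SHARED_CAGE is set, and a heap-owned std::unique_ptr otherwise, with code_range() hiding the difference from callers. A stand-alone sketch of that compile-time ownership switch; SHARED_CAGE and the types are placeholders, not the V8 macros.

    #include <memory>

    struct CodeRangeLike {};

    // Define SHARED_CAGE to model V8_COMPRESS_POINTERS_IN_SHARED_CAGE.
    class HeapModel {
     public:
    #ifdef SHARED_CAGE
      // Not owned: the code range is a process-wide singleton.
      CodeRangeLike* code_range() { return code_range_; }
      void set_code_range(CodeRangeLike* range) { code_range_ = range; }
    #else
      // Owned by this heap instance.
      CodeRangeLike* code_range() { return code_range_.get(); }
      void set_code_range(std::unique_ptr<CodeRangeLike> range) {
        code_range_ = std::move(range);
      }
    #endif

     private:
    #ifdef SHARED_CAGE
      CodeRangeLike* code_range_ = nullptr;
    #else
      std::unique_ptr<CodeRangeLike> code_range_;
    #endif
    };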
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index cc413f2b82..c6bb189372 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -5,29 +5,41 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_INL_H_
#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-state-inl.h"
+#include "src/objects/descriptor-array.h"
namespace v8 {
namespace internal {
void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
- if (atomic_marking_state()->IsBlack(to)) {
+ if (atomic_marking_state()->IsMarked(to)) {
DCHECK(black_allocation());
return;
}
- DCHECK(atomic_marking_state()->IsWhite(to));
+ DCHECK(atomic_marking_state()->IsUnmarked(to));
if (atomic_marking_state()->IsGrey(from)) {
- bool success = atomic_marking_state()->WhiteToGrey(to);
+ bool success = atomic_marking_state()->TryMark(to);
DCHECK(success);
USE(success);
- } else if (atomic_marking_state()->IsBlack(from)) {
- bool success = atomic_marking_state()->WhiteToBlack(to);
+ } else if (atomic_marking_state()->IsMarked(from)) {
+ bool success = atomic_marking_state()->TryMark(to);
DCHECK(success);
USE(success);
+ success = atomic_marking_state()->GreyToBlack(to);
+ DCHECK(success);
+ USE(success);
+ if (!to.IsDescriptorArray() ||
+ (DescriptorArrayMarkingState::Marked::decode(
+ DescriptorArray::cast(to).raw_gc_state(kRelaxedLoad)) != 0)) {
+ atomic_marking_state()->IncrementLiveBytes(
+ MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(to)),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(to.Size()));
+ }
}
}
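
[Editor's note] The TransferColor change above reflects the marking-state API rename (IsWhite/IsBlack and WhiteToGrey become IsUnmarked/IsMarked and TryMark): a grey source only marks the destination, while a black source additionally turns the destination black and accounts its live bytes. A simplified tri-color model of that transfer, omitting the DescriptorArray special case; the types are illustrative, not V8 code.

    #include <cstddef>

    enum class Color { kWhite, kGrey, kBlack };

    struct ObjectLike {
      Color color = Color::kWhite;
      std::size_t size = 0;
    };

    // Transfers the marking color of `from` to `to`, as when an object is
    // moved or trimmed while incremental marking is active.
    void TransferColor(const ObjectLike& from, ObjectLike* to,
                       std::size_t* live_bytes) {
      if (to->color != Color::kWhite) return;  // Already marked; nothing to do.
      if (from.color == Color::kGrey) {
        to->color = Color::kGrey;  // Will be (re)visited by the marker.
      } else if (from.color == Color::kBlack) {
        to->color = Color::kBlack;  // Treated as fully processed.
        *live_bytes += to->size;    // Black objects count toward live bytes.
      }
    }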
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index bdc170f531..82cc8fb0c2 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -8,7 +8,6 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index f57041a33b..7ccfc8ea0b 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -4,11 +4,11 @@
#include "src/heap/incremental-marking.h"
+#include "src/base/logging.h"
#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/concurrent-marking.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
@@ -61,55 +61,11 @@ IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
marking_state_(heap->marking_state()),
atomic_marking_state_(heap->atomic_marking_state()) {}
-void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
- HeapObject obj) {
- // TODO(v8:13012): Add scope for MinorMC.
- TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
- marking_state()->WhiteToGrey(obj);
- if (IsMajorMarking()) {
- major_collector_->VisitObject(obj);
- } else {
- // Not covered by tests.
- minor_collector_->VisitObject(obj);
- }
-}
-
void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
- MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
- Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
- IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
-}
-
-void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
- if (!IsMarking()) return;
-
- DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
- DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
- DCHECK_NE(from, to);
-
- MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
-
- if (black_allocation() &&
- Marking::IsBlack<AccessMode::ATOMIC>(new_mark_bit)) {
- // Nothing to do if the object is in black area.
- return;
- }
- MarkBlackAndVisitObjectDueToLayoutChange(from);
- DCHECK(marking_state()->IsBlack(from));
- // Mark the new address as black.
- if (from.address() + kTaggedSize == to.address()) {
- // The old and the new markbits overlap. The |to| object has the
- // grey color. To make it black, we need to set the second bit.
- DCHECK(new_mark_bit.Get<AccessMode::ATOMIC>());
- new_mark_bit.Next().Set<AccessMode::ATOMIC>();
- } else {
- bool success = Marking::WhiteToBlack<AccessMode::ATOMIC>(new_mark_bit);
- DCHECK(success);
- USE(success);
- }
- DCHECK(marking_state()->IsBlack(to));
+ CHECK(atomic_marking_state()->TryMark(obj) &&
+ atomic_marking_state()->GreyToBlack(obj));
+ IncrementLiveBytesBackground(MemoryChunk::FromHeapObject(obj),
+ static_cast<intptr_t>(object_size));
}
bool IncrementalMarking::CanBeStarted() const {
@@ -119,8 +75,7 @@ bool IncrementalMarking::CanBeStarted() const {
// 3) when we are currently not serializing or deserializing the heap, and
// 4) not a shared heap.
return v8_flags.incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
- heap_->deserialization_complete() &&
- !heap_->isolate()->serializer_enabled() && !heap_->IsShared();
+ heap_->deserialization_complete() && !isolate()->serializer_enabled();
}
bool IncrementalMarking::IsBelowActivationThresholds() const {
@@ -131,7 +86,6 @@ bool IncrementalMarking::IsBelowActivationThresholds() const {
void IncrementalMarking::Start(GarbageCollector garbage_collector,
GarbageCollectionReason gc_reason) {
DCHECK(!heap_->sweeping_in_progress());
- DCHECK(!heap_->IsShared());
if (v8_flags.trace_incremental_marking) {
const size_t old_generation_size_mb =
@@ -140,7 +94,7 @@ void IncrementalMarking::Start(GarbageCollector garbage_collector,
heap()->old_generation_allocation_limit() / MB;
const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
"/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
Heap::GarbageCollectionReasonToString(gc_reason),
@@ -155,9 +109,9 @@ void IncrementalMarking::Start(GarbageCollector garbage_collector,
DCHECK(v8_flags.incremental_marking);
DCHECK(IsStopped());
DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
- DCHECK(!heap_->isolate()->serializer_enabled());
+ DCHECK(!isolate()->serializer_enabled());
- Counters* counters = heap_->isolate()->counters();
+ Counters* counters = isolate()->counters();
const bool is_major = garbage_collector == GarbageCollector::MARK_COMPACTOR;
if (is_major) {
@@ -170,10 +124,11 @@ void IncrementalMarking::Start(GarbageCollector garbage_collector,
: counters->gc_minor_incremental_marking_start());
const auto scope_id = is_major ? GCTracer::Scope::MC_INCREMENTAL_START
: GCTracer::Scope::MINOR_MC_INCREMENTAL_START;
- TRACE_EVENT1("v8",
+ TRACE_EVENT2("v8",
is_major ? "V8.GCIncrementalMarkingStart"
: "V8.GCMinorIncrementalMarkingStart",
- "epoch", heap_->tracer()->CurrentEpoch(scope_id));
+ "epoch", heap_->tracer()->CurrentEpoch(scope_id), "reason",
+ Heap::GarbageCollectionReasonToString(gc_reason));
TRACE_GC_EPOCH(heap()->tracer(), scope_id, ThreadKind::kMain);
heap_->tracer()->NotifyIncrementalMarkingStart();
@@ -202,7 +157,7 @@ void IncrementalMarking::Start(GarbageCollector garbage_collector,
}
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
- if (marking_state()->WhiteToGrey(obj)) {
+ if (marking_state()->TryMark(obj)) {
local_marking_worklists()->Push(obj);
return true;
}
@@ -236,7 +191,7 @@ class IncrementalMarking::IncrementalMarkingRootMarkingVisitor final
DCHECK(!MapWord::IsPacked(object.ptr()));
HeapObject heap_object = HeapObject::cast(object);
- if (heap_object.InSharedHeap()) return;
+ if (heap_object.InAnySharedSpace() || heap_object.InReadOnlySpace()) return;
if (incremental_marking_->IsMajorMarking()) {
if (incremental_marking_->WhiteToGreyAndPush(heap_object)) {
@@ -270,42 +225,61 @@ void IncrementalMarking::MarkRoots() {
SkipRoot::kWeak, SkipRoot::kExternalStringTable,
SkipRoot::kGlobalHandles, SkipRoot::kOldGeneration});
- heap()->isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
- &visitor);
- heap()->isolate()->traced_handles()->IterateYoungRoots(&visitor);
+ isolate()->global_handles()->IterateYoungStrongAndDependentRoots(&visitor);
+ isolate()->traced_handles()->IterateYoungRoots(&visitor);
std::vector<PageMarkingItem> marking_items;
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap_, [&marking_items](MemoryChunk* chunk) {
- marking_items.emplace_back(chunk);
+ heap(), [&marking_items](MemoryChunk* chunk) {
+ if (chunk->slot_set<OLD_TO_NEW>()) {
+ marking_items.emplace_back(
+ chunk, PageMarkingItem::SlotsType::kRegularSlots);
+ } else {
+ chunk->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ }
+
+ if (chunk->typed_slot_set<OLD_TO_NEW>()) {
+ marking_items.emplace_back(chunk,
+ PageMarkingItem::SlotsType::kTypedSlots);
+ }
});
+ std::vector<YoungGenerationMarkingTask> tasks;
+ for (size_t i = 0; i < (v8_flags.parallel_marking
+ ? MinorMarkCompactCollector::kMaxParallelTasks
+ : 1);
+ ++i) {
+ tasks.emplace_back(isolate(), heap(),
+ minor_collector_->marking_worklists());
+ }
V8::GetCurrentPlatform()
- ->CreateJob(
- v8::TaskPriority::kUserBlocking,
- std::make_unique<YoungGenerationMarkingJob>(
- heap_->isolate(), heap_, minor_collector_->marking_worklists(),
- std::move(marking_items), YoungMarkingJobType::kIncremental))
+ ->CreateJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<YoungGenerationMarkingJob>(
+ isolate(), heap_, minor_collector_->marking_worklists(),
+ std::move(marking_items),
+ YoungMarkingJobType::kIncremental, tasks))
->Join();
+ for (YoungGenerationMarkingTask& task : tasks) {
+ task.Finalize();
+ }
}
}
void IncrementalMarking::MarkRootsForTesting() { MarkRoots(); }
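Editorial sketch: the parallel root-marking path above relies on the public v8::JobTask / JobHandle API. The hypothetical job below drains an index-based worklist much like a YoungGenerationMarkingJob-style task might; SketchItem and SketchMarkingJob are made up for illustration.

#include <atomic>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include "v8-platform.h"  // include path depends on the embedder's build setup

struct SketchItem { /* e.g. a page whose remembered set should be scanned */ };

class SketchMarkingJob final : public v8::JobTask {
 public:
  explicit SketchMarkingJob(std::vector<SketchItem> items)
      : items_(std::move(items)) {}

  void Run(v8::JobDelegate* delegate) override {
    size_t index;
    while ((index = next_.fetch_add(1)) < items_.size()) {
      // ... process items_[index] here ...
      if (delegate->ShouldYield()) return;  // cooperatively return to the scheduler
    }
  }

  size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
    const size_t claimed = next_.load();
    return claimed < items_.size() ? items_.size() - claimed : 0;
  }

 private:
  std::vector<SketchItem> items_;
  std::atomic<size_t> next_{0};
};

Usage mirrors MarkRoots(): create the job with kUserBlocking priority and Join() it on the calling thread, e.g. platform->CreateJob(v8::TaskPriority::kUserBlocking, std::make_unique<SketchMarkingJob>(std::move(items)))->Join();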
void IncrementalMarking::StartMarkingMajor() {
- if (heap_->isolate()->serializer_enabled()) {
+ if (isolate()->serializer_enabled()) {
// Black allocation currently starts when we start incremental marking,
// but we cannot enable black allocation while deserializing. Hence, we
// have to delay the start of incremental marking in that case.
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Start delayed - serializer\n");
}
return;
}
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Start marking\n");
+ isolate()->PrintWithTimestamp("[IncrementalMarking] Start marking\n");
}
heap_->InvokeIncrementalMarkingPrologueCallbacks();
@@ -314,16 +288,16 @@ void IncrementalMarking::StartMarkingMajor() {
MarkCompactCollector::StartCompactionMode::kIncremental);
#ifdef V8_COMPRESS_POINTERS
- heap_->isolate()->external_pointer_table().StartCompactingIfNeeded();
+ isolate()->external_pointer_table().StartCompactingIfNeeded();
#endif // V8_COMPRESS_POINTERS
- auto embedder_flags = heap_->flags_for_embedder_tracer();
- {
+ if (heap_->cpp_heap()) {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
// PrepareForTrace should be called before visitor initialization in
- // StartMarking. It is only used with CppHeap.
- heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
+ // StartMarking.
+ CppHeap::From(heap_->cpp_heap())
+ ->InitializeTracing(CppHeap::CollectionType::kMajor);
}
major_collector_->StartMarking();
@@ -334,9 +308,9 @@ void IncrementalMarking::StartMarkingMajor() {
MarkingBarrier::ActivateAll(heap(), is_compacting_,
MarkingBarrierType::kMajor);
- heap()->isolate()->traced_handles()->SetIsMarking(true);
+ isolate()->traced_handles()->SetIsMarking(true);
- heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+ isolate()->compilation_cache()->MarkCompactPrologue();
StartBlackAllocation();
@@ -351,25 +325,29 @@ void IncrementalMarking::StartMarkingMajor() {
// Ready to start incremental marking.
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
+ isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
}
- {
- // TracePrologue may call back into V8 in corner cases, requiring that
+ if (heap()->cpp_heap()) {
+ // StartTracing may call back into V8 in corner cases, requiring that
// marking (including write barriers) is fully set up.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
+ CppHeap::From(heap()->cpp_heap())->StartTracing();
}
heap_->InvokeIncrementalMarkingEpilogueCallbacks();
+
+ if (v8_flags.minor_mc && heap_->new_space()) {
+ heap_->paged_new_space()->ForceAllocationSuccessUntilNextGC();
+ }
}
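Editorial sketch (hypothetical EmbedderHeap standing in for CppHeap) of why the start-up above is split into two phases: initialization runs before the V8 marking visitors are created, while tracing only starts once marking and the write barriers are fully active.

#include <cstdio>

struct EmbedderHeap {
  void InitializeTracing() { std::puts("prepare embedder marking state"); }
  void StartTracing() { std::puts("start tracing; barriers are active"); }
};

void StartMajorMarkingSketch(EmbedderHeap* embedder /* may be null */) {
  if (embedder) embedder->InitializeTracing();
  // ... create marking visitors, activate marking barriers ...
  if (embedder) embedder->StartTracing();
}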
void IncrementalMarking::StartMarkingMinor() {
// Removed serializer_enabled() check because we don't do black allocation.
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] (MinorMC) Start marking\n");
}
@@ -393,8 +371,7 @@ void IncrementalMarking::StartMarkingMinor() {
}
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] (MinorMC) Running\n");
+ isolate()->PrintWithTimestamp("[IncrementalMarking] (MinorMC) Running\n");
}
DCHECK(!is_compacting_);
@@ -410,9 +387,9 @@ void IncrementalMarking::StartBlackAllocation() {
"Marking Code objects requires write access to the Code page header");
heap()->code_space()->MarkLinearAllocationAreaBlack();
}
- if (heap()->isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
DCHECK_EQ(heap()->shared_space()->top(), kNullAddress);
- heap()->isolate()->global_safepoint()->IterateClientIsolates(
+ isolate()->global_safepoint()->IterateSharedSpaceAndClientIsolates(
[](Isolate* client) {
client->heap()->MarkSharedLinearAllocationAreasBlack();
});
@@ -421,7 +398,7 @@ void IncrementalMarking::StartBlackAllocation() {
local_heap->MarkLinearAllocationAreaBlack();
});
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
}
}
@@ -434,9 +411,9 @@ void IncrementalMarking::PauseBlackAllocation() {
"Marking Code objects requires write access to the Code page header");
heap()->code_space()->UnmarkLinearAllocationArea();
}
- if (heap()->isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
DCHECK_EQ(heap()->shared_space()->top(), kNullAddress);
- heap()->isolate()->global_safepoint()->IterateClientIsolates(
+ isolate()->global_safepoint()->IterateSharedSpaceAndClientIsolates(
[](Isolate* client) {
client->heap()->UnmarkSharedLinearAllocationAreas();
});
@@ -444,7 +421,7 @@ void IncrementalMarking::PauseBlackAllocation() {
heap()->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
}
black_allocation_ = false;
@@ -454,7 +431,7 @@ void IncrementalMarking::FinishBlackAllocation() {
if (black_allocation_) {
black_allocation_ = false;
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation finished\n");
}
}
@@ -471,7 +448,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
major_collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
- PtrComprCageBase cage_base(heap_->isolate());
+ PtrComprCageBase cage_base(isolate());
major_collector_->marking_worklists()->Update([this, marking_state, cage_base,
filler_map](
HeapObject obj,
@@ -489,10 +466,11 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
// Hence, we can discard them.
return false;
}
- HeapObject dest = map_word.ToForwardingAddress();
+ HeapObject dest = map_word.ToForwardingAddress(obj);
USE(this);
- DCHECK_IMPLIES(marking_state->IsWhite(obj), obj.IsFreeSpaceOrFiller());
- if (dest.InSharedHeap()) {
+ DCHECK_IMPLIES(marking_state->IsUnmarked(obj), obj.IsFreeSpaceOrFiller());
+ if (dest.InWritableSharedSpace() &&
+ !isolate()->is_shared_space_isolate()) {
// Object got promoted into the shared heap. Drop it from the client
// heap marking worklist.
return false;
@@ -509,7 +487,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
DCHECK_IMPLIES(
v8_flags.minor_mc,
!obj.map_word(cage_base, kRelaxedLoad).IsForwardingAddress());
- if (marking_state->IsWhite(obj)) {
+ if (marking_state->IsUnmarked(obj)) {
return false;
}
// Either a large object or an object marked by the minor
@@ -521,13 +499,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
// Only applicable during minor MC garbage collections.
if (!Heap::IsLargeObject(obj) &&
Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- if (marking_state->IsWhite(obj)) {
+ if (marking_state->IsUnmarked(obj)) {
return false;
}
*out = obj;
return true;
}
- DCHECK_IMPLIES(marking_state->IsWhite(obj),
+ DCHECK_IMPLIES(marking_state->IsUnmarked(obj),
obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
@@ -552,44 +530,17 @@ void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
void IncrementalMarking::EmbedderStep(double expected_duration_ms,
double* duration_ms) {
DCHECK(IsMarking());
- if (!heap_->local_embedder_heap_tracer()
- ->SupportsIncrementalEmbedderSteps()) {
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ DCHECK_NOT_NULL(cpp_heap);
+ if (!cpp_heap->incremental_marking_supported()) {
*duration_ms = 0.0;
return;
}
- constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;
-
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
- LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
const double start = heap_->MonotonicallyIncreasingTimeInMs();
- const double deadline = start + expected_duration_ms;
- bool empty_worklist = true;
- if (local_marking_worklists()->PublishWrapper()) {
- DCHECK(local_marking_worklists()->IsWrapperEmpty());
- } else {
- // Cannot directly publish wrapper objects.
- LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
- HeapObject object;
- size_t cnt = 0;
- while (local_marking_worklists()->PopWrapper(&object)) {
- scope.TracePossibleWrapper(JSObject::cast(object));
- if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
- if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
- empty_worklist = false;
- break;
- }
- cnt = 0;
- }
- }
- }
- // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
- // which means |local_tracer| won't do any actual tracing, so there is no
- // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
- local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
- double current = heap_->MonotonicallyIncreasingTimeInMs();
- local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
- *duration_ms = current - start;
+ cpp_heap->AdvanceTracing(expected_duration_ms);
+ *duration_ms = heap_->MonotonicallyIncreasingTimeInMs() - start;
}
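Editorial sketch of the deadline-sliced step that AdvanceTracing() now owns on the CppHeap side; DrainOneUnit() is a hypothetical stand-in for a unit of embedder tracing work.

#include <chrono>

static int remaining_units = 1000;                 // fake worklist
static bool DrainOneUnit() { return --remaining_units > 0; }

double AdvanceWithDeadline(double expected_duration_ms) {
  using Clock = std::chrono::steady_clock;
  const auto start = Clock::now();
  const auto deadline =
      start + std::chrono::duration<double, std::milli>(expected_duration_ms);
  while (Clock::now() < deadline && DrainOneUnit()) {
  }
  // Return the actual duration in ms, mirroring how *duration_ms is computed.
  return std::chrono::duration<double, std::milli>(Clock::now() - start).count();
}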
bool IncrementalMarking::Stop() {
@@ -600,7 +551,7 @@ bool IncrementalMarking::Stop() {
static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
int old_generation_limit_mb =
static_cast<int>(heap()->old_generation_allocation_limit() / MB);
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
"overshoot %dMB\n",
old_generation_size_mb, old_generation_limit_mb,
@@ -619,10 +570,24 @@ bool IncrementalMarking::Stop() {
}
collection_requested_via_stack_guard_ = false;
- heap_->isolate()->stack_guard()->ClearGC();
+ isolate()->stack_guard()->ClearGC();
is_marking_ = false;
- heap_->SetIsMarkingFlag(false);
+
+ if (isolate()->has_shared_space() && !isolate()->is_shared_space_isolate()) {
+ // When disabling local incremental marking in a client isolate (= worker
+ // isolate), the marking barrier needs to stay enabled when incremental
+ // marking in the shared heap is running.
+ const bool is_marking = isolate()
+ ->shared_space_isolate()
+ ->heap()
+ ->incremental_marking()
+ ->IsMajorMarking();
+ heap_->SetIsMarkingFlag(is_marking);
+ } else {
+ heap_->SetIsMarkingFlag(false);
+ }
+
heap_->SetIsMinorMarkingFlag(false);
is_compacting_ = false;
FinishBlackAllocation();
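Editorial sketch of the flag rule the hunk above encodes: when a client (worker) isolate stops its local incremental marking, its marking flag must stay on if the shared-space isolate is still running a major marking cycle. The struct and function names are hypothetical.

struct IsolateStateSketch {
  bool has_shared_space;
  bool is_shared_space_isolate;
  bool shared_heap_major_marking;  // state of the shared-space isolate's heap
};

bool MarkingFlagAfterLocalStop(const IsolateStateSketch& s) {
  if (s.has_shared_space && !s.is_shared_space_isolate) {
    return s.shared_heap_major_marking;  // barrier stays on for shared marking
  }
  return false;  // no shared heap, or this is the shared-space isolate itself
}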
@@ -666,7 +631,7 @@ bool IncrementalMarking::ShouldWaitForTask() {
const bool wait_for_task = current_time < completion_task_timeout_;
if (v8_flags.trace_incremental_marking && wait_for_task) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Delaying GC via stack guard. time left: "
"%fms\n",
completion_task_timeout_ - current_time);
@@ -689,7 +654,7 @@ bool IncrementalMarking::TryInitializeTaskTimeout() {
if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Not delaying marking completion. time to "
"task: %fms allowed overshoot: %fms\n",
time_to_marking_task, overshoot_ms);
@@ -700,7 +665,7 @@ bool IncrementalMarking::TryInitializeTaskTimeout() {
completion_task_timeout_ = now + overshoot_ms;
if (v8_flags.trace_incremental_marking) {
- heap()->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Delaying GC via stack guard. time to task: "
"%fms "
"allowed overshoot: %fms\n",
@@ -717,7 +682,7 @@ void IncrementalMarking::FastForwardSchedule() {
if (scheduled_bytes_to_mark_ < bytes_marked_) {
scheduled_bytes_to_mark_ = bytes_marked_;
if (v8_flags.trace_incremental_marking) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Fast-forwarded schedule\n");
}
}
@@ -745,7 +710,7 @@ void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
AddScheduledBytesToMark(bytes_to_mark);
if (v8_flags.trace_incremental_marking) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
"%.1fms\n",
bytes_to_mark / KB, delta_ms);
@@ -797,7 +762,7 @@ void IncrementalMarking::AdvanceOnAllocation() {
// When task isn't run soon enough, fall back to stack guard to force
// completion.
collection_requested_via_stack_guard_ = true;
- heap_->isolate()->stack_guard()->RequestGC();
+ isolate()->stack_guard()->RequestGC();
}
}
}
@@ -805,13 +770,12 @@ void IncrementalMarking::AdvanceOnAllocation() {
bool IncrementalMarking::ShouldFinalize() const {
DCHECK(IsMarking());
+ const auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
return heap()
->mark_compact_collector()
->local_marking_worklists()
->IsEmpty() &&
- heap()
- ->local_embedder_heap_tracer()
- ->ShouldFinalizeIncrementalMarking();
+ (!cpp_heap || cpp_heap->ShouldFinalizeIncrementalMarking());
}
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
@@ -826,7 +790,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
const size_t kTargetStepCount = 256;
const size_t kTargetStepCountAtOOM = 32;
const size_t kMaxStepSizeInByte = 256 * KB;
- size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
+ size_t oom_slack = heap()->new_space()->TotalCapacity() + 64 * MB;
if (!heap()->CanExpandOldGeneration(oom_slack)) {
return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
@@ -853,7 +817,7 @@ void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
AddScheduledBytesToMark(bytes_to_mark);
if (v8_flags.trace_incremental_marking) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
"(progress=%zuKB, allocation=%zuKB)\n",
bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
@@ -872,7 +836,7 @@ void IncrementalMarking::FetchBytesMarkedConcurrently() {
bytes_marked_concurrently_ = current_bytes_marked_concurrently;
}
if (v8_flags.trace_incremental_marking) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marked %zuKB on background threads\n",
heap_->concurrent_marking()->TotalMarkedBytes() / KB);
}
@@ -883,11 +847,11 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
FetchBytesMarkedConcurrently();
if (v8_flags.trace_incremental_marking) {
if (scheduled_bytes_to_mark_ > bytes_marked_) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marker is %zuKB behind schedule\n",
(scheduled_bytes_to_mark_ - bytes_marked_) / KB);
} else {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
(bytes_marked_ - scheduled_bytes_to_mark_) / KB);
}
@@ -903,7 +867,7 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
void IncrementalMarking::Step(double max_step_size_in_ms,
StepOrigin step_origin) {
NestedTimedHistogramScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
+ isolate()->counters()->gc_incremental_marking());
TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch",
heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
@@ -931,7 +895,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
}
#endif
if (v8_flags.trace_incremental_marking) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marking speed %.fKB/ms\n",
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
}
@@ -953,7 +917,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
// processed on their own. For small graphs, helping is not necessary.
std::tie(v8_bytes_processed, std::ignore) =
major_collector_->ProcessMarkingWorklist(bytes_to_process);
- if (heap_->local_embedder_heap_tracer()->InUse()) {
+ if (heap_->cpp_heap()) {
embedder_deadline =
std::min(max_step_size_in_ms,
static_cast<double>(bytes_to_process) / marking_speed);
@@ -975,7 +939,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
if (v8_flags.trace_incremental_marking) {
- heap_->isolate()->PrintWithTimestamp(
+ isolate()->PrintWithTimestamp(
"[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms "
"(%fms) "
"in %.1f\n",
@@ -985,5 +949,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
}
}
+Isolate* IncrementalMarking::isolate() const { return heap_->isolate(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index a3fb20a0af..7563e5ce03 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -86,8 +86,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
IncrementalMarking(Heap* heap, WeakObjects* weak_objects);
- void NotifyLeftTrimming(HeapObject from, HeapObject to);
-
bool IsStopped() const { return !IsMarking(); }
bool IsMarking() const { return is_marking_; }
bool IsMajorMarkingComplete() const {
@@ -122,16 +120,12 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// marking completes.
void AdvanceOnAllocation();
- // This function is used to color the object black before it undergoes an
- // unsafe layout change. This is a part of synchronization protocol with
- // the concurrent marker.
- void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
-
void MarkBlackBackground(HeapObject obj, int object_size);
bool IsCompacting() { return IsMarking() && is_compacting_; }
Heap* heap() const { return heap_; }
+ Isolate* isolate() const;
IncrementalMarkingJob* incremental_marking_job() {
return &incremental_marking_job_;
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 3ab25a0c7e..9d38e5d44d 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -53,7 +53,8 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// Check whether object has a tagged field at that particular offset.
HeapObject invalidated_object = HeapObject::FromAddress(current_.address);
- DCHECK_IMPLIES(marking_state_, marking_state_->IsBlack(invalidated_object));
+ DCHECK_IMPLIES(marking_state_,
+ marking_state_->IsMarked(invalidated_object));
DCHECK(MarkCompactCollector::IsMapOrForwarded(invalidated_object.map()));
return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
}
@@ -69,7 +70,7 @@ void InvalidatedSlotsFilter::NextInvalidatedObject() {
next_ = {sentinel_, 0, false};
} else {
HeapObject object = iterator_->first;
- bool is_live = marking_state_ ? marking_state_->IsBlack(object) : true;
+ bool is_live = marking_state_ ? marking_state_->IsMarked(object) : true;
next_ = {object.address(), iterator_->second, is_live};
iterator_++;
}
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index d17394cc2c..7fc8962120 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -6,6 +6,7 @@
#include "src/base/logging.h"
#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
@@ -61,6 +62,10 @@ InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToNew(MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
+InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToOld(MemoryChunk* chunk) {
+ return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
+}
+
InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToShared(
MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk,
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 1215664575..1f711b7077 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -78,6 +78,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
public:
static InvalidatedSlotsCleanup OldToNew(MemoryChunk* chunk);
+ static InvalidatedSlotsCleanup OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsCleanup OldToShared(MemoryChunk* chunk);
static InvalidatedSlotsCleanup NoCleanup(MemoryChunk* chunk);
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index dfcc2a3113..3cbeb57f2f 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking-state-inl.h"
@@ -68,12 +69,21 @@ size_t LargeObjectSpace::Available() const {
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
- RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
+ DCHECK_NULL(slot_set<OLD_TO_NEW>());
+ DCHECK_NULL(typed_slot_set<OLD_TO_NEW>());
+
+ DCHECK_NULL(slot_set<OLD_TO_OLD>());
+ DCHECK_NULL(typed_slot_set<OLD_TO_OLD>());
+
+ // area_end() might not be aligned to a full bucket size with large objects.
+  // Align it to the bucket size so that the following RemoveRange invocation
+  // simply drops the whole bucket, resetting it to nullptr.
+ Address aligned_area_end = address() + SlotSet::OffsetForBucket(buckets());
+ DCHECK_LE(area_end(), aligned_area_end);
+ RememberedSet<OLD_TO_SHARED>::RemoveRange(this, free_start, aligned_area_end,
+ SlotSet::FREE_EMPTY_BUCKETS);
+
+ RememberedSet<OLD_TO_SHARED>::RemoveRangeTyped(this, free_start, area_end());
}
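Editorial sketch of the alignment idea above (the real code uses SlotSet::OffsetForBucket(buckets())): rounding the end of the removal range up to a bucket boundary lets RemoveRange free whole buckets instead of clearing a partial one. The constants are assumptions for illustration only.

#include <cstddef>
#include <cstdint>

constexpr size_t kTaggedSize = 8;         // assumption: 64-bit tagged size
constexpr size_t kSlotsPerBucket = 1024;  // hypothetical bucket granularity
constexpr size_t kBucketBytes = kSlotsPerBucket * kTaggedSize;

uintptr_t AlignEndUpToBucket(uintptr_t page_address, uintptr_t area_end) {
  const uintptr_t offset = area_end - page_address;
  const uintptr_t aligned_offset =
      (offset + kBucketBytes - 1) / kBucketBytes * kBucketBytes;
  return page_address + aligned_offset;  // >= area_end (cf. the DCHECK_LE above)
}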
// -----------------------------------------------------------------------------
@@ -116,7 +126,7 @@ void LargeObjectSpace::TearDown() {
void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
size_t object_size) {
- if (!allocation_counter_.IsActive()) return;
+ if (!heap()->IsAllocationObserverActive()) return;
if (object_size >= allocation_counter_.NextBytes()) {
allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
@@ -139,7 +149,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation(
- heap()->main_thread_local_heap())) {
+ heap()->main_thread_local_heap(), AllocationOrigin::kRuntime)) {
return AllocationResult::Failure();
}
@@ -152,10 +162,10 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
if (heap()->incremental_marking()->black_allocation()) {
- heap()->marking_state()->WhiteToBlack(object);
+ heap()->marking_state()->TryMarkAndAccountLiveBytes(object);
}
DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
- heap()->marking_state()->IsBlack(object));
+ heap()->marking_state()->IsMarked(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion(identity(), page);
AdvanceAndInvokeAllocationObservers(object.address(),
@@ -175,7 +185,8 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
- !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
+ !heap()->ShouldExpandOldGenerationOnSlowAllocation(
+ local_heap, AllocationOrigin::kRuntime)) {
return AllocationResult::Failure();
}
@@ -185,10 +196,10 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
if (heap()->incremental_marking()->black_allocation()) {
- heap()->marking_state()->WhiteToBlack(object);
+ heap()->marking_state()->TryMarkAndAccountLiveBytes(object);
}
DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
- heap()->marking_state()->IsBlack(object));
+ heap()->marking_state()->IsMarked(object));
page->InitializationMemoryFence();
if (identity() == CODE_LO_SPACE) {
heap()->isolate()->AddCodeMemoryChunk(page);
@@ -267,8 +278,7 @@ void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
page_count_++;
memory_chunk_list_.PushBack(page);
page->set_owner(this);
- page->SetOldGenerationPageFlags(!is_off_thread() &&
- heap()->incremental_marking()->IsMarking());
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
@@ -287,26 +297,6 @@ void LargeObjectSpace::RemovePage(LargePage* page) {
}
}
-namespace {
-
-// Returns the `GetCommitPageSize()`-aligned end of the payload that can be
-// used to shrink down an object. Returns kNullAddress if shrinking is not
-// supported.
-Address GetEndOfPayload(LargePage* page, Address object_address,
- size_t object_size) {
- if (page->executable() == EXECUTABLE) {
- return kNullAddress;
- }
- const size_t used_committed_size =
- ::RoundUp((object_address - page->address()) + object_size,
- MemoryAllocator::GetCommitPageSize());
- return (used_committed_size < page->size())
- ? page->address() + used_committed_size
- : kNullAddress;
-}
-
-} // namespace
-
void LargeObjectSpace::ShrinkPageToObjectSize(LargePage* page,
HeapObject object,
size_t object_size) {
@@ -314,17 +304,34 @@ void LargeObjectSpace::ShrinkPageToObjectSize(LargePage* page,
PtrComprCageBase cage_base(heap()->isolate());
DCHECK_EQ(object, page->GetObject());
DCHECK_EQ(object_size, page->GetObject().Size(cage_base));
+ DCHECK_EQ(page->executable(), NOT_EXECUTABLE);
#endif // DEBUG
- Address free_start = GetEndOfPayload(page, object.address(), object_size);
- if (free_start != kNullAddress) {
- DCHECK(!page->IsFlagSet(Page::IS_EXECUTABLE));
- page->ClearOutOfLiveRangeSlots(free_start);
- const size_t bytes_to_free = page->size() - (free_start - page->address());
- heap()->memory_allocator()->PartialFreeMemory(
- page, free_start, bytes_to_free, page->area_start() + object_size);
- size_ -= bytes_to_free;
- AccountUncommitted(bytes_to_free);
+
+ const size_t used_committed_size =
+ ::RoundUp(object.address() - page->address() + object_size,
+ MemoryAllocator::GetCommitPageSize());
+
+ // Object shrunk since last GC.
+ if (object_size < page->area_size()) {
+ page->ClearOutOfLiveRangeSlots(object.address() + object_size);
+ const Address new_area_end = page->area_start() + object_size;
+
+ // Object shrunk enough that we can even free some OS pages.
+ if (used_committed_size < page->size()) {
+ const size_t bytes_to_free = page->size() - used_committed_size;
+ heap()->memory_allocator()->PartialFreeMemory(
+ page, page->address() + used_committed_size, bytes_to_free,
+ new_area_end);
+ size_ -= bytes_to_free;
+ AccountUncommitted(bytes_to_free);
+ } else {
+      // Can't free a whole OS page, but keep the object area up-to-date.
+ page->set_area_end(new_area_end);
+ }
}
+
+ DCHECK_EQ(used_committed_size, page->size());
+ DCHECK_EQ(object_size, page->area_size());
}
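Editorial worked example of the shrink decision above, assuming a 4 KiB commit page size (MemoryAllocator::GetCommitPageSize() is platform-specific); the RoundUp helper mirrors the ::RoundUp call in the new code.

#include <cstddef>

constexpr size_t kCommitPageSize = 4096;  // assumption for illustration only

constexpr size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

// An object of 10,000 bytes starting 2,048 bytes into its large page keeps
// RoundUp(2048 + 10000, 4096) == 12288 committed bytes; if the page was
// previously 32,768 bytes, 32768 - 12288 == 20480 bytes can be returned to
// the OS. Otherwise only area_end() is pulled in, without freeing OS pages.
static_assert(RoundUp(2048 + 10000, kCommitPageSize) == 12288,
              "worked example");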
bool LargeObjectSpace::Contains(HeapObject object) const {
@@ -353,7 +360,8 @@ std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify(Isolate* isolate) {
+void LargeObjectSpace::Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
size_t external_backing_store_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
@@ -361,21 +369,16 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
}
PtrComprCageBase cage_base(isolate);
- for (LargePage* chunk = first_page(); chunk != nullptr;
+ for (const LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
+ visitor->VerifyPage(chunk);
+
// Each chunk contains an object that starts at the large object page's
// object area start.
HeapObject object = chunk->GetObject();
Page* page = Page::FromHeapObject(object);
CHECK(object.address() == page->area_start());
- // The first word should be a map, and we expect all map pointers to be
- // in map space or read-only space.
- Map map = object.map(cage_base);
- CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->old_space()->Contains(map));
-
// We have only the following types in the large object space:
const bool is_valid_lo_space_object = //
object.IsAbstractCode(cage_base) || //
@@ -388,6 +391,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
object.IsFixedArray(cage_base) || //
object.IsFixedDoubleArray(cage_base) || //
object.IsFreeSpace(cage_base) || //
+ object.IsInstructionStream(cage_base) || //
object.IsPreparseData(cage_base) || //
object.IsPropertyArray(cage_base) || //
object.IsScopeInfo() || //
@@ -407,45 +411,15 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
object.map(cage_base).instance_type());
}
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- if (!v8_flags.verify_heap_skip_remembered_set) {
- HeapVerifier::VerifyRememberedSetFor(heap(), object);
- }
+ // Invoke visitor on each object.
+ visitor->VerifyObject(object);
- // Byte arrays and strings don't have interior pointers.
- if (object.IsAbstractCode(cage_base)) {
- VerifyPointersVisitor code_visitor(heap());
- object.IterateBody(map, object.Size(cage_base), &code_visitor);
- } else if (object.IsFixedArray(cage_base)) {
- FixedArray array = FixedArray::cast(object);
- for (int j = 0; j < array.length(); j++) {
- Object element = array.get(j);
- if (element.IsHeapObject()) {
- HeapObject element_object = HeapObject::cast(element);
- CHECK(IsValidHeapObject(heap(), element_object));
- CHECK(element_object.map(cage_base).IsMap(cage_base));
- }
- }
- } else if (object.IsPropertyArray(cage_base)) {
- PropertyArray array = PropertyArray::cast(object);
- for (int j = 0; j < array.length(); j++) {
- Object property = array.get(j);
- if (property.IsHeapObject()) {
- HeapObject property_object = HeapObject::cast(property);
- CHECK(heap()->Contains(property_object));
- CHECK(property_object.map(cage_base).IsMap(cage_base));
- }
- }
- }
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
}
- CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ visitor->VerifyPageDone(chunk);
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
@@ -476,8 +450,7 @@ OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
: LargeObjectSpace(heap, id) {}
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
- : LargeObjectSpace(heap, NEW_LO_SPACE),
- capacity_(capacity) {}
+ : LargeObjectSpace(heap, NEW_LO_SPACE), capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
diff --git a/deps/v8/src/heap/large-spaces.h b/deps/v8/src/heap/large-spaces.h
index 576c672fff..7cdd8771e5 100644
--- a/deps/v8/src/heap/large-spaces.h
+++ b/deps/v8/src/heap/large-spaces.h
@@ -13,6 +13,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -40,7 +41,7 @@ class LargePage : public MemoryChunk {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
- HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }
+ HeapObject GetObject() const { return HeapObject::FromAddress(area_start()); }
LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
const LargePage* next_page() const {
@@ -113,10 +114,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
- virtual bool is_off_thread() const { return false; }
-
#ifdef VERIFY_HEAP
- virtual void Verify(Isolate* isolate);
+ void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final;
#endif
#ifdef DEBUG
diff --git a/deps/v8/src/heap/linear-allocation-area.h b/deps/v8/src/heap/linear-allocation-area.h
index 873dd31f7f..7b8c9c0449 100644
--- a/deps/v8/src/heap/linear-allocation-area.h
+++ b/deps/v8/src/heap/linear-allocation-area.h
@@ -106,10 +106,7 @@ class LinearAllocationArea final {
#endif // DEBUG
}
- static constexpr int kSize = 4 * kSystemPointerSize;
-
- bool enabled() const { return enabled_; }
- void SetEnabled(bool enabled) { enabled_ = enabled; }
+ static constexpr int kSize = 3 * kSystemPointerSize;
private:
// The start of the LAB. Initially coincides with `top_`. As top is moved
@@ -120,11 +117,9 @@ class LinearAllocationArea final {
Address top_ = kNullAddress;
// Limit of the LAB the denotes the end of the valid range for allocation.
Address limit_ = kNullAddress;
-
- bool enabled_ = true;
};
-static_assert(sizeof(LinearAllocationArea) <= LinearAllocationArea::kSize,
+static_assert(sizeof(LinearAllocationArea) == LinearAllocationArea::kSize,
"LinearAllocationArea's size must be small because it "
"is included in IsolateData.");
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index f4a1450b49..b39fbe33d3 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -61,7 +61,8 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
heap_->safepoint()->AddLocalHeap(this, [this] {
if (!is_main_thread()) {
- WriteBarrier::SetForThread(marking_barrier_.get());
+ saved_marking_barrier_ =
+ WriteBarrier::SetForThread(marking_barrier_.get());
if (heap_->incremental_marking()->IsMarking()) {
marking_barrier_->Activate(
heap_->incremental_marking()->IsCompacting(),
@@ -69,6 +70,8 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
? MarkingBarrierType::kMinor
: MarkingBarrierType::kMajor);
}
+
+ SetUpSharedMarking();
}
});
@@ -91,8 +94,12 @@ LocalHeap::~LocalHeap() {
CodePageHeaderModificationScope rwx_write_scope(
"Publishing of marking barrier results for Code space pages requires "
"write access to Code page headers");
- marking_barrier_->Publish();
- WriteBarrier::ClearForThread(marking_barrier_.get());
+ marking_barrier_->PublishIfNeeded();
+ marking_barrier_->PublishSharedIfNeeded();
+ MarkingBarrier* overwritten =
+ WriteBarrier::SetForThread(saved_marking_barrier_);
+ DCHECK_EQ(overwritten, marking_barrier_.get());
+ USE(overwritten);
}
});
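Editorial sketch of the save/restore contract added here: SetForThread() is treated as returning the previously installed barrier, so a LocalHeap can stash it on construction and reinstall it on destruction. A minimal thread_local stand-in with hypothetical names follows.

#include <cassert>

struct BarrierSketch {};
thread_local BarrierSketch* g_current_barrier = nullptr;

BarrierSketch* SetForThreadSketch(BarrierSketch* barrier) {
  BarrierSketch* previous = g_current_barrier;
  g_current_barrier = barrier;
  return previous;
}

void LocalHeapLifetimeSketch() {
  BarrierSketch mine;
  BarrierSketch* saved = SetForThreadSketch(&mine);        // constructor path
  // ... the thread runs with &mine installed ...
  BarrierSketch* overwritten = SetForThreadSketch(saved);  // destructor path
  assert(overwritten == &mine);                            // cf. DCHECK_EQ above
  (void)overwritten;
}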
@@ -104,32 +111,62 @@ LocalHeap::~LocalHeap() {
DCHECK(gc_epilogue_callbacks_.IsEmpty());
}
-void LocalHeap::SetUpMainThreadForTesting() { SetUpMainThread(); }
+void LocalHeap::SetUpMainThreadForTesting() {
+ Unpark();
+ SetUpMainThread();
+}
void LocalHeap::SetUpMainThread() {
DCHECK(is_main_thread());
+ DCHECK(IsRunning());
SetUp();
+ SetUpSharedMarking();
}
void LocalHeap::SetUp() {
DCHECK_NULL(old_space_allocator_);
- old_space_allocator_ =
- std::make_unique<ConcurrentAllocator>(this, heap_->old_space());
+ old_space_allocator_ = std::make_unique<ConcurrentAllocator>(
+ this, heap_->old_space(), ConcurrentAllocator::Context::kNotGC);
DCHECK_NULL(code_space_allocator_);
- code_space_allocator_ =
- std::make_unique<ConcurrentAllocator>(this, heap_->code_space());
+ code_space_allocator_ = std::make_unique<ConcurrentAllocator>(
+ this, heap_->code_space(), ConcurrentAllocator::Context::kNotGC);
DCHECK_NULL(shared_old_space_allocator_);
- if (heap_->isolate()->has_shared_heap()) {
+ if (heap_->isolate()->has_shared_space()) {
shared_old_space_allocator_ = std::make_unique<ConcurrentAllocator>(
- this, heap_->shared_allocation_space());
+ this, heap_->shared_allocation_space(),
+ ConcurrentAllocator::Context::kNotGC);
}
DCHECK_NULL(marking_barrier_);
marking_barrier_ = std::make_unique<MarkingBarrier>(this);
}
+void LocalHeap::SetUpSharedMarking() {
+#if DEBUG
+ // Ensure the thread is either in the running state or holds the safepoint
+ // lock. This guarantees that the state of incremental marking can't change
+ // concurrently (this requires a safepoint).
+ if (is_main_thread()) {
+ DCHECK(IsRunning());
+ } else {
+ heap()->safepoint()->AssertActive();
+ }
+#endif // DEBUG
+
+ Isolate* isolate = heap_->isolate();
+
+ if (isolate->has_shared_space() && !isolate->is_shared_space_isolate()) {
+ if (isolate->shared_space_isolate()
+ ->heap()
+ ->incremental_marking()
+ ->IsMarking()) {
+ marking_barrier_->ActivateShared();
+ }
+ }
+}
+
void LocalHeap::EnsurePersistentHandles() {
if (!persistent_handles_) {
persistent_handles_.reset(
@@ -451,5 +488,10 @@ void LocalHeap::NotifyObjectSizeChange(
update_invalidated_object_size);
}
+void LocalHeap::WeakenDescriptorArrays(
+ GlobalHandleVector<DescriptorArray> strong_descriptor_arrays) {
+ AsHeap()->WeakenDescriptorArrays(std::move(strong_descriptor_arrays));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index fbda60a787..afeaf19baa 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -14,6 +14,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/gc-callbacks.h"
@@ -180,6 +181,10 @@ class V8_EXPORT_PRIVATE LocalHeap {
GCType::kGCTypeMinorMarkCompact));
void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
+ // Weakens StrongDescriptorArray objects into regular DescriptorArray objects.
+ void WeakenDescriptorArrays(
+ GlobalHandleVector<DescriptorArray> strong_descriptor_arrays);
+
// Used to make SetupMainThread() available to unit tests.
void SetUpMainThreadForTesting();
@@ -310,6 +315,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
void SetUpMainThread();
void SetUp();
+ void SetUpSharedMarking();
Heap* heap_;
bool is_main_thread_;
@@ -335,6 +341,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::unique_ptr<ConcurrentAllocator> code_space_allocator_;
std::unique_ptr<ConcurrentAllocator> shared_old_space_allocator_;
+ MarkingBarrier* saved_marking_barrier_ = nullptr;
+
friend class CollectionBarrier;
friend class ConcurrentAllocator;
friend class GlobalSafepoint;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 232faabe68..13c8ccb58e 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -25,7 +25,8 @@ namespace v8 {
namespace internal {
void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
- if (marking_state()->WhiteToGrey(obj)) {
+ DCHECK(ReadOnlyHeap::Contains(obj) || heap()->Contains(obj));
+ if (marking_state()->TryMark(obj)) {
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
heap_->AddRetainer(host, obj);
@@ -34,7 +35,8 @@ void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
}
void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
- if (marking_state()->WhiteToGrey(obj)) {
+ DCHECK(ReadOnlyHeap::Contains(obj) || heap()->Contains(obj));
+ if (marking_state()->TryMark(obj)) {
local_marking_worklists()->Push(obj);
if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
heap_->AddRetainingRoot(root, obj);
@@ -44,20 +46,11 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
- non_atomic_marking_state()->WhiteToGrey(obj)) {
+ non_atomic_marking_state()->TryMark(obj)) {
local_marking_worklists_->Push(obj);
}
}
-void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
- if (marking_state()->WhiteToGrey(obj)) {
- local_marking_worklists()->Push(obj);
- if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
- heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
- }
- }
-}
-
// static
void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
HeapObject target) {
@@ -78,8 +71,7 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate()) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ if (target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::ATOMIC>(source_page,
slot.address());
} else {
@@ -94,34 +86,10 @@ void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
}
bool MarkCompactCollector::ShouldMarkObject(HeapObject object) const {
+ if (object.InReadOnlySpace()) return false;
if (V8_LIKELY(!uses_shared_heap_)) return true;
- if (v8_flags.shared_space) {
- if (is_shared_heap_isolate_) return true;
- return !object.InSharedHeap();
- } else {
- return is_shared_heap_isolate_ == object.InSharedHeap();
- }
-}
-
-template <typename MarkingState>
-template <typename T, typename TBodyDescriptor>
-int MainMarkingVisitor<MarkingState>::VisitJSObjectSubclass(Map map, T object) {
- if (!this->ShouldVisit(object)) return 0;
- this->VisitMapPointer(object);
- int size = TBodyDescriptor::SizeOf(map, object);
- TBodyDescriptor::IterateBody(map, object, size, this);
- return size;
-}
-
-template <typename MarkingState>
-template <typename T>
-int MainMarkingVisitor<MarkingState>::VisitLeftTrimmableArray(Map map,
- T object) {
- if (!this->ShouldVisit(object)) return 0;
- int size = T::SizeFor(object.length());
- this->VisitMapPointer(object);
- T::BodyDescriptor::IterateBody(map, object, size, this);
- return size;
+ if (is_shared_space_isolate_) return true;
+ return !object.InAnySharedSpace();
}
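Editorial sketch of the marking predicate after the hunk above: read-only objects are never marked; without a shared heap everything else is; the shared-space isolate marks shared objects, while client isolates skip them. Names are hypothetical.

struct ObjectInfoSketch {
  bool in_read_only_space;
  bool in_any_shared_space;
};

bool ShouldMarkSketch(const ObjectInfoSketch& o, bool uses_shared_heap,
                      bool is_shared_space_isolate) {
  if (o.in_read_only_space) return false;
  if (!uses_shared_heap) return true;
  if (is_shared_space_isolate) return true;
  return !o.in_any_shared_space;
}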
template <typename MarkingState>
@@ -132,10 +100,9 @@ void MainMarkingVisitor<MarkingState>::RecordSlot(HeapObject object, TSlot slot,
}
template <typename MarkingState>
-void MainMarkingVisitor<MarkingState>::RecordRelocSlot(Code host,
- RelocInfo* rinfo,
+void MainMarkingVisitor<MarkingState>::RecordRelocSlot(RelocInfo* rinfo,
HeapObject target) {
- MarkCompactCollector::RecordRelocSlot(host, rinfo, target);
+ MarkCompactCollector::RecordRelocSlot(rinfo, target);
}
template <LiveObjectIterationMode mode>
@@ -298,64 +265,6 @@ typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
Isolate* CollectorBase::isolate() { return heap()->isolate(); }
-class YoungGenerationMarkingTask;
-
-class PageMarkingItem : public ParallelWorkItem {
- public:
- explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- ~PageMarkingItem() = default;
-
- void Process(YoungGenerationMarkingTask* task);
-
- private:
- inline Heap* heap() { return chunk_->heap(); }
-
- void MarkUntypedPointers(YoungGenerationMarkingTask* task);
-
- void MarkTypedPointers(YoungGenerationMarkingTask* task);
-
- template <typename TSlot>
- V8_INLINE SlotCallbackResult
- CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot);
-
- MemoryChunk* chunk_;
-};
-
-enum class YoungMarkingJobType { kAtomic, kIncremental };
-
-class YoungGenerationMarkingJob : public v8::JobTask {
- public:
- YoungGenerationMarkingJob(Isolate* isolate, Heap* heap,
- MarkingWorklists* global_worklists,
- std::vector<PageMarkingItem> marking_items,
- YoungMarkingJobType young_marking_job_type)
- : isolate_(isolate),
- heap_(heap),
- global_worklists_(global_worklists),
- marking_items_(std::move(marking_items)),
- remaining_marking_items_(marking_items_.size()),
- generator_(marking_items_.size()),
- young_marking_job_type_(young_marking_job_type) {}
-
- void Run(JobDelegate* delegate) override;
- size_t GetMaxConcurrency(size_t worker_count) const override;
- bool incremental() const {
- return young_marking_job_type_ == YoungMarkingJobType::kIncremental;
- }
-
- private:
- void ProcessItems(JobDelegate* delegate);
- void ProcessMarkingItems(YoungGenerationMarkingTask* task);
-
- Isolate* isolate_;
- Heap* heap_;
- MarkingWorklists* global_worklists_;
- std::vector<PageMarkingItem> marking_items_;
- std::atomic_size_t remaining_marking_items_{0};
- IndexGenerator generator_;
- YoungMarkingJobType young_marking_job_type_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index b1a4ea1238..19e02f1bbc 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -10,6 +10,7 @@
#include "src/base/logging.h"
#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/base/v8-fallthrough.h"
@@ -31,7 +32,6 @@
#include "src/heap/evacuation-verifier-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
-#include "src/heap/global-handle-marking-visitor.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
@@ -60,12 +60,14 @@
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
+#include "src/heap/traced-handles-marking-visitor.h"
#include "src/heap/weak-object-worklists.h"
#include "src/init/v8.h"
#include "src/logging/tracing-flags.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
@@ -84,11 +86,6 @@
namespace v8 {
namespace internal {
-const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "11";
-const char* Marking::kGreyBitPattern = "10";
-const char* Marking::kImpossibleBitPattern = "01";
-
// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
static_assert(Heap::kMinObjectSizeInTaggedWords >= 2);
@@ -117,7 +114,7 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
- virtual bool IsBlack(HeapObject object) = 0;
+ virtual bool IsMarked(HeapObject object) = 0;
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
@@ -129,8 +126,7 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
VerifyCodePointer(slot);
}
@@ -153,8 +149,6 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
};
void MarkingVerifier::VerifyRoots() {
- // When verifying marking, we never want to scan conservatively the top of the
- // stack.
heap_->IterateRootsIncludingClients(
this, base::EnumSet<SkipRoot>{SkipRoot::kWeak, SkipRoot::kTopOfStack});
}
@@ -170,7 +164,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address current = object.address();
if (current < start) continue;
if (current >= end) break;
- CHECK(IsBlack(object));
+ CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object.Iterate(cage_base(), this);
next_object_must_be_here_or_later = current + size;
@@ -217,7 +211,7 @@ void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
if (!lo_space) return;
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- if (IsBlack(obj)) {
+ if (IsMarked(obj)) {
obj.Iterate(cage_base(), this);
}
}
@@ -247,8 +241,8 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->bitmap(chunk);
}
- bool IsBlack(HeapObject object) override {
- return marking_state_->IsBlack(object);
+ bool IsMarked(HeapObject object) override {
+ return marking_state_->IsMarked(object);
}
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
@@ -262,10 +256,9 @@ class FullMarkingVerifier : public MarkingVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
- // The slot might contain smi during CodeDataContainer creation, so skip it.
+ // The slot might contain smi during Code creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
@@ -275,15 +268,16 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyPointersImpl(start, end);
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject target_object = rinfo->target_object(cage_base());
- if (!host.IsWeakObject(target_object)) {
+ if (!rinfo->code().IsWeakObject(target_object)) {
VerifyHeapObjectImpl(target_object);
}
}
@@ -296,13 +290,14 @@ class FullMarkingVerifier : public MarkingVerifier {
CHECK(heap_->SharedHeapContains(heap_object));
}
- CHECK(marking_state_->IsBlack(heap_object));
+ CHECK(heap_object.InReadOnlySpace() ||
+ marking_state_->IsMarked(heap_object));
}
V8_INLINE bool ShouldVerifyObject(HeapObject heap_object) {
- const bool in_shared_heap = heap_object.InSharedWritableHeap();
- return heap_->isolate()->is_shared_heap_isolate() ? in_shared_heap
- : !in_shared_heap;
+ const bool in_shared_heap = heap_object.InWritableSharedSpace();
+ return heap_->isolate()->is_shared_space_isolate() ? in_shared_heap
+ : !in_shared_heap;
}
template <typename TSlot>
@@ -402,66 +397,12 @@ void CollectorBase::StartSweepSpace(PagedSpace* space) {
}
}
-void CollectorBase::StartSweepNewSpace() {
- PagedSpaceForNewSpace* paged_space = heap()->paged_new_space()->paged_space();
- paged_space->ClearAllocatorState();
-
- int will_be_swept = 0;
-
- DCHECK_EQ(Heap::ResizeNewSpaceMode::kNone, resize_new_space_);
- resize_new_space_ = heap()->ShouldResizeNewSpace();
- if (resize_new_space_ == Heap::ResizeNewSpaceMode::kShrink) {
- paged_space->StartShrinking();
- }
-
- Sweeper* sweeper = heap()->sweeper();
-
- for (auto it = paged_space->begin(); it != paged_space->end();) {
- Page* p = *(it++);
- DCHECK(p->SweepingDone());
-
- if (non_atomic_marking_state()->live_bytes(p) > 0) {
- // Non-empty pages will be evacuated/promoted.
- continue;
- }
-
- if ((resize_new_space_ == ResizeNewSpaceMode::kShrink) &&
- paged_space->ShouldReleasePage()) {
- paged_space->ReleasePage(p);
- } else {
- sweeper->AddNewSpacePage(p);
- }
- will_be_swept++;
- }
-
- if (v8_flags.gc_verbose) {
- PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
- paged_space->name(), will_be_swept);
- }
-}
-
-void CollectorBase::SweepLargeSpace(LargeObjectSpace* space) {
- auto* marking_state = heap()->non_atomic_marking_state();
- PtrComprCageBase cage_base(heap()->isolate());
- size_t surviving_object_size = 0;
- for (auto it = space->begin(); it != space->end();) {
- LargePage* current = *(it++);
- HeapObject object = current->GetObject();
- DCHECK(!marking_state->IsGrey(object));
- if (!marking_state->IsBlack(object)) {
- // Object is dead and page can be released.
- space->RemovePage(current);
- heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
- current);
+bool CollectorBase::IsCppHeapMarkingFinished() const {
+ const auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ if (!cpp_heap) return true;
- continue;
- }
- Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
- current->ProgressBar().ResetIfEnabled();
- non_atomic_marking_state()->SetLiveBytes(current, 0);
- surviving_object_size += static_cast<size_t>(object.Size(cage_base));
- }
- space->set_objects_size(surviving_object_size);
+ return cpp_heap->IsTracingDone() &&
+ local_marking_worklists()->IsWrapperEmpty();
}
MarkCompactCollector::MarkCompactCollector(Heap* heap)
@@ -469,25 +410,20 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
- uses_shared_heap_(isolate()->has_shared_heap() || isolate()->is_shared()),
- is_shared_heap_isolate_(isolate()->is_shared_heap_isolate()),
+ uses_shared_heap_(isolate()->has_shared_space()),
+ is_shared_space_isolate_(isolate()->is_shared_space_isolate()),
sweeper_(heap_->sweeper()) {
}
MarkCompactCollector::~MarkCompactCollector() = default;
-void MarkCompactCollector::SetUp() {
- DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
- DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
- DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
- DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
-}
+void MarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
if (heap()->incremental_marking()->IsMajorMarking()) {
local_marking_worklists()->Publish();
- heap()->main_thread_local_heap()->marking_barrier()->Publish();
+ heap()->main_thread_local_heap()->marking_barrier()->PublishIfNeeded();
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
local_weak_objects()->Publish();
@@ -559,6 +495,21 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
return compacting_;
}
+namespace {
+void VisitObjectWithEmbedderFields(JSObject object,
+ MarkingWorklists::Local& worklist) {
+ DCHECK(object.MayHaveEmbedderFields());
+ DCHECK(!Heap::InYoungGeneration(object));
+
+ MarkingWorklists::Local::WrapperSnapshot wrapper_snapshot;
+ const bool valid_snapshot =
+ worklist.ExtractWrapper(object.map(), object, wrapper_snapshot);
+ DCHECK(valid_snapshot);
+ USE(valid_snapshot);
+ worklist.PushExtractedWrapper(wrapper_snapshot);
+}
+} // namespace
+
void MarkCompactCollector::StartMarking() {
std::vector<Address> contexts =
heap()->memory_measurement()->StartProcessing();
@@ -579,8 +530,7 @@ void MarkCompactCollector::StartMarking() {
local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), local_weak_objects_.get(),
- heap_, epoch(), code_flush_mode(),
- heap_->local_embedder_heap_tracer()->InUse(),
+ heap_, epoch(), code_flush_mode(), heap_->cpp_heap(),
heap_->ShouldCurrentGCKeepAgesUnchanged());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
@@ -607,14 +557,6 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
-void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
- ReadOnlyHeapObjectIterator iterator(space);
- for (HeapObject object = iterator.Next(); !object.is_null();
- object = iterator.Next()) {
- CHECK(non_atomic_marking_state()->IsBlack(object));
- }
-}
-
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpaceBase* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
@@ -638,7 +580,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
if (!space) return;
LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- CHECK(non_atomic_marking_state()->IsWhite(obj));
+ CHECK(non_atomic_marking_state()->IsUnmarked(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
MemoryChunk::FromHeapObject(obj)));
}
@@ -648,9 +590,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->new_space());
- // Read-only space should always be black since we never collect any objects
- // in it or linked from it.
- VerifyMarkbitsAreDirty(heap_->read_only_space());
VerifyMarkbitsAreClean(heap_->lo_space());
VerifyMarkbitsAreClean(heap_->code_lo_space());
VerifyMarkbitsAreClean(heap_->new_lo_space());
@@ -740,7 +679,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
pages.reserve(number_of_pages);
CodePageHeaderModificationScope rwx_write_scope(
- "Modification of Code page header flags requires write access");
+ "Modification of Code page header flags requires write "
+ "access");
DCHECK(!sweeper()->sweeping_in_progress());
Page* owner_of_linear_allocation_area =
@@ -869,12 +809,11 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
CodePageHeaderModificationScope rwx_write_scope(
- "Changing Code page flags and remembered sets require write access "
+ "Changing Code page flags and remembered sets require "
+ "write access "
"to the page header");
RememberedSet<OLD_TO_OLD>::ClearAll(heap());
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- RememberedSet<OLD_TO_CODE>::ClearAll(heap());
- }
+ RememberedSet<OLD_TO_CODE>::ClearAll(heap());
for (Page* p : evacuation_candidates_) {
p->ClearEvacuationCandidate();
}
@@ -897,20 +836,20 @@ void MarkCompactCollector::Prepare() {
DCHECK(!heap_->memory_allocator()->unmapper()->IsRunning());
if (!heap()->incremental_marking()->IsMarking()) {
- const auto embedder_flags = heap_->flags_for_embedder_tracer();
- {
+ if (heap()->cpp_heap()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
- // PrepareForTrace should be called before visitor initialization in
+ // InitializeTracing should be called before visitor initialization in
// StartMarking.
- heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
+ CppHeap::From(heap()->cpp_heap())
+ ->InitializeTracing(CppHeap::CollectionType::kMajor);
}
StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
- {
+ if (heap()->cpp_heap()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
- // TracePrologue immediately starts marking which requires V8 worklists to
+ // StartTracing immediately starts marking which requires V8 worklists to
// be set up.
- heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
+ CppHeap::From(heap()->cpp_heap())->StartTracing();
}
#ifdef V8_COMPRESS_POINTERS
heap_->isolate()->external_pointer_table().StartCompactingIfNeeded();
@@ -948,17 +887,13 @@ void MarkCompactCollector::VerifyMarking() {
if (v8_flags.verify_heap) {
FullMarkingVerifier verifier(heap());
verifier.Run();
- }
-#endif
-#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap) {
heap()->old_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
if (heap()->shared_space()) heap()->shared_space()->VerifyLiveBytes();
if (v8_flags.minor_mc && heap()->paged_new_space())
heap()->paged_new_space()->paged_space()->VerifyLiveBytes();
}
-#endif
+#endif // VERIFY_HEAP
}
namespace {
@@ -982,6 +917,18 @@ void MarkCompactCollector::Finish() {
{
TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_SWEEP,
ThreadKind::kMain);
+
+ DCHECK_IMPLIES(!v8_flags.minor_mc,
+ empty_new_space_pages_to_be_swept_.empty());
+ if (!empty_new_space_pages_to_be_swept_.empty()) {
+ GCTracer::Scope sweep_scope(
+ heap()->tracer(), GCTracer::Scope::MC_SWEEP_NEW, ThreadKind::kMain);
+ for (Page* p : empty_new_space_pages_to_be_swept_) {
+ sweeper()->SweepEmptyNewSpacePage(p);
+ }
+ empty_new_space_pages_to_be_swept_.clear();
+ }
+
if (heap()->new_lo_space()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP_NEW_LO);
SweepLargeSpace(heap()->new_lo_space());
@@ -1015,6 +962,8 @@ void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+ if (heap()->new_space()) heap()->new_space()->GarbageCollectionEpilogue();
+
auto* isolate = heap()->isolate();
isolate->global_handles()->ClearListOfYoungNodes();
isolate->traced_handles()->ClearListOfYoungNodes();
@@ -1041,7 +990,6 @@ void MarkCompactCollector::Finish() {
// Shrink pages if possible after processing and filtering slots.
ShrinkPagesToObjectSizes(heap(), heap()->lo_space());
- ShrinkPagesToObjectSizes(heap(), heap()->code_lo_space());
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -1057,8 +1005,11 @@ void MarkCompactCollector::Finish() {
void MarkCompactCollector::SweepArrayBufferExtensions() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
+ DCHECK_IMPLIES(heap_->new_space(), heap_->new_space()->Size() == 0);
+ DCHECK_IMPLIES(heap_->new_lo_space(), heap_->new_lo_space()->Size() == 0);
heap_->array_buffer_sweeper()->RequestSweep(
- ArrayBufferSweeper::SweepingType::kFull);
+ ArrayBufferSweeper::SweepingType::kFull,
+ ArrayBufferSweeper::TreatAllYoungAsPromoted::kYes);
}
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
@@ -1079,40 +1030,26 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
}
}
- void VisitRunningCode(FullObjectSlot p) final {
- // If Code is currently executing, then we must not remove its
- // deoptimization literals, which it might need in order to successfully
- // deoptimize.
- //
- // Must match behavior in RootsReferencesExtractor::VisitRunningCode, so
- // that heap snapshots accurately describe the roots.
- HeapObject value = HeapObject::cast(*p);
- if (V8_EXTERNAL_CODE_SPACE_BOOL && !IsCodeSpaceObject(value)) {
- // When external code space is enabled, the slot might contain a CodeT
- // object representing an embedded builtin, which doesn't require
- // additional processing.
- DCHECK(CodeT::cast(value).is_off_heap_trampoline());
- } else {
- Code code = Code::cast(value);
- if (code.kind() != CodeKind::BASELINE) {
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(code.deoptimization_data());
- if (deopt_data.length() > 0) {
- DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
- int literals_length = literals.length();
- for (int i = 0; i < literals_length; ++i) {
- MaybeObject maybe_literal = literals.Get(i);
- HeapObject heap_literal;
- if (maybe_literal.GetHeapObject(&heap_literal)) {
- MarkObjectByPointer(Root::kStackRoots,
- FullObjectSlot(&heap_literal));
- }
- }
- }
- }
+ // Keep this synced with RootsReferencesExtractor::VisitRunningCode.
+ void VisitRunningCode(FullObjectSlot code_slot,
+ FullObjectSlot istream_or_smi_zero_slot) final {
+ Object istream_or_smi_zero = *istream_or_smi_zero_slot;
+ DCHECK(istream_or_smi_zero == Smi::zero() ||
+ istream_or_smi_zero.IsInstructionStream());
+ Code code = Code::cast(*code_slot);
+ DCHECK_EQ(code.raw_instruction_stream(
+ PtrComprCageBase{collector_->isolate()->code_cage_base()}),
+ istream_or_smi_zero);
+
+ // We must not remove deoptimization literals which may be needed in
+ // order to successfully deoptimize.
+ code.IterateDeoptimizationLiterals(this);
+
+ if (istream_or_smi_zero != Smi::zero()) {
+ VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot);
}
- // And then mark the Code itself.
- VisitRootPointer(Root::kStackRoots, nullptr, p);
+
+ VisitRootPointer(Root::kStackRoots, nullptr, code_slot);
}
private:
@@ -1131,10 +1068,11 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
// other roots.
//
// It is currently used for
-// - Code held alive by the top optimized frame. This code cannot be deoptimized
-// and thus have to be kept alive in an isolate way, i.e., it should not keep
-// alive other code objects reachable through the weak list but they should
-// keep alive its embedded pointers (which would otherwise be dropped).
+// - InstructionStream held alive by the top optimized frame. This code cannot
+// be deoptimized and thus has to be kept alive in an isolated way, i.e., it
+// should not keep alive other code objects reachable through the weak list,
+// but it should keep alive its embedded pointers (which would otherwise be
+// dropped).
// - Prefix of the string table.
// - If V8_ENABLE_SANDBOX, client Isolates' waiter queue node
// ExternalPointer_t in shared Isolates.
@@ -1162,8 +1100,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
}
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
MarkObject(host, slot.load(code_cage_base()));
}
@@ -1173,13 +1110,14 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
UNREACHABLE();
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- MarkObject(host, target);
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
+ MarkObject(rinfo->instruction_stream(), target);
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- MarkObject(host, rinfo->target_object(cage_base()));
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+ MarkObject(rinfo->instruction_stream(), rinfo->target_object(cage_base()));
}
private:
@@ -1193,6 +1131,61 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkCompactCollector* const collector_;
};
+class MarkCompactCollector::ClientCustomRootBodyMarkingVisitor final
+ : public ObjectVisitorWithCageBases {
+ public:
+ explicit ClientCustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector) {}
+
+ void VisitPointer(HeapObject host, ObjectSlot p) final {
+ MarkObject(host, p.load(cage_base()));
+ }
+
+ void VisitMapPointer(HeapObject host) final {
+ MarkObject(host, host.map(cage_base()));
+ }
+
+ void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), p);
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ MarkObject(host, p.load(cage_base()));
+ }
+ }
+
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
+ MarkObject(host, slot.load(code_cage_base()));
+ }
+
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ // At the moment, custom roots cannot contain weak pointers.
+ UNREACHABLE();
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
+ MarkObject(rinfo->instruction_stream(), target);
+ }
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+ MarkObject(rinfo->instruction_stream(), rinfo->target_object(cage_base()));
+ }
+
+ private:
+ V8_INLINE void MarkObject(HeapObject host, Object object) {
+ if (!object.IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (!heap_object.InWritableSharedSpace()) return;
+ collector_->MarkObject(host, heap_object);
+ }
+
+ MarkCompactCollector* const collector_;
+};
+
class MarkCompactCollector::SharedHeapObjectVisitor final
: public ObjectVisitorWithCageBases {
public:
@@ -1224,7 +1217,7 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
}
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
UNREACHABLE();
}
@@ -1237,20 +1230,18 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
}
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
+ void VisitCodeTarget(RelocInfo* rinfo) override { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- UNREACHABLE();
- }
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override { UNREACHABLE(); }
private:
V8_INLINE void CheckForSharedObject(HeapObject host, ObjectSlot slot,
Object object) {
- DCHECK(!host.InSharedHeap());
+ DCHECK(!host.InAnySharedSpace());
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- if (!heap_object.InSharedWritableHeap()) return;
- DCHECK(heap_object.InSharedWritableHeap());
+ if (!heap_object.InWritableSharedSpace()) return;
+ DCHECK(heap_object.InWritableSharedSpace());
MemoryChunk* host_chunk = MemoryChunk::FromHeapObject(host);
DCHECK(host_chunk->InYoungGeneration());
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
@@ -1282,7 +1273,8 @@ class InternalizedStringTableCleaner final : public RootVisitor {
if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
DCHECK(!Heap::InYoungGeneration(heap_object));
- if (marking_state->IsWhite(heap_object)) {
+ if (!heap_object.InReadOnlySpace() &&
+ marking_state->IsUnmarked(heap_object)) {
pointers_removed_++;
// Set the entry to the_hole_value (as deleted).
p.store(StringTable::deleted_element());
@@ -1298,6 +1290,9 @@ class InternalizedStringTableCleaner final : public RootVisitor {
int pointers_removed_ = 0;
};
+enum class ExternalStringTableCleaningMode { kAll, kYoungOnly };
+
+template <ExternalStringTableCleaningMode mode>
class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
@@ -1305,23 +1300,28 @@ class ExternalStringTableCleaner : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
+ DCHECK_EQ(static_cast<int>(root),
+ static_cast<int>(Root::kExternalStringsTable));
NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
- if (o.IsHeapObject()) {
- HeapObject heap_object = HeapObject::cast(o);
- if (marking_state->IsWhite(heap_object)) {
- if (o.IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(o));
- } else {
- // The original external string may have been internalized.
- DCHECK(o.IsThinString());
- }
- // Set the entry to the_hole_value (as deleted).
- p.store(the_hole);
- }
+ if (!o.IsHeapObject()) continue;
+ HeapObject heap_object = HeapObject::cast(o);
+ // MinorMC doesn't update the young strings set and so it may contain
+ // strings that are already in old space.
+ if (!marking_state->IsUnmarked(heap_object)) continue;
+ if ((mode == ExternalStringTableCleaningMode::kYoungOnly) &&
+ !Heap::InYoungGeneration(heap_object))
+ continue;
+ if (o.IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(o));
+ } else {
+ // The original external string may have been internalized.
+ DCHECK(o.IsThinString());
}
+ // Set the entry to the_hole_value (as deleted).
+ p.store(the_hole);
}
}
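// [Editor's note] Illustrative, self-contained sketch of the cleaning rule in
// ExternalStringTableCleaner above; types and names are simplified stand-ins,
// not V8 internals.
#include <vector>

enum class CleaningMode { kAll, kYoungOnly };

struct Entry {
  bool is_heap_object;
  bool is_marked;
  bool is_young;
  bool is_external;  // otherwise assumed to be a ThinString
  bool deleted = false;
};

template <CleaningMode mode>
void CleanExternalStringTable(std::vector<Entry>& table) {
  for (Entry& e : table) {
    if (!e.is_heap_object) continue;
    if (e.is_marked) continue;  // Live entries are kept as-is.
    // In young-only mode, unmarked old entries are left alone: the young
    // strings set may still list strings that were already promoted.
    if (mode == CleaningMode::kYoungOnly && !e.is_young) continue;
    if (e.is_external) {
      // A real implementation would release the external resource here
      // (FinalizeExternalString in V8 terms).
    }
    e.deleted = true;  // "Store the hole": the slot now reads as deleted.
  }
}
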
@@ -1361,7 +1361,7 @@ class MarkExternalPointerFromExternalStringTable : public RootVisitor {
: table_(table) {}
void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
ExternalPointerTag tag) override {
- if (!IsSandboxedExternalPointerType(tag)) return;
+ DCHECK_NE(tag, kExternalPointerNullTag);
DCHECK(IsSharedExternalPointerType(tag));
ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
table_->Mark(handle, slot.address());
@@ -1374,15 +1374,11 @@ class MarkExternalPointerFromExternalStringTable : public RootVisitor {
MaybeObjectSlot end) override {
UNREACHABLE();
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- UNREACHABLE();
- }
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- UNREACHABLE();
- }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
UNREACHABLE();
}
+ void VisitCodeTarget(RelocInfo* rinfo) override { UNREACHABLE(); }
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override { UNREACHABLE(); }
private:
ExternalPointerTable* table_;
@@ -1402,7 +1398,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
Object RetainAs(Object object) override {
HeapObject heap_object = HeapObject::cast(object);
DCHECK(!marking_state_->IsGrey(heap_object));
- if (marking_state_->IsBlack(heap_object)) {
+ if (marking_state_->IsMarked(heap_object)) {
return object;
} else if (object.IsAllocationSite() &&
!(AllocationSite::cast(object).IsZombie())) {
@@ -1416,7 +1412,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
// marking
nested = current_site.nested_site();
current_site.MarkZombie();
- marking_state_->WhiteToBlack(current_site);
+ marking_state_->TryMarkAndAccountLiveBytes(current_site);
}
return object;
@@ -1468,8 +1464,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
}
}
- inline void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ inline void VisitCodePointer(Code host, CodeObjectSlot slot) final {
// This code is similar to the implementation of VisitPointer() modulo
// new kind of slot.
DCHECK(!HasWeakHeapObjectTag(slot.load(code_cage_base())));
@@ -1482,6 +1477,14 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
DCHECK(host.IsEphemeronHashTable());
DCHECK(!Heap::InYoungGeneration(host));
+ if (v8_flags.minor_mc) {
+ // Minor MC lacks support for specialized generational ephemeron barriers.
+ // The regular write barrier works as well but keeps more memory alive.
+ // TODO(v8:12612): Add support to MinorMC.
+ ObjectVisitorWithCageBases::VisitEphemeron(host, index, key, value);
+ return;
+ }
+
VisitPointer(host, value);
if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
@@ -1494,37 +1497,34 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
}
}
- inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- DCHECK_EQ(host, rinfo->host());
+ inline void VisitCodeTarget(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!Heap::InYoungGeneration(target));
- DCHECK(!target.InSharedWritableHeap());
- heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
+ DCHECK(!target.InWritableSharedSpace());
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
- inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- DCHECK_EQ(host, rinfo->host());
+ inline void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = rinfo->target_object(cage_base());
- GenerationalBarrierForCode(host, rinfo, object);
- WriteBarrier::Shared(host, rinfo, object);
- heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
+ GenerationalBarrierForCode(rinfo, object);
+ WriteBarrier::Shared(rinfo->instruction_stream(), rinfo, object);
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, object);
}
// Entries that are skipped for recording.
- inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
- inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(RelocInfo* rinfo) final {}
+ inline void VisitInternalReference(RelocInfo* rinfo) final {}
inline void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
ExternalPointerTag tag) final {}
- virtual void MarkArrayBufferExtensionPromoted(HeapObject object) {}
-
protected:
- inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
- Address slot) {
+ inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
+ Address slot) {
if (value->IsStrongOrWeak()) {
BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
@@ -1537,15 +1537,14 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
DCHECK(chunk->SweepingDone());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate()) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ if (p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
} else {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
- } else if (p->InSharedHeap() && !host.InSharedWritableHeap()) {
+ } else if (p->InWritableSharedSpace() && !host.InWritableSharedSpace()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
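// [Editor's note] Simplified decision table for the remembered-set choice made
// in RecordMigratedSlot above; stand-in types, not the V8 API.
enum class RememberedSetKind { kNone, kOldToNew, kOldToCode, kOldToOld, kOldToShared };

struct PageInfo {
  bool in_young_generation;
  bool is_evacuation_candidate;
  bool is_executable;
  bool in_writable_shared_space;
};

// The slot is recorded on the host's chunk; the kind depends on where the
// referenced value lives.
RememberedSetKind ChooseRememberedSet(const PageInfo& value_page,
                                      bool host_in_writable_shared_space) {
  if (value_page.in_young_generation) return RememberedSetKind::kOldToNew;
  if (value_page.is_evacuation_candidate) {
    return value_page.is_executable ? RememberedSetKind::kOldToCode
                                    : RememberedSetKind::kOldToOld;
  }
  if (value_page.in_writable_shared_space && !host_in_writable_shared_space) {
    return RememberedSetKind::kOldToShared;
  }
  return RememberedSetKind::kNone;
}
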
@@ -1574,9 +1573,15 @@ class ProfilingMigrationObserver final : public MigrationObserver {
inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
- if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
- PROFILE(heap_->isolate(),
- CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
+ // Note this method is called in a concurrent setting. The current object
+ // (src and dst) is somewhat safe to access without precautions, but other
+ // objects may be subject to concurrent modification.
+ if (dest == CODE_SPACE) {
+ PROFILE(heap_->isolate(), CodeMoveEvent(InstructionStream::cast(src),
+ InstructionStream::cast(dst)));
+ } else if (dest == OLD_SPACE && dst.IsBytecodeArray()) {
+ PROFILE(heap_->isolate(), BytecodeMoveEvent(BytecodeArray::cast(src),
+ BytecodeArray::cast(dst)));
}
heap_->OnMoveEvent(src, dst, size);
}
@@ -1602,13 +1607,12 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
void SetUpAbortEvacuationAtAddress(MemoryChunk* chunk) {
if (v8_flags.stress_compaction || v8_flags.stress_compaction_random) {
- // Stress aborting of evacuation by aborting ~10% of evacuation candidates
+ // Stress aborting of evacuation by aborting ~5% of evacuation candidates
// when stress testing.
const double kFraction = 0.05;
- if (heap_->isolate()->fuzzer_rng()->NextDouble() < kFraction) {
- const double abort_evacuation_percentage =
- heap_->isolate()->fuzzer_rng()->NextDouble();
+ if (rng_->NextDouble() < kFraction) {
+ const double abort_evacuation_percentage = rng_->NextDouble();
abort_evacuation_at_address_ =
chunk->area_start() +
abort_evacuation_percentage * chunk->area_size();
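// [Editor's note] Sketch of the stress-abort sampling above, using the C++
// standard library in place of V8's fuzzer RNG; not part of this CL.
#include <cstddef>
#include <cstdint>
#include <random>

// Returns 0 ("no abort") most of the time; otherwise a uniformly random
// address within [area_start, area_start + area_size) at which evacuation of
// the candidate page will be aborted.
uintptr_t PickAbortAddress(std::mt19937_64& rng, uintptr_t area_start,
                           size_t area_size, double fraction = 0.05) {
  std::uniform_real_distribution<double> unit(0.0, 1.0);
  if (unit(rng) >= fraction) return 0;
  const double percentage = unit(rng);
  return area_start + static_cast<uintptr_t>(percentage * area_size);
}
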
@@ -1653,9 +1657,6 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
// In case the object's map gets relocated during GC we load the old map
// here. This is fine since they store the same content.
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
- if (V8_UNLIKELY(v8_flags.minor_mc)) {
- base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
- }
} else if (dest == SHARED_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kTaggedSize));
@@ -1666,8 +1667,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
- Code code = Code::cast(dst);
- code.Relocate(dst_addr - src_addr);
+ InstructionStream istream = InstructionStream::cast(dst);
+ istream.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
// In case the object's map gets relocated during GC we load the old map
@@ -1680,7 +1681,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
+ src.set_map_word_forwarded(dst, kRelaxedStore);
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
@@ -1691,8 +1692,11 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
shared_old_allocator_(shared_old_allocator),
record_visitor_(record_visitor),
shared_string_table_(v8_flags.shared_string_table &&
- heap->isolate()->has_shared_heap()) {
+ heap->isolate()->has_shared_space()) {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
+#if DEBUG
+ rng_.emplace(heap_->isolate()->fuzzer_rng()->NextInt64());
+#endif // DEBUG
}
inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
@@ -1712,7 +1716,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation;
if (target_space == OLD_SPACE && ShouldPromoteIntoSharedHeap(map)) {
- if (heap_->isolate()->is_shared_heap_isolate()) {
+ if (heap_->isolate()->is_shared_space_isolate()) {
DCHECK_NULL(shared_old_allocator_);
allocation = local_allocator_->Allocate(
SHARED_SPACE, size, AllocationOrigin::kGC, alignment);
@@ -1726,10 +1730,11 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
if (allocation.To(target_object)) {
MigrateObject(*target_object, object, size, target_space);
- if (target_space == CODE_SPACE)
+ if (target_space == CODE_SPACE) {
MemoryChunk::FromHeapObject(*target_object)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject((*target_object).address());
+ }
return true;
}
return false;
@@ -1765,6 +1770,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
#if DEBUG
Address abort_evacuation_at_address_{kNullAddress};
#endif // DEBUG
+ base::Optional<base::RandomNumberGenerator> rng_;
};
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
@@ -1773,8 +1779,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
Heap* heap, EvacuationAllocator* local_allocator,
ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor,
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
- AlwaysPromoteYoung always_promote_young)
+ PretenuringHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
: EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
@@ -1783,42 +1788,22 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
pretenuring_handler_(heap_->pretenuring_handler()),
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
- always_promote_young_(always_promote_young) {}
+ shortcut_strings_(!heap_->IsGCWithStack() ||
+ v8_flags.shortcut_strings_with_stack) {}
inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
- if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
- pretenuring_handler_->UpdateAllocationSite(object.map(), object,
- local_pretenuring_feedback_);
-
- if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
- heap_->FatalProcessOutOfMemory(
- "MarkCompactCollector: young object promotion failed");
- }
-
- promoted_size_ += size;
- return true;
- }
-
- DCHECK(!v8_flags.minor_mc);
-
- if (heap_->new_space()->ShouldBePromoted(object.address()) &&
- TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
- // Full GCs use AlwaysPromoteYoung::kYes above and MinorMC should never
- // move objects.
- promoted_size_ += size;
- return true;
- }
-
pretenuring_handler_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
- HeapObject target;
- AllocationSpace space = AllocateTargetObject(object, size, &target);
- MigrateObject(HeapObject::cast(target), object, size, space);
- semispace_copied_size_ += size;
+ if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
+ heap_->FatalProcessOutOfMemory(
+ "MarkCompactCollector: young object promotion failed");
+ }
+
+ promoted_size_ += size;
return true;
}
@@ -1829,14 +1814,15 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline bool TryEvacuateWithoutCopy(HeapObject object) {
DCHECK(!is_incremental_marking_);
+ if (!shortcut_strings_) return false;
+
Map map = object.map();
// Some objects can be evacuated without creating a copy.
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object.set_map_word(MapWord::FromForwardingAddress(actual),
- kRelaxedStore);
+ object.set_map_word_forwarded(actual, kRelaxedStore);
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1875,10 +1861,10 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
LocalAllocationBuffer buffer_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
- PretenturingHandler* const pretenuring_handler_;
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
+ PretenuringHandler* const pretenuring_handler_;
+ PretenuringHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
- AlwaysPromoteYoung always_promote_young_;
+ const bool shortcut_strings_;
};
template <PageEvacuationMode mode>
@@ -1886,7 +1872,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateNewSpacePageVisitor(
Heap* heap, RecordMigratedSlotVisitor* record_visitor,
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
+ PretenuringHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
: heap_(heap),
record_visitor_(record_visitor),
moved_bytes_(0),
@@ -1916,12 +1902,9 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
pretenuring_handler_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
}
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
+ DCHECK(!IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
object.IterateFast(cage_base, record_visitor_);
- if (V8_UNLIKELY(v8_flags.minor_mc)) {
- record_visitor_->MarkArrayBufferExtensionPromoted(object);
- }
}
return true;
}
@@ -1933,8 +1916,8 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
Heap* heap_;
RecordMigratedSlotVisitor* record_visitor_;
intptr_t moved_bytes_;
- PretenturingHandler* const pretenuring_handler_;
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
+ PretenuringHandler* const pretenuring_handler_;
+ PretenuringHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
@@ -1969,7 +1952,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
}
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
V8_INLINE PtrComprCageBase cage_base() const {
#ifdef V8_COMPRESS_POINTERS
return cage_base_;
@@ -1995,180 +1978,66 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
#endif // V8_COMPRESS_POINTERS
};
+// static
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
Object o = *p;
if (!o.IsHeapObject()) return false;
HeapObject heap_object = HeapObject::cast(o);
- return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
- heap_object);
+ if (heap_object.InReadOnlySpace()) return false;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (V8_UNLIKELY(collector->uses_shared_heap_) &&
+ !collector->is_shared_space_isolate_) {
+ if (heap_object.InWritableSharedSpace()) return false;
+ }
+ return collector->non_atomic_marking_state()->IsUnmarked(heap_object);
+}
+
+// static
+bool MarkCompactCollector::IsUnmarkedSharedHeapObject(Heap* heap,
+ FullObjectSlot p) {
+ Object o = *p;
+ if (!o.IsHeapObject()) return false;
+ HeapObject heap_object = HeapObject::cast(o);
+ Isolate* shared_space_isolate = heap->isolate()->shared_space_isolate();
+ MarkCompactCollector* collector =
+ shared_space_isolate->heap()->mark_compact_collector();
+ if (!heap_object.InWritableSharedSpace()) return false;
+ return collector->non_atomic_marking_state()->IsUnmarked(heap_object);
}
-void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
- ObjectVisitor* custom_root_body_visitor) {
+void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
heap()->IterateRootsIncludingClients(
- root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak, SkipRoot::kStack});
+ root_visitor,
+ base::EnumSet<SkipRoot>{SkipRoot::kWeak, SkipRoot::kConservativeStack});
+
+ MarkWaiterQueueNode(isolate());
// Custom marking for top optimized frame.
- ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
+ CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
+ ProcessTopOptimizedFrame(&custom_root_body_visitor, isolate());
- if (isolate()->is_shared_heap_isolate()) {
+ if (isolate()->is_shared_space_isolate()) {
+ ClientCustomRootBodyMarkingVisitor client_custom_root_body_visitor(this);
isolate()->global_safepoint()->IterateClientIsolates(
- [this, custom_root_body_visitor](Isolate* client) {
- ProcessTopOptimizedFrame(custom_root_body_visitor, client);
+ [this, &client_custom_root_body_visitor](Isolate* client) {
+ ProcessTopOptimizedFrame(&client_custom_root_body_visitor, client);
});
}
-
- if (!heap_->cpp_heap() && heap_->local_embedder_heap_tracer()->InUse()) {
- // Conservative global handle scanning is necessary for keeping
- // v8::TracedReference alive from the stack. This is only needed when using
- // `EmbedderHeapTracer` and not using `CppHeap`.
- auto& stack = heap()->stack();
- if (stack.stack_start() &&
- heap_->local_embedder_heap_tracer()->embedder_stack_state() ==
- cppgc::EmbedderStackState::kMayContainHeapPointers) {
- GlobalHandleMarkingVisitor global_handles_marker(
- *heap_, *local_marking_worklists_);
- stack.IteratePointers(&global_handles_marker);
- }
- }
}
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_MB
-namespace {
-
-// This utility function returns the highest address in the page that is lower
-// than maybe_inner_ptr, has its markbit set, and whose previous address (if it
-// exists) does not have its markbit set. This address is guaranteed to be the
-// start of a valid object in the page. In case the markbit corresponding to
-// maybe_inner_ptr is set, the function bails out and returns kNullAddress.
-Address FindPreviousObjectForConservativeMarking(const Page* page,
- Address maybe_inner_ptr) {
- auto* bitmap = page->marking_bitmap<AccessMode::NON_ATOMIC>();
- const MarkBit::CellType* cells = bitmap->cells();
-
- // The first actual bit of the bitmap, corresponding to page->area_start(),
- // is at start_index which is somewhere in (not necessarily at the start of)
- // start_cell_index.
- const uint32_t start_index = page->AddressToMarkbitIndex(page->area_start());
- const uint32_t start_cell_index = Bitmap::IndexToCell(start_index);
- // We assume that all markbits before start_index are clear:
- // SLOW_DCHECK(bitmap->AllBitsClearInRange(0, start_index));
- // This has already been checked for the entire bitmap before starting marking
- // by MarkCompactCollector::VerifyMarkbitsAreClean.
-
- const uint32_t index = page->AddressToMarkbitIndex(maybe_inner_ptr);
- uint32_t cell_index = Bitmap::IndexToCell(index);
- const MarkBit::CellType mask = 1u << Bitmap::IndexInCell(index);
- MarkBit::CellType cell = cells[cell_index];
-
- // If the markbit is already set, bail out.
- if ((cell & mask) != 0) return kNullAddress;
-
- // Clear the bits corresponding to higher addresses in the cell.
- cell &= ((~static_cast<MarkBit::CellType>(0)) >>
- (Bitmap::kBitsPerCell - Bitmap::IndexInCell(index) - 1));
-
- // Traverse the bitmap backwards, until we find a markbit that is set and
- // whose previous markbit (if it exists) is unset.
- // First, iterate backwards to find a cell with any set markbit.
- while (cell == 0 && cell_index > start_cell_index) cell = cells[--cell_index];
- if (cell == 0) {
- DCHECK_EQ(start_cell_index, cell_index);
- // We have reached the start of the page.
- return page->area_start();
- }
-
- // We have found such a cell.
- const uint32_t leading_zeros = base::bits::CountLeadingZeros(cell);
- const uint32_t leftmost_ones =
- base::bits::CountLeadingZeros(~(cell << leading_zeros));
- const uint32_t index_of_last_leftmost_one =
- Bitmap::kBitsPerCell - leading_zeros - leftmost_ones;
-
- // If the leftmost sequence of set bits does not reach the start of the cell,
- // we found it.
- if (index_of_last_leftmost_one > 0) {
- return page->MarkbitIndexToAddress(cell_index * Bitmap::kBitsPerCell +
- index_of_last_leftmost_one);
- }
-
- // The leftmost sequence of set bits reaches the start of the cell. We must
- // keep traversing backwards until we find the first unset markbit.
- if (cell_index == start_cell_index) {
- // We have reached the start of the page.
- return page->area_start();
- }
-
- // Iterate backwards to find a cell with any unset markbit.
- do {
- cell = cells[--cell_index];
- } while (~cell == 0 && cell_index > start_cell_index);
- if (~cell == 0) {
- DCHECK_EQ(start_cell_index, cell_index);
- // We have reached the start of the page.
- return page->area_start();
- }
-
- // We have found such a cell.
- const uint32_t leading_ones = base::bits::CountLeadingZeros(~cell);
- const uint32_t index_of_last_leading_one =
- Bitmap::kBitsPerCell - leading_ones;
- DCHECK_LT(0, index_of_last_leading_one);
- return page->MarkbitIndexToAddress(cell_index * Bitmap::kBitsPerCell +
- index_of_last_leading_one);
-}
-
-} // namespace
-
-Address MarkCompactCollector::FindBasePtrForMarking(Address maybe_inner_ptr) {
- // Check if the pointer is contained by a normal or large page owned by this
- // heap. Bail out if it is not.
- const MemoryChunk* chunk =
- heap()->memory_allocator()->LookupChunkContainingAddress(maybe_inner_ptr);
- if (chunk == nullptr) return kNullAddress;
- DCHECK(chunk->Contains(maybe_inner_ptr));
- // If it is contained in a large page, we want to mark the only object on it.
- if (chunk->IsLargePage()) return chunk->area_start();
- // Otherwise, we have a pointer inside a normal page.
- const Page* page = static_cast<const Page*>(chunk);
- // If it is in the young generation "from" semispace, it is not used and we
- // must ignore it, as its markbits may not be clean.
- if (page->IsFromPage()) return kNullAddress;
- // Try to find the address of a previous valid object on this page.
- Address base_ptr =
- FindPreviousObjectForConservativeMarking(page, maybe_inner_ptr);
- // If the markbit is set, then we have an object that does not need to be
- // marked.
- if (base_ptr == kNullAddress) return kNullAddress;
- // Iterate through the objects in the page forwards, until we find the object
- // containing maybe_inner_ptr.
- DCHECK_LE(base_ptr, maybe_inner_ptr);
- PtrComprCageBase cage_base{page->heap()->isolate()};
- while (true) {
- HeapObject obj(HeapObject::FromAddress(base_ptr));
- const int size = obj.Size(cage_base);
- DCHECK_LT(0, size);
- if (maybe_inner_ptr < base_ptr + size)
- return obj.IsFreeSpaceOrFiller(cage_base) ? kNullAddress : base_ptr;
- base_ptr += size;
- DCHECK_LT(base_ptr, page->area_end());
- }
-}
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_MB
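// [Editor's note] The helpers removed above implemented inner-pointer
// resolution via the page's mark bitmap. A compact, self-contained sketch of
// the same idea over a plain bit vector (a stand-in for the per-page
// markbits; not V8 code):
#include <cstddef>
#include <optional>
#include <vector>

// Finds the start of the marked object that could contain `query` (with
// query < bits.size()): the largest index i < query with bits[i] set and
// bits[i - 1] clear (or i == 0). If no mark bit precedes `query`, index 0
// (the page's area start) is returned; if bits[query] itself is set, `query`
// is already an object start and the search bails out.
std::optional<size_t> FindPreviousObjectStart(const std::vector<bool>& bits,
                                              size_t query) {
  if (bits[query]) return std::nullopt;
  for (size_t i = query; i-- > 0;) {
    if (bits[i] && (i == 0 || !bits[i - 1])) return i;
  }
  return 0;
}
// The full resolution (see the removed FindBasePtrForMarking) then walks
// forward object by object from that start, summing object sizes, until the
// object spanning `query` is found.
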
-
-void MarkCompactCollector::MarkRootsFromStack(RootVisitor* root_visitor) {
- heap()->IterateRootsFromStackIncludingClient(root_visitor,
- Heap::ScanStackMode::kComplete);
+void MarkCompactCollector::MarkRootsFromConservativeStack(
+ RootVisitor* root_visitor) {
+ heap()->IterateConservativeStackRootsIncludingClients(
+ root_visitor, Heap::ScanStackMode::kComplete);
}
void MarkCompactCollector::MarkObjectsFromClientHeaps() {
- if (!isolate()->is_shared_heap_isolate()) return;
+ if (!isolate()->is_shared_space_isolate()) return;
isolate()->global_safepoint()->IterateClientIsolates(
[collector = this](Isolate* client) {
- if (client->is_shared_heap_isolate()) return;
collector->MarkObjectsFromClientHeap(client);
});
}
@@ -2224,7 +2093,7 @@ void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
HeapObject heap_object;
if (obj.GetHeapObject(&heap_object) &&
- heap_object.InSharedWritableHeap()) {
+ heap_object.InWritableSharedSpace()) {
collector->MarkRootObject(Root::kClientHeap, heap_object);
return KEEP_SLOT;
} else {
@@ -2238,7 +2107,7 @@ void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
chunk, [collector = this, heap](SlotType slot_type, Address slot) {
HeapObject heap_object =
UpdateTypedSlotHelper::GetTargetObject(heap, slot_type, slot);
- if (heap_object.InSharedWritableHeap()) {
+ if (heap_object.InWritableSharedSpace()) {
collector->MarkRootObject(Root::kClientHeap, heap_object);
return KEEP_SLOT;
} else {
@@ -2247,36 +2116,35 @@ void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
});
}
+ MarkWaiterQueueNode(client);
+
+#ifdef V8_ENABLE_SANDBOX
+ DCHECK(IsSharedExternalPointerType(kExternalStringResourceTag));
+ DCHECK(IsSharedExternalPointerType(kExternalStringResourceDataTag));
+ // All ExternalString resources are stored in the shared external pointer
+ // table. Mark entries from client heaps.
+ ExternalPointerTable& shared_table = client->shared_external_pointer_table();
+ MarkExternalPointerFromExternalStringTable external_string_visitor(
+ &shared_table);
+ heap->external_string_table_.IterateAll(&external_string_visitor);
+#endif // V8_ENABLE_SANDBOX
+}
+
+void MarkCompactCollector::MarkWaiterQueueNode(Isolate* isolate) {
#ifdef V8_COMPRESS_POINTERS
- DCHECK(IsSandboxedExternalPointerType(kWaiterQueueNodeTag));
DCHECK(IsSharedExternalPointerType(kWaiterQueueNodeTag));
- // Custom marking for the external pointer table entry used to hold
- // client Isolates' WaiterQueueNode, which is used by JS mutexes and
- // condition variables.
+ // Custom marking for the external pointer table entry used to hold the
+ // isolates' WaiterQueueNode, which is used by JS mutexes and condition
+ // variables.
ExternalPointerHandle* handle_location =
- client->GetWaiterQueueNodeExternalPointerHandleLocation();
- ExternalPointerTable& table = client->shared_external_pointer_table();
+ isolate->GetWaiterQueueNodeExternalPointerHandleLocation();
+ ExternalPointerTable& shared_table = isolate->shared_external_pointer_table();
ExternalPointerHandle handle =
base::AsAtomic32::Relaxed_Load(handle_location);
if (handle) {
- table.Mark(handle, reinterpret_cast<Address>(handle_location));
+ shared_table.Mark(handle, reinterpret_cast<Address>(handle_location));
}
#endif // V8_COMPRESS_POINTERS
-
-#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(kExternalStringResourceTag) ||
- IsSandboxedExternalPointerType(kExternalStringResourceDataTag)) {
- // All ExternalString resources are stored in the shared external pointer
- // table. Mark entries from client heaps.
- ExternalPointerTable& table = client->shared_external_pointer_table();
- MarkExternalPointerFromExternalStringTable external_string_visitor(&table);
- heap->external_string_table_.IterateAll(&external_string_visitor);
- }
-#endif // V8_ENABLE_SANDBOX
-}
-
-void MarkCompactCollector::VisitObject(HeapObject obj) {
- marking_visitor_->Visit(obj.map(), obj);
}
bool MarkCompactCollector::MarkTransitiveClosureUntilFixpoint() {
@@ -2315,8 +2183,7 @@ bool MarkCompactCollector::MarkTransitiveClosureUntilFixpoint() {
} while (another_ephemeron_iteration_main_thread ||
heap()->concurrent_marking()->another_ephemeron_iteration() ||
!local_marking_worklists()->IsEmpty() ||
- !local_marking_worklists()->IsWrapperEmpty() ||
- !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
+ !IsCppHeapMarkingFinished());
return true;
}
@@ -2373,7 +2240,7 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() {
while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
- if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
+ if (non_atomic_marking_state()->IsUnmarked(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
}
}
@@ -2399,7 +2266,7 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() {
while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
- if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
+ if (non_atomic_marking_state()->IsUnmarked(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
}
}
@@ -2410,7 +2277,7 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() {
local_weak_objects()->next_ephemerons_local.Publish();
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
- non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
+ non_atomic_marking_state()->TryMark(ephemeron.value)) {
local_marking_worklists()->Push(ephemeron.value);
}
});
@@ -2432,9 +2299,8 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() {
// for work_to_do are not sufficient for determining if another iteration
// is necessary.
- work_to_do = !local_marking_worklists()->IsEmpty() ||
- !local_marking_worklists()->IsWrapperEmpty() ||
- !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
+ work_to_do =
+ !local_marking_worklists()->IsEmpty() || !IsCppHeapMarkingFinished();
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
}
@@ -2453,22 +2319,11 @@ void MarkCompactCollector::MarkTransitiveClosureLinear() {
}
void MarkCompactCollector::PerformWrapperTracing() {
- if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
- if (local_marking_worklists()->PublishWrapper()) {
- DCHECK(local_marking_worklists()->IsWrapperEmpty());
- } else {
- // Cannot directly publish wrapper objects.
- LocalEmbedderHeapTracer::ProcessingScope scope(
- heap_->local_embedder_heap_tracer());
- HeapObject object;
- while (local_marking_worklists()->PopWrapper(&object)) {
- scope.TracePossibleWrapper(JSObject::cast(object));
- }
- }
- heap_->local_embedder_heap_tracer()->Trace(
- std::numeric_limits<double>::infinity());
- }
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ if (!cpp_heap) return;
+
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
+ cpp_heap->AdvanceTracing(std::numeric_limits<double>::infinity());
}
std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
@@ -2486,7 +2341,8 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
Isolate* isolate = heap()->isolate();
PtrComprCageBase cage_base(isolate);
CodePageHeaderModificationScope rwx_write_scope(
- "Marking of Code objects require write access to Code page headers");
+ "Marking of InstructionStream objects require write access to "
+ "Code page headers");
if (parallel_marking_)
heap_->concurrent_marking()->RescheduleJobIfNeeded(
GarbageCollector::MARK_COMPACTOR, TaskPriority::kUserBlocking);
@@ -2500,7 +2356,7 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
// first bit set, one word fillers are always black.
DCHECK_IMPLIES(object.map(cage_base) ==
ReadOnlyRoots(isolate).one_pointer_filler_map(),
- marking_state()->IsBlack(object));
+ marking_state()->IsMarked(object));
// Other fillers may be black or grey depending on the color of the object
// that was trimmed.
DCHECK_IMPLIES(object.map(cage_base) !=
@@ -2510,7 +2366,7 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
}
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(marking_state()->IsWhite(object)));
+ DCHECK(!(marking_state()->IsUnmarked(object)));
if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
@@ -2522,7 +2378,12 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
local_marking_worklists()->SwitchToContext(context);
}
}
- size_t visited_size = marking_visitor_->Visit(map, object);
+ const auto visited_size = marking_visitor_->Visit(map, object);
+ if (visited_size) {
+ marking_state_->IncrementLiveBytes(
+ MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(object)),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size));
+ }
if (is_per_context_mode) {
native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
map, object, visited_size);
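// [Editor's note] Sketch of the live-bytes rounding applied above:
// ALIGN_TO_ALLOCATION_ALIGNMENT rounds the visited size up to the heap's
// allocation granularity. Stand-in implementation; the 8-byte granularity is
// an assumption for illustration only.
#include <cstddef>

constexpr size_t kAllocationGranularity = 8;

constexpr size_t AlignToAllocationAlignment(size_t size) {
  return (size + kAllocationGranularity - 1) & ~(kAllocationGranularity - 1);
}

static_assert(AlignToAllocationAlignment(13) == 16, "rounds up");
static_assert(AlignToAllocationAlignment(16) == 16, "already aligned");
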
@@ -2537,13 +2398,22 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
}
bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
+ // Objects in the shared heap are prohibited from being used as keys in
+ // WeakMaps and WeakSets and therefore cannot be ephemeron keys, because that
+ // would enable thread local -> shared heap edges.
+ DCHECK(!key.InWritableSharedSpace());
+ // Usually values that should not be marked are not added to the ephemeron
+ // worklist. However, minor collection during incremental marking may promote
+ // strings from the younger generation into the shared heap. This
+ // ShouldMarkObject call catches those cases.
+ if (!ShouldMarkObject(value)) return false;
if (marking_state()->IsBlackOrGrey(key)) {
- if (marking_state()->WhiteToGrey(value)) {
+ if (marking_state()->TryMark(value)) {
local_marking_worklists()->Push(value);
return true;
}
- } else if (marking_state()->IsWhite(value)) {
+ } else if (marking_state()->IsUnmarked(value)) {
local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
}
return false;
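// [Editor's note] The ephemeron rule above, as a self-contained fixpoint
// sketch (stand-in types, not the V8 worklist machinery): a value is marked
// only once its key is marked; otherwise the pair is retried in a later pass
// until no further progress is made.
#include <set>
#include <utility>
#include <vector>

using Obj = int;
using Ephemeron = std::pair<Obj, Obj>;  // {key, value}

void MarkEphemeronsToFixpoint(std::vector<Ephemeron> pending,
                              std::set<Obj>& marked) {
  bool progress = true;
  while (progress) {
    progress = false;
    std::vector<Ephemeron> next;
    for (const Ephemeron& e : pending) {
      if (marked.count(e.first) != 0) {
        // Key is live: mark the value (and, in the real collector, push it so
        // its children are marked transitively).
        progress |= marked.insert(e.second).second;
      } else {
        next.push_back(e);  // Key not (yet) live: retry in the next pass.
      }
    }
    pending.swap(next);
  }
  // Pairs still in `pending` have dead keys; their values stay unmarked.
}
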
@@ -2583,13 +2453,14 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
it.Advance()) {
if (it.frame()->is_unoptimized()) return;
if (it.frame()->is_optimized()) {
- CodeLookupResult lookup_result = it.frame()->LookupCodeT();
- // Embedded builtins can't deoptimize.
- if (lookup_result.IsCodeDataContainer()) return;
- Code code = lookup_result.code();
- if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
+ GcSafeCode lookup_result = it.frame()->GcSafeLookupCode();
+ if (!lookup_result.has_instruction_stream()) return;
+ if (!lookup_result.CanDeoptAt(isolate, it.frame()->pc())) {
+ InstructionStream istream = InstructionStream::unchecked_cast(
+ lookup_result.raw_instruction_stream());
PtrComprCageBase cage_base(isolate);
- Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
+ InstructionStream::BodyDescriptor::IterateBody(istream.map(cage_base),
+ istream, visitor);
}
return;
}
@@ -2600,6 +2471,7 @@ void MarkCompactCollector::RecordObjectStats() {
if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
// Cannot run during bootstrapping due to incomplete objects.
if (isolate()->bootstrapper()->IsActive()) return;
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, "V8.GC_OBJECT_DUMP_STATISTICS");
heap()->CreateObjectStats();
ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
heap()->dead_object_stats_.get());
@@ -2631,7 +2503,8 @@ bool ShouldRetainMap(MarkingState* marking_state, Map map, int age) {
}
Object constructor = map.GetConstructor();
if (!constructor.IsHeapObject() ||
- marking_state->IsWhite(HeapObject::cast(constructor))) {
+ (!HeapObject::cast(constructor).InReadOnlySpace() &&
+ marking_state->IsUnmarked(HeapObject::cast(constructor)))) {
// The constructor is dead, no new objects with this map can
// be created. Do not retain this map.
return false;
@@ -2659,9 +2532,9 @@ void MarkCompactCollector::RetainMaps() {
int age = retained_maps.Get(i + 1).ToSmi().value();
int new_age;
Map map = Map::cast(map_heap_object);
- if (should_retain_maps && marking_state()->IsWhite(map)) {
+ if (should_retain_maps && marking_state()->IsUnmarked(map)) {
if (ShouldRetainMap(marking_state(), map, age)) {
- if (marking_state()->WhiteToGrey(map)) {
+ if (marking_state()->TryMark(map)) {
local_marking_worklists()->Push(map);
}
if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
@@ -2670,7 +2543,8 @@ void MarkCompactCollector::RetainMaps() {
}
Object prototype = map.prototype();
if (age > 0 && prototype.IsHeapObject() &&
- marking_state()->IsWhite(HeapObject::cast(prototype))) {
+ (!HeapObject::cast(prototype).InReadOnlySpace() &&
+ marking_state()->IsUnmarked(HeapObject::cast(prototype)))) {
// The prototype is not marked, age the map.
new_age = age - 1;
} else {
@@ -2691,18 +2565,15 @@ void MarkCompactCollector::RetainMaps() {
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
- // The recursive GC marker detects when it is nearing stack overflow,
- // and switches to a different marking system. JS interrupts interfere
- // with the C stack limit check.
- PostponeInterruptsScope postpone(isolate());
- bool was_marked_incrementally = false;
- {
+ const bool was_marked_incrementally =
+ !heap_->incremental_marking()->IsStopped();
+ if (was_marked_incrementally) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
- if (heap_->incremental_marking()->Stop()) {
- MarkingBarrier::PublishAll(heap());
- was_marked_incrementally = true;
- }
+ auto* incremental_marking = heap_->incremental_marking();
+ DCHECK(incremental_marking->IsMajorMarking());
+ incremental_marking->Stop();
+ MarkingBarrier::PublishAll(heap());
}
#ifdef DEBUG
@@ -2710,14 +2581,16 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- heap_->local_embedder_heap_tracer()->EnterFinalPause();
+ if (heap_->cpp_heap()) {
+ CppHeap::From(heap_->cpp_heap())
+ ->EnterFinalPause(heap_->embedder_stack_state_);
+ }
RootMarkingVisitor root_visitor(this);
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
- CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
- MarkRoots(&root_visitor, &custom_root_body_visitor);
+ MarkRoots(&root_visitor);
}
{
@@ -2746,7 +2619,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
- MarkRootsFromStack(&root_visitor);
+ MarkRootsFromConservativeStack(&root_visitor);
}
{
@@ -2760,7 +2633,7 @@ void MarkCompactCollector::MarkLiveObjects() {
local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
- CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
+ CHECK(IsCppHeapMarkingFinished());
VerifyEphemeronMarking();
}
@@ -2847,7 +2720,12 @@ class StringForwardingTableCleaner final {
: heap_(heap),
isolate_(heap_->isolate()),
marking_state_(heap_->non_atomic_marking_state()) {}
- void Run() {
+
+ // Transition all strings in the forwarding table to
+ // ThinStrings/ExternalStrings and clear the table afterwards.
+ void TransitionStrings() {
+ DCHECK(!heap_->IsGCWithStack() ||
+ v8_flags.transition_strings_during_gc_with_stack);
StringForwardingTable* forwarding_table =
isolate_->string_forwarding_table();
forwarding_table->IterateElements(
@@ -2857,17 +2735,79 @@ class StringForwardingTableCleaner final {
forwarding_table->Reset();
}
+  // When performing a GC with a stack, we conservatively assume that the GC
+  // could have been triggered by optimized code. Optimized code assumes that
+  // flat strings don't transition during GCs, so we are not allowed to
+  // transition strings to ThinString/ExternalString in that case.
+  // Instead, we mark the forward objects to keep them alive and update the
+  // entries of evacuated objects later.
+ void ProcessFullWithStack() {
+ DCHECK(heap_->IsGCWithStack() &&
+ !v8_flags.transition_strings_during_gc_with_stack);
+ StringForwardingTable* forwarding_table =
+ isolate_->string_forwarding_table();
+ forwarding_table->IterateElements(
+ [&](StringForwardingTable::Record* record) {
+ MarkForwardObject(record);
+ });
+ }
+
+ // For Minor MC we don't mark forward objects, because they are always
+  // in the old generation (and thus considered live).
+ // We only need to delete non-live young objects.
+ void ProcessYoungObjects() {
+ DCHECK(v8_flags.always_use_string_forwarding_table);
+ StringForwardingTable* forwarding_table =
+ isolate_->string_forwarding_table();
+ forwarding_table->IterateElements(
+ [&](StringForwardingTable::Record* record) {
+ ClearNonLiveYoungObjects(record);
+ });
+ }
+
private:
+ void MarkForwardObject(StringForwardingTable::Record* record) {
+ Object original = record->OriginalStringObject(isolate_);
+ if (!original.IsHeapObject()) {
+ DCHECK_EQ(original, StringForwardingTable::deleted_element());
+ return;
+ }
+ String original_string = String::cast(original);
+ if (marking_state_->IsMarked(original_string)) {
+ Object forward = record->ForwardStringObjectOrHash(isolate_);
+ if (!forward.IsHeapObject() ||
+ HeapObject::cast(forward).InReadOnlySpace()) {
+ return;
+ }
+ marking_state_->TryMarkAndAccountLiveBytes(HeapObject::cast(forward));
+ } else {
+ DisposeExternalResource(record);
+ record->set_original_string(StringForwardingTable::deleted_element());
+ }
+ }
+
+ void ClearNonLiveYoungObjects(StringForwardingTable::Record* record) {
+ Object original = record->OriginalStringObject(isolate_);
+ if (!original.IsHeapObject()) {
+ DCHECK_EQ(original, StringForwardingTable::deleted_element());
+ return;
+ }
+ String original_string = String::cast(original);
+ if (!Heap::InYoungGeneration(original_string)) return;
+ if (!marking_state_->IsMarked(original_string)) {
+ DisposeExternalResource(record);
+ record->set_original_string(StringForwardingTable::deleted_element());
+ }
+ }
+
void TransitionStrings(StringForwardingTable::Record* record) {
Object original = record->OriginalStringObject(isolate_);
if (!original.IsHeapObject()) {
- // Only if we always use the forwarding table, the string could be a
- // smi, indicating that the entry died during scavenge.
- DCHECK(v8_flags.always_use_string_forwarding_table);
DCHECK_EQ(original, StringForwardingTable::deleted_element());
return;
}
- if (marking_state_->IsBlack(HeapObject::cast(original))) {
+ if (marking_state_->IsMarked(HeapObject::cast(original))) {
String original_string = String::cast(original);
if (original_string.IsThinString()) {
original_string = ThinString::cast(original_string).actual();
@@ -2876,7 +2816,7 @@ class StringForwardingTableCleaner final {
TryInternalize(original_string, record);
original_string.set_raw_hash_field(record->raw_hash(isolate_));
} else {
- record->DisposeExternalResource();
+ DisposeExternalResource(record);
}
}
@@ -2909,11 +2849,15 @@ class StringForwardingTableCleaner final {
StringForwardingTable::Record* record) {
if (original_string.IsInternalizedString()) return;
Object forward = record->ForwardStringObjectOrHash(isolate_);
- if (!forward.IsHeapObject()) return;
+ if (!forward.IsHeapObject()) {
+ return;
+ }
String forward_string = String::cast(forward);
// Mark the forwarded string to keep it alive.
- marking_state_->WhiteToBlack(forward_string);
+ if (!forward_string.InReadOnlySpace()) {
+ marking_state_->TryMarkAndAccountLiveBytes(forward_string);
+ }
// Transition the original string to a ThinString and override the
// forwarding index with the correct hash.
original_string.MakeThin(isolate_, forward_string);
@@ -2924,9 +2868,24 @@ class StringForwardingTableCleaner final {
ThinString::cast(original_string).RawField(ThinString::kActualOffset);
MarkCompactCollector::RecordSlot(original_string, slot, forward_string);
}
+
+  // Dispose the external resource, if it wasn't already disposed.
+  // The string forwarding table can contain multiple entries for the same
+  // external resource (e.g. due to concurrent externalization of a string
+  // with the same resource), so we keep track of already-disposed resources
+  // to avoid disposing a resource more than once.
+ void DisposeExternalResource(StringForwardingTable::Record* record) {
+ Address resource = record->ExternalResourceAddress();
+ if (resource != kNullAddress && disposed_resources_.count(resource) == 0) {
+ record->DisposeExternalResource();
+ disposed_resources_.insert(resource);
+ }
+ }
+
Heap* const heap_;
Isolate* const isolate_;
NonAtomicMarkingState* const marking_state_;
+ std::unordered_set<Address> disposed_resources_;
};
} // namespace
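The DisposeExternalResource() helper above follows a common deduplication pattern: several forwarding-table records may reference the same external resource, so the cleaner remembers which resource addresses it has already released. A stand-alone sketch of that pattern (hypothetical Release callback, plain uintptr_t instead of V8's Address):

    #include <cstdint>
    #include <functional>
    #include <unordered_set>
    #include <utility>

    // Dispose each distinct resource exactly once, even if multiple records
    // point to the same address (e.g. after concurrent externalization).
    class ResourceDisposer {
     public:
      explicit ResourceDisposer(std::function<void(uintptr_t)> release)
          : release_(std::move(release)) {}

      void Dispose(uintptr_t resource) {
        if (resource == 0) return;                       // no resource attached
        if (!disposed_.insert(resource).second) return;  // already released
        release_(resource);
      }

     private:
      std::function<void(uintptr_t)> release_;
      std::unordered_set<uintptr_t> disposed_;
    };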
@@ -2938,12 +2897,18 @@ void MarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_STRING_FORWARDING_TABLE);
// Clear string forwarding table. Live strings are transitioned to
- // ThinStrings/ExternalStrings in the cleanup process.
+  // ThinStrings/ExternalStrings in the cleanup process, if this is a GC
+  // without a stack.
// Clearing the string forwarding table must happen before clearing the
// string table, as entries in the forwarding table can keep internalized
// strings alive.
StringForwardingTableCleaner forwarding_table_cleaner(heap());
- forwarding_table_cleaner.Run();
+ if (!heap_->IsGCWithStack() ||
+ v8_flags.transition_strings_during_gc_with_stack) {
+ forwarding_table_cleaner.TransitionStrings();
+ } else {
+ forwarding_table_cleaner.ProcessFullWithStack();
+ }
}
auto clearing_job = std::make_unique<ParallelClearingJob>();
@@ -2953,7 +2918,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_EXTERNAL_STRING_TABLE);
- ExternalStringTableCleaner external_visitor(heap());
+ ExternalStringTableCleaner<ExternalStringTableCleaningMode::kAll>
+ external_visitor(heap());
heap()->external_string_table_.IterateAll(&external_visitor);
heap()->external_string_table_.CleanUpAll();
}
@@ -2963,9 +2929,17 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// We depend on `IterateWeakRootsForPhantomHandles()` being called before
// `ProcessOldCodeCandidates()` in order to identify flushed bytecode in the
// CPU profiler.
- heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
+ isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
&IsUnmarkedHeapObject);
- heap()->isolate()->traced_handles()->ResetDeadNodes(&IsUnmarkedHeapObject);
+ isolate()->traced_handles()->ResetDeadNodes(&IsUnmarkedHeapObject);
+
+ if (isolate()->is_shared_space_isolate()) {
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ client->global_handles()->IterateWeakRootsForPhantomHandles(
+ &IsUnmarkedSharedHeapObject);
+ // No need to reset traced handles since they are always strong.
+ });
+ }
}
{
@@ -2994,6 +2968,9 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// ClearFullMapTransitions must be called before weak references are
// cleared.
ClearFullMapTransitions();
+    // Weaken recorded strong DescriptorArray objects. This phase can
+    // potentially be moved anywhere after `ClearFullMapTransitions()`.
+ WeakenStrongDescriptorArrays();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
@@ -3044,7 +3021,7 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
!code.embedded_objects_cleared()) {
if (!code.marked_for_deoptimization()) {
- code.SetMarkedForDeoptimization("weak objects");
+ code.SetMarkedForDeoptimization(isolate(), "weak objects");
have_code_to_deoptimize_ = true;
}
code.ClearEmbeddedObjects(heap_);
@@ -3054,7 +3031,7 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
- DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
+ DCHECK(non_atomic_marking_state()->IsUnmarked(dead_target));
Object potential_parent = dead_target.constructor_or_back_pointer();
if (potential_parent.IsMap()) {
Map parent = Map::cast(potential_parent);
@@ -3145,7 +3122,7 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// marked.
DCHECK(!ShouldMarkObject(inferred_name) ||
marking_state()->IsBlackOrGrey(inferred_name));
- marking_state()->WhiteToBlack(uncompiled_data);
+ marking_state()->TryMarkAndAccountLiveBytes(uncompiled_data);
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
@@ -3159,23 +3136,33 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
SharedFunctionInfo flushing_candidate;
while (local_weak_objects()->code_flushing_candidates_local.Pop(
&flushing_candidate)) {
+ Code baseline_code;
+ InstructionStream baseline_istream;
+ HeapObject baseline_bytecode_or_interpreter_data;
+ if (v8_flags.flush_baseline_code && flushing_candidate.HasBaselineCode()) {
+ baseline_code =
+ Code::cast(flushing_candidate.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the Code was
+ // acquire-loaded.
+ baseline_istream = FromCode(baseline_code, isolate(), kRelaxedLoad);
+ baseline_bytecode_or_interpreter_data =
+ baseline_code.bytecode_or_interpreter_data();
+ }
// During flushing a BytecodeArray is transformed into an UncompiledData in
// place. Seeing an UncompiledData here implies that another
- // SharedFunctionInfo had a reference to the same ByteCodeArray and flushed
+ // SharedFunctionInfo had a reference to the same BytecodeArray and flushed
// it before processing this candidate. This can happen when using
// CloneSharedFunctionInfo().
bool bytecode_already_decompiled =
flushing_candidate.function_data(isolate(), kAcquireLoad)
- .IsUncompiledData(isolate());
+ .IsUncompiledData(isolate()) ||
+ (!baseline_istream.is_null() &&
+ baseline_bytecode_or_interpreter_data.IsUncompiledData(isolate()));
bool is_bytecode_live = !bytecode_already_decompiled &&
non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
- if (v8_flags.flush_baseline_code && flushing_candidate.HasBaselineCode()) {
- CodeT baseline_codet =
- CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
- // Safe to do a relaxed load here since the CodeT was acquire-loaded.
- Code baseline_code = FromCodeT(baseline_codet, isolate(), kRelaxedLoad);
- if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
+ if (!baseline_istream.is_null()) {
+ if (non_atomic_marking_state()->IsBlackOrGrey(baseline_istream)) {
// Currently baseline code holds bytecode array strongly and it is
// always ensured that bytecode is live if baseline code is live. Hence
// baseline code can safely load bytecode array without any additional
@@ -3184,15 +3171,18 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
// to bailout if there is no bytecode.
DCHECK(is_bytecode_live);
- // Regardless of whether the CodeT is a CodeDataContainer or the Code
- // itself, if the Code is live then the CodeT has to be live and will
- // have been marked via the owning JSFunction.
- DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
- } else if (is_bytecode_live) {
- // If baseline code is flushed but we have a valid bytecode array reset
- // the function_data field to the BytecodeArray/InterpreterData.
+        // If the InstructionStream is live, then the owning Code has to be
+        // live as well and will have been marked via the owning JSFunction.
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_code));
+ } else if (is_bytecode_live || bytecode_already_decompiled) {
+ // Reset the function_data field to the BytecodeArray, InterpreterData,
+ // or UncompiledData found on the baseline code. We can skip this step
+ // if the BytecodeArray is not live and not already decompiled, because
+ // FlushBytecodeFromSFI below will set the function_data field.
flushing_candidate.set_function_data(
- baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
+ baseline_bytecode_or_interpreter_data, kReleaseStore);
}
}
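The branch structure above is easier to read as a small decision helper. The enum and function below are invented for illustration (they are not V8 API) and summarize the three outcomes per candidate: keep live baseline code, fall back to the bytecode-level data the baseline code referenced, or flush the bytecode entirely.

    // Hypothetical summary of the flushing decision for one candidate.
    enum class FlushAction {
      kKeepBaselineCode,     // baseline InstructionStream is live
      kResetToBytecodeData,  // drop baseline code, keep bytecode/interpreter
                             // data (or already-decompiled UncompiledData)
      kFlushBytecode         // neither is live: decompile to UncompiledData
    };

    inline FlushAction DecideFlushAction(bool baseline_code_live,
                                         bool bytecode_live,
                                         bool already_decompiled) {
      if (baseline_code_live) return FlushAction::kKeepBaselineCode;
      if (bytecode_live || already_decompiled) {
        return FlushAction::kResetToBytecodeData;
      }
      return FlushAction::kFlushBytecode;
    }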
@@ -3304,11 +3294,11 @@ bool MarkCompactCollector::TransitionArrayNeedsCompaction(
for (int j = 0; j < num_transitions; ++j) {
DCHECK_IMPLIES(
!transitions.GetRawTarget(j).IsSmi(),
- !non_atomic_marking_state()->IsWhite(transitions.GetTarget(j)));
+ !non_atomic_marking_state()->IsUnmarked(transitions.GetTarget(j)));
}
#endif
return false;
- } else if (non_atomic_marking_state()->IsWhite(
+ } else if (non_atomic_marking_state()->IsUnmarked(
TransitionsAccessor::GetTargetFromRaw(raw_target))) {
#ifdef DEBUG
// Targets can only be dead iff this array is fully deserialized.
@@ -3336,7 +3326,7 @@ bool MarkCompactCollector::CompactTransitionArray(Map map,
for (int i = 0; i < num_transitions; ++i) {
Map target = transitions.GetTarget(i);
DCHECK_EQ(target.constructor_or_back_pointer(), map);
- if (non_atomic_marking_state()->IsWhite(target)) {
+ if (non_atomic_marking_state()->IsUnmarked(target)) {
if (!descriptors.is_null() &&
target.instance_descriptors(isolate()) == descriptors) {
DCHECK(!target.is_prototype_map());
@@ -3408,6 +3398,26 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
array.set_number_of_all_descriptors(new_nof_all_descriptors);
}
+void MarkCompactCollector::RecordStrongDescriptorArraysForWeakening(
+ GlobalHandleVector<DescriptorArray> strong_descriptor_arrays) {
+ DCHECK(heap()->incremental_marking()->IsMajorMarking());
+ base::MutexGuard guard(&strong_descriptor_arrays_mutex_);
+ strong_descriptor_arrays_.push_back(std::move(strong_descriptor_arrays));
+}
+
+void MarkCompactCollector::WeakenStrongDescriptorArrays() {
+ Map descriptor_array_map = ReadOnlyRoots(isolate()).descriptor_array_map();
+ for (auto vec : strong_descriptor_arrays_) {
+ for (auto it = vec.begin(); it != vec.end(); ++it) {
+ DescriptorArray raw = it.raw();
+ DCHECK(raw.IsStrongDescriptorArray());
+ raw.set_map_safe_transition_no_write_barrier(descriptor_array_map);
+ DCHECK_EQ(raw.raw_gc_state(kRelaxedLoad), 0);
+ }
+ }
+ strong_descriptor_arrays_.clear();
+}
+
void MarkCompactCollector::TrimDescriptorArray(Map map,
DescriptorArray descriptors) {
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
@@ -3465,7 +3475,7 @@ void MarkCompactCollector::ClearWeakCollections() {
non_atomic_marking_state()->IsBlackOrGrey(heap_object));
}
}
-#endif
+#endif // VERIFY_HEAP
if (!ShouldMarkObject(key)) continue;
if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
table.RemoveEntry(i);
@@ -3494,7 +3504,8 @@ void MarkCompactCollector::ClearWeakReferences() {
MaybeObjectSlot location(slot.second);
if ((*location)->GetHeapObjectIfWeak(&value)) {
DCHECK(!value.IsCell());
- if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
+ if (value.InReadOnlySpace() ||
+ non_atomic_marking_state()->IsBlackOrGrey(value)) {
// The value of the weak reference is alive.
RecordSlot(slot.first, HeapObjectSlot(location), value);
} else {
@@ -3512,7 +3523,8 @@ void MarkCompactCollector::ClearJSWeakRefs() {
JSWeakRef weak_ref;
while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
- if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ if (!target.InReadOnlySpace() &&
+ !non_atomic_marking_state()->IsBlackOrGrey(target)) {
weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
} else {
// The value of the JSWeakRef is alive.
@@ -3529,7 +3541,8 @@ void MarkCompactCollector::ClearJSWeakRefs() {
}
};
HeapObject target = HeapObject::cast(weak_cell.target());
- if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ if (!target.InReadOnlySpace() &&
+ !non_atomic_marking_state()->IsBlackOrGrey(target)) {
DCHECK(target.CanBeHeldWeakly());
// The value of the WeakCell is dead.
JSFinalizationRegistry finalization_registry =
@@ -3551,7 +3564,8 @@ void MarkCompactCollector::ClearJSWeakRefs() {
}
HeapObject unregister_token = weak_cell.unregister_token();
- if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
+ if (!unregister_token.InReadOnlySpace() &&
+ !non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
DCHECK(unregister_token.CanBeHeldWeakly());
// The unregister token is dead. Remove any corresponding entries in the
// key map. Multiple WeakCell with the same token will have all their
@@ -3578,9 +3592,10 @@ bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
}
// static
-bool MarkCompactCollector::ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+bool MarkCompactCollector::ShouldRecordRelocSlot(RelocInfo* rinfo,
HeapObject target) {
- MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
+ MemoryChunk* source_chunk =
+ MemoryChunk::FromHeapObject(rinfo->instruction_stream());
BasicMemoryChunk* target_chunk = BasicMemoryChunk::FromHeapObject(target);
return target_chunk->IsEvacuationCandidate() &&
!source_chunk->ShouldSkipEvacuationSlotRecording();
@@ -3588,10 +3603,7 @@ bool MarkCompactCollector::ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
// static
MarkCompactCollector::RecordRelocSlotInfo
-MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
- HeapObject target) {
- DCHECK_EQ(host, rinfo->host());
-
+MarkCompactCollector::ProcessRelocInfo(RelocInfo* rinfo, HeapObject target) {
RecordRelocSlotInfo result;
const RelocInfo::Mode rmode = rinfo->rmode();
Address addr;
@@ -3621,7 +3633,8 @@ MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
}
}
- MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
+ MemoryChunk* const source_chunk =
+ MemoryChunk::FromHeapObject(rinfo->instruction_stream());
const uintptr_t offset = addr - source_chunk->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
result.memory_chunk = source_chunk;
@@ -3632,10 +3645,10 @@ MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
}
// static
-void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo,
HeapObject target) {
- if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
- RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
+ if (!ShouldRecordRelocSlot(rinfo, target)) return;
+ RecordRelocSlotInfo info = ProcessRelocInfo(rinfo, target);
// Access to TypeSlots need to be protected, since LocalHeaps might
// publish code in the background thread.
@@ -3691,10 +3704,18 @@ MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
return HeapObjectReference::Strong(heap_object);
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+template <>
+Object MakeSlotValue<CodeObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return heap_object;
+}
+#endif // V8_EXTERNAL_CODE_SPACE
+
// The following specialization
// MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
// is not used.
-#endif
+#endif // V8_COMPRESS_POINTERS
template <AccessMode access_mode, HeapObjectReferenceType reference_type,
typename TSlot>
@@ -3705,19 +3726,18 @@ static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot,
std::is_same<TSlot, ObjectSlot>::value ||
std::is_same<TSlot, FullMaybeObjectSlot>::value ||
std::is_same<TSlot, MaybeObjectSlot>::value ||
- std::is_same<TSlot, OffHeapObjectSlot>::value,
- "Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
- "expected here");
+ std::is_same<TSlot, OffHeapObjectSlot>::value ||
+ std::is_same<TSlot, CodeObjectSlot>::value,
+ "Only [Full|OffHeap]ObjectSlot, [Full]MaybeObjectSlot "
+ "or CodeObjectSlot are expected here");
MapWord map_word = heap_obj.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES((!v8_flags.minor_mc && !Heap::InFromPage(heap_obj)),
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromHeapObject(heap_obj)->IsFlagSet(
Page::COMPACTION_WAS_ABORTED));
- PtrComprCageBase host_cage_base =
- V8_EXTERNAL_CODE_SPACE_BOOL ? GetPtrComprCageBase(heap_obj) : cage_base;
typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
- map_word.ToForwardingAddress(host_cage_base));
+ map_word.ToForwardingAddress(heap_obj));
if (access_mode == AccessMode::NON_ATOMIC) {
// Needs to be atomic for map space compaction: This slot could be a map
// word which we update while loading the map word for updating the slot
@@ -3760,7 +3780,7 @@ static inline SlotCallbackResult UpdateOldToSharedSlot(
cage_base, slot, obj, heap_obj);
}
- return heap_obj.InSharedWritableHeap() ? KEEP_SLOT : REMOVE_SLOT;
+ return heap_obj.InWritableSharedSpace() ? KEEP_SLOT : REMOVE_SLOT;
} else {
return REMOVE_SLOT;
}
@@ -3785,7 +3805,7 @@ static inline SlotCallbackResult UpdateStrongOldToSharedSlot(
if (obj.GetHeapObject(&heap_obj)) {
UpdateSlot<AccessMode::NON_ATOMIC, HeapObjectReferenceType::STRONG>(
cage_base, slot, obj, heap_obj);
- return heap_obj.InSharedWritableHeap() ? KEEP_SLOT : REMOVE_SLOT;
+ return heap_obj.InWritableSharedSpace() ? KEEP_SLOT : REMOVE_SLOT;
}
return REMOVE_SLOT;
@@ -3803,12 +3823,12 @@ static inline void UpdateStrongCodeSlot(HeapObject host,
UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(cage_base, slot,
obj, heap_obj);
- CodeDataContainer code_data_container =
- CodeDataContainer::cast(HeapObject::FromAddress(
- slot.address() - CodeDataContainer::kCodeOffset));
- Code code = code_data_container.code(code_cage_base);
+ Code code = Code::cast(HeapObject::FromAddress(
+ slot.address() - Code::kInstructionStreamOffset));
+ InstructionStream instruction_stream =
+ code.instruction_stream(code_cage_base);
Isolate* isolate_for_sandbox = GetIsolateForSandbox(host);
- code_data_container.UpdateCodeEntryPoint(isolate_for_sandbox, code);
+ code.UpdateCodeEntryPoint(isolate_for_sandbox, instruction_stream);
}
}
@@ -3844,8 +3864,7 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base(),
code_cage_base(), slot);
}
@@ -3871,12 +3890,12 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
// This visitor never visits code objects.
UNREACHABLE();
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
// This visitor never visits code objects.
UNREACHABLE();
}
@@ -3914,7 +3933,7 @@ static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
MapWord map_word = old_string.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
- String new_string = String::cast(map_word.ToForwardingAddress());
+ String new_string = String::cast(map_word.ToForwardingAddress(old_string));
if (new_string.IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
@@ -3939,7 +3958,8 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space_evacuation_pages_.push_back(p);
}
}
- new_space->EvacuatePrologue();
+ if (!v8_flags.minor_mc)
+ SemiSpaceNewSpace::From(new_space)->EvacuatePrologue();
}
if (heap()->new_lo_space()) {
@@ -3974,7 +3994,7 @@ void VerifyRememberedSetsAfterEvacuation(Heap* heap,
DCHECK_NULL((chunk->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL((chunk->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
- if (new_space_is_empty) {
+ if (new_space_is_empty && (collector == GarbageCollector::MARK_COMPACTOR)) {
// Old-to-new slot sets must be empty after evacuation.
DCHECK_NULL((chunk->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>()));
DCHECK_NULL((chunk->typed_slot_set<OLD_TO_NEW, AccessMode::ATOMIC>()));
@@ -3984,7 +4004,7 @@ void VerifyRememberedSetsAfterEvacuation(Heap* heap,
// new or shared spaces.
AllocationSpace id = chunk->owner_identity();
if (id == SHARED_SPACE || id == SHARED_LO_SPACE || id == NEW_SPACE ||
- id == NEW_LO_SPACE || heap->isolate()->is_shared()) {
+ id == NEW_LO_SPACE) {
DCHECK_NULL((chunk->slot_set<OLD_TO_SHARED, AccessMode::ATOMIC>()));
DCHECK_NULL((chunk->typed_slot_set<OLD_TO_SHARED, AccessMode::ATOMIC>()));
}
@@ -4007,7 +4027,6 @@ void MarkCompactCollector::EvacuateEpilogue() {
// New space.
if (heap()->new_space()) {
- heap()->new_space()->EvacuateEpilogue();
DCHECK_EQ(0, heap()->new_space()->Size());
}
@@ -4021,9 +4040,10 @@ void MarkCompactCollector::EvacuateEpilogue() {
namespace {
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
- if (v8_flags.shared_string_table && heap->isolate()->has_shared_heap() &&
- !heap->isolate()->is_shared_heap_isolate()) {
- return new ConcurrentAllocator(nullptr, heap->shared_allocation_space());
+ if (v8_flags.shared_string_table && heap->isolate()->has_shared_space() &&
+ !heap->isolate()->is_shared_space_isolate()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_allocation_space(),
+ ConcurrentAllocator::Context::kGC);
}
return nullptr;
@@ -4062,31 +4082,24 @@ class Evacuator : public Malloced {
return kObjectsOldToOld;
}
- // NewSpacePages with more live bytes than this threshold qualify for fast
- // evacuation.
- static intptr_t NewSpacePageEvacuationThreshold() {
- return v8_flags.page_promotion_threshold *
- MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
- }
-
- Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
- EvacuationAllocator* local_allocator,
- AlwaysPromoteYoung always_promote_young)
+ explicit Evacuator(Heap* heap)
: heap_(heap),
local_pretenuring_feedback_(
- PretenturingHandler::kInitialFeedbackCapacity),
+ PretenuringHandler::kInitialFeedbackCapacity),
+ local_allocator_(heap_,
+ CompactionSpaceKind::kCompactionSpaceForMarkCompact),
shared_old_allocator_(CreateSharedOldAllocator(heap_)),
- new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
- record_visitor, &local_pretenuring_feedback_,
- always_promote_young),
- new_to_new_page_visitor_(heap_, record_visitor,
+ record_visitor_(heap_, &ephemeron_remembered_set_),
+ new_space_visitor_(heap_, &local_allocator_,
+ shared_old_allocator_.get(), &record_visitor_,
+ &local_pretenuring_feedback_),
+ new_to_new_page_visitor_(heap_, &record_visitor_,
&local_pretenuring_feedback_),
- new_to_old_page_visitor_(heap_, record_visitor,
+ new_to_old_page_visitor_(heap_, &record_visitor_,
&local_pretenuring_feedback_),
- old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
- record_visitor),
- local_allocator_(local_allocator),
+ old_space_visitor_(heap_, &local_allocator_,
+ shared_old_allocator_.get(), &record_visitor_),
duration_(0.0),
bytes_compacted_(0) {}
@@ -4101,15 +4114,11 @@ class Evacuator : public Malloced {
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
- virtual void Finalize();
-
- virtual GCTracer::Scope::ScopeId GetBackgroundTracingScope() = 0;
- virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
+ void Finalize();
protected:
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual bool RawEvacuatePage(MemoryChunk* chunk,
- intptr_t* saved_live_bytes) = 0;
+ bool RawEvacuatePage(MemoryChunk* chunk, intptr_t* saved_live_bytes);
inline Heap* heap() { return heap_; }
@@ -4120,11 +4129,17 @@ class Evacuator : public Malloced {
Heap* heap_;
- PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
+ PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
+
+ // Locally cached collector data.
+ EvacuationAllocator local_allocator_;
// Allocator for the shared heap.
std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+ EphemeronRememberedSet ephemeron_remembered_set_;
+ RecordMigratedSlotVisitor record_visitor_;
+
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
@@ -4133,9 +4148,6 @@ class Evacuator : public Malloced {
new_to_old_page_visitor_;
EvacuateOldSpaceVisitor old_space_visitor_;
- // Locally cached collector data.
- EvacuationAllocator* local_allocator_;
-
// Book keeping info.
double duration_;
intptr_t bytes_compacted_;
@@ -4169,7 +4181,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
}
void Evacuator::Finalize() {
- local_allocator_->Finalize();
+ local_allocator_.Finalize();
if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
@@ -4184,51 +4196,23 @@ void Evacuator::Finalize() {
new_to_new_page_visitor_.moved_bytes());
heap()->pretenuring_handler()->MergeAllocationSitePretenuringFeedback(
local_pretenuring_feedback_);
-}
-
-class FullEvacuator : public Evacuator {
- public:
- explicit FullEvacuator(Heap* heap)
- : Evacuator(heap, &record_visitor_, &local_allocator_,
- AlwaysPromoteYoung::kYes),
- record_visitor_(heap_, &ephemeron_remembered_set_),
- local_allocator_(heap_,
- CompactionSpaceKind::kCompactionSpaceForMarkCompact) {}
-
- GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
- return GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY;
- }
-
- GCTracer::Scope::ScopeId GetTracingScope() override {
- return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
- }
-
- void Finalize() override {
- Evacuator::Finalize();
- for (auto it = ephemeron_remembered_set_.begin();
- it != ephemeron_remembered_set_.end(); ++it) {
- auto insert_result =
- heap()->ephemeron_remembered_set_.insert({it->first, it->second});
- if (!insert_result.second) {
- // Insertion didn't happen, there was already an item.
- auto set = insert_result.first->second;
- for (int entry : it->second) {
- set.insert(entry);
- }
+ DCHECK_IMPLIES(v8_flags.minor_mc, ephemeron_remembered_set_.empty());
+ for (auto it = ephemeron_remembered_set_.begin();
+ it != ephemeron_remembered_set_.end(); ++it) {
+ auto insert_result =
+ heap()->ephemeron_remembered_set_.insert({it->first, it->second});
+ if (!insert_result.second) {
+ // Insertion didn't happen, there was already an item.
+ auto set = insert_result.first->second;
+ for (int entry : it->second) {
+ set.insert(entry);
}
}
}
+}
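Merging the per-evacuator ephemeron remembered set into the heap-wide one is a standard map-of-sets union: insert whole entries for new keys, otherwise merge the index sets. A self-contained sketch of the idiom with plain STL containers (not V8's EphemeronRememberedSet type):

    #include <unordered_map>
    #include <unordered_set>

    using TableToIndices = std::unordered_map<int, std::unordered_set<int>>;

    // Union `local` into `global`. Illustrative only.
    void MergeRememberedSets(TableToIndices& global,
                             const TableToIndices& local) {
      for (const auto& entry : local) {
        auto insert_result = global.insert(entry);
        if (!insert_result.second) {
          // Key already present: merge the index sets.
          insert_result.first->second.insert(entry.second.begin(),
                                             entry.second.end());
        }
      }
    }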
- protected:
- bool RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
-
- EphemeronRememberedSet ephemeron_remembered_set_;
- RecordMigratedSlotVisitor record_visitor_;
- EvacuationAllocator local_allocator_;
-};
-
-bool FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
+bool Evacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
@@ -4260,7 +4244,8 @@ bool FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
break;
case kObjectsOldToOld: {
RwxMemoryWriteScope rwx_write_scope(
- "Evacuation of objects in Code space requires write access for the "
+ "Evacuation of objects in Code space requires write "
+ "access for the "
"current worker thread.");
#if DEBUG
old_space_visitor_.SetUpAbortEvacuationAtAddress(chunk);
@@ -4303,10 +4288,10 @@ class PageEvacuationJob : public v8::JobTask {
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
if (delegate->IsJoiningThread()) {
- TRACE_GC(tracer_, evacuator->GetTracingScope());
+ TRACE_GC(tracer_, GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL);
ProcessItems(delegate, evacuator);
} else {
- TRACE_GC_EPOCH(tracer_, evacuator->GetBackgroundTracingScope(),
+ TRACE_GC_EPOCH(tracer_, GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
ThreadKind::kBackground);
ProcessItems(delegate, evacuator);
}
@@ -4352,8 +4337,7 @@ namespace {
template <class Evacuator>
size_t CreateAndExecuteEvacuationTasks(
Heap* heap,
- std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
- MigrationObserver* migration_observer) {
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items) {
base::Optional<ProfilingMigrationObserver> profiling_observer;
if (heap->isolate()->log_object_relocation()) {
profiling_observer.emplace(heap);
@@ -4365,9 +4349,6 @@ size_t CreateAndExecuteEvacuationTasks(
if (profiling_observer) {
evacuator->AddObserver(&profiling_observer.value());
}
- if (migration_observer) {
- evacuator->AddObserver(migration_observer);
- }
evacuators.push_back(std::move(evacuator));
}
V8::GetCurrentPlatform()
@@ -4382,6 +4363,13 @@ size_t CreateAndExecuteEvacuationTasks(
return wanted_num_tasks;
}
+// NewSpacePages with more live bytes than this threshold qualify for fast
+// evacuation.
+intptr_t NewSpacePageEvacuationThreshold() {
+ return v8_flags.page_promotion_threshold *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
+}
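As a worked example of this threshold (the concrete numbers are assumptions for illustration, not authoritative V8 defaults): with a promotion threshold of 70 percent and, say, 256 KiB of allocatable memory per data page, a new-space page qualifies for promotion once its live plus wasted bytes exceed about 183,500 bytes (~179 KiB).

    #include <cstdint>
    #include <iostream>

    int main() {
      // Assumed values for illustration only.
      const int64_t page_promotion_threshold_percent = 70;
      const int64_t allocatable_bytes_per_page = 256 * 1024;

      const int64_t threshold =
          page_promotion_threshold_percent * allocatable_bytes_per_page / 100;
      std::cout << "promotion threshold: " << threshold << " bytes\n";
      return 0;  // prints 183500
    }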
+
bool ShouldMovePage(Page* p, intptr_t live_bytes, intptr_t wasted_bytes,
MemoryReductionMode memory_reduction_mode,
AlwaysPromoteYoung always_promote_young,
@@ -4390,8 +4378,7 @@ bool ShouldMovePage(Page* p, intptr_t live_bytes, intptr_t wasted_bytes,
return v8_flags.page_promotion &&
(memory_reduction_mode == MemoryReductionMode::kNone) &&
!p->NeverEvacuate() &&
- ((live_bytes + wasted_bytes >
- Evacuator::NewSpacePageEvacuationThreshold()) ||
+ ((live_bytes + wasted_bytes > NewSpacePageEvacuationThreshold()) ||
(promote_unusable_pages == PromoteUnusablePages::kYes &&
!p->WasUsedForAllocation())) &&
(always_promote_young == AlwaysPromoteYoung::kYes ||
@@ -4437,6 +4424,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
AlwaysPromoteYoung::kYes, PromoteUnusablePages::kNo) ||
force_page_promotion) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
DCHECK_EQ(heap()->old_space(), page->owner());
// The move added page->allocated_bytes to the old space, but we are
// going to sweep the page and add page->live_byte_count.
@@ -4486,7 +4474,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
LargePage* current = *(it++);
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
- if (marking_state->IsBlack(object)) {
+ if (marking_state->IsMarked(object)) {
heap()->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
promoted_large_pages_.push_back(current);
@@ -4503,8 +4491,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
"MarkCompactCollector::EvacuatePagesInParallel", "pages",
evacuation_items.size());
- wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
- heap(), std::move(evacuation_items), nullptr);
+ wanted_num_tasks = CreateAndExecuteEvacuationTasks<Evacuator>(
+ heap(), std::move(evacuation_items));
}
const size_t aborted_pages = PostProcessAbortedEvacuationCandidates();
@@ -4522,19 +4510,13 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
HeapObject heap_object = HeapObject::cast(object);
MapWord map_word = heap_object.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
- return map_word.ToForwardingAddress();
+ return map_word.ToForwardingAddress(heap_object);
}
}
return object;
}
};
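The retainer above uses the forwarding idiom that recurs throughout pointer updating: an evacuated object's old copy stores a forwarding pointer in its map word, and readers follow it, while objects that were not moved are returned unchanged. A simplified sketch with stand-in types (not V8's MapWord encoding):

    // Illustrative only: `forwarding_address` models a map word that has been
    // overwritten with a forwarding pointer during evacuation.
    struct SketchHeapObject {
      SketchHeapObject* forwarding_address = nullptr;  // set iff evacuated
    };

    // Follow the forwarding pointer if the object was moved, otherwise keep
    // the original reference.
    inline SketchHeapObject* Retain(SketchHeapObject* object) {
      if (object != nullptr && object->forwarding_address != nullptr) {
        return object->forwarding_address;
      }
      return object;
    }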
-void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
- EvacuateRecordOnlyVisitor visitor(heap());
- LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
- &visitor);
-}
-
template <class Visitor, typename MarkingState>
bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
MarkingState* marking_state,
@@ -4561,7 +4543,7 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
"LiveObjectVisitor::VisitBlackObjectsNoFail");
if (chunk->IsLargePage()) {
HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
- if (marking_state->IsBlack(object)) {
+ if (marking_state->IsMarked(object)) {
const bool success = visitor->Visit(object, object.Size());
USE(success);
DCHECK(success);
@@ -4570,7 +4552,7 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject const object = object_and_size.first;
- DCHECK(marking_state->IsBlack(object));
+ DCHECK(marking_state->IsMarked(object));
const bool success = visitor->Visit(object, object_and_size.second);
USE(success);
DCHECK(success);
@@ -4600,7 +4582,6 @@ void MarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
- EvacuationScope evacuation_scope(heap());
EvacuatePagesInParallel();
}
@@ -4622,11 +4603,10 @@ void MarkCompactCollector::Evacuate() {
DCHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
DCHECK(p->SweepingDone());
PagedNewSpace* space = heap()->paged_new_space();
- if ((resize_new_space_ == ResizeNewSpaceMode::kShrink) &&
- space->ShouldReleasePage()) {
+ if (space->ShouldReleaseEmptyPage()) {
space->ReleasePage(p);
} else {
- sweeper()->AddNewSpacePage(p);
+ sweeper()->SweepEmptyNewSpacePage(p);
}
}
}
@@ -4660,7 +4640,7 @@ void MarkCompactCollector::Evacuate() {
FullEvacuationVerifier verifier(heap());
verifier.Run();
}
-#endif
+#endif // VERIFY_HEAP
}
class UpdatingItem : public ParallelWorkItem {
@@ -4673,22 +4653,21 @@ class PointersUpdatingJob : public v8::JobTask {
public:
explicit PointersUpdatingJob(
Isolate* isolate,
- std::vector<std::unique_ptr<UpdatingItem>> updating_items,
- GCTracer::Scope::ScopeId scope, GCTracer::Scope::ScopeId background_scope)
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items)
: updating_items_(std::move(updating_items)),
remaining_updating_items_(updating_items_.size()),
generator_(updating_items_.size()),
- tracer_(isolate->heap()->tracer()),
- scope_(scope),
- background_scope_(background_scope) {}
+ tracer_(isolate->heap()->tracer()) {}
void Run(JobDelegate* delegate) override {
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
if (delegate->IsJoiningThread()) {
- TRACE_GC(tracer_, scope_);
+ TRACE_GC(tracer_, GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL);
UpdatePointers(delegate);
} else {
- TRACE_GC_EPOCH(tracer_, background_scope_, ThreadKind::kBackground);
+ TRACE_GC_EPOCH(tracer_,
+ GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
+ ThreadKind::kBackground);
UpdatePointers(delegate);
}
}
@@ -4724,8 +4703,6 @@ class PointersUpdatingJob : public v8::JobTask {
IndexGenerator generator_;
GCTracer* tracer_;
- GCTracer::Scope::ScopeId scope_;
- GCTracer::Scope::ScopeId background_scope_;
};
template <typename MarkingState>
@@ -4785,15 +4762,14 @@ class ToSpaceUpdatingItem : public UpdatingItem {
namespace {
-template <GarbageCollector collector>
class RememberedSetUpdatingItem : public UpdatingItem {
public:
explicit RememberedSetUpdatingItem(Heap* heap, MemoryChunk* chunk)
: heap_(heap),
marking_state_(heap_->non_atomic_marking_state()),
chunk_(chunk),
- record_old_to_shared_slots_(heap->isolate()->has_shared_heap() &&
- !chunk->InSharedHeap()) {}
+ record_old_to_shared_slots_(heap->isolate()->has_shared_space() &&
+ !chunk->InWritableSharedSpace()) {}
~RememberedSetUpdatingItem() override = default;
void Process() override {
@@ -4814,7 +4790,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return;
}
- if (heap_object.InSharedWritableHeap()) {
+ if (heap_object.InWritableSharedSpace()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
chunk, slot.address());
}
@@ -4834,7 +4810,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
});
#endif // DEBUG
- if (heap_object.InSharedWritableHeap()) {
+ if (heap_object.InWritableSharedSpace()) {
const uintptr_t offset = addr - chunk->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
RememberedSet<OLD_TO_SHARED>::InsertTyped(chunk, slot_type,
@@ -4843,95 +4819,35 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
template <typename TSlot>
- inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
+ inline void CheckAndUpdateOldToNewSlot(TSlot slot) {
static_assert(
std::is_same<TSlot, FullMaybeObjectSlot>::value ||
std::is_same<TSlot, MaybeObjectSlot>::value,
"Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
HeapObject heap_object;
- if (!(*slot).GetHeapObject(&heap_object)) {
- return REMOVE_SLOT;
- }
- if (!Heap::InYoungGeneration(heap_object)) return REMOVE_SLOT;
- if (collector == GarbageCollector::MINOR_MARK_COMPACTOR) {
- return CheckAndUpdateOldToNewSlotMinor(heap_object);
+ if (!(*slot).GetHeapObject(&heap_object)) return;
+ if (!Heap::InYoungGeneration(heap_object)) return;
+
+ if (v8_flags.minor_mc && !Heap::IsLargeObject(heap_object)) {
+ DCHECK(Heap::InToPage(heap_object));
} else {
- return CheckAndUpdateOldToNewSlotMajor(slot, heap_object);
+ DCHECK(Heap::InFromPage(heap_object));
}
- }
-
- inline SlotCallbackResult CheckAndUpdateOldToNewSlotMinor(
- HeapObject heap_object) {
- DCHECK_EQ(GarbageCollector::MINOR_MARK_COMPACTOR, collector);
- DCHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
- DCHECK(!Heap::InFromPage(heap_object));
- if (marking_state_->IsBlack(heap_object)) return KEEP_SLOT;
- return REMOVE_SLOT;
- }
- template <typename TSlot>
- inline SlotCallbackResult CheckAndUpdateOldToNewSlotMajor(
- TSlot slot, HeapObject heap_object) {
- DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
- using THeapObjectSlot = typename TSlot::THeapObjectSlot;
- if (Heap::InFromPage(heap_object)) {
- if (v8_flags.minor_mc) {
- DCHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
- DCHECK(Page::FromHeapObject(heap_object)->IsLargePage());
- DCHECK(!marking_state_->IsBlack(heap_object));
- return REMOVE_SLOT;
- }
- MapWord map_word = heap_object.map_word(kRelaxedLoad);
- if (map_word.IsForwardingAddress()) {
- HeapObjectReference::Update(THeapObjectSlot(slot),
- map_word.ToForwardingAddress());
- }
- bool success = (*slot).GetHeapObject(&heap_object);
- USE(success);
- DCHECK(success);
- // If the object was in from space before and is after executing the
- // callback in to space, the object is still live.
- // Unfortunately, we do not know about the slot. It could be in a
- // just freed free space object.
- if (Heap::InToPage(heap_object)) {
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
+ MapWord map_word = heap_object.map_word(kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ HeapObjectReference::Update(THeapObjectSlot(slot),
+ map_word.ToForwardingAddress(heap_object));
} else {
- DCHECK(Heap::InToPage(heap_object));
- // Slots can point to "to" space if the page has been moved, or if the
- // slot has been recorded multiple times in the remembered set, or
- // if the slot was already updated during old->old updating.
- // In case the page has been moved, check markbits to determine liveness
- // of the slot. In the other case, the slot can just be kept.
- if (v8_flags.minor_mc) {
- MapWord map_word = heap_object.map_word(kRelaxedLoad);
- if (map_word.IsForwardingAddress()) {
- HeapObjectReference::Update(THeapObjectSlot(slot),
- map_word.ToForwardingAddress());
- bool success = (*slot).GetHeapObject(&heap_object);
- USE(success);
- DCHECK(success);
- } else if (marking_state_->IsBlack(heap_object)) {
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
- }
- if (Page::FromHeapObject(heap_object)
- ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
- if (marking_state_->IsBlack(heap_object)) {
- return KEEP_SLOT;
- } else {
- return REMOVE_SLOT;
- }
- }
- return KEEP_SLOT;
+ // OLD_TO_NEW slots are recorded in dead memory, so they might point to
+ // dead objects.
+ DCHECK(!marking_state_->IsMarked(heap_object));
}
}
void UpdateUntypedPointers() {
UpdateUntypedOldToNewPointers();
- if (collector == GarbageCollector::MINOR_MARK_COMPACTOR) return;
UpdateUntypedOldToOldPointers();
UpdateUntypedOldToCodePointers();
UpdateUntypedOldToSharedPointers();
@@ -4948,30 +4864,28 @@ class RememberedSetUpdatingItem : public UpdatingItem {
: InvalidatedSlotsFilter::LivenessCheck::kNo;
InvalidatedSlotsFilter filter =
InvalidatedSlotsFilter::OldToNew(chunk_, liveness_check);
- int slots = RememberedSet<OLD_TO_NEW>::Iterate(
+ RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, &filter, cage_base](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- SlotCallbackResult result = CheckAndUpdateOldToNewSlot(slot);
+ CheckAndUpdateOldToNewSlot(slot);
// A new space string might have been promoted into the shared heap
// during GC.
if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
}
- return result;
+ // Always keep slot since all slots are dropped at once after
+ // iteration.
+ return KEEP_SLOT;
},
- SlotSet::FREE_EMPTY_BUCKETS);
-
- DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
-
- if (slots == 0) {
- chunk_->ReleaseSlotSet<OLD_TO_NEW>();
- }
+ SlotSet::KEEP_EMPTY_BUCKETS);
}
// The invalidated slots are not needed after old-to-new slots were
// processed.
chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ // Full GCs will empty new space, so OLD_TO_NEW is empty.
+ chunk_->ReleaseSlotSet<OLD_TO_NEW>();
}
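The pattern above, returning KEEP_SLOT unconditionally and then releasing the whole slot set, relies on the full GC emptying new space: none of the OLD_TO_NEW entries are needed afterwards, so dropping the set wholesale is cheaper than erasing slots one by one during iteration. A minimal sketch of that shape with a plain container (illustrative only, not V8's SlotSet):

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Visit every recorded slot (the callback may update the pointer the slot
    // refers to), then drop the entire remembered set in one step.
    void UpdateThenReleaseAll(std::vector<uintptr_t>& slots,
                              const std::function<void(uintptr_t)>& update) {
      for (uintptr_t slot : slots) {
        update(slot);
      }
      slots.clear();
      slots.shrink_to_fit();  // drop the backing storage, like ReleaseSlotSet
    }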
void UpdateUntypedOldToOldPointers() {
@@ -5004,8 +4918,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
void UpdateUntypedOldToCodePointers() {
- if (!V8_EXTERNAL_CODE_SPACE_BOOL) return;
-
if (chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>()) {
const PtrComprCageBase cage_base = heap_->isolate();
#ifdef V8_EXTERNAL_CODE_SPACE
@@ -5017,8 +4929,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_,
[=](MaybeObjectSlot slot) {
HeapObject host = HeapObject::FromAddress(
- slot.address() - CodeDataContainer::kCodeOffset);
- DCHECK(host.IsCodeDataContainer(cage_base));
+ slot.address() - Code::kInstructionStreamOffset);
+ DCHECK(host.IsCode(cage_base));
UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(
host, cage_base, code_cage_base,
CodeObjectSlot(slot.address()));
@@ -5039,7 +4951,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedOldToSharedPointers() {
if (chunk_->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()) {
// Client GCs need to remove invalidated OLD_TO_SHARED slots.
- DCHECK(!heap_->IsShared());
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_SHARED>::Iterate(
@@ -5057,7 +4968,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateTypedPointers() {
UpdateTypedOldToNewPointers();
- if (collector == GarbageCollector::MINOR_MARK_COMPACTOR) return;
UpdateTypedOldToOldPointers();
}
@@ -5066,20 +4976,25 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return;
const auto check_and_update_old_to_new_slot_fn =
[this](FullMaybeObjectSlot slot) {
- return CheckAndUpdateOldToNewSlot(slot);
+ CheckAndUpdateOldToNewSlot(slot);
+ return KEEP_SLOT;
};
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_, [this, &check_and_update_old_to_new_slot_fn](SlotType slot_type,
Address slot) {
- SlotCallbackResult result = UpdateTypedSlotHelper::UpdateTypedSlot(
+ UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
// A new space string might have been promoted into the shared heap
// during GC.
if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedTyped(chunk_, slot_type, slot);
}
- return result;
+ // Always keep slot since all slots are dropped at once after
+ // iteration.
+ return KEEP_SLOT;
});
+ // Full GCs will empty new space, so OLD_TO_NEW is empty.
+ chunk_->ReleaseTypedSlotSet<OLD_TO_NEW>();
}
void UpdateTypedOldToOldPointers() {
@@ -5114,26 +5029,18 @@ class RememberedSetUpdatingItem : public UpdatingItem {
} // namespace
-std::unique_ptr<UpdatingItem>
-MarkCompactCollector::CreateRememberedSetUpdatingItem(MemoryChunk* chunk) {
- return std::make_unique<
- RememberedSetUpdatingItem<GarbageCollector::MARK_COMPACTOR>>(heap(),
- chunk);
-}
-
namespace {
-template <typename IterateableSpace, typename Collector>
+template <typename IterateableSpace>
void CollectRememberedSetUpdatingItems(
- Collector* collector, std::vector<std::unique_ptr<UpdatingItem>>* items,
- IterateableSpace* space, RememberedSetUpdatingMode mode) {
+ std::vector<std::unique_ptr<UpdatingItem>>* items,
+ IterateableSpace* space) {
for (MemoryChunk* chunk : *space) {
// No need to update pointers on evacuation candidates. Evacuated pages will
// be released after this phase.
if (chunk->IsEvacuationCandidate()) continue;
- if (mode == RememberedSetUpdatingMode::ALL
- ? chunk->HasRecordedSlots()
- : chunk->HasRecordedOldToNewSlots()) {
- items->emplace_back(collector->CreateRememberedSetUpdatingItem(chunk));
+ if (chunk->HasRecordedSlots()) {
+ items->emplace_back(
+ std::make_unique<RememberedSetUpdatingItem>(space->heap(), chunk));
}
}
}
@@ -5170,7 +5077,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
HeapObject key = key_slot.ToHeapObject();
MapWord map_word = key.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
- key = map_word.ToForwardingAddress();
+ key = map_word.ToForwardingAddress(key);
key_slot.StoreHeapObject(key);
}
if (!heap_->InYoungGeneration(key)) {
@@ -5216,26 +5123,17 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
- CollectRememberedSetUpdatingItems(this, &updating_items,
- heap()->old_space(),
- RememberedSetUpdatingMode::ALL);
- CollectRememberedSetUpdatingItems(this, &updating_items,
- heap()->code_space(),
- RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space());
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space());
if (heap()->shared_space()) {
- CollectRememberedSetUpdatingItems(this, &updating_items,
- heap()->shared_space(),
- RememberedSetUpdatingMode::ALL);
- }
- CollectRememberedSetUpdatingItems(this, &updating_items, heap()->lo_space(),
- RememberedSetUpdatingMode::ALL);
- CollectRememberedSetUpdatingItems(this, &updating_items,
- heap()->code_lo_space(),
- RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items,
+ heap()->shared_space());
+ }
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space());
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space());
if (heap()->shared_lo_space()) {
- CollectRememberedSetUpdatingItems(this, &updating_items,
- heap()->shared_lo_space(),
- RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items,
+ heap()->shared_lo_space());
}
// Iterating to space may require a valid body descriptor for e.g.
@@ -5247,12 +5145,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
std::make_unique<EphemeronTableUpdatingItem>(heap()));
V8::GetCurrentPlatform()
- ->CreateJob(
- v8::TaskPriority::kUserBlocking,
- std::make_unique<PointersUpdatingJob>(
- isolate(), std::move(updating_items),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->CreateJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items)))
->Join();
}
@@ -5263,18 +5158,25 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
+    // Update pointers in the string forwarding table.
+    // When the GC was performed without a stack, the table was cleared and
+    // this does nothing. If this was a GC with a stack, we need to update
+    // the entries for evacuated objects.
+ isolate()->string_forwarding_table()->UpdateAfterFullEvacuation();
+
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessWeakListRoots(&evacuation_object_retainer);
}
+
+ // Flush the inner_pointer_to_code_cache which may now have stale contents.
+ isolate()->inner_pointer_to_code_cache()->Flush();
}
void MarkCompactCollector::UpdatePointersInClientHeaps() {
- if (!isolate()->is_shared_heap_isolate()) return;
+ if (!isolate()->is_shared_space_isolate()) return;
- isolate()->global_safepoint()->IterateClientIsolates([this](Isolate* client) {
- if (client->is_shared_heap_isolate()) return;
- UpdatePointersInClientHeap(client);
- });
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [this](Isolate* client) { UpdatePointersInClientHeap(client); });
}
void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
@@ -5295,16 +5197,16 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
if (chunk->InYoungGeneration()) chunk->ReleaseSlotSet<OLD_TO_SHARED>();
- RememberedSet<OLD_TO_SHARED>::IterateTyped(chunk, [this](SlotType slot_type,
- Address slot) {
- // Using UpdateStrongSlot is OK here, because there are no weak
- // typed slots.
- PtrComprCageBase cage_base = heap_->isolate();
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
- return UpdateStrongOldToSharedSlot(cage_base, slot);
- });
- });
+ RememberedSet<OLD_TO_SHARED>::IterateTyped(
+ chunk, [this](SlotType slot_type, Address slot) {
+ // Using UpdateStrongSlot is OK here, because there are no weak
+ // typed slots.
+ PtrComprCageBase cage_base = heap_->isolate();
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
+ return UpdateStrongOldToSharedSlot(cage_base, slot);
+ });
+ });
if (chunk->InYoungGeneration()) chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
}
}
@@ -5420,6 +5322,66 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
compacting_ = false;
}
+void MarkCompactCollector::StartSweepNewSpace() {
+ PagedSpaceForNewSpace* paged_space = heap()->paged_new_space()->paged_space();
+ paged_space->ClearAllocatorState();
+
+ int will_be_swept = 0;
+
+ DCHECK_EQ(Heap::ResizeNewSpaceMode::kNone, resize_new_space_);
+ resize_new_space_ = heap()->ShouldResizeNewSpace();
+ if (resize_new_space_ == Heap::ResizeNewSpaceMode::kShrink) {
+ paged_space->StartShrinking();
+ }
+
+ DCHECK(empty_new_space_pages_to_be_swept_.empty());
+ for (auto it = paged_space->begin(); it != paged_space->end();) {
+ Page* p = *(it++);
+ DCHECK(p->SweepingDone());
+
+ if (non_atomic_marking_state()->live_bytes(p) > 0) {
+ // Non-empty pages will be evacuated/promoted.
+ continue;
+ }
+
+ if (paged_space->ShouldReleaseEmptyPage()) {
+ paged_space->ReleasePage(p);
+ } else {
+ empty_new_space_pages_to_be_swept_.push_back(p);
+ }
+ will_be_swept++;
+ }
+
+ if (v8_flags.gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+ paged_space->name(), will_be_swept);
+ }
+}
+
+void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
+ auto* marking_state = heap()->non_atomic_marking_state();
+ PtrComprCageBase cage_base(heap()->isolate());
+ size_t surviving_object_size = 0;
+ for (auto it = space->begin(); it != space->end();) {
+ LargePage* current = *(it++);
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (!marking_state->IsMarked(object)) {
+ // Object is dead and page can be released.
+ space->RemovePage(current);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+ current);
+
+ continue;
+ }
+ Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+ current->ProgressBar().ResetIfEnabled();
+ non_atomic_marking_state()->SetLiveBytes(current, 0);
+ surviving_object_size += static_cast<size_t>(object.Size(cage_base));
+ }
+ space->set_objects_size(surviving_object_size);
+}
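
For orientation, the large-object sweep added above boils down to a simple per-page pattern: free pages whose single object is unmarked, reset the mark bit and live-byte counter on survivors, and accumulate the surviving sizes. A minimal standalone C++ sketch of that pattern, using hypothetical stand-in types rather than V8's internal classes:

    #include <cstddef>
    #include <list>

    struct LargePage {       // stand-in for a large-object page
      bool marked = false;   // mark bit of the single object on the page
      size_t object_size = 0;
      size_t live_bytes = 0; // live-byte counter kept by the marking state
    };

    struct LargeSpace {      // stand-in for a large-object space
      std::list<LargePage> pages;
      size_t objects_size = 0;

      void Sweep() {
        size_t surviving = 0;
        for (auto it = pages.begin(); it != pages.end();) {
          auto current = it++;       // advance before a possible removal
          if (!current->marked) {
            pages.erase(current);    // dead: release the whole page
            continue;
          }
          current->marked = false;   // clear mark for the next GC cycle
          current->live_bytes = 0;
          surviving += current->object_size;
        }
        objects_size = surviving;    // space-wide size of surviving objects
      }
    };
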
+
void MarkCompactCollector::Sweep() {
DCHECK(!sweeper()->sweeping_in_progress());
TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_SWEEP,
@@ -5438,6 +5400,12 @@ void MarkCompactCollector::Sweep() {
heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE_LO, ThreadKind::kMain);
SweepLargeSpace(heap()->code_lo_space());
}
+ if (heap()->shared_space()) {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_SHARED_LO,
+ ThreadKind::kMain);
+ SweepLargeSpace(heap()->shared_lo_space());
+ }
{
GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD,
ThreadKind::kMain);
@@ -5477,8 +5445,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
return marking_state_->bitmap(chunk);
}
- bool IsBlack(HeapObject object) override {
- return marking_state_->IsBlack(object);
+ bool IsMarked(HeapObject object) override {
+ return marking_state_->IsMarked(object);
}
void Run() override {
@@ -5486,6 +5454,10 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_space());
}
+ GarbageCollector collector() const override {
+ return GarbageCollector::MINOR_MARK_COMPACTOR;
+ }
+
protected:
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
@@ -5497,18 +5469,18 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyPointersImpl(start, end);
}
void VerifyCodePointer(CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Code slots never appear in new space because CodeDataContainers, the
- // only object that can contain code pointers, are always allocated in
- // the old space.
+ // Code slots never appear in new space because Code objects, the only
+ // objects that can contain code pointers, are always allocated in the old
+ // space.
UNREACHABLE();
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
@@ -5517,7 +5489,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsBlack(heap_object));
+ CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
}
template <typename TSlot>
@@ -5542,7 +5514,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
return Heap::InYoungGeneration(*p) &&
- !heap->non_atomic_marking_state()->IsBlack(HeapObject::cast(*p));
+ !heap->non_atomic_marking_state()->IsMarked(HeapObject::cast(*p));
}
} // namespace
@@ -5555,7 +5527,8 @@ YoungGenerationMainMarkingVisitor::YoungGenerationMainMarkingVisitor(
marking_state_(marking_state) {}
bool YoungGenerationMainMarkingVisitor::ShouldVisit(HeapObject object) {
- return marking_state_->GreyToBlack(object);
+ CHECK(marking_state_->GreyToBlack(object));
+ return true;
}
MinorMarkCompactCollector::~MinorMarkCompactCollector() = default;
@@ -5565,20 +5538,23 @@ void MinorMarkCompactCollector::SetUp() {}
void MinorMarkCompactCollector::TearDown() {
if (heap()->incremental_marking()->IsMinorMarking()) {
local_marking_worklists()->Publish();
- heap()->main_thread_local_heap()->marking_barrier()->Publish();
+ heap()->main_thread_local_heap()->marking_barrier()->PublishIfNeeded();
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
}
}
void MinorMarkCompactCollector::FinishConcurrentMarking() {
- if (v8_flags.concurrent_marking) {
+ if (v8_flags.concurrent_minor_mc_marking) {
DCHECK_EQ(heap()->concurrent_marking()->garbage_collector(),
GarbageCollector::MINOR_MARK_COMPACTOR);
heap()->concurrent_marking()->Cancel();
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
}
+ if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
+ cpp_heap->FinishConcurrentMarkingIfNeeded();
+ }
}
// static
@@ -5586,7 +5562,6 @@ constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: CollectorBase(heap, GarbageCollector::MINOR_MARK_COMPACTOR),
- page_parallel_job_semaphore_(0),
sweeper_(heap_->sweeper()) {}
std::pair<size_t, size_t> MinorMarkCompactCollector::ProcessMarkingWorklist(
@@ -5596,178 +5571,13 @@ std::pair<size_t, size_t> MinorMarkCompactCollector::ProcessMarkingWorklist(
UNREACHABLE();
}
-void MinorMarkCompactCollector::CleanupPromotedPages() {
- for (Page* p : promoted_pages_) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- non_atomic_marking_state()->ClearLiveness(p);
- }
- promoted_pages_.clear();
-
- for (LargePage* p : promoted_large_pages_) {
- DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- HeapObject object = p->GetObject();
- Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
- p->ProgressBar().ResetIfEnabled();
- non_atomic_marking_state()->SetLiveBytes(p, 0);
- }
- promoted_large_pages_.clear();
-}
-
-void MinorMarkCompactCollector::VisitObject(HeapObject obj) {
- main_marking_visitor_->Visit(obj.map(), obj);
-}
-
-void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS);
- heap_->array_buffer_sweeper()->RequestSweep(
- ArrayBufferSweeper::SweepingType::kYoung);
-}
-
-class YoungGenerationMigrationObserver final : public MigrationObserver {
- public:
- YoungGenerationMigrationObserver(Heap* heap,
- MarkCompactCollector* mark_compact_collector)
- : MigrationObserver(heap),
- mark_compact_collector_(mark_compact_collector) {}
-
- inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
- int size) final {
- // Migrate color to old generation marking in case the object survived
- // young generation garbage collection.
- if (heap_->incremental_marking()->IsMarking()) {
- DCHECK(heap_->atomic_marking_state()->IsWhite(dst));
- heap_->incremental_marking()->TransferColor(src, dst);
- }
- }
-
- protected:
- base::Mutex mutex_;
- MarkCompactCollector* mark_compact_collector_;
-};
-
-class YoungGenerationRecordMigratedSlotVisitor final
- : public RecordMigratedSlotVisitor {
- public:
- explicit YoungGenerationRecordMigratedSlotVisitor(Heap* heap)
- : RecordMigratedSlotVisitor(heap, nullptr) {}
-
- void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
- UNREACHABLE();
- }
-
- void MarkArrayBufferExtensionPromoted(HeapObject object) final {
- if (!object.IsJSArrayBuffer()) return;
- JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
- }
-
- private:
- // Only record slots for host objects that are considered as live by the
- // full collector.
- inline bool IsLive(HeapObject object) {
- return heap_->non_atomic_marking_state()->IsBlack(object);
- }
-
- inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
- Address slot) final {
- if (value->IsStrongOrWeak()) {
- BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
- if (p->InYoungGeneration()) {
- DCHECK_IMPLIES(p->IsToPage(),
- v8_flags.minor_mc ||
- p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
- p->IsLargePage());
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
- DCHECK(chunk->SweepingDone());
- RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
- } else if (p->IsEvacuationCandidate() && IsLive(host)) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
- MemoryChunk::FromHeapObject(host), slot);
- } else {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- MemoryChunk::FromHeapObject(host), slot);
- }
- } else if (p->InSharedHeap()) {
- DCHECK(!host.InSharedWritableHeap());
- RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
- MemoryChunk::FromHeapObject(host), slot);
- }
- }
- }
-};
-
-namespace {
-
-template <typename IterateableSpace>
-void DropOldToNewRememberedSets(IterateableSpace* space) {
- for (MemoryChunk* chunk : *space) {
- DCHECK(!chunk->IsEvacuationCandidate());
- chunk->ReleaseSlotSet<OLD_TO_NEW>();
- chunk->ReleaseTypedSlotSet<OLD_TO_NEW>();
- chunk->ReleaseInvalidatedSlots<OLD_TO_NEW>();
- }
-}
-
-} // namespace
-
-void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
-
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- if (heap()->paged_new_space()->Size() == 0) {
- DropOldToNewRememberedSets(heap()->old_space());
- DropOldToNewRememberedSets(heap()->code_space());
- DropOldToNewRememberedSets(heap()->lo_space());
- DropOldToNewRememberedSets(heap()->code_lo_space());
- } else {
- std::vector<std::unique_ptr<UpdatingItem>> updating_items;
-
- // Create batches of global handles.
- CollectRememberedSetUpdatingItems(
- this, &updating_items, heap()->old_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(
- this, &updating_items, heap()->code_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(
- this, &updating_items, heap()->lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(
- this, &updating_items, heap()->code_lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
-
- V8::GetCurrentPlatform()
- ->CreateJob(
- v8::TaskPriority::kUserBlocking,
- std::make_unique<PointersUpdatingJob>(
- isolate(), std::move(updating_items),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::Scope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
- ->Join();
- }
- }
-
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
+void MinorMarkCompactCollector::PerformWrapperTracing() {
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ if (!cpp_heap) return;
- // Update pointers from external string table.
- heap()->UpdateYoungReferencesInExternalStringTable([](Heap* heap,
- FullObjectSlot p) {
- DCHECK(
- !HeapObject::cast(*p).map_word(kRelaxedLoad).IsForwardingAddress());
- return String::cast(*p);
- });
- }
+ DCHECK(CppHeap::From(heap_->cpp_heap())->generational_gc_supported());
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_EMBEDDER_TRACING);
+ cpp_heap->AdvanceTracing(std::numeric_limits<double>::infinity());
}
class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
@@ -5788,6 +5598,10 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
}
}
+ GarbageCollector collector() const override {
+ return GarbageCollector::MINOR_MARK_COMPACTOR;
+ }
+
private:
V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
@@ -5796,70 +5610,67 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
MinorMarkCompactCollector* const collector_;
};
-void MinorMarkCompactCollector::Prepare() {
- DCHECK(sweeper()->IsSweepingDoneForSpace(NEW_SPACE));
-
- // Probably requires more.
- if (!heap()->incremental_marking()->IsMarking()) {
- StartMarking();
+void MinorMarkCompactCollector::StartMarking() {
+#ifdef VERIFY_HEAP
+ if (v8_flags.verify_heap) {
+ for (Page* page : *heap()->new_space()) {
+ CHECK(page->marking_bitmap<AccessMode::NON_ATOMIC>()->IsClean());
+ }
}
+#endif // VERIFY_HEAP
- heap()->new_space()->FreeLinearAllocationArea();
-}
-
-void MinorMarkCompactCollector::StartMarking() {
- local_marking_worklists_ =
- std::make_unique<MarkingWorklists::Local>(&marking_worklists_);
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ if (cpp_heap && cpp_heap->generational_gc_supported()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_EMBEDDER_PROLOGUE);
+ // InitializeTracing should be called before visitor initialization in
+ // StartMarking.
+ cpp_heap->InitializeTracing(CppHeap::CollectionType::kMinor);
+ }
+ local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
+ marking_worklists(),
+ cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
+ : MarkingWorklists::Local::kNoCppMarkingState);
main_marking_visitor_ = std::make_unique<YoungGenerationMainMarkingVisitor>(
heap()->isolate(), marking_state(), local_marking_worklists());
-
-#ifdef VERIFY_HEAP
- for (Page* page : *heap()->new_space()) {
- CHECK(page->marking_bitmap<AccessMode::NON_ATOMIC>()->IsClean());
+ if (cpp_heap && cpp_heap->generational_gc_supported()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_EMBEDDER_PROLOGUE);
+ // StartTracing immediately starts marking which requires V8 worklists to
+ // be set up.
+ cpp_heap->StartTracing();
}
-#endif // VERIFY_HEAP
}
void MinorMarkCompactCollector::Finish() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_FINISH);
+
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP);
- {
- DCHECK_NOT_NULL(heap()->new_lo_space());
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP_NEW_LO);
- SweepLargeSpace(heap()->new_lo_space());
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_FINISH_ENSURE_CAPACITY);
+ switch (resize_new_space_) {
+ case ResizeNewSpaceMode::kShrink:
+ heap()->ReduceNewSpaceSize();
+ break;
+ case ResizeNewSpaceMode::kGrow:
+ heap()->ExpandNewSpaceSize();
+ break;
+ case ResizeNewSpaceMode::kNone:
+ break;
}
+ resize_new_space_ = ResizeNewSpaceMode::kNone;
-#ifdef DEBUG
- heap()->VerifyCountersBeforeConcurrentSweeping(garbage_collector_);
-#endif
- }
-
- switch (resize_new_space_) {
- case ResizeNewSpaceMode::kShrink:
- heap()->ReduceNewSpaceSize();
- break;
- case ResizeNewSpaceMode::kGrow:
- heap()->ExpandNewSpaceSize();
- break;
- case ResizeNewSpaceMode::kNone:
- break;
- }
- resize_new_space_ = ResizeNewSpaceMode::kNone;
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->EnsureCurrentCapacity()) {
heap()->FatalProcessOutOfMemory("NewSpace::EnsureCurrentCapacity");
}
}
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_FINISH);
+ heap()->new_space()->GarbageCollectionEpilogue();
+
+ main_marking_visitor_->Finalize();
local_marking_worklists_.reset();
main_marking_visitor_.reset();
-
- sweeper()->StartSweeperTasks();
}
void MinorMarkCompactCollector::CollectGarbage() {
@@ -5867,8 +5678,11 @@ void MinorMarkCompactCollector::CollectGarbage() {
DCHECK_NOT_NULL(heap()->new_space());
// Minor MC does not support processing the ephemeron remembered set.
DCHECK(heap()->ephemeron_remembered_set_.empty());
+ DCHECK(!heap()->array_buffer_sweeper()->sweeping_in_progress());
+ DCHECK(!sweeper()->AreSweeperTasksRunning());
+ DCHECK(sweeper()->IsSweepingDoneForSpace(NEW_SPACE));
- heap()->array_buffer_sweeper()->EnsureFinished();
+ heap()->new_space()->FreeLinearAllocationArea();
MarkLiveObjects();
ClearNonLiveReferences();
@@ -5880,7 +5694,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
#endif // VERIFY_HEAP
Sweep();
- Evacuate();
Finish();
#ifdef VERIFY_HEAP
@@ -5892,10 +5705,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
#endif // VERIFY_HEAP
- CleanupPromotedPages();
-
- SweepArrayBufferExtensions();
-
auto* isolate = heap()->isolate();
isolate->global_handles()->UpdateListOfYoungNodes();
isolate->traced_handles()->UpdateListOfYoungNodes();
@@ -5911,14 +5720,14 @@ void MinorMarkCompactCollector::MakeIterable(
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state()->bitmap(p))) {
HeapObject const object = object_and_size.first;
- DCHECK(non_atomic_marking_state()->IsBlack(object));
+ DCHECK(non_atomic_marking_state()->IsMarked(object));
Address free_end = object.address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
- heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ DCHECK(heap_->non_atomic_marking_state()->bitmap(p)->AllBitsClearInRange(
p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(free_end));
+ p->AddressToMarkbitIndex(free_end)));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
@@ -5933,9 +5742,9 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
- heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ DCHECK(heap_->non_atomic_marking_state()->bitmap(p)->AllBitsClearInRange(
p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(p->area_end()));
+ p->AddressToMarkbitIndex(p->area_end())));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
@@ -5943,137 +5752,108 @@ void MinorMarkCompactCollector::MakeIterable(
}
}
-namespace {
-
-// Helper class for pruning the string table.
-class YoungGenerationExternalStringTableCleaner : public RootVisitor {
- public:
- explicit YoungGenerationExternalStringTableCleaner(Heap* heap)
- : heap_(heap), marking_state_(heap_->non_atomic_marking_state()) {}
-
- void VisitRootPointers(Root root, const char* description,
- FullObjectSlot start, FullObjectSlot end) override {
- DCHECK_EQ(static_cast<int>(root),
- static_cast<int>(Root::kExternalStringsTable));
- // Visit all HeapObject pointers in [start, end).
- for (FullObjectSlot p = start; p < end; ++p) {
- Object o = *p;
- if (o.IsHeapObject()) {
- HeapObject heap_object = HeapObject::cast(o);
- if (marking_state_->IsWhite(heap_object)) {
- if (o.IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
- } else {
- // The original external string may have been internalized.
- DCHECK(o.IsThinString());
- }
- // Set the entry to the_hole_value (as deleted).
- p.store(ReadOnlyRoots(heap_).the_hole_value());
- }
- }
- }
- }
-
- private:
- Heap* const heap_;
- NonAtomicMarkingState* const marking_state_;
-};
-
-} // namespace
-
void MinorMarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
- {
+ if (V8_UNLIKELY(v8_flags.always_use_string_forwarding_table)) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_CLEAR_STRING_FORWARDING_TABLE);
+ // Clear non-live objects in the string forwarding table.
+ StringForwardingTableCleaner forwarding_table_cleaner(heap());
+ forwarding_table_cleaner.ProcessYoungObjects();
+ }
+
+ Heap::ExternalStringTable& external_string_table =
+ heap()->external_string_table_;
+ if (external_string_table.HasYoung()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
// Internalized strings are always stored in old space, so there is no
// need to clean them here.
- YoungGenerationExternalStringTableCleaner external_visitor(heap());
- heap()->external_string_table_.IterateYoung(&external_visitor);
- heap()->external_string_table_.CleanUpYoung();
+ ExternalStringTableCleaner<ExternalStringTableCleaningMode::kYoungOnly>
+ external_visitor(heap());
+ external_string_table.IterateYoung(&external_visitor);
+ external_string_table.CleanUpYoung();
}
-}
-void MinorMarkCompactCollector::EvacuatePrologue() {
- NewSpace* new_space = heap()->new_space();
- // Append the list of new space pages to be processed.
- DCHECK_NOT_NULL(new_space);
- for (Page* p : *new_space) {
- if (non_atomic_marking_state()->live_bytes(p) > 0) {
- new_space_evacuation_pages_.push_back(p);
+ if (isolate()->global_handles()->HasYoung() ||
+ isolate()->traced_handles()->HasYoung()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_CLEAR_WEAK_GLOBAL_HANDLES);
+ isolate()->global_handles()->ProcessWeakYoungObjects(
+ nullptr, &IsUnmarkedObjectForYoungGeneration);
+ if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ cpp_heap && cpp_heap->generational_gc_supported()) {
+ isolate()->traced_handles()->ResetYoungDeadNodes(
+ &IsUnmarkedObjectForYoungGeneration);
+ } else {
+ isolate()->traced_handles()->ProcessYoungObjects(
+ nullptr, &IsUnmarkedObjectForYoungGeneration);
}
}
-
- heap()->new_lo_space()->Flip();
- heap()->new_lo_space()->ResetPendingObject();
-}
-
-void MinorMarkCompactCollector::EvacuateEpilogue() {
- heap()->new_space()->EvacuateEpilogue();
-
-#ifdef DEBUG
- VerifyRememberedSetsAfterEvacuation(heap(),
- GarbageCollector::MINOR_MARK_COMPACTOR);
-#endif // DEBUG
-}
-
-std::unique_ptr<UpdatingItem>
-MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(MemoryChunk* chunk) {
- return std::make_unique<
- RememberedSetUpdatingItem<GarbageCollector::MINOR_MARK_COMPACTOR>>(heap(),
- chunk);
}
class PageMarkingItem;
-class RootMarkingItem;
-class YoungGenerationMarkingTask {
- public:
- YoungGenerationMarkingTask(Isolate* isolate, Heap* heap,
- MarkingWorklists* global_worklists)
- : marking_worklists_local_(
- std::make_unique<MarkingWorklists::Local>(global_worklists)),
- marking_state_(heap->marking_state()),
- visitor_(isolate, marking_state_, marking_worklists_local()) {}
-
- void MarkObject(Object object) {
- if (!Heap::InYoungGeneration(object)) return;
- HeapObject heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
- visitor_.Visit(heap_object);
- // Objects transition to black when visited.
- DCHECK(marking_state_->IsBlack(heap_object));
+YoungGenerationMarkingTask::YoungGenerationMarkingTask(
+ Isolate* isolate, Heap* heap, MarkingWorklists* global_worklists)
+ : marking_worklists_local_(std::make_unique<MarkingWorklists::Local>(
+ global_worklists,
+ heap->cpp_heap()
+ ? CppHeap::From(heap->cpp_heap())->CreateCppMarkingState()
+ : MarkingWorklists::Local::kNoCppMarkingState)),
+ marking_state_(heap->marking_state()),
+ visitor_(isolate, marking_state_, marking_worklists_local()) {}
+
+void YoungGenerationMarkingTask::MarkYoungObject(HeapObject heap_object) {
+ if (marking_state_->TryMark(heap_object)) {
+ const auto visited_size = visitor_.Visit(heap_object);
+ if (visited_size) {
+ live_bytes_[MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(
+ heap_object))] += ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size);
}
+ // Objects transition to black when visited.
+ DCHECK(marking_state_->IsMarked(heap_object));
}
+}
- void EmptyMarkingWorklist() {
- HeapObject object;
- while (marking_worklists_local_->Pop(&object) ||
- marking_worklists_local_->PopOnHold(&object)) {
- visitor_.Visit(object);
+void YoungGenerationMarkingTask::DrainMarkingWorklist() {
+ HeapObject heap_object;
+ while (marking_worklists_local_->Pop(&heap_object) ||
+ marking_worklists_local_->PopOnHold(&heap_object)) {
+ const auto visited_size = visitor_.Visit(heap_object);
+ if (visited_size) {
+ live_bytes_[MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(
+ heap_object))] += ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size);
}
}
+ // Publish wrapper objects to the cppgc marking state, if registered.
+ marking_worklists_local_->PublishWrapper();
+}
- void PublishMarkingWorklist() { marking_worklists_local_->Publish(); }
+void YoungGenerationMarkingTask::PublishMarkingWorklist() {
+ marking_worklists_local_->Publish();
+}
- MarkingWorklists::Local* marking_worklists_local() {
- return marking_worklists_local_.get();
+void YoungGenerationMarkingTask::Finalize() {
+ visitor_.Finalize();
+ for (auto& pair : live_bytes_) {
+ marking_state_->IncrementLiveBytes(pair.first, pair.second);
}
-
- private:
- std::unique_ptr<MarkingWorklists::Local> marking_worklists_local_;
- MarkingState* marking_state_;
- YoungGenerationMainMarkingVisitor visitor_;
-};
+}
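
The Finalize() above flushes per-task byte counts into the shared marking state exactly once: each worker accumulates live bytes in a task-local map while visiting objects and only merges the totals on the main thread after the parallel phase, so marking itself never contends on shared counters. A rough standalone sketch of that accounting scheme (stand-in Chunk/MarkingState types, allocation alignment omitted, not V8 API):

    #include <cstddef>
    #include <unordered_map>

    struct Chunk { size_t live_bytes = 0; };

    struct MarkingState {
      void IncrementLiveBytes(Chunk* chunk, size_t bytes) {
        chunk->live_bytes += bytes;  // the real update is synchronized
      }
    };

    class MarkingTask {
     public:
      explicit MarkingTask(MarkingState* state) : state_(state) {}

      // Called for every object this task visits: only the local map is
      // touched, so there is no cross-thread contention during marking.
      void AccountLiveBytes(Chunk* chunk, size_t visited_size) {
        local_live_bytes_[chunk] += visited_size;
      }

      // Called once on the main thread after the parallel phase has joined.
      void Finalize() {
        for (const auto& pair : local_live_bytes_)
          state_->IncrementLiveBytes(pair.first, pair.second);
        local_live_bytes_.clear();
      }

     private:
      MarkingState* state_;
      std::unordered_map<Chunk*, size_t> local_live_bytes_;
    };
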
void PageMarkingItem::Process(YoungGenerationMarkingTask* task) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "PageMarkingItem::Process");
base::MutexGuard guard(chunk_->mutex());
- MarkUntypedPointers(task);
- MarkTypedPointers(task);
+ CodePageMemoryModificationScope memory_modification_scope(chunk_);
+ if (slots_type_ == SlotsType::kRegularSlots) {
+ MarkUntypedPointers(task);
+ } else {
+ MarkTypedPointers(task);
+ }
}
void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::MarkUntypedPointers");
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_NEW>::Iterate(
@@ -6083,9 +5863,14 @@ void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) {
return CheckAndMarkObject(task, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
+ // The invalidated slots are not needed after old-to-new slots were
+ // processed.
+ chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
}
void PageMarkingItem::MarkTypedPointers(YoungGenerationMarkingTask* task) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::MarkTypedPointers");
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_, [this, task](SlotType slot_type, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
@@ -6103,15 +5888,10 @@ V8_INLINE SlotCallbackResult PageMarkingItem::CheckAndMarkObject(
std::is_same<TSlot, MaybeObjectSlot>::value,
"Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
MaybeObject object = *slot;
- if (Heap::InYoungGeneration(object)) {
- // Marking happens before flipping the young generation, so the object
- // has to be in a to page.
- DCHECK(Heap::InToPage(object));
- HeapObject heap_object;
- bool success = object.GetHeapObject(&heap_object);
- USE(success);
- DCHECK(success);
- task->MarkObject(heap_object);
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object) &&
+ Heap::InYoungGeneration(heap_object)) {
+ task->MarkYoungObject(heap_object);
return KEEP_SLOT;
}
return REMOVE_SLOT;
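
The KEEP_SLOT/REMOVE_SLOT protocol above lets the remembered-set iterator prune itself while marking: slots that still reference young objects survive, everything else is dropped. A simplified, self-contained illustration of that filtering contract (hypothetical Object and slot model, not the real RememberedSet API):

    #include <algorithm>
    #include <vector>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    struct Object { bool is_young = false; };

    // A slot is worth keeping only if it still points at a young object.
    SlotCallbackResult CheckSlot(Object* target) {
      return (target && target->is_young) ? KEEP_SLOT : REMOVE_SLOT;
    }

    void FilterRememberedSet(std::vector<Object*>& slots) {
      slots.erase(std::remove_if(slots.begin(), slots.end(),
                                 [](Object* target) {
                                   return CheckSlot(target) == REMOVE_SLOT;
                                 }),
                  slots.end());
    }

    int main() {
      Object young_obj;  young_obj.is_young = true;
      Object old_obj;    // stays in the old generation
      std::vector<Object*> slots = {&young_obj, &old_obj, nullptr};
      FilterRememberedSet(slots);
      return slots.size() == 1 ? 0 : 1;  // only the young slot remains
    }
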
@@ -6135,7 +5915,7 @@ size_t YoungGenerationMarkingJob::GetMaxConcurrency(size_t worker_count) const {
const int kPagesPerTask = 2;
size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
size_t num_tasks;
- if (!incremental()) {
+ if (ShouldDrainMarkingWorklist()) {
num_tasks = std::max(
(items + 1) / kPagesPerTask,
global_worklists_->shared()->Size() +
@@ -6157,10 +5937,12 @@ void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
- YoungGenerationMarkingTask task(isolate_, heap_, global_worklists_);
+ const int task_id = delegate->GetTaskId();
+ DCHECK_LT(task_id, tasks_.size());
+ YoungGenerationMarkingTask& task = tasks_[task_id];
ProcessMarkingItems(&task);
- if (!incremental()) {
- task.EmptyMarkingWorklist();
+ if (ShouldDrainMarkingWorklist()) {
+ task.DrainMarkingWorklist();
} else {
task.PublishMarkingWorklist();
}
@@ -6185,8 +5967,8 @@ void YoungGenerationMarkingJob::ProcessMarkingItems(
auto& work_item = marking_items_[i];
if (!work_item.TryAcquire()) break;
work_item.Process(task);
- if (!incremental()) {
- task->EmptyMarkingWorklist();
+ if (ShouldDrainMarkingWorklist()) {
+ task->DrainMarkingWorklist();
}
if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
1) {
@@ -6196,117 +5978,158 @@ void YoungGenerationMarkingJob::ProcessMarkingItems(
}
}
-void MinorMarkCompactCollector::MarkRootSetInParallel(
+void MinorMarkCompactCollector::MarkLiveObjectsInParallel(
RootMarkingVisitor* root_visitor, bool was_marked_incrementally) {
- {
- std::vector<PageMarkingItem> marking_items;
+ std::vector<PageMarkingItem> marking_items;
- // Seed the root set (roots + old->new set).
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
- isolate()->traced_handles()->ComputeWeaknessForYoungObjects(
- &JSObject::IsUnmodifiedApiObject);
- // MinorMC treats all weak roots except for global handles as strong.
- // That is why we don't set skip_weak = true here and instead visit
- // global handles separately.
- heap()->IterateRoots(
- root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
- SkipRoot::kGlobalHandles,
- SkipRoot::kOldGeneration});
- isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
+ // Seed the root set (roots + old->new set).
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ isolate()->traced_handles()->ComputeWeaknessForYoungObjects(
+ &JSObject::IsUnmodifiedApiObject);
+ // MinorMC treats all weak roots except for global handles as strong.
+ // That is why we don't set skip_weak = true here and instead visit
+ // global handles separately.
+ heap()->IterateRoots(root_visitor,
+ base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
+ SkipRoot::kGlobalHandles,
+ SkipRoot::kOldGeneration});
+ isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
+ root_visitor);
+ if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ cpp_heap && cpp_heap->generational_gc_supported()) {
+ // Visit the Oilpan-to-V8 remembered set.
+ isolate()->traced_handles()->IterateAndMarkYoungRootsWithOldHosts(
root_visitor);
+ // Visit the V8-to-Oilpan remembered set.
+ cpp_heap->VisitCrossHeapRememberedSetIfNeeded([this](JSObject obj) {
+ VisitObjectWithEmbedderFields(obj, *local_marking_worklists());
+ });
+ } else {
+ // Otherwise, visit all young roots.
isolate()->traced_handles()->IterateYoungRoots(root_visitor);
+ }
- if (!was_marked_incrementally) {
- // Create items for each page.
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&marking_items](MemoryChunk* chunk) {
- marking_items.emplace_back(chunk);
- });
- }
+ if (!was_marked_incrementally) {
+ // Create items for each page.
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&marking_items](MemoryChunk* chunk) {
+ if (chunk->slot_set<OLD_TO_NEW>()) {
+ marking_items.emplace_back(
+ chunk, PageMarkingItem::SlotsType::kRegularSlots);
+ } else {
+ chunk->ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ }
+
+ if (chunk->typed_slot_set<OLD_TO_NEW>()) {
+ marking_items.emplace_back(
+ chunk, PageMarkingItem::SlotsType::kTypedSlots);
+ }
+ });
}
+ }
- // Add tasks and run in parallel.
- {
- // The main thread might hold local items, while GlobalPoolSize() ==
- // 0. Flush to ensure these items are visible globally and picked up
- // by the job.
- local_marking_worklists_->Publish();
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_CLOSURE_PARALLEL);
- V8::GetCurrentPlatform()
- ->CreateJob(
- v8::TaskPriority::kUserBlocking,
- std::make_unique<YoungGenerationMarkingJob>(
- isolate(), heap(), marking_worklists(),
- std::move(marking_items), YoungMarkingJobType::kAtomic))
- ->Join();
+ // Add tasks and run in parallel.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_CLOSURE_PARALLEL);
+
+ // CppGC starts parallel marking tasks that will trace TracedReferences.
+ if (heap_->cpp_heap()) {
+ CppHeap::From(heap_->cpp_heap())
+ ->EnterFinalPause(heap_->embedder_stack_state_);
+ }
+
+ // The main thread might hold local items, while GlobalPoolSize() ==
+ // 0. Flush to ensure these items are visible globally and picked up
+ // by the job.
+ local_marking_worklists_->Publish();
- DCHECK(local_marking_worklists_->IsEmpty());
+ std::vector<YoungGenerationMarkingTask> tasks;
+ for (size_t i = 0; i < (v8_flags.parallel_marking ? kMaxParallelTasks : 1);
+ ++i) {
+ tasks.emplace_back(isolate(), heap(), marking_worklists());
+ }
+ V8::GetCurrentPlatform()
+ ->CreateJob(
+ v8::TaskPriority::kUserBlocking,
+ std::make_unique<YoungGenerationMarkingJob>(
+ isolate(), heap(), marking_worklists(),
+ std::move(marking_items), YoungMarkingJobType::kAtomic, tasks))
+ ->Join();
+ for (YoungGenerationMarkingTask& task : tasks) {
+ task.Finalize();
}
+ // If unified young generation is in progress, the parallel marker may add
+ // more entries into local_marking_worklists_.
+ DCHECK_IMPLIES(!v8_flags.cppgc_young_generation,
+ local_marking_worklists_->IsEmpty());
}
}
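
The marking job above is handed a pre-built vector of YoungGenerationMarkingTask objects, one per potential worker; each worker picks tasks_[task_id], and the main thread finalizes every task after Join(). A compact, self-contained model of that per-worker-task pattern using plain std::thread (illustrative only, not the V8 job API):

    #include <algorithm>
    #include <cstddef>
    #include <thread>
    #include <vector>

    // Worker i only ever touches tasks[i], so the tasks need no locking;
    // the main thread merges their results once all workers have joined.
    struct Task {
      size_t processed = 0;
      void Process() { ++processed; }  // placeholder for real marking work
      void Finalize(size_t* total) { *total += processed; }
    };

    int main() {
      const size_t num_workers =
          std::max(1u, std::thread::hardware_concurrency());
      std::vector<Task> tasks(num_workers);
      std::vector<std::thread> workers;
      for (size_t i = 0; i < num_workers; ++i) {
        workers.emplace_back([&tasks, i] { tasks[i].Process(); });
      }
      for (auto& worker : workers) worker.join();

      size_t total = 0;
      for (Task& task : tasks) task.Finalize(&total);
      return total == num_workers ? 0 : 1;
    }
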
void MinorMarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
- DCHECK_NOT_NULL(local_marking_worklists_);
- DCHECK_NOT_NULL(main_marking_visitor_);
-
- PostponeInterruptsScope postpone(isolate());
-
- bool was_marked_incrementally = false;
- {
+ const bool was_marked_incrementally =
+ !heap_->incremental_marking()->IsStopped();
+ if (!was_marked_incrementally) {
+ StartMarking();
+ } else {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_MARK_FINISH_INCREMENTAL);
- if (heap_->incremental_marking()->Stop()) {
- MarkingBarrier::PublishAll(heap());
- // TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FULL_CLOSURE_PARALLEL_JOIN.
- // TODO(v8:13012): Instead of finishing concurrent marking here, we could
- // continue running it to replace parallel marking.
- FinishConcurrentMarking();
- was_marked_incrementally = true;
- }
+ auto* incremental_marking = heap_->incremental_marking();
+ DCHECK(incremental_marking->IsMinorMarking());
+ incremental_marking->Stop();
+ MarkingBarrier::PublishAll(heap());
+ // TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FULL_CLOSURE_PARALLEL_JOIN.
+ // TODO(v8:13012): Instead of finishing concurrent marking here, we could
+ // continue running it to replace parallel marking.
+ FinishConcurrentMarking();
}
+ DCHECK_NOT_NULL(local_marking_worklists_);
+ DCHECK_NOT_NULL(main_marking_visitor_);
+
RootMarkingVisitor root_visitor(this);
- MarkRootSetInParallel(&root_visitor, was_marked_incrementally);
+ MarkLiveObjectsInParallel(&root_visitor, was_marked_incrementally);
- // Mark rest on the main thread.
{
+ // Finish marking the transitive closure on the main thread.
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_CLOSURE);
+ if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
+ cpp_heap->FinishConcurrentMarkingIfNeeded();
+ }
DrainMarkingWorklist();
}
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
- isolate()->global_handles()->ProcessWeakYoungObjects(
- &root_visitor, &IsUnmarkedObjectForYoungGeneration);
- isolate()->traced_handles()->ProcessYoungObjects(
- &root_visitor, &IsUnmarkedObjectForYoungGeneration);
- DrainMarkingWorklist();
+ if (was_marked_incrementally) {
+ MarkingBarrier::DeactivateAll(heap());
}
if (v8_flags.minor_mc_trace_fragmentation) {
TraceFragmentation();
}
-
- if (was_marked_incrementally) {
- MarkingBarrier::DeactivateAll(heap());
- }
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
PtrComprCageBase cage_base(isolate());
- HeapObject object;
- while (local_marking_worklists_->Pop(&object)) {
- DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
- DCHECK(object.IsHeapObject());
- DCHECK(heap()->Contains(object));
- DCHECK(non_atomic_marking_state()->IsBlack(object));
- main_marking_visitor_->Visit(object);
- }
+ do {
+ PerformWrapperTracing();
+
+ HeapObject heap_object;
+ while (local_marking_worklists_->Pop(&heap_object)) {
+ DCHECK(!heap_object.IsFreeSpaceOrFiller(cage_base));
+ DCHECK(heap_object.IsHeapObject());
+ DCHECK(heap()->Contains(heap_object));
+ DCHECK(!non_atomic_marking_state()->IsUnmarked(heap_object));
+ const auto visited_size = main_marking_visitor_->Visit(heap_object);
+ if (visited_size) {
+ marking_state_->IncrementLiveBytes(
+ MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(heap_object)),
+ visited_size);
+ }
+ }
+ } while (!IsCppHeapMarkingFinished());
DCHECK(local_marking_worklists_->IsEmpty());
}
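
The do/while in DrainMarkingWorklist runs to a fixed point: advancing cppgc wrapper tracing can push more V8 work and draining V8 objects can create more embedder work, so the loop repeats until both sides are done in the same pass. A minimal standalone model of such a two-worklist fixed-point drain (toy queues, not cppgc's actual interface):

    #include <deque>

    struct Worklists {
      std::deque<int> v8;
      std::deque<int> embedder;
    };

    // Placeholder embedder step: move embedder work back into the V8
    // worklist. Returns true when the embedder has nothing left to do.
    bool AdvanceEmbedderTracing(Worklists& w) {
      while (!w.embedder.empty()) {
        w.v8.push_back(w.embedder.front());
        w.embedder.pop_front();
      }
      return true;
    }

    void DrainToFixedPoint(Worklists& w) {
      bool embedder_done;
      do {
        embedder_done = AdvanceEmbedderTracing(w);
        while (!w.v8.empty()) {
          w.v8.pop_front();  // "visit" the object; real code may push new work
        }
      } while (!embedder_done || !w.embedder.empty());
    }

    int main() {
      Worklists w;
      w.v8 = {1, 2, 3};
      w.embedder = {4, 5};
      DrainToFixedPoint(w);
      return (w.v8.empty() && w.embedder.empty()) ? 0 : 1;
    }
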
@@ -6364,157 +6187,133 @@ void MinorMarkCompactCollector::TraceFragmentation() {
free_bytes_of_class[3]);
}
-void MinorMarkCompactCollector::Evacuate() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
- base::MutexGuard guard(heap()->relocation_mutex());
+bool MinorMarkCompactCollector::StartSweepNewSpace() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP_NEW);
+ PagedSpaceForNewSpace* paged_space = heap()->paged_new_space()->paged_space();
+ paged_space->ClearAllocatorState();
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
- EvacuatePrologue();
- }
+ int will_be_swept = 0;
+ bool has_promoted_pages = false;
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
- EvacuationScope evacuation_scope(heap());
- EvacuatePagesInParallel();
+ DCHECK_EQ(Heap::ResizeNewSpaceMode::kNone, resize_new_space_);
+ resize_new_space_ = heap()->ShouldResizeNewSpace();
+ if (resize_new_space_ == Heap::ResizeNewSpaceMode::kShrink) {
+ paged_space->StartShrinking();
}
- UpdatePointersAfterEvacuation();
+ for (auto it = paged_space->begin(); it != paged_space->end();) {
+ Page* p = *(it++);
+ DCHECK(p->SweepingDone());
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
- for (Page* p : new_space_evacuation_pages_) {
- DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
- if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- promoted_pages_.push_back(p);
+ intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(p);
+ if (live_bytes_on_page == 0) {
+ if (paged_space->ShouldReleaseEmptyPage()) {
+ paged_space->ReleasePage(p);
+ } else {
+ sweeper()->SweepEmptyNewSpacePage(p);
}
+ continue;
}
- new_space_evacuation_pages_.clear();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
- EvacuateEpilogue();
- }
-}
-
-namespace {
-
-class YoungGenerationEvacuator : public Evacuator {
- public:
- explicit YoungGenerationEvacuator(Heap* heap)
- : Evacuator(heap, &record_visitor_, &local_allocator_,
- AlwaysPromoteYoung::kNo),
- record_visitor_(heap_),
- local_allocator_(
- heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
- collector_(heap_->minor_mark_compact_collector()) {}
-
- GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
- return GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
- }
-
- GCTracer::Scope::ScopeId GetTracingScope() override {
- return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
- }
-
- protected:
- bool RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
-
- YoungGenerationRecordMigratedSlotVisitor record_visitor_;
- EvacuationAllocator local_allocator_;
- MinorMarkCompactCollector* collector_;
-};
-
-bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
- intptr_t* live_bytes) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "YoungGenerationEvacuator::RawEvacuatePage");
- NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(chunk);
- DCHECK_EQ(kPageNewToOld, ComputeEvacuationMode(chunk));
- LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,
- &new_to_old_page_visitor_);
- new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(chunk));
- if (!chunk->IsLargePage()) {
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(static_cast<Page*>(chunk),
- FreeSpaceTreatmentMode::kZapFreeSpace);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits
- // of the full collector. We cannot yet discard the young
- // generation mark bits as they are still relevant for pointers
- // updating.
- collector_->MakeIterable(static_cast<Page*>(chunk),
- FreeSpaceTreatmentMode::kIgnoreFreeSpace);
- }
- }
-
- return true;
-}
-
-} // namespace
-void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
- intptr_t live_bytes = 0;
-
- for (Page* page : new_space_evacuation_pages_) {
- intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
- DCHECK_LT(0, live_bytes_on_page);
- live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page, page->wasted_memory(),
+ if (ShouldMovePage(p, live_bytes_on_page, p->wasted_memory(),
MemoryReductionMode::kNone, AlwaysPromoteYoung::kNo,
heap()->tracer()->IsCurrentGCDueToAllocationFailure()
? PromoteUnusablePages::kYes
: PromoteUnusablePages::kNo)) {
- EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
- evacuation_items.emplace_back(ParallelWorkItem{}, page);
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(p);
+ has_promoted_pages = true;
+ sweeper()->AddPromotedPageForIteration(p);
} else {
// Page is not promoted. Sweep it instead.
- sweeper()->AddNewSpacePage(page);
+ sweeper()->AddNewSpacePage(p);
+ will_be_swept++;
}
}
- // Promote young generation large objects.
- for (auto it = heap()->new_lo_space()->begin();
- it != heap()->new_lo_space()->end();) {
+ if (v8_flags.gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+ paged_space->name(), will_be_swept);
+ }
+
+ return has_promoted_pages;
+}
+
+bool MinorMarkCompactCollector::SweepNewLargeSpace() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP_NEW_LO);
+ NewLargeObjectSpace* new_lo_space = heap()->new_lo_space();
+ DCHECK_NOT_NULL(new_lo_space);
+
+ heap()->new_lo_space()->ResetPendingObject();
+
+ bool has_promoted_pages = false;
+
+ auto* marking_state = heap()->non_atomic_marking_state();
+ OldLargeObjectSpace* old_lo_space = heap()->lo_space();
+
+ for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
LargePage* current = *it;
it++;
HeapObject object = current->GetObject();
- if (non_atomic_marking_state()->IsBlack(object)) {
- heap_->lo_space()->PromoteNewLargeObject(current);
- current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- promoted_large_pages_.push_back(current);
- evacuation_items.emplace_back(ParallelWorkItem{}, current);
+ DCHECK(!marking_state->IsGrey(object));
+ if (!marking_state->IsMarked(object)) {
+ // Object is dead and page can be released.
+ new_lo_space->RemovePage(current);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+ current);
+ continue;
}
- heap()->new_lo_space()->set_objects_size(0);
+ current->ClearFlag(MemoryChunk::TO_PAGE);
+ current->SetFlag(MemoryChunk::FROM_PAGE);
+ current->ProgressBar().ResetIfEnabled();
+ old_lo_space->PromoteNewLargeObject(current);
+ has_promoted_pages = true;
+ sweeper()->AddPromotedPageForIteration(current);
}
- if (evacuation_items.empty()) return;
+ new_lo_space->set_objects_size(0);
- YoungGenerationMigrationObserver observer(heap(),
- heap()->mark_compact_collector());
- const auto pages_count = evacuation_items.size();
- const auto wanted_num_tasks =
- CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- heap(), std::move(evacuation_items), &observer);
-
- if (v8_flags.trace_evacuation) {
- TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
- }
+ return has_promoted_pages;
}
void MinorMarkCompactCollector::Sweep() {
DCHECK(!sweeper()->AreSweeperTasksRunning());
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP);
+
+ bool has_promoted_pages = false;
+ if (StartSweepNewSpace()) has_promoted_pages = true;
+ if (SweepNewLargeSpace()) has_promoted_pages = true;
+
+ if (v8_flags.verify_heap && has_promoted_pages) {
+ // Update the external string table in preparation for heap verification.
+ // Otherwise, updating the table will happen during the next full GC.
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_SWEEP_UPDATE_STRING_TABLE);
+ heap()->UpdateYoungReferencesInExternalStringTable([](Heap* heap,
+ FullObjectSlot p) {
+ DCHECK(
+ !HeapObject::cast(*p).map_word(kRelaxedLoad).IsForwardingAddress());
+ return String::cast(*p);
+ });
+ }
+
+ sweeper_->StartSweeping(GarbageCollector::MINOR_MARK_COMPACTOR);
+
+#ifdef DEBUG
+ VerifyRememberedSetsAfterEvacuation(heap(),
+ GarbageCollector::MINOR_MARK_COMPACTOR);
+ heap()->VerifyCountersBeforeConcurrentSweeping(
+ GarbageCollector::MINOR_MARK_COMPACTOR);
+#endif
+
{
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_SWEEP_NEW,
- ThreadKind::kMain);
- StartSweepNewSpace();
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP_START_JOBS);
+ sweeper()->StartSweeperTasks();
+ DCHECK_EQ(0, heap_->new_lo_space()->Size());
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kYoung,
+ (heap_->new_space()->Size() == 0)
+ ? ArrayBufferSweeper::TreatAllYoungAsPromoted::kYes
+ : ArrayBufferSweeper::TreatAllYoungAsPromoted::kNo);
}
- sweeper_->StartSweeping(garbage_collector_);
}
} // namespace internal
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 005d4fec0c..556ce4f5b5 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -9,8 +9,10 @@
#include <vector>
#include "include/v8-internal.h"
+#include "src/common/globals.h"
#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
+#include "src/heap/index-generator.h"
#include "src/heap/marking-state.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
@@ -34,6 +36,7 @@ class PagedNewSpace;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
class UpdatingItem;
+class YoungGenerationMarkingTask;
class MarkBitCellIterator {
public:
@@ -191,32 +194,26 @@ class MainMarkingVisitor final
WeakObjects::Local* local_weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool embedder_tracing_enabled,
+ bool trace_embedder_fields,
bool should_keep_ages_unchanged)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
local_marking_worklists, local_weak_objects, heap,
- mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
+ mark_compact_epoch, code_flush_mode, trace_embedder_fields,
should_keep_ages_unchanged),
marking_state_(marking_state) {}
- // HeapVisitor override.
bool ShouldVisit(HeapObject object) {
- return marking_state_->GreyToBlack(object);
+ CHECK(marking_state_->GreyToBlack(object));
+ return true;
}
private:
// Functions required by MarkingVisitorBase.
- template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
- int VisitJSObjectSubclass(Map map, T object);
-
- template <typename T>
- int VisitLeftTrimmableArray(Map map, T object);
-
template <typename TSlot>
void RecordSlot(HeapObject object, TSlot slot, HeapObject target);
- void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
+ void RecordRelocSlot(RelocInfo* rinfo, HeapObject target);
MarkingState* marking_state() { return marking_state_; }
@@ -240,7 +237,6 @@ class YoungGenerationMainMarkingVisitor final
MarkingState* marking_state,
MarkingWorklists::Local* worklists_local);
- // HeapVisitor override.
bool ShouldVisit(HeapObject object);
private:
@@ -263,7 +259,7 @@ class CollectorBase {
MarkingWorklists* marking_worklists() { return &marking_worklists_; }
- MarkingWorklists::Local* local_marking_worklists() {
+ MarkingWorklists::Local* local_marking_worklists() const {
return local_marking_worklists_.get();
}
@@ -273,16 +269,12 @@ class CollectorBase {
virtual std::pair<size_t, size_t> ProcessMarkingWorklist(
size_t bytes_to_process) = 0;
- // Used by incremental marking for object that change their layout.
- virtual void VisitObject(HeapObject obj) = 0;
-
virtual void Finish() = 0;
bool IsMajorMC();
private:
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<LargePage*> promoted_large_pages_;
protected:
using ResizeNewSpaceMode = Heap::ResizeNewSpaceMode;
@@ -297,8 +289,8 @@ class CollectorBase {
}
void StartSweepSpace(PagedSpace* space);
- void StartSweepNewSpace();
- void SweepLargeSpace(LargeObjectSpace* space);
+
+ bool IsCppHeapMarkingFinished() const;
Heap* heap_;
GarbageCollector garbage_collector_;
@@ -321,6 +313,7 @@ class MarkCompactCollector final : public CollectorBase {
using MarkingVisitor = MainMarkingVisitor<MarkingState>;
class CustomRootBodyMarkingVisitor;
+ class ClientCustomRootBodyMarkingVisitor;
class SharedHeapObjectVisitor;
class RootMarkingVisitor;
@@ -338,6 +331,11 @@ class MarkCompactCollector final : public CollectorBase {
return static_cast<MarkCompactCollector*>(collector);
}
+ // Callback function for telling whether the object *p is an unmarked
+ // heap object.
+ static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);
+ static bool IsUnmarkedSharedHeapObject(Heap* heap, FullObjectSlot p);
+
std::pair<size_t, size_t> ProcessMarkingWorklist(
size_t bytes_to_process) final;
@@ -383,24 +381,25 @@ class MarkCompactCollector final : public CollectorBase {
static V8_EXPORT_PRIVATE bool IsMapOrForwarded(Map map);
- static bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
- HeapObject target);
- static RecordRelocSlotInfo ProcessRelocInfo(Code host, RelocInfo* rinfo,
+ static bool ShouldRecordRelocSlot(RelocInfo* rinfo, HeapObject target);
+ static RecordRelocSlotInfo ProcessRelocInfo(RelocInfo* rinfo,
HeapObject target);
- static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
+ static void RecordRelocSlot(RelocInfo* rinfo, HeapObject target);
V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
HeapObject target);
V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target);
V8_INLINE static void RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target);
- void RecordLiveSlotsOnPage(Page* page);
bool is_compacting() const { return compacting_; }
inline void AddTransitionArray(TransitionArray array);
+ void RecordStrongDescriptorArraysForWeakening(
+ GlobalHandleVector<DescriptorArray> strong_descriptor_arrays);
+
#ifdef DEBUG
// Checks whether performing mark-compact collection.
bool in_use() { return state_ > PREPARE_GC; }
@@ -410,7 +409,6 @@ class MarkCompactCollector final : public CollectorBase {
void VerifyMarking();
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
- void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreClean(PagedSpaceBase* space);
void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyMarkbitsAreClean(LargeObjectSpace* space);
@@ -425,8 +423,6 @@ class MarkCompactCollector final : public CollectorBase {
WeakObjects* weak_objects() { return &weak_objects_; }
WeakObjects::Local* local_weak_objects() { return local_weak_objects_.get(); }
- void VisitObject(HeapObject obj) final;
-
void AddNewlyDiscovered(HeapObject object) {
if (ephemeron_marking_.newly_discovered_overflowed) return;
@@ -446,20 +442,6 @@ class MarkCompactCollector final : public CollectorBase {
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() final;
- // Used by wrapper tracing.
- V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
-
- std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
- MemoryChunk* chunk);
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_MB
- // Finds an object header based on a `maybe_inner_ptr`. It returns
- // `kNullAddress` if the parameter does not point to (the interior of) a valid
- // heap object, or if it points to (the interior of) some object that is
- // already marked as live (black or grey).
- V8_EXPORT_PRIVATE Address FindBasePtrForMarking(Address maybe_inner_ptr);
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_MB
-
private:
Sweeper* sweeper() { return sweeper_; }
@@ -486,17 +468,20 @@ class MarkCompactCollector final : public CollectorBase {
V8_INLINE void MarkRootObject(Root root, HeapObject obj);
// Mark the heap roots and all objects reachable from them.
- void MarkRoots(RootVisitor* root_visitor,
- ObjectVisitor* custom_root_body_visitor);
+ void MarkRoots(RootVisitor* root_visitor);
// Mark the stack roots and all objects reachable from them.
- void MarkRootsFromStack(RootVisitor* root_visitor);
+ void MarkRootsFromConservativeStack(RootVisitor* root_visitor);
// Mark all objects that are directly referenced from one of the clients
// heaps.
void MarkObjectsFromClientHeaps();
void MarkObjectsFromClientHeap(Isolate* client);
+ // Mark the entry in the external pointer table for the given isolate's
+ // WaiterQueueNode.
+ void MarkWaiterQueueNode(Isolate* isolate);
+
// Updates pointers to shared objects from client heaps.
void UpdatePointersInClientHeaps();
void UpdatePointersInClientHeap(Isolate* client);
@@ -532,10 +517,6 @@ class MarkCompactCollector final : public CollectorBase {
// Perform Wrapper Tracing if in use.
void PerformWrapperTracing();
- // Callback function for telling whether the object *p is an unmarked
- // heap object.
- static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);
-
// Retain dying maps for `v8_flags.retain_maps_for_n_gc` garbage collections
// to increase chances of reusing of map transition tree in future.
void RetainMaps();
@@ -570,6 +551,7 @@ class MarkCompactCollector final : public CollectorBase {
DescriptorArray descriptors);
bool TransitionArrayNeedsCompaction(TransitionArray transitions,
int num_transitions);
+ void WeakenStrongDescriptorArrays();
// After all reachable objects have been marked those weak map entries
// with an unreachable key are removed from all encountered weak maps.
@@ -612,6 +594,9 @@ class MarkCompactCollector final : public CollectorBase {
V8_INLINE bool ShouldMarkObject(HeapObject) const;
+ void StartSweepNewSpace();
+ void SweepLargeSpace(LargeObjectSpace* space);
+
base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_{0};
@@ -629,7 +614,7 @@ class MarkCompactCollector final : public CollectorBase {
#endif
const bool uses_shared_heap_;
- const bool is_shared_heap_isolate_;
+ const bool is_shared_space_isolate_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
@@ -646,6 +631,9 @@ class MarkCompactCollector final : public CollectorBase {
NativeContextInferrer native_context_inferrer_;
NativeContextStats native_context_stats_;
+ std::vector<GlobalHandleVector<DescriptorArray>> strong_descriptor_arrays_;
+ base::Mutex strong_descriptor_arrays_mutex_;
+
// Candidates for pages that should be evacuated.
std::vector<Page*> evacuation_candidates_;
// Pages that are actually processed during evacuation.
@@ -672,7 +660,9 @@ class MarkCompactCollector final : public CollectorBase {
// the start of each GC.
base::EnumSet<CodeFlushMode> code_flush_mode_;
- friend class FullEvacuator;
+ std::vector<Page*> empty_new_space_pages_to_be_swept_;
+
+ friend class Evacuator;
friend class RecordMigratedSlotVisitor;
};
@@ -694,18 +684,15 @@ class MinorMarkCompactCollector final : public CollectorBase {
void SetUp() final;
void TearDown() final;
void CollectGarbage() final;
- void Prepare() final;
+ void Prepare() final {}
void StartMarking() final;
void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);
- void CleanupPromotedPages();
-
- std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
- MemoryChunk* chunk);
void Finish() final;
- void VisitObject(HeapObject obj) final;
+ // Perform Wrapper Tracing if in use.
+ void PerformWrapperTracing();
private:
class RootMarkingVisitor;
@@ -716,8 +703,8 @@ class MinorMarkCompactCollector final : public CollectorBase {
Sweeper* sweeper() { return sweeper_; }
void MarkLiveObjects();
- void MarkRootSetInParallel(RootMarkingVisitor* root_visitor,
- bool was_marked_incrementally);
+ void MarkLiveObjectsInParallel(RootMarkingVisitor* root_visitor,
+ bool was_marked_incrementally);
V8_INLINE void MarkRootObject(HeapObject obj);
void DrainMarkingWorklist();
void TraceFragmentation();
@@ -725,22 +712,15 @@ class MinorMarkCompactCollector final : public CollectorBase {
void Sweep();
- void EvacuatePrologue();
- void EvacuateEpilogue();
- void Evacuate();
- void EvacuatePagesInParallel();
- void UpdatePointersAfterEvacuation();
void FinishConcurrentMarking();
- void SweepArrayBufferExtensions();
+ // 'StartSweepNewSpace' and 'SweepNewLargeSpace' return true if any pages were
+ // promoted.
+ bool StartSweepNewSpace();
+ bool SweepNewLargeSpace();
std::unique_ptr<YoungGenerationMainMarkingVisitor> main_marking_visitor_;
- base::Semaphore page_parallel_job_semaphore_;
- std::vector<Page*> new_space_evacuation_pages_;
- std::vector<Page*> promoted_pages_;
- std::vector<LargePage*> promoted_large_pages_;
-
Sweeper* const sweeper_;
friend class YoungGenerationMarkingTask;
@@ -748,6 +728,92 @@ class MinorMarkCompactCollector final : public CollectorBase {
friend class YoungGenerationMainMarkingVisitor;
};
+class PageMarkingItem : public ParallelWorkItem {
+ public:
+ enum class SlotsType { kRegularSlots, kTypedSlots };
+
+ PageMarkingItem(MemoryChunk* chunk, SlotsType slots_type)
+ : chunk_(chunk), slots_type_(slots_type) {}
+ ~PageMarkingItem() = default;
+
+ void Process(YoungGenerationMarkingTask* task);
+
+ private:
+ inline Heap* heap() { return chunk_->heap(); }
+
+ void MarkUntypedPointers(YoungGenerationMarkingTask* task);
+ void MarkTypedPointers(YoungGenerationMarkingTask* task);
+ template <typename TSlot>
+ V8_INLINE SlotCallbackResult
+ CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot);
+
+ MemoryChunk* chunk_;
+ const SlotsType slots_type_;
+};
+
+enum class YoungMarkingJobType { kAtomic, kIncremental };
+
+class YoungGenerationMarkingJob : public v8::JobTask {
+ public:
+ YoungGenerationMarkingJob(Isolate* isolate, Heap* heap,
+ MarkingWorklists* global_worklists,
+ std::vector<PageMarkingItem> marking_items,
+ YoungMarkingJobType young_marking_job_type,
+ std::vector<YoungGenerationMarkingTask>& tasks)
+ : isolate_(isolate),
+ heap_(heap),
+ global_worklists_(global_worklists),
+ marking_items_(std::move(marking_items)),
+ remaining_marking_items_(marking_items_.size()),
+ generator_(marking_items_.size()),
+ young_marking_job_type_(young_marking_job_type),
+ tasks_(tasks) {}
+
+ void Run(JobDelegate* delegate) override;
+ size_t GetMaxConcurrency(size_t worker_count) const override;
+
+ bool ShouldDrainMarkingWorklist() const {
+ return young_marking_job_type_ == YoungMarkingJobType::kAtomic;
+ }
+
+ private:
+ void ProcessItems(JobDelegate* delegate);
+ void ProcessMarkingItems(YoungGenerationMarkingTask* task);
+
+ Isolate* isolate_;
+ Heap* heap_;
+ MarkingWorklists* global_worklists_;
+ std::vector<PageMarkingItem> marking_items_;
+ std::atomic_size_t remaining_marking_items_{0};
+ IndexGenerator generator_;
+ YoungMarkingJobType young_marking_job_type_;
+ std::vector<YoungGenerationMarkingTask>& tasks_;
+};
+
+class YoungGenerationMarkingTask final {
+ public:
+ YoungGenerationMarkingTask(Isolate* isolate, Heap* heap,
+ MarkingWorklists* global_worklists);
+
+ void MarkYoungObject(HeapObject heap_object);
+
+ void DrainMarkingWorklist();
+
+ void PublishMarkingWorklist();
+
+ MarkingWorklists::Local* marking_worklists_local() {
+ return marking_worklists_local_.get();
+ }
+
+ void Finalize();
+
+ private:
+ std::unique_ptr<MarkingWorklists::Local> marking_worklists_local_;
+ MarkingState* marking_state_;
+ YoungGenerationMainMarkingVisitor visitor_;
+ std::unordered_map<MemoryChunk*, size_t, MemoryChunk::Hasher> live_bytes_;
+};
+
} // namespace internal
} // namespace v8
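
The new PageMarkingItem, YoungGenerationMarkingJob and YoungGenerationMarkingTask classes above distribute page-marking work across job workers; Run()/GetMaxConcurrency() follow the v8::JobTask interface and remaining_marking_items_ presumably lets concurrency shrink as items drain. A minimal standalone sketch of that work-distribution pattern, assuming plain std::thread workers and an illustrative WorkItem type in place of V8's JobDelegate/IndexGenerator machinery:

// Illustrative sketch only; WorkItem and ProcessItems are not V8 APIs.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct WorkItem {
  int page_id;
  void Process(int worker) {
    std::printf("worker %d marked page %d\n", worker, page_id);
  }
};

// Each worker claims the next unprocessed item index from a shared atomic
// counter; this mirrors how ProcessMarkingItems() drains marking_items_.
void ProcessItems(std::vector<WorkItem>& items, std::atomic_size_t& next,
                  int worker) {
  for (size_t i = next.fetch_add(1); i < items.size(); i = next.fetch_add(1)) {
    items[i].Process(worker);
  }
}

int main() {
  std::vector<WorkItem> items;
  for (int i = 0; i < 16; ++i) items.push_back({i});
  std::atomic_size_t next{0};
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; ++w) {
    workers.emplace_back(ProcessItems, std::ref(items), std::ref(next), w);
  }
  for (auto& t : workers) t.join();
  return 0;
}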
diff --git a/deps/v8/src/heap/marking-barrier-inl.h b/deps/v8/src/heap/marking-barrier-inl.h
index 4d83a533e0..320949eb5b 100644
--- a/deps/v8/src/heap/marking-barrier-inl.h
+++ b/deps/v8/src/heap/marking-barrier-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_MARKING_BARRIER_INL_H_
#define V8_HEAP_MARKING_BARRIER_INL_H_
+#include "src/base/logging.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-barrier.h"
@@ -12,10 +13,15 @@
namespace v8 {
namespace internal {
-bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
- DCHECK(IsCurrentMarkingBarrier());
- DCHECK(is_activated_);
- DCHECK(!marking_state_.IsImpossible(value));
+void MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
+ if (value.InReadOnlySpace()) return;
+
+ DCHECK(IsCurrentMarkingBarrier(host));
+ DCHECK(is_activated_ || shared_heap_worklist_.has_value());
+
+ DCHECK_IMPLIES(!value.InWritableSharedSpace() || is_shared_space_isolate_,
+ !marking_state_.IsImpossible(value));
+
// Host may have an impossible markbit pattern if manual allocation folding
// is performed and host happens to be the last word of an allocated region.
// In that case host has only one markbit and the second markbit belongs to
@@ -23,58 +29,94 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
// filler map.
DCHECK(!marking_state_.IsImpossible(host) ||
value == ReadOnlyRoots(heap_->isolate()).one_pointer_filler_map());
- if (!V8_CONCURRENT_MARKING_BOOL && !marking_state_.IsBlack(host)) {
- // The value will be marked and the slot will be recorded when the marker
- // visits the host object.
- return false;
+
+ // When the shared heap isn't enabled, all objects are local and we can just
+ // run the local marking barrier. From the point of view of the shared space
+ // isolate (= main isolate), shared objects are also considered local.
+ if (V8_UNLIKELY(uses_shared_heap_) && !is_shared_space_isolate_) {
+ // Check whether incremental marking is enabled for that object's space.
+ if (!MemoryChunk::FromHeapObject(host)->IsMarking()) {
+ return;
+ }
+
+ if (host.InWritableSharedSpace()) {
+ // Invoke the shared marking barrier when storing into shared objects.
+ MarkValueShared(value);
+ return;
+ } else if (value.InWritableSharedSpace()) {
+ // No marking needed when storing shared objects in local objects.
+ return;
+ }
}
- if (!ShouldMarkObject(value)) return false;
+ DCHECK_IMPLIES(host.InWritableSharedSpace(), is_shared_space_isolate_);
+ DCHECK_IMPLIES(value.InWritableSharedSpace(), is_shared_space_isolate_);
+
+ DCHECK(is_activated_);
+ MarkValueLocal(value);
+}
+
+void MarkingBarrier::MarkValueShared(HeapObject value) {
+ // Value is either in read-only space or shared heap.
+ DCHECK(value.InAnySharedSpace());
+
+ // We should only reach this on client isolates (= worker isolates).
+ DCHECK(!is_shared_space_isolate_);
+ DCHECK(shared_heap_worklist_.has_value());
+
+ // Mark shared object and push it onto shared heap worklist.
+ if (marking_state_.TryMark(value)) {
+ shared_heap_worklist_->Push(value);
+ }
+}
+
+void MarkingBarrier::MarkValueLocal(HeapObject value) {
+ DCHECK(!value.InReadOnlySpace());
if (is_minor()) {
// We do not need to insert into RememberedSet<OLD_TO_NEW> here because the
// C++ marking barrier already does this for us.
if (Heap::InYoungGeneration(value)) {
WhiteToGreyAndPush(value); // NEW->NEW
}
- return false;
} else {
if (WhiteToGreyAndPush(value)) {
if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWriteBarrier, value);
}
}
- return true;
- }
-}
-
-bool MarkingBarrier::ShouldMarkObject(HeapObject object) const {
- if (V8_LIKELY(!uses_shared_heap_)) return true;
- if (v8_flags.shared_space) {
- if (is_shared_heap_isolate_) return true;
- return !object.InSharedHeap();
- } else {
- return is_shared_heap_isolate_ == object.InSharedHeap();
}
}
template <typename TSlot>
inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
auto* isolate = heap_->isolate();
+ const bool record_slots =
+ IsCompacting(host) &&
+ !MemoryChunk::FromHeapObject(host)->ShouldSkipEvacuationSlotRecording();
for (TSlot slot = start; slot < end; ++slot) {
typename TSlot::TObject object = slot.Relaxed_Load();
HeapObject heap_object;
// Mark both, weak and strong edges.
if (object.GetHeapObject(isolate, &heap_object)) {
- if (MarkValue(host, heap_object) && is_compacting_) {
- DCHECK(is_major());
+ MarkValue(host, heap_object);
+ if (record_slots) {
major_collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
}
}
}
}
+bool MarkingBarrier::IsCompacting(HeapObject object) const {
+ if (is_compacting_) {
+ DCHECK(is_major());
+ return true;
+ }
+
+ return shared_heap_worklist_.has_value() && object.InWritableSharedSpace();
+}
+
bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
- if (marking_state_.WhiteToGrey(obj)) {
+ if (marking_state_.TryMark(obj)) {
current_worklist_->Push(obj);
return true;
}
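
MarkValue() above now dispatches between a local and a shared marking path instead of returning a bool for slot recording. A simplified sketch of that dispatch, assuming a toy Space enum and two boolean flags in place of the HeapObject/MemoryChunk checks:

// Illustrative sketch; Space and Barrier are stand-ins, not V8 types.
#include <cstdio>

enum class Space { kReadOnly, kLocal, kShared };

struct Barrier {
  bool uses_shared_heap;
  bool is_shared_space_isolate;

  void MarkValue(Space host, Space value) {
    if (value == Space::kReadOnly) return;  // Read-only objects are never marked.
    if (uses_shared_heap && !is_shared_space_isolate) {
      if (host == Space::kShared) {
        // Storing into a shared object: push onto the shared heap worklist.
        MarkValueShared(value);
        return;
      }
      if (value == Space::kShared) return;  // Shared values are ignored on clients.
    }
    MarkValueLocal(value);
  }
  void MarkValueShared(Space) { std::puts("push onto shared heap worklist"); }
  void MarkValueLocal(Space) { std::puts("push onto local worklist"); }
};

int main() {
  Barrier client{true, false};  // client (worker) isolate with a shared heap
  client.MarkValue(Space::kShared, Space::kShared);  // shared marking path
  client.MarkValue(Space::kLocal, Space::kShared);   // ignored on clients
  client.MarkValue(Space::kLocal, Space::kLocal);    // regular local marking
  return 0;
}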
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 5509de66d1..66ab72a669 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -18,6 +18,7 @@
#include "src/heap/safepoint.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -29,58 +30,66 @@ MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
incremental_marking_(heap_->incremental_marking()),
major_worklist_(*major_collector_->marking_worklists()->shared()),
minor_worklist_(*minor_collector_->marking_worklists()->shared()),
- marking_state_(heap_->isolate()),
+ marking_state_(isolate()),
is_main_thread_barrier_(local_heap->is_main_thread()),
- uses_shared_heap_(heap_->isolate()->has_shared_heap()),
- is_shared_heap_isolate_(heap_->isolate()->is_shared_heap_isolate()) {}
+ uses_shared_heap_(isolate()->has_shared_space()),
+ is_shared_space_isolate_(isolate()->is_shared_space_isolate()) {}
MarkingBarrier::~MarkingBarrier() { DCHECK(typed_slots_map_.empty()); }
void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
HeapObject value) {
- DCHECK(IsCurrentMarkingBarrier());
- if (MarkValue(host, value)) {
- if (is_compacting_ && slot.address()) {
- DCHECK(is_major());
- major_collector_->RecordSlot(host, slot, value);
- }
+ DCHECK(IsCurrentMarkingBarrier(host));
+ DCHECK(is_activated_ || shared_heap_worklist_.has_value());
+ DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking());
+ MarkValue(host, value);
+
+ if (slot.address() && IsCompacting(host)) {
+ MarkCompactCollector::RecordSlot(host, slot, value);
}
}
void MarkingBarrier::WriteWithoutHost(HeapObject value) {
DCHECK(is_main_thread_barrier_);
- if (is_minor() && !Heap::InYoungGeneration(value)) return;
-
- if (WhiteToGreyAndPush(value)) {
- if (V8_UNLIKELY(v8_flags.track_retaining_path) && is_major()) {
- heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+ DCHECK(is_activated_);
+
+ // Without a shared heap, and on the shared space isolate (= main isolate),
+ // all objects are considered local.
+ if (V8_UNLIKELY(uses_shared_heap_) && !is_shared_space_isolate_) {
+ // On client isolates (= worker isolates) shared values can be ignored.
+ if (value.InWritableSharedSpace()) {
+ return;
}
}
+ if (value.InReadOnlySpace()) return;
+ MarkValueLocal(value);
}
-void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
- DCHECK(IsCurrentMarkingBarrier());
- if (MarkValue(host, value)) {
- if (is_compacting_) {
- DCHECK(is_major());
- if (is_main_thread_barrier_) {
- // An optimization to avoid allocating additional typed slots for the
- // main thread.
- major_collector_->RecordRelocSlot(host, reloc_info, value);
- } else {
- RecordRelocSlot(host, reloc_info, value);
- }
+void MarkingBarrier::Write(InstructionStream host, RelocInfo* reloc_info,
+ HeapObject value) {
+ DCHECK(IsCurrentMarkingBarrier(host));
+ DCHECK(!host.InWritableSharedSpace());
+ DCHECK(is_activated_ || shared_heap_worklist_.has_value());
+ DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking());
+ MarkValue(host, value);
+ if (is_compacting_) {
+ DCHECK(is_major());
+ if (is_main_thread_barrier_) {
+ // An optimization to avoid allocating additional typed slots for the
+ // main thread.
+ major_collector_->RecordRelocSlot(reloc_info, value);
+ } else {
+ RecordRelocSlot(reloc_info, value);
}
}
}
void MarkingBarrier::Write(JSArrayBuffer host,
ArrayBufferExtension* extension) {
- DCHECK(IsCurrentMarkingBarrier());
- if (!V8_CONCURRENT_MARKING_BOOL && !marking_state_.IsBlack(host)) {
- // The extension will be marked when the marker visits the host object.
- return;
- }
+ DCHECK(IsCurrentMarkingBarrier(host));
+ DCHECK(!host.InWritableSharedSpace());
+ DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking());
+
if (is_minor()) {
if (Heap::InYoungGeneration(host)) {
extension->YoungMark();
@@ -92,50 +101,57 @@ void MarkingBarrier::Write(JSArrayBuffer host,
void MarkingBarrier::Write(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
- DCHECK(IsCurrentMarkingBarrier());
+ DCHECK(IsCurrentMarkingBarrier(descriptor_array));
DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
+ DCHECK(MemoryChunk::FromHeapObject(descriptor_array)->IsMarking());
- if (is_minor() && !heap_->InYoungGeneration(descriptor_array)) return;
+ // Only major GC uses custom liveness.
+ if (is_minor() || descriptor_array.IsStrongDescriptorArray()) {
+ MarkValueLocal(descriptor_array);
+ return;
+ }
+
+ unsigned gc_epoch;
+ MarkingWorklist::Local* worklist;
+ if (V8_UNLIKELY(uses_shared_heap_) &&
+ descriptor_array.InWritableSharedSpace() && !is_shared_space_isolate_) {
+ gc_epoch = isolate()
+ ->shared_space_isolate()
+ ->heap()
+ ->mark_compact_collector()
+ ->epoch();
+ DCHECK(shared_heap_worklist_.has_value());
+ worklist = &*shared_heap_worklist_;
+ } else {
+ gc_epoch = major_collector_->epoch();
+ worklist = current_worklist_;
+ }
- // The DescriptorArray needs to be marked black here to ensure that slots are
- // recorded by the Scavenger in case the DescriptorArray is promoted while
- // incremental marking is running. This is needed as the regular marking
- // visitor does not re-process any already marked descriptors. If we don't
- // mark it black here, the Scavenger may promote a DescriptorArray and any
- // already marked descriptors will not have any slots recorded.
- if (!marking_state_.IsBlack(descriptor_array)) {
- marking_state_.WhiteToGrey(descriptor_array);
+ // The DescriptorArray needs to be marked black here to ensure that slots
+ // are recorded by the Scavenger in case the DescriptorArray is promoted
+ // while incremental marking is running. This is needed as the regular
+ // marking visitor does not re-process any already marked descriptors. If we
+ // don't mark it black here, the Scavenger may promote a DescriptorArray and
+ // any already marked descriptors will not have any slots recorded.
+ if (!marking_state_.IsMarked(descriptor_array)) {
+ marking_state_.TryMark(descriptor_array);
marking_state_.GreyToBlack(descriptor_array);
- MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
- descriptor_array.GetDescriptorSlot(0));
}
- // Concurrent MinorMC always marks the full young generation DescriptorArray.
- // We cannot use epoch like MajorMC does because only the lower 2 bits are
- // used, and with many MinorMC cycles this could lead to correctness issues.
- const int16_t old_marked =
- is_minor() ? 0
- : descriptor_array.UpdateNumberOfMarkedDescriptors(
- major_collector_->epoch(), number_of_own_descriptors);
- if (old_marked < number_of_own_descriptors) {
- // This marks the range from [old_marked, number_of_own_descriptors) instead
- // of registering weak slots which may temporarily hold alive more objects
- // for the current GC cycle. Weakness is not needed for actual trimming, see
- // `MarkCompactCollector::TrimDescriptorArray()`.
- MarkRange(descriptor_array,
- MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
- MaybeObjectSlot(descriptor_array.GetDescriptorSlot(
- number_of_own_descriptors)));
+ // `TryUpdateIndicesToMark()` acts as a barrier that publishes the slots'
+ // values corresponding to `number_of_own_descriptors`.
+ if (DescriptorArrayMarkingState::TryUpdateIndicesToMark(
+ gc_epoch, descriptor_array, number_of_own_descriptors)) {
+ worklist->Push(descriptor_array);
}
}
-void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
- HeapObject target) {
- DCHECK(IsCurrentMarkingBarrier());
- if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target)) return;
+void MarkingBarrier::RecordRelocSlot(RelocInfo* rinfo, HeapObject target) {
+ DCHECK(IsCurrentMarkingBarrier(rinfo->instruction_stream()));
+ if (!MarkCompactCollector::ShouldRecordRelocSlot(rinfo, target)) return;
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
+ MarkCompactCollector::ProcessRelocInfo(rinfo, target);
auto& typed_slots = typed_slots_map_[info.memory_chunk];
if (!typed_slots) {
@@ -161,7 +177,8 @@ void ActivateSpaces(Heap* heap) {
ActivateSpace(heap->old_space());
{
CodePageHeaderModificationScope rwx_write_scope(
- "Modification of Code page header flags requires write access");
+ "Modification of InstructionStream page header flags requires write "
+ "access");
ActivateSpace(heap->code_space());
}
ActivateSpace(heap->new_space());
@@ -180,7 +197,8 @@ void ActivateSpaces(Heap* heap) {
{
CodePageHeaderModificationScope rwx_write_scope(
- "Modification of Code page header flags requires write access");
+ "Modification of InstructionStream page header flags requires write "
+ "access");
for (LargePage* p : *heap->code_lo_space()) {
p->SetOldGenerationPageFlags(true);
}
@@ -240,6 +258,21 @@ void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting,
local_heap->marking_barrier()->Activate(is_compacting,
marking_barrier_type);
});
+
+ if (heap->isolate()->is_shared_space_isolate()) {
+ heap->isolate()
+ ->shared_space_isolate()
+ ->global_safepoint()
+ ->IterateClientIsolates([](Isolate* client) {
+ // Force the RecordWrite builtin into the incremental marking code
+ // path.
+ client->heap()->SetIsMarkingFlag(true);
+ client->heap()->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) {
+ local_heap->marking_barrier()->ActivateShared();
+ });
+ });
+ }
}
void MarkingBarrier::Activate(bool is_compacting,
@@ -253,6 +286,15 @@ void MarkingBarrier::Activate(bool is_compacting,
is_activated_ = true;
}
+void MarkingBarrier::ActivateShared() {
+ DCHECK(!shared_heap_worklist_.has_value());
+ Isolate* shared_isolate = isolate()->shared_space_isolate();
+ shared_heap_worklist_.emplace(*shared_isolate->heap()
+ ->mark_compact_collector()
+ ->marking_worklists()
+ ->shared());
+}
+
// static
void MarkingBarrier::DeactivateAll(Heap* heap) {
DeactivateSpaces(heap);
@@ -260,6 +302,24 @@ void MarkingBarrier::DeactivateAll(Heap* heap) {
heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->marking_barrier()->Deactivate();
});
+
+ if (heap->isolate()->is_shared_space_isolate()) {
+ heap->isolate()
+ ->shared_space_isolate()
+ ->global_safepoint()
+ ->IterateClientIsolates([](Isolate* client) {
+ // We can't simply disable the marking barrier for all clients. A
+ // client may still need it to be set for incremental marking in its
+ // local heap.
+ const bool is_marking =
+ client->heap()->incremental_marking()->IsMarking();
+ client->heap()->SetIsMarkingFlag(is_marking);
+ client->heap()->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) {
+ local_heap->marking_barrier()->DeactivateShared();
+ });
+ });
+ }
}
void MarkingBarrier::Deactivate() {
@@ -269,13 +329,31 @@ void MarkingBarrier::Deactivate() {
DCHECK(current_worklist_->IsLocalEmpty());
}
+void MarkingBarrier::DeactivateShared() {
+ DCHECK(shared_heap_worklist_->IsLocalAndGlobalEmpty());
+ shared_heap_worklist_.reset();
+}
+
// static
void MarkingBarrier::PublishAll(Heap* heap) {
- heap->safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->marking_barrier()->Publish(); });
+ heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->marking_barrier()->PublishIfNeeded();
+ });
+
+ if (heap->isolate()->is_shared_space_isolate()) {
+ heap->isolate()
+ ->shared_space_isolate()
+ ->global_safepoint()
+ ->IterateClientIsolates([](Isolate* client) {
+ client->heap()->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) {
+ local_heap->marking_barrier()->PublishSharedIfNeeded();
+ });
+ });
+ }
}
-void MarkingBarrier::Publish() {
+void MarkingBarrier::PublishIfNeeded() {
if (is_activated_) {
current_worklist_->Publish();
base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
@@ -299,9 +377,26 @@ void MarkingBarrier::Publish() {
}
}
-bool MarkingBarrier::IsCurrentMarkingBarrier() {
- return WriteBarrier::CurrentMarkingBarrier(heap_) == this;
+void MarkingBarrier::PublishSharedIfNeeded() {
+ if (shared_heap_worklist_) {
+ shared_heap_worklist_->Publish();
+ }
+}
+
+bool MarkingBarrier::IsCurrentMarkingBarrier(
+ HeapObject verification_candidate) {
+ return WriteBarrier::CurrentMarkingBarrier(verification_candidate) == this;
+}
+
+Isolate* MarkingBarrier::isolate() const { return heap_->isolate(); }
+
+#if DEBUG
+void MarkingBarrier::AssertMarkingIsActivated() const { DCHECK(is_activated_); }
+
+void MarkingBarrier::AssertSharedMarkingIsActivated() const {
+ DCHECK(shared_heap_worklist_.has_value());
}
+#endif // DEBUG
} // namespace internal
} // namespace v8
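
ActivateAll() and DeactivateAll() above now also walk the client isolates of a shared space isolate, and deactivation has to respect clients that are still running their own incremental marking. A standalone sketch of that lifecycle, with illustrative Client and SharedSpaceIsolate types standing in for Isolate, GlobalSafepoint and LocalHeap:

// Illustrative sketch; Client and SharedSpaceIsolate are stand-ins, not V8 types.
#include <functional>
#include <vector>

struct Client {
  bool local_incremental_marking = false;
  bool is_marking_flag = false;
  bool shared_barrier_active = false;
};

struct SharedSpaceIsolate {
  std::vector<Client> clients;
  void IterateClientIsolates(const std::function<void(Client&)>& f) {
    for (Client& c : clients) f(c);
  }
};

void ActivateAllShared(SharedSpaceIsolate& shared) {
  shared.IterateClientIsolates([](Client& c) {
    c.is_marking_flag = true;        // force the write-barrier code path
    c.shared_barrier_active = true;  // ActivateShared()
  });
}

void DeactivateAllShared(SharedSpaceIsolate& shared) {
  shared.IterateClientIsolates([](Client& c) {
    // A client may still need the marking flag for its own incremental GC.
    c.is_marking_flag = c.local_incremental_marking;
    c.shared_barrier_active = false;  // DeactivateShared()
  });
}

int main() {
  SharedSpaceIsolate shared{{Client{}, Client{true}}};
  ActivateAllShared(shared);
  DeactivateAllShared(shared);
  return 0;
}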
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index f35e7939be..382267edf6 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -8,6 +8,7 @@
#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-worklist.h"
namespace v8 {
namespace internal {
@@ -27,7 +28,11 @@ class MarkingBarrier {
void Activate(bool is_compacting, MarkingBarrierType marking_barrier_type);
void Deactivate();
- void Publish();
+ void PublishIfNeeded();
+
+ void ActivateShared();
+ void DeactivateShared();
+ void PublishSharedIfNeeded();
static void ActivateAll(Heap* heap, bool is_compacting,
MarkingBarrierType marking_barrier_type);
@@ -35,35 +40,47 @@ class MarkingBarrier {
V8_EXPORT_PRIVATE static void PublishAll(Heap* heap);
void Write(HeapObject host, HeapObjectSlot, HeapObject value);
- void Write(Code host, RelocInfo*, HeapObject value);
+ void Write(InstructionStream host, RelocInfo*, HeapObject value);
void Write(JSArrayBuffer host, ArrayBufferExtension*);
void Write(DescriptorArray, int number_of_own_descriptors);
// Only usable when there's no valid JS host object for this write, e.g., when
// value is held alive from a global handle.
void WriteWithoutHost(HeapObject value);
- // Returns true if the slot needs to be recorded.
- inline bool MarkValue(HeapObject host, HeapObject value);
+ inline void MarkValue(HeapObject host, HeapObject value);
bool is_minor() const {
return marking_barrier_type_ == MarkingBarrierType::kMinor;
}
+ Heap* heap() const { return heap_; }
+
+#if DEBUG
+ void AssertMarkingIsActivated() const;
+ void AssertSharedMarkingIsActivated() const;
+#endif // DEBUG
+
private:
- inline bool ShouldMarkObject(HeapObject value) const;
+ inline void MarkValueShared(HeapObject value);
+ inline void MarkValueLocal(HeapObject value);
+
inline bool WhiteToGreyAndPush(HeapObject value);
- void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
+ void RecordRelocSlot(RelocInfo* rinfo, HeapObject target);
- bool IsCurrentMarkingBarrier();
+ bool IsCurrentMarkingBarrier(HeapObject verification_candidate);
template <typename TSlot>
inline void MarkRange(HeapObject value, TSlot start, TSlot end);
+ inline bool IsCompacting(HeapObject object) const;
+
bool is_major() const {
return marking_barrier_type_ == MarkingBarrierType::kMajor;
}
+ Isolate* isolate() const;
+
Heap* heap_;
MarkCompactCollector* major_collector_;
MinorMarkCompactCollector* minor_collector_;
@@ -71,16 +88,17 @@ class MarkingBarrier {
MarkingWorklist::Local major_worklist_;
MarkingWorklist::Local minor_worklist_;
MarkingWorklist::Local* current_worklist_;
+ base::Optional<MarkingWorklist::Local> shared_heap_worklist_;
MarkingState marking_state_;
std::unordered_map<MemoryChunk*, std::unique_ptr<TypedSlots>,
MemoryChunk::Hasher>
typed_slots_map_;
bool is_compacting_ = false;
bool is_activated_ = false;
- bool is_main_thread_barrier_;
+ const bool is_main_thread_barrier_;
const bool uses_shared_heap_;
- const bool is_shared_heap_isolate_;
- MarkingBarrierType marking_barrier_type_;
+ const bool is_shared_space_isolate_;
+ MarkingBarrierType marking_barrier_type_ = MarkingBarrierType::kMajor;
};
} // namespace internal
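
The header above keeps the shared worklist in a base::Optional so that client barriers only carry it while shared marking is active, and PublishSharedIfNeeded() is a no-op otherwise. A small sketch of that gating, using std::optional and an illustrative Worklist type:

// Illustrative sketch; Worklist and Barrier here are stand-ins, not V8 types.
#include <cstdio>
#include <optional>
#include <vector>

struct Worklist {
  std::vector<int> entries;
  void Push(int v) { entries.push_back(v); }
  void Publish() {
    std::printf("published %zu entries\n", entries.size());
    entries.clear();
  }
};

struct Barrier {
  std::optional<Worklist> shared_heap_worklist;

  void ActivateShared() { shared_heap_worklist.emplace(); }
  void DeactivateShared() { shared_heap_worklist.reset(); }
  void PublishSharedIfNeeded() {
    if (shared_heap_worklist) shared_heap_worklist->Publish();
  }
};

int main() {
  Barrier b;
  b.PublishSharedIfNeeded();  // no-op: shared marking is not active
  b.ActivateShared();
  b.shared_heap_worklist->Push(42);
  b.PublishSharedIfNeeded();  // publishes one entry
  b.DeactivateShared();
  return 0;
}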
diff --git a/deps/v8/src/heap/marking-state-inl.h b/deps/v8/src/heap/marking-state-inl.h
index 0ab19a91a1..c22795234d 100644
--- a/deps/v8/src/heap/marking-state-inl.h
+++ b/deps/v8/src/heap/marking-state-inl.h
@@ -25,25 +25,19 @@ MarkBit MarkingStateBase<ConcreteState, access_mode>::MarkBitFrom(
}
template <typename ConcreteState, AccessMode access_mode>
-Marking::ObjectColor MarkingStateBase<ConcreteState, access_mode>::Color(
- const HeapObject obj) const {
- return Marking::Color(MarkBitFrom(obj));
-}
-
-template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::IsImpossible(
const HeapObject obj) const {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
}
template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::IsBlack(
+bool MarkingStateBase<ConcreteState, access_mode>::IsMarked(
const HeapObject obj) const {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
}
template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::IsWhite(
+bool MarkingStateBase<ConcreteState, access_mode>::IsUnmarked(
const HeapObject obj) const {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
}
@@ -61,30 +55,24 @@ bool MarkingStateBase<ConcreteState, access_mode>::IsBlackOrGrey(
}
template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(HeapObject obj) {
+bool MarkingStateBase<ConcreteState, access_mode>::TryMark(HeapObject obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
+bool MarkingStateBase<ConcreteState, access_mode>::TryMarkAndAccountLiveBytes(
HeapObject obj) {
- return WhiteToGrey(obj) && GreyToBlack(obj);
+ if (TryMark(obj) && GreyToBlack(obj)) {
+ static_cast<ConcreteState*>(this)->IncrementLiveBytes(
+ MemoryChunk::cast(BasicMemoryChunk::FromHeapObject(obj)),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(obj.Size(cage_base())));
+ return true;
+ }
+ return false;
}
template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
- MarkBit markbit = MarkBitFrom(chunk, obj.address());
- if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(
- MemoryChunk::cast(chunk),
- ALIGN_TO_ALLOCATION_ALIGNMENT(obj.Size(cage_base())));
- return true;
-}
-
-template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlackUnaccounted(
- HeapObject obj) {
return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
}
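
The renames above make the color transitions explicit: TryMark() is the white-to-grey step, GreyToBlack() no longer accounts live bytes, and TryMarkAndAccountLiveBytes() composes both for one-off markings. A minimal sketch with a per-object atomic color standing in for V8's mark bitmap:

// Illustrative sketch; the atomic color stands in for V8's mark bitmap.
#include <atomic>
#include <cstdio>

enum Color { kWhite = 0, kGrey = 1, kBlack = 2 };

struct Object {
  std::atomic<int> color{kWhite};
  int size_in_bytes = 0;
};

struct MarkingState {
  long live_bytes = 0;

  // White-to-grey transition; fails if another thread marked the object first.
  bool TryMark(Object& o) {
    int expected = kWhite;
    return o.color.compare_exchange_strong(expected, kGrey);
  }
  // Completes marking without touching live-byte counters.
  bool GreyToBlack(Object& o) {
    int expected = kGrey;
    return o.color.compare_exchange_strong(expected, kBlack);
  }
  // One-off marking: fully mark the object and account its live bytes.
  bool TryMarkAndAccountLiveBytes(Object& o) {
    if (TryMark(o) && GreyToBlack(o)) {
      live_bytes += o.size_in_bytes;
      return true;
    }
    return false;
  }
};

int main() {
  MarkingState state;
  Object o;
  o.size_in_bytes = 64;
  if (state.TryMarkAndAccountLiveBytes(o)) {
    std::printf("marked black, live bytes = %ld\n", state.live_bytes);
  }
  // A second attempt is a no-op: the object is no longer white.
  std::printf("re-mark succeeded: %d\n", state.TryMarkAndAccountLiveBytes(o));
  return 0;
}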
diff --git a/deps/v8/src/heap/marking-state.h b/deps/v8/src/heap/marking-state.h
index c197c10243..aefd341dfd 100644
--- a/deps/v8/src/heap/marking-state.h
+++ b/deps/v8/src/heap/marking-state.h
@@ -31,7 +31,7 @@ class MarkingStateBase {
}
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
V8_INLINE PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
@@ -45,25 +45,17 @@ class MarkingStateBase {
// {addr} may be tagged or aligned.
V8_INLINE MarkBit MarkBitFrom(const BasicMemoryChunk* p, Address addr) const;
- V8_INLINE Marking::ObjectColor Color(const HeapObject obj) const;
-
V8_INLINE bool IsImpossible(const HeapObject obj) const;
-
- V8_INLINE bool IsBlack(const HeapObject obj) const;
-
- V8_INLINE bool IsWhite(const HeapObject obj) const;
-
V8_INLINE bool IsGrey(const HeapObject obj) const;
-
V8_INLINE bool IsBlackOrGrey(const HeapObject obj) const;
-
- V8_INLINE bool WhiteToGrey(HeapObject obj);
-
- V8_INLINE bool WhiteToBlack(HeapObject obj);
-
V8_INLINE bool GreyToBlack(HeapObject obj);
- V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj);
+ V8_INLINE bool TryMark(HeapObject obj);
+ // Helper method for fully marking an object and accounting its live bytes.
+ // Should be used to mark individual objects in one-off cases.
+ V8_INLINE bool TryMarkAndAccountLiveBytes(HeapObject obj);
+ V8_INLINE bool IsMarked(const HeapObject obj) const;
+ V8_INLINE bool IsUnmarked(const HeapObject obj) const;
V8_INLINE void ClearLiveness(MemoryChunk* chunk);
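
MarkingStateBase above is a CRTP base: the shared transitions live in the template, and live-byte accounting is forwarded to the concrete state via static_cast rather than a virtual call. A compact sketch of that pattern with an illustrative ConcreteMarkingState:

// Illustrative sketch of the CRTP dispatch; not the real MarkingStateBase.
#include <cstdio>

template <typename ConcreteState>
class MarkingStateBase {
 public:
  bool TryMarkAndAccountLiveBytes(int object_size) {
    // Accounting is delegated to the derived class without virtual dispatch.
    static_cast<ConcreteState*>(this)->IncrementLiveBytes(object_size);
    return true;
  }
};

class ConcreteMarkingState : public MarkingStateBase<ConcreteMarkingState> {
 public:
  void IncrementLiveBytes(int object_size) { live_bytes_ += object_size; }
  int live_bytes() const { return live_bytes_; }

 private:
  int live_bytes_ = 0;
};

int main() {
  ConcreteMarkingState state;
  state.TryMarkAndAccountLiveBytes(32);
  state.TryMarkAndAccountLiveBytes(16);
  std::printf("live bytes: %d\n", state.live_bytes());
  return 0;
}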
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 827b938683..9e873e34ca 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -5,15 +5,20 @@
#ifndef V8_HEAP_MARKING_VISITOR_INL_H_
#define V8_HEAP_MARKING_VISITOR_INL_H_
+#include "src/common/globals.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/pretenuring-handler-inl.h"
#include "src/heap/progress-bar.h"
#include "src/heap/spaces.h"
+#include "src/objects/descriptor-array.h"
#include "src/objects/objects.h"
+#include "src/objects/property-details.h"
#include "src/objects/smi.h"
+#include "src/objects/string.h"
#include "src/sandbox/external-pointer-inl.h"
namespace v8 {
@@ -29,7 +34,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
SynchronizePageAccess(object);
AddStrongReferenceForReferenceSummarizer(host, object);
- if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
+ if (concrete_visitor()->marking_state()->TryMark(object)) {
local_marking_worklists_->Push(object);
if (V8_UNLIKELY(concrete_visitor()->retaining_path_mode() ==
TraceRetainingPathMode::kEnabled)) {
@@ -98,8 +103,7 @@ MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitPointersImpl(
template <typename ConcreteVisitor, typename MarkingState>
V8_INLINE void
MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodePointerImpl(
- HeapObject host, CodeObjectSlot slot) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ Code host, CodeObjectSlot slot) {
Object object =
slot.Relaxed_Load(ObjectVisitorWithCageBases::code_cage_base());
HeapObject heap_object;
@@ -113,46 +117,47 @@ MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodePointerImpl(
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
- Code host, RelocInfo* rinfo) {
+ RelocInfo* rinfo) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object =
rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
if (!ShouldMarkObject(object)) return;
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
- if (host.IsWeakObject(object)) {
+ if (rinfo->code().IsWeakObject(object)) {
local_weak_objects_->weak_objects_in_code_local.Push(
- std::make_pair(object, host));
- AddWeakReferenceForReferenceSummarizer(host, object);
+ std::make_pair(object, rinfo->code()));
+ AddWeakReferenceForReferenceSummarizer(rinfo->instruction_stream(),
+ object);
} else {
- MarkObject(host, object);
+ MarkObject(rinfo->instruction_stream(), object);
}
}
- concrete_visitor()->RecordRelocSlot(host, rinfo, object);
+ concrete_visitor()->RecordRelocSlot(rinfo, object);
}
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodeTarget(
- Code host, RelocInfo* rinfo) {
+ RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
if (!ShouldMarkObject(target)) return;
- MarkObject(host, target);
- concrete_visitor()->RecordRelocSlot(host, rinfo, target);
+ MarkObject(rinfo->instruction_stream(), target);
+ concrete_visitor()->RecordRelocSlot(rinfo, target);
}
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitExternalPointer(
HeapObject host, ExternalPointerSlot slot, ExternalPointerTag tag) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
- ExternalPointerTable* table = IsSharedExternalPointerType(tag)
- ? shared_external_pointer_table_
- : external_pointer_table_;
- table->Mark(handle, slot.address());
- }
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
+ ExternalPointerTable* table = IsSharedExternalPointerType(tag)
+ ? shared_external_pointer_table_
+ : external_pointer_table_;
+ table->Mark(handle, slot.address());
#endif // V8_ENABLE_SANDBOX
}
@@ -211,11 +216,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
// If bytecode flushing is disabled but baseline code flushing is enabled
// then we have to visit the bytecode but not the baseline code.
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
- CodeT baseline_codet = CodeT::cast(shared_info.function_data(kAcquireLoad));
- // Safe to do a relaxed load here since the CodeT was acquire-loaded.
- Code baseline_code =
- FromCodeT(baseline_codet, ObjectVisitorWithCageBases::code_cage_base(),
- kRelaxedLoad);
+ Code baseline_code = Code::cast(shared_info.function_data(kAcquireLoad));
// Visit the bytecode hanging off baseline code.
VisitPointer(baseline_code,
baseline_code.RawField(
@@ -256,6 +257,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
if (end < size) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
+ DCHECK(ShouldMarkObject(object));
local_marking_worklists_->Push(object);
}
}
@@ -263,21 +265,24 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
}
template <typename ConcreteVisitor, typename MarkingState>
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArrayRegularly(
+ Map map, FixedArray object) {
+ if (!concrete_visitor()->ShouldVisit(object)) return 0;
+ int size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ concrete_visitor()->VisitMapPointerIfNeeded(object);
+ FixedArray::BodyDescriptor::IterateBody(map, object, size,
+ concrete_visitor());
+ return size;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
Map map, FixedArray object) {
- // Arrays with the progress bar are not left-trimmable because they reside
- // in the large object space.
ProgressBar& progress_bar =
MemoryChunk::FromHeapObject(object)->ProgressBar();
return CanUpdateValuesInHeap() && progress_bar.IsEnabled()
? VisitFixedArrayWithProgressBar(map, object, progress_bar)
- : concrete_visitor()->VisitLeftTrimmableArray(map, object);
-}
-
-template <typename ConcreteVisitor, typename MarkingState>
-int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedDoubleArray(
- Map map, FixedDoubleArray object) {
- return concrete_visitor()->VisitLeftTrimmableArray(map, object);
+ : VisitFixedArrayRegularly(map, object);
}
// ===========================================================================
@@ -302,15 +307,8 @@ inline int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
requires_snapshot &&
local_marking_worklists_->ExtractWrapper(map, object, wrapper_snapshot);
const int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
- if (size) {
- if (valid_snapshot) {
- // Success: The object needs to be processed for embedder references.
- local_marking_worklists_->PushExtractedWrapper(wrapper_snapshot);
- } else if (!requires_snapshot) {
- // Snapshot not supported. Just fall back to pushing the wrapper itself
- // instead which will be processed on the main thread.
- local_marking_worklists_->PushWrapper(object);
- }
+ if (size && valid_snapshot) {
+ local_marking_worklists_->PushExtractedWrapper(wrapper_snapshot);
}
return size;
}
@@ -321,7 +319,7 @@ int MarkingVisitorBase<ConcreteVisitor,
MarkingState>::VisitEmbedderTracingSubclass(Map map,
T object) {
DCHECK(object.MayHaveEmbedderFields());
- if (V8_LIKELY(is_embedder_tracing_enabled_)) {
+ if (V8_LIKELY(trace_embedder_fields_)) {
return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
}
return VisitEmbedderTracingSubClassNoEmbedderTracing(map, object);
@@ -341,8 +339,9 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSArrayBuffer(
}
template <typename ConcreteVisitor, typename MarkingState>
-int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSDataView(
- Map map, JSDataView object) {
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitJSDataViewOrRabGsabDataView(Map map,
+ JSDataViewOrRabGsabDataView object) {
return VisitEmbedderTracingSubclass(map, object);
}
@@ -374,7 +373,11 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
ObjectSlot value_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
- if (!ShouldMarkObject(key) ||
+ // Objects in the shared heap are prohibited from being used as keys in
+ // WeakMaps and WeakSets and therefore cannot be ephemeron keys. See also
+ // MarkCompactCollector::ProcessEphemeron.
+ DCHECK(!key.InWritableSharedSpace());
+ if (key.InReadOnlySpace() ||
concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
@@ -390,7 +393,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
- if (concrete_visitor()->marking_state()->IsWhite(value)) {
+ if (concrete_visitor()->marking_state()->IsUnmarked(value)) {
local_weak_objects_->discovered_ephemerons_local.Push(
Ephemeron{key, value});
}
@@ -408,7 +411,8 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSWeakRef(
if (weak_ref.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_ref.target());
SynchronizePageAccess(target);
- if (concrete_visitor()->marking_state()->IsBlackOrGrey(target)) {
+ if (target.InReadOnlySpace() ||
+ concrete_visitor()->marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
@@ -435,8 +439,10 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
HeapObject unregister_token = weak_cell.relaxed_unregister_token();
SynchronizePageAccess(target);
SynchronizePageAccess(unregister_token);
- if (concrete_visitor()->marking_state()->IsBlackOrGrey(target) &&
- concrete_visitor()->marking_state()->IsBlackOrGrey(unregister_token)) {
+ if ((target.InReadOnlySpace() ||
+ concrete_visitor()->marking_state()->IsBlackOrGrey(target)) &&
+ (unregister_token.InReadOnlySpace() ||
+ concrete_visitor()->marking_state()->IsBlackOrGrey(unregister_token))) {
// Record the slots inside the WeakCell, since the IterateBody above
// didn't visit it.
ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
@@ -459,51 +465,56 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
-int MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
- DescriptorArray descriptors) {
- concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
- if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
- VisitMapPointer(descriptors);
- VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
- descriptors.GetDescriptorSlot(0));
- return DescriptorArray::BodyDescriptor::SizeOf(descriptors.map(),
- descriptors);
- }
- return 0;
-}
-
-template <typename ConcreteVisitor, typename MarkingState>
-void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptors(
- DescriptorArray descriptor_array, int number_of_own_descriptors) {
- int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
- int16_t old_marked = 0;
- if (CanUpdateValuesInHeap()) {
- old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
- mark_compact_epoch_, new_marked);
- }
- if (old_marked < new_marked) {
- VisitPointers(
- descriptor_array,
- MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
- MaybeObjectSlot(descriptor_array.GetDescriptorSlot(new_marked)));
- }
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitDescriptorArrayStrongly(Map map, DescriptorArray array) {
+ concrete_visitor()->ShouldVisit(array);
+ this->VisitMapPointer(array);
+ int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
+ VisitPointers(array, array.GetFirstPointerSlot(), array.GetDescriptorSlot(0));
+ VisitPointers(
+ array, MaybeObjectSlot(array.GetDescriptorSlot(0)),
+ MaybeObjectSlot(array.GetDescriptorSlot(array.number_of_descriptors())));
+ return size;
}
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorArray(
Map map, DescriptorArray array) {
- if (!concrete_visitor()->ShouldVisit(array)) return 0;
- this->VisitMapPointer(array);
- int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
- VisitPointers(array, array.GetFirstPointerSlot(), array.GetDescriptorSlot(0));
- VisitDescriptors(array, array.number_of_descriptors());
- return size;
+ if (!CanUpdateValuesInHeap()) {
+ // If we cannot update the values in the heap, we just treat the array
+ // strongly.
+ return VisitDescriptorArrayStrongly(map, array);
+ }
+
+ // The markbit is not used anymore. This is different from a checked
+ // transition in that the array is re-added to the worklist and thus there
+ // can be many invocations of this transition. All cases (roots, marking via
+ // map, write barrier) are handled here as they all update the state accordingly.
+ concrete_visitor()->marking_state()->GreyToBlack(array);
+ const auto [start, end] =
+ DescriptorArrayMarkingState::AcquireDescriptorRangeToMark(
+ mark_compact_epoch_, array);
+ if (start != end) {
+ DCHECK_LT(start, end);
+ VisitPointers(array, MaybeObjectSlot(array.GetDescriptorSlot(start)),
+ MaybeObjectSlot(array.GetDescriptorSlot(end)));
+ if (start == 0) {
+ // We are processing the object for the first time. Visit the header and
+ // return a size for accounting.
+ int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
+ VisitPointers(array, array.GetFirstPointerSlot(),
+ array.GetDescriptorSlot(0));
+ concrete_visitor()->VisitMapPointerIfNeeded(array);
+ return size;
+ }
+ }
+ return 0;
}
template <typename ConcreteVisitor, typename MarkingState>
-int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
+void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
Map map) {
- if (!map.CanTransition()) return 0;
+ if (!CanUpdateValuesInHeap() || !map.CanTransition()) return;
// Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks.
@@ -512,7 +523,6 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
// non-empty descriptor array is marked, its header is also visited. The
// slot holding the descriptor array will be implicitly recorded when the
// pointer fields of this map are visited.
-
Object maybe_descriptors =
TaggedField<Object, Map::kInstanceDescriptorsOffset>::Acquire_Load(
heap_->isolate(), map);
@@ -521,19 +531,19 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
// deserialized, and doesn't yet have an initialized descriptor field.
if (maybe_descriptors.IsSmi()) {
DCHECK_EQ(maybe_descriptors, Smi::uninitialized_deserialization_value());
- return 0;
+ return;
}
DescriptorArray descriptors = DescriptorArray::cast(maybe_descriptors);
-
- // Don't do any special processing of strong descriptor arrays, let them get
- // marked through the normal visitor mechanism.
- if (descriptors.IsStrongDescriptorArray()) {
- return 0;
+ // The following cases get normal processing of descriptor arrays through
+ // the pointer iteration that follows this call:
+ // - arrays in read-only space;
+ // - StrongDescriptorArrays.
+ if (descriptors.InReadOnlySpace() || descriptors.IsStrongDescriptorArray()) {
+ return;
}
- SynchronizePageAccess(descriptors);
- int size = MarkDescriptorArrayBlack(descriptors);
- int number_of_own_descriptors = map.NumberOfOwnDescriptors();
+
+ const int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors) {
// It is possible that the concurrent marker observes the
// number_of_own_descriptors out of sync with the descriptors. In that
@@ -541,12 +551,14 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
// that all required descriptors are marked. The concurrent marker
// should just avoid crashing in that case. That's why we need the
// std::min<int>() below.
- VisitDescriptors(descriptors,
- std::min<int>(number_of_own_descriptors,
- descriptors.number_of_descriptors()));
+ const auto descriptors_to_mark = std::min<int>(
+ number_of_own_descriptors, descriptors.number_of_descriptors());
+ concrete_visitor()->marking_state()->TryMark(descriptors);
+ if (DescriptorArrayMarkingState::TryUpdateIndicesToMark(
+ mark_compact_epoch_, descriptors, descriptors_to_mark)) {
+ local_marking_worklists_->Push(descriptors);
+ }
}
-
- return size;
}
template <typename ConcreteVisitor, typename MarkingState>
@@ -554,7 +566,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitMap(Map meta_map,
Map map) {
if (!concrete_visitor()->ShouldVisit(map)) return 0;
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
- size += VisitDescriptorsForMap(map);
+ VisitDescriptorsForMap(map);
// Mark the pointer fields of the Map. If there is a transitions array, it has
// been marked already, so it is fine that one of these fields contains a
@@ -579,23 +591,100 @@ YoungGenerationMarkingVisitorBase<ConcreteVisitor, MarkingState>::
YoungGenerationMarkingVisitorBase(Isolate* isolate,
MarkingWorklists::Local* worklists_local)
: NewSpaceVisitor<ConcreteVisitor>(isolate),
- worklists_local_(worklists_local) {}
+ worklists_local_(worklists_local),
+ pretenuring_handler_(isolate->heap()->pretenuring_handler()),
+ local_pretenuring_feedback_(
+ PretenuringHandler::kInitialFeedbackCapacity) {}
+
+template <typename ConcreteVisitor, typename MarkingState>
+template <typename T>
+int YoungGenerationMarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitEmbedderTracingSubClassWithEmbedderTracing(Map map, T object) {
+ const int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
+ if (!worklists_local_->SupportsExtractWrapper()) return size;
+ MarkingWorklists::Local::WrapperSnapshot wrapper_snapshot;
+ const bool valid_snapshot =
+ worklists_local_->ExtractWrapper(map, object, wrapper_snapshot);
+ if (size && valid_snapshot) {
+ // Success: The object needs to be processed for embedder references.
+ worklists_local_->PushExtractedWrapper(wrapper_snapshot);
+ }
+ return size;
+}
template <typename ConcreteVisitor, typename MarkingState>
int YoungGenerationMarkingVisitorBase<
ConcreteVisitor, MarkingState>::VisitJSArrayBuffer(Map map,
JSArrayBuffer object) {
- if (!concrete_visitor()->ShouldVisit(object)) return 0;
object.YoungMarkExtension();
- int size = JSArrayBuffer::BodyDescriptor::SizeOf(map, object);
- JSArrayBuffer::BodyDescriptor::IterateBody(map, object, size, this);
- return size;
+ return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+int YoungGenerationMarkingVisitorBase<
+ ConcreteVisitor, MarkingState>::VisitJSApiObject(Map map, JSObject object) {
+ return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+int YoungGenerationMarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitJSDataViewOrRabGsabDataView(Map map,
+ JSDataViewOrRabGsabDataView object) {
+ return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+int YoungGenerationMarkingVisitorBase<
+ ConcreteVisitor, MarkingState>::VisitJSTypedArray(Map map,
+ JSTypedArray object) {
+ return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+int YoungGenerationMarkingVisitorBase<
+ ConcreteVisitor, MarkingState>::VisitJSObject(Map map, JSObject object) {
+ int result = NewSpaceVisitor<ConcreteVisitor>::VisitJSObject(map, object);
+ DCHECK_LT(0, result);
+ pretenuring_handler_->UpdateAllocationSite(map, object,
+ &local_pretenuring_feedback_);
+ return result;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+int YoungGenerationMarkingVisitorBase<
+ ConcreteVisitor, MarkingState>::VisitJSObjectFast(Map map,
+ JSObject object) {
+ int result = NewSpaceVisitor<ConcreteVisitor>::VisitJSObjectFast(map, object);
+ DCHECK_LT(0, result);
+ pretenuring_handler_->UpdateAllocationSite(map, object,
+ &local_pretenuring_feedback_);
+ return result;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+template <typename T, typename TBodyDescriptor>
+int YoungGenerationMarkingVisitorBase<
+ ConcreteVisitor, MarkingState>::VisitJSObjectSubclass(Map map, T object) {
+ int result = NewSpaceVisitor<ConcreteVisitor>::template VisitJSObjectSubclass<
+ T, TBodyDescriptor>(map, object);
+ DCHECK_LT(0, result);
+ pretenuring_handler_->UpdateAllocationSite(map, object,
+ &local_pretenuring_feedback_);
+ return result;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+void YoungGenerationMarkingVisitorBase<ConcreteVisitor,
+ MarkingState>::Finalize() {
+ pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
+ local_pretenuring_feedback_);
+ local_pretenuring_feedback_.clear();
}
template <typename ConcreteVisitor, typename MarkingState>
void YoungGenerationMarkingVisitorBase<ConcreteVisitor, MarkingState>::
MarkObjectViaMarkingWorklist(HeapObject object) {
- if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
+ if (concrete_visitor()->marking_state()->TryMark(object)) {
worklists_local_->Push(object);
}
}
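
VisitDescriptorArray() above now relies on DescriptorArrayMarkingState to hand out per-epoch descriptor ranges, so an array that is visited repeatedly only marks the newly added tail of descriptors. A simplified, non-atomic sketch of that bookkeeping; the real state is packed into the array and updated atomically:

// Illustrative, non-atomic stand-in for DescriptorArrayMarkingState.
#include <algorithm>
#include <cstdio>
#include <utility>

struct DescriptorMarkingState {
  unsigned epoch = 0;
  int marked = 0;   // descriptors already visited in `epoch`
  int to_mark = 0;  // descriptors requested for marking in `epoch`

  // Returns true if the request raised the target, i.e. the array needs to be
  // (re-)pushed onto the marking worklist.
  bool TryUpdateIndicesToMark(unsigned gc_epoch, int number_of_own) {
    if (gc_epoch != epoch) {
      epoch = gc_epoch;
      marked = 0;
      to_mark = 0;
    }
    if (number_of_own <= to_mark) return false;
    to_mark = number_of_own;
    return true;
  }

  // Returns the [start, end) descriptor range the visitor should mark now.
  std::pair<int, int> AcquireDescriptorRangeToMark(unsigned gc_epoch) {
    if (gc_epoch != epoch) return {0, 0};
    int start = marked;
    marked = std::max(marked, to_mark);
    return {start, to_mark};
  }
};

int main() {
  DescriptorMarkingState state;
  state.TryUpdateIndicesToMark(1, 3);
  auto [s1, e1] = state.AcquireDescriptorRangeToMark(1);  // marks [0, 3)
  state.TryUpdateIndicesToMark(1, 5);                     // two more descriptors appeared
  auto [s2, e2] = state.AcquireDescriptorRangeToMark(1);  // marks [3, 5)
  std::printf("[%d,%d) then [%d,%d)\n", s1, e1, s2, e2);
  return 0;
}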
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 3e6937bb41..f2c069e5a6 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -6,11 +6,14 @@
#define V8_HEAP_MARKING_VISITOR_H_
#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/heap/marking-state.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/spaces.h"
#include "src/heap/weak-object-worklists.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
@@ -29,12 +32,9 @@ struct EphemeronMarking {
// - ConcreteVisitor::retaining_path_mode method,
// - ConcreteVisitor::RecordSlot method,
// - ConcreteVisitor::RecordRelocSlot method,
-// - ConcreteVisitor::VisitJSObjectSubclass method,
-// - ConcreteVisitor::VisitLeftTrimmableArray method.
// These methods capture the difference between the concurrent and main thread
// marking visitors. For example, the concurrent visitor has to use the
// snapshotting protocol to visit JSObject and left-trimmable FixedArrays.
-
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
@@ -42,7 +42,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
WeakObjects::Local* local_weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool is_embedder_tracing_enabled,
+ bool trace_embedder_fields,
bool should_keep_ages_unchanged)
: HeapVisitor<int, ConcreteVisitor>(heap),
local_marking_worklists_(local_marking_worklists),
@@ -50,9 +50,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
heap_(heap),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
- is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
+ trace_embedder_fields_(trace_embedder_fields),
should_keep_ages_unchanged_(should_keep_ages_unchanged),
- should_mark_shared_heap_(heap->ShouldMarkSharedHeap())
+ should_mark_shared_heap_(heap->isolate()->is_shared_space_isolate())
#ifdef V8_ENABLE_SANDBOX
,
external_pointer_table_(&heap->isolate()->external_pointer_table()),
@@ -63,13 +63,14 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
+ V8_INLINE int VisitDescriptorArrayStrongly(Map map, DescriptorArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
V8_INLINE int VisitFixedArray(Map map, FixedArray object);
- V8_INLINE int VisitFixedDoubleArray(Map map, FixedDoubleArray object);
V8_INLINE int VisitJSApiObject(Map map, JSObject object);
V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
- V8_INLINE int VisitJSDataView(Map map, JSDataView object);
+ V8_INLINE int VisitJSDataViewOrRabGsabDataView(
+ Map map, JSDataViewOrRabGsabDataView object);
V8_INLINE int VisitJSFunction(Map map, JSFunction object);
V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
V8_INLINE int VisitJSWeakRef(Map map, JSWeakRef object);
@@ -81,8 +82,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// ObjectVisitor overrides.
void VisitMapPointer(HeapObject host) final {
Map map = host.map(ObjectVisitorWithCageBases::cage_base());
- MarkObject(host, map);
- concrete_visitor()->RecordSlot(host, host.map_slot(), map);
+ ProcessStrongHeapObject(host, host.map_slot(), map);
}
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
@@ -98,11 +98,11 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
MaybeObjectSlot end) final {
VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
+ V8_INLINE void VisitCodePointer(Code host, CodeObjectSlot slot) final {
VisitCodePointerImpl(host, slot);
}
- V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
- V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitEmbeddedPointer(RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(RelocInfo* rinfo) final;
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
// Weak list pointers should be ignored during marking. The lists are
@@ -120,8 +120,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
}
bool ShouldMarkObject(HeapObject object) const {
+ if (object.InReadOnlySpace()) return false;
if (should_mark_shared_heap_) return true;
- return !object.InSharedHeap();
+ return !object.InAnySharedSpace();
}
// Marks the object grey and pushes it on the marking work list.
@@ -146,12 +147,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Similar to VisitPointersImpl() but using code cage base for loading from
// the slot.
- V8_INLINE void VisitCodePointerImpl(HeapObject host, CodeObjectSlot slot);
-
- V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
- int number_of_own_descriptors);
+ V8_INLINE void VisitCodePointerImpl(Code host, CodeObjectSlot slot);
- V8_INLINE int VisitDescriptorsForMap(Map map);
+ V8_INLINE void VisitDescriptorsForMap(Map map);
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
@@ -162,10 +160,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
ProgressBar& progress_bar);
- // Marks the descriptor array black without pushing it on the marking work
- // list and visits its header. Returns the size of the descriptor array
- // if it was successully marked as black.
- V8_INLINE int MarkDescriptorArrayBlack(DescriptorArray descriptors);
+ V8_INLINE int VisitFixedArrayRegularly(Map map, FixedArray object);
V8_INLINE void AddStrongReferenceForReferenceSummarizer(HeapObject host,
HeapObject obj) {
@@ -189,7 +184,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
Heap* const heap_;
const unsigned mark_compact_epoch_;
const base::EnumSet<CodeFlushMode> code_flush_mode_;
- const bool is_embedder_tracing_enabled_;
+ const bool trace_embedder_fields_;
const bool should_keep_ages_unchanged_;
const bool should_mark_shared_heap_;
#ifdef V8_ENABLE_SANDBOX
@@ -205,6 +200,10 @@ class YoungGenerationMarkingVisitorBase
YoungGenerationMarkingVisitorBase(Isolate* isolate,
MarkingWorklists::Local* worklists_local);
+ ~YoungGenerationMarkingVisitorBase() override {
+ DCHECK(local_pretenuring_feedback_.empty());
+ }
+
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
VisitPointersImpl(host, start, end);
@@ -215,12 +214,11 @@ class YoungGenerationMarkingVisitorBase
VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitCodePointer(HeapObject host,
- CodeObjectSlot slot) override {
+ V8_INLINE void VisitCodePointer(Code host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Code slots never appear in new space because CodeDataContainers, the
- // only object that can contain code pointers, are always allocated in
- // the old space.
+ // InstructionStream slots never appear in new space because
+ // Code objects, the only object that can contain code pointers, are
+ // always allocated in the old space.
UNREACHABLE();
}
@@ -232,23 +230,37 @@ class YoungGenerationMarkingVisitorBase
VisitPointerImpl(host, slot);
}
- V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ V8_INLINE void VisitCodeTarget(RelocInfo* rinfo) final {
// Code objects are not expected in new space.
UNREACHABLE();
}
- V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ V8_INLINE void VisitEmbeddedPointer(RelocInfo* rinfo) final {
// Code objects are not expected in new space.
UNREACHABLE();
}
+ V8_INLINE int VisitJSApiObject(Map map, JSObject object);
V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
+ V8_INLINE int VisitJSDataViewOrRabGsabDataView(
+ Map map, JSDataViewOrRabGsabDataView object);
+ V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
+
+ V8_INLINE int VisitJSObject(Map map, JSObject object);
+ V8_INLINE int VisitJSObjectFast(Map map, JSObject object);
+ template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
+ V8_INLINE int VisitJSObjectSubclass(Map map, T object);
+
+ V8_INLINE void Finalize();
protected:
ConcreteVisitor* concrete_visitor() {
return static_cast<ConcreteVisitor*>(this);
}
+ template <typename T>
+ int VisitEmbedderTracingSubClassWithEmbedderTracing(Map map, T object);
+
inline void MarkObjectViaMarkingWorklist(HeapObject object);
private:
@@ -263,6 +275,8 @@ class YoungGenerationMarkingVisitorBase
void VisitPointerImpl(HeapObject host, TSlot slot);
MarkingWorklists::Local* worklists_local_;
+ PretenuringHandler* const pretenuring_handler_;
+ PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
};
} // namespace internal
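The ShouldMarkObject() change above is the heart of the shared-heap filtering: read-only objects are never marked, and objects in the writable shared space are marked only by the shared-space isolate itself. A minimal standalone sketch of that predicate, using illustrative stand-ins rather than the real V8 types:

#include <cassert>

// Illustrative stand-ins; the real code works on HeapObject and chunk flags.
enum class Space { kReadOnly, kShared, kLocal };

struct FakeHeapObject {
  Space space;
  bool InReadOnlySpace() const { return space == Space::kReadOnly; }
  bool InAnySharedSpace() const { return space == Space::kShared; }
};

// Mirrors the post-patch logic: skip read-only objects, mark shared objects
// only when this visitor belongs to the shared-space isolate.
bool ShouldMarkObject(const FakeHeapObject& object,
                      bool is_shared_space_isolate) {
  if (object.InReadOnlySpace()) return false;
  if (is_shared_space_isolate) return true;
  return !object.InAnySharedSpace();
}

int main() {
  assert(!ShouldMarkObject({Space::kReadOnly}, true));
  assert(ShouldMarkObject({Space::kShared}, true));
  assert(!ShouldMarkObject({Space::kShared}, false));
  assert(ShouldMarkObject({Space::kLocal}, false));
  return 0;
}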
diff --git a/deps/v8/src/heap/marking-worklist-inl.h b/deps/v8/src/heap/marking-worklist-inl.h
index 6024a3e46a..b34f83ec50 100644
--- a/deps/v8/src/heap/marking-worklist-inl.h
+++ b/deps/v8/src/heap/marking-worklist-inl.h
@@ -18,7 +18,6 @@ template <typename Callback>
void MarkingWorklists::Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
- wrapper_.Update(callback);
other_.Update(callback);
for (auto& cw : context_worklists_) {
cw.worklist->Update(callback);
@@ -59,16 +58,6 @@ void MarkingWorklists::Local::PushExtractedWrapper(
cpp_marking_state_->MarkAndPush(snapshot);
}
-void MarkingWorklists::Local::PushWrapper(HeapObject object) {
- DCHECK_NULL(cpp_marking_state_);
- wrapper_.Push(object);
-}
-
-bool MarkingWorklists::Local::PopWrapper(HeapObject* object) {
- DCHECK_NULL(cpp_marking_state_);
- return wrapper_.Pop(object);
-}
-
Address MarkingWorklists::Local::SwitchToContext(Address context) {
if (context == active_context_) return context;
return SwitchToContextSlow(context);
diff --git a/deps/v8/src/heap/marking-worklist.cc b/deps/v8/src/heap/marking-worklist.cc
index d05540e2f8..a035155b4a 100644
--- a/deps/v8/src/heap/marking-worklist.cc
+++ b/deps/v8/src/heap/marking-worklist.cc
@@ -24,7 +24,6 @@ namespace internal {
void MarkingWorklists::Clear() {
shared_.Clear();
on_hold_.Clear();
- wrapper_.Clear();
other_.Clear();
for (auto& cw : context_worklists_) {
cw.worklist->Clear();
@@ -107,7 +106,6 @@ MarkingWorklists::Local::Local(
: active_(&shared_),
shared_(*global->shared()),
on_hold_(*global->on_hold()),
- wrapper_(*global->wrapper()),
active_context_(kSharedContext),
is_per_context_mode_(!global->context_worklists().empty()),
worklist_by_context_(
@@ -118,7 +116,6 @@ MarkingWorklists::Local::Local(
void MarkingWorklists::Local::Publish() {
shared_.Publish();
on_hold_.Publish();
- wrapper_.Publish();
other_.Publish();
if (is_per_context_mode_) {
for (auto& cw : worklist_by_context_) {
@@ -153,11 +150,7 @@ bool MarkingWorklists::Local::IsEmpty() {
}
bool MarkingWorklists::Local::IsWrapperEmpty() const {
- if (cpp_marking_state_) {
- DCHECK(wrapper_.IsLocalAndGlobalEmpty());
- return cpp_marking_state_->IsLocalEmpty();
- }
- return wrapper_.IsLocalAndGlobalEmpty();
+ return !cpp_marking_state_ || cpp_marking_state_->IsLocalEmpty();
}
void MarkingWorklists::Local::ShareWork() {
diff --git a/deps/v8/src/heap/marking-worklist.h b/deps/v8/src/heap/marking-worklist.h
index b2c6fc297e..9a37372c21 100644
--- a/deps/v8/src/heap/marking-worklist.h
+++ b/deps/v8/src/heap/marking-worklist.h
@@ -25,7 +25,6 @@ class JSObject;
const int kMainThreadTask = 0;
using MarkingWorklist = ::heap::base::Worklist<HeapObject, 64>;
-using WrapperTracingWorklist = ::heap::base::Worklist<HeapObject, 16>;
// We piggyback on marking to compute object sizes per native context, which is
// needed for the new memory measurement API. The algorithm works as follows:
@@ -91,7 +90,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists final {
MarkingWorklist* shared() { return &shared_; }
MarkingWorklist* on_hold() { return &on_hold_; }
MarkingWorklist* other() { return &other_; }
- WrapperTracingWorklist* wrapper() { return &wrapper_; }
// A list of (context, worklist) pairs that was set up at the start of
// marking by CreateContextWorklists.
@@ -122,11 +120,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists final {
// for freshly allocated objects.
MarkingWorklist on_hold_;
- // Worklist for objects that potentially require embedder tracing, i.e.,
- // these objects need to be handed over to the embedder to find the full
- // transitive closure.
- WrapperTracingWorklist wrapper_;
-
// Per-context worklists. Objects are in the `shared_` worklist by default.
std::vector<ContextWorklistPair> context_worklists_;
// Worklist used for objects that are attributed to contexts that are
@@ -166,8 +159,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local final {
WrapperSnapshot& snapshot);
inline void PushExtractedWrapper(const WrapperSnapshot& snapshot);
inline bool SupportsExtractWrapper();
- inline void PushWrapper(HeapObject object);
- inline bool PopWrapper(HeapObject* object);
void Publish();
bool IsEmpty();
@@ -205,7 +196,6 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local final {
MarkingWorklist::Local* active_;
MarkingWorklist::Local shared_;
MarkingWorklist::Local on_hold_;
- WrapperTracingWorklist::Local wrapper_;
Address active_context_;
const bool is_per_context_mode_;
const std::unordered_map<Address, std::unique_ptr<MarkingWorklist::Local>>
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 288527809e..faaf6554fe 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -356,7 +356,6 @@ class Marking : public AllStatic {
// ATOMIC as soon we add concurrency.
// Impossible markbits: 01
- static const char* kImpossibleBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsImpossible(MarkBit mark_bit) {
if (mode == AccessMode::NON_ATOMIC) {
@@ -374,14 +373,12 @@ class Marking : public AllStatic {
}
// Black markbits: 11
- static const char* kBlackBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsBlack(MarkBit mark_bit) {
return mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
// White markbits: 00 - this is required by the mark bit clearer.
- static const char* kWhiteBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsWhite(MarkBit mark_bit) {
DCHECK(!IsImpossible<mode>(mark_bit));
@@ -389,7 +386,6 @@ class Marking : public AllStatic {
}
// Grey markbits: 10
- static const char* kGreyBitPattern;
template <AccessMode mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsGrey(MarkBit mark_bit) {
return mark_bit.Get<mode>() && !mark_bit.Next().Get<mode>();
@@ -433,34 +429,6 @@ class Marking : public AllStatic {
return markbit.Get<mode>() && markbit.Next().Set<mode>();
}
- enum ObjectColor {
- BLACK_OBJECT,
- WHITE_OBJECT,
- GREY_OBJECT,
- IMPOSSIBLE_COLOR
- };
-
- static const char* ColorName(ObjectColor color) {
- switch (color) {
- case BLACK_OBJECT:
- return "black";
- case WHITE_OBJECT:
- return "white";
- case GREY_OBJECT:
- return "grey";
- case IMPOSSIBLE_COLOR:
- return "impossible";
- }
- return "error";
- }
-
- static ObjectColor Color(MarkBit mark_bit) {
- if (IsBlack(mark_bit)) return BLACK_OBJECT;
- if (IsWhite(mark_bit)) return WHITE_OBJECT;
- if (IsGrey(mark_bit)) return GREY_OBJECT;
- UNREACHABLE();
- }
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
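The bit patterns documented in the retained comments above (white 00, grey 10, black 11, 01 impossible) are all that remains once the debug-only ObjectColor/ColorName() helpers are dropped. A self-contained sketch of how those two bits decode, roughly what the removed Color() helper did:

#include <cassert>

// Two-bit mark encoding: (mark bit, next bit).
//   white = 00, grey = 10, black = 11, impossible = 01
enum class Color { kWhite, kGrey, kBlack, kImpossible };

Color Decode(bool mark_bit, bool next_bit) {
  if (mark_bit && next_bit) return Color::kBlack;
  if (mark_bit) return Color::kGrey;
  if (!next_bit) return Color::kWhite;
  return Color::kImpossible;  // 01 should never be observed for a valid object.
}

int main() {
  assert(Decode(false, false) == Color::kWhite);
  assert(Decode(true, false) == Color::kGrey);
  assert(Decode(true, true) == Color::kBlack);
  assert(Decode(false, true) == Color::kImpossible);
  return 0;
}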
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index dd9afbdfa6..69284078b0 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -247,7 +247,8 @@ void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
Address MemoryAllocator::AllocateAlignedMemory(
size_t chunk_size, size_t area_size, size_t alignment,
- Executability executable, void* hint, VirtualMemory* controller) {
+ AllocationSpace space, Executability executable, void* hint,
+ VirtualMemory* controller) {
v8::PageAllocator* page_allocator = this->page_allocator(executable);
DCHECK_LT(area_size, chunk_size);
@@ -270,17 +271,16 @@ Address MemoryAllocator::AllocateAlignedMemory(
Address base = reservation.address();
if (executable == EXECUTABLE) {
- const size_t aligned_area_size = ::RoundUp(area_size, GetCommitPageSize());
- if (!SetPermissionsOnExecutableMemoryChunk(&reservation, base,
- aligned_area_size, chunk_size)) {
+ if (!SetPermissionsOnExecutableMemoryChunk(&reservation, base, area_size,
+ chunk_size)) {
return HandleAllocationFailure(executable);
}
} else {
// No guard page between page header and object area. This allows us to make
// all OS pages for both regions readable+writable at once.
- const size_t commit_size =
- ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
- GetCommitPageSize());
+ const size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space) + area_size,
+ GetCommitPageSize());
if (reservation.SetPermissions(base, commit_size,
PageAllocator::kReadWrite)) {
@@ -306,6 +306,7 @@ Address MemoryAllocator::HandleAllocationFailure(Executability executable) {
}
size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
+ AllocationSpace space,
Executability executable) {
if (executable == EXECUTABLE) {
//
@@ -340,31 +341,35 @@ size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
//
DCHECK_EQ(executable, NOT_EXECUTABLE);
- return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
- GetCommitPageSize());
+ return ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space) + area_size,
+ GetCommitPageSize());
}
base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
-MemoryAllocator::AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
- Executability executable,
- PageSize page_size) {
-#ifdef V8_COMPRESS_POINTERS
+MemoryAllocator::AllocateUninitializedChunkAt(BaseSpace* space,
+ size_t area_size,
+ Executability executable,
+ Address hint,
+ PageSize page_size) {
+#ifndef V8_COMPRESS_POINTERS
// When pointer compression is enabled, spaces are expected to be at a
// predictable address (see mkgrokdump) so we don't supply a hint and rely on
// the deterministic behaviour of the BoundedPageAllocator.
- void* address_hint = nullptr;
-#else
- void* address_hint = AlignedAddress(isolate_->heap()->GetRandomMmapAddr(),
- MemoryChunk::kAlignment);
+ if (hint == kNullAddress) {
+ hint = reinterpret_cast<Address>(AlignedAddress(
+ isolate_->heap()->GetRandomMmapAddr(), MemoryChunk::kAlignment));
+ }
#endif
VirtualMemory reservation;
- size_t chunk_size = ComputeChunkSize(area_size, executable);
+ size_t chunk_size =
+ ComputeChunkSize(area_size, space->identity(), executable);
DCHECK_EQ(chunk_size % GetCommitPageSize(), 0);
- Address base =
- AllocateAlignedMemory(chunk_size, area_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
+ Address base = AllocateAlignedMemory(
+ chunk_size, area_size, MemoryChunk::kAlignment, space->identity(),
+ executable, reinterpret_cast<void*>(hint), &reservation);
if (base == kNullAddress) return {};
size_ += reservation.size();
@@ -385,9 +390,11 @@ MemoryAllocator::AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
} else {
DCHECK_EQ(executable, NOT_EXECUTABLE);
// Zap both page header and object area at once. No guard page in-between.
- ZapBlock(base,
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
- kZapValue);
+ ZapBlock(
+ base,
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity()) +
+ area_size,
+ kZapValue);
}
}
@@ -423,8 +430,8 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
DCHECK(isolate_->RequiresCodeRange());
reservation->DiscardSystemPages(chunk->area_end(), page_size);
} else {
- reservation->SetPermissions(chunk->area_end(), page_size,
- PageAllocator::kNoAccess);
+ CHECK(reservation->SetPermissions(chunk->area_end(), page_size,
+ PageAllocator::kNoAccess));
}
}
// On e.g. Windows, a reservation may be larger than a page and releasing
@@ -587,12 +594,13 @@ Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
return page;
}
-ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space) {
+ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space,
+ Address hint) {
DCHECK_EQ(space->identity(), RO_SPACE);
size_t size = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE);
base::Optional<MemoryChunkAllocationResult> chunk_info =
- AllocateUninitializedChunk(space, size, NOT_EXECUTABLE,
- PageSize::kRegular);
+ AllocateUninitializedChunkAt(space, size, NOT_EXECUTABLE, hint,
+ PageSize::kRegular);
if (!chunk_info) return nullptr;
return new (chunk_info->start) ReadOnlyPage(
isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
@@ -685,17 +693,24 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
size_t chunk_size) {
const size_t page_size = GetCommitPageSize();
+ // The code area starts at an offset on the first page. To compute the
+ // page-aligned size of the area, we add that offset and round up to the
+ // commit page size.
+ size_t area_offset = MemoryChunkLayout::ObjectStartOffsetInCodePage() -
+ MemoryChunkLayout::ObjectPageOffsetInCodePage();
+ size_t aligned_area_size = RoundUp(area_offset + area_size, page_size);
+
// All addresses and sizes must be aligned to the commit page size.
DCHECK(IsAligned(start, page_size));
- DCHECK_EQ(0, area_size % page_size);
DCHECK_EQ(0, chunk_size % page_size);
const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
const size_t code_area_offset =
- MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ MemoryChunkLayout::ObjectPageOffsetInCodePage();
- DCHECK_EQ(pre_guard_offset + guard_size + area_size + guard_size, chunk_size);
+ DCHECK_EQ(pre_guard_offset + guard_size + aligned_area_size + guard_size,
+ chunk_size);
const Address pre_guard_page = start + pre_guard_offset;
const Address code_area = start + code_area_offset;
@@ -713,15 +728,15 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
// Create the pre-code guard page, following the header.
if (vm->DiscardSystemPages(pre_guard_page, page_size)) {
// Commit the executable code body.
- if (vm->RecommitPages(code_area, area_size,
+ if (vm->RecommitPages(code_area, aligned_area_size,
PageAllocator::kReadWriteExecute)) {
// Create the post-code guard page.
if (vm->DiscardSystemPages(post_guard_page, page_size)) {
- UpdateAllocatedSpaceLimits(start, code_area + area_size);
+ UpdateAllocatedSpaceLimits(start, code_area + aligned_area_size);
return true;
}
- vm->DiscardSystemPages(code_area, area_size);
+ vm->DiscardSystemPages(code_area, aligned_area_size);
}
}
vm->DiscardSystemPages(start, pre_guard_offset);
@@ -738,7 +753,7 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
bool set_permission_successed = false;
#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
if (!jitless && RwxMemoryWriteScope::IsSupported()) {
- base::AddressRegion region(code_area, area_size);
+ base::AddressRegion region(code_area, aligned_area_size);
set_permission_successed =
base::MemoryProtectionKey::SetPermissionsAndKey(
code_page_allocator_, region,
@@ -748,7 +763,7 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
#endif
{
set_permission_successed = vm->SetPermissions(
- code_area, area_size,
+ code_area, aligned_area_size,
jitless ? PageAllocator::kReadWrite
: MemoryChunk::GetCodeModificationPermission());
}
@@ -756,32 +771,35 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
PageAllocator::kNoAccess)) {
- UpdateAllocatedSpaceLimits(start, code_area + area_size);
+ UpdateAllocatedSpaceLimits(start, code_area + aligned_area_size);
return true;
}
- vm->SetPermissions(code_area, area_size, PageAllocator::kNoAccess);
+ CHECK(vm->SetPermissions(code_area, aligned_area_size,
+ PageAllocator::kNoAccess));
}
}
- vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
+ CHECK(vm->SetPermissions(start, pre_guard_offset,
+ PageAllocator::kNoAccess));
}
}
return false;
}
+// static
const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
- Address addr) const {
- base::MutexGuard guard(&pages_mutex_);
+ const NormalPagesSet& normal_pages, const LargePagesSet& large_pages,
+ Address addr) {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(addr);
- if (auto it = normal_pages_.find(static_cast<Page*>(chunk));
- it != normal_pages_.end()) {
+ if (auto it = normal_pages.find(static_cast<Page*>(chunk));
+ it != normal_pages.end()) {
// The chunk is a normal page.
DCHECK_LE(chunk->address(), addr);
if (chunk->Contains(addr)) return *it;
- } else if (auto it = large_pages_.upper_bound(static_cast<LargePage*>(chunk));
- it != large_pages_.begin()) {
+ } else if (auto it = large_pages.upper_bound(static_cast<LargePage*>(chunk));
+ it != large_pages.begin()) {
// The chunk could be inside a large page.
- DCHECK_IMPLIES(it != large_pages_.end(), addr < (*it)->address());
+ DCHECK_IMPLIES(it != large_pages.end(), addr < (*it)->address());
auto* large_page = *std::next(it, -1);
DCHECK_NOT_NULL(large_page);
DCHECK_LE(large_page->address(), addr);
@@ -791,6 +809,14 @@ const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
return nullptr;
}
+const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
+ Address addr) const {
+ // All threads should be either parked or in a safepoint whenever this method
+ // is called, thus pages cannot be allocated or freed at the same time and a
+ // mutex is not required here.
+ return LookupChunkContainingAddress(normal_pages_, large_pages_, addr);
+}
+
void MemoryAllocator::RecordNormalPageCreated(const Page& page) {
base::MutexGuard guard(&pages_mutex_);
auto result = normal_pages_.insert(&page);
@@ -800,6 +826,8 @@ void MemoryAllocator::RecordNormalPageCreated(const Page& page) {
void MemoryAllocator::RecordNormalPageDestroyed(const Page& page) {
base::MutexGuard guard(&pages_mutex_);
+ DCHECK_IMPLIES(v8_flags.minor_mc && isolate_->heap()->sweeping_in_progress(),
+ isolate_->heap()->tracer()->IsInAtomicPause());
auto size = normal_pages_.erase(&page);
USE(size);
DCHECK_EQ(1u, size);
@@ -814,6 +842,8 @@ void MemoryAllocator::RecordLargePageCreated(const LargePage& page) {
void MemoryAllocator::RecordLargePageDestroyed(const LargePage& page) {
base::MutexGuard guard(&pages_mutex_);
+ DCHECK_IMPLIES(v8_flags.minor_mc && isolate_->heap()->sweeping_in_progress(),
+ isolate_->heap()->tracer()->IsInAtomicPause());
auto size = large_pages_.erase(&page);
USE(size);
DCHECK_EQ(1u, size);
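The new area_offset/aligned_area_size computation in SetPermissionsOnExecutableMemoryChunk() accounts for the code-alignment padding that now precedes the object area on the first page. A tiny worked example with made-up numbers (the real values come from MemoryChunkLayout and the OS commit page size):

#include <cstddef>
#include <cstdio>

constexpr std::size_t RoundUp(std::size_t value, std::size_t alignment) {
  return ((value + alignment - 1) / alignment) * alignment;
}

int main() {
  // Illustrative numbers only.
  const std::size_t page_size = 4096;            // commit page size
  const std::size_t object_start_offset = 4160;  // ObjectStartOffsetInCodePage()
  const std::size_t object_page_offset = 4096;   // ObjectPageOffsetInCodePage()
  const std::size_t area_size = 10000;           // requested code area size

  // The code area starts at a small intra-page offset (alignment padding), so
  // the page-aligned size must cover that offset plus the area itself.
  const std::size_t area_offset = object_start_offset - object_page_offset;  // 64
  const std::size_t aligned_area_size =
      RoundUp(area_offset + area_size, page_size);

  std::printf("aligned_area_size = %zu\n", aligned_area_size);  // prints 12288
  return 0;
}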
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index ed6e4c82fa..f831cc77d3 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -7,9 +7,9 @@
#include <atomic>
#include <memory>
-#include <unordered_map>
+#include <set>
#include <unordered_set>
-#include <vector>
+#include <utility>
#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
@@ -28,6 +28,10 @@
namespace v8 {
namespace internal {
+namespace heap {
+class TestMemoryAllocatorScope;
+} // namespace heap
+
class Heap;
class Isolate;
class ReadOnlyPage;
@@ -38,6 +42,9 @@ class ReadOnlyPage;
// pages for large object space.
class MemoryAllocator {
public:
+ using NormalPagesSet = std::unordered_set<const Page*>;
+ using LargePagesSet = std::set<const LargePage*>;
+
// Unmapper takes care of concurrently unmapping and uncommitting memory
// chunks.
class Unmapper {
@@ -194,7 +201,8 @@ class MemoryAllocator {
size_t object_size,
Executability executable);
- ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);
+ ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space,
+ Address hint = kNullAddress);
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
@@ -265,6 +273,9 @@ class MemoryAllocator {
// Return the normal or large page that contains this address, if it is owned
// by this heap, otherwise a nullptr.
+ V8_EXPORT_PRIVATE static const MemoryChunk* LookupChunkContainingAddress(
+ const NormalPagesSet& normal_pages, const LargePagesSet& large_page,
+ Address addr);
V8_EXPORT_PRIVATE const MemoryChunk* LookupChunkContainingAddress(
Address addr) const;
@@ -274,6 +285,19 @@ class MemoryAllocator {
void RecordLargePageCreated(const LargePage& page);
void RecordLargePageDestroyed(const LargePage& page);
+ std::pair<const NormalPagesSet, const LargePagesSet> SnapshotPageSetsUnsafe()
+ const {
+ return std::make_pair(normal_pages_, large_pages_);
+ }
+
+ std::pair<const NormalPagesSet, const LargePagesSet> SnapshotPageSetsSafe()
+ const {
+ // For the shared heap, this method may be called by client isolates and
+ // thus requires a mutex.
+ base::MutexGuard guard(&pages_mutex_);
+ return SnapshotPageSetsUnsafe();
+ }
+
private:
// Used to store all data about MemoryChunk allocation, e.g. in
// AllocateUninitializedChunk.
@@ -287,19 +311,28 @@ class MemoryAllocator {
// Computes the size of a MemoryChunk from the size of the object_area and
// whether the chunk is executable or not.
- static size_t ComputeChunkSize(size_t area_size, Executability executable);
+ static size_t ComputeChunkSize(size_t area_size, AllocationSpace space,
+ Executability executable);
// Internal allocation method for all pages/memory chunks. Returns data about
// the uninitialized memory region.
V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
- Executability executable, PageSize page_size);
+ Executability executable, PageSize page_size) {
+ return AllocateUninitializedChunkAt(space, area_size, executable,
+ kNullAddress, page_size);
+ }
+ V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
+ AllocateUninitializedChunkAt(BaseSpace* space, size_t area_size,
+ Executability executable, Address hint,
+ PageSize page_size);
// Internal raw allocation method that allocates an aligned MemoryChunk and
// sets the right memory permissions.
Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
- size_t alignment, Executability executable,
- void* hint, VirtualMemory* controller);
+ size_t alignment, AllocationSpace space,
+ Executability executable, void* hint,
+ VirtualMemory* controller);
// Commit memory region owned by given reservation object. Returns true if
// it succeeded and false otherwise.
@@ -424,8 +457,8 @@ class MemoryAllocator {
// Allocated normal and large pages are stored here, to be used during
// conservative stack scanning.
- std::unordered_set<const Page*> normal_pages_;
- std::set<const LargePage*> large_pages_;
+ NormalPagesSet normal_pages_;
+ LargePagesSet large_pages_;
mutable base::Mutex pages_mutex_;
V8_EXPORT_PRIVATE static size_t commit_page_size_;
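SnapshotPageSetsSafe()/SnapshotPageSetsUnsafe() together with the new static LookupChunkContainingAddress() overload let a caller copy the page sets once under the mutex and then resolve addresses without any further locking. A self-contained sketch of that pattern, with illustrative types rather than the real Page/LargePage classes:

#include <cstdint>
#include <mutex>
#include <set>

struct Page { uintptr_t start; uintptr_t end; };  // illustrative stand-in

class PageRegistry {
 public:
  using PageSet = std::set<const Page*>;

  void Register(const Page* page) {
    std::lock_guard<std::mutex> guard(mutex_);
    pages_.insert(page);
  }

  // Copy the set once while holding the lock...
  PageSet Snapshot() const {
    std::lock_guard<std::mutex> guard(mutex_);
    return pages_;
  }

  // ...then query the private copy lock-free, e.g. from a stack scanner.
  static const Page* Lookup(const PageSet& snapshot, uintptr_t addr) {
    for (const Page* page : snapshot) {
      if (addr >= page->start && addr < page->end) return page;
    }
    return nullptr;
  }

 private:
  mutable std::mutex mutex_;
  PageSet pages_;
};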
diff --git a/deps/v8/src/heap/memory-chunk-layout.cc b/deps/v8/src/heap/memory-chunk-layout.cc
index e81aaec8f3..1fb265f39f 100644
--- a/deps/v8/src/heap/memory-chunk-layout.cc
+++ b/deps/v8/src/heap/memory-chunk-layout.cc
@@ -24,6 +24,12 @@ size_t MemoryChunkLayout::CodePageGuardSize() {
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
+ // The first page also includes padding for code alignment.
+ return ObjectPageOffsetInCodePage() +
+ InstructionStream::kCodeAlignmentMinusCodeHeader;
+}
+
+intptr_t MemoryChunkLayout::ObjectPageOffsetInCodePage() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
@@ -46,11 +52,19 @@ intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
ALIGN_TO_ALLOCATION_ALIGNMENT(kDoubleSize));
}
+intptr_t MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage() {
+ return RoundUp(BasicMemoryChunk::kHeaderSize,
+ ALIGN_TO_ALLOCATION_ALIGNMENT(kDoubleSize));
+}
+
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE || space == CODE_LO_SPACE) {
return ObjectStartOffsetInCodePage();
}
+ if (space == RO_SPACE) {
+ return ObjectStartOffsetInReadOnlyPage();
+ }
return ObjectStartOffsetInDataPage();
}
@@ -60,16 +74,26 @@ size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
return memory;
}
+size_t MemoryChunkLayout::AllocatableMemoryInReadOnlyPage() {
+ size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInReadOnlyPage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return AllocatableMemoryInCodePage();
}
+ if (space == RO_SPACE) {
+ return AllocatableMemoryInReadOnlyPage();
+ }
return AllocatableMemoryInDataPage();
}
int MemoryChunkLayout::MaxRegularCodeObjectSize() {
- int size = static_cast<int>(AllocatableMemoryInCodePage() / 2);
+ int size = static_cast<int>(
+ RoundDown(AllocatableMemoryInCodePage() / 2, kTaggedSize));
DCHECK_LE(size, kMaxRegularHeapObjectSize);
return size;
}
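MaxRegularCodeObjectSize() now rounds the half-page budget down to kTaggedSize so the limit is always tag-aligned. A small illustration with made-up numbers (the real values depend on the code-page layout and pointer size):

#include <cstddef>
#include <cstdio>

constexpr std::size_t RoundDown(std::size_t value, std::size_t alignment) {
  return (value / alignment) * alignment;
}

int main() {
  // Illustrative numbers only; chosen so the rounding actually matters.
  const std::size_t allocatable_in_code_page = 126982;
  const std::size_t kTaggedSize = 8;

  // Half of the allocatable area, rounded down to a tagged-size boundary.
  const std::size_t max_code_object_size =
      RoundDown(allocatable_in_code_page / 2, kTaggedSize);
  std::printf("max regular code object size = %zu\n",
              max_code_object_size);  // prints 63488
  return 0;
}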
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 8c771f8b2b..05a44d6291 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -5,16 +5,13 @@
#ifndef V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#define V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
-#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/progress-bar.h"
#include "src/heap/slot-set.h"
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-#include "src/heap/object-start-bitmap.h"
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
namespace v8 {
namespace internal {
@@ -29,7 +26,7 @@ enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
OLD_TO_SHARED,
- OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_SHARED + 1 : OLD_TO_SHARED,
+ OLD_TO_CODE,
NUMBER_OF_REMEMBERED_SET_TYPES
};
@@ -39,11 +36,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static constexpr int kNumSets = NUMBER_OF_REMEMBERED_SET_TYPES;
static constexpr int kNumTypes = ExternalBackingStoreType::kNumTypes;
-#if V8_CC_MSVC && V8_TARGET_ARCH_IA32
- static constexpr int kMemoryChunkAlignment = 8;
-#else
static constexpr int kMemoryChunkAlignment = sizeof(size_t);
-#endif // V8_CC_MSVC && V8_TARGET_ARCH_IA32
#define FIELD(Type, Name) \
k##Name##Offset, k##Name##End = k##Name##Offset + sizeof(Type) - 1
enum Header {
@@ -65,6 +58,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
FIELD(void* [kNumSets], InvalidatedSlots),
FIELD(base::Mutex*, Mutex),
+ FIELD(base::SharedMutex*, SharedMutex),
FIELD(std::atomic<intptr_t>, ConcurrentSweeping),
FIELD(base::Mutex*, PageProtectionChangeMutex),
FIELD(uintptr_t, WriteUnprotectCounter),
@@ -73,10 +67,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(FreeListCategory**, Categories),
FIELD(CodeObjectRegistry*, CodeObjectRegistry),
FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets),
- FIELD(ActiveSystemPages, ActiveSystemPages),
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- FIELD(ObjectStartBitmap, ObjectStartBitmap),
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ FIELD(ActiveSystemPages*, ActiveSystemPages),
FIELD(size_t, WasUsedForAllocation),
kMarkingBitmapOffset,
kMemoryChunkHeaderSize =
@@ -92,11 +83,16 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
#undef FIELD
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
+ // Code pages have padding on the first page for code alignment, so the
+ // ObjectStartOffset will not be page aligned.
+ static intptr_t ObjectPageOffsetInCodePage();
static intptr_t ObjectStartOffsetInCodePage();
static intptr_t ObjectEndOffsetInCodePage();
static size_t AllocatableMemoryInCodePage();
static intptr_t ObjectStartOffsetInDataPage();
static size_t AllocatableMemoryInDataPage();
+ static intptr_t ObjectStartOffsetInReadOnlyPage();
+ static size_t AllocatableMemoryInReadOnlyPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 35493a0b70..7277852deb 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -5,6 +5,7 @@
#include "src/heap/memory-chunk.h"
#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
@@ -129,34 +130,25 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
VirtualMemory reservation, Executability executable,
PageSize page_size)
: BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
- std::move(reservation))
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- ,
- object_start_bitmap_(PtrComprCageBase{heap->isolate()}, area_start)
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-{
+ std::move(reservation)) {
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_SHARED], nullptr);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
- }
+ base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
nullptr);
invalidated_slots_[OLD_TO_NEW] = nullptr;
invalidated_slots_[OLD_TO_OLD] = nullptr;
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // Not actually used but initialize anyway for predictability.
- invalidated_slots_[OLD_TO_CODE] = nullptr;
- }
+ invalidated_slots_[OLD_TO_CODE] = nullptr;
invalidated_slots_[OLD_TO_SHARED] = nullptr;
progress_bar_.Initialize();
set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
page_protection_change_mutex_ = new base::Mutex();
write_unprotect_counter_ = 0;
mutex_ = new base::Mutex();
+ shared_mutex_ = new base::SharedMutex();
external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;
@@ -171,9 +163,13 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
heap->code_space_memory_modification_scope_depth();
} else if (!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(area_start_, page_size));
- size_t area_size = RoundUp(area_end_ - area_start_, page_size);
- CHECK(reservation_.SetPermissions(area_start_, area_size,
+ // On executable chunks, area_start_ points past padding used for code
+ // alignment.
+ Address start_before_padding =
+ address() + MemoryChunkLayout::ObjectPageOffsetInCodePage();
+ DCHECK(IsAligned(start_before_padding, page_size));
+ size_t area_size = RoundUp(area_end_ - start_before_padding, page_size);
+ CHECK(reservation_.SetPermissions(start_before_padding, area_size,
DefaultWritableCodePermissions()));
}
}
@@ -187,17 +183,19 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
possibly_empty_buckets_.Initialize();
if (page_size == PageSize::kRegular) {
- active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
- MemoryAllocator::GetCommitPageSizeBits(), size());
+ active_system_pages_ = new ActiveSystemPages;
+ active_system_pages_->Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(),
+ size());
} else {
// We do not track active system pages for large pages.
- active_system_pages_.Clear();
+ active_system_pages_ = nullptr;
}
// All pages of a shared heap need to be marked with this flag.
- if (heap->IsShared() || owner()->identity() == SHARED_SPACE ||
+ if (owner()->identity() == SHARED_SPACE ||
owner()->identity() == SHARED_LO_SPACE) {
- SetFlag(MemoryChunk::IN_SHARED_HEAP);
+ SetFlag(MemoryChunk::IN_WRITABLE_SHARED_SPACE);
}
#ifdef DEBUG
@@ -207,7 +205,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
size_t MemoryChunk::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
- return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
+ return active_system_pages_->Size(MemoryAllocator::GetCommitPageSizeBits());
}
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
@@ -216,8 +214,16 @@ void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::INCREMENTAL_MARKING);
} else {
- ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ if (owner_identity() == SHARED_SPACE ||
+ owner_identity() == SHARED_LO_SPACE) {
+ // We need to track pointers into the SHARED_SPACE for OLD_TO_SHARED.
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ // No need to track OLD_TO_NEW or OLD_TO_SHARED within the shared space.
+ ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
}
}
@@ -241,6 +247,10 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
delete mutex_;
mutex_ = nullptr;
}
+ if (shared_mutex_) {
+ delete shared_mutex_;
+ shared_mutex_ = nullptr;
+ }
if (page_protection_change_mutex_ != nullptr) {
delete page_protection_change_mutex_;
page_protection_change_mutex_ = nullptr;
@@ -250,10 +260,15 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
code_object_registry_ = nullptr;
}
+ if (active_system_pages_ != nullptr) {
+ delete active_system_pages_;
+ active_system_pages_ = nullptr;
+ }
+
possibly_empty_buckets_.Release();
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
- if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
+ ReleaseSlotSet<OLD_TO_CODE>();
ReleaseSlotSet<OLD_TO_SHARED>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
@@ -276,9 +291,7 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template V8_EXPORT_PRIVATE SlotSet*
MemoryChunk::AllocateSlotSet<OLD_TO_SHARED>();
-#ifdef V8_EXTERNAL_CODE_SPACE
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_CODE>();
-#endif // V8_EXTERNAL_CODE_SPACE
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
@@ -300,9 +313,7 @@ SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_SHARED>();
-#ifdef V8_EXTERNAL_CODE_SPACE
template void MemoryChunk::ReleaseSlotSet<OLD_TO_CODE>();
-#endif // V8_EXTERNAL_CODE_SPACE
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
@@ -382,6 +393,7 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
int new_size) {
// ByteArray and FixedArray are still invalidated in tests.
DCHECK(object.IsString() || object.IsByteArray() || object.IsFixedArray());
+ DCHECK(!object.InWritableSharedSpace());
bool skip_slot_recording;
switch (type) {
@@ -428,6 +440,7 @@ MemoryChunk::UpdateInvalidatedObjectSize<OLD_TO_SHARED>(HeapObject object,
template <RememberedSetType type>
void MemoryChunk::UpdateInvalidatedObjectSize(HeapObject object, int new_size) {
+ DCHECK(!object.InWritableSharedSpace());
DCHECK_GT(new_size, 0);
if (invalidated_slots<type>() == nullptr) return;
@@ -490,6 +503,8 @@ void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
MemoryChunkLayout::kInvalidatedSlotsOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->mutex_) - chunk->address(),
MemoryChunkLayout::kMutexOffset);
+ DCHECK_EQ(reinterpret_cast<Address>(&chunk->shared_mutex_) - chunk->address(),
+ MemoryChunkLayout::kSharedMutexOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->concurrent_sweeping_) -
chunk->address(),
MemoryChunkLayout::kConcurrentSweepingOffset);
@@ -515,11 +530,6 @@ void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<Address>(&chunk->active_system_pages_) -
chunk->address(),
MemoryChunkLayout::kActiveSystemPagesOffset);
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- DCHECK_EQ(reinterpret_cast<Address>(&chunk->object_start_bitmap_) -
- chunk->address(),
- MemoryChunkLayout::kObjectStartBitmapOffset);
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
DCHECK_EQ(reinterpret_cast<Address>(&chunk->was_used_for_allocation_) -
chunk->address(),
MemoryChunkLayout::kWasUsedForAllocationOffset);
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index b3e784a5d2..1061e5792c 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -12,7 +12,6 @@
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
-#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
@@ -24,6 +23,7 @@ namespace internal {
class CodeObjectRegistry;
class FreeListCategory;
+class Space;
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
@@ -85,7 +85,8 @@ class MemoryChunk : public BasicMemoryChunk {
void DiscardUnusedMemory(Address addr, size_t size);
- base::Mutex* mutex() { return mutex_; }
+ base::Mutex* mutex() const { return mutex_; }
+ base::SharedMutex* shared_mutex() const { return shared_mutex_; }
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
@@ -215,14 +216,6 @@ class MemoryChunk : public BasicMemoryChunk {
// read-only space chunks.
void ReleaseAllocatedMemoryNeededForWritableChunk();
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- ObjectStartBitmap* object_start_bitmap() { return &object_start_bitmap_; }
-
- const ObjectStartBitmap* object_start_bitmap() const {
- return &object_start_bitmap_;
- }
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
void MarkWasUsedForAllocation() { was_used_for_allocation_ = true; }
void ClearWasUsedForAllocation() { was_used_for_allocation_ = false; }
bool WasUsedForAllocation() const { return was_used_for_allocation_; }
@@ -241,6 +234,15 @@ class MemoryChunk : public BasicMemoryChunk {
static void ValidateOffsets(MemoryChunk* chunk);
#endif
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
+ void set_slot_set(SlotSet* slot_set) {
+ if (access_mode == AccessMode::ATOMIC) {
+ base::AsAtomicPointer::Release_Store(&slot_set_[type], slot_set);
+ return;
+ }
+ slot_set_[type] = slot_set;
+ }
+
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
@@ -260,6 +262,7 @@ class MemoryChunk : public BasicMemoryChunk {
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
base::Mutex* mutex_;
+ base::SharedMutex* shared_mutex_;
std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
@@ -288,11 +291,7 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
- ActiveSystemPages active_system_pages_;
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- ObjectStartBitmap object_start_bitmap_;
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ ActiveSystemPages* active_system_pages_;
// Marks a chunk that was used for allocation since it was last swept. Used
// only for new space pages.
@@ -306,6 +305,8 @@ class MemoryChunk : public BasicMemoryChunk {
friend class MemoryAllocator;
friend class MemoryChunkValidator;
friend class PagedSpace;
+ template <RememberedSetType>
+ friend class RememberedSet;
};
} // namespace internal
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 2cb2fb3d89..816a2d2f5c 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -25,9 +25,12 @@ MemoryReducer::MemoryReducer(Heap* heap)
: heap_(heap),
taskrunner_(V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(heap->isolate()))),
- state_(kDone, 0, 0.0, 0.0, 0),
+ state_(State::CreateUninitialized()),
js_calls_counter_(0),
- js_calls_sample_time_ms_(0.0) {}
+ js_calls_sample_time_ms_(0.0) {
+ DCHECK(v8_flags.incremental_marking);
+ DCHECK(v8_flags.memory_reducer);
+}
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
: CancelableTask(memory_reducer->heap()->isolate()),
@@ -36,49 +39,49 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
void MemoryReducer::TimerTask::RunInternal() {
Heap* heap = memory_reducer_->heap();
- Event event;
- double time_ms = heap->MonotonicallyIncreasingTimeInMs();
+ const double time_ms = heap->MonotonicallyIncreasingTimeInMs();
heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
heap->OldGenerationAllocationCounter(),
heap->EmbedderAllocationCounter());
- bool low_allocation_rate = heap->HasLowAllocationRate();
- bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
+ const bool low_allocation_rate = heap->HasLowAllocationRate();
+ const bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
if (v8_flags.trace_gc_verbose) {
heap->isolate()->PrintWithTimestamp(
"Memory reducer: %s, %s\n",
low_allocation_rate ? "low alloc" : "high alloc",
optimize_for_memory ? "background" : "foreground");
}
- event.type = kTimer;
- event.time_ms = time_ms;
- // The memory reducer will start incremental markig if
+ // The memory reducer will start incremental marking if
// 1) mutator is likely idle: js call rate is low and allocation rate is low.
// 2) mutator is in background: optimize for memory flag is set.
- event.should_start_incremental_gc =
- low_allocation_rate || optimize_for_memory;
- event.can_start_incremental_gc =
+ const Event event{
+ kTimer,
+ time_ms,
+ heap->CommittedOldGenerationMemory(),
+ false,
+ low_allocation_rate || optimize_for_memory,
heap->incremental_marking()->IsStopped() &&
- (heap->incremental_marking()->CanBeStarted() || optimize_for_memory);
- event.committed_memory = heap->CommittedOldGenerationMemory();
+ (heap->incremental_marking()->CanBeStarted() || optimize_for_memory),
+ };
memory_reducer_->NotifyTimer(event);
}
void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK_EQ(kTimer, event.type);
- DCHECK_EQ(kWait, state_.action);
+ DCHECK_EQ(kWait, state_.id());
state_ = Step(state_, event);
- if (state_.action == kRun) {
+ if (state_.id() == kRun) {
DCHECK(heap()->incremental_marking()->IsStopped());
DCHECK(v8_flags.incremental_marking);
if (v8_flags.trace_gc_verbose) {
heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
- state_.started_gcs);
+ state_.started_gcs());
}
- heap()->StartIdleIncrementalMarking(
- GarbageCollectionReason::kMemoryReducer,
- kGCCallbackFlagCollectAllExternalMemory);
- } else if (state_.action == kWait) {
+ heap()->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kMemoryReducer,
+ kGCCallbackFlagCollectAllExternalMemory);
+ } else if (state_.id() == kWait) {
if (!heap()->incremental_marking()->IsStopped() &&
heap()->ShouldOptimizeForMemoryUsage()) {
// Make progress with pending incremental marking if memory usage has
@@ -87,114 +90,140 @@ void MemoryReducer::NotifyTimer(const Event& event) {
heap()->incremental_marking()->AdvanceAndFinalizeIfComplete();
}
// Re-schedule the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(state_.next_gc_start_ms() - event.time_ms);
if (v8_flags.trace_gc_verbose) {
heap()->isolate()->PrintWithTimestamp(
"Memory reducer: waiting for %.f ms\n",
- state_.next_gc_start_ms - event.time_ms);
+ state_.next_gc_start_ms() - event.time_ms);
}
}
}
-
-void MemoryReducer::NotifyMarkCompact(const Event& event) {
- DCHECK_EQ(kMarkCompact, event.type);
- Action old_action = state_.action;
+void MemoryReducer::NotifyMarkCompact(size_t committed_memory_before) {
+ if (!v8_flags.incremental_marking) return;
+ const size_t committed_memory = heap()->CommittedOldGenerationMemory();
+
+ // Trigger one more GC if
+ // - this GC decreased committed memory,
+ // - there is high fragmentation.
+ const MemoryReducer::Event event{
+ MemoryReducer::kMarkCompact,
+ heap()->MonotonicallyIncreasingTimeInMs(),
+ committed_memory,
+ (committed_memory_before > committed_memory + MB) ||
+ heap()->HasHighFragmentation(),
+ false,
+ false};
+ const Id old_action = state_.id();
+ int old_started_gcs = state_.started_gcs();
state_ = Step(state_, event);
- if (old_action != kWait && state_.action == kWait) {
+ if (old_action != kWait && state_.id() == kWait) {
// If we are transitioning to the WAIT state, start the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(state_.next_gc_start_ms() - event.time_ms);
}
if (old_action == kRun) {
if (v8_flags.trace_gc_verbose) {
heap()->isolate()->PrintWithTimestamp(
- "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
- state_.action == kWait ? "will do more" : "done");
+ "Memory reducer: finished GC #%d (%s)\n", old_started_gcs,
+ state_.id() == kWait ? "will do more" : "done");
}
}
}
-void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
- DCHECK_EQ(kPossibleGarbage, event.type);
- Action old_action = state_.action;
+void MemoryReducer::NotifyPossibleGarbage() {
+ const MemoryReducer::Event event{MemoryReducer::kPossibleGarbage,
+ heap()->MonotonicallyIncreasingTimeInMs(),
+ 0,
+ false,
+ false,
+ false};
+ const Id old_action = state_.id();
state_ = Step(state_, event);
- if (old_action != kWait && state_.action == kWait) {
+ if (old_action != kWait && state_.id() == kWait) {
// If we are transitioning to the WAIT state, start the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(state_.next_gc_start_ms() - event.time_ms);
}
}
-
bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
- return state.last_gc_time_ms != 0 &&
- event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
+ return state.last_gc_time_ms() != 0 &&
+ event.time_ms > state.last_gc_time_ms() + kWatchdogDelayMs;
}
// For specification of this function see the comment for MemoryReducer class.
MemoryReducer::State MemoryReducer::Step(const State& state,
const Event& event) {
- if (!v8_flags.incremental_marking || !v8_flags.memory_reducer) {
- return State(kDone, 0, 0, state.last_gc_time_ms, 0);
- }
- switch (state.action) {
+ DCHECK(v8_flags.memory_reducer);
+ DCHECK(v8_flags.incremental_marking);
+
+ switch (state.id()) {
case kDone:
+ CHECK_IMPLIES(
+ v8_flags.memory_reducer_single_gc,
+ state.started_gcs() == 0 || state.started_gcs() == kMaxNumberOfGCs);
if (event.type == kTimer) {
return state;
} else if (event.type == kMarkCompact) {
if (event.committed_memory <
std::max(
- static_cast<size_t>(state.committed_memory_at_last_run *
+ static_cast<size_t>(state.committed_memory_at_last_run() *
kCommittedMemoryFactor),
- state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
+ state.committed_memory_at_last_run() + kCommittedMemoryDelta)) {
return state;
} else {
- return State(kWait, 0, event.time_ms + kLongDelayMs, event.time_ms,
- 0);
+ return State::CreateWait(0, event.time_ms + kLongDelayMs,
+ event.time_ms);
}
} else {
DCHECK_EQ(kPossibleGarbage, event.type);
- return State(kWait, 0,
- event.time_ms + v8_flags.gc_memory_reducer_start_delay_ms,
- state.last_gc_time_ms, 0);
+ return State::CreateWait(
+ 0, event.time_ms + v8_flags.gc_memory_reducer_start_delay_ms,
+ state.last_gc_time_ms());
}
case kWait:
+ CHECK_IMPLIES(v8_flags.memory_reducer_single_gc,
+ state.started_gcs() == 0);
switch (event.type) {
case kPossibleGarbage:
return state;
case kTimer:
- if (state.started_gcs >= kMaxNumberOfGCs) {
- return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
- event.committed_memory);
+ if (state.started_gcs() >= kMaxNumberOfGCs) {
+ return State::CreateDone(state.last_gc_time_ms(),
+ event.committed_memory);
} else if (event.can_start_incremental_gc &&
(event.should_start_incremental_gc ||
WatchdogGC(state, event))) {
- if (state.next_gc_start_ms <= event.time_ms) {
- return State(kRun, state.started_gcs + 1, 0.0,
- state.last_gc_time_ms, 0);
+ if (state.next_gc_start_ms() <= event.time_ms) {
+ return State::CreateRun(state.started_gcs() + 1);
} else {
return state;
}
} else {
- return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
- state.last_gc_time_ms, 0);
+ return State::CreateWait(state.started_gcs(),
+ event.time_ms + kLongDelayMs,
+ state.last_gc_time_ms());
}
case kMarkCompact:
- return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
- event.time_ms, 0);
+ return State::CreateWait(state.started_gcs(),
+ event.time_ms + kLongDelayMs, event.time_ms);
}
case kRun:
- if (event.type != kMarkCompact) {
- return state;
- } else {
- if (state.started_gcs < kMaxNumberOfGCs &&
- (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
- return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
- event.time_ms, 0);
+ CHECK_IMPLIES(v8_flags.memory_reducer_single_gc,
+ state.started_gcs() == 1);
+ if (event.type == kMarkCompact) {
+ if (!v8_flags.memory_reducer_single_gc &&
+ state.started_gcs() < kMaxNumberOfGCs &&
+ (event.next_gc_likely_to_collect_more ||
+ state.started_gcs() == 1)) {
+ return State::CreateWait(state.started_gcs(),
+ event.time_ms + kShortDelayMs,
+ event.time_ms);
} else {
- return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
- event.committed_memory);
+ return State::CreateDone(event.time_ms, event.committed_memory);
}
+ } else {
+ return state;
}
}
UNREACHABLE();
@@ -209,7 +238,7 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
(delay_ms + kSlackMs) / 1000.0);
}
-void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
+void MemoryReducer::TearDown() { state_ = State::CreateUninitialized(); }
} // namespace internal
} // namespace v8
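The rewritten Step() above stays a pure function from (state, event) to a new state, which keeps the Done → Wait → Run → Done cycle easy to unit-test in isolation. A toy, self-contained version of that shape (deliberately not the real V8 transition rules or delays):

#include <cassert>

enum class Id { kDone, kWait, kRun };
enum class EventType { kTimer, kMarkCompact, kPossibleGarbage };

struct State { Id id; int started_gcs; };
struct Event { EventType type; bool can_start_gc; };

// Toy transition function: possible garbage arms the timer, a timer event
// starts a GC if allowed, and the mark-compact that follows finishes the run.
State Step(State state, Event event) {
  switch (state.id) {
    case Id::kDone:
      if (event.type == EventType::kPossibleGarbage)
        return {Id::kWait, state.started_gcs};
      return state;
    case Id::kWait:
      if (event.type == EventType::kTimer && event.can_start_gc)
        return {Id::kRun, state.started_gcs + 1};
      return state;
    case Id::kRun:
      if (event.type == EventType::kMarkCompact)
        return {Id::kDone, state.started_gcs};
      return state;
  }
  return state;
}

int main() {
  State s{Id::kDone, 0};
  s = Step(s, {EventType::kPossibleGarbage, false});
  s = Step(s, {EventType::kTimer, true});
  s = Step(s, {EventType::kMarkCompact, false});
  assert(s.id == Id::kDone && s.started_gcs == 1);
  return 0;
}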
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 2d4a06180f..1f1f9cef15 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -86,21 +86,61 @@ class Heap;
// long_delay_ms, short_delay_ms, and watchdog_delay_ms are constants.
class V8_EXPORT_PRIVATE MemoryReducer {
public:
- enum Action { kDone, kWait, kRun };
+ enum Id { kDone, kWait, kRun };
- struct State {
- State(Action action, int started_gcs, double next_gc_start_ms,
+ class State {
+ public:
+ static State CreateUninitialized() { return {kDone, 0, 0, 0, 0}; }
+
+ static State CreateDone(double last_gc_time_ms, size_t committed_memory) {
+ return {kDone, kMaxNumberOfGCs, 0, last_gc_time_ms, committed_memory};
+ }
+
+ static State CreateWait(int started_gcs, double next_gc_time_ms,
+ double last_gc_time_ms) {
+ return {kWait, started_gcs, next_gc_time_ms, last_gc_time_ms, 0};
+ }
+
+ static State CreateRun(int started_gcs) {
+ return {kRun, started_gcs, 0, 0, 0};
+ }
+
+ Id id() const { return id_; }
+
+ int started_gcs() const {
+ DCHECK(id() == kWait || id() == kRun || id() == kDone);
+ return started_gcs_;
+ }
+
+ double next_gc_start_ms() const {
+ DCHECK_EQ(id(), kWait);
+ return next_gc_start_ms_;
+ }
+
+ double last_gc_time_ms() const {
+ DCHECK(id() == kWait || id() == kDone);
+ return last_gc_time_ms_;
+ }
+
+ size_t committed_memory_at_last_run() const {
+ DCHECK_EQ(id(), kDone);
+ return committed_memory_at_last_run_;
+ }
+
+ private:
+ State(Id action, int started_gcs, double next_gc_start_ms,
double last_gc_time_ms, size_t committed_memory_at_last_run)
- : action(action),
- started_gcs(started_gcs),
- next_gc_start_ms(next_gc_start_ms),
- last_gc_time_ms(last_gc_time_ms),
- committed_memory_at_last_run(committed_memory_at_last_run) {}
- Action action;
- int started_gcs;
- double next_gc_start_ms;
- double last_gc_time_ms;
- size_t committed_memory_at_last_run;
+ : id_(action),
+ started_gcs_(started_gcs),
+ next_gc_start_ms_(next_gc_start_ms),
+ last_gc_time_ms_(last_gc_time_ms),
+ committed_memory_at_last_run_(committed_memory_at_last_run) {}
+
+ Id id_;
+ int started_gcs_;
+ double next_gc_start_ms_;
+ double last_gc_time_ms_;
+ size_t committed_memory_at_last_run_;
};
enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
@@ -118,9 +158,8 @@ class V8_EXPORT_PRIVATE MemoryReducer {
MemoryReducer(const MemoryReducer&) = delete;
MemoryReducer& operator=(const MemoryReducer&) = delete;
// Callbacks.
- void NotifyMarkCompact(const Event& event);
- void NotifyPossibleGarbage(const Event& event);
- void NotifyBackgroundIdleNotification(const Event& event);
+ void NotifyMarkCompact(size_t committed_memory_before);
+ void NotifyPossibleGarbage();
// The step function that computes the next state from the current state and
// the incoming event.
static State Step(const State& state, const Event& event);
@@ -141,7 +180,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
Heap* heap() { return heap_; }
bool ShouldGrowHeapSlowly() {
- return state_.action == kDone && state_.started_gcs > 0;
+ return state_.id() == kDone && state_.started_gcs() > 0;
}
private:
diff --git a/deps/v8/src/heap/minor-gc-job.cc b/deps/v8/src/heap/minor-gc-job.cc
new file mode 100644
index 0000000000..36e2af56ef
--- /dev/null
+++ b/deps/v8/src/heap/minor-gc-job.cc
@@ -0,0 +1,74 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/minor-gc-job.h"
+
+#include "src/base/platform/time.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/flags/flags.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/init/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class MinorGCJob::Task : public CancelableTask {
+ public:
+ Task(Isolate* isolate, MinorGCJob* job)
+ : CancelableTask(isolate), isolate_(isolate), job_(job) {}
+
+ // CancelableTask overrides.
+ void RunInternal() override;
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ Isolate* const isolate_;
+ MinorGCJob* const job_;
+};
+
+size_t MinorGCJob::YoungGenerationTaskTriggerSize(Heap* heap) {
+ return heap->new_space()->TotalCapacity() * v8_flags.minor_gc_task_trigger /
+ 100;
+}
+
+bool MinorGCJob::YoungGenerationSizeTaskTriggerReached(Heap* heap) {
+ return heap->new_space()->Size() >= YoungGenerationTaskTriggerSize(heap);
+}
+
+void MinorGCJob::ScheduleTaskIfNeeded(Heap* heap) {
+ if (!v8_flags.minor_gc_task) return;
+ if (task_pending_) return;
+ if (heap->IsTearingDown()) return;
+ if (!YoungGenerationSizeTaskTriggerReached(heap)) return;
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+ if (taskrunner->NonNestableTasksEnabled()) {
+ taskrunner->PostNonNestableTask(
+ std::make_unique<Task>(heap->isolate(), this));
+ task_pending_ = true;
+ }
+}
+
+void MinorGCJob::Task::RunInternal() {
+ VMState<GC> state(isolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
+
+ job_->task_pending_ = false;
+
+ if (v8_flags.minor_mc &&
+ isolate()->heap()->incremental_marking()->IsMajorMarking()) {
+ // Don't trigger a MinorMC cycle while major incremental marking is active.
+ return;
+ }
+ if (!MinorGCJob::YoungGenerationSizeTaskTriggerReached(isolate()->heap()))
+ return;
+
+ isolate()->heap()->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kTask);
+}
+
+} // namespace internal
+} // namespace v8
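
The scheduling condition above reduces to a percentage of the young generation's total capacity. A worked example with made-up numbers (the 16 MB capacity and the flag value of 80 are hypothetical, not V8 defaults):

  // YoungGenerationTaskTriggerSize(heap)
  //     = new_space()->TotalCapacity() * v8_flags.minor_gc_task_trigger / 100
  // With a 16 MB new space and minor_gc_task_trigger == 80:
  //     16 MB * 80 / 100 = 12.8 MB
  // so ScheduleTaskIfNeeded posts a task once new_space()->Size() reaches
  // roughly 12.8 MB, provided v8_flags.minor_gc_task is set, no task is
  // already pending, and the heap is not tearing down.
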
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/minor-gc-job.h
index c9d17cded7..a3aa8b6dcf 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/minor-gc-job.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_SCAVENGE_JOB_H_
-#define V8_HEAP_SCAVENGE_JOB_H_
+#ifndef V8_HEAP_MINOR_GC_JOB_H_
+#define V8_HEAP_MINOR_GC_JOB_H_
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
@@ -16,25 +16,22 @@ class Isolate;
// The scavenge job uses platform tasks to perform a young generation
// Scavenge garbage collection. The job posts a foreground task.
-class ScavengeJob {
+class MinorGCJob {
public:
- static constexpr size_t kStepSize = 64 * KB;
-
- ScavengeJob() V8_NOEXCEPT = default;
+ MinorGCJob() V8_NOEXCEPT = default;
void ScheduleTaskIfNeeded(Heap* heap);
+ static size_t YoungGenerationTaskTriggerSize(Heap* heap);
+
private:
class Task;
static bool YoungGenerationSizeTaskTriggerReached(Heap* heap);
- static size_t YoungGenerationTaskTriggerSize(Heap* heap);
-
- void set_task_pending(bool value) { task_pending_ = value; }
bool task_pending_ = false;
};
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_SCAVENGE_JOB_H_
+#endif // V8_HEAP_MINOR_GC_JOB_H_
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index 14c6753804..63969f86fe 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -94,7 +94,7 @@ V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
}
DCHECK(old_top + aligned_size_in_bytes <= high);
- UpdateInlineAllocationLimit(aligned_size_in_bytes);
+ UpdateInlineAllocationLimitForAllocation(aligned_size_in_bytes);
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
@@ -108,8 +108,11 @@ V8_INLINE bool PagedSpaceForNewSpace::EnsureAllocation(
int* out_max_aligned_size) {
if (!PagedSpaceBase::EnsureAllocation(size_in_bytes, alignment, origin,
out_max_aligned_size)) {
- return false;
+ if (!AddPageBeyondCapacity(size_in_bytes, origin)) {
+ return false;
+ }
}
+
allocated_linear_areas_ += limit() - top();
return true;
}
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index da2abbe95d..8358f3d3f9 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -9,6 +9,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
@@ -357,7 +358,7 @@ void SemiSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
-void SemiSpace::Verify() const {
+void SemiSpace::VerifyPageMetadata() const {
bool is_from_space = (id_ == kFromSpace);
size_t external_backing_store_bytes[kNumTypes];
@@ -436,10 +437,6 @@ NewSpace::NewSpace(Heap* heap, LinearAllocationArea& allocation_info)
allocation_counter_, allocation_info,
linear_area_original_data_) {}
-void NewSpace::ResetParkedAllocationBuffers() {
- parked_allocation_buffers_.clear();
-}
-
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
if (allocation_info_.MergeIfAdjacent(info)) {
linear_area_original_data_.set_original_top_release(allocation_info_.top());
@@ -465,82 +462,6 @@ void NewSpace::VerifyTop() const {
}
#endif // DEBUG
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceObjectIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
- Address current_address) const {
- DCHECK(current_page->ContainsLimit(current_address));
-
- size_t external_space_bytes[kNumTypes];
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- CHECK(!current_page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!current_page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
-
- PtrComprCageBase cage_base(isolate);
- VerifyPointersVisitor visitor(heap());
- const Page* page = current_page;
- while (true) {
- if (!Page::IsAlignedToPageSize(current_address)) {
- HeapObject object = HeapObject::FromAddress(current_address);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map(cage_base);
- CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->old_space()->Contains(map));
-
- // The object should not be code or a map.
- CHECK(!object.IsMap(cage_base));
- CHECK(!object.IsAbstractCode(cage_base));
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- // All the interior pointers should be contained in the heap.
- int size = object.Size(cage_base);
- object.IterateBody(map, size, &visitor);
-
- if (object.IsExternalString(cage_base)) {
- ExternalString external_string = ExternalString::cast(object);
- size_t string_size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] +=
- string_size;
- }
-
- current_address += ALIGN_TO_ALLOCATION_ALIGNMENT(size);
- } else {
- // At end of page, switch to next page.
- page = page->next_page();
- if (!page) break;
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
- current_address = page->area_start();
- }
- }
-
- for (int i = 0; i < kNumTypes; i++) {
- if (i == ExternalBackingStoreType::kArrayBuffer) continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
-
- if (!v8_flags.concurrent_array_buffer_sweeping) {
- size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- current_page->object_start_bitmap()->Verify();
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-}
-#endif // VERIFY_HEAP
-
void NewSpace::PromotePageToOldSpace(Page* page) {
DCHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
DCHECK(page->InYoungGeneration());
@@ -548,7 +469,7 @@ void NewSpace::PromotePageToOldSpace(Page* page) {
RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InYoungGeneration());
- new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ USE(new_page);
}
// -----------------------------------------------------------------------------
@@ -650,7 +571,7 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
to_space_.AddRangeToActiveSystemPages(top(), limit());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
}
void SemiSpaceNewSpace::ResetLinearAllocationArea() {
@@ -665,7 +586,8 @@ void SemiSpaceNewSpace::ResetLinearAllocationArea() {
}
}
-void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+void SemiSpaceNewSpace::UpdateInlineAllocationLimitForAllocation(
+ size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(),
ALIGN_TO_ALLOCATION_ALIGNMENT(min_size));
DCHECK_LE(top(), new_limit);
@@ -683,6 +605,10 @@ void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
#endif
}
+void SemiSpaceNewSpace::UpdateInlineAllocationLimit() {
+ UpdateInlineAllocationLimitForAllocation(0);
+}
+
bool SemiSpaceNewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
@@ -734,10 +660,14 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
return false;
}
+void SemiSpaceNewSpace::ResetParkedAllocationBuffers() {
+ parked_allocation_buffers_.clear();
+}
+
void SemiSpaceNewSpace::FreeLinearAllocationArea() {
AdvanceAllocationObservers();
MakeLinearAllocationAreaIterable();
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
}
#if DEBUG
@@ -753,22 +683,69 @@ void SemiSpaceNewSpace::VerifyTop() const {
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
-void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
+void SemiSpaceNewSpace::Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
-
- VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current);
+ VerifyObjects(isolate, visitor);
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
+ from_space_.VerifyPageMetadata();
+ to_space_.VerifyPageMetadata();
+}
+
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void SemiSpaceNewSpace::VerifyObjects(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
+ size_t external_space_bytes[kNumTypes];
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ PtrComprCageBase cage_base(isolate);
+ for (const Page* page = to_space_.first_page(); page;
+ page = page->next_page()) {
+ visitor->VerifyPage(page);
+
+ Address current_address = page->area_start();
+
+ while (!Page::IsAlignedToPageSize(current_address)) {
+ HeapObject object = HeapObject::FromAddress(current_address);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space or read-only space.
+ int size = object.Size(cage_base);
+
+ visitor->VerifyObject(object);
+
+ if (object.IsExternalString(cage_base)) {
+ ExternalString external_string = ExternalString::cast(object);
+ size_t string_size = external_string.ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] +=
+ string_size;
+ }
+
+ current_address += ALIGN_TO_ALLOCATION_ALIGNMENT(size);
+ }
+
+ visitor->VerifyPageDone(page);
+ }
+
+ for (int i = 0; i < kNumTypes; i++) {
+ if (i == ExternalBackingStoreType::kArrayBuffer) continue;
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+
+ if (!v8_flags.concurrent_array_buffer_sweeping) {
+ size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
}
#endif // VERIFY_HEAP
@@ -792,9 +769,11 @@ void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
// Fix the current page, above the LAB.
DCHECK_NOT_NULL(*it);
- DCHECK((*it)->Contains(limit()));
- heap()->CreateFillerObjectAt(limit(),
- static_cast<int>((*it)->area_end() - limit()));
+ if (limit() != (*it)->area_end()) {
+ DCHECK((*it)->Contains(limit()));
+ heap()->CreateFillerObjectAt(limit(),
+ static_cast<int>((*it)->area_end() - limit()));
+ }
// Fix the remaining unused pages in the "to" semispace.
for (Page* page = *(++it); page != nullptr; page = *(++it)) {
@@ -803,15 +782,6 @@ void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
}
}
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-void SemiSpaceNewSpace::ClearUnusedObjectStartBitmaps() {
- if (!IsFromSpaceCommitted()) return;
- for (Page* page : PageRange(from_space().first_page(), nullptr)) {
- page->object_start_bitmap()->Clear();
- }
-}
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
bool SemiSpaceNewSpace::ShouldBePromoted(Address address) const {
Page* page = Page::FromAddress(address);
Address current_age_mark = age_mark();
@@ -872,7 +842,7 @@ void SemiSpaceNewSpace::EvacuatePrologue() {
DCHECK_EQ(0u, Size());
}
-void SemiSpaceNewSpace::EvacuateEpilogue() { set_age_mark(top()); }
+void SemiSpaceNewSpace::GarbageCollectionEpilogue() { set_age_mark(top()); }
void SemiSpaceNewSpace::ZapUnusedMemory() {
if (!IsFromSpaceCommitted()) return;
@@ -961,7 +931,7 @@ void PagedSpaceForNewSpace::Grow() {
}
bool PagedSpaceForNewSpace::StartShrinking() {
- DCHECK_EQ(current_capacity_, target_capacity_);
+ DCHECK_GE(current_capacity_, target_capacity_);
DCHECK(heap()->tracer()->IsInAtomicPause());
size_t new_target_capacity =
RoundUp(std::max(initial_capacity_, 2 * Size()), Page::kPageSize);
@@ -985,13 +955,14 @@ void PagedSpaceForNewSpace::FinishShrinking() {
}
}
-void PagedSpaceForNewSpace::UpdateInlineAllocationLimit(size_t size_in_bytes) {
- PagedSpaceBase::UpdateInlineAllocationLimit(size_in_bytes);
+void PagedSpaceForNewSpace::UpdateInlineAllocationLimit() {
+ PagedSpaceBase::UpdateInlineAllocationLimit();
}
size_t PagedSpaceForNewSpace::AddPage(Page* page) {
current_capacity_ += Page::kPageSize;
- DCHECK_LE(current_capacity_, target_capacity_);
+ DCHECK_IMPLIES(!force_allocation_success_,
+ UsableCapacity() <= TotalCapacity());
return PagedSpaceBase::AddPage(page);
}
@@ -1011,7 +982,7 @@ bool PagedSpaceForNewSpace::PreallocatePages() {
while (current_capacity_ < target_capacity_) {
if (!TryExpandImpl()) return false;
}
- DCHECK_EQ(current_capacity_, target_capacity_);
+ DCHECK_GE(current_capacity_, target_capacity_);
return true;
}
@@ -1030,18 +1001,50 @@ void PagedSpaceForNewSpace::FreeLinearAllocationArea() {
PagedSpaceBase::FreeLinearAllocationArea();
}
-#ifdef VERIFY_HEAP
-void PagedSpaceForNewSpace::Verify(Isolate* isolate,
- ObjectVisitor* visitor) const {
- PagedSpaceBase::Verify(isolate, visitor);
+bool PagedSpaceForNewSpace::ShouldReleaseEmptyPage() const {
+ return current_capacity_ > target_capacity_;
+}
- DCHECK_EQ(current_capacity_, target_capacity_);
- DCHECK_EQ(current_capacity_, Page::kPageSize * CountTotalPages());
+void PagedSpaceForNewSpace::RefillFreeList() {
+ // New space is not used for concurrent allocations or allocations during
+ // evacuation.
+ DCHECK(heap_->IsMainThread() ||
+ (heap_->IsSharedMainThread() &&
+ !heap_->isolate()->is_shared_space_isolate()));
+ DCHECK(!is_compaction_space());
+
+ Sweeper* sweeper = heap()->sweeper();
+
+ Sweeper::SweptList swept_pages = sweeper->GetAllSweptPagesSafe(this);
+ if (swept_pages.empty()) return;
+
+ base::MutexGuard guard(mutex());
+ for (Page* p : swept_pages) {
+ DCHECK(!p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE));
+ RefineAllocatedBytesAfterSweeping(p);
+ RelinkFreeListCategories(p);
+ }
}
-#endif // VERIFY_HEAP
-bool PagedSpaceForNewSpace::ShouldReleasePage() const {
- return current_capacity_ > target_capacity_;
+bool PagedSpaceForNewSpace::AddPageBeyondCapacity(int size_in_bytes,
+ AllocationOrigin origin) {
+ DCHECK(heap()->sweeper()->IsSweepingDoneForSpace(NEW_SPACE));
+ if (!force_allocation_success_ &&
+ ((UsableCapacity() >= TotalCapacity()) ||
+ (TotalCapacity() - UsableCapacity() < Page::kPageSize)))
+ return false;
+ if (!heap()->CanExpandOldGeneration(Size() + heap()->new_lo_space()->Size() +
+ Page::kPageSize)) {
+ // Assuming all of new space is alive, doing a full GC and promoting all
+ // objects should still succeed. Don't let new space grow if it means it
+ // will exceed the available size of old space.
+ return false;
+ }
+ DCHECK_IMPLIES(heap()->incremental_marking()->IsMarking(),
+ heap()->incremental_marking()->IsMajorMarking());
+ if (!TryExpandImpl()) return false;
+ return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+ origin);
}
// -----------------------------------------------------------------------------
@@ -1062,17 +1065,5 @@ PagedNewSpace::~PagedNewSpace() {
paged_space_.TearDown();
}
-#ifdef VERIFY_HEAP
-void PagedNewSpace::Verify(Isolate* isolate) const {
- const Page* first_page = paged_space_.first_page();
-
- if (first_page) VerifyImpl(isolate, first_page, first_page->area_start());
-
- // Check paged-spaces.
- VerifyPointersVisitor visitor(heap());
- paged_space_.Verify(isolate, &visitor);
-}
-#endif // VERIFY_HEAP
-
} // namespace internal
} // namespace v8
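
AddPageBeyondCapacity above only grows the young generation when a worst-case full GC could still promote everything into old space. A schematic check with invented sizes (none of these numbers are measurements):

  // Assume: Size() == 30 MB, new_lo_space()->Size() == 2 MB,
  //         Page::kPageSize == 256 KB.
  // The extra page is added only if
  //     CanExpandOldGeneration(30 MB + 2 MB + 256 KB)
  // holds, i.e. only if old space still has enough headroom to absorb the
  // entire young generation plus the new page, should every object survive.
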
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index 09d11bc90c..c569aceb21 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -13,6 +13,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/heap.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
@@ -27,9 +28,6 @@ class SemiSpaceNewSpace;
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-using ParkedAllocationBuffer = std::pair<int, Address>;
-using ParkedAllocationBuffersVector = std::vector<ParkedAllocationBuffer>;
-
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
@@ -109,6 +107,9 @@ class SemiSpace final : public Space {
void PrependPage(Page* page);
void MovePageToTheEnd(Page* page);
+ void PauseAllocationObservers() override { UNREACHABLE(); }
+ void ResumeAllocationObservers() override { UNREACHABLE(); }
+
Page* InitializePage(MemoryChunk* chunk) final;
// Age mark accessors.
@@ -175,7 +176,10 @@ class SemiSpace final : public Space {
#endif
#ifdef VERIFY_HEAP
- virtual void Verify() const;
+ void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final {
+ UNREACHABLE();
+ }
+ void VerifyPageMetadata() const;
#endif
void AddRangeToActiveSystemPages(Address start, Address end);
@@ -242,8 +246,6 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
inline bool Contains(HeapObject o) const;
virtual bool ContainsSlow(Address a) const = 0;
- void ResetParkedAllocationBuffers();
-
#if DEBUG
void VerifyTop() const override;
#endif // DEBUG
@@ -278,29 +280,13 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
// Grow the capacity of the space.
virtual void Grow() = 0;
- // Shrink the capacity of the space.
- virtual void Shrink() = 0;
-
virtual bool ShouldBePromoted(Address) const = 0;
// Creates a filler object in the linear allocation area.
virtual void MakeLinearAllocationAreaIterable() = 0;
-#ifdef VERIFY_HEAP
- virtual void Verify(Isolate* isolate) const = 0;
- // VerifyImpl verifies objects on the space starting from |current_page| and
- // |current_address|. |current_address| should be a valid limit on
- // |current_page| (see BasicMemoryChunk::ContainsLimit).
- void VerifyImpl(Isolate* isolate, const Page* current_page,
- Address current_address) const;
-#endif
-
virtual void MakeIterable() = 0;
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- virtual void ClearUnusedObjectStartBitmaps() = 0;
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
virtual iterator begin() = 0;
virtual iterator end() = 0;
@@ -313,10 +299,7 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
virtual void Prologue() {}
- virtual void EvacuatePrologue() = 0;
- virtual void EvacuateEpilogue() = 0;
-
- virtual void ZapUnusedMemory() {}
+ virtual void GarbageCollectionEpilogue() = 0;
virtual bool IsPromotionCandidate(const MemoryChunk* page) const = 0;
@@ -330,8 +313,6 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
AllocationCounter allocation_counter_;
LinearAreaOriginalData linear_area_original_data_;
- ParkedAllocationBuffersVector parked_allocation_buffers_;
-
virtual void RemovePage(Page* page) = 0;
bool SupportsAllocationObserver() const final { return true; }
@@ -344,6 +325,9 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
// forwards most functions to the appropriate semispace.
class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
+ using ParkedAllocationBuffer = std::pair<int, Address>;
+ using ParkedAllocationBuffersVector = std::vector<ParkedAllocationBuffer>;
+
public:
static SemiSpaceNewSpace* From(NewSpace* space) {
DCHECK(!v8_flags.minor_mc);
@@ -363,7 +347,7 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
void Grow() final;
// Shrink the capacity of the semispaces.
- void Shrink() final;
+ void Shrink();
// Return the allocated bytes in the active semispace.
size_t Size() const final {
@@ -454,7 +438,8 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(size_t size_in_bytes) final;
+ void UpdateInlineAllocationLimit() final;
+ void UpdateInlineAllocationLimitForAllocation(size_t size_in_bytes);
// Try to switch the active semispace to a new, empty, page.
// Returns false if this isn't possible or reasonable (i.e., there
@@ -465,12 +450,17 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
bool AddParkedAllocationBuffer(int size_in_bytes,
AllocationAlignment alignment);
+ void ResetParkedAllocationBuffers();
+
// Creates a filler object in the linear allocation area and closes it.
void FreeLinearAllocationArea() final;
#ifdef VERIFY_HEAP
// Verify the active semispace.
- void Verify(Isolate* isolate) const final;
+ void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final;
+
+ // VerifyObjects verifies all objects in the active semi space.
+ void VerifyObjects(Isolate* isolate, SpaceVerificationVisitor* visitor) const;
#endif
#ifdef DEBUG
@@ -483,10 +473,6 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
void MakeAllPagesInFromSpaceIterable();
void MakeUnusedPagesInToSpaceIterable();
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- void ClearUnusedObjectStartBitmaps() override;
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
Page* first_page() final { return to_space_.first_page(); }
Page* last_page() final { return to_space_.last_page(); }
@@ -510,10 +496,11 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
void Prologue() final;
- void EvacuatePrologue() final;
- void EvacuateEpilogue() final;
+ void EvacuatePrologue();
- void ZapUnusedMemory() final;
+ void GarbageCollectionEpilogue() final;
+
+ void ZapUnusedMemory();
bool IsPromotionCandidate(const MemoryChunk* page) const final;
@@ -539,6 +526,8 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
SemiSpace from_space_;
VirtualMemory reservation_;
+ ParkedAllocationBuffersVector parked_allocation_buffers_;
+
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final;
@@ -565,7 +554,6 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
void Grow();
// Shrink the capacity of the space.
- void Shrink() { UNREACHABLE(); }
bool StartShrinking();
void FinishShrinking();
@@ -587,15 +575,17 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
}
// Reset the allocation pointer.
- void EvacuatePrologue() {}
- void EvacuateEpilogue() { allocated_linear_areas_ = 0; }
+ void GarbageCollectionEpilogue() {
+ allocated_linear_areas_ = 0;
+ force_allocation_success_ = false;
+ }
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(size_t size_in_bytes) final;
+ void UpdateInlineAllocationLimit() final;
// Try to switch the active semispace to a new, empty, page.
// Returns false if this isn't possible or reasonable (i.e., there
@@ -624,18 +614,29 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
}
#ifdef VERIFY_HEAP
- void Verify(Isolate* isolate, ObjectVisitor* visitor) const final;
+ void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final {
+ PagedSpaceBase::Verify(isolate, visitor);
+
+ DCHECK_EQ(current_capacity_, Page::kPageSize * CountTotalPages());
+ }
#endif
void MakeIterable() { free_list()->RepairLists(heap()); }
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- void ClearUnusedObjectStartBitmaps() {}
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ bool ShouldReleaseEmptyPage() const;
+
+ void RefillFreeList() final;
- bool ShouldReleasePage() const;
+ bool AddPageBeyondCapacity(int size_in_bytes, AllocationOrigin origin);
+
+ void ForceAllocationSuccessUntilNextGC() { force_allocation_success_ = true; }
private:
+ size_t UsableCapacity() const {
+ DCHECK_LE(free_list_->wasted_bytes(), current_capacity_);
+ return current_capacity_ - free_list_->wasted_bytes();
+ }
+
bool PreallocatePages();
const size_t initial_capacity_;
@@ -644,6 +645,8 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
size_t current_capacity_ = 0;
size_t allocated_linear_areas_ = 0;
+
+ bool force_allocation_success_ = false;
};
// TODO(v8:12612): PagedNewSpace is a bridge between the NewSpace interface and
@@ -669,7 +672,6 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
void Grow() final { paged_space_.Grow(); }
// Shrink the capacity of the space.
- void Shrink() final { paged_space_.Shrink(); }
bool StartShrinking() { return paged_space_.StartShrinking(); }
void FinishShrinking() { paged_space_.FinishShrinking(); }
@@ -732,8 +734,8 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(size_t size_in_bytes) final {
- paged_space_.UpdateInlineAllocationLimit(size_in_bytes);
+ void UpdateInlineAllocationLimit() final {
+ paged_space_.UpdateInlineAllocationLimit();
}
// Try to switch the active semispace to a new, empty, page.
@@ -748,7 +750,9 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
#ifdef VERIFY_HEAP
// Verify the active semispace.
- void Verify(Isolate* isolate) const final;
+ void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final {
+ paged_space_.Verify(isolate, visitor);
+ }
#endif
#ifdef DEBUG
@@ -774,8 +778,9 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
bool ShouldBePromoted(Address address) const final { return true; }
- void EvacuatePrologue() final { paged_space_.EvacuatePrologue(); }
- void EvacuateEpilogue() final { paged_space_.EvacuateEpilogue(); }
+ void GarbageCollectionEpilogue() final {
+ paged_space_.GarbageCollectionEpilogue();
+ }
bool IsPromotionCandidate(const MemoryChunk* page) const final {
return true;
@@ -793,18 +798,18 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
void MakeIterable() override { paged_space_.MakeIterable(); }
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- void ClearUnusedObjectStartBitmaps() override {
- paged_space_.ClearUnusedObjectStartBitmaps();
- }
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
// All operations on `memory_chunk_list_` should go through `paged_space_`.
heap::List<MemoryChunk>& memory_chunk_list() final { UNREACHABLE(); }
- bool ShouldReleasePage() const { return paged_space_.ShouldReleasePage(); }
+ bool ShouldReleaseEmptyPage() {
+ return paged_space_.ShouldReleaseEmptyPage();
+ }
void ReleasePage(Page* page) { paged_space_.ReleasePage(page); }
+ void ForceAllocationSuccessUntilNextGC() {
+ paged_space_.ForceAllocationSuccessUntilNextGC();
+ }
+
private:
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
diff --git a/deps/v8/src/heap/object-lock.h b/deps/v8/src/heap/object-lock.h
new file mode 100644
index 0000000000..7263e47296
--- /dev/null
+++ b/deps/v8/src/heap/object-lock.h
@@ -0,0 +1,50 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_OBJECT_LOCK_H_
+#define V8_HEAP_OBJECT_LOCK_H_
+
+#include "src/heap/memory-chunk.h"
+#include "src/objects/heap-object.h"
+
+namespace v8::internal {
+
+class ExclusiveObjectLock final {
+ public:
+ static void Lock(HeapObject heap_object) {
+ MemoryChunk::FromHeapObject(heap_object)->shared_mutex()->LockExclusive();
+ }
+ static void Unlock(HeapObject heap_object) {
+ MemoryChunk::FromHeapObject(heap_object)->shared_mutex()->UnlockExclusive();
+ }
+};
+
+class SharedObjectLock final {
+ public:
+ static void Lock(HeapObject heap_object) {
+ MemoryChunk::FromHeapObject(heap_object)->shared_mutex()->LockShared();
+ }
+ static void Unlock(HeapObject heap_object) {
+ MemoryChunk::FromHeapObject(heap_object)->shared_mutex()->UnlockShared();
+ }
+};
+
+template <typename LockType>
+class ObjectLockGuard final {
+ public:
+ explicit ObjectLockGuard(HeapObject object) : raw_object_(object) {
+ LockType::Lock(object);
+ }
+ ~ObjectLockGuard() { LockType::Unlock(raw_object_); }
+
+ private:
+ HeapObject raw_object_;
+};
+
+using ExclusiveObjectLockGuard = ObjectLockGuard<ExclusiveObjectLock>;
+using SharedObjectLockGuard = ObjectLockGuard<SharedObjectLock>;
+
+} // namespace v8::internal
+
+#endif // V8_HEAP_OBJECT_LOCK_H_
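
The new header only defines the lock wrappers; a hypothetical caller would rely on the RAII guards roughly as follows (the helper function and the mutation it protects are made up for illustration):

  #include "src/heap/object-lock.h"

  namespace v8::internal {

  // Hypothetical helper: performs a write that must not race with readers
  // holding a SharedObjectLockGuard on the same page.
  void MutateUnderPageLock(HeapObject object) {
    // Locks the shared mutex of the MemoryChunk backing `object` exclusively
    // and unlocks it automatically when the guard leaves scope.
    ExclusiveObjectLockGuard guard(object);
    // ... mutate fields of `object` here ...
  }

  }  // namespace v8::internal
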
diff --git a/deps/v8/src/heap/object-start-bitmap-inl.h b/deps/v8/src/heap/object-start-bitmap-inl.h
deleted file mode 100644
index 23fada00c2..0000000000
--- a/deps/v8/src/heap/object-start-bitmap-inl.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_OBJECT_START_BITMAP_INL_H_
-#define V8_HEAP_OBJECT_START_BITMAP_INL_H_
-
-#include <limits.h>
-#include <stdint.h>
-
-#include "include/v8-internal.h"
-#include "src/base/bits.h"
-#include "src/base/macros.h"
-#include "src/heap/object-start-bitmap.h"
-#include "src/heap/paged-spaces-inl.h"
-#include "src/heap/paged-spaces.h"
-
-namespace v8 {
-namespace internal {
-
-ObjectStartBitmap::ObjectStartBitmap(PtrComprCageBase cage_base, size_t offset)
- : cage_base_(cage_base), offset_(offset) {
- Clear();
-}
-
-Address ObjectStartBitmap::FindBasePtrImpl(Address maybe_inner_ptr) const {
- DCHECK_LE(offset(), maybe_inner_ptr);
- size_t object_offset = maybe_inner_ptr - offset();
- size_t object_start_number = object_offset / kAllocationGranularity;
- size_t cell_index = object_start_number / kBitsPerCell;
- DCHECK_GT(object_start_bit_map_.size(), cell_index);
- const size_t bit = object_start_number & kCellMask;
- // check if maybe_inner_ptr is the base pointer
- uint32_t byte =
- load(cell_index) & static_cast<uint32_t>((1 << (bit + 1)) - 1);
- while (byte == 0 && cell_index > 0) {
- byte = load(--cell_index);
- }
- if (byte == 0) {
- DCHECK_EQ(0, cell_index);
- return kNullAddress;
- }
- const int leading_zeroes = v8::base::bits::CountLeadingZeros(byte);
- DCHECK_GT(kBitsPerCell, leading_zeroes);
- object_start_number =
- (cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
- return StartIndexToAddress(object_start_number);
-}
-
-Address ObjectStartBitmap::FindBasePtr(Address maybe_inner_ptr) {
- Address base_ptr = FindBasePtrImpl(maybe_inner_ptr);
- if (base_ptr == maybe_inner_ptr) {
- DCHECK(CheckBit(base_ptr));
- return base_ptr;
- }
- // TODO(v8:12851): If the ObjectStartBitmap implementation stays, this part of
- // code involving the Page and the iteration through its objects is the only
- // connection with V8 internals. It should be moved to some different
- // abstraction.
- const Page* page = Page::FromAddress(offset_);
- DCHECK_EQ(page->area_start(), offset_);
-
-#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap) {
- Verify();
- }
-#endif // VERIFY_HEAP
-
- if (base_ptr == kNullAddress) base_ptr = offset_;
- DCHECK_LE(base_ptr, maybe_inner_ptr);
-
- const Address limit = page->area_end();
- while (base_ptr < limit) {
- SetBit(base_ptr);
- if (maybe_inner_ptr < base_ptr) break;
- const int size = HeapObject::FromAddress(base_ptr).Size(cage_base());
- if (maybe_inner_ptr < base_ptr + size) return base_ptr;
- base_ptr += size;
- DCHECK_LE(base_ptr, limit);
- }
- return kNullAddress;
-}
-
-void ObjectStartBitmap::SetBit(Address base_ptr) {
- size_t cell_index, object_bit;
- ObjectStartIndexAndBit(base_ptr, &cell_index, &object_bit);
- store(cell_index, load(cell_index) | static_cast<uint32_t>(1 << object_bit));
-}
-
-void ObjectStartBitmap::ClearBit(Address base_ptr) {
- size_t cell_index, object_bit;
- ObjectStartIndexAndBit(base_ptr, &cell_index, &object_bit);
- store(cell_index,
- load(cell_index) & static_cast<uint32_t>(~(1 << object_bit)));
-}
-
-bool ObjectStartBitmap::CheckBit(Address base_ptr) const {
- size_t cell_index, object_bit;
- ObjectStartIndexAndBit(base_ptr, &cell_index, &object_bit);
- return (load(cell_index) & static_cast<uint32_t>(1 << object_bit)) != 0;
-}
-
-void ObjectStartBitmap::store(size_t cell_index, uint32_t value) {
- object_start_bit_map_[cell_index] = value;
-}
-
-uint32_t ObjectStartBitmap::load(size_t cell_index) const {
- return object_start_bit_map_[cell_index];
-}
-
-void ObjectStartBitmap::ObjectStartIndexAndBit(Address base_ptr,
- size_t* cell_index,
- size_t* bit) const {
- const size_t object_offset = base_ptr - offset();
- DCHECK(!(object_offset & kAllocationMask));
- const size_t object_start_number = object_offset / kAllocationGranularity;
- *cell_index = object_start_number / kBitsPerCell;
- DCHECK_GT(kBitmapSize, *cell_index);
- *bit = object_start_number & kCellMask;
-}
-
-Address ObjectStartBitmap::StartIndexToAddress(
- size_t object_start_index) const {
- return offset() + (kAllocationGranularity * object_start_index);
-}
-
-template <typename Callback>
-inline void ObjectStartBitmap::Iterate(Callback callback) const {
- for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
- uint32_t value = load(cell_index);
- while (value != 0) {
- const int trailing_zeroes =
- v8::base::bits::CountTrailingZerosNonZero(value);
- DCHECK_GT(kBitsPerCell, trailing_zeroes);
- const size_t object_start_number =
- (cell_index * kBitsPerCell) + trailing_zeroes;
- const Address object_address = StartIndexToAddress(object_start_number);
- callback(object_address);
- // Clear current object bit in temporary value to advance iteration.
- value &= value - 1;
- }
- }
-}
-
-void ObjectStartBitmap::Clear() {
- std::fill(object_start_bit_map_.begin(), object_start_bit_map_.end(), 0);
-}
-
-#ifdef VERIFY_HEAP
-void ObjectStartBitmap::Verify() const {
- Page* page = Page::FromAddress(offset_);
- DCHECK_EQ(page->area_start(), offset_);
- Address next_object_in_page = page->area_start();
- const PtrComprCageBase cage = cage_base();
- Iterate([&next_object_in_page, cage](Address next_object_in_bitmap) {
- while (next_object_in_page != next_object_in_bitmap) {
- DCHECK_LT(next_object_in_page, next_object_in_bitmap);
- next_object_in_page +=
- HeapObject::FromAddress(next_object_in_page).Size(cage);
- }
- });
-}
-#endif // VERIFY_HEAP
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_OBJECT_START_BITMAP_INL_H_
diff --git a/deps/v8/src/heap/object-start-bitmap.h b/deps/v8/src/heap/object-start-bitmap.h
deleted file mode 100644
index 307d834cc2..0000000000
--- a/deps/v8/src/heap/object-start-bitmap.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_OBJECT_START_BITMAP_H_
-#define V8_HEAP_OBJECT_START_BITMAP_H_
-
-#include <array>
-
-#include "src/common/globals.h"
-#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
-
-namespace v8 {
-namespace internal {
-
-static constexpr size_t kAllocationGranularity = kTaggedSize;
-static constexpr size_t kAllocationMask = kAllocationGranularity - 1;
-static const int kPageSize = 1 << kPageSizeBits;
-
-// A bitmap for recording object starts. Objects have to be allocated at
-// minimum granularity of kGranularity.
-//
-// Depends on internals such as:
-// - kPageSize
-// - kAllocationGranularity
-//
-// ObjectStartBitmap does not support concurrent access and is used only by the
-// main thread.
-class V8_EXPORT_PRIVATE ObjectStartBitmap {
- public:
- // Granularity of addresses added to the bitmap.
- static constexpr size_t Granularity() { return kAllocationGranularity; }
-
- // Maximum number of entries in the bitmap.
- static constexpr size_t MaxEntries() {
- return kReservedForBitmap * kBitsPerCell;
- }
-
- inline ObjectStartBitmap(PtrComprCageBase cage_base, size_t offset);
-
- // Finds an object header based on a maybe_inner_ptr. If the object start
- // bitmap is not fully populated, this iterates through the objects of the
- // page to recalculate the part of the bitmap that is required, for this
- // method to return the correct result.
- inline Address FindBasePtr(Address maybe_inner_ptr);
-
- inline void SetBit(Address);
- inline void ClearBit(Address);
- inline bool CheckBit(Address) const;
-
- // Iterates all object starts recorded in the bitmap.
- //
- // The callback is of type
- // void(Address)
- // and is passed the object start address as parameter.
- template <typename Callback>
- inline void Iterate(Callback) const;
-
- // Clear the object start bitmap.
- inline void Clear();
-
-#ifdef VERIFY_HEAP
- // This method verifies that the object start bitmap is consistent with the
- // page's contents. That is, the bits that are set correspond to existing
- // objects in the page.
- inline void Verify() const;
-#endif // VERIFY_HEAP
-
- private:
- inline void store(size_t cell_index, uint32_t value);
- inline uint32_t load(size_t cell_index) const;
-
- PtrComprCageBase cage_base() const { return cage_base_; }
- Address offset() const { return offset_; }
-
- static constexpr size_t kBitsPerCell = sizeof(uint32_t) * CHAR_BIT;
- static constexpr size_t kCellMask = kBitsPerCell - 1;
- static constexpr size_t kBitmapSize =
- (kPageSize + ((kBitsPerCell * kAllocationGranularity) - 1)) /
- (kBitsPerCell * kAllocationGranularity);
- static constexpr size_t kReservedForBitmap =
- ((kBitmapSize + kAllocationMask) & ~kAllocationMask);
-
- inline void ObjectStartIndexAndBit(Address, size_t*, size_t*) const;
-
- inline Address StartIndexToAddress(size_t object_start_index) const;
-
- // Finds an object header based on a maybe_inner_ptr. Will search for an
- // object start in decreasing address order. If the object start bitmap is
- // not fully populated, this may incorrectly return |kNullPointer| or the base
- // pointer of a previous object on the page.
- inline Address FindBasePtrImpl(Address maybe_inner_ptr) const;
-
- PtrComprCageBase cage_base_;
- size_t offset_;
-
- std::array<uint32_t, kReservedForBitmap> object_start_bit_map_;
-
- FRIEND_TEST(V8ObjectStartBitmapTest, FindBasePtrExact);
- FRIEND_TEST(V8ObjectStartBitmapTest, FindBasePtrApproximate);
- FRIEND_TEST(V8ObjectStartBitmapTest, FindBasePtrIteratingWholeBitmap);
- FRIEND_TEST(V8ObjectStartBitmapTest, FindBasePtrNextCell);
- FRIEND_TEST(V8ObjectStartBitmapTest, FindBasePtrSameCell);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_OBJECT_START_BITMAP_H_
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index e15b5f332e..7b3e5428dd 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -22,6 +22,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/prototype-info.h"
#include "src/objects/slots.h"
#include "src/objects/templates.h"
#include "src/objects/visitors.h"
@@ -96,18 +97,16 @@ class FieldStatsCollector : public ObjectVisitorWithCageBases {
*tagged_fields_count_ += (end - start);
}
- V8_INLINE void VisitCodePointer(HeapObject host,
- CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ V8_INLINE void VisitCodePointer(Code host, CodeObjectSlot slot) override {
*tagged_fields_count_ += 1;
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- // Code target is most likely encoded as a relative 32-bit offset and not
- // as a full tagged value, so there's nothing to count.
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ // InstructionStream target is most likely encoded as a relative 32-bit
+ // offset and not as a full tagged value, so there's nothing to count.
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
*tagged_fields_count_ += 1;
}
@@ -149,7 +148,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == PropertyLocation::kField) {
- FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
+ FieldIndex index = FieldIndex::ForDetails(map, details);
// Stop on first out-of-object field.
if (!index.is_inobject()) break;
if (details.representation().IsSmi()) {
@@ -430,7 +429,7 @@ class ObjectStatsCollectorImpl {
// Details.
void RecordVirtualAllocationSiteDetails(AllocationSite site);
void RecordVirtualBytecodeArrayDetails(BytecodeArray bytecode);
- void RecordVirtualCodeDetails(Code code);
+ void RecordVirtualCodeDetails(InstructionStream code);
void RecordVirtualContext(Context context);
void RecordVirtualFeedbackVectorDetails(FeedbackVector vector);
void RecordVirtualFixedArrayDetails(FixedArray array);
@@ -754,8 +753,8 @@ void ObjectStatsCollectorImpl::CollectStatistics(
RecordVirtualMapDetails(Map::cast(obj));
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
- } else if (InstanceTypeChecker::IsCode(instance_type)) {
- RecordVirtualCodeDetails(Code::cast(obj));
+ } else if (InstanceTypeChecker::IsInstructionStream(instance_type)) {
+ RecordVirtualCodeDetails(InstructionStream::cast(obj));
} else if (InstanceTypeChecker::IsFunctionTemplateInfo(instance_type)) {
RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo::cast(obj));
@@ -847,8 +846,12 @@ bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase array) {
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject obj1, HeapObject obj2) {
- return obj1.is_null() || obj2.is_null() ||
- marking_state_->Color(obj1) == marking_state_->Color(obj2);
+ if (obj1.is_null() || obj2.is_null()) return true;
+ const auto obj1_marked =
+ obj1.InReadOnlySpace() || marking_state_->IsMarked(obj1);
+ const auto obj2_marked =
+ obj2.InReadOnlySpace() || marking_state_->IsMarked(obj2);
+ return obj1_marked == obj2_marked;
}
void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
@@ -905,9 +908,9 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
}
if (map.is_prototype_map()) {
- if (map.prototype_info().IsPrototypeInfo(cage_base())) {
- PrototypeInfo info = PrototypeInfo::cast(map.prototype_info());
- Object users = info.prototype_users();
+ PrototypeInfo prototype_info;
+ if (map.TryGetPrototypeInfo(&prototype_info)) {
+ Object users = prototype_info.prototype_users();
if (users.IsWeakFixedArray(cage_base())) {
RecordSimpleVirtualObjectStats(map, WeakArrayList::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
@@ -1032,19 +1035,21 @@ ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(CodeKind kind) {
} // namespace
-void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
- RecordSimpleVirtualObjectStats(HeapObject(), code,
+void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(
+ InstructionStream istream) {
+ Code code = istream.code(kAcquireLoad);
+ RecordSimpleVirtualObjectStats(HeapObject(), istream,
CodeKindToVirtualInstanceType(code.kind()));
- RecordSimpleVirtualObjectStats(code, code.relocation_info(),
+ RecordSimpleVirtualObjectStats(istream, code.relocation_info(),
ObjectStats::RELOC_INFO_TYPE);
if (CodeKindIsOptimizedJSFunction(code.kind())) {
Object source_position_table = code.source_position_table();
if (source_position_table.IsHeapObject()) {
- RecordSimpleVirtualObjectStats(code,
+ RecordSimpleVirtualObjectStats(istream,
HeapObject::cast(source_position_table),
ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
- RecordSimpleVirtualObjectStats(code, code.deoptimization_data(),
+ RecordSimpleVirtualObjectStats(istream, code.deoptimization_data(),
ObjectStats::DEOPTIMIZATION_DATA_TYPE);
DeoptimizationData input_data =
DeoptimizationData::cast(code.deoptimization_data());
@@ -1060,7 +1065,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
Object target = it.rinfo()->target_object(cage_base());
if (target.IsFixedArrayExact(cage_base())) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
+ istream, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
}
}
@@ -1093,11 +1098,10 @@ class ObjectStatsVisitor {
phase_(phase) {}
void Visit(HeapObject obj) {
- if (marking_state_->IsBlack(obj)) {
+ if (obj.InReadOnlySpace() || marking_state_->IsMarked(obj)) {
live_collector_->CollectStatistics(
obj, phase_, ObjectStatsCollectorImpl::CollectFieldStats::kYes);
} else {
- DCHECK(!marking_state_->IsGrey(obj));
dead_collector_->CollectStatistics(
obj, phase_, ObjectStatsCollectorImpl::CollectFieldStats::kNo);
}
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 371ed94f2b..9c121a6123 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -9,8 +9,8 @@
#include "src/objects/objects.h"
// These instance types do not exist for actual use but are merely introduced
-// for object stats tracing. In contrast to Code and FixedArray sub types
-// these types are not known to other counters outside of object stats
+// for object stats tracing. In contrast to InstructionStream and FixedArray sub
+// types these types are not known to other counters outside of object stats
// tracing.
//
// Update LAST_VIRTUAL_TYPE below when changing this macro.
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index b7279df847..a58dd29a92 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -6,7 +6,6 @@
#define V8_HEAP_OBJECTS_VISITING_INL_H_
#include "src/base/logging.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/objects/arguments.h"
@@ -18,6 +17,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/torque-defined-classes.h"
#include "src/objects/visitors.h"
@@ -90,30 +90,30 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
}
template <typename ResultType, typename ConcreteVisitor>
-void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
+void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointerIfNeeded(
HeapObject host) {
- DCHECK(!host.map_word(kRelaxedLoad).IsForwardingAddress());
- if (!static_cast<ConcreteVisitor*>(this)->ShouldVisitMapPointer()) return;
+ DCHECK(!host.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress());
+ if constexpr (!ConcreteVisitor::ShouldVisitMapPointer()) return;
static_cast<ConcreteVisitor*>(this)->VisitMapPointer(host);
}
-#define VISIT(TypeName) \
- template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \
- Map map, TypeName object) { \
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
- if (!visitor->ShouldVisit(object)) return ResultType(); \
- if (!visitor->AllowDefaultJSObjectVisit()) { \
- DCHECK_WITH_MSG(!map.IsJSObjectMap(), \
- "Implement custom visitor for new JSObject subclass in " \
- "concurrent marker"); \
- } \
- int size = TypeName::BodyDescriptor::SizeOf(map, object); \
- if (visitor->ShouldVisitMapPointer()) { \
- visitor->VisitMapPointer(object); \
- } \
- TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
- return static_cast<ResultType>(size); \
+#define VISIT(TypeName) \
+ template <typename ResultType, typename ConcreteVisitor> \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \
+ Map map, TypeName object) { \
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
+ if (!visitor->ShouldVisit(object)) return ResultType(); \
+ /* If you see the following DCHECK fail, then the size computation of \
+ * BodyDescriptor doesn't match the size returned via obj.Size(). This is \
+ * problematic as the GC requires those sizes to match for accounting \
+ * reasons. The fix likely involves adding a padding field in the object \
+ * definitions. */ \
+ DCHECK_EQ(object.SizeFromMap(map), \
+ TypeName::BodyDescriptor::SizeOf(map, object)); \
+ visitor->VisitMapPointerIfNeeded(object); \
+ const int size = TypeName::BodyDescriptor::SizeOf(map, object); \
+ TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
+ return static_cast<ResultType>(size); \
}
TYPED_VISITOR_ID_LIST(VISIT)
TORQUE_VISITOR_ID_LIST(VISIT)
@@ -131,20 +131,12 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
+ visitor->VisitMapPointerIfNeeded(object);
#ifdef V8_ENABLE_SANDBOX
// The following types have external pointers, which must be visited.
- // TODO(v8:10391) Consider adding custom visitor IDs for these and making this
- // block not depend on V8_ENABLE_SANDBOX.
- if (object.IsExternalOneByteString(cage_base())) {
- ExternalOneByteString::BodyDescriptor::IterateBody(map, object, size,
- visitor);
- } else if (object.IsExternalTwoByteString(cage_base())) {
- ExternalTwoByteString::BodyDescriptor::IterateBody(map, object, size,
- visitor);
- } else if (object.IsForeign(cage_base())) {
+ // TODO(v8:10391) Consider adding custom visitor IDs for these and making
+ // this block not depend on V8_ENABLE_SANDBOX.
+ if (object.IsForeign(cage_base())) {
Foreign::BodyDescriptor::IterateBody(map, object, size, visitor);
}
#endif // V8_ENABLE_SANDBOX
@@ -154,27 +146,14 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
Map map, JSObject object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
- JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
- return static_cast<ResultType>(size);
+ return VisitJSObjectSubclass<JSObject, JSObject::FastBodyDescriptor>(map,
+ object);
}
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
Map map, JSObject object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
- JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
- return static_cast<ResultType>(size);
+ return VisitJSObjectSubclass<JSObject, JSObject::BodyDescriptor>(map, object);
}
template <typename ResultType, typename ConcreteVisitor>
@@ -183,9 +162,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
+ visitor->VisitMapPointerIfNeeded(object);
StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -195,12 +172,29 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
Map map, FreeSpace object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
+ visitor->VisitMapPointerIfNeeded(object);
return static_cast<ResultType>(object.size(kRelaxedLoad));
}
+template <typename ResultType, typename ConcreteVisitor>
+template <typename T, typename TBodyDescriptor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectSubclass(
+ Map map, T object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return 0;
+ visitor->VisitMapPointerIfNeeded(object);
+ const int size = TBodyDescriptor::SizeOf(map, object);
+ const int used_size = map.UsedInstanceSize();
+ DCHECK_LE(used_size, size);
+ DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
+ // It is important to visit only the used fields and ignore the slack fields
+ // because the slack fields may be trimmed concurrently. For non-concurrent
+ // visitors this is merely an optimization in that we only visit the actually
+ // used fields.
+ TBodyDescriptor::IterateBody(map, object, used_size, visitor);
+ return size;
+}
+
template <typename ConcreteVisitor>
NewSpaceVisitor<ConcreteVisitor>::NewSpaceVisitor(Isolate* isolate)
: HeapVisitor<int, ConcreteVisitor>(isolate) {}
@@ -213,24 +207,21 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map map,
- JSObject object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- return visitor->VisitJSObject(map, object);
+int NewSpaceVisitor<ConcreteVisitor>::VisitSharedFunctionInfo(
+ Map map, SharedFunctionInfo object) {
+ UNREACHABLE();
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitSharedFunctionInfo(
- Map map, SharedFunctionInfo object) {
+int NewSpaceVisitor<ConcreteVisitor>::VisitBytecodeArray(Map map,
+ BytecodeArray object) {
UNREACHABLE();
- return 0;
}
template <typename ConcreteVisitor>
int NewSpaceVisitor<ConcreteVisitor>::VisitWeakCell(Map map,
WeakCell weak_cell) {
UNREACHABLE();
- return 0;
}
} // namespace internal
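
The HeapVisitor hunks above replace scattered ShouldVisitMapPointer() checks with a single VisitMapPointerIfNeeded() helper that consults the concrete visitor's static constexpr predicate through CRTP. The standalone sketch below is not V8 code: FakeObject, BaseVisitor, YoungGenVisitor, and VisitBody are invented names used only to illustrate the same dispatch pattern, assuming a subclass that opts out of map-pointer visits the way NewSpaceVisitor does.

#include <iostream>

// Minimal stand-in for a heap object; not the V8 type.
struct FakeObject {
  int payload = 42;
};

template <typename ConcreteVisitor>
class BaseVisitor {
 public:
  // Default policy; a subclass can shadow this with its own constexpr answer,
  // mirroring ShouldVisitMapPointer() in the patch above.
  static constexpr bool ShouldVisitMapPointer() { return true; }

  void Visit(FakeObject& object) {
    auto* visitor = static_cast<ConcreteVisitor*>(this);
    // Equivalent of VisitMapPointerIfNeeded(): the map pointer is only touched
    // when the concrete visitor's predicate says so.
    if (ConcreteVisitor::ShouldVisitMapPointer()) {
      visitor->VisitMapPointer(object);
    }
    visitor->VisitBody(object);
  }

  void VisitMapPointer(FakeObject&) { std::cout << "map pointer visited\n"; }
};

class YoungGenVisitor : public BaseVisitor<YoungGenVisitor> {
 public:
  // Young-generation visitors skip the map pointer, like NewSpaceVisitor.
  static constexpr bool ShouldVisitMapPointer() { return false; }

  void VisitBody(FakeObject& object) {
    std::cout << "body visited, payload=" << object.payload << "\n";
  }
};

int main() {
  FakeObject object;
  YoungGenVisitor visitor;
  visitor.Visit(object);  // Prints only the body line; the map pointer is skipped.
}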
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index e25de9fb1b..769012ead3 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -85,32 +85,6 @@ static void ClearWeakList(Heap* heap, Object list) {
}
template <>
-struct WeakListVisitor<CodeT> {
- static void SetWeakNext(CodeT code, Object next) {
- CodeDataContainerFromCodeT(code).set_next_code_link(next,
- UPDATE_WRITE_BARRIER);
- }
-
- static Object WeakNext(CodeT code) {
- return CodeDataContainerFromCodeT(code).next_code_link();
- }
-
- static HeapObject WeakNextHolder(CodeT code) {
- return CodeDataContainerFromCodeT(code);
- }
-
- static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
-
- static void VisitLiveObject(Heap*, CodeT, WeakObjectRetainer*) {}
-
- static void VisitPhantomObject(Heap* heap, CodeT code) {
- // Even though the code is dying, its code_data_container can still be
- // alive. Clear the next_code_link slot to avoid a dangling pointer.
- SetWeakNext(code, ReadOnlyRoots(heap).undefined_value());
- }
-};
-
-template <>
struct WeakListVisitor<Context> {
static void SetWeakNext(Context context, Object next) {
context.set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WRITE_BARRIER);
@@ -136,11 +110,6 @@ struct WeakListVisitor<Context> {
MarkCompactCollector::RecordSlot(context, slot,
HeapObject::cast(*slot));
}
- // Code objects are always allocated in Code space, we do not have to
- // visit them during scavenges.
- DoWeakList<CodeT>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
- DoWeakList<CodeT>(heap, context, retainer,
- Context::DEOPTIMIZED_CODE_LIST);
}
}
@@ -161,10 +130,7 @@ struct WeakListVisitor<Context> {
}
}
- static void VisitPhantomObject(Heap* heap, Context context) {
- ClearWeakList<CodeT>(heap, context.get(Context::OPTIMIZED_CODE_LIST));
- ClearWeakList<CodeT>(heap, context.get(Context::DEOPTIMIZED_CODE_LIST));
- }
+ static void VisitPhantomObject(Heap* heap, Context context) {}
};
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index c1d73fb8d9..8a19c05caa 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -7,6 +7,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/map.h"
+#include "src/objects/object-list-macros.h"
#include "src/objects/objects.h"
#include "src/objects/visitors.h"
@@ -21,17 +22,18 @@ namespace internal {
V(BytecodeArray) \
V(CallHandlerInfo) \
V(Cell) \
+ V(InstructionStream) \
V(Code) \
- V(CodeDataContainer) \
V(CoverageInfo) \
V(DataHandler) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
+ V(ExternalString) \
V(FeedbackCell) \
V(FeedbackMetadata) \
V(FixedDoubleArray) \
V(JSArrayBuffer) \
- V(JSDataView) \
+ V(JSDataViewOrRabGsabDataView) \
V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
@@ -49,6 +51,7 @@ namespace internal {
V(PropertyArray) \
V(PropertyCell) \
V(PrototypeInfo) \
+ V(SharedFunctionInfo) \
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
@@ -70,7 +73,8 @@ namespace internal {
IF_WASM(V, WasmSuspenderObject) \
IF_WASM(V, WasmResumeData) \
IF_WASM(V, WasmTypeInfo) \
- IF_WASM(V, WasmContinuationObject)
+ IF_WASM(V, WasmContinuationObject) \
+ IF_WASM(V, WasmNull)
#define FORWARD_DECLARE(TypeName) class TypeName;
TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
@@ -99,19 +103,18 @@ class HeapVisitor : public ObjectVisitorWithCageBases {
V8_INLINE ResultType Visit(HeapObject object);
V8_INLINE ResultType Visit(Map map, HeapObject object);
- // A callback for visiting the map pointer in the object header.
- void VisitMapPointer(HeapObject host);
- // Guard predicate for visiting the objects map pointer separately.
- V8_INLINE bool ShouldVisitMapPointer() { return true; }
-
protected:
// A guard predicate for visiting the object.
// If it returns false then the default implementations of the Visit*
- // functions bailout from iterating the object pointers.
+ // functions bail out from iterating the object pointers.
V8_INLINE bool ShouldVisit(HeapObject object) { return true; }
- // If this predicate returns false, then the heap visitor will fail
- // in default Visit implementation for subclasses of JSObject.
- V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
+
+  // If this predicate returns false, the default implementations of the
+  // Visit* functions bail out from visiting the map pointer.
+ V8_INLINE static constexpr bool ShouldVisitMapPointer() { return true; }
+
+ // Only visits the Map pointer if `ShouldVisitMapPointer()` returns true.
+ V8_INLINE void VisitMapPointerIfNeeded(HeapObject host);
#define VISIT(TypeName) \
V8_INLINE ResultType Visit##TypeName(Map map, TypeName object);
@@ -125,6 +128,9 @@ class HeapVisitor : public ObjectVisitorWithCageBases {
V8_INLINE ResultType VisitStruct(Map map, HeapObject object);
V8_INLINE ResultType VisitFreeSpace(Map map, FreeSpace object);
+ template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
+ V8_INLINE ResultType VisitJSObjectSubclass(Map map, T object);
+
template <typename T>
static V8_INLINE T Cast(HeapObject object);
};
@@ -132,22 +138,18 @@ class HeapVisitor : public ObjectVisitorWithCageBases {
template <typename ConcreteVisitor>
class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
public:
- V8_INLINE NewSpaceVisitor(Isolate* isolate);
+ V8_INLINE explicit NewSpaceVisitor(Isolate* isolate);
- V8_INLINE bool ShouldVisitMapPointer() { return false; }
+ protected:
+ V8_INLINE static constexpr bool ShouldVisitMapPointer() { return false; }
// Special cases for young generation.
-
V8_INLINE int VisitNativeContext(Map map, NativeContext object);
- V8_INLINE int VisitJSApiObject(Map map, JSObject object);
-
- int VisitBytecodeArray(Map map, BytecodeArray object) {
- UNREACHABLE();
- return 0;
- }
+ V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
+ V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
+ V8_INLINE int VisitWeakCell(Map map, WeakCell weak_cell);
- int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
- int VisitWeakCell(Map map, WeakCell weak_cell);
+ friend class HeapVisitor<int, ConcreteVisitor>;
};
class WeakObjectRetainer;
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index 91d053ddcd..43fdc2bf5e 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -39,7 +39,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
inline HeapObject Next() override;
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
@@ -81,7 +81,7 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller(cage_base())) {
- if (obj.IsCode(cage_base())) {
+ if (obj.IsInstructionStream(cage_base())) {
DCHECK_EQ(space_->identity(), CODE_SPACE);
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
@@ -116,7 +116,7 @@ V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
- if ((identity() != NEW_SPACE) && !is_compaction_space()) {
+ if (!is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
// running.
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 97b3f76258..d73e268b7a 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -84,7 +84,7 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
#endif // V8_COMPRESS_POINTERS
{
heap->MakeHeapIterable();
- DCHECK_IMPLIES(space->IsInlineAllocationEnabled(),
+ DCHECK_IMPLIES(!heap->IsInlineAllocationEnabled(),
!page->Contains(space->top()));
DCHECK(page->Contains(start_address));
DCHECK(page->SweepingDone());
@@ -142,50 +142,6 @@ void PagedSpaceBase::TearDown() {
accounting_stats_.Clear();
}
-void PagedSpaceBase::RefillFreeList() {
- // Any PagedSpace might invoke RefillFreeList. We filter all but our old
- // generation spaces out.
- DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
- identity() == NEW_SPACE || identity() == SHARED_SPACE);
-
- Sweeper* sweeper = heap()->sweeper();
- size_t added = 0;
-
- {
- Page* p = nullptr;
- while ((p = sweeper->GetSweptPageSafe(this)) != nullptr) {
- // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
- // entries here to make them unavailable for allocations.
- if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- p->ForAllFreeListCategories([this](FreeListCategory* category) {
- category->Reset(free_list());
- });
- }
-
- // Only during compaction pages can actually change ownership. This is
- // safe because there exists no other competing action on the page links
- // during compaction.
- if (is_compaction_space()) {
- DCHECK_NE(this, p->owner());
- DCHECK_NE(NEW_SPACE, identity());
- PagedSpaceBase* owner = reinterpret_cast<PagedSpaceBase*>(p->owner());
- base::MutexGuard guard(owner->mutex());
- owner->RefineAllocatedBytesAfterSweeping(p);
- owner->RemovePage(p);
- added += AddPage(p);
- added += p->wasted_memory();
- } else {
- base::MutexGuard guard(mutex());
- DCHECK_EQ(this, p->owner());
- RefineAllocatedBytesAfterSweeping(p);
- added += RelinkFreeListCategories(p);
- added += p->wasted_memory();
- }
- if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
- }
- }
-}
-
void PagedSpaceBase::MergeCompactionSpace(CompactionSpace* other) {
base::MutexGuard guard(mutex());
@@ -351,7 +307,8 @@ void PagedSpaceBase::RemovePage(Page* page) {
DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}
-void PagedSpaceBase::SetTopAndLimit(Address top, Address limit) {
+void PagedSpaceBase::SetTopAndLimit(Address top, Address limit, Address end) {
+ DCHECK_GE(end, limit);
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -359,10 +316,16 @@ void PagedSpaceBase::SetTopAndLimit(Address top, Address limit) {
base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
if (!is_compaction_space()) optional_guard.emplace(linear_area_lock());
- linear_area_original_data_.set_original_limit_relaxed(limit);
+ linear_area_original_data_.set_original_limit_relaxed(end);
linear_area_original_data_.set_original_top_release(top);
}
+void PagedSpaceBase::SetLimit(Address limit) {
+ DCHECK(SupportsExtendingLAB());
+ DCHECK_LE(limit, original_limit_relaxed());
+ allocation_info_.SetLimit(limit);
+}
+
size_t PagedSpaceBase::ShrinkPageToHighWaterMark(Page* page) {
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
@@ -428,8 +391,9 @@ int PagedSpaceBase::CountTotalPages() const {
return count;
}
-void PagedSpaceBase::SetLinearAllocationArea(Address top, Address limit) {
- SetTopAndLimit(top, limit);
+void PagedSpaceBase::SetLinearAllocationArea(Address top, Address limit,
+ Address end) {
+ SetTopAndLimit(top, limit, end);
if (top != kNullAddress && top != limit) {
Page* page = Page::FromAllocationAreaAddress(top);
if (identity() == NEW_SPACE) {
@@ -454,9 +418,17 @@ void PagedSpaceBase::DecreaseLimit(Address new_limit) {
}
ConcurrentAllocationMutex guard(this);
- SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit,
- SpaceAccountingMode::kSpaceAccounted);
+ Address old_max_limit = original_limit_relaxed();
+ if (!SupportsExtendingLAB()) {
+ DCHECK_EQ(old_max_limit, old_limit);
+ SetTopAndLimit(top(), new_limit, new_limit);
+ Free(new_limit, old_max_limit - new_limit,
+ SpaceAccountingMode::kSpaceAccounted);
+ } else {
+ SetLimit(new_limit);
+ heap()->CreateFillerObjectAt(new_limit,
+ static_cast<int>(old_max_limit - new_limit));
+ }
if (heap()->incremental_marking()->black_allocation() &&
identity() != NEW_SPACE) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
@@ -523,16 +495,25 @@ void PagedSpaceBase::FreeLinearAllocationArea() {
DCHECK_EQ(kNullAddress, current_limit);
return;
}
+ Address current_max_limit = original_limit_relaxed();
+ DCHECK_IMPLIES(!SupportsExtendingLAB(), current_max_limit == current_limit);
AdvanceAllocationObservers();
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+
+ if (identity() == CODE_SPACE) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(allocation_info_.top());
+ optional_scope.emplace(chunk);
+ }
+
if (identity() != NEW_SPACE && current_top != current_limit &&
heap()->incremental_marking()->black_allocation()) {
Page::FromAddress(current_top)
->DestroyBlackArea(current_top, current_limit);
}
- SetTopAndLimit(kNullAddress, kNullAddress);
+ SetTopAndLimit(kNullAddress, kNullAddress, kNullAddress);
DCHECK_GE(current_limit, current_top);
// The code page of the linear allocation area needs to be unprotected
@@ -543,10 +524,10 @@ void PagedSpaceBase::FreeLinearAllocationArea() {
GetUnprotectMemoryOrigin(is_compaction_space()));
}
- DCHECK_IMPLIES(
- current_limit - current_top >= 2 * kTaggedSize,
- heap()->marking_state()->IsWhite(HeapObject::FromAddress(current_top)));
- Free(current_top, current_limit - current_top,
+ DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
+ heap()->marking_state()->IsUnmarked(
+ HeapObject::FromAddress(current_top)));
+ Free(current_top, current_max_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
@@ -562,7 +543,7 @@ void PagedSpaceBase::ReleasePage(Page* page) {
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
- SetTopAndLimit(kNullAddress, kNullAddress);
+ SetTopAndLimit(kNullAddress, kNullAddress, kNullAddress);
}
if (identity() == CODE_SPACE) {
@@ -650,9 +631,15 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
heap()->UnprotectAndRegisterMemoryChunk(
page, GetUnprotectMemoryOrigin(is_compaction_space()));
}
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ if (!SupportsExtendingLAB()) {
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ end = limit;
+ } else {
+ DCHECK(heap()->IsMainThread());
+ heap()->CreateFillerObjectAt(limit, static_cast<int>(end - limit));
+ }
}
- SetLinearAllocationArea(start, limit);
+ SetLinearAllocationArea(start, limit, end);
AddRangeToActiveSystemPages(page, start, limit);
return true;
@@ -707,7 +694,8 @@ void PagedSpaceBase::Print() {}
#endif
#ifdef VERIFY_HEAP
-void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
+void PagedSpaceBase::Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
size_t external_space_bytes[kNumTypes];
@@ -721,6 +709,7 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
for (const Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK_IMPLIES(identity() != NEW_SPACE, !page->WasUsedForAllocation());
+ visitor->VerifyPage(page);
for (int i = 0; i < kNumTypes; i++) {
external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
@@ -737,26 +726,11 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
CHECK(end_of_previous_object <= object.address());
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map map = object.map(cage_base);
- CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->old_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- if (identity() != RO_SPACE && !v8_flags.verify_heap_skip_remembered_set) {
- HeapVerifier::VerifyRememberedSetFor(isolate->heap(), object);
- }
+ // Invoke verification method for each object.
+ visitor->VerifyObject(object);
// All the interior pointers should be contained in the heap.
int size = object.Size(cage_base);
- object.IterateBody(map, size, visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
@@ -773,12 +747,7 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
external_space_bytes[t] += external_page_bytes[t];
}
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- page->object_start_bitmap()->Verify();
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ visitor->VerifyPageDone(page);
}
for (int i = 0; i < kNumTypes; i++) {
if (i == ExternalBackingStoreType::kArrayBuffer) continue;
@@ -787,10 +756,17 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
}
CHECK(allocation_pointer_found_in_space);
- if (identity() == OLD_SPACE && !v8_flags.concurrent_array_buffer_sweeping) {
- size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ if (!v8_flags.concurrent_array_buffer_sweeping) {
+ if (identity() == OLD_SPACE) {
+ size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
+ CHECK_EQ(bytes, ExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer));
+ } else if (identity() == NEW_SPACE) {
+ DCHECK(v8_flags.minor_mc);
+ size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
+ CHECK_EQ(bytes, ExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer));
+ }
}
#ifdef DEBUG
@@ -807,7 +783,7 @@ void PagedSpaceBase::VerifyLiveBytes() const {
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
- if (marking_state->IsBlack(object)) {
+ if (marking_state->IsMarked(object)) {
black_size += object.Size(cage_base);
}
}
@@ -859,11 +835,11 @@ void PagedSpaceBase::VerifyCountersBeforeConcurrentSweeping() const {
}
#endif
-void PagedSpaceBase::UpdateInlineAllocationLimit(size_t min_size) {
+void PagedSpaceBase::UpdateInlineAllocationLimit() {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
- Address new_limit = ComputeLimit(top(), limit(), min_size);
+ Address new_limit = ComputeLimit(top(), limit(), 0);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, limit());
DecreaseLimit(new_limit);
@@ -902,11 +878,37 @@ bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin) {
origin);
}
+bool PagedSpaceBase::TryExtendLAB(int size_in_bytes) {
+ Address current_top = top();
+ if (current_top == kNullAddress) return false;
+ Address current_limit = limit();
+ Address max_limit = original_limit_relaxed();
+ if (current_top + size_in_bytes > max_limit) {
+ return false;
+ }
+ DCHECK(SupportsExtendingLAB());
+ AdvanceAllocationObservers();
+ Address new_limit = ComputeLimit(current_top, max_limit, size_in_bytes);
+ SetLimit(new_limit);
+ DCHECK(heap()->IsMainThread());
+ heap()->CreateFillerObjectAt(new_limit,
+ static_cast<int>(max_limit - new_limit));
+ Page* page = Page::FromAddress(current_top);
+ // No need to create a black allocation area since new space doesn't use
+ // black allocation.
+ DCHECK_EQ(NEW_SPACE, identity());
+ AddRangeToActiveSystemPages(page, current_limit, new_limit);
+ return true;
+}
+
bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
- const int kMaxPagesToSweep = 1;
+
+ if (TryExtendLAB(size_in_bytes)) return true;
+
+ static constexpr int kMaxPagesToSweep = 1;
if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;
@@ -920,22 +922,23 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
if (heap()->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
- {
- TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
- RefillFreeList();
- }
-
- // Retry the free list allocation.
- if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
- origin))
- return true;
+ if (heap()->sweeper()->ShouldRefillFreelistForSpace(identity())) {
+ {
+ TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id,
+ sweeping_scope_kind);
+ RefillFreeList();
+ }
- {
- TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
- if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep,
- size_in_bytes, origin))
+ // Retry the free list allocation.
+ if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+ origin))
return true;
}
+
+ if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
+ origin, sweeping_scope_id,
+ sweeping_scope_kind))
+ return true;
}
if (is_compaction_space()) {
@@ -954,7 +957,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
if (identity() != NEW_SPACE &&
heap()->ShouldExpandOldGenerationOnSlowAllocation(
- heap()->main_thread_local_heap()) &&
+ heap()->main_thread_local_heap(), origin) &&
heap()->CanExpandOldGeneration(AreaSize())) {
if (TryExpand(size_in_bytes, origin)) {
return true;
@@ -962,10 +965,9 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
}
// Try sweeping all pages.
- {
- TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
- if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) return true;
- }
+ if (ContributeToSweepingMain(0, 0, size_in_bytes, origin, sweeping_scope_id,
+ sweeping_scope_kind))
+ return true;
if (identity() != NEW_SPACE && heap()->gc_state() != Heap::NOT_IN_GC &&
!heap()->force_oom()) {
@@ -976,22 +978,26 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
return false;
}
-bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
- int max_pages, int size_in_bytes,
- AllocationOrigin origin) {
+bool PagedSpaceBase::ContributeToSweepingMain(
+ int required_freed_bytes, int max_pages, int size_in_bytes,
+ AllocationOrigin origin, GCTracer::Scope::ScopeId sweeping_scope_id,
+ ThreadKind sweeping_scope_kind) {
+ if (!heap()->sweeping_in_progress()) return false;
+ if (!heap()->sweeper()->AreSweeperTasksRunning() &&
+ heap()->sweeper()->IsSweepingDoneForSpace(identity()))
+ return false;
+
+ TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::SweepingMode sweeping_mode =
is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
: Sweeper::SweepingMode::kLazyOrConcurrent;
- if (heap()->sweeping_in_progress()) {
- heap()->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
- required_freed_bytes, max_pages);
- RefillFreeList();
- return TryAllocationFromFreeListMain(size_in_bytes, origin);
- }
- return false;
+ heap()->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
+ required_freed_bytes, max_pages);
+ RefillFreeList();
+ return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
void PagedSpaceBase::AddRangeToActiveSystemPages(Page* page, Address start,
@@ -1037,5 +1043,46 @@ size_t PagedSpaceBase::RelinkFreeListCategories(Page* page) {
return added;
}
+void PagedSpace::RefillFreeList() {
+ // Any PagedSpace might invoke RefillFreeList.
+ DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
+ identity() == SHARED_SPACE);
+
+ Sweeper* sweeper = heap()->sweeper();
+
+ size_t added = 0;
+
+ Page* p = nullptr;
+ while ((p = sweeper->GetSweptPageSafe(this)) != nullptr) {
+ // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
+ // entries here to make them unavailable for allocations.
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ p->ForAllFreeListCategories(
+ [this](FreeListCategory* category) { category->Reset(free_list()); });
+ }
+
+ // Only during compaction pages can actually change ownership. This is
+ // safe because there exists no other competing action on the page links
+ // during compaction.
+ if (is_compaction_space()) {
+ DCHECK_NE(this, p->owner());
+ DCHECK_NE(NEW_SPACE, identity());
+ PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
+ base::MutexGuard guard(owner->mutex());
+ owner->RefineAllocatedBytesAfterSweeping(p);
+ owner->RemovePage(p);
+ added += AddPage(p);
+ added += p->wasted_memory();
+ } else {
+ base::MutexGuard guard(mutex());
+ DCHECK_EQ(this, p->owner());
+ RefineAllocatedBytesAfterSweeping(p);
+ added += RelinkFreeListCategories(p);
+ added += p->wasted_memory();
+ }
+ if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
+ }
+}
+
} // namespace internal
} // namespace v8
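
Much of the paged-spaces change above teaches new space to extend an existing linear allocation buffer (SupportsExtendingLAB(), SetLimit(), TryExtendLAB()) instead of always dropping it and refilling from the free list. The toy sketch below only illustrates that bump-pointer idea; ToyLab and its members are invented names, and the real TryExtendLAB additionally recomputes the limit for allocation observers, creates a filler object up to the original limit, and updates active system pages.

#include <cstddef>
#include <cstdint>
#include <iostream>

// Toy linear allocation buffer with an extendable limit: allocation bumps
// `top` up to `limit`, and on failure the limit may be pushed further out,
// but never past `max_limit`.
struct ToyLab {
  uintptr_t top = 0;
  uintptr_t limit = 0;
  uintptr_t max_limit = 0;

  bool TryAllocate(size_t size, uintptr_t* out) {
    if (top + size > limit) return false;
    *out = top;
    top += size;
    return true;
  }

  bool TryExtend(size_t size) {
    if (top + size > max_limit) return false;
    limit = top + size;
    return true;
  }
};

int main() {
  ToyLab lab{/*top=*/0, /*limit=*/64, /*max_limit=*/256};
  uintptr_t address;
  std::cout << std::boolalpha;
  std::cout << lab.TryAllocate(128, &address) << "\n";  // false: beyond the current limit
  std::cout << lab.TryExtend(128) << "\n";              // true: still within max_limit
  std::cout << lab.TryAllocate(128, &address) << "\n";  // true after extending
}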
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index c6549f3da6..6e961bf684 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -18,6 +18,8 @@
#include "src/flags/flags.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/allocation-stats.h"
+#include "src/heap/heap-verifier.h"
+#include "src/heap/heap.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -184,13 +186,10 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
#ifdef VERIFY_HEAP
// Verify integrity of this space.
- virtual void Verify(Isolate* isolate, ObjectVisitor* visitor) const;
+ void Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const override;
void VerifyLiveBytes() const;
-
- // Overridden by subclasses to verify space-specific object
- // properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject obj) const {}
#endif
#ifdef DEBUG
@@ -226,7 +225,7 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Refills the free list from the corresponding free list filled by the
// sweeper.
- void RefillFreeList();
+ virtual void RefillFreeList() = 0;
base::Mutex* mutex() { return &space_mutex_; }
@@ -261,7 +260,7 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
- void SetLinearAllocationArea(Address top, Address limit);
+ void SetLinearAllocationArea(Address top, Address limit, Address end);
void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
void ReduceActiveSystemPages(Page* page,
@@ -292,19 +291,27 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
base::Optional<base::MutexGuard> guard_;
};
- bool SupportsConcurrentAllocation() const { return !is_compaction_space(); }
+ bool SupportsConcurrentAllocation() const {
+ return !is_compaction_space() && (identity() != NEW_SPACE);
+ }
// Set space linear allocation area.
- void SetTopAndLimit(Address top, Address limit);
+ void SetTopAndLimit(Address top, Address limit, Address end);
void DecreaseLimit(Address new_limit);
bool SupportsAllocationObserver() const override {
return !is_compaction_space();
}
+ protected:
+ // Updates the current lab limit without updating top, original_top or
+ // original_limit.
+ void SetLimit(Address limit);
+
+ bool SupportsExtendingLAB() const { return identity() == NEW_SPACE; }
+
void RefineAllocatedBytesAfterSweeping(Page* page);
- protected:
- void UpdateInlineAllocationLimit(size_t min_size) override;
+ void UpdateInlineAllocationLimit() override;
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -328,10 +335,10 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);
- V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
- int max_pages,
- int size_in_bytes,
- AllocationOrigin origin);
+ V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(
+ int required_freed_bytes, int max_pages, int size_in_bytes,
+ AllocationOrigin origin, GCTracer::Scope::ScopeId sweeping_scope_id,
+ ThreadKind sweeping_scope_kind);
// Refills LAB for EnsureLabMain. This function is space-dependent. Returns
// false if there is not enough space and the caller has to retry after
@@ -344,6 +351,8 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin);
+ V8_WARN_UNUSED_RESULT bool TryExtendLAB(int size_in_bytes);
+
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin);
@@ -383,6 +392,8 @@ class V8_EXPORT_PRIVATE PagedSpace : public PagedSpaceBase {
allocation_info, linear_area_original_data_,
compaction_space_kind) {}
+ void RefillFreeList() final;
+
private:
AllocationCounter allocation_counter_;
LinearAreaOriginalData linear_area_original_data_;
diff --git a/deps/v8/src/heap/parked-scope.h b/deps/v8/src/heap/parked-scope.h
index c3520e742c..de92c32bc9 100644
--- a/deps/v8/src/heap/parked-scope.h
+++ b/deps/v8/src/heap/parked-scope.h
@@ -70,6 +70,32 @@ class V8_NODISCARD ParkedMutexGuard {
base::Mutex* mutex_;
};
+// Scope that automatically parks the thread while blocking on the given
+// base::RecursiveMutex.
+class V8_NODISCARD ParkedRecursiveMutexGuard {
+ public:
+ explicit ParkedRecursiveMutexGuard(LocalIsolate* local_isolate,
+ base::RecursiveMutex* mutex)
+ : ParkedRecursiveMutexGuard(local_isolate->heap(), mutex) {}
+ explicit ParkedRecursiveMutexGuard(LocalHeap* local_heap,
+ base::RecursiveMutex* mutex)
+ : mutex_(mutex) {
+ DCHECK(AllowGarbageCollection::IsAllowed());
+ if (!mutex_->TryLock()) {
+ ParkedScope scope(local_heap);
+ mutex_->Lock();
+ }
+ }
+
+  ParkedRecursiveMutexGuard(const ParkedRecursiveMutexGuard&) = delete;
+  ParkedRecursiveMutexGuard& operator=(const ParkedRecursiveMutexGuard&) =
+      delete;
+
+ ~ParkedRecursiveMutexGuard() { mutex_->Unlock(); }
+
+ private:
+ base::RecursiveMutex* mutex_;
+};
+
template <base::MutexSharedType kIsShared,
base::NullBehavior Behavior = base::NullBehavior::kRequireNotNull>
class V8_NODISCARD ParkedSharedMutexGuardIf final {
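
The new ParkedRecursiveMutexGuard above follows the same shape as ParkedMutexGuard: take the lock without parking when it is uncontended, and only enter a ParkedScope for the duration of a blocking Lock() call. A minimal standalone sketch of that pattern, with std::recursive_mutex and the invented names FakeParkedScope and RecursiveGuardSketch standing in for the V8 types:

#include <iostream>
#include <mutex>

// Stand-in for V8's ParkedScope; here it only logs. Real parking tells the GC
// that this thread can be safely ignored while it blocks.
struct FakeParkedScope {
  FakeParkedScope() { std::cout << "thread parked\n"; }
  ~FakeParkedScope() { std::cout << "thread unparked\n"; }
};

// Try-lock first so the common uncontended path never parks; park only while
// actually blocking on lock().
class RecursiveGuardSketch {
 public:
  explicit RecursiveGuardSketch(std::recursive_mutex* mutex) : mutex_(mutex) {
    if (!mutex_->try_lock()) {
      FakeParkedScope parked;  // Parked only for the blocking acquisition.
      mutex_->lock();
    }
  }
  RecursiveGuardSketch(const RecursiveGuardSketch&) = delete;
  RecursiveGuardSketch& operator=(const RecursiveGuardSketch&) = delete;
  ~RecursiveGuardSketch() { mutex_->unlock(); }

 private:
  std::recursive_mutex* mutex_;
};

int main() {
  std::recursive_mutex mutex;
  RecursiveGuardSketch outer(&mutex);  // Uncontended: no parking.
  RecursiveGuardSketch inner(&mutex);  // Recursive re-entry succeeds via try_lock.
}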
diff --git a/deps/v8/src/heap/pretenuring-handler-inl.h b/deps/v8/src/heap/pretenuring-handler-inl.h
index 33c337ec46..45ac665404 100644
--- a/deps/v8/src/heap/pretenuring-handler-inl.h
+++ b/deps/v8/src/heap/pretenuring-handler-inl.h
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-void PretenturingHandler::UpdateAllocationSite(
+void PretenuringHandler::UpdateAllocationSite(
Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
@@ -24,7 +24,7 @@ void PretenturingHandler::UpdateAllocationSite(
DCHECK_IMPLIES(chunk->IsToPage(),
v8_flags.minor_mc ||
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
- DCHECK_IMPLIES(!chunk->InYoungGeneration(),
+ DCHECK_IMPLIES(!v8_flags.minor_mc && !chunk->InYoungGeneration(),
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
if (!v8_flags.allocation_site_pretenuring ||
@@ -34,6 +34,7 @@ void PretenturingHandler::UpdateAllocationSite(
AllocationMemento memento_candidate =
FindAllocationMemento<kForGC>(map, object);
if (memento_candidate.is_null()) return;
+ DCHECK(map.IsJSObjectMap());
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and rather have to postpone all checks
@@ -42,9 +43,9 @@ void PretenturingHandler::UpdateAllocationSite(
(*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}
-template <PretenturingHandler::FindMementoMode mode>
-AllocationMemento PretenturingHandler::FindAllocationMemento(
- Map map, HeapObject object) {
+template <PretenuringHandler::FindMementoMode mode>
+AllocationMemento PretenuringHandler::FindAllocationMemento(Map map,
+ HeapObject object) {
Address object_address = object.address();
Address memento_address =
object_address + ALIGN_TO_ALLOCATION_ALIGNMENT(object.SizeFromMap(map));
diff --git a/deps/v8/src/heap/pretenuring-handler.cc b/deps/v8/src/heap/pretenuring-handler.cc
index af12751be3..3668781f9a 100644
--- a/deps/v8/src/heap/pretenuring-handler.cc
+++ b/deps/v8/src/heap/pretenuring-handler.cc
@@ -4,7 +4,9 @@
#include "src/heap/pretenuring-handler.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/new-spaces.h"
#include "src/objects/allocation-site-inl.h"
@@ -12,12 +14,12 @@
namespace v8 {
namespace internal {
-PretenturingHandler::PretenturingHandler(Heap* heap)
+PretenuringHandler::PretenuringHandler(Heap* heap)
: heap_(heap), global_pretenuring_feedback_(kInitialFeedbackCapacity) {}
-PretenturingHandler::~PretenturingHandler() = default;
+PretenuringHandler::~PretenuringHandler() = default;
-void PretenturingHandler::MergeAllocationSitePretenuringFeedback(
+void PretenuringHandler::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
PtrComprCageBase cage_base(heap_->isolate());
AllocationSite site;
@@ -25,7 +27,7 @@ void PretenturingHandler::MergeAllocationSitePretenuringFeedback(
site = site_and_count.first;
MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
- site = AllocationSite::cast(map_word.ToForwardingAddress());
+ site = AllocationSite::cast(map_word.ToForwardingAddress(site));
}
// We have not validated the allocation site yet, since we have not
@@ -42,6 +44,18 @@ void PretenturingHandler::MergeAllocationSitePretenuringFeedback(
}
}
+bool PretenuringHandler::DeoptMaybeTenuredAllocationSites() const {
+ NewSpace* new_space = heap_->new_space();
+ if (heap_->tracer()->GetCurrentCollector() ==
+ GarbageCollector::MINOR_MARK_COMPACTOR) {
+ DCHECK(v8_flags.minor_mc);
+ DCHECK_NOT_NULL(new_space);
+ return heap_->IsFirstMaximumSizeMinorGC();
+ }
+ return new_space && new_space->IsAtMaximumCapacity() &&
+ !heap_->MaximumSizeMinorGC();
+}
+
namespace {
inline bool MakePretenureDecision(
@@ -133,12 +147,12 @@ bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
} // namespace
-void PretenturingHandler::RemoveAllocationSitePretenuringFeedback(
+void PretenuringHandler::RemoveAllocationSitePretenuringFeedback(
AllocationSite site) {
global_pretenuring_feedback_.erase(site);
}
-void PretenturingHandler::ProcessPretenuringFeedback() {
+void PretenuringHandler::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (v8_flags.allocation_site_pretenuring) {
int tenure_decisions = 0;
@@ -187,17 +201,20 @@ void PretenturingHandler::ProcessPretenuringFeedback() {
allocation_sites_to_pretenure_.reset();
}
- // Step 3: Deopt maybe tenured allocation sites.
- heap_->ForeachAllocationSite(
- heap_->allocation_sites_list(),
- [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
- DCHECK(site.IsAllocationSite());
- allocation_sites++;
- if (site.IsMaybeTenure()) {
- site.set_deopt_dependent_code(true);
- trigger_deoptimization = true;
- }
- });
+ // Step 3: Deopt maybe tenured allocation sites if necessary.
+ bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+ if (deopt_maybe_tenured) {
+ heap_->ForeachAllocationSite(
+ heap_->allocation_sites_list(),
+ [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
+ DCHECK(site.IsAllocationSite());
+ allocation_sites++;
+ if (site.IsMaybeTenure()) {
+ site.set_deopt_dependent_code(true);
+ trigger_deoptimization = true;
+ }
+ });
+ }
if (trigger_deoptimization) {
heap_->isolate()->stack_guard()->RequestDeoptMarkedAllocationSites();
@@ -207,12 +224,12 @@ void PretenturingHandler::ProcessPretenuringFeedback() {
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
PrintIsolate(heap_->isolate(),
- "pretenuring: visited_sites=%d "
+ "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
"active_sites=%d "
"mementos=%d tenured=%d not_tenured=%d\n",
- allocation_sites, active_allocation_sites,
- allocation_mementos_found, tenure_decisions,
- dont_tenure_decisions);
+ deopt_maybe_tenured ? 1 : 0, allocation_sites,
+ active_allocation_sites, allocation_mementos_found,
+ tenure_decisions, dont_tenure_decisions);
}
global_pretenuring_feedback_.clear();
@@ -220,7 +237,7 @@ void PretenturingHandler::ProcessPretenuringFeedback() {
}
}
-void PretenturingHandler::PretenureAllocationSiteOnNextCollection(
+void PretenuringHandler::PretenureAllocationSiteOnNextCollection(
AllocationSite site) {
if (!allocation_sites_to_pretenure_) {
allocation_sites_to_pretenure_.reset(
@@ -229,7 +246,7 @@ void PretenturingHandler::PretenureAllocationSiteOnNextCollection(
allocation_sites_to_pretenure_->Push(site);
}
-void PretenturingHandler::reset() { allocation_sites_to_pretenure_.reset(); }
+void PretenuringHandler::reset() { allocation_sites_to_pretenure_.reset(); }
} // namespace internal
} // namespace v8
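
MergeAllocationSitePretenuringFeedback, renamed above, accumulates per-thread memento counts into the handler's global feedback map before pretenuring decisions are made. A rough standalone sketch of that accumulation step, with plain int site ids standing in for AllocationSite and the forwarding-address and validation handling left out:

#include <cstddef>
#include <iostream>
#include <unordered_map>

// Stand-in feedback maps: allocation-site id -> number of mementos found.
using FeedbackMap = std::unordered_map<int, size_t>;

void MergeFeedback(FeedbackMap& global, const FeedbackMap& local) {
  for (const auto& [site, count] : local) {
    global[site] += count;  // operator[] value-initializes new entries to 0.
  }
}

int main() {
  FeedbackMap global{{1, 5}};
  FeedbackMap worker_a{{1, 2}, {7, 3}};
  FeedbackMap worker_b{{7, 1}};

  MergeFeedback(global, worker_a);
  MergeFeedback(global, worker_b);

  for (const auto& [site, count] : global) {
    std::cout << "site " << site << ": " << count << " mementos\n";
  }
}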
diff --git a/deps/v8/src/heap/pretenuring-handler.h b/deps/v8/src/heap/pretenuring-handler.h
index aeaefe0a50..56a23da91f 100644
--- a/deps/v8/src/heap/pretenuring-handler.h
+++ b/deps/v8/src/heap/pretenuring-handler.h
@@ -18,15 +18,15 @@ template <typename T>
class GlobalHandleVector;
class Heap;
-class PretenturingHandler final {
+class PretenuringHandler final {
public:
static const int kInitialFeedbackCapacity = 256;
using PretenuringFeedbackMap =
std::unordered_map<AllocationSite, size_t, Object::Hasher>;
enum FindMementoMode { kForRuntime, kForGC };
- explicit PretenturingHandler(Heap* heap);
- ~PretenturingHandler();
+ explicit PretenuringHandler(Heap* heap);
+ ~PretenuringHandler();
void reset();
@@ -68,7 +68,13 @@ class PretenturingHandler final {
// Removes an entry from the global pretenuring storage.
void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
+ bool HasPretenuringFeedback() const {
+ return !global_pretenuring_feedback_.empty();
+ }
+
private:
+ bool DeoptMaybeTenuredAllocationSites() const;
+
Heap* const heap_;
// The feedback storage is used to store allocation sites (keys) and how often
diff --git a/deps/v8/src/heap/read-only-heap-inl.h b/deps/v8/src/heap/read-only-heap-inl.h
index 0c12828584..ad027a7a0d 100644
--- a/deps/v8/src/heap/read-only-heap-inl.h
+++ b/deps/v8/src/heap/read-only-heap-inl.h
@@ -13,21 +13,29 @@ namespace v8 {
namespace internal {
// static
-ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- return ReadOnlyRoots(
- Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr())));
-#else
+ReadOnlyRoots ReadOnlyHeap::EarlyGetReadOnlyRoots(HeapObject object) {
#ifdef V8_SHARED_RO_HEAP
- // This fails if we are creating heap objects and the roots haven't yet been
- // copied into the read-only heap.
auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_;
- if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
+ if (shared_ro_heap && shared_ro_heap->roots_init_complete()) {
return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
}
+ return ReadOnlyRoots(GetHeapFromWritableObject(object));
+#else
+ return GetReadOnlyRoots(object);
#endif // V8_SHARED_RO_HEAP
+}
+
+// static
+ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
+#ifdef V8_SHARED_RO_HEAP
+ auto* shared_ro_heap = SoleReadOnlyHeap::shared_ro_heap_;
+  // If this check fails in code that runs during initialization, use
+  // EarlyGetReadOnlyRoots instead.
+ DCHECK(shared_ro_heap && shared_ro_heap->roots_init_complete());
+ return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
+#else
return ReadOnlyRoots(GetHeapFromWritableObject(object));
-#endif // V8_COMPRESS_POINTERS
+#endif // V8_SHARED_RO_HEAP
}
} // namespace internal
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 07db0d09da..9d47c6ccf9 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -99,6 +99,9 @@ void ReadOnlyHeap::SetUp(Isolate* isolate,
artifacts = InitializeSharedReadOnlyArtifacts();
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
+
+ // Ensure the first read-only page ends up first in the cage.
+ ro_heap->read_only_space()->EnsurePage();
artifacts->VerifyChecksum(read_only_snapshot_data, true);
}
} else {
@@ -117,9 +120,17 @@ void ReadOnlyHeap::DeserializeIntoIsolate(Isolate* isolate,
DCHECK_NOT_NULL(read_only_snapshot_data);
ReadOnlyDeserializer des(isolate, read_only_snapshot_data, can_rehash);
des.DeserializeIntoIsolate();
+ OnCreateRootsComplete(isolate);
InitFromIsolate(isolate);
}
+void ReadOnlyHeap::OnCreateRootsComplete(Isolate* isolate) {
+ DCHECK_NOT_NULL(isolate);
+ DCHECK(!roots_init_complete_);
+ if (IsReadOnlySpaceShared()) InitializeFromIsolateRoots(isolate);
+ roots_init_complete_ = true;
+}
+
void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
InitFromIsolate(isolate);
@@ -145,7 +156,8 @@ ReadOnlyHeap* ReadOnlyHeap::CreateInitalHeapForBootstrapping(
} else {
std::unique_ptr<SoleReadOnlyHeap> sole_ro_heap(
new SoleReadOnlyHeap(ro_space));
- // The global shared ReadOnlyHeap is only used without pointer compression.
+ // The global shared ReadOnlyHeap is used with shared cage and if pointer
+    // The global shared ReadOnlyHeap is used both with a shared pointer
+    // compression cage and when pointer compression is disabled.
ro_heap = std::move(sole_ro_heap);
}
@@ -169,10 +181,9 @@ void SoleReadOnlyHeap::InitializeFromIsolateRoots(Isolate* isolate) {
}
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
- DCHECK(!init_complete_);
+ DCHECK(roots_init_complete_);
read_only_space_->ShrinkPages();
if (IsReadOnlySpaceShared()) {
- InitializeFromIsolateRoots(isolate);
std::shared_ptr<ReadOnlyArtifacts> artifacts(
*read_only_artifacts_.Pointer());
@@ -187,7 +198,6 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
} else {
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
}
- init_complete_ = true;
}
void ReadOnlyHeap::OnHeapTearDown(Heap* heap) {
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index db73d00e58..33a6c4a09e 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -57,6 +57,9 @@ class ReadOnlyHeap {
// a deserializer was not previously provided to Setup. When V8_SHARED_RO_HEAP
// is enabled, this releases the ReadOnlyHeap creation lock.
void OnCreateHeapObjectsComplete(Isolate* isolate);
+ // Indicates that all objects reachable by the read only roots table have been
+ // set up.
+ void OnCreateRootsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
virtual void OnHeapTearDown(Heap* heap);
@@ -69,11 +72,14 @@ class ReadOnlyHeap {
V8_EXPORT_PRIVATE static bool Contains(Address address);
// Returns whether the object resides in the read-only space.
V8_EXPORT_PRIVATE static bool Contains(HeapObject object);
- // Gets read-only roots from an appropriate root list: shared read-only root
- // list if the shared read-only heap has been initialized or the isolate
- // specific roots table.
+  // Gets read-only roots from an appropriate root list. The shared read-only
+  // roots must already be initialized.
V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots(
HeapObject object);
+  // Returns the current isolate's roots table during initialization, as
+  // opposed to the shared one, in case the latter is not initialized yet.
+ V8_EXPORT_PRIVATE inline static ReadOnlyRoots EarlyGetReadOnlyRoots(
+ HeapObject object);
// Extends the read-only object cache with new zero smi and returns a
// reference to it.
@@ -87,7 +93,7 @@ class ReadOnlyHeap {
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
- static bool IsReadOnlySpaceShared() {
+ static constexpr bool IsReadOnlySpaceShared() {
return V8_SHARED_RO_HEAP_BOOL &&
(!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL);
}
@@ -96,6 +102,8 @@ class ReadOnlyHeap {
virtual void InitializeFromIsolateRoots(Isolate* isolate) {}
virtual bool IsOwnedByIsolate() { return true; }
+ bool roots_init_complete() const { return roots_init_complete_; }
+
protected:
friend class ReadOnlyArtifacts;
friend class PointerCompressedReadOnlyArtifacts;
@@ -115,7 +123,7 @@ class ReadOnlyHeap {
// (unless sharing is disabled).
void InitFromIsolate(Isolate* isolate);
- bool init_complete_ = false;
+ bool roots_init_complete_ = false;
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 7385fd2353..869668245a 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -15,18 +15,21 @@
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot-data.h"
+#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/snapshot-utils.h"
namespace v8 {
namespace internal {
void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
- Address src_base = GetIsolateRootAddress(src[0]);
+ Address src_base =
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(src[0]);
for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
dst[i] = src[i] - src_base + new_base;
}
@@ -188,7 +191,7 @@ ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
Address original_address = original_object.ptr();
Address new_address =
isolate_root +
- V8HeapCompressionScheme::CompressTagged(original_address);
+ V8HeapCompressionScheme::CompressObject(original_address);
Object new_object = Object(new_address);
cache.push_back(new_object);
}
@@ -239,7 +242,7 @@ void PointerCompressedReadOnlyArtifacts::Initialize(
shared_memory_.push_back(std::move(shared_memory));
// This is just CompressTagged but inlined so it will always compile.
Tagged_t compressed_address =
- V8HeapCompressionScheme::CompressTagged(page->address());
+ V8HeapCompressionScheme::CompressAny(page->address());
page_offsets_.push_back(compressed_address);
// 3. Update the accounting stats so the allocated bytes are for the new
@@ -336,7 +339,6 @@ ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
std::move(reservation)) {
allocated_bytes_ = 0;
SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
- heap->non_atomic_marking_state()->bitmap(this)->MarkAllBits();
}
void ReadOnlyPage::MakeHeaderRelocatable() {
@@ -440,12 +442,7 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
cur_addr_ += ALIGN_TO_ALLOCATION_ALIGNMENT(obj_size);
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
- if (obj.IsCode()) {
- DCHECK(Code::cast(obj).is_builtin());
- DCHECK_CODEOBJECT_SIZE(obj_size, space_);
- } else {
- DCHECK_OBJECT_SIZE(obj_size);
- }
+ DCHECK_OBJECT_SIZE(obj_size);
return obj;
}
}
@@ -459,33 +456,9 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
} // namespace
#ifdef VERIFY_HEAP
-namespace {
-class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
- public:
- explicit VerifyReadOnlyPointersVisitor(Heap* heap)
- : VerifyPointersVisitor(heap) {}
-
- protected:
- void VerifyPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- if (!host.is_null()) {
- CHECK(ReadOnlyHeap::Contains(host.map()));
- }
- VerifyPointersVisitor::VerifyPointers(host, start, end);
-
- for (MaybeObjectSlot current = start; current < end; ++current) {
- HeapObject heap_object;
- if ((*current)->GetHeapObject(&heap_object)) {
- CHECK(ReadOnlyHeap::Contains(heap_object));
- }
- }
- }
-};
-} // namespace
-
-void ReadOnlySpace::Verify(Isolate* isolate) const {
+void ReadOnlySpace::Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
bool allocation_pointer_found_in_space = top_ == limit_;
- VerifyReadOnlyPointersVisitor visitor(isolate->heap());
for (BasicMemoryChunk* page : pages_) {
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
@@ -494,6 +467,8 @@ void ReadOnlySpace::Verify(Isolate* isolate) const {
CHECK_EQ(page->owner(), this);
}
+ visitor->VerifyPage(page);
+
if (page == Page::FromAllocationAreaAddress(top_)) {
allocation_pointer_found_in_space = true;
}
@@ -504,24 +479,15 @@ void ReadOnlySpace::Verify(Isolate* isolate) const {
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
CHECK(end_of_previous_object <= object.address());
- Map map = object.map();
- CHECK(map.IsMap());
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
+ visitor->VerifyObject(object);
// All the interior pointers should be contained in the heap.
int size = object.Size();
- object.IterateBody(map, size, &visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
-
- CHECK(!object.IsExternalString());
- CHECK(!object.IsJSArrayBuffer());
}
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ visitor->VerifyPageDone(page);
}
CHECK(allocation_pointer_found_in_space);
@@ -574,11 +540,6 @@ void ReadOnlySpace::FreeLinearAllocationArea() {
return;
}
- // Clear the bits in the unused black area.
- ReadOnlyPage* page = pages_.back();
- heap()->marking_state()->bitmap(page)->ClearRange(
- page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));
-
heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_));
BasicMemoryChunk::UpdateHighWaterMark(top_);
@@ -587,6 +548,15 @@ void ReadOnlySpace::FreeLinearAllocationArea() {
limit_ = kNullAddress;
}
+void ReadOnlySpace::EnsurePage() {
+ if (pages_.empty()) EnsureSpaceForAllocation(1);
+ CHECK(!pages_.empty());
+  // For all configurations where static roots are supported, the read-only
+  // roots are currently allocated in the first page of the cage.
+ CHECK_IMPLIES(V8_STATIC_ROOTS_BOOL,
+ heap_->isolate()->cage_base() == pages_.back()->address());
+}
+
void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
if (top_ + size_in_bytes <= limit_) {
return;
@@ -682,15 +652,9 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
- AllocationResult result =
- USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
- HeapObject heap_obj;
- if (result.To(&heap_obj)) {
- DCHECK(heap()->marking_state()->IsBlack(heap_obj));
- }
- return result;
+ return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
+ : AllocateRawUnaligned(size_in_bytes);
}
size_t ReadOnlyPage::ShrinkToHighWaterMark() {
@@ -784,5 +748,46 @@ SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
pages_ = artifacts->pages();
}
+void ReadOnlySpace::InitFromMemoryDump(Isolate* isolate,
+ SnapshotByteSource* in) {
+ size_t num_pages = in->GetInt();
+ auto cage = isolate->GetPtrComprCage();
+
+ CHECK_LT(num_pages, 10);
+
+ auto first_page = cage->base() + in->GetInt();
+
+ for (size_t i = 0; i < num_pages; ++i) {
+ int size = in->GetInt();
+ ReadOnlyPage* chunk;
+ if (i == 0) {
+ chunk =
+ heap()->memory_allocator()->AllocateReadOnlyPage(this, first_page);
+ // If this fails we probably allocated r/o space too late.
+ CHECK_EQ(reinterpret_cast<void*>(first_page), chunk);
+ } else {
+ chunk = heap()->memory_allocator()->AllocateReadOnlyPage(this);
+ }
+
+ capacity_ += AreaSize();
+
+ AccountCommitted(chunk->size());
+ CHECK_NOT_NULL(chunk);
+
+ CHECK_LE(chunk->area_start() + size, chunk->area_end());
+ in->CopyRaw(reinterpret_cast<void*>(chunk->area_start()), size);
+ chunk->IncreaseAllocatedBytes(size);
+ chunk->high_water_mark_ = (chunk->area_start() - chunk->address()) + size;
+
+ DCHECK_NE(chunk->allocated_bytes(), 0);
+ accounting_stats_.IncreaseCapacity(chunk->area_size());
+ accounting_stats_.IncreaseAllocatedBytes(chunk->allocated_bytes(), chunk);
+ pages_.push_back(chunk);
+
+ top_ = chunk->area_start() + size;
+ limit_ = chunk->area_end();
+ }
+}
+
} // namespace internal
} // namespace v8
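
The new ReadOnlySpace::InitFromMemoryDump above reads a page count, an offset of the first page relative to the cage base, and then a (size, raw bytes) pair per page. The toy reader below mirrors that stream layout under stated assumptions: ToyByteSource is an invented stand-in for SnapshotByteSource, and the fixed 4-byte little-endian integer encoding is an assumption for illustration, not V8's actual variable-length format.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// A toy byte source exposing the two operations InitFromMemoryDump relies on.
class ToyByteSource {
 public:
  explicit ToyByteSource(std::vector<uint8_t> data) : data_(std::move(data)) {}

  uint32_t GetInt() {
    uint32_t value;
    std::memcpy(&value, data_.data() + pos_, sizeof(value));
    pos_ += sizeof(value);
    return value;
  }

  void CopyRaw(void* dst, size_t size) {
    std::memcpy(dst, data_.data() + pos_, size);
    pos_ += size;
  }

 private:
  std::vector<uint8_t> data_;
  size_t pos_ = 0;
};

int main() {
  // Dump layout as read above: [num_pages][first_page_offset], then one
  // [payload_size][payload bytes...] pair per page.
  std::vector<uint8_t> dump = {1, 0,  0, 0,     // num_pages = 1
                               0, 16, 0, 0,     // first page offset = 4096
                               3, 0,  0, 0,     // page payload size = 3
                               'r', 'o', '!'};  // page payload
  ToyByteSource in(std::move(dump));

  uint32_t num_pages = in.GetInt();
  uint32_t first_page_offset = in.GetInt();
  std::cout << "pages=" << num_pages << " first_offset=" << first_page_offset << "\n";

  for (uint32_t i = 0; i < num_pages; ++i) {
    uint32_t size = in.GetInt();
    std::string payload(size, '\0');
    in.CopyRaw(payload.data(), size);  // Non-const data() is available since C++17.
    std::cout << "page " << i << ": " << payload << "\n";
  }
}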
diff --git a/deps/v8/src/heap/read-only-spaces.h b/deps/v8/src/heap/read-only-spaces.h
index bcb8cd99eb..1f52b5fa4d 100644
--- a/deps/v8/src/heap/read-only-spaces.h
+++ b/deps/v8/src/heap/read-only-spaces.h
@@ -11,9 +11,11 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
+#include "src/heap/allocation-result.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
@@ -22,7 +24,7 @@ namespace internal {
class MemoryAllocator;
class ReadOnlyHeap;
-class SnapshotData;
+class SnapshotByteSource;
class ReadOnlyPage : public BasicMemoryChunk {
public:
@@ -223,7 +225,7 @@ class ReadOnlySpace : public BaseSpace {
bool ContainsSlow(Address addr) const;
V8_EXPORT_PRIVATE void ShrinkPages();
#ifdef VERIFY_HEAP
- void Verify(Isolate* isolate) const;
+ void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final;
#ifdef DEBUG
void VerifyCounters(Heap* heap) const;
#endif // DEBUG
@@ -234,6 +236,11 @@ class ReadOnlySpace : public BaseSpace {
Address FirstPageAddress() const { return pages_.front()->address(); }
+ void InitFromMemoryDump(Isolate* isolate, SnapshotByteSource* source);
+
+  // Ensures the read-only space has at least one allocated page.
+ void EnsurePage();
+
protected:
friend class SingleCopyReadOnlyArtifacts;
@@ -267,6 +274,9 @@ class ReadOnlySpace : public BaseSpace {
size_t capacity_;
const size_t area_size_;
+
+ friend class Heap;
+ friend class ReadOnlySerializer; // For Unseal.
};
class SharedReadOnlySpace : public ReadOnlySpace {
diff --git a/deps/v8/src/heap/reference-summarizer.cc b/deps/v8/src/heap/reference-summarizer.cc
index 394d1f4af0..2e8fb20a97 100644
--- a/deps/v8/src/heap/reference-summarizer.cc
+++ b/deps/v8/src/heap/reference-summarizer.cc
@@ -48,13 +48,11 @@ class ReferenceSummarizerMarkingState final {
// Standard marking visitor functions:
- bool IsWhite(HeapObject obj) const { return true; }
-
+ bool GreyToBlack(HeapObject obj) { return true; }
bool IsBlackOrGrey(HeapObject obj) const { return false; }
- bool WhiteToGrey(HeapObject obj) { return true; }
-
- bool GreyToBlack(HeapObject obj) { return true; }
+ bool TryMark(HeapObject obj) { return true; }
+ bool IsUnmarked(HeapObject obj) const { return true; }
// Adds a retaining relationship found by the marking visitor.
void AddStrongReferenceForReferenceSummarizer(HeapObject host,
diff --git a/deps/v8/src/heap/remembered-set-inl.h b/deps/v8/src/heap/remembered-set-inl.h
index 03e22cb806..d30f887122 100644
--- a/deps/v8/src/heap/remembered-set-inl.h
+++ b/deps/v8/src/heap/remembered-set-inl.h
@@ -20,30 +20,33 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
Callback callback) {
switch (slot_type) {
case SlotType::kCodeEntry: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code(),
+ InstructionStream());
return UpdateCodeTarget(&rinfo, callback);
}
case SlotType::kConstPoolCodeEntry: {
return UpdateCodeEntry(addr, callback);
}
case SlotType::kEmbeddedObjectCompressed: {
- RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
+ RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code(),
+ InstructionStream());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case SlotType::kEmbeddedObjectFull: {
- RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
+ RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code(),
+ InstructionStream());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case SlotType::kConstPoolEmbeddedObjectCompressed: {
HeapObject old_target =
- HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+ HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
heap->isolate(), base::Memory<Tagged_t>(addr))));
HeapObject new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
base::Memory<Tagged_t>(addr) =
- V8HeapCompressionScheme::CompressTagged(new_target.ptr());
+ V8HeapCompressionScheme::CompressObject(new_target.ptr());
}
return result;
}
@@ -61,22 +64,25 @@ HeapObject UpdateTypedSlotHelper::GetTargetObject(Heap* heap,
Address addr) {
switch (slot_type) {
case SlotType::kCodeEntry: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
- return Code::GetCodeFromTargetAddress(rinfo.target_address());
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code(),
+ InstructionStream());
+ return InstructionStream::FromTargetAddress(rinfo.target_address());
}
case SlotType::kConstPoolCodeEntry: {
- return Code::GetObjectFromEntryAddress(addr);
+ return InstructionStream::FromEntryAddress(addr);
}
case SlotType::kEmbeddedObjectCompressed: {
- RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
+ RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code(),
+ InstructionStream());
return rinfo.target_object(heap->isolate());
}
case SlotType::kEmbeddedObjectFull: {
- RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
+ RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code(),
+ InstructionStream());
return rinfo.target_object(heap->isolate());
}
case SlotType::kConstPoolEmbeddedObjectCompressed: {
- Address full = V8HeapCompressionScheme::DecompressTaggedAny(
+ Address full = V8HeapCompressionScheme::DecompressTagged(
heap->isolate(), base::Memory<Tagged_t>(addr));
return HeapObject::cast(Object(full));
}
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index d7dac8809d..1a2ebb0981 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -79,7 +79,7 @@ class RememberedSetOperations {
slot_set->Iterate(
chunk->address(), start_bucket, end_bucket,
[start, end](MaybeObjectSlot slot) {
- CHECK(!base::IsInRange(slot.address(), start, end + 1));
+ CHECK(slot.address() < start || slot.address() >= end);
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
@@ -102,6 +102,19 @@ class RememberedSet : public AllStatic {
RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
}
+ // Given a page and a slot set, this function merges |other_slot_set| into
+ // the page's slot set. |other_slot_set| must not be used after this call.
+ static void MergeAndDelete(MemoryChunk* chunk, SlotSet* other_slot_set) {
+ static_assert(type == RememberedSetType::OLD_TO_NEW);
+ SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
+ if (slot_set == nullptr) {
+ chunk->set_slot_set<RememberedSetType::OLD_TO_NEW>(other_slot_set);
+ return;
+ }
+ slot_set->Merge(other_slot_set, chunk->buckets());
+ SlotSet::Delete(other_slot_set, chunk->buckets());
+ }
+
// Given a page and a slot in that page, this function returns true if
// the remembered set contains the slot.
static bool Contains(MemoryChunk* chunk, Address slot_addr) {
@@ -283,9 +296,7 @@ class RememberedSet : public AllStatic {
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- chunk->ReleaseSlotSet<OLD_TO_CODE>();
- }
+ chunk->ReleaseSlotSet<OLD_TO_CODE>();
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
@@ -312,8 +323,8 @@ class UpdateTypedSlotHelper {
template <typename Callback>
static SlotCallbackResult UpdateCodeEntry(Address entry_address,
Callback callback) {
- Code code = Code::GetObjectFromEntryAddress(entry_address);
- Code old_code = code;
+ InstructionStream code = InstructionStream::FromEntryAddress(entry_address);
+ InstructionStream old_code = code;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
@@ -328,12 +339,14 @@ class UpdateTypedSlotHelper {
static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
Callback callback) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Code new_target = old_target;
+ InstructionStream old_target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
+ InstructionStream new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
- rinfo->set_target_address(Code::cast(new_target).raw_instruction_start());
+ rinfo->set_target_address(
+ InstructionStream::cast(new_target).instruction_start());
}
return result;
}
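
The new RememberedSet::MergeAndDelete above transfers ownership of a detached slot set into the page's own set. As a minimal sketch of that ownership contract only, with std::set and a hypothetical PageStub standing in for V8's SlotSet and MemoryChunk:

#include <memory>
#include <set>
#include <utility>

using SlotOffsets = std::set<unsigned>;

struct PageStub {
  std::unique_ptr<SlotOffsets> slot_set;  // null until a slot is recorded
};

// Merges |other| into the page's set. |other| is consumed here and must not be
// used by the caller afterwards, mirroring the comment in the patch.
void MergeAndDelete(PageStub* page, std::unique_ptr<SlotOffsets> other) {
  if (page->slot_set == nullptr) {
    page->slot_set = std::move(other);  // adopt the whole set
    return;
  }
  page->slot_set->insert(other->begin(), other->end());
  // |other| is destroyed on return, analogous to SlotSet::Delete.
}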
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index a08f75d3d8..6b9bf9e5c6 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -26,8 +26,7 @@
namespace v8 {
namespace internal {
-IsolateSafepoint::IsolateSafepoint(Heap* heap)
- : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
+IsolateSafepoint::IsolateSafepoint(Heap* heap) : heap_(heap) {}
void IsolateSafepoint::EnterLocalSafepointScope() {
// Safepoints need to be initiated on some main thread.
@@ -72,7 +71,7 @@ class PerClientSafepointData final {
void IsolateSafepoint::InitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
- shared_heap_isolate()->global_safepoint()->AssertActive();
+ shared_space_isolate()->global_safepoint()->AssertActive();
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
LockMutex(initiator->main_thread_local_heap());
InitiateGlobalSafepointScopeRaw(initiator, client_data);
@@ -80,7 +79,7 @@ void IsolateSafepoint::InitiateGlobalSafepointScope(
void IsolateSafepoint::TryInitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
- shared_heap_isolate()->global_safepoint()->AssertActive();
+ shared_space_isolate()->global_safepoint()->AssertActive();
if (!local_heaps_mutex_.TryLock()) return;
InitiateGlobalSafepointScopeRaw(initiator, client_data);
}
@@ -245,6 +244,8 @@ void IsolateSafepoint::Barrier::NotifyPark() {
}
void IsolateSafepoint::Barrier::WaitInSafepoint() {
+ const auto scoped_blocking_call =
+ V8::GetCurrentPlatform()->CreateBlockingScope(BlockingType::kWillBlock);
base::MutexGuard guard(&mutex_);
CHECK(IsArmed());
stopped_++;
@@ -256,6 +257,8 @@ void IsolateSafepoint::Barrier::WaitInSafepoint() {
}
void IsolateSafepoint::Barrier::WaitInUnpark() {
+ const auto scoped_blocking_call =
+ V8::GetCurrentPlatform()->CreateBlockingScope(BlockingType::kWillBlock);
base::MutexGuard guard(&mutex_);
while (IsArmed()) {
@@ -278,8 +281,8 @@ void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
Isolate* IsolateSafepoint::isolate() const { return heap_->isolate(); }
-Isolate* IsolateSafepoint::shared_heap_isolate() const {
- return isolate()->shared_heap_isolate();
+Isolate* IsolateSafepoint::shared_space_isolate() const {
+ return isolate()->shared_space_isolate();
}
IsolateSafepointScope::IsolateSafepointScope(Heap* heap)
@@ -292,7 +295,7 @@ IsolateSafepointScope::~IsolateSafepointScope() {
}
GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
- : shared_heap_isolate_(isolate) {}
+ : shared_space_isolate_(isolate) {}
void GlobalSafepoint::AppendClient(Isolate* client) {
clients_mutex_.AssertHeld();
@@ -313,11 +316,7 @@ void GlobalSafepoint::AppendClient(Isolate* client) {
void GlobalSafepoint::RemoveClient(Isolate* client) {
DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN);
-
- // A shared heap may have already acquired the client mutex to perform a
- // shared GC. We need to park the Isolate here to allow for a shared GC.
- IgnoreLocalGCRequests ignore_gc_requests(client->heap());
- ParkedMutexGuard guard(client->main_thread_local_heap(), &clients_mutex_);
+ AssertActive();
if (client->global_safepoint_next_client_isolate_) {
client->global_safepoint_next_client_isolate_
@@ -333,17 +332,10 @@ void GlobalSafepoint::RemoveClient(Isolate* client) {
DCHECK_EQ(clients_head_, client);
clients_head_ = client->global_safepoint_next_client_isolate_;
}
-
- client->shared_isolate_ = nullptr;
}
void GlobalSafepoint::AssertNoClientsOnTearDown() {
- DCHECK_WITH_MSG(
- clients_head_ == nullptr,
- "Shared heap must not have clients at teardown. The first isolate that "
- "is created (in a process that has no isolates) owns the lifetime of the "
- "shared heap and is considered the main isolate. The main isolate must "
- "outlive all other isolates.");
+ DCHECK_NULL(clients_head_);
}
void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
@@ -356,6 +348,8 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
clients_mutex_.Lock();
}
+ if (++active_safepoint_scopes_ > 1) return;
+
TimedHistogramScope timer(
initiator->counters()->gc_time_to_global_safepoint());
TRACE_GC(initiator->heap()->tracer(),
@@ -365,22 +359,12 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
// Try to initiate safepoint for all clients. Fail immediately when the
// local_heaps_mutex_ can't be locked without blocking.
- IterateClientIsolates([&clients, initiator](Isolate* client) {
+ IterateSharedSpaceAndClientIsolates([&clients, initiator](Isolate* client) {
clients.emplace_back(client);
client->heap()->safepoint()->TryInitiateGlobalSafepointScope(
initiator, &clients.back());
});
- if (shared_heap_isolate_->is_shared()) {
- // Make it possible to use AssertActive() on shared isolates.
- CHECK(shared_heap_isolate_->heap()
- ->safepoint()
- ->local_heaps_mutex_.TryLock());
-
- // Shared isolates should never have multiple threads.
- shared_heap_isolate_->heap()->safepoint()->AssertMainThreadIsOnlyThread();
- }
-
// Iterate all clients again to initiate the safepoint for all of them - even
// if that means blocking.
for (PerClientSafepointData& client : clients) {
@@ -390,8 +374,7 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
#if DEBUG
for (const PerClientSafepointData& client : clients) {
- DCHECK_EQ(client.isolate()->shared_heap_isolate(), shared_heap_isolate_);
- DCHECK(client.heap()->deserialization_complete());
+ DCHECK_EQ(client.isolate()->shared_space_isolate(), shared_space_isolate_);
}
#endif // DEBUG
@@ -404,27 +387,34 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
}
void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
- if (shared_heap_isolate_->is_shared()) {
- shared_heap_isolate_->heap()->safepoint()->local_heaps_mutex_.Unlock();
+ clients_mutex_.AssertHeld();
+ DCHECK_GT(active_safepoint_scopes_, 0);
+
+ if (--active_safepoint_scopes_ == 0) {
+ IterateSharedSpaceAndClientIsolates([initiator](Isolate* client) {
+ Heap* client_heap = client->heap();
+ client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
+ });
}
- IterateClientIsolates([initiator](Isolate* client) {
- Heap* client_heap = client->heap();
- client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
- });
+ clients_mutex_.Unlock();
+}
+bool GlobalSafepoint::IsRequestedForTesting() {
+ if (!clients_mutex_.TryLock()) return true;
clients_mutex_.Unlock();
+ return false;
}
GlobalSafepointScope::GlobalSafepointScope(Isolate* initiator)
: initiator_(initiator),
- shared_heap_isolate_(initiator->shared_heap_isolate()) {
- shared_heap_isolate_->global_safepoint()->EnterGlobalSafepointScope(
+ shared_space_isolate_(initiator->shared_space_isolate()) {
+ shared_space_isolate_->global_safepoint()->EnterGlobalSafepointScope(
initiator_);
}
GlobalSafepointScope::~GlobalSafepointScope() {
- shared_heap_isolate_->global_safepoint()->LeaveGlobalSafepointScope(
+ shared_space_isolate_->global_safepoint()->LeaveGlobalSafepointScope(
initiator_);
}
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 7e935889b7..5091e11ded 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -133,7 +133,7 @@ class IsolateSafepoint final {
}
Isolate* isolate() const;
- Isolate* shared_heap_isolate() const;
+ Isolate* shared_space_isolate() const;
Barrier barrier_;
Heap* heap_;
@@ -141,9 +141,9 @@ class IsolateSafepoint final {
// Mutex is used both for safepointing and adding/removing threads. A
// RecursiveMutex is needed since we need to support nested SafepointScopes.
base::RecursiveMutex local_heaps_mutex_;
- LocalHeap* local_heaps_head_;
+ LocalHeap* local_heaps_head_ = nullptr;
- int active_safepoint_scopes_;
+ int active_safepoint_scopes_ = 0;
friend class GlobalSafepoint;
friend class GlobalSafepointScope;
@@ -174,21 +174,33 @@ class GlobalSafepoint final {
void IterateClientIsolates(Callback callback) {
for (Isolate* current = clients_head_; current;
current = current->global_safepoint_next_client_isolate_) {
+ DCHECK(!current->is_shared_space_isolate());
callback(current);
}
}
+ template <typename Callback>
+ void IterateSharedSpaceAndClientIsolates(Callback callback) {
+ callback(shared_space_isolate_);
+ IterateClientIsolates(callback);
+ }
+
void AssertNoClientsOnTearDown();
void AssertActive() { clients_mutex_.AssertHeld(); }
+ V8_EXPORT_PRIVATE bool IsRequestedForTesting();
+
private:
void EnterGlobalSafepointScope(Isolate* initiator);
void LeaveGlobalSafepointScope(Isolate* initiator);
- Isolate* const shared_heap_isolate_;
- base::Mutex clients_mutex_;
+ Isolate* const shared_space_isolate_;
+ // RecursiveMutex is needed since we need to support nested
+ // GlobalSafepointScopes.
+ base::RecursiveMutex clients_mutex_;
Isolate* clients_head_ = nullptr;
+ int active_safepoint_scopes_ = 0;
friend class GlobalSafepointScope;
friend class Isolate;
@@ -201,7 +213,7 @@ class V8_NODISCARD GlobalSafepointScope {
private:
Isolate* const initiator_;
- Isolate* const shared_heap_isolate_;
+ Isolate* const shared_space_isolate_;
};
enum class SafepointKind { kIsolate, kGlobal };
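
Taken together, the safepoint.cc and safepoint.h changes above replace the shared-isolate special case with a reentrant scheme: clients_mutex_ becomes a RecursiveMutex and active_safepoint_scopes_ counts nesting, so only the outermost scope stops and resumes clients. A minimal sketch of that pattern, using std::recursive_mutex and hypothetical names rather than V8's types:

#include <mutex>

class NestedSafepointScope {
 public:
  void Enter() {
    mutex_.lock();  // same thread may re-enter while already holding the lock
    if (++active_scopes_ == 1) {
      // Outermost scope: stop client threads here (elided).
    }
  }
  void Leave() {
    if (--active_scopes_ == 0) {
      // Outermost scope ends: resume client threads here (elided).
    }
    mutex_.unlock();
  }

 private:
  std::recursive_mutex mutex_;
  int active_scopes_ = 0;  // guarded by mutex_
};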
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
deleted file mode 100644
index ba079ebf8c..0000000000
--- a/deps/v8/src/heap/scavenge-job.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/scavenge-job.h"
-
-#include "src/base/platform/time.h"
-#include "src/execution/isolate.h"
-#include "src/execution/vm-state-inl.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/init/v8.h"
-
-namespace v8 {
-namespace internal {
-
-class ScavengeJob::Task : public CancelableTask {
- public:
- Task(Isolate* isolate, ScavengeJob* job)
- : CancelableTask(isolate), isolate_(isolate), job_(job) {}
-
- // CancelableTask overrides.
- void RunInternal() override;
-
- Isolate* isolate() const { return isolate_; }
-
- private:
- Isolate* const isolate_;
- ScavengeJob* const job_;
-};
-
-size_t ScavengeJob::YoungGenerationTaskTriggerSize(Heap* heap) {
- return heap->new_space()->Capacity() * v8_flags.scavenge_task_trigger / 100;
-}
-
-bool ScavengeJob::YoungGenerationSizeTaskTriggerReached(Heap* heap) {
- return heap->new_space()->Size() >= YoungGenerationTaskTriggerSize(heap);
-}
-
-void ScavengeJob::ScheduleTaskIfNeeded(Heap* heap) {
- if (v8_flags.scavenge_task && !task_pending_ && !heap->IsTearingDown() &&
- YoungGenerationSizeTaskTriggerReached(heap)) {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
- auto taskrunner =
- V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
- if (taskrunner->NonNestableTasksEnabled()) {
- taskrunner->PostNonNestableTask(
- std::make_unique<Task>(heap->isolate(), this));
- task_pending_ = true;
- }
- }
-}
-
-void ScavengeJob::Task::RunInternal() {
- VMState<GC> state(isolate());
- TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
-
- if (ScavengeJob::YoungGenerationSizeTaskTriggerReached(isolate()->heap())) {
- isolate()->heap()->CollectGarbage(NEW_SPACE,
- GarbageCollectionReason::kTask);
- }
-
- job_->set_task_pending(false);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 4c37eb5044..a6bce6eef1 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -19,10 +19,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/slots-inl.h"
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-#include "src/heap/object-start-bitmap-inl.h"
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
namespace v8 {
namespace internal {
@@ -96,13 +92,13 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
int size,
PromotionHeapChoice promotion_heap_choice) {
// Copy the content of source to target.
- target.set_map_word(MapWord::FromMap(map), kRelaxedStore);
+ target.set_map_word(map, kRelaxedStore);
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
// This release CAS is paired with the load acquire in ScavengeObject.
- if (!source.release_compare_and_swap_map_word(
- MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
+ if (!source.release_compare_and_swap_map_word_forwarded(MapWord::FromMap(map),
+ target)) {
// Other task migrated the object.
return false;
}
@@ -135,13 +131,13 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
HeapObject target;
if (allocation.To(&target)) {
- DCHECK(heap()->non_atomic_marking_state()->IsWhite(target));
+ DCHECK(heap()->marking_state()->IsUnmarked(target));
const bool self_success =
MigrateObject(map, object, target, object_size, kPromoteIntoLocalHeap);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object.map_word(kAcquireLoad);
- HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
+ HeapObjectReference::Update(slot, map_word.ToForwardingAddress(object));
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
@@ -183,13 +179,21 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
HeapObject target;
if (allocation.To(&target)) {
- DCHECK(heap()->non_atomic_marking_state()->IsWhite(target));
+ DCHECK(heap()->non_atomic_marking_state()->IsUnmarked(target));
const bool self_success =
MigrateObject(map, object, target, object_size, promotion_heap_choice);
if (!self_success) {
- allocator_.FreeLast(OLD_SPACE, target, object_size);
+ switch (promotion_heap_choice) {
+ case kPromoteIntoLocalHeap:
+ allocator_.FreeLast(OLD_SPACE, target, object_size);
+ break;
+ case kPromoteIntoSharedHeap:
+ heap()->CreateFillerObjectAt(target.address(), object_size);
+ break;
+ }
+
MapWord map_word = object.map_word(kAcquireLoad);
- HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
+ HeapObjectReference::Update(slot, map_word.ToForwardingAddress(object));
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
@@ -223,8 +227,8 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
- if (object.release_compare_and_swap_map_word(
- MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
+ if (object.release_compare_and_swap_map_word_forwarded(
+ MapWord::FromMap(map), object)) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
@@ -290,7 +294,7 @@ SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
- if (!is_incremental_marking_) {
+ if (shortcut_strings_) {
// The ThinString should die after Scavenge, so avoid writing the proper
// forwarding pointer and instead just signal the actual object as forwarded
// reference.
@@ -317,32 +321,31 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
DCHECK(IsShortcutCandidate(map.instance_type()));
- if (!is_incremental_marking_ &&
+
+ if (shortcut_strings_ &&
object.unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
HeapObject first = HeapObject::cast(object.unchecked_first());
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object.set_map_word(MapWord::FromForwardingAddress(first), kReleaseStore);
+ object.set_map_word_forwarded(first, kReleaseStore);
return REMOVE_SLOT;
}
MapWord first_word = first.map_word(kAcquireLoad);
if (first_word.IsForwardingAddress()) {
- HeapObject target = first_word.ToForwardingAddress();
+ HeapObject target = first_word.ToForwardingAddress(first);
HeapObjectReference::Update(slot, target);
- object.set_map_word(MapWord::FromForwardingAddress(target),
- kReleaseStore);
+ object.set_map_word_forwarded(target, kReleaseStore);
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map first_map = first_word.ToMap();
SlotCallbackResult result = EvacuateObjectDefault(
first_map, slot, first, first.SizeFromMap(first_map),
Map::ObjectFieldsFrom(first_map.visitor_id()));
- object.set_map_word(MapWord::FromForwardingAddress(slot.ToHeapObject()),
- kReleaseStore);
+ object.set_map_word_forwarded(slot.ToHeapObject(), kReleaseStore);
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
@@ -423,7 +426,7 @@ SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- HeapObject dest = first_word.ToForwardingAddress();
+ HeapObject dest = first_word.ToForwardingAddress(object);
HeapObjectReference::Update(p, dest);
DCHECK_IMPLIES(Heap::InYoungGeneration(dest),
Heap::InToPage(dest) || Heap::IsLargeObject(dest));
@@ -475,12 +478,13 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final;
- V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final;
+ V8_INLINE void VisitCodePointer(Code host, CodeObjectSlot slot) final;
- V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
- V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(RelocInfo* rinfo) final;
+ V8_INLINE void VisitEmbeddedPointer(RelocInfo* rinfo) final;
V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
+ V8_INLINE int VisitJSApiObject(Map map, JSObject object);
private:
template <typename TSlot>
@@ -502,26 +506,28 @@ void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
return VisitPointersImpl(host, start, end);
}
-void ScavengeVisitor::VisitCodePointer(HeapObject host, CodeObjectSlot slot) {
+void ScavengeVisitor::VisitCodePointer(Code host, CodeObjectSlot slot) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Code slots never appear in new space because CodeDataContainers, the
- // only object that can contain code pointers, are always allocated in
- // the old space.
+ // InstructionStream slots never appear in new space because
+ // Code objects, the only object that can contain code pointers, are
+ // always allocated in the old space.
UNREACHABLE();
}
-void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+void ScavengeVisitor::VisitCodeTarget(RelocInfo* rinfo) {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
#ifdef DEBUG
- Code old_target = target;
+ InstructionStream old_target = target;
#endif
FullObjectSlot slot(&target);
VisitHeapObjectImpl(slot, target);
- // Code objects are never in new-space, so the slot contents must not change.
+ // InstructionStream objects are never in new-space, so the slot contents must
+ // not change.
DCHECK_EQ(old_target, target);
}
-void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
+void ScavengeVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
HeapObject heap_object = rinfo->target_object(cage_base());
#ifdef DEBUG
HeapObject old_heap_object = heap_object;
@@ -561,6 +567,10 @@ int ScavengeVisitor::VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
return size;
}
+int ScavengeVisitor::VisitJSApiObject(Map map, JSObject object) {
+ return VisitJSObject(map, object);
+}
+
int ScavengeVisitor::VisitEphemeronHashTable(Map map,
EphemeronHashTable table) {
// Register table with the scavenger, so it can take care of the weak keys
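
The MigrateObject and HandleLargeObject changes above keep the same racing-copy protocol, only routed through the new *_forwarded map-word helpers: whichever task wins a compare-and-swap on the map word owns the copy, and losers read back the winner's forwarding address. A stripped-down sketch of that protocol with std::atomic, using hypothetical types rather than V8's MapWord (acq_rel is used here to keep the sketch valid in any C++ standard; the patch pairs a release CAS with an acquire load):

#include <atomic>
#include <cstdint>

struct ObjectHeaderStub {
  std::atomic<uintptr_t> word;  // holds a map pointer or a forwarding address
};

// Returns true if this task installed |forwarding|. On failure, |expected_map|
// is overwritten with the winner's value, i.e. the forwarding address to use.
bool TryInstallForwarding(ObjectHeaderStub* header, uintptr_t& expected_map,
                          uintptr_t forwarding) {
  return header->word.compare_exchange_strong(expected_map, forwarding,
                                              std::memory_order_acq_rel);
}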
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 9e93a995f6..a96286e74b 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -61,22 +61,25 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
+ V8_INLINE void VisitCodePointer(Code host, CodeObjectSlot slot) final {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // Code slots never appear in new space because CodeDataContainers, the
- // only object that can contain code pointers, are always allocated in
- // the old space.
+ // InstructionStream slots never appear in new space because
+ // Code objects, the only object that can contain code pointers, are
+ // always allocated in the old space.
UNREACHABLE();
}
- V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- HandleSlot(host, FullHeapObjectSlot(&target), target);
+ V8_INLINE void VisitCodeTarget(RelocInfo* rinfo) final {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
+ HandleSlot(rinfo->instruction_stream(), FullHeapObjectSlot(&target),
+ target);
}
- V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
- PtrComprCageBase cage_base = host.main_cage_base();
+ V8_INLINE void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(rinfo->code());
HeapObject heap_object = rinfo->target_object(cage_base);
- HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
+ HandleSlot(rinfo->instruction_stream(), FullHeapObjectSlot(&heap_object),
+ heap_object);
}
inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
@@ -132,20 +135,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
}
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else if (record_slots_ &&
MarkCompactCollector::IsOnEvacuationCandidate(target)) {
// We should never try to record off-heap slots.
DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
- // Code slots never appear in new space because CodeDataContainers, the
- // only object that can contain code pointers, are always allocated in
- // the old space.
+ // InstructionStream slots never appear in new space because
+ // Code objects, the only object that can contain code pointers, are
+ // always allocated in the old space.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
!MemoryChunk::FromHeapObject(target)->IsFlagSet(
MemoryChunk::IS_EXECUTABLE));
- // Shared heap pages do not have evacuation candidates outside an atomic
- // shared GC pause.
- DCHECK_IMPLIES(!v8_flags.shared_space, !target.InSharedWritableHeap());
// We cannot call MarkCompactCollector::RecordSlot because that checks
// that the host page is not in young generation, which does not hold
@@ -154,8 +154,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
MemoryChunk::FromHeapObject(host), slot.address());
}
- if (target.InSharedWritableHeap()) {
- DCHECK(!scavenger_->heap()->IsShared());
+ if (target.InWritableSharedSpace()) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
@@ -296,7 +295,7 @@ class GlobalHandlesWeakRootsUpdatingVisitor final : public RootVisitor {
CHECK(Heap::InFromPage(heap_object));
MapWord first_word = heap_object.map_word(kRelaxedLoad);
CHECK(first_word.IsForwardingAddress());
- HeapObject dest = first_word.ToForwardingAddress();
+ HeapObject dest = first_word.ToForwardingAddress(heap_object);
HeapObjectReference::Update(FullHeapObjectSlot(p), dest);
CHECK_IMPLIES(Heap::InYoungGeneration(dest),
Heap::InToPage(dest) || Heap::IsLargeObject(dest));
@@ -444,7 +443,7 @@ void ScavengerCollector::CollectGarbage() {
}
if (V8_UNLIKELY(v8_flags.always_use_string_forwarding_table)) {
- isolate_->string_forwarding_table()->UpdateAfterEvacuation();
+ isolate_->string_forwarding_table()->UpdateAfterYoungEvacuation();
}
}
@@ -516,7 +515,7 @@ void ScavengerCollector::IterateStackAndScavenge(
survived_bytes_before +=
scavenger->bytes_copied() + scavenger->bytes_promoted();
}
- heap_->IterateStackRoots(root_scavenge_visitor, Heap::ScanStackMode::kNone);
+ heap_->IterateStackRoots(root_scavenge_visitor);
(*scavengers)[main_thread_id]->Process();
size_t survived_bytes_after = 0;
for (auto& scavenger : *scavengers) {
@@ -538,8 +537,12 @@ void ScavengerCollector::IterateStackAndScavenge(
}
void ScavengerCollector::SweepArrayBufferExtensions() {
+ DCHECK_EQ(0, heap_->new_lo_space()->Size());
heap_->array_buffer_sweeper()->RequestSweep(
- ArrayBufferSweeper::SweepingType::kYoung);
+ ArrayBufferSweeper::SweepingType::kYoung,
+ (heap_->new_space()->Size() == 0)
+ ? ArrayBufferSweeper::TreatAllYoungAsPromoted::kYes
+ : ArrayBufferSweeper::TreatAllYoungAsPromoted::kNo);
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
@@ -552,9 +555,9 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
- object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
+ object.set_map_word(map, kRelaxedStore);
- if (is_compacting && marking_state->IsBlack(object) &&
+ if (is_compacting && marking_state->IsMarked(object) &&
MarkCompactCollector::IsOnEvacuationCandidate(map)) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
MemoryChunk::FromHeapObject(object), object.map_slot().address());
@@ -601,11 +604,23 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
namespace {
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
- if (v8_flags.shared_string_table && heap->isolate()->has_shared_heap()) {
- return new ConcurrentAllocator(nullptr, heap->shared_allocation_space());
+ if (v8_flags.shared_string_table && heap->isolate()->has_shared_space()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_allocation_space(),
+ ConcurrentAllocator::Context::kGC);
}
return nullptr;
}
+
+// This returns true if the scavenger runs in a client isolate and incremental
+// marking is enabled in the shared space isolate.
+bool IsSharedIncrementalMarking(Isolate* isolate) {
+ return isolate->has_shared_space() && !isolate->is_shared_space_isolate() &&
+ isolate->shared_space_isolate()
+ ->heap()
+ ->incremental_marking()
+ ->IsMarking();
+}
+
} // namespace
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
@@ -619,8 +634,7 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
copied_list_local_(*copied_list),
ephemeron_table_list_local_(*ephemeron_table_list),
pretenuring_handler_(heap_->pretenuring_handler()),
- local_pretenuring_feedback_(
- PretenturingHandler::kInitialFeedbackCapacity),
+ local_pretenuring_feedback_(PretenuringHandler::kInitialFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
@@ -629,7 +643,11 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()),
shared_string_table_(shared_old_allocator_.get() != nullptr),
- mark_shared_heap_(heap->isolate()->is_shared_space_isolate()) {}
+ mark_shared_heap_(heap->isolate()->is_shared_space_isolate()),
+ shortcut_strings_(
+ (!heap->IsGCWithStack() || v8_flags.shortcut_strings_with_stack) &&
+ !is_incremental_marking_ &&
+ !IsSharedIncrementalMarking(heap->isolate())) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
int size) {
@@ -640,7 +658,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
// the end of collection it would be a violation of the invariant to record
// its slots.
const bool record_slots =
- is_compacting_ && heap()->atomic_marking_state()->IsBlack(target);
+ is_compacting_ && heap()->atomic_marking_state()->IsMarked(target);
IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
@@ -663,13 +681,14 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner_identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->sweeper()->AddPage(space, reinterpret_cast<Page*>(page),
- Sweeper::READD_TEMPORARY_REMOVED_PAGE);
+ Sweeper::READD_TEMPORARY_REMOVED_PAGE,
+ AccessMode::ATOMIC);
}
}
void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(page);
- const bool record_old_to_shared_slots = heap_->isolate()->has_shared_heap();
+ const bool record_old_to_shared_slots = heap_->isolate()->has_shared_space();
if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
@@ -707,7 +726,8 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
// A new space string might have been promoted into the shared
// heap during GC.
if (record_old_to_shared_slots) {
- CheckOldToNewSlotForSharedTyped(page, slot_type, slot_address);
+ CheckOldToNewSlotForSharedTyped(page, slot_type, slot_address,
+ *slot);
}
return result;
});
@@ -846,7 +866,7 @@ void Scavenger::CheckOldToNewSlotForSharedUntyped(MemoryChunk* chunk,
HeapObject heap_object;
if (object.GetHeapObject(&heap_object) &&
- heap_object.InSharedWritableHeap()) {
+ heap_object.InWritableSharedSpace()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
}
@@ -854,20 +874,12 @@ void Scavenger::CheckOldToNewSlotForSharedUntyped(MemoryChunk* chunk,
void Scavenger::CheckOldToNewSlotForSharedTyped(MemoryChunk* chunk,
SlotType slot_type,
- Address slot_address) {
- HeapObject heap_object = UpdateTypedSlotHelper::GetTargetObject(
- chunk->heap(), slot_type, slot_address);
-
-#if DEBUG
- UpdateTypedSlotHelper::UpdateTypedSlot(
- chunk->heap(), slot_type, slot_address,
- [heap_object](FullMaybeObjectSlot slot) {
- DCHECK_EQ((*slot).GetHeapObjectAssumeStrong(), heap_object);
- return KEEP_SLOT;
- });
-#endif // DEBUG
+ Address slot_address,
+ MaybeObject new_target) {
+ HeapObject heap_object;
- if (heap_object.InSharedWritableHeap()) {
+ if (new_target.GetHeapObject(&heap_object) &&
+ heap_object.InWritableSharedSpace()) {
const uintptr_t offset = slot_address - chunk->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 7942bfb30d..43f7660ab7 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -133,7 +133,9 @@ class Scavenger {
template <typename TSlot>
inline void CheckOldToNewSlotForSharedUntyped(MemoryChunk* chunk, TSlot slot);
inline void CheckOldToNewSlotForSharedTyped(MemoryChunk* chunk,
- SlotType slot_type, Address slot);
+ SlotType slot_type,
+ Address slot_address,
+ MaybeObject new_target);
// Scavenges an object |object| referenced from slot |p|. |object| is required
// to be in from space.
@@ -200,8 +202,8 @@ class Scavenger {
PromotionList::Local promotion_list_local_;
CopiedList::Local copied_list_local_;
EphemeronTableList::Local ephemeron_table_list_local_;
- PretenturingHandler* const pretenuring_handler_;
- PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
+ PretenuringHandler* const pretenuring_handler_;
+ PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
EvacuationAllocator allocator_;
@@ -214,6 +216,7 @@ class Scavenger {
const bool is_compacting_;
const bool shared_string_table_;
const bool mark_shared_heap_;
+ const bool shortcut_strings_;
friend class IterateAndScavengePromotedObjectsVisitor;
friend class RootScavengeVisitor;
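
The scavenger.h/scavenger.cc changes above fold the string-shortcutting decision into a single shortcut_strings_ bit computed once in the Scavenger constructor. A hedged sketch of that decision, with hypothetical field names standing in for the heap queries and flags used in the patch:

struct ShortcutPolicyStub {
  bool gc_with_stack;
  bool shortcut_strings_with_stack_flag;
  bool local_incremental_marking;
  bool shared_incremental_marking;  // marking in the shared space isolate

  // Thin/cons strings may only be shortcut when no local or shared marker
  // could still observe the original string and stack scanning allows it.
  bool ShortcutStringsAllowed() const {
    return (!gc_with_stack || shortcut_strings_with_stack_flag) &&
           !local_incremental_marking && !shared_incremental_marking;
  }
};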
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 48081db0a4..3f217e6441 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -44,7 +44,9 @@
#include "src/objects/template-objects-inl.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/turbofan-types.h"
+#include "src/objects/turboshaft-types.h"
#include "src/regexp/regexp.h"
+#include "src/utils/allocation.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
@@ -66,91 +68,184 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
return shared;
}
-} // namespace
-
-bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
- return heap->CreateHeapObjects();
+#ifdef DEBUG
+bool IsMutableMap(InstanceType instance_type, ElementsKind elements_kind) {
+ bool is_js_object = InstanceTypeChecker::IsJSObject(instance_type);
+ bool is_wasm_object = false;
+#if V8_ENABLE_WEBASSEMBLY
+ is_wasm_object =
+ instance_type == WASM_STRUCT_TYPE || instance_type == WASM_ARRAY_TYPE;
+#endif // V8_ENABLE_WEBASSEMBLY
+ DCHECK_IMPLIES(is_js_object &&
+ !Map::CanHaveFastTransitionableElementsKind(instance_type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
+ // JSObjects have maps with a mutable prototype_validity_cell, so they cannot
+ // go in RO_SPACE. Maps for managed Wasm objects have mutable subtype lists.
+ return is_js_object || is_wasm_object;
}
+#endif
-bool Heap::CreateHeapObjects() {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
-
- // Ensure that all young generation pages are iterable. It must be after heap
- // setup, so that the maps have been created.
- if (new_space()) new_space()->MakeIterable();
-
- CreateApiObjects();
+struct ConstantStringInit {
+ const char* contents;
+ RootIndex index;
+};
- // Create initial objects
- CreateInitialObjects();
- CreateInternalAccessorInfoObjects();
- CHECK_EQ(0u, gc_count_);
+constexpr std::initializer_list<ConstantStringInit>
+#define CONSTANT_STRING_ELEMENT(_, name, contents) \
+ {contents, RootIndex::k##name},
+ kImportantConstantStringTable{
+ EXTRA_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(
+ CONSTANT_STRING_ELEMENT, /* not used */)
+ IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(
+ CONSTANT_STRING_ELEMENT, /* not used */)
+#undef CONSTANT_STRING_ELEMENT
+ };
- set_native_contexts_list(ReadOnlyRoots(this).undefined_value());
- set_allocation_sites_list(ReadOnlyRoots(this).undefined_value());
- set_dirty_js_finalization_registries_list(
- ReadOnlyRoots(this).undefined_value());
- set_dirty_js_finalization_registries_list_tail(
- ReadOnlyRoots(this).undefined_value());
+constexpr std::initializer_list<ConstantStringInit>
+#define CONSTANT_STRING_ELEMENT(_, name, contents) \
+ {contents, RootIndex::k##name},
+ kNotImportantConstantStringTable{
+ NOT_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(
+ CONSTANT_STRING_ELEMENT, /* not used */)
+#undef CONSTANT_STRING_ELEMENT
+ };
- return true;
-}
+struct StringTypeInit {
+ InstanceType type;
+ int size;
+ RootIndex index;
+};
-const Heap::StringTypeTable Heap::string_type_table[] = {
+constexpr std::initializer_list<StringTypeInit> kStringTypeTable{
#define STRING_TYPE_ELEMENT(type, size, name, CamelName) \
{type, size, RootIndex::k##CamelName##Map},
STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
-const Heap::ConstantStringTable Heap::constant_string_table[] = {
- {"", RootIndex::kempty_string},
-#define CONSTANT_STRING_ELEMENT(_, name, contents) \
- {contents, RootIndex::k##name},
- INTERNALIZED_STRING_LIST_GENERATOR(CONSTANT_STRING_ELEMENT, /* not used */)
-#undef CONSTANT_STRING_ELEMENT
+struct StructInit {
+ InstanceType type;
+ int size;
+ RootIndex index;
};
-const Heap::StructTable Heap::struct_table[] = {
+constexpr bool is_important_struct(InstanceType type) {
+ return type == ENUM_CACHE_TYPE || type == CALL_SITE_INFO_TYPE;
+}
+
+constexpr std::initializer_list<StructInit> kStructTable{
#define STRUCT_TABLE_ELEMENT(TYPE, Name, name) \
{TYPE, Name::kSize, RootIndex::k##Name##Map},
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
-
#define ALLOCATION_SITE_ELEMENT(_, TYPE, Name, Size, name) \
{TYPE, Name::kSize##Size, RootIndex::k##Name##Size##Map},
ALLOCATION_SITE_LIST(ALLOCATION_SITE_ELEMENT, /* not used */)
#undef ALLOCATION_SITE_ELEMENT
-
#define DATA_HANDLER_ELEMENT(_, TYPE, Name, Size, name) \
{TYPE, Name::kSizeWithData##Size, RootIndex::k##Name##Size##Map},
DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT, /* not used */)
#undef DATA_HANDLER_ELEMENT
};
-AllocationResult Heap::AllocateMap(InstanceType instance_type,
+} // namespace
+
+bool SetupIsolateDelegate::SetupHeapInternal(Isolate* isolate) {
+ auto heap = isolate->heap();
+ if (!isolate->read_only_heap()->roots_init_complete()) {
+ if (!heap->CreateReadOnlyHeapObjects()) return false;
+ isolate->VerifyStaticRoots();
+ isolate->read_only_heap()->OnCreateRootsComplete(isolate);
+ }
+ // We prefer to fit all of read-only space in one page.
+ CHECK_EQ(heap->read_only_space()->pages().size(), 1);
+ auto ro_size = heap->read_only_space()->Size();
+ DCHECK_EQ(heap->old_space()->Size(), 0);
+ DCHECK_IMPLIES(heap->new_space(), heap->new_space()->Size() == 0);
+ auto res = heap->CreateMutableHeapObjects();
+ DCHECK_EQ(heap->read_only_space()->Size(), ro_size);
+ USE(ro_size);
+ return res;
+}
+
+bool Heap::CreateReadOnlyHeapObjects() {
+ // Create initial maps and important objects.
+ if (!CreateEarlyReadOnlyMaps()) return false;
+ if (!CreateImportantReadOnlyObjects()) return false;
+
+#if V8_STATIC_ROOTS_BOOL
+ // The read-only heap is sorted so that frequently used objects are allocated
+ // early, letting their compressed addresses fit into 12-bit arm immediates.
+ ReadOnlySpace* ro_space = isolate()->heap()->read_only_space();
+ DCHECK_LT(V8HeapCompressionScheme::CompressAny(ro_space->top()), 0xfff);
+ USE(ro_space);
+#endif
+
+ if (!CreateLateReadOnlyMaps()) return false;
+ CreateReadOnlyApiObjects();
+ if (!CreateReadOnlyObjects()) return false;
+
+#ifdef DEBUG
+ ReadOnlyRoots roots(isolate());
+ for (auto pos = RootIndex::kFirstReadOnlyRoot;
+ pos <= RootIndex::kLastReadOnlyRoot; ++pos) {
+ DCHECK(roots.is_initialized(pos));
+ }
+#endif
+ return true;
+}
+
+bool Heap::CreateMutableHeapObjects() {
+ ReadOnlyRoots roots(this);
+
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { \
+ Map map; \
+ if (!AllocateMap(AllocationType::kMap, (instance_type), size).To(&map)) \
+ return false; \
+ set_##field_name##_map(map); \
+ }
+
+ { // Map allocation
+ ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kHeaderSize,
+ message_object)
+ ALLOCATE_MAP(JS_EXTERNAL_OBJECT_TYPE, JSExternalObject::kHeaderSize,
+ external)
+ external_map().set_is_extensible(false);
+ }
+#undef ALLOCATE_MAP
+
+ // Ensure that all young generation pages are iterable. It must be after heap
+ // setup, so that the maps have been created.
+ if (new_space()) new_space()->MakeIterable();
+
+ CreateMutableApiObjects();
+
+ // Create initial objects
+ CreateInitialMutableObjects();
+ CreateInternalAccessorInfoObjects();
+ CHECK_EQ(0u, gc_count_);
+
+ set_native_contexts_list(roots.undefined_value());
+ set_allocation_sites_list(roots.undefined_value());
+ set_dirty_js_finalization_registries_list(roots.undefined_value());
+ set_dirty_js_finalization_registries_list_tail(roots.undefined_value());
+
+ return true;
+}
+
+AllocationResult Heap::AllocateMap(AllocationType allocation_type,
+ InstanceType instance_type,
int instance_size,
ElementsKind elements_kind,
int inobject_properties) {
static_assert(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- bool is_js_object = InstanceTypeChecker::IsJSObject(instance_type);
- bool is_wasm_object = false;
-#if V8_ENABLE_WEBASSEMBLY
- is_wasm_object =
- instance_type == WASM_STRUCT_TYPE || instance_type == WASM_ARRAY_TYPE;
-#endif // V8_ENABLE_WEBASSEMBLY
- DCHECK_IMPLIES(is_js_object &&
- !Map::CanHaveFastTransitionableElementsKind(instance_type),
- IsDictionaryElementsKind(elements_kind) ||
- IsTerminalElementsKind(elements_kind));
HeapObject result;
- // JSObjects have maps with a mutable prototype_validity_cell, so they cannot
- // go in RO_SPACE. Maps for managed Wasm objects have mutable subtype lists.
- bool is_mutable = is_js_object || is_wasm_object;
- AllocationResult allocation =
- AllocateRaw(Map::kSize, is_mutable ? AllocationType::kMap
- : AllocationType::kReadOnly);
+ DCHECK_EQ(allocation_type, IsMutableMap(instance_type, elements_kind)
+ ? AllocationType::kMap
+ : AllocationType::kReadOnly);
+ AllocationResult allocation = AllocateRaw(Map::kSize, allocation_type);
if (!allocation.To(&result)) return allocation;
result.set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
@@ -199,8 +294,7 @@ void Heap::FinalizePartialMap(Map map) {
map.set_dependent_code(DependentCode::empty_dependent_code(roots));
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
- map.set_prototype(roots.null_value());
- map.set_constructor_or_back_pointer(roots.null_value());
+ map.init_prototype_and_constructor_or_back_pointer(roots);
}
AllocationResult Heap::Allocate(Handle<Map> map,
@@ -218,7 +312,9 @@ AllocationResult Heap::Allocate(Handle<Map> map,
return AllocationResult::FromObject(result);
}
-bool Heap::CreateInitialMaps() {
+bool Heap::CreateEarlyReadOnlyMaps() {
+ // Set up maps that are used often or are needed by CreateImportantReadOnlyObjects.
+ ReadOnlyRoots roots(this);
HeapObject obj;
{
AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
@@ -229,8 +325,6 @@ bool Heap::CreateInitialMaps() {
set_meta_map(new_meta_map);
new_meta_map.set_map_after_allocation(new_meta_map);
- ReadOnlyRoots roots(this);
- { // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
{ \
Map map; \
@@ -238,6 +332,7 @@ bool Heap::CreateInitialMaps() {
set_##field_name##_map(map); \
}
+ { // Partial map allocation
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
ALLOCATE_PARTIAL_MAP(WEAK_FIXED_ARRAY_TYPE, kVariableSizeSentinel,
weak_fixed_array);
@@ -254,6 +349,13 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
+ // Some struct maps which we need for later dependencies
+ for (const StructInit& entry : kStructTable) {
+ if (!is_important_struct(entry.type)) continue;
+ Map map;
+ if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
+ roots_table()[entry.index] = map.ptr();
+ }
#undef ALLOCATE_PARTIAL_MAP
}
@@ -314,14 +416,6 @@ bool Heap::CreateInitialMaps() {
// Set preliminary exception sentinel value before actually initializing it.
set_exception(roots.null_value());
- // Setup the struct maps first (needed for the EnumCache).
- for (unsigned i = 0; i < arraysize(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- Map map;
- if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
- roots_table()[entry.index] = map.ptr();
- }
-
// Allocate the empty enum cache.
{
AllocationResult allocation =
@@ -339,7 +433,8 @@ bool Heap::CreateInitialMaps() {
obj.set_map_after_allocation(roots.descriptor_array_map(),
SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
- array.Initialize(roots.empty_enum_cache(), roots.undefined_value(), 0, 0);
+ array.Initialize(roots.empty_enum_cache(), roots.undefined_value(), 0, 0,
+ DescriptorArrayMarkingState::kInitialGCState);
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
@@ -355,17 +450,19 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(roots.null_map());
roots.null_map().set_is_undetectable(true);
FinalizePartialMap(roots.the_hole_map());
- for (unsigned i = 0; i < arraysize(struct_table); ++i) {
- const StructTable& entry = struct_table[i];
- FinalizePartialMap(Map::cast(Object(roots_table()[entry.index])));
+ for (const StructInit& entry : kStructTable) {
+ if (!is_important_struct(entry.type)) continue;
+ FinalizePartialMap(Map::cast(roots.object_at(entry.index)));
}
- { // Map allocation
-#define ALLOCATE_MAP(instance_type, size, field_name) \
- { \
- Map map; \
- if (!AllocateMap((instance_type), size).To(&map)) return false; \
- set_##field_name##_map(map); \
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { \
+ Map map; \
+ if (!AllocateMap(AllocationType::kReadOnly, (instance_type), size) \
+ .To(&map)) { \
+ return false; \
+ } \
+ set_##field_name##_map(map); \
}
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
@@ -379,6 +476,7 @@ bool Heap::CreateInitialMaps() {
(constructor_function_index)); \
}
+ { // Map allocation
ALLOCATE_VARSIZE_MAP(SCOPE_INFO_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_VARSIZE_MAP(CLOSURE_FEEDBACK_CELL_ARRAY_TYPE,
@@ -386,8 +484,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
- ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
- Context::SYMBOL_FUNCTION_INDEX)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
ALLOCATE_MAP(MEGA_DOM_HANDLER_TYPE, MegaDomHandler::kSize, mega_dom_handler)
@@ -403,10 +499,15 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, basic_block_counters_marker);
ALLOCATE_VARSIZE_MAP(BIGINT_TYPE, bigint);
- for (unsigned i = 0; i < arraysize(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
+ ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
+ Context::SYMBOL_FUNCTION_INDEX)
+
+ for (const StringTypeInit& entry : kStringTypeTable) {
Map map;
- if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
+ if (!AllocateMap(AllocationType::kReadOnly, entry.type, entry.size)
+ .To(&map)) {
+ return false;
+ }
map.SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
@@ -426,26 +527,14 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_NAME_DICTIONARY_TYPE,
small_ordered_name_dictionary)
-#define TORQUE_ALLOCATE_MAP(NAME, Name, name) \
- ALLOCATE_MAP(NAME, Name::SizeFor(), name)
- TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
-#undef TORQUE_ALLOCATE_MAP
-
-#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
- /* The DescriptorArray map is pre-allocated and initialized above. */ \
- if (NAME != DESCRIPTOR_ARRAY_TYPE) { \
- ALLOCATE_VARSIZE_MAP(NAME, name) \
- }
- TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
-#undef TORQUE_ALLOCATE_VARSIZE_MAP
-
- ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
+ ALLOCATE_VARSIZE_MAP(INSTRUCTION_STREAM_TYPE, instruction_stream)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
{
// The invalid_prototype_validity_cell is needed for JSObject maps.
Smi value = Smi::FromInt(Map::kPrototypeChainInvalid);
- AllocationResult alloc = AllocateRaw(Cell::kSize, AllocationType::kOld);
+ AllocationResult alloc =
+ AllocateRaw(Cell::kSize, AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj.set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
Cell::cast(obj).set_value(value);
@@ -470,24 +559,64 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
- ALLOCATE_VARSIZE_MAP(ORDERED_HASH_MAP_TYPE, ordered_hash_map)
- ALLOCATE_VARSIZE_MAP(ORDERED_HASH_SET_TYPE, ordered_hash_set)
ALLOCATE_VARSIZE_MAP(ORDERED_NAME_DICTIONARY_TYPE, ordered_name_dictionary)
ALLOCATE_VARSIZE_MAP(NAME_DICTIONARY_TYPE, name_dictionary)
ALLOCATE_VARSIZE_MAP(SWISS_NAME_DICTIONARY_TYPE, swiss_name_dictionary)
ALLOCATE_VARSIZE_MAP(GLOBAL_DICTIONARY_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
+
+ ALLOCATE_VARSIZE_MAP(REGISTERED_SYMBOL_TABLE_TYPE, registered_symbol_table)
+
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
+
+ ALLOCATE_MAP(ACCESSOR_INFO_TYPE, AccessorInfo::kSize, accessor_info)
+
+ ALLOCATE_VARSIZE_MAP(PREPARSE_DATA_TYPE, preparse_data)
+ ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize,
+ shared_function_info)
+ ALLOCATE_MAP(CODE_TYPE, Code::kSize, code)
+
+ return true;
+ }
+}
+
+bool Heap::CreateLateReadOnlyMaps() {
+ ReadOnlyRoots roots(this);
+ {
+ // Setup the struct maps.
+ for (const StructInit& entry : kStructTable) {
+ if (is_important_struct(entry.type)) continue;
+ Map map;
+ if (!AllocateMap(AllocationType::kReadOnly, entry.type, entry.size)
+ .To(&map))
+ return false;
+ roots_table()[entry.index] = map.ptr();
+ }
+
+#define TORQUE_ALLOCATE_MAP(NAME, Name, name) \
+ ALLOCATE_MAP(NAME, Name::SizeFor(), name)
+ TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
+#undef TORQUE_ALLOCATE_MAP
+
+#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
+ /* The DescriptorArray map is pre-allocated and initialized above. */ \
+ if (NAME != DESCRIPTOR_ARRAY_TYPE) { \
+ ALLOCATE_VARSIZE_MAP(NAME, name) \
+ }
+ TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
+#undef TORQUE_ALLOCATE_VARSIZE_MAP
+
+ ALLOCATE_VARSIZE_MAP(ORDERED_HASH_MAP_TYPE, ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(ORDERED_HASH_SET_TYPE, ordered_hash_set)
+
ALLOCATE_VARSIZE_MAP(SIMPLE_NUMBER_DICTIONARY_TYPE,
simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(NAME_TO_INDEX_HASH_TABLE_TYPE,
name_to_index_hash_table)
- ALLOCATE_VARSIZE_MAP(REGISTERED_SYMBOL_TABLE_TYPE, registered_symbol_table)
ALLOCATE_VARSIZE_MAP(EMBEDDER_DATA_ARRAY_TYPE, embedder_data_array)
ALLOCATE_VARSIZE_MAP(EPHEMERON_HASH_TABLE_TYPE, ephemeron_hash_table)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
-
ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TABLE_TYPE, script_context_table)
ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
@@ -495,8 +624,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(COVERAGE_INFO_TYPE, coverage_info);
- ALLOCATE_MAP(ACCESSOR_INFO_TYPE, AccessorInfo::kSize, accessor_info)
-
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
side_effect_call_handler_info)
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
@@ -504,15 +631,10 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
next_call_side_effect_free_call_handler_info)
- ALLOCATE_VARSIZE_MAP(PREPARSE_DATA_TYPE, preparse_data)
- ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
- shared_function_info)
ALLOCATE_MAP(SOURCE_TEXT_MODULE_TYPE, SourceTextModule::kSize,
source_text_module)
ALLOCATE_MAP(SYNTHETIC_MODULE_TYPE, SyntheticModule::kSize,
synthetic_module)
- ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
- code_data_container)
IF_WASM(ALLOCATE_MAP, WASM_API_FUNCTION_REF_TYPE, WasmApiFunctionRef::kSize,
wasm_api_function_ref)
@@ -530,27 +652,110 @@ bool Heap::CreateInitialMaps() {
wasm_type_info)
IF_WASM(ALLOCATE_MAP, WASM_CONTINUATION_OBJECT_TYPE,
WasmContinuationObject::kSize, wasm_continuation_object)
+ IF_WASM(ALLOCATE_MAP, WASM_NULL_TYPE, kVariableSizeSentinel, wasm_null);
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
-
- ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kHeaderSize,
- message_object)
- ALLOCATE_MAP(JS_EXTERNAL_OBJECT_TYPE, JSExternalObject::kHeaderSize,
- external)
- external_map().set_is_extensible(false);
+ }
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
+
+ return true;
+}
+
+bool Heap::CreateImportantReadOnlyObjects() {
+ // Allocate some objects early so their addresses fit as arm64 immediates.
+ HeapObject obj;
+ ReadOnlyRoots roots(isolate());
+
+ // For static roots we need the r/o space to have identical layout on all
+ // compile targets. Varying objects are padded to their biggest size.
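+ // StaticRootsEnsureAllocatedSize (below) performs that padding: it appends a
+ // read-only filler object directly after |obj| so that the pair occupies
+ // exactly |required| bytes, keeping read-only offsets identical across
+ // builds.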
+ auto StaticRootsEnsureAllocatedSize = [&](HeapObject obj, int required) {
+ if (V8_STATIC_ROOTS_BOOL || v8_flags.static_roots_src) {
+ if (required == obj.Size()) return;
+ CHECK_LT(obj.Size(), required);
+ int filler_size = required - obj.Size();
+
+ HeapObject filler =
+ allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ filler_size, AllocationType::kReadOnly,
+ AllocationOrigin::kRuntime, AllocationAlignment::kTaggedAligned);
+ CreateFillerObjectAt(filler.address(), filler_size,
+ ClearFreedMemoryMode::kClearFreedMemory);
+
+ CHECK_EQ(filler.address() + filler.Size(), obj.address() + required);
+ }
+ };
+
+ // Bools
+
+ HandleScope initial_objects_handle_scope(isolate());
+ {
+ AllocationResult allocation =
+ Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
+ if (!allocation.To(&obj)) return false;
}
+ set_true_value(Oddball::cast(obj));
+ Oddball::cast(obj).set_kind(Oddball::kTrue);
+
{
- AllocationResult alloc = AllocateRaw(
- ArrayList::SizeFor(ArrayList::kFirstIndex), AllocationType::kReadOnly);
- if (!alloc.To(&obj)) return false;
- obj.set_map_after_allocation(roots.array_list_map(), SKIP_WRITE_BARRIER);
- ArrayList::cast(obj).set_length(ArrayList::kFirstIndex);
- ArrayList::cast(obj).SetLength(0);
+ AllocationResult allocation =
+ Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_false_value(Oddball::cast(obj));
+ Oddball::cast(obj).set_kind(Oddball::kFalse);
+
+ // Hash seed for strings
+
+ Factory* factory = isolate()->factory();
+ set_hash_seed(*factory->NewByteArray(kInt64Size, AllocationType::kReadOnly));
+ InitializeHashSeed();
+
+ // Important strings and symbols
+ for (const ConstantStringInit& entry : kImportantConstantStringTable) {
+ Handle<String> str = factory->InternalizeUtf8String(entry.contents);
+ roots_table()[entry.index] = str->ptr();
+ }
+
+ {
+#define SYMBOL_INIT(_, name) \
+ { \
+ Handle<Symbol> symbol( \
+ isolate()->factory()->NewPrivateSymbol(AllocationType::kReadOnly)); \
+ roots_table()[RootIndex::k##name] = symbol->ptr(); \
+ }
+ IMPORTANT_PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)}
+ // SYMBOL_INIT used again later.
+
+ // Empty elements
+ Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
+ isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+
+ set_empty_property_dictionary(*empty_property_dictionary);
+
+ // Allocate the empty OrderedNameDictionary
+ Handle<OrderedNameDictionary> empty_ordered_property_dictionary =
+ OrderedNameDictionary::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
+ set_empty_ordered_property_dictionary(*empty_ordered_property_dictionary);
+
+ // Allocate the empty SwissNameDictionary
+ Handle<SwissNameDictionary> empty_swiss_property_dictionary =
+ factory->CreateCanonicalEmptySwissNameDictionary();
+ set_empty_swiss_property_dictionary(*empty_swiss_property_dictionary);
+ StaticRootsEnsureAllocatedSize(*empty_swiss_property_dictionary,
+ 8 * kTaggedSize);
+
+ {
+ if (!AllocateRaw(ByteArray::SizeFor(0), AllocationType::kReadOnly).To(&obj))
+ return false;
+ obj.set_map_after_allocation(roots.byte_array_map(), SKIP_WRITE_BARRIER);
+ ByteArray::cast(obj).set_length(0);
+ set_empty_byte_array(ByteArray::cast(obj));
}
- set_empty_array_list(ArrayList::cast(obj));
{
AllocationResult alloc =
@@ -571,6 +776,61 @@ bool Heap::CreateInitialMaps() {
set_empty_scope_info(ScopeInfo::cast(obj));
{
+ if (!AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly)
+ .To(&obj)) {
+ return false;
+ }
+ obj.set_map_after_allocation(roots.property_array_map(),
+ SKIP_WRITE_BARRIER);
+ PropertyArray::cast(obj).initialize_length(0);
+ set_empty_property_array(PropertyArray::cast(obj));
+ }
+
+ // Heap Numbers
+ // The -0 value must be set before NewNumber works.
+ set_minus_zero_value(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(-0.0));
+ DCHECK(std::signbit(roots.minus_zero_value().Number()));
+
+ set_nan_value(*factory->NewHeapNumber<AllocationType::kReadOnly>(
+ std::numeric_limits<double>::quiet_NaN()));
+ set_hole_nan_value(*factory->NewHeapNumberFromBits<AllocationType::kReadOnly>(
+ kHoleNanInt64));
+ set_infinity_value(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(V8_INFINITY));
+ set_minus_infinity_value(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(-V8_INFINITY));
+ set_max_safe_integer(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(kMaxSafeInteger));
+ set_max_uint_32(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(kMaxUInt32));
+ set_smi_min_value(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(kSmiMinValue));
+ set_smi_max_value_plus_one(
+ *factory->NewHeapNumber<AllocationType::kReadOnly>(0.0 - kSmiMinValue));
+
+ return true;
+}
+
+bool Heap::CreateReadOnlyObjects() {
+ HandleScope initial_objects_handle_scope(isolate());
+ Factory* factory = isolate()->factory();
+ ReadOnlyRoots roots(this);
+ HeapObject obj;
+
+ // Empty elements
+ {
+ AllocationResult alloc = AllocateRaw(
+ ArrayList::SizeFor(ArrayList::kFirstIndex), AllocationType::kReadOnly);
+ if (!alloc.To(&obj)) return false;
+ obj.set_map_after_allocation(roots.array_list_map(), SKIP_WRITE_BARRIER);
+ // Unchecked to skip failing checks since required roots are uninitialized.
+ ArrayList::unchecked_cast(obj).set_length(ArrayList::kFirstIndex);
+ ArrayList::unchecked_cast(obj).SetLength(0);
+ }
+ set_empty_array_list(ArrayList::unchecked_cast(obj));
+
+ {
// Empty boilerplate needs a field for literal_flags
AllocationResult alloc =
AllocateRaw(FixedArray::SizeFor(1), AllocationType::kReadOnly);
@@ -600,43 +860,8 @@ bool Heap::CreateInitialMaps() {
set_empty_array_boilerplate_description(
ArrayBoilerplateDescription::cast(obj));
- {
- AllocationResult allocation =
- Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
- if (!allocation.To(&obj)) return false;
- }
- set_true_value(Oddball::cast(obj));
- Oddball::cast(obj).set_kind(Oddball::kTrue);
-
- {
- AllocationResult allocation =
- Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
- if (!allocation.To(&obj)) return false;
- }
- set_false_value(Oddball::cast(obj));
- Oddball::cast(obj).set_kind(Oddball::kFalse);
-
// Empty arrays.
{
- if (!AllocateRaw(ByteArray::SizeFor(0), AllocationType::kReadOnly).To(&obj))
- return false;
- obj.set_map_after_allocation(roots.byte_array_map(), SKIP_WRITE_BARRIER);
- ByteArray::cast(obj).set_length(0);
- set_empty_byte_array(ByteArray::cast(obj));
- }
-
- {
- if (!AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly)
- .To(&obj)) {
- return false;
- }
- obj.set_map_after_allocation(roots.property_array_map(),
- SKIP_WRITE_BARRIER);
- PropertyArray::cast(obj).initialize_length(0);
- set_empty_property_array(PropertyArray::cast(obj));
- }
-
- {
if (!AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly)
.To(&obj)) {
return false;
@@ -652,49 +877,6 @@ bool Heap::CreateInitialMaps() {
roots.bigint_map().SetConstructorFunctionIndex(
Context::BIGINT_FUNCTION_INDEX);
- return true;
-}
-
-void Heap::CreateApiObjects() {
- Isolate* isolate = this->isolate();
- HandleScope scope(isolate);
-
- set_message_listeners(*TemplateList::New(isolate, 2));
-
- Handle<InterceptorInfo> info =
- Handle<InterceptorInfo>::cast(isolate->factory()->NewStruct(
- INTERCEPTOR_INFO_TYPE, AllocationType::kReadOnly));
- info->set_flags(0);
- set_noop_interceptor_info(*info);
-}
-
-void Heap::CreateInitialObjects() {
- HandleScope initial_objects_handle_scope(isolate());
- Factory* factory = isolate()->factory();
- ReadOnlyRoots roots(this);
-
- // The -0 value must be set before NewNumber works.
- set_minus_zero_value(
- *factory->NewHeapNumber<AllocationType::kReadOnly>(-0.0));
- DCHECK(std::signbit(roots.minus_zero_value().Number()));
-
- set_nan_value(*factory->NewHeapNumber<AllocationType::kReadOnly>(
- std::numeric_limits<double>::quiet_NaN()));
- set_hole_nan_value(*factory->NewHeapNumberFromBits<AllocationType::kReadOnly>(
- kHoleNanInt64));
- set_infinity_value(
- *factory->NewHeapNumber<AllocationType::kReadOnly>(V8_INFINITY));
- set_minus_infinity_value(
- *factory->NewHeapNumber<AllocationType::kReadOnly>(-V8_INFINITY));
-
- set_hash_seed(*factory->NewByteArray(kInt64Size, AllocationType::kReadOnly));
- InitializeHashSeed();
-
- // There's no "current microtask" in the beginning.
- set_current_microtask(roots.undefined_value());
-
- set_weak_refs_keep_during_job(roots.undefined_value());
-
// Allocate and initialize table for single character one byte strings.
int table_size = String::kMaxOneByteCharCode + 1;
set_single_character_string_table(
@@ -707,14 +889,11 @@ void Heap::CreateInitialObjects() {
single_character_string_table().set(i, *str);
}
- for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
- Handle<String> str =
- factory->InternalizeUtf8String(constant_string_table[i].contents);
- roots_table()[constant_string_table[i].index] = str->ptr();
+ for (const ConstantStringInit& entry : kNotImportantConstantStringTable) {
+ Handle<String> str = factory->InternalizeUtf8String(entry.contents);
+ roots_table()[entry.index] = str->ptr();
}
- // Allocate
-
// Finish initializing oddballs after creating the string table.
Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
factory->nan_value(), "undefined", Oddball::kUndefined);
@@ -772,31 +951,25 @@ void Heap::CreateInitialObjects() {
{
HandleScope handle_scope(isolate());
-#define SYMBOL_INIT(_, name) \
- { \
- Handle<Symbol> symbol( \
- isolate()->factory()->NewPrivateSymbol(AllocationType::kReadOnly)); \
- roots_table()[RootIndex::k##name] = symbol->ptr(); \
- }
- PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
+ NOT_IMPORTANT_PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
}
{
HandleScope handle_scope(isolate());
-#define PUBLIC_SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
- Handle<String> name##d = factory->InternalizeUtf8String(#description); \
- name->set_description(*name##d); \
+#define PUBLIC_SYMBOL_INIT(_, name, description) \
+ Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
+ Handle<String> name##d = factory->InternalizeUtf8String(#description); \
+ TaggedField<Object>::store(*name, Symbol::kDescriptionOffset, *name##d); \
roots_table()[RootIndex::k##name] = name->ptr();
PUBLIC_SYMBOL_LIST_GENERATOR(PUBLIC_SYMBOL_INIT, /* not used */)
-#define WELL_KNOWN_SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
- Handle<String> name##d = factory->InternalizeUtf8String(#description); \
- name->set_is_well_known_symbol(true); \
- name->set_description(*name##d); \
+#define WELL_KNOWN_SYMBOL_INIT(_, name, description) \
+ Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
+ Handle<String> name##d = factory->InternalizeUtf8String(#description); \
+ name->set_is_well_known_symbol(true); \
+ TaggedField<Object>::store(*name, Symbol::kDescriptionOffset, *name##d); \
roots_table()[RootIndex::k##name] = name->ptr();
WELL_KNOWN_SYMBOL_LIST_GENERATOR(WELL_KNOWN_SYMBOL_INIT, /* not used */)
@@ -843,23 +1016,140 @@ void Heap::CreateInitialObjects() {
#undef WELL_KNOWN_SYMBOL_INIT
}
- Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
+ Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
- DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
-
- set_empty_property_dictionary(*empty_property_dictionary);
+ DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
+ set_empty_slow_element_dictionary(*slow_element_dictionary);
Handle<RegisteredSymbolTable> empty_symbol_table = RegisteredSymbolTable::New(
isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!empty_symbol_table->HasSufficientCapacityToAdd(1));
- set_public_symbol_table(*empty_symbol_table);
- set_api_symbol_table(*empty_symbol_table);
- set_api_private_symbol_table(*empty_symbol_table);
+ set_empty_symbol_table(*empty_symbol_table);
+
+ // Allocate the empty OrderedHashMap.
+ Handle<OrderedHashMap> empty_ordered_hash_map =
+ OrderedHashMap::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
+ set_empty_ordered_hash_map(*empty_ordered_hash_map);
+
+ // Allocate the empty OrderedHashSet.
+ Handle<OrderedHashSet> empty_ordered_hash_set =
+ OrderedHashSet::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
+ set_empty_ordered_hash_set(*empty_ordered_hash_set);
+
+ // Allocate the empty FeedbackMetadata.
+ Handle<FeedbackMetadata> empty_feedback_metadata =
+ factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
+ set_empty_feedback_metadata(*empty_feedback_metadata);
+
+ // Canonical scope arrays.
+ Handle<ScopeInfo> global_this_binding =
+ ScopeInfo::CreateGlobalThisBinding(isolate());
+ set_global_this_binding_scope_info(*global_this_binding);
+
+ Handle<ScopeInfo> empty_function =
+ ScopeInfo::CreateForEmptyFunction(isolate());
+ set_empty_function_scope_info(*empty_function);
+
+ Handle<ScopeInfo> native_scope_info =
+ ScopeInfo::CreateForNativeContext(isolate());
+ set_native_scope_info(*native_scope_info);
+
+ Handle<ScopeInfo> shadow_realm_scope_info =
+ ScopeInfo::CreateForShadowRealmNativeContext(isolate());
+ set_shadow_realm_scope_info(*shadow_realm_scope_info);
+
+ // Initialize the wasm null_value.
+
+#ifdef V8_ENABLE_WEBASSEMBLY
+ // Allocate the wasm-null object. It is a regular V8 heap object contained in
+ // a V8 page.
+ // In static-roots builds, it is large enough so that its payload (other than
+ // its map word) can be mprotected on OS page granularity. We adjust the
+ // layout such that we have a filler object in the current OS page, and the
+ // wasm-null map word at the end of the current OS page. The payload then is
+ // contained on a separate OS page which can be protected.
+ // In non-static-roots builds, it is a regular object of size {kTaggedSize}
+ // and does not need padding.
+
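+ // 64 KB is an upper bound on the OS page size; aligning to it lets the
+ // wasm-null payload be protected regardless of the actual page size.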
+ constexpr size_t kLargestPossibleOSPageSize = 64 * KB;
+ static_assert(kLargestPossibleOSPageSize >= kMinimumOSPageSize);
+
+ if (V8_STATIC_ROOTS_BOOL || V8_STATIC_ROOT_GENERATION_BOOL) {
+ // Ensure all of the following lands on the same V8 page.
+ constexpr int kOffsetAfterMapWord = HeapObject::kMapOffset + kTaggedSize;
+ read_only_space_->EnsureSpaceForAllocation(
+ kLargestPossibleOSPageSize + WasmNull::kSize - kOffsetAfterMapWord);
+ Address next_page =
+ RoundUp(read_only_space_->top(), kLargestPossibleOSPageSize);
+ CHECK_EQ(kOffsetAfterMapWord % kObjectAlignment, 0);
+
+ // Add some filler to end up right before an OS page boundary.
+ int filler_size = static_cast<int>(next_page - read_only_space_->top() -
+ kOffsetAfterMapWord);
+ HeapObject filler =
+ allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ filler_size, AllocationType::kReadOnly, AllocationOrigin::kRuntime,
+ AllocationAlignment::kTaggedAligned);
+ CreateFillerObjectAt(filler.address(), filler_size,
+ ClearFreedMemoryMode::kClearFreedMemory);
+ CHECK_EQ(read_only_space_->top() + kOffsetAfterMapWord, next_page);
+ }
+
+ // Finally, allocate the wasm-null object.
+ {
+ HeapObject obj;
+ CHECK(AllocateRaw(WasmNull::kSize, AllocationType::kReadOnly).To(&obj));
+ obj.set_map_after_allocation(roots.wasm_null_map(), SKIP_WRITE_BARRIER);
+ MemsetUint32(
+ reinterpret_cast<uint32_t*>(obj.ptr() - kHeapObjectTag + kTaggedSize),
+ 0, (WasmNull::kSize - kTaggedSize) / sizeof(uint32_t));
+ set_wasm_null(WasmNull::cast(obj));
+ if (V8_STATIC_ROOTS_BOOL || V8_STATIC_ROOT_GENERATION_BOOL) {
+ CHECK_EQ(read_only_space_->top() % kLargestPossibleOSPageSize, 0);
+ }
+ }
+#endif
+
+ return true;
+}
+
+void Heap::CreateMutableApiObjects() {
+ Isolate* isolate = this->isolate();
+ HandleScope scope(isolate);
+
+ set_message_listeners(*TemplateList::New(isolate, 2));
+}
+
+void Heap::CreateReadOnlyApiObjects() {
+ HandleScope scope(isolate());
+ Handle<InterceptorInfo> info =
+ Handle<InterceptorInfo>::cast(isolate()->factory()->NewStruct(
+ INTERCEPTOR_INFO_TYPE, AllocationType::kReadOnly));
+ info->set_flags(0);
+ set_noop_interceptor_info(*info);
+}
+
+void Heap::CreateInitialMutableObjects() {
+ HandleScope initial_objects_handle_scope(isolate());
+ Factory* factory = isolate()->factory();
+ ReadOnlyRoots roots(this);
+
+ // There's no "current microtask" in the beginning.
+ set_current_microtask(roots.undefined_value());
+
+ set_weak_refs_keep_during_job(roots.undefined_value());
+
+ set_public_symbol_table(roots.empty_symbol_table());
+ set_api_symbol_table(roots.empty_symbol_table());
+ set_api_private_symbol_table(roots.empty_symbol_table());
set_number_string_cache(*factory->NewFixedArray(
kInitialNumberStringCacheSize * 2, AllocationType::kOld));
- set_basic_block_profiling_data(roots.empty_array_list());
+ // Unchecked to skip failing checks since required roots are uninitialized.
+ set_basic_block_profiling_data(roots.unchecked_empty_array_list());
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
@@ -888,11 +1178,6 @@ void Heap::CreateInitialObjects() {
set_script_list(roots.empty_weak_array_list());
- Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
- isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
- DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
- set_empty_slow_element_dictionary(*slow_element_dictionary);
-
set_materialized_objects(*factory->NewFixedArray(0, AllocationType::kOld));
// Handling of script id generation is in Heap::NextScriptId().
@@ -900,47 +1185,6 @@ void Heap::CreateInitialObjects() {
set_last_debugging_id(Smi::FromInt(DebugInfo::kNoDebuggingId));
set_next_template_serial_number(Smi::zero());
- // Allocate the empty OrderedHashMap.
- Handle<OrderedHashMap> empty_ordered_hash_map =
- OrderedHashMap::AllocateEmpty(isolate(), AllocationType::kReadOnly)
- .ToHandleChecked();
- set_empty_ordered_hash_map(*empty_ordered_hash_map);
-
- // Allocate the empty OrderedHashSet.
- Handle<OrderedHashSet> empty_ordered_hash_set =
- OrderedHashSet::AllocateEmpty(isolate(), AllocationType::kReadOnly)
- .ToHandleChecked();
- set_empty_ordered_hash_set(*empty_ordered_hash_set);
-
- // Allocate the empty OrderedNameDictionary
- Handle<OrderedNameDictionary> empty_ordered_property_dictionary =
- OrderedNameDictionary::AllocateEmpty(isolate(), AllocationType::kReadOnly)
- .ToHandleChecked();
- set_empty_ordered_property_dictionary(*empty_ordered_property_dictionary);
-
- // Allocate the empty SwissNameDictionary
- Handle<SwissNameDictionary> empty_swiss_property_dictionary =
- factory->CreateCanonicalEmptySwissNameDictionary();
- set_empty_swiss_property_dictionary(*empty_swiss_property_dictionary);
-
- // Allocate the empty FeedbackMetadata.
- Handle<FeedbackMetadata> empty_feedback_metadata =
- factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
- set_empty_feedback_metadata(*empty_feedback_metadata);
-
- // Canonical scope arrays.
- Handle<ScopeInfo> global_this_binding =
- ScopeInfo::CreateGlobalThisBinding(isolate());
- set_global_this_binding_scope_info(*global_this_binding);
-
- Handle<ScopeInfo> empty_function =
- ScopeInfo::CreateForEmptyFunction(isolate());
- set_empty_function_scope_info(*empty_function);
-
- Handle<ScopeInfo> native_scope_info =
- ScopeInfo::CreateForNativeContext(isolate());
- set_native_scope_info(*native_scope_info);
-
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
@@ -966,32 +1210,12 @@ void Heap::CreateInitialObjects() {
set_set_iterator_protector(*factory->NewProtector());
set_string_iterator_protector(*factory->NewProtector());
set_string_length_protector(*factory->NewProtector());
+ set_number_string_prototype_no_replace_protector(*factory->NewProtector());
set_typed_array_species_protector(*factory->NewProtector());
set_serialized_objects(roots.empty_fixed_array());
set_serialized_global_proxy_sizes(roots.empty_fixed_array());
- /* Canonical off-heap trampoline data */
- set_off_heap_trampoline_relocation_info(
- *Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
-
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // These roots will not be used.
- HeapObject no_container = *isolate()->factory()->undefined_value();
- set_trampoline_trivial_code_data_container(no_container);
- set_trampoline_promise_rejection_code_data_container(no_container);
-
- } else {
- set_trampoline_trivial_code_data_container(
- *isolate()->factory()->NewCodeDataContainer(0,
- AllocationType::kReadOnly));
-
- set_trampoline_promise_rejection_code_data_container(
- *isolate()->factory()->NewCodeDataContainer(
- Code::IsPromiseRejectionField::encode(true),
- AllocationType::kReadOnly));
- }
-
// Evaluate the hash values which will then be cached in the strings.
isolate()->factory()->zero_string()->EnsureHash();
isolate()->factory()->one_string()->EnsureHash();
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index a3a40885f8..7caffdeeed 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -197,6 +197,22 @@ class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
return empty;
}
+
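+ // Merges the slots recorded in |other| into this slot set for the first
+ // |buckets| buckets, allocating missing buckets on demand; slots already
+ // present in this set are preserved.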
+ void Merge(SlotSet* other, size_t buckets) {
+ for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
+ Bucket* other_bucket =
+ other->LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
+ if (!other_bucket) continue;
+ Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
+ if (bucket == nullptr) {
+ bucket = new Bucket;
+ CHECK(SwapInNewBucket<AccessMode::NON_ATOMIC>(bucket_index, bucket));
+ }
+ for (int cell_index = 0; cell_index < kCellsPerBucket; cell_index++) {
+ bucket->SetCellBits(cell_index, *other_bucket->cell(cell_index));
+ }
+ }
+ }
};
static_assert(std::is_standard_layout<SlotSet>::value);
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index a29cb88d5a..822a06ec8a 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -14,9 +14,7 @@
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
-#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
-#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -229,55 +227,40 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_counter_.RemoveAllocationObserver(observer);
}
-void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
-
-void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
-
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) const {
DCHECK_GE(end - start, min_size);
- if (!allocation_info_.enabled()) {
+ // During GCs we always use the full LAB.
+ if (heap()->IsInGC()) return end;
+
+ if (!heap()->IsInlineAllocationEnabled()) {
// LABs are disabled, so we fit the requested area exactly.
return start + min_size;
}
- if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
+ // When LABs are enabled, pick the largest possible LAB size by default.
+ size_t step_size = end - start;
+
+ if (SupportsAllocationObserver() && heap()->IsAllocationObserverActive()) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
- // Generated code may allocate inline from the linear allocation area for.
- // To make sure we can observe these allocations, we use a lower limit.
size_t step = allocation_counter_.NextBytes();
DCHECK_NE(step, 0);
- size_t rounded_step =
- RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
- // Use uint64_t to avoid overflow on 32-bit
- uint64_t step_end =
- static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
- uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
- return static_cast<Address>(new_end);
+ // Generated code may allocate inline from the linear allocation area. To
+ // make sure we can observe these allocations, we use a lower limit.
+ size_t rounded_step = static_cast<size_t>(
+ RoundSizeDownToObjectAlignment(static_cast<int>(step - 1)));
+ step_size = std::min(step_size, rounded_step);
}
- // LABs are enabled and no observers attached. Return the whole node for the
- // LAB.
- return end;
-}
-
-void SpaceWithLinearArea::DisableInlineAllocation() {
- if (!allocation_info_.enabled()) return;
-
- allocation_info_.SetEnabled(false);
- FreeLinearAllocationArea();
- UpdateInlineAllocationLimit(0);
-}
-
-void SpaceWithLinearArea::EnableInlineAllocation() {
- if (allocation_info_.enabled()) return;
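+ // Under --stress-marking, additionally clamp the LAB to 64 bytes, matching
+ // the step size of the removed StressMarkingObserver.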
+ if (v8_flags.stress_marking) {
+ step_size = std::min(step_size, static_cast<size_t>(64));
+ }
- allocation_info_.SetEnabled(true);
- AdvanceAllocationObservers();
- UpdateInlineAllocationLimit(0);
+ DCHECK_LE(start + step_size, end);
+ return start + std::max(step_size, min_size);
}
void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
@@ -335,7 +318,7 @@ void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
if (!allocation_counter_.IsStepInProgress()) {
AdvanceAllocationObservers();
Space::AddAllocationObserver(observer);
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
} else {
Space::AddAllocationObserver(observer);
}
@@ -346,7 +329,7 @@ void SpaceWithLinearArea::RemoveAllocationObserver(
if (!allocation_counter_.IsStepInProgress()) {
AdvanceAllocationObservers();
Space::RemoveAllocationObserver(observer);
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
} else {
Space::RemoveAllocationObserver(observer);
}
@@ -354,20 +337,20 @@ void SpaceWithLinearArea::RemoveAllocationObserver(
void SpaceWithLinearArea::PauseAllocationObservers() {
AdvanceAllocationObservers();
- Space::PauseAllocationObservers();
}
void SpaceWithLinearArea::ResumeAllocationObservers() {
- Space::ResumeAllocationObservers();
MarkLabStartInitialized();
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
}
void SpaceWithLinearArea::AdvanceAllocationObservers() {
if (allocation_info_.top() &&
allocation_info_.start() != allocation_info_.top()) {
- allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
- allocation_info_.start());
+ if (heap()->IsAllocationObserverActive()) {
+ allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
+ allocation_info_.start());
+ }
MarkLabStartInitialized();
}
}
@@ -399,7 +382,8 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
DCHECK(size_in_bytes == aligned_size_in_bytes ||
aligned_size_in_bytes == allocation_size);
- if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;
+ if (!SupportsAllocationObserver() || !heap()->IsAllocationObserverActive())
+ return;
if (allocation_size >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
@@ -434,9 +418,8 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
}
- DCHECK_IMPLIES(allocation_counter_.IsActive(),
- (allocation_info_.limit() - allocation_info_.start()) <
- allocation_counter_.NextBytes());
+ DCHECK_LT(allocation_info_.limit() - allocation_info_.start(),
+ allocation_counter_.NextBytes());
}
#if DEBUG
@@ -458,5 +441,28 @@ int MemoryChunk::FreeListsLength() {
return length;
}
+SpaceIterator::SpaceIterator(Heap* heap)
+ : heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
+
+SpaceIterator::~SpaceIterator() = default;
+
+bool SpaceIterator::HasNext() {
+ while (current_space_ <= LAST_MUTABLE_SPACE) {
+ Space* space = heap_->space(current_space_);
+ if (space) return true;
+ ++current_space_;
+ }
+
+ // No more spaces left.
+ return false;
+}
+
+Space* SpaceIterator::Next() {
+ DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
+ Space* space = heap_->space(current_space_++);
+ DCHECK_NOT_NULL(space);
+ return space;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 48b8c9fc41..7929d53fc9 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -16,7 +16,6 @@
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
-#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk-layout.h"
@@ -37,9 +36,11 @@ class TestCodePageAllocatorScope;
class AllocationObserver;
class FreeList;
+class Heap;
class Isolate;
class LargeObjectSpace;
class LargePage;
+class ObjectIterator;
class Page;
class PagedSpaceBase;
class SemiSpace;
@@ -139,9 +140,9 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
virtual void RemoveAllocationObserver(AllocationObserver* observer);
- virtual void PauseAllocationObservers();
+ virtual void PauseAllocationObservers() {}
- virtual void ResumeAllocationObservers();
+ virtual void ResumeAllocationObservers() {}
// Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace).
@@ -314,7 +315,7 @@ class Page : public MemoryChunk {
void AllocateFreeListCategories();
void ReleaseFreeListCategories();
- ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
+ ActiveSystemPages* active_system_pages() { return active_system_pages_; }
template <RememberedSetType remembered_set>
void ClearTypedSlotsInFreeMemory(const TypedSlotSet::FreeRangesMap& ranges) {
@@ -567,12 +568,7 @@ class SpaceWithLinearArea : public Space {
// allow proper observation based on existing observers. min_size specifies
// the minimum size that the limited area should have.
Address ComputeLimit(Address start, Address end, size_t min_size) const;
- V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
- size_t min_size) = 0;
-
- void DisableInlineAllocation();
- void EnableInlineAllocation();
- bool IsInlineAllocationEnabled() const { return allocation_info_.enabled(); }
+ V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit() = 0;
void PrintAllocationsOrigins() const;
@@ -650,6 +646,19 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
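+
+// Iterates over all mutable spaces of a heap in AllocationSpace order,
+// skipping spaces that are not instantiated. Typical use:
+//   for (SpaceIterator it(heap); it.HasNext();) { Space* space = it.Next(); }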
+class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
+ public:
+ explicit SpaceIterator(Heap* heap);
+ virtual ~SpaceIterator();
+
+ bool HasNext();
+ Space* Next();
+
+ private:
+ Heap* heap_;
+ int current_space_; // from enum AllocationSpace.
+};
+
// Iterates over all memory chunks in the heap (across all spaces).
class MemoryChunkIterator {
public:
diff --git a/deps/v8/src/heap/stress-marking-observer.cc b/deps/v8/src/heap/stress-marking-observer.cc
deleted file mode 100644
index 3183984d0d..0000000000
--- a/deps/v8/src/heap/stress-marking-observer.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/stress-marking-observer.h"
-#include "src/heap/incremental-marking.h"
-
-namespace v8 {
-namespace internal {
-
-// TODO(majeski): meaningful step_size
-StressMarkingObserver::StressMarkingObserver(Heap* heap)
- : AllocationObserver(64), heap_(heap) {}
-
-void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
- size_t size) {
- heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
deleted file mode 100644
index 5736ba9289..0000000000
--- a/deps/v8/src/heap/stress-marking-observer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_STRESS_MARKING_OBSERVER_H_
-#define V8_HEAP_STRESS_MARKING_OBSERVER_H_
-
-#include "src/heap/heap.h"
-
-namespace v8 {
-namespace internal {
-
-class StressMarkingObserver : public AllocationObserver {
- public:
- explicit StressMarkingObserver(Heap* heap);
-
- void Step(int bytes_allocated, Address soon_object, size_t size) override;
-
- private:
- Heap* heap_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_STRESS_MARKING_OBSERVER_H_
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
index 4c72416e8c..15407358d5 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.cc
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -33,7 +33,7 @@ void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
}
double current_percent =
- heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->TotalCapacity();
if (v8_flags.trace_stress_scavenge) {
heap_->isolate()->PrintWithTimestamp(
@@ -64,8 +64,9 @@ bool StressScavengeObserver::HasRequestedGC() const {
void StressScavengeObserver::RequestedGCDone() {
size_t new_space_size = heap_->new_space()->Size();
double current_percent =
- new_space_size ? new_space_size * 100.0 / heap_->new_space()->Capacity()
- : 0;
+ new_space_size
+ ? new_space_size * 100.0 / heap_->new_space()->TotalCapacity()
+ : 0;
limit_percentage_ = NextLimit(static_cast<int>(current_percent));
if (v8_flags.trace_stress_scavenge) {
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index c9c282fb15..84c4406607 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -4,9 +4,12 @@
#include "src/heap/sweeper.h"
+#include <algorithm>
+#include <atomic>
#include <memory>
#include <vector>
+#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
@@ -18,11 +21,16 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/marking-state.h"
+#include "src/heap/memory-allocator.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/pretenuring-handler-inl.h"
#include "src/heap/pretenuring-handler.h"
#include "src/heap/remembered-set.h"
+#include "src/heap/slot-set.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -31,36 +39,44 @@ namespace internal {
class Sweeper::ConcurrentSweeper final {
public:
explicit ConcurrentSweeper(Sweeper* sweeper)
- : sweeper_(sweeper),
- local_pretenuring_feedback_(
- PretenturingHandler::kInitialFeedbackCapacity) {}
+ : sweeper_(sweeper), local_sweeper_(sweeper_) {}
bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate) {
DCHECK(IsValidSweepingSpace(identity));
while (!delegate->ShouldYield()) {
Page* page = sweeper_->GetSweepingPageSafe(identity);
if (page == nullptr) return true;
- sweeper_->ParallelSweepPage(page, identity, &local_pretenuring_feedback_,
- SweepingMode::kLazyOrConcurrent);
+ local_sweeper_.ParallelSweepPage(page, identity,
+ SweepingMode::kLazyOrConcurrent);
}
return false;
}
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback() {
- return &local_pretenuring_feedback_;
+ bool ConcurrentSweepForRememberedSet(JobDelegate* delegate) {
+ if (!sweeper_->should_iterate_promoted_pages_) {
+ local_sweeper_.CleanPromotedPages();
+ return true;
+ }
+ while (!delegate->ShouldYield()) {
+ MemoryChunk* chunk = sweeper_->GetPromotedPageForIterationSafe();
+ if (chunk == nullptr) return true;
+ local_sweeper_.ParallelIteratePromotedPageForRememberedSets(chunk);
+ }
+ return false;
}
+ void Finalize() { local_sweeper_.Finalize(); }
+
private:
Sweeper* const sweeper_;
- PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
+ LocalSweeper local_sweeper_;
};
class Sweeper::SweeperJob final : public JobTask {
public:
- SweeperJob(Isolate* isolate, Sweeper* sweeper,
- std::vector<ConcurrentSweeper>* concurrent_sweepers)
+ SweeperJob(Isolate* isolate, Sweeper* sweeper)
: sweeper_(sweeper),
- concurrent_sweepers_(concurrent_sweepers),
+ concurrent_sweepers_(sweeper_->concurrent_sweepers_),
tracer_(isolate->heap()->tracer()) {}
~SweeperJob() override = default;
@@ -77,7 +93,7 @@ class Sweeper::SweeperJob final : public JobTask {
size_t GetMaxConcurrency(size_t worker_count) const override {
const size_t kPagePerTask = 2;
return std::min<size_t>(
- concurrent_sweepers_->size(),
+ concurrent_sweepers_.size(),
worker_count +
(sweeper_->ConcurrentSweepingPageCount() + kPagePerTask - 1) /
kPagePerTask);
@@ -87,8 +103,30 @@ class Sweeper::SweeperJob final : public JobTask {
void RunImpl(JobDelegate* delegate, bool is_joining_thread) {
static_assert(NEW_SPACE == FIRST_SWEEPABLE_SPACE);
const int offset = delegate->GetTaskId();
- DCHECK_LT(offset, concurrent_sweepers_->size());
- ConcurrentSweeper& concurrent_sweeper = (*concurrent_sweepers_)[offset];
+ DCHECK_LT(offset, concurrent_sweepers_.size());
+ ConcurrentSweeper& concurrent_sweeper = concurrent_sweepers_[offset];
+ {
+ TRACE_GC_EPOCH(
+ tracer_, sweeper_->GetTracingScope(NEW_SPACE, is_joining_thread),
+ is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground);
+ // Prioritize sweeping new space pages first. Young allocations are the
+ // most prominent, so these pages are most likely to be needed soon.
+ if (!concurrent_sweeper.ConcurrentSweepSpace(NEW_SPACE, delegate)) return;
+ if (!sweeper_->should_sweep_non_new_spaces_) {
+ // If only new space needs to be swept, iterate promoted pages and
+ // return.
+ concurrent_sweeper.ConcurrentSweepForRememberedSet(delegate);
+ return;
+ }
+ }
+ // When non-new spaces also require sweeping, minor sweeping (i.e. iteration
+ // of promoted pages, which will be needed for the next minor GC) and major
+ // sweeping (i.e. of non-new spaces) are interleaved within the same sweeping
+ // tasks (to balance/mitigate further old allocations needing to sweep on
+ // the main thread and minor GCs needing to iterate pages during a complete
+ // sweep). Each task starts sweeping from a different space (to reduce
+ // contention), and moves on to the next space once the current space is
+ // done, until all spaces are swept.
if (offset > 0) {
if (!SweepNonNewSpaces(concurrent_sweeper, delegate, is_joining_thread,
offset, kNumberOfSweepingSpaces))
@@ -98,7 +136,7 @@ class Sweeper::SweeperJob final : public JobTask {
TRACE_GC_EPOCH(
tracer_, sweeper_->GetTracingScope(NEW_SPACE, is_joining_thread),
is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground);
- if (!concurrent_sweeper.ConcurrentSweepSpace(NEW_SPACE, delegate)) return;
+ if (!concurrent_sweeper.ConcurrentSweepForRememberedSet(delegate)) return;
}
if (!SweepNonNewSpaces(concurrent_sweeper, delegate, is_joining_thread, 1,
offset == 0 ? kNumberOfSweepingSpaces : offset))
@@ -108,7 +146,7 @@ class Sweeper::SweeperJob final : public JobTask {
bool SweepNonNewSpaces(ConcurrentSweeper& concurrent_sweeper,
JobDelegate* delegate, bool is_joining_thread,
int first_space_index, int last_space_index) {
- if (!sweeper_->should_sweep_non_new_spaces_) return true;
+ DCHECK(sweeper_->should_sweep_non_new_spaces_);
TRACE_GC_EPOCH(
tracer_, sweeper_->GetTracingScope(OLD_SPACE, is_joining_thread),
is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground);
@@ -123,22 +161,145 @@ class Sweeper::SweeperJob final : public JobTask {
}
Sweeper* const sweeper_;
- std::vector<ConcurrentSweeper>* const concurrent_sweepers_;
+ std::vector<ConcurrentSweeper>& concurrent_sweepers_;
GCTracer* const tracer_;
};
+namespace {
+void AssertMainThreadOrSharedMainThread(Heap* heap) {
+ DCHECK(heap->IsMainThread() || (heap->IsSharedMainThread() &&
+ !heap->isolate()->is_shared_space_isolate()));
+}
+} // namespace
+
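+// Merges the OLD_TO_NEW remembered sets collected by this local sweeper back
+// into their owning pages; must run on the main (or shared main) thread.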
+void Sweeper::LocalSweeper::Finalize() {
+ AssertMainThreadOrSharedMainThread(sweeper_->heap_);
+ for (auto it : old_to_new_remembered_sets_) {
+ MemoryChunk* chunk = it.first;
+ RememberedSet<OLD_TO_NEW>::MergeAndDelete(chunk, it.second);
+ }
+ old_to_new_remembered_sets_.clear();
+}
+
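+// Helps iterate any remaining promoted pages and then blocks until the
+// promoted-page iteration phase is signalled as finished.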
+void Sweeper::LocalSweeper::ContributeAndWaitForPromotedPagesIteration() {
+ if (!sweeper_->sweeping_in_progress()) return;
+ if (!sweeper_->IsIteratingPromotedPages()) return;
+ ParallelIteratePromotedPagesForRememberedSets();
+ base::MutexGuard guard(
+ &sweeper_->promoted_pages_iteration_notification_mutex_);
+ // Check again that iteration is not yet finished.
+ if (!sweeper_->IsIteratingPromotedPages()) return;
+ sweeper_->promoted_pages_iteration_notification_variable_.Wait(
+ &sweeper_->promoted_pages_iteration_notification_mutex_);
+}
+
+int Sweeper::LocalSweeper::ParallelSweepSpace(AllocationSpace identity,
+ SweepingMode sweeping_mode,
+ int required_freed_bytes,
+ int max_pages) {
+ int max_freed = 0;
+ int pages_freed = 0;
+ Page* page = nullptr;
+ while ((page = sweeper_->GetSweepingPageSafe(identity)) != nullptr) {
+ int freed = ParallelSweepPage(page, identity, sweeping_mode);
+ ++pages_freed;
+ if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ // Free list of a never-allocate page will be dropped later on.
+ continue;
+ }
+ DCHECK_GE(freed, 0);
+ max_freed = std::max(max_freed, freed);
+ if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
+ return max_freed;
+ if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
+ }
+ return max_freed;
+}
+
+int Sweeper::LocalSweeper::ParallelSweepPage(Page* page,
+ AllocationSpace identity,
+ SweepingMode sweeping_mode) {
+ DCHECK(IsValidSweepingSpace(identity));
+
+ // The Scavenger may add already swept pages back.
+ if (page->SweepingDone()) return 0;
+
+ int max_freed = 0;
+ {
+ base::MutexGuard guard(page->mutex());
+ DCHECK(!page->SweepingDone());
+ // If the page is a code page, the CodePageMemoryModificationScope changes
+ // the page protection mode from rx -> rw while sweeping.
+ CodePageMemoryModificationScope code_page_scope(page);
+
+ DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
+ page->concurrent_sweeping_state());
+ page->set_concurrent_sweeping_state(
+ Page::ConcurrentSweepingState::kInProgress);
+ const FreeSpaceTreatmentMode free_space_treatment_mode =
+ Heap::ShouldZapGarbage() ? FreeSpaceTreatmentMode::kZapFreeSpace
+ : FreeSpaceTreatmentMode::kIgnoreFreeSpace;
+ max_freed =
+ sweeper_->RawSweep(page, free_space_treatment_mode, sweeping_mode);
+ DCHECK(page->SweepingDone());
+ }
+
+ sweeper_->AddSweptPage(page, identity);
+
+ return max_freed;
+}
+
+void Sweeper::LocalSweeper::ParallelIteratePromotedPagesForRememberedSets() {
+ if (sweeper_->should_iterate_promoted_pages_) {
+ MemoryChunk* chunk = nullptr;
+ while ((chunk = sweeper_->GetPromotedPageForIterationSafe()) != nullptr) {
+ ParallelIteratePromotedPageForRememberedSets(chunk);
+ }
+ } else {
+ CleanPromotedPages();
+ }
+}
+
+void Sweeper::LocalSweeper::ParallelIteratePromotedPageForRememberedSets(
+ MemoryChunk* chunk) {
+ DCHECK_NOT_NULL(chunk);
+ base::MutexGuard guard(chunk->mutex());
+ DCHECK(!chunk->SweepingDone());
+ DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
+ chunk->concurrent_sweeping_state());
+ chunk->set_concurrent_sweeping_state(
+ Page::ConcurrentSweepingState::kInProgress);
+ DCHECK(sweeper_->should_iterate_promoted_pages_);
+ sweeper_->RawIteratePromotedPageForRememberedSets(
+ chunk, &old_to_new_remembered_sets_);
+ DCHECK(chunk->SweepingDone());
+ sweeper_->IncrementAndNotifyPromotedPagesIterationFinishedIfNeeded();
+}
+
+void Sweeper::LocalSweeper::CleanPromotedPages() {
+ DCHECK(!sweeper_->should_iterate_promoted_pages_);
+ std::vector<MemoryChunk*> promoted_pages =
+ sweeper_->GetAllPromotedPagesForIterationSafe();
+ if (promoted_pages.empty()) return;
+ for (MemoryChunk* chunk : promoted_pages) {
+ sweeper_->marking_state_->ClearLiveness(chunk);
+ chunk->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
+ }
+ DCHECK_EQ(0u, sweeper_->iterated_promoted_pages_count_);
+ sweeper_->iterated_promoted_pages_count_ = promoted_pages.size();
+ sweeper_->NotifyPromotedPagesIterationFinished();
+}
+
Sweeper::Sweeper(Heap* heap)
: heap_(heap),
marking_state_(heap_->non_atomic_marking_state()),
sweeping_in_progress_(false),
should_reduce_memory_(false),
- pretenuring_handler_(heap_->pretenuring_handler()),
- local_pretenuring_feedback_(
- PretenturingHandler::kInitialFeedbackCapacity) {}
+ main_thread_local_sweeper_(this) {}
Sweeper::~Sweeper() {
DCHECK(concurrent_sweepers_.empty());
- DCHECK(local_pretenuring_feedback_.empty());
+ DCHECK(main_thread_local_sweeper_.IsEmpty());
}
Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
@@ -183,8 +344,28 @@ void Sweeper::TearDown() {
if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
}
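+// Snapshots the memory allocator's normal/large page sets (including the
+// shared heap's, when a shared space exists) for later use while iterating
+// promoted pages for remembered sets.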
+void Sweeper::SnapshotPageSets() {
+ DCHECK(heap_->IsMainThread());
+ // No mutex needed for the main thread.
+ std::tie(snapshot_normal_pages_set_, snapshot_large_pages_set_) =
+ heap_->memory_allocator()->SnapshotPageSetsUnsafe();
+ if (heap_->isolate()->has_shared_space()) {
+ Heap* shared_heap = heap_->isolate()->shared_space_isolate()->heap();
+ if (shared_heap == heap_) {
+ // Current heap is the shared heap, thus all relevant pages have already
+ // been snapshotted and no lock is required.
+ snapshot_shared_normal_pages_set_ = snapshot_normal_pages_set_;
+ snapshot_shared_large_pages_set_ = snapshot_large_pages_set_;
+ } else {
+ std::tie(snapshot_shared_normal_pages_set_,
+ snapshot_shared_large_pages_set_) =
+ shared_heap->memory_allocator()->SnapshotPageSetsSafe();
+ }
+ }
+}
+
void Sweeper::StartSweeping(GarbageCollector collector) {
- DCHECK(local_pretenuring_feedback_.empty());
+ DCHECK(main_thread_local_sweeper_.IsEmpty());
sweeping_in_progress_ = true;
if (collector == GarbageCollector::MARK_COMPACTOR)
should_sweep_non_new_spaces_ = true;
@@ -213,9 +394,42 @@ int Sweeper::NumberOfConcurrentSweepers() const {
V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
}
+namespace {
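+// Promoted pages need their remembered sets rebuilt only if there are
+// possible slot targets: live new-space objects, a shared heap, or when
+// garbage zapping is enabled.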
+bool ShouldUpdateRememberedSets(Heap* heap) {
+ DCHECK_EQ(0, heap->new_lo_space()->Size());
+ if (heap->new_space()->Size() > 0) {
+ // Keep track of OLD_TO_NEW slots
+ return true;
+ }
+ if (heap->isolate()->has_shared_space()) {
+ // Keep track of OLD_TO_SHARED slots
+ return true;
+ }
+ if (heap->ShouldZapGarbage()) {
+ return true;
+ }
+ return false;
+}
+} // namespace
+
void Sweeper::StartSweeperTasks() {
DCHECK(current_new_space_collector_.has_value());
DCHECK(!job_handle_ || !job_handle_->IsValid());
+ if (promoted_pages_for_iteration_count_ > 0) {
+ DCHECK(v8_flags.minor_mc);
+ DCHECK_EQ(GarbageCollector::MINOR_MARK_COMPACTOR,
+ current_new_space_collector_);
+
+ DCHECK(snapshot_normal_pages_set_.empty());
+ DCHECK(snapshot_large_pages_set_.empty());
+
+ DCHECK(!promoted_page_iteration_in_progress_);
+ should_iterate_promoted_pages_ = ShouldUpdateRememberedSets(heap_);
+ if (should_iterate_promoted_pages_) {
+ SnapshotPageSets();
+ }
+ promoted_page_iteration_in_progress_.store(true, std::memory_order_release);
+ }
if (v8_flags.concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
if (concurrent_sweepers_.empty()) {
@@ -226,83 +440,151 @@ void Sweeper::StartSweeperTasks() {
DCHECK_EQ(NumberOfConcurrentSweepers(), concurrent_sweepers_.size());
job_handle_ = V8::GetCurrentPlatform()->PostJob(
TaskPriority::kUserVisible,
- std::make_unique<SweeperJob>(heap_->isolate(), this,
- &concurrent_sweepers_));
+ std::make_unique<SweeperJob>(heap_->isolate(), this));
}
}
Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
base::MutexGuard guard(&mutex_);
SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
+ Page* page = nullptr;
if (!list.empty()) {
- auto last_page = list.back();
+ page = list.back();
list.pop_back();
- return last_page;
}
- return nullptr;
+ if (list.empty()) {
+ has_swept_pages_[GetSweepSpaceIndex(space->identity())].store(
+ false, std::memory_order_release);
+ }
+ return page;
+}
+
+Sweeper::SweptList Sweeper::GetAllSweptPagesSafe(PagedSpaceBase* space) {
+ base::MutexGuard guard(&mutex_);
+ SweptList list;
+ list.swap(swept_list_[GetSweepSpaceIndex(space->identity())]);
+ has_swept_pages_[GetSweepSpaceIndex(space->identity())].store(
+ false, std::memory_order_release);
+ return list;
+}
+
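+// Folds the state of the main-thread and concurrent local sweepers back into
+// the heap once promoted-page iteration has completed.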
+void Sweeper::FinalizeLocalSweepers() {
+ DCHECK_EQ(promoted_pages_for_iteration_count_,
+ iterated_promoted_pages_count_);
+ main_thread_local_sweeper_.Finalize();
+
+ for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
+ concurrent_sweeper.Finalize();
+ }
}
void Sweeper::EnsureCompleted() {
+ AssertMainThreadOrSharedMainThread(heap_);
+
if (!sweeping_in_progress_) return;
// If sweeping is not completed or not running at all, we try to complete it
// here.
- ForAllSweepingSpaces([this](AllocationSpace space) {
- ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
- });
+ if (should_sweep_non_new_spaces_) {
+ TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (space == NEW_SPACE) return;
+ main_thread_local_sweeper_.ParallelSweepSpace(
+ space, SweepingMode::kLazyOrConcurrent, 0);
+ });
+ }
+ TRACE_GC_EPOCH(heap_->tracer(), GetTracingScopeForCompleteYoungSweep(),
+ ThreadKind::kMain);
+ main_thread_local_sweeper_.ParallelSweepSpace(
+ NEW_SPACE, SweepingMode::kLazyOrConcurrent, 0);
+ // Array buffer sweeper may have grabbed a page for iteration to contribute.
+ // Wait until it has finished iterating.
+ main_thread_local_sweeper_.ContributeAndWaitForPromotedPagesIteration();
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
ForAllSweepingSpaces([this](AllocationSpace space) {
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
+ DCHECK(IsSweepingDoneForSpace(space));
});
- pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
- local_pretenuring_feedback_);
- local_pretenuring_feedback_.clear();
- for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
- pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
- *concurrent_sweeper.local_pretenuring_feedback());
- // No need to clear the concurrent feedback map since the concurrent sweeper
- // goes away.
- }
+ FinalizeLocalSweepers();
+ DCHECK(main_thread_local_sweeper_.IsEmpty());
+
concurrent_sweepers_.clear();
current_new_space_collector_.reset();
should_sweep_non_new_spaces_ = false;
+ {
+ base::MutexGuard guard(&promoted_pages_iteration_notification_mutex_);
+ DCHECK_EQ(promoted_pages_for_iteration_count_,
+ iterated_promoted_pages_count_);
+ base::AsAtomicPtr(&promoted_pages_for_iteration_count_)
+ ->store(0, std::memory_order_relaxed);
+ iterated_promoted_pages_count_ = 0;
+ }
+ snapshot_large_pages_set_.clear();
+ snapshot_normal_pages_set_.clear();
sweeping_in_progress_ = false;
}
void Sweeper::PauseAndEnsureNewSpaceCompleted() {
+ AssertMainThreadOrSharedMainThread(heap_);
+
if (!sweeping_in_progress_) return;
- ParallelSweepSpace(NEW_SPACE, SweepingMode::kLazyOrConcurrent, 0);
+ main_thread_local_sweeper_.ParallelSweepSpace(
+ NEW_SPACE, SweepingMode::kLazyOrConcurrent, 0);
+ // Array buffer sweeper may have grabbed a page for iteration to contribute.
+ // Wait until it has finished iterating.
+ main_thread_local_sweeper_.ContributeAndWaitForPromotedPagesIteration();
if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
CHECK(sweeping_list_[GetSweepSpaceIndex(NEW_SPACE)].empty());
+ DCHECK(IsSweepingDoneForSpace(NEW_SPACE));
+ CHECK(sweeping_list_for_promoted_page_iteration_.empty());
- pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
- local_pretenuring_feedback_);
- local_pretenuring_feedback_.clear();
- for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
- pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
- *concurrent_sweeper.local_pretenuring_feedback());
- concurrent_sweeper.local_pretenuring_feedback()->clear();
- }
+ FinalizeLocalSweepers();
+ DCHECK(main_thread_local_sweeper_.IsEmpty());
current_new_space_collector_.reset();
+ DCHECK_EQ(promoted_pages_for_iteration_count_,
+ iterated_promoted_pages_count_);
+ base::AsAtomicPtr(&promoted_pages_for_iteration_count_)
+ ->store(0, std::memory_order_relaxed);
+ iterated_promoted_pages_count_ = 0;
+ snapshot_large_pages_set_.clear();
+ snapshot_normal_pages_set_.clear();
}
void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
if (!sweeping_in_progress_) return;
- ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
+ main_thread_local_sweeper_.ParallelSweepSpace(
+ space, SweepingMode::kLazyOrConcurrent, 0);
}
bool Sweeper::AreSweeperTasksRunning() {
return job_handle_ && job_handle_->IsValid() && job_handle_->IsActive();
}
+namespace {
+// Atomically zap the specified area.
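+// Tagged-size relaxed atomic stores (rather than a plain memset) keep the
+// writes well-defined for threads that may read the memory concurrently.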
+V8_INLINE void AtomicZapBlock(Address addr, size_t size_in_bytes) {
+ static_assert(sizeof(Tagged_t) == kTaggedSize);
+ static constexpr Tagged_t kZapTagged = static_cast<Tagged_t>(kZapValue);
+ DCHECK(IsAligned(addr, kTaggedSize));
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
+ const size_t size_in_tagged = size_in_bytes / kTaggedSize;
+ Tagged_t* current_addr = reinterpret_cast<Tagged_t*>(addr);
+ for (size_t i = 0; i < size_in_tagged; ++i) {
+ base::AsAtomicPtr(current_addr++)
+ ->store(kZapTagged, std::memory_order_relaxed);
+ }
+}
+} // namespace
+
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space,
FreeSpaceTreatmentMode free_space_treatment_mode) {
@@ -310,7 +592,7 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
size_t freed_bytes = 0;
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_treatment_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
- ZapCode(free_start, size);
+ AtomicZapBlock(free_start, size);
}
page->heap()->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size));
freed_bytes = reinterpret_cast<PagedSpaceBase*>(space)->UnaccountedFree(
@@ -324,6 +606,7 @@ V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
Address free_start, Address free_end, Page* page, bool record_free_ranges,
TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* invalidated_old_to_new_cleanup,
+ InvalidatedSlotsCleanup* invalidated_old_to_old_cleanup,
InvalidatedSlotsCleanup* invalidated_old_to_shared_cleanup) {
DCHECK_LE(free_start, free_end);
if (sweeping_mode == SweepingMode::kEagerDuringGC) {
@@ -355,6 +638,7 @@ V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
}
invalidated_old_to_new_cleanup->Free(free_start, free_end);
+ invalidated_old_to_old_cleanup->Free(free_start, free_end);
invalidated_old_to_shared_cleanup->Free(free_start, free_end);
}
@@ -391,10 +675,8 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(Page* page,
DCHECK_EQ(live_bytes, page->allocated_bytes());
}
-int Sweeper::RawSweep(
- Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
- SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback) {
+int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
+ SweepingMode sweeping_mode) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
@@ -441,20 +723,19 @@ int Sweeper::RawSweep(
// in free memory.
InvalidatedSlotsCleanup invalidated_old_to_new_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
+ InvalidatedSlotsCleanup invalidated_old_to_old_cleanup =
+ InvalidatedSlotsCleanup::NoCleanup(p);
InvalidatedSlotsCleanup invalidated_old_to_shared_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
if (sweeping_mode == SweepingMode::kEagerDuringGC) {
invalidated_old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
+ invalidated_old_to_old_cleanup = InvalidatedSlotsCleanup::OldToOld(p);
invalidated_old_to_shared_cleanup = InvalidatedSlotsCleanup::OldToShared(p);
}
// The free ranges map is used for filtering typed slots.
TypedSlotSet::FreeRangesMap free_ranges_map;
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- p->object_start_bitmap()->Clear();
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
// Iterate over the page using the live objects and free the memory before
// the given live object.
Address free_start = p->area_start();
@@ -463,7 +744,7 @@ int Sweeper::RawSweep(
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject const object = object_and_size.first;
if (code_object_registry) code_objects.push_back(object.address());
- DCHECK(marking_state_->IsBlack(object));
+ DCHECK(marking_state_->IsMarked(object));
Address free_end = object.address();
if (free_end != free_start) {
max_freed_bytes =
@@ -473,7 +754,7 @@ int Sweeper::RawSweep(
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, record_free_ranges, &free_ranges_map,
sweeping_mode, &invalidated_old_to_new_cleanup,
- &invalidated_old_to_shared_cleanup);
+ &invalidated_old_to_old_cleanup, &invalidated_old_to_shared_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
DCHECK(MarkCompactCollector::IsMapOrForwarded(map));
@@ -481,20 +762,11 @@ int Sweeper::RawSweep(
live_bytes += size;
free_start = free_end + size;
- if (p->InYoungGeneration()) {
- pretenuring_handler_->UpdateAllocationSite(map, object,
- local_pretenuring_feedback);
- }
-
if (active_system_pages_after_sweeping) {
active_system_pages_after_sweeping->Add(
free_end - p->address(), free_start - p->address(),
MemoryAllocator::GetCommitPageSizeBits());
}
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- p->object_start_bitmap()->SetBit(object.address());
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
}
// If there is free memory after the last live object also free that.
@@ -507,7 +779,7 @@ int Sweeper::RawSweep(
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, record_free_ranges, &free_ranges_map,
sweeping_mode, &invalidated_old_to_new_cleanup,
- &invalidated_old_to_shared_cleanup);
+ &invalidated_old_to_old_cleanup, &invalidated_old_to_shared_cleanup);
}
// Phase 3: Post process the page.
@@ -521,92 +793,273 @@ int Sweeper::RawSweep(
*active_system_pages_after_sweeping);
}
- if (code_object_registry)
+ if (code_object_registry) {
code_object_registry->ReinitializeFrom(std::move(code_objects));
+ }
p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
return static_cast<int>(
p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
-size_t Sweeper::ConcurrentSweepingPageCount() {
- base::MutexGuard guard(&mutex_);
- size_t count = 0;
- for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
- count += sweeping_list_[i].size();
+namespace {
+
+class PromotedPageRecordMigratedSlotVisitor
+ : public ObjectVisitorWithCageBases {
+ public:
+ PromotedPageRecordMigratedSlotVisitor(
+ Heap* heap,
+ Sweeper::CachedOldToNewRememberedSets*
+ snapshot_old_to_new_remembered_sets,
+ const MemoryAllocator::NormalPagesSet& snapshot_normal_pages_set,
+ const MemoryAllocator::LargePagesSet& snapshot_large_pages_set,
+ const MemoryAllocator::NormalPagesSet& snapshot_shared_normal_pages_set,
+ const MemoryAllocator::LargePagesSet& snapshot_shared_large_pages_set)
+ : ObjectVisitorWithCageBases(heap->isolate()),
+ heap_(heap),
+ snapshot_old_to_new_remembered_sets_(
+ snapshot_old_to_new_remembered_sets),
+ snapshot_normal_pages_set_(snapshot_normal_pages_set),
+ snapshot_large_pages_set_(snapshot_large_pages_set),
+ snapshot_shared_normal_pages_set_(snapshot_shared_normal_pages_set),
+ snapshot_shared_large_pages_set_(snapshot_shared_large_pages_set) {}
+
+ inline void VisitPointer(HeapObject host, ObjectSlot p) final {
+ DCHECK(!HasWeakHeapObjectTag(p.Relaxed_Load(cage_base())));
+ RecordMigratedSlot(host,
+ MaybeObject::FromObject(p.Relaxed_Load(cage_base())),
+ p.address());
}
- return count;
-}
-int Sweeper::ParallelSweepSpace(AllocationSpace identity,
- SweepingMode sweeping_mode,
- int required_freed_bytes, int max_pages) {
- int max_freed = 0;
- int pages_freed = 0;
- Page* page = nullptr;
- while ((page = GetSweepingPageSafe(identity)) != nullptr) {
- int freed = ParallelSweepPage(page, identity, &local_pretenuring_feedback_,
- sweeping_mode);
- ++pages_freed;
- if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- // Free list of a never-allocate page will be dropped later on.
- continue;
+ inline void VisitMapPointer(HeapObject host) final {
+ VisitPointer(host, host.map_slot());
+ }
+
+ inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
+ RecordMigratedSlot(host, p.Relaxed_Load(cage_base()), p.address());
+ }
+
+ inline void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
+ while (start < end) {
+ VisitPointer(host, start);
+ ++start;
}
- DCHECK_GE(freed, 0);
- max_freed = std::max(max_freed, freed);
- if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
- return max_freed;
- if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
}
- return max_freed;
+
+ inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ while (start < end) {
+ VisitPointer(host, start);
+ ++start;
+ }
+ }
+
+ inline void VisitCodePointer(Code host, CodeObjectSlot slot) final {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // This code is similar to the implementation of VisitPointer() modulo
+ // new kind of slot.
+ DCHECK(!HasWeakHeapObjectTag(slot.Relaxed_Load(code_cage_base())));
+ Object code = slot.Relaxed_Load(code_cage_base());
+ RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
+ }
+
+ inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
+ ObjectSlot value) override {
+ DCHECK(host.IsEphemeronHashTable());
+ DCHECK(!Heap::InYoungGeneration(host));
+
+ VisitPointer(host, value);
+ VisitPointer(host, key);
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(RelocInfo* rinfo) final { UNREACHABLE(); }
+
+ // Entries that are skipped for recording.
+ inline void VisitExternalReference(RelocInfo* rinfo) final {}
+ inline void VisitInternalReference(RelocInfo* rinfo) final {}
+ inline void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
+ ExternalPointerTag tag) final {}
+
+ protected:
+ inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
+ Address slot) {
+ DCHECK(!host.InWritableSharedSpace());
+ DCHECK(!Heap::InYoungGeneration(host));
+ DCHECK(!MemoryChunk::FromHeapObject(host)->SweepingDone());
+ if (value->IsStrongOrWeak()) {
+ RecordOldToNewMigratedSlot(host, value, slot);
+ RecordOldToSharedMigratedSlot(host, value, slot);
+ }
+ }
+
+ inline void RecordOldToNewMigratedSlot(HeapObject host, MaybeObject value,
+ Address slot) {
+ const MemoryChunk* value_chunk =
+ MemoryAllocator::LookupChunkContainingAddress(
+ snapshot_normal_pages_set_, snapshot_large_pages_set_, value.ptr());
+ if (!value_chunk) return;
+#ifdef THREAD_SANITIZER
+ value_chunk->SynchronizedHeapLoad();
+#endif // THREAD_SANITIZER
+ if (!value_chunk->InYoungGeneration()) return;
+ MemoryChunk* host_chunk = MemoryChunk::FromHeapObject(host);
+ if (snapshot_old_to_new_remembered_sets_->find(host_chunk) ==
+ snapshot_old_to_new_remembered_sets_->end()) {
+ snapshot_old_to_new_remembered_sets_->emplace(
+ host_chunk, SlotSet::Allocate(host_chunk->buckets()));
+ }
+ RememberedSetOperations::Insert<AccessMode::NON_ATOMIC>(
+ (*snapshot_old_to_new_remembered_sets_)[host_chunk], host_chunk, slot);
+ }
+
+ inline void RecordOldToSharedMigratedSlot(HeapObject host, MaybeObject value,
+ Address slot) {
+ const MemoryChunk* value_chunk =
+ MemoryAllocator::LookupChunkContainingAddress(
+ snapshot_shared_normal_pages_set_, snapshot_shared_large_pages_set_,
+ value.ptr());
+ if (!value_chunk) return;
+#ifdef THREAD_SANITIZER
+ value_chunk->SynchronizedHeapLoad();
+#endif // THREAD_SANITIZER
+ if (!value_chunk->InWritableSharedSpace()) return;
+ RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot);
+ }
+
+ Heap* const heap_;
+ Sweeper::CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets_;
+ const MemoryAllocator::NormalPagesSet& snapshot_normal_pages_set_;
+ const MemoryAllocator::LargePagesSet& snapshot_large_pages_set_;
+ const MemoryAllocator::NormalPagesSet& snapshot_shared_normal_pages_set_;
+ const MemoryAllocator::LargePagesSet& snapshot_shared_large_pages_set_;
+};
+
+inline void HandlePromotedObject(
+ HeapObject object, NonAtomicMarkingState* marking_state,
+ PtrComprCageBase cage_base,
+ PromotedPageRecordMigratedSlotVisitor* record_visitor) {
+ DCHECK(marking_state->IsMarked(object));
+ DCHECK(!IsCodeSpaceObject(object));
+ object.IterateFast(cage_base, record_visitor);
+ if (object.IsJSArrayBuffer()) {
+ JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
+ }
}
-int Sweeper::ParallelSweepPage(
- Page* page, AllocationSpace identity,
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
- SweepingMode sweeping_mode) {
- DCHECK(IsValidSweepingSpace(identity));
+inline void HandleFreeSpace(Address free_start, Address free_end, Heap* heap) {
+ if (!heap->ShouldZapGarbage()) return;
+ if (free_end == free_start) return;
+ size_t size = static_cast<size_t>(free_end - free_start);
+ DCHECK(
+ heap->non_atomic_marking_state()
+ ->bitmap(Page::FromAddress(free_start))
+ ->AllBitsClearInRange(
+ Page::FromAddress(free_start)->AddressToMarkbitIndex(free_start),
+ Page::FromAddress(free_start)->AddressToMarkbitIndex(free_end)));
+ AtomicZapBlock(free_start, size);
+ heap->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size));
+}
- // The Scavenger may add already swept pages back.
- if (page->SweepingDone()) return 0;
+} // namespace
- int max_freed = 0;
- {
- base::MutexGuard guard(page->mutex());
- DCHECK(!page->SweepingDone());
- // If the page is a code page, the CodePageMemoryModificationScope changes
- // the page protection mode from rx -> rw while sweeping.
- CodePageMemoryModificationScope code_page_scope(page);
+void Sweeper::RawIteratePromotedPageForRememberedSets(
+ MemoryChunk* chunk,
+ CachedOldToNewRememberedSets* old_to_new_remembered_sets) {
+ DCHECK(v8_flags.minor_mc);
+ DCHECK(chunk->owner_identity() == OLD_SPACE ||
+ chunk->owner_identity() == LO_SPACE);
+ DCHECK(!chunk->SweepingDone());
+ DCHECK(!chunk->IsEvacuationCandidate());
- DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
- page->concurrent_sweeping_state());
- page->set_concurrent_sweeping_state(
- Page::ConcurrentSweepingState::kInProgress);
- const FreeSpaceTreatmentMode free_space_treatment_mode =
- Heap::ShouldZapGarbage() ? FreeSpaceTreatmentMode::kZapFreeSpace
- : FreeSpaceTreatmentMode::kIgnoreFreeSpace;
- max_freed = RawSweep(page, free_space_treatment_mode, sweeping_mode, guard,
- local_pretenuring_feedback);
- DCHECK(page->SweepingDone());
+ // Iterate over the page using the live objects and free the memory before
+ // the given live object.
+ PtrComprCageBase cage_base(heap_->isolate());
+ PromotedPageRecordMigratedSlotVisitor record_visitor(
+ heap_, old_to_new_remembered_sets, snapshot_normal_pages_set_,
+ snapshot_large_pages_set_, snapshot_shared_normal_pages_set_,
+ snapshot_shared_large_pages_set_);
+ DCHECK(!heap_->incremental_marking()->IsMarking());
+ if (chunk->IsLargePage()) {
+ HandlePromotedObject(static_cast<LargePage*>(chunk)->GetObject(),
+ marking_state_, cage_base, &record_visitor);
+ } else {
+ PtrComprCageBase cage_base(chunk->heap()->isolate());
+ Address free_start = chunk->area_start();
+ for (auto object_and_size :
+ LiveObjectRange<kBlackObjects>(chunk, marking_state_->bitmap(chunk))) {
+ HeapObject object = object_and_size.first;
+ HandlePromotedObject(object, marking_state_, cage_base, &record_visitor);
+ Address free_end = object.address();
+ HandleFreeSpace(free_start, free_end, heap_);
+ Map map = object.map(cage_base, kAcquireLoad);
+ int size = object.SizeFromMap(map);
+ free_start = free_end + size;
+ }
+ HandleFreeSpace(free_start, chunk->area_end(), heap_);
}
+ marking_state_->ClearLiveness(chunk);
+ chunk->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
+}
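
RawIteratePromotedPageForRememberedSets above walks the live objects of a just-promoted page and, for every slot whose target is still in the young generation (or in shared space), records that slot in a per-page remembered set so the next young-generation GC can find it without rescanning old pages. The sketch below is a deliberately simplified model of that bookkeeping with toy types; Page, RememberedSets and RecordMigratedSlot are illustrative, not V8's MemoryChunk/SlotSet APIs.

    #include <cstdint>
    #include <set>
    #include <unordered_map>
    #include <unordered_set>

    using Address = std::uintptr_t;
    struct Page {};  // toy stand-in for a V8 page/MemoryChunk

    // Per-page "old-to-new" remembered set: for each old-generation page, the
    // slot addresses on it that point into the young generation.
    using RememberedSets = std::unordered_map<const Page*, std::set<Address>>;

    void RecordMigratedSlot(const Page* host_page, Address slot_address,
                            const Page* value_page,
                            const std::unordered_set<const Page*>& young_pages,
                            RememberedSets* old_to_new) {
      if (value_page == nullptr) return;               // target is not a heap object
      if (young_pages.count(value_page) == 0) return;  // target already in old space
      (*old_to_new)[host_page].insert(slot_address);   // remember this slot
    }

    int main() {
      Page old_page, young_page;
      std::unordered_set<const Page*> young_pages = {&young_page};
      RememberedSets old_to_new;
      RecordMigratedSlot(&old_page, /*slot_address=*/0x1000, &young_page,
                         young_pages, &old_to_new);
      return old_to_new[&old_page].count(0x1000) ? 0 : 1;
    }
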
- {
- base::MutexGuard guard(&mutex_);
- swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
- cv_page_swept_.NotifyAll();
+bool Sweeper::IsIteratingPromotedPages() const {
+ return promoted_page_iteration_in_progress_.load(std::memory_order_acquire);
+}
+
+void Sweeper::ContributeAndWaitForPromotedPagesIteration() {
+ main_thread_local_sweeper_.ContributeAndWaitForPromotedPagesIteration();
+}
+
+void Sweeper::IncrementAndNotifyPromotedPagesIterationFinishedIfNeeded() {
+ if (++iterated_promoted_pages_count_ < promoted_pages_for_iteration_count_)
+ return;
+ NotifyPromotedPagesIterationFinished();
+}
+
+void Sweeper::NotifyPromotedPagesIterationFinished() {
+ DCHECK_EQ(iterated_promoted_pages_count_,
+ promoted_pages_for_iteration_count_);
+ base::MutexGuard guard(&promoted_pages_iteration_notification_mutex_);
+ promoted_page_iteration_in_progress_.store(false, std::memory_order_release);
+ promoted_pages_iteration_notification_variable_.NotifyAll();
+}
+
+size_t Sweeper::ConcurrentSweepingPageCount() {
+ DCHECK(sweeping_in_progress());
+ base::MutexGuard guard(&mutex_);
+ base::MutexGuard promoted_pages_guard(&promoted_pages_iteration_mutex_);
+ size_t promoted_pages_count =
+ sweeping_list_for_promoted_page_iteration_.size();
+ size_t count = should_iterate_promoted_pages_
+ ? promoted_pages_count
+ : std::min(static_cast<size_t>(1), promoted_pages_count);
+ for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
+ count += sweeping_list_[i].size();
}
- return max_freed;
+ return count;
+}
+
+int Sweeper::ParallelSweepSpace(AllocationSpace identity,
+ SweepingMode sweeping_mode,
+ int required_freed_bytes, int max_pages) {
+ DCHECK_IMPLIES(identity == NEW_SPACE, heap_->IsMainThread());
+ return main_thread_local_sweeper_.ParallelSweepSpace(
+ identity, sweeping_mode, required_freed_bytes, max_pages);
}
void Sweeper::EnsurePageIsSwept(Page* page) {
+ AssertMainThreadOrSharedMainThread(heap_);
if (!sweeping_in_progress() || page->SweepingDone()) return;
AllocationSpace space = page->owner_identity();
if (IsValidSweepingSpace(space)) {
if (TryRemoveSweepingPageSafe(space, page)) {
// Page was successfully removed and can now be swept.
- ParallelSweepPage(page, space, &local_pretenuring_feedback_,
- SweepingMode::kLazyOrConcurrent);
+ main_thread_local_sweeper_.ParallelSweepPage(
+ page, space, SweepingMode::kLazyOrConcurrent);
} else {
// Some sweeper task already took ownership of that page, wait until
// sweeping is finished.
@@ -631,27 +1084,66 @@ bool Sweeper::TryRemoveSweepingPageSafe(AllocationSpace space, Page* page) {
std::find(sweeping_list.begin(), sweeping_list.end(), page);
if (position == sweeping_list.end()) return false;
sweeping_list.erase(position);
+ if (sweeping_list.empty()) {
+ has_sweeping_work_[GetSweepSpaceIndex(space)].store(
+ false, std::memory_order_release);
+ }
return true;
}
void Sweeper::AddPage(AllocationSpace space, Page* page,
- Sweeper::AddPageMode mode) {
+ Sweeper::AddPageMode mode, AccessMode mutex_mode) {
DCHECK_NE(NEW_SPACE, space);
- AddPageImpl(space, page, mode);
+ AddPageImpl(space, page, mode, mutex_mode);
}
-void Sweeper::AddNewSpacePage(Page* page) {
+void Sweeper::AddNewSpacePage(Page* page, AccessMode mutex_mode) {
DCHECK_EQ(NEW_SPACE, page->owner_identity());
size_t live_bytes = marking_state_->live_bytes(page);
heap_->IncrementNewSpaceSurvivingObjectSize(live_bytes);
heap_->IncrementYoungSurvivorsCounter(live_bytes);
page->ClearWasUsedForAllocation();
- AddPageImpl(NEW_SPACE, page, AddPageMode::REGULAR);
+ AddPageImpl(NEW_SPACE, page, AddPageMode::REGULAR, mutex_mode);
+}
+
+void Sweeper::AddPromotedPageForIteration(MemoryChunk* chunk) {
+ DCHECK(heap_->IsMainThread());
+ DCHECK(chunk->owner_identity() == OLD_SPACE ||
+ chunk->owner_identity() == LO_SPACE);
+ DCHECK_IMPLIES(v8_flags.concurrent_sweeping,
+ !job_handle_ || !job_handle_->IsValid());
+ size_t live_bytes = marking_state_->live_bytes(chunk);
+ DCHECK_GE(chunk->area_size(), live_bytes);
+ heap_->IncrementPromotedObjectsSize(live_bytes);
+ heap_->IncrementYoungSurvivorsCounter(live_bytes);
+#if DEBUG
+ if (!chunk->IsLargePage()) {
+ static_cast<Page*>(chunk)->ForAllFreeListCategories(
+ [chunk](FreeListCategory* category) {
+ DCHECK(!category->is_linked(chunk->owner()->free_list()));
+ });
+ }
+#endif // DEBUG
+ DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
+ chunk->concurrent_sweeping_state());
+ chunk->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
+ // This method is called only from the main thread while sweeping tasks have
+ // not yet started, thus a mutex is not needed.
+ sweeping_list_for_promoted_page_iteration_.push_back(chunk);
+ promoted_pages_for_iteration_count_++;
}
void Sweeper::AddPageImpl(AllocationSpace space, Page* page,
- Sweeper::AddPageMode mode) {
- base::MutexGuard guard(&mutex_);
+ Sweeper::AddPageMode mode, AccessMode mutex_mode) {
+ base::Optional<base::MutexGuard> guard;
+ if (mutex_mode == AccessMode::ATOMIC) {
+ guard.emplace(&mutex_);
+ } else {
+ // This assert only checks that the non_atomic version is used on the
+ // main thread. It would not catch cases where the main thread adds a page
+ // non-atomically while concurrent jobs are adding pages atomically.
+ AssertMainThreadOrSharedMainThread(heap_);
+ }
DCHECK(IsValidSweepingSpace(space));
DCHECK_IMPLIES(v8_flags.concurrent_sweeping,
!job_handle_ || !job_handle_->IsValid());
@@ -665,6 +1157,8 @@ void Sweeper::AddPageImpl(AllocationSpace space, Page* page,
DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
page->concurrent_sweeping_state());
sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
+ has_sweeping_work_[GetSweepSpaceIndex(space)].store(
+ true, std::memory_order_release);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
@@ -693,13 +1187,36 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
int space_index = GetSweepSpaceIndex(space);
Page* page = nullptr;
- if (!sweeping_list_[space_index].empty()) {
- page = sweeping_list_[space_index].back();
- sweeping_list_[space_index].pop_back();
+ SweepingList& sweeping_list = sweeping_list_[space_index];
+ if (!sweeping_list.empty()) {
+ page = sweeping_list.back();
+ sweeping_list.pop_back();
+ }
+ if (sweeping_list.empty()) {
+ has_sweeping_work_[GetSweepSpaceIndex(space)].store(
+ false, std::memory_order_release);
}
return page;
}
+MemoryChunk* Sweeper::GetPromotedPageForIterationSafe() {
+ base::MutexGuard guard(&promoted_pages_iteration_mutex_);
+ MemoryChunk* chunk = nullptr;
+ if (!sweeping_list_for_promoted_page_iteration_.empty()) {
+ chunk = sweeping_list_for_promoted_page_iteration_.back();
+ sweeping_list_for_promoted_page_iteration_.pop_back();
+ }
+ return chunk;
+}
+
+std::vector<MemoryChunk*> Sweeper::GetAllPromotedPagesForIterationSafe() {
+ base::MutexGuard guard(&promoted_pages_iteration_mutex_);
+ std::vector<MemoryChunk*> pages;
+ pages.swap(sweeping_list_for_promoted_page_iteration_);
+ DCHECK(sweeping_list_for_promoted_page_iteration_.empty());
+ return pages;
+}
+
GCTracer::Scope::ScopeId Sweeper::GetTracingScope(AllocationSpace space,
bool is_joining_thread) {
if (space == NEW_SPACE &&
@@ -717,9 +1234,71 @@ GCTracer::Scope::ScopeId Sweeper::GetTracingScopeForCompleteYoungSweep() {
: GCTracer::Scope::MC_COMPLETE_SWEEPING;
}
-bool Sweeper::IsSweepingDoneForSpace(AllocationSpace space) {
- DCHECK(!AreSweeperTasksRunning());
- return sweeping_list_[GetSweepSpaceIndex(space)].empty();
+bool Sweeper::IsSweepingDoneForSpace(AllocationSpace space) const {
+ return !has_sweeping_work_[GetSweepSpaceIndex(space)].load(
+ std::memory_order_acquire);
+}
+
+void Sweeper::AddSweptPage(Page* page, AllocationSpace identity) {
+ base::MutexGuard guard(&mutex_);
+ swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
+ has_swept_pages_[GetSweepSpaceIndex(identity)].store(
+ true, std::memory_order_release);
+ cv_page_swept_.NotifyAll();
+}
+
+bool Sweeper::ShouldRefillFreelistForSpace(AllocationSpace space) const {
+ DCHECK_IMPLIES(space == NEW_SPACE, v8_flags.minor_mc);
+ return has_swept_pages_[GetSweepSpaceIndex(space)].load(
+ std::memory_order_acquire);
+}
+
+void Sweeper::SweepEmptyNewSpacePage(Page* page) {
+ DCHECK(v8_flags.minor_mc);
+ DCHECK_EQ(NEW_SPACE, page->owner_identity());
+ DCHECK_EQ(0, marking_state_->live_bytes(page));
+ DCHECK(marking_state_->bitmap(page)->IsClean());
+ DCHECK(heap_->IsMainThread() ||
+ (heap_->IsSharedMainThread() &&
+ !heap_->isolate()->is_shared_space_isolate()));
+ DCHECK(heap_->tracer()->IsInAtomicPause());
+ DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
+ page->concurrent_sweeping_state());
+
+ PagedSpaceBase* paged_space =
+ PagedNewSpace::From(heap_->new_space())->paged_space();
+
+ Address start = page->area_start();
+ size_t size = page->area_size();
+
+ if (Heap::ShouldZapGarbage()) {
+ static constexpr Tagged_t kZapTagged = static_cast<Tagged_t>(kZapValue);
+ const size_t size_in_tagged = size / kTaggedSize;
+ Tagged_t* current_addr = reinterpret_cast<Tagged_t*>(start);
+ for (size_t i = 0; i < size_in_tagged; ++i) {
+ base::AsAtomicPtr(current_addr++)
+ ->store(kZapTagged, std::memory_order_relaxed);
+ }
+ }
+
+ page->ClearWasUsedForAllocation();
+ page->ResetAllocationStatistics();
+ heap_->CreateFillerObjectAtSweeper(start, static_cast<int>(size));
+ paged_space->UnaccountedFree(start, size);
+ paged_space->IncreaseAllocatedBytes(0, page);
+ paged_space->RelinkFreeListCategories(page);
+
+ if (heap_->ShouldReduceMemory()) {
+ page->DiscardUnusedMemory(start, size);
+ // Only decrement counter when we discard unused system pages.
+ ActiveSystemPages active_system_pages_after_sweeping;
+ active_system_pages_after_sweeping.Init(
+ MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(), Page::kPageSize);
+ // Decrement accounted memory for discarded memory.
+ paged_space->ReduceActiveSystemPages(page,
+ active_system_pages_after_sweeping);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 7f5546a205..c2c437a9dd 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -6,14 +6,16 @@
#define V8_HEAP_SWEEPER_H_
#include <map>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "src/base/optional.h"
#include "src/base/platform/condition-variable.h"
-#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/pretenuring-handler.h"
#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
@@ -22,8 +24,10 @@ namespace v8 {
namespace internal {
class InvalidatedSlotsCleanup;
+class MemoryChunk;
class NonAtomicMarkingState;
class Page;
+class LargePage;
class PagedSpaceBase;
class Space;
@@ -33,6 +37,12 @@ class Sweeper {
public:
using SweepingList = std::vector<Page*>;
using SweptList = std::vector<Page*>;
+ using CachedOldToNewRememberedSets =
+ std::unordered_map<MemoryChunk*, SlotSet*>;
+
+ enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
+ enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
+ enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
// Pauses the sweeper tasks.
class V8_NODISCARD PauseScope final {
@@ -74,34 +84,64 @@ class Sweeper {
bool sweeping_in_progress_;
};
- enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
- enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
- enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
+ // LocalSweeper holds local data structures required for sweeping and is used
+ // to initiate sweeping and promoted page iteration on multiple threads. Each
+ // thread should hold its own LocalSweeper. Once sweeping is done, all
+ // LocalSweepers should be finalized on the main thread.
+ //
+ // LocalSweeper is not thread-safe and should not be used concurrently by several
+ // threads. The exceptions to this rule are allocations during parallel
+ // evacuation and from concurrent allocators. In practice the data structures
+ // in LocalSweeper are only actively used for new space sweeping. Since
+ // parallel evacuators and concurrent allocators never try to allocate in new
+ // space, they will never contribute to new space sweeping and thus can use
+ // the main thread's local sweeper without risk of data races.
+ class LocalSweeper final {
+ public:
+ explicit LocalSweeper(Sweeper* sweeper) : sweeper_(sweeper) {
+ DCHECK_NOT_NULL(sweeper_);
+ }
+ ~LocalSweeper() { DCHECK(IsEmpty()); }
+
+ int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
+ int required_freed_bytes, int max_pages = 0);
+ void ContributeAndWaitForPromotedPagesIteration();
+ void Finalize();
- Sweeper(Heap* heap);
+ bool IsEmpty() const { return old_to_new_remembered_sets_.empty(); }
+
+ private:
+ int ParallelSweepPage(Page* page, AllocationSpace identity,
+ SweepingMode sweeping_mode);
+
+ void ParallelIteratePromotedPagesForRememberedSets();
+ void ParallelIteratePromotedPageForRememberedSets(MemoryChunk* chunk);
+ void CleanPromotedPages();
+
+ Sweeper* const sweeper_;
+ CachedOldToNewRememberedSets old_to_new_remembered_sets_;
+
+ friend class Sweeper;
+ };
+
+ explicit Sweeper(Heap* heap);
~Sweeper();
bool sweeping_in_progress() const { return sweeping_in_progress_; }
void TearDown();
- void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
- void AddNewSpacePage(Page* page);
+ void AddPage(AllocationSpace space, Page* page, AddPageMode mode,
+ AccessMode mutex_mode = AccessMode::NON_ATOMIC);
+ void AddNewSpacePage(Page* page,
+ AccessMode mutex_mode = AccessMode::NON_ATOMIC);
+ void AddPromotedPageForIteration(MemoryChunk* chunk);
int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
int required_freed_bytes, int max_pages = 0);
- int ParallelSweepPage(
- Page* page, AllocationSpace identity,
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
- SweepingMode sweeping_mode);
void EnsurePageIsSwept(Page* page);
- int RawSweep(
- Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
- SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
- PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback);
-
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
@@ -113,17 +153,35 @@ class Sweeper {
bool AreSweeperTasksRunning();
Page* GetSweptPageSafe(PagedSpaceBase* space);
+ SweptList GetAllSweptPagesSafe(PagedSpaceBase* space);
- bool IsSweepingDoneForSpace(AllocationSpace space);
+ bool IsSweepingDoneForSpace(AllocationSpace space) const;
GCTracer::Scope::ScopeId GetTracingScope(AllocationSpace space,
bool is_joining_thread);
GCTracer::Scope::ScopeId GetTracingScopeForCompleteYoungSweep();
+ bool IsIteratingPromotedPages() const;
+ void ContributeAndWaitForPromotedPagesIteration();
+
+ bool ShouldRefillFreelistForSpace(AllocationSpace space) const;
+
+ void SweepEmptyNewSpacePage(Page* page);
+
private:
NonAtomicMarkingState* marking_state() const { return marking_state_; }
- void AddPageImpl(AllocationSpace space, Page* page, AddPageMode mode);
+ int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
+ SweepingMode sweeping_mode);
+
+ void RawIteratePromotedPageForRememberedSets(
+ MemoryChunk* chunk,
+ CachedOldToNewRememberedSets* old_to_new_remembered_sets);
+
+ void AddPageImpl(AllocationSpace space, Page* page, AddPageMode mode,
+ AccessMode mutex_mode);
+
+ void FinalizeLocalSweepers();
class ConcurrentSweeper;
class SweeperJob;
@@ -156,6 +214,7 @@ class Sweeper {
Address free_start, Address free_end, Page* page, bool record_free_ranges,
TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* invalidated_old_to_new_cleanup,
+ InvalidatedSlotsCleanup* invalidated_old_to_old_cleanup,
InvalidatedSlotsCleanup* invalidated_old_to_shared_cleanup);
// Helper function for RawSweep. Clears invalid typed slots in the given free
@@ -172,6 +231,8 @@ class Sweeper {
bool IsDoneSweeping() const {
bool is_done = true;
ForAllSweepingSpaces([this, &is_done](AllocationSpace space) {
+ DCHECK_EQ(IsSweepingDoneForSpace(space),
+ sweeping_list_[GetSweepSpaceIndex(space)].empty());
if (!sweeping_list_[GetSweepSpaceIndex(space)].empty()) is_done = false;
});
return is_done;
@@ -180,6 +241,8 @@ class Sweeper {
size_t ConcurrentSweepingPageCount();
Page* GetSweepingPageSafe(AllocationSpace space);
+ MemoryChunk* GetPromotedPageForIterationSafe();
+ std::vector<MemoryChunk*> GetAllPromotedPagesForIterationSafe();
bool TryRemoveSweepingPageSafe(AllocationSpace space, Page* page);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
@@ -195,22 +258,45 @@ class Sweeper {
int NumberOfConcurrentSweepers() const;
+ void IncrementAndNotifyPromotedPagesIterationFinishedIfNeeded();
+ void NotifyPromotedPagesIterationFinished();
+
+ void SnapshotPageSets();
+
+ void AddSweptPage(Page* page, AllocationSpace identity);
+
Heap* const heap_;
NonAtomicMarkingState* const marking_state_;
std::unique_ptr<JobHandle> job_handle_;
base::Mutex mutex_;
+ base::Mutex promoted_pages_iteration_mutex_;
base::ConditionVariable cv_page_swept_;
SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces];
+ std::atomic<bool> has_sweeping_work_[kNumberOfSweepingSpaces]{false};
+ std::atomic<bool> has_swept_pages_[kNumberOfSweepingSpaces]{false};
+ std::vector<MemoryChunk*> sweeping_list_for_promoted_page_iteration_;
std::vector<ConcurrentSweeper> concurrent_sweepers_;
// Main thread can finalize sweeping, while background threads allocation slow
// path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_;
bool should_reduce_memory_;
bool should_sweep_non_new_spaces_ = false;
- PretenturingHandler* const pretenuring_handler_;
- PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
base::Optional<GarbageCollector> current_new_space_collector_;
+ LocalSweeper main_thread_local_sweeper_;
+
+ // The following fields are used for maintaining an order between iterating
+ // promoted pages and sweeping array buffer extensions.
+ size_t promoted_pages_for_iteration_count_ = 0;
+ std::atomic<size_t> iterated_promoted_pages_count_{0};
+ base::Mutex promoted_pages_iteration_notification_mutex_;
+ base::ConditionVariable promoted_pages_iteration_notification_variable_;
+ MemoryAllocator::NormalPagesSet snapshot_normal_pages_set_;
+ MemoryAllocator::LargePagesSet snapshot_large_pages_set_;
+ MemoryAllocator::NormalPagesSet snapshot_shared_normal_pages_set_;
+ MemoryAllocator::LargePagesSet snapshot_shared_large_pages_set_;
+ std::atomic<bool> promoted_page_iteration_in_progress_{false};
+ bool should_iterate_promoted_pages_ = false;
};
} // namespace internal
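
The LocalSweeper contract documented in sweeper.h above (each thread holds its own instance, the instances themselves are not thread-safe, and all of them are finalized on the main thread once sweeping is done) is the common "per-thread accumulator, single-threaded merge" pattern. The following is a generic sketch of that pattern with standard-library types only; nothing in it is V8 API.

    #include <thread>
    #include <unordered_map>
    #include <vector>

    constexpr int kWorkers = 4;

    // Analogous to LocalSweeper: one private accumulator per worker, touched by
    // that worker only, so no locking is needed while the workers run.
    struct LocalAccumulator {
      std::unordered_map<int, int> per_page_counts;
    };

    int main() {
      std::vector<LocalAccumulator> locals(kWorkers);
      std::vector<std::thread> workers;
      for (int i = 0; i < kWorkers; ++i) {
        workers.emplace_back([i, &locals] {
          for (int page = 0; page < 100; ++page) {
            if (page % kWorkers == i) locals[i].per_page_counts[page]++;
          }
        });
      }
      for (auto& worker : workers) worker.join();

      // "Finalize" on the main thread: all workers have joined, so the merge
      // needs no synchronization (analogous to finalizing LocalSweepers).
      std::unordered_map<int, int> merged;
      for (const auto& local : locals) {
        for (const auto& entry : local.per_page_counts) {
          merged[entry.first] += entry.second;
        }
      }
      return merged.size() == 100 ? 0 : 1;
    }
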
diff --git a/deps/v8/src/heap/global-handle-marking-visitor.cc b/deps/v8/src/heap/traced-handles-marking-visitor.cc
index 46d81cfea2..3e737ba729 100644
--- a/deps/v8/src/heap/global-handle-marking-visitor.cc
+++ b/deps/v8/src/heap/traced-handles-marking-visitor.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/global-handle-marking-visitor.h"
+#include "src/heap/traced-handles-marking-visitor.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"
@@ -10,14 +10,20 @@
namespace v8 {
namespace internal {
-GlobalHandleMarkingVisitor::GlobalHandleMarkingVisitor(
- Heap& heap, MarkingWorklists::Local& local_marking_worklist)
+ConservativeTracedHandlesMarkingVisitor::
+ ConservativeTracedHandlesMarkingVisitor(
+ Heap& heap, MarkingWorklists::Local& local_marking_worklist,
+ cppgc::internal::CollectionType collection_type)
: heap_(heap),
marking_state_(*heap_.marking_state()),
local_marking_worklist_(local_marking_worklist),
- traced_node_bounds_(heap.isolate()->traced_handles()->GetNodeBounds()) {}
+ traced_node_bounds_(heap.isolate()->traced_handles()->GetNodeBounds()),
+ mark_mode_(collection_type == cppgc::internal::CollectionType::kMinor
+ ? TracedHandles::MarkMode::kOnlyYoung
+ : TracedHandles::MarkMode::kAll) {}
-void GlobalHandleMarkingVisitor::VisitPointer(const void* address) {
+void ConservativeTracedHandlesMarkingVisitor::VisitPointer(
+ const void* address) {
const auto upper_it = std::upper_bound(
traced_node_bounds_.begin(), traced_node_bounds_.end(), address,
[](const void* needle, const auto& pair) { return needle < pair.first; });
@@ -28,7 +34,8 @@ void GlobalHandleMarkingVisitor::VisitPointer(const void* address) {
if (address < bounds->second) {
auto object = TracedHandles::MarkConservatively(
const_cast<Address*>(reinterpret_cast<const Address*>(address)),
- const_cast<Address*>(reinterpret_cast<const Address*>(bounds->first)));
+ const_cast<Address*>(reinterpret_cast<const Address*>(bounds->first)),
+ mark_mode_);
if (!object.IsHeapObject()) {
// The embedder is not aware of whether numbers are materialized as heap
// objects or just passed around as Smis. This branch also filters out
@@ -37,7 +44,8 @@ void GlobalHandleMarkingVisitor::VisitPointer(const void* address) {
return;
}
HeapObject heap_object = HeapObject::cast(object);
- if (marking_state_.WhiteToGrey(heap_object)) {
+ if (heap_object.InReadOnlySpace()) return;
+ if (marking_state_.TryMark(heap_object)) {
local_marking_worklist_.Push(heap_object);
}
if (V8_UNLIKELY(v8_flags.track_retaining_path)) {
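
ConservativeTracedHandlesMarkingVisitor::VisitPointer above classifies an arbitrary, conservatively scanned address by binary-searching the sorted [start, end) bounds of the traced-node blocks with std::upper_bound and then checking the candidate range. The same lookup is shown below as a self-contained sketch; Range and FindContainingRange are illustrative names.

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <utility>
    #include <vector>

    // Sorted, non-overlapping [start, end) ranges, analogous to the node bounds
    // returned by TracedHandles::GetNodeBounds().
    using Range = std::pair<const void*, const void*>;

    // Returns the range containing `address`, or nullptr if there is none.
    // Mirrors the upper_bound-then-check pattern in the visitor above.
    const Range* FindContainingRange(const std::vector<Range>& bounds,
                                     const void* address) {
      const auto upper_it = std::upper_bound(
          bounds.begin(), bounds.end(), address,
          [](const void* needle, const Range& range) { return needle < range.first; });
      if (upper_it == bounds.begin()) return nullptr;  // address precedes every range
      const Range* candidate = &*std::prev(upper_it);  // last range starting <= address
      return address < candidate->second ? candidate : nullptr;
    }

    int main() {
      static int block_a[16], block_b[16], outside;
      std::vector<Range> bounds = {{block_a, block_a + 16}, {block_b, block_b + 16}};
      std::sort(bounds.begin(), bounds.end());
      std::cout << (FindContainingRange(bounds, &block_a[3]) != nullptr) << "\n";  // 1
      std::cout << (FindContainingRange(bounds, &outside) != nullptr) << "\n";     // 0
    }
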
diff --git a/deps/v8/src/heap/traced-handles-marking-visitor.h b/deps/v8/src/heap/traced-handles-marking-visitor.h
new file mode 100644
index 0000000000..ddcc1dcb7e
--- /dev/null
+++ b/deps/v8/src/heap/traced-handles-marking-visitor.h
@@ -0,0 +1,40 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_TRACED_HANDLES_MARKING_VISITOR_H_
+#define V8_HEAP_TRACED_HANDLES_MARKING_VISITOR_H_
+
+#include "src/handles/traced-handles.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
+
+namespace v8 {
+namespace internal {
+
+// Marking visitor for conservatively marking handles created through
+// `TracedHandles`. The visitor assumes that pointers (on stack, or
+// conservatively scanned on-heap) may point into traced handle nodes which
+// requires them to be kept alive.
+class ConservativeTracedHandlesMarkingVisitor final
+ : public ::heap::base::StackVisitor {
+ public:
+ ConservativeTracedHandlesMarkingVisitor(Heap&, MarkingWorklists::Local&,
+ cppgc::internal::CollectionType);
+ ~ConservativeTracedHandlesMarkingVisitor() override = default;
+
+ void VisitPointer(const void*) override;
+
+ private:
+ Heap& heap_;
+ MarkingState& marking_state_;
+ MarkingWorklists::Local& local_marking_worklist_;
+ const TracedHandles::NodeBounds traced_node_bounds_;
+ const TracedHandles::MarkMode mark_mode_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_TRACED_HANDLES_MARKING_VISITOR_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index c631ab400e..2531a3dea8 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -71,7 +71,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
TNode<HeapObjectReference> AccessorAssembler::TryMonomorphicCase(
TNode<TaggedIndex> slot, TNode<FeedbackVector> vector,
- TNode<Map> lookup_start_object_map, Label* if_handler,
+ TNode<HeapObjectReference> weak_lookup_start_object_map, Label* if_handler,
TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -89,7 +89,9 @@ TNode<HeapObjectReference> AccessorAssembler::TryMonomorphicCase(
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak reference in feedback.
- GotoIfNot(IsWeakReferenceTo(feedback, lookup_start_object_map), if_miss);
+ CSA_DCHECK(this,
+ IsMap(GetHeapObjectAssumeWeak(weak_lookup_start_object_map)));
+ GotoIfNot(TaggedEqual(feedback, weak_lookup_start_object_map), if_miss);
TNode<MaybeObject> handler = UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), vector,
@@ -101,8 +103,9 @@ TNode<HeapObjectReference> AccessorAssembler::TryMonomorphicCase(
}
void AccessorAssembler::HandlePolymorphicCase(
- TNode<Map> lookup_start_object_map, TNode<WeakFixedArray> feedback,
- Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss) {
+ TNode<HeapObjectReference> weak_lookup_start_object_map,
+ TNode<WeakFixedArray> feedback, Label* if_handler,
+ TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("HandlePolymorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -123,8 +126,9 @@ void AccessorAssembler::HandlePolymorphicCase(
{
TNode<MaybeObject> maybe_cached_map =
LoadWeakFixedArrayElement(feedback, var_index.value());
- CSA_DCHECK(this, IsWeakOrCleared(maybe_cached_map));
- GotoIfNot(IsWeakReferenceTo(maybe_cached_map, lookup_start_object_map),
+ CSA_DCHECK(this,
+ IsMap(GetHeapObjectAssumeWeak(weak_lookup_start_object_map)));
+ GotoIfNot(TaggedEqual(maybe_cached_map, weak_lookup_start_object_map),
&loop_next);
// Found, now call handler.
@@ -187,7 +191,7 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
// we should miss to the runtime.
exit_point->Return(
CallBuiltin(Builtin::kCallFunctionTemplate_CheckCompatibleReceiver,
- context, getter, IntPtrConstant(0), lookup_start_object));
+ context, getter, IntPtrConstant(1), lookup_start_object));
}
void AccessorAssembler::HandleLoadICHandlerCase(
@@ -209,7 +213,7 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&try_proto_handler);
{
GotoIf(IsWeakOrCleared(handler), &call_getter);
- GotoIf(IsCodeT(CAST(handler)), &call_code_handler);
+ GotoIf(IsCode(CAST(handler)), &call_code_handler);
HandleLoadICProtoHandler(p, CAST(handler), &var_holder, &var_smi_handler,
&if_smi_handler, miss, exit_point, ic_mode,
access_mode);
@@ -237,7 +241,7 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&call_code_handler);
{
- TNode<CodeT> code_handler = CAST(handler);
+ TNode<Code> code_handler = CAST(handler);
exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, code_handler,
p->context(), p->lookup_start_object(),
p->name(), p->slot(), p->vector());
@@ -291,7 +295,7 @@ void AccessorAssembler::HandleLoadAccessor(
Goto(&load);
BIND(&load);
- TNode<IntPtrT> argc = IntPtrConstant(0);
+ TNode<Int32T> argc = Int32Constant(0);
exit_point->Return(CallApiCallback(context, callback, argc, data,
api_holder.value(), p->receiver()));
}
@@ -1015,7 +1019,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
if (on_code_handler) {
Label if_smi_handler(this);
GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler);
- TNode<CodeT> code = CAST(smi_or_code_handler);
+ TNode<Code> code = CAST(smi_or_code_handler);
on_code_handler(code);
BIND(&if_smi_handler);
@@ -1304,25 +1308,12 @@ void AccessorAssembler::HandleStoreICHandlerCase(
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
- GotoIf(IsPropertyDetailsConst(details), &if_constant);
+ GotoIf(IsPropertyDetailsConst(details), miss);
}
StoreValueByKeyIndex<PropertyDictionary>(
properties, var_name_index.value(), p->value());
Return(p->value());
-
- if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
- BIND(&if_constant);
- {
- TNode<Object> prev_value =
- LoadValueByKeyIndex(properties, var_name_index.value());
- BranchIfSameValue(prev_value, p->value(), &done, miss,
- SameValueMode::kNumbersOnly);
- }
-
- BIND(&done);
- Return(p->value());
- }
}
}
BIND(&if_fast_smi);
@@ -1351,7 +1342,10 @@ void AccessorAssembler::HandleStoreICHandlerCase(
}
BIND(&if_proxy);
- HandleStoreToProxy(p, CAST(holder), miss, support_elements);
+ {
+ CSA_DCHECK(this, BoolConstant(!p->IsDefineKeyedOwn()));
+ HandleStoreToProxy(p, CAST(holder), miss, support_elements);
+ }
BIND(&if_interceptor);
{
@@ -1389,7 +1383,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
GotoIf(IsWeakOrCleared(ref_handler), &store_transition_or_global);
TNode<HeapObject> strong_handler = CAST(handler);
TNode<Map> handler_map = LoadMap(strong_handler);
- Branch(IsCodeTMap(handler_map), &call_handler, &if_proto_handler);
+ Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
BIND(&if_proto_handler);
{
@@ -1400,7 +1394,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
{
- TNode<CodeT> code_handler = CAST(strong_handler);
+ TNode<Code> code_handler = CAST(strong_handler);
TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
p->vector());
@@ -1499,6 +1493,29 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
p->value(), miss, true);
}
+void AccessorAssembler::UpdateMayHaveInterestingSymbol(
+ TNode<PropertyDictionary> dict, TNode<Name> name) {
+ Comment("UpdateMayHaveInterestingSymbol");
+ Label done(this);
+
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ // TODO(pthier): Add flags to swiss dictionaries.
+ Goto(&done);
+ } else {
+ GotoIfNot(IsSymbol(name), &done);
+ TNode<Uint32T> symbol_flags =
+ LoadObjectField<Uint32T>(name, Symbol::kFlagsOffset);
+ GotoIfNot(IsSetWord32<Symbol::IsInterestingSymbolBit>(symbol_flags), &done);
+ TNode<Smi> flags = GetNameDictionaryFlags(dict);
+ flags = SmiOr(
+ flags, SmiConstant(
+ NameDictionary::MayHaveInterestingSymbolsBit::encode(true)));
+ SetNameDictionaryFlags(dict, flags);
+ Goto(&done);
+ }
+ BIND(&done);
+}
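
UpdateMayHaveInterestingSymbol above ORs a single-bit flag into the dictionary's flags word using V8's BitField-style encoding. Below is a tiny stand-alone illustration of that encode/decode scheme; the shift and width chosen here are made up, not NameDictionary's real layout.

    #include <cstdint>
    #include <iostream>

    // Minimal BitField-style helper: a value of type T stored in `kSize` bits at
    // bit offset `kShift` of a 32-bit flags word.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr std::uint32_t kMask =
          ((std::uint32_t{1} << kSize) - 1) << kShift;
      static constexpr std::uint32_t encode(T value) {
        return static_cast<std::uint32_t>(value) << kShift;
      }
      static constexpr T decode(std::uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    // Hypothetical placement of the flag; the real offset lives in NameDictionary.
    using MayHaveInterestingSymbolsBit = BitField<bool, /*kShift=*/3, /*kSize=*/1>;

    int main() {
      std::uint32_t flags = 0;
      // Equivalent of the SmiOr in the CSA code: set the flag, keep the rest.
      flags |= MayHaveInterestingSymbolsBit::encode(true);
      std::cout << MayHaveInterestingSymbolsBit::decode(flags) << "\n";  // 1
    }
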
+
void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
TNode<IntPtrT> name_index,
TNode<Word32T> representation,
@@ -1613,14 +1630,9 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
StoreMap(object, object_map);
StoreObjectField(object, field_offset, heap_number);
} else {
+ GotoIf(IsPropertyDetailsConst(details), slow);
TNode<HeapNumber> heap_number =
CAST(LoadObjectField(object, field_offset));
- Label store_value(this);
- GotoIfNot(IsPropertyDetailsConst(details), &store_value);
- TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
- BranchIfSameNumberValue(current_value, double_value, &store_value,
- slow);
- BIND(&store_value);
StoreHeapNumberValue(heap_number, double_value);
}
Goto(&done);
@@ -1631,12 +1643,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
if (do_transitioning_store) {
StoreMap(object, object_map);
} else {
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Object> current_value = LoadObjectField(object, field_offset);
- BranchIfSameValue(current_value, value, &done, slow,
- SameValueMode::kNumbersOnly);
- BIND(&if_mutable);
+ GotoIf(IsPropertyDetailsConst(details), slow);
}
StoreObjectField(object, field_offset, value);
Goto(&done);
@@ -1684,29 +1691,16 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
&double_rep, &tagged_rep);
BIND(&double_rep);
{
+ GotoIf(IsPropertyDetailsConst(details), slow);
TNode<HeapNumber> heap_number =
CAST(LoadPropertyArrayElement(properties, backing_store_index));
TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
-
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
- BranchIfSameNumberValue(current_value, double_value, &done, slow);
-
- BIND(&if_mutable);
StoreHeapNumberValue(heap_number, double_value);
Goto(&done);
}
BIND(&tagged_rep);
{
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Object> current_value =
- LoadPropertyArrayElement(properties, backing_store_index);
- BranchIfSameValue(current_value, value, &done, slow,
- SameValueMode::kNumbersOnly);
-
- BIND(&if_mutable);
+ GotoIf(IsPropertyDetailsConst(details), slow);
StorePropertyArrayElement(properties, backing_store_index, value);
Goto(&done);
}
@@ -1820,7 +1814,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
OnCodeHandler on_code_handler;
if (support_elements == kSupportElements) {
// Code sub-handlers are expected only in KeyedStoreICs.
- on_code_handler = [=](TNode<CodeT> code_handler) {
+ on_code_handler = [=](TNode<Code> code_handler) {
// This is either element store or transitioning element store.
Label if_element_store(this), if_transitioning_element_store(this);
Branch(IsStoreHandler0Map(LoadMap(handler)), &if_element_store,
@@ -1931,7 +1925,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(p->receiver())));
- Add<PropertyDictionary>(properties, CAST(p->name()), p->value(), &slow);
+ TNode<Name> name = CAST(p->name());
+ Add<PropertyDictionary>(properties, name, p->value(), &slow);
+ UpdateMayHaveInterestingSymbol(properties, name);
Return(p->value());
BIND(&slow);
@@ -1982,7 +1978,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&store);
{
GotoIf(IsSideEffectFreeDebuggingActive(), &if_slow);
- TNode<IntPtrT> argc = IntPtrConstant(1);
+ TNode<Int32T> argc = Int32Constant(1);
Return(CallApiCallback(context, callback, argc, data,
api_holder.value(), p->receiver(), p->value()));
}
@@ -2149,6 +2145,18 @@ void AccessorAssembler::CheckDescriptorConsidersNumbersMutable(
bailout);
}
+void AccessorAssembler::GotoIfNotSameNumberBitPattern(TNode<Float64T> left,
+ TNode<Float64T> right,
+ Label* miss) {
+ // TODO(verwaest): Use a single compare on 64bit archs.
+ const TNode<Uint32T> lhs_hi = Float64ExtractHighWord32(left);
+ const TNode<Uint32T> rhs_hi = Float64ExtractHighWord32(right);
+ GotoIfNot(Word32Equal(lhs_hi, rhs_hi), miss);
+ const TNode<Uint32T> lhs_lo = Float64ExtractLowWord32(left);
+ const TNode<Uint32T> rhs_lo = Float64ExtractLowWord32(right);
+ GotoIfNot(Word32Equal(lhs_lo, rhs_lo), miss);
+}
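
GotoIfNotSameNumberBitPattern above compares two doubles by their raw 64-bit pattern (as two 32-bit halves), which is stricter than a numeric ==: it distinguishes +0.0 from -0.0, and a NaN only matches the identical NaN bit pattern. A short equivalent in standard C++ follows; SameNumberBitPattern is an illustrative name.

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Compare two doubles by raw bit pattern, like the CSA helper above does
    // with its high/low 32-bit words.
    bool SameNumberBitPattern(double a, double b) {
      std::uint64_t ua, ub;
      std::memcpy(&ua, &a, sizeof(ua));
      std::memcpy(&ub, &b, sizeof(ub));
      return ua == ub;
    }

    int main() {
      std::cout << SameNumberBitPattern(1.5, 1.5) << "\n";   // 1
      std::cout << SameNumberBitPattern(0.0, -0.0) << "\n";  // 0: same value, different sign bit
      std::cout << (0.0 == -0.0) << "\n";                    // 1: numeric compare ignores the sign
      const double nan = std::nan("");
      std::cout << SameNumberBitPattern(nan, nan) << "\n";   // 1: identical NaN bits
      std::cout << (nan == nan) << "\n";                     // 0: NaN never numerically equals itself
    }
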
+
void AccessorAssembler::HandleStoreFieldAndReturn(
TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value,
base::Optional<TNode<Float64T>> double_value, Representation representation,
@@ -2192,11 +2200,9 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
&do_store);
{
if (store_value_as_double) {
- Label done(this);
TNode<Float64T> current_value =
LoadObjectField<Float64T>(property_storage, offset);
- BranchIfSameNumberValue(current_value, *double_value, &done, miss);
- BIND(&done);
+ GotoIfNotSameNumberBitPattern(current_value, *double_value, miss);
Return(value);
} else {
TNode<Object> current_value = LoadObjectField(property_storage, offset);
@@ -2904,7 +2910,7 @@ TNode<IntPtrT> AccessorAssembler::StubCachePrimaryOffset(TNode<Name> name,
TNode<IntPtrT> map_word = BitcastTaggedToWord(map);
TNode<Int32T> map32 = TruncateIntPtrToInt32(UncheckedCast<IntPtrT>(
- WordXor(map_word, WordShr(map_word, StubCache::kMapKeyShift))));
+ WordXor(map_word, WordShr(map_word, StubCache::kPrimaryTableBits))));
// Base the offset on a simple combination of name and map.
TNode<Word32T> hash = Int32Add(raw_hash_field, map32);
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
@@ -2923,7 +2929,7 @@ TNode<IntPtrT> AccessorAssembler::StubCacheSecondaryOffset(TNode<Name> name,
TNode<Int32T> map32 = TruncateIntPtrToInt32(BitcastTaggedToWord(map));
// Base the offset on a simple combination of name and map.
TNode<Word32T> hash_a = Int32Add(map32, name32);
- TNode<Word32T> hash_b = Word32Shr(hash_a, StubCache::kSecondaryKeyShift);
+ TNode<Word32T> hash_b = Word32Shr(hash_a, StubCache::kSecondaryTableBits);
TNode<Word32T> hash = Int32Add(hash_a, hash_b);
int32_t mask = (StubCache::kSecondaryTableSize - 1)
<< StubCache::kCacheIndexShift;
@@ -3052,8 +3058,10 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
TVARIABLE(MaybeObject, var_handler);
Label try_polymorphic(this), if_handler(this, &var_handler);
+ TNode<HeapObjectReference> weak_lookup_start_object_map =
+ MakeWeak(lookup_start_object_map);
TNode<HeapObjectReference> feedback = TryMonomorphicCase(
- p->slot(), CAST(p->vector()), lookup_start_object_map, &if_handler,
+ p->slot(), CAST(p->vector()), weak_lookup_start_object_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -3064,7 +3072,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
TNode<HeapObject> strong_feedback =
GetHeapObjectIfStrong(feedback, &miss);
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &stub_call);
- HandlePolymorphicCase(lookup_start_object_map, CAST(strong_feedback),
+ HandlePolymorphicCase(weak_lookup_start_object_map, CAST(strong_feedback),
&if_handler, &var_handler, &miss);
}
}
@@ -3075,7 +3083,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
// Call into the stub that implements the non-inlined parts of LoadIC.
Callable ic = Builtins::CallableFor(isolate(), Builtin::kLoadIC_Noninlined);
- TNode<CodeT> code_target = HeapConstant(ic.code());
+ TNode<Code> code_target = HeapConstant(ic.code());
exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context(),
p->receiver_and_lookup_start_object(), p->name(),
p->slot(), p->vector());
@@ -3118,9 +3126,11 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
- TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
- &if_handler, &var_handler, &try_polymorphic);
+ TNode<HeapObjectReference> weak_lookup_start_object_map =
+ MakeWeak(lookup_start_object_map);
+ TNode<HeapObjectReference> feedback = TryMonomorphicCase(
+ p->slot(), CAST(p->vector()), weak_lookup_start_object_map, &if_handler,
+ &var_handler, &try_polymorphic);
BIND(&if_handler);
{
LazyLoadICParameters lazy_p(p);
@@ -3133,7 +3143,7 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
// Check polymorphic case.
Comment("LoadIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &non_inlined);
- HandlePolymorphicCase(lookup_start_object_map, CAST(strong_feedback),
+ HandlePolymorphicCase(weak_lookup_start_object_map, CAST(strong_feedback),
&if_handler, &var_handler, &miss);
}
@@ -3174,9 +3184,11 @@ void AccessorAssembler::LoadSuperIC(const LoadICParameters* p) {
TNode<Map> lookup_start_object_map = LoadMap(CAST(p->lookup_start_object()));
GotoIf(IsDeprecatedMap(lookup_start_object_map), &miss);
- TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
- &if_handler, &var_handler, &try_polymorphic);
+ TNode<HeapObjectReference> weak_lookup_start_object_map =
+ MakeWeak(lookup_start_object_map);
+ TNode<HeapObjectReference> feedback = TryMonomorphicCase(
+ p->slot(), CAST(p->vector()), weak_lookup_start_object_map, &if_handler,
+ &var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3192,7 +3204,7 @@ void AccessorAssembler::LoadSuperIC(const LoadICParameters* p) {
{
Comment("LoadSuperIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &non_inlined);
- HandlePolymorphicCase(lookup_start_object_map, CAST(strong_feedback),
+ HandlePolymorphicCase(weak_lookup_start_object_map, CAST(strong_feedback),
&if_handler, &var_handler, &miss);
}
@@ -3357,7 +3369,9 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
BIND(&if_property_cell);
{
- // Load value or try handler case if the weak reference is cleared.
+ // This branch also handles the "handler mode": the weak reference is
+ // cleared, the feedback extra is the handler. In that case we jump to
+ // try_handler. (See FeedbackNexus::ConfigureHandlerMode.)
CSA_DCHECK(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, try_handler));
@@ -3369,6 +3383,9 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
BIND(&if_lexical_var);
{
+ // This branch handles the "lexical variable mode": the feedback is a SMI
+ // encoding the variable location. (See
+ // FeedbackNexus::ConfigureLexicalVarMode.)
Comment("Load lexical variable");
TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_ref));
TNode<IntPtrT> context_index =
@@ -3485,9 +3502,11 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
GotoIf(IsUndefined(p->vector()), &generic);
// Check monomorphic case.
- TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
- &if_handler, &var_handler, &try_polymorphic);
+ TNode<HeapObjectReference> weak_lookup_start_object_map =
+ MakeWeak(lookup_start_object_map);
+ TNode<HeapObjectReference> feedback = TryMonomorphicCase(
+ p->slot(), CAST(p->vector()), weak_lookup_start_object_map, &if_handler,
+ &var_handler, &try_polymorphic);
BIND(&if_handler);
{
LazyLoadICParameters lazy_p(p);
@@ -3502,7 +3521,7 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
// Check polymorphic case.
Comment("KeyedLoadIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
- HandlePolymorphicCase(lookup_start_object_map, CAST(strong_feedback),
+ HandlePolymorphicCase(weak_lookup_start_object_map, CAST(strong_feedback),
&if_handler, &var_handler, &miss);
}
@@ -3675,6 +3694,100 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
+void AccessorAssembler::KeyedLoadICGeneric_StringKey(
+ const LoadICParameters* p) {
+ TNode<String> key = CAST(p->name());
+
+ Label if_runtime(this, Label::kDeferred);
+ TNode<Object> lookup_start_object = p->lookup_start_object();
+ GotoIf(TaggedIsSmi(lookup_start_object), &if_runtime);
+ GotoIf(IsNullOrUndefined(lookup_start_object), &if_runtime);
+
+ {
+ TNode<Int32T> instance_type = LoadInstanceType(key);
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
+
+ // Check |key| is not an index string.
+ CSA_DCHECK(this, IsSetWord32(LoadNameRawHashField(key),
+ Name::kDoesNotContainCachedArrayIndexMask));
+ CSA_DCHECK(this, IsNotEqualInWord32<Name::HashFieldTypeBits>(
+ LoadNameRawHashField(key),
+ Name::HashFieldType::kIntegerIndex));
+
+ TVARIABLE(Name, var_unique);
+ Label if_thinstring(this), if_unique_name(this), if_notunique(this);
+ GotoIf(InstanceTypeEqual(instance_type, THIN_STRING_TYPE), &if_thinstring);
+
+ // Check |key| does not contain forwarding index.
+ CSA_DCHECK(this,
+ Word32BinaryNot(
+ IsBothEqualInWord32<Name::HashFieldTypeBits,
+ Name::IsInternalizedForwardingIndexBit>(
+ LoadNameRawHashField(key),
+ Name::HashFieldType::kForwardingIndex, true)));
+
+ // Check if |key| is internalized.
+ static_assert(kNotInternalizedTag != 0);
+ GotoIf(IsSetWord32(instance_type, kIsNotInternalizedMask), &if_notunique);
+
+ var_unique = key;
+ Goto(&if_unique_name);
+
+ BIND(&if_thinstring);
+ {
+ var_unique = LoadObjectField<String>(key, ThinString::kActualOffset);
+ Goto(&if_unique_name);
+ }
+
+ BIND(&if_unique_name);
+ {
+ LoadICParameters pp(p, var_unique.value());
+ TNode<Map> lookup_start_object_map = LoadMap(CAST(lookup_start_object));
+ GenericPropertyLoad(CAST(lookup_start_object), lookup_start_object_map,
+ LoadMapInstanceType(lookup_start_object_map), &pp,
+ &if_runtime);
+ }
+
+ BIND(&if_notunique);
+ {
+ if (v8_flags.internalize_on_the_fly) {
+ // We expect that only string type keys can be used here, so we take
+ // everything else to the {if_runtime} path.
+ Label if_in_string_table(this);
+ TVARIABLE(IntPtrT, var_index);
+ TryInternalizeString(key, &if_runtime, &var_index, &if_in_string_table,
+ &var_unique, &if_runtime, &if_runtime);
+
+ BIND(&if_in_string_table);
+ {
+ // TODO(bmeurer): We currently use a version of GenericPropertyLoad
+ // here, where we don't try to probe the megamorphic stub cache
+ // after successfully internalizing the incoming string. Past
+ // experiments with this have shown that it causes too much traffic
+ // on the stub cache. We may want to re-evaluate that in the future.
+ LoadICParameters pp(p, var_unique.value());
+ TNode<Map> lookup_start_object_map =
+ LoadMap(CAST(lookup_start_object));
+ GenericPropertyLoad(CAST(lookup_start_object),
+ lookup_start_object_map,
+ LoadMapInstanceType(lookup_start_object_map), &pp,
+ &if_runtime, kDontUseStubCache);
+ }
+ } else {
+ Goto(&if_runtime);
+ }
+ }
+ }
+
+ BIND(&if_runtime);
+ {
+ Comment("KeyedLoadGeneric_slow");
+ // TODO(jkummerow): Should we use the GetProperty TF stub instead?
+ TailCallRuntime(Runtime::kGetProperty, p->context(),
+ p->receiver_and_lookup_start_object(), key);
+ }
+}
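
The if_notunique path above first tries to internalize the incoming string key so that the subsequent property lookup can work by identity rather than by character comparison. Below is a toy model of that idea using an unordered_set as the "string table"; V8's real string table, maps and dictionaries are far more involved, and all names here are illustrative.

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    // "Internalize": return a stable pointer to the unique copy of `key` in the
    // string table, inserting it if necessary.
    const std::string* Internalize(std::unordered_set<std::string>& string_table,
                                   const std::string& key) {
      return &*string_table.insert(key).first;
    }

    int main() {
      std::unordered_set<std::string> string_table;
      // Property table keyed by identity of internalized strings.
      std::unordered_map<const std::string*, int> properties;
      properties[Internalize(string_table, "length")] = 42;

      // A dynamically built key with the same characters internalizes to the
      // same pointer and therefore finds the same property entry.
      std::string dynamic_key = std::string("len") + "gth";
      auto it = properties.find(Internalize(string_table, dynamic_key));
      std::cout << (it != properties.end() ? it->second : -1) << "\n";  // 42
    }
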
+
void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
LoadAccessMode access_mode) {
TVARIABLE(MaybeObject, var_handler);
@@ -3698,7 +3811,7 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(vector, slot, kTaggedSize);
TNode<WeakFixedArray> array = CAST(feedback_element);
- HandlePolymorphicCase(lookup_start_object_map, array, &if_handler,
+ HandlePolymorphicCase(MakeWeak(lookup_start_object_map), array, &if_handler,
&var_handler, &miss);
BIND(&if_handler);
@@ -3736,8 +3849,9 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
+ TNode<HeapObjectReference> weak_receiver_map = MakeWeak(receiver_map);
TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), weak_receiver_map,
&if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3752,7 +3866,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
// Check polymorphic case.
Comment("StoreIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
- HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
+ HandlePolymorphicCase(weak_receiver_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss);
}
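Throughout these hunks the receiver map is wrapped with MakeWeak before being handed to TryMonomorphicCase and HandlePolymorphicCase, so the monomorphic check reduces to a single tagged-word comparison against the feedback slot. A rough sketch of weak tagging under an assumed two-bit low tag (the bit values here are illustrative, not taken from this diff):

#include <cstdint>

// Assumed tagging scheme: low two bits 01 = strong heap reference,
// 11 = weak heap reference.
constexpr uintptr_t kTagMask = 0x3;
constexpr uintptr_t kWeakTag = 0x3;

inline uintptr_t MakeWeak(uintptr_t strong_ref) {
  return (strong_ref & ~kTagMask) | kWeakTag;
}

// A monomorphic feedback hit is then just a word comparison between the
// weak-tagged map and the slot contents.
inline bool IsMonomorphicHit(uintptr_t feedback_slot, uintptr_t weak_map) {
  return feedback_slot == weak_map;
}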
@@ -3797,6 +3911,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
{
Label try_handler(this), miss(this, Label::kDeferred);
+ // This branch also handles the "handler mode": the weak reference is
+ // cleared and the extra feedback slot holds the handler. In that case we
+ // jump to try_handler. (See FeedbackNexus::ConfigureHandlerMode.)
CSA_DCHECK(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
@@ -3814,11 +3931,12 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), &miss);
DCHECK(pp->receiver_is_null());
+ DCHECK(pp->flags_is_null());
TNode<NativeContext> native_context = LoadNativeContext(pp->context());
StoreICParameters p(
pp->context(),
LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX),
- pp->name(), pp->value(), pp->slot(), pp->vector(),
+ pp->name(), pp->value(), base::nullopt, pp->slot(), pp->vector(),
StoreICMode::kDefault);
HandleStoreICHandlerCase(&p, handler, &miss, ICMode::kGlobalIC);
@@ -3833,6 +3951,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
BIND(&if_lexical_var);
{
+ // This branch handles the "lexical variable mode": the feedback is a Smi
+ // encoding the variable location. (See
+ // FeedbackNexus::ConfigureLexicalVarMode.)
Comment("Store lexical variable");
TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_ref));
TNode<IntPtrT> context_index =
@@ -3934,8 +4055,9 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
+ TNode<HeapObjectReference> weak_receiver_map = MakeWeak(receiver_map);
TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), weak_receiver_map,
&if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3951,8 +4073,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
Comment("KeyedStoreIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
&try_megamorphic);
- HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss);
+ HandlePolymorphicCase(weak_receiver_map, CAST(strong_feedback),
+ &if_handler, &var_handler, &miss);
}
BIND(&try_megamorphic);
@@ -3979,7 +4101,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(CAST(p->vector()), p->slot(), kTaggedSize);
TNode<WeakFixedArray> array = CAST(feedback_element);
- HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
+ HandlePolymorphicCase(weak_receiver_map, array, &if_handler, &var_handler,
&miss);
}
}
@@ -3994,6 +4116,25 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
void AccessorAssembler::DefineKeyedOwnIC(const StoreICParameters* p) {
Label miss(this, Label::kDeferred);
{
+ {
+ // TODO(v8:13451): Port SetFunctionName to an ic so that we can remove
+ // the runtime call here. Potentially we may also remove the
+ // StoreICParameters flags and have builtins:kDefineKeyedOwnIC reusing
+ // StoreWithVectorDescriptor again.
+ Label did_set_function_name_if_needed(this);
+ TNode<Int32T> needs_set_function_name = Word32And(
+ SmiToInt32(p->flags()),
+ Int32Constant(
+ static_cast<int>(DefineKeyedOwnPropertyFlag::kSetFunctionName)));
+ GotoIfNot(needs_set_function_name, &did_set_function_name_if_needed);
+
+ Comment("DefineKeyedOwnIC_set_function_name");
+ CallRuntime(Runtime::kSetFunctionName, p->context(), p->value(),
+ p->name());
+
+ Goto(&did_set_function_name_if_needed);
+ BIND(&did_set_function_name_if_needed);
+ }
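The new block reads a Smi of DefineKeyedOwnPropertyFlag bits and only calls Runtime::kSetFunctionName when kSetFunctionName is set. The test is a plain bitwise AND; a small sketch with an assumed bit assignment (the real enum values are defined elsewhere in the tree):

#include <cstdint>

// Assumed layout: kSetFunctionName occupies bit 0 of the flags Smi.
enum class DefineKeyedOwnPropertyFlag : int32_t {
  kNoFlags = 0,
  kSetFunctionName = 1 << 0,
};

inline bool NeedsSetFunctionName(int32_t flags) {
  return (flags & static_cast<int32_t>(
                      DefineKeyedOwnPropertyFlag::kSetFunctionName)) != 0;
}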
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler),
@@ -4008,8 +4149,9 @@ void AccessorAssembler::DefineKeyedOwnIC(const StoreICParameters* p) {
GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
+ TNode<HeapObjectReference> weak_receiver_map = MakeWeak(receiver_map);
TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), weak_receiver_map,
&if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -4025,8 +4167,8 @@ void AccessorAssembler::DefineKeyedOwnIC(const StoreICParameters* p) {
Comment("DefineKeyedOwnIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
&try_megamorphic);
- HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss);
+ HandlePolymorphicCase(weak_receiver_map, CAST(strong_feedback),
+ &if_handler, &var_handler, &miss);
}
BIND(&try_megamorphic);
@@ -4053,7 +4195,7 @@ void AccessorAssembler::DefineKeyedOwnIC(const StoreICParameters* p) {
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(CAST(p->vector()), p->slot(), kTaggedSize);
TNode<WeakFixedArray> array = CAST(feedback_element);
- HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
+ HandlePolymorphicCase(weak_receiver_map, array, &if_handler, &var_handler,
&miss);
}
}
@@ -4079,9 +4221,10 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIf(IsUndefined(p->vector()), &no_feedback);
+ TNode<HeapObjectReference> weak_array_map = MakeWeak(array_map);
TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(p->slot(), CAST(p->vector()), array_map, &if_handler,
- &var_handler, &try_polymorphic);
+ TryMonomorphicCase(p->slot(), CAST(p->vector()), weak_array_map,
+ &if_handler, &var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -4094,11 +4237,11 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIf(TaggedIsSmi(var_handler.value()), &if_smi_handler);
TNode<HeapObject> handler = CAST(var_handler.value());
- GotoIfNot(IsCodeT(handler), &if_transitioning_element_store);
+ GotoIfNot(IsCode(handler), &if_transitioning_element_store);
{
// Call the handler.
- TNode<CodeT> code_handler = CAST(handler);
+ TNode<Code> code_handler = CAST(handler);
TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
p->vector());
@@ -4111,7 +4254,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<Map> transition_map =
CAST(GetHeapObjectAssumeWeak(maybe_transition_map, &miss));
GotoIf(IsDeprecatedMap(transition_map), &miss);
- TNode<CodeT> code =
+ TNode<Code> code =
CAST(LoadObjectField(handler, StoreHandler::kSmiHandlerOffset));
TailCallStub(StoreTransitionDescriptor{}, code, p->context(),
p->receiver(), p->name(), transition_map, p->value(),
@@ -4140,7 +4283,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
Comment("StoreInArrayLiteralIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
&try_megamorphic);
- HandlePolymorphicCase(array_map, CAST(strong_feedback), &if_handler,
+ HandlePolymorphicCase(weak_array_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss);
}
@@ -4463,6 +4606,16 @@ void AccessorAssembler::LookupGlobalIC(
TailCallRuntime(function_id, context, lazy_name());
}
+void AccessorAssembler::GenerateLookupGlobalIC(TypeofMode typeof_mode) {
+ using Descriptor = LookupWithVectorDescriptor;
+ LookupGlobalIC([&] { return Parameter<Object>(Descriptor::kName); },
+ Parameter<TaggedIndex>(Descriptor::kDepth),
+ [&] { return Parameter<TaggedIndex>(Descriptor::kSlot); },
+ Parameter<Context>(Descriptor::kContext),
+ [&] { return Parameter<FeedbackVector>(Descriptor::kVector); },
+ typeof_mode);
+}
+
void AccessorAssembler::GenerateLookupGlobalICTrampoline(
TypeofMode typeof_mode) {
using Descriptor = LookupTrampolineDescriptor;
@@ -4508,6 +4661,19 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
KeyedLoadICGeneric(&p);
}
+void AccessorAssembler::GenerateKeyedLoadIC_MegamorphicStringKey() {
+ using Descriptor = LoadWithVectorDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadICGeneric_StringKey(&p);
+}
+
void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
using Descriptor = LoadDescriptor;
@@ -4545,6 +4711,19 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
slot, vector);
}
+void AccessorAssembler::GenerateKeyedLoadICTrampoline_MegamorphicStringKey() {
+ using Descriptor = LoadDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
+
+ TailCallBuiltin(Builtin::kKeyedLoadIC_MegamorphicStringKey, context, receiver,
+ name, slot, vector);
+}
+
void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
using Descriptor = LoadWithVectorDescriptor;
@@ -4564,10 +4743,11 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto flags = base::nullopt;
auto vector = Parameter<HeapObject>(Descriptor::kVector);
auto context = Parameter<Context>(Descriptor::kContext);
- StoreICParameters p(context, base::nullopt, name, value, slot, vector,
+ StoreICParameters p(context, base::nullopt, name, value, flags, slot, vector,
StoreICMode::kDefault);
StoreGlobalIC(&p);
}
@@ -4602,11 +4782,12 @@ void AccessorAssembler::GenerateStoreIC() {
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = base::nullopt;
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
auto vector = Parameter<HeapObject>(Descriptor::kVector);
auto context = Parameter<Context>(Descriptor::kContext);
- StoreICParameters p(context, receiver, name, value, slot, vector,
+ StoreICParameters p(context, receiver, name, value, flags, slot, vector,
StoreICMode::kDefault);
StoreIC(&p);
}
@@ -4645,11 +4826,12 @@ void AccessorAssembler::GenerateDefineNamedOwnIC() {
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = base::nullopt;
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
auto vector = Parameter<HeapObject>(Descriptor::kVector);
auto context = Parameter<Context>(Descriptor::kContext);
- StoreICParameters p(context, receiver, name, value, slot, vector,
+ StoreICParameters p(context, receiver, name, value, flags, slot, vector,
StoreICMode::kDefineNamedOwn);
// StoreIC is a generic helper that handles both set and define-own
// named stores.
@@ -4690,11 +4872,12 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = base::nullopt;
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
auto vector = Parameter<HeapObject>(Descriptor::kVector);
auto context = Parameter<Context>(Descriptor::kContext);
- StoreICParameters p(context, receiver, name, value, slot, vector,
+ StoreICParameters p(context, receiver, name, value, flags, slot, vector,
StoreICMode::kDefault);
KeyedStoreIC(&p);
}
@@ -4728,46 +4911,49 @@ void AccessorAssembler::GenerateKeyedStoreICBaseline() {
}
void AccessorAssembler::GenerateDefineKeyedOwnIC() {
- using Descriptor = StoreWithVectorDescriptor;
+ using Descriptor = DefineKeyedOwnWithVectorDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
auto vector = Parameter<HeapObject>(Descriptor::kVector);
auto context = Parameter<Context>(Descriptor::kContext);
- StoreICParameters p(context, receiver, name, value, slot, vector,
+ StoreICParameters p(context, receiver, name, value, flags, slot, vector,
StoreICMode::kDefineKeyedOwn);
DefineKeyedOwnIC(&p);
}
void AccessorAssembler::GenerateDefineKeyedOwnICTrampoline() {
- using Descriptor = StoreDescriptor;
+ using Descriptor = DefineKeyedOwnDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtin::kDefineKeyedOwnIC, context, receiver, name, value,
- slot, vector);
+ flags, slot, vector);
}
void AccessorAssembler::GenerateDefineKeyedOwnICBaseline() {
- using Descriptor = StoreBaselineDescriptor;
+ using Descriptor = DefineKeyedOwnBaselineDescriptor;
auto receiver = Parameter<Object>(Descriptor::kReceiver);
auto name = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
TNode<Context> context = LoadContextFromBaseline();
TailCallBuiltin(Builtin::kDefineKeyedOwnIC, context, receiver, name, value,
- slot, vector);
+ flags, slot, vector);
}
void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
@@ -4776,11 +4962,12 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
auto array = Parameter<Object>(Descriptor::kReceiver);
auto index = Parameter<Object>(Descriptor::kName);
auto value = Parameter<Object>(Descriptor::kValue);
+ auto flags = base::nullopt;
auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
auto vector = Parameter<HeapObject>(Descriptor::kVector);
auto context = Parameter<Context>(Descriptor::kContext);
- StoreICParameters p(context, array, index, value, slot, vector,
+ StoreICParameters p(context, array, index, value, flags, slot, vector,
StoreICMode::kDefault);
StoreInArrayLiteralIC(&p);
}
@@ -4884,8 +5071,9 @@ void AccessorAssembler::GenerateCloneObjectIC() {
GotoIf(IsUndefined(maybe_vector), &slow);
+ TNode<HeapObjectReference> weak_source_map = MakeWeak(source_map);
TNode<HeapObjectReference> feedback =
- TryMonomorphicCase(slot, CAST(maybe_vector), source_map, &if_handler,
+ TryMonomorphicCase(slot, CAST(maybe_vector), weak_source_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -4959,7 +5147,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
IntPtrAdd(field_offset, field_offset_difference);
StoreObjectFieldNoWriteBarrier(object, result_offset, field);
},
- 1, IndexAdvanceMode::kPost);
+ 1, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
// We need to go through the {object} again here and properly clone them. We
// use a second loop here to ensure that the GC (and heap verifier) always
@@ -4979,7 +5167,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
{
Comment("CloneObjectIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
- HandlePolymorphicCase(source_map, CAST(strong_feedback), &if_handler,
+ HandlePolymorphicCase(weak_source_map, CAST(strong_feedback), &if_handler,
&var_handler, &miss);
}
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 40445355f3..367a9aff52 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -35,10 +35,12 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void GenerateLoadSuperICBaseline();
void GenerateKeyedLoadIC();
void GenerateKeyedLoadIC_Megamorphic();
+ void GenerateKeyedLoadIC_MegamorphicStringKey();
void GenerateKeyedLoadIC_PolymorphicName();
void GenerateKeyedLoadICTrampoline();
void GenerateKeyedLoadICBaseline();
void GenerateKeyedLoadICTrampoline_Megamorphic();
+ void GenerateKeyedLoadICTrampoline_MegamorphicStringKey();
void GenerateStoreIC();
void GenerateStoreICTrampoline();
void GenerateStoreICBaseline();
@@ -59,6 +61,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
void GenerateLoadGlobalICBaseline(TypeofMode typeof_mode);
+ void GenerateLookupGlobalIC(TypeofMode typeof_mode);
void GenerateLookupGlobalICTrampoline(TypeofMode typeof_mode);
void GenerateLookupGlobalICBaseline(TypeofMode typeof_mode);
void GenerateLookupContextTrampoline(TypeofMode typeof_mode);
@@ -209,12 +212,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
StoreICParameters(TNode<Context> context,
base::Optional<TNode<Object>> receiver,
TNode<Object> name, TNode<Object> value,
- TNode<TaggedIndex> slot, TNode<HeapObject> vector,
- StoreICMode mode)
+ base::Optional<TNode<Smi>> flags, TNode<TaggedIndex> slot,
+ TNode<HeapObject> vector, StoreICMode mode)
: context_(context),
receiver_(receiver),
name_(name),
value_(value),
+ flags_(flags),
slot_(slot),
vector_(vector),
mode_(mode) {}
@@ -223,12 +227,14 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Object> receiver() const { return receiver_.value(); }
TNode<Object> name() const { return name_; }
TNode<Object> value() const { return value_; }
+ TNode<Smi> flags() const { return flags_.value(); }
TNode<TaggedIndex> slot() const { return slot_; }
TNode<HeapObject> vector() const { return vector_; }
TNode<Object> lookup_start_object() const { return receiver(); }
bool receiver_is_null() const { return !receiver_.has_value(); }
+ bool flags_is_null() const { return !flags_.has_value(); }
bool IsDefineNamedOwn() const {
return mode_ == StoreICMode::kDefineNamedOwn;
@@ -245,6 +251,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
base::Optional<TNode<Object>> receiver_;
TNode<Object> name_;
TNode<Object> value_;
+ base::Optional<TNode<Smi>> flags_;
TNode<TaggedIndex> slot_;
TNode<HeapObject> vector_;
StoreICMode mode_;
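StoreICParameters now carries an optional flags operand: plain stores pass base::nullopt and flags_is_null() reports its absence. A standalone mirror of that pattern using std::optional, with illustrative names:

#include <optional>

struct StoreParams {
  int value;
  std::optional<int> flags;  // absent for plain stores, present for
                             // DefineKeyedOwn-style stores
  bool flags_is_null() const { return !flags.has_value(); }
};

// Plain stores omit the flags operand; keyed own definitions supply it.
inline StoreParams MakePlainStore(int value) { return {value, std::nullopt}; }
inline StoreParams MakeDefineKeyedOwn(int value, int flags) {
  return {value, flags};
}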
@@ -268,6 +275,10 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
Label* miss,
StoreTransitionMapFlags flags);
+ // Updates flags on |dict| if |name| is an interesting symbol.
+ void UpdateMayHaveInterestingSymbol(TNode<PropertyDictionary> dict,
+ TNode<Name> name);
+
void JumpIfDataProperty(TNode<Uint32T> details, Label* writable,
Label* readonly);
@@ -323,6 +334,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void KeyedLoadIC(const LoadICParameters* p, LoadAccessMode access_mode);
void KeyedLoadICGeneric(const LoadICParameters* p);
+ void KeyedLoadICGeneric_StringKey(const LoadICParameters* p);
void KeyedLoadICPolymorphicName(const LoadICParameters* p,
LoadAccessMode access_mode);
@@ -343,17 +355,20 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
LazyNode<TaggedIndex> lazy_slot, TNode<Context> context,
TypeofMode typeof_mode);
+ void GotoIfNotSameNumberBitPattern(TNode<Float64T> left,
+ TNode<Float64T> right, Label* miss);
+
// IC dispatcher behavior.
// Checks monomorphic case. Returns {feedback} entry of the vector.
TNode<HeapObjectReference> TryMonomorphicCase(
TNode<TaggedIndex> slot, TNode<FeedbackVector> vector,
- TNode<Map> lookup_start_object_map, Label* if_handler,
+ TNode<HeapObjectReference> weak_lookup_start_object_map,
+ Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss);
+ void HandlePolymorphicCase(
+ TNode<HeapObjectReference> weak_lookup_start_object_map,
+ TNode<WeakFixedArray> feedback, Label* if_handler,
TVariable<MaybeObject>* var_handler, Label* if_miss);
- void HandlePolymorphicCase(TNode<Map> lookup_start_object_map,
- TNode<WeakFixedArray> feedback, Label* if_handler,
- TVariable<MaybeObject>* var_handler,
- Label* if_miss);
void TryMegaDOMCase(TNode<Object> lookup_start_object,
TNode<Map> lookup_start_object_map,
@@ -496,7 +511,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// Low-level helpers.
- using OnCodeHandler = std::function<void(TNode<CodeT> code_handler)>;
+ using OnCodeHandler = std::function<void(TNode<Code> code_handler)>;
using OnFoundOnLookupStartObject = std::function<void(
TNode<PropertyDictionary> properties, TNode<IntPtrT> name_index)>;
@@ -603,7 +618,7 @@ class ExitPoint {
template <class... TArgs>
void ReturnCallStub(const CallInterfaceDescriptor& descriptor,
- TNode<CodeT> target, TNode<Context> context,
+ TNode<Code> target, TNode<Context> context,
TArgs... args) {
if (IsDirect()) {
asm_->TailCallStub(descriptor, target, context, args...);
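The newly declared GotoIfNotSameNumberBitPattern compares two doubles by their raw 64-bit encodings rather than by numeric equality, so +0 and -0 differ and a NaN only matches the identical NaN encoding. A minimal scalar sketch of that comparison (assuming C++20 std::bit_cast):

#include <bit>
#include <cstdint>

inline bool SameNumberBitPattern(double left, double right) {
  return std::bit_cast<uint64_t>(left) == std::bit_cast<uint64_t>(right);
}

// Examples: SameNumberBitPattern(0.0, -0.0) is false even though 0.0 == -0.0,
// and a NaN compares equal to itself here even though NaN != NaN numerically.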
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index c364503311..a1dfbe49d8 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -9,6 +9,16 @@
namespace v8 {
namespace internal {
+namespace {
+
+inline bool IsBigInt64OpSupported(BinaryOpAssembler* assembler, Operation op) {
+ return assembler->Is64() && op != Operation::kExponentiate &&
+ op != Operation::kShiftLeft && op != Operation::kShiftRight &&
+ op != Operation::kShiftRightLogical;
+}
+
+} // namespace
+
TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
@@ -412,7 +422,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
GotoIfNot(IsBigInt(CAST(rhs)), &call_with_any_feedback);
- if (Is64()) {
+ if (IsBigInt64OpSupported(this, op)) {
GotoIfLargeBigInt(CAST(lhs), &if_both_bigint);
GotoIfLargeBigInt(CAST(rhs), &if_both_bigint);
Goto(&if_both_bigint64);
@@ -435,7 +445,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
Goto(&call_stub);
}
- if (Is64()) {
+ if (IsBigInt64OpSupported(this, op)) {
BIND(&if_both_bigint64);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt64);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
@@ -497,10 +507,6 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
}
break;
}
- case Operation::kExponentiate: {
- Goto(&if_both_bigint);
- break;
- }
default:
UNREACHABLE();
}
@@ -513,7 +519,6 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
update_feedback_mode);
switch (op) {
case Operation::kSubtract: {
- Label bigint_too_big(this);
var_result =
CallBuiltin(Builtin::kBigIntSubtractNoThrow, context(), lhs, rhs);
@@ -527,8 +532,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
break;
}
case Operation::kMultiply: {
- Label bigint_too_big(this),
- termination_requested(this, Label::kDeferred);
+ Label termination_requested(this, Label::kDeferred);
var_result =
CallBuiltin(Builtin::kBigIntMultiplyNoThrow, context(), lhs, rhs);
@@ -549,8 +553,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
break;
}
case Operation::kDivide: {
- Label bigint_div_zero(this),
- termination_requested(this, Label::kDeferred);
+ Label termination_requested(this, Label::kDeferred);
var_result =
CallBuiltin(Builtin::kBigIntDivideNoThrow, context(), lhs, rhs);
@@ -571,8 +574,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
break;
}
case Operation::kModulus: {
- Label bigint_div_zero(this),
- termination_requested(this, Label::kDeferred);
+ Label termination_requested(this, Label::kDeferred);
var_result =
CallBuiltin(Builtin::kBigIntModulusNoThrow, context(), lhs, rhs);
@@ -787,35 +789,30 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
TVARIABLE(Word32T, var_left_word32);
TVARIABLE(Word32T, var_right_word32);
TVARIABLE(BigInt, var_left_bigint);
- TVARIABLE(BigInt, var_right_bigint);
- // These are the variables that are passed to BigIntBinaryOp. They are not
- // guaranteed to be BigInts because the Runtime call handles throwing
- // exceptions when only one side is a BigInt.
- TVARIABLE(Object, var_left_maybe_bigint, left);
- TVARIABLE(Numeric, var_right_maybe_bigint);
Label done(this);
Label if_left_number(this), do_number_op(this);
- Label if_left_bigint(this), do_bigint_op(this);
+ Label if_left_bigint(this), if_left_bigint64(this);
+ Label if_left_number_right_bigint(this, Label::kDeferred);
TaggedToWord32OrBigIntWithFeedback(
context(), left, &if_left_number, &var_left_word32, &if_left_bigint,
+ IsBigInt64OpSupported(this, bitwise_op) ? &if_left_bigint64 : nullptr,
&var_left_bigint, slot ? &var_left_feedback : nullptr);
- Label right_is_bigint(this);
BIND(&if_left_number);
- {
- TaggedToWord32OrBigIntWithFeedback(
- context(), right, &do_number_op, &var_right_word32, &right_is_bigint,
- &var_right_bigint, slot ? &var_right_feedback : nullptr);
- }
+ TaggedToWord32OrBigIntWithFeedback(
+ context(), right, &do_number_op, &var_right_word32,
+ &if_left_number_right_bigint, nullptr, nullptr,
+ slot ? &var_right_feedback : nullptr);
- BIND(&right_is_bigint);
+ BIND(&if_left_number_right_bigint);
{
- // At this point it's guaranteed that the op will fail because the RHS is a
- // BigInt while the LHS is not, but that's ok because the Runtime call will
- // throw the exception.
- var_right_maybe_bigint = var_right_bigint.value();
- Goto(&do_bigint_op);
+ if (slot) {
+ // Ensure that the feedback is updated before we throw.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot, update_feedback_mode);
+ }
+ ThrowTypeError(context(), MessageTemplate::kBigIntMixedTypes);
}
BIND(&do_number_op);
@@ -837,29 +834,176 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
}
// BigInt cases.
- BIND(&if_left_bigint);
{
- TaggedToNumericWithFeedback(context(), right, &var_right_maybe_bigint,
- &var_right_feedback);
- var_left_maybe_bigint = var_left_bigint.value();
- Goto(&do_bigint_op);
- }
+ TVARIABLE(BigInt, var_right_bigint);
+ Label if_both_bigint(this), if_both_bigint64(this);
+ Label if_bigint_mix(this, Label::kDeferred);
- BIND(&do_bigint_op);
- {
- if (slot) {
- // Ensure that the feedback is updated even if the runtime call below
- // would throw.
- TNode<Smi> feedback =
- SmiOr(var_left_feedback.value(), var_right_feedback.value());
- UpdateFeedback(feedback, (*maybe_feedback_vector)(), *slot,
- update_feedback_mode);
+ BIND(&if_left_bigint);
+ TaggedToBigInt(context(), right, &if_bigint_mix, &if_both_bigint, nullptr,
+ &var_right_bigint, slot ? &var_right_feedback : nullptr);
+
+ if (IsBigInt64OpSupported(this, bitwise_op)) {
+ BIND(&if_left_bigint64);
+ TaggedToBigInt(context(), right, &if_bigint_mix, &if_both_bigint,
+ &if_both_bigint64, &var_right_bigint,
+ slot ? &var_right_feedback : nullptr);
+
+ BIND(&if_both_bigint64);
+ if (slot) {
+ // {feedback} is Any if {left} or {right} is non-number.
+ TNode<Smi> feedback =
+ SmiOr(var_left_feedback.value(), var_right_feedback.value());
+ UpdateFeedback(feedback, (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+
+ TVARIABLE(UintPtrT, left_raw);
+ TVARIABLE(UintPtrT, right_raw);
+ BigIntToRawBytes(var_left_bigint.value(), &left_raw, &left_raw);
+ BigIntToRawBytes(var_right_bigint.value(), &right_raw, &right_raw);
+
+ switch (bitwise_op) {
+ case Operation::kBitwiseAnd: {
+ result = BigIntFromInt64(UncheckedCast<IntPtrT>(
+ WordAnd(left_raw.value(), right_raw.value())));
+ Goto(&done);
+ break;
+ }
+ case Operation::kBitwiseOr: {
+ result = BigIntFromInt64(UncheckedCast<IntPtrT>(
+ WordOr(left_raw.value(), right_raw.value())));
+ Goto(&done);
+ break;
+ }
+ case Operation::kBitwiseXor: {
+ result = BigIntFromInt64(UncheckedCast<IntPtrT>(
+ WordXor(left_raw.value(), right_raw.value())));
+ Goto(&done);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
- result = CallRuntime(
- Runtime::kBigIntBinaryOp, context(), var_left_maybe_bigint.value(),
- var_right_maybe_bigint.value(), SmiConstant(bitwise_op));
- Goto(&done);
+ BIND(&if_both_bigint);
+ {
+ if (slot) {
+ // Ensure that the feedback is updated even if the runtime call below
+ // would throw.
+ TNode<Smi> feedback =
+ SmiOr(var_left_feedback.value(), var_right_feedback.value());
+ UpdateFeedback(feedback, (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+
+ switch (bitwise_op) {
+ case Operation::kBitwiseAnd: {
+ result =
+ CallBuiltin(Builtin::kBigIntBitwiseAndNoThrow, context(),
+ var_left_bigint.value(), var_right_bigint.value());
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIfNot(TaggedIsSmi(result.value()), &done);
+
+ if (slot) {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
+ break;
+ }
+ case Operation::kBitwiseOr: {
+ result =
+ CallBuiltin(Builtin::kBigIntBitwiseOrNoThrow, context(),
+ var_left_bigint.value(), var_right_bigint.value());
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIfNot(TaggedIsSmi(result.value()), &done);
+
+ if (slot) {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
+ break;
+ }
+ case Operation::kBitwiseXor: {
+ result =
+ CallBuiltin(Builtin::kBigIntBitwiseXorNoThrow, context(),
+ var_left_bigint.value(), var_right_bigint.value());
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIfNot(TaggedIsSmi(result.value()), &done);
+
+ if (slot) {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
+ break;
+ }
+ case Operation::kShiftLeft: {
+ result =
+ CallBuiltin(Builtin::kBigIntShiftLeftNoThrow, context(),
+ var_left_bigint.value(), var_right_bigint.value());
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIfNot(TaggedIsSmi(result.value()), &done);
+
+ if (slot) {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
+ break;
+ }
+ case Operation::kShiftRight: {
+ result =
+ CallBuiltin(Builtin::kBigIntShiftRightNoThrow, context(),
+ var_left_bigint.value(), var_right_bigint.value());
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIfNot(TaggedIsSmi(result.value()), &done);
+
+ if (slot) {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
+ break;
+ }
+ case Operation::kShiftRightLogical: {
+ if (slot) {
+ // Ensure that the feedback is updated before we throw.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot,
+ update_feedback_mode);
+ }
+ // BigInt does not support logical right shift.
+ ThrowTypeError(context(), MessageTemplate::kBigIntShr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ BIND(&if_bigint_mix);
+ {
+ if (slot) {
+ // Ensure that the feedback is updated before we throw.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot, update_feedback_mode);
+ }
+ ThrowTypeError(context(), MessageTemplate::kBigIntMixedTypes);
+ }
}
BIND(&done);
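On 64-bit targets the if_both_bigint64 path unpacks both BigInts into raw machine words and combines them with WordAnd/WordOr/WordXor, which is sound whenever both operands fit in a signed 64-bit two's-complement word. The equivalent scalar computation, as a sketch:

#include <cstdint>

// Assumes both BigInt operands are already known to fit in int64_t, which is
// what the if_both_bigint64 feedback path guarantees.
inline int64_t BigInt64BitwiseAnd(int64_t lhs, int64_t rhs) { return lhs & rhs; }
inline int64_t BigInt64BitwiseOr(int64_t lhs, int64_t rhs) { return lhs | rhs; }
inline int64_t BigInt64BitwiseXor(int64_t lhs, int64_t rhs) { return lhs ^ rhs; }

// Shifts and exponentiation are excluded by IsBigInt64OpSupported because
// their results can exceed 64 bits, so they stay on the generic BigInt path.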
@@ -905,7 +1049,7 @@ BinaryOpAssembler::Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
TNode<HeapObject> left_pointer = CAST(left);
TaggedPointerToWord32OrBigIntWithFeedback(
context(), left_pointer, &do_number_op, &var_left_word32,
- &if_bigint_mix, &var_left_bigint, &var_left_feedback);
+ &if_bigint_mix, nullptr, &var_left_bigint, &var_left_feedback);
BIND(&do_number_op);
{
result =
@@ -923,9 +1067,8 @@ BinaryOpAssembler::Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
{
if (slot) {
// Ensure that the feedback is updated before we throw.
- feedback = var_left_feedback.value();
- UpdateFeedback(feedback.value(), (*maybe_feedback_vector)(), *slot,
- update_feedback_mode);
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ (*maybe_feedback_vector)(), *slot, update_feedback_mode);
}
ThrowTypeError(context(), MessageTemplate::kBigIntMixedTypes);
}
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 3e01b61226..679e6ea95c 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -154,7 +154,7 @@ Handle<Smi> StoreHandler::StoreInterceptor(Isolate* isolate) {
return handle(Smi::FromInt(config), isolate);
}
-Handle<CodeT> StoreHandler::StoreSloppyArgumentsBuiltin(
+Handle<Code> StoreHandler::StoreSloppyArgumentsBuiltin(
Isolate* isolate, KeyedAccessStoreMode mode) {
switch (mode) {
case STANDARD_STORE:
@@ -173,8 +173,8 @@ Handle<CodeT> StoreHandler::StoreSloppyArgumentsBuiltin(
}
}
-Handle<CodeT> StoreHandler::StoreFastElementBuiltin(Isolate* isolate,
- KeyedAccessStoreMode mode) {
+Handle<Code> StoreHandler::StoreFastElementBuiltin(Isolate* isolate,
+ KeyedAccessStoreMode mode) {
switch (mode) {
case STANDARD_STORE:
return BUILTIN_CODE(isolate, StoreFastElementIC_Standard);
@@ -190,7 +190,7 @@ Handle<CodeT> StoreHandler::StoreFastElementBuiltin(Isolate* isolate,
}
}
-Handle<CodeT> StoreHandler::ElementsTransitionAndStoreBuiltin(
+Handle<Code> StoreHandler::ElementsTransitionAndStoreBuiltin(
Isolate* isolate, KeyedAccessStoreMode mode) {
switch (mode) {
case STANDARD_STORE:
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 43511407e0..29dd9fabf7 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -344,6 +344,15 @@ Handle<Object> StoreHandler::StoreProxy(Isolate* isolate,
MaybeObjectHandle::Weak(proxy));
}
+bool LoadHandler::CanHandleHolderNotLookupStart(Object handler) {
+ if (handler.IsSmi()) {
+ auto kind = LoadHandler::KindBits::decode(handler.ToSmi().value());
+ return kind == LoadHandler::Kind::kSlow ||
+ kind == LoadHandler::Kind::kNonExistent;
+ }
+ return handler.IsLoadHandler();
+}
+
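CanHandleHolderNotLookupStart decodes the handler kind from the Smi via LoadHandler::KindBits and accepts only kSlow, kNonExistent, or a full LoadHandler object. The decode is a conventional bit-field extraction; a sketch with an assumed shift, mask, and kind numbering (none of these constants are taken from this diff):

#include <cstdint>

// Assumed encoding: the kind occupies the low bits of the Smi payload.
constexpr int kKindShift = 0;
constexpr uint32_t kKindMask = 0x1f;

enum class Kind : uint32_t { kNonExistent = 0, kSlow = 1 /* illustrative */ };

inline Kind DecodeKind(int32_t smi_handler) {
  return static_cast<Kind>(
      (static_cast<uint32_t>(smi_handler) >> kKindShift) & kKindMask);
}

inline bool CanHandleHolderNotLookupStart(int32_t smi_handler) {
  Kind kind = DecodeKind(smi_handler);
  return kind == Kind::kSlow || kind == Kind::kNonExistent;
}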
#if defined(OBJECT_PRINT)
namespace {
void PrintSmiLoadHandler(int raw_handler, std::ostream& os) {
@@ -499,7 +508,10 @@ void PrintSmiStoreHandler(int raw_handler, std::ostream& os) {
case StoreHandler::Kind::kProxy:
os << "kProxy";
break;
- default:
+ case StoreHandler::Kind::kSharedStructField:
+ os << "kSharedStructField";
+ break;
+ case StoreHandler::Kind::kKindsNumber:
UNREACHABLE();
}
}
@@ -514,9 +526,9 @@ void LoadHandler::PrintHandler(Object handler, std::ostream& os) {
os << "LoadHandler(Smi)(";
PrintSmiLoadHandler(raw_handler, os);
os << ")";
- } else if (handler.IsCodeT()) {
+ } else if (handler.IsCode()) {
os << "LoadHandler(Code)("
- << Builtins::name(CodeT::cast(handler).builtin_id()) << ")";
+ << Builtins::name(Code::cast(handler).builtin_id()) << ")";
} else if (handler.IsSymbol()) {
os << "LoadHandler(Symbol)(" << Brief(Symbol::cast(handler)) << ")";
} else if (handler.IsLoadHandler()) {
@@ -557,8 +569,8 @@ void StoreHandler::PrintHandler(Object handler, std::ostream& os) {
} else if (handler.IsStoreHandler()) {
os << "StoreHandler(";
StoreHandler store_handler = StoreHandler::cast(handler);
- if (store_handler.smi_handler().IsCodeT()) {
- CodeT code = CodeT::cast(store_handler.smi_handler());
+ if (store_handler.smi_handler().IsCode()) {
+ Code code = Code::cast(store_handler.smi_handler());
os << "builtin = ";
code.ShortPrint(os);
} else {
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index c262620c3e..a7bc14be0f 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -228,6 +228,10 @@ class LoadHandler final : public DataHandler {
// Decodes the KeyedAccessLoadMode from a {handler}.
static KeyedAccessLoadMode GetKeyedAccessLoadMode(MaybeObject handler);
+ // Returns true iff the handler can be used in the "holder != lookup start
+ // object" case.
+ static bool CanHandleHolderNotLookupStart(Object handler);
+
#if defined(OBJECT_PRINT)
static void PrintHandler(Object handler, std::ostream& os);
#endif // defined(OBJECT_PRINT)
@@ -354,11 +358,11 @@ class StoreHandler final : public DataHandler {
// Creates a Smi-handler for storing a property to an interceptor.
static inline Handle<Smi> StoreInterceptor(Isolate* isolate);
- static inline Handle<CodeT> StoreSloppyArgumentsBuiltin(
- Isolate* isolate, KeyedAccessStoreMode mode);
- static inline Handle<CodeT> StoreFastElementBuiltin(
+ static inline Handle<Code> StoreSloppyArgumentsBuiltin(
Isolate* isolate, KeyedAccessStoreMode mode);
- static inline Handle<CodeT> ElementsTransitionAndStoreBuiltin(
+ static inline Handle<Code> StoreFastElementBuiltin(Isolate* isolate,
+ KeyedAccessStoreMode mode);
+ static inline Handle<Code> ElementsTransitionAndStoreBuiltin(
Isolate* isolate, KeyedAccessStoreMode mode);
// Creates a Smi-handler for storing a property.
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index cc998c72b2..eb62f89726 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -32,7 +32,7 @@ bool IC::IsHandler(MaybeObject object) {
(heap_object.IsMap() || heap_object.IsPropertyCell() ||
heap_object.IsAccessorPair())) ||
(object->GetHeapObjectIfStrong(&heap_object) &&
- (heap_object.IsDataHandler() || heap_object.IsCodeT()));
+ (heap_object.IsDataHandler() || heap_object.IsCode()));
}
bool IC::vector_needs_update() {
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index ae1dde1a8c..9b1a82eab3 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -138,7 +138,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
return;
}
- JavaScriptFrameIterator it(isolate());
+ JavaScriptStackFrameIterator it(isolate());
JavaScriptFrame* frame = it.frame();
DisallowGarbageCollection no_gc;
@@ -309,16 +309,18 @@ void IC::OnFeedbackChanged(const char* reason) {
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
FeedbackSlot slot, const char* reason) {
- if (v8_flags.trace_opt_verbose) {
- if (vector.profiler_ticks() != 0) {
- StdoutStream os;
- os << "[resetting ticks for ";
- vector.shared_function_info().ShortPrint(os);
- os << " from " << vector.profiler_ticks()
- << " due to IC change: " << reason << "]" << std::endl;
+ if (v8_flags.reset_ticks_on_ic_update) {
+ if (v8_flags.trace_opt_verbose) {
+ if (vector.profiler_ticks() != 0) {
+ StdoutStream os;
+ os << "[resetting ticks for ";
+ vector.shared_function_info().ShortPrint(os);
+ os << " from " << vector.profiler_ticks()
+ << " due to IC change: " << reason << "]" << std::endl;
+ }
}
+ vector.set_profiler_ticks(0);
}
- vector.set_profiler_ticks(0);
#ifdef V8_TRACE_FEEDBACK_UPDATES
if (v8_flags.trace_feedback_updates) {
@@ -341,7 +343,7 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
}
#endif
- isolate->tiering_manager()->NotifyICChanged();
+ isolate->tiering_manager()->NotifyICChanged(vector);
}
namespace {
@@ -597,7 +599,7 @@ static bool AddOneReceiverMapIfMissing(
}
bool IC::UpdateMegaDOMIC(const MaybeObjectHandle& handler, Handle<Name> name) {
- if (!v8_flags.enable_mega_dom_ic) return false;
+ if (!v8_flags.mega_dom_ic) return false;
// TODO(gsathya): Enable fuzzing once this feature is more stable.
if (v8_flags.fuzzing) return false;
@@ -837,6 +839,10 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
}
}
handler = ComputeHandler(lookup);
+ auto holder = lookup->GetHolder<Object>();
+ CHECK(*holder == *(lookup->lookup_start_object()) ||
+ LoadHandler::CanHandleHolderNotLookupStart(*handler.object()) ||
+ holder->IsJSPrimitiveWrapper());
}
// Can't use {lookup->name()} because the LookupIterator might be in
// "elements" mode for keys that are strings representing integers above
@@ -1153,7 +1159,7 @@ MaybeObjectHandle LoadIC::ComputeHandler(LookupIterator* lookup) {
UNREACHABLE();
}
- return MaybeObjectHandle(Handle<Code>::null());
+ return MaybeObjectHandle(Handle<InstructionStream>::null());
}
bool KeyedLoadIC::CanChangeToAllowOutOfBounds(Handle<Map> receiver_map) {
@@ -1750,7 +1756,6 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
LookupIterator it(
isolate(), object, key,
IsDefineNamedOwnIC() ? LookupIterator::OWN : LookupIterator::DEFAULT);
- DCHECK_IMPLIES(IsDefineNamedOwnIC(), it.IsFound() && it.HolderIsReceiver());
// TODO(v8:12548): refactor DefinedNamedOwnIC and SetNamedIC as subclasses
// of StoreIC so their logic doesn't get mixed here.
if (IsDefineNamedOwnIC()) {
@@ -1818,6 +1823,11 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
if (!can_define.FromJust()) {
return isolate()->factory()->undefined_value();
}
+ // Restart the lookup iterator that CheckIfCanDefine() advanced, so that
+ // UpdateCaches() can handle access checks.
+ if (use_ic && object->IsAccessCheckNeeded()) {
+ it.Restart();
+ }
}
if (use_ic) {
@@ -2110,8 +2120,10 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<JSProxy> holder = lookup->GetHolder<JSProxy>();
// IsDefineNamedOwnIC() is true when we are defining public fields on a
- // Proxy. In that case use the slow stub to invoke the define trap.
- if (IsDefineNamedOwnIC()) {
+ // Proxy. IsDefineKeyedOwnIC() is true when we are defining computed
+ // fields in a Proxy. In these cases use the slow stub to invoke the
+ // define trap.
+ if (IsDefineNamedOwnIC() || IsDefineKeyedOwnIC()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
}
@@ -2278,6 +2290,13 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
IsStoreInArrayLiteralIC());
if (receiver_map->IsJSProxyMap()) {
+ // DefineKeyedOwnIC, which is used to define computed fields in instances,
+ // should be handled by the slow stub.
+ if (IsDefineKeyedOwnIC()) {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
+ return StoreHandler::StoreSlow(isolate(), store_mode);
+ }
+
return StoreHandler::StoreProxy(isolate());
}
@@ -2699,7 +2718,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
}
@@ -2758,7 +2777,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
}
@@ -2786,7 +2805,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
// when feedback vector is available.
FeedbackSlotKind kind = FeedbackSlotKind::kSetNamedStrict;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
kind = vector->GetKind(vector_slot);
@@ -2814,7 +2833,7 @@ RUNTIME_FUNCTION(Runtime_DefineNamedOwnIC_Miss) {
// feedback kind. There _should_ be a vector, though.
FeedbackSlotKind kind = FeedbackSlotKind::kDefineNamedOwn;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
kind = vector->GetKind(vector_slot);
@@ -2953,7 +2972,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
// and StoreInArrayLiteral kinds.
FeedbackSlotKind kind = FeedbackSlotKind::kSetKeyedStrict;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
kind = vector->GetKind(vector_slot);
@@ -2991,7 +3010,7 @@ RUNTIME_FUNCTION(Runtime_DefineKeyedOwnIC_Miss) {
FeedbackSlotKind kind = FeedbackSlotKind::kDefineKeyedOwn;
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
kind = vector->GetKind(vector_slot);
@@ -3015,7 +3034,7 @@ RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
}
@@ -3394,7 +3413,7 @@ RUNTIME_FUNCTION(Runtime_KeyedHasIC_Miss) {
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
- if (!maybe_vector->IsUndefined()) {
+ if (!maybe_vector->IsUndefined(isolate)) {
DCHECK(maybe_vector->IsFeedbackVector());
vector = Handle<FeedbackVector>::cast(maybe_vector);
}
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 69e4c3e4e5..dbc8620113 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -969,8 +969,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<Object> prev_value =
LoadValueByKeyIndex(properties, var_name_index.value());
- BranchIfSameValue(prev_value, p->value(), &done, slow,
- SameValueMode::kNumbersOnly);
+ Branch(TaggedEqual(prev_value, p->value()), &done, slow);
} else {
Goto(&overwrite);
}
@@ -1018,6 +1017,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
}
Label add_dictionary_property_slow(this);
InvalidateValidityCellIfPrototype(receiver_map, bitfield3);
+ UpdateMayHaveInterestingSymbol(properties, name);
Add<PropertyDictionary>(properties, name, p->value(),
&add_dictionary_property_slow);
exit_point->Return(p->value());
@@ -1120,8 +1120,9 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&if_unique_name);
{
Comment("key is unique name");
- StoreICParameters p(context, receiver, var_unique.value(), value, {},
- UndefinedConstant(), StoreICMode::kDefault);
+ StoreICParameters p(context, receiver, var_unique.value(), value,
+ base::nullopt, {}, UndefinedConstant(),
+ StoreICMode::kDefault);
ExitPoint direct_exit(this);
EmitGenericPropertyStore(CAST(receiver), receiver_map, instance_type, &p,
&direct_exit, &slow, language_mode);
@@ -1150,8 +1151,12 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
value);
} else {
DCHECK(IsDefineKeyedOwnInLiteral());
- TailCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral_Simple, context,
- receiver, key, value);
+ TNode<Smi> flags =
+ SmiConstant(DefineKeyedOwnPropertyInLiteralFlag::kNoFlags);
+ // TODO(v8:10047): Use TaggedIndexConstant here once TurboFan supports it.
+ TNode<Smi> slot = SmiConstant(FeedbackSlot::Invalid().ToInt());
+ TailCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral, context,
+ receiver, key, value, flags, UndefinedConstant(), slot);
}
}
}
@@ -1196,7 +1201,7 @@ void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
// checks, strings and string wrappers, proxies) are handled in the runtime.
GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
{
- StoreICParameters p(context, receiver, name, value, slot,
+ StoreICParameters p(context, receiver, name, value, base::nullopt, slot,
UndefinedConstant(),
IsDefineNamedOwn() ? StoreICMode::kDefineNamedOwn
: StoreICMode::kDefault);
@@ -1220,7 +1225,7 @@ void KeyedStoreGenericAssembler::StoreProperty(TNode<Context> context,
TNode<Name> unique_name,
TNode<Object> value,
LanguageMode language_mode) {
- StoreICParameters p(context, receiver, unique_name, value, {},
+ StoreICParameters p(context, receiver, unique_name, value, base::nullopt, {},
UndefinedConstant(), StoreICMode::kDefault);
Label done(this), slow(this, Label::kDeferred);
@@ -1238,8 +1243,12 @@ void KeyedStoreGenericAssembler::StoreProperty(TNode<Context> context,
BIND(&slow);
{
if (IsDefineKeyedOwnInLiteral()) {
- CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral_Simple, context,
- receiver, unique_name, value);
+ TNode<Smi> flags =
+ SmiConstant(DefineKeyedOwnPropertyInLiteralFlag::kNoFlags);
+ // TODO(v8:10047): Use TaggedIndexConstant here once TurboFan supports it.
+ TNode<Smi> slot = SmiConstant(FeedbackSlot::Invalid().ToInt());
+ CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral, context, receiver,
+ unique_name, value, flags, p.vector(), slot);
} else {
CallRuntime(Runtime::kSetKeyedProperty, context, receiver, unique_name,
value);
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 4dd60fdfa9..c3457563a6 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -14,29 +14,10 @@
namespace v8 {
namespace internal {
-// static
-void StubCache::ClearCallback(v8::Isolate* isolate, v8::GCType type,
- v8::GCCallbackFlags flags, void* data) {
- StubCache* cache = static_cast<StubCache*>(data);
- cache->Clear();
-}
-
StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
// Ensure the nullptr (aka Smi::zero()) which StubCache::Get() returns
// when the entry is not found is not considered as a handler.
DCHECK(!IC::IsHandler(MaybeObject()));
-
- // The stub caches are not traversed during GC; clear them to force
- // their lazy re-initialization. This must be done after the
- // GC, because it relies on the new address of certain old space
- // objects (empty string, illegal builtin).
-
- isolate_->heap()->AddGCEpilogueCallback(ClearCallback,
- kGCTypeMarkSweepCompact, this);
-}
-
-StubCache::~StubCache() {
- isolate_->heap()->RemoveGCEpilogueCallback(ClearCallback, this);
}
void StubCache::Initialize() {
@@ -56,7 +37,7 @@ int StubCache::PrimaryOffset(Name name, Map map) {
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
uint32_t map_low32bits =
- static_cast<uint32_t>(map.ptr() ^ (map.ptr() >> kMapKeyShift));
+ static_cast<uint32_t>(map.ptr() ^ (map.ptr() >> kPrimaryTableBits));
// Base the offset on a simple combination of name and map.
uint32_t key = map_low32bits + field;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
@@ -70,7 +51,7 @@ int StubCache::SecondaryOffset(Name name, Map old_map) {
uint32_t name_low32bits = static_cast<uint32_t>(name.ptr());
uint32_t map_low32bits = static_cast<uint32_t>(old_map.ptr());
uint32_t key = (map_low32bits + name_low32bits);
- key = key + (key >> kSecondaryKeyShift);
+ key = key + (key >> kSecondaryTableBits);
return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
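With kMapKeyShift and kSecondaryKeyShift removed, both offsets now derive their extra entropy directly from the table-size bits. Restated as plain C++ for clarity; kSecondaryTableBits = 9 matches the header hunk below, while kPrimaryTableBits and kCacheIndexShift are assumed values:

#include <cstdint>

constexpr int kCacheIndexShift = 3;     // assumed
constexpr int kPrimaryTableBits = 12;   // assumed
constexpr int kPrimaryTableSize = 1 << kPrimaryTableBits;
constexpr int kSecondaryTableBits = 9;  // matches stub-cache.h below
constexpr int kSecondaryTableSize = 1 << kSecondaryTableBits;

inline int PrimaryOffset(uint32_t name_hash_field, uintptr_t map_ptr) {
  uint32_t map_low32 =
      static_cast<uint32_t>(map_ptr ^ (map_ptr >> kPrimaryTableBits));
  uint32_t key = map_low32 + name_hash_field;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

inline int SecondaryOffset(uintptr_t name_ptr, uintptr_t map_ptr) {
  uint32_t key =
      static_cast<uint32_t>(map_ptr) + static_cast<uint32_t>(name_ptr);
  key = key + (key >> kSecondaryTableBits);
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}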
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 0372919f18..2bee68c893 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -90,20 +90,11 @@ class V8_EXPORT_PRIVATE StubCache {
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
- // Used to introduce more entropy from the higher bits of the Map address.
- // This should fill in the masked out kCacheIndexShift-bits.
- static const int kMapKeyShift = kPrimaryTableBits + kCacheIndexShift;
- static const int kSecondaryKeyShift = kSecondaryTableBits + kCacheIndexShift;
-
static int PrimaryOffsetForTesting(Name name, Map map);
static int SecondaryOffsetForTesting(Name name, Map map);
- static void ClearCallback(v8::Isolate* isolate, v8::GCType type,
- v8::GCCallbackFlags flags, void* data);
-
// The constructor is made public only for the purposes of testing.
explicit StubCache(Isolate* isolate);
- ~StubCache();
StubCache(const StubCache&) = delete;
StubCache& operator=(const StubCache&) = delete;
diff --git a/deps/v8/src/ic/unary-op-assembler.cc b/deps/v8/src/ic/unary-op-assembler.cc
index ee76ef46bf..fe3dc599ca 100644
--- a/deps/v8/src/ic/unary-op-assembler.cc
+++ b/deps/v8/src/ic/unary-op-assembler.cc
@@ -29,7 +29,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TVARIABLE(Object, var_result);
Label if_number(this), if_bigint(this, Label::kDeferred), out(this);
TaggedToWord32OrBigIntWithFeedback(context, value, &if_number, &var_word32,
- &if_bigint, &var_bigint, &var_feedback);
+ &if_bigint, nullptr, &var_bigint,
+ &var_feedback);
// Number case.
BIND(&if_number);
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index cdb2e88536..69d01ff339 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -43,6 +43,7 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-atomics-synchronization.h"
+#include "src/objects/js-iterator-helpers.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
@@ -246,8 +247,6 @@ class Genesis {
#undef DECLARE_FEATURE_INITIALIZATION
void InitializeGlobal_regexp_linear_flag();
- void InitializeGlobal_experimental_web_snapshots();
-
enum ArrayBufferKind { ARRAY_BUFFER, SHARED_ARRAY_BUFFER };
Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
ArrayBufferKind array_buffer_kind);
@@ -523,28 +522,26 @@ V8_NOINLINE Handle<JSFunction> InstallFunction(
V8_NOINLINE Handle<JSFunction> CreateSharedObjectConstructor(
Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
- ElementsKind element_kind, Builtin builtin) {
+ int inobject_properties, ElementsKind element_kind, Builtin builtin) {
Factory* factory = isolate->factory();
Handle<SharedFunctionInfo> info = factory->NewSharedFunctionInfoForBuiltin(
name, builtin, FunctionKind::kNormalFunction);
info->set_language_mode(LanguageMode::kStrict);
Handle<JSFunction> constructor =
Factory::JSFunctionBuilder{isolate, info, isolate->native_context()}
- .set_map(isolate->strict_function_map())
+ .set_map(isolate->strict_function_with_readonly_prototype_map())
.Build();
- constexpr int in_object_properties = 0;
Handle<Map> instance_map =
- factory->NewMap(type, instance_size, element_kind, in_object_properties,
+ factory->NewMap(type, instance_size, element_kind, inobject_properties,
AllocationType::kSharedMap);
// Shared objects have fixed layout ahead of time, so there's no slack.
instance_map->SetInObjectUnusedPropertyFields(0);
// Shared objects are not extensible and have a null prototype.
instance_map->set_is_extensible(false);
JSFunction::SetInitialMap(isolate, constructor, instance_map,
- factory->null_value());
- // The constructor itself is not a shared object, so the shared map should not
- // point to it.
- instance_map->set_constructor_or_back_pointer(*factory->null_value());
+ factory->null_value(), factory->null_value());
+ constructor->map().SetConstructor(ReadOnlyRoots(isolate).null_value());
+ constructor->map().set_has_non_instance_prototype(true);
return constructor;
}
@@ -794,7 +791,8 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
.Assert();
- if (JSObject::PreventExtensions(function, kThrowOnError).IsNothing()) {
+ if (JSObject::PreventExtensions(isolate_, function, kThrowOnError)
+ .IsNothing()) {
DCHECK(false);
}
@@ -1803,6 +1801,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
1, false);
SimpleInstallFunction(isolate_, proto, "findIndex",
Builtin::kArrayPrototypeFindIndex, 1, false);
+ SimpleInstallFunction(isolate_, proto, "findLast",
+ Builtin::kArrayPrototypeFindLast, 1, false);
+ SimpleInstallFunction(isolate_, proto, "findLastIndex",
+ Builtin::kArrayPrototypeFindLastIndex, 1, false);
SimpleInstallFunction(isolate_, proto, "lastIndexOf",
Builtin::kArrayPrototypeLastIndexOf, 1, false);
SimpleInstallFunction(isolate_, proto, "pop", Builtin::kArrayPrototypePop,
@@ -1875,6 +1877,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallTrueValuedProperty(isolate_, unscopables, "fill");
InstallTrueValuedProperty(isolate_, unscopables, "find");
InstallTrueValuedProperty(isolate_, unscopables, "findIndex");
+ InstallTrueValuedProperty(isolate_, unscopables, "findLast");
+ InstallTrueValuedProperty(isolate_, unscopables, "findLastIndex");
InstallTrueValuedProperty(isolate_, unscopables, "flat");
InstallTrueValuedProperty(isolate_, unscopables, "flatMap");
InstallTrueValuedProperty(isolate_, unscopables, "includes");
@@ -3395,6 +3399,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtin::kTypedArrayPrototypeFind, 1, false);
SimpleInstallFunction(isolate_, prototype, "findIndex",
Builtin::kTypedArrayPrototypeFindIndex, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "findLast",
+ Builtin::kTypedArrayPrototypeFindLast, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "findLastIndex",
+ Builtin::kTypedArrayPrototypeFindLastIndex, 1, false);
SimpleInstallFunction(isolate_, prototype, "forEach",
Builtin::kTypedArrayPrototypeForEach, 1, false);
SimpleInstallFunction(isolate_, prototype, "includes",
@@ -3459,6 +3467,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallToStringTag(isolate_, prototype, "DataView");
+ // Setup objects needed for the JSRabGsabDataView.
+ Handle<Map> rab_gsab_data_view_map = factory->NewMap(
+ JS_RAB_GSAB_DATA_VIEW_TYPE, JSDataView::kSizeWithEmbedderFields,
+ TERMINAL_FAST_ELEMENTS_KIND);
+ Map::SetPrototype(isolate(), rab_gsab_data_view_map, prototype);
+ rab_gsab_data_view_map->SetConstructor(*data_view_fun);
+ native_context()->set_js_rab_gsab_data_view_map(*rab_gsab_data_view_map);
+
// Install the "buffer", "byteOffset" and "byteLength" getters
// on the {prototype}.
SimpleInstallGetter(isolate_, prototype, factory->buffer_string(),
@@ -4141,7 +4157,6 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
InitializeGlobal_regexp_linear_flag();
- InitializeGlobal_experimental_web_snapshots();
}
bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
@@ -4501,15 +4516,15 @@ void Genesis::InitializeConsole(Handle<JSObject> extras_binding) {
Builtin::kConsoleTimeStamp, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "context", Builtin::kConsoleContext,
1, true, NONE);
- InstallToStringTag(isolate_, console, "Object");
+ InstallToStringTag(isolate_, console, "console");
}
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_static_blocks)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_symbol_as_weakmap_key)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rab_gsab_transfer)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
@@ -4517,15 +4532,86 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
+void Genesis::InitializeGlobal_harmony_iterator_helpers() {
+ if (!v8_flags.harmony_iterator_helpers) return;
+
+ // --- Iterator
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype(), isolate());
+ Handle<JSFunction> iterator_function = InstallFunction(
+ isolate(), global, "Iterator", JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
+ iterator_prototype, Builtin::kIteratorConstructor);
+ iterator_function->shared().DontAdaptArguments();
+ iterator_function->shared().set_length(0);
+ SimpleInstallFunction(isolate(), iterator_function, "from",
+ Builtin::kIteratorFrom, 1, true);
+ InstallWithIntrinsicDefaultProto(isolate(), iterator_function,
+ Context::ITERATOR_FUNCTION_INDEX);
+
+ // --- %WrapForValidIteratorPrototype%
+ Handle<JSObject> wrap_for_valid_iterator_prototype = factory()->NewJSObject(
+ isolate()->object_function(), AllocationType::kOld);
+ JSObject::ForceSetPrototype(isolate(), wrap_for_valid_iterator_prototype,
+ iterator_prototype);
+ SimpleInstallFunction(isolate(), wrap_for_valid_iterator_prototype, "next",
+ Builtin::kWrapForValidIteratorPrototypeNext, 0, true);
+ SimpleInstallFunction(isolate(), wrap_for_valid_iterator_prototype, "return",
+ Builtin::kWrapForValidIteratorPrototypeReturn, 0, true);
+ Handle<Map> valid_iterator_wrapper_map = factory()->NewMap(
+ JS_VALID_ITERATOR_WRAPPER_TYPE, JSValidIteratorWrapper::kHeaderSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 0);
+ Map::SetPrototype(isolate(), valid_iterator_wrapper_map,
+ wrap_for_valid_iterator_prototype);
+ native_context()->set_valid_iterator_wrapper_map(*valid_iterator_wrapper_map);
+
+ // --- %IteratorHelperPrototype%
+ Handle<JSObject> iterator_helper_prototype = factory()->NewJSObject(
+ isolate()->object_function(), AllocationType::kOld);
+ JSObject::ForceSetPrototype(isolate(), iterator_helper_prototype,
+ iterator_prototype);
+ InstallToStringTag(isolate(), iterator_helper_prototype, "Iterator Helper");
+ SimpleInstallFunction(isolate(), iterator_helper_prototype, "next",
+ Builtin::kIteratorHelperPrototypeNext, 0, true);
+ SimpleInstallFunction(isolate(), iterator_helper_prototype, "return",
+ Builtin::kIteratorHelperPrototypeReturn, 0, true);
+
+ // --- Helper maps
+#define INSTALL_ITERATOR_HELPER(lowercase_name, Capitalized_name, \
+ ALL_CAPS_NAME, argc) \
+ { \
+ Handle<Map> map = \
+ factory()->NewMap(JS_ITERATOR_##ALL_CAPS_NAME##_HELPER_TYPE, \
+ JSIterator##Capitalized_name##Helper::kHeaderSize, \
+ TERMINAL_FAST_ELEMENTS_KIND, 0); \
+ Map::SetPrototype(isolate(), map, iterator_helper_prototype); \
+ native_context()->set_iterator_##lowercase_name##_helper_map(*map); \
+ SimpleInstallFunction(isolate(), iterator_prototype, #lowercase_name, \
+ Builtin::kIteratorPrototype##Capitalized_name, argc, \
+ true); \
+ }
+
+#define ITERATOR_HELPERS(V) \
+ V(map, Map, MAP, 1) \
+ V(filter, Filter, FILTER, 1) \
+ V(take, Take, TAKE, 1) \
+ V(drop, Drop, DROP, 1)
+
+ ITERATOR_HELPERS(INSTALL_ITERATOR_HELPER)
+
+#undef INSTALL_ITERATOR_HELPER
+#undef ITERATOR_HELPERS
+}
+
void Genesis::InitializeGlobal_harmony_json_parse_with_source() {
if (!v8_flags.harmony_json_parse_with_source) return;
- Handle<Map> map = factory()->NewMap(JS_RAW_JSON_TYPE, JSRawJson::kSize,
+ Handle<Map> map = factory()->NewMap(JS_RAW_JSON_TYPE, JSRawJson::kInitialSize,
TERMINAL_FAST_ELEMENTS_KIND, 1);
Map::EnsureDescriptorSlack(isolate_, map, 1);
{
Descriptor d = Descriptor::DataField(
- isolate(), factory()->raw_json_string(), JSRawJson::kRawJsonIndex, NONE,
- Representation::Tagged());
+ isolate(), factory()->raw_json_string(),
+ JSRawJson::kRawJsonInitialIndex, NONE, Representation::Tagged());
map->AppendDescriptor(isolate(), &d);
}
map->SetPrototype(isolate(), map, isolate()->factory()->null_value());
@@ -4686,8 +4772,8 @@ void Genesis::InitializeGlobal_harmony_struct() {
Handle<String> shared_array_str =
isolate()->factory()->InternalizeUtf8String("SharedArray");
Handle<JSFunction> shared_array_fun = CreateSharedObjectConstructor(
- isolate(), shared_array_str, JS_SHARED_ARRAY_TYPE,
- JSSharedArray::kHeaderSize, SHARED_ARRAY_ELEMENTS,
+ isolate(), shared_array_str, JS_SHARED_ARRAY_TYPE, JSSharedArray::kSize,
+ JSSharedArray::kInObjectFieldCount, SHARED_ARRAY_ELEMENTS,
Builtin::kSharedArrayConstructor);
shared_array_fun->shared().set_internal_formal_parameter_count(
JSParameterCount(0));
@@ -4697,14 +4783,12 @@ void Genesis::InitializeGlobal_harmony_struct() {
Handle<DescriptorArray> descriptors =
isolate()->factory()->NewDescriptorArray(1, 0,
AllocationType::kSharedOld);
- Descriptor descriptor = Descriptor::AccessorConstant(
- isolate()->shared_heap_isolate()->factory()->length_string(),
- isolate()
- ->shared_heap_isolate()
- ->factory()
- ->shared_array_length_accessor(),
- ALL_ATTRIBUTES_MASK);
- descriptors->Set(InternalIndex(0), &descriptor);
+ Descriptor length_descriptor = Descriptor::DataField(
+ isolate()->shared_space_isolate()->factory()->length_string(),
+ JSSharedArray::kLengthFieldIndex, ALL_ATTRIBUTES_MASK,
+ PropertyConstness::kConst, Representation::Smi(),
+ MaybeObjectHandle(FieldType::Any(isolate())));
+ descriptors->Set(InternalIndex(0), &length_descriptor);
shared_array_fun->initial_map().InitializeDescriptors(isolate(),
*descriptors);
@@ -4721,7 +4805,7 @@ void Genesis::InitializeGlobal_harmony_struct() {
isolate()->factory()->InternalizeUtf8String("Mutex");
Handle<JSFunction> mutex_fun = CreateSharedObjectConstructor(
isolate(), mutex_str, JS_ATOMICS_MUTEX_TYPE,
- JSAtomicsMutex::kHeaderSize, TERMINAL_FAST_ELEMENTS_KIND,
+ JSAtomicsMutex::kHeaderSize, 0, TERMINAL_FAST_ELEMENTS_KIND,
Builtin::kAtomicsMutexConstructor);
mutex_fun->shared().set_internal_formal_parameter_count(
JSParameterCount(0));
@@ -4741,7 +4825,7 @@ void Genesis::InitializeGlobal_harmony_struct() {
isolate()->factory()->InternalizeUtf8String("Condition");
Handle<JSFunction> condition_fun = CreateSharedObjectConstructor(
isolate(), condition_str, JS_ATOMICS_CONDITION_TYPE,
- JSAtomicsCondition::kHeaderSize, TERMINAL_FAST_ELEMENTS_KIND,
+ JSAtomicsCondition::kHeaderSize, 0, TERMINAL_FAST_ELEMENTS_KIND,
Builtin::kAtomicsConditionConstructor);
condition_fun->shared().set_internal_formal_parameter_count(
JSParameterCount(0));
@@ -4758,39 +4842,6 @@ void Genesis::InitializeGlobal_harmony_struct() {
}
}
-void Genesis::InitializeGlobal_harmony_array_find_last() {
- if (!v8_flags.harmony_array_find_last) return;
-
- {
- Handle<JSFunction> array_function(native_context()->array_function(),
- isolate());
- Handle<JSObject> array_prototype(
- JSObject::cast(array_function->instance_prototype()), isolate());
-
- SimpleInstallFunction(isolate_, array_prototype, "findLast",
- Builtin::kArrayPrototypeFindLast, 1, false);
- SimpleInstallFunction(isolate_, array_prototype, "findLastIndex",
- Builtin::kArrayPrototypeFindLastIndex, 1, false);
-
- Handle<JSObject> unscopables = Handle<JSObject>::cast(
- JSObject::GetProperty(isolate(), array_prototype,
- isolate()->factory()->unscopables_symbol())
- .ToHandleChecked());
-
- InstallTrueValuedProperty(isolate_, unscopables, "findLast");
- InstallTrueValuedProperty(isolate_, unscopables, "findLastIndex");
- }
-
- {
- Handle<JSObject> prototype(native_context()->typed_array_prototype(),
- isolate());
- SimpleInstallFunction(isolate_, prototype, "findLast",
- Builtin::kTypedArrayPrototypeFindLast, 1, false);
- SimpleInstallFunction(isolate_, prototype, "findLastIndex",
- Builtin::kTypedArrayPrototypeFindLastIndex, 1, false);
- }
-}
-
void Genesis::InitializeGlobal_harmony_array_grouping() {
if (!v8_flags.harmony_array_grouping) return;
@@ -4850,6 +4901,10 @@ void Genesis::InitializeGlobal_harmony_weak_refs_with_cleanup_some() {
DONT_ENUM);
}
+void Genesis::InitializeGlobal_harmony_array_from_async() {
+ if (!v8_flags.harmony_array_from_async) return;
+}
+
void Genesis::InitializeGlobal_regexp_linear_flag() {
if (!v8_flags.enable_experimental_regexp_engine) return;
@@ -4877,8 +4932,16 @@ void Genesis::InitializeGlobal_harmony_rab_gsab() {
Builtin::kArrayBufferPrototypeGetResizable, false);
SimpleInstallFunction(isolate(), array_buffer_prototype, "resize",
Builtin::kArrayBufferPrototypeResize, 1, true);
- SimpleInstallFunction(isolate(), array_buffer_prototype, "transfer",
- Builtin::kArrayBufferPrototypeTransfer, 0, false);
+ if (v8_flags.harmony_rab_gsab_transfer) {
+ SimpleInstallFunction(isolate(), array_buffer_prototype, "transfer",
+ Builtin::kArrayBufferPrototypeTransfer, 0, false);
+ SimpleInstallFunction(
+ isolate(), array_buffer_prototype, "transferToFixedLength",
+ Builtin::kArrayBufferPrototypeTransferToFixedLength, 0, false);
+ SimpleInstallGetter(isolate(), array_buffer_prototype,
+ factory()->detached_string(),
+ Builtin::kArrayBufferPrototypeGetDetached, false);
+ }
Handle<JSObject> shared_array_buffer_prototype(
JSObject::cast(
@@ -4895,6 +4958,18 @@ void Genesis::InitializeGlobal_harmony_rab_gsab() {
Builtin::kSharedArrayBufferPrototypeGrow, 1, true);
}
+void Genesis::InitializeGlobal_harmony_string_is_well_formed() {
+ if (!v8_flags.harmony_string_is_well_formed) return;
+ Handle<JSFunction> string_function(native_context()->string_function(),
+ isolate());
+ Handle<JSObject> string_prototype(
+ JSObject::cast(string_function->initial_map().prototype()), isolate());
+ SimpleInstallFunction(isolate(), string_prototype, "isWellFormed",
+ Builtin::kStringPrototypeIsWellFormed, 0, false);
+ SimpleInstallFunction(isolate(), string_prototype, "toWellFormed",
+ Builtin::kStringPrototypeToWellFormed, 0, false);
+}
+
void Genesis::InitializeGlobal_harmony_temporal() {
if (!v8_flags.harmony_temporal) return;
// -- T e m p o r a l
@@ -5568,21 +5643,6 @@ void Genesis::InitializeGlobal_harmony_intl_number_format_v3() {
#endif // V8_INTL_SUPPORT
-void Genesis::InitializeGlobal_experimental_web_snapshots() {
- if (!v8_flags.experimental_web_snapshots) return;
-
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Handle<JSObject> web_snapshot_object =
- factory()->NewJSObject(isolate_->object_function(), AllocationType::kOld);
- JSObject::AddProperty(isolate_, global, "WebSnapshot", web_snapshot_object,
- DONT_ENUM);
- InstallToStringTag(isolate_, web_snapshot_object, "WebSnapshot");
- SimpleInstallFunction(isolate_, web_snapshot_object, "serialize",
- Builtin::kWebSnapshotSerialize, 2, false);
- SimpleInstallFunction(isolate_, web_snapshot_object, "deserialize",
- Builtin::kWebSnapshotDeserialize, 2, false);
-}
-
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_intl_duration_format() {
if (!v8_flags.harmony_intl_duration_format) return;
@@ -5908,7 +5968,8 @@ bool Genesis::InstallABunchOfRandomThings() {
.ToChecked();
// Freeze the {template_object} as well.
- JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
+ JSObject::SetIntegrityLevel(isolate(), template_object, FROZEN,
+ kThrowOnError)
.ToChecked();
{
DisallowGarbageCollection no_gc;
@@ -6388,7 +6449,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
// If the property is already there we skip it.
if (PropertyAlreadyExists(isolate(), to, key)) continue;
- FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
+ FieldIndex index = FieldIndex::ForDetails(from->map(), details);
Handle<Object> value = JSObject::FastPropertyAt(
isolate(), from, details.representation(), index);
JSObject::AddProperty(isolate(), to, key, value,
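The bootstrapper hunks above install the new builtins (Array.prototype.findLast, the Iterator helpers, ArrayBuffer.prototype.transfer, String.prototype.isWellFormed, and so on) by repeatedly calling SimpleInstallFunction() with a target prototype, a property name, a Builtin id, and an expected argument count. As a rough embedder-level analogue of that pattern, using only the public V8 API and a hypothetical helper name (the internal Genesis code wires Builtin code objects directly and does not go through FunctionTemplate):

// Rough analogue of SimpleInstallFunction(): build a function from a
// callback, give it a name and a .length, and define it on a target object.
// Hypothetical helper for illustration; not the internal Genesis machinery.
#include <v8.h>

void InstallNamedFunction(v8::Isolate* isolate, v8::Local<v8::Context> context,
                          v8::Local<v8::Object> target, const char* name,
                          v8::FunctionCallback callback, int length) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::FunctionTemplate> tmpl =
      v8::FunctionTemplate::New(isolate, callback);
  tmpl->SetLength(length);  // the argument count SimpleInstallFunction takes
  v8::Local<v8::Function> fn = tmpl->GetFunction(context).ToLocalChecked();
  v8::Local<v8::String> key =
      v8::String::NewFromUtf8(isolate, name).ToLocalChecked();
  fn->SetName(key);
  target->Set(context, key, fn).Check();
}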
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 6568096236..5388b88d4e 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -152,7 +152,22 @@
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _)
#endif // V8_INTL_SUPPORT
-#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+// Internalized strings to be allocated early on the read only heap and early in
+// the roots table. Used to give this string a RootIndex < 32.
+#define EXTRA_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ V(_, empty_string, "")
+
+// Internalized strings to be allocated early on the read only heap
+#define IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ V(_, length_string, "length") \
+ V(_, prototype_string, "prototype") \
+ V(_, name_string, "name") \
+ V(_, enumerable_string, "enumerable") \
+ V(_, configurable_string, "configurable") \
+ V(_, value_string, "value") \
+ V(_, writable_string, "writable")
+
+#define NOT_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
V(_, add_string, "add") \
V(_, AggregateError_string, "AggregateError") \
@@ -197,7 +212,6 @@
V(_, code_string, "code") \
V(_, column_string, "column") \
V(_, computed_string, "<computed>") \
- V(_, configurable_string, "configurable") \
V(_, conjunction_string, "conjunction") \
V(_, console_string, "console") \
V(_, constrain_string, "constrain") \
@@ -218,6 +232,7 @@
V(_, default_string, "default") \
V(_, defineProperty_string, "defineProperty") \
V(_, deleteProperty_string, "deleteProperty") \
+ V(_, detached_string, "detached") \
V(_, disjunction_string, "disjunction") \
V(_, done_string, "done") \
V(_, dot_brand_string, ".brand") \
@@ -235,7 +250,6 @@
V(_, dotAll_string, "dotAll") \
V(_, Error_string, "Error") \
V(_, EvalError_string, "EvalError") \
- V(_, enumerable_string, "enumerable") \
V(_, element_string, "element") \
V(_, epochMicroseconds_string, "epochMicroseconds") \
V(_, epochMilliseconds_string, "epochMilliseconds") \
@@ -305,7 +319,6 @@
V(_, keys_string, "keys") \
V(_, largestUnit_string, "largestUnit") \
V(_, lastIndex_string, "lastIndex") \
- V(_, length_string, "length") \
V(_, let_string, "let") \
V(_, line_string, "line") \
V(_, linear_string, "linear") \
@@ -332,7 +345,6 @@
V(_, monthsInYear_string, "monthsInYear") \
V(_, monthCode_string, "monthCode") \
V(_, multiline_string, "multiline") \
- V(_, name_string, "name") \
V(_, NaN_string, "NaN") \
V(_, nanosecond_string, "nanosecond") \
V(_, nanoseconds_string, "nanoseconds") \
@@ -352,6 +364,7 @@
V(_, Object_string, "Object") \
V(_, object_string, "object") \
V(_, object_to_string, "[object Object]") \
+ V(_, Object_prototype_string, "Object.prototype") \
V(_, of_string, "of") \
V(_, offset_string, "offset") \
V(_, offsetNanoseconds_string, "offsetNanoseconds") \
@@ -368,7 +381,6 @@
V(_, private_constructor_string, "#constructor") \
V(_, Promise_string, "Promise") \
V(_, proto_string, "__proto__") \
- V(_, prototype_string, "prototype") \
V(_, proxy_string, "proxy") \
V(_, Proxy_string, "Proxy") \
V(_, query_colon_string, "(?:)") \
@@ -415,6 +427,7 @@
V(_, string_string, "string") \
V(_, string_to_string, "[object String]") \
V(_, Symbol_iterator_string, "Symbol.iterator") \
+ V(_, Symbol_replace_string, "Symbol.replace") \
V(_, symbol_species_string, "[Symbol.species]") \
V(_, Symbol_species_string, "Symbol.species") \
V(_, Symbol_string, "Symbol") \
@@ -442,7 +455,6 @@
V(_, unit_string, "unit") \
V(_, URIError_string, "URIError") \
V(_, UTC_string, "UTC") \
- V(_, value_string, "value") \
V(_, valueOf_string, "valueOf") \
V(_, WeakMap_string, "WeakMap") \
V(_, WeakRef_string, "WeakRef") \
@@ -451,57 +463,67 @@
V(_, weeks_string, "weeks") \
V(_, weekOfYear_string, "weekOfYear") \
V(_, word_string, "word") \
- V(_, writable_string, "writable") \
V(_, yearMonthFromFields_string, "yearMonthFromFields") \
V(_, year_string, "year") \
V(_, years_string, "years") \
V(_, zero_string, "0")
-#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
- V(_, array_buffer_wasm_memory_symbol) \
- V(_, call_site_info_symbol) \
- V(_, console_context_id_symbol) \
- V(_, console_context_name_symbol) \
- V(_, class_fields_symbol) \
- V(_, class_positions_symbol) \
- V(_, elements_transition_symbol) \
- V(_, error_end_pos_symbol) \
- V(_, error_script_symbol) \
- V(_, error_stack_symbol) \
- V(_, error_start_pos_symbol) \
- V(_, frozen_symbol) \
- V(_, interpreter_trampoline_symbol) \
- V(_, mega_dom_symbol) \
- V(_, megamorphic_symbol) \
- V(_, native_context_index_symbol) \
- V(_, nonextensible_symbol) \
- V(_, not_mapped_symbol) \
- V(_, promise_debug_marker_symbol) \
- V(_, promise_debug_message_symbol) \
- V(_, promise_forwarding_handler_symbol) \
- V(_, promise_handled_by_symbol) \
- V(_, promise_awaited_by_symbol) \
- V(_, regexp_result_names_symbol) \
- V(_, regexp_result_regexp_input_symbol) \
- V(_, regexp_result_regexp_last_index_symbol) \
- V(_, sealed_symbol) \
- V(_, strict_function_transition_symbol) \
- V(_, template_literal_function_literal_id_symbol) \
- V(_, template_literal_slot_id_symbol) \
- V(_, wasm_exception_tag_symbol) \
- V(_, wasm_exception_values_symbol) \
- V(_, wasm_uncatchable_symbol) \
- V(_, wasm_wrapped_object_symbol) \
- V(_, wasm_debug_proxy_cache_symbol) \
- V(_, wasm_debug_proxy_names_symbol) \
- V(_, uninitialized_symbol)
+#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ EXTRA_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ NOT_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR(V, _)
+
+// Symbols to be allocated early on the read only heap
+#define IMPORTANT_PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, not_mapped_symbol) \
+ V(_, uninitialized_symbol) \
+ V(_, megamorphic_symbol) \
+ V(_, elements_transition_symbol) \
+ V(_, mega_dom_symbol)
+
+#define NOT_IMPORTANT_PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, array_buffer_wasm_memory_symbol) \
+ V(_, call_site_info_symbol) \
+ V(_, console_context_id_symbol) \
+ V(_, console_context_name_symbol) \
+ V(_, class_fields_symbol) \
+ V(_, class_positions_symbol) \
+ V(_, error_end_pos_symbol) \
+ V(_, error_script_symbol) \
+ V(_, error_stack_symbol) \
+ V(_, error_start_pos_symbol) \
+ V(_, frozen_symbol) \
+ V(_, interpreter_trampoline_symbol) \
+ V(_, native_context_index_symbol) \
+ V(_, nonextensible_symbol) \
+ V(_, promise_debug_marker_symbol) \
+ V(_, promise_debug_message_symbol) \
+ V(_, promise_forwarding_handler_symbol) \
+ V(_, promise_handled_by_symbol) \
+ V(_, promise_awaited_by_symbol) \
+ V(_, regexp_result_names_symbol) \
+ V(_, regexp_result_regexp_input_symbol) \
+ V(_, regexp_result_regexp_last_index_symbol) \
+ V(_, sealed_symbol) \
+ V(_, strict_function_transition_symbol) \
+ V(_, template_literal_function_literal_id_symbol) \
+ V(_, template_literal_slot_id_symbol) \
+ V(_, wasm_exception_tag_symbol) \
+ V(_, wasm_exception_values_symbol) \
+ V(_, wasm_uncatchable_symbol) \
+ V(_, wasm_wrapped_object_symbol) \
+ V(_, wasm_debug_proxy_cache_symbol) \
+ V(_, wasm_debug_proxy_names_symbol)
+
+#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ IMPORTANT_PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ NOT_IMPORTANT_PRIVATE_SYMBOL_LIST_GENERATOR(V, _)
#define PUBLIC_SYMBOL_LIST_GENERATOR(V, _) \
V(_, async_iterator_symbol, Symbol.asyncIterator) \
V(_, intl_fallback_symbol, IntlLegacyConstructedSymbol) \
V(_, match_all_symbol, Symbol.matchAll) \
V(_, match_symbol, Symbol.match) \
- V(_, replace_symbol, Symbol.replace) \
V(_, search_symbol, Symbol.search) \
V(_, split_symbol, Symbol.split) \
V(_, to_primitive_symbol, Symbol.toPrimitive) \
@@ -523,11 +545,12 @@
V(_, resolve_string, "resolve") \
V(_, then_string, "then")
-// Note that the descriptioon string should be part of the internalized
+// Note that the description string should be part of the internalized
// string roots to make sure we don't accidentally end up allocating the
// description in between the symbols during deserialization.
#define SYMBOL_FOR_PROTECTOR_LIST_GENERATOR(V, _) \
V(_, iterator_symbol, Symbol.iterator) \
+ V(_, replace_symbol, Symbol.replace) \
V(_, species_symbol, Symbol.species)
#define WELL_KNOWN_SYMBOL_FOR_PROTECTOR_LIST_GENERATOR(V, _) \
@@ -568,11 +591,12 @@
MINOR_INCREMENTAL_SCOPES(F) \
F(HEAP_EMBEDDER_TRACING_EPILOGUE) \
F(HEAP_EPILOGUE) \
- F(HEAP_EPILOGUE_ADJUST_NEW_SPACE) \
+ F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EPILOGUE_SAFEPOINT) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_NEAR_HEAP_LIMIT) \
F(HEAP_EXTERNAL_PROLOGUE) \
+ F(HEAP_EXTERNAL_SECOND_PASS_CALLBACKS) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(HEAP_PROLOGUE_SAFEPOINT) \
@@ -627,32 +651,28 @@
F(MC_SWEEP_NEW_LO) \
F(MC_SWEEP_OLD) \
F(MC_SWEEP_SHARED) \
+ F(MC_SWEEP_SHARED_LO) \
F(MINOR_MARK_COMPACTOR) \
F(MINOR_MC) \
TOP_MINOR_MC_SCOPES(F) \
+ F(MINOR_MC_CLEAR_STRING_FORWARDING_TABLE) \
F(MINOR_MC_CLEAR_STRING_TABLE) \
+ F(MINOR_MC_CLEAR_WEAK_GLOBAL_HANDLES) \
F(MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(MINOR_MC_COMPLETE_SWEEPING) \
- F(MINOR_MC_EVACUATE_CLEAN_UP) \
- F(MINOR_MC_EVACUATE_COPY) \
- F(MINOR_MC_EVACUATE_COPY_PARALLEL) \
- F(MINOR_MC_EVACUATE_EPILOGUE) \
- F(MINOR_MC_EVACUATE_PROLOGUE) \
- F(MINOR_MC_EVACUATE_REBALANCE) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS) \
- F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_FINISH_INCREMENTAL) \
F(MINOR_MC_MARK_PARALLEL) \
F(MINOR_MC_MARK_SEED) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_CLOSURE_PARALLEL) \
F(MINOR_MC_MARK_CLOSURE) \
+ F(MINOR_MC_MARK_EMBEDDER_PROLOGUE) \
+ F(MINOR_MC_MARK_EMBEDDER_TRACING) \
F(MINOR_MC_SWEEP_NEW) \
F(MINOR_MC_SWEEP_NEW_LO) \
+ F(MINOR_MC_SWEEP_UPDATE_STRING_TABLE) \
+ F(MINOR_MC_SWEEP_START_JOBS) \
+ F(MINOR_MC_FINISH_ENSURE_CAPACITY) \
F(SAFEPOINT) \
F(SCAVENGER) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
@@ -671,35 +691,37 @@
F(TIME_TO_GLOBAL_SAFEPOINT) \
F(TIME_TO_SAFEPOINT) \
F(UNMAPPER) \
- F(UNPARK)
+ F(UNPARK) \
+ F(YOUNG_ARRAY_BUFFER_SWEEP) \
+ F(FULL_ARRAY_BUFFER_SWEEP)
-#define TRACER_BACKGROUND_SCOPES(F) \
- F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
- F(BACKGROUND_FULL_ARRAY_BUFFER_SWEEP) \
- F(BACKGROUND_COLLECTION) \
- F(BACKGROUND_UNMAPPER) \
- F(BACKGROUND_UNPARK) \
- F(BACKGROUND_SAFEPOINT) \
- F(MC_BACKGROUND_EVACUATE_COPY) \
- F(MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
- F(MC_BACKGROUND_MARKING) \
- F(MC_BACKGROUND_SWEEPING) \
- F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
- F(MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
- F(MINOR_MC_BACKGROUND_MARKING) \
- F(MINOR_MC_BACKGROUND_SWEEPING) \
+#define TRACER_BACKGROUND_SCOPES(F) \
+ F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
+ F(BACKGROUND_FULL_ARRAY_BUFFER_SWEEP) \
+ F(BACKGROUND_COLLECTION) \
+ F(BACKGROUND_UNMAPPER) \
+ F(BACKGROUND_UNPARK) \
+ F(BACKGROUND_SAFEPOINT) \
+ F(MC_BACKGROUND_EVACUATE_COPY) \
+ F(MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
+ F(MC_BACKGROUND_MARKING) \
+ F(MC_BACKGROUND_SWEEPING) \
+ F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
+ F(MINOR_MC_BACKGROUND_MARKING) \
+ F(MINOR_MC_BACKGROUND_SWEEPING) \
F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL)
-#define TRACER_YOUNG_EPOCH_SCOPES(F) \
- F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
- F(MINOR_MARK_COMPACTOR) \
- F(MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS) \
- F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
- F(MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
- F(MINOR_MC_BACKGROUND_MARKING) \
- F(MINOR_MC_BACKGROUND_SWEEPING) \
- F(SCAVENGER) \
- F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL) \
+#define TRACER_YOUNG_EPOCH_SCOPES(F) \
+ F(YOUNG_ARRAY_BUFFER_SWEEP) \
+ F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
+ F(MINOR_MARK_COMPACTOR) \
+ F(MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS) \
+ F(MINOR_MC_COMPLETE_SWEEPING) \
+ F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
+ F(MINOR_MC_BACKGROUND_MARKING) \
+ F(MINOR_MC_BACKGROUND_SWEEPING) \
+ F(SCAVENGER) \
+ F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS)
#endif // V8_INIT_HEAP_SYMBOLS_H_
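The heap-symbols change above splits the internalized-string and private-symbol lists into "important" and "not important" sub-lists and then rebuilds the original generators by concatenation, so the important entries are emitted first and land early in the read-only heap and the roots table. A minimal, self-contained sketch of that X-macro composition, using a simplified two-argument form and hypothetical list names (the real lists also take a placeholder argument):

// Concatenation order decides enumeration order: entries from the
// "important" sub-list expand first, so they get the smallest indices in
// every enum or table generated from the combined list.
#include <cstdio>

#define IMPORTANT_STRING_LIST(V) \
  V(empty_string, "")            \
  V(length_string, "length")

#define NOT_IMPORTANT_STRING_LIST(V) \
  V(add_string, "add")               \
  V(valueOf_string, "valueOf")

#define STRING_LIST(V)     \
  IMPORTANT_STRING_LIST(V) \
  NOT_IMPORTANT_STRING_LIST(V)

// First expansion: an index enum; important entries come first.
enum RootIndex {
#define DECLARE_INDEX(name, contents) k_##name,
  STRING_LIST(DECLARE_INDEX)
#undef DECLARE_INDEX
  kRootCount
};

// Second expansion: the parallel table of literal contents.
static const char* kRootContents[] = {
#define DECLARE_CONTENTS(name, contents) contents,
    STRING_LIST(DECLARE_CONTENTS)
#undef DECLARE_CONTENTS
};

int main() {
  std::printf("%d roots, empty_string index %d, contents[1] \"%s\"\n",
              kRootCount, k_empty_string, kRootContents[1]);
}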
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index b7b98804b0..e80a332d09 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -5,6 +5,7 @@
#include "src/init/isolate-allocator.h"
#include "src/base/bounded-page-allocator.h"
+#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
#include "src/sandbox/sandbox.h"
@@ -15,42 +16,21 @@ namespace v8 {
namespace internal {
#ifdef V8_COMPRESS_POINTERS
-namespace {
-
-// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
-// reservation. This "IsolateRootBiasPage" page is supposed to be used for
-// storing part of the Isolate object when Isolate::isolate_root_bias() is
-// not zero.
-inline size_t GetIsolateRootBiasPageSize(
- v8::PageAllocator* platform_page_allocator) {
- return RoundUp(Isolate::isolate_root_bias(),
- platform_page_allocator->AllocatePageSize());
-}
-
-} // namespace
-
struct PtrComprCageReservationParams
: public VirtualMemoryCage::ReservationParams {
PtrComprCageReservationParams() {
page_allocator = GetPlatformPageAllocator();
- // This is only used when there is a per-Isolate cage, in which case the
- // Isolate is allocated within the cage, and the Isolate root is also the
- // cage base.
- const size_t kIsolateRootBiasPageSize =
- COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
- ? GetIsolateRootBiasPageSize(page_allocator)
- : 0;
- reservation_size = kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
+ reservation_size = kPtrComprCageReservationSize;
base_alignment = kPtrComprCageBaseAlignment;
- base_bias_size = kIsolateRootBiasPageSize;
// Simplify BoundedPageAllocator's life by configuring it to use same page
// size as the Heap will use (MemoryChunk::kPageSize).
page_size =
RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
- requested_start_hint =
- reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr());
+ requested_start_hint = RoundDown(
+ reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr()),
+ base_alignment);
jit = JitPermission::kNoJit;
}
};
@@ -63,8 +43,7 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(VirtualMemoryCage, GetProcessWidePtrComprCage)
// static
void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
- if (std::shared_ptr<CodeRange> code_range =
- CodeRange::GetProcessWideCodeRange()) {
+ if (CodeRange* code_range = CodeRange::GetProcessWideCodeRange()) {
code_range->Free();
}
GetProcessWidePtrComprCage()->Free();
@@ -94,7 +73,14 @@ void IsolateAllocator::InitializeOncePerProcess() {
"Failed to reserve virtual memory for process-wide V8 "
"pointer compression cage");
}
-#endif
+ V8HeapCompressionScheme::InitBase(GetProcessWidePtrComprCage()->base());
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // Speculatively set the code cage base to the same value in case jitless
+ // mode will be used. Once the process-wide CodeRange instance is created
+ // the code cage base will be set accordingly.
+ ExternalCodeCompressionScheme::InitBase(V8HeapCompressionScheme::base());
+#endif // V8_EXTERNAL_CODE_SPACE
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
}
IsolateAllocator::IsolateAllocator() {
@@ -106,30 +92,20 @@ IsolateAllocator::IsolateAllocator() {
"Failed to reserve memory for Isolate V8 pointer compression cage");
}
page_allocator_ = isolate_ptr_compr_cage_.page_allocator();
- CommitPagesForIsolate();
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
- // Allocate Isolate in C++ heap when sharing a cage.
CHECK(GetProcessWidePtrComprCage()->IsReserved());
page_allocator_ = GetProcessWidePtrComprCage()->page_allocator();
- isolate_memory_ = ::operator new(sizeof(Isolate));
#else
- // Allocate Isolate in C++ heap.
page_allocator_ = GetPlatformPageAllocator();
- isolate_memory_ = ::operator new(sizeof(Isolate));
#endif // V8_COMPRESS_POINTERS
+ // Allocate Isolate in C++ heap.
+ isolate_memory_ = ::operator new(sizeof(Isolate));
+
CHECK_NOT_NULL(page_allocator_);
}
IsolateAllocator::~IsolateAllocator() {
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- if (isolate_ptr_compr_cage_.reservation()->IsReserved()) {
- // The actual memory will be freed when the |isolate_ptr_compr_cage_| will
- // die.
- return;
- }
-#endif
-
// The memory was allocated in C++ heap.
::operator delete(isolate_memory_);
}
@@ -148,57 +124,5 @@ const VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() const {
return const_cast<IsolateAllocator*>(this)->GetPtrComprCage();
}
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-void IsolateAllocator::CommitPagesForIsolate() {
- v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
-
- CHECK(isolate_ptr_compr_cage_.IsReserved());
- Address isolate_root = isolate_ptr_compr_cage_.base();
- CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
- CHECK_GE(isolate_ptr_compr_cage_.reservation()->size(),
- kPtrComprCageReservationSize +
- GetIsolateRootBiasPageSize(platform_page_allocator));
- CHECK(isolate_ptr_compr_cage_.reservation()->InVM(
- isolate_root, kPtrComprCageReservationSize));
-
- size_t page_size = page_allocator_->AllocatePageSize();
- Address isolate_address = isolate_root - Isolate::isolate_root_bias();
- Address isolate_end = isolate_address + sizeof(Isolate);
-
- // Inform the bounded page allocator about reserved pages.
- {
- Address reserved_region_address = isolate_root;
- size_t reserved_region_size =
- RoundUp(isolate_end, page_size) - reserved_region_address;
-
- CHECK(isolate_ptr_compr_cage_.page_allocator()->AllocatePagesAt(
- reserved_region_address, reserved_region_size,
- PageAllocator::Permission::kNoAccess));
- }
-
- // Commit pages where the Isolate will be stored.
- {
- size_t commit_page_size = platform_page_allocator->CommitPageSize();
- Address committed_region_address =
- RoundDown(isolate_address, commit_page_size);
- size_t committed_region_size =
- RoundUp(isolate_end, commit_page_size) - committed_region_address;
-
- // We are using |isolate_ptr_compr_cage_.reservation()| directly here
- // because |page_allocator_| has bigger commit page size than we actually
- // need.
- CHECK(isolate_ptr_compr_cage_.reservation()->SetPermissions(
- committed_region_address, committed_region_size,
- PageAllocator::kReadWrite));
-
- if (Heap::ShouldZapGarbage()) {
- MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
- kZapValue, committed_region_size / kSystemPointerSize);
- }
- }
- isolate_memory_ = reinterpret_cast<void*>(isolate_address);
-}
-#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-
} // namespace internal
} // namespace v8
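One small detail in the isolate-allocator change above: the random placement hint for the shared pointer-compression cage is now rounded down to the cage base alignment before the reservation is attempted. For a power-of-two alignment (the cage base alignment is one), that round-down is a single mask; a minimal sketch of the arithmetic:

#include <cstdint>

// Assumes the alignment is a power of two, as the cage base alignment is.
constexpr uintptr_t RoundDownTo(uintptr_t value, uintptr_t alignment) {
  return value & ~(alignment - 1);
}

static_assert(RoundDownTo(0x12345678u, 0x1000u) == 0x12345000u,
              "rounding a hint down to a 4 KiB boundary");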
diff --git a/deps/v8/src/init/isolate-allocator.h b/deps/v8/src/init/isolate-allocator.h
index 2bf739ec49..876229481b 100644
--- a/deps/v8/src/init/isolate-allocator.h
+++ b/deps/v8/src/init/isolate-allocator.h
@@ -15,16 +15,10 @@
namespace v8 {
namespace internal {
-// IsolateAllocator object is responsible for allocating memory for one (!)
-// Isolate object. Depending on the whether pointer compression is enabled,
-// the memory can be allocated
-//
-// 1) in the C++ heap (when pointer compression is disabled or when multiple
-// Isolates share a pointer compression cage)
+// TODO(v8:13788): remove IsolateAllocator, as it's no longer needed.
//
-// 2) in a proper part of a properly aligned region of a reserved address space
-// (when pointer compression is enabled and each Isolate has its own pointer
-// compression cage).
+// IsolateAllocator object is responsible for allocating memory for one (!)
+// Isolate object. Currently, the memory is always allocated in the C++ heap.
//
// Isolate::New() first creates IsolateAllocator object which allocates the
// memory and then it constructs Isolate object in this memory. Once it's done
@@ -54,8 +48,6 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
static void InitializeOncePerProcess();
private:
- void CommitPagesForIsolate();
-
friend class SequentialUnmapperTest;
// Only used for testing.
static void FreeProcessWidePtrComprCageForTesting();
diff --git a/deps/v8/src/init/setup-isolate-deserialize.cc b/deps/v8/src/init/setup-isolate-deserialize.cc
index 853c3bde0b..de4736677b 100644
--- a/deps/v8/src/init/setup-isolate-deserialize.cc
+++ b/deps/v8/src/init/setup-isolate-deserialize.cc
@@ -2,23 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/setup-isolate.h"
-
#include "src/base/logging.h"
+#include "src/execution/isolate.h"
+#include "src/init/setup-isolate.h"
namespace v8 {
namespace internal {
-void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
- CHECK(!create_heap_objects_);
- // No actual work to be done; builtins will be deserialized from the snapshot.
-}
-
-bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
- CHECK(!create_heap_objects_);
+bool SetupIsolateDelegate::SetupHeap(Isolate* isolate,
+ bool create_heap_objects) {
// No actual work to be done; heap will be deserialized from the snapshot.
+ CHECK_WITH_MSG(!create_heap_objects,
+ "Heap setup supported only in mksnapshot");
return true;
}
+void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
+ bool compile_builtins) {
+ // No actual work to be done; builtins will be deserialized from the snapshot.
+ CHECK_WITH_MSG(!compile_builtins,
+ "Builtin compilation supported only in mksnapshot");
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/init/setup-isolate-full.cc b/deps/v8/src/init/setup-isolate-full.cc
index d927820ca6..ad9854f9d3 100644
--- a/deps/v8/src/init/setup-isolate-full.cc
+++ b/deps/v8/src/init/setup-isolate-full.cc
@@ -11,24 +11,25 @@
namespace v8 {
namespace internal {
-void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
- if (create_heap_objects_) {
- SetupBuiltinsInternal(isolate);
-#ifdef DEBUG
- DebugEvaluate::VerifyTransitiveBuiltins(isolate);
-#endif // DEBUG
- } else {
+bool SetupIsolateDelegate::SetupHeap(Isolate* isolate,
+ bool create_heap_objects) {
+ if (!create_heap_objects) {
CHECK(isolate->snapshot_available());
+ return true;
}
+ return SetupHeapInternal(isolate);
}
-bool SetupIsolateDelegate::SetupHeap(Heap* heap) {
- if (create_heap_objects_) {
- return SetupHeapInternal(heap);
- } else {
- CHECK(heap->isolate()->snapshot_available());
- return true;
+void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
+ bool compile_builtins) {
+ if (!compile_builtins) {
+ CHECK(isolate->snapshot_available());
+ return;
}
+ SetupBuiltinsInternal(isolate);
+#ifdef DEBUG
+ DebugEvaluate::VerifyTransitiveBuiltins(isolate);
+#endif // DEBUG
}
} // namespace internal
diff --git a/deps/v8/src/init/setup-isolate.h b/deps/v8/src/init/setup-isolate.h
index af9c614bc0..6f52a157e1 100644
--- a/deps/v8/src/init/setup-isolate.h
+++ b/deps/v8/src/init/setup-isolate.h
@@ -31,13 +31,11 @@ class Isolate;
// linked in by the latter two Delegate implementations.
class V8_EXPORT_PRIVATE SetupIsolateDelegate {
public:
- explicit SetupIsolateDelegate(bool create_heap_objects)
- : create_heap_objects_(create_heap_objects) {}
+ SetupIsolateDelegate() = default;
virtual ~SetupIsolateDelegate() = default;
- virtual void SetupBuiltins(Isolate* isolate);
-
- virtual bool SetupHeap(Heap* heap);
+ virtual bool SetupHeap(Isolate* isolate, bool create_heap_objects);
+ virtual void SetupBuiltins(Isolate* isolate, bool compile_builtins);
protected:
static void SetupBuiltinsInternal(Isolate* isolate);
@@ -45,9 +43,7 @@ class V8_EXPORT_PRIVATE SetupIsolateDelegate {
static void PopulateWithPlaceholders(Isolate* isolate);
static void ReplacePlaceholders(Isolate* isolate);
- static bool SetupHeapInternal(Heap* heap);
-
- const bool create_heap_objects_;
+ static bool SetupHeapInternal(Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 4a4267f17b..9dad3911a4 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -92,6 +92,7 @@ void AdvanceStartupState(V8StartupState expected_next_state) {
V8_DECLARE_ONCE(init_snapshot_once);
#endif
+// static
void V8::InitializePlatform(v8::Platform* platform) {
AdvanceStartupState(V8StartupState::kPlatformInitializing);
CHECK(!platform_);
@@ -114,6 +115,16 @@ void V8::InitializePlatform(v8::Platform* platform) {
AdvanceStartupState(V8StartupState::kPlatformInitialized);
}
+// static
+void V8::InitializePlatformForTesting(v8::Platform* platform) {
+ if (v8_startup_state_ != V8StartupState::kIdle) {
+ FATAL(
+ "The platform was initialized before. Note that running multiple tests "
+ "in the same process is not supported.");
+ }
+ V8::InitializePlatform(platform);
+}
+
#define DISABLE_FLAG(flag) \
if (v8_flags.flag) { \
PrintF(stderr, \
diff --git a/deps/v8/src/init/v8.h b/deps/v8/src/init/v8.h
index dd58a0c8e9..68193bd235 100644
--- a/deps/v8/src/init/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -43,6 +43,8 @@ class V8 : public AllStatic {
Isolate* isolate, const char* location, const char* detail);
static void InitializePlatform(v8::Platform* platform);
+ V8_EXPORT_PRIVATE static void InitializePlatformForTesting(
+ v8::Platform* platform);
static void DisposePlatform();
V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
// Replaces the current platform with the given platform.
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 0a5e3a25ac..87ae628aa9 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -102,6 +102,8 @@ v8_source_set("inspector") {
"../../include/v8-inspector.h",
]
sources += [
+ "crc32.cc",
+ "crc32.h",
"custom-preview.cc",
"custom-preview.h",
"injected-script.cc",
@@ -125,6 +127,8 @@ v8_source_set("inspector") {
"v8-console.h",
"v8-debugger-agent-impl.cc",
"v8-debugger-agent-impl.h",
+ "v8-debugger-barrier.cc",
+ "v8-debugger-barrier.h",
"v8-debugger-id.cc",
"v8-debugger-id.h",
"v8-debugger-script.cc",
diff --git a/deps/v8/src/inspector/crc32.cc b/deps/v8/src/inspector/crc32.cc
new file mode 100644
index 0000000000..29d6460a0a
--- /dev/null
+++ b/deps/v8/src/inspector/crc32.cc
@@ -0,0 +1,85 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/crc32.h"
+
+#include "src/base/macros.h"
+
+namespace v8_inspector {
+
+// Generated from the polynomial 0xedb88320 using the following script:
+// for i in range(0, 256):
+// c = i ^ 0xff
+// for j in range(0, 8):
+// l = 0 if c & 1 else 0xedb88320
+// c = (c >> 1) ^ l
+// print("0x%x" % (c))
+static uint32_t kCrcTable[256] = {
+ 0x0L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
+ 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L, 0x79dcb8a4L,
+ 0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+ 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+ 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+ 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+ 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+ 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+ 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+ 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+ 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+ 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+ 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+ 0x1db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
+ 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L, 0x9609a88eL,
+ 0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL, 0x91646c97L, 0xe6635c01L,
+ 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+ 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+ 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+ 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+ 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+ 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+ 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+ 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+ 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+ 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+ 0x3b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
+ 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL, 0x7a6a5aa8L,
+ 0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L, 0x7d079eb1L, 0xf00f9344L,
+ 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+ 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+ 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+ 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+ 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+ 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+ 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+ 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+ 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+ 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+ 0x26d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
+ 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L, 0x92d28e9bL,
+ 0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+ 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+ 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+ 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+ 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+ 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+ 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+ 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+ 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+ 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+ 0x2d02ef8dL};
+
+int32_t computeCrc32(const String16& text) {
+ const uint8_t* bytes = reinterpret_cast<const uint8_t*>(text.characters16());
+ size_t byteLength = sizeof(UChar) * text.length();
+
+ uint32_t checksum = 0;
+ for (size_t i = 0; i < byteLength; ++i) {
+ uint32_t index = (checksum ^ bytes[i]) & 0xff;
+ checksum = (checksum >> 8) ^ kCrcTable[index];
+ }
+
+ return v8::base::bit_cast<int32_t>(checksum);
+}
+
+} // namespace v8_inspector
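The table added above is the standard reflected CRC-32 table for the polynomial 0xedb88320, and computeCrc32() runs the usual byte-at-a-time table update, just without the conventional all-ones initial value and final inversion (a stable hash is all the breakpoint-hint code needs, not interoperability with other CRC-32 users). A standalone sketch, not part of the commit, that rebuilds the same table at runtime and cross-checks the table-driven loop against the plain bitwise computation:

// Derive the 256-entry table from the polynomial and verify that the
// table-driven update used by computeCrc32() matches the bitwise definition.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t table[256];

static void BuildTable() {
  for (uint32_t i = 0; i < 256; ++i) {
    uint32_t c = i;
    for (int j = 0; j < 8; ++j) {
      c = (c & 1) ? (c >> 1) ^ 0xedb88320u : c >> 1;
    }
    table[i] = c;
  }
}

static uint32_t Crc32Table(const uint8_t* data, size_t n) {
  uint32_t crc = 0;  // no initial or final inversion, matching computeCrc32()
  for (size_t i = 0; i < n; ++i) {
    crc = (crc >> 8) ^ table[(crc ^ data[i]) & 0xff];
  }
  return crc;
}

static uint32_t Crc32Bitwise(const uint8_t* data, size_t n) {
  uint32_t crc = 0;
  for (size_t i = 0; i < n; ++i) {
    crc ^= data[i];
    for (int j = 0; j < 8; ++j) {
      crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320u : crc >> 1;
    }
  }
  return crc;
}

int main() {
  BuildTable();
  const char* msg = "breakpoint prefix";
  const uint8_t* bytes = reinterpret_cast<const uint8_t*>(msg);
  size_t n = std::strlen(msg);
  std::printf("table-driven %08x, bitwise %08x\n",
              static_cast<unsigned>(Crc32Table(bytes, n)),
              static_cast<unsigned>(Crc32Bitwise(bytes, n)));
  return 0;
}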
diff --git a/deps/v8/src/inspector/crc32.h b/deps/v8/src/inspector/crc32.h
new file mode 100644
index 0000000000..c20b56a660
--- /dev/null
+++ b/deps/v8/src/inspector/crc32.h
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_CRC32_H_
+#define V8_INSPECTOR_CRC32_H_
+
+#include "src/inspector/string-16.h"
+
+namespace v8_inspector {
+
+int32_t computeCrc32(const String16&);
+
+}  // namespace v8_inspector
+
+#endif // V8_INSPECTOR_CRC32_H_
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index c55249329b..51498c60dd 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -686,7 +686,7 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
auto filtered = std::make_unique<Array<PropertyPreview>>();
for (const String16& column : selectedColumns) {
if (columnMap.find(column) == columnMap.end()) continue;
- filtered->push_back(columnMap[column]->clone());
+ filtered->push_back(columnMap[column]->Clone());
}
columnPreview->setProperties(std::move(filtered));
}
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index 987d3a7642..90b94eced8 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -28,7 +28,14 @@ class InjectedScriptHost;
class V8ContextInfo;
class V8InspectorImpl;
-enum class V8InternalValueType { kNone, kEntry, kScope, kScopeList };
+enum class V8InternalValueType {
+ kNone,
+ kEntry,
+ kScope,
+ kScopeList,
+ kPrivateMethodList,
+ kPrivateMethod
+};
class InspectedContext {
public:
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index ee6ad9edfd..a8b786a816 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -118,8 +118,8 @@ int String16::toInteger(bool* ok) const {
return static_cast<int>(result);
}
-String16 String16::stripWhiteSpace() const {
- if (!length()) return String16();
+std::pair<size_t, size_t> String16::getTrimmedOffsetAndLength() const {
+ if (!length()) return std::make_pair(0, 0);
size_t start = 0;
size_t end = length() - 1;
@@ -128,13 +128,21 @@ String16 String16::stripWhiteSpace() const {
while (start <= end && isSpaceOrNewLine(characters16()[start])) ++start;
// only white space
- if (start > end) return String16();
+ if (start > end) return std::make_pair(0, 0);
// skip white space from end
while (end && isSpaceOrNewLine(characters16()[end])) --end;
- if (!start && end == length() - 1) return *this;
- return String16(characters16() + start, end + 1 - start);
+ return std::make_pair(start, end + 1 - start);
+}
+
+String16 String16::stripWhiteSpace() const {
+ std::pair<size_t, size_t> offsetAndLength = getTrimmedOffsetAndLength();
+ if (offsetAndLength.second == 0) return String16();
+ if (offsetAndLength.first == 0 && offsetAndLength.second == length() - 1) {
+ return *this;
+ }
+ return substring(offsetAndLength.first, offsetAndLength.second);
}
String16Builder::String16Builder() = default;
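The string-16 change above factors the trimming logic out of stripWhiteSpace() into getTrimmedOffsetAndLength(), because the breakpoint-hint code further down needs the offset of the trimmed text, not just the trimmed text itself, in order to hash the prefix between the requested and the actual breakpoint position. The shape of that refactor, sketched on std::u16string (assumption: ASCII whitespace only, which is narrower than isSpaceOrNewLine()):

#include <string>
#include <utility>

// Only ASCII whitespace here; the real isSpaceOrNewLine() covers more.
static bool IsSpaceOrNewLine(char16_t c) {
  return c == u' ' || c == u'\t' || c == u'\r' || c == u'\n';
}

// Returns the (offset, length) of the trimmed window so callers can also
// reason about what precedes it, mirroring getTrimmedOffsetAndLength().
std::pair<size_t, size_t> TrimmedOffsetAndLength(const std::u16string& s) {
  if (s.empty()) return {0, 0};
  size_t start = 0;
  size_t end = s.size() - 1;
  while (start <= end && IsSpaceOrNewLine(s[start])) ++start;
  if (start > end) return {0, 0};
  while (end > start && IsSpaceOrNewLine(s[end])) --end;
  return {start, end + 1 - start};
}

// stripWhiteSpace() becomes a thin wrapper over the trimmed window.
std::u16string StripWhiteSpace(const std::u16string& s) {
  std::pair<size_t, size_t> window = TrimmedOffsetAndLength(s);
  return s.substr(window.first, window.second);
}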
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 7dfc5e34a8..29651ac959 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -46,6 +46,7 @@ class String16 {
int64_t toInteger64(bool* ok = nullptr) const;
uint64_t toUInt64(bool* ok = nullptr) const;
int toInteger(bool* ok = nullptr) const;
+ std::pair<size_t, size_t> getTrimmedOffsetAndLength() const;
String16 stripWhiteSpace() const;
const UChar* characters16() const { return m_impl.c_str(); }
size_t length() const { return m_impl.length(); }
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index e3c1444fbc..1c6e5b2272 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -14,6 +14,7 @@
#include "include/v8-microtask-queue.h"
#include "src/base/safe_conversions.h"
#include "src/debug/debug-interface.h"
+#include "src/inspector/crc32.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Debugger.h"
@@ -49,12 +50,16 @@ static const char pauseOnExceptionsState[] = "pauseOnExceptionsState";
static const char asyncCallStackDepth[] = "asyncCallStackDepth";
static const char blackboxPattern[] = "blackboxPattern";
static const char debuggerEnabled[] = "debuggerEnabled";
+static const char breakpointsActiveWhenEnabled[] = "breakpointsActive";
static const char skipAllPauses[] = "skipAllPauses";
static const char breakpointsByRegex[] = "breakpointsByRegex";
static const char breakpointsByUrl[] = "breakpointsByUrl";
static const char breakpointsByScriptHash[] = "breakpointsByScriptHash";
static const char breakpointHints[] = "breakpointHints";
+static const char breakpointHintText[] = "text";
+static const char breakpointHintPrefixHash[] = "prefixHash";
+static const char breakpointHintPrefixLength[] = "prefixLen";
static const char instrumentationBreakpoints[] = "instrumentationBreakpoints";
} // namespace DebuggerAgentState
@@ -179,23 +184,49 @@ bool positionComparator(const std::pair<int, int>& a,
return a.second < b.second;
}
-String16 breakpointHint(const V8DebuggerScript& script, int lineNumber,
- int columnNumber) {
- int offset;
- if (!script.offset(lineNumber, columnNumber).To(&offset)) return String16();
+std::unique_ptr<protocol::DictionaryValue> breakpointHint(
+ const V8DebuggerScript& script, int breakpointLineNumber,
+ int breakpointColumnNumber, int actualLineNumber, int actualColumnNumber) {
+ int actualOffset;
+ int breakpointOffset;
+ if (!script.offset(actualLineNumber, actualColumnNumber).To(&actualOffset) ||
+ !script.offset(breakpointLineNumber, breakpointColumnNumber)
+ .To(&breakpointOffset)) {
+ return {};
+ }
+
+ auto hintObject = protocol::DictionaryValue::create();
+ String16 rawHint = script.source(actualOffset, kBreakpointHintMaxLength);
+ std::pair<size_t, size_t> offsetAndLength =
+ rawHint.getTrimmedOffsetAndLength();
String16 hint =
- script.source(offset, kBreakpointHintMaxLength).stripWhiteSpace();
+ rawHint.substring(offsetAndLength.first, offsetAndLength.second);
for (size_t i = 0; i < hint.length(); ++i) {
if (hint[i] == '\r' || hint[i] == '\n' || hint[i] == ';') {
- return hint.substring(0, i);
+ hint = hint.substring(0, i);
+ break;
}
}
- return hint;
+ hintObject->setString(DebuggerAgentState::breakpointHintText, hint);
+
+ // Also store the hash of the text between the requested breakpoint location
+ // and the actual breakpoint location. If we see the same prefix text next
+ // time, we will keep the breakpoint at the same location (so that
+ // breakpoints do not slide around on reloads without any edits).
+ if (breakpointOffset <= actualOffset) {
+ size_t length = actualOffset - breakpointOffset + offsetAndLength.first;
+ String16 prefix = script.source(breakpointOffset, length);
+ int crc32 = computeCrc32(prefix);
+ hintObject->setInteger(DebuggerAgentState::breakpointHintPrefixHash, crc32);
+ hintObject->setInteger(DebuggerAgentState::breakpointHintPrefixLength,
+ v8::base::checked_cast<int32_t>(length));
+ }
+ return hintObject;
}
void adjustBreakpointLocation(const V8DebuggerScript& script,
- const String16& hint, int* lineNumber,
- int* columnNumber) {
+ const protocol::DictionaryValue* hintObject,
+ int* lineNumber, int* columnNumber) {
if (*lineNumber < script.startLine() || *lineNumber > script.endLine())
return;
if (*lineNumber == script.startLine() &&
@@ -206,15 +237,41 @@ void adjustBreakpointLocation(const V8DebuggerScript& script,
return;
}
- if (hint.isEmpty()) return;
int sourceOffset;
if (!script.offset(*lineNumber, *columnNumber).To(&sourceOffset)) return;
+ int prefixLength = 0;
+ hintObject->getInteger(DebuggerAgentState::breakpointHintPrefixLength,
+ &prefixLength);
+ String16 hint;
+ if (!hintObject->getString(DebuggerAgentState::breakpointHintText, &hint) ||
+ hint.isEmpty())
+ return;
+
intptr_t searchRegionOffset = std::max(
sourceOffset - kBreakpointHintMaxSearchOffset, static_cast<intptr_t>(0));
size_t offset = sourceOffset - searchRegionOffset;
- String16 searchArea = script.source(searchRegionOffset,
- offset + kBreakpointHintMaxSearchOffset);
+ size_t searchRegionSize =
+ offset + std::max(kBreakpointHintMaxSearchOffset,
+ static_cast<intptr_t>(prefixLength + hint.length()));
+
+ String16 searchArea = script.source(searchRegionOffset, searchRegionSize);
+
+ // Let us see if the breakpoint hint text appears at the same location
+ // as before, with the same prefix text in between. If yes, then we just use
+ // that position.
+ int prefixHash;
+ if (hintObject->getInteger(DebuggerAgentState::breakpointHintPrefixHash,
+ &prefixHash) &&
+ offset + prefixLength + hint.length() <= searchArea.length() &&
+ searchArea.substring(offset + prefixLength, hint.length()) == hint &&
+ computeCrc32(searchArea.substring(offset, prefixLength)) == prefixHash) {
+ v8::debug::Location hintPosition = script.location(
+ static_cast<int>(searchRegionOffset + offset + prefixLength));
+ *lineNumber = hintPosition.GetLineNumber();
+ *columnNumber = hintPosition.GetColumnNumber();
+ return;
+ }
size_t nextMatch = searchArea.find(hint, offset);
size_t prevMatch = searchArea.reverseFind(hint, offset);
@@ -222,7 +279,8 @@ void adjustBreakpointLocation(const V8DebuggerScript& script,
return;
}
size_t bestMatch;
- if (nextMatch == String16::kNotFound) {
+ if (nextMatch == String16::kNotFound ||
+ nextMatch > offset + kBreakpointHintMaxSearchOffset) {
bestMatch = prevMatch;
} else if (prevMatch == String16::kNotFound) {
bestMatch = nextMatch;
@@ -371,7 +429,7 @@ V8DebuggerAgentImpl::V8DebuggerAgentImpl(
: m_inspector(session->inspector()),
m_debugger(m_inspector->debugger()),
m_session(session),
- m_enabled(false),
+ m_enableState(kDisabled),
m_state(state),
m_frontend(frontendChannel),
m_isolate(m_inspector->isolate()) {}
@@ -379,7 +437,7 @@ V8DebuggerAgentImpl::V8DebuggerAgentImpl(
V8DebuggerAgentImpl::~V8DebuggerAgentImpl() = default;
void V8DebuggerAgentImpl::enableImpl() {
- m_enabled = true;
+ m_enableState = kEnabled;
m_state->setBoolean(DebuggerAgentState::debuggerEnabled, true);
m_debugger->enable();
@@ -389,9 +447,11 @@ void V8DebuggerAgentImpl::enableImpl() {
didParseSource(std::move(script), true);
}
- m_breakpointsActive = true;
- m_debugger->setBreakpointsActive(true);
-
+ m_breakpointsActive = m_state->booleanProperty(
+ DebuggerAgentState::breakpointsActiveWhenEnabled, true);
+ if (m_breakpointsActive) {
+ m_debugger->setBreakpointsActive(true);
+ }
if (isPaused()) {
didPause(0, v8::Local<v8::Value>(), std::vector<v8::debug::BreakpointId>(),
v8::debug::kException, false,
@@ -401,6 +461,8 @@ void V8DebuggerAgentImpl::enableImpl() {
Response V8DebuggerAgentImpl::enable(Maybe<double> maxScriptsCacheSize,
String16* outDebuggerId) {
+ if (m_enableState == kStopping)
+ return Response::ServerError("Debugger is stopping");
m_maxScriptCacheSize = v8::base::saturated_cast<size_t>(
maxScriptsCacheSize.fromMaybe(std::numeric_limits<double>::max()));
*outDebuggerId =
@@ -439,7 +501,7 @@ Response V8DebuggerAgentImpl::disable() {
m_cachedScripts.clear();
m_cachedScriptSize = 0;
for (const auto& it : m_debuggerBreakpointIdToBreakpointId) {
- v8::debug::RemoveBreakpoint(m_isolate, it.first);
+ m_debugger->removeBreakpoint(it.first);
}
m_breakpointIdToDebuggerBreakpointIds.clear();
m_debuggerBreakpointIdToBreakpointId.clear();
@@ -449,14 +511,15 @@ Response V8DebuggerAgentImpl::disable() {
m_skipAllPauses = false;
m_state->setBoolean(DebuggerAgentState::skipAllPauses, false);
m_state->remove(DebuggerAgentState::blackboxPattern);
- m_enabled = false;
+ m_enableState = kDisabled;
+ m_instrumentationFinished = true;
m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
m_debugger->disable();
return Response::Success();
}
void V8DebuggerAgentImpl::restore() {
- DCHECK(!m_enabled);
+ DCHECK(m_enableState == kDisabled);
if (!m_state->booleanProperty(DebuggerAgentState::debuggerEnabled, false))
return;
if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
@@ -484,7 +547,8 @@ void V8DebuggerAgentImpl::restore() {
}
Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
- if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+ m_state->setBoolean(DebuggerAgentState::breakpointsActiveWhenEnabled, active);
+ if (!enabled()) return Response::Success();
if (m_breakpointsActive == active) return Response::Success();
m_breakpointsActive = active;
m_debugger->setBreakpointsActive(active);
@@ -501,24 +565,42 @@ Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
return Response::Success();
}
-static bool matches(V8InspectorImpl* inspector, const V8DebuggerScript& script,
- BreakpointType type, const String16& selector) {
- switch (type) {
- case BreakpointType::kByUrl:
- return script.sourceURL() == selector;
- case BreakpointType::kByScriptHash:
- return script.hash() == selector;
- case BreakpointType::kByUrlRegex: {
- V8Regex regex(inspector, selector, true);
- return regex.match(script.sourceURL()) != -1;
+namespace {
+
+class Matcher {
+ public:
+ Matcher(V8InspectorImpl* inspector, BreakpointType type,
+ const String16& selector)
+ : type_(type), selector_(selector) {
+ if (type == BreakpointType::kByUrlRegex) {
+ regex_ = std::make_unique<V8Regex>(inspector, selector, true);
}
- case BreakpointType::kByScriptId: {
- return script.scriptId() == selector;
+ }
+
+ bool matches(const V8DebuggerScript& script) {
+ switch (type_) {
+ case BreakpointType::kByUrl:
+ return script.sourceURL() == selector_;
+ case BreakpointType::kByScriptHash:
+ return script.hash() == selector_;
+ case BreakpointType::kByUrlRegex: {
+ return regex_->match(script.sourceURL()) != -1;
+ }
+ case BreakpointType::kByScriptId: {
+ return script.scriptId() == selector_;
+ }
+ default:
+ return false;
}
- default:
- return false;
}
-}
+
+ private:
+ std::unique_ptr<V8Regex> regex_;
+ BreakpointType type_;
+ const String16& selector_;
+};
+
+} // namespace
Response V8DebuggerAgentImpl::setBreakpointByUrl(
int lineNumber, Maybe<String16> optionalURL,
@@ -557,6 +639,9 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
type = BreakpointType::kByScriptHash;
}
+ // Note: This constructor can call into JavaScript.
+ Matcher matcher(m_inspector, type, selector);
+
String16 condition = optionalCondition.fromMaybe(String16());
String16 breakpointId =
generateBreakpointId(type, selector, lineNumber, columnNumber);
@@ -585,26 +670,33 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
"Breakpoint at specified location already exists.");
}
- String16 hint;
+ std::unique_ptr<protocol::DictionaryValue> hint;
for (const auto& script : m_scripts) {
- if (!matches(m_inspector, *script.second, type, selector)) continue;
- if (!hint.isEmpty()) {
- adjustBreakpointLocation(*script.second, hint, &lineNumber,
- &columnNumber);
+ if (!matcher.matches(*script.second)) continue;
+ // Make sure the session was not disabled by some re-entrant call
+ // in the script matcher.
+ DCHECK(enabled());
+ int adjustedLineNumber = lineNumber;
+ int adjustedColumnNumber = columnNumber;
+ if (hint) {
+ adjustBreakpointLocation(*script.second, hint.get(), &adjustedLineNumber,
+ &adjustedColumnNumber);
}
- std::unique_ptr<protocol::Debugger::Location> location = setBreakpointImpl(
- breakpointId, script.first, condition, lineNumber, columnNumber);
+ std::unique_ptr<protocol::Debugger::Location> location =
+ setBreakpointImpl(breakpointId, script.first, condition,
+ adjustedLineNumber, adjustedColumnNumber);
if (location && type != BreakpointType::kByUrlRegex) {
- hint = breakpointHint(*script.second, location->getLineNumber(),
- location->getColumnNumber(columnNumber));
+ hint = breakpointHint(*script.second, lineNumber, columnNumber,
+ location->getLineNumber(),
+ location->getColumnNumber(adjustedColumnNumber));
}
if (location) (*locations)->emplace_back(std::move(location));
}
breakpoints->setString(breakpointId, condition);
- if (!hint.isEmpty()) {
+ if (hint) {
protocol::DictionaryValue* breakpointHints =
getOrCreateObject(m_state, DebuggerAgentState::breakpointHints);
- breakpointHints->setString(breakpointId, hint);
+ breakpointHints->setObject(breakpointId, std::move(hint));
}
*outBreakpointId = breakpointId;
return Response::Success();
@@ -683,6 +775,7 @@ Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
if (!parseBreakpointId(breakpointId, &type, &selector)) {
return Response::Success();
}
+ Matcher matcher(m_inspector, type, selector);
protocol::DictionaryValue* breakpoints = nullptr;
switch (type) {
case BreakpointType::kByUrl: {
@@ -719,8 +812,10 @@ Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
// not Wasm breakpoint.
std::vector<V8DebuggerScript*> scripts;
for (const auto& scriptIter : m_scripts) {
- const bool scriptSelectorMatch =
- matches(m_inspector, *scriptIter.second, type, selector);
+ const bool scriptSelectorMatch = matcher.matches(*scriptIter.second);
+ // Make sure the session was not disabled by some re-entrant call
+ // in the script matcher.
+ DCHECK(enabled());
const bool isInstrumentation =
type == BreakpointType::kInstrumentationBreakpoint;
if (!scriptSelectorMatch && !isInstrumentation) continue;
@@ -751,7 +846,7 @@ void V8DebuggerAgentImpl::removeBreakpointImpl(
script->removeWasmBreakpoint(id);
}
#endif // V8_ENABLE_WEBASSEMBLY
- v8::debug::RemoveBreakpoint(m_isolate, id);
+ m_debugger->removeBreakpoint(id);
m_debuggerBreakpointIdToBreakpointId.erase(id);
}
m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
@@ -1017,6 +1112,9 @@ const char* buildStatus(v8::debug::LiveEditResult::Status status) {
case v8::debug::LiveEditResult::BLOCKED_BY_RUNNING_GENERATOR:
return protocol::Debugger::SetScriptSource::StatusEnum::
BlockedByActiveGenerator;
+ case v8::debug::LiveEditResult::BLOCKED_BY_TOP_LEVEL_ES_MODULE_CHANGE:
+ return protocol::Debugger::SetScriptSource::StatusEnum::
+ BlockedByTopLevelEsModuleChange;
}
}
} // namespace
@@ -1350,6 +1448,8 @@ Response V8DebuggerAgentImpl::pause() {
Response V8DebuggerAgentImpl::resume(Maybe<bool> terminateOnResume) {
if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
+
+ m_instrumentationFinished = true;
m_debugger->continueProgram(m_session->contextGroupId(),
terminateOnResume.fromMaybe(false));
return Response::Success();
@@ -1675,13 +1775,13 @@ Response V8DebuggerAgentImpl::currentCallFrames(
.setCanBeRestarted(iterator->CanBeRestarted())
.build();
- v8::Local<v8::Function> func = iterator->GetFunction();
- if (!func.IsEmpty()) {
+ v8::debug::Location func_loc = iterator->GetFunctionLocation();
+ if (!func_loc.IsEmpty()) {
frame->setFunctionLocation(
protocol::Debugger::Location::create()
- .setScriptId(String16::fromInteger(func->ScriptId()))
- .setLineNumber(func->GetScriptLineNumber())
- .setColumnNumber(func->GetScriptColumnNumber())
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setLineNumber(func_loc.GetLineNumber())
+ .setColumnNumber(func_loc.GetColumnNumber())
.build());
}
@@ -1883,14 +1983,17 @@ void V8DebuggerAgentImpl::didParseSource(
int columnNumber = 0;
parseBreakpointId(breakpointId, &type, &selector, &lineNumber,
&columnNumber);
+ Matcher matcher(m_inspector, type, selector);
- if (!matches(m_inspector, *scriptRef, type, selector)) continue;
+ if (!matcher.matches(*scriptRef)) continue;
+ // Make sure the session was not disabled by some re-entrant call
+ // in the script matcher.
+ DCHECK(enabled());
String16 condition;
breakpointWithCondition.second->asString(&condition);
- String16 hint;
- bool hasHint =
- breakpointHints && breakpointHints->getString(breakpointId, &hint);
- if (hasHint) {
+ protocol::DictionaryValue* hint =
+ breakpointHints ? breakpointHints->getObject(breakpointId) : nullptr;
+ if (hint) {
adjustBreakpointLocation(*scriptRef, hint, &lineNumber, &columnNumber);
}
std::unique_ptr<protocol::Debugger::Location> location =
@@ -1944,6 +2047,7 @@ void V8DebuggerAgentImpl::didPauseOnInstrumentation(
m_debuggerBreakpointIdToBreakpointId.end()) {
DCHECK_GT(protocolCallFrames->size(), 0);
if (protocolCallFrames->size() > 0) {
+ m_instrumentationFinished = false;
breakReason = protocol::Debugger::Paused::ReasonEnum::Instrumentation;
const String16 scriptId =
protocolCallFrames->at(0)->getLocation()->getScriptId();
@@ -2197,4 +2301,9 @@ Response V8DebuggerAgentImpl::processSkipList(
m_skipList = std::move(skipListInit);
return Response::Success();
}
+
+void V8DebuggerAgentImpl::stop() {
+ disable();
+ m_enableState = kStopping;
+}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 09f9c3d4e4..9853312bce 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -43,6 +43,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
V8DebuggerAgentImpl(const V8DebuggerAgentImpl&) = delete;
V8DebuggerAgentImpl& operator=(const V8DebuggerAgentImpl&) = delete;
void restore();
+ void stop();
// Part of the protocol.
Response enable(Maybe<double> maxScriptsCacheSize,
@@ -144,7 +145,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
positions) override;
- bool enabled() const { return m_enabled; }
+ bool enabled() const { return m_enableState == kEnabled; }
void setBreakpointFor(v8::Local<v8::Function> function,
v8::Local<v8::String> condition,
@@ -160,6 +161,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void reset();
+ bool instrumentationFinished() { return m_instrumentationFinished; }
// Interface for V8InspectorImpl
void didPauseOnInstrumentation(v8::debug::BreakpointId instrumentationId);
@@ -222,10 +224,17 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
using DebuggerBreakpointIdToBreakpointIdMap =
std::unordered_map<v8::debug::BreakpointId, String16>;
+ enum EnableState {
+ kDisabled,
+ kEnabled,
+ kStopping, // This is the same as 'disabled', but it cannot become enabled
+ // again.
+ };
+
V8InspectorImpl* m_inspector;
V8Debugger* m_debugger;
V8InspectorSessionImpl* m_session;
- bool m_enabled;
+ EnableState m_enableState;
protocol::DictionaryValue* m_state;
protocol::Debugger::Frontend m_frontend;
v8::Isolate* m_isolate;
@@ -260,6 +269,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
bool m_skipAllPauses = false;
bool m_breakpointsActive = false;
+ bool m_instrumentationFinished = true;
std::unique_ptr<V8Regex> m_blackboxPattern;
std::unordered_map<String16, std::vector<std::pair<int, int>>>
diff --git a/deps/v8/src/inspector/v8-debugger-barrier.cc b/deps/v8/src/inspector/v8-debugger-barrier.cc
new file mode 100644
index 0000000000..c20f3a19f1
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger-barrier.cc
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger-barrier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+V8DebuggerBarrier::V8DebuggerBarrier(V8InspectorClient* client,
+ int contextGroupId)
+ : m_client(client), m_contextGroupId(contextGroupId) {}
+
+V8DebuggerBarrier::~V8DebuggerBarrier() {
+ m_client->runIfWaitingForDebugger(m_contextGroupId);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-barrier.h b/deps/v8/src/inspector/v8-debugger-barrier.h
new file mode 100644
index 0000000000..f4dc29611b
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger-barrier.h
@@ -0,0 +1,28 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8_DEBUGGER_BARRIER_H_
+#define V8_INSPECTOR_V8_DEBUGGER_BARRIER_H_
+
+namespace v8_inspector {
+
+class V8InspectorClient;
+
+// This class is used to synchronize multiple sessions issuing
+// `Runtime.runIfWaitingForDebugger` so that the global client
+// `runIfWaitingForDebugger` method is only invoked when all
+// sessions have invoked `Runtime.runIfWaitingForDebugger`.
+class V8DebuggerBarrier {
+ public:
+ V8DebuggerBarrier(V8InspectorClient* client, int contextGroupId);
+ ~V8DebuggerBarrier();
+
+ private:
+ V8InspectorClient* const m_client;
+ int m_contextGroupId;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8_DEBUGGER_BARRIER_H_
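
The barrier added above relies purely on shared_ptr lifetime: every waiting session holds a reference, and the destructor, which calls runIfWaitingForDebugger(), only runs once the last session lets go. A minimal sketch of that pattern outside of V8, with a plain callback standing in for V8InspectorClient::runIfWaitingForDebugger (illustrative names only, not the inspector API):

#include <functional>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

class Barrier {
 public:
  explicit Barrier(std::function<void()> onAllReleased)
      : m_onAllReleased(std::move(onAllReleased)) {}
  ~Barrier() { m_onAllReleased(); }  // fires once, when the last reference dies

 private:
  std::function<void()> m_onAllReleased;
};

struct Session {
  std::shared_ptr<Barrier> barrier;
  void runIfWaitingForDebugger() { barrier.reset(); }  // release our share
};

int main() {
  auto barrier = std::make_shared<Barrier>(
      []() { std::cout << "resume: all sessions are ready\n"; });
  std::vector<Session> sessions{{barrier}, {barrier}, {barrier}};
  barrier.reset();  // the owner keeps only a weak reference in practice

  sessions[0].runIfWaitingForDebugger();  // nothing yet
  sessions[1].runIfWaitingForDebugger();  // nothing yet
  sessions[2].runIfWaitingForDebugger();  // prints the resume message
  return 0;
}
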
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index aed85fa7cd..b11802ce64 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -84,8 +84,14 @@ V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
V8Debugger::~V8Debugger() {
m_isolate->RemoveCallCompletedCallback(
&V8Debugger::terminateExecutionCompletedCallback);
- m_isolate->RemoveMicrotasksCompletedCallback(
- &V8Debugger::terminateExecutionCompletedCallbackIgnoringData);
+ if (!m_terminateExecutionCallbackContext.IsEmpty()) {
+ v8::HandleScope handles(m_isolate);
+ v8::MicrotaskQueue* microtask_queue =
+ m_terminateExecutionCallbackContext.Get(m_isolate)->GetMicrotaskQueue();
+ microtask_queue->RemoveMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallbackIgnoringData,
+ microtask_queue);
+ }
}
void V8Debugger::enable() {
@@ -96,7 +102,7 @@ void V8Debugger::enable() {
v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
#if V8_ENABLE_WEBASSEMBLY
- v8::debug::TierDownAllModulesPerIsolate(m_isolate);
+ v8::debug::EnterDebuggingForIsolate(m_isolate);
#endif // V8_ENABLE_WEBASSEMBLY
}
@@ -104,14 +110,20 @@ void V8Debugger::disable() {
if (isPaused()) {
bool scheduledOOMBreak = m_scheduledOOMBreak;
bool hasAgentAcceptsPause = false;
- m_inspector->forEachSession(
- m_pausedContextGroupId, [&scheduledOOMBreak, &hasAgentAcceptsPause](
- V8InspectorSessionImpl* session) {
- if (session->debuggerAgent()->acceptsPause(scheduledOOMBreak)) {
- hasAgentAcceptsPause = true;
- }
- });
- if (!hasAgentAcceptsPause) m_inspector->client()->quitMessageLoopOnPause();
+
+ if (m_instrumentationPause) {
+ quitMessageLoopIfAgentsFinishedInstrumentation();
+ } else {
+ m_inspector->forEachSession(
+ m_pausedContextGroupId, [&scheduledOOMBreak, &hasAgentAcceptsPause](
+ V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->acceptsPause(scheduledOOMBreak)) {
+ hasAgentAcceptsPause = true;
+ }
+ });
+ if (!hasAgentAcceptsPause)
+ m_inspector->client()->quitMessageLoopOnPause();
+ }
}
if (--m_enableCount) return;
clearContinueToLocation();
@@ -121,7 +133,7 @@ void V8Debugger::disable() {
m_pauseOnNextCallRequested = false;
m_pauseOnAsyncCall = false;
#if V8_ENABLE_WEBASSEMBLY
- v8::debug::TierUpAllModulesPerIsolate(m_isolate);
+ v8::debug::LeaveDebuggingForIsolate(m_isolate);
#endif // V8_ENABLE_WEBASSEMBLY
v8::debug::SetDebugDelegate(m_isolate, nullptr);
m_isolate->RemoveNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback,
@@ -160,9 +172,15 @@ void V8Debugger::setBreakpointsActive(bool active) {
UNREACHABLE();
}
m_breakpointsActiveCount += active ? 1 : -1;
+ DCHECK_GE(m_breakpointsActiveCount, 0);
v8::debug::SetBreakPointsActive(m_isolate, m_breakpointsActiveCount);
}
+void V8Debugger::removeBreakpoint(v8::debug::BreakpointId id) {
+ v8::debug::RemoveBreakpoint(m_isolate, id);
+ m_throwingConditionReported.erase(id);
+}
+
v8::debug::ExceptionBreakState V8Debugger::getPauseOnExceptionsState() {
DCHECK(enabled());
return m_pauseOnExceptionsState;
@@ -233,14 +251,39 @@ void V8Debugger::requestPauseAfterInstrumentation() {
m_requestedPauseAfterInstrumentation = true;
}
+void V8Debugger::quitMessageLoopIfAgentsFinishedInstrumentation() {
+ bool allAgentsFinishedInstrumentation = true;
+ m_inspector->forEachSession(
+ m_pausedContextGroupId,
+ [&allAgentsFinishedInstrumentation](V8InspectorSessionImpl* session) {
+ if (!session->debuggerAgent()->instrumentationFinished()) {
+ allAgentsFinishedInstrumentation = false;
+ }
+ });
+ if (allAgentsFinishedInstrumentation) {
+ m_inspector->client()->quitMessageLoopOnPause();
+ }
+}
+
void V8Debugger::continueProgram(int targetContextGroupId,
bool terminateOnResume) {
if (m_pausedContextGroupId != targetContextGroupId) return;
if (isPaused()) {
- if (terminateOnResume) {
+ if (m_instrumentationPause) {
+ quitMessageLoopIfAgentsFinishedInstrumentation();
+ } else if (terminateOnResume) {
v8::debug::SetTerminateOnResume(m_isolate);
+
+ v8::HandleScope handles(m_isolate);
+ v8::Local<v8::Context> context =
+ m_inspector->client()->ensureDefaultContextInGroup(
+ targetContextGroupId);
+ installTerminateExecutionCallbacks(context);
+
+ m_inspector->client()->quitMessageLoopOnPause();
+ } else {
+ m_inspector->client()->quitMessageLoopOnPause();
}
- m_inspector->client()->quitMessageLoopOnPause();
}
}
@@ -283,8 +326,9 @@ void V8Debugger::stepOutOfFunction(int targetContextGroupId) {
}
void V8Debugger::terminateExecution(
+ v8::Local<v8::Context> context,
std::unique_ptr<TerminateExecutionCallback> callback) {
- if (m_terminateExecutionCallback) {
+ if (!m_terminateExecutionReported) {
if (callback) {
callback->sendFailure(Response::ServerError(
"There is current termination request in progress"));
@@ -292,22 +336,52 @@ void V8Debugger::terminateExecution(
return;
}
m_terminateExecutionCallback = std::move(callback);
+ installTerminateExecutionCallbacks(context);
+ m_isolate->TerminateExecution();
+}
+
+void V8Debugger::installTerminateExecutionCallbacks(
+ v8::Local<v8::Context> context) {
m_isolate->AddCallCompletedCallback(
&V8Debugger::terminateExecutionCompletedCallback);
- m_isolate->AddMicrotasksCompletedCallback(
- &V8Debugger::terminateExecutionCompletedCallbackIgnoringData);
- m_isolate->TerminateExecution();
+
+ if (!context.IsEmpty()) {
+ m_terminateExecutionCallbackContext.Reset(m_isolate, context);
+ m_terminateExecutionCallbackContext.SetWeak();
+ v8::MicrotaskQueue* microtask_queue = context->GetMicrotaskQueue();
+ microtask_queue->AddMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallbackIgnoringData,
+ microtask_queue);
+ }
+
+ DCHECK(m_terminateExecutionReported);
+ m_terminateExecutionReported = false;
}
void V8Debugger::reportTermination() {
- if (!m_terminateExecutionCallback) return;
+ if (m_terminateExecutionReported) {
+ DCHECK(m_terminateExecutionCallbackContext.IsEmpty());
+ return;
+ }
+ v8::HandleScope handles(m_isolate);
m_isolate->RemoveCallCompletedCallback(
&V8Debugger::terminateExecutionCompletedCallback);
- m_isolate->RemoveMicrotasksCompletedCallback(
- &V8Debugger::terminateExecutionCompletedCallbackIgnoringData);
+ if (!m_terminateExecutionCallbackContext.IsEmpty()) {
+ v8::MicrotaskQueue* microtask_queue =
+ m_terminateExecutionCallbackContext.Get(m_isolate)->GetMicrotaskQueue();
+ if (microtask_queue) {
+ microtask_queue->RemoveMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallbackIgnoringData,
+ microtask_queue);
+ }
+ }
m_isolate->CancelTerminateExecution();
- m_terminateExecutionCallback->sendSuccess();
- m_terminateExecutionCallback.reset();
+ if (m_terminateExecutionCallback) {
+ m_terminateExecutionCallback->sendSuccess();
+ m_terminateExecutionCallback.reset();
+ }
+ m_terminateExecutionCallbackContext.Reset();
+ m_terminateExecutionReported = true;
}
void V8Debugger::terminateExecutionCompletedCallback(v8::Isolate* isolate) {
@@ -318,7 +392,12 @@ void V8Debugger::terminateExecutionCompletedCallback(v8::Isolate* isolate) {
}
void V8Debugger::terminateExecutionCompletedCallbackIgnoringData(
- v8::Isolate* isolate, void*) {
+ v8::Isolate* isolate, void* data) {
+ DCHECK(data);
+  // Remove the callback after every microtasks-completed notification,
+  // regardless of how `terminateExecutionCompletedCallback` behaves.
+ static_cast<v8::MicrotaskQueue*>(data)->RemoveMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallbackIgnoringData, data);
terminateExecutionCompletedCallback(isolate);
}
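
The callback above uses its data pointer to unregister itself from the owning MicrotaskQueue before delegating, so it fires at most once no matter what the delegated handler does. A toy sketch of that self-removing-callback pattern, with a small registry standing in for v8::MicrotaskQueue (illustrative only, not the V8 API):

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

using Callback = void (*)(void* data);

class CallbackRegistry {
 public:
  void Add(Callback cb, void* data) { m_callbacks.emplace_back(cb, data); }
  void Remove(Callback cb, void* data) {
    m_callbacks.erase(std::remove(m_callbacks.begin(), m_callbacks.end(),
                                  std::make_pair(cb, data)),
                      m_callbacks.end());
  }
  void NotifyCompleted() {
    // Copy first: callbacks may unregister themselves while we iterate.
    auto snapshot = m_callbacks;
    for (auto& [cb, data] : snapshot) cb(data);
  }

 private:
  std::vector<std::pair<Callback, void*>> m_callbacks;
};

void onCompletedOnce(void* data) {
  auto* registry = static_cast<CallbackRegistry*>(data);
  // Remove ourselves first, regardless of what the real handler would do next.
  registry->Remove(&onCompletedOnce, data);
  std::cout << "termination reported\n";
}

int main() {
  CallbackRegistry queue;
  queue.Add(&onCompletedOnce, &queue);
  queue.NotifyCompleted();  // prints once
  queue.NotifyCompleted();  // the callback already removed itself: no output
  return 0;
}
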
@@ -518,11 +597,11 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
});
}
-V8Debugger::PauseAfterInstrumentation V8Debugger::BreakOnInstrumentation(
+V8Debugger::ActionAfterInstrumentation V8Debugger::BreakOnInstrumentation(
v8::Local<v8::Context> pausedContext,
v8::debug::BreakpointId instrumentationId) {
// Don't allow nested breaks.
- if (isPaused()) return kNoPauseAfterInstrumentationRequested;
+ if (isPaused()) return ActionAfterInstrumentation::kPauseIfBreakpointsHit;
int contextGroupId = m_inspector->contextGroupId(pausedContext);
bool hasAgents = false;
@@ -531,7 +610,7 @@ V8Debugger::PauseAfterInstrumentation V8Debugger::BreakOnInstrumentation(
if (session->debuggerAgent()->acceptsPause(false /* isOOMBreak */))
hasAgents = true;
});
- if (!hasAgents) return kNoPauseAfterInstrumentationRequested;
+ if (!hasAgents) return ActionAfterInstrumentation::kPauseIfBreakpointsHit;
m_pausedContextGroupId = contextGroupId;
m_instrumentationPause = true;
@@ -553,14 +632,21 @@ V8Debugger::PauseAfterInstrumentation V8Debugger::BreakOnInstrumentation(
m_pausedContextGroupId = 0;
m_instrumentationPause = false;
- m_inspector->forEachSession(contextGroupId,
- [](V8InspectorSessionImpl* session) {
- if (session->debuggerAgent()->enabled())
- session->debuggerAgent()->didContinue();
- });
- return requestedPauseAfterInstrumentation
- ? kPauseAfterInstrumentationRequested
- : kNoPauseAfterInstrumentationRequested;
+ hasAgents = false;
+ m_inspector->forEachSession(
+ contextGroupId, [&hasAgents](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->enabled())
+ session->debuggerAgent()->didContinue();
+ if (session->debuggerAgent()->acceptsPause(false /* isOOMBreak */))
+ hasAgents = true;
+ });
+ if (!hasAgents) {
+ return ActionAfterInstrumentation::kContinue;
+ } else if (requestedPauseAfterInstrumentation) {
+ return ActionAfterInstrumentation::kPause;
+ } else {
+ return ActionAfterInstrumentation::kPauseIfBreakpointsHit;
+ }
}
void V8Debugger::BreakProgramRequested(
@@ -623,6 +709,47 @@ bool V8Debugger::ShouldBeSkipped(v8::Local<v8::debug::Script> script, int line,
return hasAgents && allShouldBeSkipped;
}
+void V8Debugger::BreakpointConditionEvaluated(
+ v8::Local<v8::Context> context, v8::debug::BreakpointId breakpoint_id,
+ bool exception_thrown, v8::Local<v8::Value> exception) {
+ auto it = m_throwingConditionReported.find(breakpoint_id);
+
+ if (!exception_thrown) {
+    // Successful evaluation; clear the entry so that we report the exception
+    // again should this breakpoint's condition throw in the future.
+ if (it != m_throwingConditionReported.end()) {
+ m_throwingConditionReported.erase(it);
+ }
+ return;
+ }
+
+ CHECK(exception_thrown);
+ if (it != m_throwingConditionReported.end() || exception.IsEmpty()) {
+ // Already reported this breakpoint or no exception to report.
+ return;
+ }
+
+ CHECK(!exception.IsEmpty());
+
+ v8::Local<v8::Message> message =
+ v8::debug::CreateMessageFromException(isolate(), exception);
+ v8::ScriptOrigin origin = message->GetScriptOrigin();
+ String16 url;
+ if (origin.ResourceName()->IsString()) {
+ url = toProtocolString(isolate(), origin.ResourceName().As<v8::String>());
+ }
+ // The message text is prepended to the exception text itself so we don't
+ // need to get it from the v8::Message.
+ StringView messageText;
+ StringView detailedMessage;
+ m_inspector->exceptionThrown(
+ context, messageText, exception, detailedMessage, toStringView(url),
+ message->GetLineNumber(context).FromMaybe(0),
+ message->GetStartColumn() + 1, createStackTrace(message->GetStackTrace()),
+ origin.ScriptId());
+ m_throwingConditionReported.insert(breakpoint_id);
+}
+
void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
int id, bool isBlackboxed) {
// Async task events from Promises are given misaligned pointers to prevent
@@ -668,7 +795,6 @@ V8StackTraceId V8Debugger::currentExternalParent() {
v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
v8::Local<v8::Context> context, v8::Local<v8::Value> value,
ScopeTargetKind kind) {
- v8::Local<v8::Value> scopesValue;
std::unique_ptr<v8::debug::ScopeIterator> iterator;
switch (kind) {
case FUNCTION:
@@ -792,6 +918,48 @@ v8::MaybeLocal<v8::Array> V8Debugger::collectionsEntries(
return wrappedEntries;
}
+v8::MaybeLocal<v8::Array> V8Debugger::privateMethods(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> receiver) {
+ if (!receiver->IsObject()) {
+ return v8::MaybeLocal<v8::Array>();
+ }
+ v8::Isolate* isolate = context->GetIsolate();
+ std::vector<v8::Local<v8::Value>> names;
+ std::vector<v8::Local<v8::Value>> values;
+ int filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateMethods);
+ if (!v8::debug::GetPrivateMembers(context, receiver.As<v8::Object>(), filter,
+ &names, &values) ||
+ names.size() == 0) {
+ return v8::MaybeLocal<v8::Array>();
+ }
+
+ v8::Local<v8::Array> result = v8::Array::New(isolate);
+ if (!result->SetPrototype(context, v8::Null(isolate)).FromMaybe(false))
+ return v8::MaybeLocal<v8::Array>();
+ for (uint32_t i = 0; i < names.size(); i++) {
+ v8::Local<v8::Value> name = names[i];
+ v8::Local<v8::Value> value = values[i];
+ DCHECK(value->IsFunction());
+ v8::Local<v8::Object> wrapper = v8::Object::New(isolate);
+ if (!wrapper->SetPrototype(context, v8::Null(isolate)).FromMaybe(false))
+ continue;
+ createDataProperty(context, wrapper,
+ toV8StringInternalized(isolate, "name"), name);
+ createDataProperty(context, wrapper,
+ toV8StringInternalized(isolate, "value"), value);
+ if (!addInternalObject(context, wrapper,
+ V8InternalValueType::kPrivateMethod))
+ continue;
+ createDataProperty(context, result, result->Length(), wrapper);
+ }
+
+ if (!addInternalObject(context, result,
+ V8InternalValueType::kPrivateMethodList))
+ return v8::MaybeLocal<v8::Array>();
+ return result;
+}
+
v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
v8::Local<v8::Array> properties;
@@ -804,10 +972,6 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
createDataProperty(context, properties, properties->Length(), entries);
}
- if (v8::debug::isExperimentalRemoveInternalScopesPropertyEnabled()) {
- return properties;
- }
-
if (value->IsGeneratorObject()) {
v8::Local<v8::Value> scopes;
if (generatorScopes(context, value).ToLocal(&scopes)) {
@@ -825,6 +989,13 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
createDataProperty(context, properties, properties->Length(), scopes);
}
}
+ v8::Local<v8::Array> private_methods;
+ if (privateMethods(context, value).ToLocal(&private_methods)) {
+ createDataProperty(context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[PrivateMethods]]"));
+ createDataProperty(context, properties, properties->Length(),
+ private_methods);
+ }
return properties;
}
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index fda2ce0f17..65fb27e238 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -54,6 +54,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Isolate* isolate() const { return m_isolate; }
void setBreakpointsActive(bool);
+ void removeBreakpoint(v8::debug::BreakpointId id);
v8::debug::ExceptionBreakState getPauseOnExceptionsState();
void setPauseOnExceptionsState(v8::debug::ExceptionBreakState);
@@ -71,7 +72,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
void stepOverStatement(int targetContextGroupId);
void stepOutOfFunction(int targetContextGroupId);
- void terminateExecution(std::unique_ptr<TerminateExecutionCallback> callback);
+ void terminateExecution(v8::Local<v8::Context> context,
+ std::unique_ptr<TerminateExecutionCallback> callback);
Response continueToLocation(int targetContextGroupId,
V8DebuggerScript* script,
@@ -151,6 +153,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
static void terminateExecutionCompletedCallback(v8::Isolate* isolate);
static void terminateExecutionCompletedCallbackIgnoringData(
v8::Isolate* isolate, void*);
+ void installTerminateExecutionCallbacks(v8::Local<v8::Context> context);
+
void handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
@@ -172,6 +176,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Local<v8::Value>);
v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
v8::Local<v8::Value> value);
+ v8::MaybeLocal<v8::Array> privateMethods(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value);
void asyncTaskScheduledForStack(const StringView& taskName, void* task,
bool recurring, bool skipTopFrame = false);
@@ -193,7 +199,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Local<v8::Context> paused_context,
const std::vector<v8::debug::BreakpointId>& break_points_hit,
v8::debug::BreakReasons break_reasons) override;
- PauseAfterInstrumentation BreakOnInstrumentation(
+ ActionAfterInstrumentation BreakOnInstrumentation(
v8::Local<v8::Context> paused_context, v8::debug::BreakpointId) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
@@ -205,11 +211,17 @@ class V8Debugger : public v8::debug::DebugDelegate,
bool ShouldBeSkipped(v8::Local<v8::debug::Script> script, int line,
int column) override;
+ void BreakpointConditionEvaluated(v8::Local<v8::Context> context,
+ v8::debug::BreakpointId breakpoint_id,
+ bool exception_thrown,
+ v8::Local<v8::Value> exception) override;
int currentContextGroupId();
bool hasScheduledBreakOnNextFunctionCall() const;
+ void quitMessageLoopIfAgentsFinishedInstrumentation();
+
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
int m_enableCount;
@@ -297,6 +309,14 @@ class V8Debugger : public v8::debug::DebugDelegate,
std::unordered_map<int, internal::V8DebuggerId> m_contextGroupIdToDebuggerId;
std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
+ v8::Global<v8::Context> m_terminateExecutionCallbackContext;
+ bool m_terminateExecutionReported = true;
+
+  // Throwing conditional breakpoints for which we have already logged an
+  // error message to the console. The intention is to reduce console spam.
+  // Removing the breakpoint, or a non-throwing evaluation of its condition,
+  // clears it from the set.
+ std::unordered_set<v8::debug::BreakpointId> m_throwingConditionReported;
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 27bdd3e812..60ad12aece 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -44,6 +44,7 @@
#include "src/inspector/v8-console-message.h"
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger-barrier.h"
#include "src/inspector/v8-debugger-id.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-session-impl.h"
@@ -147,11 +148,26 @@ std::unique_ptr<V8StackTrace> V8InspectorImpl::createStackTrace(
std::unique_ptr<V8InspectorSession> V8InspectorImpl::connect(
int contextGroupId, V8Inspector::Channel* channel, StringView state,
- ClientTrustLevel client_trust_level) {
+ ClientTrustLevel client_trust_level, SessionPauseState pause_state) {
int sessionId = ++m_lastSessionId;
+ std::shared_ptr<V8DebuggerBarrier> debuggerBarrier;
+ if (pause_state == kWaitingForDebugger) {
+ auto it = m_debuggerBarriers.find(contextGroupId);
+ if (it != m_debuggerBarriers.end()) {
+      // Note this will be empty if the pre-existing barrier has already been
+      // released. This is by design, as a released barrier is no longer
+      // effective.
+ debuggerBarrier = it->second.lock();
+ } else {
+ debuggerBarrier =
+ std::make_shared<V8DebuggerBarrier>(m_client, contextGroupId);
+ m_debuggerBarriers.insert(it, {contextGroupId, debuggerBarrier});
+ }
+ }
std::unique_ptr<V8InspectorSessionImpl> session =
V8InspectorSessionImpl::create(this, contextGroupId, sessionId, channel,
- state, client_trust_level);
+ state, client_trust_level,
+ std::move(debuggerBarrier));
m_sessions[contextGroupId][sessionId] = session.get();
return std::move(session);
}
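
connect() above keeps only a weak_ptr per context group, so a later session joins a still-live barrier but deliberately does not revive one that has already been released. A compact sketch of that lock-or-create bookkeeping (illustrative names, not the inspector API):

#include <iostream>
#include <memory>
#include <unordered_map>

struct Barrier {
  ~Barrier() { std::cout << "resume group\n"; }
};

class Inspector {
 public:
  std::shared_ptr<Barrier> connect(int groupId) {
    auto it = m_barriers.find(groupId);
    if (it != m_barriers.end()) {
      // May be null if the pre-existing barrier was already released;
      // by design we do not create a fresh one in that case.
      return it->second.lock();
    }
    auto barrier = std::make_shared<Barrier>();
    m_barriers.emplace(groupId, barrier);
    return barrier;
  }

 private:
  std::unordered_map<int, std::weak_ptr<Barrier>> m_barriers;
};

int main() {
  Inspector inspector;
  auto s1 = inspector.connect(1);  // creates the barrier for group 1
  auto s2 = inspector.connect(1);  // joins the same barrier
  s1.reset();
  s2.reset();                      // last holder: prints "resume group"
  auto s3 = inspector.connect(1);  // barrier already released: returns null
  std::cout << std::boolalpha << (s3 == nullptr) << "\n";
  return 0;
}
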
@@ -159,7 +175,10 @@ std::unique_ptr<V8InspectorSession> V8InspectorImpl::connect(
void V8InspectorImpl::disconnect(V8InspectorSessionImpl* session) {
auto& map = m_sessions[session->contextGroupId()];
map.erase(session->sessionId());
- if (map.empty()) m_sessions.erase(session->contextGroupId());
+ if (map.empty()) {
+ m_sessions.erase(session->contextGroupId());
+ m_debuggerBarriers.erase(session->contextGroupId());
+ }
}
InspectedContext* V8InspectorImpl::getContext(int groupId,
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 3be16b29f6..0d30cc59f9 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -49,6 +49,7 @@ class V8Console;
class V8ConsoleMessageStorage;
class V8Debugger;
class V8DebuggerAgentImpl;
+class V8DebuggerBarrier;
class V8InspectorSessionImpl;
class V8ProfilerAgentImpl;
class V8RuntimeAgentImpl;
@@ -81,7 +82,8 @@ class V8InspectorImpl : public V8Inspector {
std::unique_ptr<V8InspectorSession> connect(int contextGroupId,
V8Inspector::Channel*,
StringView state,
- ClientTrustLevel) override;
+ ClientTrustLevel,
+ SessionPauseState) override;
void contextCreated(const V8ContextInfo&) override;
void contextDestroyed(v8::Local<v8::Context>) override;
v8::MaybeLocal<v8::Context> contextById(int contextId) override;
@@ -178,6 +180,8 @@ class V8InspectorImpl : public V8Inspector {
// contextGroupId -> sessionId -> session
std::unordered_map<int, std::map<int, V8InspectorSessionImpl*>> m_sessions;
+ // contextGroupId -> debugger barrier
+ std::unordered_map<int, std::weak_ptr<V8DebuggerBarrier>> m_debuggerBarriers;
using ConsoleStorageMap =
std::unordered_map<int, std::unique_ptr<V8ConsoleMessageStorage>>;
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index f7f3e10938..36e11bdf19 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -17,6 +17,7 @@
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console-agent-impl.h"
#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger-barrier.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-heap-profiler-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
@@ -90,15 +91,18 @@ int V8ContextInfo::executionContextId(v8::Local<v8::Context> context) {
std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
V8InspectorImpl* inspector, int contextGroupId, int sessionId,
V8Inspector::Channel* channel, StringView state,
- V8Inspector::ClientTrustLevel clientTrustLevel) {
+ V8Inspector::ClientTrustLevel clientTrustLevel,
+ std::shared_ptr<V8DebuggerBarrier> debuggerBarrier) {
return std::unique_ptr<V8InspectorSessionImpl>(new V8InspectorSessionImpl(
- inspector, contextGroupId, sessionId, channel, state, clientTrustLevel));
+ inspector, contextGroupId, sessionId, channel, state, clientTrustLevel,
+ std::move(debuggerBarrier)));
}
V8InspectorSessionImpl::V8InspectorSessionImpl(
V8InspectorImpl* inspector, int contextGroupId, int sessionId,
V8Inspector::Channel* channel, StringView savedState,
- V8Inspector::ClientTrustLevel clientTrustLevel)
+ V8Inspector::ClientTrustLevel clientTrustLevel,
+ std::shared_ptr<V8DebuggerBarrier> debuggerBarrier)
: m_contextGroupId(contextGroupId),
m_sessionId(sessionId),
m_inspector(inspector),
@@ -116,7 +120,8 @@ V8InspectorSessionImpl::V8InspectorSessionImpl(
m_state->getBoolean("use_binary_protocol", &use_binary_protocol_);
m_runtimeAgent.reset(new V8RuntimeAgentImpl(
- this, this, agentState(protocol::Runtime::Metainfo::domainName)));
+ this, this, agentState(protocol::Runtime::Metainfo::domainName),
+ std::move(debuggerBarrier)));
protocol::Runtime::Dispatcher::wire(&m_dispatcher, m_runtimeAgent.get());
m_debuggerAgent.reset(new V8DebuggerAgentImpl(
@@ -387,7 +392,7 @@ void V8InspectorSessionImpl::dispatchProtocolMessage(StringView message) {
}
v8_crdtp::Dispatchable dispatchable(cbor);
if (!dispatchable.ok()) {
- if (dispatchable.HasCallId()) {
+ if (!dispatchable.HasCallId()) {
m_channel->sendNotification(serializeForFrontend(
v8_crdtp::CreateErrorNotification(dispatchable.DispatchError())));
} else {
@@ -506,4 +511,6 @@ void V8InspectorSessionImpl::triggerPreciseCoverageDeltaUpdate(
m_profilerAgent->triggerPreciseCoverageDeltaUpdate(toString16(occasion));
}
+void V8InspectorSessionImpl::stop() { m_debuggerAgent->stop(); }
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 9de8aa1b49..9e443161c4 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -21,6 +21,7 @@ class InjectedScript;
class RemoteObjectIdBase;
class V8ConsoleAgentImpl;
class V8DebuggerAgentImpl;
+class V8DebuggerBarrier;
class V8InspectorImpl;
class V8HeapProfilerAgentImpl;
class V8ProfilerAgentImpl;
@@ -35,7 +36,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
static std::unique_ptr<V8InspectorSessionImpl> create(
V8InspectorImpl*, int contextGroupId, int sessionId,
V8Inspector::Channel*, StringView state,
- v8_inspector::V8Inspector::ClientTrustLevel);
+ v8_inspector::V8Inspector::ClientTrustLevel,
+ std::shared_ptr<V8DebuggerBarrier>);
~V8InspectorSessionImpl() override;
V8InspectorSessionImpl(const V8InspectorSessionImpl&) = delete;
V8InspectorSessionImpl& operator=(const V8InspectorSessionImpl&) = delete;
@@ -98,6 +100,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
static const unsigned kInspectedObjectBufferSize = 5;
void triggerPreciseCoverageDeltaUpdate(StringView occasion) override;
+ void stop() override;
+
V8Inspector::ClientTrustLevel clientTrustLevel() {
return m_clientTrustLevel;
}
@@ -105,7 +109,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
private:
V8InspectorSessionImpl(V8InspectorImpl*, int contextGroupId, int sessionId,
V8Inspector::Channel*, StringView state,
- V8Inspector::ClientTrustLevel);
+ V8Inspector::ClientTrustLevel,
+ std::shared_ptr<V8DebuggerBarrier>);
protocol::DictionaryValue* agentState(const String16& name);
// protocol::FrontendChannel implementation.
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
index dcd120fded..4adf347843 100644
--- a/deps/v8/src/inspector/v8-regex.cc
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -36,6 +36,8 @@ V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
if (multiline) flags |= v8::RegExp::kMultiline;
v8::Local<v8::RegExp> regex;
+ // Protect against reentrant debugger calls via interrupts.
+ v8::debug::PostponeInterruptsScope no_interrupts(m_inspector->isolate());
if (v8::RegExp::New(context, toV8String(isolate, pattern),
static_cast<v8::RegExp::Flags>(flags))
.ToLocal(&regex))
@@ -65,6 +67,8 @@ int V8Regex::match(const String16& string, int startFrom,
v8::Context::Scope contextScope(context);
v8::MicrotasksScope microtasks(context,
v8::MicrotasksScope::kDoNotRunMicrotasks);
+ // Protect against reentrant debugger calls via interrupts.
+ v8::debug::PostponeInterruptsScope no_interrupts(m_inspector->isolate());
v8::TryCatch tryCatch(isolate);
v8::Local<v8::RegExp> regex = m_regex.Get(isolate);
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 1a3ee11ba1..9a1a845487 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -241,11 +241,13 @@ Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
V8RuntimeAgentImpl::V8RuntimeAgentImpl(
V8InspectorSessionImpl* session, protocol::FrontendChannel* FrontendChannel,
- protocol::DictionaryValue* state)
+ protocol::DictionaryValue* state,
+ std::shared_ptr<V8DebuggerBarrier> debuggerBarrier)
: m_session(session),
m_state(state),
m_frontend(FrontendChannel),
m_inspector(session->inspector()),
+ m_debuggerBarrier(debuggerBarrier),
m_enabled(false) {}
V8RuntimeAgentImpl::~V8RuntimeAgentImpl() = default;
@@ -373,16 +375,22 @@ void V8RuntimeAgentImpl::callFunctionOn(
Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
Maybe<bool> userGesture, Maybe<bool> awaitPromise,
Maybe<int> executionContextId, Maybe<String16> objectGroup,
- Maybe<bool> throwOnSideEffect, Maybe<bool> generateWebDriverValue,
+ Maybe<bool> throwOnSideEffect, Maybe<String16> uniqueContextId,
+ Maybe<bool> generateWebDriverValue,
std::unique_ptr<CallFunctionOnCallback> callback) {
- if (objectId.isJust() && executionContextId.isJust()) {
- callback->sendFailure(Response::ServerError(
- "ObjectId must not be specified together with executionContextId"));
+ int justCount = (objectId.isJust() ? 1 : 0) +
+ (executionContextId.isJust() ? 1 : 0) +
+ (uniqueContextId.isJust() ? 1 : 0);
+ if (justCount > 1) {
+ callback->sendFailure(Response::InvalidParams(
+ "ObjectId, executionContextId and uniqueContextId must mutually "
+ "exclude each other"));
return;
}
- if (!objectId.isJust() && !executionContextId.isJust()) {
- callback->sendFailure(Response::ServerError(
- "Either ObjectId or executionContextId must be specified"));
+ if (justCount < 1) {
+ callback->sendFailure(
+ Response::InvalidParams("Either objectId or executionContextId or "
+ "uniqueContextId must be specified"));
return;
}
WrapMode wrap_mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
@@ -407,8 +415,8 @@ void V8RuntimeAgentImpl::callFunctionOn(
} else {
int contextId = 0;
Response response = ensureContext(m_inspector, m_session->contextGroupId(),
- std::move(executionContextId.fromJust()),
- /* uniqueContextId */ {}, &contextId);
+ std::move(executionContextId),
+ std::move(uniqueContextId), &contextId);
if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
@@ -491,11 +499,13 @@ Response V8RuntimeAgentImpl::releaseObjectGroup(const String16& objectGroup) {
}
Response V8RuntimeAgentImpl::runIfWaitingForDebugger() {
- if (m_runIfWaitingForDebuggerCalled) return Response::Success();
- m_runIfWaitingForDebuggerCalled = true;
- // The client implementation is resposible for checking if the session is
- // actually waiting for debugger. m_runIfWaitingForDebuggerCalled only makes
- // sure that the client implementation is invoked once per agent instance.
+ if (m_debuggerBarrier) {
+ m_debuggerBarrier.reset();
+ return Response::Success();
+ }
+ // TODO(chromium:1352175): the below is provisional until client-side changes
+  // land. The call should come through the barrier only once the client
+  // properly communicates whether the session is waiting for the debugger.
m_inspector->client()->runIfWaitingForDebugger(m_session->contextGroupId());
return Response::Success();
}
@@ -705,7 +715,13 @@ Response V8RuntimeAgentImpl::getHeapUsage(double* out_usedSize,
void V8RuntimeAgentImpl::terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) {
- m_inspector->debugger()->terminateExecution(std::move(callback));
+ v8::HandleScope handles(m_inspector->isolate());
+ v8::Local<v8::Context> defaultContext =
+ m_inspector->client()->ensureDefaultContextInGroup(
+ m_session->contextGroupId());
+
+ m_inspector->debugger()->terminateExecution(defaultContext,
+ std::move(callback));
}
namespace {
@@ -981,7 +997,8 @@ void V8RuntimeAgentImpl::reportExecutionContextDestroyed(
InspectedContext* context) {
if (m_enabled && context->isReported(m_session->sessionId())) {
context->setReported(m_session->sessionId(), false);
- m_frontend.executionContextDestroyed(context->contextId());
+ m_frontend.executionContextDestroyed(context->contextId(),
+ context->uniqueId().toString());
}
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index 7ae40221e8..4bb0e87114 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -49,6 +49,7 @@ class InjectedScript;
class InspectedContext;
class RemoteObjectIdBase;
class V8ConsoleMessage;
+class V8DebuggerBarrier;
class V8InspectorImpl;
class V8InspectorSessionImpl;
@@ -58,7 +59,8 @@ using protocol::Maybe;
class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
public:
V8RuntimeAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
- protocol::DictionaryValue* state);
+ protocol::DictionaryValue* state,
+ std::shared_ptr<V8DebuggerBarrier>);
~V8RuntimeAgentImpl() override;
V8RuntimeAgentImpl(const V8RuntimeAgentImpl&) = delete;
V8RuntimeAgentImpl& operator=(const V8RuntimeAgentImpl&) = delete;
@@ -87,7 +89,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<bool> generatePreview, Maybe<bool> userGesture,
Maybe<bool> awaitPromise, Maybe<int> executionContextId,
Maybe<String16> objectGroup, Maybe<bool> throwOnSideEffect,
- Maybe<bool> generateWebDriverValue,
+ Maybe<String16> uniqueContextId, Maybe<bool> generateWebDriverValue,
std::unique_ptr<CallFunctionOnCallback>) override;
Response releaseObject(const String16& objectId) override;
Response getProperties(
@@ -155,12 +157,12 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
protocol::DictionaryValue* m_state;
protocol::Runtime::Frontend m_frontend;
V8InspectorImpl* m_inspector;
+ std::shared_ptr<V8DebuggerBarrier> m_debuggerBarrier;
bool m_enabled;
std::unordered_map<String16, std::unique_ptr<v8::Global<v8::Script>>>
m_compiledScripts;
// Binding name -> executionContextIds mapping.
std::unordered_map<String16, std::unordered_set<int>> m_activeBindings;
- bool m_runIfWaitingForDebuggerCalled = false;
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index c46de4465d..0b3c0839d3 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -1,4 +1,5 @@
// Copyright 2016 the V8 project authors. All rights reserved.
+//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,6 +11,8 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include <algorithm>
+#include <memory>
+#include <vector>
#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/debug/debug-interface.h"
@@ -313,6 +316,21 @@ StringView V8StackTraceImpl::topFunctionName() const {
return toStringView(m_frames[0]->functionName());
}
+std::vector<V8StackFrame> V8StackTraceImpl::frames() const {
+ std::vector<V8StackFrame> ret;
+ ret.reserve(m_frames.size());
+
+ for (const auto& frame : m_frames) {
+ if (frame) {
+ ret.emplace_back(V8StackFrame{
+ toStringView(frame->sourceURL()), toStringView(frame->functionName()),
+ frame->lineNumber() + 1, frame->columnNumber() + 1});
+ }
+ }
+
+ return ret;
+}
+
std::unique_ptr<protocol::Runtime::StackTrace>
V8StackTraceImpl::buildInspectorObjectImpl(V8Debugger* debugger) const {
return buildInspectorObjectImpl(debugger, m_maxAsyncDepth);
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 221700a195..bf793b0b51 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -84,6 +84,8 @@ class V8StackTraceImpl : public V8StackTrace {
bool isEqualIgnoringTopFrame(V8StackTraceImpl* stackTrace) const;
+ std::vector<V8StackFrame> frames() const override;
+
private:
V8StackTraceImpl(std::vector<std::shared_ptr<StackFrame>> frames,
int maxAsyncDepth,
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 3e98dd95f0..37db97e2c5 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -308,6 +308,15 @@ String16 descriptionForObject(v8::Isolate* isolate,
return toProtocolString(isolate, object->GetConstructorName());
}
+String16 descriptionForProxy(v8::Isolate* isolate, v8::Local<v8::Proxy> proxy) {
+ v8::Local<v8::Value> target = proxy->GetTarget();
+ if (target->IsObject()) {
+ return String16::concat(
+ "Proxy(", descriptionForObject(isolate, target.As<v8::Object>()), ")");
+ }
+ return String16("Proxy");
+}
+
String16 descriptionForDate(v8::Local<v8::Context> context,
v8::Local<v8::Date> date) {
v8::Isolate* isolate = context->GetIsolate();
@@ -394,6 +403,24 @@ String16 descriptionForFunction(v8::Local<v8::Function> value) {
return toProtocolString(isolate, description);
}
+String16 descriptionForPrivateMethodList(v8::Local<v8::Array> list) {
+ return String16::concat(
+ "PrivateMethods[",
+ String16::fromInteger(static_cast<size_t>(list->Length())), ']');
+}
+
+String16 descriptionForPrivateMethod(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Value> value;
+ if (!object->GetRealNamedProperty(context, toV8String(isolate, "value"))
+ .ToLocal(&value)) {
+ return String16();
+ }
+ DCHECK(value->IsFunction());
+ return descriptionForFunction(value.As<v8::Function>());
+}
+
class PrimitiveValueMirror final : public ValueMirror {
public:
PrimitiveValueMirror(v8::Local<v8::Value> value, const String16& type)
@@ -1286,7 +1313,6 @@ void nativeSetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
!object->IsObject()) {
return;
}
- v8::Local<v8::Value> value;
if (!object.As<v8::Object>()->Set(context, name, info[0]).IsNothing()) return;
}
@@ -1376,7 +1402,8 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
object = value.As<v8::Object>();
}
}
- if (internalType == V8InternalValueType::kScopeList) {
+ if (internalType == V8InternalValueType::kScopeList ||
+ internalType == V8InternalValueType::kPrivateMethodList) {
if (!set->Add(context, toV8String(isolate, "length")).ToLocal(&set)) {
return false;
}
@@ -1570,11 +1597,13 @@ std::vector<PrivatePropertyMirror> ValueMirror::getPrivateProperties(
v8::MicrotasksScope microtasksScope(context,
v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::TryCatch tryCatch(isolate);
- v8::Local<v8::Array> privateProperties;
std::vector<v8::Local<v8::Value>> names;
std::vector<v8::Local<v8::Value>> values;
- if (!v8::debug::GetPrivateMembers(context, object, &names, &values))
+ int filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateAccessors) |
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateFields);
+ if (!v8::debug::GetPrivateMembers(context, object, filter, &names, &values))
return mirrors;
size_t len = values.size();
@@ -1688,7 +1717,8 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
}
if (value->IsProxy()) {
return std::make_unique<ObjectMirror>(
- value, RemoteObject::SubtypeEnum::Proxy, "Proxy");
+ value, RemoteObject::SubtypeEnum::Proxy,
+ descriptionForProxy(isolate, value.As<v8::Proxy>()));
}
if (value->IsFunction()) {
return std::make_unique<FunctionMirror>(value);
@@ -1790,6 +1820,12 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
value, "internal#scopeList",
descriptionForScopeList(value.As<v8::Array>()));
}
+ if (value->IsArray() &&
+ internalType == V8InternalValueType::kPrivateMethodList) {
+ return std::make_unique<ObjectMirror>(
+ value, "internal#privateMethodList",
+ descriptionForPrivateMethodList(value.As<v8::Array>()));
+ }
if (value->IsObject() && internalType == V8InternalValueType::kEntry) {
return std::make_unique<ObjectMirror>(
value, "internal#entry",
@@ -1800,6 +1836,12 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
value, "internal#scope",
descriptionForScope(context, value.As<v8::Object>()));
}
+ if (value->IsObject() &&
+ internalType == V8InternalValueType::kPrivateMethod) {
+ return std::make_unique<ObjectMirror>(
+ value, "internal#privateMethod",
+ descriptionForPrivateMethod(context, value.As<v8::Object>()));
+ }
size_t length = 0;
if (value->IsArray() || isArrayLike(context, value, &length)) {
length = value->IsArray() ? value.As<v8::Array>()->Length() : length;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 784d88a9a8..7bf855afb8 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -922,13 +922,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::SetKeyedProperty(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::DefineKeyedOwnProperty(
- Register object, Register key, int feedback_slot) {
+ Register object, Register key, DefineKeyedOwnPropertyFlags flags,
+ int feedback_slot) {
// Ensure that the IC uses a strict language mode, as this is the only
// supported mode for this use case.
DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(
FeedbackVector::ToSlot(feedback_slot))),
LanguageMode::kStrict);
- OutputDefineKeyedOwnProperty(object, key, feedback_slot);
+ OutputDefineKeyedOwnProperty(object, key, flags, feedback_slot);
return *this;
}
@@ -1604,8 +1605,9 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use>
void BytecodeArrayBuilder::PrepareToOutputBytecode() {
- if (register_optimizer_)
+ if (register_optimizer_) {
register_optimizer_->PrepareForBytecode<bytecode, implicit_register_use>();
+ }
}
uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index a9d2199840..cf75af4273 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -191,8 +191,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Define an own property keyed by a value in a register, trigger the
// defineProperty traps if necessary. The value to be defined should be
// in the accumulator.
- BytecodeArrayBuilder& DefineKeyedOwnProperty(Register object, Register key,
- int feedback_slot);
+ BytecodeArrayBuilder& DefineKeyedOwnProperty(
+ Register object, Register key, DefineKeyedOwnPropertyFlags flags,
+ int feedback_slot);
// Store an own element in an array literal. The value to be stored should be
// in the accumulator.
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index c63c46c0b9..e5f7296274 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -75,6 +75,19 @@ TestTypeOfFlags::LiteralFlag TestTypeOfFlags::Decode(uint8_t raw_flag) {
}
// static
+const char* TestTypeOfFlags::ToString(LiteralFlag literal_flag) {
+ switch (literal_flag) {
+#define CASE(Name, name) \
+ case LiteralFlag::k##Name: \
+ return #name;
+ TYPEOF_LITERAL_LIST(CASE)
+#undef CASE
+ default:
+ return "<invalid>";
+ }
+}
+
+// static
uint8_t StoreLookupSlotFlags::Encode(LanguageMode language_mode,
LookupHoistingMode lookup_hoisting_mode) {
DCHECK_IMPLIES(lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy,
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index c6c41c7580..0df20d764f 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -75,6 +75,8 @@ class TestTypeOfFlags {
static uint8_t Encode(LiteralFlag literal_flag);
static LiteralFlag Decode(uint8_t raw_flag);
+ static const char* ToString(LiteralFlag literal_flag);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TestTypeOfFlags);
};
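
The new TestTypeOfFlags::ToString above is driven by the same X-macro list as Encode and Decode. A minimal compilable sketch of that pattern follows; the literal list here is made up for illustration and is not V8's actual TYPEOF_LITERAL_LIST.

#include <cstdint>
#include <cstdio>

// Made-up literal list standing in for TYPEOF_LITERAL_LIST.
#define TYPEOF_LITERAL_LIST_SKETCH(V) \
  V(Number, number)                   \
  V(String, string)                   \
  V(Undefined, undefined)             \
  V(Other, other)

enum class LiteralFlag : uint8_t {
#define DECLARE_FLAG(Name, name) k##Name,
  TYPEOF_LITERAL_LIST_SKETCH(DECLARE_FLAG)
#undef DECLARE_FLAG
};

// Same shape as the new ToString(): one case per list entry, returning the
// lower-case literal spelling.
const char* ToString(LiteralFlag literal_flag) {
  switch (literal_flag) {
#define CASE(Name, name)     \
  case LiteralFlag::k##Name: \
    return #name;
    TYPEOF_LITERAL_LIST_SKETCH(CASE)
#undef CASE
    default:
      return "<invalid>";
  }
}

int main() {
  std::printf("%s\n", ToString(LiteralFlag::kString));  // prints "string"
  return 0;
}
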
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 706d897d8a..acaa1dae45 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -896,6 +896,8 @@ class V8_NODISCARD BytecodeGenerator::MultipleEntryBlockContextScope {
}
}
+ ~MultipleEntryBlockContextScope() { DCHECK(!is_in_scope_); }
+
MultipleEntryBlockContextScope(const MultipleEntryBlockContextScope&) =
delete;
MultipleEntryBlockContextScope& operator=(
@@ -906,12 +908,9 @@ class V8_NODISCARD BytecodeGenerator::MultipleEntryBlockContextScope {
DCHECK(inner_context_.is_valid());
DCHECK(outer_context_.is_valid());
DCHECK(!is_in_scope_);
- Register temp = generator_->register_allocator()->NewRegister();
- generator_->builder()->StoreAccumulatorInRegister(temp);
generator_->builder()->LoadAccumulatorWithRegister(inner_context_);
current_scope_.emplace(generator_, scope_);
context_scope_.emplace(generator_, scope_, outer_context_);
- generator_->builder()->LoadAccumulatorWithRegister(temp);
is_in_scope_ = true;
}
@@ -919,11 +918,8 @@ class V8_NODISCARD BytecodeGenerator::MultipleEntryBlockContextScope {
DCHECK(inner_context_.is_valid());
DCHECK(outer_context_.is_valid());
DCHECK(is_in_scope_);
- Register temp = generator_->register_allocator()->NewRegister();
- generator_->builder()->StoreAccumulatorInRegister(temp);
context_scope_ = base::nullopt;
current_scope_ = base::nullopt;
- generator_->builder()->LoadAccumulatorWithRegister(temp);
is_in_scope_ = false;
}
@@ -2454,7 +2450,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
// Finish the iteration in the finally block.
BuildFinalizeIteration(iterator, done, iteration_continuation_token);
},
- HandlerTable::UNCAUGHT);
+ catch_prediction());
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
@@ -2747,13 +2743,11 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
}
if (expr->instance_members_initializer_function() != nullptr) {
- Register initializer =
- VisitForRegisterValue(expr->instance_members_initializer_function());
+ VisitForAccumulatorValue(expr->instance_members_initializer_function());
FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
builder()
- ->LoadAccumulatorWithRegister(initializer)
- .StoreClassFieldsInitializer(class_constructor, feedback_index(slot))
+ ->StoreClassFieldsInitializer(class_constructor, feedback_index(slot))
.LoadAccumulatorWithRegister(class_constructor);
}
@@ -2762,23 +2756,18 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
// class boilerplate in the future. The name argument can be
// passed to the DefineClass runtime function and have it set
// there.
+ // TODO(v8:13451): Alternatively, port SetFunctionName to an IC so that we
+ // can replace the runtime call with a dedicated bytecode here.
if (name.is_valid()) {
- Register key = register_allocator()->NewRegister();
- builder()
- ->LoadLiteral(ast_string_constants()->name_string())
- .StoreAccumulatorInRegister(key);
-
- DefineKeyedOwnPropertyInLiteralFlags data_property_flags =
- DefineKeyedOwnPropertyInLiteralFlag::kNoFlags;
- FeedbackSlot slot =
- feedback_spec()->AddDefineKeyedOwnPropertyInLiteralICSlot();
+ RegisterAllocationScope inner_register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
builder()
- ->LoadAccumulatorWithRegister(name)
- .DefineKeyedOwnPropertyInLiteral(class_constructor, key,
- data_property_flags,
- feedback_index(slot));
+ ->MoveRegister(class_constructor, args[0])
+ .MoveRegister(name, args[1])
+ .CallRuntime(Runtime::kSetFunctionName, args);
}
+ RegisterAllocationScope inner_register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(1);
Register initializer = VisitForRegisterValue(expr->static_initializer());
@@ -2855,17 +2844,33 @@ void BytecodeGenerator::BuildClassProperty(ClassLiteral::Property* property) {
}
builder()->SetExpressionAsStatementPosition(property->value());
- VisitForAccumulatorValue(property->value());
if (is_literal_store) {
+ VisitForAccumulatorValue(property->value());
FeedbackSlot slot = feedback_spec()->AddDefineNamedOwnICSlot();
builder()->DefineNamedOwnProperty(
builder()->Receiver(),
property->key()->AsLiteral()->AsRawPropertyName(),
feedback_index(slot));
} else {
+ DefineKeyedOwnPropertyFlags flags = DefineKeyedOwnPropertyFlag::kNoFlags;
+ if (property->NeedsSetFunctionName()) {
+ // Static class fields require the name property to be set on
+ // the class, meaning we can't wait until the
+ // DefineKeyedOwnProperty call later to set the name.
+ if (property->value()->IsClassLiteral() &&
+ property->value()->AsClassLiteral()->static_initializer() !=
+ nullptr) {
+ VisitClassLiteral(property->value()->AsClassLiteral(), key);
+ } else {
+ VisitForAccumulatorValue(property->value());
+ flags |= DefineKeyedOwnPropertyFlag::kSetFunctionName;
+ }
+ } else {
+ VisitForAccumulatorValue(property->value());
+ }
FeedbackSlot slot = feedback_spec()->AddDefineKeyedOwnICSlot();
- builder()->DefineKeyedOwnProperty(builder()->Receiver(), key,
+ builder()->DefineKeyedOwnProperty(builder()->Receiver(), key, flags,
feedback_index(slot));
}
}
@@ -2917,7 +2922,9 @@ void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver,
builder()
->StoreAccumulatorInRegister(brand_reg)
.LoadAccumulatorWithRegister(class_context->reg())
- .DefineKeyedOwnProperty(receiver, brand_reg, feedback_index(slot));
+ .DefineKeyedOwnProperty(receiver, brand_reg,
+ DefineKeyedOwnPropertyFlag::kNoFlags,
+ feedback_index(slot));
} else {
// We are in the slow case where super() is called from a nested
// arrow function or an eval(), so the class scope context isn't
@@ -3135,8 +3142,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
feedback_index(slot));
} else {
FeedbackSlot slot = feedback_spec()->AddDefineKeyedOwnICSlot();
- builder()->DefineKeyedOwnProperty(literal, key_reg,
- feedback_index(slot));
+ builder()->DefineKeyedOwnProperty(
+ literal, key_reg, DefineKeyedOwnPropertyFlag::kNoFlags,
+ feedback_index(slot));
}
} else {
VisitForEffect(property->value());
@@ -3233,34 +3241,30 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
object_literal_context_scope.SetEnteredIf(
should_be_in_object_literal_scope);
builder()->SetExpressionPosition(property->value());
- Register value;
-
- // Static class fields require the name property to be set on
- // the class, meaning we can't wait until the
- // DefineKeyedOwnPropertyInLiteral call later to set the name.
- if (property->value()->IsClassLiteral() &&
- property->value()->AsClassLiteral()->static_initializer() !=
- nullptr) {
- value = register_allocator()->NewRegister();
- VisitClassLiteral(property->value()->AsClassLiteral(), key);
- builder()->StoreAccumulatorInRegister(value);
- } else {
- value = VisitForRegisterValue(property->value());
- }
DefineKeyedOwnPropertyInLiteralFlags data_property_flags =
DefineKeyedOwnPropertyInLiteralFlag::kNoFlags;
if (property->NeedsSetFunctionName()) {
- data_property_flags |=
- DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName;
+ // Static class fields require the name property to be set on
+ // the class, meaning we can't wait until the
+ // DefineKeyedOwnPropertyInLiteral call later to set the name.
+ if (property->value()->IsClassLiteral() &&
+ property->value()->AsClassLiteral()->static_initializer() !=
+ nullptr) {
+ VisitClassLiteral(property->value()->AsClassLiteral(), key);
+ } else {
+ data_property_flags |=
+ DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName;
+ VisitForAccumulatorValue(property->value());
+ }
+ } else {
+ VisitForAccumulatorValue(property->value());
}
FeedbackSlot slot =
feedback_spec()->AddDefineKeyedOwnPropertyInLiteralICSlot();
- builder()
- ->LoadAccumulatorWithRegister(value)
- .DefineKeyedOwnPropertyInLiteral(literal, key, data_property_flags,
- feedback_index(slot));
+ builder()->DefineKeyedOwnPropertyInLiteral(
+ literal, key, data_property_flags, feedback_index(slot));
break;
}
case ObjectLiteral::Property::GETTER:
@@ -3302,11 +3306,15 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- builder()->LoadAccumulatorWithRegister(literal);
if (home_object != nullptr) {
object_literal_context_scope.SetEnteredIf(true);
+ builder()->LoadAccumulatorWithRegister(literal);
BuildVariableAssignment(home_object, Token::INIT, HoleCheckMode::kElided);
}
+ // Make sure to exit the scope before materialising the value into the
+ // accumulator, to prevent the context scope from clobbering it.
+ object_literal_context_scope.SetEnteredIf(false);
+ builder()->LoadAccumulatorWithRegister(literal);
}
// Fill an array with values from an iterator, starting at a given index. It is
@@ -3638,8 +3646,15 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
feedback_index(slot), depth);
break;
}
- default:
+ default: {
+ // Normally, private names should not be looked up dynamically,
+ // but we make an exception for debug-evaluate; in that case the
+ // lookup is done by the %SetPrivateMember() and %GetPrivateMember()
+ // calls, not here.
+ DCHECK(!variable->raw_name()->IsPrivateName());
builder()->LoadLookupSlot(variable->raw_name(), typeof_mode);
+ break;
+ }
}
break;
}
@@ -3944,6 +3959,14 @@ BytecodeGenerator::AssignmentLhsData::PrivateMethodOrAccessor(
}
// static
BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::PrivateDebugEvaluate(AssignType type,
+ Property* property,
+ Register object) {
+ return AssignmentLhsData(type, property, RegisterList(), object, Register(),
+ nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::KeyedSuperProperty(
RegisterList super_property_args) {
return AssignmentLhsData(KEYED_SUPER_PROPERTY, nullptr, super_property_args,
@@ -3984,6 +4007,13 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
return AssignmentLhsData::PrivateMethodOrAccessor(assign_type, property,
object, key);
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ // Do not visit the key here; instead we will look it up at run time.
+ return AssignmentLhsData::PrivateDebugEvaluate(assign_type, property,
+ object);
+ }
case NAMED_SUPER_PROPERTY: {
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
RegisterList super_property_args =
@@ -4105,7 +4135,7 @@ void BytecodeGenerator::BuildFinalizeIteration(
.ReThrow()
.Bind(&suppress_close_exception);
},
- HandlerTable::UNCAUGHT);
+ catch_prediction());
}
iterator_is_done.Bind(builder());
@@ -4560,6 +4590,16 @@ void BytecodeGenerator::BuildAssignment(
}
break;
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ Property* property = lhs_data.expr()->AsProperty();
+ BuildPrivateDebugDynamicSet(property, lhs_data.object(), value);
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+ break;
+ }
}
}
@@ -4631,6 +4671,11 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
lhs_data.expr()->AsProperty());
break;
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ Property* property = lhs_data.expr()->AsProperty();
+ BuildPrivateDebugDynamicGet(property, lhs_data.object());
+ break;
+ }
}
BinaryOperation* binop = expr->binary_operation();
@@ -5143,9 +5188,41 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
VisitForAccumulatorValue(property->key());
break;
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ BuildPrivateDebugDynamicGet(property, obj);
+ break;
+ }
}
}
+void BytecodeGenerator::BuildPrivateDebugDynamicGet(Property* property,
+ Register obj) {
+ RegisterAllocationScope scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+
+ Variable* private_name = property->key()->AsVariableProxy()->var();
+ builder()
+ ->MoveRegister(obj, args[0])
+ .LoadLiteral(private_name->raw_name())
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kGetPrivateMember, args);
+}
+
+void BytecodeGenerator::BuildPrivateDebugDynamicSet(Property* property,
+ Register obj,
+ Register value) {
+ RegisterAllocationScope scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(3);
+
+ Variable* private_name = property->key()->AsVariableProxy()->var();
+ builder()
+ ->MoveRegister(obj, args[0])
+ .LoadLiteral(private_name->raw_name())
+ .StoreAccumulatorInRegister(args[1])
+ .MoveRegister(value, args[2])
+ .CallRuntime(Runtime::kSetPrivateMember, args);
+}
+
void BytecodeGenerator::BuildPrivateGetterAccess(Register object,
Register accessor_pair) {
RegisterAllocationScope scope(this);
@@ -5915,9 +5992,15 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
// and strict modes.
Property* property = expr->AsProperty();
DCHECK(!property->IsPrivateReference());
- Register object = VisitForRegisterValue(property->obj());
- VisitForAccumulatorValue(property->key());
- builder()->Delete(object, language_mode());
+ if (property->IsSuperAccess()) {
+ // Delete of super access is not allowed.
+ VisitForEffect(property->key());
+ builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
+ } else {
+ Register object = VisitForRegisterValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ builder()->Delete(object, language_mode());
+ }
} else if (expr->IsOptionalChain()) {
Expression* expr_inner = expr->AsOptionalChain()->expression();
if (expr_inner->IsProperty()) {
@@ -6081,6 +6164,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
BuildPrivateGetterAccess(object, key);
break;
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ object = VisitForRegisterValue(property->obj());
+ BuildPrivateDebugDynamicGet(property, object);
+ break;
+ }
}
// Save result for postfix expressions.
@@ -6161,6 +6249,12 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case PRIVATE_DEBUG_DYNAMIC: {
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ BuildPrivateDebugDynamicSet(property, object, value);
+ break;
+ }
}
// Restore old value for postfix expressions.
@@ -6594,7 +6688,9 @@ void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
void BytecodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
- builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
+ // Handled by VisitAssignment(), VisitCall(), VisitDelete() and
+ // VisitPropertyLoad().
+ UNREACHABLE();
}
void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
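
For the PRIVATE_DEBUG_DYNAMIC paths added above, the generator defers private-name resolution to the %GetPrivateMember and %SetPrivateMember runtime functions instead of binding the name statically. The toy model below only sketches the shape of that contract, i.e. receiver plus the raw "#name" string looked up at run time; it is not V8 code, and the map container and error handling are stand-ins.

#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

using Object = std::map<std::string, int>;  // stand-in for a JS receiver

int GetPrivateMember(Object& obj, const std::string& name) {
  auto it = obj.find(name);
  if (it == obj.end()) throw std::runtime_error("private member not found");
  return it->second;
}

void SetPrivateMember(Object& obj, const std::string& name, int value) {
  if (obj.find(name) == obj.end()) {
    throw std::runtime_error("private member not found");
  }
  obj[name] = value;
}

int main() {
  Object receiver{{"#counter", 1}};
  // Debug-evaluating `receiver.#counter += 1` conceptually lowers to:
  SetPrivateMember(receiver, "#counter",
                   GetPrivateMember(receiver, "#counter") + 1);
  std::printf("#counter = %d\n", receiver["#counter"]);
  return 0;
}
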
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 63174d44fc..a114c0dfdf 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -102,17 +102,24 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Property* property,
Register object,
Register key);
+ static AssignmentLhsData PrivateDebugEvaluate(AssignType type,
+ Property* property,
+ Register object);
static AssignmentLhsData NamedSuperProperty(
RegisterList super_property_args);
static AssignmentLhsData KeyedSuperProperty(
RegisterList super_property_args);
AssignType assign_type() const { return assign_type_; }
- Expression* expr() const {
- DCHECK(assign_type_ == NON_PROPERTY || assign_type_ == PRIVATE_METHOD ||
+ bool is_private_assign_type() const {
+ return assign_type_ == PRIVATE_METHOD ||
assign_type_ == PRIVATE_GETTER_ONLY ||
assign_type_ == PRIVATE_SETTER_ONLY ||
- assign_type_ == PRIVATE_GETTER_AND_SETTER);
+ assign_type_ == PRIVATE_GETTER_AND_SETTER ||
+ assign_type_ == PRIVATE_DEBUG_DYNAMIC;
+ }
+ Expression* expr() const {
+ DCHECK(assign_type_ == NON_PROPERTY || is_private_assign_type());
return expr_;
}
Expression* object_expr() const {
@@ -121,17 +128,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
}
Register object() const {
DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY ||
- assign_type_ == PRIVATE_METHOD ||
- assign_type_ == PRIVATE_GETTER_ONLY ||
- assign_type_ == PRIVATE_SETTER_ONLY ||
- assign_type_ == PRIVATE_GETTER_AND_SETTER);
+ is_private_assign_type());
return object_;
}
Register key() const {
- DCHECK(assign_type_ == KEYED_PROPERTY || assign_type_ == PRIVATE_METHOD ||
- assign_type_ == PRIVATE_GETTER_ONLY ||
- assign_type_ == PRIVATE_SETTER_ONLY ||
- assign_type_ == PRIVATE_GETTER_AND_SETTER);
+ DCHECK((assign_type_ == KEYED_PROPERTY || is_private_assign_type()) &&
+ assign_type_ != PRIVATE_DEBUG_DYNAMIC);
return key_;
}
const AstRawString* name() const {
@@ -332,6 +334,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildPrivateGetterAccess(Register obj, Register access_pair);
void BuildPrivateSetterAccess(Register obj, Register access_pair,
Register value);
+ void BuildPrivateDebugDynamicGet(Property* property, Register obj);
+ void BuildPrivateDebugDynamicSet(Property* property, Register obj,
+ Register value);
void BuildPrivateMethods(ClassLiteral* expr, bool is_static,
Register home_object);
void BuildClassProperty(ClassLiteral::Property* property);
diff --git a/deps/v8/src/interpreter/bytecode-operands.cc b/deps/v8/src/interpreter/bytecode-operands.cc
index 9e6a66769c..bfba1cfb5a 100644
--- a/deps/v8/src/interpreter/bytecode-operands.cc
+++ b/deps/v8/src/interpreter/bytecode-operands.cc
@@ -21,8 +21,12 @@ const char* ImplicitRegisterUseToString(
return "ReadAccumulator";
case ImplicitRegisterUse::kWriteAccumulator:
return "WriteAccumulator";
+ case ImplicitRegisterUse::kClobberAccumulator:
+ return "ClobberAccumulator";
case ImplicitRegisterUse::kWriteShortStar:
return "WriteShortStar";
+ case ImplicitRegisterUse::kReadAndClobberAccumulator:
+ return "ReadAndClobberAccumulator";
case ImplicitRegisterUse::kReadWriteAccumulator:
return "ReadWriteAccumulator";
case ImplicitRegisterUse::kReadAccumulatorWriteShortStar:
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 68e6800169..42d7445df0 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -114,8 +114,10 @@ enum class ImplicitRegisterUse : uint8_t {
kNone = 0,
kReadAccumulator = 1 << 0,
kWriteAccumulator = 1 << 1,
- kWriteShortStar = 1 << 2,
+ kClobberAccumulator = 1 << 2,
+ kWriteShortStar = 1 << 3,
kReadWriteAccumulator = kReadAccumulator | kWriteAccumulator,
+ kReadAndClobberAccumulator = kReadAccumulator | kClobberAccumulator,
kReadAccumulatorWriteShortStar = kReadAccumulator | kWriteShortStar
};
@@ -187,6 +189,24 @@ class BytecodeOperands : public AllStatic {
ImplicitRegisterUse::kWriteAccumulator;
}
+ // Returns true if |implicit_register_use| clobbers the
+ // accumulator.
+ static constexpr bool ClobbersAccumulator(
+ ImplicitRegisterUse implicit_register_use) {
+ return (implicit_register_use & ImplicitRegisterUse::kClobberAccumulator) ==
+ ImplicitRegisterUse::kClobberAccumulator;
+ }
+
+ // Returns true if |implicit_register_use| writes or clobbers the
+ // accumulator.
+ static constexpr bool WritesOrClobbersAccumulator(
+ ImplicitRegisterUse implicit_register_use) {
+ return (implicit_register_use &
+ (ImplicitRegisterUse::kWriteAccumulator |
+ ImplicitRegisterUse::kClobberAccumulator)) !=
+ ImplicitRegisterUse::kNone;
+ }
+
// Returns true if |implicit_register_use| writes to a
// register not specified by an operand.
static constexpr bool WritesImplicitRegister(
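
The kClobberAccumulator bit introduced above lets a bytecode declare that it overwrites the accumulator with a value callers must not rely on: the register optimizer still materializes the previous accumulator contents, while the handlers are free to store an arbitrary result. A self-contained sketch of the bit tests follows; the enum values are copied from the hunk, and the operators and main() are added here only so it compiles on its own.

#include <cstdint>
#include <cstdio>

enum class ImplicitRegisterUse : uint8_t {
  kNone = 0,
  kReadAccumulator = 1 << 0,
  kWriteAccumulator = 1 << 1,
  kClobberAccumulator = 1 << 2,
  kWriteShortStar = 1 << 3,
  kReadWriteAccumulator = kReadAccumulator | kWriteAccumulator,
  kReadAndClobberAccumulator = kReadAccumulator | kClobberAccumulator,
};

constexpr ImplicitRegisterUse operator|(ImplicitRegisterUse a,
                                        ImplicitRegisterUse b) {
  return static_cast<ImplicitRegisterUse>(static_cast<uint8_t>(a) |
                                          static_cast<uint8_t>(b));
}
constexpr ImplicitRegisterUse operator&(ImplicitRegisterUse a,
                                        ImplicitRegisterUse b) {
  return static_cast<ImplicitRegisterUse>(static_cast<uint8_t>(a) &
                                          static_cast<uint8_t>(b));
}

// Same logic as the new BytecodeOperands predicates.
constexpr bool ClobbersAccumulator(ImplicitRegisterUse use) {
  return (use & ImplicitRegisterUse::kClobberAccumulator) ==
         ImplicitRegisterUse::kClobberAccumulator;
}
constexpr bool WritesOrClobbersAccumulator(ImplicitRegisterUse use) {
  return (use & (ImplicitRegisterUse::kWriteAccumulator |
                 ImplicitRegisterUse::kClobberAccumulator)) !=
         ImplicitRegisterUse::kNone;
}

int main() {
  constexpr auto kStaGlobalUse =
      ImplicitRegisterUse::kReadAndClobberAccumulator;
  std::printf("clobbers=%d writes_or_clobbers=%d\n",
              ClobbersAccumulator(kStaGlobalUse),
              WritesOrClobbersAccumulator(kStaGlobalUse));
  return 0;
}
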
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 52123df198..65deb09c24 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -94,7 +94,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
- if (BytecodeOperands::WritesAccumulator(implicit_register_use)) {
+ if (BytecodeOperands::WritesOrClobbersAccumulator(implicit_register_use)) {
PrepareOutputRegister(accumulator_);
}
}
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 3919daeb20..72dfec59a9 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -105,8 +105,8 @@ namespace interpreter {
OperandType::kIdx) \
V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaGlobal, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx, \
- OperandType::kIdx) \
+ V(StaGlobal, ImplicitRegisterUse::kReadAndClobberAccumulator, \
+ OperandType::kIdx, OperandType::kIdx) \
\
/* Context operations */ \
V(StaContextSlot, ImplicitRegisterUse::kReadAccumulator, OperandType::kReg, \
@@ -144,15 +144,16 @@ namespace interpreter {
OperandType::kImm, OperandType::kUImm) \
\
/* Property stores (StoreIC) operations */ \
- V(SetNamedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(SetNamedProperty, ImplicitRegisterUse::kReadAndClobberAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
- V(DefineNamedOwnProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(DefineNamedOwnProperty, ImplicitRegisterUse::kReadAndClobberAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
- V(SetKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(SetKeyedProperty, ImplicitRegisterUse::kReadAndClobberAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(DefineKeyedOwnProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
- OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(StaInArrayLiteral, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(DefineKeyedOwnProperty, ImplicitRegisterUse::kReadAndClobberAccumulator, \
+ OperandType::kReg, OperandType::kReg, OperandType::kFlag8, \
+ OperandType::kIdx) \
+ V(StaInArrayLiteral, ImplicitRegisterUse::kReadAndClobberAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
V(DefineKeyedOwnPropertyInLiteral, ImplicitRegisterUse::kReadAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kFlag8, \
@@ -394,7 +395,7 @@ namespace interpreter {
\
/* Complex flow control For..in */ \
V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
- V(ForInPrepare, ImplicitRegisterUse::kReadWriteAccumulator, \
+ V(ForInPrepare, ImplicitRegisterUse::kReadAndClobberAccumulator, \
OperandType::kRegOutTriple, OperandType::kIdx) \
V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg) \
@@ -655,6 +656,18 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
GetImplicitRegisterUse(bytecode));
}
+ // Returns true if |bytecode| clobbers the accumulator.
+ static bool ClobbersAccumulator(Bytecode bytecode) {
+ return BytecodeOperands::ClobbersAccumulator(
+ GetImplicitRegisterUse(bytecode));
+ }
+
+ // Returns true if |bytecode| writes or clobbers the accumulator.
+ static bool WritesOrClobbersAccumulator(Bytecode bytecode) {
+ return BytecodeOperands::WritesOrClobbersAccumulator(
+ GetImplicitRegisterUse(bytecode));
+ }
+
// Returns true if |bytecode| writes to a register not specified by an
// operand.
static bool WritesImplicitRegister(Bytecode bytecode) {
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index deca631416..ef4f781b8a 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -165,6 +165,13 @@ void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
accumulator_ = value;
}
+void InterpreterAssembler::ClobberAccumulator(TNode<Object> clobber_value) {
+ DCHECK(Bytecodes::ClobbersAccumulator(bytecode_));
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kClobberAccumulator;
+ accumulator_ = clobber_value;
+}
+
TNode<Context> InterpreterAssembler::GetContext() {
return CAST(LoadRegister(Register::current_context()));
}
@@ -730,7 +737,7 @@ void InterpreterAssembler::CallJSAndDispatch(
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
- TNode<CodeT> code_target = HeapConstant(callable.code());
+ TNode<Code> code_target = HeapConstant(callable.code());
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
args_count, args.base_reg_location(),
@@ -751,7 +758,7 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
Callable callable = CodeFactory::Call(isolate());
- TNode<CodeT> code_target = HeapConstant(callable.code());
+ TNode<Code> code_target = HeapConstant(callable.code());
arg_count = JSParameterCount(arg_count);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@@ -786,17 +793,22 @@ template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
+ TNode<UintPtrT> slot_id) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
+
+#ifndef V8_JITLESS
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
LazyNode<Object> receiver = [=] { return LoadRegisterAtOperandIndex(1); };
CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
slot_id);
+#endif // !V8_JITLESS
+
Comment("call using CallWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
- TNode<CodeT> code_target = HeapConstant(callable.code());
+ TNode<Code> code_target = HeapConstant(callable.code());
TNode<Word32T> args_count = args.reg_count();
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
@@ -853,15 +865,16 @@ TNode<Object> InterpreterAssembler::Construct(
TNode<Object> InterpreterAssembler::ConstructWithSpread(
TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
- const RegListNodePair& args, TNode<UintPtrT> slot_id,
- TNode<HeapObject> maybe_feedback_vector) {
+ const RegListNodePair& args, TNode<UintPtrT> slot_id) {
// TODO(bmeurer): Unify this with the Construct bytecode feedback
// above once we have a way to pass the AllocationSite to the Array
// constructor _and_ spread the last argument at the same time.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+
+#ifndef V8_JITLESS
Label extra_checks(this, Label::kDeferred), construct(this);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
GotoIf(IsUndefined(maybe_feedback_vector), &construct);
-
TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
// Increment the call count.
@@ -965,6 +978,7 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
}
BIND(&construct);
+#endif // !V8_JITLESS
Comment("call using ConstructWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
@@ -981,7 +995,7 @@ TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallRuntime(bytecode_));
Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
- TNode<CodeT> code_target = HeapConstant(callable.code());
+ TNode<Code> code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
@@ -1360,6 +1374,7 @@ void InterpreterAssembler::OnStackReplacement(
// 2) Presence of cached OSR Sparkplug code.
// 3) The OSR urgency exceeds the current loop depth - in that case, trigger
// a Turbofan OSR compilation.
+
TVARIABLE(Object, maybe_target_code, SmiConstant(0));
Label osr_to_turbofan(this), osr_to_sparkplug(this);
@@ -1391,7 +1406,7 @@ void InterpreterAssembler::OnStackReplacement(
LoadFunctionClosure(), JSFunction::kSharedFunctionInfoOffset);
TNode<HeapObject> sfi_data = LoadObjectField<HeapObject>(
sfi, SharedFunctionInfo::kFunctionDataOffset);
- GotoIf(InstanceTypeEqual(LoadInstanceType(sfi_data), CODET_TYPE),
+ GotoIf(InstanceTypeEqual(LoadInstanceType(sfi_data), CODE_TYPE),
&osr_to_sparkplug);
// Case 3).
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index a809d00aa6..4a5c165f58 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -74,6 +74,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Accumulator.
TNode<Object> GetAccumulator();
void SetAccumulator(TNode<Object> value);
+ void ClobberAccumulator(TNode<Object> clobber_value);
// Context.
TNode<Context> GetContext();
@@ -143,10 +144,26 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<JSFunction> LoadFunctionClosure();
- // Load the FeedbackVector for the current function. The retuned node could be
- // undefined.
+ // Load the FeedbackVector for the current function. The returned node could
+ // be undefined.
TNode<HeapObject> LoadFeedbackVector();
+ TNode<HeapObject> LoadFeedbackVectorOrUndefinedIfJitless() {
+#ifndef V8_JITLESS
+ return LoadFeedbackVector();
+#else
+ return UndefinedConstant();
+#endif // V8_JITLESS
+ }
+
+ static constexpr UpdateFeedbackMode DefaultUpdateFeedbackMode() {
+#ifndef V8_JITLESS
+ return UpdateFeedbackMode::kOptionalFeedback;
+#else
+ return UpdateFeedbackMode::kNoFeedback;
+#endif // !V8_JITLESS
+ }
+
// Call JSFunction or Callable |function| with |args| arguments, possibly
// including the receiver depending on |receiver_mode|. After the call returns
// directly dispatches to the next bytecode.
@@ -169,8 +186,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void CallJSWithSpreadAndDispatch(TNode<Object> function,
TNode<Context> context,
const RegListNodePair& args,
- TNode<UintPtrT> slot_id,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<UintPtrT> slot_id);
// Call constructor |target| with |args| arguments (not including receiver).
// The |new_target| is the same as the |target| for the new keyword, but
@@ -188,8 +204,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<Context> context,
TNode<Object> new_target,
const RegListNodePair& args,
- TNode<UintPtrT> slot_id,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<UintPtrT> slot_id);
// Call runtime function with |args| arguments.
template <class T = Object>
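
The two helpers above centralize the V8_JITLESS special-casing that the interpreter-generator hunks below rely on: without a JIT there is no feedback vector to load, so call sites get an undefined placeholder and kNoFeedback instead of kOptionalFeedback. A compilable sketch of the pattern, using simplified stand-in types rather than V8's TNode machinery:

#include <cstdio>

enum class UpdateFeedbackMode { kOptionalFeedback, kNoFeedback };

// Simplified stand-in for TNode<HeapObject>: only tracks whether the
// "vector" is the undefined sentinel.
struct MaybeFeedbackVector {
  bool is_undefined;
};

MaybeFeedbackVector LoadFeedbackVector() { return {false}; }

MaybeFeedbackVector LoadFeedbackVectorOrUndefinedIfJitless() {
#ifndef V8_JITLESS
  return LoadFeedbackVector();
#else
  return {true};  // UndefinedConstant() stand-in
#endif
}

constexpr UpdateFeedbackMode DefaultUpdateFeedbackMode() {
#ifndef V8_JITLESS
  return UpdateFeedbackMode::kOptionalFeedback;
#else
  return UpdateFeedbackMode::kNoFeedback;
#endif
}

int main() {
  MaybeFeedbackVector vector = LoadFeedbackVectorOrUndefinedIfJitless();
  bool optional =
      DefaultUpdateFeedbackMode() == UpdateFeedbackMode::kOptionalFeedback;
  std::printf("vector undefined: %d, optional feedback: %d\n",
              vector.is_undefined, optional);
  return 0;
}
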
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 82f053300c..672ec482d4 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -236,11 +236,11 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<Object> result = CallBuiltin(Builtin::kStoreGlobalIC, context, name,
value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
- // the accumulator, we overwrite the accumulator after the IC call. It
+ // the accumulator, we clobber the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ ClobberAccumulator(result);
Dispatch();
}
@@ -604,11 +604,11 @@ class InterpreterSetNamedPropertyAssembler : public InterpreterAssembler {
TNode<Object> result =
CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
- // the accumulator, we overwrite the accumulator after the IC call. It
+ // the accumulator, we clobber the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ ClobberAccumulator(result);
Dispatch();
}
};
@@ -659,18 +659,19 @@ IGNITION_HANDLER(SetKeyedProperty, InterpreterAssembler) {
TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
- // the accumulator, we overwrite the accumulator after the IC call. It
+ // the accumulator, we clobber the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ ClobberAccumulator(result);
Dispatch();
}
-// DefineKeyedOwnProperty <object> <key> <slot>
+// DefineKeyedOwnProperty <object> <key> <flags> <slot>
//
// Calls the DefineKeyedOwnIC at FeedbackVector slot <slot> for <object> and
-// the key <key> with the value in the accumulator.
+// the key <key> with the value in the accumulator. Whether the function
+// name should be set is encoded in the DefineKeyedOwnPropertyFlags <flags>.
//
// This is similar to SetKeyedProperty, but avoids checking the prototype
// chain, and in the case of private names, throws if the private name already
@@ -679,19 +680,21 @@ IGNITION_HANDLER(DefineKeyedOwnProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
+ TNode<Smi> flags =
+ SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag8(2)));
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(3);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kDefineKeyedOwnIC, context, object, name,
- value, slot, maybe_vector);
+ TNode<Object> result =
+ CallBuiltin(Builtin::kDefineKeyedOwnIC, context, object, name, value,
+ flags, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
- // the accumulator, we overwrite the accumulator after the IC call. It
+ // the accumulator, we clobber the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ ClobberAccumulator(result);
Dispatch();
}
@@ -711,11 +714,11 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, index, value,
slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
- // the accumulator, we overwrite the accumulator after the IC call. It
+ // the accumulator, we clobber the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ ClobberAccumulator(result);
Dispatch();
}
@@ -865,13 +868,14 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
TNode<Object> rhs = GetAccumulator();
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
BinaryOpAssembler binop_asm(state());
- TNode<Object> result =
- (binop_asm.*generator)([=] { return context; }, lhs, rhs, slot_index,
- [=] { return maybe_feedback_vector; },
- UpdateFeedbackMode::kOptionalFeedback, false);
+ TNode<Object> result = (binop_asm.*generator)(
+ [=] { return context; }, lhs, rhs, slot_index,
+ [=] { return maybe_feedback_vector; }, mode, false);
SetAccumulator(result);
Dispatch();
}
@@ -881,13 +885,14 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
TNode<Smi> rhs = BytecodeOperandImmSmi(0);
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
BinaryOpAssembler binop_asm(state());
- TNode<Object> result =
- (binop_asm.*generator)([=] { return context; }, lhs, rhs, slot_index,
- [=] { return maybe_feedback_vector; },
- UpdateFeedbackMode::kOptionalFeedback, true);
+ TNode<Object> result = (binop_asm.*generator)(
+ [=] { return context; }, lhs, rhs, slot_index,
+ [=] { return maybe_feedback_vector; }, mode, true);
SetAccumulator(result);
Dispatch();
}
@@ -990,13 +995,14 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<Object> right = GetAccumulator();
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
bitwise_op, left, right, [=] { return context; }, slot_index,
- [=] { return maybe_feedback_vector; },
- UpdateFeedbackMode::kOptionalFeedback, false);
+ [=] { return maybe_feedback_vector; }, mode, false);
SetAccumulator(result);
Dispatch();
@@ -1006,14 +1012,15 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<Object> left = GetAccumulator();
TNode<Smi> right = BytecodeOperandImmSmi(0);
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
bitwise_op, left, right, [=] { return context; }, slot_index,
- [=] { return maybe_feedback_vector; },
- UpdateFeedbackMode::kOptionalFeedback, true);
+ [=] { return maybe_feedback_vector; }, mode, true);
SetAccumulator(result);
Dispatch();
@@ -1099,12 +1106,13 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_BitwiseNotWithFeedback(
- context, value, slot_index, maybe_feedback_vector,
- UpdateFeedbackMode::kOptionalFeedback);
+ context, value, slot_index, maybe_feedback_vector, mode);
SetAccumulator(result);
Dispatch();
@@ -1144,12 +1152,13 @@ IGNITION_HANDLER(Negate, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_NegateWithFeedback(
- context, value, slot_index, maybe_feedback_vector,
- UpdateFeedbackMode::kOptionalFeedback);
+ context, value, slot_index, maybe_feedback_vector, mode);
SetAccumulator(result);
Dispatch();
@@ -1206,12 +1215,13 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_IncrementWithFeedback(
- context, value, slot_index, maybe_feedback_vector,
- UpdateFeedbackMode::kOptionalFeedback);
+ context, value, slot_index, maybe_feedback_vector, mode);
SetAccumulator(result);
Dispatch();
@@ -1224,12 +1234,13 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_DecrementWithFeedback(
- context, value, slot_index, maybe_feedback_vector,
- UpdateFeedbackMode::kOptionalFeedback);
+ context, value, slot_index, maybe_feedback_vector, mode);
SetAccumulator(result);
Dispatch();
@@ -1347,19 +1358,21 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// Generates code to perform a JS call that collects type feedback.
void JSCall(ConvertReceiverMode receiver_mode) {
TNode<Object> function = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
+ TNode<Context> context = GetContext();
+
+#ifndef V8_JITLESS
+ // Collect the {function} feedback.
LazyNode<Object> receiver = [=] {
return receiver_mode == ConvertReceiverMode::kNullOrUndefined
? UndefinedConstant()
: LoadRegisterAtOperandIndex(1);
};
- RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- TNode<Context> context = GetContext();
-
- // Collect the {function} feedback.
CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
slot_id);
+#endif // !V8_JITLESS
// Call the function and dispatch to the next handler.
CallJSAndDispatch(function, context, args, receiver_mode);
@@ -1373,22 +1386,24 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
const int kReceiverOperandCount =
(receiver_mode == ConvertReceiverMode::kNullOrUndefined) ? 0 : 1;
const int kReceiverAndArgOperandCount = kReceiverOperandCount + arg_count;
- const int kSlotOperandIndex =
- kFirstArgumentOperandIndex + kReceiverAndArgOperandCount;
TNode<Object> function = LoadRegisterAtOperandIndex(0);
+ TNode<Context> context = GetContext();
+
+#ifndef V8_JITLESS
+ // Collect the {function} feedback.
LazyNode<Object> receiver = [=] {
return receiver_mode == ConvertReceiverMode::kNullOrUndefined
? UndefinedConstant()
: LoadRegisterAtOperandIndex(1);
};
+ const int kSlotOperandIndex =
+ kFirstArgumentOperandIndex + kReceiverAndArgOperandCount;
TNode<UintPtrT> slot_id = BytecodeOperandIdx(kSlotOperandIndex);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- TNode<Context> context = GetContext();
-
- // Collect the {function} feedback.
CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
slot_id);
+#endif // !V8_JITLESS
switch (kReceiverAndArgOperandCount) {
case 0:
@@ -1537,12 +1552,10 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
TNode<Object> callable = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
// Call into Runtime function CallWithSpread which does everything.
- CallJSWithSpreadAndDispatch(callable, context, args, slot_id,
- maybe_feedback_vector);
+ CallJSWithSpreadAndDispatch(callable, context, args, slot_id);
}
// ConstructWithSpread <first_arg> <arg_count>
@@ -1556,10 +1569,9 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TNode<Object> result = ConstructWithSpread(
- constructor, context, new_target, args, slot_id, maybe_feedback_vector);
+ TNode<Object> result =
+ ConstructWithSpread(constructor, context, new_target, args, slot_id);
SetAccumulator(result);
Dispatch();
}
@@ -1615,9 +1627,11 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
}
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
- slot_index);
+ TNode<HeapObject> maybe_feedback_vector =
+ LoadFeedbackVectorOrUndefinedIfJitless();
+ static constexpr UpdateFeedbackMode mode = DefaultUpdateFeedbackMode();
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index,
+ mode);
SetAccumulator(result);
Dispatch();
}
@@ -1704,11 +1718,14 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> callable = GetAccumulator();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
+#ifndef V8_JITLESS
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot_id);
+#endif // !V8_JITLESS
+
SetAccumulator(InstanceOf(object, callable, context));
Dispatch();
}
@@ -2155,14 +2172,15 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
+#ifndef V8_JITLESS
Label ok(this);
+
TNode<FeedbackVector> feedback_vector =
CodeStubAssembler::LoadFeedbackVector(LoadFunctionClosure(), &ok);
TNode<Int8T> osr_state = LoadOsrState(feedback_vector);
TNode<Int32T> loop_depth = BytecodeOperandImm(1);
- Label maybe_osr_because_baseline(this),
- maybe_osr_because_osr_state(this, Label::kDeferred);
+ Label maybe_osr_because_osr_state(this, Label::kDeferred);
// The quick initial OSR check. If it passes, we proceed on to more expensive
// OSR logic.
static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
@@ -2171,18 +2189,22 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
&maybe_osr_because_osr_state);
// Perhaps we've got cached baseline code?
+ Label maybe_osr_because_baseline(this);
TNode<SharedFunctionInfo> sfi = LoadObjectField<SharedFunctionInfo>(
LoadFunctionClosure(), JSFunction::kSharedFunctionInfoOffset);
TNode<HeapObject> sfi_data =
LoadObjectField<HeapObject>(sfi, SharedFunctionInfo::kFunctionDataOffset);
- Branch(InstanceTypeEqual(LoadInstanceType(sfi_data), CODET_TYPE),
+ Branch(InstanceTypeEqual(LoadInstanceType(sfi_data), CODE_TYPE),
&maybe_osr_because_baseline, &ok);
BIND(&ok);
+#endif // !V8_JITLESS
+
// The backward jump can trigger a budget interrupt, which can handle stack
// interrupts, so we don't need to explicitly handle them here.
JumpBackward(relative_jump);
+#ifndef V8_JITLESS
BIND(&maybe_osr_because_baseline);
{
TNode<Context> context = GetContext();
@@ -2200,6 +2222,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
slot_index, osr_state,
OnStackReplacementParams::kDefault);
}
+#endif // !V8_JITLESS
}
// SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
@@ -2897,10 +2920,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
&cache_length, UpdateFeedbackMode::kOptionalFeedback);
- // The accumulator is clobbered soon after ForInPrepare, so avoid keeping it
- // alive too long and instead set it to cache_array to match the first return
- // value of Builtin::kForInPrepare.
- SetAccumulator(cache_array);
+ ClobberAccumulator(SmiConstant(0));
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 11c83fa9b5..a39483f768 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -111,16 +111,15 @@ Builtin BuiltinIndexFromBytecode(Bytecode bytecode,
} // namespace
-CodeT Interpreter::GetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale) {
+Code Interpreter::GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
Builtin builtin = BuiltinIndexFromBytecode(bytecode, operand_scale);
return isolate_->builtins()->code(builtin);
}
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale,
- CodeT handler) {
- DCHECK(handler.is_off_heap_trampoline());
+ OperandScale operand_scale, Code handler) {
+ DCHECK(!handler.has_instruction_stream());
DCHECK(handler.kind() == CodeKind::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
dispatch_table_[index] = handler.InstructionStart();
@@ -341,16 +340,15 @@ void Interpreter::Initialize() {
// Set the interpreter entry trampoline entry point now that builtins are
// initialized.
- Handle<CodeT> code = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
+ Handle<Code> code = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
DCHECK(builtins->is_initialized());
- DCHECK(code->is_off_heap_trampoline() ||
- isolate_->heap()->IsImmovable(FromCodeT(*code)));
+ DCHECK(!code->has_instruction_stream());
interpreter_entry_trampoline_instruction_start_ = code->InstructionStart();
// Initialize the dispatch table.
ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
Builtin builtin = BuiltinIndexFromBytecode(bytecode, operand_scale);
- CodeT handler = builtins->code(builtin);
+ Code handler = builtins->code(builtin);
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
#ifdef DEBUG
std::string builtin_name(Builtins::name(builtin));
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 16dc5ef8aa..59c88097e2 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -62,12 +62,12 @@ class Interpreter {
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
- V8_EXPORT_PRIVATE CodeT GetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale);
+ V8_EXPORT_PRIVATE Code GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale);
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
- CodeT handler);
+ Code handler);
V8_EXPORT_PRIVATE Handle<JSObject> GetDispatchCountersObject();
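
SetBytecodeHandler in the interpreter.cc hunk above records each handler's entry address (InstructionStart()) in a dispatch table slot keyed by bytecode and operand scale. The sketch below shows that bookkeeping in isolation; the table sizes and the index formula are assumptions for illustration, not V8's actual layout.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t kBytecodeCount = 256;    // illustrative
constexpr std::size_t kOperandScaleCount = 3;  // single, double, quadruple

std::array<std::uintptr_t, kBytecodeCount * kOperandScaleCount>
    dispatch_table{};

std::size_t GetDispatchTableIndex(uint8_t bytecode,
                                  std::size_t operand_scale_index) {
  return operand_scale_index * kBytecodeCount + bytecode;
}

// Analogue of Interpreter::SetBytecodeHandler: store the handler's entry
// address into the table slot for this (bytecode, operand scale) pair.
void SetBytecodeHandler(uint8_t bytecode, std::size_t operand_scale_index,
                        std::uintptr_t handler_entry) {
  dispatch_table[GetDispatchTableIndex(bytecode, operand_scale_index)] =
      handler_entry;
}

int main() {
  SetBytecodeHandler(/*bytecode=*/0x0b, /*operand_scale_index=*/0,
                     /*handler_entry=*/0x1000);
  std::printf("slot for bytecode 0x0b: %#zx\n",
              static_cast<std::size_t>(
                  dispatch_table[GetDispatchTableIndex(0x0b, 0)]));
  return 0;
}
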
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 8d5f563fdf..4f8924d334 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -5,10 +5,12 @@
#include "src/json/json-parser.h"
#include "src/base/strings.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
+#include "src/heap/factory.h"
#include "src/numbers/conversions.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/field-type.h"
@@ -16,6 +18,7 @@
#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor.h"
+#include "src/roots/roots.h"
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-hasher.h"
@@ -121,40 +124,44 @@ static const constexpr uint8_t character_json_scan_flags[256] = {
} // namespace
-MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
- Handle<Object> result,
- Handle<Object> reviver,
- Handle<String> source) {
+MaybeHandle<Object> JsonParseInternalizer::Internalize(
+ Isolate* isolate, Handle<Object> result, Handle<Object> reviver,
+ Handle<String> source, MaybeHandle<Object> val_node) {
DCHECK(reviver->IsCallable());
JsonParseInternalizer internalizer(isolate, Handle<JSReceiver>::cast(reviver),
source);
Handle<JSObject> holder =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<String> name = isolate->factory()->empty_string();
+ JSObject::AddProperty(isolate, holder, name, result, NONE);
if (v8_flags.harmony_json_parse_with_source) {
- DCHECK(result->IsFixedArray());
- Handle<FixedArray> array = Handle<FixedArray>::cast(result);
- DCHECK_EQ(2, array->length());
- Handle<Object> object(array->get(0), isolate);
- Handle<Object> val_node(array->get(1), isolate);
- JSObject::AddProperty(isolate, holder, name, object, NONE);
- return internalizer.InternalizeJsonProperty(holder, name, val_node);
- } else {
- JSObject::AddProperty(isolate, holder, name, result, NONE);
- return internalizer.InternalizeJsonProperty(holder, name, Handle<Object>());
+ return internalizer.InternalizeJsonProperty<kWithSource>(
+ holder, name, val_node.ToHandleChecked(), result);
}
+ return internalizer.InternalizeJsonProperty<kWithoutSource>(
+ holder, name, Handle<Object>(), Handle<Object>());
}
-// TODO(v8:12955): Fix the parse node assert bug. See
-// https://github.com/tc39/proposal-json-parse-with-source/issues/35.
+template <JsonParseInternalizer::WithOrWithoutSource with_source>
MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
- Handle<JSReceiver> holder, Handle<String> name, Handle<Object> val_node) {
+ Handle<JSReceiver> holder, Handle<String> name, Handle<Object> val_node,
+ Handle<Object> snapshot) {
+ DCHECK_EQ(with_source == kWithSource,
+ !val_node.is_null() && !snapshot.is_null());
DCHECK(reviver_->IsCallable());
HandleScope outer_scope(isolate_);
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
isolate_, value, Object::GetPropertyOrElement(isolate_, holder, name),
Object);
+
+ // When with_source == kWithSource, the source text is passed to the reviver
+ // if the reviver has not mucked with the originally parsed value.
+ //
+ // When with_source == kWithoutSource, this is unused.
+ bool pass_source_to_reviver =
+ with_source == kWithSource && value->SameValue(*snapshot);
+
if (value->IsJSReceiver()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
Maybe<bool> is_array = Object::IsArray(object);
@@ -165,28 +172,39 @@ MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
isolate_, length_object,
Object::GetLengthFromArrayLike(isolate_, object), Object);
double length = length_object->Number();
-
- if (v8_flags.harmony_json_parse_with_source) {
- DCHECK(val_node->IsFixedArray());
- Handle<FixedArray> val_nodes = Handle<FixedArray>::cast(val_node);
- for (double i = 0; i < length; i++) {
+ if (pass_source_to_reviver) {
+ Handle<FixedArray> val_nodes_and_snapshots =
+ Handle<FixedArray>::cast(val_node);
+ int snapshot_length = val_nodes_and_snapshots->length() / 2;
+ for (int i = 0; i < length; i++) {
HandleScope inner_scope(isolate_);
Handle<Object> index = isolate_->factory()->NewNumber(i);
Handle<String> index_name =
isolate_->factory()->NumberToString(index);
- if (!RecurseAndApply(object, index_name,
- handle(val_nodes->get(i), isolate_))) {
+ // Even if the array pointer snapshot matched, it's possible the
+ // array had new elements added that are not in the snapshotted
+ // elements.
+ const bool rv =
+ i < snapshot_length
+ ? RecurseAndApply<kWithSource>(
+ object, index_name,
+ handle(val_nodes_and_snapshots->get(i * 2), isolate_),
+ handle(val_nodes_and_snapshots->get(i * 2 + 1),
+ isolate_))
+ : RecurseAndApply<kWithoutSource>(
+ object, index_name, Handle<Object>(), Handle<Object>());
+ if (!rv) {
return MaybeHandle<Object>();
}
}
} else {
- DCHECK(val_node.is_null());
- for (double i = 0; i < length; i++) {
+ for (int i = 0; i < length; i++) {
HandleScope inner_scope(isolate_);
Handle<Object> index = isolate_->factory()->NewNumber(i);
Handle<String> index_name =
isolate_->factory()->NumberToString(index);
- if (!RecurseAndApply(object, index_name, Handle<Object>())) {
+ if (!RecurseAndApply<kWithoutSource>(
+ object, index_name, Handle<Object>(), Handle<Object>())) {
return MaybeHandle<Object>();
}
}
@@ -199,37 +217,49 @@ MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
ENUMERABLE_STRINGS,
GetKeysConversion::kConvertToString),
Object);
- if (v8_flags.harmony_json_parse_with_source) {
- DCHECK(val_node->IsObjectHashTable());
- Handle<ObjectHashTable> val_nodes =
- Handle<ObjectHashTable>::cast(val_node);
+ if (pass_source_to_reviver) {
+ Handle<ObjectTwoHashTable> val_nodes_and_snapshots =
+ Handle<ObjectTwoHashTable>::cast(val_node);
for (int i = 0; i < contents->length(); i++) {
HandleScope inner_scope(isolate_);
Handle<String> key_name(String::cast(contents->get(i)), isolate_);
- Handle<Object> node = handle(val_nodes->Lookup(key_name), isolate_);
- DCHECK(!node->IsTheHole());
- if (!RecurseAndApply(object, key_name, node)) {
+ auto property_val_node_and_snapshot =
+ val_nodes_and_snapshots->Lookup(isolate_, key_name);
+ Handle<Object> property_val_node(property_val_node_and_snapshot[0],
+ isolate_);
+ Handle<Object> property_snapshot(property_val_node_and_snapshot[1],
+ isolate_);
+ // Even if the object pointer snapshot matched, it's possible the
+ // object had new properties added that are not in the snapshotted
+ // contents.
+ const bool rv =
+ !property_snapshot->IsTheHole()
+ ? RecurseAndApply<kWithSource>(
+ object, key_name, property_val_node, property_snapshot)
+ : RecurseAndApply<kWithoutSource>(
+ object, key_name, Handle<Object>(), Handle<Object>());
+ if (!rv) {
return MaybeHandle<Object>();
}
}
} else {
- DCHECK(val_node.is_null());
for (int i = 0; i < contents->length(); i++) {
HandleScope inner_scope(isolate_);
Handle<String> key_name(String::cast(contents->get(i)), isolate_);
- if (!RecurseAndApply(object, key_name, Handle<Object>())) {
+ if (!RecurseAndApply<kWithoutSource>(
+ object, key_name, Handle<Object>(), Handle<Object>())) {
return MaybeHandle<Object>();
}
}
}
}
}
+
Handle<Object> result;
if (v8_flags.harmony_json_parse_with_source) {
- DCHECK(!val_node.is_null());
Handle<JSObject> context =
isolate_->factory()->NewJSObject(isolate_->object_function());
- if (val_node->IsString()) {
+ if (pass_source_to_reviver && val_node->IsString()) {
JSReceiver::CreateDataProperty(isolate_, context,
isolate_->factory()->source_string(),
val_node, Just(kThrowOnError))
@@ -240,7 +270,6 @@ MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
isolate_, result, Execution::Call(isolate_, reviver_, holder, 3, argv),
Object);
} else {
- DCHECK(val_node.is_null());
Handle<Object> argv[] = {name, value};
ASSIGN_RETURN_ON_EXCEPTION(
isolate_, result, Execution::Call(isolate_, reviver_, holder, 2, argv),
@@ -249,14 +278,18 @@ MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
return outer_scope.CloseAndEscape(result);
}
+template <JsonParseInternalizer::WithOrWithoutSource with_source>
bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
Handle<String> name,
- Handle<Object> val_node) {
+ Handle<Object> val_node,
+ Handle<Object> snapshot) {
STACK_CHECK(isolate_, false);
DCHECK(reviver_->IsCallable());
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, result, InternalizeJsonProperty(holder, name, val_node), false);
+ isolate_, result,
+ InternalizeJsonProperty<with_source>(holder, name, val_node, snapshot),
+ false);
Maybe<bool> change_result = Nothing<bool>();
if (result->IsUndefined(isolate_)) {
change_result = JSReceiver::DeletePropertyOrElement(holder, name,
@@ -426,7 +459,7 @@ void JsonParser<Char>::ReportUnexpectedToken(
Script::InitLineEnds(isolate(), script);
}
- StackTraceFrameIterator it(isolate_);
+ DebuggableStackFrameIterator it(isolate_);
if (!it.done() && it.is_javascript()) {
FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(summary.AsJavaScript().function()->shared());
@@ -597,7 +630,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Map> map = initial_map;
- Handle<FixedArrayBase> elements = factory()->empty_fixed_array();
+ Handle<FixedArrayBase> elements;
// First store the elements.
if (cont.elements > 0) {
@@ -610,15 +643,18 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (!property.string.is_index()) continue;
uint32_t index = property.string.index();
Handle<Object> value = property.value;
- elms = NumberDictionary::Set(isolate_, elms, index, value);
+ NumberDictionary::UncheckedSet(isolate_, elms, index, value);
}
+ elms->SetInitialNumberOfElements(length);
+ elms->UpdateMaxNumberKey(cont.max_index, Handle<JSObject>::null());
map = Map::AsElementsKind(isolate_, map, DICTIONARY_ELEMENTS);
elements = elms;
} else {
Handle<FixedArray> elms =
factory()->NewFixedArrayWithHoles(cont.max_index + 1);
DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ FixedArray raw_elements = *elms;
+ WriteBarrierMode mode = raw_elements.GetWriteBarrierMode(no_gc);
DCHECK_EQ(HOLEY_ELEMENTS, map->elements_kind());
for (int i = 0; i < length; i++) {
@@ -626,18 +662,25 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (!property.string.is_index()) continue;
uint32_t index = property.string.index();
Handle<Object> value = property.value;
- elms->set(static_cast<int>(index), *value, mode);
+ raw_elements.set(static_cast<int>(index), *value, mode);
}
elements = elms;
}
+ } else {
+ elements = factory()->empty_fixed_array();
}
- int feedback_descriptors =
- (feedback.is_null() ||
- feedback->elements_kind() != map->elements_kind() ||
- feedback->instance_size() != map->instance_size())
- ? 0
- : feedback->NumberOfOwnDescriptors();
+ int feedback_descriptors = 0;
+ if (!feedback.is_null()) {
+ DisallowGarbageCollection no_gc;
+ auto raw_feedback = *feedback;
+ auto raw_map = *map;
+ feedback_descriptors =
+ (raw_feedback.elements_kind() != raw_map.elements_kind() ||
+ raw_feedback.instance_size() != raw_map.instance_size())
+ ? 0
+ : raw_feedback.NumberOfOwnDescriptors();
+ }
int i;
int descriptor = 0;
@@ -741,7 +784,9 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
{
descriptor = 0;
DisallowGarbageCollection no_gc;
- WriteBarrierMode mode = object->GetWriteBarrierMode(no_gc);
+ auto raw_object = *object;
+ auto raw_map = *map;
+ WriteBarrierMode mode = raw_object.GetWriteBarrierMode(no_gc);
Address mutable_double_address =
mutable_double_buffer.is_null()
? 0
@@ -760,9 +805,9 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (property.string.is_index()) continue;
InternalIndex descriptor_index(descriptor);
PropertyDetails details =
- map->instance_descriptors(isolate()).GetDetails(descriptor_index);
+ raw_map.instance_descriptors(isolate()).GetDetails(descriptor_index);
+ FieldIndex index = FieldIndex::ForDetails(raw_map, details);
Object value = *property.value;
- FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor_index);
descriptor++;
if (details.representation().IsDouble()) {
@@ -770,8 +815,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (!V8_COMPRESS_POINTERS_8GB_BOOL && kTaggedSize != kDoubleSize) {
// Write alignment filler.
HeapObject filler = HeapObject::FromAddress(filler_address);
- filler.set_map_after_allocation(
- *factory()->one_pointer_filler_map());
+ filler.set_map_after_allocation(roots().one_pointer_filler_map());
filler_address += kMutableDoubleSize;
}
@@ -781,18 +825,16 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
// payload, so we can skip notifying object layout change.
HeapObject hn = HeapObject::FromAddress(mutable_double_address);
- hn.set_map_after_allocation(*factory()->heap_number_map());
+ hn.set_map_after_allocation(roots().heap_number_map());
HeapNumber::cast(hn).set_value_as_bits(bits, kRelaxedStore);
value = hn;
mutable_double_address +=
ALIGN_TO_ALLOCATION_ALIGNMENT(kMutableDoubleSize);
} else {
DCHECK(value.IsHeapNumber());
- HeapObject::cast(value).set_map(*factory()->heap_number_map(),
- kReleaseStore);
}
}
- object->RawFastInobjectPropertyAtPut(index, value, mode);
+ raw_object.RawFastInobjectPropertyAtPut(index, value, mode);
}
// Make all mutable HeapNumbers alive.
if (!mutable_double_buffer.is_null()) {
@@ -935,32 +977,46 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
Handle<Object> value;
- // We use val_node to record current json value's parse node. For primitive
- // values, the val_node is the source string of the json value. For JSObject
- // values, the val_node is an ObjectHashTable in which the key is the property
- // name and the value is the property value's parse node. For JSArray values,
- // the val_node is a FixedArray containing the parse nodes of the elements.
- // And for JSObject values, The order in which properties are defined may be
- // different from the order in which properties are enumerated when calling
- // InternalizeJSONProperty for the JSObject value. E.g., the json source
+  // When should_track_json_source is true, we use val_node to record the
+  // current JSON value's parse node.
+ //
+ // For primitive values, the val_node is the source string of the JSON value.
+ //
+  // For JSObject values, the val_node is an ObjectTwoHashTable in which the
+  // key is the property name and the first value is the property value's parse
+ // node. The order in which properties are defined may be different from the
+ // order in which properties are enumerated when calling
+ // InternalizeJSONProperty for the JSObject value. E.g., the JSON source
// string is '{"a": 1, "1": 2}', and the properties enumerate order is ["1",
- // "a"]. Moreover, properties may be defined repeatedly in the json string.
- // E.g., the json string is '{"a": 1, "a": 1}', and the properties enumerate
+ // "a"]. Moreover, properties may be defined repeatedly in the JSON string.
+ // E.g., the JSON string is '{"a": 1, "a": 1}', and the properties enumerate
// order is ["a"]. So we cannot use the FixedArray to record the properties's
// parse node by the order in which properties are defined and we use a
// ObjectHashTable here to record the property name and the property's parse
// node. We then look up the property's parse node by the property name when
- // calling InternalizeJSONProperty.
+ // calling InternalizeJSONProperty. The second value associated with the key
+ // is the property value's snapshot.
+ //
+ // For JSArray values, the val_node is a FixedArray containing the parse nodes
+ // and snapshots of the elements.
+ //
+ // For information about snapshotting, see below.
Handle<Object> val_node;
// Record the start position and end position for the primitive values.
int start_position;
int end_position;
- // element_val_node_stack is used to track all the elements's parse node. And
- // we use this to construct the JSArray's parse node.
+ // Workaround for -Wunused-but-set-variable on old gcc versions (version < 8).
+ USE(start_position);
+ USE(end_position);
+
+  // element_val_node_stack is used to track all the elements' parse nodes. We
+  // use this to construct the JSArray's parse node and value snapshot.
SmallVector<Handle<Object>> element_val_node_stack;
- // property_val_node_stack is used to track all the property value's parse
- // node. And we use this to construct the JSObject's parse node.
+  // property_val_node_stack is used to track all the property values' parse
+  // nodes. We use this to construct the JSObject's parse node and value
+  // snapshot.
SmallVector<Handle<Object>> property_val_node_stack;
while (true) {
// Produce a json value.
@@ -973,14 +1029,14 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
// The switch is immediately followed by 'break' so we can use 'break' to
// break out of the loop, and 'continue' to continue the loop.
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
start_position = position();
}
switch (peek()) {
case JsonToken::STRING:
Consume(JsonToken::STRING);
value = MakeString(ScanJsonString(false));
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
end_position = position();
val_node = isolate_->factory()->NewSubString(
source_, start_position, end_position);
@@ -989,7 +1045,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
case JsonToken::NUMBER:
value = ParseJsonNumber();
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
end_position = position();
val_node = isolate_->factory()->NewSubString(
source_, start_position, end_position);
@@ -1001,7 +1057,9 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
if (Check(JsonToken::RBRACE)) {
// TODO(verwaest): Directly use the map instead.
value = factory()->NewJSObject(object_constructor_);
- val_node = ObjectHashTable::New(isolate_, 0);
+ if constexpr (should_track_json_source) {
+ val_node = ObjectTwoHashTable::New(isolate_, 0);
+ }
break;
}
@@ -1014,7 +1072,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
ExpectNext(JsonToken::STRING,
MessageTemplate::kJsonParseExpectedPropNameOrRBrace);
property_stack.emplace_back(ScanJsonPropertyKey(&cont));
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
property_val_node_stack.emplace_back(Handle<Object>());
}
@@ -1029,7 +1087,9 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
Consume(JsonToken::LBRACK);
if (Check(JsonToken::RBRACK)) {
value = factory()->NewJSArray(0, PACKED_SMI_ELEMENTS);
- val_node = factory()->NewFixedArray(0);
+ if constexpr (should_track_json_source) {
+ val_node = factory()->NewFixedArray(0);
+ }
break;
}
@@ -1044,7 +1104,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
case JsonToken::TRUE_LITERAL:
ScanLiteral("true");
value = factory()->true_value();
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
val_node = isolate_->factory()->true_string();
}
break;
@@ -1052,7 +1112,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
case JsonToken::FALSE_LITERAL:
ScanLiteral("false");
value = factory()->false_value();
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
val_node = isolate_->factory()->false_string();
}
break;
@@ -1060,7 +1120,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
case JsonToken::NULL_LITERAL:
ScanLiteral("null");
value = factory()->null_value();
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
val_node = isolate_->factory()->null_string();
}
break;
@@ -1095,12 +1155,11 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
// break out of the loop, and 'continue' to continue the loop.
switch (cont.type()) {
case JsonContinuation::kReturn:
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
DCHECK(!val_node.is_null());
- Handle<FixedArray> result = factory()->NewFixedArray(2);
- result->set(0, *value);
- result->set(1, *val_node);
- return cont.scope.CloseAndEscape(result);
+ auto raw_value = *value;
+ parsed_val_node_ = cont.scope.CloseAndEscape(val_node);
+ return cont.scope.CloseAndEscape(handle(raw_value, isolate_));
} else {
return cont.scope.CloseAndEscape(value);
}
@@ -1108,7 +1167,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
case JsonContinuation::kObjectProperty: {
// Store the previous property value into its property info.
property_stack.back().value = value;
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
property_val_node_stack.back() = val_node;
}
@@ -1119,7 +1178,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
MessageTemplate::kJsonParseExpectedDoubleQuotedPropertyName);
property_stack.emplace_back(ScanJsonPropertyKey(&cont));
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
property_val_node_stack.emplace_back(Handle<Object>());
}
ExpectNext(JsonToken::COLON);
@@ -1138,7 +1197,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
// from the transition tree.
if (!maybe_feedback.IsDetached(isolate_)) {
feedback = handle(maybe_feedback, isolate_);
- if (feedback->is_deprecated()) {
+ if (maybe_feedback.is_deprecated()) {
feedback = Map::Update(isolate_, feedback);
}
}
@@ -1147,27 +1206,32 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
Expect(JsonToken::RBRACE,
MessageTemplate::kJsonParseExpectedCommaOrRBrace);
// Return the object.
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
size_t start = cont.index;
- int length = static_cast<int>(property_stack.size() - start);
- Handle<ObjectHashTable> table =
- ObjectHashTable::New(isolate(), length);
- for (int i = 0; i < length; i++) {
+ int num_properties =
+ static_cast<int>(property_stack.size() - start);
+ Handle<ObjectTwoHashTable> table =
+ ObjectTwoHashTable::New(isolate(), num_properties);
+ for (int i = 0; i < num_properties; i++) {
const JsonProperty& property = property_stack[start + i];
+ Handle<Object> property_val_node =
+ property_val_node_stack[start + i];
+ Handle<Object> property_snapshot = property.value;
+ Handle<String> key;
if (property.string.is_index()) {
- table = ObjectHashTable::Put(
- table, factory()->Uint32ToString(property.string.index()),
- property_val_node_stack[start + i]);
+ key = factory()->Uint32ToString(property.string.index());
} else {
- table =
- ObjectHashTable::Put(table, MakeString(property.string),
- property_val_node_stack[start + i]);
+ key = MakeString(property.string);
}
+ table = ObjectTwoHashTable::Put(
+ isolate(), table, key,
+ {property_val_node, property_snapshot});
}
property_val_node_stack.resize_no_init(cont.index);
- Object value_obj = *value;
- val_node = cont.scope.CloseAndEscape(table);
- value = cont.scope.CloseAndEscape(handle(value_obj, isolate_));
+ DisallowGarbageCollection no_gc;
+ auto raw_table = *table;
+ value = cont.scope.CloseAndEscape(value);
+ val_node = cont.scope.CloseAndEscape(handle(raw_table, isolate_));
} else {
value = cont.scope.CloseAndEscape(value);
}
@@ -1183,7 +1247,7 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
case JsonContinuation::kArrayElement: {
// Store the previous element on the stack.
element_stack.emplace_back(value);
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
element_val_node_stack.emplace_back(val_node);
}
// Break to start producing the subsequent element value.
@@ -1193,17 +1257,23 @@ MaybeHandle<Object> JsonParser<Char>::ParseJsonValue(Handle<Object> reviver) {
Expect(JsonToken::RBRACK,
MessageTemplate::kJsonParseExpectedCommaOrRBrack);
// Return the array.
- if (should_track_json_source) {
+ if constexpr (should_track_json_source) {
size_t start = cont.index;
- int length = static_cast<int>(element_stack.size() - start);
- Handle<FixedArray> array = factory()->NewFixedArray(length);
- for (int i = 0; i < length; i++) {
- array->set(i, *element_val_node_stack[start + i]);
+ int num_elements = static_cast<int>(element_stack.size() - start);
+ Handle<FixedArray> val_node_and_snapshot_array =
+ factory()->NewFixedArray(num_elements * 2);
+ DisallowGarbageCollection no_gc;
+ auto raw_val_node_and_snapshot_array = *val_node_and_snapshot_array;
+ for (int i = 0; i < num_elements; i++) {
+ raw_val_node_and_snapshot_array.set(
+ i * 2, *element_val_node_stack[start + i]);
+ raw_val_node_and_snapshot_array.set(i * 2 + 1,
+ *element_stack[start + i]);
}
element_val_node_stack.resize_no_init(cont.index);
- Object value_obj = *value;
- val_node = cont.scope.CloseAndEscape(array);
- value = cont.scope.CloseAndEscape(handle(value_obj, isolate_));
+ value = cont.scope.CloseAndEscape(value);
+ val_node = cont.scope.CloseAndEscape(
+ handle(raw_val_node_and_snapshot_array, isolate_));
} else {
value = cont.scope.CloseAndEscape(value);
}
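The val_node/snapshot pairs assembled above back the source-text half of the TC39
proposal-json-parse-with-source: the reviver only receives the original source
slice when the value it is handed still matches the snapshot taken at parse
time. A minimal JavaScript sketch of the resulting behaviour, assuming V8 runs
with --harmony-json-parse-with-source; the reviver's third (context) argument
and its source property follow the proposal, not anything specific to this diff:

  // The literal below cannot be represented exactly as a Number, but the
  // reviver can recover the exact digits from the untouched source text.
  const out = JSON.parse('{"big": 12345678901234567890}', (key, value, context) => {
    if (key === 'big') return BigInt(context.source);
    return value;
  });
  // out.big === 12345678901234567890n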
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
index 9304c163b3..60eb13c83e 100644
--- a/deps/v8/src/json/json-parser.h
+++ b/deps/v8/src/json/json-parser.h
@@ -12,6 +12,7 @@
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/objects.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -105,19 +106,25 @@ class JsonParseInternalizer {
static MaybeHandle<Object> Internalize(Isolate* isolate,
Handle<Object> result,
Handle<Object> reviver,
- Handle<String> source);
+ Handle<String> source,
+ MaybeHandle<Object> val_node);
private:
JsonParseInternalizer(Isolate* isolate, Handle<JSReceiver> reviver,
Handle<String> source)
: isolate_(isolate), reviver_(reviver), source_(source) {}
+ enum WithOrWithoutSource { kWithoutSource, kWithSource };
+
+ template <WithOrWithoutSource with_source>
MaybeHandle<Object> InternalizeJsonProperty(Handle<JSReceiver> holder,
Handle<String> key,
- Handle<Object> val_node);
+ Handle<Object> val_node,
+ Handle<Object> snapshot);
+ template <WithOrWithoutSource with_source>
bool RecurseAndApply(Handle<JSReceiver> holder, Handle<String> name,
- Handle<Object> val_node);
+ Handle<Object> val_node, Handle<Object> snapshot);
Isolate* isolate_;
Handle<JSReceiver> reviver_;
@@ -158,12 +165,16 @@ class JsonParser final {
HighAllocationThroughputScope high_throughput_scope(
V8::GetCurrentPlatform());
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- JsonParser(isolate, source).ParseJson(reviver),
- Object);
+ MaybeHandle<Object> val_node;
+ {
+ JsonParser parser(isolate, source);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, parser.ParseJson(reviver),
+ Object);
+ val_node = parser.parsed_val_node_;
+ }
if (reviver->IsCallable()) {
return JsonParseInternalizer::Internalize(isolate, result, reviver,
- source);
+ source, val_node);
}
return result;
}
@@ -338,6 +349,7 @@ class JsonParser final {
inline Isolate* isolate() { return isolate_; }
inline Factory* factory() { return isolate_->factory(); }
+ inline ReadOnlyRoots roots() { return ReadOnlyRoots(isolate_); }
inline Handle<JSFunction> object_constructor() { return object_constructor_; }
static const int kInitialSpecialStringLength = 32;
@@ -377,6 +389,9 @@ class JsonParser final {
Handle<JSFunction> object_constructor_;
const Handle<String> original_source_;
Handle<String> source_;
+ // The parsed value's source to be passed to the reviver, if the reviver is
+ // callable.
+ MaybeHandle<Object> parsed_val_node_;
// Cached pointer to the raw chars in source. In case source is on-heap, we
// register an UpdatePointers callback. For this reason, chars_, cursor_ and
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 98ff273a2e..f718bcd9cf 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -225,14 +225,19 @@ JsonStringifier::JsonStringifier(Isolate* isolate)
MaybeHandle<Object> JsonStringifier::Stringify(Handle<Object> object,
Handle<Object> replacer,
Handle<Object> gap) {
- if (!InitializeReplacer(replacer)) return MaybeHandle<Object>();
+ if (!InitializeReplacer(replacer)) {
+ CHECK(isolate_->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
if (!gap->IsUndefined(isolate_) && !InitializeGap(gap)) {
+ CHECK(isolate_->has_pending_exception());
return MaybeHandle<Object>();
}
Result result = SerializeObject(object);
if (result == UNCHANGED) return factory()->undefined_value();
if (result == SUCCESS) return builder_.Finish();
DCHECK(result == EXCEPTION);
+ CHECK(isolate_->has_pending_exception());
return MaybeHandle<Object>();
}
@@ -274,6 +279,7 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
MaybeHandle<OrderedHashSet> set_candidate =
OrderedHashSet::Add(isolate_, set, key);
if (!set_candidate.ToHandle(&set)) {
+ CHECK(isolate_->has_pending_exception());
return false;
}
}
@@ -586,11 +592,34 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
case JS_RAW_JSON_TYPE:
DCHECK(v8_flags.harmony_json_parse_with_source);
if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendString(Handle<String>::cast(
- handle(Handle<JSRawJson>::cast(object)->InObjectPropertyAt(
- JSRawJson::kRawJsonIndex),
- isolate_)));
+ {
+ Handle<JSRawJson> raw_json_obj = Handle<JSRawJson>::cast(object);
+ Handle<String> raw_json;
+ if (raw_json_obj->HasInitialLayout(isolate_)) {
+ // Fast path: the object returned by JSON.rawJSON has its initial map
+ // intact.
+ raw_json = Handle<String>::cast(handle(
+ raw_json_obj->InObjectPropertyAt(JSRawJson::kRawJsonInitialIndex),
+ isolate_));
+ } else {
+ // Slow path: perform a property get for "rawJSON". Because raw JSON
+ // objects are created frozen, it is still guaranteed that there will
+ // be a property named "rawJSON" that is a String. Their initial maps
+            // only change due to VM-internal operations, such as being
+            // optimized for use as a prototype.
+ raw_json = Handle<String>::cast(
+ JSObject::GetProperty(isolate_, raw_json_obj,
+ isolate_->factory()->raw_json_string())
+ .ToHandleChecked());
+ }
+ builder_.AppendString(raw_json);
+ }
return SUCCESS;
+#if V8_ENABLE_WEBASSEMBLY
+ case WASM_STRUCT_TYPE:
+ case WASM_ARRAY_TYPE:
+ return UNCHANGED;
+#endif
default:
if (InstanceTypeChecker::IsString(instance_type)) {
if (deferred_string_key) SerializeDeferredKey(comma, key);
@@ -852,7 +881,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
if (details.location() == PropertyLocation::kField &&
*map == object->map(cage_base)) {
DCHECK_EQ(PropertyKind::kData, details.kind());
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ FieldIndex field_index = FieldIndex::ForDetails(*map, details);
property = JSObject::FastPropertyAt(
isolate_, object, details.representation(), field_index);
} else {
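The JS_RAW_JSON_TYPE branch added above emits the string stored on a
JSON.rawJSON object verbatim, taking the in-object fast path while the object
still has its initial map and falling back to a plain "rawJSON" property get
otherwise. A small JavaScript sketch of the observable effect, again assuming
the proposal is enabled so that JSON.rawJSON exists:

  // 9007199254740993 (2**53 + 1) is not representable as a Number, but the raw
  // JSON text survives stringification untouched.
  const wrapped = { id: JSON.rawJSON('9007199254740993') };
  JSON.stringify(wrapped);  // '{"id":9007199254740993}'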
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 55ca063184..0d235e8566 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -206,18 +206,19 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
#endif // V8_USE_PERFETTO
trace_config_.reset(trace_config);
+ recording_.store(true, std::memory_order_release);
+
+#ifndef V8_USE_PERFETTO
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
base::MutexGuard lock(mutex_.get());
- recording_.store(true, std::memory_order_release);
-#ifndef V8_USE_PERFETTO
UpdateCategoryGroupEnabledFlags();
-#endif
observers_copy = observers_;
}
for (auto o : observers_copy) {
o->OnTraceEnabled();
}
+#endif
}
void TracingController::StopTracing() {
@@ -227,7 +228,6 @@ void TracingController::StopTracing() {
}
#ifndef V8_USE_PERFETTO
UpdateCategoryGroupEnabledFlags();
-#endif
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
base::MutexGuard lock(mutex_.get());
@@ -236,6 +236,7 @@ void TracingController::StopTracing() {
for (auto o : observers_copy) {
o->OnTraceDisabled();
}
+#endif
#ifdef V8_USE_PERFETTO
tracing_session_->StopBlocking();
@@ -341,7 +342,6 @@ const uint8_t* TracingController::GetCategoryGroupEnabled(
}
return category_group_enabled;
}
-#endif // !defined(V8_USE_PERFETTO)
void TracingController::AddTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) {
@@ -360,6 +360,7 @@ void TracingController::RemoveTraceStateObserver(
DCHECK(observers_.find(observer) != observers_.end());
observers_.erase(observer);
}
+#endif // !defined(V8_USE_PERFETTO)
} // namespace tracing
} // namespace platform
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 1a947dc0d5..83028c9ee8 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -266,13 +266,16 @@ class Sampler::PlatformData {
public:
// Get a handle to the calling thread. This is the thread that we are
// going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData()
- : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false, GetCurrentThreadId())) {}
+ // going to use it in the sampler thread.
+ PlatformData() {
+ HANDLE current_process = GetCurrentProcess();
+ BOOL result = DuplicateHandle(
+ current_process, GetCurrentThread(), current_process, &profiled_thread_,
+ THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION,
+ FALSE, 0);
+ DCHECK(result);
+ USE(result);
+ }
~PlatformData() {
if (profiled_thread_ != nullptr) {
diff --git a/deps/v8/src/logging/code-events.h b/deps/v8/src/logging/code-events.h
index 01d1c5ea5e..9faee22ab6 100644
--- a/deps/v8/src/logging/code-events.h
+++ b/deps/v8/src/logging/code-events.h
@@ -88,7 +88,8 @@ class LogEventListener {
virtual void RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) = 0;
// Not handlified as this happens during GC. No allocation allowed.
- virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0;
+ virtual void CodeMoveEvent(InstructionStream from, InstructionStream to) = 0;
+ virtual void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void NativeContextMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
@@ -124,10 +125,6 @@ class Logger {
if (position != listeners_.end()) return false;
// Add the listener to the end and update the element
listeners_.push_back(listener);
- if (!_is_listening_to_code_events) {
- _is_listening_to_code_events |= listener->is_listening_to_code_events();
- }
- DCHECK_EQ(_is_listening_to_code_events, IsListeningToCodeEvents());
return true;
}
void RemoveListener(LogEventListener* listener) {
@@ -135,15 +132,13 @@ class Logger {
auto position = std::find(listeners_.begin(), listeners_.end(), listener);
if (position == listeners_.end()) return;
listeners_.erase(position);
- if (listener->is_listening_to_code_events()) {
- _is_listening_to_code_events = IsListeningToCodeEvents();
- }
- DCHECK_EQ(_is_listening_to_code_events, IsListeningToCodeEvents());
}
bool is_listening_to_code_events() const {
- DCHECK_EQ(_is_listening_to_code_events, IsListeningToCodeEvents());
- return _is_listening_to_code_events;
+ for (auto listener : listeners_) {
+ if (listener->is_listening_to_code_events()) return true;
+ }
+ return false;
}
void CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
@@ -210,12 +205,18 @@ class Logger {
listener->RegExpCodeCreateEvent(code, source);
}
}
- void CodeMoveEvent(AbstractCode from, AbstractCode to) {
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) {
base::MutexGuard guard(&mutex_);
for (auto listener : listeners_) {
listener->CodeMoveEvent(from, to);
}
}
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) {
+ base::MutexGuard guard(&mutex_);
+ for (auto listener : listeners_) {
+ listener->BytecodeMoveEvent(from, to);
+ }
+ }
void SharedFunctionInfoMoveEvent(Address from, Address to) {
base::MutexGuard guard(&mutex_);
for (auto listener : listeners_) {
@@ -264,16 +265,8 @@ class Logger {
}
private:
- bool IsListeningToCodeEvents() const {
- for (auto listener : listeners_) {
- if (listener->is_listening_to_code_events()) return true;
- }
- return false;
- }
-
std::vector<LogEventListener*> listeners_;
base::Mutex mutex_;
- bool _is_listening_to_code_events = false;
};
} // namespace internal
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index d0a3fcac68..f86a7be5f2 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -10,10 +10,9 @@
namespace v8 {
namespace internal {
+// Generic range histograms.
+// HR(name, caption, min, max, num_buckets)
#define HISTOGRAM_RANGE_LIST(HR) \
- /* Generic range histograms: HR(name, caption, min, max, num_buckets) */ \
- HR(background_marking, V8.GCBackgroundMarking, 0, 10000, 101) \
- HR(background_sweeping, V8.GCBackgroundSweeping, 0, 10000, 101) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
@@ -62,47 +61,44 @@ namespace internal {
HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 20, \
21) \
HR(wasm_memory_allocation_result, V8.WasmMemoryAllocationResult, 0, 3, 4) \
- /* committed code size per module, collected on GC */ \
+ /* Committed code size per module, collected on GC. */ \
HR(wasm_module_code_size_mb, V8.WasmModuleCodeSizeMiB, 0, 1024, 64) \
- /* code size per module after baseline compilation */ \
- HR(wasm_module_code_size_mb_after_baseline, \
- V8.WasmModuleCodeSizeBaselineMiB, 0, 1024, 64) \
- /* percent of freed code size per module, collected on GC */ \
+ /* Percent of freed code size per module, collected on GC. */ \
HR(wasm_module_freed_code_size_percent, V8.WasmModuleCodeSizePercentFreed, \
0, 100, 32) \
- /* number of code GCs triggered per native module, collected on code GC */ \
+ /* Number of code GCs triggered per native module, collected on code GC. */ \
HR(wasm_module_num_triggered_code_gcs, \
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
- /* number of code spaces reserved per wasm module */ \
+ /* Number of code spaces reserved per wasm module. */ \
HR(wasm_module_num_code_spaces, V8.WasmModuleNumberOfCodeSpaces, 1, 128, 20) \
- /* number of live modules per isolate */ \
+ /* Number of live modules per isolate. */ \
HR(wasm_modules_per_isolate, V8.WasmModulesPerIsolate, 1, 1024, 30) \
- /* number of live modules per engine (i.e. whole process) */ \
+ /* Number of live modules per engine (i.e. whole process). */ \
HR(wasm_modules_per_engine, V8.WasmModulesPerEngine, 1, 1024, 30) \
- /* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
+ /* Bailout reason if Liftoff failed, or {kSuccess} (per function). */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21) \
- /* support for PKEYs/PKU by testing result of pkey_alloc() */ \
+ /* Support for PKEYs/PKU by testing result of pkey_alloc(). */ \
HR(wasm_memory_protection_keys_support, V8.WasmMemoryProtectionKeysSupport, \
0, 1, 2) \
- /* number of thrown exceptions per isolate */ \
+ /* Number of thrown exceptions per isolate. */ \
HR(wasm_throw_count, V8.WasmThrowCount, 0, 100000, 30) \
- /* number of rethrown exceptions per isolate */ \
+ /* Number of rethrown exceptions per isolate. */ \
HR(wasm_rethrow_count, V8.WasmReThrowCount, 0, 100000, 30) \
- /* number of caught exceptions per isolate */ \
+ /* Number of caught exceptions per isolate. */ \
HR(wasm_catch_count, V8.WasmCatchCount, 0, 100000, 30) \
- /* Ticks observed in a single Turbofan compilation, in 1K */ \
+ /* Ticks observed in a single Turbofan compilation, in 1K. */ \
HR(turbofan_ticks, V8.TurboFan1KTicks, 0, 100000, 200) \
- /* Backtracks observed in a single regexp interpreter execution */ \
+ /* Backtracks observed in a single regexp interpreter execution. */ \
/* The maximum of 100M backtracks takes roughly 2 seconds on my machine. */ \
HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50) \
- /* number of times a cache event is triggered for a wasm module */ \
+ /* Number of times a cache event is triggered for a wasm module. */ \
HR(wasm_cache_count, V8.WasmCacheCount, 0, 100, 101) \
HR(wasm_streaming_until_compilation_finished, \
V8.WasmStreamingUntilCompilationFinishedMilliSeconds, 0, 10000, 50) \
HR(wasm_compilation_until_streaming_finished, \
V8.WasmCompilationUntilStreamFinishedMilliSeconds, 0, 10000, 50) \
- /* Number of in-use external pointers in the external pointer table */ \
- /* Counted after sweeping the table at the end of mark-compact GC */ \
+ /* Number of in-use external pointers in the external pointer table. */ \
+ /* Counted after sweeping the table at the end of mark-compact GC. */ \
HR(external_pointers_count, V8.SandboxedExternalPointersCount, 0, \
kMaxExternalPointers, 101) \
HR(wasm_num_lazy_compilations_5sec, V8.WasmNumLazyCompilations5Sec, 0, \
@@ -115,13 +111,14 @@ namespace internal {
200000, 50) \
/* Outcome of external pointer table compaction: kSuccess, */ \
  /* kPartialSuccess or kAbortedDuringSweeping. See */ \
- /* ExternalPointerTable::TableCompactionOutcome enum for more details */ \
+ /* ExternalPointerTable::TableCompactionOutcome enum for more details. */ \
HR(external_pointer_table_compaction_outcome, \
- V8.ExternalPointerTableCompactionOutcome, 0, 2, 3)
+ V8.ExternalPointerTableCompactionOutcome, 0, 2, 3) \
+ HR(wasm_compilation_method, V8.WasmCompilationMethod, 0, 4, 5)
+// Like TIMED_HISTOGRAM_LIST, but allows the use of NestedTimedHistogramScope.
+// HT(name, caption, max, unit)
#define NESTED_TIMED_HISTOGRAM_LIST(HT) \
- /* Nested timer histograms allow distributions of nested timed results. */ \
- /* HT(name, caption, max, unit) */ \
/* Garbage collection timers. */ \
HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
@@ -136,19 +133,27 @@ namespace internal {
MICROSECOND) \
HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
- /* Serialization as part of compilation (code caching) */ \
+ /* Serialization as part of compilation (code caching). */ \
HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000, \
MICROSECOND) \
- /* Total compilation time incl. caching/parsing */ \
+ /* Snapshot. */ \
+ HT(snapshot_deserialize_rospace, V8.SnapshotDeserializeRoSpaceMicroSeconds, \
+ 1000000, MICROSECOND) \
+ HT(snapshot_deserialize_isolate, V8.SnapshotDeserializeIsolateMicroSeconds, \
+ 1000000, MICROSECOND) \
+ HT(snapshot_deserialize_context, V8.SnapshotDeserializeContextMicroSeconds, \
+ 1000000, MICROSECOND) \
+ /* ... and also see compile_deserialize above. */ \
+ /* Total compilation time incl. caching/parsing. */ \
HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND)
-#define NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT) \
- /* Total V8 time (including JS and runtime calls, exluding callbacks) */ \
+#define NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT) \
+  /* Total V8 time (including JS and runtime calls, excluding callbacks). */ \
HT(execute, V8.ExecuteMicroSeconds, 1000000, MICROSECOND)
+// Timer histograms, thread safe: HT(name, caption, max, unit)
#define TIMED_HISTOGRAM_LIST(HT) \
- /* Timer histograms, thread safe: HT(name, caption, max, unit) */ \
/* Garbage collection timers. */ \
HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
HT(gc_compactor_background, V8.GCCompactorBackground, 10000, MILLISECOND) \
@@ -171,6 +176,12 @@ namespace internal {
HT(gc_time_to_safepoint, V8.GC.TimeToSafepoint, 10000000, MICROSECOND) \
HT(gc_time_to_collection_on_background, V8.GC.TimeToCollectionOnBackground, \
10000000, MICROSECOND) \
+ /* Maglev timers. */ \
+ HT(maglev_optimize_prepare, V8.MaglevOptimizePrepare, 100000, MICROSECOND) \
+ HT(maglev_optimize_execute, V8.MaglevOptimizeExecute, 100000, MICROSECOND) \
+ HT(maglev_optimize_finalize, V8.MaglevOptimizeFinalize, 100000, MICROSECOND) \
+ HT(maglev_optimize_total_time, V8.MaglevOptimizeTotalTime, 1000000, \
+ MICROSECOND) \
/* TurboFan timers. */ \
HT(turbofan_optimize_prepare, V8.TurboFanOptimizePrepare, 1000000, \
MICROSECOND) \
@@ -271,7 +282,7 @@ namespace internal {
V8.WasmSumLazyCompilationTime60SecMilliSeconds, 20000, MILLISECOND) \
HT(wasm_sum_lazy_compilation_time_120sec, \
V8.WasmSumLazyCompilationTime120SecMilliSeconds, 20000, MILLISECOND) \
- /* Debugger timers */ \
+ /* Debugger timers. */ \
HT(debug_pause_to_paused_event, V8.DebugPauseToPausedEventMilliSeconds, \
1000000, MILLISECOND)
@@ -295,24 +306,17 @@ namespace internal {
HM(heap_sample_code_space_committed, V8.MemoryHeapSampleCodeSpaceCommitted) \
HM(heap_sample_maximum_committed, V8.MemoryHeapSampleMaximumCommitted)
-// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
-// Intellisense to crash. It was broken into two macros (each of length 40
-// lines) rather than one macro (of length about 80 lines) to work around
-// this problem. Please avoid using recursive macros of this length when
-// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- /* Number of times the cache contained a reusable Script but not \
- the root SharedFunctionInfo */ \
- SC(compilation_cache_partial_hits, V8.CompilationCachePartialHits) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull)
-
-#define STATS_COUNTER_LIST_2(SC) \
+#define STATS_COUNTER_LIST(SC) \
+ /* Global handle count. */ \
+ SC(global_handles, V8.GlobalHandles) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(compilation_cache_hits, V8.CompilationCacheHits) \
+ SC(compilation_cache_misses, V8.CompilationCacheMisses) \
+ /* Number of times the cache contained a reusable Script but not */ \
+ /* the root SharedFunctionInfo. */ \
+ SC(compilation_cache_partial_hits, V8.CompilationCachePartialHits) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
SC(gc_compactor_caused_by_promoted_data, V8.GCCompactorCausedByPromotedData) \
SC(gc_compactor_caused_by_oldspace_exhaustion, \
@@ -339,7 +343,8 @@ namespace internal {
SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed) \
SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
SC(wasm_reloc_size, V8.WasmRelocBytes) \
- SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions)
+ SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions) \
+ SC(wasm_compiled_export_wrapper, V8.WasmCompiledExportWrappers)
// List of counters that can be incremented from generated code. We need them in
// a separate list to be able to relocate them.
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index a009025fba..ad8da51d85 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -220,8 +220,7 @@ Counters::Counters(Isolate* isolate)
#define FIXED_ARRAY_INSTANCE_TYPE_SC(name) COUNT_AND_SIZE_SC(FIXED_ARRAY_##name)
// clang-format off
- STATS_COUNTER_LIST_1(BARE_SC)
- STATS_COUNTER_LIST_2(BARE_SC)
+ STATS_COUNTER_LIST(BARE_SC)
STATS_COUNTER_NATIVE_CODE_LIST(BARE_SC)
INSTANCE_TYPE_LIST(COUNT_AND_SIZE_SC)
CODE_KIND_LIST(CODE_KIND_SC)
@@ -243,8 +242,7 @@ void Counters::ResetCounterFunction(CounterLookupCallback f) {
stats_table_.SetCounterFunction(f);
#define SC(name, caption) name##_.Reset();
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
+ STATS_COUNTER_LIST(SC)
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 354bc54845..f62a15b0c6 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -219,8 +219,7 @@ class Histogram {
enum class TimedHistogramResolution { MILLISECOND, MICROSECOND };
-// A thread safe histogram timer. It also allows distributions of
-// nested timed results.
+// A thread safe histogram timer.
class TimedHistogram : public Histogram {
public:
// Records a TimeDelta::Max() result. Useful to record percentage of tasks
@@ -262,7 +261,14 @@ class TimedHistogram : public Histogram {
class NestedTimedHistogramScope;
class PauseNestedTimedHistogramScope;
-// A NestedTimedHistogram allows distributions of nested timed results.
+// For use with the NestedTimedHistogramScope. 'Nested' here means that scopes
+// may have nested lifetimes while still correctly accounting for time, e.g.:
+//
+// void f() {
+// NestedTimedHistogramScope timer(...);
+// ...
+// f(); // Recursive call.
+// }
class NestedTimedHistogram : public TimedHistogram {
public:
// Note: public for testing purposes only.
@@ -562,8 +568,7 @@ class Counters : public std::enable_shared_from_this<Counters> {
#define SC(name, caption) \
StatsCounter* name() { return &name##_; }
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
+ STATS_COUNTER_LIST(SC)
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
@@ -584,8 +589,7 @@ class Counters : public std::enable_shared_from_this<Counters> {
HISTOGRAM_LEGACY_MEMORY_LIST(MEMORY_ID)
#undef MEMORY_ID
#define COUNTER_ID(name, caption) k_##name,
- STATS_COUNTER_LIST_1(COUNTER_ID)
- STATS_COUNTER_LIST_2(COUNTER_ID)
+ STATS_COUNTER_LIST(COUNTER_ID)
STATS_COUNTER_NATIVE_CODE_LIST(COUNTER_ID)
#undef COUNTER_ID
#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
@@ -663,8 +667,7 @@ class Counters : public std::enable_shared_from_this<Counters> {
#undef HM
#define SC(name, caption) StatsCounter name##_;
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
+ STATS_COUNTER_LIST(SC)
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
diff --git a/deps/v8/src/logging/local-logger.cc b/deps/v8/src/logging/local-logger.cc
index 000a069da1..6792383c6c 100644
--- a/deps/v8/src/logging/local-logger.cc
+++ b/deps/v8/src/logging/local-logger.cc
@@ -20,8 +20,7 @@ LocalLogger::LocalLogger(Isolate* isolate)
void LocalLogger::ScriptDetails(Script script) {
v8_file_logger_->ScriptDetails(script);
}
-void LocalLogger::ScriptEvent(V8FileLogger::ScriptEventType type,
- int script_id) {
+void LocalLogger::ScriptEvent(ScriptEventType type, int script_id) {
v8_file_logger_->ScriptEvent(type, script_id);
}
void LocalLogger::CodeLinePosInfoRecordEvent(Address code_start,
diff --git a/deps/v8/src/logging/local-logger.h b/deps/v8/src/logging/local-logger.h
index c7c4195eca..347c4991dd 100644
--- a/deps/v8/src/logging/local-logger.h
+++ b/deps/v8/src/logging/local-logger.h
@@ -21,7 +21,7 @@ class LocalLogger {
return is_listening_to_code_events_;
}
void ScriptDetails(Script script);
- void ScriptEvent(V8FileLogger::ScriptEventType type, int script_id);
+ void ScriptEvent(ScriptEventType type, int script_id);
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray source_position_table,
JitCodeEvent::CodeType code_type);
diff --git a/deps/v8/src/logging/log-file.cc b/deps/v8/src/logging/log-file.cc
index acdd24701c..d8201d1575 100644
--- a/deps/v8/src/logging/log-file.cc
+++ b/deps/v8/src/logging/log-file.cc
@@ -14,6 +14,7 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/isolate-utils.h"
+#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/objects/string-inl.h"
#include "src/strings/string-stream.h"
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 1556cf3399..ca97693465 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -19,6 +19,7 @@
#include "src/codegen/bailout-reason.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/source-position-table.h"
+#include "src/common/assert-scope.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/perf-jit.h"
#include "src/execution/isolate.h"
@@ -129,7 +130,7 @@ const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
// We record interpreter trampoline builtin copies as having the
// "interpreted" marker.
if (v8_flags.interpreted_frames_native_stack && kind == CodeKind::BUILTIN &&
- !code.is_off_heap_trampoline(cage_base)) {
+ code.has_instruction_stream(cage_base)) {
DCHECK_EQ(code.builtin_id(cage_base), Builtin::kInterpreterEntryTrampoline);
kind = CodeKind::INTERPRETED_FUNCTION;
}
@@ -248,7 +249,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
name_buffer_->AppendBytes(comment);
- LogRecordedBuffer(code, MaybeHandle<SharedFunctionInfo>(),
+ DisallowGarbageCollection no_gc;
+ LogRecordedBuffer(*code, MaybeHandle<SharedFunctionInfo>(),
name_buffer_->get(), name_buffer_->size());
}
@@ -257,7 +259,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
DCHECK(is_listening_to_code_events());
name_buffer_->Init(tag);
name_buffer_->AppendName(*name);
- LogRecordedBuffer(code, MaybeHandle<SharedFunctionInfo>(),
+ DisallowGarbageCollection no_gc;
+ LogRecordedBuffer(*code, MaybeHandle<SharedFunctionInfo>(),
name_buffer_->get(), name_buffer_->size());
}
@@ -269,7 +272,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
name_buffer_->AppendBytes(ComputeMarker(*shared, *code));
name_buffer_->AppendByte(' ');
name_buffer_->AppendName(*script_name);
- LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
+ DisallowGarbageCollection no_gc;
+ LogRecordedBuffer(*code, shared, name_buffer_->get(), name_buffer_->size());
}
void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
@@ -292,7 +296,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
name_buffer_->AppendInt(line);
name_buffer_->AppendByte(':');
name_buffer_->AppendInt(column);
- LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
+ DisallowGarbageCollection no_gc;
+ LogRecordedBuffer(*code, shared, name_buffer_->get(), name_buffer_->size());
}
#if V8_ENABLE_WEBASSEMBLY
@@ -312,6 +317,7 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, const wasm::WasmCode* code,
}
name_buffer_->AppendByte('-');
name_buffer_->AppendBytes(ExecutionTierToString(code->tier()));
+ DisallowGarbageCollection no_gc;
LogRecordedBuffer(code, name_buffer_->get(), name_buffer_->size());
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -321,7 +327,8 @@ void CodeEventLogger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
DCHECK(is_listening_to_code_events());
name_buffer_->Init(LogEventListener::CodeTag::kRegExp);
name_buffer_->AppendString(*source);
- LogRecordedBuffer(code, MaybeHandle<SharedFunctionInfo>(),
+ DisallowGarbageCollection no_gc;
+ LogRecordedBuffer(*code, MaybeHandle<SharedFunctionInfo>(),
name_buffer_->get(), name_buffer_->size());
}
@@ -332,12 +339,13 @@ class LinuxPerfBasicLogger : public CodeEventLogger {
explicit LinuxPerfBasicLogger(Isolate* isolate);
~LinuxPerfBasicLogger() override;
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override {}
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override {}
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
private:
- void LogRecordedBuffer(Handle<AbstractCode> code,
+ void LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
#if V8_ENABLE_WEBASSEMBLY
@@ -347,13 +355,14 @@ class LinuxPerfBasicLogger : public CodeEventLogger {
void WriteLogRecordedBuffer(uintptr_t address, int size, const char* name,
int name_length);
+ static base::LazyRecursiveMutex& GetFileMutex();
+
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
static const int kFilenameBufferPadding;
// Per-process singleton file. We assume that there is one main isolate
// to determine when it goes away, we keep the reference count.
- static base::LazyRecursiveMutex file_mutex_;
static FILE* perf_output_handle_;
static uint64_t reference_count_;
};
@@ -362,15 +371,20 @@ const char LinuxPerfBasicLogger::kFilenameFormatString[] = "/tmp/perf-%d.map";
// Extra space for the PID in the filename
const int LinuxPerfBasicLogger::kFilenameBufferPadding = 16;
-base::LazyRecursiveMutex LinuxPerfBasicLogger::file_mutex_;
+// static
+base::LazyRecursiveMutex& LinuxPerfBasicLogger::GetFileMutex() {
+ static base::LazyRecursiveMutex file_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+ return file_mutex;
+}
+
// The following static variables are protected by
-// LinuxPerfBasicLogger::file_mutex_.
+// LinuxPerfBasicLogger::GetFileMutex().
uint64_t LinuxPerfBasicLogger::reference_count_ = 0;
FILE* LinuxPerfBasicLogger::perf_output_handle_ = nullptr;
LinuxPerfBasicLogger::LinuxPerfBasicLogger(Isolate* isolate)
: CodeEventLogger(isolate) {
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ base::LockGuard<base::RecursiveMutex> guard_file(GetFileMutex().Pointer());
int process_id_ = base::OS::GetCurrentProcessId();
reference_count_++;
// If this is the first logger, open the file.
@@ -389,7 +403,7 @@ LinuxPerfBasicLogger::LinuxPerfBasicLogger(Isolate* isolate)
}
LinuxPerfBasicLogger::~LinuxPerfBasicLogger() {
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+ base::LockGuard<base::RecursiveMutex> guard_file(GetFileMutex().Pointer());
reference_count_--;
// If this was the last logger, close the file.
@@ -413,18 +427,19 @@ void LinuxPerfBasicLogger::WriteLogRecordedBuffer(uintptr_t address, int size,
size, name_length, name);
}
-void LinuxPerfBasicLogger::LogRecordedBuffer(Handle<AbstractCode> code,
+void LinuxPerfBasicLogger::LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo>,
const char* name, int length) {
+ DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base(isolate_);
if (v8_flags.perf_basic_prof_only_functions &&
- CodeKindIsBuiltinOrJSFunction(code->kind(cage_base))) {
+ CodeKindIsBuiltinOrJSFunction(code.kind(cage_base))) {
return;
}
WriteLogRecordedBuffer(
- static_cast<uintptr_t>(code->InstructionStart(cage_base)),
- code->InstructionSize(cage_base), name, length);
+ static_cast<uintptr_t>(code.InstructionStart(cage_base)),
+ code.InstructionSize(cage_base), name, length);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -587,22 +602,39 @@ void ExternalLogEventListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event));
}
-void ExternalLogEventListener::CodeMoveEvent(AbstractCode from,
- AbstractCode to) {
- PtrComprCageBase cage_base(isolate_);
+namespace {
+
+void InitializeCodeEvent(Isolate* isolate, CodeEvent* event,
+ Address previous_code_start_address,
+ Address code_start_address, int code_size) {
+ event->previous_code_start_address =
+ static_cast<uintptr_t>(previous_code_start_address);
+ event->code_start_address = static_cast<uintptr_t>(code_start_address);
+ event->code_size = static_cast<size_t>(code_size);
+ event->function_name = isolate->factory()->empty_string();
+ event->script_name = isolate->factory()->empty_string();
+ event->script_line = 0;
+ event->script_column = 0;
+ event->code_type = v8::CodeEventType::kRelocationType;
+ event->comment = "";
+}
+
+} // namespace
+
+void ExternalLogEventListener::CodeMoveEvent(InstructionStream from,
+ InstructionStream to) {
CodeEvent code_event;
- code_event.previous_code_start_address =
- static_cast<uintptr_t>(from.InstructionStart(cage_base));
- code_event.code_start_address =
- static_cast<uintptr_t>(to.InstructionStart(cage_base));
- code_event.code_size = static_cast<size_t>(to.InstructionSize(cage_base));
- code_event.function_name = isolate_->factory()->empty_string();
- code_event.script_name = isolate_->factory()->empty_string();
- code_event.script_line = 0;
- code_event.script_column = 0;
- code_event.code_type = v8::CodeEventType::kRelocationType;
- code_event.comment = "";
+ InitializeCodeEvent(isolate_, &code_event, from.instruction_start(),
+ to.instruction_start(),
+ to.code(kAcquireLoad).instruction_size());
+ code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event));
+}
+void ExternalLogEventListener::BytecodeMoveEvent(BytecodeArray from,
+ BytecodeArray to) {
+ CodeEvent code_event;
+ InitializeCodeEvent(isolate_, &code_event, from.GetFirstBytecodeAddress(),
+ to.GetFirstBytecodeAddress(), to.length());
code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event));
}
@@ -612,14 +644,15 @@ class LowLevelLogger : public CodeEventLogger {
LowLevelLogger(Isolate* isolate, const char* file_name);
~LowLevelLogger() override;
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override;
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
void SnapshotPositionEvent(HeapObject obj, int pos);
void CodeMovingGCEvent() override;
private:
- void LogRecordedBuffer(Handle<AbstractCode> code,
+ void LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
#if V8_ENABLE_WEBASSEMBLY
@@ -709,19 +742,19 @@ void LowLevelLogger::LogCodeInfo() {
LogWriteBytes(arch, sizeof(arch));
}
-void LowLevelLogger::LogRecordedBuffer(Handle<AbstractCode> code,
+void LowLevelLogger::LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo>,
const char* name, int length) {
+ DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base(isolate_);
CodeCreateStruct event;
event.name_size = length;
- event.code_address = code->InstructionStart(cage_base);
- event.code_size = code->InstructionSize(cage_base);
+ event.code_address = code.InstructionStart(cage_base);
+ event.code_size = code.InstructionSize(cage_base);
LogWriteStruct(event);
LogWriteBytes(name, length);
- LogWriteBytes(
- reinterpret_cast<const char*>(code->InstructionStart(cage_base)),
- code->InstructionSize(cage_base));
+ LogWriteBytes(reinterpret_cast<const char*>(code.InstructionStart(cage_base)),
+ code.InstructionSize(cage_base));
}
#if V8_ENABLE_WEBASSEMBLY
@@ -738,11 +771,18 @@ void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
}
#endif // V8_ENABLE_WEBASSEMBLY
-void LowLevelLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
- PtrComprCageBase cage_base(isolate_);
+void LowLevelLogger::CodeMoveEvent(InstructionStream from,
+ InstructionStream to) {
+ CodeMoveStruct event;
+ event.from_address = from.instruction_start();
+ event.to_address = to.instruction_start();
+ LogWriteStruct(event);
+}
+
+void LowLevelLogger::BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) {
CodeMoveStruct event;
- event.from_address = from.InstructionStart(cage_base);
- event.to_address = to.InstructionStart(cage_base);
+ event.from_address = from.GetFirstBytecodeAddress();
+ event.to_address = to.GetFirstBytecodeAddress();
LogWriteStruct(event);
}
@@ -762,7 +802,8 @@ class JitLogger : public CodeEventLogger {
public:
JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler);
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override;
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
void AddCodeLinePosInfoEvent(void* jit_handler_data, int pc_offset,
@@ -775,7 +816,7 @@ class JitLogger : public CodeEventLogger {
JitCodeEvent::CodeType code_type);
private:
- void LogRecordedBuffer(Handle<AbstractCode> code,
+ void LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) override;
#if V8_ENABLE_WEBASSEMBLY
@@ -792,16 +833,17 @@ JitLogger::JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler)
DCHECK_NOT_NULL(code_event_handler);
}
-void JitLogger::LogRecordedBuffer(Handle<AbstractCode> code,
+void JitLogger::LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) {
+ DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base(isolate_);
JitCodeEvent event;
event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = reinterpret_cast<void*>(code->InstructionStart(cage_base));
- event.code_type = code->IsCode(cage_base) ? JitCodeEvent::JIT_CODE
- : JitCodeEvent::BYTE_CODE;
- event.code_len = code->InstructionSize(cage_base);
+ event.code_start = reinterpret_cast<void*>(code.InstructionStart(cage_base));
+ event.code_type =
+ code.IsCode(cage_base) ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
+ event.code_len = code.InstructionSize(cage_base);
Handle<SharedFunctionInfo> shared;
if (maybe_shared.ToHandle(&shared) &&
shared->script(cage_base).IsScript(cage_base)) {
@@ -866,18 +908,29 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
}
#endif // V8_ENABLE_WEBASSEMBLY
-void JitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
+void JitLogger::CodeMoveEvent(InstructionStream from, InstructionStream to) {
base::MutexGuard guard(&logger_mutex_);
- PtrComprCageBase cage_base(isolate_);
JitCodeEvent event;
event.type = JitCodeEvent::CODE_MOVED;
- event.code_type =
- from.IsCode(cage_base) ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
- event.code_start = reinterpret_cast<void*>(from.InstructionStart(cage_base));
- event.code_len = from.InstructionSize(cage_base);
- event.new_code_start =
- reinterpret_cast<void*>(to.InstructionStart(cage_base));
+ event.code_type = JitCodeEvent::JIT_CODE;
+ event.code_start = reinterpret_cast<void*>(from.instruction_start());
+ event.code_len = from.unchecked_code().instruction_size();
+ event.new_code_start = reinterpret_cast<void*>(to.instruction_start());
+ event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+
+ code_event_handler_(&event);
+}
+
+void JitLogger::BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) {
+ base::MutexGuard guard(&logger_mutex_);
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_MOVED;
+ event.code_type = JitCodeEvent::BYTE_CODE;
+ event.code_start = reinterpret_cast<void*>(from.GetFirstBytecodeAddress());
+ event.code_len = from.length();
+ event.new_code_start = reinterpret_cast<void*>(to.GetFirstBytecodeAddress());
event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
@@ -969,7 +1022,7 @@ class Profiler : public base::Thread {
// Inserts collected profiling data into buffer.
void Insert(TickSample* sample) {
if (Succ(head_) == static_cast<int>(base::Acquire_Load(&tail_))) {
- overflow_ = true;
+ base::Relaxed_Store(&overflow_, true);
} else {
buffer_[head_] = *sample;
head_ = Succ(head_);
@@ -984,10 +1037,10 @@ class Profiler : public base::Thread {
bool Remove(TickSample* sample) {
buffer_semaphore_.Wait(); // Wait for an element.
*sample = buffer_[base::Relaxed_Load(&tail_)];
- bool result = overflow_;
+ bool result = base::Relaxed_Load(&overflow_);
base::Release_Store(
&tail_, static_cast<base::Atomic32>(Succ(base::Relaxed_Load(&tail_))));
- overflow_ = false;
+ base::Relaxed_Store(&overflow_, false);
return result;
}
@@ -1001,7 +1054,7 @@ class Profiler : public base::Thread {
TickSample buffer_[kBufferSize]; // Buffer storage.
int head_; // Index to the buffer head.
base::Atomic32 tail_; // Index to the buffer tail.
- bool overflow_; // Tell whether a buffer overflow has occurred.
+ base::Atomic32 overflow_; // Tell whether a buffer overflow has occurred.
// Semaphore used for buffer synchronization.
base::Semaphore buffer_semaphore_;
@@ -1067,9 +1120,9 @@ Profiler::Profiler(Isolate* isolate)
: base::Thread(Options("v8:Profiler")),
isolate_(isolate),
head_(0),
- overflow_(false),
buffer_semaphore_(0) {
base::Relaxed_Store(&tail_, 0);
+ base::Relaxed_Store(&overflow_, false);
base::Relaxed_Store(&running_, 0);
}
@@ -1310,7 +1363,7 @@ void V8FileLogger::LogSourceCodeInformation(Handle<AbstractCode> code,
bool hasInlined = false;
if (code->kind(cage_base) != CodeKind::BASELINE) {
SourcePositionTableIterator iterator(
- code->SourcePositionTable(cage_base, *shared));
+ code->SourcePositionTable(isolate_, *shared));
for (; !iterator.done(); iterator.Advance()) {
SourcePosition pos = iterator.source_position();
msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
@@ -1372,11 +1425,6 @@ void V8FileLogger::LogCodeDisassemble(Handle<AbstractCode> code) {
#ifdef ENABLE_DISASSEMBLER
Code::cast(*code).Disassemble(nullptr, stream, isolate_);
#endif
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- code->IsCodeDataContainer(cage_base)) {
-#ifdef ENABLE_DISASSEMBLER
- CodeT::cast(*code).Disassemble(nullptr, stream, isolate_);
-#endif
} else {
BytecodeArray::cast(*code).Disassemble(stream);
}
@@ -1543,11 +1591,16 @@ void V8FileLogger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
msg.WriteToLogFile();
}
-void V8FileLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
+void V8FileLogger::CodeMoveEvent(InstructionStream from, InstructionStream to) {
if (!is_listening_to_code_events()) return;
- PtrComprCageBase cage_base(isolate_);
- MoveEventInternal(Event::kCodeMove, from.InstructionStart(cage_base),
- to.InstructionStart(cage_base));
+ MoveEventInternal(Event::kCodeMove, from.instruction_start(),
+ to.instruction_start());
+}
+
+void V8FileLogger::BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) {
+ if (!is_listening_to_code_events()) return;
+ MoveEventInternal(Event::kCodeMove, from.GetFirstBytecodeAddress(),
+ to.GetFirstBytecodeAddress());
}
void V8FileLogger::SharedFunctionInfoMoveEvent(Address from, Address to) {
@@ -1739,9 +1792,12 @@ void V8FileLogger::ScriptEvent(ScriptEventType type, int script_id) {
case ScriptEventType::kBackgroundCompile:
msg << "background-compile";
break;
- case ScriptEventType::kStreamingCompile:
+ case ScriptEventType::kStreamingCompileBackground:
msg << "streaming-compile";
break;
+ case ScriptEventType::kStreamingCompileForeground:
+ msg << "streaming-compile-foreground";
+ break;
}
msg << V8FileLogger::kNext << script_id << V8FileLogger::kNext << Time();
msg.WriteToLogFile();
@@ -1949,12 +2005,10 @@ EnumerateCompiledFunctions(Heap* heap) {
JSFunction function = JSFunction::cast(obj);
// TODO(jarin) This leaves out deoptimized code that might still be on the
// stack. Also note that we will not log optimized code objects that are
- // only on a type feedback vector. We should make this mroe precise.
+ // only on a type feedback vector. We should make this more precise.
if (function.HasAttachedOptimizedCode() &&
Script::cast(function.shared().script()).HasValidSource()) {
- // TODO(v8:13261): use ToAbstractCode() here.
- record(function.shared(),
- AbstractCode::cast(FromCodeT(function.code())));
+ record(function.shared(), AbstractCode::cast(function.code()));
}
}
}
@@ -1983,8 +2037,9 @@ void V8FileLogger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
existing_code_logger_.LogExistingFunction(shared, code);
}
-void V8FileLogger::LogCompiledFunctions() {
- existing_code_logger_.LogCompiledFunctions();
+void V8FileLogger::LogCompiledFunctions(
+ bool ensure_source_positions_available) {
+ existing_code_logger_.LogCompiledFunctions(ensure_source_positions_available);
}
void V8FileLogger::LogBuiltins() { existing_code_logger_.LogBuiltins(); }
@@ -2059,8 +2114,7 @@ static void PrepareLogFileName(std::ostream& os, Isolate* isolate,
break;
case 't':
// %t expands to the current time in milliseconds.
- os << static_cast<int64_t>(
- V8::GetCurrentPlatform()->CurrentClockTimeMillis());
+ os << V8::GetCurrentPlatform()->CurrentClockTimeMilliseconds();
break;
case '%':
// %% expands (contracts really) to %.
@@ -2153,6 +2207,9 @@ void V8FileLogger::SetEtwCodeEventHandler(uint32_t options) {
etw_jit_logger_ = std::make_unique<ETWJitLogger>(isolate_);
AddLogEventListener(etw_jit_logger_.get());
CHECK(isolate_->logger()->is_listening_to_code_events());
+ // Always enumerate existing code for new isolates; otherwise the builtins
+ // are never traversed.
+ options |= kJitCodeEventEnumExisting;
}
if (options & kJitCodeEventEnumExisting) {
@@ -2164,7 +2221,7 @@ void V8FileLogger::SetEtwCodeEventHandler(uint32_t options) {
HandleScope scope(isolate_);
LogBuiltins();
LogCodeObjects();
- LogCompiledFunctions();
+ LogCompiledFunctions(false);
}
}
@@ -2285,7 +2342,7 @@ void ExistingCodeLogger::LogCodeObject(AbstractCode object) {
tag = CodeTag::kBytecodeHandler;
break;
case CodeKind::BUILTIN:
- if (!abstract_code->is_off_heap_trampoline(cage_base)) {
+ if (abstract_code->has_instruction_stream(cage_base)) {
DCHECK_EQ(abstract_code->builtin_id(cage_base),
Builtin::kInterpreterEntryTrampoline);
// We treat interpreter trampoline builtin copies as
@@ -2325,26 +2382,15 @@ void ExistingCodeLogger::LogCodeObject(AbstractCode object) {
void ExistingCodeLogger::LogCodeObjects() {
Heap* heap = isolate_->heap();
- HeapObjectIterator iterator(heap);
+ CombinedHeapObjectIterator iterator(heap);
DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base(isolate_);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
InstanceType instance_type = obj.map(cage_base).instance_type();
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // In this case AbstactCode is Code|CodeDataContainer|BytecodeArray but
- // we want to log code objects only once, thus we ignore Code objects
- // which will be logged via corresponding CodeDataContainer.
- if (InstanceTypeChecker::IsCodeT(instance_type) ||
- InstanceTypeChecker::IsBytecodeArray(instance_type)) {
- LogCodeObject(AbstractCode::cast(obj));
- }
- } else {
- // In this case AbstactCode is Code|BytecodeArray.
- if (InstanceTypeChecker::IsCode(instance_type) ||
- InstanceTypeChecker::IsBytecodeArray(instance_type)) {
- LogCodeObject(AbstractCode::cast(obj));
- }
+ if (InstanceTypeChecker::IsCode(instance_type) ||
+ InstanceTypeChecker::IsBytecodeArray(instance_type)) {
+ LogCodeObject(AbstractCode::cast(obj));
}
}
}
@@ -2352,13 +2398,14 @@ void ExistingCodeLogger::LogCodeObjects() {
void ExistingCodeLogger::LogBuiltins() {
DCHECK(isolate_->builtins()->is_initialized());
// The main "copy" of used builtins are logged by LogCodeObjects() while
- // iterating CodeT objects.
+ // iterating Code objects.
// TODO(v8:11880): Log other copies of remapped builtins once we
// decide to remap them multiple times into the code range (for example
// for arm64).
}
-void ExistingCodeLogger::LogCompiledFunctions() {
+void ExistingCodeLogger::LogCompiledFunctions(
+ bool ensure_source_positions_available) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
std::vector<std::pair<Handle<SharedFunctionInfo>, Handle<AbstractCode>>>
@@ -2368,27 +2415,24 @@ void ExistingCodeLogger::LogCompiledFunctions() {
// GetScriptLineNumber call.
for (auto& pair : compiled_funcs) {
Handle<SharedFunctionInfo> shared = pair.first;
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
+ if (ensure_source_positions_available) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
+ }
if (shared->HasInterpreterData()) {
- // TODO(v8:13261): use ToAbstractCode() here.
LogExistingFunction(
shared,
Handle<AbstractCode>(
- AbstractCode::cast(FromCodeT(shared->InterpreterTrampoline())),
- isolate_));
+ AbstractCode::cast(shared->InterpreterTrampoline()), isolate_));
}
if (shared->HasBaselineCode()) {
- // TODO(v8:13261): use ToAbstractCode() here.
- LogExistingFunction(shared, Handle<AbstractCode>(
- AbstractCode::cast(FromCodeT(
- shared->baseline_code(kAcquireLoad))),
- isolate_));
+ LogExistingFunction(
+ shared, Handle<AbstractCode>(
+ AbstractCode::cast(shared->baseline_code(kAcquireLoad)),
+ isolate_));
}
- // Can't use .is_identical_to() because AbstractCode might be both Code and
- // non-Code object and regular tagged comparison or compressed values might
- // not be correct when V8_EXTERNAL_CODE_SPACE is enabled.
- if (*pair.second == ToAbstractCode(*BUILTIN_CODE(isolate_, CompileLazy)))
+ if (pair.second.is_identical_to(BUILTIN_CODE(isolate_, CompileLazy))) {
continue;
+ }
LogExistingFunction(pair.first, pair.second);
}
@@ -2442,7 +2486,7 @@ void ExistingCodeLogger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
#if USES_FUNCTION_DESCRIPTORS
entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
#endif
- Handle<String> fun_name = SharedFunctionInfo::DebugName(shared);
+ Handle<String> fun_name = SharedFunctionInfo::DebugName(isolate_, shared);
CALL_CODE_EVENT_HANDLER(CallbackEvent(fun_name, entry_point))
// Fast API function.
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 339031c4ff..c0720c9f27 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -95,7 +95,7 @@ class ExistingCodeLogger {
void LogCodeObjects();
void LogBuiltins();
- void LogCompiledFunctions();
+ void LogCompiledFunctions(bool ensure_source_positions_available = true);
void LogExistingFunction(
Handle<SharedFunctionInfo> shared, Handle<AbstractCode> code,
LogEventListener::CodeTag tag = LogEventListener::CodeTag::kFunction);
@@ -110,14 +110,6 @@ enum class LogSeparator;
class V8FileLogger : public LogEventListener {
public:
- enum class ScriptEventType {
- kReserveId,
- kCreate,
- kDeserialize,
- kBackgroundCompile,
- kStreamingCompile
- };
-
explicit V8FileLogger(Isolate* isolate);
~V8FileLogger() override;
@@ -198,7 +190,8 @@ class V8FileLogger : public LogEventListener {
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override;
void RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) override;
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override;
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override;
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override;
@@ -280,7 +273,8 @@ class V8FileLogger : public LogEventListener {
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code);
// Logs all compiled functions found in the heap.
- V8_EXPORT_PRIVATE void LogCompiledFunctions();
+ V8_EXPORT_PRIVATE void LogCompiledFunctions(
+ bool ensure_source_positions_available = true);
// Logs all accessor callbacks found in the heap.
V8_EXPORT_PRIVATE void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
@@ -453,7 +447,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public LogEventListener {
private:
class NameBuffer;
- virtual void LogRecordedBuffer(Handle<AbstractCode> code,
+ virtual void LogRecordedBuffer(AbstractCode code,
MaybeHandle<SharedFunctionInfo> maybe_shared,
const char* name, int length) = 0;
#if V8_ENABLE_WEBASSEMBLY
@@ -505,7 +499,8 @@ class ExternalLogEventListener : public LogEventListener {
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override {}
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override;
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
void CodeMovingGCEvent() override {}
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index fd51b6b5b0..8b00ac14a0 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -144,6 +144,7 @@ class RuntimeCallTimer final {
V(BigUint64Array_New) \
V(BooleanObject_BooleanValue) \
V(BooleanObject_New) \
+ V(Context_DeepFreeze) \
V(Context_New) \
V(Context_NewRemoteContext) \
V(DataView_New) \
@@ -372,9 +373,17 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifyLoops) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TraceScheduleAndVerify) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildTurboshaft) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeTurboshaft) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftBuildGraph) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftDeadCodeElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, \
+ TurboshaftDecompressionOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftLateOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftMachineLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftOptimize) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftRecreateSchedule) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTagUntagLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTypeAssertions) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTypedOptimizations) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \
@@ -470,10 +479,14 @@ class RuntimeCallTimer final {
V(OptimizeBackgroundDispatcherJob) \
V(OptimizeCode) \
V(OptimizeConcurrentFinalize) \
+ V(OptimizeConcurrentFinalizeMaglev) \
V(OptimizeConcurrentPrepare) \
V(OptimizeFinalizePipelineJob) \
V(OptimizeHeapBrokerInitialization) \
V(OptimizeNonConcurrent) \
+ V(OptimizeNonConcurrentMaglev) \
+ V(OptimizeBackgroundMaglev) \
+ V(OptimizeRevectorizer) \
V(OptimizeSerialization) \
V(OptimizeSerializeMetadata) \
V(ParseEval) \
@@ -489,21 +502,6 @@ class RuntimeCallTimer final {
V(TestCounter2) \
V(TestCounter3) \
V(UpdateProtector) \
- V(WebSnapshotDeserialize) \
- V(WebSnapshotDeserialize_Arrays) \
- V(WebSnapshotDeserialize_ArrayBuffers) \
- V(WebSnapshotDeserialize_BigInts) \
- V(WebSnapshotDeserialize_BuiltinObjects) \
- V(WebSnapshotDeserialize_Classes) \
- V(WebSnapshotDeserialize_Contexts) \
- V(WebSnapshotDeserialize_DataViews) \
- V(WebSnapshotDeserialize_Exports) \
- V(WebSnapshotDeserialize_Functions) \
- V(WebSnapshotDeserialize_Maps) \
- V(WebSnapshotDeserialize_Objects) \
- V(WebSnapshotDeserialize_Strings) \
- V(WebSnapshotDeserialize_Symbols) \
- V(WebSnapshotDeserialize_TypedArrays) \
V(WrappedFunctionLengthGetter) \
V(WrappedFunctionNameGetter)
diff --git a/deps/v8/src/maglev/DEPS b/deps/v8/src/maglev/DEPS
index b6aca49f37..6bb6ecf188 100644
--- a/deps/v8/src/maglev/DEPS
+++ b/deps/v8/src/maglev/DEPS
@@ -8,10 +8,4 @@ specific_include_rules = {
"maglev-graph-builder\.h": [
"+src/interpreter/interpreter-intrinsics.h",
],
- "maglev-ir\.cc": [
- # Allow Maglev to reuse the baseline assembler.
- # TODO(v8:7700): Clean up these dependencies by extracting common code to a
- # separate directory.
- "+src/baseline/baseline-assembler-inl.h",
- ],
}
diff --git a/deps/v8/src/maglev/arm64/maglev-assembler-arm64-inl.h b/deps/v8/src/maglev/arm64/maglev-assembler-arm64-inl.h
new file mode 100644
index 0000000000..4d7cec4358
--- /dev/null
+++ b/deps/v8/src/maglev/arm64/maglev-assembler-arm64-inl.h
@@ -0,0 +1,864 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_
+#define V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_
+
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/compiler/compilation-dependencies.h"
+#include "src/maglev/maglev-assembler.h"
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-code-gen-state.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+constexpr Condition ConditionFor(Operation operation) {
+ switch (operation) {
+ case Operation::kEqual:
+ case Operation::kStrictEqual:
+ return eq;
+ case Operation::kLessThan:
+ return lt;
+ case Operation::kLessThanOrEqual:
+ return le;
+ case Operation::kGreaterThan:
+ return gt;
+ case Operation::kGreaterThanOrEqual:
+ return ge;
+ default:
+ UNREACHABLE();
+ }
+}
+
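+// Converts an element scale (1, 2 or 4 bytes) into the corresponding
+// left-shift amount used for scaled addressing.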
+inline int ShiftFromScale(int n) {
+ switch (n) {
+ case 1:
+ return 0;
+ case 2:
+ return 1;
+ case 4:
+ return 2;
+ default:
+ UNREACHABLE();
+ }
+}
+
+class MaglevAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(MaglevAssembler* masm) : wrapped_scope_(masm) {
+ // This field is never used in arm64.
+ DCHECK_NULL(masm->scratch_register_scope_);
+ }
+
+ Register Acquire() { return wrapped_scope_.AcquireX(); }
+ void Include(Register reg) { wrapped_scope_.Include(reg); }
+ void Include(const RegList list) {
+ wrapped_scope_.Include(CPURegList(kXRegSizeInBits, list));
+ }
+
+ DoubleRegister AcquireDouble() { return wrapped_scope_.AcquireD(); }
+ void IncludeDouble(const DoubleRegList list) {
+ wrapped_scope_.IncludeFP(CPURegList(kDRegSizeInBits, list));
+ }
+
+ RegList Available() {
+ return RegList::FromBits(wrapped_scope_.Available()->bits());
+ }
+ void SetAvailable(RegList list) {
+ wrapped_scope_.SetAvailable(CPURegList(kXRegSizeInBits, list));
+ }
+
+ DoubleRegList AvailableDouble() {
+ uint64_t bits = wrapped_scope_.AvailableFP()->bits();
+ // AvailableFP fits in a 32-bit word.
+ DCHECK_LE(bits, std::numeric_limits<uint32_t>::max());
+ return DoubleRegList::FromBits(static_cast<uint32_t>(bits));
+ }
+ void SetAvailableDouble(DoubleRegList list) {
+ wrapped_scope_.SetAvailableFP(CPURegList(kDRegSizeInBits, list));
+ }
+
+ private:
+ UseScratchRegisterScope wrapped_scope_;
+};
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(MaglevAssembler* masm,
+ MaglevAssembler::ScratchRegisterScope* scratch,
+ Arg arg) {
+ Register reg = scratch->Acquire();
+ masm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(MaglevAssembler* masm,
+ MaglevAssembler::ScratchRegisterScope* scratch,
+ Register reg) {
+ return reg;
+}
+inline Register ToRegister(MaglevAssembler* masm,
+ MaglevAssembler::ScratchRegisterScope* scratch,
+ const Input& input) {
+ if (input.operand().IsConstant()) {
+ Register reg = scratch->Acquire();
+ input.node()->LoadToRegister(masm, reg);
+ return reg;
+ }
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input.operand());
+ if (operand.IsRegister()) {
+ return ToRegister(input);
+ } else {
+ DCHECK(operand.IsStackSlot());
+ Register reg = scratch->Acquire();
+ masm->Move(reg, masm->ToMemOperand(input));
+ return reg;
+ }
+}
+
+template <typename... Args>
+struct CountPushHelper;
+
+template <>
+struct CountPushHelper<> {
+ static int Count() { return 0; }
+};
+
+template <typename Arg, typename... Args>
+struct CountPushHelper<Arg, Args...> {
+ static int Count(Arg arg, Args... args) {
+ int arg_count = 1;
+ if constexpr (is_iterator_range<Arg>::value) {
+ arg_count = static_cast<int>(std::distance(arg.begin(), arg.end()));
+ }
+ return arg_count + CountPushHelper<Args...>::Count(args...);
+ }
+};
+
+template <typename... Args>
+struct PushAllHelper;
+
+template <typename... Args>
+inline void PushAll(MaglevAssembler* masm, Args... args) {
+ PushAllHelper<Args...>::Push(masm, args...);
+}
+
+template <typename... Args>
+inline void PushAllReverse(MaglevAssembler* masm, Args... args) {
+ PushAllHelper<Args...>::PushReverse(masm, args...);
+}
+
+template <>
+struct PushAllHelper<> {
+ static void Push(MaglevAssembler* masm) {}
+ static void PushReverse(MaglevAssembler* masm) {}
+};
+
+template <typename T, typename... Args>
+inline void PushIterator(MaglevAssembler* masm, base::iterator_range<T> range,
+ Args... args) {
+ using value_type = typename base::iterator_range<T>::value_type;
+ for (auto iter = range.begin(), end = range.end(); iter != end; ++iter) {
+ value_type val1 = *iter;
+ ++iter;
+ if (iter == end) {
+ PushAll(masm, val1, args...);
+ return;
+ }
+ value_type val2 = *iter;
+ masm->Push(val1, val2);
+ }
+ PushAll(masm, args...);
+}
+
+template <typename T, typename... Args>
+inline void PushIteratorReverse(MaglevAssembler* masm,
+ base::iterator_range<T> range, Args... args) {
+ using value_type = typename base::iterator_range<T>::value_type;
+ using difference_type = typename base::iterator_range<T>::difference_type;
+ difference_type count = std::distance(range.begin(), range.end());
+ DCHECK_GE(count, 0);
+ auto iter = range.rbegin();
+ auto end = range.rend();
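+ // When the count is odd, push one leading element first (PushAllReverse
+ // handles the padding) so the remaining elements can be pushed in pairs.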
+ if (count % 2 != 0) {
+ PushAllReverse(masm, *iter, args...);
+ ++iter;
+ } else {
+ PushAllReverse(masm, args...);
+ }
+ while (iter != end) {
+ value_type val1 = *iter;
+ ++iter;
+ value_type val2 = *iter;
+ ++iter;
+ masm->Push(val1, val2);
+ }
+}
+
+template <typename Arg1, typename Arg2>
+inline void PushAligned(MaglevAssembler* masm, Arg1 arg1, Arg2 arg2) {
+ {
+ // Push the first argument together with padding to ensure alignment.
+ // The second argument is not pushed together with the first so we can
+ // re-use any scratch registers used to materialise the first argument for
+ // the second one.
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ masm->MacroAssembler::Push(ToRegister(masm, &temps, arg1), padreg);
+ }
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ masm->MacroAssembler::str(ToRegister(masm, &temps, arg2), MemOperand(sp));
+ }
+}
+
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static void Push(MaglevAssembler* masm, Arg arg) {
+ if constexpr (is_iterator_range<Arg>::value) {
+ PushIterator(masm, arg);
+ } else {
+ FATAL("Unaligned push");
+ }
+ }
+ static void PushReverse(MaglevAssembler* masm, Arg arg) {
+ if constexpr (is_iterator_range<Arg>::value) {
+ PushIteratorReverse(masm, arg);
+ } else {
+ PushAllReverse(masm, arg, padreg);
+ }
+ }
+};
+
+template <typename Arg1, typename Arg2, typename... Args>
+struct PushAllHelper<Arg1, Arg2, Args...> {
+ static void Push(MaglevAssembler* masm, Arg1 arg1, Arg2 arg2, Args... args) {
+ if constexpr (is_iterator_range<Arg1>::value) {
+ PushIterator(masm, arg1, arg2, args...);
+ } else if constexpr (is_iterator_range<Arg2>::value) {
+ if (arg2.begin() != arg2.end()) {
+ auto val = *arg2.begin();
+ PushAligned(masm, arg1, val);
+ PushAll(masm,
+ base::make_iterator_range(std::next(arg2.begin()), arg2.end()),
+ args...);
+ } else {
+ PushAll(masm, arg1, args...);
+ }
+ } else {
+ PushAligned(masm, arg1, arg2);
+ PushAll(masm, args...);
+ }
+ }
+ static void PushReverse(MaglevAssembler* masm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ if constexpr (is_iterator_range<Arg1>::value) {
+ PushIteratorReverse(masm, arg1, arg2, args...);
+ } else if constexpr (is_iterator_range<Arg2>::value) {
+ if (arg2.begin() != arg2.end()) {
+ auto val = *arg2.begin();
+ PushAllReverse(
+ masm,
+ base::make_iterator_range(std::next(arg2.begin()), arg2.end()),
+ args...);
+ PushAligned(masm, val, arg1);
+ } else {
+ PushAllReverse(masm, arg1, args...);
+ }
+ } else {
+ PushAllReverse(masm, args...);
+ PushAligned(masm, arg2, arg1);
+ }
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+void MaglevAssembler::Push(T... vals) {
+ const int push_count = detail::CountPushHelper<T...>::Count(vals...);
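+ // The arm64 stack pointer must stay 16-byte aligned, so an odd number of
+ // values is padded with padreg.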
+ if (push_count % 2 == 0) {
+ detail::PushAll(this, vals...);
+ } else {
+ detail::PushAll(this, padreg, vals...);
+ }
+}
+
+template <typename... T>
+void MaglevAssembler::PushReverse(T... vals) {
+ detail::PushAllReverse(this, vals...);
+}
+
+inline void MaglevAssembler::BindJumpTarget(Label* label) {
+ MacroAssembler::BindJumpTarget(label);
+}
+
+inline void MaglevAssembler::BindBlock(BasicBlock* block) {
+ if (block->is_start_block_of_switch_case()) {
+ BindJumpTarget(block->label());
+ } else {
+ Bind(block->label());
+ }
+}
+
+inline void MaglevAssembler::DoubleToInt64Repr(Register dst,
+ DoubleRegister src) {
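+ // Moves the raw 64-bit bit pattern of the double (vector lane 0) into dst.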
+ Mov(dst, src, 0);
+}
+
+inline void MaglevAssembler::SmiTagInt32(Register obj, Label* fail) {
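+ // Smi tagging is a left shift by one; adding the value to itself sets the
+ // overflow flag (vs) when the result does not fit in a Smi.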
+ Adds(obj.W(), obj.W(), obj.W());
+ JumpIf(vs, fail);
+}
+
+inline Condition MaglevAssembler::IsInt64Constant(Register reg,
+ int64_t constant) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Mov(scratch, kHoleNanInt64);
+ Cmp(reg, scratch);
+ return eq;
+}
+
+inline Condition MaglevAssembler::IsRootConstant(Input input,
+ RootIndex root_index) {
+ if (input.operand().IsRegister()) {
+ CompareRoot(ToRegister(input), root_index);
+ } else {
+ DCHECK(input.operand().IsStackSlot());
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Ldr(scratch, ToMemOperand(input));
+ CompareRoot(scratch, root_index);
+ }
+ return eq;
+}
+
+inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot slot) {
+ return MemOperand(fp, slot.index);
+}
+
+// TODO(Victorgomes): Unify this to use StackSlot struct.
+inline MemOperand MaglevAssembler::GetStackSlot(
+ const compiler::AllocatedOperand& operand) {
+ return MemOperand(fp, GetFramePointerOffsetForStackSlot(operand));
+}
+
+inline MemOperand MaglevAssembler::ToMemOperand(
+ const compiler::InstructionOperand& operand) {
+ return GetStackSlot(compiler::AllocatedOperand::cast(operand));
+}
+
+inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
+ return ToMemOperand(location.operand());
+}
+
+inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
+ Register object) {
+ DCHECK_NE(data_pointer, object);
+ LoadExternalPointerField(
+ data_pointer,
+ FieldMemOperand(object, JSTypedArray::kExternalPointerOffset));
+ if (JSTypedArray::kMaxSizeInHeap == 0) return;
+ ScratchRegisterScope scope(this);
+ Register base = scope.Acquire();
+ Ldr(base.W(), FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
+ Add(data_pointer, data_pointer, base);
+}
+
+inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
+ Register object,
+ Register index, int scale,
+ int offset) {
+ if (scale == 1) {
+ Add(result, object, index);
+ } else {
+ Add(result, object, Operand(index, LSL, ShiftFromScale(scale / 2)));
+ }
+ MacroAssembler::LoadTaggedField(result, FieldMemOperand(result, offset));
+}
+
+inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
+ Register object,
+ int offset) {
+ Move(result, FieldMemOperand(object, offset));
+#ifdef V8_ENABLE_SANDBOX
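+ // With the sandbox enabled, bounded size fields are stored shifted by
+ // kBoundedSizeShift and must be decoded here.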
+ Lsr(result, result, kBoundedSizeShift);
+#endif // V8_ENABLE_SANDBOX
+}
+
+inline void MaglevAssembler::LoadExternalPointerField(Register result,
+ MemOperand operand) {
+#ifdef V8_ENABLE_SANDBOX
+ LoadSandboxedPointerField(result, operand);
+#else
+ Move(result, operand);
+#endif
+}
+
+inline void MaglevAssembler::LoadSignedField(Register result,
+ MemOperand operand, int size) {
+ if (size == 1) {
+ Ldrsb(result, operand);
+ } else if (size == 2) {
+ Ldrsh(result, operand);
+ } else {
+ DCHECK_EQ(size, 4);
+ DCHECK(result.IsW());
+ Ldr(result, operand);
+ }
+}
+
+inline void MaglevAssembler::LoadUnsignedField(Register result,
+ MemOperand operand, int size) {
+ if (size == 1) {
+ Ldrb(result, operand);
+ } else if (size == 2) {
+ Ldrh(result, operand);
+ } else {
+ DCHECK_EQ(size, 4);
+ DCHECK(result.IsW());
+ Ldr(result, operand);
+ }
+}
+
+inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
+ Register value) {
+ AssertSmi(value);
+ StoreTaggedField(value, FieldMemOperand(object, offset));
+}
+
+inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
+ Smi value) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Mov(scratch, value);
+ StoreTaggedField(scratch, FieldMemOperand(object, offset));
+}
+
+inline void MaglevAssembler::StoreField(MemOperand operand, Register value,
+ int size) {
+ DCHECK(size == 1 || size == 2 || size == 4);
+ if (size == 1) {
+ Strb(value, operand);
+ } else if (size == 2) {
+ Strh(value, operand);
+ } else {
+ DCHECK_EQ(size, 4);
+ DCHECK(value.IsW());
+ Str(value, operand);
+ }
+}
+
+inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
+ if (size == 2) {
+ Rev16(value, value);
+ } else if (size == 4) {
+ Rev32(value, value);
+ } else {
+ DCHECK_EQ(size, 1);
+ }
+}
+
+inline void MaglevAssembler::Move(StackSlot dst, Register src) {
+ Str(src, StackSlotOperand(dst));
+}
+inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
+ Str(src, StackSlotOperand(dst));
+}
+inline void MaglevAssembler::Move(Register dst, StackSlot src) {
+ Ldr(dst, StackSlotOperand(src));
+}
+inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
+ Ldr(dst, StackSlotOperand(src));
+}
+inline void MaglevAssembler::Move(MemOperand dst, Register src) {
+ Str(src, dst);
+}
+inline void MaglevAssembler::Move(MemOperand dst, DoubleRegister src) {
+ Str(src, dst);
+}
+inline void MaglevAssembler::Move(Register dst, MemOperand src) {
+ Ldr(dst, src);
+}
+inline void MaglevAssembler::Move(DoubleRegister dst, MemOperand src) {
+ Ldr(dst, src);
+}
+inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+ Fmov(dst, src);
+}
+inline void MaglevAssembler::Move(Register dst, Smi src) {
+ MacroAssembler::Move(dst, src);
+}
+inline void MaglevAssembler::Move(Register dst, ExternalReference src) {
+ Mov(dst, src);
+}
+inline void MaglevAssembler::Move(Register dst, Register src) {
+ MacroAssembler::Move(dst, src);
+}
+inline void MaglevAssembler::Move(Register dst, TaggedIndex i) {
+ Mov(dst, i.ptr());
+}
+inline void MaglevAssembler::Move(Register dst, int32_t i) {
+ Mov(dst.W(), Immediate(i));
+}
+inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
+ Fmov(dst, n);
+}
+inline void MaglevAssembler::Move(DoubleRegister dst, Float64 n) {
+ Fmov(dst, n.get_scalar());
+}
+inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
+ Mov(dst, Operand(obj));
+}
+
+inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
+ Mov(dst, Operand(src.W(), SXTW));
+}
+inline void MaglevAssembler::NegateInt32(Register val) {
+ Neg(val.W(), val.W());
+}
+
+inline void MaglevAssembler::ToUint8Clamped(Register result,
+ DoubleRegister value, Label* min,
+ Label* max, Label* done) {
+ ScratchRegisterScope temps(this);
+ DoubleRegister scratch = temps.AcquireDouble();
+ Move(scratch, 0.0);
+ Fcmp(scratch, value);
+ // Set to 0 if NaN.
+ B(vs, min);
+ B(ge, min);
+ Move(scratch, 255.0);
+ Fcmp(value, scratch);
+ B(ge, max);
+ // If the value is in [0, 255], round it to the nearest integer.
+ Frintn(scratch, value);
+ TruncateDoubleToInt32(result, scratch);
+ B(done);
+}
+
+template <typename NodeT>
+inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
+ Register scratch,
+ NodeT* node) {
+ if (!code_gen_state()
+ ->broker()
+ ->dependencies()
+ ->DependOnArrayBufferDetachingProtector()) {
+ // A detached buffer leads to megamorphic feedback, so we won't have a deopt
+ // loop if we deopt here.
+ LoadTaggedField(scratch,
+ FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
+ LoadTaggedField(scratch,
+ FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+ Tst(scratch.W(), Immediate(JSArrayBuffer::WasDetachedBit::kMask));
+ EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node);
+ }
+}
+
+inline void MaglevAssembler::LoadByte(Register dst, MemOperand src) {
+ Ldrb(dst, src);
+}
+
+inline void MaglevAssembler::IsObjectType(Register heap_object,
+ InstanceType type) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MacroAssembler::IsObjectType(heap_object, scratch, scratch, type);
+}
+
+inline void MaglevAssembler::CompareObjectType(Register heap_object,
+ InstanceType type) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareObjectType(heap_object, type, scratch);
+}
+
+inline void MaglevAssembler::JumpIfJSAnyIsNotPrimitive(
+ Register heap_object, Label* target, Label::Distance distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MacroAssembler::JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target,
+ distance);
+}
+
+inline void MaglevAssembler::CompareObjectType(Register heap_object,
+ InstanceType type,
+ Register scratch) {
+ LoadMap(scratch, heap_object);
+ CompareInstanceType(scratch, scratch, type);
+}
+
+inline void MaglevAssembler::CompareObjectTypeRange(Register heap_object,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadMap(scratch, heap_object);
+ CompareInstanceTypeRange(scratch, scratch, lower_limit, higher_limit);
+}
+
+inline void MaglevAssembler::CompareMapWithRoot(Register object,
+ RootIndex index,
+ Register scratch) {
+ if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
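+ // With static read-only roots, the compressed map word can be compared
+ // directly against the root's known pointer value; no decompression needed.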
+ Ldr(scratch.W(), FieldMemOperand(object, HeapObject::kMapOffset));
+ CmpTagged(scratch, Immediate(ReadOnlyRootPtr(index)));
+ return;
+ }
+ LoadMap(scratch, object);
+ CompareRoot(scratch, index);
+}
+
+inline void MaglevAssembler::CompareInstanceTypeRange(
+ Register map, InstanceType lower_limit, InstanceType higher_limit) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareInstanceTypeRange(map, scratch, lower_limit, higher_limit);
+}
+
+inline void MaglevAssembler::CompareInstanceTypeRange(
+ Register map, Register instance_type_out, InstanceType lower_limit,
+ InstanceType higher_limit) {
+ MacroAssembler::CompareInstanceTypeRange(map, instance_type_out, lower_limit,
+ higher_limit);
+}
+
+inline void MaglevAssembler::CompareTagged(Register reg,
+ Handle<HeapObject> obj) {
+ CmpTagged(reg, Operand(obj, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+}
+
+inline void MaglevAssembler::CompareTagged(Register src1, Register src2) {
+ CmpTagged(src1, src2);
+}
+
+inline void MaglevAssembler::CompareInt32(Register reg, int32_t imm) {
+ Cmp(reg.W(), Immediate(imm));
+}
+
+inline void MaglevAssembler::CompareInt32(Register src1, Register src2) {
+ Cmp(src1.W(), src2.W());
+}
+
+inline void MaglevAssembler::CallSelf() {
+ DCHECK(code_gen_state()->entry_label()->is_bound());
+ Bl(code_gen_state()->entry_label());
+}
+
+inline void MaglevAssembler::Jump(Label* target, Label::Distance) { B(target); }
+
+inline void MaglevAssembler::JumpIf(Condition cond, Label* target,
+ Label::Distance) {
+ B(target, cond);
+}
+
+inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index,
+ Label* if_equal,
+ Label::Distance distance) {
+ MacroAssembler::JumpIfRoot(with, index, if_equal);
+}
+
+inline void MaglevAssembler::JumpIfNotRoot(Register with, RootIndex index,
+ Label* if_not_equal,
+ Label::Distance distance) {
+ MacroAssembler::JumpIfNotRoot(with, index, if_not_equal);
+}
+
+inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
+ Label::Distance distance) {
+ MacroAssembler::JumpIfSmi(src, on_smi);
+}
+
+void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+ Label* target, Label::Distance) {
+ CompareAndBranch(value, Immediate(byte), cc, target);
+}
+
+inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
+ Condition cond,
+ Label* target,
+ Label::Distance distance) {
+ CompareAndBranch(r1.W(), r2.W(), cond, target);
+}
+
+inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
+ Condition cond,
+ Label* target,
+ Label::Distance distance) {
+ CompareAndBranch(r1.W(), Immediate(value), cond, target);
+}
+
+inline void MaglevAssembler::CompareSmiAndJumpIf(Register r1, Smi value,
+ Condition cond, Label* target,
+ Label::Distance distance) {
+ AssertSmi(r1);
+ CompareAndBranch(r1, Immediate(value), cond, target);
+}
+
+inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
+ Register r1, int32_t mask, Label* target, Label::Distance distance) {
+ TestAndBranchIfAnySet(r1.W(), mask, target);
+}
+
+inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
+ Register r1, int32_t mask, Label* target, Label::Distance distance) {
+ TestAndBranchIfAllClear(r1.W(), mask, target);
+}
+
+inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
+ Register heap_number) {
+ Ldr(result, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+}
+
+inline void MaglevAssembler::Int32ToDouble(DoubleRegister result, Register n) {
+ Scvtf(result, n.W());
+}
+
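+ // Pops include padreg so that sp stays 16-byte aligned, mirroring Push.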
+inline void MaglevAssembler::Pop(Register dst) { Pop(dst, padreg); }
+
+inline void MaglevAssembler::AssertStackSizeCorrect() {
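+ // In debug builds, recompute the expected frame pointer from sp and the
+ // frame size, and check that it matches fp.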
+ if (v8_flags.debug_code) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add(scratch, sp,
+ RoundUp<2 * kSystemPointerSize>(
+ code_gen_state()->stack_slots() * kSystemPointerSize +
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ Cmp(scratch, fp);
+ Assert(eq, AbortReason::kStackAccessBelowStackPointer);
+ }
+}
+
+inline void MaglevAssembler::FinishCode() {
+ ForceConstantPoolEmissionWithoutJump();
+}
+
+template <typename NodeT>
+inline void MaglevAssembler::EmitEagerDeoptIfNotEqual(DeoptimizeReason reason,
+ NodeT* node) {
+ EmitEagerDeoptIf(ne, reason, node);
+}
+
+inline void MaglevAssembler::MaterialiseValueNode(Register dst,
+ ValueNode* value) {
+ switch (value->opcode()) {
+ case Opcode::kInt32Constant: {
+ int32_t int_value = value->Cast<Int32Constant>()->value();
+ if (Smi::IsValid(int_value)) {
+ Move(dst, Smi::FromInt(int_value));
+ } else {
+ MoveHeapNumber(dst, int_value);
+ }
+ return;
+ }
+ case Opcode::kFloat64Constant: {
+ double double_value =
+ value->Cast<Float64Constant>()->value().get_scalar();
+ MoveHeapNumber(dst, double_value);
+ return;
+ }
+ default:
+ break;
+ }
+
+ DCHECK(!value->allocation().IsConstant());
+ DCHECK(value->allocation().IsAnyStackSlot());
+ using D = NewHeapNumberDescriptor;
+ MemOperand src = ToMemOperand(value->allocation());
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kInt32: {
+ Label done;
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Ldr(scratch.W(), src);
+ Adds(dst.W(), scratch.W(), scratch.W());
+ B(&done, vc);
+ // If we overflow, instead of bailing out (deopting), we change
+ // representation to a HeapNumber.
+ Scvtf(D::GetDoubleRegisterParameter(D::kValue), scratch);
+ CallBuiltin(Builtin::kNewHeapNumber);
+ Move(dst, kReturnRegister0);
+ bind(&done);
+ break;
+ }
+ case ValueRepresentation::kUint32: {
+ Label done, tag_smi;
+ Ldr(dst.W(), src);
+ // Unsigned comparison against Smi::kMaxValue.
+ Cmp(dst.W(), Immediate(Smi::kMaxValue));
+ B(&tag_smi, ls);
+ // If we don't fit in a Smi, instead of bailing out (deopting), we
+ // change representation to a HeapNumber.
+ Ucvtf(D::GetDoubleRegisterParameter(D::kValue), dst.W());
+ CallBuiltin(Builtin::kNewHeapNumber);
+ Move(dst, kReturnRegister0);
+ B(&done);
+ bind(&tag_smi);
+ SmiTag(dst);
+ bind(&done);
+ break;
+ }
+ case ValueRepresentation::kFloat64:
+ Ldr(D::GetDoubleRegisterParameter(D::kValue), src);
+ CallBuiltin(Builtin::kNewHeapNumber);
+ Move(dst, kReturnRegister0);
+ break;
+ case ValueRepresentation::kWord64:
+ case ValueRepresentation::kTagged:
+ UNREACHABLE();
+ }
+}
+
+template <>
+inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
+ Register src) {
+ Mov(dst, src);
+}
+template <>
+inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
+ MemOperand src) {
+ switch (repr) {
+ case MachineRepresentation::kWord32:
+ return Ldr(dst.W(), src);
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ return Ldr(dst, src);
+ default:
+ UNREACHABLE();
+ }
+}
+template <>
+inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
+ MemOperand dst, Register src) {
+ switch (repr) {
+ case MachineRepresentation::kWord32:
+ return Str(src.W(), dst);
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ return Str(src, dst);
+ default:
+ UNREACHABLE();
+ }
+}
+template <>
+inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
+ MemOperand dst, MemOperand src) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MoveRepr(repr, scratch, src);
+ MoveRepr(repr, dst, scratch);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/maglev/arm64/maglev-assembler-arm64.cc b/deps/v8/src/maglev/arm64/maglev-assembler-arm64.cc
new file mode 100644
index 0000000000..a3814cfcfb
--- /dev/null
+++ b/deps/v8/src/maglev/arm64/maglev-assembler-arm64.cc
@@ -0,0 +1,901 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/maglev/maglev-assembler-inl.h"
+#include "src/maglev/maglev-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+#define __ masm->
+
+void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
+ Register object, int size_in_bytes,
+ AllocationType alloc_type,
+ AllocationAlignment alignment) {
+ // TODO(victorgomes): Call the runtime for large object allocation.
+ // TODO(victorgomes): Support double alignment.
+ DCHECK_EQ(alignment, kTaggedAligned);
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+ if (v8_flags.single_generation) {
+ alloc_type = AllocationType::kOld;
+ }
+ bool in_new_space = alloc_type == AllocationType::kYoung;
+ ExternalReference top =
+ in_new_space
+ ? ExternalReference::new_space_allocation_top_address(isolate_)
+ : ExternalReference::old_space_allocation_top_address(isolate_);
+ ExternalReference limit =
+ in_new_space
+ ? ExternalReference::new_space_allocation_limit_address(isolate_)
+ : ExternalReference::old_space_allocation_limit_address(isolate_);
+
+ ZoneLabelRef done(this);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ // We are a bit short on registers, so we use the same register for {object}
+ // and {new_top}. Once {new_top} is defined, {object} is not used again until
+ // {new_top} is used for the last time; at the end of this function we
+ // recover the original {object} from {new_top} by subtracting
+ // {size_in_bytes}.
+ Register new_top = object;
+ // Check if there is enough space.
+ Ldr(object, ExternalReferenceAsOperand(top, scratch));
+ Add(new_top, object, size_in_bytes);
+ Ldr(scratch, ExternalReferenceAsOperand(limit, scratch));
+ Cmp(new_top, scratch);
+ // Otherwise call runtime.
+ JumpToDeferredIf(
+ ge,
+ [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
+ Register object, Builtin builtin, int size_in_bytes,
+ ZoneLabelRef done) {
+ // Remove {object} from snapshot, since it is the returned allocated
+ // HeapObject.
+ register_snapshot.live_registers.clear(object);
+ register_snapshot.live_tagged_registers.clear(object);
+ {
+ SaveRegisterStateForCall save_register_state(masm, register_snapshot);
+ using D = AllocateDescriptor;
+ __ Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
+ __ CallBuiltin(builtin);
+ save_register_state.DefineSafepoint();
+ __ Move(object, kReturnRegister0);
+ }
+ __ B(*done);
+ },
+ register_snapshot, object,
+ in_new_space ? Builtin::kAllocateRegularInYoungGeneration
+ : Builtin::kAllocateRegularInOldGeneration,
+ size_in_bytes, done);
+ // Store new top and tag object.
+ Move(ExternalReferenceAsOperand(top, scratch), new_top);
+ Add(object, object, kHeapObjectTag - size_in_bytes);
+ bind(*done);
+}
+
+void MaglevAssembler::AllocateHeapNumber(RegisterSnapshot register_snapshot,
+ Register result,
+ DoubleRegister value) {
+ // If we need to call the runtime, the value register must be spilled even
+ // if it is not live in the next node; otherwise the allocation call might
+ // clobber it.
+ register_snapshot.live_double_registers.set(value);
+ Allocate(register_snapshot, result, HeapNumber::kSize);
+ // `Allocate` needs 2 scratch registers, so it's important to `Acquire` after
+ // `Allocate` is done and not before.
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadTaggedRoot(scratch, RootIndex::kHeapNumberMap);
+ StoreTaggedField(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+ Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+void MaglevAssembler::StoreTaggedFieldWithWriteBarrier(
+ Register object, int offset, Register value,
+ RegisterSnapshot register_snapshot, ValueIsCompressed value_is_compressed,
+ ValueCanBeSmi value_can_be_smi) {
+ AssertNotSmi(object);
+ StoreTaggedField(FieldMemOperand(object, offset), value);
+
+ ZoneLabelRef done(this);
+ Label* deferred_write_barrier = MakeDeferredCode(
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object, int offset,
+ Register value, RegisterSnapshot register_snapshot,
+ ValueIsCompressed value_is_compressed) {
+ ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
+ if (value_is_compressed == kValueIsCompressed) {
+ __ DecompressTagged(value, value);
+ }
+ __ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask,
+ eq, *done);
+
+ Register stub_object_reg = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
+
+ RegList saved;
+ if (object != stub_object_reg &&
+ register_snapshot.live_registers.has(stub_object_reg)) {
+ saved.set(stub_object_reg);
+ }
+ if (register_snapshot.live_registers.has(slot_reg)) {
+ saved.set(slot_reg);
+ }
+
+ __ PushAll(saved);
+
+ if (object != stub_object_reg) {
+ __ Move(stub_object_reg, object);
+ object = stub_object_reg;
+ }
+ __ Add(slot_reg, object, offset - kHeapObjectTag);
+
+ SaveFPRegsMode const save_fp_mode =
+ !register_snapshot.live_double_registers.is_empty()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+
+ __ CallRecordWriteStub(object, slot_reg, save_fp_mode);
+
+ __ PopAll(saved);
+ __ B(*done);
+ },
+ done, object, offset, value, register_snapshot, value_is_compressed);
+
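+ // Smis never require a write barrier, so skip the page flag checks for them.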
+ if (value_can_be_smi == kValueCanBeSmi) {
+ JumpIfSmi(value, *done);
+ } else {
+ AssertNotSmi(value);
+ }
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ deferred_write_barrier);
+ bind(*done);
+}
+
+void MaglevAssembler::ToBoolean(Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false,
+ bool fallthrough_when_true) {
+ ScratchRegisterScope temps(this);
+ Register map = temps.Acquire();
+
+ // Check if {{value}} is Smi.
+ Condition is_smi = CheckSmi(value);
+ JumpToDeferredIf(
+ is_smi,
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
+ // Check if {value} is not zero.
+ __ CmpTagged(value, Smi::FromInt(0));
+ __ JumpIf(eq, *is_false);
+ __ Jump(*is_true);
+ },
+ value, is_true, is_false);
+
+ // Check if {{value}} is false.
+ CompareRoot(value, RootIndex::kFalseValue);
+ JumpIf(eq, *is_false);
+
+ // Check if {{value}} is empty string.
+ CompareRoot(value, RootIndex::kempty_string);
+ JumpIf(eq, *is_false);
+
+ // Check if {{value}} is undetectable.
+ LoadMap(map, value);
+ {
+ ScratchRegisterScope scope(this);
+ Register tmp = scope.Acquire().W();
+ Move(tmp, FieldMemOperand(map, Map::kBitFieldOffset));
+ Tst(tmp, Immediate(Map::Bits1::IsUndetectableBit::kMask));
+ JumpIf(ne, *is_false);
+ }
+
+ // Check if {{value}} is a HeapNumber.
+ CompareRoot(map, RootIndex::kHeapNumberMap);
+ JumpToDeferredIf(
+ eq,
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
+ ScratchRegisterScope scope(masm);
+ DoubleRegister value_double = scope.AcquireDouble();
+ __ Ldr(value_double, FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ Fcmp(value_double, 0.0);
+ __ JumpIf(eq, *is_false);
+ __ JumpIf(vs, *is_false); // NaN check
+ __ Jump(*is_true);
+ },
+ value, is_true, is_false);
+
+ // Check if {{value}} is a BigInt.
+ CompareRoot(map, RootIndex::kBigIntMap);
+ JumpToDeferredIf(
+ eq,
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
+ ScratchRegisterScope scope(masm);
+ Register tmp = scope.Acquire().W();
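+ // A BigInt is falsy exactly when its length (LengthBits in the bitfield)
+ // is zero.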
+ __ Ldr(tmp, FieldMemOperand(value, BigInt::kBitfieldOffset));
+ __ Tst(tmp, Immediate(BigInt::LengthBits::kMask));
+ __ JumpIf(eq, *is_false);
+ __ Jump(*is_true);
+ },
+ value, is_true, is_false);
+
+ // Otherwise true.
+ if (!fallthrough_when_true) {
+ Jump(*is_true);
+ }
+}
+
+void MaglevAssembler::TestTypeOf(
+ Register object, interpreter::TestTypeOfFlags::LiteralFlag literal,
+ Label* is_true, Label::Distance true_distance, bool fallthrough_when_true,
+ Label* is_false, Label::Distance false_distance,
+ bool fallthrough_when_false) {
+ // If both true and false are fallthroughs, we don't have to do anything.
+ if (fallthrough_when_true && fallthrough_when_false) return;
+
+ // IMPORTANT: Note that `object` could be a register that aliases registers in
+ // the ScratchRegisterScope. Make sure that all reads of `object` are before
+ // any writes to scratch registers.
+ using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag;
+ switch (literal) {
+ case LiteralFlag::kNumber: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_true);
+ Ldr(scratch.W(), FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch.W(), RootIndex::kHeapNumberMap);
+ Branch(eq, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kString: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false);
+ LoadMap(scratch, object);
+ CompareInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+ Branch(le, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kSymbol: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false);
+ LoadMap(scratch, object);
+ CompareInstanceType(scratch, scratch, SYMBOL_TYPE);
+ Branch(eq, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kBoolean:
+ CompareRoot(object, RootIndex::kTrueValue);
+ B(eq, is_true);
+ CompareRoot(object, RootIndex::kFalseValue);
+ Branch(eq, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ case LiteralFlag::kBigInt: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false);
+ LoadMap(scratch, object);
+ CompareInstanceType(scratch, scratch, BIGINT_TYPE);
+ Branch(eq, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kUndefined: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ // Make sure `object` isn't a valid temp here, since we re-use it.
+ temps.SetAvailable(temps.Available() - object);
+ Register map = temps.Acquire();
+ JumpIfSmi(object, is_false);
+ // Check it has the undetectable bit set and it is not null.
+ LoadMap(map, object);
+ Ldr(map.W(), FieldMemOperand(map, Map::kBitFieldOffset));
+ TestAndBranchIfAllClear(map.W(), Map::Bits1::IsUndetectableBit::kMask,
+ is_false);
+ CompareRoot(object, RootIndex::kNullValue);
+ Branch(ne, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kFunction: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false);
+ // Check if callable bit is set and not undetectable.
+ LoadMap(scratch, object);
+ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset));
+ And(scratch.W(), scratch.W(),
+ Map::Bits1::IsUndetectableBit::kMask |
+ Map::Bits1::IsCallableBit::kMask);
+ Cmp(scratch.W(), Map::Bits1::IsCallableBit::kMask);
+ Branch(eq, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kObject: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false);
+ // If the object is null then return true.
+ CompareRoot(object, RootIndex::kNullValue);
+ B(eq, is_true);
+ // Check if the object is a receiver type,
+ LoadMap(scratch, object);
+ {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ CompareInstanceType(scratch, temps.Acquire(), FIRST_JS_RECEIVER_TYPE);
+ }
+ B(lt, is_false);
+ // ... and is not undefined (undetectable) nor callable.
+ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset));
+ Tst(scratch.W(), Immediate(Map::Bits1::IsUndetectableBit::kMask |
+ Map::Bits1::IsCallableBit::kMask));
+ Branch(eq, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kOther:
+ if (!fallthrough_when_false) {
+ Jump(is_false, false_distance);
+ }
+ return;
+ }
+ UNREACHABLE();
+}
+
+void MaglevAssembler::Prologue(Graph* graph) {
+ ScratchRegisterScope temps(this);
+ // We add two extra registers to the scope. Ideally we could add all the
+ // allocatable general registers, except Context, JSFunction, NewTarget and
+ // ArgCount. Unfortunately, OptimizeCodeOrTailCallOptimizedCodeSlot and
+ // LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing pick random registers and
+ // we could alias those.
+ // TODO(victorgomes): Fix these builtins to either use the scope or pass the
+ // used registers manually.
+ temps.Include({x14, x15});
+
+ CallTarget();
+
+ BailoutIfDeoptimized();
+
+ if (graph->has_recursive_calls()) {
+ BindCallTarget(code_gen_state()->entry_label());
+ }
+
+ // Tiering support.
+ // TODO(jgruber): Extract to a builtin.
+ {
+ ScratchRegisterScope temps(this);
+ Register flags = temps.Acquire();
+ Register feedback_vector = temps.Acquire();
+
+ Label* deferred_flags_need_processing = MakeDeferredCode(
+ [](MaglevAssembler* masm, Register flags, Register feedback_vector) {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+ // TODO(leszeks): This could definitely be a builtin that we
+ // tail-call.
+ __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
+ __ Trap();
+ },
+ flags, feedback_vector);
+
+ Move(feedback_vector,
+ compilation_info()->toplevel_compilation_unit()->feedback().object());
+ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
+ flags, feedback_vector, CodeKind::MAGLEV,
+ deferred_flags_need_processing);
+ }
+
+ EnterFrame(StackFrame::MAGLEV);
+
+ // Save arguments in frame.
+ // TODO(leszeks): Consider eliding this frame if we don't make any calls
+ // that could clobber these registers.
+ // Push the context and the JSFunction.
+ Push(kContextRegister, kJSFunctionRegister);
+ // Push the actual argument count and a _possible_ stack slot.
+ Push(kJavaScriptCallArgCountRegister, xzr);
+ int remaining_stack_slots = code_gen_state()->stack_slots() - 1;
+ DCHECK_GE(remaining_stack_slots, 0);
+
+ // Initialize stack slots.
+ if (graph->tagged_stack_slots() > 0) {
+ ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
+
+ // If tagged_stack_slots is divisible by 2, we overshoot and allocate one
+ // extra stack slot, otherwise we allocate exactly the right amount, since
+ // one stack slot has already been allocated.
+ int tagged_two_slots_count = graph->tagged_stack_slots() / 2;
+ remaining_stack_slots -= 2 * tagged_two_slots_count;
+
+ // Magic value. Experimentally, an unroll size of 8 doesn't seem any
+ // worse than fully unrolled pushes.
+ const int kLoopUnrollSize = 8;
+ if (tagged_two_slots_count < kLoopUnrollSize) {
+ for (int i = 0; i < tagged_two_slots_count; i++) {
+ Push(xzr, xzr);
+ }
+ } else {
+ ScratchRegisterScope temps(this);
+ Register count = temps.Acquire();
+ // Extract the first few slots to round to the unroll size.
+ int first_slots = tagged_two_slots_count % kLoopUnrollSize;
+ for (int i = 0; i < first_slots; ++i) {
+ Push(xzr, xzr);
+ }
+ Move(count, tagged_two_slots_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at
+ // least once.
+ DCHECK_GT(tagged_two_slots_count / kLoopUnrollSize, 0);
+ Label loop;
+ bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ Push(xzr, xzr);
+ }
+ Subs(count, count, Immediate(1));
+ B(&loop, gt);
+ }
+ }
+ if (remaining_stack_slots > 0) {
+ // Round up.
+ remaining_stack_slots += (remaining_stack_slots % 2);
+ // Extend sp by the size of the remaining untagged part of the frame;
+ // there is no need to initialise these slots.
+ Sub(sp, sp, Immediate(remaining_stack_slots * kSystemPointerSize));
+ }
+}
+
+void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
+ Label* eager_deopt_entry,
+ size_t lazy_deopt_count,
+ Label* lazy_deopt_entry) {
+ ForceConstantPoolEmissionWithoutJump();
+
+ DCHECK_GE(Deoptimizer::kLazyDeoptExitSize, Deoptimizer::kEagerDeoptExitSize);
+ size_t deopt_count = eager_deopt_count + lazy_deopt_count;
+ CheckVeneerPool(
+ false, false,
+ static_cast<int>(deopt_count) * Deoptimizer::kLazyDeoptExitSize);
+
+ ScratchRegisterScope scope(this);
+ Register scratch = scope.Acquire();
+ if (eager_deopt_count > 0) {
+ Bind(eager_deopt_entry);
+ LoadEntryFromBuiltin(Builtin::kDeoptimizationEntry_Eager, scratch);
+ MacroAssembler::Jump(scratch);
+ }
+ if (lazy_deopt_count > 0) {
+ Bind(lazy_deopt_entry);
+ LoadEntryFromBuiltin(Builtin::kDeoptimizationEntry_Lazy, scratch);
+ MacroAssembler::Jump(scratch);
+ }
+}
+
+void MaglevAssembler::AllocateTwoByteString(RegisterSnapshot register_snapshot,
+ Register result, int length) {
+ int size = SeqTwoByteString::SizeFor(length);
+ Allocate(register_snapshot, result, size);
+ ScratchRegisterScope scope(this);
+ Register scratch = scope.Acquire();
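+ // Zero the last tagged word of the allocation; the size is rounded up to
+ // kObjectAlignment, so this presumably initializes any trailing padding.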
+ StoreTaggedField(xzr, FieldMemOperand(result, size - kObjectAlignment));
+ LoadTaggedRoot(scratch, RootIndex::kStringMap);
+ StoreTaggedField(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+ Move(scratch, Name::kEmptyHashField);
+ StoreTaggedField(scratch, FieldMemOperand(result, Name::kRawHashFieldOffset));
+ Move(scratch, length);
+ StoreTaggedField(scratch, FieldMemOperand(result, String::kLengthOffset));
+}
+
+void MaglevAssembler::LoadSingleCharacterString(Register result,
+ Register char_code,
+ Register scratch) {
+ DCHECK_NE(char_code, scratch);
+ if (v8_flags.debug_code) {
+ Cmp(char_code, Immediate(String::kMaxOneByteCharCode));
+ Assert(ls, AbortReason::kUnexpectedValue);
+ }
+ Register table = scratch;
+ LoadRoot(table, RootIndex::kSingleCharacterStringTable);
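+ // The table is a FixedArray: entry {char_code} lives at
+ // kHeaderSize + char_code * kTaggedSize, hence the shift by kTaggedSizeLog2.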
+ Add(table, table, Operand(char_code, LSL, kTaggedSizeLog2));
+ DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize));
+}
+
+void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
+ Label* char_code_fits_one_byte,
+ Register result, Register char_code,
+ Register scratch) {
+ AssertZeroExtended(char_code);
+ DCHECK_NE(char_code, scratch);
+ ZoneLabelRef done(this);
+ Cmp(char_code, Immediate(String::kMaxOneByteCharCode));
+ JumpToDeferredIf(
+ hi,
+ [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
+ ZoneLabelRef done, Register result, Register char_code,
+ Register scratch) {
+ ScratchRegisterScope temps(masm);
+ // Ensure that {result} never aliases {scratch}, otherwise the store
+ // will fail.
+ Register string = result;
+ bool reallocate_result = scratch.Aliases(result);
+ if (reallocate_result) {
+ string = temps.Acquire();
+ }
+ // Be sure to save {char_code}. If it aliases with {result}, use
+ // the scratch register.
+ if (char_code.Aliases(result)) {
+ __ Move(scratch, char_code);
+ char_code = scratch;
+ }
+ DCHECK(!char_code.Aliases(string));
+ DCHECK(!scratch.Aliases(string));
+ DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
+ register_snapshot.live_registers.set(char_code);
+ __ AllocateTwoByteString(register_snapshot, string, 1);
+ __ And(scratch, char_code, Immediate(0xFFFF));
+ __ Strh(scratch.W(),
+ FieldMemOperand(string, SeqTwoByteString::kHeaderSize));
+ if (reallocate_result) {
+ __ Move(result, string);
+ }
+ __ B(*done);
+ },
+ register_snapshot, done, result, char_code, scratch);
+ if (char_code_fits_one_byte != nullptr) {
+ bind(char_code_fits_one_byte);
+ }
+ LoadSingleCharacterString(result, char_code, scratch);
+ bind(*done);
+}
+
+void MaglevAssembler::StringCharCodeOrCodePointAt(
+ BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
+ RegisterSnapshot& register_snapshot, Register result, Register string,
+ Register index, Register instance_type, Label* result_fits_one_byte) {
+ ZoneLabelRef done(this);
+ Label seq_string;
+ Label cons_string;
+ Label sliced_string;
+
+ Label* deferred_runtime_call = MakeDeferredCode(
+ [](MaglevAssembler* masm,
+ BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
+ RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
+ Register string, Register index) {
+ DCHECK(!register_snapshot.live_registers.has(result));
+ DCHECK(!register_snapshot.live_registers.has(string));
+ DCHECK(!register_snapshot.live_registers.has(index));
+ {
+ SaveRegisterStateForCall save_register_state(masm, register_snapshot);
+ __ SmiTag(index);
+ __ Push(string, index);
+ __ Move(kContextRegister, masm->native_context().object());
+ // This call does not throw and cannot deopt.
+ if (mode ==
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
+ __ CallRuntime(Runtime::kStringCodePointAt);
+ } else {
+ DCHECK_EQ(mode,
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
+ __ CallRuntime(Runtime::kStringCharCodeAt);
+ }
+ save_register_state.DefineSafepoint();
+ __ SmiUntag(kReturnRegister0);
+ __ Move(result, kReturnRegister0);
+ }
+ __ jmp(*done);
+ },
+ mode, register_snapshot, done, result, string, index);
+
+ // We might need to try more than once for ConsString, SlicedString and
+ // ThinString.
+ Label loop;
+ bind(&loop);
+
+ if (v8_flags.debug_code) {
+ Register scratch = instance_type;
+
+ // Check if {string} is a string.
+ AssertNotSmi(string);
+ LoadMap(scratch, string);
+ CompareInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+ Check(ls, AbortReason::kUnexpectedValue);
+
+ Ldr(scratch.W(), FieldMemOperand(string, String::kLengthOffset));
+ Cmp(index.W(), scratch.W());
+ Check(lo, AbortReason::kUnexpectedValue);
+ }
+
+ // Get instance type.
+ LoadMap(instance_type, string);
+ Ldr(instance_type.W(),
+ FieldMemOperand(instance_type, Map::kInstanceTypeOffset));
+
+ {
+ ScratchRegisterScope temps(this);
+ Register representation = temps.Acquire().W();
+
+ // TODO(victorgomes): Add fast path for external strings.
+ And(representation, instance_type.W(),
+ Immediate(kStringRepresentationMask));
+ Cmp(representation, Immediate(kSeqStringTag));
+ B(&seq_string, eq);
+ Cmp(representation, Immediate(kConsStringTag));
+ B(&cons_string, eq);
+ Cmp(representation, Immediate(kSlicedStringTag));
+ B(&sliced_string, eq);
+ Cmp(representation, Immediate(kThinStringTag));
+ B(deferred_runtime_call, ne);
+ // Fallthrough to thin string.
+ }
+
+ // Is a thin string.
+ {
+ DecompressTagged(string,
+ FieldMemOperand(string, ThinString::kActualOffset));
+ B(&loop);
+ }
+
+ bind(&sliced_string);
+ {
+ ScratchRegisterScope temps(this);
+ Register offset = temps.Acquire();
+
+ Ldr(offset.W(), FieldMemOperand(string, SlicedString::kOffsetOffset));
+ SmiUntag(offset);
+ DecompressTagged(string,
+ FieldMemOperand(string, SlicedString::kParentOffset));
+ Add(index, index, offset);
+ B(&loop);
+ }
+
+ bind(&cons_string);
+ {
+ // Reuse {instance_type} register here, since CompareRoot requires a scratch
+ // register as well.
+ Register second_string = instance_type;
+ Ldr(second_string.W(), FieldMemOperand(string, ConsString::kSecondOffset));
+ CompareRoot(second_string, RootIndex::kempty_string);
+ B(deferred_runtime_call, ne);
+ DecompressTagged(string, FieldMemOperand(string, ConsString::kFirstOffset));
+ B(&loop); // Try again with first string.
+ }
+
+ bind(&seq_string);
+ {
+ Label two_byte_string;
+ TestAndBranchIfAllClear(instance_type, kOneByteStringTag, &two_byte_string);
+ // The result for a one-byte string will be the same for both modes
+ // (CharCodeAt/CodePointAt), since it cannot be the first half of a
+ // surrogate pair.
+ Add(index, index, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ Ldrb(result, MemOperand(string, index));
+ B(result_fits_one_byte);
+
+ bind(&two_byte_string);
+ // {instance_type} is unused from this point, so we can use it as a scratch.
+ Register scratch = instance_type;
+ Lsl(scratch, index, 1);
+ Add(scratch, scratch, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ Ldrh(result, MemOperand(string, scratch));
+
+ if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
+ Register first_code_point = scratch;
+ And(first_code_point.W(), result.W(), Immediate(0xfc00));
+ CompareAndBranch(first_code_point, Immediate(0xd800), kNotEqual, *done);
+
+ Register length = scratch;
+ Ldr(length.W(), FieldMemOperand(string, String::kLengthOffset));
+ Add(index.W(), index.W(), Immediate(1));
+ CompareAndBranch(index, length, kGreaterThanEqual, *done);
+
+ Register second_code_point = scratch;
+ Lsl(index, index, 1);
+ Add(index, index, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ Ldrh(second_code_point, MemOperand(string, index));
+
+ // {index} is not needed at this point.
+ Register scratch2 = index;
+ And(scratch2.W(), second_code_point.W(), Immediate(0xfc00));
+ CompareAndBranch(scratch2, Immediate(0xdc00), kNotEqual, *done);
+
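+ // Combine the surrogate pair into a single code point:
+ //   0x10000 + ((lead - 0xd800) << 10) + (trail - 0xdc00)
+ // which, with the constants folded into surrogate_offset, becomes
+ //   (lead << 10) + trail + surrogate_offset.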
+ int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
+ Add(second_code_point, second_code_point, Immediate(surrogate_offset));
+ Lsl(result, result, 10);
+ Add(result, result, second_code_point);
+ }
+
+ // Fallthrough.
+ }
+
+ bind(*done);
+
+ if (v8_flags.debug_code) {
+ // We make sure that the user of this macro is not relying on string and
+ // index not being clobbered.
+ if (result != string) {
+ Mov(string, Immediate(0xdeadbeef));
+ }
+ if (result != index) {
+ Mov(index, Immediate(0xdeadbeef));
+ }
+ }
+}
+
+void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(JSCVT)) {
+ Fjcvtzs(dst.W(), src);
+ return;
+ }
+
+ ZoneLabelRef done(this);
+ // Try to convert with an FPU convert instruction. It's trivial to compute
+ // the modulo operation on an integer register so we convert to a 64-bit
+ // integer.
+ //
+ // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
+ // when the double is out of range. NaNs and infinities will be converted to 0
+ // (as ECMA-262 requires).
+ Fcvtzs(dst.X(), src);
+
+ // The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
+ // representable using a double, so if the result is one of those then we know
+ // that saturation occurred, and we need to manually handle the conversion.
+ //
+ // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
+ // 1 will cause signed overflow.
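+ // Cmp computes dst - 1, which overflows only for INT64_MIN. If that did not
+ // overflow (vc), Ccmp computes dst + 1, which overflows only for INT64_MAX;
+ // otherwise it forces the V flag via VFlag. Either way, V is set exactly
+ // when the conversion saturated.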
+ Cmp(dst.X(), 1);
+ Ccmp(dst.X(), -1, VFlag, vc);
+
+ JumpToDeferredIf(
+ vs,
+ [](MaglevAssembler* masm, DoubleRegister src, Register dst,
+ ZoneLabelRef done) {
+ __ MacroAssembler::Push(xzr, src);
+ __ CallBuiltin(Builtin::kDoubleToI);
+ __ Ldr(dst.W(), MemOperand(sp, 0));
+ DCHECK_EQ(xzr.SizeInBytes(), src.SizeInBytes());
+ __ Drop(2);
+ __ B(*done);
+ },
+ src, dst, done);
+
+ Bind(*done);
+ // Zero extend the converted value to complete the truncation.
+ Mov(dst, Operand(dst.W(), UXTW));
+}
+
+void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
+ Label* fail) {
+ ScratchRegisterScope temps(this);
+ DoubleRegister converted_back = temps.AcquireDouble();
+
+ // Convert the input float64 value to int32.
+ Fcvtzs(dst.W(), src);
+ // Convert that int32 value back to float64.
+ Scvtf(converted_back, dst.W());
+ // Check that the result of the float64->int32->float64 is equal to the input
+ // (i.e. that the conversion didn't truncate).
+ Fcmp(src, converted_back);
+ JumpIf(ne, fail);
+
+ // Check if {input} is -0.
+ Label check_done;
+ Cbnz(dst, &check_done);
+
+ // In case of 0, we need to check for the IEEE 0 pattern (which is all zeros).
+ Register input_bits = temps.Acquire();
+ Fmov(input_bits, src);
+ Cbnz(input_bits, fail);
+
+ Bind(&check_done);
+}
+
+void MaglevAssembler::StringLength(Register result, Register string) {
+ if (v8_flags.debug_code) {
+ // Check if {string} is a string.
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ AssertNotSmi(string);
+ LoadMap(scratch, string);
+ CompareInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+ Check(ls, AbortReason::kUnexpectedValue);
+ }
+ Ldr(result.W(), FieldMemOperand(string, String::kLengthOffset));
+}
+
+void MaglevAssembler::StoreFixedArrayElementWithWriteBarrier(
+ Register array, Register index, Register value,
+ RegisterSnapshot register_snapshot) {
+ if (v8_flags.debug_code) {
+ AssertNotSmi(array);
+ IsObjectType(array, FIXED_ARRAY_TYPE);
+ Assert(eq, AbortReason::kUnexpectedValue);
+ Cmp(index, Immediate(0));
+ Assert(hs, AbortReason::kUnexpectedNegativeValue);
+ }
+ {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add(scratch, array, Operand(index, LSL, kTaggedSizeLog2));
+ Str(value.W(), FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ }
+
+ ZoneLabelRef done(this);
+ Label* deferred_write_barrier = MakeDeferredCode(
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
+ Register index, Register value, RegisterSnapshot register_snapshot) {
+ ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
+ __ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask,
+ eq, *done);
+
+ Register stub_object_reg = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
+
+ RegList saved;
+ if (object != stub_object_reg &&
+ register_snapshot.live_registers.has(stub_object_reg)) {
+ saved.set(stub_object_reg);
+ }
+ if (register_snapshot.live_registers.has(slot_reg)) {
+ saved.set(slot_reg);
+ }
+
+ __ PushAll(saved);
+
+ if (object != stub_object_reg) {
+ __ Move(stub_object_reg, object);
+ object = stub_object_reg;
+ }
+ __ Add(slot_reg, object, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(slot_reg, slot_reg, Operand(index, LSL, kTaggedSizeLog2));
+
+ SaveFPRegsMode const save_fp_mode =
+ !register_snapshot.live_double_registers.is_empty()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+
+ __ CallRecordWriteStub(object, slot_reg, save_fp_mode);
+
+ __ PopAll(saved);
+ __ B(*done);
+ },
+ done, array, index, value, register_snapshot);
+
+ JumpIfSmi(value, *done);
+ CheckPageFlag(array, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ deferred_write_barrier);
+ bind(*done);
+}
+
+void MaglevAssembler::StoreFixedArrayElementNoWriteBarrier(Register array,
+ Register index,
+ Register value) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (v8_flags.debug_code) {
+ AssertNotSmi(array);
+ IsObjectType(array, FIXED_ARRAY_TYPE);
+ Assert(eq, AbortReason::kUnexpectedValue);
+ Cmp(index, Immediate(0));
+ Assert(hs, AbortReason::kUnexpectedNegativeValue);
+ }
+ Add(scratch, array, Operand(index, LSL, kTaggedSizeLog2));
+ Str(value.W(), FieldMemOperand(scratch, FixedArray::kHeaderSize));
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/arm64/maglev-ir-arm64.cc b/deps/v8/src/maglev/arm64/maglev-ir-arm64.cc
new file mode 100644
index 0000000000..dee8e594de
--- /dev/null
+++ b/deps/v8/src/maglev/arm64/maglev-ir-arm64.cc
@@ -0,0 +1,2304 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/register-arm64.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
+#include "src/maglev/maglev-assembler-inl.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir-inl.h"
+#include "src/objects/feedback-cell.h"
+#include "src/objects/js-function.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+#define __ masm->
+
+void Int32NegateWithOverflow::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineAsRegister(this);
+}
+
+void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input()).W();
+ Register out = ToRegister(result()).W();
+
+ // Deopt when result would be -0.
+ static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
+ Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
+ __ RecordComment("-- Jump to eager deopt");
+ __ Cbz(value, fail);
+
+ __ Negs(out, value);
+ // Output register must not be a register input into the eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{out} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32IncrementWithOverflow::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineAsRegister(this);
+}
+
+void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input()).W();
+ Register out = ToRegister(result()).W();
+ __ Adds(out, value, Immediate(1));
+ // Output register must not be a register input into the eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{out} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32DecrementWithOverflow::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineAsRegister(this);
+}
+
+void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input()).W();
+ Register out = ToRegister(result()).W();
+ __ Subs(out, value, Immediate(1));
+ // Output register must not be a register input into the eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{out} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
+}
+
+void CheckJSObjectElementsBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ set_temporaries_needed(1);
+ UseRegister(index_input());
+}
+void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input()).W();
+
+ __ AssertNotSmi(object);
+
+ if (v8_flags.debug_code) {
+ __ CompareObjectType(object, FIRST_JS_OBJECT_TYPE, scratch);
+ __ Assert(ge, AbortReason::kUnexpectedValue);
+ }
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(object, JSObject::kElementsOffset));
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(scratch);
+ }
+ __ SmiUntagField(scratch,
+ FieldMemOperand(scratch, FixedArray::kLengthOffset));
+ __ Cmp(index, scratch.W());
+ __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
+}
+
+int BuiltinStringFromCharCode::MaxCallStackArgs() const {
+ return AllocateDescriptor::GetStackParameterCount();
+}
+void BuiltinStringFromCharCode::SetValueLocationConstraints() {
+ if (code_input().node()->Is<Int32Constant>()) {
+ UseAny(code_input());
+ } else {
+ UseRegister(code_input());
+ }
+ set_temporaries_needed(2);
+ DefineAsRegister(this);
+}
+void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register result_string = ToRegister(result());
+ if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
+ int32_t char_code = constant->value();
+ if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
+ __ LoadSingleCharacterString(result_string, char_code);
+ } else {
+ // Ensure that {result_string} never aliases {scratch}, otherwise the
+ // store will fail.
+ bool reallocate_result = scratch.Aliases(result_string);
+ if (reallocate_result) {
+ result_string = temps.Acquire();
+ }
+ DCHECK(!scratch.Aliases(result_string));
+ __ AllocateTwoByteString(register_snapshot(), result_string, 1);
+ __ Move(scratch, char_code & 0xFFFF);
+ __ Strh(scratch.W(),
+ FieldMemOperand(result_string, SeqTwoByteString::kHeaderSize));
+ if (reallocate_result) {
+ __ Move(ToRegister(result()), result_string);
+ }
+ }
+ } else {
+ __ StringFromCharCode(register_snapshot(), nullptr, result_string,
+ ToRegister(code_input()), scratch);
+ }
+}
+
+int BuiltinStringPrototypeCharCodeOrCodePointAt::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2);
+ return 2;
+}
+void BuiltinStringPrototypeCharCodeOrCodePointAt::
+ SetValueLocationConstraints() {
+ UseAndClobberRegister(string_input());
+ UseAndClobberRegister(index_input());
+ DefineAsRegister(this);
+}
+void BuiltinStringPrototypeCharCodeOrCodePointAt::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Label done;
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ RegisterSnapshot save_registers = register_snapshot();
+ __ StringCharCodeOrCodePointAt(mode_, save_registers, ToRegister(result()),
+ ToRegister(string_input()),
+ ToRegister(index_input()), scratch, &done);
+ __ Bind(&done);
+}
+
+void FoldedAllocation::SetValueLocationConstraints() {
+ UseRegister(raw_allocation());
+ DefineAsRegister(this);
+}
+
+void FoldedAllocation::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ Add(ToRegister(result()), ToRegister(raw_allocation()), offset());
+}
+
+int CreateEmptyObjectLiteral::MaxCallStackArgs() const {
+ return AllocateDescriptor::GetStackParameterCount();
+}
+void CreateEmptyObjectLiteral::SetValueLocationConstraints() {
+ DefineAsRegister(this);
+}
+void CreateEmptyObjectLiteral::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(result());
+ __ Allocate(register_snapshot(), object, map().instance_size());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, map().object());
+ __ StoreTaggedField(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ LoadTaggedRoot(scratch, RootIndex::kEmptyFixedArray);
+ static_assert(JSObject::kPropertiesOrHashOffset + sizeof(Tagged_t) ==
+ JSObject::kElementsOffset);
+ __ StoreTwoTaggedFields(
+ scratch, FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
+ __ LoadTaggedRoot(scratch, RootIndex::kUndefinedValue);
+ int i = 0;
+ for (; i + 1 < map().GetInObjectProperties(); i += 2) {
+ int offset1 = map().GetInObjectPropertyOffset(i);
+ int offset2 = map().GetInObjectPropertyOffset(i + 1);
+ CHECK(offset1 + sizeof(Tagged_t) == offset2);
+ __ StoreTwoTaggedFields(scratch, FieldMemOperand(object, offset1));
+ }
+ if (i < map().GetInObjectProperties()) {
+ CHECK(i + 1 == map().GetInObjectProperties());
+ int offset = map().GetInObjectPropertyOffset(i);
+ __ StoreTaggedField(scratch, FieldMemOperand(object, offset));
+ }
+}
+
+void CheckedInt32ToUint32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedInt32ToUint32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register input_reg = ToRegister(input()).W();
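+ // A negative int32 is not a valid uint32, so deopt if the sign bit is set.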
+ __ Tst(input_reg, input_reg);
+ __ EmitEagerDeoptIf(mi, DeoptimizeReason::kNotUint32, this);
+}
+
+void CheckJSArrayBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ UseRegister(index_input());
+}
+void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+ __ AssertNotSmi(object);
+
+ if (v8_flags.debug_code) {
+ __ IsObjectType(object, JS_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
+ __ SmiUntagField(scratch, FieldMemOperand(object, JSArray::kLengthOffset));
+ __ Cmp(index, scratch);
+ __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
+}
+
+void ChangeInt32ToFloat64::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void ChangeInt32ToFloat64::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ Scvtf(ToDoubleRegister(result()), ToRegister(input()).W());
+}
+
+void ChangeUint32ToFloat64::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void ChangeUint32ToFloat64::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ Ucvtf(ToDoubleRegister(result()), ToRegister(input()).W());
+}
+
+void CheckedTruncateFloat64ToUint32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void CheckedTruncateFloat64ToUint32::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ DoubleRegister input_reg = ToDoubleRegister(input());
+ Register result_reg = ToRegister(result()).W();
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister converted_back = temps.AcquireDouble();
+
+ // Convert the input float64 value to uint32.
+ __ Fcvtzu(result_reg, input_reg);
+ // Convert that uint32 value back to float64.
+ __ Ucvtf(converted_back, result_reg);
+ // Check that the result of the float64->uint32->float64 is equal to the input
+ // (i.e. that the conversion didn't truncate).
+ __ Fcmp(input_reg, converted_back);
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotUint32, this);
+
+ // Check if {input} is -0.
+ Label check_done;
+ __ Cbnz(result_reg, &check_done);
+
+ // In case of 0, we need to check for the IEEE 0 pattern (which is all zeros).
+ Register input_bits = temps.Acquire();
+ __ Fmov(input_bits, input_reg);
+ __ Cbnz(input_bits, __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32));
+
+ __ Bind(&check_done);
+}
+
+void CheckMaps::SetValueLocationConstraints() { UseRegister(receiver_input()); }
+void CheckMaps::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+
+ // TODO(victorgomes): This can happen, because we do not emit an unconditional
+ // deopt when we intersect the map sets.
+ if (maps().is_empty()) {
+ __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
+ return;
+ }
+
+ bool maps_include_heap_number = AnyMapIsHeapNumber(maps());
+
+ Label done;
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ if (maps_include_heap_number) {
+ // Smis count as matching the HeapNumber map, so we're done.
+ __ B(&done, is_smi);
+ } else {
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
+ }
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object_map = temps.Acquire();
+ Register map = temps.Acquire();
+ __ LoadMap(object_map, object);
+ size_t map_count = maps().size();
+ for (size_t i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map_handle = maps().at(i);
+ __ Move(map, map_handle);
+ __ CmpTagged(object_map, map);
+ __ B(&done, eq);
+ }
+ Handle<Map> last_map_handle = maps().at(map_count - 1);
+ __ Move(map, last_map_handle);
+ __ CmpTagged(object_map, map);
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, this);
+ __ Bind(&done);
+}
+
+int CheckMapsWithMigration::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kTryMigrateInstance)->nargs, 1);
+ return 1;
+}
+void CheckMapsWithMigration::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ set_temporaries_needed(1);
+}
+void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // TODO(victorgomes): This can happen, because we do not emit an unconditional
+ // deopt when we intersect the map sets.
+ if (maps().is_empty()) {
+ __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
+ return;
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object = ToRegister(receiver_input());
+
+ bool maps_include_heap_number = AnyMapIsHeapNumber(maps());
+
+ ZoneLabelRef done(masm);
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ if (maps_include_heap_number) {
+ // Smis count as matching the HeapNumber map, so we're done.
+ __ B(*done, is_smi);
+ } else {
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
+ }
+ }
+
+ // Use general temporaries to be able to send to deferred code.
+ Register object_map = temps.Acquire();
+ Register scratch = temps.Acquire();
+ __ LoadMap(object_map, object);
+
+ size_t map_count = maps().size();
+ for (size_t i = 0; i < map_count; ++i) {
+ ZoneLabelRef continue_label(masm);
+ Handle<Map> map_handle = maps().at(i);
+ {
+ Register map = scratch;
+ __ Move(map, map_handle);
+ __ CmpTagged(object_map, map);
+ }
+ bool last_map = (i == map_count - 1);
+ if (map_handle->is_migration_target()) {
+ __ JumpToDeferredIf(
+ ne,
+ [](MaglevAssembler* masm, ZoneLabelRef continue_label,
+ ZoneLabelRef done, Register object, Register object_map,
+ Register scratch, int map_index, CheckMapsWithMigration* node) {
+ // If the map is not deprecated, we fail the map check and continue to
+ // the next one.
+ __ Ldr(scratch.W(),
+ FieldMemOperand(object_map, Map::kBitField3Offset));
+ __ TestAndBranchIfAllClear(scratch.W(),
+ Map::Bits3::IsDeprecatedBit::kMask,
+ *continue_label);
+
+ // Otherwise, try migrating the object. If the migration
+ // returns Smi zero, then it failed the migration.
+ Register return_val = Register::no_reg();
+ {
+ RegisterSnapshot register_snapshot = node->register_snapshot();
+ // We can eager deopt after the snapshot, so make sure the nodes
+ // used by the deopt are included in it.
+ // TODO(leszeks): This is a bit of a footgun -- we likely want the
+ // snapshot to always include eager deopt input registers.
+ AddDeoptRegistersToSnapshot(&register_snapshot,
+ node->eager_deopt_info());
+ SaveRegisterStateForCall save_register_state(masm,
+ register_snapshot);
+
+ __ Push(object);
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kTryMigrateInstance);
+ save_register_state.DefineSafepoint();
+
+ // Make sure the return value is preserved across the pop that
+ // restores the live registers.
+ return_val = kReturnRegister0;
+ if (register_snapshot.live_registers.has(return_val)) {
+ DCHECK(!register_snapshot.live_registers.has(scratch));
+ __ Mov(scratch, return_val);
+ return_val = scratch;
+ }
+ }
+
+ // On failure, the returned value is zero.
+ __ Cbz(return_val, *continue_label);
+
+ // The migrated object is returned on success, retry the map check.
+ __ Move(object, return_val);
+ __ LoadMap(object_map, object);
+ __ Move(scratch, node->maps().at(map_index));
+ __ CmpTagged(object_map, scratch);
+ __ B(*done, eq);
+ __ B(*continue_label);
+ },
+ // If this is the last map to check, we should deopt if we fail.
+ // This is safe to do, since {eager_deopt_info} is ZoneAllocated.
+ (last_map ? ZoneLabelRef::UnsafeFromLabelPointer(masm->GetDeoptLabel(
+ this, DeoptimizeReason::kWrongMap))
+ : continue_label),
+ done, object, object_map, scratch, i, this);
+ } else if (last_map) {
+ // If it is the last map and it is not a migration target, we should deopt
+ // if the check fails.
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, this);
+ }
+
+ if (!last_map) {
+ // We don't need to Bind the label for the last map.
+ __ B(*done, eq);
+ __ Bind(*continue_label);
+ }
+ }
+
+ __ Bind(*done);
+}
+
+void CheckNumber::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+}
+void CheckNumber::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Label done;
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register value = ToRegister(receiver_input());
+ // If {value} is a Smi or a HeapNumber, we're done.
+ __ JumpIfSmi(value, &done);
+ if (mode() == Object::Conversion::kToNumeric) {
+ __ LoadMap(scratch, value);
+ __ CompareRoot(scratch.W(), RootIndex::kHeapNumberMap);
+ // Jump to done if it is a HeapNumber.
+ __ B(&done, eq);
+ // Check if it is a BigInt.
+ __ Ldrh(scratch.W(), FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ Cmp(scratch, Immediate(BIGINT_TYPE));
+ } else {
+ __ Ldr(scratch.W(), FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, RootIndex::kHeapNumberMap);
+ }
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotANumber, this);
+ __ Bind(&done);
+}
+
+int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; }
+void CheckedObjectToIndex::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
+ set_double_temporaries_needed(1);
+}
+void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register result_reg = ToRegister(result());
+
+ ZoneLabelRef done(masm);
+ Condition is_smi = __ CheckSmi(object);
+ __ JumpToDeferredIf(
+ NegateCondition(is_smi),
+ [](MaglevAssembler* masm, Register object, Register result_reg,
+ ZoneLabelRef done, CheckedObjectToIndex* node) {
+ Label is_string;
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ LoadMap(scratch, object);
+ __ CompareInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+ __ B(&is_string, ls);
+
+ __ Cmp(scratch, Immediate(HEAP_NUMBER_TYPE));
+ // The IC will go generic if it encounters something other than a
+ // Number or String key.
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotInt32, node);
+
+ // Heap Number.
+ {
+ DoubleRegister number_value = temps.AcquireDouble();
+ DoubleRegister converted_back = temps.AcquireDouble();
+ __ Ldr(number_value,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
+ // Convert the input float64 value to int32.
+ __ TruncateDoubleToInt32(result_reg, number_value);
+ // Convert that int32 value back to float64.
+ __ Scvtf(converted_back, result_reg);
+ // Check that the result of the float64->int32->float64 is equal to
+ // the input (i.e. that the conversion didn't truncate).
+ __ Fcmp(number_value, converted_back);
+ __ B(*done, eq);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ }
+
+ // String.
+ __ Bind(&is_string);
+ {
+ RegisterSnapshot snapshot = node->register_snapshot();
+ snapshot.live_registers.clear(result_reg);
+ DCHECK(!snapshot.live_tagged_registers.has(result_reg));
+ {
+ SaveRegisterStateForCall save_register_state(masm, snapshot);
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Move(x0, object);
+ __ CallCFunction(
+ ExternalReference::string_to_array_index_function(), 1);
+ // No need for safepoint since this is a fast C call.
+ __ Move(result_reg, kReturnRegister0);
+ }
+ __ Cmp(result_reg, Immediate(0));
+ __ B(*done, ge);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ }
+ },
+ object, result_reg, done, this);
+
+ // If we didn't enter the deferred block, we're a Smi.
+ if (result_reg == object) {
+ __ SmiToInt32(result_reg);
+ } else {
+ __ SmiToInt32(result_reg, object);
+ }
+
+ __ Bind(*done);
+}
+
+void Int32ToNumber::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void Int32ToNumber::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ ZoneLabelRef done(masm);
+ Register object = ToRegister(result());
+ Register value = ToRegister(input());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
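+ // Adding the value to itself is the same as shifting it left by one, i.e.
+ // Smi-tagging it; if the addition overflows, the value does not fit in a Smi
+ // and we allocate a HeapNumber in the deferred code below.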
+ __ Adds(scratch.W(), value.W(), value.W());
+ __ JumpToDeferredIf(
+ vs,
+ [](MaglevAssembler* masm, Register object, Register value,
+ Register scratch, ZoneLabelRef done, Int32ToNumber* node) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ // We can include {scratch} back into the temporary set, since we jump
+ // over its use to the label {done}.
+ temps.Include(scratch);
+ DoubleRegister double_value = temps.AcquireDouble();
+ __ Scvtf(double_value, value.W());
+ __ AllocateHeapNumber(node->register_snapshot(), object, double_value);
+ __ B(*done);
+ },
+ object, value, scratch, done, this);
+ __ Mov(object, scratch);
+ __ Bind(*done);
+}
+
+void Uint32ToNumber::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void Uint32ToNumber::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ ZoneLabelRef done(masm);
+ Register value = ToRegister(input());
+ Register object = ToRegister(result());
+ __ Cmp(value.W(), Immediate(Smi::kMaxValue));
+ __ JumpToDeferredIf(
+ hi,
+ [](MaglevAssembler* masm, Register object, Register value,
+ ZoneLabelRef done, Uint32ToNumber* node) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister double_value = temps.AcquireDouble();
+ __ Ucvtf(double_value, value.W());
+ __ AllocateHeapNumber(node->register_snapshot(), object, double_value);
+ __ B(*done);
+ },
+ object, value, done, this);
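+ // The value fits in a Smi: tag it by doubling (i.e. shifting left by one).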
+ __ Add(object, value, value);
+ __ Bind(*done);
+}
+
+void Int32AddWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input()).W();
+ Register right = ToRegister(right_input()).W();
+ Register out = ToRegister(result()).W();
+ __ Adds(out, left, right);
+ // The output register shouldn't be a register input into the eager deopt
+ // info.
+ DCHECK_REGLIST_EMPTY(RegList{out} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32SubtractWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input()).W();
+ Register right = ToRegister(right_input()).W();
+ Register out = ToRegister(result()).W();
+ __ Subs(out, left, right);
+ // The output register shouldn't be a register input into the eager deopt
+ // info.
+ DCHECK_REGLIST_EMPTY(RegList{out} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input()).W();
+ Register right = ToRegister(right_input()).W();
+ Register out = ToRegister(result()).W();
+
+ // TODO(leszeks): peephole optimise multiplication by a constant.
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ bool out_alias_input = out == left || out == right;
+ Register res = out.X();
+ if (out_alias_input) {
+ res = temps.Acquire();
+ }
+
+ __ Smull(res, left, right);
+
+ // If res != (res[0:31] sign-extended to 64 bits), then the multiplication
+ // result is too large for 32 bits.
+ __ Cmp(res, Operand(res.W(), SXTW));
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kOverflow, this);
+
+ // If the result is zero, check if either lhs or rhs is negative.
+ Label end;
+ __ CompareAndBranch(res, Immediate(0), ne, &end);
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register temp = temps.Acquire().W();
+ __ Orr(temp, left, right);
+ __ Cmp(temp, Immediate(0));
+ // If one of them is negative, we must have a -0 result, which is non-int32,
+ // so deopt.
+ // TODO(leszeks): Consider splitting these deopts to have distinct deopt
+ // reasons. Otherwise, the reason has to match the above.
+ __ EmitEagerDeoptIf(lt, DeoptimizeReason::kOverflow, this);
+ }
+ __ Bind(&end);
+ if (out_alias_input) {
+ __ Move(out, res.W());
+ }
+}
+
+void Int32DivideWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input()).W();
+ Register right = ToRegister(right_input()).W();
+ Register out = ToRegister(result()).W();
+
+ // TODO(leszeks): peephole optimise division by a constant.
+
+ // Pre-check for overflow, since idiv throws a division exception on overflow
+ // rather than setting the overflow flag. Logic copied from
+ // effect-control-linearizer.cc
+
+ // Check if {right} is positive (and not zero).
+ __ Cmp(right, Immediate(0));
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(
+ le,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
+ Register right, Int32DivideWithOverflow* node) {
+ // {right} is negative or zero.
+
+ // Check if {right} is zero.
+ // We've already done the compare and flags won't be cleared yet.
+ // TODO(leszeks): Using kNotInt32 here, but kDivisionByZero would be
+ // better. Right now all eager deopts in a node have to be the same --
+ // we should allow a node to emit multiple eager deopts with different
+ // reasons.
+ __ EmitEagerDeoptIf(eq, DeoptimizeReason::kNotInt32, node);
+
+ // Check if {left} is zero, as that would produce minus zero.
+ __ Cmp(left, Immediate(0));
+ // TODO(leszeks): Better DeoptimizeReason = kMinusZero.
+ __ EmitEagerDeoptIf(eq, DeoptimizeReason::kNotInt32, node);
+
+ // Check if {left} is kMinInt and {right} is -1, in which case we'd have
+ // to return -kMinInt, which is not representable as Int32.
+ __ Cmp(left, Immediate(kMinInt));
+ __ JumpIf(ne, *done);
+ __ Cmp(right, Immediate(-1));
+ __ JumpIf(ne, *done);
+ // TODO(leszeks): Better DeoptimizeReason = kOverflow, but
+ // eager_deopt_info is already configured as kNotInt32.
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ },
+ done, left, right, this);
+ __ Bind(*done);
+
+ // Perform the actual integer division.
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ bool out_alias_input = out == left || out == right;
+ Register res = out;
+ if (out_alias_input) {
+ res = temps.Acquire().W();
+ }
+ __ Sdiv(res, left, right);
+
+ // Check that the remainder is zero.
+ Register temp = temps.Acquire().W();
+ __ Msub(temp, res, right, left);
+ __ Cmp(temp, Immediate(0));
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotInt32, this);
+
+ __ Move(out, res);
+}
+
+void Int32ModulusWithOverflow::SetValueLocationConstraints() {
+ UseAndClobberRegister(left_input());
+ UseAndClobberRegister(right_input());
+ DefineAsRegister(this);
+}
+void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // If AreAliased(lhs, rhs):
+ // deopt if lhs < 0 // Minus zero.
+ // 0
+ //
+ // Using the same algorithm as in EffectControlLinearizer:
+ // if rhs <= 0 then
+ // rhs = -rhs
+ // deopt if rhs == 0
+ // if lhs < 0 then
+ // let lhs_abs = -lhs in
+ // let res = lhs_abs % rhs in
+ // deopt if res == 0
+ // -res
+ // else
+ // let msk = rhs - 1 in
+ // if rhs & msk == 0 then
+ // lhs & msk
+ // else
+ // lhs % rhs
+
+ Register lhs = ToRegister(left_input()).W();
+ Register rhs = ToRegister(right_input()).W();
+ Register out = ToRegister(result()).W();
+
+ static constexpr DeoptimizeReason deopt_reason =
+ DeoptimizeReason::kDivisionByZero;
+
+ if (lhs == rhs) {
+ // For the modulus algorithm described above, lhs and rhs must not alias
+ // each other.
+ __ Tst(lhs, lhs);
+ // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
+ // allows one deopt reason per IR.
+ __ EmitEagerDeoptIf(mi, deopt_reason, this);
+ __ Move(ToRegister(result()), 0);
+ return;
+ }
+
+ DCHECK(!AreAliased(lhs, rhs));
+
+ ZoneLabelRef done(masm);
+ ZoneLabelRef rhs_checked(masm);
+ __ Cmp(rhs, Immediate(0));
+ __ JumpToDeferredIf(
+ le,
+ [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
+ Int32ModulusWithOverflow* node) {
+ __ Negs(rhs, rhs);
+ __ EmitEagerDeoptIf(eq, deopt_reason, node);
+ __ Jump(*rhs_checked);
+ },
+ rhs_checked, rhs, this);
+ __ Bind(*rhs_checked);
+
+ __ Cmp(lhs, Immediate(0));
+ __ JumpToDeferredIf(
+ lt,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
+ Register out, Int32ModulusWithOverflow* node) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register res = temps.Acquire().W();
+ __ Neg(lhs, lhs);
+ __ Udiv(res, lhs, rhs);
+ __ Msub(out, res, rhs, lhs);
+ __ Cmp(out, Immediate(0));
+ // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
+ // only allows one deopt reason per IR.
+ __ EmitEagerDeoptIf(eq, deopt_reason, node);
+ __ Neg(out, out);
+ __ B(*done);
+ },
+ done, lhs, rhs, out, this);
+
+ Label rhs_not_power_of_2;
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register mask = temps.Acquire().W();
+ __ Add(mask, rhs, Immediate(-1));
+ __ Tst(mask, rhs);
+ __ JumpIf(ne, &rhs_not_power_of_2);
+
+ // {rhs} is power of 2.
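+ // For a power-of-two {rhs} and a non-negative {lhs} (the negative case was
+ // handled in the deferred code above), lhs % rhs == lhs & (rhs - 1).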
+ __ And(out, mask, lhs);
+ __ Jump(*done);
+
+ __ Bind(&rhs_not_power_of_2);
+
+ // We store the result of the Udiv in a temporary register in case {out} is
+ // the same as {lhs} or {rhs}: we'll still need those 2 registers intact to
+ // get the remainder.
+ Register res = mask;
+ __ Udiv(res, lhs, rhs);
+ __ Msub(out, res, rhs, lhs);
+
+ __ Bind(*done);
+}
+
+#define DEF_BITWISE_BINOP(Instruction, opcode) \
+ void Instruction::SetValueLocationConstraints() { \
+ UseRegister(left_input()); \
+ UseRegister(right_input()); \
+ DefineAsRegister(this); \
+ } \
+ \
+ void Instruction::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Register left = ToRegister(left_input()).W(); \
+ Register right = ToRegister(right_input()).W(); \
+ Register out = ToRegister(result()).W(); \
+ __ opcode(out, left, right); \
+ }
+DEF_BITWISE_BINOP(Int32BitwiseAnd, and_)
+DEF_BITWISE_BINOP(Int32BitwiseOr, orr)
+DEF_BITWISE_BINOP(Int32BitwiseXor, eor)
+DEF_BITWISE_BINOP(Int32ShiftLeft, lslv)
+DEF_BITWISE_BINOP(Int32ShiftRight, asrv)
+DEF_BITWISE_BINOP(Int32ShiftRightLogical, lsrv)
+#undef DEF_BITWISE_BINOP
+
+void Int32BitwiseNot::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineAsRegister(this);
+}
+
+void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input()).W();
+ Register out = ToRegister(result()).W();
+ __ Mvn(out, value);
+}
+
+void Float64Add::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+void Float64Add::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ DoubleRegister out = ToDoubleRegister(result());
+ __ Fadd(out, left, right);
+}
+
+void Float64Subtract::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+void Float64Subtract::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ DoubleRegister out = ToDoubleRegister(result());
+ __ Fsub(out, left, right);
+}
+
+void Float64Multiply::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+void Float64Multiply::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ DoubleRegister out = ToDoubleRegister(result());
+ __ Fmul(out, left, right);
+}
+
+void Float64Divide::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+void Float64Divide::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ DoubleRegister out = ToDoubleRegister(result());
+ __ Fdiv(out, left, right);
+}
+
+int Float64Modulus::MaxCallStackArgs() const { return 0; }
+void Float64Modulus::SetValueLocationConstraints() {
+ UseFixed(left_input(), v0);
+ UseFixed(right_input(), v1);
+ DefineSameAsFirst(this);
+}
+void Float64Modulus::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+}
+
+void Float64Negate::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void Float64Negate::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister value = ToDoubleRegister(input());
+ DoubleRegister out = ToDoubleRegister(result());
+ __ Fneg(out, value);
+}
+
+void Float64Round::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister in = ToDoubleRegister(input());
+ DoubleRegister out = ToDoubleRegister(result());
+ if (kind_ == Kind::kNearest) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister temp = temps.AcquireDouble();
+ DoubleRegister temp2 = temps.AcquireDouble();
+ __ Move(temp, in);
+ // Frintn rounds to even on tie, while JS expects it to round towards
+ // +Infinity. Fix the difference by checking if we rounded down by exactly
+ // 0.5, and if so, round to the other side.
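+    // For example, Frintn(2.5) yields 2.0 (tie to even) while Math.round(2.5)
+    // must be 3.0; in that case the difference computed below is exactly 0.5
+    // and we round to the other side.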
+ __ Frintn(out, in);
+ __ Fsub(temp, temp, out);
+ __ Move(temp2, 0.5);
+ __ Fcmp(temp, temp2);
+ Label done;
+ __ JumpIf(ne, &done, Label::kNear);
+ // Fix wrong tie-to-even by adding 0.5 twice.
+ __ Fadd(out, out, temp2);
+ __ Fadd(out, out, temp2);
+ __ bind(&done);
+ } else if (kind_ == Kind::kCeil) {
+ __ Frintp(out, in);
+ } else if (kind_ == Kind::kFloor) {
+ __ Frintm(out, in);
+ }
+}
+
+int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
+void Float64Exponentiate::SetValueLocationConstraints() {
+ UseFixed(left_input(), v0);
+ UseFixed(right_input(), v1);
+ DefineSameAsFirst(this);
+}
+void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
+}
+
+int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; }
+void Float64Ieee754Unary::SetValueLocationConstraints() {
+ UseFixed(input(), v0);
+ DefineSameAsFirst(this);
+}
+void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ieee_function_, 1);
+}
+
+void Float64SilenceNaN::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+
+void Float64SilenceNaN::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ CanonicalizeNaN(ToDoubleRegister(result()), ToDoubleRegister(input()));
+}
+
+template <class Derived, Operation kOperation>
+void Float64CompareNode<Derived, kOperation>::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+template <class Derived, Operation kOperation>
+void Float64CompareNode<Derived, kOperation>::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ Register result = ToRegister(this->result());
+ Label is_false, end;
+ __ Fcmp(left, right);
+ // Check for NaN first.
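+  // An unordered Fcmp result (at least one NaN operand) sets the V flag, so
+  // vs routes every comparison kind handled here to the false case.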
+ __ JumpIf(vs, &is_false);
+ __ JumpIf(NegateCondition(ConditionFor(kOperation)), &is_false);
+ // TODO(leszeks): Investigate loading existing materialisations of roots here,
+ // if available.
+ __ LoadRoot(result, RootIndex::kTrueValue);
+ __ Jump(&end);
+ {
+ __ Bind(&is_false);
+ __ LoadRoot(result, RootIndex::kFalseValue);
+ }
+ __ Bind(&end);
+}
+
+#define DEF_OPERATION(Name) \
+ void Name::SetValueLocationConstraints() { \
+ Base::SetValueLocationConstraints(); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Base::GenerateCode(masm, state); \
+ }
+DEF_OPERATION(Float64Equal)
+DEF_OPERATION(Float64StrictEqual)
+DEF_OPERATION(Float64LessThan)
+DEF_OPERATION(Float64LessThanOrEqual)
+DEF_OPERATION(Float64GreaterThan)
+DEF_OPERATION(Float64GreaterThanOrEqual)
+#undef DEF_OPERATION
+
+void CheckInt32IsSmi::SetValueLocationConstraints() { UseRegister(input()); }
+void CheckInt32IsSmi::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // TODO(leszeks): This basically does a SmiTag and throws the result away.
+ // Don't throw the result away if we want to actually use it.
+ Register reg = ToRegister(input()).W();
+ __ Adds(wzr, reg, reg);
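+  // The V flag is set exactly when reg + reg overflows the 32-bit register,
+  // i.e. when the value is not Smi-representable.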
+ DCHECK_REGLIST_EMPTY(RegList{reg} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kNotASmi, this);
+}
+
+void CheckedSmiTagInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void CheckedSmiTagInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input()).W();
+ Register out = ToRegister(result()).W();
+ __ Adds(out, reg, reg);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{out} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
+}
+
+void CheckedSmiTagUint32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void CheckedSmiTagUint32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input()).W();
+ Register result_reg = ToRegister(result()).W();
+ // Perform an unsigned comparison against Smi::kMaxValue.
+ __ Cmp(reg, Immediate(Smi::kMaxValue));
+ __ EmitEagerDeoptIf(hi, DeoptimizeReason::kOverflow, this);
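+  // After the range check above, doubling the value cannot overflow; the
+  // Assert below merely documents this in debug builds.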
+ __ Adds(result_reg, reg, reg);
+ __ Assert(vc, AbortReason::kInputDoesNotFitSmi);
+}
+
+void CheckJSTypedArrayBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ if (ElementsKindSize(elements_kind_) == 1) {
+ UseRegister(index_input());
+ } else {
+ UseAndClobberRegister(index_input());
+ }
+}
+void CheckJSTypedArrayBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(object);
+ __ IsObjectType(object, JS_TYPED_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register byte_length = temps.Acquire();
+ __ LoadBoundedSizeFromObject(byte_length, object,
+ JSTypedArray::kRawByteLengthOffset);
+ int element_size = ElementsKindSize(elements_kind_);
+ if (element_size > 1) {
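+    // Scale the index by the element size (a power of two) so it can be
+    // compared against the byte length below.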
+ DCHECK(element_size == 2 || element_size == 4 || element_size == 8);
+ __ Cmp(byte_length,
+ Operand(index, LSL, base::bits::CountTrailingZeros(element_size)));
+ } else {
+ __ Cmp(byte_length, index);
+ }
+ // We use an unsigned comparison to handle negative indices as well.
+ __ EmitEagerDeoptIf(kUnsignedLessThanEqual, DeoptimizeReason::kOutOfBounds,
+ this);
+}
+
+int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
+void CheckJSDataViewBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ UseRegister(index_input());
+ set_temporaries_needed(1);
+}
+void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(object);
+ __ IsObjectType(object, JS_DATA_VIEW_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+
+ // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
+ Register byte_length = temps.Acquire();
+ __ LoadBoundedSizeFromObject(byte_length, object,
+ JSDataView::kRawByteLengthOffset);
+
+ int element_size = ExternalArrayElementSize(element_type_);
+ if (element_size > 1) {
+ __ Subs(byte_length, byte_length, Immediate(element_size - 1));
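+    // byte_length - (element_size - 1) is the exclusive upper bound on valid
+    // start indices for an element of this size; a negative result means the
+    // view is too small to hold even one element.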
+ __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOutOfBounds, this);
+ }
+ __ Cmp(index, byte_length);
+ __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
+}
+
+void CheckedInternalizedString::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineSameAsFirst(this);
+ set_temporaries_needed(1);
+}
+void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register object = ToRegister(object_input());
+
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
+ }
+
+ __ LoadMap(scratch, object);
+ __ RecordComment("Test IsInternalizedString");
+  // Go to the slow path if this is a non-string or a non-internalized string.
+ __ Ldrh(scratch.W(), FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ Tst(scratch.W(), Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ static_assert((kStringTag | kInternalizedTag) == 0);
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(
+ ne,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
+ CheckedInternalizedString* node, EagerDeoptInfo* deopt_info,
+ Register instance_type) {
+ __ RecordComment("Deferred Test IsThinString");
+ // Deopt if this isn't a thin string.
+ __ Cmp(instance_type.W(), Immediate(THIN_STRING_TYPE));
+ __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, node);
+ __ LoadTaggedField(object,
+ FieldMemOperand(object, ThinString::kActualOffset));
+ if (v8_flags.debug_code) {
+ __ RecordComment("DCHECK IsInternalizedString");
+ Register scratch = instance_type;
+ __ LoadMap(scratch, object);
+ __ Ldrh(scratch.W(),
+ FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ Tst(scratch.W(),
+ Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ static_assert((kStringTag | kInternalizedTag) == 0);
+ __ Check(eq, AbortReason::kUnexpectedValue);
+ }
+ __ jmp(*done);
+ },
+ done, object, this, eager_deopt_info(), scratch);
+ __ Bind(*done);
+}
+
+void UnsafeSmiTag::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input()).W();
+ Register out = ToRegister(result()).W();
+ if (v8_flags.debug_code) {
+ if (input().node()->properties().value_representation() ==
+ ValueRepresentation::kUint32) {
+ __ Cmp(reg, Immediate(Smi::kMaxValue));
+ __ Check(ls, AbortReason::kInputDoesNotFitSmi);
+ }
+ }
+ __ Adds(out, reg, reg);
+ if (v8_flags.debug_code) {
+ __ Check(vc, AbortReason::kInputDoesNotFitSmi);
+ }
+}
+
+namespace {
+
+void JumpToFailIfNotHeapNumberOrOddball(
+ MaglevAssembler* masm, Register value,
+ TaggedToFloat64ConversionType conversion_type, Label* fail) {
+ switch (conversion_type) {
+ case TaggedToFloat64ConversionType::kNumberOrOddball:
+ // Check if HeapNumber or Oddball, jump to fail otherwise.
+ static_assert(InstanceType::HEAP_NUMBER_TYPE + 1 ==
+ InstanceType::ODDBALL_TYPE);
+ if (fail) {
+ __ CompareObjectTypeRange(value, InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ODDBALL_TYPE);
+ __ JumpIf(kUnsignedGreaterThan, fail);
+ } else {
+ if (v8_flags.debug_code) {
+ __ CompareObjectTypeRange(value, InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ODDBALL_TYPE);
+ __ Assert(kUnsignedLessThanEqual, AbortReason::kUnexpectedValue);
+ }
+ }
+ break;
+ case TaggedToFloat64ConversionType::kNumber:
+ // Check if HeapNumber, jump to fail otherwise.
+ if (fail) {
+ __ IsObjectType(value, InstanceType::HEAP_NUMBER_TYPE);
+ __ JumpIf(kNotEqual, fail);
+ } else {
+ if (v8_flags.debug_code) {
+ __ IsObjectType(value, InstanceType::HEAP_NUMBER_TYPE);
+ __ Assert(kEqual, AbortReason::kUnexpectedValue);
+ }
+ }
+ break;
+ }
+}
+
+void TryUnboxNumberOrOddball(MaglevAssembler* masm, DoubleRegister dst,
+ Register src,
+ TaggedToFloat64ConversionType conversion_type,
+ Label* fail) {
+ Label is_not_smi, done;
+ // Check if Smi.
+ __ JumpIfNotSmi(src, &is_not_smi);
+ // If Smi, convert to Float64.
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register temp = temps.Acquire();
+ __ SmiToInt32(temp, src);
+ __ Sxtw(temp, temp.W());
+ __ Scvtf(dst, temp);
+ }
+ __ Jump(&done);
+ __ Bind(&is_not_smi);
+ JumpToFailIfNotHeapNumberOrOddball(masm, src, conversion_type, fail);
+ static_assert(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ __ LoadHeapNumberValue(dst, src);
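+  // Thanks to the static_assert above, the same load also reads an Oddball's
+  // to_number_raw value when oddballs are allowed.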
+ __ Bind(&done);
+}
+
+} // namespace
+
+void CheckedNumberOrOddballToFloat64::SetValueLocationConstraints() {
+ UseAndClobberRegister(input());
+ DefineAsRegister(this);
+}
+void CheckedNumberOrOddballToFloat64::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ TryUnboxNumberOrOddball(
+ masm, ToDoubleRegister(result()), value, conversion_type(),
+ __ GetDeoptLabel(this, DeoptimizeReason::kNotANumberOrOddball));
+}
+
+void UncheckedNumberOrOddballToFloat64::SetValueLocationConstraints() {
+ UseAndClobberRegister(input());
+ DefineAsRegister(this);
+}
+void UncheckedNumberOrOddballToFloat64::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ TryUnboxNumberOrOddball(masm, ToDoubleRegister(result()), value,
+ conversion_type(), nullptr);
+}
+
+namespace {
+
+void EmitTruncateNumberOrOddballToInt32(
+ MaglevAssembler* masm, Register value, Register result_reg,
+ TaggedToFloat64ConversionType conversion_type, Label* not_a_number) {
+ Label is_not_smi, done;
+ // Check if Smi.
+ __ JumpIfNotSmi(value, &is_not_smi);
+ // If Smi, convert to Int32.
+ __ SmiToInt32(value);
+ __ B(&done);
+ __ Bind(&is_not_smi);
+ JumpToFailIfNotHeapNumberOrOddball(masm, value, conversion_type,
+ not_a_number);
+ static_assert(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister double_value = temps.AcquireDouble();
+ __ Ldr(double_value, FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ TruncateDoubleToInt32(result_reg, double_value);
+ __ Bind(&done);
+}
+
+} // namespace
+
+void CheckedTruncateNumberOrOddballToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedTruncateNumberOrOddballToInt32::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ Register result_reg = ToRegister(result());
+ DCHECK_EQ(value, result_reg);
+ Label* deopt_label =
+ __ GetDeoptLabel(this, DeoptimizeReason::kNotANumberOrOddball);
+ EmitTruncateNumberOrOddballToInt32(masm, value, result_reg, conversion_type(),
+ deopt_label);
+}
+
+void TruncateNumberOrOddballToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void TruncateNumberOrOddballToInt32::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ Register result_reg = ToRegister(result());
+ DCHECK_EQ(value, result_reg);
+ EmitTruncateNumberOrOddballToInt32(masm, value, result_reg, conversion_type(),
+ nullptr);
+}
+
+void IncreaseInterruptBudget::SetValueLocationConstraints() {
+ set_temporaries_needed(1);
+}
+void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register feedback_cell = temps.Acquire();
+ Register budget = temps.Acquire().W();
+ __ Ldr(feedback_cell,
+ MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedField(
+ feedback_cell,
+ FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
+ __ Ldr(budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add(budget, budget, Immediate(amount()));
+ __ Str(budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+namespace {
+
+enum class ReduceInterruptBudgetType { kLoop, kReturn };
+
+void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
+ Node* node, ReduceInterruptBudgetType type,
+ Register scratch0) {
+ // For loops, first check for interrupts. Don't do this for returns, as we
+ // can't lazy deopt to the end of a return.
+ if (type == ReduceInterruptBudgetType::kLoop) {
+ Label next;
+ // Here, we only care about interrupts since we've already guarded against
+ // real stack overflows on function entry.
+ {
+ Register stack_limit = scratch0;
+ __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
+ __ Cmp(sp, stack_limit);
+ __ B(&next, hi);
+ }
+
+ // An interrupt has been requested and we must call into runtime to handle
+ // it; since we already pay the call cost, combine with the TieringManager
+ // call.
+ {
+ SaveRegisterStateForCall save_register_state(masm,
+ node->register_snapshot());
+ Register function = scratch0;
+ __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Push(function);
+ // Move into kContextRegister after the load into scratch0, just in case
+ // scratch0 happens to be kContextRegister.
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
+ save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
+ }
+ __ B(*done); // All done, continue.
+ __ Bind(&next);
+ }
+
+ // No pending interrupts. Call into the TieringManager if needed.
+ {
+ // Skip the runtime call if the tiering state is kInProgress. The runtime
+ // only performs simple bookkeeping in this case, which we can easily
+ // replicate here in generated code.
+ // TODO(jgruber): Use the correct feedback vector once Maglev inlining is
+ // enabled.
+ Label update_profiler_ticks_and_interrupt_budget;
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch1 = temps.Acquire();
+ static_assert(kTieringStateInProgressBlocksTierup);
+ __ Move(scratch0, masm->compilation_info()
+ ->toplevel_compilation_unit()
+ ->feedback()
+ .object());
+
+ // If tiering_state is kInProgress, skip the runtime call.
+ __ Ldrh(scratch1.W(),
+ FieldMemOperand(scratch0, FeedbackVector::kFlagsOffset));
+ __ DecodeField<FeedbackVector::TieringStateBits>(scratch1);
+ __ Cmp(scratch1.W(),
+ Immediate(static_cast<int>(TieringState::kInProgress)));
+ __ B(&update_profiler_ticks_and_interrupt_budget, eq);
+
+ // If osr_tiering_state is kInProgress, skip the runtime call.
+ __ Ldrh(scratch1.W(),
+ FieldMemOperand(scratch0, FeedbackVector::kFlagsOffset));
+ __ DecodeField<FeedbackVector::OsrTieringStateBit>(scratch1);
+ __ Cmp(scratch1.W(),
+ Immediate(static_cast<int>(TieringState::kInProgress)));
+ __ B(&update_profiler_ticks_and_interrupt_budget, eq);
+ }
+
+ {
+ SaveRegisterStateForCall save_register_state(masm,
+ node->register_snapshot());
+ Register function = scratch0;
+ __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Push(function);
+ // Move into kContextRegister after the load into scratch0, just in case
+ // scratch0 happens to be kContextRegister.
+ __ Move(kContextRegister, masm->native_context().object());
+ // Note: must not cause a lazy deopt!
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
+ save_register_state.DefineSafepoint();
+ }
+ __ B(*done);
+
+ __ Bind(&update_profiler_ticks_and_interrupt_budget);
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register feedback_vector = scratch0;
+ Register ticks = temps.Acquire().W();
+ // We are skipping the call to Runtime::kBytecodeBudgetInterrupt_Maglev
+ // since the tiering state is kInProgress. Perform bookkeeping that would
+ // have been done in the runtime function:
+ __ AssertFeedbackVector(feedback_vector);
+ // FeedbackVector::SaturatingIncrementProfilerTicks.
+ // TODO(jgruber): This isn't saturating and thus we may theoretically
+ // exceed Smi::kMaxValue. But, 1) this is very unlikely since it'd take
+ // quite some time to exhaust the budget that many times; and 2) even an
+ // overflow doesn't hurt us at all.
+ __ Ldr(ticks, FieldMemOperand(feedback_vector,
+ FeedbackVector::kProfilerTicksOffset));
+ __ Add(ticks, ticks, Immediate(1));
+ __ Str(ticks, FieldMemOperand(feedback_vector,
+ FeedbackVector::kProfilerTicksOffset));
+ }
+
+ // JSFunction::SetInterruptBudget.
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register feedback_cell = scratch0;
+ Register budget = temps.Acquire().W();
+ __ Ldr(feedback_cell,
+ MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedField(
+ feedback_cell,
+ FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
+ __ Move(budget, v8_flags.interrupt_budget);
+ __ Str(budget, FieldMemOperand(feedback_cell,
+ FeedbackCell::kInterruptBudgetOffset));
+ }
+ __ B(*done);
+ }
+}
+
+void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
+ ReduceInterruptBudgetType type, int amount) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register feedback_cell = scratch;
+ Register budget = temps.Acquire().W();
+ __ Ldr(feedback_cell,
+ MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedField(
+ feedback_cell,
+ FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
+ __ Ldr(budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Subs(budget, budget, Immediate(amount));
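+  // Subs sets the condition flags; a negative budget (lt) takes the deferred
+  // interrupt / tiering path below.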
+ __ Str(budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type,
+ scratch);
+ __ Bind(*done);
+}
+
+} // namespace
+
+int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
+void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
+ set_temporaries_needed(2);
+}
+void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ GenerateReduceInterruptBudget(masm, this, ReduceInterruptBudgetType::kLoop,
+ amount());
+}
+
+int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
+void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
+ set_temporaries_needed(2);
+}
+void ReduceInterruptBudgetForReturn::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ GenerateReduceInterruptBudget(masm, this, ReduceInterruptBudgetType::kReturn,
+ amount());
+}
+
+namespace {
+
+template <bool check_detached, typename ResultReg, typename NodeT>
+void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object,
+ Register index, ResultReg result_reg,
+ ElementsKind kind) {
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ __ IsObjectType(object, JS_TYPED_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
+ if constexpr (check_detached) {
+ __ DeoptIfBufferDetached(object, scratch, node);
+ }
+
+ Register data_pointer = scratch;
+ __ BuildTypedArrayDataPointer(data_pointer, object);
+
+ if constexpr (std::is_same_v<ResultReg, Register>) {
+ if (IsSignedIntTypedArrayElementsKind(kind)) {
+ int element_size = ElementsKindSize(kind);
+ __ Add(data_pointer, data_pointer,
+ Operand(index, LSL, ShiftFromScale(element_size)));
+ __ LoadSignedField(result_reg.W(), MemOperand(data_pointer),
+ element_size);
+ } else {
+ DCHECK(IsUnsignedIntTypedArrayElementsKind(kind));
+ int element_size = ElementsKindSize(kind);
+ __ Add(data_pointer, data_pointer,
+ Operand(index, LSL, ShiftFromScale(element_size)));
+ __ LoadUnsignedField(result_reg.W(), MemOperand(data_pointer),
+ element_size);
+ }
+ } else {
+#ifdef DEBUG
+ bool result_reg_is_double = std::is_same_v<ResultReg, DoubleRegister>;
+ DCHECK(result_reg_is_double);
+ DCHECK(IsFloatTypedArrayElementsKind(kind));
+#endif
+ switch (kind) {
+ case FLOAT32_ELEMENTS:
+ __ Add(data_pointer, data_pointer, Operand(index, LSL, 2));
+ __ Ldr(result_reg.S(), MemOperand(data_pointer));
+ __ Fcvt(result_reg, result_reg.S());
+ break;
+ case FLOAT64_ELEMENTS:
+ __ Add(data_pointer, data_pointer, Operand(index, LSL, 3));
+ __ Ldr(result_reg, MemOperand(data_pointer));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <bool check_detached, typename ValueReg, typename NodeT>
+void GenerateTypedArrayStore(MaglevAssembler* masm, NodeT* node,
+ Register object, Register index, ValueReg value,
+ ElementsKind kind) {
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ __ IsObjectType(object, JS_TYPED_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
+ if constexpr (check_detached) {
+ __ DeoptIfBufferDetached(object, scratch, node);
+ }
+
+ Register data_pointer = scratch;
+ __ BuildTypedArrayDataPointer(data_pointer, object);
+
+ if constexpr (std::is_same_v<ValueReg, Register>) {
+ int element_size = ElementsKindSize(kind);
+ __ Add(data_pointer, data_pointer,
+ Operand(index, LSL, ShiftFromScale(element_size)));
+ __ StoreField(MemOperand(data_pointer), value.W(), element_size);
+ } else {
+#ifdef DEBUG
+ bool value_is_double = std::is_same_v<ValueReg, DoubleRegister>;
+ DCHECK(value_is_double);
+ DCHECK(IsFloatTypedArrayElementsKind(kind));
+#endif
+ switch (kind) {
+ case FLOAT32_ELEMENTS: {
+ DoubleRegister double_scratch = temps.AcquireDouble();
+ __ Fcvt(double_scratch.S(), value);
+ __ Add(data_pointer, data_pointer, Operand(index, LSL, 2));
+ __ Str(double_scratch.S(), MemOperand(data_pointer));
+ break;
+ }
+ case FLOAT64_ELEMENTS:
+ __ Add(data_pointer, data_pointer, Operand(index, LSL, 3));
+ __ Str(value, MemOperand(data_pointer));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+} // namespace
+
+#define DEF_LOAD_TYPED_ARRAY(Name, ResultReg, ToResultReg, check_detached) \
+ void Name::SetValueLocationConstraints() { \
+ UseRegister(object_input()); \
+ UseRegister(index_input()); \
+ DefineAsRegister(this); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Register object = ToRegister(object_input()); \
+ Register index = ToRegister(index_input()); \
+ ResultReg result_reg = ToResultReg(result()); \
+ \
+ GenerateTypedArrayLoad<check_detached>(masm, this, object, index, \
+ result_reg, elements_kind_); \
+ }
+
+DEF_LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElement, Register, ToRegister,
+ /*check_detached*/ true)
+DEF_LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElementNoDeopt, Register,
+ ToRegister,
+ /*check_detached*/ false)
+
+DEF_LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElement, Register, ToRegister,
+ /*check_detached*/ true)
+DEF_LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElementNoDeopt, Register,
+ ToRegister,
+ /*check_detached*/ false)
+
+DEF_LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElement, DoubleRegister,
+ ToDoubleRegister,
+ /*check_detached*/ true)
+DEF_LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElementNoDeopt, DoubleRegister,
+ ToDoubleRegister, /*check_detached*/ false)
+#undef DEF_LOAD_TYPED_ARRAY
+
+#define DEF_STORE_TYPED_ARRAY(Name, ValueReg, ToValueReg, check_detached) \
+ void Name::SetValueLocationConstraints() { \
+ UseRegister(object_input()); \
+ UseRegister(index_input()); \
+ UseRegister(value_input()); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Register object = ToRegister(object_input()); \
+ Register index = ToRegister(index_input()); \
+ ValueReg value = ToValueReg(value_input()); \
+ \
+ GenerateTypedArrayStore<check_detached>(masm, this, object, index, value, \
+ elements_kind_); \
+ }
+
+DEF_STORE_TYPED_ARRAY(StoreIntTypedArrayElement, Register, ToRegister,
+ /*check_detached*/ true)
+DEF_STORE_TYPED_ARRAY(StoreIntTypedArrayElementNoDeopt, Register, ToRegister,
+ /*check_detached*/ false)
+
+DEF_STORE_TYPED_ARRAY(StoreDoubleTypedArrayElement, DoubleRegister,
+ ToDoubleRegister,
+ /*check_detached*/ true)
+DEF_STORE_TYPED_ARRAY(StoreDoubleTypedArrayElementNoDeopt, DoubleRegister,
+ ToDoubleRegister, /*check_detached*/ false)
+
+#undef DEF_STORE_TYPED_ARRAY
+
+void LoadFixedArrayElement::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ DefineAsRegister(this);
+}
+void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(elements);
+ __ IsObjectType(elements, FIXED_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ Register result_reg = ToRegister(result());
+ __ Add(result_reg, elements, Operand(index, LSL, kTaggedSizeLog2));
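+  // result_reg now holds the tagged elements pointer plus the scaled index;
+  // the FixedArray header offset (and heap object tag) are applied via the
+  // FieldMemOperand below.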
+ if (this->decompresses_tagged_result()) {
+ __ DecompressTagged(result_reg,
+ FieldMemOperand(result_reg, FixedArray::kHeaderSize));
+ } else {
+ __ Ldr(result_reg.W(),
+ FieldMemOperand(result_reg, FixedArray::kHeaderSize));
+ }
+}
+
+void LoadFixedDoubleArrayElement::SetValueLocationConstraints() {
+ UseAndClobberRegister(elements_input());
+ UseRegister(index_input());
+ DefineAsRegister(this);
+}
+void LoadFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(elements);
+ __ IsObjectType(elements, FIXED_DOUBLE_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ __ Add(elements, elements, Operand(index, LSL, kDoubleSizeLog2));
+ __ Ldr(ToDoubleRegister(result()),
+ FieldMemOperand(elements, FixedArray::kHeaderSize));
+}
+
+void StoreFixedDoubleArrayElement::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
+ set_temporaries_needed(1);
+}
+void StoreFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(elements);
+ __ IsObjectType(elements, FIXED_DOUBLE_ARRAY_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ __ Add(scratch, elements, Operand(index, LSL, kDoubleSizeLog2));
+ __ Str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+}
+
+void StoreDoubleField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(value_input());
+}
+void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register tmp = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ __ DecompressTagged(tmp, FieldMemOperand(object, offset()));
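+  // The field is expected to hold a boxed HeapNumber; the new value is
+  // written into its payload in place.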
+ __ AssertNotSmi(tmp);
+ __ Move(FieldMemOperand(tmp, HeapNumber::kValueOffset), value);
+}
+
+void LoadSignedIntDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ if (is_little_endian_constant() ||
+ type_ == ExternalArrayType::kExternalInt8Array) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ DefineAsRegister(this);
+}
+void LoadSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ Register result_reg = ToRegister(result());
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CompareObjectType(object, JS_DATA_VIEW_TYPE);
+ __ Assert(hs, AbortReason::kUnexpectedValue);
+ }
+
+ int element_size = ExternalArrayElementSize(type_);
+
+ // Load data pointer.
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register data_pointer = temps.Acquire();
+ __ LoadExternalPointerField(
+ data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
+
+ __ LoadSignedField(result_reg.W(), MemOperand(data_pointer, index),
+ element_size);
+ }
+
+  // We ignore the little-endian argument if the element type is a single byte.
+ if (type_ != ExternalArrayType::kExternalInt8Array) {
+ if (is_little_endian_constant()) {
+ if (!FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ ReverseByteOrder(result_reg, element_size);
+ }
+ } else {
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, false);
+ __ Bind(*is_big_endian);
+ __ ReverseByteOrder(result_reg, element_size);
+ __ Bind(*is_little_endian);
+ // arm64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ }
+ }
+}
+
+void StoreSignedIntDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ if (ExternalArrayElementSize(type_) > 1) {
+ UseAndClobberRegister(value_input());
+ } else {
+ UseRegister(value_input());
+ }
+ if (is_little_endian_constant() ||
+ type_ == ExternalArrayType::kExternalInt8Array) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+}
+void StoreSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ Register value = ToRegister(value_input());
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CompareObjectType(object, JS_DATA_VIEW_TYPE);
+ __ Assert(hs, AbortReason::kUnexpectedValue);
+ }
+
+ int element_size = ExternalArrayElementSize(type_);
+
+  // We ignore the little-endian argument if the element type is a single byte.
+ if (element_size > 1) {
+ if (is_little_endian_constant()) {
+ if (!FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ ReverseByteOrder(value, element_size);
+ }
+ } else {
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, false);
+ __ Bind(*is_big_endian);
+ __ ReverseByteOrder(value, element_size);
+ __ Bind(*is_little_endian);
+ // arm64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ }
+ }
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register data_pointer = temps.Acquire();
+ __ LoadExternalPointerField(
+ data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
+ __ StoreField(MemOperand(data_pointer, index), value.W(), element_size);
+}
+
+void LoadDoubleDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ if (is_little_endian_constant()) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ set_temporaries_needed(1);
+ DefineAsRegister(this);
+}
+void LoadDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister result_reg = ToDoubleRegister(result());
+ Register data_pointer = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CompareObjectType(object, JS_DATA_VIEW_TYPE);
+ __ Assert(hs, AbortReason::kUnexpectedValue);
+ }
+
+ // Load data pointer.
+ __ LoadExternalPointerField(
+ data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
+
+ if (is_little_endian_constant()) {
+ if (FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ Move(result_reg, MemOperand(data_pointer, index));
+ } else {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
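+      // Load the raw 64 bits, byte-swap them with Rev, then bit-cast into the
+      // double register with Fmov.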
+ __ Move(scratch, MemOperand(data_pointer, index));
+ __ Rev(scratch, scratch);
+ __ Fmov(result_reg, scratch);
+ }
+ } else {
+ Label done;
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ // TODO(leszeks): We're likely to be calling this on an existing boolean --
+ // maybe that's a case we should fast-path here and re-use that boolean
+ // value?
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, true);
+ // arm64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ __ Bind(*is_little_endian);
+ __ Move(result_reg, MemOperand(data_pointer, index));
+ __ B(&done);
+ // We should swap the bytes if big endian.
+ __ Bind(*is_big_endian);
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, MemOperand(data_pointer, index));
+ __ Rev(scratch, scratch);
+ __ Fmov(result_reg, scratch);
+ __ Bind(&done);
+ }
+}
+
+void StoreDoubleDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
+ if (is_little_endian_constant()) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ set_temporaries_needed(1);
+}
+void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
+ Register data_pointer = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CompareObjectType(object, JS_DATA_VIEW_TYPE);
+ __ Assert(hs, AbortReason::kUnexpectedValue);
+ }
+
+ // Load data pointer.
+ __ LoadExternalPointerField(
+ data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
+
+ if (is_little_endian_constant()) {
+ if (FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ Str(value, MemOperand(data_pointer, index));
+ } else {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Fmov(scratch, value);
+ __ Rev(scratch, scratch);
+ __ Str(scratch, MemOperand(data_pointer, index));
+ }
+ } else {
+ Label done;
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ // TODO(leszeks): We're likely to be calling this on an existing boolean --
+ // maybe that's a case we should fast-path here and re-use that boolean
+ // value?
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, true);
+ // arm64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ __ Bind(*is_little_endian);
+ __ Str(value, MemOperand(data_pointer, index));
+ __ B(&done);
+ // We should swap the bytes if big endian.
+ __ Bind(*is_big_endian);
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Fmov(scratch, value);
+ __ Rev(scratch, scratch);
+ __ Str(scratch, MemOperand(data_pointer, index));
+ __ Bind(&done);
+ }
+}
+
+void SetPendingMessage::SetValueLocationConstraints() {
+ UseRegister(value());
+ DefineAsRegister(this);
+}
+
+void SetPendingMessage::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register new_message = ToRegister(value());
+ Register return_value = ToRegister(result());
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch0 = temps.Acquire();
+ MemOperand pending_message_operand = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_pending_message(masm->isolate()), scratch0);
+
+ if (new_message != return_value) {
+ __ Ldr(return_value, pending_message_operand);
+ __ Str(new_message, pending_message_operand);
+ } else {
+ Register scratch1 = temps.Acquire();
+ __ Ldr(scratch1, pending_message_operand);
+ __ Str(new_message, pending_message_operand);
+ __ Move(return_value, scratch1);
+ }
+}
+
+void TestUndetectable::SetValueLocationConstraints() {
+ UseRegister(value());
+ DefineAsRegister(this);
+}
+void TestUndetectable::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(value());
+ Register return_value = ToRegister(result());
+
+ Label return_false, done;
+ __ JumpIfSmi(object, &return_false);
+ {
+ // For heap objects, check the map's undetectable bit.
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ LoadMap(scratch, object);
+ __ Ldr(scratch.W(), FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(
+ scratch.W(), Map::Bits1::IsUndetectableBit::kMask, &return_false);
+ }
+
+ __ LoadRoot(return_value, RootIndex::kTrueValue);
+ __ B(&done);
+
+ __ Bind(&return_false);
+ __ LoadRoot(return_value, RootIndex::kFalseValue);
+
+ __ Bind(&done);
+}
+
+int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; }
+void ThrowIfNotSuperConstructor::SetValueLocationConstraints() {
+ UseRegister(constructor());
+ UseRegister(function());
+}
+void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Label* deferred_abort = __ MakeDeferredCode(
+ [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) {
+ __ Push(ToRegister(node->constructor()), ToRegister(node->function()));
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kThrowNotSuperConstructor, 2);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
+ __ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ },
+ this);
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ LoadMap(scratch, ToRegister(constructor()));
+ __ Ldr(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(scratch, Map::Bits1::IsConstructorBit::kMask,
+ deferred_abort);
+}
+
+int FunctionEntryStackCheck::MaxCallStackArgs() const { return 1; }
+void FunctionEntryStackCheck::SetValueLocationConstraints() {
+ set_temporaries_needed(2);
+}
+void FunctionEntryStackCheck::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+  // Stack check. This folds the interrupt stack limit check and the real
+  // stack limit check into one by checking only the interrupt limit. The
+  // interrupt limit is either equal to the real stack limit or tighter. By
+  // ensuring we have space up to that limit after building the frame, we can
+  // quickly precheck both at once.
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ const int stack_check_offset = masm->code_gen_state()->stack_check_offset();
+ Register stack_cmp_reg = sp;
+ if (stack_check_offset > kStackLimitSlackForDeoptimizationInBytes) {
+ stack_cmp_reg = temps.Acquire();
+ __ Sub(stack_cmp_reg, sp, stack_check_offset);
+ }
+ Register interrupt_stack_limit = temps.Acquire();
+ __ LoadStackLimit(interrupt_stack_limit,
+ StackLimitKind::kInterruptStackLimit);
+ __ Cmp(stack_cmp_reg, interrupt_stack_limit);
+
+ ZoneLabelRef deferred_call_stack_guard_return(masm);
+ __ JumpToDeferredIf(
+ lo,
+ [](MaglevAssembler* masm, FunctionEntryStackCheck* node,
+ ZoneLabelRef done, int stack_check_offset) {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ {
+ SaveRegisterStateForCall save_register_state(
+ masm, node->register_snapshot());
+ // Push the frame size
+ __ Push(Smi::FromInt(stack_check_offset));
+ __ CallRuntime(Runtime::kStackGuardWithGap, 1);
+ save_register_state.DefineSafepointWithLazyDeopt(
+ node->lazy_deopt_info());
+ }
+ __ B(*done);
+ },
+ this, deferred_call_stack_guard_return, stack_check_offset);
+ __ bind(*deferred_call_stack_guard_return);
+}
+
+// ---
+// Control nodes
+// ---
+void Return::SetValueLocationConstraints() {
+ UseFixed(value_input(), kReturnRegister0);
+}
+void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
+ DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
+ // Read the formal number of parameters from the top level compilation unit
+ // (i.e. the outermost, non inlined function).
+ int formal_params_size =
+ masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
+
+ // We're not going to continue execution, so we can use an arbitrary register
+ // here instead of relying on temporaries from the register allocator.
+ // We cannot use scratch registers, since they're used in LeaveFrame and
+ // DropArguments.
+ Register actual_params_size = x9;
+ Register params_size = x10;
+
+ // Compute the size of the actual parameters + receiver (in bytes).
+ // TODO(leszeks): Consider making this an input into Return to re-use the
+ // incoming argc's register (if it's still valid).
+ __ Ldr(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ Mov(params_size, Immediate(formal_params_size));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ CompareAndBranch(params_size, actual_params_size, ge,
+ &corrected_args_count);
+ __ Mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame.
+ __ LeaveFrame(StackFrame::MAGLEV);
+
+ // Drop receiver + arguments according to dynamic arguments size.
+ __ DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
+ __ Ret();
+}
+
+void BranchIfFloat64Compare::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+}
+void BranchIfFloat64Compare::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ __ Fcmp(left, right);
+ if (jump_mode_if_nan_ == JumpModeIfNaN::kJumpToTrue) {
+ __ JumpIf(vs, if_true()->label());
+ } else {
+ __ JumpIf(vs, if_false()->label());
+ }
+ __ Branch(ConditionFor(operation_), if_true(), if_false(),
+ state.next_block());
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-assembler-inl.h b/deps/v8/src/maglev/maglev-assembler-inl.h
index 975358363a..44b3b75d34 100644
--- a/deps/v8/src/maglev/maglev-assembler-inl.h
+++ b/deps/v8/src/maglev/maglev-assembler-inl.h
@@ -1,102 +1,26 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
+// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
-#include <tuple>
#include <type_traits>
-#include <utility>
-#include "src/codegen/macro-assembler-inl.h"
#include "src/maglev/maglev-assembler.h"
-#include "src/maglev/maglev-basic-block.h"
-#include "src/maglev/maglev-code-gen-state.h"
+
+#ifdef V8_TARGET_ARCH_ARM64
+#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/maglev/x64/maglev-assembler-x64-inl.h"
+#else
+#error "Maglev does not supported this architecture."
+#endif
namespace v8 {
namespace internal {
namespace maglev {
-ZoneLabelRef::ZoneLabelRef(MaglevAssembler* masm)
- : ZoneLabelRef(masm->compilation_info()->zone()) {}
-
-void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true,
- BasicBlock* if_false, BasicBlock* next_block) {
- // We don't have any branch probability information, so try to jump
- // over whatever the next block emitted is.
- if (if_false == next_block) {
- // Jump over the false block if true, otherwise fall through into it.
- j(condition, if_true->label());
- } else {
- // Jump to the false block if true.
- j(NegateCondition(condition), if_false->label());
- // Jump to the true block if it's not the next block.
- if (if_true != next_block) {
- jmp(if_true->label());
- }
- }
-}
-
-void MaglevAssembler::PushInput(const Input& input) {
- if (input.operand().IsConstant()) {
- input.node()->LoadToRegister(this, kScratchRegister);
- Push(kScratchRegister);
- } else {
- // TODO(leszeks): Consider special casing the value. (Toon: could possibly
- // be done through Input directly?)
- const compiler::AllocatedOperand& operand =
- compiler::AllocatedOperand::cast(input.operand());
-
- if (operand.IsRegister()) {
- Push(operand.GetRegister());
- } else {
- DCHECK(operand.IsStackSlot());
- Push(GetStackSlot(operand));
- }
- }
-}
-
-Register MaglevAssembler::FromAnyToRegister(const Input& input,
- Register scratch) {
- if (input.operand().IsConstant()) {
- input.node()->LoadToRegister(this, scratch);
- return scratch;
- }
- const compiler::AllocatedOperand& operand =
- compiler::AllocatedOperand::cast(input.operand());
- if (operand.IsRegister()) {
- return ToRegister(input);
- } else {
- DCHECK(operand.IsStackSlot());
- movq(scratch, ToMemOperand(input));
- return scratch;
- }
-}
-
-inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) {
- info->set_deopting_call_return_pc(pc_offset_for_safepoint());
- code_gen_state()->PushLazyDeopt(info);
- safepoint_table_builder()->DefineSafepoint(this);
-}
-
-inline void MaglevAssembler::DefineExceptionHandlerPoint(NodeBase* node) {
- ExceptionHandlerInfo* info = node->exception_handler_info();
- if (!info->HasExceptionHandler()) return;
- info->pc_offset = pc_offset_for_safepoint();
- code_gen_state()->PushHandlerInfo(node);
-}
-
-inline void MaglevAssembler::DefineExceptionHandlerAndLazyDeoptPoint(
- NodeBase* node) {
- DefineExceptionHandlerPoint(node);
- DefineLazyDeoptPoint(node->lazy_deopt_info());
-}
-
-// ---
-// Deferred code handling.
-// ---
-
namespace detail {
// Base case provides an error.
@@ -149,10 +73,18 @@ struct CopyForDeferredHelper<BytecodeOffset>
template <>
struct CopyForDeferredHelper<EagerDeoptInfo*>
: public CopyForDeferredByValue<EagerDeoptInfo*> {};
+// LazyDeoptInfo pointers are copied by value.
+template <>
+struct CopyForDeferredHelper<LazyDeoptInfo*>
+ : public CopyForDeferredByValue<LazyDeoptInfo*> {};
// ZoneLabelRef is copied by value.
template <>
struct CopyForDeferredHelper<ZoneLabelRef>
: public CopyForDeferredByValue<ZoneLabelRef> {};
+// RegList are copied by value.
+template <>
+struct CopyForDeferredHelper<RegList> : public CopyForDeferredByValue<RegList> {
+};
// Register snapshots are copied by value.
template <>
struct CopyForDeferredHelper<RegisterSnapshot>
@@ -161,6 +93,11 @@ struct CopyForDeferredHelper<RegisterSnapshot>
template <>
struct CopyForDeferredHelper<FeedbackSlot>
: public CopyForDeferredByValue<FeedbackSlot> {};
+// Heap Refs are copied by value.
+template <typename T>
+struct CopyForDeferredHelper<T, typename std::enable_if<std::is_base_of<
+ compiler::ObjectRef, T>::value>::type>
+ : public CopyForDeferredByValue<T> {};
template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T&& value) {
@@ -214,15 +151,21 @@ class DeferredCodeInfoImpl final : public DeferredCodeInfo {
template <typename... InArgs>
explicit DeferredCodeInfoImpl(MaglevCompilationInfo* compilation_info,
+ RegList general_temporaries,
+ DoubleRegList double_temporaries,
FunctionPointer function, InArgs&&... args)
: function(function),
- args(CopyForDeferred(compilation_info, std::forward<InArgs>(args))...) {
- }
+ args(CopyForDeferred(compilation_info, std::forward<InArgs>(args))...),
+ general_temporaries_(general_temporaries),
+ double_temporaries_(double_temporaries) {}
DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;
void Generate(MaglevAssembler* masm) override {
+ MaglevAssembler::ScratchRegisterScope scratch_scope(masm);
+ scratch_scope.SetAvailable(general_temporaries_);
+ scratch_scope.SetAvailableDouble(double_temporaries_);
std::apply(function,
std::tuple_cat(std::make_tuple(masm), std::move(args)));
}
@@ -230,13 +173,15 @@ class DeferredCodeInfoImpl final : public DeferredCodeInfo {
private:
FunctionPointer function;
Tuple args;
+ RegList general_temporaries_;
+ DoubleRegList double_temporaries_;
};
} // namespace detail
template <typename Function, typename... Args>
-inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode(
- Function&& deferred_code_gen, Args&&... args) {
+inline Label* MaglevAssembler::MakeDeferredCode(Function&& deferred_code_gen,
+ Args&&... args) {
using FunctionPointer =
typename detail::FunctionArgumentsTupleHelper<Function>::FunctionPointer;
static_assert(
@@ -245,15 +190,18 @@ inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode(
std::declval<MaglevCompilationInfo*>(),
std::declval<Args>()))...>,
"Parameters of deferred_code_gen function should match arguments into "
- "PushDeferredCode");
+ "MakeDeferredCode");
+ ScratchRegisterScope scratch_scope(this);
using DeferredCodeInfoT = detail::DeferredCodeInfoImpl<Function>;
DeferredCodeInfoT* deferred_code =
compilation_info()->zone()->New<DeferredCodeInfoT>(
- compilation_info(), deferred_code_gen, std::forward<Args>(args)...);
+ compilation_info(), scratch_scope.Available(),
+ scratch_scope.AvailableDouble(), deferred_code_gen,
+ std::forward<Args>(args)...);
code_gen_state()->PushDeferredCode(deferred_code);
- return deferred_code;
+ return &deferred_code->deferred_code_label;
}
// Note this doesn't take capturing lambdas by design, since state may
@@ -263,46 +211,75 @@ template <typename Function, typename... Args>
inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
Function&& deferred_code_gen,
Args&&... args) {
- DeferredCodeInfo* deferred_code = PushDeferredCode<Function, Args...>(
- std::forward<Function>(deferred_code_gen), std::forward<Args>(args)...);
if (v8_flags.code_comments) {
RecordComment("-- Jump to deferred code");
}
- j(cond, &deferred_code->deferred_code_label);
+ JumpIf(cond, MakeDeferredCode<Function, Args...>(
+ std::forward<Function>(deferred_code_gen),
+ std::forward<Args>(args)...));
+}
+
+inline void MaglevAssembler::SmiToDouble(DoubleRegister result, Register smi) {
+ AssertSmi(smi);
+ SmiUntag(smi);
+ Int32ToDouble(result, smi);
}
-// ---
-// Deopt
-// ---
+inline void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true,
+ BasicBlock* if_false,
+ BasicBlock* next_block) {
+ Branch(condition, if_true->label(), Label::kFar, if_true == next_block,
+ if_false->label(), Label::kFar, if_false == next_block);
+}
-inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
- DeoptimizeReason reason) {
- if (deopt_info->reason() != DeoptimizeReason::kUnknown) {
- DCHECK_EQ(deopt_info->reason(), reason);
- }
- if (deopt_info->deopt_entry_label()->is_unused()) {
- code_gen_state()->PushEagerDeopt(deopt_info);
- deopt_info->set_reason(reason);
+inline void MaglevAssembler::Branch(Condition condition, Label* if_true,
+ Label::Distance true_distance,
+ bool fallthrough_when_true, Label* if_false,
+ Label::Distance false_distance,
+ bool fallthrough_when_false) {
+ if (fallthrough_when_false) {
+ if (fallthrough_when_true) {
+ // If both paths are a fallthrough, do nothing.
+ DCHECK_EQ(if_true, if_false);
+ return;
+ }
+ // Jump over the false block if true, otherwise fall through into it.
+ JumpIf(condition, if_true, true_distance);
+ } else {
+ // Jump to the false block if true.
+ JumpIf(NegateCondition(condition), if_false, false_distance);
+ // Jump to the true block if it's not the next block.
+ if (!fallthrough_when_true) {
+ Jump(if_true, true_distance);
+ }
}
}
-template <typename NodeT>
-inline void MaglevAssembler::EmitEagerDeopt(NodeT* node,
- DeoptimizeReason reason) {
- static_assert(NodeT::kProperties.can_eager_deopt());
- RegisterEagerDeopt(node->eager_deopt_info(), reason);
- RecordComment("-- Jump to eager deopt");
- jmp(node->eager_deopt_info()->deopt_entry_label());
+inline void MaglevAssembler::LoadTaggedField(Register result,
+ MemOperand operand) {
+ MacroAssembler::LoadTaggedField(result, operand);
+}
+
+inline void MaglevAssembler::LoadTaggedField(Register result, Register object,
+ int offset) {
+ MacroAssembler::LoadTaggedField(result, FieldMemOperand(object, offset));
+}
+
+inline void MaglevAssembler::LoadTaggedFieldWithoutDecompressing(
+ Register result, Register object, int offset) {
+ MacroAssembler::LoadTaggedFieldWithoutDecompressing(
+ result, FieldMemOperand(object, offset));
+}
+
+inline void MaglevAssembler::LoadTaggedSignedField(Register result,
+ MemOperand operand) {
+ MacroAssembler::LoadTaggedField(result, operand);
}
-template <typename NodeT>
-inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond,
- DeoptimizeReason reason,
- NodeT* node) {
- static_assert(NodeT::kProperties.can_eager_deopt());
- RegisterEagerDeopt(node->eager_deopt_info(), reason);
- RecordComment("-- Jump to eager deopt");
- j(cond, node->eager_deopt_info()->deopt_entry_label());
+inline void MaglevAssembler::LoadTaggedSignedField(Register result,
+ Register object,
+ int offset) {
+ MacroAssembler::LoadTaggedField(result, FieldMemOperand(object, offset));
}
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-assembler.cc b/deps/v8/src/maglev/maglev-assembler.cc
index 54ea50f406..3f245cd60a 100644
--- a/deps/v8/src/maglev/maglev-assembler.cc
+++ b/deps/v8/src/maglev/maglev-assembler.cc
@@ -2,83 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/interface-descriptors-inl.h"
#include "src/maglev/maglev-assembler-inl.h"
-#include "src/objects/heap-number.h"
+#include "src/maglev/maglev-code-generator.h"
namespace v8 {
namespace internal {
namespace maglev {
-#define __ masm->
-
-void MaglevAssembler::Allocate(RegisterSnapshot& register_snapshot,
- Register object, int size_in_bytes,
- AllocationType alloc_type,
- AllocationAlignment alignment) {
- // TODO(victorgomes): Call the runtime for large object allocation.
- // TODO(victorgomes): Support double alignment.
- DCHECK_EQ(alignment, kTaggedAligned);
- size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
- if (v8_flags.single_generation) {
- alloc_type = AllocationType::kOld;
+Register MaglevAssembler::FromAnyToRegister(const Input& input,
+ Register scratch) {
+ if (input.operand().IsConstant()) {
+ input.node()->LoadToRegister(this, scratch);
+ return scratch;
+ }
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input.operand());
+ if (operand.IsRegister()) {
+ return ToRegister(input);
+ } else {
+ DCHECK(operand.IsStackSlot());
+ Move(scratch, ToMemOperand(input));
+ return scratch;
}
- bool in_new_space = alloc_type == AllocationType::kYoung;
- ExternalReference top =
- in_new_space
- ? ExternalReference::new_space_allocation_top_address(isolate_)
- : ExternalReference::old_space_allocation_top_address(isolate_);
- ExternalReference limit =
- in_new_space
- ? ExternalReference::new_space_allocation_limit_address(isolate_)
- : ExternalReference::old_space_allocation_limit_address(isolate_);
-
- ZoneLabelRef done(this);
- Register new_top = kScratchRegister;
- // Check if there is enough space.
- Move(object, ExternalReferenceAsOperand(top));
- leaq(new_top, Operand(object, size_in_bytes));
- cmpq(new_top, ExternalReferenceAsOperand(limit));
- // Otherwise call runtime.
- JumpToDeferredIf(
- greater_equal,
- [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
- Register object, Builtin builtin, int size_in_bytes,
- ZoneLabelRef done) {
- // Remove {object} from snapshot, since it is the returned allocated
- // HeapObject.
- register_snapshot.live_registers.clear(object);
- register_snapshot.live_tagged_registers.clear(object);
- {
- SaveRegisterStateForCall save_register_state(masm, register_snapshot);
- using D = AllocateDescriptor;
- __ Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
- __ CallBuiltin(builtin);
- save_register_state.DefineSafepoint();
- __ Move(object, kReturnRegister0);
- }
- __ jmp(*done);
- },
- register_snapshot, object,
- in_new_space ? Builtin::kAllocateRegularInYoungGeneration
- : Builtin::kAllocateRegularInOldGeneration,
- size_in_bytes, done);
- // Store new top and tag object.
- movq(ExternalReferenceAsOperand(top), new_top);
- addq(object, Immediate(kHeapObjectTag));
- bind(*done);
-}
-
-void MaglevAssembler::AllocateTwoByteString(RegisterSnapshot register_snapshot,
- Register result, int length) {
- Allocate(register_snapshot, result, SeqTwoByteString::SizeFor(length));
- LoadRoot(kScratchRegister, RootIndex::kStringMap);
- StoreTaggedField(FieldOperand(result, HeapObject::kMapOffset),
- kScratchRegister);
- StoreTaggedField(FieldOperand(result, Name::kRawHashFieldOffset),
- Immediate(Name::kEmptyHashField));
- StoreTaggedField(FieldOperand(result, String::kLengthOffset),
- Immediate(length));
}
void MaglevAssembler::LoadSingleCharacterString(Register result,
@@ -87,251 +32,33 @@ void MaglevAssembler::LoadSingleCharacterString(Register result,
DCHECK_LT(char_code, String::kMaxOneByteCharCode);
Register table = result;
LoadRoot(table, RootIndex::kSingleCharacterStringTable);
- DecompressAnyTagged(result, FieldOperand(table, FixedArray::kHeaderSize +
+ DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize +
char_code * kTaggedSize));
}
-void MaglevAssembler::LoadSingleCharacterString(Register result,
- Register char_code,
- Register scratch) {
- if (v8_flags.debug_code) {
- cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
- Assert(below_equal, AbortReason::kUnexpectedValue);
- }
- DCHECK_NE(char_code, scratch);
- Register table = scratch;
- LoadRoot(table, RootIndex::kSingleCharacterStringTable);
- DecompressAnyTagged(result, FieldOperand(table, char_code, times_tagged_size,
- FixedArray::kHeaderSize));
-}
-
-void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
- Label* char_code_fits_one_byte,
- Register result, Register char_code,
- Register scratch) {
- DCHECK_NE(char_code, scratch);
- ZoneLabelRef done(this);
- cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
- JumpToDeferredIf(
- above,
- [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
- ZoneLabelRef done, Register result, Register char_code,
- Register scratch) {
- // Be sure to save {char_code}. If it aliases with {result}, use
- // the scratch register.
- if (char_code == result) {
- // This is guaranteed to be true since we've already checked
- // char_code != scratch.
- DCHECK_NE(scratch, result);
- __ Move(scratch, char_code);
- char_code = scratch;
- }
- DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
- register_snapshot.live_registers.set(char_code);
- __ AllocateTwoByteString(register_snapshot, result, 1);
- __ andl(char_code, Immediate(0xFFFF));
- __ movw(FieldOperand(result, SeqTwoByteString::kHeaderSize), char_code);
- __ jmp(*done);
- },
- register_snapshot, done, result, char_code, scratch);
- if (char_code_fits_one_byte != nullptr) {
- bind(char_code_fits_one_byte);
- }
- LoadSingleCharacterString(result, char_code, scratch);
- bind(*done);
-}
-
-void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot,
- Register result, Register string,
- Register index, Register scratch,
- Label* result_fits_one_byte) {
- ZoneLabelRef done(this);
- Label seq_string;
- Label cons_string;
- Label sliced_string;
-
- DeferredCodeInfo* deferred_runtime_call = PushDeferredCode(
- [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
- ZoneLabelRef done, Register result, Register string, Register index) {
- DCHECK(!register_snapshot.live_registers.has(result));
- DCHECK(!register_snapshot.live_registers.has(string));
- DCHECK(!register_snapshot.live_registers.has(index));
- {
- SaveRegisterStateForCall save_register_state(masm, register_snapshot);
- __ Push(string);
- __ SmiTag(index);
- __ Push(index);
- __ Move(kContextRegister, masm->native_context().object());
- // This call does not throw nor can deopt.
- __ CallRuntime(Runtime::kStringCharCodeAt);
- __ SmiUntag(kReturnRegister0);
- __ Move(result, kReturnRegister0);
- }
- __ jmp(*done);
- },
- register_snapshot, done, result, string, index);
-
- Register instance_type = scratch;
-
- // We might need to try more than one time for ConsString, SlicedString and
- // ThinString.
- Label loop;
- bind(&loop);
-
- if (v8_flags.debug_code) {
- // Check if {string} is a string.
- AssertNotSmi(string);
- LoadMap(scratch, string);
- CmpInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE, LAST_STRING_TYPE);
- Check(below_equal, AbortReason::kUnexpectedValue);
-
- movl(scratch, FieldOperand(string, String::kLengthOffset));
- cmpl(index, scratch);
- Check(below, AbortReason::kUnexpectedValue);
- }
-
- // Get instance type.
- LoadMap(instance_type, string);
- mov_tagged(instance_type,
- FieldOperand(instance_type, Map::kInstanceTypeOffset));
-
- {
- // TODO(victorgomes): Add fast path for external strings.
- Register representation = kScratchRegister;
- movl(representation, instance_type);
- andl(representation, Immediate(kStringRepresentationMask));
- cmpl(representation, Immediate(kSeqStringTag));
- j(equal, &seq_string, Label::kNear);
- cmpl(representation, Immediate(kConsStringTag));
- j(equal, &cons_string, Label::kNear);
- cmpl(representation, Immediate(kSlicedStringTag));
- j(equal, &sliced_string, Label::kNear);
- cmpl(representation, Immediate(kThinStringTag));
- j(not_equal, &deferred_runtime_call->deferred_code_label);
- // Fallthrough to thin string.
+void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info,
+ Register result, Register object,
+ Register scratch) {
+ Register load_source = object;
+ // Resolve property holder.
+ if (access_info.holder().has_value()) {
+ load_source = scratch;
+ Move(load_source, access_info.holder().value().object());
}
-
- // Is a thin string.
- {
- DecompressAnyTagged(string,
- FieldOperand(string, ThinString::kActualOffset));
- jmp(&loop, Label::kNear);
- }
-
- bind(&sliced_string);
- {
- Register offset = scratch;
- movl(offset, FieldOperand(string, SlicedString::kOffsetOffset));
- SmiUntag(offset);
- DecompressAnyTagged(string,
- FieldOperand(string, SlicedString::kParentOffset));
- addl(index, offset);
- jmp(&loop, Label::kNear);
- }
-
- bind(&cons_string);
- {
- CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- RootIndex::kempty_string);
- j(not_equal, &deferred_runtime_call->deferred_code_label);
- DecompressAnyTagged(string, FieldOperand(string, ConsString::kFirstOffset));
- jmp(&loop, Label::kNear); // Try again with first string.
- }
-
- bind(&seq_string);
- {
- Label two_byte_string;
- andl(instance_type, Immediate(kStringEncodingMask));
- cmpl(instance_type, Immediate(kTwoByteStringTag));
- j(equal, &two_byte_string, Label::kNear);
- movzxbl(result, FieldOperand(string, index, times_1,
- SeqOneByteString::kHeaderSize));
- jmp(result_fits_one_byte);
- bind(&two_byte_string);
- movzxwl(result, FieldOperand(string, index, times_2,
- SeqTwoByteString::kHeaderSize));
- // Fallthrough.
- }
-
- bind(*done);
-
- if (v8_flags.debug_code) {
- // We make sure that the user of this macro is not relying in string and
- // index to not be clobbered.
- if (result != string) {
- movl(string, Immediate(0xdeadbeef));
- }
- if (result != index) {
- movl(index, Immediate(0xdeadbeef));
+ FieldIndex field_index = access_info.field_index();
+ if (!field_index.is_inobject()) {
+ Register load_source_object = load_source;
+ if (load_source == object) {
+ load_source = scratch;
}
+    // The field is in the property array; first load it from there.
+ AssertNotSmi(load_source_object);
+ DecompressTagged(load_source,
+ FieldMemOperand(load_source_object,
+ JSReceiver::kPropertiesOrHashOffset));
}
-}
-
-void MaglevAssembler::ToBoolean(Register value, ZoneLabelRef is_true,
- ZoneLabelRef is_false,
- bool fallthrough_when_true) {
- Register map = kScratchRegister;
-
- // Check if {{value}} is Smi.
- CheckSmi(value);
- JumpToDeferredIf(
- zero,
- [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
- ZoneLabelRef is_false) {
- // Check if {value} is not zero.
- __ SmiCompare(value, Smi::FromInt(0));
- __ j(equal, *is_false);
- __ jmp(*is_true);
- },
- value, is_true, is_false);
-
- // Check if {{value}} is false.
- CompareRoot(value, RootIndex::kFalseValue);
- j(equal, *is_false);
-
- // Check if {{value}} is empty string.
- CompareRoot(value, RootIndex::kempty_string);
- j(equal, *is_false);
-
- // Check if {{value}} is undetectable.
- LoadMap(map, value);
- testl(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(Map::Bits1::IsUndetectableBit::kMask));
- j(not_zero, *is_false);
-
- // Check if {{value}} is a HeapNumber.
- CompareRoot(map, RootIndex::kHeapNumberMap);
- JumpToDeferredIf(
- equal,
- [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
- ZoneLabelRef is_false) {
- // Sets scratch register to 0.0.
- __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
- // Sets ZF if equal to 0.0, -0.0 or NaN.
- __ Ucomisd(kScratchDoubleReg,
- FieldOperand(value, HeapNumber::kValueOffset));
- __ j(zero, *is_false);
- __ jmp(*is_true);
- },
- value, is_true, is_false);
-
- // Check if {{value}} is a BigInt.
- CompareRoot(map, RootIndex::kBigIntMap);
- JumpToDeferredIf(
- equal,
- [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
- ZoneLabelRef is_false) {
- __ testl(FieldOperand(value, BigInt::kBitfieldOffset),
- Immediate(BigInt::LengthBits::kMask));
- __ j(zero, *is_false);
- __ jmp(*is_true);
- },
- value, is_true, is_false);
-
- // Otherwise true.
- if (!fallthrough_when_true) {
- jmp(*is_true);
- }
+ AssertNotSmi(load_source);
+ DecompressTagged(result, FieldMemOperand(load_source, field_index.offset()));
}
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-assembler.h b/deps/v8/src/maglev/maglev-assembler.h
index 2e4c6c050b..3f0dfd2101 100644
--- a/deps/v8/src/maglev/maglev-assembler.h
+++ b/deps/v8/src/maglev/maglev-assembler.h
@@ -5,13 +5,19 @@
#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_H_
+#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/interpreter/bytecode-flags.h"
#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
+class Graph;
class MaglevAssembler;
// Label allowed to be passed to deferred code.
@@ -34,23 +40,22 @@ class ZoneLabelRef {
explicit ZoneLabelRef(Label* label) : label_(label) {}
};
+// The slot index is the offset from the frame pointer.
+struct StackSlot {
+ int32_t index;
+};
+
class MaglevAssembler : public MacroAssembler {
public:
+ class ScratchRegisterScope;
+
explicit MaglevAssembler(Isolate* isolate, MaglevCodeGenState* code_gen_state)
: MacroAssembler(isolate, CodeObjectRequired::kNo),
code_gen_state_(code_gen_state) {}
- inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
- return MemOperand(rbp, GetFramePointerOffsetForStackSlot(operand));
- }
-
- inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
- return GetStackSlot(compiler::AllocatedOperand::cast(operand));
- }
-
- inline MemOperand ToMemOperand(const ValueLocation& location) {
- return ToMemOperand(location.operand());
- }
+ inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand);
+ inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand);
+ inline MemOperand ToMemOperand(const ValueLocation& location);
inline int GetFramePointerOffsetForStackSlot(
const compiler::AllocatedOperand& operand) {
@@ -61,11 +66,17 @@ class MaglevAssembler : public MacroAssembler {
return GetFramePointerOffsetForStackSlot(index);
}
- void Allocate(RegisterSnapshot& register_snapshot, Register result,
+ template <typename Dest, typename Source>
+ inline void MoveRepr(MachineRepresentation repr, Dest dst, Source src);
+
+ void Allocate(RegisterSnapshot register_snapshot, Register result,
int size_in_bytes,
AllocationType alloc_type = AllocationType::kYoung,
AllocationAlignment alignment = kTaggedAligned);
+ void AllocateHeapNumber(RegisterSnapshot register_snapshot, Register result,
+ DoubleRegister value);
+
void AllocateTwoByteString(RegisterSnapshot register_snapshot,
Register result, int length);
@@ -73,17 +84,82 @@ class MaglevAssembler : public MacroAssembler {
void LoadSingleCharacterString(Register result, Register char_code,
Register scratch);
+ inline void BindJumpTarget(Label* label);
+ inline void BindBlock(BasicBlock* block);
+
+ inline Condition IsInt64Constant(Register reg, int64_t constant);
+ inline Condition IsRootConstant(Input input, RootIndex root_index);
+
inline void Branch(Condition condition, BasicBlock* if_true,
BasicBlock* if_false, BasicBlock* next_block);
- inline void PushInput(const Input& input);
- inline Register FromAnyToRegister(const Input& input, Register scratch);
+ inline void Branch(Condition condition, Label* if_true,
+ Label::Distance true_distance, bool fallthrough_when_true,
+ Label* if_false, Label::Distance false_distance,
+ bool fallthrough_when_false);
+
+ Register FromAnyToRegister(const Input& input, Register scratch);
+
+ inline void LoadTaggedField(Register result, MemOperand operand);
+ inline void LoadTaggedField(Register result, Register object, int offset);
+ inline void LoadTaggedFieldWithoutDecompressing(Register result,
+ Register object, int offset);
+ inline void LoadTaggedSignedField(Register result, MemOperand operand);
+ inline void LoadTaggedSignedField(Register result, Register object,
+ int offset);
+ inline void LoadTaggedFieldByIndex(Register result, Register object,
+ Register index, int scale, int offset);
+ inline void LoadBoundedSizeFromObject(Register result, Register object,
+ int offset);
+ inline void LoadExternalPointerField(Register result, MemOperand operand);
+
+ inline void LoadSignedField(Register result, MemOperand operand,
+ int element_size);
+ inline void LoadUnsignedField(Register result, MemOperand operand,
+ int element_size);
+ template <typename BitField>
+ inline void LoadBitField(Register result, MemOperand operand) {
+ // Pick a load with the right size, which makes sure to read the whole
+ // field.
+ static constexpr int load_size =
+ RoundUp<8>(BitField::kSize + BitField::kShift) / 8;
+ // TODO(leszeks): If the shift is 8 or 16, we could have loaded from a
+ // shifted address instead.
+ LoadUnsignedField(result, operand, load_size);
+ DecodeField<BitField>(result);
+ }
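// Illustrative only, not part of this diff: for a hypothetical BitField with
// kShift == 3 and kSize == 6, load_size == RoundUp<8>(6 + 3) / 8 == 2, so a
// two-byte LoadUnsignedField is emitted before DecodeField masks and shifts
// the result.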
+
+ enum ValueIsCompressed { kValueIsDecompressed, kValueIsCompressed };
+ enum ValueCanBeSmi { kValueCannotBeSmi, kValueCanBeSmi };
+ // Preserves all registers that are in the register snapshot, but is otherwise
+ // allowed to clobber both input registers if they are not in the snapshot.
+ //
+ // For maximum efficiency, prefer:
+ // * Having `object` == WriteBarrierDescriptor::ObjectRegister(),
+ // * Not having WriteBarrierDescriptor::SlotAddressRegister() in the
+ // register snapshot,
+ // * Not having `value` in the register snapshot, allowing it to be
+ // clobbered.
+ void StoreTaggedFieldWithWriteBarrier(Register object, int offset,
+ Register value,
+ RegisterSnapshot register_snapshot,
+ ValueIsCompressed value_is_compressed,
+ ValueCanBeSmi value_can_be_smi);
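// Illustrative only, not part of this diff: a call shape that satisfies the
// preferences above (the offset, snapshot and register names are
// hypothetical):
//
//   Register object = WriteBarrierDescriptor::ObjectRegister();
//   __ StoreTaggedFieldWithWriteBarrier(
//       object, JSObject::kElementsOffset, value, snapshot,
//       kValueIsDecompressed, kValueCannotBeSmi);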
+ inline void StoreTaggedSignedField(Register object, int offset,
+ Register value);
+ inline void StoreTaggedSignedField(Register object, int offset, Smi value);
+
+ inline void StoreField(MemOperand operand, Register value, int element_size);
+ inline void ReverseByteOrder(Register value, int element_size);
+
+ void BuildTypedArrayDataPointer(Register data_pointer, Register object);
// Warning: Input registers {string} and {index} will be scratched.
  // {result} is allowed to alias with one of the other 3 input registers.
// {result} is an int32.
- void StringCharCodeAt(RegisterSnapshot& register_snapshot, Register result,
- Register string, Register index, Register scratch,
- Label* result_fits_one_byte);
+ void StringCharCodeOrCodePointAt(
+ BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
+ RegisterSnapshot& register_snapshot, Register result, Register string,
+ Register index, Register scratch, Label* result_fits_one_byte);
// Warning: Input {char_code} will be scratched.
void StringFromCharCode(RegisterSnapshot register_snapshot,
Label* char_code_fits_one_byte, Register result,
@@ -91,25 +167,163 @@ class MaglevAssembler : public MacroAssembler {
void ToBoolean(Register value, ZoneLabelRef is_true, ZoneLabelRef is_false,
bool fallthrough_when_true);
+ void TestTypeOf(Register object,
+ interpreter::TestTypeOfFlags::LiteralFlag literal,
+ Label* if_true, Label::Distance true_distance,
+ bool fallthrough_when_true, Label* if_false,
+ Label::Distance false_distance, bool fallthrough_when_false);
+
+ // Smi-tags {obj} in place.
+ inline void SmiTagInt32(Register obj, Label* fail);
+
+ inline void DoubleToInt64Repr(Register dst, DoubleRegister src);
+ void TruncateDoubleToInt32(Register dst, DoubleRegister src);
+ void TryTruncateDoubleToInt32(Register dst, DoubleRegister src, Label* fail);
inline void DefineLazyDeoptPoint(LazyDeoptInfo* info);
inline void DefineExceptionHandlerPoint(NodeBase* node);
inline void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase* node);
template <typename Function, typename... Args>
- inline DeferredCodeInfo* PushDeferredCode(Function&& deferred_code_gen,
- Args&&... args);
+ inline Label* MakeDeferredCode(Function&& deferred_code_gen, Args&&... args);
template <typename Function, typename... Args>
inline void JumpToDeferredIf(Condition cond, Function&& deferred_code_gen,
Args&&... args);
-
- inline void RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
- DeoptimizeReason reason);
+ template <typename NodeT>
+ inline Label* GetDeoptLabel(NodeT* node, DeoptimizeReason reason);
template <typename NodeT>
inline void EmitEagerDeopt(NodeT* node, DeoptimizeReason reason);
template <typename NodeT>
inline void EmitEagerDeoptIf(Condition cond, DeoptimizeReason reason,
NodeT* node);
+ template <typename NodeT>
+ inline void EmitEagerDeoptIfNotEqual(DeoptimizeReason reason, NodeT* node);
+
+ inline void MaterialiseValueNode(Register dst, ValueNode* value);
+
+ inline MemOperand StackSlotOperand(StackSlot slot);
+ inline void Move(StackSlot dst, Register src);
+ inline void Move(StackSlot dst, DoubleRegister src);
+ inline void Move(Register dst, StackSlot src);
+ inline void Move(DoubleRegister dst, StackSlot src);
+ inline void Move(MemOperand dst, Register src);
+ inline void Move(MemOperand dst, DoubleRegister src);
+ inline void Move(Register dst, MemOperand src);
+ inline void Move(DoubleRegister dst, MemOperand src);
+ inline void Move(DoubleRegister dst, DoubleRegister src);
+ inline void Move(Register dst, Smi src);
+ inline void Move(Register dst, ExternalReference src);
+ inline void Move(Register dst, Register src);
+ inline void Move(Register dst, TaggedIndex i);
+ inline void Move(Register dst, int32_t i);
+ inline void Move(DoubleRegister dst, double n);
+ inline void Move(DoubleRegister dst, Float64 n);
+ inline void Move(Register dst, Handle<HeapObject> obj);
+
+ inline void LoadByte(Register dst, MemOperand src);
+
+ inline void SignExtend32To64Bits(Register dst, Register src);
+ inline void NegateInt32(Register val);
+
+ inline void ToUint8Clamped(Register result, DoubleRegister value, Label* min,
+ Label* max, Label* done);
+
+ template <typename NodeT>
+ inline void DeoptIfBufferDetached(Register array, Register scratch,
+ NodeT* node);
+
+ inline void IsObjectType(Register heap_object, InstanceType type);
+ inline void CompareObjectType(Register heap_object, InstanceType type);
+ inline void JumpIfJSAnyIsNotPrimitive(Register heap_object, Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void CompareObjectType(Register heap_object, InstanceType type,
+ Register scratch);
+ inline void CompareObjectTypeRange(Register heap_object,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
+ inline void CompareMapWithRoot(Register object, RootIndex index,
+ Register scratch);
+
+ inline void CompareInstanceTypeRange(Register map, InstanceType lower_limit,
+ InstanceType higher_limit);
+ inline void CompareInstanceTypeRange(Register map, Register instance_type_out,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
+ inline void CompareTagged(Register reg, Handle<HeapObject> obj);
+ inline void CompareTagged(Register src1, Register src2);
+
+ inline void CompareInt32(Register reg, int32_t imm);
+ inline void CompareInt32(Register src1, Register src2);
+
+ inline void CallSelf();
+
+ inline void Jump(Label* target, Label::Distance distance = Label::kFar);
+ inline void JumpIf(Condition cond, Label* target,
+ Label::Distance distance = Label::kFar);
+
+ inline void JumpIfRoot(Register with, RootIndex index, Label* if_equal,
+ Label::Distance distance = Label::kFar);
+ inline void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal,
+ Label::Distance distance = Label::kFar);
+ inline void JumpIfSmi(Register src, Label* on_smi,
+ Label::Distance near_jump = Label::kFar);
+ inline void JumpIfByte(Condition cc, Register value, int32_t byte,
+ Label* target, Label::Distance distance = Label::kFar);
+
+ inline void CompareInt32AndJumpIf(Register r1, Register r2, Condition cond,
+ Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void CompareInt32AndJumpIf(Register r1, int32_t value, Condition cond,
+ Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void CompareSmiAndJumpIf(Register r1, Smi value, Condition cond,
+ Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void TestInt32AndJumpIfAnySet(Register r1, int32_t mask, Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void TestInt32AndJumpIfAllClear(
+ Register r1, int32_t mask, Label* target,
+ Label::Distance distance = Label::kFar);
+
+ inline void Int32ToDouble(DoubleRegister result, Register n);
+ inline void SmiToDouble(DoubleRegister result, Register smi);
+
+ void StringLength(Register result, Register string);
+
+ // The registers WriteBarrierDescriptor::ObjectRegister and
+ // WriteBarrierDescriptor::SlotAddressRegister can be clobbered.
+ void StoreFixedArrayElementWithWriteBarrier(
+ Register array, Register index, Register value,
+ RegisterSnapshot register_snapshot);
+ void StoreFixedArrayElementNoWriteBarrier(Register array, Register index,
+ Register value);
+
+ // TODO(victorgomes): Import baseline Pop(T...) methods.
+ inline void Pop(Register dst);
+ using MacroAssembler::Pop;
+
+ template <typename... T>
+ inline void Push(T... vals);
+ template <typename... T>
+ inline void PushReverse(T... vals);
+
+ void Prologue(Graph* graph);
+
+ inline void FinishCode();
+
+ inline void AssertStackSizeCorrect();
+
+ inline void LoadHeapNumberValue(DoubleRegister result, Register heap_number);
+
+ void LoadDataField(const PolymorphicAccessInfo& access_info, Register result,
+ Register object, Register scratch);
+
+ void MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
+ Label* eager_deopt_entry,
+ size_t lazy_deopt_count,
+ Label* lazy_deopt_entry);
compiler::NativeContextRef native_context() const {
return code_gen_state()->broker()->target_native_context();
@@ -130,6 +344,7 @@ class MaglevAssembler : public MacroAssembler {
}
MaglevCodeGenState* const code_gen_state_;
+ ScratchRegisterScope* scratch_register_scope_ = nullptr;
};
class SaveRegisterStateForCall {
@@ -156,7 +371,13 @@ class SaveRegisterStateForCall {
}
pushed_reg_index++;
}
+#ifdef V8_TARGET_ARCH_ARM64
+ pushed_reg_index = RoundUp<2>(pushed_reg_index);
+#endif
int num_pushed_double_reg = snapshot_.live_double_registers.Count();
+#ifdef V8_TARGET_ARCH_ARM64
+ num_pushed_double_reg = RoundUp<2>(num_pushed_double_reg);
+#endif
safepoint.SetNumPushedRegisters(pushed_reg_index + num_pushed_double_reg);
return safepoint;
}
@@ -174,6 +395,135 @@ class SaveRegisterStateForCall {
RegisterSnapshot snapshot_;
};
+ZoneLabelRef::ZoneLabelRef(MaglevAssembler* masm)
+ : ZoneLabelRef(masm->compilation_info()->zone()) {}
+
+// ---
+// Deopt
+// ---
+
+template <typename NodeT>
+inline Label* MaglevAssembler::GetDeoptLabel(NodeT* node,
+ DeoptimizeReason reason) {
+ static_assert(NodeT::kProperties.can_eager_deopt());
+ EagerDeoptInfo* deopt_info = node->eager_deopt_info();
+ if (deopt_info->reason() != DeoptimizeReason::kUnknown) {
+ DCHECK_EQ(deopt_info->reason(), reason);
+ }
+ if (deopt_info->deopt_entry_label()->is_unused()) {
+ code_gen_state()->PushEagerDeopt(deopt_info);
+ deopt_info->set_reason(reason);
+ }
+ return node->eager_deopt_info()->deopt_entry_label();
+}
+
+template <typename NodeT>
+inline void MaglevAssembler::EmitEagerDeopt(NodeT* node,
+ DeoptimizeReason reason) {
+ RecordComment("-- Jump to eager deopt");
+ Jump(GetDeoptLabel(node, reason));
+}
+
+template <typename NodeT>
+inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond,
+ DeoptimizeReason reason,
+ NodeT* node) {
+ RecordComment("-- Jump to eager deopt");
+ JumpIf(cond, GetDeoptLabel(node, reason));
+}
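// Illustrative only, not part of this diff: node code generation typically
// pairs a comparison with EmitEagerDeoptIf; the condition, reason and node
// below are examples only.
//
//   __ CompareInt32(index, 0);
//   __ EmitEagerDeoptIf(kLessThan, DeoptimizeReason::kOutOfBounds, node);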
+
+inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) {
+ info->set_deopting_call_return_pc(pc_offset_for_safepoint());
+ code_gen_state()->PushLazyDeopt(info);
+ safepoint_table_builder()->DefineSafepoint(this);
+}
+
+inline void MaglevAssembler::DefineExceptionHandlerPoint(NodeBase* node) {
+ ExceptionHandlerInfo* info = node->exception_handler_info();
+ if (!info->HasExceptionHandler()) return;
+ info->pc_offset = pc_offset_for_safepoint();
+ code_gen_state()->PushHandlerInfo(node);
+}
+
+inline void MaglevAssembler::DefineExceptionHandlerAndLazyDeoptPoint(
+ NodeBase* node) {
+ DefineExceptionHandlerPoint(node);
+ DefineLazyDeoptPoint(node->lazy_deopt_info());
+}
+
+// Helpers for pushing arguments.
+template <typename T>
+class RepeatIterator {
+ public:
+ // Although we pretend to be a random access iterator, only methods that are
+ // required for Push() are implemented right now.
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef T value_type;
+ typedef int difference_type;
+ typedef T* pointer;
+ typedef T reference;
+ RepeatIterator(T val, int count) : val_(val), count_(count) {}
+ reference operator*() const { return val_; }
+ pointer operator->() { return &val_; }
+ RepeatIterator& operator++() {
+ ++count_;
+ return *this;
+ }
+ RepeatIterator& operator--() {
+ --count_;
+ return *this;
+ }
+ RepeatIterator& operator+=(difference_type diff) {
+ count_ += diff;
+ return *this;
+ }
+ bool operator!=(const RepeatIterator<T>& that) const {
+ return count_ != that.count_;
+ }
+ bool operator==(const RepeatIterator<T>& that) const {
+ return count_ == that.count_;
+ }
+ difference_type operator-(const RepeatIterator<T>& it) const {
+ return count_ - it.count_;
+ }
+
+ private:
+ T val_;
+ int count_;
+};
+
+template <typename T>
+auto RepeatValue(T val, int count) {
+ return base::make_iterator_range(RepeatIterator<T>(val, 0),
+ RepeatIterator<T>(val, count));
+}
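// Illustrative only, not part of this diff: RepeatValue is meant to feed the
// variadic Push/PushReverse helpers, which (per the is_iterator_range trait
// below) accept iterator ranges; the names are hypothetical.
//
//   __ Push(receiver, RepeatValue(padding_reg, slot_count));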
+
+namespace detail {
+
+template <class T>
+struct is_iterator_range : std::false_type {};
+template <typename T>
+struct is_iterator_range<base::iterator_range<T>> : std::true_type {};
+
+} // namespace detail
+
+// General helpers.
+
+inline bool AnyMapIsHeapNumber(const ZoneHandleSet<Map>& maps) {
+ return std::any_of(maps.begin(), maps.end(),
+ [](Handle<Map> map) { return map->IsHeapNumberMap(); });
+}
+
+inline Condition ToCondition(AssertCondition cond) {
+ switch (cond) {
+#define CASE(Name) \
+ case AssertCondition::k##Name: \
+ return k##Name;
+ ASSERT_CONDITION(CASE)
+#undef CASE
+ }
+}
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-basic-block.h b/deps/v8/src/maglev/maglev-basic-block.h
index 7f583e5bb2..d40b3c5c2e 100644
--- a/deps/v8/src/maglev/maglev-basic-block.h
+++ b/deps/v8/src/maglev/maglev-basic-block.h
@@ -7,6 +7,7 @@
#include <vector>
+#include "src/base/small-vector.h"
#include "src/codegen/label.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
@@ -26,7 +27,14 @@ class BasicBlock {
uint32_t first_id() const {
if (has_phi()) return phis()->first()->id();
- return nodes_.is_empty() ? control_node()->id() : nodes_.first()->id();
+ if (nodes_.is_empty()) {
+ return control_node()->id();
+ }
+ auto node = nodes_.first();
+ while (node && node->Is<Identity>()) {
+ node = node->NextNode();
+ }
+ return node ? node->id() : control_node()->id();
}
uint32_t FirstNonGapMoveId() const {
@@ -34,6 +42,7 @@ class BasicBlock {
if (!nodes_.is_empty()) {
for (const Node* node : nodes_) {
if (IsGapMoveNode(node->opcode())) continue;
+ if (node->Is<Identity>()) continue;
return node->id();
}
}
@@ -52,11 +61,17 @@ class BasicBlock {
bool is_edge_split_block() const { return is_edge_split_block_; }
+ bool is_loop() const { return has_state() && state()->is_loop(); }
+
MergePointRegisterState& edge_split_block_register_state() {
DCHECK(is_edge_split_block());
return *edge_split_block_register_state_;
}
+ bool contains_node_id(NodeIdT id) const {
+ return id >= first_id() && id <= control_node()->id();
+ }
+
void set_edge_split_block_register_state(
MergePointRegisterState* register_state) {
DCHECK(is_edge_split_block());
@@ -73,6 +88,13 @@ class BasicBlock {
edge_split_block_register_state_ = nullptr;
}
+ bool is_start_block_of_switch_case() const {
+ return is_start_block_of_switch_case_;
+ }
+ void set_start_block_of_switch_case(bool value) {
+ is_start_block_of_switch_case_ = value;
+ }
+
Phi::List* phis() const {
DCHECK(has_phi());
return state_->phis();
@@ -90,6 +112,8 @@ class BasicBlock {
control_node()->Cast<UnconditionalControlNode>()->set_predecessor_id(id);
}
+ base::SmallVector<BasicBlock*, 2> successors() const;
+
Label* label() { return &label_; }
MergePointInterpreterFrameState* state() const {
DCHECK(has_state());
@@ -103,6 +127,7 @@ class BasicBlock {
private:
bool is_edge_split_block_ = false;
+ bool is_start_block_of_switch_case_ = false;
Node::List nodes_;
ControlNode* control_node_;
union {
@@ -112,6 +137,26 @@ class BasicBlock {
Label label_;
};
+inline base::SmallVector<BasicBlock*, 2> BasicBlock::successors() const {
+ ControlNode* control = control_node();
+ if (auto node = control->TryCast<UnconditionalControlNode>()) {
+ return {node->target()};
+ } else if (auto node = control->TryCast<BranchControlNode>()) {
+ return {node->if_true(), node->if_false()};
+ } else if (auto node = control->TryCast<Switch>()) {
+ base::SmallVector<BasicBlock*, 2> succs;
+ for (int i = 0; i < node->size(); i++) {
+ succs.push_back(node->targets()[i].block_ptr());
+ }
+ if (node->has_fallthrough()) {
+ succs.push_back(node->fallthrough());
+ }
+ return succs;
+ } else {
+ return base::SmallVector<BasicBlock*, 2>();
+ }
+}
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-code-gen-state.h b/deps/v8/src/maglev/maglev-code-gen-state.h
index 3d3899d5cf..2c3ee8c309 100644
--- a/deps/v8/src/maglev/maglev-code-gen-state.h
+++ b/deps/v8/src/maglev/maglev-code-gen-state.h
@@ -12,6 +12,7 @@
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/execution/frame-constants.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-ir.h"
@@ -73,6 +74,43 @@ class MaglevCodeGenState {
}
MaglevCompilationInfo* compilation_info() const { return compilation_info_; }
+ Label* entry_label() { return &entry_label_; }
+
+ void set_max_deopted_stack_size(uint32_t max_deopted_stack_size) {
+ max_deopted_stack_size_ = max_deopted_stack_size;
+ }
+
+ void set_max_call_stack_args_(uint32_t max_call_stack_args) {
+ max_call_stack_args_ = max_call_stack_args;
+ }
+
+ uint32_t stack_check_offset() {
+ int32_t parameter_slots =
+ compilation_info_->toplevel_compilation_unit()->parameter_count();
+ uint32_t stack_slots = tagged_slots_ + untagged_slots_;
+ DCHECK(is_int32(stack_slots));
+ int32_t optimized_frame_height = parameter_slots * kSystemPointerSize +
+ StandardFrameConstants::kFixedFrameSize +
+ stack_slots * kSystemPointerSize;
+ DCHECK(is_int32(max_deopted_stack_size_));
+ int32_t signed_max_unoptimized_frame_height =
+ static_cast<int32_t>(max_deopted_stack_size_);
+
+ // The offset is either the delta between the optimized frames and the
+ // interpreted frame, or the maximal number of bytes pushed to the stack
+ // while preparing for function calls, whichever is bigger.
+ uint32_t frame_height_delta = static_cast<uint32_t>(std::max(
+ signed_max_unoptimized_frame_height - optimized_frame_height, 0));
+ uint32_t max_pushed_argument_bytes =
+ static_cast<uint32_t>(max_call_stack_args_ * kSystemPointerSize);
+ if (v8_flags.deopt_to_baseline) {
+ // If we deopt to baseline, we need to be sure that we have enough space
+      // to recreate the unoptimized frame plus arguments to the largest call.
+ return frame_height_delta + max_pushed_argument_bytes;
+ }
+ return std::max(frame_height_delta, max_pushed_argument_bytes);
+ }
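// Illustrative only, not part of this diff, with hypothetical numbers: for 2
// parameter slots, 10 stack slots and max_deopted_stack_size_ == 256 on a
// 64-bit target, optimized_frame_height == 16 + kFixedFrameSize + 80; if that
// exceeds 256 the delta clamps to 0, and without --deopt-to-baseline the
// returned offset is just max_call_stack_args_ * kSystemPointerSize.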
+
private:
MaglevCompilationInfo* const compilation_info_;
MaglevSafepointTableBuilder* const safepoint_table_builder_;
@@ -84,6 +122,11 @@ class MaglevCodeGenState {
int untagged_slots_ = 0;
int tagged_slots_ = 0;
+ uint32_t max_deopted_stack_size_ = kMaxUInt32;
+ uint32_t max_call_stack_args_ = kMaxUInt32;
+
+ // Entry point label for recursive calls.
+ Label entry_label_;
};
// Some helpers for codegen.
diff --git a/deps/v8/src/maglev/maglev-code-generator.cc b/deps/v8/src/maglev/maglev-code-generator.cc
index a20fd22110..1c221459f7 100644
--- a/deps/v8/src/maglev/maglev-code-generator.cc
+++ b/deps/v8/src/maglev/maglev-code-generator.cc
@@ -17,6 +17,7 @@
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/deoptimizer/translation-array.h"
#include "src/execution/frame-constants.h"
#include "src/interpreter/bytecode-register.h"
@@ -44,16 +45,16 @@ template <typename RegisterT>
struct RegisterTHelper;
template <>
struct RegisterTHelper<Register> {
- static constexpr Register kScratch = kScratchRegister;
static constexpr RegList kAllocatableRegisters = kAllocatableGeneralRegisters;
};
template <>
struct RegisterTHelper<DoubleRegister> {
- static constexpr DoubleRegister kScratch = kScratchDoubleReg;
static constexpr DoubleRegList kAllocatableRegisters =
kAllocatableDoubleRegisters;
};
+enum NeedsDecompression { kDoesNotNeedDecompression, kNeedsDecompression };
+
// The ParallelMoveResolver is used to resolve multiple moves between registers
// and stack slots that are intended to happen, semantically, in parallel. It
// finds chains of moves that would clobber each other, and emits them in a non
@@ -86,33 +87,39 @@ struct RegisterTHelper<DoubleRegister> {
// It additionally keeps track of materialising moves, which don't have a stack
// slot but rather materialise a value from, e.g., a constant. These can safely
// be emitted at the end, once all the parallel moves are done.
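// Illustrative only, not part of this diff: given the pending parallel moves
// {r1 -> r2, r2 -> r1} (hypothetical registers), the resolver detects the
// cycle, parks the chain start in the scratch register handed to EmitMoves()
// and emits, in order:
//   Move(scratch, r1); Move(r1, r2); Move(r2, scratch);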
-template <typename RegisterT>
+template <typename RegisterT, bool DecompressIfNeeded>
class ParallelMoveResolver {
- static constexpr RegisterT kScratchRegT =
- RegisterTHelper<RegisterT>::kScratch;
-
static constexpr auto kAllocatableRegistersT =
RegisterTHelper<RegisterT>::kAllocatableRegisters;
+ static_assert(!DecompressIfNeeded || std::is_same_v<Register, RegisterT>);
public:
- explicit ParallelMoveResolver(MaglevAssembler* masm) : masm_(masm) {}
+ explicit ParallelMoveResolver(MaglevAssembler* masm)
+ : masm_(masm), scratch_(RegisterT::no_reg()) {}
void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
- compiler::AllocatedOperand target) {
- if (target.IsRegister()) {
- RecordMoveToRegister(source_node, source, ToRegisterT<RegisterT>(target));
+ compiler::AllocatedOperand target,
+ bool target_needs_to_be_decompressed) {
+ if (target.IsAnyRegister()) {
+ RecordMoveToRegister(source_node, source, ToRegisterT<RegisterT>(target),
+ target_needs_to_be_decompressed);
} else {
RecordMoveToStackSlot(source_node, source,
- masm_->GetFramePointerOffsetForStackSlot(target));
+ masm_->GetFramePointerOffsetForStackSlot(target),
+ target_needs_to_be_decompressed);
}
}
void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
- RegisterT target_reg) {
- RecordMoveToRegister(source_node, source, target_reg);
+ RegisterT target_reg,
+ NeedsDecompression target_needs_to_be_decompressed) {
+ RecordMoveToRegister(source_node, source, target_reg,
+ target_needs_to_be_decompressed);
}
- void EmitMoves() {
+ void EmitMoves(RegisterT scratch) {
+ DCHECK(!scratch_.is_valid());
+ scratch_ = scratch;
for (RegisterT reg : kAllocatableRegistersT) {
StartEmitMoveChain(reg);
ValueNode* materializing_register_move =
@@ -128,8 +135,8 @@ class ParallelMoveResolver {
StartEmitMoveChain(moves_from_stack_slot_.begin()->first);
}
for (auto [stack_slot, node] : materializing_stack_slot_moves_) {
- node->LoadToRegister(masm_, kScratchRegT);
- EmitStackMove(stack_slot, kScratchRegT);
+ node->LoadToRegister(masm_, scratch_);
+ __ Move(StackSlot{stack_slot}, scratch_);
}
}
@@ -139,12 +146,25 @@ class ParallelMoveResolver {
ParallelMoveResolver operator=(const ParallelMoveResolver&) = delete;
private:
- // The targets of moves from a source, i.e. the set of outgoing edges for a
- // node in the move graph.
+ // For the GapMoveTargets::needs_decompression member when DecompressIfNeeded
+ // is false.
+ struct DummyNeedsDecompression {
+ // NOLINTNEXTLINE
+ DummyNeedsDecompression(NeedsDecompression) {}
+ };
+
+ // The targets of moves from a source, i.e. the set of outgoing edges for
+ // a node in the move graph.
struct GapMoveTargets {
+ base::SmallVector<int32_t, 1> stack_slots = base::SmallVector<int32_t, 1>{};
RegListBase<RegisterT> registers;
- base::SmallVector<uint32_t, 1> stack_slots =
- base::SmallVector<uint32_t, 1>{};
+
+ // We only need this field for DecompressIfNeeded, otherwise use an empty
+ // dummy value.
+ V8_NO_UNIQUE_ADDRESS
+ std::conditional_t<DecompressIfNeeded, NeedsDecompression,
+ DummyNeedsDecompression>
+ needs_decompression = kDoesNotNeedDecompression;
GapMoveTargets() = default;
GapMoveTargets(GapMoveTargets&&) V8_NOEXCEPT = default;
@@ -178,11 +198,11 @@ class ParallelMoveResolver {
}
}
- void CheckNoExistingMoveToStackSlot(uint32_t target_slot) {
- for (Register reg : kAllocatableRegistersT) {
+ void CheckNoExistingMoveToStackSlot(int32_t target_slot) {
+ for (RegisterT reg : kAllocatableRegistersT) {
auto& stack_slots = moves_from_register_[reg.code()].stack_slots;
if (std::any_of(stack_slots.begin(), stack_slots.end(),
- [&](uint32_t slot) { return slot == target_slot; })) {
+ [&](int32_t slot) { return slot == target_slot; })) {
FATAL("Existing move from %s to stack slot %d", RegisterName(reg),
target_slot);
}
@@ -190,7 +210,7 @@ class ParallelMoveResolver {
for (auto& [stack_slot, targets] : moves_from_stack_slot_) {
auto& stack_slots = targets.stack_slots;
if (std::any_of(stack_slots.begin(), stack_slots.end(),
- [&](uint32_t slot) { return slot == target_slot; })) {
+ [&](int32_t slot) { return slot == target_slot; })) {
FATAL("Existing move from stack slot %d to stack slot %d", stack_slot,
target_slot);
}
@@ -204,51 +224,99 @@ class ParallelMoveResolver {
}
#else
void CheckNoExistingMoveToRegister(RegisterT target_reg) {}
- void CheckNoExistingMoveToStackSlot(uint32_t target_slot) {}
+ void CheckNoExistingMoveToStackSlot(int32_t target_slot) {}
#endif
void RecordMoveToRegister(ValueNode* node,
compiler::InstructionOperand source,
- RegisterT target_reg) {
+ RegisterT target_reg,
+ bool target_needs_to_be_decompressed) {
// There shouldn't have been another move to this register already.
CheckNoExistingMoveToRegister(target_reg);
+ NeedsDecompression needs_decompression = kDoesNotNeedDecompression;
+ if constexpr (DecompressIfNeeded) {
+ if (target_needs_to_be_decompressed &&
+ !node->decompresses_tagged_result()) {
+ needs_decompression = kNeedsDecompression;
+ }
+ } else {
+ DCHECK_IMPLIES(target_needs_to_be_decompressed,
+ node->decompresses_tagged_result());
+ }
+
+ GapMoveTargets* targets;
if (source.IsAnyRegister()) {
RegisterT source_reg = ToRegisterT<RegisterT>(source);
- if (target_reg != source_reg) {
- moves_from_register_[source_reg.code()].registers.set(target_reg);
+ if (target_reg == source_reg) {
+ // We should never have a register aliasing case that needs
+ // decompression, since this path is only used by exception phis and
+ // they have no reg->reg moves.
+ DCHECK_EQ(needs_decompression, kDoesNotNeedDecompression);
+ return;
}
+ targets = &moves_from_register_[source_reg.code()];
} else if (source.IsAnyStackSlot()) {
- uint32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
+ int32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
compiler::AllocatedOperand::cast(source));
- moves_from_stack_slot_[source_slot].registers.set(target_reg);
+ targets = &moves_from_stack_slot_[source_slot];
} else {
DCHECK(source.IsConstant());
DCHECK(IsConstantNode(node->opcode()));
materializing_register_moves_[target_reg.code()] = node;
+ // No need to update `targets.needs_decompression`, materialization is
+ // always decompressed.
+ return;
+ }
+
+ targets->registers.set(target_reg);
+ if (needs_decompression == kNeedsDecompression) {
+ targets->needs_decompression = kNeedsDecompression;
}
}
void RecordMoveToStackSlot(ValueNode* node,
compiler::InstructionOperand source,
- uint32_t target_slot) {
+ int32_t target_slot,
+ bool target_needs_to_be_decompressed) {
// There shouldn't have been another move to this stack slot already.
CheckNoExistingMoveToStackSlot(target_slot);
+ NeedsDecompression needs_decompression = kDoesNotNeedDecompression;
+ if constexpr (DecompressIfNeeded) {
+ if (target_needs_to_be_decompressed &&
+ !node->decompresses_tagged_result()) {
+ needs_decompression = kNeedsDecompression;
+ }
+ } else {
+ DCHECK_IMPLIES(target_needs_to_be_decompressed,
+ node->decompresses_tagged_result());
+ }
+
+ GapMoveTargets* targets;
if (source.IsAnyRegister()) {
RegisterT source_reg = ToRegisterT<RegisterT>(source);
- moves_from_register_[source_reg.code()].stack_slots.push_back(
- target_slot);
+ targets = &moves_from_register_[source_reg.code()];
} else if (source.IsAnyStackSlot()) {
- uint32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
+ int32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
compiler::AllocatedOperand::cast(source));
- if (source_slot != target_slot) {
- moves_from_stack_slot_[source_slot].stack_slots.push_back(target_slot);
+ if (source_slot == target_slot &&
+ needs_decompression == kDoesNotNeedDecompression) {
+ return;
}
+ targets = &moves_from_stack_slot_[source_slot];
} else {
DCHECK(source.IsConstant());
DCHECK(IsConstantNode(node->opcode()));
materializing_stack_slot_moves_.emplace_back(target_slot, node);
+ // No need to update `targets.needs_decompression`, materialization is
+ // always decompressed.
+ return;
+ }
+
+ targets->stack_slots.push_back(target_slot);
+ if (needs_decompression == kNeedsDecompression) {
+ targets->needs_decompression = kNeedsDecompression;
}
}
@@ -258,7 +326,7 @@ class ParallelMoveResolver {
return std::exchange(moves_from_register_[source_reg.code()],
GapMoveTargets{});
}
- GapMoveTargets PopTargets(uint32_t source_slot) {
+ GapMoveTargets PopTargets(int32_t source_slot) {
auto handle = moves_from_stack_slot_.extract(source_slot);
if (handle.empty()) return {};
DCHECK(!handle.mapped().is_empty());
@@ -286,10 +354,10 @@ class ParallelMoveResolver {
// chain start.
if (has_cycle) {
if (!scratch_has_cycle_start_) {
- Pop(kScratchRegT);
+ Pop(scratch_);
scratch_has_cycle_start_ = true;
}
- EmitMovesFromSource(kScratchRegT, std::move(targets));
+ EmitMovesFromSource(scratch_, std::move(targets));
scratch_has_cycle_start_ = false;
__ RecordComment("-- * End of cycle");
} else {
@@ -306,10 +374,10 @@ class ParallelMoveResolver {
if (chain_start == source) {
__ RecordComment("-- * Cycle");
DCHECK(!scratch_has_cycle_start_);
- if constexpr (std::is_same_v<ChainStartT, uint32_t>) {
- EmitStackMove(kScratchRegT, chain_start);
+ if constexpr (std::is_same_v<ChainStartT, int32_t>) {
+ __ Move(scratch_, StackSlot{chain_start});
} else {
- __ Move(kScratchRegT, chain_start);
+ __ Move(scratch_, chain_start);
}
scratch_has_cycle_start_ = true;
return true;
@@ -338,7 +406,7 @@ class ParallelMoveResolver {
for (auto target : targets.registers) {
has_cycle |= ContinueEmitMoveChain(chain_start, target);
}
- for (uint32_t target_slot : targets.stack_slots) {
+ for (int32_t target_slot : targets.stack_slots) {
has_cycle |= ContinueEmitMoveChain(chain_start, target_slot);
}
return has_cycle;
@@ -346,18 +414,23 @@ class ParallelMoveResolver {
void EmitMovesFromSource(RegisterT source_reg, GapMoveTargets&& targets) {
DCHECK(moves_from_register_[source_reg.code()].is_empty());
+ if constexpr (DecompressIfNeeded) {
+ if (targets.needs_decompression == kNeedsDecompression) {
+ __ DecompressTagged(source_reg, source_reg);
+ }
+ }
for (RegisterT target_reg : targets.registers) {
DCHECK(moves_from_register_[target_reg.code()].is_empty());
__ Move(target_reg, source_reg);
}
- for (uint32_t target_slot : targets.stack_slots) {
+ for (int32_t target_slot : targets.stack_slots) {
DCHECK_EQ(moves_from_stack_slot_.find(target_slot),
moves_from_stack_slot_.end());
- EmitStackMove(target_slot, source_reg);
+ __ Move(StackSlot{target_slot}, source_reg);
}
}
- void EmitMovesFromSource(uint32_t source_slot, GapMoveTargets&& targets) {
+ void EmitMovesFromSource(int32_t source_slot, GapMoveTargets&& targets) {
DCHECK_EQ(moves_from_stack_slot_.find(source_slot),
moves_from_stack_slot_.end());
@@ -372,51 +445,35 @@ class ParallelMoveResolver {
// Otherwise, cache the slot value on the scratch register, clobbering it
// if necessary.
if (scratch_has_cycle_start_) {
- Push(kScratchRegT);
+ Push(scratch_);
scratch_has_cycle_start_ = false;
}
- register_with_slot_value = kScratchRegT;
+ register_with_slot_value = scratch_;
}
-
// Now emit moves from that cached register instead of from the stack slot.
DCHECK(register_with_slot_value.is_valid());
DCHECK(moves_from_register_[register_with_slot_value.code()].is_empty());
- EmitStackMove(register_with_slot_value, source_slot);
+ __ Move(register_with_slot_value, StackSlot{source_slot});
+  // Decompress after the first move; subsequent moves reuse this register,
+  // so they're guaranteed to be decompressed.
+ if constexpr (DecompressIfNeeded) {
+ if (targets.needs_decompression == kNeedsDecompression) {
+ __ DecompressTagged(register_with_slot_value, register_with_slot_value);
+ targets.needs_decompression = kDoesNotNeedDecompression;
+ }
+ }
EmitMovesFromSource(register_with_slot_value, std::move(targets));
}
- // The slot index used for representing slots in the move graph is the offset
- // from the frame pointer. These helpers help translate this into an actual
- // machine move.
- void EmitStackMove(uint32_t target_slot, Register source_reg) {
- __ movq(MemOperand(rbp, target_slot), source_reg);
- }
- void EmitStackMove(uint32_t target_slot, DoubleRegister source_reg) {
- __ Movsd(MemOperand(rbp, target_slot), source_reg);
- }
- void EmitStackMove(Register target_reg, uint32_t source_slot) {
- __ movq(target_reg, MemOperand(rbp, source_slot));
- }
- void EmitStackMove(DoubleRegister target_reg, uint32_t source_slot) {
- __ Movsd(target_reg, MemOperand(rbp, source_slot));
- }
-
void Push(Register reg) { __ Push(reg); }
void Push(DoubleRegister reg) { __ PushAll({reg}); }
- void Push(uint32_t stack_slot) {
- __ movq(kScratchRegister, MemOperand(rbp, stack_slot));
- __ movq(MemOperand(rsp, -1), kScratchRegister);
- }
void Pop(Register reg) { __ Pop(reg); }
void Pop(DoubleRegister reg) { __ PopAll({reg}); }
- void Pop(uint32_t stack_slot) {
- __ movq(kScratchRegister, MemOperand(rsp, -1));
- __ movq(MemOperand(rbp, stack_slot), kScratchRegister);
- }
- MacroAssembler* masm() const { return masm_; }
+ MaglevAssembler* masm() const { return masm_; }
MaglevAssembler* const masm_;
+ RegisterT scratch_;
// Keep moves to/from registers and stack slots separate -- there are a fixed
// number of registers but an infinite number of stack slots, so the register
@@ -427,15 +484,16 @@ class ParallelMoveResolver {
std::array<GapMoveTargets, RegisterT::kNumRegisters> moves_from_register_ =
{};
+ // TODO(victorgomes): Use MaglevAssembler::StackSlot instead of int32_t.
// moves_from_stack_slot_[source] = target.
- std::unordered_map<uint32_t, GapMoveTargets> moves_from_stack_slot_;
+ std::unordered_map<int32_t, GapMoveTargets> moves_from_stack_slot_;
// materializing_register_moves[target] = node.
std::array<ValueNode*, RegisterT::kNumRegisters>
materializing_register_moves_ = {};
// materializing_stack_slot_moves = {(node,target), ... }.
- std::vector<std::pair<uint32_t, ValueNode*>> materializing_stack_slot_moves_;
+ std::vector<std::pair<int32_t, ValueNode*>> materializing_stack_slot_moves_;
bool scratch_has_cycle_start_ = false;
};
@@ -486,35 +544,46 @@ class ExceptionHandlerTrampolineBuilder {
// values are tagged and b) the stack walk treats unknown stack slots as
// tagged.
- const InterpretedDeoptFrame& lazy_frame =
- deopt_info->top_frame().type() ==
- DeoptFrame::FrameType::kBuiltinContinuationFrame
- ? deopt_info->top_frame().parent()->as_interpreted()
- : deopt_info->top_frame().as_interpreted();
+ // TODO(victorgomes): Update this once we support exceptions in inlined
+ // functions. Currently, only the bottom frame can contain a catch block.
+ const DeoptFrame* bottom_frame = &deopt_info->top_frame();
+ while (bottom_frame->parent() != nullptr) {
+ bottom_frame = bottom_frame->parent();
+ }
+ const InterpretedDeoptFrame& lazy_frame = bottom_frame->as_interpreted();
// TODO(v8:7700): Handle inlining.
-
- ParallelMoveResolver<Register> direct_moves(masm_);
+ ParallelMoveResolver<Register, true> direct_moves(masm_);
MoveVector materialising_moves;
bool save_accumulator = false;
RecordMoves(lazy_frame.unit(), catch_block, lazy_frame.frame_state(),
&direct_moves, &materialising_moves, &save_accumulator);
-
- __ bind(&handler_info->trampoline_entry);
+ __ BindJumpTarget(&handler_info->trampoline_entry);
__ RecordComment("-- Exception handler trampoline START");
EmitMaterialisationsAndPushResults(materialising_moves, save_accumulator);
+
__ RecordComment("EmitMoves");
- direct_moves.EmitMoves();
- EmitPopMaterialisedResults(materialising_moves, save_accumulator);
- __ jmp(catch_block->label());
+// TODO(victorgomes): Add a scratch register scope to MaglevAssembler and
+// remove this arch-dependent code.
+#ifdef V8_TARGET_ARCH_ARM64
+ UseScratchRegisterScope temps(masm_);
+ Register scratch = temps.AcquireX();
+#elif V8_TARGET_ARCH_X64
+ Register scratch = kScratchRegister;
+#else
+#error "Maglev does not support this architecture."
+#endif
+ direct_moves.EmitMoves(scratch);
+ EmitPopMaterialisedResults(materialising_moves, save_accumulator, scratch);
+ __ Jump(catch_block->label());
__ RecordComment("-- Exception handler trampoline END");
}
- MacroAssembler* masm() const { return masm_; }
+ MaglevAssembler* masm() const { return masm_; }
void RecordMoves(const MaglevCompilationUnit& unit, BasicBlock* catch_block,
const CompactInterpreterFrameState* register_frame,
- ParallelMoveResolver<Register>* direct_moves,
+ ParallelMoveResolver<Register, true>* direct_moves,
MoveVector* materialising_moves, bool* save_accumulator) {
for (Phi* phi : *catch_block->phis()) {
DCHECK(phi->is_exception_phi());
@@ -540,22 +609,18 @@ class ExceptionHandlerTrampolineBuilder {
DCHECK(!source->allocation().IsRegister());
switch (source->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
case ValueRepresentation::kTagged:
direct_moves->RecordMove(
source, source->allocation(),
- compiler::AllocatedOperand::cast(target.operand()));
+ compiler::AllocatedOperand::cast(target.operand()),
+ phi->decompresses_tagged_result() ? kNeedsDecompression
+ : kDoesNotNeedDecompression);
break;
case ValueRepresentation::kInt32:
- if (source->allocation().IsConstant()) {
- // TODO(jgruber): Why is it okay for Int32 constants to remain
- // untagged while non-constants are unconditionally smi-tagged or
- // converted to a HeapNumber during materialisation?
- direct_moves->RecordMove(
- source, source->allocation(),
- compiler::AllocatedOperand::cast(target.operand()));
- } else {
- materialising_moves->emplace_back(target, source);
- }
+ case ValueRepresentation::kUint32:
+ materialising_moves->emplace_back(target, source);
break;
case ValueRepresentation::kFloat64:
materialising_moves->emplace_back(target, source);
@@ -582,73 +647,37 @@ class ExceptionHandlerTrampolineBuilder {
// talking about a presumably infrequent case for exception handlers.
__ RecordComment("EmitMaterialisationsAndPushResults");
+
if (save_accumulator) __ Push(kReturnRegister0);
for (const Move& move : moves) {
- MaterialiseTo(move.source, kReturnRegister0);
+ // We consider constants after all other operations, since constants
+ // don't need to call NewHeapNumber.
+ if (IsConstantNode(move.source->opcode())) continue;
+ __ MaterialiseValueNode(kReturnRegister0, move.source);
__ Push(kReturnRegister0);
}
}
void EmitPopMaterialisedResults(const MoveVector& moves,
- bool save_accumulator) const {
+ bool save_accumulator,
+ Register scratch) const {
if (moves.size() == 0) return;
__ RecordComment("EmitPopMaterialisedResults");
- for (auto it = moves.rbegin(); it < moves.rend(); it++) {
- const ValueLocation& target = it->target;
- if (target.operand().IsRegister()) {
- __ Pop(target.AssignedGeneralRegister());
+ for (const Move& move : base::Reversed(moves)) {
+ const ValueLocation& target = move.target;
+ Register target_reg = target.operand().IsAnyRegister()
+ ? target.AssignedGeneralRegister()
+ : scratch;
+ if (IsConstantNode(move.source->opcode())) {
+ __ MaterialiseValueNode(target_reg, move.source);
} else {
- DCHECK(target.operand().IsStackSlot());
- __ Pop(kScratchRegister);
- __ movq(masm_->ToMemOperand(target.operand()), kScratchRegister);
+ __ Pop(target_reg);
}
- }
-
- if (save_accumulator) __ Pop(kReturnRegister0);
- }
-
- void MaterialiseTo(ValueNode* value, Register dst) const {
- using D = NewHeapNumberDescriptor;
- switch (value->properties().value_representation()) {
- case ValueRepresentation::kInt32: {
- // We consider Int32Constants together with tagged values.
- DCHECK(!value->allocation().IsConstant());
- Label done;
- __ movq(dst, ToMemOperand(value));
- __ addl(dst, dst);
- __ j(no_overflow, &done);
- // If we overflow, instead of bailing out (deopting), we change
- // representation to a HeapNumber.
- __ Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue),
- ToMemOperand(value));
- __ CallBuiltin(Builtin::kNewHeapNumber);
- __ Move(dst, kReturnRegister0);
- __ bind(&done);
- break;
+ if (target_reg == scratch) {
+ __ Move(masm_->ToMemOperand(target.operand()), scratch);
}
- case ValueRepresentation::kFloat64:
- if (Float64Constant* constant = value->TryCast<Float64Constant>()) {
- __ Move(D::GetDoubleRegisterParameter(D::kValue), constant->value());
- } else {
- __ Movsd(D::GetDoubleRegisterParameter(D::kValue),
- ToMemOperand(value));
- }
- __ CallBuiltin(Builtin::kNewHeapNumber);
- __ Move(dst, kReturnRegister0);
- break;
- case ValueRepresentation::kTagged:
- UNREACHABLE();
}
- }
-
- MemOperand ToMemOperand(ValueNode* node) const {
- DCHECK(node->allocation().IsAnyStackSlot());
- return masm_->ToMemOperand(node->allocation());
- }
-
- MemOperand ToMemOperand(const ValueLocation& location) const {
- DCHECK(location.operand().IsStackSlot());
- return masm_->ToMemOperand(location.operand());
+ if (save_accumulator) __ Pop(kReturnRegister0);
}
MaglevAssembler* const masm_;
@@ -660,155 +689,33 @@ class MaglevCodeGeneratingNodeProcessor {
: masm_(masm) {}
void PreProcessGraph(Graph* graph) {
+ // TODO(victorgomes): I wonder if we want to create a struct that shares
+ // these fields between graph and code_gen_state.
code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
+ code_gen_state()->set_max_deopted_stack_size(
+ graph->max_deopted_stack_size());
+ code_gen_state()->set_max_call_stack_args_(graph->max_call_stack_args());
if (v8_flags.maglev_break_on_entry) {
- __ int3();
+ __ DebugBreak();
}
- if (v8_flags.maglev_ool_prologue) {
- // Call the out-of-line prologue (with parameters passed on the stack).
- __ Push(Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
- __ Push(Immediate(code_gen_state()->tagged_slots() * kSystemPointerSize));
- __ CallBuiltin(Builtin::kMaglevOutOfLinePrologue);
- } else {
- __ BailoutIfDeoptimized(rbx);
-
- // Tiering support.
- // TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
- // per Maglev code object on x64).
- {
- // Scratch registers. Don't clobber regs related to the calling
- // convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
- // with deferred flags code.
- Register flags = rcx;
- Register feedback_vector = r9;
-
- // Load the feedback vector.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- __ AssertFeedbackVector(feedback_vector);
-
- __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
- flags, feedback_vector, CodeKind::MAGLEV,
- &deferred_flags_need_processing_);
- }
-
- __ EnterFrame(StackFrame::MAGLEV);
-
- // Save arguments in frame.
- // TODO(leszeks): Consider eliding this frame if we don't make any calls
- // that could clobber these registers.
- __ Push(kContextRegister);
- __ Push(kJSFunctionRegister); // Callee's JS function.
- __ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
-
- {
- ASM_CODE_COMMENT_STRING(masm(), " Stack/interrupt check");
- // Stack check. This folds the checks for both the interrupt stack limit
- // check and the real stack limit into one by just checking for the
- // interrupt limit. The interrupt limit is either equal to the real
- // stack limit or tighter. By ensuring we have space until that limit
- // after building the frame we can quickly precheck both at once.
- __ Move(kScratchRegister, rsp);
- // TODO(leszeks): Include a max call argument size here.
- __ subq(kScratchRegister, Immediate(code_gen_state()->stack_slots() *
- kSystemPointerSize));
- __ cmpq(kScratchRegister,
- __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
-
- __ j(below, &deferred_call_stack_guard_);
- __ bind(&deferred_call_stack_guard_return_);
- }
-
- // Initialize stack slots.
- if (graph->tagged_stack_slots() > 0) {
- ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
- // TODO(leszeks): Consider filling with xmm + movdqa instead.
- __ Move(rax, Immediate(0));
-
- // Magic value. Experimentally, an unroll size of 8 doesn't seem any
- // worse than fully unrolled pushes.
- const int kLoopUnrollSize = 8;
- int tagged_slots = graph->tagged_stack_slots();
- if (tagged_slots < 2 * kLoopUnrollSize) {
- // If the frame is small enough, just unroll the frame fill
- // completely.
- for (int i = 0; i < tagged_slots; ++i) {
- __ pushq(rax);
- }
- } else {
- // Extract the first few slots to round to the unroll size.
- int first_slots = tagged_slots % kLoopUnrollSize;
- for (int i = 0; i < first_slots; ++i) {
- __ pushq(rax);
- }
- __ Move(rbx, Immediate(tagged_slots / kLoopUnrollSize));
- // We enter the loop unconditionally, so make sure we need to loop at
- // least once.
- DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
- Label loop;
- __ bind(&loop);
- for (int i = 0; i < kLoopUnrollSize; ++i) {
- __ pushq(rax);
- }
- __ decl(rbx);
- __ j(greater, &loop);
- }
- }
- if (graph->untagged_stack_slots() > 0) {
- // Extend rsp by the size of the remaining untagged part of the frame,
- // no need to initialise these.
- __ subq(rsp,
- Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
- }
- }
+ __ Prologue(graph);
}
- void PostProcessGraph(Graph*) {
- __ int3();
-
- if (!v8_flags.maglev_ool_prologue) {
- __ bind(&deferred_call_stack_guard_);
- {
- ASM_CODE_COMMENT_STRING(masm(), "Stack/interrupt call");
- // Save any registers that can be referenced by RegisterInput.
- // TODO(leszeks): Only push those that are used by the graph.
- __ PushAll(RegisterInput::kAllowedRegisters);
- // Push the frame size
- __ Push(Immediate(Smi::FromInt(code_gen_state()->stack_slots() *
- kSystemPointerSize)));
- __ CallRuntime(Runtime::kStackGuardWithGap, 1);
- __ PopAll(RegisterInput::kAllowedRegisters);
- __ jmp(&deferred_call_stack_guard_return_);
- }
-
- __ bind(&deferred_flags_need_processing_);
- {
- ASM_CODE_COMMENT_STRING(masm(), "Optimized marker check");
- // See PreProcessGraph.
- Register flags = rcx;
- Register feedback_vector = r9;
- // TODO(leszeks): This could definitely be a builtin that we tail-call.
- __ OptimizeCodeOrTailCallOptimizedCodeSlot(
- flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
- __ Trap();
- }
- }
- }
+ void PostProcessGraph(Graph* graph) {}
void PreProcessBasicBlock(BasicBlock* block) {
+ if (block->is_loop()) {
+ __ LoopHeaderAlign();
+ }
if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- Block b" << graph_labeller()->BlockId(block);
__ RecordComment(ss.str());
}
-
- __ bind(block->label());
+ __ BindBlock(block);
}
template <typename NodeT>
@@ -820,13 +727,8 @@ class MaglevCodeGeneratingNodeProcessor {
__ RecordComment(ss.str());
}
- if (v8_flags.debug_code) {
- __ movq(kScratchRegister, rbp);
- __ subq(kScratchRegister, rsp);
- __ cmpq(kScratchRegister,
- Immediate(code_gen_state()->stack_slots() * kSystemPointerSize +
- StandardFrameConstants::kFixedFrameSizeFromFp));
- __ Assert(equal, AbortReason::kStackAccessBelowStackPointer);
+ if (v8_flags.maglev_assert_stack_size) {
+ __ AssertStackSizeCorrect();
}
// Emit Phi moves before visiting the control node.
@@ -835,22 +737,47 @@ class MaglevCodeGeneratingNodeProcessor {
state);
}
+ if (v8_flags.debug_code && !std::is_same_v<NodeT, Phi>) {
+ // Check that all int32/uint32 inputs are zero extended.
+ // Note that we don't do this for Phis, since they are virtual operations
+ // whose inputs aren't actual inputs but are injected on incoming
+ // branches. There's thus nothing to verify for the inputs we see for the
+ // phi.
+ for (Input& input : *node) {
+ ValueRepresentation rep =
+ input.node()->properties().value_representation();
+ if (rep == ValueRepresentation::kInt32 ||
+ rep == ValueRepresentation::kUint32) {
+ // TODO(leszeks): Ideally we'd check non-register inputs too, but
+ // AssertZeroExtended needs the scratch register, so we'd have to do
+ // some manual push/pop here to free up another register.
+ if (input.IsGeneralRegister()) {
+ __ AssertZeroExtended(ToRegister(input));
+ }
+ }
+ }
+ }
+
+ MaglevAssembler::ScratchRegisterScope scratch_scope(masm());
+ scratch_scope.Include(node->general_temporaries());
+ scratch_scope.IncludeDouble(node->double_temporaries());
+
node->GenerateCode(masm(), state);
if (std::is_base_of<ValueNode, NodeT>::value) {
ValueNode* value_node = node->template Cast<ValueNode>();
- if (value_node->is_spilled()) {
+ if (value_node->has_valid_live_range() && value_node->is_spilled()) {
compiler::AllocatedOperand source =
compiler::AllocatedOperand::cast(value_node->result().operand());
// We shouldn't spill nodes which already output to the stack.
if (!source.IsAnyStackSlot()) {
if (v8_flags.code_comments) __ RecordComment("-- Spill:");
if (source.IsRegister()) {
- __ movq(masm()->GetStackSlot(value_node->spill_slot()),
+ __ Move(masm()->GetStackSlot(value_node->spill_slot()),
ToRegister(source));
} else {
- __ Movsd(masm()->GetStackSlot(value_node->spill_slot()),
- ToDoubleRegister(source));
+ __ Move(masm()->GetStackSlot(value_node->spill_slot()),
+ ToDoubleRegister(source));
}
} else {
// Otherwise, the result source stack slot should be equal to the
@@ -871,14 +798,28 @@ class MaglevCodeGeneratingNodeProcessor {
int predecessor_id = state.block()->predecessor_id();
+// TODO(victorgomes): Add a scratch register scope to MaglevAssembler and
+// remove this arch dependent code.
+#ifdef V8_TARGET_ARCH_ARM64
+ UseScratchRegisterScope temps(masm_);
+ Register scratch = temps.AcquireX();
+ DoubleRegister double_scratch = temps.AcquireD();
+#elif V8_TARGET_ARCH_X64
+ Register scratch = kScratchRegister;
+ DoubleRegister double_scratch = kScratchDoubleReg;
+#else
+#error "Maglev does not supported this architecture."
+#endif
+
// TODO(leszeks): Move these to fields, to allow their data structure
// allocations to be reused. Will need some sort of state resetting.
- ParallelMoveResolver<Register> register_moves(masm_);
- ParallelMoveResolver<DoubleRegister> double_register_moves(masm_);
+ ParallelMoveResolver<Register, false> register_moves(masm_);
+ ParallelMoveResolver<DoubleRegister, false> double_register_moves(masm_);
// Remember what registers were assigned to by a Phi, to avoid clobbering
// them with RegisterMoves.
RegList registers_set_by_phis;
+ DoubleRegList double_registers_set_by_phis;
__ RecordComment("-- Gap moves:");
@@ -910,9 +851,19 @@ class MaglevCodeGeneratingNodeProcessor {
<< graph_labeller()->NodeId(phi) << ")";
__ RecordComment(ss.str());
}
- register_moves.RecordMove(node, source, target);
+ if (phi->value_representation() == ValueRepresentation::kFloat64) {
+ DCHECK(!phi->decompresses_tagged_result());
+ double_register_moves.RecordMove(node, source, target, false);
+ } else {
+ register_moves.RecordMove(node, source, target,
+ kDoesNotNeedDecompression);
+ }
if (target.IsAnyRegister()) {
- registers_set_by_phis.set(target.GetRegister());
+ if (phi->value_representation() == ValueRepresentation::kFloat64) {
+ double_registers_set_by_phis.set(target.GetDoubleRegister());
+ } else {
+ registers_set_by_phis.set(target.GetRegister());
+ }
}
}
}
@@ -932,16 +883,20 @@ class MaglevCodeGeneratingNodeProcessor {
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
}
- register_moves.RecordMove(node, source, reg);
+ register_moves.RecordMove(node, source, reg,
+ kDoesNotNeedDecompression);
}
});
- register_moves.EmitMoves();
+ register_moves.EmitMoves(scratch);
__ RecordComment("-- Double gap moves:");
target->state()->register_state().ForEachDoubleRegister(
[&](DoubleRegister reg, RegisterState& state) {
+ // Don't clobber registers set by a Phi.
+ if (double_registers_set_by_phis.has(reg)) return;
+
ValueNode* node;
RegisterMerge* merge;
if (LoadMergeState(state, &node, &merge)) {
@@ -952,11 +907,12 @@ class MaglevCodeGeneratingNodeProcessor {
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
}
- double_register_moves.RecordMove(node, source, reg);
+ double_register_moves.RecordMove(node, source, reg,
+ kDoesNotNeedDecompression);
}
});
- double_register_moves.EmitMoves();
+ double_register_moves.EmitMoves(double_scratch);
}
Isolate* isolate() const { return masm_->isolate(); }
@@ -970,9 +926,6 @@ class MaglevCodeGeneratingNodeProcessor {
private:
MaglevAssembler* const masm_;
- Label deferred_call_stack_guard_;
- Label deferred_call_stack_guard_return_;
- Label deferred_flags_need_processing_;
};
class SafepointingNodeProcessor {
@@ -992,18 +945,31 @@ class SafepointingNodeProcessor {
};
namespace {
-int GetFrameCount(const DeoptFrame& deopt_frame) {
- switch (deopt_frame.type()) {
- case DeoptFrame::FrameType::kInterpretedFrame:
- return 1 + deopt_frame.as_interpreted().unit().inlining_depth();
- case DeoptFrame::FrameType::kBuiltinContinuationFrame:
- return 1 + GetFrameCount(*deopt_frame.parent());
+struct FrameCount {
+ int total;
+ int js_frame;
+};
+
+FrameCount GetFrameCount(const DeoptFrame* deopt_frame) {
+ int total = 1;
+ int js_frame = 1;
+ while (deopt_frame->parent()) {
+ deopt_frame = deopt_frame->parent();
+ if (deopt_frame->type() != DeoptFrame::FrameType::kInlinedArgumentsFrame) {
+ js_frame++;
+ }
+ total++;
}
+ return FrameCount{total, js_frame};
}
+
BytecodeOffset GetBytecodeOffset(const DeoptFrame& deopt_frame) {
switch (deopt_frame.type()) {
case DeoptFrame::FrameType::kInterpretedFrame:
return deopt_frame.as_interpreted().bytecode_position();
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame:
+ DCHECK_NOT_NULL(deopt_frame.parent());
+ return GetBytecodeOffset(*deopt_frame.parent());
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
return Builtins::GetContinuationBytecodeOffset(
deopt_frame.as_builtin_continuation().builtin_id());
@@ -1013,10 +979,24 @@ SourcePosition GetSourcePosition(const DeoptFrame& deopt_frame) {
switch (deopt_frame.type()) {
case DeoptFrame::FrameType::kInterpretedFrame:
return deopt_frame.as_interpreted().source_position();
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame:
+ DCHECK_NOT_NULL(deopt_frame.parent());
+ return GetSourcePosition(*deopt_frame.parent());
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
return SourcePosition::Unknown();
}
}
+compiler::SharedFunctionInfoRef GetSharedFunctionInfo(
+ const DeoptFrame& deopt_frame) {
+ switch (deopt_frame.type()) {
+ case DeoptFrame::FrameType::kInterpretedFrame:
+ return deopt_frame.as_interpreted().unit().shared_function_info();
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame:
+ return deopt_frame.as_inlined_arguments().unit().shared_function_info();
+ case DeoptFrame::FrameType::kBuiltinContinuationFrame:
+ return GetSharedFunctionInfo(*deopt_frame.parent());
+ }
+}
} // namespace
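As a quick illustration of the frame-counting logic above, here is a minimal, self-contained sketch using toy types (not the actual DeoptFrame classes): every frame in the parent chain counts towards the total, while inlined-arguments frames are excluded from the JS frame count, matching GetFrameCount.

#include <utility>

// Toy sketch only: stand-ins for the real parent-linked deopt frame chain.
enum class ToyFrameType { kInterpreted, kInlinedArguments, kBuiltinContinuation };

struct ToyFrame {
  ToyFrameType type;
  const ToyFrame* parent;  // nullptr for the bottom-most frame.
};

// Mirrors GetFrameCount: the top frame is never an inlined-arguments frame,
// so it always counts as a JS frame.
std::pair<int, int> CountFrames(const ToyFrame* frame) {
  int total = 1;
  int js_frames = 1;
  while (frame->parent) {
    frame = frame->parent;
    if (frame->type != ToyFrameType::kInlinedArguments) js_frames++;
    total++;
  }
  return {total, js_frames};
}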
class MaglevTranslationArrayBuilder {
@@ -1031,24 +1011,32 @@ class MaglevTranslationArrayBuilder {
deopt_literals_(deopt_literals) {}
void BuildEagerDeopt(EagerDeoptInfo* deopt_info) {
- int frame_count = GetFrameCount(deopt_info->top_frame());
- int jsframe_count = frame_count;
- int update_feedback_count = 0;
+ auto [frame_count, jsframe_count] = GetFrameCount(&deopt_info->top_frame());
deopt_info->set_translation_index(
- translation_array_builder_->BeginTranslation(frame_count, jsframe_count,
- update_feedback_count));
+ translation_array_builder_->BeginTranslation(
+ frame_count, jsframe_count,
+ deopt_info->feedback_to_update().IsValid()));
+ if (deopt_info->feedback_to_update().IsValid()) {
+ translation_array_builder_->AddUpdateFeedback(
+ GetDeoptLiteral(*deopt_info->feedback_to_update().vector),
+ deopt_info->feedback_to_update().index());
+ }
const InputLocation* current_input_location = deopt_info->input_locations();
BuildDeoptFrame(deopt_info->top_frame(), current_input_location);
}
void BuildLazyDeopt(LazyDeoptInfo* deopt_info) {
- int frame_count = GetFrameCount(deopt_info->top_frame());
- int jsframe_count = frame_count;
- int update_feedback_count = 0;
+ auto [frame_count, jsframe_count] = GetFrameCount(&deopt_info->top_frame());
deopt_info->set_translation_index(
- translation_array_builder_->BeginTranslation(frame_count, jsframe_count,
- update_feedback_count));
+ translation_array_builder_->BeginTranslation(
+ frame_count, jsframe_count,
+ deopt_info->feedback_to_update().IsValid()));
+ if (deopt_info->feedback_to_update().IsValid()) {
+ translation_array_builder_->AddUpdateFeedback(
+ GetDeoptLiteral(*deopt_info->feedback_to_update().vector),
+ deopt_info->feedback_to_update().index());
+ }
const InputLocation* current_input_location = deopt_info->input_locations();
@@ -1090,8 +1078,7 @@ class MaglevTranslationArrayBuilder {
}
translation_array_builder_->BeginInterpretedFrame(
interpreted_frame.bytecode_position(),
- GetDeoptLiteral(
- *interpreted_frame.unit().shared_function_info().object()),
+ GetDeoptLiteral(GetSharedFunctionInfo(interpreted_frame)),
interpreted_frame.unit().register_count(), return_offset,
deopt_info->result_size());
@@ -1101,6 +1088,9 @@ class MaglevTranslationArrayBuilder {
deopt_info->result_size());
break;
}
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame:
+ // The inlined arguments frame can never be the top frame.
+ UNREACHABLE();
case DeoptFrame::FrameType::kBuiltinContinuationFrame: {
const BuiltinContinuationDeoptFrame& builtin_continuation_frame =
top_frame.as_builtin_continuation();
@@ -1108,11 +1098,7 @@ class MaglevTranslationArrayBuilder {
translation_array_builder_->BeginBuiltinContinuationFrame(
Builtins::GetContinuationBytecodeOffset(
builtin_continuation_frame.builtin_id()),
- GetDeoptLiteral(*builtin_continuation_frame.parent()
- ->as_interpreted()
- .unit()
- .shared_function_info()
- .object()),
+ GetDeoptLiteral(GetSharedFunctionInfo(builtin_continuation_frame)),
builtin_continuation_frame.parameters().length());
// Closure
@@ -1168,8 +1154,7 @@ class MaglevTranslationArrayBuilder {
const int return_count = 0;
translation_array_builder_->BeginInterpretedFrame(
interpreted_frame.bytecode_position(),
- GetDeoptLiteral(
- *interpreted_frame.unit().shared_function_info().object()),
+ GetDeoptLiteral(GetSharedFunctionInfo(interpreted_frame)),
interpreted_frame.unit().register_count(), return_offset,
return_count);
@@ -1179,6 +1164,28 @@ class MaglevTranslationArrayBuilder {
return_count);
break;
}
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame: {
+ const InlinedArgumentsDeoptFrame& inlined_arguments_frame =
+ frame.as_inlined_arguments();
+
+ translation_array_builder_->BeginInlinedExtraArguments(
+ GetDeoptLiteral(GetSharedFunctionInfo(inlined_arguments_frame)),
+ static_cast<uint32_t>(inlined_arguments_frame.arguments().size()));
+
+ // Closure
+ translation_array_builder_->StoreLiteral(
+ GetDeoptLiteral(inlined_arguments_frame.unit().function()));
+
+ // Arguments
+ // TODO(victorgomes): Technically we don't need all arguments, only the
+ // extra ones. But we do it this way for now, since it matches the
+ // TurboFan behaviour.
+ for (ValueNode* value : inlined_arguments_frame.arguments()) {
+ BuildDeoptFrameSingleValue(value, *current_input_location);
+ current_input_location++;
+ }
+ break;
+ }
case DeoptFrame::FrameType::kBuiltinContinuationFrame: {
const BuiltinContinuationDeoptFrame& builtin_continuation_frame =
frame.as_builtin_continuation();
@@ -1186,11 +1193,7 @@ class MaglevTranslationArrayBuilder {
translation_array_builder_->BeginBuiltinContinuationFrame(
Builtins::GetContinuationBytecodeOffset(
builtin_continuation_frame.builtin_id()),
- GetDeoptLiteral(*builtin_continuation_frame.parent()
- ->as_interpreted()
- .unit()
- .shared_function_info()
- .object()),
+ GetDeoptLiteral(GetSharedFunctionInfo(builtin_continuation_frame)),
builtin_continuation_frame.parameters().length());
// Closure
@@ -1215,12 +1218,17 @@ class MaglevTranslationArrayBuilder {
void BuildDeoptStoreRegister(const compiler::AllocatedOperand& operand,
ValueRepresentation repr) {
switch (repr) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
case ValueRepresentation::kTagged:
translation_array_builder_->StoreRegister(operand.GetRegister());
break;
case ValueRepresentation::kInt32:
translation_array_builder_->StoreInt32Register(operand.GetRegister());
break;
+ case ValueRepresentation::kUint32:
+ translation_array_builder_->StoreUint32Register(operand.GetRegister());
+ break;
case ValueRepresentation::kFloat64:
translation_array_builder_->StoreDoubleRegister(
operand.GetDoubleRegister());
@@ -1232,12 +1240,17 @@ class MaglevTranslationArrayBuilder {
ValueRepresentation repr) {
int stack_slot = DeoptStackSlotFromStackSlot(operand);
switch (repr) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
case ValueRepresentation::kTagged:
translation_array_builder_->StoreStackSlot(stack_slot);
break;
case ValueRepresentation::kInt32:
translation_array_builder_->StoreInt32StackSlot(stack_slot);
break;
+ case ValueRepresentation::kUint32:
+ translation_array_builder_->StoreUint32StackSlot(stack_slot);
+ break;
case ValueRepresentation::kFloat64:
translation_array_builder_->StoreDoubleStackSlot(stack_slot);
break;
@@ -1273,7 +1286,7 @@ class MaglevTranslationArrayBuilder {
translation_array_builder_->StoreStackSlot(closure_index);
} else {
translation_array_builder_->StoreLiteral(
- GetDeoptLiteral(*compilation_unit.function().object()));
+ GetDeoptLiteral(compilation_unit.function()));
}
// TODO(leszeks): The input locations array happens to be in the same order
@@ -1346,6 +1359,10 @@ class MaglevTranslationArrayBuilder {
return *res.entry;
}
+ int GetDeoptLiteral(compiler::HeapObjectRef ref) {
+ return GetDeoptLiteral(*ref.object());
+ }
+
LocalIsolate* local_isolate_;
MaglevAssembler* masm_;
TranslationArrayBuilder* translation_array_builder_;
@@ -1381,10 +1398,29 @@ void MaglevCodeGenerator::EmitCode() {
MaglevCodeGeneratingNodeProcessor>>
processor(SafepointingNodeProcessor{local_isolate_},
MaglevCodeGeneratingNodeProcessor{masm()});
+ RecordInlinedFunctions();
processor.ProcessGraph(graph_);
EmitDeferredCode();
EmitDeopts();
+ if (code_gen_failed_) return;
EmitExceptionHandlerTrampolines();
+ __ FinishCode();
+}
+
+void MaglevCodeGenerator::RecordInlinedFunctions() {
+ // The inlined functions should be the first literals.
+ DCHECK_EQ(0u, deopt_literals_.size());
+ for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
+ graph_->inlined_functions()) {
+ IdentityMapFindResult<int> res =
+ deopt_literals_.FindOrInsert(inlined.shared_info);
+ if (!res.already_exists) {
+ DCHECK_EQ(0, *res.entry);
+ *res.entry = deopt_literals_.size() - 1;
+ }
+ inlined.RegisterInlinedFunctionId(*res.entry);
+ }
+ inlined_function_count_ = static_cast<int>(deopt_literals_.size());
}
void MaglevCodeGenerator::EmitDeferredCode() {
@@ -1401,9 +1437,28 @@ void MaglevCodeGenerator::EmitDeferredCode() {
}
void MaglevCodeGenerator::EmitDeopts() {
+ const size_t num_deopts = code_gen_state_.eager_deopts().size() +
+ code_gen_state_.lazy_deopts().size();
+ if (num_deopts > Deoptimizer::kMaxNumberOfEntries) {
+ code_gen_failed_ = true;
+ return;
+ }
+
MaglevTranslationArrayBuilder translation_builder(
local_isolate_, &masm_, &translation_array_builder_, &deopt_literals_);
+ // Deoptimization exits must be as small as possible, since their count grows
+ // with function size. These labels are an optimization which extracts the
+ // (potentially large) instruction sequence for the final jump to the
+ // deoptimization entry into a single spot per InstructionStream object. All
+ // deopt exits can then near-call to this label. Note: not used on all
+ // architectures.
+ Label eager_deopt_entry;
+ Label lazy_deopt_entry;
+ __ MaybeEmitDeoptBuiltinsCall(
+ code_gen_state_.eager_deopts().size(), &eager_deopt_entry,
+ code_gen_state_.lazy_deopts().size(), &lazy_deopt_entry);
+
deopt_exit_start_offset_ = __ pc_offset();
int deopt_index = 0;
@@ -1413,15 +1468,23 @@ void MaglevCodeGenerator::EmitDeopts() {
local_isolate_->heap()->Safepoint();
translation_builder.BuildEagerDeopt(deopt_info);
- if (masm_.compilation_info()->collect_source_positions()) {
+ if (masm_.compilation_info()->collect_source_positions() ||
+ IsDeoptimizationWithoutCodeInvalidation(deopt_info->reason())) {
+ // Note: Maglev uses the deopt_reason to tell the deoptimizer not to
+ // discard optimized code on deopt during ML-TF OSR. This is why we
+ // unconditionally emit the deopt_reason when
+ // IsDeoptimizationWithoutCodeInvalidation is true.
__ RecordDeoptReason(deopt_info->reason(), 0,
GetSourcePosition(deopt_info->top_frame()),
deopt_index);
}
__ bind(deopt_info->deopt_entry_label());
+
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, deopt_index,
deopt_info->deopt_entry_label(),
- DeoptimizeKind::kEager, nullptr, nullptr);
+ DeoptimizeKind::kEager, nullptr,
+ &eager_deopt_entry);
+
deopt_index++;
}
@@ -1436,10 +1499,11 @@ void MaglevCodeGenerator::EmitDeopts() {
GetSourcePosition(deopt_info->top_frame()),
deopt_index);
}
- __ bind(deopt_info->deopt_entry_label());
+ __ BindExceptionHandler(deopt_info->deopt_entry_label());
+
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, deopt_index,
deopt_info->deopt_entry_label(),
- DeoptimizeKind::kLazy, nullptr, nullptr);
+ DeoptimizeKind::kLazy, nullptr, &lazy_deopt_entry);
last_updated_safepoint = safepoint_table_builder_.UpdateDeoptimizationInfo(
deopt_info->deopting_call_return_pc(),
@@ -1459,7 +1523,7 @@ void MaglevCodeGenerator::EmitExceptionHandlerTrampolines() {
void MaglevCodeGenerator::EmitMetadata() {
// Final alignment before starting on the metadata section.
- masm()->Align(Code::kMetadataAlignment);
+ masm()->Align(InstructionStream::kMetadataAlignment);
safepoint_table_builder_.Emit(masm());
@@ -1473,6 +1537,8 @@ void MaglevCodeGenerator::EmitMetadata() {
}
MaybeHandle<Code> MaglevCodeGenerator::BuildCodeObject(Isolate* isolate) {
+ if (code_gen_failed_) return {};
+
CodeDesc desc;
masm()->GetCode(isolate, &desc, &safepoint_table_builder_,
handler_table_offset_);
@@ -1501,10 +1567,8 @@ Handle<DeoptimizationData> MaglevCodeGenerator::GenerateDeoptimizationData(
auto raw_data = *data;
raw_data.SetTranslationByteArray(*translation_array);
- // TODO(leszeks): Fix with the real inlined function count.
- raw_data.SetInlinedFunctionCount(Smi::zero());
- // TODO(leszeks): Support optimization IDs
- raw_data.SetOptimizationId(Smi::zero());
+ raw_data.SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ raw_data.SetOptimizationId(Smi::FromInt(isolate->NextOptimizationId()));
DCHECK_NE(deopt_exit_start_offset_, -1);
raw_data.SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
@@ -1520,9 +1584,14 @@ Handle<DeoptimizationData> MaglevCodeGenerator::GenerateDeoptimizationData(
Handle<DeoptimizationLiteralArray> literals =
isolate->factory()->NewDeoptimizationLiteralArray(deopt_literals_.size() +
1);
- // TODO(leszeks): Fix with the real inlining positions.
+ int inlined_functions_size =
+ static_cast<int>(graph_->inlined_functions().size());
Handle<PodArray<InliningPosition>> inlining_positions =
- PodArray<InliningPosition>::New(isolate, 0);
+ PodArray<InliningPosition>::New(isolate, inlined_functions_size);
+ for (int i = 0; i < inlined_functions_size; ++i) {
+ inlining_positions->set(i, graph_->inlined_functions()[i].position);
+ }
+
DisallowGarbageCollection no_gc;
auto raw_literals = *literals;
@@ -1539,8 +1608,6 @@ Handle<DeoptimizationData> MaglevCodeGenerator::GenerateDeoptimizationData(
->bytecode()
.object());
raw_data.SetLiteralArray(raw_literals);
-
- // TODO(leszeks): Fix with the real inlining positions.
raw_data.SetInliningPositions(*inlining_positions);
// TODO(leszeks): Fix once we have OSR.
diff --git a/deps/v8/src/maglev/maglev-code-generator.h b/deps/v8/src/maglev/maglev-code-generator.h
index 719e2d1154..53b1f5279e 100644
--- a/deps/v8/src/maglev/maglev-code-generator.h
+++ b/deps/v8/src/maglev/maglev-code-generator.h
@@ -34,6 +34,7 @@ class MaglevCodeGenerator final {
void EmitDeopts();
void EmitExceptionHandlerTrampolines();
void EmitMetadata();
+ void RecordInlinedFunctions();
MaybeHandle<Code> BuildCodeObject(Isolate* isolate);
Handle<DeoptimizationData> GenerateDeoptimizationData(Isolate* isolate);
@@ -55,6 +56,9 @@ class MaglevCodeGenerator final {
IdentityMap<int, base::DefaultAllocationPolicy> deopt_literals_;
int deopt_exit_start_offset_ = -1;
int handler_table_offset_ = 0;
+ int inlined_function_count_ = 0;
+
+ bool code_gen_failed_ = false;
};
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-compilation-info.cc b/deps/v8/src/maglev/maglev-compilation-info.cc
index 19a3b67314..6c858a5715 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.cc
+++ b/deps/v8/src/maglev/maglev-compilation-info.cc
@@ -62,6 +62,7 @@ MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
function->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map()) {
DCHECK(v8_flags.maglev);
+ compiler::CurrentHeapBrokerScope current_broker(broker_.get());
collect_source_positions_ = isolate->NeedsDetailedOptimizedCodeLineInfo();
if (collect_source_positions_) {
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.cc b/deps/v8/src/maglev/maglev-compilation-unit.cc
index d84588509a..29da7bc49f 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.cc
+++ b/deps/v8/src/maglev/maglev-compilation-unit.cc
@@ -26,10 +26,9 @@ MaglevCompilationUnit::MaglevCompilationUnit(
: info_(info),
caller_(caller),
function_(function),
- shared_function_info_(function_.shared()),
- bytecode_(shared_function_info_.GetBytecodeArray()),
- feedback_(
- function_.feedback_vector(info_->broker()->dependencies()).value()),
+ shared_function_info_(function_.shared(broker())),
+ bytecode_(shared_function_info_.GetBytecodeArray(broker())),
+ feedback_(function_.feedback_vector(info_->broker()).value()),
register_count_(bytecode_.register_count()),
parameter_count_(bytecode_.parameter_count()),
inlining_depth_(caller == nullptr ? 0 : caller->inlining_depth_ + 1) {}
diff --git a/deps/v8/src/maglev/maglev-compiler.cc b/deps/v8/src/maglev/maglev-compiler.cc
index 2e78016581..186e6ec3d5 100644
--- a/deps/v8/src/maglev/maglev-compiler.cc
+++ b/deps/v8/src/maglev/maglev-compiler.cc
@@ -13,6 +13,7 @@
#include "src/base/threaded-list.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
+#include "src/codegen/register-configuration.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
@@ -37,8 +38,9 @@
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir-inl.h"
#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-phi-representation-selector.h"
+#include "src/maglev/maglev-regalloc-data.h"
#include "src/maglev/maglev-regalloc.h"
-#include "src/maglev/maglev-vreg-allocator.h"
#include "src/objects/code-inl.h"
#include "src/objects/js-function.h"
#include "src/utils/identity-map.h"
@@ -48,12 +50,116 @@ namespace v8 {
namespace internal {
namespace maglev {
+class ValueLocationConstraintProcessor {
+ public:
+ void PreProcessGraph(Graph* graph) {}
+ void PostProcessGraph(Graph* graph) {}
+ void PreProcessBasicBlock(BasicBlock* block) {}
+
+#define DEF_PROCESS_NODE(NAME) \
+ void Process(NAME* node, const ProcessingState& state) { \
+ node->SetValueLocationConstraints(); \
+ }
+ NODE_BASE_LIST(DEF_PROCESS_NODE)
+#undef DEF_PROCESS_NODE
+};
+
+class DecompressedUseMarkingProcessor {
+ public:
+ void PreProcessGraph(Graph* graph) {}
+ void PostProcessGraph(Graph* graph) {}
+ void PreProcessBasicBlock(BasicBlock* block) {}
+
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ node->MarkTaggedInputsAsDecompressing();
+ }
+};
+
+class MaxCallDepthProcessor {
+ public:
+ void PreProcessGraph(Graph* graph) {}
+ void PostProcessGraph(Graph* graph) {
+ graph->set_max_call_stack_args(max_call_stack_args_);
+ graph->set_max_deopted_stack_size(max_deopted_stack_size_);
+ }
+ void PreProcessBasicBlock(BasicBlock* block) {}
+
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ if constexpr (NodeT::kProperties.is_call() ||
+ NodeT::kProperties.needs_register_snapshot()) {
+ int node_stack_args = node->MaxCallStackArgs();
+ if constexpr (NodeT::kProperties.needs_register_snapshot()) {
+ // Pessimistically assume that we'll push all registers in deferred
+ // calls.
+ node_stack_args +=
+ kAllocatableGeneralRegisterCount + kAllocatableDoubleRegisterCount;
+ }
+ max_call_stack_args_ = std::max(max_call_stack_args_, node_stack_args);
+ }
+ if constexpr (NodeT::kProperties.can_eager_deopt()) {
+ UpdateMaxDeoptedStackSize(node->eager_deopt_info());
+ }
+ if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ UpdateMaxDeoptedStackSize(node->lazy_deopt_info());
+ }
+ }
+
+ private:
+ void UpdateMaxDeoptedStackSize(DeoptInfo* deopt_info) {
+ const DeoptFrame* deopt_frame = &deopt_info->top_frame();
+ if (deopt_frame->type() == DeoptFrame::FrameType::kInterpretedFrame) {
+ if (&deopt_frame->as_interpreted().unit() == last_seen_unit_) return;
+ last_seen_unit_ = &deopt_frame->as_interpreted().unit();
+ }
+
+ int frame_size = 0;
+ do {
+ frame_size += ConservativeFrameSize(deopt_frame);
+ deopt_frame = deopt_frame->parent();
+ } while (deopt_frame != nullptr);
+ max_deopted_stack_size_ = std::max(frame_size, max_deopted_stack_size_);
+ }
+ int ConservativeFrameSize(const DeoptFrame* deopt_frame) {
+ switch (deopt_frame->type()) {
+ case DeoptFrame::FrameType::kInterpretedFrame: {
+ auto info = UnoptimizedFrameInfo::Conservative(
+ deopt_frame->as_interpreted().unit().parameter_count(),
+ deopt_frame->as_interpreted().unit().register_count());
+ return info.frame_size_in_bytes();
+ }
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame: {
+ return static_cast<int>(
+ deopt_frame->as_inlined_arguments().arguments().size() -
+ deopt_frame->as_inlined_arguments().unit().parameter_count());
+ }
+ case DeoptFrame::FrameType::kBuiltinContinuationFrame: {
+ // PC + FP + Closure + Params + Context
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ auto info = BuiltinContinuationFrameInfo::Conservative(
+ deopt_frame->as_builtin_continuation().parameters().length(),
+ Builtins::CallInterfaceDescriptorFor(
+ deopt_frame->as_builtin_continuation().builtin_id()),
+ config);
+ return info.frame_size_in_bytes();
+ }
+ }
+ }
+
+ int max_call_stack_args_ = 0;
+ int max_deopted_stack_size_ = 0;
+ // Optimize UpdateMaxDeoptedStackSize to not re-calculate if it sees the same
+ // compilation unit multiple times in a row.
+ const MaglevCompilationUnit* last_seen_unit_ = nullptr;
+};
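The interesting part of MaxCallDepthProcessor is its bookkeeping, which a minimal sketch with toy types (not the real MaglevCompilationUnit; the real processor also walks the whole parent chain and only caches interpreted top frames) makes explicit: keep a running maximum, and skip recomputation when consecutive deopt infos refer to the same compilation unit.

#include <algorithm>

struct ToyUnit {
  int conservative_frame_size;  // Stand-in for the conservative frame size.
};

class ToyMaxDeoptSizeTracker {
 public:
  void Update(const ToyUnit* unit) {
    if (unit == last_seen_unit_) return;  // Same unit as last time: no change.
    last_seen_unit_ = unit;
    max_size_ = std::max(max_size_, unit->conservative_frame_size);
  }
  int max_size() const { return max_size_; }

 private:
  int max_size_ = 0;
  const ToyUnit* last_seen_unit_ = nullptr;
};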
+
class UseMarkingProcessor {
public:
explicit UseMarkingProcessor(MaglevCompilationInfo* compilation_info)
: compilation_info_(compilation_info) {}
- void PreProcessGraph(Graph* graph) { next_node_id_ = kFirstValidNodeId; }
+ void PreProcessGraph(Graph* graph) {}
void PostProcessGraph(Graph* graph) { DCHECK(loop_used_nodes_.empty()); }
void PreProcessBasicBlock(BasicBlock* block) {
if (!block->has_state()) return;
@@ -74,13 +180,16 @@ class UseMarkingProcessor {
template <typename NodeT>
void MarkInputUses(NodeT* node, const ProcessingState& state) {
LoopUsedNodes* loop_used_nodes = GetCurrentLoopUsedNodes();
+ // Mark input uses in the same order as inputs are assigned in the register
+ // allocator (see StraightForwardRegisterAllocator::AssignInputs).
+ node->ForAllInputsInRegallocAssignmentOrder(
+ [&](NodeBase::InputAllocationPolicy, Input* input) {
+ MarkUse(input->node(), node->id(), input, loop_used_nodes);
+ });
if constexpr (NodeT::kProperties.can_eager_deopt()) {
MarkCheckpointNodes(node, node->eager_deopt_info(), loop_used_nodes,
state);
}
- for (Input& input : *node) {
- MarkUse(input.node(), node->id(), &input, loop_used_nodes);
- }
if constexpr (NodeT::kProperties.can_lazy_deopt()) {
MarkCheckpointNodes(node, node->lazy_deopt_info(), loop_used_nodes,
state);
@@ -100,27 +209,25 @@ class UseMarkingProcessor {
BasicBlock* target = node->target();
uint32_t use = node->id();
+ DCHECK(!loop_used_nodes_.empty());
+ LoopUsedNodes loop_used_nodes = std::move(loop_used_nodes_.back());
+ loop_used_nodes_.pop_back();
+
+ LoopUsedNodes* outer_loop_used_nodes = GetCurrentLoopUsedNodes();
+
if (target->has_phi()) {
- // Phis are potential users of nodes outside this loop, but only on
- // initial loop entry, not on actual looping, so we don't need to record
- // their other inputs for lifetime extension.
for (Phi* phi : *target->phis()) {
ValueNode* input = phi->input(i).node();
- input->mark_use(use, &phi->input(i));
+ MarkUse(input, use, &phi->input(i), outer_loop_used_nodes);
}
}
- DCHECK(!loop_used_nodes_.empty());
- LoopUsedNodes loop_used_nodes = std::move(loop_used_nodes_.back());
- loop_used_nodes_.pop_back();
-
DCHECK_EQ(loop_used_nodes.header, target);
if (!loop_used_nodes.used_nodes.empty()) {
// Uses of nodes in this loop may need to propagate to an outer loop, so
// that their lifetime is extended there too.
// TODO(leszeks): We only need to extend the lifetime in one outermost
// loop, allow nodes to be "moved" between lifetime extensions.
- LoopUsedNodes* outer_loop_used_nodes = GetCurrentLoopUsedNodes();
base::Vector<Input> used_node_inputs =
compilation_info_->zone()->NewVector<Input>(
loop_used_nodes.used_nodes.size());
@@ -196,13 +303,14 @@ class UseMarkingProcessor {
}
MaglevCompilationInfo* compilation_info_;
- uint32_t next_node_id_;
+ uint32_t next_node_id_ = kFirstValidNodeId;
std::vector<LoopUsedNodes> loop_used_nodes_;
};
// static
-void MaglevCompiler::Compile(LocalIsolate* local_isolate,
+bool MaglevCompiler::Compile(LocalIsolate* local_isolate,
MaglevCompilationInfo* compilation_info) {
+ compiler::CurrentHeapBrokerScope current_broker(compilation_info->broker());
Graph* graph = Graph::New(compilation_info->zone());
// Build graph.
@@ -236,6 +344,17 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
std::cout << "\nAfter graph buiding" << std::endl;
PrintGraph(std::cout, compilation_info, graph);
}
+
+ if (v8_flags.maglev_untagged_phis) {
+ GraphProcessor<MaglevPhiRepresentationSelector> representation_selector(
+ &graph_builder);
+ representation_selector.ProcessGraph(graph);
+
+ if (v8_flags.print_maglev_graph) {
+ std::cout << "\nAfter Phi untagging" << std::endl;
+ PrintGraph(std::cout, compilation_info, graph);
+ }
+ }
}
#ifdef DEBUG
@@ -246,14 +365,19 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
#endif
{
- GraphMultiProcessor<UseMarkingProcessor, MaglevVregAllocator> processor(
- UseMarkingProcessor{compilation_info});
+ // Preprocessing for register allocation and code gen:
+ // - Collect input/output location constraints
+ // - Find the maximum number of stack arguments passed to calls
+ // - Collect use information, for SSA liveness and next-use distance.
+ GraphMultiProcessor<ValueLocationConstraintProcessor, MaxCallDepthProcessor,
+ UseMarkingProcessor, DecompressedUseMarkingProcessor>
+ processor(UseMarkingProcessor{compilation_info});
processor.ProcessGraph(graph);
}
if (v8_flags.print_maglev_graph) {
UnparkedScope unparked_scope(local_isolate->heap());
- std::cout << "After node processor" << std::endl;
+ std::cout << "After register allocation pre-processing" << std::endl;
PrintGraph(std::cout, compilation_info, graph);
}
@@ -275,11 +399,14 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
// Stash the compiled code_generator on the compilation info.
compilation_info->set_code_generator(std::move(code_generator));
}
+
+ return true;
}
// static
-MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
+MaybeHandle<Code> MaglevCompiler::GenerateCode(
Isolate* isolate, MaglevCompilationInfo* compilation_info) {
+ compiler::CurrentHeapBrokerScope current_broker(compilation_info->broker());
MaglevCodeGenerator* const code_generator =
compilation_info->code_generator();
DCHECK_NOT_NULL(code_generator);
@@ -304,8 +431,7 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
code->Print();
}
- isolate->native_context()->AddOptimizedCode(ToCodeT(*code));
- return ToCodeT(code, isolate);
+ return code;
}
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-compiler.h b/deps/v8/src/maglev/maglev-compiler.h
index febe387d87..9e9ee1ca1b 100644
--- a/deps/v8/src/maglev/maglev-compiler.h
+++ b/deps/v8/src/maglev/maglev-compiler.h
@@ -24,12 +24,12 @@ class Graph;
class MaglevCompiler : public AllStatic {
public:
// May be called from any thread.
- static void Compile(LocalIsolate* local_isolate,
+ static bool Compile(LocalIsolate* local_isolate,
MaglevCompilationInfo* compilation_info);
// Called on the main thread after Compile has completed.
// TODO(v8:7700): Move this to a different class?
- static MaybeHandle<CodeT> GenerateCode(
+ static MaybeHandle<Code> GenerateCode(
Isolate* isolate, MaglevCompilationInfo* compilation_info);
};
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
index ef23940ddc..af6ebf8f23 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
@@ -106,17 +106,19 @@ CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
RuntimeCallStats* stats, LocalIsolate* local_isolate) {
LocalIsolateScope scope{info(), local_isolate};
- maglev::MaglevCompiler::Compile(local_isolate, info());
+ if (!maglev::MaglevCompiler::Compile(local_isolate, info())) {
+ return CompilationJob::FAILED;
+ }
// TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED;
}
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
- Handle<CodeT> codet;
- if (!maglev::MaglevCompiler::GenerateCode(isolate, info()).ToHandle(&codet)) {
+ Handle<Code> code;
+ if (!maglev::MaglevCompiler::GenerateCode(isolate, info()).ToHandle(&code)) {
return CompilationJob::FAILED;
}
- info()->toplevel_compilation_unit()->function().object()->set_code(*codet);
+ info()->toplevel_compilation_unit()->function().object()->set_code(*code);
return CompilationJob::SUCCEEDED;
}
@@ -128,6 +130,23 @@ bool MaglevCompilationJob::specialize_to_function_context() const {
return info_->specialize_to_function_context();
}
+void MaglevCompilationJob::RecordCompilationStats(Isolate* isolate) const {
+ // Don't record samples from machines without high-resolution timers,
+ // as that can cause serious reporting issues. See the thread at
+ // http://g/chrome-metrics-team/NwwJEyL8odU/discussion for more details.
+ if (base::TimeTicks::IsHighResolution()) {
+ Counters* const counters = isolate->counters();
+ counters->maglev_optimize_prepare()->AddSample(
+ static_cast<int>(time_taken_to_prepare_.InMicroseconds()));
+ counters->maglev_optimize_execute()->AddSample(
+ static_cast<int>(time_taken_to_execute_.InMicroseconds()));
+ counters->maglev_optimize_finalize()->AddSample(
+ static_cast<int>(time_taken_to_finalize_.InMicroseconds()));
+ counters->maglev_optimize_total_time()->AddSample(
+ static_cast<int>(ElapsedTime().InMicroseconds()));
+ }
+}
+
// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
// processing the incoming queue on a worker thread.
class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
@@ -146,10 +165,13 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
TRACE_EVENT_WITH_FLOW0(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.MaglevBackground",
job.get(), TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
- RuntimeCallStats* rcs = nullptr; // TODO(v8:7700): Implement.
- CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate);
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
- outgoing_queue()->Enqueue(std::move(job));
+ RCS_SCOPE(&local_isolate,
+ RuntimeCallCounterId::kOptimizeBackgroundMaglev);
+ CompilationJob::Status status =
+ job->ExecuteJob(local_isolate.runtime_call_stats(), &local_isolate);
+ if (status == CompilationJob::SUCCEEDED) {
+ outgoing_queue()->Enqueue(std::move(job));
+ }
}
isolate()->stack_guard()->RequestInstallMaglevCode();
}
@@ -189,8 +211,6 @@ MaglevConcurrentDispatcher::~MaglevConcurrentDispatcher() {
void MaglevConcurrentDispatcher::EnqueueJob(
std::unique_ptr<MaglevCompilationJob>&& job) {
DCHECK(is_enabled());
- // TODO(v8:7700): RCS.
- // RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileMaglev);
incoming_queue_.Enqueue(std::move(job));
job_handle_->NotifyConcurrencyIncrease();
}
@@ -203,6 +223,8 @@ void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.MaglevConcurrentFinalize", job.get(),
TRACE_EVENT_FLAG_FLOW_IN);
+ RCS_SCOPE(isolate_,
+ RuntimeCallCounterId::kOptimizeConcurrentFinalizeMaglev);
Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_);
}
}
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
index 09f046eee2..4f05f9b3b4 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
@@ -25,7 +25,7 @@ class MaglevCompilationInfo;
// that should still be addressed soon:
// - Full tracing support through --trace-opt.
// - Concurrent codegen.
-// - Concurrent Code object creation (optional?).
+// - Concurrent InstructionStream object creation (optional?).
// - Test support for concurrency (see %FinalizeOptimization).
// Exports needed functionality without exposing implementation details.
@@ -62,6 +62,8 @@ class MaglevCompilationJob final : public OptimizedCompilationJob {
base::TimeDelta time_taken_to_execute() { return time_taken_to_execute_; }
base::TimeDelta time_taken_to_finalize() { return time_taken_to_finalize_; }
+ void RecordCompilationStats(Isolate* isolate) const;
+
private:
explicit MaglevCompilationJob(std::unique_ptr<MaglevCompilationInfo>&& info);
diff --git a/deps/v8/src/maglev/maglev-graph-builder.cc b/deps/v8/src/maglev/maglev-graph-builder.cc
index 16bd047e49..9f07f1cb72 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.cc
+++ b/deps/v8/src/maglev/maglev-graph-builder.cc
@@ -4,20 +4,30 @@
#include "src/maglev/maglev-graph-builder.h"
+#include <limits>
+
+#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/v8-fallthrough.h"
+#include "src/base/vector.h"
#include "src/builtins/builtins-constructor.h"
+#include "src/builtins/builtins.h"
+#include "src/codegen/cpu-features.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/globals.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/processed-feedback.h"
#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/flags/flags.h"
#include "src/handles/maybe-handles-inl.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/interpreter/bytecode-flags.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compilation-unit.h"
@@ -30,12 +40,34 @@
#include "src/objects/name-inl.h"
#include "src/objects/property-cell.h"
#include "src/objects/property-details.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
+#include "src/objects/type-hints.h"
+#include "src/zone/zone-handle-set.h"
namespace v8::internal::maglev {
namespace {
+enum class CpuOperation {
+ kFloat64Round,
+};
+
+// TODO(leszeks): Add a generic mechanism for marking nodes as optionally
+// supported.
+bool IsSupported(CpuOperation op) {
+#ifdef V8_TARGET_ARCH_X64
+ switch (op) {
+ case CpuOperation::kFloat64Round:
+ return CpuFeatures::IsSupported(SSE4_1) || CpuFeatures::IsSupported(AVX);
+ }
+#elif V8_TARGET_ARCH_ARM64
+ return true;
+#else
+#error "Maglev does not supported this architecture."
+#endif
+}
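A hypothetical usage of the helper above (the function name is a placeholder, not an existing Maglev graph builder method): a reduction would only take the dedicated rounding path when the CPU supports it, and would presumably fall back to a generic builtin call otherwise.

// Hypothetical sketch only; ShouldEmitDedicatedRoundNode is a placeholder
// name, not part of the Maglev graph builder API.
bool ShouldEmitDedicatedRoundNode() {
  return IsSupported(CpuOperation::kFloat64Round);
}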
+
ValueNode* TryGetParentContext(ValueNode* node) {
if (CreateFunctionContext* n = node->TryCast<CreateFunctionContext>()) {
return n->context().node();
@@ -68,27 +100,174 @@ void MinimizeContextChainDepth(ValueNode** context, size_t* depth) {
class FunctionContextSpecialization final : public AllStatic {
public:
- static base::Optional<compiler::ContextRef> TryToRef(
+ static compiler::OptionalContextRef TryToRef(
const MaglevCompilationUnit* unit, ValueNode* context, size_t* depth) {
DCHECK(unit->info()->specialize_to_function_context());
- base::Optional<compiler::ContextRef> ref;
+ compiler::OptionalContextRef ref;
if (InitialValue* n = context->TryCast<InitialValue>()) {
if (n->source().is_current_context()) {
- ref = unit->function().context();
+ ref = unit->function().context(unit->broker());
}
} else if (Constant* n = context->TryCast<Constant>()) {
ref = n->ref().AsContext();
}
if (!ref.has_value()) return {};
- return ref->previous(depth);
+ return ref->previous(unit->broker(), depth);
}
};
} // namespace
+class CallArguments {
+ public:
+ enum Mode {
+ kDefault,
+ kWithSpread,
+ kWithArrayLike,
+ };
+
+ CallArguments(ConvertReceiverMode receiver_mode,
+ interpreter::RegisterList reglist,
+ const InterpreterFrameState& frame, Mode mode = kDefault)
+ : receiver_mode_(receiver_mode),
+ args_(reglist.register_count()),
+ mode_(mode) {
+ for (int i = 0; i < reglist.register_count(); i++) {
+ args_[i] = frame.get(reglist[i]);
+ }
+ DCHECK_IMPLIES(args_.size() == 0,
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined);
+ DCHECK_IMPLIES(mode != kDefault,
+ receiver_mode == ConvertReceiverMode::kAny);
+ DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2);
+ }
+
+ explicit CallArguments(ConvertReceiverMode receiver_mode)
+ : receiver_mode_(receiver_mode), args_(), mode_(kDefault) {
+ DCHECK_EQ(receiver_mode, ConvertReceiverMode::kNullOrUndefined);
+ }
+
+ CallArguments(ConvertReceiverMode receiver_mode,
+ std::initializer_list<ValueNode*> args, Mode mode = kDefault)
+ : receiver_mode_(receiver_mode), args_(args), mode_(mode) {
+ DCHECK_IMPLIES(mode != kDefault,
+ receiver_mode == ConvertReceiverMode::kAny);
+ DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2);
+ }
+
+ ValueNode* receiver() const {
+ if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
+ return nullptr;
+ }
+ return args_[0];
+ }
+
+ size_t count() const {
+ if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
+ return args_.size();
+ }
+ return args_.size() - 1;
+ }
+
+ size_t count_with_receiver() const { return count() + 1; }
+
+ ValueNode* operator[](size_t i) const {
+ if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) {
+ i++;
+ }
+ if (i >= args_.size()) return nullptr;
+ return args_[i];
+ }
+
+ void set_arg(size_t i, ValueNode* node) {
+ if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) {
+ i++;
+ }
+ DCHECK_LT(i, args_.size());
+ args_[i] = node;
+ }
+
+ Mode mode() const { return mode_; }
+
+ ConvertReceiverMode receiver_mode() const { return receiver_mode_; }
+
+ void Truncate(size_t new_args_count) {
+ if (new_args_count >= count()) return;
+ size_t args_to_pop = count() - new_args_count;
+ for (size_t i = 0; i < args_to_pop; i++) {
+ args_.pop_back();
+ }
+ }
+
+ void PopReceiver(ConvertReceiverMode new_receiver_mode) {
+ DCHECK_NE(receiver_mode_, ConvertReceiverMode::kNullOrUndefined);
+ DCHECK_NE(new_receiver_mode, ConvertReceiverMode::kNullOrUndefined);
+ DCHECK_GT(args_.size(), 0); // We have at least a receiver to pop!
+ // TODO(victorgomes): Do this better!
+ for (size_t i = 0; i < args_.size() - 1; i++) {
+ args_[i] = args_[i + 1];
+ }
+ args_.pop_back();
+
+ // If there is no non-receiver argument to become the new receiver,
+ // consider the new receiver to be known undefined.
+ receiver_mode_ = args_.size() == 0 ? ConvertReceiverMode::kNullOrUndefined
+ : new_receiver_mode;
+ }
+
+ private:
+ ConvertReceiverMode receiver_mode_;
+ base::SmallVector<ValueNode*, 8> args_;
+ Mode mode_;
+};
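The receiver handling in CallArguments is easy to misread: when a receiver is present it occupies slot 0 of args_, and argument access transparently skips it. A self-contained toy using plain integers (not the real ValueNode*, and without the out-of-range nullptr case) shows the same indexing convention:

#include <cstddef>
#include <vector>

// Toy model of the indexing convention: the receiver, if any, lives at
// slot 0 and argument access skips over it.
struct ToyArgs {
  bool has_receiver;
  std::vector<int> slots;  // Receiver (if any) followed by the arguments.

  size_t count() const {
    return has_receiver ? slots.size() - 1 : slots.size();
  }
  int arg(size_t i) const { return slots[has_receiver ? i + 1 : i]; }
};

// With has_receiver == true and slots == {receiver, a0, a1}:
//   count() == 2, arg(0) == a0, arg(1) == a1.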
+
+class V8_NODISCARD MaglevGraphBuilder::CallSpeculationScope {
+ public:
+ CallSpeculationScope(MaglevGraphBuilder* builder,
+ compiler::FeedbackSource feedback_source)
+ : builder_(builder) {
+ DCHECK(!builder_->current_speculation_feedback_.IsValid());
+ if (feedback_source.IsValid()) {
+ DCHECK_EQ(
+ FeedbackNexus(feedback_source.vector, feedback_source.slot).kind(),
+ FeedbackSlotKind::kCall);
+ }
+ builder_->current_speculation_feedback_ = feedback_source;
+ }
+ ~CallSpeculationScope() {
+ builder_->current_speculation_feedback_ = compiler::FeedbackSource();
+ }
+
+ private:
+ MaglevGraphBuilder* builder_;
+};
+
+class V8_NODISCARD MaglevGraphBuilder::LazyDeoptContinuationScope {
+ public:
+ LazyDeoptContinuationScope(MaglevGraphBuilder* builder, Builtin continuation)
+ : builder_(builder),
+ parent_(builder->current_lazy_deopt_continuation_scope_),
+ continuation_(continuation) {
+ builder_->current_lazy_deopt_continuation_scope_ = this;
+ }
+ ~LazyDeoptContinuationScope() {
+ builder_->current_lazy_deopt_continuation_scope_ = parent_;
+ }
+
+ LazyDeoptContinuationScope* parent() const { return parent_; }
+ Builtin continuation() const { return continuation_; }
+
+ private:
+ MaglevGraphBuilder* builder_;
+ LazyDeoptContinuationScope* parent_;
+ Builtin continuation_;
+};
+
MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
MaglevCompilationUnit* compilation_unit,
- Graph* graph, MaglevGraphBuilder* parent)
+ Graph* graph, float call_frequency,
+ BytecodeOffset bytecode_offset,
+ MaglevGraphBuilder* parent)
: local_isolate_(local_isolate),
compilation_unit_(compilation_unit),
parent_(parent),
@@ -96,7 +275,8 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
bytecode_analysis_(bytecode().object(), zone(), BytecodeOffset::None(),
true),
iterator_(bytecode().object()),
- source_position_iterator_(bytecode().SourcePositionTable()),
+ source_position_iterator_(bytecode().SourcePositionTable(broker())),
+ call_frequency_(call_frequency),
// Add an extra jump_target slot for the inline exit if needed.
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length() +
(is_inline() ? 1 : 0))),
@@ -105,7 +285,12 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
// exit when needed.
merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>(
bytecode().length() + 1)),
- current_interpreter_frame_(*compilation_unit_),
+ current_interpreter_frame_(
+ *compilation_unit_,
+ is_inline() ? parent->current_interpreter_frame_.known_node_aspects()
+ : compilation_unit_->zone()->New<KnownNodeAspects>(
+ compilation_unit_->zone())),
+ caller_bytecode_offset_(bytecode_offset),
catch_block_stack_(zone()) {
memset(merge_states_, 0,
(bytecode().length() + 1) * sizeof(InterpreterFrameState*));
@@ -148,15 +333,16 @@ ValueNode* MaglevGraphBuilder::GetTaggedArgument(int i) {
return GetTaggedValue(reg);
}
-void MaglevGraphBuilder::BuildRegisterFrameInitialization() {
- // TODO(leszeks): Extract out a separate "incoming context/closure" nodes,
- // to be able to read in the machine register but also use the frame-spilled
- // slot.
- interpreter::Register regs[] = {interpreter::Register::current_context(),
- interpreter::Register::function_closure()};
- for (interpreter::Register& reg : regs) {
- current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
- }
+void MaglevGraphBuilder::InitializeRegister(interpreter::Register reg,
+ ValueNode* value) {
+ current_interpreter_frame_.set(
+ reg, value ? value : AddNewNode<InitialValue>({}, reg));
+}
+
+void MaglevGraphBuilder::BuildRegisterFrameInitialization(ValueNode* context,
+ ValueNode* closure) {
+ InitializeRegister(interpreter::Register::current_context(), context);
+ InitializeRegister(interpreter::Register::function_closure(), closure);
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
@@ -167,16 +353,17 @@ void MaglevGraphBuilder::BuildRegisterFrameInitialization() {
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
- StoreRegister(interpreter::Register(register_index), undefined_value);
+ current_interpreter_frame_.set(interpreter::Register(register_index),
+ undefined_value);
}
- StoreRegister(
+ current_interpreter_frame_.set(
new_target_or_generator_register,
- // TODO(leszeks): Expose in Graph.
- AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister));
+ GetRegisterInput(kJavaScriptCallNewTargetRegister));
register_index++;
}
for (; register_index < register_count(); register_index++) {
- StoreRegister(interpreter::Register(register_index), undefined_value);
+ current_interpreter_frame_.set(interpreter::Register(register_index),
+ undefined_value);
}
}
@@ -215,6 +402,511 @@ void MaglevGraphBuilder::BuildMergeStates() {
}
namespace {
+
+template <int index, interpreter::OperandType... operands>
+struct GetResultLocationAndSizeHelper;
+
+// Terminal cases
+template <int index>
+struct GetResultLocationAndSizeHelper<index> {
+ static std::pair<interpreter::Register, int> GetResultLocationAndSize(
+ const interpreter::BytecodeArrayIterator& iterator) {
+    // TODO(leszeks): This should probably be "UNREACHABLE", but we attach lazy
+    // deopt info to interrupt budget updates at returns purely for stack
+    // iteration purposes, not because they can actually lazy deopt.
+ return {interpreter::Register::invalid_value(), 0};
+ }
+ static bool HasOutputRegisterOperand() { return false; }
+};
+
+template <int index, interpreter::OperandType... operands>
+struct GetResultLocationAndSizeHelper<index, interpreter::OperandType::kRegOut,
+ operands...> {
+ static std::pair<interpreter::Register, int> GetResultLocationAndSize(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ // We shouldn't have any other output operands than this one.
+ return {iterator.GetRegisterOperand(index), 1};
+ }
+ static bool HasOutputRegisterOperand() { return true; }
+};
+
+template <int index, interpreter::OperandType... operands>
+struct GetResultLocationAndSizeHelper<
+ index, interpreter::OperandType::kRegOutPair, operands...> {
+ static std::pair<interpreter::Register, int> GetResultLocationAndSize(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ // We shouldn't have any other output operands than this one.
+ return {iterator.GetRegisterOperand(index), 2};
+ }
+ static bool HasOutputRegisterOperand() { return true; }
+};
+
+template <int index, interpreter::OperandType... operands>
+struct GetResultLocationAndSizeHelper<
+ index, interpreter::OperandType::kRegOutTriple, operands...> {
+ static std::pair<interpreter::Register, int> GetResultLocationAndSize(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ // We shouldn't have any other output operands than this one.
+ DCHECK(!(GetResultLocationAndSizeHelper<
+ index + 1, operands...>::HasOutputRegisterOperand()));
+ return {iterator.GetRegisterOperand(index), 3};
+ }
+ static bool HasOutputRegisterOperand() { return true; }
+};
+
+// We don't support RegOutList for lazy deopts.
+template <int index, interpreter::OperandType... operands>
+struct GetResultLocationAndSizeHelper<
+ index, interpreter::OperandType::kRegOutList, operands...> {
+ static std::pair<interpreter::Register, int> GetResultLocationAndSize(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ interpreter::RegisterList list = iterator.GetRegisterListOperand(index);
+ return {list.first_register(), list.register_count()};
+ }
+ static bool HasOutputRegisterOperand() { return true; }
+};
+
+// Induction case.
+template <int index, interpreter::OperandType operand,
+ interpreter::OperandType... operands>
+struct GetResultLocationAndSizeHelper<index, operand, operands...> {
+ static std::pair<interpreter::Register, int> GetResultLocationAndSize(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ return GetResultLocationAndSizeHelper<
+ index + 1, operands...>::GetResultLocationAndSize(iterator);
+ }
+ static bool HasOutputRegisterOperand() {
+ return GetResultLocationAndSizeHelper<
+ index + 1, operands...>::HasOutputRegisterOperand();
+ }
+};
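+
+// For example, for operand types (kReg, kRegOut, kIdx) the induction case
+// skips index 0, the kRegOut specialization stops at index 1 and returns that
+// register with size 1, and the trailing operands are never inspected.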
+
+template <interpreter::Bytecode bytecode,
+ interpreter::ImplicitRegisterUse implicit_use,
+ interpreter::OperandType... operands>
+std::pair<interpreter::Register, int> GetResultLocationAndSizeForBytecode(
+ const interpreter::BytecodeArrayIterator& iterator) {
+  // We don't support bytecodes that write to an implicit register.
+ DCHECK(!interpreter::BytecodeOperands::WritesImplicitRegister(implicit_use));
+ if (interpreter::BytecodeOperands::WritesAccumulator(implicit_use)) {
+ // If we write the accumulator, we shouldn't also write an output register.
+ DCHECK(!(GetResultLocationAndSizeHelper<
+ 0, operands...>::HasOutputRegisterOperand()));
+ return {interpreter::Register::virtual_accumulator(), 1};
+ }
+
+  // Use template magic to emit the appropriate GetRegisterOperand call and
+  // size for this bytecode.
+ return GetResultLocationAndSizeHelper<
+ 0, operands...>::GetResultLocationAndSize(iterator);
+}
+
+} // namespace
+
+std::pair<interpreter::Register, int>
+MaglevGraphBuilder::GetResultLocationAndSize() const {
+ using Bytecode = interpreter::Bytecode;
+ using OperandType = interpreter::OperandType;
+ using ImplicitRegisterUse = interpreter::ImplicitRegisterUse;
+ Bytecode bytecode = iterator_.current_bytecode();
+ // TODO(leszeks): Only emit these cases for bytecodes we know can lazy deopt.
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return GetResultLocationAndSizeForBytecode<Bytecode::k##Name, \
+ __VA_ARGS__>(iterator_);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+}
+
+#ifdef DEBUG
+bool MaglevGraphBuilder::HasOutputRegister(interpreter::Register reg) const {
+ interpreter::Bytecode bytecode = iterator_.current_bytecode();
+ if (reg == interpreter::Register::virtual_accumulator()) {
+ return interpreter::Bytecodes::WritesAccumulator(bytecode);
+ }
+ for (int i = 0; i < interpreter::Bytecodes::NumberOfOperands(bytecode); ++i) {
+ if (interpreter::Bytecodes::IsRegisterOutputOperandType(
+ interpreter::Bytecodes::GetOperandType(bytecode, i))) {
+ interpreter::Register operand_reg = iterator_.GetRegisterOperand(i);
+ int operand_range = iterator_.GetRegisterOperandRange(i);
+ if (base::IsInRange(reg.index(), operand_reg.index(),
+ operand_reg.index() + operand_range)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+#endif
+
+DeoptFrame* MaglevGraphBuilder::GetParentDeoptFrame() {
+ if (parent_ == nullptr) return nullptr;
+ if (parent_deopt_frame_ == nullptr) {
+ // The parent resumes after the call, which is roughly equivalent to a lazy
+ // deopt. Use the helper function directly so that we can mark the
+ // accumulator as dead (since it'll be overwritten by this function's
+ // return value anyway).
+ // TODO(leszeks): This is true for our current set of
+ // inlinings/continuations, but there might be cases in the future where it
+ // isn't. We may need to store the relevant overwritten register in
+ // LazyDeoptContinuationScope.
+ DCHECK(interpreter::Bytecodes::WritesAccumulator(
+ parent_->iterator_.current_bytecode()));
+
+ parent_deopt_frame_ =
+ zone()->New<DeoptFrame>(parent_->GetDeoptFrameForLazyDeoptHelper(
+ parent_->current_lazy_deopt_continuation_scope_, true));
+ if (inlined_arguments_) {
+ parent_deopt_frame_ = zone()->New<InlinedArgumentsDeoptFrame>(
+ *compilation_unit_, caller_bytecode_offset_, *inlined_arguments_,
+ parent_deopt_frame_);
+ }
+ }
+ return parent_deopt_frame_;
+}
+
+DeoptFrame MaglevGraphBuilder::GetLatestCheckpointedFrame() {
+ if (!latest_checkpointed_frame_) {
+ // TODO(leszeks): Figure out a way of handling eager continuations.
+ DCHECK_NULL(current_lazy_deopt_continuation_scope_);
+ latest_checkpointed_frame_.emplace(
+ *compilation_unit_,
+ zone()->New<CompactInterpreterFrameState>(
+ *compilation_unit_, GetInLiveness(), current_interpreter_frame_),
+ BytecodeOffset(iterator_.current_offset()), current_source_position_,
+ GetParentDeoptFrame());
+ }
+ return *latest_checkpointed_frame_;
+}
+
+DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeopt() {
+ return GetDeoptFrameForLazyDeoptHelper(current_lazy_deopt_continuation_scope_,
+ false);
+}
+
+DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeoptHelper(
+ LazyDeoptContinuationScope* continuation_scope,
+ bool mark_accumulator_dead) {
+ if (continuation_scope == nullptr) {
+    // Copy the out-liveness if we need to explicitly drop the accumulator, so
+    // that we don't mutate the shared liveness state.
+ const compiler::BytecodeLivenessState* liveness = GetOutLiveness();
+ if (mark_accumulator_dead && liveness->AccumulatorIsLive()) {
+ compiler::BytecodeLivenessState* liveness_copy =
+ zone()->New<compiler::BytecodeLivenessState>(*liveness, zone());
+ liveness_copy->MarkAccumulatorDead();
+ liveness = liveness_copy;
+ }
+ return InterpretedDeoptFrame(
+ *compilation_unit_,
+ zone()->New<CompactInterpreterFrameState>(*compilation_unit_, liveness,
+ current_interpreter_frame_),
+ BytecodeOffset(iterator_.current_offset()), current_source_position_,
+ GetParentDeoptFrame());
+ }
+
+  // We currently only support builtin continuations for bytecodes that write
+  // to the accumulator.
+ DCHECK(
+ interpreter::Bytecodes::WritesAccumulator(iterator_.current_bytecode()));
+ return BuiltinContinuationDeoptFrame(
+ continuation_scope->continuation(), {}, GetContext(),
+ // Mark the accumulator dead in parent frames since we know that the
+ // continuation will write it.
+ zone()->New<DeoptFrame>(
+ GetDeoptFrameForLazyDeoptHelper(continuation_scope->parent(), true)));
+}
+
+InterpretedDeoptFrame MaglevGraphBuilder::GetDeoptFrameForEntryStackCheck() {
+ DCHECK_EQ(iterator_.current_offset(), 0);
+ DCHECK_NULL(parent_);
+ return InterpretedDeoptFrame(
+ *compilation_unit_,
+ zone()->New<CompactInterpreterFrameState>(
+ *compilation_unit_, GetInLivenessFor(0), current_interpreter_frame_),
+ BytecodeOffset(kFunctionEntryBytecodeOffset), current_source_position_,
+ nullptr);
+}
+
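+// Conversions are cached as "alternative" nodes on the value's NodeInfo, so
+// that repeated requests for the same representation reuse a single
+// conversion node instead of emitting a new one each time.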
+ValueNode* MaglevGraphBuilder::GetTaggedValue(ValueNode* value) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged:
+ return value;
+ case ValueRepresentation::kInt32: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->tagged_alternative == nullptr) {
+ if (NodeTypeIsSmi(node_info->type)) {
+ node_info->tagged_alternative = AddNewNode<UnsafeSmiTag>({value});
+ } else {
+ node_info->tagged_alternative = AddNewNode<Int32ToNumber>({value});
+ }
+ }
+ return node_info->tagged_alternative;
+ }
+ case ValueRepresentation::kUint32: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->tagged_alternative == nullptr) {
+ if (NodeTypeIsSmi(node_info->type)) {
+ node_info->tagged_alternative = AddNewNode<UnsafeSmiTag>({value});
+ } else {
+ node_info->tagged_alternative = AddNewNode<Uint32ToNumber>({value});
+ }
+ }
+ return node_info->tagged_alternative;
+ }
+ case ValueRepresentation::kFloat64: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->tagged_alternative == nullptr) {
+ node_info->tagged_alternative = AddNewNode<Float64ToTagged>({value});
+ }
+ return node_info->tagged_alternative;
+ }
+ }
+ UNREACHABLE();
+}
+
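+// Returns a node known to be an internalized string, inserting a
+// CheckedInternalizedString if necessary, so that equality on it can be done
+// by simple reference comparison.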
+ValueNode* MaglevGraphBuilder::GetInternalizedString(
+ interpreter::Register reg) {
+ ValueNode* node = GetTaggedValue(reg);
+ if (known_node_aspects().GetOrCreateInfoFor(node)->is_internalized_string()) {
+ return node;
+ }
+ if (Constant* constant = node->TryCast<Constant>()) {
+ if (constant->object().IsInternalizedString()) {
+ SetKnownType(constant, NodeType::kInternalizedString);
+ return constant;
+ }
+ }
+ node = AddNewNode<CheckedInternalizedString>({node});
+ SetKnownType(node, NodeType::kInternalizedString);
+ current_interpreter_frame_.set(reg, node);
+ return node;
+}
+
+namespace {
+NodeType TaggedToFloat64ConversionTypeToNodeType(
+ TaggedToFloat64ConversionType conversion_type) {
+ switch (conversion_type) {
+ case TaggedToFloat64ConversionType::kNumber:
+ return NodeType::kNumber;
+ case TaggedToFloat64ConversionType::kNumberOrOddball:
+ return NodeType::kNumberOrOddball;
+ }
+}
+} // namespace
+
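+// Truncation follows the JavaScript ToInt32 semantics used by bitwise
+// operations: e.g. a double like 2**32 + 5 truncates to 5, and NaN (or, with
+// kNumberOrOddball, oddballs such as undefined) truncates to 0.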
+ValueNode* MaglevGraphBuilder::GetTruncatedInt32FromNumber(
+ ValueNode* value, TaggedToFloat64ConversionType conversion_type) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged: {
+ if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
+ return GetInt32Constant(constant->value().value());
+ }
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative != nullptr) {
+ return node_info->int32_alternative;
+ }
+ if (node_info->truncated_int32_alternative != nullptr) {
+ return node_info->truncated_int32_alternative;
+ }
+ NodeType old_type;
+ NodeType desired_type =
+ TaggedToFloat64ConversionTypeToNodeType(conversion_type);
+ EnsureType(value, desired_type, &old_type);
+ if (NodeTypeIsSmi(old_type)) {
+ return node_info->int32_alternative =
+ AddNewNode<UnsafeSmiUntag>({value});
+ }
+ if (NodeTypeIs(old_type, desired_type)) {
+ return node_info->truncated_int32_alternative =
+ AddNewNode<TruncateNumberOrOddballToInt32>({value},
+ conversion_type);
+ }
+ return node_info->truncated_int32_alternative =
+ AddNewNode<CheckedTruncateNumberOrOddballToInt32>(
+ {value}, conversion_type);
+ }
+ case ValueRepresentation::kFloat64: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->truncated_int32_alternative == nullptr) {
+ node_info->truncated_int32_alternative =
+ AddNewNode<TruncateFloat64ToInt32>({value});
+ }
+ return node_info->truncated_int32_alternative;
+ }
+ case ValueRepresentation::kInt32:
+ // Already good.
+ return value;
+ case ValueRepresentation::kUint32:
+ return AddNewNode<TruncateUint32ToInt32>({value});
+ }
+ UNREACHABLE();
+}
+
+ValueNode* MaglevGraphBuilder::GetTruncatedInt32(ValueNode* value) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged:
+ case ValueRepresentation::kFloat64:
+ return GetInt32(value);
+ case ValueRepresentation::kInt32:
+ // Already good.
+ return value;
+ case ValueRepresentation::kUint32:
+ return AddNewNode<TruncateUint32ToInt32>({value});
+ }
+ UNREACHABLE();
+}
+
+ValueNode* MaglevGraphBuilder::GetInt32(ValueNode* value) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged: {
+ if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
+ return GetInt32Constant(constant->value().value());
+ }
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative == nullptr) {
+ node_info->int32_alternative = BuildSmiUntag(value);
+ }
+ return node_info->int32_alternative;
+ }
+ case ValueRepresentation::kInt32:
+ return value;
+ case ValueRepresentation::kUint32: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative == nullptr) {
+ if (node_info->is_smi()) {
+ node_info->int32_alternative =
+ AddNewNode<TruncateUint32ToInt32>({value});
+ } else {
+ node_info->int32_alternative =
+ AddNewNode<CheckedUint32ToInt32>({value});
+ }
+ }
+ return node_info->int32_alternative;
+ }
+ case ValueRepresentation::kFloat64: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative == nullptr) {
+ node_info->int32_alternative =
+ AddNewNode<CheckedTruncateFloat64ToInt32>({value});
+ }
+ return node_info->int32_alternative;
+ }
+ }
+ UNREACHABLE();
+}
+
+ValueNode* MaglevGraphBuilder::GetFloat64(
+ ValueNode* value, TaggedToFloat64ConversionType conversion_type) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged: {
+ if (Constant* constant = value->TryCast<Constant>()) {
+ if (constant->object().IsHeapNumber()) {
+ return GetFloat64Constant(constant->object().AsHeapNumber().value());
+ }
+ }
+ if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
+ return GetFloat64Constant(constant->value().value());
+ }
+ if (conversion_type == TaggedToFloat64ConversionType::kNumberOrOddball) {
+ if (RootConstant* constant = value->TryCast<RootConstant>()) {
+ Object root_object = local_isolate_->root(constant->index());
+ if (root_object.IsOddball(local_isolate_)) {
+ return GetFloat64Constant(
+ Oddball::cast(root_object).to_number_raw());
+ }
+ }
+ }
+
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->float64_alternative != nullptr) {
+ return node_info->float64_alternative;
+ }
+ switch (conversion_type) {
+ case TaggedToFloat64ConversionType::kNumber:
+ // Number->Float64 conversions are lossless, so they can become the
+ // canonical float64_alternative.
+ return node_info->float64_alternative = BuildNumberOrOddballToFloat64(
+ value, TaggedToFloat64ConversionType::kNumber);
+ case TaggedToFloat64ConversionType::kNumberOrOddball: {
+          // NumberOrOddball->Float64 conversions are lossy, since they lose
+          // the oddball information, so they can only become the canonical
+          // float64_alternative if the value is known to be a number (and
+          // therefore not an oddball).
+ ValueNode* float64_node = BuildNumberOrOddballToFloat64(
+ value, TaggedToFloat64ConversionType::kNumberOrOddball);
+ if (NodeTypeIsNumber(node_info->type)) {
+ node_info->float64_alternative = float64_node;
+ }
+ return float64_node;
+ }
+ }
+ }
+ case ValueRepresentation::kInt32: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->float64_alternative == nullptr) {
+ node_info->float64_alternative =
+ AddNewNode<ChangeInt32ToFloat64>({value});
+ }
+ return node_info->float64_alternative;
+ }
+ case ValueRepresentation::kUint32: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->float64_alternative == nullptr) {
+ node_info->float64_alternative =
+ AddNewNode<ChangeUint32ToFloat64>({value});
+ }
+ return node_info->float64_alternative;
+ }
+ case ValueRepresentation::kFloat64:
+ return value;
+ }
+ UNREACHABLE();
+}
+
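+// Used for Uint8Clamped element stores: out-of-range values are clamped into
+// [0, 255] rather than truncated, e.g. -3 becomes 0 and 300 becomes 255.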
+ValueNode* MaglevGraphBuilder::GetUint32ClampedFromNumber(ValueNode* value) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged: {
+ if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
+ int32_t value = constant->value().value();
+ if (value < 0) return GetInt32Constant(0);
+ if (value > 255) return GetInt32Constant(255);
+ return GetInt32Constant(constant->value().value());
+ }
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative != nullptr) {
+ return AddNewNode<Int32ToUint8Clamped>({node_info->int32_alternative});
+ }
+ return AddNewNode<CheckedNumberToUint8Clamped>({value});
+ }
+ case ValueRepresentation::kFloat64: {
+ return AddNewNode<Float64ToUint8Clamped>({value});
+ }
+ case ValueRepresentation::kInt32:
+ return AddNewNode<Int32ToUint8Clamped>({value});
+ case ValueRepresentation::kUint32:
+ return AddNewNode<Uint32ToUint8Clamped>({value});
+ }
+ UNREACHABLE();
+}
+
+namespace {
template <Operation kOperation>
struct NodeForOperationHelper;
@@ -230,45 +922,23 @@ template <Operation kOperation>
using GenericNodeForOperation =
typename NodeForOperationHelper<kOperation>::generic_type;
-// TODO(victorgomes): Remove this once all operations have fast paths.
+// Bitwise operations reinterpret their numeric input as Int32 bits, which
+// means we want to do slightly different input conversions for them.
template <Operation kOperation>
-bool BinaryOperationHasInt32FastPath() {
+constexpr bool BinaryOperationIsBitwiseInt32() {
switch (kOperation) {
- case Operation::kAdd:
- case Operation::kSubtract:
- case Operation::kMultiply:
- case Operation::kDivide:
- case Operation::kModulus:
+ case Operation::kBitwiseNot:
case Operation::kBitwiseAnd:
case Operation::kBitwiseOr:
case Operation::kBitwiseXor:
case Operation::kShiftLeft:
case Operation::kShiftRight:
case Operation::kShiftRightLogical:
- case Operation::kEqual:
- case Operation::kStrictEqual:
- case Operation::kLessThan:
- case Operation::kLessThanOrEqual:
- case Operation::kGreaterThan:
- case Operation::kGreaterThanOrEqual:
return true;
default:
return false;
}
}
-template <Operation kOperation>
-bool BinaryOperationHasFloat64FastPath() {
- switch (kOperation) {
- case Operation::kAdd:
- case Operation::kSubtract:
- case Operation::kMultiply:
- case Operation::kDivide:
- return true;
- default:
- return false;
- }
-}
-
} // namespace
// MAP_OPERATION_TO_NODES are tuples with the following format:
@@ -276,19 +946,25 @@ bool BinaryOperationHasFloat64FastPath() {
// - Int32 operation node,
 // - Identity of int32 operation (e.g., 0 for add/sub and 1 for mul/div), if it
// exists, or otherwise {}.
-#define MAP_OPERATION_TO_INT32_NODE(V) \
- V(Add, Int32AddWithOverflow, 0) \
- V(Subtract, Int32SubtractWithOverflow, 0) \
- V(Multiply, Int32MultiplyWithOverflow, 1) \
- V(Divide, Int32DivideWithOverflow, 1) \
- V(Modulus, Int32ModulusWithOverflow, {}) \
- V(BitwiseAnd, Int32BitwiseAnd, ~0) \
- V(BitwiseOr, Int32BitwiseOr, 0) \
- V(BitwiseXor, Int32BitwiseXor, 0) \
- V(ShiftLeft, Int32ShiftLeft, 0) \
- V(ShiftRight, Int32ShiftRight, 0) \
+#define MAP_BINARY_OPERATION_TO_INT32_NODE(V) \
+ V(Add, Int32AddWithOverflow, 0) \
+ V(Subtract, Int32SubtractWithOverflow, 0) \
+ V(Multiply, Int32MultiplyWithOverflow, 1) \
+ V(Divide, Int32DivideWithOverflow, 1) \
+ V(Modulus, Int32ModulusWithOverflow, {}) \
+ V(BitwiseAnd, Int32BitwiseAnd, ~0) \
+ V(BitwiseOr, Int32BitwiseOr, 0) \
+ V(BitwiseXor, Int32BitwiseXor, 0) \
+ V(ShiftLeft, Int32ShiftLeft, 0) \
+ V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, {})
+#define MAP_UNARY_OPERATION_TO_INT32_NODE(V) \
+ V(BitwiseNot, Int32BitwiseNot) \
+ V(Increment, Int32IncrementWithOverflow) \
+ V(Decrement, Int32DecrementWithOverflow) \
+ V(Negate, Int32NegateWithOverflow)
+
#define MAP_COMPARE_OPERATION_TO_INT32_NODE(V) \
V(Equal, Int32Equal) \
V(StrictEqual, Int32StrictEqual) \
@@ -303,7 +979,18 @@ bool BinaryOperationHasFloat64FastPath() {
V(Add, Float64Add) \
V(Subtract, Float64Subtract) \
V(Multiply, Float64Multiply) \
- V(Divide, Float64Divide)
+ V(Divide, Float64Divide) \
+ V(Modulus, Float64Modulus) \
+ V(Negate, Float64Negate) \
+ V(Exponentiate, Float64Exponentiate)
+
+#define MAP_COMPARE_OPERATION_TO_FLOAT64_NODE(V) \
+ V(Equal, Float64Equal) \
+ V(StrictEqual, Float64StrictEqual) \
+ V(LessThan, Float64LessThan) \
+ V(LessThanOrEqual, Float64LessThanOrEqual) \
+ V(GreaterThan, Float64GreaterThan) \
+ V(GreaterThanOrEqual, Float64GreaterThanOrEqual)
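+
+// Int32Identity() below returns the identity constant recorded in
+// MAP_BINARY_OPERATION_TO_INT32_NODE; when the Smi operand equals it (e.g.
+// x + 0, x * 1 or x & ~0), the BinarySmiOperation builders skip emitting a
+// node entirely.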
template <Operation kOperation>
static constexpr base::Optional<int> Int32Identity() {
@@ -311,45 +998,43 @@ static constexpr base::Optional<int> Int32Identity() {
#define CASE(op, _, identity) \
case Operation::k##op: \
return identity;
- MAP_OPERATION_TO_INT32_NODE(CASE)
+ MAP_BINARY_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
}
}
+namespace {
template <Operation kOperation>
-ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
- std::initializer_list<ValueNode*> inputs) {
- switch (kOperation) {
-#define CASE(op, OpNode, _) \
- case Operation::k##op: \
- return AddNewNode<OpNode>(inputs);
- MAP_OPERATION_TO_INT32_NODE(CASE)
-#undef CASE
-#define CASE(op, OpNode) \
- case Operation::k##op: \
- return AddNewNode<OpNode>(inputs);
- MAP_COMPARE_OPERATION_TO_INT32_NODE(CASE)
-#undef CASE
- default:
- UNREACHABLE();
- }
-}
+struct Int32NodeForHelper;
+#define SPECIALIZATION(op, OpNode, ...) \
+ template <> \
+ struct Int32NodeForHelper<Operation::k##op> { \
+ using type = OpNode; \
+ };
+MAP_UNARY_OPERATION_TO_INT32_NODE(SPECIALIZATION)
+MAP_BINARY_OPERATION_TO_INT32_NODE(SPECIALIZATION)
+MAP_COMPARE_OPERATION_TO_INT32_NODE(SPECIALIZATION)
+#undef SPECIALIZATION
template <Operation kOperation>
-ValueNode* MaglevGraphBuilder::AddNewFloat64BinaryOperationNode(
- std::initializer_list<ValueNode*> inputs) {
- switch (kOperation) {
-#define CASE(op, OpNode) \
- case Operation::k##op: \
- return AddNewNode<OpNode>(inputs);
- MAP_OPERATION_TO_FLOAT64_NODE(CASE)
-#undef CASE
- default:
- UNREACHABLE();
- }
-}
+using Int32NodeFor = typename Int32NodeForHelper<kOperation>::type;
+
+template <Operation kOperation>
+struct Float64NodeForHelper;
+#define SPECIALIZATION(op, OpNode) \
+ template <> \
+ struct Float64NodeForHelper<Operation::k##op> { \
+ using type = OpNode; \
+ };
+MAP_OPERATION_TO_FLOAT64_NODE(SPECIALIZATION)
+MAP_COMPARE_OPERATION_TO_FLOAT64_NODE(SPECIALIZATION)
+#undef SPECIALIZATION
+
+template <Operation kOperation>
+using Float64NodeFor = typename Float64NodeForHelper<kOperation>::type;
+} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
@@ -379,13 +1064,34 @@ void MaglevGraphBuilder::BuildGenericBinarySmiOperationNode() {
}
template <Operation kOperation>
+void MaglevGraphBuilder::BuildInt32UnaryOperationNode() {
+ static const bool input_is_truncated =
+ BinaryOperationIsBitwiseInt32<kOperation>();
+ // TODO(v8:7700): Do constant folding.
+ ValueNode* value = input_is_truncated ? GetAccumulatorTruncatedInt32()
+ : GetAccumulatorInt32();
+ using OpNodeT = Int32NodeFor<kOperation>;
+ SetAccumulator(AddNewNode<OpNodeT>({value}));
+}
+
+void MaglevGraphBuilder::BuildTruncatingInt32BitwiseNotForNumber(
+ TaggedToFloat64ConversionType conversion_type) {
+ // TODO(v8:7700): Do constant folding.
+ ValueNode* value = GetTruncatedInt32FromNumber(
+ current_interpreter_frame_.accumulator(), conversion_type);
+ SetAccumulator(AddNewNode<Int32BitwiseNot>({value}));
+}
+
+template <Operation kOperation>
ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left,
ValueNode* right) {
switch (kOperation) {
case Operation::kModulus:
- // x % x = 0
- if (right == left) return GetInt32Constant(0);
- break;
+ // Note the `x % x = 0` fold is invalid since for negative x values the
+ // result is -0.0.
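+      // (For example, in JS -8 % -8 is -0, not 0.)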
+ // TODO(v8:7700): Consider re-enabling this fold if the result is used
+ // only in contexts where -0.0 is semantically equivalent to 0.0, or if x
+ // is known to be non-negative.
default:
// TODO(victorgomes): Implement more folds.
break;
@@ -398,13 +1104,14 @@ ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left,
int right) {
switch (kOperation) {
case Operation::kModulus:
- // x % 1 = 0
- // x % -1 = 0
- if (right == 1 || right == -1) return GetInt32Constant(0);
+ // Note the `x % 1 = 0` and `x % -1 = 0` folds are invalid since for
+ // negative x values the result is -0.0.
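+      // (For example, in JS -8 % 1 is -0, not 0.)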
+ // TODO(v8:7700): Consider re-enabling this fold if the result is used
+ // only in contexts where -0.0 is semantically equivalent to 0.0, or if x
+ // is known to be non-negative.
// TODO(victorgomes): We can emit faster mod operation if {right} is power
// of 2, unfortunately we need to know if {left} is negative or not.
// Maybe emit a Int32ModulusRightIsPowerOf2?
- break;
default:
// TODO(victorgomes): Implement more folds.
break;
@@ -414,27 +1121,100 @@ ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left,
template <Operation kOperation>
void MaglevGraphBuilder::BuildInt32BinaryOperationNode() {
+ // Truncating Int32 nodes treat their input as a signed int32 regardless
+ // of whether it's really signed or not, so we allow Uint32 by loading a
+ // TruncatedInt32 value.
+ static const bool inputs_are_truncated =
+ BinaryOperationIsBitwiseInt32<kOperation>();
// TODO(v8:7700): Do constant folding.
- ValueNode* left = LoadRegisterInt32(0);
- ValueNode* right = GetAccumulatorInt32();
+ ValueNode* left = inputs_are_truncated ? LoadRegisterTruncatedInt32(0)
+ : LoadRegisterInt32(0);
+ ValueNode* right = inputs_are_truncated ? GetAccumulatorTruncatedInt32()
+ : GetAccumulatorInt32();
if (ValueNode* result =
TryFoldInt32BinaryOperation<kOperation>(left, right)) {
SetAccumulator(result);
return;
}
+ using OpNodeT = Int32NodeFor<kOperation>;
+
+ SetAccumulator(AddNewNode<OpNodeT>({left, right}));
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildTruncatingInt32BinaryOperationNodeForNumber(
+ TaggedToFloat64ConversionType conversion_type) {
+ DCHECK(BinaryOperationIsBitwiseInt32<kOperation>());
+ // TODO(v8:7700): Do constant folding.
+ ValueNode* left;
+ ValueNode* right;
+ if (IsRegisterEqualToAccumulator(0)) {
+ left = right = GetTruncatedInt32FromNumber(
+ current_interpreter_frame_.get(iterator_.GetRegisterOperand(0)),
+ conversion_type);
+ } else {
+ left = GetTruncatedInt32FromNumber(
+ current_interpreter_frame_.get(iterator_.GetRegisterOperand(0)),
+ conversion_type);
+ right = GetTruncatedInt32FromNumber(
+ current_interpreter_frame_.accumulator(), conversion_type);
+ }
- SetAccumulator(AddNewInt32BinaryOperationNode<kOperation>({left, right}));
+ if (ValueNode* result =
+ TryFoldInt32BinaryOperation<kOperation>(left, right)) {
+ SetAccumulator(result);
+ return;
+ }
+ SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
+ // Truncating Int32 nodes treat their input as a signed int32 regardless
+ // of whether it's really signed or not, so we allow Uint32 by loading a
+ // TruncatedInt32 value.
+ static const bool inputs_are_truncated =
+ BinaryOperationIsBitwiseInt32<kOperation>();
+ // TODO(v8:7700): Do constant folding.
+ ValueNode* left = inputs_are_truncated ? GetAccumulatorTruncatedInt32()
+ : GetAccumulatorInt32();
+ int32_t constant = iterator_.GetImmediateOperand(0);
+ if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
+    // If the constant is the unit of the operation, the (possibly truncated)
+    // left operand already has the right value, so store it in the accumulator
+    // (unless it is just a conversion) and return.
+ if (inputs_are_truncated && !left->properties().is_conversion()) {
+ current_interpreter_frame_.set_accumulator(left);
+ }
+ return;
+ }
+ if (ValueNode* result =
+ TryFoldInt32BinaryOperation<kOperation>(left, constant)) {
+ SetAccumulator(result);
+ return;
+ }
+ ValueNode* right = GetInt32Constant(constant);
+
+ using OpNodeT = Int32NodeFor<kOperation>;
+
+ SetAccumulator(AddNewNode<OpNodeT>({left, right}));
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildTruncatingInt32BinarySmiOperationNodeForNumber(
+ TaggedToFloat64ConversionType conversion_type) {
+ DCHECK(BinaryOperationIsBitwiseInt32<kOperation>());
// TODO(v8:7700): Do constant folding.
- ValueNode* left = GetAccumulatorInt32();
+ ValueNode* left = GetTruncatedInt32FromNumber(
+ current_interpreter_frame_.accumulator(), conversion_type);
int32_t constant = iterator_.GetImmediateOperand(0);
if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
// If the constant is the unit of the operation, it already has the right
- // value, so we can just return.
+    // value, so store the truncated value in the accumulator (unless it is
+    // just a conversion) and return.
+ if (!left->properties().is_conversion()) {
+ current_interpreter_frame_.set_accumulator(left);
+ }
return;
}
if (ValueNode* result =
@@ -443,30 +1223,99 @@ void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
return;
}
ValueNode* right = GetInt32Constant(constant);
- SetAccumulator(AddNewInt32BinaryOperationNode<kOperation>({left, right}));
+ SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
-void MaglevGraphBuilder::BuildFloat64BinarySmiOperationNode() {
+void MaglevGraphBuilder::BuildFloat64BinarySmiOperationNode(
+ TaggedToFloat64ConversionType conversion_type) {
// TODO(v8:7700): Do constant folding.
- ValueNode* left = GetAccumulatorFloat64();
+ ValueNode* left = GetAccumulatorFloat64(conversion_type);
double constant = static_cast<double>(iterator_.GetImmediateOperand(0));
ValueNode* right = GetFloat64Constant(constant);
- SetAccumulator(AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
+ SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
-void MaglevGraphBuilder::BuildFloat64BinaryOperationNode() {
+void MaglevGraphBuilder::BuildFloat64UnaryOperationNode(
+ TaggedToFloat64ConversionType conversion_type) {
// TODO(v8:7700): Do constant folding.
- ValueNode* left = LoadRegisterFloat64(0);
- ValueNode* right = GetAccumulatorFloat64();
+ ValueNode* value = GetAccumulatorFloat64(conversion_type);
+ switch (kOperation) {
+ case Operation::kNegate:
+ SetAccumulator(AddNewNode<Float64Negate>({value}));
+ break;
+ case Operation::kIncrement:
+ SetAccumulator(AddNewNode<Float64Add>({value, GetFloat64Constant(1)}));
+ break;
+ case Operation::kDecrement:
+ SetAccumulator(
+ AddNewNode<Float64Subtract>({value, GetFloat64Constant(1)}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildFloat64BinaryOperationNode(
+ TaggedToFloat64ConversionType conversion_type) {
+ // TODO(v8:7700): Do constant folding.
+ ValueNode* left = LoadRegisterFloat64(0, conversion_type);
+ ValueNode* right = GetAccumulatorFloat64(conversion_type);
+ SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
+}
+
+namespace {
+TaggedToFloat64ConversionType HintToTaggedToFloat64ConversionType(
+ BinaryOperationHint hint) {
+ switch (hint) {
+ case BinaryOperationHint::kSignedSmallInputs:
+ case BinaryOperationHint::kNumber:
+ return TaggedToFloat64ConversionType::kNumber;
+ case BinaryOperationHint::kNumberOrOddball:
+ return TaggedToFloat64ConversionType::kNumberOrOddball;
- SetAccumulator(AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
+ case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kSignedSmall:
+ case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
+ case BinaryOperationHint::kBigInt64:
+ case BinaryOperationHint::kAny:
+ UNREACHABLE();
+ }
}
+} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::VisitUnaryOperation() {
- // TODO(victorgomes): Use feedback info and create optimized versions.
+ FeedbackNexus nexus = FeedbackNexusForOperand(0);
+ switch (nexus.GetBinaryOperationFeedback()) {
+ case BinaryOperationHint::kNone:
+ return EmitUnconditionalDeopt(
+ DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
+ case BinaryOperationHint::kSignedSmall:
+ return BuildInt32UnaryOperationNode<kOperation>();
+ case BinaryOperationHint::kSignedSmallInputs:
+ case BinaryOperationHint::kNumber:
+ case BinaryOperationHint::kNumberOrOddball: {
+ TaggedToFloat64ConversionType conversion_type =
+ HintToTaggedToFloat64ConversionType(
+ nexus.GetBinaryOperationFeedback());
+ if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
+ static_assert(kOperation == Operation::kBitwiseNot);
+ return BuildTruncatingInt32BitwiseNotForNumber(conversion_type);
+ }
+ return BuildFloat64UnaryOperationNode<kOperation>(conversion_type);
+ break;
+ }
+ case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
+ case BinaryOperationHint::kBigInt64:
+ case BinaryOperationHint::kAny:
+ // Fallback to generic node.
+ break;
+ }
BuildGenericUnaryOperationNode<kOperation>();
}
@@ -475,29 +1324,33 @@ void MaglevGraphBuilder::VisitBinaryOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kNone:
- EmitUnconditionalDeopt(
+ return EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
- return;
case BinaryOperationHint::kSignedSmall:
- if (BinaryOperationHasInt32FastPath<kOperation>()) {
- BuildInt32BinaryOperationNode<kOperation>();
- return;
+ if constexpr (kOperation == Operation::kExponentiate) {
+ // Exponentiate never updates the feedback to be a Smi.
+ UNREACHABLE();
+ } else {
+ return BuildInt32BinaryOperationNode<kOperation>();
}
- break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
- if (BinaryOperationHasFloat64FastPath<kOperation>()) {
- BuildFloat64BinaryOperationNode<kOperation>();
- return;
- // } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
- // // Fall back to int32 fast path if there is one (this will be the
- // case
- // // for operations that deal with bits rather than numbers).
- // BuildInt32BinaryOperationNode<kOperation>();
- // return;
+ case BinaryOperationHint::kNumberOrOddball: {
+ TaggedToFloat64ConversionType conversion_type =
+ HintToTaggedToFloat64ConversionType(
+ nexus.GetBinaryOperationFeedback());
+ if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
+ return BuildTruncatingInt32BinaryOperationNodeForNumber<kOperation>(
+ conversion_type);
+ } else {
+ return BuildFloat64BinaryOperationNode<kOperation>(conversion_type);
}
break;
- default:
+ }
+ case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
+ case BinaryOperationHint::kBigInt64:
+ case BinaryOperationHint::kAny:
// Fallback to generic node.
break;
}
@@ -509,45 +1362,48 @@ void MaglevGraphBuilder::VisitBinarySmiOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kNone:
- EmitUnconditionalDeopt(
+ return EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
- return;
case BinaryOperationHint::kSignedSmall:
- if (BinaryOperationHasInt32FastPath<kOperation>()) {
- BuildInt32BinarySmiOperationNode<kOperation>();
- return;
+ if constexpr (kOperation == Operation::kExponentiate) {
+ // Exponentiate never updates the feedback to be a Smi.
+ UNREACHABLE();
+ } else {
+ return BuildInt32BinarySmiOperationNode<kOperation>();
}
- break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
- if (BinaryOperationHasFloat64FastPath<kOperation>()) {
- BuildFloat64BinarySmiOperationNode<kOperation>();
- return;
- // } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
- // // Fall back to int32 fast path if there is one (this will be the
- // case
- // // for operations that deal with bits rather than numbers).
- // BuildInt32BinarySmiOperationNode<kOperation>();
- // return;
+ case BinaryOperationHint::kNumberOrOddball: {
+ TaggedToFloat64ConversionType conversion_type =
+ HintToTaggedToFloat64ConversionType(
+ nexus.GetBinaryOperationFeedback());
+ if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
+ return BuildTruncatingInt32BinarySmiOperationNodeForNumber<kOperation>(
+ conversion_type);
+ } else {
+ return BuildFloat64BinarySmiOperationNode<kOperation>(conversion_type);
}
break;
- default:
+ }
+ case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
+ case BinaryOperationHint::kBigInt64:
+ case BinaryOperationHint::kAny:
// Fallback to generic node.
break;
}
BuildGenericBinarySmiOperationNode<kOperation>();
}
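+// Peephole: when the next bytecode is a conditional jump (e.g. JumpIfTrue or
+// JumpIfFalse), fuse the test and the jump into a single branch control node
+// instead of materializing a boolean result and testing it again.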
-template <typename CompareControlNode>
-bool MaglevGraphBuilder::TryBuildCompareOperation(Operation operation,
- ValueNode* left,
- ValueNode* right) {
+template <typename BranchControlNodeT, typename... Args>
+bool MaglevGraphBuilder::TryBuildBranchFor(
+ std::initializer_list<ValueNode*> control_inputs, Args&&... args) {
// Don't emit the shortcut branch if the next bytecode is a merge target.
if (IsOffsetAMergePoint(next_offset())) return false;
interpreter::Bytecode next_bytecode = iterator_.next_bytecode();
- int true_offset;
- int false_offset;
+ int true_offset, false_offset;
+ int true_interrupt_correction, false_interrupt_correction;
switch (next_bytecode) {
case interpreter::Bytecode::kJumpIfFalse:
case interpreter::Bytecode::kJumpIfFalseConstant:
@@ -560,7 +1416,9 @@ bool MaglevGraphBuilder::TryBuildCompareOperation(Operation operation,
// emitting the test.
iterator_.Advance();
true_offset = next_offset();
+ true_interrupt_correction = 0;
false_offset = iterator_.GetJumpTargetOffset();
+ false_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
break;
case interpreter::Bytecode::kJumpIfTrue:
case interpreter::Bytecode::kJumpIfTrueConstant:
@@ -573,26 +1431,19 @@ bool MaglevGraphBuilder::TryBuildCompareOperation(Operation operation,
// emitting the test.
iterator_.Advance();
true_offset = iterator_.GetJumpTargetOffset();
+ true_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
false_offset = next_offset();
+ false_interrupt_correction = 0;
break;
default:
return false;
}
- BasicBlock* block = FinishBlock<CompareControlNode>(
- {left, right}, operation, &jump_targets_[true_offset],
- &jump_targets_[false_offset]);
- if (true_offset == iterator_.GetJumpTargetOffset()) {
- block->control_node()
- ->Cast<BranchControlNode>()
- ->set_true_interrupt_correction(
- iterator_.GetRelativeJumpTargetOffset());
- } else {
- block->control_node()
- ->Cast<BranchControlNode>()
- ->set_false_interrupt_correction(
- iterator_.GetRelativeJumpTargetOffset());
- }
+ BasicBlock* block = FinishBlock<BranchControlNodeT>(
+ control_inputs, std::forward<Args>(args)..., &jump_targets_[true_offset],
+ true_interrupt_correction, &jump_targets_[false_offset],
+ false_interrupt_correction);
+
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
StartFallthroughBlock(next_offset(), block);
return true;
@@ -606,40 +1457,30 @@ void MaglevGraphBuilder::VisitCompareOperation() {
EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation);
return;
- case CompareOperationHint::kSignedSmall:
- if (BinaryOperationHasInt32FastPath<kOperation>()) {
- ValueNode* left = LoadRegisterInt32(0);
- ValueNode* right = GetAccumulatorInt32();
-
- if (TryBuildCompareOperation<BranchIfInt32Compare>(kOperation, left,
- right)) {
- return;
- }
- SetAccumulator(
- AddNewInt32BinaryOperationNode<kOperation>({left, right}));
+ case CompareOperationHint::kSignedSmall: {
+ ValueNode* left = LoadRegisterInt32(0);
+ ValueNode* right = GetAccumulatorInt32();
+ if (TryBuildBranchFor<BranchIfInt32Compare>({left, right}, kOperation)) {
return;
}
- break;
- case CompareOperationHint::kNumber:
- if (BinaryOperationHasFloat64FastPath<kOperation>()) {
- ValueNode* left = LoadRegisterFloat64(0);
- ValueNode* right = GetAccumulatorFloat64();
-
- if (TryBuildCompareOperation<BranchIfFloat64Compare>(kOperation, left,
- right)) {
- return;
- }
- SetAccumulator(
- AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
+ SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
+ return;
+ }
+ case CompareOperationHint::kNumber: {
+      // TODO(leszeks): we could support kNumberOrOddball with
+      // BranchIfFloat64Compare, but we'd need to special-case comparing
+      // oddballs whose number value is NaN (e.g. undefined) against
+      // themselves.
+ ValueNode* left =
+ LoadRegisterFloat64(0, TaggedToFloat64ConversionType::kNumber);
+ ValueNode* right =
+ GetAccumulatorFloat64(TaggedToFloat64ConversionType::kNumber);
+ if (TryBuildBranchFor<BranchIfFloat64Compare>({left, right},
+ kOperation)) {
return;
- // } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
- // // Fall back to int32 fast path if there is one (this will be the
- // case
- // // for operations that deal with bits rather than numbers).
- // BuildInt32BinaryOperationNode<kOperation>();
- // return;
}
- break;
+ SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
+ return;
+ }
case CompareOperationHint::kInternalizedString: {
DCHECK(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual);
@@ -653,8 +1494,8 @@ void MaglevGraphBuilder::VisitCompareOperation() {
right =
GetInternalizedString(interpreter::Register::virtual_accumulator());
}
- if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
- right)) {
+ if (TryBuildBranchFor<BranchIfReferenceCompare>({left, right},
+ kOperation)) {
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({left, right}));
@@ -667,14 +1508,21 @@ void MaglevGraphBuilder::VisitCompareOperation() {
ValueNode* right = GetAccumulatorTagged();
BuildCheckSymbol(left);
BuildCheckSymbol(right);
- if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
- right)) {
+ if (TryBuildBranchFor<BranchIfReferenceCompare>({left, right},
+ kOperation)) {
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({left, right}));
return;
}
- default:
+ case CompareOperationHint::kNumberOrOddball:
+ case CompareOperationHint::kNumberOrBoolean:
+ case CompareOperationHint::kString:
+ case CompareOperationHint::kBigInt:
+ case CompareOperationHint::kBigInt64:
+ case CompareOperationHint::kReceiver:
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
+ case CompareOperationHint::kAny:
// Fallback to generic node.
break;
}
@@ -716,7 +1564,7 @@ bool MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext(
DCHECK(compilation_unit_->info()->specialize_to_function_context());
size_t new_depth = *depth;
- base::Optional<compiler::ContextRef> maybe_context_ref =
+ compiler::OptionalContextRef maybe_context_ref =
FunctionContextSpecialization::TryToRef(compilation_unit_, *context,
&new_depth);
if (!maybe_context_ref.has_value()) return false;
@@ -728,8 +1576,8 @@ bool MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext(
return false;
}
- base::Optional<compiler::ObjectRef> maybe_slot_value =
- context_ref.get(slot_index);
+ compiler::OptionalObjectRef maybe_slot_value =
+ context_ref.get(broker(), slot_index);
if (!maybe_slot_value.has_value()) {
*depth = new_depth;
*context = GetConstant(context_ref);
@@ -746,7 +1594,7 @@ bool MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext(
//
// See also: JSContextSpecialization::ReduceJSLoadContext.
compiler::OddballType oddball_type =
- slot_value.AsHeapObject().map().oddball_type();
+ slot_value.AsHeapObject().map(broker()).oddball_type(broker());
if (oddball_type == compiler::OddballType::kUndefined ||
oddball_type == compiler::OddballType::kHole) {
*depth = new_depth;
@@ -779,6 +1627,22 @@ ValueNode* MaglevGraphBuilder::LoadAndCacheContextSlot(
return cached_value = AddNewNode<LoadTaggedField>({context}, offset);
}
+void MaglevGraphBuilder::StoreAndCacheContextSlot(ValueNode* context,
+ int offset,
+ ValueNode* value) {
+ DCHECK_EQ(
+ known_node_aspects().loaded_context_constants.count({context, offset}),
+ 0);
+ BuildStoreTaggedField(context, GetTaggedValue(value), offset);
+
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << " * Recording context slot store "
+ << PrintNodeLabel(graph_labeller(), context) << "[" << offset
+ << "]: " << PrintNode(graph_labeller(), value) << std::endl;
+ }
+ known_node_aspects().loaded_context_slots[{context, offset}] = value;
+}
+
void MaglevGraphBuilder::BuildLoadContextSlot(
ValueNode* context, size_t depth, int slot_index,
ContextSlotMutability slot_mutability) {
@@ -804,6 +1668,30 @@ void MaglevGraphBuilder::BuildLoadContextSlot(
context, Context::OffsetOfElementAt(slot_index), kMutable));
}
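+// Walks up the context chain `depth` times (specializing to a constant
+// context where possible), then stores `value` into the slot and records it
+// in the context slot cache.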
+void MaglevGraphBuilder::BuildStoreContextSlot(ValueNode* context, size_t depth,
+ int slot_index,
+ ValueNode* value) {
+ MinimizeContextChainDepth(&context, &depth);
+
+ if (compilation_unit_->info()->specialize_to_function_context()) {
+ compiler::OptionalContextRef maybe_ref =
+ FunctionContextSpecialization::TryToRef(compilation_unit_, context,
+ &depth);
+ if (maybe_ref.has_value()) {
+ context = GetConstant(maybe_ref.value());
+ }
+ }
+
+ for (size_t i = 0; i < depth; ++i) {
+ context = LoadAndCacheContextSlot(
+ context, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX),
+ kImmutable);
+ }
+
+ StoreAndCacheContextSlot(context, Context::OffsetOfElementAt(slot_index),
+ value);
+}
+
void MaglevGraphBuilder::VisitLdaContextSlot() {
ValueNode* context = LoadRegisterTagged(0);
int slot_index = iterator_.GetIndexOperand(1);
@@ -831,35 +1719,12 @@ void MaglevGraphBuilder::VisitStaContextSlot() {
ValueNode* context = LoadRegisterTagged(0);
int slot_index = iterator_.GetIndexOperand(1);
size_t depth = iterator_.GetUnsignedImmediateOperand(2);
-
- MinimizeContextChainDepth(&context, &depth);
-
- if (compilation_unit_->info()->specialize_to_function_context()) {
- base::Optional<compiler::ContextRef> maybe_ref =
- FunctionContextSpecialization::TryToRef(compilation_unit_, context,
- &depth);
- if (maybe_ref.has_value()) {
- context = GetConstant(maybe_ref.value());
- }
- }
-
- for (size_t i = 0; i < depth; ++i) {
- context = LoadAndCacheContextSlot(
- context, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX),
- kImmutable);
- }
-
- AddNewNode<StoreTaggedFieldWithWriteBarrier>(
- {context, GetAccumulatorTagged()},
- Context::OffsetOfElementAt(slot_index));
+ BuildStoreContextSlot(context, depth, slot_index, GetRawAccumulator());
}
void MaglevGraphBuilder::VisitStaCurrentContextSlot() {
ValueNode* context = GetContext();
int slot_index = iterator_.GetIndexOperand(0);
-
- AddNewNode<StoreTaggedFieldWithWriteBarrier>(
- {context, GetAccumulatorTagged()},
- Context::OffsetOfElementAt(slot_index));
+ BuildStoreContextSlot(context, 0, slot_index, GetRawAccumulator());
}
void MaglevGraphBuilder::VisitStar() {
@@ -897,8 +1762,8 @@ void MaglevGraphBuilder::VisitTestReferenceEqual() {
SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
return;
}
- if (TryBuildCompareOperation<BranchIfReferenceCompare>(
- Operation::kStrictEqual, lhs, rhs)) {
+ if (TryBuildBranchFor<BranchIfReferenceCompare>({lhs, rhs},
+ Operation::kStrictEqual)) {
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({lhs, rhs}));
@@ -911,12 +1776,8 @@ void MaglevGraphBuilder::VisitTestUndetectable() {
void MaglevGraphBuilder::VisitTestNull() {
ValueNode* value = GetAccumulatorTagged();
- if (RootConstant* constant = value->TryCast<RootConstant>()) {
- if (constant->index() == RootIndex::kNullValue) {
- SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
- } else {
- SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
- }
+ if (IsConstantNode(value->opcode())) {
+ SetAccumulator(GetBooleanConstant(IsNullValue(value)));
return;
}
ValueNode* null_constant = GetRootConstant(RootIndex::kNullValue);
@@ -925,12 +1786,8 @@ void MaglevGraphBuilder::VisitTestNull() {
void MaglevGraphBuilder::VisitTestUndefined() {
ValueNode* value = GetAccumulatorTagged();
- if (RootConstant* constant = value->TryCast<RootConstant>()) {
- if (constant->index() == RootIndex::kUndefinedValue) {
- SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
- } else {
- SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
- }
+ if (IsConstantNode(value->opcode())) {
+ SetAccumulator(GetBooleanConstant(IsUndefinedValue(value)));
return;
}
ValueNode* undefined_constant = GetRootConstant(RootIndex::kUndefinedValue);
@@ -948,7 +1805,9 @@ void MaglevGraphBuilder::VisitTestTypeOf() {
return;
}
ValueNode* value = GetAccumulatorTagged();
- // TODO(victorgomes): Add fast path for constants.
+ if (TryBuildBranchFor<BranchIfTypeOf>({value}, literal)) {
+ return;
+ }
SetAccumulator(AddNewNode<TestTypeOf>({value}, literal));
}
@@ -956,9 +1815,9 @@ bool MaglevGraphBuilder::TryBuildScriptContextConstantAccess(
const compiler::GlobalAccessFeedback& global_access_feedback) {
if (!global_access_feedback.immutable()) return false;
- base::Optional<compiler::ObjectRef> maybe_slot_value =
+ compiler::OptionalObjectRef maybe_slot_value =
global_access_feedback.script_context().get(
- global_access_feedback.slot_index());
+ broker(), global_access_feedback.slot_index());
if (!maybe_slot_value) return false;
SetAccumulator(GetConstant(maybe_slot_value.value()));
@@ -988,10 +1847,10 @@ bool MaglevGraphBuilder::TryBuildPropertyCellAccess(
compiler::PropertyCellRef property_cell =
global_access_feedback.property_cell();
- if (!property_cell.Cache()) return false;
+ if (!property_cell.Cache(broker())) return false;
- compiler::ObjectRef property_cell_value = property_cell.value();
- if (property_cell_value.IsTheHole()) {
+ compiler::ObjectRef property_cell_value = property_cell.value(broker());
+ if (property_cell_value.IsTheHole(broker())) {
// The property cell is no longer valid.
EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
@@ -1062,8 +1921,7 @@ void MaglevGraphBuilder::VisitStaGlobal() {
// TODO(v8:7700): Add fast path.
ValueNode* context = GetContext();
- SetAccumulator(
- AddNewNode<StoreGlobal>({context, value}, name, feedback_source));
+ AddNewNode<StoreGlobal>({context, value}, name, feedback_source);
}
void MaglevGraphBuilder::VisitLdaLookupSlot() {
@@ -1086,8 +1944,16 @@ void MaglevGraphBuilder::VisitLdaLookupGlobalSlot() {
ValueNode* name = GetConstant(GetRefOperand<Name>(0));
ValueNode* slot = GetSmiConstant(iterator_.GetIndexOperand(1));
ValueNode* depth = GetSmiConstant(iterator_.GetUnsignedImmediateOperand(2));
- SetAccumulator(BuildCallBuiltin<Builtin::kLookupGlobalICTrampoline>(
- {name, depth, slot}));
+ ValueNode* result;
+ if (parent_) {
+ ValueNode* vector = GetConstant(feedback());
+ result =
+ BuildCallBuiltin<Builtin::kLookupGlobalIC>({name, depth, slot, vector});
+ } else {
+ result = BuildCallBuiltin<Builtin::kLookupGlobalICTrampoline>(
+ {name, depth, slot});
+ }
+ SetAccumulator(result);
}
void MaglevGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
@@ -1112,9 +1978,16 @@ void MaglevGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
ValueNode* name = GetConstant(GetRefOperand<Name>(0));
ValueNode* slot = GetSmiConstant(iterator_.GetIndexOperand(1));
ValueNode* depth = GetSmiConstant(iterator_.GetUnsignedImmediateOperand(2));
- SetAccumulator(
- BuildCallBuiltin<Builtin::kLookupGlobalICInsideTypeofTrampoline>(
- {name, depth, slot}));
+ ValueNode* result;
+ if (parent_) {
+ ValueNode* vector = GetConstant(feedback());
+ result = BuildCallBuiltin<Builtin::kLookupGlobalICInsideTypeof>(
+ {name, depth, slot, vector});
+ } else {
+ result = BuildCallBuiltin<Builtin::kLookupGlobalICInsideTypeofTrampoline>(
+ {name, depth, slot});
+ }
+ SetAccumulator(result);
}
namespace {
@@ -1143,9 +2016,12 @@ void MaglevGraphBuilder::VisitStaLookupSlot() {
namespace {
NodeType StaticTypeForNode(ValueNode* node) {
- DCHECK(node->is_tagged());
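+  // Untagged values (Int32, Uint32, Float64) always represent numbers.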
+ if (!node->is_tagged()) {
+ return NodeType::kNumber;
+ }
switch (node->opcode()) {
- case Opcode::kCheckedSmiTag:
+ case Opcode::kCheckedSmiTagInt32:
+ case Opcode::kCheckedSmiTagUint32:
case Opcode::kSmiConstant:
return NodeType::kSmi;
case Opcode::kConstant: {
@@ -1161,6 +2037,18 @@ NodeType StaticTypeForNode(ValueNode* node) {
}
return NodeType::kHeapObjectWithKnownMap;
}
+ case Opcode::kLoadPolymorphicTaggedField: {
+ Representation field_representation =
+ node->Cast<LoadPolymorphicTaggedField>()->field_representation();
+ switch (field_representation.kind()) {
+ case Representation::kSmi:
+ return NodeType::kSmi;
+ case Representation::kHeapObject:
+ return NodeType::kAnyHeapObject;
+ default:
+ return NodeType::kUnknown;
+ }
+ }
case Opcode::kToNumberOrNumeric:
if (node->Cast<ToNumberOrNumeric>()->mode() ==
Object::Conversion::kToNumber) {
@@ -1170,6 +2058,8 @@ NodeType StaticTypeForNode(ValueNode* node) {
return NodeType::kUnknown;
case Opcode::kToString:
return NodeType::kString;
+ case Opcode::kCheckedInternalizedString:
+ return NodeType::kInternalizedString;
case Opcode::kToObject:
return NodeType::kJSReceiver;
case Opcode::kToName:
@@ -1180,13 +2070,18 @@ NodeType StaticTypeForNode(ValueNode* node) {
}
} // namespace
-NodeInfo* MaglevGraphBuilder::CreateInfoIfNot(ValueNode* node, NodeType type) {
+bool MaglevGraphBuilder::EnsureType(ValueNode* node, NodeType type,
+ NodeType* old_type) {
NodeType static_type = StaticTypeForNode(node);
- if (NodeTypeIs(static_type, type)) return nullptr;
+ if (NodeTypeIs(static_type, type)) {
+ if (old_type) *old_type = static_type;
+ return true;
+ }
NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(node);
- if (NodeTypeIs(known_info->type, type)) return nullptr;
- known_info->type = CombineType(known_info->type, static_type);
- return known_info;
+ if (old_type) *old_type = known_info->type;
+ if (NodeTypeIs(known_info->type, type)) return true;
+ known_info->type = CombineType(known_info->type, type);
+ return false;
}
bool MaglevGraphBuilder::CheckType(ValueNode* node, NodeType type) {
@@ -1196,15 +2091,6 @@ bool MaglevGraphBuilder::CheckType(ValueNode* node, NodeType type) {
return NodeTypeIs(it->second.type, type);
}
-bool MaglevGraphBuilder::EnsureType(ValueNode* node, NodeType type,
- NodeType* old) {
- NodeInfo* known_info = CreateInfoIfNot(node, type);
- if (known_info == nullptr) return true;
- if (old != nullptr) *old = known_info->type;
- known_info->type = CombineType(known_info->type, type);
- return false;
-}
-
ValueNode* MaglevGraphBuilder::BuildSmiUntag(ValueNode* node) {
if (EnsureType(node, NodeType::kSmi)) {
return AddNewNode<UnsafeSmiUntag>({node});
@@ -1213,6 +2099,22 @@ ValueNode* MaglevGraphBuilder::BuildSmiUntag(ValueNode* node) {
}
}
+ValueNode* MaglevGraphBuilder::BuildNumberOrOddballToFloat64(
+ ValueNode* node, TaggedToFloat64ConversionType conversion_type) {
+ NodeType old_type;
+ if (EnsureType(node, TaggedToFloat64ConversionTypeToNodeType(conversion_type),
+ &old_type)) {
+ if (old_type == NodeType::kSmi) {
+ ValueNode* untagged_smi = AddNewNode<UnsafeSmiUntag>({node});
+ return AddNewNode<ChangeInt32ToFloat64>({untagged_smi});
+ }
+ return AddNewNode<UncheckedNumberOrOddballToFloat64>({node},
+ conversion_type);
+ } else {
+ return AddNewNode<CheckedNumberOrOddballToFloat64>({node}, conversion_type);
+ }
+}
+
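
Editor's note: as a reading aid, the decision tree in BuildNumberOrOddballToFloat64 can be summarized by the small standalone function below. The node names in the strings mirror the ones used above, but the function itself is purely illustrative and not part of V8.

#include <cstdio>

// Stand-in for the type knowledge EnsureType() reports about the input.
enum class Known { kSmi, kNumberOrOddball, kUnknown };

// Returns the name of the conversion the builder would emit for each case.
const char* SelectFloat64Conversion(Known known) {
  switch (known) {
    case Known::kSmi:
      // Proven Smi: untag to int32, then a cheap int32 -> float64 widening.
      return "UnsafeSmiUntag + ChangeInt32ToFloat64";
    case Known::kNumberOrOddball:
      // Already proven to be a Number/Oddball: no deopt check needed.
      return "UncheckedNumberOrOddballToFloat64";
    case Known::kUnknown:
      // Nothing proven: emit the checked conversion, which may deopt.
      return "CheckedNumberOrOddballToFloat64";
  }
  return "";
}

int main() {
  std::printf("%s\n", SelectFloat64Conversion(Known::kSmi));
  std::printf("%s\n", SelectFloat64Conversion(Known::kUnknown));
}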
void MaglevGraphBuilder::BuildCheckSmi(ValueNode* object) {
if (EnsureType(object, NodeType::kSmi)) return;
AddNewNode<CheckSmi>({object});
@@ -1252,13 +2154,15 @@ namespace {
class KnownMapsMerger {
public:
- explicit KnownMapsMerger(compiler::JSHeapBroker* broker, ValueNode* object,
- KnownNodeAspects& known_node_aspects,
- ZoneVector<compiler::MapRef> const& maps)
+ explicit KnownMapsMerger(compiler::JSHeapBroker* broker,
+ base::Vector<const compiler::MapRef> maps)
: broker_(broker),
maps_(maps),
known_maps_are_subset_of_maps_(true),
- emit_check_with_migration_(false) {
+ emit_check_with_migration_(false) {}
+
+ void IntersectWithKnownNodeAspects(
+ ValueNode* object, const KnownNodeAspects& known_node_aspects) {
// A non-value value here means the universal set, i.e., we don't know
// anything about the possible maps of the object.
base::Optional<ZoneHandleSet<Map>> known_stable_map_set =
@@ -1268,7 +2172,10 @@ class KnownMapsMerger {
IntersectKnownMaps(known_stable_map_set, true);
IntersectKnownMaps(known_unstable_map_set, false);
+ }
+ void UpdateKnownNodeAspects(ValueNode* object,
+ KnownNodeAspects& known_node_aspects) {
// Update known maps.
known_node_aspects.stable_maps[object] = stable_map_set_;
known_node_aspects.unstable_maps[object] = unstable_map_set_;
@@ -1290,7 +2197,7 @@ class KnownMapsMerger {
private:
compiler::JSHeapBroker* broker_;
- ZoneVector<compiler::MapRef> const& maps_;
+ base::Vector<const compiler::MapRef> maps_;
bool known_maps_are_subset_of_maps_;
bool emit_check_with_migration_;
ZoneHandleSet<Map> stable_map_set_;
@@ -1309,7 +2216,7 @@ class KnownMapsMerger {
return it->second;
}
- base::Optional<compiler::MapRef> GetMapRefFromMaps(Handle<Map> handle) {
+ compiler::OptionalMapRef GetMapRefFromMaps(Handle<Map> handle) {
auto it =
std::find_if(maps_.begin(), maps_.end(), [&](compiler::MapRef map_ref) {
return map_ref.object().is_identical_to(handle);
@@ -1322,7 +2229,13 @@ class KnownMapsMerger {
if (map.is_migration_target()) {
emit_check_with_migration_ = true;
}
- if (!map.IsJSReceiverMap()) node_type_ = NodeType::kHeapObjectWithKnownMap;
+ if (map.IsHeapNumberMap()) {
+ // If this is a heap number map, the object may be a Smi, so mask away
+ // the known HeapObject bit.
+ node_type_ = IntersectType(node_type_, NodeType::kObjectWithKnownMap);
+ } else if (!map.IsJSReceiverMap()) {
+ node_type_ = IntersectType(node_type_, NodeType::kHeapObjectWithKnownMap);
+ }
if (map.is_stable()) {
// TODO(victorgomes): Add a DCHECK_SLOW that checks if the map already
// exists in the CompilationDependencySet for the else branch.
@@ -1362,7 +2275,7 @@ class KnownMapsMerger {
} // namespace
void MaglevGraphBuilder::BuildCheckMaps(
- ValueNode* object, ZoneVector<compiler::MapRef> const& maps) {
+ ValueNode* object, base::Vector<const compiler::MapRef> maps) {
// TODO(verwaest): Support other objects with possible known stable maps as
// well.
if (object->Is<Constant>()) {
@@ -1370,7 +2283,8 @@ void MaglevGraphBuilder::BuildCheckMaps(
// don't need to emit a map check, and can use the dependency -- we
// can't do this for unstable maps because the constant could migrate
// during compilation.
- compiler::MapRef constant_map = object->Cast<Constant>()->object().map();
+ compiler::MapRef constant_map =
+ object->Cast<Constant>()->object().map(broker());
if (std::find(maps.begin(), maps.end(), constant_map) != maps.end()) {
if (constant_map.is_stable()) {
broker()->dependencies()->DependOnStableMap(constant_map);
@@ -1388,11 +2302,13 @@ void MaglevGraphBuilder::BuildCheckMaps(
// Calculates if known maps are a subset of maps, their map intersection and
// whether we should emit check with migration.
- KnownMapsMerger merger(broker(), object, known_node_aspects(), maps);
+ KnownMapsMerger merger(broker(), maps);
+ merger.IntersectWithKnownNodeAspects(object, known_node_aspects());
+ merger.UpdateKnownNodeAspects(object, known_node_aspects());
// If the known maps are the subset of the maps to check, we are done.
if (merger.known_maps_are_subset_of_maps()) {
- DCHECK(NodeTypeIs(known_info->type, NodeType::kHeapObjectWithKnownMap));
+ DCHECK(NodeTypeIs(known_info->type, merger.node_type()));
return;
}
@@ -1413,16 +2329,91 @@ void MaglevGraphBuilder::BuildCheckMaps(
known_info->type = merger.node_type();
}
-bool MaglevGraphBuilder::TryFoldLoadDictPrototypeConstant(
+namespace {
+AllocateRaw* GetAllocation(ValueNode* object) {
+ if (object->Is<FoldedAllocation>()) {
+ object = object->Cast<FoldedAllocation>()->input(0).node();
+ }
+ if (object->Is<AllocateRaw>()) {
+ return object->Cast<AllocateRaw>();
+ }
+ return nullptr;
+}
+} // namespace
+
+bool MaglevGraphBuilder::CanElideWriteBarrier(ValueNode* object,
+ ValueNode* value) {
+ if (value->Is<RootConstant>()) return true;
+ if (value->Is<SmiConstant>()) return true;
+ if (CheckType(value, NodeType::kSmi)) return true;
+
+ // No need for a write barrier if both object and value are part of the same
+ // folded young allocation.
+ AllocateRaw* allocation = GetAllocation(object);
+ if (allocation != nullptr &&
+ allocation->allocation_type() == AllocationType::kYoung &&
+ allocation == GetAllocation(value)) {
+ return true;
+ }
+
+ return false;
+}
+
+void MaglevGraphBuilder::BuildStoreTaggedField(ValueNode* object,
+ ValueNode* value, int offset) {
+ if (CanElideWriteBarrier(object, value)) {
+ AddNewNode<StoreTaggedFieldNoWriteBarrier>({object, value}, offset);
+ } else {
+ AddNewNode<StoreTaggedFieldWithWriteBarrier>({object, value}, offset);
+ }
+}
+
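
Editor's note: CanElideWriteBarrier and BuildStoreTaggedField together implement standard generational write-barrier elision — immediates (Smis, root constants) never need a barrier, and neither does a store whose target and value belong to the same young folded allocation. A toy model of that decision, using simplified stand-in structs rather than Maglev nodes:

// Simplified stand-ins; only the fields the decision needs are modeled.
struct Allocation { bool young; };

struct Value {
  bool is_smi_or_root_constant = false;
  const Allocation* allocation = nullptr;  // Enclosing folded allocation, if any.
};

// Mirrors CanElideWriteBarrier: immediates never need a barrier, nor does a
// store into the same young allocation the value itself came from.
bool CanElideWriteBarrier(const Value& object, const Value& value) {
  if (value.is_smi_or_root_constant) return true;
  return object.allocation != nullptr && object.allocation->young &&
         object.allocation == value.allocation;
}

// Mirrors BuildStoreTaggedField: pick the barrier-free store when possible.
enum class StoreKind { kNoWriteBarrier, kWithWriteBarrier };

StoreKind SelectStore(const Value& object, const Value& value) {
  return CanElideWriteBarrier(object, value) ? StoreKind::kNoWriteBarrier
                                             : StoreKind::kWithWriteBarrier;
}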
+void MaglevGraphBuilder::BuildStoreTaggedFieldNoWriteBarrier(ValueNode* object,
+ ValueNode* value,
+ int offset) {
+ DCHECK(CanElideWriteBarrier(object, value));
+ AddNewNode<StoreTaggedFieldNoWriteBarrier>({object, value}, offset);
+}
+
+void MaglevGraphBuilder::BuildStoreFixedArrayElement(ValueNode* elements,
+ ValueNode* index,
+ ValueNode* value) {
+ if (CanElideWriteBarrier(elements, value)) {
+ AddNewNode<StoreFixedArrayElementNoWriteBarrier>({elements, index, value});
+ } else {
+ AddNewNode<StoreFixedArrayElementWithWriteBarrier>(
+ {elements, index, value});
+ }
+}
+
+bool MaglevGraphBuilder::CanTreatHoleAsUndefined(
+ base::Vector<const compiler::MapRef> const& receiver_maps) {
+ // Check if all {receiver_maps} have one of the initial Array.prototype
+ // or Object.prototype objects as their prototype (in any of the current
+ // native contexts, as the global Array protector works isolate-wide).
+ for (compiler::MapRef receiver_map : receiver_maps) {
+ compiler::ObjectRef receiver_prototype = receiver_map.prototype(broker());
+ if (!receiver_prototype.IsJSObject() ||
+ !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
+ return false;
+ }
+ }
+
+ // Check if the array prototype chain is intact.
+ return broker()->dependencies()->DependOnNoElementsProtector();
+}
+
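
Editor's note: CanTreatHoleAsUndefined mirrors TurboFan's rule — holes may be loaded as undefined only if every receiver prototype is one of the initial Array/Object prototypes and the no-elements protector can still be depended on. A simplified standalone version of that predicate; the Proto tags and the protector flag are illustrative stand-ins, not broker APIs.

#include <vector>

enum class Proto { kInitialArrayPrototype, kInitialObjectPrototype, kOther };

struct MapModel { Proto prototype; };

// Stand-in for DependOnNoElementsProtector(): fails if the protector has
// already been invalidated (e.g. someone installed indexed accessors).
bool DependOnNoElementsProtector(bool protector_intact) {
  return protector_intact;
}

bool CanTreatHoleAsUndefined(const std::vector<MapModel>& receiver_maps,
                             bool protector_intact) {
  for (const MapModel& map : receiver_maps) {
    if (map.prototype == Proto::kOther) return false;
  }
  return DependOnNoElementsProtector(protector_intact);
}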
+compiler::OptionalObjectRef
+MaglevGraphBuilder::TryFoldLoadDictPrototypeConstant(
compiler::PropertyAccessInfo access_info) {
DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
DCHECK(access_info.IsDictionaryProtoDataConstant());
DCHECK(access_info.holder().has_value());
- base::Optional<compiler::ObjectRef> constant =
+ compiler::OptionalObjectRef constant =
access_info.holder()->GetOwnDictionaryProperty(
- access_info.dictionary_index(), broker()->dependencies());
- if (!constant.has_value()) return false;
+ broker(), access_info.dictionary_index(), broker()->dependencies());
+ if (!constant.has_value()) return {};
for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
Handle<Map> map_handle = map.object();
@@ -1444,32 +2435,27 @@ bool MaglevGraphBuilder::TryFoldLoadDictPrototypeConstant(
map, access_info.name(), constant.value(), PropertyKind::kData);
}
- SetAccumulator(GetConstant(constant.value()));
- return true;
+ return constant;
}
-bool MaglevGraphBuilder::TryFoldLoadConstantDataField(
+compiler::OptionalObjectRef MaglevGraphBuilder::TryFoldLoadConstantDataField(
compiler::PropertyAccessInfo access_info, ValueNode* lookup_start_object) {
- if (!access_info.IsFastDataConstant()) return false;
- base::Optional<compiler::JSObjectRef> source;
+ if (!access_info.IsFastDataConstant()) return {};
+ compiler::OptionalJSObjectRef source;
if (access_info.holder().has_value()) {
source = access_info.holder();
} else if (Constant* n = lookup_start_object->TryCast<Constant>()) {
- if (!n->ref().IsJSObject()) return false;
+ if (!n->ref().IsJSObject()) return {};
source = n->ref().AsJSObject();
} else {
- return false;
+ return {};
}
- base::Optional<compiler::ObjectRef> constant =
- source.value().GetOwnFastDataProperty(access_info.field_representation(),
- access_info.field_index(),
- broker()->dependencies());
- if (!constant.has_value()) return false;
- SetAccumulator(GetConstant(constant.value()));
- return true;
+ return source.value().GetOwnFastDataProperty(
+ broker(), access_info.field_representation(), access_info.field_index(),
+ broker()->dependencies());
}
-bool MaglevGraphBuilder::TryBuildPropertyGetterCall(
+ReduceResult MaglevGraphBuilder::TryBuildPropertyGetterCall(
compiler::PropertyAccessInfo access_info, ValueNode* receiver,
ValueNode* lookup_start_object) {
compiler::ObjectRef constant = access_info.constant().value();
@@ -1488,41 +2474,49 @@ bool MaglevGraphBuilder::TryBuildPropertyGetterCall(
receiver == lookup_start_object
? ConvertReceiverMode::kNotNullOrUndefined
: ConvertReceiverMode::kAny;
- Call* call = CreateNewNode<Call>(Call::kFixedInputCount + 1, receiver_mode,
- Call::TargetType::kJSFunction,
- compiler::FeedbackSource(),
- GetConstant(constant), GetContext());
- call->set_arg(0, receiver);
- SetAccumulator(AddNode(call));
- return true;
+ CallArguments args(receiver_mode, {receiver});
+ return ReduceCall(constant.AsJSFunction(), args);
+ } else if (receiver != lookup_start_object) {
+ return ReduceResult::Fail();
} else {
- // TODO(victorgomes): API calls.
- return false;
+ ValueNode* api_holder = access_info.api_holder().has_value()
+ ? GetConstant(access_info.api_holder().value())
+ : receiver;
+ compiler::FunctionTemplateInfoRef templ = constant.AsFunctionTemplateInfo();
+ if (!templ.call_code(broker()).has_value()) return ReduceResult::Fail();
+
+ compiler::CallHandlerInfoRef call_handler_info =
+ templ.call_code(broker()).value();
+ ApiFunction function(call_handler_info.callback());
+ ExternalReference reference = ExternalReference::Create(
+ &function, ExternalReference::DIRECT_API_CALL);
+ return BuildCallBuiltin<Builtin::kCallApiCallback>(
+ {GetExternalConstant(reference), GetInt32Constant(0),
+ GetConstant(call_handler_info.data(broker())), api_holder, receiver});
}
}
-bool MaglevGraphBuilder::TryBuildPropertySetterCall(
+ReduceResult MaglevGraphBuilder::TryBuildPropertySetterCall(
compiler::PropertyAccessInfo access_info, ValueNode* receiver,
ValueNode* value) {
compiler::ObjectRef constant = access_info.constant().value();
if (constant.IsJSFunction()) {
- Call* call = CreateNewNode<Call>(
- Call::kFixedInputCount + 2, ConvertReceiverMode::kNotNullOrUndefined,
- Call::TargetType::kJSFunction, compiler::FeedbackSource(),
- GetConstant(constant), GetContext());
- call->set_arg(0, receiver);
- call->set_arg(1, value);
- SetAccumulator(AddNode(call));
- return true;
+ CallArguments args(ConvertReceiverMode::kNotNullOrUndefined,
+ {receiver, value});
+ return ReduceCall(constant.AsJSFunction(), args);
} else {
// TODO(victorgomes): API calls.
- return false;
+ return ReduceResult::Fail();
}
}
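
Editor's note: much of this patch converts bool-returning try-helpers to ReduceResult, which can carry a produced value, report a recoverable failure (fall through to the generic path), or signal that code was emitted but the continuation is dead (e.g. after an unconditional deopt). The sketch below shows one plausible shape for such a tri-state result; it is a guess at the idea, not V8's actual class.

#include <cassert>

struct ValueNode;  // Opaque in this sketch.

class ReduceResult {
 public:
  // Implicit conversion mirrors how the helpers above return a ValueNode*
  // directly from a ReduceResult-typed function.
  ReduceResult(ValueNode* value) : kind_(kDone), value_(value) {}

  static ReduceResult Done(ValueNode* value = nullptr) {
    return ReduceResult(kDone, value);
  }
  static ReduceResult DoneWithAbort() { return ReduceResult(kAbort, nullptr); }
  static ReduceResult Fail() { return ReduceResult(kFail, nullptr); }

  bool IsDone() const { return kind_ != kFail; }  // Done or done-with-abort.
  bool IsDoneWithAbort() const { return kind_ == kAbort; }
  bool IsFail() const { return kind_ == kFail; }

  ValueNode* value() const {
    assert(kind_ == kDone);
    return value_;
  }

 private:
  enum Kind { kDone, kAbort, kFail };
  ReduceResult(Kind kind, ValueNode* value) : kind_(kind), value_(value) {}
  Kind kind_;
  ValueNode* value_;
};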
-void MaglevGraphBuilder::BuildLoadField(
+ValueNode* MaglevGraphBuilder::BuildLoadField(
compiler::PropertyAccessInfo access_info, ValueNode* lookup_start_object) {
- if (TryFoldLoadConstantDataField(access_info, lookup_start_object)) return;
+ compiler::OptionalObjectRef constant =
+ TryFoldLoadConstantDataField(access_info, lookup_start_object);
+ if (constant.has_value()) {
+ return GetConstant(constant.value());
+ }
// Resolve property holder.
ValueNode* load_source;
@@ -1541,47 +2535,49 @@ void MaglevGraphBuilder::BuildLoadField(
// Do the load.
if (field_index.is_double()) {
- SetAccumulator(
- AddNewNode<LoadDoubleField>({load_source}, field_index.offset()));
- } else {
- ValueNode* value =
- AddNewNode<LoadTaggedField>({load_source}, field_index.offset());
- SetAccumulator(value);
- // Insert stable field information if present.
- if (access_info.field_representation().IsSmi()) {
- NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(value);
- known_info->type = NodeType::kSmi;
- } else if (access_info.field_representation().IsHeapObject()) {
- NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (access_info.field_map().has_value() &&
- access_info.field_map().value().is_stable()) {
- DCHECK(access_info.field_map().value().IsJSReceiverMap());
- known_info->type = NodeType::kJSReceiverWithKnownMap;
- auto map = access_info.field_map().value();
- ZoneHandleSet<Map> stable_maps(map.object());
- ZoneHandleSet<Map> unstable_maps;
- known_node_aspects().stable_maps.emplace(value, stable_maps);
- known_node_aspects().unstable_maps.emplace(value, unstable_maps);
- broker()->dependencies()->DependOnStableMap(map);
- } else {
- known_info->type = NodeType::kAnyHeapObject;
- }
+ return AddNewNode<LoadDoubleField>({load_source}, field_index.offset());
+ }
+ ValueNode* value =
+ AddNewNode<LoadTaggedField>({load_source}, field_index.offset());
+ // Insert stable field information if present.
+ if (access_info.field_representation().IsSmi()) {
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(value);
+ known_info->type = NodeType::kSmi;
+ } else if (access_info.field_representation().IsHeapObject()) {
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (access_info.field_map().has_value() &&
+ access_info.field_map().value().is_stable()) {
+ DCHECK(access_info.field_map().value().IsJSReceiverMap());
+ known_info->type = NodeType::kJSReceiverWithKnownMap;
+ auto map = access_info.field_map().value();
+ ZoneHandleSet<Map> stable_maps(map.object());
+ ZoneHandleSet<Map> unstable_maps;
+ known_node_aspects().stable_maps.emplace(value, stable_maps);
+ known_node_aspects().unstable_maps.emplace(value, unstable_maps);
+ broker()->dependencies()->DependOnStableMap(map);
+ } else {
+ known_info->type = NodeType::kAnyHeapObject;
}
}
+ return value;
}
-bool MaglevGraphBuilder::TryBuildStoreField(
- compiler::PropertyAccessInfo access_info, ValueNode* receiver) {
+ReduceResult MaglevGraphBuilder::TryBuildStoreField(
+ compiler::PropertyAccessInfo access_info, ValueNode* receiver,
+ compiler::AccessMode access_mode) {
FieldIndex field_index = access_info.field_index();
Representation field_representation = access_info.field_representation();
if (access_info.HasTransitionMap()) {
compiler::MapRef transition = access_info.transition_map().value();
- compiler::MapRef original_map = transition.GetBackPointer().AsMap();
+ compiler::MapRef original_map = transition.GetBackPointer(broker()).AsMap();
// TODO(verwaest): Support growing backing stores.
- if (original_map.UnusedPropertyFields() == 0) return false;
- } else if (access_info.IsFastDataConstant()) {
- return false;
+ if (original_map.UnusedPropertyFields() == 0) {
+ return ReduceResult::Fail();
+ }
+ } else if (access_info.IsFastDataConstant() &&
+ access_mode == compiler::AccessMode::kStore) {
+ return ReduceResult::DoneWithAbort();
}
ValueNode* store_target;
@@ -1595,22 +2591,20 @@ bool MaglevGraphBuilder::TryBuildStoreField(
ValueNode* value;
if (field_representation.IsDouble()) {
- value = GetAccumulatorFloat64();
+ value = GetAccumulatorFloat64(TaggedToFloat64ConversionType::kNumber);
if (access_info.HasTransitionMap()) {
// Allocate the mutable double box owned by the field.
- value = AddNewNode<Float64Box>({value});
+ value = AddNewNode<Float64ToTagged>(
+ {value}, Float64ToTagged::ConversionMode::kForceHeapNumber);
}
} else {
value = GetAccumulatorTagged();
- if (field_representation.IsSmi()) {
- BuildCheckSmi(value);
- } else if (field_representation.IsHeapObject()) {
+ if (field_representation.IsHeapObject()) {
// Emit a map check for the field type, if needed, otherwise just a
// HeapObject check.
if (access_info.field_map().has_value()) {
- ZoneVector<compiler::MapRef> maps({access_info.field_map().value()},
- zone());
- BuildCheckMaps(value, maps);
+ BuildCheckMaps(value,
+ base::VectorOf({access_info.field_map().value()}));
} else {
BuildCheckHeapObject(value);
}
@@ -1618,8 +2612,8 @@ bool MaglevGraphBuilder::TryBuildStoreField(
}
if (field_representation.IsSmi()) {
- AddNewNode<StoreTaggedFieldNoWriteBarrier>({store_target, value},
- field_index.offset());
+ AddNewNode<CheckedStoreSmiField>({store_target, value},
+ field_index.offset());
} else if (value->use_double_register()) {
DCHECK(field_representation.IsDouble());
DCHECK(!access_info.HasTransitionMap());
@@ -1628,8 +2622,7 @@ bool MaglevGraphBuilder::TryBuildStoreField(
DCHECK(field_representation.IsHeapObject() ||
field_representation.IsTagged() ||
(field_representation.IsDouble() && access_info.HasTransitionMap()));
- AddNewNode<StoreTaggedFieldWithWriteBarrier>({store_target, value},
- field_index.offset());
+ BuildStoreTaggedField(store_target, value, field_index.offset());
}
if (access_info.HasTransitionMap()) {
@@ -1652,10 +2645,10 @@ bool MaglevGraphBuilder::TryBuildStoreField(
}
}
- return true;
+ return ReduceResult::Done();
}
-bool MaglevGraphBuilder::TryBuildPropertyLoad(
+ReduceResult MaglevGraphBuilder::TryBuildPropertyLoad(
ValueNode* receiver, ValueNode* lookup_start_object, compiler::NameRef name,
compiler::PropertyAccessInfo const& access_info) {
if (access_info.holder().has_value() && !access_info.HasDictionaryHolder()) {
@@ -1668,38 +2661,40 @@ bool MaglevGraphBuilder::TryBuildPropertyLoad(
case compiler::PropertyAccessInfo::kInvalid:
UNREACHABLE();
case compiler::PropertyAccessInfo::kNotFound:
- SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue));
- return true;
+ return GetRootConstant(RootIndex::kUndefinedValue);
case compiler::PropertyAccessInfo::kDataField:
- case compiler::PropertyAccessInfo::kFastDataConstant:
- BuildLoadField(access_info, lookup_start_object);
- RecordKnownProperty(lookup_start_object, name,
- current_interpreter_frame_.accumulator(),
- access_info.IsFastDataConstant());
- return true;
- case compiler::PropertyAccessInfo::kDictionaryProtoDataConstant:
- return TryFoldLoadDictPrototypeConstant(access_info);
+ case compiler::PropertyAccessInfo::kFastDataConstant: {
+ ValueNode* result = BuildLoadField(access_info, lookup_start_object);
+ RecordKnownProperty(lookup_start_object, name, result, access_info);
+ return result;
+ }
+ case compiler::PropertyAccessInfo::kDictionaryProtoDataConstant: {
+ compiler::OptionalObjectRef constant =
+ TryFoldLoadDictPrototypeConstant(access_info);
+ if (!constant.has_value()) return ReduceResult::Fail();
+ return GetConstant(constant.value());
+ }
case compiler::PropertyAccessInfo::kFastAccessorConstant:
case compiler::PropertyAccessInfo::kDictionaryProtoAccessorConstant:
return TryBuildPropertyGetterCall(access_info, receiver,
lookup_start_object);
case compiler::PropertyAccessInfo::kModuleExport: {
ValueNode* cell = GetConstant(access_info.constant().value().AsCell());
- SetAccumulator(AddNewNode<LoadTaggedField>({cell}, Cell::kValueOffset));
- return true;
+ return AddNewNode<LoadTaggedField>({cell}, Cell::kValueOffset);
}
- case compiler::PropertyAccessInfo::kStringLength:
+ case compiler::PropertyAccessInfo::kStringLength: {
DCHECK_EQ(receiver, lookup_start_object);
- SetAccumulator(AddNewNode<StringLength>({receiver}));
- RecordKnownProperty(lookup_start_object, name,
- current_interpreter_frame_.accumulator(), true);
- return true;
+ ValueNode* result = AddNewNode<StringLength>({receiver});
+ RecordKnownProperty(lookup_start_object, name, result, access_info);
+ return result;
+ }
}
}
-bool MaglevGraphBuilder::TryBuildPropertyStore(
+ReduceResult MaglevGraphBuilder::TryBuildPropertyStore(
ValueNode* receiver, compiler::NameRef name,
- compiler::PropertyAccessInfo const& access_info) {
+ compiler::PropertyAccessInfo const& access_info,
+ compiler::AccessMode access_mode) {
if (access_info.holder().has_value()) {
broker()->dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
@@ -1711,17 +2706,17 @@ bool MaglevGraphBuilder::TryBuildPropertyStore(
GetAccumulatorTagged());
} else {
DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant());
- if (TryBuildStoreField(access_info, receiver)) {
+ if (TryBuildStoreField(access_info, receiver, access_mode).IsDone()) {
RecordKnownProperty(receiver, name,
current_interpreter_frame_.accumulator(),
- access_info.IsFastDataConstant());
- return true;
+ access_info);
+ return ReduceResult::Done();
}
- return false;
+ return ReduceResult::Fail();
}
}
-bool MaglevGraphBuilder::TryBuildPropertyAccess(
+ReduceResult MaglevGraphBuilder::TryBuildPropertyAccess(
ValueNode* receiver, ValueNode* lookup_start_object, compiler::NameRef name,
compiler::PropertyAccessInfo const& access_info,
compiler::AccessMode access_mode) {
@@ -1733,67 +2728,79 @@ bool MaglevGraphBuilder::TryBuildPropertyAccess(
case compiler::AccessMode::kStoreInLiteral:
case compiler::AccessMode::kDefine:
DCHECK_EQ(receiver, lookup_start_object);
- return TryBuildPropertyStore(receiver, name, access_info);
+ return TryBuildPropertyStore(receiver, name, access_info, access_mode);
case compiler::AccessMode::kHas:
// TODO(victorgomes): BuildPropertyTest.
- return false;
- }
-}
-
-namespace {
-bool HasOnlyStringMaps(ZoneVector<compiler::MapRef> const& maps) {
- for (compiler::MapRef map : maps) {
- if (!map.IsStringMap()) return false;
+ return ReduceResult::Fail();
}
- return true;
}
-bool HasOnlyNumberMaps(ZoneVector<compiler::MapRef> const& maps) {
- for (compiler::MapRef map : maps) {
- if (map.instance_type() != HEAP_NUMBER_TYPE) return false;
- }
- return true;
-}
-
-} // namespace
-
-bool MaglevGraphBuilder::TryBuildNamedAccess(
+ReduceResult MaglevGraphBuilder::TryBuildNamedAccess(
ValueNode* receiver, ValueNode* lookup_start_object,
compiler::NamedAccessFeedback const& feedback,
+ compiler::FeedbackSource const& feedback_source,
compiler::AccessMode access_mode) {
+ // Check for the megamorphic case.
+ if (feedback.maps().empty()) {
+ // We don't have a builtin to fast path megamorphic stores.
+ // TODO(leszeks): Maybe we should?
+ if (access_mode != compiler::AccessMode::kLoad) return ReduceResult::Fail();
+ // We can't do megamorphic loads for lookups where the lookup start isn't
+ // the receiver (e.g. load from super).
+ if (receiver != lookup_start_object) return ReduceResult::Fail();
+
+ return BuildCallBuiltin<Builtin::kLoadIC_Megamorphic>(
+ {receiver, GetConstant(feedback.name())}, feedback_source);
+ }
+
ZoneVector<compiler::PropertyAccessInfo> access_infos(zone());
{
ZoneVector<compiler::PropertyAccessInfo> access_infos_for_feedback(zone());
+ ZoneHandleSet<Map> inferred_maps;
+
if (Constant* n = lookup_start_object->TryCast<Constant>()) {
- compiler::MapRef constant_map = n->object().map();
- compiler::PropertyAccessInfo access_info =
- broker()->GetPropertyAccessInfo(constant_map, feedback.name(),
- access_mode,
- broker()->dependencies());
- access_infos_for_feedback.push_back(access_info);
+ compiler::MapRef constant_map = n->object().map(broker());
+ inferred_maps = ZoneHandleSet<Map>(constant_map.object());
} else {
- for (const compiler::MapRef& map : feedback.maps()) {
- if (map.is_deprecated()) continue;
- compiler::PropertyAccessInfo access_info =
- broker()->GetPropertyAccessInfo(map, feedback.name(), access_mode,
- broker()->dependencies());
- access_infos_for_feedback.push_back(access_info);
+ // TODO(leszeks): This is doing duplicate work with BuildCheckMaps;
+ // consider passing the merger in there.

+ KnownMapsMerger merger(broker(), base::VectorOf(feedback.maps()));
+ merger.IntersectWithKnownNodeAspects(lookup_start_object,
+ known_node_aspects());
+ inferred_maps = merger.intersect_set();
+ }
+
+ for (Handle<Map> map : inferred_maps) {
+ if (map->is_deprecated()) continue;
+
+ // TODO(v8:12547): Support writing to objects in shared space, which
+ // need a write barrier that calls Object::Share to ensure the RHS is
+ // shared.
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
+ map->instance_type()) &&
+ access_mode == compiler::AccessMode::kStore) {
+ return ReduceResult::Fail();
}
+
+ compiler::MapRef map_ref = MakeRefAssumeMemoryFence(broker(), map);
+ compiler::PropertyAccessInfo access_info =
+ broker()->GetPropertyAccessInfo(map_ref, feedback.name(),
+ access_mode);
+ access_infos_for_feedback.push_back(access_info);
}
- compiler::AccessInfoFactory access_info_factory(
- broker(), broker()->dependencies(), zone());
+ compiler::AccessInfoFactory access_info_factory(broker(), zone());
if (!access_info_factory.FinalizePropertyAccessInfos(
access_infos_for_feedback, access_mode, &access_infos)) {
- return false;
+ return ReduceResult::Fail();
}
}
// Check for monomorphic case.
if (access_infos.size() == 1) {
compiler::PropertyAccessInfo access_info = access_infos.front();
- const ZoneVector<compiler::MapRef>& maps =
- access_info.lookup_start_object_maps();
+ base::Vector<const compiler::MapRef> maps =
+ base::VectorOf(access_info.lookup_start_object_maps());
if (HasOnlyStringMaps(maps)) {
// Check for string maps before checking if we need to do an access
// check. Primitive strings always get the prototype from the native
@@ -1805,17 +2812,113 @@ bool MaglevGraphBuilder::TryBuildNamedAccess(
BuildCheckMaps(lookup_start_object, maps);
}
- // Generate the actual property access.
+ // Generate the actual property access.
return TryBuildPropertyAccess(receiver, lookup_start_object,
feedback.name(), access_info, access_mode);
} else {
- // TODO(victorgomes): polymorphic case.
- return false;
+ // TODO(victorgomes): Support more generic polymorphic case.
+
+ // Only support polymorphic load at the moment.
+ if (access_mode != compiler::AccessMode::kLoad) {
+ return ReduceResult::Fail();
+ }
+
+ // Check if we support the polymorphic load.
+ for (compiler::PropertyAccessInfo access_info : access_infos) {
+ DCHECK(!access_info.IsInvalid());
+ if (access_info.IsDictionaryProtoDataConstant()) {
+ compiler::OptionalObjectRef constant =
+ access_info.holder()->GetOwnDictionaryProperty(
+ broker(), access_info.dictionary_index(),
+ broker()->dependencies());
+ if (!constant.has_value()) {
+ return ReduceResult::Fail();
+ }
+ } else if (access_info.IsDictionaryProtoAccessorConstant() ||
+ access_info.IsFastAccessorConstant()) {
+ return ReduceResult::Fail();
+ }
+
+ // TODO(victorgomes): Support map migration.
+ for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
+ if (map.is_migration_target()) {
+ return ReduceResult::Fail();
+ }
+ }
+ }
+
+ // Add compilation dependencies if needed, get constants and fill
+ // polymorphic access info.
+ Representation field_repr = Representation::Smi();
+ ZoneVector<PolymorphicAccessInfo> poly_access_infos(zone());
+ poly_access_infos.reserve(access_infos.size());
+
+ for (compiler::PropertyAccessInfo access_info : access_infos) {
+ if (access_info.holder().has_value() &&
+ !access_info.HasDictionaryHolder()) {
+ broker()->dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
+ access_info.holder().value());
+ }
+
+ const auto& maps = access_info.lookup_start_object_maps();
+ switch (access_info.kind()) {
+ case compiler::PropertyAccessInfo::kNotFound:
+ field_repr = Representation::Tagged();
+ poly_access_infos.push_back(PolymorphicAccessInfo::NotFound(maps));
+ break;
+ case compiler::PropertyAccessInfo::kDataField:
+ case compiler::PropertyAccessInfo::kFastDataConstant: {
+ field_repr =
+ field_repr.generalize(access_info.field_representation());
+ compiler::OptionalObjectRef constant =
+ TryFoldLoadConstantDataField(access_info, lookup_start_object);
+ if (constant.has_value()) {
+ poly_access_infos.push_back(
+ PolymorphicAccessInfo::Constant(maps, constant.value()));
+ } else {
+ poly_access_infos.push_back(PolymorphicAccessInfo::DataLoad(
+ maps, access_info.field_representation(), access_info.holder(),
+ access_info.field_index()));
+ }
+ break;
+ }
+ case compiler::PropertyAccessInfo::kDictionaryProtoDataConstant: {
+ field_repr =
+ field_repr.generalize(access_info.field_representation());
+ compiler::OptionalObjectRef constant =
+ TryFoldLoadDictPrototypeConstant(access_info);
+ DCHECK(constant.has_value());
+ poly_access_infos.push_back(
+ PolymorphicAccessInfo::Constant(maps, constant.value()));
+ break;
+ }
+ case compiler::PropertyAccessInfo::kModuleExport:
+ field_repr = Representation::Tagged();
+ break;
+ case compiler::PropertyAccessInfo::kStringLength:
+ poly_access_infos.push_back(
+ PolymorphicAccessInfo::StringLength(maps));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (field_repr.kind() == Representation::kDouble) {
+ return AddNewNode<LoadPolymorphicDoubleField>(
+ {lookup_start_object}, std::move(poly_access_infos));
+ }
+
+ return AddNewNode<LoadPolymorphicTaggedField>(
+ {lookup_start_object}, field_repr, std::move(poly_access_infos));
}
}
ValueNode* MaglevGraphBuilder::GetInt32ElementIndex(ValueNode* object) {
switch (object->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
case ValueRepresentation::kTagged:
if (SmiConstant* constant = object->TryCast<SmiConstant>()) {
return GetInt32Constant(constant->value().value());
@@ -1833,22 +2936,43 @@ ValueNode* MaglevGraphBuilder::GetInt32ElementIndex(ValueNode* object) {
case ValueRepresentation::kInt32:
// Already good.
return object;
+ case ValueRepresentation::kUint32:
+ case ValueRepresentation::kFloat64:
+ return GetInt32(object);
+ }
+}
+
+// TODO(victorgomes): Consider caching the values and adding a
+// uint32_alternative in node_info.
+ValueNode* MaglevGraphBuilder::GetUint32ElementIndex(ValueNode* object) {
+ switch (object->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kTagged:
+ // TODO(victorgomes): Consider creating Uint32Constants and
+ // CheckedObjectToUnsignedIndex.
+ return AddNewNode<CheckedInt32ToUint32>({GetInt32ElementIndex(object)});
+ case ValueRepresentation::kInt32:
+ return AddNewNode<CheckedInt32ToUint32>({object});
+ case ValueRepresentation::kUint32:
+ return object;
case ValueRepresentation::kFloat64:
- // TODO(leszeks): Pass in the index register (probably the
- // accumulator), so that we can save this truncation on there as a
- // conversion node.
- return AddNewNode<CheckedTruncateFloat64ToInt32>({object});
+ return AddNewNode<CheckedTruncateFloat64ToUint32>({object});
}
}
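
Editor's note: GetUint32ElementIndex reduces every index representation to an unsigned 32-bit value, deoptimizing when that is impossible (negative int32, fractional or out-of-range float64). The toy functions below model that behaviour with std::optional standing in for the deopt check; they are illustrative only.

#include <cstdint>
#include <optional>

// CheckedTruncateFloat64ToUint32-like behaviour: the value must already be an
// exact unsigned 32-bit integer, otherwise the check fails (deopt).
std::optional<uint32_t> ToUint32ElementIndex(double index) {
  if (!(index >= 0.0 && index <= 4294967295.0)) return std::nullopt;  // NaN too.
  uint32_t truncated = static_cast<uint32_t>(index);
  if (static_cast<double>(truncated) != index) return std::nullopt;  // Fraction.
  return truncated;
}

// CheckedInt32ToUint32-like behaviour: negative indices fail the check.
std::optional<uint32_t> ToUint32ElementIndex(int32_t index) {
  if (index < 0) return std::nullopt;
  return static_cast<uint32_t>(index);
}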
-bool MaglevGraphBuilder::TryBuildElementAccessOnString(
+ReduceResult MaglevGraphBuilder::TryBuildElementAccessOnString(
ValueNode* object, ValueNode* index_object,
compiler::KeyedAccessMode const& keyed_mode) {
// Strings are immutable and `in` cannot be used on strings
- if (keyed_mode.access_mode() != compiler::AccessMode::kLoad) return false;
+ if (keyed_mode.access_mode() != compiler::AccessMode::kLoad) {
+ return ReduceResult::Fail();
+ }
// TODO(victorgomes): Deal with LOAD_IGNORE_OUT_OF_BOUNDS.
- if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) return false;
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) {
+ return ReduceResult::Fail();
+ }
DCHECK_EQ(keyed_mode.load_mode(), STANDARD_LOAD);
@@ -1857,91 +2981,350 @@ bool MaglevGraphBuilder::TryBuildElementAccessOnString(
ValueNode* length = AddNewNode<StringLength>({object});
ValueNode* index = GetInt32ElementIndex(index_object);
- AddNewNode<CheckInt32Condition>({index, length}, AssertCondition::kLess,
+ AddNewNode<CheckInt32Condition>({index, length},
+ AssertCondition::kUnsignedLessThan,
DeoptimizeReason::kOutOfBounds);
- SetAccumulator(AddNewNode<StringAt>({object, index}));
- return true;
+ return AddNewNode<StringAt>({object, index});
+}
+
+ValueNode* MaglevGraphBuilder::BuildLoadTypedArrayElement(
+ ValueNode* object, ValueNode* index, ElementsKind elements_kind) {
+#define BUILD_AND_RETURN_LOAD_TYPED_ARRAY(Type, ...) \
+ if (broker()->dependencies()->DependOnArrayBufferDetachingProtector()) { \
+ return AddNewNode<Load##Type##TypedArrayElementNoDeopt>(__VA_ARGS__); \
+ } \
+ return AddNewNode<Load##Type##TypedArrayElement>(__VA_ARGS__);
+ switch (elements_kind) {
+ case INT8_ELEMENTS:
+ case INT16_ELEMENTS:
+ case INT32_ELEMENTS:
+ BUILD_AND_RETURN_LOAD_TYPED_ARRAY(SignedInt, {object, index},
+ elements_kind);
+ case UINT8_CLAMPED_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ BUILD_AND_RETURN_LOAD_TYPED_ARRAY(UnsignedInt, {object, index},
+ elements_kind);
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ BUILD_AND_RETURN_LOAD_TYPED_ARRAY(Double, {object, index}, elements_kind);
+ default:
+ UNREACHABLE();
+ }
+#undef BUILD_AND_RETURN_LOAD_TYPED_ARRAY
+}
+
+void MaglevGraphBuilder::BuildStoreTypedArrayElement(
+ ValueNode* object, ValueNode* index, ElementsKind elements_kind) {
+#define BUILD_STORE_TYPED_ARRAY(Type, ...) \
+ if (broker()->dependencies()->DependOnArrayBufferDetachingProtector()) { \
+ AddNewNode<Store##Type##TypedArrayElementNoDeopt>(__VA_ARGS__); \
+ } else { \
+ AddNewNode<Store##Type##TypedArrayElement>(__VA_ARGS__); \
+ }
+ switch (elements_kind) {
+ case UINT8_CLAMPED_ELEMENTS: {
+ BUILD_STORE_TYPED_ARRAY(
+ Int, {object, index, GetAccumulatorUint32ClampedFromNumber()},
+ elements_kind)
+ break;
+ }
+ case INT8_ELEMENTS:
+ case INT16_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ BUILD_STORE_TYPED_ARRAY(Int,
+ {object, index,
+ GetAccumulatorTruncatedInt32FromNumber(
+ TaggedToFloat64ConversionType::kNumber)},
+ elements_kind)
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ BUILD_STORE_TYPED_ARRAY(
+ Double,
+ {object, index,
+ GetAccumulatorFloat64(TaggedToFloat64ConversionType::kNumber)},
+ elements_kind)
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef BUILD_STORE_TYPED_ARRAY
}
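
Editor's note: the UINT8_CLAMPED store path above bails out without Float64Round support because the ECMAScript ToUint8Clamp conversion clamps to [0, 255] and rounds ties to even. A standalone sketch of that conversion (illustrative only; V8 emits dedicated nodes for this):

#include <cmath>
#include <cstdint>

// Clamp to [0, 255], rounding ties to even; NaN clamps to 0.
uint8_t ToUint8Clamped(double value) {
  if (std::isnan(value) || value <= 0.0) return 0;
  if (value >= 255.0) return 255;
  double rounded = std::nearbyint(value);  // Ties-to-even in the default mode.
  return static_cast<uint8_t>(rounded);
}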
-bool MaglevGraphBuilder::TryBuildElementAccess(
+ReduceResult MaglevGraphBuilder::TryBuildElementAccessOnTypedArray(
ValueNode* object, ValueNode* index_object,
- compiler::ElementAccessFeedback const& feedback) {
- // TODO(victorgomes): Implement other access modes.
- if (feedback.keyed_mode().access_mode() != compiler::AccessMode::kLoad) {
- return false;
+ const compiler::ElementAccessInfo& access_info,
+ compiler::KeyedAccessMode const& keyed_mode) {
+ DCHECK(HasOnlyJSTypedArrayMaps(
+ base::VectorOf(access_info.lookup_start_object_maps())));
+ ElementsKind elements_kind = access_info.elements_kind();
+ if (elements_kind == BIGUINT64_ELEMENTS ||
+ elements_kind == BIGINT64_ELEMENTS) {
+ return ReduceResult::Fail();
+ }
+ if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
+ keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS) {
+ // TODO(victorgomes): Handle STORE_IGNORE_OUT_OF_BOUNDS mode.
+ return ReduceResult::Fail();
+ }
+ if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
+ elements_kind == UINT8_CLAMPED_ELEMENTS &&
+ !IsSupported(CpuOperation::kFloat64Round)) {
+ // TODO(victorgomes): Technically we still support this if the value (in
+ // the accumulator) is of type int32. It would be nice to have a rollback
+ // mechanism instead, so that we do not need to check this early.
+ return ReduceResult::Fail();
+ }
+ ValueNode* index = GetUint32ElementIndex(index_object);
+ AddNewNode<CheckJSTypedArrayBounds>({object, index}, elements_kind);
+ switch (keyed_mode.access_mode()) {
+ case compiler::AccessMode::kLoad:
+ DCHECK_EQ(keyed_mode.load_mode(), STANDARD_LOAD);
+ return BuildLoadTypedArrayElement(object, index, elements_kind);
+ case compiler::AccessMode::kStore:
+ DCHECK_EQ(keyed_mode.store_mode(), STANDARD_STORE);
+ BuildStoreTypedArrayElement(object, index, elements_kind);
+ return ReduceResult::Done();
+ case compiler::AccessMode::kHas:
+ // TODO(victorgomes): Implement has element access.
+ return ReduceResult::Fail();
+ case compiler::AccessMode::kStoreInLiteral:
+ case compiler::AccessMode::kDefine:
+ UNREACHABLE();
+ }
+}
+
+ReduceResult MaglevGraphBuilder::TryBuildElementAccessOnJSArrayOrJSObject(
+ ValueNode* object, ValueNode* index_object,
+ const compiler::ElementAccessInfo& access_info,
+ compiler::KeyedAccessMode const& keyed_mode) {
+ ElementsKind elements_kind = access_info.elements_kind();
+ if (!IsFastElementsKind(elements_kind)) {
+ return ReduceResult::Fail();
+ }
+ base::Vector<const compiler::MapRef> maps =
+ base::VectorOf(access_info.lookup_start_object_maps());
+ if ((keyed_mode.access_mode() == compiler::AccessMode::kLoad) &&
+ IsHoleyElementsKind(elements_kind) && !CanTreatHoleAsUndefined(maps)) {
+ return ReduceResult::Fail();
+ }
+ if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
+ keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
+ // TODO(victorgomes): Handle growable elements.
+ return ReduceResult::Fail();
+ }
+ ValueNode* index;
+ if (HasOnlyJSArrayMaps(maps)) {
+ index = GetInt32ElementIndex(index_object);
+ AddNewNode<CheckJSArrayBounds>({object, index});
+ } else {
+ DCHECK(HasOnlyJSObjectMaps(maps));
+ index = GetInt32ElementIndex(index_object);
+ AddNewNode<CheckJSObjectElementsBounds>({object, index});
+ }
+ switch (keyed_mode.access_mode()) {
+ case compiler::AccessMode::kLoad: {
+ DCHECK_EQ(keyed_mode.load_mode(), STANDARD_LOAD);
+ ValueNode* elements_array =
+ AddNewNode<LoadTaggedField>({object}, JSObject::kElementsOffset);
+ ValueNode* result;
+ if (IsDoubleElementsKind(elements_kind)) {
+ result =
+ AddNewNode<LoadFixedDoubleArrayElement>({elements_array, index});
+ if (IsHoleyElementsKind(elements_kind)) {
+ // TODO(v8:7700): Add a representation for "Float64OrHole" and emit
+ // this boxing lazily.
+ result = AddNewNode<HoleyFloat64Box>({result});
+ }
+ } else {
+ result = AddNewNode<LoadFixedArrayElement>({elements_array, index});
+ if (IsHoleyElementsKind(elements_kind)) {
+ result = AddNewNode<ConvertHoleToUndefined>({result});
+ }
+ }
+ return result;
+ }
+ case compiler::AccessMode::kStore: {
+ ValueNode* elements_array =
+ AddNewNode<LoadTaggedField>({object}, JSObject::kElementsOffset);
+ if (IsDoubleElementsKind(elements_kind)) {
+ ValueNode* value =
+ GetAccumulatorFloat64(TaggedToFloat64ConversionType::kNumber);
+ // Make sure we do not store signalling NaNs into double arrays.
+ // TODO(leszeks): Consider making this a bit on
+ // StoreFixedDoubleArrayElement rather than a separate node.
+ value = GetSilencedNaN(value);
+ AddNewNode<StoreFixedDoubleArrayElement>(
+ {elements_array, index, value});
+ } else {
+ if (keyed_mode.store_mode() == STORE_HANDLE_COW) {
+ elements_array =
+ AddNewNode<EnsureWritableFastElements>({elements_array, object});
+ } else {
+ // Ensure that this is not a COW FixedArray.
+ BuildCheckMaps(elements_array,
+ base::VectorOf({broker()->fixed_array_map()}));
+ }
+ ValueNode* value = GetAccumulatorTagged();
+ if (IsSmiElementsKind(elements_kind)) {
+ AddNewNode<CheckedStoreFixedArraySmiElement>(
+ {elements_array, index, value});
+ } else {
+ BuildStoreFixedArrayElement(elements_array, index, value);
+ }
+ }
+ return ReduceResult::Done();
+ }
+ default:
+ // TODO(victorgomes): Implement more access types.
+ return ReduceResult::Fail();
+ }
+}
+
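
Editor's note: for holey fast-element loads, the code above boxes the loaded double (HoleyFloat64Box) or rewrites the hole sentinel to undefined (ConvertHoleToUndefined). The toy loads below model that hole handling with std::optional playing the role of undefined; note that the real hole in a double array is a specific NaN bit pattern, which any NaN stands in for here.

#include <cmath>
#include <cstddef>
#include <optional>
#include <vector>

struct TaggedValue { bool is_the_hole; int payload; };

// Tagged holey load: the hole sentinel becomes "undefined".
std::optional<int> LoadHoleyTaggedElement(
    const std::vector<TaggedValue>& elements, std::size_t index) {
  const TaggedValue& slot = elements[index];  // Bounds were checked earlier.
  if (slot.is_the_hole) return std::nullopt;  // ConvertHoleToUndefined.
  return slot.payload;
}

// Double holey load: the hole NaN becomes "undefined", otherwise box.
std::optional<double> LoadHoleyDoubleElement(
    const std::vector<double>& elements, std::size_t index) {
  double value = elements[index];
  if (std::isnan(value)) return std::nullopt;  // Simplified hole check.
  return value;                                // HoleyFloat64Box.
}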
+ReduceResult MaglevGraphBuilder::TryBuildElementAccess(
+ ValueNode* object, ValueNode* index_object,
+ compiler::ElementAccessFeedback const& feedback,
+ compiler::FeedbackSource const& feedback_source) {
+ const compiler::KeyedAccessMode& keyed_mode = feedback.keyed_mode();
+ // Check for the megamorphic case.
+ if (feedback.transition_groups().empty()) {
+ if (keyed_mode.access_mode() != compiler::AccessMode::kLoad) {
+ return ReduceResult::Fail();
+ }
+ return BuildCallBuiltin<Builtin::kKeyedLoadIC_Megamorphic>(
+ {object, GetTaggedValue(index_object)}, feedback_source);
}
// TODO(leszeks): Add non-deopting bounds check (has to support undefined
// values).
- if (feedback.keyed_mode().load_mode() != STANDARD_LOAD) {
- return false;
+ if (keyed_mode.access_mode() == compiler::AccessMode::kLoad &&
+ keyed_mode.load_mode() != STANDARD_LOAD) {
+ return ReduceResult::Fail();
}
// TODO(victorgomes): Add fast path for loading from HeapConstant.
- if (!feedback.transition_groups().empty() &&
- feedback.HasOnlyStringMaps(broker())) {
- return TryBuildElementAccessOnString(object, index_object,
- feedback.keyed_mode());
+ if (feedback.HasOnlyStringMaps(broker())) {
+ return TryBuildElementAccessOnString(object, index_object, keyed_mode);
}
- compiler::AccessInfoFactory access_info_factory(
- broker(), broker()->dependencies(), zone());
+ compiler::AccessInfoFactory access_info_factory(broker(), zone());
ZoneVector<compiler::ElementAccessInfo> access_infos(zone());
if (!access_info_factory.ComputeElementAccessInfos(feedback, &access_infos) ||
access_infos.empty()) {
- return false;
+ return ReduceResult::Fail();
+ }
+
+ // TODO(leszeks): This is copied without changes from TurboFan's native
+ // context specialization. We should figure out a way to share this code.
+ //
+ // For holey stores or growing stores, we need to check that the prototype
+ // chain contains no setters for elements, and we need to guard those checks
+ // via code dependencies on the relevant prototype maps.
+ if (keyed_mode.access_mode() == compiler::AccessMode::kStore) {
+ // TODO(v8:7700): We could have a fast path here that checks for the
+ // common case of Array or Object prototypes only and therefore avoids
+ // the zone allocation of this vector.
+ ZoneVector<compiler::MapRef> prototype_maps(zone());
+ for (compiler::ElementAccessInfo const& access_info : access_infos) {
+ for (compiler::MapRef receiver_map :
+ access_info.lookup_start_object_maps()) {
+ // If the {receiver_map} has a prototype and its elements backing
+ // store is either holey, or we have a potentially growing store,
+ // then we need to check that all prototypes have stable maps with
+ // fast elements (and we need to guard against changes to that below).
+ if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
+ IsGrowStoreMode(feedback.keyed_mode().store_mode())) &&
+ !receiver_map.HasOnlyStablePrototypesWithFastElements(
+ broker(), &prototype_maps)) {
+ return ReduceResult::Fail();
+ }
+
+ // TODO(v8:12547): Support writing to objects in shared space, which
+ // need a write barrier that calls Object::Share to ensure the RHS is
+ // shared.
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
+ receiver_map.instance_type())) {
+ return ReduceResult::Fail();
+ }
+ }
+ }
+ for (compiler::MapRef const& prototype_map : prototype_maps) {
+ broker()->dependencies()->DependOnStableMap(prototype_map);
+ }
}
// Check for monomorphic case.
if (access_infos.size() == 1) {
compiler::ElementAccessInfo access_info = access_infos.front();
+ // TODO(victorgomes): Support RAB/GSAB backed typed arrays.
+ if (IsRabGsabTypedArrayElementsKind(access_info.elements_kind())) {
+ return ReduceResult::Fail();
+ }
- // TODO(victorgomes): Support elment kind transitions.
- if (access_info.transition_sources().size() != 0) return false;
-
- // TODO(victorgomes): Support more elements kind.
- ElementsKind elements_kind = access_info.elements_kind();
- if (!IsFastElementsKind(elements_kind)) return false;
- if (IsHoleyElementsKind(elements_kind)) return false;
-
- const compiler::MapRef& map =
+ compiler::MapRef transition_target =
access_info.lookup_start_object_maps().front();
- if (access_info.lookup_start_object_maps().size() != 1) {
- // TODO(victorgomes): polymorphic case.
- return false;
+ if (!access_info.transition_sources().empty()) {
+ base::Vector<compiler::MapRef> transition_sources =
+ zone()->CloneVector(base::VectorOf(access_info.transition_sources()));
+ AddNewNode<TransitionElementsKind>({object}, transition_sources,
+ transition_target);
}
- BuildCheckMaps(object, access_info.lookup_start_object_maps());
- ValueNode* index = GetInt32ElementIndex(index_object);
- if (map.IsJSArrayMap()) {
- AddNewNode<CheckJSArrayBounds>({object, index});
- } else {
- DCHECK(map.IsJSObjectMap());
- AddNewNode<CheckJSObjectElementsBounds>({object, index});
- }
- if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
- SetAccumulator(AddNewNode<LoadDoubleElement>({object, index}));
- } else {
- DCHECK(!IsDoubleElementsKind(elements_kind));
- SetAccumulator(AddNewNode<LoadTaggedElement>({object, index}));
+ BuildCheckMaps(object,
+ base::VectorOf(access_info.lookup_start_object_maps()));
+ if (IsTypedArrayElementsKind(access_info.elements_kind())) {
+ return TryBuildElementAccessOnTypedArray(object, index_object,
+ access_info, keyed_mode);
}
- return true;
-
+ return TryBuildElementAccessOnJSArrayOrJSObject(object, index_object,
+ access_info, keyed_mode);
} else {
// TODO(victorgomes): polymorphic case.
- return false;
+ return ReduceResult::Fail();
}
}
-void MaglevGraphBuilder::RecordKnownProperty(ValueNode* lookup_start_object,
- compiler::NameRef name,
- ValueNode* value, bool is_const) {
+void MaglevGraphBuilder::RecordKnownProperty(
+ ValueNode* lookup_start_object, compiler::NameRef name, ValueNode* value,
+ compiler::PropertyAccessInfo const& access_info) {
+ bool is_const;
+ if (access_info.IsFastDataConstant() || access_info.IsStringLength()) {
+ is_const = true;
+ // Even if we have a constant load, if the map is not stable, we cannot
+ // guarantee that the load is preserved across side-effecting calls.
+ // TODO(v8:7700): It might be possible to track it as const if we know that
+ // we're still on the main transition tree; and if we add a dependency on
+ // the stable end-maps of the entire tree.
+ for (auto& map : access_info.lookup_start_object_maps()) {
+ if (!map.is_stable()) {
+ is_const = false;
+ break;
+ }
+ }
+ } else {
+ is_const = false;
+ }
+
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << " * Recording " << (is_const ? "constant" : "non-constant")
+ << " known property "
+ << PrintNodeLabel(graph_labeller(), lookup_start_object) << ": "
+ << PrintNode(graph_labeller(), lookup_start_object) << " ["
+ << *name.object()
+ << "] = " << PrintNodeLabel(graph_labeller(), value) << ": "
+ << PrintNode(graph_labeller(), value) << std::endl;
+ }
auto& loaded_properties =
is_const ? known_node_aspects().loaded_constant_properties
: known_node_aspects().loaded_properties;
- loaded_properties.emplace(std::make_pair(lookup_start_object, name), value);
+ loaded_properties[std::make_pair(lookup_start_object, name)] = value;
}
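
Editor's note: RecordKnownProperty maintains two caches keyed by (lookup start object, name) — constant entries survive side-effecting nodes, non-constant entries must be dropped when one is emitted, and later loads can be re-used via TryReuseKnownPropertyLoad. A miniature model of that bookkeeping, with simplified key/value types that are not Maglev's:

#include <map>
#include <string>
#include <utility>

struct KnownProperties {
  using Key = std::pair<const void*, std::string>;
  std::map<Key, const void*> constant;      // loaded_constant_properties
  std::map<Key, const void*> non_constant;  // loaded_properties

  void Record(const void* object, const std::string& name, const void* value,
              bool is_const) {
    auto& cache = is_const ? constant : non_constant;
    cache[{object, name}] = value;  // Overwrite any stale entry.
  }

  const void* Lookup(const void* object, const std::string& name) const {
    Key key{object, name};
    if (auto it = constant.find(key); it != constant.end()) return it->second;
    if (auto it = non_constant.find(key); it != non_constant.end())
      return it->second;
    return nullptr;
  }

  // Called when a node with arbitrary side effects is emitted.
  void ClearAfterSideEffects() { non_constant.clear(); }
};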
bool MaglevGraphBuilder::TryReuseKnownPropertyLoad(
@@ -1988,14 +3371,15 @@ void MaglevGraphBuilder::VisitGetNamedProperty() {
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
return;
- case compiler::ProcessedFeedback::kNamedAccess:
+ case compiler::ProcessedFeedback::kNamedAccess: {
if (TryReuseKnownPropertyLoad(object, name)) return;
- if (TryBuildNamedAccess(object, object,
- processed_feedback.AsNamedAccess(),
- compiler::AccessMode::kLoad)) {
- return;
- }
+ ReduceResult result = TryBuildNamedAccess(
+ object, object, processed_feedback.AsNamedAccess(), feedback_source,
+ compiler::AccessMode::kLoad);
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
break;
+ }
default:
break;
}
@@ -2030,15 +3414,15 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() {
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
return;
- case compiler::ProcessedFeedback::kNamedAccess:
+ case compiler::ProcessedFeedback::kNamedAccess: {
if (TryReuseKnownPropertyLoad(lookup_start_object, name)) return;
- if (TryBuildNamedAccess(receiver, lookup_start_object,
- processed_feedback.AsNamedAccess(),
- compiler::AccessMode::kLoad)) {
- return;
- }
+ ReduceResult result = TryBuildNamedAccess(
+ receiver, lookup_start_object, processed_feedback.AsNamedAccess(),
+ feedback_source, compiler::AccessMode::kLoad);
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
break;
-
+ }
default:
break;
}
@@ -2061,6 +3445,28 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
broker()->GetFeedbackForPropertyAccess(
feedback_source, compiler::AccessMode::kLoad, base::nullopt);
+ if (current_for_in_state.index != nullptr &&
+ current_for_in_state.receiver == object &&
+ current_for_in_state.key == current_interpreter_frame_.accumulator()) {
+ if (current_for_in_state.receiver_needs_map_check) {
+ auto* receiver_map =
+ AddNewNode<LoadTaggedField>({object}, HeapObject::kMapOffset);
+ AddNewNode<CheckDynamicValue>(
+ {receiver_map, current_for_in_state.cache_type});
+ current_for_in_state.receiver_needs_map_check = false;
+ }
+ // TODO(leszeks): Cache the indices across the loop.
+ auto* cache_array = AddNewNode<LoadTaggedField>(
+ {current_for_in_state.enum_cache}, EnumCache::kIndicesOffset);
+ AddNewNode<CheckFixedArrayNonEmpty>({cache_array});
+ // TODO(leszeks): Cache the field index per iteration.
+ auto* field_index = AddNewNode<LoadFixedArrayElement>(
+ {cache_array, current_for_in_state.index});
+ SetAccumulator(
+ AddNewNode<LoadTaggedFieldByFieldIndex>({object, field_index}));
+ return;
+ }
+
switch (processed_feedback.kind()) {
case compiler::ProcessedFeedback::kInsufficient:
EmitUnconditionalDeopt(
@@ -2071,23 +3477,23 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
// Get the accumulator without conversion. TryBuildElementAccess
// will try to pick the best representation.
ValueNode* index = current_interpreter_frame_.accumulator();
- if (TryBuildElementAccess(object, index,
- processed_feedback.AsElementAccess())) {
- return;
- }
+ ReduceResult result = TryBuildElementAccess(
+ object, index, processed_feedback.AsElementAccess(), feedback_source);
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
break;
}
case compiler::ProcessedFeedback::kNamedAccess: {
ValueNode* key = GetAccumulatorTagged();
compiler::NameRef name = processed_feedback.AsNamedAccess().name();
- if (!BuildCheckValue(key, name)) return;
+ if (BuildCheckValue(key, name).IsDoneWithAbort()) return;
if (TryReuseKnownPropertyLoad(object, name)) return;
- if (TryBuildNamedAccess(object, object,
- processed_feedback.AsNamedAccess(),
- compiler::AccessMode::kLoad)) {
- return;
- }
+ ReduceResult result = TryBuildNamedAccess(
+ object, object, processed_feedback.AsNamedAccess(), feedback_source,
+ compiler::AccessMode::kLoad);
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
break;
}
@@ -2111,7 +3517,7 @@ void MaglevGraphBuilder::VisitLdaModuleVariable() {
MinimizeContextChainDepth(&context, &depth);
if (compilation_unit_->info()->specialize_to_function_context()) {
- base::Optional<compiler::ContextRef> maybe_ref =
+ compiler::OptionalContextRef maybe_ref =
FunctionContextSpecialization::TryToRef(compilation_unit_, context,
&depth);
if (maybe_ref.has_value()) {
@@ -2139,7 +3545,7 @@ void MaglevGraphBuilder::VisitLdaModuleVariable() {
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
- ValueNode* cell = LoadFixedArrayElement(exports_or_imports, cell_index);
+ ValueNode* cell = BuildLoadFixedArrayElement(exports_or_imports, cell_index);
SetAccumulator(AddNewNode<LoadTaggedField>({cell}, Cell::kValueOffset));
}
@@ -2158,7 +3564,7 @@ void MaglevGraphBuilder::VisitStaModuleVariable() {
MinimizeContextChainDepth(&context, &depth);
if (compilation_unit_->info()->specialize_to_function_context()) {
- base::Optional<compiler::ContextRef> maybe_ref =
+ compiler::OptionalContextRef maybe_ref =
FunctionContextSpecialization::TryToRef(compilation_unit_, context,
&depth);
if (maybe_ref.has_value()) {
@@ -2178,9 +3584,8 @@ void MaglevGraphBuilder::VisitStaModuleVariable() {
{module}, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
- ValueNode* cell = LoadFixedArrayElement(exports, cell_index);
- AddNewNode<StoreTaggedFieldWithWriteBarrier>({cell, GetAccumulatorTagged()},
- Cell::kValueOffset);
+ ValueNode* cell = BuildLoadFixedArrayElement(exports, cell_index);
+ BuildStoreTaggedField(cell, GetAccumulatorTagged(), Cell::kValueOffset);
}
void MaglevGraphBuilder::BuildLoadGlobal(
@@ -2189,17 +3594,13 @@ void MaglevGraphBuilder::BuildLoadGlobal(
const compiler::ProcessedFeedback& access_feedback =
broker()->GetFeedbackForGlobalAccess(feedback_source);
- if (access_feedback.IsInsufficient()) {
- EmitUnconditionalDeopt(
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
- return;
- }
-
- const compiler::GlobalAccessFeedback& global_access_feedback =
- access_feedback.AsGlobalAccess();
+ if (!access_feedback.IsInsufficient()) {
+ const compiler::GlobalAccessFeedback& global_access_feedback =
+ access_feedback.AsGlobalAccess();
- if (TryBuildScriptContextAccess(global_access_feedback)) return;
- if (TryBuildPropertyCellAccess(global_access_feedback)) return;
+ if (TryBuildScriptContextAccess(global_access_feedback)) return;
+ if (TryBuildPropertyCellAccess(global_access_feedback)) return;
+ }
ValueNode* context = GetContext();
SetAccumulator(
@@ -2226,7 +3627,8 @@ void MaglevGraphBuilder::VisitSetNamedProperty() {
case compiler::ProcessedFeedback::kNamedAccess:
if (TryBuildNamedAccess(object, object,
processed_feedback.AsNamedAccess(),
- compiler::AccessMode::kStore)) {
+ feedback_source, compiler::AccessMode::kStore)
+ .IsDone()) {
return;
}
break;
@@ -2237,8 +3639,7 @@ void MaglevGraphBuilder::VisitSetNamedProperty() {
// Create a generic store in the fallthrough.
ValueNode* context = GetContext();
ValueNode* value = GetAccumulatorTagged();
- SetAccumulator(AddNewNode<SetNamedGeneric>({context, object, value}, name,
- feedback_source));
+ AddNewNode<SetNamedGeneric>({context, object, value}, name, feedback_source);
}
void MaglevGraphBuilder::VisitDefineNamedOwnProperty() {
@@ -2261,7 +3662,8 @@ void MaglevGraphBuilder::VisitDefineNamedOwnProperty() {
case compiler::ProcessedFeedback::kNamedAccess:
if (TryBuildNamedAccess(object, object,
processed_feedback.AsNamedAccess(),
- compiler::AccessMode::kDefine)) {
+ feedback_source, compiler::AccessMode::kDefine)
+ .IsDone()) {
return;
}
break;
@@ -2273,31 +3675,56 @@ void MaglevGraphBuilder::VisitDefineNamedOwnProperty() {
// Create a generic store in the fallthrough.
ValueNode* context = GetContext();
ValueNode* value = GetAccumulatorTagged();
- SetAccumulator(AddNewNode<DefineNamedOwnGeneric>({context, object, value},
- name, feedback_source));
+ AddNewNode<DefineNamedOwnGeneric>({context, object, value}, name,
+ feedback_source);
}
void MaglevGraphBuilder::VisitSetKeyedProperty() {
// SetKeyedProperty <object> <key> <slot>
ValueNode* object = LoadRegisterTagged(0);
- ValueNode* key = LoadRegisterTagged(1);
FeedbackSlot slot = GetSlotOperand(2);
compiler::FeedbackSource feedback_source{feedback(), slot};
- // TODO(victorgomes): Add monomorphic fast path.
+ const compiler::ProcessedFeedback& processed_feedback =
+ broker()->GetFeedbackForPropertyAccess(
+ feedback_source, compiler::AccessMode::kLoad, base::nullopt);
+
+ switch (processed_feedback.kind()) {
+ case compiler::ProcessedFeedback::kInsufficient:
+ EmitUnconditionalDeopt(
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
+ return;
+
+ case compiler::ProcessedFeedback::kElementAccess: {
+ // Get the key without conversion. TryBuildElementAccess will try to pick
+ // the best representation.
+ ValueNode* index =
+ current_interpreter_frame_.get(iterator_.GetRegisterOperand(1));
+ if (TryBuildElementAccess(object, index,
+ processed_feedback.AsElementAccess(),
+ feedback_source)
+ .IsDone()) {
+ return;
+ }
+ } break;
+
+ default:
+ break;
+ }
// Create a generic store in the fallthrough.
+ ValueNode* key = LoadRegisterTagged(1);
ValueNode* context = GetContext();
ValueNode* value = GetAccumulatorTagged();
- SetAccumulator(AddNewNode<SetKeyedGeneric>({context, object, key, value},
- feedback_source));
+ AddNewNode<SetKeyedGeneric>({context, object, key, value}, feedback_source);
}
void MaglevGraphBuilder::VisitDefineKeyedOwnProperty() {
- // DefineKeyedOwnProperty <object> <key> <slot>
+ // DefineKeyedOwnProperty <object> <key> <flags> <slot>
ValueNode* object = LoadRegisterTagged(0);
ValueNode* key = LoadRegisterTagged(1);
- FeedbackSlot slot = GetSlotOperand(2);
+ ValueNode* flags = GetSmiConstant(GetFlag8Operand(2));
+ FeedbackSlot slot = GetSlotOperand(3);
compiler::FeedbackSource feedback_source{feedback(), slot};
// TODO(victorgomes): Add monomorphic fast path.
@@ -2305,8 +3732,8 @@ void MaglevGraphBuilder::VisitDefineKeyedOwnProperty() {
// Create a generic store in the fallthrough.
ValueNode* context = GetContext();
ValueNode* value = GetAccumulatorTagged();
- SetAccumulator(AddNewNode<DefineKeyedOwnGeneric>(
- {context, object, key, value}, feedback_source));
+ AddNewNode<DefineKeyedOwnGeneric>({context, object, key, value, flags},
+ feedback_source);
}
void MaglevGraphBuilder::VisitStaInArrayLiteral() {
@@ -2334,8 +3761,8 @@ void MaglevGraphBuilder::VisitStaInArrayLiteral() {
// Create a generic store in the fallthrough.
ValueNode* context = GetContext();
ValueNode* value = GetAccumulatorTagged();
- SetAccumulator(AddNewNode<StoreInArrayLiteralGeneric>(
- {context, object, name, value}, feedback_source));
+ AddNewNode<StoreInArrayLiteralGeneric>({context, object, name, value},
+ feedback_source);
}
void MaglevGraphBuilder::VisitDefineKeyedOwnPropertyInLiteral() {
@@ -2345,9 +3772,8 @@ void MaglevGraphBuilder::VisitDefineKeyedOwnPropertyInLiteral() {
ValueNode* flags = GetSmiConstant(GetFlag8Operand(2));
ValueNode* slot = GetSmiConstant(GetSlotOperand(3).ToInt());
ValueNode* feedback_vector = GetConstant(feedback());
- SetAccumulator(
- BuildCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral,
- {object, name, value, flags, feedback_vector, slot}));
+ BuildCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral,
+ {object, name, value, flags, feedback_vector, slot});
}
void MaglevGraphBuilder::VisitAdd() { VisitBinaryOperation<Operation::kAdd>(); }
@@ -2511,81 +3937,218 @@ void MaglevGraphBuilder::VisitFindNonDefaultConstructorOrConstruct() {
StoreRegisterPair(result, call_builtin);
}
-void MaglevGraphBuilder::InlineCallFromRegisters(
- int argc_count, ConvertReceiverMode receiver_mode,
- compiler::JSFunctionRef function) {
- // The undefined constant node has to be created before the inner graph is
- // created.
- RootConstant* undefined_constant;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- undefined_constant = GetRootConstant(RootIndex::kUndefinedValue);
- }
-
- // Create a new compilation unit and graph builder for the inlined
- // function.
- MaglevCompilationUnit* inner_unit =
- MaglevCompilationUnit::NewInner(zone(), compilation_unit_, function);
- MaglevGraphBuilder inner_graph_builder(local_isolate_, inner_unit, graph_,
- this);
-
- // Finish the current block with a jump to the inlined function.
- BasicBlockRef start_ref, end_ref;
- BasicBlock* block = FinishBlock<JumpToInlined>({}, &start_ref, inner_unit);
+ReduceResult MaglevGraphBuilder::BuildInlined(const CallArguments& args,
+ BasicBlockRef* start_ref,
+ BasicBlockRef* end_ref) {
+ DCHECK(is_inline());
// Manually create the prologue of the inner function graph, so that we
// can manually set up the arguments.
- inner_graph_builder.StartPrologue();
-
- int arg_index = 0;
- int reg_count;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- reg_count = argc_count;
- if (function.shared().language_mode() == LanguageMode::kSloppy) {
- // TODO(leszeks): Store the global proxy somehow.
- inner_graph_builder.SetArgument(arg_index++, undefined_constant);
- } else {
- inner_graph_builder.SetArgument(arg_index++, undefined_constant);
+ StartPrologue();
+
+ // Set receiver.
+ ValueNode* receiver = GetConvertReceiver(function(), args);
+ SetArgument(0, receiver);
+ // Set remaining arguments.
+ RootConstant* undefined_constant =
+ GetRootConstant(RootIndex::kUndefinedValue);
+ for (int i = 1; i < parameter_count(); i++) {
+ ValueNode* arg_value = args[i - 1];
+ if (arg_value == nullptr) arg_value = undefined_constant;
+ SetArgument(i, arg_value);
+ }
+
+ int arg_count = static_cast<int>(args.count());
+ int formal_parameter_count =
+ compilation_unit_->shared_function_info()
+ .internal_formal_parameter_count_without_receiver();
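+  // If the call-site arity differs from the declared parameter count, keep
+  // the receiver and the actual arguments around, e.g. so that deopt frames
+  // for the inlined function can reconstruct the real argument values.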
+ if (arg_count != formal_parameter_count) {
+ inlined_arguments_.emplace(zone()->NewVector<ValueNode*>(arg_count + 1));
+ (*inlined_arguments_)[0] = receiver;
+ for (int i = 0; i < arg_count; i++) {
+ (*inlined_arguments_)[i + 1] = args[i];
}
- } else {
- reg_count = argc_count + 1;
- }
- for (int i = 0; i < reg_count && i < inner_unit->parameter_count(); i++) {
- inner_graph_builder.SetArgument(arg_index++, LoadRegisterTagged(i + 1));
- }
- for (; arg_index < inner_unit->parameter_count(); arg_index++) {
- inner_graph_builder.SetArgument(arg_index, undefined_constant);
}
- // TODO(leszeks): Also correctly set up the closure and context slots, instead
- // of using InitialValue.
- inner_graph_builder.BuildRegisterFrameInitialization();
- inner_graph_builder.BuildMergeStates();
- BasicBlock* inlined_prologue = inner_graph_builder.EndPrologue();
+
+ BuildRegisterFrameInitialization(GetConstant(function().context(broker())),
+ GetConstant(function()));
+ BuildMergeStates();
+ BasicBlock* inlined_prologue = EndPrologue();
// Set the entry JumpToInlined to jump to the prologue block.
// TODO(leszeks): Passing start_ref to JumpToInlined creates a two-element
// linked list of refs. Consider adding a helper to explicitly set the target
// instead.
- start_ref.SetToBlockAndReturnNext(inlined_prologue)
+ start_ref->SetToBlockAndReturnNext(inlined_prologue)
->SetToBlockAndReturnNext(inlined_prologue);
// Build the inlined function body.
- inner_graph_builder.BuildBody();
+ BuildBody();
// All returns in the inlined body jump to a merge point one past the
// bytecode length (i.e. at offset bytecode.length()). Create a block at
// this fake offset and have it jump out of the inlined function, into a new
// block that we create which resumes execution of the outer function.
// TODO(leszeks): Wrap this up in a helper.
- DCHECK_NULL(inner_graph_builder.current_block_);
- inner_graph_builder.ProcessMergePoint(
- inner_graph_builder.inline_exit_offset());
- inner_graph_builder.StartNewBlock(inner_graph_builder.inline_exit_offset());
- inner_graph_builder.FinishBlock<JumpFromInlined>({}, &end_ref);
+ DCHECK_NULL(current_block_);
+
+ // If we don't have a merge state at the inline_exit_offset, then there is no
+ // control flow that reaches the end of the inlined function, either because
+  // of infinite loops or deopts.
+ if (merge_states_[inline_exit_offset()] == nullptr) {
+ return ReduceResult::DoneWithAbort();
+ }
+
+ ProcessMergePoint(inline_exit_offset());
+ StartNewBlock(inline_exit_offset());
+ FinishBlock<JumpFromInlined>({}, end_ref);
// Pull the returned accumulator value out of the inlined function's final
// merged return state.
- current_interpreter_frame_.set_accumulator(
- inner_graph_builder.current_interpreter_frame_.accumulator());
+ return current_interpreter_frame_.accumulator();
+}
+
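+// Tracing helpers, gated on the --trace-maglev-inlining flag.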
+#define TRACE_INLINING(...) \
+ do { \
+ if (v8_flags.trace_maglev_inlining) \
+ StdoutStream{} << __VA_ARGS__ << std::endl; \
+ } while (false)
+
+#define TRACE_CANNOT_INLINE(...) \
+ TRACE_INLINING(" cannot inline " << shared << ": " << __VA_ARGS__)
+
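+// Inlining heuristics: reject candidates that have not been compiled yet,
+// have already been tiered up to Turbofan, lack a feedback vector, use
+// unsupported bytecodes, or exceed the size/depth/frequency budgets.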
+bool MaglevGraphBuilder::ShouldInlineCall(compiler::JSFunctionRef function,
+ float call_frequency) {
+ compiler::OptionalCodeRef code = function.code(broker());
+ compiler::SharedFunctionInfoRef shared = function.shared(broker());
+ if (graph()->total_inlined_bytecode_size() >
+ v8_flags.max_maglev_inlined_bytecode_size_cumulative) {
+ TRACE_CANNOT_INLINE("maximum inlined bytecode size");
+ return false;
+ }
+ if (!code) {
+ // TODO(verwaest): Soft deopt instead?
+ TRACE_CANNOT_INLINE("it has not been compiled yet");
+ return false;
+ }
+ if (code->object()->kind() == CodeKind::TURBOFAN) {
+ TRACE_CANNOT_INLINE("already turbofanned");
+ return false;
+ }
+ if (!function.feedback_vector(broker()).has_value()) {
+ TRACE_CANNOT_INLINE("no feedback vector");
+ return false;
+ }
+ SharedFunctionInfo::Inlineability inlineability =
+ shared.GetInlineability(broker());
+ if (inlineability != SharedFunctionInfo::Inlineability::kIsInlineable) {
+ TRACE_CANNOT_INLINE(inlineability);
+ return false;
+ }
+ // TODO(victorgomes): Support NewTarget/RegisterInput in inlined functions.
+ compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
+ if (bytecode.incoming_new_target_or_generator_register().is_valid()) {
+    TRACE_CANNOT_INLINE("uses the unsupported NewTargetOrGenerator register");
+ return false;
+ }
+ // TODO(victorgomes): Support exception handler inside inlined functions.
+ if (bytecode.handler_table_size() > 0) {
+    TRACE_CANNOT_INLINE("uses unsupported exception handlers");
+ return false;
+ }
+ // TODO(victorgomes): Support inlined allocation of the arguments object.
+ interpreter::BytecodeArrayIterator iterator(bytecode.object());
+ for (; !iterator.done(); iterator.Advance()) {
+ switch (iterator.current_bytecode()) {
+ case interpreter::Bytecode::kCreateMappedArguments:
+ case interpreter::Bytecode::kCreateUnmappedArguments:
+ case interpreter::Bytecode::kCreateRestParameter:
+        TRACE_CANNOT_INLINE("unsupported arguments object creation");
+ return false;
+ default:
+ break;
+ }
+ }
+ if (call_frequency < v8_flags.min_maglev_inlining_frequency) {
+ TRACE_CANNOT_INLINE("call frequency ("
+ << call_frequency << ") < minimum threshold ("
+ << v8_flags.min_maglev_inlining_frequency << ")");
+ return false;
+ }
+ if (bytecode.length() < v8_flags.max_maglev_inlined_bytecode_size_small) {
+ TRACE_INLINING(" inlining " << shared << ": small function");
+ return true;
+ }
+ if (bytecode.length() > v8_flags.max_maglev_inlined_bytecode_size) {
+ TRACE_CANNOT_INLINE("big function, size ("
+ << bytecode.length() << ") >= max-size ("
+ << v8_flags.max_maglev_inlined_bytecode_size << ")");
+ return false;
+ }
+ if (inlining_depth() > v8_flags.max_maglev_inline_depth) {
+ TRACE_CANNOT_INLINE("inlining depth ("
+ << inlining_depth() << ") >= max-depth ("
+ << v8_flags.max_maglev_inline_depth << ")");
+ return false;
+ }
+ TRACE_INLINING(" inlining " << shared);
+ if (v8_flags.trace_maglev_inlining_verbose) {
+ BytecodeArray::Disassemble(bytecode.object(), std::cout);
+ function.feedback_vector(broker())->object()->Print(std::cout);
+ }
+ graph()->add_inlined_bytecode_size(bytecode.length());
+ return true;
+}
+
+ReduceResult MaglevGraphBuilder::TryBuildInlinedCall(
+ compiler::JSFunctionRef function, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source) {
+ DCHECK_EQ(args.mode(), CallArguments::kDefault);
+ float feedback_frequency = 0.0f;
+ if (feedback_source.IsValid()) {
+ compiler::ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForCall(feedback_source);
+ feedback_frequency =
+ feedback.IsInsufficient() ? 0.0f : feedback.AsCall().frequency();
+ }
+ float call_frequency = feedback_frequency * call_frequency_;
+ if (!ShouldInlineCall(function, call_frequency)) {
+ return ReduceResult::Fail();
+ }
+
+ compiler::BytecodeArrayRef bytecode =
+ function.shared(broker()).GetBytecodeArray(broker());
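+  // Record the inlinee so that later phases (e.g. source positions and deopt
+  // data) can refer back to it.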
+ graph()->inlined_functions().push_back(
+ OptimizedCompilationInfo::InlinedFunctionHolder(
+ function.shared(broker()).object(), bytecode.object(),
+ current_source_position_));
+
+ // Create a new compilation unit and graph builder for the inlined
+ // function.
+ MaglevCompilationUnit* inner_unit =
+ MaglevCompilationUnit::NewInner(zone(), compilation_unit_, function);
+ MaglevGraphBuilder inner_graph_builder(
+ local_isolate_, inner_unit, graph_, call_frequency,
+ BytecodeOffset(iterator_.current_offset()), this);
+
+ // Propagate catch block.
+ inner_graph_builder.parent_catch_block_ = GetCurrentTryCatchBlockOffset();
+
+ // Finish the current block with a jump to the inlined function.
+ BasicBlockRef start_ref, end_ref;
+ BasicBlock* block = FinishBlock<JumpToInlined>({}, &start_ref, inner_unit);
+
+ ReduceResult result =
+ inner_graph_builder.BuildInlined(args, &start_ref, &end_ref);
+ if (result.IsDoneWithAbort()) {
+ MarkBytecodeDead();
+ return ReduceResult::DoneWithAbort();
+ }
+ DCHECK(result.IsDoneWithValue());
+
+ // Propagate KnownNodeAspects back to the caller.
+ current_interpreter_frame_.set_known_node_aspects(
+ inner_graph_builder.current_interpreter_frame_.known_node_aspects());
// Create a new block at our current offset, and resume execution. Do this
// manually to avoid trying to resolve any merges to this offset, which will
@@ -2595,22 +4158,24 @@ void MaglevGraphBuilder::InlineCallFromRegisters(
iterator_.current_offset(), 1, block, GetInLiveness()));
// Set the exit JumpFromInlined to jump to this resume block.
  // TODO(leszeks): Passing end_ref to JumpFromInlined creates a two-element
- // linked list of refs. Consider adding a helper to explicitly set the target
- // instead.
+ // linked list of refs. Consider adding a helper to explicitly set the
+ // target instead.
end_ref.SetToBlockAndReturnNext(current_block_)
->SetToBlockAndReturnNext(current_block_);
+
+ return result;
}
-bool MaglevGraphBuilder::TryReduceStringFromCharCode(
- compiler::JSFunctionRef target, const CallArguments& args) {
- if (args.count() != 1) return false;
- SetAccumulator(AddNewNode<BuiltinStringFromCharCode>({GetInt32(args[0])}));
- return true;
+ReduceResult MaglevGraphBuilder::TryReduceStringFromCharCode(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ if (args.count() != 1) return ReduceResult::Fail();
+ return AddNewNode<BuiltinStringFromCharCode>({GetTruncatedInt32FromNumber(
+ args[0], TaggedToFloat64ConversionType::kNumber)});
}
-bool MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt(
- compiler::JSFunctionRef target, const CallArguments& args) {
- ValueNode* receiver = GetTaggedReceiver(args);
+ReduceResult MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ ValueNode* receiver = GetTaggedOrUndefined(args.receiver());
ValueNode* index;
if (args.count() == 0) {
// Index is the undefined object. ToIntegerOrInfinity(undefined) = 0.
@@ -2623,30 +4188,289 @@ bool MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt(
BuildCheckString(receiver);
// And index is below length.
ValueNode* length = AddNewNode<StringLength>({receiver});
- AddNewNode<CheckInt32Condition>({index, length}, AssertCondition::kLess,
+ AddNewNode<CheckInt32Condition>({index, length},
+ AssertCondition::kUnsignedLessThan,
DeoptimizeReason::kOutOfBounds);
- SetAccumulator(
- AddNewNode<BuiltinStringPrototypeCharCodeAt>({receiver, index}));
- return true;
+ return AddNewNode<BuiltinStringPrototypeCharCodeOrCodePointAt>(
+ {receiver, index},
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
}
-bool MaglevGraphBuilder::TryReduceFunctionPrototypeCall(
- compiler::JSFunctionRef target, const CallArguments& args) {
+ReduceResult MaglevGraphBuilder::TryReduceStringPrototypeCodePointAt(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ ValueNode* receiver = GetTaggedOrUndefined(args.receiver());
+ ValueNode* index;
+ if (args.count() == 0) {
+ // Index is the undefined object. ToIntegerOrInfinity(undefined) = 0.
+ index = GetInt32Constant(0);
+ } else {
+ index = GetInt32ElementIndex(args[0]);
+ }
+ // Any other argument is ignored.
+ // Ensure that {receiver} is actually a String.
+ BuildCheckString(receiver);
+ // And index is below length.
+ ValueNode* length = AddNewNode<StringLength>({receiver});
+ AddNewNode<CheckInt32Condition>({index, length},
+ AssertCondition::kUnsignedLessThan,
+ DeoptimizeReason::kOutOfBounds);
+ return AddNewNode<BuiltinStringPrototypeCharCodeOrCodePointAt>(
+ {receiver, index},
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt);
+}
+
+template <typename LoadNode>
+ReduceResult MaglevGraphBuilder::TryBuildLoadDataView(const CallArguments& args,
+ ExternalArrayType type) {
+ if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
+ // TODO(victorgomes): Add checks whether the array has been detached.
+ return ReduceResult::Fail();
+ }
+ // TODO(victorgomes): Add data view to known types.
+ ValueNode* receiver = GetTaggedOrUndefined(args.receiver());
+ AddNewNode<CheckInstanceType>({receiver}, CheckType::kCheckHeapObject,
+ JS_DATA_VIEW_TYPE);
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
+ ValueNode* offset =
+ args[0] ? GetInt32ElementIndex(args[0]) : GetInt32Constant(0);
+ AddNewNode<CheckJSDataViewBounds>({receiver, offset}, type);
+ ValueNode* is_little_endian =
+ args[1] ? GetTaggedValue(args[1]) : GetBooleanConstant(false);
+ return AddNewNode<LoadNode>({receiver, offset, is_little_endian}, type);
+}
+
+template <typename StoreNode, typename Function>
+ReduceResult MaglevGraphBuilder::TryBuildStoreDataView(
+ const CallArguments& args, ExternalArrayType type, Function&& getValue) {
+ if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
+ // TODO(victorgomes): Add checks whether the array has been detached.
+ return ReduceResult::Fail();
+ }
+ // TODO(victorgomes): Add data view to known types.
+ ValueNode* receiver = GetTaggedOrUndefined(args.receiver());
+ AddNewNode<CheckInstanceType>({receiver}, CheckType::kCheckHeapObject,
+ JS_DATA_VIEW_TYPE);
+ // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
+ ValueNode* offset =
+ args[0] ? GetInt32ElementIndex(args[0]) : GetInt32Constant(0);
+ AddNewNode<CheckJSDataViewBounds>({receiver, offset},
+ ExternalArrayType::kExternalFloat64Array);
+ ValueNode* value = getValue(args[1]);
+ ValueNode* is_little_endian =
+ args[2] ? GetTaggedValue(args[2]) : GetBooleanConstant(false);
+ AddNewNode<StoreNode>({receiver, offset, value, is_little_endian}, type);
+ return GetRootConstant(RootIndex::kUndefinedValue);
+}
+
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt8(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildLoadDataView<LoadSignedIntDataViewElement>(
+ args, ExternalArrayType::kExternalInt8Array);
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt8(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildStoreDataView<StoreSignedIntDataViewElement>(
+ args, ExternalArrayType::kExternalInt8Array, [&](ValueNode* value) {
+ return value ? GetInt32(value) : GetInt32Constant(0);
+ });
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt16(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildLoadDataView<LoadSignedIntDataViewElement>(
+ args, ExternalArrayType::kExternalInt16Array);
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt16(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildStoreDataView<StoreSignedIntDataViewElement>(
+ args, ExternalArrayType::kExternalInt16Array, [&](ValueNode* value) {
+ return value ? GetInt32(value) : GetInt32Constant(0);
+ });
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt32(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildLoadDataView<LoadSignedIntDataViewElement>(
+ args, ExternalArrayType::kExternalInt32Array);
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt32(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildStoreDataView<StoreSignedIntDataViewElement>(
+ args, ExternalArrayType::kExternalInt32Array, [&](ValueNode* value) {
+ return value ? GetInt32(value) : GetInt32Constant(0);
+ });
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetFloat64(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildLoadDataView<LoadDoubleDataViewElement>(
+ args, ExternalArrayType::kExternalFloat64Array);
+}
+ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetFloat64(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return TryBuildStoreDataView<StoreDoubleDataViewElement>(
+ args, ExternalArrayType::kExternalFloat64Array, [&](ValueNode* value) {
+ return value ? GetFloat64(value, TaggedToFloat64ConversionType::kNumber)
+ : GetFloat64Constant(
+ std::numeric_limits<double>::quiet_NaN());
+ });
+}
+
+ReduceResult MaglevGraphBuilder::TryReduceFunctionPrototypeCall(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ // We can't reduce Function#call when there is no receiver function.
+ if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
+ return ReduceResult::Fail();
+ }
// Use Function.prototype.call context, to ensure any exception is thrown in
// the correct context.
- ValueNode* context = GetConstant(target.context());
- ValueNode* receiver = GetTaggedReceiver(args);
- compiler::FeedbackSource feedback_source;
- BuildGenericCall(receiver, context, Call::TargetType::kAny,
- args.PopReceiver(ConvertReceiverMode::kAny),
- feedback_source);
- return true;
+ ValueNode* context = GetConstant(target.context(broker()));
+ ValueNode* receiver = GetTaggedOrUndefined(args.receiver());
+ args.PopReceiver(ConvertReceiverMode::kAny);
+ return BuildGenericCall(receiver, context, Call::TargetType::kAny, args);
+}
+
+ReduceResult MaglevGraphBuilder::TryReduceObjectPrototypeHasOwnProperty(
+ compiler::JSFunctionRef target, CallArguments& args) {
+  // We can't reduce Object.prototype.hasOwnProperty when there is no receiver.
+ if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
+ return ReduceResult::Fail();
+ }
+ if (args.receiver() != current_for_in_state.receiver) {
+ return ReduceResult::Fail();
+ }
+ if (args.count() != 1 || args[0] != current_for_in_state.key) {
+ return ReduceResult::Fail();
+ }
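+  // Within the enclosing for-in, `receiver.hasOwnProperty(key)` for the
+  // current key is folded to true.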
+ return GetRootConstant(RootIndex::kTrueValue);
+}
+
+ReduceResult MaglevGraphBuilder::TryReduceMathRound(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return DoTryReduceMathRound(target, args, Float64Round::Kind::kNearest);
}
-bool MaglevGraphBuilder::TryReduceBuiltin(compiler::JSFunctionRef target,
- const CallArguments& args) {
- if (!target.shared().HasBuiltinId()) return false;
- switch (target.shared().builtin_id()) {
+ReduceResult MaglevGraphBuilder::TryReduceMathFloor(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return DoTryReduceMathRound(target, args, Float64Round::Kind::kFloor);
+}
+
+ReduceResult MaglevGraphBuilder::TryReduceMathCeil(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ return DoTryReduceMathRound(target, args, Float64Round::Kind::kCeil);
+}
+
+ReduceResult MaglevGraphBuilder::DoTryReduceMathRound(
+ compiler::JSFunctionRef target, CallArguments& args,
+ Float64Round::Kind kind) {
+ if (args.count() == 0) {
+ return GetFloat64Constant(std::numeric_limits<double>::quiet_NaN());
+ }
+ ValueNode* arg = args[0];
+ switch (arg->value_representation()) {
+ case ValueRepresentation::kInt32:
+ case ValueRepresentation::kUint32:
+ return arg;
+ case ValueRepresentation::kTagged:
+ if (CheckType(arg, NodeType::kSmi)) return arg;
+ if (CheckType(arg, NodeType::kNumber)) {
+ arg = GetFloat64(arg, TaggedToFloat64ConversionType::kNumber);
+ } else {
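+        // ToNumber can call arbitrary JS and may lazily deopt; the rounding
+        // continuation finishes the operation on the converted value in that
+        // case.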
+ LazyDeoptContinuationScope continuation_scope(
+ this, Float64Round::continuation(kind));
+ ToNumberOrNumeric* conversion = AddNewNode<ToNumberOrNumeric>(
+ {GetContext(), arg}, Object::Conversion::kToNumber);
+ arg = AddNewNode<UncheckedNumberOrOddballToFloat64>(
+ {conversion}, TaggedToFloat64ConversionType::kNumber);
+ }
+ break;
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ case ValueRepresentation::kFloat64:
+ break;
+ }
+ if (IsSupported(CpuOperation::kFloat64Round)) {
+ return AddNewNode<Float64Round>({arg}, kind);
+ }
+
+ // Update the first argument, in case there was a side-effecting ToNumber
+ // conversion.
+ args.set_arg(0, arg);
+ return ReduceResult::Fail();
+}
+
+ReduceResult MaglevGraphBuilder::TryReduceMathPow(
+ compiler::JSFunctionRef target, CallArguments& args) {
+ if (args.count() < 2) {
+ return GetRootConstant(RootIndex::kNanValue);
+ }
+  // If both arguments are tagged, it is cheaper to call the Math.pow builtin
+  // than Float64Exponentiate: we make a call either way, and the builtin call
+  // avoids unboxing both inputs. See https://crbug.com/1393643.
+ if (args[0]->properties().is_tagged() && args[1]->properties().is_tagged()) {
+ // The Math.pow call will be created in CallKnownJSFunction reduction.
+ return ReduceResult::Fail();
+ }
+ ValueNode* left = GetFloat64(args[0], TaggedToFloat64ConversionType::kNumber);
+ ValueNode* right =
+ GetFloat64(args[1], TaggedToFloat64ConversionType::kNumber);
+ return AddNewNode<Float64Exponentiate>({left, right});
+}
+
+#define MAP_MATH_UNARY_TO_IEEE_754(V) \
+ V(MathAcos, acos) \
+ V(MathAcosh, acosh) \
+ V(MathAsin, asin) \
+ V(MathAsinh, asinh) \
+ V(MathAtan, atan) \
+ V(MathAtanh, atanh) \
+ V(MathCbrt, cbrt) \
+ V(MathCos, cos) \
+ V(MathCosh, cosh) \
+ V(MathExp, exp) \
+ V(MathExpm1, expm1) \
+ V(MathLog, log) \
+ V(MathLog1p, log1p) \
+ V(MathLog10, log10) \
+ V(MathLog2, log2) \
+ V(MathSin, sin) \
+ V(MathSinh, sinh) \
+ V(MathTan, tan) \
+ V(MathTanh, tanh)
+
+#define MATH_UNARY_IEEE_BUILTIN_REDUCER(Name, IeeeOp) \
+ ReduceResult MaglevGraphBuilder::TryReduce##Name( \
+ compiler::JSFunctionRef target, CallArguments& args) { \
+ if (args.count() < 1) { \
+ return GetRootConstant(RootIndex::kNanValue); \
+ } \
+ ValueNode* value = \
+ GetFloat64(args[0], TaggedToFloat64ConversionType::kNumber); \
+ return AddNewNode<Float64Ieee754Unary>( \
+ {value}, ExternalReference::ieee754_##IeeeOp##_function()); \
+ }
+
+MAP_MATH_UNARY_TO_IEEE_754(MATH_UNARY_IEEE_BUILTIN_REDUCER)
+
+#undef MATH_UNARY_IEEE_BUILTIN_REDUCER
+#undef MAP_MATH_UNARY_TO_IEEE_754
+
+ReduceResult MaglevGraphBuilder::TryReduceBuiltin(
+ compiler::JSFunctionRef target, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode) {
+ if (args.mode() != CallArguments::kDefault) {
+ // TODO(victorgomes): Maybe inline the spread stub? Or call known function
+ // directly if arguments list is an array.
+ return ReduceResult::Fail();
+ }
+ if (feedback_source.IsValid() &&
+ speculation_mode == SpeculationMode::kDisallowSpeculation) {
+ // TODO(leszeks): Some builtins might be inlinable without speculation.
+ return ReduceResult::Fail();
+ }
+ CallSpeculationScope speculate(this, feedback_source);
+ if (!target.shared(broker()).HasBuiltinId()) {
+ return ReduceResult::Fail();
+ }
+ switch (target.shared(broker()).builtin_id()) {
#define CASE(Name) \
case Builtin::k##Name: \
return TryReduce##Name(target, args);
@@ -2654,29 +4478,31 @@ bool MaglevGraphBuilder::TryReduceBuiltin(compiler::JSFunctionRef target,
#undef CASE
default:
// TODO(v8:7700): Inline more builtins.
- return false;
+ return ReduceResult::Fail();
}
}
ValueNode* MaglevGraphBuilder::GetConvertReceiver(
compiler::JSFunctionRef function, const CallArguments& args) {
- compiler::SharedFunctionInfoRef shared = function.shared();
+ compiler::SharedFunctionInfoRef shared = function.shared(broker());
if (shared.native() || shared.language_mode() == LanguageMode::kStrict) {
if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
return GetRootConstant(RootIndex::kUndefinedValue);
} else {
- return GetTaggedValue(*args.receiver());
+ return GetTaggedValue(args.receiver());
}
}
if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
- return GetConstant(function.native_context().global_proxy_object());
+ return GetConstant(
+ function.native_context(broker()).global_proxy_object(broker()));
}
- ValueNode* receiver = GetTaggedValue(*args.receiver());
+ ValueNode* receiver = GetTaggedValue(args.receiver());
if (CheckType(receiver, NodeType::kJSReceiver)) return receiver;
if (Constant* constant = receiver->TryCast<Constant>()) {
const Handle<HeapObject> object = constant->object().object();
if (object->IsUndefined() || object->IsNull()) {
- return GetConstant(function.native_context().global_proxy_object());
+ return GetConstant(
+ function.native_context(broker()).global_proxy_object(broker()));
} else if (object->IsJSReceiver()) {
return constant;
}
@@ -2685,106 +4511,227 @@ ValueNode* MaglevGraphBuilder::GetConvertReceiver(
args.receiver_mode());
}
-bool MaglevGraphBuilder::TryBuildCallKnownJSFunction(
- compiler::JSFunctionRef function, const CallArguments& args) {
- // Don't inline CallFunction stub across native contexts.
- if (function.native_context() != broker()->target_native_context()) {
- return false;
- }
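+// Allocates a call node sized for the receiver plus all arguments, tagging
+// each value before storing it as an input.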
+template <typename CallNode, typename... Args>
+CallNode* MaglevGraphBuilder::AddNewCallNode(const CallArguments& args,
+ Args&&... extra_args) {
+ size_t input_count = args.count_with_receiver() + CallNode::kFixedInputCount;
+ return AddNewNode<CallNode>(
+ input_count,
+ [&](CallNode* call) {
+ int arg_index = 0;
+ call->set_arg(arg_index++, GetTaggedOrUndefined(args.receiver()));
+ for (size_t i = 0; i < args.count(); ++i) {
+ call->set_arg(arg_index++, GetTaggedValue(args[i]));
+ }
+ },
+ std::forward<Args>(extra_args)...);
+}
+
+ValueNode* MaglevGraphBuilder::BuildGenericCall(
+ ValueNode* target, ValueNode* context, Call::TargetType target_type,
+ const CallArguments& args,
+ const compiler::FeedbackSource& feedback_source) {
+ // TODO(victorgomes): We do not collect call feedback from optimized/inlined
+ // calls. In order to be consistent, we don't pass the feedback_source to the
+ // IR, so that we avoid collecting for generic calls as well. We might want to
+ // revisit this in the future.
+ switch (args.mode()) {
+ case CallArguments::kDefault:
+ return AddNewCallNode<Call>(args, args.receiver_mode(), target_type,
+ compiler::FeedbackSource(), target, context);
+ case CallArguments::kWithSpread:
+ DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kAny);
+ return AddNewCallNode<CallWithSpread>(args, compiler::FeedbackSource(),
+ target, context);
+ case CallArguments::kWithArrayLike:
+ DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kAny);
+ return AddNewNode<CallWithArrayLike>(
+ {target, args.receiver(), args[0], context});
+ }
+}
+
+ValueNode* MaglevGraphBuilder::BuildCallSelf(compiler::JSFunctionRef function,
+ CallArguments& args) {
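+  // The callee is the function currently being compiled: emit a direct
+  // self-call and record that the graph contains recursive calls.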
ValueNode* receiver = GetConvertReceiver(function, args);
- size_t input_count = args.count() + CallKnownJSFunction::kFixedInputCount;
- CallKnownJSFunction* call =
- CreateNewNode<CallKnownJSFunction>(input_count, function, receiver);
- for (int i = 0; i < args.count(); i++) {
- call->set_arg(i, GetTaggedValue(args[i]));
+ size_t input_count = args.count() + CallSelf::kFixedInputCount;
+ graph()->set_has_recursive_calls(true);
+ return AddNewNode<CallSelf>(
+ input_count,
+ [&](CallSelf* call) {
+ for (int i = 0; i < static_cast<int>(args.count()); i++) {
+ call->set_arg(i, GetTaggedValue(args[i]));
+ }
+ },
+ broker(), function, receiver);
+}
+
+bool MaglevGraphBuilder::TargetIsCurrentCompilingUnit(
+ compiler::JSFunctionRef target) {
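+  // With function-context specialization only the exact closure counts as a
+  // self-call; otherwise any closure sharing the SharedFunctionInfo does.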
+ if (compilation_unit_->info()->specialize_to_function_context()) {
+ return target.equals(compilation_unit_->function());
}
- SetAccumulator(AddNode(call));
- return true;
+ return compilation_unit_->shared_function_info().equals(
+ target.shared(broker()));
}
-void MaglevGraphBuilder::BuildGenericCall(
- ValueNode* target, ValueNode* context, Call::TargetType target_type,
- const CallArguments& args, compiler::FeedbackSource& feedback_source) {
- size_t input_count = args.count_with_receiver() + Call::kFixedInputCount;
- Call* call =
- CreateNewNode<Call>(input_count, args.receiver_mode(), target_type,
- feedback_source, target, context);
- int arg_index = 0;
- call->set_arg(arg_index++, GetTaggedReceiver(args));
- for (int i = 0; i < args.count(); ++i) {
- call->set_arg(arg_index++, GetTaggedValue(args[i]));
+ReduceResult MaglevGraphBuilder::TryBuildCallKnownJSFunction(
+ compiler::JSFunctionRef function, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source) {
+ // Don't inline CallFunction stub across native contexts.
+ if (function.native_context(broker()) != broker()->target_native_context()) {
+ return ReduceResult::Fail();
}
- SetAccumulator(AddNode(call));
+ if (args.mode() != CallArguments::kDefault) {
+ // TODO(victorgomes): Maybe inline the spread stub? Or call known function
+ // directly if arguments list is an array.
+ return ReduceResult::Fail();
+ }
+ if (!is_inline() && TargetIsCurrentCompilingUnit(function)) {
+ return BuildCallSelf(function, args);
+ }
+ if (v8_flags.maglev_inlining) {
+ RETURN_IF_DONE(TryBuildInlinedCall(function, args, feedback_source));
+ }
+ ValueNode* receiver = GetConvertReceiver(function, args);
+ size_t input_count = args.count() + CallKnownJSFunction::kFixedInputCount;
+ return AddNewNode<CallKnownJSFunction>(
+ input_count,
+ [&](CallKnownJSFunction* call) {
+ for (int i = 0; i < static_cast<int>(args.count()); i++) {
+ call->set_arg(i, GetTaggedValue(args[i]));
+ }
+ },
+ broker(), function, receiver);
}
-bool MaglevGraphBuilder::BuildCheckValue(ValueNode* node,
- const compiler::HeapObjectRef& ref) {
+ReduceResult MaglevGraphBuilder::BuildCheckValue(
+ ValueNode* node, const compiler::HeapObjectRef& ref) {
if (node->Is<Constant>()) {
- if (node->Cast<Constant>()->object().equals(ref)) return true;
+ if (node->Cast<Constant>()->object().equals(ref))
+ return ReduceResult::Done();
EmitUnconditionalDeopt(DeoptimizeReason::kUnknown);
- return false;
- }
- AddNewNode<CheckValue>({node}, ref);
- return true;
-}
+ return ReduceResult::DoneWithAbort();
+ }
+ // TODO(v8:7700): Add CheckValue support for numbers (incl. conversion between
+ // Smi and HeapNumber).
+ DCHECK(!ref.IsSmi());
+ DCHECK(!ref.IsHeapNumber());
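+  // Strings are checked by contents against the internalized string; other
+  // heap objects are checked by identity.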
+ if (ref.IsString()) {
+ AddNewNode<CheckValueEqualsString>({node}, ref.AsInternalizedString());
+ } else {
+ AddNewNode<CheckValue>({node}, ref);
+ }
+ return ReduceResult::Done();
+}
+
+ReduceResult MaglevGraphBuilder::ReduceCall(
+ compiler::ObjectRef object, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode) {
+ if (!object.IsJSFunction()) {
+ return BuildGenericCall(GetConstant(object), GetContext(),
+ Call::TargetType::kAny, args);
+ }
+ compiler::JSFunctionRef target = object.AsJSFunction();
+ // Do not reduce calls to functions with break points.
+ if (!target.shared(broker()).HasBreakInfo()) {
+ if (target.object()->IsJSClassConstructor()) {
+ // If we have a class constructor, we should raise an exception.
+ return BuildCallRuntime(Runtime::kThrowConstructorNonCallableError,
+ {GetConstant(target)});
+ }
-void MaglevGraphBuilder::BuildCall(ValueNode* target_node,
- const CallArguments& args,
+ DCHECK(target.object()->IsCallable());
+ RETURN_IF_DONE(
+ TryReduceBuiltin(target, args, feedback_source, speculation_mode));
+ RETURN_IF_DONE(TryBuildCallKnownJSFunction(target, args, feedback_source));
+ }
+ return BuildGenericCall(GetConstant(target), GetContext(),
+ Call::TargetType::kJSFunction, args);
+}
+
+ReduceResult MaglevGraphBuilder::ReduceCallForTarget(
+ ValueNode* target_node, compiler::JSFunctionRef target, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode) {
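+  // Guard the dynamic target against the feedback target (deopting on
+  // mismatch), then reduce the call as a call to the known function.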
+ if (BuildCheckValue(target_node, target).IsDoneWithAbort())
+ return ReduceResult::DoneWithAbort();
+ return ReduceCall(target, args, feedback_source, speculation_mode);
+}
+
+ReduceResult MaglevGraphBuilder::ReduceFunctionPrototypeApplyCallWithReceiver(
+ ValueNode* target_node, compiler::JSFunctionRef receiver,
+ CallArguments& args, const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode) {
+  // TODO(victorgomes): This currently has pretty limited usage and does not
+ // work with inlining. We need to implement arguments forwarding like TF
+ // ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments.
+ return ReduceResult::Fail();
+ // compiler::NativeContextRef native_context =
+ // broker()->target_native_context(); RETURN_IF_ABORT(BuildCheckValue(
+ // target_node, native_context.function_prototype_apply(broker())));
+ // ValueNode* receiver_node = GetTaggedOrUndefined(args.receiver());
+ // RETURN_IF_ABORT(BuildCheckValue(receiver_node, receiver));
+ // if (args.mode() == CallArguments::kDefault) {
+ // if (args.count() == 0) {
+ // // No need for spread.
+ // CallArguments empty_args(ConvertReceiverMode::kNullOrUndefined);
+ // return ReduceCall(receiver, empty_args, feedback_source,
+ // speculation_mode);
+ // }
+ // if (args.count() == 1 || IsNullValue(args[1]) ||
+ // IsUndefinedValue(args[1])) {
+ // // No need for spread. We have only the new receiver.
+ // CallArguments new_args(ConvertReceiverMode::kAny,
+ // {GetTaggedValue(args[0])});
+ // return ReduceCall(receiver, new_args, feedback_source,
+ // speculation_mode);
+ // }
+ // if (args.count() > 2) {
+  //     // FunctionPrototypeApply only considers two arguments: the new receiver
+  //     // and an array-like arguments_list. All others are ignored.
+ // args.Truncate(2);
+ // }
+ // }
+ // return ReduceCall(native_context.function_prototype_apply(broker()), args,
+ // feedback_source, speculation_mode);
+}
+
+void MaglevGraphBuilder::BuildCall(ValueNode* target_node, CallArguments& args,
compiler::FeedbackSource& feedback_source) {
- Call::TargetType target_type = Call::TargetType::kAny;
const compiler::ProcessedFeedback& processed_feedback =
broker()->GetFeedbackForCall(feedback_source);
- switch (processed_feedback.kind()) {
- case compiler::ProcessedFeedback::kInsufficient:
- EmitUnconditionalDeopt(
- DeoptimizeReason::kInsufficientTypeFeedbackForCall);
- return;
-
- case compiler::ProcessedFeedback::kCall: {
- const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
- CallFeedbackContent content = call_feedback.call_feedback_content();
- if (content != CallFeedbackContent::kTarget) break;
-
- base::Optional<compiler::HeapObjectRef> maybe_target =
- call_feedback.target();
- if (!maybe_target.has_value()) break;
-
- compiler::HeapObjectRef target = maybe_target.value();
- if (!target.IsJSFunction()) break;
- compiler::JSFunctionRef function = target.AsJSFunction();
-
- // Do not reduce calls to functions with break points.
- if (function.shared().HasBreakInfo()) break;
-
- // Reset the feedback source
- feedback_source = compiler::FeedbackSource();
- target_type = Call::TargetType::kJSFunction;
- if (!BuildCheckValue(target_node, target)) return;
-
- if (function.object()->IsJSClassConstructor()) {
- // If we have a class constructor, we should raise an exception.
- SetAccumulator(BuildCallRuntime(
- Runtime::kThrowConstructorNonCallableError, {target_node}));
- return;
- }
+ if (processed_feedback.IsInsufficient()) {
+ EmitUnconditionalDeopt(DeoptimizeReason::kInsufficientTypeFeedbackForCall);
+ return;
+ }
- DCHECK(function.object()->IsCallable());
- if (TryReduceBuiltin(function, args)) {
- return;
- }
- if (TryBuildCallKnownJSFunction(function, args)) {
- return;
- }
- break;
+ DCHECK_EQ(processed_feedback.kind(), compiler::ProcessedFeedback::kCall);
+ const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
+ if (call_feedback.target().has_value() &&
+ call_feedback.target()->IsJSFunction()) {
+ CallFeedbackContent content = call_feedback.call_feedback_content();
+ compiler::JSFunctionRef function = call_feedback.target()->AsJSFunction();
+ ReduceResult result;
+ if (content == CallFeedbackContent::kTarget) {
+ result = ReduceCallForTarget(target_node, function, args, feedback_source,
+ call_feedback.speculation_mode());
+ } else {
+ DCHECK_EQ(content, CallFeedbackContent::kReceiver);
+ // We only collect receiver feedback for FunctionPrototypeApply.
+ // See CollectCallFeedback in ic-callable.tq
+ result = ReduceFunctionPrototypeApplyCallWithReceiver(
+ target_node, function, args, feedback_source,
+ call_feedback.speculation_mode());
}
-
- default:
- break;
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
}
// On fallthrough, create a generic call.
ValueNode* context = GetContext();
- BuildGenericCall(target_node, context, target_type, args, feedback_source);
+ SetAccumulator(BuildGenericCall(target_node, context, Call::TargetType::kAny,
+ args, feedback_source));
}
void MaglevGraphBuilder::BuildCallFromRegisterList(
@@ -2793,7 +4740,7 @@ void MaglevGraphBuilder::BuildCallFromRegisterList(
interpreter::RegisterList reg_list = iterator_.GetRegisterListOperand(1);
FeedbackSlot slot = GetSlotOperand(3);
compiler::FeedbackSource feedback_source(feedback(), slot);
- CallArguments args(receiver_mode, reg_list);
+ CallArguments args(receiver_mode, reg_list, current_interpreter_frame_);
BuildCall(target, args, feedback_source);
}
@@ -2803,32 +4750,29 @@ void MaglevGraphBuilder::BuildCallFromRegisters(
const int receiver_count =
(receiver_mode == ConvertReceiverMode::kNullOrUndefined) ? 0 : 1;
const int reg_count = arg_count + receiver_count;
- int slot_operand_index = arg_count + receiver_count + 1;
- FeedbackSlot slot = GetSlotOperand(slot_operand_index);
+ FeedbackSlot slot = GetSlotOperand(reg_count + 1);
compiler::FeedbackSource feedback_source(feedback(), slot);
switch (reg_count) {
case 0: {
- CallArguments args(receiver_mode, reg_count);
+ DCHECK_EQ(receiver_mode, ConvertReceiverMode::kNullOrUndefined);
+ CallArguments args(receiver_mode);
BuildCall(target, args, feedback_source);
break;
}
case 1: {
- CallArguments args(receiver_mode, reg_count,
- iterator_.GetRegisterOperand(1));
+ CallArguments args(receiver_mode, {LoadRegisterRaw(1)});
BuildCall(target, args, feedback_source);
break;
}
case 2: {
- CallArguments args(receiver_mode, reg_count,
- iterator_.GetRegisterOperand(1),
- iterator_.GetRegisterOperand(2));
+ CallArguments args(receiver_mode,
+ {LoadRegisterRaw(1), LoadRegisterRaw(2)});
BuildCall(target, args, feedback_source);
break;
}
case 3: {
- CallArguments args(
- receiver_mode, reg_count, iterator_.GetRegisterOperand(1),
- iterator_.GetRegisterOperand(2), iterator_.GetRegisterOperand(3));
+ CallArguments args(receiver_mode, {LoadRegisterRaw(1), LoadRegisterRaw(2),
+ LoadRegisterRaw(3)});
BuildCall(target, args, feedback_source);
break;
}
@@ -2867,34 +4811,28 @@ void MaglevGraphBuilder::VisitCallUndefinedReceiver2() {
void MaglevGraphBuilder::VisitCallWithSpread() {
ValueNode* function = LoadRegisterTagged(0);
- interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
- ValueNode* context = GetContext();
+ interpreter::RegisterList reglist = iterator_.GetRegisterListOperand(1);
FeedbackSlot slot = GetSlotOperand(3);
compiler::FeedbackSource feedback_source(feedback(), slot);
-
- size_t input_count = args.register_count() + CallWithSpread::kFixedInputCount;
- CallWithSpread* call = CreateNewNode<CallWithSpread>(
- input_count, feedback_source, function, context);
- for (int i = 0; i < args.register_count(); ++i) {
- call->set_arg(i, GetTaggedValue(args[i]));
- }
-
- SetAccumulator(AddNode(call));
+ CallArguments args(ConvertReceiverMode::kAny, reglist,
+ current_interpreter_frame_, CallArguments::kWithSpread);
+ BuildCall(function, args, feedback_source);
}
void MaglevGraphBuilder::VisitCallRuntime() {
Runtime::FunctionId function_id = iterator_.GetRuntimeIdOperand(0);
interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
ValueNode* context = GetContext();
-
size_t input_count = args.register_count() + CallRuntime::kFixedInputCount;
- CallRuntime* call_runtime =
- CreateNewNode<CallRuntime>(input_count, function_id, context);
- for (int i = 0; i < args.register_count(); ++i) {
- call_runtime->set_arg(i, GetTaggedValue(args[i]));
- }
-
- SetAccumulator(AddNode(call_runtime));
+ CallRuntime* call_runtime = AddNewNode<CallRuntime>(
+ input_count,
+ [&](CallRuntime* call_runtime) {
+ for (int i = 0; i < args.register_count(); ++i) {
+ call_runtime->set_arg(i, GetTaggedValue(args[i]));
+ }
+ },
+ function_id, context);
+ SetAccumulator(call_runtime);
}
void MaglevGraphBuilder::VisitCallJSRuntime() {
@@ -2905,21 +4843,11 @@ void MaglevGraphBuilder::VisitCallJSRuntime() {
ValueNode* callee = LoadAndCacheContextSlot(
context, NativeContext::OffsetOfElementAt(slot), kMutable);
// Call the function.
- interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
- int kTheReceiver = 1;
- size_t input_count =
- args.register_count() + Call::kFixedInputCount + kTheReceiver;
-
- Call* call =
- CreateNewNode<Call>(input_count, ConvertReceiverMode::kNullOrUndefined,
- Call::TargetType::kJSFunction,
- compiler::FeedbackSource(), callee, GetContext());
- int arg_index = 0;
- call->set_arg(arg_index++, GetRootConstant(RootIndex::kUndefinedValue));
- for (int i = 0; i < args.register_count(); ++i) {
- call->set_arg(arg_index++, GetTaggedValue(args[i]));
- }
- SetAccumulator(AddNode(call));
+ interpreter::RegisterList reglist = iterator_.GetRegisterListOperand(1);
+ CallArguments args(ConvertReceiverMode::kNullOrUndefined, reglist,
+ current_interpreter_frame_);
+ SetAccumulator(BuildGenericCall(callee, GetContext(),
+ Call::TargetType::kJSFunction, args));
}
void MaglevGraphBuilder::VisitCallRuntimeForPair() {
@@ -2928,11 +4856,14 @@ void MaglevGraphBuilder::VisitCallRuntimeForPair() {
ValueNode* context = GetContext();
size_t input_count = args.register_count() + CallRuntime::kFixedInputCount;
- CallRuntime* call_runtime =
- AddNewNode<CallRuntime>(input_count, function_id, context);
- for (int i = 0; i < args.register_count(); ++i) {
- call_runtime->set_arg(i, GetTaggedValue(args[i]));
- }
+ CallRuntime* call_runtime = AddNewNode<CallRuntime>(
+ input_count,
+ [&](CallRuntime* call_runtime) {
+ for (int i = 0; i < args.register_count(); ++i) {
+ call_runtime->set_arg(i, GetTaggedValue(args[i]));
+ }
+ },
+ function_id, context);
auto result = iterator_.GetRegisterPairOperand(3);
StoreRegisterPair(result, call_runtime);
}
@@ -2968,16 +4899,18 @@ void MaglevGraphBuilder::
GetSmiConstant(args.register_count() - 1);
int kContext = 1;
int kExcludedPropertyCount = 1;
- CallBuiltin* call_builtin = CreateNewNode<CallBuiltin>(
+ CallBuiltin* call_builtin = AddNewNode<CallBuiltin>(
args.register_count() + kContext + kExcludedPropertyCount,
+ [&](CallBuiltin* call_builtin) {
+ int arg_index = 0;
+ call_builtin->set_arg(arg_index++, GetTaggedValue(args[0]));
+ call_builtin->set_arg(arg_index++, excluded_property_count);
+ for (int i = 1; i < args.register_count(); i++) {
+ call_builtin->set_arg(arg_index++, GetTaggedValue(args[i]));
+ }
+ },
Builtin::kCopyDataPropertiesWithExcludedProperties, GetContext());
- int arg_index = 0;
- call_builtin->set_arg(arg_index++, GetTaggedValue(args[0]));
- call_builtin->set_arg(arg_index++, excluded_property_count);
- for (int i = 1; i < args.register_count(); i++) {
- call_builtin->set_arg(arg_index++, GetTaggedValue(args[i]));
- }
- SetAccumulator(AddNode(call_builtin));
+ SetAccumulator(call_builtin);
}
void MaglevGraphBuilder::VisitIntrinsicCreateIterResultObject(
@@ -3015,8 +4948,8 @@ void MaglevGraphBuilder::VisitIntrinsicGeneratorClose(
DCHECK_EQ(args.register_count(), 1);
ValueNode* generator = GetTaggedValue(args[0]);
ValueNode* value = GetSmiConstant(JSGeneratorObject::kGeneratorClosed);
- AddNewNode<StoreTaggedFieldNoWriteBarrier>(
- {generator, value}, JSGeneratorObject::kContinuationOffset);
+ BuildStoreTaggedFieldNoWriteBarrier(generator, value,
+ JSGeneratorObject::kContinuationOffset);
SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue));
}
@@ -3107,15 +5040,19 @@ void MaglevGraphBuilder::VisitConstruct() {
compiler::FeedbackSource feedback_source{feedback(), slot};
size_t input_count = args.register_count() + 1 + Construct::kFixedInputCount;
- Construct* construct = CreateNewNode<Construct>(
- input_count, feedback_source, constructor, new_target, context);
- int arg_index = 0;
- // Add undefined receiver.
- construct->set_arg(arg_index++, GetRootConstant(RootIndex::kUndefinedValue));
- for (int i = 0; i < args.register_count(); i++) {
- construct->set_arg(arg_index++, GetTaggedValue(args[i]));
- }
- SetAccumulator(AddNode(construct));
+ Construct* construct = AddNewNode<Construct>(
+ input_count,
+ [&](Construct* construct) {
+ int arg_index = 0;
+ // Add undefined receiver.
+ construct->set_arg(arg_index++,
+ GetRootConstant(RootIndex::kUndefinedValue));
+ for (int i = 0; i < args.register_count(); i++) {
+ construct->set_arg(arg_index++, GetTaggedValue(args[i]));
+ }
+ },
+ feedback_source, constructor, new_target, context);
+ SetAccumulator(construct);
}
void MaglevGraphBuilder::VisitConstructWithSpread() {
@@ -3129,15 +5066,19 @@ void MaglevGraphBuilder::VisitConstructWithSpread() {
int kReceiver = 1;
size_t input_count =
args.register_count() + kReceiver + ConstructWithSpread::kFixedInputCount;
- ConstructWithSpread* construct = CreateNewNode<ConstructWithSpread>(
- input_count, feedback_source, constructor, new_target, context);
- int arg_index = 0;
- // Add undefined receiver.
- construct->set_arg(arg_index++, GetRootConstant(RootIndex::kUndefinedValue));
- for (int i = 0; i < args.register_count(); ++i) {
- construct->set_arg(arg_index++, GetTaggedValue(args[i]));
- }
- SetAccumulator(AddNode(construct));
+ ConstructWithSpread* construct = AddNewNode<ConstructWithSpread>(
+ input_count,
+ [&](ConstructWithSpread* construct) {
+ int arg_index = 0;
+ // Add undefined receiver.
+ construct->set_arg(arg_index++,
+ GetRootConstant(RootIndex::kUndefinedValue));
+ for (int i = 0; i < args.register_count(); i++) {
+ construct->set_arg(arg_index++, GetTaggedValue(args[i]));
+ }
+ },
+ feedback_source, constructor, new_target, context);
+ SetAccumulator(construct);
}
void MaglevGraphBuilder::VisitTestEqual() {
@@ -3192,17 +5133,17 @@ MaglevGraphBuilder::InferHasInPrototypeChain(
all = false;
break;
}
- compiler::HeapObjectRef map_prototype = map.prototype();
+ compiler::HeapObjectRef map_prototype = map.prototype(broker());
if (map_prototype.equals(prototype)) {
none = false;
break;
}
- map = map_prototype.map();
+ map = map_prototype.map(broker());
    // TODO(v8:11457) Support dictionary mode prototypes here.
if (!map.is_stable() || map.is_dictionary_map()) {
return kMayBeInPrototypeChain;
}
- if (map.oddball_type() == compiler::OddballType::kNull) {
+ if (map.oddball_type(broker()) == compiler::OddballType::kNull) {
all = false;
break;
}
@@ -3213,7 +5154,7 @@ MaglevGraphBuilder::InferHasInPrototypeChain(
if (!all && !none) return kMayBeInPrototypeChain;
{
- base::Optional<compiler::JSObjectRef> last_prototype;
+ compiler::OptionalJSObjectRef last_prototype;
if (all) {
// We don't need to protect the full chain if we found the prototype, we
// can stop at {prototype}. In fact we could stop at the one before
@@ -3221,7 +5162,7 @@ MaglevGraphBuilder::InferHasInPrototypeChain(
// might be a different object each time, so it's much simpler to include
// {prototype}. That does, however, mean that we must check {prototype}'s
// map stability.
- if (!prototype.map().is_stable()) return kMayBeInPrototypeChain;
+ if (!prototype.map(broker()).is_stable()) return kMayBeInPrototypeChain;
last_prototype = prototype.AsJSObject();
}
broker()->dependencies()->DependOnStablePrototypeChains(
@@ -3262,7 +5203,7 @@ bool MaglevGraphBuilder::TryBuildFastOrdinaryHasInstance(
// invocation of the instanceof operator again.
compiler::JSBoundFunctionRef function = callable.AsJSBoundFunction();
compiler::JSReceiverRef bound_target_function =
- function.bound_target_function();
+ function.bound_target_function(broker());
if (!bound_target_function.IsJSObject() ||
!TryBuildFastInstanceOf(object, bound_target_function.AsJSObject(),
@@ -3281,9 +5222,9 @@ bool MaglevGraphBuilder::TryBuildFastOrdinaryHasInstance(
// TODO(v8:7700): Remove the has_prototype_slot condition once the broker
// is always enabled.
- if (!function.map().has_prototype_slot() ||
- !function.has_instance_prototype(broker()->dependencies()) ||
- function.PrototypeRequiresRuntimeLookup(broker()->dependencies())) {
+ if (!function.map(broker()).has_prototype_slot() ||
+ !function.has_instance_prototype(broker()) ||
+ function.PrototypeRequiresRuntimeLookup(broker())) {
return false;
}
@@ -3311,12 +5252,10 @@ void MaglevGraphBuilder::BuildOrdinaryHasInstance(
bool MaglevGraphBuilder::TryBuildFastInstanceOf(
ValueNode* object, compiler::JSObjectRef callable,
ValueNode* callable_node_if_not_constant) {
- compiler::MapRef receiver_map = callable.map();
- compiler::NameRef name =
- MakeRef(broker(), local_isolate()->factory()->has_instance_symbol());
+ compiler::MapRef receiver_map = callable.map(broker());
+ compiler::NameRef name = broker()->has_instance_symbol();
compiler::PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- receiver_map, name, compiler::AccessMode::kLoad,
- broker()->dependencies());
+ receiver_map, name, compiler::AccessMode::kLoad);
// TODO(v8:11457) Support dictionary mode holders here.
if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
@@ -3337,7 +5276,7 @@ bool MaglevGraphBuilder::TryBuildFastInstanceOf(
// Monomorphic property access.
if (callable_node_if_not_constant) {
BuildCheckMaps(callable_node_if_not_constant,
- access_info.lookup_start_object_maps());
+ base::VectorOf(access_info.lookup_start_object_maps()));
}
BuildOrdinaryHasInstance(object, callable, callable_node_if_not_constant);
@@ -3345,17 +5284,17 @@ bool MaglevGraphBuilder::TryBuildFastInstanceOf(
}
if (access_info.IsFastDataConstant()) {
- base::Optional<compiler::JSObjectRef> holder = access_info.holder();
+ compiler::OptionalJSObjectRef holder = access_info.holder();
bool found_on_proto = holder.has_value();
compiler::JSObjectRef holder_ref =
found_on_proto ? holder.value() : callable;
- base::Optional<compiler::ObjectRef> has_instance_field =
- holder_ref.GetOwnFastDataProperty(access_info.field_representation(),
- access_info.field_index(),
- broker()->dependencies());
+ compiler::OptionalObjectRef has_instance_field =
+ holder_ref.GetOwnFastDataProperty(
+ broker(), access_info.field_representation(),
+ access_info.field_index(), broker()->dependencies());
if (!has_instance_field.has_value() ||
!has_instance_field->IsHeapObject() ||
- !has_instance_field->AsHeapObject().map().is_callable()) {
+ !has_instance_field->AsHeapObject().map(broker()).is_callable()) {
return false;
}
@@ -3368,31 +5307,33 @@ bool MaglevGraphBuilder::TryBuildFastInstanceOf(
ValueNode* callable_node;
if (callable_node_if_not_constant) {
// Check that {callable_node_if_not_constant} is actually {callable}.
- AddNewNode<CheckValue>({callable_node_if_not_constant}, callable);
+ BuildCheckValue(callable_node_if_not_constant, callable);
callable_node = callable_node_if_not_constant;
} else {
callable_node = GetConstant(callable);
}
+ BuildCheckMaps(callable_node,
+ base::VectorOf(access_info.lookup_start_object_maps()));
// Call @@hasInstance
- Call* call = AddNewNode<Call>(
- Call::kFixedInputCount + 2, ConvertReceiverMode::kNotNullOrUndefined,
- Call::TargetType::kJSFunction, compiler::FeedbackSource(),
- GetConstant(*has_instance_field), GetContext());
- call->set_arg(0, callable_node);
- call->set_arg(1, object);
-
- // Make sure that a lazy deopt after the @@hasInstance call also performs
- // ToBoolean before returning to the interpreter.
- // TODO(leszeks): Wrap this in a helper.
- new (call->lazy_deopt_info()) LazyDeoptInfo(
- zone(),
- BuiltinContinuationDeoptFrame(
- Builtin::kToBooleanLazyDeoptContinuation, {}, GetContext(),
- zone()->New<InterpretedDeoptFrame>(
- call->lazy_deopt_info()->top_frame().as_interpreted())));
-
- SetAccumulator(AddNewNode<ToBoolean>({call}));
+ CallArguments args(ConvertReceiverMode::kNotNullOrUndefined,
+ {callable_node, object});
+ ValueNode* call_result;
+ {
+ // Make sure that a lazy deopt after the @@hasInstance call also performs
+ // ToBoolean before returning to the interpreter.
+ LazyDeoptContinuationScope continuation_scope(
+ this, Builtin::kToBooleanLazyDeoptContinuation);
+ ReduceResult result = ReduceCall(*has_instance_field, args);
+ // TODO(victorgomes): Propagate the case if we need to soft deopt.
+ DCHECK(!result.IsDoneWithAbort());
+ call_result = result.value();
+ }
+
+  // TODO(v8:7700): Do we need to call ToBoolean here? If we have reduced the
+  // call further, we might already have a boolean constant as the result.
+ // TODO(leszeks): Avoid forcing a conversion to tagged here.
+ SetAccumulator(AddNewNode<ToBoolean>({GetTaggedValue(call_result)}));
return true;
}
@@ -3418,7 +5359,7 @@ bool MaglevGraphBuilder::TryBuildFastInstanceOfWithFeedback(
return TryBuildFastInstanceOf(object, callable_ref, nullptr);
}
if (feedback_source.IsValid()) {
- base::Optional<compiler::JSObjectRef> callable_from_feedback =
+ compiler::OptionalJSObjectRef callable_from_feedback =
feedback.AsInstanceOf().value();
if (callable_from_feedback) {
return TryBuildFastInstanceOf(object, *callable_from_feedback, callable);
@@ -3488,7 +5429,11 @@ void MaglevGraphBuilder::BuildToNumberOrToNumeric(Object::Conversion mode) {
}
AddNewNode<CheckNumber>({value}, mode);
break;
- default:
+ case BinaryOperationHint::kNone:
+ // TODO(leszeks): Faster ToNumber for kNumberOrOddball
+ case BinaryOperationHint::kNumberOrOddball:
+ case BinaryOperationHint::kString:
+ case BinaryOperationHint::kAny:
if (CheckType(value, NodeType::kNumber)) return;
SetAccumulator(
AddNewNode<ToNumberOrNumeric>({GetContext(), value}, mode));
@@ -3540,18 +5485,27 @@ void MaglevGraphBuilder::VisitCreateArrayLiteral() {
int bytecode_flags = GetFlag8Operand(2);
int literal_flags =
interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
+ compiler::FeedbackSource feedback_source(feedback(), slot_index);
+
+ compiler::ProcessedFeedback const& processed_feedback =
+ broker()->GetFeedbackForArrayOrObjectLiteral(feedback_source);
+ if (!processed_feedback.IsInsufficient()) {
+ ReduceResult result =
+ TryBuildFastCreateObjectOrArrayLiteral(processed_feedback.AsLiteral());
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
+ }
+
if (interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::decode(
bytecode_flags)) {
// TODO(victorgomes): CreateShallowArrayLiteral should not need the
// boilerplate descriptor. However, the current builtin checks that the
// feedback exists and falls back to CreateArrayLiteral if it doesn't.
SetAccumulator(AddNewNode<CreateShallowArrayLiteral>(
- {}, constant_elements, compiler::FeedbackSource{feedback(), slot_index},
- literal_flags));
+ {}, constant_elements, feedback_source, literal_flags));
} else {
SetAccumulator(AddNewNode<CreateArrayLiteral>(
- {}, constant_elements, compiler::FeedbackSource{feedback(), slot_index},
- literal_flags));
+ {}, constant_elements, feedback_source, literal_flags));
}
}
@@ -3568,6 +5522,368 @@ void MaglevGraphBuilder::VisitCreateEmptyArrayLiteral() {
{}, compiler::FeedbackSource{feedback(), slot_index}));
}
+base::Optional<FastLiteralObject>
+MaglevGraphBuilder::TryReadBoilerplateForFastLiteral(
+ compiler::JSObjectRef boilerplate, AllocationType allocation, int max_depth,
+ int* max_properties) {
+ DCHECK_GE(max_depth, 0);
+ DCHECK_GE(*max_properties, 0);
+
+ if (max_depth == 0) return {};
+
+ // Prevent concurrent migrations of boilerplate objects.
+ compiler::JSHeapBroker::BoilerplateMigrationGuardIfNeeded
+ boilerplate_access_guard(broker());
+
+ // Now that we hold the migration lock, get the current map.
+ compiler::MapRef boilerplate_map = boilerplate.map(broker());
+ // Protect against concurrent changes to the boilerplate object by checking
+ // for an identical value at the end of the compilation.
+ broker()->dependencies()->DependOnObjectSlotValue(
+ boilerplate, HeapObject::kMapOffset, boilerplate_map);
+ {
+ compiler::OptionalMapRef current_boilerplate_map =
+ boilerplate.map_direct_read(broker());
+ if (!current_boilerplate_map.has_value() ||
+ !current_boilerplate_map->equals(boilerplate_map)) {
+ // TODO(leszeks): Emit an eager deopt for this case, so that we can
+ // re-learn the boilerplate. This will be easier once we get rid of the
+ // two-pass approach, since we'll be able to create the eager deopt here
+ // and return a ReduceResult::DoneWithAbort().
+ return {};
+ }
+ }
+
+ // Bail out if the boilerplate map has been deprecated. The map could of
+ // course be deprecated at some point after the line below, but it's not a
+ // correctness issue -- it only means the literal won't be created with the
+ // most up-to-date map(s).
+ if (boilerplate_map.is_deprecated()) return {};
+
+ // We currently only support in-object properties.
+ if (boilerplate.map(broker()).elements_kind() == DICTIONARY_ELEMENTS ||
+ boilerplate.map(broker()).is_dictionary_map() ||
+ !boilerplate.raw_properties_or_hash(broker()).has_value()) {
+ return {};
+ }
+ {
+ compiler::ObjectRef properties =
+ *boilerplate.raw_properties_or_hash(broker());
+ bool const empty =
+ properties.IsSmi() ||
+ properties.equals(MakeRef(
+ broker(), local_isolate()->factory()->empty_fixed_array())) ||
+ properties.equals(MakeRef(
+ broker(), Handle<Object>::cast(
+ local_isolate()->factory()->empty_property_array())));
+ if (!empty) return {};
+ }
+
+ compiler::OptionalFixedArrayBaseRef maybe_elements =
+ boilerplate.elements(broker(), kRelaxedLoad);
+ if (!maybe_elements.has_value()) return {};
+ compiler::FixedArrayBaseRef boilerplate_elements = maybe_elements.value();
+ broker()->dependencies()->DependOnObjectSlotValue(
+ boilerplate, JSObject::kElementsOffset, boilerplate_elements);
+ int const elements_length = boilerplate_elements.length();
+
+ FastLiteralObject fast_literal(boilerplate_map, zone(), {});
+
+ // Compute the in-object properties to store first.
+ int index = 0;
+ for (InternalIndex i :
+ InternalIndex::Range(boilerplate_map.NumberOfOwnDescriptors())) {
+ PropertyDetails const property_details =
+ boilerplate_map.GetPropertyDetails(broker(), i);
+ if (property_details.location() != PropertyLocation::kField) continue;
+ DCHECK_EQ(PropertyKind::kData, property_details.kind());
+ if ((*max_properties)-- == 0) return {};
+
+ int offset = boilerplate_map.GetInObjectPropertyOffset(index);
+#ifdef DEBUG
+ FieldIndex field_index =
+ FieldIndex::ForDetails(*boilerplate_map.object(), property_details);
+ DCHECK(field_index.is_inobject());
+ DCHECK_EQ(index, field_index.property_index());
+ DCHECK_EQ(field_index.offset(), offset);
+#endif
+
+ // Note: the use of RawInobjectPropertyAt (vs. the higher-level
+ // GetOwnFastDataProperty) here is necessary, since the underlying value
+ // may be `uninitialized`, which the latter explicitly does not support.
+ compiler::OptionalObjectRef maybe_boilerplate_value =
+ boilerplate.RawInobjectPropertyAt(
+ broker(),
+ FieldIndex::ForInObjectOffset(offset, FieldIndex::kTagged));
+ if (!maybe_boilerplate_value.has_value()) return {};
+
+ // Note: We don't need to take a compilation dependency verifying the value
+ // of `boilerplate_value`, since boilerplate properties are constant after
+ // initialization modulo map migration. We protect against concurrent map
+ // migrations (other than elements kind transitions, which don't affect us)
+ // via the boilerplate_migration_access lock.
+ compiler::ObjectRef boilerplate_value = maybe_boilerplate_value.value();
+
+ FastLiteralField value;
+ if (boilerplate_value.IsJSObject()) {
+ compiler::JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
+ base::Optional<FastLiteralObject> maybe_object_value =
+ TryReadBoilerplateForFastLiteral(boilerplate_object, allocation,
+ max_depth - 1, max_properties);
+ if (!maybe_object_value.has_value()) return {};
+ value = FastLiteralField(maybe_object_value.value());
+ } else if (property_details.representation().IsDouble()) {
+ Float64 number =
+ Float64::FromBits(boilerplate_value.AsHeapNumber().value_as_bits());
+ value = FastLiteralField(number);
+ } else {
+ // It's fine to store the 'uninitialized' Oddball into a Smi field since
+ // it will get overwritten anyway.
+ DCHECK_IMPLIES(property_details.representation().IsSmi() &&
+ !boilerplate_value.IsSmi(),
+ boilerplate_value.object()->IsUninitialized());
+ value = FastLiteralField(boilerplate_value);
+ }
+
+ DCHECK_LT(index, boilerplate_map.GetInObjectProperties());
+ fast_literal.fields[index] = value;
+ index++;
+ }
+
+ // Fill slack at the end of the boilerplate object with filler maps.
+ int const boilerplate_length = boilerplate_map.GetInObjectProperties();
+ for (; index < boilerplate_length; ++index) {
+ DCHECK(!V8_MAP_PACKING_BOOL);
+ // TODO(wenyuzhao): Fix incorrect MachineType when V8_MAP_PACKING is
+ // enabled.
+ DCHECK_LT(index, boilerplate_map.GetInObjectProperties());
+ fast_literal.fields[index] = FastLiteralField(MakeRef(
+ broker(), local_isolate()->factory()->one_pointer_filler_map()));
+ }
+
+ // Empty or copy-on-write elements just store a constant.
+ compiler::MapRef elements_map = boilerplate_elements.map(broker());
+ // Protect against concurrent changes to the boilerplate object by checking
+ // for an identical value at the end of the compilation.
+ broker()->dependencies()->DependOnObjectSlotValue(
+ boilerplate_elements, HeapObject::kMapOffset, elements_map);
+ if (boilerplate_elements.length() == 0 ||
+ elements_map.IsFixedCowArrayMap(broker())) {
+ if (allocation == AllocationType::kOld &&
+ !boilerplate.IsElementsTenured(boilerplate_elements)) {
+ return {};
+ }
+ fast_literal.elements = FastLiteralFixedArray(boilerplate_elements);
+ } else {
+ // Compute the elements to store first (might have effects).
+ if (boilerplate_elements.IsFixedDoubleArray()) {
+ int const size = FixedDoubleArray::SizeFor(elements_length);
+ if (size > kMaxRegularHeapObjectSize) return {};
+ fast_literal.elements =
+ FastLiteralFixedArray(elements_length, zone(), double{});
+
+ compiler::FixedDoubleArrayRef elements =
+ boilerplate_elements.AsFixedDoubleArray();
+ for (int i = 0; i < elements_length; ++i) {
+ Float64 value = elements.GetFromImmutableFixedDoubleArray(i);
+ fast_literal.elements.double_values[i] = value;
+ }
+ } else {
+ int const size = FixedArray::SizeFor(elements_length);
+ if (size > kMaxRegularHeapObjectSize) return {};
+ fast_literal.elements = FastLiteralFixedArray(elements_length, zone());
+
+ compiler::FixedArrayRef elements = boilerplate_elements.AsFixedArray();
+ for (int i = 0; i < elements_length; ++i) {
+ if ((*max_properties)-- == 0) return {};
+ compiler::OptionalObjectRef element_value =
+ elements.TryGet(broker(), i);
+ if (!element_value.has_value()) return {};
+ if (element_value->IsJSObject()) {
+ base::Optional<FastLiteralObject> object =
+ TryReadBoilerplateForFastLiteral(element_value->AsJSObject(),
+ allocation, max_depth - 1,
+ max_properties);
+ if (!object.has_value()) return {};
+ fast_literal.elements.values[i] = FastLiteralField(*object);
+ } else {
+ fast_literal.elements.values[i] = FastLiteralField(*element_value);
+ }
+ }
+ }
+ }
+
+ if (boilerplate.IsJSArray()) {
+ fast_literal.js_array_length =
+ boilerplate.AsJSArray().GetBoilerplateLength(broker());
+ }
+
+ return fast_literal;
+}
+
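TryReadBoilerplateForFastLiteral above bounds its recursion with a depth limit and a shared property budget, and signals a bail-out by returning an empty optional. A standalone sketch of that budgeting pattern, using invented stand-in types (LiteralNode, CountLiteralFields) rather than V8 classes:

#include <optional>
#include <vector>

// Hypothetical stand-in for a boilerplate: a tree of nested literal objects.
struct LiteralNode {
  std::vector<LiteralNode> properties;
};

// Walk the tree, giving up (std::nullopt) when nesting is too deep or the
// shared property budget runs out, mirroring the max_depth / *max_properties
// bookkeeping used by the boilerplate walk.
std::optional<int> CountLiteralFields(const LiteralNode& node, int max_depth,
                                      int* max_properties) {
  if (max_depth == 0) return std::nullopt;
  int total = 0;
  for (const LiteralNode& child : node.properties) {
    if ((*max_properties)-- == 0) return std::nullopt;  // budget exhausted
    std::optional<int> nested =
        CountLiteralFields(child, max_depth - 1, max_properties);
    if (!nested.has_value()) return std::nullopt;  // propagate the bail-out
    total += 1 + *nested;
  }
  return total;
}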
+ValueNode* MaglevGraphBuilder::ExtendOrReallocateCurrentRawAllocation(
+ int size, AllocationType allocation_type) {
+ if (!current_raw_allocation_ ||
+ current_raw_allocation_->allocation_type() != allocation_type) {
+ current_raw_allocation_ =
+ AddNewNode<AllocateRaw>({}, allocation_type, size);
+ return current_raw_allocation_;
+ }
+
+ int current_size = current_raw_allocation_->size();
+ if (current_size + size > kMaxRegularHeapObjectSize) {
+ return current_raw_allocation_ =
+ AddNewNode<AllocateRaw>({}, allocation_type, size);
+ }
+
+ DCHECK_GT(current_size, 0);
+ int previous_end = current_size;
+ current_raw_allocation_->extend(size);
+ return AddNewNode<FoldedAllocation>({current_raw_allocation_}, previous_end);
+}
+
+void MaglevGraphBuilder::ClearCurrentRawAllocation() {
+ current_raw_allocation_ = nullptr;
+}
+
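ExtendOrReallocateCurrentRawAllocation folds consecutive small allocations into a single raw allocation and hands back each new object's offset inside it, starting a fresh allocation when the fold would grow past the regular-object size limit (or, in the real code, when the allocation type changes). A minimal standalone model of that bookkeeping; AllocationFolder and kMaxFoldedSize are illustrative names, and the limit is a placeholder rather than the actual kMaxRegularHeapObjectSize value.

#include <cstdio>

// Illustrative limit standing in for kMaxRegularHeapObjectSize.
constexpr int kMaxFoldedSize = 128 * 1024;

class AllocationFolder {
 public:
  // Returns the offset of the new object inside the current raw allocation,
  // starting a fresh raw allocation when needed (fresh allocation => offset 0).
  int Allocate(int size) {
    if (!has_current_ || current_size_ + size > kMaxFoldedSize) {
      has_current_ = true;
      current_size_ = size;
      ++allocation_count_;
      return 0;
    }
    int offset = current_size_;  // previous end becomes this object's start
    current_size_ += size;       // extend the folded allocation
    return offset;
  }
  void Clear() { has_current_ = false; }  // e.g. after a side-effecting node
  int allocation_count() const { return allocation_count_; }

 private:
  bool has_current_ = false;
  int current_size_ = 0;
  int allocation_count_ = 0;
};

int main() {
  AllocationFolder folder;
  std::printf("%d %d %d\n", folder.Allocate(32), folder.Allocate(16),
              folder.Allocate(8));  // 0 32 48: three objects, one raw allocation
  std::printf("allocations: %d\n", folder.allocation_count());  // 1
}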
+ValueNode* MaglevGraphBuilder::BuildAllocateFastLiteral(
+ FastLiteralObject object, AllocationType allocation_type) {
+ base::SmallVector<ValueNode*, 8, ZoneAllocator<ValueNode*>> properties(
+ object.map.GetInObjectProperties(), ZoneAllocator<ValueNode*>(zone()));
+ for (int i = 0; i < object.map.GetInObjectProperties(); ++i) {
+ properties[i] = BuildAllocateFastLiteral(object.fields[i], allocation_type);
+ }
+ ValueNode* elements =
+ BuildAllocateFastLiteral(object.elements, allocation_type);
+
+ DCHECK(object.map.IsJSObjectMap());
+ // TODO(leszeks): Fold allocations.
+ ValueNode* allocation = ExtendOrReallocateCurrentRawAllocation(
+ object.map.instance_size(), allocation_type);
+ AddNewNode<StoreMap>({allocation}, object.map);
+ AddNewNode<StoreTaggedFieldNoWriteBarrier>(
+ {allocation,
+ GetConstant(MakeRefAssumeMemoryFence(
+ broker(), local_isolate()->factory()->empty_fixed_array()))},
+ JSObject::kPropertiesOrHashOffset);
+ if (object.js_array_length.has_value()) {
+ BuildStoreTaggedField(allocation, GetConstant(*object.js_array_length),
+ JSArray::kLengthOffset);
+ }
+
+ BuildStoreTaggedField(allocation, elements, JSObject::kElementsOffset);
+ for (int i = 0; i < object.map.GetInObjectProperties(); ++i) {
+ BuildStoreTaggedField(allocation, properties[i],
+ object.map.GetInObjectPropertyOffset(i));
+ }
+ return allocation;
+}
+
+ValueNode* MaglevGraphBuilder::BuildAllocateFastLiteral(
+ FastLiteralField value, AllocationType allocation_type) {
+ switch (value.type) {
+ case FastLiteralField::kObject:
+ return BuildAllocateFastLiteral(value.object, allocation_type);
+ case FastLiteralField::kMutableDouble: {
+ ValueNode* new_alloc = ExtendOrReallocateCurrentRawAllocation(
+ HeapNumber::kSize, allocation_type);
+ AddNewNode<StoreMap>(
+ {new_alloc},
+ MakeRefAssumeMemoryFence(
+ broker(), local_isolate()->factory()->heap_number_map()));
+ // TODO(leszeks): Fix hole storage, in case this should be a custom NaN.
+ AddNewNode<StoreFloat64>(
+ {new_alloc, GetFloat64Constant(value.mutable_double_value)},
+ HeapNumber::kValueOffset);
+ return new_alloc;
+ }
+
+ case FastLiteralField::kConstant:
+ return GetConstant(value.constant_value);
+ case FastLiteralField::kUninitialized:
+ UNREACHABLE();
+ }
+}
+
+ValueNode* MaglevGraphBuilder::BuildAllocateFastLiteral(
+ FastLiteralFixedArray value, AllocationType allocation_type) {
+ switch (value.type) {
+ case FastLiteralFixedArray::kTagged: {
+ base::SmallVector<ValueNode*, 8, ZoneAllocator<ValueNode*>> elements(
+ value.length, ZoneAllocator<ValueNode*>(zone()));
+ for (int i = 0; i < value.length; ++i) {
+ elements[i] =
+ BuildAllocateFastLiteral(value.values[i], allocation_type);
+ }
+ ValueNode* allocation = ExtendOrReallocateCurrentRawAllocation(
+ FixedArray::SizeFor(value.length), allocation_type);
+ AddNewNode<StoreMap>(
+ {allocation},
+ MakeRefAssumeMemoryFence(
+ broker(), local_isolate()->factory()->fixed_array_map()));
+ AddNewNode<StoreTaggedFieldNoWriteBarrier>(
+ {allocation, GetSmiConstant(value.length)},
+ FixedArray::kLengthOffset);
+ for (int i = 0; i < value.length; ++i) {
+ // TODO(leszeks): Elide the write barrier where possible.
+ BuildStoreTaggedField(allocation, elements[i],
+ FixedArray::OffsetOfElementAt(i));
+ }
+ return allocation;
+ }
+ case FastLiteralFixedArray::kDouble: {
+ ValueNode* allocation = ExtendOrReallocateCurrentRawAllocation(
+ FixedDoubleArray::SizeFor(value.length), allocation_type);
+ AddNewNode<StoreMap>(
+ {allocation},
+ MakeRefAssumeMemoryFence(
+ broker(), local_isolate()->factory()->fixed_double_array_map()));
+ AddNewNode<StoreTaggedFieldNoWriteBarrier>(
+ {allocation, GetSmiConstant(value.length)},
+ FixedDoubleArray::kLengthOffset);
+ for (int i = 0; i < value.length; ++i) {
+ // TODO(leszeks): Fix hole storage, in case Float64::get_scalar doesn't
+ // preserve custom NaNs.
+ AddNewNode<StoreFloat64>(
+ {allocation, GetFloat64Constant(value.double_values[i])},
+ FixedDoubleArray::OffsetOfElementAt(i));
+ }
+ return allocation;
+ }
+ case FastLiteralFixedArray::kCoW:
+ return GetConstant(value.cow_value);
+ case FastLiteralFixedArray::kUninitialized:
+ UNREACHABLE();
+ }
+}
+
+ReduceResult MaglevGraphBuilder::TryBuildFastCreateObjectOrArrayLiteral(
+ const compiler::LiteralFeedback& feedback) {
+ compiler::AllocationSiteRef site = feedback.value();
+ if (!site.boilerplate(broker()).has_value()) return ReduceResult::Fail();
+ AllocationType allocation_type =
+ broker()->dependencies()->DependOnPretenureMode(site);
+
+ // First try to extract out the shape and values of the boilerplate, bailing
+ // out on complex boilerplates.
+ int max_properties = compiler::kMaxFastLiteralProperties;
+ base::Optional<FastLiteralObject> maybe_value =
+ TryReadBoilerplateForFastLiteral(
+ *site.boilerplate(broker()), allocation_type,
+ compiler::kMaxFastLiteralDepth, &max_properties);
+ if (!maybe_value.has_value()) return ReduceResult::Fail();
+
+ // Then, use the collected information to actually create nodes in the graph.
+ // TODO(leszeks): Add support for unwinding graph modifications, so that we
+ // can get rid of this two pass approach.
+ broker()->dependencies()->DependOnElementsKinds(site);
+ ReduceResult result = BuildAllocateFastLiteral(*maybe_value, allocation_type);
+ // TODO(leszeks): Don't eagerly clear the raw allocation, have the next side
+ // effect clear it.
+ ClearCurrentRawAllocation();
+ return result;
+}
+
void MaglevGraphBuilder::VisitCreateObjectLiteral() {
compiler::ObjectBoilerplateDescriptionRef boilerplate_desc =
GetRefOperand<ObjectBoilerplateDescription>(0);
@@ -3575,25 +5891,34 @@ void MaglevGraphBuilder::VisitCreateObjectLiteral() {
int bytecode_flags = GetFlag8Operand(2);
int literal_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
+ compiler::FeedbackSource feedback_source(feedback(), slot_index);
+
+ compiler::ProcessedFeedback const& processed_feedback =
+ broker()->GetFeedbackForArrayOrObjectLiteral(feedback_source);
+ if (!processed_feedback.IsInsufficient()) {
+ ReduceResult result =
+ TryBuildFastCreateObjectOrArrayLiteral(processed_feedback.AsLiteral());
+ PROCESS_AND_RETURN_IF_DONE(
+ result, [&](ValueNode* value) { SetAccumulator(value); });
+ }
+
if (interpreter::CreateObjectLiteralFlags::FastCloneSupportedBit::decode(
bytecode_flags)) {
// TODO(victorgomes): CreateShallowObjectLiteral should not need the
// boilerplate descriptor. However, the current builtin checks that the
// feedback exists and falls back to CreateObjectLiteral if it doesn't.
SetAccumulator(AddNewNode<CreateShallowObjectLiteral>(
- {}, boilerplate_desc, compiler::FeedbackSource{feedback(), slot_index},
- literal_flags));
+ {}, boilerplate_desc, feedback_source, literal_flags));
} else {
SetAccumulator(AddNewNode<CreateObjectLiteral>(
- {}, boilerplate_desc, compiler::FeedbackSource{feedback(), slot_index},
- literal_flags));
+ {}, boilerplate_desc, feedback_source, literal_flags));
}
}
void MaglevGraphBuilder::VisitCreateEmptyObjectLiteral() {
compiler::NativeContextRef native_context = broker()->target_native_context();
compiler::MapRef map =
- native_context.object_function().initial_map(broker()->dependencies());
+ native_context.object_function(broker()).initial_map(broker());
DCHECK(!map.is_dictionary_map());
DCHECK(!map.IsInobjectSlackTrackingInProgress());
SetAccumulator(AddNewNode<CreateEmptyObjectLiteral>({}, map));
@@ -3633,7 +5958,7 @@ void MaglevGraphBuilder::VisitCreateClosure() {
compiler::SharedFunctionInfoRef shared_function_info =
GetRefOperand<SharedFunctionInfo>(0);
compiler::FeedbackCellRef feedback_cell =
- feedback().GetClosureFeedbackCell(iterator_.GetIndexOperand(1));
+ feedback().GetClosureFeedbackCell(broker(), iterator_.GetIndexOperand(1));
uint32_t flags = GetFlag8Operand(2);
if (interpreter::CreateClosureFlags::FastNewClosureBit::decode(flags)) {
@@ -3701,7 +6026,7 @@ void MaglevGraphBuilder::VisitCreateWithContext() {
void MaglevGraphBuilder::VisitCreateMappedArguments() {
compiler::SharedFunctionInfoRef shared =
compilation_unit_->shared_function_info();
- if (shared.object()->has_duplicate_parameters()) {
+ if (is_inline() || shared.object()->has_duplicate_parameters()) {
SetAccumulator(
BuildCallRuntime(Runtime::kNewSloppyArguments, {GetClosure()}));
} else {
@@ -3711,13 +6036,23 @@ void MaglevGraphBuilder::VisitCreateMappedArguments() {
}
void MaglevGraphBuilder::VisitCreateUnmappedArguments() {
- SetAccumulator(
- BuildCallBuiltin<Builtin::kFastNewStrictArguments>({GetClosure()}));
+ if (is_inline()) {
+ SetAccumulator(
+ BuildCallRuntime(Runtime::kNewStrictArguments, {GetClosure()}));
+ } else {
+ SetAccumulator(
+ BuildCallBuiltin<Builtin::kFastNewStrictArguments>({GetClosure()}));
+ }
}
void MaglevGraphBuilder::VisitCreateRestParameter() {
- SetAccumulator(
- BuildCallBuiltin<Builtin::kFastNewRestArguments>({GetClosure()}));
+ if (is_inline()) {
+ SetAccumulator(
+ BuildCallRuntime(Runtime::kNewRestParameter, {GetClosure()}));
+ } else {
+ SetAccumulator(
+ BuildCallBuiltin<Builtin::kFastNewRestArguments>({GetClosure()}));
+ }
}
void MaglevGraphBuilder::VisitJumpLoop() {
@@ -3727,9 +6062,10 @@ void MaglevGraphBuilder::VisitJumpLoop() {
const FeedbackSlot feedback_slot = iterator_.GetSlotOperand(2);
int target = iterator_.GetJumpTargetOffset();
- if (!is_toptier()) {
+ if (ShouldEmitInterruptBudgetChecks()) {
if (relative_jump_bytecode_offset > 0) {
- AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
+ AddNewNode<ReduceInterruptBudgetForLoop>({},
+ relative_jump_bytecode_offset);
}
AddNewNode<JumpLoopPrologue>({}, loop_offset, feedback_slot,
BytecodeOffset(iterator_.current_offset()),
@@ -3739,13 +6075,14 @@ void MaglevGraphBuilder::VisitJumpLoop() {
FinishBlock<JumpLoop>({}, jump_targets_[target].block_ptr());
merge_states_[target]->MergeLoop(*compilation_unit_, graph_->smi(),
- current_interpreter_frame_, block, target);
+ current_interpreter_frame_, block);
block->set_predecessor_id(merge_states_[target]->predecessor_count() - 1);
}
void MaglevGraphBuilder::VisitJump() {
const uint32_t relative_jump_bytecode_offset =
iterator_.GetRelativeJumpTargetOffset();
- if (!is_toptier() && relative_jump_bytecode_offset > 0) {
+ if (v8_flags.maglev_increase_budget_forward_jump &&
+ ShouldEmitInterruptBudgetChecks() && relative_jump_bytecode_offset > 0) {
AddNewNode<IncreaseInterruptBudget>({}, relative_jump_bytecode_offset);
}
BasicBlock* block =
@@ -3789,8 +6126,7 @@ void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
} else {
// If there already is a frame state, merge.
merge_states_[target]->Merge(*compilation_unit_, graph_->smi(),
- current_interpreter_frame_, predecessor,
- target);
+ current_interpreter_frame_, predecessor);
}
}
@@ -3800,7 +6136,7 @@ void MaglevGraphBuilder::MergeDeadIntoFrameState(int target) {
predecessors_[target]--;
if (merge_states_[target]) {
// If there already is a frame state, merge.
- merge_states_[target]->MergeDead(*compilation_unit_, target);
+ merge_states_[target]->MergeDead(*compilation_unit_);
// If this merge is the last one which kills a loop merge, remove that
// merge state.
if (merge_states_[target]->is_unreachable_loop()) {
@@ -3818,7 +6154,7 @@ void MaglevGraphBuilder::MergeDeadLoopIntoFrameState(int target) {
predecessors_[target]--;
if (merge_states_[target]) {
// If there already is a frame state, merge.
- merge_states_[target]->MergeDeadLoop(*compilation_unit_, target);
+ merge_states_[target]->MergeDeadLoop(*compilation_unit_);
}
}
@@ -3841,8 +6177,7 @@ void MaglevGraphBuilder::MergeIntoInlinedReturnFrameState(
DCHECK(GetInLiveness()->Equals(
*merge_states_[target]->frame_state().liveness()));
merge_states_[target]->Merge(*compilation_unit_, graph_->smi(),
- current_interpreter_frame_, predecessor,
- target);
+ current_interpreter_frame_, predecessor);
}
}
@@ -3864,26 +6199,24 @@ void MaglevGraphBuilder::BuildBranchIfRootConstant(ValueNode* node,
return;
}
- BasicBlockRef* true_target = jump_type == kJumpIfTrue
- ? &jump_targets_[jump_offset]
- : &jump_targets_[fallthrough_offset];
- BasicBlockRef* false_target = jump_type == kJumpIfFalse
- ? &jump_targets_[jump_offset]
- : &jump_targets_[fallthrough_offset];
-
- BasicBlock* block = FinishBlock<BranchIfRootConstant>(
- {node}, true_target, false_target, root_index);
+ BasicBlockRef *true_target, *false_target;
+ int true_interrupt_correction, false_interrupt_correction;
if (jump_type == kJumpIfTrue) {
- block->control_node()
- ->Cast<BranchControlNode>()
- ->set_true_interrupt_correction(
- iterator_.GetRelativeJumpTargetOffset());
+ true_target = &jump_targets_[jump_offset];
+ true_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
+ false_target = &jump_targets_[fallthrough_offset];
+ false_interrupt_correction = 0;
} else {
- block->control_node()
- ->Cast<BranchControlNode>()
- ->set_false_interrupt_correction(
- iterator_.GetRelativeJumpTargetOffset());
+ true_target = &jump_targets_[fallthrough_offset];
+ true_interrupt_correction = 0;
+ false_target = &jump_targets_[jump_offset];
+ false_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
}
+
+ BasicBlock* block = FinishBlock<BranchIfRootConstant>(
+ {node}, true_target, true_interrupt_correction, false_target,
+ false_interrupt_correction, root_index);
+
MergeIntoFrameState(block, jump_offset);
StartFallthroughBlock(fallthrough_offset, block);
}
@@ -3903,33 +6236,95 @@ void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
JumpType jump_type) {
int fallthrough_offset = next_offset();
int jump_offset = iterator_.GetJumpTargetOffset();
- BasicBlockRef* true_target = jump_type == kJumpIfTrue
- ? &jump_targets_[jump_offset]
- : &jump_targets_[fallthrough_offset];
- BasicBlockRef* false_target = jump_type == kJumpIfFalse
- ? &jump_targets_[jump_offset]
- : &jump_targets_[fallthrough_offset];
- BasicBlock* block =
- FinishBlock<BranchIfToBooleanTrue>({node}, true_target, false_target);
+
+ if (IsConstantNode(node->opcode())) {
+ bool constant_is_true = FromConstantToBool(local_isolate(), node);
+ bool is_jump_taken = constant_is_true == (jump_type == kJumpIfTrue);
+ if (is_jump_taken) {
+ BasicBlock* block = FinishBlock<Jump>({}, &jump_targets_[jump_offset]);
+ MergeDeadIntoFrameState(fallthrough_offset);
+ MergeIntoFrameState(block, jump_offset);
+ } else {
+ MergeDeadIntoFrameState(jump_offset);
+ }
+ return;
+ }
+
+ BasicBlockRef *true_target, *false_target;
+ int true_interrupt_correction, false_interrupt_correction;
if (jump_type == kJumpIfTrue) {
- block->control_node()
- ->Cast<BranchControlNode>()
- ->set_true_interrupt_correction(
- iterator_.GetRelativeJumpTargetOffset());
+ true_target = &jump_targets_[jump_offset];
+ true_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
+ false_target = &jump_targets_[fallthrough_offset];
+ false_interrupt_correction = 0;
+ } else {
+ true_target = &jump_targets_[fallthrough_offset];
+ true_interrupt_correction = 0;
+ false_target = &jump_targets_[jump_offset];
+ false_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
+ }
+
+ auto make_specialized_branch_if_compare = [&](ValueRepresentation repr,
+ ValueNode* cond) {
+ // Note that this function (BuildBranchIfToBooleanTrue) generates either a
+ // BranchIfToBooleanTrue, or a BranchIfFloat64Compare/BranchIfInt32Compare
+ // comparing the {node} with 0. In the former case, the jump is taken if
+ // {node} is non-zero, while in the latter cases, the jump is taken if
+ // {node} is zero. The {true_target} and {false_target} are thus swapped
+ // when we generate a BranchIfFloat64Compare or a BranchIfInt32Compare
+ // below.
+ DCHECK_EQ(repr, cond->value_representation());
+ switch (repr) {
+ case ValueRepresentation::kFloat64:
+ return FinishBlock<BranchIfFloat64Compare>(
+ {cond, GetFloat64Constant(0)}, Operation::kEqual, false_target,
+ false_interrupt_correction, true_target, true_interrupt_correction,
+ BranchIfFloat64Compare::JumpModeIfNaN::kJumpToTrue);
+ case ValueRepresentation::kInt32:
+ return FinishBlock<BranchIfInt32Compare>(
+ {cond, GetInt32Constant(0)}, Operation::kEqual, false_target,
+ false_interrupt_correction, true_target, true_interrupt_correction);
+ default:
+ UNREACHABLE();
+ }
+ };
+
+ BasicBlock* block;
+ if (node->value_representation() == ValueRepresentation::kInt32) {
+ block =
+ make_specialized_branch_if_compare(ValueRepresentation::kInt32, node);
+ } else if (node->value_representation() == ValueRepresentation::kFloat64) {
+ block =
+ make_specialized_branch_if_compare(ValueRepresentation::kFloat64, node);
} else {
- block->control_node()
- ->Cast<BranchControlNode>()
- ->set_false_interrupt_correction(
- iterator_.GetRelativeJumpTargetOffset());
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(node);
+ if (ValueNode* as_int32 = node_info->int32_alternative) {
+ block = make_specialized_branch_if_compare(ValueRepresentation::kInt32,
+ as_int32);
+ } else if (ValueNode* as_float64 = node_info->float64_alternative) {
+ block = make_specialized_branch_if_compare(ValueRepresentation::kFloat64,
+ as_float64);
+ } else {
+ DCHECK(node->value_representation() == ValueRepresentation::kTagged ||
+ node->value_representation() == ValueRepresentation::kUint32);
+ // Uint32 values should be rare enough that tagging them shouldn't be too
+ // expensive (we don't have a `BranchIfUint32Compare` node, and adding one
+ // doesn't seem worth it at this point).
+ node = GetTaggedValue(node);
+ block = FinishBlock<BranchIfToBooleanTrue>(
+ {node}, true_target, true_interrupt_correction, false_target,
+ false_interrupt_correction);
+ }
}
+
MergeIntoFrameState(block, jump_offset);
StartFallthroughBlock(fallthrough_offset, block);
}
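The specialization above boils down to a small decision: if the condition is already an untagged Int32 or Float64 (possibly via a cached alternative), emit a compare-against-zero branch with the targets swapped, because the emitted compare tests the falsy case; otherwise fall back to a generic to-boolean branch on the tagged value. A hedged standalone sketch of that selection, with invented enum and struct names:

#include <cassert>

enum class Repr { kTagged, kInt32, kFloat64 };
enum class BranchKind { kToBooleanTrue, kInt32EqualZero, kFloat64EqualZero };

struct BranchPlan {
  BranchKind kind;
  bool swap_targets;  // true when the compare tests the *falsy* case (== 0)
};

// Untagged numbers get a compare-against-zero branch with swapped targets;
// everything else uses the generic to-boolean branch on the tagged value.
BranchPlan PlanToBooleanBranch(Repr repr) {
  switch (repr) {
    case Repr::kInt32:
      return {BranchKind::kInt32EqualZero, /*swap_targets=*/true};
    case Repr::kFloat64:
      return {BranchKind::kFloat64EqualZero, /*swap_targets=*/true};
    case Repr::kTagged:
      return {BranchKind::kToBooleanTrue, /*swap_targets=*/false};
  }
  assert(false);
  return {BranchKind::kToBooleanTrue, false};
}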
void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
- BuildBranchIfToBooleanTrue(GetAccumulatorTagged(), kJumpIfTrue);
+ BuildBranchIfToBooleanTrue(GetRawAccumulator(), kJumpIfTrue);
}
void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
- BuildBranchIfToBooleanTrue(GetAccumulatorTagged(), kJumpIfFalse);
+ BuildBranchIfToBooleanTrue(GetRawAccumulator(), kJumpIfFalse);
}
void MaglevGraphBuilder::VisitJumpIfTrue() {
BuildBranchIfTrue(GetAccumulatorTagged(), kJumpIfTrue);
@@ -3950,16 +6345,22 @@ void MaglevGraphBuilder::VisitJumpIfNotUndefined() {
BuildBranchIfUndefined(GetAccumulatorTagged(), kJumpIfFalse);
}
void MaglevGraphBuilder::VisitJumpIfUndefinedOrNull() {
+ int true_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
+ int false_interrupt_correction = 0;
BasicBlock* block = FinishBlock<BranchIfUndefinedOrNull>(
{GetAccumulatorTagged()}, &jump_targets_[iterator_.GetJumpTargetOffset()],
- &jump_targets_[next_offset()]);
+ true_interrupt_correction, &jump_targets_[next_offset()],
+ false_interrupt_correction);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
StartFallthroughBlock(next_offset(), block);
}
void MaglevGraphBuilder::VisitJumpIfJSReceiver() {
+ int true_interrupt_correction = iterator_.GetRelativeJumpTargetOffset();
+ int false_interrupt_correction = 0;
BasicBlock* block = FinishBlock<BranchIfJSReceiver>(
{GetAccumulatorTagged()}, &jump_targets_[iterator_.GetJumpTargetOffset()],
- &jump_targets_[next_offset()]);
+ true_interrupt_correction, &jump_targets_[next_offset()],
+ false_interrupt_correction);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
StartFallthroughBlock(next_offset(), block);
}
@@ -4001,32 +6402,77 @@ void MaglevGraphBuilder::VisitForInPrepare() {
compiler::FeedbackSource feedback_source{feedback(), slot};
// TODO(v8:7700): Use feedback and create fast path.
ValueNode* context = GetContext();
- ForInPrepare* result =
- AddNewNode<ForInPrepare>({context, enumerator}, feedback_source);
- // No need to set the accumulator.
- DCHECK(!GetOutLiveness()->AccumulatorIsLive());
- // The result is output in registers |cache_info_triple| to
- // |cache_info_triple + 2|, with the registers holding cache_type,
- // cache_array, and cache_length respectively.
- interpreter::Register first = iterator_.GetRegisterOperand(0);
- auto array_and_length =
- std::make_pair(interpreter::Register{first.index() + 1},
- interpreter::Register{first.index() + 2});
- StoreRegisterPair(array_and_length, result);
+ interpreter::Register cache_type_reg = iterator_.GetRegisterOperand(0);
+ interpreter::Register cache_array_reg{cache_type_reg.index() + 1};
+ interpreter::Register cache_length_reg{cache_type_reg.index() + 2};
+
+ ForInHint hint = broker()->GetFeedbackForForIn(feedback_source);
+
+ current_for_in_state = ForInState();
+ switch (hint) {
+ case ForInHint::kNone:
+ case ForInHint::kEnumCacheKeysAndIndices:
+ case ForInHint::kEnumCacheKeys: {
+ BuildCheckMaps(enumerator, base::VectorOf({broker()->meta_map()}));
+
+ auto* descriptor_array = AddNewNode<LoadTaggedField>(
+ {enumerator}, Map::kInstanceDescriptorsOffset);
+ auto* enum_cache = AddNewNode<LoadTaggedField>(
+ {descriptor_array}, DescriptorArray::kEnumCacheOffset);
+ auto* cache_array =
+ AddNewNode<LoadTaggedField>({enum_cache}, EnumCache::kKeysOffset);
+ current_for_in_state.enum_cache = enum_cache;
+
+ auto* cache_length = AddNewNode<LoadEnumCacheLength>({enumerator});
+
+ MoveNodeBetweenRegisters(interpreter::Register::virtual_accumulator(),
+ cache_type_reg);
+ StoreRegister(cache_array_reg, cache_array);
+ StoreRegister(cache_length_reg, cache_length);
+ break;
+ }
+ case ForInHint::kAny: {
+ // The result of the bytecode is output in registers |cache_info_triple|
+ // to |cache_info_triple + 2|, with the registers holding cache_type,
+ // cache_array, and cache_length respectively.
+ //
+ // We set the cache type first (to the accumulator value), and write
+ // the other two with a ForInPrepare builtin call. This can lazy deopt,
+ // which will write to cache_array and cache_length, with cache_type
+ // already set on the translation frame.
+
+ // This move needs to happen before ForInPrepare to avoid lazy deopt
+ // extending the lifetime of the {cache_type} register.
+ MoveNodeBetweenRegisters(interpreter::Register::virtual_accumulator(),
+ cache_type_reg);
+ ForInPrepare* result =
+ AddNewNode<ForInPrepare>({context, enumerator}, feedback_source);
+ // The add will set up a lazy deopt info writing to all three output
+ // registers. Update this to only write to the latter two.
+ result->lazy_deopt_info()->UpdateResultLocation(cache_array_reg, 2);
+ StoreRegisterPair({cache_array_reg, cache_length_reg}, result);
+ // Force a conversion to Int32 for the cache length value.
+ GetInt32(cache_length_reg);
+ break;
+ }
+ }
}
void MaglevGraphBuilder::VisitForInContinue() {
// ForInContinue <index> <cache_length>
- ValueNode* index = LoadRegisterTagged(0);
- ValueNode* cache_length = LoadRegisterTagged(1);
- // TODO(verwaest): Fold with the next instruction.
- SetAccumulator(AddNewNode<TaggedNotEqual>({index, cache_length}));
+ ValueNode* index = LoadRegisterInt32(0);
+ ValueNode* cache_length = LoadRegisterInt32(1);
+ if (TryBuildBranchFor<BranchIfInt32Compare>({index, cache_length},
+ Operation::kLessThan)) {
+ return;
+ }
+ SetAccumulator(
+ AddNewNode<Int32NodeFor<Operation::kLessThan>>({index, cache_length}));
}
void MaglevGraphBuilder::VisitForInNext() {
// ForInNext <receiver> <index> <cache_info_pair>
ValueNode* receiver = LoadRegisterTagged(0);
- ValueNode* index = LoadRegisterTagged(1);
interpreter::Register cache_type_reg, cache_array_reg;
std::tie(cache_type_reg, cache_array_reg) =
iterator_.GetRegisterPairOperand(2);
@@ -4034,17 +6480,57 @@ void MaglevGraphBuilder::VisitForInNext() {
ValueNode* cache_array = GetTaggedValue(cache_array_reg);
FeedbackSlot slot = GetSlotOperand(3);
compiler::FeedbackSource feedback_source{feedback(), slot};
- ValueNode* context = GetContext();
- SetAccumulator(AddNewNode<ForInNext>(
- {context, receiver, cache_array, cache_type, index}, feedback_source));
+
+ ForInHint hint = broker()->GetFeedbackForForIn(feedback_source);
+
+ switch (hint) {
+ case ForInHint::kNone:
+ case ForInHint::kEnumCacheKeysAndIndices:
+ case ForInHint::kEnumCacheKeys: {
+ ValueNode* index = LoadRegisterInt32(1);
+ // Ensure that the expected map still matches that of the {receiver}.
+ auto* receiver_map =
+ AddNewNode<LoadTaggedField>({receiver}, HeapObject::kMapOffset);
+ AddNewNode<CheckDynamicValue>({receiver_map, cache_type});
+ auto* key = AddNewNode<LoadFixedArrayElement>({cache_array, index});
+ SetAccumulator(key);
+
+ current_for_in_state.receiver = receiver;
+ if (ToObject* to_object =
+ current_for_in_state.receiver->TryCast<ToObject>()) {
+ current_for_in_state.receiver = to_object->value_input().node();
+ }
+ current_for_in_state.receiver_needs_map_check = false;
+ current_for_in_state.cache_type = cache_type;
+ current_for_in_state.key = key;
+ if (hint == ForInHint::kEnumCacheKeysAndIndices) {
+ current_for_in_state.index = index;
+ }
+ // We know that the enum cache entry is not undefined, so skip over the
+ // next JumpIfUndefined.
+ DCHECK(iterator_.next_bytecode() ==
+ interpreter::Bytecode::kJumpIfUndefined ||
+ iterator_.next_bytecode() ==
+ interpreter::Bytecode::kJumpIfUndefinedConstant);
+ iterator_.Advance();
+ MergeDeadIntoFrameState(iterator_.GetJumpTargetOffset());
+ break;
+ }
+ case ForInHint::kAny: {
+ ValueNode* index = LoadRegisterTagged(1);
+ ValueNode* context = GetContext();
+ SetAccumulator(AddNewNode<ForInNext>(
+ {context, receiver, cache_array, cache_type, index},
+ feedback_source));
+ break;
+ };
+ }
}
void MaglevGraphBuilder::VisitForInStep() {
- // TODO(victorgomes): We should be able to assert that Register(0)
- // contains an Smi.
ValueNode* index = LoadRegisterInt32(0);
- ValueNode* one = GetInt32Constant(1);
- SetAccumulator(AddNewInt32BinaryOperationNode<Operation::kAdd>({index, one}));
+ SetAccumulator(AddNewNode<Int32NodeFor<Operation::kIncrement>>({index}));
+ current_for_in_state = ForInState();
}
void MaglevGraphBuilder::VisitSetPendingMessage() {
@@ -4066,8 +6552,9 @@ void MaglevGraphBuilder::VisitReThrow() {
void MaglevGraphBuilder::VisitReturn() {
// See also: InterpreterAssembler::UpdateInterruptBudgetOnReturn.
const uint32_t relative_jump_bytecode_offset = iterator_.current_offset();
- if (!is_toptier() && relative_jump_bytecode_offset > 0) {
- AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
+ if (ShouldEmitInterruptBudgetChecks() && relative_jump_bytecode_offset > 0) {
+ AddNewNode<ReduceInterruptBudgetForReturn>({},
+ relative_jump_bytecode_offset);
}
if (!is_inline()) {
@@ -4091,7 +6578,7 @@ void MaglevGraphBuilder::VisitThrowReferenceErrorIfHole() {
ValueNode* value = GetAccumulatorTagged();
// Avoid the check if we know it is not the hole.
if (IsConstantNode(value->opcode())) {
- if (IsConstantNodeTheHole(value)) {
+ if (IsTheHoleValue(value)) {
ValueNode* constant = GetConstant(name);
BuildCallRuntime(Runtime::kThrowAccessedUninitializedVariable,
{constant});
@@ -4106,7 +6593,7 @@ void MaglevGraphBuilder::VisitThrowSuperNotCalledIfHole() {
ValueNode* value = GetAccumulatorTagged();
// Avoid the check if we know it is not the hole.
if (IsConstantNode(value->opcode())) {
- if (IsConstantNodeTheHole(value)) {
+ if (IsTheHoleValue(value)) {
BuildCallRuntime(Runtime::kThrowSuperNotCalled, {});
BuildAbort(AbortReason::kUnexpectedReturnFromThrow);
}
@@ -4119,7 +6606,7 @@ void MaglevGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
ValueNode* value = GetAccumulatorTagged();
// Avoid the check if we know it is the hole.
if (IsConstantNode(value->opcode())) {
- if (!IsConstantNodeTheHole(value)) {
+ if (!IsTheHoleValue(value)) {
BuildCallRuntime(Runtime::kThrowSuperAlreadyCalledError, {});
BuildAbort(AbortReason::kUnexpectedReturnFromThrow);
}
@@ -4149,10 +6636,13 @@ void MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
// We create an initial block that checks if the generator is undefined.
ValueNode* maybe_generator = LoadRegisterTagged(0);
+ // Neither the true nor the false path jumps over any bytecode.
+ int true_interrupt_correction = 0, false_interrupt_correction = 0;
BasicBlock* block_is_generator_undefined = FinishBlock<BranchIfRootConstant>(
{maybe_generator}, &jump_targets_[next_offset()],
+ true_interrupt_correction,
&jump_targets_[generator_prologue_block_offset],
- RootIndex::kUndefinedValue);
+ false_interrupt_correction, RootIndex::kUndefinedValue);
MergeIntoFrameState(block_is_generator_undefined, next_offset());
// We create the generator prologue block.
@@ -4163,8 +6653,8 @@ void MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
ValueNode* state = AddNewNode<LoadTaggedField>(
{generator}, JSGeneratorObject::kContinuationOffset);
ValueNode* new_state = GetSmiConstant(JSGeneratorObject::kGeneratorExecuting);
- AddNewNode<StoreTaggedFieldNoWriteBarrier>(
- {generator, new_state}, JSGeneratorObject::kContinuationOffset);
+ BuildStoreTaggedFieldNoWriteBarrier(generator, new_state,
+ JSGeneratorObject::kContinuationOffset);
ValueNode* context = AddNewNode<LoadTaggedField>(
{generator}, JSGeneratorObject::kContextOffset);
SetContext(context);
@@ -4198,24 +6688,30 @@ void MaglevGraphBuilder::VisitSuspendGenerator() {
int input_count = parameter_count_without_receiver() + args.register_count() +
GeneratorStore::kFixedInputCount;
- GeneratorStore* node = CreateNewNode<GeneratorStore>(
- input_count, context, generator, suspend_id, iterator_.current_offset());
- int arg_index = 0;
- for (int i = 1 /* skip receiver */; i < parameter_count(); ++i) {
- node->set_parameters_and_registers(arg_index++, GetTaggedArgument(i));
- }
- const compiler::BytecodeLivenessState* liveness = GetOutLiveness();
- for (int i = 0; i < args.register_count(); ++i) {
- ValueNode* value = liveness->RegisterIsLive(args[i].index())
- ? GetTaggedValue(args[i])
- : GetRootConstant(RootIndex::kOptimizedOut);
- node->set_parameters_and_registers(arg_index++, value);
- }
- AddNode(node);
+ int debug_pos_offset = iterator_.current_offset() +
+ (BytecodeArray::kHeaderSize - kHeapObjectTag);
+ AddNewNode<GeneratorStore>(
+ input_count,
+ [&](GeneratorStore* node) {
+ int arg_index = 0;
+ for (int i = 1 /* skip receiver */; i < parameter_count(); ++i) {
+ node->set_parameters_and_registers(arg_index++, GetTaggedArgument(i));
+ }
+ const compiler::BytecodeLivenessState* liveness = GetOutLiveness();
+ for (int i = 0; i < args.register_count(); ++i) {
+ ValueNode* value = liveness->RegisterIsLive(args[i].index())
+ ? GetTaggedValue(args[i])
+ : GetRootConstant(RootIndex::kOptimizedOut);
+ node->set_parameters_and_registers(arg_index++, value);
+ }
+ },
+
+ context, generator, suspend_id, debug_pos_offset);
const uint32_t relative_jump_bytecode_offset = iterator_.current_offset();
- if (!is_toptier() && relative_jump_bytecode_offset > 0) {
- AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
+ if (ShouldEmitInterruptBudgetChecks() && relative_jump_bytecode_offset > 0) {
+ AddNewNode<ReduceInterruptBudgetForReturn>({},
+ relative_jump_bytecode_offset);
}
FinishBlock<Return>({GetAccumulatorTagged()});
}
@@ -4236,17 +6732,18 @@ void MaglevGraphBuilder::VisitResumeGenerator() {
ValueNode* register_size = GetInt32Constant(
parameter_count_without_receiver() + registers.register_count());
AddNewNode<AssertInt32>(
- {register_size, array_length}, AssertCondition::kLessOrEqual,
+ {register_size, array_length}, AssertCondition::kLessThanEqual,
AbortReason::kInvalidParametersAndRegistersInGenerator);
}
const compiler::BytecodeLivenessState* liveness =
GetOutLivenessFor(next_offset());
+ RootConstant* stale = GetRootConstant(RootIndex::kStaleRegister);
for (int i = 0; i < registers.register_count(); ++i) {
if (liveness->RegisterIsLive(registers[i].index())) {
int array_index = parameter_count_without_receiver() + i;
- StoreRegister(registers[i],
- AddNewNode<GeneratorRestoreRegister>({array}, array_index));
+ StoreRegister(registers[i], AddNewNode<GeneratorRestoreRegister>(
+ {array, stale}, array_index));
}
}
SetAccumulator(AddNewNode<LoadTaggedField>(
diff --git a/deps/v8/src/maglev/maglev-graph-builder.h b/deps/v8/src/maglev/maglev-graph-builder.h
index 26d7a4fa1a..a0a9caf71b 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.h
+++ b/deps/v8/src/maglev/maglev-graph-builder.h
@@ -16,6 +16,7 @@
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
+#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/processed-feedback.h"
@@ -29,19 +30,170 @@
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
+#include "src/objects/elements-kind.h"
#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
namespace maglev {
+class CallArguments;
+
+class ReduceResult {
+ public:
+ enum Kind {
+ kDoneWithValue = 0, // No need to mask while returning the pointer.
+ kDoneWithAbort,
+ kDoneWithoutValue,
+ kFail,
+ kNone,
+ };
+
+ ReduceResult() : payload_(kNone) {}
+
+ // NOLINTNEXTLINE
+ ReduceResult(ValueNode* value) : payload_(value) { DCHECK_NOT_NULL(value); }
+
+ ValueNode* value() const {
+ DCHECK(HasValue());
+ return payload_.GetPointerWithKnownPayload(kDoneWithValue);
+ }
+ bool HasValue() const { return kind() == kDoneWithValue; }
+
+ static ReduceResult Done(ValueNode* value) { return ReduceResult(value); }
+ static ReduceResult Done() { return ReduceResult(kDoneWithoutValue); }
+ static ReduceResult DoneWithAbort() { return ReduceResult(kDoneWithAbort); }
+ static ReduceResult Fail() { return ReduceResult(kFail); }
+
+ ReduceResult(const ReduceResult&) V8_NOEXCEPT = default;
+ ReduceResult& operator=(const ReduceResult&) V8_NOEXCEPT = default;
+
+ // No/undefined result, created by default constructor.
+ bool IsNone() const { return kind() == kNone; }
+
+ // Either DoneWithValue, DoneWithoutValue or DoneWithAbort.
+ bool IsDone() const { return !IsFail() && !IsNone(); }
+
+ // ReduceResult failed.
+ bool IsFail() const { return kind() == kFail; }
+
+ // Done with a ValueNode.
+ bool IsDoneWithValue() const { return HasValue(); }
+
+ // Done without producing a ValueNode.
+ bool IsDoneWithoutValue() const { return kind() == kDoneWithoutValue; }
+
+ // Done with an abort (unconditional deopt, infinite loop in an inlined
+ // function, etc)
+ bool IsDoneWithAbort() const { return kind() == kDoneWithAbort; }
+
+ Kind kind() const { return payload_.GetPayload(); }
+
+ private:
+ explicit ReduceResult(Kind kind) : payload_(kind) {}
+ base::PointerWithPayload<ValueNode, Kind, 3> payload_;
+};
+
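ReduceResult packs either a ValueNode pointer or a small status enum into one word via base::PointerWithPayload, relying on pointer alignment to keep the low bits free for the kind tag; kDoneWithValue being 0 means a value pointer can be returned without masking. A self-contained sketch of that encoding using plain uintptr_t arithmetic (it assumes at least 8-byte pointer alignment and is not the real base::PointerWithPayload):

#include <cassert>
#include <cstdint>

struct ValueNode;  // opaque; only its (aligned) address is stored

class TinyReduceResult {
 public:
  enum Kind { kDoneWithValue = 0, kDoneWithAbort, kDoneWithoutValue, kFail };

  explicit TinyReduceResult(ValueNode* value)
      : bits_(reinterpret_cast<uintptr_t>(value)) {
    // Alignment keeps the low bits zero, i.e. kind() == kDoneWithValue.
    assert((bits_ & kKindMask) == 0);
  }
  explicit TinyReduceResult(Kind kind) : bits_(static_cast<uintptr_t>(kind)) {}

  Kind kind() const { return static_cast<Kind>(bits_ & kKindMask); }
  bool HasValue() const { return kind() == kDoneWithValue && bits_ != 0; }
  ValueNode* value() const {
    assert(HasValue());
    return reinterpret_cast<ValueNode*>(bits_);  // no masking: the tag is 0
  }

 private:
  static constexpr uintptr_t kKindMask = 0x7;  // 3 payload bits, as declared above
  uintptr_t bits_;
};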
+struct FastLiteralField;
+
+// Encoding of a fast allocation site object's element fixed array.
+struct FastLiteralFixedArray {
+ FastLiteralFixedArray() : type(kUninitialized) {}
+ explicit FastLiteralFixedArray(compiler::ObjectRef cow_value)
+ : type(kCoW), cow_value(cow_value) {}
+ explicit FastLiteralFixedArray(int length, Zone* zone)
+ : type(kTagged),
+ length(length),
+ values(zone->NewArray<FastLiteralField>(length)) {}
+ explicit FastLiteralFixedArray(int length, Zone* zone, double)
+ : type(kDouble),
+ length(length),
+ double_values(zone->NewArray<Float64>(length)) {}
+
+ enum { kUninitialized, kCoW, kTagged, kDouble } type;
+
+ union {
+ char uninitialized_marker;
+
+ compiler::ObjectRef cow_value;
+
+ struct {
+ int length;
+ union {
+ FastLiteralField* values;
+ Float64* double_values;
+ };
+ };
+ };
+};
+
+// Encoding of a fast allocation site boilerplate object.
+struct FastLiteralObject {
+ FastLiteralObject(compiler::MapRef map, Zone* zone,
+ FastLiteralFixedArray elements)
+ : map(map),
+ fields(zone->NewArray<FastLiteralField>(map.GetInObjectProperties())),
+ elements(elements) {}
+
+ compiler::MapRef map;
+ FastLiteralField* fields;
+ FastLiteralFixedArray elements;
+ compiler::OptionalObjectRef js_array_length;
+};
+
+// Encoding of a fast allocation site literal value.
+struct FastLiteralField {
+ FastLiteralField() : type(kUninitialized) {}
+ explicit FastLiteralField(FastLiteralObject object)
+ : type(kObject), object(object) {}
+ explicit FastLiteralField(Float64 mutable_double_value)
+ : type(kMutableDouble), mutable_double_value(mutable_double_value) {}
+ explicit FastLiteralField(compiler::ObjectRef constant_value)
+ : type(kConstant), constant_value(constant_value) {}
+
+ enum { kUninitialized, kObject, kMutableDouble, kConstant } type;
+
+ union {
+ char uninitialized_marker;
+ FastLiteralObject object;
+ Float64 mutable_double_value;
+ compiler::ObjectRef constant_value;
+ };
+};
+
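The three FastLiteral* structs are hand-rolled tagged unions: a small enum selects the active member of an anonymous union. The same shape could be written with std::variant, as in the sketch below; the manual layout presumably keeps the structs trivially copyable and compact for zone-allocated arrays, though the source doesn't state the rationale. All names in the sketch are stand-ins, not V8 types.

#include <cstdint>
#include <variant>

// Hypothetical stand-ins for the payload types held by FastLiteralField.
struct ObjectHandle { uintptr_t raw; };
struct NestedObject { int field_count; };

// A std::variant spelling of the same enum-plus-union idea.
using LiteralField =
    std::variant<std::monostate,  // kUninitialized
                 NestedObject,    // kObject
                 double,          // kMutableDouble
                 ObjectHandle>;   // kConstant

inline bool IsUninitialized(const LiteralField& f) {
  return std::holds_alternative<std::monostate>(f);
}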
+#define RETURN_IF_DONE(result) \
+ do { \
+ auto res = result; \
+ if (res.IsDone()) { \
+ return res; \
+ } \
+ } while (false)
+
+#define PROCESS_AND_RETURN_IF_DONE(result, value_processor) \
+ do { \
+ auto res = result; \
+ if (res.IsDone()) { \
+ if (res.IsDoneWithValue()) { \
+ value_processor(res.value()); \
+ } \
+ return; \
+ } \
+ } while (false)
+
+#define RETURN_IF_ABORT(result) \
+ if (result.IsDoneWithAbort()) { \
+ return ReduceResult::DoneWithAbort(); \
+ }
+
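These macros let a visitor chain reduction attempts and stop at the first definitive outcome, consuming the produced value when there is one. A self-contained sketch of how the PROCESS_AND_RETURN_IF_DONE pattern is meant to be used, with a cut-down result type and an invented TryFastPath reducer:

#include <cstdio>

struct MiniResult {
  enum Kind { kDoneWithValue, kDoneWithAbort, kFail } kind;
  int value = 0;
  bool IsDone() const { return kind != kFail; }
  bool IsDoneWithValue() const { return kind == kDoneWithValue; }
};

// Same shape as maglev's macro: run an attempt, and if it produced a
// definitive outcome, consume the value (if any) and return early.
#define PROCESS_AND_RETURN_IF_DONE(result, value_processor)  \
  do {                                                       \
    MiniResult res = (result);                               \
    if (res.IsDone()) {                                      \
      if (res.IsDoneWithValue()) value_processor(res.value); \
      return;                                                \
    }                                                        \
  } while (false)

MiniResult TryFastPath(int x) {  // invented placeholder reducer
  if (x % 2 == 0) return {MiniResult::kDoneWithValue, x / 2};
  return {MiniResult::kFail};
}

void Visit(int x) {
  PROCESS_AND_RETURN_IF_DONE(TryFastPath(x),
                             [](int v) { std::printf("fast: %d\n", v); });
  std::printf("generic path for %d\n", x);  // only reached if the fast path failed
}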
class MaglevGraphBuilder {
public:
- explicit MaglevGraphBuilder(LocalIsolate* local_isolate,
- MaglevCompilationUnit* compilation_unit,
- Graph* graph,
- MaglevGraphBuilder* parent = nullptr);
+ explicit MaglevGraphBuilder(
+ LocalIsolate* local_isolate, MaglevCompilationUnit* compilation_unit,
+ Graph* graph, float call_frequency = 1.0f,
+ BytecodeOffset bytecode_offset = BytecodeOffset::None(),
+ MaglevGraphBuilder* parent = nullptr);
void Build() {
DCHECK(!is_inline());
@@ -56,15 +208,31 @@ class MaglevGraphBuilder {
SetArgument(i, v);
}
BuildRegisterFrameInitialization();
+
+ // Don't use the AddNewNode helper for the function entry stack check, so
+ // that we can set a custom deopt frame on it.
+ FunctionEntryStackCheck* function_entry_stack_check =
+ FunctionEntryStackCheck::New(zone(), {});
+ new (function_entry_stack_check->lazy_deopt_info()) LazyDeoptInfo(
+ zone(), GetDeoptFrameForEntryStackCheck(),
+ interpreter::Register::invalid_value(), 0, compiler::FeedbackSource());
+ AddInitializedNodeToGraph(function_entry_stack_check);
+
BuildMergeStates();
EndPrologue();
BuildBody();
}
+ ReduceResult BuildInlined(const CallArguments& args, BasicBlockRef* start_ref,
+ BasicBlockRef* end_ref);
+
void StartPrologue();
void SetArgument(int i, ValueNode* value);
+ void InitializeRegister(interpreter::Register reg,
+ ValueNode* value = nullptr);
ValueNode* GetTaggedArgument(int i);
- void BuildRegisterFrameInitialization();
+ void BuildRegisterFrameInitialization(ValueNode* context = nullptr,
+ ValueNode* closure = nullptr);
void BuildMergeStates();
BasicBlock* EndPrologue();
@@ -75,14 +243,55 @@ class MaglevGraphBuilder {
}
}
+ Int32Constant* GetInt32Constant(int constant) {
+ // The constant must fit in a Smi, since it could be later tagged in a Phi.
+ DCHECK(Smi::IsValid(constant));
+ auto it = graph_->int32().find(constant);
+ if (it == graph_->int32().end()) {
+ Int32Constant* node = CreateNewConstantNode<Int32Constant>(0, constant);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ graph_->int32().emplace(constant, node);
+ return node;
+ }
+ return it->second;
+ }
+
+ Float64Constant* GetFloat64Constant(double constant) {
+ return GetFloat64Constant(
+ Float64::FromBits(base::double_to_uint64(constant)));
+ }
+
+ Float64Constant* GetFloat64Constant(Float64 constant) {
+ auto it = graph_->float64().find(constant.get_bits());
+ if (it == graph_->float64().end()) {
+ Float64Constant* node =
+ CreateNewConstantNode<Float64Constant>(0, constant);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ graph_->float64().emplace(constant.get_bits(), node);
+ return node;
+ }
+ return it->second;
+ }
+
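GetFloat64Constant canonicalizes float constants in a per-graph cache keyed on the raw bit pattern rather than on the double value, which sidesteps double comparison quirks (NaN never compares equal to itself, and +0.0 compares equal to -0.0). A standalone sketch of that caching choice; ConstantCache and Float64Node are illustrative stand-ins:

#include <cstdint>
#include <cstring>
#include <unordered_map>

struct Float64Node { double value; };  // stand-in for Float64Constant

class ConstantCache {
 public:
  Float64Node* GetFloat64Constant(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);  // key on the exact bit pattern
    auto it = cache_.find(bits);
    if (it != cache_.end()) return &it->second;
    // Keying on bits keeps -0.0 distinct from +0.0 and makes NaN cacheable.
    return &cache_.emplace(bits, Float64Node{value}).first->second;
  }

 private:
  std::unordered_map<uint64_t, Float64Node> cache_;
};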
Graph* graph() const { return graph_; }
+ Zone* zone() const { return compilation_unit_->zone(); }
+ MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
+
+ bool has_graph_labeller() const {
+ return compilation_unit_->has_graph_labeller();
+ }
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
private:
+ class CallSpeculationScope;
+ class LazyDeoptContinuationScope;
+
bool CheckType(ValueNode* node, NodeType type);
- NodeInfo* CreateInfoIfNot(ValueNode* node, NodeType type);
bool EnsureType(ValueNode* node, NodeType type, NodeType* old = nullptr);
- bool is_toptier() {
- return v8_flags.lower_tier_as_toptier && !v8_flags.turbofan;
+ bool ShouldEmitInterruptBudgetChecks() {
+ return v8_flags.force_emit_interrupt_budget_checks || v8_flags.turbofan;
}
BasicBlock* CreateEdgeSplitBlock(int offset,
int interrupt_budget_correction) {
@@ -94,7 +303,8 @@ class MaglevGraphBuilder {
// Add an interrupt budget correction if necessary. This makes the edge
// split block no longer empty, which is unexpected, but we're not changing
// interpreter frame state, so that's ok.
- if (!is_toptier() && interrupt_budget_correction != 0) {
+ if (v8_flags.maglev_increase_budget_forward_jump &&
+ ShouldEmitInterruptBudgetChecks() && interrupt_budget_correction != 0) {
DCHECK_GT(interrupt_budget_correction, 0);
AddNewNode<IncreaseInterruptBudget>({}, interrupt_budget_correction);
}
@@ -136,8 +346,9 @@ class MaglevGraphBuilder {
current_interpreter_frame_.CopyFrom(*compilation_unit_, merge_state);
// Merges aren't simple fallthroughs, so we should reset the checkpoint
- // validity.
+ // and for-in state validity.
latest_checkpointed_frame_.reset();
+ current_for_in_state.receiver_needs_map_check = true;
if (merge_state.predecessor_count() == 1) return;
@@ -264,13 +475,14 @@ class MaglevGraphBuilder {
// DCHECK(!current_block_->nodes().is_empty());
BasicBlock* predecessor = FinishBlock<Jump>({}, &jump_targets_[offset]);
merge_state->Merge(*compilation_unit_, graph_->smi(),
- current_interpreter_frame_, predecessor, offset);
+ current_interpreter_frame_, predecessor);
}
if (v8_flags.trace_maglev_graph_building) {
auto detail = merge_state->is_exception_handler() ? "exception handler"
: merge_state->is_loop() ? "loop header"
: "merge";
- std::cout << "== New block (" << detail << ") ==" << std::endl;
+ std::cout << "== New block (" << detail << ") at " << function()
+ << "==" << std::endl;
}
if (merge_state->is_exception_handler()) {
@@ -361,11 +573,7 @@ class MaglevGraphBuilder {
INTRINSICS_LIST(DECLARE_VISITOR)
#undef DECLARE_VISITOR
- template <typename NodeT>
- NodeT* AddNode(NodeT* node) {
- if (node->properties().is_required_when_unused()) {
- MarkPossibleSideEffect();
- }
+ void AddInitializedNodeToGraph(Node* node) {
current_block_->nodes().Add(node);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
if (v8_flags.trace_maglev_graph_building) {
@@ -376,42 +584,74 @@ class MaglevGraphBuilder {
#ifdef DEBUG
new_nodes_.insert(node);
#endif
- return node;
}
- template <typename NodeT, typename... Args>
- NodeT* AddNewNode(size_t input_count, Args&&... args) {
- return AddNode(
- CreateNewNode<NodeT>(input_count, std::forward<Args>(args)...));
+ // Add a new node with a dynamic set of inputs which are initialized by the
+ // `post_create_input_initializer` function before the node is added to the
+ // graph.
+ template <typename NodeT, typename Function, typename... Args>
+ NodeT* AddNewNode(size_t input_count,
+ Function&& post_create_input_initializer, Args&&... args) {
+ NodeT* node =
+ NodeBase::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
+ post_create_input_initializer(node);
+ return AttachExtraInfoAndAddToGraph(node);
}
+ // Add a new node with a static set of inputs.
template <typename NodeT, typename... Args>
NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
- return AddNode(CreateNewNode<NodeT>(inputs, std::forward<Args>(args)...));
+ NodeT* node =
+ NodeBase::New<NodeT>(zone(), inputs, std::forward<Args>(args)...);
+ return AttachExtraInfoAndAddToGraph(node);
}
template <typename NodeT, typename... Args>
- NodeT* CreateNewNodeHelper(Args&&... args) {
+ NodeT* CreateNewConstantNode(Args&&... args) {
+ static_assert(IsConstantNode(Node::opcode_of<NodeT>));
+ NodeT* node = NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
+ static_assert(!NodeT::kProperties.can_eager_deopt());
+ static_assert(!NodeT::kProperties.can_lazy_deopt());
+ static_assert(!NodeT::kProperties.can_throw());
+ static_assert(!NodeT::kProperties.has_any_side_effects());
+ return node;
+ }
+
+ template <typename NodeT>
+ NodeT* AttachExtraInfoAndAddToGraph(NodeT* node) {
+ AttachEagerDeoptInfo(node);
+ AttachLazyDeoptInfo(node);
+ AttachExceptionHandlerInfo(node);
+ MarkPossibleSideEffect(node);
+ AddInitializedNodeToGraph(node);
+ return node;
+ }
+
+ template <typename NodeT>
+ void AttachEagerDeoptInfo(NodeT* node) {
if constexpr (NodeT::kProperties.can_eager_deopt()) {
- return NodeBase::New<NodeT>(zone(), GetLatestCheckpointedFrame(),
- std::forward<Args>(args)...);
- } else if constexpr (NodeT::kProperties.can_lazy_deopt()) {
- return NodeBase::New<NodeT>(zone(), GetDeoptFrameForLazyDeopt(),
- std::forward<Args>(args)...);
- } else {
- return NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
+ new (node->eager_deopt_info()) EagerDeoptInfo(
+ zone(), GetLatestCheckpointedFrame(), current_speculation_feedback_);
}
}
- template <typename NodeT, typename... Args>
- NodeT* CreateNewNode(Args&&... args) {
- NodeT* node = CreateNewNodeHelper<NodeT>(std::forward<Args>(args)...);
+ template <typename NodeT>
+ void AttachLazyDeoptInfo(NodeT* node) {
+ if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ auto [register_result, register_count] = GetResultLocationAndSize();
+ new (node->lazy_deopt_info())
+ LazyDeoptInfo(zone(), GetDeoptFrameForLazyDeopt(), register_result,
+ register_count, current_speculation_feedback_);
+ }
+ }
+
+ template <typename NodeT>
+ void AttachExceptionHandlerInfo(NodeT* node) {
if constexpr (NodeT::kProperties.can_throw()) {
- if (catch_block_stack_.size() > 0) {
- // Inside a try-block.
- int handler_offset = catch_block_stack_.top().handler;
+ BasicBlockRef* catch_block_ref = GetCurrentTryCatchBlockOffset();
+ if (catch_block_ref) {
new (node->exception_handler_info())
- ExceptionHandlerInfo(&jump_targets_[handler_offset]);
+ ExceptionHandlerInfo(catch_block_ref);
} else {
// Patch no exception handler marker.
// TODO(victorgomes): Avoid allocating exception handler data in this
@@ -419,7 +659,19 @@ class MaglevGraphBuilder {
new (node->exception_handler_info()) ExceptionHandlerInfo();
}
}
- return node;
+ }
+
+ BasicBlockRef* GetCurrentTryCatchBlockOffset() {
+ if (catch_block_stack_.size() > 0) {
+ // Inside a try-block.
+ return &jump_targets_[catch_block_stack_.top().handler];
+ }
+ // Function is inlined and the call is inside a catch block.
+ if (parent_catch_block_) {
+ DCHECK(is_inline());
+ return parent_catch_block_;
+ }
+ return nullptr;
}
enum ContextSlotMutability { kImmutable, kMutable };
@@ -428,30 +680,35 @@ class MaglevGraphBuilder {
ContextSlotMutability slot_mutability);
ValueNode* LoadAndCacheContextSlot(ValueNode* context, int offset,
ContextSlotMutability slot_mutability);
+ void StoreAndCacheContextSlot(ValueNode* context, int offset,
+ ValueNode* value);
void BuildLoadContextSlot(ValueNode* context, size_t depth, int slot_index,
ContextSlotMutability slot_mutability);
+ void BuildStoreContextSlot(ValueNode* context, size_t depth, int slot_index,
+ ValueNode* value);
template <Builtin kBuiltin>
CallBuiltin* BuildCallBuiltin(std::initializer_list<ValueNode*> inputs) {
using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;
- CallBuiltin* call_builtin;
if constexpr (Descriptor::HasContextParameter()) {
- call_builtin =
- CreateNewNode<CallBuiltin>(inputs.size() + 1, kBuiltin, GetContext());
+ return AddNewNode<CallBuiltin>(
+ inputs.size() + 1,
+ [&](CallBuiltin* call_builtin) {
+ int arg_index = 0;
+ for (auto* input : inputs) {
+ call_builtin->set_arg(arg_index++, input);
+ }
+ },
+ kBuiltin, GetContext());
} else {
- call_builtin = CreateNewNode<CallBuiltin>(inputs.size(), kBuiltin);
+ return AddNewNode<CallBuiltin>(inputs, kBuiltin);
}
- int arg_index = 0;
- for (auto* input : inputs) {
- call_builtin->set_arg(arg_index++, input);
- }
- return AddNode(call_builtin);
}
template <Builtin kBuiltin>
CallBuiltin* BuildCallBuiltin(
std::initializer_list<ValueNode*> inputs,
- compiler::FeedbackSource& feedback,
+ compiler::FeedbackSource const& feedback,
CallBuiltin::FeedbackSlotType slot_type = CallBuiltin::kTaggedIndex) {
CallBuiltin* call_builtin = BuildCallBuiltin<kBuiltin>(inputs);
call_builtin->set_feedback(feedback, slot_type);
@@ -464,8 +721,6 @@ class MaglevGraphBuilder {
// TODO(victorgomes): Rename all kFeedbackVector parameters in the builtins
// to kVector.
DCHECK_EQ(vector_index, Descriptor::kVector);
- // Also check that the builtin does not allow var args.
- DCHECK_EQ(Descriptor::kAllowVarArgs, false);
#endif // DEBUG
return call_builtin;
}
@@ -476,14 +731,15 @@ class MaglevGraphBuilder {
CallRuntime* BuildCallRuntime(Runtime::FunctionId function_id,
std::initializer_list<ValueNode*> inputs) {
- CallRuntime* call_runtime = CreateNewNode<CallRuntime>(
- inputs.size() + CallRuntime::kFixedInputCount, function_id,
- GetContext());
- int arg_index = 0;
- for (auto* input : inputs) {
- call_runtime->set_arg(arg_index++, input);
- }
- return AddNode(call_runtime);
+ return AddNewNode<CallRuntime>(
+ inputs.size() + CallRuntime::kFixedInputCount,
+ [&](CallRuntime* call_runtime) {
+ int arg_index = 0;
+ for (auto* input : inputs) {
+ call_runtime->set_arg(arg_index++, input);
+ }
+ },
+ function_id, GetContext());
}
void BuildAbort(AbortReason reason) {
@@ -553,7 +809,8 @@ class MaglevGraphBuilder {
DCHECK(Smi::IsValid(constant));
auto it = graph_->smi().find(constant);
if (it == graph_->smi().end()) {
- SmiConstant* node = CreateNewNode<SmiConstant>(0, Smi::FromInt(constant));
+ SmiConstant* node =
+ CreateNewConstantNode<SmiConstant>(0, Smi::FromInt(constant));
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
graph_->smi().emplace(constant, node);
return node;
@@ -561,10 +818,22 @@ class MaglevGraphBuilder {
return it->second;
}
+ ExternalConstant* GetExternalConstant(ExternalReference reference) {
+ auto it = graph_->external_references().find(reference.address());
+ if (it == graph_->external_references().end()) {
+ ExternalConstant* node =
+ CreateNewConstantNode<ExternalConstant>(0, reference);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ graph_->external_references().emplace(reference.address(), node);
+ return node;
+ }
+ return it->second;
+ }
+
RootConstant* GetRootConstant(RootIndex index) {
auto it = graph_->root().find(index);
if (it == graph_->root().end()) {
- RootConstant* node = CreateNewNode<RootConstant>(0, index);
+ RootConstant* node = CreateNewConstantNode<RootConstant>(0, index);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
graph_->root().emplace(index, node);
return node;
@@ -577,44 +846,18 @@ class MaglevGraphBuilder {
: RootIndex::kFalseValue);
}
- Int32Constant* GetInt32Constant(int constant) {
- // The constant must fit in a Smi, since it could be later tagged in a Phi.
- DCHECK(Smi::IsValid(constant));
- auto it = graph_->int32().find(constant);
- if (it == graph_->int32().end()) {
- Int32Constant* node = CreateNewNode<Int32Constant>(0, constant);
- if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
- graph_->int32().emplace(constant, node);
- return node;
- }
- return it->second;
- }
-
- Float64Constant* GetFloat64Constant(double constant) {
- if (constant != constant) {
- if (graph_->nan() == nullptr) {
- graph_->set_nan(CreateNewNode<Float64Constant>(0, constant));
- }
- return graph_->nan();
- }
- auto it = graph_->float64().find(constant);
- if (it == graph_->float64().end()) {
- Float64Constant* node = CreateNewNode<Float64Constant>(0, constant);
- if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
- graph_->float64().emplace(constant, node);
- return node;
- }
- return it->second;
- }
-
ValueNode* GetConstant(const compiler::ObjectRef& ref) {
if (ref.IsSmi()) return GetSmiConstant(ref.AsSmi());
-
- // TODO(verwaest): Handle roots.
const compiler::HeapObjectRef& constant = ref.AsHeapObject();
+
+ auto root_index = broker()->FindRootIndex(constant);
+ if (root_index.has_value()) {
+ return GetRootConstant(*root_index);
+ }
+
auto it = graph_->constants().find(constant);
if (it == graph_->constants().end()) {
- Constant* node = CreateNewNode<Constant>(0, constant);
+ Constant* node = CreateNewConstantNode<Constant>(0, constant);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
graph_->constants().emplace(constant, node);
return node;
@@ -622,50 +865,38 @@ class MaglevGraphBuilder {
return it->second;
}
- bool IsConstantNodeTheHole(ValueNode* value) {
- DCHECK(IsConstantNode(value->opcode()));
- if (RootConstant* constant = value->TryCast<RootConstant>()) {
- return constant->index() == RootIndex::kTheHoleValue;
- }
- if (Constant* constant = value->TryCast<Constant>()) {
- return constant->IsTheHole();
- }
- // The other constants nodes cannot be TheHole.
- return false;
+ ValueNode* GetRegisterInput(Register reg) {
+ DCHECK(!graph_->register_inputs().has(reg));
+ graph_->register_inputs().set(reg);
+ return AddNewNode<RegisterInput>({}, reg);
+ }
+
+#define DEFINE_IS_ROOT_OBJECT(type, name, CamelName) \
+ bool Is##CamelName(ValueNode* value) const { \
+ if (RootConstant* constant = value->TryCast<RootConstant>()) { \
+ return constant->index() == RootIndex::k##CamelName; \
+ } \
+ return false; \
}
+ ROOT_LIST(DEFINE_IS_ROOT_OBJECT)
+#undef DEFINE_IS_ROOT_OBJECT
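
For reference, a single expansion of the macro above (using the hole value as the example) looks like the following, which also covers the RootConstant case of the removed IsConstantNodeTheHole helper:

    bool IsTheHoleValue(ValueNode* value) const {
      if (RootConstant* constant = value->TryCast<RootConstant>()) {
        return constant->index() == RootIndex::kTheHoleValue;
      }
      return false;
    }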
// Move an existing ValueNode between two registers. You can pass
// virtual_accumulator as the src or dst to move in or out of the accumulator.
void MoveNodeBetweenRegisters(interpreter::Register src,
interpreter::Register dst) {
// We shouldn't be moving newly created nodes between registers.
- DCHECK_EQ(0, new_nodes_.count(current_interpreter_frame_.get(src)));
+ DCHECK(!IsNodeCreatedForThisBytecode(current_interpreter_frame_.get(src)));
DCHECK_NOT_NULL(current_interpreter_frame_.get(src));
current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));
}
+ ValueNode* GetTaggedValue(ValueNode* value);
+
ValueNode* GetTaggedValue(interpreter::Register reg) {
ValueNode* value = current_interpreter_frame_.get(reg);
- switch (value->properties().value_representation()) {
- case ValueRepresentation::kTagged:
- return value;
- case ValueRepresentation::kInt32: {
- NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (node_info->tagged_alternative == nullptr) {
- node_info->tagged_alternative = AddNewNode<CheckedSmiTag>({value});
- }
- return node_info->tagged_alternative;
- }
- case ValueRepresentation::kFloat64: {
- NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (node_info->tagged_alternative == nullptr) {
- node_info->tagged_alternative = AddNewNode<Float64Box>({value});
- }
- return node_info->tagged_alternative;
- }
- }
- UNREACHABLE();
+ return GetTaggedValue(value);
}
void SetKnownType(ValueNode* node, NodeType type) {
@@ -673,78 +904,48 @@ class MaglevGraphBuilder {
known_info->type = type;
}
- ValueNode* GetInternalizedString(interpreter::Register reg) {
- ValueNode* node = GetTaggedValue(reg);
- if (known_node_aspects()
- .GetOrCreateInfoFor(node)
- ->is_internalized_string()) {
- return node;
- }
- if (Constant* constant = node->TryCast<Constant>()) {
- if (constant->object().IsInternalizedString()) {
- SetKnownType(constant, NodeType::kInternalizedString);
- return constant;
- }
- }
- node = AddNewNode<CheckedInternalizedString>({node});
- SetKnownType(node, NodeType::kInternalizedString);
- current_interpreter_frame_.set(reg, node);
- return node;
+ ValueNode* GetInternalizedString(interpreter::Register reg);
+
+ ValueNode* GetTruncatedInt32FromNumber(
+ ValueNode* value, TaggedToFloat64ConversionType conversion_type);
+
+ ValueNode* GetTruncatedInt32FromNumber(
+ interpreter::Register reg,
+ TaggedToFloat64ConversionType conversion_type) {
+ return GetTruncatedInt32FromNumber(current_interpreter_frame_.get(reg),
+ conversion_type);
}
+ ValueNode* GetUint32ClampedFromNumber(ValueNode* value);
+
+ ValueNode* GetUint32ClampedFromNumber(interpreter::Register reg) {
+ return GetUint32ClampedFromNumber(current_interpreter_frame_.get(reg));
+ }
+
+ ValueNode* GetTruncatedInt32(ValueNode* node);
+
+ ValueNode* GetTruncatedInt32(interpreter::Register reg) {
+ return GetTruncatedInt32(current_interpreter_frame_.get(reg));
+ }
+
+ ValueNode* GetInt32(ValueNode* value);
+
ValueNode* GetInt32(interpreter::Register reg) {
ValueNode* value = current_interpreter_frame_.get(reg);
- switch (value->properties().value_representation()) {
- case ValueRepresentation::kTagged: {
- if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
- return GetInt32Constant(constant->value().value());
- }
- NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (node_info->int32_alternative == nullptr) {
- node_info->int32_alternative = BuildSmiUntag(value);
- }
- return node_info->int32_alternative;
- }
- case ValueRepresentation::kInt32:
- return value;
- case ValueRepresentation::kFloat64: {
- NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (node_info->int32_alternative == nullptr) {
- node_info->int32_alternative =
- AddNewNode<CheckedTruncateFloat64ToInt32>({value});
- }
- return node_info->int32_alternative;
- }
- }
- UNREACHABLE();
+ return GetInt32(value);
}
- ValueNode* GetFloat64(ValueNode* value) {
- switch (value->properties().value_representation()) {
- case ValueRepresentation::kTagged: {
- NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (node_info->float64_alternative == nullptr) {
- node_info->float64_alternative =
- AddNewNode<CheckedFloat64Unbox>({value});
- }
- return node_info->float64_alternative;
- }
- case ValueRepresentation::kInt32: {
- NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
- if (node_info->float64_alternative == nullptr) {
- node_info->float64_alternative =
- AddNewNode<ChangeInt32ToFloat64>({value});
- }
- return node_info->float64_alternative;
- }
- case ValueRepresentation::kFloat64:
- return value;
- }
- UNREACHABLE();
+ ValueNode* GetFloat64(ValueNode* value,
+ TaggedToFloat64ConversionType conversion_type);
+
+ ValueNode* GetFloat64(interpreter::Register reg,
+ TaggedToFloat64ConversionType conversion_type) {
+ return GetFloat64(current_interpreter_frame_.get(reg), conversion_type);
}
- ValueNode* GetFloat64(interpreter::Register reg) {
- return GetFloat64(current_interpreter_frame_.get(reg));
+ ValueNode* GetRawAccumulator() {
+ return current_interpreter_frame_.get(
+ interpreter::Register::virtual_accumulator());
}
ValueNode* GetAccumulatorTagged() {
@@ -755,8 +956,45 @@ class MaglevGraphBuilder {
return GetInt32(interpreter::Register::virtual_accumulator());
}
- ValueNode* GetAccumulatorFloat64() {
- return GetFloat64(interpreter::Register::virtual_accumulator());
+ ValueNode* GetAccumulatorTruncatedInt32() {
+ return GetTruncatedInt32(interpreter::Register::virtual_accumulator());
+ }
+
+ ValueNode* GetAccumulatorTruncatedInt32FromNumber(
+ TaggedToFloat64ConversionType conversion_type) {
+ return GetTruncatedInt32FromNumber(
+ interpreter::Register::virtual_accumulator(), conversion_type);
+ }
+
+ ValueNode* GetAccumulatorUint32ClampedFromNumber() {
+ return GetUint32ClampedFromNumber(
+ interpreter::Register::virtual_accumulator());
+ }
+
+ ValueNode* GetAccumulatorFloat64(
+ TaggedToFloat64ConversionType conversion_type) {
+ return GetFloat64(interpreter::Register::virtual_accumulator(),
+ conversion_type);
+ }
+
+ ValueNode* GetSilencedNaN(ValueNode* value) {
+ DCHECK_EQ(value->properties().value_representation(),
+ ValueRepresentation::kFloat64);
+
+ // We only need to check for silenced NaN in non-conversion nodes, since
+ // conversions can't produce signalling NaNs.
+ if (value->properties().is_conversion()) return value;
+
+ // Special case constants, since we know what they are.
+ Float64Constant* constant = value->TryCast<Float64Constant>();
+ if (constant) {
+ constexpr double quiet_NaN = std::numeric_limits<double>::quiet_NaN();
+ if (!constant->value().is_nan()) return constant;
+ return GetFloat64Constant(quiet_NaN);
+ }
+
+ // Silence all other values.
+ return AddNewNode<Float64SilenceNaN>({value});
}
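
As a standalone illustration of the quiet/signalling distinction handled above (a minimal sketch assuming the common IEEE-754 convention where the top mantissa bit is the quiet bit, as on x64 and arm64; not part of the patch):

    #include <cstdint>
    #include <cstring>

    // Returns true for quiet NaNs, false for signalling NaNs and non-NaN values.
    inline bool IsQuietNaN(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      const bool is_nan =
          (bits & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL &&
          (bits & 0x000fffffffffffffULL) != 0;
      return is_nan && (bits & 0x0008000000000000ULL) != 0;
    }

GetSilencedNaN only needs to normalize the NaN case: non-NaN values pass through unchanged, NaN constants are replaced by the canonical quiet NaN, and all other Float64 values are run through Float64SilenceNaN.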
bool IsRegisterEqualToAccumulator(int operand_index) {
@@ -765,6 +1003,11 @@ class MaglevGraphBuilder {
current_interpreter_frame_.accumulator();
}
+ ValueNode* LoadRegisterRaw(int operand_index) {
+ return current_interpreter_frame_.get(
+ iterator_.GetRegisterOperand(operand_index));
+ }
+
ValueNode* LoadRegisterTagged(int operand_index) {
return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
}
@@ -773,11 +1016,17 @@ class MaglevGraphBuilder {
return GetInt32(iterator_.GetRegisterOperand(operand_index));
}
- ValueNode* LoadRegisterFloat64(int operand_index) {
- return GetFloat64(iterator_.GetRegisterOperand(operand_index));
+ ValueNode* LoadRegisterTruncatedInt32(int operand_index) {
+ return GetTruncatedInt32(iterator_.GetRegisterOperand(operand_index));
+ }
+
+ ValueNode* LoadRegisterFloat64(
+ int operand_index, TaggedToFloat64ConversionType conversion_type) {
+ return GetFloat64(iterator_.GetRegisterOperand(operand_index),
+ conversion_type);
}
- ValueNode* LoadFixedArrayElement(ValueNode* node, int index) {
+ ValueNode* BuildLoadFixedArrayElement(ValueNode* node, int index) {
return AddNewNode<LoadTaggedField>({node},
FixedArray::OffsetOfElementAt(index));
}
@@ -810,14 +1059,15 @@ class MaglevGraphBuilder {
template <typename NodeT>
void StoreRegister(interpreter::Register target, NodeT* value) {
- // We should only set register values to nodes that were newly created in
- // this Visit. Existing nodes should be moved between registers with
- // MoveNodeBetweenRegisters.
- if (!IsConstantNode(value->opcode())) {
- DCHECK_NE(0, new_nodes_.count(value));
- }
- MarkAsLazyDeoptResult(value, target, 1);
+ static_assert(std::is_base_of_v<ValueNode, NodeT>);
+ DCHECK(HasOutputRegister(target));
current_interpreter_frame_.set(target, value);
+
+ // Make sure the lazy deopt info of this value, if any, is registered as
+ // mutating this register.
+ DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
+ IsNodeCreatedForThisBytecode(value),
+ value->lazy_deopt_info()->IsResultRegister(target));
}
template <typename NodeT>
@@ -831,76 +1081,104 @@ class MaglevGraphBuilder {
DCHECK_EQ(value->ReturnCount(), 2);
DCHECK_NE(0, new_nodes_.count(value));
- MarkAsLazyDeoptResult(value, target0, value->ReturnCount());
+ DCHECK(HasOutputRegister(target0));
current_interpreter_frame_.set(target0, value);
ValueNode* second_value = GetSecondValue(value);
DCHECK_NE(0, new_nodes_.count(second_value));
+ DCHECK(HasOutputRegister(target1));
current_interpreter_frame_.set(target1, second_value);
- }
- InterpretedDeoptFrame GetLatestCheckpointedFrame() {
- if (!latest_checkpointed_frame_) {
- latest_checkpointed_frame_.emplace(
- *compilation_unit_,
- zone()->New<CompactInterpreterFrameState>(
- *compilation_unit_, GetInLiveness(), current_interpreter_frame_),
- BytecodeOffset(iterator_.current_offset()), current_source_position_,
- // TODO(leszeks): Don't always allocate for the parent state,
- // maybe cache it on the graph builder?
- parent_
- ? zone()->New<DeoptFrame>(parent_->GetLatestCheckpointedFrame())
- : nullptr);
- }
- return *latest_checkpointed_frame_;
+ // Make sure the lazy deopt info of this value, if any, is registered as
+ // mutating these registers.
+ DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
+ IsNodeCreatedForThisBytecode(value),
+ value->lazy_deopt_info()->IsResultRegister(target0));
+ DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
+ IsNodeCreatedForThisBytecode(value),
+ value->lazy_deopt_info()->IsResultRegister(target1));
}
- InterpretedDeoptFrame GetDeoptFrameForLazyDeopt() {
- return InterpretedDeoptFrame(
- *compilation_unit_,
- zone()->New<CompactInterpreterFrameState>(
- *compilation_unit_, GetOutLiveness(), current_interpreter_frame_),
- BytecodeOffset(iterator_.current_offset()), current_source_position_,
- // TODO(leszeks): Support inlining for lazy deopts.
- nullptr);
- }
+ std::pair<interpreter::Register, int> GetResultLocationAndSize() const;
+#ifdef DEBUG
+ bool HasOutputRegister(interpreter::Register reg) const;
+#endif
+
+ DeoptFrame* GetParentDeoptFrame();
+ DeoptFrame GetLatestCheckpointedFrame();
+ DeoptFrame GetDeoptFrameForLazyDeopt();
+ DeoptFrame GetDeoptFrameForLazyDeoptHelper(
+ LazyDeoptContinuationScope* continuation_scope,
+ bool mark_accumulator_dead);
+ InterpretedDeoptFrame GetDeoptFrameForEntryStackCheck();
template <typename NodeT>
- void MarkAsLazyDeoptResult(NodeT* value,
- interpreter::Register result_location,
- int result_size) {
- DCHECK_EQ(NodeT::kProperties.can_lazy_deopt(),
- value->properties().can_lazy_deopt());
- if constexpr (NodeT::kProperties.can_lazy_deopt()) {
- value->lazy_deopt_info()->SetResultLocation(result_location, result_size);
+ void MarkPossibleSideEffect(NodeT* node) {
+ // Don't do anything for nodes without side effects.
+ if constexpr (!NodeT::kProperties.has_any_side_effects()) return;
+
+ // Simple field stores are stores which do nothing but change a field value
+ // (i.e. no map transitions or calls into user code).
+ static constexpr bool is_simple_field_store =
+ std::is_same_v<NodeT, StoreTaggedFieldWithWriteBarrier> ||
+ std::is_same_v<NodeT, StoreTaggedFieldNoWriteBarrier> ||
+ std::is_same_v<NodeT, StoreDoubleField> ||
+ std::is_same_v<NodeT, CheckedStoreSmiField>;
+
+ // Don't change known node aspects for:
+ //
+ // * Simple field stores -- the only relevant side effect on these is
+ // writes to objects which invalidate loaded properties and context
+ // slots, and we invalidate these already as part of emitting the store.
+ //
+ // * CheckMapsWithMigration -- this only migrates representations of
+ // values, not the values themselves, so cached values are still valid.
+ static constexpr bool should_clear_unstable_node_aspects =
+ !is_simple_field_store &&
+ !std::is_same_v<NodeT, CheckMapsWithMigration>;
+
+ // Simple field stores can't possibly change or migrate the map.
+ static constexpr bool is_possible_map_change = !is_simple_field_store;
+
+ // We only need to clear unstable node aspects on the current builder, not
+ // the parent, since we'll copy the known_node_aspects to the parent anyway
+ // once we finish the inlined function.
+ if constexpr (should_clear_unstable_node_aspects) {
+ // A side effect could change existing objects' maps. For stable maps we
+ // know this hasn't happened (because we added a dependency on the maps
+ // staying stable and therefore not possible to transition away from), but
+ // we can no longer assume that objects with unstable maps still have the
+ // same map. Unstable maps can also transition to stable ones, so the
+ // set of stable maps becomes invalid for a node that had an unstable map.
+ auto it = known_node_aspects().unstable_maps.begin();
+ while (it != known_node_aspects().unstable_maps.end()) {
+ if (it->second.size() == 0) {
+ it++;
+ } else {
+ known_node_aspects().stable_maps.erase(it->first);
+ it = known_node_aspects().unstable_maps.erase(it);
+ }
+ }
+ // Similarly, side-effects can change object contents, so we have to clear
+ // our known loaded properties -- however, constant properties are known
+ // to not change (and we added a dependency on this), so we don't have to
+ // clear those.
+ known_node_aspects().loaded_properties.clear();
+ known_node_aspects().loaded_context_slots.clear();
}
- }
- void MarkPossibleSideEffect() {
- // If there was a potential side effect, invalidate the previous checkpoint.
- latest_checkpointed_frame_.reset();
+ // Other kinds of side effect have to be propagated up to the parent.
+ for (MaglevGraphBuilder* builder = this; builder != nullptr;
+ builder = builder->parent_) {
+ // All user-observable side effects need to clear the checkpointed frame.
+ // TODO(leszeks): What side effects aren't observable? Maybe migrations?
+ builder->latest_checkpointed_frame_.reset();
- // A side effect could change existing objects' maps. For stable maps we
- // know this hasn't happened (because we added a dependency on the maps
- // staying stable and therefore not possible to transition away from), but
- // we can no longer assume that objects with unstable maps still have the
- // same map. Unstable maps can also transition to stable ones, so the
- // set of stable maps becomes invalid for a not that had a unstable map.
- auto it = known_node_aspects().unstable_maps.begin();
- while (it != known_node_aspects().unstable_maps.end()) {
- if (it->second.size() == 0) {
- it++;
- } else {
- known_node_aspects().stable_maps.erase(it->first);
- it = known_node_aspects().unstable_maps.erase(it);
+ // If a map might have changed, then we need to re-check it for for-in.
+ if (is_possible_map_change) {
+ builder->current_for_in_state.receiver_needs_map_check = true;
}
}
- // Similarly, side-effects can change object contents, so we have to clear
- // our known loaded properties -- however, constant properties are known
- // to not change (and we added a dependency on this), so we don't have to
- // clear those.
- known_node_aspects().loaded_properties.clear();
- known_node_aspects().loaded_context_slots.clear();
}
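
A hedged sketch of the intended effect on subsequent bytecodes (JS-level pseudo-code in comments; the caches themselves are maintained by RecordKnownProperty, TryReuseKnownPropertyLoad and the context-slot helpers declared elsewhere in this class):

    //   y = o.x;   // load recorded in loaded_properties
    //   z = o.x;   // second load reused from the cache
    //   o.x = 1;   // simple field store: invalidation handled when the store
    //              // is emitted, map knowledge and other cached values survive
    //   f();       // arbitrary call: loaded_properties, loaded_context_slots
    //              // and unstable-map knowledge are cleared, and the
    //              // checkpointed frame is reset up the inlining chain
    //   w = o.x;   // must be re-loaded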
int next_offset() const {
@@ -928,8 +1206,12 @@ class MaglevGraphBuilder {
template <typename ControlNodeT, typename... Args>
BasicBlock* FinishBlock(std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
- ControlNode* control_node = CreateNewNode<ControlNodeT>(
- control_inputs, std::forward<Args>(args)...);
+ ControlNodeT* control_node = NodeBase::New<ControlNodeT>(
+ zone(), control_inputs, std::forward<Args>(args)...);
+ AttachEagerDeoptInfo(control_node);
+ static_assert(!ControlNodeT::kProperties.can_lazy_deopt());
+ static_assert(!ControlNodeT::kProperties.can_throw());
+ static_assert(!ControlNodeT::kProperties.has_any_side_effects());
current_block_->set_control_node(control_node);
BasicBlock* block = current_block_;
@@ -963,7 +1245,8 @@ class MaglevGraphBuilder {
jump_target_refs_head =
jump_target_refs_head->SetToBlockAndReturnNext(block);
}
- if (!is_toptier() && interrupt_budget_correction != 0) {
+ if (v8_flags.maglev_increase_budget_forward_jump &&
+ ShouldEmitInterruptBudgetChecks() && interrupt_budget_correction != 0) {
DCHECK_GT(interrupt_budget_correction, 0);
AddNewNode<IncreaseInterruptBudget>({}, interrupt_budget_correction);
}
@@ -979,7 +1262,8 @@ class MaglevGraphBuilder {
if (NumPredecessors(next_block_offset) == 1) {
if (v8_flags.trace_maglev_graph_building) {
- std::cout << "== New block (single fallthrough) ==" << std::endl;
+ std::cout << "== New block (single fallthrough) at " << function()
+ << "==" << std::endl;
}
StartNewBlock(next_block_offset);
} else {
@@ -987,137 +1271,111 @@ class MaglevGraphBuilder {
}
}
- void InlineCallFromRegisters(int argc_count,
- ConvertReceiverMode receiver_mode,
- compiler::JSFunctionRef function);
-
- class CallArguments {
- public:
- enum Mode {
- kFromRegisters,
- kFromRegisterList,
- };
-
- CallArguments(ConvertReceiverMode receiver_mode, int reg_count,
- interpreter::Register r0 = interpreter::Register(),
- interpreter::Register r1 = interpreter::Register(),
- interpreter::Register r2 = interpreter::Register())
- : receiver_mode_(receiver_mode),
- call_mode_(kFromRegisters),
- reg_count_(reg_count) {
- DCHECK_GE(reg_count, 0);
- DCHECK_LT(reg_count, 4);
- DCHECK_IMPLIES(receiver_mode_ != ConvertReceiverMode::kNullOrUndefined,
- reg_count > 0);
- DCHECK_IMPLIES(reg_count > 0, r0.is_valid());
- regs_[0] = r0;
- DCHECK_IMPLIES(reg_count > 1, r1.is_valid());
- regs_[1] = r1;
- DCHECK_IMPLIES(reg_count > 2, r2.is_valid());
- regs_[2] = r2;
- }
-
- CallArguments(ConvertReceiverMode receiver_mode,
- interpreter::RegisterList reglist)
- : receiver_mode_(receiver_mode),
- call_mode_(kFromRegisterList),
- reglist_(reglist) {}
-
- base::Optional<interpreter::Register> receiver() const {
- if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
- return {};
- }
- if (call_mode_ == kFromRegisters) {
- DCHECK_GT(reg_count_, 0);
- return regs_[0];
- }
- return reglist_[0];
- }
-
- int count() const {
- int register_count =
- call_mode_ == kFromRegisters ? reg_count_ : reglist_.register_count();
- if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
- return register_count;
- }
- return register_count - 1;
+ ValueNode* GetTaggedOrUndefined(ValueNode* maybe_value) {
+ if (maybe_value == nullptr) {
+ return GetRootConstant(RootIndex::kUndefinedValue);
}
-
- int count_with_receiver() const { return count() + 1; }
-
- const interpreter::Register operator[](size_t i) const {
- if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) {
- i++;
- }
- if (call_mode_ == kFromRegisters) {
- DCHECK_LT(i, reg_count_);
- DCHECK_GE(i, 0);
- return regs_[i];
- }
- return reglist_[i];
- }
-
- ConvertReceiverMode receiver_mode() const { return receiver_mode_; }
-
- CallArguments PopReceiver(ConvertReceiverMode new_receiver_mode) const {
- DCHECK_NE(receiver_mode_, ConvertReceiverMode::kNullOrUndefined);
- DCHECK_NE(new_receiver_mode, ConvertReceiverMode::kNullOrUndefined);
- // If there is no non-receiver argument to become the new receiver,
- // consider the new receiver to be known undefined.
- if (count() == 0) {
- new_receiver_mode = ConvertReceiverMode::kNullOrUndefined;
- }
- if (call_mode_ == kFromRegisters) {
- return CallArguments(new_receiver_mode, reg_count_ - 1, regs_[1],
- regs_[2]);
- }
- return CallArguments(new_receiver_mode, reglist_.PopLeft());
- }
-
- private:
- const ConvertReceiverMode receiver_mode_;
- const Mode call_mode_;
- union {
- struct {
- interpreter::Register regs_[3];
- int reg_count_;
- };
- interpreter::RegisterList reglist_;
- };
- };
-
- ValueNode* GetTaggedReceiver(const CallArguments& args) {
- auto maybe_receiver = args.receiver();
- if (maybe_receiver.has_value()) {
- return GetTaggedValue(*maybe_receiver);
- }
- DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kNullOrUndefined);
- return GetRootConstant(RootIndex::kUndefinedValue);
+ return GetTaggedValue(maybe_value);
}
+
ValueNode* GetConvertReceiver(compiler::JSFunctionRef function,
const CallArguments& args);
-#define MAGLEV_REDUCED_BUILTIN(V) \
- V(FunctionPrototypeCall) \
- V(StringFromCharCode) \
- V(StringPrototypeCharCodeAt)
-
-#define DEFINE_BUILTIN_REDUCER(Name) \
- bool TryReduce##Name(compiler::JSFunctionRef builtin_target, \
- const CallArguments& args);
+ template <typename LoadNode>
+ ReduceResult TryBuildLoadDataView(const CallArguments& args,
+ ExternalArrayType type);
+ template <typename StoreNode, typename Function>
+ ReduceResult TryBuildStoreDataView(const CallArguments& args,
+ ExternalArrayType type,
+ Function&& getValue);
+
+#define MATH_UNARY_IEEE_BUILTIN(V) \
+ V(MathAcos) \
+ V(MathAcosh) \
+ V(MathAsin) \
+ V(MathAsinh) \
+ V(MathAtan) \
+ V(MathAtanh) \
+ V(MathCbrt) \
+ V(MathCos) \
+ V(MathCosh) \
+ V(MathExp) \
+ V(MathExpm1) \
+ V(MathLog) \
+ V(MathLog1p) \
+ V(MathLog10) \
+ V(MathLog2) \
+ V(MathSin) \
+ V(MathSinh) \
+ V(MathTan) \
+ V(MathTanh)
+
+#define MAGLEV_REDUCED_BUILTIN(V) \
+ V(DataViewPrototypeGetInt8) \
+ V(DataViewPrototypeSetInt8) \
+ V(DataViewPrototypeGetInt16) \
+ V(DataViewPrototypeSetInt16) \
+ V(DataViewPrototypeGetInt32) \
+ V(DataViewPrototypeSetInt32) \
+ V(DataViewPrototypeGetFloat64) \
+ V(DataViewPrototypeSetFloat64) \
+ V(FunctionPrototypeCall) \
+ V(ObjectPrototypeHasOwnProperty) \
+ V(MathCeil) \
+ V(MathFloor) \
+ V(MathPow) \
+ V(MathRound) \
+ V(StringFromCharCode) \
+ V(StringPrototypeCharCodeAt) \
+ V(StringPrototypeCodePointAt) \
+ MATH_UNARY_IEEE_BUILTIN(V)
+
+#define DEFINE_BUILTIN_REDUCER(Name) \
+ ReduceResult TryReduce##Name(compiler::JSFunctionRef builtin_target, \
+ CallArguments& args);
MAGLEV_REDUCED_BUILTIN(DEFINE_BUILTIN_REDUCER)
#undef DEFINE_BUILTIN_REDUCER
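
Each entry in the builtin list expands to a declaration of this shape (MathFloor shown as the example):

    ReduceResult TryReduceMathFloor(compiler::JSFunctionRef builtin_target,
                                    CallArguments& args);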
- bool TryReduceBuiltin(compiler::JSFunctionRef builtin_target,
- const CallArguments& args);
-
- bool TryBuildCallKnownJSFunction(compiler::JSFunctionRef function,
- const CallArguments& args);
-
- void BuildGenericCall(ValueNode* target, ValueNode* context,
- Call::TargetType target_type, const CallArguments& args,
- compiler::FeedbackSource& feedback_source);
- void BuildCall(ValueNode* target_node, const CallArguments& args,
+ ReduceResult DoTryReduceMathRound(compiler::JSFunctionRef builtin_target,
+ CallArguments& args,
+ Float64Round::Kind kind);
+
+ template <typename CallNode, typename... Args>
+ CallNode* AddNewCallNode(const CallArguments& args, Args&&... extra_args);
+
+ ValueNode* BuildCallSelf(compiler::JSFunctionRef function,
+ CallArguments& args);
+ ReduceResult TryReduceBuiltin(compiler::JSFunctionRef builtin_target,
+ CallArguments& args,
+ const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode);
+ bool TargetIsCurrentCompilingUnit(compiler::JSFunctionRef target);
+ ReduceResult TryBuildCallKnownJSFunction(
+ compiler::JSFunctionRef function, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source);
+ bool ShouldInlineCall(compiler::JSFunctionRef function, float call_frequency);
+ ReduceResult TryBuildInlinedCall(
+ compiler::JSFunctionRef function, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source);
+ ValueNode* BuildGenericCall(ValueNode* target, ValueNode* context,
+ Call::TargetType target_type,
+ const CallArguments& args,
+ const compiler::FeedbackSource& feedback_source =
+ compiler::FeedbackSource());
+ ReduceResult ReduceCall(
+ compiler::ObjectRef target, CallArguments& args,
+ const compiler::FeedbackSource& feedback_source =
+ compiler::FeedbackSource(),
+ SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
+ ReduceResult ReduceCallForTarget(
+ ValueNode* target_node, compiler::JSFunctionRef target,
+ CallArguments& args, const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode);
+ ReduceResult ReduceFunctionPrototypeApplyCallWithReceiver(
+ ValueNode* target_node, compiler::JSFunctionRef receiver,
+ CallArguments& args, const compiler::FeedbackSource& feedback_source,
+ SpeculationMode speculation_mode);
+ void BuildCall(ValueNode* target_node, CallArguments& args,
compiler::FeedbackSource& feedback_source);
void BuildCallFromRegisterList(ConvertReceiverMode receiver_mode);
void BuildCallFromRegisters(int argc_count,
@@ -1131,6 +1389,8 @@ class MaglevGraphBuilder {
const compiler::GlobalAccessFeedback& global_access_feedback);
ValueNode* BuildSmiUntag(ValueNode* node);
+ ValueNode* BuildNumberOrOddballToFloat64(
+ ValueNode* node, TaggedToFloat64ConversionType conversion_type);
void BuildCheckSmi(ValueNode* object);
void BuildCheckNumber(ValueNode* object);
@@ -1138,10 +1398,18 @@ class MaglevGraphBuilder {
void BuildCheckString(ValueNode* object);
void BuildCheckSymbol(ValueNode* object);
void BuildCheckMaps(ValueNode* object,
- ZoneVector<compiler::MapRef> const& maps);
+ base::Vector<const compiler::MapRef> maps);
// Emits an unconditional deopt and returns false if the node is a constant
// that doesn't match the ref.
- bool BuildCheckValue(ValueNode* node, const compiler::HeapObjectRef& ref);
+ ReduceResult BuildCheckValue(ValueNode* node,
+ const compiler::HeapObjectRef& ref);
+
+ bool CanElideWriteBarrier(ValueNode* object, ValueNode* value);
+ void BuildStoreTaggedField(ValueNode* object, ValueNode* value, int offset);
+ void BuildStoreTaggedFieldNoWriteBarrier(ValueNode* object, ValueNode* value,
+ int offset);
+ void BuildStoreFixedArrayElement(ValueNode* elements, ValueNode* index,
+ ValueNode* value);
ValueNode* GetInt32ElementIndex(interpreter::Register reg) {
ValueNode* index_object = current_interpreter_frame_.get(reg);
@@ -1149,48 +1417,77 @@ class MaglevGraphBuilder {
}
ValueNode* GetInt32ElementIndex(ValueNode* index_object);
- bool TryFoldLoadDictPrototypeConstant(
+ ValueNode* GetUint32ElementIndex(interpreter::Register reg) {
+ ValueNode* index_object = current_interpreter_frame_.get(reg);
+ return GetUint32ElementIndex(index_object);
+ }
+ ValueNode* GetUint32ElementIndex(ValueNode* index_object);
+
+ bool CanTreatHoleAsUndefined(
+ base::Vector<const compiler::MapRef> const& receiver_maps);
+
+ compiler::OptionalObjectRef TryFoldLoadDictPrototypeConstant(
compiler::PropertyAccessInfo access_info);
- bool TryFoldLoadConstantDataField(compiler::PropertyAccessInfo access_info,
- ValueNode* lookup_start_object);
-
- void BuildLoadField(compiler::PropertyAccessInfo access_info,
- ValueNode* lookup_start_object);
- bool TryBuildStoreField(compiler::PropertyAccessInfo access_info,
- ValueNode* receiver);
- bool TryBuildPropertyGetterCall(compiler::PropertyAccessInfo access_info,
+ compiler::OptionalObjectRef TryFoldLoadConstantDataField(
+ compiler::PropertyAccessInfo access_info, ValueNode* lookup_start_object);
+
+ // Returns the loaded value node but doesn't update the accumulator yet.
+ ValueNode* BuildLoadField(compiler::PropertyAccessInfo access_info,
+ ValueNode* lookup_start_object);
+ ReduceResult TryBuildStoreField(compiler::PropertyAccessInfo access_info,
ValueNode* receiver,
- ValueNode* lookup_start_object);
- bool TryBuildPropertySetterCall(compiler::PropertyAccessInfo access_info,
- ValueNode* receiver, ValueNode* value);
-
- bool TryBuildPropertyLoad(ValueNode* receiver, ValueNode* lookup_start_object,
- compiler::NameRef name,
- compiler::PropertyAccessInfo const& access_info);
- bool TryBuildPropertyStore(ValueNode* receiver, compiler::NameRef name,
- compiler::PropertyAccessInfo const& access_info);
- bool TryBuildPropertyAccess(ValueNode* receiver,
- ValueNode* lookup_start_object,
- compiler::NameRef name,
- compiler::PropertyAccessInfo const& access_info,
- compiler::AccessMode access_mode);
-
- bool TryBuildElementAccessOnString(
+ compiler::AccessMode access_mode);
+ ReduceResult TryBuildPropertyGetterCall(
+ compiler::PropertyAccessInfo access_info, ValueNode* receiver,
+ ValueNode* lookup_start_object);
+ ReduceResult TryBuildPropertySetterCall(
+ compiler::PropertyAccessInfo access_info, ValueNode* receiver,
+ ValueNode* value);
+
+ ReduceResult TryBuildPropertyLoad(
+ ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::NameRef name, compiler::PropertyAccessInfo const& access_info);
+ ReduceResult TryBuildPropertyStore(
+ ValueNode* receiver, compiler::NameRef name,
+ compiler::PropertyAccessInfo const& access_info,
+ compiler::AccessMode access_mode);
+ ReduceResult TryBuildPropertyAccess(
+ ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::NameRef name, compiler::PropertyAccessInfo const& access_info,
+ compiler::AccessMode access_mode);
+ ReduceResult TryBuildNamedAccess(
+ ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::NamedAccessFeedback const& feedback,
+ compiler::FeedbackSource const& feedback_source,
+ compiler::AccessMode access_mode);
+
+ ValueNode* BuildLoadTypedArrayElement(ValueNode* object, ValueNode* index,
+ ElementsKind elements_kind);
+ void BuildStoreTypedArrayElement(ValueNode* object, ValueNode* index,
+ ElementsKind elements_kind);
+
+ ReduceResult TryBuildElementAccessOnString(
ValueNode* object, ValueNode* index,
compiler::KeyedAccessMode const& keyed_mode);
-
- bool TryBuildNamedAccess(ValueNode* receiver, ValueNode* lookup_start_object,
- compiler::NamedAccessFeedback const& feedback,
- compiler::AccessMode access_mode);
- bool TryBuildElementAccess(ValueNode* object, ValueNode* index,
- compiler::ElementAccessFeedback const& feedback);
+ ReduceResult TryBuildElementAccessOnTypedArray(
+ ValueNode* object, ValueNode* index,
+ const compiler::ElementAccessInfo& access_info,
+ compiler::KeyedAccessMode const& keyed_mode);
+ ReduceResult TryBuildElementAccessOnJSArrayOrJSObject(
+ ValueNode* object, ValueNode* index,
+ const compiler::ElementAccessInfo& access_info,
+ compiler::KeyedAccessMode const& keyed_mode);
+ ReduceResult TryBuildElementAccess(
+ ValueNode* object, ValueNode* index,
+ compiler::ElementAccessFeedback const& feedback,
+ compiler::FeedbackSource const& feedback_source);
// Load elimination -- when loading or storing a simple property without
// side effects, record its value, and allow that value to be re-used on
// subsequent loads.
void RecordKnownProperty(ValueNode* lookup_start_object,
compiler::NameRef name, ValueNode* value,
- bool is_const);
+ compiler::PropertyAccessInfo const& access_info);
bool TryReuseKnownPropertyLoad(ValueNode* lookup_start_object,
compiler::NameRef name);
@@ -1218,6 +1515,22 @@ class MaglevGraphBuilder {
ValueNode* object, ValueNode* callable,
compiler::FeedbackSource feedback_source);
+ ValueNode* ExtendOrReallocateCurrentRawAllocation(
+ int size, AllocationType allocation_type);
+ void ClearCurrentRawAllocation();
+
+ ReduceResult TryBuildFastCreateObjectOrArrayLiteral(
+ const compiler::LiteralFeedback& feedback);
+ base::Optional<FastLiteralObject> TryReadBoilerplateForFastLiteral(
+ compiler::JSObjectRef boilerplate, AllocationType allocation,
+ int max_depth, int* max_properties);
+ ValueNode* BuildAllocateFastLiteral(FastLiteralObject object,
+ AllocationType allocation);
+ ValueNode* BuildAllocateFastLiteral(FastLiteralField value,
+ AllocationType allocation);
+ ValueNode* BuildAllocateFastLiteral(FastLiteralFixedArray array,
+ AllocationType allocation);
+
template <Operation kOperation>
void BuildGenericUnaryOperationNode();
template <Operation kOperation>
@@ -1226,25 +1539,33 @@ class MaglevGraphBuilder {
void BuildGenericBinarySmiOperationNode();
template <Operation kOperation>
- ValueNode* AddNewInt32BinaryOperationNode(
- std::initializer_list<ValueNode*> inputs);
- template <Operation kOperation>
- ValueNode* AddNewFloat64BinaryOperationNode(
- std::initializer_list<ValueNode*> inputs);
-
- template <Operation kOperation>
ValueNode* TryFoldInt32BinaryOperation(ValueNode* left, ValueNode* right);
template <Operation kOperation>
ValueNode* TryFoldInt32BinaryOperation(ValueNode* left, int right);
template <Operation kOperation>
+ void BuildInt32UnaryOperationNode();
+ void BuildTruncatingInt32BitwiseNotForNumber(
+ TaggedToFloat64ConversionType conversion_type);
+ template <Operation kOperation>
void BuildInt32BinaryOperationNode();
template <Operation kOperation>
void BuildInt32BinarySmiOperationNode();
template <Operation kOperation>
- void BuildFloat64BinaryOperationNode();
+ void BuildTruncatingInt32BinaryOperationNodeForNumber(
+ TaggedToFloat64ConversionType conversion_type);
+ template <Operation kOperation>
+ void BuildTruncatingInt32BinarySmiOperationNodeForNumber(
+ TaggedToFloat64ConversionType conversion_type);
+ template <Operation kOperation>
+ void BuildFloat64UnaryOperationNode(
+ TaggedToFloat64ConversionType conversion_type);
+ template <Operation kOperation>
+ void BuildFloat64BinaryOperationNode(
+ TaggedToFloat64ConversionType conversion_type);
template <Operation kOperation>
- void BuildFloat64BinarySmiOperationNode();
+ void BuildFloat64BinarySmiOperationNode(
+ TaggedToFloat64ConversionType conversion_type);
template <Operation kOperation>
void VisitUnaryOperation();
@@ -1253,9 +1574,10 @@ class MaglevGraphBuilder {
template <Operation kOperation>
void VisitBinarySmiOperation();
- template <typename CompareControlNode>
- bool TryBuildCompareOperation(Operation operation, ValueNode* left,
- ValueNode* right);
+ template <typename BranchControlNodeT, typename... Args>
+ bool TryBuildBranchFor(std::initializer_list<ValueNode*> control_inputs,
+ Args&&... args);
+
template <Operation kOperation>
void VisitCompareOperation();
@@ -1311,7 +1633,7 @@ class MaglevGraphBuilder {
int NumPredecessors(int offset) { return predecessors_[offset]; }
- compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
+ compiler::JSHeapBroker* broker() const { return broker_; }
const compiler::FeedbackVectorRef& feedback() const {
return compilation_unit_->feedback();
}
@@ -1327,27 +1649,24 @@ class MaglevGraphBuilder {
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode();
}
+ const compiler::JSFunctionRef& function() const {
+ return compilation_unit_->function();
+ }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return bytecode_analysis_;
}
LocalIsolate* local_isolate() const { return local_isolate_; }
- Zone* zone() const { return compilation_unit_->zone(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
int parameter_count_without_receiver() { return parameter_count() - 1; }
int register_count() const { return compilation_unit_->register_count(); }
- bool has_graph_labeller() const {
- return compilation_unit_->has_graph_labeller();
- }
- MaglevGraphLabeller* graph_labeller() const {
- return compilation_unit_->graph_labeller();
- }
KnownNodeAspects& known_node_aspects() {
- return current_interpreter_frame_.known_node_aspects();
+ return *current_interpreter_frame_.known_node_aspects();
}
// True when this graph builder is building the subgraph of an inlined
// function.
bool is_inline() const { return parent_ != nullptr; }
+ int inlining_depth() const { return compilation_unit_->inlining_depth(); }
// The fake offset used as a target for all exits of an inlined function.
int inline_exit_offset() const {
@@ -1358,6 +1677,11 @@ class MaglevGraphBuilder {
LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const compilation_unit_;
MaglevGraphBuilder* const parent_;
+ DeoptFrame* parent_deopt_frame_ = nullptr;
+ BasicBlockRef* parent_catch_block_ = nullptr;
+ // Cache the heap broker since we access it a bunch.
+ compiler::JSHeapBroker* broker_ = compilation_unit_->broker();
+
Graph* const graph_;
compiler::BytecodeAnalysis bytecode_analysis_;
interpreter::BytecodeArrayIterator iterator_;
@@ -1368,11 +1692,34 @@ class MaglevGraphBuilder {
BasicBlock* current_block_ = nullptr;
base::Optional<InterpretedDeoptFrame> latest_checkpointed_frame_;
SourcePosition current_source_position_;
+ struct ForInState {
+ ValueNode* receiver = nullptr;
+ ValueNode* cache_type = nullptr;
+ ValueNode* enum_cache = nullptr;
+ ValueNode* key = nullptr;
+ ValueNode* index = nullptr;
+ bool receiver_needs_map_check = false;
+ };
+ // TODO(leszeks): Allow having a stack of these.
+ ForInState current_for_in_state = ForInState();
+
+ AllocateRaw* current_raw_allocation_ = nullptr;
+
+ float call_frequency_;
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;
InterpreterFrameState current_interpreter_frame_;
+ compiler::FeedbackSource current_speculation_feedback_;
+
+ // TODO(victorgomes): Refactor all inlined data to a
+ // base::Optional<InlinedGraphBuilderData>.
+ // base::Vector<ValueNode*>* inlined_arguments_ = nullptr;
+ base::Optional<base::Vector<ValueNode*>> inlined_arguments_;
+ BytecodeOffset caller_bytecode_offset_;
+
+ LazyDeoptContinuationScope* current_lazy_deopt_continuation_scope_ = nullptr;
struct HandlerTableEntry {
int end;
@@ -1382,6 +1729,9 @@ class MaglevGraphBuilder {
int next_handler_table_index_ = 0;
#ifdef DEBUG
+ bool IsNodeCreatedForThisBytecode(ValueNode* node) const {
+ return new_nodes_.find(node) != new_nodes_.end();
+ }
std::unordered_set<Node*> new_nodes_;
#endif
};
diff --git a/deps/v8/src/maglev/maglev-graph-printer.cc b/deps/v8/src/maglev/maglev-graph-printer.cc
index e869707c53..9762bb622a 100644
--- a/deps/v8/src/maglev/maglev-graph-printer.cc
+++ b/deps/v8/src/maglev/maglev-graph-printer.cc
@@ -371,7 +371,16 @@ void PrintSingleDeoptFrame(
LazyDeoptInfo* lazy_deopt_info_if_top_frame = nullptr) {
switch (frame.type()) {
case DeoptFrame::FrameType::kInterpretedFrame: {
- os << "@" << frame.as_interpreted().bytecode_position() << " : {";
+ os << "@" << frame.as_interpreted().bytecode_position();
+ if (!v8_flags.print_maglev_deopt_verbose) {
+ int count = 0;
+ frame.as_interpreted().frame_state()->ForEachValue(
+ frame.as_interpreted().unit(),
+ [&](ValueNode* node, interpreter::Register reg) { count++; });
+ os << " (" << count << " live vars)";
+ return;
+ }
+ os << " : {";
bool first = true;
frame.as_interpreted().frame_state()->ForEachValue(
frame.as_interpreted().unit(),
@@ -394,9 +403,32 @@ void PrintSingleDeoptFrame(
os << "}";
break;
}
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame: {
+ os << "@" << frame.as_inlined_arguments().bytecode_position();
+ if (!v8_flags.print_maglev_deopt_verbose) return;
+ os << " : {";
+ auto arguments = frame.as_inlined_arguments().arguments();
+ DCHECK_GT(arguments.size(), 0);
+ os << "<this>:" << PrintNodeLabel(graph_labeller, arguments[0]) << ":"
+ << current_input_location->operand();
+ current_input_location++;
+ if (arguments.size() > 1) {
+ os << ", ";
+ }
+ for (size_t i = 1; i < arguments.size(); i++) {
+ os << "a" << (i - 1) << ":"
+ << PrintNodeLabel(graph_labeller, arguments[i]) << ":"
+ << current_input_location->operand();
+ current_input_location++;
+ os << ", ";
+ }
+ os << "}";
+ break;
+ }
case DeoptFrame::FrameType::kBuiltinContinuationFrame: {
- os << "@" << Builtins::name(frame.as_builtin_continuation().builtin_id())
- << " : {";
+ os << "@" << Builtins::name(frame.as_builtin_continuation().builtin_id());
+ if (!v8_flags.print_maglev_deopt_verbose) return;
+ os << " : {";
int arg_index = 0;
for (ValueNode* node : frame.as_builtin_continuation().parameters()) {
os << "a" << arg_index << ":" << PrintNodeLabel(graph_labeller, node)
@@ -510,7 +542,7 @@ void PrintExceptionHandlerPoint(std::ostream& os,
// No phis in the block.
return;
}
- int handler_offset = first_phi->merge_offset();
+ int handler_offset = first_phi->merge_state()->merge_offset();
// The exception handler liveness should be a subset of lazy_deopt_info one.
auto* liveness = block->state()->frame_state().liveness();
@@ -571,10 +603,27 @@ void MaybePrintLazyDeoptOrExceptionHandler(std::ostream& os,
void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller_, max_node_id_, phi);
+ os_ << "φ";
+ switch (phi->value_representation()) {
+ case ValueRepresentation::kTagged:
+ os_ << "ᵀ";
+ break;
+ case ValueRepresentation::kInt32:
+ os_ << "ᴵ";
+ break;
+ case ValueRepresentation::kUint32:
+ os_ << "ᵁ";
+ break;
+ case ValueRepresentation::kFloat64:
+ os_ << "ᶠ";
+ break;
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
if (phi->input_count() == 0) {
- os_ << "φₑ " << phi->owner().ToString();
+ os_ << "ₑ " << phi->owner().ToString();
} else {
- os_ << "φ (";
+ os_ << " (";
// Manually walk Phi inputs to print just the node labels, without
// input locations (which are shown in the predecessor block's gap
// moves).
@@ -584,6 +633,13 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
}
os_ << ")";
}
+ if (phi->is_tagged() && !phi->result().operand().IsUnallocated()) {
+ if (phi->decompresses_tagged_result()) {
+ os_ << " (decompressed)";
+ } else {
+ os_ << " (compressed)";
+ }
+ }
os_ << " → " << phi->result().operand() << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
@@ -688,8 +744,24 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
os_ << (has_fallthrough ? "│" : " ");
os_ << " - ";
graph_labeller_->PrintInput(os_, phi->input(pid));
- os_ << " → " << graph_labeller_->NodeId(phi) << ": φ "
- << phi->result().operand() << "\n";
+ os_ << " → " << graph_labeller_->NodeId(phi) << ": φ";
+ switch (phi->value_representation()) {
+ case ValueRepresentation::kTagged:
+ os_ << "ᵀ";
+ break;
+ case ValueRepresentation::kInt32:
+ os_ << "ᴵ";
+ break;
+ case ValueRepresentation::kUint32:
+ os_ << "ᵁ";
+ break;
+ case ValueRepresentation::kFloat64:
+ os_ << "ᶠ";
+ break;
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+ os_ << " " << phi->result().operand() << "\n";
}
if (target->state()->register_state().is_initialized()) {
PrintVerticalArrows(os_, targets_);
@@ -734,7 +806,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
void PrintGraph(std::ostream& os, MaglevCompilationInfo* compilation_info,
Graph* const graph) {
- GraphProcessor<MaglevPrintingVisitor> printer(
+ GraphProcessor<MaglevPrintingVisitor, /*visit_identity_nodes*/ true> printer(
compilation_info->graph_labeller(), os);
printer.ProcessGraph(graph);
}
diff --git a/deps/v8/src/maglev/maglev-graph-processor.h b/deps/v8/src/maglev/maglev-graph-processor.h
index 60fa90c786..a6d5dfedcc 100644
--- a/deps/v8/src/maglev/maglev-graph-processor.h
+++ b/deps/v8/src/maglev/maglev-graph-processor.h
@@ -40,12 +40,14 @@ namespace maglev {
// // overloading as appropriate to group node processing.
// void Process(FooNode* node, const ProcessingState& state) {}
//
-template <typename NodeProcessor>
+template <typename NodeProcessor, bool visit_identity_nodes = false>
class GraphProcessor;
class ProcessingState {
public:
- explicit ProcessingState(BlockConstIterator block_it) : block_it_(block_it) {}
+ explicit ProcessingState(BlockConstIterator block_it,
+ NodeIterator* node_it = nullptr)
+ : block_it_(block_it), node_it_(node_it) {}
// Disallow copies, since the underlying frame states stay mutable.
ProcessingState(const ProcessingState&) = delete;
@@ -54,11 +56,17 @@ class ProcessingState {
BasicBlock* block() const { return *block_it_; }
BasicBlock* next_block() const { return *(block_it_ + 1); }
+ NodeIterator* node_it() const {
+ DCHECK_NOT_NULL(node_it_);
+ return node_it_;
+ }
+
private:
BlockConstIterator block_it_;
+ NodeIterator* node_it_;
};
-template <typename NodeProcessor>
+template <typename NodeProcessor, bool visit_identity_nodes>
class GraphProcessor {
public:
template <typename... Args>
@@ -90,6 +98,10 @@ class GraphProcessor {
node_processor_.Process(constant, GetCurrentState());
USE(index);
}
+ for (const auto& [address, constant] : graph->external_references()) {
+ node_processor_.Process(constant, GetCurrentState());
+ USE(address);
+ }
for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
BasicBlock* block = *block_it_;
@@ -118,14 +130,20 @@ class GraphProcessor {
const NodeProcessor& node_processor() const { return node_processor_; }
private:
- ProcessingState GetCurrentState() { return ProcessingState(block_it_); }
+ ProcessingState GetCurrentState() {
+ return ProcessingState(block_it_, &node_it_);
+ }
void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
switch (node->opcode()) {
-#define CASE(OPCODE) \
- case Opcode::k##OPCODE: \
- PreProcess(node->Cast<OPCODE>(), state); \
- node_processor_.Process(node->Cast<OPCODE>(), state); \
+#define CASE(OPCODE) \
+ case Opcode::k##OPCODE: \
+ if constexpr (!visit_identity_nodes && \
+ Opcode::k##OPCODE == Opcode::kIdentity) { \
+ return; \
+ } \
+ PreProcess(node->Cast<OPCODE>(), state); \
+ node_processor_.Process(node->Cast<OPCODE>(), state); \
break;
NODE_BASE_LIST(CASE)
#undef CASE
@@ -137,7 +155,7 @@ class GraphProcessor {
NodeProcessor node_processor_;
Graph* graph_;
BlockConstIterator block_it_;
- NodeConstIterator node_it_;
+ NodeIterator node_it_;
};
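
A minimal sketch of a node processor and of how the new template parameter is chosen (hypothetical NodeCounter, not part of the patch; constructor arguments are forwarded to the wrapped processor as before):

    class NodeCounter {
     public:
      void PreProcessGraph(Graph* graph) {}
      void PostProcessGraph(Graph* graph) {}
      void PreProcessBasicBlock(BasicBlock* block) {}
      void Process(NodeBase*, const ProcessingState&) { count_++; }
      int count() const { return count_; }

     private:
      int count_ = 0;
    };

    // Default: Identity nodes are skipped.
    GraphProcessor<NodeCounter> counter;
    counter.ProcessGraph(graph);

    // Opt in to visiting Identity nodes, as the graph printer now does.
    GraphProcessor<NodeCounter, /*visit_identity_nodes=*/true> verbose_counter;
    verbose_counter.ProcessGraph(graph);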
// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
diff --git a/deps/v8/src/maglev/maglev-graph-verifier.h b/deps/v8/src/maglev/maglev-graph-verifier.h
index 59c1799f9d..6af5f928c4 100644
--- a/deps/v8/src/maglev/maglev-graph-verifier.h
+++ b/deps/v8/src/maglev/maglev-graph-verifier.h
@@ -13,21 +13,6 @@ namespace v8 {
namespace internal {
namespace maglev {
-std::ostream& operator<<(std::ostream& os, const ValueRepresentation& repr) {
- switch (repr) {
- case ValueRepresentation::kTagged:
- os << "Tagged";
- break;
- case ValueRepresentation::kInt32:
- os << "Int32";
- break;
- case ValueRepresentation::kFloat64:
- os << "Float64";
- break;
- }
- return os;
-}
-
class Graph;
// TODO(victorgomes): Currently it only verifies the inputs for all ValueNodes
@@ -44,291 +29,9 @@ class MaglevGraphVerifier {
void PostProcessGraph(Graph* graph) {}
void PreProcessBasicBlock(BasicBlock* block) {}
- static ValueRepresentation ToValueRepresentation(MachineType type) {
- switch (type.representation()) {
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kTaggedSigned:
- case MachineRepresentation::kTaggedPointer:
- return ValueRepresentation::kTagged;
- case MachineRepresentation::kFloat64:
- return ValueRepresentation::kFloat64;
- default:
- return ValueRepresentation::kInt32;
- }
- }
-
- void CheckValueInputIs(NodeBase* node, int i, ValueRepresentation expected) {
- ValueNode* input = node->input(i).node();
- ValueRepresentation got = input->properties().value_representation();
- if (got != expected) {
- std::ostringstream str;
- str << "Type representation error: node ";
- if (graph_labeller_) {
- str << "#" << graph_labeller_->NodeId(node) << " : ";
- }
- str << node->opcode() << " (input @" << i << " = " << input->opcode()
- << ") type " << got << " is not " << expected;
- FATAL("%s", str.str().c_str());
- }
- }
-
- void Process(NodeBase* node, const ProcessingState& state) {
- switch (node->opcode()) {
- case Opcode::kAbort:
- case Opcode::kConstant:
- case Opcode::kConstantGapMove:
- case Opcode::kCreateEmptyArrayLiteral:
- case Opcode::kCreateEmptyObjectLiteral:
- case Opcode::kCreateArrayLiteral:
- case Opcode::kCreateShallowArrayLiteral:
- case Opcode::kCreateObjectLiteral:
- case Opcode::kCreateShallowObjectLiteral:
- case Opcode::kCreateRegExpLiteral:
- case Opcode::kDebugBreak:
- case Opcode::kDeopt:
- case Opcode::kFloat64Constant:
- case Opcode::kGapMove:
- case Opcode::kGetSecondReturnedValue:
- case Opcode::kInitialValue:
- case Opcode::kInt32Constant:
- case Opcode::kJump:
- case Opcode::kJumpFromInlined:
- case Opcode::kJumpLoop:
- case Opcode::kJumpLoopPrologue:
- case Opcode::kJumpToInlined:
- case Opcode::kRegisterInput:
- case Opcode::kRootConstant:
- case Opcode::kSmiConstant:
- case Opcode::kIncreaseInterruptBudget:
- case Opcode::kReduceInterruptBudget:
- // No input.
- DCHECK_EQ(node->input_count(), 0);
- break;
- case Opcode::kCheckedSmiUntag:
- case Opcode::kUnsafeSmiUntag:
- case Opcode::kGenericBitwiseNot:
- case Opcode::kGenericDecrement:
- case Opcode::kGenericIncrement:
- case Opcode::kGenericNegate:
- case Opcode::kLoadDoubleField:
- case Opcode::kLoadGlobal:
- case Opcode::kLoadTaggedField:
- // TODO(victorgomes): Can we check that the input is actually a receiver?
- case Opcode::kCheckHeapObject:
- case Opcode::kCheckMaps:
- case Opcode::kCheckValue:
- case Opcode::kCheckMapsWithMigration:
- case Opcode::kCheckSmi:
- case Opcode::kCheckNumber:
- case Opcode::kCheckString:
- case Opcode::kCheckSymbol:
- case Opcode::kCheckedInternalizedString:
- case Opcode::kCheckedObjectToIndex:
- case Opcode::kConvertReceiver:
- // TODO(victorgomes): Can we check that the input is Boolean?
- case Opcode::kBranchIfToBooleanTrue:
- case Opcode::kBranchIfRootConstant:
- case Opcode::kBranchIfUndefinedOrNull:
- case Opcode::kBranchIfJSReceiver:
- case Opcode::kCheckedFloat64Unbox:
- case Opcode::kCreateFunctionContext:
- case Opcode::kCreateClosure:
- case Opcode::kFastCreateClosure:
- case Opcode::kGeneratorRestoreRegister:
- case Opcode::kGetTemplateObject:
- case Opcode::kLogicalNot:
- case Opcode::kSetPendingMessage:
- case Opcode::kStoreMap:
- case Opcode::kStringLength:
- case Opcode::kToBoolean:
- case Opcode::kToBooleanLogicalNot:
- case Opcode::kTestUndetectable:
- case Opcode::kTestTypeOf:
- case Opcode::kThrowReferenceErrorIfHole:
- case Opcode::kThrowSuperNotCalledIfHole:
- case Opcode::kThrowSuperAlreadyCalledIfNotHole:
- case Opcode::kReturn:
- DCHECK_EQ(node->input_count(), 1);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- break;
- case Opcode::kSwitch:
- case Opcode::kCheckedSmiTag:
- case Opcode::kUnsafeSmiTag:
- case Opcode::kChangeInt32ToFloat64:
- case Opcode::kBuiltinStringFromCharCode:
- DCHECK_EQ(node->input_count(), 1);
- CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
- break;
- case Opcode::kFloat64Box:
- case Opcode::kCheckedTruncateFloat64ToInt32:
- DCHECK_EQ(node->input_count(), 1);
- CheckValueInputIs(node, 0, ValueRepresentation::kFloat64);
- break;
- case Opcode::kForInPrepare:
- case Opcode::kGenericAdd:
- case Opcode::kGenericBitwiseAnd:
- case Opcode::kGenericBitwiseOr:
- case Opcode::kGenericBitwiseXor:
- case Opcode::kGenericDivide:
- case Opcode::kGenericExponentiate:
- case Opcode::kGenericModulus:
- case Opcode::kGenericMultiply:
- case Opcode::kGenericShiftLeft:
- case Opcode::kGenericShiftRight:
- case Opcode::kGenericShiftRightLogical:
- case Opcode::kGenericSubtract:
- // TODO(victorgomes): Can we use the fact that these nodes return a
- // Boolean?
- case Opcode::kGenericEqual:
- case Opcode::kGenericGreaterThan:
- case Opcode::kGenericGreaterThanOrEqual:
- case Opcode::kGenericLessThan:
- case Opcode::kGenericLessThanOrEqual:
- case Opcode::kGenericStrictEqual:
- case Opcode::kGetIterator:
- case Opcode::kTaggedEqual:
- case Opcode::kTaggedNotEqual:
- case Opcode::kStoreGlobal:
- // TODO(victorgomes): Can we check that first input is an Object?
- case Opcode::kStoreTaggedFieldNoWriteBarrier:
- // TODO(victorgomes): Can we check that second input is a Smi?
- case Opcode::kStoreTaggedFieldWithWriteBarrier:
- case Opcode::kLoadNamedGeneric:
- case Opcode::kThrowIfNotSuperConstructor:
- case Opcode::kToName:
- case Opcode::kToNumberOrNumeric:
- case Opcode::kToObject:
- case Opcode::kToString:
- DCHECK_EQ(node->input_count(), 2);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
- break;
- case Opcode::kDeleteProperty:
- case Opcode::kLoadNamedFromSuperGeneric:
- case Opcode::kSetNamedGeneric:
- case Opcode::kDefineNamedOwnGeneric:
- case Opcode::kGetKeyedGeneric:
- case Opcode::kTestInstanceOf:
- DCHECK_EQ(node->input_count(), 3);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 2, ValueRepresentation::kTagged);
- break;
- case Opcode::kSetKeyedGeneric:
- case Opcode::kDefineKeyedOwnGeneric:
- case Opcode::kStoreInArrayLiteralGeneric:
- DCHECK_EQ(node->input_count(), 4);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 2, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 3, ValueRepresentation::kTagged);
- break;
- case Opcode::kAssertInt32:
- case Opcode::kInt32AddWithOverflow:
- case Opcode::kInt32SubtractWithOverflow:
- case Opcode::kInt32MultiplyWithOverflow:
- case Opcode::kInt32DivideWithOverflow:
- // case Opcode::kInt32ExponentiateWithOverflow:
- case Opcode::kInt32ModulusWithOverflow:
- case Opcode::kInt32BitwiseAnd:
- case Opcode::kInt32BitwiseOr:
- case Opcode::kInt32BitwiseXor:
- case Opcode::kInt32ShiftLeft:
- case Opcode::kInt32ShiftRight:
- case Opcode::kInt32ShiftRightLogical:
- case Opcode::kInt32Equal:
- case Opcode::kInt32StrictEqual:
- case Opcode::kInt32LessThan:
- case Opcode::kInt32LessThanOrEqual:
- case Opcode::kInt32GreaterThan:
- case Opcode::kInt32GreaterThanOrEqual:
- case Opcode::kBranchIfInt32Compare:
- case Opcode::kCheckInt32Condition:
- DCHECK_EQ(node->input_count(), 2);
- CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
- CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
- break;
- case Opcode::kBranchIfReferenceCompare:
- DCHECK_EQ(node->input_count(), 2);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
- break;
- case Opcode::kFloat64Add:
- case Opcode::kFloat64Subtract:
- case Opcode::kFloat64Multiply:
- case Opcode::kFloat64Divide:
- case Opcode::kFloat64Equal:
- case Opcode::kFloat64StrictEqual:
- case Opcode::kFloat64LessThan:
- case Opcode::kFloat64LessThanOrEqual:
- case Opcode::kFloat64GreaterThan:
- case Opcode::kFloat64GreaterThanOrEqual:
- case Opcode::kBranchIfFloat64Compare:
- DCHECK_EQ(node->input_count(), 2);
- CheckValueInputIs(node, 0, ValueRepresentation::kFloat64);
- CheckValueInputIs(node, 1, ValueRepresentation::kFloat64);
- break;
- case Opcode::kStoreDoubleField:
- DCHECK_EQ(node->input_count(), 2);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 1, ValueRepresentation::kFloat64);
- break;
- case Opcode::kCall:
- case Opcode::kCallKnownJSFunction:
- case Opcode::kCallRuntime:
- case Opcode::kCallWithSpread:
- case Opcode::kConstruct:
- case Opcode::kConstructWithSpread:
- case Opcode::kGeneratorStore:
- case Opcode::kForInNext:
- case Opcode::kPhi:
- // All inputs should be tagged.
- for (int i = 0; i < node->input_count(); i++) {
- CheckValueInputIs(node, i, ValueRepresentation::kTagged);
- }
- break;
- case Opcode::kCheckJSArrayBounds:
- case Opcode::kCheckJSObjectElementsBounds:
- case Opcode::kLoadTaggedElement:
- case Opcode::kLoadDoubleElement:
- case Opcode::kStringAt:
- case Opcode::kBuiltinStringPrototypeCharCodeAt:
- DCHECK_EQ(node->input_count(), 2);
- CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
- CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
- break;
- case Opcode::kCallBuiltin: {
- CallBuiltin* call_builtin = node->Cast<CallBuiltin>();
- auto descriptor =
- Builtins::CallInterfaceDescriptorFor(call_builtin->builtin());
- int count = call_builtin->input_count();
- // Verify context.
- if (descriptor.HasContextParameter()) {
- CheckValueInputIs(call_builtin, count - 1,
- ValueRepresentation::kTagged);
- count--;
- }
-
-// {all_input_count} includes the feedback slot and vector.
-#ifdef DEBUG
- int all_input_count = count + (call_builtin->has_feedback() ? 2 : 0);
- if (descriptor.AllowVarArgs()) {
- DCHECK_GE(all_input_count, descriptor.GetParameterCount());
- } else {
- DCHECK_EQ(all_input_count, descriptor.GetParameterCount());
- }
-#endif
- int i = 0;
- // Check the rest of inputs.
- for (; i < count; ++i) {
- MachineType type = i < descriptor.GetParameterCount()
- ? descriptor.GetParameterType(i)
- : MachineType::AnyTagged();
- CheckValueInputIs(call_builtin, i, ToValueRepresentation(type));
- }
- break;
- }
- }
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ node->VerifyInputs(graph_labeller_);
}
private:
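
The verifier now delegates input checking to the nodes themselves: the large opcode switch above is replaced by a templated Process that simply calls node->VerifyInputs(graph_labeller_). A minimal standalone sketch of that dispatch shape, using hypothetical Toy* classes rather than the real Maglev node hierarchy:

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for ValueRepresentation and Maglev nodes.
enum class Repr { kTagged, kInt32, kFloat64 };

struct ToyNode {
  virtual ~ToyNode() = default;
  // Each node class knows its own input constraints, so the verifier no
  // longer needs a central switch over opcodes.
  virtual void VerifyInputs() const = 0;
};

struct ToyInt32Add : ToyNode {
  Repr lhs, rhs;
  ToyInt32Add(Repr l, Repr r) : lhs(l), rhs(r) {}
  void VerifyInputs() const override {
    assert(lhs == Repr::kInt32 && rhs == Repr::kInt32);
  }
};

struct ToyReturn : ToyNode {
  Repr value;
  explicit ToyReturn(Repr v) : value(v) {}
  void VerifyInputs() const override { assert(value == Repr::kTagged); }
};

// The verifier pass itself stays generic: visit every node and delegate.
template <typename NodeT>
void Process(NodeT* node) {
  node->VerifyInputs();
}

int main() {
  std::vector<ToyNode*> graph = {new ToyInt32Add(Repr::kInt32, Repr::kInt32),
                                 new ToyReturn(Repr::kTagged)};
  for (ToyNode* node : graph) Process(node);
  std::printf("all inputs verified\n");
  for (ToyNode* node : graph) delete node;
}
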
diff --git a/deps/v8/src/maglev/maglev-graph.h b/deps/v8/src/maglev/maglev-graph.h
index cc084e8350..225008224e 100644
--- a/deps/v8/src/maglev/maglev-graph.h
+++ b/deps/v8/src/maglev/maglev-graph.h
@@ -7,19 +7,17 @@
#include <vector>
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/heap-refs.h"
#include "src/maglev/maglev-basic-block.h"
-#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
namespace maglev {
-using BlockConstIterator =
- std::vector<BasicBlock*, ZoneAllocator<BasicBlock*>>::const_iterator;
+using BlockConstIterator = ZoneVector<BasicBlock*>::const_iterator;
using BlockConstReverseIterator =
- std::vector<BasicBlock*,
- ZoneAllocator<BasicBlock*>>::const_reverse_iterator;
+ ZoneVector<BasicBlock*>::const_reverse_iterator;
class Graph final : public ZoneObject {
public:
@@ -32,8 +30,11 @@ class Graph final : public ZoneObject {
smi_(zone),
int_(zone),
float_(zone),
+ external_references_(zone),
parameters_(zone),
- constants_(zone) {}
+ register_inputs_(),
+ constants_(zone),
+ inlined_functions_(zone) {}
BasicBlock* operator[](int i) { return blocks_[i]; }
const BasicBlock* operator[](int i) const { return blocks_[i]; }
@@ -51,6 +52,8 @@ class Graph final : public ZoneObject {
uint32_t tagged_stack_slots() const { return tagged_stack_slots_; }
uint32_t untagged_stack_slots() const { return untagged_stack_slots_; }
+ uint32_t max_call_stack_args() const { return max_call_stack_args_; }
+ uint32_t max_deopted_stack_size() const { return max_deopted_stack_size_; }
void set_tagged_stack_slots(uint32_t stack_slots) {
DCHECK_EQ(kMaxUInt32, tagged_stack_slots_);
DCHECK_NE(kMaxUInt32, stack_slots);
@@ -61,32 +64,62 @@ class Graph final : public ZoneObject {
DCHECK_NE(kMaxUInt32, stack_slots);
untagged_stack_slots_ = stack_slots;
}
+ void set_max_call_stack_args(uint32_t stack_slots) {
+ DCHECK_EQ(kMaxUInt32, max_call_stack_args_);
+ DCHECK_NE(kMaxUInt32, stack_slots);
+ max_call_stack_args_ = stack_slots;
+ }
+ void set_max_deopted_stack_size(uint32_t size) {
+ DCHECK_EQ(kMaxUInt32, max_deopted_stack_size_);
+ DCHECK_NE(kMaxUInt32, size);
+ max_deopted_stack_size_ = size;
+ }
+
+ int total_inlined_bytecode_size() const {
+ return total_inlined_bytecode_size_;
+ }
+ void add_inlined_bytecode_size(int size) {
+ total_inlined_bytecode_size_ += size;
+ }
ZoneMap<RootIndex, RootConstant*>& root() { return root_; }
ZoneMap<int, SmiConstant*>& smi() { return smi_; }
ZoneMap<int, Int32Constant*>& int32() { return int_; }
- ZoneMap<double, Float64Constant*>& float64() { return float_; }
+ ZoneMap<uint64_t, Float64Constant*>& float64() { return float_; }
+ ZoneMap<Address, ExternalConstant*>& external_references() {
+ return external_references_;
+ }
ZoneVector<InitialValue*>& parameters() { return parameters_; }
+ RegList& register_inputs() { return register_inputs_; }
compiler::ZoneRefMap<compiler::ObjectRef, Constant*>& constants() {
return constants_;
}
- Float64Constant* nan() const { return nan_; }
- void set_nan(Float64Constant* nan) {
- DCHECK_NULL(nan_);
- nan_ = nan;
+ ZoneVector<OptimizedCompilationInfo::InlinedFunctionHolder>&
+ inlined_functions() {
+ return inlined_functions_;
}
+ bool has_recursive_calls() const { return has_recursive_calls_; }
+ void set_has_recursive_calls(bool value) { has_recursive_calls_ = value; }
private:
uint32_t tagged_stack_slots_ = kMaxUInt32;
uint32_t untagged_stack_slots_ = kMaxUInt32;
+ uint32_t max_call_stack_args_ = kMaxUInt32;
+ uint32_t max_deopted_stack_size_ = kMaxUInt32;
ZoneVector<BasicBlock*> blocks_;
ZoneMap<RootIndex, RootConstant*> root_;
ZoneMap<int, SmiConstant*> smi_;
ZoneMap<int, Int32Constant*> int_;
- ZoneMap<double, Float64Constant*> float_;
+ // Use the bits of the float as the key.
+ ZoneMap<uint64_t, Float64Constant*> float_;
+ ZoneMap<Address, ExternalConstant*> external_references_;
ZoneVector<InitialValue*> parameters_;
+ RegList register_inputs_;
compiler::ZoneRefMap<compiler::ObjectRef, Constant*> constants_;
- Float64Constant* nan_ = nullptr;
+ ZoneVector<OptimizedCompilationInfo::InlinedFunctionHolder>
+ inlined_functions_;
+ bool has_recursive_calls_ = false;
+ int total_inlined_bytecode_size_ = 0;
};
} // namespace maglev
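
The Float64 constant cache above changes its key type from double to the raw bit pattern (ZoneMap<uint64_t, Float64Constant*>). A plausible reason, though the diff does not state it: double keys conflate +0.0 with -0.0 and give NaN no usable ordering in a map, while integer bit patterns do neither. A small standalone sketch of the idea:

#include <bit>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <map>

// Hypothetical cache keyed by the bit pattern of the double; the mapped
// type here is just a placeholder for Float64Constant*.
std::map<uint64_t, int> cache;

uint64_t Key(double d) { return std::bit_cast<uint64_t>(d); }  // C++20

int main() {
  cache[Key(0.0)] = 1;
  cache[Key(-0.0)] = 2;  // distinct entry: -0.0 has a different bit pattern
  cache[Key(std::numeric_limits<double>::quiet_NaN())] = 3;  // well-ordered key
  std::printf("%zu entries\n", cache.size());  // 3
}
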
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.cc b/deps/v8/src/maglev/maglev-interpreter-frame-state.cc
index 759a46b031..83031a3f21 100644
--- a/deps/v8/src/maglev/maglev-interpreter-frame-state.cc
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.cc
@@ -4,6 +4,7 @@
#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph.h"
@@ -11,6 +12,119 @@ namespace v8 {
namespace internal {
namespace maglev {
+void KnownNodeAspects::Merge(const KnownNodeAspects& other, Zone* zone) {
+ DestructivelyIntersect(node_infos, other.node_infos,
+ [](NodeInfo& lhs, const NodeInfo& rhs) {
+ lhs.MergeWith(rhs);
+ return !lhs.is_empty();
+ });
+ DestructivelyIntersect(
+ stable_maps, other.stable_maps,
+ [zone](ZoneHandleSet<Map>& lhs, const ZoneHandleSet<Map>& rhs) {
+ for (Handle<Map> map : rhs) {
+ lhs.insert(map, zone);
+ }
+ // We should always add the value even if the set is empty.
+ return true;
+ });
+ DestructivelyIntersect(
+ unstable_maps, other.unstable_maps,
+ [zone](ZoneHandleSet<Map>& lhs, const ZoneHandleSet<Map>& rhs) {
+ for (Handle<Map> map : rhs) {
+ lhs.insert(map, zone);
+ }
+ // We should always add the value even if the set is empty.
+ return true;
+ });
+ DestructivelyIntersect(loaded_constant_properties,
+ other.loaded_constant_properties);
+ DestructivelyIntersect(loaded_properties, other.loaded_properties);
+ DestructivelyIntersect(loaded_context_constants,
+ other.loaded_context_constants);
+ DestructivelyIntersect(loaded_context_slots, other.loaded_context_slots);
+}
+
+// static
+MergePointInterpreterFrameState* MergePointInterpreterFrameState::New(
+ const MaglevCompilationUnit& info, const InterpreterFrameState& state,
+ int merge_offset, int predecessor_count, BasicBlock* predecessor,
+ const compiler::BytecodeLivenessState* liveness) {
+ MergePointInterpreterFrameState* merge_state =
+ info.zone()->New<MergePointInterpreterFrameState>(
+ info, merge_offset, predecessor_count, 1,
+ info.zone()->NewArray<BasicBlock*>(predecessor_count),
+ BasicBlockType::kDefault, liveness);
+ int i = 0;
+ merge_state->frame_state_.ForEachValue(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = state.get(reg);
+ // Initialise the alternatives list and cache the alternative
+ // representations of the node.
+ Alternatives::List* per_predecessor_alternatives =
+ new (&merge_state->per_predecessor_alternatives_[i])
+ Alternatives::List();
+ per_predecessor_alternatives->Add(info.zone()->New<Alternatives>(
+ state.known_node_aspects()->TryGetInfoFor(entry)));
+ i++;
+ });
+ merge_state->predecessors_[0] = predecessor;
+ merge_state->known_node_aspects_ =
+ state.known_node_aspects()->Clone(info.zone());
+ return merge_state;
+}
+
+// static
+MergePointInterpreterFrameState* MergePointInterpreterFrameState::NewForLoop(
+ const InterpreterFrameState& start_state, const MaglevCompilationUnit& info,
+ int merge_offset, int predecessor_count,
+ const compiler::BytecodeLivenessState* liveness,
+ const compiler::LoopInfo* loop_info) {
+ MergePointInterpreterFrameState* state =
+ info.zone()->New<MergePointInterpreterFrameState>(
+ info, merge_offset, predecessor_count, 0,
+ info.zone()->NewArray<BasicBlock*>(predecessor_count),
+ BasicBlockType::kLoopHeader, liveness);
+ if (loop_info->resumable()) {
+ state->known_node_aspects_ =
+ info.zone()->New<KnownNodeAspects>(info.zone());
+ state->is_resumable_loop_ = true;
+ }
+ auto& assignments = loop_info->assignments();
+ auto& frame_state = state->frame_state_;
+ int i = 0;
+ frame_state.ForEachParameter(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = nullptr;
+ if (assignments.ContainsParameter(reg.ToParameterIndex())) {
+ entry = state->NewLoopPhi(info.zone(), reg);
+ } else if (state->is_resumable_loop()) {
+ // Copy initial values out of the start state.
+ entry = start_state.get(reg);
+ // Initialise the alternatives list for this value.
+ new (&state->per_predecessor_alternatives_[i]) Alternatives::List();
+ DCHECK(entry->Is<InitialValue>());
+ }
+ ++i;
+ });
+ frame_state.context(info) = nullptr;
+ if (state->is_resumable_loop_) {
+ // While contexts are always the same at specific locations, resumable loops
+ // do have different nodes to set the context across resume points. Create a
+ // phi for them.
+ frame_state.context(info) = state->NewLoopPhi(
+ info.zone(), interpreter::Register::current_context());
+ }
+ frame_state.ForEachLocal(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = nullptr;
+ if (assignments.ContainsLocal(reg.index())) {
+ entry = state->NewLoopPhi(info.zone(), reg);
+ }
+ });
+ DCHECK(!frame_state.liveness()->AccumulatorIsLive());
+ return state;
+}
+
// static
MergePointInterpreterFrameState*
MergePointInterpreterFrameState::NewForCatchBlock(
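
KnownNodeAspects::Merge above is built on DestructivelyIntersect: keep only the keys known on every incoming path, merging the per-key information. A standalone sketch of that intersection idiom over std::map (the signature is illustrative, not the real helper declared in maglev-interpreter-frame-state.h):

#include <cstdio>
#include <map>
#include <string>

// Keep only keys present in both maps; for those, let the callback merge the
// values and decide (by returning false) whether to drop the entry anyway.
template <typename K, typename V, typename MergeFn>
void DestructivelyIntersectSketch(std::map<K, V>& lhs,
                                  const std::map<K, V>& rhs, MergeFn merge) {
  for (auto it = lhs.begin(); it != lhs.end();) {
    auto rhs_it = rhs.find(it->first);
    if (rhs_it == rhs.end() || !merge(it->second, rhs_it->second)) {
      it = lhs.erase(it);
    } else {
      ++it;
    }
  }
}

int main() {
  std::map<std::string, int> lhs{{"x", 3}, {"y", 7}};
  const std::map<std::string, int> rhs{{"y", 5}, {"z", 9}};
  // Merge by taking the minimum, keeping the entry only while it is positive.
  DestructivelyIntersectSketch(lhs, rhs, [](int& l, const int& r) {
    if (r < l) l = r;
    return l > 0;
  });
  for (const auto& [k, v] : lhs) std::printf("%s=%d\n", k.c_str(), v);  // y=5
}
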
@@ -20,8 +134,8 @@ MergePointInterpreterFrameState::NewForCatchBlock(
Zone* const zone = unit.zone();
MergePointInterpreterFrameState* state =
zone->New<MergePointInterpreterFrameState>(
- unit, 0, 0, nullptr, BasicBlockType::kExceptionHandlerStart,
- liveness);
+ unit, handler_offset, 0, 0, nullptr,
+ BasicBlockType::kExceptionHandlerStart, liveness);
auto& frame_state = state->frame_state_;
// If the accumulator is live, the ExceptionPhi associated to it is the
// first one in the block. That ensures it gets kReturnValue0 in the
@@ -29,22 +143,373 @@ MergePointInterpreterFrameState::NewForCatchBlock(
// StraightForwardRegisterAllocator::AllocateRegisters.
if (frame_state.liveness()->AccumulatorIsLive()) {
frame_state.accumulator(unit) = state->NewExceptionPhi(
- zone, interpreter::Register::virtual_accumulator(), handler_offset);
+ zone, interpreter::Register::virtual_accumulator());
}
frame_state.ForEachParameter(
unit, [&](ValueNode*& entry, interpreter::Register reg) {
- entry = state->NewExceptionPhi(zone, reg, handler_offset);
- });
- frame_state.context(unit) =
- state->NewExceptionPhi(zone, context_register, handler_offset);
- frame_state.ForEachLocal(
- unit, [&](ValueNode*& entry, interpreter::Register reg) {
- entry = state->NewExceptionPhi(zone, reg, handler_offset);
+ entry = state->NewExceptionPhi(zone, reg);
});
+ frame_state.context(unit) = state->NewExceptionPhi(zone, context_register);
+ frame_state.ForEachLocal(unit,
+ [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = state->NewExceptionPhi(zone, reg);
+ });
state->known_node_aspects_ = zone->New<KnownNodeAspects>(zone);
return state;
}
+MergePointInterpreterFrameState::MergePointInterpreterFrameState(
+ const MaglevCompilationUnit& info, int merge_offset, int predecessor_count,
+ int predecessors_so_far, BasicBlock** predecessors, BasicBlockType type,
+ const compiler::BytecodeLivenessState* liveness)
+ : merge_offset_(merge_offset),
+ predecessor_count_(predecessor_count),
+ predecessors_so_far_(predecessors_so_far),
+ predecessors_(predecessors),
+ basic_block_type_(type),
+ frame_state_(info, liveness),
+ per_predecessor_alternatives_(
+ info.zone()->NewArray<Alternatives::List>(frame_state_.size(info))) {}
+
+void MergePointInterpreterFrameState::Merge(
+ MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants, InterpreterFrameState& unmerged,
+ BasicBlock* predecessor) {
+ DCHECK_GT(predecessor_count_, 1);
+ DCHECK_LT(predecessors_so_far_, predecessor_count_);
+ predecessors_[predecessors_so_far_] = predecessor;
+
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << "Merging..." << std::endl;
+ }
+ int i = 0;
+ frame_state_.ForEachValue(compilation_unit, [&](ValueNode*& value,
+ interpreter::Register reg) {
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << " " << reg.ToString() << ": "
+ << PrintNodeLabel(compilation_unit.graph_labeller(), value)
+ << " <- "
+ << PrintNodeLabel(compilation_unit.graph_labeller(),
+ unmerged.get(reg));
+ }
+ value = MergeValue(compilation_unit, smi_constants, reg,
+ *unmerged.known_node_aspects(), value, unmerged.get(reg),
+ per_predecessor_alternatives_[i]);
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << " => "
+ << PrintNodeLabel(compilation_unit.graph_labeller(), value)
+ << ": " << PrintNode(compilation_unit.graph_labeller(), value)
+ << std::endl;
+ }
+ ++i;
+ });
+
+ if (known_node_aspects_ == nullptr) {
+ DCHECK(is_unmerged_loop());
+ DCHECK_EQ(predecessors_so_far_, 0);
+ known_node_aspects_ = unmerged.known_node_aspects()->CloneForLoopHeader(
+ compilation_unit.zone());
+ } else {
+ known_node_aspects_->Merge(*unmerged.known_node_aspects(),
+ compilation_unit.zone());
+ }
+
+ predecessors_so_far_++;
+ DCHECK_LE(predecessors_so_far_, predecessor_count_);
+}
+
+void MergePointInterpreterFrameState::MergeLoop(
+ MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants,
+ InterpreterFrameState& loop_end_state, BasicBlock* loop_end_block) {
+ // This should be the last predecessor we try to merge.
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_ - 1);
+ DCHECK(is_unmerged_loop());
+ predecessors_[predecessor_count_ - 1] = loop_end_block;
+
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << "Merging loop backedge..." << std::endl;
+ }
+ frame_state_.ForEachValue(compilation_unit, [&](ValueNode* value,
+ interpreter::Register reg) {
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << " " << reg.ToString() << ": "
+ << PrintNodeLabel(compilation_unit.graph_labeller(), value)
+ << " <- "
+ << PrintNodeLabel(compilation_unit.graph_labeller(),
+ loop_end_state.get(reg));
+ }
+ MergeLoopValue(compilation_unit, smi_constants, reg,
+ *loop_end_state.known_node_aspects(), value,
+ loop_end_state.get(reg));
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << " => "
+ << PrintNodeLabel(compilation_unit.graph_labeller(), value)
+ << ": " << PrintNode(compilation_unit.graph_labeller(), value)
+ << std::endl;
+ }
+ });
+ predecessors_so_far_++;
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+}
+
+namespace {
+
+// TODO(victorgomes): Consider refactoring this function to share code with
+// MaglevGraphBuilder::GetSmiConstant.
+SmiConstant* GetSmiConstant(MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants,
+ int constant) {
+ DCHECK(Smi::IsValid(constant));
+ auto it = smi_constants.find(constant);
+ if (it == smi_constants.end()) {
+ SmiConstant* node = Node::New<SmiConstant>(compilation_unit.zone(), 0,
+ Smi::FromInt(constant));
+ compilation_unit.RegisterNodeInGraphLabeller(node);
+ smi_constants.emplace(constant, node);
+ return node;
+ }
+ return it->second;
+}
+
+ValueNode* FromInt32ToTagged(MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants,
+ NodeType node_type, ValueNode* value,
+ BasicBlock* predecessor) {
+ DCHECK_EQ(value->properties().value_representation(),
+ ValueRepresentation::kInt32);
+ DCHECK(!value->properties().is_conversion());
+
+ ValueNode* tagged;
+ if (value->Is<Int32Constant>()) {
+ int32_t constant = value->Cast<Int32Constant>()->value();
+ return GetSmiConstant(compilation_unit, smi_constants, constant);
+ } else if (value->Is<StringLength>() ||
+ value->Is<BuiltinStringPrototypeCharCodeOrCodePointAt>()) {
+ static_assert(String::kMaxLength <= kSmiMaxValue,
+ "String length must fit into a Smi");
+ tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
+ } else if (NodeTypeIsSmi(node_type)) {
+ // For known Smis, we can tag without a check.
+ tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
+ } else {
+ tagged = Node::New<Int32ToNumber>(compilation_unit.zone(), {value});
+ }
+
+ predecessor->nodes().Add(tagged);
+ compilation_unit.RegisterNodeInGraphLabeller(tagged);
+ return tagged;
+}
+
+ValueNode* FromUint32ToTagged(MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants,
+ NodeType node_type, ValueNode* value,
+ BasicBlock* predecessor) {
+ DCHECK_EQ(value->properties().value_representation(),
+ ValueRepresentation::kUint32);
+ DCHECK(!value->properties().is_conversion());
+
+ ValueNode* tagged;
+ if (NodeTypeIsSmi(node_type)) {
+ tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
+ } else {
+ tagged = Node::New<Uint32ToNumber>(compilation_unit.zone(), {value});
+ }
+
+ predecessor->nodes().Add(tagged);
+ compilation_unit.RegisterNodeInGraphLabeller(tagged);
+ return tagged;
+}
+
+ValueNode* FromFloat64ToTagged(MaglevCompilationUnit& compilation_unit,
+ NodeType node_type, ValueNode* value,
+ BasicBlock* predecessor) {
+ DCHECK_EQ(value->properties().value_representation(),
+ ValueRepresentation::kFloat64);
+ DCHECK(!value->properties().is_conversion());
+
+ // Create a tagged version, and insert it at the end of the predecessor.
+ ValueNode* tagged =
+ Node::New<Float64ToTagged>(compilation_unit.zone(), {value});
+
+ predecessor->nodes().Add(tagged);
+ compilation_unit.RegisterNodeInGraphLabeller(tagged);
+ return tagged;
+}
+
+ValueNode* NonTaggedToTagged(MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants,
+ NodeType node_type, ValueNode* value,
+ BasicBlock* predecessor) {
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kWord64:
+ case ValueRepresentation::kTagged:
+ UNREACHABLE();
+ case ValueRepresentation::kInt32:
+ return FromInt32ToTagged(compilation_unit, smi_constants, node_type,
+ value, predecessor);
+ case ValueRepresentation::kUint32:
+ return FromUint32ToTagged(compilation_unit, smi_constants, node_type,
+ value, predecessor);
+ case ValueRepresentation::kFloat64:
+ return FromFloat64ToTagged(compilation_unit, node_type, value,
+ predecessor);
+ }
+}
+ValueNode* EnsureTagged(MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants,
+ const KnownNodeAspects& known_node_aspects,
+ ValueNode* value, BasicBlock* predecessor) {
+ if (value->properties().value_representation() ==
+ ValueRepresentation::kTagged) {
+ return value;
+ }
+
+ auto info_it = known_node_aspects.FindInfo(value);
+ const NodeInfo* info =
+ known_node_aspects.IsValid(info_it) ? &info_it->second : nullptr;
+ if (info && info->tagged_alternative) {
+ return info->tagged_alternative;
+ }
+ return NonTaggedToTagged(compilation_unit, smi_constants,
+ info ? info->type : NodeType::kUnknown, value,
+ predecessor);
+}
+
+} // namespace
+
+ValueNode* MergePointInterpreterFrameState::MergeValue(
+ MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants, interpreter::Register owner,
+ const KnownNodeAspects& unmerged_aspects, ValueNode* merged,
+ ValueNode* unmerged, Alternatives::List& per_predecessor_alternatives) {
+  // If the merged node is null, this is a pre-created loop header merge
+  // frame with null values for anything that isn't a loop Phi.
+ if (merged == nullptr) {
+ DCHECK(is_unmerged_loop());
+ DCHECK_EQ(predecessors_so_far_, 0);
+ // Initialise the alternatives list and cache the alternative
+ // representations of the node.
+ new (&per_predecessor_alternatives) Alternatives::List();
+ per_predecessor_alternatives.Add(compilation_unit.zone()->New<Alternatives>(
+ unmerged_aspects.TryGetInfoFor(unmerged)));
+ return unmerged;
+ }
+
+ Phi* result = merged->TryCast<Phi>();
+ if (result != nullptr && result->merge_state() == this) {
+ // It's possible that merged == unmerged at this point since loop-phis are
+ // not dropped if they are only assigned to themselves in the loop.
+ DCHECK_EQ(result->owner(), owner);
+ unmerged = EnsureTagged(compilation_unit, smi_constants, unmerged_aspects,
+ unmerged, predecessors_[predecessors_so_far_]);
+ result->set_input(predecessors_so_far_, unmerged);
+ return result;
+ }
+
+ if (merged == unmerged) {
+ // Cache the alternative representations of the unmerged node.
+ DCHECK_EQ(per_predecessor_alternatives.LengthForTest(),
+ predecessors_so_far_);
+ per_predecessor_alternatives.Add(compilation_unit.zone()->New<Alternatives>(
+ unmerged_aspects.TryGetInfoFor(unmerged)));
+ return merged;
+ }
+
+ // Up to this point all predecessors had the same value for this interpreter
+ // frame slot. Now that we find a distinct value, insert a copy of the first
+ // value for each predecessor seen so far, in addition to the new value.
+ // TODO(verwaest): Unclear whether we want this for Maglev: Instead of
+ // letting the register allocator remove phis, we could always merge through
+ // the frame slot. In that case we only need the inputs for representation
+ // selection, and hence could remove duplicate inputs. We'd likely need to
+ // attach the interpreter register to the phi in that case?
+ result =
+ Node::New<Phi>(compilation_unit.zone(), predecessor_count_, this, owner);
+ if (v8_flags.trace_maglev_graph_building) {
+ for (int i = 0; i < predecessor_count_; i++) {
+ result->set_input(i, nullptr);
+ }
+ }
+
+ if (merged->properties().value_representation() ==
+ ValueRepresentation::kTagged) {
+ for (int i = 0; i < predecessors_so_far_; i++) {
+ result->set_input(i, merged);
+ }
+ } else {
+ // If the existing merged value is untagged, we look through the
+ // per-predecessor alternative representations, and try to find tagged
+ // representations there.
+ int i = 0;
+ for (const Alternatives* alt : per_predecessor_alternatives) {
+ // TODO(victorgomes): Support Phi nodes of untagged values.
+ ValueNode* tagged = alt->tagged_alternative();
+ if (tagged == nullptr) {
+ tagged = NonTaggedToTagged(compilation_unit, smi_constants,
+ alt->node_type(), merged, predecessors_[i]);
+ }
+ result->set_input(i, tagged);
+ i++;
+ }
+ DCHECK_EQ(i, predecessors_so_far_);
+ }
+
+ unmerged = EnsureTagged(compilation_unit, smi_constants, unmerged_aspects,
+ unmerged, predecessors_[predecessors_so_far_]);
+ result->set_input(predecessors_so_far_, unmerged);
+
+ phis_.Add(result);
+ return result;
+}
+void MergePointInterpreterFrameState::MergeLoopValue(
+ MaglevCompilationUnit& compilation_unit,
+ ZoneMap<int, SmiConstant*>& smi_constants, interpreter::Register owner,
+ KnownNodeAspects& unmerged_aspects, ValueNode* merged,
+ ValueNode* unmerged) {
+ Phi* result = merged->TryCast<Phi>();
+ if (result == nullptr || result->merge_state() != this) {
+ // Not a loop phi, we don't have to do anything.
+ return;
+ }
+ DCHECK_EQ(result->owner(), owner);
+ unmerged = EnsureTagged(compilation_unit, smi_constants, unmerged_aspects,
+ unmerged, predecessors_[predecessors_so_far_]);
+ result->set_input(predecessor_count_ - 1, unmerged);
+}
+
+ValueNode* MergePointInterpreterFrameState::NewLoopPhi(
+ Zone* zone, interpreter::Register reg) {
+ DCHECK_EQ(predecessors_so_far_, 0);
+ // Create a new loop phi, which for now is empty.
+ Phi* result = Node::New<Phi>(zone, predecessor_count_, this, reg);
+ if (v8_flags.trace_maglev_graph_building) {
+ for (int i = 0; i < predecessor_count_; i++) {
+ result->set_input(i, nullptr);
+ }
+ }
+ phis_.Add(result);
+ return result;
+}
+
+void MergePointInterpreterFrameState::ReducePhiPredecessorCount(
+ interpreter::Register owner, ValueNode* merged) {
+ // If the merged node is null, this is a pre-created loop header merge
+ // frame with null values for anything that isn't a loop Phi.
+ if (merged == nullptr) {
+ DCHECK(is_unmerged_loop());
+ DCHECK_EQ(predecessors_so_far_, 0);
+ return;
+ }
+
+ Phi* result = merged->TryCast<Phi>();
+ if (result != nullptr && result->merge_state() == this) {
+ // It's possible that merged == unmerged at this point since loop-phis are
+ // not dropped if they are only assigned to themselves in the loop.
+ DCHECK_EQ(result->owner(), owner);
+ result->reduce_input_count();
+ }
+}
} // namespace maglev
} // namespace internal
} // namespace v8
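
Much of the new merge code above exists to keep Phi inputs tagged: untagged values are converted at the end of the relevant predecessor block, and cached tagged alternatives are reused instead of converting the same value twice. A simplified standalone sketch of that reuse-or-convert step, with hypothetical Toy types in place of ValueNode and NodeInfo (the real code caches the alternative per predecessor rather than on the value itself):

#include <cstdio>
#include <memory>
#include <vector>

enum class Repr { kTagged, kInt32, kFloat64 };

struct ToyValue {
  Repr repr;
  ToyValue* tagged_alternative;  // cached conversion, if one was made
};

// Stand-in for the zone: owns conversion nodes created on demand.
std::vector<std::unique_ptr<ToyValue>> arena;

ToyValue* NewTaggedConversion() {
  arena.push_back(
      std::unique_ptr<ToyValue>(new ToyValue{Repr::kTagged, nullptr}));
  return arena.back().get();
}

// Mirrors the shape of EnsureTagged above: tagged values pass through, a
// cached alternative is reused, otherwise a conversion is created and
// remembered for the next merge of the same value.
ToyValue* EnsureTaggedSketch(ToyValue* value) {
  if (value->repr == Repr::kTagged) return value;
  if (value->tagged_alternative != nullptr) return value->tagged_alternative;
  ToyValue* tagged = NewTaggedConversion();
  value->tagged_alternative = tagged;
  return tagged;
}

int main() {
  ToyValue untagged{Repr::kInt32, nullptr};
  ToyValue* first = EnsureTaggedSketch(&untagged);
  ToyValue* second = EnsureTaggedSketch(&untagged);
  std::printf("conversion reused: %s\n", first == second ? "yes" : "no");
}
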
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.h b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
index 50bb9f5f42..3a173b55d1 100644
--- a/deps/v8/src/maglev/maglev-interpreter-frame-state.h
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
@@ -68,19 +68,21 @@ void DestructivelyIntersect(ZoneMap<Key, Value>& lhs_map,
// (possibly "kUnknown").
// All heap object types include the heap object bit, so that they can be
// checked for AnyHeapObject with a single bit check.
-// TODO(leszeks): Figure out how to represent Number/Numeric with this encoding.
-#define NODE_TYPE_LIST(V) \
- V(Unknown, 0) \
- V(Number, (1 << 0)) \
- V(Smi, (1 << 1) | kNumber) \
- V(AnyHeapObject, (1 << 2)) \
- V(Name, (1 << 3) | kAnyHeapObject) \
- V(String, (1 << 4) | kName) \
- V(InternalizedString, (1 << 5) | kString) \
- V(Symbol, (1 << 6) | kName) \
- V(JSReceiver, (1 << 7) | kAnyHeapObject) \
- V(HeapObjectWithKnownMap, (1 << 8) | kAnyHeapObject) \
- V(HeapNumber, kHeapObjectWithKnownMap | kNumber) \
+#define NODE_TYPE_LIST(V) \
+ V(Unknown, 0) \
+ V(NumberOrOddball, (1 << 1)) \
+ V(Number, (1 << 2) | kNumberOrOddball) \
+ V(Oddball, (1 << 3) | kNumberOrOddball) \
+ V(ObjectWithKnownMap, (1 << 4)) \
+ V(Smi, (1 << 5) | kObjectWithKnownMap | kNumber) \
+ V(AnyHeapObject, (1 << 6)) \
+ V(Name, (1 << 7) | kAnyHeapObject) \
+ V(String, (1 << 8) | kName) \
+ V(InternalizedString, (1 << 9) | kString) \
+ V(Symbol, (1 << 10) | kName) \
+ V(JSReceiver, (1 << 11) | kAnyHeapObject) \
+ V(HeapObjectWithKnownMap, kObjectWithKnownMap | kAnyHeapObject) \
+ V(HeapNumber, kHeapObjectWithKnownMap | kNumber) \
V(JSReceiverWithKnownMap, kJSReceiver | kHeapObjectWithKnownMap)
enum class NodeType {
@@ -93,6 +95,10 @@ inline NodeType CombineType(NodeType left, NodeType right) {
return static_cast<NodeType>(static_cast<int>(left) |
static_cast<int>(right));
}
+inline NodeType IntersectType(NodeType left, NodeType right) {
+ return static_cast<NodeType>(static_cast<int>(left) &
+ static_cast<int>(right));
+}
inline bool NodeTypeIs(NodeType type, NodeType to_check) {
int right = static_cast<int>(to_check);
return (static_cast<int>(type) & right) == right;
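
In the rewritten NODE_TYPE_LIST above, subtyping is encoded as bit inclusion, so NodeTypeIs reduces to a mask check: kSmi carries the kNumber, kNumberOrOddball and kObjectWithKnownMap bits, kHeapNumber carries kAnyHeapObject, and so on. A compilable sketch using a hand-expanded subset of the macro and the same NodeTypeIs predicate:

#include <cstdio>

// A subset of the encoding above, expanded by hand.
enum class NodeType : int {
  kUnknown = 0,
  kNumberOrOddball = 1 << 1,
  kNumber = (1 << 2) | kNumberOrOddball,
  kOddball = (1 << 3) | kNumberOrOddball,
  kObjectWithKnownMap = 1 << 4,
  kSmi = (1 << 5) | kObjectWithKnownMap | kNumber,
  kAnyHeapObject = 1 << 6,
  kHeapObjectWithKnownMap = kObjectWithKnownMap | kAnyHeapObject,
  kHeapNumber = kHeapObjectWithKnownMap | kNumber,
};

constexpr bool NodeTypeIs(NodeType type, NodeType to_check) {
  int right = static_cast<int>(to_check);
  return (static_cast<int>(type) & right) == right;
}

// Subtype relations fall out of the bit encoding.
static_assert(NodeTypeIs(NodeType::kSmi, NodeType::kNumber), "");
static_assert(NodeTypeIs(NodeType::kSmi, NodeType::kNumberOrOddball), "");
static_assert(NodeTypeIs(NodeType::kHeapNumber, NodeType::kAnyHeapObject), "");
static_assert(!NodeTypeIs(NodeType::kOddball, NodeType::kNumber), "");

int main() { std::printf("NodeType lattice checks hold\n"); }
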
@@ -115,6 +121,7 @@ struct NodeInfo {
ValueNode* tagged_alternative = nullptr;
ValueNode* int32_alternative = nullptr;
ValueNode* float64_alternative = nullptr;
+ ValueNode* truncated_int32_alternative = nullptr;
bool is_empty() {
return type == NodeType::kUnknown && tagged_alternative == nullptr &&
@@ -132,8 +139,7 @@ struct NodeInfo {
// Mutate this node info by merging in another node info, with the result
// being a node info that is the subset of information valid in both inputs.
void MergeWith(const NodeInfo& other) {
- type = static_cast<NodeType>(static_cast<int>(type) &
- static_cast<int>(other.type));
+ type = IntersectType(type, other.type);
tagged_alternative = tagged_alternative == other.tagged_alternative
? tagged_alternative
: nullptr;
@@ -143,6 +149,10 @@ struct NodeInfo {
float64_alternative = float64_alternative == other.float64_alternative
? float64_alternative
: nullptr;
+ truncated_int32_alternative =
+ truncated_int32_alternative == other.truncated_int32_alternative
+ ? truncated_int32_alternative
+ : nullptr;
}
};
@@ -182,43 +192,25 @@ struct KnownNodeAspects {
ZoneMap<ValueNode*, NodeInfo>::iterator FindInfo(ValueNode* node) {
return node_infos.find(node);
}
+ ZoneMap<ValueNode*, NodeInfo>::const_iterator FindInfo(
+ ValueNode* node) const {
+ return node_infos.find(node);
+ }
bool IsValid(ZoneMap<ValueNode*, NodeInfo>::iterator& it) {
return it != node_infos.end();
}
+ bool IsValid(ZoneMap<ValueNode*, NodeInfo>::const_iterator& it) const {
+ return it != node_infos.end();
+ }
+ const NodeInfo* TryGetInfoFor(ValueNode* node) const {
+ auto info_it = FindInfo(node);
+ if (!IsValid(info_it)) return nullptr;
+ return &info_it->second;
+ }
NodeInfo* GetOrCreateInfoFor(ValueNode* node) { return &node_infos[node]; }
- void Merge(const KnownNodeAspects& other, Zone* zone) {
- DestructivelyIntersect(node_infos, other.node_infos,
- [](NodeInfo& lhs, const NodeInfo& rhs) {
- lhs.MergeWith(rhs);
- return !lhs.is_empty();
- });
- DestructivelyIntersect(
- stable_maps, other.stable_maps,
- [zone](ZoneHandleSet<Map>& lhs, const ZoneHandleSet<Map>& rhs) {
- for (Handle<Map> map : rhs) {
- lhs.insert(map, zone);
- }
- // We should always add the value even if the set is empty.
- return true;
- });
- DestructivelyIntersect(
- unstable_maps, other.unstable_maps,
- [zone](ZoneHandleSet<Map>& lhs, const ZoneHandleSet<Map>& rhs) {
- for (Handle<Map> map : rhs) {
- lhs.insert(map, zone);
- }
- // We should always add the value even if the set is empty.
- return true;
- });
- DestructivelyIntersect(loaded_constant_properties,
- other.loaded_constant_properties);
- DestructivelyIntersect(loaded_properties, other.loaded_properties);
- DestructivelyIntersect(loaded_context_constants,
- other.loaded_context_constants);
- DestructivelyIntersect(loaded_context_slots, other.loaded_context_slots);
- }
+ void Merge(const KnownNodeAspects& other, Zone* zone);
// TODO(leszeks): Store these more efficiently than with std::map -- in
// particular, clear out entries that are no longer reachable, perhaps also
@@ -253,9 +245,13 @@ struct KnownNodeAspects {
class InterpreterFrameState {
public:
+ InterpreterFrameState(const MaglevCompilationUnit& info,
+ KnownNodeAspects* known_node_aspects)
+ : frame_(info), known_node_aspects_(known_node_aspects) {}
+
explicit InterpreterFrameState(const MaglevCompilationUnit& info)
- : frame_(info),
- known_node_aspects_(info.zone()->New<KnownNodeAspects>(info.zone())) {}
+ : InterpreterFrameState(
+ info, info.zone()->New<KnownNodeAspects>(info.zone())) {}
inline void CopyFrom(const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
@@ -290,9 +286,14 @@ class InterpreterFrameState {
const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
- KnownNodeAspects& known_node_aspects() { return *known_node_aspects_; }
- const KnownNodeAspects& known_node_aspects() const {
- return *known_node_aspects_;
+ KnownNodeAspects* known_node_aspects() { return known_node_aspects_; }
+ const KnownNodeAspects* known_node_aspects() const {
+ return known_node_aspects_;
+ }
+
+ void set_known_node_aspects(KnownNodeAspects* known_node_aspects) {
+ DCHECK_NOT_NULL(known_node_aspects);
+ known_node_aspects_ = known_node_aspects;
}
private:
@@ -398,7 +399,7 @@ class CompactInterpreterFrameState {
DCHECK(liveness_->AccumulatorIsLive());
return live_registers_and_accumulator_[size(info) - 1];
}
- ValueNode* accumulator(const MaglevCompilationUnit& info) const {
+ ValueNode*& accumulator(const MaglevCompilationUnit& info) const {
DCHECK(liveness_->AccumulatorIsLive());
return live_registers_and_accumulator_[size(info) - 1];
}
@@ -406,7 +407,7 @@ class CompactInterpreterFrameState {
ValueNode*& context(const MaglevCompilationUnit& info) {
return live_registers_and_accumulator_[info.parameter_count()];
}
- ValueNode* context(const MaglevCompilationUnit& info) const {
+ ValueNode*& context(const MaglevCompilationUnit& info) const {
return live_registers_and_accumulator_[info.parameter_count()];
}
@@ -493,63 +494,13 @@ class MergePointInterpreterFrameState {
static MergePointInterpreterFrameState* New(
const MaglevCompilationUnit& info, const InterpreterFrameState& state,
int merge_offset, int predecessor_count, BasicBlock* predecessor,
- const compiler::BytecodeLivenessState* liveness) {
- MergePointInterpreterFrameState* merge_state =
- info.zone()->New<MergePointInterpreterFrameState>(
- info, predecessor_count, 1,
- info.zone()->NewArray<BasicBlock*>(predecessor_count),
- BasicBlockType::kDefault, liveness);
- merge_state->frame_state_.ForEachValue(
- info, [&](ValueNode*& entry, interpreter::Register reg) {
- entry = state.get(reg);
- });
- merge_state->predecessors_[0] = predecessor;
- merge_state->known_node_aspects_ =
- state.known_node_aspects().Clone(info.zone());
- return merge_state;
- }
+ const compiler::BytecodeLivenessState* liveness);
static MergePointInterpreterFrameState* NewForLoop(
const InterpreterFrameState& start_state,
const MaglevCompilationUnit& info, int merge_offset,
int predecessor_count, const compiler::BytecodeLivenessState* liveness,
- const compiler::LoopInfo* loop_info) {
- MergePointInterpreterFrameState* state =
- info.zone()->New<MergePointInterpreterFrameState>(
- info, predecessor_count, 0,
- info.zone()->NewArray<BasicBlock*>(predecessor_count),
- BasicBlockType::kLoopHeader, liveness);
- if (loop_info->resumable()) {
- state->known_node_aspects_ =
- info.zone()->New<KnownNodeAspects>(info.zone());
- state->is_resumable_loop_ = true;
- }
- auto& assignments = loop_info->assignments();
- auto& frame_state = state->frame_state_;
- frame_state.ForEachParameter(
- info, [&](ValueNode*& entry, interpreter::Register reg) {
- entry = nullptr;
- if (assignments.ContainsParameter(reg.ToParameterIndex())) {
- entry = state->NewLoopPhi(info.zone(), reg, merge_offset);
- } else if (state->is_resumable_loop()) {
- // Copy initial values out of the start state.
- entry = start_state.get(reg);
- DCHECK(entry->Is<InitialValue>());
- }
- });
- // TODO(v8:7700): Add contexts into assignment analysis.
- frame_state.context(info) = state->NewLoopPhi(
- info.zone(), interpreter::Register::current_context(), merge_offset);
- frame_state.ForEachLocal(
- info, [&](ValueNode*& entry, interpreter::Register reg) {
- entry = nullptr;
- if (assignments.ContainsLocal(reg.index())) {
- entry = state->NewLoopPhi(info.zone(), reg, merge_offset);
- }
- });
- DCHECK(!frame_state.liveness()->AccumulatorIsLive());
- return state;
- }
+ const compiler::LoopInfo* loop_info);
static MergePointInterpreterFrameState* NewForCatchBlock(
const MaglevCompilationUnit& unit,
@@ -560,109 +511,36 @@ class MergePointInterpreterFrameState {
// framestate.
void Merge(MaglevCompilationUnit& compilation_unit,
ZoneMap<int, SmiConstant*>& smi_constants,
- InterpreterFrameState& unmerged, BasicBlock* predecessor,
- int merge_offset) {
- DCHECK_GT(predecessor_count_, 1);
- DCHECK_LT(predecessors_so_far_, predecessor_count_);
- predecessors_[predecessors_so_far_] = predecessor;
-
- if (v8_flags.trace_maglev_graph_building) {
- std::cout << "Merging..." << std::endl;
- }
- frame_state_.ForEachValue(compilation_unit, [&](ValueNode*& value,
- interpreter::Register reg) {
- if (v8_flags.trace_maglev_graph_building) {
- std::cout << " " << reg.ToString() << ": "
- << PrintNodeLabel(compilation_unit.graph_labeller(), value)
- << " <- "
- << PrintNodeLabel(compilation_unit.graph_labeller(),
- unmerged.get(reg));
- }
- value = MergeValue(compilation_unit, smi_constants, reg,
- unmerged.known_node_aspects(), value,
- unmerged.get(reg), merge_offset);
- if (v8_flags.trace_maglev_graph_building) {
- std::cout << " => "
- << PrintNodeLabel(compilation_unit.graph_labeller(), value)
- << ": " << PrintNode(compilation_unit.graph_labeller(), value)
- << std::endl;
- }
- });
-
- if (known_node_aspects_ == nullptr) {
- DCHECK(is_unmerged_loop());
- DCHECK_EQ(predecessors_so_far_, 0);
- known_node_aspects_ = unmerged.known_node_aspects().CloneForLoopHeader(
- compilation_unit.zone());
- } else {
- known_node_aspects_->Merge(unmerged.known_node_aspects(),
- compilation_unit.zone());
- }
-
- predecessors_so_far_++;
- DCHECK_LE(predecessors_so_far_, predecessor_count_);
- }
+ InterpreterFrameState& unmerged, BasicBlock* predecessor);
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void MergeLoop(MaglevCompilationUnit& compilation_unit,
ZoneMap<int, SmiConstant*>& smi_constants,
InterpreterFrameState& loop_end_state,
- BasicBlock* loop_end_block, int merge_offset) {
- // This should be the last predecessor we try to merge.
- DCHECK_EQ(predecessors_so_far_, predecessor_count_ - 1);
- DCHECK(is_unmerged_loop());
- predecessors_[predecessor_count_ - 1] = loop_end_block;
-
- if (v8_flags.trace_maglev_graph_building) {
- std::cout << "Merging loop backedge..." << std::endl;
- }
- frame_state_.ForEachValue(compilation_unit, [&](ValueNode* value,
- interpreter::Register reg) {
- if (v8_flags.trace_maglev_graph_building) {
- std::cout << " " << reg.ToString() << ": "
- << PrintNodeLabel(compilation_unit.graph_labeller(), value)
- << " <- "
- << PrintNodeLabel(compilation_unit.graph_labeller(),
- loop_end_state.get(reg));
- }
- MergeLoopValue(compilation_unit, smi_constants, reg,
- loop_end_state.known_node_aspects(), value,
- loop_end_state.get(reg), merge_offset);
- if (v8_flags.trace_maglev_graph_building) {
- std::cout << " => "
- << PrintNodeLabel(compilation_unit.graph_labeller(), value)
- << ": " << PrintNode(compilation_unit.graph_labeller(), value)
- << std::endl;
- }
- });
- predecessors_so_far_++;
- DCHECK_EQ(predecessors_so_far_, predecessor_count_);
- }
+ BasicBlock* loop_end_block);
// Merges a dead framestate (e.g. one which has been early terminated with a
// deopt).
- void MergeDead(const MaglevCompilationUnit& compilation_unit,
- int merge_offset) {
+ void MergeDead(const MaglevCompilationUnit& compilation_unit) {
DCHECK_GE(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessor_count_--;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
- frame_state_.ForEachValue(
- compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
- ReducePhiPredecessorCount(reg, value, merge_offset);
- });
+ frame_state_.ForEachValue(compilation_unit,
+ [&](ValueNode* value, interpreter::Register reg) {
+ ReducePhiPredecessorCount(reg, value);
+ });
}
// Merges a dead loop framestate (e.g. one where the block containing the
// JumpLoop has been early terminated with a deopt).
- void MergeDeadLoop(const MaglevCompilationUnit& compilation_unit,
- int merge_offset) {
+ void MergeDeadLoop(const MaglevCompilationUnit& compilation_unit) {
// This should be the last predecessor we try to merge.
DCHECK_EQ(predecessors_so_far_, predecessor_count_ - 1);
DCHECK(is_unmerged_loop());
- MergeDead(compilation_unit, merge_offset);
+ MergeDead(compilation_unit);
// This means that this is no longer a loop.
basic_block_type_ = BasicBlockType::kDefault;
}
@@ -713,7 +591,33 @@ class MergePointInterpreterFrameState {
bool is_resumable_loop() const { return is_resumable_loop_; }
+ int merge_offset() const { return merge_offset_; }
+
private:
+ // For each non-Phi value in the frame state, store its alternative
+ // representations to avoid re-converting on Phi creation.
+ class Alternatives {
+ public:
+ using List = base::ThreadedList<Alternatives>;
+
+ explicit Alternatives(const NodeInfo* node_info)
+ : node_type_(node_info ? node_info->type : NodeType::kUnknown),
+ tagged_alternative_(node_info ? node_info->tagged_alternative
+ : nullptr) {}
+
+ NodeType node_type() const { return node_type_; }
+ ValueNode* tagged_alternative() const { return tagged_alternative_; }
+
+ private:
+ Alternatives** next() { return &next_; }
+
+ // For now, Phis are tagged, so only store the tagged alternative.
+ NodeType node_type_;
+ ValueNode* tagged_alternative_;
+ Alternatives* next_ = nullptr;
+ friend base::ThreadedListTraits<Alternatives>;
+ };
+
friend void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
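
The Alternatives class added above is an intrusive list element: it stores its own next_ pointer and exposes it through next() for base::ThreadedList, so appending an alternative never allocates a separate list node. A standalone sketch of that pattern (a simplified list, not the real base::ThreadedList API):

#include <cstdio>

// Simplified intrusive singly-linked list: elements carry their own link.
template <typename T>
class IntrusiveListSketch {
 public:
  void Add(T* element) {
    *tail_ = element;
    tail_ = element->next();
  }
  T* first() const { return head_; }

 private:
  T* head_ = nullptr;
  T** tail_ = &head_;
};

struct AlternativeSketch {
  explicit AlternativeSketch(int tag) : tag_(tag) {}
  int tag() const { return tag_; }
  AlternativeSketch** next() { return &next_; }  // the embedded link

 private:
  int tag_;
  AlternativeSketch* next_ = nullptr;
};

int main() {
  IntrusiveListSketch<AlternativeSketch> list;
  AlternativeSketch a(1), b(2);
  list.Add(&a);
  list.Add(&b);
  for (AlternativeSketch* it = list.first(); it != nullptr; it = *it->next()) {
    std::printf("alternative %d\n", it->tag());
  }
}
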
@@ -722,232 +626,39 @@ class MergePointInterpreterFrameState {
friend T* Zone::New(Args&&... args);
MergePointInterpreterFrameState(
- const MaglevCompilationUnit& info, int predecessor_count,
- int predecessors_so_far, BasicBlock** predecessors, BasicBlockType type,
- const compiler::BytecodeLivenessState* liveness)
- : predecessor_count_(predecessor_count),
- predecessors_so_far_(predecessors_so_far),
- predecessors_(predecessors),
- basic_block_type_(type),
- frame_state_(info, liveness) {}
-
- // TODO(victorgomes): Consider refactor this function to share code with
- // MaglevGraphBuilder::GetSmiConstant.
- SmiConstant* GetSmiConstant(MaglevCompilationUnit& compilation_unit,
- ZoneMap<int, SmiConstant*>& smi_constants,
- int constant) {
- DCHECK(Smi::IsValid(constant));
- auto it = smi_constants.find(constant);
- if (it == smi_constants.end()) {
- SmiConstant* node = Node::New<SmiConstant>(compilation_unit.zone(), 0,
- Smi::FromInt(constant));
- compilation_unit.RegisterNodeInGraphLabeller(node);
- smi_constants.emplace(constant, node);
- return node;
- }
- return it->second;
- }
-
- ValueNode* FromInt32ToTagged(MaglevCompilationUnit& compilation_unit,
- ZoneMap<int, SmiConstant*>& smi_constants,
- KnownNodeAspects& known_node_aspects,
- ValueNode* value) {
- DCHECK_EQ(value->properties().value_representation(),
- ValueRepresentation::kInt32);
- DCHECK(!value->properties().is_conversion());
-#define IS_INT32_OP_NODE(Name) || value->Is<Name>()
- DCHECK(value->Is<Int32Constant>() || value->Is<StringLength>() ||
- value->Is<BuiltinStringPrototypeCharCodeAt>()
- INT32_OPERATIONS_NODE_LIST(IS_INT32_OP_NODE));
-#undef IS_INT32_OP_NODE
- NodeInfo* node_info = known_node_aspects.GetOrCreateInfoFor(value);
- if (!node_info->tagged_alternative) {
- // Create a tagged version.
- ValueNode* tagged;
- if (value->Is<Int32Constant>()) {
- int32_t constant = value->Cast<Int32Constant>()->value();
- return GetSmiConstant(compilation_unit, smi_constants, constant);
- } else if (value->Is<StringLength>() ||
- value->Is<BuiltinStringPrototypeCharCodeAt>()) {
- static_assert(String::kMaxLength <= kSmiMaxValue,
- "String length must fit into a Smi");
- tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
- } else {
- tagged = Node::New<CheckedSmiTag, std::initializer_list<ValueNode*>>(
- compilation_unit.zone(),
- DeoptFrame(value->eager_deopt_info()->top_frame()), {value});
- }
-
- Node::List::AddAfter(value, tagged);
- compilation_unit.RegisterNodeInGraphLabeller(tagged);
- node_info->tagged_alternative = tagged;
- }
- return node_info->tagged_alternative;
- }
-
- ValueNode* FromFloat64ToTagged(MaglevCompilationUnit& compilation_unit,
- KnownNodeAspects& known_node_aspects,
- ValueNode* value) {
- DCHECK_EQ(value->properties().value_representation(),
- ValueRepresentation::kFloat64);
- DCHECK(!value->properties().is_conversion());
- // Check if the next Node in the block after value is its Float64Box
- // version and reuse it.
- if (value->NextNode()) {
- Float64Box* tagged = value->NextNode()->TryCast<Float64Box>();
- if (tagged != nullptr && value == tagged->input().node()) {
- return tagged;
- }
- }
- // Otherwise create a tagged version.
- ValueNode* tagged = Node::New<Float64Box>(compilation_unit.zone(), {value});
- Node::List::AddAfter(value, tagged);
- compilation_unit.RegisterNodeInGraphLabeller(tagged);
- return tagged;
- }
-
- // TODO(victorgomes): Consider refactor this function to share code with
- // MaglevGraphBuilder::GetTagged.
- ValueNode* EnsureTagged(MaglevCompilationUnit& compilation_unit,
- ZoneMap<int, SmiConstant*>& smi_constants,
- KnownNodeAspects& known_node_aspects,
- ValueNode* value) {
- switch (value->properties().value_representation()) {
- case ValueRepresentation::kTagged:
- return value;
- case ValueRepresentation::kInt32:
- return FromInt32ToTagged(compilation_unit, smi_constants,
- known_node_aspects, value);
- case ValueRepresentation::kFloat64:
- return FromFloat64ToTagged(compilation_unit, known_node_aspects, value);
- }
- }
+ const MaglevCompilationUnit& info, int merge_offset,
+ int predecessor_count, int predecessors_so_far, BasicBlock** predecessors,
+ BasicBlockType type, const compiler::BytecodeLivenessState* liveness);
ValueNode* MergeValue(MaglevCompilationUnit& compilation_unit,
ZoneMap<int, SmiConstant*>& smi_constants,
interpreter::Register owner,
- KnownNodeAspects& unmerged_aspects, ValueNode* merged,
- ValueNode* unmerged, int merge_offset) {
- // If the merged node is null, this is a pre-created loop header merge
- // frame will null values for anything that isn't a loop Phi.
- if (merged == nullptr) {
- DCHECK(is_unmerged_loop());
- DCHECK_EQ(predecessors_so_far_, 0);
- return unmerged;
- }
-
- Phi* result = merged->TryCast<Phi>();
- if (result != nullptr && result->merge_offset() == merge_offset) {
- // It's possible that merged == unmerged at this point since loop-phis are
- // not dropped if they are only assigned to themselves in the loop.
- DCHECK_EQ(result->owner(), owner);
- unmerged = EnsureTagged(compilation_unit, smi_constants, unmerged_aspects,
- unmerged);
- result->set_input(predecessors_so_far_, unmerged);
- return result;
- }
+ const KnownNodeAspects& unmerged_aspects,
+ ValueNode* merged, ValueNode* unmerged,
+ Alternatives::List& per_predecessor_alternatives);
- if (merged == unmerged) return merged;
-
- // We guarantee that the values are tagged.
- // TODO(victorgomes): Support Phi nodes of untagged values.
- merged = EnsureTagged(compilation_unit, smi_constants, *known_node_aspects_,
- merged);
- unmerged = EnsureTagged(compilation_unit, smi_constants, unmerged_aspects,
- unmerged);
-
- // Tagged versions could point to the same value, avoid Phi nodes in this
- // case.
- if (merged == unmerged) return merged;
-
- // Up to this point all predecessors had the same value for this interpreter
- // frame slot. Now that we find a distinct value, insert a copy of the first
- // value for each predecessor seen so far, in addition to the new value.
- // TODO(verwaest): Unclear whether we want this for Maglev: Instead of
- // letting the register allocator remove phis, we could always merge through
- // the frame slot. In that case we only need the inputs for representation
- // selection, and hence could remove duplicate inputs. We'd likely need to
- // attach the interpreter register to the phi in that case?
- result = Node::New<Phi>(compilation_unit.zone(), predecessor_count_, owner,
- merge_offset);
-
- for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
- result->set_input(predecessors_so_far_, unmerged);
- if (v8_flags.trace_maglev_graph_building) {
- for (int i = predecessors_so_far_ + 1; i < predecessor_count_; i++) {
- result->set_input(i, nullptr);
- }
- }
-
- phis_.Add(result);
- return result;
- }
-
- void ReducePhiPredecessorCount(interpreter::Register owner, ValueNode* merged,
- int merge_offset) {
- // If the merged node is null, this is a pre-created loop header merge
- // frame with null values for anything that isn't a loop Phi.
- if (merged == nullptr) {
- DCHECK(is_unmerged_loop());
- DCHECK_EQ(predecessors_so_far_, 0);
- return;
- }
-
- Phi* result = merged->TryCast<Phi>();
- if (result != nullptr && result->merge_offset() == merge_offset) {
- // It's possible that merged == unmerged at this point since loop-phis are
- // not dropped if they are only assigned to themselves in the loop.
- DCHECK_EQ(result->owner(), owner);
- result->reduce_input_count();
- }
- }
+ void ReducePhiPredecessorCount(interpreter::Register owner,
+ ValueNode* merged);
void MergeLoopValue(MaglevCompilationUnit& compilation_unit,
ZoneMap<int, SmiConstant*>& smi_constants,
interpreter::Register owner,
KnownNodeAspects& unmerged_aspects, ValueNode* merged,
- ValueNode* unmerged, int merge_offset) {
- Phi* result = merged->TryCast<Phi>();
- if (result == nullptr || result->merge_offset() != merge_offset) {
- if (merged != unmerged) {
- // TODO(leszeks): These DCHECKs are too restrictive, investigate making
- // them looser.
- // DCHECK(unmerged->Is<CheckedSmiUntag>() ||
- // unmerged->Is<CheckedFloat64Unbox>());
- // DCHECK_EQ(merged, unmerged->input(0).node());
- }
- return;
- }
- DCHECK_EQ(result->owner(), owner);
- unmerged = EnsureTagged(compilation_unit, smi_constants, unmerged_aspects,
- unmerged);
- result->set_input(predecessor_count_ - 1, unmerged);
- }
+ ValueNode* unmerged);
- ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg,
- int merge_offset) {
- DCHECK_EQ(predecessors_so_far_, 0);
- // Create a new loop phi, which for now is empty.
- Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
- if (v8_flags.trace_maglev_graph_building) {
- for (int i = 0; i < predecessor_count_; i++) {
- result->set_input(i, nullptr);
- }
- }
- phis_.Add(result);
- return result;
- }
+ ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg);
- ValueNode* NewExceptionPhi(Zone* zone, interpreter::Register reg,
- int handler_offset) {
+ ValueNode* NewExceptionPhi(Zone* zone, interpreter::Register reg) {
DCHECK_EQ(predecessors_so_far_, 0);
DCHECK_EQ(predecessor_count_, 0);
DCHECK_NULL(predecessors_);
- Phi* result = Node::New<Phi>(zone, 0, reg, handler_offset);
+ Phi* result = Node::New<Phi>(zone, 0, this, reg);
phis_.Add(result);
return result;
}
+ int merge_offset_;
+
int predecessor_count_;
int predecessors_so_far_;
bool is_resumable_loop_ = false;
@@ -959,6 +670,7 @@ class MergePointInterpreterFrameState {
CompactInterpreterFrameState frame_state_;
MergePointRegisterState register_state_;
KnownNodeAspects* known_node_aspects_ = nullptr;
+ Alternatives::List* per_predecessor_alternatives_;
};
void InterpreterFrameState::CopyFrom(
diff --git a/deps/v8/src/maglev/maglev-ir-inl.h b/deps/v8/src/maglev/maglev-ir-inl.h
index db58acaa25..03ce8e3a1e 100644
--- a/deps/v8/src/maglev/maglev-ir-inl.h
+++ b/deps/v8/src/maglev/maglev-ir-inl.h
@@ -25,16 +25,22 @@ void DeepForEachInputImpl(const DeoptFrame& frame,
case DeoptFrame::FrameType::kInterpretedFrame:
frame.as_interpreted().frame_state()->ForEachValue(
frame.as_interpreted().unit(),
- [&](ValueNode* node, interpreter::Register reg) {
+ [&](ValueNode*& node, interpreter::Register reg) {
f(node, &input_locations[index++]);
});
break;
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame: {
+ for (ValueNode*& node : frame.as_inlined_arguments().arguments()) {
+ f(node, &input_locations[index++]);
+ }
+ break;
+ }
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
- for (ValueNode* node : frame.as_builtin_continuation().parameters()) {
+ for (ValueNode*& node : frame.as_builtin_continuation().parameters()) {
f(node, &input_locations[index++]);
}
f(frame.as_builtin_continuation().context(), &input_locations[index++]);
- UNREACHABLE();
+ break;
}
}
@@ -59,24 +65,110 @@ void DeepForEachInput(const LazyDeoptInfo* deopt_info, Function&& f) {
case DeoptFrame::FrameType::kInterpretedFrame:
top_frame.as_interpreted().frame_state()->ForEachValue(
top_frame.as_interpreted().unit(),
- [&](ValueNode* node, interpreter::Register reg) {
+ [&](ValueNode*& node, interpreter::Register reg) {
// Skip over the result location since it is irrelevant for lazy
// deopts (unoptimized code will recreate the result).
if (deopt_info->IsResultRegister(reg)) return;
f(node, &input_locations[index++]);
});
break;
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame:
+ // The inlined arguments frame can never be the top frame.
+ UNREACHABLE();
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
- for (ValueNode* node : top_frame.as_builtin_continuation().parameters()) {
+ for (ValueNode*& node :
+ top_frame.as_builtin_continuation().parameters()) {
f(node, &input_locations[index++]);
- };
+ }
f(top_frame.as_builtin_continuation().context(),
&input_locations[index++]);
+ break;
}
}
} // namespace detail
+inline void AddDeoptRegistersToSnapshot(RegisterSnapshot* snapshot,
+ const EagerDeoptInfo* deopt_info) {
+ detail::DeepForEachInput(deopt_info, [&](ValueNode* node,
+ InputLocation* input) {
+ if (!input->IsAnyRegister()) return;
+ if (input->IsDoubleRegister()) {
+ snapshot->live_double_registers.set(input->AssignedDoubleRegister());
+ } else {
+ snapshot->live_registers.set(input->AssignedGeneralRegister());
+ if (node->is_tagged()) {
+ snapshot->live_tagged_registers.set(input->AssignedGeneralRegister());
+ }
+ }
+ });
+}
+
+#ifdef DEBUG
+inline RegList GetGeneralRegistersUsedAsInputs(
+ const EagerDeoptInfo* deopt_info) {
+ RegList regs;
+ detail::DeepForEachInput(deopt_info,
+ [&regs](ValueNode* value, InputLocation* input) {
+ if (input->IsGeneralRegister()) {
+ regs.set(input->AssignedGeneralRegister());
+ }
+ });
+ return regs;
+}
+#endif // DEBUG
+
+// Helper macro for checking that a reglist is empty, which prints the contents
+// when non-empty.
+#define DCHECK_REGLIST_EMPTY(...) DCHECK_EQ((__VA_ARGS__), RegList{})
+
+// ---
+// Value location constraint setting helpers.
+// ---
+
+static constexpr int kNoVreg = -1;
+
+inline void DefineAsRegister(Node* node) {
+ node->result().SetUnallocated(
+ compiler::UnallocatedOperand::MUST_HAVE_REGISTER, kNoVreg);
+}
+inline void DefineAsConstant(Node* node) {
+ node->result().SetUnallocated(compiler::UnallocatedOperand::NONE, kNoVreg);
+}
+
+inline void DefineAsFixed(Node* node, Register reg) {
+ node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
+ reg.code(), kNoVreg);
+}
+
+inline void DefineSameAsFirst(Node* node) {
+ node->result().SetUnallocated(kNoVreg, 0);
+}
+
+inline void UseRegister(Input& input) {
+ input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
+ compiler::UnallocatedOperand::USED_AT_END, kNoVreg);
+}
+inline void UseAndClobberRegister(Input& input) {
+ input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
+ compiler::UnallocatedOperand::USED_AT_START, kNoVreg);
+}
+inline void UseAny(Input& input) {
+ input.SetUnallocated(
+ compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ compiler::UnallocatedOperand::USED_AT_END, kNoVreg);
+}
+inline void UseFixed(Input& input, Register reg) {
+ input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
+ kNoVreg);
+ input.node()->SetHint(input.operand());
+}
+inline void UseFixed(Input& input, DoubleRegister reg) {
+ input.SetUnallocated(compiler::UnallocatedOperand::FIXED_FP_REGISTER,
+ reg.code(), kNoVreg);
+ input.node()->SetHint(input.operand());
+}
+
} // namespace maglev
} // namespace internal
} // namespace v8
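These inlined helpers replace the MaglevVregAllocationState-threading versions removed from maglev-ir.cc below: constraints are now recorded against kNoVreg and concrete virtual registers are assigned later by the register allocator. As a rough, self-contained sketch of the pattern (Policy, Constraint and ToyNode are hypothetical stand-ins, not the compiler::UnallocatedOperand API), a node's SetValueLocationConstraints step simply records one policy per input plus one for the result:

#include <cstdint>
#include <vector>

// Hypothetical stand-ins for the operand policies used by the helpers above.
enum class Policy : uint8_t {
  kMustHaveRegister,       // UseRegister / DefineAsRegister
  kFixedRegister,          // UseFixed / DefineAsFixed
  kRegisterOrSlotOrConst,  // UseAny
  kSameAsFirstInput,       // DefineSameAsFirst
  kConstant,               // DefineAsConstant
};

struct Constraint {
  Policy policy;
  int fixed_register_code = -1;  // Only meaningful for kFixedRegister.
};

// A toy "node": one constraint per input plus one for the result. A later
// register-allocation pass would replace these with concrete operands.
struct ToyNode {
  std::vector<Constraint> inputs;
  Constraint result{Policy::kMustHaveRegister};
};

// Mirrors the shape of e.g. DeleteProperty::SetValueLocationConstraints():
// fixed inputs for a builtin call, fixed result in the return register.
void SetToyCallConstraints(ToyNode& node, int context_reg, int arg_reg,
                           int return_reg) {
  node.inputs.push_back({Policy::kFixedRegister, context_reg});
  node.inputs.push_back({Policy::kFixedRegister, arg_reg});
  node.result = {Policy::kFixedRegister, return_reg};
}

int main() {
  ToyNode n;
  SetToyCallConstraints(n, /*context_reg=*/7, /*arg_reg=*/0, /*return_reg=*/0);
  return n.inputs.size() == 2 ? 0 : 1;
}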
diff --git a/deps/v8/src/maglev/maglev-ir.cc b/deps/v8/src/maglev/maglev-ir.cc
index 073e1e5ecc..3b31db5241 100644
--- a/deps/v8/src/maglev/maglev-ir.cc
+++ b/deps/v8/src/maglev/maglev-ir.cc
@@ -4,36 +4,30 @@
#include "src/maglev/maglev-ir.h"
-#include "src/base/bits.h"
-#include "src/base/logging.h"
-#include "src/baseline/baseline-assembler-inl.h"
+#include <limits>
+
+#include "src/base/bounds.h"
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/interface-descriptors-inl.h"
-#include "src/codegen/maglev-safepoint-table.h"
-#include "src/codegen/register.h"
-#include "src/codegen/reglist.h"
-#include "src/codegen/x64/assembler-x64.h"
-#include "src/codegen/x64/register-x64.h"
-#include "src/common/globals.h"
-#include "src/compiler/backend/instruction.h"
-#include "src/deoptimizer/deoptimize-reason.h"
-#include "src/ic/handler-configuration.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/compiler/heap-refs.h"
+#include "src/execution/isolate-inl.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/parked-scope.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/maglev/maglev-assembler-inl.h"
-#include "src/maglev/maglev-code-gen-state.h"
-#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-graph-labeller.h"
-#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
-#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir-inl.h"
-#include "src/maglev/maglev-vreg-allocator.h"
-#include "src/objects/instance-type.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
namespace maglev {
+#define __ masm->
+
const char* OpcodeToString(Opcode opcode) {
#define DEF_NAME(Name) #Name,
static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
@@ -41,97 +35,71 @@ const char* OpcodeToString(Opcode opcode) {
return names[static_cast<int>(opcode)];
}
-#define __ masm->
+BasicBlock* Phi::predecessor_at(int i) {
+ return merge_state_->predecessor_at(i);
+}
namespace {
-// ---
-// Vreg allocation helpers.
-// ---
+// Prevent people from accidentally using kScratchRegister here and having their
+// code break on arm64.
+struct Do_not_use_kScratchRegister_in_arch_independent_code {
+} kScratchRegister;
+struct Do_not_use_kScratchDoubleRegister_in_arch_independent_code {
+} kScratchDoubleRegister;
+static_assert(!std::is_same_v<decltype(kScratchRegister), Register>);
+static_assert(
+ !std::is_same_v<decltype(kScratchDoubleRegister), DoubleRegister>);
-int GetVirtualRegister(Node* node) {
- return compiler::UnallocatedOperand::cast(node->result().operand())
- .virtual_register();
-}
+} // namespace
-void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
- node->result().SetUnallocated(
- compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
- vreg_state->AllocateVirtualRegister());
-}
-void DefineAsConstant(MaglevVregAllocationState* vreg_state, Node* node) {
- node->result().SetUnallocated(compiler::UnallocatedOperand::NONE,
- vreg_state->AllocateVirtualRegister());
-}
+#ifdef DEBUG
+namespace {
-void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
- Register reg) {
- node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
- reg.code(),
- vreg_state->AllocateVirtualRegister());
+template <size_t InputCount, typename Base, typename Derived>
+int StaticInputCount(FixedInputNodeTMixin<InputCount, Base, Derived>*) {
+ return InputCount;
}
-void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
- node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
-}
+int StaticInputCount(NodeBase*) { UNREACHABLE(); }
-void UseRegister(Input& input) {
- input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
- compiler::UnallocatedOperand::USED_AT_END,
- GetVirtualRegister(input.node()));
-}
-void UseAndClobberRegister(Input& input) {
- input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
- compiler::UnallocatedOperand::USED_AT_START,
- GetVirtualRegister(input.node()));
-}
-void UseAny(Input& input) {
- input.SetUnallocated(
- compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
- compiler::UnallocatedOperand::USED_AT_END,
- GetVirtualRegister(input.node()));
-}
-void UseFixed(Input& input, Register reg) {
- input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
- GetVirtualRegister(input.node()));
-}
-[[maybe_unused]] void UseFixed(Input& input, DoubleRegister reg) {
- input.SetUnallocated(compiler::UnallocatedOperand::FIXED_FP_REGISTER,
- reg.code(), GetVirtualRegister(input.node()));
-}
+} // namespace
-void AddDeoptRegistersToSnapshot(RegisterSnapshot* snapshot,
- const EagerDeoptInfo* deopt_info) {
- detail::DeepForEachInput(deopt_info, [&](ValueNode* node,
- InputLocation* input) {
- if (!input->IsAnyRegister()) return;
- if (input->IsDoubleRegister()) {
- snapshot->live_double_registers.set(input->AssignedDoubleRegister());
- } else {
- snapshot->live_registers.set(input->AssignedGeneralRegister());
- if (node->is_tagged()) {
- snapshot->live_tagged_registers.set(input->AssignedGeneralRegister());
- }
- }
- });
-}
+void NodeBase::CheckCanOverwriteWith(Opcode new_opcode,
+ OpProperties new_properties) {
+ DCHECK_IMPLIES(new_properties.can_eager_deopt(),
+ properties().can_eager_deopt());
+ DCHECK_IMPLIES(new_properties.can_lazy_deopt(),
+ properties().can_lazy_deopt());
+ DCHECK_IMPLIES(new_properties.needs_register_snapshot(),
+ properties().needs_register_snapshot());
+
+ int old_input_count = input_count();
+ size_t old_sizeof = -1;
+ switch (opcode()) {
+#define CASE(op) \
+ case Opcode::k##op: \
+ old_sizeof = sizeof(op); \
+ break;
+ NODE_BASE_LIST(CASE);
+#undef CASE
+ }
-#ifdef DEBUG
-RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
- RegList regs;
- detail::DeepForEachInput(deopt_info,
- [&regs](ValueNode* value, InputLocation* input) {
- if (input->IsGeneralRegister()) {
- regs.set(input->AssignedGeneralRegister());
- }
- });
- return regs;
+ switch (new_opcode) {
+#define CASE(op) \
+ case Opcode::k##op: { \
+ DCHECK_EQ(old_input_count, StaticInputCount(static_cast<op*>(this))); \
+ DCHECK_EQ(sizeof(op), old_sizeof); \
+ break; \
+ }
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
}
+
#endif // DEBUG
-// Helper macro for checking that a reglist is empty which prints the contents
-// when non-empty.
-#define DCHECK_REGLIST_EMPTY(...) DCHECK_EQ((__VA_ARGS__), RegList{})
+namespace {
// ---
// Print
@@ -217,27 +185,6 @@ void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
}
}
-} // namespace
-
-void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller,
- bool skip_targets) const {
- switch (opcode()) {
-#define V(Name) \
- case Opcode::k##Name: \
- return PrintImpl(os, graph_labeller, this->Cast<Name>(), skip_targets);
- NODE_BASE_LIST(V)
-#undef V
- }
- UNREACHABLE();
-}
-
-void NodeBase::Print() const {
- MaglevGraphLabeller labeller;
- Print(std::cout, &labeller);
- std::cout << std::endl;
-}
-
-namespace {
size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) {
size_t size = 0;
const DeoptFrame* frame = &top_frame;
@@ -247,6 +194,9 @@ size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) {
size += frame->as_interpreted().frame_state()->size(
frame->as_interpreted().unit());
break;
+ case DeoptFrame::FrameType::kInlinedArgumentsFrame:
+ size += frame->as_inlined_arguments().arguments().size();
+ break;
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
size += frame->as_builtin_continuation().parameters().size() + 1;
break;
@@ -255,10 +205,85 @@ size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) {
} while (frame != nullptr);
return size;
}
+
+bool RootToBoolean(RootIndex index) {
+ switch (index) {
+ case RootIndex::kFalseValue:
+ case RootIndex::kNullValue:
+ case RootIndex::kUndefinedValue:
+ case RootIndex::kNanValue:
+ case RootIndex::kHoleNanValue:
+ case RootIndex::kMinusZeroValue:
+ case RootIndex::kempty_string:
+#ifdef V8_ENABLE_WEBASSEMBLY
+ case RootIndex::kWasmNull:
+#endif
+ return false;
+ default:
+ return true;
+ }
+}
+
+#ifdef DEBUG
+// For all RO roots, check that RootToBoolean returns the same value as
+// BooleanValue on that root.
+bool CheckToBooleanOnAllRoots(LocalIsolate* local_isolate) {
+ ReadOnlyRoots roots(local_isolate);
+ // Use the READ_ONLY_ROOT_LIST macro list rather than a for loop to get nicer
+ // error messages if there is a failure.
+#define DO_CHECK(type, name, CamelName) \
+ /* Ignore 'undefined' roots that are not the undefined value itself. */ \
+ if (roots.name() != roots.undefined_value() || \
+ RootIndex::k##CamelName == RootIndex::kUndefinedValue) { \
+ DCHECK_EQ(roots.name().BooleanValue(local_isolate), \
+ RootToBoolean(RootIndex::k##CamelName)); \
+ }
+ READ_ONLY_ROOT_LIST(DO_CHECK)
+#undef DO_CHECK
+ return true;
+}
+#endif
+
} // namespace
-DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame)
+bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const {
+#ifdef DEBUG
+ // (Ab)use static locals to call CheckToBooleanOnAllRoots once, on first
+ // call to this function.
+ static bool check_once = CheckToBooleanOnAllRoots(local_isolate);
+ DCHECK(check_once);
+#endif
+ // ToBoolean is only supported for RO roots.
+ DCHECK(RootsTable::IsReadOnly(index_));
+ return RootToBoolean(index_);
+}
+
+bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node) {
+ DCHECK(IsConstantNode(node->opcode()));
+ switch (node->opcode()) {
+#define CASE(Name) \
+ case Opcode::k##Name: { \
+ return node->Cast<Name>()->ToBoolean(local_isolate); \
+ }
+ CONSTANT_VALUE_NODE_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) {
+ // TODO(leszeks): Getting the main thread local isolate is not what we
+ // actually want here, but it's all we have, and it happens to work because
+ // really all we're using it for is ReadOnlyRoots. We should change ToBoolean
+ // to be able to pass ReadOnlyRoots in directly.
+ return FromConstantToBool(masm->isolate()->AsLocalIsolate(), node);
+}
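FromConstantToBool above dispatches over CONSTANT_VALUE_NODE_LIST with an X-macro-generated switch. If that pattern is unfamiliar, here is a minimal self-contained illustration (a toy list and toy types, not V8's macros): each list entry expands to one switch case that forwards to the type's ToBoolean().

#include <cassert>

// Toy X-macro list standing in for V8's CONSTANT_VALUE_NODE_LIST.
#define TOY_CONSTANT_LIST(V) \
  V(ToyTrue)                 \
  V(ToyFalse)

enum class ToyOpcode {
#define DEF_ENUM(Name) k##Name,
  TOY_CONSTANT_LIST(DEF_ENUM)
#undef DEF_ENUM
};

struct ToyTrue {
  static bool ToBoolean() { return true; }
};
struct ToyFalse {
  static bool ToBoolean() { return false; }
};

// Same shape as FromConstantToBool: one switch case per list entry, generated
// by the macro, each forwarding to the node type's ToBoolean().
bool ToyToBool(ToyOpcode op) {
  switch (op) {
#define CASE(Name)         \
  case ToyOpcode::k##Name: \
    return Name::ToBoolean();
    TOY_CONSTANT_LIST(CASE)
#undef CASE
  }
  return false;  // Unreachable: the switch covers the whole list.
}

int main() {
  assert(ToyToBool(ToyOpcode::kToyTrue));
  assert(!ToyToBool(ToyOpcode::kToyFalse));
  return 0;
}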
+
+DeoptInfo::DeoptInfo(Zone* zone, const DeoptFrame top_frame,
+ compiler::FeedbackSource feedback_to_update)
: top_frame_(top_frame),
+ feedback_to_update_(feedback_to_update),
input_locations_(zone->NewArray<InputLocation>(
GetInputLocationsArraySize(top_frame))) {
// Initialise InputLocations so that they correctly don't have a next use id.
@@ -268,17 +293,371 @@ DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame)
}
bool LazyDeoptInfo::IsResultRegister(interpreter::Register reg) const {
- if (V8_LIKELY(result_size_ == 1)) {
+ if (V8_LIKELY(result_size() == 1)) {
return reg == result_location_;
}
- DCHECK_EQ(result_size_, 2);
+ if (result_size() == 0) {
+ return false;
+ }
+ DCHECK_EQ(result_size(), 2);
return reg == result_location_ ||
reg == interpreter::Register(result_location_.index() + 1);
}
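The new result_size() == 0 early-out above covers calls that produce no result; otherwise the test is a one- or two-register range check anchored at the first result register. A minimal standalone sketch with plain ints in place of interpreter::Register (the sketch's name is made up):

#include <cassert>

// result_location is the index of the first result register; result_size is
// 0, 1 or 2, matching the cases handled by IsResultRegister above.
bool IsResultRegisterSketch(int reg, int result_location, int result_size) {
  if (result_size == 0) return false;
  if (result_size == 1) return reg == result_location;
  // result_size == 2: the result occupies two consecutive registers.
  return reg == result_location || reg == result_location + 1;
}

int main() {
  assert(!IsResultRegisterSketch(3, 3, 0));
  assert(IsResultRegisterSketch(3, 3, 1));
  assert(IsResultRegisterSketch(4, 3, 2) && !IsResultRegisterSketch(5, 3, 2));
  return 0;
}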
+void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ bool skip_targets) const {
+ switch (opcode()) {
+#define V(Name) \
+ case Opcode::k##Name: \
+ return PrintImpl(os, graph_labeller, this->Cast<Name>(), skip_targets);
+ NODE_BASE_LIST(V)
+#undef V
+ }
+ UNREACHABLE();
+}
+
+void NodeBase::Print() const {
+ MaglevGraphLabeller labeller;
+ Print(std::cout, &labeller);
+ std::cout << std::endl;
+}
+
+void ValueNode::SetHint(compiler::InstructionOperand hint) {
+ if (!hint_.IsInvalid()) return;
+ hint_ = hint;
+ if (result_.operand().IsUnallocated()) {
+ auto operand = compiler::UnallocatedOperand::cast(result_.operand());
+ if (operand.HasSameAsInputPolicy()) {
+ input(operand.input_index()).node()->SetHint(hint);
+ }
+ }
+ if (this->Is<Phi>()) {
+ for (Input& input : *this) {
+ if (input.node()->has_id() && input.node()->id() < this->id()) {
+ input.node()->SetHint(hint);
+ }
+ }
+ }
+}
+
+void ValueNode::SetNoSpill() {
+ DCHECK(!IsConstantNode(opcode()));
+#ifdef DEBUG
+ state_ = kSpill;
+#endif // DEBUG
+ spill_ = compiler::InstructionOperand();
+}
+
+void ValueNode::SetConstantLocation() {
+ DCHECK(IsConstantNode(opcode()));
+#ifdef DEBUG
+ state_ = kSpill;
+#endif // DEBUG
+ spill_ = compiler::ConstantOperand(
+ compiler::UnallocatedOperand::cast(result().operand())
+ .virtual_register());
+}
+
+// ---
+// Check input value representation
+// ---
+
+ValueRepresentation ToValueRepresentation(MachineType type) {
+ switch (type.representation()) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ return ValueRepresentation::kTagged;
+ case MachineRepresentation::kFloat64:
+ return ValueRepresentation::kFloat64;
+ case MachineRepresentation::kWord64:
+ return ValueRepresentation::kWord64;
+ default:
+ return ValueRepresentation::kInt32;
+ }
+}
+
+void CheckValueInputIs(const NodeBase* node, int i,
+ ValueRepresentation expected,
+ MaglevGraphLabeller* graph_labeller) {
+ ValueNode* input = node->input(i).node();
+ DCHECK(!input->Is<Identity>());
+ ValueRepresentation got = input->properties().value_representation();
+ if (got != expected) {
+ std::ostringstream str;
+ str << "Type representation error: node ";
+ if (graph_labeller) {
+ str << "#" << graph_labeller->NodeId(node) << " : ";
+ }
+ str << node->opcode() << " (input @" << i << " = " << input->opcode()
+ << ") type " << got << " is not " << expected;
+ FATAL("%s", str.str().c_str());
+ }
+}
+
+void CheckValueInputIs(const NodeBase* node, int i, Opcode expected,
+ MaglevGraphLabeller* graph_labeller) {
+ ValueNode* input = node->input(i).node();
+ Opcode got = input->opcode();
+ if (got != expected) {
+ std::ostringstream str;
+ str << "Opcode error: node ";
+ if (graph_labeller) {
+ str << "#" << graph_labeller->NodeId(node) << " : ";
+ }
+ str << node->opcode() << " (input @" << i << " = " << input->opcode()
+ << ") opcode " << got << " is not " << expected;
+ FATAL("%s", str.str().c_str());
+ }
+}
+
+void CheckValueInputIsWord32(const NodeBase* node, int i,
+ MaglevGraphLabeller* graph_labeller) {
+ ValueNode* input = node->input(i).node();
+ DCHECK(!input->Is<Identity>());
+ ValueRepresentation got = input->properties().value_representation();
+ if (got != ValueRepresentation::kInt32 &&
+ got != ValueRepresentation::kUint32) {
+ std::ostringstream str;
+ str << "Type representation error: node ";
+ if (graph_labeller) {
+ str << "#" << graph_labeller->NodeId(node) << " : ";
+ }
+ str << node->opcode() << " (input @" << i << " = " << input->opcode()
+ << ") type " << got << " is not Word32 (Int32 or Uint32)";
+ FATAL("%s", str.str().c_str());
+ }
+}
+
+void GeneratorStore::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void UnsafeSmiTag::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ DCHECK_EQ(input_count(), 1);
+ CheckValueInputIsWord32(this, 0, graph_labeller);
+}
+
+void Phi::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ switch (value_representation()) {
+#define CASE_REPR(repr) \
+ case ValueRepresentation::k##repr: \
+ for (int i = 0; i < input_count(); i++) { \
+ CheckValueInputIs(this, i, ValueRepresentation::k##repr, \
+ graph_labeller); \
+ } \
+ break;
+
+ CASE_REPR(Tagged)
+ CASE_REPR(Int32)
+ CASE_REPR(Uint32)
+ CASE_REPR(Float64)
+#undef CASE_REPR
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+}
+
+void Call::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void Call::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void CallWithArrayLike::VerifyInputs(
+ MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void CallWithArrayLike::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void CallWithSpread::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void CallWithSpread::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void CallSelf::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void CallSelf::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void CallKnownJSFunction::VerifyInputs(
+ MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void CallKnownJSFunction::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void Construct::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void Construct::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void ConstructWithSpread::VerifyInputs(
+ MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void ConstructWithSpread::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void CallBuiltin::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
+ int count = input_count();
+ // Verify context.
+ if (descriptor.HasContextParameter()) {
+ CheckValueInputIs(this, count - 1, ValueRepresentation::kTagged,
+ graph_labeller);
+ count--;
+ }
+
+// {all_input_count} includes the feedback slot and vector.
+#ifdef DEBUG
+ int all_input_count = count + (has_feedback() ? 2 : 0);
+ if (descriptor.AllowVarArgs()) {
+ DCHECK_GE(all_input_count, descriptor.GetParameterCount());
+ } else {
+ DCHECK_EQ(all_input_count, descriptor.GetParameterCount());
+ }
+#endif
+ int i = 0;
+  // Check the rest of the inputs.
+ for (; i < count; ++i) {
+ MachineType type = i < descriptor.GetParameterCount()
+ ? descriptor.GetParameterType(i)
+ : MachineType::AnyTagged();
+ CheckValueInputIs(this, i, ToValueRepresentation(type), graph_labeller);
+ }
+}
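The count bookkeeping in CallBuiltin::VerifyInputs boils down to: drop the trailing context input (when the descriptor has one) and add back the two feedback inputs (slot and vector, pushed separately) before comparing against the descriptor's parameter count. A tiny hedged sketch of just that arithmetic (the function name is invented for this sketch, not the real descriptor API):

// Number of inputs the call-interface descriptor should account for, given a
// node's raw input_count. Mirrors the #ifdef DEBUG block above.
constexpr int DescriptorVisibleCount(int input_count, bool has_context,
                                     bool has_feedback) {
  return (input_count - (has_context ? 1 : 0)) + (has_feedback ? 2 : 0);
}

// E.g. five node inputs, one of them the context, plus feedback slot and
// vector appended later: the descriptor sees 4 + 2 = 6 parameters.
static_assert(DescriptorVisibleCount(5, /*has_context=*/true,
                                     /*has_feedback=*/true) == 6);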
+
+void CallBuiltin::MarkTaggedInputsAsDecompressing() {
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
+ int count = input_count();
+ // Set context.
+ if (descriptor.HasContextParameter()) {
+ input(count - 1).node()->SetTaggedResultNeedsDecompress();
+ count--;
+ }
+ int i = 0;
+ // Set the rest of the tagged inputs.
+ for (; i < count; ++i) {
+ MachineType type = i < descriptor.GetParameterCount()
+ ? descriptor.GetParameterType(i)
+ : MachineType::AnyTagged();
+ if (type.IsTagged() && !type.IsTaggedSigned()) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+ }
+}
+
+void CallRuntime::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ for (int i = 0; i < input_count(); i++) {
+ CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
+ }
+}
+
+void CallRuntime::MarkTaggedInputsAsDecompressing() {
+ for (int i = 0; i < input_count(); i++) {
+ input(i).node()->SetTaggedResultNeedsDecompress();
+ }
+}
+
+void FoldedAllocation::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ Base::VerifyInputs(graph_labeller);
+ CheckValueInputIs(this, 0, Opcode::kAllocateRaw, graph_labeller);
+}
+
+// ---
+// Reify constants
+// ---
+
+Handle<Object> ValueNode::Reify(LocalIsolate* isolate) {
+ switch (opcode()) {
+#define V(Name) \
+ case Opcode::k##Name: \
+ return this->Cast<Name>()->DoReify(isolate);
+ CONSTANT_VALUE_NODE_LIST(V)
+#undef V
+ default:
+ UNREACHABLE();
+ }
+}
+
+Handle<Object> ExternalConstant::DoReify(LocalIsolate* isolate) {
+ UNREACHABLE();
+}
+
+Handle<Object> SmiConstant::DoReify(LocalIsolate* isolate) {
+ return handle(value_, isolate);
+}
+
+Handle<Object> Int32Constant::DoReify(LocalIsolate* isolate) {
+ return isolate->factory()->NewNumber<AllocationType::kOld>(value());
+}
+
+Handle<Object> Float64Constant::DoReify(LocalIsolate* isolate) {
+ return isolate->factory()->NewNumber<AllocationType::kOld>(
+ value_.get_scalar());
+}
+
+Handle<Object> Constant::DoReify(LocalIsolate* isolate) {
+ return object_.object();
+}
+
+Handle<Object> RootConstant::DoReify(LocalIsolate* isolate) {
+ return isolate->root_handle(index());
+}
+
// ---
-// Nodes
+// Load node to registers
// ---
+
namespace {
template <typename NodeT>
void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm, Register reg) {
@@ -300,6 +679,7 @@ void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm,
}
}
} // namespace
+
void ValueNode::LoadToRegister(MaglevAssembler* masm, Register reg) {
switch (opcode()) {
#define V(Name) \
@@ -322,280 +702,352 @@ void ValueNode::LoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
UNREACHABLE();
}
}
+
void ValueNode::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
DCHECK(is_spilled());
DCHECK(!use_double_register());
- __ movq(reg,
+ __ Move(reg,
masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
}
+
void ValueNode::DoLoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
DCHECK(is_spilled());
DCHECK(use_double_register());
- __ Movsd(reg,
- masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
-}
-Handle<Object> ValueNode::Reify(LocalIsolate* isolate) {
- switch (opcode()) {
-#define V(Name) \
- case Opcode::k##Name: \
- return this->Cast<Name>()->DoReify(isolate);
- CONSTANT_VALUE_NODE_LIST(V)
-#undef V
- default:
- UNREACHABLE();
- }
+ __ Move(reg,
+ masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
}
-void ValueNode::SetNoSpillOrHint() {
- DCHECK_EQ(state_, kLastUse);
- DCHECK(!IsConstantNode(opcode()));
-#ifdef DEBUG
- state_ = kSpillOrHint;
-#endif // DEBUG
- spill_or_hint_ = compiler::InstructionOperand();
+void ExternalConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
+ __ Move(reg, reference());
}
-void ValueNode::SetConstantLocation() {
- DCHECK(IsConstantNode(opcode()));
-#ifdef DEBUG
- state_ = kSpillOrHint;
-#endif // DEBUG
- spill_or_hint_ = compiler::ConstantOperand(
- compiler::UnallocatedOperand::cast(result().operand())
- .virtual_register());
-}
-
-void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsConstant(vreg_state, this);
-}
-void SmiConstant::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {}
-Handle<Object> SmiConstant::DoReify(LocalIsolate* isolate) {
- return handle(value_, isolate);
-}
void SmiConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
- __ Move(reg, Immediate(value()));
-}
-void SmiConstant::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << value() << ")";
+ __ Move(reg, value());
}
-void Float64Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsConstant(vreg_state, this);
-}
-void Float64Constant::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {}
-Handle<Object> Float64Constant::DoReify(LocalIsolate* isolate) {
- return isolate->factory()->NewNumber<AllocationType::kOld>(value_);
+void Int32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
+ __ Move(reg, value());
}
+
void Float64Constant::DoLoadToRegister(MaglevAssembler* masm,
DoubleRegister reg) {
__ Move(reg, value());
}
-void Float64Constant::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << value() << ")";
+
+void Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
+ __ Move(reg, object_.object());
}
-void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsConstant(vreg_state, this);
+void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
+ __ LoadRoot(reg, index());
}
+
+// ---
+// Arch agnostic nodes
+// ---
+
+void ExternalConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
+void ExternalConstant::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {}
+
+void SmiConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
+void SmiConstant::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {}
+
+void Int32Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
+void Int32Constant::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {}
+
+void Float64Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
+void Float64Constant::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {}
+
+void Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
void Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
-void Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
- __ Move(reg, object_.object());
+
+void RootConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
+void RootConstant::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {}
+
+void InitialValue::SetValueLocationConstraints() {
+ // TODO(leszeks): Make this nicer.
+ result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
+ (StandardFrameConstants::kExpressionsOffset -
+ UnoptimizedFrameConstants::kRegisterFileFromFp) /
+ kSystemPointerSize +
+ source().index(),
+ kNoVreg);
}
-Handle<Object> Constant::DoReify(LocalIsolate* isolate) {
- return object_.object();
+void InitialValue::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // No-op, the value is already in the appropriate slot.
}
-void Constant::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << object_ << ")";
+
+void RegisterInput::SetValueLocationConstraints() {
+ DefineAsFixed(this, input());
+}
+void RegisterInput::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // Nothing to be done, the value is already in the register.
}
-void DeleteProperty::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
- UseFixed(context(), kContextRegister);
- UseFixed(object(), D::GetRegisterParameter(D::kObject));
- UseFixed(key(), D::GetRegisterParameter(D::kKey));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+void GetSecondReturnedValue::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister1);
}
-void DeleteProperty::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
- DCHECK_EQ(ToRegister(context()), kContextRegister);
- DCHECK_EQ(ToRegister(object()), D::GetRegisterParameter(D::kObject));
- DCHECK_EQ(ToRegister(key()), D::GetRegisterParameter(D::kKey));
- __ Move(D::GetRegisterParameter(D::kLanguageMode),
- Smi::FromInt(static_cast<int>(mode())));
- __ CallBuiltin(Builtin::kDeleteProperty);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+void GetSecondReturnedValue::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // No-op. This is just a hack that binds kReturnRegister1 to a value node.
+ // kReturnRegister1 is guaranteed to be free in the register allocator, since
+  // the previous node in the basic block is a call.
+#ifdef DEBUG
+  // Check that the previous node is a call.
+ Node* previous = nullptr;
+ for (Node* node : state.block()->nodes()) {
+ if (node == this) {
+ break;
+ }
+ previous = node;
+ }
+ DCHECK_NE(previous, nullptr);
+ DCHECK(previous->properties().is_call());
+#endif // DEBUG
}
-void DeleteProperty::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << LanguageMode2String(mode()) << ")";
+
+void Deopt::SetValueLocationConstraints() {}
+void Deopt::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
+ __ EmitEagerDeopt(this, reason());
}
-void GeneratorStore::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseAny(context_input());
- UseRegister(generator_input());
- for (int i = 0; i < num_parameters_and_registers(); i++) {
- UseAny(parameters_and_registers(i));
+void Phi::SetValueLocationConstraints() {
+ for (Input& input : *this) {
+ UseAny(input);
}
- RequireSpecificTemporary(WriteBarrierDescriptor::ObjectRegister());
- RequireSpecificTemporary(WriteBarrierDescriptor::SlotAddressRegister());
+
+ // We have to pass a policy for the result, but it is ignored during register
+ // allocation. See StraightForwardRegisterAllocator::AllocateRegisters which
+ // has special handling for Phis.
+ static const compiler::UnallocatedOperand::ExtendedPolicy kIgnoredPolicy =
+ compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT;
+
+ result().SetUnallocated(kIgnoredPolicy, kNoVreg);
}
-void GeneratorStore::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register generator = ToRegister(generator_input());
- Register array = WriteBarrierDescriptor::ObjectRegister();
- __ LoadTaggedPointerField(
- array, FieldOperand(generator,
- JSGeneratorObject::kParametersAndRegistersOffset));
+void Phi::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {}
- for (int i = 0; i < num_parameters_and_registers(); i++) {
- // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
- // register since it's a known temporary, and the write barrier slow path
- // generates better code when value == scratch. Can't use kScratchRegister
- // because CheckPageFlag uses it.
- Register value =
- __ FromAnyToRegister(parameters_and_registers(i),
- WriteBarrierDescriptor::SlotAddressRegister());
-
- ZoneLabelRef done(masm);
- DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, ZoneLabelRef done, Register value,
- Register array, GeneratorStore* node, int32_t offset) {
- ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
- // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
- // register, see comment above.
- __ CheckPageFlag(
- value, WriteBarrierDescriptor::SlotAddressRegister(),
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- zero, *done);
-
- Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
-
- __ leaq(slot_reg, FieldOperand(array, offset));
-
- // TODO(leszeks): Add an interface for flushing all double registers
- // before this Node, to avoid needing to save them here.
- SaveFPRegsMode const save_fp_mode =
- !node->register_snapshot().live_double_registers.is_empty()
- ? SaveFPRegsMode::kSave
- : SaveFPRegsMode::kIgnore;
-
- __ CallRecordWriteStub(array, slot_reg, save_fp_mode);
-
- __ jmp(*done);
- },
- done, value, array, this, FixedArray::OffsetOfElementAt(i));
-
- __ StoreTaggedField(FieldOperand(array, FixedArray::OffsetOfElementAt(i)),
- value);
- __ JumpIfSmi(value, *done, Label::kNear);
- // TODO(leszeks): This will stay either false or true throughout this loop.
- // Consider hoisting the check out of the loop and duplicating the loop into
- // with and without write barrier.
- __ CheckPageFlag(array, kScratchRegister,
- MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
- &deferred_write_barrier->deferred_code_label);
-
- __ bind(*done);
+namespace {
+
+constexpr Builtin BuiltinFor(Operation operation) {
+ switch (operation) {
+#define CASE(name) \
+ case Operation::k##name: \
+ return Builtin::k##name##_WithFeedback;
+ OPERATION_LIST(CASE)
+#undef CASE
}
+}
- // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
- // register, see comment above.
- Register context = __ FromAnyToRegister(
- context_input(), WriteBarrierDescriptor::SlotAddressRegister());
+} // namespace
- ZoneLabelRef done(masm);
- DeferredCodeInfo* deferred_context_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, ZoneLabelRef done, Register context,
- Register generator, GeneratorStore* node) {
- ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
- // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
- // register, see comment above.
- // TODO(leszeks): The context is almost always going to be in
- // old-space, consider moving this check to the fast path, maybe even
- // as the first bailout.
- __ CheckPageFlag(
- context, WriteBarrierDescriptor::SlotAddressRegister(),
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- *done);
-
- __ Move(WriteBarrierDescriptor::ObjectRegister(), generator);
- generator = WriteBarrierDescriptor::ObjectRegister();
- Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
-
- __ leaq(slot_reg,
- FieldOperand(generator, JSGeneratorObject::kContextOffset));
-
- // TODO(leszeks): Add an interface for flushing all double registers
- // before this Node, to avoid needing to save them here.
- SaveFPRegsMode const save_fp_mode =
- !node->register_snapshot().live_double_registers.is_empty()
- ? SaveFPRegsMode::kSave
- : SaveFPRegsMode::kIgnore;
-
- __ CallRecordWriteStub(generator, slot_reg, save_fp_mode);
-
- __ jmp(*done);
- },
- done, context, generator, this);
- __ StoreTaggedField(
- FieldOperand(generator, JSGeneratorObject::kContextOffset), context);
- __ AssertNotSmi(context);
- __ CheckPageFlag(generator, kScratchRegister,
- MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
- &deferred_context_write_barrier->deferred_code_label);
- __ bind(*done);
+template <class Derived, Operation kOperation>
+void UnaryWithFeedbackNode<Derived, kOperation>::SetValueLocationConstraints() {
+ using D = UnaryOp_WithFeedbackDescriptor;
+ UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
+ DefineAsFixed(this, kReturnRegister0);
+}
- __ StoreTaggedSignedField(
- FieldOperand(generator, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(suspend_id()));
- __ StoreTaggedSignedField(
- FieldOperand(generator, JSGeneratorObject::kInputOrDebugPosOffset),
- Smi::FromInt(bytecode_offset()));
+template <class Derived, Operation kOperation>
+void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ using D = UnaryOp_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
+ __ Move(kContextRegister, masm->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ CallBuiltin(BuiltinFor(kOperation));
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void GeneratorRestoreRegister::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(array_input());
- DefineAsRegister(vreg_state, this);
- set_temporaries_needed(1);
+template <class Derived, Operation kOperation>
+void BinaryWithFeedbackNode<Derived,
+ kOperation>::SetValueLocationConstraints() {
+ using D = BinaryOp_WithFeedbackDescriptor;
+ UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
+ UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
+ DefineAsFixed(this, kReturnRegister0);
}
-void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register array = ToRegister(array_input());
- Register result_reg = ToRegister(result());
- Register temp = general_temporaries().PopFirst();
- // The input and the output can alias, if that happen we use a temporary
- // register and a move at the end.
- Register value = (array == result_reg ? temp : result_reg);
+template <class Derived, Operation kOperation>
+void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ using D = BinaryOp_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
+ DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight));
+ __ Move(kContextRegister, masm->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ CallBuiltin(BuiltinFor(kOperation));
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+}
- // Loads the current value in the generator register file.
- __ DecompressAnyTagged(
- value, FieldOperand(array, FixedArray::OffsetOfElementAt(index())));
+#define DEF_OPERATION(Name) \
+ void Name::SetValueLocationConstraints() { \
+ Base::SetValueLocationConstraints(); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Base::GenerateCode(masm, state); \
+ }
+GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
+#undef DEF_OPERATION
- // And trashs it with StaleRegisterConstant.
- __ LoadRoot(kScratchRegister, RootIndex::kStaleRegister);
- __ StoreTaggedField(
- FieldOperand(array, FixedArray::OffsetOfElementAt(index())),
- kScratchRegister);
+void ConstantGapMove::SetValueLocationConstraints() { UNREACHABLE(); }
- if (value != result_reg) {
- __ Move(result_reg, value);
+namespace {
+template <typename T>
+struct GetRegister;
+template <>
+struct GetRegister<Register> {
+ static Register Get(compiler::AllocatedOperand target) {
+ return target.GetRegister();
+ }
+};
+template <>
+struct GetRegister<DoubleRegister> {
+ static DoubleRegister Get(compiler::AllocatedOperand target) {
+ return target.GetDoubleRegister();
+ }
+};
+} // namespace
+
+void ConstantGapMove::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ switch (node_->opcode()) {
+#define CASE(Name) \
+ case Opcode::k##Name: \
+ return node_->Cast<Name>()->DoLoadToRegister( \
+ masm, GetRegister<Name::OutputRegister>::Get(target()));
+ CONSTANT_VALUE_NODE_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
}
}
-void ForInPrepare::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void GapMove::SetValueLocationConstraints() { UNREACHABLE(); }
+void GapMove::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DCHECK_EQ(source().representation(), target().representation());
+ MachineRepresentation repr = source().representation();
+ if (source().IsRegister()) {
+ Register source_reg = ToRegister(source());
+ if (target().IsAnyRegister()) {
+ DCHECK(target().IsRegister());
+ __ MoveRepr(repr, ToRegister(target()), source_reg);
+ } else {
+ __ MoveRepr(repr, masm->ToMemOperand(target()), source_reg);
+ }
+ } else if (source().IsDoubleRegister()) {
+ DoubleRegister source_reg = ToDoubleRegister(source());
+ if (target().IsAnyRegister()) {
+ DCHECK(target().IsDoubleRegister());
+ __ Move(ToDoubleRegister(target()), source_reg);
+ } else {
+ __ Move(masm->ToMemOperand(target()), source_reg);
+ }
+ } else {
+ DCHECK(source().IsAnyStackSlot());
+ MemOperand source_op = masm->ToMemOperand(source());
+ if (target().IsRegister()) {
+ __ MoveRepr(repr, ToRegister(target()), source_op);
+ } else if (target().IsDoubleRegister()) {
+ __ Move(ToDoubleRegister(target()), source_op);
+ } else {
+ DCHECK(target().IsAnyStackSlot());
+ __ MoveRepr(repr, masm->ToMemOperand(target()), source_op);
+ }
+ }
+}
+
+void AssertInt32::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+}
+void AssertInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ CompareInt32(ToRegister(left_input()), ToRegister(right_input()));
+ __ Check(ToCondition(condition_), reason_);
+}
+
+void CheckUint32IsSmi::SetValueLocationConstraints() { UseRegister(input()); }
+void CheckUint32IsSmi::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
+ // Perform an unsigned comparison against Smi::kMaxValue.
+ __ Cmp(reg, Smi::kMaxValue);
+ __ EmitEagerDeoptIf(kUnsignedGreaterThan, DeoptimizeReason::kNotASmi, this);
+}
+
+void CheckedSmiUntag::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+
+void CheckedSmiUntag::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
+ // TODO(leszeks): Consider optimizing away this test and using the carry bit
+ // of the `sarl` for cases where the deopt uses the value from a different
+ // register.
+ Condition is_smi = __ CheckSmi(value);
+ __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi,
+ this);
+ __ SmiToInt32(value);
+}
+
+void UnsafeSmiUntag::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+
+void UnsafeSmiUntag::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
+ __ AssertSmi(value);
+ __ SmiToInt32(value);
+}
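CheckUint32IsSmi, CheckedSmiUntag and UnsafeSmiUntag above all lean on the Smi tagging scheme. As a quick standalone reminder — assuming 31-bit Smis with a one-bit zero tag, which is one of V8's configurations rather than a statement about this particular build — tagging is a doubling and untagging halves the (always even) encoded value:

#include <cstdint>

// Sketch only: value in the upper 31 bits, tag bit 0 in the lowest bit.
// The real encoding depends on pointer compression and word size.
constexpr int32_t SmiTagSketch(int32_t v) { return v * 2; }
constexpr bool IsSmiSketch(int32_t encoded) { return (encoded & 1) == 0; }
constexpr int32_t SmiUntagSketch(int32_t encoded) { return encoded / 2; }

static_assert(IsSmiSketch(SmiTagSketch(42)));
static_assert(SmiUntagSketch(SmiTagSketch(42)) == 42);
static_assert(SmiUntagSketch(SmiTagSketch(-7)) == -7);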
+
+int DeleteProperty::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
+ return D::GetStackParameterCount();
+}
+void DeleteProperty::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
+ UseFixed(context(), kContextRegister);
+ UseFixed(object(), D::GetRegisterParameter(D::kObject));
+ UseFixed(key(), D::GetRegisterParameter(D::kKey));
+ DefineAsFixed(this, kReturnRegister0);
+}
+void DeleteProperty::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+ DCHECK_EQ(ToRegister(object()), D::GetRegisterParameter(D::kObject));
+ DCHECK_EQ(ToRegister(key()), D::GetRegisterParameter(D::kKey));
+ __ Move(D::GetRegisterParameter(D::kLanguageMode),
+ Smi::FromInt(static_cast<int>(mode())));
+ __ CallBuiltin(Builtin::kDeleteProperty);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+}
+
+int ForInPrepare::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type;
+ return D::GetStackParameterCount();
+}
+void ForInPrepare::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type;
UseFixed(context(), kContextRegister);
UseFixed(enumerator(), D::GetRegisterParameter(D::kEnumerator));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ForInPrepare::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -609,14 +1061,18 @@ void ForInPrepare::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void ForInNext::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ForInNext::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type;
+ return D::GetStackParameterCount();
+}
+void ForInNext::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type;
UseFixed(context(), kContextRegister);
UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver));
UseFixed(cache_array(), D::GetRegisterParameter(D::kCacheArray));
UseFixed(cache_type(), D::GetRegisterParameter(D::kCacheType));
UseFixed(cache_index(), D::GetRegisterParameter(D::kCacheIndex));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ForInNext::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -626,7 +1082,7 @@ void ForInNext::GenerateCode(MaglevAssembler* masm,
DCHECK_EQ(ToRegister(cache_array()), D::GetRegisterParameter(D::kCacheArray));
DCHECK_EQ(ToRegister(cache_type()), D::GetRegisterParameter(D::kCacheType));
DCHECK_EQ(ToRegister(cache_index()), D::GetRegisterParameter(D::kCacheIndex));
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
// Feedback vector is pushed into the stack.
static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0);
static_assert(D::GetStackParameterCount() == 1);
@@ -635,11 +1091,15 @@ void ForInNext::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void GetIterator::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int GetIterator::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type;
+ return D::GetStackParameterCount();
+}
+void GetIterator::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type;
UseFixed(context(), kContextRegister);
UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void GetIterator::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -655,50 +1115,507 @@ void GetIterator::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void GetSecondReturnedValue::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister1);
+template <class Derived, Operation kOperation>
+void Int32CompareNode<Derived, kOperation>::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
}
-void GetSecondReturnedValue::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- // No-op. This is just a hack that binds kReturnRegister1 to a value node.
- // kReturnRegister1 is guaranteed to be free in the register allocator, since
- // previous node in the basic block is a call.
-#ifdef DEBUG
- // Check if the previous node is call.
- Node* previous = nullptr;
- for (Node* node : state.block()->nodes()) {
- if (node == this) {
- break;
+
+template <class Derived, Operation kOperation>
+void Int32CompareNode<Derived, kOperation>::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register result = ToRegister(this->result());
+ Label is_true, end;
+ __ CompareInt32AndJumpIf(ToRegister(left_input()), ToRegister(right_input()),
+ ConditionFor(kOperation), &is_true,
+ Label::Distance::kNear);
+ // TODO(leszeks): Investigate loading existing materialisations of roots here,
+ // if available.
+ __ LoadRoot(result, RootIndex::kFalseValue);
+ __ jmp(&end);
+ {
+ __ bind(&is_true);
+ __ LoadRoot(result, RootIndex::kTrueValue);
+ }
+ __ bind(&end);
+}
+
+#define DEF_OPERATION(Name) \
+ void Name::SetValueLocationConstraints() { \
+ Base::SetValueLocationConstraints(); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Base::GenerateCode(masm, state); \
+ }
+DEF_OPERATION(Int32Equal)
+DEF_OPERATION(Int32StrictEqual)
+DEF_OPERATION(Int32LessThan)
+DEF_OPERATION(Int32LessThanOrEqual)
+DEF_OPERATION(Int32GreaterThan)
+DEF_OPERATION(Int32GreaterThanOrEqual)
+#undef DEF_OPERATION
+
+void LoadDoubleField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
+ set_temporaries_needed(1);
+}
+void LoadDoubleField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register tmp = temps.Acquire();
+ Register object = ToRegister(object_input());
+ __ AssertNotSmi(object);
+ __ DecompressTagged(tmp, FieldMemOperand(object, offset()));
+ __ AssertNotSmi(tmp);
+ __ LoadHeapNumberValue(ToDoubleRegister(result()), tmp);
+}
+
+void LoadTaggedField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
+}
+void LoadTaggedField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ __ AssertNotSmi(object);
+ if (this->decompresses_tagged_result()) {
+ __ LoadTaggedField(ToRegister(result()), object, offset());
+ } else {
+ __ LoadTaggedFieldWithoutDecompressing(ToRegister(result()), object,
+ offset());
+ }
+}
+
+void LoadTaggedFieldByFieldIndex::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseAndClobberRegister(index_input());
+ DefineAsRegister(this);
+ set_temporaries_needed(1);
+ set_double_temporaries_needed(1);
+}
+void LoadTaggedFieldByFieldIndex::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ Register result_reg = ToRegister(result());
+ __ AssertNotSmi(object);
+ __ AssertSmi(index);
+
+ ZoneLabelRef done(masm);
+
+ // For in-object properties, the index is encoded as:
+ //
+ // index = actual_index | is_double_bit | smi_tag_bit
+ // = actual_index << 2 | is_double_bit << 1
+ //
+ // The value we want is at the field offset:
+ //
+ // (actual_index << kTaggedSizeLog2) + JSObject::kHeaderSize
+ //
+ // We could get index from actual_index by shifting away the double and smi
+ // bits. But, note that `kTaggedSizeLog2 == 2` and `index` encodes
+ // `actual_index` with a two bit shift. So, we can do some rearranging
+ // to get the offset without shifting:
+ //
+  //    ((index >> 2) << kTaggedSizeLog2) + JSObject::kHeaderSize
+ //
+ // [Expand definitions of index and kTaggedSizeLog2]
+ // = (((actual_index << 2 | is_double_bit << 1) >> 2) << 2)
+ // + JSObject::kHeaderSize
+ //
+ // [Cancel out shift down and shift up, clear is_double bit by subtracting]
+ // = (actual_index << 2 | is_double_bit << 1) - (is_double_bit << 1)
+ // + JSObject::kHeaderSize
+ //
+ // [Fold together the constants, and collapse definition of index]
+ // = index + (JSObject::kHeaderSize - (is_double_bit << 1))
+ //
+ //
+ // For out-of-object properties, the encoding is:
+ //
+ // index = (-1 - actual_index) | is_double_bit | smi_tag_bit
+ // = (-1 - actual_index) << 2 | is_double_bit << 1
+ // = (-1 - actual_index) * 4 + (is_double_bit ? 2 : 0)
+ // = -(actual_index * 4) + (is_double_bit ? 2 : 0) - 4
+ // = -(actual_index << 2) + (is_double_bit ? 2 : 0) - 4
+ //
+ // The value we want is in the property array at offset:
+ //
+ // (actual_index << kTaggedSizeLog2) + FixedArray::kHeaderSize
+ //
+ // [Expand definition of kTaggedSizeLog2]
+ // = (actual_index << 2) + FixedArray::kHeaderSize
+ //
+ // [Substitute in index]
+ // = (-index + (is_double_bit ? 2 : 0) - 4) + FixedArray::kHeaderSize
+ //
+ // [Fold together the constants]
+  //    = -index + (FixedArray::kHeaderSize + (is_double_bit ? 2 : 0) - 4)
+ //
+  // This allows us to simply negate the index register and do a load with an
+  // otherwise-constant offset.
+
+ // Check if field is a mutable double field.
+ static constexpr int32_t kIsDoubleBitMask = 1 << kSmiTagSize;
+ __ TestInt32AndJumpIfAnySet(
+ index, kIsDoubleBitMask,
+ __ MakeDeferredCode(
+ [](MaglevAssembler* masm, Register object, Register index,
+ Register result_reg, RegisterSnapshot register_snapshot,
+ ZoneLabelRef done) {
+ // The field is a Double field, a.k.a. a mutable HeapNumber.
+ static const int kIsDoubleBit = 1;
+
+ // Check if field is in-object or out-of-object. The is_double bit
+ // value doesn't matter, since negative values will stay negative.
+ Label if_outofobject, loaded_field;
+ __ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject);
+
+ // The field is located in the {object} itself.
+ {
+ // See giant comment above.
+ static_assert(kTaggedSizeLog2 == 2);
+ static_assert(kSmiTagSize == 1);
+ // We haven't untagged, so we need to sign extend.
+ __ SignExtend32To64Bits(index, index);
+ __ LoadTaggedFieldByIndex(
+ result_reg, object, index, 1,
+ JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize));
+ __ Jump(&loaded_field);
+ }
+
+ __ bind(&if_outofobject);
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register property_array = temps.Acquire();
+ // Load the property array.
+ __ LoadTaggedField(
+ property_array,
+ FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
+
+ // See giant comment above.
+ static_assert(kSmiTagSize == 1);
+ __ NegateInt32(index);
+ __ LoadTaggedFieldByIndex(
+ result_reg, property_array, index, 1,
+ FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4);
+ __ Jump(&loaded_field);
+ }
+
+ __ bind(&loaded_field);
+ // We may have transitioned in-place away from double, so check that
+ // this is a HeapNumber -- otherwise the load is fine and we don't
+ // need to copy anything anyway.
+ __ JumpIfSmi(result_reg, *done);
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register map = temps.Acquire();
+ // Hack: The temporary allocated for `map` might alias the result
+ // register. If it does, use the index register as a temporary
+ // instead (since it's clobbered anyway).
+ // TODO(leszeks): Extend the result register's lifetime to overlap
+ // the temporaries, so that this alias isn't possible.
+ if (map == result_reg) {
+ DCHECK_NE(map, index);
+ map = index;
+ }
+ __ LoadMap(map, result_reg);
+ __ JumpIfNotRoot(map, RootIndex::kHeapNumberMap, *done);
+ DoubleRegister double_value = temps.AcquireDouble();
+ __ LoadHeapNumberValue(double_value, result_reg);
+ __ AllocateHeapNumber(register_snapshot, result_reg, double_value);
+ __ Jump(*done);
+ },
+ object, index, result_reg, register_snapshot(), done));
+
+ // The field is a proper Tagged field on {object}: the is_double bit of
+ // {index} is zero, so its raw Smi value directly encodes the shifted field
+ // index (see the giant comment above).
+ {
+ static const int kIsDoubleBit = 0;
+
+ // Check if field is in-object or out-of-object. The is_double bit value
+ // doesn't matter, since negative values will stay negative.
+ Label if_outofobject;
+ __ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject);
+
+ // The field is located in the {object} itself.
+ {
+ // See giant comment above.
+ static_assert(kTaggedSizeLog2 == 2);
+ static_assert(kSmiTagSize == 1);
+ // We haven't untagged, so we need to sign extend.
+ __ SignExtend32To64Bits(index, index);
+ __ LoadTaggedFieldByIndex(
+ result_reg, object, index, 1,
+ JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize));
+ __ Jump(*done);
+ }
+
+ __ bind(&if_outofobject);
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register property_array = temps.Acquire();
+ // Load the property array.
+ __ LoadTaggedField(
+ property_array,
+ FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
+
+ // See giant comment above.
+ static_assert(kSmiTagSize == 1);
+ __ NegateInt32(index);
+ __ LoadTaggedFieldByIndex(
+ result_reg, property_array, index, 1,
+ FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4);
+ // Fallthrough to `done`.
}
- previous = node;
}
- DCHECK_NE(previous, nullptr);
- DCHECK(previous->properties().is_call());
-#endif // DEBUG
+
+ __ bind(*done);
}
-void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- // TODO(leszeks): Make this nicer.
- result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
- (StandardFrameConstants::kExpressionsOffset -
- UnoptimizedFrameConstants::kRegisterFileFromFp) /
- kSystemPointerSize +
- source().index(),
- vreg_state->AllocateVirtualRegister());
+int StoreMap::MaxCallStackArgs() const {
+ return WriteBarrierDescriptor::GetStackParameterCount();
}
-void InitialValue::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- // No-op, the value is already in the appropriate slot.
+void StoreMap::SetValueLocationConstraints() {
+ UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
+ set_temporaries_needed(1);
}
-void InitialValue::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << source().ToString() << ")";
+void StoreMap::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ // TODO(leszeks): Consider making this an arbitrary register and push/popping
+ // in the deferred path.
+ Register object = WriteBarrierDescriptor::ObjectRegister();
+ DCHECK_EQ(object, ToRegister(object_input()));
+ Register value = temps.Acquire();
+ __ Move(value, map_.object());
+
+ __ StoreTaggedFieldWithWriteBarrier(object, HeapObject::kMapOffset, value,
+ register_snapshot(),
+ MaglevAssembler::kValueIsDecompressed,
+ MaglevAssembler::kValueCannotBeSmi);
+}
+
+int StoreTaggedFieldWithWriteBarrier::MaxCallStackArgs() const {
+ return WriteBarrierDescriptor::GetStackParameterCount();
+}
+void StoreTaggedFieldWithWriteBarrier::SetValueLocationConstraints() {
+ UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
+ UseRegister(value_input());
+}
+void StoreTaggedFieldWithWriteBarrier::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ // TODO(leszeks): Consider making this an arbitrary register and push/popping
+ // in the deferred path.
+ Register object = WriteBarrierDescriptor::ObjectRegister();
+ DCHECK_EQ(object, ToRegister(object_input()));
+ Register value = ToRegister(value_input());
+
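+ // Tell the write barrier whether {value} is already decompressed, based on
+ // whether the producing node decompresses its tagged result.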
+ __ StoreTaggedFieldWithWriteBarrier(
+ object, offset(), value, register_snapshot(),
+ value_input().node()->decompresses_tagged_result()
+ ? MaglevAssembler::kValueIsDecompressed
+ : MaglevAssembler::kValueIsCompressed,
+ MaglevAssembler::kValueCanBeSmi);
+}
+
+namespace {
+
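+ // Emits a dispatch over {node}'s polymorphic access infos: {object} is
+ // checked for Smi (treated as a number), its map is loaded, and each access
+ // info is matched either against the HeapNumber map, a String instance-type
+ // range, or its explicit list of maps. On a match, {f} is invoked for that
+ // access info; if nothing matches, we eager-deopt with kWrongMap.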
+template <typename NodeT, typename Function, typename... Args>
+void EmitPolymorphicAccesses(MaglevAssembler* masm, NodeT* node,
+ Register object, Function&& f, Args&&... args) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object_map = temps.Acquire();
+
+ Label done;
+ Label is_number;
+
+ Condition is_smi = __ CheckSmi(object);
+ __ JumpIf(is_smi, &is_number);
+ __ LoadMap(object_map, object);
+
+ for (const PolymorphicAccessInfo& access_info : node->access_infos()) {
+ Label next;
+ Label map_found;
+ auto& maps = access_info.maps();
+
+ if (HasOnlyNumberMaps(base::VectorOf(maps))) {
+ __ CompareRoot(object_map, RootIndex::kHeapNumberMap);
+ __ JumpIf(kNotEqual, &next);
+ // Fallthrough... to map_found.
+ DCHECK(!is_number.is_bound());
+ __ bind(&is_number);
+ } else if (HasOnlyStringMaps(base::VectorOf(maps))) {
+ __ CompareInstanceTypeRange(object_map, FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+ __ JumpIf(kUnsignedGreaterThan, &next);
+ // Fallthrough... to map_found.
+ } else {
+ for (auto it = maps.begin(); it != maps.end(); ++it) {
+ __ CompareTagged(object_map, it->object());
+ if (it == maps.end() - 1) {
+ __ JumpIf(kNotEqual, &next);
+ // Fallthrough... to map_found.
+ } else {
+ __ JumpIf(kEqual, &map_found);
+ }
+ }
+ }
+
+ __ bind(&map_found);
+ f(masm, node, access_info, object, object_map, std::forward<Args>(args)...);
+ __ Jump(&done);
+
+ __ bind(&next);
+ }
+
+ // If no access info had a HeapNumber map, {is_number} was never bound; bind
+ // it here so that a Smi input falls through to the eager deopt below.
+ if (!is_number.is_bound()) {
+ __ bind(&is_number);
+ }
+
+ // No map matched!
+ __ EmitEagerDeopt(node, DeoptimizeReason::kWrongMap);
+ __ bind(&done);
}
-void LoadGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+} // namespace
+
+void LoadPolymorphicTaggedField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
+ set_temporaries_needed(1);
+ set_double_temporaries_needed(1);
+}
+void LoadPolymorphicTaggedField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ EmitPolymorphicAccesses(
+ masm, this, object,
+ [](MaglevAssembler* masm, LoadPolymorphicTaggedField* node,
+ const PolymorphicAccessInfo& access_info, Register object,
+ Register map, Register result) {
+ switch (access_info.kind()) {
+ case PolymorphicAccessInfo::kNotFound:
+ __ LoadRoot(result, RootIndex::kUndefinedValue);
+ break;
+ case PolymorphicAccessInfo::kConstant: {
+ Handle<Object> constant = access_info.constant();
+ if (constant->IsSmi()) {
+ __ Move(result, Smi::cast(*constant));
+ } else {
+ DCHECK(access_info.constant()->IsHeapObject());
+ __ Move(result, Handle<HeapObject>::cast(constant));
+ }
+ break;
+ }
+ case PolymorphicAccessInfo::kModuleExport: {
+ Register cell = map; // Reuse scratch.
+ __ Move(cell, access_info.cell());
+ __ AssertNotSmi(cell);
+ __ DecompressTagged(result,
+ FieldMemOperand(cell, Cell::kValueOffset));
+ break;
+ }
+ case PolymorphicAccessInfo::kDataLoad: {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister double_scratch = temps.AcquireDouble();
+ __ LoadDataField(access_info, result, object, map);
+ if (access_info.field_index().is_double()) {
+ __ LoadHeapNumberValue(double_scratch, result);
+ __ AllocateHeapNumber(node->register_snapshot(), result,
+ double_scratch);
+ }
+ break;
+ }
+ case PolymorphicAccessInfo::kStringLength:
+ __ StringLength(result, object);
+ __ SmiTag(result);
+ break;
+ }
+ },
+ ToRegister(result()));
+}
+
+void LoadPolymorphicDoubleField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
+ set_temporaries_needed(1);
+}
+void LoadPolymorphicDoubleField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ EmitPolymorphicAccesses(
+ masm, this, object,
+ [](MaglevAssembler* masm, LoadPolymorphicDoubleField* node,
+ const PolymorphicAccessInfo& access_info, Register object,
+ Register map, DoubleRegister result) {
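+ // Reuse the {map} register as a general scratch in the cases below.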
+ Register scratch = map;
+ switch (access_info.kind()) {
+ case PolymorphicAccessInfo::kDataLoad:
+ __ LoadDataField(access_info, scratch, object, map);
+ switch (access_info.field_representation().kind()) {
+ case Representation::kSmi:
+ __ SmiToDouble(result, scratch);
+ break;
+ case Representation::kDouble:
+ __ LoadHeapNumberValue(result, scratch);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PolymorphicAccessInfo::kConstant: {
+ Handle<Object> constant = access_info.constant();
+ if (constant->IsSmi()) {
+ __ Move(scratch, Smi::cast(*constant));
+ __ SmiToDouble(result, scratch);
+ } else {
+ DCHECK(constant->IsHeapNumber());
+ __ Move(result, Handle<HeapNumber>::cast(constant)->value());
+ }
+ break;
+ }
+ case PolymorphicAccessInfo::kStringLength:
+ __ StringLength(scratch, object);
+ __ Int32ToDouble(result, scratch);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ },
+ ToDoubleRegister(result()));
+}
+
+void LoadEnumCacheLength::SetValueLocationConstraints() {
+ UseRegister(map_input());
+ DefineAsRegister(this);
+}
+void LoadEnumCacheLength::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register map = ToRegister(map_input());
+ Register result_reg = ToRegister(result());
+ __ AssertMap(map);
+ __ LoadBitField<Map::Bits3::EnumLengthBits>(
+ result_reg, FieldMemOperand(map, Map::kBitField3Offset));
+}
+
+int LoadGlobal::MaxCallStackArgs() const {
+ if (typeof_mode() == TypeofMode::kNotInside) {
+ using D = CallInterfaceDescriptorFor<Builtin::kLoadGlobalIC>::type;
+ return D::GetStackParameterCount();
+ } else {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kLoadGlobalICInsideTypeof>::type;
+ return D::GetStackParameterCount();
+ }
+}
+void LoadGlobal::SetValueLocationConstraints() {
UseFixed(context(), kContextRegister);
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void LoadGlobal::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -727,16 +1644,16 @@ void LoadGlobal::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void LoadGlobal::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << name() << ")";
-}
-void StoreGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int StoreGlobal::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type;
+ return D::GetStackParameterCount();
+}
+void StoreGlobal::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type;
UseFixed(context(), kContextRegister);
UseFixed(value(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void StoreGlobal::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -751,53 +1668,238 @@ void StoreGlobal::GenerateCode(MaglevAssembler* masm,
__ CallBuiltin(Builtin::kStoreGlobalIC);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void StoreGlobal::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << name() << ")";
+
+void CheckValue::SetValueLocationConstraints() { UseRegister(target_input()); }
+void CheckValue::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register target = ToRegister(target_input());
+ __ CompareTagged(target, value().object());
+ __ EmitEagerDeoptIfNotEqual(DeoptimizeReason::kWrongValue, this);
}
-void RegisterInput::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, input());
+void CheckValueEqualsString::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kStringEqual>::type;
+ UseFixed(target_input(), D::GetRegisterParameter(D::kLeft));
+ RequireSpecificTemporary(D::GetRegisterParameter(D::kLength));
}
-void RegisterInput::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- // Nothing to be done, the value is already in the register.
+void CheckValueEqualsString::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ using D = CallInterfaceDescriptorFor<Builtin::kStringEqual>::type;
+
+ ZoneLabelRef end(masm);
+ DCHECK_EQ(D::GetRegisterParameter(D::kLeft), ToRegister(target_input()));
+ Register target = D::GetRegisterParameter(D::kLeft);
+ // The string may already be internalized, so do a fast reference check
+ // first.
+ __ CompareTagged(target, value().object());
+ __ JumpIf(kEqual, *end, Label::kNear);
+
+ __ EmitEagerDeoptIf(__ CheckSmi(target), DeoptimizeReason::kWrongValue, this);
+ __ CompareObjectTypeRange(target, FIRST_STRING_TYPE, LAST_STRING_TYPE);
+
+ __ JumpToDeferredIf(
+ kUnsignedLessThanEqual,
+ [](MaglevAssembler* masm, CheckValueEqualsString* node,
+ ZoneLabelRef end) {
+ Register target = D::GetRegisterParameter(D::kLeft);
+ Register string_length = D::GetRegisterParameter(D::kLength);
+ __ StringLength(string_length, target);
+ __ CompareInt32(string_length, node->value().length());
+ __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kWrongValue, node);
+
+ RegisterSnapshot snapshot = node->register_snapshot();
+ AddDeoptRegistersToSnapshot(&snapshot, node->eager_deopt_info());
+ {
+ SaveRegisterStateForCall save_register_state(masm, snapshot);
+ __ Move(kContextRegister, masm->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kRight), node->value().object());
+ __ CallBuiltin(Builtin::kStringEqual);
+ save_register_state.DefineSafepoint();
+ // Compare before restoring registers, so that the deopt below has the
+ // correct register set.
+ __ CompareRoot(kReturnRegister0, RootIndex::kTrueValue);
+ }
+ __ JumpIf(kEqual, *end);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kWrongValue);
+ },
+ this, end);
+
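+ // Fallthrough: {target} is not a string, so it cannot equal the expected
+ // string value; deopt.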
+ __ EmitEagerDeopt(this, DeoptimizeReason::kWrongValue);
+
+ __ bind(*end);
}
-void RegisterInput::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << input() << ")";
+
+void CheckDynamicValue::SetValueLocationConstraints() {
+ UseRegister(first_input());
+ UseRegister(second_input());
+}
+void CheckDynamicValue::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register first = ToRegister(first_input());
+ Register second = ToRegister(second_input());
+ __ CompareTagged(first, second);
+ __ EmitEagerDeoptIfNotEqual(DeoptimizeReason::kWrongValue, this);
}
-void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsConstant(vreg_state, this);
+void CheckSmi::SetValueLocationConstraints() { UseRegister(receiver_input()); }
+void CheckSmi::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi,
+ this);
}
-void RootConstant::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {}
-bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const {
- switch (index_) {
- case RootIndex::kFalseValue:
- case RootIndex::kNullValue:
- case RootIndex::kUndefinedValue:
- case RootIndex::kempty_string:
- return false;
- default:
- return true;
+
+void CheckHeapObject::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+}
+void CheckHeapObject::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kSmi, this);
+}
+
+void CheckSymbol::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+}
+void CheckSymbol::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotASymbol, this);
}
+ __ IsObjectType(object, SYMBOL_TYPE);
+ __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kNotASymbol, this);
}
-void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
- __ LoadRoot(reg, index());
+
+void CheckInstanceType::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
}
-Handle<Object> RootConstant::DoReify(LocalIsolate* isolate) {
- return isolate->root_handle(index());
+void CheckInstanceType::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongInstanceType, this);
+ }
+ __ IsObjectType(object, instance_type());
+ __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kWrongInstanceType, this);
}
-void RootConstant::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << RootsTable::name(index()) << ")";
+
+void CheckFixedArrayNonEmpty::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ set_temporaries_needed(1);
+}
+void CheckFixedArrayNonEmpty::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ __ AssertNotSmi(object);
+
+ if (v8_flags.debug_code) {
+ Label ok;
+ __ IsObjectType(object, FIXED_ARRAY_TYPE);
+ __ JumpIf(kEqual, &ok);
+ __ IsObjectType(object, FIXED_DOUBLE_ARRAY_TYPE);
+ __ Assert(kEqual, AbortReason::kOperandIsNotAFixedArray);
+ __ bind(&ok);
+ }
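+ // The (possibly double) fixed array must be non-empty; deopt with
+ // kWrongEnumIndices if its length is zero.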
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register length = temps.Acquire();
+ __ LoadTaggedSignedField(length, object, FixedArrayBase::kLengthOffset);
+ __ CompareSmiAndJumpIf(
+ length, Smi::zero(), kEqual,
+ __ GetDeoptLabel(this, DeoptimizeReason::kWrongEnumIndices));
+}
+
+void CheckInt32Condition::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+}
+void CheckInt32Condition::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ CompareInt32(ToRegister(left_input()), ToRegister(right_input()));
+ __ EmitEagerDeoptIf(NegateCondition(ToCondition(condition_)), reason_, this);
+}
+
+void CheckString::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+}
+void CheckString::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotAString, this);
+ }
+ __ CompareObjectTypeRange(object, FIRST_STRING_TYPE, LAST_STRING_TYPE);
+ __ EmitEagerDeoptIf(kUnsignedGreaterThan, DeoptimizeReason::kNotAString,
+ this);
+}
+
+void ConvertHoleToUndefined::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineSameAsFirst(this);
+}
+void ConvertHoleToUndefined::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Label done;
+ DCHECK_EQ(ToRegister(object_input()), ToRegister(result()));
+ __ JumpIfNotRoot(ToRegister(object_input()), RootIndex::kTheHoleValue, &done);
+ __ LoadRoot(ToRegister(result()), RootIndex::kUndefinedValue);
+ __ bind(&done);
+}
+
+int ConvertReceiver::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
+ return D::GetStackParameterCount();
+}
+void ConvertReceiver::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
+ UseFixed(receiver_input(), D::GetRegisterParameter(D::kInput));
+ DefineAsFixed(this, kReturnRegister0);
+}
+void ConvertReceiver::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Label convert_to_object, done;
+ Register receiver = ToRegister(receiver_input());
+ __ JumpIfSmi(receiver, &convert_to_object, Label::Distance::kNear);
+ __ JumpIfJSAnyIsNotPrimitive(receiver, &done);
+
+ compiler::JSHeapBroker* broker = masm->compilation_info()->broker();
+ if (mode_ != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(receiver, RootIndex::kUndefinedValue, &convert_global_proxy,
+ Label::Distance::kNear);
+ __ JumpIfNotRoot(receiver, RootIndex::kNullValue, &convert_to_object,
+ Label::Distance::kNear);
+ __ bind(&convert_global_proxy);
+ // Patch receiver to global proxy.
+ __ Move(
+ ToRegister(result()),
+ target_.native_context(broker).global_proxy_object(broker).object());
+ __ Jump(&done);
+ }
+
+ __ bind(&convert_to_object);
+ // ToObject needs to be run with the target context installed.
+ __ Move(kContextRegister, target_.context(broker).object());
+ __ CallBuiltin(Builtin::kToObject);
+ __ bind(&done);
}
-void CreateEmptyArrayLiteral::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int CreateEmptyArrayLiteral::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kCreateEmptyArrayLiteral>::type;
+ return D::GetStackParameterCount();
+}
+void CreateEmptyArrayLiteral::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister0);
}
void CreateEmptyArrayLiteral::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -809,23 +1911,28 @@ void CreateEmptyArrayLiteral::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CreateArrayLiteral::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int CreateObjectLiteral::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->nargs, 4);
+ return 4;
}
-void CreateArrayLiteral::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
+void CreateObjectLiteral::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister0);
+}
+void CreateObjectLiteral::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
__ Move(kContextRegister, masm->native_context().object());
- __ Push(feedback().vector);
- __ Push(TaggedIndex::FromIntptr(feedback().index()));
- __ Push(constant_elements().object());
- __ Push(Smi::FromInt(flags()));
- __ CallRuntime(Runtime::kCreateArrayLiteral);
+ __ Push(feedback().vector, TaggedIndex::FromIntptr(feedback().index()),
+ boilerplate_descriptor().object(), Smi::FromInt(flags()));
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CreateShallowArrayLiteral::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int CreateShallowArrayLiteral::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kCreateEmptyArrayLiteral>::type;
+ return D::GetStackParameterCount();
+}
+void CreateShallowArrayLiteral::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister0);
}
void CreateShallowArrayLiteral::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -841,47 +1948,29 @@ void CreateShallowArrayLiteral::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CreateObjectLiteral::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int CreateArrayLiteral::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kCreateArrayLiteral)->nargs, 4);
+ return 4;
}
-void CreateObjectLiteral::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
+void CreateArrayLiteral::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister0);
+}
+void CreateArrayLiteral::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
__ Move(kContextRegister, masm->native_context().object());
- __ Push(feedback().vector);
- __ Push(TaggedIndex::FromIntptr(feedback().index()));
- __ Push(boilerplate_descriptor().object());
- __ Push(Smi::FromInt(flags()));
- __ CallRuntime(Runtime::kCreateObjectLiteral);
+ __ Push(feedback().vector, TaggedIndex::FromIntptr(feedback().index()),
+ constant_elements().object(), Smi::FromInt(flags()));
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CreateEmptyObjectLiteral::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- DefineAsRegister(vreg_state, this);
-}
-void CreateEmptyObjectLiteral::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(result());
- RegisterSnapshot save_registers = register_snapshot();
- __ Allocate(save_registers, object, map().instance_size());
- __ Move(kScratchRegister, map().object());
- __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
- kScratchRegister);
- __ LoadRoot(kScratchRegister, RootIndex::kEmptyFixedArray);
- __ StoreTaggedField(FieldOperand(object, JSObject::kPropertiesOrHashOffset),
- kScratchRegister);
- __ StoreTaggedField(FieldOperand(object, JSObject::kElementsOffset),
- kScratchRegister);
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- for (int i = 0; i < map().GetInObjectProperties(); i++) {
- int offset = map().GetInObjectPropertyOffset(i);
- __ StoreTaggedField(FieldOperand(object, offset), kScratchRegister);
- }
+int CreateShallowObjectLiteral::MaxCallStackArgs() const {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kCreateShallowObjectLiteral>::type;
+ return D::GetStackParameterCount();
}
-
-void CreateShallowObjectLiteral::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+void CreateShallowObjectLiteral::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister0);
}
void CreateShallowObjectLiteral::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -896,8 +1985,67 @@ void CreateShallowObjectLiteral::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CreateFunctionContext::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+void AllocateRaw::SetValueLocationConstraints() { DefineAsRegister(this); }
+
+void AllocateRaw::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ Allocate(register_snapshot(), ToRegister(result()), size(),
+ allocation_type());
+}
+
+int CreateClosure::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(pretenured() ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure)
+ ->nargs,
+ 2);
+ return 2;
+}
+void CreateClosure::SetValueLocationConstraints() {
+ UseFixed(context(), kContextRegister);
+ DefineAsFixed(this, kReturnRegister0);
+}
+void CreateClosure::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Runtime::FunctionId function_id =
+ pretenured() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
+ __ Push(shared_function_info().object(), feedback_cell().object());
+ __ CallRuntime(function_id);
+}
+
+int FastCreateClosure::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
+ return D::GetStackParameterCount();
+}
+void FastCreateClosure::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
+ static_assert(D::HasContextParameter());
+ UseFixed(context(), D::ContextRegister());
+ DefineAsFixed(this, kReturnRegister0);
+}
+void FastCreateClosure::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
+
+ DCHECK_EQ(ToRegister(context()), D::ContextRegister());
+ __ Move(D::GetRegisterParameter(D::kSharedFunctionInfo),
+ shared_function_info().object());
+ __ Move(D::GetRegisterParameter(D::kFeedbackCell), feedback_cell().object());
+ __ CallBuiltin(Builtin::kFastNewClosure);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+}
+
+int CreateFunctionContext::MaxCallStackArgs() const {
+ if (scope_type() == FUNCTION_SCOPE) {
+ using D = CallInterfaceDescriptorFor<
+ Builtin::kFastNewFunctionContextFunction>::type;
+ return D::GetStackParameterCount();
+ } else {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type;
+ return D::GetStackParameterCount();
+ }
+}
+void CreateFunctionContext::SetValueLocationConstraints() {
DCHECK_LE(slot_count(),
static_cast<uint32_t>(
ConstructorBuiltins::MaximumFunctionContextSlots()));
@@ -913,7 +2061,7 @@ void CreateFunctionContext::AllocateVreg(
static_assert(D::HasContextParameter());
UseFixed(context(), D::ContextRegister());
}
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void CreateFunctionContext::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -922,7 +2070,7 @@ void CreateFunctionContext::GenerateCode(MaglevAssembler* masm,
Builtin::kFastNewFunctionContextFunction>::type;
DCHECK_EQ(ToRegister(context()), D::ContextRegister());
__ Move(D::GetRegisterParameter(D::kScopeInfo), scope_info().object());
- __ Move(D::GetRegisterParameter(D::kSlots), Immediate(slot_count()));
+ __ Move(D::GetRegisterParameter(D::kSlots), slot_count());
// TODO(leszeks): Consider inlining this allocation.
__ CallBuiltin(Builtin::kFastNewFunctionContextFunction);
} else {
@@ -931,63 +2079,18 @@ void CreateFunctionContext::GenerateCode(MaglevAssembler* masm,
CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type;
DCHECK_EQ(ToRegister(context()), D::ContextRegister());
__ Move(D::GetRegisterParameter(D::kScopeInfo), scope_info().object());
- __ Move(D::GetRegisterParameter(D::kSlots), Immediate(slot_count()));
+ __ Move(D::GetRegisterParameter(D::kSlots), slot_count());
__ CallBuiltin(Builtin::kFastNewFunctionContextEval);
}
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CreateFunctionContext::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << *scope_info().object() << ", " << slot_count() << ")";
-}
-void FastCreateClosure::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
- static_assert(D::HasContextParameter());
- UseFixed(context(), D::ContextRegister());
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int CreateRegExpLiteral::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kCreateRegExpLiteral>::type;
+ return D::GetStackParameterCount();
}
-void FastCreateClosure::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
-
- DCHECK_EQ(ToRegister(context()), D::ContextRegister());
- __ Move(D::GetRegisterParameter(D::kSharedFunctionInfo),
- shared_function_info().object());
- __ Move(D::GetRegisterParameter(D::kFeedbackCell), feedback_cell().object());
- __ CallBuiltin(Builtin::kFastNewClosure);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
-}
-void FastCreateClosure::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << *shared_function_info().object() << ", "
- << feedback_cell().object() << ")";
-}
-
-void CreateClosure::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseFixed(context(), kContextRegister);
- DefineAsFixed(vreg_state, this, kReturnRegister0);
-}
-void CreateClosure::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Runtime::FunctionId function_id =
- pretenured() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
- __ Push(shared_function_info().object());
- __ Push(feedback_cell().object());
- __ CallRuntime(function_id);
-}
-void CreateClosure::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << *shared_function_info().object() << ", "
- << feedback_cell().object();
- if (pretenured()) {
- os << " [pretenured]";
- }
- os << ")";
-}
-
-void CreateRegExpLiteral::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+void CreateRegExpLiteral::SetValueLocationConstraints() {
+ DefineAsFixed(this, kReturnRegister0);
}
void CreateRegExpLiteral::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -1001,12 +2104,15 @@ void CreateRegExpLiteral::GenerateCode(MaglevAssembler* masm,
__ CallBuiltin(Builtin::kCreateRegExpLiteral);
}
-void GetTemplateObject::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int GetTemplateObject::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kGetTemplateObject>::type;
+ return D::GetStackParameterCount();
+}
+void GetTemplateObject::SetValueLocationConstraints() {
using D = GetTemplateObjectDescriptor;
UseFixed(description(), D::GetRegisterParameter(D::kDescription));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
-
void GetTemplateObject::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
using D = GetTemplateObjectDescriptor;
@@ -1018,848 +2124,57 @@ void GetTemplateObject::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void Abort::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
-void Abort::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
- __ Push(Smi::FromInt(static_cast<int>(reason())));
- __ CallRuntime(Runtime::kAbort, 1);
- __ Trap();
-}
-void Abort::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << GetAbortReason(reason()) << ")";
-}
-
-namespace {
-Condition ToCondition(AssertCondition cond) {
- switch (cond) {
- case AssertCondition::kLess:
- return less;
- case AssertCondition::kLessOrEqual:
- return less_equal;
- case AssertCondition::kGreater:
- return greater;
- case AssertCondition::kGeaterOrEqual:
- return greater_equal;
- case AssertCondition::kEqual:
- return equal;
- case AssertCondition::kNotEqual:
- return not_equal;
- }
-}
-
-std::ostream& operator<<(std::ostream& os, const AssertCondition cond) {
- switch (cond) {
- case AssertCondition::kLess:
- os << "Less";
- break;
- case AssertCondition::kLessOrEqual:
- os << "LessOrEqual";
- break;
- case AssertCondition::kGreater:
- os << "Greater";
- break;
- case AssertCondition::kGeaterOrEqual:
- os << "GeaterOrEqual";
- break;
- case AssertCondition::kEqual:
- os << "Equal";
- break;
- case AssertCondition::kNotEqual:
- os << "NotEqual";
- break;
- }
- return os;
-}
-} // namespace
-
-void AssertInt32::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
-}
-void AssertInt32::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- __ cmpq(ToRegister(left_input()), ToRegister(right_input()));
- __ Check(ToCondition(condition_), reason_);
-}
-void AssertInt32::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << condition_ << ")";
-}
-
-void CheckMaps::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckMaps::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
-
- // TODO(victorgomes): This can happen, because we do not emit an unconditional
- // deopt when we intersect the map sets.
- if (maps().is_empty()) {
- __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap);
- __ jmp(eager_deopt_info()->deopt_entry_label());
- return;
- }
-
- if (check_type_ == CheckType::kOmitHeapObjectCheck) {
- __ AssertNotSmi(object);
- } else {
- Condition is_smi = __ CheckSmi(object);
- __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
- }
-
- Label done;
- size_t map_count = maps().size();
- for (size_t i = 0; i < map_count - 1; ++i) {
- Handle<Map> map = maps().at(i);
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- __ j(equal, &done, Label::kNear);
- }
- Handle<Map> last_map = maps().at(map_count - 1);
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), last_map);
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, this);
- __ bind(&done);
-}
-void CheckMaps::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(";
- size_t map_count = maps().size();
- if (map_count > 0) {
- for (size_t i = 0; i < map_count - 1; ++i) {
- os << maps().at(i) << ", ";
- }
- os << maps().at(map_count - 1);
- }
- os << ")";
-}
-void CheckValue::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(target_input());
-}
-void CheckValue::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register target = ToRegister(target_input());
-
- __ Cmp(target, value().object());
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongValue, this);
-}
-void CheckValue::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << *value().object() << ")";
-}
-void CheckSmi::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckSmi::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
- Condition is_smi = __ CheckSmi(object);
- __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi,
- this);
-}
-void CheckSmi::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {}
-
-void CheckNumber::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckNumber::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Label done;
- Register value = ToRegister(receiver_input());
- // If {value} is a Smi or a HeapNumber, we're done.
- __ JumpIfSmi(value, &done);
- __ CompareRoot(FieldOperand(value, HeapObject::kMapOffset),
- RootIndex::kHeapNumberMap);
- if (mode() == Object::Conversion::kToNumeric) {
- // Jump to done if it is a HeapNumber.
- __ j(equal, &done);
- // Check if it is a BigInt.
- __ LoadMap(kScratchRegister, value);
- __ cmpw(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(BIGINT_TYPE));
- }
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotANumber, this);
- __ bind(&done);
-}
-
-void CheckHeapObject::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckHeapObject::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
- Condition is_smi = __ CheckSmi(object);
- __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kSmi, this);
-}
-void CheckHeapObject::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {}
-void CheckSymbol::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckSymbol::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
- if (check_type_ == CheckType::kOmitHeapObjectCheck) {
- __ AssertNotSmi(object);
- } else {
- Condition is_smi = __ CheckSmi(object);
- __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotASymbol, this);
- }
- __ LoadMap(kScratchRegister, object);
- __ CmpInstanceType(kScratchRegister, SYMBOL_TYPE);
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotASymbol, this);
-}
-void CheckSymbol::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {}
-
-void CheckString::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckString::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
- if (check_type_ == CheckType::kOmitHeapObjectCheck) {
- __ AssertNotSmi(object);
- } else {
- Condition is_smi = __ CheckSmi(object);
- __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotAString, this);
- }
- __ LoadMap(kScratchRegister, object);
- __ CmpInstanceTypeRange(kScratchRegister, kScratchRegister, FIRST_STRING_TYPE,
- LAST_STRING_TYPE);
- __ EmitEagerDeoptIf(above, DeoptimizeReason::kNotAString, this);
-}
-void CheckString::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {}
-
-void CheckMapsWithMigration::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
-}
-void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- __ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap);
-
- // TODO(victorgomes): This can happen, because we do not emit an unconditional
- // deopt when we intersect the map sets.
- if (maps().is_empty()) {
- __ jmp(eager_deopt_info()->deopt_entry_label());
- return;
- }
-
- Register object = ToRegister(receiver_input());
-
- if (check_type_ == CheckType::kOmitHeapObjectCheck) {
- __ AssertNotSmi(object);
- } else {
- Condition is_smi = __ CheckSmi(object);
- __ j(is_smi, eager_deopt_info()->deopt_entry_label());
- }
-
- ZoneLabelRef done(masm);
- size_t map_count = maps().size();
- for (size_t i = 0; i < map_count; ++i) {
- ZoneLabelRef continue_label(masm);
- Handle<Map> map = maps().at(i);
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-
- bool last_map = (i == map_count - 1);
- if (map->is_migration_target()) {
- __ JumpToDeferredIf(
- not_equal,
- [](MaglevAssembler* masm, ZoneLabelRef continue_label,
- ZoneLabelRef done, Register object, int map_index,
- CheckMapsWithMigration* node) {
- // Reload the map to avoid needing to save it on a temporary in the
- // fast path.
- __ LoadMap(kScratchRegister, object);
- // If the map is not deprecated, we fail the map check, continue to
- // the next one.
- __ movl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitField3Offset));
- __ testl(kScratchRegister,
- Immediate(Map::Bits3::IsDeprecatedBit::kMask));
- __ j(zero, *continue_label);
-
- // Otherwise, try migrating the object. If the migration
- // returns Smi zero, then it failed the migration.
- Register return_val = Register::no_reg();
- {
- RegisterSnapshot register_snapshot = node->register_snapshot();
- // We can eager deopt after the snapshot, so make sure the nodes
- // used by the deopt are included in it.
- // TODO(leszeks): This is a bit of a footgun -- we likely want the
- // snapshot to always include eager deopt input registers.
- AddDeoptRegistersToSnapshot(&register_snapshot,
- node->eager_deopt_info());
- SaveRegisterStateForCall save_register_state(masm,
- register_snapshot);
-
- __ Push(object);
- __ Move(kContextRegister, masm->native_context().object());
- __ CallRuntime(Runtime::kTryMigrateInstance);
- save_register_state.DefineSafepoint();
-
- // Make sure the return value is preserved across the live
- // register restoring pop all.
- return_val = kReturnRegister0;
- if (node->register_snapshot().live_registers.has(return_val)) {
- DCHECK(!node->register_snapshot().live_registers.has(
- kScratchRegister));
- __ movq(kScratchRegister, return_val);
- return_val = kScratchRegister;
- }
- }
-
- // On failure, the returned value is zero
- __ cmpl(return_val, Immediate(0));
- __ j(equal, *continue_label);
-
- // The migrated object is returned on success, retry the map check.
- __ Move(object, return_val);
- // Manually load the map pointer without uncompressing it.
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset),
- node->maps().at(map_index));
- __ j(equal, *done);
- __ jmp(*continue_label);
- },
- // If this is the last map to check, we should deopt if we fail.
- // This is safe to do, since {eager_deopt_info} is ZoneAllocated.
- (last_map ? ZoneLabelRef::UnsafeFromLabelPointer(
- eager_deopt_info()->deopt_entry_label())
- : continue_label),
- done, object, i, this);
- } else if (last_map) {
- // If it is the last map and it is not a migration target, we should deopt
- // if the check fails.
- __ j(not_equal, eager_deopt_info()->deopt_entry_label());
- }
-
- if (!last_map) {
- // We don't need to bind the label for the last map.
- __ j(equal, *done);
- __ bind(*continue_label);
- }
- }
-
- __ bind(*done);
-}
-void CheckMapsWithMigration::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(";
- size_t map_count = maps().size();
- if (map_count > 0) {
- for (size_t i = 0; i < map_count - 1; ++i) {
- os << maps().at(i) << ", ";
- }
- os << maps().at(map_count - 1);
- }
- os << ")";
-}
-
-void CheckJSArrayBounds::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
- UseRegister(index_input());
-}
-void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
- Register index = ToRegister(index_input());
- __ AssertNotSmi(object);
-
- if (v8_flags.debug_code) {
- __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister);
- __ Assert(equal, AbortReason::kUnexpectedValue);
- }
- __ SmiUntagField(kScratchRegister,
- FieldOperand(object, JSArray::kLengthOffset));
- __ cmpl(index, kScratchRegister);
- __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
-}
-
-void CheckJSObjectElementsBounds::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(receiver_input());
- UseRegister(index_input());
-}
-void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(receiver_input());
- Register index = ToRegister(index_input());
- __ AssertNotSmi(object);
-
- if (v8_flags.debug_code) {
- __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ Assert(greater_equal, AbortReason::kUnexpectedValue);
- }
- __ LoadAnyTaggedField(kScratchRegister,
- FieldOperand(object, JSObject::kElementsOffset));
- if (v8_flags.debug_code) {
- __ AssertNotSmi(kScratchRegister);
- }
- __ SmiUntagField(kScratchRegister,
- FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
- __ cmpl(index, kScratchRegister);
- __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
-}
-
-void CheckInt32Condition::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
-}
-void CheckInt32Condition::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- __ cmpq(ToRegister(left_input()), ToRegister(right_input()));
- __ EmitEagerDeoptIf(NegateCondition(ToCondition(condition_)), reason_, this);
-}
-void CheckInt32Condition::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << condition_ << ")";
-}
-
-void DebugBreak::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
+void DebugBreak::SetValueLocationConstraints() {}
void DebugBreak::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- __ int3();
+ __ DebugBreak();
}
-void CheckedInternalizedString::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- set_temporaries_needed(1);
- DefineSameAsFirst(vreg_state, this);
-}
-void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(object_input());
- RegList temps = general_temporaries();
- Register map_tmp = temps.PopFirst();
-
- if (check_type_ == CheckType::kOmitHeapObjectCheck) {
- __ AssertNotSmi(object);
- } else {
- Condition is_smi = __ CheckSmi(object);
- __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
- }
-
- __ LoadMap(map_tmp, object);
- __ RecordComment("Test IsInternalizedString");
- // Go to the slow path if this is a non-string, or a non-internalised string.
- __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
- Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- static_assert((kStringTag | kInternalizedTag) == 0);
- ZoneLabelRef done(masm);
- __ JumpToDeferredIf(
- not_zero,
- [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
- CheckedInternalizedString* node, EagerDeoptInfo* deopt_info,
- Register map_tmp) {
- __ RecordComment("Deferred Test IsThinString");
- __ movw(map_tmp, FieldOperand(map_tmp, Map::kInstanceTypeOffset));
- static_assert(kThinStringTagBit > 0);
- // Deopt if this isn't a string.
- __ testw(map_tmp, Immediate(kIsNotStringMask));
- __ j(not_zero, deopt_info->deopt_entry_label());
- // Deopt if this isn't a thin string.
- __ testb(map_tmp, Immediate(kThinStringTagBit));
- __ j(zero, deopt_info->deopt_entry_label());
- __ LoadTaggedPointerField(
- object, FieldOperand(object, ThinString::kActualOffset));
- if (v8_flags.debug_code) {
- __ RecordComment("DCHECK IsInternalizedString");
- __ LoadMap(map_tmp, object);
- __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
- Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- static_assert((kStringTag | kInternalizedTag) == 0);
- __ Check(zero, AbortReason::kUnexpectedValue);
- }
- __ jmp(*done);
- },
- done, object, this, eager_deopt_info(), map_tmp);
- __ bind(*done);
-}
-
-void CheckedObjectToIndex::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- DefineAsRegister(vreg_state, this);
- set_double_temporaries_needed(1);
-}
-void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(object_input());
- Register result_reg = ToRegister(result());
-
- ZoneLabelRef done(masm);
- Condition is_smi = __ CheckSmi(object);
- __ JumpToDeferredIf(
- NegateCondition(is_smi),
- [](MaglevAssembler* masm, Register object, Register result_reg,
- ZoneLabelRef done, CheckedObjectToIndex* node) {
- Label is_string;
- __ LoadMap(kScratchRegister, object);
- __ CmpInstanceTypeRange(kScratchRegister, kScratchRegister,
- FIRST_STRING_TYPE, LAST_STRING_TYPE);
- __ j(below_equal, &is_string);
-
- __ cmpl(kScratchRegister, Immediate(HEAP_NUMBER_TYPE));
- // The IC will go generic if it encounters something other than a
- // Number or String key.
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, node);
-
- // Heap Number.
- {
- DoubleRegister number_value = node->double_temporaries().first();
- DoubleRegister converted_back = kScratchDoubleReg;
- // Convert the input float64 value to int32.
- __ Cvttsd2si(result_reg, number_value);
- // Convert that int32 value back to float64.
- __ Cvtlsi2sd(converted_back, result_reg);
- // Check that the result of the float64->int32->float64 is equal to
- // the input (i.e. that the conversion didn't truncate.
- __ Ucomisd(number_value, converted_back);
- __ j(equal, *done);
- __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
- }
-
- // String.
- __ bind(&is_string);
- {
- RegisterSnapshot snapshot = node->register_snapshot();
- snapshot.live_registers.clear(result_reg);
- DCHECK(!snapshot.live_tagged_registers.has(result_reg));
- {
- SaveRegisterStateForCall save_register_state(masm, snapshot);
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(1);
- __ Move(arg_reg_1, object);
- __ CallCFunction(
- ExternalReference::string_to_array_index_function(), 1);
- // No need for safepoint since this is a fast C call.
- __ Move(result_reg, kReturnRegister0);
- }
- __ cmpl(result_reg, Immediate(0));
- __ j(greater_equal, *done);
- __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
- }
- },
- object, result_reg, done, this);
-
- // If we didn't enter the deferred block, we're a Smi.
- if (result_reg == object) {
- __ SmiUntag(object);
- } else {
- __ SmiUntag(result_reg, object);
- }
-
- __ bind(*done);
-}
-
-void BuiltinStringFromCharCode::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- if (code_input().node()->Is<Int32Constant>()) {
- UseAny(code_input());
- } else {
- UseAndClobberRegister(code_input());
- set_temporaries_needed(1);
- }
- DefineAsRegister(vreg_state, this);
-}
-void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register result_string = ToRegister(result());
- if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
- int32_t char_code = constant->value();
- if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
- __ LoadSingleCharacterString(result_string, char_code);
- } else {
- __ AllocateTwoByteString(register_snapshot(), result_string, 1);
- __ movw(FieldOperand(result_string, SeqTwoByteString::kHeaderSize),
- Immediate(char_code & 0xFFFF));
- }
- } else {
- Register char_code = ToRegister(code_input());
- // We only need a scratch here if {char_code} alias with {result}.
- // TODO(victorgomes): Add a constraint in the register allocator for this
- // use case?
- Register scratch = general_temporaries().PopFirst();
- __ StringFromCharCode(register_snapshot(), nullptr, result_string,
- char_code, scratch);
- }
+int Abort::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kAbort)->nargs, 1);
+ return 1;
}
-
-void BuiltinStringPrototypeCharCodeAt::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseAndClobberRegister(string_input());
- UseAndClobberRegister(index_input());
- DefineAsRegister(vreg_state, this);
- set_temporaries_needed(1);
-}
-
-void BuiltinStringPrototypeCharCodeAt::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- Register string = ToRegister(string_input());
- Register index = ToRegister(index_input());
- Register scratch = general_temporaries().PopFirst();
- ZoneLabelRef done(masm);
- RegisterSnapshot save_registers = register_snapshot();
- __ StringCharCodeAt(save_registers, ToRegister(result()), string, index,
- scratch, *done);
- __ bind(*done);
-}
-
-void LoadTaggedField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- DefineAsRegister(vreg_state, this);
-}
-void LoadTaggedField::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(object_input());
- __ AssertNotSmi(object);
- __ DecompressAnyTagged(ToRegister(result()), FieldOperand(object, offset()));
-}
-void LoadTaggedField::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(0x" << std::hex << offset() << std::dec << ")";
-}
-
-void LoadDoubleField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- DefineAsRegister(vreg_state, this);
- set_temporaries_needed(1);
-}
-void LoadDoubleField::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register tmp = general_temporaries().PopFirst();
- Register object = ToRegister(object_input());
- __ AssertNotSmi(object);
- __ DecompressAnyTagged(tmp, FieldOperand(object, offset()));
- __ AssertNotSmi(tmp);
- __ Movsd(ToDoubleRegister(result()),
- FieldOperand(tmp, HeapNumber::kValueOffset));
-}
-void LoadDoubleField::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(0x" << std::hex << offset() << std::dec << ")";
-}
-
-void LoadTaggedElement::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- UseRegister(index_input());
- DefineAsRegister(vreg_state, this);
-}
-void LoadTaggedElement::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(object_input());
- Register index = ToRegister(index_input());
- Register result_reg = ToRegister(result());
- __ AssertNotSmi(object);
- if (v8_flags.debug_code) {
- __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
- __ Assert(above_equal, AbortReason::kUnexpectedValue);
- }
- __ DecompressAnyTagged(kScratchRegister,
- FieldOperand(object, JSObject::kElementsOffset));
- if (v8_flags.debug_code) {
- __ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Assert(equal, AbortReason::kUnexpectedValue);
- // Reload since CmpObjectType clobbered the scratch register.
- __ DecompressAnyTagged(kScratchRegister,
- FieldOperand(object, JSObject::kElementsOffset));
- }
- __ DecompressAnyTagged(
- result_reg, FieldOperand(kScratchRegister, index, times_tagged_size,
- FixedArray::kHeaderSize));
+void Abort::SetValueLocationConstraints() {}
+void Abort::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
+ __ Push(Smi::FromInt(static_cast<int>(reason())));
+ __ CallRuntime(Runtime::kAbort, 1);
+ __ Trap();
}
-void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- UseRegister(index_input());
- DefineAsRegister(vreg_state, this);
+void LogicalNot::SetValueLocationConstraints() {
+ UseAny(value());
+ DefineAsRegister(this);
}
-void LoadDoubleElement::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(object_input());
- Register index = ToRegister(index_input());
- DoubleRegister result_reg = ToDoubleRegister(result());
- __ AssertNotSmi(object);
- if (v8_flags.debug_code) {
- __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
- __ Assert(above_equal, AbortReason::kUnexpectedValue);
- }
- __ DecompressAnyTagged(kScratchRegister,
- FieldOperand(object, JSObject::kElementsOffset));
+void LogicalNot::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
if (v8_flags.debug_code) {
- __ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE,
- kScratchRegister);
- __ Assert(equal, AbortReason::kUnexpectedValue);
- // Reload since CmpObjectType clobbered the scratch register.
- __ DecompressAnyTagged(kScratchRegister,
- FieldOperand(object, JSObject::kElementsOffset));
+ // LogicalNot expects either TrueValue or FalseValue.
+ Label next;
+ __ JumpIf(__ IsRootConstant(value(), RootIndex::kFalseValue), &next);
+ __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &next);
+ __ Abort(AbortReason::kUnexpectedValue);
+ __ bind(&next);
}
- __ Movsd(result_reg, FieldOperand(kScratchRegister, index, times_8,
- FixedDoubleArray::kHeaderSize));
-}
-
-void StoreDoubleField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- UseRegister(value_input());
- set_temporaries_needed(1);
-}
-void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register tmp = general_temporaries().PopFirst();
- Register object = ToRegister(object_input());
- DoubleRegister value = ToDoubleRegister(value_input());
-
- __ AssertNotSmi(object);
- __ DecompressAnyTagged(tmp, FieldOperand(object, offset()));
- __ AssertNotSmi(tmp);
- __ Movsd(FieldOperand(tmp, HeapNumber::kValueOffset), value);
-}
-void StoreDoubleField::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << std::hex << offset() << std::dec << ")";
-}
-void StoreTaggedFieldNoWriteBarrier::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- UseRegister(value_input());
-}
-void StoreTaggedFieldNoWriteBarrier::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- Register object = ToRegister(object_input());
- Register value = ToRegister(value_input());
-
- __ AssertNotSmi(object);
- __ StoreTaggedField(FieldOperand(object, offset()), value);
-}
-void StoreTaggedFieldNoWriteBarrier::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << std::hex << offset() << std::dec << ")";
-}
-
-void StoreMap::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
-}
-void StoreMap::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- // TODO(leszeks): Consider making this an arbitrary register and push/popping
- // in the deferred path.
- Register object = WriteBarrierDescriptor::ObjectRegister();
- DCHECK_EQ(object, ToRegister(object_input()));
-
- __ AssertNotSmi(object);
- Register value = kScratchRegister;
- __ Move(value, map_.object());
- __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
- kScratchRegister);
-
- ZoneLabelRef done(masm);
- DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, ZoneLabelRef done, Register value,
- Register object, StoreMap* node) {
- ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
- __ CheckPageFlag(
- value, kScratchRegister,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- *done);
-
- Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
- RegList saved;
- if (node->register_snapshot().live_registers.has(slot_reg)) {
- saved.set(slot_reg);
- }
-
- __ PushAll(saved);
- __ leaq(slot_reg, FieldOperand(object, HeapObject::kMapOffset));
- SaveFPRegsMode const save_fp_mode =
- !node->register_snapshot().live_double_registers.is_empty()
- ? SaveFPRegsMode::kSave
- : SaveFPRegsMode::kIgnore;
-
- __ CallRecordWriteStub(object, slot_reg, save_fp_mode);
+ Label return_false, done;
+ __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &return_false);
+ __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
+ __ Jump(&done);
- __ PopAll(saved);
- __ jmp(*done);
- },
- done, value, object, this);
+ __ bind(&return_false);
+ __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
- __ JumpIfSmi(value, *done);
- __ CheckPageFlag(object, kScratchRegister,
- MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
- &deferred_write_barrier->deferred_code_label);
- __ bind(*done);
-}
-void StoreMap::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << *map_.object() << ")";
+ __ bind(&done);
}
-void StoreTaggedFieldWithWriteBarrier::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
- UseRegister(value_input());
+int LoadNamedGeneric::MaxCallStackArgs() const {
+ return LoadWithVectorDescriptor::GetStackParameterCount();
}
-void StoreTaggedFieldWithWriteBarrier::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- // TODO(leszeks): Consider making this an arbitrary register and push/popping
- // in the deferred path.
- Register object = WriteBarrierDescriptor::ObjectRegister();
- DCHECK_EQ(object, ToRegister(object_input()));
-
- Register value = ToRegister(value_input());
-
- __ AssertNotSmi(object);
- __ StoreTaggedField(FieldOperand(object, offset()), value);
-
- ZoneLabelRef done(masm);
- DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, ZoneLabelRef done, Register value,
- Register object, StoreTaggedFieldWithWriteBarrier* node) {
- ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
- __ CheckPageFlag(
- value, kScratchRegister,
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- *done);
-
- Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
- RegList saved;
- if (node->register_snapshot().live_registers.has(slot_reg)) {
- saved.set(slot_reg);
- }
-
- __ PushAll(saved);
- __ leaq(slot_reg, FieldOperand(object, node->offset()));
-
- SaveFPRegsMode const save_fp_mode =
- !node->register_snapshot().live_double_registers.is_empty()
- ? SaveFPRegsMode::kSave
- : SaveFPRegsMode::kIgnore;
-
- __ CallRecordWriteStub(object, slot_reg, save_fp_mode);
-
- __ PopAll(saved);
- __ jmp(*done);
- },
- done, value, object, this);
-
- __ JumpIfSmi(value, *done);
- __ CheckPageFlag(object, kScratchRegister,
- MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
- &deferred_write_barrier->deferred_code_label);
- __ bind(*done);
-}
-void StoreTaggedFieldWithWriteBarrier::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << std::hex << offset() << std::dec << ")";
-}
-
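The StoreMap and StoreTaggedFieldWithWriteBarrier bodies removed above share one generational write-barrier pattern: skip the barrier entirely when the stored value is a Smi or the object's page is not flagged as interesting, and in the deferred path bail out again if the value's page is not interesting before calling the record-write stub (saving FP registers only when doubles are live). The sketch below restates just that decision logic in standalone C++; the struct fields and the callback are hypothetical stand-ins for CheckPageFlag and CallRecordWriteStub, not V8 API.

```cpp
#include <functional>

// Hypothetical stand-ins for the assembler-level page-flag queries.
struct WriteBarrierQuery {
  bool value_is_smi;
  bool object_page_pointers_from_here_are_interesting;
  bool value_page_pointers_to_here_are_interesting_or_in_shared_heap;
  bool any_double_registers_live;
};

enum class SaveFPRegs { kIgnore, kSave };

// Mirrors the removed fast/slow-path structure: the deferred ("slow") path is
// only reached when the object's page is interesting, and it still returns
// early if the value's page is not.
void EmitWriteBarrier(const WriteBarrierQuery& q,
                      const std::function<void(SaveFPRegs)>& call_record_write_stub) {
  if (q.value_is_smi) return;  // Smis never need a barrier.
  if (!q.object_page_pointers_from_here_are_interesting) return;  // fast path
  // Deferred path.
  if (!q.value_page_pointers_to_here_are_interesting_or_in_shared_heap) return;
  call_record_write_stub(q.any_double_registers_live ? SaveFPRegs::kSave
                                                     : SaveFPRegs::kIgnore);
}
```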
-void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void LoadNamedGeneric::SetValueLocationConstraints() {
using D = LoadWithVectorDescriptor;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void LoadNamedGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -1873,19 +2188,17 @@ void LoadNamedGeneric::GenerateCode(MaglevAssembler* masm,
__ CallBuiltin(Builtin::kLoadIC);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void LoadNamedGeneric::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << name_ << ")";
-}
-void LoadNamedFromSuperGeneric::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+int LoadNamedFromSuperGeneric::MaxCallStackArgs() const {
+ return LoadWithReceiverAndVectorDescriptor::GetStackParameterCount();
+}
+void LoadNamedFromSuperGeneric::SetValueLocationConstraints() {
using D = LoadWithReceiverAndVectorDescriptor;
UseFixed(context(), kContextRegister);
UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver));
UseFixed(lookup_start_object(),
D::GetRegisterParameter(D::kLookupStartObject));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void LoadNamedFromSuperGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -1901,17 +2214,17 @@ void LoadNamedFromSuperGeneric::GenerateCode(MaglevAssembler* masm,
__ CallBuiltin(Builtin::kLoadSuperIC);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void LoadNamedFromSuperGeneric::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << name_ << ")";
-}
-void SetNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int SetNamedGeneric::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type;
+ return D::GetStackParameterCount();
+}
+void SetNamedGeneric::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void SetNamedGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -1926,64 +2239,17 @@ void SetNamedGeneric::GenerateCode(MaglevAssembler* masm,
__ CallBuiltin(Builtin::kStoreIC);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void SetNamedGeneric::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << name_ << ")";
-}
-
-void StringLength::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(object_input());
- DefineAsRegister(vreg_state, this);
-}
-void StringLength::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(object_input());
- if (v8_flags.debug_code) {
- // Use return register as temporary. Push it in case it aliases the object
- // register.
- Register tmp = ToRegister(result());
- __ Push(tmp);
- // Check if {object} is a string.
- __ AssertNotSmi(object);
- __ LoadMap(tmp, object);
- __ CmpInstanceTypeRange(tmp, tmp, FIRST_STRING_TYPE, LAST_STRING_TYPE);
- __ Check(below_equal, AbortReason::kUnexpectedValue);
- __ Pop(tmp);
- }
- __ movl(ToRegister(result()), FieldOperand(object, String::kLengthOffset));
-}
-
-void StringAt::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseAndClobberRegister(string_input());
- UseAndClobberRegister(index_input());
- DefineAsRegister(vreg_state, this);
- set_temporaries_needed(1);
-}
-void StringAt::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register result_string = ToRegister(result());
- Register string = ToRegister(string_input());
- Register index = ToRegister(index_input());
- Register scratch = general_temporaries().PopFirst();
- Register char_code = string;
-
- ZoneLabelRef done(masm);
- Label cached_one_byte_string;
- RegisterSnapshot save_registers = register_snapshot();
- __ StringCharCodeAt(save_registers, char_code, string, index, scratch,
- &cached_one_byte_string);
- __ StringFromCharCode(save_registers, &cached_one_byte_string, result_string,
- char_code, scratch);
+int DefineNamedOwnGeneric::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type;
+ return D::GetStackParameterCount();
}
-
-void DefineNamedOwnGeneric::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+void DefineNamedOwnGeneric::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void DefineNamedOwnGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -1998,18 +2264,55 @@ void DefineNamedOwnGeneric::GenerateCode(MaglevAssembler* masm,
__ CallBuiltin(Builtin::kDefineNamedOwnIC);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void DefineNamedOwnGeneric::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << name_ << ")";
+
+void EnsureWritableFastElements::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(object_input());
+ set_temporaries_needed(1);
+ DefineSameAsFirst(this);
+}
+void EnsureWritableFastElements::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register elements = ToRegister(elements_input());
+ Register result_reg = ToRegister(result());
+ DCHECK_EQ(elements, result_reg);
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ CompareMapWithRoot(elements, RootIndex::kFixedArrayMap, scratch);
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(
+ kNotEqual,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
+ Register result_reg, RegisterSnapshot snapshot) {
+ {
+ using D = CallInterfaceDescriptorFor<
+ Builtin::kCopyFastSmiOrObjectElements>::type;
+ snapshot.live_registers.clear(result_reg);
+ snapshot.live_tagged_registers.clear(result_reg);
+ SaveRegisterStateForCall save_register_state(masm, snapshot);
+ __ Move(D::GetRegisterParameter(D::kObject), object);
+ __ CallBuiltin(Builtin::kCopyFastSmiOrObjectElements);
+ save_register_state.DefineSafepoint();
+ __ Move(result_reg, kReturnRegister0);
+ }
+ __ Jump(*done);
+ },
+ done, object, result_reg, register_snapshot());
+ __ bind(*done);
}
-void SetKeyedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int SetKeyedGeneric::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type;
+ return D::GetStackParameterCount();
+}
+void SetKeyedGeneric::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
UseFixed(key_input(), D::GetRegisterParameter(D::kName));
UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void SetKeyedGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -2025,14 +2328,18 @@ void SetKeyedGeneric::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void DefineKeyedOwnGeneric::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type;
+int DefineKeyedOwnGeneric::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kDefineKeyedOwnIC>::type;
+ return D::GetStackParameterCount();
+}
+void DefineKeyedOwnGeneric::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kDefineKeyedOwnIC>::type;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
UseFixed(key_input(), D::GetRegisterParameter(D::kName));
UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ UseFixed(flags_input(), D::GetRegisterParameter(D::kFlags));
+ DefineAsFixed(this, kReturnRegister0);
}
void DefineKeyedOwnGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -2041,21 +2348,25 @@ void DefineKeyedOwnGeneric::GenerateCode(MaglevAssembler* masm,
DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
DCHECK_EQ(ToRegister(key_input()), D::GetRegisterParameter(D::kName));
DCHECK_EQ(ToRegister(value_input()), D::GetRegisterParameter(D::kValue));
+ DCHECK_EQ(ToRegister(flags_input()), D::GetRegisterParameter(D::kFlags));
__ Move(D::GetRegisterParameter(D::kSlot),
TaggedIndex::FromIntptr(feedback().index()));
- __ Move(D::GetRegisterParameter(D::kVector), feedback().vector);
+ __ Push(feedback().vector);
__ CallBuiltin(Builtin::kDefineKeyedOwnIC);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void StoreInArrayLiteralGeneric::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+int StoreInArrayLiteralGeneric::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kStoreInArrayLiteralIC>::type;
+ return D::GetStackParameterCount();
+}
+void StoreInArrayLiteralGeneric::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kStoreInArrayLiteralIC>::type;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
UseFixed(name_input(), D::GetRegisterParameter(D::kName));
UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void StoreInArrayLiteralGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -2071,936 +2382,318 @@ void StoreInArrayLiteralGeneric::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void GetKeyedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
- UseFixed(context(), kContextRegister);
- UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
- UseFixed(key_input(), D::GetRegisterParameter(D::kName));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
-}
-void GetKeyedGeneric::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
- DCHECK_EQ(ToRegister(context()), kContextRegister);
- DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
- DCHECK_EQ(ToRegister(key_input()), D::GetRegisterParameter(D::kName));
- __ Move(D::GetRegisterParameter(D::kSlot),
- TaggedIndex::FromIntptr(feedback().slot.ToInt()));
- __ Move(D::GetRegisterParameter(D::kVector), feedback().vector);
- __ CallBuiltin(Builtin::kKeyedLoadIC);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+void GeneratorRestoreRegister::SetValueLocationConstraints() {
+ UseRegister(array_input());
+ UseRegister(stale_input());
+ DefineAsRegister(this);
+ set_temporaries_needed(1);
}
+void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register temp = temps.Acquire();
+ Register array = ToRegister(array_input());
+ Register stale = ToRegister(stale_input());
+ Register result_reg = ToRegister(result());
-void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UNREACHABLE();
-}
-void GapMove::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- if (source().IsRegister()) {
- Register source_reg = ToRegister(source());
- if (target().IsAnyRegister()) {
- DCHECK(target().IsRegister());
- __ movq(ToRegister(target()), source_reg);
- } else {
- __ movq(masm->ToMemOperand(target()), source_reg);
- }
- } else if (source().IsDoubleRegister()) {
- DoubleRegister source_reg = ToDoubleRegister(source());
- if (target().IsAnyRegister()) {
- DCHECK(target().IsDoubleRegister());
- __ Movsd(ToDoubleRegister(target()), source_reg);
- } else {
- __ Movsd(masm->ToMemOperand(target()), source_reg);
- }
- } else {
- DCHECK(source().IsAnyStackSlot());
- MemOperand source_op = masm->ToMemOperand(source());
- if (target().IsRegister()) {
- __ movq(ToRegister(target()), source_op);
- } else if (target().IsDoubleRegister()) {
- __ Movsd(ToDoubleRegister(target()), source_op);
- } else {
- DCHECK(target().IsAnyStackSlot());
- __ movq(kScratchRegister, source_op);
- __ movq(masm->ToMemOperand(target()), kScratchRegister);
- }
- }
-}
-void GapMove::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << source() << " → " << target() << ")";
-}
-void ConstantGapMove::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UNREACHABLE();
-}
+  // The input and the output can alias; if that happens, we use a temporary
+  // register and a move at the end.
+ Register value = (array == result_reg ? temp : result_reg);
-namespace {
-template <typename T>
-struct GetRegister;
-template <>
-struct GetRegister<Register> {
- static Register Get(compiler::AllocatedOperand target) {
- return target.GetRegister();
- }
-};
-template <>
-struct GetRegister<DoubleRegister> {
- static DoubleRegister Get(compiler::AllocatedOperand target) {
- return target.GetDoubleRegister();
- }
-};
-} // namespace
-void ConstantGapMove::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- switch (node_->opcode()) {
-#define CASE(Name) \
- case Opcode::k##Name: \
- return node_->Cast<Name>()->DoLoadToRegister( \
- masm, GetRegister<Name::OutputRegister>::Get(target()));
- CONSTANT_VALUE_NODE_LIST(CASE)
-#undef CASE
- default:
- UNREACHABLE();
- }
-}
-void ConstantGapMove::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(";
- graph_labeller->PrintNodeLabel(os, node_);
- os << " → " << target() << ")";
-}
+  // Loads the current value from the generator register file.
+ __ DecompressTagged(
+ value, FieldMemOperand(array, FixedArray::OffsetOfElementAt(index())));
-namespace {
+  // And trashes it with StaleRegisterConstant.
+ __ StoreTaggedField(
+ FieldMemOperand(array, FixedArray::OffsetOfElementAt(index())), stale);
-constexpr Builtin BuiltinFor(Operation operation) {
- switch (operation) {
-#define CASE(name) \
- case Operation::k##name: \
- return Builtin::k##name##_WithFeedback;
- OPERATION_LIST(CASE)
-#undef CASE
+ if (value != result_reg) {
+ __ Move(result_reg, value);
}
}
-} // namespace
-
-template <class Derived, Operation kOperation>
-void UnaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- using D = UnaryOp_WithFeedbackDescriptor;
- UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
-}
-
-template <class Derived, Operation kOperation>
-void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- using D = UnaryOp_WithFeedbackDescriptor;
- DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
- __ Move(kContextRegister, masm->native_context().object());
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
- __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
- __ CallBuiltin(BuiltinFor(kOperation));
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
-}
-
-template <class Derived, Operation kOperation>
-void BinaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- using D = BinaryOp_WithFeedbackDescriptor;
- UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
- UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int GeneratorStore::MaxCallStackArgs() const {
+ return WriteBarrierDescriptor::GetStackParameterCount();
}
-
-template <class Derived, Operation kOperation>
-void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- using D = BinaryOp_WithFeedbackDescriptor;
- DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
- DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight));
- __ Move(kContextRegister, masm->native_context().object());
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
- __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
- __ CallBuiltin(BuiltinFor(kOperation));
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
-}
-
-#define DEF_OPERATION(Name) \
- void Name::AllocateVreg(MaglevVregAllocationState* vreg_state) { \
- Base::AllocateVreg(vreg_state); \
- } \
- void Name::GenerateCode(MaglevAssembler* masm, \
- const ProcessingState& state) { \
- Base::GenerateCode(masm, state); \
+void GeneratorStore::SetValueLocationConstraints() {
+ UseAny(context_input());
+ UseRegister(generator_input());
+ for (int i = 0; i < num_parameters_and_registers(); i++) {
+ UseAny(parameters_and_registers(i));
}
-GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
-#undef DEF_OPERATION
-
-void Int32AddWithOverflow::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
-}
-
-void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ addl(left, right);
- // None of the mutated input registers should be a register input into the
- // eager deopt info.
- DCHECK_REGLIST_EMPTY(RegList{left} &
- GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
- __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
-}
-
-void Int32SubtractWithOverflow::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
-}
-
-void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ subl(left, right);
- // None of the mutated input registers should be a register input into the
- // eager deopt info.
- DCHECK_REGLIST_EMPTY(RegList{left} &
- GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
- __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
-}
-
-void Int32MultiplyWithOverflow::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
- set_temporaries_needed(1);
+ RequireSpecificTemporary(WriteBarrierDescriptor::ObjectRegister());
+ RequireSpecificTemporary(WriteBarrierDescriptor::SlotAddressRegister());
}
-
-void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register result = ToRegister(this->result());
- Register right = ToRegister(right_input());
- DCHECK_EQ(result, ToRegister(left_input()));
-
- Register saved_left = general_temporaries().first();
- __ movl(saved_left, result);
- // TODO(leszeks): peephole optimise multiplication by a constant.
- __ imull(result, right);
- // None of the mutated input registers should be a register input into the
- // eager deopt info.
- DCHECK_REGLIST_EMPTY(RegList{saved_left, result} &
- GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
- __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
-
- // If the result is zero, check if either lhs or rhs is negative.
- Label end;
- __ cmpl(result, Immediate(0));
- __ j(not_zero, &end);
- {
- __ orl(saved_left, right);
- __ cmpl(saved_left, Immediate(0));
- // If one of them is negative, we must have a -0 result, which is non-int32,
- // so deopt.
- // TODO(leszeks): Consider splitting these deopts to have distinct deopt
- // reasons. Otherwise, the reason has to match the above.
- __ EmitEagerDeoptIf(less, DeoptimizeReason::kOverflow, this);
+void GeneratorStore::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register generator = ToRegister(generator_input());
+ Register array = WriteBarrierDescriptor::ObjectRegister();
+ __ LoadTaggedField(array, generator,
+ JSGeneratorObject::kParametersAndRegistersOffset);
+
+ RegisterSnapshot register_snapshot_during_store = register_snapshot();
+ // Include the array and generator registers in the register snapshot while
+ // storing parameters and registers, to avoid the write barrier clobbering
+ // them.
+ register_snapshot_during_store.live_registers.set(array);
+ register_snapshot_during_store.live_tagged_registers.set(array);
+ register_snapshot_during_store.live_registers.set(generator);
+ register_snapshot_during_store.live_tagged_registers.set(generator);
+ for (int i = 0; i < num_parameters_and_registers(); i++) {
+ // Use WriteBarrierDescriptor::SlotAddressRegister() as the temporary for
+ // the value -- it'll be clobbered by StoreTaggedFieldWithWriteBarrier since
+ // it's not in the register snapshot, but that's ok, and a clobberable value
+ // register lets the write barrier emit slightly better code.
+ Input value_input = parameters_and_registers(i);
+ Register value = __ FromAnyToRegister(
+ value_input, WriteBarrierDescriptor::SlotAddressRegister());
+ // Include the value register in the live set, in case it is used by future
+ // inputs.
+ register_snapshot_during_store.live_registers.set(value);
+ register_snapshot_during_store.live_tagged_registers.set(value);
+ __ StoreTaggedFieldWithWriteBarrier(
+ array, FixedArray::OffsetOfElementAt(i), value,
+ register_snapshot_during_store,
+ value_input.node()->decompresses_tagged_result()
+ ? MaglevAssembler::kValueIsDecompressed
+ : MaglevAssembler::kValueIsCompressed,
+ MaglevAssembler::kValueCanBeSmi);
}
- __ bind(&end);
-}
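The removed Int32MultiplyWithOverflow body deopts in two situations: the 32-bit multiply overflows, or the product is zero while either operand is negative, since the exact JavaScript result would then be -0, which no int32 can represent. A self-contained sketch of the same checks, with std::nullopt standing in for the eager deopt:

```cpp
#include <cstdint>
#include <optional>

// Returns the int32 product, or std::nullopt where the removed Maglev code
// would emit an eager deopt (overflow, or a -0 result).
std::optional<int32_t> Int32MultiplyWithOverflow(int32_t lhs, int32_t rhs) {
  int64_t product = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
  if (product < INT32_MIN || product > INT32_MAX) return std::nullopt;  // overflow
  // If the product is zero and either input is negative, the exact result is
  // -0, which is not representable as an int32.
  if (product == 0 && (lhs | rhs) < 0) return std::nullopt;
  return static_cast<int32_t>(product);
}
```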
-
-void Int32ModulusWithOverflow::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineAsFixed(vreg_state, this, rdx);
- // rax,rdx are clobbered by div.
- RequireSpecificTemporary(rax);
- RequireSpecificTemporary(rdx);
-}
-
-void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- // Using same algorithm as in EffectControlLinearizer:
- // if rhs <= 0 then
- // rhs = -rhs
- // deopt if rhs == 0
- // if lhs < 0 then
-  //     let lhs_abs = -lhs in
- // let res = lhs_abs % rhs in
- // deopt if res == 0
- // -res
- // else
- // let msk = rhs - 1 in
- // if rhs & msk == 0 then
- // lhs & msk
- // else
- // lhs % rhs
-
- DCHECK(general_temporaries().has(rax));
- DCHECK(general_temporaries().has(rdx));
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
-
- ZoneLabelRef done(masm);
- ZoneLabelRef rhs_checked(masm);
- __ cmpl(right, Immediate(0));
- __ JumpToDeferredIf(
- less_equal,
- [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register right,
- Int32ModulusWithOverflow* node) {
- __ negl(right);
- __ testl(right, right);
- __ EmitEagerDeoptIf(equal, DeoptimizeReason::kDivisionByZero, node);
- __ jmp(*rhs_checked);
- },
- rhs_checked, right, this);
- __ bind(*rhs_checked);
-
- __ cmpl(left, Immediate(0));
- __ JumpToDeferredIf(
- less,
- [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
- Register right, Int32ModulusWithOverflow* node) {
- __ negl(left);
- __ movl(rax, left);
- __ xorl(rdx, rdx);
- __ divl(right);
- __ testl(rdx, rdx);
- // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
- // allows one deopt reason per IR.
- __ EmitEagerDeoptIf(equal, DeoptimizeReason::kDivisionByZero, node);
- __ negl(rdx);
- __ jmp(*done);
- },
- done, left, right, this);
-
- Label right_not_power_of_2;
- Register mask = rax;
- __ leal(mask, Operand(right, -1));
- __ testl(right, mask);
- __ j(not_zero, &right_not_power_of_2, Label::kNear);
-
- // {right} is power of 2.
- __ andl(mask, left);
- __ movl(ToRegister(result()), mask);
- __ jmp(*done, Label::kNear);
-
- __ bind(&right_not_power_of_2);
- __ movl(rax, left);
- __ xorl(rdx, rdx);
- __ divl(right);
- // Result is implicitly written to rdx.
- DCHECK_EQ(ToRegister(result()), rdx);
-
- __ bind(*done);
-}
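The pseudocode comment in the removed Int32ModulusWithOverflow body describes the EffectControlLinearizer-style algorithm it implements. The sketch below restates it in plain C++, using 64-bit intermediates to avoid the INT32_MIN negation that the assembly handles via unsigned division; std::nullopt marks the two deopt points (division by zero and a -0 result).

```cpp
#include <cstdint>
#include <optional>

std::optional<int32_t> Int32Modulus(int32_t lhs, int32_t rhs) {
  int64_t r = rhs;
  if (r <= 0) {
    r = -r;
    if (r == 0) return std::nullopt;  // division by zero -> deopt
  }
  if (lhs < 0) {
    int64_t lhs_abs = -static_cast<int64_t>(lhs);
    int64_t res = lhs_abs % r;
    if (res == 0) return std::nullopt;  // exact result would be -0 -> deopt
    return static_cast<int32_t>(-res);
  }
  // Non-negative lhs: use a mask when rhs is a power of two, else divide.
  int64_t msk = r - 1;
  if ((r & msk) == 0) return static_cast<int32_t>(lhs & msk);
  return static_cast<int32_t>(lhs % r);
}
```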
-
-void Int32DivideWithOverflow::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineAsFixed(vreg_state, this, rax);
- // rax,rdx are clobbered by idiv.
- RequireSpecificTemporary(rax);
- RequireSpecificTemporary(rdx);
-}
-
-void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- DCHECK(general_temporaries().has(rax));
- DCHECK(general_temporaries().has(rdx));
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ movl(rax, left);
-
- // TODO(leszeks): peephole optimise division by a constant.
-
- // Sign extend eax into edx.
- __ cdq();
-
- // Pre-check for overflow, since idiv throws a division exception on overflow
- // rather than setting the overflow flag. Logic copied from
- // effect-control-linearizer.cc
-
- // Check if {right} is positive (and not zero).
- __ cmpl(right, Immediate(0));
- ZoneLabelRef done(masm);
- __ JumpToDeferredIf(
- less_equal,
- [](MaglevAssembler* masm, ZoneLabelRef done, Register right,
- Int32DivideWithOverflow* node) {
- // {right} is negative or zero.
-
- // Check if {right} is zero.
- // We've already done the compare and flags won't be cleared yet.
- // TODO(leszeks): Using kNotInt32 here, but kDivisionByZero would be
- // better. Right now all eager deopts in a node have to be the same --
- // we should allow a node to emit multiple eager deopts with different
- // reasons.
- __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);
-
- // Check if {left} is zero, as that would produce minus zero. Left is in
- // rax already.
- __ cmpl(rax, Immediate(0));
- // TODO(leszeks): Better DeoptimizeReason = kMinusZero.
- __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);
-
- // Check if {left} is kMinInt and {right} is -1, in which case we'd have
- // to return -kMinInt, which is not representable as Int32.
- __ cmpl(rax, Immediate(kMinInt));
- __ j(not_equal, *done);
- __ cmpl(right, Immediate(-1));
- __ j(not_equal, *done);
- // TODO(leszeks): Better DeoptimizeReason = kOverflow, but
- // eager_deopt_info is already configured as kNotInt32.
- __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
- },
- done, right, this);
- __ bind(*done);
+ __ StoreTaggedSignedField(generator, JSGeneratorObject::kContinuationOffset,
+ Smi::FromInt(suspend_id()));
+ __ StoreTaggedSignedField(generator,
+ JSGeneratorObject::kInputOrDebugPosOffset,
+ Smi::FromInt(bytecode_offset()));
- // Perform the actual integer division.
- __ idivl(right);
-
- // Check that the remainder is zero.
- __ cmpl(rdx, Immediate(0));
- // None of the mutated input registers should be a register input into the
- // eager deopt info.
- DCHECK_REGLIST_EMPTY(RegList{rax, rdx} &
- GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
- DCHECK_EQ(ToRegister(result()), rax);
-}
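The removed Int32DivideWithOverflow body pre-checks every case where idiv would fault or where the JavaScript result would not be an int32, and afterwards also requires the remainder to be zero. A standalone restatement, with all deopt cases collapsed into std::nullopt to mirror the single kNotInt32 reason used by the node:

```cpp
#include <cstdint>
#include <limits>
#include <optional>

std::optional<int32_t> Int32Divide(int32_t lhs, int32_t rhs) {
  if (rhs <= 0) {
    if (rhs == 0) return std::nullopt;  // division by zero
    if (lhs == 0) return std::nullopt;  // 0 / negative would be -0
    if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1) {
      return std::nullopt;              // kMinInt / -1 is not an int32
    }
  }
  // The division is now safe; the result must also be exact to stay an int32.
  if (lhs % rhs != 0) return std::nullopt;
  return lhs / rhs;
}
```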
-
-void Int32BitwiseAnd::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
-}
-
-void Int32BitwiseAnd::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ andl(left, right);
-}
-
-void Int32BitwiseOr::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
+ // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
+  // register; see the comment above. At this point we no longer need to preserve
+ // the array or generator registers, so use the original register snapshot.
+ Register context = __ FromAnyToRegister(
+ context_input(), WriteBarrierDescriptor::SlotAddressRegister());
+ __ StoreTaggedFieldWithWriteBarrier(
+ generator, JSGeneratorObject::kContextOffset, context,
+ register_snapshot(),
+ context_input().node()->decompresses_tagged_result()
+ ? MaglevAssembler::kValueIsDecompressed
+ : MaglevAssembler::kValueIsCompressed,
+ MaglevAssembler::kValueCannotBeSmi);
}
-void Int32BitwiseOr::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ orl(left, right);
+int GetKeyedGeneric::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
+ return D::GetStackParameterCount();
}
-
-void Int32BitwiseXor::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
+void GetKeyedGeneric::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
+ UseFixed(context(), kContextRegister);
+ UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
+ UseFixed(key_input(), D::GetRegisterParameter(D::kName));
+ DefineAsFixed(this, kReturnRegister0);
}
-
-void Int32BitwiseXor::GenerateCode(MaglevAssembler* masm,
+void GetKeyedGeneric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ xorl(left, right);
-}
-
-void Int32ShiftLeft::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- // Use the "shift by cl" variant of shl.
- // TODO(leszeks): peephole optimise shifts by a constant.
- UseFixed(right_input(), rcx);
- DefineSameAsFirst(vreg_state, this);
-}
-
-void Int32ShiftLeft::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- DCHECK_EQ(rcx, ToRegister(right_input()));
- __ shll_cl(left);
+ using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+ DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
+ DCHECK_EQ(ToRegister(key_input()), D::GetRegisterParameter(D::kName));
+ __ Move(D::GetRegisterParameter(D::kSlot),
+ TaggedIndex::FromIntptr(feedback().slot.ToInt()));
+ __ Move(D::GetRegisterParameter(D::kVector), feedback().vector);
+ __ CallBuiltin(Builtin::kKeyedLoadIC);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void Int32ShiftRight::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- // Use the "shift by cl" variant of sar.
- // TODO(leszeks): peephole optimise shifts by a constant.
- UseFixed(right_input(), rcx);
- DefineSameAsFirst(vreg_state, this);
+void Float64ToTagged::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
}
-
-void Int32ShiftRight::GenerateCode(MaglevAssembler* masm,
+void Float64ToTagged::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register left = ToRegister(left_input());
- DCHECK_EQ(rcx, ToRegister(right_input()));
- __ sarl_cl(left);
-}
-
-void Int32ShiftRightLogical::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- // Use the "shift by cl" variant of shr.
- // TODO(leszeks): peephole optimise shifts by a constant.
- UseFixed(right_input(), rcx);
- DefineSameAsFirst(vreg_state, this);
-}
-
-void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- DCHECK_EQ(rcx, ToRegister(right_input()));
- __ shrl_cl(left);
- // The result of >>> is unsigned, but Maglev doesn't yet track
- // signed/unsigned representations. Instead, deopt if the resulting smi would
- // be negative.
- // TODO(jgruber): Properly track signed/unsigned representations and
-  // allocate a heap number if the result is outside smi range.
- __ testl(left, Immediate((1 << 31) | (1 << 30)));
- // None of the mutated input registers should be a register input into the
- // eager deopt info.
- DCHECK_REGLIST_EMPTY(RegList{left} &
- GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kOverflow, this);
-}
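As its comment explains, the removed Int32ShiftRightLogical body produces an unsigned result but deopts whenever either of the two top bits is set, because after Smi tagging by doubling (as done elsewhere in this file) such a value would read back as negative. A small sketch of exactly that condition; the 31-bit Smi rationale is an assumption about the tagging scheme:

```cpp
#include <cstdint>
#include <optional>

// Sketch of the removed deopt condition: keep the unsigned result only if
// neither bit 30 nor bit 31 is set, i.e. it survives Smi tagging by doubling.
std::optional<int32_t> Int32ShiftRightLogical(int32_t lhs, int32_t shift) {
  uint32_t result = static_cast<uint32_t>(lhs) >> (shift & 31);
  if (result & 0xC0000000u) return std::nullopt;  // would not fit a positive Smi
  return static_cast<int32_t>(result);
}
```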
-
-namespace {
-
-constexpr Condition ConditionFor(Operation operation) {
- switch (operation) {
- case Operation::kEqual:
- case Operation::kStrictEqual:
- return equal;
- case Operation::kLessThan:
- return less;
- case Operation::kLessThanOrEqual:
- return less_equal;
- case Operation::kGreaterThan:
- return greater;
- case Operation::kGreaterThanOrEqual:
- return greater_equal;
- default:
- UNREACHABLE();
+ DoubleRegister value = ToDoubleRegister(input());
+ Register object = ToRegister(result());
+ Label box, done;
+ if (canonicalize_smi()) {
+ __ TryTruncateDoubleToInt32(object, value, &box);
+ __ SmiTagInt32(object, &box);
+ __ jmp(&done);
+ __ bind(&box);
}
-}
-
-} // namespace
-
-template <class Derived, Operation kOperation>
-void Int32CompareNode<Derived, kOperation>::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineAsRegister(vreg_state, this);
-}
-
-template <class Derived, Operation kOperation>
-void Int32CompareNode<Derived, kOperation>::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- Register result = ToRegister(this->result());
- Label is_true, end;
- __ cmpl(left, right);
- // TODO(leszeks): Investigate using cmov here.
- __ j(ConditionFor(kOperation), &is_true);
- // TODO(leszeks): Investigate loading existing materialisations of roots here,
- // if available.
- __ LoadRoot(result, RootIndex::kFalseValue);
- __ jmp(&end);
- {
- __ bind(&is_true);
- __ LoadRoot(result, RootIndex::kTrueValue);
+ __ AllocateHeapNumber(register_snapshot(), object, value);
+ if (canonicalize_smi()) {
+ __ bind(&done);
}
- __ bind(&end);
}
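When its canonicalize_smi() flag is set, the new Float64ToTagged body first tries to canonicalize the double into a Smi and only allocates a HeapNumber when that fails. The sketch below spells out the canonicalization decision; the 31-bit Smi range and the explicit -0 check are assumptions about the tagging scheme, and the Tagged/HeapNumber types are placeholders rather than V8's.

```cpp
#include <cmath>
#include <cstdint>
#include <variant>

struct HeapNumber { double value; };                // placeholder for an allocation
using Tagged = std::variant<int32_t, HeapNumber>;   // Smi payload or boxed double

Tagged Float64ToTagged(double value) {
  constexpr int32_t kSmiMin = -(1 << 30);  // assuming 31-bit Smis
  constexpr int32_t kSmiMax = (1 << 30) - 1;
  if (!std::isnan(value) && value >= kSmiMin && value <= kSmiMax) {
    int32_t truncated = static_cast<int32_t>(value);
    // Only an exact integral value that is not -0 can be canonicalized to a Smi.
    if (static_cast<double>(truncated) == value &&
        !(truncated == 0 && std::signbit(value))) {
      return truncated;  // Smi
    }
  }
  return HeapNumber{value};  // box as a heap number
}
```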
-#define DEF_OPERATION(Name) \
- void Name::AllocateVreg(MaglevVregAllocationState* vreg_state) { \
- Base::AllocateVreg(vreg_state); \
- } \
- void Name::GenerateCode(MaglevAssembler* masm, \
- const ProcessingState& state) { \
- Base::GenerateCode(masm, state); \
+void Float64Round::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+ if (kind_ == Kind::kNearest) {
+ set_double_temporaries_needed(1);
}
-DEF_OPERATION(Int32Equal)
-DEF_OPERATION(Int32StrictEqual)
-DEF_OPERATION(Int32LessThan)
-DEF_OPERATION(Int32LessThanOrEqual)
-DEF_OPERATION(Int32GreaterThan)
-DEF_OPERATION(Int32GreaterThanOrEqual)
-#undef DEF_OPERATION
-
-void Float64Add::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
}
-
-void Float64Add::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- DoubleRegister left = ToDoubleRegister(left_input());
- DoubleRegister right = ToDoubleRegister(right_input());
- __ Addsd(left, right);
-}
-
-void Float64Subtract::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
-}
-
-void Float64Subtract::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- DoubleRegister left = ToDoubleRegister(left_input());
- DoubleRegister right = ToDoubleRegister(right_input());
- __ Subsd(left, right);
-}
-
-void Float64Multiply::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
+void HoleyFloat64Box::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
}
-
-void Float64Multiply::GenerateCode(MaglevAssembler* masm,
+void HoleyFloat64Box::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- DoubleRegister left = ToDoubleRegister(left_input());
- DoubleRegister right = ToDoubleRegister(right_input());
- __ Mulsd(left, right);
+ ZoneLabelRef done(masm);
+ DoubleRegister value = ToDoubleRegister(input());
+  // Using the result register as a scratch register.
+ Register repr = ToRegister(result());
+ Register object = ToRegister(result());
+ __ DoubleToInt64Repr(repr, value);
+ __ JumpToDeferredIf(
+ __ IsInt64Constant(repr, kHoleNanInt64),
+ [](MaglevAssembler* masm, Register object, ZoneLabelRef done) {
+ __ LoadRoot(object, RootIndex::kUndefinedValue);
+ __ Jump(*done);
+ },
+ object, done);
+ __ AllocateHeapNumber(register_snapshot(), object, value);
+ __ bind(*done);
}
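HoleyFloat64Box compares the raw bit pattern of the input against the hole-NaN sentinel and materializes undefined for the hole, boxing every other value as a HeapNumber. A sketch of that check; the sentinel is taken as a parameter instead of hard-coding V8's kHoleNanInt64, and the result types are placeholders:

```cpp
#include <cstdint>
#include <cstring>
#include <variant>

struct Undefined {};
struct HeapNumber { double value; };
using BoxedValue = std::variant<Undefined, HeapNumber>;

// `hole_nan_bits` stands in for V8's kHoleNanInt64 sentinel.
BoxedValue HoleyFloat64Box(double value, uint64_t hole_nan_bits) {
  uint64_t repr;
  std::memcpy(&repr, &value, sizeof repr);        // DoubleToInt64Repr
  if (repr == hole_nan_bits) return Undefined{};  // the hole reads as undefined
  return HeapNumber{value};                       // otherwise box as a heap number
}
```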
-void Float64Divide::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineSameAsFirst(vreg_state, this);
+void CheckedSmiTagFloat64::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
}
+void CheckedSmiTagFloat64::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister value = ToDoubleRegister(input());
+ Register object = ToRegister(result());
-void Float64Divide::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- DoubleRegister left = ToDoubleRegister(left_input());
- DoubleRegister right = ToDoubleRegister(right_input());
- __ Divsd(left, right);
+ __ TryTruncateDoubleToInt32(
+ object, value, __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi));
+ __ SmiTagInt32(object, __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi));
}
-template <class Derived, Operation kOperation>
-void Float64CompareNode<Derived, kOperation>::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
- DefineAsRegister(vreg_state, this);
+void StoreFloat64::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(value_input());
}
+void StoreFloat64::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
-template <class Derived, Operation kOperation>
-void Float64CompareNode<Derived, kOperation>::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- DoubleRegister left = ToDoubleRegister(left_input());
- DoubleRegister right = ToDoubleRegister(right_input());
- Register result = ToRegister(this->result());
- Label is_true, end;
- __ Ucomisd(left, right);
- // TODO(leszeks): Investigate using cmov here.
- __ j(ConditionFor(kOperation), &is_true);
- // TODO(leszeks): Investigate loading existing materialisations of roots here,
- // if available.
- __ LoadRoot(result, RootIndex::kFalseValue);
- __ jmp(&end);
- {
- __ bind(&is_true);
- __ LoadRoot(result, RootIndex::kTrueValue);
- }
- __ bind(&end);
+ __ AssertNotSmi(object);
+ __ Move(FieldMemOperand(object, offset()), value);
}
-#define DEF_OPERATION(Name) \
- void Name::AllocateVreg(MaglevVregAllocationState* vreg_state) { \
- Base::AllocateVreg(vreg_state); \
- } \
- void Name::GenerateCode(MaglevAssembler* masm, \
- const ProcessingState& state) { \
- Base::GenerateCode(masm, state); \
- }
-DEF_OPERATION(Float64Equal)
-DEF_OPERATION(Float64StrictEqual)
-DEF_OPERATION(Float64LessThan)
-DEF_OPERATION(Float64LessThanOrEqual)
-DEF_OPERATION(Float64GreaterThan)
-DEF_OPERATION(Float64GreaterThanOrEqual)
-#undef DEF_OPERATION
-
-void CheckedSmiUntag::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(input());
- DefineSameAsFirst(vreg_state, this);
+void CheckedStoreSmiField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(value_input());
}
+void CheckedStoreSmiField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register value = ToRegister(value_input());
-void CheckedSmiUntag::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register value = ToRegister(input());
- // TODO(leszeks): Consider optimizing away this test and using the carry bit
- // of the `sarl` for cases where the deopt uses the value from a different
- // register.
Condition is_smi = __ CheckSmi(value);
__ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi,
this);
- __ SmiToInt32(value);
-}
-
-void UnsafeSmiUntag::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(input());
- DefineSameAsFirst(vreg_state, this);
-}
-
-void UnsafeSmiUntag::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register value = ToRegister(input());
- __ AssertSmi(value);
- __ SmiToInt32(value);
-}
-
-void CheckedSmiTag::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(input());
- DefineSameAsFirst(vreg_state, this);
-}
-
-void CheckedSmiTag::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register reg = ToRegister(input());
- __ addl(reg, reg);
- // None of the mutated input registers should be a register input into the
- // eager deopt info.
- DCHECK_REGLIST_EMPTY(RegList{reg} &
- GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
- __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+ __ StoreTaggedField(FieldMemOperand(object, offset()), value);
}
-void UnsafeSmiTag::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(input());
- DefineSameAsFirst(vreg_state, this);
-}
-void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register reg = ToRegister(input());
- __ addl(reg, reg);
- if (v8_flags.debug_code) {
- __ Check(no_overflow, AbortReason::kInputDoesNotFitSmi);
- }
-}
-
-void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- DefineAsConstant(vreg_state, this);
-}
-void Int32Constant::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {}
-void Int32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
- __ Move(reg, Immediate(value()));
-}
-Handle<Object> Int32Constant::DoReify(LocalIsolate* isolate) {
- return isolate->factory()->NewNumber<AllocationType::kOld>(value());
-}
-void Int32Constant::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << value() << ")";
-}
-
-void Float64Box::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(input());
- DefineAsRegister(vreg_state, this);
-}
-void Float64Box::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- DoubleRegister value = ToDoubleRegister(input());
- Register object = ToRegister(result());
-  // If we need to call the runtime, we should spill the input register even if
-  // it is not live in the next node; otherwise the allocation call might trash
-  // it.
- RegisterSnapshot save_registers = register_snapshot();
- save_registers.live_double_registers.set(value);
- __ Allocate(save_registers, object, HeapNumber::kSize);
- __ LoadRoot(kScratchRegister, RootIndex::kHeapNumberMap);
- __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
- kScratchRegister);
- __ Movsd(FieldOperand(object, HeapNumber::kValueOffset), value);
+void StoreTaggedFieldNoWriteBarrier::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(value_input());
}
+void StoreTaggedFieldNoWriteBarrier::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register value = ToRegister(value_input());
-void CheckedFloat64Unbox::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(input());
- DefineAsRegister(vreg_state, this);
-}
-void CheckedFloat64Unbox::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register value = ToRegister(input());
- Label is_not_smi, done;
- // Check if Smi.
- __ JumpIfNotSmi(value, &is_not_smi);
- // If Smi, convert to Float64.
- __ SmiToInt32(value);
- __ Cvtlsi2sd(ToDoubleRegister(result()), value);
- // TODO(v8:7700): Add a constraint to the register allocator to indicate that
- // the value in the input register is "trashed" by this node. Currently we
- // have the invariant that the input register should not be mutated when it is
- // not the same as the output register or the function does not call a
- // builtin. So, we recover the Smi value here.
- __ SmiTag(value);
- __ jmp(&done);
- __ bind(&is_not_smi);
- // Check if HeapNumber, deopt otherwise.
- __ CompareRoot(FieldOperand(value, HeapObject::kMapOffset),
- RootIndex::kHeapNumberMap);
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotANumber, this);
- __ Movsd(ToDoubleRegister(result()),
- FieldOperand(value, HeapNumber::kValueOffset));
- __ bind(&done);
+ __ AssertNotSmi(object);
+ __ StoreTaggedField(FieldMemOperand(object, offset()), value);
}
-void LogicalNot::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(value());
- DefineAsRegister(vreg_state, this);
+int StringAt::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2);
+ return std::max(2, AllocateDescriptor::GetStackParameterCount());
}
-void LogicalNot::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(value());
- Register return_value = ToRegister(result());
-
- if (v8_flags.debug_code) {
- // LogicalNot expects either TrueValue or FalseValue.
- Label next;
- __ CompareRoot(object, RootIndex::kFalseValue);
- __ j(equal, &next);
- __ CompareRoot(object, RootIndex::kTrueValue);
- __ Check(equal, AbortReason::kUnexpectedValue);
- __ bind(&next);
- }
-
- Label return_false, done;
- __ CompareRoot(object, RootIndex::kTrueValue);
- __ j(equal, &return_false, Label::kNear);
- __ LoadRoot(return_value, RootIndex::kTrueValue);
- __ jmp(&done, Label::kNear);
-
- __ bind(&return_false);
- __ LoadRoot(return_value, RootIndex::kFalseValue);
-
- __ bind(&done);
-}
-
-void SetPendingMessage::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(value());
+void StringAt::SetValueLocationConstraints() {
+ UseAndClobberRegister(string_input());
+ UseAndClobberRegister(index_input());
+ DefineAsRegister(this);
set_temporaries_needed(1);
- DefineAsRegister(vreg_state, this);
}
+void StringAt::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register result_string = ToRegister(result());
+ Register string = ToRegister(string_input());
+ Register index = ToRegister(index_input());
+ Register char_code = string;
-void SetPendingMessage::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register new_message = ToRegister(value());
- Register return_value = ToRegister(result());
-
- MemOperand pending_message_operand = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_pending_message(masm->isolate()),
- kScratchRegister);
-
- if (new_message != return_value) {
- __ Move(return_value, pending_message_operand);
- __ movq(pending_message_operand, new_message);
- } else {
- Register scratch = general_temporaries().PopFirst();
- __ Move(scratch, pending_message_operand);
- __ movq(pending_message_operand, new_message);
- __ Move(return_value, scratch);
- }
-}
+ ZoneLabelRef done(masm);
+ Label cached_one_byte_string;
-void ToBoolean::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(value());
- DefineAsRegister(vreg_state, this);
-}
-void ToBoolean::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(value());
- Register return_value = ToRegister(result());
- Label done;
- Zone* zone = masm->compilation_info()->zone();
- ZoneLabelRef object_is_true(zone), object_is_false(zone);
- // TODO(leszeks): We're likely to be calling this on an existing boolean --
- // maybe that's a case we should fast-path here and re-use that boolean value?
- __ ToBoolean(object, object_is_true, object_is_false, true);
- __ bind(*object_is_true);
- __ LoadRoot(return_value, RootIndex::kTrueValue);
- __ jmp(&done, Label::kNear);
- __ bind(*object_is_false);
- __ LoadRoot(return_value, RootIndex::kFalseValue);
- __ bind(&done);
+ RegisterSnapshot save_registers = register_snapshot();
+ __ StringCharCodeOrCodePointAt(
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt, save_registers,
+ char_code, string, index, scratch, &cached_one_byte_string);
+ __ StringFromCharCode(save_registers, &cached_one_byte_string, result_string,
+ char_code, scratch);
}
-void ToBooleanLogicalNot::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(value());
- DefineAsRegister(vreg_state, this);
+void StringLength::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
}
-void ToBooleanLogicalNot::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register object = ToRegister(value());
- Register return_value = ToRegister(result());
- Label done;
- Zone* zone = masm->compilation_info()->zone();
- ZoneLabelRef object_is_true(zone), object_is_false(zone);
- __ ToBoolean(object, object_is_true, object_is_false, true);
- __ bind(*object_is_true);
- __ LoadRoot(return_value, RootIndex::kFalseValue);
- __ jmp(&done, Label::kNear);
- __ bind(*object_is_false);
- __ LoadRoot(return_value, RootIndex::kTrueValue);
- __ bind(&done);
+void StringLength::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ StringLength(ToRegister(result()), ToRegister(object_input()));
}
-void TaggedEqual::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void TaggedEqual::SetValueLocationConstraints() {
UseRegister(lhs());
UseRegister(rhs());
- DefineAsRegister(vreg_state, this);
+ DefineAsRegister(this);
}
void TaggedEqual::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Label done, if_equal;
- __ cmp_tagged(ToRegister(lhs()), ToRegister(rhs()));
- __ j(equal, &if_equal, Label::kNear);
+ __ CmpTagged(ToRegister(lhs()), ToRegister(rhs()));
+ __ JumpIf(kEqual, &if_equal, Label::Distance::kNear);
__ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
- __ jmp(&done, Label::kNear);
+ __ Jump(&done);
__ bind(&if_equal);
__ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
__ bind(&done);
}
-void TaggedNotEqual::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void TaggedNotEqual::SetValueLocationConstraints() {
UseRegister(lhs());
UseRegister(rhs());
- DefineAsRegister(vreg_state, this);
+ DefineAsRegister(this);
}
void TaggedNotEqual::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Label done, if_equal;
- __ cmp_tagged(ToRegister(lhs()), ToRegister(rhs()));
- __ j(equal, &if_equal, Label::kNear);
+ __ CmpTagged(ToRegister(lhs()), ToRegister(rhs()));
+ __ JumpIf(kEqual, &if_equal, Label::Distance::kNear);
__ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
- __ jmp(&done, Label::kNear);
+ __ Jump(&done);
__ bind(&if_equal);
__ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
__ bind(&done);
}
-void TestInstanceOf::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int TestInstanceOf::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf_WithFeedback>::type;
+ return D::GetStackParameterCount();
+}
+void TestInstanceOf::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf_WithFeedback>::type;
UseFixed(context(), kContextRegister);
UseFixed(object(), D::GetRegisterParameter(D::kLeft));
UseFixed(callable(), D::GetRegisterParameter(D::kRight));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void TestInstanceOf::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -3011,136 +2704,85 @@ void TestInstanceOf::GenerateCode(MaglevAssembler* masm,
DCHECK_EQ(ToRegister(callable()), D::GetRegisterParameter(D::kRight));
#endif
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
__ CallBuiltin(Builtin::kInstanceOf_WithFeedback);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void TestUndetectable::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void TestTypeOf::SetValueLocationConstraints() {
UseRegister(value());
- set_temporaries_needed(1);
- DefineAsRegister(vreg_state, this);
+ DefineAsRegister(this);
}
-void TestUndetectable::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
+void TestTypeOf::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
Register object = ToRegister(value());
- Register return_value = ToRegister(result());
- Register scratch = general_temporaries().PopFirst();
+ // Use return register as temporary if needed. Be careful: {object} and
+ // {scratch} could alias (which means that {object} should be considered dead
+ // once {scratch} has been written to).
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ temps.Include(ToRegister(result()));
- Label return_false, done;
- __ JumpIfSmi(object, &return_false, Label::kNear);
- // For heap objects, check the map's undetectable bit.
- __ LoadMap(scratch, object);
- __ testl(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(Map::Bits1::IsUndetectableBit::kMask));
- __ j(zero, &return_false, Label::kNear);
+ Label is_true, is_false, done;
+ __ TestTypeOf(object, literal_, &is_true, Label::Distance::kNear, true,
+ &is_false, Label::Distance::kNear, false);
+ // Fallthrough into true.
+ __ bind(&is_true);
+ __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
+ __ Jump(&done, Label::Distance::kNear);
+ __ bind(&is_false);
+ __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
+ __ bind(&done);
+}
+void ToBoolean::SetValueLocationConstraints() {
+ UseRegister(value());
+ DefineAsRegister(this);
+}
+void ToBoolean::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(value());
+ Register return_value = ToRegister(result());
+ Label done;
+ ZoneLabelRef object_is_true(masm), object_is_false(masm);
+ // TODO(leszeks): We're likely to be calling this on an existing boolean --
+ // maybe that's a case we should fast-path here and re-use that boolean value?
+ __ ToBoolean(object, object_is_true, object_is_false, true);
+ __ bind(*object_is_true);
__ LoadRoot(return_value, RootIndex::kTrueValue);
- __ jmp(&done, Label::kNear);
-
- __ bind(&return_false);
+ __ Jump(&done);
+ __ bind(*object_is_false);
__ LoadRoot(return_value, RootIndex::kFalseValue);
-
__ bind(&done);
}
-void TestTypeOf::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void ToBooleanLogicalNot::SetValueLocationConstraints() {
UseRegister(value());
- DefineAsRegister(vreg_state, this);
+ DefineAsRegister(this);
}
-void TestTypeOf::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag;
+void ToBooleanLogicalNot::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
Register object = ToRegister(value());
- // Use return register as temporary if needed.
- Register tmp = ToRegister(result());
- Label is_true, is_false, done;
- switch (literal_) {
- case LiteralFlag::kNumber:
- __ JumpIfSmi(object, &is_true, Label::kNear);
- __ CompareRoot(FieldOperand(object, HeapObject::kMapOffset),
- RootIndex::kHeapNumberMap);
- __ j(not_equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kString:
- __ JumpIfSmi(object, &is_false, Label::kNear);
- __ LoadMap(tmp, object);
- __ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(FIRST_NONSTRING_TYPE));
- __ j(greater_equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kSymbol:
- __ JumpIfSmi(object, &is_false, Label::kNear);
- __ LoadMap(tmp, object);
- __ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(SYMBOL_TYPE));
- __ j(not_equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kBoolean:
- __ CompareRoot(object, RootIndex::kTrueValue);
- __ j(equal, &is_true, Label::kNear);
- __ CompareRoot(object, RootIndex::kFalseValue);
- __ j(not_equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kBigInt:
- __ JumpIfSmi(object, &is_false, Label::kNear);
- __ LoadMap(tmp, object);
- __ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(BIGINT_TYPE));
- __ j(not_equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kUndefined:
- __ JumpIfSmi(object, &is_false, Label::kNear);
- // Check it has the undetectable bit set and it is not null.
- __ LoadMap(tmp, object);
- __ testl(FieldOperand(tmp, Map::kBitFieldOffset),
- Immediate(Map::Bits1::IsUndetectableBit::kMask));
- __ j(zero, &is_false, Label::kNear);
- __ CompareRoot(object, RootIndex::kNullValue);
- __ j(equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kFunction:
- __ JumpIfSmi(object, &is_false, Label::kNear);
- // Check if callable bit is set and not undetectable.
- __ LoadMap(tmp, object);
- __ movl(tmp, FieldOperand(tmp, Map::kBitFieldOffset));
- __ andl(tmp, Immediate(Map::Bits1::IsUndetectableBit::kMask |
- Map::Bits1::IsCallableBit::kMask));
- __ cmpl(tmp, Immediate(Map::Bits1::IsCallableBit::kMask));
- __ j(not_equal, &is_false, Label::kNear);
- break;
- case LiteralFlag::kObject:
- __ JumpIfSmi(object, &is_false, Label::kNear);
- // If the object is null then return true.
- __ CompareRoot(object, RootIndex::kNullValue);
- __ j(equal, &is_true, Label::kNear);
- // Check if the object is a receiver type,
- __ LoadMap(tmp, object);
- __ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(FIRST_JS_RECEIVER_TYPE));
- __ j(less, &is_false, Label::kNear);
- // ... and is not undefined (undetectable) nor callable.
- __ testl(FieldOperand(tmp, Map::kBitFieldOffset),
- Immediate(Map::Bits1::IsUndetectableBit::kMask |
- Map::Bits1::IsCallableBit::kMask));
- __ j(not_zero, &is_false, Label::kNear);
- break;
- case LiteralFlag::kOther:
- UNREACHABLE();
- }
- __ bind(&is_true);
- __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
- __ jmp(&done, Label::kNear);
- __ bind(&is_false);
- __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
+ Register return_value = ToRegister(result());
+ Label done;
+ ZoneLabelRef object_is_true(masm), object_is_false(masm);
+ __ ToBoolean(object, object_is_true, object_is_false, true);
+ __ bind(*object_is_true);
+ __ LoadRoot(return_value, RootIndex::kFalseValue);
+ __ Jump(&done);
+ __ bind(*object_is_false);
+ __ LoadRoot(return_value, RootIndex::kTrueValue);
__ bind(&done);
}
-void ToName::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ToName::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kToName>::type;
+ return D::GetStackParameterCount();
+}
+void ToName::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kToName>::type;
UseFixed(context(), kContextRegister);
UseFixed(value_input(), D::GetRegisterParameter(D::kInput));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ToName::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
#ifdef DEBUG
@@ -3152,11 +2794,14 @@ void ToName::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void ToNumberOrNumeric::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ToNumberOrNumeric::MaxCallStackArgs() const {
+ return TypeConversionDescriptor::GetStackParameterCount();
+}
+void ToNumberOrNumeric::SetValueLocationConstraints() {
using D = TypeConversionDescriptor;
UseFixed(context(), kContextRegister);
UseFixed(value_input(), D::GetRegisterParameter(D::kArgument));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ToNumberOrNumeric::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -3171,11 +2816,15 @@ void ToNumberOrNumeric::GenerateCode(MaglevAssembler* masm,
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void ToObject::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ToObject::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
+ return D::GetStackParameterCount();
+}
+void ToObject::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
UseFixed(context(), kContextRegister);
UseFixed(value_input(), D::GetRegisterParameter(D::kInput));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ToObject::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -3187,22 +2836,23 @@ void ToObject::GenerateCode(MaglevAssembler* masm,
Register value = ToRegister(value_input());
Label call_builtin, done;
// Avoid the builtin call if {value} is a JSReceiver.
- __ JumpIfSmi(value, &call_builtin);
- __ LoadMap(kScratchRegister, value);
- __ cmpw(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(FIRST_JS_RECEIVER_TYPE));
- __ j(greater_equal, &done);
+ __ JumpIfSmi(value, &call_builtin, Label::Distance::kNear);
+ __ JumpIfJSAnyIsNotPrimitive(value, &done, Label::Distance::kNear);
__ bind(&call_builtin);
__ CallBuiltin(Builtin::kToObject);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
__ bind(&done);
}
-void ToString::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ToString::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kToString>::type;
+ return D::GetStackParameterCount();
+}
+void ToString::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<Builtin::kToString>::type;
UseFixed(context(), kContextRegister);
UseFixed(value_input(), D::GetRegisterParameter(D::kO));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ToString::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -3214,86 +2864,295 @@ void ToString::GenerateCode(MaglevAssembler* masm,
Register value = ToRegister(value_input());
Label call_builtin, done;
// Avoid the builtin call if {value} is a string.
- __ JumpIfSmi(value, &call_builtin);
- __ LoadMap(kScratchRegister, value);
- __ cmpw(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(FIRST_NONSTRING_TYPE));
- __ j(less, &done);
+ __ JumpIfSmi(value, &call_builtin, Label::Distance::kNear);
+ __ CompareObjectType(value, FIRST_NONSTRING_TYPE);
+ __ JumpIf(kUnsignedLessThan, &done, Label::Distance::kNear);
__ bind(&call_builtin);
__ CallBuiltin(Builtin::kToString);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
__ bind(&done);
}
-void ChangeInt32ToFloat64::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ThrowReferenceErrorIfHole::MaxCallStackArgs() const { return 1; }
+void ThrowReferenceErrorIfHole::SetValueLocationConstraints() {
+ UseAny(value());
+}
+void ThrowReferenceErrorIfHole::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
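+ // The hole check is expected to pass in the common case, so the throw
+ // sequence is emitted as deferred (out-of-line) code.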
+ __ JumpToDeferredIf(
+ __ IsRootConstant(value(), RootIndex::kTheHoleValue),
+ [](MaglevAssembler* masm, ThrowReferenceErrorIfHole* node) {
+ __ Move(kContextRegister, masm->native_context().object());
+ __ Push(node->name().object());
+ __ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, 1);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
+ __ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ },
+ this);
+}
+
+int ThrowSuperNotCalledIfHole::MaxCallStackArgs() const { return 0; }
+void ThrowSuperNotCalledIfHole::SetValueLocationConstraints() {
+ UseAny(value());
+}
+void ThrowSuperNotCalledIfHole::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ JumpToDeferredIf(
+ __ IsRootConstant(value(), RootIndex::kTheHoleValue),
+ [](MaglevAssembler* masm, ThrowSuperNotCalledIfHole* node) {
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kThrowSuperNotCalled, 0);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
+ __ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ },
+ this);
+}
+
+int ThrowSuperAlreadyCalledIfNotHole::MaxCallStackArgs() const { return 0; }
+void ThrowSuperAlreadyCalledIfNotHole::SetValueLocationConstraints() {
+ UseAny(value());
+}
+void ThrowSuperAlreadyCalledIfNotHole::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ __ JumpToDeferredIf(
+ NegateCondition(__ IsRootConstant(value(), RootIndex::kTheHoleValue)),
+ [](MaglevAssembler* masm, ThrowSuperAlreadyCalledIfNotHole* node) {
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, 0);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
+ __ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ },
+ this);
+}
+
+void TruncateUint32ToInt32::SetValueLocationConstraints() {
UseRegister(input());
- DefineAsRegister(vreg_state, this);
+ DefineSameAsFirst(this);
}
-void ChangeInt32ToFloat64::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- __ Cvtlsi2sd(ToDoubleRegister(result()), ToRegister(input()));
+void TruncateUint32ToInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // No code emitted -- as far as the machine is concerned, int32 is uint32.
+ DCHECK_EQ(ToRegister(input()), ToRegister(result()));
+}
+
+void TruncateFloat64ToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void TruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ TruncateDoubleToInt32(ToRegister(result()), ToDoubleRegister(input()));
}
-void CheckedTruncateFloat64ToInt32::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+void CheckedTruncateFloat64ToInt32::SetValueLocationConstraints() {
UseRegister(input());
- DefineAsRegister(vreg_state, this);
+ DefineAsRegister(this);
}
void CheckedTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- DoubleRegister input_reg = ToDoubleRegister(input());
+ __ TryTruncateDoubleToInt32(
+ ToRegister(result()), ToDoubleRegister(input()),
+ __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
+}
+
+void UnsafeTruncateFloat64ToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void UnsafeTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+#ifdef DEBUG
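+ // Debug builds verify that the input really is a representable int32 by
+ // running the checked truncation and aborting if it would have deopted.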
+ Label fail, start;
+ __ Jump(&start);
+ __ bind(&fail);
+ __ Abort(AbortReason::kFloat64IsNotAInt32);
+
+ __ bind(&start);
+ __ TryTruncateDoubleToInt32(ToRegister(result()), ToDoubleRegister(input()),
+ &fail);
+#else
+ // TODO(dmercadier): TruncateDoubleToInt32 does additional work when the
+ // double doesn't fit in a 32-bit integer. This is not necessary for
+ // UnsafeTruncateFloat64ToInt32 (since we statically know that the double
+ // fits in a 32-bit int) and could instead be just a Cvttsd2si (x64) or Fcvtzs
+ // (arm64).
+ __ TruncateDoubleToInt32(ToRegister(result()), ToDoubleRegister(input()));
+#endif
+}
+
+void CheckedUint32ToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedUint32ToInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register input_reg = ToRegister(input());
+ Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
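+ // A uint32 fits in an int32 iff its top bit is clear; reinterpreted as a
+ // signed value, anything >= 2^31 compares as negative, so deopt on < 0.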
+ __ CompareInt32AndJumpIf(input_reg, 0, kLessThan, fail);
+}
+
+void UnsafeTruncateUint32ToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void UnsafeTruncateUint32ToInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+#ifdef DEBUG
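+ // Debug-only check that the uint32 value actually fits in an int32 (i.e.
+ // that its sign bit is clear).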
+ Register input_reg = ToRegister(input());
+ Label success;
+ __ CompareInt32AndJumpIf(input_reg, 0, kGreaterThanEqual, &success);
+ __ Abort(AbortReason::kUint32IsNotAInt32);
+ __ bind(&success);
+#endif
+ // No code emitted -- as far as the machine is concerned, int32 is uint32.
+ DCHECK_EQ(ToRegister(input()), ToRegister(result()));
+}
+
+void Int32ToUint8Clamped::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void Int32ToUint8Clamped::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
Register result_reg = ToRegister(result());
- DoubleRegister converted_back = kScratchDoubleReg;
-
- // Convert the input float64 value to int32.
- __ Cvttsd2si(result_reg, input_reg);
- // Convert that int32 value back to float64.
- __ Cvtlsi2sd(converted_back, result_reg);
- // Check that the result of the float64->int32->float64 is equal to the input
- // (i.e. that the conversion didn't truncate.
- __ Ucomisd(input_reg, converted_back);
- __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
-
- // Check if {input} is -0.
- Label check_done;
- __ cmpl(result_reg, Immediate(0));
- __ j(not_equal, &check_done);
-
- // In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Register high_word32_of_input = kScratchRegister;
- __ Pextrd(high_word32_of_input, input_reg, 1);
- __ cmpl(high_word32_of_input, Immediate(0));
- __ EmitEagerDeoptIf(less, DeoptimizeReason::kNotInt32, this);
-
- __ bind(&check_done);
-}
-
-void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- // Phi inputs are processed in the post-process, once loop phis' inputs'
- // v-regs are allocated.
-
- // We have to pass a policy, but it is later ignored during register
- // allocation. See StraightForwardRegisterAllocator::AllocateRegisters
- // which has special handling for Phis.
- static const compiler::UnallocatedOperand::ExtendedPolicy kIgnoredPolicy =
- compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT;
+ DCHECK_EQ(value, result_reg);
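+ // Clamp to [0, 255]: values <= 0 take the {min} path, values above 255 are
+ // capped at 255, and in-range values are left unchanged in place.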
+ Label min, done;
+ __ CompareInt32(value, 0);
+ __ JumpIf(kLessThanEqual, &min);
+ __ CompareInt32(value, 255);
+ __ JumpIf(kLessThanEqual, &done);
+ __ Move(result_reg, 255);
+ __ Jump(&done, Label::Distance::kNear);
+ __ bind(&min);
+ __ Move(result_reg, 0);
+ __ bind(&done);
+}
- result().SetUnallocated(kIgnoredPolicy,
- vreg_state->AllocateVirtualRegister());
+void Uint32ToUint8Clamped::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
}
-// TODO(verwaest): Remove after switching the register allocator.
-void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
- for (Input& input : *this) {
- UseAny(input);
- }
+void Uint32ToUint8Clamped::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
+ DCHECK_EQ(value, ToRegister(result()));
+ Label done;
+ __ CompareInt32(value, 255);
+ __ JumpIf(kUnsignedLessThanEqual, &done, Label::Distance::kNear);
+ __ Move(value, 255);
+ __ bind(&done);
}
-void Phi::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {}
-void Phi::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << owner().ToString() << ")";
+
+void Float64ToUint8Clamped::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void Float64ToUint8Clamped::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister value = ToDoubleRegister(input());
+ Register result_reg = ToRegister(result());
+ Label min, max, done;
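+ // ToUint8Clamped branches to {min}/{max} for out-of-range inputs; the blocks
+ // below materialize 0 and 255 for those cases.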
+ __ ToUint8Clamped(result_reg, value, &min, &max, &done);
+ __ bind(&min);
+ __ Move(result_reg, 0);
+ __ Jump(&done, Label::Distance::kNear);
+ __ bind(&max);
+ __ Move(result_reg, 255);
+ __ bind(&done);
+}
+
+void CheckedNumberToUint8Clamped::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+ set_temporaries_needed(1);
+ set_double_temporaries_needed(1);
+}
+void CheckedNumberToUint8Clamped::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
+ Register result_reg = ToRegister(result());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ DoubleRegister double_value = temps.AcquireDouble();
+ Label is_not_smi, min, max, done;
+ // Check if Smi.
+ __ JumpIfNotSmi(value, &is_not_smi);
+ // If Smi, convert to Int32.
+ __ SmiToInt32(value);
+ // Clamp.
+ __ CompareInt32(value, 0);
+ __ JumpIf(kLessThanEqual, &min);
+ __ CompareInt32(value, 255);
+ __ JumpIf(kGreaterThanEqual, &max);
+ __ Jump(&done);
+ __ bind(&is_not_smi);
+ // Check if HeapNumber, deopt otherwise.
+ __ CompareMapWithRoot(value, RootIndex::kHeapNumberMap, scratch);
+ __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kNotANumber, this);
+ // If heap number, get double value.
+ __ LoadHeapNumberValue(double_value, value);
+ // Clamp.
+ __ ToUint8Clamped(value, double_value, &min, &max, &done);
+ __ bind(&min);
+ __ Move(result_reg, 0);
+ __ Jump(&done, Label::Distance::kNear);
+ __ bind(&max);
+ __ Move(result_reg, 255);
+ __ bind(&done);
+}
+
+void StoreFixedArrayElementWithWriteBarrier::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
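+ // The write barrier stub expects the object and the slot address in specific
+ // registers, so reserve those registers as temporaries up front.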
+ RequireSpecificTemporary(WriteBarrierDescriptor::ObjectRegister());
+ RequireSpecificTemporary(WriteBarrierDescriptor::SlotAddressRegister());
+}
+void StoreFixedArrayElementWithWriteBarrier::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ Register value = ToRegister(value_input());
+ __ StoreFixedArrayElementWithWriteBarrier(elements, index, value,
+ register_snapshot());
+}
+
+void StoreFixedArrayElementNoWriteBarrier::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
+}
+void StoreFixedArrayElementNoWriteBarrier::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ Register value = ToRegister(value_input());
+ __ StoreFixedArrayElementNoWriteBarrier(elements, index, value);
+}
+
+void CheckedStoreFixedArraySmiElement::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
+}
+void CheckedStoreFixedArraySmiElement::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ Register value = ToRegister(value_input());
+ Condition is_smi = __ CheckSmi(value);
+ __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi,
+ this);
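+ // Smi values never require a write barrier, so the store can skip it.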
+ __ StoreFixedArrayElementNoWriteBarrier(elements, index, value);
}
-void Call::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+// ---
+// Arch agnostic call nodes
+// ---
+
+int Call::MaxCallStackArgs() const { return num_args(); }
+void Call::SetValueLocationConstraints() {
// TODO(leszeks): Consider splitting Call into with- and without-feedback
// opcodes, rather than checking for feedback validity.
if (feedback_.IsValid()) {
@@ -3309,8 +3168,9 @@ void Call::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseAny(arg(i));
}
UseFixed(context(), kContextRegister);
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
+
void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
// TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
#ifdef DEBUG
@@ -3325,18 +3185,15 @@ void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
#endif
DCHECK_EQ(ToRegister(context()), kContextRegister);
- for (int i = num_args() - 1; i >= 0; --i) {
- __ PushInput(arg(i));
- }
+ __ PushReverse(base::make_iterator_range(args_begin(), args_end()));
uint32_t arg_count = num_args();
if (feedback_.IsValid()) {
DCHECK_EQ(TargetType::kAny, target_type_);
using D = CallTrampoline_WithFeedbackDescriptor;
- __ Move(D::GetRegisterParameter(D::kActualArgumentsCount),
- Immediate(arg_count));
+ __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), arg_count);
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
switch (receiver_mode_) {
case ConvertReceiverMode::kNullOrUndefined:
@@ -3352,8 +3209,7 @@ void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
}
} else if (target_type_ == TargetType::kAny) {
using D = CallTrampolineDescriptor;
- __ Move(D::GetRegisterParameter(D::kActualArgumentsCount),
- Immediate(arg_count));
+ __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), arg_count);
switch (receiver_mode_) {
case ConvertReceiverMode::kNullOrUndefined:
@@ -3369,8 +3225,7 @@ void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
} else {
DCHECK_EQ(TargetType::kJSFunction, target_type_);
using D = CallTrampolineDescriptor;
- __ Move(D::GetRegisterParameter(D::kActualArgumentsCount),
- Immediate(arg_count));
+ __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), arg_count);
switch (receiver_mode_) {
case ConvertReceiverMode::kNullOrUndefined:
@@ -3387,98 +3242,104 @@ void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void Call::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << receiver_mode_ << ", ";
- switch (target_type_) {
- case TargetType::kJSFunction:
- os << "JSFunction";
- break;
- case TargetType::kAny:
- os << "Any";
- break;
+
+int CallSelf::MaxCallStackArgs() const {
+ int actual_parameter_count = num_args() + 1;
+ return std::max(expected_parameter_count_, actual_parameter_count);
+}
+void CallSelf::SetValueLocationConstraints() {
+ UseAny(receiver());
+ for (int i = 0; i < num_args(); i++) {
+ UseAny(arg(i));
}
- os << ")";
+ DefineAsFixed(this, kReturnRegister0);
+ set_temporaries_needed(1);
+}
+
+void CallSelf::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ int actual_parameter_count = num_args() + 1;
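+ // If the caller passes fewer arguments than the callee formally expects,
+ // pad the missing ones with undefined before pushing.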
+ if (actual_parameter_count < expected_parameter_count_) {
+ int number_of_undefineds =
+ expected_parameter_count_ - actual_parameter_count;
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ PushReverse(receiver(),
+ base::make_iterator_range(args_begin(), args_end()),
+ RepeatValue(scratch, number_of_undefineds));
+ } else {
+ __ PushReverse(receiver(),
+ base::make_iterator_range(args_begin(), args_end()));
+ }
+ compiler::JSHeapBroker* broker = masm->compilation_info()->broker();
+ __ Move(kContextRegister, function_.context(broker).object());
+ __ Move(kJavaScriptCallTargetRegister, function_.object());
+ __ LoadRoot(kJavaScriptCallNewTargetRegister, RootIndex::kUndefinedValue);
+ __ Move(kJavaScriptCallArgCountRegister, actual_parameter_count);
+ DCHECK(!shared_function_info(broker).HasBuiltinId());
+ __ CallSelf();
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CallKnownJSFunction::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int CallKnownJSFunction::MaxCallStackArgs() const {
+ int actual_parameter_count = num_args() + 1;
+ return std::max(expected_parameter_count_, actual_parameter_count);
+}
+void CallKnownJSFunction::SetValueLocationConstraints() {
UseAny(receiver());
for (int i = 0; i < num_args(); i++) {
UseAny(arg(i));
}
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
+ set_temporaries_needed(1);
}
+
void CallKnownJSFunction::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- int expected_parameter_count =
- shared_function_info().internal_formal_parameter_count_with_receiver();
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
int actual_parameter_count = num_args() + 1;
- if (actual_parameter_count < expected_parameter_count) {
+ if (actual_parameter_count < expected_parameter_count_) {
int number_of_undefineds =
- expected_parameter_count - actual_parameter_count;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- for (int i = 0; i < number_of_undefineds; i++) {
- __ Push(kScratchRegister);
- }
- }
- for (int i = num_args() - 1; i >= 0; --i) {
- __ PushInput(arg(i));
+ expected_parameter_count_ - actual_parameter_count;
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ PushReverse(receiver(),
+ base::make_iterator_range(args_begin(), args_end()),
+ RepeatValue(scratch, number_of_undefineds));
+ } else {
+ __ PushReverse(receiver(),
+ base::make_iterator_range(args_begin(), args_end()));
}
- __ PushInput(receiver());
- __ Move(kContextRegister, function_.context().object());
+ compiler::JSHeapBroker* broker = masm->compilation_info()->broker();
+ __ Move(kContextRegister, function_.context(broker).object());
__ Move(kJavaScriptCallTargetRegister, function_.object());
__ LoadRoot(kJavaScriptCallNewTargetRegister, RootIndex::kUndefinedValue);
- __ Move(kJavaScriptCallArgCountRegister, Immediate(actual_parameter_count));
- if (shared_function_info().HasBuiltinId()) {
- __ CallBuiltin(shared_function_info().builtin_id());
+ __ Move(kJavaScriptCallArgCountRegister, actual_parameter_count);
+ if (shared_function_info(broker).HasBuiltinId()) {
+ __ CallBuiltin(shared_function_info(broker).builtin_id());
} else {
__ AssertCallableFunction(kJavaScriptCallTargetRegister);
- __ LoadTaggedPointerField(
- kJavaScriptCallCodeStartRegister,
- FieldOperand(kJavaScriptCallTargetRegister, JSFunction::kCodeOffset));
- __ CallCodeTObject(kJavaScriptCallCodeStartRegister);
+ __ LoadTaggedField(kJavaScriptCallCodeStartRegister,
+ FieldMemOperand(kJavaScriptCallTargetRegister,
+ JSFunction::kCodeOffset));
+ __ CallCodeObject(kJavaScriptCallCodeStartRegister);
}
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CallKnownJSFunction::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << function_.object() << ")";
-}
-void Construct::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = Construct_WithFeedbackDescriptor;
- UseFixed(function(), D::GetRegisterParameter(D::kTarget));
- UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget));
- UseFixed(context(), kContextRegister);
- for (int i = 0; i < num_args(); i++) {
- UseAny(arg(i));
- }
- DefineAsFixed(vreg_state, this, kReturnRegister0);
-}
-void Construct::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- using D = Construct_WithFeedbackDescriptor;
- DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
- DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget));
- DCHECK_EQ(ToRegister(context()), kContextRegister);
-
- for (int i = num_args() - 1; i >= 0; --i) {
- __ PushInput(arg(i));
+int CallBuiltin::MaxCallStackArgs() const {
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
+ if (!descriptor.AllowVarArgs()) {
+ return descriptor.GetStackParameterCount();
+ } else {
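+ // Var-arg builtins take everything that does not fit in a register
+ // parameter (including the optional feedback slot and vector) on the stack.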
+ int all_input_count = InputCountWithoutContext() + (has_feedback() ? 2 : 0);
+ DCHECK_GE(all_input_count, descriptor.GetRegisterParameterCount());
+ return all_input_count - descriptor.GetRegisterParameterCount();
}
- static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0);
- static_assert(D::GetStackParameterCount() == 1);
- __ Push(feedback().vector);
-
- uint32_t arg_count = num_args();
- __ Move(D::GetRegisterParameter(D::kActualArgumentsCount),
- Immediate(arg_count));
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
-
- __ CallBuiltin(Builtin::kConstruct_WithFeedback);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CallBuiltin::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void CallBuiltin::SetValueLocationConstraints() {
auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
bool has_context = descriptor.HasContextParameter();
int i = 0;
@@ -3491,18 +3352,20 @@ void CallBuiltin::AllocateVreg(MaglevVregAllocationState* vreg_state) {
if (has_context) {
UseFixed(input(i), kContextRegister);
}
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
-void CallBuiltin::PassFeedbackSlotOnStack(MaglevAssembler* masm) {
- DCHECK(has_feedback());
- switch (slot_type()) {
- case kTaggedIndex:
- __ Push(TaggedIndex::FromIntptr(feedback().index()));
- break;
- case kSmi:
- __ Push(Smi::FromInt(feedback().index()));
- break;
+template <typename... Args>
+void CallBuiltin::PushArguments(MaglevAssembler* masm, Args... extra_args) {
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
+ if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) {
+ // In Default order we cannot have extra args (feedback).
+ DCHECK_EQ(sizeof...(extra_args), 0);
+ __ Push(base::make_iterator_range(stack_args_begin(), stack_args_end()));
+ } else {
+ DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS);
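+ // JS stack order: push the arguments (plus any extra feedback args) in
+ // reverse, as expected by the JS calling convention.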
+ __ PushReverse(extra_args..., base::make_iterator_range(stack_args_begin(),
+ stack_args_end()));
}
}
@@ -3522,7 +3385,7 @@ void CallBuiltin::PassFeedbackSlotInRegister(MaglevAssembler* masm) {
}
}
-void CallBuiltin::PushFeedback(MaglevAssembler* masm) {
+void CallBuiltin::PushFeedbackAndArguments(MaglevAssembler* masm) {
DCHECK(has_feedback());
auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
@@ -3536,112 +3399,202 @@ void CallBuiltin::PushFeedback(MaglevAssembler* masm) {
if (vector_index < descriptor.GetRegisterParameterCount()) {
PassFeedbackSlotInRegister(masm);
__ Move(descriptor.GetRegisterParameter(vector_index), feedback().vector);
+ PushArguments(masm);
} else if (vector_index == descriptor.GetRegisterParameterCount()) {
PassFeedbackSlotInRegister(masm);
- // We do not allow var args if has_feedback(), so here we have only one
- // parameter on stack and do not need to check stack arguments order.
+ DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS);
+ // Ensure that the builtin expects only the feedback vector on the stack and
+ // that any additional var args are passed through to another builtin.
+ // This is required to align the stack correctly (e.g. on arm64).
+ DCHECK_EQ(descriptor.GetStackParameterCount(), 1);
+ PushArguments(masm);
__ Push(feedback().vector);
} else {
- // Same as above. We does not allow var args if has_feedback(), so feedback
- // slot and vector must be last two inputs.
- if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) {
- PassFeedbackSlotOnStack(masm);
- __ Push(feedback().vector);
- } else {
- DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS);
- __ Push(feedback().vector);
- PassFeedbackSlotOnStack(masm);
+ int slot = feedback().index();
+ Handle<FeedbackVector> vector = feedback().vector;
+ switch (slot_type()) {
+ case kTaggedIndex:
+ PushArguments(masm, TaggedIndex::FromIntptr(slot), vector);
+ break;
+ case kSmi:
+ PushArguments(masm, Smi::FromInt(slot), vector);
+ break;
}
}
}
void CallBuiltin::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
-
- if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) {
- for (int i = InputsInRegisterCount(); i < InputCountWithoutContext(); ++i) {
- __ PushInput(input(i));
- }
- if (has_feedback()) {
- PushFeedback(masm);
- }
+ if (has_feedback()) {
+ PushFeedbackAndArguments(masm);
} else {
- DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS);
- if (has_feedback()) {
- PushFeedback(masm);
- }
- for (int i = InputCountWithoutContext() - 1; i >= InputsInRegisterCount();
- --i) {
- __ PushInput(input(i));
- }
+ PushArguments(masm);
}
__ CallBuiltin(builtin());
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CallBuiltin::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << Builtins::name(builtin()) << ")";
-}
-void CallRuntime::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int CallRuntime::MaxCallStackArgs() const { return num_args(); }
+void CallRuntime::SetValueLocationConstraints() {
UseFixed(context(), kContextRegister);
for (int i = 0; i < num_args(); i++) {
UseAny(arg(i));
}
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void CallRuntime::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
DCHECK_EQ(ToRegister(context()), kContextRegister);
- for (int i = 0; i < num_args(); i++) {
- __ PushInput(arg(i));
- }
+ __ Push(base::make_iterator_range(args_begin(), args_end()));
__ CallRuntime(function_id(), num_args());
// TODO(victorgomes): Not sure if this is needed for all runtime calls.
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void CallRuntime::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << Runtime::FunctionForId(function_id())->name << ")";
-}
-void CallWithSpread::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D =
- CallInterfaceDescriptorFor<Builtin::kCallWithSpread_WithFeedback>::type;
- UseFixed(function(), D::GetRegisterParameter(D::kTarget));
+int CallWithSpread::MaxCallStackArgs() const {
+ int argc_no_spread = num_args() - 1;
+ if (feedback_.IsValid()) {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kCallWithSpread_WithFeedback>::type;
+ return argc_no_spread + D::GetStackParameterCount();
+ } else {
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithSpread>::type;
+ return argc_no_spread + D::GetStackParameterCount();
+ }
+}
+void CallWithSpread::SetValueLocationConstraints() {
+ if (feedback_.IsValid()) {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kCallWithSpread_WithFeedback>::type;
+ UseFixed(function(), D::GetRegisterParameter(D::kTarget));
+ UseFixed(spread(), D::GetRegisterParameter(D::kSpread));
+ } else {
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithSpread>::type;
+ UseFixed(function(), D::GetRegisterParameter(D::kTarget));
+ UseFixed(spread(), D::GetRegisterParameter(D::kSpread));
+ }
UseFixed(context(), kContextRegister);
for (int i = 0; i < num_args() - 1; i++) {
UseAny(arg(i));
}
- UseFixed(spread(), D::GetRegisterParameter(D::kSpread));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void CallWithSpread::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- using D =
- CallInterfaceDescriptorFor<Builtin::kCallWithSpread_WithFeedback>::type;
+#ifdef DEBUG
+ if (feedback_.IsValid()) {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kCallWithSpread_WithFeedback>::type;
+ DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
+ DCHECK_EQ(ToRegister(spread()), D::GetRegisterParameter(D::kSpread));
+ } else {
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithSpread>::type;
+ DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
+ DCHECK_EQ(ToRegister(spread()), D::GetRegisterParameter(D::kSpread));
+ }
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+#endif
+
+ if (feedback_.IsValid()) {
+ using D =
+ CallInterfaceDescriptorFor<Builtin::kCallWithSpread_WithFeedback>::type;
+ __ PushReverse(base::make_iterator_range(args_no_spread_begin(),
+ args_no_spread_end()));
+ // Receiver needs to be pushed (aligned) separately as it is consumed by
+ // CallWithSpread_WithFeedback directly while the other arguments on the
+ // stack are passed through to CallWithSpread.
+ static_assert(D::GetStackParameterIndex(D::kReceiver) == 0);
+ static_assert(D::GetStackParameterCount() == 1);
+ __ Push(receiver());
+
+ __ Move(D::GetRegisterParameter(D::kArgumentsCount), num_args_no_spread());
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
+ __ CallBuiltin(Builtin::kCallWithSpread_WithFeedback);
+ } else {
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithSpread>::type;
+ __ PushReverse(base::make_iterator_range(args_no_spread_begin(),
+ args_no_spread_end()));
+ __ Move(D::GetRegisterParameter(D::kArgumentsCount), num_args_no_spread());
+ __ CallBuiltin(Builtin::kCallWithSpread);
+ }
+
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+}
+
+int CallWithArrayLike::MaxCallStackArgs() const {
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithArrayLike>::type;
+ return D::GetStackParameterCount();
+}
+void CallWithArrayLike::SetValueLocationConstraints() {
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithArrayLike>::type;
+ UseFixed(function(), D::GetRegisterParameter(D::kTarget));
+ UseAny(receiver());
+ UseFixed(arguments_list(), D::GetRegisterParameter(D::kArgumentsList));
+ UseFixed(context(), kContextRegister);
+ DefineAsFixed(this, kReturnRegister0);
+}
+void CallWithArrayLike::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+#ifdef DEBUG
+ using D = CallInterfaceDescriptorFor<Builtin::kCallWithArrayLike>::type;
DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
- DCHECK_EQ(ToRegister(spread()), D::GetRegisterParameter(D::kSpread));
+ DCHECK_EQ(ToRegister(arguments_list()),
+ D::GetRegisterParameter(D::kArgumentsList));
DCHECK_EQ(ToRegister(context()), kContextRegister);
- // Push other arguments (other than the spread) to the stack.
- int argc_no_spread = num_args() - 1;
- for (int i = argc_no_spread - 1; i >= 0; --i) {
- __ PushInput(arg(i));
+#endif // DEBUG
+ __ Push(receiver());
+ __ CallBuiltin(Builtin::kCallWithArrayLike);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
+}
+
+// ---
+// Arch agnostic construct nodes
+// ---
+
+int Construct::MaxCallStackArgs() const {
+ using D = Construct_WithFeedbackDescriptor;
+ return num_args() + D::GetStackParameterCount();
+}
+void Construct::SetValueLocationConstraints() {
+ using D = Construct_WithFeedbackDescriptor;
+ UseFixed(function(), D::GetRegisterParameter(D::kTarget));
+ UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget));
+ UseFixed(context(), kContextRegister);
+ for (int i = 0; i < num_args(); i++) {
+ UseAny(arg(i));
}
- static_assert(D::GetStackParameterIndex(D::kReceiver) == 0);
+ DefineAsFixed(this, kReturnRegister0);
+}
+void Construct::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ using D = Construct_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
+ DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget));
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+
+ __ PushReverse(base::make_iterator_range(args_begin(), args_end()));
+ // Feedback needs to be pushed (aligned) separately as it is consumed by
+ // Construct_WithFeedback directly while the other arguments on the stack
+ // are passed through to Construct.
+ static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0);
static_assert(D::GetStackParameterCount() == 1);
- __ PushInput(arg(0));
+ __ Push(feedback().vector);
- __ Move(D::GetRegisterParameter(D::kArgumentsCount),
- Immediate(argc_no_spread));
- __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
- __ CallBuiltin(Builtin::kCallWithSpread_WithFeedback);
+ __ Move(D::GetRegisterParameter(D::kActualArgumentsCount), num_args());
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
+
+ __ CallBuiltin(Builtin::kConstruct_WithFeedback);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void ConstructWithSpread::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int ConstructWithSpread::MaxCallStackArgs() const {
+ int argc_no_spread = num_args() - 1;
+ using D = CallInterfaceDescriptorFor<
+ Builtin::kConstructWithSpread_WithFeedback>::type;
+ return argc_no_spread + D::GetStackParameterCount();
+}
+void ConstructWithSpread::SetValueLocationConstraints() {
using D = CallInterfaceDescriptorFor<
Builtin::kConstructWithSpread_WithFeedback>::type;
UseFixed(function(), D::GetRegisterParameter(D::kTarget));
@@ -3651,7 +3604,7 @@ void ConstructWithSpread::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseAny(arg(i));
}
UseFixed(spread(), D::GetRegisterParameter(D::kSpread));
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+ DefineAsFixed(this, kReturnRegister0);
}
void ConstructWithSpread::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
@@ -3660,306 +3613,106 @@ void ConstructWithSpread::GenerateCode(MaglevAssembler* masm,
DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget));
DCHECK_EQ(ToRegister(context()), kContextRegister);
- // Push other arguments (other than the spread) to the stack.
- int argc_no_spread = num_args() - 1;
- for (int i = argc_no_spread - 1; i >= 0; --i) {
- __ PushInput(arg(i));
- }
+ __ PushReverse(
+ base::make_iterator_range(args_no_spread_begin(), args_no_spread_end()));
+
+ // Feedback needs to be pushed (aligned) separately as it is consumed by
+ // ConstructWithSpread_WithFeedback directly while the other arguments on the
+ // stack are passed through to ConstructWithSpread.
static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0);
static_assert(D::GetStackParameterCount() == 1);
__ Push(feedback().vector);
__ Move(D::GetRegisterParameter(D::kActualArgumentsCount),
- Immediate(argc_no_spread));
- __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ num_args_no_spread());
+ __ Move(D::GetRegisterParameter(D::kSlot), feedback().index());
__ CallBuiltin(Builtin::kConstructWithSpread_WithFeedback);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
-void ConvertReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
- UseFixed(receiver_input(), D::GetRegisterParameter(D::kInput));
- set_temporaries_needed(1);
- DefineAsFixed(vreg_state, this, kReturnRegister0);
+int TransitionElementsKind::MaxCallStackArgs() const {
+ return std::max(WriteBarrierDescriptor::GetStackParameterCount(), 2);
}
-void ConvertReceiver::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Label convert_to_object, done;
- Register receiver = ToRegister(receiver_input());
- Register scratch = general_temporaries().first();
- __ JumpIfSmi(receiver, &convert_to_object, Label::kNear);
- static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
- __ j(above_equal, &done);
-
- if (mode_ != ConvertReceiverMode::kNotNullOrUndefined) {
- Label convert_global_proxy;
- __ JumpIfRoot(receiver, RootIndex::kUndefinedValue, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(receiver, RootIndex::kNullValue, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
- // Patch receiver to global proxy.
- __ Move(ToRegister(result()),
- target_.native_context().global_proxy_object().object());
- }
- __ jmp(&done);
- }
-
- __ bind(&convert_to_object);
- // ToObject needs to be ran with the target context installed.
- __ Move(kContextRegister, target_.context().object());
- __ CallBuiltin(Builtin::kToObject);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
- __ bind(&done);
-}
-
-void IncreaseInterruptBudget::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+void TransitionElementsKind::SetValueLocationConstraints() {
+ UseRegister(object_input());
set_temporaries_needed(1);
}
-void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register scratch = general_temporaries().first();
- __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
- __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
- Immediate(amount()));
-}
-void IncreaseInterruptBudget::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << amount() << ")";
-}
+void TransitionElementsKind::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register object = ToRegister(object_input());
-void ReduceInterruptBudget::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- set_temporaries_needed(1);
-}
-void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register scratch = general_temporaries().first();
- __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
- __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
- Immediate(amount()));
ZoneLabelRef done(masm);
- __ JumpToDeferredIf(
- less,
- [](MaglevAssembler* masm, ZoneLabelRef done,
- ReduceInterruptBudget* node) {
- {
- SaveRegisterStateForCall save_register_state(
- masm, node->register_snapshot());
- __ Move(kContextRegister, masm->native_context().object());
- __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev,
- 1);
- save_register_state.DefineSafepointWithLazyDeopt(
- node->lazy_deopt_info());
- }
- __ jmp(*done);
- },
- done, this);
- __ bind(*done);
-}
-void ReduceInterruptBudget::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << amount() << ")";
-}
-
-void ThrowReferenceErrorIfHole::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseAny(value());
-}
-void ThrowReferenceErrorIfHole::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- if (value().operand().IsRegister()) {
- __ CompareRoot(ToRegister(value()), RootIndex::kTheHoleValue);
- } else {
- DCHECK(value().operand().IsStackSlot());
- __ CompareRoot(masm->ToMemOperand(value()), RootIndex::kTheHoleValue);
- }
- __ JumpToDeferredIf(
- equal,
- [](MaglevAssembler* masm, ThrowReferenceErrorIfHole* node) {
- __ Move(kContextRegister, masm->native_context().object());
- __ Push(node->name().object());
- __ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, 1);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
- __ Abort(AbortReason::kUnexpectedReturnFromThrow);
- },
- this);
-}
-void ThrowSuperNotCalledIfHole::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseAny(value());
-}
-void ThrowSuperNotCalledIfHole::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- if (value().operand().IsRegister()) {
- __ CompareRoot(ToRegister(value()), RootIndex::kTheHoleValue);
- } else {
- DCHECK(value().operand().IsStackSlot());
- __ CompareRoot(masm->ToMemOperand(value()), RootIndex::kTheHoleValue);
- }
- __ JumpToDeferredIf(
- equal,
- [](MaglevAssembler* masm, ThrowSuperNotCalledIfHole* node) {
- __ Move(kContextRegister, masm->native_context().object());
- __ CallRuntime(Runtime::kThrowSuperNotCalled, 0);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
- __ Abort(AbortReason::kUnexpectedReturnFromThrow);
- },
- this);
-}
-
-void ThrowSuperAlreadyCalledIfNotHole::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseAny(value());
-}
-void ThrowSuperAlreadyCalledIfNotHole::GenerateCode(
- MaglevAssembler* masm, const ProcessingState& state) {
- if (value().operand().IsRegister()) {
- __ CompareRoot(ToRegister(value()), RootIndex::kTheHoleValue);
- } else {
- DCHECK(value().operand().IsStackSlot());
- __ CompareRoot(masm->ToMemOperand(value()), RootIndex::kTheHoleValue);
+ // TODO(leszeks): We could consider just deopting straight away if the object
+ // is a Smi, but it's also fine to not deopt since there will be another Smi
+ // check in the map check after this transition. This second Smi check could
+ // be elided if we deopted here, but OTOH then we'd need extra deopt info for
+ // this node.
+ __ JumpIfSmi(object, *done, Label::kNear);
+
+ Register map = temps.Acquire();
+ __ LoadMap(map, object);
+
+ for (compiler::MapRef transition_source : transition_sources_) {
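+ // "Simple" transitions only need the map to be swapped (with a write
+ // barrier); all other transitions are handled by the runtime.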
+ bool is_simple = IsSimpleMapChangeTransition(
+ transition_source.elements_kind(), transition_target_.elements_kind());
+
+ // TODO(leszeks): If there are a lot of transition source maps, move the
+ // source into a register and share the deferred code between maps.
+ __ CompareTagged(map, transition_source.object());
+ __ JumpToDeferredIf(
+ kEqual,
+ [](MaglevAssembler* masm, Register object, Register map,
+ RegisterSnapshot register_snapshot,
+ compiler::MapRef transition_target, bool is_simple,
+ ZoneLabelRef done) {
+ if (is_simple) {
+ __ Move(map, transition_target.object());
+ __ StoreTaggedFieldWithWriteBarrier(
+ object, HeapObject::kMapOffset, map, register_snapshot,
+ MaglevAssembler::kValueIsDecompressed,
+ MaglevAssembler::kValueCannotBeSmi);
+ } else {
+ SaveRegisterStateForCall save_state(masm, register_snapshot);
+ __ Push(object, transition_target.object());
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kTransitionElementsKind);
+ save_state.DefineSafepoint();
+ }
+ __ Jump(*done);
+ },
+ object, map, register_snapshot(), transition_target_, is_simple, done);
}
- __ JumpToDeferredIf(
- not_equal,
- [](MaglevAssembler* masm, ThrowSuperAlreadyCalledIfNotHole* node) {
- __ Move(kContextRegister, masm->native_context().object());
- __ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, 0);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
- __ Abort(AbortReason::kUnexpectedReturnFromThrow);
- },
- this);
-}
-
-void ThrowIfNotSuperConstructor::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(constructor());
- UseRegister(function());
-}
-void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- __ LoadMap(kScratchRegister, ToRegister(constructor()));
- __ testl(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(Map::Bits1::IsConstructorBit::kMask));
- __ JumpToDeferredIf(
- equal,
- [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) {
- __ Push(ToRegister(node->constructor()));
- __ Push(ToRegister(node->function()));
- __ Move(kContextRegister, masm->native_context().object());
- __ CallRuntime(Runtime::kThrowNotSuperConstructor, 2);
- masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
- __ Abort(AbortReason::kUnexpectedReturnFromThrow);
- },
- this);
+ __ bind(*done);
}
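The transition code above uses Maglev's deferred-code idiom: the likely case (the map already matches, or the value is a Smi) stays inline, while the rarely taken slow path is handed to JumpToDeferredIf as a capture-free lambda plus the operands it needs and is emitted out of line. A minimal, self-contained sketch of the idea, using std::function in place of the real machinery (none of the names below are V8 API):

#include <functional>
#include <iostream>
#include <vector>

// Toy model of the deferred-code pattern: hot-path work runs inline, rare
// work is queued and runs after it. Illustrative only.
class ToyAssembler {
 public:
  void JumpToDeferredIf(bool condition, std::function<void()> deferred) {
    if (condition) deferred_.push_back(std::move(deferred));
  }
  void EmitDeferredCode() {
    for (auto& f : deferred_) f();
  }

 private:
  std::vector<std::function<void()>> deferred_;
};

int main() {
  ToyAssembler masm;
  int object_map = 1, transition_source = 1, transition_target = 2;
  // Inline: compare the map. Deferred: the rare elements-kind transition.
  masm.JumpToDeferredIf(object_map == transition_source, [transition_target] {
    std::cout << "deferred: transition to elements kind " << transition_target
              << "\n";
  });
  // ... the rest of the hot code would be emitted here ...
  masm.EmitDeferredCode();
}

In the real assembler the lambda must be capture-free and its extra operands are passed explicitly (as in the diff above), since the deferred body is emitted as machine code rather than executed as C++.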
// ---
-// Control nodes
+// Arch agnostic control nodes
// ---
-void Return::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseFixed(value_input(), kReturnRegister0);
-}
-void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
- DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
-
- // Read the formal number of parameters from the top level compilation unit
- // (i.e. the outermost, non inlined function).
- int formal_params_size =
- masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
-
- // We're not going to continue execution, so we can use an arbitrary register
- // here instead of relying on temporaries from the register allocator.
- Register actual_params_size = r8;
-
- // Compute the size of the actual parameters + receiver (in bytes).
- // TODO(leszeks): Consider making this an input into Return to re-use the
- // incoming argc's register (if it's still valid).
- __ movq(actual_params_size,
- MemOperand(rbp, StandardFrameConstants::kArgCOffset));
-
- // Leave the frame.
- // TODO(leszeks): Add a new frame maker for Maglev.
- __ LeaveFrame(StackFrame::BASELINE);
- // If actual is bigger than formal, then we should use it to free up the stack
- // arguments.
- Label drop_dynamic_arg_size;
- __ cmpq(actual_params_size, Immediate(formal_params_size));
- __ j(greater, &drop_dynamic_arg_size);
-
- // Drop receiver + arguments according to static formal arguments size.
- __ Ret(formal_params_size * kSystemPointerSize, kScratchRegister);
-
- __ bind(&drop_dynamic_arg_size);
- // Drop receiver + arguments according to dynamic arguments size.
- __ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
- __ Ret();
-}
-
-void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
-void Deopt::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
- __ EmitEagerDeopt(this, reason());
-}
-void Deopt::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << DeoptimizeReasonToString(reason()) << ")";
-}
-
-void Switch::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(value());
-}
-void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
- std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(size());
- for (int i = 0; i < size(); i++) {
- labels[i] = (targets())[i].block_ptr()->label();
- }
- __ Switch(kScratchRegister, ToRegister(value()), value_base(), labels.get(),
- size());
- if (has_fallthrough()) {
- DCHECK_EQ(fallthrough(), state.next_block());
- } else {
- __ Trap();
- }
-}
-
-void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
+void Jump::SetValueLocationConstraints() {}
void Jump::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
- __ jmp(target()->label());
+ __ Jump(target()->label());
}
}
-void JumpToInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
+void JumpToInlined::SetValueLocationConstraints() {}
void JumpToInlined::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
- __ jmp(target()->label());
+ __ Jump(target()->label());
}
}
-void JumpToInlined::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << Brief(*unit()->shared_function_info().object()) << ")";
-}
-
-void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
+void JumpFromInlined::SetValueLocationConstraints() {}
void JumpFromInlined::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
- __ jmp(target()->label());
+ __ Jump(target()->label());
}
}
@@ -3974,28 +3727,28 @@ void AttemptOnStackReplacement(MaglevAssembler* masm,
// Two cases may cause us to attempt OSR, in the following order:
//
// 1) Presence of cached OSR Turbofan code.
- // 2) The OSR urgency exceeds the current loop depth - in that case, trigger
- // a Turbofan OSR compilation.
+ // 2) The OSR urgency exceeds the current loop depth - in that case, call
+ // into runtime to trigger a Turbofan OSR compilation. A non-zero return
+ // value means we should deopt into Ignition which will handle all further
+ // necessary steps (rewriting the stack frame, jumping to OSR'd code).
//
// See also: InterpreterAssembler::OnStackReplacement.
- baseline::BaselineAssembler basm(masm);
__ AssertFeedbackVector(scratch0);
// Case 1).
Label deopt;
Register maybe_target_code = scratch1;
- {
- basm.TryLoadOptimizedOsrCode(maybe_target_code, scratch0, feedback_slot,
- &deopt, Label::kFar);
- }
+ __ TryLoadOptimizedOsrCode(maybe_target_code, scratch0, feedback_slot, &deopt,
+ Label::kFar);
// Case 2).
{
- __ movb(scratch0, FieldOperand(scratch0, FeedbackVector::kOsrStateOffset));
+ __ LoadByte(scratch0,
+ FieldMemOperand(scratch0, FeedbackVector::kOsrStateOffset));
__ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch0);
- basm.JumpIfByte(baseline::Condition::kUnsignedLessThanEqual, scratch0,
- loop_depth, *no_code_for_osr, Label::kNear);
+ __ JumpIfByte(kUnsignedLessThanEqual, scratch0, loop_depth,
+ *no_code_for_osr, Label::Distance::kNear);
// The osr_urgency exceeds the current loop_depth, signaling an OSR
// request. Call into runtime to compile.
@@ -4017,11 +3770,11 @@ void AttemptOnStackReplacement(MaglevAssembler* masm,
__ Move(maybe_target_code, kReturnRegister0);
}
- // A `0` return value means there is no OSR code available yet. Fall
- // through for now, OSR code will be picked up once it exists and is
+ // A `0` return value means there is no OSR code available yet. Continue
+ // execution in Maglev, OSR code will be picked up once it exists and is
// cached on the feedback vector.
__ Cmp(maybe_target_code, 0);
- __ j(equal, *no_code_for_osr, Label::kNear);
+ __ JumpIf(kEqual, *no_code_for_osr);
}
__ bind(&deopt);
@@ -4033,61 +3786,108 @@ void AttemptOnStackReplacement(MaglevAssembler* masm,
GetGeneralRegistersUsedAsInputs(node->eager_deopt_info()));
__ EmitEagerDeopt(node, DeoptimizeReason::kPrepareForOnStackReplacement);
} else {
- // Fall through. With TF disabled we cannot OSR and thus it doesn't make
- // sense to start the process. We do still perform all remaining
- // bookkeeping above though, to keep Maglev code behavior roughly the same
- // in both configurations.
+ // Continue execution in Maglev. With TF disabled we cannot OSR and thus it
+ // doesn't make sense to start the process. We do still perform all
+ // remaining bookkeeping above though, to keep Maglev code behavior roughly
+ // the same in both configurations.
+ __ Jump(*no_code_for_osr);
}
}
} // namespace
-void JumpLoopPrologue::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+int JumpLoopPrologue::MaxCallStackArgs() const {
+ // For the kCompileOptimizedOSRFromMaglev call.
+ return 1;
+}
+void JumpLoopPrologue::SetValueLocationConstraints() {
+ if (!v8_flags.use_osr) return;
set_temporaries_needed(2);
}
void JumpLoopPrologue::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register scratch0 = general_temporaries().PopFirst();
- Register scratch1 = general_temporaries().PopFirst();
+ if (!v8_flags.use_osr) return;
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch0 = temps.Acquire();
+ Register scratch1 = temps.Acquire();
const Register osr_state = scratch1;
__ Move(scratch0, unit_->feedback().object());
__ AssertFeedbackVector(scratch0);
- __ movb(osr_state, FieldOperand(scratch0, FeedbackVector::kOsrStateOffset));
+ __ LoadByte(osr_state,
+ FieldMemOperand(scratch0, FeedbackVector::kOsrStateOffset));
// The quick initial OSR check. If it passes, we proceed on to more
// expensive OSR logic.
static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
FeedbackVector::kMaxOsrUrgency);
- __ cmpl(osr_state, Immediate(loop_depth_));
+ __ CompareInt32(osr_state, loop_depth_);
ZoneLabelRef no_code_for_osr(masm);
- __ JumpToDeferredIf(above, AttemptOnStackReplacement, no_code_for_osr, this,
- scratch0, scratch1, loop_depth_, feedback_slot_,
- osr_offset_);
+ __ JumpToDeferredIf(kUnsignedGreaterThan, AttemptOnStackReplacement,
+ no_code_for_osr, this, scratch0, scratch1, loop_depth_,
+ feedback_slot_, osr_offset_);
__ bind(*no_code_for_osr);
}
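The quick check above works because of how the feedback vector packs its OSR state byte: the urgency sits in the low bits and the maybe-has-cached-OSR-code flag is encoded strictly above kMaxOsrUrgency, which is exactly what the static_assert guards. A single unsigned compare of the whole byte against loop_depth therefore takes the deferred path if either the urgency exceeds the loop depth or the flag is set. A standalone sketch with illustrative field widths (the real layout lives in FeedbackVector and may differ):

#include <cassert>
#include <cstdint>

// Illustrative layout: 3 bits of OSR urgency, then one "maybe has cached
// OSR code" bit. The real field definitions live in FeedbackVector.
constexpr uint8_t kMaxOsrUrgency = 7;                 // low 3 bits
constexpr uint8_t kMaybeHasOsrCodeBit = 1 << 3;       // strictly above urgency
static_assert(kMaybeHasOsrCodeBit > kMaxOsrUrgency);  // same guard as above

// One unsigned compare covers both OSR triggers.
bool NeedsOsrSlowPath(uint8_t osr_state, uint8_t loop_depth) {
  return osr_state > loop_depth;
}

int main() {
  // Urgency 5 at loop depth 2: urgency exceeds depth, take the slow path.
  assert(NeedsOsrSlowPath(/*osr_state=*/5, /*loop_depth=*/2));
  // Urgency 0 but the cached-code bit is set: slow path at any depth.
  assert(NeedsOsrSlowPath(kMaybeHasOsrCodeBit, /*loop_depth=*/7));
  // Urgency 1 at depth 3 with no cached code: stay on the fast path.
  assert(!NeedsOsrSlowPath(1, 3));
  return 0;
}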
-void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
+void JumpLoop::SetValueLocationConstraints() {}
void JumpLoop::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- __ jmp(target()->label());
+ __ Jump(target()->label());
}
-void BranchIfRootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void BranchIfRootConstant::SetValueLocationConstraints() {
UseRegister(condition_input());
}
void BranchIfRootConstant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
__ CompareRoot(ToRegister(condition_input()), root_index());
- __ Branch(equal, if_true(), if_false(), state.next_block());
+ __ Branch(ConditionFor(Operation::kEqual), if_true(), if_false(),
+ state.next_block());
}
-void BranchIfRootConstant::PrintParams(
- std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << RootsTable::name(root_index_) << ")";
+
+void BranchIfToBooleanTrue::SetValueLocationConstraints() {
+ // TODO(victorgomes): consider using any input instead.
+ UseRegister(condition_input());
+}
+void BranchIfToBooleanTrue::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+  // BasicBlocks are zone allocated and so safe to be cast to ZoneLabelRef.
+ ZoneLabelRef true_label =
+ ZoneLabelRef::UnsafeFromLabelPointer(if_true()->label());
+ ZoneLabelRef false_label =
+ ZoneLabelRef::UnsafeFromLabelPointer(if_false()->label());
+ bool fallthrough_when_true = (if_true() == state.next_block());
+ __ ToBoolean(ToRegister(condition_input()), true_label, false_label,
+ fallthrough_when_true);
}
-void BranchIfUndefinedOrNull::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
+void BranchIfReferenceCompare::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+}
+void BranchIfReferenceCompare::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ CmpTagged(left, right);
+ __ Branch(ConditionFor(operation_), if_true(), if_false(),
+ state.next_block());
+}
+
+void BranchIfInt32Compare::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+}
+void BranchIfInt32Compare::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ CompareInt32(left, right);
+ __ Branch(ConditionFor(operation_), if_true(), if_false(),
+ state.next_block());
+}
+
+void BranchIfUndefinedOrNull::SetValueLocationConstraints() {
UseRegister(condition_input());
}
void BranchIfUndefinedOrNull::GenerateCode(MaglevAssembler* masm,
@@ -4097,90 +3897,462 @@ void BranchIfUndefinedOrNull::GenerateCode(MaglevAssembler* masm,
__ JumpIfRoot(value, RootIndex::kNullValue, if_true()->label());
auto* next_block = state.next_block();
if (if_false() != next_block) {
- __ jmp(if_false()->label());
+ __ Jump(if_false()->label());
}
}
-void BranchIfJSReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+void BranchIfTypeOf::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ // One temporary for TestTypeOf.
+ set_temporaries_needed(1);
+}
+void BranchIfTypeOf::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input());
+ __ TestTypeOf(value, literal_, if_true()->label(), Label::kFar,
+ if_true() == state.next_block(), if_false()->label(),
+ Label::kFar, if_false() == state.next_block());
+}
+
+void BranchIfJSReceiver::SetValueLocationConstraints() {
UseRegister(condition_input());
}
void BranchIfJSReceiver::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register value = ToRegister(condition_input());
__ JumpIfSmi(value, if_false()->label());
- __ LoadMap(kScratchRegister, value);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_RECEIVER_TYPE);
- __ Branch(above_equal, if_true(), if_false(), state.next_block());
+ __ JumpIfJSAnyIsNotPrimitive(value, if_true()->label());
+ __ jmp(if_false()->label());
}
-void BranchIfInt32Compare::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
+void Switch::SetValueLocationConstraints() {
+ UseAndClobberRegister(value());
+  // TODO(victorgomes): Create an arch-agnostic scratch register scope.
+ set_temporaries_needed(1);
}
-void BranchIfInt32Compare::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ cmpl(left, right);
- __ Branch(ConditionFor(operation_), if_true(), if_false(),
- state.next_block());
+void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(size());
+ for (int i = 0; i < size(); i++) {
+ BasicBlock* block = (targets())[i].block_ptr();
+ block->set_start_block_of_switch_case(true);
+ labels[i] = block->label();
+ }
+ Register val = ToRegister(value());
+ // Switch requires {val} (the switch's condition) to be 64-bit, but maglev
+ // usually manipulates/creates 32-bit integers. We thus sign-extend {val} to
+ // 64-bit to have the correct value for negative numbers.
+ __ SignExtend32To64Bits(val, val);
+ __ Switch(scratch, val, value_base(), labels.get(), size());
+ if (has_fallthrough()) {
+ DCHECK_EQ(fallthrough(), state.next_block());
+ } else {
+ __ Trap();
+ }
}
-void BranchIfFloat64Compare::PrintParams(
+
+// ---
+// Print params
+// ---
+
+void ExternalConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << reference() << ")";
+}
+
+void SmiConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << value() << ")";
+}
+
+void Int32Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << value() << ")";
+}
+
+void Float64Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ if (value().is_nan()) {
+ os << "(NaN [0x" << std::hex << value().get_bits() << std::dec << "]";
+ if (value().is_hole_nan()) {
+ os << ", the hole";
+ } else if (value().get_bits() ==
+ base::bit_cast<uint64_t>(
+ std::numeric_limits<double>::quiet_NaN())) {
+ os << ", quiet NaN";
+ }
+ os << ")";
+
+ } else {
+ os << "(" << value().get_scalar() << ")";
+ }
+}
+
+void Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << object_ << ")";
+}
+
+void DeleteProperty::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << LanguageMode2String(mode()) << ")";
+}
+
+void InitialValue::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << source().ToString() << ")";
+}
+
+void LoadGlobal::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name() << ")";
+}
+
+void StoreGlobal::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name() << ")";
+}
+
+void RegisterInput::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << input() << ")";
+}
+
+void RootConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << RootsTable::name(index()) << ")";
+}
+
+void CreateFunctionContext::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << operation_ << ")";
+ os << "(" << *scope_info().object() << ", " << slot_count() << ")";
}
-void BranchIfFloat64Compare::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
+void FastCreateClosure::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *shared_function_info().object() << ", "
+ << feedback_cell().object() << ")";
}
-void BranchIfFloat64Compare::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- DoubleRegister left = ToDoubleRegister(left_input());
- DoubleRegister right = ToDoubleRegister(right_input());
- __ Ucomisd(left, right);
- __ Branch(ConditionFor(operation_), if_true(), if_false(),
- state.next_block());
+
+void CreateClosure::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *shared_function_info().object() << ", "
+ << feedback_cell().object();
+ if (pretenured()) {
+ os << " [pretenured]";
+ }
+ os << ")";
}
-void BranchIfInt32Compare::PrintParams(
+
+void AllocateRaw::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << allocation_type() << ", " << size() << ")";
+}
+
+void FoldedAllocation::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(+" << offset() << ")";
+}
+
+void Abort::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << GetAbortReason(reason()) << ")";
+}
+
+void AssertInt32::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << condition_ << ")";
+}
+
+void BuiltinStringPrototypeCharCodeOrCodePointAt::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
- os << "(" << operation_ << ")";
+ switch (mode_) {
+ case BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt:
+ os << "(CharCodeAt)";
+ break;
+ case BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt:
+ os << "(CodePointAt)";
+ break;
+ }
}
-void BranchIfReferenceCompare::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- UseRegister(left_input());
- UseRegister(right_input());
+void CheckMaps::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(";
+ size_t map_count = maps().size();
+ if (map_count > 0) {
+ for (size_t i = 0; i < map_count - 1; ++i) {
+ os << maps().at(i) << ", ";
+ }
+ os << maps().at(map_count - 1);
+ }
+ os << ")";
}
-void BranchIfReferenceCompare::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- Register left = ToRegister(left_input());
- Register right = ToRegister(right_input());
- __ cmp_tagged(left, right);
- __ Branch(ConditionFor(operation_), if_true(), if_false(),
- state.next_block());
+
+void CheckValue::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *value().object() << ")";
}
-void BranchIfReferenceCompare::PrintParams(
+
+void CheckValueEqualsString::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *value().object() << ")";
+}
+
+void CheckInstanceType::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << instance_type() << ")";
+}
+
+void CheckMapsWithMigration::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(";
+ size_t map_count = maps().size();
+ if (map_count > 0) {
+ for (size_t i = 0; i < map_count - 1; ++i) {
+ os << maps().at(i) << ", ";
+ }
+ os << maps().at(map_count - 1);
+ }
+ os << ")";
+}
+
+void CheckInt32Condition::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << condition_ << ")";
+}
+
+void CheckedNumberOrOddballToFloat64::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << conversion_type() << ")";
+}
+
+void UncheckedNumberOrOddballToFloat64::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << conversion_type() << ")";
+}
+
+void CheckedTruncateNumberOrOddballToInt32::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << conversion_type() << ")";
+}
+
+void TruncateNumberOrOddballToInt32::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << conversion_type() << ")";
+}
+
+void LoadTaggedField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(0x" << std::hex << offset() << std::dec;
+ // Print compression status only after the result is allocated, since that's
+ // when we do decompression marking.
+ if (!result().operand().IsUnallocated()) {
+ if (decompresses_tagged_result()) {
+ os << ", decompressed";
+ } else {
+ os << ", compressed";
+ }
+ }
+ os << ")";
+}
+
+void LoadDoubleField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(0x" << std::hex << offset() << std::dec << ")";
+}
+
+void LoadFixedArrayElement::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ // Print compression status only after the result is allocated, since that's
+ // when we do decompression marking.
+ if (!result().operand().IsUnallocated()) {
+ if (decompresses_tagged_result()) {
+ os << "(decompressed)";
+ } else {
+ os << "(compressed)";
+ }
+ }
+}
+
+void StoreDoubleField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(0x" << std::hex << offset() << std::dec << ")";
+}
+
+void StoreFloat64::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(0x" << std::hex << offset() << std::dec << ")";
+}
+
+void CheckedStoreSmiField::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << std::hex << offset() << std::dec << ")";
+}
+
+void StoreTaggedFieldNoWriteBarrier::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(0x" << std::hex << offset() << std::dec << ")";
+}
+
+void StoreMap::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *map_.object() << ")";
+}
+
+void StoreTaggedFieldWithWriteBarrier::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(0x" << std::hex << offset() << std::dec << ")";
+}
+
+void LoadNamedGeneric::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name_ << ")";
+}
+
+void LoadNamedFromSuperGeneric::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name_ << ")";
+}
+
+void SetNamedGeneric::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name_ << ")";
+}
+
+void DefineNamedOwnGeneric::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name_ << ")";
+}
+
+void GapMove::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << source() << " → " << target() << ")";
+}
+
+void ConstantGapMove::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(";
+ graph_labeller->PrintNodeLabel(os, node_);
+ os << " → " << target() << ")";
+}
+
+void Float64Ieee754Unary::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "("
+ << ExternalReferenceTable::NameOfIsolateIndependentAddress(
+ ieee_function_.address())
+ << ")";
+}
+
+void Float64Round::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ switch (kind_) {
+ case Kind::kCeil:
+ os << "(ceil)";
+ return;
+ case Kind::kFloor:
+ os << "(floor)";
+ return;
+ case Kind::kNearest:
+ os << "(nearest)";
+ return;
+ }
+}
+
+void Phi::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << owner().ToString() << ")";
+}
+
+void Call::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << receiver_mode_ << ", ";
+ switch (target_type_) {
+ case TargetType::kJSFunction:
+ os << "JSFunction";
+ break;
+ case TargetType::kAny:
+ os << "Any";
+ break;
+ }
+ os << ")";
+}
+
+void CallSelf::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << function_.object() << ")";
+}
+
+void CallKnownJSFunction::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << function_.object() << ")";
+}
+
+void CallBuiltin::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << Builtins::name(builtin()) << ")";
+}
+
+void CallRuntime::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << Runtime::FunctionForId(function_id())->name << ")";
+}
+
+void TestTypeOf::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << interpreter::TestTypeOfFlags::ToString(literal_) << ")";
+}
+
+void IncreaseInterruptBudget::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << amount() << ")";
+}
+
+void ReduceInterruptBudgetForLoop::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << amount() << ")";
+}
+
+void ReduceInterruptBudgetForReturn::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << amount() << ")";
+}
+
+void Deopt::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << DeoptimizeReasonToString(reason()) << ")";
+}
+
+void JumpToInlined::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << Brief(*unit()->shared_function_info().object()) << ")";
+}
+
+void BranchIfRootConstant::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << RootsTable::name(root_index_) << ")";
+}
+
+void BranchIfFloat64Compare::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
os << "(" << operation_ << ")";
}
-void BranchIfToBooleanTrue::AllocateVreg(
- MaglevVregAllocationState* vreg_state) {
- // TODO(victorgomes): consider using any input instead.
- UseRegister(condition_input());
+void BranchIfInt32Compare::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << operation_ << ")";
}
-void BranchIfToBooleanTrue::GenerateCode(MaglevAssembler* masm,
- const ProcessingState& state) {
- // BasicBlocks are zone allocated and so safe to be casted to ZoneLabelRef.
- ZoneLabelRef true_label =
- ZoneLabelRef::UnsafeFromLabelPointer(if_true()->label());
- ZoneLabelRef false_label =
- ZoneLabelRef::UnsafeFromLabelPointer(if_false()->label());
- bool fallthrough_when_true = (if_true() == state.next_block());
- __ ToBoolean(ToRegister(condition_input()), true_label, false_label,
- fallthrough_when_true);
+
+void BranchIfReferenceCompare::PrintParams(
+ std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << operation_ << ")";
+}
+
+void BranchIfTypeOf::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << interpreter::TestTypeOfFlags::ToString(literal_) << ")";
}
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
index 2df4ef72b6..09966e259c 100644
--- a/deps/v8/src/maglev/maglev-ir.h
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -11,13 +11,19 @@
#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
#include "src/codegen/label.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/reglist.h"
#include "src/codegen/source-position.h"
#include "src/common/globals.h"
#include "src/common/operation.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
+// TODO(dmercadier): move the Turboshaft utils functions to shared code (in
+// particular, any_of, which is the reason we're including this Turboshaft
+// header)
+#include "src/compiler/turboshaft/utils.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
@@ -30,6 +36,9 @@
namespace v8 {
namespace internal {
+
+enum Condition : uint8_t;
+
namespace maglev {
class BasicBlock;
@@ -40,6 +49,7 @@ class MaglevCompilationUnit;
class MaglevGraphLabeller;
class MaglevVregAllocationState;
class CompactInterpreterFrameState;
+class MergePointInterpreterFrameState;
// Nodes are either
// 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
@@ -72,28 +82,27 @@ class CompactInterpreterFrameState;
V(GenericGreaterThan) \
V(GenericGreaterThanOrEqual)
-#define INT32_OPERATIONS_NODE_LIST(V) \
- V(Int32AddWithOverflow) \
- V(Int32SubtractWithOverflow) \
- V(Int32MultiplyWithOverflow) \
- V(Int32DivideWithOverflow) \
- V(Int32ModulusWithOverflow) \
- /*V(Int32ExponentiateWithOverflow)*/ \
- V(Int32BitwiseAnd) \
- V(Int32BitwiseOr) \
- V(Int32BitwiseXor) \
- V(Int32ShiftLeft) \
- V(Int32ShiftRight) \
- V(Int32ShiftRightLogical) \
- /*V(Int32BitwiseNot) */ \
- /*V(Int32NegateWithOverflow) */ \
- /*V(Int32IncrementWithOverflow)*/ \
- /*V(Int32DecrementWithOverflow)*/ \
- V(Int32Equal) \
- V(Int32StrictEqual) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(Int32GreaterThan) \
+#define INT32_OPERATIONS_NODE_LIST(V) \
+ V(Int32AddWithOverflow) \
+ V(Int32SubtractWithOverflow) \
+ V(Int32MultiplyWithOverflow) \
+ V(Int32DivideWithOverflow) \
+ V(Int32ModulusWithOverflow) \
+ V(Int32BitwiseAnd) \
+ V(Int32BitwiseOr) \
+ V(Int32BitwiseXor) \
+ V(Int32ShiftLeft) \
+ V(Int32ShiftRight) \
+ V(Int32ShiftRightLogical) \
+ V(Int32BitwiseNot) \
+ V(Int32NegateWithOverflow) \
+ V(Int32IncrementWithOverflow) \
+ V(Int32DecrementWithOverflow) \
+ V(Int32Equal) \
+ V(Int32StrictEqual) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(Int32GreaterThan) \
V(Int32GreaterThanOrEqual)
#define FLOAT64_OPERATIONS_NODE_LIST(V) \
@@ -101,20 +110,22 @@ class CompactInterpreterFrameState;
V(Float64Subtract) \
V(Float64Multiply) \
V(Float64Divide) \
- /*V(Float64Modulus)*/ \
- /*V(Float64Exponentiate)*/ \
- /*V(Float64Negate) */ \
- /*V(Float64Increment)*/ \
- /*V(Float64Decrement)*/ \
+ V(Float64Exponentiate) \
+ V(Float64Modulus) \
+ V(Float64Negate) \
+ V(Float64Round) \
V(Float64Equal) \
V(Float64StrictEqual) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
V(Float64GreaterThan) \
- V(Float64GreaterThanOrEqual)
+ V(Float64GreaterThanOrEqual) \
+ V(Float64Ieee754Unary) \
+ V(Float64SilenceNaN)
#define CONSTANT_VALUE_NODE_LIST(V) \
V(Constant) \
+ V(ExternalConstant) \
V(Float64Constant) \
V(Int32Constant) \
V(RootConstant) \
@@ -122,113 +133,175 @@ class CompactInterpreterFrameState;
#define INLINE_BUILTIN_NODE_LIST(V) \
V(BuiltinStringFromCharCode) \
- V(BuiltinStringPrototypeCharCodeAt)
-
-#define VALUE_NODE_LIST(V) \
- V(Call) \
- V(CallBuiltin) \
- V(CallRuntime) \
- V(CallWithSpread) \
- V(CallKnownJSFunction) \
- V(Construct) \
- V(ConstructWithSpread) \
- V(ConvertReceiver) \
- V(CreateEmptyArrayLiteral) \
- V(CreateArrayLiteral) \
- V(CreateShallowArrayLiteral) \
- V(CreateObjectLiteral) \
- V(CreateEmptyObjectLiteral) \
- V(CreateShallowObjectLiteral) \
- V(CreateFunctionContext) \
- V(CreateClosure) \
- V(FastCreateClosure) \
- V(CreateRegExpLiteral) \
- V(DeleteProperty) \
- V(ForInPrepare) \
- V(ForInNext) \
- V(GeneratorRestoreRegister) \
- V(GetIterator) \
- V(GetSecondReturnedValue) \
- V(GetTemplateObject) \
- V(InitialValue) \
- V(LoadTaggedField) \
- V(LoadDoubleField) \
- V(LoadTaggedElement) \
- V(LoadDoubleElement) \
- V(LoadGlobal) \
- V(LoadNamedGeneric) \
- V(LoadNamedFromSuperGeneric) \
- V(SetNamedGeneric) \
- V(DefineNamedOwnGeneric) \
- V(StoreInArrayLiteralGeneric) \
- V(StoreGlobal) \
- V(GetKeyedGeneric) \
- V(SetKeyedGeneric) \
- V(DefineKeyedOwnGeneric) \
- V(Phi) \
- V(RegisterInput) \
- V(CheckedSmiTag) \
- V(UnsafeSmiTag) \
- V(CheckedSmiUntag) \
- V(UnsafeSmiUntag) \
- V(CheckedInternalizedString) \
- V(CheckedObjectToIndex) \
- V(ChangeInt32ToFloat64) \
- V(CheckedTruncateFloat64ToInt32) \
- V(Float64Box) \
- V(CheckedFloat64Unbox) \
- V(LogicalNot) \
- V(SetPendingMessage) \
- V(StringAt) \
- V(StringLength) \
- V(ToBoolean) \
- V(ToBooleanLogicalNot) \
- V(TaggedEqual) \
- V(TaggedNotEqual) \
- V(TestInstanceOf) \
- V(TestUndetectable) \
- V(TestTypeOf) \
- V(ToName) \
- V(ToNumberOrNumeric) \
- V(ToObject) \
- V(ToString) \
- CONSTANT_VALUE_NODE_LIST(V) \
- INT32_OPERATIONS_NODE_LIST(V) \
- FLOAT64_OPERATIONS_NODE_LIST(V) \
- GENERIC_OPERATIONS_NODE_LIST(V) \
+ V(BuiltinStringPrototypeCharCodeOrCodePointAt)
+
+#define VALUE_NODE_LIST(V) \
+ V(Identity) \
+ V(AllocateRaw) \
+ V(Call) \
+ V(CallBuiltin) \
+ V(CallRuntime) \
+ V(CallWithArrayLike) \
+ V(CallWithSpread) \
+ V(CallKnownJSFunction) \
+ V(CallSelf) \
+ V(Construct) \
+ V(ConstructWithSpread) \
+ V(ConvertReceiver) \
+ V(ConvertHoleToUndefined) \
+ V(CreateEmptyArrayLiteral) \
+ V(CreateArrayLiteral) \
+ V(CreateShallowArrayLiteral) \
+ V(CreateObjectLiteral) \
+ V(CreateEmptyObjectLiteral) \
+ V(CreateShallowObjectLiteral) \
+ V(CreateFunctionContext) \
+ V(CreateClosure) \
+ V(FastCreateClosure) \
+ V(CreateRegExpLiteral) \
+ V(DeleteProperty) \
+ V(EnsureWritableFastElements) \
+ V(FoldedAllocation) \
+ V(ForInPrepare) \
+ V(ForInNext) \
+ V(GeneratorRestoreRegister) \
+ V(GetIterator) \
+ V(GetSecondReturnedValue) \
+ V(GetTemplateObject) \
+ V(InitialValue) \
+ V(LoadPolymorphicDoubleField) \
+ V(LoadPolymorphicTaggedField) \
+ V(LoadTaggedField) \
+ V(LoadDoubleField) \
+ V(LoadTaggedFieldByFieldIndex) \
+ V(LoadFixedArrayElement) \
+ V(LoadFixedDoubleArrayElement) \
+ V(LoadSignedIntDataViewElement) \
+ V(LoadDoubleDataViewElement) \
+ V(LoadSignedIntTypedArrayElement) \
+ V(LoadSignedIntTypedArrayElementNoDeopt) \
+ V(LoadUnsignedIntTypedArrayElement) \
+ V(LoadUnsignedIntTypedArrayElementNoDeopt) \
+ V(LoadDoubleTypedArrayElement) \
+ V(LoadDoubleTypedArrayElementNoDeopt) \
+ V(LoadEnumCacheLength) \
+ V(LoadGlobal) \
+ V(LoadNamedGeneric) \
+ V(LoadNamedFromSuperGeneric) \
+ V(SetNamedGeneric) \
+ V(DefineNamedOwnGeneric) \
+ V(StoreInArrayLiteralGeneric) \
+ V(StoreGlobal) \
+ V(GetKeyedGeneric) \
+ V(SetKeyedGeneric) \
+ V(DefineKeyedOwnGeneric) \
+ V(Phi) \
+ V(RegisterInput) \
+ V(CheckedSmiTagInt32) \
+ V(CheckedSmiTagUint32) \
+ V(UnsafeSmiTag) \
+ V(CheckedSmiUntag) \
+ V(UnsafeSmiUntag) \
+ V(CheckedInternalizedString) \
+ V(CheckedObjectToIndex) \
+ V(CheckedTruncateNumberOrOddballToInt32) \
+ V(CheckedInt32ToUint32) \
+ V(CheckedUint32ToInt32) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeUint32ToFloat64) \
+ V(CheckedTruncateFloat64ToInt32) \
+ V(CheckedTruncateFloat64ToUint32) \
+ V(TruncateNumberOrOddballToInt32) \
+ V(TruncateUint32ToInt32) \
+ V(TruncateFloat64ToInt32) \
+ V(UnsafeTruncateUint32ToInt32) \
+ V(UnsafeTruncateFloat64ToInt32) \
+ V(Int32ToUint8Clamped) \
+ V(Uint32ToUint8Clamped) \
+ V(Float64ToUint8Clamped) \
+ V(CheckedNumberToUint8Clamped) \
+ V(Int32ToNumber) \
+ V(Uint32ToNumber) \
+ V(Float64ToTagged) \
+ V(HoleyFloat64Box) \
+ V(CheckedSmiTagFloat64) \
+ V(CheckedNumberOrOddballToFloat64) \
+ V(UncheckedNumberOrOddballToFloat64) \
+ V(LogicalNot) \
+ V(SetPendingMessage) \
+ V(StringAt) \
+ V(StringLength) \
+ V(ToBoolean) \
+ V(ToBooleanLogicalNot) \
+ V(TaggedEqual) \
+ V(TaggedNotEqual) \
+ V(TestInstanceOf) \
+ V(TestUndetectable) \
+ V(TestTypeOf) \
+ V(ToName) \
+ V(ToNumberOrNumeric) \
+ V(ToObject) \
+ V(ToString) \
+ CONSTANT_VALUE_NODE_LIST(V) \
+ INT32_OPERATIONS_NODE_LIST(V) \
+ FLOAT64_OPERATIONS_NODE_LIST(V) \
+ GENERIC_OPERATIONS_NODE_LIST(V) \
INLINE_BUILTIN_NODE_LIST(V)
#define GAP_MOVE_NODE_LIST(V) \
V(ConstantGapMove) \
V(GapMove)
-#define NODE_LIST(V) \
- V(AssertInt32) \
- V(CheckHeapObject) \
- V(CheckInt32Condition) \
- V(CheckJSArrayBounds) \
- V(CheckJSObjectElementsBounds) \
- V(CheckMaps) \
- V(CheckMapsWithMigration) \
- V(CheckNumber) \
- V(CheckSmi) \
- V(CheckString) \
- V(CheckSymbol) \
- V(CheckValue) \
- V(DebugBreak) \
- V(GeneratorStore) \
- V(JumpLoopPrologue) \
- V(StoreMap) \
- V(StoreDoubleField) \
- V(StoreTaggedFieldNoWriteBarrier) \
- V(StoreTaggedFieldWithWriteBarrier) \
- V(IncreaseInterruptBudget) \
- V(ReduceInterruptBudget) \
- V(ThrowReferenceErrorIfHole) \
- V(ThrowSuperNotCalledIfHole) \
- V(ThrowSuperAlreadyCalledIfNotHole) \
- V(ThrowIfNotSuperConstructor) \
- GAP_MOVE_NODE_LIST(V) \
+#define NODE_LIST(V) \
+ V(AssertInt32) \
+ V(CheckDynamicValue) \
+ V(CheckInt32IsSmi) \
+ V(CheckUint32IsSmi) \
+ V(CheckHeapObject) \
+ V(CheckInt32Condition) \
+ V(CheckFixedArrayNonEmpty) \
+ V(CheckJSArrayBounds) \
+ V(CheckJSDataViewBounds) \
+ V(CheckJSObjectElementsBounds) \
+ V(CheckJSTypedArrayBounds) \
+ V(CheckMaps) \
+ V(CheckMapsWithMigration) \
+ V(CheckNumber) \
+ V(CheckSmi) \
+ V(CheckString) \
+ V(CheckSymbol) \
+ V(CheckValue) \
+ V(CheckValueEqualsString) \
+ V(CheckInstanceType) \
+ V(DebugBreak) \
+ V(FunctionEntryStackCheck) \
+ V(GeneratorStore) \
+ V(JumpLoopPrologue) \
+ V(StoreMap) \
+ V(StoreDoubleField) \
+ V(CheckedStoreFixedArraySmiElement) \
+ V(StoreFixedArrayElementWithWriteBarrier) \
+ V(StoreFixedArrayElementNoWriteBarrier) \
+ V(StoreFixedDoubleArrayElement) \
+ V(StoreFloat64) \
+ V(StoreIntTypedArrayElement) \
+ V(StoreIntTypedArrayElementNoDeopt) \
+ V(StoreDoubleTypedArrayElement) \
+ V(StoreDoubleTypedArrayElementNoDeopt) \
+ V(StoreSignedIntDataViewElement) \
+ V(StoreDoubleDataViewElement) \
+ V(StoreTaggedFieldNoWriteBarrier) \
+ V(StoreTaggedFieldWithWriteBarrier) \
+ V(CheckedStoreSmiField) \
+ V(IncreaseInterruptBudget) \
+ V(ReduceInterruptBudgetForLoop) \
+ V(ReduceInterruptBudgetForReturn) \
+ V(ThrowReferenceErrorIfHole) \
+ V(ThrowSuperNotCalledIfHole) \
+ V(ThrowSuperAlreadyCalledIfNotHole) \
+ V(ThrowIfNotSuperConstructor) \
+ V(TransitionElementsKind) \
+ GAP_MOVE_NODE_LIST(V) \
VALUE_NODE_LIST(V)
#define BRANCH_CONTROL_NODE_LIST(V) \
@@ -238,7 +311,8 @@ class CompactInterpreterFrameState;
V(BranchIfInt32Compare) \
V(BranchIfFloat64Compare) \
V(BranchIfUndefinedOrNull) \
- V(BranchIfJSReceiver)
+ V(BranchIfJSReceiver) \
+ V(BranchIfTypeOf)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
V(Switch) \
@@ -361,7 +435,110 @@ class UnconditionalControlNode;
class TerminalControlNode;
class ValueNode;
-enum class ValueRepresentation : uint8_t { kTagged, kInt32, kFloat64 };
+enum class ValueRepresentation : uint8_t {
+ kTagged,
+ kInt32,
+ kUint32,
+ kFloat64,
+ kWord64
+};
+
+enum class TaggedToFloat64ConversionType : uint8_t {
+ kNumber,
+ kNumberOrOddball,
+};
+
+constexpr Condition ConditionFor(Operation cond);
+
+bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node);
+bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node);
+
+inline int ExternalArrayElementSize(const ExternalArrayType element_type) {
+ switch (element_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ DCHECK_LE(sizeof(ctype), 8); \
+ return sizeof(ctype);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+ }
+}
+
+inline int ElementsKindSize(ElementsKind element_kind) {
+ switch (element_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ DCHECK_LE(sizeof(ctype), 8); \
+ return sizeof(ctype);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+ }
+}
+
+inline std::ostream& operator<<(std::ostream& os,
+ const ValueRepresentation& repr) {
+ switch (repr) {
+ case ValueRepresentation::kTagged:
+ return os << "Tagged";
+ case ValueRepresentation::kInt32:
+ return os << "Int32";
+ case ValueRepresentation::kUint32:
+ return os << "Uint32";
+ case ValueRepresentation::kFloat64:
+ return os << "Float64";
+ case ValueRepresentation::kWord64:
+ return os << "Word64";
+ }
+}
+
+inline std::ostream& operator<<(
+ std::ostream& os, const TaggedToFloat64ConversionType& conversion_type) {
+ switch (conversion_type) {
+ case TaggedToFloat64ConversionType::kNumber:
+ return os << "Number";
+ case TaggedToFloat64ConversionType::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ }
+}
+
+inline bool HasOnlyJSTypedArrayMaps(base::Vector<const compiler::MapRef> maps) {
+ for (compiler::MapRef map : maps) {
+ if (!map.IsJSTypedArrayMap()) return false;
+ }
+ return true;
+}
+
+inline bool HasOnlyJSArrayMaps(base::Vector<const compiler::MapRef> maps) {
+ for (compiler::MapRef map : maps) {
+ if (!map.IsJSArrayMap()) return false;
+ }
+ return true;
+}
+
+inline bool HasOnlyJSObjectMaps(base::Vector<const compiler::MapRef> maps) {
+ for (compiler::MapRef map : maps) {
+ if (!map.IsJSObjectMap()) return false;
+ }
+ return true;
+}
+
+inline bool HasOnlyStringMaps(base::Vector<const compiler::MapRef> maps) {
+ for (compiler::MapRef map : maps) {
+ if (!map.IsStringMap()) return false;
+ }
+ return true;
+}
+
+inline bool HasOnlyNumberMaps(base::Vector<const compiler::MapRef> maps) {
+ for (compiler::MapRef map : maps) {
+ if (map.instance_type() != HEAP_NUMBER_TYPE) return false;
+ }
+ return true;
+}
#define DEF_FORWARD_DECLARATION(type, ...) class type;
NODE_BASE_LIST(DEF_FORWARD_DECLARATION)
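NODE_BASE_LIST and the V(...) lists above are X-macros: each node is named once, and every derived artifact (forward declarations as here, the Opcode enum, per-opcode switches) is produced by re-expanding the list with a different definition of V. A self-contained sketch of the pattern with a made-up three-entry list:

#include <iostream>

// A made-up list; the real NODE_LIST has far more entries and passes extra
// arguments that each expansion can ignore.
#define TOY_NODE_LIST(V) \
  V(Add)                 \
  V(Call)                \
  V(Return)

// Expansion 1: forward declarations, like DEF_FORWARD_DECLARATION above.
#define DEF_FWD(Name) class Name;
TOY_NODE_LIST(DEF_FWD)
#undef DEF_FWD

// Expansion 2: an opcode enum.
enum class Opcode {
#define DEF_ENUM(Name) k##Name,
  TOY_NODE_LIST(DEF_ENUM)
#undef DEF_ENUM
};

// Expansion 3: a name table for printing.
const char* OpcodeName(Opcode op) {
  switch (op) {
#define DEF_CASE(Name)    \
  case Opcode::k##Name:   \
    return #Name;
    TOY_NODE_LIST(DEF_CASE)
#undef DEF_CASE
  }
  return "unknown";
}

int main() { std::cout << OpcodeName(Opcode::kCall) << "\n"; }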
@@ -477,13 +654,20 @@ class BasicBlockRef {
class OpProperties {
public:
- constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); }
+ constexpr bool is_call() const {
+ // Only returns true for non-deferred calls. Use `is_any_call` to check
+ // deferred calls as well.
+ return kIsCallBit::decode(bitfield_);
+ }
constexpr bool can_eager_deopt() const {
return kCanEagerDeoptBit::decode(bitfield_);
}
constexpr bool can_lazy_deopt() const {
return kCanLazyDeoptBit::decode(bitfield_);
}
+ constexpr bool can_deopt() const {
+ return can_eager_deopt() || can_lazy_deopt();
+ }
constexpr bool can_throw() const {
return kCanThrowBit::decode(bitfield_) && can_lazy_deopt();
}
@@ -495,6 +679,9 @@ class OpProperties {
constexpr ValueRepresentation value_representation() const {
return kValueRepresentationBits::decode(bitfield_);
}
+ constexpr bool is_tagged() const {
+ return value_representation() == ValueRepresentation::kTagged;
+ }
constexpr bool is_conversion() const {
return kIsConversionBit::decode(bitfield_);
}
@@ -504,9 +691,20 @@ class OpProperties {
constexpr bool is_pure() const {
return (bitfield_ & kPureMask) == kPureValue;
}
- constexpr bool is_required_when_unused() const {
+ constexpr bool has_any_side_effects() const {
return can_write() || non_memory_side_effects();
}
+ constexpr bool is_required_when_unused() {
+ if (is_conversion()) {
+      // Calls in conversions are not counted as a side effect as far as
+ // is_required_when_unused is concerned, since they should always be to
+ // the Allocate builtin.
+ return has_any_side_effects() || can_throw() || can_deopt();
+ } else {
+ return has_any_side_effects() || can_throw() || can_deopt() ||
+ is_any_call();
+ }
+ }
constexpr OpProperties operator|(const OpProperties& that) {
return OpProperties(bitfield_ | that.bitfield_);
@@ -538,10 +736,18 @@ class OpProperties {
return OpProperties(
kValueRepresentationBits::encode(ValueRepresentation::kTagged));
}
+ static constexpr OpProperties ExternalReference() {
+ return OpProperties(
+ kValueRepresentationBits::encode(ValueRepresentation::kWord64));
+ }
static constexpr OpProperties Int32() {
return OpProperties(
kValueRepresentationBits::encode(ValueRepresentation::kInt32));
}
+ static constexpr OpProperties Uint32() {
+ return OpProperties(
+ kValueRepresentationBits::encode(ValueRepresentation::kUint32));
+ }
static constexpr OpProperties Float64() {
return OpProperties(
kValueRepresentationBits::encode(ValueRepresentation::kFloat64));
@@ -549,9 +755,6 @@ class OpProperties {
static constexpr OpProperties ConversionNode() {
return OpProperties(kIsConversionBit::encode(true));
}
- static constexpr OpProperties NeedsRegisterSnapshot() {
- return OpProperties(kNeedsRegisterSnapshotBit::encode(true));
- }
// Without auditing the call target, we must assume it can cause a lazy deopt
// and throw. Use this when codegen calls runtime or a builtin, unless
// certain that the target either doesn't throw or cannot deopt.
@@ -576,6 +779,15 @@ class OpProperties {
constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
operator uint32_t() const { return bitfield_; }
+ OpProperties WithNewValueRepresentation(ValueRepresentation new_repr) const {
+ return OpProperties(kValueRepresentationBits::update(bitfield_, new_repr));
+ }
+
+ OpProperties WithoutDeopt() const {
+ return OpProperties(kCanLazyDeoptBit::update(
+ kCanEagerDeoptBit::update(bitfield_, false), false));
+ }
+
private:
using kIsCallBit = base::BitField<bool, 0, 1>;
using kCanEagerDeoptBit = kIsCallBit::Next<bool, 1>;
@@ -585,7 +797,7 @@ class OpProperties {
using kCanWriteBit = kCanReadBit::Next<bool, 1>;
using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
using kValueRepresentationBits =
- kNonMemorySideEffectsBit::Next<ValueRepresentation, 2>;
+ kNonMemorySideEffectsBit::Next<ValueRepresentation, 3>;
using kIsConversionBit = kValueRepresentationBits::Next<bool, 1>;
using kNeedsRegisterSnapshotBit = kIsConversionBit::Next<bool, 1>;
@@ -595,12 +807,32 @@ class OpProperties {
kCanWriteBit::encode(false) |
kNonMemorySideEffectsBit::encode(false);
+ // NeedsRegisterSnapshot is only used for DeferredCall, and we rely on this in
+ // `is_any_call` to detect deferred calls. If you need to use
+  // NeedsRegisterSnapshot for something other than DeferredCall, then you'll
+ // have to update `is_any_call`.
+ static constexpr OpProperties NeedsRegisterSnapshot() {
+ return OpProperties(kNeedsRegisterSnapshotBit::encode(true));
+ }
+
const uint32_t bitfield_;
public:
static const size_t kSize = kNeedsRegisterSnapshotBit::kLastUsedBit + 1;
+
+ constexpr bool is_any_call() const {
+ // Currently, there is no kDeferredCall bit, but DeferredCall only sets a
+    // single bit: kNeedsRegisterSnapshot. If this static assert breaks, it
+ // means that you added additional properties to DeferredCall, and you
+ // should update this function accordingly.
+ static_assert(DeferredCall().bitfield_ ==
+ kNeedsRegisterSnapshotBit::encode(true));
+ return is_call() || needs_register_snapshot();
+ }
};
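OpProperties packs every per-node fact into a single constexpr bitfield so properties compose with operator| at compile time, and is_any_call defends its "needs a register snapshot implies deferred call" shortcut with a static_assert on DeferredCall's bit pattern. A reduced sketch of that style, with hand-rolled masks instead of base::BitField (illustrative, not the real class):

#include <cstdint>

// Reduced model: two property bits composed at compile time.
class ToyProperties {
 public:
  static constexpr ToyProperties Call() { return ToyProperties(kIsCall); }
  static constexpr ToyProperties NeedsRegisterSnapshot() {
    return ToyProperties(kNeedsSnapshot);
  }
  static constexpr ToyProperties DeferredCall() {
    // Deferred calls are encoded purely as "needs a register snapshot".
    return NeedsRegisterSnapshot();
  }

  constexpr bool is_call() const { return bits_ & kIsCall; }
  constexpr bool needs_register_snapshot() const {
    return bits_ & kNeedsSnapshot;
  }
  constexpr bool is_any_call() const {
    // If DeferredCall() ever gains extra bits, this assert forces whoever
    // changed it to revisit the shortcut below.
    static_assert(DeferredCall().bits_ == kNeedsSnapshot);
    return is_call() || needs_register_snapshot();
  }

  constexpr ToyProperties operator|(ToyProperties other) const {
    return ToyProperties(bits_ | other.bits_);
  }

 private:
  static constexpr uint32_t kIsCall = 1u << 0;
  static constexpr uint32_t kNeedsSnapshot = 1u << 1;
  constexpr explicit ToyProperties(uint32_t bits) : bits_(bits) {}
  uint32_t bits_;
};

constexpr ToyProperties kCallWithSnapshot =
    ToyProperties::Call() | ToyProperties::NeedsRegisterSnapshot();
static_assert(kCallWithSnapshot.is_any_call());

int main() { return kCallWithSnapshot.is_call() ? 0 : 1; }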
+constexpr inline OpProperties StaticPropertiesForOpcode(Opcode opcode);
+
class ValueLocation {
public:
ValueLocation() = default;
@@ -675,11 +907,13 @@ class Input : public InputLocation {
};
class InterpretedDeoptFrame;
+class InlinedArgumentsDeoptFrame;
class BuiltinContinuationDeoptFrame;
class DeoptFrame {
public:
enum class FrameType {
kInterpretedFrame,
+ kInlinedArgumentsFrame,
kBuiltinContinuationFrame,
};
@@ -687,6 +921,7 @@ class DeoptFrame {
const DeoptFrame* parent() const { return parent_; }
inline const InterpretedDeoptFrame& as_interpreted() const;
+ inline const InlinedArgumentsDeoptFrame& as_inlined_arguments() const;
inline const BuiltinContinuationDeoptFrame& as_builtin_continuation() const;
protected:
@@ -696,16 +931,37 @@ class DeoptFrame {
BytecodeOffset bytecode_position;
SourcePosition source_position;
};
+ struct InlinedArgumentsFrameData {
+ const MaglevCompilationUnit& unit;
+ BytecodeOffset bytecode_position;
+ base::Vector<ValueNode*> arguments;
+ };
struct BuiltinContinuationFrameData {
Builtin builtin_id;
base::Vector<ValueNode*> parameters;
- ValueNode* context;
+ // All of the ValueNode inputs in InterpretedFrameData,
+ // InlinedArgumentsFrameData and BuiltinContinuationFrameData are stored in
+ // non-const pointers (`frame_state.live_registers_and_accumulator_`,
+ // `arguments` and `parameters`), which allows DeepForEachInput and
+ // CompactInterpreterFrameState::ForEachValue to iterate over all of the
+ // inputs as ValueNode*&, allowing the callback function to mutate the frame
+ // state inputs. This is in particular useful for PhiRepresentationSelector,
+ // which may need to update some frame state inputs.
+ // However, the following `context` field is the only exception, as it is
+ // stored directly in the frame state. Thus, we made it mutable, so that we
+ // can update it in DeepForEachInput while still keeping as many `const`
+ // methods and fields as we can.
+ mutable ValueNode* context;
};
DeoptFrame(InterpretedFrameData data, const DeoptFrame* parent)
: interpreted_frame_data_(data),
type_(FrameType::kInterpretedFrame),
parent_(parent) {}
+ DeoptFrame(InlinedArgumentsFrameData data, const DeoptFrame* parent)
+ : inlined_arguments_frame_data_(data),
+ type_(FrameType::kInlinedArgumentsFrame),
+ parent_(parent) {}
DeoptFrame(BuiltinContinuationFrameData data, const DeoptFrame* parent)
: builtin_continuation_frame_data_(data),
type_(FrameType::kBuiltinContinuationFrame),
@@ -713,6 +969,7 @@ class DeoptFrame {
union {
const InterpretedFrameData interpreted_frame_data_;
+ const InlinedArgumentsFrameData inlined_arguments_frame_data_;
const BuiltinContinuationFrameData builtin_continuation_frame_data_;
};
FrameType type_;
@@ -752,6 +1009,36 @@ inline const InterpretedDeoptFrame& DeoptFrame::as_interpreted() const {
return static_cast<const InterpretedDeoptFrame&>(*this);
}
+class InlinedArgumentsDeoptFrame : public DeoptFrame {
+ public:
+ InlinedArgumentsDeoptFrame(const MaglevCompilationUnit& unit,
+ BytecodeOffset bytecode_position,
+ base::Vector<ValueNode*> arguments,
+ const DeoptFrame* parent)
+ : DeoptFrame(
+ InlinedArgumentsFrameData{unit, bytecode_position, arguments},
+ parent) {}
+
+ const MaglevCompilationUnit& unit() const {
+ return interpreted_frame_data_.unit;
+ }
+ BytecodeOffset bytecode_position() const {
+ return inlined_arguments_frame_data_.bytecode_position;
+ }
+ base::Vector<ValueNode*> arguments() const {
+ return inlined_arguments_frame_data_.arguments;
+ }
+};
+
+// Make sure storing/passing deopt frames by value doesn't truncate them.
+static_assert(sizeof(InlinedArgumentsDeoptFrame) == sizeof(DeoptFrame));
+
+inline const InlinedArgumentsDeoptFrame& DeoptFrame::as_inlined_arguments()
+ const {
+ DCHECK_EQ(type(), FrameType::kInlinedArgumentsFrame);
+ return static_cast<const InlinedArgumentsDeoptFrame&>(*this);
+}
+
class BuiltinContinuationDeoptFrame : public DeoptFrame {
public:
BuiltinContinuationDeoptFrame(Builtin builtin_id,
@@ -767,7 +1054,7 @@ class BuiltinContinuationDeoptFrame : public DeoptFrame {
base::Vector<ValueNode*> parameters() const {
return builtin_continuation_frame_data_.parameters;
}
- ValueNode* context() const {
+ ValueNode*& context() const {
return builtin_continuation_frame_data_.context;
}
};
@@ -783,10 +1070,14 @@ DeoptFrame::as_builtin_continuation() const {
class DeoptInfo {
protected:
- DeoptInfo(Zone* zone, DeoptFrame top_frame);
+ DeoptInfo(Zone* zone, const DeoptFrame top_frame,
+ compiler::FeedbackSource feedback_to_update);
public:
const DeoptFrame& top_frame() const { return top_frame_; }
+ const compiler::FeedbackSource& feedback_to_update() {
+ return feedback_to_update_;
+ }
InputLocation* input_locations() const { return input_locations_; }
Label* deopt_entry_label() { return &deopt_entry_label_; }
@@ -796,6 +1087,7 @@ class DeoptInfo {
private:
const DeoptFrame top_frame_;
+ const compiler::FeedbackSource feedback_to_update_;
InputLocation* const input_locations_;
Label deopt_entry_label_;
int translation_index_ = -1;
@@ -809,8 +1101,9 @@ struct RegisterSnapshot {
class EagerDeoptInfo : public DeoptInfo {
public:
- EagerDeoptInfo(Zone* zone, DeoptFrame&& top_frame)
- : DeoptInfo(zone, std::move(top_frame)) {}
+ EagerDeoptInfo(Zone* zone, const DeoptFrame top_frame,
+ compiler::FeedbackSource feedback_to_update)
+ : DeoptInfo(zone, top_frame, feedback_to_update) {}
DeoptimizeReason reason() const { return reason_; }
void set_reason(DeoptimizeReason reason) { reason_ = reason; }
@@ -821,29 +1114,75 @@ class EagerDeoptInfo : public DeoptInfo {
class LazyDeoptInfo : public DeoptInfo {
public:
- LazyDeoptInfo(Zone* zone, DeoptFrame&& top_frame)
- : DeoptInfo(zone, std::move(top_frame)) {}
+ LazyDeoptInfo(Zone* zone, const DeoptFrame top_frame,
+ interpreter::Register result_location, int result_size,
+ compiler::FeedbackSource feedback_to_update)
+ : DeoptInfo(zone, top_frame, feedback_to_update),
+ result_location_(result_location),
+ bitfield_(
+ DeoptingCallReturnPcField::encode(kUninitializedCallReturnPc) |
+ ResultSizeField::encode(result_size)) {}
- interpreter::Register result_location() const { return result_location_; }
- int result_size() const { return result_size_; }
+ interpreter::Register result_location() const {
+ // We should only be checking this for interpreted frames, other kinds of
+ // frames shouldn't be considered for result locations.
+ DCHECK_EQ(top_frame().type(), DeoptFrame::FrameType::kInterpretedFrame);
+ return result_location_;
+ }
+ int result_size() const {
+ // We should only be checking this for interpreted frames, other kinds of
+ // frames shouldn't be considered for result locations.
+ DCHECK_EQ(top_frame().type(), DeoptFrame::FrameType::kInterpretedFrame);
+ return ResultSizeField::decode(bitfield_);
+ }
bool IsResultRegister(interpreter::Register reg) const;
- void SetResultLocation(interpreter::Register result_location,
- int result_size) {
- DCHECK(result_location.is_valid());
- DCHECK(!result_location_.is_valid());
+ void UpdateResultLocation(interpreter::Register result_location,
+ int result_size) {
+ // We should only update to a subset of the existing result location.
+ DCHECK_GE(result_location.index(), result_location_.index());
+ DCHECK_LE(result_location.index() + result_size,
+ result_location_.index() + this->result_size());
result_location_ = result_location;
- result_size_ = result_size;
+ bitfield_ = ResultSizeField::update(bitfield_, result_size);
+ }
+ bool HasResultLocation() const {
+ // We should only be checking this for interpreted frames, other kinds of
+ // frames shouldn't be considered for result locations.
+ DCHECK_EQ(top_frame().type(), DeoptFrame::FrameType::kInterpretedFrame);
+ return result_location_.is_valid();
}
- int deopting_call_return_pc() const { return deopting_call_return_pc_; }
- void set_deopting_call_return_pc(int pc) { deopting_call_return_pc_ = pc; }
+ int deopting_call_return_pc() const {
+ DCHECK_NE(DeoptingCallReturnPcField::decode(bitfield_),
+ kUninitializedCallReturnPc);
+ return DeoptingCallReturnPcField::decode(bitfield_);
+ }
+ void set_deopting_call_return_pc(int pc) {
+ DCHECK_EQ(DeoptingCallReturnPcField::decode(bitfield_),
+ kUninitializedCallReturnPc);
+ bitfield_ = DeoptingCallReturnPcField::update(bitfield_, pc);
+ }
private:
- int deopting_call_return_pc_ = -1;
- interpreter::Register result_location_ =
- interpreter::Register::invalid_value();
- int result_size_ = 1;
+ using DeoptingCallReturnPcField = base::BitField<unsigned int, 0, 30>;
+ using ResultSizeField = DeoptingCallReturnPcField::Next<unsigned int, 2>;
+
+ // The max code size is enforced by the various assemblers, but it's not
+ // visible here, so static assert against the magic constant that we happen
+ // to know is correct.
+ static constexpr int kMaxCodeSize = 512 * MB;
+ static constexpr unsigned int kUninitializedCallReturnPc =
+ DeoptingCallReturnPcField::kMax;
+ static_assert(DeoptingCallReturnPcField::is_valid(kMaxCodeSize));
+ static_assert(kMaxCodeSize != kUninitializedCallReturnPc);
+
+ // Lazy deopts can have at most two result registers -- temporarily three for
+ // ForInPrepare.
+ static_assert(ResultSizeField::kMax >= 3);
+
+ interpreter::Register result_location_;
+ uint32_t bitfield_;
};
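LazyDeoptInfo above packs the deopting call's return pc (30 bits, enough for the 512 MB code-size limit) and the result size (2 bits) into one uint32_t, using an all-ones pc as the "uninitialized" sentinel and static_asserts to prove the packing is sound. A minimal standalone version of that packing with the same 30/2 split, written with plain shifts instead of base::BitField (illustrative only):

#include <cassert>
#include <cstdint>

// 30 bits of return pc plus 2 bits of result size in a single uint32_t.
constexpr uint32_t kPcBits = 30;
constexpr uint32_t kPcMask = (1u << kPcBits) - 1;
constexpr uint32_t kUninitializedPc = kPcMask;         // all-ones sentinel
constexpr uint32_t kMaxCodeSize = 512u * 1024 * 1024;  // 512 MB
static_assert(kMaxCodeSize <= kPcMask);                // every pc fits
static_assert(kMaxCodeSize != kUninitializedPc);       // sentinel never a real pc

struct PackedLazyDeopt {
  uint32_t bits = kUninitializedPc;  // pc field starts out as the sentinel

  void set_pc(uint32_t pc) {
    assert((bits & kPcMask) == kUninitializedPc);  // may be set exactly once
    bits = (bits & ~kPcMask) | (pc & kPcMask);
  }
  uint32_t pc() const {
    assert((bits & kPcMask) != kUninitializedPc);  // must be set before reading
    return bits & kPcMask;
  }
  void set_result_size(uint32_t size) {
    assert(size < 4);  // must fit the 2-bit field
    bits = (bits & kPcMask) | (size << kPcBits);
  }
  uint32_t result_size() const { return bits >> kPcBits; }
};

int main() {
  PackedLazyDeopt d;
  d.set_result_size(2);
  d.set_pc(0x1234);
  assert(d.pc() == 0x1234 && d.result_size() == 2);
  return 0;
}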
class ExceptionHandlerInfo {
@@ -884,14 +1223,14 @@ NODE_BASE_LIST(DEF_OPCODE_OF)
#undef DEF_OPCODE_OF
template <typename T>
-T* ObjectPtrBeforeAddress(void* address) {
+constexpr T* ObjectPtrBeforeAddress(void* address) {
char* address_as_char_ptr = reinterpret_cast<char*>(address);
char* object_ptr_as_char_ptr = address_as_char_ptr - sizeof(T);
return reinterpret_cast<T*>(object_ptr_as_char_ptr);
}
template <typename T>
-const T* ObjectPtrBeforeAddress(const void* address) {
+constexpr const T* ObjectPtrBeforeAddress(const void* address) {
const char* address_as_char_ptr = reinterpret_cast<const char*>(address);
const char* object_ptr_as_char_ptr = address_as_char_ptr - sizeof(T);
return reinterpret_cast<const T*>(object_ptr_as_char_ptr);
@@ -910,7 +1249,7 @@ class NodeBase : public ZoneObject {
using NumDoubleTemporariesNeededField =
NumTemporariesNeededField::Next<uint8_t, 1>;
// Align input count to 32-bit.
- using UnusedField = NumDoubleTemporariesNeededField::Next<uint8_t, 2>;
+ using UnusedField = NumDoubleTemporariesNeededField::Next<uint8_t, 1>;
using InputCountField = UnusedField::Next<size_t, 17>;
static_assert(InputCountField::kShift == 32);
@@ -919,12 +1258,12 @@ class NodeBase : public ZoneObject {
template <class T, int size>
using NextBitField = InputCountField::Next<T, size>;
- template <class T>
- static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;
-
static constexpr int kMaxInputs = InputCountField::kMax;
public:
+ template <class T>
+ static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;
+
template <class Derived, typename... Args>
static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs,
Args&&... args) {
@@ -940,19 +1279,6 @@ class NodeBase : public ZoneObject {
return node;
}
- template <class Derived, typename... Args>
- static Derived* New(Zone* zone, DeoptFrame&& deopt_frame, Args&&... args) {
- Derived* node = New<Derived>(zone, std::forward<Args>(args)...);
- if constexpr (Derived::kProperties.can_eager_deopt()) {
- new (node->eager_deopt_info())
- EagerDeoptInfo(zone, std::move(deopt_frame));
- } else {
- static_assert(Derived::kProperties.can_lazy_deopt());
- new (node->lazy_deopt_info()) LazyDeoptInfo(zone, std::move(deopt_frame));
- }
- return node;
- }
-
// Inputs must be initialized manually.
template <class Derived, typename... Args>
static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
@@ -969,6 +1295,9 @@ class NodeBase : public ZoneObject {
constexpr OpProperties properties() const {
return OpPropertiesField::decode(bitfield_);
}
+ void set_properties(OpProperties properties) {
+ bitfield_ = OpPropertiesField::update(bitfield_, properties);
+ }
template <class T>
constexpr bool Is() const;
@@ -994,11 +1323,11 @@ class NodeBase : public ZoneObject {
return static_cast<int>(InputCountField::decode(bitfield_));
}
- Input& input(int index) {
+ constexpr Input& input(int index) {
DCHECK_LT(index, input_count());
return *(input_base() - index);
}
- const Input& input(int index) const {
+ constexpr const Input& input(int index) const {
DCHECK_LT(index, input_count());
return *(input_base() - index);
}
@@ -1006,8 +1335,10 @@ class NodeBase : public ZoneObject {
// Input iterators, use like:
//
// for (Input& input : *node) { ... }
- auto begin() { return std::make_reverse_iterator(&input(-1)); }
- auto end() { return std::make_reverse_iterator(&input(input_count() - 1)); }
+ constexpr auto begin() { return std::make_reverse_iterator(&input(-1)); }
+ constexpr auto end() {
+ return std::make_reverse_iterator(&input(input_count() - 1));
+ }
constexpr bool has_id() const { return id_ != kInvalidNodeId; }
constexpr NodeIdT id() const {
@@ -1050,6 +1381,16 @@ class NodeBase : public ZoneObject {
}
}
+ enum class InputAllocationPolicy { kFixedRegister, kArbitraryRegister, kAny };
+
+ // Some parts of Maglev require a specific iteration order of the inputs (such
+ // as UseMarkingProcessor::MarkInputUses or
+ // StraightForwardRegisterAllocator::AssignInputs). For such cases,
+ // `ForAllInputsInRegallocAssignmentOrder` can be called with a callback `f`
+ // that will be called for each input in the "correct" order.
+ template <typename Function>
+ void ForAllInputsInRegallocAssignmentOrder(Function&& f);
+
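// Editorial sketch (illustration, not part of this diff): the callback is a
// generic functor; its exact parameters are defined by the implementation,
// which is not shown in this header excerpt. Usage is assumed to look roughly
// like:
//
//   node->ForAllInputsInRegallocAssignmentOrder(
//       [&](auto&&... input_info) { /* visit one input, in regalloc order */ });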
void Print(std::ostream& os, MaglevGraphLabeller*,
bool skip_targets = false) const;
@@ -1084,11 +1425,63 @@ class NodeBase : public ZoneObject {
snapshot;
}
+ void change_input(int index, ValueNode* node) { set_input(index, node); }
+
+ void change_representation(ValueRepresentation new_repr) {
+ DCHECK_EQ(opcode(), Opcode::kPhi);
+ bitfield_ = OpPropertiesField::update(
+ bitfield_, properties().WithNewValueRepresentation(new_repr));
+ }
+
+ void set_opcode(Opcode new_opcode) {
+ bitfield_ = OpcodeField::update(bitfield_, new_opcode);
+ }
+
+ void CopyEagerDeoptInfoOf(NodeBase* other, Zone* zone) {
+ new (eager_deopt_info())
+ EagerDeoptInfo(zone, other->eager_deopt_info()->top_frame(),
+ other->eager_deopt_info()->feedback_to_update());
+ }
+
+ template <typename NodeT>
+ void OverwriteWith() {
+ OverwriteWith(NodeBase::opcode_of<NodeT>, NodeT::kProperties);
+ }
+
+ void OverwriteWith(
+ Opcode new_opcode,
+ base::Optional<OpProperties> maybe_new_properties = base::nullopt) {
+ OpProperties new_properties = maybe_new_properties.has_value()
+ ? maybe_new_properties.value()
+ : StaticPropertiesForOpcode(new_opcode);
+#ifdef DEBUG
+ CheckCanOverwriteWith(new_opcode, new_properties);
+#endif
+ set_opcode(new_opcode);
+ set_properties(new_properties);
+ }
+
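// Editorial sketch (illustration, not part of this diff): `SomeOtherNode` is a
// placeholder for any node class whose layout is compatible with the current
// one. The templated form above is equivalent to passing the target's static
// opcode and properties explicitly:
//
//   node->OverwriteWith<SomeOtherNode>();
//   // same as:
//   node->OverwriteWith(NodeBase::opcode_of<SomeOtherNode>,
//                       SomeOtherNode::kProperties);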
protected:
explicit NodeBase(uint64_t bitfield) : bitfield_(bitfield) {}
- Input* input_base() { return detail::ObjectPtrBeforeAddress<Input>(this); }
- const Input* input_base() const {
+ // Allow updating bits above NextBitField from subclasses
+ constexpr uint64_t bitfield() const { return bitfield_; }
+ void set_bitfield(uint64_t new_bitfield) {
+#ifdef DEBUG
+ // Make sure that all the base bitfield bits (all bits before the next
+ // bitfield start) are equal in the new value.
+ const uint64_t base_bitfield_mask =
+ (uint64_t{1} << NextBitField<bool, 1>::kShift) - 1;
+ DCHECK_EQ(bitfield_ & base_bitfield_mask,
+ new_bitfield & base_bitfield_mask);
+#endif
+ bitfield_ = new_bitfield;
+ }
+
+ constexpr Input* input_base() {
+ return detail::ObjectPtrBeforeAddress<Input>(this);
+ }
+ constexpr const Input* input_base() const {
return detail::ObjectPtrBeforeAddress<Input>(this);
}
Input* last_input() { return &input(input_count() - 1); }
@@ -1195,6 +1588,8 @@ class NodeBase : public ZoneObject {
return register_snapshot_address() - extra;
}
+ void CheckCanOverwriteWith(Opcode new_opcode, OpProperties new_properties);
+
uint64_t bitfield_;
NodeIdT id_ = kInvalidNodeId;
RegList temporaries_;
@@ -1238,6 +1633,10 @@ constexpr bool NodeBase::Is<TerminalControlNode>() const {
return IsTerminalControlNode(opcode());
}
+void CheckValueInputIs(const NodeBase* node, int i,
+ ValueRepresentation expected,
+ MaglevGraphLabeller* graph_labeller);
+
// The Node class hierarchy contains all non-control nodes.
class Node : public NodeBase {
public:
@@ -1260,24 +1659,47 @@ class Node : public NodeBase {
// All non-control nodes with a result.
class ValueNode : public Node {
+ private:
+ using TaggedResultNeedsDecompressField = NextBitField<bool, 1>;
+
+ protected:
+ // Subclasses may use the remaining bitfield bits.
+ template <class T, int size>
+ using NextBitField = TaggedResultNeedsDecompressField::Next<T, size>;
+
public:
ValueLocation& result() { return result_; }
const ValueLocation& result() const { return result_; }
+ void SetHint(compiler::InstructionOperand hint);
+
+ void ClearHint() { hint_ = compiler::InstructionOperand(); }
+
+ bool has_hint() { return !hint_.IsInvalid(); }
+
+ template <typename RegisterT>
+ RegisterT GetRegisterHint() {
+ if (hint_.IsInvalid()) return RegisterT::no_reg();
+ return RegisterT::from_code(
+ compiler::UnallocatedOperand::cast(hint_).fixed_register_index());
+ }
+
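// Editorial note (illustration, not part of this diff): typical use, with
// Register or DoubleRegister as the template argument:
//
//   Register hint = node->GetRegisterHint<Register>();
//   if (hint != Register::no_reg()) { /* prefer allocating to `hint` */ }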
const compiler::InstructionOperand& hint() const {
- DCHECK_EQ(state_, kSpillOrHint);
- DCHECK(spill_or_hint_.IsInvalid() || spill_or_hint_.IsUnallocated());
- return spill_or_hint_;
+ DCHECK(hint_.IsInvalid() || hint_.IsUnallocated());
+ return hint_;
}
bool is_loadable() const {
- DCHECK_EQ(state_, kSpillOrHint);
- return spill_or_hint_.IsConstant() || spill_or_hint_.IsAnyStackSlot();
+ DCHECK_EQ(state_, kSpill);
+ return spill_.IsConstant() || spill_.IsAnyStackSlot();
}
- bool is_spilled() const { return spill_or_hint_.IsAnyStackSlot(); }
+ bool is_spilled() const {
+ DCHECK_EQ(state_, kSpill);
+ return spill_.IsAnyStackSlot();
+ }
- void SetNoSpillOrHint();
+ void SetNoSpill();
void SetConstantLocation();
/* For constants only. */
@@ -1290,15 +1712,15 @@ class ValueNode : public Node {
void Spill(compiler::AllocatedOperand operand) {
#ifdef DEBUG
if (state_ == kLastUse) {
- state_ = kSpillOrHint;
+ state_ = kSpill;
} else {
DCHECK(!is_loadable());
}
#endif // DEBUG
DCHECK(!IsConstantNode(opcode()));
DCHECK(operand.IsAnyStackSlot());
- spill_or_hint_ = operand;
- DCHECK(spill_or_hint_.IsAnyStackSlot());
+ spill_ = operand;
+ DCHECK(spill_.IsAnyStackSlot());
}
compiler::AllocatedOperand spill_slot() const {
@@ -1307,9 +1729,9 @@ class ValueNode : public Node {
}
compiler::InstructionOperand loadable_slot() const {
- DCHECK_EQ(state_, kSpillOrHint);
+ DCHECK_EQ(state_, kSpill);
DCHECK(is_loadable());
- return spill_or_hint_;
+ return spill_;
}
void mark_use(NodeIdT id, InputLocation* input_location) {
@@ -1349,12 +1771,38 @@ class ValueNode : public Node {
ValueRepresentation::kTagged);
}
+ constexpr bool decompresses_tagged_result() const {
+ return TaggedResultNeedsDecompressField::decode(bitfield());
+ }
+ void SetTaggedResultNeedsDecompress() {
+ DCHECK_IMPLIES(!Is<Identity>(), is_tagged());
+ DCHECK_IMPLIES(Is<Identity>(), input(0).node()->is_tagged());
+ set_bitfield(TaggedResultNeedsDecompressField::update(bitfield(), true));
+ if (Is<Phi>()) {
+ for (Input& input : *this) {
+ // Avoid endless recursion by terminating on values already marked.
+ if (input.node()->decompresses_tagged_result()) continue;
+ input.node()->SetTaggedResultNeedsDecompress();
+ }
+ } else if (Is<Identity>()) {
+ DCHECK_EQ(input_count(), 1);
+ input(0).node()->SetTaggedResultNeedsDecompress();
+ }
+ }
+
+ constexpr ValueRepresentation value_representation() const {
+ return properties().value_representation();
+ }
+
constexpr MachineRepresentation GetMachineRepresentation() const {
switch (properties().value_representation()) {
case ValueRepresentation::kTagged:
return MachineRepresentation::kTagged;
case ValueRepresentation::kInt32:
+ case ValueRepresentation::kUint32:
return MachineRepresentation::kWord32;
+ case ValueRepresentation::kWord64:
+ return MachineRepresentation::kWord64;
case ValueRepresentation::kFloat64:
return MachineRepresentation::kFloat64;
}
@@ -1420,13 +1868,14 @@ class ValueNode : public Node {
FirstRegisterCode());
}
DCHECK(is_loadable());
- return spill_or_hint_;
+ return spill_;
}
protected:
explicit ValueNode(uint64_t bitfield)
: Node(bitfield),
- last_uses_next_use_id_(&next_use_)
+ last_uses_next_use_id_(&next_use_),
+ hint_(compiler::InstructionOperand())
#ifdef DEBUG
,
state_(kLastUse)
@@ -1461,11 +1910,11 @@ class ValueNode : public Node {
// this will be a pointer to an Input's next_use_id_ field, but it's
// initialized to this node's next_use_ to track the first use.
NodeIdT* last_uses_next_use_id_;
- compiler::InstructionOperand spill_or_hint_;
+ compiler::InstructionOperand spill_;
};
+ compiler::InstructionOperand hint_;
#ifdef DEBUG
- // TODO(leszeks): Consider splitting into kSpill and kHint.
- enum { kLastUse, kSpillOrHint } state_;
+ enum { kLastUse, kSpill } state_;
#endif // DEBUG
};
@@ -1486,69 +1935,134 @@ ValueLocation& Node::result() {
return Cast<ValueNode>()->result();
}
-template <class Derived>
-class NodeT : public Node {
- static_assert(!IsValueNode(opcode_of<Derived>));
-
+// Mixin for a node with known class (and therefore known opcode and static
+// properties), but possibly unknown numbers of inputs.
+template <typename Base, typename Derived>
+class NodeTMixin : public Base {
public:
- constexpr Opcode opcode() const { return opcode_of<Derived>; }
- const OpProperties& properties() const { return Derived::kProperties; }
+ // Shadowing for static knowledge.
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr const OpProperties& properties() const {
+ return Derived::kProperties;
+ }
+
+ template <typename... Args>
+ static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs,
+ Args&&... args) {
+ return NodeBase::New<Derived>(zone, inputs, std::forward<Args>(args)...);
+ }
+ template <typename... Args>
+ static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
+ return NodeBase::New<Derived>(zone, input_count, std::forward<Args>(args)...);
+ }
protected:
- explicit NodeT(uint64_t bitfield) : Node(bitfield) {
- DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ template <typename... Args>
+ explicit NodeTMixin(uint64_t bitfield, Args&&... args)
+ : Base(bitfield, std::forward<Args>(args)...) {
+ DCHECK_EQ(NodeBase::opcode(), NodeBase::opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::properties(), Derived::kProperties);
}
};
-template <size_t InputCount, class Derived>
-class FixedInputNodeT : public NodeT<Derived> {
+namespace detail {
+// Helper class for defining input types as a std::array, but without
+// accidental initialisation with the wrong sized initializer_list.
+template <size_t Size>
+class ArrayWrapper : public std::array<ValueRepresentation, Size> {
+ public:
+ template <typename... Args>
+ explicit constexpr ArrayWrapper(Args&&... args)
+ : std::array<ValueRepresentation, Size>({args...}) {
+ static_assert(sizeof...(args) == Size);
+ }
+};
+struct YouNeedToDefineAnInputTypesArrayInYourDerivedClass {};
+} // namespace detail
+
+// Mixin for a node with known class (and therefore known opcode and static
+// properties), and known numbers of inputs.
+template <size_t InputCount, typename Base, typename Derived>
+class FixedInputNodeTMixin : public NodeTMixin<Base, Derived> {
static constexpr size_t kInputCount = InputCount;
public:
// Shadowing for static knowledge.
constexpr bool has_inputs() const { return input_count() > 0; }
constexpr uint16_t input_count() const { return kInputCount; }
- auto end() {
+ constexpr auto end() {
return std::make_reverse_iterator(&this->input(input_count() - 1));
}
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
+ if constexpr (kInputCount != 0) {
+ static_assert(
+ std::is_same_v<const InputTypes, decltype(Derived::kInputTypes)>);
+ static_assert(kInputCount == Derived::kInputTypes.size());
+ for (int i = 0; i < static_cast<int>(kInputCount); ++i) {
+ CheckValueInputIs(this, i, Derived::kInputTypes[i], graph_labeller);
+ }
+ }
+ }
+
+ void MarkTaggedInputsAsDecompressing() const {
+ if constexpr (kInputCount != 0) {
+ static_assert(
+ std::is_same_v<const InputTypes, decltype(Derived::kInputTypes)>);
+ static_assert(kInputCount == Derived::kInputTypes.size());
+ for (int i = 0; i < static_cast<int>(kInputCount); ++i) {
+ if (Derived::kInputTypes[i] == ValueRepresentation::kTagged) {
+ ValueNode* input_node = this->input(i).node();
+ input_node->SetTaggedResultNeedsDecompress();
+ }
+ }
+ }
+ }
+
protected:
- explicit FixedInputNodeT(uint64_t bitfield) : NodeT<Derived>(bitfield) {
+ using InputTypes = detail::ArrayWrapper<kInputCount>;
+ detail::YouNeedToDefineAnInputTypesArrayInYourDerivedClass kInputTypes;
+
+ template <typename... Args>
+ explicit FixedInputNodeTMixin(uint64_t bitfield, Args&&... args)
+ : NodeTMixin<Base, Derived>(bitfield, std::forward<Args>(args)...) {
DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
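// Editorial note (illustration, not part of this diff): concrete node classes
// shadow the placeholder kInputTypes member above with a static constexpr
// Base::InputTypes array listing one ValueRepresentation per input, e.g.
//
//   static constexpr typename Base::InputTypes kInputTypes{
//       ValueRepresentation::kTagged, ValueRepresentation::kInt32};
//
// as the concrete nodes later in this file do; VerifyInputs and
// MarkTaggedInputsAsDecompressing then iterate over that array.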
template <class Derived>
-class ValueNodeT : public ValueNode {
- static_assert(IsValueNode(opcode_of<Derived>));
+using NodeT = NodeTMixin<Node, Derived>;
- public:
- constexpr Opcode opcode() const { return opcode_of<Derived>; }
- const OpProperties& properties() const { return Derived::kProperties; }
+template <class Derived>
+using ValueNodeT = NodeTMixin<ValueNode, Derived>;
- protected:
- explicit ValueNodeT(uint64_t bitfield) : ValueNode(bitfield) {
- DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
- }
-};
+template <size_t InputCount, class Derived>
+using FixedInputNodeT =
+ FixedInputNodeTMixin<InputCount, NodeT<Derived>, Derived>;
template <size_t InputCount, class Derived>
-class FixedInputValueNodeT : public ValueNodeT<Derived> {
- static constexpr size_t kInputCount = InputCount;
+using FixedInputValueNodeT =
+ FixedInputNodeTMixin<InputCount, ValueNodeT<Derived>, Derived>;
+
+class Identity : public FixedInputValueNodeT<1, Identity> {
+ using Base = FixedInputValueNodeT<1, Identity>;
public:
- // Shadowing for static knowledge.
- constexpr bool has_inputs() const { return input_count() > 0; }
- constexpr uint16_t input_count() const { return kInputCount; }
- auto end() {
- return std::make_reverse_iterator(&this->input(input_count() - 1));
- }
+ static constexpr OpProperties kProperties = OpProperties::Pure();
- protected:
- explicit FixedInputValueNodeT(uint64_t bitfield)
- : ValueNodeT<Derived>(bitfield) {
- DCHECK_EQ(NodeBase::input_count(), kInputCount);
+ explicit Identity(uint64_t bitfield) : Base(bitfield) {}
+
+ void VerifyInputs(MaglevGraphLabeller*) const {
+ // Identity is valid for all input types.
+ }
+ void MarkTaggedInputsAsDecompressing() {
+ // Do not mark inputs as decompressing here, since we don't yet know whether
+ // this node needs decompression. Instead, let
+ // ValueNode::SetTaggedResultNeedsDecompress pass through phis and identities.
}
+ void SetValueLocationConstraints() {}
+ void GenerateCode(MaglevAssembler*, const ProcessingState&) {}
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
template <class Derived, Operation kOperation>
@@ -1558,6 +2072,8 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
public:
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kOperandIndex = 0;
Input& operand_input() { return Node::input(kOperandIndex); }
@@ -1568,7 +2084,7 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
const compiler::FeedbackSource& feedback)
: Base(bitfield), feedback_(feedback) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -1582,6 +2098,8 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
public:
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@@ -1594,36 +2112,36 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
const compiler::FeedbackSource& feedback)
: Base(bitfield), feedback_(feedback) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
const compiler::FeedbackSource feedback_;
};
-#define DEF_OPERATION_NODE(Name, Super, OpName) \
+#define DEF_OPERATION_WITH_FEEDBACK_NODE(Name, Super, OpName) \
class Name : public Super<Name, Operation::k##OpName> { \
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
Name(uint64_t bitfield, const compiler::FeedbackSource& feedback) \
: Base(bitfield, feedback) {} \
- void AllocateVreg(MaglevVregAllocationState*); \
+ int MaxCallStackArgs() const { return 0; } \
+ void SetValueLocationConstraints(); \
void GenerateCode(MaglevAssembler*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
#define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \
- DEF_OPERATION_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
+ DEF_OPERATION_WITH_FEEDBACK_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
#define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \
- DEF_OPERATION_NODE(Generic##Name, BinaryWithFeedbackNode, Name)
+ DEF_OPERATION_WITH_FEEDBACK_NODE(Generic##Name, BinaryWithFeedbackNode, Name)
UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE)
ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
-
-#undef DEF_OPERATION_NODE
+#undef DEF_OPERATION_WITH_FEEDBACK_NODE
template <class Derived, Operation kOperation>
class Int32BinaryWithOverflowNode : public FixedInputValueNodeT<2, Derived> {
@@ -1632,6 +2150,8 @@ class Int32BinaryWithOverflowNode : public FixedInputValueNodeT<2, Derived> {
public:
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::Int32();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@@ -1650,7 +2170,7 @@ class Int32BinaryWithOverflowNode : public FixedInputValueNodeT<2, Derived> {
\
public: \
explicit Name(uint64_t bitfield) : Base(bitfield) {} \
- void AllocateVreg(MaglevVregAllocationState*); \
+ void SetValueLocationConstraints(); \
void GenerateCode(MaglevAssembler*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
@@ -1663,7 +2183,6 @@ DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Subtract)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Multiply)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Divide)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Modulus)
-// DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Exponentiate)
#undef DEF_INT32_BINARY_WITH_OVERFLOW_NODE
template <class Derived, Operation kOperation>
@@ -1671,8 +2190,9 @@ class Int32BinaryNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
- static constexpr OpProperties kProperties =
- OpProperties::EagerDeopt() | OpProperties::Int32();
+ static constexpr OpProperties kProperties = OpProperties::Int32();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@@ -1681,21 +2201,8 @@ class Int32BinaryNode : public FixedInputValueNodeT<2, Derived> {
protected:
explicit Int32BinaryNode(uint64_t bitfield) : Base(bitfield) {}
-
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-#define DEF_OPERATION_NODE(Name, Super, OpName) \
- class Name : public Super<Name, Operation::k##OpName> { \
- using Base = Super<Name, Operation::k##OpName>; \
- \
- public: \
- explicit Name(uint64_t bitfield) : Base(bitfield) {} \
- void AllocateVreg(MaglevVregAllocationState*); \
- void GenerateCode(MaglevAssembler*, const ProcessingState&); \
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
- };
-
#define DEF_INT32_BINARY_NODE(Name) \
DEF_OPERATION_NODE(Int32##Name, Int32BinaryNode, Name)
DEF_INT32_BINARY_NODE(BitwiseAnd)
@@ -1703,17 +2210,81 @@ DEF_INT32_BINARY_NODE(BitwiseOr)
DEF_INT32_BINARY_NODE(BitwiseXor)
DEF_INT32_BINARY_NODE(ShiftLeft)
DEF_INT32_BINARY_NODE(ShiftRight)
-DEF_INT32_BINARY_NODE(ShiftRightLogical)
#undef DEF_INT32_BINARY_NODE
-// DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Negate)
-// DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Increment)
-// DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Decrement)
+
+class Int32BitwiseNot : public FixedInputValueNodeT<1, Int32BitwiseNot> {
+ using Base = FixedInputValueNodeT<1, Int32BitwiseNot>;
+
+ public:
+ explicit Int32BitwiseNot(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
+
+ static constexpr int kValueIndex = 0;
+ Input& value_input() { return Node::input(kValueIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+template <class Derived, Operation kOperation>
+class Int32UnaryWithOverflowNode : public FixedInputValueNodeT<1, Derived> {
+ using Base = FixedInputValueNodeT<1, Derived>;
+
+ public:
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
+
+ static constexpr int kValueIndex = 0;
+ Input& value_input() { return Node::input(kValueIndex); }
+
+ protected:
+ explicit Int32UnaryWithOverflowNode(uint64_t bitfield) : Base(bitfield) {}
+};
+
+#define DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Name) \
+ DEF_OPERATION_NODE(Int32##Name##WithOverflow, Int32UnaryWithOverflowNode, \
+ Name)
+DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Negate)
+DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Increment)
+DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Decrement)
+#undef DEF_INT32_UNARY_WITH_OVERFLOW_NODE
+
+class Int32ShiftRightLogical
+ : public FixedInputValueNodeT<2, Int32ShiftRightLogical> {
+ using Base = FixedInputValueNodeT<2, Int32ShiftRightLogical>;
+
+ public:
+ explicit Int32ShiftRightLogical(uint64_t bitfield) : Base(bitfield) {}
+
+ // Unlike the other Int32 nodes, logical right shift returns a Uint32.
+ static constexpr OpProperties kProperties = OpProperties::Uint32();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
+
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return Node::input(kLeftIndex); }
+ Input& right_input() { return Node::input(kRightIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
template <class Derived, Operation kOperation>
class Int32CompareNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
+
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
@@ -1722,22 +2293,11 @@ class Int32CompareNode : public FixedInputValueNodeT<2, Derived> {
protected:
explicit Int32CompareNode(uint64_t bitfield) : Base(bitfield) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-#define DEF_OPERATION_NODE(Name, Super, OpName) \
- class Name : public Super<Name, Operation::k##OpName> { \
- using Base = Super<Name, Operation::k##OpName>; \
- \
- public: \
- explicit Name(uint64_t bitfield) : Base(bitfield) {} \
- void AllocateVreg(MaglevVregAllocationState*); \
- void GenerateCode(MaglevAssembler*, const ProcessingState&); \
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
- };
-
#define DEF_INT32_COMPARE_NODE(Name) \
DEF_OPERATION_NODE(Int32##Name, Int32CompareNode, Name)
DEF_INT32_COMPARE_NODE(Equal)
@@ -1748,14 +2308,14 @@ DEF_INT32_COMPARE_NODE(GreaterThan)
DEF_INT32_COMPARE_NODE(GreaterThanOrEqual)
#undef DEF_INT32_COMPARE_NODE
-#undef DEF_OPERATION_NODE
-
template <class Derived, Operation kOperation>
class Float64BinaryNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
static constexpr OpProperties kProperties = OpProperties::Float64();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kFloat64, ValueRepresentation::kFloat64};
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@@ -1768,38 +2328,66 @@ class Float64BinaryNode : public FixedInputValueNodeT<2, Derived> {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-#define DEF_OPERATION_NODE(Name, Super, OpName) \
+#define DEF_OPERATION_NODE_WITH_CALL(Name, Super, OpName) \
class Name : public Super<Name, Operation::k##OpName> { \
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
explicit Name(uint64_t bitfield) : Base(bitfield) {} \
- void AllocateVreg(MaglevVregAllocationState*); \
+ int MaxCallStackArgs() const; \
+ void SetValueLocationConstraints(); \
void GenerateCode(MaglevAssembler*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
+template <class Derived, Operation kOperation>
+class Float64BinaryNodeWithCall : public FixedInputValueNodeT<2, Derived> {
+ using Base = FixedInputValueNodeT<2, Derived>;
+
+ public:
+ static constexpr OpProperties kProperties =
+ OpProperties::Float64() | OpProperties::Call();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kFloat64, ValueRepresentation::kFloat64};
+
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return Node::input(kLeftIndex); }
+ Input& right_input() { return Node::input(kRightIndex); }
+
+ protected:
+ explicit Float64BinaryNodeWithCall(uint64_t bitfield) : Base(bitfield) {}
+
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
#define DEF_FLOAT64_BINARY_NODE(Name) \
DEF_OPERATION_NODE(Float64##Name, Float64BinaryNode, Name)
+#define DEF_FLOAT64_BINARY_NODE_WITH_CALL(Name) \
+ DEF_OPERATION_NODE_WITH_CALL(Float64##Name, Float64BinaryNodeWithCall, Name)
DEF_FLOAT64_BINARY_NODE(Add)
DEF_FLOAT64_BINARY_NODE(Subtract)
DEF_FLOAT64_BINARY_NODE(Multiply)
DEF_FLOAT64_BINARY_NODE(Divide)
-// DEF_FLOAT64_BINARY_NODE(Modulus)
-// DEF_FLOAT64_BINARY_NODE(Exponentiate)
-// DEF_FLOAT64_BINARY_NODE(Equal)
-// DEF_FLOAT64_BINARY_NODE(StrictEqual)
-// DEF_FLOAT64_BINARY_NODE(LessThan)
-// DEF_FLOAT64_BINARY_NODE(LessThanOrEqual)
-// DEF_FLOAT64_BINARY_NODE(GreaterThan)
-// DEF_FLOAT64_BINARY_NODE(GreaterThanOrEqual)
+#ifdef V8_TARGET_ARCH_ARM64
+// On Arm64, floating point modulus is implemented with a call to a C++
+// function, while on x64, it's implemented natively without call.
+DEF_FLOAT64_BINARY_NODE_WITH_CALL(Modulus)
+#else
+DEF_FLOAT64_BINARY_NODE(Modulus)
+#endif
+DEF_FLOAT64_BINARY_NODE_WITH_CALL(Exponentiate)
#undef DEF_FLOAT64_BINARY_NODE
+#undef DEF_FLOAT64_BINARY_NODE_WITH_CALL
template <class Derived, Operation kOperation>
class Float64CompareNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kFloat64, ValueRepresentation::kFloat64};
+
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
@@ -1808,22 +2396,11 @@ class Float64CompareNode : public FixedInputValueNodeT<2, Derived> {
protected:
explicit Float64CompareNode(uint64_t bitfield) : Base(bitfield) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-#define DEF_OPERATION_NODE(Name, Super, OpName) \
- class Name : public Super<Name, Operation::k##OpName> { \
- using Base = Super<Name, Operation::k##OpName>; \
- \
- public: \
- explicit Name(uint64_t bitfield) : Base(bitfield) {} \
- void AllocateVreg(MaglevVregAllocationState*); \
- void GenerateCode(MaglevAssembler*, const ProcessingState&); \
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
- };
-
#define DEF_FLOAT64_COMPARE_NODE(Name) \
DEF_OPERATION_NODE(Float64##Name, Float64CompareNode, Name)
DEF_FLOAT64_COMPARE_NODE(Equal)
@@ -1835,19 +2412,134 @@ DEF_FLOAT64_COMPARE_NODE(GreaterThanOrEqual)
#undef DEF_FLOAT64_COMPARE_NODE
#undef DEF_OPERATION_NODE
+#undef DEF_OPERATION_NODE_WITH_CALL
-class CheckedSmiTag : public FixedInputValueNodeT<1, CheckedSmiTag> {
- using Base = FixedInputValueNodeT<1, CheckedSmiTag>;
+class Float64Negate : public FixedInputValueNodeT<1, Float64Negate> {
+ using Base = FixedInputValueNodeT<1, Float64Negate>;
public:
- explicit CheckedSmiTag(uint64_t bitfield) : Base(bitfield) {}
+ explicit Float64Negate(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Float64();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Float64Ieee754Unary
+ : public FixedInputValueNodeT<1, Float64Ieee754Unary> {
+ using Base = FixedInputValueNodeT<1, Float64Ieee754Unary>;
+
+ public:
+ explicit Float64Ieee754Unary(uint64_t bitfield,
+ ExternalReference ieee_function)
+ : Base(bitfield), ieee_function_(ieee_function) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Float64() | OpProperties::Call();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ Input& input() { return Node::input(0); }
+
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ ExternalReference ieee_function_;
+};
+
+class Float64SilenceNaN : public FixedInputValueNodeT<1, Float64SilenceNaN> {
+ using Base = FixedInputValueNodeT<1, Float64SilenceNaN>;
+
+ public:
+ explicit Float64SilenceNaN(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Float64();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckInt32IsSmi : public FixedInputNodeT<1, CheckInt32IsSmi> {
+ using Base = FixedInputNodeT<1, CheckInt32IsSmi>;
+
+ public:
+ explicit CheckInt32IsSmi(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckUint32IsSmi : public FixedInputNodeT<1, CheckUint32IsSmi> {
+ using Base = FixedInputNodeT<1, CheckUint32IsSmi>;
+
+ public:
+ explicit CheckUint32IsSmi(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedSmiTagInt32 : public FixedInputValueNodeT<1, CheckedSmiTagInt32> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiTagInt32>;
+
+ public:
+ explicit CheckedSmiTagInt32(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedSmiTagUint32
+ : public FixedInputValueNodeT<1, CheckedSmiTagUint32> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiTagUint32>;
+
+ public:
+ explicit CheckedSmiTagUint32(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -1863,7 +2555,11 @@ class UnsafeSmiTag : public FixedInputValueNodeT<1, UnsafeSmiTag> {
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller*) const;
+ void MarkTaggedInputsAsDecompressing() {
+ // No tagged inputs.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -1877,10 +2573,15 @@ class CheckedSmiUntag : public FixedInputValueNodeT<1, CheckedSmiUntag> {
static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
OpProperties::Int32() |
OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to untag.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -1893,10 +2594,15 @@ class UnsafeSmiUntag : public FixedInputValueNodeT<1, UnsafeSmiUntag> {
static constexpr OpProperties kProperties =
OpProperties::Int32() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to untag.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -1916,7 +2622,7 @@ class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
bool ToBoolean(LocalIsolate* local_isolate) const { return value_ != 0; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -1933,18 +2639,18 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
public:
using OutputRegister = DoubleRegister;
- explicit Float64Constant(uint64_t bitfield, double value)
+ explicit Float64Constant(uint64_t bitfield, Float64 value)
: Base(bitfield), value_(value) {}
static constexpr OpProperties kProperties = OpProperties::Float64();
- double value() const { return value_; }
+ Float64 value() const { return value_; }
bool ToBoolean(LocalIsolate* local_isolate) const {
- return value_ != 0.0 && !std::isnan(value_);
+ return value_.get_scalar() != 0.0 && !value_.is_nan();
}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -1952,21 +2658,223 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
Handle<Object> DoReify(LocalIsolate* isolate);
private:
- const double value_;
+ const Float64 value_;
};
-class Float64Box : public FixedInputValueNodeT<1, Float64Box> {
- using Base = FixedInputValueNodeT<1, Float64Box>;
+class Int32ToUint8Clamped
+ : public FixedInputValueNodeT<1, Int32ToUint8Clamped> {
+ using Base = FixedInputValueNodeT<1, Int32ToUint8Clamped>;
public:
- explicit Float64Box(uint64_t bitfield) : Base(bitfield) {}
+ explicit Int32ToUint8Clamped(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Uint32ToUint8Clamped
+ : public FixedInputValueNodeT<1, Uint32ToUint8Clamped> {
+ using Base = FixedInputValueNodeT<1, Uint32ToUint8Clamped>;
+
+ public:
+ explicit Uint32ToUint8Clamped(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Float64ToUint8Clamped
+ : public FixedInputValueNodeT<1, Float64ToUint8Clamped> {
+ using Base = FixedInputValueNodeT<1, Float64ToUint8Clamped>;
+
+ public:
+ explicit Float64ToUint8Clamped(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedNumberToUint8Clamped
+ : public FixedInputValueNodeT<1, CheckedNumberToUint8Clamped> {
+ using Base = FixedInputValueNodeT<1, CheckedNumberToUint8Clamped>;
+
+ public:
+ explicit CheckedNumberToUint8Clamped(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Int32ToNumber : public FixedInputValueNodeT<1, Int32ToNumber> {
+ using Base = FixedInputValueNodeT<1, Int32ToNumber>;
+
+ public:
+ explicit Int32ToNumber(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties =
OpProperties::DeferredCall() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Uint32ToNumber : public FixedInputValueNodeT<1, Uint32ToNumber> {
+ using Base = FixedInputValueNodeT<1, Uint32ToNumber>;
+
+ public:
+ explicit Uint32ToNumber(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::DeferredCall() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32};
+
+ Input& input() { return Node::input(0); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Float64ToTagged : public FixedInputValueNodeT<1, Float64ToTagged> {
+ using Base = FixedInputValueNodeT<1, Float64ToTagged>;
+
+ public:
+ enum class ConversionMode { kCanonicalizeSmi, kForceHeapNumber };
+ explicit Float64ToTagged(
+ uint64_t bitfield, ConversionMode mode = ConversionMode::kCanonicalizeSmi)
+ : Base(ConversionModeBitField::update(bitfield, mode)) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ static constexpr OpProperties kProperties =
+ OpProperties::DeferredCall() | OpProperties::ConversionNode();
+
+ Input& input() { return Node::input(0); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ bool canonicalize_smi() {
+ return ConversionModeBitField::decode(bitfield()) ==
+ ConversionMode::kCanonicalizeSmi;
+ }
+ using ConversionModeBitField = NextBitField<ConversionMode, 1>;
+};
+
+class HoleyFloat64Box : public FixedInputValueNodeT<1, HoleyFloat64Box> {
+ using Base = FixedInputValueNodeT<1, HoleyFloat64Box>;
+
+ public:
+ explicit HoleyFloat64Box(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ Input& input() { return Node::input(0); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedSmiTagFloat64
+ : public FixedInputValueNodeT<1, CheckedSmiTagFloat64> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiTagFloat64>;
+
+ public:
+ explicit CheckedSmiTagFloat64(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::ConversionNode();
+
+ Input& input() { return Node::input(0); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedInt32ToUint32
+ : public FixedInputValueNodeT<1, CheckedInt32ToUint32> {
+ using Base = FixedInputValueNodeT<1, CheckedInt32ToUint32>;
+
+ public:
+ explicit CheckedInt32ToUint32(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Uint32() |
+ OpProperties::ConversionNode() |
+ OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedUint32ToInt32
+ : public FixedInputValueNodeT<1, CheckedUint32ToInt32> {
+ using Base = FixedInputValueNodeT<1, CheckedUint32ToInt32>;
+
+ public:
+ explicit CheckedUint32ToInt32(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Int32() |
+ OpProperties::ConversionNode() |
+ OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -1980,10 +2888,31 @@ class ChangeInt32ToFloat64
static constexpr OpProperties kProperties =
OpProperties::Float64() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class ChangeUint32ToFloat64
+ : public FixedInputValueNodeT<1, ChangeUint32ToFloat64> {
+ using Base = FixedInputValueNodeT<1, ChangeUint32ToFloat64>;
+
+ public:
+ explicit ChangeUint32ToFloat64(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Float64() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -1998,41 +2927,228 @@ class CheckedTruncateFloat64ToInt32
static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
OpProperties::Int32() |
OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-class CheckedFloat64Unbox
- : public FixedInputValueNodeT<1, CheckedFloat64Unbox> {
- using Base = FixedInputValueNodeT<1, CheckedFloat64Unbox>;
+class Float64Round : public FixedInputValueNodeT<1, Float64Round> {
+ using Base = FixedInputValueNodeT<1, Float64Round>;
public:
- explicit CheckedFloat64Unbox(uint64_t bitfield) : Base(bitfield) {}
+ enum class Kind { kFloor, kCeil, kNearest };
+
+ static Builtin continuation(Kind kind) {
+ switch (kind) {
+ case Kind::kCeil:
+ return Builtin::kMathCeilContinuation;
+ case Kind::kFloor:
+ return Builtin::kMathFloorContinuation;
+ case Kind::kNearest:
+ return Builtin::kMathRoundContinuation;
+ }
+ }
+
+ explicit Float64Round(uint64_t bitfield, Kind kind)
+ : Base(bitfield), kind_(kind) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Float64();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ Kind kind_;
+};
+
+class CheckedTruncateFloat64ToUint32
+ : public FixedInputValueNodeT<1, CheckedTruncateFloat64ToUint32> {
+ using Base = FixedInputValueNodeT<1, CheckedTruncateFloat64ToUint32>;
+
+ public:
+ explicit CheckedTruncateFloat64ToUint32(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kFloat64};
static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
- OpProperties::Float64() |
+ OpProperties::Uint32() |
OpProperties::ConversionNode();
Input& input() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
+#define DEFINE_TRUNCATE_NODE(name, from_repr, properties) \
+ class name : public FixedInputValueNodeT<1, name> { \
+ using Base = FixedInputValueNodeT<1, name>; \
+ \
+ public: \
+ explicit name(uint64_t bitfield) : Base(bitfield) {} \
+ \
+ static constexpr OpProperties kProperties = properties; \
+ static constexpr typename Base::InputTypes kInputTypes{ \
+ ValueRepresentation::k##from_repr}; \
+ \
+ Input& input() { return Node::input(0); } \
+ \
+ void SetValueLocationConstraints(); \
+ void GenerateCode(MaglevAssembler*, const ProcessingState&); \
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
+ };
+
+DEFINE_TRUNCATE_NODE(TruncateUint32ToInt32, Uint32, OpProperties::Int32())
+DEFINE_TRUNCATE_NODE(TruncateFloat64ToInt32, Float64, OpProperties::Int32())
+DEFINE_TRUNCATE_NODE(UnsafeTruncateUint32ToInt32, Uint32, OpProperties::Int32())
+DEFINE_TRUNCATE_NODE(UnsafeTruncateFloat64ToInt32, Float64,
+ OpProperties::Int32())
+
+#undef DEFINE_TRUNCATE_NODE
+
+class CheckedNumberOrOddballToFloat64
+ : public FixedInputValueNodeT<1, CheckedNumberOrOddballToFloat64> {
+ using Base = FixedInputValueNodeT<1, CheckedNumberOrOddballToFloat64>;
+
+ public:
+ explicit CheckedNumberOrOddballToFloat64(
+ uint64_t bitfield, TaggedToFloat64ConversionType conversion_type)
+ : Base(TaggedToFloat64ConversionTypeOffset::update(bitfield,
+ conversion_type)) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
+ OpProperties::Float64() |
+ OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ TaggedToFloat64ConversionType conversion_type() const {
+ return TaggedToFloat64ConversionTypeOffset::decode(bitfield());
+ }
+
+ private:
+ using TaggedToFloat64ConversionTypeOffset =
+ NextBitField<TaggedToFloat64ConversionType, 1>;
+};
+
+class UncheckedNumberOrOddballToFloat64
+ : public FixedInputValueNodeT<1, UncheckedNumberOrOddballToFloat64> {
+ using Base = FixedInputValueNodeT<1, UncheckedNumberOrOddballToFloat64>;
+
+ public:
+ explicit UncheckedNumberOrOddballToFloat64(
+ uint64_t bitfield, TaggedToFloat64ConversionType conversion_type)
+ : Base(TaggedToFloat64ConversionTypeOffset::update(bitfield,
+ conversion_type)) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Float64() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ TaggedToFloat64ConversionType conversion_type() const {
+ return TaggedToFloat64ConversionTypeOffset::decode(bitfield());
+ }
+
+ private:
+ using TaggedToFloat64ConversionTypeOffset =
+ NextBitField<TaggedToFloat64ConversionType, 1>;
+};
+
+class TruncateNumberOrOddballToInt32
+ : public FixedInputValueNodeT<1, TruncateNumberOrOddballToInt32> {
+ using Base = FixedInputValueNodeT<1, TruncateNumberOrOddballToInt32>;
+
+ public:
+ explicit TruncateNumberOrOddballToInt32(
+ uint64_t bitfield, TaggedToFloat64ConversionType conversion_type)
+ : Base(TaggedToFloat64ConversionTypeOffset::update(bitfield,
+ conversion_type)) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ TaggedToFloat64ConversionType conversion_type() const {
+ return TaggedToFloat64ConversionTypeOffset::decode(bitfield());
+ }
+
+ private:
+ using TaggedToFloat64ConversionTypeOffset =
+ NextBitField<TaggedToFloat64ConversionType, 1>;
+};
+
+class CheckedTruncateNumberOrOddballToInt32
+ : public FixedInputValueNodeT<1, CheckedTruncateNumberOrOddballToInt32> {
+ using Base = FixedInputValueNodeT<1, CheckedTruncateNumberOrOddballToInt32>;
+
+ public:
+ explicit CheckedTruncateNumberOrOddballToInt32(
+ uint64_t bitfield, TaggedToFloat64ConversionType conversion_type)
+ : Base(TaggedToFloat64ConversionTypeOffset::update(bitfield,
+ conversion_type)) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& input() { return Node::input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ TaggedToFloat64ConversionType conversion_type() const {
+ return TaggedToFloat64ConversionTypeOffset::decode(bitfield());
+ }
+
+ private:
+ using TaggedToFloat64ConversionTypeOffset =
+ NextBitField<TaggedToFloat64ConversionType, 1>;
+};
+
class LogicalNot : public FixedInputValueNodeT<1, LogicalNot> {
using Base = FixedInputValueNodeT<1, LogicalNot>;
public:
explicit LogicalNot(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2043,9 +3159,14 @@ class SetPendingMessage : public FixedInputValueNodeT<1, SetPendingMessage> {
public:
explicit SetPendingMessage(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr OpProperties kProperties =
+ OpProperties::Writing() | OpProperties::Reading();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2056,9 +3177,12 @@ class ToBoolean : public FixedInputValueNodeT<1, ToBoolean> {
public:
explicit ToBoolean(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2070,9 +3194,12 @@ class ToBooleanLogicalNot
public:
explicit ToBooleanLogicalNot(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2083,10 +3210,16 @@ class TaggedEqual : public FixedInputValueNodeT<2, TaggedEqual> {
public:
explicit TaggedEqual(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
Input& lhs() { return Node::input(0); }
Input& rhs() { return Node::input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to compare reference equality.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2097,10 +3230,16 @@ class TaggedNotEqual : public FixedInputValueNodeT<2, TaggedNotEqual> {
public:
explicit TaggedNotEqual(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
Input& lhs() { return Node::input(0); }
Input& rhs() { return Node::input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to compare reference equality.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2114,13 +3253,17 @@ class TestInstanceOf : public FixedInputValueNodeT<3, TestInstanceOf> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
Input& context() { return input(0); }
Input& object() { return input(1); }
Input& callable() { return input(2); }
compiler::FeedbackSource feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2134,9 +3277,12 @@ class TestUndetectable : public FixedInputValueNodeT<1, TestUndetectable> {
public:
explicit TestUndetectable(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2149,11 +3295,14 @@ class TestTypeOf : public FixedInputValueNodeT<1, TestTypeOf> {
interpreter::TestTypeOfFlags::LiteralFlag literal)
: Base(bitfield), literal_(literal) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
interpreter::TestTypeOfFlags::LiteralFlag literal_;
@@ -2167,11 +3316,14 @@ class ToName : public FixedInputValueNodeT<2, ToName> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
Input& context() { return Node::input(0); }
Input& value_input() { return Node::input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2185,12 +3337,15 @@ class ToNumberOrNumeric : public FixedInputValueNodeT<2, ToNumberOrNumeric> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
Input& context() { return Node::input(0); }
Input& value_input() { return Node::input(1); }
Object::Conversion mode() const { return mode_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2207,6 +3362,9 @@ class DeleteProperty : public FixedInputValueNodeT<3, DeleteProperty> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
Input& context() { return Node::input(0); }
Input& object() { return Node::input(1); }
@@ -2214,7 +3372,8 @@ class DeleteProperty : public FixedInputValueNodeT<3, DeleteProperty> {
LanguageMode mode() const { return mode_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2258,7 +3417,12 @@ class GeneratorStore : public NodeT<GeneratorStore> {
set_input(i + kFixedInputCount, node);
}
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to store.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2282,9 +3446,10 @@ class JumpLoopPrologue : public FixedInputNodeT<0, JumpLoopPrologue> {
unit_(unit) {}
static constexpr OpProperties kProperties =
- OpProperties::NeedsRegisterSnapshot() | OpProperties::EagerDeopt();
+ OpProperties::DeferredCall() | OpProperties::EagerDeopt();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2305,6 +3470,8 @@ class ForInPrepare : public FixedInputValueNodeT<2, ForInPrepare> {
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -2313,7 +3480,8 @@ class ForInPrepare : public FixedInputValueNodeT<2, ForInPrepare> {
int ReturnCount() const { return 2; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2329,6 +3497,10 @@ class ForInNext : public FixedInputValueNodeT<5, ForInNext> {
: Base(bitfield), feedback_(feedback) {}
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -2338,7 +3510,8 @@ class ForInNext : public FixedInputValueNodeT<5, ForInNext> {
Input& cache_type() { return Node::input(3); }
Input& cache_index() { return Node::input(4); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2358,6 +3531,8 @@ class GetIterator : public FixedInputValueNodeT<2, GetIterator> {
feedback_(feedback.object()) {}
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
Input& context() { return input(0); }
Input& receiver() { return input(1); }
@@ -2366,7 +3541,8 @@ class GetIterator : public FixedInputValueNodeT<2, GetIterator> {
int call_slot() const { return call_slot_; }
Handle<FeedbackVector> feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2383,7 +3559,7 @@ class GetSecondReturnedValue
public:
explicit GetSecondReturnedValue(uint64_t bitfield) : Base(bitfield) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2396,11 +3572,14 @@ class ToObject : public FixedInputValueNodeT<2, ToObject> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
Input& context() { return Node::input(0); }
Input& value_input() { return Node::input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -2413,27 +3592,34 @@ class ToString : public FixedInputValueNodeT<2, ToString> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
Input& context() { return Node::input(0); }
Input& value_input() { return Node::input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class GeneratorRestoreRegister
- : public FixedInputValueNodeT<1, GeneratorRestoreRegister> {
- using Base = FixedInputValueNodeT<1, GeneratorRestoreRegister>;
+ : public FixedInputValueNodeT<2, GeneratorRestoreRegister> {
+ using Base = FixedInputValueNodeT<2, GeneratorRestoreRegister>;
public:
explicit GeneratorRestoreRegister(uint64_t bitfield, int index)
: Base(bitfield), index_(index) {}
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
Input& array_input() { return input(0); }
+ Input& stale_input() { return input(1); }
int index() const { return index_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2450,7 +3636,7 @@ class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
interpreter::Register source() const { return source_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2462,17 +3648,12 @@ class RegisterInput : public FixedInputValueNodeT<0, RegisterInput> {
using Base = FixedInputValueNodeT<0, RegisterInput>;
public:
- static constexpr RegList kAllowedRegisters = {
- kJavaScriptCallNewTargetRegister};
-
explicit RegisterInput(uint64_t bitfield, Register input)
- : Base(bitfield), input_(input) {
- DCHECK(kAllowedRegisters.has(input));
- }
+ : Base(bitfield), input_(input) {}
Register input() const { return input_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2495,7 +3676,7 @@ class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
return value_ != Smi::FromInt(0);
}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2506,6 +3687,34 @@ class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
const Smi value_;
};
+class ExternalConstant : public FixedInputValueNodeT<0, ExternalConstant> {
+ using Base = FixedInputValueNodeT<0, ExternalConstant>;
+
+ public:
+ using OutputRegister = Register;
+
+ explicit ExternalConstant(uint64_t bitfield,
+ const ExternalReference& reference)
+ : Base(bitfield), reference_(reference) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Pure() | OpProperties::ExternalReference();
+
+ ExternalReference reference() const { return reference_; }
+
+ bool ToBoolean(LocalIsolate* local_isolate) const { UNREACHABLE(); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ void DoLoadToRegister(MaglevAssembler*, OutputRegister);
+ Handle<Object> DoReify(LocalIsolate* isolate);
+
+ private:
+ const ExternalReference reference_;
+};
+
class Constant : public FixedInputValueNodeT<0, Constant> {
using Base = FixedInputValueNodeT<0, Constant>;
@@ -2519,9 +3728,11 @@ class Constant : public FixedInputValueNodeT<0, Constant> {
return object_.object()->BooleanValue(local_isolate);
}
- bool IsTheHole() const { return object_.IsTheHole(); }
+ bool IsTheHole(compiler::JSHeapBroker* broker) const {
+ return object_.IsTheHole(broker);
+ }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2549,7 +3760,7 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
RootIndex index() const { return index_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2575,7 +3786,8 @@ class CreateEmptyArrayLiteral
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2604,7 +3816,8 @@ class CreateArrayLiteral : public FixedInputValueNodeT<0, CreateArrayLiteral> {
static constexpr OpProperties kProperties =
OpProperties::Call() | OpProperties::Throw() | OpProperties::LazyDeopt();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2635,7 +3848,8 @@ class CreateShallowArrayLiteral
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2669,7 +3883,8 @@ class CreateObjectLiteral
static constexpr OpProperties kProperties =
OpProperties::Call() | OpProperties::Throw() | OpProperties::LazyDeopt();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2691,7 +3906,8 @@ class CreateEmptyObjectLiteral
compiler::MapRef map() { return map_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2725,7 +3941,8 @@ class CreateShallowObjectLiteral
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2735,6 +3952,59 @@ class CreateShallowObjectLiteral
const int flags_;
};
+class AllocateRaw : public FixedInputValueNodeT<0, AllocateRaw> {
+ using Base = FixedInputValueNodeT<0, AllocateRaw>;
+
+ public:
+ explicit AllocateRaw(uint64_t bitfield, AllocationType allocation_type,
+ int size)
+ : Base(bitfield), allocation_type_(allocation_type), size_(size) {}
+
+ static constexpr OpProperties kProperties = OpProperties::DeferredCall();
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ AllocationType allocation_type() const { return allocation_type_; }
+ int size() const { return size_; }
+
+ // Allow increasing the size for allocation folding.
+ void extend(int size) {
+ DCHECK_GT(size, 0);
+ size_ += size;
+ }
+
+ private:
+ AllocationType allocation_type_;
+ int size_;
+};
+
+class FoldedAllocation : public FixedInputValueNodeT<1, FoldedAllocation> {
+ using Base = FixedInputValueNodeT<1, FoldedAllocation>;
+
+ public:
+ explicit FoldedAllocation(uint64_t bitfield, int offset)
+ : Base(bitfield), offset_(offset) {}
+
+ Input& raw_allocation() { return input(0); }
+
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+
+ int offset() const { return offset_; }
+
+ private:
+ int offset_;
+};
+
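AllocateRaw::extend() and FoldedAllocation together capture the allocation-folding idea: several small allocations are merged into one raw allocation, and each folded object is then addressed as a fixed offset into that single allocation. The sketch below shows only the size/offset bookkeeping with plain structs standing in for the Maglev nodes; it is illustrative, and unlike the real extend() (which only grows size_, with the graph builder tracking offsets) the stand-in returns the offset directly.

  #include <cassert>

  // Illustrative stand-ins for AllocateRaw / FoldedAllocation.
  struct RawAllocation {
    int size = 0;
    // Grow the single raw allocation to also cover a folded object and
    // report where that object starts.
    int Extend(int object_size) {
      assert(object_size > 0);
      int offset = size;  // The folded object starts where the allocation ended.
      size += object_size;
      return offset;
    }
  };

  struct Folded {
    RawAllocation* raw;
    int offset;  // Fixed offset of this object inside the raw allocation.
  };

  int main() {
    RawAllocation raw{/*size=*/16};        // First object occupies [0, 16).
    Folded second{&raw, raw.Extend(24)};   // Second object folded at offset 16.
    Folded third{&raw, raw.Extend(8)};     // Third object folded at offset 40.
    assert(second.offset == 16 && third.offset == 40 && raw.size == 48);
    return 0;
  }
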
class CreateFunctionContext
: public FixedInputValueNodeT<1, CreateFunctionContext> {
using Base = FixedInputValueNodeT<1, CreateFunctionContext>;
@@ -2757,8 +4027,11 @@ class CreateFunctionContext
// The implementation currently calls runtime.
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2789,8 +4062,11 @@ class FastCreateClosure : public FixedInputValueNodeT<1, FastCreateClosure> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2817,7 +4093,8 @@ class CreateRegExpLiteral
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2850,8 +4127,11 @@ class CreateClosure : public FixedInputValueNodeT<1, CreateClosure> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2861,15 +4141,36 @@ class CreateClosure : public FixedInputValueNodeT<1, CreateClosure> {
const bool pretenured_;
};
+#define ASSERT_CONDITION(V) \
+ V(Equal) \
+ V(NotEqual) \
+ V(LessThan) \
+ V(LessThanEqual) \
+ V(GreaterThan) \
+ V(GreaterThanEqual) \
+ V(UnsignedLessThan) \
+ V(UnsignedLessThanEqual) \
+ V(UnsignedGreaterThan) \
+ V(UnsignedGreaterThanEqual)
+
enum class AssertCondition {
- kLess,
- kLessOrEqual,
- kGreater,
- kGeaterOrEqual,
- kEqual,
- kNotEqual,
+#define D(Name) k##Name,
+ ASSERT_CONDITION(D)
+#undef D
};
+inline std::ostream& operator<<(std::ostream& os, const AssertCondition cond) {
+ switch (cond) {
+#define CASE(Name) \
+ case AssertCondition::k##Name: \
+ os << #Name; \
+ break;
+ ASSERT_CONDITION(CASE)
+#undef CASE
+ }
+ return os;
+}
+
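The ASSERT_CONDITION list is a classic X-macro: the same list is expanded once to generate the enumerators and once to generate the operator<< cases, so the enum and its printer cannot drift apart. A reduced standalone sketch of the same technique (the names here are illustrative, not V8's):

  #include <iostream>

  #define MY_CONDITION(V) \
    V(Equal)              \
    V(LessThan)

  enum class MyCondition {
  #define D(Name) k##Name,
    MY_CONDITION(D)
  #undef D
  };

  inline std::ostream& operator<<(std::ostream& os, MyCondition cond) {
    switch (cond) {
  #define CASE(Name)           \
    case MyCondition::k##Name: \
      return os << #Name;
      MY_CONDITION(CASE)
  #undef CASE
    }
    return os;
  }

  int main() {
    std::cout << MyCondition::kLessThan << "\n";  // Prints "LessThan".
  }
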
class AssertInt32 : public FixedInputNodeT<2, AssertInt32> {
using Base = FixedInputNodeT<2, AssertInt32>;
@@ -2878,10 +4179,13 @@ class AssertInt32 : public FixedInputNodeT<2, AssertInt32> {
AbortReason reason)
: Base(bitfield), condition_(condition), reason_(reason) {}
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
+
Input& left_input() { return input(0); }
Input& right_input() { return input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2901,13 +4205,15 @@ class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
: Base(bitfield), maps_(maps), check_type_(check_type) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
const ZoneHandleSet<Map>& maps() const { return maps_; }
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2924,13 +4230,18 @@ class CheckValue : public FixedInputNodeT<1, CheckValue> {
: Base(bitfield), value_(value) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
compiler::HeapObjectRef value() const { return value_; }
static constexpr int kTargetIndex = 0;
Input& target_input() { return input(kTargetIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to compare reference equality.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -2938,6 +4249,57 @@ class CheckValue : public FixedInputNodeT<1, CheckValue> {
const compiler::HeapObjectRef value_;
};
+class CheckValueEqualsString
+ : public FixedInputNodeT<1, CheckValueEqualsString> {
+ using Base = FixedInputNodeT<1, CheckValueEqualsString>;
+
+ public:
+ explicit CheckValueEqualsString(uint64_t bitfield,
+ const compiler::InternalizedStringRef& value)
+ : Base(bitfield), value_(value) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ compiler::InternalizedStringRef value() const { return value_; }
+
+ static constexpr int kTargetIndex = 0;
+ Input& target_input() { return input(kTargetIndex); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::InternalizedStringRef value_;
+};
+
+class CheckDynamicValue : public FixedInputNodeT<2, CheckDynamicValue> {
+ using Base = FixedInputNodeT<2, CheckDynamicValue>;
+
+ public:
+ explicit CheckDynamicValue(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
+ static constexpr int kFirstIndex = 0;
+ static constexpr int kSecondIndex = 1;
+ Input& first_input() { return input(kFirstIndex); }
+ Input& second_input() { return input(kSecondIndex); }
+
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to compare reference equality.
+ }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class CheckSmi : public FixedInputNodeT<1, CheckSmi> {
using Base = FixedInputNodeT<1, CheckSmi>;
@@ -2945,13 +4307,18 @@ class CheckSmi : public FixedInputNodeT<1, CheckSmi> {
explicit CheckSmi(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to check Smi bits.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class CheckNumber : public FixedInputNodeT<1, CheckNumber> {
@@ -2962,12 +4329,14 @@ class CheckNumber : public FixedInputNodeT<1, CheckNumber> {
: Base(bitfield), mode_(mode) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
Object::Conversion mode() const { return mode_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -2982,13 +4351,18 @@ class CheckHeapObject : public FixedInputNodeT<1, CheckHeapObject> {
explicit CheckHeapObject(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress to check Smi bits.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class CheckSymbol : public FixedInputNodeT<1, CheckSymbol> {
@@ -2999,16 +4373,46 @@ class CheckSymbol : public FixedInputNodeT<1, CheckSymbol> {
: Base(bitfield), check_type_(check_type) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ const CheckType check_type_;
+};
+
+class CheckInstanceType : public FixedInputNodeT<1, CheckInstanceType> {
+ using Base = FixedInputNodeT<1, CheckInstanceType>;
+
+ public:
+ explicit CheckInstanceType(uint64_t bitfield, CheckType check_type,
+ InstanceType instance_type)
+ : Base(bitfield),
+ check_type_(check_type),
+ instance_type_(instance_type) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ static constexpr int kReceiverIndex = 0;
+ Input& receiver_input() { return input(kReceiverIndex); }
+
+ InstanceType instance_type() const { return instance_type_; }
+
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
const CheckType check_type_;
+ const InstanceType instance_type_;
};
class CheckString : public FixedInputNodeT<1, CheckString> {
@@ -3019,13 +4423,15 @@ class CheckString : public FixedInputNodeT<1, CheckString> {
: Base(bitfield), check_type_(check_type) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private:
const CheckType check_type_;
@@ -3041,15 +4447,19 @@ class CheckMapsWithMigration
CheckType check_type)
: Base(bitfield), maps_(maps), check_type_(check_type) {}
- static constexpr OpProperties kProperties =
- OpProperties::EagerDeopt() | OpProperties::DeferredCall();
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
+ OpProperties::DeferredCall() |
+ OpProperties::Writing();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
const ZoneHandleSet<Map>& maps() const { return maps_; }
static constexpr int kReceiverIndex = 0;
Input& receiver_input() { return input(kReceiverIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3058,22 +4468,93 @@ class CheckMapsWithMigration
const CheckType check_type_;
};
+class CheckFixedArrayNonEmpty
+ : public FixedInputNodeT<1, CheckFixedArrayNonEmpty> {
+ using Base = FixedInputNodeT<1, CheckFixedArrayNonEmpty>;
+
+ public:
+ explicit CheckFixedArrayNonEmpty(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ static constexpr int kReceiverIndex = 0;
+ Input& receiver_input() { return input(kReceiverIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class CheckJSArrayBounds : public FixedInputNodeT<2, CheckJSArrayBounds> {
using Base = FixedInputNodeT<2, CheckJSArrayBounds>;
public:
explicit CheckJSArrayBounds(uint64_t bitfield) : Base(bitfield) {}
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
+
+ static constexpr int kReceiverIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ Input& receiver_input() { return input(kReceiverIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckJSDataViewBounds : public FixedInputNodeT<2, CheckJSDataViewBounds> {
+ using Base = FixedInputNodeT<2, CheckJSDataViewBounds>;
+
+ public:
+ explicit CheckJSDataViewBounds(uint64_t bitfield,
+ ExternalArrayType element_type)
+ : Base(bitfield), element_type_(element_type) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
+
+ static constexpr int kReceiverIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ Input& receiver_input() { return input(kReceiverIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ ExternalArrayType element_type_;
+};
+
+class CheckJSTypedArrayBounds
+ : public FixedInputNodeT<2, CheckJSTypedArrayBounds> {
+ using Base = FixedInputNodeT<2, CheckJSTypedArrayBounds>;
+
+ public:
+ explicit CheckJSTypedArrayBounds(uint64_t bitfield,
+ ElementsKind elements_kind)
+ : Base(bitfield), elements_kind_(elements_kind) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kUint32};
static constexpr int kReceiverIndex = 0;
static constexpr int kIndexIndex = 1;
Input& receiver_input() { return input(kReceiverIndex); }
Input& index_input() { return input(kIndexIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ ElementsKind elements_kind_;
};
class CheckInt32Condition : public FixedInputNodeT<2, CheckInt32Condition> {
@@ -3085,13 +4566,15 @@ class CheckInt32Condition : public FixedInputNodeT<2, CheckInt32Condition> {
: Base(bitfield), condition_(condition), reason_(reason) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return input(kLeftIndex); }
Input& right_input() { return input(kRightIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3108,13 +4591,15 @@ class CheckJSObjectElementsBounds
explicit CheckJSObjectElementsBounds(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
static constexpr int kReceiverIndex = 0;
static constexpr int kIndexIndex = 1;
Input& receiver_input() { return input(kReceiverIndex); }
Input& index_input() { return input(kIndexIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -3125,7 +4610,23 @@ class DebugBreak : public FixedInputNodeT<0, DebugBreak> {
public:
explicit DebugBreak(uint64_t bitfield) : Base(bitfield) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class FunctionEntryStackCheck
+ : public FixedInputNodeT<0, FunctionEntryStackCheck> {
+ using Base = FixedInputNodeT<0, FunctionEntryStackCheck>;
+
+ public:
+ explicit FunctionEntryStackCheck(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::DeferredCall() | OpProperties::LazyDeopt();
+
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -3143,11 +4644,13 @@ class CheckedInternalizedString
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::TaggedValue();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
Input& object_input() { return Node::input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3165,11 +4668,14 @@ class CheckedObjectToIndex
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::Int32() |
OpProperties::DeferredCall() | OpProperties::ConversionNode();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
Input& object_input() { return Node::input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -3189,6 +4695,8 @@ class GetTemplateObject : public FixedInputValueNodeT<1, GetTemplateObject> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties =
OpProperties::GenericRuntimeOrBuiltinCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& description() { return input(0); }
@@ -3197,7 +4705,8 @@ class GetTemplateObject : public FixedInputValueNodeT<1, GetTemplateObject> {
}
compiler::FeedbackSource feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3214,34 +4723,219 @@ class BuiltinStringFromCharCode
explicit BuiltinStringFromCharCode(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
Input& code_input() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-class BuiltinStringPrototypeCharCodeAt
- : public FixedInputValueNodeT<2, BuiltinStringPrototypeCharCodeAt> {
- using Base = FixedInputValueNodeT<2, BuiltinStringPrototypeCharCodeAt>;
+class BuiltinStringPrototypeCharCodeOrCodePointAt
+ : public FixedInputValueNodeT<2,
+ BuiltinStringPrototypeCharCodeOrCodePointAt> {
+ using Base =
+ FixedInputValueNodeT<2, BuiltinStringPrototypeCharCodeOrCodePointAt>;
public:
- explicit BuiltinStringPrototypeCharCodeAt(uint64_t bitfield)
- : Base(bitfield) {}
+ enum Mode {
+ kCharCodeAt,
+ kCodePointAt,
+ };
+
+ explicit BuiltinStringPrototypeCharCodeOrCodePointAt(uint64_t bitfield,
+ Mode mode)
+ : Base(bitfield), mode_(mode) {}
static constexpr OpProperties kProperties = OpProperties::Reading() |
OpProperties::DeferredCall() |
OpProperties::Int32();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
static constexpr int kStringIndex = 0;
static constexpr int kIndexIndex = 1;
Input& string_input() { return input(kStringIndex); }
Input& index_input() { return input(kIndexIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ Mode mode_;
+};
+
+class PolymorphicAccessInfo {
+ public:
+ enum Kind {
+ kNotFound,
+ kConstant,
+ kDataLoad,
+ kModuleExport,
+ kStringLength,
+ };
+
+ static PolymorphicAccessInfo NotFound(
+ const ZoneVector<compiler::MapRef>& maps) {
+ return PolymorphicAccessInfo(kNotFound, maps, Representation::Tagged());
+ }
+ static PolymorphicAccessInfo Constant(
+ const ZoneVector<compiler::MapRef>& maps, compiler::ObjectRef constant) {
+ return PolymorphicAccessInfo(kConstant, maps, Representation::Tagged(),
+ constant);
+ }
+ static PolymorphicAccessInfo DataLoad(
+ const ZoneVector<compiler::MapRef>& maps, Representation representation,
+ compiler::OptionalJSObjectRef holder, FieldIndex field_index) {
+ return PolymorphicAccessInfo(kDataLoad, maps, representation, holder,
+ field_index);
+ }
+ static PolymorphicAccessInfo ModuleExport(
+ const ZoneVector<compiler::MapRef>& maps, compiler::CellRef cell) {
+ return PolymorphicAccessInfo(kModuleExport, maps, Representation::Tagged(),
+ cell);
+ }
+ static PolymorphicAccessInfo StringLength(
+ const ZoneVector<compiler::MapRef>& maps) {
+ return PolymorphicAccessInfo(kStringLength, maps, Representation::Smi());
+ }
+
+ Kind kind() const { return kind_; }
+
+ const ZoneVector<compiler::MapRef>& maps() const { return maps_; }
+
+ Handle<Object> constant() const {
+ DCHECK_EQ(kind_, kConstant);
+ return constant_.object();
+ }
+
+ Handle<Cell> cell() const {
+ DCHECK_EQ(kind_, kModuleExport);
+ return constant_.AsCell().object();
+ }
+
+ compiler::OptionalJSObjectRef holder() const {
+ DCHECK_EQ(kind_, kDataLoad);
+ return data_load_.holder_;
+ }
+
+ FieldIndex field_index() const {
+ DCHECK_EQ(kind_, kDataLoad);
+ return data_load_.field_index_;
+ }
+
+ Representation field_representation() const { return representation_; }
+
+ private:
+ explicit PolymorphicAccessInfo(Kind kind,
+ const ZoneVector<compiler::MapRef>& maps,
+ Representation representation)
+ : kind_(kind), maps_(maps), representation_(representation) {
+ DCHECK(kind == kNotFound || kind == kStringLength);
+ }
+
+ PolymorphicAccessInfo(Kind kind, const ZoneVector<compiler::MapRef>& maps,
+ Representation representation,
+ compiler::ObjectRef constant)
+ : kind_(kind),
+ maps_(maps),
+ representation_(representation),
+ constant_(constant) {
+ DCHECK(kind == kConstant || kind == kModuleExport);
+ }
+
+ PolymorphicAccessInfo(Kind kind, const ZoneVector<compiler::MapRef>& maps,
+ Representation representation,
+ compiler::OptionalJSObjectRef holder,
+ FieldIndex field_index)
+ : kind_(kind),
+ maps_(maps),
+ representation_(representation),
+ data_load_{holder, field_index} {
+ DCHECK_EQ(kind, kDataLoad);
+ }
+
+ const Kind kind_;
+ // TODO(victorgomes): Create a PolymorphicMapChecks and avoid the maps here.
+ const ZoneVector<compiler::MapRef> maps_;
+ const Representation representation_;
+ union {
+ const compiler::ObjectRef constant_;
+ struct {
+ const compiler::OptionalJSObjectRef holder_;
+ const FieldIndex field_index_;
+ } data_load_;
+ };
+};
+
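PolymorphicAccessInfo is a kind-discriminated union: the Kind selects which member of the anonymous union is live, the private constructors enforce the kind/payload pairing, and the accessors DCHECK the kind before reading. A reduced standalone sketch of that pattern, with illustrative types in place of the compiler refs used above:

  #include <cassert>

  // Illustrative stand-in for the kind-discriminated union pattern above.
  class AccessInfo {
   public:
    enum Kind { kNotFound, kConstant, kDataLoad };

    static AccessInfo NotFound() { return AccessInfo(kNotFound); }
    static AccessInfo Constant(int constant) {
      AccessInfo info(kConstant);
      info.constant_ = constant;
      return info;
    }
    static AccessInfo DataLoad(int field_offset) {
      AccessInfo info(kDataLoad);
      info.field_offset_ = field_offset;
      return info;
    }

    Kind kind() const { return kind_; }
    int constant() const {
      assert(kind_ == kConstant);  // Accessors check the discriminant first.
      return constant_;
    }
    int field_offset() const {
      assert(kind_ == kDataLoad);
      return field_offset_;
    }

   private:
    explicit AccessInfo(Kind kind) : kind_(kind) {}
    Kind kind_;
    union {
      int constant_;
      int field_offset_;
    };
  };

  int main() {
    AccessInfo info = AccessInfo::DataLoad(/*field_offset=*/12);
    assert(info.kind() == AccessInfo::kDataLoad && info.field_offset() == 12);
    return 0;
  }
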
+class LoadPolymorphicTaggedField
+ : public FixedInputValueNodeT<1, LoadPolymorphicTaggedField> {
+ using Base = FixedInputValueNodeT<1, LoadPolymorphicTaggedField>;
+
+ public:
+ explicit LoadPolymorphicTaggedField(
+ uint64_t bitfield, Representation field_representation,
+ ZoneVector<PolymorphicAccessInfo>&& access_info)
+ : Base(bitfield),
+ field_representation_(field_representation),
+ access_infos_(access_info) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Reading() |
+ OpProperties::EagerDeopt() |
+ OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ static constexpr int kObjectIndex = 0;
+ Input& object_input() { return input(kObjectIndex); }
+
+ Representation field_representation() const { return field_representation_; }
+ const ZoneVector<PolymorphicAccessInfo> access_infos() const {
+ return access_infos_;
+ }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ Representation field_representation_;
+ ZoneVector<PolymorphicAccessInfo> access_infos_;
+};
+
+class LoadPolymorphicDoubleField
+ : public FixedInputValueNodeT<1, LoadPolymorphicDoubleField> {
+ using Base = FixedInputValueNodeT<1, LoadPolymorphicDoubleField>;
+
+ public:
+ explicit LoadPolymorphicDoubleField(
+ uint64_t bitfield, ZoneVector<PolymorphicAccessInfo>&& access_info)
+ : Base(bitfield), access_infos_(access_info) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Reading() |
+ OpProperties::EagerDeopt() |
+ OpProperties::Float64();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ static constexpr int kObjectIndex = 0;
+ Input& object_input() { return input(kObjectIndex); }
+ const ZoneVector<PolymorphicAccessInfo> access_infos() const {
+ return access_infos_;
+ }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ ZoneVector<PolymorphicAccessInfo> access_infos_;
};
class LoadTaggedField : public FixedInputValueNodeT<1, LoadTaggedField> {
@@ -3252,13 +4946,15 @@ class LoadTaggedField : public FixedInputValueNodeT<1, LoadTaggedField> {
: Base(bitfield), offset_(offset) {}
static constexpr OpProperties kProperties = OpProperties::Reading();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
int offset() const { return offset_; }
static constexpr int kObjectIndex = 0;
Input& object_input() { return input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3275,13 +4971,15 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> {
static constexpr OpProperties kProperties =
OpProperties::Reading() | OpProperties::Float64();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
int offset() const { return offset_; }
static constexpr int kObjectIndex = 0;
Input& object_input() { return input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3289,39 +4987,476 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> {
const int offset_;
};
-class LoadTaggedElement : public FixedInputValueNodeT<2, LoadTaggedElement> {
- using Base = FixedInputValueNodeT<2, LoadTaggedElement>;
+class LoadTaggedFieldByFieldIndex
+ : public FixedInputValueNodeT<2, LoadTaggedFieldByFieldIndex> {
+ using Base = FixedInputValueNodeT<2, LoadTaggedFieldByFieldIndex>;
+
+ public:
+ explicit LoadTaggedFieldByFieldIndex(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Reading() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class LoadFixedArrayElement
+ : public FixedInputValueNodeT<2, LoadFixedArrayElement> {
+ using Base = FixedInputValueNodeT<2, LoadFixedArrayElement>;
public:
- explicit LoadTaggedElement(uint64_t bitfield) : Base(bitfield) {}
+ explicit LoadFixedArrayElement(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::Reading();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+};
+
+class EnsureWritableFastElements
+ : public FixedInputValueNodeT<2, EnsureWritableFastElements> {
+ using Base = FixedInputValueNodeT<2, EnsureWritableFastElements>;
+
+ public:
+ explicit EnsureWritableFastElements(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kObjectIndex = 1;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& object_input() { return input(kObjectIndex); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class StoreFixedArrayElementWithWriteBarrier
+ : public FixedInputNodeT<3, StoreFixedArrayElementWithWriteBarrier> {
+ using Base = FixedInputNodeT<3, StoreFixedArrayElementWithWriteBarrier>;
+
+ public:
+ explicit StoreFixedArrayElementWithWriteBarrier(uint64_t bitfield)
+ : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Writing() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kTagged};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ static constexpr int kValueIndex = 2;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ int MaxCallStackArgs() const { return 0; }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+// StoreFixedArrayElementNoWriteBarrier never does a Deferred Call. However,
+// PhiRepresentationSelector can cause some StoreFixedArrayElementNoWriteBarrier
+// to become StoreFixedArrayElementWithWriteBarrier, which can do Deferred
+// Calls, and thus need the register snapshot. We thus set the DeferredCall
+// property in StoreFixedArrayElementNoWriteBarrier so that it's allocated with
+// enough space for the register snapshot.
+class StoreFixedArrayElementNoWriteBarrier
+ : public FixedInputNodeT<3, StoreFixedArrayElementNoWriteBarrier> {
+ using Base = FixedInputNodeT<3, StoreFixedArrayElementNoWriteBarrier>;
+
+ public:
+ explicit StoreFixedArrayElementNoWriteBarrier(uint64_t bitfield)
+ : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Writing() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kTagged};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ static constexpr int kValueIndex = 2;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ int MaxCallStackArgs() const {
+ // StoreFixedArrayElementNoWriteBarrier never really does any call.
+ return 0;
+ }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+// CheckedStoreFixedArraySmiElement doesn't do any Deferred Calls, but
+// PhiRepresentationSelector could turn this node into a
+// StoreFixedArrayElementNoWriteBarrier, which needs the register snapshot (see
+// the comment before the definition of kProperties in
+// StoreFixedArrayElementNoWriteBarrier).
+// TODO(dmercadier): when CheckedStoreFixedArraySmiElement is transformed into a
+// StoreFixedArrayElementNoWriteBarrier, the latter can never be transformed in
+// turn into a StoreFixedArrayElementWithWriteBarrier, so the register snapshot
+// is not actually needed. We should introduce a
+// StoreFixedArrayElementNoWriteBarrier node without register snapshot for such
+// cases, in order to avoid the DeferredCall property in
+// CheckedStoreFixedArraySmiElement.
+class CheckedStoreFixedArraySmiElement
+ : public FixedInputNodeT<3, CheckedStoreFixedArraySmiElement> {
+ using Base = FixedInputNodeT<3, CheckedStoreFixedArraySmiElement>;
+
+ public:
+ explicit CheckedStoreFixedArraySmiElement(uint64_t bitfield)
+ : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Writing() |
+ OpProperties::EagerDeopt() |
+ OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kTagged};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ static constexpr int kValueIndex = 2;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ int MaxCallStackArgs() const {
+ // CheckedStoreFixedArraySmiElement never really does any call.
+ return 0;
+ }
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class LoadFixedDoubleArrayElement
+ : public FixedInputValueNodeT<2, LoadFixedDoubleArrayElement> {
+ using Base = FixedInputValueNodeT<2, LoadFixedDoubleArrayElement>;
+
+ public:
+ explicit LoadFixedDoubleArrayElement(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Reading() | OpProperties::Float64();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class StoreFixedDoubleArrayElement
+ : public FixedInputNodeT<3, StoreFixedDoubleArrayElement> {
+ using Base = FixedInputNodeT<3, StoreFixedDoubleArrayElement>;
+
+ public:
+ explicit StoreFixedDoubleArrayElement(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Writing();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kFloat64};
+
+ static constexpr int kElementsIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ static constexpr int kValueIndex = 2;
+ Input& elements_input() { return input(kElementsIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class LoadSignedIntDataViewElement
+ : public FixedInputValueNodeT<3, LoadSignedIntDataViewElement> {
+ using Base = FixedInputValueNodeT<3, LoadSignedIntDataViewElement>;
+
+ public:
+ explicit LoadSignedIntDataViewElement(uint64_t bitfield,
+ ExternalArrayType type)
+ : Base(bitfield), type_(type) {
+ DCHECK(type == ExternalArrayType::kExternalInt8Array ||
+ type == ExternalArrayType::kExternalInt16Array ||
+ type == ExternalArrayType::kExternalInt32Array);
+ }
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Reading() | OpProperties::Int32();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
static constexpr int kIndexIndex = 1;
+ static constexpr int kIsLittleEndianIndex = 2;
Input& object_input() { return input(kObjectIndex); }
Input& index_input() { return input(kIndexIndex); }
+ Input& is_little_endian_input() { return input(kIsLittleEndianIndex); }
+
+ bool is_little_endian_constant() {
+ return IsConstantNode(is_little_endian_input().node()->opcode());
+ }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ ExternalArrayType type_;
};
-class LoadDoubleElement : public FixedInputValueNodeT<2, LoadDoubleElement> {
- using Base = FixedInputValueNodeT<2, LoadDoubleElement>;
+class LoadDoubleDataViewElement
+ : public FixedInputValueNodeT<3, LoadDoubleDataViewElement> {
+ using Base = FixedInputValueNodeT<3, LoadDoubleDataViewElement>;
public:
- explicit LoadDoubleElement(uint64_t bitfield) : Base(bitfield) {}
+ explicit LoadDoubleDataViewElement(uint64_t bitfield, ExternalArrayType type)
+ : Base(bitfield) {
+ DCHECK_EQ(type, ExternalArrayType::kExternalFloat64Array);
+ }
static constexpr OpProperties kProperties =
OpProperties::Reading() | OpProperties::Float64();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kTagged};
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ static constexpr int kIsLittleEndianIndex = 2;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ Input& is_little_endian_input() { return input(kIsLittleEndianIndex); }
+
+ bool is_little_endian_constant() {
+ return IsConstantNode(is_little_endian_input().node()->opcode());
+ }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
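
When the is_little_endian input is a constant, the endianness branch can be resolved while building or emitting code instead of generating both paths, which is what is_little_endian_constant() enables. The following standalone sketch shows the runtime behaviour the DataView nodes model; LoadFloat64 is a hypothetical helper, and it assumes a little-endian host:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Read a float64 from `bytes`, honoring the requested byte order.
    double LoadFloat64(const uint8_t* bytes, bool is_little_endian) {
      uint8_t buf[8];
      std::memcpy(buf, bytes, 8);
      // Host assumed little-endian: byte-swap when the requested order differs.
      if (!is_little_endian) std::reverse(buf, buf + 8);
      double result;
      std::memcpy(&result, buf, 8);
      return result;
    }

    int main() {
      uint8_t le[8] = {0, 0, 0, 0, 0, 0, 0xf0, 0x3f};  // 1.0 in little-endian IEEE-754
      uint8_t be[8] = {0x3f, 0xf0, 0, 0, 0, 0, 0, 0};  // 1.0 in big-endian order
      std::cout << LoadFloat64(le, true) << " " << LoadFloat64(be, false) << "\n";
    }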
+
+#define LOAD_TYPED_ARRAY(name, properties, ...) \
+ class name : public FixedInputValueNodeT<2, name> { \
+ using Base = FixedInputValueNodeT<2, name>; \
+ \
+ public: \
+ explicit name(uint64_t bitfield, ElementsKind elements_kind) \
+ : Base(bitfield), elements_kind_(elements_kind) { \
+ DCHECK(elements_kind == \
+ v8::internal::compiler::turboshaft::any_of(__VA_ARGS__)); \
+ } \
+ \
+ static constexpr OpProperties kProperties = \
+ OpProperties::Reading() | properties; \
+ static constexpr typename Base::InputTypes kInputTypes{ \
+ ValueRepresentation::kTagged, ValueRepresentation::kUint32}; \
+ \
+ static constexpr int kObjectIndex = 0; \
+ static constexpr int kIndexIndex = 1; \
+ Input& object_input() { return input(kObjectIndex); } \
+ Input& index_input() { return input(kIndexIndex); } \
+ \
+ void SetValueLocationConstraints(); \
+ void GenerateCode(MaglevAssembler*, const ProcessingState&); \
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
+ \
+ private: \
+ ElementsKind elements_kind_; \
+ };
+
+// Nodes that can deopt are larger, since they contain the DeoptInfo. Thus, to
+// have better performance, we split the LoadxxxTypedArrayElement nodes in two:
+// those that can deopt and those that can't. Deoptimization in a
+// LoadxxxTypedArrayElement node is always because of a detached array buffer.
+// The NoDeopt versions of the nodes rely on the ArrayBufferDetachingProtector,
+// while the deopting versions have a runtime check that triggers a deopt if the
+// buffer is detached.
+LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElement,
+ OpProperties::EagerDeopt() | OpProperties::Int32(),
+ INT8_ELEMENTS, INT16_ELEMENTS, INT32_ELEMENTS)
+LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElementNoDeopt, OpProperties::Int32(),
+ INT8_ELEMENTS, INT16_ELEMENTS, INT32_ELEMENTS)
+
+LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElement,
+ OpProperties::EagerDeopt() | OpProperties::Uint32(),
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, UINT16_ELEMENTS,
+ UINT16_ELEMENTS, UINT32_ELEMENTS)
+LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElementNoDeopt,
+ OpProperties::Uint32(), UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS,
+ UINT16_ELEMENTS, UINT16_ELEMENTS, UINT32_ELEMENTS)
+
+LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElement,
+ OpProperties::EagerDeopt() | OpProperties::Float64(),
+ FLOAT32_ELEMENTS, FLOAT64_ELEMENTS)
+LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElementNoDeopt, OpProperties::Float64(),
+ FLOAT32_ELEMENTS, FLOAT64_ELEMENTS)
+
+#undef LOAD_TYPED_ARRAY
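
A standalone sketch of the size argument in the comment above: carrying deopt metadata in every instance is what makes the deopting variants bigger, so the common, protector-guarded path gets its own slimmer node. The structs below are illustrative, not Maglev's real layout:

    #include <cstdint>
    #include <iostream>

    struct DeoptInfo {                      // stand-in for the real deopt metadata
      const void* frame_state;
      uint32_t feedback_slot;
    };

    struct LoadTypedArrayElement {          // can eagerly deopt on a detached buffer
      void* object;
      int32_t index;
      DeoptInfo deopt_info;                 // every instance pays for this
    };

    struct LoadTypedArrayElementNoDeopt {   // relies on the detaching protector instead
      void* object;
      int32_t index;
    };

    int main() {
      std::cout << sizeof(LoadTypedArrayElement) << " vs "
                << sizeof(LoadTypedArrayElementNoDeopt) << " bytes\n";
    }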
+
+#define STORE_TYPED_ARRAY(name, properties, type, ...) \
+ class name : public FixedInputNodeT<3, name> { \
+ using Base = FixedInputNodeT<3, name>; \
+ \
+ public: \
+ explicit name(uint64_t bitfield, ElementsKind elements_kind) \
+ : Base(bitfield), elements_kind_(elements_kind) { \
+ DCHECK(elements_kind == \
+ v8::internal::compiler::turboshaft::any_of(__VA_ARGS__)); \
+ } \
+ \
+ static constexpr OpProperties kProperties = properties; \
+ static constexpr typename Base::InputTypes kInputTypes{ \
+ ValueRepresentation::kTagged, ValueRepresentation::kUint32, type}; \
+ \
+ static constexpr int kObjectIndex = 0; \
+ static constexpr int kIndexIndex = 1; \
+ static constexpr int kValueIndex = 2; \
+ Input& object_input() { return input(kObjectIndex); } \
+ Input& index_input() { return input(kIndexIndex); } \
+ Input& value_input() { return input(kValueIndex); } \
+ \
+ void SetValueLocationConstraints(); \
+ void GenerateCode(MaglevAssembler*, const ProcessingState&); \
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
+ \
+ private: \
+ ElementsKind elements_kind_; \
+ };
+
+// Nodes that can deopt are larger, since they contain the DeoptInfo. Thus, to
+// have better performance, we split the StorexxxTypedArrayElement nodes in two:
+// those that can deopt and those that can't. Deoptimization in a
+// StorexxxTypedArrayElement node is always because of a detached array buffer.
+// The NoDeopt versions of the nodes rely on the ArrayBufferDetachingProtector,
+// while the deopting versions have a runtime check that triggers a deopt if the
+// buffer is detached.
+STORE_TYPED_ARRAY(StoreIntTypedArrayElement,
+ OpProperties::EagerDeopt() | OpProperties::Writing(),
+ ValueRepresentation::kInt32, INT8_ELEMENTS, INT16_ELEMENTS,
+ INT32_ELEMENTS, UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS,
+ UINT16_ELEMENTS, UINT16_ELEMENTS, UINT32_ELEMENTS)
+STORE_TYPED_ARRAY(StoreIntTypedArrayElementNoDeopt, OpProperties::Writing(),
+ ValueRepresentation::kInt32, INT8_ELEMENTS, INT16_ELEMENTS,
+ INT32_ELEMENTS, UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS,
+ UINT16_ELEMENTS, UINT16_ELEMENTS, UINT32_ELEMENTS)
+
+STORE_TYPED_ARRAY(StoreDoubleTypedArrayElement,
+ OpProperties::EagerDeopt() | OpProperties::Writing(),
+ ValueRepresentation::kFloat64, FLOAT32_ELEMENTS,
+ FLOAT64_ELEMENTS)
+STORE_TYPED_ARRAY(StoreDoubleTypedArrayElementNoDeopt, OpProperties::Writing(),
+ ValueRepresentation::kFloat64, FLOAT32_ELEMENTS,
+ FLOAT64_ELEMENTS)
+#undef STORE_TYPED_ARRAY
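
The LOAD_TYPED_ARRAY/STORE_TYPED_ARRAY pattern above stamps out families of near-identical classes and then #undefs the macro so it cannot leak further into the header. A toy version of the same technique, with invented class names:

    #include <iostream>

    #define DEFINE_STORE(name, can_deopt)                \
      class name {                                       \
       public:                                           \
        static constexpr bool kCanDeopt = (can_deopt);   \
        const char* Name() const { return #name; }       \
      };

    DEFINE_STORE(StoreIntElement, true)
    DEFINE_STORE(StoreIntElementNoDeopt, false)
    #undef DEFINE_STORE

    int main() {
      StoreIntElementNoDeopt node;
      std::cout << node.Name() << ", can deopt: "
                << StoreIntElementNoDeopt::kCanDeopt << "\n";
    }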
+
+class StoreSignedIntDataViewElement
+ : public FixedInputNodeT<4, StoreSignedIntDataViewElement> {
+ using Base = FixedInputNodeT<4, StoreSignedIntDataViewElement>;
+
+ public:
+ explicit StoreSignedIntDataViewElement(uint64_t bitfield,
+ ExternalArrayType type)
+ : Base(bitfield), type_(type) {
+ DCHECK(type == ExternalArrayType::kExternalInt8Array ||
+ type == ExternalArrayType::kExternalInt16Array ||
+ type == ExternalArrayType::kExternalInt32Array);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::Writing();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kInt32, ValueRepresentation::kTagged};
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kIndexIndex = 1;
+ static constexpr int kValueIndex = 2;
+ static constexpr int kIsLittleEndianIndex = 3;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& index_input() { return input(kIndexIndex); }
+ Input& value_input() { return input(kValueIndex); }
+ Input& is_little_endian_input() { return input(kIsLittleEndianIndex); }
+
+ bool is_little_endian_constant() {
+ return IsConstantNode(is_little_endian_input().node()->opcode());
+ }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ ExternalArrayType type_;
+};
+
+class StoreDoubleDataViewElement
+ : public FixedInputNodeT<4, StoreDoubleDataViewElement> {
+ using Base = FixedInputNodeT<4, StoreDoubleDataViewElement>;
+
+ public:
+ explicit StoreDoubleDataViewElement(uint64_t bitfield, ExternalArrayType type)
+ : Base(bitfield) {
+ DCHECK_EQ(type, ExternalArrayType::kExternalFloat64Array);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::Writing();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32,
+ ValueRepresentation::kFloat64, ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
static constexpr int kIndexIndex = 1;
+ static constexpr int kValueIndex = 2;
+ static constexpr int kIsLittleEndianIndex = 3;
Input& object_input() { return input(kObjectIndex); }
Input& index_input() { return input(kIndexIndex); }
+ Input& value_input() { return input(kValueIndex); }
+ Input& is_little_endian_input() { return input(kIsLittleEndianIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ bool is_little_endian_constant() {
+ return IsConstantNode(is_little_endian_input().node()->opcode());
+ }
+
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -3334,6 +5469,8 @@ class StoreDoubleField : public FixedInputNodeT<2, StoreDoubleField> {
: Base(bitfield), offset_(offset) {}
static constexpr OpProperties kProperties = OpProperties::Writing();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kFloat64};
int offset() const { return offset_; }
@@ -3342,7 +5479,76 @@ class StoreDoubleField : public FixedInputNodeT<2, StoreDoubleField> {
Input& object_input() { return input(kObjectIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int offset_;
+};
+
+class StoreFloat64 : public FixedInputNodeT<2, StoreFloat64> {
+ using Base = FixedInputNodeT<2, StoreFloat64>;
+
+ public:
+ explicit StoreFloat64(uint64_t bitfield, int offset)
+ : Base(bitfield), offset_(offset) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Writing();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kFloat64};
+
+ int offset() const { return offset_; }
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kValueIndex = 1;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int offset_;
+};
+
+class CheckedStoreSmiField : public FixedInputNodeT<2, CheckedStoreSmiField> {
+ using Base = FixedInputNodeT<2, CheckedStoreSmiField>;
+
+ public:
+ explicit CheckedStoreSmiField(uint64_t bitfield, int offset)
+ : Base(bitfield), offset_(offset) {}
+
+ // CheckedStoreSmiField doesn't do any Deferred Calls, but
+ // PhiRepresentationSelector could turn this node into a
+ // StoreTaggedFieldNoWriteBarrier, which needs the register snapshot (see the
+ // comment before the definition of kProperties in
+ // StoreTaggedFieldNoWriteBarrier).
+ // TODO(dmercadier): when CheckedStoreSmiField is transformed into a
+  // StoreTaggedFieldNoWriteBarrier, the latter can never be transformed in turn
+ // into a StoreTaggedFieldWithWriteBarrier, so the register snapshot is not
+ // actually needed. We should introduce a StoreTaggedField node without
+ // register snapshot for such cases, in order to avoid the DeferredCall
+ // property in CheckedStoreSmiField.
+ static constexpr OpProperties kProperties = OpProperties::Writing() |
+ OpProperties::EagerDeopt() |
+ OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
+ int offset() const { return offset_; }
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kValueIndex = 1;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ int MaxCallStackArgs() const {
+ // CheckedStoreSmiField never really does any call.
+ return 0;
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3358,7 +5564,16 @@ class StoreTaggedFieldNoWriteBarrier
explicit StoreTaggedFieldNoWriteBarrier(uint64_t bitfield, int offset)
: Base(bitfield), offset_(offset) {}
- static constexpr OpProperties kProperties = OpProperties::Writing();
+ // StoreTaggedFieldNoWriteBarrier never does a Deferred Call. However,
+ // PhiRepresentationSelector can cause some StoreTaggedFieldNoWriteBarrier to
+ // become StoreTaggedFieldWithWriteBarrier, which can do Deferred Calls, and
+ // thus need the register snapshot. We thus set the DeferredCall property in
+ // StoreTaggedFieldNoWriteBarrier so that it's allocated with enough space for
+ // the register snapshot.
+ static constexpr OpProperties kProperties =
+ OpProperties::Writing() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
int offset() const { return offset_; }
@@ -3367,7 +5582,15 @@ class StoreTaggedFieldNoWriteBarrier
Input& object_input() { return input(kObjectIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ object_input().node()->SetTaggedResultNeedsDecompress();
+ // Don't need to decompress value to store it.
+ }
+ int MaxCallStackArgs() const {
+ // StoreTaggedFieldNoWriteBarrier never really does any call.
+ return 0;
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
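
MarkTaggedInputsAsDecompressing above asks only for the object to be decompressed: the object is used as an address, while a tagged value can be written back in its compressed form. A rough standalone model of that asymmetry under pointer compression; kCageBase and the helpers are assumptions for illustration, not V8's API:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    using Compressed = uint32_t;
    constexpr uint64_t kCageBase = 0x100000000ull;   // assumed cage base

    uint64_t Decompress(Compressed c) { return kCageBase + c; }

    void StoreFieldNoWriteBarrier(Compressed object, int offset, Compressed value,
                                  uint8_t* fake_heap) {
      // The object must become a full pointer to compute the field location...
      uint64_t field_address = Decompress(object) + offset;
      // ...but the value is stored compressed as-is, with no decompression.
      std::memcpy(fake_heap + (field_address - kCageBase), &value, sizeof(value));
    }

    int main() {
      uint8_t fake_heap[64] = {};
      StoreFieldNoWriteBarrier(/*object=*/16, /*offset=*/8, /*value=*/0x1234, fake_heap);
      Compressed stored;
      std::memcpy(&stored, fake_heap + 24, sizeof(stored));
      std::cout << std::hex << stored << "\n";   // 1234
    }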
@@ -3379,16 +5602,19 @@ class StoreMap : public FixedInputNodeT<1, StoreMap> {
using Base = FixedInputNodeT<1, StoreMap>;
public:
- explicit StoreMap(uint64_t bitfield, compiler::MapRef& map)
+ explicit StoreMap(uint64_t bitfield, compiler::MapRef map)
: Base(bitfield), map_(map) {}
static constexpr OpProperties kProperties =
OpProperties::Writing() | OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
Input& object_input() { return input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3406,6 +5632,8 @@ class StoreTaggedFieldWithWriteBarrier
static constexpr OpProperties kProperties =
OpProperties::Writing() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
int offset() const { return offset_; }
@@ -3414,7 +5642,12 @@ class StoreTaggedFieldWithWriteBarrier
Input& object_input() { return input(kObjectIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ object_input().node()->SetTaggedResultNeedsDecompress();
+ // Don't need to decompress value to store it.
+ }
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3436,6 +5669,8 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
const compiler::NameRef& name() const { return name_; }
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3443,7 +5678,8 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
Input& context() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3463,6 +5699,8 @@ class StoreGlobal : public FixedInputValueNodeT<2, StoreGlobal> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
const compiler::NameRef& name() const { return name_; }
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3470,7 +5708,8 @@ class StoreGlobal : public FixedInputValueNodeT<2, StoreGlobal> {
Input& context() { return input(0); }
Input& value() { return input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3489,6 +5728,8 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
compiler::NameRef name() const { return name_; }
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3498,7 +5739,8 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
Input& context() { return input(kContextIndex); }
Input& object_input() { return input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3519,6 +5761,9 @@ class LoadNamedFromSuperGeneric
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
compiler::NameRef name() const { return name_; }
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3530,7 +5775,8 @@ class LoadNamedFromSuperGeneric
Input& receiver() { return input(kReceiverIndex); }
Input& lookup_start_object() { return input(kLookupStartObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3549,6 +5795,9 @@ class SetNamedGeneric : public FixedInputValueNodeT<3, SetNamedGeneric> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
compiler::NameRef name() const { return name_; }
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3560,7 +5809,8 @@ class SetNamedGeneric : public FixedInputValueNodeT<3, SetNamedGeneric> {
Input& object_input() { return input(kObjectIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3569,6 +5819,26 @@ class SetNamedGeneric : public FixedInputValueNodeT<3, SetNamedGeneric> {
const compiler::FeedbackSource feedback_;
};
+class LoadEnumCacheLength
+ : public FixedInputValueNodeT<1, LoadEnumCacheLength> {
+ using Base = FixedInputValueNodeT<1, LoadEnumCacheLength>;
+
+ public:
+ explicit LoadEnumCacheLength(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Reading() | OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ static constexpr int kMapInput = 0;
+ Input& map_input() { return input(kMapInput); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class StringAt : public FixedInputValueNodeT<2, StringAt> {
using Base = FixedInputValueNodeT<2, StringAt>;
@@ -3577,13 +5847,16 @@ class StringAt : public FixedInputValueNodeT<2, StringAt> {
static constexpr OpProperties kProperties =
OpProperties::Reading() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kInt32};
static constexpr int kStringIndex = 0;
static constexpr int kIndexIndex = 1;
Input& string_input() { return input(kStringIndex); }
Input& index_input() { return input(kIndexIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -3596,11 +5869,14 @@ class StringLength : public FixedInputValueNodeT<1, StringLength> {
static constexpr OpProperties kProperties =
OpProperties::Reading() | OpProperties::Int32();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
Input& object_input() { return input(kObjectIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -3617,6 +5893,9 @@ class DefineNamedOwnGeneric
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
compiler::NameRef name() const { return name_; }
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3628,7 +5907,8 @@ class DefineNamedOwnGeneric
Input& object_input() { return input(kObjectIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3648,6 +5928,9 @@ class StoreInArrayLiteralGeneric
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3660,7 +5943,8 @@ class StoreInArrayLiteralGeneric
Input& name_input() { return input(kNameIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3678,6 +5962,9 @@ class GetKeyedGeneric : public FixedInputValueNodeT<3, GetKeyedGeneric> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3688,7 +5975,8 @@ class GetKeyedGeneric : public FixedInputValueNodeT<3, GetKeyedGeneric> {
Input& object_input() { return input(kObjectIndex); }
Input& key_input() { return input(kKeyIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3706,6 +5994,9 @@ class SetKeyedGeneric : public FixedInputValueNodeT<4, SetKeyedGeneric> {
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3718,7 +6009,8 @@ class SetKeyedGeneric : public FixedInputValueNodeT<4, SetKeyedGeneric> {
Input& key_input() { return input(kKeyIndex); }
Input& value_input() { return input(kValueIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3727,8 +6019,8 @@ class SetKeyedGeneric : public FixedInputValueNodeT<4, SetKeyedGeneric> {
};
class DefineKeyedOwnGeneric
- : public FixedInputValueNodeT<4, DefineKeyedOwnGeneric> {
- using Base = FixedInputValueNodeT<4, DefineKeyedOwnGeneric>;
+ : public FixedInputValueNodeT<5, DefineKeyedOwnGeneric> {
+ using Base = FixedInputValueNodeT<5, DefineKeyedOwnGeneric>;
public:
explicit DefineKeyedOwnGeneric(uint64_t bitfield,
@@ -3737,6 +6029,10 @@ class DefineKeyedOwnGeneric
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged,
+ ValueRepresentation::kTagged};
compiler::FeedbackSource feedback() const { return feedback_; }
@@ -3744,12 +6040,15 @@ class DefineKeyedOwnGeneric
static constexpr int kObjectIndex = 1;
static constexpr int kKeyIndex = 2;
static constexpr int kValueIndex = 3;
+ static constexpr int kFlagsIndex = 4;
Input& context() { return input(kContextIndex); }
Input& object_input() { return input(kObjectIndex); }
Input& key_input() { return input(kKeyIndex); }
Input& value_input() { return input(kValueIndex); }
+ Input& flags_input() { return input(kFlagsIndex); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3768,7 +6067,7 @@ class GapMove : public FixedInputNodeT<0, GapMove> {
compiler::AllocatedOperand source() const { return source_; }
compiler::AllocatedOperand target() const { return target_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3788,7 +6087,7 @@ class ConstantGapMove : public FixedInputNodeT<0, ConstantGapMove> {
compiler::AllocatedOperand target() const { return target_; }
ValueNode* node() const { return node_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -3798,6 +6097,8 @@ class ConstantGapMove : public FixedInputNodeT<0, ConstantGapMove> {
compiler::AllocatedOperand target_;
};
+class MergePointInterpreterFrameState;
+
// TODO(verwaest): It may make more sense to buffer phis in merged_states until
// we set up the interpreter frame state for code generation. At that point we
// can generate correctly-sized phis.
@@ -3808,29 +6109,40 @@ class Phi : public ValueNodeT<Phi> {
using List = base::ThreadedList<Phi>;
// TODO(jgruber): More intuitive constructors, if possible.
- Phi(uint64_t bitfield, interpreter::Register owner, int merge_offset)
- : Base(bitfield), owner_(owner), merge_offset_(merge_offset) {}
+ Phi(uint64_t bitfield, MergePointInterpreterFrameState* merge_state,
+ interpreter::Register owner)
+ : Base(bitfield), owner_(owner), merge_state_(merge_state) {
+ DCHECK_NOT_NULL(merge_state);
+ }
interpreter::Register owner() const { return owner_; }
- int merge_offset() const { return merge_offset_; }
+ const MergePointInterpreterFrameState* merge_state() const {
+ return merge_state_;
+ }
using Node::reduce_input_count;
using Node::set_input;
bool is_exception_phi() const { return input_count() == 0; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing() {
+ // Do not mark inputs as decompressing here, since we don't yet know whether
+ // this Phi needs decompression. Instead, let
+ // Node::SetTaggedResultNeedsDecompress pass through phis.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
- void AllocateVregInPostProcess(MaglevVregAllocationState*);
+
+ BasicBlock* predecessor_at(int i);
private:
Phi** next() { return &next_; }
const interpreter::Register owner_;
Phi* next_ = nullptr;
- const int merge_offset_;
- friend List;
+ MergePointInterpreterFrameState* const merge_state_;
friend base::ThreadedListTraits<Phi>;
};
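
Phi::MarkTaggedInputsAsDecompressing above defers the decision: only when some use actually needs the phi's result decompressed does the request propagate through the phi to its inputs. A toy sketch of that propagation, using recursion with an already-marked check so phi cycles terminate; this is not Maglev's Node API:

    #include <vector>

    struct Node {
      bool is_phi = false;
      bool needs_decompress = false;
      std::vector<Node*> inputs;   // for phis: the per-predecessor inputs
    };

    void SetTaggedResultNeedsDecompress(Node* node) {
      if (node->needs_decompress) return;   // already requested; stops cycles
      node->needs_decompress = true;
      if (node->is_phi) {
        // Pass the request through the phi to whatever feeds it.
        for (Node* input : node->inputs) SetTaggedResultNeedsDecompress(input);
      }
    }

    int main() {
      Node a, b;
      Node phi{true, false, {&a, &b}};
      SetTaggedResultNeedsDecompress(&phi);
      return (a.needs_decompress && b.needs_decompress) ? 0 : 1;
    }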
@@ -3839,7 +6151,6 @@ class Call : public ValueNodeT<Call> {
public:
enum class TargetType { kJSFunction, kAny };
-
// We assume function and context as fixed inputs.
static constexpr int kFunctionIndex = 0;
static constexpr int kContextIndex = 1;
@@ -3847,7 +6158,8 @@ class Call : public ValueNodeT<Call> {
// We need enough inputs to have these fixed inputs plus the maximum arguments
// to a function call.
- static_assert(kMaxInputs >= kFixedInputCount + Code::kMaxArguments);
+ static_assert(kMaxInputs >=
+ kFixedInputCount + InstructionStream::kMaxArguments);
// This ctor is used when for variable input counts.
// Inputs must be initialized manually.
@@ -3873,9 +6185,15 @@ class Call : public ValueNodeT<Call> {
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
}
+ auto args_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_end() { return std::make_reverse_iterator(&arg(num_args() - 1)); }
+
compiler::FeedbackSource feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
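
args_begin()/args_end() above build reverse iterators because a node's inputs sit at decreasing addresses as the index grows, so visiting the arguments in forward order means walking memory backwards. A self-contained sketch of the same address pattern, with a toy layout rather than NodeBase's:

    #include <iostream>
    #include <iterator>

    int main() {
      int storage[5] = {};
      int* node = storage + 5;                // the node "starts" just past its inputs
      auto input = [&](int i) -> int& { return *(node - (i + 1)); };
      const int kFixedInputCount = 1;         // say, a context input at index 0
      auto arg = [&](int i) -> int& { return input(i + kFixedInputCount); };

      const int num_args = 3;
      for (int i = 0; i < num_args; ++i) arg(i) = 10 * (i + 1);

      // Same construction as in the nodes above: reverse iterators over a
      // downward-growing input array yield arg(0), arg(1), ... in order.
      auto args_begin = std::make_reverse_iterator(&arg(-1));
      auto args_end = std::make_reverse_iterator(&arg(num_args - 1));
      for (auto it = args_begin; it != args_end; ++it) std::cout << *it << " ";  // 10 20 30
      std::cout << "\n";
    }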
@@ -3897,7 +6215,8 @@ class Construct : public ValueNodeT<Construct> {
// We need enough inputs to have these fixed inputs plus the maximum arguments
// to a function call.
- static_assert(kMaxInputs >= kFixedInputCount + Code::kMaxArguments);
+ static_assert(kMaxInputs >=
+ kFixedInputCount + InstructionStream::kMaxArguments);
// This ctor is used when for variable input counts.
// Inputs must be initialized manually.
@@ -3922,9 +6241,15 @@ class Construct : public ValueNodeT<Construct> {
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
}
+ auto args_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_end() { return std::make_reverse_iterator(&arg(num_args() - 1)); }
+
compiler::FeedbackSource feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -3967,7 +6292,7 @@ class CallBuiltin : public ValueNodeT<CallBuiltin> {
DCHECK(has_feedback());
return slot_type_;
}
- void set_feedback(compiler::FeedbackSource& feedback,
+ void set_feedback(compiler::FeedbackSource const& feedback,
FeedbackSlotType slot_type) {
feedback_ = feedback;
slot_type_ = slot_type;
@@ -4003,20 +6328,31 @@ class CallBuiltin : public ValueNodeT<CallBuiltin> {
return descriptor.GetRegisterParameterCount();
}
+ auto stack_args_begin() {
+ return std::make_reverse_iterator(&input(InputsInRegisterCount() - 1));
+ }
+ auto stack_args_end() {
+ return std::make_reverse_iterator(&input(InputCountWithoutContext() - 1));
+ }
+
void set_arg(int i, ValueNode* node) { set_input(i, node); }
int ReturnCount() const {
return Builtins::CallInterfaceDescriptorFor(builtin_).GetReturnCount();
}
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
- void PassFeedbackSlotOnStack(MaglevAssembler*);
+ template <typename... Args>
+ void PushArguments(MaglevAssembler* masm, Args... extra_args);
void PassFeedbackSlotInRegister(MaglevAssembler*);
- void PushFeedback(MaglevAssembler*);
+ void PushFeedbackAndArguments(MaglevAssembler*);
Builtin builtin_;
base::Optional<compiler::FeedbackSource> feedback_;
@@ -4050,12 +6386,17 @@ class CallRuntime : public ValueNodeT<CallRuntime> {
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
}
+ auto args_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_end() { return std::make_reverse_iterator(&arg(num_args() - 1)); }
int ReturnCount() const {
return Runtime::FunctionForId(function_id())->result_size;
}
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4088,17 +6429,29 @@ class CallWithSpread : public ValueNodeT<CallWithSpread> {
Input& context() { return input(kContextIndex); }
const Input& context() const { return input(kContextIndex); }
int num_args() const { return input_count() - kFixedInputCount; }
+ int num_args_no_spread() const {
+ DCHECK_GT(num_args(), 0);
+ return num_args() - 1;
+ }
Input& arg(int i) { return input(i + kFixedInputCount); }
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
}
+ auto args_no_spread_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_no_spread_end() {
+ return std::make_reverse_iterator(&arg(num_args_no_spread() - 1));
+ }
Input& spread() {
// Spread is the last argument/input.
return input(input_count() - 1);
}
+ Input& receiver() { return arg(0); }
compiler::FeedbackSource feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -4106,6 +6459,91 @@ class CallWithSpread : public ValueNodeT<CallWithSpread> {
const compiler::FeedbackSource feedback_;
};
+class CallWithArrayLike : public FixedInputValueNodeT<4, CallWithArrayLike> {
+ using Base = FixedInputValueNodeT<4, CallWithArrayLike>;
+
+ public:
+  // The function, receiver, arguments list and context are fixed inputs.
+ static constexpr int kFunctionIndex = 0;
+ static constexpr int kReceiverIndex = 1;
+ static constexpr int kArgumentsListIndex = 2;
+ static constexpr int kContextIndex = 3;
+
+  // Inputs must be initialized manually.
+ explicit CallWithArrayLike(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
+
+ Input& function() { return input(kFunctionIndex); }
+ Input& receiver() { return input(kReceiverIndex); }
+ Input& arguments_list() { return input(kArgumentsListIndex); }
+ Input& context() { return input(kContextIndex); }
+
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CallSelf : public ValueNodeT<CallSelf> {
+ using Base = ValueNodeT<CallSelf>;
+
+ public:
+  // We assume the receiver as the only fixed input.
+ static constexpr int kReceiverIndex = 0;
+ static constexpr int kFixedInputCount = 1;
+
+ // We need enough inputs to have these fixed inputs plus the maximum arguments
+ // to a function call.
+ static_assert(kMaxInputs >=
+ kFixedInputCount + InstructionStream::kMaxArguments);
+
+  // This ctor is used for variable input counts.
+ // Inputs must be initialized manually.
+ CallSelf(uint64_t bitfield, compiler::JSHeapBroker* broker,
+ const compiler::JSFunctionRef function, ValueNode* receiver)
+ : Base(bitfield),
+ function_(function),
+ expected_parameter_count_(
+ function.shared(broker)
+ .internal_formal_parameter_count_with_receiver()) {
+ set_input(kReceiverIndex, receiver);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
+
+ Input& receiver() { return input(kReceiverIndex); }
+ const Input& receiver() const { return input(kReceiverIndex); }
+ int num_args() const { return input_count() - kFixedInputCount; }
+ Input& arg(int i) { return input(i + kFixedInputCount); }
+ void set_arg(int i, ValueNode* node) {
+ set_input(i + kFixedInputCount, node);
+ }
+ auto args_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_end() { return std::make_reverse_iterator(&arg(num_args() - 1)); }
+
+ compiler::SharedFunctionInfoRef shared_function_info(
+ compiler::JSHeapBroker* broker) const {
+ return function_.shared(broker);
+ }
+
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::JSFunctionRef function_;
+ // Cache the expected parameter count so that we can access it in
+ // MaxCallStackArgs without needing to unpark the local isolate.
+ int expected_parameter_count_;
+};
+
class CallKnownJSFunction : public ValueNodeT<CallKnownJSFunction> {
using Base = ValueNodeT<CallKnownJSFunction>;
@@ -4116,13 +6554,19 @@ class CallKnownJSFunction : public ValueNodeT<CallKnownJSFunction> {
// We need enough inputs to have these fixed inputs plus the maximum arguments
// to a function call.
- static_assert(kMaxInputs >= kFixedInputCount + Code::kMaxArguments);
+ static_assert(kMaxInputs >=
+ kFixedInputCount + InstructionStream::kMaxArguments);
// This ctor is used when for variable input counts.
// Inputs must be initialized manually.
- CallKnownJSFunction(uint64_t bitfield, const compiler::JSFunctionRef function,
+ CallKnownJSFunction(uint64_t bitfield, compiler::JSHeapBroker* broker,
+ const compiler::JSFunctionRef function,
ValueNode* receiver)
- : Base(bitfield), function_(function) {
+ : Base(bitfield),
+ function_(function),
+ expected_parameter_count_(
+ function.shared(broker)
+ .internal_formal_parameter_count_with_receiver()) {
set_input(kReceiverIndex, receiver);
}
@@ -4135,17 +6579,26 @@ class CallKnownJSFunction : public ValueNodeT<CallKnownJSFunction> {
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
}
+ auto args_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_end() { return std::make_reverse_iterator(&arg(num_args() - 1)); }
- compiler::SharedFunctionInfoRef shared_function_info() const {
- return function_.shared();
+ compiler::SharedFunctionInfoRef shared_function_info(
+ compiler::JSHeapBroker* broker) const {
+ return function_.shared(broker);
}
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
const compiler::JSFunctionRef function_;
+ // Cache the expected parameter count so that we can access it in
+ // MaxCallStackArgs without needing to unpark the local isolate.
+ int expected_parameter_count_;
};
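
The cached expected_parameter_count_ above follows a simple pattern: derive the value while the heap data is reachable (at node construction) so that a later query such as MaxCallStackArgs can run without any heap access. A minimal sketch with illustrative names; the max-of-expected-and-actual formula is an assumption, not the real implementation:

    #include <algorithm>

    struct HeapFunctionData { int formal_parameter_count_with_receiver; };

    class CallNode {
     public:
      CallNode(const HeapFunctionData& fn, int num_args)
          : expected_parameter_count_(fn.formal_parameter_count_with_receiver),
            num_args_(num_args) {}

      // Safe to call from a phase with no heap access: uses only the cached copy.
      int MaxCallStackArgs() const {
        return std::max(expected_parameter_count_, num_args_);
      }

     private:
      int expected_parameter_count_;
      int num_args_;
    };

    int main() {
      HeapFunctionData fn{3};
      CallNode call(fn, 5);
      return call.MaxCallStackArgs() == 5 ? 0 : 1;
    }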
class ConstructWithSpread : public ValueNodeT<ConstructWithSpread> {
@@ -4178,6 +6631,10 @@ class ConstructWithSpread : public ValueNodeT<ConstructWithSpread> {
Input& context() { return input(kContextIndex); }
const Input& context() const { return input(kContextIndex); }
int num_args() const { return input_count() - kFixedInputCount; }
+ int num_args_no_spread() const {
+ DCHECK_GT(num_args(), 0);
+ return num_args() - 1;
+ }
Input& arg(int i) { return input(i + kFixedInputCount); }
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
@@ -4186,9 +6643,16 @@ class ConstructWithSpread : public ValueNodeT<ConstructWithSpread> {
// Spread is the last argument/input.
return input(input_count() - 1);
}
+ auto args_no_spread_begin() { return std::make_reverse_iterator(&arg(-1)); }
+ auto args_no_spread_end() {
+ return std::make_reverse_iterator(&arg(num_args_no_spread() - 1));
+ }
compiler::FeedbackSource feedback() const { return feedback_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void VerifyInputs(MaglevGraphLabeller* graph_labeller) const;
+ void MarkTaggedInputsAsDecompressing();
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -4208,9 +6672,12 @@ class ConvertReceiver : public FixedInputValueNodeT<1, ConvertReceiver> {
Input& receiver_input() { return input(0); }
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::JSCall();
+ static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -4219,6 +6686,23 @@ class ConvertReceiver : public FixedInputValueNodeT<1, ConvertReceiver> {
ConvertReceiverMode mode_;
};
+class ConvertHoleToUndefined
+ : public FixedInputValueNodeT<1, ConvertHoleToUndefined> {
+ using Base = FixedInputValueNodeT<1, ConvertHoleToUndefined>;
+
+ public:
+ explicit ConvertHoleToUndefined(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& object_input() { return input(0); }
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class IncreaseInterruptBudget
: public FixedInputNodeT<0, IncreaseInterruptBudget> {
using Base = FixedInputNodeT<0, IncreaseInterruptBudget>;
@@ -4231,7 +6715,7 @@ class IncreaseInterruptBudget
int amount() const { return amount_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4239,25 +6723,46 @@ class IncreaseInterruptBudget
const int amount_;
};
-class ReduceInterruptBudget : public FixedInputNodeT<0, ReduceInterruptBudget> {
- using Base = FixedInputNodeT<0, ReduceInterruptBudget>;
+class ReduceInterruptBudgetForLoop
+ : public FixedInputNodeT<0, ReduceInterruptBudgetForLoop> {
+ using Base = FixedInputNodeT<0, ReduceInterruptBudgetForLoop>;
public:
- explicit ReduceInterruptBudget(uint64_t bitfield, int amount)
+ explicit ReduceInterruptBudgetForLoop(uint64_t bitfield, int amount)
: Base(bitfield), amount_(amount) {
DCHECK_GT(amount, 0);
}
- // TODO(leszeks): This is marked as lazy deopt because the interrupt can throw
- // on a stack overflow. Full lazy deopt information is probably overkill
- // though, we likely don't need the full frame but just the function and
- // source location. Consider adding a minimal lazy deopt info.
static constexpr OpProperties kProperties =
OpProperties::DeferredCall() | OpProperties::LazyDeopt();
int amount() const { return amount_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int amount_;
+};
+
+class ReduceInterruptBudgetForReturn
+ : public FixedInputNodeT<0, ReduceInterruptBudgetForReturn> {
+ using Base = FixedInputNodeT<0, ReduceInterruptBudgetForReturn>;
+
+ public:
+ explicit ReduceInterruptBudgetForReturn(uint64_t bitfield, int amount)
+ : Base(bitfield), amount_(amount) {
+ DCHECK_GT(amount, 0);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::DeferredCall();
+
+ int amount() const { return amount_; }
+
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4276,12 +6781,15 @@ class ThrowReferenceErrorIfHole
static constexpr OpProperties kProperties =
OpProperties::Throw() | OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
const compiler::NameRef& name() const { return name_; }
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -4298,10 +6806,13 @@ class ThrowSuperNotCalledIfHole
static constexpr OpProperties kProperties =
OpProperties::Throw() | OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -4316,10 +6827,13 @@ class ThrowSuperAlreadyCalledIfNotHole
static constexpr OpProperties kProperties =
OpProperties::Throw() | OpProperties::DeferredCall();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& value() { return Node::input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -4333,15 +6847,49 @@ class ThrowIfNotSuperConstructor
static constexpr OpProperties kProperties =
OpProperties::Throw() | OpProperties::DeferredCall();
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
Input& constructor() { return Node::input(0); }
Input& function() { return Node::input(1); }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
+class TransitionElementsKind
+ : public FixedInputNodeT<1, TransitionElementsKind> {
+ using Base = FixedInputNodeT<1, TransitionElementsKind>;
+
+ public:
+ explicit TransitionElementsKind(
+ uint64_t bitfield,
+ base::Vector<const compiler::MapRef> transition_sources,
+ compiler::MapRef transition_target)
+ : Base(bitfield),
+ transition_sources_(transition_sources),
+ transition_target_(transition_target) {}
+
+ // TODO(leszeks): Special case the case where all transitions are fast.
+ static constexpr OpProperties kProperties =
+ OpProperties::DeferredCall() | OpProperties::Writing();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ Input& object_input() { return Node::input(0); }
+
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ const base::Vector<const compiler::MapRef> transition_sources_;
+ const compiler::MapRef transition_target_;
+};
+
class ControlNode : public NodeBase {
public:
// A "hole" in control flow is a control node that unconditionally interrupts
@@ -4386,31 +6934,18 @@ class UnconditionalControlNode : public ControlNode {
};
template <class Derived>
-class UnconditionalControlNodeT : public UnconditionalControlNode {
- static_assert(IsUnconditionalControlNode(opcode_of<Derived>));
- static constexpr size_t kInputCount = 0;
-
- public:
- // Shadowing for static knowledge.
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
- constexpr bool has_inputs() const { return input_count() > 0; }
- constexpr uint16_t input_count() const { return kInputCount; }
- auto end() {
- return std::make_reverse_iterator(&this->input(input_count() - 1));
- }
+class UnconditionalControlNodeT
+ : public FixedInputNodeTMixin<0, UnconditionalControlNode, Derived> {
+ static_assert(IsUnconditionalControlNode(NodeBase::opcode_of<Derived>));
protected:
explicit UnconditionalControlNodeT(uint64_t bitfield,
BasicBlockRef* target_refs)
- : UnconditionalControlNode(bitfield, target_refs) {
- DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
- DCHECK_EQ(NodeBase::input_count(), kInputCount);
- }
+ : FixedInputNodeTMixin<0, UnconditionalControlNode, Derived>(
+ bitfield, target_refs) {}
explicit UnconditionalControlNodeT(uint64_t bitfield, BasicBlock* target)
- : UnconditionalControlNode(bitfield, target) {
- DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
- DCHECK_EQ(NodeBase::input_count(), kInputCount);
- }
+ : FixedInputNodeTMixin<0, UnconditionalControlNode, Derived>(bitfield,
+ target) {}
};
class ConditionalControlNode : public ControlNode {
@@ -4421,19 +6956,17 @@ class ConditionalControlNode : public ControlNode {
class BranchControlNode : public ConditionalControlNode {
public:
BranchControlNode(uint64_t bitfield, BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
+ int true_correction, BasicBlockRef* if_false_refs,
+ int false_correction)
: ConditionalControlNode(bitfield),
if_true_(if_true_refs),
- if_false_(if_false_refs) {}
+ if_false_(if_false_refs) {
+ if_true_.set_interrupt_budget_correction(true_correction);
+ if_false_.set_interrupt_budget_correction(false_correction);
+ }
BasicBlock* if_true() const { return if_true_.block_ptr(); }
BasicBlock* if_false() const { return if_false_.block_ptr(); }
- void set_true_interrupt_correction(int interrupt_budget_correction) {
- if_true_.set_interrupt_budget_correction(interrupt_budget_correction);
- }
- void set_false_interrupt_correction(int interrupt_budget_correction) {
- if_false_.set_interrupt_budget_correction(interrupt_budget_correction);
- }
private:
BasicBlockRef if_true_;
@@ -4445,42 +6978,30 @@ class TerminalControlNode : public ControlNode {
explicit TerminalControlNode(uint64_t bitfield) : ControlNode(bitfield) {}
};
-template <class Derived>
-class TerminalControlNodeT : public TerminalControlNode {
- static_assert(IsTerminalControlNode(opcode_of<Derived>));
-
- public:
- // Shadowing for static knowledge.
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+template <size_t InputCount, class Derived>
+class TerminalControlNodeT
+ : public FixedInputNodeTMixin<InputCount, TerminalControlNode, Derived> {
+ static_assert(IsTerminalControlNode(NodeBase::opcode_of<Derived>));
protected:
- explicit TerminalControlNodeT(uint64_t bitfield)
- : TerminalControlNode(bitfield) {
- DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
- }
+  explicit TerminalControlNodeT(uint64_t bitfield)
+ : FixedInputNodeTMixin<InputCount, TerminalControlNode, Derived>(
+ bitfield) {}
};
template <size_t InputCount, class Derived>
-class BranchControlNodeT : public BranchControlNode {
- static_assert(IsBranchControlNode(opcode_of<Derived>));
- static constexpr size_t kInputCount = InputCount;
-
- public:
- // Shadowing for static knowledge.
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
- constexpr bool has_inputs() const { return input_count() > 0; }
- constexpr uint16_t input_count() const { return kInputCount; }
- auto end() {
- return std::make_reverse_iterator(&this->input(input_count() - 1));
- }
+class BranchControlNodeT
+ : public FixedInputNodeTMixin<InputCount, BranchControlNode, Derived> {
+ static_assert(IsBranchControlNode(NodeBase::opcode_of<Derived>));
protected:
explicit BranchControlNodeT(uint64_t bitfield, BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : BranchControlNode(bitfield, if_true_refs, if_false_refs) {
- DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
- DCHECK_EQ(NodeBase::input_count(), kInputCount);
- }
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : FixedInputNodeTMixin<InputCount, BranchControlNode, Derived>(
+ bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction) {}
};
class Jump : public UnconditionalControlNodeT<Jump> {
@@ -4490,7 +7011,7 @@ class Jump : public UnconditionalControlNodeT<Jump> {
Jump(uint64_t bitfield, BasicBlockRef* target_refs)
: Base(bitfield, target_refs) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -4505,7 +7026,7 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
explicit JumpLoop(uint64_t bitfield, BasicBlockRef* ref)
: Base(bitfield, ref) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -4526,7 +7047,7 @@ class JumpToInlined : public UnconditionalControlNodeT<JumpToInlined> {
MaglevCompilationUnit* unit)
: Base(bitfield, target_refs), unit_(unit) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4543,21 +7064,26 @@ class JumpFromInlined : public UnconditionalControlNodeT<JumpFromInlined> {
explicit JumpFromInlined(uint64_t bitfield, BasicBlockRef* target_refs)
: Base(bitfield, target_refs) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-class Abort : public TerminalControlNode {
+class Abort : public TerminalControlNodeT<0, Abort> {
+  using Base = TerminalControlNodeT<0, Abort>;
+
public:
explicit Abort(uint64_t bitfield, AbortReason reason)
- : TerminalControlNode(bitfield), reason_(reason) {
+ : Base(bitfield), reason_(reason) {
DCHECK_EQ(NodeBase::opcode(), opcode_of<Abort>);
}
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
AbortReason reason() const { return reason_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ int MaxCallStackArgs() const;
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4565,23 +7091,30 @@ class Abort : public TerminalControlNode {
const AbortReason reason_;
};
-class Return : public TerminalControlNode {
+class Return : public TerminalControlNodeT<1, Return> {
+  using Base = TerminalControlNodeT<1, Return>;
+
public:
- explicit Return(uint64_t bitfield) : TerminalControlNode(bitfield) {
+ explicit Return(uint64_t bitfield) : Base(bitfield) {
DCHECK_EQ(NodeBase::opcode(), opcode_of<Return>);
}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
Input& value_input() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
-class Deopt : public TerminalControlNode {
+class Deopt : public TerminalControlNodeT<0, Deopt> {
+  using Base = TerminalControlNodeT<0, Deopt>;
+
public:
explicit Deopt(uint64_t bitfield, DeoptimizeReason reason)
- : TerminalControlNode(bitfield), reason_(reason) {
+ : Base(bitfield), reason_(reason) {
DCHECK_EQ(NodeBase::opcode(), opcode_of<Deopt>);
}
@@ -4589,7 +7122,7 @@ class Deopt : public TerminalControlNode {
DeoptimizeReason reason() const { return reason_; }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4597,11 +7130,13 @@ class Deopt : public TerminalControlNode {
DeoptimizeReason reason_;
};
-class Switch : public ConditionalControlNode {
+class Switch : public FixedInputNodeTMixin<1, ConditionalControlNode, Switch> {
+ using Base = FixedInputNodeTMixin<1, ConditionalControlNode, Switch>;
+
public:
explicit Switch(uint64_t bitfield, int value_base, BasicBlockRef* targets,
int size)
- : ConditionalControlNode(bitfield),
+ : Base(bitfield),
value_base_(value_base),
targets_(targets),
size_(size),
@@ -4609,12 +7144,15 @@ class Switch : public ConditionalControlNode {
explicit Switch(uint64_t bitfield, int value_base, BasicBlockRef* targets,
int size, BasicBlockRef* fallthrough)
- : ConditionalControlNode(bitfield),
+ : Base(bitfield),
value_base_(value_base),
targets_(targets),
size_(size),
fallthrough_(fallthrough) {}
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32};
+
int value_base() const { return value_base_; }
const BasicBlockRef* targets() const { return targets_; }
int size() const { return size_; }
@@ -4627,7 +7165,7 @@ class Switch : public ConditionalControlNode {
Input& value() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
@@ -4644,14 +7182,24 @@ class BranchIfRootConstant
public:
explicit BranchIfRootConstant(uint64_t bitfield, BasicBlockRef* if_true_refs,
+ int true_interrupt_correction,
BasicBlockRef* if_false_refs,
+ int false_interrupt_correction,
RootIndex root_index)
- : Base(bitfield, if_true_refs, if_false_refs), root_index_(root_index) {}
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction),
+ root_index_(root_index) {}
+
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
RootIndex root_index() { return root_index_; }
Input& condition_input() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress values to reference compare.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4666,12 +7214,21 @@ class BranchIfUndefinedOrNull
public:
explicit BranchIfUndefinedOrNull(uint64_t bitfield,
BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : Base(bitfield, if_true_refs, if_false_refs) {}
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction) {}
+
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& condition_input() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress values to reference compare.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -4681,12 +7238,18 @@ class BranchIfJSReceiver : public BranchControlNodeT<1, BranchIfJSReceiver> {
public:
explicit BranchIfJSReceiver(uint64_t bitfield, BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : Base(bitfield, if_true_refs, if_false_refs) {}
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction) {}
+
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& condition_input() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -4697,14 +7260,18 @@ class BranchIfToBooleanTrue
public:
explicit BranchIfToBooleanTrue(uint64_t bitfield, BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : Base(bitfield, if_true_refs, if_false_refs) {}
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction) {}
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
Input& condition_input() { return input(0); }
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
@@ -4721,10 +7288,17 @@ class BranchIfInt32Compare
explicit BranchIfInt32Compare(uint64_t bitfield, Operation operation,
BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction),
+ operation_(operation) {}
+
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kInt32, ValueRepresentation::kInt32};
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4742,17 +7316,28 @@ class BranchIfFloat64Compare
Input& left_input() { return NodeBase::input(kLeftIndex); }
Input& right_input() { return NodeBase::input(kRightIndex); }
- explicit BranchIfFloat64Compare(uint64_t bitfield, Operation operation,
- BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
+ enum class JumpModeIfNaN { kJumpToFalse, kJumpToTrue };
+
+ explicit BranchIfFloat64Compare(
+ uint64_t bitfield, Operation operation, BasicBlockRef* if_true_refs,
+ int true_interrupt_correction, BasicBlockRef* if_false_refs,
+ int false_interrupt_correction,
+ JumpModeIfNaN jump_mode_if_nan = JumpModeIfNaN::kJumpToFalse)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction),
+ operation_(operation),
+ jump_mode_if_nan_(jump_mode_if_nan) {}
+
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kFloat64, ValueRepresentation::kFloat64};
- void AllocateVreg(MaglevVregAllocationState*);
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
Operation operation_;
+ JumpModeIfNaN jump_mode_if_nan_;
};
class BranchIfReferenceCompare
@@ -4767,10 +7352,20 @@ class BranchIfReferenceCompare
explicit BranchIfReferenceCompare(uint64_t bitfield, Operation operation,
BasicBlockRef* if_true_refs,
- BasicBlockRef* if_false_refs)
- : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction),
+ operation_(operation) {}
- void AllocateVreg(MaglevVregAllocationState*);
+ static constexpr typename Base::InputTypes kInputTypes{
+ ValueRepresentation::kTagged, ValueRepresentation::kTagged};
+
+ void MarkTaggedInputsAsDecompressing() {
+ // Don't need to decompress values to reference compare.
+ }
+ void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
@@ -4778,6 +7373,79 @@ class BranchIfReferenceCompare
Operation operation_;
};
+class BranchIfTypeOf : public BranchControlNodeT<1, BranchIfTypeOf> {
+ using Base = BranchControlNodeT<1, BranchIfTypeOf>;
+
+ public:
+ static constexpr int kValueIndex = 0;
+ Input& value_input() { return NodeBase::input(kValueIndex); }
+
+ explicit BranchIfTypeOf(uint64_t bitfield,
+ interpreter::TestTypeOfFlags::LiteralFlag literal,
+ BasicBlockRef* if_true_refs,
+ int true_interrupt_correction,
+ BasicBlockRef* if_false_refs,
+ int false_interrupt_correction)
+ : Base(bitfield, if_true_refs, true_interrupt_correction, if_false_refs,
+ false_interrupt_correction),
+ literal_(literal) {}
+
+ static constexpr
+ typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged};
+
+ void SetValueLocationConstraints();
+ void GenerateCode(MaglevAssembler*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ interpreter::TestTypeOfFlags::LiteralFlag literal_;
+};
+
+constexpr inline OpProperties StaticPropertiesForOpcode(Opcode opcode) {
+ switch (opcode) {
+#define CASE(op) \
+ case Opcode::k##op: \
+ return op::kProperties;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+}
+
+template <typename Function>
+inline void NodeBase::ForAllInputsInRegallocAssignmentOrder(Function&& f) {
+ auto iterate_inputs = [&](InputAllocationPolicy category) {
+ for (Input& input : *this) {
+ switch (compiler::UnallocatedOperand::cast(input.operand())
+ .extended_policy()) {
+ case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
+ if (category == InputAllocationPolicy::kArbitraryRegister)
+ f(category, &input);
+ break;
+
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ if (category == InputAllocationPolicy::kAny) f(category, &input);
+ break;
+
+ case compiler::UnallocatedOperand::FIXED_REGISTER:
+ case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
+ if (category == InputAllocationPolicy::kFixedRegister)
+ f(category, &input);
+ break;
+
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
+ case compiler::UnallocatedOperand::SAME_AS_INPUT:
+ case compiler::UnallocatedOperand::NONE:
+ case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
+ UNREACHABLE();
+ }
+ }
+ };
+
+ iterate_inputs(InputAllocationPolicy::kFixedRegister);
+ iterate_inputs(InputAllocationPolicy::kArbitraryRegister);
+ iterate_inputs(InputAllocationPolicy::kAny);
+}
+
} // namespace maglev
} // namespace internal
} // namespace v8
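Editor's note: the ForAllInputsInRegallocAssignmentOrder helper added above visits a node's inputs in three passes, fixed-register inputs first, then arbitrary-register inputs, then "any" operands. Below is a minimal standalone sketch of that three-pass pattern; Policy, FakeInput and ForAllInputsInOrder are illustrative stand-ins, not the real Input/UnallocatedOperand types or any V8 API.

#include <functional>
#include <iostream>
#include <vector>

// Simplified stand-ins for the real Maglev types.
enum class Policy { kFixedRegister, kArbitraryRegister, kAny };

struct FakeInput {
  const char* name;
  Policy policy;
};

// Visits inputs grouped by allocation policy: fixed registers first, then
// arbitrary registers, then "any" operands -- mirroring the iteration order
// of ForAllInputsInRegallocAssignmentOrder above.
void ForAllInputsInOrder(std::vector<FakeInput>& inputs,
                         const std::function<void(Policy, FakeInput*)>& f) {
  auto iterate = [&](Policy category) {
    for (FakeInput& input : inputs) {
      if (input.policy == category) f(category, &input);
    }
  };
  iterate(Policy::kFixedRegister);
  iterate(Policy::kArbitraryRegister);
  iterate(Policy::kAny);
}

int main() {
  std::vector<FakeInput> inputs = {
      {"lhs", Policy::kAny},
      {"context", Policy::kFixedRegister},
      {"rhs", Policy::kArbitraryRegister},
  };
  // Prints context, rhs, lhs: fixed inputs are visited before the rest.
  ForAllInputsInOrder(inputs, [](Policy, FakeInput* in) {
    std::cout << in->name << "\n";
  });
}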
diff --git a/deps/v8/src/maglev/maglev-phi-representation-selector.cc b/deps/v8/src/maglev/maglev-phi-representation-selector.cc
new file mode 100644
index 0000000000..67966ed7bf
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-phi-representation-selector.cc
@@ -0,0 +1,589 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-phi-representation-selector.h"
+
+#include "src/handles/handles-inl.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-ir-inl.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+namespace {
+
+constexpr int representation_mask_for(ValueRepresentation r) {
+ if (r == ValueRepresentation::kTagged) return 0;
+ return 1 << static_cast<int>(r);
+}
+
+} // namespace
+
+void MaglevPhiRepresentationSelector::Process(Phi* node,
+ const ProcessingState&) {
+ DCHECK_EQ(node->value_representation(), ValueRepresentation::kTagged);
+
+  // We clear the Uint32 bit from {representation_mask} from the start in order
+  // to disable untagged Uint32 Phis for now, because they don't seem very
+  // useful, and this simplifies things a little bit.
+ // TODO(dmercadier): evaluate what benefits untagged Uint32 Phis would bring,
+ // and consider allowing them.
+ unsigned int representation_mask =
+ ~representation_mask_for(ValueRepresentation::kUint32);
+
+ for (int i = 0; i < node->input_count(); i++) {
+ Node* input = node->input(i).node();
+ if (input->Is<SmiConstant>()) {
+ // Could be any representation
+ } else if (Constant* constant = input->TryCast<Constant>()) {
+ if (constant->object().IsHeapNumber()) {
+ representation_mask &=
+ representation_mask_for(ValueRepresentation::kFloat64);
+ } else {
+ // Not a Constant that we can untag.
+ representation_mask = 0;
+ break;
+ }
+ } else if (input->properties().is_conversion()) {
+ DCHECK_EQ(input->input_count(), 1);
+ representation_mask &= representation_mask_for(
+ input->input(0).node()->properties().value_representation());
+ } else {
+ representation_mask = 0;
+ break;
+ }
+ }
+
+ if (base::bits::CountPopulation(representation_mask) == 1) {
+ ConvertTaggedPhiTo(
+ node, static_cast<ValueRepresentation>(
+ base::bits::CountTrailingZeros(representation_mask)));
+ } else {
+ // No possible representations, or more than 1 possible representation.
+ // We'll typically end up in this case if:
+ // - All of the inputs of the Phi are SmiConstant (which can have any
+ // representation)
+    // - Some inputs are Int32 taggings and other inputs are Float64 taggings.
+    // If we had information about the uses of the node, then we could choose
+    // one representation over the other (e.g., Float64 rather than Int32), but
+    // since we don't, we just bail out to avoid choosing the wrong
+ // representation.
+ EnsurePhiInputsTagged(node);
+ }
+}
+
+void MaglevPhiRepresentationSelector::EnsurePhiInputsTagged(Phi* phi) {
+ // Since we are untagging some Phis, it's possible that one of the inputs of
+ // {phi} is an untagged Phi. However, if this function is called, then we've
+ // decided that {phi} is going to stay tagged, and thus, all of its inputs
+  // should be tagged. We'll thus insert tagging operations on the untagged Phi
+  // inputs of {phi}.
+
+ for (int i = 0; i < phi->input_count(); i++) {
+ ValueNode* input = phi->input(i).node();
+ if (Phi* phi_input = input->TryCast<Phi>()) {
+ phi->set_input(i, EnsurePhiTagged(phi_input, phi->predecessor_at(i),
+ NewNodePosition::kEnd));
+ } else {
+      // Inputs of Phis that aren't themselves Phis should always be tagged
+      // (except for the Phis untagged by this class, but {phi} isn't one of
+      // them).
+ DCHECK(input->is_tagged());
+ }
+ }
+}
+
+void MaglevPhiRepresentationSelector::ConvertTaggedPhiTo(
+ Phi* phi, ValueRepresentation repr) {
+ phi->change_representation(repr);
+
+ for (int i = 0; i < phi->input_count(); i++) {
+ ValueNode* input = phi->input(i).node();
+ if (input->Is<SmiConstant>()) {
+ switch (repr) {
+ case ValueRepresentation::kInt32:
+ phi->change_input(i,
+ builder_->GetInt32Constant(
+ input->Cast<SmiConstant>()->value().value()));
+ break;
+ case ValueRepresentation::kFloat64:
+ phi->change_input(i,
+ builder_->GetFloat64Constant(
+ input->Cast<SmiConstant>()->value().value()));
+ break;
+ case ValueRepresentation::kUint32:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ } else if (Constant* constant = input->TryCast<Constant>()) {
+ DCHECK(constant->object().IsHeapNumber());
+ DCHECK_EQ(repr, ValueRepresentation::kFloat64);
+ phi->change_input(i, builder_->GetFloat64Constant(
+ constant->object().AsHeapNumber().value()));
+ } else if (input->properties().is_conversion()) {
+ // Unwrapping the conversion.
+ DCHECK_EQ(input->value_representation(), ValueRepresentation::kTagged);
+ DCHECK_EQ(input->input(0).node()->value_representation(), repr);
+ phi->set_input(i, input->input(0).node());
+ } else {
+ DCHECK_EQ(input->value_representation(), repr);
+ }
+ }
+}
+
+bool MaglevPhiRepresentationSelector::IsUntagging(Opcode op) {
+ switch (op) {
+ case Opcode::kCheckedSmiUntag:
+ case Opcode::kUnsafeSmiUntag:
+ case Opcode::kCheckedObjectToIndex:
+ case Opcode::kCheckedTruncateNumberOrOddballToInt32:
+ case Opcode::kTruncateNumberOrOddballToInt32:
+ case Opcode::kCheckedNumberOrOddballToFloat64:
+ case Opcode::kUncheckedNumberOrOddballToFloat64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+namespace {
+
+Opcode GetOpcodeForCheckedConversion(ValueRepresentation from,
+ ValueRepresentation to, bool truncating) {
+ DCHECK_NE(from, ValueRepresentation::kTagged);
+ DCHECK_NE(to, ValueRepresentation::kTagged);
+
+ switch (from) {
+ case ValueRepresentation::kInt32:
+ switch (to) {
+ case ValueRepresentation::kUint32:
+ return Opcode::kCheckedInt32ToUint32;
+ case ValueRepresentation::kFloat64:
+ return Opcode::kChangeInt32ToFloat64;
+
+ case ValueRepresentation::kInt32:
+ case ValueRepresentation::kTagged:
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+ case ValueRepresentation::kUint32:
+ switch (to) {
+ case ValueRepresentation::kInt32:
+ return Opcode::kCheckedUint32ToInt32;
+
+ case ValueRepresentation::kFloat64:
+ return Opcode::kChangeUint32ToFloat64;
+
+ case ValueRepresentation::kUint32:
+ case ValueRepresentation::kTagged:
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+ case ValueRepresentation::kFloat64:
+ switch (to) {
+ case ValueRepresentation::kInt32:
+ if (truncating) {
+ return Opcode::kTruncateFloat64ToInt32;
+ }
+ return Opcode::kCheckedTruncateFloat64ToInt32;
+ case ValueRepresentation::kUint32:
+ // The graph builder never inserts Tagged->Uint32 conversions, so we
+ // don't have to handle this case.
+ UNREACHABLE();
+
+ case ValueRepresentation::kFloat64:
+ case ValueRepresentation::kTagged:
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+ case ValueRepresentation::kTagged:
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+void MaglevPhiRepresentationSelector::UpdateUntaggingOfPhi(
+ ValueNode* old_untagging) {
+ DCHECK_EQ(old_untagging->input_count(), 1);
+ DCHECK(old_untagging->input(0).node()->Is<Phi>());
+
+ ValueRepresentation from_repr =
+ old_untagging->input(0).node()->value_representation();
+ ValueRepresentation to_repr = old_untagging->value_representation();
+
+  // Since Phis are initially tagged, it would make no sense for
+  // {old_untagging} to convert a Phi to a Tagged value.
+ DCHECK_NE(to_repr, ValueRepresentation::kTagged);
+ // The graph builder never inserts Tagged->Uint32 conversions (and thus, we
+ // don't handle them in GetOpcodeForCheckedConversion).
+ DCHECK_NE(to_repr, ValueRepresentation::kUint32);
+
+ if (from_repr == ValueRepresentation::kTagged) {
+ // The Phi hasn't been untagged, so we leave the conversion as it is.
+ return;
+ }
+
+ if (from_repr == to_repr) {
+ old_untagging->OverwriteWith<Identity>();
+ return;
+ }
+
+ if (old_untagging->Is<UnsafeSmiUntag>()) {
+    // UnsafeSmiUntag nodes are only inserted when the input is a known Smi. If
+    // the current phi has a Float64/Uint32 representation, then we can safely
+    // truncate it to Int32, because we know that the Float64/Uint32 fits in a
+    // Smi, and therefore in an Int32.
+ if (from_repr == ValueRepresentation::kFloat64) {
+ old_untagging->OverwriteWith<UnsafeTruncateFloat64ToInt32>();
+ } else if (from_repr == ValueRepresentation::kUint32) {
+ old_untagging->OverwriteWith<UnsafeTruncateUint32ToInt32>();
+ } else {
+ DCHECK_EQ(from_repr, ValueRepresentation::kInt32);
+ old_untagging->OverwriteWith<Identity>();
+ }
+ return;
+ }
+
+  // The graph builder inserts 3 kinds of Tagged->Int32 conversions that can
+  // have a heap number as input: CheckedTruncateNumberOrOddballToInt32, which
+  // truncates its input (and deopts if it's not a HeapNumber),
+  // TruncateNumberOrOddballToInt32, which truncates its input (assuming that
+  // it's indeed a HeapNumber), and CheckedSmiUntag, which deopts on non-Smi
+  // inputs. The first 2 cannot deopt if we have a Float64 phi and will happily
+  // truncate it, but the 3rd one should deopt if its input cannot be converted
+  // without loss of precision.
+ bool conversion_is_truncating_float64 =
+ old_untagging->Is<CheckedTruncateNumberOrOddballToInt32>() ||
+ old_untagging->Is<TruncateNumberOrOddballToInt32>();
+
+ Opcode needed_conversion = GetOpcodeForCheckedConversion(
+ from_repr, to_repr, conversion_is_truncating_float64);
+
+ if (needed_conversion != old_untagging->opcode()) {
+ old_untagging->OverwriteWith(needed_conversion);
+ }
+}
+
+// If the input of a StoreTaggedFieldNoWriteBarrier was a Phi that got
+// untagged, then we need to retag it, and we might need to actually use a write
+// barrier.
+void MaglevPhiRepresentationSelector::UpdateNodePhiInput(
+ StoreTaggedFieldNoWriteBarrier* node, Phi* phi, int input_index,
+ const ProcessingState& state) {
+ if (input_index == StoreTaggedFieldNoWriteBarrier::kObjectIndex) {
+ // The 1st input of a Store should usually not be untagged. However, it is
+ // possible to write `let x = a ? 4 : 2; x.c = 10`, which will produce a
+ // store whose receiver could be an untagged Phi. So, for such cases, we use
+ // the generic UpdateNodePhiInput method to tag `phi` if needed.
+ UpdateNodePhiInput(static_cast<NodeBase*>(node), phi, input_index, state);
+ return;
+ }
+ DCHECK_EQ(input_index, StoreTaggedFieldNoWriteBarrier::kValueIndex);
+
+ if (phi->value_representation() != ValueRepresentation::kTagged) {
+ // We need to tag {phi}. However, this could turn it into a HeapObject
+ // rather than a Smi (either because {phi} is a Float64 phi, or because it's
+    // an Int32/Uint32 phi that doesn't fit in 31 bits), so we need the write
+ // barrier.
+ node->change_input(input_index, EnsurePhiTagged(phi, current_block_,
+ NewNodePosition::kStart));
+ static_assert(StoreTaggedFieldNoWriteBarrier::kObjectIndex ==
+ StoreTaggedFieldWithWriteBarrier::kObjectIndex);
+ static_assert(StoreTaggedFieldNoWriteBarrier::kValueIndex ==
+ StoreTaggedFieldWithWriteBarrier::kValueIndex);
+ node->OverwriteWith<StoreTaggedFieldWithWriteBarrier>();
+ }
+}
+
+// CheckedStoreSmiField is a bit of a special node, because it expects its input
+// to be a Smi, and not just any Object. The comments in SmiTagPhi explain what
+// this means for untagged Phis.
+void MaglevPhiRepresentationSelector::UpdateNodePhiInput(
+ CheckedStoreSmiField* node, Phi* phi, int input_index,
+ const ProcessingState& state) {
+ if (input_index == CheckedStoreSmiField::kValueIndex) {
+ node->change_input(input_index, SmiTagPhi(phi, node, state));
+ } else {
+ DCHECK_EQ(input_index, CheckedStoreSmiField::kObjectIndex);
+ // The 1st input of a Store should usually not be untagged. However, it is
+ // possible to write `let x = a ? 4 : 2; x.c = 10`, which will produce a
+ // store whose receiver could be an untagged Phi. So, for such cases, we use
+ // the generic UpdateNodePhiInput method to tag `phi` if needed.
+ UpdateNodePhiInput(static_cast<NodeBase*>(node), phi, input_index, state);
+ return;
+ }
+}
+
+// If the input of a StoreFixedArrayElementNoWriteBarrier was a Phi that got
+// untagged, then we need to retag it, and we might need to actually use a write
+// barrier.
+void MaglevPhiRepresentationSelector::UpdateNodePhiInput(
+ StoreFixedArrayElementNoWriteBarrier* node, Phi* phi, int input_index,
+ const ProcessingState& state) {
+ if (input_index != StoreFixedArrayElementNoWriteBarrier::kValueIndex) {
+ UpdateNodePhiInput(static_cast<NodeBase*>(node), phi, input_index, state);
+ return;
+ }
+
+ if (phi->value_representation() != ValueRepresentation::kTagged) {
+ // We need to tag {phi}. However, this could turn it into a HeapObject
+ // rather than a Smi (either because {phi} is a Float64 phi, or because it's
+    // an Int32/Uint32 phi that doesn't fit in 31 bits), so we need the write
+ // barrier.
+ node->change_input(input_index, EnsurePhiTagged(phi, current_block_,
+ NewNodePosition::kStart));
+ static_assert(StoreFixedArrayElementNoWriteBarrier::kElementsIndex ==
+ StoreFixedArrayElementWithWriteBarrier::kElementsIndex);
+ static_assert(StoreFixedArrayElementNoWriteBarrier::kIndexIndex ==
+ StoreFixedArrayElementWithWriteBarrier::kIndexIndex);
+ static_assert(StoreFixedArrayElementNoWriteBarrier::kValueIndex ==
+ StoreFixedArrayElementWithWriteBarrier::kValueIndex);
+ node->OverwriteWith<StoreFixedArrayElementWithWriteBarrier>();
+ }
+}
+
+// CheckedStoreFixedArraySmiElement is a bit of a special node, because it
+// expects its input to be a Smi, and not just any Object. The comments in
+// SmiTagPhi explain what this means for untagged Phis.
+void MaglevPhiRepresentationSelector::UpdateNodePhiInput(
+ CheckedStoreFixedArraySmiElement* node, Phi* phi, int input_index,
+ const ProcessingState& state) {
+ if (input_index == CheckedStoreFixedArraySmiElement::kValueIndex) {
+ node->change_input(input_index, SmiTagPhi(phi, node, state));
+ } else {
+ UpdateNodePhiInput(static_cast<NodeBase*>(node), phi, input_index, state);
+ }
+}
+
+// {node} was using {phi} without any untagging, which means that it was using
+// {phi} as a tagged value, so, if we've untagged {phi}, we need to re-tag it
+// for {node}.
+void MaglevPhiRepresentationSelector::UpdateNodePhiInput(
+ NodeBase* node, Phi* phi, int input_index, const ProcessingState&) {
+ if (node->properties().is_conversion()) {
+ // {node} can't be an Untagging if we reached this point (because
+ // UpdateNodePhiInput is not called on untagging nodes).
+ DCHECK(!IsUntagging(node->opcode()));
+    // So, {node} has to be a conversion that takes as input an untagged node,
+ // and this input happens to be {phi}, which means that {node} is aware that
+ // {phi} isn't tagged. This means that {node} was inserted during the
+ // current phase. In this case, we don't do anything.
+ DCHECK_NE(phi->value_representation(), ValueRepresentation::kTagged);
+ DCHECK_NE(new_nodes_.find(node), new_nodes_.end());
+ } else {
+ node->change_input(input_index, EnsurePhiTagged(phi, current_block_,
+ NewNodePosition::kStart));
+ }
+}
+
+template <class ToNodeT, class FromNodeT>
+ValueNode* MaglevPhiRepresentationSelector::SmiTagPhi(
+ Phi* phi, FromNodeT* user_node, const ProcessingState& state) {
+ // The input graph was something like:
+ //
+ // Tagged Phi
+ // │
+ // │
+ // ▼
+ // FromNodeT
+ //
+ // If the phi has been untagged, we have to retag it to a Smi, after which we
+ // can omit the "CheckedSmi" part of the FromNodeT, which we do by
+ // replacing the FromNodeT by a ToNodeT:
+ //
+ // Untagged Phi
+ // │
+ // │
+ // ▼
+ // CheckedSmiTagFloat64/CheckedSmiTagInt32
+ // │
+ // │
+ // ▼
+ // ToNodeT
+
+ ValueNode* tagged;
+ switch (phi->value_representation()) {
+#define TAG_INPUT(tagging_op) \
+ tagged = NodeBase::New<tagging_op>(builder_->zone(), {phi}); \
+ break;
+ case ValueRepresentation::kFloat64:
+ TAG_INPUT(CheckedSmiTagFloat64)
+ case ValueRepresentation::kInt32:
+ TAG_INPUT(CheckedSmiTagInt32)
+ case ValueRepresentation::kUint32:
+ TAG_INPUT(CheckedSmiTagUint32)
+ case ValueRepresentation::kTagged:
+ return phi;
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+#undef TAG_INPUT
+ }
+
+ tagged->CopyEagerDeoptInfoOf(user_node, builder_->zone());
+
+ state.node_it()->InsertBefore(tagged);
+ if (builder_->has_graph_labeller()) {
+ builder_->graph_labeller()->RegisterNode(tagged);
+ }
+#ifdef DEBUG
+ new_nodes_.insert(tagged);
+#endif
+ user_node->template OverwriteWith<ToNodeT>();
+ return tagged;
+}
+
+ValueNode* MaglevPhiRepresentationSelector::SmiTagPhi(
+ Phi* phi, CheckedStoreSmiField* user_node, const ProcessingState& state) {
+ // Since we're planning on replacing CheckedStoreSmiField by a
+ // StoreTaggedFieldNoWriteBarrier, it's important to ensure that they have the
+ // same layout. OverwriteWith will check the sizes and properties of the
+ // operators, but isn't aware of which inputs are at which index, so we
+ // static_assert that both operators have the same inputs at the same index.
+ static_assert(StoreTaggedFieldNoWriteBarrier::kObjectIndex ==
+ CheckedStoreSmiField::kObjectIndex);
+ static_assert(StoreTaggedFieldNoWriteBarrier::kValueIndex ==
+ CheckedStoreSmiField::kValueIndex);
+ return SmiTagPhi<StoreTaggedFieldNoWriteBarrier>(phi, user_node, state);
+}
+
+ValueNode* MaglevPhiRepresentationSelector::SmiTagPhi(
+ Phi* phi, CheckedStoreFixedArraySmiElement* user_node,
+ const ProcessingState& state) {
+ // Since we're planning on replacing CheckedStoreFixedArraySmiElement by a
+ // StoreFixedArrayElementNoWriteBarrier, it's important to ensure that they
+ // have the same layout. OverwriteWith will check the sizes and properties of
+ // the operators, but isn't aware of which inputs are at which index, so we
+ // static_assert that both operators have the same inputs at the same index.
+ static_assert(StoreFixedArrayElementNoWriteBarrier::kElementsIndex ==
+ CheckedStoreFixedArraySmiElement::kElementsIndex);
+ static_assert(StoreFixedArrayElementNoWriteBarrier::kIndexIndex ==
+ CheckedStoreFixedArraySmiElement::kIndexIndex);
+ static_assert(StoreFixedArrayElementNoWriteBarrier::kValueIndex ==
+ CheckedStoreFixedArraySmiElement::kValueIndex);
+ return SmiTagPhi<StoreFixedArrayElementNoWriteBarrier>(phi, user_node, state);
+}
+
+ValueNode* MaglevPhiRepresentationSelector::EnsurePhiTagged(
+ Phi* phi, BasicBlock* block, NewNodePosition pos) {
+ switch (phi->value_representation()) {
+ case ValueRepresentation::kFloat64:
+ return AddNode(NodeBase::New<Float64ToTagged>(builder_->zone(), {phi}),
+ block, pos);
+ case ValueRepresentation::kInt32:
+ return AddNode(NodeBase::New<Int32ToNumber>(builder_->zone(), {phi}),
+ block, pos);
+ case ValueRepresentation::kUint32:
+ return AddNode(NodeBase::New<Uint32ToNumber>(builder_->zone(), {phi}),
+ block, pos);
+ case ValueRepresentation::kTagged:
+ return phi;
+ case ValueRepresentation::kWord64:
+ UNREACHABLE();
+ }
+}
+
+void MaglevPhiRepresentationSelector::FixLoopPhisBackedge(BasicBlock* block) {
+  // TODO(dmercadier): it would be interesting to compute a fixed point for loop
+ // phis, or at least to go over the loop header twice.
+ if (!block->has_phi()) return;
+ for (Phi* phi : *block->phis()) {
+ int last_input_idx = phi->input_count() - 1;
+ ValueNode* backedge = phi->input(last_input_idx).node();
+ if (phi->value_representation() == ValueRepresentation::kTagged) {
+ // If the backedge is a Phi that was untagged, but {phi} is tagged, then
+ // we need to retag the backedge.
+
+ // Identity nodes are used to replace outdated untagging nodes after a phi
+ // has been untagged. Here, since the backedge was initially tagged, it
+ // couldn't have been such an untagging node, so it shouldn't be an
+ // Identity node now.
+ DCHECK(!backedge->Is<Identity>());
+
+ if (backedge->value_representation() != ValueRepresentation::kTagged) {
+ // Since all Phi inputs are initially tagged, the fact that the backedge
+ // is not tagged means that it's a Phi that we recently untagged.
+ DCHECK(backedge->Is<Phi>());
+ phi->set_input(last_input_idx,
+ EnsurePhiTagged(backedge->Cast<Phi>(), current_block_,
+ NewNodePosition::kEnd));
+ }
+ } else {
+ // If {phi} was untagged and its backedge became Identity, then we need to
+ // unwrap it.
+ DCHECK_NE(phi->value_representation(), ValueRepresentation::kTagged);
+ if (backedge->Is<Identity>()) {
+ DCHECK_EQ(backedge->input(0).node()->value_representation(),
+ phi->value_representation());
+ phi->set_input(last_input_idx, backedge->input(0).node());
+ }
+ }
+ }
+}
+
+template <typename DeoptInfoT>
+void MaglevPhiRepresentationSelector::BypassIdentities(
+ const DeoptInfoT* deopt_info) {
+ detail::DeepForEachInput(deopt_info,
+ [&](ValueNode*& node, InputLocation* input) {
+ if (node->Is<Identity>()) {
+ node = node->input(0).node();
+ }
+ });
+}
+template void MaglevPhiRepresentationSelector::BypassIdentities<EagerDeoptInfo>(
+ const EagerDeoptInfo*);
+template void MaglevPhiRepresentationSelector::BypassIdentities<LazyDeoptInfo>(
+ const LazyDeoptInfo*);
+
+ValueNode* MaglevPhiRepresentationSelector::AddNode(ValueNode* node,
+ BasicBlock* block,
+ NewNodePosition pos) {
+ if (block == current_block_) {
+    // When adding a Node to the current block, we delay its insertion until
+    // we've finished processing the current block, to avoid mutating the list
+    // of nodes while we're iterating over it.
+ if (pos == NewNodePosition::kStart) {
+ new_nodes_current_block_start_.push_back(node);
+ } else {
+ new_nodes_current_block_end_.push_back(node);
+ }
+ } else {
+ // However, when adding a node in a predecessor, the node won't be used
+ // until the current block, and it might be using nodes computed in the
+ // predecessor, so we add it at the end of the predecessor.
+ DCHECK_EQ(pos, NewNodePosition::kEnd);
+ block->nodes().Add(node);
+ }
+ if (builder_->has_graph_labeller()) {
+ builder_->graph_labeller()->RegisterNode(node);
+ }
+#ifdef DEBUG
+ new_nodes_.insert(node);
+#endif
+ return node;
+}
+
+void MaglevPhiRepresentationSelector::MergeNewNodesInBlock(BasicBlock* block) {
+ if (block != nullptr && !new_nodes_current_block_start_.empty()) {
+ for (Node* node : new_nodes_current_block_start_) {
+ block->nodes().AddFront(node);
+ }
+ }
+ new_nodes_current_block_start_.clear();
+
+ if (block != nullptr && !new_nodes_current_block_end_.empty()) {
+ for (Node* node : new_nodes_current_block_end_) {
+ block->nodes().Add(node);
+ }
+ }
+ new_nodes_current_block_end_.clear();
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
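Editor's note: the representation mask computed in Process(Phi*) above is an intersection, over all phi inputs, of the untagged representations each input could supply; the phi is only untagged when exactly one candidate bit survives. Below is a simplified standalone sketch of that intersection, where the Repr enum, MaskFor and C++20 <bit> stand in for the real ValueRepresentation, representation_mask_for and base::bits (none of these names are the actual Maglev types), and each input directly narrows the mask.

#include <bit>
#include <cstdio>
#include <vector>

// Simplified mirror of ValueRepresentation and representation_mask_for().
enum class Repr { kTagged = 0, kInt32 = 1, kUint32 = 2, kFloat64 = 3 };

constexpr unsigned MaskFor(Repr r) {
  return r == Repr::kTagged ? 0u : 1u << static_cast<int>(r);
}

int main() {
  // Start with every representation allowed except Uint32, as in the pass.
  unsigned mask = ~MaskFor(Repr::kUint32);

  // Each phi input narrows the mask to the representation it can supply.
  std::vector<Repr> inputs = {Repr::kInt32, Repr::kInt32};
  for (Repr r : inputs) mask &= MaskFor(r);

  if (std::popcount(mask) == 1) {
    // Exactly one candidate left: untag the phi to that representation
    // (the trailing-zero count recovers the enum value from the mask bit).
    std::printf("untag to representation %d\n", std::countr_zero(mask));
  } else {
    // Zero or several candidates: keep the phi tagged.
    std::printf("keep tagged\n");
  }
}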
diff --git a/deps/v8/src/maglev/maglev-phi-representation-selector.h b/deps/v8/src/maglev/maglev-phi-representation-selector.h
new file mode 100644
index 0000000000..52ef9a14d3
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-phi-representation-selector.h
@@ -0,0 +1,179 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_PHI_REPRESENTATION_SELECTOR_H_
+#define V8_MAGLEV_MAGLEV_PHI_REPRESENTATION_SELECTOR_H_
+
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-graph-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class Graph;
+
+class MaglevPhiRepresentationSelector {
+ public:
+ explicit MaglevPhiRepresentationSelector(MaglevGraphBuilder* builder)
+ : builder_(builder),
+ new_nodes_current_block_start_(builder->zone()),
+ new_nodes_current_block_end_(builder->zone()) {}
+
+ void PreProcessGraph(Graph* graph) {}
+ void PostProcessGraph(Graph* graph) { MergeNewNodesInBlock(current_block_); }
+ void PreProcessBasicBlock(BasicBlock* block) {
+ MergeNewNodesInBlock(current_block_);
+ current_block_ = block;
+ }
+
+ void Process(Phi* node, const ProcessingState&);
+
+ void Process(JumpLoop* node, const ProcessingState&) {
+ FixLoopPhisBackedge(node->target());
+ }
+
+ template <class NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ UpdateNodeInputs(node, state);
+ }
+
+ private:
+  // Updates the inputs of {phi} so that they all have {repr} representation,
+  // and updates {phi}'s representation to {repr}.
+ void ConvertTaggedPhiTo(Phi* phi, ValueRepresentation repr);
+
+ // Since this pass changes the representation of Phis, it makes some untagging
+ // operations outdated: if we've decided that a Phi should have Int32
+ // representation, then we don't need to do a kCheckedSmiUntag before using
+  // it. UpdateNodeInputs(n) removes such untaggings from {n}'s inputs (and
+  // inserts new conversions if needed, from Int32 to Float64 for instance).
+ template <class NodeT>
+ void UpdateNodeInputs(NodeT* n, const ProcessingState& state) {
+ NodeBase* node = static_cast<NodeBase*>(n);
+
+ if (IsUntagging(n->opcode())) {
+ if (node->input(0).node()->Is<Phi>() &&
+ node->input(0).node()->value_representation() !=
+ ValueRepresentation::kTagged) {
+ DCHECK_EQ(node->input_count(), 1);
+ // This untagging conversion is outdated, since its input has been
+ // untagged. Depending on the conversion, it might need to be replaced
+ // by another untagged->untagged conversion, or it might need to be
+        // removed altogether (or rather, replaced by an identity node).
+ UpdateUntaggingOfPhi(n->template Cast<ValueNode>());
+ return;
+ }
+ } else {
+ UpdateNonUntaggingNodeInputs(n, state);
+ }
+
+ // It's important to check the properties of {node} rather than the static
+ // properties of `NodeT`, because `UpdateUntaggingOfPhi` could have changed
+ // the opcode of {node}, potentially converting a deopting node into a
+ // non-deopting one.
+ if (node->properties().can_eager_deopt()) {
+ BypassIdentities(node->eager_deopt_info());
+ }
+ if (node->properties().can_lazy_deopt()) {
+ BypassIdentities(node->lazy_deopt_info());
+ }
+ }
+
+ template <class NodeT>
+ void UpdateNonUntaggingNodeInputs(NodeT* n, const ProcessingState& state) {
+ NodeBase* node = static_cast<NodeBase*>(n);
+
+ // It would be bad to re-tag the input of an untagging node, so this
+ // function should never be called on untagging nodes.
+ DCHECK(!IsUntagging(n->opcode()));
+
+ for (int i = 0; i < n->input_count(); i++) {
+ ValueNode* input = node->input(i).node();
+ if (input->Is<Identity>()) {
+ // Bypassing the identity
+ node->change_input(i, input->input(0).node());
+ } else if (Phi* phi = input->TryCast<Phi>()) {
+ // If the input is a Phi and it was used without any untagging, then
+ // we need to retag it (with some additional checks/changes for some
+ // nodes, cf the overload of UpdateNodePhiInput).
+ UpdateNodePhiInput(n, phi, i, state);
+ }
+ }
+ }
+
+ void UpdateNodePhiInput(StoreTaggedFieldNoWriteBarrier* node, Phi* phi,
+ int input_index, const ProcessingState& state);
+ void UpdateNodePhiInput(CheckedStoreSmiField* node, Phi* phi, int input_index,
+ const ProcessingState& state);
+ void UpdateNodePhiInput(StoreFixedArrayElementNoWriteBarrier* node, Phi* phi,
+ int input_index, const ProcessingState& state);
+ void UpdateNodePhiInput(CheckedStoreFixedArraySmiElement* node, Phi* phi,
+ int input_index, const ProcessingState& state);
+ void UpdateNodePhiInput(NodeBase* node, Phi* phi, int input_index,
+ const ProcessingState& state);
+
+ void EnsurePhiInputsTagged(Phi* phi);
+
+ // Returns true if {op} is an untagging node.
+ bool IsUntagging(Opcode op);
+
+ // Updates {old_untagging} to reflect that its Phi input has been untagged and
+ // that a different conversion is now needed.
+ void UpdateUntaggingOfPhi(ValueNode* old_untagging);
+
+ // NewNodePosition is used to represent where a new node should be inserted:
+ // at the start of a block (kStart), or at the end of a block (kEnd).
+ enum class NewNodePosition { kStart, kEnd };
+
+ // Tags {phi} as a Smi.
+ ValueNode* SmiTagPhi(Phi* phi, CheckedStoreSmiField* user_node,
+ const ProcessingState& state);
+ ValueNode* SmiTagPhi(Phi* phi, CheckedStoreFixedArraySmiElement* user_node,
+ const ProcessingState& state);
+ template <class ToNodeT, class FromNodeT>
+ ValueNode* SmiTagPhi(Phi*, FromNodeT* user_node,
+ const ProcessingState& state);
+
+  // Returns a node that represents a tagged version of {phi}.
+ ValueNode* EnsurePhiTagged(Phi* phi, BasicBlock* block, NewNodePosition pos);
+
+ ValueNode* AddNode(ValueNode* node, BasicBlock* block, NewNodePosition pos);
+
+ // Merges the nodes from {new_nodes_current_block_start_} and
+ // {new_nodes_current_block_end_} into their destinations.
+ void MergeNewNodesInBlock(BasicBlock* block);
+
+ // If {block} is the start of a loop header, FixLoopPhisBackedge inserts the
+ // necessary tagging on the backedge of the loop Phis of the loop header.
+ // Additionally, if {block} contains untagged loop phis whose backedges have
+  // been updated to Identity, FixLoopPhisBackedge unwraps those Identity nodes.
+ void FixLoopPhisBackedge(BasicBlock* block);
+
+ // Replaces Identity nodes by their inputs in {deopt_info}
+ template <typename DeoptInfoT>
+ void BypassIdentities(const DeoptInfoT* deopt_info);
+
+ MaglevGraphBuilder* builder_ = nullptr;
+ BasicBlock* current_block_ = nullptr;
+
+  // {new_nodes_current_block_start_} and {new_nodes_current_block_end_} are
+  // used to store new nodes added by this pass, while delaying their insertion
+  // into their destination, in order to not mutate the linked list of nodes of
+  // the current block while also iterating it. Nodes in
+  // {new_nodes_current_block_start_} and {new_nodes_current_block_end_} will be
+  // inserted respectively at the beginning and the end of the current block.
+ ZoneVector<Node*> new_nodes_current_block_start_;
+ ZoneVector<Node*> new_nodes_current_block_end_;
+
+#ifdef DEBUG
+ std::unordered_set<NodeBase*> new_nodes_;
+#endif
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_PHI_REPRESENTATION_SELECTOR_H_
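Editor's note: the selector above is written as a graph-processor client: PreProcessBasicBlock flushes nodes queued for the previous block and records the new current block, Process handles each node, and PostProcessGraph flushes the last block. Below is a schematic driver loop showing that calling order on stand-in types; FakeGraph, FakeBlock, FakeNode, RunPass and CountingPass are illustrative only, and this is not V8's actual GraphProcessor (whose hooks also take a ProcessingState).

#include <vector>

// Stand-ins for the real graph types; only the shape matters here.
struct FakeNode {};
struct FakeBlock { std::vector<FakeNode> nodes; };
struct FakeGraph { std::vector<FakeBlock> blocks; };

// Any pass exposing these four hooks can be driven by the same loop.
template <typename Pass>
void RunPass(Pass& pass, FakeGraph& graph) {
  pass.PreProcessGraph(&graph);
  for (FakeBlock& block : graph.blocks) {
    // Gives the pass a chance to merge nodes queued for the previous block
    // before iteration over the new block starts.
    pass.PreProcessBasicBlock(&block);
    for (FakeNode& node : block.nodes) {
      pass.Process(&node);
    }
  }
  // One last flush for the final block.
  pass.PostProcessGraph(&graph);
}

struct CountingPass {
  int processed = 0;
  void PreProcessGraph(FakeGraph*) {}
  void PreProcessBasicBlock(FakeBlock*) {}
  void Process(FakeNode*) { ++processed; }
  void PostProcessGraph(FakeGraph*) {}
};

int main() {
  FakeGraph graph{{FakeBlock{{FakeNode{}, FakeNode{}}}, FakeBlock{{FakeNode{}}}}};
  CountingPass pass;
  RunPass(pass, graph);
  return pass.processed == 3 ? 0 : 1;
}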
diff --git a/deps/v8/src/maglev/maglev-regalloc.cc b/deps/v8/src/maglev/maglev-regalloc.cc
index 499993e209..3bb45ba267 100644
--- a/deps/v8/src/maglev/maglev-regalloc.cc
+++ b/deps/v8/src/maglev/maglev-regalloc.cc
@@ -5,6 +5,7 @@
#include "src/maglev/maglev-regalloc.h"
#include <sstream>
+#include <type_traits>
#include "src/base/bits.h"
#include "src/base/logging.h"
@@ -12,6 +13,7 @@
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/compiler/backend/instruction.h"
+#include "src/heap/parked-scope.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compilation-unit.h"
@@ -23,6 +25,15 @@
#include "src/maglev/maglev-ir-inl.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
+#include "src/zone/zone-containers.h"
+
+#ifdef V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/register-arm64.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/register-x64.h"
+#else
+#error "Maglev does not supported this architecture."
+#endif
namespace v8 {
namespace internal {
@@ -132,6 +143,10 @@ bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
// Gap moves may already be inserted in the target, so skip over those.
return node->id() < target->FirstNonGapMoveId();
}
+
+ // Drop all values on resumable loop headers.
+ if (target->has_state() && target->state()->is_resumable_loop()) return false;
+
// TODO(verwaest): This should be true but isn't because we don't yet
// eliminate dead code.
// DCHECK_GT(node->next_use, source->id());
@@ -140,8 +155,16 @@ bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
return node->live_range().end >= target->first_id();
}
+// TODO(dmercadier): this function should never clear any registers, since dead
+// registers should always have been cleared:
+// - Nodes without uses have their output registers cleared right after their
+// allocation by `FreeRegistersUsedBy(node)`.
+// - Once the last use of a Node has been processed, its register is freed (by
+// UpdateUse, called from Assigned***Input, called by AssignInputs).
+// Thus, this function should DCHECK that all of the registers are live at
+// target, rather than clearing the ones that aren't.
template <typename RegisterT>
-void ClearDeadFallthroughRegisters(RegisterFrameState<RegisterT> registers,
+void ClearDeadFallthroughRegisters(RegisterFrameState<RegisterT>& registers,
ConditionalControlNode* control_node,
BasicBlock* target) {
RegListBase<RegisterT> list = registers.used();
@@ -155,6 +178,11 @@ void ClearDeadFallthroughRegisters(RegisterFrameState<RegisterT> registers,
}
}
}
+
+bool IsDeadNodeToSkip(Node* node) {
+ return node->Is<ValueNode>() && node->Cast<ValueNode>()->is_dead() &&
+ !node->properties().is_required_when_unused();
+}
} // namespace
StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
@@ -162,8 +190,18 @@ StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
: compilation_info_(compilation_info), graph_(graph) {
ComputePostDominatingHoles();
AllocateRegisters();
- graph_->set_tagged_stack_slots(tagged_.top);
- graph_->set_untagged_stack_slots(untagged_.top);
+ uint32_t tagged_stack_slots = tagged_.top;
+ uint32_t untagged_stack_slots = untagged_.top;
+#ifdef V8_TARGET_ARCH_ARM64
+ // Due to alignment constraints, we add one untagged slot if
+ // stack_slots + fixed_slot_count is odd.
+ static_assert(StandardFrameConstants::kFixedSlotCount % 2 == 1);
+ if ((tagged_stack_slots + untagged_stack_slots) % 2 == 0) {
+ untagged_stack_slots++;
+ }
+#endif // V8_TARGET_ARCH_ARM64
+ graph_->set_tagged_stack_slots(tagged_stack_slots);
+ graph_->set_untagged_stack_slots(untagged_stack_slots);
}
StraightForwardRegisterAllocator::~StraightForwardRegisterAllocator() = default;
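Editor's note: the ARM64 padding added to the constructor above keeps the full frame an even number of slots: kFixedSlotCount is odd, so the tagged + untagged portion must be odd as well. A tiny worked check of that rule follows; the slot counts, including the value used for kFixedSlotCount, are made up for illustration.

#include <cassert>

int main() {
  // Illustrative numbers only: an odd fixed-slot portion, as the
  // static_assert above requires, plus the allocator's results.
  const int kFixedSlotCount = 5;  // not the real constant
  int tagged = 4, untagged = 2;

  // Same rule as above: if tagged + untagged is even, pad with one extra
  // untagged slot so that the full frame stays an even number of slots
  // (16-byte alignment with 8-byte slots).
  if ((tagged + untagged) % 2 == 0) untagged++;

  assert((tagged + untagged + kFixedSlotCount) % 2 == 0);
  return 0;
}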
@@ -232,7 +270,7 @@ void StraightForwardRegisterAllocator::ComputePostDominatingHoles() {
// the block. Such a list of jumps terminates in return or jumploop.
for (BasicBlock* block : base::Reversed(*graph_)) {
ControlNode* control = block->control_node();
- if (auto node = control->TryCast<Jump>()) {
+ if (auto node = control->TryCast<UnconditionalControlNode>()) {
// If the current control node is a jump, prepend it to the list of jumps
// at the target.
control->set_next_post_dominating_hole(
@@ -310,6 +348,10 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
constant->SetConstantLocation();
USE(value);
}
+ for (const auto& [address, constant] : graph_->external_references()) {
+ constant->SetConstantLocation();
+ USE(address);
+ }
for (block_it_ = graph_->begin(); block_it_ != graph_->end(); ++block_it_) {
BasicBlock* block = *block_it_;
@@ -387,7 +429,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
// TODO(leszeks): We should remove dead phis entirely and turn this
// into a DCHECK.
if (!phi->has_valid_live_range()) continue;
- phi->SetNoSpillOrHint();
+ phi->SetNoSpill();
TryAllocateToInput(phi);
}
if (block->is_exception_handler_block()) {
@@ -416,7 +458,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
DCHECK(phi->owner().is_receiver());
// The receiver is a special case for a fairly silly reason:
// OptimizedFrame::Summarize requires the receiver (and the function)
- // to be in a stack slot, since it's value must be available even
+ // to be in a stack slot, since its value must be available even
// though we're not deoptimizing (and thus register states are not
// available).
//
@@ -440,16 +482,31 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
// a DCHECK.
if (!phi->has_valid_live_range()) continue;
if (phi->result().operand().IsAllocated()) continue;
- // We assume that Phis are always untagged, and so are always allocated
- // in a general register.
- if (!general_registers_.UnblockedFreeIsEmpty()) {
- compiler::AllocatedOperand allocation =
- general_registers_.AllocateRegister(phi);
- phi->result().SetAllocated(allocation);
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->Process(phi, ProcessingState(block_it_));
- printing_visitor_->os()
- << "phi (new reg) " << phi->result().operand() << std::endl;
+ if (phi->value_representation() == ValueRepresentation::kFloat64) {
+ // We'll use a double register.
+ if (!double_registers_.UnblockedFreeIsEmpty()) {
+ compiler::AllocatedOperand allocation =
+ double_registers_.AllocateRegister(phi, phi->hint());
+ phi->result().SetAllocated(allocation);
+ SetLoopPhiRegisterHint(phi, allocation.GetDoubleRegister());
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->Process(phi, ProcessingState(block_it_));
+ printing_visitor_->os()
+ << "phi (new reg) " << phi->result().operand() << std::endl;
+ }
+ }
+ } else {
+ // We'll use a general purpose register for this Phi.
+ if (!general_registers_.UnblockedFreeIsEmpty()) {
+ compiler::AllocatedOperand allocation =
+ general_registers_.AllocateRegister(phi, phi->hint());
+ phi->result().SetAllocated(allocation);
+ SetLoopPhiRegisterHint(phi, allocation.GetRegister());
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->Process(phi, ProcessingState(block_it_));
+ printing_visitor_->os()
+ << "phi (new reg) " << phi->result().operand() << std::endl;
+ }
}
}
}
@@ -481,8 +538,35 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
VerifyRegisterState();
node_it_ = block->nodes().begin();
- for (; node_it_ != block->nodes().end(); ++node_it_) {
- AllocateNode(*node_it_);
+ for (; node_it_ != block->nodes().end();) {
+ Node* node = *node_it_;
+
+ if (IsDeadNodeToSkip(node)) {
+ // We remove unused pure nodes.
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "Removing unused node "
+ << PrintNodeLabel(graph_labeller(), node) << "\n";
+ }
+
+ if (!node->Is<Identity>()) {
+ // Updating the uses of the inputs in order to free dead input
+ // registers. We don't do this for Identity nodes, because they were
+ // skipped during use marking, and their inputs are thus not aware
+ // that they were used by this node.
+ DCHECK(!node->properties().can_deopt());
+ node->ForAllInputsInRegallocAssignmentOrder(
+ [&](NodeBase::InputAllocationPolicy, Input* input) {
+ UpdateUse(input);
+ });
+ }
+
+ node_it_ = block->nodes().RemoveAt(node_it_);
+ continue;
+ }
+
+ AllocateNode(node);
+ ++node_it_;
}
AllocateControlNode(block->control_node(), block);
}
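Editor's note: the loop above advances node_it_ only when a node is kept; when a dead pure node is removed, RemoveAt already returns the iterator of the following node. The same erase-while-iterating pattern is sketched below on a std::list, used as a simplified stand-in for the block's intrusive node list (FakeNode and its dead flag are illustrative).

#include <cstdio>
#include <list>

struct FakeNode {
  int id;
  bool dead;
};

int main() {
  std::list<FakeNode> nodes = {{1, false}, {2, true}, {3, false}};

  // Advance the iterator only when the node is kept; erase() already returns
  // the iterator of the next node, mirroring RemoveAt() above.
  for (auto it = nodes.begin(); it != nodes.end();) {
    if (it->dead) {
      it = nodes.erase(it);
      continue;
    }
    std::printf("allocating node %d\n", it->id);
    ++it;
  }
}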
@@ -498,6 +582,11 @@ void StraightForwardRegisterAllocator::FreeRegistersUsedBy(ValueNode* node) {
void StraightForwardRegisterAllocator::UpdateUse(
ValueNode* node, InputLocation* input_location) {
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "Using " << PrintNodeLabel(graph_labeller(), node) << "...\n";
+ }
+
DCHECK(!node->is_dead());
// Update the next use.
@@ -528,14 +617,10 @@ void StraightForwardRegisterAllocator::UpdateUse(
}
}
-void StraightForwardRegisterAllocator::UpdateUse(
+void StraightForwardRegisterAllocator::AllocateEagerDeopt(
const EagerDeoptInfo& deopt_info) {
detail::DeepForEachInput(
&deopt_info, [&](ValueNode* node, InputLocation* input) {
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os()
- << "- using " << PrintNodeLabel(graph_labeller(), node) << "\n";
- }
// We might have dropped this node without spilling it. Spill it now.
if (!node->has_register() && !node->is_loadable()) {
Spill(node);
@@ -545,30 +630,32 @@ void StraightForwardRegisterAllocator::UpdateUse(
});
}
-void StraightForwardRegisterAllocator::UpdateUse(
+void StraightForwardRegisterAllocator::AllocateLazyDeopt(
const LazyDeoptInfo& deopt_info) {
- detail::DeepForEachInput(
- &deopt_info, [&](ValueNode* node, InputLocation* input) {
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os()
- << "- using " << PrintNodeLabel(graph_labeller(), node) << "\n";
- }
- // Lazy deopts always need spilling, and should always be loaded from
- // their loadable slot.
- Spill(node);
- input->InjectLocation(node->loadable_slot());
- UpdateUse(node, input);
- });
+ detail::DeepForEachInput(&deopt_info,
+ [&](ValueNode* node, InputLocation* input) {
+ // Lazy deopts always need spilling, and should
+ // always be loaded from their loadable slot.
+ Spill(node);
+ input->InjectLocation(node->loadable_slot());
+ UpdateUse(node, input);
+ });
}
#ifdef DEBUG
namespace {
-Register GetNodeResultRegister(Node* node) {
- ValueNode* value_node = node->TryCast<ValueNode>();
- if (!value_node) return Register::no_reg();
- if (!value_node->result().operand().IsRegister()) return Register::no_reg();
- return value_node->result().AssignedGeneralRegister();
-}
+#define GET_NODE_RESULT_REGISTER_T(RegisterT, AssignedRegisterT) \
+ RegisterT GetNodeResult##RegisterT(Node* node) { \
+ ValueNode* value_node = node->TryCast<ValueNode>(); \
+ if (!value_node) return RegisterT::no_reg(); \
+ if (!value_node->result().operand().Is##RegisterT()) { \
+ return RegisterT::no_reg(); \
+ } \
+ return value_node->result().AssignedRegisterT(); \
+ }
+GET_NODE_RESULT_REGISTER_T(Register, AssignedGeneralRegister)
+GET_NODE_RESULT_REGISTER_T(DoubleRegister, AssignedDoubleRegister)
+#undef GET_NODE_RESULT_REGISTER_T
} // namespace
#endif // DEBUG
@@ -597,33 +684,35 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
AllocateNodeResult(node->Cast<ValueNode>());
}
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os() << "Updating uses...\n";
- }
-
- // Update uses only after allocating the node result. This order is necessary
- // to avoid emitting input-clobbering gap moves during node result allocation.
+ // Eager deopts might happen after the node result has been set, so allocate
+ // them after result allocation.
if (node->properties().can_eager_deopt()) {
if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os() << "Using eager deopt nodes...\n";
- }
- UpdateUse(*node->eager_deopt_info());
- }
- for (Input& input : *node) {
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os()
- << "Using input " << PrintNodeLabel(graph_labeller(), input.node())
- << "...\n";
+ printing_visitor_->os() << "Allocating eager deopt inputs...\n";
}
- UpdateUse(&input);
+ AllocateEagerDeopt(*node->eager_deopt_info());
}
- // Lazy deopts are semantically after the node, so update them last.
+ // Lazy deopts are semantically after the node, so allocate them last.
if (node->properties().can_lazy_deopt()) {
if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os() << "Using lazy deopt nodes...\n";
+ printing_visitor_->os() << "Allocating lazy deopt inputs...\n";
}
- UpdateUse(*node->lazy_deopt_info());
+ // Ensure all values live from a throwing node across its catch block are
+ // spilled so they can properly be merged after the catch block.
+ if (node->properties().can_throw()) {
+ ExceptionHandlerInfo* info = node->exception_handler_info();
+ if (info->HasExceptionHandler() && !node->properties().is_call()) {
+ BasicBlock* block = info->catch_block.block_ptr();
+ auto spill = [&](auto reg, ValueNode* node) {
+ if (node->live_range().end < block->first_id()) return;
+ Spill(node);
+ };
+ general_registers_.ForEachUsedRegister(spill);
+ double_registers_.ForEachUsedRegister(spill);
+ }
+ }
+ AllocateLazyDeopt(*node->lazy_deopt_info());
}
if (node->properties().needs_register_snapshot()) SaveRegisterSnapshot(node);
@@ -635,11 +724,15 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
printing_visitor_->os() << "\n";
}
- // All the temporaries should be free by the end. The exception is the node
- // result, which could be written into a register that was previously
- // considered a temporary.
- DCHECK_EQ(general_registers_.free() |
- (node->general_temporaries() - GetNodeResultRegister(node)),
+ // Result register should not be in temporaries.
+ DCHECK_IMPLIES(GetNodeResultRegister(node) != Register::no_reg(),
+ !node->general_temporaries().has(GetNodeResultRegister(node)));
+ DCHECK_IMPLIES(
+ GetNodeResultDoubleRegister(node) != DoubleRegister::no_reg(),
+ !node->double_temporaries().has(GetNodeResultDoubleRegister(node)));
+
+ // All the temporaries should be free by the end.
+ DCHECK_EQ(general_registers_.free() | node->general_temporaries(),
general_registers_.free());
DCHECK_EQ(double_registers_.free() | node->double_temporaries(),
double_registers_.free());
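For orientation, the order of operations this hunk establishes inside AllocateNode can be summarized as follows (a paraphrase of the code above, not part of the patch):

// AllocateNodeResult(node)         - if the node produces a value
// AllocateEagerDeopt(...)          - eager deopts may observe the result
// spill values live across catch   - throwing, non-call nodes with a handler
// AllocateLazyDeopt(...)           - lazy deopts are semantically after the node
// SaveRegisterSnapshot(node)       - only if needs_register_snapshot()
// DCHECKs                          - temporaries free, result not a temporary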
@@ -656,7 +749,7 @@ void StraightForwardRegisterAllocator::DropRegisterValueAtEnd(RegisterT reg) {
ValueNode* node = list.GetValue(reg);
// If the register is not live after the current node, just remove its
// value.
- if (node->live_range().end == current_node_->id()) {
+ if (IsCurrentNodeLastUseOf(node)) {
node->RemoveRegister(reg);
} else {
DropRegisterValue(list, reg);
@@ -668,7 +761,7 @@ void StraightForwardRegisterAllocator::DropRegisterValueAtEnd(RegisterT reg) {
void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
DCHECK(!node->Is<Phi>());
- node->SetNoSpillOrHint();
+ node->SetNoSpill();
compiler::UnallocatedOperand operand =
compiler::UnallocatedOperand::cast(node->result().operand());
@@ -700,6 +793,8 @@ void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
case compiler::UnallocatedOperand::SAME_AS_INPUT: {
Input& input = node->input(operand.input_index());
node->result().SetAllocated(ForceAllocate(input, node));
+ // Clear any hint that (probably) comes from this constraint.
+ if (node->has_hint()) input.node()->ClearHint();
break;
}
@@ -757,6 +852,10 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
// register, as we may still want to use it elsewhere.
if (!registers.UnblockedFreeIsEmpty()) {
RegisterT target_reg = registers.unblocked_free().first();
+ RegisterT hint_reg = node->GetRegisterHint<RegisterT>();
+ if (hint_reg.is_valid() && registers.unblocked_free().has(hint_reg)) {
+ target_reg = hint_reg;
+ }
registers.RemoveFromFree(target_reg);
registers.SetValueWithoutBlocking(target_reg, node);
    // Emit a gap move.
@@ -800,7 +899,6 @@ void StraightForwardRegisterAllocator::InitializeBranchTargetPhis(
Input& input = phi->input(predecessor_id);
input.InjectLocation(input.node()->allocation());
}
- for (Phi* phi : *phis) UpdateUse(&phi->input(predecessor_id));
}
void StraightForwardRegisterAllocator::InitializeConditionalBranchTarget(
@@ -837,7 +935,11 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
DCHECK_EQ(node->input_count(), 0);
- DCHECK_EQ(node->properties(), OpProperties(0));
+ // Either there are no special properties, or there's a call but it doesn't
+ // matter because we'll abort anyway.
+ DCHECK_IMPLIES(
+ node->properties() != OpProperties(0),
+ node->properties() == OpProperties::Call() && node->Is<Abort>());
if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
@@ -851,7 +953,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
DCHECK_EQ(node->input_count(), 0);
DCHECK_EQ(node->properties(), OpProperties::EagerDeopt());
- UpdateUse(*node->eager_deopt_info());
+ AllocateEagerDeopt(*node->eager_deopt_info());
if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
@@ -873,6 +975,11 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
InitializeBranchTargetPhis(predecessor_id, target);
MergeRegisterValues(unconditional, target, predecessor_id);
+ if (target->has_phi()) {
+ for (Phi* phi : *target->phis()) {
+ UpdateUse(&phi->input(predecessor_id));
+ }
+ }
// For JumpLoops, now update the uses of any node used in, but not defined
// in the loop. This makes sure that such nodes' lifetimes are extended to
@@ -900,7 +1007,6 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
VerifyInputs(node);
DCHECK(!node->properties().can_eager_deopt());
- for (Input& input : *node) UpdateUse(&input);
DCHECK(!node->properties().can_lazy_deopt());
if (node->properties().is_call()) SpillAndClearRegisters();
@@ -940,6 +1046,21 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
VerifyRegisterState();
}
+template <typename RegisterT>
+void StraightForwardRegisterAllocator::SetLoopPhiRegisterHint(Phi* phi,
+ RegisterT reg) {
+ compiler::UnallocatedOperand hint(
+ std::is_same_v<RegisterT, Register>
+ ? compiler::UnallocatedOperand::FIXED_REGISTER
+ : compiler::UnallocatedOperand::FIXED_FP_REGISTER,
+ reg.code(), kNoVreg);
+ for (Input& input : *phi) {
+ if (input.node()->id() > phi->id()) {
+ input.node()->SetHint(hint);
+ }
+ }
+}
+
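The id comparison above selects the loop back-edge inputs: only values defined after the phi itself (larger node ids) receive the fixed-register hint, so the value flowing around the loop is steered towards the register the phi was just given. A small illustration, assuming node ids increase in scheduling order (the ids and the register are made up):

// loop header:  phi p (id 10) is allocated to rsi by TryAllocateToInput
// loop body:    v (id 42) = ...;  v feeds p's back-edge input
// SetLoopPhiRegisterHint(p, rsi)  =>  v gets a FIXED_REGISTER hint for rsi,
//                                     so the merge at JumpLoop ideally needs
//                                     no gap move.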
void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
  // Try to allocate the phi to a register used by any of its inputs.
for (Input& input : *phi) {
@@ -949,6 +1070,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
Register reg = input.AssignedGeneralRegister();
if (general_registers_.unblocked_free().has(reg)) {
phi->result().SetAllocated(ForceAllocate(reg, phi));
+ SetLoopPhiRegisterHint(phi, reg);
DCHECK_EQ(general_registers_.GetValue(reg), phi);
if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(phi, ProcessingState(block_it_));
@@ -1065,6 +1187,9 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
if (location != allocated) {
AddMoveBeforeCurrentNode(node, location, allocated);
}
+ UpdateUse(&input);
+ // Clear any hint that (probably) comes from this fixed use.
+ input.node()->ClearHint();
}
void StraightForwardRegisterAllocator::MarkAsClobbered(
@@ -1082,8 +1207,45 @@ void StraightForwardRegisterAllocator::MarkAsClobbered(
}
}
+namespace {
+
+#ifdef DEBUG
+bool IsInRegisterLocation(ValueNode* node,
+ compiler::InstructionOperand location) {
+ DCHECK(location.IsAnyRegister());
+ compiler::AllocatedOperand allocation =
+ compiler::AllocatedOperand::cast(location);
+ DCHECK_IMPLIES(node->use_double_register(), allocation.IsDoubleRegister());
+ DCHECK_IMPLIES(!node->use_double_register(), allocation.IsRegister());
+ if (node->use_double_register()) {
+ return node->is_in_register(allocation.GetDoubleRegister());
+ } else {
+ return node->is_in_register(allocation.GetRegister());
+ }
+}
+#endif // DEBUG
+
+bool SameAsInput(ValueNode* node, Input& input) {
+ auto operand = compiler::UnallocatedOperand::cast(node->result().operand());
+ return operand.HasSameAsInputPolicy() &&
+ &input == &node->input(operand.input_index());
+}
+
+compiler::InstructionOperand InputHint(NodeBase* node, Input& input) {
+ ValueNode* value_node = node->TryCast<ValueNode>();
+ if (!value_node) return input.node()->hint();
+ DCHECK(value_node->result().operand().IsUnallocated());
+ if (SameAsInput(value_node, input)) {
+ return value_node->hint();
+ } else {
+ return input.node()->hint();
+ }
+}
+
+} // namespace
+
void StraightForwardRegisterAllocator::AssignArbitraryRegisterInput(
- Input& input) {
+ NodeBase* result_node, Input& input) {
// Already assigned in AssignFixedInput
if (!input.operand().IsUnallocated()) return;
@@ -1099,32 +1261,69 @@ void StraightForwardRegisterAllocator::AssignArbitraryRegisterInput(
compiler::UnallocatedOperand::MUST_HAVE_REGISTER);
ValueNode* node = input.node();
- compiler::InstructionOperand location = node->allocation();
+ bool is_clobbered = input.Cloberred();
+
+ compiler::AllocatedOperand location = ([&] {
+ compiler::InstructionOperand existing_register_location;
+ auto hint = InputHint(result_node, input);
+ if (is_clobbered) {
+ // For clobbered inputs, we want to pick a different register than
+ // non-clobbered inputs, so that we don't clobber those.
+ existing_register_location =
+ node->use_double_register()
+ ? double_registers_.TryChooseUnblockedInputRegister(node)
+ : general_registers_.TryChooseUnblockedInputRegister(node);
+ } else {
+ ValueNode* value_node = result_node->TryCast<ValueNode>();
+ // Only use the hint if it helps with the result's allocation due to
+ // same-as-input policy. Otherwise this doesn't affect regalloc.
+ auto result_hint = value_node && SameAsInput(value_node, input)
+ ? value_node->hint()
+ : compiler::InstructionOperand();
+ existing_register_location =
+ node->use_double_register()
+ ? double_registers_.TryChooseInputRegister(node, result_hint)
+ : general_registers_.TryChooseInputRegister(node, result_hint);
+ }
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os()
- << "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
- << location << "\n";
- }
+ // Reuse an existing register if possible.
+ if (existing_register_location.IsAnyLocationOperand()) {
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
+ << (is_clobbered ? "clobbered " : "") << existing_register_location
+ << "\n";
+ }
+ return compiler::AllocatedOperand::cast(existing_register_location);
+ }
- if (location.IsAnyRegister()) {
- compiler::AllocatedOperand location =
- node->use_double_register()
- ? double_registers_.ChooseInputRegister(node)
- : general_registers_.ChooseInputRegister(node);
- if (input.Cloberred()) {
- MarkAsClobbered(node, location);
- }
- input.SetAllocated(location);
- } else {
- compiler::AllocatedOperand allocation = AllocateRegister(node);
- if (input.Cloberred()) {
- MarkAsClobbered(node, allocation);
+ // Otherwise, allocate a register for the node and load it in from there.
+ compiler::InstructionOperand existing_location = node->allocation();
+ compiler::AllocatedOperand allocation = AllocateRegister(node, hint);
+ DCHECK_NE(existing_location, allocation);
+ AddMoveBeforeCurrentNode(node, existing_location, allocation);
+
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
+ << (is_clobbered ? "clobbered " : "") << allocation << " ← "
+ << node->allocation() << "\n";
}
- input.SetAllocated(allocation);
- DCHECK_NE(location, allocation);
- AddMoveBeforeCurrentNode(node, location, allocation);
+ return allocation;
+ })();
+
+ input.SetAllocated(location);
+
+ UpdateUse(&input);
+ // Only need to mark the location as clobbered if the node wasn't already
+ // killed by UpdateUse.
+ if (is_clobbered && !node->is_dead()) {
+ MarkAsClobbered(node, location);
}
+ // Clobbered inputs should no longer be in the allocated location, as far as
+ // the register allocator is concerned. This will happen either via
+ // clobbering, or via this being the last use.
+ DCHECK_IMPLIES(is_clobbered, !IsInRegisterLocation(node, location));
}
void StraightForwardRegisterAllocator::AssignAnyInput(Input& input) {
@@ -1139,11 +1338,21 @@ void StraightForwardRegisterAllocator::AssignAnyInput(Input& input) {
compiler::InstructionOperand location = node->allocation();
input.InjectLocation(location);
+ if (location.IsAnyRegister()) {
+ compiler::AllocatedOperand allocation =
+ compiler::AllocatedOperand::cast(location);
+ if (allocation.IsDoubleRegister()) {
+ double_registers_.block(allocation.GetDoubleRegister());
+ } else {
+ general_registers_.block(allocation.GetRegister());
+ }
+ }
if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- " << PrintNodeLabel(graph_labeller(), input.node())
<< " in original " << location << "\n";
}
+ UpdateUse(&input);
}
void StraightForwardRegisterAllocator::AssignInputs(NodeBase* node) {
@@ -1153,9 +1362,13 @@ void StraightForwardRegisterAllocator::AssignInputs(NodeBase* node) {
// the inputs could be assigned a register in AssignArbitraryRegisterInput
  // (and respectively its node location), therefore we wait until all
// registers are allocated before assigning any location for these inputs.
+ // TODO(dmercadier): consider using `ForAllInputsInRegallocAssignmentOrder` to
+ // iterate the inputs. Since UseMarkingProcessor uses this helper to iterate
+ // inputs, and it has to iterate them in the same order as this function,
+ // using the iteration helper in both places would be better.
for (Input& input : *node) AssignFixedInput(input);
AssignFixedTemporaries(node);
- for (Input& input : *node) AssignArbitraryRegisterInput(input);
+ for (Input& input : *node) AssignArbitraryRegisterInput(node, input);
AssignArbitraryTemporaries(node);
for (Input& input : *node) AssignAnyInput(input);
}
@@ -1257,6 +1470,7 @@ void StraightForwardRegisterAllocator::VerifyRegisterState() {
}
}
for (Node* node : block->nodes()) {
+ if (IsDeadNodeToSkip(node)) continue;
if (ValueNode* value_node = node->TryCast<ValueNode>()) {
ValidateValueNode(value_node);
}
@@ -1333,9 +1547,12 @@ void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
auto it =
std::upper_bound(slots.free_slots.begin(), slots.free_slots.end(),
start, [](NodeIdT s, const SpillSlotInfo& slot_info) {
- return slot_info.freed_at_position < s;
+ return slot_info.freed_at_position >= s;
});
- if (it != slots.free_slots.end()) {
+ if (it != slots.free_slots.begin()) {
+ // {it} points to the first invalid slot. Decrement it to get to the last
+ // valid slot freed before {start}.
+ --it;
free_slot = it->slot_index;
slots.free_slots.erase(it);
} else {
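With the new comparator, free_slots is treated as ordered by freed_at_position: upper_bound returns the first slot freed at or after {start}, and stepping back one element yields the most recently freed slot that is already dead before {start}. A standalone illustration of the same pattern with plain integers (the values are made up):

#include <algorithm>
#include <vector>

// Returns the index of a slot freed strictly before `start`, or -1 if none.
int FindReusableSlot(const std::vector<int>& freed_at_sorted, int start) {
  auto it = std::upper_bound(freed_at_sorted.begin(), freed_at_sorted.end(),
                             start,
                             [](int s, int freed) { return freed >= s; });
  if (it == freed_at_sorted.begin()) return -1;  // nothing freed before start
  --it;  // the last slot freed strictly before start
  return static_cast<int>(it - freed_at_sorted.begin());
}
// FindReusableSlot({3, 7, 12}, 10) == 1, i.e. the slot freed at position 7.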
@@ -1379,9 +1596,11 @@ RegisterT StraightForwardRegisterAllocator::PickRegisterToFree(
}
template <typename RegisterT>
-RegisterT StraightForwardRegisterAllocator::FreeUnblockedRegister() {
+RegisterT StraightForwardRegisterAllocator::FreeUnblockedRegister(
+ RegListBase<RegisterT> reserved) {
RegisterFrameState<RegisterT>& registers = GetRegisterFrameState<RegisterT>();
- RegisterT best = PickRegisterToFree<RegisterT>(registers.blocked());
+ RegisterT best =
+ PickRegisterToFree<RegisterT>(registers.blocked() | reserved);
DCHECK(best.is_valid());
DCHECK(!registers.is_blocked(best));
DropRegisterValue(registers, best);
@@ -1390,56 +1609,75 @@ RegisterT StraightForwardRegisterAllocator::FreeUnblockedRegister() {
}
compiler::AllocatedOperand StraightForwardRegisterAllocator::AllocateRegister(
- ValueNode* node) {
+ ValueNode* node, const compiler::InstructionOperand& hint) {
compiler::InstructionOperand allocation;
if (node->use_double_register()) {
if (double_registers_.UnblockedFreeIsEmpty()) {
FreeUnblockedRegister<DoubleRegister>();
}
- return double_registers_.AllocateRegister(node);
+ return double_registers_.AllocateRegister(node, hint);
} else {
if (general_registers_.UnblockedFreeIsEmpty()) {
FreeUnblockedRegister<Register>();
}
- return general_registers_.AllocateRegister(node);
+ return general_registers_.AllocateRegister(node, hint);
}
}
+namespace {
template <typename RegisterT>
-void StraightForwardRegisterAllocator::EnsureFreeRegisterAtEnd() {
+static RegisterT GetRegisterHint(const compiler::InstructionOperand& hint) {
+ if (hint.IsInvalid()) return RegisterT::no_reg();
+ DCHECK(hint.IsUnallocated());
+ return RegisterT::from_code(
+ compiler::UnallocatedOperand::cast(hint).fixed_register_index());
+}
+
+} // namespace
+
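GetRegisterHint simply decodes the register index out of a fixed-register hint and returns no_reg for an invalid (absent) hint. For illustration, a hint built the same way SetLoopPhiRegisterHint does above (the choice of rsi is hypothetical):

compiler::UnallocatedOperand hint(
    compiler::UnallocatedOperand::FIXED_REGISTER, rsi.code(), kNoVreg);
// GetRegisterHint<Register>(hint) yields rsi, while an invalid hint such as
// compiler::InstructionOperand() yields Register::no_reg().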
+bool StraightForwardRegisterAllocator::IsCurrentNodeLastUseOf(ValueNode* node) {
+ return node->live_range().end == current_node_->id();
+}
+
+template <typename RegisterT>
+void StraightForwardRegisterAllocator::EnsureFreeRegisterAtEnd(
+ const compiler::InstructionOperand& hint) {
RegisterFrameState<RegisterT>& registers = GetRegisterFrameState<RegisterT>();
// If we still have free registers, pick one of those.
- if (!registers.free().is_empty()) {
- // Make sure that at least one of the free registers is not blocked; this
- // effectively means freeing up a temporary.
- if (registers.unblocked_free().is_empty()) {
- registers.unblock(registers.free().first());
- }
- return;
- }
+ if (!registers.unblocked_free().is_empty()) return;
// If the current node is a last use of an input, pick a register containing
- // the input.
- for (RegisterT reg : registers.blocked()) {
- if (registers.GetValue(reg)->live_range().end == current_node_->id()) {
+ // the input. Prefer the hint register if available.
+ RegisterT hint_reg = GetRegisterHint<RegisterT>(hint);
+ if (!registers.free().has(hint_reg) && registers.blocked().has(hint_reg) &&
+ IsCurrentNodeLastUseOf(registers.GetValue(hint_reg))) {
+ DropRegisterValueAtEnd(hint_reg);
+ return;
+ }
+  // Only search the used-and-blocked registers, since we don't want to use a
+  // temporary register (free + blocked) as the result register.
+ for (RegisterT reg : (registers.blocked() - registers.free())) {
+ if (IsCurrentNodeLastUseOf(registers.GetValue(reg))) {
DropRegisterValueAtEnd(reg);
return;
}
}
// Pick any input-blocked register based on regular heuristics.
- RegisterT reg = PickRegisterToFree<RegisterT>(registers.empty());
+ RegisterT reg = hint.IsInvalid()
+ ? PickRegisterToFree<RegisterT>(registers.empty())
+ : GetRegisterHint<RegisterT>(hint);
DropRegisterValueAtEnd(reg);
}
compiler::AllocatedOperand
StraightForwardRegisterAllocator::AllocateRegisterAtEnd(ValueNode* node) {
if (node->use_double_register()) {
- EnsureFreeRegisterAtEnd<DoubleRegister>();
- return double_registers_.AllocateRegister(node);
+ EnsureFreeRegisterAtEnd<DoubleRegister>(node->hint());
+ return double_registers_.AllocateRegister(node, node->hint());
} else {
- EnsureFreeRegisterAtEnd<Register>();
- return general_registers_.AllocateRegister(node);
+ EnsureFreeRegisterAtEnd<Register>(node->hint());
+ return general_registers_.AllocateRegister(node, node->hint());
}
}
@@ -1499,38 +1737,63 @@ compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
}
}
+namespace {
template <typename RegisterT>
-compiler::AllocatedOperand RegisterFrameState<RegisterT>::ChooseInputRegister(
- ValueNode* node) {
- RegTList blocked = node->result_registers<RegisterT>() & blocked_;
- if (blocked.Count() > 0) {
- return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
- node->GetMachineRepresentation(),
- blocked.first().code());
- }
- compiler::AllocatedOperand allocation =
- compiler::AllocatedOperand::cast(node->allocation());
- if constexpr (std::is_same<RegisterT, DoubleRegister>::value) {
- block(allocation.GetDoubleRegister());
- } else {
- block(allocation.GetRegister());
+compiler::AllocatedOperand OperandForNodeRegister(ValueNode* node,
+ RegisterT reg) {
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ node->GetMachineRepresentation(),
+ reg.code());
+}
+} // namespace
+
+template <typename RegisterT>
+compiler::InstructionOperand
+RegisterFrameState<RegisterT>::TryChooseInputRegister(
+ ValueNode* node, const compiler::InstructionOperand& hint) {
+ RegTList result_registers = node->result_registers<RegisterT>();
+ if (result_registers.is_empty()) return compiler::InstructionOperand();
+
+ // Prefer to return an existing blocked register.
+ RegTList blocked_result_registers = result_registers & blocked_;
+ if (!blocked_result_registers.is_empty()) {
+ RegisterT reg = GetRegisterHint<RegisterT>(hint);
+ if (!blocked_result_registers.has(reg)) {
+ reg = blocked_result_registers.first();
+ }
+ return OperandForNodeRegister(node, reg);
}
- return allocation;
+
+ RegisterT reg = result_registers.first();
+ block(reg);
+ return OperandForNodeRegister(node, reg);
}
template <typename RegisterT>
-compiler::AllocatedOperand RegisterFrameState<RegisterT>::AllocateRegister(
+compiler::InstructionOperand
+RegisterFrameState<RegisterT>::TryChooseUnblockedInputRegister(
ValueNode* node) {
+ RegTList result_excl_blocked = node->result_registers<RegisterT>() - blocked_;
+ if (result_excl_blocked.is_empty()) return compiler::InstructionOperand();
+ RegisterT reg = result_excl_blocked.first();
+ block(reg);
+ return OperandForNodeRegister(node, reg);
+}
+
+template <typename RegisterT>
+compiler::AllocatedOperand RegisterFrameState<RegisterT>::AllocateRegister(
+ ValueNode* node, const compiler::InstructionOperand& hint) {
DCHECK(!unblocked_free().is_empty());
- RegisterT reg = unblocked_free().first();
+ RegisterT reg = GetRegisterHint<RegisterT>(hint);
+ if (!unblocked_free().has(reg)) {
+ reg = unblocked_free().first();
+ }
RemoveFromFree(reg);
// Allocation succeeded. This might have found an existing allocation.
// Simply update the state anyway.
SetValue(reg, node);
- return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
- node->GetMachineRepresentation(),
- reg.code());
+ return OperandForNodeRegister(node, reg);
}
template <typename RegisterT>
@@ -1557,6 +1820,11 @@ void StraightForwardRegisterAllocator::AssignFixedTemporaries(
<< "Fixed Double Temporaries: " << fixed_temporaries << "\n";
}
}
+
+ // After allocating the specific/fixed temporary registers, we empty the node
+ // set, so that it is used to allocate only the arbitrary/available temporary
+ // register that is going to be inserted in the scratch scope.
+ node->temporaries<RegisterT>() = {};
}
void StraightForwardRegisterAllocator::AssignFixedTemporaries(NodeBase* node) {
@@ -1564,6 +1832,30 @@ void StraightForwardRegisterAllocator::AssignFixedTemporaries(NodeBase* node) {
AssignFixedTemporaries(double_registers_, node);
}
+namespace {
+template <typename RegisterT>
+RegListBase<RegisterT> GetReservedRegisters(NodeBase* node_base) {
+ if (!node_base->Is<ValueNode>()) return RegListBase<RegisterT>();
+ ValueNode* node = node_base->Cast<ValueNode>();
+ compiler::UnallocatedOperand operand =
+ compiler::UnallocatedOperand::cast(node->result().operand());
+ RegListBase<RegisterT> reserved = {node->GetRegisterHint<RegisterT>()};
+ if constexpr (std::is_same_v<RegisterT, Register>) {
+ if (operand.extended_policy() ==
+ compiler::UnallocatedOperand::FIXED_REGISTER) {
+ reserved.set(Register::from_code(operand.fixed_register_index()));
+ }
+ } else {
+ static_assert(std::is_same_v<RegisterT, DoubleRegister>);
+ if (operand.extended_policy() ==
+ compiler::UnallocatedOperand::FIXED_FP_REGISTER) {
+ reserved.set(DoubleRegister::from_code(operand.fixed_register_index()));
+ }
+ }
+ return reserved;
+}
+} // namespace
+
template <typename RegisterT>
void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
RegisterFrameState<RegisterT>& registers, NodeBase* node) {
@@ -1572,9 +1864,13 @@ void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
DCHECK_GT(num_temporaries_needed, 0);
RegListBase<RegisterT> temporaries = node->temporaries<RegisterT>();
+ DCHECK(temporaries.is_empty());
int remaining_temporaries_needed = num_temporaries_needed;
- for (RegisterT reg : registers.unblocked_free()) {
+ // If the node is a ValueNode with a fixed result register, we should not
+ // assign a temporary to the result register, nor its hint.
+ RegListBase<RegisterT> reserved = GetReservedRegisters<RegisterT>(node);
+ for (RegisterT reg : (registers.unblocked_free() - reserved)) {
registers.block(reg);
DCHECK(!temporaries.has(reg));
temporaries.set(reg);
@@ -1583,8 +1879,8 @@ void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
// Free extra registers if necessary.
for (int i = 0; i < remaining_temporaries_needed; ++i) {
- DCHECK(registers.UnblockedFreeIsEmpty());
- RegisterT reg = FreeUnblockedRegister<RegisterT>();
+ DCHECK((registers.unblocked_free() - reserved).is_empty());
+ RegisterT reg = FreeUnblockedRegister<RegisterT>(reserved);
registers.block(reg);
DCHECK(!temporaries.has(reg));
temporaries.set(reg);
@@ -1667,6 +1963,7 @@ void StraightForwardRegisterAllocator::InitializeRegisterValues(
}
#ifdef DEBUG
+
bool StraightForwardRegisterAllocator::IsInRegister(
MergePointRegisterState& target_state, ValueNode* incoming) {
bool found = false;
@@ -1683,7 +1980,39 @@ bool StraightForwardRegisterAllocator::IsInRegister(
}
return found;
}
-#endif
+
+// Returns true if {first_id} or {last_id} is forward-reachable from
+// {start_block}.
+bool StraightForwardRegisterAllocator::IsForwardReachable(
+ BasicBlock* start_block, NodeIdT first_id, NodeIdT last_id) {
+ ZoneQueue<BasicBlock*> queue(compilation_info_->zone());
+  ZoneSet<BasicBlock*> seen(compilation_info_->zone());
+  // Seed the search with the start block; otherwise the loop below would
+  // never run and the function would always return false.
+  queue.push(start_block);
+ while (!queue.empty()) {
+ BasicBlock* curr = queue.front();
+ queue.pop();
+
+ if (curr->contains_node_id(first_id) || curr->contains_node_id(last_id)) {
+ return true;
+ }
+
+ if (curr->control_node()->Is<JumpLoop>()) {
+ // A JumpLoop will have a backward edge. Since we are only interested in
+ // checking forward reachability, we ignore its successors.
+ continue;
+ }
+
+ for (BasicBlock* succ : curr->successors()) {
+ if (seen.insert(succ).second) {
+ queue.push(succ);
+ }
+ // Since we skipped JumpLoop, only forward edges should remain.
+ DCHECK_GT(succ->first_id(), curr->first_id());
+ }
+ }
+
+ return false;
+}
+
+#endif // DEBUG
void StraightForwardRegisterAllocator::InitializeBranchTargetRegisterValues(
ControlNode* source, BasicBlock* target) {
@@ -1782,6 +2111,28 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
return;
}
+ if (node == nullptr) {
+ // Don't load new nodes at loop headers.
+ if (control->Is<JumpLoop>()) return;
+ } else if (!node->is_loadable() && !node->has_register()) {
+ // If we have a node already, but can't load it here, we must be in a
+ // liveness hole for it, so nuke the merge state.
+ // This can only happen for conversion nodes, as they can split and take
+ // over the liveness of the node they are converting.
+ // TODO(v8:7700): Overeager DCHECK.
+ // DCHECK(node->properties().is_conversion());
+ if (v8_flags.trace_maglev_regalloc) {
+ printing_visitor_->os() << " " << reg << " - can't load "
+ << PrintNodeLabel(graph_labeller(), node)
+ << ", dropping the merge\n";
+ }
+ // We always need to be able to restore values on JumpLoop since the value
+ // is definitely live at the loop header.
+ CHECK(!control->Is<JumpLoop>());
+ state = {nullptr, initialized_node};
+ return;
+ }
+
if (merge) {
// The register is already occupied with a different node. Figure out
// where that node is allocated on the incoming branch.
@@ -1792,11 +2143,17 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
<< " from " << node->allocation() << " \n";
}
- // If there's a value in the incoming state, that value is either
- // already spilled or in another place in the merge state.
- if (incoming != nullptr && !incoming->is_loadable()) {
- DCHECK(IsInRegister(target_state, incoming));
+ if (incoming != nullptr) {
+ // If {incoming} isn't loadable or available in a register, then we are
+ // in a liveness hole, and none of its uses should be reachable from
+ // {target} (for simplicity/speed, we only check the first and last use
+ // though).
+ DCHECK_IMPLIES(
+ !incoming->is_loadable() && !IsInRegister(target_state, incoming),
+ !IsForwardReachable(target, incoming->next_use(),
+ incoming->live_range().end));
}
+
return;
}
@@ -1814,24 +2171,8 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " " << reg << " - can't load incoming "
- << PrintNodeLabel(graph_labeller(), node) << ", bailing out\n";
- }
- return;
- }
-
- if (node != nullptr && !node->is_loadable() && !node->has_register()) {
- // If we have a node already, but can't load it here, we must be in a
- // liveness hole for it, so nuke the merge state.
- // This can only happen for conversion nodes, as they can split and take
- // over the liveness of the node they are converting.
- // TODO(v8:7700): Overeager DCHECK.
- // DCHECK(node->properties().is_conversion());
- if (v8_flags.trace_maglev_regalloc) {
- printing_visitor_->os() << " " << reg << " - can't load "
- << PrintNodeLabel(graph_labeller(), node)
- << ", dropping the merge\n";
+ << PrintNodeLabel(graph_labeller(), incoming) << ", bailing out\n";
}
- state = {nullptr, initialized_node};
return;
}
diff --git a/deps/v8/src/maglev/maglev-regalloc.h b/deps/v8/src/maglev/maglev-regalloc.h
index 120f5cfa6d..e02b914424 100644
--- a/deps/v8/src/maglev/maglev-regalloc.h
+++ b/deps/v8/src/maglev/maglev-regalloc.h
@@ -124,8 +124,13 @@ class RegisterFrameState {
bool is_blocked(RegisterT reg) { return blocked_.has(reg); }
void clear_blocked() { blocked_ = kEmptyRegList; }
- compiler::AllocatedOperand ChooseInputRegister(ValueNode* node);
- compiler::AllocatedOperand AllocateRegister(ValueNode* node);
+ compiler::InstructionOperand TryChooseInputRegister(
+ ValueNode* node, const compiler::InstructionOperand& hint =
+ compiler::InstructionOperand());
+ compiler::InstructionOperand TryChooseUnblockedInputRegister(ValueNode* node);
+ compiler::AllocatedOperand AllocateRegister(
+ ValueNode* node, const compiler::InstructionOperand& hint =
+ compiler::InstructionOperand());
private:
ValueNode* values_[RegisterT::kNumRegisters];
@@ -165,8 +170,6 @@ class StraightForwardRegisterAllocator {
void UpdateUse(Input* input) { return UpdateUse(input->node(), input); }
void UpdateUse(ValueNode* node, InputLocation* input_location);
- void UpdateUse(const EagerDeoptInfo& deopt_info);
- void UpdateUse(const LazyDeoptInfo& deopt_info);
void MarkAsClobbered(ValueNode* node,
const compiler::AllocatedOperand& location);
@@ -174,8 +177,10 @@ class StraightForwardRegisterAllocator {
void AllocateControlNode(ControlNode* node, BasicBlock* block);
void AllocateNode(Node* node);
void AllocateNodeResult(ValueNode* node);
+ void AllocateEagerDeopt(const EagerDeoptInfo& deopt_info);
+ void AllocateLazyDeopt(const LazyDeoptInfo& deopt_info);
void AssignFixedInput(Input& input);
- void AssignArbitraryRegisterInput(Input& input);
+ void AssignArbitraryRegisterInput(NodeBase* result_node, Input& input);
void AssignAnyInput(Input& input);
void AssignInputs(NodeBase* node);
template <typename RegisterT>
@@ -186,6 +191,8 @@ class StraightForwardRegisterAllocator {
void AssignArbitraryTemporaries(RegisterFrameState<RegisterT>& registers,
NodeBase* node);
void AssignArbitraryTemporaries(NodeBase* node);
+ template <typename RegisterT>
+ void SetLoopPhiRegisterHint(Phi* phi, RegisterT reg);
void TryAllocateToInput(Phi* phi);
void VerifyInputs(NodeBase* node);
@@ -207,7 +214,8 @@ class StraightForwardRegisterAllocator {
void FreeRegistersUsedBy(ValueNode* node);
template <typename RegisterT>
- RegisterT FreeUnblockedRegister();
+ RegisterT FreeUnblockedRegister(
+ RegListBase<RegisterT> reserved = RegListBase<RegisterT>());
template <typename RegisterT>
RegisterT PickRegisterToFree(RegListBase<RegisterT> reserved);
@@ -222,8 +230,10 @@ class StraightForwardRegisterAllocator {
template <typename RegisterT>
void DropRegisterValueAtEnd(RegisterT reg);
+ bool IsCurrentNodeLastUseOf(ValueNode* node);
template <typename RegisterT>
- void EnsureFreeRegisterAtEnd();
+ void EnsureFreeRegisterAtEnd(const compiler::InstructionOperand& hint =
+ compiler::InstructionOperand());
compiler::AllocatedOperand AllocateRegisterAtEnd(ValueNode* node);
template <typename RegisterT>
@@ -232,7 +242,9 @@ class StraightForwardRegisterAllocator {
void DropRegisterValue(Register reg);
void DropRegisterValue(DoubleRegister reg);
- compiler::AllocatedOperand AllocateRegister(ValueNode* node);
+ compiler::AllocatedOperand AllocateRegister(
+ ValueNode* node, const compiler::InstructionOperand& hint =
+ compiler::InstructionOperand());
template <typename RegisterT>
compiler::AllocatedOperand ForceAllocate(
@@ -249,6 +261,8 @@ class StraightForwardRegisterAllocator {
void InitializeRegisterValues(MergePointRegisterState& target_state);
#ifdef DEBUG
bool IsInRegister(MergePointRegisterState& target_state, ValueNode* incoming);
+ bool IsForwardReachable(BasicBlock* start_block, NodeIdT first_id,
+ NodeIdT last_id);
#endif
void InitializeBranchTargetRegisterValues(ControlNode* source,
diff --git a/deps/v8/src/maglev/maglev-vreg-allocator.h b/deps/v8/src/maglev/maglev-vreg-allocator.h
deleted file mode 100644
index 014c69dad3..0000000000
--- a/deps/v8/src/maglev/maglev-vreg-allocator.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
-#define V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
-
-#include "src/maglev/maglev-basic-block.h"
-#include "src/maglev/maglev-graph.h"
-#include "src/maglev/maglev-ir.h"
-
-namespace v8 {
-namespace internal {
-namespace maglev {
-
-class ProcessingState;
-
-class MaglevVregAllocationState {
- public:
- int AllocateVirtualRegister() { return next_virtual_register_++; }
- int num_allocated_registers() const { return next_virtual_register_; }
-
- private:
- int next_virtual_register_ = 0;
-};
-
-class MaglevVregAllocator {
- public:
- void PreProcessGraph(Graph* graph) {}
- void PostProcessGraph(Graph* graph) {
- for (BasicBlock* block : *graph) {
- if (!block->has_phi()) continue;
- for (Phi* phi : *block->phis()) {
- phi->AllocateVregInPostProcess(&state_);
- }
- }
- }
- void PreProcessBasicBlock(BasicBlock* block) {}
-
-#define DEF_PROCESS_NODE(NAME) \
- void Process(NAME* node, const ProcessingState& state) { \
- node->AllocateVreg(&state_); \
- }
- NODE_BASE_LIST(DEF_PROCESS_NODE)
-#undef DEF_PROCESS_NODE
-
- private:
- MaglevVregAllocationState state_;
-};
-
-} // namespace maglev
-} // namespace internal
-} // namespace v8
-
-#endif // V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
diff --git a/deps/v8/src/maglev/maglev.cc b/deps/v8/src/maglev/maglev.cc
index 024175c840..c8b8b0161c 100644
--- a/deps/v8/src/maglev/maglev.cc
+++ b/deps/v8/src/maglev/maglev.cc
@@ -5,19 +5,23 @@
#include "src/maglev/maglev.h"
#include "src/common/globals.h"
+#include "src/logging/runtime-call-stats-scope.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compiler.h"
namespace v8 {
namespace internal {
-MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
- Handle<JSFunction> function) {
+MaybeHandle<Code> Maglev::Compile(Isolate* isolate,
+ Handle<JSFunction> function) {
DCHECK(v8_flags.maglev);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeNonConcurrentMaglev);
std::unique_ptr<maglev::MaglevCompilationInfo> info =
maglev::MaglevCompilationInfo::New(isolate, function);
- maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(),
- info.get());
+ if (!maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(),
+ info.get())) {
+ return {};
+ }
return maglev::MaglevCompiler::GenerateCode(isolate, info.get());
}
diff --git a/deps/v8/src/maglev/maglev.h b/deps/v8/src/maglev/maglev.h
index 7207fdec5e..940fa10ea9 100644
--- a/deps/v8/src/maglev/maglev.h
+++ b/deps/v8/src/maglev/maglev.h
@@ -19,8 +19,8 @@ class Maglev : public AllStatic {
public:
// TODO(v8:7700): This entry point is only used for testing. Consider
// removing it once BenchMaglev runtime functions are no longer useful.
- static MaybeHandle<CodeT> Compile(Isolate* isolate,
- Handle<JSFunction> function);
+ static MaybeHandle<Code> Compile(Isolate* isolate,
+ Handle<JSFunction> function);
};
} // namespace internal
diff --git a/deps/v8/src/maglev/x64/maglev-assembler-x64-inl.h b/deps/v8/src/maglev/x64/maglev-assembler-x64-inl.h
new file mode 100644
index 0000000000..015eb2a2e8
--- /dev/null
+++ b/deps/v8/src/maglev/x64/maglev-assembler-x64-inl.h
@@ -0,0 +1,717 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_X64_MAGLEV_ASSEMBLER_X64_INL_H_
+#define V8_MAGLEV_X64_MAGLEV_ASSEMBLER_X64_INL_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/compiler/compilation-dependencies.h"
+#include "src/maglev/maglev-assembler.h"
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-code-gen-state.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+constexpr Condition ConditionFor(Operation operation) {
+ switch (operation) {
+ case Operation::kEqual:
+ case Operation::kStrictEqual:
+ return equal;
+ case Operation::kLessThan:
+ return less;
+ case Operation::kLessThanOrEqual:
+ return less_equal;
+ case Operation::kGreaterThan:
+ return greater;
+ case Operation::kGreaterThanOrEqual:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline ScaleFactor ScaleFactorFromInt(int n) {
+ switch (n) {
+ case 1:
+ return times_1;
+ case 2:
+ return times_2;
+ case 4:
+ return times_4;
+ default:
+ UNREACHABLE();
+ }
+}
+
+class MaglevAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(MaglevAssembler* masm)
+ : masm_(masm),
+ prev_scope_(masm->scratch_register_scope_),
+ available_(masm->scratch_register_scope_
+ ? masm_->scratch_register_scope_->available_
+ : RegList()),
+ available_double_(
+ masm->scratch_register_scope_
+ ? masm_->scratch_register_scope_->available_double_
+ : DoubleRegList()) {
+ masm_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { masm_->scratch_register_scope_ = prev_scope_; }
+
+ Register Acquire() { return available_.PopFirst(); }
+ void Include(Register reg) { available_.set(reg); }
+ void Include(const RegList list) { available_ = available_ | list; }
+
+ DoubleRegister AcquireDouble() { return available_double_.PopFirst(); }
+ void IncludeDouble(const DoubleRegList list) {
+ available_double_ = available_double_ | list;
+ }
+
+ RegList Available() { return available_; }
+ void SetAvailable(RegList list) { available_ = list; }
+
+ DoubleRegList AvailableDouble() { return available_double_; }
+ void SetAvailableDouble(DoubleRegList list) { available_double_ = list; }
+
+ private:
+ MaglevAssembler* masm_;
+ ScratchRegisterScope* prev_scope_;
+ RegList available_;
+ DoubleRegList available_double_;
+};
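A small usage sketch for the scratch scope above; the call site, the choice of rax and the movl are hypothetical. Nested scopes inherit the availability of the enclosing scope and restore the previous scope on destruction:

void UseScratch(MaglevAssembler* masm) {
  MaglevAssembler::ScratchRegisterScope temps(masm);
  temps.Include(rax);              // hypothetical: make rax available here
  Register tmp = temps.Acquire();  // pops rax from the available set
  masm->movl(tmp, Immediate(42));  // any scratch work
}                                  // destructor restores the previous scope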
+
+namespace detail {
+
+template <typename... Args>
+struct PushAllHelper;
+
+template <>
+struct PushAllHelper<> {
+ static void Push(MaglevAssembler* masm) {}
+ static void PushReverse(MaglevAssembler* masm) {}
+};
+
+inline void PushInput(MaglevAssembler* masm, const Input& input) {
+ if (input.operand().IsConstant()) {
+ input.node()->LoadToRegister(masm, kScratchRegister);
+ masm->Push(kScratchRegister);
+ } else {
+ // TODO(leszeks): Consider special casing the value. (Toon: could possibly
+ // be done through Input directly?)
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input.operand());
+
+ if (operand.IsRegister()) {
+ masm->Push(operand.GetRegister());
+ } else {
+ DCHECK(operand.IsStackSlot());
+ masm->Push(masm->GetStackSlot(operand));
+ }
+ }
+}
+
+template <typename T, typename... Args>
+inline void PushIterator(MaglevAssembler* masm, base::iterator_range<T> range,
+ Args... args) {
+ for (auto iter = range.begin(), end = range.end(); iter != end; ++iter) {
+ masm->Push(*iter);
+ }
+ PushAllHelper<Args...>::Push(masm, args...);
+}
+
+template <typename T, typename... Args>
+inline void PushIteratorReverse(MaglevAssembler* masm,
+ base::iterator_range<T> range, Args... args) {
+ PushAllHelper<Args...>::PushReverse(masm, args...);
+ for (auto iter = range.rbegin(), end = range.rend(); iter != end; ++iter) {
+ masm->Push(*iter);
+ }
+}
+
+template <typename... Args>
+struct PushAllHelper<Input, Args...> {
+ static void Push(MaglevAssembler* masm, const Input& arg, Args... args) {
+ PushInput(masm, arg);
+ PushAllHelper<Args...>::Push(masm, args...);
+ }
+ static void PushReverse(MaglevAssembler* masm, const Input& arg,
+ Args... args) {
+ PushAllHelper<Args...>::PushReverse(masm, args...);
+ PushInput(masm, arg);
+ }
+};
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static void Push(MaglevAssembler* masm, Arg arg, Args... args) {
+ if constexpr (is_iterator_range<Arg>::value) {
+ PushIterator(masm, arg, args...);
+ } else {
+ masm->MacroAssembler::Push(arg);
+ PushAllHelper<Args...>::Push(masm, args...);
+ }
+ }
+ static void PushReverse(MaglevAssembler* masm, Arg arg, Args... args) {
+ if constexpr (is_iterator_range<Arg>::value) {
+ PushIteratorReverse(masm, arg, args...);
+ } else {
+ PushAllHelper<Args...>::PushReverse(masm, args...);
+ masm->Push(arg);
+ }
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+void MaglevAssembler::Push(T... vals) {
+ detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void MaglevAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
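The variadic Push/PushReverse helpers accept any mix of registers, immediates, Inputs and iterator ranges; Push emits the arguments left-to-right, while PushReverse emits them so they end up on the stack in reverse order. A hypothetical call site, shown as comments only:

//   __ Push(kContextRegister, node->input(0), Smi::FromInt(0));
//   __ PushReverse(base::make_iterator_range(args.begin(), args.end()));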
+
+inline void MaglevAssembler::BindJumpTarget(Label* label) { bind(label); }
+
+inline void MaglevAssembler::BindBlock(BasicBlock* block) {
+ bind(block->label());
+}
+
+inline void MaglevAssembler::DoubleToInt64Repr(Register dst,
+ DoubleRegister src) {
+ Movq(dst, src);
+}
+
+inline void MaglevAssembler::SmiTagInt32(Register obj, Label* fail) {
+ addl(obj, obj);
+ JumpIf(overflow, fail);
+}
+
+inline Condition MaglevAssembler::IsInt64Constant(Register reg,
+ int64_t constant) {
+ movq(kScratchRegister, kHoleNanInt64);
+ cmpq(reg, kScratchRegister);
+ return equal;
+}
+
+inline Condition MaglevAssembler::IsRootConstant(Input input,
+ RootIndex root_index) {
+ if (input.operand().IsRegister()) {
+ CompareRoot(ToRegister(input), root_index);
+ } else {
+ DCHECK(input.operand().IsStackSlot());
+ CompareRoot(ToMemOperand(input), root_index);
+ }
+ return equal;
+}
+
+inline MemOperand MaglevAssembler::GetStackSlot(
+ const compiler::AllocatedOperand& operand) {
+ return MemOperand(rbp, GetFramePointerOffsetForStackSlot(operand));
+}
+
+inline MemOperand MaglevAssembler::ToMemOperand(
+ const compiler::InstructionOperand& operand) {
+ return GetStackSlot(compiler::AllocatedOperand::cast(operand));
+}
+
+inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
+ return ToMemOperand(location.operand());
+}
+
+inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
+ Register object) {
+ DCHECK_NE(data_pointer, object);
+ LoadExternalPointerField(
+ data_pointer, FieldOperand(object, JSTypedArray::kExternalPointerOffset));
+ if (JSTypedArray::kMaxSizeInHeap == 0) return;
+
+ Register base = kScratchRegister;
+ movl(base, FieldOperand(object, JSTypedArray::kBasePointerOffset));
+ addq(data_pointer, base);
+}
+
+inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
+ Register object,
+ Register index, int scale,
+ int offset) {
+ LoadTaggedField(
+ result, FieldOperand(object, index, ScaleFactorFromInt(scale), offset));
+}
+
+inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
+ Register object,
+ int offset) {
+ movq(result, FieldOperand(object, offset));
+#ifdef V8_ENABLE_SANDBOX
+ shrq(result, Immediate(kBoundedSizeShift));
+#endif // V8_ENABLE_SANDBOX
+}
+
+inline void MaglevAssembler::LoadExternalPointerField(Register result,
+ Operand operand) {
+#ifdef V8_ENABLE_SANDBOX
+ LoadSandboxedPointerField(result, operand);
+#else
+ movq(result, operand);
+#endif
+}
+
+inline void MaglevAssembler::LoadSignedField(Register result, Operand operand,
+ int size) {
+ if (size == 1) {
+ movsxbl(result, operand);
+ } else if (size == 2) {
+ movsxwl(result, operand);
+ } else {
+ DCHECK_EQ(size, 4);
+ movl(result, operand);
+ }
+}
+
+inline void MaglevAssembler::LoadUnsignedField(Register result, Operand operand,
+ int size) {
+ if (size == 1) {
+ movzxbl(result, operand);
+ } else if (size == 2) {
+ movzxwl(result, operand);
+ } else {
+ DCHECK_EQ(size, 4);
+ movl(result, operand);
+ }
+}
+
+inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
+ Register value) {
+ AssertSmi(value);
+ mov_tagged(FieldOperand(object, offset), value);
+}
+
+inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
+ Smi value) {
+ MacroAssembler::StoreTaggedSignedField(FieldOperand(object, offset), value);
+}
+
+inline void MaglevAssembler::StoreField(Operand operand, Register value,
+ int size) {
+ DCHECK(size == 1 || size == 2 || size == 4);
+ if (size == 1) {
+ movb(operand, value);
+ } else if (size == 2) {
+ movw(operand, value);
+ } else {
+ DCHECK_EQ(size, 4);
+ movl(operand, value);
+ }
+}
+
+inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
+ if (size == 2) {
+ bswapl(value);
+ sarl(value, Immediate(16));
+ } else if (size == 4) {
+ bswapl(value);
+ } else {
+ DCHECK_EQ(size, 1);
+ }
+}
+
+inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot stack_slot) {
+ return MemOperand(rbp, stack_slot.index);
+}
+
+inline void MaglevAssembler::Move(StackSlot dst, Register src) {
+ movq(StackSlotOperand(dst), src);
+}
+
+inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
+ Movsd(StackSlotOperand(dst), src);
+}
+
+inline void MaglevAssembler::Move(Register dst, StackSlot src) {
+ movq(dst, StackSlotOperand(src));
+}
+
+inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
+ Movsd(dst, StackSlotOperand(src));
+}
+
+inline void MaglevAssembler::Move(MemOperand dst, Register src) {
+ movq(dst, src);
+}
+
+inline void MaglevAssembler::Move(MemOperand dst, DoubleRegister src) {
+ Movsd(dst, src);
+}
+
+inline void MaglevAssembler::Move(Register dst, TaggedIndex i) {
+ MacroAssembler::Move(dst, i);
+}
+
+inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+ MacroAssembler::Move(dst, src);
+}
+
+inline void MaglevAssembler::Move(Register dst, Smi src) {
+ MacroAssembler::Move(dst, src);
+}
+
+inline void MaglevAssembler::Move(Register dst, ExternalReference src) {
+ MacroAssembler::Move(dst, src);
+}
+
+inline void MaglevAssembler::Move(Register dst, MemOperand src) {
+ MacroAssembler::Move(dst, src);
+}
+
+inline void MaglevAssembler::Move(DoubleRegister dst, MemOperand src) {
+ Movsd(dst, src);
+}
+
+inline void MaglevAssembler::Move(Register dst, Register src) {
+ MacroAssembler::Move(dst, src);
+}
+
+inline void MaglevAssembler::Move(Register dst, int32_t i) {
+ // Move as a uint32 to avoid sign extension.
+ MacroAssembler::Move(dst, static_cast<uint32_t>(i));
+}
+
+inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
+ MacroAssembler::Move(dst, n);
+}
+
+inline void MaglevAssembler::Move(DoubleRegister dst, Float64 n) {
+ MacroAssembler::Move(dst, n.get_bits());
+}
+
+inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
+ MacroAssembler::Move(dst, obj);
+}
+
+inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
+ movsxlq(dst, src);
+}
+inline void MaglevAssembler::NegateInt32(Register val) { negl(val); }
+
+inline void MaglevAssembler::ToUint8Clamped(Register result,
+ DoubleRegister value, Label* min,
+ Label* max, Label* done) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ Move(kScratchDoubleReg, 0.0);
+ Ucomisd(kScratchDoubleReg, value);
+ // Set to 0 if NaN.
+ j(parity_even, min);
+ j(above_equal, min);
+ Move(kScratchDoubleReg, 255.0);
+ Ucomisd(value, kScratchDoubleReg);
+ j(above_equal, max);
+  // If the value is in [0, 255], round it to the nearest integer.
+ Roundsd(kScratchDoubleReg, value, kRoundToNearest);
+ TruncateDoubleToInt32(result, kScratchDoubleReg);
+ jmp(done);
+}
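The sequence above implements uint8 clamping: NaN and anything <= 0 branch to min, anything >= 255 branches to max, and in-range values are rounded to the nearest integer (SSE round-to-nearest, ties to even) before truncation. A scalar C++ sketch of the same mapping, assuming the min/max labels materialize 0 and 255 respectively:

#include <cmath>
#include <cstdint>

uint8_t ToUint8ClampedScalar(double value) {
  if (std::isnan(value) || value <= 0.0) return 0;  // the `min` path
  if (value >= 255.0) return 255;                   // the `max` path
  // Round to nearest, ties to even (matches Roundsd with kRoundToNearest,
  // assuming the default FP rounding mode).
  return static_cast<uint8_t>(std::nearbyint(value));
}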
+
+template <typename NodeT>
+inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
+ Register scratch,
+ NodeT* node) {
+ if (!code_gen_state()
+ ->broker()
+ ->dependencies()
+ ->DependOnArrayBufferDetachingProtector()) {
+ // A detached buffer leads to megamorphic feedback, so we won't have a deopt
+ // loop if we deopt here.
+ LoadTaggedField(scratch,
+ FieldOperand(array, JSArrayBufferView::kBufferOffset));
+ LoadTaggedField(scratch,
+ FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+ testl(scratch, Immediate(JSArrayBuffer::WasDetachedBit::kMask));
+ EmitEagerDeoptIf(not_zero, DeoptimizeReason::kArrayBufferWasDetached, node);
+ }
+}
+
+inline void MaglevAssembler::LoadByte(Register dst, MemOperand src) {
+ movzxbl(dst, src);
+}
+
+inline void MaglevAssembler::IsObjectType(Register heap_object,
+ InstanceType type) {
+ MacroAssembler::IsObjectType(heap_object, type, kScratchRegister);
+}
+
+inline void MaglevAssembler::CompareObjectType(Register heap_object,
+ InstanceType type) {
+ LoadMap(kScratchRegister, heap_object);
+ CmpInstanceType(kScratchRegister, type);
+}
+
+inline void MaglevAssembler::JumpIfJSAnyIsNotPrimitive(
+ Register heap_object, Label* target, Label::Distance distance) {
+ MacroAssembler::JumpIfJSAnyIsNotPrimitive(heap_object, kScratchRegister,
+ target, distance);
+}
+
+inline void MaglevAssembler::CompareObjectType(Register heap_object,
+ InstanceType type,
+ Register scratch) {
+ CompareObjectType(heap_object, type);
+}
+
+inline void MaglevAssembler::CompareObjectTypeRange(Register heap_object,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ LoadMap(kScratchRegister, heap_object);
+ CmpInstanceTypeRange(kScratchRegister, kScratchRegister, lower_limit,
+ higher_limit);
+}
+
+inline void MaglevAssembler::CompareMapWithRoot(Register object,
+ RootIndex index,
+ Register scratch) {
+ if (CanBeImmediate(index)) {
+ cmp_tagged(FieldOperand(object, HeapObject::kMapOffset),
+ Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
+ return;
+ }
+ LoadMap(scratch, object);
+ CompareRoot(scratch, index);
+}
+
+inline void MaglevAssembler::CompareInstanceTypeRange(
+ Register map, InstanceType lower_limit, InstanceType higher_limit) {
+ CompareInstanceTypeRange(map, kScratchRegister, lower_limit, higher_limit);
+}
+
+inline void MaglevAssembler::CompareInstanceTypeRange(
+ Register map, Register instance_type_out, InstanceType lower_limit,
+ InstanceType higher_limit) {
+ CmpInstanceTypeRange(map, instance_type_out, lower_limit, higher_limit);
+}
+
+inline void MaglevAssembler::CompareTagged(Register reg,
+ Handle<HeapObject> obj) {
+ Cmp(reg, obj);
+}
+
+inline void MaglevAssembler::CompareTagged(Register src1, Register src2) {
+ cmp_tagged(src1, src2);
+}
+
+inline void MaglevAssembler::CompareInt32(Register reg, int32_t imm) {
+ cmpl(reg, Immediate(imm));
+}
+
+inline void MaglevAssembler::CompareInt32(Register src1, Register src2) {
+ cmpl(src1, src2);
+}
+
+inline void MaglevAssembler::CallSelf() {
+ DCHECK(code_gen_state()->entry_label()->is_bound());
+ Call(code_gen_state()->entry_label());
+}
+
+inline void MaglevAssembler::Jump(Label* target, Label::Distance distance) {
+ jmp(target, distance);
+}
+
+inline void MaglevAssembler::JumpIf(Condition cond, Label* target,
+ Label::Distance distance) {
+ j(cond, target, distance);
+}
+
+inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index,
+ Label* if_equal,
+ Label::Distance distance) {
+ MacroAssembler::JumpIfRoot(with, index, if_equal, distance);
+}
+
+inline void MaglevAssembler::JumpIfNotRoot(Register with, RootIndex index,
+ Label* if_not_equal,
+ Label::Distance distance) {
+ MacroAssembler::JumpIfNotRoot(with, index, if_not_equal, distance);
+}
+
+inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
+ Label::Distance distance) {
+ MacroAssembler::JumpIfSmi(src, on_smi, distance);
+}
+
+void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+ Label* target, Label::Distance distance) {
+ cmpb(value, Immediate(byte));
+ j(cc, target, distance);
+}
+
+void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
+ Condition cond, Label* target,
+ Label::Distance distance) {
+ CompareInt32(r1, r2);
+ JumpIf(cond, target, distance);
+}
+
+inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
+ Condition cond,
+ Label* target,
+ Label::Distance distance) {
+ CompareInt32(r1, value);
+ JumpIf(cond, target, distance);
+}
+
+inline void MaglevAssembler::CompareSmiAndJumpIf(Register r1, Smi value,
+ Condition cond, Label* target,
+ Label::Distance distance) {
+ AssertSmi(r1);
+ Cmp(r1, value);
+ JumpIf(cond, target, distance);
+}
+
+inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
+ Register r1, int32_t mask, Label* target, Label::Distance distance) {
+ testl(r1, Immediate(mask));
+ JumpIf(kNotZero, target, distance);
+}
+
+inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
+ Register r1, int32_t mask, Label* target, Label::Distance distance) {
+ testl(r1, Immediate(mask));
+ JumpIf(kZero, target, distance);
+}
+
+inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
+ Register heap_number) {
+ Movsd(result, FieldOperand(heap_number, HeapNumber::kValueOffset));
+}
+
+inline void MaglevAssembler::Int32ToDouble(DoubleRegister result, Register n) {
+ Cvtlsi2sd(result, n);
+}
+
+inline void MaglevAssembler::Pop(Register dst) { MacroAssembler::Pop(dst); }
+
+template <typename NodeT>
+inline void MaglevAssembler::EmitEagerDeoptIfNotEqual(DeoptimizeReason reason,
+ NodeT* node) {
+ EmitEagerDeoptIf(not_equal, reason, node);
+}
+
+inline void MaglevAssembler::MaterialiseValueNode(Register dst,
+ ValueNode* value) {
+ switch (value->opcode()) {
+ case Opcode::kInt32Constant: {
+ int32_t int_value = value->Cast<Int32Constant>()->value();
+ if (Smi::IsValid(int_value)) {
+ Move(dst, Smi::FromInt(int_value));
+ } else {
+ movq_heap_number(dst, int_value);
+ }
+ return;
+ }
+ case Opcode::kFloat64Constant: {
+ double double_value =
+ value->Cast<Float64Constant>()->value().get_scalar();
+ movq_heap_number(dst, double_value);
+ return;
+ }
+ default:
+ break;
+ }
+
+ DCHECK(!value->allocation().IsConstant());
+ DCHECK(value->allocation().IsAnyStackSlot());
+ using D = NewHeapNumberDescriptor;
+ MemOperand src = ToMemOperand(value->allocation());
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kInt32: {
+ Label done;
+ movl(dst, src);
+ addl(dst, dst);
+ j(no_overflow, &done, Label::kNear);
+ // If we overflow, instead of bailing out (deopting), we change
+ // representation to a HeapNumber.
+ Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue), src);
+ CallBuiltin(Builtin::kNewHeapNumber);
+ Move(dst, kReturnRegister0);
+ bind(&done);
+ break;
+ }
+ case ValueRepresentation::kUint32: {
+ Label done, tag_smi;
+ movl(dst, src);
+ // Unsigned comparison against Smi::kMaxValue.
+ cmpl(dst, Immediate(Smi::kMaxValue));
+ // If we don't fit in a Smi, instead of bailing out (deopting), we
+ // change representation to a HeapNumber.
+ j(below_equal, &tag_smi, Label::kNear);
+      // The value was loaded with movl, so it is zero-extended to 64 bits.
+      // Therefore, we can perform the unsigned 32-bit conversion to double
+      // with a 64-bit signed conversion (Cvtqsi2sd instead of Cvtlsi2sd).
+ Cvtqsi2sd(D::GetDoubleRegisterParameter(D::kValue), dst);
+ CallBuiltin(Builtin::kNewHeapNumber);
+ Move(dst, kReturnRegister0);
+ jmp(&done, Label::kNear);
+ bind(&tag_smi);
+ SmiTag(dst);
+ bind(&done);
+ break;
+ }
+ case ValueRepresentation::kFloat64:
+ Movsd(D::GetDoubleRegisterParameter(D::kValue), src);
+ CallBuiltin(Builtin::kNewHeapNumber);
+ Move(dst, kReturnRegister0);
+ break;
+ case ValueRepresentation::kWord64:
+ case ValueRepresentation::kTagged:
+ UNREACHABLE();
+ }
+}
+
+inline void MaglevAssembler::AssertStackSizeCorrect() {
+ if (v8_flags.debug_code) {
+ movq(kScratchRegister, rbp);
+ subq(kScratchRegister, rsp);
+ cmpq(kScratchRegister,
+ Immediate(code_gen_state()->stack_slots() * kSystemPointerSize +
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ Assert(equal, AbortReason::kStackAccessBelowStackPointer);
+ }
+}
+
+inline void MaglevAssembler::FinishCode() {}
+
+template <typename Dest, typename Source>
+inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Dest dst,
+ Source src) {
+ switch (repr) {
+ case MachineRepresentation::kWord32:
+ return movl(dst, src);
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ return movq(dst, src);
+ default:
+ UNREACHABLE();
+ }
+}
+template <>
+inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
+ MemOperand dst, MemOperand src) {
+ MoveRepr(repr, kScratchRegister, src);
+ MoveRepr(repr, dst, kScratchRegister);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_X64_MAGLEV_ASSEMBLER_X64_INL_H_
diff --git a/deps/v8/src/maglev/x64/maglev-assembler-x64.cc b/deps/v8/src/maglev/x64/maglev-assembler-x64.cc
new file mode 100644
index 0000000000..4d7eee06db
--- /dev/null
+++ b/deps/v8/src/maglev/x64/maglev-assembler-x64.cc
@@ -0,0 +1,821 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/common/globals.h"
+#include "src/interpreter/bytecode-flags.h"
+#include "src/maglev/maglev-assembler-inl.h"
+#include "src/maglev/maglev-assembler.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/objects/heap-number.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+#define __ masm->
+
+void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
+ Register object, int size_in_bytes,
+ AllocationType alloc_type,
+ AllocationAlignment alignment) {
+ // TODO(victorgomes): Call the runtime for large object allocation.
+ // TODO(victorgomes): Support double alignment.
+ DCHECK_EQ(alignment, kTaggedAligned);
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+ if (v8_flags.single_generation) {
+ alloc_type = AllocationType::kOld;
+ }
+ bool in_new_space = alloc_type == AllocationType::kYoung;
+ ExternalReference top =
+ in_new_space
+ ? ExternalReference::new_space_allocation_top_address(isolate_)
+ : ExternalReference::old_space_allocation_top_address(isolate_);
+ ExternalReference limit =
+ in_new_space
+ ? ExternalReference::new_space_allocation_limit_address(isolate_)
+ : ExternalReference::old_space_allocation_limit_address(isolate_);
+
+ ZoneLabelRef done(this);
+ Register new_top = kScratchRegister;
+ // Check if there is enough space.
+ Move(object, ExternalReferenceAsOperand(top));
+ leaq(new_top, Operand(object, size_in_bytes));
+ cmpq(new_top, ExternalReferenceAsOperand(limit));
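+  // This is the inline bump-pointer fast path: the allocation fits iff
+  // top + size_in_bytes is still below the space's limit.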
+ // Otherwise call runtime.
+ JumpToDeferredIf(
+ greater_equal,
+ [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
+ Register object, Builtin builtin, int size_in_bytes,
+ ZoneLabelRef done) {
+ // Remove {object} from snapshot, since it is the returned allocated
+ // HeapObject.
+ register_snapshot.live_registers.clear(object);
+ register_snapshot.live_tagged_registers.clear(object);
+ {
+ SaveRegisterStateForCall save_register_state(masm, register_snapshot);
+ using D = AllocateDescriptor;
+ __ Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
+ __ CallBuiltin(builtin);
+ save_register_state.DefineSafepoint();
+ __ Move(object, kReturnRegister0);
+ }
+ __ jmp(*done);
+ },
+ register_snapshot, object,
+ in_new_space ? Builtin::kAllocateRegularInYoungGeneration
+ : Builtin::kAllocateRegularInOldGeneration,
+ size_in_bytes, done);
+ // Store new top and tag object.
+ movq(ExternalReferenceAsOperand(top), new_top);
+ addq(object, Immediate(kHeapObjectTag));
+ bind(*done);
+}
+
+void MaglevAssembler::AllocateHeapNumber(RegisterSnapshot register_snapshot,
+ Register result,
+ DoubleRegister value) {
+  // If we need to call the runtime, we should spill the value register even
+  // if it is not live in the next node; otherwise the allocation call might
+  // clobber it.
+ register_snapshot.live_double_registers.set(value);
+ Allocate(register_snapshot, result, HeapNumber::kSize);
+ LoadRoot(kScratchRegister, RootIndex::kHeapNumberMap);
+ StoreTaggedField(FieldOperand(result, HeapObject::kMapOffset),
+ kScratchRegister);
+ Movsd(FieldOperand(result, HeapNumber::kValueOffset), value);
+}
+
+void MaglevAssembler::AllocateTwoByteString(RegisterSnapshot register_snapshot,
+ Register result, int length) {
+ int size = SeqTwoByteString::SizeFor(length);
+ Allocate(register_snapshot, result, size);
+ LoadRoot(kScratchRegister, RootIndex::kStringMap);
+ StoreTaggedField(FieldOperand(result, size - kObjectAlignment), Immediate(0));
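+  // The store above zero-initializes the last tagged word of the allocation,
+  // so any alignment padding past the character data is not left
+  // uninitialized.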
+ StoreTaggedField(FieldOperand(result, HeapObject::kMapOffset),
+ kScratchRegister);
+ StoreTaggedField(FieldOperand(result, Name::kRawHashFieldOffset),
+ Immediate(Name::kEmptyHashField));
+ StoreTaggedField(FieldOperand(result, String::kLengthOffset),
+ Immediate(length));
+}
+
+void MaglevAssembler::LoadSingleCharacterString(Register result,
+ Register char_code,
+ Register scratch) {
+ AssertZeroExtended(char_code);
+ if (v8_flags.debug_code) {
+ cmpq(char_code, Immediate(String::kMaxOneByteCharCode));
+ Assert(below_equal, AbortReason::kUnexpectedValue);
+ }
+ DCHECK_NE(char_code, scratch);
+ Register table = scratch;
+ LoadRoot(table, RootIndex::kSingleCharacterStringTable);
+ DecompressTagged(result, FieldOperand(table, char_code, times_tagged_size,
+ FixedArray::kHeaderSize));
+}
+
+void MaglevAssembler::StoreTaggedFieldWithWriteBarrier(
+ Register object, int offset, Register value,
+ RegisterSnapshot register_snapshot, ValueIsCompressed value_is_compressed,
+ ValueCanBeSmi value_can_be_smi) {
+ DCHECK_NE(object, kScratchRegister);
+ DCHECK_NE(value, kScratchRegister);
+ AssertNotSmi(object);
+ StoreTaggedField(FieldOperand(object, offset), value);
+
+ ZoneLabelRef done(this);
+ Label* deferred_write_barrier = MakeDeferredCode(
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object, int offset,
+ Register value, RegisterSnapshot register_snapshot,
+ ValueIsCompressed value_is_compressed) {
+ ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
+ if (value_is_compressed == kValueIsCompressed) {
+ __ DecompressTagged(value, value);
+ }
+
+ // Use the value as the scratch register if possible, since
+ // CheckPageFlag emits slightly better code when value == scratch.
+ Register scratch = kScratchRegister;
+ if (value != object && !register_snapshot.live_registers.has(value)) {
+ scratch = value;
+ }
+ __ CheckPageFlag(value, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ *done);
+
+ Register stub_object_reg = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
+
+ RegList saved;
+ if (object != stub_object_reg &&
+ register_snapshot.live_registers.has(stub_object_reg)) {
+ saved.set(stub_object_reg);
+ }
+ if (register_snapshot.live_registers.has(slot_reg)) {
+ saved.set(slot_reg);
+ }
+
+ __ PushAll(saved);
+
+ if (object != stub_object_reg) {
+ __ Move(stub_object_reg, object);
+ object = stub_object_reg;
+ }
+ __ leaq(slot_reg, FieldOperand(object, offset));
+
+ SaveFPRegsMode const save_fp_mode =
+ !register_snapshot.live_double_registers.is_empty()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+
+ __ CallRecordWriteStub(object, slot_reg, save_fp_mode);
+
+ __ PopAll(saved);
+ __ jmp(*done);
+ },
+ done, object, offset, value, register_snapshot, value_is_compressed);
+
+ if (value_can_be_smi == kValueCanBeSmi) {
+ JumpIfSmi(value, *done);
+ } else {
+ AssertNotSmi(value);
+ }
+ CheckPageFlag(object, kScratchRegister,
+ MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
+ deferred_write_barrier);
+ bind(*done);
+}
+
+void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
+ Label* char_code_fits_one_byte,
+ Register result, Register char_code,
+ Register scratch) {
+ DCHECK_NE(char_code, scratch);
+ ZoneLabelRef done(this);
+ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
+ JumpToDeferredIf(
+ above,
+ [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
+ ZoneLabelRef done, Register result, Register char_code,
+ Register scratch) {
+ // Be sure to save {char_code}. If it aliases with {result}, use
+ // the scratch register.
+ if (char_code == result) {
+ // This is guaranteed to be true since we've already checked
+ // char_code != scratch.
+ DCHECK_NE(scratch, result);
+ __ Move(scratch, char_code);
+ char_code = scratch;
+ }
+ DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
+ register_snapshot.live_registers.set(char_code);
+ __ AllocateTwoByteString(register_snapshot, result, 1);
+ __ andl(char_code, Immediate(0xFFFF));
+ __ movw(FieldOperand(result, SeqTwoByteString::kHeaderSize), char_code);
+ __ jmp(*done);
+ },
+ register_snapshot, done, result, char_code, scratch);
+ if (char_code_fits_one_byte != nullptr) {
+ bind(char_code_fits_one_byte);
+ }
+ LoadSingleCharacterString(result, char_code, scratch);
+ bind(*done);
+}
+
+void MaglevAssembler::StringCharCodeOrCodePointAt(
+ BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
+ RegisterSnapshot& register_snapshot, Register result, Register string,
+ Register index, Register scratch, Label* result_fits_one_byte) {
+ ZoneLabelRef done(this);
+ Label seq_string;
+ Label cons_string;
+ Label sliced_string;
+
+ Label* deferred_runtime_call = MakeDeferredCode(
+ [](MaglevAssembler* masm,
+ BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
+ RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
+ Register string, Register index) {
+ DCHECK(!register_snapshot.live_registers.has(result));
+ DCHECK(!register_snapshot.live_registers.has(string));
+ DCHECK(!register_snapshot.live_registers.has(index));
+ {
+ SaveRegisterStateForCall save_register_state(masm, register_snapshot);
+ __ Push(string);
+ __ SmiTag(index);
+ __ Push(index);
+ __ Move(kContextRegister, masm->native_context().object());
+          // This call does not throw, nor can it deopt.
+ if (mode ==
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
+ __ CallRuntime(Runtime::kStringCodePointAt);
+ } else {
+ DCHECK_EQ(mode,
+ BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
+ __ CallRuntime(Runtime::kStringCharCodeAt);
+ }
+ save_register_state.DefineSafepoint();
+ __ SmiUntag(kReturnRegister0);
+ __ Move(result, kReturnRegister0);
+ }
+ __ jmp(*done);
+ },
+ mode, register_snapshot, done, result, string, index);
+
+ Register instance_type = scratch;
+
+ // We might need to try more than one time for ConsString, SlicedString and
+ // ThinString.
+ Label loop;
+ bind(&loop);
+
+ if (v8_flags.debug_code) {
+ // Check if {string} is a string.
+ AssertNotSmi(string);
+ LoadMap(scratch, string);
+ CmpInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE, LAST_STRING_TYPE);
+ Check(below_equal, AbortReason::kUnexpectedValue);
+
+ movl(scratch, FieldOperand(string, String::kLengthOffset));
+ cmpl(index, scratch);
+ Check(below, AbortReason::kUnexpectedValue);
+ }
+
+ // Get instance type.
+ LoadMap(instance_type, string);
+ mov_tagged(instance_type,
+ FieldOperand(instance_type, Map::kInstanceTypeOffset));
+
+ {
+ // TODO(victorgomes): Add fast path for external strings.
+ Register representation = kScratchRegister;
+ movl(representation, instance_type);
+ andl(representation, Immediate(kStringRepresentationMask));
+ cmpl(representation, Immediate(kSeqStringTag));
+ j(equal, &seq_string, Label::kNear);
+ cmpl(representation, Immediate(kConsStringTag));
+ j(equal, &cons_string, Label::kNear);
+ cmpl(representation, Immediate(kSlicedStringTag));
+ j(equal, &sliced_string, Label::kNear);
+ cmpl(representation, Immediate(kThinStringTag));
+ j(not_equal, deferred_runtime_call);
+ // Fallthrough to thin string.
+ }
+
+ // Is a thin string.
+ {
+ DecompressTagged(string, FieldOperand(string, ThinString::kActualOffset));
+ jmp(&loop, Label::kNear);
+ }
+
+ bind(&sliced_string);
+ {
+ Register offset = scratch;
+ movl(offset, FieldOperand(string, SlicedString::kOffsetOffset));
+ SmiUntag(offset);
+ DecompressTagged(string, FieldOperand(string, SlicedString::kParentOffset));
+ addl(index, offset);
+ jmp(&loop, Label::kNear);
+ }
+
+ bind(&cons_string);
+ {
+ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+ RootIndex::kempty_string);
+ j(not_equal, deferred_runtime_call);
+ DecompressTagged(string, FieldOperand(string, ConsString::kFirstOffset));
+ jmp(&loop, Label::kNear); // Try again with first string.
+ }
+
+ bind(&seq_string);
+ {
+ Label two_byte_string;
+ andl(instance_type, Immediate(kStringEncodingMask));
+ cmpl(instance_type, Immediate(kTwoByteStringTag));
+ j(equal, &two_byte_string, Label::kNear);
+    // The result for a one-byte string is the same for both modes
+    // (CharCodeAt/CodePointAt), since a one-byte character cannot be the
+    // first half of a surrogate pair.
+ movzxbl(result, FieldOperand(string, index, times_1,
+ SeqOneByteString::kHeaderSize));
+ jmp(result_fits_one_byte);
+ bind(&two_byte_string);
+ movzxwl(result, FieldOperand(string, index, times_2,
+ SeqTwoByteString::kHeaderSize));
+
+ if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
+ Register first_code_point = scratch;
+ movl(first_code_point, result);
+ andl(first_code_point, Immediate(0xfc00));
+ cmpl(first_code_point, Immediate(0xd800));
+ j(not_equal, *done);
+
+ Register length = scratch;
+ StringLength(length, string);
+ incl(index);
+ cmpl(index, length);
+ j(greater_equal, *done);
+
+ Register second_code_point = scratch;
+ movzxwl(second_code_point, FieldOperand(string, index, times_2,
+ SeqTwoByteString::kHeaderSize));
+
+ // {index} is not needed at this point.
+ Register scratch2 = index;
+ movl(scratch2, second_code_point);
+ andl(scratch2, Immediate(0xfc00));
+ cmpl(scratch2, Immediate(0xdc00));
+ j(not_equal, *done);
+
+ int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
+ addl(second_code_point, Immediate(surrogate_offset));
+ shll(result, Immediate(10));
+ addl(result, second_code_point);
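+      // result = (lead << 10) + trail + surrogate_offset
+      //        = 0x10000 + (lead - 0xd800) * 0x400 + (trail - 0xdc00),
+      // which is the standard UTF-16 surrogate pair decoding.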
+ }
+
+ // Fallthrough.
+ }
+
+ bind(*done);
+
+ if (v8_flags.debug_code) {
+    // Clobber {string} and {index} to make sure no user of this macro relies
+    // on them being preserved.
+ if (result != string) {
+ movl(string, Immediate(0xdeadbeef));
+ }
+ if (result != index) {
+ movl(index, Immediate(0xdeadbeef));
+ }
+ }
+}
+
+void MaglevAssembler::ToBoolean(Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false,
+ bool fallthrough_when_true) {
+ Register map = kScratchRegister;
+
+ // Check if {{value}} is Smi.
+ CheckSmi(value);
+ JumpToDeferredIf(
+ zero,
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
+ // Check if {value} is not zero.
+ __ SmiCompare(value, Smi::FromInt(0));
+ __ j(equal, *is_false);
+ __ jmp(*is_true);
+ },
+ value, is_true, is_false);
+
+ // Check if {{value}} is false.
+ CompareRoot(value, RootIndex::kFalseValue);
+ j(equal, *is_false);
+
+ // Check if {{value}} is empty string.
+ CompareRoot(value, RootIndex::kempty_string);
+ j(equal, *is_false);
+
+ // Check if {{value}} is undetectable.
+ LoadMap(map, value);
+ testl(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(Map::Bits1::IsUndetectableBit::kMask));
+ j(not_zero, *is_false);
+
+ // Check if {{value}} is a HeapNumber.
+ CompareRoot(map, RootIndex::kHeapNumberMap);
+ JumpToDeferredIf(
+ equal,
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
+ // Sets scratch register to 0.0.
+ __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ // Sets ZF if equal to 0.0, -0.0 or NaN.
+ __ Ucomisd(kScratchDoubleReg,
+ FieldOperand(value, HeapNumber::kValueOffset));
+ __ j(zero, *is_false);
+ __ jmp(*is_true);
+ },
+ value, is_true, is_false);
+
+ // Check if {{value}} is a BigInt.
+ CompareRoot(map, RootIndex::kBigIntMap);
+ JumpToDeferredIf(
+ equal,
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
+ __ testl(FieldOperand(value, BigInt::kBitfieldOffset),
+ Immediate(BigInt::LengthBits::kMask));
+ __ j(zero, *is_false);
+ __ jmp(*is_true);
+ },
+ value, is_true, is_false);
+
+ // Otherwise true.
+ if (!fallthrough_when_true) {
+ jmp(*is_true);
+ }
+}
+
+void MaglevAssembler::TestTypeOf(
+ Register object, interpreter::TestTypeOfFlags::LiteralFlag literal,
+ Label* is_true, Label::Distance true_distance, bool fallthrough_when_true,
+ Label* is_false, Label::Distance false_distance,
+ bool fallthrough_when_false) {
+ // If both true and false are fallthroughs, we don't have to do anything.
+ if (fallthrough_when_true && fallthrough_when_false) return;
+
+ // IMPORTANT: Note that `object` could be a register that aliases registers in
+ // the ScratchRegisterScope. Make sure that all reads of `object` are before
+  // any writes to scratch registers.
+ using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag;
+ switch (literal) {
+ case LiteralFlag::kNumber:
+ JumpIfSmi(object, is_true, true_distance);
+ CompareRoot(FieldOperand(object, HeapObject::kMapOffset),
+ RootIndex::kHeapNumberMap);
+ Branch(equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ case LiteralFlag::kString: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false, false_distance);
+ LoadMap(scratch, object);
+ cmpw(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(LAST_STRING_TYPE));
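+      // String instance types occupy the lowest range of the InstanceType
+      // enum, so a single <= LAST_STRING_TYPE comparison covers all strings.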
+ Branch(less_equal, is_true, true_distance, fallthrough_when_true,
+ is_false, false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kSymbol: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false, false_distance);
+ LoadMap(scratch, object);
+ cmpw(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(SYMBOL_TYPE));
+ Branch(equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kBoolean:
+ CompareRoot(object, RootIndex::kTrueValue);
+ JumpIf(equal, is_true, true_distance);
+ CompareRoot(object, RootIndex::kFalseValue);
+ Branch(equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ case LiteralFlag::kBigInt: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false, false_distance);
+ LoadMap(scratch, object);
+ cmpw(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(BIGINT_TYPE));
+ Branch(equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kUndefined: {
+ JumpIfSmi(object, is_false, false_distance);
+ // Check it has the undetectable bit set and it is not null.
+ LoadMap(kScratchRegister, object);
+ testl(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(Map::Bits1::IsUndetectableBit::kMask));
+ JumpIf(zero, is_false, false_distance);
+ CompareRoot(object, RootIndex::kNullValue);
+ Branch(not_equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kFunction: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false, false_distance);
+ // Check if callable bit is set and not undetectable.
+ LoadMap(scratch, object);
+ movl(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+ andl(scratch, Immediate(Map::Bits1::IsUndetectableBit::kMask |
+ Map::Bits1::IsCallableBit::kMask));
+ cmpl(scratch, Immediate(Map::Bits1::IsCallableBit::kMask));
+ Branch(equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kObject: {
+ MaglevAssembler::ScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ JumpIfSmi(object, is_false, false_distance);
+ // If the object is null then return true.
+ CompareRoot(object, RootIndex::kNullValue);
+ JumpIf(equal, is_true, true_distance);
+ // Check if the object is a receiver type,
+ LoadMap(scratch, object);
+ cmpw(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(FIRST_JS_RECEIVER_TYPE));
+ JumpIf(less, is_false, false_distance);
+ // ... and is not undefined (undetectable) nor callable.
+ testl(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(Map::Bits1::IsUndetectableBit::kMask |
+ Map::Bits1::IsCallableBit::kMask));
+ Branch(equal, is_true, true_distance, fallthrough_when_true, is_false,
+ false_distance, fallthrough_when_false);
+ return;
+ }
+ case LiteralFlag::kOther:
+ if (!fallthrough_when_false) {
+ Jump(is_false, false_distance);
+ }
+ return;
+ }
+ UNREACHABLE();
+}
+
+void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
+ ZoneLabelRef done(this);
+
+ Cvttsd2siq(dst, src);
+ // Check whether the Cvt overflowed.
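+  // On overflow (or NaN) Cvttsd2siq produces the sentinel value INT64_MIN;
+  // subtracting 1 from it via cmpq is the only case that sets the overflow
+  // flag, which no in-range result can do.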
+ cmpq(dst, Immediate(1));
+ JumpToDeferredIf(
+ overflow,
+ [](MaglevAssembler* masm, DoubleRegister src, Register dst,
+ ZoneLabelRef done) {
+ // Push the double register onto the stack as an input argument.
+ __ AllocateStackSpace(kDoubleSize);
+ __ Movsd(MemOperand(rsp, 0), src);
+ __ CallBuiltin(Builtin::kDoubleToI);
+        // DoubleToI leaves the result on the stack; read it and free the
+        // stack space manually. Avoid using `pop` so as not to mix implicit
+        // and explicit rsp updates.
+ __ movl(dst, MemOperand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(*done);
+ },
+ src, dst, done);
+ bind(*done);
+ // Zero extend the converted value to complete the truncation.
+ movl(dst, dst);
+}
+
+void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
+ Label* fail) {
+ DoubleRegister converted_back = kScratchDoubleReg;
+
+ // Convert the input float64 value to int32.
+ Cvttsd2si(dst, src);
+ // Convert that int32 value back to float64.
+ Cvtlsi2sd(converted_back, dst);
+  // Check that the result of the float64->int32->float64 round trip is equal
+  // to the input (i.e. that the conversion didn't truncate).
+ Ucomisd(src, converted_back);
+ JumpIf(parity_even, fail);
+ JumpIf(not_equal, fail);
+
+ // Check if {input} is -0.
+ Label check_done;
+ cmpl(dst, Immediate(0));
+ j(not_equal, &check_done);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Register high_word32_of_input = kScratchRegister;
+ Pextrd(high_word32_of_input, src, 1);
+ cmpl(high_word32_of_input, Immediate(0));
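+  // The high word of -0.0 is 0x80000000 (sign bit set), which is negative as
+  // a signed int32, while the high word of +0.0 is zero.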
+ JumpIf(less, fail);
+
+ bind(&check_done);
+}
+
+void MaglevAssembler::Prologue(Graph* graph) {
+ BailoutIfDeoptimized(rbx);
+
+ if (graph->has_recursive_calls()) {
+ bind(code_gen_state()->entry_label());
+ }
+
+ // Tiering support.
+ // TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
+ // per Maglev code object on x64).
+ {
+ // Scratch registers. Don't clobber regs related to the calling
+ // convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
+ // with deferred flags code.
+ Register flags = rcx;
+ Register feedback_vector = r9;
+
+ Label* deferred_flags_need_processing = MakeDeferredCode(
+ [](MaglevAssembler* masm, Register flags, Register feedback_vector) {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+ // TODO(leszeks): This could definitely be a builtin that we
+ // tail-call.
+ __ OptimizeCodeOrTailCallOptimizedCodeSlot(
+ flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
+ __ Trap();
+ },
+ flags, feedback_vector);
+
+ Move(feedback_vector,
+ compilation_info()->toplevel_compilation_unit()->feedback().object());
+ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
+ flags, feedback_vector, CodeKind::MAGLEV,
+ deferred_flags_need_processing);
+ }
+
+ EnterFrame(StackFrame::MAGLEV);
+
+ // Save arguments in frame.
+ // TODO(leszeks): Consider eliding this frame if we don't make any calls
+ // that could clobber these registers.
+ Push(kContextRegister);
+ Push(kJSFunctionRegister); // Callee's JS function.
+ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
+
+ // Initialize stack slots.
+ if (graph->tagged_stack_slots() > 0) {
+ ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
+ // TODO(leszeks): Consider filling with xmm + movdqa instead.
+ Move(rax, 0);
+
+ // Magic value. Experimentally, an unroll size of 8 doesn't seem any
+ // worse than fully unrolled pushes.
+ const int kLoopUnrollSize = 8;
+ int tagged_slots = graph->tagged_stack_slots();
+ if (tagged_slots < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill
+ // completely.
+ for (int i = 0; i < tagged_slots; ++i) {
+ pushq(rax);
+ }
+ } else {
+ // Extract the first few slots to round to the unroll size.
+ int first_slots = tagged_slots % kLoopUnrollSize;
+ for (int i = 0; i < first_slots; ++i) {
+ pushq(rax);
+ }
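+      // For example, with 21 tagged slots: 21 % 8 = 5 pushes here, then the
+      // loop below runs 21 / 8 = 2 iterations of 8 pushes each (5 + 16 = 21).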
+ Move(rbx, tagged_slots / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at
+ // least once.
+ DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
+ Label loop;
+ bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ pushq(rax);
+ }
+ decl(rbx);
+ j(greater, &loop);
+ }
+ }
+ if (graph->untagged_stack_slots() > 0) {
+    // Extend rsp by the size of the remaining untagged part of the frame;
+    // there is no need to initialize these slots.
+ subq(rsp, Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
+ }
+}
+
+void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
+ Label* eager_deopt_entry,
+ size_t lazy_deopt_count,
+ Label* lazy_deopt_entry) {}
+
+void MaglevAssembler::StringLength(Register result, Register string) {
+ if (v8_flags.debug_code) {
+ // Check if {string} is a string.
+ AssertNotSmi(string);
+ LoadMap(kScratchRegister, string);
+ CmpInstanceTypeRange(kScratchRegister, kScratchRegister, FIRST_STRING_TYPE,
+ LAST_STRING_TYPE);
+ Check(below_equal, AbortReason::kUnexpectedValue);
+ }
+ movl(result, FieldOperand(string, String::kLengthOffset));
+}
+
+void MaglevAssembler::StoreFixedArrayElementWithWriteBarrier(
+ Register array, Register index, Register value,
+ RegisterSnapshot register_snapshot) {
+ if (v8_flags.debug_code) {
+ AssertNotSmi(array);
+ CmpObjectType(array, FIXED_ARRAY_TYPE, kScratchRegister);
+ Assert(equal, AbortReason::kUnexpectedValue);
+ cmpq(index, Immediate(0));
+ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
+ }
+ mov_tagged(
+ FieldOperand(array, index, times_tagged_size, FixedArray::kHeaderSize),
+ value);
+ ZoneLabelRef done(this);
+ Label* deferred_write_barrier = MakeDeferredCode(
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
+ Register index, Register value, RegisterSnapshot register_snapshot) {
+ ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
+ // Use the value as the scratch register if possible, since
+ // CheckPageFlag emits slightly better code when value == scratch.
+ Register scratch = kScratchRegister;
+ if (value != object && !register_snapshot.live_registers.has(value)) {
+ scratch = value;
+ }
+ __ CheckPageFlag(value, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ *done);
+
+ Register stub_object_reg = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
+
+ RegList saved;
+ if (object != stub_object_reg &&
+ register_snapshot.live_registers.has(stub_object_reg)) {
+ saved.set(stub_object_reg);
+ }
+ if (register_snapshot.live_registers.has(slot_reg)) {
+ saved.set(slot_reg);
+ }
+
+ __ PushAll(saved);
+
+ if (object != stub_object_reg) {
+ __ Move(stub_object_reg, object);
+ object = stub_object_reg;
+ }
+ __ leaq(slot_reg, FieldOperand(object, index, times_tagged_size,
+ FixedArray::kHeaderSize));
+
+ SaveFPRegsMode const save_fp_mode =
+ !register_snapshot.live_double_registers.is_empty()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+
+ __ CallRecordWriteStub(object, slot_reg, save_fp_mode);
+
+ __ PopAll(saved);
+ __ jmp(*done);
+ },
+ done, array, index, value, register_snapshot);
+
+ JumpIfSmi(value, *done);
+ CheckPageFlag(array, kScratchRegister,
+ MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
+ deferred_write_barrier);
+ bind(*done);
+}
+
+void MaglevAssembler::StoreFixedArrayElementNoWriteBarrier(Register array,
+ Register index,
+ Register value) {
+ if (v8_flags.debug_code) {
+ AssertNotSmi(array);
+ CmpObjectType(array, FIXED_ARRAY_TYPE, kScratchRegister);
+ Assert(equal, AbortReason::kUnexpectedValue);
+ cmpq(index, Immediate(0));
+ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
+ }
+ mov_tagged(
+ FieldOperand(array, index, times_tagged_size, FixedArray::kHeaderSize),
+ value);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/x64/maglev-ir-x64.cc b/deps/v8/src/maglev/x64/maglev-ir-x64.cc
new file mode 100644
index 0000000000..9b10d5b5dc
--- /dev/null
+++ b/deps/v8/src/maglev/x64/maglev-ir-x64.cc
@@ -0,0 +1,2303 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/x64/assembler-x64-inl.h"
+#include "src/codegen/x64/assembler-x64.h"
+#include "src/codegen/x64/register-x64.h"
+#include "src/maglev/maglev-assembler-inl.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir-inl.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/objects/feedback-cell.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/js-function.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+#define __ masm->
+
+constexpr Condition ConditionForFloat64(Operation operation) {
+ switch (operation) {
+ case Operation::kEqual:
+ case Operation::kStrictEqual:
+ return equal;
+ case Operation::kLessThan:
+ return below;
+ case Operation::kLessThanOrEqual:
+ return below_equal;
+ case Operation::kGreaterThan:
+ return above;
+ case Operation::kGreaterThanOrEqual:
+ return above_equal;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// ---
+// Nodes
+// ---
+
+void FoldedAllocation::SetValueLocationConstraints() {
+ UseRegister(raw_allocation());
+ DefineAsRegister(this);
+}
+
+void FoldedAllocation::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ leaq(ToRegister(result()),
+ Operand(ToRegister(raw_allocation()), offset()));
+}
+
+int CreateEmptyObjectLiteral::MaxCallStackArgs() const {
+ return AllocateDescriptor::GetStackParameterCount();
+}
+void CreateEmptyObjectLiteral::SetValueLocationConstraints() {
+ DefineAsRegister(this);
+}
+void CreateEmptyObjectLiteral::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(result());
+ __ Allocate(register_snapshot(), object, map().instance_size());
+ __ Move(kScratchRegister, map().object());
+ __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
+ kScratchRegister);
+ __ LoadRoot(kScratchRegister, RootIndex::kEmptyFixedArray);
+ __ StoreTaggedField(FieldOperand(object, JSObject::kPropertiesOrHashOffset),
+ kScratchRegister);
+ __ StoreTaggedField(FieldOperand(object, JSObject::kElementsOffset),
+ kScratchRegister);
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ for (int i = 0; i < map().GetInObjectProperties(); i++) {
+ int offset = map().GetInObjectPropertyOffset(i);
+ __ StoreTaggedField(FieldOperand(object, offset), kScratchRegister);
+ }
+}
+
+void CheckMaps::SetValueLocationConstraints() { UseRegister(receiver_input()); }
+void CheckMaps::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+
+ // TODO(victorgomes): This can happen, because we do not emit an unconditional
+ // deopt when we intersect the map sets.
+ if (maps().is_empty()) {
+ __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
+ return;
+ }
+
+ bool maps_include_heap_number = AnyMapIsHeapNumber(maps());
+
+ Label done;
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ if (maps_include_heap_number) {
+ // Smis count as matching the HeapNumber map, so we're done.
+ __ j(is_smi, &done);
+ } else {
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
+ }
+ }
+
+ size_t map_count = maps().size();
+ for (size_t i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = maps().at(i);
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ __ j(equal, &done);
+ }
+ Handle<Map> last_map = maps().at(map_count - 1);
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), last_map);
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, this);
+ __ bind(&done);
+}
+
+void CheckNumber::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+}
+void CheckNumber::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Label done;
+ Register value = ToRegister(receiver_input());
+ // If {value} is a Smi or a HeapNumber, we're done.
+ __ JumpIfSmi(value, &done);
+ __ CompareRoot(FieldOperand(value, HeapObject::kMapOffset),
+ RootIndex::kHeapNumberMap);
+ if (mode() == Object::Conversion::kToNumeric) {
+ // Jump to done if it is a HeapNumber.
+ __ j(equal, &done);
+ // Check if it is a BigInt.
+ __ LoadMap(kScratchRegister, value);
+ __ cmpw(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(BIGINT_TYPE));
+ }
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotANumber, this);
+ __ bind(&done);
+}
+
+int CheckMapsWithMigration::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kTryMigrateInstance)->nargs, 1);
+ return 1;
+}
+void CheckMapsWithMigration::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+}
+void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // TODO(victorgomes): This can happen, because we do not emit an unconditional
+ // deopt when we intersect the map sets.
+ if (maps().is_empty()) {
+ __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
+ return;
+ }
+
+ Register object = ToRegister(receiver_input());
+
+ bool maps_include_heap_number = AnyMapIsHeapNumber(maps());
+
+ ZoneLabelRef done(masm);
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ if (maps_include_heap_number) {
+ // Smis count as matching the HeapNumber map, so we're done.
+ __ j(is_smi, *done);
+ } else {
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
+ }
+ }
+
+ size_t map_count = maps().size();
+ for (size_t i = 0; i < map_count; ++i) {
+ ZoneLabelRef continue_label(masm);
+ Handle<Map> map = maps().at(i);
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+
+ bool last_map = (i == map_count - 1);
+ if (map->is_migration_target()) {
+ __ JumpToDeferredIf(
+ not_equal,
+ [](MaglevAssembler* masm, ZoneLabelRef continue_label,
+ ZoneLabelRef done, Register object, int map_index,
+ CheckMapsWithMigration* node) {
+ // Reload the map to avoid needing to save it on a temporary in the
+ // fast path.
+ __ LoadMap(kScratchRegister, object);
+            // If the map is not deprecated, the map check simply fails;
+            // continue to the next map.
+ __ movl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitField3Offset));
+ __ testl(kScratchRegister,
+ Immediate(Map::Bits3::IsDeprecatedBit::kMask));
+ __ j(zero, *continue_label);
+
+ // Otherwise, try migrating the object. If the migration
+ // returns Smi zero, then it failed the migration.
+ Register return_val = Register::no_reg();
+ {
+ RegisterSnapshot register_snapshot = node->register_snapshot();
+ // We can eager deopt after the snapshot, so make sure the nodes
+ // used by the deopt are included in it.
+ // TODO(leszeks): This is a bit of a footgun -- we likely want the
+ // snapshot to always include eager deopt input registers.
+ AddDeoptRegistersToSnapshot(&register_snapshot,
+ node->eager_deopt_info());
+ SaveRegisterStateForCall save_register_state(masm,
+ register_snapshot);
+
+ __ Push(object);
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kTryMigrateInstance);
+ save_register_state.DefineSafepoint();
+
+              // Make sure the return value is preserved across the PopAll
+              // that restores the live registers.
+ return_val = kReturnRegister0;
+ if (register_snapshot.live_registers.has(return_val)) {
+ DCHECK(!register_snapshot.live_registers.has(kScratchRegister));
+ __ movq(kScratchRegister, return_val);
+ return_val = kScratchRegister;
+ }
+ }
+
+            // On failure, the returned value is Smi zero.
+ __ cmpl(return_val, Immediate(0));
+ __ j(equal, *continue_label);
+
+ // The migrated object is returned on success, retry the map check.
+ __ Move(object, return_val);
+ // Manually load the map pointer without uncompressing it.
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ node->maps().at(map_index));
+ __ j(equal, *done);
+ __ jmp(*continue_label);
+ },
+ // If this is the last map to check, we should deopt if we fail.
+ // This is safe to do, since {eager_deopt_info} is ZoneAllocated.
+ (last_map ? ZoneLabelRef::UnsafeFromLabelPointer(masm->GetDeoptLabel(
+ this, DeoptimizeReason::kWrongMap))
+ : continue_label),
+ done, object, i, this);
+ } else if (last_map) {
+ // If it is the last map and it is not a migration target, we should deopt
+ // if the check fails.
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, this);
+ }
+
+ if (!last_map) {
+ // We don't need to bind the label for the last map.
+ __ j(equal, *done);
+ __ bind(*continue_label);
+ }
+ }
+
+ __ bind(*done);
+}
+
+void CheckJSArrayBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ UseRegister(index_input());
+}
+void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+ __ AssertNotSmi(object);
+
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ }
+ __ SmiUntagField(kScratchRegister,
+ FieldOperand(object, JSArray::kLengthOffset));
+ __ cmpl(index, kScratchRegister);
+ __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
+}
+
+void CheckJSTypedArrayBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ if (ElementsKindSize(elements_kind_) == 1) {
+ UseRegister(index_input());
+ } else {
+ UseAndClobberRegister(index_input());
+ }
+}
+void CheckJSTypedArrayBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+ Register byte_length = kScratchRegister;
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(object);
+ __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ }
+ __ LoadBoundedSizeFromObject(byte_length, object,
+ JSTypedArray::kRawByteLengthOffset);
+ int element_size = ElementsKindSize(elements_kind_);
+ if (element_size > 1) {
+ DCHECK(element_size == 2 || element_size == 4 || element_size == 8);
+ __ shlq(index, Immediate(base::bits::CountTrailingZeros(element_size)));
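+    // CountTrailingZeros of a power of two is its log2, so this shift scales
+    // the element index into a byte offset (e.g. a shift by 3 for 8-byte
+    // elements).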
+ }
+ __ cmpq(index, byte_length);
+ // We use {above_equal} which does an unsigned comparison to handle negative
+ // indices as well.
+ __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
+}
+
+int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
+void CheckJSDataViewBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ UseRegister(index_input());
+}
+void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+ Register byte_length = kScratchRegister;
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(object);
+ __ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ }
+
+ // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
+ __ LoadBoundedSizeFromObject(byte_length, object,
+ JSDataView::kRawByteLengthOffset);
+
+ int element_size = ExternalArrayElementSize(element_type_);
+ if (element_size > 1) {
+ __ subq(byte_length, Immediate(element_size - 1));
+ __ EmitEagerDeoptIf(negative, DeoptimizeReason::kOutOfBounds, this);
+ }
+ __ cmpl(index, byte_length);
+ __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
+}
+
+void CheckJSObjectElementsBounds::SetValueLocationConstraints() {
+ UseRegister(receiver_input());
+ UseRegister(index_input());
+}
+void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(receiver_input());
+ Register index = ToRegister(index_input());
+ __ AssertNotSmi(object);
+
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ Assert(greater_equal, AbortReason::kUnexpectedValue);
+ }
+ __ LoadTaggedField(kScratchRegister,
+ FieldOperand(object, JSObject::kElementsOffset));
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(kScratchRegister);
+ }
+ __ SmiUntagField(kScratchRegister,
+ FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
+ __ cmpl(index, kScratchRegister);
+ __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
+}
+
+void CheckedInternalizedString::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ set_temporaries_needed(1);
+ DefineSameAsFirst(this);
+}
+void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register map_tmp = temps.Acquire();
+ Register object = ToRegister(object_input());
+
+ if (check_type_ == CheckType::kOmitHeapObjectCheck) {
+ __ AssertNotSmi(object);
+ } else {
+ Condition is_smi = __ CheckSmi(object);
+ __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
+ }
+
+ __ LoadMap(map_tmp, object);
+ __ RecordComment("Test IsInternalizedString");
+  // Go to the slow path if this is a non-string or a non-internalized string.
+ __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
+ Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ static_assert((kStringTag | kInternalizedTag) == 0);
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(
+ not_zero,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
+ CheckedInternalizedString* node, EagerDeoptInfo* deopt_info,
+ Register map_tmp) {
+ __ RecordComment("Deferred Test IsThinString");
+ __ movw(map_tmp, FieldOperand(map_tmp, Map::kInstanceTypeOffset));
+ __ cmpw(map_tmp, Immediate(THIN_STRING_TYPE));
+ // Deopt if this isn't a thin string.
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, node);
+ __ LoadTaggedField(object,
+ FieldOperand(object, ThinString::kActualOffset));
+ if (v8_flags.debug_code) {
+ __ RecordComment("DCHECK IsInternalizedString");
+ __ LoadMap(map_tmp, object);
+ __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
+ Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ static_assert((kStringTag | kInternalizedTag) == 0);
+ __ Check(zero, AbortReason::kUnexpectedValue);
+ }
+ __ jmp(*done);
+ },
+ done, object, this, eager_deopt_info(), map_tmp);
+ __ bind(*done);
+}
+
+int CheckedObjectToIndex::MaxCallStackArgs() const {
+ return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(1);
+}
+void CheckedObjectToIndex::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ DefineAsRegister(this);
+ set_double_temporaries_needed(1);
+}
+void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register result_reg = ToRegister(result());
+
+ ZoneLabelRef done(masm);
+ Condition is_smi = __ CheckSmi(object);
+ __ JumpToDeferredIf(
+ NegateCondition(is_smi),
+ [](MaglevAssembler* masm, Register object, Register result_reg,
+ ZoneLabelRef done, CheckedObjectToIndex* node) {
+ Label is_string;
+ __ LoadMap(kScratchRegister, object);
+ __ CmpInstanceTypeRange(kScratchRegister, kScratchRegister,
+ FIRST_STRING_TYPE, LAST_STRING_TYPE);
+ __ j(below_equal, &is_string);
+
+ __ cmpw(kScratchRegister, Immediate(HEAP_NUMBER_TYPE));
+ // The IC will go generic if it encounters something other than a
+ // Number or String key.
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, node);
+
+ // Heap Number.
+ {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister number_value = temps.AcquireDouble();
+ DoubleRegister converted_back = kScratchDoubleReg;
+ // Load the heap number value into a double register.
+ __ Movsd(number_value,
+ FieldOperand(object, HeapNumber::kValueOffset));
+ // Convert the input float64 value to int32.
+ __ Cvttsd2si(result_reg, number_value);
+ // Convert that int32 value back to float64.
+ __ Cvtlsi2sd(converted_back, result_reg);
+          // Check that the result of the float64->int32->float64 round trip
+          // is equal to the input (i.e. that the conversion didn't truncate).
+ __ Ucomisd(number_value, converted_back);
+ __ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotInt32, node);
+ __ j(equal, *done);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ }
+
+ // String.
+ __ bind(&is_string);
+ {
+ RegisterSnapshot snapshot = node->register_snapshot();
+ snapshot.live_registers.clear(result_reg);
+ DCHECK(!snapshot.live_tagged_registers.has(result_reg));
+ {
+ SaveRegisterStateForCall save_register_state(masm, snapshot);
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(1);
+ __ Move(arg_reg_1, object);
+ __ CallCFunction(
+ ExternalReference::string_to_array_index_function(), 1);
+ // No need for safepoint since this is a fast C call.
+ __ Move(result_reg, kReturnRegister0);
+ }
+ __ cmpl(result_reg, Immediate(0));
+ __ j(greater_equal, *done);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ }
+ },
+ object, result_reg, done, this);
+
+ // If we didn't enter the deferred block, we're a Smi.
+ if (result_reg == object) {
+ __ SmiToInt32(result_reg);
+ } else {
+ __ SmiToInt32(result_reg, object);
+ }
+
+ __ bind(*done);
+}
+
+int BuiltinStringFromCharCode::MaxCallStackArgs() const {
+ return AllocateDescriptor::GetStackParameterCount();
+}
+void BuiltinStringFromCharCode::SetValueLocationConstraints() {
+ if (code_input().node()->Is<Int32Constant>()) {
+ UseAny(code_input());
+ } else {
+ UseAndClobberRegister(code_input());
+ set_temporaries_needed(1);
+ }
+ DefineAsRegister(this);
+}
+void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register result_string = ToRegister(result());
+ if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
+ int32_t char_code = constant->value();
+ if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
+ __ LoadSingleCharacterString(result_string, char_code);
+ } else {
+ __ AllocateTwoByteString(register_snapshot(), result_string, 1);
+ __ movw(FieldOperand(result_string, SeqTwoByteString::kHeaderSize),
+ Immediate(char_code & 0xFFFF));
+ }
+ } else {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register char_code = ToRegister(code_input());
+ __ StringFromCharCode(register_snapshot(), nullptr, result_string,
+ char_code, scratch);
+ }
+}
+
+int BuiltinStringPrototypeCharCodeOrCodePointAt::MaxCallStackArgs() const {
+ DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2);
+ return 2;
+}
+void BuiltinStringPrototypeCharCodeOrCodePointAt::
+ SetValueLocationConstraints() {
+ UseAndClobberRegister(string_input());
+ UseAndClobberRegister(index_input());
+ DefineAsRegister(this);
+ set_temporaries_needed(1);
+}
+
+void BuiltinStringPrototypeCharCodeOrCodePointAt::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register string = ToRegister(string_input());
+ Register index = ToRegister(index_input());
+ ZoneLabelRef done(masm);
+ RegisterSnapshot save_registers = register_snapshot();
+ __ StringCharCodeOrCodePointAt(mode_, save_registers, ToRegister(result()),
+ string, index, scratch, *done);
+ __ bind(*done);
+}
+
+void LoadFixedArrayElement::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ DefineAsRegister(this);
+}
+void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ Register result_reg = ToRegister(result());
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(elements);
+ __ CmpObjectType(elements, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ __ cmpq(index, Immediate(0));
+ __ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
+ }
+ if (this->decompresses_tagged_result()) {
+ __ DecompressTagged(result_reg,
+ FieldOperand(elements, index, times_tagged_size,
+ FixedArray::kHeaderSize));
+ } else {
+ __ mov_tagged(result_reg, FieldOperand(elements, index, times_tagged_size,
+ FixedArray::kHeaderSize));
+ }
+}
+
+void LoadFixedDoubleArrayElement::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ DefineAsRegister(this);
+}
+void LoadFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister result_reg = ToDoubleRegister(result());
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(elements);
+ __ CmpObjectType(elements, FIXED_DOUBLE_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ __ cmpq(index, Immediate(0));
+ __ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
+ }
+ __ Movsd(result_reg, FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize));
+}
+
+void StoreFixedDoubleArrayElement::SetValueLocationConstraints() {
+ UseRegister(elements_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
+}
+void StoreFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register elements = ToRegister(elements_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
+ if (v8_flags.debug_code) {
+ __ AssertNotSmi(elements);
+ __ CmpObjectType(elements, FIXED_DOUBLE_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ __ cmpq(index, Immediate(0));
+ __ Assert(above_equal, AbortReason::kUnexpectedNegativeValue);
+ }
+ __ Movsd(
+ FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ value);
+}
+
+void LoadSignedIntDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ if (is_little_endian_constant() ||
+ type_ == ExternalArrayType::kExternalInt8Array) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ set_temporaries_needed(1);
+ DefineAsRegister(this);
+}
+void LoadSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ Register result_reg = ToRegister(result());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register data_pointer = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
+ __ Assert(above_equal, AbortReason::kUnexpectedValue);
+ }
+
+ // Load data pointer.
+ __ LoadExternalPointerField(
+ data_pointer, FieldOperand(object, JSDataView::kDataPointerOffset));
+
+ int element_size = ExternalArrayElementSize(type_);
+ __ LoadSignedField(result_reg, Operand(data_pointer, index, times_1, 0),
+ element_size);
+
+  // We ignore the little-endian argument if the element is a single byte.
+ if (type_ != ExternalArrayType::kExternalInt8Array) {
+ if (is_little_endian_constant()) {
+ if (!FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ ReverseByteOrder(result_reg, element_size);
+ }
+ } else {
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, false);
+ __ bind(*is_big_endian);
+ __ ReverseByteOrder(result_reg, element_size);
+ __ bind(*is_little_endian);
+ // x64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ }
+ }
+}
+
+void StoreSignedIntDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ if (ExternalArrayElementSize(type_) > 1) {
+ UseAndClobberRegister(value_input());
+ } else {
+ UseRegister(value_input());
+ }
+ if (is_little_endian_constant() ||
+ type_ == ExternalArrayType::kExternalInt8Array) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ set_temporaries_needed(1);
+}
+void StoreSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ Register value = ToRegister(value_input());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register data_pointer = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
+ __ Assert(above_equal, AbortReason::kUnexpectedValue);
+ }
+
+ // Load data pointer.
+ __ LoadExternalPointerField(
+ data_pointer, FieldOperand(object, JSDataView::kDataPointerOffset));
+
+ int element_size = ExternalArrayElementSize(type_);
+
+  // We ignore the little-endian argument if the element is a single byte.
+ if (element_size > 1) {
+ if (is_little_endian_constant()) {
+ if (!FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ ReverseByteOrder(value, element_size);
+ }
+ } else {
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, false);
+ __ bind(*is_big_endian);
+ __ ReverseByteOrder(value, element_size);
+ __ bind(*is_little_endian);
+ // x64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ }
+ }
+
+ __ StoreField(Operand(data_pointer, index, times_1, 0), value, element_size);
+}
+
+void LoadDoubleDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ if (is_little_endian_constant()) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ set_temporaries_needed(1);
+ DefineAsRegister(this);
+}
+void LoadDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister result_reg = ToDoubleRegister(result());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register data_pointer = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
+ __ Assert(above_equal, AbortReason::kUnexpectedValue);
+ }
+
+ // Load data pointer.
+ __ LoadExternalPointerField(
+ data_pointer, FieldOperand(object, JSDataView::kDataPointerOffset));
+
+ if (is_little_endian_constant()) {
+ if (FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ Movsd(result_reg, Operand(data_pointer, index, times_1, 0));
+ } else {
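+      // Load the raw 8 bytes into a GP register, byte-swap them, and move the
+      // result into the double register.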
+ __ movq(kScratchRegister, Operand(data_pointer, index, times_1, 0));
+ __ bswapq(kScratchRegister);
+ __ Movq(result_reg, kScratchRegister);
+ }
+ } else {
+ Label done;
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ // TODO(leszeks): We're likely to be calling this on an existing boolean --
+ // maybe that's a case we should fast-path here and re-use that boolean
+ // value?
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, true);
+ // x64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ __ bind(*is_little_endian);
+ __ Movsd(result_reg, Operand(data_pointer, index, times_1, 0));
+ __ jmp(&done);
+ // We should swap the bytes if big endian.
+ __ bind(*is_big_endian);
+ __ movq(kScratchRegister, Operand(data_pointer, index, times_1, 0));
+ __ bswapq(kScratchRegister);
+ __ Movq(result_reg, kScratchRegister);
+ __ bind(&done);
+ }
+}
+
+void StoreDoubleDataViewElement::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(index_input());
+ UseRegister(value_input());
+ if (is_little_endian_constant()) {
+ UseAny(is_little_endian_input());
+ } else {
+ UseRegister(is_little_endian_input());
+ }
+ set_temporaries_needed(1);
+}
+void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register index = ToRegister(index_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register data_pointer = temps.Acquire();
+
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
+ __ Assert(above_equal, AbortReason::kUnexpectedValue);
+ }
+
+ // Load data pointer.
+ __ LoadExternalPointerField(
+ data_pointer, FieldOperand(object, JSDataView::kDataPointerOffset));
+
+ if (is_little_endian_constant()) {
+ if (FromConstantToBool(masm, is_little_endian_input().node())) {
+ __ Movsd(Operand(data_pointer, index, times_1, 0), value);
+ } else {
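+      // Move the double into a GP register, byte-swap it, and store the raw
+      // 8 bytes.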
+ __ Movq(kScratchRegister, value);
+ __ bswapq(kScratchRegister);
+ __ movq(Operand(data_pointer, index, times_1, 0), kScratchRegister);
+ }
+ } else {
+ Label done;
+ ZoneLabelRef is_little_endian(masm), is_big_endian(masm);
+ // TODO(leszeks): We're likely to be calling this on an existing boolean --
+ // maybe that's a case we should fast-path here and re-use that boolean
+ // value?
+ __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian,
+ is_big_endian, true);
+ // x64 is little endian.
+ static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
+ __ bind(*is_little_endian);
+ __ Movsd(Operand(data_pointer, index, times_1, 0), value);
+ __ jmp(&done);
+ // We should swap the bytes if big endian.
+ __ bind(*is_big_endian);
+ __ Movq(kScratchRegister, value);
+ __ bswapq(kScratchRegister);
+ __ movq(Operand(data_pointer, index, times_1, 0), kScratchRegister);
+ __ bind(&done);
+ }
+}
+
+namespace {
+
+template <bool check_detached, typename ResultReg, typename NodeT>
+void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object,
+ Register index, ResultReg result_reg,
+ Register scratch, ElementsKind kind) {
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ }
+
+ if constexpr (check_detached) {
+ __ DeoptIfBufferDetached(object, scratch, node);
+ }
+
+ Register data_pointer = scratch;
+ __ BuildTypedArrayDataPointer(data_pointer, object);
+
+ if constexpr (std::is_same_v<ResultReg, Register>) {
+ if (IsSignedIntTypedArrayElementsKind(kind)) {
+ int element_size = ElementsKindSize(kind);
+ __ LoadSignedField(
+ result_reg,
+ Operand(data_pointer, index, ScaleFactorFromInt(element_size), 0),
+ element_size);
+ } else {
+ DCHECK(IsUnsignedIntTypedArrayElementsKind(kind));
+ int element_size = ElementsKindSize(kind);
+ __ LoadUnsignedField(
+ result_reg,
+ Operand(data_pointer, index, ScaleFactorFromInt(element_size), 0),
+ element_size);
+ }
+ } else {
+#ifdef DEBUG
+ bool result_reg_is_double = std::is_same_v<ResultReg, DoubleRegister>;
+ DCHECK(result_reg_is_double);
+ DCHECK(IsFloatTypedArrayElementsKind(kind));
+#endif
+ switch (kind) {
+ case FLOAT32_ELEMENTS:
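+        // Load the float32 element and widen it to float64, since the result
+        // register is a double register.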
+ __ Movss(result_reg, Operand(data_pointer, index, times_4, 0));
+ __ Cvtss2sd(result_reg, result_reg);
+ break;
+ case FLOAT64_ELEMENTS:
+ __ Movsd(result_reg, Operand(data_pointer, index, times_8, 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <bool check_detached, typename ValueReg, typename NodeT>
+void GenerateTypedArrayStore(MaglevAssembler* masm, NodeT* node,
+ Register object, Register index, ValueReg value,
+ Register scratch, ElementsKind kind) {
+ __ AssertNotSmi(object);
+ if (v8_flags.debug_code) {
+ __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ }
+
+ if constexpr (check_detached) {
+ __ DeoptIfBufferDetached(object, scratch, node);
+ }
+
+ Register data_pointer = scratch;
+ __ BuildTypedArrayDataPointer(data_pointer, object);
+
+ if constexpr (std::is_same_v<ValueReg, Register>) {
+ int element_size = ElementsKindSize(kind);
+ __ StoreField(
+ Operand(data_pointer, index, ScaleFactorFromInt(element_size), 0),
+ value, element_size);
+ } else {
+#ifdef DEBUG
+ bool value_is_double = std::is_same_v<ValueReg, DoubleRegister>;
+ DCHECK(value_is_double);
+ DCHECK(IsFloatTypedArrayElementsKind(kind));
+#endif
+ switch (kind) {
+ case FLOAT32_ELEMENTS:
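+        // Narrow the double value to float32 before storing the element.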
+ __ Cvtsd2ss(kScratchDoubleReg, value);
+ __ Movss(Operand(data_pointer, index, times_4, 0), kScratchDoubleReg);
+ break;
+ case FLOAT64_ELEMENTS:
+ __ Movsd(Operand(data_pointer, index, times_8, 0), value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+} // namespace
+
+#define DEF_LOAD_TYPED_ARRAY(Name, ResultReg, ToResultReg, check_detached) \
+ void Name::SetValueLocationConstraints() { \
+ UseRegister(object_input()); \
+ UseRegister(index_input()); \
+ DefineAsRegister(this); \
+ set_temporaries_needed(1); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Register object = ToRegister(object_input()); \
+ Register index = ToRegister(index_input()); \
+ ResultReg result_reg = ToResultReg(result()); \
+ MaglevAssembler::ScratchRegisterScope temps(masm); \
+ Register scratch = temps.Acquire(); \
+ \
+ GenerateTypedArrayLoad<check_detached>( \
+ masm, this, object, index, result_reg, scratch, elements_kind_); \
+ }
+
+DEF_LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElement, Register, ToRegister,
+ /*check_detached*/ true)
+DEF_LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElementNoDeopt, Register,
+ ToRegister,
+ /*check_detached*/ false)
+
+DEF_LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElement, Register, ToRegister,
+ /*check_detached*/ true)
+DEF_LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElementNoDeopt, Register,
+ ToRegister,
+ /*check_detached*/ false)
+
+DEF_LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElement, DoubleRegister,
+ ToDoubleRegister,
+ /*check_detached*/ true)
+DEF_LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElementNoDeopt, DoubleRegister,
+ ToDoubleRegister, /*check_detached*/ false)
+
+#undef DEF_LOAD_TYPED_ARRAY
+
+#define DEF_STORE_TYPED_ARRAY(Name, ValueReg, ToValueReg, check_detached) \
+ void Name::SetValueLocationConstraints() { \
+ UseRegister(object_input()); \
+ UseRegister(index_input()); \
+ UseRegister(value_input()); \
+ set_temporaries_needed(1); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Register object = ToRegister(object_input()); \
+ Register index = ToRegister(index_input()); \
+ ValueReg value = ToValueReg(value_input()); \
+ MaglevAssembler::ScratchRegisterScope temps(masm); \
+ Register scratch = temps.Acquire(); \
+ \
+ GenerateTypedArrayStore<check_detached>(masm, this, object, index, value, \
+ scratch, elements_kind_); \
+ }
+
+DEF_STORE_TYPED_ARRAY(StoreIntTypedArrayElement, Register, ToRegister,
+ /*check_detached*/ true)
+DEF_STORE_TYPED_ARRAY(StoreIntTypedArrayElementNoDeopt, Register, ToRegister,
+ /*check_detached*/ false)
+
+DEF_STORE_TYPED_ARRAY(StoreDoubleTypedArrayElement, DoubleRegister,
+ ToDoubleRegister,
+ /*check_detached*/ true)
+DEF_STORE_TYPED_ARRAY(StoreDoubleTypedArrayElementNoDeopt, DoubleRegister,
+ ToDoubleRegister, /*check_detached*/ false)
+
+#undef DEF_STORE_TYPED_ARRAY
+
+void StoreDoubleField::SetValueLocationConstraints() {
+ UseRegister(object_input());
+ UseRegister(value_input());
+ set_temporaries_needed(1);
+}
+void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register tmp = temps.Acquire();
+ Register object = ToRegister(object_input());
+ DoubleRegister value = ToDoubleRegister(value_input());
+
+ __ AssertNotSmi(object);
+ __ DecompressTagged(tmp, FieldOperand(object, offset()));
+ __ AssertNotSmi(tmp);
+ __ Movsd(FieldOperand(tmp, HeapNumber::kValueOffset), value);
+}
+
+void Int32AddWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ addl(left, right);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{left} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32SubtractWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ subl(left, right);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{left} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+ set_temporaries_needed(1);
+}
+
+void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register result = ToRegister(this->result());
+ Register right = ToRegister(right_input());
+ DCHECK_EQ(result, ToRegister(left_input()));
+
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register saved_left = temps.Acquire();
+ __ movl(saved_left, result);
+ // TODO(leszeks): peephole optimise multiplication by a constant.
+ __ imull(result, right);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{saved_left, result} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+
+ // If the result is zero, check if either lhs or rhs is negative.
+ Label end;
+ __ cmpl(result, Immediate(0));
+ __ j(not_zero, &end);
+ {
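+    // The product is zero, so at least one operand is zero; OR-ing the
+    // original operands makes the sign bit indicate whether either operand
+    // was negative.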
+ __ orl(saved_left, right);
+ __ cmpl(saved_left, Immediate(0));
+ // If one of them is negative, we must have a -0 result, which is non-int32,
+ // so deopt.
+ // TODO(leszeks): Consider splitting these deopts to have distinct deopt
+ // reasons. Otherwise, the reason has to match the above.
+ __ EmitEagerDeoptIf(less, DeoptimizeReason::kOverflow, this);
+ }
+ __ bind(&end);
+}
+
+void Int32ModulusWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseAndClobberRegister(right_input());
+ DefineAsFixed(this, rdx);
+  // rax and rdx are clobbered by div.
+ RequireSpecificTemporary(rax);
+ RequireSpecificTemporary(rdx);
+}
+
+void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // If AreAliased(lhs, rhs):
+ // deopt if lhs < 0 // Minus zero.
+ // 0
+ //
+ // Otherwise, use the same algorithm as in EffectControlLinearizer:
+ // if rhs <= 0 then
+ // rhs = -rhs
+ // deopt if rhs == 0
+ // if lhs < 0 then
+ // let lhs_abs = -lhs in
+ // let res = lhs_abs % rhs in
+ // deopt if res == 0
+ // -res
+ // else
+ // let msk = rhs - 1 in
+ // if rhs & msk == 0 then
+ // lhs & msk
+ // else
+ // lhs % rhs
+
+ Register lhs = ToRegister(left_input());
+ Register rhs = ToRegister(right_input());
+
+ static constexpr DeoptimizeReason deopt_reason =
+ DeoptimizeReason::kDivisionByZero;
+
+ if (lhs == rhs) {
+ // For the modulus algorithm described above, lhs and rhs must not alias
+ // each other.
+ __ testl(lhs, lhs);
+ // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
+ // allows one deopt reason per IR.
+ __ EmitEagerDeoptIf(negative, deopt_reason, this);
+ __ Move(ToRegister(result()), 0);
+ return;
+ }
+
+ DCHECK(!AreAliased(lhs, rhs, rax, rdx));
+
+ ZoneLabelRef done(masm);
+ ZoneLabelRef rhs_checked(masm);
+
+ __ cmpl(rhs, Immediate(0));
+ __ JumpToDeferredIf(
+ less_equal,
+ [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
+ Int32ModulusWithOverflow* node) {
+ __ negl(rhs);
+ __ EmitEagerDeoptIf(zero, deopt_reason, node);
+ __ jmp(*rhs_checked);
+ },
+ rhs_checked, rhs, this);
+ __ bind(*rhs_checked);
+
+ __ cmpl(lhs, Immediate(0));
+ __ JumpToDeferredIf(
+ less,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
+ Int32ModulusWithOverflow* node) {
+ // `divl(divisor)` divides rdx:rax by the divisor and stores the
+ // quotient in rax, the remainder in rdx.
+ __ movl(rax, lhs);
+ __ negl(rax);
+ __ xorl(rdx, rdx);
+ __ divl(rhs);
+ __ testl(rdx, rdx);
+ // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
+ // allows one deopt reason per IR.
+ __ EmitEagerDeoptIf(equal, deopt_reason, node);
+ __ negl(rdx);
+ __ jmp(*done);
+ },
+ done, lhs, rhs, this);
+
+ Label rhs_not_power_of_2;
+ Register mask = rax;
+ __ leal(mask, Operand(rhs, -1));
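+  // rhs & (rhs - 1) is zero exactly when rhs is a power of two; in that case
+  // lhs % rhs is simply lhs & (rhs - 1) for the non-negative lhs we have here.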
+ __ testl(rhs, mask);
+ __ j(not_zero, &rhs_not_power_of_2, Label::kNear);
+
+ // {rhs} is power of 2.
+ __ andl(mask, lhs);
+ __ movl(ToRegister(result()), mask);
+ __ jmp(*done, Label::kNear);
+
+ __ bind(&rhs_not_power_of_2);
+ // `divl(divisor)` divides rdx:rax by the divisor and stores the
+ // quotient in rax, the remainder in rdx.
+ __ movl(rax, lhs);
+ __ xorl(rdx, rdx);
+ __ divl(rhs);
+ // Result is implicitly written to rdx.
+ DCHECK_EQ(ToRegister(result()), rdx);
+
+ __ bind(*done);
+}
+
+void Int32DivideWithOverflow::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsFixed(this, rax);
+  // rax and rdx are clobbered by idiv.
+ RequireSpecificTemporary(rax);
+ RequireSpecificTemporary(rdx);
+}
+
+void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ movl(rax, left);
+
+ // TODO(leszeks): peephole optimise division by a constant.
+
+ // Sign extend eax into edx.
+ __ cdq();
+
+  // Pre-check for overflow, since idiv throws a division exception on
+  // overflow rather than setting the overflow flag. Logic copied from
+  // effect-control-linearizer.cc.
+
+ // Check if {right} is positive (and not zero).
+ __ cmpl(right, Immediate(0));
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(
+ less_equal,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register right,
+ Int32DivideWithOverflow* node) {
+ // {right} is negative or zero.
+
+ // Check if {right} is zero.
+ // We've already done the compare and flags won't be cleared yet.
+ // TODO(leszeks): Using kNotInt32 here, but kDivisionByZero would be
+ // better. Right now all eager deopts in a node have to be the same --
+ // we should allow a node to emit multiple eager deopts with different
+ // reasons.
+ __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);
+
+ // Check if {left} is zero, as that would produce minus zero. Left is in
+ // rax already.
+ __ cmpl(rax, Immediate(0));
+ // TODO(leszeks): Better DeoptimizeReason = kMinusZero.
+ __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);
+
+ // Check if {left} is kMinInt and {right} is -1, in which case we'd have
+ // to return -kMinInt, which is not representable as Int32.
+ __ cmpl(rax, Immediate(kMinInt));
+ __ j(not_equal, *done);
+ __ cmpl(right, Immediate(-1));
+ __ j(not_equal, *done);
+ // TODO(leszeks): Better DeoptimizeReason = kOverflow, but
+ // eager_deopt_info is already configured as kNotInt32.
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ },
+ done, right, this);
+ __ bind(*done);
+
+ // Perform the actual integer division.
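+  // `idivl(divisor)` divides rdx:rax by the divisor and stores the
+  // quotient in rax, the remainder in rdx.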
+ __ idivl(right);
+
+ // Check that the remainder is zero.
+ __ cmpl(rdx, Immediate(0));
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{rax, rdx} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
+ DCHECK_EQ(ToRegister(result()), rax);
+}
+
+void Int32BitwiseAnd::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32BitwiseAnd::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ andl(left, right);
+}
+
+void Int32BitwiseOr::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32BitwiseOr::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ orl(left, right);
+}
+
+void Int32BitwiseXor::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32BitwiseXor::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ xorl(left, right);
+}
+
+void Int32ShiftLeft::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ // Use the "shift by cl" variant of shl.
+ // TODO(leszeks): peephole optimise shifts by a constant.
+ UseFixed(right_input(), rcx);
+ DefineSameAsFirst(this);
+}
+
+void Int32ShiftLeft::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ DCHECK_EQ(rcx, ToRegister(right_input()));
+ __ shll_cl(left);
+}
+
+void Int32ShiftRight::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ // Use the "shift by cl" variant of sar.
+ // TODO(leszeks): peephole optimise shifts by a constant.
+ UseFixed(right_input(), rcx);
+ DefineSameAsFirst(this);
+}
+
+void Int32ShiftRight::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ DCHECK_EQ(rcx, ToRegister(right_input()));
+ __ sarl_cl(left);
+}
+
+void Int32ShiftRightLogical::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ // Use the "shift by cl" variant of shr.
+ // TODO(leszeks): peephole optimise shifts by a constant.
+ UseFixed(right_input(), rcx);
+ DefineSameAsFirst(this);
+}
+
+void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ DCHECK_EQ(rcx, ToRegister(right_input()));
+ __ shrl_cl(left);
+}
+
+void Int32IncrementWithOverflow::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input());
+ __ incl(value);
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32DecrementWithOverflow::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input());
+ __ decl(value);
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32NegateWithOverflow::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input());
+ // Deopt when the result would be -0.
+ __ testl(value, value);
+ __ EmitEagerDeoptIf(zero, DeoptimizeReason::kOverflow, this);
+
+ __ negl(value);
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+}
+
+void Int32BitwiseNot::SetValueLocationConstraints() {
+ UseRegister(value_input());
+ DefineSameAsFirst(this);
+}
+
+void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register value = ToRegister(value_input());
+ __ notl(value);
+}
+
+void Float64Add::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Float64Add::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ __ Addsd(left, right);
+}
+
+void Float64Subtract::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Float64Subtract::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ __ Subsd(left, right);
+}
+
+void Float64Multiply::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Float64Multiply::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ __ Mulsd(left, right);
+}
+
+void Float64Divide::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(this);
+}
+
+void Float64Divide::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ __ Divsd(left, right);
+}
+
+void Float64Modulus::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ RequireSpecificTemporary(rax);
+ DefineAsRegister(this);
+}
+
+void Float64Modulus::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // Approach copied from code-generator-x64.cc
+ // Allocate space to use fld to move the value to the FPU stack.
+ __ AllocateStackSpace(kDoubleSize);
+ Operand scratch_stack_space = Operand(rsp, 0);
+ __ Movsd(scratch_stack_space, ToDoubleRegister(right_input()));
+ __ fld_d(scratch_stack_space);
+ __ Movsd(scratch_stack_space, ToDoubleRegister(left_input()));
+ __ fld_d(scratch_stack_space);
+ // Loop while fprem isn't done.
+ Label mod_loop;
+ __ bind(&mod_loop);
+  // This instruction traps on all kinds of inputs, but we are assuming the
+  // floating point control word is set to ignore them all.
+ __ fprem();
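+  // fprem leaves the C2 condition flag set while the reduction is incomplete;
+  // after fnstsw/sahf below C2 maps to the parity flag, hence the parity_even
+  // loop condition.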
+  // The following 2 instructions implicitly use rax.
+ __ fnstsw_ax();
+ if (CpuFeatures::IsSupported(SAHF)) {
+ CpuFeatureScope sahf_scope(masm, SAHF);
+ __ sahf();
+ } else {
+ __ shrl(rax, Immediate(8));
+ __ andl(rax, Immediate(0xFF));
+ __ pushq(rax);
+ __ popfq();
+ }
+ __ j(parity_even, &mod_loop);
+ // Move output to stack and clean up.
+ __ fstp(1);
+ __ fstp_d(scratch_stack_space);
+ __ Movsd(ToDoubleRegister(result()), scratch_stack_space);
+ __ addq(rsp, Immediate(kDoubleSize));
+}
+
+void Float64Negate::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+
+void Float64Negate::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister value = ToDoubleRegister(input());
+ __ Negpd(value, value, kScratchRegister);
+}
+
+void Float64Round::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister in = ToDoubleRegister(input());
+ DoubleRegister out = ToDoubleRegister(result());
+
+ if (kind_ == Kind::kNearest) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ DoubleRegister temp = temps.AcquireDouble();
+ __ Move(temp, in);
+ __ Roundsd(out, in, kRoundToNearest);
+    // RoundToNearest rounds to even on a tie, while JS expects it to round
+ // towards +Infinity. Fix the difference by checking if we rounded down by
+ // exactly 0.5, and if so, round to the other side.
+ __ Subsd(temp, out);
+ __ Move(kScratchDoubleReg, 0.5);
+ Label done;
+ __ Ucomisd(temp, kScratchDoubleReg);
+ __ JumpIf(not_equal, &done, Label::kNear);
+ // Fix wrong tie-to-even by adding 0.5 twice.
+ __ Addsd(out, kScratchDoubleReg);
+ __ Addsd(out, kScratchDoubleReg);
+ __ bind(&done);
+ } else if (kind_ == Kind::kFloor) {
+ __ Roundsd(out, in, kRoundDown);
+ } else if (kind_ == Kind::kCeil) {
+ __ Roundsd(out, in, kRoundUp);
+ }
+}
+
+int Float64Exponentiate::MaxCallStackArgs() const {
+ return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(2);
+}
+void Float64Exponentiate::SetValueLocationConstraints() {
+ UseFixed(left_input(), xmm0);
+ UseFixed(right_input(), xmm1);
+ DefineSameAsFirst(this);
+}
+void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
+}
+
+int Float64Ieee754Unary::MaxCallStackArgs() const {
+ return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(1);
+}
+void Float64Ieee754Unary::SetValueLocationConstraints() {
+ UseFixed(input(), xmm0);
+ DefineSameAsFirst(this);
+}
+void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(ieee_function_, 1);
+}
+
+void Float64SilenceNaN::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+
+void Float64SilenceNaN::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister value = ToDoubleRegister(input());
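+  // Subtracting 0.0 turns a signalling NaN into a quiet NaN and leaves every
+  // other value, including -0.0, unchanged.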
+ __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Subsd(value, kScratchDoubleReg);
+}
+
+template <class Derived, Operation kOperation>
+void Float64CompareNode<Derived, kOperation>::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineAsRegister(this);
+}
+
+template <class Derived, Operation kOperation>
+void Float64CompareNode<Derived, kOperation>::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ Register result = ToRegister(this->result());
+ Label is_false, end;
+ __ Ucomisd(left, right);
+  // Ucomisd sets the flags as follows:
+  // UNORDERED (one of the operands is NaN): ZF,PF,CF := 111;
+  // GREATER_THAN: ZF,PF,CF := 000;
+  // LESS_THAN: ZF,PF,CF := 001;
+  // EQUAL: ZF,PF,CF := 100;
+  // Since ZF can be set by either NaN or EQUAL, we check for NaN first.
+ __ j(parity_even, &is_false);
+ __ j(NegateCondition(ConditionForFloat64(kOperation)), &is_false);
+ // TODO(leszeks): Investigate loading existing materialisations of roots here,
+ // if available.
+ __ LoadRoot(result, RootIndex::kTrueValue);
+ __ jmp(&end);
+ {
+ __ bind(&is_false);
+ __ LoadRoot(result, RootIndex::kFalseValue);
+ }
+ __ bind(&end);
+}
+
+#define DEF_OPERATION(Name) \
+ void Name::SetValueLocationConstraints() { \
+ Base::SetValueLocationConstraints(); \
+ } \
+ void Name::GenerateCode(MaglevAssembler* masm, \
+ const ProcessingState& state) { \
+ Base::GenerateCode(masm, state); \
+ }
+DEF_OPERATION(Float64Equal)
+DEF_OPERATION(Float64StrictEqual)
+DEF_OPERATION(Float64LessThan)
+DEF_OPERATION(Float64LessThanOrEqual)
+DEF_OPERATION(Float64GreaterThan)
+DEF_OPERATION(Float64GreaterThanOrEqual)
+#undef DEF_OPERATION
+
+void CheckInt32IsSmi::SetValueLocationConstraints() { UseRegister(input()); }
+void CheckInt32IsSmi::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // TODO(leszeks): This basically does a SmiTag and throws the result away.
+ // Don't throw the result away if we want to actually use it.
+ Register reg = ToRegister(input());
+ __ movl(kScratchRegister, reg);
+ __ addl(kScratchRegister, kScratchRegister);
+ DCHECK_REGLIST_EMPTY(RegList{reg} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kNotASmi, this);
+}
+
+void CheckedSmiTagInt32::SetValueLocationConstraints() {
+ UseAndClobberRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedSmiTagInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
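+  // Smi-tag by doubling: the 32-bit add shifts the value left by one and sets
+  // the overflow flag if the result does not fit in a Smi.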
+ __ addl(reg, reg);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{reg} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
+ __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
+}
+
+void CheckedSmiTagUint32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedSmiTagUint32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
+ // Perform an unsigned comparison against Smi::kMaxValue.
+ __ cmpl(reg, Immediate(Smi::kMaxValue));
+ __ EmitEagerDeoptIf(above, DeoptimizeReason::kOverflow, this);
+ __ addl(reg, reg);
+ __ Assert(no_overflow, AbortReason::kInputDoesNotFitSmi);
+}
+
+void UnsafeSmiTag::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
+ if (v8_flags.debug_code) {
+ if (input().node()->properties().value_representation() ==
+ ValueRepresentation::kUint32) {
+ __ cmpl(reg, Immediate(Smi::kMaxValue));
+ __ Check(below_equal, AbortReason::kInputDoesNotFitSmi);
+ }
+ }
+ __ addl(reg, reg);
+ if (v8_flags.debug_code) {
+ __ Check(no_overflow, AbortReason::kInputDoesNotFitSmi);
+ }
+}
+
+void Int32ToNumber::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void Int32ToNumber::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ ZoneLabelRef done(masm);
+ Register value = ToRegister(input());
+ Register object = ToRegister(result());
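+  // Smi-tag a copy of the value in the scratch register; if that overflows,
+  // the deferred code below boxes the value in a freshly allocated HeapNumber
+  // instead.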
+ __ movl(kScratchRegister, value);
+ __ addl(kScratchRegister, kScratchRegister);
+ __ JumpToDeferredIf(
+ overflow,
+ [](MaglevAssembler* masm, Register object, Register value,
+ ZoneLabelRef done, Int32ToNumber* node) {
+ DoubleRegister double_value = kScratchDoubleReg;
+ __ Cvtlsi2sd(double_value, value);
+ __ AllocateHeapNumber(node->register_snapshot(), object, double_value);
+ __ jmp(*done);
+ },
+ object, value, done, this);
+ __ Move(object, kScratchRegister);
+ __ bind(*done);
+}
+
+void Uint32ToNumber::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void Uint32ToNumber::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ ZoneLabelRef done(masm);
+ Register value = ToRegister(input());
+ Register object = ToRegister(result());
+ __ cmpl(value, Immediate(Smi::kMaxValue));
+ __ JumpToDeferredIf(
+ above,
+ [](MaglevAssembler* masm, Register object, Register value,
+ ZoneLabelRef done, Uint32ToNumber* node) {
+ DoubleRegister double_value = kScratchDoubleReg;
+ __ Cvtlui2sd(double_value, value);
+ __ AllocateHeapNumber(node->register_snapshot(), object, double_value);
+ __ jmp(*done);
+ },
+ object, value, done, this);
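+  // The value fits in a Smi, so tag it in place by doubling.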
+ __ addl(value, value);
+ DCHECK_EQ(object, value);
+ __ bind(*done);
+}
+
+namespace {
+
+void JumpToFailIfNotHeapNumberOrOddball(
+ MaglevAssembler* masm, Register value,
+ TaggedToFloat64ConversionType conversion_type, Label* fail) {
+ switch (conversion_type) {
+ case TaggedToFloat64ConversionType::kNumberOrOddball:
+ // Check if HeapNumber or Oddball, jump to fail otherwise.
+ static_assert(InstanceType::HEAP_NUMBER_TYPE + 1 ==
+ InstanceType::ODDBALL_TYPE);
+ if (fail) {
+ __ CompareObjectTypeRange(value, InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ODDBALL_TYPE);
+ __ JumpIf(kUnsignedGreaterThan, fail);
+ } else {
+ if (v8_flags.debug_code) {
+ __ CompareObjectTypeRange(value, InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ODDBALL_TYPE);
+ __ Assert(kUnsignedLessThanEqual, AbortReason::kUnexpectedValue);
+ }
+ }
+ break;
+ case TaggedToFloat64ConversionType::kNumber:
+ // Check if HeapNumber, jump to fail otherwise.
+ if (fail) {
+ __ IsObjectType(value, InstanceType::HEAP_NUMBER_TYPE);
+ __ JumpIf(kNotEqual, fail);
+ } else {
+ if (v8_flags.debug_code) {
+ __ IsObjectType(value, InstanceType::HEAP_NUMBER_TYPE);
+ __ Assert(kEqual, AbortReason::kUnexpectedValue);
+ }
+ }
+ break;
+ }
+}
+
+void TryUnboxNumberOrOddball(MaglevAssembler* masm, DoubleRegister dst,
+ Register clobbered_src,
+ TaggedToFloat64ConversionType conversion_type,
+ Label* fail) {
+ Label is_not_smi, done;
+ // Check if Smi.
+ __ JumpIfNotSmi(clobbered_src, &is_not_smi, Label::kNear);
+ // If Smi, convert to Float64.
+ __ SmiToInt32(clobbered_src);
+ __ Cvtlsi2sd(dst, clobbered_src);
+ __ jmp(&done, Label::kNear);
+ __ bind(&is_not_smi);
+ JumpToFailIfNotHeapNumberOrOddball(masm, clobbered_src, conversion_type,
+ fail);
+ static_assert(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
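+  // Thanks to the matching offsets asserted above, this load also reads an
+  // Oddball's cached ToNumber value.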
+ __ LoadHeapNumberValue(dst, clobbered_src);
+ __ bind(&done);
+}
+
+} // namespace
+
+void CheckedNumberOrOddballToFloat64::SetValueLocationConstraints() {
+ UseAndClobberRegister(input());
+ DefineAsRegister(this);
+}
+void CheckedNumberOrOddballToFloat64::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ TryUnboxNumberOrOddball(
+ masm, ToDoubleRegister(result()), value, conversion_type(),
+ __ GetDeoptLabel(this, DeoptimizeReason::kNotANumberOrOddball));
+}
+
+void UncheckedNumberOrOddballToFloat64::SetValueLocationConstraints() {
+ UseAndClobberRegister(input());
+ DefineAsRegister(this);
+}
+void UncheckedNumberOrOddballToFloat64::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ TryUnboxNumberOrOddball(masm, ToDoubleRegister(result()), value,
+ conversion_type(), nullptr);
+}
+
+namespace {
+
+void EmitTruncateNumberOrOddballToInt32(
+ MaglevAssembler* masm, Register value, Register result_reg,
+ TaggedToFloat64ConversionType conversion_type, Label* not_a_number) {
+ Label is_not_smi, done;
+ // Check if Smi.
+ __ JumpIfNotSmi(value, &is_not_smi, Label::kNear);
+ // If Smi, convert to Int32.
+ __ SmiToInt32(value);
+ __ jmp(&done, Label::kNear);
+ __ bind(&is_not_smi);
+ JumpToFailIfNotHeapNumberOrOddball(masm, value, conversion_type,
+ not_a_number);
+ static_assert(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ auto double_value = kScratchDoubleReg;
+ __ Movsd(double_value, FieldOperand(value, HeapNumber::kValueOffset));
+ __ TruncateDoubleToInt32(result_reg, double_value);
+ __ bind(&done);
+}
+
+} // namespace
+
+void CheckedTruncateNumberOrOddballToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedTruncateNumberOrOddballToInt32::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ Register result_reg = ToRegister(result());
+ DCHECK_EQ(value, result_reg);
+ Label* deopt_label =
+ __ GetDeoptLabel(this, DeoptimizeReason::kNotANumberOrOddball);
+ EmitTruncateNumberOrOddballToInt32(masm, value, result_reg, conversion_type(),
+ deopt_label);
+}
+
+void TruncateNumberOrOddballToInt32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void TruncateNumberOrOddballToInt32::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ Register value = ToRegister(input());
+ Register result_reg = ToRegister(result());
+ DCHECK_EQ(value, result_reg);
+ EmitTruncateNumberOrOddballToInt32(masm, value, result_reg, conversion_type(),
+ nullptr);
+}
+
+void SetPendingMessage::SetValueLocationConstraints() {
+ UseRegister(value());
+ set_temporaries_needed(1);
+ DefineAsRegister(this);
+}
+
+void SetPendingMessage::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register new_message = ToRegister(value());
+ Register return_value = ToRegister(result());
+
+ MemOperand pending_message_operand = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_pending_message(masm->isolate()),
+ kScratchRegister);
+
+ if (new_message != return_value) {
+ __ Move(return_value, pending_message_operand);
+ __ movq(pending_message_operand, new_message);
+ } else {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, pending_message_operand);
+ __ movq(pending_message_operand, new_message);
+ __ Move(return_value, scratch);
+ }
+}
+
+void TestUndetectable::SetValueLocationConstraints() {
+ UseRegister(value());
+ set_temporaries_needed(1);
+ DefineAsRegister(this);
+}
+void TestUndetectable::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(value());
+ Register return_value = ToRegister(result());
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
+ Label return_false, done;
+ __ JumpIfSmi(object, &return_false, Label::kNear);
+ // For heap objects, check the map's undetectable bit.
+ __ LoadMap(scratch, object);
+ __ testl(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(Map::Bits1::IsUndetectableBit::kMask));
+ __ j(zero, &return_false, Label::kNear);
+
+ __ LoadRoot(return_value, RootIndex::kTrueValue);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&return_false);
+ __ LoadRoot(return_value, RootIndex::kFalseValue);
+
+ __ bind(&done);
+}
+
+void CheckedInt32ToUint32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineSameAsFirst(this);
+}
+void CheckedInt32ToUint32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register input_reg = ToRegister(input());
+ __ testl(input_reg, input_reg);
+ __ EmitEagerDeoptIf(negative, DeoptimizeReason::kNotUint32, this);
+}
+
+void ChangeInt32ToFloat64::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void ChangeInt32ToFloat64::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ Cvtlsi2sd(ToDoubleRegister(result()), ToRegister(input()));
+}
+
+void ChangeUint32ToFloat64::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void ChangeUint32ToFloat64::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // TODO(leszeks): Cvtlui2sd does a manual movl to clear the top bits of the
+ // input register. We could eliminate this movl by ensuring that word32
+ // registers are always written with 32-bit ops and not 64-bit ones.
+ __ Cvtlui2sd(ToDoubleRegister(result()), ToRegister(input()));
+}
+
+void CheckedTruncateFloat64ToUint32::SetValueLocationConstraints() {
+ UseRegister(input());
+ DefineAsRegister(this);
+}
+void CheckedTruncateFloat64ToUint32::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ DoubleRegister input_reg = ToDoubleRegister(input());
+ Register result_reg = ToRegister(result());
+ DoubleRegister converted_back = kScratchDoubleReg;
+
+ // Convert the input float64 value to uint32.
+ Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32);
+ __ Cvttsd2ui(result_reg, input_reg, deopt);
+ // Convert that uint32 value back to float64.
+ __ Cvtlui2sd(converted_back, result_reg);
+  // Check that the result of the float64->uint32->float64 roundtrip is equal
+  // to the input (i.e. that the conversion didn't truncate).
+ __ Ucomisd(input_reg, converted_back);
+ __ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotUint32, this);
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotUint32, this);
+
+ // Check if {input} is -0.
+ Label check_done;
+ __ cmpl(result_reg, Immediate(0));
+ __ j(not_equal, &check_done);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Register high_word32_of_input = kScratchRegister;
+ __ Pextrd(high_word32_of_input, input_reg, 1);
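+  // The sign bit of the double sits in the extracted high word, so a negative
+  // high word together with a zero result means the input was -0.0.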
+ __ cmpl(high_word32_of_input, Immediate(0));
+ __ EmitEagerDeoptIf(less, DeoptimizeReason::kNotUint32, this);
+
+ __ bind(&check_done);
+}
+
+void IncreaseInterruptBudget::SetValueLocationConstraints() {
+ set_temporaries_needed(1);
+}
+void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedField(scratch,
+ FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+ __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
+ Immediate(amount()));
+}
+
+namespace {
+
+enum class ReduceInterruptBudgetType { kLoop, kReturn };
+
+void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
+ Node* node, ReduceInterruptBudgetType type,
+ Register scratch0) {
+ // For loops, first check for interrupts. Don't do this for returns, as we
+ // can't lazy deopt to the end of a return.
+ if (type == ReduceInterruptBudgetType::kLoop) {
+ Label next;
+
+ // Here, we only care about interrupts since we've already guarded against
+ // real stack overflows on function entry.
+ __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
+ __ j(above, &next);
+
+ // An interrupt has been requested and we must call into runtime to handle
+ // it; since we already pay the call cost, combine with the TieringManager
+ // call.
+ {
+ SaveRegisterStateForCall save_register_state(masm,
+ node->register_snapshot());
+ __ Move(kContextRegister, masm->native_context().object());
+ __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
+ save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
+ }
+ __ jmp(*done); // All done, continue.
+
+ __ bind(&next);
+ }
+
+ // No pending interrupts. Call into the TieringManager if needed.
+ {
+ // Skip the runtime call if the tiering state is kInProgress. The runtime
+ // only performs simple bookkeeping in this case, which we can easily
+ // replicate here in generated code.
+ // TODO(jgruber): Use the correct feedback vector once Maglev inlining is
+ // enabled.
+ Label update_profiler_ticks_and_interrupt_budget;
+ {
+ static_assert(kTieringStateInProgressBlocksTierup);
+ const Register scratch1 = kScratchRegister;
+ __ Move(scratch0, masm->compilation_info()
+ ->toplevel_compilation_unit()
+ ->feedback()
+ .object());
+
+ // If tiering_state is kInProgress, skip the runtime call.
+ __ movzxwl(scratch1,
+ FieldOperand(scratch0, FeedbackVector::kFlagsOffset));
+ __ DecodeField<FeedbackVector::TieringStateBits>(scratch1);
+ __ cmpl(scratch1, Immediate(static_cast<int>(TieringState::kInProgress)));
+ __ j(equal, &update_profiler_ticks_and_interrupt_budget);
+
+ // If osr_tiering_state is kInProgress, skip the runtime call.
+ __ movzxwl(scratch1,
+ FieldOperand(scratch0, FeedbackVector::kFlagsOffset));
+ __ DecodeField<FeedbackVector::OsrTieringStateBit>(scratch1);
+ __ cmpl(scratch1, Immediate(static_cast<int>(TieringState::kInProgress)));
+ __ j(equal, &update_profiler_ticks_and_interrupt_budget);
+ }
+
+ {
+ SaveRegisterStateForCall save_register_state(masm,
+ node->register_snapshot());
+ __ Move(kContextRegister, masm->native_context().object());
+ __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ // Note: must not cause a lazy deopt!
+ __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
+ save_register_state.DefineSafepoint();
+ }
+ __ jmp(*done);
+
+ __ bind(&update_profiler_ticks_and_interrupt_budget);
+ // We are skipping the call to Runtime::kBytecodeBudgetInterrupt_Maglev
+ // since the tiering state is kInProgress. Perform bookkeeping that would
+ // have been done in the runtime function:
+ __ AssertFeedbackVector(scratch0);
+ // FeedbackVector::SaturatingIncrementProfilerTicks.
+ // TODO(jgruber): This isn't saturating and thus we may theoretically
+ // exceed Smi::kMaxValue. But, 1) this is very unlikely since it'd take
+ // quite some time to exhaust the budget that many times; and 2) even an
+ // overflow doesn't hurt us at all.
+ __ incl(FieldOperand(scratch0, FeedbackVector::kProfilerTicksOffset));
+ // JSFunction::SetInterruptBudget.
+ __ movq(scratch0, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedField(scratch0,
+ FieldOperand(scratch0, JSFunction::kFeedbackCellOffset));
+ __ movl(FieldOperand(scratch0, FeedbackCell::kInterruptBudgetOffset),
+ Immediate(v8_flags.interrupt_budget));
+ __ jmp(*done);
+ }
+}
+
+void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
+ ReduceInterruptBudgetType type, int amount) {
+ MaglevAssembler::ScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ LoadTaggedField(scratch,
+ FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+ __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
+ Immediate(amount));
+ ZoneLabelRef done(masm);
+ __ JumpToDeferredIf(less, HandleInterruptsAndTiering, done, node, type,
+ scratch);
+ __ bind(*done);
+}
+
+} // namespace
+
+int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
+void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
+ set_temporaries_needed(1);
+}
+void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ GenerateReduceInterruptBudget(masm, this, ReduceInterruptBudgetType::kLoop,
+ amount());
+}
+
+int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
+void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
+ set_temporaries_needed(1);
+}
+void ReduceInterruptBudgetForReturn::GenerateCode(
+ MaglevAssembler* masm, const ProcessingState& state) {
+ GenerateReduceInterruptBudget(masm, this, ReduceInterruptBudgetType::kReturn,
+ amount());
+}
+
+int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; }
+void ThrowIfNotSuperConstructor::SetValueLocationConstraints() {
+ UseRegister(constructor());
+ UseRegister(function());
+}
+void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ LoadMap(kScratchRegister, ToRegister(constructor()));
+ __ testl(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(Map::Bits1::IsConstructorBit::kMask));
+ __ JumpToDeferredIf(
+ equal,
+ [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) {
+ __ Push(ToRegister(node->constructor()));
+ __ Push(ToRegister(node->function()));
+ __ Move(kContextRegister, masm->native_context().object());
+ __ CallRuntime(Runtime::kThrowNotSuperConstructor, 2);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
+ __ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ },
+ this);
+}
+
+int FunctionEntryStackCheck::MaxCallStackArgs() const { return 1; }
+void FunctionEntryStackCheck::SetValueLocationConstraints() {}
+void FunctionEntryStackCheck::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real
+ // stack limit or tighter. By ensuring we have space until that limit
+ // after building the frame we can quickly precheck both at once.
+ const int stack_check_offset = masm->code_gen_state()->stack_check_offset();
+ Register stack_cmp_reg = rsp;
+ if (stack_check_offset > kStackLimitSlackForDeoptimizationInBytes) {
+ stack_cmp_reg = kScratchRegister;
+ __ leaq(stack_cmp_reg, Operand(rsp, -stack_check_offset));
+ }
+ __ cmpq(stack_cmp_reg,
+ __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
+
+ ZoneLabelRef deferred_call_stack_guard_return(masm);
+ __ JumpToDeferredIf(
+ below_equal,
+ [](MaglevAssembler* masm, FunctionEntryStackCheck* node,
+ ZoneLabelRef done, int stack_check_offset) {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ {
+ SaveRegisterStateForCall save_register_state(
+ masm, node->register_snapshot());
+ // Push the frame size
+ __ Push(Immediate(Smi::FromInt(stack_check_offset)));
+ __ CallRuntime(Runtime::kStackGuardWithGap, 1);
+ save_register_state.DefineSafepointWithLazyDeopt(
+ node->lazy_deopt_info());
+ }
+ __ jmp(*done);
+ },
+ this, deferred_call_stack_guard_return, stack_check_offset);
+ __ bind(*deferred_call_stack_guard_return);
+}
+
+// ---
+// Control nodes
+// ---
+void Return::SetValueLocationConstraints() {
+ UseFixed(value_input(), kReturnRegister0);
+}
+void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
+ DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
+
+  // Read the number of formal parameters from the top level compilation unit
+  // (i.e. the outermost, non-inlined function).
+ int formal_params_size =
+ masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
+
+ // We're not going to continue execution, so we can use an arbitrary register
+ // here instead of relying on temporaries from the register allocator.
+ Register actual_params_size = r8;
+
+ // Compute the size of the actual parameters + receiver (in bytes).
+ // TODO(leszeks): Consider making this an input into Return to re-use the
+ // incoming argc's register (if it's still valid).
+ __ movq(actual_params_size,
+ MemOperand(rbp, StandardFrameConstants::kArgCOffset));
+
+ // Leave the frame.
+ __ LeaveFrame(StackFrame::MAGLEV);
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label drop_dynamic_arg_size;
+ __ cmpq(actual_params_size, Immediate(formal_params_size));
+ __ j(greater, &drop_dynamic_arg_size);
+
+ // Drop receiver + arguments according to static formal arguments size.
+ __ Ret(formal_params_size * kSystemPointerSize, kScratchRegister);
+
+ __ bind(&drop_dynamic_arg_size);
+ // Drop receiver + arguments according to dynamic arguments size.
+ __ DropArguments(actual_params_size, r9, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
+ __ Ret();
+}
+
+void BranchIfFloat64Compare::SetValueLocationConstraints() {
+ UseRegister(left_input());
+ UseRegister(right_input());
+}
+void BranchIfFloat64Compare::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister left = ToDoubleRegister(left_input());
+ DoubleRegister right = ToDoubleRegister(right_input());
+ __ Ucomisd(left, right);
+ if (jump_mode_if_nan_ == JumpModeIfNaN::kJumpToTrue) {
+ __ j(parity_even, if_true()->label());
+ } else {
+ __ j(parity_even, if_false()->label());
+ }
+ __ Branch(ConditionForFloat64(operation_), if_true(), if_false(),
+ state.next_block());
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index 76f072e4f9..2e54111e4e 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -289,24 +289,29 @@ enum class Sign { kNegative, kPositive, kNone };
// ES6 18.2.5 parseInt(string, radix) (with NumberParseIntHelper subclass);
// and BigInt parsing cases from https://tc39.github.io/proposal-bigint/
// (with StringToBigIntHelper subclass).
-template <typename IsolateT>
class StringToIntHelper {
public:
- StringToIntHelper(IsolateT* isolate, Handle<String> subject, int radix)
- : isolate_(isolate), subject_(subject), radix_(radix) {
+ StringToIntHelper(Handle<String> subject, int radix)
+ : subject_(subject), radix_(radix) {
DCHECK(subject->IsFlat());
}
+ // Used for the NumberParseInt operation
+ StringToIntHelper(const uint8_t* subject, int radix, int length)
+ : raw_one_byte_subject_(subject), radix_(radix), length_(length) {}
+
+ StringToIntHelper(const base::uc16* subject, int radix, int length)
+ : raw_two_byte_subject_(subject), radix_(radix), length_(length) {}
+
// Used for the StringToBigInt operation.
- StringToIntHelper(IsolateT* isolate, Handle<String> subject)
- : isolate_(isolate), subject_(subject) {
+ explicit StringToIntHelper(Handle<String> subject) : subject_(subject) {
DCHECK(subject->IsFlat());
}
// Used for parsing BigInt literals, where the input is a Zone-allocated
// buffer of one-byte digits, along with an optional radix prefix.
- StringToIntHelper(IsolateT* isolate, const uint8_t* subject, int length)
- : isolate_(isolate), raw_one_byte_subject_(subject), length_(length) {}
+ StringToIntHelper(const uint8_t* subject, int length)
+ : raw_one_byte_subject_(subject), length_(length) {}
virtual ~StringToIntHelper() = default;
protected:
@@ -326,6 +331,7 @@ class StringToIntHelper {
bool allow_trailing_junk() { return allow_trailing_junk_; }
bool IsOneByte() const {
+ if (raw_two_byte_subject_ != nullptr) return false;
return raw_one_byte_subject_ != nullptr ||
String::IsOneByteRepresentationUnderneath(*subject_);
}
@@ -340,10 +346,12 @@ class StringToIntHelper {
base::Vector<const base::uc16> GetTwoByteVector(
const DisallowGarbageCollection& no_gc) {
+ if (raw_two_byte_subject_ != nullptr) {
+ return base::Vector<const base::uc16>(raw_two_byte_subject_, length_);
+ }
return subject_->GetFlatContent(no_gc).ToUC16Vector();
}
- IsolateT* isolate() { return isolate_; }
int radix() { return radix_; }
int cursor() { return cursor_; }
int length() { return length_; }
@@ -356,9 +364,9 @@ class StringToIntHelper {
template <class Char>
void DetectRadixInternal(Char current, int length);
- IsolateT* isolate_;
Handle<String> subject_;
const uint8_t* raw_one_byte_subject_ = nullptr;
+ const base::uc16* raw_two_byte_subject_ = nullptr;
int radix_ = 0;
int cursor_ = 0;
int length_ = 0;
@@ -369,8 +377,7 @@ class StringToIntHelper {
State state_ = State::kRunning;
};
-template <typename IsolateT>
-void StringToIntHelper<IsolateT>::ParseInt() {
+void StringToIntHelper::ParseInt() {
DisallowGarbageCollection no_gc;
if (IsOneByte()) {
base::Vector<const uint8_t> vector = GetOneByteVector(no_gc);
@@ -385,10 +392,8 @@ void StringToIntHelper<IsolateT>::ParseInt() {
}
}
-template <typename IsolateT>
template <class Char>
-void StringToIntHelper<IsolateT>::DetectRadixInternal(Char current,
- int length) {
+void StringToIntHelper::DetectRadixInternal(Char current, int length) {
Char start = current;
length_ = length;
Char end = start + length;
@@ -465,10 +470,16 @@ void StringToIntHelper<IsolateT>::DetectRadixInternal(Char current,
cursor_ = static_cast<int>(current - start);
}
-class NumberParseIntHelper : public StringToIntHelper<Isolate> {
+class NumberParseIntHelper : public StringToIntHelper {
public:
- NumberParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
- : StringToIntHelper(isolate, string, radix) {}
+ NumberParseIntHelper(Handle<String> string, int radix)
+ : StringToIntHelper(string, radix) {}
+
+ NumberParseIntHelper(const uint8_t* string, int radix, int length)
+ : StringToIntHelper(string, radix, length) {}
+
+ NumberParseIntHelper(const base::uc16* string, int radix, int length)
+ : StringToIntHelper(string, radix, length) {}
template <class Char>
void ParseInternal(Char start) {
@@ -911,29 +922,31 @@ double StringToDouble(base::Vector<const base::uc16> str, int flags,
}
double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
- NumberParseIntHelper helper(isolate, string, radix);
+ NumberParseIntHelper helper(string, radix);
return helper.GetResult();
}
template <typename IsolateT>
-class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
+class StringToBigIntHelper : public StringToIntHelper {
public:
enum class Behavior { kStringToBigInt, kLiteral };
// Used for StringToBigInt operation (BigInt constructor and == operator).
StringToBigIntHelper(IsolateT* isolate, Handle<String> string)
- : StringToIntHelper<IsolateT>(isolate, string),
+ : StringToIntHelper(string),
+ isolate_(isolate),
behavior_(Behavior::kStringToBigInt) {
- this->set_allow_binary_and_octal_prefixes();
- this->set_disallow_trailing_junk();
+ set_allow_binary_and_octal_prefixes();
+ set_disallow_trailing_junk();
}
// Used for parsing BigInt literals, where the input is a buffer of
// one-byte ASCII digits, along with an optional radix prefix.
StringToBigIntHelper(IsolateT* isolate, const uint8_t* string, int length)
- : StringToIntHelper<IsolateT>(isolate, string, length),
+ : StringToIntHelper(string, length),
+ isolate_(isolate),
behavior_(Behavior::kLiteral) {
- this->set_allow_binary_and_octal_prefixes();
+ set_allow_binary_and_octal_prefixes();
}
void ParseOneByte(const uint8_t* start) final { return ParseInternal(start); }
@@ -942,14 +955,14 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
}
MaybeHandle<BigInt> GetResult() {
- this->ParseInt();
- if (behavior_ == Behavior::kStringToBigInt && this->sign() != Sign::kNone &&
- this->radix() != 10) {
+ ParseInt();
+ if (behavior_ == Behavior::kStringToBigInt && sign() != Sign::kNone &&
+ radix() != 10) {
return MaybeHandle<BigInt>();
}
- if (this->state() == State::kEmpty) {
+ if (state() == State::kEmpty) {
if (behavior_ == Behavior::kStringToBigInt) {
- this->set_state(State::kZero);
+ set_state(State::kZero);
} else {
UNREACHABLE();
}
@@ -959,10 +972,10 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
case State::kError:
return MaybeHandle<BigInt>();
case State::kZero:
- return BigInt::Zero(this->isolate(), allocation_type());
+ return BigInt::Zero(isolate(), allocation_type());
case State::kDone:
- return BigInt::Allocate(this->isolate(), &accumulator_,
- this->negative(), allocation_type());
+ return BigInt::Allocate(isolate(), &accumulator_, negative(),
+ allocation_type());
case State::kEmpty:
case State::kRunning:
break;
@@ -974,12 +987,12 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
// that the literal is valid and not too big, so this always succeeds.
std::unique_ptr<char[]> DecimalString(bigint::Processor* processor) {
DCHECK_EQ(behavior_, Behavior::kLiteral);
- this->ParseInt();
- if (this->state() == State::kZero) {
+ ParseInt();
+ if (state() == State::kZero) {
// Input may have been "0x0" or similar.
return std::unique_ptr<char[]>(new char[2]{'0', '\0'});
}
- DCHECK_EQ(this->state(), State::kDone);
+ DCHECK_EQ(state(), State::kDone);
int num_digits = accumulator_.ResultLength();
base::SmallVector<bigint::digit_t, 8> digit_storage(num_digits);
bigint::RWDigits digits(digit_storage.data(), num_digits);
@@ -990,23 +1003,24 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
out[num_chars] = '\0';
return out;
}
+ IsolateT* isolate() { return isolate_; }
private:
template <class Char>
void ParseInternal(Char start) {
using Result = bigint::FromStringAccumulator::Result;
- Char current = start + this->cursor();
- Char end = start + this->length();
- current = accumulator_.Parse(current, end, this->radix());
+ Char current = start + cursor();
+ Char end = start + length();
+ current = accumulator_.Parse(current, end, radix());
Result result = accumulator_.result();
if (result == Result::kMaxSizeExceeded) {
- return this->set_state(State::kError);
+ return set_state(State::kError);
}
- if (!this->allow_trailing_junk() && AdvanceToNonspace(&current, end)) {
- return this->set_state(State::kJunk);
+ if (!allow_trailing_junk() && AdvanceToNonspace(&current, end)) {
+ return set_state(State::kJunk);
}
- return this->set_state(State::kDone);
+ return set_state(State::kDone);
}
AllocationType allocation_type() {
@@ -1016,6 +1030,7 @@ class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
: AllocationType::kYoung;
}
+ IsolateT* isolate_;
bigint::FromStringAccumulator accumulator_{BigInt::kMaxLength};
Behavior behavior_;
};
@@ -1459,6 +1474,30 @@ base::Optional<double> TryStringToDouble(LocalIsolate* isolate,
return StringToDouble(v, flags);
}
+base::Optional<double> TryStringToInt(LocalIsolate* isolate,
+ Handle<String> object, int radix) {
+ DisallowGarbageCollection no_gc;
+ const int kMaxLengthForConversion = 20;
+ int length = object->length();
+ if (length > kMaxLengthForConversion) {
+ return base::nullopt;
+ }
+
+ if (String::IsOneByteRepresentationUnderneath(*object)) {
+ uint8_t buffer[kMaxLengthForConversion];
+ SharedStringAccessGuardIfNeeded access_guard(isolate);
+ String::WriteToFlat(*object, buffer, 0, length, isolate, access_guard);
+ NumberParseIntHelper helper(buffer, radix, length);
+ return helper.GetResult();
+ } else {
+ base::uc16 buffer[kMaxLengthForConversion];
+ SharedStringAccessGuardIfNeeded access_guard(isolate);
+ String::WriteToFlat(*object, buffer, 0, length, isolate, access_guard);
+ NumberParseIntHelper helper(buffer, radix, length);
+ return helper.GetResult();
+ }
+}
+
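A hedged usage sketch for the new helper (the surrounding call site and the ProcessValue placeholder are hypothetical): a caller holding only a LocalIsolate can try this bounded, non-allocating fast path first and fall back to the regular StringToInt on the main thread otherwise.

  base::Optional<double> maybe = TryStringToInt(local_isolate, string, 10);
  if (maybe.has_value()) {
    // Parsed from a stack buffer; strings longer than 20 characters make the
    // helper return base::nullopt instead.
    ProcessValue(maybe.value());
  }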
bool IsSpecialIndex(String string) {
// Max length of canonical double: -X.XXXXXXXXXXXXXXXXX-eXXX
const int kBufferSize = 24;
diff --git a/deps/v8/src/numbers/conversions.h b/deps/v8/src/numbers/conversions.h
index cf4ac19690..e70fc3bf4d 100644
--- a/deps/v8/src/numbers/conversions.h
+++ b/deps/v8/src/numbers/conversions.h
@@ -179,6 +179,11 @@ V8_EXPORT_PRIVATE base::Optional<double> TryStringToDouble(
LocalIsolate* isolate, Handle<String> object,
int max_length_for_conversion = 23);
+// Returns base::nullopt if the string is longer than 20 characters.
+V8_EXPORT_PRIVATE base::Optional<double> TryStringToInt(LocalIsolate* isolate,
+ Handle<String> object,
+ int radix);
+
inline bool TryNumberToSize(Object number, size_t* result);
// Converts a number into size_t.
diff --git a/deps/v8/src/objects/all-objects-inl.h b/deps/v8/src/objects/all-objects-inl.h
index b400f84339..6a739aa3bc 100644
--- a/deps/v8/src/objects/all-objects-inl.h
+++ b/deps/v8/src/objects/all-objects-inl.h
@@ -43,6 +43,7 @@
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-iterator-helpers-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-proxy-inl.h"
@@ -92,6 +93,7 @@
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/turbofan-types-inl.h"
+#include "src/objects/turboshaft-types-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 1265accb24..0a96df93b2 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -98,7 +98,11 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
}
inline void DebugCheckZero(void* start, size_t byte_length) {
-#if DEBUG
+#ifdef DEBUG
+#ifdef V8_IS_TSAN
+  // TSan in debug mode is particularly slow. Skip this check for buffers > 64 MB.
+ if (byte_length > 64 * MB) return;
+#endif // TSan debug build
// Double check memory is zero-initialized. Despite being DEBUG-only,
// this function is somewhat optimized for the benefit of test suite
// execution times (some tests allocate several gigabytes).
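For scale, a rough sketch of what the new cutoff means in practice (the constant name is made up; only the 64 * MB figure comes from the patch):

  constexpr size_t kTsanDebugZeroCheckLimit = 64 * MB;  // 67,108,864 bytes
  // A 32 MB ArrayBuffer is still byte-checked under a TSan debug build; a
  // multi-gigabyte Wasm memory returns early instead of spending minutes here.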
@@ -347,8 +351,10 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
// Collect garbage and retry.
did_retry = true;
// TODO(wasm): try Heap::EagerlyFreeExternalMemory() first?
- isolate->heap()->MemoryPressureNotification(
- MemoryPressureLevel::kCritical, true);
+ if (isolate != nullptr) {
+ isolate->heap()->MemoryPressureNotification(
+ MemoryPressureLevel::kCritical, true);
+ }
}
return false;
};
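A condensed sketch of the retry loop this hunk guards; the lambda shape and the attempt bound are assumptions, only the null check and the MemoryPressureNotification call are taken from the patch:

  auto gc_retry = [&](const std::function<bool()>& allocate) {
    constexpr int kMaxAttempts = 3;      // assumed bound, not from the patch
    for (int i = 0; i < kMaxAttempts; ++i) {
      if (allocate()) return true;       // success, possibly after a retry
      if (isolate == nullptr) continue;  // no Isolate: cannot request a GC
      isolate->heap()->MemoryPressureNotification(
          MemoryPressureLevel::kCritical, true);
    }
    return false;
  };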
@@ -368,7 +374,9 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
};
if (!gc_retry(allocate_pages)) {
// Page allocator could not reserve enough pages.
- RecordStatus(isolate, AllocationStatus::kOtherFailure);
+ if (isolate != nullptr) {
+ RecordStatus(isolate, AllocationStatus::kOtherFailure);
+ }
TRACE_BS("BSw:try failed to allocate pages\n");
return {};
}
@@ -403,8 +411,10 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
- RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
- : AllocationStatus::kSuccess);
+ if (isolate != nullptr) {
+ RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
+ : AllocationStatus::kSuccess);
+ }
const bool is_wasm_memory = wasm_memory != WasmMemoryFlag::kNotWasm;
ResizableFlag resizable =
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 699e2c1f31..66840a46ca 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -62,6 +62,10 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
#endif // V8_ENABLE_WEBASSEMBLY
// Tries to allocate `maximum_pages` of memory and commit `initial_pages`.
+ //
+ // If {isolate} is not null, initial failure to allocate the backing store may
+ // trigger GC, after which the allocation is retried. If {isolate} is null, no
+ // GC will be triggered.
static std::unique_ptr<BackingStore> TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t page_size, size_t initial_pages, size_t maximum_pages,
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index f19610f8e7..8132f9edd4 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -24,6 +24,7 @@
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/heap.h"
#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/instance-type-inl.h"
@@ -321,17 +322,11 @@ void MutableBigInt::Canonicalize(MutableBigInt result) {
if (to_trim != 0) {
Heap* heap = result.GetHeap();
if (!heap->IsLargeObject(result)) {
- // We do not create a filler for objects in large object space.
- // TODO(hpayer): We should shrink the large object page if the size
- // of the object changed significantly.
int old_size = ALIGN_TO_ALLOCATION_ALIGNMENT(BigInt::SizeFor(old_length));
int new_size = ALIGN_TO_ALLOCATION_ALIGNMENT(BigInt::SizeFor(new_length));
- if (!V8_COMPRESS_POINTERS_8GB_BOOL || new_size < old_size) {
- // A non-zero to_trim already guarantees that the sizes are different,
- // but their aligned values can be equal.
- Address new_end = result.address() + new_size;
- heap->CreateFillerObjectAt(new_end, old_size - new_size);
- }
+ heap->NotifyObjectSizeChange(result, old_size, new_size,
+ ClearRecordedSlots::kNo,
+ UpdateInvalidatedObjectSize::kNo);
}
result.set_length(new_length, kReleaseStore);
@@ -382,8 +377,7 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Isolate* isolate, Handle<BigInt> base,
Handle<BigInt> exponent) {
// 1. If exponent is < 0, throw a RangeError exception.
if (exponent->sign()) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kBigIntNegativeExponent),
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kMustBePositive),
BigInt);
}
// 2. If base is 0n and exponent is 0n, return 1n.
@@ -927,6 +921,26 @@ ComparisonResult BigInt::CompareToDouble(Handle<BigInt> x, double y) {
return ComparisonResult::kEqual;
}
+namespace {
+
+void RightTrimString(Isolate* isolate, Handle<SeqOneByteString> string,
+ int chars_allocated, int chars_written) {
+ DCHECK_LE(chars_written, chars_allocated);
+ if (chars_written == chars_allocated) return;
+ string->set_length(chars_written, kReleaseStore);
+ int string_size =
+ ALIGN_TO_ALLOCATION_ALIGNMENT(SeqOneByteString::SizeFor(chars_allocated));
+ int needed_size =
+ ALIGN_TO_ALLOCATION_ALIGNMENT(SeqOneByteString::SizeFor(chars_written));
+ if (needed_size < string_size && !isolate->heap()->IsLargeObject(*string)) {
+ isolate->heap()->NotifyObjectSizeChange(*string, string_size, needed_size,
+ ClearRecordedSlots::kNo,
+ UpdateInvalidatedObjectSize::kNo);
+ }
+}
+
+} // namespace
+
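For orientation, a hypothetical call mirroring the ToString path below (the numbers are invented):

  RightTrimString(isolate, result, /*chars_allocated=*/32, /*chars_written=*/27);
  // result->length() is now 27. The heap is only notified of a size change
  // when the two ALIGN_TO_ALLOCATION_ALIGNMENT'ed sizes actually differ and
  // the string does not live in large-object space.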
MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
int radix, ShouldThrow should_throw) {
if (bigint->is_zero()) {
@@ -969,6 +983,7 @@ MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
// out whether all characters were used.
chars_written = chars_allocated - static_cast<int>(out - start);
std::memmove(start, out, chars_written);
+ memset(start + chars_written, 0, chars_allocated - chars_written);
}
} else {
// Generic path, handles anything.
@@ -999,18 +1014,7 @@ MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
// Right-trim any over-allocation (which can happen due to conservative
// estimates).
- if (chars_written < chars_allocated) {
- result->set_length(chars_written, kReleaseStore);
- int string_size = ALIGN_TO_ALLOCATION_ALIGNMENT(
- SeqOneByteString::SizeFor(chars_allocated));
- int needed_size =
- ALIGN_TO_ALLOCATION_ALIGNMENT(SeqOneByteString::SizeFor(chars_written));
- if (needed_size < string_size && !isolate->heap()->IsLargeObject(*result)) {
- Address new_end = result->address() + needed_size;
- isolate->heap()->CreateFillerObjectAt(new_end,
- (string_size - needed_size));
- }
- }
+ RightTrimString(isolate, result, chars_allocated, chars_written);
#if DEBUG
// Verify that all characters have been written.
DCHECK(result->length() == chars_written);
@@ -1023,6 +1027,37 @@ MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
return result;
}
+Handle<String> BigInt::NoSideEffectsToString(Isolate* isolate,
+ Handle<BigInt> bigint) {
+ if (bigint->is_zero()) {
+ return isolate->factory()->zero_string();
+ }
+ // The threshold is chosen such that the operation will be fast enough to
+ // not need interrupt checks. This function is meant for producing human-
+ // readable error messages, so super-long results aren't useful anyway.
+ if (bigint->length() > 100) {
+ return isolate->factory()->NewStringFromStaticChars(
+ "<a very large BigInt>");
+ }
+
+ int chars_allocated =
+ bigint::ToStringResultLength(GetDigits(bigint), 10, bigint->sign());
+ DCHECK_LE(chars_allocated, String::kMaxLength);
+ Handle<SeqOneByteString> result = isolate->factory()
+ ->NewRawOneByteString(chars_allocated)
+ .ToHandleChecked();
+ int chars_written = chars_allocated;
+ DisallowGarbageCollection no_gc;
+ char* characters = reinterpret_cast<char*>(result->GetChars(no_gc));
+ std::unique_ptr<bigint::Processor, bigint::Processor::Destroyer>
+ non_interruptible_processor(
+ bigint::Processor::New(new bigint::Platform()));
+ non_interruptible_processor->ToString(characters, &chars_written,
+ GetDigits(bigint), 10, bigint->sign());
+ RightTrimString(isolate, result, chars_allocated, chars_written);
+ return result;
+}
+
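A sketch of the intended use, matching the declaration comment added in bigint.h below; the surrounding error-formatting call site is hypothetical:

  Handle<String> text = BigInt::NoSideEffectsToString(isolate, bigint);
  // Always succeeds: zero maps to the cached "0" string, inputs with more
  // than 100 internal digits map to the "<a very large BigInt>" placeholder,
  // and no interrupt checks or user-observable side effects occur.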
MaybeHandle<BigInt> BigInt::FromNumber(Isolate* isolate,
Handle<Object> number) {
DCHECK(number->IsNumber());
@@ -1305,6 +1340,7 @@ Handle<BigInt> MutableBigInt::RightShiftByMaximum(Isolate* isolate, bool sign) {
Maybe<BigInt::digit_t> MutableBigInt::ToShiftAmount(Handle<BigIntBase> x) {
if (x->length() > 1) return Nothing<digit_t>();
digit_t value = x->digit(0);
+  // The Torque builtin also depends on this assertion.
static_assert(kMaxLengthBits < std::numeric_limits<digit_t>::max());
if (value > kMaxLengthBits) return Nothing<digit_t>();
return Just(value);
@@ -1739,5 +1775,103 @@ void MutableBigInt_BitwiseAndPosNegAndCanonicalize(Address result_addr,
MutableBigInt::Canonicalize(result);
}
+void MutableBigInt_BitwiseOrPosPosAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::BitwiseOr_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ MutableBigInt::Canonicalize(result);
+}
+
+void MutableBigInt_BitwiseOrNegNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::BitwiseOr_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ MutableBigInt::Canonicalize(result);
+}
+
+void MutableBigInt_BitwiseOrPosNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::BitwiseOr_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ MutableBigInt::Canonicalize(result);
+}
+
+void MutableBigInt_BitwiseXorPosPosAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::BitwiseXor_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ MutableBigInt::Canonicalize(result);
+}
+
+void MutableBigInt_BitwiseXorNegNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::BitwiseXor_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ MutableBigInt::Canonicalize(result);
+}
+
+void MutableBigInt_BitwiseXorPosNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::BitwiseXor_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ MutableBigInt::Canonicalize(result);
+}
+
+void MutableBigInt_LeftShiftAndCanonicalize(Address result_addr, Address x_addr,
+ intptr_t shift) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ bigint::LeftShift(GetRWDigits(result), GetDigits(x), shift);
+ MutableBigInt::Canonicalize(result);
+}
+
+uint32_t RightShiftResultLength(Address x_addr, uint32_t x_sign,
+ intptr_t shift) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ bigint::RightShiftState state;
+ int length =
+ bigint::RightShift_ResultLength(GetDigits(x), x_sign, shift, &state);
+ // {length} should be non-negative and fit in 30 bits.
+ DCHECK_EQ(length >> BigInt::kLengthFieldBits, 0);
+ return (static_cast<uint32_t>(state.must_round_down)
+ << BigInt::kLengthFieldBits) |
+ length;
+}
+
+void MutableBigInt_RightShiftAndCanonicalize(Address result_addr,
+ Address x_addr, intptr_t shift,
+ uint32_t must_round_down) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+ bigint::RightShiftState state{must_round_down == 1};
+ bigint::RightShift(GetRWDigits(result), GetDigits(x), shift, state);
+ MutableBigInt::Canonicalize(result);
+}
+
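The packed value returned by RightShiftResultLength is meant to be split apart again by the caller before invoking MutableBigInt_RightShiftAndCanonicalize above; a sketch of that unpacking (the caller-side names are illustrative):

  uint32_t packed = RightShiftResultLength(x_addr, x_sign, shift);
  uint32_t result_length = packed & ((1u << BigInt::kLengthFieldBits) - 1);
  uint32_t must_round_down = packed >> BigInt::kLengthFieldBits;
  // ...allocate a MutableBigInt with result_length digits, then:
  MutableBigInt_RightShiftAndCanonicalize(result_addr, x_addr, shift,
                                          must_round_down);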
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index e3869fe2a6..44ac1815a4 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -44,13 +44,35 @@ void MutableBigInt_BitwiseAndNegNegAndCanonicalize(Address result_addr,
void MutableBigInt_BitwiseAndPosNegAndCanonicalize(Address result_addr,
Address x_addr,
Address y_addr);
+void MutableBigInt_BitwiseOrPosPosAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr);
+void MutableBigInt_BitwiseOrNegNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr);
+void MutableBigInt_BitwiseOrPosNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr);
+void MutableBigInt_BitwiseXorPosPosAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr);
+void MutableBigInt_BitwiseXorNegNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr);
+void MutableBigInt_BitwiseXorPosNegAndCanonicalize(Address result_addr,
+ Address x_addr,
+ Address y_addr);
+void MutableBigInt_LeftShiftAndCanonicalize(Address result_addr, Address x_addr,
+ intptr_t shift);
+uint32_t RightShiftResultLength(Address x_addr, uint32_t x_sign,
+ intptr_t shift);
+void MutableBigInt_RightShiftAndCanonicalize(Address result_addr,
+ Address x_addr, intptr_t shift,
+ uint32_t must_round_down);
class BigInt;
class ValueDeserializer;
class ValueSerializer;
-class WebSnapshotSerializerDeserializer;
-class WebSnapshotSerializer;
-class WebSnapshotDeserializer;
#include "torque-generated/src/objects/bigint-tq.inc"
@@ -253,6 +275,12 @@ class BigInt : public BigIntBase {
static MaybeHandle<String> ToString(Isolate* isolate, Handle<BigInt> bigint,
int radix = 10,
ShouldThrow should_throw = kThrowOnError);
+ // Like the above, but adapted for the needs of producing error messages:
+ // doesn't care about termination requests, and returns a default string
+ // for inputs beyond a relatively low upper bound.
+ static Handle<String> NoSideEffectsToString(Isolate* isolate,
+ Handle<BigInt> bigint);
+
// "The Number value for x", see:
// https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type
// Returns a Smi or HeapNumber.
@@ -273,9 +301,6 @@ class BigInt : public BigIntBase {
friend class StringToBigIntHelper;
friend class ValueDeserializer;
friend class ValueSerializer;
- friend class WebSnapshotSerializerDeserializer;
- friend class WebSnapshotSerializer;
- friend class WebSnapshotDeserializer;
// Special functions for StringToBigIntHelper:
template <typename IsolateT>
diff --git a/deps/v8/src/objects/call-site-info-inl.h b/deps/v8/src/objects/call-site-info-inl.h
index e10601f30a..8ea7d8d747 100644
--- a/deps/v8/src/objects/call-site-info-inl.h
+++ b/deps/v8/src/objects/call-site-info-inl.h
@@ -32,27 +32,12 @@ BOOL_GETTER(CallSiteInfo, flags, IsConstructor, IsConstructorBit::kShift)
BOOL_GETTER(CallSiteInfo, flags, IsAsync, IsAsyncBit::kShift)
DEF_GETTER(CallSiteInfo, code_object, HeapObject) {
- HeapObject value = TorqueGeneratedClass::code_object(cage_base);
- // The |code_object| field can contain many types of objects, but only CodeT
- // values have to be converted to Code.
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // In this mode the callers are fine with CodeT result.
- return value;
- }
- if (V8_EXTERNAL_CODE_SPACE_BOOL && value.IsCodeT()) {
- return FromCodeT(CodeT::cast(value));
- }
- return value;
+ return TorqueGeneratedClass::code_object(cage_base);
}
void CallSiteInfo::set_code_object(HeapObject code, WriteBarrierMode mode) {
- // The |code_object| field can contain many types of objects, but only Code
- // values have to be converted to CodeT.
- if (V8_EXTERNAL_CODE_SPACE_BOOL && IsCodeSpaceObject(code)) {
- TorqueGeneratedClass::set_code_object(ToCodeT(Code::cast(code)), mode);
- } else {
- TorqueGeneratedClass::set_code_object(code, mode);
- }
+ DCHECK(!IsCodeSpaceObject(code));
+ TorqueGeneratedClass::set_code_object(code, mode);
}
} // namespace internal
diff --git a/deps/v8/src/objects/call-site-info.cc b/deps/v8/src/objects/call-site-info.cc
index 4b57bcbfdf..97a6f12774 100644
--- a/deps/v8/src/objects/call-site-info.cc
+++ b/deps/v8/src/objects/call-site-info.cc
@@ -221,7 +221,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
builder.AppendCStringLiteral("eval at ");
if (script->has_eval_from_shared()) {
Handle<SharedFunctionInfo> eval_shared(script->eval_from_shared(), isolate);
- auto eval_name = SharedFunctionInfo::DebugName(eval_shared);
+ auto eval_name = SharedFunctionInfo::DebugName(isolate, eval_shared);
if (eval_name->length() != 0) {
builder.AppendString(eval_name);
} else {
@@ -570,11 +570,9 @@ int CallSiteInfo::ComputeSourcePosition(Handle<CallSiteInfo> info, int offset) {
Isolate* isolate = info->GetIsolate();
#if V8_ENABLE_WEBASSEMBLY
if (info->IsWasm()) {
- auto code_ref = Managed<wasm::GlobalWasmCodeRef>::cast(info->code_object());
- int byte_offset = code_ref.get()->code()->GetSourcePositionBefore(offset);
auto module = info->GetWasmInstance().module();
uint32_t func_index = info->GetWasmFunctionIndex();
- return wasm::GetSourcePosition(module, func_index, byte_offset,
+ return wasm::GetSourcePosition(module, func_index, offset,
info->IsAsmJsAtNumberConversion());
}
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/objects/call-site-info.h b/deps/v8/src/objects/call-site-info.h
index bb3ebbeb15..72ff3cb08a 100644
--- a/deps/v8/src/objects/call-site-info.h
+++ b/deps/v8/src/objects/call-site-info.h
@@ -84,7 +84,7 @@ class CallSiteInfo : public TorqueGeneratedCallSiteInfo<CallSiteInfo, Struct> {
#endif // V8_ENABLE_WEBASSEMBLY
// Returns the 0-based source position, which is the offset into the
- // Script in case of JavaScript and Asm.js, and the bytecode offset
+ // Script in case of JavaScript and Asm.js, and the wire byte offset
// in the module in case of actual Wasm. In case of async promise
// combinator frames, this returns the index of the promise.
static int GetSourcePosition(Handle<CallSiteInfo> info);
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index fc7a080572..48721e6f6e 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -35,106 +35,137 @@ OBJECT_CONSTRUCTORS_IMPL(DeoptimizationData, FixedArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(BytecodeArray)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakArrayList)
-OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
-NEVER_READ_ONLY_SPACE_IMPL(CodeDataContainer)
-
-NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
+OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(GcSafeCode, HeapObject)
CAST_ACCESSOR(AbstractCode)
+CAST_ACCESSOR(GcSafeCode)
+CAST_ACCESSOR(InstructionStream)
CAST_ACCESSOR(Code)
-CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
CAST_ACCESSOR(DeoptimizationLiteralArray)
+Code GcSafeCode::UnsafeCastToCode() const {
+ return Code::unchecked_cast(*this);
+}
+
+#define GCSAFE_CODE_FWD_ACCESSOR(ReturnType, Name) \
+ ReturnType GcSafeCode::Name() const { return UnsafeCastToCode().Name(); }
+GCSAFE_CODE_FWD_ACCESSOR(Address, InstructionStart)
+GCSAFE_CODE_FWD_ACCESSOR(Address, InstructionEnd)
+GCSAFE_CODE_FWD_ACCESSOR(bool, is_builtin)
+GCSAFE_CODE_FWD_ACCESSOR(Builtin, builtin_id)
+GCSAFE_CODE_FWD_ACCESSOR(CodeKind, kind)
+GCSAFE_CODE_FWD_ACCESSOR(bool, is_interpreter_trampoline_builtin)
+GCSAFE_CODE_FWD_ACCESSOR(bool, is_baseline_trampoline_builtin)
+GCSAFE_CODE_FWD_ACCESSOR(bool, is_baseline_leave_frame_builtin)
+GCSAFE_CODE_FWD_ACCESSOR(bool, has_instruction_stream)
+GCSAFE_CODE_FWD_ACCESSOR(bool, is_maglevved)
+GCSAFE_CODE_FWD_ACCESSOR(bool, is_turbofanned)
+GCSAFE_CODE_FWD_ACCESSOR(bool, has_tagged_outgoing_params)
+GCSAFE_CODE_FWD_ACCESSOR(bool, marked_for_deoptimization)
+GCSAFE_CODE_FWD_ACCESSOR(Object, raw_instruction_stream)
+GCSAFE_CODE_FWD_ACCESSOR(int, stack_slots)
+GCSAFE_CODE_FWD_ACCESSOR(Address, constant_pool)
+GCSAFE_CODE_FWD_ACCESSOR(Address, SafepointTableAddress)
+#undef GCSAFE_CODE_FWD_ACCESSOR
+
+int GcSafeCode::GetOffsetFromInstructionStart(Isolate* isolate,
+ Address pc) const {
+ return UnsafeCastToCode().GetOffsetFromInstructionStart(isolate, pc);
+}
+
+Address GcSafeCode::InstructionStart(Isolate* isolate, Address pc) const {
+ return UnsafeCastToCode().InstructionStart(isolate, pc);
+}
+
+Address GcSafeCode::InstructionEnd(Isolate* isolate, Address pc) const {
+ return UnsafeCastToCode().InstructionEnd(isolate, pc);
+}
+
+bool GcSafeCode::CanDeoptAt(Isolate* isolate, Address pc) const {
+ DeoptimizationData deopt_data = DeoptimizationData::unchecked_cast(
+ UnsafeCastToCode().unchecked_deoptimization_data());
+ Address code_start_address = InstructionStart();
+ for (int i = 0; i < deopt_data.DeoptCount(); i++) {
+ if (deopt_data.Pc(i).value() == -1) continue;
+ Address address = code_start_address + deopt_data.Pc(i).value();
+ if (address == pc &&
+ deopt_data.GetBytecodeOffset(i) != BytecodeOffset::None()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+Object GcSafeCode::raw_instruction_stream(
+ PtrComprCageBase code_cage_base) const {
+ return UnsafeCastToCode().raw_instruction_stream(code_cage_base);
+}
+
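Taken together, these forwarders let GC-sensitive callers (stack walking during marking, for instance) query a Code object without assuming it can be fully dereferenced; a hedged sketch of such a caller, with placeholder locals and a placeholder RecordDeoptCandidate helper:

  GcSafeCode gc_safe = GcSafeCode::cast(lookup_result);
  if (gc_safe.has_instruction_stream() && gc_safe.CanDeoptAt(isolate, pc)) {
    int offset = gc_safe.GetOffsetFromInstructionStart(isolate, pc);
    // Only the GC-safe, forwarded accessors are used here; UnsafeCastToCode()
    // stays internal to the GcSafeCode implementation.
    RecordDeoptCandidate(offset);
  }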
int AbstractCode::InstructionSize(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().InstructionSize();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().InstructionSize();
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return GetBytecodeArray().length();
}
}
ByteArray AbstractCode::SourcePositionTableInternal(
PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
- DCHECK_NE(GetCode().kind(), CodeKind::BASELINE);
- return GetCode().source_position_table(cage_base);
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- CodeT codet = GetCodeT();
- if (codet.is_off_heap_trampoline()) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
+ Code code = GetCode();
+ if (!code.has_instruction_stream()) {
return GetReadOnlyRoots().empty_byte_array();
}
- return codet.source_position_table(cage_base);
+ return code.source_position_table(cage_base);
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return GetBytecodeArray().SourcePositionTable(cage_base);
}
}
-ByteArray AbstractCode::SourcePositionTable(PtrComprCageBase cage_base,
+ByteArray AbstractCode::SourcePositionTable(Isolate* isolate,
SharedFunctionInfo sfi) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
- return GetCode().SourcePositionTable(cage_base, sfi);
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- CodeT codet = GetCodeT();
- if (codet.is_off_heap_trampoline()) {
- return GetReadOnlyRoots().empty_byte_array();
- }
- return FromCodeT(codet).SourcePositionTable(cage_base, sfi);
+ Map map_object = map(isolate);
+ if (InstanceTypeChecker::IsCode(map_object)) {
+ return GetCode().SourcePositionTable(isolate, sfi);
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
- return GetBytecodeArray().SourcePositionTable(cage_base);
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
+ return GetBytecodeArray().SourcePositionTable(isolate);
}
}
int AbstractCode::SizeIncludingMetadata(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
- return GetCode().SizeIncludingMetadata(cage_base);
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- CodeT codet = GetCodeT();
- return codet.is_off_heap_trampoline()
- ? 0
- : FromCodeT(codet).SizeIncludingMetadata(cage_base);
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
+ return GetCode().SizeIncludingMetadata();
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return GetBytecodeArray().SizeIncludingMetadata();
}
}
Address AbstractCode::InstructionStart(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().InstructionStart();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().InstructionStart();
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return GetBytecodeArray().GetFirstBytecodeAddress();
}
}
Address AbstractCode::InstructionEnd(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().InstructionEnd();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().InstructionEnd();
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
BytecodeArray bytecode_array = GetBytecodeArray();
return bytecode_array.GetFirstBytecodeAddress() + bytecode_array.length();
}
@@ -142,66 +173,46 @@ Address AbstractCode::InstructionEnd(PtrComprCageBase cage_base) {
bool AbstractCode::contains(Isolate* isolate, Address inner_pointer) {
PtrComprCageBase cage_base(isolate);
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().contains(isolate, inner_pointer);
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().contains(isolate, inner_pointer);
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return (address() <= inner_pointer) &&
(inner_pointer <= address() + Size(cage_base));
}
}
CodeKind AbstractCode::kind(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().kind();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().kind();
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return CodeKind::INTERPRETED_FUNCTION;
}
}
Builtin AbstractCode::builtin_id(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().builtin_id();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().builtin_id();
} else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
+ DCHECK(InstanceTypeChecker::IsBytecodeArray(map_object));
return Builtin::kNoBuiltinId;
}
}
-bool AbstractCode::is_off_heap_trampoline(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
- return GetCode().is_off_heap_trampoline();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().is_off_heap_trampoline();
- } else {
- DCHECK(InstanceTypeChecker::IsBytecodeArray(instance_type));
- return false;
- }
+bool AbstractCode::has_instruction_stream(PtrComprCageBase cage_base) {
+ DCHECK(InstanceTypeChecker::IsCode(map(cage_base)));
+ return GetCode().has_instruction_stream();
}
HandlerTable::CatchPrediction AbstractCode::GetBuiltinCatchPrediction(
PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ Map map_object = map(cage_base);
+ if (InstanceTypeChecker::IsCode(map_object)) {
return GetCode().GetBuiltinCatchPrediction();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT().GetBuiltinCatchPrediction();
} else {
UNREACHABLE();
}
@@ -211,138 +222,77 @@ bool AbstractCode::IsCode(PtrComprCageBase cage_base) const {
return HeapObject::IsCode(cage_base);
}
-bool AbstractCode::IsCodeT(PtrComprCageBase cage_base) const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- return HeapObject::IsCodeT(cage_base);
-}
-
bool AbstractCode::IsBytecodeArray(PtrComprCageBase cage_base) const {
return HeapObject::IsBytecodeArray(cage_base);
}
Code AbstractCode::GetCode() { return Code::cast(*this); }
-CodeT AbstractCode::GetCodeT() {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- return CodeT::cast(*this);
-}
-
BytecodeArray AbstractCode::GetBytecodeArray() {
return BytecodeArray::cast(*this);
}
-Code AbstractCode::ToCode(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
- return GetCode();
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- CodeT codet = GetCodeT();
- DCHECK(!codet.is_off_heap_trampoline());
- return FromCodeT(codet);
- } else {
- UNREACHABLE();
- }
-}
+OBJECT_CONSTRUCTORS_IMPL(InstructionStream, HeapObject)
+NEVER_READ_ONLY_SPACE_IMPL(InstructionStream)
-CodeT AbstractCode::ToCodeT(PtrComprCageBase cage_base) {
- InstanceType instance_type = map(cage_base).instance_type();
- if (InstanceTypeChecker::IsCode(instance_type)) {
- return i::ToCodeT(GetCode());
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- return GetCodeT();
- } else {
- UNREACHABLE();
- }
-}
-
-OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
-NEVER_READ_ONLY_SPACE_IMPL(Code)
-
-INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, raw_metadata_size, kMetadataSizeOffset)
+INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, metadata_size, kMetadataSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
INT_ACCESSORS(Code, code_comments_offset, kCodeCommentsOffsetOffset)
INT32_ACCESSORS(Code, unwinding_info_offset, kUnwindingInfoOffsetOffset)
-
-// Same as ACCESSORS_CHECKED2 macro but with Code as a host and using
-// main_cage_base() for computing the base.
-#define CODE_ACCESSORS_CHECKED2(name, type, offset, get_condition, \
- set_condition) \
- type Code::name() const { \
- PtrComprCageBase cage_base = main_cage_base(); \
- return Code::name(cage_base); \
- } \
- type Code::name(PtrComprCageBase cage_base) const { \
- type value = TaggedField<type, offset>::load(cage_base, *this); \
- DCHECK(get_condition); \
- return value; \
- } \
- void Code::set_##name(type value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- TaggedField<type, offset>::store(*this, value); \
- CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
+ACCESSORS_CHECKED2(Code, deoptimization_data, FixedArray,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, bytecode_or_interpreter_data, HeapObject,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, source_position_table, ByteArray, kPositionTableOffset,
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, bytecode_offset_table, ByteArray, kPositionTableOffset,
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
+// Same as RELEASE_ACQUIRE_ACCESSORS_CHECKED2 macro but with InstructionStream
+// as a host and using main_cage_base(kRelaxedLoad) for computing the base.
+#define RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS_CHECKED2( \
+ name, type, offset, get_condition, set_condition) \
+ type InstructionStream::name(AcquireLoadTag tag) const { \
+ PtrComprCageBase cage_base = main_cage_base(kRelaxedLoad); \
+ return InstructionStream::name(cage_base, tag); \
+ } \
+ type InstructionStream::name(PtrComprCageBase cage_base, AcquireLoadTag) \
+ const { \
+ type value = TaggedField<type, offset>::Acquire_Load(cage_base, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void InstructionStream::set_##name(type value, ReleaseStoreTag, \
+ WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<type, offset>::Release_Store(*this, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
-// Same as RELEASE_ACQUIRE_ACCESSORS_CHECKED2 macro but with Code as a host and
-// using main_cage_base(kRelaxedLoad) for computing the base.
-#define RELEASE_ACQUIRE_CODE_ACCESSORS_CHECKED2(name, type, offset, \
- get_condition, set_condition) \
- type Code::name(AcquireLoadTag tag) const { \
- PtrComprCageBase cage_base = main_cage_base(kRelaxedLoad); \
- return Code::name(cage_base, tag); \
- } \
- type Code::name(PtrComprCageBase cage_base, AcquireLoadTag) const { \
- type value = TaggedField<type, offset>::Acquire_Load(cage_base, *this); \
- DCHECK(get_condition); \
- return value; \
- } \
- void Code::set_##name(type value, ReleaseStoreTag, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- TaggedField<type, offset>::Release_Store(*this, value); \
- CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
- }
+#define RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS(name, type, offset) \
+ RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS_CHECKED2( \
+ name, type, offset, !ObjectInYoungGeneration(value), \
+ !ObjectInYoungGeneration(value))
+
+// Concurrent marker needs to access kind-specific flags in Code.
+RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS(code, Code, kCodeOffset)
+RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS(raw_code, HeapObject, kCodeOffset)
+#undef RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS
+#undef RELEASE_ACQUIRE_INSTRUCTION_STREAM_ACCESSORS_CHECKED2
-#define CODE_ACCESSORS(name, type, offset) \
- CODE_ACCESSORS_CHECKED2(name, type, offset, true, true)
-
-#define RELEASE_ACQUIRE_CODE_ACCESSORS(name, type, offset) \
- RELEASE_ACQUIRE_CODE_ACCESSORS_CHECKED2(name, type, offset, \
- !ObjectInYoungGeneration(value), \
- !ObjectInYoungGeneration(value))
-
-CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
-
-CODE_ACCESSORS_CHECKED2(deoptimization_data, FixedArray,
- kDeoptimizationDataOrInterpreterDataOffset,
- kind() != CodeKind::BASELINE,
- kind() != CodeKind::BASELINE &&
- !ObjectInYoungGeneration(value))
-CODE_ACCESSORS_CHECKED2(bytecode_or_interpreter_data, HeapObject,
- kDeoptimizationDataOrInterpreterDataOffset,
- kind() == CodeKind::BASELINE,
- kind() == CodeKind::BASELINE &&
- !ObjectInYoungGeneration(value))
-
-CODE_ACCESSORS_CHECKED2(source_position_table, ByteArray, kPositionTableOffset,
- kind() != CodeKind::BASELINE,
- kind() != CodeKind::BASELINE &&
- !ObjectInYoungGeneration(value))
-CODE_ACCESSORS_CHECKED2(bytecode_offset_table, ByteArray, kPositionTableOffset,
- kind() == CodeKind::BASELINE,
- kind() == CodeKind::BASELINE &&
- !ObjectInYoungGeneration(value))
-
-// Concurrent marker needs to access kind specific flags in code data container.
-RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
- kCodeDataContainerOffset)
-#undef CODE_ACCESSORS
-#undef CODE_ACCESSORS_CHECKED2
-#undef RELEASE_ACQUIRE_CODE_ACCESSORS
-#undef RELEASE_ACQUIRE_CODE_ACCESSORS_CHECKED2
-
-PtrComprCageBase Code::main_cage_base() const {
+PtrComprCageBase InstructionStream::main_cage_base() const {
#ifdef V8_EXTERNAL_CODE_SPACE
Address cage_base_hi = ReadField<Tagged_t>(kMainCageBaseUpper32BitsOffset);
return PtrComprCageBase(cage_base_hi << 32);
@@ -351,7 +301,7 @@ PtrComprCageBase Code::main_cage_base() const {
#endif
}
-PtrComprCageBase Code::main_cage_base(RelaxedLoadTag) const {
+PtrComprCageBase InstructionStream::main_cage_base(RelaxedLoadTag) const {
#ifdef V8_EXTERNAL_CODE_SPACE
Address cage_base_hi =
Relaxed_ReadField<Tagged_t>(kMainCageBaseUpper32BitsOffset);
@@ -361,7 +311,7 @@ PtrComprCageBase Code::main_cage_base(RelaxedLoadTag) const {
#endif
}
-void Code::set_main_cage_base(Address cage_base, RelaxedStoreTag) {
+void InstructionStream::set_main_cage_base(Address cage_base, RelaxedStoreTag) {
#ifdef V8_EXTERNAL_CODE_SPACE
Tagged_t cage_base_hi = static_cast<Tagged_t>(cage_base >> 32);
Relaxed_WriteField<Tagged_t>(kMainCageBaseUpper32BitsOffset, cage_base_hi);
@@ -370,327 +320,105 @@ void Code::set_main_cage_base(Address cage_base, RelaxedStoreTag) {
#endif
}
-CodeDataContainer Code::GCSafeCodeDataContainer(AcquireLoadTag) const {
+Code InstructionStream::GCSafeCode(AcquireLoadTag) const {
PtrComprCageBase cage_base = main_cage_base(kRelaxedLoad);
HeapObject object =
- TaggedField<HeapObject, kCodeDataContainerOffset>::Acquire_Load(cage_base,
- *this);
+ TaggedField<HeapObject, kCodeOffset>::Acquire_Load(cage_base, *this);
DCHECK(!ObjectInYoungGeneration(object));
- CodeDataContainer code_data_container =
- ForwardingAddress(CodeDataContainer::unchecked_cast(object));
- return code_data_container;
-}
-
-// Helper functions for converting Code objects to CodeDataContainer and back
-// when V8_EXTERNAL_CODE_SPACE is enabled.
-inline CodeT ToCodeT(Code code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return code.code_data_container(kAcquireLoad);
-#else
- return code;
-#endif
-}
-
-inline Handle<CodeT> ToCodeT(Handle<Code> code, Isolate* isolate) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return handle(ToCodeT(*code), isolate);
-#else
+ Code code = ForwardingAddress(Code::unchecked_cast(object));
return code;
-#endif
}
-inline MaybeHandle<CodeT> ToCodeT(MaybeHandle<Code> maybe_code,
- Isolate* isolate) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- Handle<Code> code;
- if (maybe_code.ToHandle(&code)) return ToCodeT(code, isolate);
- return {};
-#else
- return maybe_code;
-#endif
-}
+// Helper functions for converting InstructionStream objects to
+// Code and back.
+inline Code ToCode(InstructionStream code) { return code.code(kAcquireLoad); }
-inline Code FromCodeT(CodeT code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK(!code.is_off_heap_trampoline());
- // Compute the Code object pointer from the code entry point.
- Address ptr = code.code_entry_point() - Code::kHeaderSize + kHeapObjectTag;
- return Code::cast(Object(ptr));
-#else
- return code;
-#endif
+inline InstructionStream FromCode(Code code) {
+ DCHECK(code.has_instruction_stream());
+ // Compute the InstructionStream object pointer from the code entry point.
+ Address ptr =
+ code.code_entry_point() - InstructionStream::kHeaderSize + kHeapObjectTag;
+ return InstructionStream::cast(Object(ptr));
}
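The two helpers are inverses for code that has an on-heap instruction stream; a small round-trip illustration using only what is defined above (`code` is a placeholder local):

  InstructionStream istream = FromCode(code);  // requires has_instruction_stream()
  Code back = ToCode(istream);                 // acquire-loads the back pointer
  DCHECK_EQ(code, back);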
-inline Code FromCodeT(CodeT code, PtrComprCageBase code_cage_base,
- RelaxedLoadTag tag) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK(!code.is_off_heap_trampoline());
+inline InstructionStream FromCode(Code code, PtrComprCageBase code_cage_base,
+ RelaxedLoadTag tag) {
+ DCHECK(code.has_instruction_stream());
// Since the code entry point field is not aligned we can't load it atomically
- // and use for Code object pointer calculation. So, we load and decompress
- // the code field.
- return code.code(code_cage_base, tag);
-#else
- return code;
-#endif
-}
-
-inline Code FromCodeT(CodeT code, Isolate* isolate, RelaxedLoadTag tag) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- PtrComprCageBase code_cage_base(isolate->code_cage_base());
- return FromCodeT(code, code_cage_base, tag);
-#else
- return code;
-#endif
-}
-
-inline Handle<Code> FromCodeT(Handle<CodeT> code, Isolate* isolate) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return handle(FromCodeT(*code), isolate);
-#else
- return code;
-#endif
-}
-
-inline AbstractCode ToAbstractCode(CodeT code) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- return AbstractCode::cast(code);
- }
- return AbstractCode::cast(FromCodeT(code));
-}
-
-inline Handle<AbstractCode> ToAbstractCode(Handle<CodeT> code,
- Isolate* isolate) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- return Handle<AbstractCode>::cast(code);
- }
- return Handle<AbstractCode>::cast(FromCodeT(code, isolate));
-}
-
-inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return code;
-#else
- return code.code_data_container(kAcquireLoad);
-#endif
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-#define CODE_LOOKUP_RESULT_FWD_ACCESSOR(name, Type) \
- Type CodeLookupResult::name() const { \
- DCHECK(IsFound()); \
- return IsCode() ? code().name() : code_data_container().name(); \
- }
-#else
-#define CODE_LOOKUP_RESULT_FWD_ACCESSOR(name, Type) \
- Type CodeLookupResult::name() const { \
- DCHECK(IsFound()); \
- return code().name(); \
- }
-#endif
-
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(kind, CodeKind)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(builtin_id, Builtin)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(has_tagged_outgoing_params, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(has_handler_table, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(is_baseline_trampoline_builtin, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(is_interpreter_trampoline_builtin, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(is_baseline_leave_frame_builtin, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(is_maglevved, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(is_turbofanned, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(is_optimized_code, bool)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(stack_slots, int)
-CODE_LOOKUP_RESULT_FWD_ACCESSOR(GetBuiltinCatchPrediction,
- HandlerTable::CatchPrediction)
-
-#undef CODE_LOOKUP_RESULT_FWD_ACCESSOR
-
-int CodeLookupResult::GetOffsetFromInstructionStart(Isolate* isolate,
- Address pc) const {
- DCHECK(IsFound());
-#ifdef V8_EXTERNAL_CODE_SPACE
- if (IsCodeDataContainer()) {
- return code_data_container().GetOffsetFromInstructionStart(isolate, pc);
- }
-#endif
- return code().GetOffsetFromInstructionStart(isolate, pc);
-}
-
-SafepointEntry CodeLookupResult::GetSafepointEntry(Isolate* isolate,
- Address pc) const {
- DCHECK(IsFound());
-#ifdef V8_EXTERNAL_CODE_SPACE
- if (IsCodeDataContainer()) {
- return code_data_container().GetSafepointEntry(isolate, pc);
- }
-#endif
- return code().GetSafepointEntry(isolate, pc);
-}
-
-MaglevSafepointEntry CodeLookupResult::GetMaglevSafepointEntry(
- Isolate* isolate, Address pc) const {
- DCHECK(IsFound());
-#ifdef V8_EXTERNAL_CODE_SPACE
- if (IsCodeDataContainer()) {
- return code_data_container().GetMaglevSafepointEntry(isolate, pc);
- }
-#endif
- return code().GetMaglevSafepointEntry(isolate, pc);
-}
-
-AbstractCode CodeLookupResult::ToAbstractCode() const {
- DCHECK(IsFound());
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- return IsCodeDataContainer() ? AbstractCode::cast(code_data_container())
- : AbstractCode::cast(code());
- }
- return AbstractCode::cast(ToCode());
+  // and use it for InstructionStream object pointer calculation. So, we load and
+ // decompress the code field.
+ return code.instruction_stream(code_cage_base, tag);
}
-Code CodeLookupResult::ToCode() const {
- DCHECK(IsFound());
+inline InstructionStream FromCode(Code code, Isolate* isolate,
+ RelaxedLoadTag tag) {
#ifdef V8_EXTERNAL_CODE_SPACE
- return IsCode() ? code() : FromCodeT(code_data_container());
+ return FromCode(code, PtrComprCageBase{isolate->code_cage_base()}, tag);
#else
- return code();
-#endif
-}
-
-CodeT CodeLookupResult::ToCodeT() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return IsCodeDataContainer() ? code_data_container() : i::ToCodeT(code());
-#else
- return code();
-#endif
+ return FromCode(code, GetPtrComprCageBase(code), tag);
+#endif // V8_EXTERNAL_CODE_SPACE
}
-void Code::WipeOutHeader() {
- WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
- WRITE_FIELD(*this, kDeoptimizationDataOrInterpreterDataOffset,
- Smi::FromInt(0));
- WRITE_FIELD(*this, kPositionTableOffset, Smi::FromInt(0));
- WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
+// TODO(jgruber): Remove this method once main_cage_base is gone.
+void InstructionStream::WipeOutHeader() {
+ WRITE_FIELD(*this, kCodeOffset, Smi::FromInt(0));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
set_main_cage_base(kNullAddress, kRelaxedStore);
}
}
-void Code::clear_padding() {
- // Clear the padding between the header and `raw_body_start`.
- if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
- memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
- FIELD_SIZE(kOptionalPaddingOffset));
- }
-
- // Clear the padding after `raw_body_end`.
+void Code::ClearInstructionStreamPadding() {
+ // Clear the padding after `body_end`.
size_t trailing_padding_size =
- CodeSize() - Code::kHeaderSize - raw_body_size();
- memset(reinterpret_cast<void*>(raw_body_end()), 0, trailing_padding_size);
+ CodeSize() - InstructionStream::kHeaderSize - body_size();
+ memset(reinterpret_cast<void*>(body_end()), 0, trailing_padding_size);
}
-ByteArray Code::SourcePositionTable(PtrComprCageBase cage_base,
+ByteArray Code::SourcePositionTable(Isolate* isolate,
SharedFunctionInfo sfi) const {
+ if (!has_instruction_stream()) {
+ return GetReadOnlyRoots().empty_byte_array();
+ }
+
DisallowGarbageCollection no_gc;
if (kind() == CodeKind::BASELINE) {
- return sfi.GetBytecodeArray(sfi.GetIsolate())
- .SourcePositionTable(cage_base);
+ return sfi.GetBytecodeArray(isolate).SourcePositionTable(isolate);
}
- return source_position_table(cage_base);
-}
-
-Object Code::next_code_link() const {
- return code_data_container(kAcquireLoad).next_code_link();
+ return source_position_table(isolate);
}
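Summarizing the three cases handled above in one hedged sketch (the call site is hypothetical):

  // no instruction stream (embedded builtin)  -> empty_byte_array()
  // CodeKind::BASELINE                        -> the bytecode's position table
  // all other kinds                           -> the Code object's own table
  ByteArray table = code.SourcePositionTable(isolate, shared_function_info);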
-void Code::set_next_code_link(Object value) {
- code_data_container(kAcquireLoad).set_next_code_link(value);
-}
+Address Code::body_start() const { return InstructionStart(); }
-Address Code::raw_body_start() const { return raw_instruction_start(); }
+Address Code::body_end() const { return body_start() + body_size(); }
-Address Code::raw_body_end() const {
- return raw_body_start() + raw_body_size();
-}
+int Code::body_size() const { return instruction_size() + metadata_size(); }
-int Code::raw_body_size() const {
- return raw_instruction_size() + raw_metadata_size();
-}
+// TODO(jgruber): Remove instruction_size.
+int Code::InstructionSize() const { return instruction_size(); }
-int Code::InstructionSize() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapInstructionSize(*this, builtin_id())
- : raw_instruction_size();
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-int CodeDataContainer::InstructionSize() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapInstructionSize(*this, builtin_id())
- : code().raw_instruction_size();
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
-Address Code::raw_instruction_start() const {
+Address InstructionStream::instruction_start() const {
return field_address(kHeaderSize);
}
-Address Code::InstructionStart() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? i::OffHeapInstructionStart(*this, builtin_id())
- : raw_instruction_start();
-}
-
-Address Code::raw_instruction_end() const {
- return raw_instruction_start() + raw_instruction_size();
-}
-
Address Code::InstructionEnd() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? i::OffHeapInstructionEnd(*this, builtin_id())
- : raw_instruction_end();
+ return InstructionStart() + instruction_size();
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::InstructionEnd() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? i::OffHeapInstructionEnd(*this, builtin_id())
- : code().raw_instruction_end();
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
-Address Code::raw_metadata_start() const {
- return raw_instruction_start() + raw_instruction_size();
+Address Code::metadata_start() const {
+ return InstructionStart() + instruction_size();
}
Address Code::InstructionStart(Isolate* isolate, Address pc) const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapInstructionStart(isolate, pc)
- : raw_instruction_start();
+ return V8_LIKELY(has_instruction_stream())
+ ? code_entry_point()
+ : OffHeapInstructionStart(isolate, pc);
}
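This is the recurring dispatch in the rewritten accessors: after the Code/CodeDataContainer merge, a Code object either carries an on-heap InstructionStream or refers to embedded builtin instructions, and V8_LIKELY marks the former as the common case. Condensed, with `code` as a placeholder:

  Address start = code.has_instruction_stream()
                      ? code.code_entry_point()                     // on-heap stream
                      : code.OffHeapInstructionStart(isolate, pc);  // embedded builtin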
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::InstructionStart(Isolate* isolate,
- Address pc) const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapInstructionStart(isolate, pc)
- : raw_instruction_start();
-}
-#endif
-
Address Code::InstructionEnd(Isolate* isolate, Address pc) const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapInstructionEnd(isolate, pc)
- : raw_instruction_end();
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::InstructionEnd(Isolate* isolate, Address pc) const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapInstructionEnd(isolate, pc)
- : code().raw_instruction_end();
+ return V8_LIKELY(has_instruction_stream())
+ ? InstructionEnd()
+ : OffHeapInstructionEnd(isolate, pc);
}
-#endif
int Code::GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const {
Address instruction_start = InstructionStart(isolate, pc);
@@ -699,106 +427,52 @@ int Code::GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const {
return static_cast<int>(offset);
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-int CodeDataContainer::GetOffsetFromInstructionStart(Isolate* isolate,
- Address pc) const {
- Address instruction_start = InstructionStart(isolate, pc);
- Address offset = pc - instruction_start;
- DCHECK_LE(offset, InstructionSize());
- return static_cast<int>(offset);
+Address Code::metadata_end() const {
+ return metadata_start() + metadata_size();
}
-#endif
-Address Code::raw_metadata_end() const {
- return raw_metadata_start() + raw_metadata_size();
-}
-
-int Code::MetadataSize() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapMetadataSize(*this, builtin_id())
- : raw_metadata_size();
-}
-
-DEF_GETTER(Code, SizeIncludingMetadata, int) {
+int Code::SizeIncludingMetadata() const {
int size = CodeSize();
- size += relocation_info(cage_base).Size();
+ size += relocation_info().Size();
if (kind() != CodeKind::BASELINE) {
- size += deoptimization_data(cage_base).Size();
+ size += deoptimization_data().Size();
}
return size;
}
-Address Code::raw_safepoint_table_address() const {
- return raw_metadata_start() + safepoint_table_offset();
+Address Code::safepoint_table_address() const {
+ return metadata_start() + safepoint_table_offset();
}
Address Code::SafepointTableAddress() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapSafepointTableAddress(*this, builtin_id())
- : raw_safepoint_table_address();
+ return V8_LIKELY(has_instruction_stream()) ? safepoint_table_address()
+ : OffHeapSafepointTableAddress();
}
int Code::safepoint_table_size() const {
- DCHECK_GE(handler_table_offset() - safepoint_table_offset(), 0);
return handler_table_offset() - safepoint_table_offset();
}
bool Code::has_safepoint_table() const { return safepoint_table_size() > 0; }
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::SafepointTableAddress() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapSafepointTableAddress(*this, builtin_id())
- : code().raw_safepoint_table_address();
-}
-
-int CodeDataContainer::safepoint_table_size() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapSafepointTableSize(*this, builtin_id())
- : code().safepoint_table_size();
-}
-
-bool CodeDataContainer::has_safepoint_table() const {
- return safepoint_table_size() > 0;
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
-Address Code::raw_handler_table_address() const {
- return raw_metadata_start() + handler_table_offset();
+Address Code::handler_table_address() const {
+ return metadata_start() + handler_table_offset();
}
Address Code::HandlerTableAddress() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapHandlerTableAddress(*this, builtin_id())
- : raw_handler_table_address();
+ return V8_LIKELY(has_instruction_stream()) ? handler_table_address()
+ : OffHeapHandlerTableAddress();
}
int Code::handler_table_size() const {
- DCHECK_GE(constant_pool_offset() - handler_table_offset(), 0);
return constant_pool_offset() - handler_table_offset();
}
bool Code::has_handler_table() const { return handler_table_size() > 0; }
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::HandlerTableAddress() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapHandlerTableAddress(*this, builtin_id())
- : code().raw_handler_table_address();
-}
-
-int CodeDataContainer::handler_table_size() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapHandlerTableSize(*this, builtin_id())
- : code().handler_table_size();
-}
-
-bool CodeDataContainer::has_handler_table() const {
- return handler_table_size() > 0;
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
int Code::constant_pool_size() const {
+ if V8_UNLIKELY (!has_instruction_stream()) return OffHeapConstantPoolSize();
+
const int size = code_comments_offset() - constant_pool_offset();
if (!V8_EMBEDDED_CONSTANT_POOL_BOOL) {
DCHECK_EQ(size, 0);
@@ -810,76 +484,51 @@ int Code::constant_pool_size() const {
bool Code::has_constant_pool() const { return constant_pool_size() > 0; }
-#ifdef V8_EXTERNAL_CODE_SPACE
-int CodeDataContainer::constant_pool_size() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapConstantPoolSize(*this, builtin_id())
- : code().constant_pool_size();
+ByteArray Code::unchecked_relocation_info() const {
+ return ByteArray::unchecked_cast(
+ TaggedField<HeapObject, kRelocationInfoOffset>::load(*this));
}
-bool CodeDataContainer::has_constant_pool() const {
- return constant_pool_size() > 0;
+FixedArray Code::unchecked_deoptimization_data() const {
+ return FixedArray::unchecked_cast(
+ TaggedField<HeapObject, kDeoptimizationDataOrInterpreterDataOffset>::load(
+ *this));
}
-#endif
-ByteArray Code::unchecked_relocation_info() const {
+Code InstructionStream::unchecked_code() const {
PtrComprCageBase cage_base = main_cage_base(kRelaxedLoad);
- return ByteArray::unchecked_cast(
- TaggedField<HeapObject, kRelocationInfoOffset>::load(cage_base, *this));
+ return Code::unchecked_cast(
+ TaggedField<HeapObject, kCodeOffset>::Acquire_Load(cage_base, *this));
}
byte* Code::relocation_start() const {
- return unchecked_relocation_info().GetDataStartAddress();
+ return V8_LIKELY(has_instruction_stream())
+ ? relocation_info().GetDataStartAddress()
+ : nullptr;
}
byte* Code::relocation_end() const {
- return unchecked_relocation_info().GetDataEndAddress();
+ return V8_LIKELY(has_instruction_stream())
+ ? relocation_info().GetDataEndAddress()
+ : nullptr;
}
int Code::relocation_size() const {
- return unchecked_relocation_info().length();
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-byte* CodeDataContainer::relocation_start() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? nullptr // Off heap trampolines do not have reloc info.
- : code().relocation_start();
-}
-
-byte* CodeDataContainer::relocation_end() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? nullptr // Off heap trampolines do not have reloc info.
- : code().relocation_end();
-}
-
-int CodeDataContainer::relocation_size() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? 0 // Off heap trampolines do not have reloc info.
- : code().relocation_size();
+ return V8_LIKELY(has_instruction_stream()) ? relocation_info().length() : 0;
}
-#endif
-Address Code::entry() const { return raw_instruction_start(); }
+Address InstructionStream::entry() const { return instruction_start(); }
-bool Code::contains(Isolate* isolate, Address inner_pointer) {
- if (is_off_heap_trampoline() &&
- OffHeapBuiltinContains(isolate, inner_pointer)) {
- return true;
- }
+bool InstructionStream::contains(Isolate* isolate, Address inner_pointer) {
return (address() <= inner_pointer) &&
(inner_pointer < address() + CodeSize());
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-bool CodeDataContainer::contains(Isolate* isolate, Address inner_pointer) {
- if (is_off_heap_trampoline()) {
- if (OffHeapBuiltinContains(isolate, inner_pointer)) return true;
- if (V8_EXTERNAL_CODE_SPACE_BOOL) return false;
- }
- return code().contains(isolate, inner_pointer);
+bool Code::contains(Isolate* isolate, Address inner_pointer) {
+ return has_instruction_stream()
+ ? instruction_stream().contains(isolate, inner_pointer)
+ : OffHeapBuiltinContains(isolate, inner_pointer);
}
-#endif // V8_EXTERNAL_CODE_SPACE
// static
void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
@@ -889,9 +538,12 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
static_cast<size_t>(desc.reloc_size));
}
-int Code::CodeSize() const { return SizeFor(raw_body_size()); }
+int InstructionStream::CodeSize() const {
+ return SizeFor(Code::unchecked_cast(raw_code(kAcquireLoad)).body_size());
+}
+int Code::CodeSize() const { return InstructionStream::SizeFor(body_size()); }
-DEF_GETTER(Code, Size, int) { return CodeSize(); }
+DEF_GETTER(InstructionStream, Size, int) { return CodeSize(); }
CodeKind Code::kind() const {
static_assert(FIELD_SIZE(kFlagsOffset) == kInt32Size);
@@ -958,49 +610,12 @@ uintptr_t Code::GetBaselinePCForNextExecutedBytecode(int bytecode_offset,
bytecode_iterator.GetJumpTargetOffset(), bytecodes);
} else {
DCHECK(!interpreter::Bytecodes::IsJump(bytecode));
+ DCHECK(!interpreter::Bytecodes::IsSwitch(bytecode));
+ DCHECK(!interpreter::Bytecodes::Returns(bytecode));
return GetBaselineEndPCForBytecodeOffset(bytecode_offset, bytecodes);
}
}
-void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
- bool is_off_heap_trampoline) {
- CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
- DCHECK(!CodeKindIsInterpretedJSFunction(kind));
- uint32_t flags = KindField::encode(kind) |
- IsTurbofannedField::encode(is_turbofanned) |
- StackSlotsField::encode(stack_slots) |
- IsOffHeapTrampoline::encode(is_off_heap_trampoline);
- static_assert(FIELD_SIZE(kFlagsOffset) == kInt32Size);
- RELAXED_WRITE_UINT32_FIELD(*this, kFlagsOffset, flags);
- DCHECK_IMPLIES(stack_slots != 0, uses_safepoint_table());
- DCHECK_IMPLIES(!uses_safepoint_table(), stack_slots == 0);
-}
-
-inline bool Code::is_interpreter_trampoline_builtin() const {
- return IsInterpreterTrampolineBuiltin(builtin_id());
-}
-
-inline bool Code::is_baseline_trampoline_builtin() const {
- return IsBaselineTrampolineBuiltin(builtin_id());
-}
-
-inline bool Code::is_baseline_leave_frame_builtin() const {
- return builtin_id() == Builtin::kBaselineLeaveFrame;
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-// Note, must be in sync with Code::checks_tiering_state().
-inline bool CodeDataContainer::checks_tiering_state() const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- bool checks_state = (builtin_id() == Builtin::kCompileLazy ||
- builtin_id() == Builtin::kInterpreterEntryTrampoline ||
- CodeKindCanTierUp(kind()));
- return checks_state ||
- (CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
-// Note, must be in sync with CodeDataContainer::checks_tiering_state().
inline bool Code::checks_tiering_state() const {
bool checks_state = (builtin_id() == Builtin::kCompileLazy ||
builtin_id() == Builtin::kInterpreterEntryTrampoline ||
@@ -1023,134 +638,50 @@ inline bool Code::has_tagged_outgoing_params() const {
#endif
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-inline bool CodeDataContainer::has_tagged_outgoing_params() const {
-#if V8_ENABLE_WEBASSEMBLY
- return CodeKindHasTaggedOutgoingParams(kind()) &&
- builtin_id() != Builtin::kWasmCompileLazy;
-#else
- return CodeKindHasTaggedOutgoingParams(kind());
-#endif
-}
-#endif
-
inline bool Code::is_turbofanned() const {
const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
return IsTurbofannedField::decode(flags);
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-inline bool CodeDataContainer::is_turbofanned() const {
- return IsTurbofannedField::decode(flags(kRelaxedLoad));
-}
-#endif
-
-bool Code::is_maglevved() const { return kind() == CodeKind::MAGLEV; }
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-inline bool CodeDataContainer::is_maglevved() const {
- return kind() == CodeKind::MAGLEV;
-}
-#endif
-
-inline bool CodeDataContainer::can_have_weak_objects() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind field is not available on CodeDataContainer when external code space
- // is not enabled.
- DCHECK(CodeKindIsOptimizedJSFunction(kind()));
-#endif
- int32_t flags = kind_specific_flags(kRelaxedLoad);
- return Code::CanHaveWeakObjectsField::decode(flags);
-}
-
-inline void CodeDataContainer::set_can_have_weak_objects(bool value) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind field is not available on CodeDataContainer when external code space
- // is not enabled.
- DCHECK(CodeKindIsOptimizedJSFunction(kind()));
-#endif
- int32_t previous = kind_specific_flags(kRelaxedLoad);
- int32_t updated = Code::CanHaveWeakObjectsField::update(previous, value);
- set_kind_specific_flags(updated, kRelaxedStore);
-}
+inline bool Code::is_maglevved() const { return kind() == CodeKind::MAGLEV; }
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- CodeDataContainer container = code_data_container(kAcquireLoad);
- return container.can_have_weak_objects();
+ int16_t flags = kind_specific_flags(kRelaxedLoad);
+ return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- CodeDataContainer container = code_data_container(kAcquireLoad);
- container.set_can_have_weak_objects(value);
-}
-
-inline bool CodeDataContainer::is_promise_rejection() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind field is not available on CodeDataContainer when external code space
- // is not enabled.
- DCHECK_EQ(kind(), CodeKind::BUILTIN);
-#endif
- int32_t flags = kind_specific_flags(kRelaxedLoad);
- return Code::IsPromiseRejectionField::decode(flags);
-}
-
-inline void CodeDataContainer::set_is_promise_rejection(bool value) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind field is not available on CodeDataContainer when external code space
- // is not enabled.
- DCHECK_EQ(kind(), CodeKind::BUILTIN);
-#endif
- int32_t previous = kind_specific_flags(kRelaxedLoad);
- int32_t updated = Code::IsPromiseRejectionField::update(previous, value);
+ int16_t previous = kind_specific_flags(kRelaxedLoad);
+ int16_t updated = CanHaveWeakObjectsField::update(previous, value);
set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_promise_rejection() const {
DCHECK_EQ(kind(), CodeKind::BUILTIN);
- CodeDataContainer container = code_data_container(kAcquireLoad);
- return container.is_promise_rejection();
+ int16_t flags = kind_specific_flags(kRelaxedLoad);
+ return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK_EQ(kind(), CodeKind::BUILTIN);
- CodeDataContainer container = code_data_container(kAcquireLoad);
- container.set_is_promise_rejection(value);
-}
-
-inline bool Code::is_off_heap_trampoline() const {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) return false;
- const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
- return IsOffHeapTrampoline::decode(flags);
+ int16_t previous = kind_specific_flags(kRelaxedLoad);
+ int16_t updated = IsPromiseRejectionField::update(previous, value);
+ set_kind_specific_flags(updated, kRelaxedStore);
}
-inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() const {
+inline HandlerTable::CatchPrediction
+InstructionStream::GetBuiltinCatchPrediction() const {
if (is_promise_rejection()) return HandlerTable::PROMISE;
return HandlerTable::UNCAUGHT;
}
-inline HandlerTable::CatchPrediction
-CodeDataContainer::GetBuiltinCatchPrediction() const {
+inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() const {
if (is_promise_rejection()) return HandlerTable::PROMISE;
return HandlerTable::UNCAUGHT;
}
-Builtin Code::builtin_id() const {
- int index = RELAXED_READ_INT_FIELD(*this, kBuiltinIndexOffset);
- DCHECK(index == static_cast<int>(Builtin::kNoBuiltinId) ||
- Builtins::IsBuiltinId(index));
- return static_cast<Builtin>(index);
-}
-
-void Code::set_builtin_id(Builtin builtin) {
- DCHECK(builtin == Builtin::kNoBuiltinId || Builtins::IsBuiltinId(builtin));
- RELAXED_WRITE_INT_FIELD(*this, kBuiltinIndexOffset,
- static_cast<int>(builtin));
-}
-
-bool Code::is_builtin() const { return builtin_id() != Builtin::kNoBuiltinId; }
-
unsigned Code::inlined_bytecode_size() const {
unsigned size = RELAXED_READ_UINT_FIELD(*this, kInlinedBytecodeSizeOffset);
DCHECK(CodeKindIsOptimizedJSFunction(kind()) || size == 0);
@@ -1174,12 +705,6 @@ bool Code::uses_safepoint_table() const {
return is_turbofanned() || is_maglevved() || is_wasm_code();
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-bool CodeDataContainer::uses_safepoint_table() const {
- return is_turbofanned() || is_maglevved() || is_wasm_code();
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
int Code::stack_slots() const {
const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
const int slots = StackSlotsField::decode(flags);
@@ -1187,73 +712,37 @@ int Code::stack_slots() const {
return slots;
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-int CodeDataContainer::stack_slots() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapStackSlots(*this, builtin_id())
- : code().stack_slots();
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
-bool CodeDataContainer::marked_for_deoptimization() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind field is not available on CodeDataContainer when external code space
- // is not enabled.
- DCHECK(CodeKindCanDeoptimize(kind()));
-#endif // V8_EXTERNAL_CODE_SPACE
- int32_t flags = kind_specific_flags(kRelaxedLoad);
- return Code::MarkedForDeoptimizationField::decode(flags);
-}
-
bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- return code_data_container(kAcquireLoad).marked_for_deoptimization();
+ int16_t flags = kind_specific_flags(kRelaxedLoad);
+ return MarkedForDeoptimizationField::decode(flags);
}
-void CodeDataContainer::set_marked_for_deoptimization(bool flag) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- // kind field is not available on CodeDataContainer when external code space
- // is not enabled.
+void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
-#endif // V8_EXTERNAL_CODE_SPACE
- DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = kind_specific_flags(kRelaxedLoad);
- int32_t updated = Code::MarkedForDeoptimizationField::update(previous, flag);
+ DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(
+ GetIsolateFromWritableObject(*this)));
+ int16_t previous = kind_specific_flags(kRelaxedLoad);
+ int16_t updated = MarkedForDeoptimizationField::update(previous, flag);
set_kind_specific_flags(updated, kRelaxedStore);
}
-void Code::set_marked_for_deoptimization(bool flag) {
- code_data_container(kAcquireLoad).set_marked_for_deoptimization(flag);
-}
-
bool Code::embedded_objects_cleared() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags =
- code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
- return EmbeddedObjectsClearedField::decode(flags);
+ int16_t flags = kind_specific_flags(kRelaxedLoad);
+ return Code::EmbeddedObjectsClearedField::decode(flags);
}
void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
DCHECK_IMPLIES(flag, marked_for_deoptimization());
- CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags(kRelaxedLoad);
- int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- container.set_kind_specific_flags(updated, kRelaxedStore);
-}
-
-bool Code::is_optimized_code() const {
- return CodeKindIsOptimizedJSFunction(kind());
+ int16_t previous = kind_specific_flags(kRelaxedLoad);
+ int16_t updated = Code::EmbeddedObjectsClearedField::update(previous, flag);
+ set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::is_wasm_code() const { return kind() == CodeKind::WASM_FUNCTION; }
-#ifdef V8_EXTERNAL_CODE_SPACE
-bool CodeDataContainer::is_wasm_code() const {
- return kind() == CodeKind::WASM_FUNCTION;
-}
-#endif
-
int Code::constant_pool_offset() const {
if (!V8_EMBEDDED_CONSTANT_POOL_BOOL) {
// Redirection needed since the field doesn't exist in this case.
@@ -1267,117 +756,43 @@ void Code::set_constant_pool_offset(int value) {
// Redirection needed since the field doesn't exist in this case.
return;
}
- DCHECK_LE(value, MetadataSize());
+ DCHECK_LE(value, metadata_size());
WriteField<int>(kConstantPoolOffsetOffset, value);
}
-Address Code::raw_constant_pool() const {
- if (!has_constant_pool()) return kNullAddress;
- return raw_metadata_start() + constant_pool_offset();
-}
-
Address Code::constant_pool() const {
if (!has_constant_pool()) return kNullAddress;
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapConstantPoolAddress(*this, builtin_id())
- : raw_constant_pool();
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::constant_pool() const {
- if (!has_constant_pool()) return kNullAddress;
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapConstantPoolAddress(*this, builtin_id())
- : code().raw_constant_pool();
-}
-#endif
-
-Address Code::raw_code_comments() const {
- return raw_metadata_start() + code_comments_offset();
+ return V8_LIKELY(has_instruction_stream())
+ ? metadata_start() + constant_pool_offset()
+ : OffHeapConstantPoolAddress();
}
Address Code::code_comments() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapCodeCommentsAddress(*this, builtin_id())
- : raw_code_comments();
+ return V8_LIKELY(has_instruction_stream())
+ ? metadata_start() + code_comments_offset()
+ : OffHeapCodeCommentsAddress();
}
int Code::code_comments_size() const {
- DCHECK_GE(unwinding_info_offset() - code_comments_offset(), 0);
return unwinding_info_offset() - code_comments_offset();
}
bool Code::has_code_comments() const { return code_comments_size() > 0; }
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::code_comments() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapCodeCommentsAddress(*this, builtin_id())
- : code().code_comments();
-}
-
-int CodeDataContainer::code_comments_size() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapCodeCommentsSize(*this, builtin_id())
- : code().code_comments_size();
-}
-
-bool CodeDataContainer::has_code_comments() const {
- return code_comments_size() > 0;
-}
-#endif
-
-Address Code::raw_unwinding_info_start() const {
- return raw_metadata_start() + unwinding_info_offset();
-}
-
Address Code::unwinding_info_start() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapUnwindingInfoAddress(*this, builtin_id())
- : raw_unwinding_info_start();
+ return metadata_start() + unwinding_info_offset();
}
-Address Code::unwinding_info_end() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapMetadataEnd(*this, builtin_id())
- : raw_metadata_end();
-}
+Address Code::unwinding_info_end() const { return metadata_end(); }
int Code::unwinding_info_size() const {
- DCHECK_GE(unwinding_info_end(), unwinding_info_start());
return static_cast<int>(unwinding_info_end() - unwinding_info_start());
}
bool Code::has_unwinding_info() const { return unwinding_info_size() > 0; }
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::unwinding_info_start() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapUnwindingInfoAddress(*this, builtin_id())
- : code().raw_unwinding_info_start();
-}
-
-Address CodeDataContainer::unwinding_info_end() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapMetadataEnd(*this, builtin_id())
- : code().raw_metadata_end();
-}
-
-int CodeDataContainer::unwinding_info_size() const {
- return V8_UNLIKELY(is_off_heap_trampoline())
- ? OffHeapUnwindingInfoSize(*this, builtin_id())
- : code().unwinding_info_size();
-
- DCHECK_GE(unwinding_info_end(), unwinding_info_start());
- return static_cast<int>(unwinding_info_end() - unwinding_info_start());
-}
-
-bool CodeDataContainer::has_unwinding_info() const {
- return unwinding_info_size() > 0;
-}
-#endif
-
-Code Code::GetCodeFromTargetAddress(Address address) {
+// static
+InstructionStream InstructionStream::FromTargetAddress(Address address) {
{
// TODO(jgruber,v8:6666): Support embedded builtins here. We'd need to pass
// in the current isolate.
@@ -1387,18 +802,27 @@ Code Code::GetCodeFromTargetAddress(Address address) {
CHECK(address < start || address >= end);
}
- HeapObject code = HeapObject::FromAddress(address - Code::kHeaderSize);
- // Unchecked cast because we can't rely on the map currently
- // not being a forwarding pointer.
- return Code::unchecked_cast(code);
+ HeapObject code =
+ HeapObject::FromAddress(address - InstructionStream::kHeaderSize);
+ // Unchecked cast because we can't rely on the map currently not being a
+ // forwarding pointer.
+ return InstructionStream::unchecked_cast(code);
+}
+
+// static
+Code Code::FromTargetAddress(Address address) {
+ return InstructionStream::FromTargetAddress(address).code(kAcquireLoad);
}
-Code Code::GetObjectFromEntryAddress(Address location_of_address) {
+// static
+InstructionStream InstructionStream::FromEntryAddress(
+ Address location_of_address) {
Address code_entry = base::Memory<Address>(location_of_address);
- HeapObject code = HeapObject::FromAddress(code_entry - Code::kHeaderSize);
- // Unchecked cast because we can't rely on the map currently
- // not being a forwarding pointer.
- return Code::unchecked_cast(code);
+ HeapObject code =
+ HeapObject::FromAddress(code_entry - InstructionStream::kHeaderSize);
+ // Unchecked cast because we can't rely on the map currently not being a
+ // forwarding pointer.
+ return InstructionStream::unchecked_cast(code);
}
bool Code::CanContainWeakObjects() {
@@ -1410,14 +834,13 @@ bool Code::IsWeakObject(HeapObject object) {
}
bool Code::IsWeakObjectInOptimizedCode(HeapObject object) {
- Map map = object.map(kAcquireLoad);
- InstanceType instance_type = map.instance_type();
- if (InstanceTypeChecker::IsMap(instance_type)) {
+ Map map_object = object.map(kAcquireLoad);
+ if (InstanceTypeChecker::IsMap(map_object)) {
return Map::cast(object).CanTransition();
}
- return InstanceTypeChecker::IsPropertyCell(instance_type) ||
- InstanceTypeChecker::IsJSReceiver(instance_type) ||
- InstanceTypeChecker::IsContext(instance_type);
+ return InstanceTypeChecker::IsPropertyCell(map_object) ||
+ InstanceTypeChecker::IsJSReceiver(map_object) ||
+ InstanceTypeChecker::IsContext(map_object);
}
bool Code::IsWeakObjectInDeoptimizationLiteralArray(Object object) {
@@ -1429,143 +852,144 @@ bool Code::IsWeakObjectInDeoptimizationLiteralArray(Object object) {
Code::IsWeakObjectInOptimizedCode(HeapObject::cast(object));
}
-bool Code::IsExecutable() {
- return !Builtins::IsBuiltinId(builtin_id()) || !is_off_heap_trampoline() ||
- Builtins::CodeObjectIsExecutable(builtin_id());
+void Code::IterateDeoptimizationLiterals(RootVisitor* v) {
+ if (kind() == CodeKind::BASELINE) return;
+
+ auto deopt_data = DeoptimizationData::cast(deoptimization_data());
+ if (deopt_data.length() == 0) return;
+
+ DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
+ const int literals_length = literals.length();
+ for (int i = 0; i < literals_length; ++i) {
+ MaybeObject maybe_literal = literals.Get(i);
+ HeapObject heap_literal;
+ if (maybe_literal.GetHeapObject(&heap_literal)) {
+ v->VisitRootPointer(Root::kStackRoots, "deoptimization literal",
+ FullObjectSlot(&heap_literal));
+ }
+ }
}
// This field has to have relaxed atomic accessors because it is accessed in the
// concurrent marker.
-static_assert(FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) ==
- kInt32Size);
-RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
- kKindSpecificFlagsOffset)
+static_assert(FIELD_SIZE(Code::kKindSpecificFlagsOffset) == kInt16Size);
+RELAXED_UINT16_ACCESSORS(Code, kind_specific_flags, kKindSpecificFlagsOffset)
-Object CodeDataContainer::raw_code() const {
+Object Code::raw_instruction_stream() const {
PtrComprCageBase cage_base = code_cage_base();
- return CodeDataContainer::raw_code(cage_base);
+ return Code::raw_instruction_stream(cage_base);
}
-Object CodeDataContainer::raw_code(PtrComprCageBase cage_base) const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- Object value = ExternalCodeField::load(cage_base, *this);
- return value;
-#else
- UNREACHABLE();
-#endif // V8_EXTERNAL_CODE_SPACE
+Object Code::raw_instruction_stream(PtrComprCageBase cage_base) const {
+ return ExternalCodeField<Object>::load(cage_base, *this);
}
-void CodeDataContainer::set_raw_code(Object value, WriteBarrierMode mode) {
-#ifdef V8_EXTERNAL_CODE_SPACE
- ExternalCodeField::Release_Store(*this, value);
- CONDITIONAL_WRITE_BARRIER(*this, kCodeOffset, value, mode);
-#else
- UNREACHABLE();
-#endif // V8_EXTERNAL_CODE_SPACE
+void Code::set_raw_instruction_stream(Object value, WriteBarrierMode mode) {
+ ExternalCodeField<Object>::Release_Store(*this, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kInstructionStreamOffset, value, mode);
}
-Object CodeDataContainer::raw_code(RelaxedLoadTag tag) const {
- PtrComprCageBase cage_base = code_cage_base();
- return CodeDataContainer::raw_code(cage_base, tag);
+bool Code::has_instruction_stream() const {
+ const uint32_t value = ReadField<uint32_t>(kInstructionStreamOffset);
+ SLOW_DCHECK(value == 0 || !InReadOnlySpace());
+ return value != 0;
}
-Object CodeDataContainer::raw_code(PtrComprCageBase cage_base,
- RelaxedLoadTag) const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- Object value = ExternalCodeField::Relaxed_Load(cage_base, *this);
- return value;
-#else
- UNREACHABLE();
-#endif // V8_EXTERNAL_CODE_SPACE
+bool Code::has_instruction_stream(RelaxedLoadTag tag) const {
+ const uint32_t value =
+ RELAXED_READ_INT32_FIELD(*this, kInstructionStreamOffset);
+ SLOW_DCHECK(value == 0 || !InReadOnlySpace());
+ return value != 0;
}
-ACCESSORS(CodeDataContainer, next_code_link, Object, kNextCodeLinkOffset)
-
-PtrComprCageBase CodeDataContainer::code_cage_base() const {
+PtrComprCageBase Code::code_cage_base() const {
#ifdef V8_EXTERNAL_CODE_SPACE
+ // Only available if the current Code object is not in RO space (otherwise we
+ // can't grab the current Isolate from it).
+ DCHECK(!InReadOnlySpace());
Isolate* isolate = GetIsolateFromWritableObject(*this);
return PtrComprCageBase(isolate->code_cage_base());
-#else
+#else // V8_EXTERNAL_CODE_SPACE
+ // Without external code space: `code_cage_base == main_cage_base`. We can
+ // get the main cage base from any heap object, including objects in RO
+ // space.
return GetPtrComprCageBase(*this);
-#endif
+#endif // V8_EXTERNAL_CODE_SPACE
}
-Code CodeDataContainer::code() const {
+InstructionStream Code::instruction_stream() const {
PtrComprCageBase cage_base = code_cage_base();
- return CodeDataContainer::code(cage_base);
+ return Code::instruction_stream(cage_base);
}
-Code CodeDataContainer::code(PtrComprCageBase cage_base) const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK(!is_off_heap_trampoline());
-#endif
- return Code::cast(raw_code(cage_base));
+InstructionStream Code::instruction_stream(PtrComprCageBase cage_base) const {
+ DCHECK(has_instruction_stream());
+ return ExternalCodeField<InstructionStream>::load(cage_base, *this);
}
-Code CodeDataContainer::code(RelaxedLoadTag tag) const {
+InstructionStream Code::instruction_stream(RelaxedLoadTag tag) const {
PtrComprCageBase cage_base = code_cage_base();
- return CodeDataContainer::code(cage_base, tag);
+ return Code::instruction_stream(cage_base, tag);
+}
+
+InstructionStream Code::instruction_stream(PtrComprCageBase cage_base,
+ RelaxedLoadTag tag) const {
+ DCHECK(has_instruction_stream());
+ return ExternalCodeField<InstructionStream>::Relaxed_Load(cage_base, *this);
}
-Code CodeDataContainer::code(PtrComprCageBase cage_base,
- RelaxedLoadTag tag) const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- return Code::cast(raw_code(cage_base, tag));
+Object Code::raw_instruction_stream(RelaxedLoadTag tag) const {
+ PtrComprCageBase cage_base = code_cage_base();
+ return Code::raw_instruction_stream(cage_base, tag);
}
-DEF_GETTER(CodeDataContainer, code_entry_point, Address) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+Object Code::raw_instruction_stream(PtrComprCageBase cage_base,
+ RelaxedLoadTag tag) const {
+ return ExternalCodeField<Object>::Relaxed_Load(cage_base, *this);
+}
+
+DEF_GETTER(Code, code_entry_point, Address) {
return ReadField<Address>(kCodeEntryPointOffset);
}
-void CodeDataContainer::init_code_entry_point(Isolate* isolate, Address value) {
+void Code::init_code_entry_point(Isolate* isolate, Address value) {
set_code_entry_point(isolate, value);
}
-void CodeDataContainer::set_code_entry_point(Isolate* isolate, Address value) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+void Code::set_code_entry_point(Isolate* isolate, Address value) {
WriteField<Address>(kCodeEntryPointOffset, value);
}
-void CodeDataContainer::SetCodeAndEntryPoint(Isolate* isolate_for_sandbox,
- Code code, WriteBarrierMode mode) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- set_raw_code(code, mode);
- set_code_entry_point(isolate_for_sandbox, code.InstructionStart());
+void Code::SetInstructionStreamAndEntryPoint(Isolate* isolate_for_sandbox,
+ InstructionStream code,
+ WriteBarrierMode mode) {
+ set_raw_instruction_stream(code, mode);
+ set_code_entry_point(isolate_for_sandbox, code.instruction_start());
}
-void CodeDataContainer::SetEntryPointForOffHeapBuiltin(
- Isolate* isolate_for_sandbox, Address entry) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK(is_off_heap_trampoline());
-#endif
+void Code::SetEntryPointForOffHeapBuiltin(Isolate* isolate_for_sandbox,
+ Address entry) {
+ DCHECK(!has_instruction_stream());
set_code_entry_point(isolate_for_sandbox, entry);
}
-void CodeDataContainer::UpdateCodeEntryPoint(Isolate* isolate_for_sandbox,
- Code code) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- DCHECK_EQ(raw_code(), code);
- set_code_entry_point(isolate_for_sandbox, code.InstructionStart());
-}
-
-Address CodeDataContainer::InstructionStart() const {
- return code_entry_point();
+void Code::SetCodeEntryPointForSerialization(Isolate* isolate, Address entry) {
+ set_code_entry_point(isolate, entry);
}
-Address CodeDataContainer::raw_instruction_start() const {
- return code_entry_point();
+void Code::UpdateCodeEntryPoint(Isolate* isolate_for_sandbox,
+ InstructionStream code) {
+ DCHECK_EQ(raw_instruction_stream(), code);
+ set_code_entry_point(isolate_for_sandbox, code.instruction_start());
}
-Address CodeDataContainer::entry() const { return code_entry_point(); }
+Address Code::InstructionStart() const { return code_entry_point(); }
-void CodeDataContainer::clear_padding() {
+void Code::clear_padding() {
memset(reinterpret_cast<void*>(address() + kUnalignedSize), 0,
kSize - kUnalignedSize);
}
-RELAXED_UINT16_ACCESSORS(CodeDataContainer, flags, kFlagsOffset)
+RELAXED_UINT16_ACCESSORS(Code, flags, kFlagsOffset)
// Ensure builtin_id field fits into int16_t, so that we can rely on sign
// extension to convert int16_t{-1} to kNoBuiltinId.
@@ -1573,26 +997,22 @@ RELAXED_UINT16_ACCESSORS(CodeDataContainer, flags, kFlagsOffset)
static_assert(static_cast<int>(Builtin::kNoBuiltinId) == -1);
static_assert(Builtins::kBuiltinCount < std::numeric_limits<int16_t>::max());
-void CodeDataContainer::initialize_flags(CodeKind kind, Builtin builtin_id,
- bool is_turbofanned,
- bool is_off_heap_trampoline) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- uint16_t value = KindField::encode(kind) |
+void Code::initialize_flags(CodeKind kind, Builtin builtin_id,
+ bool is_turbofanned, int stack_slots) {
+ CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
+ DCHECK(!CodeKindIsInterpretedJSFunction(kind));
+ uint32_t value = KindField::encode(kind) |
IsTurbofannedField::encode(is_turbofanned) |
- IsOffHeapTrampoline::encode(is_off_heap_trampoline);
- set_flags(value, kRelaxedStore);
+ StackSlotsField::encode(stack_slots);
+ static_assert(FIELD_SIZE(kFlagsOffset) == kInt32Size);
+ RELAXED_WRITE_UINT32_FIELD(*this, kFlagsOffset, value);
+ DCHECK_IMPLIES(stack_slots != 0, uses_safepoint_table());
+ DCHECK_IMPLIES(!uses_safepoint_table(), stack_slots == 0);
WriteField<int16_t>(kBuiltinIdOffset, static_cast<int16_t>(builtin_id));
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-
-CodeKind CodeDataContainer::kind() const {
- return KindField::decode(flags(kRelaxedLoad));
-}
-
-Builtin CodeDataContainer::builtin_id() const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+Builtin Code::builtin_id() const {
// Rely on sign-extension when converting int16_t to int to preserve
// kNoBuiltinId value.
static_assert(static_cast<int>(static_cast<int16_t>(Builtin::kNoBuiltinId)) ==
@@ -1601,64 +1021,24 @@ Builtin CodeDataContainer::builtin_id() const {
return static_cast<Builtin>(value);
}
-bool CodeDataContainer::is_builtin() const {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- return builtin_id() != Builtin::kNoBuiltinId;
-}
-
-bool CodeDataContainer::is_off_heap_trampoline() const {
- return IsOffHeapTrampoline::decode(flags(kRelaxedLoad));
-}
-
-void CodeDataContainer::set_is_off_heap_trampoline_for_hash(bool value) {
- uint16_t flags_value = flags(kRelaxedLoad);
- flags_value = IsOffHeapTrampoline::update(flags_value, value);
- set_flags(flags_value, kRelaxedStore);
-}
+bool Code::is_builtin() const { return builtin_id() != Builtin::kNoBuiltinId; }
-bool CodeDataContainer::is_optimized_code() const {
+bool Code::is_optimized_code() const {
return CodeKindIsOptimizedJSFunction(kind());
}
-inline bool CodeDataContainer::is_interpreter_trampoline_builtin() const {
+inline bool Code::is_interpreter_trampoline_builtin() const {
return IsInterpreterTrampolineBuiltin(builtin_id());
}
-inline bool CodeDataContainer::is_baseline_trampoline_builtin() const {
+inline bool Code::is_baseline_trampoline_builtin() const {
return IsBaselineTrampolineBuiltin(builtin_id());
}
-inline bool CodeDataContainer::is_baseline_leave_frame_builtin() const {
+inline bool Code::is_baseline_leave_frame_builtin() const {
return builtin_id() == Builtin::kBaselineLeaveFrame;
}
-//
-// A collection of getters and predicates that forward queries to associated
-// Code object.
-//
-
-#define DEF_PRIMITIVE_FORWARDING_CDC_GETTER(name, type) \
- type CodeDataContainer::name() const { return FromCodeT(*this).name(); }
-
-#define DEF_FORWARDING_CDC_GETTER(name, type, result_if_off_heap) \
- DEF_GETTER(CodeDataContainer, name, type) { \
- if (is_off_heap_trampoline()) { \
- return GetReadOnlyRoots().result_if_off_heap(); \
- } \
- return FromCodeT(*this).name(cage_base); \
- }
-
-DEF_FORWARDING_CDC_GETTER(deoptimization_data, FixedArray, empty_fixed_array)
-DEF_FORWARDING_CDC_GETTER(bytecode_or_interpreter_data, HeapObject,
- empty_fixed_array)
-DEF_FORWARDING_CDC_GETTER(source_position_table, ByteArray, empty_byte_array)
-DEF_FORWARDING_CDC_GETTER(bytecode_offset_table, ByteArray, empty_byte_array)
-
-#undef DEF_PRIMITIVE_FORWARDING_CDC_GETTER
-#undef DEF_FORWARDING_CDC_GETTER
-
-#endif // V8_EXTERNAL_CODE_SPACE
-
byte BytecodeArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
return ReadField<byte>(kHeaderSize + index * kCharSize);
@@ -1764,15 +1144,50 @@ DEF_GETTER(BytecodeArray, SourcePositionTable, ByteArray) {
return roots.empty_byte_array();
}
+DEF_GETTER(BytecodeArray, raw_constant_pool, Object) {
+ Object value =
+ TaggedField<Object>::load(cage_base, *this, kConstantPoolOffset);
+ // This field might be 0 during deserialization.
+ DCHECK(value == Smi::zero() || value.IsFixedArray());
+ return value;
+}
+
+DEF_GETTER(BytecodeArray, raw_handler_table, Object) {
+ Object value =
+ TaggedField<Object>::load(cage_base, *this, kHandlerTableOffset);
+ // This field might be 0 during deserialization.
+ DCHECK(value == Smi::zero() || value.IsByteArray());
+ return value;
+}
+
+DEF_GETTER(BytecodeArray, raw_source_position_table, Object) {
+ Object value =
+ TaggedField<Object>::load(cage_base, *this, kSourcePositionTableOffset);
+ // This field might be 0 during deserialization.
+ DCHECK(value == Smi::zero() || value.IsByteArray() || value.IsUndefined() ||
+ value.IsException());
+ return value;
+}
+
int BytecodeArray::BytecodeArraySize() const { return SizeFor(this->length()); }
DEF_GETTER(BytecodeArray, SizeIncludingMetadata, int) {
int size = BytecodeArraySize();
- size += constant_pool(cage_base).Size(cage_base);
- size += handler_table(cage_base).Size();
- ByteArray table = SourcePositionTable(cage_base);
- if (table.length() != 0) {
- size += table.Size();
+ Object maybe_constant_pool = raw_constant_pool(cage_base);
+ if (maybe_constant_pool.IsFixedArray()) {
+ size += FixedArray::cast(maybe_constant_pool).Size(cage_base);
+ } else {
+ DCHECK_EQ(maybe_constant_pool, Smi::zero());
+ }
+ Object maybe_handler_table = raw_handler_table(cage_base);
+ if (maybe_handler_table.IsByteArray()) {
+ size += ByteArray::cast(maybe_handler_table).Size();
+ } else {
+ DCHECK_EQ(maybe_handler_table, Smi::zero());
+ }
+ Object maybe_table = raw_source_position_table(cage_base);
+ if (maybe_table.IsByteArray()) {
+ size += ByteArray::cast(maybe_table).Size();
}
return size;
}
@@ -1825,10 +1240,10 @@ inline Object DeoptimizationLiteralArray::get(PtrComprCageBase cage_base,
// weakly-held deoptimization literals are basically local variables that
// TurboFan has decided not to keep on the stack. Thus, if the deoptimization
// literal goes away, then whatever code needed it should be unreachable. The
- // exception is currently running Code: in that case, the deoptimization
- // literals array might be the only thing keeping the target object alive.
- // Thus, when a Code is running, we strongly mark all of its deoptimization
- // literals.
+ // exception is currently running InstructionStream: in that case, the
+ // deoptimization literals array might be the only thing keeping the target
+ // object alive. Thus, when an InstructionStream is running, we strongly mark
+ // all of its deoptimization literals.
CHECK(!maybe.IsCleared());
return maybe.GetHeapObjectOrSmi();
@@ -1853,11 +1268,11 @@ void DependentCode::DeoptimizeDependencyGroups(Isolate* isolate, ObjectT object,
// static
template <typename ObjectT>
-bool DependentCode::MarkCodeForDeoptimization(ObjectT object,
+bool DependentCode::MarkCodeForDeoptimization(Isolate* isolate, ObjectT object,
DependencyGroups groups) {
// Shared objects are designed to never invalidate code.
DCHECK(!object.InSharedHeap());
- return object.dependent_code().MarkCodeForDeoptimization(groups);
+ return object.dependent_code().MarkCodeForDeoptimization(isolate, groups);
}
} // namespace internal
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 46d9edb471..1ca28beb30 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -6,6 +6,7 @@
#include <iomanip>
+#include "src/base/v8-fallthrough.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/reloc-info.h"
@@ -30,6 +31,10 @@
#include "src/diagnostics/eh-frame.h"
#endif
+#ifdef V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -49,7 +54,7 @@ inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(
// copy of the re-embedded builtins in the shared CodeRange, so use that if
// it's present.
if (v8_flags.jitless) return EmbeddedData::FromBlob();
- CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange();
return (code_range && code_range->embedded_blob_code_copy() != nullptr)
? EmbeddedData::FromBlob(code_range)
: EmbeddedData::FromBlob();
@@ -62,91 +67,108 @@ inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(
} // namespace
-Address OffHeapInstructionStart(HeapObject code, Builtin builtin) {
+Address Code::OffHeapInstructionStart() const {
// TODO(11527): Here and below: pass Isolate as an argument for getting
// the EmbeddedData.
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin);
}
-Address OffHeapInstructionEnd(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapInstructionEnd() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin) +
d.InstructionSizeOfBuiltin(builtin);
}
-int OffHeapInstructionSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapInstructionSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionSizeOfBuiltin(builtin);
}
-Address OffHeapMetadataStart(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapMetadataStart() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.MetadataStartOfBuiltin(builtin);
}
-Address OffHeapMetadataEnd(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapMetadataEnd() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.MetadataStartOfBuiltin(builtin) + d.MetadataSizeOfBuiltin(builtin);
}
-int OffHeapMetadataSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapMetadataSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.MetadataSizeOfBuiltin(builtin);
}
-Address OffHeapSafepointTableAddress(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapSafepointTableAddress() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.SafepointTableStartOf(builtin);
}
-int OffHeapSafepointTableSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapSafepointTableSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.SafepointTableSizeOf(builtin);
}
-Address OffHeapHandlerTableAddress(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapHandlerTableAddress() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.HandlerTableStartOf(builtin);
}
-int OffHeapHandlerTableSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapHandlerTableSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.HandlerTableSizeOf(builtin);
}
-Address OffHeapConstantPoolAddress(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapConstantPoolAddress() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.ConstantPoolStartOf(builtin);
}
-int OffHeapConstantPoolSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapConstantPoolSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.ConstantPoolSizeOf(builtin);
}
-Address OffHeapCodeCommentsAddress(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapCodeCommentsAddress() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.CodeCommentsStartOf(builtin);
}
-int OffHeapCodeCommentsSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapCodeCommentsSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.CodeCommentsSizeOf(builtin);
}
-Address OffHeapUnwindingInfoAddress(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+Address Code::OffHeapUnwindingInfoAddress() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.UnwindingInfoStartOf(builtin);
}
-int OffHeapUnwindingInfoSize(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapUnwindingInfoSize() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.UnwindingInfoSizeOf(builtin);
}
-int OffHeapStackSlots(HeapObject code, Builtin builtin) {
- EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
+int Code::OffHeapStackSlots() const {
+ Builtin builtin = builtin_id();
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.StackSlotsOf(builtin);
}
@@ -160,25 +182,31 @@ void Code::ClearEmbeddedObjects(Heap* heap) {
set_embedded_objects_cleared(true);
}
-void Code::Relocate(intptr_t delta) {
- for (RelocIterator it(*this, RelocInfo::kApplyMask); !it.done(); it.next()) {
+void InstructionStream::Relocate(intptr_t delta) {
+ Code code = unchecked_code();
+ // This is called during evacuation and code.instruction_stream() will point
+ // to the old object. So pass *this directly to the RelocIterator and use a
+ // dummy Code() since it's not needed.
+ for (RelocIterator it(Code(), *this, code.unchecked_relocation_info(),
+ code.constant_pool(), RelocInfo::kApplyMask);
+ !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
- FlushICache();
+ FlushInstructionCache(instruction_start(), code.instruction_size());
}
void Code::FlushICache() const {
- FlushInstructionCache(raw_instruction_start(), raw_instruction_size());
+ FlushInstructionCache(InstructionStart(), instruction_size());
}
void Code::CopyFromNoFlush(ByteArray reloc_info, Heap* heap,
const CodeDesc& desc) {
// Copy code.
- static_assert(kOnHeapBodyIsContiguous);
- CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
+ CopyBytes(reinterpret_cast<byte*>(InstructionStart()), desc.buffer,
static_cast<size_t>(desc.instr_size));
// TODO(jgruber,v8:11036): Merge with the above.
- CopyBytes(reinterpret_cast<byte*>(raw_instruction_start() + desc.instr_size),
+ CopyBytes(reinterpret_cast<byte*>(InstructionStart() + desc.instr_size),
desc.unwinding_info, static_cast<size_t>(desc.unwinding_info_size));
// Copy reloc info.
@@ -203,9 +231,9 @@ void Code::RelocateFromDesc(ByteArray reloc_info, Heap* heap,
// Rewrite code handles to direct pointers to the first instruction in the
// code object.
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
- DCHECK(p->IsCodeT(GetPtrComprCageBaseSlow(*p)));
- Code code = FromCodeT(CodeT::cast(*p));
- it.rinfo()->set_target_address(code.raw_instruction_start(),
+ DCHECK(p->IsCode(GetPtrComprCageBaseSlow(*p)));
+ InstructionStream code = FromCode(Code::cast(*p));
+ it.rinfo()->set_target_address(code.instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsNearBuiltinEntry(mode)) {
// Rewrite builtin IDs to PC-relative offset to the builtin entry point.
@@ -215,9 +243,23 @@ void Code::RelocateFromDesc(ByteArray reloc_info, Heap* heap,
it.rinfo()->set_target_address(p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
DCHECK_EQ(p, it.rinfo()->target_address());
+ } else if (RelocInfo::IsWasmStubCall(mode)) {
+#if V8_ENABLE_WEBASSEMBLY
+ // Map wasm stub id to builtin.
+ uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
+ DCHECK_LT(stub_call_tag, wasm::WasmCode::kRuntimeStubCount);
+ Builtin builtin = wasm::RuntimeStubIdToBuiltinName(
+ static_cast<wasm::WasmCode::RuntimeStubId>(stub_call_tag));
+ // Store the builtin address in relocation info.
+ Address entry =
+ heap->isolate()->builtin_entry_table()[Builtins::ToInt(builtin)];
+ it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
+#else
+ UNREACHABLE();
+#endif
} else {
intptr_t delta =
- raw_instruction_start() - reinterpret_cast<Address>(desc.buffer);
+ InstructionStart() - reinterpret_cast<Address>(desc.buffer);
it.rinfo()->apply(delta);
}
}
@@ -229,15 +271,6 @@ SafepointEntry Code::GetSafepointEntry(Isolate* isolate, Address pc) {
return table.FindEntry(pc);
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-SafepointEntry CodeDataContainer::GetSafepointEntry(Isolate* isolate,
- Address pc) {
- DCHECK(!is_maglevved());
- SafepointTable table(isolate, pc, *this);
- return table.FindEntry(pc);
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
MaglevSafepointEntry Code::GetMaglevSafepointEntry(Isolate* isolate,
Address pc) {
DCHECK(is_maglevved());
@@ -245,60 +278,24 @@ MaglevSafepointEntry Code::GetMaglevSafepointEntry(Isolate* isolate,
return table.FindEntry(pc);
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-MaglevSafepointEntry CodeDataContainer::GetMaglevSafepointEntry(
- Isolate* isolate, Address pc) {
- DCHECK(is_maglevved());
- MaglevSafepointTable table(isolate, pc, *this);
- return table.FindEntry(pc);
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
Address Code::OffHeapInstructionStart(Isolate* isolate, Address pc) const {
- DCHECK(is_off_heap_trampoline());
+ DCHECK(!has_instruction_stream());
EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
return d.InstructionStartOfBuiltin(builtin_id());
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::OffHeapInstructionStart(Isolate* isolate,
- Address pc) const {
- DCHECK(is_off_heap_trampoline());
- EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
- return d.InstructionStartOfBuiltin(builtin_id());
-}
-#endif
-
Address Code::OffHeapInstructionEnd(Isolate* isolate, Address pc) const {
- DCHECK(is_off_heap_trampoline());
+ DCHECK(!has_instruction_stream());
EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
return d.InstructionEndOf(builtin_id());
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-Address CodeDataContainer::OffHeapInstructionEnd(Isolate* isolate,
- Address pc) const {
- DCHECK(is_off_heap_trampoline());
- EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
- return d.InstructionEndOf(builtin_id());
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
bool Code::OffHeapBuiltinContains(Isolate* isolate, Address pc) const {
- DCHECK(is_off_heap_trampoline());
+ DCHECK(!has_instruction_stream());
EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
return d.BuiltinContains(builtin_id(), pc);
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-bool CodeDataContainer::OffHeapBuiltinContains(Isolate* isolate,
- Address pc) const {
- DCHECK(is_off_heap_trampoline());
- EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
- return d.BuiltinContains(builtin_id(), pc);
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourcePosition(PtrComprCageBase cage_base, int offset) {
CHECK_NE(kind(cage_base), CodeKind::BASELINE);
@@ -339,21 +336,6 @@ int AbstractCode::SourceStatementPosition(PtrComprCageBase cage_base,
return statement_position;
}
-bool Code::CanDeoptAt(Isolate* isolate, Address pc) {
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(deoptimization_data());
- Address code_start_address = InstructionStart(isolate, pc);
- for (int i = 0; i < deopt_data.DeoptCount(); i++) {
- if (deopt_data.Pc(i).value() == -1) continue;
- Address address = code_start_address + deopt_data.Pc(i).value();
- if (address == pc &&
- deopt_data.GetBytecodeOffset(i) != BytecodeOffset::None()) {
- return true;
- }
- }
- return false;
-}
-
bool Code::IsIsolateIndependent(Isolate* isolate) {
static constexpr int kModeMask =
RelocInfo::AllRealModesMask() &
@@ -389,9 +371,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
if (OffHeapInstructionStream::PcIsOffHeap(isolate, target_address))
continue;
- Code target = Code::GetCodeFromTargetAddress(target_address);
- CHECK(target.IsCode());
- if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
+ Code target = Code::FromTargetAddress(target_address);
+ if (Builtins::IsIsolateIndependentBuiltin(target)) {
+ continue;
+ }
}
return false;
}
@@ -417,35 +400,48 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
return false;
}
-Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
- isolate_ = isolate;
- Object list = isolate->heap()->native_contexts_list();
- next_context_ =
- list.IsUndefined(isolate_) ? NativeContext() : NativeContext::cast(list);
-}
+Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate)
+ : isolate_(isolate),
+ safepoint_scope_(std::make_unique<SafepointScope>(
+ isolate, isolate->is_shared_space_isolate()
+ ? SafepointKind::kGlobal
+ : SafepointKind::kIsolate)),
+ object_iterator_(
+ isolate->heap()->code_space()->GetObjectIterator(isolate->heap())),
+ state_(kIteratingCodeSpace) {}
Code Code::OptimizedCodeIterator::Next() {
- do {
- Object next;
- if (!current_code_.is_null()) {
- // Get next code in the linked list.
- next = current_code_.next_code_link();
- } else if (!next_context_.is_null()) {
- // Linked list of code exhausted. Get list of next context.
- next = next_context_.OptimizedCodeListHead();
- Object next_context = next_context_.next_context_link();
- next_context_ = next_context.IsUndefined(isolate_)
- ? NativeContext()
- : NativeContext::cast(next_context);
- } else {
- // Exhausted contexts.
- return Code();
+ while (true) {
+ HeapObject object = object_iterator_->Next();
+ if (object.is_null()) {
+ // No objects left in the current iterator; try to move to the next
+ // space based on the state.
+ switch (state_) {
+ case kIteratingCodeSpace: {
+ object_iterator_ =
+ isolate_->heap()->code_lo_space()->GetObjectIterator(
+ isolate_->heap());
+ state_ = kIteratingCodeLOSpace;
+ continue;
+ }
+ case kIteratingCodeLOSpace:
+ // No other spaces to iterate, so clean up and we're done. Keep the
+ // object iterator so that it keeps returning null on Next(), to avoid
+ // needing to branch on state_ before the while loop, but drop the
+ // safepoint scope since we no longer need to prevent objects from
+ // moving.
+ safepoint_scope_.reset();
+ state_ = kDone;
+ V8_FALLTHROUGH;
+ case kDone:
+ return Code();
+ }
}
- current_code_ =
- next.IsUndefined(isolate_) ? Code() : FromCodeT(CodeT::cast(next));
- } while (current_code_.is_null());
- DCHECK(CodeKindCanDeoptimize(current_code_.kind()));
- return current_code_;
+ InstructionStream istream = InstructionStream::cast(object);
+ Code code = istream.code(kAcquireLoad);
+ if (!CodeKindCanDeoptimize(code.kind())) continue;
+ return code;
+ }
}
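A hedged usage sketch, not from the patch itself: after the rewrite above the iterator walks the code spaces directly instead of per-NativeContext linked lists and yields only Code whose kind can deoptimize. Only names visible in this hunk are used; the callback is a hypothetical placeholder.
// Sketch: enumerate every deoptimizable Code object in the heap.
Code::OptimizedCodeIterator it(isolate);
for (Code code = it.Next(); !code.is_null(); code = it.Next()) {
  DCHECK(CodeKindCanDeoptimize(code.kind()));
  VisitOptimizedCode(code);  // Hypothetical placeholder.
}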
Handle<DeoptimizationData> DeoptimizationData::New(Isolate* isolate,
@@ -528,10 +524,8 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) {
namespace {
-template <typename CodeOrCodeT>
-inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os,
- CodeOrCodeT code, Address begin, size_t size,
- Address current_pc) {
+void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code,
+ Address begin, size_t size, Address current_pc) {
Address end = begin + size;
AllowHandleAllocation allow_handles;
DisallowGarbageCollection no_gc;
@@ -541,9 +535,8 @@ inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os,
CodeReference(handle(code, isolate)), current_pc);
}
-template <typename CodeOrCodeT>
void Disassemble(const char* name, std::ostream& os, Isolate* isolate,
- CodeOrCodeT code, Address current_pc) {
+ Code code, Address current_pc) {
CodeKind kind = code.kind();
os << "kind = " << CodeKindToString(kind) << "\n";
if (name == nullptr && code.is_builtin()) {
@@ -563,16 +556,6 @@ void Disassemble(const char* name, std::ostream& os, Isolate* isolate,
<< "\n";
os << "address = " << reinterpret_cast<void*>(code.ptr()) << "\n\n";
- if (code.IsCode() && code.is_off_heap_trampoline()) {
- Code trampoline_code = Code::cast(code);
- int trampoline_size = trampoline_code.raw_instruction_size();
- os << "Trampoline (size = " << trampoline_size << ")\n";
- DisassembleCodeRange(isolate, os, trampoline_code,
- trampoline_code.raw_instruction_start(),
- trampoline_size, current_pc);
- os << "\n";
- }
-
{
int code_size = code.InstructionSize();
os << "Instructions (size = " << code_size << ")\n";
@@ -654,8 +637,8 @@ void Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
os << "RelocInfo (size = " << code.relocation_size() << ")\n";
- if (code.IsCode()) {
- for (RelocIterator it(Code::cast(code)); !it.done(); it.next()) {
+ if (code.has_instruction_stream()) {
+ for (RelocIterator it(code); !it.done(); it.next()) {
it.rinfo()->Print(isolate, os);
}
}
@@ -678,13 +661,6 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
i::Disassemble(name, os, isolate, *this, current_pc);
}
-#ifdef V8_EXTERNAL_CODE_SPACE
-void CodeDataContainer::Disassemble(const char* name, std::ostream& os,
- Isolate* isolate, Address current_pc) {
- i::Disassemble(name, os, isolate, *this, current_pc);
-}
-#endif // V8_EXTERNAL_CODE_SPACE
-
#endif // ENABLE_DISASSEMBLER
void BytecodeArray::PrintJson(std::ostream& os) {
@@ -897,8 +873,8 @@ void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
Handle<HeapObject> object,
DependencyGroups groups) {
if (V8_UNLIKELY(v8_flags.trace_compilation_dependencies)) {
- StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
- << "] on [" << object << "] in groups [";
+ StdoutStream{} << "Installing dependency of [" << code << "] on [" << object
+ << "] in groups [";
PrintDependencyGroups(groups);
StdoutStream{} << "]\n";
}
@@ -918,11 +894,10 @@ Handle<DependentCode> DependentCode::InsertWeakCode(
Handle<Code> code) {
if (entries->length() == entries->capacity()) {
// We'd have to grow - try to compact first.
- entries->IterateAndCompact([](CodeT, DependencyGroups) { return false; });
+ entries->IterateAndCompact([](Code, DependencyGroups) { return false; });
}
- MaybeObjectHandle code_slot(HeapObjectReference::Weak(ToCodeT(*code)),
- isolate);
+ MaybeObjectHandle code_slot(HeapObjectReference::Weak(*code), isolate);
MaybeObjectHandle group_slot(MaybeObject::FromSmi(Smi::FromInt(groups)),
isolate);
entries = Handle<DependentCode>::cast(
@@ -930,16 +905,6 @@ Handle<DependentCode> DependentCode::InsertWeakCode(
return entries;
}
-Handle<DependentCode> DependentCode::New(Isolate* isolate,
- DependencyGroups groups,
- Handle<Code> code) {
- Handle<DependentCode> result = Handle<DependentCode>::cast(
- isolate->factory()->NewWeakArrayList(LengthFor(1), AllocationType::kOld));
- result->Set(0, HeapObjectReference::Weak(ToCodeT(*code)));
- result->Set(1, Smi::FromInt(groups));
- return result;
-}
-
void DependentCode::IterateAndCompact(const IterateAndCompactFn& fn) {
DisallowGarbageCollection no_gc;
@@ -960,7 +925,7 @@ void DependentCode::IterateAndCompact(const IterateAndCompactFn& fn) {
continue;
}
- if (fn(CodeT::cast(obj->GetHeapObjectAssumeWeak()),
+ if (fn(Code::cast(obj->GetHeapObjectAssumeWeak()),
static_cast<DependencyGroups>(
Get(i + kGroupsSlotOffset).ToSmi().value()))) {
len = FillEntryFromBack(i, len);
@@ -973,15 +938,15 @@ void DependentCode::IterateAndCompact(const IterateAndCompactFn& fn) {
}
bool DependentCode::MarkCodeForDeoptimization(
- DependentCode::DependencyGroups deopt_groups) {
+ Isolate* isolate, DependentCode::DependencyGroups deopt_groups) {
DisallowGarbageCollection no_gc;
bool marked_something = false;
- IterateAndCompact([&](CodeT code, DependencyGroups groups) {
+ IterateAndCompact([&](Code code, DependencyGroups groups) {
if ((groups & deopt_groups) == 0) return false;
if (!code.marked_for_deoptimization()) {
- code.SetMarkedForDeoptimization("code dependencies");
+ code.SetMarkedForDeoptimization(isolate, "code dependencies");
marked_something = true;
}
@@ -1009,7 +974,7 @@ int DependentCode::FillEntryFromBack(int index, int length) {
void DependentCode::DeoptimizeDependencyGroups(
Isolate* isolate, DependentCode::DependencyGroups groups) {
DisallowGarbageCollection no_gc_scope;
- bool marked_something = MarkCodeForDeoptimization(groups);
+ bool marked_something = MarkCodeForDeoptimization(isolate, groups);
if (marked_something) {
DCHECK(AllowCodeDependencyChange::IsAllowed());
Deoptimizer::DeoptimizeMarkedCode(isolate);
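A minimal sketch of the updated flow, not from the patch itself, assuming the isolate is now threaded through marking as shown above; the dependency group constant is chosen purely for illustration.
// Sketch: deoptimize everything registered against a dependency group.
void DeoptDependents(Isolate* isolate, DependentCode deps) {
  // Marks matching entries via MarkCodeForDeoptimization(isolate, groups),
  // then calls Deoptimizer::DeoptimizeMarkedCode(isolate) if anything was
  // marked.
  deps.DeoptimizeDependencyGroups(isolate,
                                  DependentCode::kPrototypeCheckGroup);
}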
@@ -1021,17 +986,10 @@ DependentCode DependentCode::empty_dependent_code(const ReadOnlyRoots& roots) {
return DependentCode::cast(roots.empty_weak_array_list());
}
-void Code::SetMarkedForDeoptimization(const char* reason) {
- set_marked_for_deoptimization(true);
- Deoptimizer::TraceMarkForDeoptimization(*this, reason);
-}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
-void CodeDataContainer::SetMarkedForDeoptimization(const char* reason) {
+void Code::SetMarkedForDeoptimization(Isolate* isolate, const char* reason) {
set_marked_for_deoptimization(true);
- Deoptimizer::TraceMarkForDeoptimization(FromCodeT(*this), reason);
+ Deoptimizer::TraceMarkForDeoptimization(isolate, *this, reason);
}
-#endif
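A one-line sketch of the new call shape, assuming only the merged method above; the reason string mirrors the one used earlier in this file.
// Sketch: callers now pass the isolate explicitly.
code.SetMarkedForDeoptimization(isolate, "code dependencies");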
const char* DependentCode::DependencyGroupName(DependencyGroup group) {
switch (group) {
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 4ce50f12d6..e4cba40948 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -26,8 +26,10 @@ namespace internal {
class ByteArray;
class BytecodeArray;
-class CodeDataContainer;
+class Code;
class CodeDesc;
+class ObjectIterator;
+class SafepointScope;
class LocalFactory;
template <typename Impl>
@@ -39,81 +41,143 @@ class Register;
#include "torque-generated/src/objects/code-tq.inc"
-// CodeDataContainer is a container for all mutable fields associated with its
-// referencing {Code} object. Since {Code} objects reside on write-protected
-// pages within the heap, its header fields need to be immutable. There always
-// is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing
-// field {Code::code_data_container} itself is immutable.
-class CodeDataContainer : public HeapObject {
+// Code is a container for data fields related to its associated
+// {InstructionStream} object. Since {InstructionStream} objects reside on
+// write-protected pages within the heap, their header fields need to be
+// immutable. Every InstructionStream object has an associated Code object,
+// but not every Code object has an InstructionStream (e.g. for builtins).
+//
+// Embedded builtins consist of on-heap Code objects, with an out-of-line body
+// section. Accessors (e.g. InstructionStart) redirect to the off-heap area.
+// Metadata table offsets remain relative to MetadataStart(), i.e. they point
+// into the off-heap metadata section. The off-heap layout is described in
+// detail in the EmbeddedData class, but at a high level one can assume a
+// dedicated, out-of-line, instruction and metadata section for each embedded
+// builtin:
+//
+// +--------------------------+ <-- InstructionStart()
+// | off-heap instructions |
+// | ... |
+// +--------------------------+ <-- InstructionEnd()
+//
+// +--------------------------+ <-- MetadataStart() (MS)
+// | off-heap metadata |
+// | ... | <-- MS + handler_table_offset()
+// | | <-- MS + constant_pool_offset()
+// | | <-- MS + code_comments_offset()
+// | | <-- MS + unwinding_info_offset()
+// +--------------------------+ <-- MetadataEnd()
+//
+// TODO(jgruber): Code currently contains many aliases for InstructionStream
+// functions. These will eventually move to the Code object. Once done, put all
+// these declarations in a decent order and move over comments from the current
+// declarations in InstructionStream.
+class Code : public HeapObject {
public:
- NEVER_READ_ONLY_SPACE
- DECL_ACCESSORS(next_code_link, Object)
- DECL_RELAXED_INT32_ACCESSORS(kind_specific_flags)
-
- // Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic.
- inline void clear_padding();
+ // When V8_EXTERNAL_CODE_SPACE is enabled, InstructionStream objects are
+ // allocated in a separate pointer compression cage instead of the cage where
+ // all the other objects are allocated.
+ inline PtrComprCageBase code_cage_base() const;
+ // Back-reference to the InstructionStream object.
//
- // A collection of getters and predicates that are used by respective methods
- // on Code object. They are defined here mostly because they operate on the
- // writable state of the respective Code object.
+ // Note the cage-less accessor versions may not be called if the current Code
+ // object is InReadOnlySpace. That may only be the case for Code objects
+ // representing builtins, or in other words, Code objects for which
+ // has_instruction_stream() is never true.
+ DECL_GETTER(instruction_stream, InstructionStream)
+ DECL_RELAXED_GETTER(instruction_stream, InstructionStream)
+ DECL_ACCESSORS(raw_instruction_stream, Object)
+ DECL_RELAXED_GETTER(raw_instruction_stream, Object)
+
+ // Whether this Code object has an associated InstructionStream (embedded
+ // builtins don't).
//
-
- DECL_PRIMITIVE_ACCESSORS(can_have_weak_objects, bool)
- DECL_PRIMITIVE_ACCESSORS(marked_for_deoptimization, bool)
- DECL_PRIMITIVE_ACCESSORS(is_promise_rejection, bool)
-
- inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction() const;
-
- // Back-reference to the Code object.
- // Available only when V8_EXTERNAL_CODE_SPACE is defined.
- DECL_GETTER(code, Code)
- DECL_RELAXED_GETTER(code, Code)
-
- // When V8_EXTERNAL_CODE_SPACE is enabled, Code objects are allocated in
- // a separate pointer compression cage instead of the cage where all the
- // other objects are allocated.
- // This helper method returns code cage base value which is used for
- // decompressing the reference to respective Code. It loads the Isolate from
- // the page header (since the CodeDataContainer objects are always writable)
- // and then the code cage base value from there.
- inline PtrComprCageBase code_cage_base() const;
-
- // Cached value of code().InstructionStart().
- // Available only when V8_EXTERNAL_CODE_SPACE is defined.
+ // Note there is a brief window during CodeBuilder::BuildInternal in which
+ // the Code object has been allocated and initialized but the
+ // InstructionStream doesn't exist yet; in this situation,
+ // has_instruction_stream() is `false` and changes to `true` once the
+ // InstructionStream has also been initialized.
+ inline bool has_instruction_stream() const;
+ inline bool has_instruction_stream(RelaxedLoadTag) const;
+
+ // Cached value of instruction_stream().InstructionStart().
DECL_GETTER(code_entry_point, Address)
- inline void SetCodeAndEntryPoint(
- Isolate* isolate_for_sandbox, Code code,
+ // Alias for code_entry_point, for API compatibility with InstructionStream.
+ inline Address InstructionStart() const;
+
+ inline void SetInstructionStreamAndEntryPoint(
+ Isolate* isolate_for_sandbox, InstructionStream code,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void SetEntryPointForOffHeapBuiltin(Isolate* isolate_for_sandbox,
Address entry);
+ inline void SetCodeEntryPointForSerialization(Isolate* isolate,
+ Address entry);
// Updates the value of the code entry point. The code must be equal to
// the code() value.
- inline void UpdateCodeEntryPoint(Isolate* isolate_for_sandbox, Code code);
+ inline void UpdateCodeEntryPoint(Isolate* isolate_for_sandbox,
+ InstructionStream code);
+
+ DECL_RELAXED_UINT16_ACCESSORS(kind_specific_flags)
// Initializes internal flags field which stores cached values of some
- // properties of the respective Code object.
- // Available only when V8_EXTERNAL_CODE_SPACE is enabled.
+ // properties of the respective InstructionStream object.
inline void initialize_flags(CodeKind kind, Builtin builtin_id,
- bool is_turbofanned,
- bool is_off_heap_trampoline);
+ bool is_turbofanned, int stack_slots);
- // Alias for code_entry_point to make it API compatible with Code.
- inline Address InstructionStart() const;
+ // Clear uninitialized padding space. This ensures that the snapshot content
+ // is deterministic.
+ inline void clear_padding();
+ // Clear the padding in the InstructionStream
+ inline void ClearInstructionStreamPadding();
- // Alias for code_entry_point to make it API compatible with Code.
- inline Address raw_instruction_start() const;
+ // Flushes the instruction cache for the executable instructions of this code
+ // object. Make sure to call this while the code is still writable.
+ void FlushICache() const;
- // Alias for code_entry_point to make it API compatible with Code.
- inline Address entry() const;
+ DECL_PRIMITIVE_ACCESSORS(can_have_weak_objects, bool)
+ DECL_PRIMITIVE_ACCESSORS(marked_for_deoptimization, bool)
-#ifdef V8_EXTERNAL_CODE_SPACE
- //
- // A collection of getters and predicates that forward queries to associated
- // Code object.
- //
+ // [is_promise_rejection]: For kind BUILTIN tells whether the
+ // exception thrown by the code will lead to promise rejection or
+ // uncaught if both this and is_exception_caught is set.
+ // Use GetBuiltinCatchPrediction to access this.
+ DECL_PRIMITIVE_ACCESSORS(is_promise_rejection, bool)
+
+ inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction() const;
+
+ DECL_PRIMITIVE_ACCESSORS(instruction_size, int)
+ DECL_PRIMITIVE_ACCESSORS(metadata_size, int)
+ // [handler_table_offset]: The offset where the exception handler table
+ // starts.
+ DECL_PRIMITIVE_ACCESSORS(handler_table_offset, int)
+ // [unwinding_info_offset]: Offset of the unwinding info section.
+ DECL_PRIMITIVE_ACCESSORS(unwinding_info_offset, int32_t)
+ // [deoptimization_data]: Array containing data for deopt for non-baseline
+ // code.
+ DECL_ACCESSORS(deoptimization_data, FixedArray)
+ // [bytecode_or_interpreter_data]: BytecodeArray or InterpreterData for
+ // baseline code.
+ DECL_ACCESSORS(bytecode_or_interpreter_data, HeapObject)
+ // [source_position_table]: ByteArray for the source positions table for
+ // non-baseline code.
+ DECL_ACCESSORS(source_position_table, ByteArray)
+ // [bytecode_offset_table]: ByteArray for the bytecode offset for baseline
+ // code.
+ DECL_ACCESSORS(bytecode_offset_table, ByteArray)
+ // [relocation_info]: InstructionStream relocation information
+ DECL_ACCESSORS(relocation_info, ByteArray)
+ DECL_PRIMITIVE_ACCESSORS(inlined_bytecode_size, unsigned)
+ DECL_PRIMITIVE_ACCESSORS(osr_offset, BytecodeOffset)
+ // [code_comments_offset]: Offset of the code comment section.
+ DECL_PRIMITIVE_ACCESSORS(code_comments_offset, int)
+ // [constant_pool offset]: Offset of the constant pool.
+ DECL_PRIMITIVE_ACCESSORS(constant_pool_offset, int)
+
+ // Unchecked accessors to be used during GC.
+ inline ByteArray unchecked_relocation_info() const;
+ inline FixedArray unchecked_deoptimization_data() const;
inline CodeKind kind() const;
inline Builtin builtin_id() const;
@@ -122,15 +186,12 @@ class CodeDataContainer : public HeapObject {
inline bool is_optimized_code() const;
inline bool is_wasm_code() const;
- // Testers for interpreter builtins.
inline bool is_interpreter_trampoline_builtin() const;
-
- // Testers for baseline builtins.
inline bool is_baseline_trampoline_builtin() const;
inline bool is_baseline_leave_frame_builtin() const;
- // Tells whether the code checks the tiering state in the function's
- // feedback vector.
+ // Tells whether the code checks the tiering state in the function's feedback
+ // vector.
inline bool checks_tiering_state() const;
// Tells whether the outgoing parameters of this code are tagged pointers.
@@ -144,22 +205,17 @@ class CodeDataContainer : public HeapObject {
// TurboFan optimizing compiler.
inline bool is_turbofanned() const;
- // [is_off_heap_trampoline]: For kind BUILTIN tells whether
- // this is a trampoline to an off-heap builtin.
- inline bool is_off_heap_trampoline() const;
-
- // [uses_safepoint_table]: Whether this Code object uses safepoint tables
- // (note the table may still be empty, see has_safepoint_table).
+ // [uses_safepoint_table]: Whether this InstructionStream object uses
+ // safepoint tables (note the table may still be empty, see
+ // has_safepoint_table).
inline bool uses_safepoint_table() const;
// [stack_slots]: If {uses_safepoint_table()}, the number of stack slots
// reserved in the code prologue; otherwise 0.
inline int stack_slots() const;
- DECL_GETTER(deoptimization_data, FixedArray)
- DECL_GETTER(bytecode_or_interpreter_data, HeapObject)
- DECL_GETTER(source_position_table, ByteArray)
- DECL_GETTER(bytecode_offset_table, ByteArray)
+ inline ByteArray SourcePositionTable(Isolate* isolate,
+ SharedFunctionInfo sfi) const;
// Returns true if pc is inside this object's instructions.
inline bool contains(Isolate* isolate, Address pc);
@@ -189,18 +245,36 @@ class CodeDataContainer : public HeapObject {
inline byte* relocation_end() const;
inline int relocation_size() const;
+ // [safepoint_table_offset]: The offset where the safepoint table starts.
+ inline int safepoint_table_offset() const { return 0; }
+
+ inline Address body_start() const;
+ inline Address body_end() const;
+ inline int body_size() const;
+
+ inline Address metadata_start() const;
+ inline Address metadata_end() const;
+
+ inline Address handler_table_address() const;
+
+ inline Address safepoint_table_address() const;
+
+ inline int CodeSize() const;
+ inline int SizeIncludingMetadata() const;
+
// When builtins un-embedding is enabled for the Isolate
// (see Isolate::is_short_builtin_calls_enabled()) then both embedded and
// un-embedded builtins might be executed and thus two kinds of |pc|s might
// appear on the stack.
// Unlike the parameterless versions of the functions above, the below variants
// ensure that the instruction start corresponds to the given |pc| value.
- // Thus for off-heap trampoline Code objects the result might be the
- // instruction start/end of the embedded code stream or of un-embedded one.
- // For normal Code objects these functions just return the
- // raw_instruction_start/end() values.
+ // Thus for off-heap trampoline InstructionStream objects the result might be
+ // the instruction start/end of the embedded code stream or of the
+ // un-embedded one. For normal InstructionStream objects these functions
+ // just return the instruction_start/end() values.
// TODO(11527): remove these versions once the full solution is ready.
inline Address InstructionStart(Isolate* isolate, Address pc) const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionStart() const;
V8_EXPORT_PRIVATE Address OffHeapInstructionStart(Isolate* isolate,
Address pc) const;
inline Address InstructionEnd(Isolate* isolate, Address pc) const;
@@ -213,15 +287,65 @@ class CodeDataContainer : public HeapObject {
inline Address InstructionEnd() const;
inline int InstructionSize() const;
- // Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Isolate* isolate, Address pc);
-
- // Get the maglev safepoint entry for the given pc.
MaglevSafepointEntry GetMaglevSafepointEntry(Isolate* isolate, Address pc);
inline int GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const;
- void SetMarkedForDeoptimization(const char* reason);
+ void SetMarkedForDeoptimization(Isolate* isolate, const char* reason);
+
+ inline bool CanContainWeakObjects();
+
+ inline bool IsWeakObject(HeapObject object);
+
+ static inline bool IsWeakObjectInOptimizedCode(HeapObject object);
+
+ static inline bool IsWeakObjectInDeoptimizationLiteralArray(Object object);
+
+ // This function should be called only from GC.
+ void ClearEmbeddedObjects(Heap* heap);
+
+ // [embedded_objects_cleared]: If CodeKindIsOptimizedJSFunction(kind), tells
+ // whether the embedded objects in the code marked for deoptimization were
+ // cleared. Note that embedded_objects_cleared() implies
+ // marked_for_deoptimization().
+ inline bool embedded_objects_cleared() const;
+ inline void set_embedded_objects_cleared(bool flag);
+
+ // Migrate code from desc without flushing the instruction cache.
+ void CopyFromNoFlush(ByteArray reloc_info, Heap* heap, const CodeDesc& desc);
+ void RelocateFromDesc(ByteArray reloc_info, Heap* heap, const CodeDesc& desc);
+
+ // Copy the RelocInfo portion of |desc| to |dest|. The ByteArray must be
+ // exactly the same size as the RelocInfo in |desc|.
+ static inline void CopyRelocInfoToByteArray(ByteArray dest,
+ const CodeDesc& desc);
+
+ bool IsIsolateIndependent(Isolate* isolate);
+
+ inline uintptr_t GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
+ BytecodeArray bytecodes);
+
+ inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
+ BytecodeArray bytecodes);
+
+ // Returns true if the function is inlined in the code.
+ bool Inlines(SharedFunctionInfo sfi);
+
+ // Returns the PC of the next bytecode in execution order.
+ // If the bytecode at the given offset is JumpLoop, the PC of the jump target
+ // is returned. Other jumps are not allowed.
+ // For other bytecodes this is equivalent to
+ // GetBaselineEndPCForBytecodeOffset.
+ inline uintptr_t GetBaselinePCForNextExecutedBytecode(
+ int bytecode_offset, BytecodeArray bytecodes);
+
+ inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
+ BytecodeArray bytecodes);
+
+ inline void IterateDeoptimizationLiterals(RootVisitor* v);
+
+ static inline Code FromTargetAddress(Address address);
#ifdef ENABLE_DISASSEMBLER
V8_EXPORT_PRIVATE void Disassemble(const char* name, std::ostream& os,
@@ -229,41 +353,55 @@ class CodeDataContainer : public HeapObject {
Address current_pc = kNullAddress);
#endif // ENABLE_DISASSEMBLER
-#endif // V8_EXTERNAL_CODE_SPACE
-
- DECL_CAST(CodeDataContainer)
+ DECL_CAST(Code)
// Dispatched behavior.
- DECL_PRINTER(CodeDataContainer)
- DECL_VERIFIER(CodeDataContainer)
+ DECL_PRINTER(Code)
+ DECL_VERIFIER(Code)
// Layout description.
-#define CODE_DATA_FIELDS(V) \
- /* Strong pointer fields. */ \
- V(kPointerFieldsStrongEndOffset, 0) \
- /* Weak pointer fields. */ \
- V(kNextCodeLinkOffset, kTaggedSize) \
- V(kPointerFieldsWeakEndOffset, 0) \
- /* Strong Code pointer fields. */ \
- V(kCodeOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
- V(kCodePointerFieldsStrongEndOffset, 0) \
- /* Raw data fields. */ \
- V(kCodeEntryPointOffset, \
- V8_EXTERNAL_CODE_SPACE_BOOL ? kSystemPointerSize : 0) \
- V(kFlagsOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kUInt16Size : 0) \
- V(kBuiltinIdOffset, V8_EXTERNAL_CODE_SPACE_BOOL ? kInt16Size : 0) \
- V(kKindSpecificFlagsOffset, kInt32Size) \
- V(kUnalignedSize, OBJECT_POINTER_PADDING(kUnalignedSize)) \
- /* Total size. */ \
+#define CODE_DATA_FIELDS(V) \
+ /* Strong pointer fields. */ \
+ V(kRelocationInfoOffset, kTaggedSize) \
+ V(kDeoptimizationDataOrInterpreterDataOffset, kTaggedSize) \
+ V(kPositionTableOffset, kTaggedSize) \
+ V(kPointerFieldsStrongEndOffset, 0) \
+ /* Strong InstructionStream pointer fields. */ \
+ V(kInstructionStreamOffset, kTaggedSize) \
+ V(kCodePointerFieldsStrongEndOffset, 0) \
+ /* Raw data fields. */ \
+ /* Data or code not directly visited by GC directly starts here. */ \
+ V(kDataStart, 0) \
+ V(kCodeEntryPointOffset, kSystemPointerSize) \
+ /* The serializer needs to copy bytes starting from here verbatim. */ \
+ /* Objects embedded into code are visited via reloc info. */ \
+ V(kFlagsOffset, kInt32Size) \
+ V(kBuiltinIdOffset, kInt16Size) \
+ V(kKindSpecificFlagsOffset, kInt16Size) \
+ V(kInstructionSizeOffset, kIntSize) \
+ V(kMetadataSizeOffset, kIntSize) \
+ V(kInlinedBytecodeSizeOffset, kIntSize) \
+ V(kOsrOffsetOffset, kInt32Size) \
+ /* Offsets describing inline metadata tables, relative to MetadataStart. */ \
+ V(kHandlerTableOffsetOffset, kIntSize) \
+ V(kUnwindingInfoOffsetOffset, kInt32Size) \
+ V(kConstantPoolOffsetOffset, V8_EMBEDDED_CONSTANT_POOL_BOOL ? kIntSize : 0) \
+ V(kCodeCommentsOffsetOffset, kIntSize) \
+ V(kUnalignedSize, OBJECT_POINTER_PADDING(kUnalignedSize)) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_DATA_FIELDS)
#undef CODE_DATA_FIELDS
#ifdef V8_EXTERNAL_CODE_SPACE
+ template <typename T>
using ExternalCodeField =
- TaggedField<Object, kCodeOffset, ExternalCodeCompressionScheme>;
-#endif
+ TaggedField<T, kInstructionStreamOffset, ExternalCodeCompressionScheme>;
+#else
+ template <typename T>
+ using ExternalCodeField = TaggedField<T, kInstructionStreamOffset>;
+#endif // V8_EXTERNAL_CODE_SPACE
class BodyDescriptor;
@@ -271,96 +409,174 @@ class CodeDataContainer : public HeapObject {
#define FLAGS_BIT_FIELDS(V, _) \
V(KindField, CodeKind, 4, _) \
V(IsTurbofannedField, bool, 1, _) \
- V(IsOffHeapTrampoline, bool, 1, _)
- /* The other 10 bits are still free. */
+ V(StackSlotsField, int, 24, _)
+ /* The other 3 bits are still free. */
+ // TODO(v8:13784): merge this with KindSpecificFlags by dropping the
+ // IsPromiseRejection field or taking one bit from the StackSlots field.
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
- static_assert(FLAGS_BIT_FIELDS_Ranges::kBitsCount == 6);
- static_assert(!V8_EXTERNAL_CODE_SPACE_BOOL ||
- (FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
- FIELD_SIZE(CodeDataContainer::kFlagsOffset) * kBitsPerByte));
+ static_assert(kCodeKindCount <= KindField::kNumValues);
+ static_assert(FLAGS_BIT_FIELDS_Ranges::kBitsCount == 29);
+ static_assert(FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
+ FIELD_SIZE(kFlagsOffset) * kBitsPerByte);
- private:
- DECL_ACCESSORS(raw_code, Object)
- DECL_RELAXED_GETTER(raw_code, Object)
+ // KindSpecificFlags layout.
+#define KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
+ V(MarkedForDeoptimizationField, bool, 1, _) \
+ V(EmbeddedObjectsClearedField, bool, 1, _) \
+ V(CanHaveWeakObjectsField, bool, 1, _) \
+ V(IsPromiseRejectionField, bool, 1, _)
+ DEFINE_BIT_FIELDS(KIND_SPECIFIC_FLAGS_BIT_FIELDS)
+#undef KIND_SPECIFIC_FLAGS_BIT_FIELDS
+ static_assert(KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 4);
+ static_assert(KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
+ FIELD_SIZE(Code::kKindSpecificFlagsOffset) * kBitsPerByte);
+
+ // The {marked_for_deoptimization} field is accessed from generated code.
+ static const int kMarkedForDeoptimizationBit =
+ MarkedForDeoptimizationField::kShift;
+
+ class OptimizedCodeIterator;
+ private:
inline void init_code_entry_point(Isolate* isolate, Address initial_value);
inline void set_code_entry_point(Isolate* isolate, Address value);
- // When V8_EXTERNAL_CODE_SPACE is enabled the flags field contains cached
- // values of some flags of the from the respective Code object.
+ // Contains cached values of some flags from the respective
+ // InstructionStream object.
DECL_RELAXED_UINT16_ACCESSORS(flags)
- inline void set_is_off_heap_trampoline_for_hash(bool value);
+
+ V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
+ V8_EXPORT_PRIVATE int OffHeapInstructionSize() const;
+ V8_EXPORT_PRIVATE Address OffHeapMetadataStart() const;
+ V8_EXPORT_PRIVATE Address OffHeapMetadataEnd() const;
+ V8_EXPORT_PRIVATE int OffHeapMetadataSize() const;
+ V8_EXPORT_PRIVATE Address OffHeapSafepointTableAddress() const;
+ V8_EXPORT_PRIVATE int OffHeapSafepointTableSize() const;
+ V8_EXPORT_PRIVATE Address OffHeapHandlerTableAddress() const;
+ V8_EXPORT_PRIVATE int OffHeapHandlerTableSize() const;
+ V8_EXPORT_PRIVATE Address OffHeapConstantPoolAddress() const;
+ V8_EXPORT_PRIVATE int OffHeapConstantPoolSize() const;
+ V8_EXPORT_PRIVATE Address OffHeapCodeCommentsAddress() const;
+ V8_EXPORT_PRIVATE int OffHeapCodeCommentsSize() const;
+ V8_EXPORT_PRIVATE Address OffHeapUnwindingInfoAddress() const;
+ V8_EXPORT_PRIVATE int OffHeapUnwindingInfoSize() const;
+ V8_EXPORT_PRIVATE int OffHeapStackSlots() const;
+
+ enum BytecodeToPCPosition {
+ kPcAtStartOfBytecode,
+ // End of bytecode equals the start of the next bytecode.
+ // We need it when we deoptimize to the next bytecode (lazy deopt or deopt
+ // of non-topmost frame).
+ kPcAtEndOfBytecode
+ };
+ inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
+ BytecodeToPCPosition position,
+ BytecodeArray bytecodes);
template <typename IsolateT>
friend class Deserializer;
+ friend class ReadOnlyDeserializer; // For init_code_entry_point.
+ friend class GcSafeCode; // For OffHeapFoo functions.
friend Factory;
friend FactoryBase<Factory>;
friend FactoryBase<LocalFactory>;
friend Isolate;
- OBJECT_CONSTRUCTORS(CodeDataContainer, HeapObject);
+ OBJECT_CONSTRUCTORS(Code, HeapObject);
};
-// Code describes objects with on-the-fly generated machine code.
-class Code : public HeapObject {
+// A Code object when used in situations where gc might be in progress. The
+// underlying pointer is guaranteed to be a Code object.
+//
+// Semantics around Code and InstructionStream objects are quite delicate when
+// GC is in progress and objects are currently being moved, because the
+// tightly-coupled object pair {Code,InstructionStream} are conceptually
+// treated as a single object in our codebase, and we frequently convert
+// between the two. However, during GC, extra care must be taken when accessing
+// the `Code::instruction_stream` and `InstructionStream::code` slots because
+// they may contain forwarding pointers.
+//
+// This class a) clarifies at use sites that we're dealing with a Code object
+// in a situation that requires special semantics, and b) safely implements
+// related functions.
+//
+// Note that both the underlying Code object and the associated
+// InstructionStream may be forwarding pointers, thus type checks and normal
+// (checked) casts do not work on GcSafeCode.
+class GcSafeCode : public HeapObject {
+ public:
+ DECL_CAST(GcSafeCode)
+
+ // Use with care: this casts away knowledge that we're dealing with a
+ // special-semantics object.
+ inline Code UnsafeCastToCode() const;
+
+ // Safe accessors (these just forward to Code methods).
+ inline Address InstructionStart() const;
+ inline Address InstructionEnd() const;
+ inline bool is_builtin() const;
+ inline Builtin builtin_id() const;
+ inline CodeKind kind() const;
+ inline bool is_interpreter_trampoline_builtin() const;
+ inline bool is_baseline_trampoline_builtin() const;
+ inline bool is_baseline_leave_frame_builtin() const;
+ inline bool has_instruction_stream() const;
+ inline bool is_maglevved() const;
+ inline bool is_turbofanned() const;
+ inline bool has_tagged_outgoing_params() const;
+ inline bool marked_for_deoptimization() const;
+ inline Object raw_instruction_stream() const;
+ inline Address constant_pool() const;
+ inline int stack_slots() const;
+
+ inline int GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const;
+ inline Address InstructionStart(Isolate* isolate, Address pc) const;
+ inline Address InstructionEnd(Isolate* isolate, Address pc) const;
+ inline bool CanDeoptAt(Isolate* isolate, Address pc) const;
+ inline Object raw_instruction_stream(PtrComprCageBase code_cage_base) const;
+
+ // Accessors that had to be modified to be used in GC settings.
+ inline Address SafepointTableAddress() const;
+
+ private:
+ OBJECT_CONSTRUCTORS(GcSafeCode, HeapObject);
+};
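A hedged sketch of the intended GcSafeCode usage pattern described above, not from the patch itself: code found while the collector may be moving objects is only touched through the safe accessors, and the wrapper is cast away only once that is safe. The lookup helper is hypothetical.
// Sketch: pc-to-code lookup during GC (LookupCodeDuringGc is hypothetical).
GcSafeCode gc_safe = LookupCodeDuringGc(isolate, pc);
if (!gc_safe.has_instruction_stream()) {
  // Embedded builtin: instruction bounds come from the off-heap blob, never
  // from a (possibly forwarded) InstructionStream.
}
int offset = gc_safe.GetOffsetFromInstructionStart(isolate, pc);
// Cast away the GC-safe wrapper only once no forwarding pointers can be
// involved anymore.
Code code = gc_safe.UnsafeCastToCode();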
+
+// InstructionStream contains the instruction stream for V8-generated code
+// objects.
+//
+// When V8_EXTERNAL_CODE_SPACE is enabled, InstructionStream objects are
+// allocated in a separate pointer compression cage instead of the cage where
+// all the other objects are allocated.
+class InstructionStream : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
- // Opaque data type for encapsulating code flags like kind, inline
- // cache state, and arguments count.
- using Flags = uint32_t;
- // All Code objects have the following layout:
+ // All InstructionStream objects have the following layout:
//
// +--------------------------+
// | header |
// | padded to code alignment |
- // +--------------------------+ <-- raw_body_start()
- // | instructions | == raw_instruction_start()
+ // +--------------------------+ <-- body_start()
+ // | instructions | == instruction_start()
// | ... |
// | padded to meta alignment | see kMetadataAlignment
- // +--------------------------+ <-- raw_instruction_end()
- // | metadata | == raw_metadata_start() (MS)
+ // +--------------------------+ <-- instruction_end()
+ // | metadata | == metadata_start() (MS)
// | ... |
// | | <-- MS + handler_table_offset()
// | | <-- MS + constant_pool_offset()
// | | <-- MS + code_comments_offset()
// | | <-- MS + unwinding_info_offset()
// | padded to obj alignment |
- // +--------------------------+ <-- raw_metadata_end() == raw_body_end()
+ // +--------------------------+ <-- metadata_end() == body_end()
// | padded to code alignment |
// +--------------------------+
//
// In other words, the variable-size 'body' consists of 'instructions' and
// 'metadata'.
- //
- // Note the accessor functions below may be prefixed with 'raw'. In this case,
- // raw accessors (e.g. raw_instruction_start) always refer to the on-heap
- // Code object, while camel-case accessors (e.g. InstructionStart) may refer
- // to an off-heap area in the case of embedded builtins.
- //
- // Embedded builtins are on-heap Code objects, with an out-of-line body
- // section. The on-heap Code object contains an essentially empty body
- // section, while accessors, as mentioned above, redirect to the off-heap
- // area. Metadata table offsets remain relative to MetadataStart(), i.e. they
- // point into the off-heap metadata section. The off-heap layout is described
- // in detail in the EmbeddedData class, but at a high level one can assume a
- // dedicated, out-of-line, instruction and metadata section for each embedded
- // builtin *in addition* to the on-heap Code object:
- //
- // +--------------------------+ <-- InstructionStart()
- // | off-heap instructions |
- // | ... |
- // +--------------------------+ <-- InstructionEnd()
- //
- // +--------------------------+ <-- MetadataStart() (MS)
- // | off-heap metadata |
- // | ... | <-- MS + handler_table_offset()
- // | | <-- MS + constant_pool_offset()
- // | | <-- MS + code_comments_offset()
- // | | <-- MS + unwinding_info_offset()
- // +--------------------------+ <-- MetadataEnd()
// Constants for use in static asserts, stating whether the body is adjacent,
// i.e. instructions and metadata areas are adjacent.
@@ -369,220 +585,18 @@ class Code : public HeapObject {
static constexpr bool kBodyIsContiguous =
kOnHeapBodyIsContiguous && kOffHeapBodyIsContiguous;
- inline Address raw_body_start() const;
- inline Address raw_body_end() const;
- inline int raw_body_size() const;
-
- inline Address raw_instruction_start() const;
- inline Address InstructionStart() const;
-
- inline Address raw_instruction_end() const;
- inline Address InstructionEnd() const;
-
- // When builtins un-embedding is enabled for the Isolate
- // (see Isolate::is_short_builtin_calls_enabled()) then both embedded and
- // un-embedded builtins might be exeuted and thus two kinds of |pc|s might
- // appear on the stack.
- // Unlike the paremeterless versions of the functions above the below variants
- // ensure that the instruction start correspond to the given |pc| value.
- // Thus for off-heap trampoline Code objects the result might be the
- // instruction start/end of the embedded code stream or of un-embedded one.
- // For normal Code objects these functions just return the
- // raw_instruction_start/end() values.
- // TODO(11527): remove these versions once the full solution is ready.
- inline Address InstructionStart(Isolate* isolate, Address pc) const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionStart(Isolate* isolate,
- Address pc) const;
- inline Address InstructionEnd(Isolate* isolate, Address pc) const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionEnd(Isolate* isolate,
- Address pc) const;
-
- V8_EXPORT_PRIVATE bool OffHeapBuiltinContains(Isolate* isolate,
- Address pc) const;
-
- // Computes offset of the |pc| from the instruction start. The |pc| must
- // belong to this code.
- inline int GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const;
-
- inline int raw_instruction_size() const;
- inline void set_raw_instruction_size(int value);
- inline int InstructionSize() const;
-
- inline Address raw_metadata_start() const;
- inline Address raw_metadata_end() const;
- inline int raw_metadata_size() const;
- inline void set_raw_metadata_size(int value);
- inline int MetadataSize() const;
+ inline Address instruction_start() const;
// The metadata section is aligned to this value.
static constexpr int kMetadataAlignment = kIntSize;
- // [safepoint_table_offset]: The offset where the safepoint table starts.
- inline int safepoint_table_offset() const { return 0; }
- inline Address raw_safepoint_table_address() const;
- inline Address SafepointTableAddress() const;
- inline int safepoint_table_size() const;
- inline bool has_safepoint_table() const;
-
- // [handler_table_offset]: The offset where the exception handler table
- // starts.
- inline int handler_table_offset() const;
- inline void set_handler_table_offset(int offset);
- inline Address raw_handler_table_address() const;
- inline Address HandlerTableAddress() const;
- inline int handler_table_size() const;
- inline bool has_handler_table() const;
+ // [code]: The associated Code object.
+ DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, HeapObject)
- // [constant_pool offset]: Offset of the constant pool.
- inline int constant_pool_offset() const;
- inline void set_constant_pool_offset(int offset);
- inline Address raw_constant_pool() const;
- inline Address constant_pool() const;
- inline int constant_pool_size() const;
- inline bool has_constant_pool() const;
-
- // [code_comments_offset]: Offset of the code comment section.
- inline int code_comments_offset() const;
- inline void set_code_comments_offset(int offset);
- inline Address raw_code_comments() const;
- inline Address code_comments() const;
- inline int code_comments_size() const;
- inline bool has_code_comments() const;
-
- // [unwinding_info_offset]: Offset of the unwinding info section.
- inline int32_t unwinding_info_offset() const;
- inline void set_unwinding_info_offset(int32_t offset);
- inline Address raw_unwinding_info_start() const;
- inline Address unwinding_info_start() const;
- inline Address unwinding_info_end() const;
- inline int unwinding_info_size() const;
- inline bool has_unwinding_info() const;
-
-#ifdef ENABLE_DISASSEMBLER
- V8_EXPORT_PRIVATE void Disassemble(const char* name, std::ostream& os,
- Isolate* isolate,
- Address current_pc = kNullAddress);
-#endif
-
- // [relocation_info]: Code relocation information
- DECL_ACCESSORS(relocation_info, ByteArray)
-
- // This function should be called only from GC.
- void ClearEmbeddedObjects(Heap* heap);
-
- // [deoptimization_data]: Array containing data for deopt for non-baseline
- // code.
- DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [bytecode_or_interpreter_data]: BytecodeArray or InterpreterData for
- // baseline code.
- DECL_ACCESSORS(bytecode_or_interpreter_data, HeapObject)
-
- // [source_position_table]: ByteArray for the source positions table for
- // non-baseline code.
- DECL_ACCESSORS(source_position_table, ByteArray)
- // [bytecode_offset_table]: ByteArray for the bytecode offset for baseline
- // code.
- DECL_ACCESSORS(bytecode_offset_table, ByteArray)
-
- // If source positions have not been collected or an exception has been thrown
- // this will return empty_byte_array.
- inline ByteArray SourcePositionTable(PtrComprCageBase cage_base,
- SharedFunctionInfo sfi) const;
-
- // [code_data_container]: A container indirection for all mutable fields.
- DECL_RELEASE_ACQUIRE_ACCESSORS(code_data_container, CodeDataContainer)
-
- // [next_code_link]: Link for lists of optimized or deoptimized code.
- // Note that this field is stored in the {CodeDataContainer} to be mutable.
- inline Object next_code_link() const;
- inline void set_next_code_link(Object value);
-
- // Unchecked accessors to be used during GC.
- inline ByteArray unchecked_relocation_info() const;
-
- inline int relocation_size() const;
-
- // [kind]: Access to specific code kind.
- inline CodeKind kind() const;
-
- inline bool is_optimized_code() const;
- inline bool is_wasm_code() const;
-
- // Testers for interpreter builtins.
- inline bool is_interpreter_trampoline_builtin() const;
-
- // Testers for baseline builtins.
- inline bool is_baseline_trampoline_builtin() const;
- inline bool is_baseline_leave_frame_builtin() const;
-
- // Tells whether the code checks the tiering state in the function's
- // feedback vector.
- inline bool checks_tiering_state() const;
-
- // Tells whether the outgoing parameters of this code are tagged pointers.
- inline bool has_tagged_outgoing_params() const;
-
- // [is_turbofanned]: Tells whether the code object was generated by the
- // TurboFan optimizing compiler.
- inline bool is_turbofanned() const;
-
- // TODO(jgruber): Reconsider these predicates; we should probably merge them
- // and rename to something appropriate.
- inline bool is_maglevved() const;
-
- // [can_have_weak_objects]: If CodeKindIsOptimizedJSFunction(kind), tells
- // whether the embedded objects in code should be treated weakly.
- inline bool can_have_weak_objects() const;
- inline void set_can_have_weak_objects(bool value);
-
- // [builtin]: For builtins, tells which builtin index the code object
- // has. The builtin index is a non-negative integer for builtins, and
- // Builtin::kNoBuiltinId (-1) otherwise.
- inline Builtin builtin_id() const;
- inline void set_builtin_id(Builtin builtin);
- inline bool is_builtin() const;
-
- inline unsigned inlined_bytecode_size() const;
- inline void set_inlined_bytecode_size(unsigned size);
-
- inline BytecodeOffset osr_offset() const;
- inline void set_osr_offset(BytecodeOffset offset);
-
- // [uses_safepoint_table]: Whether this Code object uses safepoint tables
- // (note the table may still be empty, see has_safepoint_table).
- inline bool uses_safepoint_table() const;
-
- // [stack_slots]: If {uses_safepoint_table()}, the number of stack slots
- // reserved in the code prologue; otherwise 0.
- inline int stack_slots() const;
-
- // [marked_for_deoptimization]: If CodeKindCanDeoptimize(kind), tells whether
- // the code is going to be deoptimized.
- inline bool marked_for_deoptimization() const;
- inline void set_marked_for_deoptimization(bool flag);
-
- // [embedded_objects_cleared]: If CodeKindIsOptimizedJSFunction(kind), tells
- // whether the embedded objects in the code marked for deoptimization were
- // cleared. Note that embedded_objects_cleared() implies
- // marked_for_deoptimization().
- inline bool embedded_objects_cleared() const;
- inline void set_embedded_objects_cleared(bool flag);
-
- // [is_promise_rejection]: For kind BUILTIN tells whether the
- // exception thrown by the code will lead to promise rejection or
- // uncaught if both this and is_exception_caught is set.
- // Use GetBuiltinCatchPrediction to access this.
- inline void set_is_promise_rejection(bool flag);
-
- // [is_off_heap_trampoline]: For kind BUILTIN tells whether
- // this is a trampoline to an off-heap builtin.
- inline bool is_off_heap_trampoline() const;
-
- // Get the safepoint entry for the given pc.
- SafepointEntry GetSafepointEntry(Isolate* isolate, Address pc);
-
- // Get the maglev safepoint entry for the given pc.
- MaglevSafepointEntry GetMaglevSafepointEntry(Isolate* isolate, Address pc);
+ // A convenience wrapper around raw_code that will do an unchecked cast for
+ // you.
+ inline Code unchecked_code() const;
// The entire code object including its header is copied verbatim to the
// snapshot so that it can be written in one, fast, memcpy during
@@ -593,40 +607,19 @@ class Code : public HeapObject {
// out the to-be-overwritten header data for reproducible snapshots.
inline void WipeOutHeader();
- // When V8_EXTERNAL_CODE_SPACE is enabled, Code objects are allocated in
- // a separate pointer compression cage instead of the cage where all the
- // other objects are allocated.
- // This field contains cage base value which is used for decompressing
- // the references to non-Code objects (map, deoptimization_data, etc.).
+ // When V8_EXTERNAL_CODE_SPACE is enabled, InstructionStream objects are
+ // allocated in a separate pointer compression cage instead of the cage where
+ // all the other objects are allocated. This field contains cage base value
+ // which is used for decompressing the references to non-InstructionStream
+ // objects (map, deoptimization_data, etc.).
inline PtrComprCageBase main_cage_base() const;
inline PtrComprCageBase main_cage_base(RelaxedLoadTag) const;
inline void set_main_cage_base(Address cage_base, RelaxedStoreTag);
- // Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic. Depending on the V8 build mode there could be no padding.
- inline void clear_padding();
- // Initialize the flags field. Similar to clear_padding above this ensure that
- // the snapshot content is deterministic.
- inline void initialize_flags(CodeKind kind, bool is_turbofanned,
- int stack_slots, bool is_off_heap_trampoline);
-
- // Convert a target address into a code object.
- static inline Code GetCodeFromTargetAddress(Address address);
-
- // Convert an entry address into an object.
- static inline Code GetObjectFromEntryAddress(Address location_of_address);
+ static inline InstructionStream FromTargetAddress(Address address);
+ static inline InstructionStream FromEntryAddress(Address location_of_address);
- // Returns the size of code and its metadata. This includes the size of code
- // relocation information, deoptimization data.
- DECL_GETTER(SizeIncludingMetadata, int)
-
- // Returns the address of the first relocation info (read backwards!).
- inline byte* relocation_start() const;
-
- // Returns the address right after the relocation info (read backwards!).
- inline byte* relocation_end() const;
-
- // Code entry point.
+ // InstructionStream entry point.
inline Address entry() const;
// Returns true if pc is inside this object's instructions.
@@ -636,36 +629,6 @@ class Code : public HeapObject {
// object has been moved by delta bytes.
void Relocate(intptr_t delta);
- // Migrate code from desc without flushing the instruction cache.
- void CopyFromNoFlush(ByteArray reloc_info, Heap* heap, const CodeDesc& desc);
- void RelocateFromDesc(ByteArray reloc_info, Heap* heap, const CodeDesc& desc);
-
- // Copy the RelocInfo portion of |desc| to |dest|. The ByteArray must be
- // exactly the same size as the RelocInfo in |desc|.
- static inline void CopyRelocInfoToByteArray(ByteArray dest,
- const CodeDesc& desc);
-
- inline uintptr_t GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
- BytecodeArray bytecodes);
-
- inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
- BytecodeArray bytecodes);
-
- // Returns the PC of the next bytecode in execution order.
- // If the bytecode at the given offset is JumpLoop, the PC of the jump target
- // is returned. Other jumps are not allowed.
- // For other bytecodes this is equivalent to
- // GetBaselineEndPCForBytecodeOffset.
- inline uintptr_t GetBaselinePCForNextExecutedBytecode(
- int bytecode_offset, BytecodeArray bytecodes);
-
- inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
- BytecodeArray bytecodes);
-
- // Flushes the instruction cache for the executable instructions of this code
- // object. Make sure to call this while the code is still writable.
- void FlushICache() const;
-
// Returns the object size for a given body (used for allocation).
static int SizeFor(int body_size) {
return RoundUp(kHeaderSize + body_size, kCodeAlignment);
@@ -676,133 +639,37 @@ class Code : public HeapObject {
// Hides HeapObject::Size(...) and redirects queries to CodeSize().
DECL_GETTER(Size, int)
- DECL_CAST(Code)
+ DECL_CAST(InstructionStream)
// Dispatched behavior.
- DECL_PRINTER(Code)
- DECL_VERIFIER(Code)
-
- bool CanDeoptAt(Isolate* isolate, Address pc);
-
- void SetMarkedForDeoptimization(const char* reason);
+ DECL_PRINTER(InstructionStream)
+ DECL_VERIFIER(InstructionStream)
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction() const;
- bool IsIsolateIndependent(Isolate* isolate);
-
- inline bool CanContainWeakObjects();
-
- inline bool IsWeakObject(HeapObject object);
-
- static inline bool IsWeakObjectInOptimizedCode(HeapObject object);
-
- static inline bool IsWeakObjectInDeoptimizationLiteralArray(Object object);
-
- // Returns false if this is an embedded builtin Code object that's in
- // read_only_space and hence doesn't have execute permissions.
- inline bool IsExecutable();
-
- // Returns true if the function is inlined in the code.
- bool Inlines(SharedFunctionInfo sfi);
-
- class OptimizedCodeIterator;
-
// Layout description.
-#define CODE_FIELDS(V) \
- V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOrInterpreterDataOffset, kTaggedSize) \
- V(kPositionTableOffset, kTaggedSize) \
- V(kCodeDataContainerOffset, kTaggedSize) \
- /* Data or code not directly visited by GC directly starts here. */ \
- /* The serializer needs to copy bytes starting from here verbatim. */ \
- /* Objects embedded into code is visited via reloc info. */ \
- V(kDataStart, 0) \
- V(kMainCageBaseUpper32BitsOffset, \
- V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
- V(kInstructionSizeOffset, kIntSize) \
- V(kMetadataSizeOffset, kIntSize) \
- V(kFlagsOffset, kInt32Size) \
- V(kBuiltinIndexOffset, kIntSize) \
- V(kInlinedBytecodeSizeOffset, kIntSize) \
- V(kOsrOffsetOffset, kInt32Size) \
- /* Offsets describing inline metadata tables, relative to MetadataStart. */ \
- V(kHandlerTableOffsetOffset, kIntSize) \
- V(kConstantPoolOffsetOffset, V8_EMBEDDED_CONSTANT_POOL_BOOL ? kIntSize : 0) \
- V(kCodeCommentsOffsetOffset, kIntSize) \
- V(kUnwindingInfoOffsetOffset, kInt32Size) \
- V(kUnalignedHeaderSize, 0) \
- /* Add padding to align the instruction start following right after */ \
- /* the Code object header. */ \
- V(kOptionalPaddingOffset, CODE_POINTER_PADDING(kOptionalPaddingOffset)) \
+#define ISTREAM_FIELDS(V) \
+ V(kCodeOffset, kTaggedSize) \
+ /* Data or code not directly visited by GC directly starts here. */ \
+ V(kDataStart, 0) \
+ V(kMainCageBaseUpper32BitsOffset, \
+ V8_EXTERNAL_CODE_SPACE_BOOL ? kTaggedSize : 0) \
V(kHeaderSize, 0)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
-#undef CODE_FIELDS
-
- // This documents the amount of free space we have in each Code object header
- // due to padding for code alignment.
-#if V8_TARGET_ARCH_ARM64
- static constexpr int kHeaderPaddingSize =
- V8_EXTERNAL_CODE_SPACE_BOOL ? 4 : (COMPRESS_POINTERS_BOOL ? 8 : 20);
-#elif V8_TARGET_ARCH_MIPS64
- static constexpr int kHeaderPaddingSize = 20;
-#elif V8_TARGET_ARCH_LOONG64
- static constexpr int kHeaderPaddingSize = 20;
-#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize =
- V8_EXTERNAL_CODE_SPACE_BOOL ? 4 : (COMPRESS_POINTERS_BOOL ? 8 : 52);
-#elif V8_TARGET_ARCH_ARM
- static constexpr int kHeaderPaddingSize = 8;
-#elif V8_TARGET_ARCH_IA32
- static constexpr int kHeaderPaddingSize = 8;
-#elif V8_TARGET_ARCH_MIPS
- static constexpr int kHeaderPaddingSize = 8;
-#elif V8_TARGET_ARCH_PPC64
- static constexpr int kHeaderPaddingSize =
- V8_EMBEDDED_CONSTANT_POOL_BOOL ? (COMPRESS_POINTERS_BOOL ? 4 : 48)
- : (COMPRESS_POINTERS_BOOL ? 8 : 52);
-#elif V8_TARGET_ARCH_S390X
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 8 : 20;
-#elif V8_TARGET_ARCH_RISCV64
- static constexpr int kHeaderPaddingSize = (COMPRESS_POINTERS_BOOL ? 8 : 20);
-#elif V8_TARGET_ARCH_RISCV32
- static constexpr int kHeaderPaddingSize = 8;
-#else
-#error Unknown architecture.
-#endif
- static_assert(FIELD_SIZE(kOptionalPaddingOffset) == kHeaderPaddingSize);
-
- class BodyDescriptor;
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ISTREAM_FIELDS)
+#undef ISTREAM_FIELDS
- // Flags layout. base::BitField<type, shift, size>.
-#define CODE_FLAGS_BIT_FIELDS(V, _) \
- V(KindField, CodeKind, 4, _) \
- V(IsTurbofannedField, bool, 1, _) \
- V(StackSlotsField, int, 24, _) \
- V(IsOffHeapTrampoline, bool, 1, _)
- DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
-#undef CODE_FLAGS_BIT_FIELDS
- static_assert(kCodeKindCount <= KindField::kNumValues);
- static_assert(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 30);
- static_assert(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
- FIELD_SIZE(kFlagsOffset) * kBitsPerByte);
+ static_assert(kCodeAlignment > kHeaderSize);
+ // We do two things to ensure the entry address is kCodeAlignment-aligned:
+ // 1) add kCodeAlignmentMinusCodeHeader padding once at the beginning of
+ //    every MemoryChunk, and
+ // 2) round up all IStream allocations to a multiple of kCodeAlignment.
+ // Together, the IStream object itself always starts at offset
+ // kCodeAlignmentMinusCodeHeader, which aligns the entry to kCodeAlignment.
+ static constexpr int kCodeAlignmentMinusCodeHeader =
+ kCodeAlignment - kHeaderSize;
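The arithmetic behind the comment above, written out as a sketch (not part of the patch), under the assumption that each code allocation area itself starts kCodeAlignment-aligned:
// entry = area_start + kCodeAlignmentMinusCodeHeader + kHeaderSize
//       = area_start + kCodeAlignment, i.e. kCodeAlignment-aligned.
static_assert((kCodeAlignmentMinusCodeHeader + kHeaderSize) % kCodeAlignment ==
              0);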
- // KindSpecificFlags layout.
-#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
- V(MarkedForDeoptimizationField, bool, 1, _) \
- V(EmbeddedObjectsClearedField, bool, 1, _) \
- V(CanHaveWeakObjectsField, bool, 1, _) \
- V(IsPromiseRejectionField, bool, 1, _)
- DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
-#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
- static_assert(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 4);
- static_assert(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
- FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) *
- kBitsPerByte);
-
- // The {marked_for_deoptimization} field is accessed from generated code.
- static const int kMarkedForDeoptimizationBit =
- MarkedForDeoptimizationField::kShift;
+ class BodyDescriptor;
static const int kArgumentsBits = 16;
// Reserve one argument count value as the "don't adapt arguments" sentinel.
@@ -812,182 +679,11 @@ class Code : public HeapObject {
friend class RelocIterator;
friend class EvacuateVisitorBase;
- inline CodeDataContainer GCSafeCodeDataContainer(AcquireLoadTag) const;
+ inline Code GCSafeCode(AcquireLoadTag) const;
bool is_promise_rejection() const;
- enum BytecodeToPCPosition {
- kPcAtStartOfBytecode,
- // End of bytecode equals the start of the next bytecode.
- // We need it when we deoptimize to the next bytecode (lazy deopt or deopt
- // of non-topmost frame).
- kPcAtEndOfBytecode
- };
- inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
- BytecodeToPCPosition position,
- BytecodeArray bytecodes);
-
- OBJECT_CONSTRUCTORS(Code, HeapObject);
-};
-
-// TODO(v8:11880): move these functions to CodeDataContainer once they are no
-// longer used from Code.
-V8_EXPORT_PRIVATE Address OffHeapInstructionStart(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE Address OffHeapInstructionEnd(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapInstructionSize(HeapObject code, Builtin builtin);
-
-V8_EXPORT_PRIVATE Address OffHeapMetadataStart(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE Address OffHeapMetadataEnd(HeapObject code, Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapMetadataSize(HeapObject code, Builtin builtin);
-
-V8_EXPORT_PRIVATE Address OffHeapSafepointTableAddress(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapSafepointTableSize(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE Address OffHeapHandlerTableAddress(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapHandlerTableSize(HeapObject code, Builtin builtin);
-V8_EXPORT_PRIVATE Address OffHeapConstantPoolAddress(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapConstantPoolSize(HeapObject code, Builtin builtin);
-V8_EXPORT_PRIVATE Address OffHeapCodeCommentsAddress(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapCodeCommentsSize(HeapObject code, Builtin builtin);
-V8_EXPORT_PRIVATE Address OffHeapUnwindingInfoAddress(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapUnwindingInfoSize(HeapObject code,
- Builtin builtin);
-V8_EXPORT_PRIVATE int OffHeapStackSlots(HeapObject code, Builtin builtin);
-
-// Represents result of the code by inner address (or pc) lookup.
-// When V8_EXTERNAL_CODE_SPACE is disabled there might be two variants:
-// - the pc does not correspond to any known code and IsFound() will return
-// false,
-// - the pc corresponds to existing Code object or embedded builtin (in which
-// case the code() will return the respective Code object or the trampoline
-// Code object that corresponds to the builtin).
-//
-// When V8_EXTERNAL_CODE_SPACE is enabled there might be three variants:
-// - the pc does not correspond to any known code (in which case IsFound()
-// will return false),
-// - the pc corresponds to existing Code object (in which case the code() will
-// return the respective Code object),
-// - the pc corresponds to an embedded builtin (in which case the
-// code_data_container() will return CodeDataContainer object corresponding
-// to the builtin).
-class CodeLookupResult {
- public:
- // Not found.
- CodeLookupResult() = default;
-
- // Code object was found.
- explicit CodeLookupResult(Code code) : code_(code) {}
-
-#ifdef V8_EXTERNAL_CODE_SPACE
- // Embedded builtin was found.
- explicit CodeLookupResult(CodeDataContainer code_data_container)
- : code_data_container_(code_data_container) {}
-#endif
-
- // Returns true if the lookup was successful.
- bool IsFound() const { return IsCode() || IsCodeDataContainer(); }
-
- // Returns true if the lookup found a Code object.
- bool IsCode() const { return !code_.is_null(); }
-
- // Returns true if V8_EXTERNAL_CODE_SPACE is enabled and the lookup found
- // an embedded builtin.
- bool IsCodeDataContainer() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return !code_data_container_.is_null();
-#else
- return false;
-#endif
- }
-
- // Returns the Code object containing the address in question.
- Code code() const {
- DCHECK(IsCode());
- return code_;
- }
-
- // Returns the CodeDataContainer object corresponding to an embedded builtin
- // containing the address in question.
- // Can be used only when V8_EXTERNAL_CODE_SPACE is enabled.
- CodeDataContainer code_data_container() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK(IsCodeDataContainer());
- return code_data_container_;
-#else
- UNREACHABLE();
-#endif
- }
-
- // Returns the CodeT object corresponding to the result in question.
- // The method doesn't try to convert Code result to CodeT, one should use
- // ToCodeT() instead if the conversion logic is required.
- CodeT codet() const {
-#ifdef V8_EXTERNAL_CODE_SPACE
- return code_data_container();
-#else
- return code();
-#endif
- }
-
- // Helper methods, in case of successful lookup return the result of
- // respective accessor of the Code/CodeDataContainer object found.
- // It's safe use them from GC.
- inline CodeKind kind() const;
- inline Builtin builtin_id() const;
- inline bool has_tagged_outgoing_params() const;
- inline bool has_handler_table() const;
- inline bool is_baseline_trampoline_builtin() const;
- inline bool is_interpreter_trampoline_builtin() const;
- inline bool is_baseline_leave_frame_builtin() const;
- inline bool is_maglevved() const;
- inline bool is_turbofanned() const;
- inline bool is_optimized_code() const;
- inline int stack_slots() const;
- inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction() const;
-
- inline int GetOffsetFromInstructionStart(Isolate* isolate, Address pc) const;
-
- inline SafepointEntry GetSafepointEntry(Isolate* isolate, Address pc) const;
- inline MaglevSafepointEntry GetMaglevSafepointEntry(Isolate* isolate,
- Address pc) const;
-
- // Helper method, coverts the successful lookup result to AbstractCode object.
- inline AbstractCode ToAbstractCode() const;
-
- // Helper method, coverts the successful lookup result to Code object.
- // It's not safe to be used from GC because conversion to Code might perform
- // a map check.
- inline Code ToCode() const;
-
- // Helper method, coverts the successful lookup result to CodeT object.
- // It's not safe to be used from GC because conversion to CodeT might perform
- // a map check.
- inline CodeT ToCodeT() const;
-
- bool operator==(const CodeLookupResult& other) const {
- return code_ == other.code_
-#ifdef V8_EXTERNAL_CODE_SPACE
- && code_data_container_ == other.code_data_container_
-#endif
- ; // NOLINT(whitespace/semicolon)
- }
- bool operator!=(const CodeLookupResult& other) const {
- return !operator==(other);
- }
-
- private:
- Code code_;
-#ifdef V8_EXTERNAL_CODE_SPACE
- CodeDataContainer code_data_container_;
-#endif
+ OBJECT_CONSTRUCTORS(InstructionStream, HeapObject);
};
class Code::OptimizedCodeIterator {
@@ -998,57 +694,33 @@ class Code::OptimizedCodeIterator {
Code Next();
private:
- NativeContext next_context_;
- Code current_code_;
Isolate* isolate_;
+ std::unique_ptr<SafepointScope> safepoint_scope_;
+ std::unique_ptr<ObjectIterator> object_iterator_;
+ enum { kIteratingCodeSpace, kIteratingCodeLOSpace, kDone } state_;
DISALLOW_GARBAGE_COLLECTION(no_gc)
};
-// Helper functions for converting Code objects to CodeDataContainer and back
-// when V8_EXTERNAL_CODE_SPACE is enabled.
-inline CodeT ToCodeT(Code code);
-inline Handle<CodeT> ToCodeT(Handle<Code> code, Isolate* isolate);
-inline Code FromCodeT(CodeT code);
-inline Code FromCodeT(CodeT code, Isolate* isolate, RelaxedLoadTag);
-inline Code FromCodeT(CodeT code, PtrComprCageBase, RelaxedLoadTag);
-inline Handle<Code> FromCodeT(Handle<CodeT> code, Isolate* isolate);
-inline AbstractCode ToAbstractCode(CodeT code);
-inline Handle<AbstractCode> ToAbstractCode(Handle<CodeT> code,
- Isolate* isolate);
-inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code);
-
-// AbsractCode is a helper wrapper around {Code|CodeDataContainer|BytecodeArray}
-// when V8_EXTERNAL_CODE_SPACE is enabled or {Code|BytecodeArray} otherwise.
-// Note that when V8_EXTERNAL_CODE_SPACE is enabled then the same abstract code
-// can be represented either by Code object or by respective CodeDataContainer
-// object.
+// Helper functions for converting InstructionStream objects to
+// Code and back.
+inline Code ToCode(InstructionStream code);
+inline InstructionStream FromCode(Code code);
+inline InstructionStream FromCode(Code code, Isolate* isolate, RelaxedLoadTag);
+inline InstructionStream FromCode(Code code, PtrComprCageBase, RelaxedLoadTag);
+
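Editorial note: a minimal usage sketch of the two conversion directions declared above. This is illustrative only and assumes a Code object that still owns an InstructionStream; CHECK_EQ is used only to spell out that the two views reference each other.

  // Sketch only, not part of the patch.
  InstructionStream RoundTrip(Code code) {
    InstructionStream istream = FromCode(code);  // metadata -> instructions
    Code same = ToCode(istream);                 // instructions -> metadata
    CHECK_EQ(same, code);                        // both views refer to each other
    return istream;
  }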
+// AbstractCode is a helper wrapper around {Code|BytecodeArray}.
class AbstractCode : public HeapObject {
public:
- NEVER_READ_ONLY_SPACE
-
int SourcePosition(PtrComprCageBase cage_base, int offset);
int SourceStatementPosition(PtrComprCageBase cage_base, int offset);
- // Returns the address of the first instruction. For off-heap code objects
- // this differs from instruction_start (which would point to the off-heap
- // trampoline instead).
inline Address InstructionStart(PtrComprCageBase cage_base);
-
- // Returns the address right after the last instruction. For off-heap code
- // objects this differs from instruction_end (which would point to the
- // off-heap trampoline instead).
inline Address InstructionEnd(PtrComprCageBase cage_base);
-
- // Returns the size of the native instructions, including embedded
- // data such as the safepoints table. For off-heap code objects
- // this may differ from instruction_size in that this will return the size of
- // the off-heap instruction stream rather than the on-heap trampoline located
- // at instruction_start.
inline int InstructionSize(PtrComprCageBase cage_base);
// Return the source position table for interpreter code.
- inline ByteArray SourcePositionTable(PtrComprCageBase cage_base,
+ inline ByteArray SourcePositionTable(Isolate* isolate,
SharedFunctionInfo sfi);
void DropStackFrameCache(PtrComprCageBase cage_base);
@@ -1064,51 +736,32 @@ class AbstractCode : public HeapObject {
inline Builtin builtin_id(PtrComprCageBase cage_base);
- inline bool is_off_heap_trampoline(PtrComprCageBase cage_base);
+ inline bool has_instruction_stream(PtrComprCageBase cage_base);
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction(
PtrComprCageBase cage_base);
DECL_CAST(AbstractCode)
- // The following predicates don't have the parameterless versions on
- // purpose - in order to avoid the expensive cage base computation that
- // should work for both regular V8 heap objects and external code space
- // objects.
inline bool IsCode(PtrComprCageBase cage_base) const;
- inline bool IsCodeT(PtrComprCageBase cage_base) const;
inline bool IsBytecodeArray(PtrComprCageBase cage_base) const;
- inline Code ToCode(PtrComprCageBase cage_base);
- inline CodeT ToCodeT(PtrComprCageBase cage_base);
-
inline Code GetCode();
- inline CodeT GetCodeT();
inline BytecodeArray GetBytecodeArray();
- // AbstractCode might be represented by both Code and non-Code objects and
- // thus regular comparison of tagged values might not be correct when
- // V8_EXTERNAL_CODE_SPACE is enabled. SafeEquals() must be used instead.
- constexpr bool operator==(AbstractCode other) const {
- return SafeEquals(other);
- }
- constexpr bool operator!=(AbstractCode other) const {
- return !SafeEquals(other);
- }
-
private:
inline ByteArray SourcePositionTableInternal(PtrComprCageBase cage_base);
OBJECT_CONSTRUCTORS(AbstractCode, HeapObject);
};
-// Dependent code is conceptually the list of {Code, DependencyGroup} tuples
-// associated with an object, where the dependency group is a reason that could
-// lead to a deopt of the corresponding code.
+// Dependent code is conceptually the list of {InstructionStream,
+// DependencyGroup} tuples associated with an object, where the dependency group
+// is a reason that could lead to a deopt of the corresponding code.
//
// Implementation details: DependentCode is a weak array list containing
-// entries, where each entry consists of a (weak) Code object and the
-// DependencyGroups bitset as a Smi.
+// entries, where each entry consists of a (weak) InstructionStream object and
+// the DependencyGroups bitset as a Smi.
//
// Note the underlying weak array list currently never shrinks physically (the
// contents may shrink).
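Editorial note: a toy model of the entry layout described above, written in plain C++ rather than the V8 heap types; the constant names mirror the ones defined further down in this hunk.

  #include <cstdint>
  #include <utility>
  #include <vector>

  // Each logical entry occupies kSlotsPerEntry consecutive slots of the list.
  constexpr int kSlotsPerEntry = 2;   // {code: weak InstructionStream, groups: Smi}
  constexpr int kCodeSlotOffset = 0;
  constexpr int kGroupsSlotOffset = 1;

  // Returns the (weak code pointer, dependency-group bitset) pair of entry i.
  std::pair<uintptr_t, int> EntryAt(const std::vector<uintptr_t>& list, int i) {
    const int base = i * kSlotsPerEntry;
    return {list[base + kCodeSlotOffset],
            static_cast<int>(list[base + kGroupsSlotOffset])};
  }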
@@ -1166,7 +819,7 @@ class DependentCode : public WeakArrayList {
DependencyGroups groups);
template <typename ObjectT>
- static bool MarkCodeForDeoptimization(ObjectT object,
+ static bool MarkCodeForDeoptimization(Isolate* isolate, ObjectT object,
DependencyGroups groups);
V8_EXPORT_PRIVATE static DependentCode empty_dependent_code(
@@ -1175,7 +828,8 @@ class DependentCode : public WeakArrayList {
RootIndex::kEmptyWeakArrayList;
// Constants exposed for tests.
- static constexpr int kSlotsPerEntry = 2; // {code: weak Code, groups: Smi}.
+ static constexpr int kSlotsPerEntry =
+ 2; // {code: weak InstructionStream, groups: Smi}.
static constexpr int kCodeSlotOffset = 0;
static constexpr int kGroupsSlotOffset = 1;
@@ -1185,20 +839,19 @@ class DependentCode : public WeakArrayList {
static void SetDependentCode(Handle<HeapObject> object,
Handle<DependentCode> dep);
- static Handle<DependentCode> New(Isolate* isolate, DependencyGroups groups,
- Handle<Code> code);
static Handle<DependentCode> InsertWeakCode(Isolate* isolate,
Handle<DependentCode> entries,
DependencyGroups groups,
Handle<Code> code);
- bool MarkCodeForDeoptimization(DependencyGroups deopt_groups);
+ bool MarkCodeForDeoptimization(Isolate* isolate,
+ DependencyGroups deopt_groups);
void DeoptimizeDependencyGroups(Isolate* isolate, DependencyGroups groups);
// The callback is called for all non-cleared entries, and should return true
// iff the current entry should be cleared.
- using IterateAndCompactFn = std::function<bool(CodeT, DependencyGroups)>;
+ using IterateAndCompactFn = std::function<bool(Code, DependencyGroups)>;
void IterateAndCompact(const IterateAndCompactFn& fn);
// Fills the given entry with the last non-cleared entry in this list, and
@@ -1256,6 +909,11 @@ class BytecodeArray
// this will return empty_byte_array.
DECL_GETTER(SourcePositionTable, ByteArray)
+ // Raw accessors for these fields, used during code cache deserialization.
+ DECL_GETTER(raw_constant_pool, Object)
+ DECL_GETTER(raw_handler_table, Object)
+ DECL_GETTER(raw_source_position_table, Object)
+
// Indicates that an attempt was made to collect source positions, but that it
// failed most likely due to stack exhaustion. When in this state
// |SourcePositionTable| will return an empty byte array rather than crashing
@@ -1311,7 +969,7 @@ class DeoptimizationLiteralArray : public WeakFixedArray {
inline Object get(PtrComprCageBase cage_base, int index) const;
// Setter for literals. This will set the object as strong or weak depending
- // on Code::IsWeakObjectInOptimizedCode.
+ // on InstructionStream::IsWeakObjectInOptimizedCode.
inline void set(int index, Object value);
DECL_CAST(DeoptimizationLiteralArray)
diff --git a/deps/v8/src/objects/code.tq b/deps/v8/src/objects/code.tq
index 86e33add74..af6da378b6 100644
--- a/deps/v8/src/objects/code.tq
+++ b/deps/v8/src/objects/code.tq
@@ -26,4 +26,4 @@ extern class BytecodeArray extends FixedArrayBase {
bytecode_age: uint16;
}
-extern class CodeDataContainer extends HeapObject;
+extern class Code extends HeapObject;
diff --git a/deps/v8/src/objects/compilation-cache-table.cc b/deps/v8/src/objects/compilation-cache-table.cc
index 0377c92888..1c3b154310 100644
--- a/deps/v8/src/objects/compilation-cache-table.cc
+++ b/deps/v8/src/objects/compilation-cache-table.cc
@@ -212,8 +212,8 @@ class RegExpKey : public HashTableKey {
Smi flags_;
};
-// CodeKey carries the SharedFunctionInfo key associated with a Code
-// object value.
+// CodeKey carries the SharedFunctionInfo key associated with a
+// Code object value.
class CodeKey : public HashTableKey {
public:
explicit CodeKey(Handle<SharedFunctionInfo> key)
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index 1f1f0530be..4a4e9c8a2d 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -35,16 +35,16 @@ bool CompressedObjectSlot::contains_map_value(Address raw_value) const {
Object CompressedObjectSlot::operator*() const {
Tagged_t value = *location();
- return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+ return Object(TCompressionScheme::DecompressTagged(address(), value));
}
Object CompressedObjectSlot::load(PtrComprCageBase cage_base) const {
Tagged_t value = *location();
- return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+ return Object(TCompressionScheme::DecompressTagged(cage_base, value));
}
void CompressedObjectSlot::store(Object value) const {
- *location() = TCompressionScheme::CompressTagged(value.ptr());
+ *location() = TCompressionScheme::CompressObject(value.ptr());
}
void CompressedObjectSlot::store_map(Map map) const {
@@ -63,36 +63,36 @@ Map CompressedObjectSlot::load_map() const {
Object CompressedObjectSlot::Acquire_Load() const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
- return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+ return Object(TCompressionScheme::DecompressTagged(address(), value));
}
Object CompressedObjectSlot::Relaxed_Load() const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return Object(TCompressionScheme::DecompressTaggedAny(address(), value));
+ return Object(TCompressionScheme::DecompressTagged(address(), value));
}
Object CompressedObjectSlot::Relaxed_Load(PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+ return Object(TCompressionScheme::DecompressTagged(cage_base, value));
}
void CompressedObjectSlot::Relaxed_Store(Object value) const {
- Tagged_t ptr = TCompressionScheme::CompressTagged(value.ptr());
+ Tagged_t ptr = TCompressionScheme::CompressObject(value.ptr());
AsAtomicTagged::Relaxed_Store(location(), ptr);
}
void CompressedObjectSlot::Release_Store(Object value) const {
- Tagged_t ptr = TCompressionScheme::CompressTagged(value.ptr());
+ Tagged_t ptr = TCompressionScheme::CompressObject(value.ptr());
AsAtomicTagged::Release_Store(location(), ptr);
}
Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
Object target) const {
- Tagged_t old_ptr = TCompressionScheme::CompressTagged(old.ptr());
- Tagged_t target_ptr = TCompressionScheme::CompressTagged(target.ptr());
+ Tagged_t old_ptr = TCompressionScheme::CompressObject(old.ptr());
+ Tagged_t target_ptr = TCompressionScheme::CompressObject(target.ptr());
Tagged_t result =
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
- return Object(TCompressionScheme::DecompressTaggedAny(address(), result));
+ return Object(TCompressionScheme::DecompressTagged(address(), result));
}
//
@@ -101,38 +101,38 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
MaybeObject CompressedMaybeObjectSlot::operator*() const {
Tagged_t value = *location();
- return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value));
+ return MaybeObject(TCompressionScheme::DecompressTagged(address(), value));
}
MaybeObject CompressedMaybeObjectSlot::load(PtrComprCageBase cage_base) const {
Tagged_t value = *location();
- return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+ return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value));
}
void CompressedMaybeObjectSlot::store(MaybeObject value) const {
- *location() = TCompressionScheme::CompressTagged(value.ptr());
+ *location() = TCompressionScheme::CompressAny(value.ptr());
}
MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value));
+ return MaybeObject(TCompressionScheme::DecompressTagged(address(), value));
}
MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(
PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value));
+ return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value));
}
void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
- Tagged_t ptr = TCompressionScheme::CompressTagged(value.ptr());
+ Tagged_t ptr = TCompressionScheme::CompressAny(value.ptr());
AsAtomicTagged::Relaxed_Store(location(), ptr);
}
void CompressedMaybeObjectSlot::Release_CompareAndSwap(
MaybeObject old, MaybeObject target) const {
- Tagged_t old_ptr = TCompressionScheme::CompressTagged(old.ptr());
- Tagged_t target_ptr = TCompressionScheme::CompressTagged(target.ptr());
+ Tagged_t old_ptr = TCompressionScheme::CompressAny(old.ptr());
+ Tagged_t target_ptr = TCompressionScheme::CompressAny(target.ptr());
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
}
@@ -143,29 +143,29 @@ void CompressedMaybeObjectSlot::Release_CompareAndSwap(
HeapObjectReference CompressedHeapObjectSlot::operator*() const {
Tagged_t value = *location();
return HeapObjectReference(
- TCompressionScheme::DecompressTaggedPointer(address(), value));
+ TCompressionScheme::DecompressTagged(address(), value));
}
HeapObjectReference CompressedHeapObjectSlot::load(
PtrComprCageBase cage_base) const {
Tagged_t value = *location();
return HeapObjectReference(
- TCompressionScheme::DecompressTaggedPointer(cage_base, value));
+ TCompressionScheme::DecompressTagged(cage_base, value));
}
void CompressedHeapObjectSlot::store(HeapObjectReference value) const {
- *location() = TCompressionScheme::CompressTagged(value.ptr());
+ *location() = TCompressionScheme::CompressObject(value.ptr());
}
HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
Tagged_t value = *location();
DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value));
return HeapObject::cast(
- Object(TCompressionScheme::DecompressTaggedPointer(address(), value)));
+ Object(TCompressionScheme::DecompressTagged(address(), value)));
}
void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
- *location() = TCompressionScheme::CompressTagged(value.ptr());
+ *location() = TCompressionScheme::CompressObject(value.ptr());
}
//
@@ -176,47 +176,47 @@ template <typename CompressionScheme>
Object OffHeapCompressedObjectSlot<CompressionScheme>::load(
PtrComprCageBase cage_base) const {
Tagged_t value = *TSlotBase::location();
- return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
+ return Object(CompressionScheme::DecompressTagged(cage_base, value));
}
template <typename CompressionScheme>
void OffHeapCompressedObjectSlot<CompressionScheme>::store(Object value) const {
- *TSlotBase::location() = CompressionScheme::CompressTagged(value.ptr());
+ *TSlotBase::location() = CompressionScheme::CompressObject(value.ptr());
}
template <typename CompressionScheme>
Object OffHeapCompressedObjectSlot<CompressionScheme>::Relaxed_Load(
PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(TSlotBase::location());
- return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
+ return Object(CompressionScheme::DecompressTagged(cage_base, value));
}
template <typename CompressionScheme>
Object OffHeapCompressedObjectSlot<CompressionScheme>::Acquire_Load(
PtrComprCageBase cage_base) const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(TSlotBase::location());
- return Object(CompressionScheme::DecompressTaggedAny(cage_base, value));
+ return Object(CompressionScheme::DecompressTagged(cage_base, value));
}
template <typename CompressionScheme>
void OffHeapCompressedObjectSlot<CompressionScheme>::Relaxed_Store(
Object value) const {
- Tagged_t ptr = CompressionScheme::CompressTagged(value.ptr());
+ Tagged_t ptr = CompressionScheme::CompressObject(value.ptr());
AsAtomicTagged::Relaxed_Store(TSlotBase::location(), ptr);
}
template <typename CompressionScheme>
void OffHeapCompressedObjectSlot<CompressionScheme>::Release_Store(
Object value) const {
- Tagged_t ptr = CompressionScheme::CompressTagged(value.ptr());
+ Tagged_t ptr = CompressionScheme::CompressObject(value.ptr());
AsAtomicTagged::Release_Store(TSlotBase::location(), ptr);
}
template <typename CompressionScheme>
void OffHeapCompressedObjectSlot<CompressionScheme>::Release_CompareAndSwap(
Object old, Object target) const {
- Tagged_t old_ptr = CompressionScheme::CompressTagged(old.ptr());
- Tagged_t target_ptr = CompressionScheme::CompressTagged(target.ptr());
+ Tagged_t old_ptr = CompressionScheme::CompressObject(old.ptr());
+ Tagged_t target_ptr = CompressionScheme::CompressObject(target.ptr());
AsAtomicTagged::Release_CompareAndSwap(TSlotBase::location(), old_ptr,
target_ptr);
}
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 8f552a13e9..4fa5ec9030 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -287,7 +287,7 @@ Map NativeContext::TypedArrayElementsKindToCtorMap(
ElementsKind::FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
Map map = Map::cast(JSFunction::cast(get(ctor_index)).initial_map());
DCHECK_EQ(map.elements_kind(), element_kind);
- DCHECK(InstanceTypeChecker::IsJSTypedArray(map.instance_type()));
+ DCHECK(InstanceTypeChecker::IsJSTypedArray(map));
return map;
}
@@ -299,26 +299,10 @@ Map NativeContext::TypedArrayElementsKindToRabGsabCtorMap(
Map map = Map::cast(get(ctor_index));
DCHECK_EQ(map.elements_kind(),
GetCorrespondingRabGsabElementsKind(element_kind));
- DCHECK(InstanceTypeChecker::IsJSTypedArray(map.instance_type()));
+ DCHECK(InstanceTypeChecker::IsJSTypedArray(map));
return map;
}
-void NativeContext::SetOptimizedCodeListHead(Object head) {
- set(OPTIMIZED_CODE_LIST, head, UPDATE_WRITE_BARRIER, kReleaseStore);
-}
-
-Object NativeContext::OptimizedCodeListHead() {
- return get(OPTIMIZED_CODE_LIST);
-}
-
-void NativeContext::SetDeoptimizedCodeListHead(Object head) {
- set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WRITE_BARRIER, kReleaseStore);
-}
-
-Object NativeContext::DeoptimizedCodeListHead() {
- return get(DEOPTIMIZED_CODE_LIST);
-}
-
OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context)
} // namespace internal
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index f7661d0de6..6ff730cd7d 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -396,18 +396,6 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
}
}
- // Check blocklist. Names that are listed, cannot be resolved further.
- ScopeInfo scope_info = context->scope_info();
- CHECK_IMPLIES(v8_flags.experimental_reuse_locals_blocklists,
- !scope_info.HasLocalsBlockList());
- if (scope_info.HasLocalsBlockList() &&
- scope_info.LocalsBlockList().Has(isolate, name)) {
- if (v8_flags.trace_contexts) {
- PrintF(" - name is blocklisted. Aborting.\n");
- }
- break;
- }
-
// Check the original context, but do not follow its context chain.
Object obj = context->get(WRAPPED_CONTEXT_INDEX);
if (obj.IsContext()) {
@@ -428,8 +416,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
// Note that this implicitly skips the block list check for the
// "wrapped" context lookup for DebugEvaluateContexts. In that case
// `has_seen_debug_evaluate_context` will always be false.
- if (v8_flags.experimental_reuse_locals_blocklists &&
- has_seen_debug_evaluate_context &&
+ if (has_seen_debug_evaluate_context &&
isolate->heap()->locals_block_list_cache().IsEphemeronHashTable()) {
Handle<ScopeInfo> scope_info = handle(context->scope_info(), isolate);
Object maybe_outer_block_list =
@@ -456,13 +443,6 @@ bool NativeContext::HasTemplateLiteralObject(JSArray array) {
return array.map() == js_array_template_literal_object_map();
}
-void NativeContext::AddOptimizedCode(CodeT code) {
- DCHECK(CodeKindCanDeoptimize(code.kind()));
- DCHECK(code.next_code_link().IsUndefined());
- code.set_next_code_link(OptimizedCodeListHead());
- set(OPTIMIZED_CODE_LIST, code, UPDATE_WRITE_BARRIER, kReleaseStore);
-}
-
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
Isolate* isolate = GetIsolate();
Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
@@ -541,7 +521,7 @@ void Context::VerifyExtensionSlot(HeapObject extension) {
void Context::set_extension(HeapObject object, WriteBarrierMode mode) {
DCHECK(scope_info().HasContextExtensionSlot());
#ifdef VERIFY_HEAP
- VerifyExtensionSlot(object);
+ if (v8_flags.verify_heap) VerifyExtensionSlot(object);
#endif
set(EXTENSION_INDEX, object, mode);
}
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 7e1e715038..0b4760e1a4 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -169,6 +169,12 @@ enum ContextLookupFlags {
V(INTL_SEGMENTER_FUNCTION_INDEX, JSFunction, intl_segmenter_function) \
V(INTL_SEGMENTS_MAP_INDEX, Map, intl_segments_map) \
V(INTL_SEGMENT_ITERATOR_MAP_INDEX, Map, intl_segment_iterator_map) \
+ V(ITERATOR_FILTER_HELPER_MAP_INDEX, Map, iterator_filter_helper_map) \
+ V(ITERATOR_MAP_HELPER_MAP_INDEX, Map, iterator_map_helper_map) \
+ V(ITERATOR_TAKE_HELPER_MAP_INDEX, Map, iterator_take_helper_map) \
+ V(ITERATOR_DROP_HELPER_MAP_INDEX, Map, iterator_drop_helper_map) \
+ V(ITERATOR_FUNCTION_INDEX, JSFunction, iterator_function) \
+ V(VALID_ITERATOR_WRAPPER_MAP_INDEX, Map, valid_iterator_wrapper_map) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
js_array_packed_smi_elements_map) \
@@ -230,6 +236,7 @@ enum ContextLookupFlags {
V(CATCH_CONTEXT_MAP_INDEX, Map, catch_context_map) \
V(WITH_CONTEXT_MAP_INDEX, Map, with_context_map) \
V(DEBUG_EVALUATE_CONTEXT_MAP_INDEX, Map, debug_evaluate_context_map) \
+ V(JS_RAB_GSAB_DATA_VIEW_MAP_INDEX, Map, js_rab_gsab_data_view_map) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \
V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \
@@ -549,13 +556,11 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
- OPTIMIZED_CODE_LIST, // Weak.
- DEOPTIMIZED_CODE_LIST, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
+ NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
- FIRST_WEAK_SLOT = OPTIMIZED_CODE_LIST,
+ FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK,
FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX,
// TODO(shell): Remove, once it becomes zero
@@ -780,14 +785,6 @@ class NativeContext : public Context {
class BodyDescriptor;
- // The native context stores a list of all optimized code and a list of all
- // deoptimized code, which are needed by the deoptimizer.
- V8_EXPORT_PRIVATE void AddOptimizedCode(CodeT code);
- inline void SetOptimizedCodeListHead(Object head);
- inline Object OptimizedCodeListHead();
- inline void SetDeoptimizedCodeListHead(Object head);
- inline Object DeoptimizedCodeListHead();
-
void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index d36c4c2c68..40238d455b 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -101,6 +101,12 @@ extern enum ContextSlot extends intptr constexpr 'Context::Field' {
ARRAY_JOIN_STACK_INDEX: Slot<NativeContext, Undefined|FixedArray>,
OBJECT_FUNCTION_INDEX: Slot<NativeContext, JSFunction>,
ITERATOR_RESULT_MAP_INDEX: Slot<NativeContext, Map>,
+ ITERATOR_MAP_HELPER_MAP_INDEX: Slot<NativeContext, Map>,
+ ITERATOR_FILTER_HELPER_MAP_INDEX: Slot<NativeContext, Map>,
+ ITERATOR_TAKE_HELPER_MAP_INDEX: Slot<NativeContext, Map>,
+ ITERATOR_DROP_HELPER_MAP_INDEX: Slot<NativeContext, Map>,
+ ITERATOR_FUNCTION_INDEX: Slot<NativeContext, JSFunction>,
+ VALID_ITERATOR_WRAPPER_MAP_INDEX: Slot<NativeContext, Map>,
JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX: Slot<NativeContext, Map>,
JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX: Slot<NativeContext, Map>,
MATH_RANDOM_CACHE_INDEX: Slot<NativeContext, FixedDoubleArray>,
@@ -143,6 +149,9 @@ extern enum ContextSlot extends intptr constexpr 'Context::Field' {
RAB_GSAB_BIGUINT64_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
RAB_GSAB_BIGINT64_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX: Slot<NativeContext, Map>,
+ DATA_PROPERTY_DESCRIPTOR_MAP_INDEX: Slot<NativeContext, Map>,
+
PROMISE_FUNCTION_INDEX: Slot<NativeContext, JSFunction>,
PROMISE_THEN_INDEX: Slot<NativeContext, JSFunction>,
PROMISE_PROTOTYPE_INDEX: Slot<NativeContext, JSObject>,
diff --git a/deps/v8/src/objects/data-handler.tq b/deps/v8/src/objects/data-handler.tq
index 46af326348..6bf26c2ccc 100644
--- a/deps/v8/src/objects/data-handler.tq
+++ b/deps/v8/src/objects/data-handler.tq
@@ -6,12 +6,11 @@
// here, please also update DataHandlerVerify in objects-debug.cc.
@abstract
extern class DataHandler extends Struct {
- // [smi_handler]: A Smi which encodes a handler or Code object (we still
- // use code handlers for accessing lexical environment variables, but soon
- // only smi handlers will remain). See LoadHandler and StoreHandler for
- // details about encoding.
- @if(V8_EXTERNAL_CODE_SPACE) smi_handler: Smi|CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) smi_handler: Smi|Code;
+ // [smi_handler]: A Smi which encodes a handler or Code object
+ // (we still use code handlers for accessing lexical environment variables,
+ // but soon only smi handlers will remain). See LoadHandler and StoreHandler
+ // for details about encoding.
+ smi_handler: Smi|Code;
// [validity_cell]: A validity Cell that guards prototype chain modifications.
validity_cell: Smi|Cell;
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index d85c4816c0..fa42dc17fc 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -32,9 +32,7 @@ RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
kNumberOfAllDescriptorsOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_descriptors,
kNumberOfDescriptorsOffset)
-RELAXED_INT16_ACCESSORS(DescriptorArray, raw_number_of_marked_descriptors,
- kRawNumberOfMarkedDescriptorsOffset)
-RELAXED_INT16_ACCESSORS(DescriptorArray, filler16bits, kFiller16BitsOffset)
+RELAXED_UINT32_ACCESSORS(DescriptorArray, raw_gc_state, kRawGcStateOffset)
inline int16_t DescriptorArray::number_of_slack_descriptors() const {
return number_of_all_descriptors() - number_of_descriptors();
@@ -44,14 +42,6 @@ inline int DescriptorArray::number_of_entries() const {
return number_of_descriptors();
}
-inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors(
- int16_t expected, int16_t value) {
- return base::Relaxed_CompareAndSwap(
- reinterpret_cast<base::Atomic16*>(
- FIELD_ADDR(*this, kRawNumberOfMarkedDescriptorsOffset)),
- expected, value);
-}
-
void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
set_enum_cache(array.enum_cache());
}
@@ -269,6 +259,82 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
+// static
+bool DescriptorArrayMarkingState::TryUpdateIndicesToMark(
+ unsigned gc_epoch, DescriptorArray array, DescriptorIndex index_to_mark) {
+ const auto current_epoch = gc_epoch & Epoch::kMask;
+ while (true) {
+ const RawGCStateType raw_gc_state = array.raw_gc_state(kRelaxedLoad);
+ const auto epoch_from_state = Epoch::decode(raw_gc_state);
+ RawGCStateType new_raw_gc_state = 0;
+ if (current_epoch != epoch_from_state) {
+ // If the epochs do not match, then either the raw_gc_state is zero
+ // (freshly allocated descriptor array) or the epoch in the value lags
+ // behind by 1.
+ DCHECK_IMPLIES(raw_gc_state != 0,
+ Epoch::decode(epoch_from_state + 1) == current_epoch);
+ new_raw_gc_state = NewState(current_epoch, 0, index_to_mark);
+ } else {
+ const DescriptorIndex already_marked = Marked::decode(raw_gc_state);
+ const DescriptorIndex delta = Delta::decode(raw_gc_state);
+ if ((already_marked + delta) >= index_to_mark) {
+ return false;
+ }
+ new_raw_gc_state = NewState(current_epoch, already_marked,
+ index_to_mark - already_marked);
+ }
+ if (SwapState(array, raw_gc_state, new_raw_gc_state)) {
+ return true;
+ }
+ }
+}
+
+// static
+std::pair<DescriptorArrayMarkingState::DescriptorIndex,
+ DescriptorArrayMarkingState::DescriptorIndex>
+DescriptorArrayMarkingState::AcquireDescriptorRangeToMark(
+ unsigned gc_epoch, DescriptorArray array) {
+ const auto current_epoch = gc_epoch & Epoch::kMask;
+ while (true) {
+ const RawGCStateType raw_gc_state = array.raw_gc_state(kRelaxedLoad);
+ const DescriptorIndex marked = Marked::decode(raw_gc_state);
+ const DescriptorIndex delta = Delta::decode(raw_gc_state);
+ // We may encounter an array here that was merely pushed to the marker. In
+ // such a case, we process all descriptors (if we succeed). The cases to
+ // check are:
+ // 1. Epoch mismatch: Happens when descriptors survive a GC cycle.
+ // 2. Epoch matches but marked/delta is 0: Can happen when descriptors are
+ // newly allocated in the current cycle.
+ if (current_epoch != Epoch::decode(raw_gc_state) || (marked + delta) == 0) {
+ // In case the number of descriptors is 0 and we reach the array through
+ // roots marking, also mark the slack to get a proper transition from 0
+ // marked to X marked. Otherwise, we would need to treat the state
+ // (marked = 0, delta = 0) as valid, which leads to double-accounting
+ // through the marking barrier (when nof > 1 in the barrier).
+ const int16_t number_of_descriptors =
+ array.number_of_descriptors() ? array.number_of_descriptors()
+ : array.number_of_all_descriptors();
+ DCHECK_GT(number_of_descriptors, 0);
+ if (SwapState(array, raw_gc_state,
+ NewState(current_epoch, number_of_descriptors, 0))) {
+ return {0, number_of_descriptors};
+ }
+ continue;
+ }
+
+ // The delta is 0, so everything has been processed. Return the marked
+ // indices.
+ if (delta == 0) {
+ return {marked, marked};
+ }
+
+ if (SwapState(array, raw_gc_state,
+ NewState(current_epoch, marked + delta, 0))) {
+ return {marked, marked + delta};
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 178e6d03af..f4ec397cb0 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -43,8 +43,7 @@ class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
// Header:
// [16:0 bits]: number_of_all_descriptors (including slack)
// [32:16 bits]: number_of_descriptors
-// [48:32 bits]: raw_number_of_marked_descriptors (used by GC)
-// [64:48 bits]: alignment filler
+// [64:32 bits]: raw_gc_state (used by GC)
// [kEnumCacheOffset]: enum cache
// Elements:
// [kHeaderSize + 0]: first key (and internalized String)
@@ -155,7 +154,7 @@ class DescriptorArray
AllocationType allocation = AllocationType::kYoung);
void Initialize(EnumCache enum_cache, HeapObject undefined_value,
- int nof_descriptors, int slack);
+ int nof_descriptors, int slack, uint32_t raw_gc_state);
// Constant for denoting key was not found.
static const int kNotFound = -1;
@@ -164,12 +163,9 @@ class DescriptorArray
static_assert(IsAligned(kHeaderSize, kTaggedSize));
// Garbage collection support.
- DECL_INT16_ACCESSORS(raw_number_of_marked_descriptors)
- // Atomic compare-and-swap operation on the raw_number_of_marked_descriptors.
- int16_t CompareAndSwapRawNumberOfMarkedDescriptors(int16_t expected,
- int16_t value);
- int16_t UpdateNumberOfMarkedDescriptors(unsigned mark_compact_epoch,
- int16_t number_of_marked_descriptors);
+ DECL_RELAXED_UINT32_ACCESSORS(raw_gc_state)
+ static constexpr size_t kSizeOfRawGcState =
+ kRawGcStateOffsetEnd - kRawGcStateOffset + 1;
static constexpr int SizeFor(int number_of_all_descriptors) {
return OffsetOfDescriptorAt(number_of_all_descriptors);
@@ -233,9 +229,6 @@ class DescriptorArray
using EntryValueField = TaggedField<MaybeObject, kEntryValueOffset>;
private:
- friend class WebSnapshotDeserializer;
- DECL_INT16_ACCESSORS(filler16bits)
-
inline void SetKey(InternalIndex descriptor_number, Name key);
inline void SetValue(InternalIndex descriptor_number, MaybeObject value);
inline void SetDetails(InternalIndex descriptor_number,
@@ -253,40 +246,71 @@ class DescriptorArray
TQ_OBJECT_CONSTRUCTORS(DescriptorArray)
};
-class NumberOfMarkedDescriptors {
+// Custom DescriptorArray marking state for visitors that are allowed to write
+// into the heap. The marking state uses DescriptorArray::raw_gc_state() as
+// storage.
+//
+// The state essentially keeps track of 3 fields:
+// 1. The collector epoch: The rest of the state is only valid if the epoch
+// matches. If the epoch doesn't match, the other fields should be considered
+// invalid. The epoch is necessary, as not all DescriptorArray objects are
+// eventually trimmed in the atomic pause and thus available for resetting
+// the state.
+// 2. Number of already marked descriptors.
+// 3. Delta of descriptors still to be marked in this cycle. This must be 0
+// after marking is done.
+class DescriptorArrayMarkingState final {
public:
-// Bit positions for |bit_field|.
#define BIT_FIELD_FIELDS(V, _) \
V(Epoch, unsigned, 2, _) \
- V(Marked, int16_t, 14, _)
+ V(Marked, uint16_t, 14, _) \
+ V(Delta, uint16_t, 16, _)
DEFINE_BIT_FIELDS(BIT_FIELD_FIELDS)
#undef BIT_FIELD_FIELDS
- static const int kMaxNumberOfMarkedDescriptors = Marked::kMax;
- // Decodes the raw value of the number of marked descriptors for the
- // given mark compact garbage collection epoch.
- static inline int16_t decode(unsigned mark_compact_epoch, int16_t raw_value) {
- unsigned epoch_from_value = Epoch::decode(static_cast<uint16_t>(raw_value));
- int16_t marked_from_value =
- Marked::decode(static_cast<uint16_t>(raw_value));
- unsigned actual_epoch = mark_compact_epoch & Epoch::kMask;
- if (actual_epoch == epoch_from_value) return marked_from_value;
- // If the epochs do not match, then either the raw_value is zero (freshly
- // allocated descriptor array) or the epoch from value lags by 1.
- DCHECK_IMPLIES(raw_value != 0,
- Epoch::decode(epoch_from_value + 1) == actual_epoch);
- // Not matching epochs means that the no descriptors were marked in the
- // current epoch.
- return 0;
+ static_assert(Marked::kMax <= Delta::kMax);
+ static_assert(kMaxNumberOfDescriptors <= Marked::kMax);
+
+ using DescriptorIndex = uint16_t;
+ using RawGCStateType = uint32_t;
+
+ static constexpr RawGCStateType kInitialGCState = 0;
+
+ static constexpr RawGCStateType GetFullyMarkedState(
+ unsigned epoch, DescriptorIndex number_of_descriptors) {
+ return NewState(epoch & Epoch::kMask, number_of_descriptors, 0);
+ }
+
+ // Potentially updates the delta of descriptors still to be marked. Returns
+ // true if the update was successful and the object should be processed via a
+ // marking visitor.
+ //
+ // The call issues an Acq/Rel barrier to allow synchronizing other state
+ // (e.g. value of descriptor slots) with it.
+ static inline bool TryUpdateIndicesToMark(unsigned gc_epoch,
+ DescriptorArray array,
+ DescriptorIndex index_to_mark);
+
+ // Used from the visitor when processing a DescriptorArray. Returns a range of
+ // start and end descriptor indices. No processing is required for start ==
+ // end. The method signals the first invocation by returning start == 0, and
+ // end != 0.
+ static inline std::pair<DescriptorIndex, DescriptorIndex>
+ AcquireDescriptorRangeToMark(unsigned gc_epoch, DescriptorArray array);
+
+ private:
+ static constexpr RawGCStateType NewState(unsigned masked_epoch,
+ DescriptorIndex marked,
+ DescriptorIndex delta) {
+ return Epoch::encode(masked_epoch) | Marked::encode(marked) |
+ Delta::encode(delta);
}
- // Encodes the number of marked descriptors for the given mark compact
- // garbage collection epoch.
- static inline int16_t encode(unsigned mark_compact_epoch, int16_t value) {
- // TODO(ulan): avoid casting to int16_t by adding support for uint16_t
- // atomics.
- return static_cast<int16_t>(
- Epoch::encode(mark_compact_epoch & Epoch::kMask) |
- Marked::encode(value));
+ static bool SwapState(DescriptorArray array, RawGCStateType old_state,
+ RawGCStateType new_state) {
+ return static_cast<RawGCStateType>(base::AcquireRelease_CompareAndSwap(
+ reinterpret_cast<base::Atomic32*>(
+ FIELD_ADDR(array, DescriptorArray::kRawGcStateOffset)),
+ old_state, new_state)) == old_state;
}
};
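Editorial note: to make the epoch/marked/delta protocol above concrete, here is a toy re-implementation of the raw_gc_state packing (2 epoch bits, 14 marked bits, 16 delta bits) together with the transitions a barrier and a visitor would perform. The real code uses base::BitField plus an acquire/release compare-and-swap instead of plain assignments.

  #include <cstdint>
  #include <cstdio>

  // Toy packing that mirrors Epoch(2) | Marked(14) | Delta(16).
  constexpr uint32_t Encode(unsigned epoch, uint32_t marked, uint32_t delta) {
    return (epoch & 0x3u) | ((marked & 0x3fffu) << 2) | ((delta & 0xffffu) << 16);
  }

  int main() {
    // Marking barrier requests 5 descriptors on a fresh array (state 0) in
    // epoch 1: marked stays 0, delta becomes 5.
    uint32_t state = Encode(1, 0, 5);
    // The visitor acquires the range [0, 5) and folds the delta into marked.
    state = Encode(1, 5, 0);
    // A later barrier requesting 7 descriptors only adds the missing delta 2.
    state = Encode(1, 5, 2);
    std::printf("raw_gc_state = 0x%x\n", state);
    return 0;
  }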
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index 35c1e9a62a..d9f40694c1 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -19,8 +19,9 @@ struct DescriptorEntry {
extern class DescriptorArray extends HeapObject {
const number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
- raw_number_of_marked_descriptors: uint16;
- filler16_bits: uint16;
+ // Raw GC state that is maintained during marking.
+ // See `DescriptorArrayMarkingState`.
+ raw_gc_state: uint32;
enum_cache: EnumCache;
descriptors[number_of_all_descriptors]: DescriptorEntry;
}
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index a13a602949..05f774222c 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -212,6 +212,17 @@ Handle<Map> NameDictionary::GetMap(ReadOnlyRoots roots) {
return roots.name_dictionary_map_handle();
}
+uint32_t NameDictionary::flags() const {
+ return Smi::ToInt(this->get(kFlagsIndex));
+}
+
+void NameDictionary::set_flags(uint32_t flags) {
+ this->set(kFlagsIndex, Smi::FromInt(flags));
+}
+
+BIT_FIELD_ACCESSORS(NameDictionary, flags, may_have_interesting_symbols,
+ NameDictionary::MayHaveInterestingSymbolsBit)
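Editorial note: the accessors above decode a single bit out of the Smi-stored flags word. The following is a self-contained sketch of what a base::BitField<bool, 0, 1, uint32_t> provides for that bit; it is a toy equivalent, not the V8 template itself.

  #include <cstdint>

  struct MayHaveInterestingSymbolsBitModel {
    static constexpr uint32_t kShift = 0;
    static constexpr uint32_t kMask = 1u << kShift;
    static constexpr bool decode(uint32_t flags) { return (flags & kMask) != 0; }
    static constexpr uint32_t update(uint32_t flags, bool value) {
      return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
    }
  };

  static_assert(MayHaveInterestingSymbolsBitModel::decode(
      MayHaveInterestingSymbolsBitModel::update(/*flags=*/0, true)));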
+
PropertyCell GlobalDictionary::CellAt(InternalIndex entry) {
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return CellAt(cage_base, entry);
@@ -292,18 +303,19 @@ Handle<Map> SimpleNumberDictionary::GetMap(ReadOnlyRoots roots) {
return roots.simple_number_dictionary_map_handle();
}
-bool NameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
+bool BaseNameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
DCHECK(other.IsTheHole() || Name::cast(other).IsUniqueName());
DCHECK(key->IsUniqueName());
return *key == other;
}
-uint32_t NameDictionaryShape::Hash(ReadOnlyRoots roots, Handle<Name> key) {
+uint32_t BaseNameDictionaryShape::Hash(ReadOnlyRoots roots, Handle<Name> key) {
DCHECK(key->IsUniqueName());
return key->hash();
}
-uint32_t NameDictionaryShape::HashForObject(ReadOnlyRoots roots, Object other) {
+uint32_t BaseNameDictionaryShape::HashForObject(ReadOnlyRoots roots,
+ Object other) {
DCHECK(other.IsUniqueName());
return Name::cast(other).hash();
}
@@ -319,14 +331,14 @@ uint32_t GlobalDictionaryShape::HashForObject(ReadOnlyRoots roots,
return PropertyCell::cast(other).name().hash();
}
-Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
- Handle<Name> key) {
+Handle<Object> BaseNameDictionaryShape::AsHandle(Isolate* isolate,
+ Handle<Name> key) {
DCHECK(key->IsUniqueName());
return key;
}
-Handle<Object> NameDictionaryShape::AsHandle(LocalIsolate* isolate,
- Handle<Name> key) {
+Handle<Object> BaseNameDictionaryShape::AsHandle(LocalIsolate* isolate,
+ Handle<Name> key) {
DCHECK(key->IsUniqueName());
return key;
}
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 3744faa639..c5b1920873 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -84,6 +84,15 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
+ // This method is only safe to use when it is guaranteed that the dictionary
+ // doesn't need to grow.
+ // The number of elements stored is not updated. Use
+ // |SetInitialNumberOfElements| to update the number in one go.
+ template <typename IsolateT>
+ static void UncheckedAdd(IsolateT* isolate, Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details);
+
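Editorial note: a hedged sketch of the bulk-insertion pattern the comment above has in mind: pre-size the dictionary so it cannot grow, add entries unchecked, then fix up the element count once. The PropertyTriple struct and BulkAdd helper are hypothetical; NameDictionary::New, UncheckedAdd, and SetInitialNumberOfElements are the entry points referenced in this hunk.

  #include <vector>

  // Hypothetical helper; sketch only.
  struct PropertyTriple {
    Handle<Name> name;
    Handle<Object> value;
    PropertyDetails details;
  };

  Handle<NameDictionary> BulkAdd(Isolate* isolate,
                                 const std::vector<PropertyTriple>& props) {
    // Pre-sized so UncheckedAdd never needs to grow the backing store.
    Handle<NameDictionary> dict =
        NameDictionary::New(isolate, static_cast<int>(props.size()));
    for (const PropertyTriple& p : props) {
      NameDictionary::UncheckedAdd(isolate, dict, p.name, p.value, p.details);
    }
    dict->SetInitialNumberOfElements(static_cast<int>(props.size()));
    return dict;
  }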
static Handle<Derived> ShallowCopy(Isolate* isolate,
Handle<Derived> dictionary);
@@ -94,6 +103,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
Key key,
Handle<Object> value,
PropertyDetails details);
+ static void UncheckedAtPut(Isolate* isolate, Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details);
OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, Shape>);
};
@@ -115,7 +127,7 @@ class BaseDictionaryShape : public BaseShape<Key> {
PropertyDetails value);
};
-class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
+class BaseNameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
public:
static inline bool IsMatch(Handle<Name> key, Object other);
static inline uint32_t Hash(ReadOnlyRoots roots, Handle<Name> key);
@@ -123,9 +135,13 @@ class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
static inline Handle<Object> AsHandle(LocalIsolate* isolate,
Handle<Name> key);
- static const int kPrefixSize = 2;
- static const int kEntrySize = 3;
static const int kEntryValueIndex = 1;
+};
+
+class NameDictionaryShape : public BaseNameDictionaryShape {
+ public:
+ static const int kPrefixSize = 3;
+ static const int kEntrySize = 3;
static const bool kMatchNeedsHoleCheck = false;
};
@@ -194,6 +210,7 @@ class V8_EXPORT_PRIVATE NameDictionary
DECL_CAST(NameDictionary)
DECL_PRINTER(NameDictionary)
+ static const int kFlagsIndex = kObjectHashIndex + 1;
static const int kEntryValueIndex = 1;
static const int kEntryDetailsIndex = 2;
static const int kInitialCapacity = 2;
@@ -204,17 +221,34 @@ class V8_EXPORT_PRIVATE NameDictionary
inline void set_hash(int hash);
inline int hash() const;
+ // Note: Flags are stored as smi, so only 31 bits are usable.
+ using MayHaveInterestingSymbolsBit = base::BitField<bool, 0, 1, uint32_t>;
+ DECL_BOOLEAN_ACCESSORS(may_have_interesting_symbols)
+
+ static constexpr int kFlagsDefault = 0;
+
+ inline uint32_t flags() const;
+ inline void set_flags(uint32_t flags);
+
+ // Creates a new NameDictionary.
+ template <typename IsolateT>
+ V8_WARN_UNUSED_RESULT static Handle<NameDictionary> New(
+ IsolateT* isolate, int at_least_space_for,
+ AllocationType allocation = AllocationType::kYoung,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
+
OBJECT_CONSTRUCTORS(NameDictionary,
BaseNameDictionary<NameDictionary, NameDictionaryShape>);
};
-class V8_EXPORT_PRIVATE GlobalDictionaryShape : public NameDictionaryShape {
+class V8_EXPORT_PRIVATE GlobalDictionaryShape : public BaseNameDictionaryShape {
public:
static inline bool IsMatch(Handle<Name> key, Object other);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
- static const int kEntrySize = 1; // Overrides NameDictionaryShape::kEntrySize
static const bool kMatchNeedsHoleCheck = true;
+ static const int kPrefixSize = 2;
+ static const int kEntrySize = 1;
template <typename Dictionary>
static inline PropertyDetails DetailsAt(Dictionary dict, InternalIndex entry);
@@ -331,6 +365,14 @@ class NumberDictionary
Handle<Object> value,
Handle<JSObject> dictionary_holder = Handle<JSObject>::null(),
PropertyDetails details = PropertyDetails::Empty());
+ // This method is only safe to use when it is guaranteed that the dictionary
+ // doesn't need to grow.
+ // The number of elements stored and the maximum index are not updated. Use
+ // |SetInitialNumberOfElements| and |UpdateMaxNumberKey| to update the number
+ // in one go.
+ static void UncheckedSet(Isolate* isolate,
+ Handle<NumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value);
static const int kMaxNumberKeyIndex = kPrefixStartIndex;
void UpdateMaxNumberKey(uint32_t key, Handle<JSObject> dictionary_holder);
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index 5d0ebca5c1..833641bae4 100644
--- a/deps/v8/src/objects/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -285,6 +285,20 @@ inline bool IsFloatTypedArrayElementsKind(ElementsKind kind) {
kind == RAB_GSAB_FLOAT32_ELEMENTS || kind == RAB_GSAB_FLOAT64_ELEMENTS;
}
+inline bool IsSignedIntTypedArrayElementsKind(ElementsKind kind) {
+ return kind == INT8_ELEMENTS || kind == RAB_GSAB_INT8_ELEMENTS ||
+ kind == INT16_ELEMENTS || kind == RAB_GSAB_INT16_ELEMENTS ||
+ kind == INT32_ELEMENTS || kind == RAB_GSAB_INT32_ELEMENTS;
+}
+
+inline bool IsUnsignedIntTypedArrayElementsKind(ElementsKind kind) {
+ return kind == UINT8_CLAMPED_ELEMENTS ||
+ kind == RAB_GSAB_UINT8_CLAMPED_ELEMENTS || kind == UINT8_ELEMENTS ||
+ kind == RAB_GSAB_UINT8_ELEMENTS || kind == UINT16_ELEMENTS ||
+ kind == RAB_GSAB_UINT16_ELEMENTS || kind == UINT32_ELEMENTS ||
+ kind == RAB_GSAB_UINT32_ELEMENTS;
+}
+
inline bool IsWasmArrayElementsKind(ElementsKind kind) {
return kind == WASM_ARRAY_ELEMENTS;
}
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 6e267e8f52..9d9d93215a 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -479,8 +479,8 @@ void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
AtomicSlot end(start + sort_size);
std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
#ifdef V8_COMPRESS_POINTERS
- Object a(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementA));
- Object b(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementB));
+ Object a(V8HeapCompressionScheme::DecompressTagged(isolate, elementA));
+ Object b(V8HeapCompressionScheme::DecompressTagged(isolate, elementB));
#else
Object a(elementA);
Object b(elementB);
@@ -3587,10 +3587,13 @@ class TypedElementsAccessor
}
size_t typed_array_length = typed_array.GetLength();
- if (start_from >= typed_array_length) {
+ if (V8_UNLIKELY(start_from >= typed_array_length)) {
// This can happen if the TypedArray got resized when we did ToInteger
// on the last parameter of lastIndexOf.
DCHECK(typed_array.IsVariableLength());
+ if (typed_array_length == 0) {
+ return Just<int64_t>(-1);
+ }
start_from = typed_array_length - 1;
}
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 62663cd2af..8250ed9727 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -57,6 +57,11 @@ void EmbedderDataSlot::store_smi(Smi value) {
// static
void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
Object value) {
+#ifdef V8_COMPRESS_POINTERS
+ CHECK(value.IsSmi() ||
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(value.ptr()) ==
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(array.ptr()));
+#endif
int slot_offset = EmbedderDataArray::OffsetOfElementAt(entry_index);
ObjectSlot(FIELD_ADDR(array, slot_offset + kTaggedPayloadOffset))
.Relaxed_Store(value);
@@ -71,6 +76,11 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
// static
void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
Object value) {
+#ifdef V8_COMPRESS_POINTERS
+ CHECK(value.IsSmi() ||
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(value.ptr()) ==
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(object.ptr()));
+#endif
int slot_offset = object.GetEmbedderFieldOffset(embedder_field_index);
ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
.Relaxed_Store(value);
@@ -169,7 +179,9 @@ void EmbedderDataSlot::gc_safe_store(Isolate* isolate, Address value) {
Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
Address hi = value >> 32;
- ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
+ // Here we use MaybeObjectSlot because ObjectSlot expects a valid `Object`.
+ // This allows us to store a non-Smi value that is not a valid `HeapObject`.
+ MaybeObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(MaybeObject(hi));
#else
ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
#endif
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index a8bbc38341..e5e3ea4772 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -38,21 +38,18 @@ INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
kCreateClosureSlotCountOffset)
-RELEASE_ACQUIRE_WEAK_ACCESSORS(FeedbackVector, maybe_optimized_code,
- kMaybeOptimizedCodeOffset)
-
int32_t FeedbackMetadata::slot_count(AcquireLoadTag) const {
return ACQUIRE_READ_INT32_FIELD(*this, kSlotCountOffset);
}
int32_t FeedbackMetadata::get(int index) const {
- DCHECK(index >= 0 && index < length());
+ CHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
int offset = kHeaderSize + index * kInt32Size;
return ReadField<int32_t>(offset);
}
void FeedbackMetadata::set(int index, int32_t value) {
- DCHECK(index >= 0 && index < length());
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
int offset = kHeaderSize + index * kInt32Size;
WriteField<int32_t>(offset, value);
}
@@ -148,13 +145,13 @@ void FeedbackVector::set_maybe_has_optimized_osr_code(bool value) {
set_osr_state(MaybeHasOptimizedOsrCodeBit::update(osr_state(), value));
}
-CodeT FeedbackVector::optimized_code() const {
- MaybeObject slot = maybe_optimized_code(kAcquireLoad);
+Code FeedbackVector::optimized_code() const {
+ MaybeObject slot = maybe_optimized_code();
DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object;
- CodeT code;
+ Code code;
if (slot->GetHeapObject(&heap_object)) {
- code = CodeT::cast(heap_object);
+ code = Code::cast(heap_object);
}
// It is possible that the maybe_optimized_code slot is cleared but the flags
// haven't been updated yet. We update them when we execute the function next
@@ -202,20 +199,20 @@ void FeedbackVector::set_log_next_execution(bool value) {
set_flags(LogNextExecutionBit::update(flags(), value));
}
-base::Optional<CodeT> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate,
- FeedbackSlot slot) {
+base::Optional<Code> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate,
+ FeedbackSlot slot) {
MaybeObject maybe_code = Get(isolate, slot);
if (maybe_code->IsCleared()) return {};
- CodeT codet = CodeT::cast(maybe_code->GetHeapObject());
- if (codet.marked_for_deoptimization()) {
+ Code code = Code::cast(maybe_code->GetHeapObject());
+ if (code.marked_for_deoptimization()) {
// Clear the cached Code object if deoptimized.
// TODO(jgruber): Add tracing.
Set(slot, HeapObjectReference::ClearedValue(isolate));
return {};
}
- return codet;
+ return code;
}
// Conversion from an integer index to either a slot or an ic slot.
@@ -360,7 +357,9 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kReceiverOrNullOrUndefined;
}
- if (Is<CompareOperationFeedback::kBigInt>(type_feedback)) {
+ if (Is<CompareOperationFeedback::kBigInt64>(type_feedback)) {
+ return CompareOperationHint::kBigInt64;
+ } else if (Is<CompareOperationFeedback::kBigInt>(type_feedback)) {
return CompareOperationHint::kBigInt;
}
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 65321d5096..f791896406 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -215,6 +215,7 @@ Handle<ClosureFeedbackCellArray> ClosureFeedbackCellArray::New(
Handle<FeedbackVector> FeedbackVector::New(
Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ Handle<FeedbackCell> parent_feedback_cell,
IsCompiledScope* is_compiled_scope) {
DCHECK(is_compiled_scope->is_compiled());
Factory* factory = isolate->factory();
@@ -223,8 +224,8 @@ Handle<FeedbackVector> FeedbackVector::New(
isolate);
const int slot_count = feedback_metadata->slot_count();
- Handle<FeedbackVector> vector =
- factory->NewFeedbackVector(shared, closure_feedback_cell_array);
+ Handle<FeedbackVector> vector = factory->NewFeedbackVector(
+ shared, closure_feedback_cell_array, parent_feedback_cell);
DCHECK_EQ(vector->length(), slot_count);
@@ -292,16 +293,15 @@ Handle<FeedbackVector> FeedbackVector::New(
i += entry_size;
}
- Handle<FeedbackVector> result = Handle<FeedbackVector>::cast(vector);
if (!isolate->is_best_effort_code_coverage()) {
- AddToVectorsForProfilingTools(isolate, result);
+ AddToVectorsForProfilingTools(isolate, vector);
}
- return result;
+ parent_feedback_cell->set_value(*vector, kReleaseStore);
+ return vector;
}
-namespace {
-
-Handle<FeedbackVector> NewFeedbackVectorForTesting(
+// static
+Handle<FeedbackVector> FeedbackVector::NewForTesting(
Isolate* isolate, const FeedbackVectorSpec* spec) {
Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate, spec);
Handle<SharedFunctionInfo> shared =
@@ -312,20 +312,20 @@ Handle<FeedbackVector> NewFeedbackVectorForTesting(
shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
ClosureFeedbackCellArray::New(isolate, shared);
+ Handle<FeedbackCell> parent_cell = isolate->factory()->NewNoClosuresCell(
+ isolate->factory()->undefined_value());
IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
return FeedbackVector::New(isolate, shared, closure_feedback_cell_array,
- &is_compiled_scope);
+ parent_cell, &is_compiled_scope);
}
-} // namespace
-
// static
Handle<FeedbackVector> FeedbackVector::NewWithOneBinarySlotForTesting(
Zone* zone, Isolate* isolate) {
FeedbackVectorSpec one_slot(zone);
one_slot.AddBinaryOpICSlot();
- return NewFeedbackVectorForTesting(isolate, &one_slot);
+ return NewForTesting(isolate, &one_slot);
}
// static
@@ -333,7 +333,7 @@ Handle<FeedbackVector> FeedbackVector::NewWithOneCompareSlotForTesting(
Zone* zone, Isolate* isolate) {
FeedbackVectorSpec one_slot(zone);
one_slot.AddCompareICSlot();
- return NewFeedbackVectorForTesting(isolate, &one_slot);
+ return NewForTesting(isolate, &one_slot);
}
// static
@@ -352,7 +352,7 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
if (ticks < Smi::kMaxValue) set_profiler_ticks(ticks + 1);
}
-void FeedbackVector::SetOptimizedCode(CodeT code) {
+void FeedbackVector::SetOptimizedCode(Code code) {
DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
// We should set optimized code only when there is no valid optimized code.
DCHECK(!has_optimized_code() ||
@@ -365,7 +365,7 @@ void FeedbackVector::SetOptimizedCode(CodeT code) {
// re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't
// TieringState::kRequestTurbofan*.
- set_maybe_optimized_code(HeapObjectReference::Weak(code), kReleaseStore);
+ set_maybe_optimized_code(HeapObjectReference::Weak(code));
int32_t state = flags();
// TODO(leszeks): Reconsider whether this could clear the tiering state vs.
// the callers doing so.
@@ -384,13 +384,12 @@ void FeedbackVector::SetOptimizedCode(CodeT code) {
void FeedbackVector::ClearOptimizedCode() {
DCHECK(has_optimized_code());
DCHECK(maybe_has_maglev_code() || maybe_has_turbofan_code());
- set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
- kReleaseStore);
+ set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()));
set_maybe_has_maglev_code(false);
set_maybe_has_turbofan_code(false);
}
-void FeedbackVector::SetOptimizedOsrCode(FeedbackSlot slot, CodeT code) {
+void FeedbackVector::SetOptimizedOsrCode(FeedbackSlot slot, Code code) {
DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
DCHECK(!slot.IsInvalid());
Set(slot, HeapObjectReference::Weak(code));
@@ -430,17 +429,17 @@ void FeedbackVector::set_osr_tiering_state(TieringState marker) {
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
- SharedFunctionInfo shared, const char* reason) {
- MaybeObject slot = maybe_optimized_code(kAcquireLoad);
+ Isolate* isolate, SharedFunctionInfo shared, const char* reason) {
+ MaybeObject slot = maybe_optimized_code();
if (slot->IsCleared()) {
set_maybe_has_maglev_code(false);
set_maybe_has_turbofan_code(false);
return;
}
- CodeT code = CodeT::cast(slot->GetHeapObject());
+ Code code = Code::cast(slot->GetHeapObject());
if (code.marked_for_deoptimization()) {
- Deoptimizer::TraceEvictFromOptimizedCodeCache(shared, reason);
+ Deoptimizer::TraceEvictFromOptimizedCodeCache(isolate, shared, reason);
ClearOptimizedCode();
}
}
@@ -736,6 +735,17 @@ InlineCacheState FeedbackNexus::ic_state() const {
: InlineCacheState::MONOMORPHIC;
}
}
+ // TODO(1393773): Remove once the issue is solved.
+ Address vector_ptr = vector().ptr();
+ config_.isolate()->PushParamsAndDie(
+ reinterpret_cast<void*>(feedback.ptr()),
+ reinterpret_cast<void*>(extra.ptr()),
+ reinterpret_cast<void*>(vector_ptr),
+ reinterpret_cast<void*>(static_cast<intptr_t>(slot_.ToInt())),
+ reinterpret_cast<void*>(static_cast<intptr_t>(kind())),
+ // Include part of the feedback vector containing the slot.
+ reinterpret_cast<void*>(
+ vector_ptr + FeedbackVector::OffsetOfElementAt(slot_.ToInt())));
UNREACHABLE();
}
case FeedbackSlotKind::kCall: {
@@ -1224,7 +1234,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
if (mode != STANDARD_STORE) return mode;
continue;
} else {
- CodeT code = CodeT::cast(data_handler->smi_handler());
+ Code code = Code::cast(data_handler->smi_handler());
builtin_handler = code.builtin_id();
}
@@ -1243,7 +1253,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
continue;
} else {
// Element store without prototype chain check.
- CodeT code = CodeT::cast(*maybe_code_handler.object());
+ Code code = Code::cast(*maybe_code_handler.object());
builtin_handler = code.builtin_id();
}
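
The FeedbackVector::New change above finishes by storing the freshly built vector into its parent FeedbackCell with a release store. A toy model of that publication pattern — plain C++, not V8 code; all names are illustrative:

#include <atomic>

// Initialize the vector completely first, then publish it through its owning
// cell with a release store, so a reader that acquire-loads the cell sees a
// fully initialized vector.
struct ToyVector {
  int slot_count;
};

struct ToyFeedbackCell {
  std::atomic<ToyVector*> value{nullptr};
};

ToyVector* NewToyVector(ToyFeedbackCell& parent_cell, int slots) {
  ToyVector* vector = new ToyVector{slots};                     // build first...
  parent_cell.value.store(vector, std::memory_order_release);   // ...then publish
  return vector;
}
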
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 987505a99c..80e39a6d22 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -25,8 +25,13 @@ namespace v8 {
namespace internal {
class IsCompiledScope;
+class FeedbackVectorSpec;
-enum class UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback };
+enum class UpdateFeedbackMode {
+ kOptionalFeedback,
+ kGuaranteedFeedback,
+ kNoFeedback,
+};
// Which feedback slots to clear in Clear().
enum class ClearBehavior {
@@ -202,11 +207,6 @@ class FeedbackVector
DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
static_assert(TieringState::kLastTieringState <= TieringStateBits::kMax);
- static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
- using TorqueGeneratedFeedbackVector<FeedbackVector,
- HeapObject>::maybe_optimized_code;
- DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
-
static constexpr uint32_t kFlagsMaybeHasTurbofanCode =
FeedbackVector::MaybeHasTurbofanCodeBit::kMask;
static constexpr uint32_t kFlagsMaybeHasMaglevCode =
@@ -254,7 +254,7 @@ class FeedbackVector
// The `osr_state` contains the osr_urgency and maybe_has_optimized_osr_code.
inline void reset_osr_state();
- inline CodeT optimized_code() const;
+ inline Code optimized_code() const;
// Whether maybe_optimized_code contains a cached Code object.
inline bool has_optimized_code() const;
@@ -268,16 +268,17 @@ class FeedbackVector
inline bool maybe_has_turbofan_code() const;
inline void set_maybe_has_turbofan_code(bool value);
- void SetOptimizedCode(CodeT code);
- void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
+ void SetOptimizedCode(Code code);
+ void EvictOptimizedCodeMarkedForDeoptimization(Isolate* isolate,
+ SharedFunctionInfo shared,
const char* reason);
void ClearOptimizedCode();
// Optimized OSR'd code is cached in JumpLoop feedback vector slots. The
// slots either contain a Code object or the ClearedValue.
- inline base::Optional<CodeT> GetOptimizedOsrCode(Isolate* isolate,
- FeedbackSlot slot);
- void SetOptimizedOsrCode(FeedbackSlot slot, CodeT code);
+ inline base::Optional<Code> GetOptimizedOsrCode(Isolate* isolate,
+ FeedbackSlot slot);
+ void SetOptimizedOsrCode(FeedbackSlot slot, Code code);
inline TieringState tiering_state() const;
void set_tiering_state(TieringState state);
@@ -319,8 +320,11 @@ class FeedbackVector
V8_EXPORT_PRIVATE static Handle<FeedbackVector> New(
Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ Handle<FeedbackCell> parent_feedback_cell,
IsCompiledScope* is_compiled_scope);
+ V8_EXPORT_PRIVATE static Handle<FeedbackVector> NewForTesting(
+ Isolate* isolate, const FeedbackVectorSpec* spec);
V8_EXPORT_PRIVATE static Handle<FeedbackVector>
NewWithOneBinarySlotForTesting(Zone* zone, Isolate* isolate);
V8_EXPORT_PRIVATE static Handle<FeedbackVector>
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index 3305d47552..1b8d89a643 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -44,8 +44,8 @@ extern class FeedbackVector extends HeapObject {
flags: FeedbackVectorFlags;
shared_function_info: SharedFunctionInfo;
closure_feedback_cell_array: ClosureFeedbackCellArray;
- @if(V8_EXTERNAL_CODE_SPACE) maybe_optimized_code: Weak<CodeDataContainer>;
- @ifnot(V8_EXTERNAL_CODE_SPACE) maybe_optimized_code: Weak<Code>;
+ parent_feedback_cell: FeedbackCell;
+ maybe_optimized_code: Weak<Code>;
@cppRelaxedLoad raw_feedback_slots[length]: MaybeObject;
}
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index a47826a0f4..e4461d4308 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -88,6 +88,10 @@ FieldIndex FieldIndex::ForDescriptor(PtrComprCageBase cage_base, Map map,
InternalIndex descriptor_index) {
PropertyDetails details = map.instance_descriptors(cage_base, kRelaxedLoad)
.GetDetails(descriptor_index);
+ return ForDetails(map, details);
+}
+
+FieldIndex FieldIndex::ForDetails(Map map, PropertyDetails details) {
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
}
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index bfbb9acd37..aa4d859c28 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -34,6 +34,7 @@ class FieldIndex final {
InternalIndex descriptor_index);
static inline FieldIndex ForDescriptor(PtrComprCageBase cage_base, Map map,
InternalIndex descriptor_index);
+ static inline FieldIndex ForDetails(Map map, PropertyDetails details);
inline int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 9a768a0b56..413cd860e0 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -83,18 +83,16 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK_NE(map(), EarlyGetReadOnlyRoots().unchecked_fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
DCHECK(Object(value).IsSmi());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
- DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
+ DCHECK_NE(EarlyGetReadOnlyRoots().unchecked_fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
int offset = OffsetOfElementAt(index);
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index f0f0b221af..d3d6edda86 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -142,20 +142,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
-#if !defined(_WIN32)
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
-#endif
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
@@ -480,6 +467,9 @@ class ArrayList : public TorqueGeneratedArrayList<ArrayList, FixedArray> {
Handle<Object> obj2);
V8_EXPORT_PRIVATE static Handle<ArrayList> Add(Isolate* isolate,
Handle<ArrayList> array,
+ Smi obj1);
+ V8_EXPORT_PRIVATE static Handle<ArrayList> Add(Isolate* isolate,
+ Handle<ArrayList> array,
Handle<Object> obj1, Smi obj2,
Smi obj3, Smi obj4);
static Handle<ArrayList> New(Isolate* isolate, int size);
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index 9a2a00f729..f6aa1fde88 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -25,15 +25,36 @@ RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
int FreeSpace::Size() { return size(kRelaxedLoad); }
-FreeSpace FreeSpace::next() {
+FreeSpace FreeSpace::next() const {
DCHECK(IsValid());
+#ifdef V8_EXTERNAL_CODE_SPACE
+ intptr_t diff_to_next =
+ static_cast<intptr_t>(TaggedField<Smi, kNextOffset>::load(*this).value());
+ if (diff_to_next == 0) {
+ return FreeSpace();
+ }
+ Address next_ptr = ptr() + diff_to_next * kObjectAlignment;
+ return FreeSpace::unchecked_cast(Object(next_ptr));
+#else
return FreeSpace::unchecked_cast(
TaggedField<Object, kNextOffset>::load(*this));
+#endif // V8_EXTERNAL_CODE_SPACE
}
void FreeSpace::set_next(FreeSpace next) {
DCHECK(IsValid());
- RELAXED_WRITE_FIELD(*this, kNextOffset, next);
+#ifdef V8_EXTERNAL_CODE_SPACE
+ if (next.is_null()) {
+ TaggedField<Smi, kNextOffset>::Relaxed_Store(*this, Smi::zero());
+ return;
+ }
+ intptr_t diff_to_next = next.ptr() - ptr();
+ DCHECK(IsAligned(diff_to_next, kObjectAlignment));
+ TaggedField<Smi, kNextOffset>::Relaxed_Store(
+ *this, Smi::FromIntptr(diff_to_next / kObjectAlignment));
+#else
+ TaggedField<Object, kNextOffset>::Relaxed_Store(*this, next);
+#endif // V8_EXTERNAL_CODE_SPACE
}
FreeSpace FreeSpace::cast(HeapObject o) {
@@ -46,13 +67,12 @@ FreeSpace FreeSpace::unchecked_cast(const Object o) {
return base::bit_cast<FreeSpace>(o);
}
-bool FreeSpace::IsValid() {
+bool FreeSpace::IsValid() const {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- CHECK_IMPLIES(!map_slot().contains_map_value(free_space_map.ptr()),
- !heap->deserialization_complete() &&
- map_slot().contains_map_value(kNullAddress));
+ CHECK(!heap->deserialization_complete() ||
+ map_slot().contains_map_value(free_space_map.ptr()));
CHECK_LE(kNextOffset + kTaggedSize, size(kRelaxedLoad));
return true;
}
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index 275fb781de..c9ee47cc8f 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -20,6 +20,16 @@ namespace internal {
// the heap remains iterable. They have a size and a next pointer.
// The next pointer is the raw address of the next FreeSpace object (or NULL)
// in the free list.
+//
+// When the external code space is enabled, the next pointer is stored as a Smi
+// value representing the diff from the current FreeSpace object's address, in
+// units of kObjectAlignment. The terminating FreeSpace value is represented as
+// Smi zero.
+// Such a representation has the following properties:
+// a) it can hold both positive and negative diffs for the full pointer
+// compression cage size (a HeapObject address has only 30 significant bits
+// while Smis have 31 bits),
+// b) it's independent of the pointer compression base and pointer compression
+// scheme.
class FreeSpace : public TorqueGeneratedFreeSpace<FreeSpace, HeapObject> {
public:
// [size]: size of the free space including the header.
@@ -28,7 +38,7 @@ class FreeSpace : public TorqueGeneratedFreeSpace<FreeSpace, HeapObject> {
inline int Size();
// Accessors for the next field.
- inline FreeSpace next();
+ inline FreeSpace next() const;
inline void set_next(FreeSpace next);
inline static FreeSpace cast(HeapObject obj);
@@ -40,7 +50,7 @@ class FreeSpace : public TorqueGeneratedFreeSpace<FreeSpace, HeapObject> {
class BodyDescriptor;
private:
- inline bool IsValid();
+ inline bool IsValid() const;
TQ_OBJECT_CONSTRUCTORS(FreeSpace)
};
diff --git a/deps/v8/src/objects/free-space.tq b/deps/v8/src/objects/free-space.tq
index 5fc8767a58..acdf1de97b 100644
--- a/deps/v8/src/objects/free-space.tq
+++ b/deps/v8/src/objects/free-space.tq
@@ -4,5 +4,5 @@
extern class FreeSpace extends HeapObject {
size: Smi;
- next: FreeSpace|Uninitialized;
+ next: FreeSpace|Smi|Uninitialized;
}
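
A standalone model of the FreeSpace "next" encoding described in the comment above — plain C++, not V8 code; the alignment constant is illustrative. The link is a signed count of alignment-sized steps from the current object, and zero terminates the free list.

#include <cassert>
#include <cstdint>

constexpr intptr_t kToyObjectAlignment = 8;  // illustrative alignment

intptr_t EncodeNext(uintptr_t current, uintptr_t next) {
  if (next == 0) return 0;  // terminator
  intptr_t diff = static_cast<intptr_t>(next - current);
  assert(diff % kToyObjectAlignment == 0);
  return diff / kToyObjectAlignment;  // small enough to fit a 31-bit Smi
}

uintptr_t DecodeNext(uintptr_t current, intptr_t encoded) {
  if (encoded == 0) return 0;  // end of the free list
  return current + static_cast<uintptr_t>(encoded * kToyObjectAlignment);
}
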
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 311d2fac17..f47404c575 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -55,11 +55,21 @@ NameToIndexHashTable::NameToIndexHashTable(Address ptr)
SLOW_DCHECK(IsNameToIndexHashTable());
}
+template <typename Derived, int N>
+ObjectMultiHashTableBase<Derived, N>::ObjectMultiHashTableBase(Address ptr)
+ : HashTable<Derived, ObjectMultiHashTableShape<N>>(ptr) {}
+
+ObjectTwoHashTable::ObjectTwoHashTable(Address ptr)
+ : ObjectMultiHashTableBase<ObjectTwoHashTable, 2>(ptr) {
+ SLOW_DCHECK(IsObjectTwoHashTable());
+}
+
CAST_ACCESSOR(ObjectHashTable)
CAST_ACCESSOR(RegisteredSymbolTable)
CAST_ACCESSOR(EphemeronHashTable)
CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(NameToIndexHashTable)
+CAST_ACCESSOR(ObjectTwoHashTable)
void EphemeronHashTable::set_key(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -122,6 +132,11 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
return std::max({capacity, kMinCapacity});
}
+void HashTableBase::SetInitialNumberOfElements(int nof) {
+ DCHECK_EQ(NumberOfElements(), 0);
+ set(kNumberOfElementsIndex, Smi::FromInt(nof));
+}
+
void HashTableBase::SetNumberOfElements(int nof) {
set(kNumberOfElementsIndex, Smi::FromInt(nof));
}
@@ -192,7 +207,8 @@ InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(IsolateT* isolate,
template <typename Derived, typename Shape>
bool HashTable<Derived, Shape>::IsKey(ReadOnlyRoots roots, Object k) {
// TODO(leszeks): Dictionaries that don't delete could skip the hole check.
- return k != roots.undefined_value() && k != roots.the_hole_value();
+ return k != roots.unchecked_undefined_value() &&
+ k != roots.unchecked_the_hole_value();
}
template <typename Derived, typename Shape>
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index a77d7e8aa4..8256102b84 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -101,6 +101,10 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
// Minimum capacity for newly created hash tables.
static const int kMinCapacity = 4;
+ // Set the number of elements in the hash table after a bulk insertion of
+ // elements.
+ inline void SetInitialNumberOfElements(int nof);
+
protected:
// Update the number of elements in the hash table.
inline void SetNumberOfElements(int nof);
@@ -418,6 +422,56 @@ class V8_EXPORT_PRIVATE EphemeronHashTable
ObjectHashTableBase<EphemeronHashTable, ObjectHashTableShape>);
};
+// ObjectMultiHashTable is a hash table that maps Object keys to N Object
+// values. The Object values are stored inline in the underlying FixedArray.
+//
+// This is not a generic multimap where each key can map to a variable number of
+// values. Each key always maps to exactly N values.
+template <int N>
+class ObjectMultiHashTableShape : public ObjectHashTableShape {
+ public:
+ static const int kEntrySize = 1 + N;
+};
+
+template <typename Derived, int N>
+class ObjectMultiHashTableBase
+ : public HashTable<Derived, ObjectMultiHashTableShape<N>> {
+ public:
+ static_assert(N > 1, "use ObjectHashTable instead if N = 1");
+
+ // Returns the values associated with the given key. Returns an std::array of
+ // holes if the key is not found.
+ std::array<Object, N> Lookup(Handle<Object> key);
+ std::array<Object, N> Lookup(PtrComprCageBase cage_base, Handle<Object> key);
+
+ // Adds or overwrites the values associated with the given key.
+ static Handle<Derived> Put(Isolate* isolate, Handle<Derived> table,
+ Handle<Object> key,
+ const std::array<Handle<Object>, N>& values);
+
+ private:
+ void SetEntryValues(InternalIndex entry,
+ const std::array<Handle<Object>, N>& values);
+
+ static constexpr inline int EntryToValueIndexStart(InternalIndex entry) {
+ return HashTable<Derived, ObjectMultiHashTableShape<N>>::EntryToIndex(
+ entry) +
+ ObjectMultiHashTableShape<N>::kEntryValueIndex;
+ }
+
+ OBJECT_CONSTRUCTORS(ObjectMultiHashTableBase,
+ HashTable<Derived, ObjectMultiHashTableShape<N>>);
+};
+
+class ObjectTwoHashTable
+ : public ObjectMultiHashTableBase<ObjectTwoHashTable, 2> {
+ public:
+ DECL_CAST(ObjectTwoHashTable)
+
+ OBJECT_CONSTRUCTORS(ObjectTwoHashTable,
+ ObjectMultiHashTableBase<ObjectTwoHashTable, 2>);
+};
+
class ObjectHashSetShape : public ObjectHashTableShape {
public:
static const int kPrefixSize = 0;
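
The shape of ObjectTwoHashTable added above (each key maps to exactly N inline values, not to a variable-length list) can be illustrated with a small standalone container — plain C++ over std::unordered_map, not the V8 implementation; names are illustrative.

#include <array>
#include <string>
#include <unordered_map>

// Every key maps to exactly N values stored side by side; a missing key yields
// default-constructed values, analogous to the "array of holes" above.
template <typename K, typename V, int N>
class MultiValueTable {
 public:
  void Put(const K& key, const std::array<V, N>& values) { map_[key] = values; }
  std::array<V, N> Lookup(const K& key) const {
    auto it = map_.find(key);
    return it == map_.end() ? std::array<V, N>{} : it->second;
  }

 private:
  std::unordered_map<K, std::array<V, N>> map_;
};

// Usage: MultiValueTable<std::string, int, 2> t; t.Put("k", {1, 2});
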
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 3214135e48..759cda3cab 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -60,8 +60,8 @@ class HeapObject : public Object {
// Compare-and-swaps map word using release store, returns true if the map
// word was actually swapped.
- inline bool release_compare_and_swap_map_word(MapWord old_map_word,
- MapWord new_map_word);
+ inline bool release_compare_and_swap_map_word_forwarded(
+ MapWord old_map_word, HeapObject new_target_object);
// Initialize the map immediately after the object is allocated.
// Do not use this outside Heap.
@@ -71,11 +71,13 @@ class HeapObject : public Object {
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
DECL_RELAXED_GETTER(map_word, MapWord)
- inline void set_map_word(MapWord map_word, RelaxedStoreTag);
+ inline void set_map_word(Map map, RelaxedStoreTag);
+ inline void set_map_word_forwarded(HeapObject target_object, RelaxedStoreTag);
// Access the map word using acquire load and release store.
DECL_ACQUIRE_GETTER(map_word, MapWord)
- inline void set_map_word(MapWord map_word, ReleaseStoreTag);
+ inline void set_map_word(Map map, ReleaseStoreTag);
+ inline void set_map_word_forwarded(HeapObject target_object, ReleaseStoreTag);
// This method exists to help remove GetIsolate/GetHeap from HeapObject, in a
// way that doesn't require passing Isolate/Heap down huge call chains or to
@@ -84,12 +86,16 @@ class HeapObject : public Object {
// This version is intended to be used for the isolate values produced by
// i::GetPtrComprCageBase(HeapObject) function which may return nullptr.
inline ReadOnlyRoots GetReadOnlyRoots(PtrComprCageBase cage_base) const;
+ // This is slower, but safe to call during bootstrapping.
+ inline ReadOnlyRoots EarlyGetReadOnlyRoots() const;
// Whether the object is in the RO heap and the RO heap is shared, or in the
// writable shared heap.
- V8_INLINE bool InSharedHeap() const;
+ V8_INLINE bool InAnySharedSpace() const;
- V8_INLINE bool InSharedWritableHeap() const;
+ V8_INLINE bool InWritableSharedSpace() const;
+
+ V8_INLINE bool InReadOnlySpace() const;
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
@@ -97,18 +103,17 @@ class HeapObject : public Object {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
- IS_TYPE_FUNCTION_DECL(CodeT)
#undef IS_TYPE_FUNCTION_DECL
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+#define IS_TYPE_FUNCTION_DECL(Type, Value, _) \
V8_INLINE bool Is##Type(Isolate* isolate) const; \
V8_INLINE bool Is##Type(LocalIsolate* isolate) const; \
V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
- IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
+ IS_TYPE_FUNCTION_DECL(NullOrUndefined, , /* unused */)
#undef IS_TYPE_FUNCTION_DECL
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
@@ -117,6 +122,8 @@ class HeapObject : public Object {
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
+ V8_INLINE bool IsJSObjectThatCanBeTrackedAsPrototype() const;
+
// Converts an address to a HeapObject pointer.
static inline HeapObject FromAddress(Address address) {
DCHECK_TAG_ALIGNED(address);
diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h
index 3ff854a898..ce57d5d0ff 100644
--- a/deps/v8/src/objects/instance-type-inl.h
+++ b/deps/v8/src/objects/instance-type-inl.h
@@ -9,6 +9,7 @@
#include "src/execution/isolate-utils-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/map-inl.h"
+#include "src/roots/static-roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,14 +19,191 @@ namespace internal {
namespace InstanceTypeChecker {
+// INSTANCE_TYPE_CHECKERS macro defines some "types" that do not have
+// respective C++ classes (see TypedArrayConstructor, FixedArrayExact) or
+// the respective C++ counterpart is actually a template (see HashTable).
+// So in order to be able to customize IsType() implementations for specific
+// types, we declare a parallel set of "types" that can be compared using
+// std::is_same<>.
+namespace InstanceTypeTraits {
+
+#define DECL_TYPE(type, ...) class type;
+INSTANCE_TYPE_CHECKERS(DECL_TYPE)
+TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(DECL_TYPE)
+TORQUE_INSTANCE_CHECKERS_MULTIPLE_ONLY_DECLARED(DECL_TYPE)
+HEAP_OBJECT_TYPE_LIST(DECL_TYPE)
+#undef DECL_TYPE
+
+} // namespace InstanceTypeTraits
+
+// Instance types which are associated with one unique map.
+
+template <class type>
+inline constexpr base::Optional<RootIndex> UniqueMapOfInstanceTypeCheck() {
+ return {};
+}
+
+#define INSTANCE_TYPE_MAP(V, rootIndexName, rootAccessorName, class_name) \
+ template <> \
+ inline constexpr base::Optional<RootIndex> \
+ UniqueMapOfInstanceTypeCheck<InstanceTypeTraits::class_name>() { \
+ return {RootIndex::k##rootIndexName}; \
+ }
+UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(INSTANCE_TYPE_MAP, _)
+#undef INSTANCE_TYPE_MAP
+
+inline constexpr base::Optional<RootIndex> UniqueMapOfInstanceType(
+ InstanceType type) {
+#define INSTANCE_TYPE_CHECK(it, forinstancetype) \
+ if (type == forinstancetype) { \
+ return InstanceTypeChecker::UniqueMapOfInstanceTypeCheck< \
+ InstanceTypeChecker::InstanceTypeTraits::it>(); \
+ }
+ INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECK);
+#undef INSTANCE_TYPE_CHECK
+
+ return Map::TryGetMapRootIdxFor(type);
+}
+
+// Manually curated list of instance type ranges which are associated with a
+// unique range of map addresses on the read-only heap. Both ranges are
+// inclusive.
+
+using InstanceTypeRange = std::pair<InstanceType, InstanceType>;
+using RootIndexRange = std::pair<RootIndex, RootIndex>;
+constexpr std::array<std::pair<InstanceTypeRange, RootIndexRange>, 6>
+ kUniqueMapRangeOfInstanceTypeRangeList = {
+ {{{ALLOCATION_SITE_TYPE, ALLOCATION_SITE_TYPE},
+ {RootIndex::kAllocationSiteWithWeakNextMap,
+ RootIndex::kAllocationSiteWithoutWeakNextMap}},
+ {{FIRST_STRING_TYPE, LAST_STRING_TYPE},
+ {RootIndex::kStringMap, RootIndex::kSharedOneByteStringMap}},
+ {{FIRST_NAME_TYPE, LAST_NAME_TYPE},
+ {RootIndex::kSymbolMap, RootIndex::kSharedOneByteStringMap}},
+ {{FIRST_SMALL_ORDERED_HASH_TABLE_TYPE,
+ LAST_SMALL_ORDERED_HASH_TABLE_TYPE},
+ {RootIndex::kSmallOrderedHashMapMap,
+ RootIndex::kSmallOrderedNameDictionaryMap}},
+ {{FIRST_ABSTRACT_INTERNAL_CLASS_TYPE,
+ LAST_ABSTRACT_INTERNAL_CLASS_TYPE},
+ {RootIndex::kAbstractInternalClassSubclass1Map,
+ RootIndex::kAbstractInternalClassSubclass2Map}},
+ {{FIRST_TURBOFAN_TYPE_TYPE, LAST_TURBOFAN_TYPE_TYPE},
+ {RootIndex::kTurbofanBitsetTypeMap,
+ RootIndex::kTurbofanOtherNumberConstantTypeMap}}}};
+
+struct kUniqueMapRangeOfStringType {
+ static constexpr RootIndexRange kInternalizedString = {
+ RootIndex::kExternalInternalizedStringMap,
+ RootIndex::kOneByteInternalizedStringMap};
+ static constexpr RootIndexRange kExternalString = {
+ RootIndex::kExternalStringMap,
+ RootIndex::kUncachedExternalOneByteInternalizedStringMap};
+};
+
+#if V8_STATIC_ROOTS_BOOL
+
+inline constexpr base::Optional<RootIndexRange>
+UniqueMapRangeOfInstanceTypeRange(InstanceType first, InstanceType last) {
+ // Doesn't use a range-based for loop due to LLVM <11 bug re. constexpr
+ // functions.
+ for (size_t i = 0; i < kUniqueMapRangeOfInstanceTypeRangeList.size(); ++i) {
+ if (kUniqueMapRangeOfInstanceTypeRangeList[i].first.first == first &&
+ kUniqueMapRangeOfInstanceTypeRangeList[i].first.second == last) {
+ return {kUniqueMapRangeOfInstanceTypeRangeList[i].second};
+ }
+ }
+ return {};
+}
+
+inline constexpr base::Optional<RootIndexRange> UniqueMapRangeOfInstanceType(
+ InstanceType type) {
+ return UniqueMapRangeOfInstanceTypeRange(type, type);
+}
+
+inline bool MayHaveMapCheckFastCase(InstanceType type) {
+ if (UniqueMapOfInstanceType(type)) return true;
+ for (auto& el : kUniqueMapRangeOfInstanceTypeRangeList) {
+ if (el.first.first <= type && type <= el.first.second) {
+ return true;
+ }
+ }
+ return false;
+}
+
+inline bool CheckInstanceMap(RootIndex expected, Map map) {
+ return V8HeapCompressionScheme::CompressObject(map.ptr()) ==
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>(expected)];
+}
+
+inline bool CheckInstanceMapRange(RootIndexRange expected, Map map) {
+ Tagged_t ptr = V8HeapCompressionScheme::CompressObject(map.ptr());
+ Tagged_t first =
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>(expected.first)];
+ Tagged_t last =
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>(expected.second)];
+ return ptr >= first && ptr <= last;
+}
+
+// Maps for primitive objects are allocated in r/o space. JS_RECEIVER maps are
+// all allocated later, i.e. they have a compressed address above the last
+// read-only root. Thus, if we have a receiver and need to decide whether it is
+// a primitive object or a JS receiver, it suffices to check whether its map is
+// allocated above the following limit address.
+// The actual value is chosen such that it can be encoded as an arm64 immediate.
+constexpr Tagged_t kNonJsReceiverMapLimit = 0x10000;
+static_assert(kNonJsReceiverMapLimit >
+ StaticReadOnlyRootsPointerTable[static_cast<size_t>(
+ RootIndex::kLastReadOnlyRoot)]);
+
+#else
+
+inline bool MayHaveMapCheckFastCase(InstanceType type) { return false; }
+constexpr Tagged_t kNonJsReceiverMapLimit = 0x0;
+
+#endif // V8_STATIC_ROOTS_BOOL
+
// Define type checkers for classes with single instance type.
-#define INSTANCE_TYPE_CHECKER(type, forinstancetype) \
+// INSTANCE_TYPE_CHECKER1 is to be used if the instance type is already loaded.
+// INSTANCE_TYPE_CHECKER2 is preferred since it can sometimes avoid loading the
+// instance type from the map, if the checked instance type corresponds to a
+// known map or range of maps.
+
+#define INSTANCE_TYPE_CHECKER1(type, forinstancetype) \
V8_INLINE constexpr bool Is##type(InstanceType instance_type) { \
return instance_type == forinstancetype; \
}
-INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER)
-#undef INSTANCE_TYPE_CHECKER
+#if V8_STATIC_ROOTS_BOOL
+
+#define INSTANCE_TYPE_CHECKER2(type, forinstancetype_) \
+ V8_INLINE bool Is##type(Map map_object) { \
+ InstanceType forinstancetype = \
+ static_cast<InstanceType>(forinstancetype_); \
+ if (base::Optional<RootIndex> expected = \
+ UniqueMapOfInstanceType(forinstancetype)) { \
+ return CheckInstanceMap(*expected, map_object); \
+ } \
+ if (base::Optional<RootIndexRange> range = \
+ UniqueMapRangeOfInstanceType(forinstancetype)) { \
+ return CheckInstanceMapRange(*range, map_object); \
+ } \
+ return Is##type(map_object.instance_type()); \
+ }
+
+#else
+
+#define INSTANCE_TYPE_CHECKER2(type, forinstancetype) \
+ V8_INLINE bool Is##type(Map map_object) { \
+ return Is##type(map_object.instance_type()); \
+ }
+
+#endif // V8_STATIC_ROOTS_BOOL
+
+INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER1)
+INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER2)
+#undef INSTANCE_TYPE_CHECKER1
+#undef INSTANCE_TYPE_CHECKER2
// Checks if value is in range [lower_limit, higher_limit] using a single
// branch. Assumes that the input instance type is valid.
@@ -51,14 +229,46 @@ struct InstanceRangeChecker<lower_limit, LAST_TYPE> {
};
// Define type checkers for classes with ranges of instance types.
-#define INSTANCE_TYPE_CHECKER_RANGE(type, first_instance_type, \
- last_instance_type) \
+// INSTANCE_TYPE_CHECKER_RANGE1 is to be used if the instance type is already
+// loaded. INSTANCE_TYPE_CHECKER_RANGE2 is preferred since it can sometimes
+// avoid loading the instance type from the map, if the checked instance type
+// range corresponds to a known range of maps.
+
+#define INSTANCE_TYPE_CHECKER_RANGE1(type, first_instance_type, \
+ last_instance_type) \
V8_INLINE constexpr bool Is##type(InstanceType instance_type) { \
return InstanceRangeChecker<first_instance_type, \
last_instance_type>::Check(instance_type); \
}
-INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE)
-#undef INSTANCE_TYPE_CHECKER_RANGE
+
+#if V8_STATIC_ROOTS_BOOL
+
+#define INSTANCE_TYPE_CHECKER_RANGE2(type, first_instance_type, \
+ last_instance_type) \
+ V8_INLINE bool Is##type(Map map_object) { \
+ if (base::Optional<RootIndexRange> range = \
+ UniqueMapRangeOfInstanceTypeRange(first_instance_type, \
+ last_instance_type)) { \
+ DCHECK(MayHaveMapCheckFastCase(last_instance_type)); \
+ return CheckInstanceMapRange(*range, map_object); \
+ } \
+ return Is##type(map_object.instance_type()); \
+ }
+
+#else
+
+#define INSTANCE_TYPE_CHECKER_RANGE2(type, first_instance_type, \
+ last_instance_type) \
+ V8_INLINE bool Is##type(Map map_object) { \
+ return Is##type(map_object.instance_type()); \
+ }
+
+#endif // V8_STATIC_ROOTS_BOOL
+
+INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE1)
+INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE2)
+#undef INSTANCE_TYPE_CHECKER_RANGE1
+#undef INSTANCE_TYPE_CHECKER_RANGE2
V8_INLINE constexpr bool IsHeapObject(InstanceType instance_type) {
return true;
@@ -70,47 +280,65 @@ V8_INLINE constexpr bool IsInternalizedString(InstanceType instance_type) {
(kStringTag | kInternalizedTag);
}
+V8_INLINE bool IsInternalizedString(Map map_object) {
+#if V8_STATIC_ROOTS_BOOL
+ return CheckInstanceMapRange(kUniqueMapRangeOfStringType::kInternalizedString,
+ map_object);
+#else
+ return IsInternalizedString(map_object.instance_type());
+#endif
+}
+
V8_INLINE constexpr bool IsExternalString(InstanceType instance_type) {
return (instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
kExternalStringTag;
}
+V8_INLINE bool IsExternalString(Map map_object) {
+#if V8_STATIC_ROOTS_BOOL
+ return CheckInstanceMapRange(kUniqueMapRangeOfStringType::kExternalString,
+ map_object);
+#else
+ return IsExternalString(map_object.instance_type());
+#endif
+}
+
V8_INLINE constexpr bool IsThinString(InstanceType instance_type) {
- return (instance_type & kStringRepresentationMask) == kThinStringTag;
+ return instance_type == THIN_STRING_TYPE;
+}
+
+V8_INLINE bool IsThinString(Map map_object) {
+#if V8_STATIC_ROOTS_BOOL
+ return CheckInstanceMap(RootIndex::kThinStringMap, map_object);
+#else
+ return IsThinString(map_object.instance_type());
+#endif
+}
+
+V8_INLINE constexpr bool IsGcSafeCode(InstanceType instance_type) {
+ return IsCode(instance_type);
}
+V8_INLINE bool IsGcSafeCode(Map map_object) { return IsCode(map_object); }
+
V8_INLINE constexpr bool IsAbstractCode(InstanceType instance_type) {
- return IsBytecodeArray(instance_type) || IsCode(instance_type) ||
- (V8_EXTERNAL_CODE_SPACE_BOOL && IsCodeDataContainer(instance_type));
+ return IsBytecodeArray(instance_type) || IsCode(instance_type);
+}
+
+V8_INLINE bool IsAbstractCode(Map map_object) {
+ return IsAbstractCode(map_object.instance_type());
}
V8_INLINE constexpr bool IsFreeSpaceOrFiller(InstanceType instance_type) {
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-V8_INLINE constexpr bool IsCodeT(InstanceType instance_type) {
- return instance_type == CODET_TYPE;
+V8_INLINE bool IsFreeSpaceOrFiller(Map map_object) {
+ return IsFreeSpaceOrFiller(map_object.instance_type());
}
} // namespace InstanceTypeChecker
-// INSTANCE_TYPE_CHECKERS macro defines some "types" that do not have
-// respective C++ classes (see TypedArrayConstructor, FixedArrayExact) or
-// the respective C++ counterpart is actually a template (see HashTable).
-// So in order to be able to customize IsType() implementations for specific
-// types, we declare a parallel set of "types" that can be compared using
-// std::is_same<>.
-namespace InstanceTypeTraits {
-
-#define DECL_TYPE(type, ...) class type;
-INSTANCE_TYPE_CHECKERS(DECL_TYPE)
-TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(DECL_TYPE)
-TORQUE_INSTANCE_CHECKERS_MULTIPLE_ONLY_DECLARED(DECL_TYPE)
-HEAP_OBJECT_TYPE_LIST(DECL_TYPE)
-#undef DECL_TYPE
-
-} // namespace InstanceTypeTraits
-
#define TYPE_CHECKER(type, ...) \
bool HeapObject::Is##type() const { \
/* In general, parameterless IsBlah() must not be used for objects */ \
@@ -125,12 +353,9 @@ HEAP_OBJECT_TYPE_LIST(DECL_TYPE)
/* compression cage where the Map space is allocated. */ \
bool HeapObject::Is##type(PtrComprCageBase cage_base) const { \
Map map_object = map(cage_base); \
- return InstanceTypeChecker::Is##type(map_object.instance_type()); \
+ return InstanceTypeChecker::Is##type(map_object); \
}
-// TODO(v8:7786): For instance types that have a single map instance on the
-// roots, and when that map is a embedded in the binary, compare against the map
-// pointer rather than looking up the instance type.
INSTANCE_TYPE_CHECKERS(TYPE_CHECKER)
#undef TYPE_CHECKER
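
The instance-type-inl.h changes above replace "load the instance type from the map" with a comparison on the compressed map word whenever a type corresponds to one statically known read-only map (or a contiguous range of them). A standalone model of that fast path — plain C++, not V8 code; the table contents are illustrative.

#include <array>
#include <cstddef>
#include <cstdint>

using CompressedPtr = uint32_t;

// Stand-in for the static read-only roots table of compressed map addresses.
constexpr std::array<CompressedPtr, 3> kToyStaticRootTable = {0x100, 0x140, 0x180};

// Type with a single unique map: one integer compare, no instance-type load.
bool CheckMap(CompressedPtr map, size_t expected_root_index) {
  return map == kToyStaticRootTable[expected_root_index];
}

// Type whose maps occupy a contiguous range of read-only roots: two compares.
bool CheckMapRange(CompressedPtr map, size_t first_root, size_t last_root) {
  return map >= kToyStaticRootTable[first_root] &&
         map <= kToyStaticRootTable[last_root];
}
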
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index b7879b5a31..9a051f5e98 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -143,9 +143,8 @@ enum InstanceType : uint16_t {
UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ // Mark thin strings as two-byte just to be on the safe side.
THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
- THIN_ONE_BYTE_STRING_TYPE =
- kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
SHARED_STRING_TYPE = STRING_TYPE | kSharedStringTag,
SHARED_ONE_BYTE_STRING_TYPE = ONE_BYTE_STRING_TYPE | kSharedStringTag,
SHARED_EXTERNAL_STRING_TYPE = EXTERNAL_STRING_TYPE | kSharedStringTag,
@@ -155,9 +154,6 @@ enum InstanceType : uint16_t {
UNCACHED_EXTERNAL_STRING_TYPE | kSharedStringTag,
SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE | kSharedStringTag,
- SHARED_THIN_STRING_TYPE = THIN_STRING_TYPE | kSharedStringTag,
- SHARED_THIN_ONE_BYTE_STRING_TYPE =
- THIN_ONE_BYTE_STRING_TYPE | kSharedStringTag,
// Most instance types are defined in Torque, with the exception of the string
// types above. They are ordered by inheritance hierarchy so that we can easily
@@ -191,12 +187,6 @@ enum InstanceType : uint16_t {
FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE,
LAST_TYPE = LAST_HEAP_OBJECT_TYPE,
BIGINT_TYPE = BIG_INT_BASE_TYPE,
-
-#ifdef V8_EXTERNAL_CODE_SPACE
- CODET_TYPE = CODE_DATA_CONTAINER_TYPE,
-#else
- CODET_TYPE = CODE_TYPE,
-#endif
};
// This constant is defined outside of the InstanceType enum because the
@@ -281,8 +271,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
#define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \
V(AbstractCode) \
- V(FreeSpaceOrFiller) \
V(ExternalString) \
+ V(FreeSpaceOrFiller) \
+ V(GcSafeCode) \
V(InternalizedString)
#define INSTANCE_TYPE_CHECKERS(V) \
@@ -291,13 +282,12 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
INSTANCE_TYPE_CHECKERS_CUSTOM(V)
namespace InstanceTypeChecker {
-#define IS_TYPE_FUNCTION_DECL(Type, ...) \
- V8_INLINE constexpr bool Is##Type(InstanceType instance_type);
+#define IS_TYPE_FUNCTION_DECL(Type, ...) \
+ V8_INLINE constexpr bool Is##Type(InstanceType instance_type); \
+ V8_INLINE bool Is##Type(Map map);
INSTANCE_TYPE_CHECKERS(IS_TYPE_FUNCTION_DECL)
-IS_TYPE_FUNCTION_DECL(CodeT)
-
#undef IS_TYPE_FUNCTION_DECL
} // namespace InstanceTypeChecker
@@ -317,8 +307,8 @@ IS_TYPE_FUNCTION_DECL(CodeT)
V(_, BytecodeArrayMap, bytecode_array_map, BytecodeArray) \
V(_, CellMap, cell_map, Cell) \
V(_, WeakCellMap, weak_cell_map, WeakCell) \
+ V(_, InstructionStreamMap, instruction_stream_map, InstructionStream) \
V(_, CodeMap, code_map, Code) \
- V(_, CodeDataContainerMap, code_data_container_map, CodeDataContainer) \
V(_, CoverageInfoMap, coverage_info_map, CoverageInfo) \
V(_, DebugInfoMap, debug_info_map, DebugInfo) \
V(_, FreeSpaceMap, free_space_map, FreeSpace) \
@@ -345,10 +335,18 @@ IS_TYPE_FUNCTION_DECL(CodeT)
// This list must contain only maps that are shared by all objects of their
// instance type.
-#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
- UNIQUE_LEAF_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
- V(_, HeapNumberMap, heap_number_map, HeapNumber) \
- V(_, WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArray) \
+#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
+ UNIQUE_LEAF_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
+ V(_, ByteArrayMap, byte_array_map, ByteArray) \
+ V(_, NameDictionaryMap, name_dictionary_map, NameDictionary) \
+ V(_, OrderedNameDictionaryMap, ordered_name_dictionary_map, \
+ OrderedNameDictionary) \
+ V(_, GlobalDictionaryMap, global_dictionary_map, GlobalDictionary) \
+ V(_, GlobalPropertyCellMap, global_property_cell_map, PropertyCell) \
+ V(_, HeapNumberMap, heap_number_map, HeapNumber) \
+ V(_, WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArray) \
+ V(_, ScopeInfoMap, scope_info_map, ScopeInfo) \
+ V(_, WeakArrayListMap, weak_array_list_map, WeakArrayList) \
TORQUE_DEFINED_MAP_CSA_LIST_GENERATOR(V, _)
} // namespace internal
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 881842fbdc..ab197778b8 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -279,7 +279,7 @@ MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
return result;
}
DCHECK(dest_length < result->length());
- return SeqString::Truncate(result, dest_length);
+ return SeqString::Truncate(isolate, result, dest_length);
}
} // namespace
@@ -2790,21 +2790,19 @@ bool Intl::RemoveCollation(const char* collation) {
// See the list in ecma402 #sec-issanctionedsimpleunitidentifier
std::set<std::string> Intl::SanctionedSimpleUnits() {
- return std::set<std::string>({"acre", "bit", "byte",
- "celsius", "centimeter", "day",
- "degree", "fahrenheit", "fluid-ounce",
- "foot", "gallon", "gigabit",
- "gigabyte", "gram", "hectare",
- "hour", "inch", "kilobit",
- "kilobyte", "kilogram", "kilometer",
- "liter", "megabit", "megabyte",
- "meter", "mile", "mile-scandinavian",
- "millimeter", "milliliter", "millisecond",
- "minute", "month", "ounce",
- "percent", "petabyte", "pound",
- "second", "stone", "terabit",
- "terabyte", "week", "yard",
- "year"});
+ return std::set<std::string>(
+ {"acre", "bit", "byte", "celsius",
+ "centimeter", "day", "degree", "fahrenheit",
+ "fluid-ounce", "foot", "gallon", "gigabit",
+ "gigabyte", "gram", "hectare", "hour",
+ "inch", "kilobit", "kilobyte", "kilogram",
+ "kilometer", "liter", "megabit", "megabyte",
+ "meter", "microsecond", "mile", "mile-scandinavian",
+ "millimeter", "milliliter", "millisecond", "minute",
+ "month", "nanosecond", "ounce", "percent",
+ "petabyte", "pound", "second", "stone",
+ "terabit", "terabyte", "week", "yard",
+ "year"});
}
// ecma-402/#sec-isvalidtimezonename
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 77a348fcf3..9378060d2d 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -21,7 +21,9 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBuffer)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSTypedArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSDataViewOrRabGsabDataView)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDataView)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRabGsabDataView)
ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
RELEASE_ACQUIRE_ACCESSORS(JSTypedArray, base_pointer, Object,
@@ -239,9 +241,13 @@ bool JSTypedArray::IsDetachedOrOutOfBounds() const {
if (WasDetached()) {
return true;
}
- bool out_of_bounds = false;
- GetLengthOrOutOfBounds(out_of_bounds);
- return out_of_bounds;
+ if (!is_backed_by_rab()) {
+ // TypedArrays backed by GSABs or regular AB/SABs are never out of bounds.
+ // This shortcut is load-bearing; it lets us determine
+ // IsDetachedOrOutOfBounds without consulting the BackingStore.
+ return false;
+ }
+ return IsOutOfBounds();
}
// static
@@ -385,16 +391,39 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
-DEF_GETTER(JSDataView, data_pointer, void*) {
+DEF_GETTER(JSDataViewOrRabGsabDataView, data_pointer, void*) {
Address value = ReadSandboxedPointerField(kDataPointerOffset, cage_base);
return reinterpret_cast<void*>(value);
}
-void JSDataView::set_data_pointer(Isolate* isolate, void* ptr) {
+void JSDataViewOrRabGsabDataView::set_data_pointer(Isolate* isolate,
+ void* ptr) {
Address value = reinterpret_cast<Address>(ptr);
WriteSandboxedPointerField(kDataPointerOffset, isolate, value);
}
+size_t JSRabGsabDataView::GetByteLength() const {
+ if (IsOutOfBounds()) {
+ return 0;
+ }
+ if (is_length_tracking()) {
+ // Invariant: byte_length of length tracking DataViews is 0.
+ DCHECK_EQ(0, byte_length());
+ return buffer().GetByteLength() - byte_offset();
+ }
+ return byte_length();
+}
+
+bool JSRabGsabDataView::IsOutOfBounds() const {
+ if (!is_backed_by_rab()) {
+ return false;
+ }
+ if (is_length_tracking()) {
+ return byte_offset() > buffer().GetByteLength();
+ }
+ return byte_offset() + byte_length() > buffer().GetByteLength();
+}
+
} // namespace internal
} // namespace v8
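
The JSRabGsabDataView accessors added above encode two rules: length-tracking views keep byte_length == 0 and derive their length from the current buffer size, while fixed-length views are out of bounds once they no longer fit. A compact plain-C++ model of that arithmetic (not V8 code; names are illustrative):

#include <cstddef>

// Returns the effective byte length of a view over a resizable buffer,
// or 0 when the view is out of bounds.
size_t ViewByteLength(size_t buffer_bytes, size_t view_offset,
                      size_t view_length, bool length_tracking) {
  if (length_tracking) {
    // Tracks the buffer: out of bounds only if the offset passed the end.
    return view_offset <= buffer_bytes ? buffer_bytes - view_offset : 0;
  }
  // Fixed length: out of bounds once offset + length exceeds the buffer.
  return view_offset + view_length <= buffer_bytes ? view_length : 0;
}
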
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 3695e9a17e..fc6578d7a4 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -86,15 +86,19 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
set_backing_store(isolate, backing_store->buffer_start());
}
- if (is_shared() && is_resizable_by_js()) {
- // GSABs need to read their byte_length from the BackingStore. Maintain the
- // invariant that their byte_length field is always 0.
- set_byte_length(0);
- } else {
- CHECK_LE(backing_store->byte_length(), kMaxByteLength);
- set_byte_length(backing_store->byte_length());
- }
- set_max_byte_length(backing_store->max_byte_length());
+ // GSABs need to read their byte_length from the BackingStore. Maintain the
+ // invariant that their byte_length field is always 0.
+ auto byte_len =
+ (is_shared() && is_resizable_by_js()) ? 0 : backing_store->byte_length();
+ CHECK_LE(backing_store->byte_length(), kMaxByteLength);
+ set_byte_length(byte_len);
+ // For Wasm memories, it is possible for the backing store maximum to be
+ // different from the JSArrayBuffer maximum. The maximum pages allowed on a
+ // Wasm memory are tracked on the Wasm memory object, and not the
+ // JSArrayBuffer associated with it.
+ auto max_byte_len = is_resizable_by_js() ? backing_store->max_byte_length()
+ : backing_store->byte_length();
+ set_max_byte_length(max_byte_len);
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
ArrayBufferExtension* extension = EnsureExtension();
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 6c0784540d..e2950bdfe7 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -391,7 +391,6 @@ class JSTypedArray
template <typename IsolateT>
friend class Deserializer;
friend class Factory;
- friend class WebSnapshotDeserializer;
DECL_PRIMITIVE_SETTER(length, size_t)
// Reads the "length" field, doesn't assert the TypedArray is not RAB / GSAB
@@ -408,17 +407,14 @@ class JSTypedArray
TQ_OBJECT_CONSTRUCTORS(JSTypedArray)
};
-class JSDataView
- : public TorqueGeneratedJSDataView<JSDataView, JSArrayBufferView> {
+class JSDataViewOrRabGsabDataView
+ : public TorqueGeneratedJSDataViewOrRabGsabDataView<
+ JSDataViewOrRabGsabDataView, JSArrayBufferView> {
public:
// [data_pointer]: pointer to the actual data.
DECL_GETTER(data_pointer, void*)
inline void set_data_pointer(Isolate* isolate, void* value);
- // Dispatched behavior.
- DECL_PRINTER(JSDataView)
- DECL_VERIFIER(JSDataView)
-
// TODO(v8:9287): Re-enable when GCMole stops mixing 32/64 bit configs.
// static_assert(IsAligned(kDataPointerOffset, kTaggedSize));
@@ -428,9 +424,34 @@ class JSDataView
class BodyDescriptor;
+ TQ_OBJECT_CONSTRUCTORS(JSDataViewOrRabGsabDataView)
+};
+
+class JSDataView
+ : public TorqueGeneratedJSDataView<JSDataView,
+ JSDataViewOrRabGsabDataView> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSDataView)
+ DECL_VERIFIER(JSDataView)
+
TQ_OBJECT_CONSTRUCTORS(JSDataView)
};
+class JSRabGsabDataView
+ : public TorqueGeneratedJSRabGsabDataView<JSRabGsabDataView,
+ JSDataViewOrRabGsabDataView> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSRabGsabDataView)
+ DECL_VERIFIER(JSRabGsabDataView)
+
+ inline size_t GetByteLength() const;
+ inline bool IsOutOfBounds() const;
+
+ TQ_OBJECT_CONSTRUCTORS(JSRabGsabDataView)
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index 34e8bccce3..beb80a3c75 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -124,11 +124,16 @@ macro IsOnHeapTypedArray(array: JSTypedArray): bool {
return TaggedNotEqual(array.base_pointer, SmiConstant(0));
}
-extern class JSDataView extends JSArrayBufferView {
+@abstract
+extern class JSDataViewOrRabGsabDataView extends JSArrayBufferView {
// A SandboxedPtr if the sandbox is enabled
data_pointer: RawPtr;
}
+extern class JSDataView extends JSDataViewOrRabGsabDataView {}
+
+extern class JSRabGsabDataView extends JSDataViewOrRabGsabDataView {}
+
@abstract
@doNotGenerateCast extern class TypedArrayConstructor extends JSFunction
generates 'TNode<JSFunction>';
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 5c94d392e1..e7e30e4999 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -84,10 +84,14 @@ class JSArray : public TorqueGeneratedJSArray<JSArray, JSObject> {
// separators.
// 2) Implicitly between two consecutive strings a single separator.
//
+ // In addition, repeated strings are represented by a negative Smi, indicating
+ // how many times the previously written string has to be repeated.
+ //
// Here are some input/output examples given the separator string is ',':
//
// [1, 'hello', 2, 'world', 1] => ',hello,,world,'
// ['hello', 'world'] => 'hello,world'
+ // ['hello', -2, 'world'] => 'hello,hello,hello,world'
//
// To avoid any allocations, this helper assumes the destination string is the
// exact length necessary to write the strings and separators from the fixed
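
The comment above describes the fixed-array encoding consumed by the join helper: positive Smis emit that many separators, strings are written verbatim with a single implicit separator between consecutive strings, and (with this change) a negative Smi repeats the previously written string. A standalone decoder that reproduces the three examples in the comment — plain C++, not the V8 helper:

#include <string>
#include <variant>
#include <vector>

std::string JoinEncoded(const std::vector<std::variant<int, std::string>>& items,
                        const std::string& sep) {
  std::string out;
  std::string last;        // most recently written string
  bool need_sep = false;   // pending implicit separator before the next string
  for (const auto& item : items) {
    if (const int* n = std::get_if<int>(&item)) {
      if (*n > 0) {
        for (int i = 0; i < *n; i++) out += sep;   // n explicit separators
        need_sep = false;
      } else {
        for (int i = 0; i < -*n; i++) {            // repeat previous string -n times
          out += sep;
          out += last;
        }
      }
    } else {
      if (need_sep) out += sep;                    // implicit separator
      last = std::get<std::string>(item);
      out += last;
      need_sep = true;
    }
  }
  return out;
}

// JoinEncoded({1, "hello", 2, "world", 1}, ",")   == ",hello,,world,"
// JoinEncoded({"hello", "world"}, ",")            == "hello,world"
// JoinEncoded({"hello", -2, "world"}, ",")        == "hello,hello,hello,world"
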
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index 7bf3fc7670..22c067df69 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -38,17 +38,25 @@ extern class JSArrayIterator extends JSObject {
// Perform CreateArrayIterator (ES #sec-createarrayiterator).
@export
macro CreateArrayIterator(implicit context: NativeContext)(
- array: JSReceiver, kind: constexpr IterationKind): JSArrayIterator {
+ array: JSReceiver, kind: constexpr IterationKind, nextIndex: Number):
+ JSArrayIterator {
return new JSArrayIterator{
map: *NativeContextSlot(ContextSlot::INITIAL_ARRAY_ITERATOR_MAP_INDEX),
properties_or_hash: kEmptyFixedArray,
elements: kEmptyFixedArray,
iterated_object: array,
- next_index: 0,
+ next_index: nextIndex,
kind: SmiTag<IterationKind>(kind)
};
}
+// Perform CreateArrayIterator (ES #sec-createarrayiterator).
+@export
+macro CreateArrayIterator(implicit context: NativeContext)(
+ array: JSReceiver, kind: constexpr IterationKind): JSArrayIterator {
+ return CreateArrayIterator(array, kind, 0);
+}
+
extern class JSArray extends JSObject {
macro IsEmpty(): bool {
return this.length == 0;
@@ -87,6 +95,21 @@ macro NewJSArray(implicit context: Context)(): JSArray {
};
}
+macro NewJSArrayFilledWithZero(implicit context: Context)(length: intptr):
+ JSArray labels Slow {
+ if (length == 0) return NewJSArray();
+ if (length > kMaxFastArrayLength) goto Slow;
+
+ const map: Map = GetFastPackedSmiElementsJSArrayMap();
+ const elements: FixedArrayBase = AllocateFixedArray(
+ ElementsKind::PACKED_SMI_ELEMENTS, length,
+ AllocationFlag::kAllowLargeObjectAllocation);
+ FillFixedArrayWithSmiZero(
+ ElementsKind::PACKED_SMI_ELEMENTS, UnsafeCast<FixedArray>(elements), 0,
+ length);
+ return NewJSArray(map, elements);
+}
+
// A HeapObject with a JSArray map, and either fast packed elements, or fast
// holey elements when the global NoElementsProtector is not invalidated.
transient type FastJSArray extends JSArray;
diff --git a/deps/v8/src/objects/js-atomics-synchronization-inl.h b/deps/v8/src/objects/js-atomics-synchronization-inl.h
index 07710e8d8d..68fdf4c67a 100644
--- a/deps/v8/src/objects/js-atomics-synchronization-inl.h
+++ b/deps/v8/src/objects/js-atomics-synchronization-inl.h
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-atomics-synchronization.h"
+#include "src/objects/js-struct-inl.h"
#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/js-atomics-synchronization.h b/deps/v8/src/objects/js-atomics-synchronization.h
index 7394a67289..d1e7037ddc 100644
--- a/deps/v8/src/objects/js-atomics-synchronization.h
+++ b/deps/v8/src/objects/js-atomics-synchronization.h
@@ -10,6 +10,7 @@
#include "src/base/platform/time.h"
#include "src/execution/thread-id.h"
#include "src/objects/js-objects.h"
+#include "src/objects/js-struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -26,7 +27,7 @@ class WaiterQueueNode;
// Base class for JSAtomicsMutex and JSAtomicsCondition
class JSSynchronizationPrimitive
: public TorqueGeneratedJSSynchronizationPrimitive<
- JSSynchronizationPrimitive, JSObject> {
+ JSSynchronizationPrimitive, AlwaysSharedSpaceJSObject> {
public:
   // Synchronization primitives only store raw data as state.
static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
@@ -44,10 +45,10 @@ class JSSynchronizationPrimitive
inline std::atomic<StateT>* AtomicStatePtr();
- using TorqueGeneratedJSSynchronizationPrimitive<JSSynchronizationPrimitive,
- JSObject>::state;
- using TorqueGeneratedJSSynchronizationPrimitive<JSSynchronizationPrimitive,
- JSObject>::set_state;
+ using TorqueGeneratedJSSynchronizationPrimitive<
+ JSSynchronizationPrimitive, AlwaysSharedSpaceJSObject>::state;
+ using TorqueGeneratedJSSynchronizationPrimitive<
+ JSSynchronizationPrimitive, AlwaysSharedSpaceJSObject>::set_state;
};
// A non-recursive mutex that is exposed to JS.
diff --git a/deps/v8/src/objects/js-atomics-synchronization.tq b/deps/v8/src/objects/js-atomics-synchronization.tq
index 791020f0f6..d41cc0344a 100644
--- a/deps/v8/src/objects/js-atomics-synchronization.tq
+++ b/deps/v8/src/objects/js-atomics-synchronization.tq
@@ -3,7 +3,7 @@
// found in the LICENSE file.
@abstract
-extern class JSSynchronizationPrimitive extends JSObject {
+extern class JSSynchronizationPrimitive extends AlwaysSharedSpaceJSObject {
@if(TAGGED_SIZE_8_BYTES) state: uintptr;
@ifnot(TAGGED_SIZE_8_BYTES) state: uint32;
}
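
With this change the synchronization primitives keep only a raw word of state (uintptr or uint32 depending on tagged size) on a shared-space object, and the C++ side accesses it through AtomicStatePtr() as a std::atomic pointer. As a rough illustration of that general pattern only (the class and constant names below are invented, and V8's real lock states and waiter-queue handling are more involved), a minimal word-state mutex looks like:

#include <atomic>
#include <cstdint>

// Sketch: one word of state, toggled with acquire/release compare-exchange.
class MiniWordMutex {
 public:
  bool TryLock() {
    uint32_t expected = kUnlocked;
    return state_.compare_exchange_strong(expected, kLocked,
                                          std::memory_order_acquire);
  }
  void Unlock() { state_.store(kUnlocked, std::memory_order_release); }

 private:
  static constexpr uint32_t kUnlocked = 0;
  static constexpr uint32_t kLocked = 1;
  std::atomic<uint32_t> state_{kUnlocked};
};
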
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index af96871cdf..eae113ad9a 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -1135,7 +1135,7 @@ Maybe<DateTimeValueRecord> HandleDateTimeOthers(
double x;
if (x_obj->IsUndefined()) {
// a. Set x to ! Call(%Date.now%, undefined).
- x = JSDate::CurrentTimeValue(isolate);
+ x = static_cast<double>(JSDate::CurrentTimeValue(isolate));
// 5. Else,
} else {
// a. Set x to ? ToNumber(x).
@@ -1453,7 +1453,7 @@ MaybeHandle<String> JSDateTimeFormat::DateTimeFormat(
double x;
if (date->IsUndefined()) {
// 3.a Let x be Call(%Date_now%, undefined).
- x = JSDate::CurrentTimeValue(isolate);
+ x = static_cast<double>(JSDate::CurrentTimeValue(isolate));
} else {
// 4. Else,
// a. Let x be ? ToNumber(date).
@@ -2781,7 +2781,7 @@ MaybeHandle<JSArray> JSDateTimeFormat::FormatToParts(
}
if (x->IsUndefined(isolate)) {
- x = factory->NewNumber(JSDate::CurrentTimeValue(isolate));
+ x = factory->NewNumberFromInt64(JSDate::CurrentTimeValue(isolate));
} else {
ASSIGN_RETURN_ON_EXCEPTION(isolate, x, Object::ToNumber(isolate, x),
JSArray);
diff --git a/deps/v8/src/objects/js-duration-format.cc b/deps/v8/src/objects/js-duration-format.cc
index fa7fcf3a91..4b15db7fe3 100644
--- a/deps/v8/src/objects/js-duration-format.cc
+++ b/deps/v8/src/objects/js-duration-format.cc
@@ -684,6 +684,39 @@ MaybeHandle<T> PartitionDurationFormatPattern(Isolate* isolate,
return Format(isolate, formatted, types);
}
+// #sec-todurationrecord
+// ToDurationRecord is almost the same as temporal::ToPartialDuration
+// except:
+// 1) In the beginning it will throw RangeError if the type of input is String,
+// 2) In the end it will throw RangeError if IsValidDurationRecord returns false.
+Maybe<DurationRecord> ToDurationRecord(Isolate* isolate, Handle<Object> input,
+ const DurationRecord& default_value) {
+ // 1-a. If Type(input) is String, throw a RangeError exception.
+ if (input->IsString()) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ isolate->factory()->object_string(), input),
+ Nothing<DurationRecord>());
+ }
+ // Step 1-b - 23. Same as ToTemporalPartialDurationRecord.
+ DurationRecord record;
+ MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, record,
+ temporal::ToPartialDuration(isolate, input, default_value),
+ Nothing<DurationRecord>());
+ // 24. If IsValidDurationRecord(result) is false, throw a RangeError
+ // exception.
+ if (!temporal::IsValidDuration(isolate, record)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ isolate->factory()->object_string(), input),
+ Nothing<DurationRecord>());
+ }
+ return Just(record);
+}
+
template <typename T,
MaybeHandle<T> (*Format)(Isolate*, const icu::FormattedValue&,
const std::vector<std::string>&)>
@@ -695,17 +728,8 @@ MaybeHandle<T> FormatCommon(Isolate* isolate, Handle<JSDurationFormat> df,
DurationRecord record;
MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, record,
- temporal::ToPartialDuration(isolate, duration,
- {0, 0, 0, {0, 0, 0, 0, 0, 0, 0}}),
+ ToDurationRecord(isolate, duration, {0, 0, 0, {0, 0, 0, 0, 0, 0, 0}}),
Handle<T>());
- // 4. If IsValidDurationRecord(record) is false, throw a RangeError exception.
- if (!temporal::IsValidDuration(isolate, record)) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kInvalid,
- isolate->factory()->object_string(), duration),
- T);
- }
// 5. Let parts be ! PartitionDurationFormatPattern(df, record).
return PartitionDurationFormatPattern<T, Format>(isolate, df, record,
method_name);
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index ba8e015d41..152c00d1b1 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -63,16 +63,16 @@ AbstractCode JSFunction::abstract_code(IsolateT* isolate) {
if (ActiveTierIsIgnition()) {
return AbstractCode::cast(shared().GetBytecodeArray(isolate));
} else {
- return ToAbstractCode(code(kAcquireLoad));
+ return AbstractCode::cast(code(kAcquireLoad));
}
}
int JSFunction::length() { return shared().length(); }
-ACCESSORS_RELAXED(JSFunction, code, CodeT, kCodeOffset)
-RELEASE_ACQUIRE_GETTER_CHECKED(JSFunction, code, CodeT, kCodeOffset, true)
-void JSFunction::set_code(CodeT value, ReleaseStoreTag, WriteBarrierMode mode) {
- TaggedField<CodeT, kCodeOffset>::Release_Store(*this, value);
+ACCESSORS_RELAXED(JSFunction, code, Code, kCodeOffset)
+RELEASE_ACQUIRE_GETTER_CHECKED(JSFunction, code, Code, kCodeOffset, true)
+void JSFunction::set_code(Code value, ReleaseStoreTag, WriteBarrierMode mode) {
+ TaggedField<Code, kCodeOffset>::Release_Store(*this, value);
CONDITIONAL_WRITE_BARRIER(*this, kCodeOffset, value, mode);
if (V8_UNLIKELY(v8_flags.log_function_events && has_feedback_vector())) {
feedback_vector().set_log_next_execution(true);
@@ -80,18 +80,8 @@ void JSFunction::set_code(CodeT value, ReleaseStoreTag, WriteBarrierMode mode) {
}
RELEASE_ACQUIRE_ACCESSORS(JSFunction, context, Context, kContextOffset)
-#ifdef V8_EXTERNAL_CODE_SPACE
-void JSFunction::set_code(Code code, ReleaseStoreTag, WriteBarrierMode mode) {
- set_code(ToCodeT(code), kReleaseStore, mode);
-}
-#endif
-
Address JSFunction::code_entry_point() const {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- return CodeDataContainer::cast(code()).code_entry_point();
- } else {
- return code().InstructionStart();
- }
+ return Code::cast(code()).code_entry_point();
}
// TODO(ishell): Why relaxed read but release store?
@@ -234,8 +224,8 @@ bool JSFunction::ShouldFlushBaselineCode(
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
// Do a raw read for shared and code fields here since this function may be
// called on a concurrent thread. JSFunction itself should be fully
- // initialized here but the SharedFunctionInfo, Code objects may not be
- // initialized. We read using acquire loads to defend against that.
+ // initialized here but the SharedFunctionInfo, InstructionStream objects may
+ // not be initialized. We read using acquire loads to defend against that.
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
if (!maybe_shared.IsSharedFunctionInfo()) return false;
@@ -243,8 +233,13 @@ bool JSFunction::ShouldFlushBaselineCode(
// code field. We don't use release stores when copying code pointers from
// SFI / FV to JSFunction but it is safe in practice.
Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
- if (!maybe_code.IsCodeT()) return false;
- CodeT code = CodeT::cast(maybe_code);
+#ifdef THREAD_SANITIZER
+ // This is needed because TSAN does not process the memory fence
+ // emitted after page initialization.
+ BasicMemoryChunk::FromAddress(maybe_code.ptr())->SynchronizedHeapLoad();
+#endif
+ if (!maybe_code.IsCode()) return false;
+ Code code = Code::cast(maybe_code);
if (code.kind() != CodeKind::BASELINE) return false;
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
@@ -254,14 +249,14 @@ bool JSFunction::ShouldFlushBaselineCode(
bool JSFunction::NeedsResetDueToFlushedBytecode() {
// Do a raw read for shared and code fields here since this function may be
// called on a concurrent thread. JSFunction itself should be fully
- // initialized here but the SharedFunctionInfo, Code objects may not be
- // initialized. We read using acquire loads to defend against that.
+ // initialized here but the SharedFunctionInfo, InstructionStream objects may
+ // not be initialized. We read using acquire loads to defend against that.
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
if (!maybe_shared.IsSharedFunctionInfo()) return false;
Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
- if (!maybe_code.IsCodeT()) return false;
- CodeT code = CodeT::cast(maybe_code);
+ if (!maybe_code.IsCode()) return false;
+ Code code = Code::cast(maybe_code);
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 3b446c9dd1..1640cb0d31 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -52,7 +52,7 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
// Check the optimized code cache.
if (has_feedback_vector() && feedback_vector().has_optimized_code() &&
!feedback_vector().optimized_code().marked_for_deoptimization()) {
- CodeT code = feedback_vector().optimized_code();
+ Code code = feedback_vector().optimized_code();
DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
result |= CodeKindToCodeKindFlag(code.kind());
}
@@ -474,18 +474,19 @@ MaybeHandle<Object> JSWrappedFunction::Create(
// 8. If result is an Abrupt Completion, throw a TypeError exception.
if (is_abrupt.IsNothing()) {
DCHECK(isolate->has_pending_exception());
+ Handle<Object> pending_exception =
+ Handle<Object>(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
- // TODO(v8:11989): provide a non-observable inspection on the
- // pending_exception to the newly created TypeError.
- // https://github.com/tc39/proposal-shadowrealm/issues/353
// The TypeError thrown is created with creation Realm's TypeError
// constructor instead of the executing Realm's.
+ Handle<JSFunction> type_error_function =
+ Handle<JSFunction>(creation_context->type_error_function(), isolate);
+ Handle<String> string =
+ Object::NoSideEffectsToString(isolate, pending_exception);
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
- NewError(Handle<JSFunction>(creation_context->type_error_function(),
- isolate),
- MessageTemplate::kCannotWrap),
+ NewError(type_error_function, MessageTemplate::kCannotWrap, string),
{});
}
DCHECK(is_abrupt.FromJust());
@@ -584,13 +585,15 @@ void JSFunction::CreateAndAttachFeedbackVector(
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
handle(function->closure_feedback_cell_array(), isolate);
Handle<FeedbackVector> feedback_vector = FeedbackVector::New(
- isolate, shared, closure_feedback_cell_array, compiled_scope);
+ isolate, shared, closure_feedback_cell_array,
+ handle(function->raw_feedback_cell(isolate), isolate), compiled_scope);
+ USE(feedback_vector);
// EnsureClosureFeedbackCellArray should handle the special case where we need
// to allocate a new feedback cell. Please look at comment in that function
// for more details.
DCHECK(function->raw_feedback_cell() !=
isolate->heap()->many_closures_cell());
- function->raw_feedback_cell().set_value(*feedback_vector, kReleaseStore);
+ DCHECK_EQ(function->raw_feedback_cell().value(), *feedback_vector);
function->SetInterruptBudget(isolate);
DCHECK_EQ(v8_flags.log_function_events,
@@ -689,7 +692,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// needed. At that point, a new initial map is created and the
// prototype is put into the initial map where it belongs.
function->set_prototype_or_initial_map(*value, kReleaseStore);
- if (value->IsJSObject()) {
+ if (value->IsJSObjectThatCanBeTrackedAsPrototype()) {
// Optimize as prototype to detach it from its transition tree.
JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
@@ -745,16 +748,17 @@ void JSFunction::SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
void JSFunction::SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
Handle<Map> map, Handle<HeapObject> prototype,
- Handle<JSFunction> constructor) {
+ Handle<HeapObject> constructor) {
if (map->prototype() != *prototype) {
Map::SetPrototype(isolate, map, prototype);
}
+ DCHECK_IMPLIES(!constructor->IsJSFunction(), map->InSharedHeap());
map->SetConstructor(*constructor);
function->set_prototype_or_initial_map(*map, kReleaseStore);
if (v8_flags.log_maps) {
LOG(isolate, MapEvent("InitialMap", Handle<Map>(), map, "",
SharedFunctionInfo::DebugName(
- handle(function->shared(), isolate))));
+ isolate, handle(function->shared(), isolate))));
}
}
@@ -820,6 +824,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_DATA_VIEW_TYPE:
+ case JS_RAB_GSAB_DATA_VIEW_TYPE:
case JS_DATE_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_FUNCTION_TYPE:
@@ -898,7 +903,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case BYTECODE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case CELL_TYPE:
- case CODE_TYPE:
+ case INSTRUCTION_STREAM_TYPE:
case FILLER_TYPE:
case FIXED_ARRAY_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
@@ -1103,7 +1108,7 @@ int TypedArrayElementsKindToRabGsabCtorIndex(ElementsKind elements_kind) {
} // namespace
-MaybeHandle<Map> JSFunction::GetDerivedRabGsabMap(
+MaybeHandle<Map> JSFunction::GetDerivedRabGsabTypedArrayMap(
Isolate* isolate, Handle<JSFunction> constructor,
Handle<JSReceiver> new_target) {
MaybeHandle<Map> maybe_map = GetDerivedMap(isolate, constructor, new_target);
@@ -1132,6 +1137,28 @@ MaybeHandle<Map> JSFunction::GetDerivedRabGsabMap(
return rab_gsab_map;
}
+MaybeHandle<Map> JSFunction::GetDerivedRabGsabDataViewMap(
+ Isolate* isolate, Handle<JSReceiver> new_target) {
+ Handle<Context> context =
+ handle(isolate->context().native_context(), isolate);
+ Handle<JSFunction> constructor = handle(context->data_view_fun(), isolate);
+ MaybeHandle<Map> maybe_map = GetDerivedMap(isolate, constructor, new_target);
+ Handle<Map> map;
+ if (!maybe_map.ToHandle(&map)) {
+ return MaybeHandle<Map>();
+ }
+ if (*map == constructor->initial_map()) {
+ return handle(Map::cast(context->js_rab_gsab_data_view_map()), isolate);
+ }
+
+ // This only happens when subclassing DataViews. Create a new map with the
+ // JS_RAB_GSAB_DATA_VIEW instance type. Note: the map is not cached and
+ // reused -> every data view gets a unique map, making ICs slow.
+ Handle<Map> rab_gsab_map = Map::Copy(isolate, map, "RAB / GSAB");
+ rab_gsab_map->set_instance_type(JS_RAB_GSAB_DATA_VIEW_TYPE);
+ return rab_gsab_map;
+}
+
int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
CHECK(has_initial_map());
if (initial_map().IsInobjectSlackTrackingInProgress()) {
@@ -1192,7 +1219,8 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
GetDataProperty(isolate, function, isolate->factory()->name_string());
if (name->IsString()) return Handle<String>::cast(name);
}
- return SharedFunctionInfo::DebugName(handle(function->shared(), isolate));
+ return SharedFunctionInfo::DebugName(isolate,
+ handle(function->shared(), isolate));
}
bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
@@ -1221,8 +1249,7 @@ bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
namespace {
Handle<String> NativeCodeFunctionSourceString(
- Handle<SharedFunctionInfo> shared_info) {
- Isolate* const isolate = shared_info->GetIsolate();
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
IncrementalStringBuilder builder(isolate);
builder.AppendCStringLiteral("function ");
builder.AppendString(handle(shared_info->Name(), isolate));
@@ -1239,26 +1266,28 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
// Check if {function} should hide its source code.
if (!shared_info->IsUserJavaScript()) {
- return NativeCodeFunctionSourceString(shared_info);
+ return NativeCodeFunctionSourceString(isolate, shared_info);
}
- // Check if we should print {function} as a class.
- Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
- isolate, function, isolate->factory()->class_positions_symbol());
- if (maybe_class_positions->IsClassPositions()) {
- ClassPositions class_positions =
- ClassPositions::cast(*maybe_class_positions);
- int start_position = class_positions.start();
- int end_position = class_positions.end();
- Handle<String> script_source(
- String::cast(Script::cast(shared_info->script()).source()), isolate);
- return isolate->factory()->NewSubString(script_source, start_position,
- end_position);
+ if (IsClassConstructor(shared_info->kind())) {
+ // Check if we should print {function} as a class.
+ Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
+ isolate, function, isolate->factory()->class_positions_symbol());
+ if (maybe_class_positions->IsClassPositions()) {
+ ClassPositions class_positions =
+ ClassPositions::cast(*maybe_class_positions);
+ int start_position = class_positions.start();
+ int end_position = class_positions.end();
+ Handle<String> script_source(
+ String::cast(Script::cast(shared_info->script()).source()), isolate);
+ return isolate->factory()->NewSubString(script_source, start_position,
+ end_position);
+ }
}
// Check if we have source code for the {function}.
if (!shared_info->HasSourceCode()) {
- return NativeCodeFunctionSourceString(shared_info);
+ return NativeCodeFunctionSourceString(isolate, shared_info);
}
// If this function was compiled from asm.js, use the recorded offset
@@ -1286,10 +1315,10 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
// giving inconsistent call behaviour.
isolate->CountUsage(
v8::Isolate::UseCounterFeature::kFunctionTokenOffsetTooLongForToString);
- return NativeCodeFunctionSourceString(shared_info);
+ return NativeCodeFunctionSourceString(isolate, shared_info);
}
return Handle<String>::cast(
- SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
+ SharedFunctionInfo::GetSourceCodeHarmony(isolate, shared_info));
}
// static
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 0361d912c3..da353d2a66 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -118,20 +118,14 @@ class JSFunction : public TorqueGeneratedJSFunction<
// optimized code object, or when reading from the background thread.
// Storing a builtin doesn't require release semantics because these objects
// are fully initialized.
- DECL_ACCESSORS(code, CodeT)
- DECL_RELEASE_ACQUIRE_ACCESSORS(code, CodeT)
-#ifdef V8_EXTERNAL_CODE_SPACE
- // Convenient overloads to avoid unnecessary Code <-> CodeT conversions.
- // TODO(v8:11880): remove once |code| accessors are migrated to CodeT.
- inline void set_code(Code code, ReleaseStoreTag,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-#endif
+ DECL_ACCESSORS(code, Code)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
// Returns the address of the function code's instruction start.
inline Address code_entry_point() const;
// Get the abstract code associated with the function, which will either be
- // a Code object or a BytecodeArray.
+  // an InstructionStream object or a BytecodeArray.
template <typename IsolateT>
inline AbstractCode abstract_code(IsolateT* isolate);
@@ -272,7 +266,7 @@ class JSFunction : public TorqueGeneratedJSFunction<
Handle<Map> map, Handle<HeapObject> prototype);
static void SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
Handle<Map> map, Handle<HeapObject> prototype,
- Handle<JSFunction> constructor);
+ Handle<HeapObject> constructor);
DECL_GETTER(has_initial_map, bool)
V8_EXPORT_PRIVATE static void EnsureHasInitialMap(
Handle<JSFunction> function);
@@ -285,10 +279,15 @@ class JSFunction : public TorqueGeneratedJSFunction<
Handle<JSReceiver> new_target);
// Like GetDerivedMap, but returns a map with a RAB / GSAB ElementsKind.
- static V8_WARN_UNUSED_RESULT MaybeHandle<Map> GetDerivedRabGsabMap(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Map> GetDerivedRabGsabTypedArrayMap(
Isolate* isolate, Handle<JSFunction> constructor,
Handle<JSReceiver> new_target);
+ // Like GetDerivedMap, but can be used for DataViews for retrieving / creating
+ // a map with a JS_RAB_GSAB_DATA_VIEW instance type.
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Map> GetDerivedRabGsabDataViewMap(
+ Isolate* isolate, Handle<JSReceiver> new_target);
+
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
// map. Otherwise, the prototype is put in the initial map field
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
index 5ffed87356..abaeac1b56 100644
--- a/deps/v8/src/objects/js-function.tq
+++ b/deps/v8/src/objects/js-function.tq
@@ -33,8 +33,7 @@ extern class JSFunction extends JSFunctionOrBoundFunctionOrWrappedFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
feedback_cell: FeedbackCell;
- @if(V8_EXTERNAL_CODE_SPACE) code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) code: Code;
+ code: Code;
// Space for the following field may or may not be allocated.
prototype_or_initial_map: JSReceiver|Map;
}
diff --git a/deps/v8/src/objects/js-iterator-helpers-inl.h b/deps/v8/src/objects/js-iterator-helpers-inl.h
new file mode 100644
index 0000000000..a9121c6a70
--- /dev/null
+++ b/deps/v8/src/objects/js-iterator-helpers-inl.h
@@ -0,0 +1,35 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_ITERATOR_HELPERS_INL_H_
+#define V8_OBJECTS_JS_ITERATOR_HELPERS_INL_H_
+
+#include "src/objects/js-iterator-helpers.h"
+#include "src/objects/objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-iterator-helpers-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSIteratorHelper)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSIteratorMapHelper)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSIteratorFilterHelper)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSIteratorTakeHelper)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSIteratorDropHelper)
+
+CAST_ACCESSOR(JSIteratorMapHelper)
+CAST_ACCESSOR(JSIteratorFilterHelper)
+CAST_ACCESSOR(JSIteratorTakeHelper)
+CAST_ACCESSOR(JSIteratorDropHelper)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_ITERATOR_HELPERS_INL_H_
diff --git a/deps/v8/src/objects/js-iterator-helpers.h b/deps/v8/src/objects/js-iterator-helpers.h
new file mode 100644
index 0000000000..ff22b5f7d9
--- /dev/null
+++ b/deps/v8/src/objects/js-iterator-helpers.h
@@ -0,0 +1,112 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_ITERATOR_HELPERS_H_
+#define V8_OBJECTS_JS_ITERATOR_HELPERS_H_
+
+#include "src/objects/js-objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-iterator-helpers-tq.inc"
+
+// Iterator helpers are iterators that transform an underlying iterator in some
+// way. They are specified as spec generators. That is, the spec defines the
+// body of iterator helpers using algorithm steps with yields (like JS
+// generators) packaged in an Abstract Closure, and then makes a generator
+// object internally. Generator machinery such as GeneratorResume [1] are then
+// used to specify %IteratorHelperPrototype%.{next,return}. While this aids
+// understandability of the specification, it is not conducive to ease of
+// implementation or performance in V8.
+//
+// Instead, each iterator helper is implemented as an iterator directly, with
+// JSIteratorHelper acting as a superclass to multiplex the various kinds of
+// helpers.
+//
+// Each helper has its own Torque class to hold the state it needs. (In the
+// spec, the state is captured in the Abstract Closures.) The classes are named
+// after the name of the method that produces them. E.g., the iterator helper
+// returned by Iterator.prototype.map is named JSIteratorMapHelper, and has
+// fields for the underlying iterator, the mapper function, and a counter.
+//
+// The algorithm steps in the body of the Abstract Closure in the
+// specification are implemented directly as next() (and return(), if
+// necessary) builtin methods. E.g., the map helper's body is implemented as
+// Builtin::kIteratorMapHelperNext.
+//
+// All iterator helper objects have %IteratorHelperPrototype% as their
+// [[Prototype]]. The implementations of %IteratorHelperPrototype%.{next,return}
+// multiplex, typeswitching over all known iterator helpers and manually calling
+// their next() (and return(), if necessary) builtins. E.g., Calling next() on
+// JSIteratorMapHelper would ultimately call Builtin::kIteratorMapHelperNext.
+//
+// [1] https://tc39.es/ecma262/#sec-generatorresume
+
+// The superclass of all iterator helpers.
+class JSIteratorHelper
+ : public TorqueGeneratedJSIteratorHelper<JSIteratorHelper, JSObject> {
+ public:
+ void JSIteratorHelperPrintHeader(std::ostream& os, const char* helper_name);
+
+ TQ_OBJECT_CONSTRUCTORS(JSIteratorHelper)
+};
+
+// The iterator helper returned by Iterator.prototype.map.
+class JSIteratorMapHelper
+ : public TorqueGeneratedJSIteratorMapHelper<JSIteratorMapHelper,
+ JSIteratorHelper> {
+ public:
+ DECL_CAST(JSIteratorMapHelper)
+ DECL_PRINTER(JSIteratorMapHelper)
+ DECL_VERIFIER(JSIteratorMapHelper)
+
+ TQ_OBJECT_CONSTRUCTORS(JSIteratorMapHelper)
+};
+
+// The iterator helper returned by Iterator.prototype.filter.
+class JSIteratorFilterHelper
+ : public TorqueGeneratedJSIteratorFilterHelper<JSIteratorFilterHelper,
+ JSIteratorHelper> {
+ public:
+ DECL_CAST(JSIteratorFilterHelper)
+ DECL_PRINTER(JSIteratorFilterHelper)
+ DECL_VERIFIER(JSIteratorFilterHelper)
+
+ TQ_OBJECT_CONSTRUCTORS(JSIteratorFilterHelper)
+};
+
+// The iterator helper returned by Iterator.prototype.take.
+class JSIteratorTakeHelper
+ : public TorqueGeneratedJSIteratorTakeHelper<JSIteratorTakeHelper,
+ JSIteratorHelper> {
+ public:
+ DECL_CAST(JSIteratorTakeHelper)
+ DECL_PRINTER(JSIteratorTakeHelper)
+ DECL_VERIFIER(JSIteratorTakeHelper)
+
+ TQ_OBJECT_CONSTRUCTORS(JSIteratorTakeHelper)
+};
+
+// The iterator helper returned by Iterator.prototype.drop.
+class JSIteratorDropHelper
+ : public TorqueGeneratedJSIteratorDropHelper<JSIteratorDropHelper,
+ JSIteratorHelper> {
+ public:
+ DECL_CAST(JSIteratorDropHelper)
+ DECL_PRINTER(JSIteratorDropHelper)
+ DECL_VERIFIER(JSIteratorDropHelper)
+
+ TQ_OBJECT_CONSTRUCTORS(JSIteratorDropHelper)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_ITERATOR_HELPERS_H_
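
The header comment above explains the design: each helper is a plain iterator-like object with its own state class, and the shared %IteratorHelperPrototype%.next implementation typeswitches over the concrete helper kinds. The real dispatch lives in Torque builtins that are not part of this file; the self-contained C++ sketch below (all names invented) only mirrors the shape of that multiplexing for two of the helpers.

#include <functional>
#include <iostream>
#include <optional>
#include <type_traits>
#include <variant>
#include <vector>

// Per-helper state, loosely analogous to the Torque classes added below.
struct MapState { std::function<int(int)> mapper; };
struct TakeState { int remaining; };
using HelperState = std::variant<MapState, TakeState>;

struct IteratorHelperSketch {
  std::vector<int>::const_iterator it, end;  // the "underlying" iterator
  HelperState state;

  // One next() entry point that typeswitches over the concrete helper kind.
  std::optional<int> Next() {
    return std::visit(
        [&](auto& s) -> std::optional<int> {
          using T = std::decay_t<decltype(s)>;
          if constexpr (std::is_same_v<T, MapState>) {
            if (it == end) return std::nullopt;
            return s.mapper(*it++);
          } else {  // TakeState
            if (s.remaining == 0 || it == end) return std::nullopt;
            --s.remaining;
            return *it++;
          }
        },
        state);
  }
};

int main() {
  const std::vector<int> data{1, 2, 3, 4};
  IteratorHelperSketch take{data.begin(), data.end(), TakeState{2}};
  while (auto v = take.Next()) std::cout << *v << " ";  // 1 2
  std::cout << "\n";
  return 0;
}
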
diff --git a/deps/v8/src/objects/js-iterator-helpers.tq b/deps/v8/src/objects/js-iterator-helpers.tq
new file mode 100644
index 0000000000..8807c4986a
--- /dev/null
+++ b/deps/v8/src/objects/js-iterator-helpers.tq
@@ -0,0 +1,26 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+@abstract
+extern class JSIteratorHelper extends JSObject {
+ underlying: iterator::IteratorRecord;
+}
+
+extern class JSIteratorMapHelper extends JSIteratorHelper {
+ mapper: Callable;
+ counter: Number;
+}
+
+extern class JSIteratorFilterHelper extends JSIteratorHelper {
+ predicate: Callable;
+ counter: Number;
+}
+
+extern class JSIteratorTakeHelper extends JSIteratorHelper {
+ remaining: Number;
+}
+
+extern class JSIteratorDropHelper extends JSIteratorHelper {
+ remaining: Number;
+}
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index e93eb8a804..11eef76f49 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -531,7 +531,7 @@ MaybeHandle<JSArray> JSLocale::Collations(Isolate* isolate,
icu::Locale icu_locale(*(locale->icu_locale().raw()));
return GetKeywordValuesFromLocale<icu::Collator>(
isolate, "collations", "co", icu_locale, Intl::RemoveCollation, true,
- false);
+ true);
}
MaybeHandle<JSArray> JSLocale::HourCycles(Isolate* isolate,
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 606fd3caf0..42b85cb662 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -1442,8 +1442,32 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
}
+ int rounding_increment = 1;
+ if (v8_flags.harmony_intl_number_format_v3) {
+ // 18. Let roundingIncrement be ? GetNumberOption(options,
+ // "roundingIncrement,", 1, 5000, 1).
+ Maybe<int> maybe_rounding_increment = GetNumberOption(
+ isolate, options, factory->roundingIncrement_string(), 1, 5000, 1);
+ if (!maybe_rounding_increment.To(&rounding_increment)) {
+ return MaybeHandle<JSNumberFormat>();
+ }
+
+ // 19. If roundingIncrement is not in « 1, 2, 5, 10, 20, 25, 50, 100, 200,
+ // 250, 500, 1000, 2000, 2500, 5000 », throw a RangeError exception.
+ if (!IsValidRoundingIncrement(rounding_increment)) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
+ factory->roundingIncrement_string()),
+ JSNumberFormat);
+ }
+ // 20. If roundingIncrement is not 1, set mxfdDefault to mnfdDefault.
+ if (rounding_increment != 1) {
+ mxfd_default = mnfd_default;
+ }
+ }
+
Notation notation = Notation::STANDARD;
- // 18. Let notation be ? GetOption(options, "notation", "string", «
+ // 21. Let notation be ? GetOption(options, "notation", "string", «
// "standard", "scientific", "engineering", "compact" », "standard").
MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, notation,
@@ -1454,9 +1478,9 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Notation::COMPACT},
Notation::STANDARD),
Handle<JSNumberFormat>());
- // 19. Set numberFormat.[[Notation]] to notation.
+ // 22. Set numberFormat.[[Notation]] to notation.
- // 20. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
+ // 23. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
// mnfdDefault, mxfdDefault).
Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
@@ -1466,32 +1490,16 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
if (v8_flags.harmony_intl_number_format_v3) {
- // 21. Let roundingIncrement be ? GetNumberOption(options,
- // "roundingIncrement,", 1, 5000, 1).
- int rounding_increment = 1;
- Maybe<int> maybe_rounding_increment = GetNumberOption(
- isolate, options, factory->roundingIncrement_string(), 1, 5000, 1);
- MAYBE_RETURN(maybe_rounding_increment, MaybeHandle<JSNumberFormat>());
- CHECK(maybe_rounding_increment.To(&rounding_increment));
-
- // 22. If roundingIncrement is not in « 1, 2, 5, 10, 20, 25, 50, 100, 200,
- // 250, 500, 1000, 2000, 2500, 5000 », throw a RangeError exception.
- if (!IsValidRoundingIncrement(rounding_increment)) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kPropertyValueOutOfRange,
- factory->roundingIncrement_string()),
- JSNumberFormat);
- }
+ // 24. If roundingIncrement is not 1, then
if (rounding_increment != 1) {
- // 23. If roundingIncrement is not 1 and numberFormat.[[RoundingType]] is
- // not fractionDigits, throw a TypeError exception.
+ // a. If numberFormat.[[RoundingType]] is not fractionDigits, throw a
+ // TypeError exception.
if (digit_options.rounding_type != Intl::RoundingType::kFractionDigits) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kBadRoundingType),
JSNumberFormat);
}
- // 24. If roundingIncrement is not 1 and
- // numberFormat.[[MaximumFractionDigits]] is not equal to
+ // b. If numberFormat.[[MaximumFractionDigits]] is not equal to
// numberFormat.[[MinimumFractionDigits]], throw a RangeError exception.
if (digit_options.maximum_fraction_digits !=
digit_options.minimum_fraction_digits) {
@@ -1504,7 +1512,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
}
- // 25. Set _numberFormat.[[RoundingIncrement]] to roundingIncrement.
+ // 25. Set numberFormat.[[RoundingIncrement]] to roundingIncrement.
// 26. Let trailingZeroDisplay be ? GetOption(options,
// "trailingZeroDisplay", "string", « "auto", "stripIfInteger" », "auto").
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 660f8c7a2e..d314c8aaee 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -44,6 +44,7 @@ JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
TQ_OBJECT_CONSTRUCTORS_IMPL(JSMessageObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSStringIterator)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSValidIteratorWrapper)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
@@ -100,12 +101,6 @@ MaybeHandle<HeapObject> JSReceiver::GetPrototype(Isolate* isolate,
// We don't expect access checks to be needed on JSProxy objects.
DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
- if (receiver->IsWasmObject()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kWasmObjectsAreOpaque),
- HeapObject);
- }
-
PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
PrototypeIterator::END_AT_NON_HIDDEN);
do {
@@ -432,10 +427,7 @@ void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
DCHECK(index.is_inobject());
DCHECK(value.IsShared());
SEQ_CST_WRITE_FIELD(*this, index.offset(), value);
- // JSSharedStructs are allocated in the shared old space, which is currently
- // collected by stopping the world, so the incremental write barrier is not
- // needed. They can only store Smis and other HeapObjects in the shared old
- // space, so the generational write barrier is also not needed.
+ CONDITIONAL_WRITE_BARRIER(*this, index.offset(), value, UPDATE_WRITE_BARRIER);
}
void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
@@ -462,7 +454,7 @@ void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(PropertyKind::kData, details.kind());
DisallowGarbageCollection no_gc;
- FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
+ FieldIndex index = FieldIndex::ForDetails(map(), details);
if (details.representation().IsDouble()) {
// Manipulating the signaling NaN used for the hole and uninitialized
// double field sentinel in C++, e.g. with base::bit_cast or
@@ -775,11 +767,14 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
}
DEF_GETTER(JSReceiver, HasFastProperties, bool) {
- DCHECK(raw_properties_or_hash(cage_base).IsSmi() ||
- ((raw_properties_or_hash(cage_base).IsGlobalDictionary(cage_base) ||
- raw_properties_or_hash(cage_base).IsNameDictionary(cage_base) ||
- raw_properties_or_hash(cage_base).IsSwissNameDictionary(
- cage_base)) == map(cage_base).is_dictionary_map()));
+ Object raw_properties_or_hash_obj =
+ raw_properties_or_hash(cage_base, kRelaxedLoad);
+ DCHECK(raw_properties_or_hash_obj.IsSmi() ||
+ ((raw_properties_or_hash_obj.IsGlobalDictionary(cage_base) ||
+ raw_properties_or_hash_obj.IsNameDictionary(cage_base) ||
+ raw_properties_or_hash_obj.IsSwissNameDictionary(cage_base)) ==
+ map(cage_base).is_dictionary_map()));
+ USE(raw_properties_or_hash_obj);
return !map(cage_base).is_dictionary_map();
}
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 15356b6c58..9289522189 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -10,6 +10,7 @@
#include "src/date/date.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
+#include "src/execution/isolate-utils.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/handles/maybe-handles.h"
@@ -46,6 +47,7 @@
#include "src/objects/js-duration-format.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-iterator-helpers-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-list-format.h"
#include "src/objects/js-locale.h"
@@ -239,6 +241,10 @@ Maybe<bool> JSReceiver::CheckPrivateNameStore(LookupIterator* it,
RETURN_FAILURE(isolate, GetShouldThrow(isolate, Nothing<ShouldThrow>()),
NewTypeError(MessageTemplate::kInvalidPrivateMemberWrite,
name_string, it->GetReceiver()));
+ } else if (it->GetReceiver()->IsAlwaysSharedSpaceJSObject()) {
+ RETURN_FAILURE(
+ isolate, kThrowOnError,
+ NewTypeError(MessageTemplate::kDefineDisallowed, name_string));
}
return Just(true);
}
@@ -256,7 +262,7 @@ Maybe<bool> JSReceiver::CheckIfCanDefine(Isolate* isolate, LookupIterator* it,
NewTypeError(MessageTemplate::kRedefineDisallowed, it->GetName()));
}
} else if (!JSObject::IsExtensible(
- Handle<JSObject>::cast(it->GetReceiver()))) {
+ isolate, Handle<JSObject>::cast(it->GetReceiver()))) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kDefineDisallowed, it->GetName()));
@@ -280,7 +286,7 @@ bool HasExcludedProperty(
}
V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
- Handle<JSReceiver> target, Handle<Object> source,
+ Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
PropertiesEnumerationMode mode,
const base::ScopedVector<Handle<Object>>* excluded_properties,
bool use_set) {
@@ -290,8 +296,6 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
return Just(!source->IsString() || String::cast(*source).length() == 0);
}
- Isolate* isolate = target->GetIsolate();
-
// If the target is deprecated, the object will be updated on first store. If
// the source for that store equals the target, this will invalidate the
// cached representation of the source. Preventively upgrade the target.
@@ -426,7 +430,7 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
const base::ScopedVector<Handle<Object>>* excluded_properties,
bool use_set) {
Maybe<bool> fast_assign =
- FastAssign(target, source, mode, excluded_properties, use_set);
+ FastAssign(isolate, target, source, mode, excluded_properties, use_set);
if (fast_assign.IsNothing()) return Nothing<bool>();
if (fast_assign.FromJust()) return Just(true);
@@ -451,7 +455,7 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
source_length = JSGlobalObject::cast(*from)
.global_dictionary(kAcquireLoad)
.NumberOfEnumerableProperties();
- } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ } else if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
source_length =
from->property_dictionary_swiss().NumberOfEnumerableProperties();
} else {
@@ -578,8 +582,8 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
if (maybe_constructor->IsJSFunction()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(maybe_constructor);
- Handle<String> name =
- SharedFunctionInfo::DebugName(handle(constructor->shared(), isolate));
+ Handle<String> name = SharedFunctionInfo::DebugName(
+ isolate, handle(constructor->shared(), isolate));
if (name->length() != 0 &&
!name->Equals(ReadOnlyRoots(isolate).Object_string())) {
return std::make_pair(constructor, name);
@@ -620,7 +624,7 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
if (maybe_constructor->IsJSFunction()) {
auto constructor = Handle<JSFunction>::cast(maybe_constructor);
auto name = SharedFunctionInfo::DebugName(
- handle(constructor->shared(), isolate));
+ isolate, handle(constructor->shared(), isolate));
if (name->length() != 0 &&
!name->Equals(ReadOnlyRoots(isolate).Object_string())) {
@@ -647,29 +651,40 @@ Handle<String> JSReceiver::GetConstructorName(Isolate* isolate,
return GetConstructorHelper(isolate, receiver).second;
}
-MaybeHandle<NativeContext> JSReceiver::GetCreationContext() {
- JSReceiver receiver = *this;
- // Externals are JSObjects with null as a constructor.
- DCHECK(!receiver.IsJSExternalObject());
- Object constructor = receiver.map().GetConstructor();
+base::Optional<NativeContext> JSReceiver::GetCreationContextRaw() {
+ DisallowGarbageCollection no_gc;
JSFunction function;
- if (constructor.IsJSFunction()) {
- function = JSFunction::cast(constructor);
- } else if (constructor.IsFunctionTemplateInfo()) {
- // Remote objects don't have a creation context.
- return MaybeHandle<NativeContext>();
- } else if (receiver.IsJSGeneratorObject()) {
- function = JSGeneratorObject::cast(receiver).function();
- } else if (receiver.IsJSFunction()) {
- function = JSFunction::cast(receiver);
- } else {
- return MaybeHandle<NativeContext>();
+ {
+ JSReceiver receiver = *this;
+ Map receiver_map = receiver.map();
+ InstanceType receiver_instance_type = receiver_map.instance_type();
+ if (V8_LIKELY(InstanceTypeChecker::IsJSFunction(receiver_instance_type))) {
+ function = JSFunction::cast(receiver);
+ } else if (InstanceTypeChecker::IsJSGeneratorObject(
+ receiver_instance_type)) {
+ function = JSGeneratorObject::cast(receiver).function();
+ } else {
+ // Externals are JSObjects with null as a constructor.
+ DCHECK(!receiver.IsJSExternalObject());
+ Object constructor = receiver_map.GetConstructor();
+ if (constructor.IsJSFunction()) {
+ function = JSFunction::cast(constructor);
+ } else {
+        // The constructor might be a FunctionTemplateInfo, but remote objects
+        // don't have a creation context. If the object doesn't have a
+        // constructor, we can't compute a creation context.
+ return {};
+ }
+ }
}
+ if (function.has_context()) return function.native_context();
+ return {};
+}
- return function.has_context()
- ? Handle<NativeContext>(function.native_context(),
- receiver.GetIsolate())
- : MaybeHandle<NativeContext>();
+MaybeHandle<NativeContext> JSReceiver::GetCreationContext() {
+ base::Optional<NativeContext> maybe_context = GetCreationContextRaw();
+ if (!maybe_context.has_value()) return {};
+ return handle(maybe_context.value(), GetIsolate());
}
// static
@@ -795,7 +810,7 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
return properties;
}
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
DCHECK(properties.IsSwissNameDictionary());
SwissNameDictionary::cast(properties).SetHash(hash);
} else {
@@ -815,9 +830,10 @@ int GetIdentityHashHelper(JSReceiver object) {
if (properties.IsPropertyArray()) {
return PropertyArray::cast(properties).Hash();
}
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL &&
- properties.IsSwissNameDictionary()) {
- return SwissNameDictionary::cast(properties).Hash();
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if (properties.IsSwissNameDictionary()) {
+ return SwissNameDictionary::cast(properties).Hash();
+ }
}
if (properties.IsNameDictionary()) {
@@ -920,7 +936,7 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
cell->ClearAndInvalidate(ReadOnlyRoots(isolate));
} else {
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
object->property_dictionary_swiss(), isolate);
@@ -1175,6 +1191,11 @@ Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
RETURN_FAILURE(isolate, kThrowOnError,
NewTypeError(MessageTemplate::kWasmObjectsAreOpaque));
}
+ if (object->IsAlwaysSharedSpaceJSObject()) {
+ return AlwaysSharedSpaceJSObject::DefineOwnProperty(
+ isolate, Handle<AlwaysSharedSpaceJSObject>::cast(object), key, desc,
+ should_throw);
+ }
// OrdinaryDefineOwnProperty, by virtue of calling
// DefineOwnPropertyIgnoreAttributes, can handle arguments
@@ -1433,7 +1454,7 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(
it->Restart();
// 3. Let extensible be the value of the [[Extensible]] internal slot of O.
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
- bool extensible = JSObject::IsExtensible(object);
+ bool extensible = JSObject::IsExtensible(isolate, object);
return ValidateAndApplyPropertyDescriptor(
isolate, it, extensible, desc, &current, should_throw, Handle<Name>());
@@ -1532,8 +1553,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
desc->enumerable() == current->enumerable()) &&
(!desc->has_configurable() ||
desc->configurable() == current->configurable()) &&
- (!desc->has_value() ||
- (current->has_value() && current->value()->SameValue(*desc->value()))) &&
+ !desc->has_value() &&
(!desc->has_writable() ||
(current->has_writable() && current->writable() == desc->writable())) &&
(!desc->has_get() ||
@@ -1613,7 +1633,11 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
}
// 7a ii. If Desc.[[Value]] is present and SameValue(Desc.[[Value]],
// current.[[Value]]) is false, return false.
- if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
+ if (desc->has_value()) {
+ // We'll succeed applying the property, but the value is already the
+ // same and the property is read-only, so skip actually writing the
+ // property. Otherwise we may try to e.g., write to frozen elements.
+ if (desc->value()->SameValue(*current->value())) return Just(true);
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
@@ -1727,7 +1751,7 @@ Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
Maybe<ShouldThrow> should_throw) {
DCHECK(!it->check_prototype_chain());
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- Isolate* isolate = receiver->GetIsolate();
+ Isolate* isolate = it->isolate();
if (receiver->IsJSObject()) {
return JSObject::CreateDataProperty(it, value, should_throw); // Shortcut.
@@ -1748,7 +1772,8 @@ Maybe<bool> JSReceiver::AddPrivateField(LookupIterator* it,
Handle<Object> value,
Maybe<ShouldThrow> should_throw) {
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- Isolate* isolate = receiver->GetIsolate();
+ DCHECK(!receiver->IsAlwaysSharedSpaceJSObject());
+ Isolate* isolate = it->isolate();
DCHECK(it->GetName()->IsPrivateName());
Handle<Symbol> symbol = Handle<Symbol>::cast(it->GetName());
@@ -1931,7 +1956,8 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
PropertyDescriptor::IsDataDescriptor(desc));
return Just(true);
}
-Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
+Maybe<bool> JSReceiver::SetIntegrityLevel(Isolate* isolate,
+ Handle<JSReceiver> receiver,
IntegrityLevel level,
ShouldThrow should_throw) {
DCHECK(level == SEALED || level == FROZEN);
@@ -1942,23 +1968,21 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
if (!object->HasSloppyArgumentsElements() &&
!object->IsJSModuleNamespace()) { // Fast path.
// Prevent memory leaks by not adding unnecessary transitions.
- Maybe<bool> test = JSObject::TestIntegrityLevel(object, level);
+ Maybe<bool> test = JSObject::TestIntegrityLevel(isolate, object, level);
MAYBE_RETURN(test, Nothing<bool>());
if (test.FromJust()) return test;
if (level == SEALED) {
- return JSObject::PreventExtensionsWithTransition<SEALED>(object,
- should_throw);
+ return JSObject::PreventExtensionsWithTransition<SEALED>(
+ isolate, object, should_throw);
} else {
- return JSObject::PreventExtensionsWithTransition<FROZEN>(object,
- should_throw);
+ return JSObject::PreventExtensionsWithTransition<FROZEN>(
+ isolate, object, should_throw);
}
}
}
- Isolate* isolate = receiver->GetIsolate();
-
- MAYBE_RETURN(JSReceiver::PreventExtensions(receiver, should_throw),
+ MAYBE_RETURN(JSReceiver::PreventExtensions(isolate, receiver, should_throw),
Nothing<bool>());
Handle<FixedArray> keys;
@@ -2003,16 +2027,15 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
}
namespace {
-Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
+Maybe<bool> GenericTestIntegrityLevel(Isolate* isolate,
+ Handle<JSReceiver> receiver,
PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
- Maybe<bool> extensible = JSReceiver::IsExtensible(receiver);
+ Maybe<bool> extensible = JSReceiver::IsExtensible(isolate, receiver);
MAYBE_RETURN(extensible, Nothing<bool>());
if (extensible.FromJust()) return Just(false);
- Isolate* isolate = receiver->GetIsolate();
-
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, keys, JSReceiver::OwnPropertyKeys(isolate, receiver),
@@ -2038,38 +2061,41 @@ Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
} // namespace
-Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> receiver,
+Maybe<bool> JSReceiver::TestIntegrityLevel(Isolate* isolate,
+ Handle<JSReceiver> receiver,
IntegrityLevel level) {
if (!receiver->map().IsCustomElementsReceiverMap()) {
- return JSObject::TestIntegrityLevel(Handle<JSObject>::cast(receiver),
- level);
+ return JSObject::TestIntegrityLevel(
+ isolate, Handle<JSObject>::cast(receiver), level);
}
- return GenericTestIntegrityLevel(receiver, level);
+ return GenericTestIntegrityLevel(isolate, receiver, level);
}
-Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
+Maybe<bool> JSReceiver::PreventExtensions(Isolate* isolate,
+ Handle<JSReceiver> object,
ShouldThrow should_throw) {
if (object->IsJSProxy()) {
return JSProxy::PreventExtensions(Handle<JSProxy>::cast(object),
should_throw);
}
if (object->IsWasmObject()) {
- RETURN_FAILURE(object->GetIsolate(), kThrowOnError,
+ RETURN_FAILURE(isolate, kThrowOnError,
NewTypeError(MessageTemplate::kWasmObjectsAreOpaque));
}
DCHECK(object->IsJSObject());
- return JSObject::PreventExtensions(Handle<JSObject>::cast(object),
+ return JSObject::PreventExtensions(isolate, Handle<JSObject>::cast(object),
should_throw);
}
-Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
+Maybe<bool> JSReceiver::IsExtensible(Isolate* isolate,
+ Handle<JSReceiver> object) {
if (object->IsJSProxy()) {
return JSProxy::IsExtensible(Handle<JSProxy>::cast(object));
}
if (object->IsWasmObject()) {
return Just(false);
}
- return Just(JSObject::IsExtensible(Handle<JSObject>::cast(object)));
+ return Just(JSObject::IsExtensible(isolate, Handle<JSObject>::cast(object)));
}
// static
@@ -2366,6 +2392,18 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, initial_map,
JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
+ constexpr int initial_capacity = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? SwissNameDictionary::kInitialCapacity
+ : NameDictionary::kInitialCapacity;
+ Handle<JSObject> result = isolate->factory()->NewFastOrSlowJSObjectFromMap(
+ initial_map, initial_capacity, AllocationType::kYoung, site);
+ return result;
+}
+
+// static
+MaybeHandle<JSObject> JSObject::NewWithMap(Isolate* isolate,
+ Handle<Map> initial_map,
+ Handle<AllocationSite> site) {
int initial_capacity = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
? SwissNameDictionary::kInitialCapacity
: NameDictionary::kInitialCapacity;
@@ -2473,6 +2511,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSTypedArray::kHeaderSize;
case JS_DATA_VIEW_TYPE:
return JSDataView::kHeaderSize;
+ case JS_RAB_GSAB_DATA_VIEW_TYPE:
+ return JSRabGsabDataView::kHeaderSize;
case JS_SET_TYPE:
return JSSet::kHeaderSize;
case JS_MAP_TYPE:
@@ -2506,6 +2546,14 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSShadowRealm::kHeaderSize;
case JS_STRING_ITERATOR_TYPE:
return JSStringIterator::kHeaderSize;
+ case JS_ITERATOR_MAP_HELPER_TYPE:
+ return JSIteratorMapHelper::kHeaderSize;
+ case JS_ITERATOR_FILTER_HELPER_TYPE:
+ return JSIteratorFilterHelper::kHeaderSize;
+ case JS_ITERATOR_TAKE_HELPER_TYPE:
+ return JSIteratorTakeHelper::kHeaderSize;
+ case JS_ITERATOR_DROP_HELPER_TYPE:
+ return JSIteratorDropHelper::kHeaderSize;
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kHeaderSize;
case JS_SHARED_ARRAY_TYPE:
@@ -2536,6 +2584,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSTemporalTimeZone::kHeaderSize;
case JS_TEMPORAL_ZONED_DATE_TIME_TYPE:
return JSTemporalZonedDateTime::kHeaderSize;
+ case JS_VALID_ITERATOR_WRAPPER_TYPE:
+ return JSValidIteratorWrapper::kHeaderSize;
case JS_WRAPPED_FUNCTION_TYPE:
return JSWrappedFunction::kHeaderSize;
case JS_RAW_JSON_TYPE:
@@ -2760,7 +2810,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
DCHECK_EQ(dictionary->CellAt(entry).value(), *value);
}
} else {
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
object->property_dictionary_swiss(), isolate);
InternalIndex entry = dictionary->FindEntry(isolate, *name);
@@ -2790,6 +2840,10 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
details = details.set_index(enumeration_index);
dictionary->SetEntry(entry, *name, *value, details);
}
+ // TODO(pthier): Add flags to swiss dictionaries.
+ if (name->IsInterestingSymbol()) {
+ dictionary->set_may_have_interesting_symbols(true);
+ }
}
}
}
@@ -3035,7 +3089,7 @@ void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
reinterpret_cast<void*>(new_map->ptr()));
}
if (was_registered) {
- if (new_map->prototype_info().IsPrototypeInfo()) {
+ if (new_map->has_prototype_info()) {
// The new map isn't registered with its prototype yet; reflect this fact
// in the PrototypeInfo it just inherited from the old map.
PrototypeInfo::cast(new_map->prototype_info())
@@ -3099,8 +3153,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// Check if we still have space in the {object}, in which case we
// can also simply set the map (modulo a special case for mutable
// double boxes).
- FieldIndex index =
- FieldIndex::ForDescriptor(isolate, *new_map, new_map->LastAdded());
+ FieldIndex index = FieldIndex::ForDetails(*new_map, details);
if (index.is_inobject() || index.outobject_array_index() <
object->property_array(isolate).length()) {
// Allocate HeapNumbers for double fields.
@@ -3198,7 +3251,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
}
} else {
DCHECK_EQ(PropertyLocation::kField, old_details.location());
- FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
+ FieldIndex index = FieldIndex::ForDetails(*old_map, old_details);
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
DCHECK_IMPLIES(old_representation.IsNone(),
@@ -3288,15 +3341,15 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
property_count += expected_additional_properties;
} else {
// Make space for two more properties.
- int initial_capacity = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
- ? SwissNameDictionary::kInitialCapacity
- : NameDictionary::kInitialCapacity;
+ constexpr int initial_capacity = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? SwissNameDictionary::kInitialCapacity
+ : NameDictionary::kInitialCapacity;
property_count += initial_capacity;
}
Handle<NameDictionary> dictionary;
Handle<SwissNameDictionary> ord_dictionary;
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
ord_dictionary = isolate->factory()->NewSwissNameDictionary(property_count);
} else {
dictionary = isolate->factory()->NewNameDictionary(property_count);
@@ -3308,7 +3361,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
Handle<Name> key(descs->GetKey(isolate, i), isolate);
Handle<Object> value;
if (details.location() == PropertyLocation::kField) {
- FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
+ FieldIndex index = FieldIndex::ForDetails(*map, details);
if (details.kind() == PropertyKind::kData) {
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (details.representation().IsDouble()) {
@@ -3331,7 +3384,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
: PropertyConstness::kMutable;
PropertyDetails d(details.kind(), details.attributes(), constness);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
ord_dictionary =
SwissNameDictionary::Add(isolate, ord_dictionary, key, value, d);
} else {
@@ -3339,9 +3392,12 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
}
}
- if (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
// Copy the next enumeration index from instance descriptor.
dictionary->set_next_enumeration_index(real_size + 1);
+ // TODO(pthier): Add flags to swiss dictionaries.
+ dictionary->set_may_have_interesting_symbols(
+ map->may_have_interesting_symbols());
}
// From here on we cannot fail and we shouldn't GC anymore.
@@ -3364,7 +3420,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// the left-over space to avoid races with the sweeper thread.
object->set_map(*new_map, kReleaseStore);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
object->SetProperties(*ord_dictionary);
} else {
object->SetProperties(*dictionary);
@@ -3493,7 +3549,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
PropertyDetails details = descriptors->GetDetails(i);
Representation representation = details.representation();
if (!representation.IsDouble()) continue;
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ FieldIndex index = FieldIndex::ForDetails(*map, details);
auto box = isolate->factory()->NewHeapNumberWithHoleNaN();
if (index.is_inobject()) {
storage->set(index.property_index(), *box);
@@ -3765,7 +3821,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<NameDictionary> dictionary;
Handle<SwissNameDictionary> swiss_dictionary;
int number_of_elements;
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
swiss_dictionary = handle(object->property_dictionary_swiss(), isolate);
number_of_elements = swiss_dictionary->NumberOfElements();
} else {
@@ -3779,7 +3835,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<FixedArray> iteration_order;
int iteration_length;
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
// |iteration_order| remains empty handle, we don't need it.
iteration_length = swiss_dictionary->UsedCapacity();
} else {
@@ -3793,7 +3849,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
ReadOnlyRoots roots(isolate);
for (int i = 0; i < iteration_length; i++) {
PropertyKind kind;
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
InternalIndex index(swiss_dictionary->EntryForEnumerationIndex(i));
Object key = swiss_dictionary->KeyAt(index);
if (!SwissNameDictionary::IsKey(roots, key)) {
@@ -3868,7 +3924,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Object value;
PropertyDetails details = PropertyDetails::Empty();
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
InternalIndex index(swiss_dictionary->EntryForEnumerationIndex(i));
Object key_obj = swiss_dictionary->KeyAt(index);
if (!SwissNameDictionary::IsKey(roots, key_obj)) {
@@ -4060,7 +4116,7 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
DCHECK(result->IsBoolean());
args.AcceptSideEffects();
- // Rebox CustomArguments::kReturnValueOffset before returning.
+ // Rebox CustomArguments::kReturnValueIndex before returning.
return Just(result->IsTrue(isolate));
}
@@ -4069,8 +4125,7 @@ Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
Maybe<ShouldThrow> should_throw) {
DCHECK(it->GetReceiver()->IsJSObject());
MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- Isolate* isolate = receiver->GetIsolate();
+ Isolate* isolate = it->isolate();
Maybe<bool> can_define =
JSReceiver::CheckIfCanDefine(isolate, it, value, should_throw);
@@ -4132,7 +4187,7 @@ bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
return TestFastPropertiesIntegrityLevel(object.map(), level);
}
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
return TestDictionaryPropertiesIntegrityLevel(
object.property_dictionary_swiss(), object.GetReadOnlyRoots(), level);
} else {
@@ -4177,21 +4232,22 @@ bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
} // namespace
-Maybe<bool> JSObject::TestIntegrityLevel(Handle<JSObject> object,
+Maybe<bool> JSObject::TestIntegrityLevel(Isolate* isolate,
+ Handle<JSObject> object,
IntegrityLevel level) {
if (!object->map().IsCustomElementsReceiverMap() &&
!object->HasSloppyArgumentsElements()) {
return Just(FastTestIntegrityLevel(*object, level));
}
- return GenericTestIntegrityLevel(Handle<JSReceiver>::cast(object), level);
+ return GenericTestIntegrityLevel(isolate, Handle<JSReceiver>::cast(object),
+ level);
}
-Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
+Maybe<bool> JSObject::PreventExtensions(Isolate* isolate,
+ Handle<JSObject> object,
ShouldThrow should_throw) {
- Isolate* isolate = object->GetIsolate();
-
if (!object->HasSloppyArgumentsElements()) {
- return PreventExtensionsWithTransition<NONE>(object, should_throw);
+ return PreventExtensionsWithTransition<NONE>(isolate, object, should_throw);
}
if (object->IsAccessCheckNeeded() &&
@@ -4208,8 +4264,8 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
PrototypeIterator iter(isolate, object);
if (iter.IsAtEnd()) return Just(true);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter),
- should_throw);
+ return PreventExtensions(
+ isolate, PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
}
if (object->map().has_named_interceptor() ||
@@ -4242,8 +4298,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
return Just(true);
}
-bool JSObject::IsExtensible(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
+bool JSObject::IsExtensible(Isolate* isolate, Handle<JSObject> object) {
if (object->IsAccessCheckNeeded() &&
!isolate->MayAccess(handle(isolate->context(), isolate), object)) {
return true;
@@ -4310,7 +4365,7 @@ Handle<NumberDictionary> CreateElementDictionary(Isolate* isolate,
template <PropertyAttributes attrs>
Maybe<bool> JSObject::PreventExtensionsWithTransition(
- Handle<JSObject> object, ShouldThrow should_throw) {
+ Isolate* isolate, Handle<JSObject> object, ShouldThrow should_throw) {
static_assert(attrs == NONE || attrs == SEALED || attrs == FROZEN);
// Sealing/freezing sloppy arguments or namespace objects should be handled
@@ -4318,7 +4373,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
DCHECK(!object->HasSloppyArgumentsElements());
DCHECK_IMPLIES(object->IsJSModuleNamespace(), attrs == NONE);
- Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
!isolate->MayAccess(handle(isolate->context(), isolate), object)) {
isolate->ReportFailedAccessCheck(object);
@@ -4344,7 +4398,18 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (iter.IsAtEnd()) return Just(true);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return PreventExtensionsWithTransition<attrs>(
- PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
+ isolate, PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
+ }
+
+ // Shared objects are designed to have fixed layout, i.e. their maps are
+  // effectively immutable. They are constructed sealed, but the semantics of
+ // ordinary ECMAScript objects allow sealed to be upgraded to frozen. This
+ // upgrade violates the fixed layout invariant and is disallowed.
+ if (object->IsAlwaysSharedSpaceJSObject()) {
+ DCHECK(FastTestIntegrityLevel(*object, SEALED));
+ if (attrs != FROZEN) return Just(true);
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kCannotFreeze));
}
if (object->map().has_named_interceptor() ||
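The new early return above treats a redundant seal of a shared, fixed-layout object as a no-op but rejects the sealed-to-frozen upgrade. A hedged, self-contained sketch of that decision, with a simplified enum standing in for V8's PropertyAttributes and error machinery:

#include <iostream>

enum class IntegrityLevel { kNone, kSealed, kFrozen };

struct Result {
  bool ok;
  const char* message;
};

// The rule the hunk enforces: a fixed-layout object is already sealed, so
// sealing it again succeeds trivially, while freezing is rejected because it
// would change property attributes and therefore the (shared) map.
Result PreventExtensionsWithTransition(bool is_fixed_layout,
                                       IntegrityLevel requested) {
  if (is_fixed_layout) {
    if (requested != IntegrityLevel::kFrozen) return {true, "already sealed"};
    return {false, "TypeError: cannot freeze a fixed-layout object"};
  }
  return {true, "transitioned"};
}

int main() {
  std::cout << PreventExtensionsWithTransition(true, IntegrityLevel::kSealed)
                   .message
            << "\n"
            << PreventExtensionsWithTransition(true, IntegrityLevel::kFrozen)
                   .message
            << "\n";
}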
@@ -4452,7 +4517,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
- } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ } else if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
object->property_dictionary_swiss(), isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
@@ -4524,7 +4589,7 @@ Handle<Object> JSObject::DictionaryPropertyAt(Isolate* isolate,
Handle<JSObject> object,
InternalIndex dict_index) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dict = object->property_dictionary_swiss();
return handle(dict.ValueAt(dict_index), isolate);
} else {
@@ -4542,7 +4607,7 @@ base::Optional<Object> JSObject::DictionaryPropertyAt(Handle<JSObject> object,
if (heap->IsPendingAllocation(HeapObject::cast(backing_store))) return {};
base::Optional<Object> maybe_obj;
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
if (!backing_store.IsSwissNameDictionary()) return {};
maybe_obj = SwissNameDictionary::cast(backing_store).TryValueAt(dict_index);
} else {
@@ -4728,7 +4793,7 @@ Object JSObject::SlowReverseLookup(Object value) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(PropertyKind::kData, details.kind());
- FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
+ FieldIndex field_index = FieldIndex::ForDetails(map(), details);
Object property = RawFastPropertyAt(field_index);
if (field_index.is_double()) {
DCHECK(property.IsHeapNumber());
@@ -4752,7 +4817,7 @@ Object JSObject::SlowReverseLookup(Object value) {
return JSGlobalObject::cast(*this)
.global_dictionary(kAcquireLoad)
.SlowReverseLookup(value);
- } else if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ } else if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
return property_dictionary_swiss().SlowReverseLookup(GetIsolate(), value);
} else {
return property_dictionary().SlowReverseLookup(value);
@@ -4779,7 +4844,7 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
where_to_start);
!iter.IsAtEnd(); iter.Advance()) {
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
- if (!current->IsJSObject()) return;
+ if (!current->IsJSObjectThatCanBeTrackedAsPrototype()) return;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
Map current_map = current_obj->map();
if (current_map.is_prototype_map()) {
@@ -4793,39 +4858,40 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
}
}
-static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
+static bool PrototypeBenefitsFromNormalization(JSObject object) {
DisallowGarbageCollection no_gc;
- if (!object->HasFastProperties()) return false;
- if (object->IsJSGlobalProxy()) return false;
+ if (!object.HasFastProperties()) return false;
+ if (object.IsJSGlobalProxy()) return false;
// TODO(v8:11248) make bootstrapper create dict mode prototypes, too?
- if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
+ if (object.GetIsolate()->bootstrapper()->IsActive()) return false;
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) return true;
- return !object->map().is_prototype_map() ||
- !object->map().should_be_fast_prototype_map();
+ return !object.map().is_prototype_map() ||
+ !object.map().should_be_fast_prototype_map();
}
// static
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
bool enable_setup_mode) {
- Isolate* isolate = object->GetIsolate();
+ DCHECK(object->IsJSObjectThatCanBeTrackedAsPrototype());
if (object->IsJSGlobalObject()) return;
- if (object->map(isolate).is_prototype_map()) {
- if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->map().is_prototype_map()) {
+ if (enable_setup_mode && PrototypeBenefitsFromNormalization(*object)) {
// This is the only way PrototypeBenefitsFromNormalization can be true:
- DCHECK(!object->map(isolate).should_be_fast_prototype_map());
+ DCHECK(!object->map().should_be_fast_prototype_map());
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
constexpr bool kUseCache = true;
JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES,
0, kUseCache, "NormalizeAsPrototype");
}
if (!V8_DICT_PROPERTY_CONST_TRACKING_BOOL &&
- object->map(isolate).should_be_fast_prototype_map() &&
+ object->map().should_be_fast_prototype_map() &&
!object->HasFastProperties()) {
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
} else {
Handle<Map> new_map;
- if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
+ if (enable_setup_mode && PrototypeBenefitsFromNormalization(*object)) {
#if DEBUG
Handle<Map> old_map = handle(object->map(isolate), isolate);
#endif // DEBUG
@@ -4874,7 +4940,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
dict.DetailsAtPut(index, details);
}
};
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
make_constant(object->property_dictionary_swiss());
} else {
make_constant(object->property_dictionary());
@@ -4892,8 +4958,11 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
// static
void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
- if (!object->map().is_prototype_map()) return;
- if (!object->map().should_be_fast_prototype_map()) return;
+ {
+ Map map = object->map();
+ if (!map.is_prototype_map()) return;
+ if (!map.should_be_fast_prototype_map()) return;
+ }
OptimizeAsPrototype(object);
}
@@ -4912,9 +4981,16 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
break;
}
Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
+ // This checks for both proxies and shared objects.
+ //
// Proxies on the prototype chain are not supported. They make it
// impossible to make any assumptions about the prototype chain anyway.
- if (maybe_proto->IsJSProxy()) return;
+ //
+ // Objects in the shared heap have fixed layouts and their maps never
+ // change, so they don't need to be tracked as prototypes
+ // anyway. Additionally, registering users of shared objects is not
+ // threadsafe.
+ if (!maybe_proto->IsJSObjectThatCanBeTrackedAsPrototype()) continue;
Handle<JSObject> proto = Handle<JSObject>::cast(maybe_proto);
Handle<PrototypeInfo> proto_info =
Map::GetOrCreatePrototypeInfo(proto, isolate);
@@ -4949,7 +5025,8 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(user->is_prototype_map());
// If it doesn't have a PrototypeInfo, it was never registered.
- if (!user->prototype_info().IsPrototypeInfo()) return false;
+ if (!user->has_prototype_info()) return false;
+ DCHECK(user->prototype_info().IsPrototypeInfo());
// If it had no prototype before, see if it had users that might expect
// registration.
if (!user->prototype().IsJSObject()) {
@@ -4995,16 +5072,18 @@ void InvalidateOnePrototypeValidityCellInternal(Map map) {
if (maybe_cell.IsCell()) {
// Just set the value; the cell will be replaced lazily.
Cell cell = Cell::cast(maybe_cell);
- cell.set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
+ Smi invalid_value = Smi::FromInt(Map::kPrototypeChainInvalid);
+ if (cell.value() != invalid_value) {
+ cell.set_value(invalid_value);
+ }
}
- Object maybe_prototype_info = map.prototype_info();
- if (maybe_prototype_info.IsPrototypeInfo()) {
- PrototypeInfo prototype_info = PrototypeInfo::cast(maybe_prototype_info);
+ PrototypeInfo prototype_info;
+ if (map.TryGetPrototypeInfo(&prototype_info)) {
prototype_info.set_prototype_chain_enum_cache(Object());
}
- // We may inline accesses to constants stored in dictionary mode protoypes in
- // optimized code. When doing so, we install depenendies of group
+ // We may inline accesses to constants stored in dictionary mode prototypes in
+ // optimized code. When doing so, we install dependencies of group
// |kPrototypeCheckGroup| on each prototype between the receiver's immediate
// prototype and the holder of the constant property. This dependency is used
// both to detect changes to the constant value itself, and other changes to
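The cell update above now compares against the invalid marker before storing, so repeated invalidations of the same prototype chain no longer issue redundant writes. A small illustrative sketch of the store-only-if-changed idea, with plain ints standing in for V8's Cell and Smi:

#include <cassert>

constexpr int kPrototypeChainValid = 0;
constexpr int kPrototypeChainInvalid = 1;

struct CellStub {
  int value = kPrototypeChainValid;
  int writes = 0;  // Counts mutations, mimicking write-barrier / dirtying cost.
  void Set(int v) {
    ++writes;
    value = v;
  }
};

void InvalidateValidityCell(CellStub& cell) {
  // Only touch the cell if it still holds the valid marker; a second
  // invalidation becomes a pure read.
  if (cell.value != kPrototypeChainInvalid) cell.Set(kPrototypeChainInvalid);
}

int main() {
  CellStub cell;
  InvalidateValidityCell(cell);
  InvalidateValidityCell(cell);
  assert(cell.value == kPrototypeChainInvalid);
  assert(cell.writes == 1);  // The second call did not write again.
}

In the real code this avoids dirtying the Cell (and triggering its write barrier) when the chain was already marked invalid.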
@@ -5031,9 +5110,8 @@ void InvalidatePrototypeChainsInternal(Map map) {
for (; !map.is_null(); map = next_map, next_map = Map()) {
InvalidateOnePrototypeValidityCellInternal(map);
- Object maybe_proto_info = map.prototype_info();
- if (!maybe_proto_info.IsPrototypeInfo()) return;
- PrototypeInfo proto_info = PrototypeInfo::cast(maybe_proto_info);
+ PrototypeInfo proto_info;
+ if (!map.TryGetPrototypeInfo(&proto_info)) return;
if (!proto_info.prototype_users().IsWeakArrayList()) {
return;
}
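Several hunks in this area replace the load-then-type-check-then-cast sequence with a single TryGetPrototypeInfo(&info) call. Roughly the shape of such a helper, sketched with placeholder types (the real Map/PrototypeInfo accessors are assumed, not modelled):

#include <iostream>
#include <variant>

struct PrototypeInfoStub {
  int users = 0;
};

// The slot can hold either real prototype info or something else (in V8 the
// field is overloaded with transition data); std::variant models that here.
using PrototypeSlot = std::variant<std::monostate, PrototypeInfoStub>;

// Returns true and fills |out| only when the slot actually holds info, so the
// caller gets the type check and the cast in one step.
bool TryGetPrototypeInfo(const PrototypeSlot& slot, PrototypeInfoStub* out) {
  if (const auto* info = std::get_if<PrototypeInfoStub>(&slot)) {
    *out = *info;
    return true;
  }
  return false;
}

int main() {
  PrototypeSlot empty;
  PrototypeSlot filled = PrototypeInfoStub{3};
  PrototypeInfoStub info;
  std::cout << TryGetPrototypeInfo(empty, &info) << "\n";   // 0
  std::cout << TryGetPrototypeInfo(filled, &info) << " " << info.users << "\n";
}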
@@ -5124,9 +5202,14 @@ Maybe<bool> JSObject::SetPrototype(Isolate* isolate, Handle<JSObject> object,
bool immutable_proto = map->is_immutable_proto();
if (immutable_proto) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kImmutablePrototypeSet, object));
+ Handle<Object> msg;
+ if (object->IsJSObjectPrototype()) { // is [[Object.prototype]]
+ msg = isolate->factory()->Object_prototype_string();
+ } else {
+ msg = object;
+ }
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kImmutablePrototypeSet, msg));
}
// From 6.1.7.3 Invariants of the Essential Internal Methods
@@ -5161,6 +5244,8 @@ Maybe<bool> JSObject::SetPrototype(Isolate* isolate, Handle<JSObject> object,
isolate->UpdateNoElementsProtectorOnSetPrototype(real_receiver);
isolate->UpdateTypedArraySpeciesLookupChainProtectorOnSetPrototype(
real_receiver);
+ isolate->UpdateNumberStringPrototypeNoReplaceProtectorOnSetPrototype(
+ real_receiver);
Handle<Map> new_map =
Map::TransitionToPrototype(isolate, map, Handle<HeapObject>::cast(value));
@@ -5333,10 +5418,10 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
DisallowGarbageCollection no_gc;
Heap* heap = object->GetHeap();
- PretenturingHandler* pretunring_handler = heap->pretenuring_handler();
+ PretenuringHandler* pretunring_handler = heap->pretenuring_handler();
AllocationMemento memento =
pretunring_handler
- ->FindAllocationMemento<PretenturingHandler::kForRuntime>(
+ ->FindAllocationMemento<PretenuringHandler::kForRuntime>(
object->map(), *object);
if (memento.is_null()) return false;
@@ -5528,15 +5613,15 @@ MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
}
// static
-double JSDate::CurrentTimeValue(Isolate* isolate) {
+int64_t JSDate::CurrentTimeValue(Isolate* isolate) {
if (v8_flags.log_internal_timer_events) LOG(isolate, CurrentTimeEvent());
- if (v8_flags.correctness_fuzzer_suppressions) return 4.2;
+ if (v8_flags.correctness_fuzzer_suppressions) return 4;
// According to ECMA-262, section 15.9.1, page 117, the precision of
// the number in a Date object representing a particular instant in
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
- return std::floor(V8::GetCurrentPlatform()->CurrentClockTimeMillis());
+ return V8::GetCurrentPlatform()->CurrentClockTimeMilliseconds();
}
// static
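JSDate::CurrentTimeValue now returns whole milliseconds as int64_t rather than a floored double. A standalone approximation using std::chrono; the real implementation goes through V8's Platform abstraction, which is assumed here rather than reproduced:

#include <chrono>
#include <cstdint>
#include <iostream>

// Milliseconds since the Unix epoch, truncated to an integer, matching the
// millisecond precision the spec prescribes for Date.
int64_t CurrentTimeMilliseconds() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(system_clock::now().time_since_epoch())
      .count();
}

int main() {
  std::cout << CurrentTimeMilliseconds() << "\n";
}

duration_cast truncates toward zero, which coincides with flooring for any post-epoch timestamp.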
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 06489c2b7b..89260488fb 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_OBJECTS_H_
#define V8_OBJECTS_JS_OBJECTS_H_
+#include "src/base/optional.h"
#include "src/objects/embedder-data-slot.h"
// TODO(jkummerow): Consider forward-declaring instead.
#include "src/objects/internal-index.h"
@@ -222,19 +223,20 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
// ES6 7.3.14 (when passed kDontThrow)
// 'level' must be SEALED or FROZEN.
V8_WARN_UNUSED_RESULT static Maybe<bool> SetIntegrityLevel(
- Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSReceiver> object, IntegrityLevel lvl,
+ ShouldThrow should_throw);
// ES6 7.3.15
// 'level' must be SEALED or FROZEN.
V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
- Handle<JSReceiver> object, IntegrityLevel lvl);
+ Isolate* isolate, Handle<JSReceiver> object, IntegrityLevel lvl);
// ES6 [[PreventExtensions]] (when passed kDontThrow)
V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
- Handle<JSReceiver> object, ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSReceiver> object, ShouldThrow should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(
- Handle<JSReceiver> object);
+ Isolate* isolate, Handle<JSReceiver> object);
// Returns the class name.
V8_EXPORT_PRIVATE String class_name();
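The declarations above thread an explicit Isolate* through SetIntegrityLevel, TestIntegrityLevel, PreventExtensions and IsExtensible instead of recovering it from the object via GetIsolate(). A toy sketch of that refactoring direction with a stand-in context type; no real V8 API is involved:

#include <iostream>
#include <string>

struct ContextStub {
  std::string name;
};

// Old style: the object carries a back pointer and callees re-derive the
// context themselves, hiding the dependency and costing an extra load.
struct ObjectStub {
  ContextStub* owner = nullptr;
  ContextStub* GetContext() const { return owner; }
};

// New style: the caller, which usually has the context in hand already,
// passes it down explicitly so the dependency is visible at every call site.
bool IsExtensible(ContextStub* context, const ObjectStub& object) {
  // Access checks and interceptors would consult |context| here; the sketch
  // only demonstrates the parameter threading.
  return object.GetContext() == context;
}

int main() {
  ContextStub ctx{"main"};
  ObjectStub obj{&ctx};
  std::cout << std::boolalpha << IsExtensible(&ctx, obj) << "\n";  // true
}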
@@ -251,6 +253,7 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
static Handle<String> GetConstructorName(Isolate* isolate,
Handle<JSReceiver> receiver);
+ V8_EXPORT_PRIVATE base::Optional<NativeContext> GetCreationContextRaw();
V8_EXPORT_PRIVATE MaybeHandle<NativeContext> GetCreationContext();
V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
@@ -332,6 +335,10 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
Handle<AllocationSite> site);
+ static MaybeHandle<JSObject> NewWithMap(Isolate* isolate,
+ Handle<Map> initial_map,
+ Handle<AllocationSite> site);
+
// 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
// Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
@@ -753,12 +760,12 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
bool ReferencesObject(Object obj);
V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
- Handle<JSObject> object, IntegrityLevel lvl);
+ Isolate* isolate, Handle<JSObject> object, IntegrityLevel lvl);
V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
- Handle<JSObject> object, ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSObject> object, ShouldThrow should_throw);
- static bool IsExtensible(Handle<JSObject> object);
+ static bool IsExtensible(Isolate* isolate, Handle<JSObject> object);
static MaybeHandle<Object> ReadFromOptionsBag(Handle<Object> options,
Handle<String> option_name,
@@ -901,7 +908,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
template <PropertyAttributes attrs>
V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
- Handle<JSObject> object, ShouldThrow should_throw);
+ Isolate* isolate, Handle<JSObject> object, ShouldThrow should_throw);
TQ_OBJECT_CONSTRUCTORS(JSObject)
};
@@ -1109,8 +1116,8 @@ class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
static V8_WARN_UNUSED_RESULT MaybeHandle<JSDate> New(
Handle<JSFunction> constructor, Handle<JSReceiver> new_target, double tv);
- // Returns the time value (UTC) identifying the current time.
- static double CurrentTimeValue(Isolate* isolate);
+ // Returns the time value (UTC) identifying the current time in milliseconds.
+ static int64_t CurrentTimeValue(Isolate* isolate);
// Returns the date field with the specified index.
// See FieldIndex for the list of date fields.
@@ -1280,6 +1287,18 @@ class JSStringIterator
TQ_OBJECT_CONSTRUCTORS(JSStringIterator)
};
+// The valid iterator wrapper is the wrapper object created by
+// Iterator.from(obj), which attempts to wrap iterator-like objects into an
+// actual iterator with %Iterator.prototype%.
+class JSValidIteratorWrapper
+ : public TorqueGeneratedJSValidIteratorWrapper<JSValidIteratorWrapper,
+ JSObject> {
+ public:
+ DECL_PRINTER(JSValidIteratorWrapper)
+
+ TQ_OBJECT_CONSTRUCTORS(JSValidIteratorWrapper)
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index 99107f0466..11f375dc50 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -79,7 +79,7 @@ macro GetDerivedMap(implicit context: Context)(
}
}
-macro GetDerivedRabGsabMap(implicit context: Context)(
+macro GetDerivedRabGsabTypedArrayMap(implicit context: Context)(
target: JSFunction, newTarget: JSReceiver): Map {
return runtime::GetDerivedMap(context, target, newTarget, TrueConstant());
}
@@ -173,6 +173,13 @@ extern class JSStringIterator extends JSObject {
index: Smi;
}
+// The wrapper returned by Iterator.from().
+// https://tc39.es/proposal-iterator-helpers/#sec-wrapforvaliditeratorprototype-object
+extern class JSValidIteratorWrapper extends JSObject {
+ // The [[Iterated]] slot.
+ underlying: iterator::IteratorRecord;
+}
+
extern macro AllocateJSObjectFromMap(Map): JSObject;
extern macro AllocateJSObjectFromMap(
Map,
diff --git a/deps/v8/src/objects/js-raw-json-inl.h b/deps/v8/src/objects/js-raw-json-inl.h
index b20a6ad117..e91725a696 100644
--- a/deps/v8/src/objects/js-raw-json-inl.h
+++ b/deps/v8/src/objects/js-raw-json-inl.h
@@ -18,6 +18,10 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRawJson)
+bool JSRawJson::HasInitialLayout(Isolate* isolate) const {
+ return map() == *isolate->js_raw_json_map();
+}
+
} // namespace internal
} // namespace v8
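JSRawJson::HasInitialLayout compares the object's map against the canonical raw-JSON map, so callers know whether the rawJSON value can still be read at a fixed in-object index. A generic sketch of that guard-the-fast-path idea, using hypothetical stand-in structures rather than V8 maps:

#include <iostream>
#include <string>
#include <unordered_map>

struct Shape {
  int id;
};

struct ObjectStub {
  const Shape* shape;
  std::string slot0;  // Fixed slot, valid only while the initial shape holds.
  std::unordered_map<std::string, std::string> slow_properties;
};

const Shape kInitialRawJsonShape{1};

bool HasInitialLayout(const ObjectStub& o) {
  return o.shape == &kInitialRawJsonShape;
}

std::string GetRawJson(const ObjectStub& o) {
  // Fast path: the layout is known, read the fixed slot directly.
  if (HasInitialLayout(o)) return o.slot0;
  // Slow path: fall back to a generic property lookup.
  auto it = o.slow_properties.find("rawJSON");
  return it == o.slow_properties.end() ? "" : it->second;
}

int main() {
  ObjectStub fast{&kInitialRawJsonShape, "123", {}};
  Shape other{2};
  ObjectStub slow{&other, "", {{"rawJSON", "456"}}};
  std::cout << GetRawJson(fast) << " " << GetRawJson(slow) << "\n";
}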
diff --git a/deps/v8/src/objects/js-raw-json.cc b/deps/v8/src/objects/js-raw-json.cc
index 1c82066076..b4db2cc473 100644
--- a/deps/v8/src/objects/js-raw-json.cc
+++ b/deps/v8/src/objects/js-raw-json.cc
@@ -20,21 +20,22 @@ MaybeHandle<JSRawJson> JSRawJson::Create(Isolate* isolate,
Handle<String> json_string;
ASSIGN_RETURN_ON_EXCEPTION(isolate, json_string,
Object::ToString(isolate, text), JSRawJson);
- if (String::IsOneByteRepresentationUnderneath(*json_string)) {
- if (!JsonParser<uint8_t>::CheckRawJson(isolate, json_string)) {
+ Handle<String> flat = String::Flatten(isolate, json_string);
+ if (String::IsOneByteRepresentationUnderneath(*flat)) {
+ if (!JsonParser<uint8_t>::CheckRawJson(isolate, flat)) {
DCHECK(isolate->has_pending_exception());
return MaybeHandle<JSRawJson>();
}
} else {
- if (!JsonParser<uint16_t>::CheckRawJson(isolate, json_string)) {
+ if (!JsonParser<uint16_t>::CheckRawJson(isolate, flat)) {
DCHECK(isolate->has_pending_exception());
return MaybeHandle<JSRawJson>();
}
}
Handle<JSObject> result =
isolate->factory()->NewJSObjectFromMap(isolate->js_raw_json_map());
- result->InObjectPropertyAtPut(JSRawJson::kRawJsonIndex, *json_string);
- JSObject::SetIntegrityLevel(result, FROZEN, kThrowOnError).Check();
+ result->InObjectPropertyAtPut(JSRawJson::kRawJsonInitialIndex, *flat);
+ JSObject::SetIntegrityLevel(isolate, result, FROZEN, kThrowOnError).Check();
return Handle<JSRawJson>::cast(result);
}
diff --git a/deps/v8/src/objects/js-raw-json.h b/deps/v8/src/objects/js-raw-json.h
index bb9f90cbc5..1eff6b6066 100644
--- a/deps/v8/src/objects/js-raw-json.h
+++ b/deps/v8/src/objects/js-raw-json.h
@@ -17,15 +17,20 @@ namespace internal {
class JSRawJson : public TorqueGeneratedJSRawJson<JSRawJson, JSObject> {
public:
- // Layout description.
-#define JS_RAW_JSON_FIELDS(V) \
- V(kRawJsonOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
+ // Initial layout description.
+#define JS_RAW_JSON_FIELDS(V) \
+ V(kRawJsonInitialOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kInitialSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_RAW_JSON_FIELDS)
#undef JS_RAW_JSON_FIELDS
- static const int kRawJsonIndex = 0;
+  // This index is only valid to use if HasInitialLayout() returns true.
+ static const int kRawJsonInitialIndex = 0;
+
+ // Returns whether this raw JSON object has the initial layout and the
+ // "rawJSON" property can be directly accessed using kRawJsonInitialIndex.
+ inline bool HasInitialLayout(Isolate* isolate) const;
V8_WARN_UNUSED_RESULT static MaybeHandle<JSRawJson> Create(
Isolate* isolate, Handle<Object> text);
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index bd3b15f8d5..0f5c9e71f5 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -113,9 +113,9 @@ bool JSRegExp::HasCompiledCode() const {
if (type_tag() != IRREGEXP) return false;
Smi uninitialized = Smi::FromInt(kUninitializedValue);
#ifdef DEBUG
- DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCodeT() ||
+ DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCode() ||
DataAt(kIrregexpLatin1CodeIndex) == uninitialized);
- DCHECK(DataAt(kIrregexpUC16CodeIndex).IsCodeT() ||
+ DCHECK(DataAt(kIrregexpUC16CodeIndex).IsCode() ||
DataAt(kIrregexpUC16CodeIndex) == uninitialized);
DCHECK(DataAt(kIrregexpLatin1BytecodeIndex).IsByteArray() ||
DataAt(kIrregexpLatin1BytecodeIndex) == uninitialized);
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index d90e88dc1e..2fc3c9149d 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -153,12 +153,12 @@ MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
Object JSRegExp::code(bool is_latin1) const {
DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
Object value = DataAt(code_index(is_latin1));
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, value.IsSmi() || value.IsCodeT());
+ DCHECK(value.IsSmi() || value.IsCode());
return value;
}
void JSRegExp::set_code(bool is_latin1, Handle<Code> code) {
- SetDataAt(code_index(is_latin1), ToCodeT(*code));
+ SetDataAt(code_index(is_latin1), *code);
}
Object JSRegExp::bytecode(bool is_latin1) const {
@@ -172,8 +172,7 @@ void JSRegExp::set_bytecode_and_trampoline(Isolate* isolate,
SetDataAt(kIrregexpLatin1BytecodeIndex, *bytecode);
SetDataAt(kIrregexpUC16BytecodeIndex, *bytecode);
- Handle<CodeT> trampoline =
- BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, *trampoline);
SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, *trampoline);
}
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 41ca912f5e..b1a8c1336a 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -66,7 +66,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
inline Type type_tag() const;
inline String atom_pattern() const;
- // This could be a Smi kUninitializedValue or Code.
+ // This could be a Smi kUninitializedValue or InstructionStream.
V8_EXPORT_PRIVATE Object code(bool is_latin1) const;
V8_EXPORT_PRIVATE void set_code(bool is_unicode, Handle<Code> code);
// This could be a Smi kUninitializedValue or ByteArray.
@@ -176,7 +176,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static constexpr int kAtomPatternIndex = kFirstTypeSpecificIndex;
static constexpr int kAtomDataSize = kAtomPatternIndex + 1;
- // A Code object or a Smi marker value equal to kUninitializedValue.
+  // An InstructionStream object or a Smi marker value equal to
+ // kUninitializedValue.
static constexpr int kIrregexpLatin1CodeIndex = kFirstTypeSpecificIndex;
static constexpr int kIrregexpUC16CodeIndex = kIrregexpLatin1CodeIndex + 1;
// A ByteArray object or a Smi marker value equal to kUninitializedValue.
diff --git a/deps/v8/src/objects/js-shared-array-inl.h b/deps/v8/src/objects/js-shared-array-inl.h
index b56a66db45..988933ef51 100644
--- a/deps/v8/src/objects/js-shared-array-inl.h
+++ b/deps/v8/src/objects/js-shared-array-inl.h
@@ -8,6 +8,7 @@
#include "src/api/api-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-shared-array.h"
+#include "src/objects/js-struct-inl.h"
#include "src/objects/smi-inl.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/js-shared-array.h b/deps/v8/src/objects/js-shared-array.h
index 2c98c45570..5441db96ff 100644
--- a/deps/v8/src/objects/js-shared-array.h
+++ b/deps/v8/src/objects/js-shared-array.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_SHARED_ARRAY_H_
#include "src/objects/js-objects.h"
+#include "src/objects/js-struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,12 +17,25 @@ namespace internal {
#include "torque-generated/src/objects/js-shared-array-tq.inc"
class JSSharedArray
- : public TorqueGeneratedJSSharedArray<JSSharedArray, JSObject> {
+ : public TorqueGeneratedJSSharedArray<JSSharedArray,
+ AlwaysSharedSpaceJSObject> {
public:
DECL_CAST(JSSharedArray)
DECL_PRINTER(JSSharedArray)
EXPORT_DECL_VERIFIER(JSSharedArray)
+ // In-object fields.
+ enum {
+ // The length field is constant and is equal to elements().length().
+ //
+ // TODO(v8:12547): We can save the space for this field by making it
+ // possible to put AccessorInfo in shared or RO space.
+ kLengthFieldIndex = 0,
+ kInObjectFieldCount,
+ };
+ static constexpr int kSize =
+ kHeaderSize + (kTaggedSize * kInObjectFieldCount);
+
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(JSSharedArray)
diff --git a/deps/v8/src/objects/js-shared-array.tq b/deps/v8/src/objects/js-shared-array.tq
index b88777d77c..1cd690600b 100644
--- a/deps/v8/src/objects/js-shared-array.tq
+++ b/deps/v8/src/objects/js-shared-array.tq
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern class JSSharedArray extends JSObject {}
+extern class JSSharedArray extends AlwaysSharedSpaceJSObject {}
diff --git a/deps/v8/src/objects/js-struct-inl.h b/deps/v8/src/objects/js-struct-inl.h
index 6c3601b71a..24b06008a1 100644
--- a/deps/v8/src/objects/js-struct-inl.h
+++ b/deps/v8/src/objects/js-struct-inl.h
@@ -18,6 +18,8 @@ namespace internal {
#include "torque-generated/src/objects/js-struct-tq-inl.inc"
+TQ_OBJECT_CONSTRUCTORS_IMPL(AlwaysSharedSpaceJSObject)
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSharedStruct)
CAST_ACCESSOR(JSSharedStruct)
diff --git a/deps/v8/src/objects/js-struct.cc b/deps/v8/src/objects/js-struct.cc
new file mode 100644
index 0000000000..f1bb5a84e9
--- /dev/null
+++ b/deps/v8/src/objects/js-struct.cc
@@ -0,0 +1,49 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/js-struct.h"
+
+#include "src/objects/lookup-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Maybe<bool> AlwaysSharedSpaceJSObject::DefineOwnProperty(
+ Isolate* isolate, Handle<AlwaysSharedSpaceJSObject> shared_obj,
+ Handle<Object> key, PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ // Shared objects are designed to have fixed layout, i.e. their maps are
+  // effectively immutable. They are constructed sealed, but the semantics of
+ // ordinary ECMAScript objects allow writable properties to be upgraded to
+ // non-writable properties. This upgrade violates the fixed layout invariant
+ // and is disallowed.
+
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey.
+ PropertyKey lookup_key(isolate, key);
+ LookupIterator it(isolate, shared_obj, lookup_key, LookupIterator::OWN);
+ PropertyDescriptor current;
+ MAYBE_RETURN(GetOwnPropertyDescriptor(&it, &current), Nothing<bool>());
+
+ // The only redefinition allowed is to set the value if all attributes match.
+ if (!it.IsFound() ||
+ PropertyDescriptor::IsDataDescriptor(desc) !=
+ PropertyDescriptor::IsDataDescriptor(&current) ||
+ desc->ToAttributes() != current.ToAttributes()) {
+ DCHECK(!shared_obj->map().is_extensible());
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kDefineDisallowedFixedLayout,
+ it.GetName()));
+ }
+ DCHECK(it.property_attributes() == desc->ToAttributes());
+ if (desc->has_value()) {
+ return Object::SetDataProperty(&it, desc->value());
+ }
+ return Just(true);
+}
+
+} // namespace internal
+} // namespace v8
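The new AlwaysSharedSpaceJSObject::DefineOwnProperty only permits a redefinition when the property is found with matching attributes, in which case just the value may change. A self-contained sketch of that check with a deliberately simplified descriptor struct (V8's PropertyDescriptor and LookupIterator are not modelled):

#include <iostream>
#include <map>
#include <optional>
#include <string>

struct DescriptorStub {
  bool writable;
  bool enumerable;
  bool configurable;
  std::optional<int> value;
};

bool SameAttributes(const DescriptorStub& a, const DescriptorStub& b) {
  return a.writable == b.writable && a.enumerable == b.enumerable &&
         a.configurable == b.configurable;
}

// Fixed-layout rule: the property must already exist with identical
// attributes; only the stored value may be replaced.
bool DefineOwnProperty(std::map<std::string, DescriptorStub>& properties,
                       const std::string& key, const DescriptorStub& desc) {
  auto it = properties.find(key);
  if (it == properties.end() || !SameAttributes(it->second, desc)) {
    return false;  // V8 throws kDefineDisallowedFixedLayout here.
  }
  if (desc.value) it->second.value = desc.value;
  return true;
}

int main() {
  std::map<std::string, DescriptorStub> props;
  props["x"] = DescriptorStub{true, true, false, 1};
  DescriptorStub same_attrs{true, true, false, 42};
  DescriptorStub changed_attrs{false, true, false, 42};
  std::cout << DefineOwnProperty(props, "x", same_attrs) << " "     // 1
            << DefineOwnProperty(props, "x", changed_attrs) << " "  // 0
            << DefineOwnProperty(props, "y", same_attrs) << "\n";   // 0
}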
diff --git a/deps/v8/src/objects/js-struct.h b/deps/v8/src/objects/js-struct.h
index c65f60a3a1..7501650c8a 100644
--- a/deps/v8/src/objects/js-struct.h
+++ b/deps/v8/src/objects/js-struct.h
@@ -15,8 +15,22 @@ namespace internal {
#include "torque-generated/src/objects/js-struct-tq.inc"
+class AlwaysSharedSpaceJSObject
+ : public TorqueGeneratedAlwaysSharedSpaceJSObject<AlwaysSharedSpaceJSObject,
+ JSObject> {
+ public:
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<AlwaysSharedSpaceJSObject> shared_obj,
+ Handle<Object> key, PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw);
+
+ static_assert(kHeaderSize == JSObject::kHeaderSize);
+ TQ_OBJECT_CONSTRUCTORS(AlwaysSharedSpaceJSObject)
+};
+
class JSSharedStruct
- : public TorqueGeneratedJSSharedStruct<JSSharedStruct, JSObject> {
+ : public TorqueGeneratedJSSharedStruct<JSSharedStruct,
+ AlwaysSharedSpaceJSObject> {
public:
DECL_CAST(JSSharedStruct)
DECL_PRINTER(JSSharedStruct)
diff --git a/deps/v8/src/objects/js-struct.tq b/deps/v8/src/objects/js-struct.tq
index d2304ead2f..a50c6d75c7 100644
--- a/deps/v8/src/objects/js-struct.tq
+++ b/deps/v8/src/objects/js-struct.tq
@@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern class JSSharedStruct extends JSObject {
- // escaped_local_thread: Smi;
+// AlwaysSharedSpaceJSObjects are JSObjects that must always be allocated in the
+// shared space. Their instance type range is used to fast path the shared value
+// barrier.
+@abstract
+extern class AlwaysSharedSpaceJSObject extends JSObject {
}
+
+extern class JSSharedStruct extends AlwaysSharedSpaceJSObject {}
diff --git a/deps/v8/src/objects/js-temporal-objects.cc b/deps/v8/src/objects/js-temporal-objects.cc
index 900d709f20..03303935b3 100644
--- a/deps/v8/src/objects/js-temporal-objects.cc
+++ b/deps/v8/src/objects/js-temporal-objects.cc
@@ -608,7 +608,8 @@ Handle<BigInt> SystemUTCEpochNanoseconds(Isolate* isolate) {
TEMPORAL_ENTER_FUNC();
// 1. Let ns be the approximate current UTC date and time, in nanoseconds
// since the epoch.
- double ms = V8::GetCurrentPlatform()->CurrentClockTimeMillis();
+ double ms =
+ V8::GetCurrentPlatform()->CurrentClockTimeMillisecondsHighResolution();
// 2. Set ns to the result of clamping ns between −8.64 × 10^21 and 8.64 ×
// 10^21.
@@ -3106,16 +3107,8 @@ MaybeHandle<JSTemporalZonedDateTime> SystemZonedDateTime(
}
int CompareResultToSign(ComparisonResult r) {
- switch (r) {
- case ComparisonResult::kEqual:
- return 0;
- case ComparisonResult::kLessThan:
- return -1;
- case ComparisonResult::kGreaterThan:
- return 1;
- case ComparisonResult::kUndefined:
- UNREACHABLE();
- }
+ DCHECK_NE(r, ComparisonResult::kUndefined);
+ return static_cast<int>(r);
}
// #sec-temporal-formattimezoneoffsetstring
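CompareResultToSign now simply casts the enum, which is only sound because ComparisonResult's enumerators carry the numeric values -1, 0 and 1. A compact illustration, with the enumerator values assumed to mirror V8's:

#include <cassert>

// Assumed to match v8::internal::ComparisonResult's numeric values; if those
// enumerators ever change, the cast silently breaks, which is why the code
// keeps a DCHECK against kUndefined.
enum class ComparisonResult : int {
  kLessThan = -1,
  kEqual = 0,
  kGreaterThan = 1,
  kUndefined = 2,
};

int CompareResultToSign(ComparisonResult r) {
  assert(r != ComparisonResult::kUndefined);
  return static_cast<int>(r);
}

int main() {
  assert(CompareResultToSign(ComparisonResult::kLessThan) == -1);
  assert(CompareResultToSign(ComparisonResult::kEqual) == 0);
  assert(CompareResultToSign(ComparisonResult::kGreaterThan) == 1);
}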
@@ -4614,7 +4607,8 @@ bool IsBuiltinCalendar(Isolate* isolate, Handle<String> id) {
// 1. Let calendars be AvailableCalendars().
// 2. If calendars contains the ASCII-lowercase of id, return true.
// 3. Return false.
- id = Intl::ConvertToLower(isolate, id).ToHandleChecked();
+ id = Intl::ConvertToLower(isolate, String::Flatten(isolate, id))
+ .ToHandleChecked();
return GetCalendarMap()->Contains(id->ToCString().get());
}
@@ -4624,7 +4618,8 @@ Handle<String> CalendarIdentifier(Isolate* isolate, int32_t index) {
}
int32_t CalendarIndex(Isolate* isolate, Handle<String> id) {
- id = Intl::ConvertToLower(isolate, id).ToHandleChecked();
+ id = Intl::ConvertToLower(isolate, String::Flatten(isolate, id))
+ .ToHandleChecked();
return GetCalendarMap()->Index(id->ToCString().get());
}
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 6aebe60291..98a39b6b86 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -5,6 +5,7 @@
#include "src/objects/keys.h"
#include "src/api/api-arguments-inl.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/isolate-inl.h"
#include "src/handles/handles-inl.h"
@@ -17,6 +18,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/prototype-info.h"
#include "src/objects/prototype.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/utils/identity-map.h"
@@ -148,9 +150,8 @@ ExceptionStatus KeyAccumulator::AddKey(Handle<Object> key,
OrderedHashSet::Add(isolate(), keys(), key);
Handle<OrderedHashSet> new_set;
if (!new_set_candidate.ToHandle(&new_set)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate_, NewRangeError(MessageTemplate::kTooManyProperties),
- ExceptionStatus::kException);
+ CHECK(isolate_->has_pending_exception());
+ return ExceptionStatus::kException;
}
if (*new_set != *keys_) {
// The keys_ Set is converted directly to a FixedArray in GetKeys which can
@@ -536,16 +537,19 @@ Handle<FixedArray> FastKeyAccumulator::InitializeFastPropertyEnumCache(
if (fields_only) {
indices = isolate->factory()->NewFixedArray(enum_length, allocation);
index = 0;
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- DisallowGarbageCollection no_gc;
- PropertyDetails details = descriptors->GetDetails(i);
+ DisallowGarbageCollection no_gc;
+ auto raw_map = *map;
+ auto raw_indices = *indices;
+ auto raw_descriptors = *descriptors;
+ for (InternalIndex i : raw_map.IterateOwnDescriptors()) {
+ PropertyDetails details = raw_descriptors.GetDetails(i);
if (details.IsDontEnum()) continue;
- Object key = descriptors->GetKey(i);
+ Object key = raw_descriptors.GetKey(i);
if (key.IsSymbol()) continue;
DCHECK_EQ(PropertyKind::kData, details.kind());
DCHECK_EQ(PropertyLocation::kField, details.location());
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- indices->set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
+ FieldIndex field_index = FieldIndex::ForDetails(raw_map, details);
+ raw_indices.set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
index++;
}
DCHECK_EQ(index, indices->length());
@@ -662,19 +666,19 @@ bool FastKeyAccumulator::TryPrototypeInfoCache(Handle<JSReceiver> receiver) {
!isolate_->MayAccess(handle(isolate_->context(), isolate_), object)) {
return false;
}
- HeapObject prototype = receiver->map().prototype();
+ DisallowGarbageCollection no_gc;
+ HeapObject prototype = receiver->map(isolate_).prototype();
if (prototype.is_null()) return false;
- if (!prototype.map().is_prototype_map() ||
- !prototype.map().prototype_info().IsPrototypeInfo()) {
- return false;
- }
+ Map maybe_proto_map = prototype.map(isolate_);
+ if (!maybe_proto_map.is_prototype_map()) return false;
+ PrototypeInfo prototype_info;
+ if (!maybe_proto_map.TryGetPrototypeInfo(&prototype_info)) return false;
+
first_prototype_ = handle(JSReceiver::cast(prototype), isolate_);
- Handle<Map> map(prototype.map(), isolate_);
- first_prototype_map_ = map;
- has_prototype_info_cache_ = map->IsPrototypeValidityCellValid() &&
- PrototypeInfo::cast(map->prototype_info())
- .prototype_chain_enum_cache()
- .IsFixedArray();
+ first_prototype_map_ = handle(maybe_proto_map, isolate_);
+ has_prototype_info_cache_ =
+ maybe_proto_map.IsPrototypeValidityCellValid() &&
+ prototype_info.prototype_chain_enum_cache().IsFixedArray();
return true;
}
@@ -1293,7 +1297,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
}
}
// 10. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(isolate_, target);
MAYBE_RETURN(maybe_extensible, Nothing<bool>());
bool extensible_target = maybe_extensible.FromJust();
// 11. Let targetKeys be ? target.[[OwnPropertyKeys]]().
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index ff30fcc4f2..d34f710147 100644
--- a/deps/v8/src/objects/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -211,8 +211,11 @@ Handle<T> LookupIterator::GetHolder() const {
bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ // Shared objects have fixed layout. No properties may be added to them, not
+ // even private symbols.
return !receiver->map(isolate_).is_extensible() &&
- (IsElement() || !name_->IsPrivate(isolate_));
+ (IsElement() || (!name_->IsPrivate(isolate_) ||
+ receiver->IsAlwaysSharedSpaceJSObject()));
}
bool LookupIterator::IsCacheableTransition() {
@@ -239,7 +242,8 @@ void LookupIterator::UpdateProtector(Isolate* isolate, Handle<Object> receiver,
*name == roots.constructor_string() || *name == roots.next_string() ||
*name == roots.resolve_string() || *name == roots.then_string() ||
*name == roots.is_concat_spreadable_symbol() ||
- *name == roots.iterator_symbol() || *name == roots.species_symbol();
+ *name == roots.iterator_symbol() || *name == roots.species_symbol() ||
+ *name == roots.replace_symbol();
DCHECK_EQ(maybe_protector, debug_maybe_protector);
#endif // DEBUG
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 8fe77f6b62..e3bf4794e8 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -324,6 +324,18 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
receiver->IsJSPromisePrototype()) {
Protectors::InvalidatePromiseThenLookupChain(isolate);
}
+ } else if (*name == roots.replace_symbol()) {
+ if (!Protectors::IsNumberStringPrototypeNoReplaceIntact(isolate)) return;
+ // We need to protect the prototype chains of `Number.prototype` and
+ // `String.prototype`: that `Symbol.replace` is not added as a property on
+ // any object on these prototype chains.
+ // We detect `Number.prototype` and `String.prototype` by checking for a
+ // prototype that is a JSPrimitiveWrapper. This is a safe approximation.
+ // Using JSPrimitiveWrapper as prototype should be sufficiently rare.
+ if (receiver->map().is_prototype_map() &&
+ (receiver->IsJSPrimitiveWrapper() || receiver->IsJSObjectPrototype())) {
+ Protectors::InvalidateNumberStringPrototypeNoReplace(isolate);
+ }
}
}
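The new branch invalidates the NumberStringPrototypeNoReplace protector when a Symbol.replace property appears on a plausible Number.prototype/String.prototype. A protector is essentially a process-wide one-way flag that fast paths consult; a minimal sketch of the mechanism (the names here are illustrative, not V8's actual Protectors API):

#include <iostream>
#include <string>

// One-way flag: starts intact, can only be invalidated, never re-armed.
class ProtectorStub {
 public:
  bool IsIntact() const { return intact_; }
  void Invalidate() { intact_ = false; }

 private:
  bool intact_ = true;
};

ProtectorStub g_no_replace_on_number_string_prototype;

void OnPropertyAdded(const std::string& name, bool on_watched_prototype) {
  if (name != "Symbol.replace") return;
  if (!g_no_replace_on_number_string_prototype.IsIntact()) return;
  if (on_watched_prototype) {
    g_no_replace_on_number_string_prototype.Invalidate();
  }
}

std::string StringifyNumberFastOrSlow() {
  // Fast paths check the flag instead of walking the prototype chain.
  return g_no_replace_on_number_string_prototype.IsIntact() ? "fast" : "slow";
}

int main() {
  std::cout << StringifyNumberFastOrSlow() << "\n";  // fast
  OnPropertyAdded("Symbol.replace", /*on_watched_prototype=*/true);
  std::cout << StringifyNumberFastOrSlow() << "\n";  // slow
}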
@@ -375,11 +387,9 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
// Check that current value matches new value otherwise we should make
// the property mutable.
if (holder->HasFastProperties(isolate_)) {
- if (!IsConstFieldValueEqualTo(*value)) {
- new_constness = PropertyConstness::kMutable;
- }
+ if (!CanStayConst(*value)) new_constness = PropertyConstness::kMutable;
} else if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
- if (!IsConstDictValueEqualTo(*value)) {
+ if (!DictCanStayConst(*value)) {
property_details_ =
property_details_.CopyWithConstness(PropertyConstness::kMutable);
@@ -387,7 +397,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
// that's only for the case that the existing map is a fast mode map.
// Therefore, we need to perform the necessary updates to the property
// details and the prototype validity cell directly.
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dict = holder->property_dictionary_swiss();
dict.DetailsAtPut(dictionary_entry(), property_details_);
} else {
@@ -439,7 +449,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
property_details_ =
property_details_.CopyWithConstness(PropertyConstness::kMutable);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dict = holder_obj->property_dictionary_swiss();
dict.DetailsAtPut(dictionary_entry(), property_details_);
} else {
@@ -520,7 +530,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
} else {
PropertyDetails details(PropertyKind::kData, attributes,
PropertyConstness::kMutable);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
holder_obj->property_dictionary_swiss(isolate_), isolate());
dictionary->ValueAtPut(dictionary_entry(), *value);
@@ -561,6 +571,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
DCHECK_IMPLIES(receiver->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
DCHECK_IMPLIES(!receiver.is_identical_to(GetStoreTarget<JSReceiver>()),
name()->IsPrivateName());
+ DCHECK(!receiver->IsAlwaysSharedSpaceJSObject());
if (state_ == TRANSITION) return;
if (!IsElement() && name()->IsPrivate(isolate_)) {
@@ -669,7 +680,7 @@ void LookupIterator::ApplyTransitionToDataProperty(
receiver->IsJSObject(isolate_)) {
JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
}
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
receiver->property_dictionary_swiss(isolate_), isolate_);
@@ -687,6 +698,10 @@ void LookupIterator::ApplyTransitionToDataProperty(
isolate_->factory()->uninitialized_value(),
property_details_, &number_);
receiver->SetProperties(*dictionary);
+ // TODO(pthier): Add flags to swiss dictionaries.
+ if (name()->IsInterestingSymbol()) {
+ dictionary->set_may_have_interesting_symbols(true);
+ }
// Reload details containing proper enumeration index value.
property_details_ = dictionary->DetailsAt(number_);
}
@@ -876,7 +891,7 @@ Handle<Object> LookupIterator::FetchValue(
result = holder->global_dictionary(isolate_, kAcquireLoad)
.ValueAt(isolate_, dictionary_entry());
} else if (!holder_->HasFastProperties(isolate_)) {
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
result = holder_->property_dictionary_swiss(isolate_).ValueAt(
dictionary_entry());
} else {
@@ -887,7 +902,7 @@ Handle<Object> LookupIterator::FetchValue(
DCHECK_EQ(PropertyKind::kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index =
- FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+ FieldIndex::ForDetails(holder->map(isolate_), property_details_);
if (allocation_policy == AllocationPolicy::kAllocationDisallowed &&
field_index.is_inobject() && field_index.is_double()) {
return isolate_->factory()->undefined_value();
@@ -902,7 +917,7 @@ Handle<Object> LookupIterator::FetchValue(
return handle(result, isolate_);
}
-bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
+bool LookupIterator::CanStayConst(Object value) const {
DCHECK(!IsElement(*holder_));
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(PropertyLocation::kField, property_details_.location());
@@ -915,7 +930,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
}
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index =
- FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+ FieldIndex::ForDetails(holder->map(isolate_), property_details_);
if (property_details_.representation().IsDouble()) {
if (!value.IsNumber(isolate_)) return false;
uint64_t bits;
@@ -927,23 +942,15 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
// base::bit_cast or value(), will change its value on ia32 (the x87
// stack is used to return values and stores to the stack silently clear the
// signalling bit).
- if (bits == kHoleNanInt64) {
- // Uninitialized double field.
- return true;
- }
- return Object::SameNumberValue(base::bit_cast<double>(bits),
- value.Number());
- } else {
- Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
- if (current_value.IsUninitialized(isolate()) || current_value == value) {
- return true;
- }
- return current_value.IsNumber(isolate_) && value.IsNumber(isolate_) &&
- Object::SameNumberValue(current_value.Number(), value.Number());
+    // Only allow initializing stores to double fields to stay constant.
+ return bits == kHoleNanInt64;
}
+
+ Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
+ return current_value.IsUninitialized(isolate());
}
-bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
+bool LookupIterator::DictCanStayConst(Object value) const {
DCHECK(!IsElement(*holder_));
DCHECK(!holder_->HasFastProperties(isolate_));
DCHECK(!holder_->IsJSGlobalObject());
@@ -960,7 +967,7 @@ bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
}
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
Object current_value;
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dict = holder->property_dictionary_swiss();
current_value = dict.ValueAt(dictionary_entry());
} else {
@@ -968,11 +975,7 @@ bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
current_value = dict.ValueAt(dictionary_entry());
}
- if (current_value.IsUninitialized(isolate()) || current_value == value) {
- return true;
- }
- return current_value.IsNumber(isolate_) && value.IsNumber(isolate_) &&
- Object::SameNumberValue(current_value.Number(), value.Number());
+ return current_value.IsUninitialized(isolate());
}
int LookupIterator::GetFieldDescriptorIndex() const {
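CanStayConst and DictCanStayConst above no longer compare the incoming value against the stored one: a field keeps PropertyConstness::kConst only if the store is the initializing one (the slot is still uninitialized, or still the hole NaN for doubles). A simplified sketch of that rule, with std::optional standing in for the uninitialized sentinel:

#include <iostream>
#include <optional>

struct FieldStub {
  std::optional<double> value;  // std::nullopt plays the role of the
                                // uninitialized / hole marker.
  bool is_const = true;
};

// Returns the constness the field has after storing |v|: only the first
// (initializing) store may keep it constant; any later store, even one
// writing the same number, demotes it to mutable.
bool StoreAndReportConst(FieldStub& field, double v) {
  if (field.value.has_value()) field.is_const = false;
  field.value = v;
  return field.is_const;
}

int main() {
  FieldStub f;
  std::cout << StoreAndReportConst(f, 1.0) << "\n";  // 1: initializing store
  std::cout << StoreAndReportConst(f, 1.0) << "\n";  // 0: re-store demotes
}

The second store demotes the field even though the value is unchanged, mirroring the stricter rule the hunks introduce.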
@@ -997,7 +1000,7 @@ FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK(!IsElement(*holder_));
- return FieldIndex::ForDescriptor(holder_->map(isolate_), descriptor_number());
+ return FieldIndex::ForDetails(holder_->map(isolate_), property_details_);
}
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
@@ -1029,7 +1032,7 @@ Handle<Object> LookupIterator::GetDataValue(SeqCstAccessTag tag) const {
DCHECK_EQ(PropertyKind::kData, property_details_.kind());
Handle<JSSharedStruct> holder = GetHolder<JSSharedStruct>();
FieldIndex field_index =
- FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number());
+ FieldIndex::ForDetails(holder->map(isolate_), property_details_);
return JSObject::FastPropertyAt(
isolate_, holder, property_details_.representation(), field_index, tag);
}
@@ -1059,7 +1062,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
// equal to |value|.
DCHECK_IMPLIES(!initializing_store && property_details_.constness() ==
PropertyConstness::kConst,
- IsConstFieldValueEqualTo(*value));
+ CanStayConst(*value));
JSObject::cast(*holder).WriteToField(descriptor_number(),
property_details_, *value);
} else {
@@ -1083,9 +1086,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_IMPLIES(
V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !initializing_store &&
property_details_.constness() == PropertyConstness::kConst,
- holder->IsJSProxy(isolate_) || IsConstDictValueEqualTo(*value));
+ holder->IsJSProxy(isolate_) || DictCanStayConst(*value));
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dictionary =
holder->property_dictionary_swiss(isolate_);
dictionary.ValueAtPut(dictionary_entry(), *value);
@@ -1284,7 +1287,7 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
property_details_ = descriptors.GetDetails(number_);
} else {
DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
SwissNameDictionary dict = holder.property_dictionary_swiss(isolate_);
number_ = dict.FindEntry(isolate(), *name_);
if (number_.is_not_found()) return NotFound(holder);
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 782a09225c..9adee79b30 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -257,8 +257,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
void RestartInternal(InterceptorState interceptor_state);
Handle<Object> FetchValue(AllocationPolicy allocation_policy =
AllocationPolicy::kAllocationAllowed) const;
- bool IsConstFieldValueEqualTo(Object value) const;
- bool IsConstDictValueEqualTo(Object value) const;
+ bool CanStayConst(Object value) const;
+ bool DictCanStayConst(Object value) const;
template <bool is_element>
void ReloadPropertyInformation();
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 04cdb99e10..592231d8d5 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -17,6 +17,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/property.h"
#include "src/objects/prototype-info-inl.h"
+#include "src/objects/prototype-info.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/templates-inl.h"
#include "src/objects/transitions-inl.h"
@@ -54,7 +55,8 @@ RELEASE_ACQUIRE_WEAK_ACCESSORS(Map, raw_transitions,
ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true,
value.IsNull() || value.IsJSProxy() ||
value.IsWasmObject() ||
- (value.IsJSObject() && value.map().is_prototype_map()))
+ (value.IsJSObject() && (value.InWritableSharedSpace() ||
+ value.map().is_prototype_map())))
DEF_GETTER(Map, prototype_info, Object) {
Object value = TaggedField<Object, kTransitionsOrPrototypeInfoOffset>::load(
@@ -65,6 +67,14 @@ DEF_GETTER(Map, prototype_info, Object) {
RELEASE_ACQUIRE_ACCESSORS(Map, prototype_info, Object,
kTransitionsOrPrototypeInfoOffset)
+void Map::init_prototype_and_constructor_or_back_pointer(ReadOnlyRoots roots) {
+ HeapObject null = roots.null_value();
+ TaggedField<HeapObject,
+ kConstructorOrBackPointerOrNativeContextOffset>::store(*this,
+ null);
+ TaggedField<HeapObject, kPrototypeOffset>::store(*this, null);
+}
+
// |bit_field| fields.
// Concurrent access to |has_prototype_slot| and |has_non_instance_prototype|
// is explicitly allowlisted here. The former is never modified after the map
@@ -555,10 +565,24 @@ bool Map::is_abandoned_prototype_map() const {
}
bool Map::should_be_fast_prototype_map() const {
- if (!prototype_info().IsPrototypeInfo()) return false;
+ DCHECK(is_prototype_map());
+ if (!has_prototype_info()) return false;
return PrototypeInfo::cast(prototype_info()).should_be_fast_map();
}
+bool Map::has_prototype_info() const {
+ DCHECK(is_prototype_map());
+ return PrototypeInfo::IsPrototypeInfoFast(prototype_info());
+}
+
+bool Map::TryGetPrototypeInfo(PrototypeInfo* result) const {
+ DCHECK(is_prototype_map());
+ Object maybe_proto_info = prototype_info();
+ if (!PrototypeInfo::IsPrototypeInfoFast(maybe_proto_info)) return false;
+ *result = PrototypeInfo::cast(maybe_proto_info);
+ return true;
+}
+
void Map::set_elements_kind(ElementsKind elements_kind) {
CHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
set_bit_field2(
@@ -680,12 +704,12 @@ void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
bool Map::CanTransition() const {
// Only JSObject and subtypes have map transitions and back pointers.
- return InstanceTypeChecker::IsJSObject(instance_type());
+ return InstanceTypeChecker::IsJSObject(*this);
}
-#define DEF_TESTER(Type, ...) \
- bool Map::Is##Type##Map() const { \
- return InstanceTypeChecker::Is##Type(instance_type()); \
+#define DEF_TESTER(Type, ...) \
+ bool Map::Is##Type##Map() const { \
+ return InstanceTypeChecker::Is##Type(*this); \
}
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
@@ -774,6 +798,12 @@ void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
}
// static
+Map Map::GetMapFor(ReadOnlyRoots roots, InstanceType type) {
+ RootIndex map_idx = TryGetMapRootIdxFor(type).value();
+ return Map::unchecked_cast(roots.object_at(map_idx));
+}
+
+// static
Map Map::ElementsTransitionMap(Isolate* isolate, ConcurrencyMode cmode) {
return TransitionsAccessor(isolate, *this, IsConcurrent(cmode))
.SearchSpecial(ReadOnlyRoots(isolate).elements_transition_symbol());
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index c065533248..1bde79e84e 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -4,6 +4,7 @@
#include "src/objects/map.h"
+#include "src/common/assert-scope.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
@@ -58,27 +59,6 @@ base::Optional<JSFunction> Map::GetConstructorFunction(Map map,
return {};
}
-Map Map::GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type) {
- Map map;
- switch (type) {
-#define MAKE_CASE(TYPE, Name, name) \
- case TYPE: \
- map = roots.name##_map(); \
- break;
- STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
-#define MAKE_CASE(TYPE, Name, name) \
- case TYPE: \
- map = roots.name##_map(); \
- break;
- TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- }
- return map;
-}
-
VisitorId Map::GetVisitorId(Map map) {
static_assert(kVisitorIdCount <= 256);
@@ -104,7 +84,7 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitSlicedString;
case kExternalStringTag:
- return kVisitDataObject;
+ return kVisitExternalString;
case kThinStringTag:
return kVisitThinString;
@@ -179,8 +159,8 @@ VisitorId Map::GetVisitorId(Map map) {
case MAP_TYPE:
return kVisitMap;
- case CODE_TYPE:
- return kVisitCode;
+ case INSTRUCTION_STREAM_TYPE:
+ return kVisitInstructionStream;
case CELL_TYPE:
return kVisitCell;
@@ -211,7 +191,8 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitJSArrayBuffer;
case JS_DATA_VIEW_TYPE:
- return kVisitJSDataView;
+ case JS_RAB_GSAB_DATA_VIEW_TYPE:
+ return kVisitJSDataViewOrRabGsabDataView;
case JS_EXTERNAL_OBJECT_TYPE:
return kVisitJSExternalObject;
@@ -242,8 +223,11 @@ VisitorId Map::GetVisitorId(Map map) {
case SWISS_NAME_DICTIONARY_TYPE:
return kVisitSwissNameDictionary;
- case CODE_DATA_CONTAINER_TYPE:
- return kVisitCodeDataContainer;
+ case CODE_TYPE:
+ return kVisitCode;
+
+ case SHARED_FUNCTION_INFO_TYPE:
+ return kVisitSharedFunctionInfo;
case PREPARSE_DATA_TYPE:
return kVisitPreparseData;
@@ -262,6 +246,10 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_DATE_TYPE:
case JS_ERROR_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ITERATOR_FILTER_HELPER_TYPE:
+ case JS_ITERATOR_MAP_HELPER_TYPE:
+ case JS_ITERATOR_TAKE_HELPER_TYPE:
+ case JS_ITERATOR_DROP_HELPER_TYPE:
case JS_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_KEY_ITERATOR_TYPE:
@@ -299,6 +287,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_TEMPORAL_TIME_ZONE_TYPE:
case JS_TEMPORAL_ZONED_DATE_TIME_TYPE:
case JS_TYPED_ARRAY_PROTOTYPE_TYPE:
+ case JS_VALID_ITERATOR_WRAPPER_TYPE:
case JS_RAW_JSON_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_V8_BREAK_ITERATOR_TYPE:
@@ -411,6 +400,8 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitWasmCapiFunctionData;
case WASM_SUSPENDER_OBJECT_TYPE:
return kVisitWasmSuspenderObject;
+ case WASM_NULL_TYPE:
+ return kVisitWasmNull;
#endif // V8_ENABLE_WEBASSEMBLY
#define MAKE_TQ_CASE(TYPE, Name) \
@@ -861,7 +852,7 @@ Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
if (prototype->IsNull(isolate)) {
return isolate->slow_object_with_null_prototype_map();
}
- if (prototype->IsJSObject()) {
+ if (prototype->IsJSObjectThatCanBeTrackedAsPrototype()) {
Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
if (!js_prototype->map().is_prototype_map()) {
JSObject::OptimizeAsPrototype(js_prototype);
@@ -1472,6 +1463,8 @@ Handle<Map> Map::CopyReplaceDescriptors(Isolate* isolate, Handle<Map> map,
DCHECK(!maybe_name.is_null());
ConnectTransition(isolate, map, result, name, simple_flag);
is_connected = true;
+ } else if (isolate->bootstrapper()->IsActive()) {
+ result->InitializeDescriptors(isolate, *descriptors);
} else {
descriptors->GeneralizeAllFields();
result->InitializeDescriptors(isolate, *descriptors);
@@ -2209,9 +2202,12 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
Isolate* isolate) {
- Object maybe_proto_info = prototype->map().prototype_info();
- if (maybe_proto_info.IsPrototypeInfo()) {
- return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
+ DCHECK(prototype->IsJSObjectThatCanBeTrackedAsPrototype());
+ {
+ PrototypeInfo prototype_info;
+ if (prototype->map().TryGetPrototypeInfo(&prototype_info)) {
+ return handle(prototype_info, isolate);
+ }
}
Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
prototype->map().set_prototype_info(*proto_info, kReleaseStore);
@@ -2221,9 +2217,11 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
Isolate* isolate) {
- Object maybe_proto_info = prototype_map->prototype_info();
- if (maybe_proto_info.IsPrototypeInfo()) {
- return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
+ {
+ Object maybe_proto_info = prototype_map->prototype_info();
+ if (PrototypeInfo::IsPrototypeInfoFast(maybe_proto_info)) {
+ return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
+ }
}
Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
prototype_map->set_prototype_info(*proto_info, kReleaseStore);
@@ -2233,7 +2231,8 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
// static
void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
Isolate* isolate) {
- if (value == false && !map->prototype_info().IsPrototypeInfo()) {
+ DCHECK(map->is_prototype_map());
+ if (value == false && !map->has_prototype_info()) {
// "False" is the implicit default value, so there's nothing to do.
return;
}
@@ -2253,7 +2252,7 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
maybe_prototype =
handle(map->GetPrototypeChainRootMap(isolate).prototype(), isolate);
}
- if (!maybe_prototype->IsJSObject()) {
+ if (!maybe_prototype->IsJSObjectThatCanBeTrackedAsPrototype()) {
return handle(Smi::FromInt(Map::kPrototypeChainValid), isolate);
}
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
@@ -2265,14 +2264,14 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Object maybe_cell = prototype->map().prototype_validity_cell(kRelaxedLoad);
// Return existing cell if it's still valid.
if (maybe_cell.IsCell()) {
- Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
- if (cell->value() == Smi::FromInt(Map::kPrototypeChainValid)) {
- return cell;
+ Cell cell = Cell::cast(maybe_cell);
+ if (cell.value() == Smi::FromInt(Map::kPrototypeChainValid)) {
+ return handle(cell, isolate);
}
}
// Otherwise create a new cell.
- Handle<Cell> cell = isolate->factory()->NewCell(
- handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
+ Handle<Cell> cell =
+ isolate->factory()->NewCell(Smi::FromInt(Map::kPrototypeChainValid));
prototype->map().set_prototype_validity_cell(*cell, kRelaxedStore);
return cell;
}
@@ -2294,12 +2293,12 @@ void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
bool enable_prototype_setup_mode) {
RCS_SCOPE(isolate, RuntimeCallCounterId::kMap_SetPrototype);
- if (prototype->IsJSObject()) {
+ if (prototype->IsJSObjectThatCanBeTrackedAsPrototype()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
} else {
DCHECK(prototype->IsNull(isolate) || prototype->IsJSProxy() ||
- prototype->IsWasmObject());
+ prototype->IsWasmObject() || prototype->InWritableSharedSpace());
}
WriteBarrierMode wb_mode =
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 6914a51150..d9d15c3f5f 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -11,6 +11,7 @@
#include "src/objects/heap-object.h"
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
+#include "src/objects/prototype-info.h"
#include "torque-generated/bit-fields.h"
#include "torque-generated/visitor-lists.h"
@@ -30,7 +31,8 @@ enum InstanceType : uint16_t;
V(CoverageInfo) \
V(DataObject) \
V(FeedbackMetadata) \
- V(FixedDoubleArray)
+ V(FixedDoubleArray) \
+ IF_WASM(V, WasmNull)
#define POINTER_VISITOR_ID_LIST(V) \
V(AccessorInfo) \
@@ -38,16 +40,17 @@ enum InstanceType : uint16_t;
V(BytecodeArray) \
V(CallHandlerInfo) \
V(Cell) \
+ V(InstructionStream) \
V(Code) \
- V(CodeDataContainer) \
V(DataHandler) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
+ V(ExternalString) \
V(FeedbackCell) \
V(FreeSpace) \
V(JSApiObject) \
V(JSArrayBuffer) \
- V(JSDataView) \
+ V(JSDataViewOrRabGsabDataView) \
V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
@@ -65,6 +68,7 @@ enum InstanceType : uint16_t;
V(PropertyArray) \
V(PropertyCell) \
V(PrototypeInfo) \
+ V(SharedFunctionInfo) \
V(ShortcutCandidate) \
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
@@ -407,6 +411,8 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
DECL_BOOLEAN_ACCESSORS(is_extensible)
DECL_BOOLEAN_ACCESSORS(is_prototype_map)
inline bool is_abandoned_prototype_map() const;
+ inline bool has_prototype_info() const;
+ inline bool TryGetPrototypeInfo(PrototypeInfo* result) const;
// Whether the instance has been added to the retained map list by
// Heap::AddRetainedMap.
@@ -568,6 +574,11 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
Isolate* isolate, Handle<Map> map, Handle<HeapObject> prototype,
bool enable_prototype_setup_mode = true);
+ // Sets prototype and constructor fields to null. Can be called during
+ // bootstrapping.
+ inline void init_prototype_and_constructor_or_back_pointer(
+ ReadOnlyRoots roots);
+
// [constructor]: points back to the function or FunctionTemplateInfo
// responsible for this map.
// The field overlaps with the back pointer. All maps in a transition tree
@@ -790,7 +801,21 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
inline bool CanTransition() const;
- static Map GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type);
+ static constexpr base::Optional<RootIndex> TryGetMapRootIdxFor(
+ InstanceType type) {
+ switch (type) {
+#define MAKE_CASE(TYPE, Name, name) \
+ case TYPE: \
+ return RootIndex::k##Name##Map;
+ STRUCT_LIST(MAKE_CASE)
+ TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_CASE)
+#undef MAKE_CASE
+ default:
+ break;
+ }
+ return {};
+ }
+ static inline Map GetMapFor(ReadOnlyRoots roots, InstanceType type);
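
GetInstanceTypeMap in map.cc is replaced by the constexpr TryGetMapRootIdxFor above plus the inline GetMapFor wrapper, so the instance-type-to-root mapping can be evaluated at compile time. A self-contained sketch of that pattern, using hypothetical enums rather than V8's:

#include <optional>

enum class InstanceType { kFoo, kBar, kOther };
enum class RootIndex { kFooMap, kBarMap };

constexpr std::optional<RootIndex> MapRootIndexFor(InstanceType type) {
  switch (type) {
    case InstanceType::kFoo: return RootIndex::kFooMap;
    case InstanceType::kBar: return RootIndex::kBarMap;
    default: return std::nullopt;
  }
}

// Both the hit and the miss are usable in constant expressions.
static_assert(*MapRootIndexFor(InstanceType::kFoo) == RootIndex::kFooMap);
static_assert(!MapRootIndexFor(InstanceType::kOther).has_value());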
#define DECL_TESTER(Type, ...) inline bool Is##Type##Map() const;
INSTANCE_TYPE_CHECKERS(DECL_TESTER)
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index f5dd961487..2e3616a6eb 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -84,7 +84,7 @@ HeapObjectReference HeapObjectReference::ClearedValue(
#ifdef V8_COMPRESS_POINTERS
// This is necessary to make pointer decompression computation also
// suitable for cleared weak references.
- Address raw_value = V8HeapCompressionScheme::DecompressTaggedPointer(
+ Address raw_value = V8HeapCompressionScheme::DecompressTagged(
cage_base, kClearedWeakHeapObjectLower32);
#else
Address raw_value = kClearedWeakHeapObjectLower32;
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index a23f33861c..fccb1c238d 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -346,7 +346,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
PropertyCellType::kMutable));
}
}
- JSObject::PreventExtensions(ns, kThrowOnError).ToChecked();
+ JSObject::PreventExtensions(isolate, ns, kThrowOnError).ToChecked();
// Optimize the namespace object as a prototype, for two reasons:
// - The object's map is guaranteed not to be shared. ICs rely on this.
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 8623e7965f..23eb1adfc0 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -78,6 +78,7 @@ class ZoneForwardList;
V(AccessCheckNeeded) \
V(AccessorInfo) \
V(AllocationSite) \
+ V(AlwaysSharedSpaceJSObject) \
V(ArrayList) \
V(BigInt) \
V(BigIntBase) \
@@ -91,8 +92,8 @@ class ZoneForwardList;
V(Callable) \
V(Cell) \
V(ClassBoilerplate) \
+ V(InstructionStream) \
V(Code) \
- V(CodeDataContainer) \
V(CompilationCacheTable) \
V(ConsString) \
V(Constructor) \
@@ -119,6 +120,7 @@ class ZoneForwardList;
V(Foreign) \
V(FreeSpace) \
V(Function) \
+ V(GcSafeCode) \
V(GlobalDictionary) \
V(HandlerTable) \
V(HeapNumber) \
@@ -139,6 +141,7 @@ class ZoneForwardList;
V(JSContextExtensionObject) \
V(JSCustomElementsObject) \
V(JSDataView) \
+ V(JSDataViewOrRabGsabDataView) \
V(JSDate) \
V(JSError) \
V(JSExternalObject) \
@@ -148,6 +151,11 @@ class ZoneForwardList;
V(JSGeneratorObject) \
V(JSGlobalObject) \
V(JSGlobalProxy) \
+ V(JSIteratorHelper) \
+ V(JSIteratorFilterHelper) \
+ V(JSIteratorMapHelper) \
+ V(JSIteratorTakeHelper) \
+ V(JSIteratorDropHelper) \
V(JSMap) \
V(JSMapIterator) \
V(JSMessageObject) \
@@ -157,6 +165,7 @@ class ZoneForwardList;
V(JSPrimitiveWrapper) \
V(JSPromise) \
V(JSProxy) \
+ V(JSRabGsabDataView) \
V(JSRawJson) \
V(JSReceiver) \
V(JSRegExp) \
@@ -180,6 +189,7 @@ class ZoneForwardList;
V(JSTemporalTimeZone) \
V(JSTemporalZonedDateTime) \
V(JSTypedArray) \
+ V(JSValidIteratorWrapper) \
V(JSWeakCollection) \
V(JSWeakRef) \
V(JSWeakMap) \
@@ -200,6 +210,7 @@ class ZoneForwardList;
V(NumberWrapper) \
V(ObjectHashSet) \
V(ObjectHashTable) \
+ V(ObjectTwoHashTable) \
V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
@@ -240,6 +251,16 @@ class ZoneForwardList;
V(TemplateLiteralObject) \
V(ThinString) \
V(TransitionArray) \
+ V(TurboshaftFloat64RangeType) \
+ V(TurboshaftFloat64SetType) \
+ V(TurboshaftFloat64Type) \
+ V(TurboshaftType) \
+ V(TurboshaftWord32RangeType) \
+ V(TurboshaftWord32SetType) \
+ V(TurboshaftWord32Type) \
+ V(TurboshaftWord64RangeType) \
+ V(TurboshaftWord64SetType) \
+ V(TurboshaftWord64Type) \
V(UncompiledData) \
V(UncompiledDataWithPreparseData) \
V(UncompiledDataWithoutPreparseData) \
@@ -268,6 +289,7 @@ class ZoneForwardList;
IF_WASM(V, WasmValueObject) \
IF_WASM(V, WasmSuspenderObject) \
IF_WASM(V, WasmContinuationObject) \
+ IF_WASM(V, WasmNull) \
V(WeakFixedArray) \
V(WeakArrayList) \
V(WeakCell) \
@@ -352,17 +374,17 @@ class ZoneForwardList;
HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) \
HEAP_OBJECT_SPECIALIZED_TYPE_LIST(V)
-#define ODDBALL_LIST(V) \
- V(Undefined, undefined_value) \
- V(Null, null_value) \
- V(TheHole, the_hole_value) \
- V(Exception, exception) \
- V(Uninitialized, uninitialized_value) \
- V(True, true_value) \
- V(False, false_value) \
- V(ArgumentsMarker, arguments_marker) \
- V(OptimizedOut, optimized_out) \
- V(StaleRegister, stale_register)
+#define ODDBALL_LIST(V) \
+ V(Undefined, undefined_value, UndefinedValue) \
+ V(Null, null_value, NullValue) \
+ V(TheHole, the_hole_value, TheHoleValue) \
+ V(Exception, exception, Exception) \
+ V(Uninitialized, uninitialized_value, UninitializedValue) \
+ V(True, true_value, TrueValue) \
+ V(False, false_value, FalseValue) \
+ V(ArgumentsMarker, arguments_marker, ArgumentsMarker) \
+ V(OptimizedOut, optimized_out, OptimizedOut) \
+ V(StaleRegister, stale_register, StaleRegister)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index f5de0bb960..46eee497bc 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -74,6 +74,9 @@
#define DECL_RELAXED_INT32_ACCESSORS(name) \
DECL_RELAXED_PRIMITIVE_ACCESSORS(name, int32_t)
+#define DECL_RELAXED_UINT32_ACCESSORS(name) \
+ DECL_RELAXED_PRIMITIVE_ACCESSORS(name, uint32_t)
+
#define DECL_RELAXED_UINT16_ACCESSORS(name) \
DECL_RELAXED_PRIMITIVE_ACCESSORS(name, uint16_t)
@@ -185,6 +188,14 @@
RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
}
+#define RELAXED_UINT32_ACCESSORS(holder, name, offset) \
+ uint32_t holder::name(RelaxedLoadTag) const { \
+ return RELAXED_READ_UINT32_FIELD(*this, offset); \
+ } \
+ void holder::set_##name(uint32_t value, RelaxedStoreTag) { \
+ RELAXED_WRITE_UINT32_FIELD(*this, offset, value); \
+ }
+
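
The new RELAXED_UINT32_ACCESSORS macro follows the same shape as the existing int32 and uint16 variants: it expands to a relaxed atomic load and store over a raw uint32 field. Roughly what the generated pair amounts to, written without the macro machinery (names are illustrative):

#include <atomic>
#include <cstdint>

struct ExampleHeader {
  std::atomic<uint32_t> raw_flags{0};

  uint32_t flags_relaxed() const {
    return raw_flags.load(std::memory_order_relaxed);
  }
  void set_flags_relaxed(uint32_t value) {
    raw_flags.store(value, std::memory_order_relaxed);
  }
};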
#define RELAXED_UINT16_ACCESSORS(holder, name, offset) \
uint16_t holder::name(RelaxedLoadTag) const { \
return RELAXED_READ_UINT16_FIELD(*this, offset); \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 7aac0e6a10..5df31698bc 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -21,6 +21,8 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-weak-refs.h"
@@ -36,6 +38,7 @@
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
#include "src/objects/turbofan-types-inl.h"
+#include "src/objects/turboshaft-types-inl.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects-inl.h"
@@ -356,10 +359,10 @@ class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot());
DCHECK_GE(object_size, header_size);
IteratePointers(obj, kStartOffset, kCodeOffset, v);
- // Code field is treated as a custom weak pointer. This field is visited as
- // a weak pointer if the Code is baseline code and the bytecode array
- // corresponding to this function is old. In the rest of the cases this
- // field is treated as strong pointer.
+ // Code field is treated as a custom weak pointer. This field
+ // is visited as a weak pointer if the Code is baseline code
+ // and the bytecode array corresponding to this function is old. In the rest
+ // of the cases this field is treated as a strong pointer.
IterateCustomWeakPointer(obj, kCodeOffset, v);
// Iterate rest of the header fields
DCHECK_GE(header_size, kCodeOffset);
@@ -421,7 +424,8 @@ class JSTypedArray::BodyDescriptor final : public BodyDescriptorBase {
}
};
-class JSDataView::BodyDescriptor final : public BodyDescriptorBase {
+class JSDataViewOrRabGsabDataView::BodyDescriptor final
+ : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kEndOfTaggedFieldsOffset) return true;
@@ -432,7 +436,8 @@ class JSDataView::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- // JSDataView contains raw data that the GC does not know about.
+ // JSDataViewOrRabGsabDataView contains raw data that the GC does not know
+ // about.
IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
}
@@ -613,6 +618,25 @@ class PreparseData::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= HeapObject::kHeaderSize &&
+ offset < kEndOfStrongFieldsOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateCustomWeakPointers(obj, kStartOfWeakFieldsOffset,
+ kEndOfWeakFieldsOffset, v);
+ IteratePointers(obj, kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
+ v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject raw_object) { return kSize; }
+};
+
class PromiseOnStack::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -880,36 +904,29 @@ class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
}
};
-#endif // V8_ENABLE_WEBASSEMBLY
-
-class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
+class WasmNull::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ UNREACHABLE();
+ }
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- ExternalString string = ExternalString::cast(obj);
- v->VisitExternalPointer(obj,
- string.RawExternalPointerField(kResourceOffset),
- kExternalStringResourceTag);
- if (string.is_uncached()) return;
- v->VisitExternalPointer(obj,
- string.RawExternalPointerField(kResourceDataOffset),
- kExternalStringResourceDataTag);
- }
+ ObjectVisitor* v) {}
- static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+ static inline int SizeOf(Map map, HeapObject obj) { return WasmNull::kSize; }
};
-class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
+#endif // V8_ENABLE_WEBASSEMBLY
+
+class ExternalString::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- ExternalString string = ExternalString::cast(obj);
+ ExternalString string = ExternalString::unchecked_cast(obj);
v->VisitExternalPointer(obj,
string.RawExternalPointerField(kResourceOffset),
kExternalStringResourceTag);
@@ -919,7 +936,12 @@ class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
kExternalStringResourceDataTag);
}
- static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+ static inline int SizeOf(Map map, HeapObject object) {
+ InstanceType type = map.instance_type();
+ const auto is_uncached =
+ (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
+ return is_uncached ? kUncachedSize : kSizeOfAllExternalStrings;
+ }
};
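
The merged ExternalString body descriptor no longer hard-codes one size: whether the resource data pointer is cached is encoded in the instance type, so SizeOf reduces to a mask test. A toy version with placeholder constants:

#include <cstdint>

constexpr uint16_t kUncachedExternalStringMask = 0x8;  // placeholder values
constexpr uint16_t kUncachedExternalStringTag = 0x8;
constexpr int kUncachedSize = 16;
constexpr int kSizeOfAllExternalStrings = 24;

int ExternalStringSizeFor(uint16_t instance_type) {
  const bool is_uncached = (instance_type & kUncachedExternalStringMask) ==
                           kUncachedExternalStringTag;
  return is_uncached ? kUncachedSize : kSizeOfAllExternalStrings;
}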
class CoverageInfo::BodyDescriptor final : public BodyDescriptorBase {
@@ -936,14 +958,11 @@ class CoverageInfo::BodyDescriptor final : public BodyDescriptorBase {
}
};
-class Code::BodyDescriptor final : public BodyDescriptorBase {
+class InstructionStream::BodyDescriptor final : public BodyDescriptorBase {
public:
- static_assert(kRelocationInfoOffset + kTaggedSize ==
- kDeoptimizationDataOrInterpreterDataOffset);
- static_assert(kDeoptimizationDataOrInterpreterDataOffset + kTaggedSize ==
- kPositionTableOffset);
- static_assert(kPositionTableOffset + kTaggedSize == kCodeDataContainerOffset);
- static_assert(kCodeDataContainerOffset + kTaggedSize == kDataStart);
+ static_assert(static_cast<int>(HeapObject::kHeaderSize) ==
+ static_cast<int>(kCodeOffset));
+ static_assert(kCodeOffset + kTaggedSize == kDataStart);
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
// Slots in code can't be invalid because we never trim code objects.
@@ -958,14 +977,18 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
// GC does not visit data/code in the header and in the body directly.
- IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
+ IteratePointers(obj, kCodeOffset, kDataStart, v);
- RelocIterator it(Code::cast(obj), kRelocModeMask);
+ InstructionStream istream = InstructionStream::cast(obj);
+ Code code = istream.unchecked_code();
+ RelocIterator it(code, istream, code.unchecked_relocation_info(),
+ code.constant_pool(), kRelocModeMask);
v->VisitRelocInfo(&it);
}
@@ -976,7 +999,7 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return Code::unchecked_cast(object).CodeSize();
+ return InstructionStream::unchecked_cast(object).CodeSize();
}
};
@@ -1047,30 +1070,24 @@ class NativeContext::BodyDescriptor final : public BodyDescriptorBase {
}
};
-class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
+class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= HeapObject::kHeaderSize &&
- offset <= CodeDataContainer::kPointerFieldsWeakEndOffset;
+ offset <= Code::kPointerFieldsStrongEndOffset;
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, HeapObject::kHeaderSize,
- CodeDataContainer::kPointerFieldsStrongEndOffset, v);
- IterateCustomWeakPointers(
- obj, CodeDataContainer::kPointerFieldsStrongEndOffset,
- CodeDataContainer::kPointerFieldsWeakEndOffset, v);
+ Code::kPointerFieldsStrongEndOffset, v);
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- v->VisitCodePointer(obj, obj.RawCodeField(kCodeOffset));
- }
+ v->VisitCodePointer(Code::cast(obj),
+ obj.RawCodeField(kInstructionStreamOffset));
}
- static inline int SizeOf(Map map, HeapObject object) {
- return CodeDataContainer::kSize;
- }
+ static inline int SizeOf(Map map, HeapObject object) { return Code::kSize; }
};
class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
@@ -1234,6 +1251,10 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case JS_GENERATOR_OBJECT_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
+ case JS_ITERATOR_FILTER_HELPER_TYPE:
+ case JS_ITERATOR_MAP_HELPER_TYPE:
+ case JS_ITERATOR_TAKE_HELPER_TYPE:
+ case JS_ITERATOR_DROP_HELPER_TYPE:
case JS_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_KEY_ITERATOR_TYPE:
@@ -1276,6 +1297,7 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_VALID_ITERATOR_WRAPPER_TYPE:
case JS_WRAPPED_FUNCTION_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
@@ -1311,6 +1333,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
#if V8_ENABLE_WEBASSEMBLY
case WASM_INSTANCE_OBJECT_TYPE:
return CALL_APPLY(WasmInstanceObject);
+ case WASM_NULL_TYPE:
+ return CALL_APPLY(WasmNull);
#endif // V8_ENABLE_WEBASSEMBLY
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
@@ -1319,6 +1343,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
return CALL_APPLY(JSArrayBuffer);
case JS_DATA_VIEW_TYPE:
return CALL_APPLY(JSDataView);
+ case JS_RAB_GSAB_DATA_VIEW_TYPE:
+ return CALL_APPLY(JSRabGsabDataView);
case JS_TYPED_ARRAY_TYPE:
return CALL_APPLY(JSTypedArray);
case JS_EXTERNAL_OBJECT_TYPE:
@@ -1336,8 +1362,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
return CALL_APPLY(Foreign);
case MAP_TYPE:
return CALL_APPLY(Map);
- case CODE_TYPE:
- return CALL_APPLY(Code);
+ case INSTRUCTION_STREAM_TYPE:
+ return CALL_APPLY(InstructionStream);
case CELL_TYPE:
return CALL_APPLY(Cell);
case PROPERTY_CELL_TYPE:
@@ -1354,10 +1380,12 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
return CALL_APPLY(SmallOrderedHashTable<SmallOrderedNameDictionary>);
case SWISS_NAME_DICTIONARY_TYPE:
return CALL_APPLY(SwissNameDictionary);
- case CODE_DATA_CONTAINER_TYPE:
- return CALL_APPLY(CodeDataContainer);
+ case CODE_TYPE:
+ return CALL_APPLY(Code);
case PREPARSE_DATA_TYPE:
return CALL_APPLY(PreparseData);
+ case SHARED_FUNCTION_INFO_TYPE:
+ return CALL_APPLY(SharedFunctionInfo);
case HEAP_NUMBER_TYPE:
return CALL_APPLY(HeapNumber);
case BYTE_ARRAY_TYPE:
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 5bdc344604..1a0a7ad670 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -47,15 +47,12 @@ namespace internal {
V(CONS_ONE_BYTE_STRING_TYPE) \
V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
V(SLICED_ONE_BYTE_STRING_TYPE) \
- V(THIN_ONE_BYTE_STRING_TYPE) \
V(UNCACHED_EXTERNAL_STRING_TYPE) \
V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
V(SHARED_STRING_TYPE) \
V(SHARED_EXTERNAL_STRING_TYPE) \
- V(SHARED_THIN_STRING_TYPE) \
V(SHARED_ONE_BYTE_STRING_TYPE) \
V(SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SHARED_THIN_ONE_BYTE_STRING_TYPE) \
V(SHARED_UNCACHED_EXTERNAL_STRING_TYPE) \
V(SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE)
@@ -63,8 +60,9 @@ namespace internal {
INSTANCE_TYPE_LIST_BASE(V) \
TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(V)
-// Since string types are not consecutive, this macro is used to
-// iterate over them.
+// Since string types are not consecutive, this macro is used to iterate over
+// them. The order matters for the read-only heap layout: the maps are placed
+// such that string instance types correspond to address ranges of maps.
#define STRING_TYPE_LIST(V) \
V(STRING_TYPE, kVariableSizeSentinel, string, String) \
V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string, \
@@ -85,10 +83,18 @@ namespace internal {
ExternalOneByteString::kUncachedSize, uncached_external_one_byte_string, \
UncachedExternalOneByteString) \
\
- V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
- InternalizedString) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
- one_byte_internalized_string, OneByteInternalizedString) \
+ V(SHARED_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, \
+ shared_external_string, SharedExternalString) \
+ V(SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
+ shared_external_one_byte_string, SharedExternalOneByteString) \
+ V(SHARED_UNCACHED_EXTERNAL_STRING_TYPE, \
+ ExternalTwoByteString::kUncachedSize, shared_uncached_external_string, \
+ SharedUncachedExternalString) \
+ V(SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE, \
+ ExternalOneByteString::kUncachedSize, \
+ shared_uncached_external_one_byte_string, \
+ SharedUncachedExternalOneByteString) \
+ \
V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize, \
external_internalized_string, ExternalInternalizedString) \
V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
@@ -100,28 +106,16 @@ namespace internal {
ExternalOneByteString::kUncachedSize, \
uncached_external_one_byte_internalized_string, \
UncachedExternalOneByteInternalizedString) \
- V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
- V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
- ThinOneByteString) \
\
+ V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
+ InternalizedString) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, \
+ one_byte_internalized_string, OneByteInternalizedString) \
+ \
+ V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
V(SHARED_STRING_TYPE, kVariableSizeSentinel, shared_string, SharedString) \
V(SHARED_ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, \
- shared_one_byte_string, SharedOneByteString) \
- V(SHARED_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, \
- shared_external_string, SharedExternalString) \
- V(SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
- shared_external_one_byte_string, SharedExternalOneByteString) \
- V(SHARED_UNCACHED_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kUncachedSize, shared_uncached_external_string, \
- SharedUncachedExternalString) \
- V(SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE, \
- ExternalOneByteString::kUncachedSize, \
- shared_uncached_external_one_byte_string, \
- SharedUncachedExternalOneByteString) \
- V(SHARED_THIN_STRING_TYPE, ThinString::kSize, shared_thin_string, \
- SharedThinString) \
- V(SHARED_THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, \
- shared_thin_one_byte_string, SharedThinOneByteString)
+ shared_one_byte_string, SharedOneByteString)
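
The reshuffled STRING_TYPE_LIST ordering above feeds that read-only layout: when every map of a given string category sits in one contiguous slice of the read-only heap, a type predicate can become a single range check on the map address. A hypothetical illustration (the bounds are made up):

#include <cstdint>

constexpr uint32_t kFirstStringMap = 0x0100;  // made-up compressed addresses
constexpr uint32_t kLastStringMap = 0x01c0;

inline bool IsStringMap(uint32_t compressed_map_ptr) {
  // One unsigned comparison covers the whole [first, last] map range.
  return compressed_map_ptr - kFirstStringMap <=
         kLastStringMap - kFirstStringMap;
}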
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index 76c4c70345..afd86c3856 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -77,11 +77,16 @@ bool Object::IsTaggedIndex() const {
}
bool Object::InSharedHeap() const {
- return IsHeapObject() && HeapObject::cast(*this).InSharedHeap();
+ return IsHeapObject() && HeapObject::cast(*this).InAnySharedSpace();
}
-bool Object::InSharedWritableHeap() const {
- return IsHeapObject() && HeapObject::cast(*this).InSharedWritableHeap();
+bool Object::InWritableSharedSpace() const {
+ return IsHeapObject() && HeapObject::cast(*this).InWritableSharedSpace();
+}
+
+bool Object::IsJSObjectThatCanBeTrackedAsPrototype() const {
+ return IsHeapObject() &&
+ HeapObject::cast(*this).IsJSObjectThatCanBeTrackedAsPrototype();
}
#define IS_TYPE_FUNCTION_DEF(type_) \
@@ -94,19 +99,15 @@ bool Object::InSharedWritableHeap() const {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
IS_TYPE_FUNCTION_DEF(HashTableBase)
IS_TYPE_FUNCTION_DEF(SmallOrderedHashTable)
-IS_TYPE_FUNCTION_DEF(CodeT)
#undef IS_TYPE_FUNCTION_DEF
-#define IS_TYPE_FUNCTION_DEF(Type, Value) \
+#define IS_TYPE_FUNCTION_DEF(Type, Value, _) \
bool Object::Is##Type(Isolate* isolate) const { \
return Is##Type(ReadOnlyRoots(isolate)); \
} \
bool Object::Is##Type(LocalIsolate* isolate) const { \
return Is##Type(ReadOnlyRoots(isolate)); \
} \
- bool Object::Is##Type(ReadOnlyRoots roots) const { \
- return SafeEquals(roots.Value()); \
- } \
bool Object::Is##Type() const { \
return IsHeapObject() && HeapObject::cast(*this).Is##Type(); \
} \
@@ -123,6 +124,22 @@ IS_TYPE_FUNCTION_DEF(CodeT)
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
+#if V8_STATIC_ROOTS_BOOL
+#define IS_TYPE_FUNCTION_DEF(Type, Value, CamelName) \
+ bool Object::Is##Type(ReadOnlyRoots roots) const { \
+ SLOW_DCHECK(CheckObjectComparisonAllowed(ptr(), roots.Value().ptr())); \
+ return V8HeapCompressionScheme::CompressObject(ptr()) == \
+ StaticReadOnlyRoot::k##CamelName; \
+ }
+#else
+#define IS_TYPE_FUNCTION_DEF(Type, Value, _) \
+ bool Object::Is##Type(ReadOnlyRoots roots) const { \
+ return (*this) == roots.Value(); \
+ }
+#endif
+ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
+
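
Under V8_STATIC_ROOTS_BOOL the oddball predicates above compare the compressed pointer against a compile-time StaticReadOnlyRoot constant instead of loading the root object first. A toy model of that fast path (the constant and the compression are stand-ins):

#include <cstdint>

constexpr uint32_t kCompressedUndefinedValue = 0x11d1;  // stand-in constant

inline uint32_t CompressObject(uintptr_t tagged_ptr) {
  return static_cast<uint32_t>(tagged_ptr);  // low 32 bits within the cage
}

inline bool IsUndefined(uintptr_t tagged_ptr) {
  return CompressObject(tagged_ptr) == kCompressedUndefinedValue;
}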
bool Object::IsNullOrUndefined(Isolate* isolate) const {
return IsNullOrUndefined(ReadOnlyRoots(isolate));
}
@@ -181,13 +198,22 @@ void Object::Relaxed_WriteField(size_t offset, T value) {
static_cast<AtomicT>(value));
}
-bool HeapObject::InSharedHeap() const {
+bool HeapObject::InAnySharedSpace() const {
if (IsReadOnlyHeapObject(*this)) return V8_SHARED_RO_HEAP_BOOL;
- return InSharedWritableHeap();
+ return InWritableSharedSpace();
}
-bool HeapObject::InSharedWritableHeap() const {
- return BasicMemoryChunk::FromHeapObject(*this)->InSharedHeap();
+bool HeapObject::InWritableSharedSpace() const {
+ return BasicMemoryChunk::FromHeapObject(*this)->InWritableSharedSpace();
+}
+
+bool HeapObject::InReadOnlySpace() const { return IsReadOnlyHeapObject(*this); }
+
+bool HeapObject::IsJSObjectThatCanBeTrackedAsPrototype() const {
+ // Do not optimize objects in the shared heap as prototypes, because doing
+ // so is not thread-safe. Objects in the shared heap have fixed layouts and
+ // their maps never change.
+ return IsJSObject() && !InWritableSharedSpace();
}
bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
@@ -202,11 +228,6 @@ bool HeapObject::IsNullOrUndefined() const {
return IsNullOrUndefined(GetReadOnlyRoots());
}
-DEF_GETTER(HeapObject, IsCodeT, bool) {
- return V8_EXTERNAL_CODE_SPACE_BOOL ? IsCodeDataContainer(cage_base)
- : IsCode(cage_base);
-}
-
DEF_GETTER(HeapObject, IsUniqueName, bool) {
return IsInternalizedString(cage_base) || IsSymbol(cage_base);
}
@@ -248,8 +269,8 @@ DEF_GETTER(HeapObject, IsConsString, bool) {
}
DEF_GETTER(HeapObject, IsThinString, bool) {
- if (!IsString(cage_base)) return false;
- return StringShape(String::cast(*this).map(cage_base)).IsThin();
+ InstanceType type = map(cage_base).instance_type();
+ return type == THIN_STRING_TYPE;
}
DEF_GETTER(HeapObject, IsSlicedString, bool) {
@@ -395,6 +416,10 @@ DEF_GETTER(HeapObject, IsObjectHashTable, bool) {
return IsHashTable(cage_base);
}
+DEF_GETTER(HeapObject, IsObjectTwoHashTable, bool) {
+ return IsHashTable(cage_base);
+}
+
DEF_GETTER(HeapObject, IsHashTableBase, bool) { return IsHashTable(cage_base); }
bool Object::IsPrimitive() const {
@@ -725,34 +750,41 @@ Map MapWord::ToMap() const {
}
bool MapWord::IsForwardingAddress() const {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // When external code space is enabled forwarding pointers are encoded as
+ // Smi representing a diff from the source object address in kObjectAlignment
+ // chunks.
+ return HAS_SMI_TAG(value_);
+#else
return (value_ & kForwardingTagMask) == kForwardingTag;
+#endif // V8_EXTERNAL_CODE_SPACE
}
-MapWord MapWord::FromForwardingAddress(HeapObject object) {
+MapWord MapWord::FromForwardingAddress(HeapObject map_word_host,
+ HeapObject object) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // When external code space is enabled forwarding pointers are encoded as
+ // Smi representing a diff from the source object address in kObjectAlignment
+ // chunks.
+ intptr_t diff = static_cast<intptr_t>(object.ptr() - map_word_host.ptr());
+ DCHECK(IsAligned(diff, kObjectAlignment));
+ MapWord map_word(Smi::FromIntptr(diff / kObjectAlignment).ptr());
+ DCHECK(map_word.IsForwardingAddress());
+ return map_word;
+#else
return MapWord(object.ptr() - kHeapObjectTag);
+#endif // V8_EXTERNAL_CODE_SPACE
}
-HeapObject MapWord::ToForwardingAddress() {
- DCHECK(IsForwardingAddress());
- HeapObject obj = HeapObject::FromAddress(value_);
- // For objects allocated outside of the main pointer compression cage the
- // variant with explicit cage base must be used.
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(obj));
- return obj;
-}
-
-HeapObject MapWord::ToForwardingAddress(PtrComprCageBase host_cage_base) {
+HeapObject MapWord::ToForwardingAddress(HeapObject map_word_host) {
DCHECK(IsForwardingAddress());
#ifdef V8_EXTERNAL_CODE_SPACE
- // Recompress value_ using proper host_cage_base and compression scheme
- // since the map word is decompressed using the default compression scheme
- // in an assumption it'll contain Map pointer.
- // TODO(v8:11880): this code must be updated once a different scheme is used
- // for external code fields.
- Tagged_t compressed = V8HeapCompressionScheme::CompressTagged(value_);
- Address value = V8HeapCompressionScheme::DecompressTaggedPointer(
- host_cage_base, compressed);
- return HeapObject::FromAddress(value);
+ // When external code space is enabled forwarding pointers are encoded as
+ // Smi representing a diff from the source object address in kObjectAlignment
+ // chunks.
+ intptr_t diff = static_cast<intptr_t>(Smi(value_).value()) * kObjectAlignment;
+ Address address = map_word_host.address() + diff;
+ return HeapObject::FromAddress(address);
#else
return HeapObject::FromAddress(value_);
#endif // V8_EXTERNAL_CODE_SPACE
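
With the external code space enabled, a forwarding map word is now a Smi holding the distance to the target in kObjectAlignment chunks, relative to the forwarded object itself, which is why both FromForwardingAddress and ToForwardingAddress take the map-word host. A rough model with plain integers:

#include <cassert>
#include <cstdint>

constexpr intptr_t kObjectAlignment = 8;  // assumed alignment for the sketch

// Value that would be stored (as a Smi) in the host's map word.
intptr_t EncodeForwarding(uintptr_t host, uintptr_t target) {
  intptr_t diff = static_cast<intptr_t>(target) - static_cast<intptr_t>(host);
  assert(diff % kObjectAlignment == 0);
  return diff / kObjectAlignment;
}

// Recovers the target address from the host address and the stored diff.
uintptr_t DecodeForwarding(uintptr_t host, intptr_t encoded_diff) {
  return host + static_cast<uintptr_t>(encoded_diff * kObjectAlignment);
}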
@@ -777,23 +809,23 @@ void HeapObject::VerifySmiField(int offset) {
#endif
+ReadOnlyRoots HeapObject::EarlyGetReadOnlyRoots() const {
+ return ReadOnlyHeap::EarlyGetReadOnlyRoots(*this);
+}
+
ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
+// TODO(v8:13788): Remove this cage-ful accessor.
ReadOnlyRoots HeapObject::GetReadOnlyRoots(PtrComprCageBase cage_base) const {
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- DCHECK_NE(cage_base.address(), 0);
- return ReadOnlyRoots(Isolate::FromRootAddress(cage_base.address()));
-#else
return GetReadOnlyRoots();
-#endif
}
Map HeapObject::map() const {
- // This method is never used for objects located in code space (Code and
- // free space fillers) and thus it is fine to use auto-computed cage base
- // value.
+ // This method is never used for objects located in code space
+ // (InstructionStream and free space fillers) and thus it is fine to use
+ // auto-computed cage base value.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map(cage_base);
@@ -866,7 +898,8 @@ void HeapObject::set_map(Map value, MemoryOrder order, VerificationMode mode) {
HeapVerifier::VerifyObjectLayoutChange(heap, *this, value);
}
}
- set_map_word(MapWord::FromMap(value), order);
+ set_map_word(value, order);
+ Heap::NotifyObjectLayoutChangeDone(*this);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
if (emit_write_barrier == EmitWriteBarrier::kYes) {
@@ -880,8 +913,7 @@ void HeapObject::set_map(Map value, MemoryOrder order, VerificationMode mode) {
}
void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
- MapWord mapword = MapWord::FromMap(value);
- set_map_word(mapword, kRelaxedStore);
+ set_map_word(value, kRelaxedStore);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
@@ -901,9 +933,9 @@ ObjectSlot HeapObject::map_slot() const {
}
MapWord HeapObject::map_word(RelaxedLoadTag tag) const {
- // This method is never used for objects located in code space (Code and
- // free space fillers) and thus it is fine to use auto-computed cage base
- // value.
+ // This method is never used for objects located in code space
+ // (InstructionStream and free space fillers) and thus it is fine to use
+ // auto-computed cage base value.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map_word(cage_base, tag);
@@ -913,14 +945,20 @@ MapWord HeapObject::map_word(PtrComprCageBase cage_base,
return MapField::Relaxed_Load_Map_Word(cage_base, *this);
}
-void HeapObject::set_map_word(MapWord map_word, RelaxedStoreTag) {
- MapField::Relaxed_Store_Map_Word(*this, map_word);
+void HeapObject::set_map_word(Map map, RelaxedStoreTag) {
+ MapField::Relaxed_Store_Map_Word(*this, MapWord::FromMap(map));
+}
+
+void HeapObject::set_map_word_forwarded(HeapObject target_object,
+ RelaxedStoreTag) {
+ MapField::Relaxed_Store_Map_Word(
+ *this, MapWord::FromForwardingAddress(*this, target_object));
}
MapWord HeapObject::map_word(AcquireLoadTag tag) const {
- // This method is never used for objects located in code space (Code and
- // free space fillers) and thus it is fine to use auto-computed cage base
- // value.
+ // This method is never used for objects located in code space
+ // (InstructionStream and free space fillers) and thus it is fine to use
+ // auto-computed cage base value.
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map_word(cage_base, tag);
@@ -930,14 +968,21 @@ MapWord HeapObject::map_word(PtrComprCageBase cage_base,
return MapField::Acquire_Load_No_Unpack(cage_base, *this);
}
-void HeapObject::set_map_word(MapWord map_word, ReleaseStoreTag) {
- MapField::Release_Store_Map_Word(*this, map_word);
+void HeapObject::set_map_word(Map map, ReleaseStoreTag) {
+ MapField::Release_Store_Map_Word(*this, MapWord::FromMap(map));
}
-bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,
- MapWord new_map_word) {
- Tagged_t result =
- MapField::Release_CompareAndSwap(*this, old_map_word, new_map_word);
+void HeapObject::set_map_word_forwarded(HeapObject target_object,
+ ReleaseStoreTag) {
+ MapField::Release_Store_Map_Word(
+ *this, MapWord::FromForwardingAddress(*this, target_object));
+}
+
+bool HeapObject::release_compare_and_swap_map_word_forwarded(
+ MapWord old_map_word, HeapObject new_target_object) {
+ Tagged_t result = MapField::Release_CompareAndSwap(
+ *this, old_map_word,
+ MapWord::FromForwardingAddress(*this, new_target_object));
return result == static_cast<Tagged_t>(old_map_word.ptr());
}
@@ -1198,24 +1243,33 @@ bool Object::IsShared() const {
}
// Check if this object is already shared.
- switch (object.map().instance_type()) {
+ InstanceType instance_type = object.map().instance_type();
+ if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(instance_type)) {
+ DCHECK(object.InAnySharedSpace());
+ return true;
+ }
+ switch (instance_type) {
case SHARED_STRING_TYPE:
case SHARED_ONE_BYTE_STRING_TYPE:
- case JS_SHARED_ARRAY_TYPE:
- case JS_SHARED_STRUCT_TYPE:
- case JS_ATOMICS_MUTEX_TYPE:
- case JS_ATOMICS_CONDITION_TYPE:
- DCHECK(object.InSharedHeap());
+ case SHARED_EXTERNAL_STRING_TYPE:
+ case SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SHARED_UNCACHED_EXTERNAL_STRING_TYPE:
+ case SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ DCHECK(object.InAnySharedSpace());
return true;
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
if (v8_flags.shared_string_table) {
- DCHECK(object.InSharedHeap());
+ DCHECK(object.InAnySharedSpace());
return true;
}
return false;
case HEAP_NUMBER_TYPE:
- return object.InSharedWritableHeap();
+ return object.InWritableSharedSpace();
default:
return false;
}
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 05a8c6f600..43ea4a7350 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -190,7 +190,8 @@ std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
INSTANCE_TYPE_LIST(WRITE_TYPE)
#undef WRITE_TYPE
}
- UNREACHABLE();
+ return os << "[unknown instance type " << static_cast<int16_t>(instance_type)
+ << "]";
}
std::ostream& operator<<(std::ostream& os, PropertyCellType type) {
@@ -487,14 +488,7 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
} while (currInput->IsJSProxy());
return NoSideEffectsToString(isolate, currInput);
} else if (input->IsBigInt()) {
- MaybeHandle<String> maybe_string =
- BigInt::ToString(isolate, Handle<BigInt>::cast(input), 10, kDontThrow);
- Handle<String> result;
- if (maybe_string.ToHandle(&result)) return result;
- // BigInt-to-String conversion can fail on 32-bit platforms where
- // String::kMaxLength is too small to fit this BigInt.
- return isolate->factory()->NewStringFromStaticChars(
- "<a very large BigInt>");
+ return BigInt::NoSideEffectsToString(isolate, Handle<BigInt>::cast(input));
} else if (input->IsFunction()) {
// -- F u n c t i o n
Handle<String> fun_str;
@@ -657,6 +651,9 @@ bool Object::BooleanValue(IsolateT* isolate) {
DCHECK(IsHeapObject());
if (IsBoolean()) return IsTrue(isolate);
if (IsNullOrUndefined(isolate)) return false;
+#ifdef V8_ENABLE_WEBASSEMBLY
+ if (IsWasmNull()) return false;
+#endif
if (IsUndetectable()) return false; // Undetectable object is false.
if (IsString()) return String::cast(*this).length() != 0;
if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(*this).value());
@@ -1168,9 +1165,7 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
return result;
}
case LookupIterator::WASM_OBJECT:
- THROW_NEW_ERROR(it->isolate(),
- NewTypeError(MessageTemplate::kWasmObjectsAreOpaque),
- Object);
+ return it->isolate()->factory()->undefined_value();
case LookupIterator::INTERCEPTOR: {
bool done;
Handle<Object> result;
@@ -1391,7 +1386,7 @@ MaybeHandle<HeapObject> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
HeapObject);
}
// 9. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(is_extensible, MaybeHandle<HeapObject>());
// 10. If extensibleTarget is true, return handlerProto.
if (is_extensible.FromJust()) return Handle<HeapObject>::cast(handler_proto);
@@ -2031,22 +2026,19 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) {
os << ">";
break;
}
- case CODE_DATA_CONTAINER_TYPE: {
-#ifdef V8_EXTERNAL_CODE_SPACE
- CodeDataContainer code = CodeDataContainer::cast(*this);
- os << "<CodeDataContainer " << CodeKindToString(code.kind());
+ case CODE_TYPE: {
+ Code code = Code::cast(*this);
+ os << "<Code " << CodeKindToString(code.kind());
if (code.is_builtin()) {
os << " " << Builtins::name(code.builtin_id());
}
os << ">";
-#else
- os << "<CodeDataContainer>";
-#endif // V8_EXTERNAL_CODE_SPACE
break;
}
- case CODE_TYPE: {
- Code code = Code::cast(*this);
- os << "<Code " << CodeKindToString(code.kind());
+ case INSTRUCTION_STREAM_TYPE: {
+ InstructionStream istream = InstructionStream::cast(*this);
+ Code code = istream.code(kAcquireLoad);
+ os << "<InstructionStream " << CodeKindToString(code.kind());
if (code.is_builtin()) {
os << " " << Builtins::name(code.builtin_id());
}
@@ -2286,8 +2278,8 @@ int HeapObject::SizeFromMap(Map map) const {
TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(MAKE_TORQUE_SIZE_FOR)
#undef MAKE_TORQUE_SIZE_FOR
- if (instance_type == CODE_TYPE) {
- return Code::unchecked_cast(*this).CodeSize();
+ if (instance_type == INSTRUCTION_STREAM_TYPE) {
+ return InstructionStream::unchecked_cast(*this).CodeSize();
}
if (instance_type == COVERAGE_INFO_TYPE) {
return CoverageInfo::SizeFor(
@@ -2304,6 +2296,9 @@ int HeapObject::SizeFromMap(Map map) const {
if (instance_type == WASM_ARRAY_TYPE) {
return WasmArray::SizeFor(map, WasmArray::unchecked_cast(*this).length());
}
+ if (instance_type == WASM_NULL_TYPE) {
+ return WasmNull::kSize;
+ }
#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
return EmbedderDataArray::SizeFor(
@@ -2316,8 +2311,9 @@ bool HeapObject::NeedsRehashing(PtrComprCageBase cage_base) const {
bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // Use map() only when it's guaranteed that it's not a Code object.
- DCHECK_IMPLIES(instance_type != CODE_TYPE,
+ // Use map() only when it's guaranteed that it's not a InstructionStream
+ // object.
+ DCHECK_IMPLIES(instance_type != INSTRUCTION_STREAM_TYPE,
instance_type == map().instance_type());
} else {
DCHECK_EQ(instance_type, map().instance_type());
@@ -2412,6 +2408,7 @@ void HeapObject::RehashBasedOnMap(IsolateT* isolate) {
SimpleNumberDictionary::cast(*this).Rehash(isolate);
break;
case DESCRIPTOR_ARRAY_TYPE:
+ case STRONG_DESCRIPTOR_ARRAY_TYPE:
DCHECK_LE(1, DescriptorArray::cast(*this).number_of_descriptors());
DescriptorArray::cast(*this).Sort();
break;
@@ -2458,7 +2455,6 @@ void DescriptorArray::GeneralizeAllFields() {
details = details.CopyWithRepresentation(Representation::Tagged());
if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(PropertyKind::kData, details.kind());
- details = details.CopyWithConstness(PropertyConstness::kMutable);
SetValue(i, MaybeObject::FromObject(FieldType::Any()));
}
SetDetails(i, details);
@@ -3104,7 +3100,7 @@ Maybe<bool> JSProxy::CheckHasTrap(Isolate* isolate, Handle<Name> name,
return Nothing<bool>();
}
// 9b ii. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(extensible_target, Nothing<bool>());
// 9b iii. If extensibleTarget is false, throw a TypeError exception.
if (!extensible_target.FromJust()) {
@@ -3223,7 +3219,7 @@ Maybe<bool> JSProxy::CheckDeleteTrap(Isolate* isolate, Handle<Name> name,
return Nothing<bool>();
}
// 13. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(extensible_target, Nothing<bool>());
// 14. If extensibleTarget is false, throw a TypeError exception.
if (!extensible_target.FromJust()) {
@@ -3531,7 +3527,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
JSReceiver::GetOwnPropertyDescriptor(isolate, target, key, &target_desc);
MAYBE_RETURN(target_found, Nothing<bool>());
// 12. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(maybe_extensible, Nothing<bool>());
bool extensible_target = maybe_extensible.FromJust();
// 13. If Desc has a [[Configurable]] field and if Desc.[[Configurable]]
@@ -3623,7 +3619,7 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
PropertyDetails details(PropertyKind::kData, DONT_ENUM,
PropertyConstness::kMutable);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dict(proxy->property_dictionary_swiss(),
isolate);
Handle<SwissNameDictionary> result =
@@ -3704,7 +3700,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
return Nothing<bool>();
}
// 11c. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(extensible_target, Nothing<bool>());
// 11d. (Assert)
// 11e. If extensibleTarget is false, throw a TypeError exception.
@@ -3717,7 +3713,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
return Just(false);
}
// 12. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(extensible_target, Nothing<bool>());
// 13. Let resultDesc be ? ToPropertyDescriptor(trapResultObj).
if (!PropertyDescriptor::ToPropertyDescriptor(isolate, trap_result_obj,
@@ -3785,7 +3781,7 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
if (trap->IsUndefined(isolate)) {
- return JSReceiver::PreventExtensions(target, should_throw);
+ return JSReceiver::PreventExtensions(isolate, target, should_throw);
}
Handle<Object> trap_result;
@@ -3801,7 +3797,7 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
}
// Enforce the invariant.
- Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ Maybe<bool> target_result = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(target_result, Nothing<bool>());
if (target_result.FromJust()) {
isolate->Throw(*factory->NewTypeError(
@@ -3829,7 +3825,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
if (trap->IsUndefined(isolate)) {
- return JSReceiver::IsExtensible(target);
+ return JSReceiver::IsExtensible(isolate, target);
}
Handle<Object> trap_result;
@@ -3840,7 +3836,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
Nothing<bool>());
// Enforce the invariant.
- Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ Maybe<bool> target_result = JSReceiver::IsExtensible(isolate, target);
MAYBE_RETURN(target_result, Nothing<bool>());
if (target_result.FromJust() != trap_result->BooleanValue(isolate)) {
isolate->Throw(
@@ -4045,6 +4041,21 @@ Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
return array;
}
+Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
+ Smi obj1) {
+ int length = array->Length();
+ array = EnsureSpace(isolate, array, length + 1);
+ // Check that GC didn't remove elements from the array.
+ DCHECK_EQ(array->Length(), length);
+ {
+ DisallowGarbageCollection no_gc;
+ ArrayList raw_array = *array;
+ raw_array.Set(length, obj1);
+ raw_array.SetLength(length + 1);
+ }
+ return array;
+}
+
// static
Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
Handle<Object> obj1, Handle<Object> obj2) {
@@ -4427,14 +4438,14 @@ template Handle<DescriptorArray> DescriptorArray::Allocate(
void DescriptorArray::Initialize(EnumCache empty_enum_cache,
HeapObject undefined_value,
- int nof_descriptors, int slack) {
+ int nof_descriptors, int slack,
+ uint32_t raw_gc_state) {
DCHECK_GE(nof_descriptors, 0);
DCHECK_GE(slack, 0);
DCHECK_LE(nof_descriptors + slack, kMaxNumberOfDescriptors);
set_number_of_all_descriptors(nof_descriptors + slack);
set_number_of_descriptors(nof_descriptors);
- set_raw_number_of_marked_descriptors(0);
- set_filler16bits(0);
+ set_raw_gc_state(raw_gc_state, kRelaxedStore);
set_enum_cache(empty_enum_cache, SKIP_WRITE_BARRIER);
MemsetTagged(GetDescriptorSlot(0), undefined_value,
number_of_all_descriptors() * kEntrySize);
@@ -4539,28 +4550,6 @@ void DescriptorArray::CheckNameCollisionDuringInsertion(Descriptor* desc,
}
}
-int16_t DescriptorArray::UpdateNumberOfMarkedDescriptors(
- unsigned mark_compact_epoch, int16_t new_marked) {
- static_assert(kMaxNumberOfDescriptors <=
- NumberOfMarkedDescriptors::kMaxNumberOfMarkedDescriptors);
- int16_t old_raw_marked = raw_number_of_marked_descriptors();
- int16_t old_marked =
- NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
- int16_t new_raw_marked =
- NumberOfMarkedDescriptors::encode(mark_compact_epoch, new_marked);
- while (old_marked < new_marked) {
- int16_t actual_raw_marked = CompareAndSwapRawNumberOfMarkedDescriptors(
- old_raw_marked, new_raw_marked);
- if (actual_raw_marked == old_raw_marked) {
- break;
- }
- old_raw_marked = actual_raw_marked;
- old_marked =
- NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
- }
- return old_marked;
-}
-
Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
Handle<AccessorPair> pair) {
Handle<AccessorPair> copy = isolate->factory()->NewAccessorPair();
@@ -4700,20 +4689,33 @@ void WriteFixedArrayToFlat(FixedArray fixed_array, int length, String separator,
}
uint32_t num_separators = 0;
+ uint32_t repeat_last = 0;
for (int i = 0; i < length; i++) {
Object element = fixed_array.get(i);
- const bool element_is_separator_sequence = element.IsSmi();
-
- // If element is a Smi, it represents the number of separators to write.
- if (V8_UNLIKELY(element_is_separator_sequence)) {
- CHECK(element.ToUint32(&num_separators));
- // Verify that Smis (number of separators) only occur when necessary:
- // 1) at the beginning
- // 2) at the end
- // 3) when the number of separators > 1
- // - It is assumed that consecutive Strings will have one separator,
- // so there is no need for a Smi.
- DCHECK(i == 0 || i == length - 1 || num_separators > 1);
+ const bool element_is_special = element.IsSmi();
+
+ // If element is a positive Smi, it represents the number of separators to
+    // write. If it is a negative Smi, it represents the number of times the
+    // last
+ // string is repeated.
+ if (V8_UNLIKELY(element_is_special)) {
+ int count;
+ CHECK(element.ToInt32(&count));
+ if (count > 0) {
+ num_separators = count;
+ // Verify that Smis (number of separators) only occur when necessary:
+ // 1) at the beginning
+ // 2) at the end
+ // 3) when the number of separators > 1
+        //     - It is assumed that consecutive Strings will have one
+        //       separator, so there is no need for a Smi.
+ DCHECK(i == 0 || i == length - 1 || num_separators > 1);
+ } else {
+ repeat_last = -count;
+ // Repeat is only possible when the previous element is not special.
+ DCHECK_GT(i, 0);
+ DCHECK(fixed_array.get(i - 1).IsString());
+ }
}
// Write separator(s) if necessary.
@@ -4733,11 +4735,41 @@ void WriteFixedArrayToFlat(FixedArray fixed_array, int length, String separator,
sink += separator_length;
}
}
+ num_separators = 0;
}
- if (V8_UNLIKELY(element_is_separator_sequence)) {
- num_separators = 0;
- } else {
+ // Repeat the last written string |repeat_last| times (including
+ // separators).
+ if (V8_UNLIKELY(repeat_last > 0)) {
+ Object last_element = fixed_array.get(i - 1);
+ int string_length = String::cast(last_element).length();
+ // The implemented logic requires that string length is > 0. Empty strings
+ // are handled by repeating the separator (positive smi in the fixed
+ // array) already.
+ DCHECK_GT(string_length, 0);
+ int length_with_sep = string_length + separator_length;
+      // Only copy separators between elements, not at the start or end.
+ sinkchar* copy_end =
+ sink + (length_with_sep * repeat_last) - separator_length;
+ int copy_length = length_with_sep;
+ while (sink < copy_end - copy_length) {
+ DCHECK_LE(sink + copy_length, sink_end);
+ memcpy(sink, sink - copy_length, copy_length * sizeof(sinkchar));
+ sink += copy_length;
+ copy_length *= 2;
+ }
+ int remaining = static_cast<int>(copy_end - sink);
+ if (remaining > 0) {
+ DCHECK_LE(sink + remaining, sink_end);
+ memcpy(sink, sink - remaining - separator_length,
+ remaining * sizeof(sinkchar));
+ sink += remaining;
+ }
+ repeat_last = 0;
+ num_separators = 1;
+ }
+
+ if (V8_LIKELY(!element_is_special)) {
DCHECK(element.IsString());
String string = String::cast(element);
const int string_length = string.length();
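
The repeat path added above doubles the memcpy window on every step, so repeating the last string costs O(log repeat) copies rather than one copy per repetition. A minimal standalone sketch of that doubling-copy idea, in plain C++ with illustrative names and buffer sizes (not V8 code):

#include <cstddef>
#include <cstdio>
#include <cstring>

// Repeat the `chunk` characters written just before `sink` a further `repeat`
// times, doubling the copy window each step, as the loop above does. Assumes
// the destination buffer has room for chunk * repeat additional characters.
char* RepeatTail(char* sink, size_t chunk, size_t repeat) {
  char* end = sink + chunk * repeat;  // first char past the repeated region
  size_t copy_len = chunk;            // window copied per memcpy, doubled below
  while (sink + copy_len <= end) {
    std::memcpy(sink, sink - copy_len, copy_len);  // source is already flat
    sink += copy_len;
    copy_len *= 2;
  }
  size_t remaining = static_cast<size_t>(end - sink);
  if (remaining > 0) std::memcpy(sink, sink - remaining, remaining);
  return end;
}

int main() {
  char buf[64] = "ab,";                    // one "unit" already written
  char* end = RepeatTail(buf + 3, 3, 4);   // append four more "ab," units
  *end = '\0';
  std::printf("%s\n", buf);                // prints ab,ab,ab,ab,ab,
}
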
@@ -5120,11 +5152,6 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Handle<Script> script, IsolateT* isolate,
FunctionLiteral* function_literal) {
int function_literal_id = function_literal->function_literal_id();
- if (V8_UNLIKELY(script->type() == Script::TYPE_WEB_SNAPSHOT &&
- function_literal_id >=
- script->shared_function_info_count())) {
- return FindWebSnapshotSharedFunctionInfo(script, isolate, function_literal);
- }
CHECK_NE(function_literal_id, kFunctionLiteralIdInvalid);
// If this check fails, the problem is most probably the function id
@@ -5146,77 +5173,6 @@ template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Handle<Script> script, LocalIsolate* isolate,
FunctionLiteral* function_literal);
-MaybeHandle<SharedFunctionInfo> Script::FindWebSnapshotSharedFunctionInfo(
- Handle<Script> script, Isolate* isolate,
- FunctionLiteral* function_literal) {
- // We might be able to de-dupe the SFI against a SFI that was
- // created when deserializing the snapshot (or when calling a function which
- // was included in the snapshot). In that case, we can find it based on the
- // start position in shared_function_info_table.
- Handle<ObjectHashTable> shared_function_info_table = handle(
- ObjectHashTable::cast(script->shared_function_info_table()), isolate);
- {
- DisallowHeapAllocation no_gc;
- Object index_object = shared_function_info_table->Lookup(
- handle(Smi::FromInt(function_literal->start_position()), isolate));
- if (!index_object.IsTheHole()) {
- int index = Smi::cast(index_object).value();
- DCHECK_LT(index, script->shared_function_info_count());
- MaybeObject maybe_shared = script->shared_function_infos().Get(index);
- HeapObject heap_object;
- if (!maybe_shared->GetHeapObject(&heap_object)) {
- // We found the correct location but it's not filled in (e.g., the weak
- // pointer to the SharedFunctionInfo has been cleared). Record the
- // location in the FunctionLiteral, so that it will be refilled later.
- // SharedFunctionInfo::SetScript will write the SharedFunctionInfo in
- // the shared_function_infos.
- function_literal->set_function_literal_id(index);
- return MaybeHandle<SharedFunctionInfo>();
- }
- SharedFunctionInfo shared = SharedFunctionInfo::cast(heap_object);
- DCHECK_EQ(shared.StartPosition(), function_literal->start_position());
- DCHECK_EQ(shared.EndPosition(), function_literal->end_position());
- return handle(shared, isolate);
- }
- }
-
- // It's possible that FunctionLiterals which were processed before this one
- // were deduplicated against existing ones. Decrease function_literal_id to
- // avoid holes in shared_function_infos.
- int old_length = script->shared_function_info_count();
- int function_literal_id = old_length;
- function_literal->set_function_literal_id(function_literal_id);
-
- // Also add to shared_function_info_table.
- shared_function_info_table = ObjectHashTable::Put(
- shared_function_info_table,
- handle(Smi::FromInt(function_literal->start_position()), isolate),
- handle(Smi::FromInt(function_literal_id), isolate));
- script->set_shared_function_info_table(*shared_function_info_table);
-
- // Grow shared_function_infos if needed (we don't know the correct amount of
- // space needed upfront).
- int new_length = old_length + 1;
- Handle<WeakFixedArray> old_infos =
- handle(script->shared_function_infos(), isolate);
- if (new_length > old_infos->length()) {
- int capacity = WeakArrayList::CapacityForLength(new_length);
- Handle<WeakFixedArray> new_infos(
- isolate->factory()->NewWeakFixedArray(capacity, AllocationType::kOld));
- new_infos->CopyElements(isolate, 0, *old_infos, 0, old_length,
- WriteBarrierMode::UPDATE_WRITE_BARRIER);
- script->set_shared_function_infos(*new_infos);
- }
- return MaybeHandle<SharedFunctionInfo>();
-}
-
-MaybeHandle<SharedFunctionInfo> Script::FindWebSnapshotSharedFunctionInfo(
- Handle<Script> script, LocalIsolate* isolate,
- FunctionLiteral* function_literal) {
- // Off-thread serialization of web snapshots is not implemented.
- UNREACHABLE();
-}
-
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
@@ -5288,7 +5244,7 @@ Maybe<bool> JSProxy::SetPrototype(Isolate* isolate, Handle<JSProxy> proxy,
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
}
// 10. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(isolate, target);
if (is_extensible.IsNothing()) return Nothing<bool>();
// 11. If extensibleTarget is true, return true.
if (is_extensible.FromJust()) {
@@ -5874,7 +5830,7 @@ template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::Rehash(PtrComprCageBase cage_base) {
DisallowGarbageCollection no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
- ReadOnlyRoots roots = GetReadOnlyRoots(cage_base);
+ ReadOnlyRoots roots = EarlyGetReadOnlyRoots();
uint32_t capacity = Capacity();
bool done = false;
for (int probe = 1; !done; probe++) {
@@ -6080,19 +6036,6 @@ Handle<RegisteredSymbolTable> RegisteredSymbolTable::Add(
return table;
}
-Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
- Handle<ObjectHashSet> set,
- Handle<Object> key) {
- int32_t hash = key->GetOrCreateHash(isolate).value();
- if (!set->Has(isolate, key, hash)) {
- set = EnsureCapacity(isolate, set);
- InternalIndex entry = set->FindInsertionEntry(isolate, hash);
- set->set(EntryToIndex(entry), *key);
- set->ElementAdded();
- }
- return set;
-}
-
template <typename Derived, typename Shape>
template <typename IsolateT>
Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
@@ -6106,6 +6049,17 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
return dict;
}
+template <typename IsolateT>
+Handle<NameDictionary> NameDictionary::New(IsolateT* isolate,
+ int at_least_space_for,
+ AllocationType allocation,
+ MinimumCapacity capacity_option) {
+ auto dict = BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
+ isolate, at_least_space_for, allocation, capacity_option);
+ dict->set_flags(kFlagsDefault);
+ return dict;
+}
+
template <typename Derived, typename Shape>
int BaseNameDictionary<Derived, Shape>::NextEnumerationIndex(
Isolate* isolate, Handle<Derived> dictionary) {
@@ -6168,6 +6122,23 @@ Handle<Derived> Dictionary<Derived, Shape>::AtPut(Isolate* isolate,
}
template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::UncheckedAtPut(Isolate* isolate,
+ Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details) {
+ InternalIndex entry = dictionary->FindEntry(isolate, key);
+
+  // If the entry is absent, add it; otherwise overwrite the value.
+ if (entry.is_not_found()) {
+ Derived::UncheckedAdd(isolate, dictionary, key, value, details);
+ } else {
+ // We don't need to copy over the enumeration index.
+ dictionary->ValueAtPut(entry, *value);
+ if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
+ }
+}
+
+template <typename Derived, typename Shape>
template <typename IsolateT>
Handle<Derived>
BaseNameDictionary<Derived, Shape>::AddNoUpdateNextEnumerationIndex(
@@ -6223,6 +6194,27 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(IsolateT* isolate,
}
template <typename Derived, typename Shape>
+template <typename IsolateT>
+void Dictionary<Derived, Shape>::UncheckedAdd(IsolateT* isolate,
+ Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details) {
+ ReadOnlyRoots roots(isolate);
+ uint32_t hash = Shape::Hash(roots, key);
+  // Validate that the key is absent and that the capacity is sufficient.
+ SLOW_DCHECK(dictionary->FindEntry(isolate, key).is_not_found());
+ DCHECK(dictionary->HasSufficientCapacityToAdd(1));
+
+ // Compute the key object.
+ Handle<Object> k = Shape::AsHandle(isolate, key);
+
+ InternalIndex entry = dictionary->FindInsertionEntry(isolate, roots, hash);
+ dictionary->SetEntry(entry, *k, *value, details);
+ DCHECK(dictionary->KeyAt(isolate, entry).IsNumber() ||
+ Shape::Unwrap(dictionary->KeyAt(isolate, entry)).IsUniqueName());
+}
+
+template <typename Derived, typename Shape>
Handle<Derived> Dictionary<Derived, Shape>::ShallowCopy(
Isolate* isolate, Handle<Derived> dictionary) {
return Handle<Derived>::cast(isolate->factory()->CopyFixedArrayWithMap(
@@ -6272,6 +6264,13 @@ Handle<NumberDictionary> NumberDictionary::Set(
return new_dictionary;
}
+// static
+void NumberDictionary::UncheckedSet(Isolate* isolate,
+ Handle<NumberDictionary> dictionary,
+ uint32_t key, Handle<Object> value) {
+ UncheckedAtPut(isolate, dictionary, key, value, PropertyDetails::Empty());
+}
+
void NumberDictionary::CopyValuesTo(FixedArray elements) {
ReadOnlyRoots roots = GetReadOnlyRoots();
int pos = 0;
@@ -6442,6 +6441,32 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Handle<Derived> table,
hash);
}
+namespace {
+
+template <typename T>
+void RehashObjectHashTableAndGCIfNeeded(Isolate* isolate, Handle<T> table) {
+ // Rehash if more than 33% of the entries are deleted entries.
+  // TODO(verwaest): Consider shrinking the fixed array in place.
+ if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
+ table->Rehash(isolate);
+ }
+ // If we're out of luck, we didn't get a GC recently, and so rehashing
+ // isn't enough to avoid a crash.
+ if (!table->HasSufficientCapacityToAdd(1)) {
+ int nof = table->NumberOfElements() + 1;
+ int capacity = T::ComputeCapacity(nof * 2);
+ if (capacity > T::kMaxCapacity) {
+ for (size_t i = 0; i < 2; ++i) {
+ isolate->heap()->CollectAllGarbage(
+ Heap::kNoGCFlags, GarbageCollectionReason::kFullHashtable);
+ }
+ table->Rehash(isolate);
+ }
+ }
+}
+
+} // namespace
+
template <typename Derived, typename Shape>
Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
Handle<Derived> table,
@@ -6460,24 +6485,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
return table;
}
- // Rehash if more than 33% of the entries are deleted entries.
- // TODO(verwaest): Consider to shrink the fixed array in place.
- if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
- table->Rehash(isolate);
- }
- // If we're out of luck, we didn't get a GC recently, and so rehashing
- // isn't enough to avoid a crash.
- if (!table->HasSufficientCapacityToAdd(1)) {
- int nof = table->NumberOfElements() + 1;
- int capacity = ObjectHashTable::ComputeCapacity(nof * 2);
- if (capacity > ObjectHashTable::kMaxCapacity) {
- for (size_t i = 0; i < 2; ++i) {
- isolate->heap()->CollectAllGarbage(
- Heap::kNoGCFlags, GarbageCollectionReason::kFullHashtable);
- }
- table->Rehash(isolate);
- }
- }
+ RehashObjectHashTableAndGCIfNeeded(isolate, table);
// Check whether the hash table should be extended.
table = Derived::EnsureCapacity(isolate, table);
@@ -6534,6 +6542,90 @@ void ObjectHashTableBase<Derived, Shape>::RemoveEntry(InternalIndex entry) {
this->ElementRemoved();
}
+template <typename Derived, int N>
+std::array<Object, N> ObjectMultiHashTableBase<Derived, N>::Lookup(
+ Handle<Object> key) {
+ return Lookup(GetPtrComprCageBase(*this), key);
+}
+
+template <typename Derived, int N>
+std::array<Object, N> ObjectMultiHashTableBase<Derived, N>::Lookup(
+ PtrComprCageBase cage_base, Handle<Object> key) {
+ DisallowGarbageCollection no_gc;
+
+ ReadOnlyRoots roots = this->GetReadOnlyRoots(cage_base);
+ DCHECK(this->IsKey(roots, *key));
+
+ Object hash_obj = key->GetHash();
+ if (hash_obj.IsUndefined(roots)) {
+ return {roots.the_hole_value(), roots.the_hole_value()};
+ }
+ int32_t hash = Smi::ToInt(hash_obj);
+
+ InternalIndex entry = this->FindEntry(cage_base, roots, key, hash);
+ if (entry.is_not_found()) {
+ return {roots.the_hole_value(), roots.the_hole_value()};
+ }
+
+ int start_index = this->EntryToIndex(entry) +
+ ObjectMultiHashTableShape<N>::kEntryValueIndex;
+ std::array<Object, N> values;
+ for (int i = 0; i < N; i++) {
+ values[i] = this->get(start_index + i);
+ DCHECK(!values[i].IsTheHole());
+ }
+ return values;
+}
+
+// static
+template <typename Derived, int N>
+Handle<Derived> ObjectMultiHashTableBase<Derived, N>::Put(
+ Isolate* isolate, Handle<Derived> table, Handle<Object> key,
+ const std::array<Handle<Object>, N>& values) {
+ ReadOnlyRoots roots(isolate);
+ DCHECK(table->IsKey(roots, *key));
+
+ int32_t hash = key->GetOrCreateHash(isolate).value();
+ InternalIndex entry = table->FindEntry(isolate, roots, key, hash);
+
+ // Overwrite values if entry is found.
+ if (entry.is_found()) {
+ table->SetEntryValues(entry, values);
+ return table;
+ }
+
+ RehashObjectHashTableAndGCIfNeeded(isolate, table);
+
+ // Check whether the hash table should be extended.
+ table = Derived::EnsureCapacity(isolate, table);
+ entry = table->FindInsertionEntry(isolate, hash);
+ table->set(Derived::EntryToIndex(entry), *key);
+ table->SetEntryValues(entry, values);
+ return table;
+}
+
+template <typename Derived, int N>
+void ObjectMultiHashTableBase<Derived, N>::SetEntryValues(
+ InternalIndex entry, const std::array<Handle<Object>, N>& values) {
+ int start_index = EntryToValueIndexStart(entry);
+ for (int i = 0; i < N; i++) {
+ this->set(start_index + i, *values[i]);
+ }
+}
+
+Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
+ Handle<ObjectHashSet> set,
+ Handle<Object> key) {
+ int32_t hash = key->GetOrCreateHash(isolate).value();
+ if (!set->Has(isolate, key, hash)) {
+ set = EnsureCapacity(isolate, set);
+ InternalIndex entry = set->FindInsertionEntry(isolate, hash);
+ set->set(EntryToIndex(entry), *key);
+ set->ElementAdded();
+ }
+ return set;
+}
+
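
For orientation, here is a self-contained sketch of the interface shape introduced by the new ObjectTwoHashTable above (an ObjectMultiHashTableBase with N = 2): one key maps to a fixed number of values, and a missed lookup fills every slot with a sentinel (V8 uses the-hole; the sentinel below is caller-chosen). This only illustrates the API shape, not V8's backing-store layout:

#include <array>
#include <cstdio>
#include <unordered_map>

template <typename K, typename V, int N>
class MultiValueTable {
 public:
  void Put(const K& key, const std::array<V, N>& values) { map_[key] = values; }

  std::array<V, N> Lookup(const K& key, const V& absent) const {
    auto it = map_.find(key);
    if (it == map_.end()) {
      std::array<V, N> result;
      result.fill(absent);  // every slot gets the sentinel, as with the-hole
      return result;
    }
    return it->second;
  }

 private:
  std::unordered_map<K, std::array<V, N>> map_;
};

int main() {
  MultiValueTable<int, int, 2> table;
  table.Put(7, {42, 43});
  auto found = table.Lookup(7, -1);    // {42, 43}
  auto missing = table.Lookup(8, -1);  // {-1, -1}
  std::printf("%d %d / %d %d\n", found[0], found[1], missing[0], missing[1]);
}
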
void JSSet::Initialize(Handle<JSSet> set, Isolate* isolate) {
Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
set->set_table(*table);
@@ -6942,6 +7034,58 @@ Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
return Smi::FromInt(tie).ptr();
}
+void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
+ Isolate* isolate, Address raw_finalization_registry,
+ Address raw_weak_cell) {
+ DisallowGarbageCollection no_gc;
+ JSFinalizationRegistry finalization_registry =
+ JSFinalizationRegistry::cast(Object(raw_finalization_registry));
+ WeakCell weak_cell = WeakCell::cast(Object(raw_weak_cell));
+ DCHECK(!weak_cell.unregister_token().IsUndefined(isolate));
+ HeapObject undefined = ReadOnlyRoots(isolate).undefined_value();
+
+ // Remove weak_cell from the linked list of other WeakCells with the same
+ // unregister token and remove its unregister token from key_map if necessary
+ // without shrinking it. Since shrinking may allocate, it is performed by the
+ // caller after looping, or on exception.
+ if (weak_cell.key_list_prev().IsUndefined(isolate)) {
+ SimpleNumberDictionary key_map =
+ SimpleNumberDictionary::cast(finalization_registry.key_map());
+ HeapObject unregister_token = weak_cell.unregister_token();
+ uint32_t key = Smi::ToInt(unregister_token.GetHash());
+ InternalIndex entry = key_map.FindEntry(isolate, key);
+ DCHECK(entry.is_found());
+
+ if (weak_cell.key_list_next().IsUndefined(isolate)) {
+ // weak_cell is the only one associated with its key; remove the key
+ // from the hash table.
+ key_map.ClearEntry(entry);
+ key_map.ElementRemoved();
+ } else {
+ // weak_cell is the list head for its key; we need to change the value
+ // of the key in the hash table.
+ WeakCell next = WeakCell::cast(weak_cell.key_list_next());
+ DCHECK_EQ(next.key_list_prev(), weak_cell);
+ next.set_key_list_prev(undefined);
+ key_map.ValueAtPut(entry, next);
+ }
+ } else {
+ // weak_cell is somewhere in the middle of its key list.
+ WeakCell prev = WeakCell::cast(weak_cell.key_list_prev());
+ prev.set_key_list_next(weak_cell.key_list_next());
+ if (!weak_cell.key_list_next().IsUndefined()) {
+ WeakCell next = WeakCell::cast(weak_cell.key_list_next());
+ next.set_key_list_prev(weak_cell.key_list_prev());
+ }
+ }
+
+ // weak_cell is now removed from the unregister token map, so clear its
+ // unregister token-related fields.
+ weak_cell.set_unregister_token(undefined);
+ weak_cell.set_key_list_prev(undefined);
+ weak_cell.set_key_list_next(undefined);
+}
+
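
The removal above is the usual unlink from a doubly linked list whose per-token head lives in a hash map, with three cases (sole entry, list head, interior node) handled separately. A generic sketch of the same pattern, with illustrative types rather than V8's:

#include <cstdint>
#include <unordered_map>

struct Cell {
  uint32_t token_hash = 0;
  Cell* prev = nullptr;
  Cell* next = nullptr;
};

// Unlink `cell` from the list of cells sharing its token, fixing up the
// per-token head stored in `key_map`, mirroring the three cases above.
void Unlink(std::unordered_map<uint32_t, Cell*>& key_map, Cell* cell) {
  if (cell->prev == nullptr) {
    if (cell->next == nullptr) {
      // Only cell for this token: drop the key entirely.
      key_map.erase(cell->token_hash);
    } else {
      // Cell is the list head: promote the next cell to head.
      cell->next->prev = nullptr;
      key_map[cell->token_hash] = cell->next;
    }
  } else {
    // Cell is in the middle or at the tail: splice it out.
    cell->prev->next = cell->next;
    if (cell->next != nullptr) cell->next->prev = cell->prev;
  }
  cell->prev = cell->next = nullptr;  // clear the links, as the code above does
}

int main() {
  std::unordered_map<uint32_t, Cell*> key_map;
  Cell a{1}, b{1};
  a.next = &b;
  b.prev = &a;
  key_map[1] = &a;
  Unlink(key_map, &a);  // b becomes the head for token 1
  return key_map.at(1) == &b ? 0 : 1;
}
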
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
// Keep this at the end of this file
@@ -6969,6 +7113,11 @@ Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
ObjectHashTableBase<DERIVED, SHAPE>;
+#define EXTERN_DEFINE_MULTI_OBJECT_BASE_HASH_TABLE(DERIVED, N) \
+ EXTERN_DEFINE_HASH_TABLE(DERIVED, ObjectMultiHashTableShape<N>) \
+ template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
+ ObjectMultiHashTableBase<DERIVED, N>;
+
#define EXTERN_DEFINE_DICTIONARY(DERIVED, SHAPE) \
EXTERN_DEFINE_HASH_TABLE(DERIVED, SHAPE) \
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
@@ -7011,10 +7160,17 @@ EXTERN_DEFINE_HASH_TABLE(RegisteredSymbolTable, RegisteredSymbolTableShape)
EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(ObjectHashTable, ObjectHashTableShape)
EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(EphemeronHashTable, ObjectHashTableShape)
+EXTERN_DEFINE_MULTI_OBJECT_BASE_HASH_TABLE(ObjectTwoHashTable, 2)
+
EXTERN_DEFINE_DICTIONARY(SimpleNumberDictionary, SimpleNumberDictionaryShape)
EXTERN_DEFINE_DICTIONARY(NumberDictionary, NumberDictionaryShape)
EXTERN_DEFINE_BASE_NAME_DICTIONARY(NameDictionary, NameDictionaryShape)
+template V8_EXPORT_PRIVATE Handle<NameDictionary> NameDictionary::New(
+ Isolate*, int, AllocationType, MinimumCapacity);
+template V8_EXPORT_PRIVATE Handle<NameDictionary> NameDictionary::New(
+ LocalIsolate*, int, AllocationType, MinimumCapacity);
+
EXTERN_DEFINE_BASE_NAME_DICTIONARY(GlobalDictionary, GlobalDictionaryShape)
#undef EXTERN_DEFINE_HASH_TABLE
@@ -7022,57 +7178,5 @@ EXTERN_DEFINE_BASE_NAME_DICTIONARY(GlobalDictionary, GlobalDictionaryShape)
#undef EXTERN_DEFINE_DICTIONARY
#undef EXTERN_DEFINE_BASE_NAME_DICTIONARY
-void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
- Isolate* isolate, Address raw_finalization_registry,
- Address raw_weak_cell) {
- DisallowGarbageCollection no_gc;
- JSFinalizationRegistry finalization_registry =
- JSFinalizationRegistry::cast(Object(raw_finalization_registry));
- WeakCell weak_cell = WeakCell::cast(Object(raw_weak_cell));
- DCHECK(!weak_cell.unregister_token().IsUndefined(isolate));
- HeapObject undefined = ReadOnlyRoots(isolate).undefined_value();
-
- // Remove weak_cell from the linked list of other WeakCells with the same
- // unregister token and remove its unregister token from key_map if necessary
- // without shrinking it. Since shrinking may allocate, it is performed by the
- // caller after looping, or on exception.
- if (weak_cell.key_list_prev().IsUndefined(isolate)) {
- SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_registry.key_map());
- HeapObject unregister_token = weak_cell.unregister_token();
- uint32_t key = Smi::ToInt(unregister_token.GetHash());
- InternalIndex entry = key_map.FindEntry(isolate, key);
- DCHECK(entry.is_found());
-
- if (weak_cell.key_list_next().IsUndefined(isolate)) {
- // weak_cell is the only one associated with its key; remove the key
- // from the hash table.
- key_map.ClearEntry(entry);
- key_map.ElementRemoved();
- } else {
- // weak_cell is the list head for its key; we need to change the value
- // of the key in the hash table.
- WeakCell next = WeakCell::cast(weak_cell.key_list_next());
- DCHECK_EQ(next.key_list_prev(), weak_cell);
- next.set_key_list_prev(undefined);
- key_map.ValueAtPut(entry, next);
- }
- } else {
- // weak_cell is somewhere in the middle of its key list.
- WeakCell prev = WeakCell::cast(weak_cell.key_list_prev());
- prev.set_key_list_next(weak_cell.key_list_next());
- if (!weak_cell.key_list_next().IsUndefined()) {
- WeakCell next = WeakCell::cast(weak_cell.key_list_next());
- next.set_key_list_prev(weak_cell.key_list_prev());
- }
- }
-
- // weak_cell is now removed from the unregister token map, so clear its
- // unregister token-related fields.
- weak_cell.set_unregister_token(undefined);
- weak_cell.set_key_list_prev(undefined);
- weak_cell.set_key_list_next(undefined);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 2b75ccf321..eb0b3d61dd 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -157,8 +157,9 @@
// - DescriptorArray
// - PropertyCell
// - PropertyArray
-// - Code
+// - InstructionStream
// - AbstractCode, a wrapper around Code or BytecodeArray
+// - GcSafeCode, a wrapper around Code
// - Map
// - Foreign
// - SmallOrderedHashTable
@@ -262,10 +263,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// Result of an abstract relational comparison of x and y, implemented according
// to ES6 section 7.2.11 Abstract Relational Comparison.
enum class ComparisonResult {
- kLessThan, // x < y
- kEqual, // x = y
- kGreaterThan, // x > y
- kUndefined // at least one of x or y was undefined or NaN
+ kLessThan = -1, // x < y
+ kEqual = 0, // x = y
+ kGreaterThan = 1, // x > y
+ kUndefined = 2 // at least one of x or y was undefined or NaN
};
// (Returns false whenever {result} is kUndefined.)
@@ -307,7 +308,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// writable shared heap.
V8_INLINE bool InSharedHeap() const;
- V8_INLINE bool InSharedWritableHeap() const;
+ V8_INLINE bool InWritableSharedSpace() const;
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
@@ -316,19 +317,18 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
- IS_TYPE_FUNCTION_DECL(CodeT)
#undef IS_TYPE_FUNCTION_DECL
V8_INLINE bool IsNumber(ReadOnlyRoots roots) const;
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+#define IS_TYPE_FUNCTION_DECL(Type, Value, _) \
V8_INLINE bool Is##Type(Isolate* isolate) const; \
V8_INLINE bool Is##Type(LocalIsolate* isolate) const; \
V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
- IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
+ IS_TYPE_FUNCTION_DECL(NullOrUndefined, , /* unused */)
#undef IS_TYPE_FUNCTION_DECL
V8_INLINE bool IsZero() const;
@@ -341,6 +341,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
bool IsWasmObject(Isolate* = nullptr) const { return false; }
#endif
+ V8_INLINE bool IsJSObjectThatCanBeTrackedAsPrototype() const;
+
enum class Conversion { kToNumber, kToNumeric };
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
@@ -645,15 +647,19 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
EXPORT_DECL_VERIFIER(Object)
#ifdef VERIFY_HEAP
- // Verify a pointer is a valid (non-Code) object pointer.
- // When V8_EXTERNAL_CODE_SPACE is enabled Code objects are not allowed.
+ // Verify a pointer is a valid (non-InstructionStream) object pointer.
+ // When V8_EXTERNAL_CODE_SPACE is enabled InstructionStream objects are not
+ // allowed.
static void VerifyPointer(Isolate* isolate, Object p);
// Verify a pointer is a valid object pointer.
- // Code objects are allowed regardless of the V8_EXTERNAL_CODE_SPACE mode.
+ // InstructionStream objects are allowed regardless of the
+ // V8_EXTERNAL_CODE_SPACE mode.
static void VerifyAnyTagged(Isolate* isolate, Object p);
#endif
- inline void VerifyApiCallResultType();
+#ifdef DEBUG
+ inline bool IsApiCallResultType() const;
+#endif // DEBUG
// Prints this object without details.
V8_EXPORT_PRIVATE void ShortPrint(FILE* out = stdout) const;
@@ -687,8 +693,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
};
- // For use with std::unordered_set/unordered_map when using both Code and
- // non-Code objects as keys.
+ // For use with std::unordered_set/unordered_map when using both
+ // InstructionStream and non-InstructionStream objects as keys.
struct KeyEqualSafe {
bool operator()(const Object a, const Object b) const {
return a.SafeEquals(b);
@@ -869,6 +875,16 @@ V8_INLINE static bool HasWeakHeapObjectTag(const Object value) {
// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
// encoded in the first word. The class MapWord is an abstraction of the
// value in a heap object's first word.
+//
+// When external code space is enabled forwarding pointers are encoded as
+// Smi values representing a diff from the source or map word host object
+// address in kObjectAlignment chunks. Such a representation has the following
+// properties:
+// a) it can hold both positive and negative diffs for the full pointer
+//    compression cage size (a HeapObject address has only 30 valuable bits
+//    while Smis have 31 bits),
+// b) it's independent of the pointer compression base and pointer compression
+// scheme.
class MapWord {
public:
// Normal state: the map word contains a map pointer.
@@ -888,18 +904,23 @@ class MapWord {
inline bool IsForwardingAddress() const;
// Create a map word from a forwarding address.
- static inline MapWord FromForwardingAddress(HeapObject object);
+ static inline MapWord FromForwardingAddress(HeapObject map_word_host,
+ HeapObject object);
+
+ // View this map word as a forwarding address.
+ inline HeapObject ToForwardingAddress(HeapObject map_word_host);
- // View this map word as a forwarding address. The parameterless version
- // is allowed to be used for objects allocated in the main pointer compression
- // cage, while the second variant uses the value of the cage base explicitly
- // and thus can be used in situations where one has to deal with both cases.
- // Note, that the parameterless version is preferred because it avoids
- // unnecessary recompressions.
- inline HeapObject ToForwardingAddress();
- inline HeapObject ToForwardingAddress(PtrComprCageBase host_cage_base);
+ constexpr inline Address ptr() const { return value_; }
- inline Address ptr() { return value_; }
+ // When pointer compression is enabled, MapWord is uniquely identified by
+  // the lower 32 bits. On the other hand, full-value comparison is not correct
+  // because a map word in a forwarding state might have a corrupted upper part.
+ constexpr bool operator==(MapWord other) const {
+ return static_cast<Tagged_t>(ptr()) == static_cast<Tagged_t>(other.ptr());
+ }
+ constexpr bool operator!=(MapWord other) const {
+ return static_cast<Tagged_t>(ptr()) != static_cast<Tagged_t>(other.ptr());
+ }
#ifdef V8_MAP_PACKING
static constexpr Address Pack(Address map) {
@@ -924,7 +945,7 @@ class MapWord {
template <typename TFieldType, int kFieldOffset, typename CompressionScheme>
friend class TaggedField;
- explicit MapWord(Address value) : value_(value) {}
+ explicit constexpr MapWord(Address value) : value_(value) {}
Address value_;
};
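
A worked sketch of the Smi-diff forwarding encoding described in the MapWord comment above. kObjectAlignment is taken as 4 here to match the "30 valuable bits" remark, and the function names are illustrative rather than V8's API:

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr intptr_t kObjectAlignment = 4;  // assumed tagged-object alignment

// Encode `dest` as a signed diff from the host object's address, measured in
// kObjectAlignment chunks, so it fits a 31-bit Smi payload regardless of the
// pointer-compression base.
intptr_t EncodeForwarding(uintptr_t host, uintptr_t dest) {
  intptr_t diff = static_cast<intptr_t>(dest) - static_cast<intptr_t>(host);
  assert(diff % kObjectAlignment == 0);
  return diff / kObjectAlignment;
}

uintptr_t DecodeForwarding(uintptr_t host, intptr_t encoded) {
  return host + static_cast<uintptr_t>(encoded * kObjectAlignment);
}

int main() {
  uintptr_t host = 0x10000;
  uintptr_t dest = 0x0F000;  // a forwarding target can lie below the host
  intptr_t smi_diff = EncodeForwarding(host, dest);
  std::printf("diff in chunks: %ld\n", static_cast<long>(smi_diff));
  assert(DecodeForwarding(host, smi_diff) == dest);
}
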
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index ee720e5608..e21f21d2f3 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -16,9 +16,8 @@ namespace v8 {
namespace internal {
template <class Derived, int entrysize>
-template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
- IsolateT* isolate, int capacity, AllocationType allocation) {
+ Isolate* isolate, int capacity, AllocationType allocation) {
// Capacity must be a power of two, since we depend on being able
   // to divide and multiply by 2 (kLoadFactor) to derive capacity
// from number of buckets. If we decide to change kLoadFactor
@@ -27,7 +26,8 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
capacity =
base::bits::RoundUpToPowerOfTwo32(std::max({kInitialCapacity, capacity}));
if (capacity > MaxCapacity()) {
- return MaybeHandle<Derived>();
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kTooManyProperties), {});
}
int num_buckets = capacity / kLoadFactor;
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
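
The capacity/bucket arithmetic referred to in the comment above, as a standalone sketch. kLoadFactor is 2 per that comment; the minimum capacity of 4 is assumed for illustration only:

#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>

constexpr int kLoadFactor = 2;       // per the comment above
constexpr int kInitialCapacity = 4;  // illustrative minimum, not V8's constant

int NormalizeCapacity(int requested) {
  uint32_t c = static_cast<uint32_t>(std::max(kInitialCapacity, requested));
  return static_cast<int>(std::bit_ceil(c));  // round up to a power of two
}

int main() {
  int capacity = NormalizeCapacity(5);       // -> 8
  int num_buckets = capacity / kLoadFactor;  // -> 4, always divides exactly
  std::printf("capacity=%d buckets=%d\n", capacity, num_buckets);
}
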
@@ -52,7 +52,7 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::AllocateEmpty(
// This is only supposed to be used to create the canonical empty versions
// of each ordered structure, and should not be used afterwards.
// Requires that the map has already been set up in the roots table.
- DCHECK(ReadOnlyRoots(isolate).at(root_index) == kNullAddress);
+ DCHECK(!ReadOnlyRoots(isolate).is_initialized(root_index));
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
Derived::GetMap(ReadOnlyRoots(isolate)), HashTableStartIndex(),
@@ -67,9 +67,8 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::AllocateEmpty(
}
template <class Derived, int entrysize>
-template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
- IsolateT* isolate, Handle<Derived> table) {
+ Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
int nof = table->NumberOfElements();
@@ -192,6 +191,7 @@ MaybeHandle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
MaybeHandle<OrderedHashSet> table_candidate =
OrderedHashSet::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
+ CHECK(isolate->has_pending_exception());
return table_candidate;
}
DisallowGarbageCollection no_gc;
@@ -250,17 +250,15 @@ HeapObject OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
}
template <class Derived, int entrysize>
-template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
- IsolateT* isolate, Handle<Derived> table) {
+ Isolate* isolate, Handle<Derived> table) {
return OrderedHashTable<Derived, entrysize>::Rehash(isolate, table,
table->Capacity());
}
template <class Derived, int entrysize>
-template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
- IsolateT* isolate, Handle<Derived> table, int new_capacity) {
+ Isolate* isolate, Handle<Derived> table, int new_capacity) {
DCHECK(!table->IsObsolete());
MaybeHandle<Derived> new_table_candidate =
@@ -332,9 +330,8 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
return Base::Rehash(isolate, table, new_capacity);
}
-template <typename IsolateT>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
- IsolateT* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
+ Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
MaybeHandle<OrderedNameDictionary> new_table_candidate =
Base::Rehash(isolate, table, new_capacity);
Handle<OrderedNameDictionary> new_table;
@@ -457,9 +454,8 @@ InternalIndex OrderedNameDictionary::FindEntry(IsolateT* isolate, Object key) {
return InternalIndex::NotFound();
}
-template <typename IsolateT>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
- IsolateT* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
+ Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details) {
DCHECK(key->IsUniqueName());
DCHECK(table->FindEntry(isolate, *key).is_not_found());
@@ -538,9 +534,8 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Allocate(
return Base::Allocate(isolate, capacity, allocation);
}
-template <typename IsolateT>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
- IsolateT* isolate, int capacity, AllocationType allocation) {
+ Isolate* isolate, int capacity, AllocationType allocation) {
MaybeHandle<OrderedNameDictionary> table_candidate =
Base::Allocate(isolate, capacity, allocation);
Handle<OrderedNameDictionary> table;
@@ -631,37 +626,12 @@ template MaybeHandle<OrderedNameDictionary>
OrderedHashTable<OrderedNameDictionary, 3>::EnsureGrowable(
Isolate* isolate, Handle<OrderedNameDictionary> table);
-template V8_EXPORT_PRIVATE MaybeHandle<OrderedNameDictionary>
-OrderedNameDictionary::Allocate(Isolate* isolate, int capacity,
- AllocationType allocation);
-
-template V8_EXPORT_PRIVATE MaybeHandle<OrderedNameDictionary>
-OrderedNameDictionary::Allocate(LocalIsolate* isolate, int capacity,
- AllocationType allocation);
-
-template V8_EXPORT_PRIVATE MaybeHandle<OrderedNameDictionary>
-OrderedNameDictionary::Rehash(Isolate* isolate,
- Handle<OrderedNameDictionary> table,
- int new_capacity);
-
template V8_EXPORT_PRIVATE InternalIndex
OrderedNameDictionary::FindEntry(Isolate* isolate, Object key);
template V8_EXPORT_PRIVATE InternalIndex
OrderedNameDictionary::FindEntry(LocalIsolate* isolate, Object key);
-template V8_EXPORT_PRIVATE MaybeHandle<OrderedNameDictionary>
-OrderedNameDictionary::Add(Isolate* isolate,
- Handle<OrderedNameDictionary> table,
- Handle<Name> key, Handle<Object> value,
- PropertyDetails details);
-
-template V8_EXPORT_PRIVATE MaybeHandle<OrderedNameDictionary>
-OrderedNameDictionary::Add(LocalIsolate* isolate,
- Handle<OrderedNameDictionary> table,
- Handle<Name> key, Handle<Object> value,
- PropertyDetails details);
-
template <>
Handle<SmallOrderedHashSet>
SmallOrderedHashTable<SmallOrderedHashSet>::Allocate(
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 60a6343bb0..41cb6cf6e2 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -68,8 +68,7 @@ class OrderedHashTable : public FixedArray {
public:
// Returns an OrderedHashTable (possibly |table|) with enough space
// to add at least one new element.
- template <typename IsolateT>
- static MaybeHandle<Derived> EnsureGrowable(IsolateT* isolate,
+ static MaybeHandle<Derived> EnsureGrowable(Isolate* isolate,
Handle<Derived> table);
// Returns an OrderedHashTable (possibly |table|) that's shrunken
@@ -201,19 +200,16 @@ class OrderedHashTable : public FixedArray {
protected:
// Returns an OrderedHashTable with a capacity of at least |capacity|.
- template <typename IsolateT>
static MaybeHandle<Derived> Allocate(
- IsolateT* isolate, int capacity,
+ Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static MaybeHandle<Derived> AllocateEmpty(Isolate* isolate,
AllocationType allocation,
RootIndex root_ndex);
- template <typename IsolateT>
- static MaybeHandle<Derived> Rehash(IsolateT* isolate, Handle<Derived> table);
- template <typename IsolateT>
- static MaybeHandle<Derived> Rehash(IsolateT* isolate, Handle<Derived> table,
+ static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table);
+ static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
int new_capacity);
int HashToEntryRaw(int hash) {
@@ -764,9 +760,8 @@ class V8_EXPORT_PRIVATE OrderedNameDictionary
DECL_CAST(OrderedNameDictionary)
DECL_PRINTER(OrderedNameDictionary)
- template <typename IsolateT>
static MaybeHandle<OrderedNameDictionary> Add(
- IsolateT* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
+ Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details);
void SetEntry(InternalIndex entry, Object key, Object value,
@@ -789,17 +784,15 @@ class V8_EXPORT_PRIVATE OrderedNameDictionary
Isolate* isolate, Handle<OrderedNameDictionary> table,
InternalIndex entry);
- template <typename IsolateT>
static MaybeHandle<OrderedNameDictionary> Allocate(
- IsolateT* isolate, int capacity,
+ Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static MaybeHandle<OrderedNameDictionary> AllocateEmpty(
Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
- template <typename IsolateT>
static MaybeHandle<OrderedNameDictionary> Rehash(
- IsolateT* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
+ Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
// Returns the value for entry.
inline Object ValueAt(InternalIndex entry);
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index 8295637d9e..241cd6d0a3 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -75,10 +75,7 @@ void PropertyArray::set(int index, Object value, SeqCstAccessTag tag) {
DCHECK(value.IsShared());
int offset = OffsetOfElementAt(index);
SEQ_CST_WRITE_FIELD(*this, offset, value);
- // JSSharedStructs are allocated in the shared old space, which is currently
- // collected by stopping the world, so the incremental write barrier is not
- // needed. They can only store Smis and other HeapObjects in the shared old
- // space, so the generational write barrier is also not needed.
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, UPDATE_WRITE_BARRIER);
}
Object PropertyArray::Swap(int index, Object value, SeqCstAccessTag tag) {
@@ -92,12 +89,11 @@ Object PropertyArray::Swap(PtrComprCageBase cage_base, int index, Object value,
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length(kAcquireLoad)));
DCHECK(value.IsShared());
- return TaggedField<Object>::SeqCst_Swap(cage_base, *this,
- OffsetOfElementAt(index), value);
- // JSSharedStructs are allocated in the shared old space, which is currently
- // collected by stopping the world, so the incremental write barrier is not
- // needed. They can only store Smis and other HeapObjects in the shared old
- // space, so the generational write barrier is also not needed.
+ Object result = TaggedField<Object>::SeqCst_Swap(
+ cage_base, *this, OffsetOfElementAt(index), value);
+ CONDITIONAL_WRITE_BARRIER(*this, OffsetOfElementAt(index), value,
+ UPDATE_WRITE_BARRIER);
+ return result;
}
ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
diff --git a/deps/v8/src/objects/property-descriptor-object.tq b/deps/v8/src/objects/property-descriptor-object.tq
index 256903e815..1731a8bff8 100644
--- a/deps/v8/src/objects/property-descriptor-object.tq
+++ b/deps/v8/src/objects/property-descriptor-object.tq
@@ -17,8 +17,150 @@ bitfield struct PropertyDescriptorObjectFlags extends uint31 {
}
extern class PropertyDescriptorObject extends Struct {
+ macro IsDataDescriptor(): bool {
+ return this.flags.has_value || this.flags.has_writable;
+ }
+
+ macro IsAccessorDescriptor(): bool {
+ return this.flags.has_get || this.flags.has_set;
+ }
+
+ macro IsGenericDescriptor(): bool {
+ if (this.IsDataDescriptor() || this.IsAccessorDescriptor()) {
+ return false;
+ }
+ return true;
+ }
+
+ macro IsEmptyOrEquivalentTo(current: PropertyDescriptorObject): bool {
+ return (!this.flags.has_enumerable ||
+ this.flags.is_enumerable == current.flags.is_enumerable) &&
+ (!this.flags.has_configurable ||
+ this.flags.is_configurable == current.flags.is_configurable) &&
+ (!this.flags.has_value || SameValue(this.value, current.value)) &&
+ (!this.flags.has_writable ||
+ this.flags.is_writable == current.flags.is_writable) &&
+ (!this.flags.has_get || SameValue(this.get, current.get)) &&
+        (!this.flags.has_set || SameValue(this.set, current.set));
+ }
+
flags: SmiTagged<PropertyDescriptorObjectFlags>;
value: JSAny|TheHole;
get: JSAny|TheHole;
set: JSAny|TheHole;
}
+
+macro IsCompatiblePropertyDescriptor(
+ _extensible: bool, newDesc: PropertyDescriptorObject,
+ current: PropertyDescriptorObject): bool {
+ if (newDesc.IsEmptyOrEquivalentTo(current)) return true;
+
+  // 5. If current.[[Configurable]] is false, then
+  //   5a. If Desc has a [[Configurable]] field and Desc.[[Configurable]] is
+  //       true, return false.
+  //   5b. If Desc has an [[Enumerable]] field and
+  //       SameValue(Desc.[[Enumerable]], current.[[Enumerable]]) is false,
+  //       return false.
+  //   5c. If IsGenericDescriptor(Desc) is false and
+  //       SameValue(IsAccessorDescriptor(Desc), IsAccessorDescriptor(current))
+  //       is false, return false.
+  //   5d. If IsAccessorDescriptor(Desc) is true, then
+  //     i. If Desc has a [[Get]] field and SameValue(Desc.[[Get]],
+  //        current.[[Get]]) is false, return false.
+  //     ii. If Desc has a [[Set]] field and SameValue(Desc.[[Set]],
+  //         current.[[Set]]) is false, return false.
+  //   5e. Else if current.[[Writable]] is false, then
+  //     i. If Desc has a [[Writable]] field and Desc.[[Writable]] is true,
+  //        return false.
+  //     ii. If Desc has a [[Value]] field and SameValue(Desc.[[Value]],
+  //         current.[[Value]]) is false, return false.
+ if (!current.flags.is_configurable) {
+ if (newDesc.flags.has_configurable && newDesc.flags.is_configurable)
+ return false;
+    if (newDesc.flags.has_enumerable &&
+ (newDesc.flags.is_enumerable != current.flags.is_enumerable))
+ return false;
+ const isAccessor = newDesc.IsAccessorDescriptor();
+ if (!newDesc.IsGenericDescriptor() &&
+ isAccessor != current.IsAccessorDescriptor())
+ return false;
+ if (isAccessor) {
+ if (newDesc.flags.has_get && !SameValue(newDesc.get, current.get))
+ return false;
+ if (newDesc.flags.has_set && !SameValue(newDesc.set, current.set))
+ return false;
+ } else if (!current.flags.is_writable) {
+ if (newDesc.flags.is_writable) return false;
+ if (newDesc.flags.has_value && !SameValue(newDesc.value, current.value))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+macro IsCompatiblePropertyDescriptor(
+ extensible: bool, newDesc: (PropertyDescriptorObject|Undefined),
+ current: PropertyDescriptorObject): bool {
+ // 3. If every field in Desc is absent, return true. (This also has a shortcut
+ // not in the spec: if every field value matches the current value, return.)
+ typeswitch (newDesc) {
+ case (Undefined): {
+ return true;
+ }
+ case (newDesc: PropertyDescriptorObject): {
+ return IsCompatiblePropertyDescriptor(extensible, newDesc, current);
+ }
+ }
+}
+
+@export
+macro IsCompatiblePropertyDescriptor(
+ extensible: bool, newDesc: (PropertyDescriptorObject|Undefined),
+ current: (PropertyDescriptorObject|Undefined)): bool {
+ // 2. If current is undefined, then
+ // 2a. If extensible is false, return false.
+ // 2b. If O is undefined, return true.
+ typeswitch (current) {
+ case (Undefined): {
+ return extensible;
+ }
+ case (current: PropertyDescriptorObject): {
+ return IsCompatiblePropertyDescriptor(extensible, newDesc, current);
+ }
+ }
+}
+
+@export
+macro CompletePropertyDescriptor(desc: PropertyDescriptorObject): void {
+  // 1. Let like be the Record { [[Value]]: undefined, [[Writable]]: false,
+  //    [[Get]]: undefined, [[Set]]: undefined, [[Enumerable]]: false,
+  //    [[Configurable]]: false }.
+ if (!desc.IsAccessorDescriptor()) {
+ if (!desc.flags.has_value) {
+ desc.flags.has_value = true;
+ desc.value = Undefined;
+ }
+ if (!desc.flags.has_writable) {
+ desc.flags.has_writable = true;
+ desc.flags.is_writable = false;
+ }
+ } else {
+ if (!desc.flags.has_get) {
+ desc.flags.has_get = true;
+ desc.get = Undefined;
+ }
+ if (!desc.flags.has_set) {
+ desc.flags.has_set = true;
+ desc.set = Undefined;
+ }
+ }
+ if (!desc.flags.has_enumerable) {
+ desc.flags.has_enumerable = true;
+ desc.flags.is_enumerable = false;
+ }
+ if (!desc.flags.has_configurable) {
+ desc.flags.has_configurable = true;
+ desc.flags.is_configurable = false;
+ }
+}
+
+extern macro AllocatePropertyDescriptorObject(implicit context: Context)():
+ PropertyDescriptorObject;
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index 4878070d0d..3c8aecfe8b 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -4,6 +4,7 @@
#include "src/objects/property-descriptor.h"
+#include "src/common/assert-scope.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
@@ -42,22 +43,31 @@ bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
// the entire conversion!
bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDescriptor* desc) {
- if (!obj->IsJSObject()) return false;
- Handle<Map> map(Handle<JSObject>::cast(obj)->map(), isolate);
- if (map->instance_type() != JS_OBJECT_TYPE) return false;
- if (map->is_access_check_needed()) return false;
- if (map->prototype() != *isolate->initial_object_prototype()) return false;
- // During bootstrapping, the object_function_prototype_map hasn't been
- // set up yet.
- if (isolate->bootstrapper()->IsActive()) return false;
- if (JSObject::cast(map->prototype()).map() !=
- isolate->native_context()->object_function_prototype_map()) {
- return false;
+ {
+ DisallowGarbageCollection no_gc;
+ auto raw_obj = *obj;
+ if (!raw_obj.IsJSObject()) return false;
+ Map raw_map = raw_obj.map(isolate);
+ if (raw_map.instance_type() != JS_OBJECT_TYPE) return false;
+ if (raw_map.is_access_check_needed()) return false;
+ if (raw_map.prototype() != *isolate->initial_object_prototype())
+ return false;
+ // During bootstrapping, the object_function_prototype_map hasn't been
+ // set up yet.
+ if (isolate->bootstrapper()->IsActive()) return false;
+ if (JSObject::cast(raw_map.prototype()).map() !=
+ isolate->raw_native_context().object_function_prototype_map()) {
+ return false;
+ }
+ // TODO(jkummerow): support dictionary properties?
+ if (raw_map.is_dictionary_map()) return false;
}
- // TODO(jkummerow): support dictionary properties?
- if (map->is_dictionary_map()) return false;
+
+ Handle<Map> map(obj->map(isolate), isolate);
+
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(map->instance_descriptors(isolate), isolate);
+ ReadOnlyRoots roots(isolate);
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
@@ -65,7 +75,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
if (details.kind() == PropertyKind::kData) {
value = JSObject::FastPropertyAt(isolate, Handle<JSObject>::cast(obj),
details.representation(),
- FieldIndex::ForDescriptor(*map, i));
+ FieldIndex::ForDetails(*map, details));
} else {
DCHECK_EQ(PropertyKind::kAccessor, details.kind());
// Bail out to slow path.
@@ -83,7 +93,6 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
}
}
Name key = descs->GetKey(i);
- ReadOnlyRoots roots(isolate);
if (key == roots.enumerable_string()) {
desc->set_enumerable(value->BooleanValue(isolate));
} else if (key == roots.configurable_string()) {
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 0b19f943a4..0208e03d60 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -46,6 +46,12 @@ bool PrototypeInfo::HasObjectCreateMap() {
return cache->IsWeak();
}
+bool PrototypeInfo::IsPrototypeInfoFast(Object object) {
+ bool is_proto_info = object != Smi::zero();
+ DCHECK_EQ(is_proto_info, object.IsPrototypeInfo());
+ return is_proto_info;
+}
+
BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map,
ShouldBeFastBit::kShift)
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index 882ff63dcb..5284584a8a 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -33,6 +33,8 @@ class PrototypeInfo
inline Map ObjectCreateMap();
inline bool HasObjectCreateMap();
+ static inline bool IsPrototypeInfoFast(Object object);
+
DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
// Dispatched behavior.
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index c0117d27c9..1b6083f527 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -491,12 +491,20 @@ Handle<ScopeInfo> ScopeInfo::CreateForNativeContext(Isolate* isolate) {
}
// static
+Handle<ScopeInfo> ScopeInfo::CreateForShadowRealmNativeContext(
+ Isolate* isolate) {
+ return CreateForBootstrapping(isolate, BootstrappingType::kShadowRealm);
+}
+
+// static
Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
BootstrappingType type) {
const int parameter_count = 0;
const bool is_empty_function = type == BootstrappingType::kFunction;
- const bool is_native_context = type == BootstrappingType::kNative;
+ const bool is_native_context = (type == BootstrappingType::kNative) ||
+ (type == BootstrappingType::kShadowRealm);
const bool is_script = type == BootstrappingType::kScript;
+ const bool is_shadow_realm = type == BootstrappingType::kShadowRealm;
const int context_local_count =
is_empty_function || is_native_context ? 0 : 1;
const bool has_inferred_function_name = is_empty_function;
@@ -513,8 +521,12 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
factory->NewScopeInfo(length, AllocationType::kReadOnly);
DisallowGarbageCollection _nogc;
// Encode the flags.
+ DCHECK_IMPLIES(is_shadow_realm || is_script, !is_empty_function);
int flags =
- ScopeTypeBits::encode(is_empty_function ? FUNCTION_SCOPE : SCRIPT_SCOPE) |
+ ScopeTypeBits::encode(
+ is_empty_function
+ ? FUNCTION_SCOPE
+ : (is_shadow_realm ? SHADOW_REALM_SCOPE : SCRIPT_SCOPE)) |
SloppyEvalCanExtendVarsBit::encode(false) |
LanguageModeBit::encode(LanguageMode::kSloppy) |
DeclarationScopeBit::encode(true) |
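For reference, a minimal self-contained sketch of the encode/decode bit-field scheme the flags computation above uses; BitField, ScopeTypeBits and DeclarationScopeBit here are simplified stand-ins for V8's own helpers, not the real definitions:

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

enum ScopeType : uint32_t { FUNCTION_SCOPE, SCRIPT_SCOPE, SHADOW_REALM_SCOPE };
using ScopeTypeBits = BitField<ScopeType, 0, 4>;
using DeclarationScopeBit = BitField<bool, 4, 1>;

int main() {
  uint32_t flags = ScopeTypeBits::encode(SHADOW_REALM_SCOPE) |
                   DeclarationScopeBit::encode(true);
  assert(ScopeTypeBits::decode(flags) == SHADOW_REALM_SCOPE);
  assert(DeclarationScopeBit::decode(flags));
  return 0;
}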
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 3fe5c2c5ad..1593a720c1 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -271,6 +271,7 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
V8_EXPORT_PRIVATE static Handle<ScopeInfo> CreateForEmptyFunction(
Isolate* isolate);
static Handle<ScopeInfo> CreateForNativeContext(Isolate* isolate);
+ static Handle<ScopeInfo> CreateForShadowRealmNativeContext(Isolate* isolate);
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
// Creates a copy of a {ScopeInfo} but with the provided locals blocklist
@@ -315,8 +316,6 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
V8_EXPORT_PRIVATE uint32_t Hash();
private:
- friend class WebSnapshotDeserializer;
-
int InlinedLocalNamesLookup(String name);
int ContextLocalNamesIndex() const;
@@ -363,7 +362,7 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
return index;
}
- enum class BootstrappingType { kScript, kFunction, kNative };
+ enum class BootstrappingType { kScript, kFunction, kNative, kShadowRealm };
static Handle<ScopeInfo> CreateForBootstrapping(Isolate* isolate,
BootstrappingType type);
diff --git a/deps/v8/src/objects/scope-info.tq b/deps/v8/src/objects/scope-info.tq
index d03228063f..e58db3f496 100644
--- a/deps/v8/src/objects/scope-info.tq
+++ b/deps/v8/src/objects/scope-info.tq
@@ -17,7 +17,8 @@ extern enum ScopeType extends uint32 {
SCRIPT_SCOPE,
CATCH_SCOPE,
BLOCK_SCOPE,
- WITH_SCOPE
+ WITH_SCOPE,
+ SHADOW_REALM_SCOPE
}
extern enum VariableAllocationInfo extends uint32 {
@@ -158,6 +159,7 @@ extern class ScopeInfo extends HeapObject {
flags.scope_type == ScopeType::SCRIPT_SCOPE ||
flags.scope_type == ScopeType::EVAL_SCOPE ||
flags.scope_type == ScopeType::MODULE_SCOPE ||
+ flags.scope_type == ScopeType::SHADOW_REALM_SCOPE ||
(flags.is_empty ? false : flags.scope_type == ScopeType::CLASS_SCOPE)]:
PositionInfo;
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 4d1af1104a..6e40e796d3 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -25,7 +25,7 @@ NEVER_READ_ONLY_SPACE_IMPL(Script)
#if V8_ENABLE_WEBASSEMBLY
ACCESSORS_CHECKED(Script, wasm_breakpoint_infos, FixedArray,
- kEvalFromSharedOrWrappedArgumentsOrSfiTableOffset,
+ kEvalFromSharedOrWrappedArgumentsOffset,
this->type() == TYPE_WASM)
ACCESSORS_CHECKED(Script, wasm_managed_native_module, Object,
kEvalFromPositionOffset, this->type() == TYPE_WASM)
@@ -37,59 +37,43 @@ ACCESSORS_CHECKED(Script, wasm_weak_instance_list, WeakArrayList,
#endif // V8_ENABLE_WEBASSEMBLY
SMI_ACCESSORS(Script, type, kScriptTypeOffset)
-ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments_or_sfi_table,
- Object, kEvalFromSharedOrWrappedArgumentsOrSfiTableOffset,
+ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
+ kEvalFromSharedOrWrappedArgumentsOffset,
CHECK_SCRIPT_NOT_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
CHECK_SCRIPT_NOT_WASM)
#undef CHECK_SCRIPT_NOT_WASM
+ACCESSORS(Script, compiled_lazy_function_positions, Object,
+ kCompiledLazyFunctionPositionsOffset)
+
bool Script::is_wrapped() const {
- return eval_from_shared_or_wrapped_arguments_or_sfi_table().IsFixedArray() &&
- type() != TYPE_WEB_SNAPSHOT;
+ return eval_from_shared_or_wrapped_arguments().IsFixedArray();
}
bool Script::has_eval_from_shared() const {
- return eval_from_shared_or_wrapped_arguments_or_sfi_table()
- .IsSharedFunctionInfo();
+ return eval_from_shared_or_wrapped_arguments().IsSharedFunctionInfo();
}
void Script::set_eval_from_shared(SharedFunctionInfo shared,
WriteBarrierMode mode) {
DCHECK(!is_wrapped());
- DCHECK_NE(type(), TYPE_WEB_SNAPSHOT);
- set_eval_from_shared_or_wrapped_arguments_or_sfi_table(shared, mode);
+ set_eval_from_shared_or_wrapped_arguments(shared, mode);
}
SharedFunctionInfo Script::eval_from_shared() const {
DCHECK(has_eval_from_shared());
- return SharedFunctionInfo::cast(
- eval_from_shared_or_wrapped_arguments_or_sfi_table());
+ return SharedFunctionInfo::cast(eval_from_shared_or_wrapped_arguments());
}
void Script::set_wrapped_arguments(FixedArray value, WriteBarrierMode mode) {
DCHECK(!has_eval_from_shared());
- DCHECK_NE(type(), TYPE_WEB_SNAPSHOT);
- set_eval_from_shared_or_wrapped_arguments_or_sfi_table(value, mode);
+ set_eval_from_shared_or_wrapped_arguments(value, mode);
}
FixedArray Script::wrapped_arguments() const {
DCHECK(is_wrapped());
- return FixedArray::cast(eval_from_shared_or_wrapped_arguments_or_sfi_table());
-}
-
-void Script::set_shared_function_info_table(ObjectHashTable value,
- WriteBarrierMode mode) {
- DCHECK(!has_eval_from_shared());
- DCHECK(!is_wrapped());
- DCHECK_EQ(type(), TYPE_WEB_SNAPSHOT);
- set_eval_from_shared_or_wrapped_arguments_or_sfi_table(value, mode);
-}
-
-ObjectHashTable Script::shared_function_info_table() const {
- DCHECK_EQ(type(), TYPE_WEB_SNAPSHOT);
- return ObjectHashTable::cast(
- eval_from_shared_or_wrapped_arguments_or_sfi_table());
+ return FixedArray::cast(eval_from_shared_or_wrapped_arguments());
}
DEF_GETTER(Script, shared_function_infos, WeakFixedArray) {
@@ -111,11 +95,6 @@ void Script::set_shared_function_infos(WeakFixedArray value,
}
int Script::shared_function_info_count() const {
- if (V8_UNLIKELY(type() == TYPE_WEB_SNAPSHOT)) {
- // +1 because the 0th element in shared_function_infos is reserved for the
- // top-level SharedFunctionInfo which doesn't exist.
- return shared_function_info_table().NumberOfElements() + 1;
- }
return shared_function_infos().length();
}
@@ -148,6 +127,14 @@ void Script::set_compilation_state(CompilationState state) {
set_flags(CompilationStateBit::update(flags(), state));
}
+bool Script::produce_compile_hints() const {
+ return ProduceCompileHintsBit::decode(flags());
+}
+
+void Script::set_produce_compile_hints(bool produce_compile_hints) {
+ set_flags(ProduceCompileHintsBit::update(flags(), produce_compile_hints));
+}
+
bool Script::is_repl_mode() const { return IsReplModeBit::decode(flags()); }
void Script::set_is_repl_mode(bool value) {
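The accessors above keep one slot that can hold a SharedFunctionInfo, a FixedArray of wrapped arguments, or undefined, and pick the role by checking the dynamic type. A rough standalone analogue, with std::variant standing in for the tagged Object field (all type names here are invented):

#include <cassert>
#include <string>
#include <variant>
#include <vector>

struct SharedFunctionInfo { std::string name; };
using WrappedArguments = std::vector<std::string>;
struct Undefined {};

struct Script {
  std::variant<Undefined, SharedFunctionInfo, WrappedArguments> eval_or_wrapped;

  bool is_wrapped() const {
    return std::holds_alternative<WrappedArguments>(eval_or_wrapped);
  }
  bool has_eval_from_shared() const {
    return std::holds_alternative<SharedFunctionInfo>(eval_or_wrapped);
  }
  const SharedFunctionInfo& eval_from_shared() const {
    assert(has_eval_from_shared());        // mirrors the DCHECKs above
    return std::get<SharedFunctionInfo>(eval_or_wrapped);
  }
};

int main() {
  Script s;
  s.eval_or_wrapped = SharedFunctionInfo{"outer"};
  assert(s.has_eval_from_shared() && !s.is_wrapped());
  assert(s.eval_from_shared().name == "outer");
  return 0;
}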
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index b4dd450b48..fea0014f6d 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -46,8 +46,7 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
#if V8_ENABLE_WEBASSEMBLY
TYPE_WASM = 3,
#endif // V8_ENABLE_WEBASSEMBLY
- TYPE_INSPECTOR = 4,
- TYPE_WEB_SNAPSHOT = 5
+ TYPE_INSPECTOR = 4
};
// Script compilation types.
@@ -62,7 +61,7 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// [type]: the script type.
DECL_INT_ACCESSORS(type)
- DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments_or_sfi_table, Object)
+ DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments, Object)
// [eval_from_shared]: for eval scripts the shared function info for the
// function from which eval was called.
@@ -71,12 +70,6 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// [wrapped_arguments]: for the list of arguments in a wrapped script.
DECL_ACCESSORS(wrapped_arguments, FixedArray)
- // For web snapshots: a hash table mapping function positions to indices in
- // shared_function_infos.
- // TODO(v8:11525): Replace with a more efficient data structure mapping
- // function positions to weak pointers to SharedFunctionInfos directly.
- DECL_ACCESSORS(shared_function_info_table, ObjectHashTable)
-
// Whether the script is implicitly wrapped in a function.
inline bool is_wrapped() const;
@@ -128,6 +121,9 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
inline CompilationType compilation_type();
inline void set_compilation_type(CompilationType type);
+ inline bool produce_compile_hints() const;
+ inline void set_produce_compile_hints(bool produce_compile_hints);
+
// [compilation_state]: determines whether the script has already been
// compiled. Encoded in the 'flags' field.
inline CompilationState compilation_state();
@@ -144,6 +140,8 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
inline v8::ScriptOriginOptions origin_options();
inline void set_origin_options(ScriptOriginOptions origin_options);
+ DECL_ACCESSORS(compiled_lazy_function_positions, Object)
+
// If script source is an external string, check that the underlying
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
@@ -216,14 +214,6 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
Handle<Script> script, IsolateT* isolate,
FunctionLiteral* function_literal);
- static MaybeHandle<SharedFunctionInfo> FindWebSnapshotSharedFunctionInfo(
- Handle<Script> script, Isolate* isolate,
- FunctionLiteral* function_literal);
-
- static MaybeHandle<SharedFunctionInfo> FindWebSnapshotSharedFunctionInfo(
- Handle<Script> script, LocalIsolate* isolate,
- FunctionLiteral* function_literal);
-
// Iterate over all script objects on the heap.
class V8_EXPORT_PRIVATE Iterator {
public:
diff --git a/deps/v8/src/objects/script.tq b/deps/v8/src/objects/script.tq
index e4f99e62d9..a0e0c9dc46 100644
--- a/deps/v8/src/objects/script.tq
+++ b/deps/v8/src/objects/script.tq
@@ -12,6 +12,7 @@ bitfield struct ScriptFlags extends uint31 {
origin_options: int32: 4 bit;
// Whether an instrumentation breakpoint is set for this script (wasm only).
break_on_entry: bool: 1 bit;
+ produce_compile_hints: bool: 1 bit;
}
extern class Script extends Struct {
@@ -41,13 +42,16 @@ extern class Script extends Struct {
// For scripts originating from eval: the SharedFunctionInfo contains the SFI
// for the script. For scripts wrapped as functions: the FixedArray contains
- // the arguments. For web snapshots: the ObjectHashTable maps function start
- // position to SFI index in shared_function_infos.
- eval_from_shared_or_wrapped_arguments_or_sfi_table: SharedFunctionInfo|
- FixedArray|ObjectHashTable|Undefined;
+ // the arguments.
+ eval_from_shared_or_wrapped_arguments: SharedFunctionInfo|FixedArray|
+ Undefined;
eval_from_position: Smi|Foreign; // Smi or Managed<wasm::NativeModule>
shared_function_infos: WeakFixedArray|WeakArrayList;
+ // [compiled_lazy_function_positions]: ArrayList containing SMIs marking
+ // the start positions of lazy functions which got compiled.
+ compiled_lazy_function_positions: ArrayList|Undefined;
+
// [flags]: Holds an exciting bitfield.
flags: SmiTagged<ScriptFlags>;
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 43ef0d3d17..5621b15d98 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -98,7 +98,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseDataAndJob)
TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
TQ_OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfo)
-NEVER_READ_ONLY_SPACE_IMPL(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, function_data, Object,
@@ -193,14 +192,13 @@ bool SharedFunctionInfo::needs_script_context() const {
return is_script() && scope_info(kAcquireLoad).ContextLocalCount() > 0;
}
-template <typename IsolateT>
-AbstractCode SharedFunctionInfo::abstract_code(IsolateT* isolate) {
+AbstractCode SharedFunctionInfo::abstract_code(Isolate* isolate) {
// TODO(v8:11429): Decide whether this should return bytecode or baseline
// code when the latter is present.
if (HasBytecodeArray(isolate)) {
return AbstractCode::cast(GetBytecodeArray(isolate));
} else {
- return ToAbstractCode(GetCode());
+ return AbstractCode::cast(GetCode(isolate));
}
}
@@ -227,7 +225,7 @@ SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
IsolateT* isolate) const {
if (!script().IsScript()) return kHasNoScript;
- if (GetIsolate()->is_precise_binary_code_coverage() &&
+ if (isolate->is_precise_binary_code_coverage() &&
!has_reported_binary_coverage()) {
// We may miss invocations if this function is inlined.
return kNeedsBinaryCoverage;
@@ -383,8 +381,9 @@ void SharedFunctionInfo::set_function_map_index(int index) {
}
void SharedFunctionInfo::clear_padding() {
- memset(reinterpret_cast<void*>(this->address() + kSize), 0,
- kAlignedSize - kSize);
+#if V8_SFI_NEEDS_PADDING
+ set_optional_padding(0);
+#endif // V8_SFI_NEEDS_PADDING
}
void SharedFunctionInfo::UpdateFunctionMapIndex() {
@@ -559,14 +558,13 @@ DEF_GETTER(SharedFunctionInfo, HasBytecodeArray, bool) {
HeapObject::cast(data).map(cage_base).instance_type();
return InstanceTypeChecker::IsBytecodeArray(instance_type) ||
InstanceTypeChecker::IsInterpreterData(instance_type) ||
- InstanceTypeChecker::IsCodeT(instance_type);
+ InstanceTypeChecker::IsCode(instance_type);
}
template <typename IsolateT>
BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
- // TODO(ishell): access shared_function_info_access() via IsolateT.
SharedMutexGuardIfOffThread<IsolateT, base::kShared> mutex_guard(
- GetIsolate()->shared_function_info_access(), isolate);
+ isolate->shared_function_info_access(), isolate);
DCHECK(HasBytecodeArray());
if (HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray()) {
@@ -578,8 +576,8 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
Object data = function_data(kAcquireLoad);
- if (data.IsCodeT()) {
- CodeT baseline_code = CodeT::cast(data);
+ if (data.IsCode()) {
+ Code baseline_code = Code::cast(data);
data = baseline_code.bytecode_or_interpreter_data();
}
if (data.IsBytecodeArray()) {
@@ -623,8 +621,8 @@ bool SharedFunctionInfo::ShouldFlushCode(
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
- if (data.IsCodeT()) {
- CodeT baseline_code = CodeT::cast(data);
+ if (data.IsCode()) {
+ Code baseline_code = Code::cast(data);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
// If baseline code flushing isn't enabled and we have baseline data on SFI
// we cannot flush baseline / bytecode.
@@ -644,15 +642,15 @@ bool SharedFunctionInfo::ShouldFlushCode(
return bytecode.IsOld();
}
-DEF_GETTER(SharedFunctionInfo, InterpreterTrampoline, CodeT) {
+DEF_GETTER(SharedFunctionInfo, InterpreterTrampoline, Code) {
DCHECK(HasInterpreterData(cage_base));
return interpreter_data(cage_base).interpreter_trampoline(cage_base);
}
DEF_GETTER(SharedFunctionInfo, HasInterpreterData, bool) {
Object data = function_data(cage_base, kAcquireLoad);
- if (data.IsCodeT(cage_base)) {
- CodeT baseline_code = CodeT::cast(data);
+ if (data.IsCode(cage_base)) {
+ Code baseline_code = Code::cast(data);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
data = baseline_code.bytecode_or_interpreter_data(cage_base);
}
@@ -662,8 +660,8 @@ DEF_GETTER(SharedFunctionInfo, HasInterpreterData, bool) {
DEF_GETTER(SharedFunctionInfo, interpreter_data, InterpreterData) {
DCHECK(HasInterpreterData(cage_base));
Object data = function_data(cage_base, kAcquireLoad);
- if (data.IsCodeT(cage_base)) {
- CodeT baseline_code = CodeT::cast(data);
+ if (data.IsCode(cage_base)) {
+ Code baseline_code = Code::cast(data);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
data = baseline_code.bytecode_or_interpreter_data(cage_base);
}
@@ -679,19 +677,19 @@ void SharedFunctionInfo::set_interpreter_data(
DEF_GETTER(SharedFunctionInfo, HasBaselineCode, bool) {
Object data = function_data(cage_base, kAcquireLoad);
- if (data.IsCodeT(cage_base)) {
- DCHECK_EQ(CodeT::cast(data).kind(), CodeKind::BASELINE);
+ if (data.IsCode(cage_base)) {
+ DCHECK_EQ(Code::cast(data).kind(), CodeKind::BASELINE);
return true;
}
return false;
}
-DEF_ACQUIRE_GETTER(SharedFunctionInfo, baseline_code, CodeT) {
+DEF_ACQUIRE_GETTER(SharedFunctionInfo, baseline_code, Code) {
DCHECK(HasBaselineCode(cage_base));
- return CodeT::cast(function_data(cage_base, kAcquireLoad));
+ return Code::cast(function_data(cage_base, kAcquireLoad));
}
-void SharedFunctionInfo::set_baseline_code(CodeT baseline_code,
+void SharedFunctionInfo::set_baseline_code(Code baseline_code,
ReleaseStoreTag tag,
WriteBarrierMode mode) {
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
@@ -755,6 +753,15 @@ const wasm::FunctionSig* SharedFunctionInfo::wasm_function_signature() const {
DCHECK_LT(function_data.function_index(), module->functions.size());
return module->functions[function_data.function_index()].sig;
}
+
+int SharedFunctionInfo::wasm_function_index() const {
+ if (!HasWasmExportedFunctionData()) return -1;
+ const WasmExportedFunctionData& function_data = wasm_exported_function_data();
+ DCHECK_GE(function_data.function_index(), 0);
+ DCHECK_LT(function_data.function_index(), wasm_module()->functions.size());
+ return function_data.function_index();
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
bool SharedFunctionInfo::HasBuiltinId() const {
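GetBytecodeArray above takes a shared lock when called off the main thread. A small standalone sketch of that reader/writer idea using std::shared_mutex; BytecodeStore and its methods are invented for illustration and are not the V8 locking code:

#include <shared_mutex>
#include <string>

class BytecodeStore {
 public:
  std::string Get() const {
    std::shared_lock<std::shared_mutex> guard(mutex_);  // many readers at once
    return bytecode_;
  }
  void Replace(std::string updated) {
    std::unique_lock<std::shared_mutex> guard(mutex_);  // exclusive writer
    bytecode_ = std::move(updated);
  }

 private:
  mutable std::shared_mutex mutex_;
  std::string bytecode_ = "original";
};

int main() {
  BytecodeStore store;
  store.Replace("instrumented");
  return store.Get() == "instrumented" ? 0 : 1;
}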
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index f68f05e320..60e6f42c75 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -68,13 +68,12 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
clear_padding();
}
-CodeT SharedFunctionInfo::GetCode() const {
+Code SharedFunctionInfo::GetCode(Isolate* isolate) const {
// ======
// NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
// GetSharedFunctionInfoCode method in code-stub-assembler.cc.
// ======
- Isolate* isolate = GetIsolate();
Object data = function_data(kAcquireLoad);
if (data.IsSmi()) {
// Holding a Smi means we are a builtin.
@@ -86,10 +85,10 @@ CodeT SharedFunctionInfo::GetCode() const {
DCHECK(HasBytecodeArray());
return isolate->builtins()->code(Builtin::kInterpreterEntryTrampoline);
}
- if (data.IsCodeT()) {
+ if (data.IsCode()) {
// Having baseline Code means we are a compiled, baseline function.
DCHECK(HasBaselineCode());
- return CodeT::cast(data);
+ return Code::cast(data);
}
#if V8_ENABLE_WEBASSEMBLY
if (data.IsAsmWasmData()) {
@@ -128,8 +127,8 @@ CodeT SharedFunctionInfo::GetCode() const {
return isolate->builtins()->code(Builtin::kHandleApiCall);
}
if (data.IsInterpreterData()) {
- CodeT code = InterpreterTrampoline();
- DCHECK(code.IsCodeT());
+ Code code = InterpreterTrampoline();
+ DCHECK(code.IsCode());
DCHECK(code.is_interpreter_trampoline_builtin());
return code;
}
@@ -314,11 +313,10 @@ std::unique_ptr<char[]> SharedFunctionInfo::DebugNameCStr() const {
// static
Handle<String> SharedFunctionInfo::DebugName(
- Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
#if V8_ENABLE_WEBASSEMBLY
if (shared->HasWasmExportedFunctionData()) {
- return shared->GetIsolate()
- ->factory()
+ return isolate->factory()
->NewStringFromUtf8(base::CStrVector(shared->DebugNameCStr().get()))
.ToHandleChecked();
}
@@ -326,7 +324,7 @@ Handle<String> SharedFunctionInfo::DebugName(
DisallowHeapAllocation no_gc;
String function_name = shared->Name();
if (function_name.length() == 0) function_name = shared->inferred_name();
- return handle(function_name, shared->GetIsolate());
+ return handle(function_name, isolate);
}
bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
@@ -349,7 +347,7 @@ void SharedFunctionInfo::DiscardCompiledMetadata(
DisallowGarbageCollection no_gc;
if (HasFeedbackMetadata()) {
if (v8_flags.trace_flush_bytecode) {
- CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[discarding compiled metadata for ");
ShortPrint(scope.file());
PrintF(scope.file(), "]\n");
@@ -415,8 +413,7 @@ void SharedFunctionInfo::DiscardCompiled(
// static
Handle<Object> SharedFunctionInfo::GetSourceCode(
- Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = shared->GetIsolate();
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
Handle<String> source(String::cast(Script::cast(shared->script()).source()),
isolate);
@@ -426,8 +423,7 @@ Handle<Object> SharedFunctionInfo::GetSourceCode(
// static
Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
- Handle<SharedFunctionInfo> shared) {
- Isolate* isolate = shared->GetIsolate();
+ Isolate* isolate, Handle<SharedFunctionInfo> shared) {
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
Handle<String> script_source(
String::cast(Script::cast(shared->script()).source()), isolate);
@@ -490,13 +486,13 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
}
}
-void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
+void SharedFunctionInfo::DisableOptimization(Isolate* isolate,
+ BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
set_flags(DisabledOptimizationReasonBits::update(flags(kRelaxedLoad), reason),
kRelaxedStore);
// Code should be the lazy compilation stub or else interpreted.
- Isolate* isolate = GetIsolate();
if constexpr (DEBUG_BOOL) {
CodeKind kind = abstract_code(isolate).kind(isolate);
CHECK(kind == CodeKind::INTERPRETED_FUNCTION || kind == CodeKind::BUILTIN);
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index b0a7306655..2b642fa886 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -186,7 +186,6 @@ class InterpreterData
class SharedFunctionInfo
: public TorqueGeneratedSharedFunctionInfo<SharedFunctionInfo, HeapObject> {
public:
- NEVER_READ_ONLY_SPACE
DEFINE_TORQUE_GENERATED_SHARED_FUNCTION_INFO_FLAGS()
DEFINE_TORQUE_GENERATED_SHARED_FUNCTION_INFO_FLAGS2()
@@ -205,12 +204,11 @@ class SharedFunctionInfo
inline void SetName(String name);
// Get the code object which represents the execution of this function.
- V8_EXPORT_PRIVATE CodeT GetCode() const;
+ V8_EXPORT_PRIVATE Code GetCode(Isolate* isolate) const;
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
- template <typename IsolateT>
- inline AbstractCode abstract_code(IsolateT* isolate);
+ inline AbstractCode abstract_code(Isolate* isolate);
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
@@ -332,12 +330,12 @@ class SharedFunctionInfo
inline BytecodeArray GetBytecodeArray(IsolateT* isolate) const;
inline void set_bytecode_array(BytecodeArray bytecode);
- DECL_GETTER(InterpreterTrampoline, CodeT)
+ DECL_GETTER(InterpreterTrampoline, Code)
DECL_GETTER(HasInterpreterData, bool)
DECL_GETTER(interpreter_data, InterpreterData)
inline void set_interpreter_data(InterpreterData interpreter_data);
DECL_GETTER(HasBaselineCode, bool)
- DECL_RELEASE_ACQUIRE_ACCESSORS(baseline_code, CodeT)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(baseline_code, Code)
inline void FlushBaselineCode();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
@@ -361,6 +359,7 @@ class SharedFunctionInfo
inline const wasm::WasmModule* wasm_module() const;
inline const wasm::FunctionSig* wasm_function_signature() const;
+ inline int wasm_function_index() const;
#endif // V8_ENABLE_WEBASSEMBLY
// builtin corresponds to the auto-generated Builtin enum.
@@ -401,7 +400,8 @@ class SharedFunctionInfo
// The function's name if it is non-empty, otherwise the inferred name.
std::unique_ptr<char[]> DebugNameCStr() const;
- static Handle<String> DebugName(Handle<SharedFunctionInfo>);
+ static Handle<String> DebugName(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
// Used for flags such as --turbo-filter.
bool PassesFilter(const char* raw_filter);
@@ -522,7 +522,7 @@ class SharedFunctionInfo
// Disable (further) attempted optimization of all functions sharing this
// shared function info.
- void DisableOptimization(BailoutReason reason);
+ void DisableOptimization(Isolate* isolate, BailoutReason reason);
// This class constructor needs to call out to an instance fields
// initializer. This flag is set when creating the
@@ -532,8 +532,10 @@ class SharedFunctionInfo
// [source code]: Source code for the function.
bool HasSourceCode() const;
- static Handle<Object> GetSourceCode(Handle<SharedFunctionInfo> shared);
- static Handle<Object> GetSourceCodeHarmony(Handle<SharedFunctionInfo> shared);
+ static Handle<Object> GetSourceCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
+ static Handle<Object> GetSourceCodeHarmony(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
// Tells whether this function should be subject to debugging, e.g. for
// - scope inspection
@@ -666,7 +668,7 @@ class SharedFunctionInfo
static const uint16_t kFunctionTokenOutOfRange = static_cast<uint16_t>(-1);
static_assert(kMaximumFunctionTokenOffset + 1 == kFunctionTokenOutOfRange);
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kSize);
+ static_assert(kSize % kTaggedSize == 0);
class BodyDescriptor;
@@ -689,8 +691,6 @@ class SharedFunctionInfo
Isolate* isolate);
private:
- friend class WebSnapshotDeserializer;
-
#ifdef VERIFY_HEAP
void SharedFunctionInfoVerify(ReadOnlyRoots roots);
#endif
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 3c460b9b21..98045efaab 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -10,8 +10,7 @@ extern class PreparseData extends HeapObject {
extern class InterpreterData extends Struct {
bytecode_array: BytecodeArray;
- @if(V8_EXTERNAL_CODE_SPACE) interpreter_trampoline: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) interpreter_trampoline: Code;
+ interpreter_trampoline: Code;
}
type FunctionKind extends uint8 constexpr 'FunctionKind';
@@ -47,7 +46,6 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
sparkplug_compiled: bool: 1 bit;
}
-@generateBodyDescriptor
extern class SharedFunctionInfo extends HeapObject {
// function_data field is treated as a custom weak pointer. We visit this
// field as a weak pointer if there is aged bytecode. If there is no bytecode
@@ -81,6 +79,9 @@ extern class SharedFunctionInfo extends HeapObject {
// [unique_id] - For --log-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
+
+ @if(V8_SFI_NEEDS_PADDING) optional_padding: uint32;
+ @ifnot(V8_SFI_NEEDS_PADDING) optional_padding: void;
}
const kDontAdaptArgumentsSentinel: constexpr int32
diff --git a/deps/v8/src/objects/simd.cc b/deps/v8/src/objects/simd.cc
index 4fa55e4cc7..d64b441168 100644
--- a/deps/v8/src/objects/simd.cc
+++ b/deps/v8/src/objects/simd.cc
@@ -5,6 +5,7 @@
#include "src/objects/simd.h"
#include "src/base/cpu.h"
+#include "src/codegen/cpu-features.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/heap-number-inl.h"
@@ -39,8 +40,12 @@ enum class SimdKinds { kSSE, kNeon, kAVX2, kNone };
inline SimdKinds get_vectorization_kind() {
#ifdef __SSE3__
- static base::CPU cpu;
- if (cpu.has_avx2()) {
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+ bool has_avx2 = CpuFeatures::IsSupported(AVX2);
+#else
+ bool has_avx2 = false;
+#endif
+ if (has_avx2) {
return SimdKinds::kAVX2;
} else {
// No need for a runtime check since we do not support x86/x64 CPUs without
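The change above moves AVX2 detection to CpuFeatures::IsSupported behind target-architecture guards. A rough standalone sketch of run-time SIMD path selection, assuming GCC or Clang on x86 (it relies on __builtin_cpu_supports and omits other compilers):

#include <cstdio>

enum class SimdKind { kAVX2, kSSE, kScalar };

SimdKind PickSimdKind() {
#if defined(__x86_64__) || defined(__i386__)
  if (__builtin_cpu_supports("avx2")) return SimdKind::kAVX2;
  if (__builtin_cpu_supports("sse3")) return SimdKind::kSSE;
#endif
  return SimdKind::kScalar;   // non-x86 targets fall back to the scalar path
}

int main() {
  switch (PickSimdKind()) {
    case SimdKind::kAVX2:   std::puts("using AVX2 path");   break;
    case SimdKind::kSSE:    std::puts("using SSE path");    break;
    case SimdKind::kScalar: std::puts("using scalar path"); break;
  }
  return 0;
}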
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index 9f97aba2b3..1759c7927c 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -157,65 +157,58 @@ void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
void ExternalPointerSlot::init(Isolate* isolate, Address value,
ExternalPointerTag tag) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- ExternalPointerTable& table = GetExternalPointerTableForTag(isolate, tag);
- ExternalPointerHandle handle =
- table.AllocateAndInitializeEntry(isolate, value, tag);
- // Use a Release_Store to ensure that the store of the pointer into the
- // table is not reordered after the store of the handle. Otherwise, other
- // threads may access an uninitialized table entry and crash.
- Release_StoreHandle(handle);
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ ExternalPointerTable& table = GetExternalPointerTableForTag(isolate, tag);
+ ExternalPointerHandle handle =
+ table.AllocateAndInitializeEntry(isolate, value, tag);
+ // Use a Release_Store to ensure that the store of the pointer into the
+ // table is not reordered after the store of the handle. Otherwise, other
+ // threads may access an uninitialized table entry and crash.
+ Release_StoreHandle(handle);
+#else
store(isolate, value, tag);
+#endif // V8_ENABLE_SANDBOX
}
#ifdef V8_ENABLE_SANDBOX
ExternalPointerHandle ExternalPointerSlot::Relaxed_LoadHandle() const {
- // TODO(saelo): here and below: remove cast once ExternalPointerHandle is
- // always 32 bit large.
- auto handle_location = reinterpret_cast<ExternalPointerHandle*>(location());
- return base::AsAtomic32::Relaxed_Load(handle_location);
+ return base::AsAtomic32::Relaxed_Load(location());
}
void ExternalPointerSlot::Relaxed_StoreHandle(
ExternalPointerHandle handle) const {
- auto handle_location = reinterpret_cast<ExternalPointerHandle*>(location());
- return base::AsAtomic32::Relaxed_Store(handle_location, handle);
+ return base::AsAtomic32::Relaxed_Store(location(), handle);
}
void ExternalPointerSlot::Release_StoreHandle(
ExternalPointerHandle handle) const {
- auto handle_location = reinterpret_cast<ExternalPointerHandle*>(location());
- return base::AsAtomic32::Release_Store(handle_location, handle);
+ return base::AsAtomic32::Release_Store(location(), handle);
}
#endif // V8_ENABLE_SANDBOX
Address ExternalPointerSlot::load(const Isolate* isolate,
ExternalPointerTag tag) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- const ExternalPointerTable& table =
- GetExternalPointerTableForTag(isolate, tag);
- ExternalPointerHandle handle = Relaxed_LoadHandle();
- return table.Get(handle, tag);
- }
-#endif // V8_ENABLE_SANDBOX
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ const ExternalPointerTable& table =
+ GetExternalPointerTableForTag(isolate, tag);
+ ExternalPointerHandle handle = Relaxed_LoadHandle();
+ return table.Get(handle, tag);
+#else
return ReadMaybeUnalignedValue<Address>(address());
+#endif // V8_ENABLE_SANDBOX
}
void ExternalPointerSlot::store(Isolate* isolate, Address value,
ExternalPointerTag tag) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- ExternalPointerTable& table = GetExternalPointerTableForTag(isolate, tag);
- ExternalPointerHandle handle = Relaxed_LoadHandle();
- table.Set(handle, value, tag);
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+ DCHECK_NE(tag, kExternalPointerNullTag);
+ ExternalPointerTable& table = GetExternalPointerTableForTag(isolate, tag);
+ ExternalPointerHandle handle = Relaxed_LoadHandle();
+ table.Set(handle, value, tag);
+#else
WriteMaybeUnalignedValue<Address>(address(), value);
+#endif // V8_ENABLE_SANDBOX
}
ExternalPointerSlot::RawContent
@@ -272,7 +265,8 @@ inline void CopyTagged(Address dst, const Address src, size_t num_tagged) {
// Sets |counter| number of kTaggedSize-sized values starting at |start| slot.
inline void MemsetTagged(Tagged_t* start, Object value, size_t counter) {
#ifdef V8_COMPRESS_POINTERS
- Tagged_t raw_value = V8HeapCompressionScheme::CompressTagged(value.ptr());
+ // CompressAny since many callers pass values which are not valid objects.
+ Tagged_t raw_value = V8HeapCompressionScheme::CompressAny(value.ptr());
MemsetUint32(start, raw_value, counter);
#else
Address raw_value = value.ptr();
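The comments above stress that the table entry must be fully written before its handle is published, and that readers pair an acquire load with that release store. A minimal standalone illustration with std::atomic (the table, handle and thread names are invented, not V8 code):

#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

uint64_t table[16];                            // the "external pointer table"
std::atomic<uint32_t> handle{0};               // 0 means "not published yet"

void Publisher() {
  table[1] = 0xdeadbeef;                       // initialize the entry first...
  handle.store(1, std::memory_order_release);  // ...then publish the handle
}

void Consumer() {
  uint32_t h = handle.load(std::memory_order_acquire);
  if (h != 0) {
    // The acquire load pairs with the release store, so the entry is visible.
    assert(table[h] == 0xdeadbeef);
  }
}

int main() {
  std::thread a(Publisher), b(Consumer);
  a.join();
  b.join();
  return 0;
}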
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index fbb89b4617..7baf362b30 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -135,8 +135,7 @@ void SourceTextModule::CreateExport(Isolate* isolate,
Handle<SourceTextModule> module,
int cell_index, Handle<FixedArray> names) {
DCHECK_LT(0, names->length());
- Handle<Cell> cell =
- isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ Handle<Cell> cell = isolate->factory()->NewCell();
module->regular_exports().set(ExportIndex(cell_index), *cell);
Handle<ObjectHashTable> exports(module->exports(), isolate);
@@ -980,7 +979,8 @@ Maybe<bool> SourceTextModule::ExecuteAsyncModule(
if (ret.is_null()) {
// The evaluation of an async module cannot throw a JavaScript-observable
// exception.
- DCHECK(isolate->is_execution_termination_pending());
+ DCHECK_IMPLIES(v8_flags.strict_termination_checks,
+ isolate->is_execution_termination_pending());
return Nothing<bool>();
}
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
index 3330f772ff..70d47b36f3 100644
--- a/deps/v8/src/objects/string-comparator.cc
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -16,7 +16,10 @@ void StringComparator::State::Init(
if (!cons_string.is_null()) {
int offset;
string = iter_.Next(&offset);
- String::VisitFlat(this, string, offset, access_guard);
+ // We are resetting the iterator with zero offset, so we should never have
+ // a per-segment offset.
+ DCHECK_EQ(offset, 0);
+ String::VisitFlat(this, string, 0, access_guard);
}
}
diff --git a/deps/v8/src/objects/string-forwarding-table-inl.h b/deps/v8/src/objects/string-forwarding-table-inl.h
index f5e087c55b..b50104bd09 100644
--- a/deps/v8/src/objects/string-forwarding-table-inl.h
+++ b/deps/v8/src/objects/string-forwarding-table-inl.h
@@ -130,7 +130,7 @@ uint32_t StringForwardingTable::Record::raw_hash(
Object hash_or_string = ForwardStringObjectOrHash(cage_base);
uint32_t raw_hash;
if (hash_or_string.IsHeapObject()) {
- raw_hash = String::cast(hash_or_string).raw_hash_field();
+ raw_hash = String::cast(hash_or_string).RawHash();
} else {
raw_hash = static_cast<uint32_t>(hash_or_string.ptr());
}
@@ -203,9 +203,8 @@ bool StringForwardingTable::Record::TryUpdateExternalResource(Address address) {
void StringForwardingTable::Record::DisposeExternalResource() {
bool is_one_byte;
auto resource = external_resource(&is_one_byte);
- if (resource != nullptr) {
- resource->Dispose();
- }
+ DCHECK_NOT_NULL(resource);
+ resource->Dispose();
}
void StringForwardingTable::Record::DisposeUnusedExternalResource(
@@ -248,8 +247,10 @@ class StringForwardingTable::Block {
return &elements_[index];
}
- void UpdateAfterEvacuation(PtrComprCageBase cage_base);
- void UpdateAfterEvacuation(PtrComprCageBase cage_base, int up_to_index);
+ void UpdateAfterYoungEvacuation(PtrComprCageBase cage_base);
+ void UpdateAfterYoungEvacuation(PtrComprCageBase cage_base, int up_to_index);
+ void UpdateAfterFullEvacuation(PtrComprCageBase cage_base);
+ void UpdateAfterFullEvacuation(PtrComprCageBase cage_base, int up_to_index);
private:
const int capacity_;
diff --git a/deps/v8/src/objects/string-forwarding-table.cc b/deps/v8/src/objects/string-forwarding-table.cc
index 8c58966965..15733ef7e4 100644
--- a/deps/v8/src/objects/string-forwarding-table.cc
+++ b/deps/v8/src/objects/string-forwarding-table.cc
@@ -58,32 +58,75 @@ std::unique_ptr<StringForwardingTable::Block> StringForwardingTable::Block::New(
return std::unique_ptr<Block>(new (capacity) Block(capacity));
}
-void StringForwardingTable::Block::UpdateAfterEvacuation(
+void StringForwardingTable::Block::UpdateAfterYoungEvacuation(
PtrComprCageBase cage_base) {
- UpdateAfterEvacuation(cage_base, capacity_);
+ UpdateAfterYoungEvacuation(cage_base, capacity_);
}
-void StringForwardingTable::Block::UpdateAfterEvacuation(
+void StringForwardingTable::Block::UpdateAfterFullEvacuation(
+ PtrComprCageBase cage_base) {
+ UpdateAfterFullEvacuation(cage_base, capacity_);
+}
+
+namespace {
+
+bool UpdateForwardedSlot(HeapObject object, OffHeapObjectSlot slot) {
+ MapWord map_word = object.map_word(kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+ HeapObject forwarded_object = map_word.ToForwardingAddress(object);
+ slot.Release_Store(forwarded_object);
+ return true;
+ }
+ return false;
+}
+
+bool UpdateForwardedSlot(Object object, OffHeapObjectSlot slot) {
+ if (!object.IsHeapObject()) return false;
+ return UpdateForwardedSlot(HeapObject::cast(object), slot);
+}
+
+} // namespace
+
+void StringForwardingTable::Block::UpdateAfterYoungEvacuation(
PtrComprCageBase cage_base, int up_to_index) {
- // This is only used for Scavenger.
- DCHECK(!v8_flags.minor_mc);
- DCHECK(v8_flags.always_use_string_forwarding_table);
for (int index = 0; index < up_to_index; ++index) {
- Object original = record(index)->OriginalStringObject(cage_base);
+ OffHeapObjectSlot slot = record(index)->OriginalStringSlot();
+ Object original = slot.Acquire_Load(cage_base);
if (!original.IsHeapObject()) continue;
HeapObject object = HeapObject::cast(original);
if (Heap::InFromPage(object)) {
- DCHECK(!object.InSharedWritableHeap());
- MapWord map_word = object.map_word(kRelaxedLoad);
- if (map_word.IsForwardingAddress()) {
- HeapObject forwarded_object = map_word.ToForwardingAddress();
- record(index)->set_original_string(forwarded_object);
- } else {
- record(index)->set_original_string(deleted_element());
+ DCHECK(!object.InWritableSharedSpace());
+ const bool was_forwarded = UpdateForwardedSlot(object, slot);
+ if (!was_forwarded) {
+ // The object died in young space.
+ slot.Release_Store(deleted_element());
}
} else {
DCHECK(!object.map_word(kRelaxedLoad).IsForwardingAddress());
}
+// No need to update forwarded (internalized) strings as they are never
+// in young space.
+#ifdef DEBUG
+ Object forward = record(index)->ForwardStringObjectOrHash(cage_base);
+ if (forward.IsHeapObject()) {
+ DCHECK(!Heap::InYoungGeneration(HeapObject::cast(forward)));
+ }
+#endif
+ }
+}
+
+void StringForwardingTable::Block::UpdateAfterFullEvacuation(
+ PtrComprCageBase cage_base, int up_to_index) {
+ for (int index = 0; index < up_to_index; ++index) {
+ OffHeapObjectSlot original_slot = record(index)->OriginalStringSlot();
+ Object original = original_slot.Acquire_Load(cage_base);
+ if (!original.IsHeapObject()) continue;
+ UpdateForwardedSlot(HeapObject::cast(original), original_slot);
+ // During mark compact the forwarded (internalized) string may have been
+ // evacuated.
+ OffHeapObjectSlot forward_slot = record(index)->ForwardStringOrHashSlot();
+ Object forward = forward_slot.Acquire_Load(cage_base);
+ UpdateForwardedSlot(forward, forward_slot);
}
}
@@ -277,7 +320,16 @@ StringForwardingTable::GetExternalResource(int index, bool* is_one_byte) const {
}
void StringForwardingTable::TearDown() {
- IterateElements([](Record* record) { record->DisposeExternalResource(); });
+ std::unordered_set<Address> disposed_resources;
+ IterateElements([this, &disposed_resources](Record* record) {
+ if (record->OriginalStringObject(isolate_) != deleted_element()) {
+ Address resource = record->ExternalResourceAddress();
+ if (resource != kNullAddress && disposed_resources.count(resource) == 0) {
+ record->DisposeExternalResource();
+ disposed_resources.insert(resource);
+ }
+ }
+ });
Reset();
}
@@ -295,7 +347,9 @@ void StringForwardingTable::Reset() {
next_free_index_ = 0;
}
-void StringForwardingTable::UpdateAfterEvacuation() {
+void StringForwardingTable::UpdateAfterYoungEvacuation() {
+ // This is only used for the Scavenger.
+ DCHECK(!v8_flags.minor_mc);
DCHECK(v8_flags.always_use_string_forwarding_table);
if (empty()) return;
@@ -306,12 +360,29 @@ void StringForwardingTable::UpdateAfterEvacuation() {
for (unsigned int block_index = 0; block_index < last_block_index;
++block_index) {
Block* block = blocks->LoadBlock(block_index, kAcquireLoad);
- block->UpdateAfterEvacuation(isolate_);
+ block->UpdateAfterYoungEvacuation(isolate_);
+ }
+ // Handle last block separately, as it is not filled to capacity.
+ const int max_index = IndexInBlock(size() - 1, last_block_index) + 1;
+ blocks->LoadBlock(last_block_index, kAcquireLoad)
+ ->UpdateAfterYoungEvacuation(isolate_, max_index);
+}
+
+void StringForwardingTable::UpdateAfterFullEvacuation() {
+ if (empty()) return;
+
+ BlockVector* blocks = blocks_.load(std::memory_order_relaxed);
+ const unsigned int last_block_index =
+ static_cast<unsigned int>(blocks->size() - 1);
+ for (unsigned int block_index = 0; block_index < last_block_index;
+ ++block_index) {
+ Block* block = blocks->LoadBlock(block_index, kAcquireLoad);
+ block->UpdateAfterFullEvacuation(isolate_);
}
// Handle last block separately, as it is not filled to capacity.
const int max_index = IndexInBlock(size() - 1, last_block_index) + 1;
blocks->LoadBlock(last_block_index, kAcquireLoad)
- ->UpdateAfterEvacuation(isolate_, max_index);
+ ->UpdateAfterFullEvacuation(isolate_, max_index);
}
} // namespace internal
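TearDown above now deduplicates by resource address so a resource shared by several records is disposed exactly once. A small standalone sketch of that idea; Resource and Record are invented stand-ins:

#include <cstdio>
#include <unordered_set>
#include <vector>

struct Resource {
  void Dispose() { std::puts("disposed"); }
};

struct Record {
  Resource* resource = nullptr;   // may be shared between records
};

void TearDown(std::vector<Record>& records) {
  std::unordered_set<Resource*> disposed;
  for (Record& record : records) {
    Resource* r = record.resource;
    if (r != nullptr && disposed.insert(r).second) {
      r->Dispose();               // first time this resource is seen
    }
  }
}

int main() {
  Resource shared;
  std::vector<Record> records{{&shared}, {&shared}, {nullptr}};
  TearDown(records);              // prints "disposed" exactly once
  return 0;
}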
diff --git a/deps/v8/src/objects/string-forwarding-table.h b/deps/v8/src/objects/string-forwarding-table.h
index 3cf7d3280b..a2c2093df7 100644
--- a/deps/v8/src/objects/string-forwarding-table.h
+++ b/deps/v8/src/objects/string-forwarding-table.h
@@ -65,7 +65,8 @@ class StringForwardingTable {
// Dispose all external resources stored in the table.
void TearDown();
void Reset();
- void UpdateAfterEvacuation();
+ void UpdateAfterYoungEvacuation();
+ void UpdateAfterFullEvacuation();
class Record;
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index c7ddd9f696..41bba35d8d 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -18,6 +18,7 @@
#include "src/sandbox/external-pointer-inl.h"
#include "src/sandbox/external-pointer.h"
#include "src/strings/string-hasher-inl.h"
+#include "src/strings/unicode-inl.h"
#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
@@ -261,8 +262,7 @@ inline TResult StringShape::DispatchToSpecificTypeWithoutCast(TArgs&&... args) {
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
return TDispatcher::HandleSlicedString(std::forward<TArgs>(args)...);
- case kThinStringTag | kOneByteStringTag:
- case kThinStringTag | kTwoByteStringTag:
+ case kThinStringTag:
return TDispatcher::HandleThinString(std::forward<TArgs>(args)...);
default:
return TDispatcher::HandleInvalidString(std::forward<TArgs>(args)...);
@@ -302,12 +302,14 @@ inline TResult StringShape::DispatchToSpecificType(String str,
}
DEF_GETTER(String, IsOneByteRepresentation, bool) {
- uint32_t type = map(cage_base).instance_type();
+ String string = IsThinString() ? ThinString::cast(*this).actual() : *this;
+ uint32_t type = string.map(cage_base).instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
DEF_GETTER(String, IsTwoByteRepresentation, bool) {
- uint32_t type = map(cage_base).instance_type();
+ String string = IsThinString() ? ThinString::cast(*this).actual() : *this;
+ uint32_t type = string.map(cage_base).instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
@@ -448,14 +450,15 @@ class SeqSubStringKey final : public StringTableKey {
CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
length());
internalized_string_ = result;
+ } else {
+ Handle<SeqTwoByteString> result =
+ isolate->factory()->AllocateRawTwoByteInternalizedString(
+ length(), raw_hash_field());
+ DisallowGarbageCollection no_gc;
+ CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
+ length());
+ internalized_string_ = result;
}
- Handle<SeqTwoByteString> result =
- isolate->factory()->AllocateRawTwoByteInternalizedString(
- length(), raw_hash_field());
- DisallowGarbageCollection no_gc;
- CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
- length());
- internalized_string_ = result;
}
Handle<String> GetHandleForInsertion() {
@@ -569,13 +572,14 @@ bool String::IsEqualToImpl(
case kConsStringTag | kTwoByteStringTag: {
// The ConsString path is more complex and rare, so call out to an
// out-of-line handler.
- return IsConsStringEqualToImpl<Char>(ConsString::cast(string),
- slice_offset, str, cage_base,
- access_guard);
+ // Slices cannot refer to ConsStrings, so there cannot be a non-zero
+ // slice offset here.
+ DCHECK_EQ(slice_offset, 0);
+ return IsConsStringEqualToImpl<Char>(ConsString::cast(string), str,
+ cage_base, access_guard);
}
- case kThinStringTag | kOneByteStringTag:
- case kThinStringTag | kTwoByteStringTag:
+ case kThinStringTag:
string = ThinString::cast(string).actual(cage_base);
continue;
@@ -588,17 +592,20 @@ bool String::IsEqualToImpl(
// static
template <typename Char>
bool String::IsConsStringEqualToImpl(
- ConsString string, int slice_offset, base::Vector<const Char> str,
- PtrComprCageBase cage_base,
+ ConsString string, base::Vector<const Char> str, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
// Already checked the len in IsEqualToImpl. Check GE rather than EQ in case
// this is a prefix check.
DCHECK_GE(string.length(), str.size());
- ConsStringIterator iter(ConsString::cast(string), slice_offset);
+ ConsStringIterator iter(ConsString::cast(string));
base::Vector<const Char> remaining_str = str;
- for (String segment = iter.Next(&slice_offset); !segment.is_null();
- segment = iter.Next(&slice_offset)) {
+ int offset;
+ for (String segment = iter.Next(&offset); !segment.is_null();
+ segment = iter.Next(&offset)) {
+ // We create the iterator without an offset, so we should never have a
+ // per-segment offset.
+ DCHECK_EQ(offset, 0);
// Compare the individual segment against the appropriate subvector of the
// remaining string.
size_t len = std::min<size_t>(segment.length(), remaining_str.size());
@@ -620,18 +627,20 @@ bool String::IsOneByteEqualTo(base::Vector<const char> str) {
}
template <typename Char>
-const Char* String::GetChars(PtrComprCageBase cage_base,
- const DisallowGarbageCollection& no_gc) const {
+const Char* String::GetDirectStringChars(
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ DCHECK(StringShape(*this).IsDirect());
return StringShape(*this, cage_base).IsExternal()
? CharTraits<Char>::ExternalString::cast(*this).GetChars(cage_base)
: CharTraits<Char>::String::cast(*this).GetChars(no_gc);
}
template <typename Char>
-const Char* String::GetChars(
+const Char* String::GetDirectStringChars(
PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) const {
+ DCHECK(StringShape(*this).IsDirect());
return StringShape(*this, cage_base).IsExternal()
? CharTraits<Char>::ExternalString::cast(*this).GetChars(cage_base)
: CharTraits<Char>::String::cast(*this).GetChars(no_gc,
@@ -945,8 +954,7 @@ ConsString String::VisitFlat(
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(string);
- case kThinStringTag | kOneByteStringTag:
- case kThinStringTag | kTwoByteStringTag:
+ case kThinStringTag:
string = ThinString::cast(string).actual(cage_base);
continue;
@@ -956,6 +964,31 @@ ConsString String::VisitFlat(
}
}
+bool String::IsWellFormedUnicode(Isolate* isolate, Handle<String> string) {
+ // One-byte strings are definitionally well formed and cannot have unpaired
+ // surrogates.
+ //
+ // Note that an indirect string's 1-byte flag can differ from its underlying
+ // string's 1-byte flag, because the underlying string may have been
+ // externalized from 1-byte to 2-byte. That is, the 1-byte flag is the
+ // 1-byteness at time of creation. However, this is sufficient to determine
+ // well-formedness. String::MakeExternal requires that the external resource's
+ // content is equal to the original string's content, even if 1-byteness
+ // differs.
+ if (string->IsOneByteRepresentation()) return true;
+
+ // TODO(v8:13557): The two-byte case can be optimized by extending the
+ // InstanceType. See
+ // https://docs.google.com/document/d/15f-1c_Ysw3lvjy_Gx0SmmD9qeO8UuXuAbWIpWCnTDO8/
+ string = Flatten(isolate, string);
+ if (String::IsOneByteRepresentationUnderneath(*string)) return true;
+ DisallowGarbageCollection no_gc;
+ String::FlatContent flat = string->GetFlatContent(no_gc);
+ DCHECK(flat.IsFlat());
+ const uint16_t* data = flat.ToUC16Vector().begin();
+ return !unibrow::Utf16::HasUnpairedSurrogate(data, string->length());
+}
+
template <>
inline base::Vector<const uint8_t> String::GetCharVector(
const DisallowGarbageCollection& no_gc) {
@@ -1476,6 +1509,22 @@ SubStringRange::iterator SubStringRange::end() {
return SubStringRange::iterator(string_, first_ + length_, no_gc_);
}
+void SeqOneByteString::clear_padding_destructively(int length) {
+ // Ensure we are not killing the map word, which is already set at this point
+ static_assert(SizeFor(0) >= kObjectAlignment + kTaggedSize);
+ memset(
+ reinterpret_cast<void*>(address() + SizeFor(length) - kObjectAlignment),
+ 0, kObjectAlignment);
+}
+
+void SeqTwoByteString::clear_padding_destructively(int length) {
+ // Ensure we are not killing the map word, which is already set at this point
+ static_assert(SizeFor(0) >= kObjectAlignment + kTaggedSize);
+ memset(
+ reinterpret_cast<void*>(address() + SizeFor(length) - kObjectAlignment),
+ 0, kObjectAlignment);
+}
+
// static
bool String::IsInPlaceInternalizable(String string) {
return IsInPlaceInternalizable(string.map().instance_type());
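IsWellFormedUnicode above reduces to scanning a two-byte string for unpaired surrogates (delegated to unibrow::Utf16::HasUnpairedSurrogate). A standalone sketch of such a scan over a char16_t buffer, written from the UTF-16 rules rather than copied from V8:

#include <cassert>
#include <cstddef>

bool HasUnpairedSurrogate(const char16_t* data, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    char16_t c = data[i];
    if (c >= 0xD800 && c <= 0xDBFF) {          // lead surrogate
      if (i + 1 >= length) return true;        // nothing can trail it
      char16_t next = data[i + 1];
      if (next < 0xDC00 || next > 0xDFFF) return true;
      ++i;                                     // consume the trail surrogate
    } else if (c >= 0xDC00 && c <= 0xDFFF) {   // lone trail surrogate
      return true;
    }
  }
  return false;
}

int main() {
  const char16_t ok[] = {0xD83D, 0xDE00};      // a valid surrogate pair
  const char16_t bad[] = {0xD83D, 0x0041};     // lead surrogate followed by 'A'
  assert(!HasUnpairedSurrogate(ok, 2));
  assert(HasUnpairedSurrogate(bad, 2));
  return 0;
}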
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index f87e5eb8e6..bf0f9fceb3 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -375,6 +375,7 @@ class InternalizedStringKey final : public StringTableKey {
// We can see already internalized strings here only when sharing the
// string table and allowing concurrent internalization.
DCHECK(v8_flags.shared_string_table);
+ internalized_string_ = string_;
return;
}
@@ -396,7 +397,7 @@ class InternalizedStringKey final : public StringTableKey {
// original string is not transitioned to a ThinString (setting the
// resource) immediately.
DCHECK(!shape.IsShared());
- string_ =
+ internalized_string_ =
isolate->factory()->InternalizeExternalString<ExternalOneByteString>(
string_);
} else if (can_avoid_copy && shape.IsExternalTwoByte()) {
@@ -406,13 +407,13 @@ class InternalizedStringKey final : public StringTableKey {
// original string is not transitioned to a ThinString (setting the
// resource) immediately.
DCHECK(!shape.IsShared());
- string_ =
+ internalized_string_ =
isolate->factory()->InternalizeExternalString<ExternalTwoByteString>(
string_);
} else {
// Otherwise allocate a new internalized string.
- string_ = isolate->factory()->NewInternalizedStringImpl(string_, length(),
- raw_hash_field());
+ internalized_string_ = isolate->factory()->NewInternalizedStringImpl(
+ string_, length(), raw_hash_field());
}
}
@@ -435,11 +436,17 @@ class InternalizedStringKey final : public StringTableKey {
// in-place migrate the original string instead of internalizing the copy
// and migrating the original string to a ThinString. This scenario doesn't
// seem to be common enough to justify re-computing the strategy here.
- return string_;
+ return internalized_string_.ToHandleChecked();
}
private:
Handle<String> string_;
+ // Copy of the string to be internalized (only set if the string is not
+ // in-place internalizable). We can't overwrite the original string, as
+ // internalized external strings don't set the resource directly (deferred to
+ // MakeThin to ensure unique ownership of the resource), and thus would break
+ // equality checks in case of hash collisions.
+ MaybeHandle<String> internalized_string_;
MaybeHandle<Map> maybe_internalized_map_;
};
@@ -733,7 +740,8 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
String::WriteToFlat(source, buffer.get(), 0, length, isolate, access_guard);
chars = buffer.get();
} else {
- chars = source.GetChars<Char>(isolate, no_gc, access_guard) + start;
+ chars =
+ source.GetDirectStringChars<Char>(isolate, no_gc, access_guard) + start;
}
if (!Name::IsHashFieldComputed(raw_hash_field) || !is_source_hash_usable) {
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index cf7b0afd92..e2f18a6e84 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -172,17 +172,6 @@ void MigrateExternalString(Isolate* isolate, String string,
}
}
-template <typename IsolateT>
-Map ComputeThinStringMap(IsolateT* isolate, StringShape from_string_shape,
- bool one_byte) {
- ReadOnlyRoots roots(isolate);
- if (from_string_shape.IsShared()) {
- return one_byte ? roots.shared_thin_one_byte_string_map()
- : roots.shared_thin_string_map();
- }
- return one_byte ? roots.thin_one_byte_string_map() : roots.thin_string_map();
-}
-
void InitExternalPointerFieldsDuringExternalization(String string, Map new_map,
Isolate* isolate) {
string.InitExternalPointerField<kExternalStringResourceTag>(
@@ -222,9 +211,23 @@ void String::MakeThin(
bool may_contain_recorded_slots = initial_shape.IsIndirect();
int old_size = SizeFromMap(initial_map);
- Map target_map = ComputeThinStringMap(isolate, initial_shape,
- internalized.IsOneByteRepresentation());
+ Map target_map = ReadOnlyRoots(isolate).thin_string_map();
+ const bool in_shared_heap = InWritableSharedSpace();
+ if (in_shared_heap) {
+ // Objects in the shared heap are always direct, therefore they can't have
+ // any invalidated slots.
+ update_invalidated_object_size = UpdateInvalidatedObjectSize::kNo;
+ }
if (initial_shape.IsExternal()) {
+ // Conservatively assume ExternalStrings may have recorded slots if they
+ // don't reside in the shared heap, because they could have been transitioned
+ // from ConsStrings without having had the recorded slots cleared.
+ // In the shared heap no such transitions are possible, as it can't contain
+ // indirect strings.
+ // Indirect strings also don't get large enough to be in LO space.
+ // TODO(v8:13374): Fix this more uniformly.
+ may_contain_recorded_slots = !in_shared_heap && !Heap::IsLargeObject(*this);
+
// Notify GC about the layout change before the transition to avoid
// concurrent marking from observing any in-between state (e.g.
// ExternalString map where the resource external pointer is overwritten
@@ -232,14 +235,11 @@ void String::MakeThin(
// ExternalString -> ThinString transitions can only happen on the
// main-thread.
isolate->AsIsolate()->heap()->NotifyObjectLayoutChange(
- *this, no_gc, InvalidateRecordedSlots::kYes, ThinString::kSize);
+ *this, no_gc,
+ may_contain_recorded_slots ? InvalidateRecordedSlots::kYes
+ : InvalidateRecordedSlots::kNo,
+ ThinString::kSize);
MigrateExternalString(isolate->AsIsolate(), *this, internalized);
-
- // Conservatively assume ExternalStrings may have recorded slots, because
- // they could have been transitioned from ConsStrings without having had the
- // recorded slots cleared.
- // TODO(v8:13374): Fix this more uniformly.
- may_contain_recorded_slots = true;
}
// Update actual first and then do release store on the map word. This ensures
@@ -377,12 +377,14 @@ void String::MakeExternalDuringGC(Isolate* isolate, T* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- // Shared strings are never indirect or large.
- DCHECK(!isolate->heap()->IsLargeObject(*this));
+ // Shared strings are never indirect.
DCHECK(!StringShape(*this).IsIndirect());
- isolate->heap()->NotifyObjectSizeChange(*this, size, new_size,
- ClearRecordedSlots::kNo);
+ if (!isolate->heap()->IsLargeObject(*this)) {
+ isolate->heap()->NotifyObjectSizeChange(*this, size, new_size,
+ ClearRecordedSlots::kNo,
+ UpdateInvalidatedObjectSize::kNo);
+ }
// The external pointer slots must be initialized before the new map is
// installed. Otherwise, a GC marking thread may see the new map before the
@@ -462,9 +464,15 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
if (!isolate->heap()->IsLargeObject(*this)) {
+ // Strings in the shared heap are never indirect and thus cannot have any
+ // invalidated slots.
+ const auto update_invalidated_object_size =
+ InWritableSharedSpace() ? UpdateInvalidatedObjectSize::kNo
+ : UpdateInvalidatedObjectSize::kYes;
isolate->heap()->NotifyObjectSizeChange(
*this, size, new_size,
- has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
+ has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo,
+ update_invalidated_object_size);
} else {
// We don't need special handling for the combination IsLargeObject &&
// has_pointers, because indirect strings never get that large.
@@ -542,13 +550,19 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
int new_size = this->SizeFromMap(new_map);
if (has_pointers) {
+ DCHECK(!InWritableSharedSpace());
isolate->heap()->NotifyObjectLayoutChange(
*this, no_gc, InvalidateRecordedSlots::kYes, new_size);
}
-
+ // Strings in the shared heap are never indirect and thus cannot have any
+ // invalidated slots.
+ const auto update_invalidated_object_size =
+ InWritableSharedSpace() ? UpdateInvalidatedObjectSize::kNo
+ : UpdateInvalidatedObjectSize::kYes;
isolate->heap()->NotifyObjectSizeChange(
*this, size, new_size,
- has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
+ has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo,
+ update_invalidated_object_size);
} else {
// We don't need special handling for the combination IsLargeObject &&
// has_pointers, because indirect strings never get that large.
@@ -596,8 +610,39 @@ bool String::SupportsExternalization() {
DCHECK_LE(ExternalString::kUncachedSize, this->Size());
#endif
- Isolate* isolate = GetIsolateFromWritableObject(*this);
- return !isolate->heap()->IsInGCPostProcessing();
+ return true;
+}
+
+bool String::SupportsExternalization(v8::String::Encoding encoding) {
+ if (this->IsThinString()) {
+ return i::ThinString::cast(*this).actual().SupportsExternalization(
+ encoding);
+ }
+
+ // RO_SPACE strings cannot be externalized.
+ if (IsReadOnlyHeapObject(*this)) {
+ return false;
+ }
+
+#ifdef V8_COMPRESS_POINTERS
+ // Small strings may not be in-place externalizable.
+ if (this->Size() < ExternalString::kUncachedSize) return false;
+#else
+ DCHECK_LE(ExternalString::kUncachedSize, this->Size());
+#endif
+
+ StringShape shape(*this);
+
+ // Already an external string.
+ if (shape.IsExternal()) {
+ return false;
+ }
+
+ // Encoding changes are not supported.
+ static_assert(kStringEncodingMask == 1 << 3);
+ static_assert(v8::String::Encoding::ONE_BYTE_ENCODING == 1 << 3);
+ static_assert(v8::String::Encoding::TWO_BYTE_ENCODING == 0);
+ return shape.encoding_tag() == static_cast<uint32_t>(encoding);
}
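
The final check in the new SupportsExternalization(encoding) compares the string's encoding bit against the requested encoding; the static_asserts pin the bit layout that makes this a single mask-and-compare. A self-contained sketch, reusing only the constants from those asserts (the helper name is made up):

  #include <cassert>
  #include <cstdint>

  // Values taken from the static_asserts in SupportsExternalization.
  constexpr uint32_t kStringEncodingMask = 1 << 3;
  constexpr uint32_t kOneByteEncoding = 1 << 3;  // v8::String::ONE_BYTE_ENCODING
  constexpr uint32_t kTwoByteEncoding = 0;       // v8::String::TWO_BYTE_ENCODING

  // Externalization may not change the encoding, so the request is only
  // supported when the string's encoding bit matches the requested encoding.
  bool EncodingMatches(uint32_t instance_type_bits, uint32_t requested_encoding) {
    return (instance_type_bits & kStringEncodingMask) == requested_encoding;
  }

  int main() {
    // Any one-byte instance type matches a ONE_BYTE_ENCODING request ...
    assert(EncodingMatches(kOneByteEncoding | 0x1, kOneByteEncoding));
    // ... while a two-byte string does not, so encoding changes are rejected.
    assert(!EncodingMatches(kTwoByteEncoding | 0x1, kOneByteEncoding));
    return 0;
  }
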
const char* String::PrefixForDebugPrint() const {
@@ -979,8 +1024,7 @@ void String::WriteToFlat(String source, sinkchar* sink, int start, int length,
start += offset;
continue;
}
- case kOneByteStringTag | kThinStringTag:
- case kTwoByteStringTag | kThinStringTag:
+ case kThinStringTag:
source = ThinString::cast(source).actual(cage_base);
continue;
}
@@ -1656,7 +1700,8 @@ uint32_t HashString(String string, size_t start, int length, uint64_t seed,
access_guard);
chars = buffer.get();
} else {
- chars = string.GetChars<Char>(cage_base, no_gc, access_guard) + start;
+ chars = string.GetDirectStringChars<Char>(cage_base, no_gc, access_guard) +
+ start;
}
return StringHasher::HashSequentialString<Char>(chars, length, seed);
@@ -1681,7 +1726,7 @@ uint32_t String::ComputeAndSetRawHash(
DCHECK_IMPLIES(!v8_flags.shared_string_table, !HasHashCode());
// Store the hash code in the object.
- uint64_t seed = HashSeed(GetReadOnlyRoots());
+ uint64_t seed = HashSeed(EarlyGetReadOnlyRoots());
size_t start = 0;
String string = *this;
PtrComprCageBase cage_base = GetPtrComprCageBase(string);
@@ -1764,7 +1809,8 @@ void String::PrintOn(std::ostream& ostream) {
}
}
-Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
+Handle<String> SeqString::Truncate(Isolate* isolate, Handle<SeqString> string,
+ int new_length) {
if (new_length == 0) return string->GetReadOnlyRoots().empty_string_handle();
int new_size, old_size;
@@ -1786,16 +1832,20 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
DCHECK(IsAligned(start_of_string + new_size, kObjectAlignment));
#endif
- Heap* heap = Heap::FromWritableHeapObject(*string);
+ Heap* heap = isolate->heap();
if (!heap->IsLargeObject(*string)) {
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
+ // No slot invalidation needed since this method is only used on freshly
+ // allocated strings.
heap->NotifyObjectSizeChange(*string, old_size, new_size,
- ClearRecordedSlots::kNo);
+ ClearRecordedSlots::kNo,
+ UpdateInvalidatedObjectSize::kNo);
}
// We are storing the new length using release store after creating a filler
// for the left-over space to avoid races with the sweeper thread.
string->set_length(new_length, kReleaseStore);
+ string->ClearPadding();
return string;
}
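
The ordering the comment insists on (fill the freed tail first, then publish the shorter length with a release store) can be sketched without the V8 heap; the filler byte and the atomic length below stand in for V8's filler objects and the release store of the string length:

  #include <atomic>
  #include <cstddef>
  #include <cstring>
  #include <vector>

  struct ShrinkableBuffer {
    std::vector<char> data;
    std::atomic<size_t> length;  // published length, read by other threads
  };

  // Shrink in place: first overwrite the now-unused tail with a filler value,
  // then publish the new length with release semantics, so a concurrent reader
  // that observes the shorter length also observes the filled tail.
  void TruncateInPlace(ShrinkableBuffer& buf, size_t new_length, char filler) {
    size_t old_length = buf.length.load(std::memory_order_relaxed);
    if (new_length >= old_length) return;
    std::memset(buf.data.data() + new_length, filler, old_length - new_length);
    buf.length.store(new_length, std::memory_order_release);
  }

  int main() {
    ShrinkableBuffer buf;
    buf.data.assign(8, 'x');
    buf.length.store(8);
    TruncateInPlace(buf, 5, '\0');
    return buf.data[7] == '\0' ? 0 : 1;
  }
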
@@ -1821,6 +1871,25 @@ SeqString::DataAndPaddingSizes SeqTwoByteString::GetDataAndPaddingSizes()
return DataAndPaddingSizes{data_size, padding_size};
}
+#ifdef VERIFY_HEAP
+V8_EXPORT_PRIVATE void SeqString::SeqStringVerify(Isolate* isolate) {
+ TorqueGeneratedSeqString<SeqString, String>::SeqStringVerify(isolate);
+ DataAndPaddingSizes sz = GetDataAndPaddingSizes();
+ auto padding = reinterpret_cast<char*>(address() + sz.data_size);
+ CHECK(sz.padding_size <= kTaggedSize);
+ for (int i = 0; i < sz.padding_size; ++i) {
+ CHECK_EQ(padding[i], 0);
+ }
+}
+#endif // VERIFY_HEAP
+
+void SeqString::ClearPadding() {
+ DataAndPaddingSizes sz = GetDataAndPaddingSizes();
+ DCHECK_EQ(address() + sz.data_size + sz.padding_size, address() + Size());
+ if (sz.padding_size == 0) return;
+ memset(reinterpret_cast<void*>(address() + sz.data_size), 0, sz.padding_size);
+}
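
ClearPadding zeroes only the bytes between the end of the character payload and the end of the pointer-aligned object, which is what SeqStringVerify later checks. The same idea on a plain byte buffer (sizes chosen arbitrarily for illustration):

  #include <cassert>
  #include <cstddef>
  #include <cstring>
  #include <vector>

  // Zero only the padding tail of an object that occupies `object_size` bytes
  // but whose meaningful payload ends at `data_size`.
  void ClearPadding(unsigned char* object, size_t data_size, size_t object_size) {
    assert(data_size <= object_size);
    size_t padding_size = object_size - data_size;
    if (padding_size == 0) return;
    std::memset(object + data_size, 0, padding_size);
  }

  int main() {
    std::vector<unsigned char> object(16, 0xAB);  // 16-byte "object"
    ClearPadding(object.data(), /*data_size=*/13, /*object_size=*/16);
    for (size_t i = 13; i < 16; ++i) assert(object[i] == 0);  // padding zeroed
    assert(object[0] == 0xAB);                                // payload untouched
    return 0;
  }
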
+
uint16_t ConsString::Get(
int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const {
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index ac56001bd1..aacc6c994f 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -207,12 +207,12 @@ class String : public TorqueGeneratedString<String, Name> {
// SharedStringAccessGuard is not needed (i.e. on the main thread or on
// read-only strings).
template <typename Char>
- inline const Char* GetChars(PtrComprCageBase cage_base,
- const DisallowGarbageCollection& no_gc) const;
+ inline const Char* GetDirectStringChars(
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc) const;
// Get chars from sequential or external strings.
template <typename Char>
- inline const Char* GetChars(
+ inline const Char* GetDirectStringChars(
PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) const;
@@ -422,7 +422,10 @@ class String : public TorqueGeneratedString<String, Name> {
v8::String::ExternalStringResource* resource);
V8_EXPORT_PRIVATE bool MakeExternal(
v8::String::ExternalOneByteStringResource* resource);
+ // TODO(pthier, v8:13785): Remove once v8::String::CanMakeExternal without
+ // encoding is removed.
bool SupportsExternalization();
+ bool SupportsExternalization(v8::String::Encoding);
// Conversion.
// "array index": an index allowed by the ES spec for JSArrays.
@@ -526,6 +529,10 @@ class String : public TorqueGeneratedString<String, Name> {
PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
+ // Returns true if this string has no unpaired surrogates and false otherwise.
+ static inline bool IsWellFormedUnicode(Isolate* isolate,
+ Handle<String> string);
+
static inline bool IsAscii(const char* chars, int length) {
return IsAscii(reinterpret_cast<const uint8_t*>(chars), length);
}
@@ -623,7 +630,7 @@ class String : public TorqueGeneratedString<String, Name> {
// Out-of-line IsEqualToImpl for ConsString.
template <typename Char>
V8_NOINLINE static bool IsConsStringEqualToImpl(
- ConsString string, int slice_offset, base::Vector<const Char> str,
+ ConsString string, base::Vector<const Char> str,
PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard);
@@ -701,15 +708,26 @@ class SeqString : public TorqueGeneratedSeqString<SeqString, String> {
// Truncate the string in-place if possible and return the result.
// In case of new_length == 0, the empty string is returned without
// truncating the original string.
- V8_WARN_UNUSED_RESULT static Handle<String> Truncate(Handle<SeqString> string,
+ V8_WARN_UNUSED_RESULT static Handle<String> Truncate(Isolate* isolate,
+ Handle<SeqString> string,
int new_length);
struct DataAndPaddingSizes {
const int data_size;
const int padding_size;
+ bool operator==(const DataAndPaddingSizes& other) const {
+ return data_size == other.data_size && padding_size == other.padding_size;
+ }
};
DataAndPaddingSizes GetDataAndPaddingSizes() const;
+ // Zero out only the padding bytes of this string.
+ void ClearPadding();
+
+#ifdef VERIFY_HEAP
+ V8_EXPORT_PRIVATE void SeqStringVerify(Isolate* isolate);
+#endif
+
TQ_OBJECT_CONSTRUCTORS(SeqString)
};
@@ -754,6 +772,9 @@ class SeqOneByteString
DataAndPaddingSizes GetDataAndPaddingSizes() const;
+ // Initializes padding bytes. Potentially zeros tail of the payload too!
+ inline void clear_padding_destructively(int length);
+
// Maximal memory usage for a single sequential one-byte string.
static const int kMaxCharsSize = kMaxLength;
static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
@@ -798,6 +819,9 @@ class SeqTwoByteString
DataAndPaddingSizes GetDataAndPaddingSizes() const;
+ // Initializes padding bytes. Potentially zeros tail of the payload too!
+ inline void clear_padding_destructively(int length);
+
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxCharsSize = kMaxLength * 2;
static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
@@ -912,6 +936,8 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
class ExternalString
: public TorqueGeneratedExternalString<ExternalString, String> {
public:
+ class BodyDescriptor;
+
DECL_VERIFIER(ExternalString)
// Size of uncached external strings.
@@ -978,8 +1004,6 @@ class ExternalOneByteString
inline uint8_t Get(int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
- class BodyDescriptor;
-
static_assert(kSize == kSizeOfAllExternalStrings);
TQ_OBJECT_CONSTRUCTORS(ExternalOneByteString)
@@ -1025,8 +1049,6 @@ class ExternalTwoByteString
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
- class BodyDescriptor;
-
static_assert(kSize == kSizeOfAllExternalStrings);
TQ_OBJECT_CONSTRUCTORS(ExternalTwoByteString)
@@ -1073,7 +1095,11 @@ class ConsStringIterator {
if (cons_string.is_null()) return;
Initialize(cons_string, offset);
}
- // Returns nullptr when complete.
+ // Returns nullptr when complete. The offset_out parameter will be set to the
+ // offset within the returned segment that the user should start looking at,
+ // to match the offset passed into the constructor or Reset -- this will only
+ // be non-zero immediately after construction or Reset, and only if those had
+ // a non-zero offset.
inline String Next(int* offset_out) {
*offset_out = 0;
if (depth_ == 0) return String();
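
The expanded Next() comment is easier to see on a plain list of segments standing in for the real iterator: only the first segment returned after construction or Reset carries a non-zero starting offset, and every later segment is consumed from index 0 (the types below are stand-ins, not the V8 classes):

  #include <cstddef>
  #include <iostream>
  #include <string>
  #include <vector>

  // Stand-in for walking the flattened segments of a cons string: `offset`
  // plays the role of the offset passed to the ConsStringIterator constructor.
  std::string Concatenate(const std::vector<std::string>& segments, size_t offset) {
    std::string out;
    bool first = true;
    for (const std::string& segment : segments) {
      size_t offset_out = first ? offset : 0;  // non-zero only for the first segment
      first = false;
      out.append(segment, offset_out, std::string::npos);
    }
    return out;
  }

  int main() {
    // Starting at offset 2 skips "ab" but keeps all later segments whole.
    std::cout << Concatenate({"abcd", "ef", "gh"}, 2) << "\n";  // prints "cdefgh"
    return 0;
  }
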
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index e202199e36..3a9ae8382c 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -16,6 +16,41 @@ extern class String extends Name {
return this.StringInstanceType().is_not_internalized;
}
+ // Keep this in sync with the C++ String::IsOneByteRepresentation.
+ macro IsOneByteRepresentation(): bool {
+ let s: String;
+ try {
+ const thin = Cast<ThinString>(this) otherwise NotThin;
+ s = thin.actual;
+ } label NotThin {
+ s = this;
+ }
+ return s.StringInstanceType().is_one_byte;
+ }
+
+ // Keep this in sync with the C++ String::IsOneByteRepresentationUnderneath.
+ macro IsOneByteRepresentationUnderneath(): bool {
+ let string = this;
+ while (true) {
+ typeswitch (string) {
+ case (cons: ConsString): {
+ dcheck(cons.IsFlat());
+ string = cons.first;
+ }
+ case (thin: ThinString): {
+ string = thin.actual;
+ }
+ case (slice: SlicedString): {
+ string = slice.parent;
+ }
+ case (String): {
+ return string.StringInstanceType().is_one_byte;
+ }
+ }
+ }
+ VerifiedUnreachable();
+ }
+
const length: int32;
}
@@ -134,7 +169,7 @@ type DirectString extends String;
macro AllocateNonEmptySeqOneByteString<Iterator: type>(
length: uint32, content: Iterator): SeqOneByteString {
dcheck(length != 0 && length <= kStringMaxLength);
- return new SeqOneByteString{
+ return new (ClearPadding) SeqOneByteString{
map: kOneByteStringMap,
raw_hash_field: kNameEmptyHashField,
length: Signed(length),
@@ -145,7 +180,7 @@ macro AllocateNonEmptySeqOneByteString<Iterator: type>(
macro AllocateNonEmptySeqTwoByteString<Iterator: type>(
length: uint32, content: Iterator): SeqTwoByteString {
dcheck(length > 0 && length <= kStringMaxLength);
- return new SeqTwoByteString{
+ return new (ClearPadding) SeqTwoByteString{
map: kStringMap,
raw_hash_field: kNameEmptyHashField,
length: Signed(length),
@@ -387,7 +422,6 @@ macro AbstractStringIndexOf(implicit context: Context)(
string, searchString, AbstractStringIndexOfFunctor{fromIndex: fromIndex});
}
-builtin StringIndexOf(implicit context: Context)(
- s: String, searchString: String, start: Smi): Smi {
+builtin StringIndexOf(s: String, searchString: String, start: Smi): Smi {
return AbstractStringIndexOf(s, searchString, SmiMax(start, 0));
}
diff --git a/deps/v8/src/objects/swiss-name-dictionary-inl.h b/deps/v8/src/objects/swiss-name-dictionary-inl.h
index 8ef6b68073..b0f2c2e6b5 100644
--- a/deps/v8/src/objects/swiss-name-dictionary-inl.h
+++ b/deps/v8/src/objects/swiss-name-dictionary-inl.h
@@ -581,7 +581,7 @@ void SwissNameDictionary::Initialize(IsolateT* isolate, ByteArray meta_table,
SwissNameDictionary::IndexIterator::IndexIterator(
Handle<SwissNameDictionary> dict, int start)
: enum_index_{start}, dict_{dict} {
- if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict.is_null()) {
+ if (dict.is_null()) {
used_capacity_ = 0;
} else {
used_capacity_ = dict->UsedCapacity();
@@ -626,7 +626,7 @@ SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::begin() {
}
SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
- if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict_.is_null()) {
+ if (dict_.is_null()) {
return IndexIterator(dict_, 0);
} else {
DCHECK(!dict_.is_null());
@@ -636,14 +636,15 @@ SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
SwissNameDictionary::IndexIterable
SwissNameDictionary::IterateEntriesOrdered() {
- // If we are supposed to iterate the empty dictionary (which is non-writable)
- // and pointer compression with a per-Isolate cage is disabled, we have no
- // simple way to get the isolate, which we would need to create a handle.
+ // If we are supposed to iterate the empty dictionary (which is non-writable),
+ // we have no simple way to get the isolate, which we would need to create a
+ // handle.
// TODO(emrich): Consider always using roots.empty_swiss_dictionary_handle()
// in the condition once this function gets Isolate as a parameter in order to
// avoid empty dict checks.
- if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && Capacity() == 0)
+ if (Capacity() == 0) {
return IndexIterable(Handle<SwissNameDictionary>::null());
+ }
Isolate* isolate;
GetIsolateFromHeapObject(*this, &isolate);
diff --git a/deps/v8/src/objects/swiss-name-dictionary.h b/deps/v8/src/objects/swiss-name-dictionary.h
index 42613e619c..d2588942a0 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.h
+++ b/deps/v8/src/objects/swiss-name-dictionary.h
@@ -117,6 +117,10 @@ class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
int NumberOfEnumerableProperties();
+ // TODO(pthier): Add flags (similar to NamedDictionary) also for swiss dicts.
+ inline bool may_have_interesting_symbols() { UNREACHABLE(); }
+ inline void set_may_have_interesting_symbols(bool value) { UNREACHABLE(); }
+
static Handle<SwissNameDictionary> ShallowCopy(
Isolate* isolate, Handle<SwissNameDictionary> table);
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index b1196465bd..b5951d10f6 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -77,8 +77,7 @@ bool SyntheticModule::PrepareInstantiate(Isolate* isolate,
for (int i = 0, n = export_names->length(); i < n; ++i) {
// Spec step 7.1: Create a new mutable binding for export_name.
// Spec step 7.2: Initialize the new mutable binding to undefined.
- Handle<Cell> cell =
- isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ Handle<Cell> cell = isolate->factory()->NewCell();
Handle<String> name(String::cast(export_names->get(i)), isolate);
CHECK(exports->Lookup(name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, name, cell);
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
index 1c8e9a8f0c..7f2af6d89a 100644
--- a/deps/v8/src/objects/tagged-field-inl.h
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -5,9 +5,8 @@
#ifndef V8_OBJECTS_TAGGED_FIELD_INL_H_
#define V8_OBJECTS_TAGGED_FIELD_INL_H_
-#include "src/objects/tagged-field.h"
-
#include "src/common/ptr-compr-inl.h"
+#include "src/objects/tagged-field.h"
namespace v8 {
namespace internal {
@@ -35,10 +34,9 @@ Address TaggedField<T, kFieldOffset, CompressionScheme>::tagged_to_full(
if (kIsSmi) {
return CompressionScheme::DecompressTaggedSigned(tagged_value);
} else if (kIsHeapObject) {
- return CompressionScheme::DecompressTaggedPointer(on_heap_addr,
- tagged_value);
+ return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value);
} else {
- return CompressionScheme::DecompressTaggedAny(on_heap_addr, tagged_value);
+ return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value);
}
#else
return tagged_value;
@@ -50,7 +48,10 @@ template <typename T, int kFieldOffset, typename CompressionScheme>
Tagged_t TaggedField<T, kFieldOffset, CompressionScheme>::full_to_tagged(
Address value) {
#ifdef V8_COMPRESS_POINTERS
- return CompressionScheme::CompressTagged(value);
+ if (std::is_base_of<MaybeObject, T>::value) {
+ return CompressionScheme::CompressAny(value);
+ }
+ return CompressionScheme::CompressObject(value);
#else
return value;
#endif
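
The change above routes MaybeObject-typed fields through the "any" compression path and everything else through the object path. The same compile-time choice, sketched with if constexpr and dummy types/compressors in place of the V8 ones:

  #include <cstdint>
  #include <iostream>
  #include <type_traits>

  struct Object {};       // strong-only reference (stand-in)
  struct MaybeObject {};  // may be weak or cleared (stand-in)

  uint32_t CompressObject(uint64_t full) { return static_cast<uint32_t>(full); }
  uint32_t CompressAny(uint64_t full) { return static_cast<uint32_t>(full); }

  // Mirrors the shape of TaggedField<T, ...>::full_to_tagged: fields that can
  // hold MaybeObject values must go through the "any" compression path.
  template <typename T>
  uint32_t FullToTagged(uint64_t full) {
    if constexpr (std::is_base_of_v<MaybeObject, T>) {
      return CompressAny(full);
    } else {
      return CompressObject(full);
    }
  }

  int main() {
    std::cout << FullToTagged<Object>(0x1234) << " "
              << FullToTagged<MaybeObject>(0x1234) << "\n";
    return 0;
  }
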
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index a585a66a7a..d42ba5c70c 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
diff --git a/deps/v8/src/objects/tagged-impl-inl.h b/deps/v8/src/objects/tagged-impl-inl.h
index 4ce915730d..9a44749949 100644
--- a/deps/v8/src/objects/tagged-impl-inl.h
+++ b/deps/v8/src/objects/tagged-impl-inl.h
@@ -112,9 +112,8 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
if (kIsFull) return GetHeapObjectIfStrong(result);
// Implementation for compressed pointers.
if (IsStrong()) {
- *result =
- HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
- isolate, static_cast<Tagged_t>(ptr_))));
+ *result = HeapObject::cast(Object(CompressionScheme::DecompressTagged(
+ isolate, static_cast<Tagged_t>(ptr_))));
return true;
}
return false;
@@ -138,7 +137,7 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
if (kIsFull) return GetHeapObjectAssumeStrong();
// Implementation for compressed pointers.
DCHECK(IsStrong());
- return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+ return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
isolate, static_cast<Tagged_t>(ptr_))));
}
@@ -224,11 +223,11 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(
DCHECK(!IsSmi());
if (kCanBeWeak) {
DCHECK(!IsCleared());
- return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+ return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
isolate, static_cast<Tagged_t>(ptr_) & ~kWeakHeapObjectMask)));
} else {
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
- return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer(
+ return HeapObject::cast(Object(CompressionScheme::DecompressTagged(
isolate, static_cast<Tagged_t>(ptr_))));
}
}
diff --git a/deps/v8/src/objects/tagged-impl.cc b/deps/v8/src/objects/tagged-impl.cc
index bc48297aca..c4ccf34e81 100644
--- a/deps/v8/src/objects/tagged-impl.cc
+++ b/deps/v8/src/objects/tagged-impl.cc
@@ -27,10 +27,11 @@ bool CheckObjectComparisonAllowed(Address a, Address b) {
}
HeapObject obj_a = HeapObject::unchecked_cast(Object(a));
HeapObject obj_b = HeapObject::unchecked_cast(Object(b));
- // This check might fail when we try to compare Code object with non-Code
- // object. The main legitimate case when such "mixed" comparison could happen
- // is comparing two AbstractCode objects. If that's the case one must use
- // AbstractCode's == operator instead of Object's one or SafeEquals().
+ // This check might fail when we try to compare InstructionStream object with
+ // non-InstructionStream object. The main legitimate case when such "mixed"
+ // comparison could happen is comparing two AbstractCode objects. If that's
+ // the case one must use AbstractCode's == operator instead of Object's one or
+ // SafeEquals().
CHECK_EQ(IsCodeSpaceObject(obj_a), IsCodeSpaceObject(obj_b));
return true;
}
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
index f4445b4ecf..2505c153ba 100644
--- a/deps/v8/src/objects/tagged-impl.h
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -13,9 +13,9 @@ namespace v8 {
namespace internal {
#ifdef V8_EXTERNAL_CODE_SPACE
-// When V8_EXTERNAL_CODE_SPACE is enabled comparing Code and non-Code objects
-// by looking only at compressed values it not correct.
-// Full pointers must be compared instead.
+// When V8_EXTERNAL_CODE_SPACE is enabled comparing InstructionStream and
+// non-InstructionStream objects by looking only at compressed values is not
+// correct. Full pointers must be compared instead.
bool V8_EXPORT_PRIVATE CheckObjectComparisonAllowed(Address a, Address b);
#endif
@@ -30,8 +30,9 @@ bool V8_EXPORT_PRIVATE CheckObjectComparisonAllowed(Address a, Address b);
template <HeapObjectReferenceType kRefType, typename StorageType>
class TaggedImpl {
public:
- // Compressed TaggedImpl are never used for external Code pointers, so
- // we can use this shorter alias for calling decompression functions.
+ // Compressed TaggedImpl are never used for external InstructionStream
+ // pointers, so we can use this shorter alias for calling decompression
+ // functions.
using CompressionScheme = V8HeapCompressionScheme;
static_assert(std::is_same<StorageType, Address>::value ||
@@ -87,8 +88,9 @@ class TaggedImpl {
return static_cast<Tagged_t>(ptr_) != static_cast<Tagged_t>(other.ptr());
}
- // A variant of operator== which allows comparing Code object with non-Code
- // objects even if the V8_EXTERNAL_CODE_SPACE is enabled.
+ // A variant of operator== which allows comparing InstructionStream object
+ // with non-InstructionStream objects even if the V8_EXTERNAL_CODE_SPACE is
+ // enabled.
constexpr bool SafeEquals(TaggedImpl other) const {
static_assert(std::is_same<StorageType, Address>::value,
"Safe comparison is allowed only for full tagged values");
diff --git a/deps/v8/src/objects/tagged-value-inl.h b/deps/v8/src/objects/tagged-value-inl.h
index 4ca8739367..7cdebdf700 100644
--- a/deps/v8/src/objects/tagged-value-inl.h
+++ b/deps/v8/src/objects/tagged-value-inl.h
@@ -21,7 +21,7 @@ namespace internal {
inline StrongTaggedValue::StrongTaggedValue(Object o)
:
#ifdef V8_COMPRESS_POINTERS
- TaggedImpl(CompressionScheme::CompressTagged(o.ptr()))
+ TaggedImpl(CompressionScheme::CompressObject(o.ptr()))
#else
TaggedImpl(o.ptr())
#endif
@@ -30,7 +30,7 @@ inline StrongTaggedValue::StrongTaggedValue(Object o)
Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) {
#ifdef V8_COMPRESS_POINTERS
- return Object(CompressionScheme::DecompressTaggedAny(isolate, object.ptr()));
+ return Object(CompressionScheme::DecompressTagged(isolate, object.ptr()));
#else
return Object(object.ptr());
#endif
@@ -39,7 +39,7 @@ Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) {
inline TaggedValue::TaggedValue(MaybeObject o)
:
#ifdef V8_COMPRESS_POINTERS
- TaggedImpl(CompressionScheme::CompressTagged(o.ptr()))
+ TaggedImpl(CompressionScheme::CompressAny(o.ptr()))
#else
TaggedImpl(o.ptr())
#endif
@@ -49,7 +49,7 @@ inline TaggedValue::TaggedValue(MaybeObject o)
MaybeObject TaggedValue::ToMaybeObject(Isolate* isolate, TaggedValue object) {
#ifdef V8_COMPRESS_POINTERS
return MaybeObject(
- CompressionScheme::DecompressTaggedAny(isolate, object.ptr()));
+ CompressionScheme::DecompressTagged(isolate, object.ptr()));
#else
return MaybeObject(object.ptr());
#endif
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 12190cf9ad..22a7a741da 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -371,13 +371,14 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
new_capacity = std::min({kMaxCachedPrototypeTransitions, new_capacity});
DCHECK_GT(new_capacity, capacity);
int grow_by = new_capacity - capacity;
- array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by);
+ Handle<WeakFixedArray> new_array =
+ isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by);
if (capacity < 0) {
// There was no prototype transitions array before, so the size
// couldn't be copied. Initialize it explicitly.
- SetNumberOfPrototypeTransitions(*array, 0);
+ SetNumberOfPrototypeTransitions(*new_array, 0);
}
- return array;
+ return new_array;
}
// static
@@ -397,15 +398,33 @@ void TransitionsAccessor::PutPrototypeTransition(Isolate* isolate,
int capacity = cache->length() - header;
int transitions = TransitionArray::NumberOfPrototypeTransitions(*cache) + 1;
- base::SharedMutexGuard<base::kExclusive> scope(
- isolate->full_transition_array_access());
+ // We're not using a MutexGuard for {full_transition_array_access}, because
+ // we'll need to release it before growing the transition array (if needed),
+ // in order to avoid deadlock if a background thread is waiting for the shared
+ // mutex outside of a safepoint. And after growing the array, we'll need to
+ // re-lock it.
+ base::SharedMutex* transition_array_mutex =
+ isolate->full_transition_array_access();
+ transition_array_mutex->LockExclusive();
if (transitions > capacity) {
// Grow the array if compacting it doesn't free space.
if (!TransitionArray::CompactPrototypeTransitionArray(isolate, *cache)) {
+ transition_array_mutex->UnlockExclusive();
if (capacity == TransitionArray::kMaxCachedPrototypeTransitions) return;
+
+ // GrowPrototypeTransitionArray can allocate, so it shouldn't hold the
+ // exclusive lock on {full_transition_array_access} mutex, since
+ // background threads could be waiting for the shared lock (outside of a
+      // safepoint). This is not an issue, because GrowPrototypeTransitionArray
+      // doesn't actually modify the array in place, but instead returns a new
+      // array.
+ transition_array_mutex->LockShared();
cache = TransitionArray::GrowPrototypeTransitionArray(
cache, 2 * transitions, isolate);
+ transition_array_mutex->UnlockShared();
+
+ transition_array_mutex->LockExclusive();
SetPrototypeTransitions(isolate, map, cache);
}
}
@@ -416,6 +435,8 @@ void TransitionsAccessor::PutPrototypeTransition(Isolate* isolate,
cache->Set(entry, HeapObjectReference::Weak(*target_map));
TransitionArray::SetNumberOfPrototypeTransitions(*cache, last + 1);
+
+ transition_array_mutex->UnlockExclusive();
}
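
The unlock/relock sequence the comments describe (drop the exclusive lock before the allocating grow step, take only a shared lock around it, then re-acquire the exclusive lock to install the result) maps onto std::shared_mutex roughly as follows; the cache and growth policy below are placeholders:

  #include <mutex>
  #include <shared_mutex>
  #include <vector>

  std::shared_mutex transition_array_mutex;
  std::vector<int> cache;  // stands in for the prototype-transition array

  // Sketch of the lock sequence in PutPrototypeTransition: the exclusive lock
  // is dropped around the allocating grow step so background threads waiting
  // on the shared lock cannot deadlock, and re-taken to install the new array.
  void PutEntry(int entry) {
    transition_array_mutex.lock();                 // exclusive
    bool needs_growth = cache.size() == cache.capacity();
    if (needs_growth) {
      transition_array_mutex.unlock();             // release before allocating

      transition_array_mutex.lock_shared();        // grow produces a fresh copy
      std::vector<int> grown(cache);
      grown.reserve(cache.capacity() * 2 + 1);
      transition_array_mutex.unlock_shared();

      transition_array_mutex.lock();               // exclusive again to install
      cache = std::move(grown);
    }
    cache.push_back(entry);
    transition_array_mutex.unlock();
  }

  int main() {
    for (int i = 0; i < 100; ++i) PutEntry(i);
    return 0;
  }
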
// static
diff --git a/deps/v8/src/objects/turboshaft-types-inl.h b/deps/v8/src/objects/turboshaft-types-inl.h
new file mode 100644
index 0000000000..3dc379a2e6
--- /dev/null
+++ b/deps/v8/src/objects/turboshaft-types-inl.h
@@ -0,0 +1,33 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TURBOSHAFT_TYPES_INL_H_
+#define V8_OBJECTS_TURBOSHAFT_TYPES_INL_H_
+
+#include "src/heap/heap-write-barrier.h"
+#include "src/objects/turboshaft-types.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8::internal {
+
+#include "torque-generated/src/objects/turboshaft-types-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftType)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord32Type)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord32RangeType)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord32SetType)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord64Type)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord64RangeType)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord64SetType)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftFloat64Type)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftFloat64RangeType)
+TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftFloat64SetType)
+
+} // namespace v8::internal
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TURBOSHAFT_TYPES_INL_H_
diff --git a/deps/v8/src/objects/turboshaft-types.h b/deps/v8/src/objects/turboshaft-types.h
new file mode 100644
index 0000000000..f4b7deb34a
--- /dev/null
+++ b/deps/v8/src/objects/turboshaft-types.h
@@ -0,0 +1,115 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TURBOSHAFT_TYPES_H_
+#define V8_OBJECTS_TURBOSHAFT_TYPES_H_
+
+#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
+#include "torque-generated/bit-fields.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8::internal {
+
+#include "torque-generated/src/objects/turboshaft-types-tq.inc"
+
+class TurboshaftFloatSpecialValues {
+ public:
+ DEFINE_TORQUE_GENERATED_TURBOSHAFT_FLOAT_SPECIAL_VALUES()
+};
+
+class TurboshaftType
+ : public TorqueGeneratedTurboshaftType<TurboshaftType, HeapObject> {
+ public:
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftType)
+};
+
+class TurboshaftWord32Type
+ : public TorqueGeneratedTurboshaftWord32Type<TurboshaftWord32Type,
+ TurboshaftType> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftWord32Type)
+};
+
+class TurboshaftWord32RangeType
+ : public TorqueGeneratedTurboshaftWord32RangeType<TurboshaftWord32RangeType,
+ TurboshaftWord32Type> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftWord32RangeType)
+};
+
+class TurboshaftWord32SetType
+ : public TorqueGeneratedTurboshaftWord32SetType<TurboshaftWord32SetType,
+ TurboshaftWord32Type> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftWord32SetType)
+};
+
+class TurboshaftWord64Type
+ : public TorqueGeneratedTurboshaftWord64Type<TurboshaftWord64Type,
+ TurboshaftType> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftWord64Type)
+};
+
+class TurboshaftWord64RangeType
+ : public TorqueGeneratedTurboshaftWord64RangeType<TurboshaftWord64RangeType,
+ TurboshaftWord64Type> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftWord64RangeType)
+};
+
+class TurboshaftWord64SetType
+ : public TorqueGeneratedTurboshaftWord64SetType<TurboshaftWord64SetType,
+ TurboshaftWord64Type> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftWord64SetType)
+};
+
+class TurboshaftFloat64Type
+ : public TorqueGeneratedTurboshaftFloat64Type<TurboshaftFloat64Type,
+ TurboshaftType> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftFloat64Type)
+};
+
+class TurboshaftFloat64RangeType
+ : public TorqueGeneratedTurboshaftFloat64RangeType<
+ TurboshaftFloat64RangeType, TurboshaftFloat64Type> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftFloat64RangeType)
+};
+
+class TurboshaftFloat64SetType
+ : public TorqueGeneratedTurboshaftFloat64SetType<TurboshaftFloat64SetType,
+ TurboshaftFloat64Type> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(TurboshaftFloat64SetType)
+};
+
+} // namespace v8::internal
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TURBOSHAFT_TYPES_H_
diff --git a/deps/v8/src/objects/turboshaft-types.tq b/deps/v8/src/objects/turboshaft-types.tq
new file mode 100644
index 0000000000..f860cc4dda
--- /dev/null
+++ b/deps/v8/src/objects/turboshaft-types.tq
@@ -0,0 +1,234 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/turboshaft-types.h"
+
+bitfield struct TurboshaftFloatSpecialValues extends uint32 {
+ nan: bool: 1 bit;
+ minus_zero: bool: 1 bit;
+ _unused: uint32: 30 bit;
+}
+
+@abstract
+extern class TurboshaftType extends HeapObject {
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftWord32Type extends TurboshaftType {
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftWord32RangeType extends TurboshaftWord32Type {
+ from: uint32;
+ to: uint32;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftWord32SetType extends TurboshaftWord32Type {
+ const set_size: uint32;
+ elements[set_size]: uint32;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftWord64Type extends TurboshaftType {
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftWord64RangeType extends TurboshaftWord64Type {
+ from_high: uint32;
+ from_low: uint32;
+ to_high: uint32;
+ to_low: uint32;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftWord64SetType extends TurboshaftWord64Type {
+ const set_size: uint32;
+ elements_high[set_size]: uint32;
+ elements_low[set_size]: uint32;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftFloat64Type extends TurboshaftType {
+ special_values: TurboshaftFloatSpecialValues;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftFloat64RangeType extends TurboshaftFloat64Type {
+ _padding: uint32;
+ min: float64;
+ max: float64;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class TurboshaftFloat64SetType extends TurboshaftFloat64Type {
+ const set_size: uint32;
+ elements[set_size]: float64;
+}
+
+macro TestTurboshaftWord32Type(
+ value: uint32, expected: TurboshaftWord32Type): bool {
+ typeswitch (expected) {
+ case (range: TurboshaftWord32RangeType): {
+ if (range.from > range.to) {
+ return value <= range.to || range.from <= value;
+ }
+ return range.from <= value && value <= range.to;
+ }
+ case (set: TurboshaftWord32SetType): {
+ for (let i: uint32 = 0; i < set.set_size; ++i) {
+ if (set.elements[i] == value) return true;
+ }
+ return false;
+ }
+ case (TurboshaftWord32Type): {
+ unreachable;
+ }
+ }
+}
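
The range case above treats from > to as a range that wraps around the uint32 space. A standalone version of the same membership test, with a few worked values:

  #include <cassert>
  #include <cstdint>

  // Membership in a possibly wrapping uint32 range, matching the
  // TurboshaftWord32RangeType case above: when from > to the range covers
  // [from, 0xFFFFFFFF] plus [0, to].
  bool InWord32Range(uint32_t value, uint32_t from, uint32_t to) {
    if (from > to) {
      return value <= to || from <= value;
    }
    return from <= value && value <= to;
  }

  int main() {
    assert(InWord32Range(5, 1, 10));                         // ordinary range
    assert(InWord32Range(0xFFFFFFF5u, 0xFFFFFFF0u, 0x10u));  // wrapped, high side
    assert(InWord32Range(0x000Fu, 0xFFFFFFF0u, 0x10u));      // wrapped, low side
    assert(!InWord32Range(0x100u, 0xFFFFFFF0u, 0x10u));      // outside the wrap
    return 0;
  }
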
+
+macro CompareUint64HighLow(
+ lhsHigh: uint32, lhsLow: uint32, rhsHigh: uint32, rhsLow: uint32): int32 {
+ if (lhsHigh == rhsHigh) {
+ if (lhsLow == rhsLow) return 0;
+ return lhsLow < rhsLow ? Convert<int32>(-1) : 1;
+ } else {
+ return lhsHigh < rhsHigh ? Convert<int32>(-1) : 1;
+ }
+}
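
With no native 64-bit unsigned compare available here, the macro compares the two halves lexicographically: high words first, low words as tie-breaker. The same function in plain C++:

  #include <cassert>
  #include <cstdint>

  // Lexicographic comparison of two uint64 values given as (high, low) halves;
  // returns -1, 0 or 1 like the Torque CompareUint64HighLow above.
  int CompareUint64HighLow(uint32_t lhs_high, uint32_t lhs_low,
                           uint32_t rhs_high, uint32_t rhs_low) {
    if (lhs_high == rhs_high) {
      if (lhs_low == rhs_low) return 0;
      return lhs_low < rhs_low ? -1 : 1;
    }
    return lhs_high < rhs_high ? -1 : 1;
  }

  int main() {
    // 0x00000001'00000000 > 0x00000000'FFFFFFFF: the high word dominates.
    assert(CompareUint64HighLow(1, 0, 0, 0xFFFFFFFFu) == 1);
    // Equal high words fall back to the low word.
    assert(CompareUint64HighLow(7, 2, 7, 9) == -1);
    assert(CompareUint64HighLow(7, 9, 7, 9) == 0);
    return 0;
  }
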
+
+macro TestTurboshaftWord64Type(
+ valueHigh: uint32, valueLow: uint32, expected: TurboshaftWord64Type): bool {
+ typeswitch (expected) {
+ case (range: TurboshaftWord64RangeType): {
+ const greaterThanOrEqualFrom =
+ CompareUint64HighLow(
+ valueHigh, valueLow, range.from_high, range.from_low) >= 0;
+ const lessThanOrEqualTo =
+ CompareUint64HighLow(
+ valueHigh, valueLow, range.to_high, range.to_low) <= 0;
+ const isWrapping =
+ CompareUint64HighLow(
+ range.from_high, range.from_low, range.to_high, range.to_low) < 0;
+
+ return (isWrapping && (greaterThanOrEqualFrom || lessThanOrEqualTo)) ||
+ (greaterThanOrEqualFrom && lessThanOrEqualTo);
+ }
+ case (set: TurboshaftWord64SetType): {
+ for (let i: uint32 = 0; i < set.set_size; ++i) {
+ if (CompareUint64HighLow(
+ set.elements_high[i], set.elements_low[i], valueHigh,
+ valueLow) == 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case (TurboshaftWord64Type): {
+ unreachable;
+ }
+ }
+}
+
+macro TestTurboshaftFloat64Type(
+ value: float64, expected: TurboshaftFloat64Type): bool {
+ if (Float64IsNaN(value)) return expected.special_values.nan;
+ if (IsMinusZero(value)) return expected.special_values.minus_zero;
+ typeswitch (expected) {
+ case (range: TurboshaftFloat64RangeType): {
+ return range.min <= value && value <= range.max;
+ }
+ case (set: TurboshaftFloat64SetType): {
+ const delta = 0.000001;
+ for (let i: uint32 = 0; i < set.set_size; ++i) {
+ if (set.elements[i] - delta <= value &&
+ value <= set.elements[i] + delta)
+ return true;
+ }
+ return false;
+ }
+ case (TurboshaftFloat64Type): {
+ unreachable;
+ }
+ }
+}
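
The set case accepts a value that lies within a small tolerance of any element, presumably to absorb tiny rounding differences. A standalone version:

  #include <cassert>
  #include <vector>

  // Tolerance-based set membership, mirroring the TurboshaftFloat64SetType
  // case above (delta value copied from the Torque macro).
  bool InFloat64Set(double value, const std::vector<double>& elements) {
    const double delta = 0.000001;
    for (double element : elements) {
      if (element - delta <= value && value <= element + delta) return true;
    }
    return false;
  }

  int main() {
    std::vector<double> set = {1.0, 2.5, -3.25};
    assert(InFloat64Set(2.5000004, set));  // within the tolerance window
    assert(!InFloat64Set(2.51, set));      // outside it
    return 0;
  }
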
+
+builtin CheckTurboshaftWord32Type(implicit context: Context)(
+ value: uint32, expectedType: TurboshaftWord32Type, nodeId: Smi): Undefined {
+ if (TestTurboshaftWord32Type(value, expectedType)) {
+ return Undefined;
+ }
+
+ Print('Type assertion failed!');
+ Print('Node id', nodeId);
+ Print('Actual value', Convert<Number>(value));
+ Print('Expected type', expectedType);
+ unreachable;
+}
+
+builtin CheckTurboshaftWord64Type(implicit context: Context)(
+ valueHigh: uint32, valueLow: uint32, expectedType: TurboshaftWord64Type,
+ nodeId: Smi): Undefined {
+ if (TestTurboshaftWord64Type(valueHigh, valueLow, expectedType)) {
+ return Undefined;
+ }
+
+ Print('Type assertion failed!');
+ Print('Node id', nodeId);
+ Print('Actual value (high)', Convert<Number>(valueHigh));
+  Print('Actual value (low)', Convert<Number>(valueLow));
+ Print('Expected type', expectedType);
+ unreachable;
+}
+
+// Builtin needs custom interface descriptor to allow float32 argument type.
+@customInterfaceDescriptor
+builtin CheckTurboshaftFloat32Type(implicit context: Context)(
+ value: float32, expectedType: TurboshaftFloat64Type, nodeId: Smi):
+ Undefined {
+ const v = Convert<float64>(value);
+ if (TestTurboshaftFloat64Type(v, expectedType)) {
+ return Undefined;
+ }
+
+ Print('Type assertion failed!');
+ Print('Node id', nodeId);
+ Print('Actual value', Convert<Number>(v));
+ Print('Expected type', expectedType);
+ unreachable;
+}
+
+// Builtin needs custom interface descriptor to allow float64 argument type.
+@customInterfaceDescriptor
+builtin CheckTurboshaftFloat64Type(implicit context: Context)(
+ value: float64, expectedType: TurboshaftFloat64Type, nodeId: Smi):
+ Undefined {
+ if (TestTurboshaftFloat64Type(value, expectedType)) {
+ return Undefined;
+ }
+
+ Print('Type assertion failed!');
+ Print('Node id', nodeId);
+ Print('Actual value', Convert<Number>(value));
+ Print('Expected type', expectedType);
+ unreachable;
+}
diff --git a/deps/v8/src/objects/type-hints.cc b/deps/v8/src/objects/type-hints.cc
index a099ea12cc..7befe1ceae 100644
--- a/deps/v8/src/objects/type-hints.cc
+++ b/deps/v8/src/objects/type-hints.cc
@@ -51,6 +51,8 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "Symbol";
case CompareOperationHint::kBigInt:
return os << "BigInt";
+ case CompareOperationHint::kBigInt64:
+ return os << "BigInt64";
case CompareOperationHint::kReceiver:
return os << "Receiver";
case CompareOperationHint::kReceiverOrNullOrUndefined:
diff --git a/deps/v8/src/objects/type-hints.h b/deps/v8/src/objects/type-hints.h
index d9ed880577..26665e348d 100644
--- a/deps/v8/src/objects/type-hints.h
+++ b/deps/v8/src/objects/type-hints.h
@@ -41,6 +41,7 @@ enum class CompareOperationHint : uint8_t {
kString,
kSymbol,
kBigInt,
+ kBigInt64,
kReceiver,
kReceiverOrNullOrUndefined,
kAny
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 1205ca868c..63c32f4ba1 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -23,6 +23,7 @@
#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
@@ -157,6 +158,8 @@ enum class SerializationTag : uint8_t {
kEndJSSet = ',',
// Array buffer. byteLength:uint32_t, then raw data.
kArrayBuffer = 'B',
+ // Resizable ArrayBuffer.
+ kResizableArrayBuffer = '~',
// Array buffer (transferred). transferID:uint32_t
kArrayBufferTransfer = 't',
// View into an array buffer.
@@ -451,7 +454,8 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
WriteBigInt(BigInt::cast(*object));
return ThrowIfOutOfMemory();
case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE: {
+ case JS_DATA_VIEW_TYPE:
+ case JS_RAB_GSAB_DATA_VIEW_TYPE: {
// Despite being JSReceivers, these have their wrapped buffer serialized
// first. That makes this logic a little quirky, because it needs to
// happen before we assign object IDs.
@@ -603,6 +607,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
return WriteJSArrayBuffer(Handle<JSArrayBuffer>::cast(receiver));
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
+ case JS_RAB_GSAB_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
case JS_ERROR_TYPE:
return WriteJSError(Handle<JSObject>::cast(receiver));
@@ -650,7 +655,7 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
if (V8_LIKELY(!map_changed &&
details.location() == PropertyLocation::kField)) {
DCHECK_EQ(PropertyKind::kData, details.kind());
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ FieldIndex field_index = FieldIndex::ForDetails(*map, details);
value = JSObject::FastPropertyAt(isolate_, object,
details.representation(), field_index);
} else {
@@ -933,14 +938,25 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
return ThrowDataCloneError(
MessageTemplate::kDataCloneErrorDetachedArrayBuffer);
}
- double byte_length = array_buffer->byte_length();
+ size_t byte_length = array_buffer->byte_length();
if (byte_length > std::numeric_limits<uint32_t>::max()) {
return ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
}
- // TODO(v8:11111): Support RAB / GSAB. The wire version will need to be
- // bumped.
+ if (array_buffer->is_resizable_by_js()) {
+ size_t max_byte_length = array_buffer->max_byte_length();
+ if (max_byte_length > std::numeric_limits<uint32_t>::max()) {
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError,
+ array_buffer);
+ }
+
+ WriteTag(SerializationTag::kResizableArrayBuffer);
+ WriteVarint<uint32_t>(static_cast<uint32_t>(byte_length));
+ WriteVarint<uint32_t>(static_cast<uint32_t>(max_byte_length));
+ WriteRawBytes(array_buffer->backing_store(), byte_length);
+ return ThrowIfOutOfMemory();
+ }
WriteTag(SerializationTag::kArrayBuffer);
- WriteVarint<uint32_t>(byte_length);
+ WriteVarint<uint32_t>(static_cast<uint32_t>(byte_length));
WriteRawBytes(array_buffer->backing_store(), byte_length);
return ThrowIfOutOfMemory();
}
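
For a resizable buffer the wire format written above is the '~' tag byte, the current and maximum byte lengths as varints, then the raw contents. A rough standalone sketch of that layout, assuming the serializer's usual unsigned base-128 varint encoding (helper names are illustrative):

  #include <cstdint>
  #include <vector>

  // Unsigned base-128 varint, low 7 bits first, high bit set on all but the
  // last byte (the encoding ValueSerializer::WriteVarint is assumed to use).
  void WriteVarint32(std::vector<uint8_t>& out, uint32_t value) {
    do {
      uint8_t byte = value & 0x7F;
      value >>= 7;
      if (value != 0) byte |= 0x80;
      out.push_back(byte);
    } while (value != 0);
  }

  // Layout sketch for SerializationTag::kResizableArrayBuffer ('~').
  void WriteResizableArrayBuffer(std::vector<uint8_t>& out,
                                 const std::vector<uint8_t>& contents,
                                 uint32_t max_byte_length) {
    out.push_back('~');                                          // tag
    WriteVarint32(out, static_cast<uint32_t>(contents.size()));  // byteLength
    WriteVarint32(out, max_byte_length);                         // maxByteLength
    out.insert(out.end(), contents.begin(), contents.end());     // raw data
  }

  int main() {
    std::vector<uint8_t> wire;
    WriteResizableArrayBuffer(wire, {1, 2, 3}, 16);
    return wire.size() == 1 + 1 + 1 + 3 ? 0 : 1;  // tag + 2 varints + payload
  }
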
@@ -952,6 +968,11 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
WriteTag(SerializationTag::kArrayBufferView);
ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
if (view.IsJSTypedArray()) {
+ if (JSTypedArray::cast(view).IsOutOfBounds()) {
+ DCHECK(v8_flags.harmony_rab_gsab);
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError,
+ handle(view, isolate_));
+ }
switch (JSTypedArray::cast(view).type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case kExternal##Type##Array: \
@@ -961,7 +982,14 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
#undef TYPED_ARRAY_CASE
}
} else {
- DCHECK(view.IsJSDataView());
+ DCHECK(view.IsJSDataViewOrRabGsabDataView());
+ if (view.IsJSRabGsabDataView() &&
+ JSRabGsabDataView::cast(view).IsOutOfBounds()) {
+ DCHECK(v8_flags.harmony_rab_gsab);
+ return ThrowDataCloneError(MessageTemplate::kDataCloneError,
+ handle(view, isolate_));
+ }
+
tag = ArrayBufferViewTag::kDataView;
}
WriteVarint(static_cast<uint8_t>(tag));
@@ -1091,7 +1119,7 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
#endif // V8_ENABLE_WEBASSEMBLY
Maybe<bool> ValueSerializer::WriteSharedObject(Handle<HeapObject> object) {
- if (!delegate_ || !isolate_->has_shared_heap()) {
+ if (!delegate_ || !isolate_->has_shared_space()) {
return ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
}
@@ -1277,9 +1305,8 @@ Maybe<T> ValueDeserializer::ReadVarint() {
// same end state and result.
auto previous_position = position_;
Maybe<T> maybe_expected_value = ReadVarintLoop<T>();
- if (v8_flags.fuzzing && maybe_expected_value.IsNothing()) {
- return maybe_expected_value;
- }
+ // ReadVarintLoop can't return Nothing here; all such conditions have been
+ // checked above.
T expected_value = maybe_expected_value.ToChecked();
auto expected_position = position_;
position_ = previous_position;
@@ -1331,12 +1358,11 @@ Maybe<T> ValueDeserializer::ReadVarintLoop() {
value |= static_cast<T>(byte & 0x7F) << shift;
shift += 7;
} else {
- // We allow arbitrary data to be deserialized when fuzzing.
- // Since {value} is not modified in this branch we can safely skip the
- // DCHECK when fuzzing.
- DCHECK_IMPLIES(!v8_flags.fuzzing, !has_another_byte);
// For consistency with the fast unrolled loop in ReadVarint we return
// after we have read size(T) + 1 bytes.
+#ifdef V8_VALUE_DESERIALIZER_HARD_FAIL
+ CHECK(!has_another_byte);
+#endif // V8_VALUE_DESERIALIZER_HARD_FAIL
return Just(value);
}
position_++;
@@ -1549,15 +1575,22 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
case SerializationTag::kBeginJSSet:
return ReadJSSet();
case SerializationTag::kArrayBuffer: {
- const bool is_shared = false;
- return ReadJSArrayBuffer(is_shared);
+ constexpr bool is_shared = false;
+ constexpr bool is_resizable = false;
+ return ReadJSArrayBuffer(is_shared, is_resizable);
+ }
+ case SerializationTag::kResizableArrayBuffer: {
+ constexpr bool is_shared = false;
+ constexpr bool is_resizable = true;
+ return ReadJSArrayBuffer(is_shared, is_resizable);
}
case SerializationTag::kArrayBufferTransfer: {
return ReadTransferredJSArrayBuffer();
}
case SerializationTag::kSharedArrayBuffer: {
- const bool is_shared = true;
- return ReadJSArrayBuffer(is_shared);
+ constexpr bool is_shared = true;
+ constexpr bool is_resizable = false;
+ return ReadJSArrayBuffer(is_shared, is_resizable);
}
case SerializationTag::kError:
return ReadJSError();
@@ -1666,9 +1699,10 @@ bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
return {};
}
// Length is also checked in ReadRawBytes.
- DCHECK_IMPLIES(!v8_flags.fuzzing,
- byte_length <= static_cast<uint32_t>(
- std::numeric_limits<int32_t>::max()));
+#ifdef V8_VALUE_DESERIALIZER_HARD_FAIL
+ CHECK_LE(byte_length,
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
+#endif // V8_VALUE_DESERIALIZER_HARD_FAIL
if (!ReadRawBytes(byte_length).To(&bytes)) {
position_ = original_position;
return false;
@@ -1982,7 +2016,7 @@ MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
}
MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
- bool is_shared) {
+ bool is_shared, bool is_resizable) {
uint32_t id = next_id_++;
if (is_shared) {
uint32_t clone_id;
@@ -2001,13 +2035,34 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
return array_buffer;
}
uint32_t byte_length;
- if (!ReadVarint<uint32_t>().To(&byte_length) ||
- byte_length > static_cast<size_t>(end_ - position_)) {
+ if (!ReadVarint<uint32_t>().To(&byte_length)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ uint32_t max_byte_length = byte_length;
+ if (is_resizable) {
+ if (!ReadVarint<uint32_t>().To(&max_byte_length)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ if (byte_length > max_byte_length) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ if (!v8_flags.harmony_rab_gsab) {
+ // Disable resizability. This ensures that no resizable buffers are
+      // created in a version which has harmony_rab_gsab turned off, even if
+ // such a version is reading data containing resizable buffers from disk.
+ is_resizable = false;
+ max_byte_length = byte_length;
+ }
+ }
+ if (byte_length > static_cast<size_t>(end_ - position_)) {
return MaybeHandle<JSArrayBuffer>();
}
MaybeHandle<JSArrayBuffer> result =
isolate_->factory()->NewJSArrayBufferAndBackingStore(
- byte_length, InitializedFlag::kUninitialized);
+ byte_length, max_byte_length, InitializedFlag::kUninitialized,
+ is_resizable ? ResizableFlag::kResizable
+ : ResizableFlag::kNotResizable);
+
Handle<JSArrayBuffer> array_buffer;
if (!result.ToHandle(&array_buffer)) return result;
@@ -2061,12 +2116,18 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
switch (static_cast<ArrayBufferViewTag>(tag)) {
case ArrayBufferViewTag::kDataView: {
- Handle<JSDataView> data_view =
- isolate_->factory()->NewJSDataView(buffer, byte_offset, byte_length);
- AddObjectWithID(id, data_view);
- if (!ValidateAndSetJSArrayBufferViewFlags(*data_view, *buffer, flags)) {
+ bool is_length_tracking = false;
+ bool is_backed_by_rab = false;
+ if (!ValidateJSArrayBufferViewFlags(*buffer, flags, is_length_tracking,
+ is_backed_by_rab)) {
return MaybeHandle<JSArrayBufferView>();
}
+ Handle<JSDataViewOrRabGsabDataView> data_view =
+ isolate_->factory()->NewJSDataViewOrRabGsabDataView(
+ buffer, byte_offset, byte_length, is_length_tracking);
+ CHECK_EQ(is_backed_by_rab, data_view->is_backed_by_rab());
+ CHECK_EQ(is_length_tracking, data_view->is_length_tracking());
+ AddObjectWithID(id, data_view);
return data_view;
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
@@ -2081,29 +2142,42 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
byte_length % element_size != 0) {
return MaybeHandle<JSArrayBufferView>();
}
- Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
- external_array_type, buffer, byte_offset, byte_length / element_size);
- if (!ValidateAndSetJSArrayBufferViewFlags(*typed_array, *buffer, flags)) {
+ bool is_length_tracking = false;
+ bool is_backed_by_rab = false;
+ if (!ValidateJSArrayBufferViewFlags(*buffer, flags, is_length_tracking,
+ is_backed_by_rab)) {
return MaybeHandle<JSArrayBufferView>();
}
+ Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
+ external_array_type, buffer, byte_offset, byte_length / element_size,
+ is_length_tracking);
+ CHECK_EQ(is_length_tracking, typed_array->is_length_tracking());
+ CHECK_EQ(is_backed_by_rab, typed_array->is_backed_by_rab());
AddObjectWithID(id, typed_array);
return typed_array;
}
-bool ValueDeserializer::ValidateAndSetJSArrayBufferViewFlags(
- JSArrayBufferView view, JSArrayBuffer buffer, uint32_t serialized_flags) {
- bool is_length_tracking =
+bool ValueDeserializer::ValidateJSArrayBufferViewFlags(
+ JSArrayBuffer buffer, uint32_t serialized_flags, bool& is_length_tracking,
+ bool& is_backed_by_rab) {
+ is_length_tracking =
JSArrayBufferViewIsLengthTracking::decode(serialized_flags);
- bool is_backed_by_rab =
- JSArrayBufferViewIsBackedByRab::decode(serialized_flags);
+ is_backed_by_rab = JSArrayBufferViewIsBackedByRab::decode(serialized_flags);
// TODO(marja): When the version number is bumped the next time, check that
// serialized_flags doesn't contain spurious 1-bits.
+ if (!v8_flags.harmony_rab_gsab) {
+ // Disable resizability. This ensures that no resizable buffers are
+    // created in a version which has harmony_rab_gsab turned off, even if
+ // such a version is reading data containing resizable buffers from disk.
+ is_length_tracking = false;
+ is_backed_by_rab = false;
+ // The resizability of the buffer was already disabled.
+ CHECK(!buffer.is_resizable_by_js());
+ }
+
if (is_backed_by_rab || is_length_tracking) {
- if (!v8_flags.harmony_rab_gsab) {
- return false;
- }
if (!buffer.is_resizable_by_js()) {
return false;
}
@@ -2111,8 +2185,11 @@ bool ValueDeserializer::ValidateAndSetJSArrayBufferViewFlags(
return false;
}
}
- view.set_is_length_tracking(is_length_tracking);
- view.set_is_backed_by_rab(is_backed_by_rab);
+ // The RAB-ness of the buffer and the TA's "is_backed_by_rab" need to be in
+ // sync.
+ if (buffer.is_resizable_by_js() && !buffer.is_shared() && !is_backed_by_rab) {
+ return false;
+ }
return true;
}
@@ -2228,14 +2305,14 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
return MaybeHandle<WasmMemoryObject>();
}
- SerializationTag tag;
- if (!ReadTag().To(&tag) || tag != SerializationTag::kSharedArrayBuffer) {
+ Handle<Object> buffer_object;
+ if (!ReadObject().ToHandle(&buffer_object) ||
+ !buffer_object->IsJSArrayBuffer()) {
return MaybeHandle<WasmMemoryObject>();
}
- const bool is_shared = true;
- Handle<JSArrayBuffer> buffer;
- if (!ReadJSArrayBuffer(is_shared).ToHandle(&buffer)) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(buffer_object);
+ if (!buffer->is_shared()) {
return MaybeHandle<WasmMemoryObject>();
}
@@ -2398,37 +2475,38 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
// Deserializaton of |value| might have deprecated current |target|,
// ensure we are working with the up-to-date version.
target = Map::Update(isolate_, target);
-
- InternalIndex descriptor(properties.size());
- PropertyDetails details =
- target->instance_descriptors(isolate_).GetDetails(descriptor);
- Representation expected_representation = details.representation();
- if (value->FitsRepresentation(expected_representation)) {
- if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors(isolate_)
- .GetFieldType(descriptor)
- .NowContains(value)) {
- Handle<FieldType> value_type =
- value->OptimalType(isolate_, expected_representation);
- MapUpdater::GeneralizeField(isolate_, target, descriptor,
- details.constness(),
- expected_representation, value_type);
- }
- DCHECK(target->instance_descriptors(isolate_)
+ if (!target->is_dictionary_map()) {
+ InternalIndex descriptor(properties.size());
+ PropertyDetails details =
+ target->instance_descriptors(isolate_).GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+ if (value->FitsRepresentation(expected_representation)) {
+ if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors(isolate_)
.GetFieldType(descriptor)
- .NowContains(value));
- properties.push_back(value);
- map = target;
- continue;
- } else {
- transitioning = false;
+ .NowContains(value)) {
+ Handle<FieldType> value_type =
+ value->OptimalType(isolate_, expected_representation);
+ MapUpdater::GeneralizeField(isolate_, target, descriptor,
+ details.constness(),
+ expected_representation, value_type);
+ }
+ DCHECK(target->instance_descriptors(isolate_)
+ .GetFieldType(descriptor)
+ .NowContains(value));
+ properties.push_back(value);
+ map = target;
+ continue;
+ }
}
+ transitioning = false;
}
// Fell out of transitioning fast path. Commit the properties gathered so
// far, and then start setting properties slowly instead.
DCHECK(!transitioning);
CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+ CHECK(!map->is_dictionary_map());
CommitProperties(object, map, properties);
num_properties = static_cast<uint32_t>(properties.size());
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index 1c7e7bb76f..13156b99dd 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -98,8 +98,6 @@ class ValueSerializer {
void SetTreatArrayBufferViewsAsHostObjects(bool mode);
private:
- friend class WebSnapshotSerializer;
-
// Managing allocations of the internal buffer.
Maybe<bool> ExpandBuffer(size_t required_capacity);
@@ -249,8 +247,6 @@ class ValueDeserializer {
bool ReadByte(uint8_t* value) V8_WARN_UNUSED_RESULT;
private:
- friend class WebSnapshotDeserializer;
-
// Reading the wire format.
Maybe<SerializationTag> PeekTag() const V8_WARN_UNUSED_RESULT;
void ConsumeTag(SerializationTag peeked_tag);
@@ -297,15 +293,15 @@ class ValueDeserializer {
MaybeHandle<JSRegExp> ReadJSRegExp() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSMap> ReadJSMap() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSSet> ReadJSSet() V8_WARN_UNUSED_RESULT;
- MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer(bool is_shared)
- V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer(
+ bool is_shared, bool is_resizable) V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer()
V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) V8_WARN_UNUSED_RESULT;
- bool ValidateAndSetJSArrayBufferViewFlags(
- JSArrayBufferView view, JSArrayBuffer buffer,
- uint32_t serialized_flags) V8_WARN_UNUSED_RESULT;
+ bool ValidateJSArrayBufferViewFlags(
+ JSArrayBuffer buffer, uint32_t serialized_flags, bool& is_length_tracking,
+ bool& is_backed_by_rab) V8_WARN_UNUSED_RESULT;
MaybeHandle<Object> ReadJSError() V8_WARN_UNUSED_RESULT;
#if V8_ENABLE_WEBASSEMBLY
MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index 6f23a869cb..ab16adf31d 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-class CodeDataContainer;
+class Code;
#define ROOT_ID_LIST(V) \
V(kStringTable, "(Internalized strings)") \
@@ -91,11 +91,14 @@ class RootVisitor {
UNREACHABLE();
}
- // Visits a single pointer which is Code from the execution stack.
- virtual void VisitRunningCode(FullObjectSlot p) {
- // For most visitors, currently running Code is no different than any other
+ // Visits a running Code object and potentially its associated
+ // InstructionStream from the execution stack.
+ virtual void VisitRunningCode(FullObjectSlot code_slot,
+ FullObjectSlot istream_or_smi_zero_slot) {
+ // For most visitors, currently running code is no different than any other
// on-stack pointer.
- VisitRootPointer(Root::kStackRoots, nullptr, p);
+ VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot);
+ VisitRootPointer(Root::kStackRoots, nullptr, code_slot);
}
// Intended for serialization/deserialization checking: insert, or
@@ -104,6 +107,15 @@ class RootVisitor {
virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
static const char* RootName(Root root);
+
+ // The type of collector that invokes this visitor. This is used by the
+ // ConservativeStackVisitor to determine which root pointers on the stack
+ // to follow, during conservative stack scanning. For MARK_COMPACTOR (the
+ // default) all pointers are followed, whereas for young generation
+ // collectors only pointers to objects in the young generation are followed.
+ virtual GarbageCollector collector() const {
+ return GarbageCollector::MARK_COMPACTOR;
+ }
};
class RelocIterator;
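
The new RootVisitor::collector() hook lets conservative stack scanning ask which collector a visitor serves, so a young-generation collection can ignore old-generation pointers. A compact illustration of the override pattern, with invented class names:

    #include <cstdint>
    #include <iostream>

    enum class GarbageCollector { MARK_COMPACTOR, MINOR_GC };

    class StackVisitor {
     public:
      virtual ~StackVisitor() = default;
      virtual void VisitPointer(uintptr_t address) = 0;
      // Default: a full GC follows every plausible pointer.
      virtual GarbageCollector collector() const {
        return GarbageCollector::MARK_COMPACTOR;
      }
    };

    class MinorGCVisitor : public StackVisitor {
     public:
      void VisitPointer(uintptr_t address) override {
        std::cout << "young pointer 0x" << std::hex << address << "\n";
      }
      GarbageCollector collector() const override {
        return GarbageCollector::MINOR_GC;
      }
    };

A conservative scanner can then consult collector() and only follow addresses that land in young-generation pages when it reports MINOR_GC.
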
@@ -120,12 +132,11 @@ class ObjectVisitor {
ObjectSlot end) = 0;
virtual void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) = 0;
- // When V8_EXTERNAL_CODE_SPACE is enabled, visits a Code pointer slot.
- // The values may be modified on return.
- // Not used when V8_EXTERNAL_CODE_SPACE is not enabled (the Code pointer
- // slots are visited as a part of on-heap slot visitation - via
- // VisitPointers()).
- virtual void VisitCodePointer(HeapObject host, CodeObjectSlot slot) = 0;
+  // When V8_EXTERNAL_CODE_SPACE is enabled, visits an InstructionStream pointer
+ // slot. The values may be modified on return. Not used when
+ // V8_EXTERNAL_CODE_SPACE is not enabled (the InstructionStream pointer slots
+ // are visited as a part of on-heap slot visitation - via VisitPointers()).
+ virtual void VisitCodePointer(Code host, CodeObjectSlot slot) = 0;
// Custom weak pointers must be ignored by the GC but not other
// visitors. They're used for e.g., lists that are recreated after GC. The
@@ -153,33 +164,23 @@ class ObjectVisitor {
VisitPointer(host, value);
}
- // To allow lazy clearing of inline caches the visitor has
- // a rich interface for iterating over Code objects ...
-
- // Visits a code target in the instruction stream.
- virtual void VisitCodeTarget(Code host, RelocInfo* rinfo) = 0;
+ virtual void VisitCodeTarget(RelocInfo* rinfo) = 0;
- // Visit pointer embedded into a code object.
- virtual void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) = 0;
+ virtual void VisitEmbeddedPointer(RelocInfo* rinfo) = 0;
- // Visits an external reference embedded into a code object.
- virtual void VisitExternalReference(Code host, RelocInfo* rinfo) {}
+ virtual void VisitExternalReference(RelocInfo* rinfo) {}
- // Visits an external pointer.
virtual void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
ExternalPointerTag tag) {}
- // Visits an (encoded) internal reference.
- virtual void VisitInternalReference(Code host, RelocInfo* rinfo) {}
+ virtual void VisitInternalReference(RelocInfo* rinfo) {}
- // Visits an off-heap target or near builtin entry in the instruction stream.
// TODO(ishell): rename to VisitBuiltinEntry.
- virtual void VisitOffHeapTarget(Code host, RelocInfo* rinfo) {}
+ virtual void VisitOffHeapTarget(RelocInfo* rinfo) {}
// Visits the relocation info using the given iterator.
void VisitRelocInfo(RelocIterator* it);
- // Visits the object's map pointer, decoding as necessary
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
};
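
Several ObjectVisitor callbacks drop their explicit Code/host argument because the relocation record can now answer for its owner (rinfo->code() appears later in this diff). The sketch below, with placeholder types, shows the equivalent API shape: a back-pointer on the record replaces the extra parameter.

    #include <cstdint>

    struct CodeStub;  // stand-in for the owning code object

    struct RelocRecord {
      uintptr_t target_address;
      CodeStub* owner;  // back-pointer replaces the "host" parameter
      CodeStub* code() const { return owner; }
    };

    class RelocVisitor {
     public:
      virtual ~RelocVisitor() = default;
      // Before: VisitCodeTarget(CodeStub* host, RelocRecord* r)
      // After:  callers that need the host ask the record via r->code().
      virtual void VisitCodeTarget(RelocRecord* r) = 0;
    };
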
@@ -193,7 +194,7 @@ class ObjectVisitorWithCageBases : public ObjectVisitor {
inline explicit ObjectVisitorWithCageBases(Heap* heap);
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
@@ -203,7 +204,7 @@ class ObjectVisitorWithCageBases : public ObjectVisitor {
}
// The pointer compression cage base value used for decompression of
- // references to Code objects.
+ // references to InstructionStream objects.
PtrComprCageBase code_cage_base() const {
#ifdef V8_EXTERNAL_CODE_SPACE
return code_cage_base_;
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 9c0eee6d00..47b883069b 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -92,8 +92,7 @@ UnoptimizedCompileFlags UnoptimizedCompileFlags::ForToplevelCompile(
flags.SetFlagsForToplevelCompile(is_user_javascript, language_mode, repl_mode,
type, lazy);
- LOG(isolate, ScriptEvent(V8FileLogger::ScriptEventType::kReserveId,
- flags.script_id()));
+ LOG(isolate, ScriptEvent(ScriptEventType::kReserveId, flags.script_id()));
return flags;
}
@@ -209,7 +208,9 @@ ParseInfo::ParseInfo(const UnoptimizedCompileFlags flags,
#if V8_ENABLE_WEBASSEMBLY
contains_asm_module_(false),
#endif // V8_ENABLE_WEBASSEMBLY
- language_mode_(flags.outer_language_mode()) {
+ language_mode_(flags.outer_language_mode()),
+ is_background_compilation_(false),
+ is_streaming_compilation_(false) {
if (flags.block_coverage_enabled()) {
AllocateSourceRangeMap();
}
@@ -241,8 +242,16 @@ Handle<Script> ParseInfo::CreateScript(
// Create a script object describing the script to be compiled.
DCHECK(flags().script_id() >= 0 ||
flags().script_id() == Script::kTemporaryScriptId);
+ auto event = ScriptEventType::kCreate;
+ if (is_streaming_compilation()) {
+ event = is_background_compilation()
+ ? ScriptEventType::kStreamingCompileBackground
+ : ScriptEventType::kStreamingCompileForeground;
+ } else if (is_background_compilation()) {
+ event = ScriptEventType::kBackgroundCompile;
+ }
Handle<Script> script =
- isolate->factory()->NewScriptWithId(source, flags().script_id());
+ isolate->factory()->NewScriptWithId(source, flags().script_id(), event);
DisallowGarbageCollection no_gc;
auto raw_script = *script;
switch (natives) {
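
CreateScript now derives the logged script event from the streaming/background flags. The decision is small enough to restate as a standalone function; the enum values mirror the ones visible in this hunk, everything else is simplified.

    enum class ScriptEventType {
      kCreate,
      kBackgroundCompile,
      kStreamingCompileBackground,
      kStreamingCompileForeground,
    };

    ScriptEventType SelectScriptEvent(bool is_streaming, bool is_background) {
      if (is_streaming) {
        return is_background ? ScriptEventType::kStreamingCompileBackground
                             : ScriptEventType::kStreamingCompileForeground;
      }
      return is_background ? ScriptEventType::kBackgroundCompile
                           : ScriptEventType::kCreate;
    }
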
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index e2e3bdf9d1..404fbf9ac8 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -61,7 +61,8 @@ class Zone;
V(post_parallel_compile_tasks_for_eager_toplevel, bool, 1, _) \
V(post_parallel_compile_tasks_for_lazy, bool, 1, _) \
V(collect_source_positions, bool, 1, _) \
- V(is_repl_mode, bool, 1, _)
+ V(is_repl_mode, bool, 1, _) \
+ V(produce_compile_hints, bool, 1, _)
class V8_EXPORT_PRIVATE UnoptimizedCompileFlags {
public:
@@ -338,6 +339,22 @@ class V8_EXPORT_PRIVATE ParseInfo {
void CheckFlagsForFunctionFromScript(Script script);
+ bool is_background_compilation() const { return is_background_compilation_; }
+
+ void set_is_background_compilation() { is_background_compilation_ = true; }
+
+ bool is_streaming_compilation() const { return is_streaming_compilation_; }
+
+ void set_is_streaming_compilation() { is_streaming_compilation_ = true; }
+
+ CompileHintCallback compile_hint_callback() const {
+ return compile_hint_callback_;
+ }
+
+ void* compile_hint_callback_data() const {
+ return compile_hint_callback_data_;
+ }
+
private:
ParseInfo(const UnoptimizedCompileFlags flags, UnoptimizedCompileState* state,
ReusableUnoptimizedCompileState* reusable_state,
@@ -356,6 +373,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
int parameters_end_pos_;
int max_function_literal_id_;
+ v8::CompileHintCallback compile_hint_callback_ = nullptr;
+ void* compile_hint_callback_data_ = nullptr;
+
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
std::unique_ptr<ConsumedPreparseData> consumed_preparse_data_;
@@ -370,6 +390,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
bool contains_asm_module_ : 1;
#endif // V8_ENABLE_WEBASSEMBLY
LanguageMode language_mode_ : 1;
+ bool is_background_compilation_ : 1;
+ bool is_streaming_compilation_ : 1;
};
} // namespace internal
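
ParseInfo grows storage for a compile-hint callback and its data pointer; the parser.cc hunk further down consults it before leaving a function lazily compiled. The sketch below assumes only the callback shape visible there, bool(int position, void* data), and invents the surrounding names.

    using CompileHintCallback = bool (*)(int position, void* data);

    struct ParseOptions {
      CompileHintCallback compile_hint_callback = nullptr;
      void* compile_hint_callback_data = nullptr;
    };

    enum class CompileHint { kLazy, kEager };

    CompileHint DecideCompileHint(const ParseOptions& options, int function_pos,
                                  CompileHint current) {
      if (current == CompileHint::kLazy && options.compile_hint_callback &&
          options.compile_hint_callback(function_pos,
                                        options.compile_hint_callback_data)) {
        return CompileHint::kEager;  // the embedder asked for eager compilation
      }
      return current;
    }

    // Example embedder hint: compile everything before source offset 1000 eagerly.
    bool EagerBelow1000(int position, void*) { return position < 1000; }
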
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index aff9c5f481..ff5af7dfec 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -63,7 +63,7 @@ struct FormalParametersBase {
int num_parameters() const {
// Don't include the rest parameter into the function's formal parameter
// count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
- // which says whether we need to create an arguments adaptor frame).
+ // which says whether we need to create an inlined arguments frame).
return arity - has_rest;
}
@@ -305,6 +305,8 @@ class ParserBase {
// The current Zone, which might be the main zone or a temporary Zone.
Zone* zone() const { return zone_; }
+ V8_INLINE bool IsExtraordinaryPrivateNameAccessAllowed() const;
+
protected:
friend class v8::internal::ExpressionScope<ParserTypes<Impl>>;
friend class v8::internal::ExpressionParsingScope<ParserTypes<Impl>>;
@@ -1286,7 +1288,6 @@ class ParserBase {
// a scope where the name has also been let bound or the var declaration is
// hoisted over such a scope.
void CheckConflictingVarDeclarations(DeclarationScope* scope) {
- if (has_error()) return;
bool allowed_catch_binding_var_redeclaration = false;
Declaration* decl = scope->CheckConflictingVarDeclarations(
&allowed_catch_binding_var_redeclaration);
@@ -1761,6 +1762,39 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParsePropertyName() {
}
template <typename Impl>
+bool ParserBase<Impl>::IsExtraordinaryPrivateNameAccessAllowed() const {
+ if (flags().parsing_while_debugging() != ParsingWhileDebugging::kYes &&
+ !flags().is_repl_mode()) {
+ return false;
+ }
+ Scope* current_scope = scope();
+ while (current_scope != nullptr) {
+ switch (current_scope->scope_type()) {
+ case CLASS_SCOPE:
+ case CATCH_SCOPE:
+ case BLOCK_SCOPE:
+ case WITH_SCOPE:
+ case SHADOW_REALM_SCOPE:
+ return false;
+ // Top-level scopes.
+ case SCRIPT_SCOPE:
+ case MODULE_SCOPE:
+ return true;
+ // Top-level wrapper function scopes.
+ case FUNCTION_SCOPE:
+ return function_literal_id_ == kFunctionLiteralIdTopLevel;
+ // Used by debug-evaluate. If the outer scope is top-level,
+ // extraordinary private name access is allowed.
+ case EVAL_SCOPE:
+ current_scope = current_scope->outer_scope();
+ DCHECK_NOT_NULL(current_scope);
+ break;
+ }
+ }
+ UNREACHABLE();
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
int pos = position();
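
IsExtraordinaryPrivateNameAccessAllowed walks outward through the scope chain: nested scopes reject, top-level script/module scopes accept, a function scope accepts only for the top-level wrapper, and eval scopes defer to their outer scope. A trimmed-down, self-contained version of that walk (renamed scope kinds, fewer cases) looks like this:

    enum class ScopeKind { kScript, kModule, kFunction, kEval, kClass, kBlock };

    struct Scope {
      ScopeKind kind;
      Scope* outer;
      bool is_top_level_wrapper;  // stands in for the function-literal-id check
    };

    bool AllowsExtraordinaryPrivateNameAccess(const Scope* scope) {
      while (scope != nullptr) {
        switch (scope->kind) {
          case ScopeKind::kClass:
          case ScopeKind::kBlock:
            return false;                        // never from nested scopes
          case ScopeKind::kScript:
          case ScopeKind::kModule:
            return true;                         // top-level scopes qualify
          case ScopeKind::kFunction:
            return scope->is_top_level_wrapper;  // only the top-level wrapper
          case ScopeKind::kEval:
            scope = scope->outer;                // debug-evaluate: look outward
            break;
        }
      }
      return false;  // the real code treats falling off the chain as unreachable
    }
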
@@ -1781,7 +1815,10 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
PrivateNameScopeIterator private_name_scope_iter(scope());
// Parse the identifier so that we can display it in the error message
name = impl()->GetIdentifier();
- if (private_name_scope_iter.Done()) {
+ // In debug-evaluate, we relax the private name resolution to enable
+ // evaluation of obj.#member outside the class bodies in top-level scopes.
+ if (private_name_scope_iter.Done() &&
+ !IsExtraordinaryPrivateNameAccessAllowed()) {
impl()->ReportMessageAt(Scanner::Location(pos, pos + 1),
MessageTemplate::kInvalidPrivateFieldResolution,
impl()->GetRawNameFromIdentifier(name));
@@ -1831,8 +1868,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
return impl()->FailureExpression();
}
- const AstRawString* js_pattern = GetNextSymbolForRegExpLiteral();
+ const AstRawString* pattern = GetNextSymbolForRegExpLiteral();
base::Optional<RegExpFlags> flags = scanner()->ScanRegExpFlags();
+ const AstRawString* flags_as_ast_raw_string = GetNextSymbolForRegExpLiteral();
if (!flags.has_value() || !ValidateRegExpFlags(flags.value())) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
@@ -1840,13 +1878,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
}
Next();
RegExpError regexp_error;
- if (!ValidateRegExpLiteral(js_pattern, flags.value(), &regexp_error)) {
+ if (!ValidateRegExpLiteral(pattern, flags.value(), &regexp_error)) {
if (RegExpErrorIsStackOverflow(regexp_error)) set_stack_overflow();
- ReportMessage(MessageTemplate::kMalformedRegExp, js_pattern,
- RegExpErrorString(regexp_error));
+ ReportMessage(MessageTemplate::kMalformedRegExp, pattern,
+ flags_as_ast_raw_string, RegExpErrorString(regexp_error));
return impl()->FailureExpression();
}
- return factory()->NewRegExpLiteral(js_pattern, flags.value(), pos);
+ return factory()->NewRegExpLiteral(pattern, flags.value(), pos);
}
template <typename Impl>
@@ -2826,7 +2864,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral() {
// this runtime function. Here, we make sure that the number of
// properties is less than number of arguments allowed for a runtime
// call.
- if (has_rest_property && properties.length() > Code::kMaxArguments) {
+ if (has_rest_property &&
+ properties.length() > InstructionStream::kMaxArguments) {
expression_scope()->RecordPatternError(Scanner::Location(pos, position()),
MessageTemplate::kTooManyArguments);
}
@@ -2882,7 +2921,7 @@ void ParserBase<Impl>::ParseArguments(
if (!Check(Token::COMMA)) break;
}
- if (args->length() > Code::kMaxArguments) {
+ if (args->length() > InstructionStream::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyArguments);
return;
}
@@ -3929,7 +3968,7 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters) {
if (peek() != Token::RPAREN) {
while (true) {
// Add one since we're going to be adding a parameter.
- if (parameters->arity + 1 > Code::kMaxArguments) {
+ if (parameters->arity + 1 > InstructionStream::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyParameters);
return;
}
@@ -4317,6 +4356,8 @@ void ParserBase<Impl>::ParseFunctionBody(
StatementListT* body, IdentifierT function_name, int pos,
const FormalParametersT& parameters, FunctionKind kind,
FunctionSyntaxKind function_syntax_kind, FunctionBodyType body_type) {
+ CheckStackOverflow();
+
if (IsResumableFunction(kind)) impl()->PrepareGeneratorVariables();
DeclarationScope* function_scope = parameters.scope;
@@ -4737,8 +4778,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
if (Check(Token::SEMICOLON)) continue;
// Either we're parsing a `static { }` initialization block or a property.
- if (v8_flags.harmony_class_static_blocks && peek() == Token::STATIC &&
- PeekAhead() == Token::LBRACE) {
+ if (peek() == Token::STATIC && PeekAhead() == Token::LBRACE) {
BlockT static_block = ParseClassStaticBlock(&class_info);
impl()->AddClassStaticBlock(static_block, &class_info);
continue;
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index aba8ca6271..6886e037ac 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -174,6 +174,9 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
case Token::DIV:
*x = factory()->NewNumberLiteral(base::Divide(x_val, y_val), pos);
return true;
+ case Token::MOD:
+ *x = factory()->NewNumberLiteral(Modulo(x_val, y_val), pos);
+ return true;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
*x = factory()->NewNumberLiteral(value, pos);
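
The new Token::MOD case folds x % y on numeric literals at parse time. JavaScript's % on numbers follows fmod semantics (the result takes the sign of the dividend), so a host-side fold can lean on std::fmod; the program below only illustrates the arithmetic, while V8's Modulo() helper additionally smooths over platform quirks not shown here.

    #include <cmath>
    #include <cstdio>

    double FoldModulo(double x, double y) { return std::fmod(x, y); }

    int main() {
      std::printf("%g\n", FoldModulo(7.5, 2.0));   // 1.5
      std::printf("%g\n", FoldModulo(-7.5, 2.0));  // -1.5 (sign of dividend)
      std::printf("%g\n", FoldModulo(7.5, 0.0));   // nan, like 7.5 % 0 in JS
    }
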
@@ -889,15 +892,6 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
}
int function_literal_id = shared_info->function_literal_id();
- if (V8_UNLIKELY(script->type() == Script::TYPE_WEB_SNAPSHOT)) {
- // Function literal IDs for inner functions haven't been allocated when
- // deserializing. Put the inner function SFIs to the end of the list;
- // they'll be deduplicated later (if the corresponding SFIs exist already)
- // in Script::FindSharedFunctionInfo. (-1 here because function_literal_id
- // is the parent's id. The inner function will get ids starting from
- // function_literal_id + 1.)
- function_literal_id = script->shared_function_info_count() - 1;
- }
// Initialize parser state.
info->set_function_name(ast_value_factory()->GetString(
@@ -1120,6 +1114,7 @@ FunctionLiteral* Parser::ParseClassForInstanceMemberInitialization(
// Reparse the class as an expression to build the instance member
// initializer function.
Expression* expr = ParseClassExpression(original_scope_);
+ if (has_error()) return nullptr;
DCHECK(expr->IsClassLiteral());
ClassLiteral* literal = expr->AsClassLiteral();
@@ -2595,7 +2590,7 @@ void Parser::DeclareArrowFunctionFormalParameters(
AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos);
- if (parameters->arity > Code::kMaxArguments) {
+ if (parameters->arity > InstructionStream::kMaxArguments) {
ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
return;
}
@@ -2646,8 +2641,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name = ast_value_factory()->empty_string();
}
+ // This is true if we get here through CreateDynamicFunction.
+ bool params_need_validation = parameters_end_pos_ != kNoSourcePosition;
+
FunctionLiteral::EagerCompileHint eager_compile_hint =
- function_state_->next_function_is_likely_called() || is_wrapped
+ function_state_->next_function_is_likely_called() || is_wrapped ||
+ params_need_validation
? FunctionLiteral::kShouldEagerCompile
: default_eager_compile_hint();
@@ -2686,6 +2685,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
DCHECK_IMPLIES(parse_lazily(), info()->flags().allow_lazy_compile());
DCHECK_IMPLIES(parse_lazily(), has_error() || allow_lazy_);
DCHECK_IMPLIES(parse_lazily(), extension() == nullptr);
+ if (eager_compile_hint == FunctionLiteral::kShouldLazyCompile) {
+ // Apply compile hints from the embedder.
+ int compile_hint_position = peek_position();
+ v8::CompileHintCallback callback = info()->compile_hint_callback();
+ if (callback != nullptr &&
+ callback(compile_hint_position, info()->compile_hint_callback_data())) {
+ eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
+ }
+ }
const bool is_lazy =
eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
@@ -3325,16 +3333,32 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
Scope* function_scope = inner_scope->outer_scope();
DCHECK(function_scope->is_function_scope());
BlockState block_state(&scope_, inner_scope);
+ // According to https://tc39.es/ecma262/#sec-functiondeclarationinstantiation
+ // If a variable's name conflicts with the names of both parameters and
+ // functions, no bindings should be created for it. A set is used here
+ // to record such variables.
+ std::set<Variable*> hoisted_func_vars;
+ std::vector<std::pair<Variable*, Variable*>> var_param_bindings;
for (Declaration* decl : *inner_scope->declarations()) {
- if (decl->var()->mode() != VariableMode::kVar ||
- !decl->IsVariableDeclaration()) {
+ if (!decl->IsVariableDeclaration()) {
+ hoisted_func_vars.insert(decl->var());
+ continue;
+ } else if (decl->var()->mode() != VariableMode::kVar) {
continue;
}
const AstRawString* name = decl->var()->raw_name();
Variable* parameter = function_scope->LookupLocal(name);
if (parameter == nullptr) continue;
+ var_param_bindings.push_back(std::pair(decl->var(), parameter));
+ }
+
+ for (auto decl : var_param_bindings) {
+ if (hoisted_func_vars.find(decl.first) != hoisted_func_vars.end()) {
+ continue;
+ }
+ const AstRawString* name = decl.first->raw_name();
VariableProxy* to = NewUnresolved(name);
- VariableProxy* from = factory()->NewVariableProxy(parameter);
+ VariableProxy* from = factory()->NewVariableProxy(decl.second);
Expression* assignment =
factory()->NewAssignment(Token::ASSIGN, to, from, kNoSourcePosition);
Statement* statement =
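
Per the spec reference above, a var that collides with both a parameter and a hoisted function declaration must not get a shadowing initializer. The patch therefore collects hoisted function vars first and filters the var/parameter pairs against them; the snippet below models that two-pass filtering with standard containers and hypothetical names.

    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    struct Decl {
      std::string name;
      bool is_function;  // true for hoisted function declarations
    };

    std::vector<std::pair<std::string, std::string>> PlanShadowingCopies(
        const std::vector<Decl>& inner_decls,
        const std::set<std::string>& params) {
      std::set<std::string> hoisted_funcs;
      std::vector<std::string> var_param_conflicts;
      for (const Decl& d : inner_decls) {
        if (d.is_function) { hoisted_funcs.insert(d.name); continue; }
        if (params.count(d.name)) var_param_conflicts.push_back(d.name);
      }
      std::vector<std::pair<std::string, std::string>> assignments;
      for (const std::string& name : var_param_conflicts) {
        if (hoisted_funcs.count(name)) continue;  // spec: no binding created
        assignments.emplace_back(name /*to: var*/, name /*from: parameter*/);
      }
      return assignments;
    }
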
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index 4756628ca7..a4fe59296d 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -18,17 +18,17 @@ namespace v8 {
namespace internal {
void PendingCompilationErrorHandler::MessageDetails::SetString(
- Handle<String> string, Isolate* isolate) {
- DCHECK_NE(args_[0].type, kMainThreadHandle);
- args_[0].type = kMainThreadHandle;
- args_[0].js_string = string;
+ int index, Handle<String> string, Isolate* isolate) {
+ DCHECK_NE(args_[index].type, kMainThreadHandle);
+ args_[index].type = kMainThreadHandle;
+ args_[index].js_string = string;
}
void PendingCompilationErrorHandler::MessageDetails::SetString(
- Handle<String> string, LocalIsolate* isolate) {
- DCHECK_NE(args_[0].type, kMainThreadHandle);
- args_[0].type = kMainThreadHandle;
- args_[0].js_string = isolate->heap()->NewPersistentHandle(string);
+ int index, Handle<String> string, LocalIsolate* isolate) {
+ DCHECK_NE(args_[index].type, kMainThreadHandle);
+ args_[index].type = kMainThreadHandle;
+ args_[index].js_string = isolate->heap()->NewPersistentHandle(string);
}
template <typename IsolateT>
@@ -37,17 +37,17 @@ void PendingCompilationErrorHandler::MessageDetails::Prepare(
for (int i = 0; i < kMaxArgumentCount; i++) {
switch (args_[i].type) {
case kAstRawString:
- return SetString(args_[i].ast_string->string(), isolate);
-
+ SetString(i, args_[i].ast_string->string(), isolate);
+ break;
case kNone:
case kConstCharString:
// We can delay allocation until ArgString(isolate).
- return;
+ break;
case kMainThreadHandle:
// The message details might already be prepared, so skip them if this
// is the case.
- return;
+ break;
}
}
}
@@ -81,7 +81,8 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
int end_position,
MessageTemplate message,
const char* arg) {
- if (has_pending_error_) return;
+ if (has_pending_error_ && end_position >= error_details_.start_pos()) return;
+
has_pending_error_ = true;
error_details_ = MessageDetails(start_position, end_position, message, arg);
@@ -91,7 +92,8 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
int end_position,
MessageTemplate message,
const AstRawString* arg) {
- if (has_pending_error_) return;
+ if (has_pending_error_ && end_position >= error_details_.start_pos()) return;
+
has_pending_error_ = true;
error_details_ = MessageDetails(start_position, end_position, message, arg);
@@ -102,12 +104,23 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
MessageTemplate message,
const AstRawString* arg0,
const char* arg1) {
- if (has_pending_error_) return;
+ if (has_pending_error_ && end_position >= error_details_.start_pos()) return;
+
has_pending_error_ = true;
error_details_ =
MessageDetails(start_position, end_position, message, arg0, arg1);
}
+void PendingCompilationErrorHandler::ReportMessageAt(
+ int start_position, int end_position, MessageTemplate message,
+ const AstRawString* arg0, const AstRawString* arg1, const char* arg2) {
+ if (has_pending_error_ && end_position >= error_details_.start_pos()) return;
+
+ has_pending_error_ = true;
+ error_details_ =
+ MessageDetails(start_position, end_position, message, arg0, arg1, arg2);
+}
+
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
int end_position,
MessageTemplate message,
@@ -178,11 +191,12 @@ void PendingCompilationErrorHandler::ThrowPendingError(
MessageLocation location = error_details_.GetLocation(script);
Handle<String> arg0 = error_details_.ArgString(isolate, 0);
Handle<String> arg1 = error_details_.ArgString(isolate, 1);
+ Handle<String> arg2 = error_details_.ArgString(isolate, 2);
isolate->debug()->OnCompileError(script);
Factory* factory = isolate->factory();
Handle<JSObject> error =
- factory->NewSyntaxError(error_details_.message(), arg0, arg1);
+ factory->NewSyntaxError(error_details_.message(), arg0, arg1, arg2);
isolate->ThrowAt(error, &location);
}
@@ -191,7 +205,8 @@ Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
error_details_.Prepare(isolate);
return MessageFormatter::Format(isolate, error_details_.message(),
error_details_.ArgString(isolate, 0),
- error_details_.ArgString(isolate, 1));
+ error_details_.ArgString(isolate, 1),
+ error_details_.ArgString(isolate, 2));
}
} // namespace internal
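
The ReportMessageAt overloads now drop a new report unless it ends strictly before the pending error starts, so the handler effectively keeps the error that occurs earliest in the source. Restated in isolation (illustrative types only):

    struct PendingError {
      bool has_error = false;
      int start_pos = 0;
      int end_pos = 0;
    };

    void Report(PendingError& pending, int start, int end) {
      // A later report only wins if it ends before the pending error starts.
      if (pending.has_error && end >= pending.start_pos) return;
      pending.has_error = true;
      pending.start_pos = start;
      pending.end_pos = end;
    }
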
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index 9384e94df7..9ac25dae0c 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -41,6 +41,10 @@ class PendingCompilationErrorHandler {
MessageTemplate message, const AstRawString* arg0,
const char* arg1);
+ void ReportMessageAt(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0,
+ const AstRawString* arg1, const char* arg2);
+
void ReportWarningAt(int start_position, int end_position,
MessageTemplate message, const char* arg = nullptr);
@@ -93,23 +97,36 @@ class PendingCompilationErrorHandler {
: start_position_(start_position),
end_position_(end_position),
message_(message),
- args_{MessageArgument{arg0}, MessageArgument{}} {}
+ args_{MessageArgument{arg0}, MessageArgument{}, MessageArgument{}} {}
MessageDetails(int start_position, int end_position,
MessageTemplate message, const AstRawString* arg0,
const char* arg1)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- args_{MessageArgument{arg0}, MessageArgument{arg1}} {
+ args_{MessageArgument{arg0}, MessageArgument{arg1},
+ MessageArgument{}} {
+ DCHECK_NOT_NULL(arg0);
+ DCHECK_NOT_NULL(arg1);
+ }
+ MessageDetails(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0,
+ const AstRawString* arg1, const char* arg2)
+ : start_position_(start_position),
+ end_position_(end_position),
+ message_(message),
+ args_{MessageArgument{arg0}, MessageArgument{arg1},
+ MessageArgument{arg2}} {
DCHECK_NOT_NULL(arg0);
DCHECK_NOT_NULL(arg1);
+ DCHECK_NOT_NULL(arg2);
}
MessageDetails(int start_position, int end_position,
MessageTemplate message, const char* arg0)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- args_{MessageArgument{arg0}, MessageArgument{}} {}
+ args_{MessageArgument{arg0}, MessageArgument{}, MessageArgument{}} {}
Handle<String> ArgString(Isolate* isolate, int index) const;
int ArgCount() const {
@@ -127,6 +144,8 @@ class PendingCompilationErrorHandler {
}
MessageLocation GetLocation(Handle<Script> script) const;
+ int start_pos() const { return start_position_; }
+ int end_pos() const { return end_position_; }
MessageTemplate message() const { return message_; }
template <typename IsolateT>
@@ -135,8 +154,8 @@ class PendingCompilationErrorHandler {
private:
enum Type { kNone, kAstRawString, kConstCharString, kMainThreadHandle };
- void SetString(Handle<String> string, Isolate* isolate);
- void SetString(Handle<String> string, LocalIsolate* isolate);
+ void SetString(int index, Handle<String> string, Isolate* isolate);
+ void SetString(int index, Handle<String> string, LocalIsolate* isolate);
int start_position_;
int end_position_;
@@ -158,7 +177,7 @@ class PendingCompilationErrorHandler {
Type type;
};
- static constexpr int kMaxArgumentCount = 2;
+ static constexpr int kMaxArgumentCount = 3;
MessageArgument args_[kMaxArgumentCount];
};
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 44ea20e919..ea3308fc32 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -1565,7 +1565,7 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::StringLiteral();
}
- PreParserExpression ExpressionFromPrivateName(
+ V8_INLINE PreParserExpression ExpressionFromPrivateName(
PrivateNameScopeIterator* private_name_scope,
const PreParserIdentifier& name, int start_position) {
VariableProxy* proxy = factory()->ast_node_factory()->NewVariableProxy(
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index 83aa1290fe..0a8c6343d1 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -319,24 +319,25 @@ V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
}
V8_INLINE Token::Value Scanner::SkipWhiteSpace() {
- int start_position = source_pos();
+ if (!IsWhiteSpaceOrLineTerminator(c0_)) return Token::ILLEGAL;
- // We won't skip behind the end of input.
- DCHECK(!IsWhiteSpaceOrLineTerminator(kEndOfInput));
+ if (!next().after_line_terminator && unibrow::IsLineTerminator(c0_)) {
+ next().after_line_terminator = true;
+ }
// Advance as long as character is a WhiteSpace or LineTerminator.
- while (IsWhiteSpaceOrLineTerminator(c0_)) {
- if (!next().after_line_terminator && unibrow::IsLineTerminator(c0_)) {
- next().after_line_terminator = true;
+ base::uc32 hint = ' ';
+ AdvanceUntil([this, &hint](base::uc32 c0) {
+ if (V8_LIKELY(c0 == hint)) return false;
+ if (IsWhiteSpaceOrLineTerminator(c0)) {
+ if (!next().after_line_terminator && unibrow::IsLineTerminator(c0)) {
+ next().after_line_terminator = true;
+ }
+ hint = c0;
+ return false;
}
- Advance();
- }
-
- // Return whether or not we skipped any characters.
- if (source_pos() == start_position) {
- DCHECK_NE('0', c0_);
- return Token::ILLEGAL;
- }
+ return true;
+ });
return Token::WHITESPACE;
}
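
The rewritten SkipWhiteSpace caches the last whitespace character as a hint so the common run of identical characters (usually spaces) is accepted with a single comparison, falling back to full classification only when the character changes. A self-contained sketch of the same trick over a std::string standing in for the scanner's character stream:

    #include <cctype>
    #include <cstddef>
    #include <string>

    size_t SkipWhiteSpace(const std::string& text, size_t pos) {
      char hint = ' ';
      while (pos < text.size()) {
        char c = text[pos];
        if (c == hint) { ++pos; continue; }                 // fast path
        if (std::isspace(static_cast<unsigned char>(c))) {  // slow classification
          hint = c;                                         // new fast-path char
          ++pos;
          continue;
        }
        break;                                              // not whitespace
      }
      return pos;
    }
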
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index f6d7bcdee3..ea1c2ba257 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -995,12 +995,13 @@ base::Optional<RegExpFlags> Scanner::ScanRegExpFlags() {
DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
RegExpFlags flags;
+ next().literal_chars.Start();
while (IsIdentifierPart(c0_)) {
base::Optional<RegExpFlag> maybe_flag = JSRegExp::FlagFromChar(c0_);
if (!maybe_flag.has_value()) return {};
RegExpFlag flag = maybe_flag.value();
if (flags & flag) return {};
- Advance();
+ AddLiteralCharAdvance();
flags |= flag;
}
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index b638185b5b..2e035b2976 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -44,7 +44,9 @@ class Utf16CharacterStream {
virtual ~Utf16CharacterStream() = default;
V8_INLINE void set_parser_error() {
- buffer_cursor_ = buffer_end_;
+    // source_pos() returns the position one before the cursor.
+ // Offset 1 cancels this out and makes it return exactly buffer_end_.
+ buffer_cursor_ = buffer_end_ + 1;
has_parser_error_ = true;
}
V8_INLINE void reset_parser_error_flag() { has_parser_error_ = false; }
@@ -245,7 +247,9 @@ class V8_EXPORT_PRIVATE Scanner {
if (!has_parser_error()) {
c0_ = kEndOfInput;
source_->set_parser_error();
- for (TokenDesc& desc : token_storage_) desc.token = Token::ILLEGAL;
+ for (TokenDesc& desc : token_storage_) {
+ if (desc.token != Token::UNINITIALIZED) desc.token = Token::ILLEGAL;
+ }
}
}
V8_INLINE void reset_parser_error_flag() {
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 3bf57e0095..7b155b407d 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -208,7 +208,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
Isolate* isolate = Isolate::FromHeap(heap);
int length = 0;
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo shared = frame->function().shared();
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index e0834e9eb2..c3c76b842d 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -14,26 +14,28 @@
namespace v8 {
namespace internal {
-void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->AddCode(instruction_start, entry, instruction_size);
+void CodeCreateEventRecord::UpdateCodeMap(
+ InstructionStreamMap* instruction_stream_map) {
+ instruction_stream_map->AddCode(instruction_start, entry, instruction_size);
}
-
-void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from_instruction_start, to_instruction_start);
+void CodeMoveEventRecord::UpdateCodeMap(
+ InstructionStreamMap* instruction_stream_map) {
+ instruction_stream_map->MoveCode(from_instruction_start,
+ to_instruction_start);
}
-
-void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
- CodeEntry* entry = code_map->FindEntry(instruction_start);
+void CodeDisableOptEventRecord::UpdateCodeMap(
+ InstructionStreamMap* instruction_stream_map) {
+ CodeEntry* entry = instruction_stream_map->FindEntry(instruction_start);
if (entry != nullptr) {
entry->set_bailout_reason(bailout_reason);
}
}
-
-void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
- CodeEntry* entry = code_map->FindEntry(instruction_start);
+void CodeDeoptEventRecord::UpdateCodeMap(
+ InstructionStreamMap* instruction_stream_map) {
+ CodeEntry* entry = instruction_stream_map->FindEntry(instruction_start);
if (entry != nullptr) {
std::vector<CpuProfileDeoptFrame> frames_vector(
deopt_frames, deopt_frames + deopt_frame_count);
@@ -42,9 +44,9 @@ void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
delete[] deopt_frames;
}
-
-void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
- CodeEntry* entry = code_map->FindEntry(instruction_start);
+void ReportBuiltinEventRecord::UpdateCodeMap(
+ InstructionStreamMap* instruction_stream_map) {
+ CodeEntry* entry = instruction_stream_map->FindEntry(instruction_start);
if (entry) {
entry->SetBuiltinId(builtin);
return;
@@ -53,9 +55,9 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
if (builtin == Builtin::kGenericJSToWasmWrapper) {
// Make sure to add the generic js-to-wasm wrapper builtin, because that
// one is supposed to show up in profiles.
- entry = code_map->code_entries().Create(LogEventListener::CodeTag::kBuiltin,
- Builtins::name(builtin));
- code_map->AddCode(instruction_start, entry, instruction_size);
+ entry = instruction_stream_map->code_entries().Create(
+ LogEventListener::CodeTag::kBuiltin, Builtins::name(builtin));
+ instruction_stream_map->AddCode(instruction_start, entry, instruction_size);
}
#endif // V8_ENABLE_WEBASSEMBLY
}
@@ -68,8 +70,9 @@ TickSample* SamplingEventsProcessor::StartTickSample() {
return &evt->sample;
}
-void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
- bool removed = code_map->RemoveCode(entry);
+void CodeDeleteEventRecord::UpdateCodeMap(
+ InstructionStreamMap* instruction_stream_map) {
+ bool removed = instruction_stream_map->RemoveCode(entry);
CHECK(removed);
}
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index f8641063a8..5972ef6cba 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -415,7 +415,7 @@ void ProfilerCodeObserver::LogBuiltins() {
++builtin) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kReportBuiltin);
ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
- CodeT code = builtins->code(builtin);
+ Code code = builtins->code(builtin);
rec->instruction_start = code.InstructionStart();
rec->instruction_size = code.InstructionSize();
rec->builtin = builtin;
@@ -645,7 +645,8 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
if (!symbolizer_) {
- symbolizer_ = std::make_unique<Symbolizer>(code_observer_->code_map());
+ symbolizer_ =
+ std::make_unique<Symbolizer>(code_observer_->instruction_stream_map());
}
base::TimeDelta sampling_interval = ComputeSamplingInterval();
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index fd1aae5895..5bd8fe804c 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -24,7 +24,7 @@ namespace internal {
// Forward declarations.
class CodeEntry;
-class CodeMap;
+class InstructionStreamMap;
class CpuProfilesCollection;
class Isolate;
class Symbolizer;
@@ -58,7 +58,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
CodeEntry* entry;
unsigned instruction_size;
- V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+ V8_INLINE void UpdateCodeMap(InstructionStreamMap* instruction_stream_map);
};
@@ -67,7 +67,7 @@ class CodeMoveEventRecord : public CodeEventRecord {
Address from_instruction_start;
Address to_instruction_start;
- V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+ V8_INLINE void UpdateCodeMap(InstructionStreamMap* instruction_stream_map);
};
@@ -76,7 +76,7 @@ class CodeDisableOptEventRecord : public CodeEventRecord {
Address instruction_start;
const char* bailout_reason;
- V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+ V8_INLINE void UpdateCodeMap(InstructionStreamMap* instruction_stream_map);
};
@@ -90,7 +90,7 @@ class CodeDeoptEventRecord : public CodeEventRecord {
CpuProfileDeoptFrame* deopt_frames;
int deopt_frame_count;
- V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+ V8_INLINE void UpdateCodeMap(InstructionStreamMap* instruction_stream_map);
};
@@ -100,7 +100,7 @@ class ReportBuiltinEventRecord : public CodeEventRecord {
unsigned instruction_size;
Builtin builtin;
- V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+ V8_INLINE void UpdateCodeMap(InstructionStreamMap* instruction_stream_map);
};
// Signals that a native context's address has changed.
@@ -127,7 +127,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
public:
CodeEntry* entry;
- V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+ V8_INLINE void UpdateCodeMap(InstructionStreamMap* instruction_stream_map);
};
// A record type for sending code events (e.g. create, move, delete) to the
@@ -256,17 +256,17 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
// low sampling intervals on Windows.
};
-// Builds and maintains a CodeMap tracking code objects on the VM heap. While
-// alive, logs generated code, callbacks, and builtins from the isolate.
-// Redirects events to the profiler events processor when present. CodeEntry
-// lifetime is associated with the given CodeEntryStorage.
+// Builds and maintains an InstructionStreamMap tracking code objects on the VM
+// heap. While alive, logs generated code, callbacks, and builtins from the
+// isolate. Redirects events to the profiler events processor when present.
+// CodeEntry lifetime is associated with the given CodeEntryStorage.
class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
public:
explicit ProfilerCodeObserver(Isolate*, CodeEntryStorage&);
void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
CodeEntryStorage* code_entries() { return &code_entries_; }
- CodeMap* code_map() { return &code_map_; }
+ InstructionStreamMap* instruction_stream_map() { return &code_map_; }
WeakCodeRegistry* weak_code_registry() { return &weak_code_registry_; }
size_t GetEstimatedMemoryUsage() const;
@@ -292,7 +292,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
Isolate* const isolate_;
CodeEntryStorage& code_entries_;
- CodeMap code_map_;
+ InstructionStreamMap code_map_;
WeakCodeRegistry weak_code_registry_;
ProfilerEventsProcessor* processor_;
};
@@ -375,7 +375,9 @@ class V8_EXPORT_PRIVATE CpuProfiler {
ProfilerListener* profiler_listener_for_test() const {
return profiler_listener_.get();
}
- CodeMap* code_map_for_test() { return code_observer_->code_map(); }
+ InstructionStreamMap* code_map_for_test() {
+ return code_observer_->instruction_stream_map();
+ }
private:
void StartProcessorIfNotStarted();
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 2f0e8aa31d..37d807ed87 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -14,6 +14,7 @@
#include "src/debug/debug.h"
#include "src/handles/global-handles.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/heap.h"
#include "src/heap/safepoint.h"
#include "src/numbers/conversions.h"
#include "src/objects/allocation-site-inl.h"
@@ -40,6 +41,12 @@
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/output-stream-writer.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/names-provider.h"
+#include "src/wasm/string-builder.h"
+#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -181,7 +188,8 @@ class HeapEntryVerifier {
// Objects that have been checked via a call to CheckStrongReference or
// CheckWeakReference, or deliberately skipped via a call to
// MarkReferenceCheckedWithoutChecking.
- std::unordered_set<HeapObject, Object::Hasher> checked_objects_;
+ std::unordered_set<HeapObject, Object::Hasher, Object::KeyEqualSafe>
+ checked_objects_;
// Objects transitively retained by the primary object. The objects in the set
// at index i are retained by the primary object via a chain of i+1
@@ -215,7 +223,7 @@ HeapGraphEdge::HeapGraphEdge(Type type, int index, HeapEntry* from,
HeapEntry::HeapEntry(HeapSnapshot* snapshot, int index, Type type,
const char* name, SnapshotObjectId id, size_t self_size,
unsigned trace_node_id)
- : type_(type),
+ : type_(static_cast<unsigned>(type)),
index_(index),
children_count_(0),
self_size_(self_size),
@@ -384,6 +392,8 @@ const char* HeapEntry::TypeAsString() const {
return "/bigint/";
case kObjectShape:
return "/object shape/";
+ case kWasmObject:
+ return "/wasm object/";
default: return "???";
}
}
@@ -850,8 +860,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
} else if (InstanceTypeChecker::IsBigInt(instance_type)) {
return AddEntry(object, HeapEntry::kBigInt, "bigint");
- } else if (InstanceTypeChecker::IsCode(instance_type) ||
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
+ } else if (InstanceTypeChecker::IsInstructionStream(instance_type) ||
+ InstanceTypeChecker::IsCode(instance_type)) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
@@ -872,6 +882,23 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
} else if (InstanceTypeChecker::IsHeapNumber(instance_type)) {
return AddEntry(object, HeapEntry::kHeapNumber, "heap number");
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (InstanceTypeChecker::IsWasmObject(instance_type)) {
+ WasmTypeInfo info = object.map().wasm_type_info();
+ wasm::NamesProvider* names =
+ info.instance().module_object().native_module()->GetNamesProvider();
+ wasm::StringBuilder sb;
+ if (InstanceTypeChecker::IsWasmStruct(instance_type)) {
+ sb << "wasm struct / ";
+ } else {
+ sb << "wasm array / ";
+ }
+ names->PrintTypeName(sb, info.type_index());
+ sb << '\0';
+ const char* name = names_->GetCopy(sb.start());
+ return AddEntry(object, HeapEntry::kWasmObject, name);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
return AddEntry(object, GetSystemEntryType(object),
GetSystemEntryName(object));
}
@@ -950,7 +977,7 @@ HeapEntry::Type V8HeapExplorer::GetSystemEntryType(HeapObject object) {
InstanceTypeChecker::IsArrayBoilerplateDescription(type) ||
InstanceTypeChecker::IsBytecodeArray(type) ||
InstanceTypeChecker::IsClosureFeedbackCellArray(type) ||
- InstanceTypeChecker::IsCodeDataContainer(type) ||
+ InstanceTypeChecker::IsCode(type) ||
InstanceTypeChecker::IsFeedbackCell(type) ||
InstanceTypeChecker::IsFeedbackMetadata(type) ||
InstanceTypeChecker::IsFeedbackVector(type) ||
@@ -1049,19 +1076,19 @@ class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
}
}
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override {
VisitSlotImpl(code_cage_base(), slot);
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ InstructionStream target =
+ InstructionStream::FromTargetAddress(rinfo->target_address());
VisitHeapObjectImpl(target, -1);
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
HeapObject object = rinfo->target_object(cage_base());
- if (host.IsWeakObject(object)) {
+ if (rinfo->code().IsWeakObject(object)) {
generator_->SetWeakReference(parent_, next_index_++, object, {});
} else {
VisitHeapObjectImpl(object, -1);
@@ -1191,6 +1218,12 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
ExtractBytecodeArrayReferences(entry, BytecodeArray::cast(obj));
} else if (obj.IsScopeInfo()) {
ExtractScopeInfoReferences(entry, ScopeInfo::cast(obj));
+#if V8_ENABLE_WEBASSEMBLY
+ } else if (obj.IsWasmStruct()) {
+ ExtractWasmStructReferences(WasmStruct::cast(obj), entry);
+ } else if (obj.IsWasmArray()) {
+ ExtractWasmArrayReferences(WasmArray::cast(obj), entry);
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
@@ -1393,18 +1426,8 @@ void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
FixedArray::OffsetOfElementAt(index));
}
- SetWeakReference(entry, "optimized_code_list",
- context.get(Context::OPTIMIZED_CODE_LIST),
- Context::OffsetOfElementAt(Context::OPTIMIZED_CODE_LIST),
- HeapEntry::kCustomWeakPointer);
- SetWeakReference(entry, "deoptimized_code_list",
- context.get(Context::DEOPTIMIZED_CODE_LIST),
- Context::OffsetOfElementAt(Context::DEOPTIMIZED_CODE_LIST),
- HeapEntry::kCustomWeakPointer);
- static_assert(Context::OPTIMIZED_CODE_LIST == Context::FIRST_WEAK_SLOT);
- static_assert(Context::NEXT_CONTEXT_LINK + 1 ==
- Context::NATIVE_CONTEXT_SLOTS);
- static_assert(Context::FIRST_WEAK_SLOT + 3 ==
+ static_assert(Context::NEXT_CONTEXT_LINK == Context::FIRST_WEAK_SLOT);
+ static_assert(Context::FIRST_WEAK_SLOT + 1 ==
Context::NATIVE_CONTEXT_SLOTS);
}
}
@@ -1478,12 +1501,9 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
HeapEntry* entry, SharedFunctionInfo shared) {
std::unique_ptr<char[]> name = shared.DebugNameCStr();
- CodeT code = shared.GetCode();
- // Don't try to get the Code object from Code-less embedded builtin.
+ Code code = shared.GetCode(isolate());
HeapObject maybe_code_obj =
- V8_EXTERNAL_CODE_SPACE_BOOL && code.is_off_heap_trampoline()
- ? HeapObject::cast(code)
- : FromCodeT(code);
+ code.has_instruction_stream() ? FromCode(code) : HeapObject::cast(code);
if (name[0] != '\0') {
TagObject(maybe_code_obj,
names_->GetFormatted("(code for %s)", name.get()));
@@ -1554,16 +1574,16 @@ void V8HeapExplorer::ExtractWeakCellReferences(HeapEntry* entry,
WeakCell::kUnregisterTokenOffset);
}
-void V8HeapExplorer::TagBuiltinCodeObject(CodeT code, const char* name) {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- TagObject(code, names_->GetFormatted("(%s builtin handle)", name));
- }
- if (!V8_EXTERNAL_CODE_SPACE_BOOL || !code.is_off_heap_trampoline()) {
- TagObject(FromCodeT(code), names_->GetFormatted("(%s builtin)", name));
+void V8HeapExplorer::TagBuiltinCodeObject(Code code, const char* name) {
+ TagObject(code, names_->GetFormatted("(%s builtin handle)", name));
+ if (code.has_instruction_stream()) {
+ TagObject(FromCode(code), names_->GetFormatted("(%s builtin)", name));
}
}
void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
+ if (!code.has_instruction_stream()) return;
+
TagObject(code.relocation_info(), "(code relocation info)", HeapEntry::kCode);
SetInternalReference(entry, "relocation_info", code.relocation_info(),
Code::kRelocationInfoOffset);
@@ -1839,7 +1859,8 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
}
Name k = descs.GetKey(i);
- FieldIndex field_index = FieldIndex::ForDescriptor(js_obj.map(), i);
+ FieldIndex field_index =
+ FieldIndex::ForDetails(js_obj.map(), details);
Object value = js_obj.RawFastPropertyAt(field_index);
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
@@ -1945,6 +1966,39 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject js_obj,
}
}
+#if V8_ENABLE_WEBASSEMBLY
+
+void V8HeapExplorer::ExtractWasmStructReferences(WasmStruct obj,
+ HeapEntry* entry) {
+ wasm::StructType* type = obj.type();
+ WasmTypeInfo info = obj.map().wasm_type_info();
+ wasm::NamesProvider* names =
+ info.instance().module_object().native_module()->GetNamesProvider();
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ if (!type->field(i).is_reference()) continue;
+ wasm::StringBuilder sb;
+ names->PrintFieldName(sb, info.type_index(), i);
+ sb << '\0';
+ const char* field_name = names_->GetCopy(sb.start());
+ int field_offset = type->field_offset(i);
+ Object value = obj.RawField(field_offset).load(entry->isolate());
+ HeapEntry* value_entry = GetEntry(value);
+ entry->SetNamedReference(HeapGraphEdge::kProperty, field_name, value_entry,
+ generator_);
+ MarkVisitedField(WasmStruct::kHeaderSize + field_offset);
+ }
+}
+void V8HeapExplorer::ExtractWasmArrayReferences(WasmArray obj,
+ HeapEntry* entry) {
+ if (!obj.type()->element_type().is_reference()) return;
+ for (uint32_t i = 0; i < obj.length(); i++) {
+ SetElementReference(entry, i, obj.ElementSlot(i).load(entry->isolate()));
+ MarkVisitedField(obj.element_offset(i));
+ }
+}
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
JSFunction V8HeapExplorer::GetConstructor(Isolate* isolate,
JSReceiver receiver) {
DisallowGarbageCollection no_gc;
@@ -1986,7 +2040,7 @@ class RootsReferencesExtractor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot object) override {
if (root == Root::kBuiltins) {
- explorer_->TagBuiltinCodeObject(CodeT::cast(*object), description);
+ explorer_->TagBuiltinCodeObject(Code::cast(*object), description);
}
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
*object);
@@ -2011,37 +2065,17 @@ class RootsReferencesExtractor : public RootVisitor {
}
}
- void VisitRunningCode(FullObjectSlot p) override {
- // Must match behavior in
- // MarkCompactCollector::RootMarkingVisitor::VisitRunningCode, which treats
- // deoptimization literals in running code as stack roots.
- HeapObject value = HeapObject::cast(*p);
- if (V8_EXTERNAL_CODE_SPACE_BOOL && !IsCodeSpaceObject(value)) {
- // When external code space is enabled, the slot might contain a CodeT
- // object representing an embedded builtin, which doesn't require
- // additional processing.
- DCHECK(CodeT::cast(value).is_off_heap_trampoline());
- } else {
- Code code = Code::cast(value);
- if (code.kind() != CodeKind::BASELINE) {
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(code.deoptimization_data());
- if (deopt_data.length() > 0) {
- DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
- int literals_length = literals.length();
- for (int i = 0; i < literals_length; ++i) {
- MaybeObject maybe_literal = literals.Get(i);
- HeapObject heap_literal;
- if (maybe_literal.GetHeapObject(&heap_literal)) {
- VisitRootPointer(Root::kStackRoots, nullptr,
- FullObjectSlot(&heap_literal));
- }
- }
- }
- }
+ // Keep this synced with
+ // MarkCompactCollector::RootMarkingVisitor::VisitRunningCode.
+ void VisitRunningCode(FullObjectSlot code_slot,
+ FullObjectSlot istream_or_smi_zero_slot) final {
+ Object istream_or_smi_zero = *istream_or_smi_zero_slot;
+ if (istream_or_smi_zero != Smi::zero()) {
+ Code code = Code::cast(*code_slot);
+ code.IterateDeoptimizationLiterals(this);
+ VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot);
}
- // Finally visit the Code itself.
- VisitRootPointer(Root::kStackRoots, nullptr, p);
+ VisitRootPointer(Root::kStackRoots, nullptr, code_slot);
}
private:
@@ -2128,9 +2162,9 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool V8HeapExplorer::IsEssentialObject(Object object) {
if (!object.IsHeapObject()) return false;
- // Avoid comparing Code objects with non-Code objects below.
- if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- IsCodeSpaceObject(HeapObject::cast(object))) {
+ // Avoid comparing InstructionStream objects with non-InstructionStream
+ // objects below.
+ if (IsCodeSpaceObject(HeapObject::cast(object))) {
return true;
}
Isolate* isolate = heap_->isolate();
@@ -2152,9 +2186,6 @@ bool V8HeapExplorer::IsEssentialHiddenReference(Object parent,
if (parent.IsAllocationSite() &&
field_offset == AllocationSite::kWeakNextOffset)
return false;
- if (parent.IsCodeDataContainer() &&
- field_offset == CodeDataContainer::kNextCodeLinkOffset)
- return false;
if (parent.IsContext() &&
field_offset == Context::OffsetOfElementAt(Context::NEXT_CONTEXT_LINK))
return false;
@@ -2333,7 +2364,8 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
}
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
- const char* name = GetStrongGcSubrootName(child_obj);
+ auto child_heap_obj = HeapObject::cast(child_obj);
+ const char* name = GetStrongGcSubrootName(child_heap_obj);
HeapGraphEdge::Type edge_type =
is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kInternal;
if (name != nullptr) {
@@ -2351,9 +2383,9 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
// Add a shortcut to JS global object reference at snapshot root.
// That allows the user to easily find global objects. They are
// also used as starting points in distance calculations.
- if (is_weak || !child_obj.IsNativeContext()) return;
+ if (is_weak || !child_heap_obj.IsNativeContext()) return;
- JSGlobalObject global = Context::cast(child_obj).global_object();
+ JSGlobalObject global = Context::cast(child_heap_obj).global_object();
if (!global.IsJSGlobalObject()) return;
if (!user_roots_.insert(global).second) return;
@@ -2361,13 +2393,15 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
SetUserGlobalReference(global);
}
-const char* V8HeapExplorer::GetStrongGcSubrootName(Object object) {
+const char* V8HeapExplorer::GetStrongGcSubrootName(HeapObject object) {
if (strong_gc_subroot_names_.empty()) {
Isolate* isolate = Isolate::FromHeap(heap_);
for (RootIndex root_index = RootIndex::kFirstStrongOrReadOnlyRoot;
root_index <= RootIndex::kLastStrongOrReadOnlyRoot; ++root_index) {
const char* name = RootsTable::name(root_index);
- strong_gc_subroot_names_.emplace(isolate->root(root_index), name);
+ Object root = isolate->root(root_index);
+ CHECK(!root.IsSmi());
+ strong_gc_subroot_names_.emplace(HeapObject::cast(root), name);
}
CHECK(!strong_gc_subroot_names_.empty());
}
@@ -3010,7 +3044,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("sliced string") ","
JSON_S("symbol") ","
JSON_S("bigint") ","
- JSON_S("object shape")) ","
+ JSON_S("object shape") ","
+ JSON_S("wasm object")) ","
JSON_S("string") ","
JSON_S("number") ","
JSON_S("number") ","
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 4237475590..5680e669a4 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -120,6 +120,8 @@ class HeapEntry {
kSymbol = v8::HeapGraphNode::kSymbol,
kBigInt = v8::HeapGraphNode::kBigInt,
kObjectShape = v8::HeapGraphNode::kObjectShape,
+ kWasmObject = v8::HeapGraphNode::kWasmObject,
+ kNumTypes,
};
HeapEntry(HeapSnapshot* snapshot, int index, Type type, const char* name,
@@ -127,7 +129,7 @@ class HeapEntry {
HeapSnapshot* snapshot() { return snapshot_; }
Type type() const { return static_cast<Type>(type_); }
- void set_type(Type type) { type_ = type; }
+ void set_type(Type type) { type_ = static_cast<unsigned>(type); }
const char* name() const { return name_; }
void set_name(const char* name) { name_ = name; }
SnapshotObjectId id() const { return id_; }
@@ -189,7 +191,8 @@ class HeapEntry {
V8_INLINE std::vector<HeapGraphEdge*>::iterator children_end() const;
const char* TypeAsString() const;
- unsigned type_: 4;
+ static_assert(kNumTypes <= 1 << 4);
+ unsigned type_ : 4;
unsigned index_ : 28; // Supports up to ~250M objects.
union {
// The count is used during the snapshot build phase,
@@ -391,7 +394,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
void CollectGlobalObjectsTags();
void MakeGlobalObjectTagMap(const IsolateSafepointScope& safepoint_scope);
- void TagBuiltinCodeObject(CodeT code, const char* name);
+ void TagBuiltinCodeObject(Code code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
const char* name,
@@ -466,6 +469,11 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void ExtractElementReferences(JSObject js_obj, HeapEntry* entry);
void ExtractInternalReferences(JSObject js_obj, HeapEntry* entry);
+#if V8_ENABLE_WEBASSEMBLY
+ void ExtractWasmStructReferences(WasmStruct obj, HeapEntry* entry);
+ void ExtractWasmArrayReferences(WasmArray obj, HeapEntry* entry);
+#endif // V8_ENABLE_WEBASSEMBLY
+
bool IsEssentialObject(Object object);
bool IsEssentialHiddenReference(Object parent, int field_offset);
@@ -500,7 +508,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void SetGcRootsReference(Root root);
void SetGcSubrootReference(Root root, const char* description, bool is_weak,
Object child);
- const char* GetStrongGcSubrootName(Object object);
+ const char* GetStrongGcSubrootName(HeapObject object);
void TagObject(Object obj, const char* tag,
base::Optional<HeapEntry::Type> type = {});
void RecursivelyTagConstantPool(Object obj, const char* tag,
@@ -518,8 +526,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
global_object_tag_pairs_;
std::unordered_map<JSGlobalObject, const char*, Object::Hasher>
global_object_tag_map_;
- std::unordered_map<Object, const char*, Object::Hasher>
- strong_gc_subroot_names_;
+ UnorderedHeapObjectMap<const char*> strong_gc_subroot_names_;
std::unordered_set<JSGlobalObject, Object::Hasher> user_roots_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
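The kNumTypes sentinel added to HeapEntry::Type above exists so the 4-bit type_ bitfield can be validated at compile time. A minimal standalone sketch of the same pattern, with illustrative names that are not part of this patch:

    #include <cstdint>

    // Sentinel-guarded bitfield: adding an enumerator past the field's
    // capacity becomes a compile error instead of silent truncation.
    enum NodeType : uint8_t {
      kHidden,
      kObject,
      kString,
      kWasmObject,
      kNumTypes,  // Keep last; counts the enumerators above.
    };

    struct Node {
      static_assert(kNumTypes <= 1 << 4, "type_ bitfield is too narrow");
      unsigned type_ : 4;
      unsigned index_ : 28;  // Remaining bits of the 32-bit word.
    };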
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 06b8a8e1e7..06edfa029e 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -941,11 +941,12 @@ void CodeEntryStorage::DecRef(CodeEntry* entry) {
}
}
-CodeMap::CodeMap(CodeEntryStorage& storage) : code_entries_(storage) {}
+InstructionStreamMap::InstructionStreamMap(CodeEntryStorage& storage)
+ : code_entries_(storage) {}
-CodeMap::~CodeMap() { Clear(); }
+InstructionStreamMap::~InstructionStreamMap() { Clear(); }
-void CodeMap::Clear() {
+void InstructionStreamMap::Clear() {
for (auto& slot : code_map_) {
if (CodeEntry* entry = slot.second.entry) {
code_entries_.DecRef(entry);
@@ -958,12 +959,13 @@ void CodeMap::Clear() {
code_map_.clear();
}
-void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
+void InstructionStreamMap::AddCode(Address addr, CodeEntry* entry,
+ unsigned size) {
code_map_.emplace(addr, CodeEntryMapInfo{entry, size});
entry->set_instruction_start(addr);
}
-bool CodeMap::RemoveCode(CodeEntry* entry) {
+bool InstructionStreamMap::RemoveCode(CodeEntry* entry) {
auto range = code_map_.equal_range(entry->instruction_start());
for (auto i = range.first; i != range.second; ++i) {
if (i->second.entry == entry) {
@@ -975,7 +977,7 @@ bool CodeMap::RemoveCode(CodeEntry* entry) {
return false;
}
-void CodeMap::ClearCodesInRange(Address start, Address end) {
+void InstructionStreamMap::ClearCodesInRange(Address start, Address end) {
auto left = code_map_.upper_bound(start);
if (left != code_map_.begin()) {
--left;
@@ -988,7 +990,8 @@ void CodeMap::ClearCodesInRange(Address start, Address end) {
code_map_.erase(left, right);
}
-CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
+CodeEntry* InstructionStreamMap::FindEntry(Address addr,
+ Address* out_instruction_start) {
// Note that an address may correspond to multiple CodeEntry objects. An
// arbitrary selection is made (as per multimap spec) in the event of a
// collision.
@@ -1003,7 +1006,7 @@ CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
return ret;
}
-void CodeMap::MoveCode(Address from, Address to) {
+void InstructionStreamMap::MoveCode(Address from, Address to) {
if (from == to) return;
auto range = code_map_.equal_range(from);
@@ -1026,14 +1029,14 @@ void CodeMap::MoveCode(Address from, Address to) {
code_map_.erase(range.first, it);
}
-void CodeMap::Print() {
+void InstructionStreamMap::Print() {
for (const auto& pair : code_map_) {
base::OS::Print("%p %5d %s\n", reinterpret_cast<void*>(pair.first),
pair.second.size, pair.second.entry->name());
}
}
-size_t CodeMap::GetEstimatedMemoryUsage() const {
+size_t InstructionStreamMap::GetEstimatedMemoryUsage() const {
size_t map_size = 0;
for (const auto& pair : code_map_) {
map_size += sizeof(pair.first) + sizeof(pair.second) +
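The FindEntry comment above notes that a start address may map to several CodeEntry objects and that the multimap picks an arbitrary one. A hedged sketch of that kind of address-range lookup over an ordered multimap; the names and the size check are illustrative assumptions, not V8's exact implementation:

    #include <cstdint>
    #include <map>

    using Address = uintptr_t;

    struct EntryInfo {
      const char* name;
      unsigned size;  // Size of the instruction range starting at the key.
    };

    // Returns the entry whose [start, start + size) range covers |addr|,
    // or nullptr if none does.
    const EntryInfo* FindCovering(
        const std::multimap<Address, EntryInfo>& code_map, Address addr) {
      auto it = code_map.upper_bound(addr);  // First entry starting after addr.
      if (it == code_map.begin()) return nullptr;
      --it;  // Candidate starting at or before addr (arbitrary on ties).
      return addr < it->first + it->second.size ? &it->second : nullptr;
    }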
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 3a0e3a8f2f..bf2e95b88c 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -125,7 +125,7 @@ class CodeEntry {
}
// Returns the start address of the instruction segment represented by this
- // CodeEntry. Used as a key in the containing CodeMap.
+ // CodeEntry. Used as a key in the containing InstructionStreamMap.
Address instruction_start() const { return instruction_start_; }
void set_instruction_start(Address address) { instruction_start_ = address; }
@@ -141,7 +141,7 @@ class CodeEntry {
}
// Returns whether or not the lifetime of this CodeEntry is reference
- // counted, and managed by a CodeMap.
+ // counted, and managed by an InstructionStreamMap.
bool is_ref_counted() const { return RefCountedField::decode(bit_field_); }
uint32_t GetHash() const;
@@ -484,18 +484,18 @@ class CpuProfileMaxSamplesCallbackTask : public v8::Task {
std::unique_ptr<DiscardedSamplesDelegate> delegate_;
};
-class V8_EXPORT_PRIVATE CodeMap {
+class V8_EXPORT_PRIVATE InstructionStreamMap {
public:
- explicit CodeMap(CodeEntryStorage& storage);
- ~CodeMap();
- CodeMap(const CodeMap&) = delete;
- CodeMap& operator=(const CodeMap&) = delete;
+ explicit InstructionStreamMap(CodeEntryStorage& storage);
+ ~InstructionStreamMap();
+ InstructionStreamMap(const InstructionStreamMap&) = delete;
+ InstructionStreamMap& operator=(const InstructionStreamMap&) = delete;
- // Adds the given CodeEntry to the CodeMap. The CodeMap takes ownership of
- // the CodeEntry.
+ // Adds the given CodeEntry to the InstructionStreamMap. The
+ // InstructionStreamMap takes ownership of the CodeEntry.
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
- // Attempts to remove the given CodeEntry from the CodeMap.
+ // Attempts to remove the given CodeEntry from the InstructionStreamMap.
// Returns true iff the entry was found and removed.
bool RemoveCode(CodeEntry*);
void ClearCodesInRange(Address start, Address end);
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 458aac21f8..688bdc1027 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -73,17 +73,15 @@ void ProfilerListener::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
void ProfilerListener::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name) {
+ PtrComprCageBase cage_base(isolate_);
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- PtrComprCageBase cage_base(isolate_);
rec->instruction_start = code->InstructionStart(cage_base);
rec->entry =
code_entries_.Create(tag, GetName(shared->DebugNameCStr().get()),
GetName(InferScriptName(*script_name, *shared)),
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
- DCHECK_IMPLIES(code->IsCode(cage_base),
- code->kind(cage_base) == CodeKind::BASELINE);
rec->entry->FillFunctionInfo(*shared);
rec->instruction_size = code->InstructionSize(cage_base);
weak_code_registry_.Track(rec->entry, code);
@@ -130,14 +128,13 @@ void ProfilerListener::CodeCreateEvent(CodeTag tag,
bool is_baseline = abstract_code->kind(cage_base) == CodeKind::BASELINE;
Handle<ByteArray> source_position_table(
- abstract_code->SourcePositionTable(cage_base, *shared), isolate_);
+ abstract_code->SourcePositionTable(isolate_, *shared), isolate_);
std::unique_ptr<baseline::BytecodeOffsetIterator> baseline_iterator;
if (is_baseline) {
Handle<BytecodeArray> bytecodes(shared->GetBytecodeArray(isolate_),
isolate_);
Handle<ByteArray> bytecode_offsets(
- abstract_code->ToCode(cage_base).bytecode_offset_table(cage_base),
- isolate_);
+ abstract_code->GetCode().bytecode_offset_table(cage_base), isolate_);
baseline_iterator = std::make_unique<baseline::BytecodeOffsetIterator>(
bytecode_offsets, bytecodes);
}
@@ -164,9 +161,9 @@ void ProfilerListener::CodeCreateEvent(CodeTag tag,
} else {
DCHECK(!is_baseline);
DCHECK(abstract_code->IsCode(cage_base));
- Handle<Code> code = handle(abstract_code->GetCode(), isolate_);
std::vector<SourcePositionInfo> stack =
- it.source_position().InliningStack(code);
+ it.source_position().InliningStack(isolate_,
+ abstract_code->GetCode());
DCHECK(!stack.empty());
// When we have an inlining id and we are doing cross-script inlining,
@@ -196,7 +193,7 @@ void ProfilerListener::CodeCreateEvent(CodeTag tag,
// kLeafNodeLineNumbers mode. Creating a SourcePositionInfo is a handy
// way of getting both easily.
SourcePositionInfo start_pos_info(
- SourcePosition(pos_info.shared->StartPosition()),
+ isolate_, SourcePosition(pos_info.shared->StartPosition()),
pos_info.shared);
CodeEntry* inline_entry = code_entries_.Create(
@@ -297,13 +294,22 @@ void ProfilerListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
+void ProfilerListener::CodeMoveEvent(InstructionStream from,
+ InstructionStream to) {
DisallowGarbageCollection no_gc;
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeMove);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- PtrComprCageBase cage_base(isolate_);
- rec->from_instruction_start = from.InstructionStart(cage_base);
- rec->to_instruction_start = to.InstructionStart(cage_base);
+ rec->from_instruction_start = from.instruction_start();
+ rec->to_instruction_start = to.instruction_start();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) {
+ DisallowGarbageCollection no_gc;
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeMove);
+ CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+ rec->from_instruction_start = from.GetFirstBytecodeAddress();
+ rec->to_instruction_start = to.GetFirstBytecodeAddress();
DispatchCodeEvent(evt_rec);
}
@@ -408,7 +414,8 @@ void ProfilerListener::AttachDeoptInlinedFrames(Handle<Code> code,
// frame. These don't escape this function, but quickly add up. This
// scope limits their lifetime.
HandleScope scope(isolate_);
- std::vector<SourcePositionInfo> stack = last_position.InliningStack(code);
+ std::vector<SourcePositionInfo> stack =
+ last_position.InliningStack(isolate_, *code);
CpuProfileDeoptFrame* deopt_frames =
new CpuProfileDeoptFrame[stack.size()];
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index f409d4cfea..eec7b08bfa 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -56,7 +56,8 @@ class V8_EXPORT_PRIVATE ProfilerListener : public LogEventListener,
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override;
void RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) override;
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override;
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override;
void CodeMovingGCEvent() override {}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 50a32dd4d5..c06395125b 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -81,7 +81,12 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
HeapObject heap_object = HeapObject::FromAddress(soon_object);
Handle<Object> obj(heap_object, isolate_);
- Local<v8::Value> loc = v8::Utils::ToLocal(obj);
+ // Since soon_object can be in code space we can't use v8::Utils::ToLocal.
+ DCHECK(obj.is_null() ||
+ (obj->IsSmi() ||
+ (V8_EXTERNAL_CODE_SPACE_BOOL && IsCodeSpaceObject(heap_object)) ||
+ !obj->IsTheHole()));
+ Local<v8::Value> loc(reinterpret_cast<v8::Value*>(obj.location()));
AllocationNode* node = AddStack();
node->allocations_[size]++;
@@ -145,7 +150,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
AllocationNode* node = &profile_root_;
std::vector<SharedFunctionInfo> stack;
- JavaScriptFrameIterator frame_it(isolate_);
+ JavaScriptStackFrameIterator frame_it(isolate_);
int frames_captured = 0;
bool found_arguments_marker_frames = false;
while (!frame_it.done() && frames_captured < stack_depth_) {
diff --git a/deps/v8/src/profiler/symbolizer.cc b/deps/v8/src/profiler/symbolizer.cc
index 8528b62693..7822cc8930 100644
--- a/deps/v8/src/profiler/symbolizer.cc
+++ b/deps/v8/src/profiler/symbolizer.cc
@@ -12,7 +12,8 @@
namespace v8 {
namespace internal {
-Symbolizer::Symbolizer(CodeMap* code_map) : code_map_(code_map) {}
+Symbolizer::Symbolizer(InstructionStreamMap* instruction_stream_map)
+ : code_map_(instruction_stream_map) {}
CodeEntry* Symbolizer::FindEntry(Address address,
Address* out_instruction_start) {
@@ -80,9 +81,9 @@ Symbolizer::SymbolizedSample Symbolizer::SymbolizeTickSample(
pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
}
// If pc is in the function code before it set up stack frame or after the
- // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
- // ebp contains the return address of the current function and skips the
- // caller's frame. Check for this case and just skip such samples.
+ // frame was destroyed, StackFrameIteratorForProfiler incorrectly thinks
+ // that ebp contains the return address of the current function and skips
+ // the caller's frame. Check for this case and just skip such samples.
if (pc_entry) {
int pc_offset =
static_cast<int>(attributed_pc - pc_entry_instruction_start);
diff --git a/deps/v8/src/profiler/symbolizer.h b/deps/v8/src/profiler/symbolizer.h
index 76a1af28e8..91ca9f2275 100644
--- a/deps/v8/src/profiler/symbolizer.h
+++ b/deps/v8/src/profiler/symbolizer.h
@@ -12,11 +12,11 @@ namespace v8 {
namespace internal {
class CodeEntry;
-class CodeMap;
+class InstructionStreamMap;
class V8_EXPORT_PRIVATE Symbolizer {
public:
- explicit Symbolizer(CodeMap* code_map);
+ explicit Symbolizer(InstructionStreamMap* instruction_stream_map);
Symbolizer(const Symbolizer&) = delete;
Symbolizer& operator=(const Symbolizer&) = delete;
@@ -25,17 +25,17 @@ class V8_EXPORT_PRIVATE Symbolizer {
int src_line;
};
- // Use the CodeMap to turn the raw addresses recorded in the sample into
- // code/function names.
+ // Use the InstructionStreamMap to turn the raw addresses recorded in the
+ // sample into code/function names.
SymbolizedSample SymbolizeTickSample(const TickSample& sample);
- CodeMap* code_map() { return code_map_; }
+ InstructionStreamMap* instruction_stream_map() { return code_map_; }
private:
CodeEntry* FindEntry(Address address,
Address* out_instruction_start = nullptr);
- CodeMap* const code_map_;
+ InstructionStreamMap* const code_map_;
};
} // namespace internal
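As a usage note for the declarations above, a hedged sketch of how the renamed pieces compose; it assumes a CodeEntryStorage named storage and a captured TickSample named sample already exist, and only SymbolizeTickSample and src_line are taken from the patch itself:

    // Illustrative wiring only, not code from this change.
    InstructionStreamMap instruction_stream_map(storage);
    Symbolizer symbolizer(&instruction_stream_map);
    Symbolizer::SymbolizedSample result = symbolizer.SymbolizeTickSample(sample);
    // result.src_line now holds the source line attributed to the sample's pc.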
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 5b27af707f..172b004ef5 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -301,11 +301,11 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
}
}
- i::SafeStackFrameIterator it(isolate, reinterpret_cast<i::Address>(regs->pc),
- reinterpret_cast<i::Address>(regs->fp),
- reinterpret_cast<i::Address>(regs->sp),
- reinterpret_cast<i::Address>(regs->lr),
- js_entry_sp);
+ i::StackFrameIteratorForProfiler it(
+ isolate, reinterpret_cast<i::Address>(regs->pc),
+ reinterpret_cast<i::Address>(regs->fp),
+ reinterpret_cast<i::Address>(regs->sp),
+ reinterpret_cast<i::Address>(regs->lr), js_entry_sp);
if (it.done()) return true;
@@ -327,8 +327,8 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
frames[i++] = reinterpret_cast<void*>(timer->counter());
timer = timer->parent();
}
-#endif // V8_RUNTIME_CALL_STATS
if (i == frames_limit) break;
+#endif // V8_RUNTIME_CALL_STATS
if (it.frame()->is_interpreted()) {
// For interpreted frames use the bytecode array pointer as the pc.
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index d18ae09fb1..6d341282e4 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -14,16 +14,31 @@ namespace internal {
TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
: isolate_(isolate), profiling_enabled_(false) {
+#if defined(V8_USE_PERFETTO)
+ TrackEvent::AddSessionObserver(this);
+ // Fire the observer if tracing is already in progress.
+ if (TrackEvent::IsEnabled()) OnStart({});
+#else
V8::GetCurrentPlatform()->GetTracingController()->AddTraceStateObserver(this);
+#endif
}
TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {
StopProfiling();
+#if defined(V8_USE_PERFETTO)
+ TrackEvent::RemoveSessionObserver(this);
+#else
V8::GetCurrentPlatform()->GetTracingController()->RemoveTraceStateObserver(
this);
+#endif
}
+#if defined(V8_USE_PERFETTO)
+void TracingCpuProfilerImpl::OnStart(
+ const perfetto::DataSourceBase::StartArgs&) {
+#else
void TracingCpuProfilerImpl::OnTraceEnabled() {
+#endif
bool enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"), &enabled);
@@ -36,7 +51,11 @@ void TracingCpuProfilerImpl::OnTraceEnabled() {
this);
}
+#if defined(V8_USE_PERFETTO)
+void TracingCpuProfilerImpl::OnStop(const perfetto::DataSourceBase::StopArgs&) {
+#else
void TracingCpuProfilerImpl::OnTraceDisabled() {
+#endif
base::MutexGuard lock(&mutex_);
if (!profiling_enabled_) return;
profiling_enabled_ = false;
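The Perfetto branch above swaps the TracingController trace-state observer for a track-event session observer. A minimal sketch of that registration pattern outside V8, assuming the Perfetto SDK is available and track-event categories are already defined; everything beyond the calls visible in this patch is an assumption:

    #include <perfetto.h>

    // Mirrors the lifecycle used above: register in the constructor, fire
    // OnStart manually if a session is already active, deregister in the
    // destructor.
    class SamplingController : public perfetto::TrackEventSessionObserver {
     public:
      SamplingController() {
        perfetto::TrackEvent::AddSessionObserver(this);
        if (perfetto::TrackEvent::IsEnabled()) OnStart({});
      }
      ~SamplingController() override {
        perfetto::TrackEvent::RemoveSessionObserver(this);
      }
      void OnStart(const perfetto::DataSourceBase::StartArgs&) override {
        // Start the profiler here.
      }
      void OnStop(const perfetto::DataSourceBase::StopArgs&) override {
        // Stop the profiler and flush results here.
      }
    };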
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index 3adcaa63d7..0ccf7f0e5d 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -11,6 +11,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -19,16 +20,26 @@ class CpuProfiler;
class Isolate;
class TracingCpuProfilerImpl final
+#if defined(V8_USE_PERFETTO)
+ : public perfetto::TrackEventSessionObserver {
+#else
: private v8::TracingController::TraceStateObserver {
+#endif
public:
explicit TracingCpuProfilerImpl(Isolate*);
~TracingCpuProfilerImpl() override;
TracingCpuProfilerImpl(const TracingCpuProfilerImpl&) = delete;
TracingCpuProfilerImpl& operator=(const TracingCpuProfilerImpl&) = delete;
+#if defined(V8_USE_PERFETTO)
+ // perfetto::TrackEventSessionObserver
+ void OnStart(const perfetto::DataSourceBase::StartArgs&) override;
+ void OnStop(const perfetto::DataSourceBase::StopArgs&) override;
+#else
// v8::TracingController::TraceStateObserver
void OnTraceEnabled() final;
void OnTraceDisabled() final;
+#endif
private:
void StartProfiling();
diff --git a/deps/v8/src/profiler/weak-code-registry.h b/deps/v8/src/profiler/weak-code-registry.h
index 5e6cc1a079..8de4370817 100644
--- a/deps/v8/src/profiler/weak-code-registry.h
+++ b/deps/v8/src/profiler/weak-code-registry.h
@@ -35,8 +35,9 @@ class V8_EXPORT_PRIVATE WeakCodeRegistry {
private:
Isolate* const isolate_;
- // Invariant: Entries will always be removed here before the CodeMap is
- // destroyed. CodeEntries should not be freed while their heap objects exist.
+ // Invariant: Entries will always be removed here before the
+ // InstructionStreamMap is destroyed. CodeEntries should not be freed while
+ // their heap objects exist.
std::vector<CodeEntry*> entries_;
};
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 2658068b6f..89d3ea6252 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -21,7 +21,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - r4 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - r5 : Pointer to current Code object including heap object tag.
+ * - r5 : Pointer to current InstructionStream object including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
@@ -51,17 +51,18 @@ namespace internal {
* - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
- * - fp[-12] start index (character index of start).
- * - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
+ * - fp[-4] frame marker
+ * - fp[-8] end of input (address of end of string).
+ * - fp[-12] start of input (address of first character in string).
+ * - fp[-16] start index (character index of start).
+ * - fp[-20] void* input_string (location of a handle containing the string).
+ * - fp[-24] success counter (only for global regexps to count matches).
+ * - fp[-28] Offset of location before start of input (effectively character
* string start - 1). Used to initialize capture registers to a
* non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
+ * - fp[-32] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
+ * - fp[-36] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -155,9 +156,9 @@ void RegExpMacroAssemblerARM::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ ldr(r0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ ldr(r0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ str(r0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ cmp(r0, Operand(backtrack_limit()));
__ b(ne, &next);
@@ -171,7 +172,8 @@ void RegExpMacroAssemblerARM::Backtrack() {
__ bind(&next);
}
- // Pop Code offset from backtrack stack, add Code and jump to location.
+ // Pop InstructionStream offset from backtrack stack, add InstructionStream
+ // and jump to location.
Pop(r0);
__ add(pc, r0, Operand(code_pointer()));
}
@@ -194,7 +196,7 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(base::uc16 limit,
}
void RegExpMacroAssemblerARM::CheckAtStart(int cp_offset, Label* on_at_start) {
- __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ cmp(r0, r1);
@@ -203,7 +205,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(int cp_offset, Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ cmp(r0, r1);
@@ -219,8 +221,8 @@ void RegExpMacroAssemblerARM::CheckCharacterLT(base::uc16 limit,
void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
__ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
__ cmp(current_input_offset(), r0);
- __ add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
+ __ add(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kSystemPointerSize), LeaveCC, eq);
BranchOrBacktrack(eq, on_equal);
}
@@ -238,7 +240,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
if (read_backward) {
- __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r3, r3, r1);
__ cmp(current_input_offset(), r3);
BranchOrBacktrack(le, on_no_match);
@@ -339,7 +341,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -374,7 +376,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(int start_reg,
// Check that there are enough characters left in the input.
if (read_backward) {
- __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r3, r3, r1);
__ cmp(current_input_offset(), r3);
BranchOrBacktrack(le, on_no_match);
@@ -489,8 +491,8 @@ void RegExpMacroAssemblerARM::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -669,7 +671,7 @@ void RegExpMacroAssemblerARM::PushRegExpBasePointer(Register stack_pointer,
__ mov(scratch, Operand(ref));
__ ldr(scratch, MemOperand(scratch));
__ sub(scratch, stack_pointer, scratch);
- __ str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerARM::PopRegExpBasePointer(Register stack_pointer_out,
@@ -677,7 +679,7 @@ void RegExpMacroAssemblerARM::PopRegExpBasePointer(Register stack_pointer_out,
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ ldr(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ mov(scratch, Operand(ref));
__ ldr(scratch, MemOperand(scratch));
__ add(stack_pointer_out, stack_pointer_out, scratch);
@@ -696,29 +698,42 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// is generated.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
+ // Emit code to start a new stack frame. In the following we push all
+ // callee-save registers (these end up above the fp) and all register
+ // arguments (in {r0,r1,r2,r3}, these end up below the fp).
RegList registers_to_retain = {r4, r5, r6, r7, r8, r9, r10, fp};
+ __ stm(db_w, sp, registers_to_retain | lr);
+ __ mov(frame_pointer(), sp);
+
+ // Registers {r0,r1,r2,r3} are the first four arguments as per the C calling
+ // convention, and must match our specified offsets (e.g. kInputEndOffset).
+ //
+ // r0: input_string
+ // r1: start_offset
+ // r2: input_start
+ // r3: input_end
RegList argument_registers = {r0, r1, r2, r3};
- __ stm(db_w, sp, argument_registers | registers_to_retain | lr);
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
-
- static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ // Also push the frame marker.
+ __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::IRREGEXP)));
+ static_assert(kFrameTypeOffset == kFramePointerOffset - kSystemPointerSize);
+ static_assert(kInputEndOffset == kFrameTypeOffset - kSystemPointerSize);
+ static_assert(kInputStartOffset == kInputEndOffset - kSystemPointerSize);
+ static_assert(kStartIndexOffset == kInputStartOffset - kSystemPointerSize);
+ static_assert(kInputStringOffset == kStartIndexOffset - kSystemPointerSize);
+ __ stm(db_w, sp, argument_registers | r4);
+
+ static_assert(kSuccessfulCapturesOffset ==
+ kInputStringOffset - kSystemPointerSize);
__ mov(r0, Operand::Zero());
__ push(r0); // Make room for success counter and initialize it to 0.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ push(r0); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ push(r0); // The backtrack counter.
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ push(r0); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here on.
@@ -743,7 +758,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ b(ls, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
+ __ cmp(r0, Operand(num_registers_ * kSystemPointerSize));
__ b(hs, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -760,21 +775,21 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
}
// Allocate space on stack for registers.
- __ AllocateStackSpace(num_registers_ * kPointerSize);
+ __ AllocateStackSpace(num_registers_ * kSystemPointerSize);
// Load string end.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEndOffset));
// Load input start.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
+ __ ldr(r0, MemOperand(frame_pointer(), kInputStartOffset));
// Find negative length (offset of start relative to end).
__ sub(current_input_offset(), r0, end_of_input_address());
// Set r0 to address of char before start of the input string
// (effectively string position -1).
- __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
+ __ ldr(r1, MemOperand(frame_pointer(), kStartIndexOffset));
__ sub(r0, current_input_offset(), Operand(char_size()));
__ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
// Store this value in a local variable, for use when clearing
// position registers.
- __ str(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ str(r0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -800,11 +815,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
+ __ add(r1, frame_pointer(), Operand(kRegisterZeroOffset));
__ mov(r2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+ __ str(r0, MemOperand(r1, kSystemPointerSize, NegPostIndex));
__ sub(r2, r2, Operand(1), SetCC);
__ b(ne, &init_loop);
} else {
@@ -822,9 +837,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
- __ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
+ __ ldr(r1, MemOperand(frame_pointer(), kInputStartOffset));
+ __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutputOffset));
+ __ ldr(r2, MemOperand(frame_pointer(), kStartIndexOffset));
__ sub(r1, end_of_input_address(), r1);
// r1 is length of input in bytes.
if (mode_ == UC16) {
@@ -852,19 +867,19 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ add(r2, r1, Operand(r2));
__ add(r3, r1, Operand(r3));
}
- __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r0, kSystemPointerSize, PostIndex));
+ __ str(r3, MemOperand(r0, kSystemPointerSize, PostIndex));
}
}
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
+ __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Increment success counter.
__ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ str(r0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ sub(r1, r1, Operand(num_saved_registers_));
@@ -872,13 +887,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r1, Operand(num_saved_registers_));
__ b(lt, &return_r0);
- __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ str(r1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Advance the location for output.
- __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
- __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ add(r2, r2, Operand(num_saved_registers_ * kSystemPointerSize));
+ __ str(r2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -910,7 +925,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit and return r0
__ bind(&exit_label_);
if (global()) {
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
__ bind(&return_r0);
@@ -931,7 +946,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label exit_with_exception;
- // Preempt-code
+ // Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
@@ -946,7 +961,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ ldr(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
SafeReturn();
}
@@ -963,7 +979,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ cmp(r0, Operand::Zero());
@@ -1129,7 +1145,7 @@ void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ str(r0, register_location(reg));
}
@@ -1145,12 +1161,12 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ mov(r1, Operand(masm_->CodeObject()));
// We need to make room for the return address on the stack.
int stack_alignment = base::OS::ActivationFrameAlignment();
- DCHECK(IsAligned(stack_alignment, kPointerSize));
+ DCHECK(IsAligned(stack_alignment, kSystemPointerSize));
__ AllocateStackSpace(stack_alignment);
// r0 will point to the return address, placed by DirectCEntry.
@@ -1190,15 +1206,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<int>(re_frame, kStartIndex),
- static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ frame_entry<int>(re_frame, kStartIndexOffset),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
@@ -1208,7 +1225,7 @@ MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
@@ -1218,7 +1235,7 @@ void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
__ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
BranchOrBacktrack(ge, on_outside_input);
} else {
- __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
__ cmp(r0, r1);
BranchOrBacktrack(le, on_outside_input);
@@ -1265,17 +1282,32 @@ void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerARM::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
__ str(source,
- MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
+ MemOperand(backtrack_stackpointer(), kSystemPointerSize, NegPreIndex));
}
void RegExpMacroAssemblerARM::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ ldr(target,
- MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
+ MemOperand(backtrack_stackpointer(), kSystemPointerSize, PostIndex));
+}
+
+void RegExpMacroAssemblerARM::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
}
-
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index edf7650bd5..44be0d920b 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -92,42 +92,59 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
private:
// Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- static const int kStoredRegisters = kFramePointer;
+ static constexpr int kStoredRegistersOffset = kFramePointerOffset;
// Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
+ static constexpr int kReturnAddressOffset =
+ kStoredRegistersOffset + 8 * kSystemPointerSize;
// Stack parameters placed by caller.
- static const int kRegisterOutput = kReturnAddress + kPointerSize;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kDirectCall = kNumOutputRegisters + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
+ static constexpr int kRegisterOutputOffset =
+ kReturnAddressOffset + kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kRegisterOutputOffset + kSystemPointerSize;
+ static constexpr int kDirectCallOffset =
+ kNumOutputRegistersOffset + kSystemPointerSize;
+ static constexpr int kIsolateOffset = kDirectCallOffset + kSystemPointerSize;
+
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
// Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
+ static constexpr int kInputEndOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kInputStartOffset = kInputEndOffset - kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStartOffset - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kStartIndexOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kRegExpCodeSize = 1024;
+
+ static constexpr int kBacktrackConstantPoolSize = 4;
- static const int kBacktrackConstantPoolSize = 4;
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
// Check whether preemption has been requested.
void CheckPreemption();
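The header above replaces magic frame offsets with a chain of constexpr constants, and GetCode pairs each push with a static_assert on the corresponding offset. A standalone sketch of that technique with illustrative slot names (not the real Irregexp frame layout):

    // Each slot is defined relative to the previous one; if the prologue's
    // push order changes, the static_assert chain stops compiling.
    constexpr int kSystemPointerSize = sizeof(void*);

    constexpr int kFramePointerOffset = 0;
    constexpr int kFrameTypeOffset = kFramePointerOffset - kSystemPointerSize;
    constexpr int kInputEndOffset = kFrameTypeOffset - kSystemPointerSize;
    constexpr int kInputStartOffset = kInputEndOffset - kSystemPointerSize;

    static_assert(kFrameTypeOffset == -1 * kSystemPointerSize);
    static_assert(kInputStartOffset == -3 * kSystemPointerSize);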
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 2e462ba748..fe1b0f6e04 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -22,7 +22,7 @@ namespace internal {
* This assembler uses the following register assignment convention:
* - w19 : Used to temporarily store a value before a call to C code.
* See CheckNotBackReferenceIgnoreCase.
- * - x20 : Pointer to the current Code object,
+ * - x20 : Pointer to the current InstructionStream object,
* it includes the heap object tag.
* - w21 : Current position in input, as negative offset from
* the end of the string. Please notice that this is
@@ -64,14 +64,16 @@ namespace internal {
* - fp[8] lr Return from the RegExp code.
* - fp[0] fp Old frame pointer.
* ^^^^^^^^^ fp ^^^^^^^^^
- * - fp[-8] direct_call 1 => Direct call from JavaScript code.
+ * - fp[-8] frame marker
+ * - fp[-16] isolate
+ * - fp[-24] direct_call 1 => Direct call from JavaScript code.
* 0 => Call through the runtime system.
- * - fp[-16] output_size Output may fit multiple sets of matches.
- * - fp[-24] input Handle containing the input string.
- * - fp[-32] success_counter
+ * - fp[-32] output_size Output may fit multiple sets of matches.
+ * - fp[-40] input Handle containing the input string.
+ * - fp[-48] success_counter
* ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
- * - fp[-40] register N Capture registers initialized with
- * - fp[-44] register N + 1 non_position_value.
+ * - fp[-56] register N Capture registers initialized with
+ * - fp[-60] register N + 1 non_position_value.
* ... The first kNumCachedRegisters (N) registers
* ... are cached in x0 to x7.
* ... Only positions must be stored in the first
@@ -102,15 +104,13 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-const int RegExpMacroAssemblerARM64::kRegExpCodeSize;
-
RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
masm_(std::make_unique<MacroAssembler>(
isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ NewAssemblerBuffer(kInitialBufferSize))),
no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
@@ -194,9 +194,9 @@ void RegExpMacroAssemblerARM64::Backtrack() {
Label next;
UseScratchRegisterScope temps(masm_.get());
Register scratch = temps.AcquireW();
- __ Ldr(scratch, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Ldr(scratch, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ Add(scratch, scratch, 1);
- __ Str(scratch, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Str(scratch, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ Cmp(scratch, Operand(backtrack_limit()));
__ B(ne, &next);
@@ -437,7 +437,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -593,8 +593,8 @@ void RegExpMacroAssemblerARM64::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -768,7 +768,7 @@ void RegExpMacroAssemblerARM64::PushRegExpBasePointer(Register stack_pointer,
__ Mov(scratch, ref);
__ Ldr(scratch, MemOperand(scratch));
__ Sub(scratch, stack_pointer, scratch);
- __ Str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ Str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerARM64::PopRegExpBasePointer(Register stack_pointer_out,
@@ -776,7 +776,7 @@ void RegExpMacroAssemblerARM64::PopRegExpBasePointer(Register stack_pointer_out,
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ Ldr(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ Mov(scratch, ref);
__ Ldr(scratch, MemOperand(scratch));
__ Add(stack_pointer_out, stack_pointer_out, scratch);
@@ -807,18 +807,22 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// code is generated.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- // Push registers on the stack, only push the argument registers that we need.
- CPURegList argument_registers(x0, x5, x6, x7);
-
- CPURegList registers_to_retain = kCalleeSaved;
+ // Stack frame setup.
+ // Push callee-saved registers.
+ const CPURegList registers_to_retain = kCalleeSaved;
DCHECK_EQ(registers_to_retain.Count(), kNumCalleeSavedRegisters);
-
__ PushCPURegList(registers_to_retain);
- __ Push<TurboAssembler::kSignLR>(lr, fp);
- __ PushCPURegList(argument_registers);
-
- // Set frame pointer in place.
- __ Add(frame_pointer(), sp, argument_registers.Count() * kSystemPointerSize);
+ static_assert(kFrameTypeOffset == kFramePointerOffset - kSystemPointerSize);
+ __ EnterFrame(StackFrame::IRREGEXP);
+ // Only push the argument registers that we need.
+ static_assert(kIsolateOffset ==
+ kFrameTypeOffset - kPaddingAfterFrameType - kSystemPointerSize);
+ static_assert(kDirectCallOffset == kIsolateOffset - kSystemPointerSize);
+ static_assert(kNumOutputRegistersOffset ==
+ kDirectCallOffset - kSystemPointerSize);
+ static_assert(kInputStringOffset ==
+ kNumOutputRegistersOffset - kSystemPointerSize);
+ __ PushCPURegList(CPURegList{x0, x5, x6, x7});
// Initialize callee-saved registers.
__ Mov(start_offset(), w1);
@@ -887,9 +891,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Allocate space on stack.
__ Claim(num_wreg_to_allocate, kWRegSize);
- // Initialize success_counter and kBacktrackCount with 0.
- __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
- __ Str(wzr, MemOperand(frame_pointer(), kBacktrackCount));
+ // Zero-initialize the kSuccessfulCapturesOffset and kBacktrackCountOffset slots.
+ __ Str(wzr, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ Str(wzr, MemOperand(frame_pointer(), kBacktrackCountOffset));
// Find negative length (offset of start relative to end).
__ Sub(x10, input_start(), input_end());
@@ -1004,7 +1008,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// There are always an even number of capture registers. A couple of
// registers determine one match with two offsets.
DCHECK_EQ(0, num_registers_left_on_stack % 2);
- __ Add(base, frame_pointer(), kFirstCaptureOnStack);
+ __ Add(base, frame_pointer(), kFirstCaptureOnStackOffset);
// We can unroll the loop here, we should not unroll for less than 2
// registers.
@@ -1068,13 +1072,16 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Ldr(success_counter,
+ MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
__ Add(success_counter, success_counter, 1);
- __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Str(success_counter,
+ MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
- __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
+ __ Ldr(output_size,
+ MemOperand(frame_pointer(), kNumOutputRegistersOffset));
__ Sub(output_size, output_size, num_saved_registers_);
// Check whether we have enough room for another set of capture results.
__ Cmp(output_size, num_saved_registers_);
@@ -1083,7 +1090,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// The output pointer is already set to the next field in the output
// array.
// Update output size on the frame before we restart matching.
- __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+ __ Str(output_size,
+ MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -1114,7 +1122,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Exit and return w0.
__ Bind(&exit_label_);
if (global()) {
- __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Ldr(w0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
}
@@ -1123,13 +1131,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// stored base pointer).
PopRegExpBasePointer(backtrack_stackpointer(), x11);
- // Set stack pointer back to first register to retain.
- __ Mov(sp, fp);
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
-
- // Restore registers.
+ __ LeaveFrame(StackFrame::IRREGEXP);
__ PopCPURegList(registers_to_retain);
-
__ Ret();
Label exit_with_exception;
@@ -1163,7 +1166,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Call GrowStack(isolate).
static constexpr int kNumArguments = 1;
__ Mov(x0, ExternalReference::isolate_address(isolate()));
- __ CallCFunction(ExternalReference::re_grow_stack(), kNumArguments);
+ CallCFunctionFromIrregexpCode(ExternalReference::re_grow_stack(),
+ kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit with
// a stack-overflow exception. Returning from the regexp code restores the
// stack (sp <- fp) so we don't need to drop the link register from it
@@ -1244,7 +1248,7 @@ void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
- __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
+ __ Mov(w10, target + InstructionStream::kHeaderSize - kHeapObjectTag);
} else {
__ Adr(x10, label, MacroAssembler::kAdrFar);
__ Sub(x10, x10, code_pointer());
@@ -1391,8 +1395,8 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
// We should not unroll the loop for less than 2 registers.
static_assert(kNumRegistersToUnroll > 2);
// We position the base pointer to (reg_from + 1).
- int base_offset = kFirstRegisterOnStack -
- kWRegSize - (kWRegSize * reg_from);
+ int base_offset =
+ kFirstRegisterOnStackOffset - kWRegSize - (kWRegSize * reg_from);
if (num_registers > kNumRegistersToUnroll) {
Register base = x10;
__ Add(base, frame_pointer(), base_offset);
@@ -1429,12 +1433,14 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerARM64::CheckStackGuardState(
Address* return_address, Address raw_code, Address re_frame,
int start_index, const byte** input_start, const byte** input_end) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate), start_index,
- static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
- return_address, re_code, frame_entry_address<Address>(re_frame, kInput),
- input_start, input_end);
+ frame_entry<Isolate*>(re_frame, kIsolateOffset), start_index,
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int>(re_frame, kDirectCallOffset)),
+ return_address, re_code,
+ frame_entry_address<Address>(re_frame, kInputStringOffset), input_start,
+ input_end);
}
@@ -1477,7 +1483,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Mov(w3, start_offset());
// RegExp code frame pointer.
__ Mov(x2, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ Mov(x1, Operand(masm_->CodeObject()));
// We need to pass a pointer to the return address as first argument.
@@ -1498,7 +1504,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Drop(xreg_to_claim);
- // Reload the Code pointer.
+ // Reload the InstructionStream pointer.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -1537,6 +1543,21 @@ void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
}
}
+void RegExpMacroAssemblerARM64::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
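
The helper above centralizes the SetIsolateDataSlots::kNo choice so individual call sites cannot forget it; the same wrapper is added to the other ports in this patch. A minimal standalone analogue of the pattern, in plain C++ with illustrative stand-in names rather than V8's real MacroAssembler API:

#include <cstdio>

enum class SetIsolateDataSlots { kNo, kYes };

// Stand-in for the macro assembler's C-call primitive.
void CallCFunction(const char* fn, int num_args, SetIsolateDataSlots slots) {
  std::printf("call %s with %d arg(s); record caller fp/pc: %s\n", fn, num_args,
              slots == SetIsolateDataSlots::kYes ? "yes" : "no");
}

// Analogue of CallCFunctionFromIrregexpCode: callers never pass the flag, so
// the "do not touch the isolate data slots" decision lives in one place.
void CallCFunctionFromIrregexpCode(const char* fn, int num_args) {
  CallCFunction(fn, num_args, SetIsolateDataSlots::kNo);
}

int main() {
  CallCFunctionFromIrregexpCode("re_grow_stack", 1);
  CallCFunctionFromIrregexpCode("re_check_stack_guard_state", 3);
}
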
void RegExpMacroAssemblerARM64::CheckPreemption() {
// Check for preemption.
@@ -1655,14 +1676,14 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
// TODO(v8:10026): Remove when we stop compacting for code objects that are
// active on the call stack.
- __ Pop<TurboAssembler::kAuthLR>(padreg, lr);
+ __ Pop<MacroAssembler::kAuthLR>(padreg, lr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
__ Sub(lr, lr, Operand(masm_->CodeObject()));
- __ Push<TurboAssembler::kSignLR>(lr, padreg);
+ __ Push<MacroAssembler::kSignLR>(lr, padreg);
}
@@ -1673,7 +1694,7 @@ MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
num_registers_ = register_index + 1;
}
register_index -= kNumCachedRegisters;
- int offset = kFirstRegisterOnStack - register_index * kWRegSize;
+ int offset = kFirstRegisterOnStackOffset - register_index * kWRegSize;
return MemOperand(frame_pointer(), offset);
}
@@ -1684,7 +1705,7 @@ MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
DCHECK_LE(kNumCachedRegisters, register_index);
DCHECK_EQ(register_index % 2, 0);
register_index -= kNumCachedRegisters;
- int offset = kFirstCaptureOnStack - register_index * kWRegSize;
+ int offset = kFirstCaptureOnStackOffset - register_index * kWRegSize;
// capture_location is used with Stp instructions to load/store 2 registers.
// The immediate field in the encoding is limited to 7 bits (signed).
if (is_int7(offset)) {
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 3801591b64..a5164472b7 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -98,41 +98,58 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
const byte** input_end);
private:
+ static constexpr int kFramePointerOffset = 0;
+
// Above the frame pointer - Stored registers and stack passed parameters.
- static const int kFramePointer = 0;
- static const int kReturnAddress = kFramePointer + kSystemPointerSize;
+ static constexpr int kReturnAddressOffset =
+ kFramePointerOffset + kSystemPointerSize;
// Callee-saved registers (x19-x28).
- static const int kNumCalleeSavedRegisters = 10;
- static const int kCalleeSavedRegisters = kReturnAddress + kSystemPointerSize;
-
- // Below the frame pointer.
+ static constexpr int kNumCalleeSavedRegisters = 10;
+ static constexpr int kCalleeSavedRegistersOffset =
+ kReturnAddressOffset + kSystemPointerSize;
+
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
+ static constexpr int kPaddingAfterFrameType = kSystemPointerSize;
// Register parameters stored by setup code.
- static const int kIsolate = -kSystemPointerSize;
- static const int kDirectCall = kIsolate - kSystemPointerSize;
- static const int kOutputSize = kDirectCall - kSystemPointerSize;
- static const int kInput = kOutputSize - kSystemPointerSize;
+ static constexpr int kIsolateOffset =
+ kFrameTypeOffset - kPaddingAfterFrameType - kSystemPointerSize;
+ static constexpr int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
+ // For the case of global regular expression, we have room to store at least
+ // one set of capture results. For the case of non-global regexp, we ignore
+ // this value.
+ static constexpr int kNumOutputRegistersOffset =
+ kDirectCallOffset - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessCounter = kInput - kSystemPointerSize;
- static const int kBacktrackCount = kSuccessCounter - kSystemPointerSize;
+ static constexpr int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// A padding slot to preserve alignment.
- static const int kStackLocalPadding =
- kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kStackLocalPadding =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
static constexpr int kNumberOfStackLocals = 4;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
- static const int kFirstRegisterOnStack = kStackLocalPadding - kWRegSize;
+ static constexpr int kFirstRegisterOnStackOffset =
+ kStackLocalPadding - kWRegSize;
// A capture is a 64 bit value holding two position.
- static const int kFirstCaptureOnStack = kStackLocalPadding - kXRegSize;
+ static constexpr int kFirstCaptureOnStackOffset =
+ kStackLocalPadding - kXRegSize;
- // Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kInitialBufferSize = 1024;
// Registers x0 to x7 are used to store the first captures, they need to be
// retained over calls to C++ code.
@@ -141,12 +158,15 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// When initializing registers to a non-position value we can unroll
// the loop. Set the limit of registers to unroll.
- static const int kNumRegistersToUnroll = 16;
+ static constexpr int kNumRegistersToUnroll = 16;
// We are using x0 to x7 as a register cache. Each hardware register must
// contain one capture, that is two 32 bit registers. We can cache at most
// 16 registers.
- static const int kNumCachedRegisters = 16;
+ static constexpr int kNumCachedRegisters = 16;
+
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
// Check whether preemption has been requested.
void CheckPreemption();
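
The renamed constants above form one descending chain below the frame pointer, now beginning with the IRREGEXP frame type marker. A standalone sketch of that arithmetic in plain constexpr C++ (not V8 code; 8-byte pointers and 4/8-byte W/X registers are assumed for a 64-bit arm64 target):

constexpr int kSystemPointerSize = 8;
constexpr int kWRegSize = 4;  // 32-bit W register
constexpr int kXRegSize = 8;  // 64-bit X register

constexpr int kFramePointerOffset = 0;
constexpr int kFrameTypeOffset = kFramePointerOffset - kSystemPointerSize;
constexpr int kPaddingAfterFrameType = kSystemPointerSize;
constexpr int kIsolateOffset =
    kFrameTypeOffset - kPaddingAfterFrameType - kSystemPointerSize;
constexpr int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
constexpr int kNumOutputRegistersOffset =
    kDirectCallOffset - kSystemPointerSize;
constexpr int kInputStringOffset =
    kNumOutputRegistersOffset - kSystemPointerSize;
constexpr int kSuccessfulCapturesOffset =
    kInputStringOffset - kSystemPointerSize;
constexpr int kBacktrackCountOffset =
    kSuccessfulCapturesOffset - kSystemPointerSize;
constexpr int kRegExpStackBasePointerOffset =
    kBacktrackCountOffset - kSystemPointerSize;
constexpr int kStackLocalPadding =
    kRegExpStackBasePointerOffset - kSystemPointerSize;
constexpr int kFirstRegisterOnStackOffset = kStackLocalPadding - kWRegSize;
constexpr int kFirstCaptureOnStackOffset = kStackLocalPadding - kXRegSize;

// The frame marker sits directly below the saved fp; the remaining slots
// follow one at a time, with one padding slot between marker and isolate.
static_assert(kFrameTypeOffset == -8);
static_assert(kIsolateOffset == -24);
static_assert(kSuccessfulCapturesOffset == -56);
static_assert(kRegExpStackBasePointerOffset == -72);
static_assert(kFirstRegisterOnStackOffset == -84);
static_assert(kFirstCaptureOnStackOffset == -88);

int main() {}
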
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index ce6e0c75ca..566ae06829 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -69,6 +69,11 @@ class CanBeHandledVisitor final : private RegExpVisitor {
return nullptr;
}
+ void* VisitClassSetOperand(RegExpClassSetOperand* node, void*) override {
+ result_ = !node->has_strings();
+ return nullptr;
+ }
+
void* VisitClassSetExpression(RegExpClassSetExpression* node,
void*) override {
result_ = false;
@@ -391,11 +396,10 @@ class CompileVisitor : private RegExpVisitor {
return nullptr;
}
- void* VisitClassRanges(RegExpClassRanges* node, void*) override {
+ void CompileCharacterRanges(ZoneList<CharacterRange>* ranges, bool negated) {
// A character class is compiled as Disjunction over its `CharacterRange`s.
- ZoneList<CharacterRange>* ranges = node->ranges(zone_);
CharacterRange::Canonicalize(ranges);
- if (node->is_negated()) {
+ if (negated) {
// The complement of a disjoint, non-adjacent (i.e. `Canonicalize`d)
// union of k intervals is a union of at most k + 1 intervals.
ZoneList<CharacterRange>* negated =
@@ -422,6 +426,17 @@ class CompileVisitor : private RegExpVisitor {
assembler_.ConsumeRange(from_uc16, to_uc16);
});
+ }
+
+ void* VisitClassRanges(RegExpClassRanges* node, void*) override {
+ CompileCharacterRanges(node->ranges(zone_), node->is_negated());
+ return nullptr;
+ }
+
+ void* VisitClassSetOperand(RegExpClassSetOperand* node, void*) override {
+ // TODO(v8:11935): Support strings.
+ DCHECK(!node->has_strings());
+ CompileCharacterRanges(node->ranges(), false);
return nullptr;
}
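
The refactoring above extracts the range-lowering logic into CompileCharacterRanges so that both VisitClassRanges and the new VisitClassSetOperand share it. A small standalone sketch of the shape of that change (illustrative types only, not the real RegExp AST):

#include <cstdio>
#include <utility>
#include <vector>

// Illustrative stand-in for ZoneList<CharacterRange>.
using CharacterRange = std::pair<int, int>;  // inclusive [from, to]
using Ranges = std::vector<CharacterRange>;

struct CompileVisitorSketch {
  // Shared helper: both node kinds lower to a disjunction over ranges.
  void CompileCharacterRanges(const Ranges& ranges, bool negated) {
    std::printf("compile %zu range(s)%s\n", ranges.size(),
                negated ? " (negated)" : "");
  }

  void VisitClassRanges(const Ranges& ranges, bool is_negated) {
    CompileCharacterRanges(ranges, is_negated);
  }

  void VisitClassSetOperand(const Ranges& ranges, bool has_strings) {
    // Class-set operands with strings are rejected earlier (mirroring
    // CanBeHandledVisitor above); only the range part is compiled here.
    if (has_strings) return;
    CompileCharacterRanges(ranges, /*negated=*/false);
  }
};

int main() {
  CompileVisitorSketch v;
  v.VisitClassRanges({{'a', 'z'}}, /*is_negated=*/false);
  v.VisitClassSetOperand({{'0', '9'}}, /*has_strings=*/false);
}
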
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index a09267d3b9..8bbf32265e 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -38,7 +38,7 @@ bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
DCHECK(v8_flags.enable_experimental_regexp_engine);
DCHECK_EQ(re->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
- re->JSRegExpVerify(isolate);
+ if (v8_flags.verify_heap) re->JSRegExpVerify(isolate);
#endif
static constexpr bool kIsLatin1 = true;
@@ -74,14 +74,14 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
RegExpCompileData parse_result;
DCHECK(!isolate->has_pending_exception());
+ RegExpFlags flags = JSRegExp::AsRegExpFlags(regexp->flags());
bool parse_success = RegExpParser::ParseRegExpFromHeapString(
- isolate, &zone, source, JSRegExp::AsRegExpFlags(regexp->flags()),
- &parse_result);
+ isolate, &zone, source, flags, &parse_result);
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
// the only way parsing can fail now is because of stack overflow.
DCHECK_EQ(parse_result.error, RegExpError::kStackOverflow);
- USE(RegExp::ThrowRegExpException(isolate, regexp, source,
+ USE(RegExp::ThrowRegExpException(isolate, regexp, flags, source,
parse_result.error));
return base::nullopt;
}
@@ -102,7 +102,7 @@ bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
DCHECK(v8_flags.enable_experimental_regexp_engine);
DCHECK_EQ(re->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
- re->JSRegExpVerify(isolate);
+ if (v8_flags.verify_heap) re->JSRegExpVerify(isolate);
#endif
Handle<String> source(re->source(), isolate);
@@ -210,7 +210,7 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
DCHECK(v8_flags.enable_experimental_regexp_engine);
DCHECK_EQ(regexp->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
- regexp->JSRegExpVerify(isolate);
+ if (v8_flags.verify_heap) regexp->JSRegExpVerify(isolate);
#endif
if (!IsCompiled(regexp, isolate) && !Compile(isolate, regexp)) {
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 6002345420..33f79d3050 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -49,6 +49,7 @@ namespace internal {
* --- frame alignment (if applicable) ---
* - return address
* ebp-> - old ebp
+ * - frame marker
* - backup of caller esi
* - backup of caller edi
* - backup of caller ebx
@@ -140,8 +141,8 @@ void RegExpMacroAssemblerIA32::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ inc(Operand(ebp, kBacktrackCount));
- __ cmp(Operand(ebp, kBacktrackCount), Immediate(backtrack_limit()));
+ __ inc(Operand(ebp, kBacktrackCountOffset));
+ __ cmp(Operand(ebp, kBacktrackCountOffset), Immediate(backtrack_limit()));
__ j(not_equal, &next);
// Backtrack limit exceeded.
@@ -154,7 +155,8 @@ void RegExpMacroAssemblerIA32::Backtrack() {
__ bind(&next);
}
- // Pop Code offset from backtrack stack, add Code and jump to location.
+ // Pop InstructionStream offset from backtrack stack, add InstructionStream
+ // and jump to location.
Pop(ebx);
__ add(ebx, Immediate(masm_->CodeObject()));
__ jmp(ebx);
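
The Backtrack() sequence above increments the counter stored at kBacktrackCountOffset and gives up once it reaches the configured limit. A rough standalone sketch of that bookkeeping (plain C++, not the generated code; the "limit exceeded" path is simplified):

#include <cstdio>

struct RegExpFrameSketch {
  long backtrack_count = 0;  // analogue of the kBacktrackCountOffset slot
};

// Returns false once the backtrack budget is exhausted.
bool Backtrack(RegExpFrameSketch& frame, long backtrack_limit) {
  if (++frame.backtrack_count == backtrack_limit) {
    return false;  // analogue of the "backtrack limit exceeded" handling
  }
  // ... otherwise: pop an InstructionStream offset from the backtrack stack,
  // add the code object and jump there ...
  return true;
}

int main() {
  RegExpFrameSketch frame;
  const long limit = 3;
  while (Backtrack(frame, limit)) {
  }
  std::printf("gave up after %ld backtrack(s)\n", frame.backtrack_count);
}
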
@@ -179,14 +181,14 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(base::uc16 limit,
void RegExpMacroAssemblerIA32::CheckAtStart(int cp_offset, Label* on_at_start) {
__ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
- __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOneOffset));
BranchOrBacktrack(equal, on_at_start);
}
void RegExpMacroAssemblerIA32::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
__ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
- __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOneOffset));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -205,6 +207,22 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
__ bind(&fallthrough);
}
+void RegExpMacroAssemblerIA32::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
+
void RegExpMacroAssemblerIA32::PushCallerSavedRegisters() {
static_assert(backtrack_stackpointer() == ecx);
static_assert(current_character() == edx);
@@ -231,7 +249,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Check that there are sufficient characters left in the input.
if (read_backward) {
- __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOneOffset));
__ add(eax, ebx);
__ cmp(edi, eax);
BranchOrBacktrack(less_equal, on_no_match);
@@ -351,7 +369,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(compare, argument_count);
+ CallCFunctionFromIrregexpCode(compare, argument_count);
}
// Pop original values before reacting on result value.
__ pop(ebx);
@@ -391,7 +409,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(int start_reg,
// Check that there are sufficient characters left in the input.
if (read_backward) {
- __ mov(ebx, Operand(ebp, kStringStartMinusOne));
+ __ mov(ebx, Operand(ebp, kStringStartMinusOneOffset));
__ add(ebx, eax);
__ cmp(edi, ebx);
BranchOrBacktrack(less_equal, on_no_match);
@@ -529,8 +547,8 @@ void RegExpMacroAssemblerIA32::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
PopCallerSavedRegisters();
@@ -715,14 +733,14 @@ void RegExpMacroAssemblerIA32::PushRegExpBasePointer(Register stack_pointer,
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ mov(scratch, __ ExternalReferenceAsOperand(ref, scratch));
__ sub(scratch, stack_pointer);
- __ mov(Operand(ebp, kRegExpStackBasePointer), scratch);
+ __ mov(Operand(ebp, kRegExpStackBasePointerOffset), scratch);
}
void RegExpMacroAssemblerIA32::PopRegExpBasePointer(Register stack_pointer_out,
Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ mov(scratch, Operand(ebp, kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ebp, kRegExpStackBasePointerOffset));
__ mov(stack_pointer_out,
__ ExternalReferenceAsOperand(ref, stack_pointer_out));
__ sub(stack_pointer_out, scratch);
@@ -741,26 +759,29 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// code is generated.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- // Actually emit code to start a new stack frame.
- __ push(ebp);
- __ mov(ebp, esp);
+ // Actually emit code to start a new stack frame. This pushes the frame type
+ // marker into the stack slot at kFrameTypeOffset.
+ static_assert(kFrameTypeOffset == -1 * kSystemPointerSize);
+ __ EnterFrame(StackFrame::IRREGEXP);
+
// Save callee-save registers. Order here should correspond to order of
- // kBackup_ebx etc.
+ // kBackupEbxOffset etc.
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
- static_assert(kLastCalleeSaveRegister == kBackup_ebx);
+ static_assert(kLastCalleeSaveRegisterOffset == kBackupEbxOffset);
- static_assert(kSuccessfulCaptures ==
- kLastCalleeSaveRegister - kSystemPointerSize);
+ static_assert(kSuccessfulCapturesOffset ==
+ kLastCalleeSaveRegisterOffset - kSystemPointerSize);
__ push(Immediate(0)); // Number of successful matches in a global regexp.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ push(Immediate(0)); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ push(Immediate(0)); // The backtrack counter.
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ push(Immediate(0)); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here on.
@@ -803,14 +824,14 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
// Load start index for later use.
- __ mov(ebx, Operand(ebp, kStartIndex));
+ __ mov(ebx, Operand(ebp, kStartIndexOffset));
// Allocate space on stack for registers.
__ AllocateStackSpace(num_registers_ * kSystemPointerSize);
// Load string length.
- __ mov(esi, Operand(ebp, kInputEnd));
+ __ mov(esi, Operand(ebp, kInputEndOffset));
// Load input position.
- __ mov(edi, Operand(ebp, kInputStart));
+ __ mov(edi, Operand(ebp, kInputStartOffset));
// Set up edi to be negative offset from string end.
__ sub(edi, esi);
@@ -824,14 +845,14 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ mov(Operand(ebp, kStringStartMinusOne), eax);
+ __ mov(Operand(ebp, kStringStartMinusOneOffset), eax);
Label load_char_start_regexp;
{
Label start_regexp;
// Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ cmp(Operand(ebp, kStartIndexOffset), Immediate(0));
__ j(not_equal, &load_char_start_regexp, Label::kNear);
__ mov(current_character(), '\n');
__ jmp(&start_regexp, Label::kNear);
@@ -851,12 +872,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (num_saved_registers_ > 8) {
DCHECK_EQ(ecx, backtrack_stackpointer());
__ push(ecx);
- __ mov(ecx, kRegisterZero);
+ __ mov(ecx, kRegisterZeroOffset);
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, 0), eax);
__ sub(ecx, Immediate(kSystemPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kSystemPointerSize);
+ __ cmp(ecx,
+ kRegisterZeroOffset - num_saved_registers_ * kSystemPointerSize);
__ j(greater, &init_loop);
__ pop(ecx);
} else { // Unroll the loop.
@@ -874,10 +896,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ mov(ebx, Operand(ebp, kRegisterOutput));
- __ mov(ecx, Operand(ebp, kInputEnd));
- __ mov(edx, Operand(ebp, kStartIndex));
- __ sub(ecx, Operand(ebp, kInputStart));
+ __ mov(ebx, Operand(ebp, kRegisterOutputOffset));
+ __ mov(ecx, Operand(ebp, kInputEndOffset));
+ __ mov(edx, Operand(ebp, kStartIndexOffset));
+ __ sub(ecx, Operand(ebp, kInputStartOffset));
if (mode_ == UC16) {
__ lea(ecx, Operand(ecx, edx, times_2, 0));
} else {
@@ -901,22 +923,22 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ inc(Operand(ebp, kSuccessfulCaptures));
+ __ inc(Operand(ebp, kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
- __ mov(ecx, Operand(ebp, kNumOutputRegisters));
+ __ mov(ecx, Operand(ebp, kNumOutputRegistersOffset));
__ sub(ecx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmp(ecx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ mov(Operand(ebp, kNumOutputRegisters), ecx);
+ __ mov(Operand(ebp, kNumOutputRegistersOffset), ecx);
// Advance the location for output.
- __ add(Operand(ebp, kRegisterOutput),
+ __ add(Operand(ebp, kRegisterOutputOffset),
Immediate(num_saved_registers_ * kSystemPointerSize));
// Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -950,7 +972,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ mov(eax, Operand(ebp, kSuccessfulCaptures));
+ __ mov(eax, Operand(ebp, kSuccessfulCapturesOffset));
}
__ bind(&return_eax);
@@ -959,13 +981,17 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
PopRegExpBasePointer(backtrack_stackpointer(), ebx);
// Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kLastCalleeSaveRegister));
+ __ lea(esp, Operand(ebp, kLastCalleeSaveRegisterOffset));
// Restore callee-save registers.
+ static_assert(kNumCalleeSaveRegisters == 3);
+ static_assert(kBackupEsiOffset == -2 * kSystemPointerSize);
+ static_assert(kBackupEdiOffset == -3 * kSystemPointerSize);
+ static_assert(kBackupEbxOffset == -4 * kSystemPointerSize);
__ pop(ebx);
__ pop(edi);
__ pop(esi);
- // Exit function frame, restore previous one.
- __ pop(ebp);
+
+ __ LeaveFrame(StackFrame::IRREGEXP);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -995,7 +1021,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
- __ mov(esi, Operand(ebp, kInputEnd));
+ __ mov(esi, Operand(ebp, kInputEndOffset));
SafeReturn();
}
@@ -1015,7 +1041,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments, ebx);
__ mov(Operand(esp, 0 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(ExternalReference::re_grow_stack(), kNumArguments);
+ CallCFunctionFromIrregexpCode(ExternalReference::re_grow_stack(),
+ kNumArguments);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, eax);
@@ -1173,7 +1200,7 @@ void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ mov(register_location(reg), eax);
}
@@ -1186,14 +1213,14 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(Operand(esp, 2 * kSystemPointerSize), ebp);
- // Code of self.
+ // InstructionStream of self.
__ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(masm_->CodeObject()));
// Next address on the stack (will be address of return address).
__ lea(eax, Operand(esp, -kSystemPointerSize));
__ mov(Operand(esp, 0 * kSystemPointerSize), eax);
ExternalReference check_stack_guard =
ExternalReference::re_check_stack_guard_state();
- __ CallCFunction(check_stack_guard, num_arguments);
+ CallCFunctionFromIrregexpCode(check_stack_guard, num_arguments);
}
Operand RegExpMacroAssemblerIA32::StaticVariable(const ExternalReference& ext) {
@@ -1215,15 +1242,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<int>(re_frame, kStartIndex),
- static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ frame_entry<int>(re_frame, kStartIndexOffset),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
@@ -1232,7 +1260,8 @@ Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
- return Operand(ebp, kRegisterZero - register_index * kSystemPointerSize);
+ return Operand(ebp,
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
@@ -1243,7 +1272,7 @@ void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
BranchOrBacktrack(greater_equal, on_outside_input);
} else {
__ lea(eax, Operand(edi, cp_offset * char_size()));
- __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOneOffset));
BranchOrBacktrack(less_equal, on_outside_input);
}
}
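
CheckStackGuardState above reads typed values back out of the Irregexp frame through frame_entry/frame_entry_address at the renamed offsets. A standalone sketch of that access pattern (plain C++; the offsets below are illustrative, not the real ia32 ones):

#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

template <typename T>
T& frame_entry(Address re_frame, int frame_offset) {
  return *reinterpret_cast<T*>(re_frame + frame_offset);
}

template <typename T>
T* frame_entry_address(Address re_frame, int frame_offset) {
  return reinterpret_cast<T*>(re_frame + frame_offset);
}

int main() {
  // Fake frame: a pretend frame pointer in the middle of a local buffer.
  alignas(8) unsigned char storage[64] = {};
  Address fp = reinterpret_cast<Address>(storage + 32);

  constexpr int kStartIndexOffset = 8;   // illustrative offsets only
  constexpr int kDirectCallOffset = -16;

  frame_entry<int>(fp, kStartIndexOffset) = 3;
  frame_entry<int>(fp, kDirectCallOffset) = 1;

  std::printf("start_index=%d direct_call=%d at %p\n",
              frame_entry<int>(fp, kStartIndexOffset),
              frame_entry<int>(fp, kDirectCallOffset),
              static_cast<void*>(frame_entry_address<int>(fp, kStartIndexOffset)));
}
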
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 96d3163429..649c61d880 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -94,45 +94,61 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
private:
Operand StaticVariable(const ExternalReference& ext);
// Offsets from ebp of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kSystemPointerSize;
- static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
+ static constexpr int kReturnAddressOffset =
+ kFramePointerOffset + kSystemPointerSize;
+ static constexpr int kFrameAlign = kReturnAddressOffset + kSystemPointerSize;
// Parameters.
- static const int kInputString = kFrameAlign;
- static const int kStartIndex = kInputString + kSystemPointerSize;
- static const int kInputStart = kStartIndex + kSystemPointerSize;
- static const int kInputEnd = kInputStart + kSystemPointerSize;
- static const int kRegisterOutput = kInputEnd + kSystemPointerSize;
+ static constexpr int kInputStringOffset = kFrameAlign;
+ static constexpr int kStartIndexOffset =
+ kInputStringOffset + kSystemPointerSize;
+ static constexpr int kInputStartOffset =
+ kStartIndexOffset + kSystemPointerSize;
+ static constexpr int kInputEndOffset = kInputStartOffset + kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kInputEndOffset + kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
- static const int kDirectCall = kNumOutputRegisters + kSystemPointerSize;
- static const int kIsolate = kDirectCall + kSystemPointerSize;
- // Below the frame pointer - local stack variables.
+ static constexpr int kNumOutputRegistersOffset =
+ kRegisterOutputOffset + kSystemPointerSize;
+ static constexpr int kDirectCallOffset =
+ kNumOutputRegistersOffset + kSystemPointerSize;
+ static constexpr int kIsolateOffset = kDirectCallOffset + kSystemPointerSize;
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kBackup_esi = kFramePointer - kSystemPointerSize;
- static const int kBackup_edi = kBackup_esi - kSystemPointerSize;
- static const int kBackup_ebx = kBackup_edi - kSystemPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_ebx;
-
- static const int kSuccessfulCaptures =
- kLastCalleeSaveRegister - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kBackupEsiOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kBackupEdiOffset = kBackupEsiOffset - kSystemPointerSize;
+ static constexpr int kBackupEbxOffset = kBackupEdiOffset - kSystemPointerSize;
+ static constexpr int kNumCalleeSaveRegisters = 3;
+ static constexpr int kLastCalleeSaveRegisterOffset = kBackupEbxOffset;
+
+ static constexpr int kSuccessfulCapturesOffset =
+ kLastCalleeSaveRegisterOffset - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kRegExpCodeSize = 1024;
+
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
void PushCallerSavedRegisters();
void PopCallerSavedRegisters();
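
With the new frame marker at ebp-4, every ia32 local slot moves down by one pointer, which is what the updated static_asserts in GetCode verify. A standalone sketch of the chain plus register_location() (plain constexpr C++; 4-byte pointers assumed for ia32):

constexpr int kSystemPointerSize = 4;
constexpr int kFrameTypeOffset = -kSystemPointerSize;  // new marker slot
constexpr int kBackupEsiOffset = kFrameTypeOffset - kSystemPointerSize;
constexpr int kBackupEdiOffset = kBackupEsiOffset - kSystemPointerSize;
constexpr int kBackupEbxOffset = kBackupEdiOffset - kSystemPointerSize;
constexpr int kSuccessfulCapturesOffset = kBackupEbxOffset - kSystemPointerSize;
constexpr int kStringStartMinusOneOffset =
    kSuccessfulCapturesOffset - kSystemPointerSize;
constexpr int kBacktrackCountOffset =
    kStringStartMinusOneOffset - kSystemPointerSize;
constexpr int kRegExpStackBasePointerOffset =
    kBacktrackCountOffset - kSystemPointerSize;
constexpr int kRegisterZeroOffset =
    kRegExpStackBasePointerOffset - kSystemPointerSize;

// Analogue of register_location(): regexp register i lives i pointer-sized
// slots below register 0.
constexpr int register_offset(int register_index) {
  return kRegisterZeroOffset - register_index * kSystemPointerSize;
}

static_assert(kFrameTypeOffset == -4);
static_assert(kBackupEbxOffset == -16);
static_assert(kRegisterZeroOffset == -36);
static_assert(register_offset(2) == -44);

int main() {}
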
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
index 35fd95bd0f..472d513b97 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -20,7 +20,7 @@ namespace internal {
*
* This assembler uses the following register assignment convention
* - s0 : Unused.
- * - s1 : Pointer to current Code object including heap object tag.
+ * - s1 : Pointer to current InstructionStream object including heap object tag.
* - s2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - s5 : Currently loaded character. Must be loaded using
@@ -36,26 +36,27 @@ namespace internal {
*
* The stack will have the following structure:
*
- * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
- * kStackFrameHeader
+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolateOffset
+ * kStackFrameHeaderOffset
* --- sp when called ---
- * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddressOffset
* - fp[64] old-fp Old fp, callee saved.
* - fp[0..63] s0..s7 Callee-saved registers s0..s7.
* --- frame pointer ----
- * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-24] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
- * - fp[-32] end of input (address of end of string). kInputEnd
- * - fp[-40] start of input (address of first character in string). kInputStart
- * - fp[-48] start index (character index of start). kStartIndex
- * - fp[-56] void* input_string (location of a handle containing the string). kInputString
- * - fp[-64] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-72] Offset of location before start of input (effectively character kStringStartMinusOne
+ * - fp[-8] frame marker
+ * - fp[-16] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCallOffset
+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegistersOffset
+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutputOffset
+ * - fp[-40] end of input (address of end of string). kInputEndOffset
+ * - fp[-48] start of input (address of first character in string). kInputStartOffset
+ * - fp[-56] start index (character index of start). kStartIndexOffset
+ * - fp[-64] void* input_string (location of a handle containing the string). kInputStringOffset
+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCapturesOffset
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOneOffset
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
- * - fp[-80] register 0 (Only positions must be stored in the first kRegisterZero
+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZeroOffset
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -84,15 +85,13 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-const int RegExpMacroAssemblerLOONG64::kRegExpCodeSize;
-
RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
masm_(std::make_unique<MacroAssembler>(
isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ NewAssemblerBuffer(kInitialBufferSize))),
no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
@@ -151,9 +150,9 @@ void RegExpMacroAssemblerLOONG64::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ Add_d(a0, a0, Operand(1));
- __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ Branch(&next, ne, a0, Operand(backtrack_limit()));
// Backtrack limit exceeded.
@@ -185,7 +184,7 @@ void RegExpMacroAssemblerLOONG64::CheckCharacterGT(base::uc16 limit,
void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset,
Label* on_at_start) {
- __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Add_d(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
@@ -193,7 +192,7 @@ void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset,
void RegExpMacroAssemblerLOONG64::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Add_d(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
@@ -227,7 +226,7 @@ void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Add_d(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -329,7 +328,7 @@ void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -361,7 +360,7 @@ void RegExpMacroAssemblerLOONG64::CheckNotBackReference(int start_reg,
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Add_d(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -461,8 +460,8 @@ void RegExpMacroAssemblerLOONG64::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ li(code_pointer(), Operand(masm_->CodeObject()));
@@ -626,7 +625,7 @@ void RegExpMacroAssemblerLOONG64::PushRegExpBasePointer(Register stack_pointer,
__ li(scratch, ref);
__ Ld_d(scratch, MemOperand(scratch, 0));
__ Sub_d(scratch, stack_pointer, scratch);
- __ St_d(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ St_d(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerLOONG64::PopRegExpBasePointer(
@@ -634,7 +633,7 @@ void RegExpMacroAssemblerLOONG64::PopRegExpBasePointer(
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ Ld_d(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ li(scratch, ref);
__ Ld_d(scratch, MemOperand(scratch, 0));
__ Add_d(stack_pointer_out, stack_pointer_out, scratch);
@@ -659,34 +658,48 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// no is generated.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
+ // Emit code to start a new stack frame. In the following we push all
+ // callee-save registers (these end up above the fp) and all register
+ // arguments (in {a0,a1,a2,a3}, these end up below the fp).
// TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
// or dont save.
RegList registers_to_retain = {s0, s1, s2, s3, s4, s5, s6, s7};
- RegList argument_registers = {a0, a1, a2, a3};
+ __ MultiPush({ra}, {fp}, registers_to_retain);
+ __ mov(frame_pointer(), sp);
+
+ // Registers {a0,a1,a2,a3} are the first four arguments as per the C calling
+ // convention, and must match our specified offsets (e.g. kInputEndOffset).
+ //
+ // a0: input_string
+ // a1: start_offset
+ // a2: input_start
+ // a3: input_end
+ RegList argument_registers = {a0, a1, a2, a3};
argument_registers |= {a4, a5, a6, a7};
- __ MultiPush({ra}, {fp}, argument_registers | registers_to_retain);
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- // TODO(plind): this 8 is the # of argument regs, should have definition.
- __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize));
- static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ // Also push the frame marker.
+ __ li(kScratchReg, Operand(StackFrame::TypeToMarker(StackFrame::IRREGEXP)));
+ static_assert(kFrameTypeOffset == kFramePointerOffset - kSystemPointerSize);
+ static_assert(kInputEndOffset ==
+ kRegisterOutputOffset - kSystemPointerSize);
+ static_assert(kInputStartOffset == kInputEndOffset - kSystemPointerSize);
+ static_assert(kStartIndexOffset == kInputStartOffset - kSystemPointerSize);
+ static_assert(kInputStringOffset == kStartIndexOffset - kSystemPointerSize);
+ __ MultiPush(argument_registers | kScratchReg);
+
+ static_assert(kSuccessfulCapturesOffset ==
+ kInputStringOffset - kSystemPointerSize);
__ mov(a0, zero_reg);
__ Push(a0); // Make room for success counter and initialize it to 0.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ Push(a0); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ Push(a0); // The backtrack counter
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ Push(a0); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here
@@ -711,7 +724,8 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
__ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
// Check if there is room for the variable number of registers above
// the stack limit.
- __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ __ Branch(&stack_ok, hs, a0,
+ Operand(num_registers_ * kSystemPointerSize));
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ li(a0, Operand(EXCEPTION));
@@ -727,22 +741,23 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
}
// Allocate space on stack for registers.
- __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize));
+ __ Sub_d(sp, sp, Operand(num_registers_ * kSystemPointerSize));
// Load string end.
- __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld_d(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
// Load input start.
- __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStartOffset));
// Find negative length (offset of start relative to end).
__ Sub_d(current_input_offset(), a0, end_of_input_address());
// Set a0 to address of char before start of the input string
// (effectively string position -1).
- __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndexOffset));
__ Sub_d(a0, current_input_offset(), Operand(char_size()));
__ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0);
__ Sub_d(a0, a0, t1);
// Store this value in a local variable, for use when clearing
// position registers.
- __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -767,12 +782,12 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1.
if (num_saved_registers_ > 8) {
// Address of register 0.
- __ Add_d(a1, frame_pointer(), Operand(kRegisterZero));
+ __ Add_d(a1, frame_pointer(), Operand(kRegisterZeroOffset));
__ li(a2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ St_d(a0, MemOperand(a1, 0));
- __ Add_d(a1, a1, Operand(-kPointerSize));
+ __ Add_d(a1, a1, Operand(-kSystemPointerSize));
__ Sub_d(a2, a2, Operand(1));
__ Branch(&init_loop, ne, a2, Operand(zero_reg));
} else {
@@ -790,9 +805,9 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// Copy captures to output.
- __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart));
- __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput));
- __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStartOffset));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutputOffset));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndexOffset));
__ Sub_d(a1, end_of_input_address(), a1);
// a1 is length of input in bytes.
if (mode_ == UC16) {
@@ -832,12 +847,12 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Increment success counter.
__ Add_d(a0, a0, 1);
- __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ Sub_d(a1, a1, num_saved_registers_);
@@ -845,13 +860,13 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
//__ mov(v0, a0);
__ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
- __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Advance the location for output.
__ Add_d(a2, a2, num_saved_registers_ * kIntSize);
- __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Prepare a0 to initialize registers with its value in the next run.
- __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -882,7 +897,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Exit and return v0.
__ bind(&exit_label_);
if (global()) {
- __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
__ bind(&return_v0);
@@ -918,7 +933,8 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
- __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld_d(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
SafeReturn();
}
@@ -934,7 +950,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments, a0);
__ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
@@ -1012,13 +1028,14 @@ void RegExpMacroAssemblerLOONG64::PopRegister(int register_index) {
void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
- __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ __ li(a0,
+ Operand(target + InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
- int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ int cp_offset = offset + InstructionStream::kHeaderSize - kHeapObjectTag;
//__ emit(0);
__ nop();
masm_->label_at_put(label, offset);
@@ -1102,7 +1119,7 @@ void RegExpMacroAssemblerLOONG64::WriteCurrentPositionToRegister(
void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ St_d(a0, register_location(reg));
}
@@ -1118,17 +1135,17 @@ void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) {
// Align the stack pointer and save the original sp value on the stack.
__ mov(scratch, sp);
- __ Sub_d(sp, sp, Operand(kPointerSize));
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
__ St_d(scratch, MemOperand(sp, 0));
__ mov(a2, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// We need to make room for the return address on the stack.
- DCHECK(IsAligned(stack_alignment, kPointerSize));
+ DCHECK(IsAligned(stack_alignment, kSystemPointerSize));
__ Sub_d(sp, sp, Operand(stack_alignment));
// The stack pointer now points to cell where the return address will be
@@ -1183,16 +1200,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int64_t RegExpMacroAssemblerLOONG64::CheckStackGuardState(
Address* return_address, Address raw_code, Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndexOffset)),
static_cast<RegExp::CallOrigin>(
- frame_entry<int64_t>(re_frame, kDirectCall)),
+ frame_entry<int64_t>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) {
@@ -1201,7 +1218,7 @@ MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset,
@@ -1210,7 +1227,7 @@ void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset,
BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
Operand(-cp_offset * char_size()));
} else {
- __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
}
@@ -1265,6 +1282,22 @@ void RegExpMacroAssemblerLOONG64::Pop(Register target) {
__ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
}
+void RegExpMacroAssemblerLOONG64::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
+
void RegExpMacroAssemblerLOONG64::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
index fe40a4e74f..5990a8a5aa 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
@@ -93,49 +93,68 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
private:
// Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- static const int kStoredRegisters = kFramePointer;
+ static constexpr int kStoredRegistersOffset = kFramePointerOffset;
// Return address (stored from link register, read into pc on return).
// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
- static const int kReturnAddress = kStoredRegisters + 9 * kSystemPointerSize;
+ static constexpr int kReturnAddressOffset =
+ kStoredRegistersOffset + 9 * kSystemPointerSize;
// Stack frame header.
- static const int kStackFrameHeader = kReturnAddress;
+ static constexpr int kStackFrameHeaderOffset = kReturnAddressOffset;
// Below the frame pointer.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
+
+ // Register parameters stored by setup code.
+ static constexpr int kIsolateOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kDirectCallOffset - kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
+
// Register parameters stored by setup code.
- static const int kIsolate = kFramePointer - kSystemPointerSize;
- static const int kDirectCall = kIsolate - kSystemPointerSize;
- static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
- static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
- static const int kInputStart = kInputEnd - kSystemPointerSize;
- static const int kStartIndex = kInputStart - kSystemPointerSize;
- static const int kInputString = kStartIndex - kSystemPointerSize;
+ static constexpr int kInputEndOffset =
+ kRegisterOutputOffset - kSystemPointerSize;
+ static constexpr int kInputStartOffset = kInputEndOffset - kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStartOffset - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kStartIndexOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kInitialBufferSize = 1024;
void PushCallerSavedRegisters();
void PopCallerSavedRegisters();
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
+
// Check whether preemption has been requested.
void CheckPreemption();
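(The renamed constants above all follow one pattern: each slot is defined relative to the previous one, and static_asserts pin selected slots to the offsets expected elsewhere. A minimal standalone illustration of that pattern — the constant names and the 8-byte pointer size here are hypothetical, not the V8 definitions.)

constexpr int kPtr = 8;  // assumed system pointer size on a 64-bit target

// Each slot hangs off the previous one, so inserting a new slot shifts
// everything below it automatically.
constexpr int kFp = 0;
constexpr int kFrameType = kFp - kPtr;        // fp[-8]
constexpr int kIsolate = kFrameType - kPtr;   // fp[-16]
constexpr int kDirectCall = kIsolate - kPtr;  // fp[-24]

// Compile-time checks document (and enforce) the expected absolute offsets.
static_assert(kFrameType == -8);
static_assert(kDirectCall == -24);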
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 456e166ade..46f009480b 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -21,7 +21,7 @@ namespace internal {
*
* This assembler uses the following register assignment convention
* - s0 : Unused.
- * - s1 : Pointer to current Code object including heap object tag.
+ * - s1 : Pointer to current InstructionStream object including heap object tag.
* - s2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - s5 : Currently loaded character. Must be loaded using
@@ -79,19 +79,20 @@ namespace internal {
* - fp[64] s9, old-fp Old fp, callee saved(s9).
* - fp[0..63] s0..s7 Callee-saved registers s0..s7.
* --- frame pointer ----
- * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-24] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
- * - fp[-32] end of input (address of end of string). kInputEnd
- * - fp[-40] start of input (address of first character in string). kInputStart
- * - fp[-48] start index (character index of start). kStartIndex
- * - fp[-56] void* input_string (location of a handle containing the string). kInputString
- * - fp[-64] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-72] Offset of location before start of input (effectively character kStringStartMinusOne
+ * - fp[-8] frame marker
+ * - fp[-16] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCallOffset
+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegistersOffset
+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutputOffset
+ * - fp[-40] end of input (address of end of string). kInputEndOffset
+ * - fp[-48] start of input (address of first character in string). kInputStartOffset
+ * - fp[-56] start index (character index of start). kStartIndexOffset
+ * - fp[-64] void* input_string (location of a handle containing the string). kInputStringOffset
+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCapturesOffset
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOneOffset
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
- * - fp[-80] register 0 (Only positions must be stored in the first kRegisterZero
+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZeroOffset
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -120,15 +121,13 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-const int RegExpMacroAssemblerMIPS::kRegExpCodeSize;
-
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
masm_(std::make_unique<MacroAssembler>(
isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ NewAssemblerBuffer(kInitialBufferSize))),
no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
@@ -191,9 +190,9 @@ void RegExpMacroAssemblerMIPS::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ Ld(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Ld(a0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ Daddu(a0, a0, Operand(1));
- __ Sd(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Sd(a0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ Branch(&next, ne, a0, Operand(backtrack_limit()));
// Backtrack limit exceeded.
@@ -228,7 +227,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(base::uc16 limit,
}
void RegExpMacroAssemblerMIPS::CheckAtStart(int cp_offset, Label* on_at_start) {
- __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Daddu(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
@@ -237,7 +236,7 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(int cp_offset, Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Daddu(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
@@ -272,7 +271,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Daddu(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -373,7 +372,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -405,7 +404,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Daddu(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -509,8 +508,8 @@ void RegExpMacroAssemblerMIPS::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ li(code_pointer(), Operand(masm_->CodeObject()));
@@ -674,7 +673,7 @@ void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register stack_pointer,
__ li(scratch, Operand(ref));
__ Ld(scratch, MemOperand(scratch));
__ Dsubu(scratch, stack_pointer, scratch);
- __ Sd(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ Sd(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register stack_pointer_out,
@@ -682,7 +681,7 @@ void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register stack_pointer_out,
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ Ld(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ li(scratch, Operand(ref));
__ Ld(scratch, MemOperand(scratch));
__ Daddu(stack_pointer_out, stack_pointer_out, scratch);
@@ -707,34 +706,48 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// no is generated.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
+ // Emit code to start a new stack frame. In the following we push all
+ // callee-save registers (these end up above the fp) and all register
+ // arguments (in {a0,a1,a2,a3}, these end up below the fp).
// TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
// or dont save.
RegList registers_to_retain = {s0, s1, s2, s3, s4, s5, s6, s7, fp};
- RegList argument_registers = {a0, a1, a2, a3};
+ __ MultiPush(registers_to_retain | ra);
+ __ mov(frame_pointer(), sp);
+
+ // Registers {a0,a1,a2,a3} are the first four arguments as per the C calling
+ // convention, and must match our specified offsets (e.g. kInputEndOffset).
+ //
+ // a0: input_string
+ // a1: start_offset
+ // a2: input_start
+ // a3: input_end
+ RegList argument_registers = {a0, a1, a2, a3};
argument_registers |= {a4, a5, a6, a7};
- __ MultiPush(argument_registers | registers_to_retain | ra);
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- // TODO(plind): this 8 is the # of argument regs, should have definition.
- __ Daddu(frame_pointer(), sp, Operand(8 * kPointerSize));
- static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ // Also push the frame marker.
+ __ li(kScratchReg, Operand(StackFrame::TypeToMarker(StackFrame::IRREGEXP)));
+ static_assert(kFrameTypeOffset == kFramePointerOffset - kSystemPointerSize);
+ static_assert(kInputEndOffset ==
+ kRegisterOutputOffset - kSystemPointerSize);
+ static_assert(kInputStartOffset == kInputEndOffset - kSystemPointerSize);
+ static_assert(kStartIndexOffset == kInputStartOffset - kSystemPointerSize);
+ static_assert(kInputStringOffset == kStartIndexOffset - kSystemPointerSize);
+ __ MultiPush(argument_registers | kScratchReg);
+
+ static_assert(kSuccessfulCapturesOffset ==
+ kInputStringOffset - kSystemPointerSize);
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ push(a0); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ push(a0); // The backtrack counter
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ push(a0); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here
@@ -777,20 +790,20 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ Dsubu(sp, sp, Operand(num_registers_ * kPointerSize));
// Load string end.
- __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEndOffset));
// Load input start.
- __ Ld(a0, MemOperand(frame_pointer(), kInputStart));
+ __ Ld(a0, MemOperand(frame_pointer(), kInputStartOffset));
// Find negative length (offset of start relative to end).
__ Dsubu(current_input_offset(), a0, end_of_input_address());
// Set a0 to address of char before start of the input string
// (effectively string position -1).
- __ Ld(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Ld(a1, MemOperand(frame_pointer(), kStartIndexOffset));
__ Dsubu(a0, current_input_offset(), Operand(char_size()));
__ dsll(t1, a1, (mode_ == UC16) ? 1 : 0);
__ Dsubu(a0, a0, t1);
// Store this value in a local variable, for use when clearing
// position registers.
- __ Sd(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Sd(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -815,7 +828,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1.
if (num_saved_registers_ > 8) {
// Address of register 0.
- __ Daddu(a1, frame_pointer(), Operand(kRegisterZero));
+ __ Daddu(a1, frame_pointer(), Operand(kRegisterZeroOffset));
__ li(a2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
@@ -838,9 +851,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// Copy captures to output.
- __ Ld(a1, MemOperand(frame_pointer(), kInputStart));
- __ Ld(a0, MemOperand(frame_pointer(), kRegisterOutput));
- __ Ld(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Ld(a1, MemOperand(frame_pointer(), kInputStartOffset));
+ __ Ld(a0, MemOperand(frame_pointer(), kRegisterOutputOffset));
+ __ Ld(a2, MemOperand(frame_pointer(), kStartIndexOffset));
__ Dsubu(a1, end_of_input_address(), a1);
// a1 is length of input in bytes.
if (mode_ == UC16) {
@@ -880,12 +893,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ Ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ Ld(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ Ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ Ld(a1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
+ __ Ld(a2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Increment success counter.
__ Daddu(a0, a0, 1);
- __ Sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Sd(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ Dsubu(a1, a1, num_saved_registers_);
@@ -893,13 +906,13 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ mov(v0, a0);
__ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
- __ Sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Sd(a1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Advance the location for output.
__ Daddu(a2, a2, num_saved_registers_ * kIntSize);
- __ Sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Sd(a2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Prepare a0 to initialize registers with its value in the next run.
- __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -930,7 +943,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Exit and return v0.
__ bind(&exit_label_);
if (global()) {
- __ Ld(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld(v0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
__ bind(&return_v0);
@@ -965,7 +978,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
- __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
SafeReturn();
}
@@ -980,7 +994,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments, a0);
__ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
@@ -1015,7 +1029,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
return Handle<HeapObject>::cast(code);
}
-
void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
if (to == nullptr) {
Backtrack();
@@ -1025,7 +1038,6 @@ void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
return;
}
-
void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
@@ -1033,7 +1045,6 @@ void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
}
-
void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
@@ -1041,41 +1052,37 @@ void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
}
-
void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
Label* if_eq) {
__ Ld(a0, register_location(reg));
BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
}
-
RegExpMacroAssembler::IrregexpImplementation
RegExpMacroAssemblerMIPS::Implementation() {
return kMIPSImplementation;
}
-
void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
Pop(current_input_offset());
}
-
void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
Pop(a0);
__ Sd(a0, register_location(register_index));
}
-
void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
- __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ __ li(a0,
+ Operand(target + InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
- int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ int cp_offset = offset + InstructionStream::kHeaderSize - kHeapObjectTag;
__ emit(0);
masm_->label_at_put(label, offset);
__ bind(&after_constant);
@@ -1090,12 +1097,10 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
CheckStackLimit();
}
-
void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
Push(current_input_offset());
}
-
void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
__ Ld(a0, register_location(register_index));
@@ -1103,7 +1108,6 @@ void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
if (check_stack_limit) CheckStackLimit();
}
-
void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
__ Ld(current_input_offset(), register_location(reg));
}
@@ -1140,20 +1144,17 @@ void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
__ bind(&after_position);
}
-
void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
__ li(a0, Operand(to));
__ Sd(a0, register_location(register_index));
}
-
bool RegExpMacroAssemblerMIPS::Succeed() {
__ jmp(&success_label_);
return global();
}
-
void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
@@ -1164,10 +1165,9 @@ void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
}
}
-
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ Sd(a0, register_location(reg));
}
@@ -1191,7 +1191,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
__ Sd(scratch, MemOperand(sp));
__ mov(a2, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// We need to make room for the return address on the stack.
@@ -1237,14 +1237,12 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
__ li(code_pointer(), Operand(masm_->CodeObject()));
}
-
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
-
template <typename T>
static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
@@ -1253,42 +1251,39 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int64_t RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndexOffset)),
static_cast<RegExp::CallOrigin>(
- frame_entry<int64_t>(re_frame, kDirectCall)),
+ frame_entry<int64_t>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
-
MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
DCHECK(register_index < (1<<30));
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
+ kRegisterZeroOffset - register_index * kPointerSize);
}
-
void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
Label* on_outside_input) {
if (cp_offset >= 0) {
BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
Operand(-cp_offset * char_size()));
} else {
- __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
}
}
-
void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
@@ -1308,7 +1303,6 @@ void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
__ Branch(to, condition, rs, rt);
}
-
void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
Condition cond,
Register rs,
@@ -1316,21 +1310,18 @@ void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
__ BranchAndLink(to, cond, rs, rt);
}
-
void RegExpMacroAssemblerMIPS::SafeReturn() {
__ pop(ra);
__ Daddu(t1, ra, Operand(masm_->CodeObject()));
__ Jump(t1);
}
-
void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
__ bind(name);
__ Dsubu(ra, ra, Operand(masm_->CodeObject()));
__ push(ra);
}
-
void RegExpMacroAssemblerMIPS::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
__ Daddu(backtrack_stackpointer(),
@@ -1339,13 +1330,27 @@ void RegExpMacroAssemblerMIPS::Push(Register source) {
__ Sw(source, MemOperand(backtrack_stackpointer()));
}
-
void RegExpMacroAssemblerMIPS::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ Lw(target, MemOperand(backtrack_stackpointer()));
__ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
}
+void RegExpMacroAssemblerMIPS::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
void RegExpMacroAssemblerMIPS::CheckPreemption() {
// Check for preemption.
@@ -1356,7 +1361,6 @@ void RegExpMacroAssemblerMIPS::CheckPreemption() {
SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
}
-
void RegExpMacroAssemblerMIPS::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit_address(
@@ -1367,7 +1371,6 @@ void RegExpMacroAssemblerMIPS::CheckStackLimit() {
SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
}
-
void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 449084b0da..009b91c9b0 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -94,49 +94,67 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
private:
// Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- static const int kStoredRegisters = kFramePointer;
+ static constexpr int kStoredRegistersOffset = kFramePointerOffset;
// Return address (stored from link register, read into pc on return).
// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
- static const int kReturnAddress = kStoredRegisters + 9 * kSystemPointerSize;
+ static constexpr int kReturnAddressOffset =
+ kStoredRegistersOffset + 9 * kSystemPointerSize;
// Stack frame header.
- static const int kStackFrameHeader = kReturnAddress;
+ static constexpr int kStackFrameHeaderOffset = kReturnAddressOffset;
// Below the frame pointer.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
// Register parameters stored by setup code.
- static const int kIsolate = kFramePointer - kSystemPointerSize;
- static const int kDirectCall = kIsolate - kSystemPointerSize;
- static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
- static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
- static const int kInputStart = kInputEnd - kSystemPointerSize;
- static const int kStartIndex = kInputStart - kSystemPointerSize;
- static const int kInputString = kStartIndex - kSystemPointerSize;
+ static constexpr int kIsolateOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kDirectCallOffset - kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
+
+ // Register parameters stored by setup code.
+ static constexpr int kInputEndOffset =
+ kRegisterOutputOffset - kSystemPointerSize;
+ static constexpr int kInputStartOffset = kInputEndOffset - kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStartOffset - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kStartIndexOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kInitialBufferSize = 1024;
void PushCallerSavedRegisters();
void PopCallerSavedRegisters();
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
+
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 4fdad87894..3576f318fe 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -21,7 +21,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - r25: Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - r26: Pointer to current Code object including heap object tag.
+ * - r26: Pointer to current InstructionStream object including heap object tag.
* - r27: Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r28: Currently loaded character. Must be loaded using
@@ -48,23 +48,25 @@ namespace internal {
* - fp[24] old frame pointer (r31).
* - fp[0..20] backup of registers r25..r30
* --- frame pointer ----
- * - fp[-4] direct_call (if 1, direct call from JavaScript code,
+ * - fp[-4] frame marker
+ * - fp[-8] isolate
+ * - fp[-12] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[-8] stack_area_base (high end of the memory area to use as
+ * - fp[-16] stack_area_base (high end of the memory area to use as
* backtracking stack).
- * - fp[-12] capture array size (may fit multiple sets of matches)
- * - fp[-16] int* capture_array (int[num_saved_registers_], for output).
- * - fp[-20] end of input (address of end of string).
- * - fp[-24] start of input (address of first character in string).
- * - fp[-28] start index (character index of start).
- * - fp[-32] void* input_string (location of a handle containing the string).
- * - fp[-36] success counter (only for global regexps to count matches).
- * - fp[-40] Offset of location before start of input (effectively character
+ * - fp[-20] capture array size (may fit multiple sets of matches)
+ * - fp[-24] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[-28] end of input (address of end of string).
+ * - fp[-32] start of input (address of first character in string).
+ * - fp[-36] start index (character index of start).
+ * - fp[-40] void* input_string (location of a handle containing the string).
+ * - fp[-44] success counter (only for global regexps to count matches).
+ * - fp[-48] Offset of location before start of input (effectively character
* string start - 1). Used to initialize capture registers to a
* non-position.
- * - fp[-44] At start (if 1, we are starting at the start of the
+ * - fp[-52] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-48] register 0 (Only positions must be stored in the first
+ * - fp[-56] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -172,9 +174,9 @@ void RegExpMacroAssemblerPPC::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ LoadU64(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
+ __ LoadU64(r3, MemOperand(frame_pointer(), kBacktrackCountOffset), r0);
__ addi(r3, r3, Operand(1));
- __ StoreU64(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
+ __ StoreU64(r3, MemOperand(frame_pointer(), kBacktrackCountOffset), r0);
__ mov(r0, Operand(backtrack_limit()));
__ CmpS64(r3, r0);
__ bne(&next);
@@ -189,7 +191,8 @@ void RegExpMacroAssemblerPPC::Backtrack() {
__ bind(&next);
}
- // Pop Code offset from backtrack stack, add Code and jump to location.
+ // Pop InstructionStream offset from backtrack stack, add InstructionStream
+ // and jump to location.
Pop(r3);
__ add(r3, r3, code_pointer());
__ Jump(r3);
@@ -211,7 +214,7 @@ void RegExpMacroAssemblerPPC::CheckCharacterGT(base::uc16 limit,
}
void RegExpMacroAssemblerPPC::CheckAtStart(int cp_offset, Label* on_at_start) {
- __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ addi(r3, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ CmpS64(r3, r4);
@@ -220,7 +223,7 @@ void RegExpMacroAssemblerPPC::CheckAtStart(int cp_offset, Label* on_at_start) {
void RegExpMacroAssemblerPPC::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ addi(r3, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ CmpS64(r3, r4);
@@ -260,7 +263,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
if (read_backward) {
- __ LoadU64(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r6, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r6, r6, r4);
__ CmpS64(current_input_offset(), r6);
BranchOrBacktrack(le, on_no_match);
@@ -365,7 +368,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -400,7 +403,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
// Check that there are enough characters left in the input.
if (read_backward) {
- __ LoadU64(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r6, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ add(r6, r6, r4);
__ CmpS64(current_input_offset(), r6);
BranchOrBacktrack(le, on_no_match);
@@ -524,8 +527,8 @@ void RegExpMacroAssemblerPPC::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -702,7 +705,8 @@ void RegExpMacroAssemblerPPC::PushRegExpBasePointer(Register stack_pointer,
__ mov(scratch, Operand(ref));
__ LoadU64(scratch, MemOperand(scratch));
__ SubS64(scratch, stack_pointer, scratch);
- __ StoreU64(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ StoreU64(scratch,
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerPPC::PopRegExpBasePointer(Register stack_pointer_out,
@@ -710,7 +714,7 @@ void RegExpMacroAssemblerPPC::PopRegExpBasePointer(Register stack_pointer_out,
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ LoadU64(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ mov(scratch, Operand(ref));
__ LoadU64(scratch, MemOperand(scratch));
__ AddS64(stack_pointer_out, stack_pointer_out, scratch);
@@ -745,31 +749,33 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
DCHECK(kRegExpCalleeSaved.has(end_of_input_address()));
DCHECK(kRegExpCalleeSaved.has(frame_pointer()));
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
+ // Emit code to start a new stack frame. In the following we push all
+ // callee-save registers (these end up above the fp) and all register
+ // arguments (these end up below the fp).
RegList registers_to_retain = kRegExpCalleeSaved;
- RegList argument_registers = {r3, r4, r5, r6, r7, r8, r9, r10};
__ mflr(r0);
__ push(r0);
- __ MultiPush(argument_registers | registers_to_retain);
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ addi(frame_pointer(), sp, Operand(8 * kSystemPointerSize));
+ __ MultiPush(registers_to_retain);
+ __ mr(frame_pointer(), sp);
- static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ RegList argument_registers = {r3, r4, r5, r6, r7, r8, r9, r10};
+ // Also push the frame marker.
+ __ mov(r0, Operand(StackFrame::TypeToMarker(StackFrame::IRREGEXP)));
+ __ push(r0);
+ __ MultiPush(argument_registers);
+
+ static_assert(kSuccessfulCapturesOffset ==
+ kInputStringOffset - kSystemPointerSize);
__ li(r3, Operand::Zero());
__ push(r3); // Make room for success counter and initialize it to 0.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ push(r3); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ push(r3); // The backtrack counter.
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ push(r3); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here
@@ -814,14 +820,15 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ AddS64(sp, sp, Operand(-num_registers_ * kSystemPointerSize), r0);
// Load string end.
- __ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ LoadU64(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
// Load input start.
- __ LoadU64(r3, MemOperand(frame_pointer(), kInputStart));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kInputStartOffset));
// Find negative length (offset of start relative to end).
__ sub(current_input_offset(), r3, end_of_input_address());
// Set r3 to address of char before start of the input string
// (effectively string position -1).
- __ LoadU64(r4, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStartIndexOffset));
__ subi(r3, current_input_offset(), Operand(char_size()));
if (mode_ == UC16) {
__ ShiftLeftU64(r0, r4, Operand(1));
@@ -831,7 +838,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ StoreU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ StoreU64(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -858,7 +865,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
__ addi(r4, frame_pointer(),
- Operand(kRegisterZero + kSystemPointerSize));
+ Operand(kRegisterZeroOffset + kSystemPointerSize));
__ mov(r5, Operand(num_saved_registers_));
__ mtctr(r5);
Label init_loop;
@@ -880,9 +887,9 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ LoadU64(r4, MemOperand(frame_pointer(), kInputStart));
- __ LoadU64(r3, MemOperand(frame_pointer(), kRegisterOutput));
- __ LoadU64(r5, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kInputStartOffset));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kRegisterOutputOffset));
+ __ LoadU64(r5, MemOperand(frame_pointer(), kStartIndexOffset));
__ sub(r4, end_of_input_address(), r4);
// r4 is length of input in bytes.
if (mode_ == UC16) {
@@ -921,12 +928,12 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ LoadU64(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ LoadU64(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ LoadU64(r5, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
+ __ LoadU64(r5, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Increment success counter.
__ addi(r3, r3, Operand(1));
- __ StoreU64(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ StoreU64(r3, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ subi(r4, r4, Operand(num_saved_registers_));
@@ -934,13 +941,13 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ cmpi(r4, Operand(num_saved_registers_));
__ blt(&return_r3);
- __ StoreU64(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ StoreU64(r4, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Advance the location for output.
__ addi(r5, r5, Operand(num_saved_registers_ * kIntSize));
- __ StoreU64(r5, MemOperand(frame_pointer(), kRegisterOutput));
+ __ StoreU64(r5, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Prepare r3 to initialize registers with its value in the next run.
- __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -972,7 +979,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Exit and return r3
__ bind(&exit_label_);
if (global()) {
- __ LoadU64(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
__ bind(&return_r3);
@@ -1012,7 +1019,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// String might have moved: Reload end of string from frame.
__ LoadU64(end_of_input_address(),
- MemOperand(frame_pointer(), kInputEnd));
+ MemOperand(frame_pointer(), kInputEndOffset));
SafeReturn();
}
@@ -1028,7 +1035,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments, r3);
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ cmpi(r3, Operand::Zero());
@@ -1191,7 +1198,7 @@ void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreU64(r3, register_location(reg), r0);
}
@@ -1232,7 +1239,7 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// RegExp code frame pointer.
__ mr(r5, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ mov(r4, Operand(masm_->CodeObject()));
// r3 will point to the return address, placed by DirectCEntry.
__ addi(r3, sp, Operand(kStackFrameExtraParamSlot * kSystemPointerSize));
@@ -1273,16 +1280,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerPPC::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<intptr_t>(re_frame, kStartIndex),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ frame_entry<intptr_t>(re_frame, kStartIndexOffset),
static_cast<RegExp::CallOrigin>(
- frame_entry<intptr_t>(re_frame, kDirectCall)),
+ frame_entry<intptr_t>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
@@ -1292,9 +1299,24 @@ MemOperand RegExpMacroAssemblerPPC::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kSystemPointerSize);
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
+void RegExpMacroAssemblerPPC::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
Label* on_outside_input) {
@@ -1302,7 +1324,7 @@ void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
__ CmpS64(current_input_offset(), Operand(-cp_offset * char_size()), r0);
BranchOrBacktrack(ge, on_outside_input);
} else {
- __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ addi(r3, current_input_offset(), Operand(cp_offset * char_size()));
__ CmpS64(r3, r4);
BranchOrBacktrack(le, on_outside_input);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index db2783ff72..5f4b24a78d 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -92,41 +92,61 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
private:
// Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- static const int kStoredRegisters = kFramePointer;
+ static constexpr int kStoredRegistersOffset = kFramePointerOffset;
// Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 7 * kSystemPointerSize;
- static const int kCallerFrame = kReturnAddress + kSystemPointerSize;
-
- // Below the frame pointer.
+ static constexpr int kReturnAddressOffset =
+ kStoredRegistersOffset + 7 * kSystemPointerSize;
+ static constexpr int kCallerFrameOffset =
+ kReturnAddressOffset + kSystemPointerSize;
+
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ (V8_EMBEDDED_CONSTANT_POOL_BOOL
+ ? kSystemPointerSize +
+ CommonFrameConstants::kContextOrFrameTypeOffset
+ : CommonFrameConstants::kContextOrFrameTypeOffset));
// Register parameters stored by setup code.
- static const int kIsolate = kFramePointer - kSystemPointerSize;
- static const int kDirectCall = kIsolate - kSystemPointerSize;
- static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
- static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
- static const int kInputStart = kInputEnd - kSystemPointerSize;
- static const int kStartIndex = kInputStart - kSystemPointerSize;
- static const int kInputString = kStartIndex - kSystemPointerSize;
+ static constexpr int kIsolateOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kDirectCallOffset - kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
+ static constexpr int kInputEndOffset =
+ kRegisterOutputOffset - kSystemPointerSize;
+ static constexpr int kInputStartOffset = kInputEndOffset - kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStartOffset - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kStartIndexOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kRegExpCodeSize = 1024;
+
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index ef0f153c68..d68d7fec26 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/regexp/regexp-ast.h"
+
#include "src/utils/ostreams.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@@ -193,6 +195,24 @@ void* RegExpUnparser::VisitClassRanges(RegExpClassRanges* that, void* data) {
return nullptr;
}
+void* RegExpUnparser::VisitClassSetOperand(RegExpClassSetOperand* that,
+ void* data) {
+ os_ << "![";
+ for (int i = 0; i < that->ranges()->length(); i++) {
+ if (i > 0) os_ << " ";
+ VisitCharacterRange(that->ranges()->at(i));
+ }
+ if (that->has_strings()) {
+ for (auto iter : *that->strings()) {
+ os_ << " '";
+ os_ << std::string(iter.first.begin(), iter.first.end());
+ os_ << "'";
+ }
+ }
+ os_ << "]";
+ return nullptr;
+}
+
void* RegExpUnparser::VisitClassSetExpression(RegExpClassSetExpression* that,
void* data) {
switch (that->operation()) {
@@ -362,6 +382,53 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
}
+RegExpClassSetOperand::RegExpClassSetOperand(ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings)
+ : ranges_(ranges), strings_(strings) {
+ DCHECK_NOT_NULL(ranges);
+ min_match_ = 0;
+ max_match_ = 0;
+ if (!ranges->is_empty()) {
+ min_match_ = 1;
+ max_match_ = 2;
+ }
+ if (has_strings()) {
+ for (auto string : *strings) {
+ min_match_ = std::min(min_match_, string.second->min_match());
+ max_match_ = std::max(max_match_, string.second->max_match());
+ }
+ }
+}
+
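(To make the min_match_/max_match_ folding in the constructor above concrete: a non-empty range set contributes a minimum of 1 and a maximum of 2 code units — presumably because a single code point may need two UTF-16 code units — and each string contributes its own bounds. A small standalone sketch with made-up values:)

#include <algorithm>

// Mirrors the folding above with plain ints; the concrete numbers are made up.
int SketchMinMax() {
  int min_match = 0, max_match = 0;
  // Ranges are present (e.g. [a-z]): one code point, up to two code units.
  min_match = 1;
  max_match = 2;
  // One string entry (e.g. "abc"): exactly three code units.
  constexpr int kStringMin = 3, kStringMax = 3;
  min_match = std::min(min_match, kStringMin);  // stays 1
  max_match = std::max(max_match, kStringMax);  // becomes 3
  return max_match - min_match;                 // 2, just to use the values
}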
+RegExpClassSetExpression::RegExpClassSetExpression(
+ OperationType op, bool is_negated, bool may_contain_strings,
+ ZoneList<RegExpTree*>* operands)
+ : operation_(op),
+ is_negated_(is_negated),
+ may_contain_strings_(may_contain_strings),
+ operands_(operands) {
+ DCHECK_NOT_NULL(operands);
+ DCHECK_IMPLIES(is_negated_, !may_contain_strings_);
+ max_match_ = 0;
+ for (auto op : *operands) {
+ max_match_ = std::max(max_match_, op->max_match());
+ }
+}
+
+// static
+RegExpClassSetExpression* RegExpClassSetExpression::Empty(Zone* zone,
+ bool is_negated) {
+ ZoneList<CharacterRange>* ranges =
+ zone->template New<ZoneList<CharacterRange>>(0, zone);
+ RegExpClassSetOperand* op =
+ zone->template New<RegExpClassSetOperand>(ranges, nullptr);
+ ZoneList<RegExpTree*>* operands =
+ zone->template New<ZoneList<RegExpTree*>>(1, zone);
+ operands->Add(op, zone);
+ return zone->template New<RegExpClassSetExpression>(
+ RegExpClassSetExpression::OperationType::kUnion, is_negated, false,
+ operands);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 450aa2d1d3..34f59f6c31 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -22,6 +22,7 @@ namespace internal {
VISIT(Alternative) \
VISIT(Assertion) \
VISIT(ClassRanges) \
+ VISIT(ClassSetOperand) \
VISIT(ClassSetExpression) \
VISIT(Atom) \
VISIT(Quantifier) \
@@ -365,45 +366,108 @@ class RegExpClassRanges final : public RegExpTree {
ClassRangesFlags class_ranges_flags_;
};
+struct CharacterClassStringLess {
+ bool operator()(const base::Vector<const base::uc32>& lhs,
+ const base::Vector<const base::uc32>& rhs) const {
+ // Longer strings first so we generate matches for the largest string
+ // possible.
+ if (lhs.length() != rhs.length()) {
+ return lhs.length() > rhs.length();
+ }
+ for (int i = 0; i < lhs.length(); i++) {
+ if (lhs[i] != rhs[i]) {
+ return lhs[i] < rhs[i];
+ }
+ }
+ return false;
+ }
+};
+
+// A type used for strings as part of character classes (only possible in
+// unicode sets mode).
+// We use a ZoneMap instead of an UnorderedZoneMap because we need to match
+// the longest alternatives first. By using a ZoneMap with the custom comparator
+// we can avoid sorting before assembling the code.
+// Strings are likely short (the largest string in current unicode properties
+// consists of 10 code points).
+using CharacterClassStrings = ZoneMap<base::Vector<const base::uc32>,
+ RegExpTree*, CharacterClassStringLess>;
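
A self-contained sketch of why this longest-first ordering removes the need for a later sort; std::map and std::u32string stand in here for ZoneMap and base::Vector<const base::uc32>, but the comparator logic is the same:

    #include <iostream>
    #include <map>
    #include <string>

    struct LongestFirstLess {
      bool operator()(const std::u32string& lhs, const std::u32string& rhs) const {
        // Longer strings first, so generated alternatives try the longest
        // match before any of its prefixes.
        if (lhs.size() != rhs.size()) return lhs.size() > rhs.size();
        return lhs < rhs;  // same length: plain element-wise comparison
      }
    };

    int main() {
      std::map<std::u32string, int, LongestFirstLess> strings;
      strings.emplace(U"ab", 0);
      strings.emplace(U"abc", 1);
      strings.emplace(U"b", 2);
      // Iteration order is "abc", "ab", "b"; printing the lengths gives 3 2 1.
      for (const auto& entry : strings) std::cout << entry.first.size() << ' ';
    }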
+
+// TODO(pthier): If we are sure we don't want to use icu::UnicodeSets
+// (performance evaluation pending), this class can be merged with
+// RegExpClassRanges.
+class RegExpClassSetOperand final : public RegExpTree {
+ public:
+ RegExpClassSetOperand(ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings);
+
+ DECL_BOILERPLATE(ClassSetOperand);
+
+ bool IsTextElement() override { return true; }
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+
+ void Union(RegExpClassSetOperand* other, Zone* zone);
+ void Intersect(RegExpClassSetOperand* other,
+ ZoneList<CharacterRange>* temp_ranges, Zone* zone);
+ void Subtract(RegExpClassSetOperand* other,
+ ZoneList<CharacterRange>* temp_ranges, Zone* zone);
+
+ bool has_strings() const { return strings_ != nullptr && !strings_->empty(); }
+ ZoneList<CharacterRange>* ranges() { return ranges_; }
+ CharacterClassStrings* strings() {
+ DCHECK_NOT_NULL(strings_);
+ return strings_;
+ }
+
+ private:
+ ZoneList<CharacterRange>* ranges_;
+ CharacterClassStrings* strings_;
+ int min_match_;
+ int max_match_;
+};
+
class RegExpClassSetExpression final : public RegExpTree {
public:
enum class OperationType { kUnion, kIntersection, kSubtraction };
RegExpClassSetExpression(OperationType op, bool is_negated,
- ZoneList<RegExpTree*>* operands)
- : operation_(op), is_negated_(is_negated), operands_(operands) {}
+ bool may_contain_strings,
+ ZoneList<RegExpTree*>* operands);
DECL_BOILERPLATE(ClassSetExpression);
+ // Create an empty class set expression (matches everything if |is_negated|,
+ // nothing otherwise).
+ static RegExpClassSetExpression* Empty(Zone* zone, bool is_negated);
+
bool IsTextElement() override { return true; }
- // At least 1 character is consumed.
- int min_match() override { return 1; }
- // Up to two code points might be consumed.
- int max_match() override { return 2; }
+ int min_match() override { return 0; }
+ int max_match() override { return max_match_; }
OperationType operation() const { return operation_; }
bool is_negated() const { return is_negated_; }
+ bool may_contain_strings() const { return may_contain_strings_; }
const ZoneList<RegExpTree*>* operands() const { return operands_; }
+ ZoneList<RegExpTree*>* operands() { return operands_; }
private:
- RegExpClassRanges* ToCharacterClass(Zone* zone);
-
// Recursively evaluates the tree rooted at |root|, computing the valid
- // CharacterRanges after applying all set operations and storing the result in
- // |result_ranges|. |temp_ranges| is list used for intermediate results,
- // passed as parameter to avoid allocating new lists all the time.
- static void ComputeCharacterRanges(RegExpTree* root,
- ZoneList<CharacterRange>* result_ranges,
- ZoneList<CharacterRange>* temp_ranges,
- Zone* zone);
+ // CharacterRanges and strings after applying all set operations.
+ // The original tree will be modified by this method, so don't store pointers
+ // to inner nodes of the tree somewhere else!
+ // Modifying the tree in-place saves memory and speeds up multiple calls of
+ // the method (e.g. when unrolling quantifiers).
+ // |temp_ranges| is used for intermediate results, passed as parameter to
+ // avoid allocating new lists all the time.
+ static RegExpClassSetOperand* ComputeExpression(
+ RegExpTree* root, ZoneList<CharacterRange>* temp_ranges, Zone* zone);
const OperationType operation_;
const bool is_negated_;
+ const bool may_contain_strings_;
ZoneList<RegExpTree*>* operands_ = nullptr;
-#ifdef ENABLE_SLOW_DCHECKS
- // Cache ranges for each node during computation for (slow) DCHECKs.
- ZoneList<CharacterRange>* ranges_ = nullptr;
-#endif
+ int max_match_;
};
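
A hand-worked example of the new match-length bounds (illustrative, not taken from the CL): for /[\q{abc|}x]/v the operand holds the range {x} and the strings {"abc", ""}, so the RegExpClassSetOperand constructor starts from min 1 / max 2 for the range and then widens over the string trees to min_match = 0 (the empty alternative) and max_match = 3. RegExpClassSetExpression::max_match() in turn propagates the maximum over its operands, replacing the old fixed 1..2 bounds, while min_match() conservatively returns 0.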
class RegExpAtom final : public RegExpTree {
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index 265fc3d1cc..3258bb5149 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -134,7 +134,8 @@ bool RegExpClassRanges::is_standard(Zone* zone) {
UnicodeRangeSplitter::UnicodeRangeSplitter(ZoneList<CharacterRange>* base) {
// The unicode range splitter categorizes given character ranges into:
// - Code points from the BMP representable by one code unit.
- // - Code points outside the BMP that need to be split into surrogate pairs.
+ // - Code points outside the BMP that need to be split into
+ // surrogate pairs.
// - Lone lead surrogates.
// - Lone trail surrogates.
  // Lone surrogates are valid code points, even though they are not actual characters.
@@ -535,9 +536,158 @@ RegExpNode* RegExpClassRanges::ToNode(RegExpCompiler* compiler,
return result;
}
+RegExpNode* RegExpClassSetOperand::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ Zone* zone = compiler->zone();
+ const int size = (has_strings() ? static_cast<int>(strings()->size()) : 0) +
+ (ranges()->is_empty() ? 0 : 1);
+ if (size == 0) {
+ // If neither ranges nor strings are present, the operand is equal to an
+ // empty range (matching nothing).
+ ZoneList<CharacterRange>* empty =
+ zone->template New<ZoneList<CharacterRange>>(0, zone);
+ return zone->template New<RegExpClassRanges>(zone, empty)
+ ->ToNode(compiler, on_success);
+ }
+ ZoneList<RegExpTree*>* alternatives =
+ zone->template New<ZoneList<RegExpTree*>>(size, zone);
+ // Strings are sorted by length first (larger strings before shorter ones).
+ // See the comment on CharacterClassStrings.
+ // Empty strings (if present) are added after character ranges.
+ RegExpTree* empty_string = nullptr;
+ if (has_strings()) {
+ for (auto string : *strings()) {
+ if (string.second->IsEmpty()) {
+ empty_string = string.second;
+ } else {
+ alternatives->Add(string.second, zone);
+ }
+ }
+ }
+ if (!ranges()->is_empty()) {
+ alternatives->Add(zone->template New<RegExpClassRanges>(zone, ranges()),
+ zone);
+ }
+ if (empty_string != nullptr) {
+ alternatives->Add(empty_string, zone);
+ }
+
+ RegExpTree* node = nullptr;
+ if (size == 1) {
+ DCHECK_EQ(alternatives->length(), 1);
+ node = alternatives->first();
+ } else {
+ node = zone->template New<RegExpDisjunction>(alternatives);
+ }
+ return node->ToNode(compiler, on_success);
+}
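
Worked through by hand (not compiler output): for an operand with strings {"abc", "ab", ""} and the range x-z, the loop above emits the alternatives in the order abc, ab, [x-z] and finally the empty string, i.e. roughly the disjunction abc|ab|[x-z]|, so the longest string is tried first when several alternatives could match and the empty alternative only applies as a last resort.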
+
RegExpNode* RegExpClassSetExpression::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return ToCharacterClass(compiler->zone())->ToNode(compiler, on_success);
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* temp_ranges =
+ zone->template New<ZoneList<CharacterRange>>(4, zone);
+ RegExpClassSetOperand* root = ComputeExpression(this, temp_ranges, zone);
+ return root->ToNode(compiler, on_success);
+}
+
+void RegExpClassSetOperand::Union(RegExpClassSetOperand* other, Zone* zone) {
+ ranges()->AddAll(*other->ranges(), zone);
+ if (other->has_strings()) {
+ if (strings_ == nullptr) {
+ strings_ = zone->template New<CharacterClassStrings>(zone);
+ }
+ strings()->insert(other->strings()->begin(), other->strings()->end());
+ }
+}
+
+void RegExpClassSetOperand::Intersect(RegExpClassSetOperand* other,
+ ZoneList<CharacterRange>* temp_ranges,
+ Zone* zone) {
+ CharacterRange::Intersect(ranges(), other->ranges(), temp_ranges, zone);
+ std::swap(*ranges(), *temp_ranges);
+ temp_ranges->Rewind(0);
+ if (has_strings()) {
+ if (!other->has_strings()) {
+ strings()->clear();
+ } else {
+ for (auto iter = strings()->begin(); iter != strings()->end();) {
+ if (other->strings()->find(iter->first) == other->strings()->end()) {
+ iter = strings()->erase(iter);
+ } else {
+ iter++;
+ }
+ }
+ }
+ }
+}
+
+void RegExpClassSetOperand::Subtract(RegExpClassSetOperand* other,
+ ZoneList<CharacterRange>* temp_ranges,
+ Zone* zone) {
+ CharacterRange::Subtract(ranges(), other->ranges(), temp_ranges, zone);
+ std::swap(*ranges(), *temp_ranges);
+ temp_ranges->Rewind(0);
+ if (has_strings() && other->has_strings()) {
+ for (auto iter = strings()->begin(); iter != strings()->end();) {
+ if (other->strings()->find(iter->first) != other->strings()->end()) {
+ iter = strings()->erase(iter);
+ } else {
+ iter++;
+ }
+ }
+ }
+}
+
+// static
+RegExpClassSetOperand* RegExpClassSetExpression::ComputeExpression(
+ RegExpTree* root, ZoneList<CharacterRange>* temp_ranges, Zone* zone) {
+ DCHECK(temp_ranges->is_empty());
+ if (root->IsClassSetOperand()) {
+ return root->AsClassSetOperand();
+ }
+ DCHECK(root->IsClassSetExpression());
+ RegExpClassSetExpression* node = root->AsClassSetExpression();
+ RegExpClassSetOperand* result =
+ ComputeExpression(node->operands()->at(0), temp_ranges, zone);
+ switch (node->operation()) {
+ case OperationType::kUnion: {
+ for (int i = 1; i < node->operands()->length(); i++) {
+ RegExpClassSetOperand* op =
+ ComputeExpression(node->operands()->at(i), temp_ranges, zone);
+ result->Union(op, zone);
+ }
+ CharacterRange::Canonicalize(result->ranges());
+ break;
+ }
+ case OperationType::kIntersection: {
+ for (int i = 1; i < node->operands()->length(); i++) {
+ RegExpClassSetOperand* op =
+ ComputeExpression(node->operands()->at(i), temp_ranges, zone);
+ result->Intersect(op, temp_ranges, zone);
+ }
+ break;
+ }
+ case OperationType::kSubtraction: {
+ for (int i = 1; i < node->operands()->length(); i++) {
+ RegExpClassSetOperand* op =
+ ComputeExpression(node->operands()->at(i), temp_ranges, zone);
+ result->Subtract(op, temp_ranges, zone);
+ }
+ break;
+ }
+ }
+ if (node->is_negated()) {
+ DCHECK(!result->has_strings());
+ CharacterRange::Negate(result->ranges(), temp_ranges, zone);
+ std::swap(*result->ranges(), *temp_ranges);
+ temp_ranges->Rewind(0);
+ }
+  // Store the result as the single operand of the current node.
+ node->operands()->Set(0, result);
+ node->operands()->Rewind(1);
+
+ return result;
}
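
The reduction above can be pictured with ordinary set containers. The sketch below mirrors the fold over operands for the /v pattern [[a-f]&&[c-h]]; std::set<char32_t> stands in for the canonical CharacterRange lists plus string table, and negation and the in-place rewrite of the operand list (which lets repeated calls, e.g. when unrolling quantifiers, skip the set operations) are left out:

    #include <algorithm>
    #include <iterator>
    #include <set>

    using CodePoints = std::set<char32_t>;

    // Mirrors OperationType::kUnion.
    CodePoints Union(CodePoints a, const CodePoints& b) {
      a.insert(b.begin(), b.end());
      return a;
    }

    // Mirrors OperationType::kIntersection.
    CodePoints Intersect(const CodePoints& a, const CodePoints& b) {
      CodePoints out;
      std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                            std::inserter(out, out.end()));
      return out;
    }

    // Mirrors OperationType::kSubtraction.
    CodePoints Subtract(const CodePoints& a, const CodePoints& b) {
      CodePoints out;
      std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                          std::inserter(out, out.end()));
      return out;
    }

    int main() {
      // The first operand seeds the result; every further operand is folded in
      // with the node's operation, as in the switch above.
      CodePoints result = {'a', 'b', 'c', 'd', 'e', 'f'};
      result = Intersect(result, {'c', 'd', 'e', 'f', 'g', 'h'});  // c-f remain
      return result.size() == 4 ? 0 : 1;
    }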
namespace {
@@ -1498,128 +1648,6 @@ void CharacterSet::Canonicalize() {
CharacterRange::Canonicalize(ranges_);
}
-RegExpClassRanges* RegExpClassSetExpression::ToCharacterClass(Zone* zone) {
- ZoneList<CharacterRange>* result_ranges =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- ZoneList<CharacterRange>* temp_ranges =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- ComputeCharacterRanges(this, result_ranges, temp_ranges, zone);
- return zone->template New<RegExpClassRanges>(zone, result_ranges);
-}
-
-// static
-void RegExpClassSetExpression::ComputeCharacterRanges(
- RegExpTree* root, ZoneList<CharacterRange>* result_ranges,
- ZoneList<CharacterRange>* temp_ranges, Zone* zone) {
- DCHECK_EQ(temp_ranges->length(), 0);
- DCHECK(root->IsClassRanges() || root->IsClassSetExpression());
- if (root->IsClassRanges()) {
- DCHECK(!root->AsClassRanges()->is_negated());
- ZoneList<CharacterRange>* ranges = root->AsClassRanges()->ranges(zone);
- CharacterRange::Canonicalize(ranges);
- result_ranges->AddAll(*ranges, zone);
- return;
- }
- RegExpClassSetExpression* node = root->AsClassSetExpression();
- switch (node->operation()) {
- case OperationType::kUnion: {
- ZoneList<CharacterRange>* op_ranges =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- for (int i = 0; i < node->operands()->length(); i++) {
- RegExpTree* op = node->operands()->at(i);
- ComputeCharacterRanges(op, op_ranges, temp_ranges, zone);
- result_ranges->AddAll(*op_ranges, zone);
- op_ranges->Rewind(0);
- }
- CharacterRange::Canonicalize(result_ranges);
- break;
- }
- case OperationType::kIntersection: {
- ZoneList<CharacterRange>* op_ranges =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- ComputeCharacterRanges(node->operands()->at(0), op_ranges, temp_ranges,
- zone);
- result_ranges->AddAll(*op_ranges, zone);
- op_ranges->Rewind(0);
- for (int i = 1; i < node->operands()->length(); i++) {
- ComputeCharacterRanges(node->operands()->at(i), op_ranges, temp_ranges,
- zone);
- CharacterRange::Intersect(result_ranges, op_ranges, temp_ranges, zone);
- std::swap(*result_ranges, *temp_ranges);
- temp_ranges->Rewind(0);
- op_ranges->Rewind(0);
- }
- break;
- }
- case OperationType::kSubtraction: {
- ZoneList<CharacterRange>* op_ranges =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- ComputeCharacterRanges(node->operands()->at(0), op_ranges, temp_ranges,
- zone);
- result_ranges->AddAll(*op_ranges, zone);
- op_ranges->Rewind(0);
- for (int i = 1; i < node->operands()->length(); i++) {
- ComputeCharacterRanges(node->operands()->at(i), op_ranges, temp_ranges,
- zone);
- CharacterRange::Subtract(result_ranges, op_ranges, temp_ranges, zone);
- std::swap(*result_ranges, *temp_ranges);
- temp_ranges->Rewind(0);
- op_ranges->Rewind(0);
- }
-#ifdef ENABLE_SLOW_DCHECKS
- // Check that the result is equal to subtracting the union of all RHS
- // operands from the LHS operand.
- // TODO(pthier): It is unclear whether this variant is faster or slower
- // than subtracting multiple ranges in practice.
- ZoneList<CharacterRange>* lhs_range =
- node->operands()->at(0)->IsClassRanges()
- ? node->operands()->at(0)->AsClassRanges()->ranges(zone)
- : node->operands()->at(0)->AsClassSetExpression()->ranges_;
- ZoneList<CharacterRange>* rhs_union =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- for (int i = 1; i < node->operands()->length(); i++) {
- ZoneList<CharacterRange>* op_range =
- node->operands()->at(i)->IsClassRanges()
- ? node->operands()->at(i)->AsClassRanges()->ranges(zone)
- : node->operands()->at(i)->AsClassSetExpression()->ranges_;
- rhs_union->AddAll(*op_range, zone);
- }
- CharacterRange::Canonicalize(rhs_union);
- ZoneList<CharacterRange>* ranges_check =
- zone->template New<ZoneList<CharacterRange>>(2, zone);
- CharacterRange::Subtract(lhs_range, rhs_union, ranges_check, zone);
- DCHECK(CharacterRange::Equals(result_ranges, ranges_check));
-
- // Check that the result is equal to intersecting the LHS operand with the
- // complemented union of all RHS operands
- ZoneList<CharacterRange>* rhs_union_negated =
- zone->template New<ZoneList<CharacterRange>>(rhs_union->length(),
- zone);
- CharacterRange::Negate(rhs_union, rhs_union_negated, zone);
- ranges_check->Rewind(0);
- CharacterRange::Intersect(lhs_range, rhs_union_negated, ranges_check,
- zone);
- DCHECK(CharacterRange::Equals(result_ranges, ranges_check));
-#endif
- break;
- }
- }
-
- if (node->is_negated()) {
- CharacterRange::Negate(result_ranges, temp_ranges, zone);
- std::swap(*result_ranges, *temp_ranges);
- temp_ranges->Rewind(0);
- }
-
- DCHECK_EQ(temp_ranges->length(), 0);
-
-#ifdef ENABLE_SLOW_DCHECKS
- // Cache results for DCHECKs.
- node->ranges_ =
- zone->template New<ZoneList<CharacterRange>>(*result_ranges, zone);
-#endif
-}
-
// static
void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
if (character_ranges->length() <= 1) return;
@@ -1740,6 +1768,9 @@ void CharacterRange::Subtract(const ZoneList<CharacterRange>* src,
DCHECK(CharacterRange::IsCanonical(src));
DCHECK(CharacterRange::IsCanonical(to_remove));
DCHECK_EQ(0, result->length());
+
+ if (src->is_empty()) return;
+
int src_index = 0;
int to_remove_index = 0;
base::uc32 from = src->at(src_index).from();
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 6b76c8ab1a..ab2e9f1962 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -117,8 +117,8 @@ using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
//
// Actual regular expression code generation.
//
-// Code generation is actually more complicated than the above. In order
-// to improve the efficiency of the generated code some optimizations are
+// Code generation is actually more complicated than the above. In order to
+// improve the efficiency of the generated code some optimizations are
// performed
//
// * Choice nodes have 1-character lookahead.
diff --git a/deps/v8/src/regexp/regexp-error.h b/deps/v8/src/regexp/regexp-error.h
index 5c4ea28d7d..1cec429435 100644
--- a/deps/v8/src/regexp/regexp-error.h
+++ b/deps/v8/src/regexp/regexp-error.h
@@ -44,7 +44,9 @@ namespace internal {
T(UnterminatedCharacterClass, "Unterminated character class") \
T(OutOfOrderCharacterClass, "Range out of order in character class") \
T(InvalidClassSetOperation, "Invalid set operation in character class") \
- T(InvalidCharacterInClass, "Invalid character in character class")
+ T(InvalidCharacterInClass, "Invalid character in character class") \
+ T(NegatedCharacterClassWithStrings, \
+ "Negated character class may contain strings")
enum class RegExpError : uint32_t {
#define TEMPLATE(NAME, STRING) k##NAME,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 672b1785df..26c405cd69 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -283,12 +283,12 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() const {
// static
int NativeRegExpMacroAssembler::CheckStackGuardState(
Isolate* isolate, int start_index, RegExp::CallOrigin call_origin,
- Address* return_address, Code re_code, Address* subject,
+ Address* return_address, InstructionStream re_code, Address* subject,
const byte** input_start, const byte** input_end) {
DisallowGarbageCollection no_gc;
Address old_pc = PointerAuthentication::AuthenticatePC(return_address, 0);
- DCHECK_LE(re_code.raw_instruction_start(), old_pc);
- DCHECK_LE(old_pc, re_code.raw_instruction_end());
+ DCHECK_LE(re_code.instruction_start(), old_pc);
+ DCHECK_LE(old_pc, re_code.code(kAcquireLoad).InstructionEnd());
StackLimitCheck check(isolate);
bool js_has_overflowed = check.JsHasOverflowed();
@@ -315,7 +315,7 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
// Prepare for possible GC.
HandleScope handles(isolate);
- Handle<Code> code_handle(re_code, isolate);
+ Handle<InstructionStream> code_handle(re_code, isolate);
Handle<String> subject_handle(String::cast(Object(*subject)), isolate);
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject_handle);
int return_value = 0;
@@ -431,7 +431,7 @@ int NativeRegExpMacroAssembler::Execute(
RegExpStackScope stack_scope(isolate);
bool is_one_byte = String::IsOneByteRepresentationUnderneath(input);
- CodeT code = CodeT::cast(regexp.code(is_one_byte));
+ Code code = Code::cast(regexp.code(is_one_byte));
RegExp::CallOrigin call_origin = RegExp::CallOrigin::kFromRuntime;
using RegexpMatcherSig =
@@ -440,7 +440,7 @@ int NativeRegExpMacroAssembler::Execute(
const byte* input_end, int* output, int output_size, int call_origin,
Isolate* isolate, Address regexp);
- auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
+ auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(isolate, code);
int result = fn.Call(input.ptr(), start_offset, input_start, input_end,
output, output_size, call_origin, isolate, regexp.ptr());
DCHECK_GE(result, SMALLEST_REGEXP_RESULT);
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index e75489398f..2ba9e2d28d 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#include "src/base/strings.h"
+#include "src/execution/frame-constants.h"
#include "src/objects/fixed-array.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp.h"
@@ -330,8 +331,9 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// Called from generated code.
static int CheckStackGuardState(Isolate* isolate, int start_index,
RegExp::CallOrigin call_origin,
- Address* return_address, Code re_code,
- Address* subject, const byte** input_start,
+ Address* return_address,
+ InstructionStream re_code, Address* subject,
+ const byte** input_start,
const byte** input_end);
static Address word_character_map_address() {
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 78515b69ab..ef7ae24e27 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -18,6 +18,9 @@
#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
+#include "unicode/unistr.h"
+#include "unicode/usetiter.h"
+#include "unicode/utf16.h" // For U16_NEXT
#endif // V8_INTL_SUPPORT
namespace v8 {
@@ -42,6 +45,254 @@ enum class ClassSetOperandType {
kClassSetRange
};
+class RegExpTextBuilder {
+ public:
+ using SmallRegExpTreeVector =
+ base::SmallVector<RegExpTree*, 8, ZoneAllocator<RegExpTree*>>;
+
+ RegExpTextBuilder(Zone* zone, SmallRegExpTreeVector* terms_storage,
+ RegExpFlags flags)
+ : zone_(zone),
+ flags_(flags),
+ terms_(terms_storage),
+ text_(ZoneAllocator<RegExpTree*>{zone}) {}
+ void AddCharacter(base::uc16 character);
+ void AddUnicodeCharacter(base::uc32 character);
+ void AddEscapedUnicodeCharacter(base::uc32 character);
+ void AddAtom(RegExpTree* atom);
+ void AddTerm(RegExpTree* term);
+ void AddClassRanges(RegExpClassRanges* cc);
+ void FlushPendingSurrogate();
+ void FlushText();
+ RegExpTree* PopLastAtom();
+ RegExpTree* ToRegExp();
+
+ private:
+ static const base::uc16 kNoPendingSurrogate = 0;
+
+ void AddLeadSurrogate(base::uc16 lead_surrogate);
+ void AddTrailSurrogate(base::uc16 trail_surrogate);
+ void FlushCharacters();
+ bool NeedsDesugaringForUnicode(RegExpClassRanges* cc);
+ bool NeedsDesugaringForIgnoreCase(base::uc32 c);
+ void AddClassRangesForDesugaring(base::uc32 c);
+ bool ignore_case() const { return IsIgnoreCase(flags_); }
+ bool IsUnicodeMode() const {
+ // Either /v or /u enables UnicodeMode
+ // TODO(v8:11935): Change permalink once proposal is in stage 4.
+ // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-parsepattern
+ return IsUnicode(flags_) || IsUnicodeSets(flags_);
+ }
+ Zone* zone() const { return zone_; }
+
+ Zone* const zone_;
+ const RegExpFlags flags_;
+ ZoneList<base::uc16>* characters_ = nullptr;
+ base::uc16 pending_surrogate_ = kNoPendingSurrogate;
+ SmallRegExpTreeVector* terms_;
+ SmallRegExpTreeVector text_;
+};
+
+void RegExpTextBuilder::AddLeadSurrogate(base::uc16 lead_surrogate) {
+ DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
+ FlushPendingSurrogate();
+ // Hold onto the lead surrogate, waiting for a trail surrogate to follow.
+ pending_surrogate_ = lead_surrogate;
+}
+
+void RegExpTextBuilder::AddTrailSurrogate(base::uc16 trail_surrogate) {
+ DCHECK(unibrow::Utf16::IsTrailSurrogate(trail_surrogate));
+ if (pending_surrogate_ != kNoPendingSurrogate) {
+ base::uc16 lead_surrogate = pending_surrogate_;
+ pending_surrogate_ = kNoPendingSurrogate;
+ DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
+ base::uc32 combined =
+ unibrow::Utf16::CombineSurrogatePair(lead_surrogate, trail_surrogate);
+ if (NeedsDesugaringForIgnoreCase(combined)) {
+ AddClassRangesForDesugaring(combined);
+ } else {
+ ZoneList<base::uc16> surrogate_pair(2, zone());
+ surrogate_pair.Add(lead_surrogate, zone());
+ surrogate_pair.Add(trail_surrogate, zone());
+ RegExpAtom* atom =
+ zone()->New<RegExpAtom>(surrogate_pair.ToConstVector());
+ AddAtom(atom);
+ }
+ } else {
+ pending_surrogate_ = trail_surrogate;
+ FlushPendingSurrogate();
+ }
+}
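
For reference, the pair combination this relies on is plain UTF-16 arithmetic. A self-contained sketch (unibrow::Utf16::CombineSurrogatePair itself is not shown in this diff, so the helper below is a stand-in):

    #include <cassert>
    #include <cstdint>

    // Standard UTF-16 surrogate-pair math, written out for illustration.
    constexpr uint32_t Combine(uint16_t lead, uint16_t trail) {
      return ((lead - 0xD800u) << 10) + (trail - 0xDC00u) + 0x10000u;
    }

    int main() {
      // U+1F600 (grinning face) encodes as the pair 0xD83D 0xDE00.
      static_assert(Combine(0xD83D, 0xDE00) == 0x1F600u);
      // The first non-BMP code point, for comparison. A lead surrogate with no
      // following trail surrogate stays pending and is later flushed as a
      // lone-surrogate class range (see FlushPendingSurrogate).
      assert(Combine(0xD800, 0xDC00) == 0x10000u);
    }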
+
+void RegExpTextBuilder::FlushPendingSurrogate() {
+ if (pending_surrogate_ != kNoPendingSurrogate) {
+ DCHECK(IsUnicodeMode());
+ base::uc32 c = pending_surrogate_;
+ pending_surrogate_ = kNoPendingSurrogate;
+ AddClassRangesForDesugaring(c);
+ }
+}
+
+void RegExpTextBuilder::FlushCharacters() {
+ FlushPendingSurrogate();
+ if (characters_ != nullptr) {
+ RegExpTree* atom = zone()->New<RegExpAtom>(characters_->ToConstVector());
+ characters_ = nullptr;
+ text_.emplace_back(atom);
+ }
+}
+
+void RegExpTextBuilder::FlushText() {
+ FlushCharacters();
+ size_t num_text = text_.size();
+ if (num_text == 0) {
+ return;
+ } else if (num_text == 1) {
+ terms_->emplace_back(text_.back());
+ } else {
+ RegExpText* text = zone()->New<RegExpText>(zone());
+ for (size_t i = 0; i < num_text; i++) {
+ text_[i]->AppendToText(text, zone());
+ }
+ terms_->emplace_back(text);
+ }
+ text_.clear();
+}
+
+void RegExpTextBuilder::AddCharacter(base::uc16 c) {
+ FlushPendingSurrogate();
+ if (NeedsDesugaringForIgnoreCase(c)) {
+ AddClassRangesForDesugaring(c);
+ } else {
+ if (characters_ == nullptr) {
+ characters_ = zone()->New<ZoneList<base::uc16>>(4, zone());
+ }
+ characters_->Add(c, zone());
+ }
+}
+
+void RegExpTextBuilder::AddUnicodeCharacter(base::uc32 c) {
+ if (c > static_cast<base::uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ DCHECK(IsUnicodeMode());
+ AddLeadSurrogate(unibrow::Utf16::LeadSurrogate(c));
+ AddTrailSurrogate(unibrow::Utf16::TrailSurrogate(c));
+ } else if (IsUnicodeMode() && unibrow::Utf16::IsLeadSurrogate(c)) {
+ AddLeadSurrogate(c);
+ } else if (IsUnicodeMode() && unibrow::Utf16::IsTrailSurrogate(c)) {
+ AddTrailSurrogate(c);
+ } else {
+ AddCharacter(static_cast<base::uc16>(c));
+ }
+}
+
+void RegExpTextBuilder::AddEscapedUnicodeCharacter(base::uc32 character) {
+ // A lead or trail surrogate parsed via escape sequence will not
+ // pair up with any preceding lead or following trail surrogate.
+ FlushPendingSurrogate();
+ AddUnicodeCharacter(character);
+ FlushPendingSurrogate();
+}
+
+void RegExpTextBuilder::AddClassRanges(RegExpClassRanges* cr) {
+ if (NeedsDesugaringForUnicode(cr)) {
+ // With /u or /v, character class needs to be desugared, so it
+ // must be a standalone term instead of being part of a RegExpText.
+ AddTerm(cr);
+ } else {
+ AddAtom(cr);
+ }
+}
+
+void RegExpTextBuilder::AddClassRangesForDesugaring(base::uc32 c) {
+ AddTerm(zone()->New<RegExpClassRanges>(
+ zone(), CharacterRange::List(zone(), CharacterRange::Singleton(c))));
+}
+
+void RegExpTextBuilder::AddAtom(RegExpTree* atom) {
+ DCHECK(atom->IsTextElement());
+ FlushCharacters();
+ text_.emplace_back(atom);
+}
+
+void RegExpTextBuilder::AddTerm(RegExpTree* term) {
+ DCHECK(term->IsTextElement());
+ FlushText();
+ terms_->emplace_back(term);
+}
+
+bool RegExpTextBuilder::NeedsDesugaringForUnicode(RegExpClassRanges* cc) {
+ if (!IsUnicodeMode()) return false;
+ // TODO(yangguo): we could be smarter than this. Case-insensitivity does not
+ // necessarily mean that we need to desugar. It's probably nicer to have a
+ // separate pass to figure out unicode desugarings.
+ if (ignore_case()) return true;
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+ CharacterRange::Canonicalize(ranges);
+
+ if (cc->is_negated()) {
+ ZoneList<CharacterRange>* negated_ranges =
+ zone()->New<ZoneList<CharacterRange>>(ranges->length(), zone());
+ CharacterRange::Negate(ranges, negated_ranges, zone());
+ ranges = negated_ranges;
+ }
+
+ for (int i = ranges->length() - 1; i >= 0; i--) {
+ base::uc32 from = ranges->at(i).from();
+ base::uc32 to = ranges->at(i).to();
+ // Check for non-BMP characters.
+ if (to >= kNonBmpStart) return true;
+ // Check for lone surrogates.
+ if (from <= kTrailSurrogateEnd && to >= kLeadSurrogateStart) return true;
+ }
+ return false;
+}
+
+bool RegExpTextBuilder::NeedsDesugaringForIgnoreCase(base::uc32 c) {
+#ifdef V8_INTL_SUPPORT
+ if (IsUnicodeMode() && ignore_case()) {
+ icu::UnicodeSet set(c, c);
+ set.closeOver(USET_CASE_INSENSITIVE);
+ set.removeAllStrings();
+ return set.size() > 1;
+ }
+ // In the case where ICU is not included, we act as if the unicode flag is
+ // not set, and do not desugar.
+#endif // V8_INTL_SUPPORT
+ return false;
+}
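
Two hand-worked cases for the check above (based on ICU's case-insensitive closure, not on output from this CL): under /ui the character '1' closes over to just itself, so set.size() == 1 and it stays a plain literal, while 'k' closes over to {k, K, U+212A KELVIN SIGN}, so set.size() == 3 and the character is desugared into a class-ranges term covering all three.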
+
+RegExpTree* RegExpTextBuilder::PopLastAtom() {
+ FlushPendingSurrogate();
+ RegExpTree* atom;
+ if (characters_ != nullptr) {
+ base::Vector<const base::uc16> char_vector = characters_->ToConstVector();
+ int num_chars = char_vector.length();
+ if (num_chars > 1) {
+ base::Vector<const base::uc16> prefix =
+ char_vector.SubVector(0, num_chars - 1);
+ text_.emplace_back(zone()->New<RegExpAtom>(prefix));
+ char_vector = char_vector.SubVector(num_chars - 1, num_chars);
+ }
+ characters_ = nullptr;
+ atom = zone()->New<RegExpAtom>(char_vector);
+ return atom;
+ } else if (text_.size() > 0) {
+ atom = text_.back();
+ text_.pop_back();
+ return atom;
+ }
+ return nullptr;
+}
+
+RegExpTree* RegExpTextBuilder::ToRegExp() {
+ FlushText();
+ size_t num_alternatives = terms_->size();
+ if (num_alternatives == 0) return zone()->New<RegExpEmpty>();
+ if (num_alternatives == 1) return terms_->back();
+ return zone()->New<RegExpAlternative>(zone()->New<ZoneList<RegExpTree*>>(
+ base::VectorOf(terms_->begin(), terms_->size()), zone()));
+}
+
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder {
public:
@@ -49,8 +300,8 @@ class RegExpBuilder {
: zone_(zone),
flags_(flags),
terms_(ZoneAllocator<RegExpTree*>{zone}),
- text_(ZoneAllocator<RegExpTree*>{zone}),
- alternatives_(ZoneAllocator<RegExpTree*>{zone}) {}
+ alternatives_(ZoneAllocator<RegExpTree*>{zone}),
+ text_builder_(RegExpTextBuilder{zone, &terms_, flags}) {}
void AddCharacter(base::uc16 character);
void AddUnicodeCharacter(base::uc32 character);
void AddEscapedUnicodeCharacter(base::uc32 character);
@@ -58,7 +309,6 @@ class RegExpBuilder {
// following quantifier
void AddEmpty();
void AddClassRanges(RegExpClassRanges* cc);
- void AddClassRangesForDesugaring(base::uc32 c);
void AddAtom(RegExpTree* tree);
void AddTerm(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
@@ -74,14 +324,7 @@ class RegExpBuilder {
bool dotall() const { return IsDotAll(flags_); }
private:
- static const base::uc16 kNoPendingSurrogate = 0;
- void AddLeadSurrogate(base::uc16 lead_surrogate);
- void AddTrailSurrogate(base::uc16 trail_surrogate);
- void FlushPendingSurrogate();
- void FlushCharacters();
void FlushTerms();
- bool NeedsDesugaringForUnicode(RegExpClassRanges* cc);
- bool NeedsDesugaringForIgnoreCase(base::uc32 c);
bool IsUnicodeMode() const {
// Either /v or /u enable UnicodeMode
// TODO(v8:11935): Change permalink once proposal is in stage 4.
@@ -89,30 +332,17 @@ class RegExpBuilder {
return IsUnicode(flags_) || IsUnicodeSets(flags_);
}
Zone* zone() const { return zone_; }
+ RegExpTextBuilder& text_builder() { return text_builder_; }
Zone* const zone_;
bool pending_empty_ = false;
const RegExpFlags flags_;
- ZoneList<base::uc16>* characters_ = nullptr;
- base::uc16 pending_surrogate_ = kNoPendingSurrogate;
using SmallRegExpTreeVector =
base::SmallVector<RegExpTree*, 8, ZoneAllocator<RegExpTree*>>;
SmallRegExpTreeVector terms_;
- SmallRegExpTreeVector text_;
SmallRegExpTreeVector alternatives_;
-#ifdef DEBUG
- enum {
- ADD_NONE,
- ADD_CHAR,
- ADD_TERM,
- ADD_ASSERT,
- ADD_ATOM
- } last_added_ = ADD_NONE;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
+ RegExpTextBuilder text_builder_;
};
enum SubexpressionType {
@@ -221,7 +451,8 @@ class RegExpParserImpl final {
bool ParsePropertyClassName(ZoneVector<char>* name_1,
ZoneVector<char>* name_2);
- bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
+ bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to_range,
+ CharacterClassStrings* add_to_strings, bool negate,
const ZoneVector<char>& name_1,
const ZoneVector<char>& name_2);
@@ -236,14 +467,17 @@ class RegExpParserImpl final {
bool TryParseCharacterClassEscape(base::uc32 next,
InClassEscapeState in_class_escape_state,
ZoneList<CharacterRange>* ranges,
- Zone* zone,
+ CharacterClassStrings* strings, Zone* zone,
bool add_unicode_case_equivalents);
- RegExpTree* ParseClassStringDisjunction();
+ RegExpTree* ParseClassStringDisjunction(ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings);
RegExpTree* ParseClassSetOperand(const RegExpBuilder* builder,
ClassSetOperandType* type_out);
RegExpTree* ParseClassSetOperand(const RegExpBuilder* builder,
ClassSetOperandType* type_out,
- ZoneList<CharacterRange>* ranges);
+ ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings);
+ base::uc32 ParseClassSetCharacter();
// Parses and returns a single escaped character.
base::uc32 ParseCharacterEscape(InClassEscapeState in_class_escape_state,
bool* is_escaped_unicode_character);
@@ -251,12 +485,14 @@ class RegExpParserImpl final {
RegExpTree* ParseClassUnion(const RegExpBuilder* builder, bool is_negated,
RegExpTree* first_operand,
ClassSetOperandType first_operand_type,
- ZoneList<CharacterRange>* ranges);
+ ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings);
RegExpTree* ParseClassIntersection(const RegExpBuilder* builder,
- bool is_negated,
- RegExpTree* first_operand);
+ bool is_negated, RegExpTree* first_operand,
+ ClassSetOperandType first_operand_type);
RegExpTree* ParseClassSubtraction(const RegExpBuilder* builder,
- bool is_negated, RegExpTree* first_operand);
+ bool is_negated, RegExpTree* first_operand,
+ ClassSetOperandType first_operand_type);
RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
base::uc32 ParseOctalLiteral();
@@ -281,15 +517,15 @@ class RegExpParserImpl final {
int captures_started() const { return captures_started_; }
int position() const { return next_pos_ - 1; }
bool failed() const { return failed_; }
+ RegExpFlags flags() const { return top_level_flags_; }
bool IsUnicodeMode() const {
// Either /v or /u enable UnicodeMode
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-parsepattern
- return IsUnicode(top_level_flags_) || IsUnicodeSets(top_level_flags_) ||
- force_unicode_;
+ return IsUnicode(flags()) || IsUnicodeSets(flags()) || force_unicode_;
}
- bool unicode_sets() const { return IsUnicodeSets(top_level_flags_); }
- bool ignore_case() const { return IsIgnoreCase(top_level_flags_); }
+ bool unicode_sets() const { return IsUnicodeSets(flags()); }
+ bool ignore_case() const { return IsIgnoreCase(flags()); }
static bool IsSyntaxCharacterOrSlash(base::uc32 c);
static bool IsClassSetSyntaxCharacter(base::uc32 c);
@@ -652,7 +888,7 @@ template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(nullptr, INITIAL, RegExpLookaround::LOOKAHEAD,
- 0, nullptr, top_level_flags_, zone());
+ 0, nullptr, flags(), zone());
RegExpParserState* state = &initial_state;
// Cache the builder in a local variable for quick access.
RegExpBuilder* builder = initial_state.builder();
@@ -860,16 +1096,14 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
case 's':
case 'S':
case 'w':
- case 'W':
- case 'p':
- case 'P': {
+ case 'W': {
base::uc32 next = Next();
ZoneList<CharacterRange>* ranges =
zone()->template New<ZoneList<CharacterRange>>(2, zone());
bool add_unicode_case_equivalents =
IsUnicodeMode() && ignore_case();
bool parsed_character_class_escape = TryParseCharacterClassEscape(
- next, InClassEscapeState::kNotInClass, ranges, zone(),
+ next, InClassEscapeState::kNotInClass, ranges, nullptr, zone(),
add_unicode_case_equivalents CHECK_FAILED);
if (parsed_character_class_escape) {
@@ -883,6 +1117,38 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
}
break;
}
+ case 'p':
+ case 'P': {
+ base::uc32 next = Next();
+ ZoneList<CharacterRange>* ranges =
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
+ CharacterClassStrings* strings = nullptr;
+ if (unicode_sets()) {
+ strings = zone()->template New<CharacterClassStrings>(zone());
+ }
+ bool add_unicode_case_equivalents = ignore_case();
+ bool parsed_character_class_escape = TryParseCharacterClassEscape(
+ next, InClassEscapeState::kNotInClass, ranges, strings, zone(),
+ add_unicode_case_equivalents CHECK_FAILED);
+
+ if (parsed_character_class_escape) {
+ if (unicode_sets()) {
+ RegExpClassSetOperand* op =
+ zone()->template New<RegExpClassSetOperand>(ranges,
+ strings);
+ builder->AddTerm(op);
+ } else {
+ RegExpClassRanges* cc =
+ zone()->template New<RegExpClassRanges>(zone(), ranges);
+ builder->AddClassRanges(cc);
+ }
+ } else {
+ CHECK(!IsUnicodeMode());
+ Advance(2);
+ builder->AddCharacter(next); // IdentityEscape.
+ }
+ break;
+ }
// AtomEscape ::
// k GroupName
case 'k': {
@@ -1049,26 +1315,6 @@ RegExpParserState* RegExpParserImpl<CharT>::ParseOpenParenthesis(
state->builder()->flags(), zone());
}
-#ifdef DEBUG
-namespace {
-
-bool IsSpecialClassEscape(base::uc32 c) {
- switch (c) {
- case 'd':
- case 'D':
- case 's':
- case 'S':
- case 'w':
- case 'W':
- return true;
- default:
- return false;
- }
-}
-
-} // namespace
-#endif
-
// In order to know whether an escape is a backreference or not we have to scan
// the entire regexp and find the number of capturing parentheses. However we
// don't want to scan the regexp twice unless it is necessary. This mini-parser
@@ -1593,10 +1839,44 @@ bool IsExactPropertyValueAlias(const char* property_value_name,
return false;
}
+void ExtractStringsFromUnicodeSet(const icu::UnicodeSet& set,
+ CharacterClassStrings* strings,
+ RegExpFlags flags, Zone* zone) {
+ DCHECK(set.hasStrings());
+ DCHECK(IsUnicodeSets(flags));
+ DCHECK_NOT_NULL(strings);
+
+ RegExpTextBuilder::SmallRegExpTreeVector string_storage(
+ ZoneAllocator<RegExpTree*>{zone});
+ RegExpTextBuilder string_builder(zone, &string_storage, flags);
+ const bool needs_case_folding = IsIgnoreCase(flags);
+ icu::UnicodeSetIterator iter(set);
+ iter.skipToStrings();
+ while (iter.next()) {
+ const icu::UnicodeString& s = iter.getString();
+ const char16_t* p = s.getBuffer();
+ int32_t length = s.length();
+ ZoneList<base::uc32>* string =
+ zone->template New<ZoneList<base::uc32>>(length, zone);
+ for (int32_t i = 0; i < length;) {
+ UChar32 c;
+ U16_NEXT(p, i, length, c);
+ string_builder.AddUnicodeCharacter(c);
+ if (needs_case_folding) {
+ c = u_foldCase(c, U_FOLD_CASE_DEFAULT);
+ }
+ string->Add(c, zone);
+ }
+ strings->emplace(string->ToVector(), string_builder.ToRegExp());
+ string_storage.clear();
+ }
+}
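
As a concrete illustration (assuming current Unicode data): \p{RGI_Emoji_Flag_Sequence} contributes two-code-point strings such as U+1F1EB U+1F1F7 (the French flag). For each of them the loop above stores the (optionally case-folded) code points as the map key and a RegExpTree built by RegExpTextBuilder as the value, which is what later lets non-BMP code points be emitted as surrogate pairs in the generated alternatives.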
+
bool LookupPropertyValueName(UProperty property,
const char* property_value_name, bool negate,
- bool needs_case_folding,
- ZoneList<CharacterRange>* result, Zone* zone) {
+ ZoneList<CharacterRange>* result_ranges,
+ CharacterClassStrings* result_strings,
+ RegExpFlags flags, Zone* zone) {
UProperty property_for_lookup = property;
if (property_for_lookup == UCHAR_SCRIPT_EXTENSIONS) {
// For the property Script_Extensions, we have to do the property value
@@ -1620,11 +1900,15 @@ bool LookupPropertyValueName(UProperty property,
bool success = ec == U_ZERO_ERROR && !set.isEmpty();
if (success) {
+ if (set.hasStrings()) {
+ ExtractStringsFromUnicodeSet(set, result_strings, flags, zone);
+ }
+ const bool needs_case_folding = IsUnicodeSets(flags) && IsIgnoreCase(flags);
if (needs_case_folding) CharacterRange::UnicodeSimpleCloseOver(set);
set.removeAllStrings();
if (negate) set.complement();
for (int i = 0; i < set.getRangeCount(); i++) {
- result->Add(
+ result_ranges->Add(
CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
zone);
}
@@ -1639,7 +1923,7 @@ inline bool NameEquals(const char* name, const char (&literal)[N]) {
bool LookupSpecialPropertyValueName(const char* name,
ZoneList<CharacterRange>* result,
- bool negate, bool needs_case_folding,
+ bool negate, RegExpFlags flags,
Zone* zone) {
if (NameEquals(name, "Any")) {
if (negate) {
@@ -1654,7 +1938,7 @@ bool LookupSpecialPropertyValueName(const char* name,
zone);
} else if (NameEquals(name, "Assigned")) {
return LookupPropertyValueName(UCHAR_GENERAL_CATEGORY, "Unassigned",
- !negate, needs_case_folding, result, zone);
+ !negate, result, nullptr, flags, zone);
} else {
return false;
}
@@ -1663,7 +1947,7 @@ bool LookupSpecialPropertyValueName(const char* name,
// Explicitly allowlist supported binary properties. The spec forbids supporting
// properties outside of this set to ensure interoperability.
-bool IsSupportedBinaryProperty(UProperty property) {
+bool IsSupportedBinaryProperty(UProperty property, bool unicode_sets) {
switch (property) {
case UCHAR_ALPHABETIC:
// 'Any' is not supported by ICU. See LookupSpecialPropertyValueName.
@@ -1719,6 +2003,30 @@ bool IsSupportedBinaryProperty(UProperty property) {
case UCHAR_XID_CONTINUE:
case UCHAR_XID_START:
return true;
+ case UCHAR_BASIC_EMOJI:
+ case UCHAR_EMOJI_KEYCAP_SEQUENCE:
+ case UCHAR_RGI_EMOJI_MODIFIER_SEQUENCE:
+ case UCHAR_RGI_EMOJI_FLAG_SEQUENCE:
+ case UCHAR_RGI_EMOJI_TAG_SEQUENCE:
+ case UCHAR_RGI_EMOJI_ZWJ_SEQUENCE:
+ case UCHAR_RGI_EMOJI:
+ return unicode_sets;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool IsBinaryPropertyOfStrings(UProperty property) {
+ switch (property) {
+ case UCHAR_BASIC_EMOJI:
+ case UCHAR_EMOJI_KEYCAP_SEQUENCE:
+ case UCHAR_RGI_EMOJI_MODIFIER_SEQUENCE:
+ case UCHAR_RGI_EMOJI_FLAG_SEQUENCE:
+ case UCHAR_RGI_EMOJI_TAG_SEQUENCE:
+ case UCHAR_RGI_EMOJI_ZWJ_SEQUENCE:
+ case UCHAR_RGI_EMOJI:
+ return true;
default:
break;
}
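
Net effect, worked out as examples (a reading of the parser changes, not text from the CL): /\p{RGI_Emoji}/v is accepted and may add multi-code-point strings to the class; /\p{RGI_Emoji}/u is rejected because the emoji sequence properties are only supported when unicode_sets() is set; and /\P{RGI_Emoji}/v is rejected because negating a property of strings is not allowed.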
@@ -1781,31 +2089,34 @@ bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
template <class CharT>
bool RegExpParserImpl<CharT>::AddPropertyClassRange(
- ZoneList<CharacterRange>* add_to, bool negate,
+ ZoneList<CharacterRange>* add_to_ranges,
+ CharacterClassStrings* add_to_strings, bool negate,
const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
- // With /vi, we need to apply case folding to property values.
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
- // See
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-maybesimplecasefolding
- const bool needs_case_folding = unicode_sets() && ignore_case();
if (name_2.empty()) {
// First attempt to interpret as general category property value name.
const char* name = name_1.data();
if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, negate,
- needs_case_folding, add_to, zone())) {
+ add_to_ranges, add_to_strings, flags(),
+ zone())) {
return true;
}
// Interpret "Any", "ASCII", and "Assigned".
- if (LookupSpecialPropertyValueName(name, add_to, negate, needs_case_folding,
+ if (LookupSpecialPropertyValueName(name, add_to_ranges, negate, flags(),
zone())) {
return true;
}
// Then attempt to interpret as binary property name with value name 'Y'.
UProperty property = u_getPropertyEnum(name);
- if (!IsSupportedBinaryProperty(property)) return false;
+ if (!IsSupportedBinaryProperty(property, unicode_sets())) return false;
if (!IsExactPropertyAlias(name, property)) return false;
+ // Negation of properties with strings is not allowed.
+ // TODO(v8:11935): Change permalink once proposal is in stage 4.
+ // See
+ // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-static-semantics-maycontainstrings
+ if (negate && IsBinaryPropertyOfStrings(property)) return false;
return LookupPropertyValueName(property, negate ? "N" : "Y", false,
- needs_case_folding, add_to, zone());
+ add_to_ranges, add_to_strings, flags(),
+ zone());
} else {
// Both property name and value name are specified. Attempt to interpret
// the property name as enumerated property.
@@ -1820,8 +2131,8 @@ bool RegExpParserImpl<CharT>::AddPropertyClassRange(
property != UCHAR_SCRIPT_EXTENSIONS) {
return false;
}
- return LookupPropertyValueName(property, value_name, negate,
- needs_case_folding, add_to, zone());
+ return LookupPropertyValueName(property, value_name, negate, add_to_ranges,
+ add_to_strings, flags(), zone());
}
}
@@ -1835,7 +2146,8 @@ bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
template <class CharT>
bool RegExpParserImpl<CharT>::AddPropertyClassRange(
- ZoneList<CharacterRange>* add_to, bool negate,
+ ZoneList<CharacterRange>* add_to_ranges,
+ CharacterClassStrings* add_to_strings, bool negate,
const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
return false;
}
@@ -1868,7 +2180,7 @@ base::uc32 RegExpParserImpl<CharT>::ParseCharacterEscape(
InClassEscapeState in_class_escape_state,
bool* is_escaped_unicode_character) {
DCHECK_EQ('\\', current());
- DCHECK(has_next() && !IsSpecialClassEscape(Next()));
+ DCHECK(has_next());
Advance();
@@ -2111,8 +2423,9 @@ void RegExpParserImpl<CharT>::ParseClassEscape(
static constexpr InClassEscapeState kInClassEscape =
InClassEscapeState::kInClass;
- *is_class_escape = TryParseCharacterClassEscape(
- next, kInClassEscape, ranges, zone, add_unicode_case_equivalents);
+ *is_class_escape =
+ TryParseCharacterClassEscape(next, kInClassEscape, ranges, nullptr, zone,
+ add_unicode_case_equivalents);
if (*is_class_escape) return;
bool dummy = false; // Unused.
@@ -2123,8 +2436,8 @@ void RegExpParserImpl<CharT>::ParseClassEscape(
template <class CharT>
bool RegExpParserImpl<CharT>::TryParseCharacterClassEscape(
base::uc32 next, InClassEscapeState in_class_escape_state,
- ZoneList<CharacterRange>* ranges, Zone* zone,
- bool add_unicode_case_equivalents) {
+ ZoneList<CharacterRange>* ranges, CharacterClassStrings* strings,
+ Zone* zone, bool add_unicode_case_equivalents) {
DCHECK_EQ(current(), '\\');
DCHECK_EQ(Next(), next);
@@ -2148,7 +2461,7 @@ bool RegExpParserImpl<CharT>::TryParseCharacterClassEscape(
ZoneVector<char> name_1(zone);
ZoneVector<char> name_2(zone);
if (!ParsePropertyClassName(&name_1, &name_2) ||
- !AddPropertyClassRange(ranges, negate, name_1, name_2)) {
+ !AddPropertyClassRange(ranges, strings, negate, name_1, name_2)) {
ReportError(in_class_escape_state == InClassEscapeState::kInClass
? RegExpError::kInvalidClassPropertyName
: RegExpError::kInvalidPropertyName);
@@ -2160,10 +2473,27 @@ bool RegExpParserImpl<CharT>::TryParseCharacterClassEscape(
}
}
+namespace {
+
+// Add |string| to |ranges| if length of |string| == 1, otherwise add |string|
+// to |strings|.
+void AddClassString(ZoneList<base::uc32>* normalized_string,
+ RegExpTree* regexp_string, ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings, Zone* zone) {
+ if (normalized_string->length() == 1) {
+ ranges->Add(CharacterRange::Singleton(normalized_string->at(0)), zone);
+ } else {
+ strings->emplace(normalized_string->ToVector(), regexp_string);
+ }
+}
+
+} // namespace
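
A hand-worked example for this helper: parsing \q{a|bc|} produces the alternatives "a", "bc" and the empty string. "a" has length 1 and is folded into |ranges| as the singleton a, while "bc" and the empty string (length 0) both land in |strings|, keyed by their (possibly case-folded) code points, with the RegExpTree built for the alternative as the value.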
+
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassStringDisjunction
template <class CharT>
-RegExpTree* RegExpParserImpl<CharT>::ParseClassStringDisjunction() {
+RegExpTree* RegExpParserImpl<CharT>::ParseClassStringDisjunction(
+ ZoneList<CharacterRange>* ranges, CharacterClassStrings* strings) {
DCHECK(unicode_sets());
DCHECK_EQ(current(), '\\');
DCHECK_EQ(Next(), 'q');
@@ -2174,120 +2504,186 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassStringDisjunction() {
}
Advance();
- // TODO(pthier, v8:11935): Implement.
- return ReportError(RegExpError::kInvalidCharacterClass);
+ ZoneList<base::uc32>* string =
+ zone()->template New<ZoneList<base::uc32>>(4, zone());
+ RegExpTextBuilder::SmallRegExpTreeVector string_storage(
+ ZoneAllocator<RegExpTree*>{zone()});
+ RegExpTextBuilder string_builder(zone(), &string_storage, flags());
+
+ while (has_more() && current() != '}') {
+ if (current() == '|') {
+ AddClassString(string, string_builder.ToRegExp(), ranges, strings,
+ zone());
+ string = zone()->template New<ZoneList<base::uc32>>(4, zone());
+ string_storage.clear();
+ Advance();
+ } else {
+ base::uc32 c = ParseClassSetCharacter(CHECK_FAILED);
+ if (ignore_case()) {
+#ifdef V8_INTL_SUPPORT
+ c = u_foldCase(c, U_FOLD_CASE_DEFAULT);
+#else
+ c = AsciiAlphaToLower(c);
+#endif
+ }
+ string->Add(c, zone());
+ string_builder.AddUnicodeCharacter(c);
+ }
+ }
+
+ AddClassString(string, string_builder.ToRegExp(), ranges, strings, zone());
+
+ // We don't need to handle a missing closing '}' here: as long as the
+ // surrounding character class is closed with ']', ParseClassSetCharacter
+ // reports the error when it reaches that ']'.
+ Advance();
+ return nullptr;
}
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSetOperand
// Tree returned based on type_out:
-// * kClassStringDisjunction: RegExpAlternative | RegExpAtom
// * kNestedClass: RegExpClassSetExpression
-// * For all other types: RegExpClassRanges
+// * For all other types: RegExpClassSetOperand
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassSetOperand(
const RegExpBuilder* builder, ClassSetOperandType* type_out) {
ZoneList<CharacterRange>* ranges =
zone()->template New<ZoneList<CharacterRange>>(1, zone());
+ CharacterClassStrings* strings =
+ zone()->template New<CharacterClassStrings>(zone());
RegExpTree* tree =
- ParseClassSetOperand(builder, type_out, ranges CHECK_FAILED);
+ ParseClassSetOperand(builder, type_out, ranges, strings CHECK_FAILED);
+ DCHECK_IMPLIES(*type_out != ClassSetOperandType::kNestedClass,
+ tree == nullptr);
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kClassSetCharacter,
ranges->length() == 1);
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kClassSetCharacter,
- tree == nullptr);
- DCHECK_IMPLIES(*type_out == ClassSetOperandType::kCharacterClassEscape,
- !ranges->is_empty());
- DCHECK_IMPLIES(*type_out == ClassSetOperandType::kCharacterClassEscape,
- tree == nullptr);
- DCHECK_IMPLIES(*type_out == ClassSetOperandType::kClassStringDisjunction,
- ranges->is_empty());
- DCHECK_IMPLIES(*type_out == ClassSetOperandType::kClassStringDisjunction,
- tree->IsAtom() || tree->IsAlternative());
+ strings->empty());
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kNestedClass,
ranges->is_empty());
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kNestedClass,
+ strings->empty());
+ DCHECK_IMPLIES(*type_out == ClassSetOperandType::kNestedClass,
tree->IsClassSetExpression());
// ClassSetRange is only used within ClassSetUnion().
DCHECK_NE(*type_out, ClassSetOperandType::kClassSetRange);
+ // There are no restrictions for kCharacterClassEscape.
+ // CharacterClassEscape includes \p{}, which can contain ranges, strings, or
+ // both, and \P{}, which could contain nothing (i.e. \P{Any}).
if (tree == nullptr) {
- tree = zone()->template New<RegExpClassRanges>(zone(), ranges);
+ tree = zone()->template New<RegExpClassSetOperand>(ranges, strings);
}
return tree;
}
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSetOperand
-// Based on |type_out| either a tree is returned or ranges modifed (never both).
-// Tree returned based on type_out:
-// * kClassStringDisjunction: RegExpAlternative | RegExpAtom
-// * kNestedClass: RegExpClassSetExpression
-// For all other types, ranges is modified and nullptr is returned.
+// Based on |type_out| either a tree is returned or ranges/strings modified.
+// If a tree is returned, ranges/strings are not modified.
+// If |type_out| is kNestedClass, a tree of type RegExpClassSetExpression is
+// returned. For all other types, ranges is modified and nullptr is returned.
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassSetOperand(
const RegExpBuilder* builder, ClassSetOperandType* type_out,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges, CharacterClassStrings* strings) {
DCHECK(unicode_sets());
- const base::uc32 c = current();
+ base::uc32 c = current();
if (c == '\\') {
- base::uc32 next = Next();
- switch (next) {
- case 'b':
- *type_out = ClassSetOperandType::kClassSetCharacter;
- ranges->Add(CharacterRange::Singleton('\b'), zone());
- Advance(2);
- return nullptr;
- case 'q':
- *type_out = ClassSetOperandType::kClassStringDisjunction;
- return ParseClassStringDisjunction();
- case kEndMarker:
- return ReportError(RegExpError::kEscapeAtEndOfPattern);
+ const base::uc32 next = Next();
+ if (next == 'q') {
+ *type_out = ClassSetOperandType::kClassStringDisjunction;
+ ParseClassStringDisjunction(ranges, strings CHECK_FAILED);
+ return nullptr;
}
static constexpr InClassEscapeState kInClassEscape =
InClassEscapeState::kInClass;
const bool add_unicode_case_equivalents = ignore_case();
- if (TryParseCharacterClassEscape(next, kInClassEscape, ranges, zone(),
- add_unicode_case_equivalents)) {
+ if (TryParseCharacterClassEscape(next, kInClassEscape, ranges, strings,
+ zone(), add_unicode_case_equivalents)) {
*type_out = ClassSetOperandType::kCharacterClassEscape;
return nullptr;
}
-
- bool dummy = false; // Unused.
- base::uc32 escaped_char = ParseCharacterEscape(kInClassEscape, &dummy);
- *type_out = ClassSetOperandType::kClassSetCharacter;
- ranges->Add(CharacterRange::Singleton(escaped_char), zone());
- return nullptr;
}
+
if (c == '[') {
*type_out = ClassSetOperandType::kNestedClass;
return ParseCharacterClass(builder);
}
+
+ *type_out = ClassSetOperandType::kClassSetCharacter;
+ c = ParseClassSetCharacter(CHECK_FAILED);
+ ranges->Add(CharacterRange::Singleton(c), zone());
+ return nullptr;
+}
+
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseClassSetCharacter() {
+ DCHECK(unicode_sets());
+ const base::uc32 c = current();
+ if (c == '\\') {
+ const base::uc32 next = Next();
+ switch (next) {
+ case 'b':
+ Advance(2);
+ return '\b';
+ case kEndMarker:
+ ReportError(RegExpError::kEscapeAtEndOfPattern);
+ return 0;
+ }
+ static constexpr InClassEscapeState kInClassEscape =
+ InClassEscapeState::kInClass;
+
+ bool dummy = false; // Unused.
+ return ParseCharacterEscape(kInClassEscape, &dummy);
+ }
if (IsClassSetSyntaxCharacter(c)) {
- return ReportError(RegExpError::kInvalidCharacterInClass);
+ ReportError(RegExpError::kInvalidCharacterInClass);
+ return 0;
}
if (IsClassSetReservedDoublePunctuator(c)) {
- return ReportError(RegExpError::kInvalidClassSetOperation);
+ ReportError(RegExpError::kInvalidClassSetOperation);
+ return 0;
}
- *type_out = ClassSetOperandType::kClassSetCharacter;
- ranges->Add(CharacterRange::Singleton(c), zone());
Advance();
- return nullptr;
+ return c;
+}
+
+namespace {
+
+bool MayContainStrings(ClassSetOperandType type, RegExpTree* operand) {
+ switch (type) {
+ case ClassSetOperandType::kClassSetCharacter:
+ case ClassSetOperandType::kClassSetRange:
+ return false;
+ case ClassSetOperandType::kCharacterClassEscape:
+ case ClassSetOperandType::kClassStringDisjunction:
+ return operand->AsClassSetOperand()->has_strings();
+ case ClassSetOperandType::kNestedClass:
+ if (operand->IsClassRanges()) return false;
+ return operand->AsClassSetExpression()->may_contain_strings();
+ }
}
+} // namespace
+
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassUnion
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand,
- ClassSetOperandType first_operand_type, ZoneList<CharacterRange>* ranges) {
+ ClassSetOperandType first_operand_type, ZoneList<CharacterRange>* ranges,
+ CharacterClassStrings* strings) {
DCHECK(unicode_sets());
ZoneList<RegExpTree*>* operands =
zone()->template New<ZoneList<RegExpTree*>>(2, zone());
+ bool may_contain_strings = false;
// Add the lhs to operands if necessary.
- // Either the lhs values were added to |ranges| (in which case |first_operand|
- // is null), or the lhs was evaluated to a tree and passed as |first_operand|
- // (in which case |ranges| are empty).
- DCHECK_EQ(first_operand != nullptr, ranges->is_empty());
+ // Either the lhs values were added to |ranges|/|strings| (in which case
+ // |first_operand| is nullptr), or the lhs was evaluated to a tree and passed
+ // as |first_operand| (in which case |ranges| and |strings| are empty).
if (first_operand != nullptr) {
+ may_contain_strings = MayContainStrings(first_operand_type, first_operand);
operands->Add(first_operand, zone());
}
ClassSetOperandType last_type = first_operand_type;
@@ -2314,7 +2710,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
if (last_type != ClassSetOperandType::kClassSetCharacter) {
return ReportError(RegExpError::kInvalidCharacterClass);
}
- ParseClassSetOperand(builder, &last_type, ranges CHECK_FAILED);
+ ParseClassSetOperand(builder, &last_type, ranges, strings CHECK_FAILED);
if (last_type != ClassSetOperandType::kClassSetCharacter) {
return ReportError(RegExpError::kInvalidCharacterClass);
}
@@ -2333,18 +2729,23 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
last_type = ClassSetOperandType::kClassSetRange;
} else {
DCHECK_NE(current(), '-');
- RegExpTree* operand =
- ParseClassSetOperand(builder, &last_type, ranges CHECK_FAILED);
+ RegExpTree* operand = ParseClassSetOperand(builder, &last_type, ranges,
+ strings CHECK_FAILED);
if (operand != nullptr) {
+ may_contain_strings |= MayContainStrings(last_type, operand);
// Add the range we started building as operand and reset the current
// range.
- if (!ranges->is_empty()) {
+ if (!ranges->is_empty() || !strings->empty()) {
if (needs_case_folding) {
+ CharacterRange::Canonicalize(ranges);
CharacterRange::AddUnicodeCaseEquivalents(ranges, zone());
}
- operands->Add(zone()->template New<RegExpClassRanges>(zone(), ranges),
- zone());
+ may_contain_strings |= !strings->empty();
+ operands->Add(
+ zone()->template New<RegExpClassSetOperand>(ranges, strings),
+ zone());
ranges = zone()->template New<ZoneList<CharacterRange>>(2, zone());
+ strings = zone()->template New<CharacterClassStrings>(zone());
}
operands->Add(operand, zone());
}
@@ -2356,26 +2757,38 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
}
// Add the range we started building as operand.
- if (!ranges->is_empty()) {
+ if (!ranges->is_empty() || !strings->empty()) {
if (needs_case_folding) {
+ CharacterRange::Canonicalize(ranges);
CharacterRange::AddUnicodeCaseEquivalents(ranges, zone());
}
- operands->Add(zone()->template New<RegExpClassRanges>(zone(), ranges),
+ may_contain_strings |= !strings->empty();
+ operands->Add(zone()->template New<RegExpClassSetOperand>(ranges, strings),
zone());
}
+
DCHECK_EQ(current(), ']');
Advance();
+
+ if (is_negated && may_contain_strings) {
+ return ReportError(RegExpError::kNegatedCharacterClassWithStrings);
+ }
+
return zone()->template New<RegExpClassSetExpression>(
- RegExpClassSetExpression::OperationType::kUnion, is_negated, operands);
+ RegExpClassSetExpression::OperationType::kUnion, is_negated,
+ may_contain_strings, operands);
}
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassIntersection
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassIntersection(
- const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand) {
+ const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand,
+ ClassSetOperandType first_operand_type) {
DCHECK(unicode_sets());
DCHECK(current() == '&' && Next() == '&');
+ bool may_contain_strings =
+ MayContainStrings(first_operand_type, first_operand);
ZoneList<RegExpTree*>* operands =
zone()->template New<ZoneList<RegExpTree*>>(2, zone());
operands->Add(first_operand, zone());
@@ -2389,27 +2802,38 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassIntersection(
return ReportError(RegExpError::kInvalidCharacterInClass);
}
- ClassSetOperandType dummy; // unused
- RegExpTree* operand = ParseClassSetOperand(builder, &dummy CHECK_FAILED);
+ ClassSetOperandType operand_type;
+ RegExpTree* operand =
+ ParseClassSetOperand(builder, &operand_type CHECK_FAILED);
+ may_contain_strings &= MayContainStrings(operand_type, operand);
operands->Add(operand, zone());
}
if (!has_more()) {
return ReportError(RegExpError::kUnterminatedCharacterClass);
}
+ if (is_negated && may_contain_strings) {
+ return ReportError(RegExpError::kNegatedCharacterClassWithStrings);
+ }
DCHECK_EQ(current(), ']');
Advance();
return zone()->template New<RegExpClassSetExpression>(
RegExpClassSetExpression::OperationType::kIntersection, is_negated,
- operands);
+ may_contain_strings, operands);
}
// TODO(v8:11935): Change permalink once proposal is in stage 4.
// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSubtraction
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassSubtraction(
- const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand) {
+ const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand,
+ ClassSetOperandType first_operand_type) {
DCHECK(unicode_sets());
DCHECK(current() == '-' && Next() == '-');
+ const bool may_contain_strings =
+ MayContainStrings(first_operand_type, first_operand);
+ if (is_negated && may_contain_strings) {
+ return ReportError(RegExpError::kNegatedCharacterClassWithStrings);
+ }
ZoneList<RegExpTree*>* operands =
zone()->template New<ZoneList<RegExpTree*>>(2, zone());
operands->Add(first_operand, zone());
@@ -2429,7 +2853,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassSubtraction(
Advance();
return zone()->template New<RegExpClassSetExpression>(
RegExpClassSetExpression::OperationType::kSubtraction, is_negated,
- operands);
+ may_contain_strings, operands);
}
// https://tc39.es/ecma262/#prod-CharacterClass
@@ -2447,10 +2871,14 @@ RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
zone()->template New<ZoneList<CharacterRange>>(2, zone());
if (current() == ']') {
Advance();
- RegExpClassRanges::ClassRangesFlags class_ranges_flags;
- if (is_negated) class_ranges_flags = RegExpClassRanges::NEGATED;
- return zone()->template New<RegExpClassRanges>(zone(), ranges,
- class_ranges_flags);
+ if (unicode_sets()) {
+ return RegExpClassSetExpression::Empty(zone(), is_negated);
+ } else {
+ RegExpClassRanges::ClassRangesFlags class_ranges_flags;
+ if (is_negated) class_ranges_flags = RegExpClassRanges::NEGATED;
+ return zone()->template New<RegExpClassRanges>(zone(), ranges,
+ class_ranges_flags);
+ }
}
if (!unicode_sets()) {
@@ -2467,27 +2895,34 @@ RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
character_class_flags);
} else {
ClassSetOperandType operand_type;
- RegExpTree* operand =
- ParseClassSetOperand(builder, &operand_type, ranges CHECK_FAILED);
+ CharacterClassStrings* strings =
+ zone()->template New<CharacterClassStrings>(zone());
+ RegExpTree* operand = ParseClassSetOperand(builder, &operand_type, ranges,
+ strings CHECK_FAILED);
switch (current()) {
case '-':
if (Next() == '-') {
if (operand == nullptr) {
- operand = zone()->template New<RegExpClassRanges>(zone(), ranges);
+ operand =
+ zone()->template New<RegExpClassSetOperand>(ranges, strings);
}
- return ParseClassSubtraction(builder, is_negated, operand);
+ return ParseClassSubtraction(builder, is_negated, operand,
+ operand_type);
}
// ClassSetRange is handled in ParseClassUnion().
break;
case '&':
if (Next() == '&') {
if (operand == nullptr) {
- operand = zone()->template New<RegExpClassRanges>(zone(), ranges);
+ operand =
+ zone()->template New<RegExpClassSetOperand>(ranges, strings);
}
- return ParseClassIntersection(builder, is_negated, operand);
+ return ParseClassIntersection(builder, is_negated, operand,
+ operand_type);
}
}
- return ParseClassUnion(builder, is_negated, operand, operand_type, ranges);
+ return ParseClassUnion(builder, is_negated, operand, operand_type, ranges,
+ strings);
}
}
@@ -2523,125 +2958,31 @@ bool RegExpParserImpl<CharT>::Parse(RegExpCompileData* result) {
return true;
}
-void RegExpBuilder::AddLeadSurrogate(base::uc16 lead_surrogate) {
- DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
- FlushPendingSurrogate();
- // Hold onto the lead surrogate, waiting for a trail surrogate to follow.
- pending_surrogate_ = lead_surrogate;
-}
-
-void RegExpBuilder::AddTrailSurrogate(base::uc16 trail_surrogate) {
- DCHECK(unibrow::Utf16::IsTrailSurrogate(trail_surrogate));
- if (pending_surrogate_ != kNoPendingSurrogate) {
- base::uc16 lead_surrogate = pending_surrogate_;
- pending_surrogate_ = kNoPendingSurrogate;
- DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
- base::uc32 combined =
- unibrow::Utf16::CombineSurrogatePair(lead_surrogate, trail_surrogate);
- if (NeedsDesugaringForIgnoreCase(combined)) {
- AddClassRangesForDesugaring(combined);
- } else {
- ZoneList<base::uc16> surrogate_pair(2, zone());
- surrogate_pair.Add(lead_surrogate, zone());
- surrogate_pair.Add(trail_surrogate, zone());
- RegExpAtom* atom =
- zone()->New<RegExpAtom>(surrogate_pair.ToConstVector());
- AddAtom(atom);
- }
- } else {
- pending_surrogate_ = trail_surrogate;
- FlushPendingSurrogate();
- }
-}
-
-void RegExpBuilder::FlushPendingSurrogate() {
- if (pending_surrogate_ != kNoPendingSurrogate) {
- DCHECK(IsUnicodeMode());
- base::uc32 c = pending_surrogate_;
- pending_surrogate_ = kNoPendingSurrogate;
- AddClassRangesForDesugaring(c);
- }
-}
-
-void RegExpBuilder::FlushCharacters() {
- FlushPendingSurrogate();
- pending_empty_ = false;
- if (characters_ != nullptr) {
- RegExpTree* atom = zone()->New<RegExpAtom>(characters_->ToConstVector());
- characters_ = nullptr;
- text_.emplace_back(atom);
- LAST(ADD_ATOM);
- }
-}
-
-void RegExpBuilder::FlushText() {
- FlushCharacters();
- size_t num_text = text_.size();
- if (num_text == 0) {
- return;
- } else if (num_text == 1) {
- terms_.emplace_back(text_.back());
- } else {
- RegExpText* text = zone()->New<RegExpText>(zone());
- for (size_t i = 0; i < num_text; i++) {
- text_[i]->AppendToText(text, zone());
- }
- terms_.emplace_back(text);
- }
- text_.clear();
-}
+void RegExpBuilder::FlushText() { text_builder().FlushText(); }
void RegExpBuilder::AddCharacter(base::uc16 c) {
- FlushPendingSurrogate();
pending_empty_ = false;
- if (NeedsDesugaringForIgnoreCase(c)) {
- AddClassRangesForDesugaring(c);
- } else {
- if (characters_ == nullptr) {
- characters_ = zone()->New<ZoneList<base::uc16>>(4, zone());
- }
- characters_->Add(c, zone());
- LAST(ADD_CHAR);
- }
+ text_builder().AddCharacter(c);
}
void RegExpBuilder::AddUnicodeCharacter(base::uc32 c) {
- if (c > static_cast<base::uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- DCHECK(IsUnicodeMode());
- AddLeadSurrogate(unibrow::Utf16::LeadSurrogate(c));
- AddTrailSurrogate(unibrow::Utf16::TrailSurrogate(c));
- } else if (IsUnicodeMode() && unibrow::Utf16::IsLeadSurrogate(c)) {
- AddLeadSurrogate(c);
- } else if (IsUnicodeMode() && unibrow::Utf16::IsTrailSurrogate(c)) {
- AddTrailSurrogate(c);
- } else {
- AddCharacter(static_cast<base::uc16>(c));
- }
+ pending_empty_ = false;
+ text_builder().AddUnicodeCharacter(c);
}
void RegExpBuilder::AddEscapedUnicodeCharacter(base::uc32 character) {
- // A lead or trail surrogate parsed via escape sequence will not
- // pair up with any preceding lead or following trail surrogate.
- FlushPendingSurrogate();
- AddUnicodeCharacter(character);
- FlushPendingSurrogate();
+ pending_empty_ = false;
+ text_builder().AddEscapedUnicodeCharacter(character);
}
-void RegExpBuilder::AddEmpty() { pending_empty_ = true; }
-
-void RegExpBuilder::AddClassRanges(RegExpClassRanges* cc) {
- if (NeedsDesugaringForUnicode(cc)) {
- // With /u, character class needs to be desugared, so it
- // must be a standalone term instead of being part of a RegExpText.
- AddTerm(cc);
- } else {
- AddAtom(cc);
- }
+void RegExpBuilder::AddEmpty() {
+ text_builder().FlushPendingSurrogate();
+ pending_empty_ = true;
}
-void RegExpBuilder::AddClassRangesForDesugaring(base::uc32 c) {
- AddTerm(zone()->New<RegExpClassRanges>(
- zone(), CharacterRange::List(zone(), CharacterRange::Singleton(c))));
+void RegExpBuilder::AddClassRanges(RegExpClassRanges* cc) {
+ pending_empty_ = false;
+ text_builder().AddClassRanges(cc);
}
void RegExpBuilder::AddAtom(RegExpTree* term) {
@@ -2649,26 +2990,30 @@ void RegExpBuilder::AddAtom(RegExpTree* term) {
AddEmpty();
return;
}
+ pending_empty_ = false;
if (term->IsTextElement()) {
- FlushCharacters();
- text_.emplace_back(term);
+ text_builder().AddAtom(term);
} else {
FlushText();
terms_.emplace_back(term);
}
- LAST(ADD_ATOM);
}
void RegExpBuilder::AddTerm(RegExpTree* term) {
- FlushText();
- terms_.emplace_back(term);
- LAST(ADD_ATOM);
+ DCHECK(!term->IsEmpty());
+ pending_empty_ = false;
+ if (term->IsTextElement()) {
+ text_builder().AddTerm(term);
+ } else {
+ FlushText();
+ terms_.emplace_back(term);
+ }
}
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
+ pending_empty_ = false;
terms_.emplace_back(assert);
- LAST(ADD_ASSERT);
}
void RegExpBuilder::NewAlternative() { FlushTerms(); }
@@ -2688,48 +3033,6 @@ void RegExpBuilder::FlushTerms() {
}
alternatives_.emplace_back(alternative);
terms_.clear();
- LAST(ADD_NONE);
-}
-
-bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpClassRanges* cc) {
- if (!IsUnicodeMode()) return false;
- // TODO(yangguo): we could be smarter than this. Case-insensitivity does not
- // necessarily mean that we need to desugar. It's probably nicer to have a
- // separate pass to figure out unicode desugarings.
- if (ignore_case()) return true;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::Canonicalize(ranges);
-
- if (cc->is_negated()) {
- ZoneList<CharacterRange>* negated_ranges =
- zone()->New<ZoneList<CharacterRange>>(ranges->length(), zone());
- CharacterRange::Negate(ranges, negated_ranges, zone());
- ranges = negated_ranges;
- }
-
- for (int i = ranges->length() - 1; i >= 0; i--) {
- base::uc32 from = ranges->at(i).from();
- base::uc32 to = ranges->at(i).to();
- // Check for non-BMP characters.
- if (to >= kNonBmpStart) return true;
- // Check for lone surrogates.
- if (from <= kTrailSurrogateEnd && to >= kLeadSurrogateStart) return true;
- }
- return false;
-}
-
-bool RegExpBuilder::NeedsDesugaringForIgnoreCase(base::uc32 c) {
-#ifdef V8_INTL_SUPPORT
- if (IsUnicodeMode() && ignore_case()) {
- icu::UnicodeSet set(c, c);
- set.closeOver(USET_CASE_INSENSITIVE);
- set.removeAllStrings();
- return set.size() > 1;
- }
- // In the case where ICU is not included, we act as if the unicode flag is
- // not set, and do not desugar.
-#endif // V8_INTL_SUPPORT
- return false;
}
RegExpTree* RegExpBuilder::ToRegExp() {
@@ -2743,33 +3046,14 @@ RegExpTree* RegExpBuilder::ToRegExp() {
bool RegExpBuilder::AddQuantifierToAtom(
int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
- FlushPendingSurrogate();
if (pending_empty_) {
pending_empty_ = false;
return true;
}
- RegExpTree* atom;
- if (characters_ != nullptr) {
- DCHECK(last_added_ == ADD_CHAR);
- // Last atom was character.
- base::Vector<const base::uc16> char_vector = characters_->ToConstVector();
- int num_chars = char_vector.length();
- if (num_chars > 1) {
- base::Vector<const base::uc16> prefix =
- char_vector.SubVector(0, num_chars - 1);
- text_.emplace_back(zone()->New<RegExpAtom>(prefix));
- char_vector = char_vector.SubVector(num_chars - 1, num_chars);
- }
- characters_ = nullptr;
- atom = zone()->New<RegExpAtom>(char_vector);
- FlushText();
- } else if (text_.size() > 0) {
- DCHECK(last_added_ == ADD_ATOM);
- atom = text_.back();
- text_.pop_back();
+ RegExpTree* atom = text_builder().PopLastAtom();
+ if (atom != nullptr) {
FlushText();
} else if (terms_.size() > 0) {
- DCHECK(last_added_ == ADD_ATOM);
atom = terms_.back();
terms_.pop_back();
if (atom->IsLookaround()) {
@@ -2782,7 +3066,6 @@ bool RegExpBuilder::AddQuantifierToAtom(
}
if (atom->max_match() == 0) {
// Guaranteed to only match an empty string.
- LAST(ADD_TERM);
if (min == 0) {
return true;
}
@@ -2795,7 +3078,6 @@ bool RegExpBuilder::AddQuantifierToAtom(
}
terms_.emplace_back(
zone()->New<RegExpQuantifier>(min, max, quantifier_type, atom));
- LAST(ADD_TERM);
return true;
}
@@ -2844,7 +3126,5 @@ template bool RegExpParser::VerifyRegExpSyntax<base::uc16>(
Zone*, uintptr_t, const base::uc16*, int, RegExpFlags, RegExpCompileData*,
const DisallowGarbageCollection&);
-#undef LAST
-
} // namespace internal
} // namespace v8
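
The parser changes above implement string support for v-flag (unicodeSets) character classes: \q{...} string disjunctions and property escapes can now contribute multi-character strings, may_contain_strings is propagated through union, intersection and subtraction, and a negated class that could match a string is rejected with kNegatedCharacterClassWithStrings. A minimal JavaScript sketch of the resulting behaviour, assuming an engine that ships the v flag (illustrative only, not part of this patch):

    // A class string disjunction matches whole strings, here "abc" or "def".
    /^[\q{abc|def}]$/v.test("abc");            // true
    // Intersection keeps only the strings common to both operands.
    /^[\q{abc}&&\q{abc|def}]$/v.test("abc");   // true
    // Unescaped class-set syntax characters such as "(" are rejected in v mode.
    new RegExp("[(]", "v");                    // throws SyntaxError
    // Negated classes must not be able to match strings.
    new RegExp("[^\\q{abc}]", "v");            // throws SyntaxError
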
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index ce323df96c..e6e7d140a7 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -99,7 +99,7 @@ class RegExpImpl final : public AllStatic {
static void SetIrregexpMaxRegisterCount(FixedArray re, int value);
static int IrregexpNumberOfCaptures(FixedArray re);
static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
- static CodeT IrregexpNativeCode(FixedArray re, bool is_one_byte);
+ static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
};
// static
@@ -136,6 +136,7 @@ template bool RegExp::VerifySyntax<base::uc16>(
MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
Handle<JSRegExp> re,
+ RegExpFlags flags,
Handle<String> pattern,
RegExpError error) {
base::Vector<const char> error_data =
@@ -144,16 +145,18 @@ MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
isolate->factory()
->NewStringFromOneByte(base::Vector<const uint8_t>::cast(error_data))
.ToHandleChecked();
- THROW_NEW_ERROR(
- isolate,
- NewSyntaxError(MessageTemplate::kMalformedRegExp, pattern, error_text),
- Object);
+ Handle<String> flag_string =
+ JSRegExp::StringFromFlags(isolate, JSRegExp::AsJSRegExpFlags(flags));
+ THROW_NEW_ERROR(isolate,
+ NewSyntaxError(MessageTemplate::kMalformedRegExp, pattern,
+ flag_string, error_text),
+ Object);
}
void RegExp::ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
RegExpError error_text) {
- USE(ThrowRegExpException(isolate, re, Handle<String>(re->source(), isolate),
- error_text));
+ USE(ThrowRegExpException(isolate, re, JSRegExp::AsRegExpFlags(re->flags()),
+ Handle<String>(re->source(), isolate), error_text));
}
bool RegExp::IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp) {
@@ -219,7 +222,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
&parse_result)) {
// Throw an exception if we fail to parse the pattern.
- return RegExp::ThrowRegExpException(isolate, re, pattern,
+ return RegExp::ThrowRegExpException(isolate, re, flags, pattern,
parse_result.error);
}
@@ -238,7 +241,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
parse_result.capture_count)) {
// TODO(mbid): The error could provide a reason for why the regexp can't
// be executed in linear time (e.g. due to back references).
- return RegExp::ThrowRegExpException(isolate, re, pattern,
+ return RegExp::ThrowRegExpException(isolate, re, flags, pattern,
RegExpError::kNotLinear);
}
ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
@@ -450,7 +453,7 @@ bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
}
if (!needs_initial_compilation && !needs_tier_up_compilation) {
- DCHECK(compiled_code.IsCodeT());
+ DCHECK(compiled_code.IsCode());
DCHECK_IMPLIES(v8_flags.regexp_interpret_all, bytecode.IsByteArray());
return true;
}
@@ -483,7 +486,7 @@ bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
DCHECK_EQ(JSRegExp::kUninitializedValue, bytecode_value);
} else {
- DCHECK(entry.IsSmi() || (entry.IsCodeT() && bytecode.IsByteArray()));
+ DCHECK(entry.IsSmi() || (entry.IsCode() && bytecode.IsByteArray()));
}
return true;
@@ -551,7 +554,8 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
&compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- USE(RegExp::ThrowRegExpException(isolate, re, pattern, compile_data.error));
+ USE(RegExp::ThrowRegExpException(isolate, re, flags, pattern,
+ compile_data.error));
return false;
}
// The compilation target is a kBytecode if we're interpreting all regexp
@@ -576,7 +580,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
if (compile_data.compilation_target == RegExpCompilationTarget::kNative) {
Code code = Code::cast(*compile_data.code);
- data->set(JSRegExp::code_index(is_one_byte), ToCodeT(code));
+ data->set(JSRegExp::code_index(is_one_byte), code);
// Reset bytecode to uninitialized. In case we use tier-up we know that
// tier-up has happened this way.
@@ -588,7 +592,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
// Store code generated by compiler in bytecode and trampoline to
// interpreter in code.
data->set(JSRegExp::bytecode_index(is_one_byte), *compile_data.code);
- Handle<CodeT> trampoline =
+ Handle<Code> trampoline =
BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
data->set(JSRegExp::code_index(is_one_byte), *trampoline);
}
@@ -629,8 +633,8 @@ ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
return ByteArray::cast(re.get(JSRegExp::bytecode_index(is_one_byte)));
}
-CodeT RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
- return CodeT::cast(re.get(JSRegExp::code_index(is_one_byte)));
+Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
+ return Code::cast(re.get(JSRegExp::code_index(is_one_byte)));
}
void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
@@ -1019,15 +1023,15 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
data->compilation_target == RegExpCompilationTarget::kNative) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
OFStream os(trace_scope.file());
- Handle<Code> c = Handle<Code>::cast(result.code);
- auto pattern_cstring = pattern->ToCString();
- c->Disassemble(pattern_cstring.get(), os, isolate);
+ Handle<Code> code = Handle<Code>::cast(result.code);
+ std::unique_ptr<char[]> pattern_cstring = pattern->ToCString();
+ code->Disassemble(pattern_cstring.get(), os, isolate);
}
#endif
if (v8_flags.print_regexp_bytecode &&
data->compilation_target == RegExpCompilationTarget::kBytecode) {
Handle<ByteArray> bytecode = Handle<ByteArray>::cast(result.code);
- auto pattern_cstring = pattern->ToCString();
+ std::unique_ptr<char[]> pattern_cstring = pattern->ToCString();
RegExpBytecodeDisassemble(bytecode->GetDataStartAddress(),
bytecode->length(), pattern_cstring.get());
}
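
In regexp.cc, RegExp::ThrowRegExpException now receives the RegExpFlags and passes the stringified flags into the kMalformedRegExp SyntaxError, so the message identifies the flag combination as well as the pattern. A rough sketch of the expected user-visible effect (illustrative; the exact wording comes from the message template):

    try {
      new RegExp("a(", "gi");   // unterminated group, parsed with flags "gi"
    } catch (e) {
      // Expected to read roughly:
      //   Invalid regular expression: /a(/gi: Unterminated group
      // i.e. the flag string now appears between the pattern and the error text.
      console.log(e.message);
    }
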
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 09d1e52215..2d9435d705 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -158,6 +158,7 @@ class RegExp final : public AllStatic {
V8_WARN_UNUSED_RESULT
static MaybeHandle<Object> ThrowRegExpException(Isolate* isolate,
Handle<JSRegExp> re,
+ RegExpFlags flags,
Handle<String> pattern,
RegExpError error);
static void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
diff --git a/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.cc b/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.cc
index c8f3eb551e..9eb1807de8 100644
--- a/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.cc
+++ b/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.cc
@@ -18,7 +18,7 @@ namespace internal {
/* clang-format off
* This assembler uses the following register assignment convention
- * - s1 : Pointer to current Code object including heap object tag.
+ * - s1 : Pointer to current InstructionStream object including heap object tag.
* - s2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - s5 : Currently loaded character. Must be loaded using
@@ -36,24 +36,25 @@ namespace internal {
*
* kStackFrameHeader
* --- sp when called ---
- * - fp[72] ra Return from RegExp code (ra). kReturnAddress
- * - fp[64] s9, old-fp Old fp, callee saved(s9).
- * - fp[0..63] fp..s7 Callee-saved registers fp..s7.
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[64] old-fp Old fp, callee saved(s9).
+ * - fp[0..63] s1..s8 Callee-saved registers s1..s8.
* --- frame pointer ----
- * - fp[-8] Isolate* isolate (address of the current isolate) kIsolate
- * - fp[-16] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-24] output_size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-32] int* output (int[num_saved_registers_], for output). kRegisterOutput
- * - fp[-40] end of input (address of end of string). kInputEnd
- * - fp[-48] start of input (address of first character in string). kInputStart
- * - fp[-56] start index (character index of start). kStartIndex
- * - fp[-64] void* input_string (location of a handle containing the string). kInputString
- * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * - fp[-8] frame marker
+ * - fp[-16] Isolate* isolate (address of the current isolate) kIsolate
+ * - fp[-24] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-32] output_size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-40] int* output (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-48] end of input (address of end of string). kInputEnd
+ * - fp[-56] start of input (address of first character in string). kInputStart
+ * - fp[-64] start index (character index of start). kStartIndex
+ * - fp[-72] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-80] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-88] Offset of location before start of input (effectively character kStringStartMinusOne
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
- * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - fp[-96] register 0 (Only positions must be stored in the first kRegisterZero
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -83,15 +84,13 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-const int RegExpMacroAssemblerRISCV::kRegExpCodeSize;
-
RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
masm_(std::make_unique<MacroAssembler>(
isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ NewAssemblerBuffer(kInitialBufferSize))),
no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
@@ -150,9 +149,9 @@ void RegExpMacroAssemblerRISCV::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ LoadWord(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ LoadWord(a0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ AddWord(a0, a0, Operand(1));
- __ StoreWord(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ StoreWord(a0, MemOperand(frame_pointer(), kBacktrackCountOffset));
__ BranchShort(&next, ne, a0, Operand(backtrack_limit()));
// Backtrack limit exceeded.
@@ -184,7 +183,7 @@ void RegExpMacroAssemblerRISCV::CheckCharacterGT(base::uc16 limit,
void RegExpMacroAssemblerRISCV::CheckAtStart(int cp_offset,
Label* on_at_start) {
- __ LoadWord(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddWord(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
@@ -192,7 +191,7 @@ void RegExpMacroAssemblerRISCV::CheckAtStart(int cp_offset,
void RegExpMacroAssemblerRISCV::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ LoadWord(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddWord(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
@@ -225,8 +224,8 @@ void RegExpMacroAssemblerRISCV::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ li(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -259,7 +258,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
__ BranchShort(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ LoadWord(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(t1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddWord(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -362,7 +361,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -394,7 +393,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReference(int start_reg,
__ BranchShort(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ LoadWord(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(t1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddWord(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -631,7 +630,8 @@ void RegExpMacroAssemblerRISCV::PushRegExpBasePointer(Register stack_pointer,
__ li(scratch, Operand(ref));
__ LoadWord(scratch, MemOperand(scratch));
__ SubWord(scratch, stack_pointer, scratch);
- __ StoreWord(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ StoreWord(scratch,
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerRISCV::PopRegExpBasePointer(Register stack_pointer_out,
@@ -639,7 +639,7 @@ void RegExpMacroAssemblerRISCV::PopRegExpBasePointer(Register stack_pointer_out,
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ LoadWord(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ li(scratch, Operand(ref));
__ LoadWord(scratch, MemOperand(scratch));
__ AddWord(stack_pointer_out, stack_pointer_out, scratch);
@@ -692,23 +692,27 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// According to MultiPush implementation, registers will be pushed in the
// order of ra, fp, then s8, ..., s1, and finally a7,...a0
- __ MultiPush(RegList{ra} | registers_to_retain | argument_registers);
+ __ MultiPush(RegList{ra} | registers_to_retain);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
- __ AddWord(frame_pointer(), sp,
- Operand(argument_registers.Count() * kSystemPointerSize));
-
- static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ __ AddWord(frame_pointer(), sp, Operand(0));
+ static_assert(kFrameTypeOffset == -kSystemPointerSize);
+ __ li(kScratchReg, Operand(StackFrame::TypeToMarker(StackFrame::IRREGEXP)));
+ __ push(kScratchReg);
+ __ MultiPush(argument_registers);
+ static_assert(kSuccessfulCapturesOffset ==
+ kInputStringOffset - kSystemPointerSize);
__ mv(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ push(a0); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ push(a0); // The backtrack counter
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ push(a0); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here
@@ -749,20 +753,21 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ SubWord(sp, sp, Operand(num_registers_ * kSystemPointerSize));
// Load string end.
- __ LoadWord(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ LoadWord(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
// Load input start.
- __ LoadWord(a0, MemOperand(frame_pointer(), kInputStart));
+ __ LoadWord(a0, MemOperand(frame_pointer(), kInputStartOffset));
// Find negative length (offset of start relative to end).
__ SubWord(current_input_offset(), a0, end_of_input_address());
// Set a0 to address of char before start of the input string
// (effectively string position -1).
- __ LoadWord(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadWord(a1, MemOperand(frame_pointer(), kStartIndexOffset));
__ SubWord(a0, current_input_offset(), Operand(char_size()));
__ slli(t1, a1, (mode_ == UC16) ? 1 : 0);
__ SubWord(a0, a0, t1);
// Store this value in a local variable, for use when clearing
// position registers.
- __ StoreWord(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ StoreWord(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -787,7 +792,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1.
if (num_saved_registers_ > 8) {
// Address of register 0.
- __ AddWord(a1, frame_pointer(), Operand(kRegisterZero));
+ __ AddWord(a1, frame_pointer(), Operand(kRegisterZeroOffset));
__ li(a2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
@@ -810,9 +815,9 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// Copy captures to output.
- __ LoadWord(a1, MemOperand(frame_pointer(), kInputStart));
- __ LoadWord(a0, MemOperand(frame_pointer(), kRegisterOutput));
- __ LoadWord(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadWord(a1, MemOperand(frame_pointer(), kInputStartOffset));
+ __ LoadWord(a0, MemOperand(frame_pointer(), kRegisterOutputOffset));
+ __ LoadWord(a2, MemOperand(frame_pointer(), kStartIndexOffset));
__ SubWord(a1, end_of_input_address(), a1);
// a1 is length of input in bytes.
if (mode_ == UC16) {
@@ -852,25 +857,28 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ LoadWord(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ LoadWord(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ LoadWord(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadWord(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ LoadWord(a1, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
+ __ LoadWord(a2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Increment success counter.
__ AddWord(a0, a0, 1);
- __ StoreWord(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ StoreWord(a0,
+ MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ SubWord(a1, a1, num_saved_registers_);
// Check whether we have enough room for another set of capture results.
__ Branch(&return_a0, lt, a1, Operand(num_saved_registers_));
- __ StoreWord(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ StoreWord(a1,
+ MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Advance the location for output.
__ AddWord(a2, a2, num_saved_registers_ * kIntSize);
- __ StoreWord(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ StoreWord(a2, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Prepare a0 to initialize registers with its value in the next run.
- __ LoadWord(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(a0,
+ MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -901,7 +909,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Exit and return a0.
__ bind(&exit_label_);
if (global()) {
- __ LoadWord(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadWord(a0, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
__ bind(&return_a0);
@@ -936,7 +944,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
__ LoadWord(end_of_input_address(),
- MemOperand(frame_pointer(), kInputEnd));
+ MemOperand(frame_pointer(), kInputEndOffset));
SafeReturn();
}
@@ -950,7 +958,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments, 0, a0);
__ li(a0, ExternalReference::isolate_address(isolate()));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ BranchShort(&exit_with_exception, eq, a0, Operand(zero_reg));
@@ -1029,13 +1037,14 @@ void RegExpMacroAssemblerRISCV::PopRegister(int register_index) {
void RegExpMacroAssemblerRISCV::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
- __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ __ li(a0,
+ Operand(target + InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ BranchShort(&after_constant);
int offset = masm_->pc_offset();
- int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ int cp_offset = offset + InstructionStream::kHeaderSize - kHeapObjectTag;
__ emit(0);
masm_->label_at_put(label, offset);
__ bind(&after_constant);
@@ -1118,7 +1127,7 @@ void RegExpMacroAssemblerRISCV::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerRISCV::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ LoadWord(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(a0, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreWord(a0, register_location(reg));
}
@@ -1142,7 +1151,7 @@ void RegExpMacroAssemblerRISCV::CallCheckStackGuardState(Register scratch) {
__ StoreWord(scratch, MemOperand(sp));
__ mv(a2, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// We need to make room for the return address on the stack.
@@ -1202,16 +1211,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int64_t RegExpMacroAssemblerRISCV::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndexOffset)),
static_cast<RegExp::CallOrigin>(
- frame_entry<int64_t>(re_frame, kDirectCall)),
+ frame_entry<int64_t>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
MemOperand RegExpMacroAssemblerRISCV::register_location(int register_index) {
@@ -1220,7 +1229,7 @@ MemOperand RegExpMacroAssemblerRISCV::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kSystemPointerSize);
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
void RegExpMacroAssemblerRISCV::CheckPosition(int cp_offset,
@@ -1229,7 +1238,7 @@ void RegExpMacroAssemblerRISCV::CheckPosition(int cp_offset,
BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
Operand(-cp_offset * char_size()));
} else {
- __ LoadWord(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadWord(a1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddWord(a0, current_input_offset(), Operand(cp_offset * char_size()));
BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
}
@@ -1345,6 +1354,21 @@ void RegExpMacroAssemblerRISCV::LoadCurrentCharacterUnchecked(int cp_offset,
}
}
+void RegExpMacroAssemblerRISCV::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.h b/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.h
index 1080e72a7e..90a1d314cc 100644
--- a/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.h
+++ b/deps/v8/src/regexp/riscv/regexp-macro-assembler-riscv.h
@@ -97,48 +97,65 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
private:
// Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
// Registers s1 to s8, fp, and ra.
- static const int kStoredRegisters = kFramePointer;
+ static constexpr int kStoredRegistersOffset = kFramePointerOffset;
// Return address (stored from link register, read into pc on return).
// This 9 is 8 s-regs (s1..s8) plus fp.
- static const int kNumCalleeRegsToRetain = 9;
- static const int kReturnAddress =
- kStoredRegisters + kNumCalleeRegsToRetain * kSystemPointerSize;
+ static constexpr int kNumCalleeRegsToRetain = 9;
+ static constexpr int kReturnAddressOffset =
+ kStoredRegistersOffset + kNumCalleeRegsToRetain * kSystemPointerSize;
// Stack frame header.
- static const int kStackFrameHeader = kReturnAddress;
- // Below the frame pointer.
+ static constexpr int kStackFrameHeaderOffset = kReturnAddressOffset;
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ (V8_EMBEDDED_CONSTANT_POOL_BOOL
+ ? kSystemPointerSize +
+ CommonFrameConstants::kContextOrFrameTypeOffset
+ : CommonFrameConstants::kContextOrFrameTypeOffset));
// Register parameters stored by setup code.
- static const int kIsolate = kFramePointer - kSystemPointerSize;
- static const int kDirectCall = kIsolate - kSystemPointerSize;
- static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
- static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
- static const int kInputStart = kInputEnd - kSystemPointerSize;
- static const int kStartIndex = kInputStart - kSystemPointerSize;
- static const int kInputString = kStartIndex - kSystemPointerSize;
+ static constexpr int kIsolateOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kDirectCallOffset - kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
+ static constexpr int kInputEndOffset =
+ kRegisterOutputOffset - kSystemPointerSize;
+ static constexpr int kInputStartOffset = kInputEndOffset - kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStartOffset - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kStartIndexOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
static constexpr int kNumberOfStackLocals = 4;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kInitialBufferSize = 1024;
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
void PushCallerSavedRegisters();
void PopCallerSavedRegisters();
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index a61bc379ba..525fc32e99 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -22,7 +22,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - r6: Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - r7: Pointer to current Code object including heap object tag.
+ * - r7: Pointer to current InstructionStream object including heap object tag.
* - r8: Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r9: Currently loaded character. Must be loaded using
@@ -50,23 +50,25 @@ namespace internal {
* - fp[0..96] zLinux ABI register saving area
* --- sp when called ---
* --- frame pointer ----
- * - fp[-4] direct_call (if 1, direct call from JavaScript code,
+ * - fp [-4] frame marker
+ * - fp [-8] isolate
+ * - fp[-12] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[-8] stack_area_base (high end of the memory area to use as
+ * - fp[-16] stack_area_base (high end of the memory area to use as
* backtracking stack).
- * - fp[-12] capture array size (may fit multiple sets of matches)
- * - fp[-16] int* capture_array (int[num_saved_registers_], for output).
- * - fp[-20] end of input (address of end of string).
- * - fp[-24] start of input (address of first character in string).
- * - fp[-28] start index (character index of start).
- * - fp[-32] void* input_string (location of a handle containing the string).
- * - fp[-36] success counter (only for global regexps to count matches).
- * - fp[-40] Offset of location before start of input (effectively character
+ * - fp[-20] capture array size (may fit multiple sets of matches)
+ * - fp[-24] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[-28] end of input (address of end of string).
+ * - fp[-32] start of input (address of first character in string).
+ * - fp[-36] start index (character index of start).
+ * - fp[-40] void* input_string (location of a handle containing the string).
+ * - fp[-44] success counter (only for global regexps to count matches).
+ * - fp[-48] Offset of location before start of input (effectively character
* string start - 1). Used to initialize capture registers to a
* non-position.
- * - fp[-44] At start (if 1, we are starting at the start of the
+ * - fp[-52] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-48] register 0 (Only positions must be stored in the first
+ * - fp[-56] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -167,9 +169,9 @@ void RegExpMacroAssemblerS390::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ LoadU64(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
+ __ LoadU64(r2, MemOperand(frame_pointer(), kBacktrackCountOffset), r0);
__ AddS64(r2, r2, Operand(1));
- __ StoreU64(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
+ __ StoreU64(r2, MemOperand(frame_pointer(), kBacktrackCountOffset), r0);
__ CmpU64(r2, Operand(backtrack_limit()));
__ bne(&next);
@@ -183,7 +185,8 @@ void RegExpMacroAssemblerS390::Backtrack() {
__ bind(&next);
}
- // Pop Code offset from backtrack stack, add Code and jump to location.
+ // Pop InstructionStream offset from backtrack stack, add InstructionStream
+ // and jump to location.
Pop(r2);
__ AddS64(r2, code_pointer());
__ b(r2);
@@ -203,7 +206,7 @@ void RegExpMacroAssemblerS390::CheckCharacterGT(base::uc16 limit,
}
void RegExpMacroAssemblerS390::CheckAtStart(int cp_offset, Label* on_at_start) {
- __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddS64(r2, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ CmpS64(r2, r3);
@@ -212,7 +215,7 @@ void RegExpMacroAssemblerS390::CheckAtStart(int cp_offset, Label* on_at_start) {
void RegExpMacroAssemblerS390::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddS64(r2, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ CmpS64(r2, r3);
@@ -250,7 +253,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
if (read_backward) {
- __ LoadU64(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r5, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddS64(r5, r5, r3);
__ CmpS64(current_input_offset(), r5);
BranchOrBacktrack(le, on_no_match);
@@ -356,7 +359,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(function, argument_count);
+ CallCFunctionFromIrregexpCode(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
@@ -391,7 +394,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
// Check that there are enough characters left in the input.
if (read_backward) {
- __ LoadU64(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r5, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddS64(r5, r5, r3);
__ CmpS64(current_input_offset(), r5);
BranchOrBacktrack(le, on_no_match);
@@ -502,8 +505,8 @@ void RegExpMacroAssemblerS390::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(masm_.get(), StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -679,7 +682,8 @@ void RegExpMacroAssemblerS390::PushRegExpBasePointer(Register stack_pointer,
__ mov(scratch, Operand(ref));
__ LoadU64(scratch, MemOperand(scratch));
__ SubS64(scratch, stack_pointer, scratch);
- __ StoreU64(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ StoreU64(scratch,
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
}
void RegExpMacroAssemblerS390::PopRegExpBasePointer(Register stack_pointer_out,
@@ -687,7 +691,7 @@ void RegExpMacroAssemblerS390::PopRegExpBasePointer(Register stack_pointer_out,
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ LoadU64(stack_pointer_out,
- MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ MemOperand(frame_pointer(), kRegExpStackBasePointerOffset));
__ mov(scratch, Operand(ref));
__ LoadU64(scratch, MemOperand(scratch));
__ AddS64(stack_pointer_out, stack_pointer_out, scratch);
@@ -716,6 +720,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
DCHECK(kRegExpCalleeSaved.has(end_of_input_address()));
DCHECK(kRegExpCalleeSaved.has(frame_pointer()));
+ // Emit code to start a new stack frame. In the following we push all
+ // callee-save registers (these end up above the fp) and all register
+ // arguments (these end up below the fp).
+ //
// zLinux ABI
// Incoming parameters:
// r2: input_string
@@ -735,27 +743,24 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// r8 = stack area base
// r9 = direct call
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
- //
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
__ mov(frame_pointer(), sp);
+ // Also push the frame marker.
+ __ mov(r0, Operand(StackFrame::TypeToMarker(StackFrame::IRREGEXP)));
+ __ push(r0);
__ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
- static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+
+ static_assert(kSuccessfulCapturesOffset ==
+ kInputStringOffset - kSystemPointerSize);
__ mov(r1, Operand::Zero()); // success counter
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ mov(r0, r1); // offset of location
__ StoreMultipleP(r0, r9, MemOperand(sp, 0));
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ Push(r1); // The backtrack counter.
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ push(r1); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here on.
@@ -799,12 +804,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ lay(sp, MemOperand(sp, (-num_registers_ * kSystemPointerSize)));
// Load string end.
- __ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ LoadU64(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
// Load input start.
- __ LoadU64(r4, MemOperand(frame_pointer(), kInputStart));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kInputStartOffset));
// Find negative length (offset of start relative to end).
__ SubS64(current_input_offset(), r4, end_of_input_address());
- __ LoadU64(r3, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStartIndexOffset));
// Set r1 to address of char before start of the input string
// (effectively string position -1).
__ mov(r1, r4);
@@ -817,7 +823,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ StoreU64(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ StoreU64(r1, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -843,8 +849,8 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
- __ lay(r3,
- MemOperand(frame_pointer(), kRegisterZero + kSystemPointerSize));
+ __ lay(r3, MemOperand(frame_pointer(),
+ kRegisterZeroOffset + kSystemPointerSize));
__ mov(r4, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
@@ -866,9 +872,9 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ LoadU64(r0, MemOperand(frame_pointer(), kInputStart));
- __ LoadU64(r2, MemOperand(frame_pointer(), kRegisterOutput));
- __ LoadU64(r4, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadU64(r0, MemOperand(frame_pointer(), kInputStartOffset));
+ __ LoadU64(r2, MemOperand(frame_pointer(), kRegisterOutputOffset));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStartIndexOffset));
__ SubS64(r0, end_of_input_address(), r0);
// r0 is length of input in bytes.
if (mode_ == UC16) {
@@ -929,12 +935,12 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ LoadU64(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ LoadU64(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ LoadU64(r4, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadU64(r2, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Increment success counter.
__ AddS64(r2, Operand(1));
- __ StoreU64(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ StoreU64(r2, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ SubS64(r3, Operand(num_saved_registers_));
@@ -942,13 +948,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ CmpS64(r3, Operand(num_saved_registers_));
__ blt(&return_r2);
- __ StoreU64(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ StoreU64(r3, MemOperand(frame_pointer(), kNumOutputRegistersOffset));
// Advance the location for output.
__ AddS64(r4, Operand(num_saved_registers_ * kIntSize));
- __ StoreU64(r4, MemOperand(frame_pointer(), kRegisterOutput));
+ __ StoreU64(r4, MemOperand(frame_pointer(), kRegisterOutputOffset));
// Prepare r2 to initialize registers with its value in the next run.
- __ LoadU64(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r2, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -979,7 +985,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Exit and return r2
__ bind(&exit_label_);
if (global()) {
- __ LoadU64(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadU64(r2, MemOperand(frame_pointer(), kSuccessfulCapturesOffset));
}
__ bind(&return_r2);
@@ -1017,7 +1023,8 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
- __ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ LoadU64(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEndOffset));
SafeReturn();
}
@@ -1034,7 +1041,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ PrepareCallCFunction(kNumArguments, r2);
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ CmpS64(r2, Operand::Zero());
@@ -1109,7 +1116,8 @@ void RegExpMacroAssemblerS390::PopRegister(int register_index) {
void RegExpMacroAssemblerS390::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
- __ mov(r2, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ __ mov(r2,
+ Operand(target + InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
masm_->load_label_offset(r2, label);
}
@@ -1185,7 +1193,7 @@ void RegExpMacroAssemblerS390::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ LoadU64(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r2, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreU64(r2, register_location(reg));
}
@@ -1201,7 +1209,7 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(r4, frame_pointer());
- // Code of self.
+ // InstructionStream of self.
__ mov(r3, Operand(masm_->CodeObject()));
// r2 becomes return address pointer.
__ lay(r2, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
@@ -1241,16 +1249,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerS390::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<intptr_t>(re_frame, kStartIndex),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ frame_entry<intptr_t>(re_frame, kStartIndexOffset),
static_cast<RegExp::CallOrigin>(
- frame_entry<intptr_t>(re_frame, kDirectCall)),
+ frame_entry<intptr_t>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
MemOperand RegExpMacroAssemblerS390::register_location(int register_index) {
@@ -1259,7 +1267,7 @@ MemOperand RegExpMacroAssemblerS390::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kSystemPointerSize);
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
@@ -1268,7 +1276,7 @@ void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
__ CmpS64(current_input_offset(), Operand(-cp_offset * char_size()));
BranchOrBacktrack(ge, on_outside_input);
} else {
- __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOneOffset));
__ AddS64(r2, current_input_offset(), Operand(cp_offset * char_size()));
__ CmpS64(r2, r3);
BranchOrBacktrack(le, on_outside_input);
@@ -1330,6 +1338,22 @@ void RegExpMacroAssemblerS390::Pop(Register target) {
MemOperand(backtrack_stackpointer(), kSystemPointerSize));
}
+void RegExpMacroAssemblerS390::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
+
void RegExpMacroAssemblerS390::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
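Illustrative aside (not part of the diff): the CallCFunctionFromIrregexpCode helper added above routes every C call made from generated regexp code through one wrapper that opts out of recording fast_c_call_caller_fp/pc. The standalone C++ sketch below imitates that pattern with made-up stand-ins for the MacroAssembler API; only the shape of the call is meant to match the real code.

#include <iostream>

enum class SetIsolateDataSlots { kYes, kNo };

// Stand-in for MacroAssembler::CallCFunction: by default it would record the
// caller's fp/pc in the isolate.
void CallCFunction(const char* fn, int num_args,
                   SetIsolateDataSlots slots = SetIsolateDataSlots::kYes) {
  std::cout << fn << " args=" << num_args
            << " record_caller_fp_pc=" << (slots == SetIsolateDataSlots::kYes)
            << "\n";
}

void CallCFunctionFromIrregexpCode(const char* fn, int num_args) {
  // Opt out of recording fast_c_call_caller_fp/pc, mirroring the rationale in
  // the diff: nested CallCFunction uses and frame-pointer-less C callers would
  // otherwise break stack iteration.
  CallCFunction(fn, num_args, SetIsolateDataSlots::kNo);
}

int main() {
  CallCFunctionFromIrregexpCode("re_grow_stack", 3);
  return 0;
}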
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index f0b4833eb8..6e32d71063 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -92,41 +92,53 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
private:
// Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static const int kFramePointerOffset = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- static const int kStoredRegisters = kFramePointer;
- static const int kCallerFrame =
- kStoredRegisters + kCalleeRegisterSaveAreaSize;
-
- // Below the frame pointer.
+ static const int kStoredRegistersOffset = kFramePointerOffset;
+ static const int kCallerFrameOffset =
+ kStoredRegistersOffset + kCalleeRegisterSaveAreaSize;
+
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
// Register parameters stored by setup code.
- static const int kIsolate = kFramePointer - kSystemPointerSize;
- static const int kDirectCall = kIsolate - kSystemPointerSize;
- static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
- static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
- static const int kInputStart = kInputEnd - kSystemPointerSize;
- static const int kStartIndex = kInputStart - kSystemPointerSize;
- static const int kInputString = kStartIndex - kSystemPointerSize;
+ static const int kIsolateOffset = kFrameTypeOffset - kSystemPointerSize;
+ static const int kDirectCallOffset = kIsolateOffset - kSystemPointerSize;
+ static const int kNumOutputRegistersOffset =
+ kDirectCallOffset - kSystemPointerSize;
+ static const int kRegisterOutputOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
+ static const int kInputEndOffset = kRegisterOutputOffset - kSystemPointerSize;
+ static const int kInputStartOffset = kInputEndOffset - kSystemPointerSize;
+ static const int kStartIndexOffset = kInputStartOffset - kSystemPointerSize;
+ static const int kInputStringOffset = kStartIndexOffset - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static const int kSuccessfulCapturesOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static const int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static const int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static const int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static const int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
+
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 89fd2e34f1..90ff2696a5 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -34,9 +34,10 @@ namespace internal {
* - rsp : Points to tip of C stack.
* - rcx : Points to tip of backtrack stack. The backtrack stack contains
* only 32-bit values. Most are offsets from some base (e.g., character
- * positions from end of string or code location from Code pointer).
- * - r8 : Code object pointer. Used to convert between absolute and
- * code-object-relative addresses.
+ * positions from end of string or code location from InstructionStream
+ * pointer).
+ * - r8 : InstructionStream object pointer. Used to convert between absolute
+ * and code-object-relative addresses.
*
* The registers rax, rbx, r9 and r11 are free to use for computations.
* If changed to use r12+, they should be saved as callee-save registers.
@@ -47,7 +48,7 @@ namespace internal {
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
- * frame pointer (see, e.g., kDirectCall):
+ * frame pointer (see, e.g., kDirectCallOffset):
* - Address regexp (address of the JSRegExp object; unused in native
* code, passed to match signature of interpreter)
* - Isolate* isolate (address of the current isolate)
@@ -151,8 +152,8 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ incq(Operand(rbp, kBacktrackCount));
- __ cmpq(Operand(rbp, kBacktrackCount), Immediate(backtrack_limit()));
+ __ incq(Operand(rbp, kBacktrackCountOffset));
+ __ cmpq(Operand(rbp, kBacktrackCountOffset), Immediate(backtrack_limit()));
__ j(not_equal, &next);
// Backtrack limit exceeded.
@@ -165,7 +166,8 @@ void RegExpMacroAssemblerX64::Backtrack() {
__ bind(&next);
}
- // Pop Code offset from backtrack stack, add Code and jump to location.
+ // Pop InstructionStream offset from backtrack stack, add InstructionStream
+ // and jump to location.
Pop(rbx);
__ addq(rbx, code_object_pointer());
__ jmp(rbx);
@@ -190,14 +192,14 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(base::uc16 limit,
void RegExpMacroAssemblerX64::CheckAtStart(int cp_offset, Label* on_at_start) {
__ leaq(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
- __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
+ __ cmpq(rax, Operand(rbp, kStringStartMinusOneOffset));
BranchOrBacktrack(equal, on_at_start);
}
void RegExpMacroAssemblerX64::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
__ leaq(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
- __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
+ __ cmpq(rax, Operand(rbp, kStringStartMinusOneOffset));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -216,6 +218,22 @@ void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
__ bind(&fallthrough);
}
+void RegExpMacroAssemblerX64::CallCFunctionFromIrregexpCode(
+ ExternalReference function, int num_arguments) {
+ // Irregexp code must not set fast_c_call_caller_fp and fast_c_call_caller_pc
+ // since
+ //
+ // 1. it may itself have been called using CallCFunction and nested calls are
+ // unsupported, and
+ // 2. it may itself have been called directly from C where the frame pointer
+ // might not be set (-fomit-frame-pointer), and thus frame iteration would
+ // fail.
+ //
+ // See also: crbug.com/v8/12670#c17.
+ __ CallCFunction(function, num_arguments,
+ MacroAssembler::SetIsolateDataSlots::kNo);
+}
+
// Push (pop) caller-saved registers used by irregexp.
void RegExpMacroAssemblerX64::PushCallerSavedRegisters() {
#ifndef V8_TARGET_OS_WIN
@@ -255,7 +273,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// rbx - length of capture
// Check that there are sufficient characters left in the input.
if (read_backward) {
- __ movl(rax, Operand(rbp, kStringStartMinusOne));
+ __ movl(rax, Operand(rbp, kStringStartMinusOneOffset));
__ addl(rax, rbx);
__ cmpl(rdi, rax);
BranchOrBacktrack(less_equal, on_no_match);
@@ -370,7 +388,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
unicode
? ExternalReference::re_case_insensitive_compare_unicode()
: ExternalReference::re_case_insensitive_compare_non_unicode();
- __ CallCFunction(compare, num_arguments);
+ CallCFunctionFromIrregexpCode(compare, num_arguments);
}
// Restore original values before reacting on result value.
@@ -411,7 +429,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
// rax - length of capture
// Check that there are sufficient characters left in the input.
if (read_backward) {
- __ movl(rbx, Operand(rbp, kStringStartMinusOne));
+ __ movl(rbx, Operand(rbp, kStringStartMinusOneOffset));
__ addl(rbx, rax);
__ cmpl(rdi, rbx);
BranchOrBacktrack(less_equal, on_no_match);
@@ -539,8 +557,8 @@ void RegExpMacroAssemblerX64::CallIsCharacterInRangeArray(
{
// We have a frame (set up in GetCode), but the assembler doesn't know.
FrameScope scope(&masm_, StackFrame::MANUAL);
- __ CallCFunction(ExternalReference::re_is_character_in_range_array(),
- kNumArguments);
+ CallCFunctionFromIrregexpCode(
+ ExternalReference::re_is_character_in_range_array(), kNumArguments);
}
PopCallerSavedRegisters();
@@ -725,14 +743,14 @@ void RegExpMacroAssemblerX64::PushRegExpBasePointer(Register stack_pointer,
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
__ movq(scratch, __ ExternalReferenceAsOperand(ref, scratch));
__ subq(scratch, stack_pointer);
- __ movq(Operand(rbp, kRegExpStackBasePointer), scratch);
+ __ movq(Operand(rbp, kRegExpStackBasePointerOffset), scratch);
}
void RegExpMacroAssemblerX64::PopRegExpBasePointer(Register stack_pointer_out,
Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ movq(scratch, Operand(rbp, kRegExpStackBasePointer));
+ __ movq(scratch, Operand(rbp, kRegExpStackBasePointerOffset));
__ movq(stack_pointer_out,
__ ExternalReferenceAsOperand(ref, stack_pointer_out));
__ subq(stack_pointer_out, scratch);
@@ -749,33 +767,38 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// physical frame is generated.
FrameScope scope(&masm_, StackFrame::MANUAL);
- // Actually emit code to start a new stack frame.
- __ pushq(rbp);
- __ movq(rbp, rsp);
+ // Actually emit code to start a new stack frame. This pushes the frame type
+ // marker into the stack slot at kFrameTypeOffset.
+ static_assert(kFrameTypeOffset == -1 * kSystemPointerSize);
+ __ EnterFrame(StackFrame::IRREGEXP);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef V8_TARGET_OS_WIN
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
// Store register parameters in pre-allocated stack slots.
- __ movq(Operand(rbp, kInputString), arg_reg_1);
- __ movq(Operand(rbp, kStartIndex), arg_reg_2); // Passed as int32 in edx.
- __ movq(Operand(rbp, kInputStart), arg_reg_3);
- __ movq(Operand(rbp, kInputEnd), arg_reg_4);
+ __ movq(Operand(rbp, kInputStringOffset), arg_reg_1);
+ __ movq(Operand(rbp, kStartIndexOffset),
+ arg_reg_2); // Passed as int32 in edx.
+ __ movq(Operand(rbp, kInputStartOffset), arg_reg_3);
+ __ movq(Operand(rbp, kInputEndOffset), arg_reg_4);
static_assert(kNumCalleeSaveRegisters == 3);
+ static_assert(kBackupRsiOffset == -2 * kSystemPointerSize);
+ static_assert(kBackupRdiOffset == -3 * kSystemPointerSize);
+ static_assert(kBackupRbxOffset == -4 * kSystemPointerSize);
__ pushq(rsi);
__ pushq(rdi);
__ pushq(rbx);
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
- DCHECK_EQ(kInputString, -1 * kSystemPointerSize);
- DCHECK_EQ(kStartIndex, -2 * kSystemPointerSize);
- DCHECK_EQ(kInputStart, -3 * kSystemPointerSize);
- DCHECK_EQ(kInputEnd, -4 * kSystemPointerSize);
- DCHECK_EQ(kRegisterOutput, -5 * kSystemPointerSize);
- DCHECK_EQ(kNumOutputRegisters, -6 * kSystemPointerSize);
+ static_assert(kInputStringOffset == -2 * kSystemPointerSize);
+ static_assert(kStartIndexOffset == -3 * kSystemPointerSize);
+ static_assert(kInputStartOffset == -4 * kSystemPointerSize);
+ static_assert(kInputEndOffset == -5 * kSystemPointerSize);
+ static_assert(kRegisterOutputOffset == -6 * kSystemPointerSize);
+ static_assert(kNumOutputRegistersOffset == -7 * kSystemPointerSize);
__ pushq(arg_reg_1);
__ pushq(arg_reg_2);
__ pushq(arg_reg_3);
@@ -784,19 +807,21 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(r9);
static_assert(kNumCalleeSaveRegisters == 1);
+ static_assert(kBackupRbxOffset == -8 * kSystemPointerSize);
__ pushq(rbx);
#endif
- static_assert(kSuccessfulCaptures ==
+ static_assert(kSuccessfulCapturesOffset ==
kLastCalleeSaveRegister - kSystemPointerSize);
__ Push(Immediate(0)); // Number of successful matches in a global regexp.
- static_assert(kStringStartMinusOne ==
- kSuccessfulCaptures - kSystemPointerSize);
+ static_assert(kStringStartMinusOneOffset ==
+ kSuccessfulCapturesOffset - kSystemPointerSize);
__ Push(Immediate(0)); // Make room for "string start - 1" constant.
- static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ static_assert(kBacktrackCountOffset ==
+ kStringStartMinusOneOffset - kSystemPointerSize);
__ Push(Immediate(0)); // The backtrack counter.
- static_assert(kRegExpStackBasePointer ==
- kBacktrackCount - kSystemPointerSize);
+ static_assert(kRegExpStackBasePointerOffset ==
+ kBacktrackCountOffset - kSystemPointerSize);
__ Push(Immediate(0)); // The regexp stack base ptr.
// Initialize backtrack stack pointer. It must not be clobbered from here on.
@@ -843,14 +868,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ AllocateStackSpace(num_registers_ * kSystemPointerSize);
// Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movq(rsi, Operand(rbp, kInputEndOffset));
// Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
+ __ movq(rdi, Operand(rbp, kInputStartOffset));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
+ __ movq(rbx, Operand(rbp, kStartIndexOffset));
__ negq(rbx);
if (mode_ == UC16) {
__ leaq(rax, Operand(rdi, rbx, times_2, -char_size()));
@@ -859,7 +884,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movq(Operand(rbp, kStringStartMinusOne), rax);
+ __ movq(Operand(rbp, kStringStartMinusOneOffset), rax);
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -869,7 +894,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label start_regexp;
// Load newline if index is at start, previous character otherwise.
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpl(Operand(rbp, kStartIndexOffset), Immediate(0));
__ j(not_equal, &load_char_start_regexp, Label::kNear);
__ Move(current_character(), '\n');
__ jmp(&start_regexp, Label::kNear);
@@ -888,12 +913,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
- __ Move(r9, kRegisterZero);
+ __ Move(r9, kRegisterZeroOffset);
Label init_loop;
__ bind(&init_loop);
__ movq(Operand(rbp, r9, times_1, 0), rax);
__ subq(r9, Immediate(kSystemPointerSize));
- __ cmpq(r9, Immediate(kRegisterZero -
+ __ cmpq(r9, Immediate(kRegisterZeroOffset -
num_saved_registers_ * kSystemPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
@@ -911,10 +936,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
+ __ movq(rdx, Operand(rbp, kStartIndexOffset));
+ __ movq(rbx, Operand(rbp, kRegisterOutputOffset));
+ __ movq(rcx, Operand(rbp, kInputEndOffset));
+ __ subq(rcx, Operand(rbp, kInputStartOffset));
if (mode_ == UC16) {
__ leaq(rcx, Operand(rcx, rdx, times_2, 0));
} else {
@@ -937,22 +962,22 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incq(Operand(rbp, kSuccessfulCapturesOffset));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
- __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
+ __ movsxlq(rcx, Operand(rbp, kNumOutputRegistersOffset));
__ subq(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmpq(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
+ __ movq(Operand(rbp, kNumOutputRegistersOffset), rcx);
// Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
+ __ addq(Operand(rbp, kRegisterOutputOffset),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kStringStartMinusOne));
+ __ movq(rax, Operand(rbp, kStringStartMinusOneOffset));
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
@@ -987,7 +1012,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
+ __ movq(rax, Operand(rbp, kSuccessfulCapturesOffset));
}
__ bind(&return_rax);
@@ -999,20 +1024,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Restore callee save registers.
__ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister));
static_assert(kNumCalleeSaveRegisters == 3);
+ static_assert(kBackupRsiOffset == -2 * kSystemPointerSize);
+ static_assert(kBackupRdiOffset == -3 * kSystemPointerSize);
+ static_assert(kBackupRbxOffset == -4 * kSystemPointerSize);
__ popq(rbx);
__ popq(rdi);
__ popq(rsi);
- // Stack now at rbp.
#else
// Restore callee save register.
static_assert(kNumCalleeSaveRegisters == 1);
- __ movq(rbx, Operand(rbp, kBackup_rbx));
- // Skip rsp to rbp.
- __ movq(rsp, rbp);
+ __ movq(rbx, Operand(rbp, kBackupRbxOffset));
#endif
- // Exit function frame, restore previous one.
- __ popq(rbp);
+ __ LeaveFrame(StackFrame::IRREGEXP);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -1023,7 +1047,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label exit_with_exception;
- // Preempt-code
+ // Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
@@ -1044,7 +1068,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movq(rsi, Operand(rbp, kInputEndOffset));
SafeReturn();
}
@@ -1064,7 +1088,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
- __ CallCFunction(grow_stack, kNumArguments);
+ CallCFunctionFromIrregexpCode(grow_stack, kNumArguments);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ testq(rax, rax);
@@ -1230,7 +1254,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kStringStartMinusOne));
+ __ movq(rax, Operand(rbp, kStringStartMinusOneOffset));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ movq(register_location(reg), rax);
}
@@ -1244,7 +1268,8 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
#ifdef V8_TARGET_OS_WIN
- // Second argument: Code of self. (Do this before overwriting r8).
+ // Second argument: InstructionStream of self. (Do this before overwriting
+ // r8).
__ movq(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
__ movq(r8, rbp);
@@ -1254,7 +1279,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
#else
// Third argument: RegExp code frame pointer.
__ movq(rdx, rbp);
- // Second argument: Code of self.
+ // Second argument: InstructionStream of self.
__ movq(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
@@ -1262,7 +1287,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state();
- __ CallCFunction(stack_check, num_arguments);
+ CallCFunctionFromIrregexpCode(stack_check, num_arguments);
}
@@ -1281,15 +1306,16 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Address raw_code,
Address re_frame) {
- Code re_code = Code::cast(Object(raw_code));
+ InstructionStream re_code = InstructionStream::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<int>(re_frame, kStartIndex),
- static_cast<RegExp::CallOrigin>(frame_entry<int>(re_frame, kDirectCall)),
+ frame_entry<Isolate*>(re_frame, kIsolateOffset),
+ frame_entry<int>(re_frame, kStartIndexOffset),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int>(re_frame, kDirectCallOffset)),
return_address, re_code,
- frame_entry_address<Address>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
+ frame_entry_address<Address>(re_frame, kInputStringOffset),
+ frame_entry_address<const byte*>(re_frame, kInputStartOffset),
+ frame_entry_address<const byte*>(re_frame, kInputEndOffset));
}
@@ -1298,7 +1324,8 @@ Operand RegExpMacroAssemblerX64::register_location(int register_index) {
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
- return Operand(rbp, kRegisterZero - register_index * kSystemPointerSize);
+ return Operand(rbp,
+ kRegisterZeroOffset - register_index * kSystemPointerSize);
}
@@ -1309,7 +1336,7 @@ void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
BranchOrBacktrack(greater_equal, on_outside_input);
} else {
__ leaq(rax, Operand(rdi, cp_offset * char_size()));
- __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
+ __ cmpq(rax, Operand(rbp, kStringStartMinusOneOffset));
BranchOrBacktrack(less_equal, on_outside_input);
}
}
@@ -1362,15 +1389,13 @@ void RegExpMacroAssemblerX64::Push(Immediate value) {
void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
for (int position : code_relative_fixup_positions_) {
// The position succeeds a relative label offset from position.
- // Patch the relative offset to be relative to the Code object pointer
- // instead.
+ // Patch the relative offset to be relative to the InstructionStream object
+ // pointer instead.
int patch_position = position - kIntSize;
int offset = masm_.long_at(patch_position);
- masm_.long_at_put(patch_position,
- offset
- + position
- + Code::kHeaderSize
- - kHeapObjectTag);
+ masm_.long_at_put(
+ patch_position,
+ offset + position + InstructionStream::kHeaderSize - kHeapObjectTag);
}
code_relative_fixup_positions_.Rewind(0);
}
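Illustrative aside (not part of the diff): PushBacktrack and FixupCodeRelativePositions convert a label position into an offset relative to the tagged InstructionStream pointer by adding InstructionStream::kHeaderSize and subtracting kHeapObjectTag. The sketch below reproduces that arithmetic with hypothetical constants; the real header size and tag value come from the V8 headers.

#include <cassert>

constexpr int kHeaderSize = 64;    // hypothetical InstructionStream header size
constexpr int kHeapObjectTag = 1;  // tagged pointers are offset by one

// A label position inside the instruction area becomes an offset relative to
// the tagged InstructionStream pointer.
constexpr int CodeRelative(int label_pos) {
  return label_pos + kHeaderSize - kHeapObjectTag;
}

int main() {
  // A label at byte 10 of the instruction area maps to offset 73 from the
  // tagged object pointer (with the assumed constants above).
  assert(CodeRelative(10) == 73);
  return 0;
}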
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 683d3bc428..bfe8290a19 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -92,80 +92,102 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
private:
// Offsets from rbp of function parameters and stored registers.
- static const int kFramePointer = 0;
+ static constexpr int kFramePointerOffset = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kSystemPointerSize;
- static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
+ static constexpr int kReturnAddressOffset =
+ kFramePointerOffset + kSystemPointerSize;
+ static constexpr int kFrameAlign = kReturnAddressOffset + kSystemPointerSize;
+ // Below the frame pointer - the stack frame type marker and locals.
+ static constexpr int kFrameTypeOffset =
+ kFramePointerOffset - kSystemPointerSize;
+ static_assert(kFrameTypeOffset ==
+ CommonFrameConstants::kContextOrFrameTypeOffset);
#ifdef V8_TARGET_OS_WIN
// Parameters (first four passed as registers, but with room on stack).
// In Microsoft 64-bit Calling Convention, there is room on the callers
// stack (before the return address) to spill parameter registers. We
// use this space to store the register passed parameters.
- static const int kInputString = kFrameAlign;
+ static constexpr int kInputStringOffset = kFrameAlign;
// StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kSystemPointerSize;
- static const int kInputStart = kStartIndex + kSystemPointerSize;
- static const int kInputEnd = kInputStart + kSystemPointerSize;
- static const int kRegisterOutput = kInputEnd + kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStringOffset + kSystemPointerSize;
+ static constexpr int kInputStartOffset =
+ kStartIndexOffset + kSystemPointerSize;
+ static constexpr int kInputEndOffset = kInputStartOffset + kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kInputEndOffset + kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value. NumOutputRegisters is passed as 32-bit value. The upper
// 32 bit of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kRegisterOutputOffset + kSystemPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kNumOutputRegisters + kSystemPointerSize;
- static const int kIsolate = kDirectCall + kSystemPointerSize;
+ static constexpr int kDirectCallOffset =
+ kNumOutputRegistersOffset + kSystemPointerSize;
+ static constexpr int kIsolateOffset = kDirectCallOffset + kSystemPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
// if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kSystemPointerSize;
- static const int kStartIndex = kInputString - kSystemPointerSize;
- static const int kInputStart = kStartIndex - kSystemPointerSize;
- static const int kInputEnd = kInputStart - kSystemPointerSize;
- static const int kRegisterOutput = kInputEnd - kSystemPointerSize;
+ static constexpr int kInputStringOffset =
+ kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kStartIndexOffset =
+ kInputStringOffset - kSystemPointerSize;
+ static constexpr int kInputStartOffset =
+ kStartIndexOffset - kSystemPointerSize;
+ static constexpr int kInputEndOffset = kInputStartOffset - kSystemPointerSize;
+ static constexpr int kRegisterOutputOffset =
+ kInputEndOffset - kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput - kSystemPointerSize;
+ static constexpr int kNumOutputRegistersOffset =
+ kRegisterOutputOffset - kSystemPointerSize;
- static const int kDirectCall = kFrameAlign;
- static const int kIsolate = kDirectCall + kSystemPointerSize;
+ static constexpr int kDirectCallOffset = kFrameAlign;
+ static constexpr int kIsolateOffset = kDirectCallOffset + kSystemPointerSize;
#endif
// We push callee-save registers that we use after the frame pointer (and
// after the parameters).
#ifdef V8_TARGET_OS_WIN
- static const int kBackup_rsi = kFramePointer - kSystemPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kSystemPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kSystemPointerSize;
- static const int kNumCalleeSaveRegisters = 3;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
+ static constexpr int kBackupRsiOffset = kFrameTypeOffset - kSystemPointerSize;
+ static constexpr int kBackupRdiOffset = kBackupRsiOffset - kSystemPointerSize;
+ static constexpr int kBackupRbxOffset = kBackupRdiOffset - kSystemPointerSize;
+ static constexpr int kNumCalleeSaveRegisters = 3;
+ static constexpr int kLastCalleeSaveRegister = kBackupRbxOffset;
#else
- static const int kBackup_rbx = kNumOutputRegisters - kSystemPointerSize;
- static const int kNumCalleeSaveRegisters = 1;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
+ static constexpr int kBackupRbxOffset =
+ kNumOutputRegistersOffset - kSystemPointerSize;
+ static constexpr int kNumCalleeSaveRegisters = 1;
+ static constexpr int kLastCalleeSaveRegister = kBackupRbxOffset;
#endif
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures =
+ static constexpr int kSuccessfulCapturesOffset =
kLastCalleeSaveRegister - kSystemPointerSize;
- static const int kStringStartMinusOne =
- kSuccessfulCaptures - kSystemPointerSize;
- static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ static constexpr int kStringStartMinusOneOffset =
+ kSuccessfulCapturesOffset - kSystemPointerSize;
+ static constexpr int kBacktrackCountOffset =
+ kStringStartMinusOneOffset - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
- static const int kRegExpStackBasePointer =
- kBacktrackCount - kSystemPointerSize;
+ static constexpr int kRegExpStackBasePointerOffset =
+ kBacktrackCountOffset - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kRegisterZeroOffset =
+ kRegExpStackBasePointerOffset - kSystemPointerSize;
// Initial size of code buffer.
- static const int kRegExpCodeSize = 1024;
+ static constexpr int kRegExpCodeSize = 1024;
+
+ void CallCFunctionFromIrregexpCode(ExternalReference function,
+ int num_arguments);
void PushCallerSavedRegisters();
void PopCallerSavedRegisters();
@@ -189,7 +211,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// name to the register.
static constexpr Register backtrack_stackpointer() { return rcx; }
- // The registers containing a self pointer to this code's Code object.
+ // The registers containing a self pointer to this code's InstructionStream
+ // object.
static constexpr Register code_object_pointer() { return r8; }
// Byte size of chars in the string to match (decided by the Mode argument)
@@ -223,9 +246,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// by a word size and stores the value there.
inline void Push(Immediate value);
- // Pushes the Code object relative offset of a label on the backtrack stack
- // (i.e., a backtrack target). Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
+ // Pushes the InstructionStream object relative offset of a label on the
+ // backtrack stack (i.e., a backtrack target). Decrements the stack pointer
+ // (rcx) by a word size and stores the value there.
inline void Push(Label* label);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
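Illustrative aside (not part of the diff): the renamed frame constants chain downward from the frame pointer one system pointer at a time, and the new kFrameTypeOffset slot is why every parameter offset in the non-Windows branch shifted down by one word. The compile-only sketch below restates that chain with plain constexpr ints (64-bit pointers assumed), so the static_asserts added in GetCode() are easy to verify in isolation.

constexpr int kSystemPointerSize = 8;  // assumes a 64-bit target

// Slots below the frame pointer, each one pointer apart. The frame-type
// marker occupies the first slot, pushing every parameter slot down by one.
constexpr int kFramePointerOffset = 0;
constexpr int kFrameTypeOffset = kFramePointerOffset - kSystemPointerSize;
constexpr int kInputStringOffset = kFrameTypeOffset - kSystemPointerSize;
constexpr int kStartIndexOffset = kInputStringOffset - kSystemPointerSize;
constexpr int kInputStartOffset = kStartIndexOffset - kSystemPointerSize;

static_assert(kFrameTypeOffset == -1 * kSystemPointerSize);
static_assert(kInputStringOffset == -2 * kSystemPointerSize);
static_assert(kStartIndexOffset == -3 * kSystemPointerSize);
static_assert(kInputStartOffset == -4 * kSystemPointerSize);

int main() { return 0; }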
diff --git a/deps/v8/src/roots/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index ccd2bc6cb7..9c1748754a 100644
--- a/deps/v8/src/roots/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -5,11 +5,13 @@
#ifndef V8_ROOTS_ROOTS_INL_H_
#define V8_ROOTS_ROOTS_INL_H_
+#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/cell.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/heap-number.h"
@@ -23,6 +25,11 @@
#include "src/objects/string.h"
#include "src/objects/swiss-name-dictionary.h"
#include "src/roots/roots.h"
+#include "src/roots/static-roots.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-objects.h"
+#endif
namespace v8 {
namespace internal {
@@ -61,7 +68,7 @@ bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
: ReadOnlyRoots(Isolate::FromHeap(heap)) {}
-ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
+ReadOnlyRoots::ReadOnlyRoots(const Isolate* isolate)
: read_only_roots_(reinterpret_cast<Address*>(
isolate->roots_table().read_only_roots_begin().address())) {}
@@ -72,19 +79,18 @@ ReadOnlyRoots::ReadOnlyRoots(LocalIsolate* isolate)
// have the right type, and to avoid the heavy #includes that would be
// required for checked casts.
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- Type ReadOnlyRoots::name() const { \
- DCHECK(CheckType_##name()); \
- return unchecked_##name(); \
- } \
- Type ReadOnlyRoots::unchecked_##name() const { \
- return Type::unchecked_cast( \
- Object(*GetLocation(RootIndex::k##CamelName))); \
- } \
- Handle<Type> ReadOnlyRoots::name##_handle() const { \
- DCHECK(CheckType_##name()); \
- Address* location = GetLocation(RootIndex::k##CamelName); \
- return Handle<Type>(location); \
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type ReadOnlyRoots::name() const { \
+ DCHECK(CheckType_##name()); \
+ return unchecked_##name(); \
+ } \
+ Type ReadOnlyRoots::unchecked_##name() const { \
+ return Type::unchecked_cast(object_at(RootIndex::k##CamelName)); \
+ } \
+ Handle<Type> ReadOnlyRoots::name##_handle() const { \
+ DCHECK(CheckType_##name()); \
+ Address* location = GetLocation(RootIndex::k##CamelName); \
+ return Handle<Type>(location); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
@@ -93,15 +99,20 @@ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
Address* ReadOnlyRoots::GetLocation(RootIndex root_index) const {
size_t index = static_cast<size_t>(root_index);
DCHECK_LT(index, kEntriesCount);
- return &read_only_roots_[index];
+ Address* location = &read_only_roots_[index];
+ // Filler objects must be created before the free space map is initialized.
+ // Bootstrapping is able to handle kNullAddress being returned here.
+ DCHECK_IMPLIES(*location == kNullAddress,
+ root_index == RootIndex::kFreeSpaceMap);
+ return location;
}
Address ReadOnlyRoots::first_name_for_protector() const {
- return at(RootIndex::kFirstNameForProtector);
+ return address_at(RootIndex::kFirstNameForProtector);
}
Address ReadOnlyRoots::last_name_for_protector() const {
- return at(RootIndex::kLastNameForProtector);
+ return address_at(RootIndex::kLastNameForProtector);
}
bool ReadOnlyRoots::IsNameForProtector(HeapObject object) const {
@@ -116,8 +127,28 @@ void ReadOnlyRoots::VerifyNameForProtectorsPages() const {
Page::FromAddress(last_name_for_protector()));
}
-Address ReadOnlyRoots::at(RootIndex root_index) const {
+Handle<Object> ReadOnlyRoots::handle_at(RootIndex root_index) const {
+ return Handle<Object>(GetLocation(root_index));
+}
+
+Object ReadOnlyRoots::object_at(RootIndex root_index) const {
+ return Object(address_at(root_index));
+}
+
+Address ReadOnlyRoots::address_at(RootIndex root_index) const {
+#if V8_STATIC_ROOTS_BOOL
+ return V8HeapCompressionScheme::DecompressTagged(
+ V8HeapCompressionScheme::base(),
+ StaticReadOnlyRootsPointerTable[static_cast<int>(root_index)]);
+#else
return *GetLocation(root_index);
+#endif
+}
+
+bool ReadOnlyRoots::is_initialized(RootIndex root_index) const {
+ size_t index = static_cast<size_t>(root_index);
+ DCHECK_LT(index, kEntriesCount);
+ return read_only_roots_[index] != kNullAddress;
}
} // namespace internal
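Illustrative aside (not part of the diff): with V8_STATIC_ROOTS_BOOL, ReadOnlyRoots::address_at() decompresses a 32-bit entry of StaticReadOnlyRootsPointerTable against the cage base instead of reading the roots array. The toy sketch below shows only the base-plus-offset idea; the real V8HeapCompressionScheme::DecompressTagged also handles tagging and other pointer-compression details omitted here.

#include <cassert>
#include <cstdint>

using Tagged_t = uint32_t;  // compressed, cage-relative pointer
using Address = uint64_t;   // full pointer

// Adding the cage base to the 32-bit table entry reproduces the full pointer.
Address DecompressTagged(Address cage_base, Tagged_t compressed) {
  return cage_base + compressed;
}

int main() {
  constexpr Tagged_t kMetaMap = 0x61;        // value taken from static-roots.h
  const Address cage_base = 0x100000000ULL;  // hypothetical cage base
  assert(DecompressTagged(cage_base, kMetaMap) == 0x100000061ULL);
  return 0;
}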
diff --git a/deps/v8/src/roots/roots.cc b/deps/v8/src/roots/roots.cc
index 1f798c94b5..d76360f97a 100644
--- a/deps/v8/src/roots/roots.cc
+++ b/deps/v8/src/roots/roots.cc
@@ -4,9 +4,11 @@
#include "src/roots/roots.h"
+#include "src/common/globals.h"
#include "src/objects/elements-kind.h"
#include "src/objects/objects-inl.h"
#include "src/objects/visitors.h"
+#include "src/roots/static-roots.h"
namespace v8 {
namespace internal {
@@ -34,7 +36,7 @@ void ReadOnlyRoots::VerifyNameForProtectors() {
Name prev;
for (RootIndex root_index = RootIndex::kFirstNameForProtector;
root_index <= RootIndex::kLastNameForProtector; ++root_index) {
- Name current = Name::cast(Object(at(root_index)));
+ Name current = Name::cast(object_at(root_index));
DCHECK(IsNameForProtector(current));
if (root_index != RootIndex::kFirstNameForProtector) {
// Make sure the objects are adjacent in memory.
@@ -56,5 +58,31 @@ READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
#undef ROOT_TYPE_CHECK
#endif
+Handle<HeapNumber> ReadOnlyRoots::FindHeapNumber(double value) {
+ auto bits = base::bit_cast<uint64_t>(value);
+ for (auto pos = RootIndex::kFirstHeapNumberRoot;
+ pos <= RootIndex::kLastHeapNumberRoot; ++pos) {
+ auto root = HeapNumber::cast(object_at(pos));
+ if (base::bit_cast<uint64_t>(root.value()) == bits) {
+ return Handle<HeapNumber>(GetLocation(pos));
+ }
+ }
+ return Handle<HeapNumber>();
+}
+
+void ReadOnlyRoots::InitFromStaticRootsTable(Address cage_base) {
+ CHECK(V8_STATIC_ROOTS_BOOL);
+#if V8_STATIC_ROOTS_BOOL
+ RootIndex pos = RootIndex::kFirstReadOnlyRoot;
+ for (auto element : StaticReadOnlyRootsPointerTable) {
+ auto ptr = V8HeapCompressionScheme::DecompressTagged(cage_base, element);
+ DCHECK(!is_initialized(pos));
+ read_only_roots_[static_cast<size_t>(pos)] = ptr;
+ ++pos;
+ }
+ DCHECK_EQ(static_cast<int>(pos) - 1, RootIndex::kLastReadOnlyRoot);
+#endif // V8_STATIC_ROOTS_BOOL
+}
+
} // namespace internal
} // namespace v8
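Illustrative aside (not part of the diff): FindHeapNumber compares candidates by their raw 64-bit bit pattern rather than with operator==, so -0.0 is not conflated with +0.0 and a specific NaN payload can be matched. A self-contained version of that comparison, using a plain vector in place of the root table:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Comparing raw 64-bit patterns distinguishes -0.0 from +0.0 and matches an
// exact NaN payload, which a plain == on doubles cannot do.
uint64_t Bits(double v) {
  uint64_t b;
  std::memcpy(&b, &v, sizeof b);
  return b;
}

int FindNumber(const std::vector<double>& roots, double value) {
  const uint64_t bits = Bits(value);
  for (size_t i = 0; i < roots.size(); ++i) {
    if (Bits(roots[i]) == bits) return static_cast<int>(i);
  }
  return -1;  // analogous to returning the empty handle
}

int main() {
  std::vector<double> roots = {0.0, -0.0, 1.0};
  assert(FindNumber(roots, -0.0) == 1);  // not confused with +0.0 at index 0
  assert(FindNumber(roots, 2.0) == -1);
  return 0;
}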
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 3772da300d..4beb2f218b 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -34,6 +34,27 @@ class RootVisitor;
class String;
class Symbol;
+#define STRONG_READ_ONLY_HEAP_NUMBER_ROOT_LIST(V) \
+ /* Special numbers */ \
+ V(HeapNumber, nan_value, NanValue) \
+ V(HeapNumber, hole_nan_value, HoleNanValue) \
+ V(HeapNumber, infinity_value, InfinityValue) \
+ V(HeapNumber, minus_zero_value, MinusZeroValue) \
+ V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
+ V(HeapNumber, max_safe_integer, MaxSafeInteger) \
+ V(HeapNumber, max_uint_32, MaxUInt32) \
+ V(HeapNumber, smi_min_value, SmiMinValue) \
+ V(HeapNumber, smi_max_value_plus_one, SmiMaxValuePlusOne)
+
+// Adapts one INTERNALIZED_STRING_LIST_GENERATOR entry to
+// the ROOT_LIST-compatible entry
+#define INTERNALIZED_STRING_LIST_ADAPTER(V, name, ...) V(String, name, name)
+
+// Produces (String, name, CamelCase) entries
+#define EXTRA_IMPORTANT_INTERNALIZED_STRING_ROOT_LIST(V) \
+ EXTRA_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR( \
+ INTERNALIZED_STRING_LIST_ADAPTER, V)
+
// Defines all the read-only roots in Heap.
#define STRONG_READ_ONLY_ROOT_LIST(V) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
@@ -48,24 +69,24 @@ class Symbol;
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
- V(String, empty_string, empty_string) \
+ EXTRA_IMPORTANT_INTERNALIZED_STRING_ROOT_LIST(V) \
V(Map, meta_map, MetaMap) \
V(Map, byte_array_map, ByteArrayMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, hash_table_map, HashTableMap) \
V(Map, symbol_map, SymbolMap) \
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, scope_info_map, ScopeInfoMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
- V(Map, code_map, CodeMap) \
+ V(Map, instruction_stream_map, InstructionStreamMap) \
V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, foreign_map, ForeignMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, transition_array_map, TransitionArrayMap) \
- V(Map, thin_one_byte_string_map, ThinOneByteStringMap) \
/* TODO(mythria): Once lazy feedback lands, check if feedback vector map */ \
/* is still a popular map */ \
V(Map, feedback_vector_map, FeedbackVectorMap) \
@@ -87,9 +108,8 @@ class Symbol;
V(Map, bigint_map, BigIntMap) \
V(Map, object_boilerplate_description_map, ObjectBoilerplateDescriptionMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
- V(Map, code_data_container_map, CodeDataContainerMap) \
+ V(Map, code_map, CodeMap) \
V(Map, coverage_info_map, CoverageInfoMap) \
- V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
V(Map, mega_dom_handler_map, MegaDomHandlerMap) \
@@ -127,6 +147,7 @@ class Symbol;
IF_WASM(V, Map, wasm_resume_data_map, WasmResumeDataMap) \
IF_WASM(V, Map, wasm_type_info_map, WasmTypeInfoMap) \
IF_WASM(V, Map, wasm_continuation_object_map, WasmContinuationObjectMap) \
+ IF_WASM(V, Map, wasm_null_map, WasmNullMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
V(Map, weak_array_list_map, WeakArrayListMap) \
V(Map, ephemeron_hash_table_map, EphemeronHashTableMap) \
@@ -159,8 +180,6 @@ class Symbol;
V(Map, shared_uncached_external_one_byte_string_map, \
SharedUncachedExternalOneByteStringMap) \
V(Map, shared_uncached_external_string_map, SharedUncachedExternalStringMap) \
- V(Map, shared_thin_one_byte_string_map, SharedThinOneByteStringMap) \
- V(Map, shared_thin_string_map, SharedThinStringMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
@@ -198,31 +217,23 @@ class Symbol;
V(ArrayList, empty_array_list, EmptyArrayList) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
- /* Special numbers */ \
- V(HeapNumber, nan_value, NanValue) \
- V(HeapNumber, hole_nan_value, HoleNanValue) \
- V(HeapNumber, infinity_value, InfinityValue) \
- V(HeapNumber, minus_zero_value, MinusZeroValue) \
- V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
+ V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
+ STRONG_READ_ONLY_HEAP_NUMBER_ROOT_LIST(V) \
/* Table of strings of one-byte single characters */ \
V(FixedArray, single_character_string_table, SingleCharacterStringTable) \
/* Marker for self-references during code-generation */ \
V(HeapObject, self_reference_marker, SelfReferenceMarker) \
/* Marker for basic-block usage counters array during code-generation */ \
V(Oddball, basic_block_counters_marker, BasicBlockCountersMarker) \
- /* Canonical off-heap trampoline data */ \
- V(ByteArray, off_heap_trampoline_relocation_info, \
- OffHeapTrampolineRelocationInfo) \
- V(HeapObject, trampoline_trivial_code_data_container, \
- TrampolineTrivialCodeDataContainer) \
- V(HeapObject, trampoline_promise_rejection_code_data_container, \
- TrampolinePromiseRejectionCodeDataContainer) \
/* Canonical scope infos */ \
V(ScopeInfo, global_this_binding_scope_info, GlobalThisBindingScopeInfo) \
V(ScopeInfo, empty_function_scope_info, EmptyFunctionScopeInfo) \
V(ScopeInfo, native_scope_info, NativeScopeInfo) \
+ V(ScopeInfo, shadow_realm_scope_info, ShadowRealmScopeInfo) \
+ V(RegisteredSymbolTable, empty_symbol_table, EmptySymbolTable) \
/* Hash seed */ \
- V(ByteArray, hash_seed, HashSeed)
+ V(ByteArray, hash_seed, HashSeed) \
+ IF_WASM(V, WasmNull, wasm_null, WasmNull)
// Mutable roots that are known to be immortal immovable, for which we can
// safely skip write barriers.
@@ -234,7 +245,6 @@ class Symbol;
/* Canonical empty values */ \
V(Script, empty_script, EmptyScript) \
V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
- V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
/* Protectors */ \
V(PropertyCell, array_constructor_protector, ArrayConstructorProtector) \
V(PropertyCell, no_elements_protector, NoElementsProtector) \
@@ -254,6 +264,8 @@ class Symbol;
V(PropertyCell, promise_then_protector, PromiseThenProtector) \
V(PropertyCell, set_iterator_protector, SetIteratorProtector) \
V(PropertyCell, string_iterator_protector, StringIteratorProtector) \
+ V(PropertyCell, number_string_prototype_no_replace_protector, \
+ NumberStringPrototypeNoReplaceProtector) \
/* Caches */ \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
@@ -354,13 +366,12 @@ class Symbol;
ConstructStubInvokeDeoptPCOffset) \
V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
-// Adapts one INTERNALIZED_STRING_LIST_GENERATOR entry to
-// the ROOT_LIST-compatible entry
-#define INTERNALIZED_STRING_LIST_ADAPTER(V, name, ...) V(String, name, name)
-
// Produces (String, name, CamelCase) entries
-#define INTERNALIZED_STRING_ROOT_LIST(V) \
- INTERNALIZED_STRING_LIST_GENERATOR(INTERNALIZED_STRING_LIST_ADAPTER, V)
+#define INTERNALIZED_STRING_ROOT_LIST(V) \
+ IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR( \
+ INTERNALIZED_STRING_LIST_ADAPTER, V) \
+ NOT_IMPORTANT_INTERNALIZED_STRING_LIST_GENERATOR( \
+ INTERNALIZED_STRING_LIST_ADAPTER, V)
// Adapts one XXX_SYMBOL_LIST_GENERATOR entry to the ROOT_LIST-compatible entry
#define SYMBOL_ROOT_LIST_ADAPTER(V, name, ...) V(Symbol, name, name)
@@ -431,6 +442,9 @@ enum class RootIndex : uint16_t {
kFirstReadOnlyRoot = kFirstRoot,
kLastReadOnlyRoot = kFirstReadOnlyRoot + kReadOnlyRootsCount - 1,
+ kFirstHeapNumberRoot = kNanValue,
+ kLastHeapNumberRoot = kSmiMaxValuePlusOne,
+
// Use for fast protector update checks
kFirstNameForProtector = kconstructor_string,
kNameForProtectorCount = 0 NAME_FOR_PROTECTOR_ROOT_LIST(COUNT_ROOT),
@@ -516,6 +530,12 @@ class RootsTable {
static_cast<unsigned>(RootIndex::kLastImmortalImmovableRoot);
}
+ static constexpr bool IsReadOnly(RootIndex root_index) {
+ static_assert(static_cast<int>(RootIndex::kFirstReadOnlyRoot) == 0);
+ return static_cast<unsigned>(root_index) <=
+ static_cast<unsigned>(RootIndex::kLastReadOnlyRoot);
+ }
+
private:
FullObjectSlot begin() {
return FullObjectSlot(&roots_[static_cast<size_t>(RootIndex::kFirstRoot)]);
@@ -593,7 +613,7 @@ class ReadOnlyRoots {
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
- V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
+ V8_INLINE explicit ReadOnlyRoots(const Isolate* isolate);
V8_INLINE explicit ReadOnlyRoots(LocalIsolate* isolate);
// For `v8_enable_map_packing=true`, this will return a packed (also untagged)
@@ -614,14 +634,27 @@ class ReadOnlyRoots {
void VerifyNameForProtectors();
#endif
- // Get the address of a given read-only root index, without type checks.
- V8_INLINE Address at(RootIndex root_index) const;
+ // Returns a heap number with an identical value if one already exists, or
+ // an empty handle otherwise.
+ Handle<HeapNumber> FindHeapNumber(double value);
+
+ V8_INLINE Address address_at(RootIndex root_index) const;
+ V8_INLINE Object object_at(RootIndex root_index) const;
+ V8_INLINE Handle<Object> handle_at(RootIndex root_index) const;
+
+ // Check whether a slot has been initialized yet. This should only be
+ // necessary for code running during snapshot creation.
+ V8_INLINE bool is_initialized(RootIndex root_index) const;
// Iterate over all the read-only roots. This is not necessary for garbage
// collection and is usually only performed as part of (de)serialization or
// heap verification.
void Iterate(RootVisitor* visitor);
+ // Uncompress pointers in the static roots table and store them into the
+ // actual roots table.
+ void InitFromStaticRootsTable(Address cage_base);
+
private:
V8_INLINE Address first_name_for_protector() const;
V8_INLINE Address last_name_for_protector() const;
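Editor's note: the hunk above replaces the single Address-returning at() accessor with a small family of typed accessors and adds FindHeapNumber for reusing canonical read-only heap numbers. A minimal usage sketch, assuming an initialized Isolate* named isolate (variable names are illustrative, not from this patch):

    ReadOnlyRoots roots(isolate);
    Address raw = roots.address_at(RootIndex::kUndefinedValue);  // raw slot value
    Object undefined_value = roots.object_at(RootIndex::kUndefinedValue);
    Handle<Object> handle = roots.handle_at(RootIndex::kUndefinedValue);
    // Only meaningful during snapshot creation, when some slots may still be empty.
    if (roots.is_initialized(RootIndex::kEmptySymbolTable)) { /* ... */ }
    // Reuses a canonical read-only HeapNumber with this value if one exists;
    // otherwise returns an empty handle.
    Handle<HeapNumber> forty_two = roots.FindHeapNumber(42.0);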
diff --git a/deps/v8/src/roots/static-roots.h b/deps/v8/src/roots/static-roots.h
new file mode 100644
index 0000000000..df53f43736
--- /dev/null
+++ b/deps/v8/src/roots/static-roots.h
@@ -0,0 +1,1515 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is automatically generated by `tools/dev/gen-static-roots.py`. Do
+// not edit manually.
+
+#ifndef V8_ROOTS_STATIC_ROOTS_H_
+#define V8_ROOTS_STATIC_ROOTS_H_
+
+#include "src/common/globals.h"
+
+#if V8_STATIC_ROOTS_BOOL
+
+#include "src/objects/instance-type.h"
+#include "src/roots/roots.h"
+
+// Disabling Wasm or Intl invalidates the contents of static-roots.h.
+// TODO(olivf): To support static roots for multiple build configurations we
+// will need to generate target specific versions of this file.
+static_assert(V8_ENABLE_WEBASSEMBLY);
+static_assert(V8_INTL_SUPPORT);
+
+namespace v8 {
+namespace internal {
+
+struct StaticReadOnlyRoot {
+ static constexpr Tagged_t kMetaMap = 0x61;
+ static constexpr Tagged_t kFixedArrayMap = 0x89;
+ static constexpr Tagged_t kWeakFixedArrayMap = 0xb1;
+ static constexpr Tagged_t kWeakArrayListMap = 0xd9;
+ static constexpr Tagged_t kFixedCOWArrayMap = 0x101;
+ static constexpr Tagged_t kDescriptorArrayMap = 0x129;
+ static constexpr Tagged_t kUndefinedMap = 0x151;
+ static constexpr Tagged_t kNullMap = 0x179;
+ static constexpr Tagged_t kTheHoleMap = 0x1a1;
+ static constexpr Tagged_t kCallSiteInfoMap = 0x1c9;
+ static constexpr Tagged_t kEnumCacheMap = 0x1f1;
+ static constexpr Tagged_t kEmptyFixedArray = 0x219;
+ static constexpr Tagged_t kEmptyWeakFixedArray = 0x221;
+ static constexpr Tagged_t kEmptyWeakArrayList = 0x229;
+ static constexpr Tagged_t kNullValue = 0x235;
+ static constexpr Tagged_t kUndefinedValue = 0x251;
+ static constexpr Tagged_t kTheHoleValue = 0x26d;
+ static constexpr Tagged_t kEmptyEnumCache = 0x289;
+ static constexpr Tagged_t kEmptyDescriptorArray = 0x295;
+ static constexpr Tagged_t kScopeInfoMap = 0x2a5;
+ static constexpr Tagged_t kModuleInfoMap = 0x2cd;
+ static constexpr Tagged_t kClosureFeedbackCellArrayMap = 0x2f5;
+ static constexpr Tagged_t kFeedbackVectorMap = 0x31d;
+ static constexpr Tagged_t kHeapNumberMap = 0x345;
+ static constexpr Tagged_t kForeignMap = 0x36d;
+ static constexpr Tagged_t kMegaDomHandlerMap = 0x395;
+ static constexpr Tagged_t kBooleanMap = 0x3bd;
+ static constexpr Tagged_t kUninitializedMap = 0x3e5;
+ static constexpr Tagged_t kArgumentsMarkerMap = 0x40d;
+ static constexpr Tagged_t kExceptionMap = 0x435;
+ static constexpr Tagged_t kTerminationExceptionMap = 0x45d;
+ static constexpr Tagged_t kOptimizedOutMap = 0x485;
+ static constexpr Tagged_t kStaleRegisterMap = 0x4ad;
+ static constexpr Tagged_t kSelfReferenceMarkerMap = 0x4d5;
+ static constexpr Tagged_t kBasicBlockCountersMarkerMap = 0x4fd;
+ static constexpr Tagged_t kBigIntMap = 0x525;
+ static constexpr Tagged_t kSymbolMap = 0x54d;
+ static constexpr Tagged_t kStringMap = 0x575;
+ static constexpr Tagged_t kOneByteStringMap = 0x59d;
+ static constexpr Tagged_t kConsStringMap = 0x5c5;
+ static constexpr Tagged_t kConsOneByteStringMap = 0x5ed;
+ static constexpr Tagged_t kSlicedStringMap = 0x615;
+ static constexpr Tagged_t kSlicedOneByteStringMap = 0x63d;
+ static constexpr Tagged_t kExternalStringMap = 0x665;
+ static constexpr Tagged_t kExternalOneByteStringMap = 0x68d;
+ static constexpr Tagged_t kUncachedExternalStringMap = 0x6b5;
+ static constexpr Tagged_t kUncachedExternalOneByteStringMap = 0x6dd;
+ static constexpr Tagged_t kSharedExternalStringMap = 0x705;
+ static constexpr Tagged_t kSharedExternalOneByteStringMap = 0x72d;
+ static constexpr Tagged_t kSharedUncachedExternalStringMap = 0x755;
+ static constexpr Tagged_t kSharedUncachedExternalOneByteStringMap = 0x77d;
+ static constexpr Tagged_t kExternalInternalizedStringMap = 0x7a5;
+ static constexpr Tagged_t kExternalOneByteInternalizedStringMap = 0x7cd;
+ static constexpr Tagged_t kUncachedExternalInternalizedStringMap = 0x7f5;
+ static constexpr Tagged_t kUncachedExternalOneByteInternalizedStringMap =
+ 0x81d;
+ static constexpr Tagged_t kInternalizedStringMap = 0x845;
+ static constexpr Tagged_t kOneByteInternalizedStringMap = 0x86d;
+ static constexpr Tagged_t kThinStringMap = 0x895;
+ static constexpr Tagged_t kSharedStringMap = 0x8bd;
+ static constexpr Tagged_t kSharedOneByteStringMap = 0x8e5;
+ static constexpr Tagged_t kFixedDoubleArrayMap = 0x90d;
+ static constexpr Tagged_t kFeedbackMetadataArrayMap = 0x935;
+ static constexpr Tagged_t kByteArrayMap = 0x95d;
+ static constexpr Tagged_t kBytecodeArrayMap = 0x985;
+ static constexpr Tagged_t kFreeSpaceMap = 0x9ad;
+ static constexpr Tagged_t kPropertyArrayMap = 0x9d5;
+ static constexpr Tagged_t kSmallOrderedHashMapMap = 0x9fd;
+ static constexpr Tagged_t kSmallOrderedHashSetMap = 0xa25;
+ static constexpr Tagged_t kSmallOrderedNameDictionaryMap = 0xa4d;
+ static constexpr Tagged_t kInstructionStreamMap = 0xa75;
+ static constexpr Tagged_t kCellMap = 0xa9d;
+ static constexpr Tagged_t kInvalidPrototypeValidityCell = 0xac5;
+ static constexpr Tagged_t kGlobalPropertyCellMap = 0xacd;
+ static constexpr Tagged_t kOnePointerFillerMap = 0xaf5;
+ static constexpr Tagged_t kTwoPointerFillerMap = 0xb1d;
+ static constexpr Tagged_t kNoClosuresCellMap = 0xb45;
+ static constexpr Tagged_t kOneClosureCellMap = 0xb6d;
+ static constexpr Tagged_t kManyClosuresCellMap = 0xb95;
+ static constexpr Tagged_t kTransitionArrayMap = 0xbbd;
+ static constexpr Tagged_t kHashTableMap = 0xbe5;
+ static constexpr Tagged_t kOrderedNameDictionaryMap = 0xc0d;
+ static constexpr Tagged_t kNameDictionaryMap = 0xc35;
+ static constexpr Tagged_t kSwissNameDictionaryMap = 0xc5d;
+ static constexpr Tagged_t kGlobalDictionaryMap = 0xc85;
+ static constexpr Tagged_t kNumberDictionaryMap = 0xcad;
+ static constexpr Tagged_t kRegisteredSymbolTableMap = 0xcd5;
+ static constexpr Tagged_t kArrayListMap = 0xcfd;
+ static constexpr Tagged_t kAccessorInfoMap = 0xd25;
+ static constexpr Tagged_t kPreparseDataMap = 0xd4d;
+ static constexpr Tagged_t kSharedFunctionInfoMap = 0xd75;
+ static constexpr Tagged_t kCodeMap = 0xd9d;
+ static constexpr Tagged_t kTrueValue = 0xdc5;
+ static constexpr Tagged_t kFalseValue = 0xde1;
+ static constexpr Tagged_t kHashSeed = 0xdfd;
+ static constexpr Tagged_t kempty_string = 0xe0d;
+ static constexpr Tagged_t klength_string = 0xe19;
+ static constexpr Tagged_t kprototype_string = 0xe2d;
+ static constexpr Tagged_t kname_string = 0xe45;
+ static constexpr Tagged_t kenumerable_string = 0xe55;
+ static constexpr Tagged_t kconfigurable_string = 0xe6d;
+ static constexpr Tagged_t kvalue_string = 0xe85;
+ static constexpr Tagged_t kwritable_string = 0xe99;
+ static constexpr Tagged_t knot_mapped_symbol = 0xead;
+ static constexpr Tagged_t kuninitialized_symbol = 0xebd;
+ static constexpr Tagged_t kmegamorphic_symbol = 0xecd;
+ static constexpr Tagged_t kelements_transition_symbol = 0xedd;
+ static constexpr Tagged_t kmega_dom_symbol = 0xeed;
+ static constexpr Tagged_t kEmptyPropertyDictionary = 0xefd;
+ static constexpr Tagged_t kEmptyOrderedPropertyDictionary = 0xf29;
+ static constexpr Tagged_t kEmptySwissPropertyDictionary = 0xf4d;
+ static constexpr Tagged_t kEmptyByteArray = 0xf6d;
+ static constexpr Tagged_t kEmptyScopeInfo = 0xf75;
+ static constexpr Tagged_t kEmptyPropertyArray = 0xf85;
+ static constexpr Tagged_t kMinusZeroValue = 0xf8d;
+ static constexpr Tagged_t kNanValue = 0xf99;
+ static constexpr Tagged_t kHoleNanValue = 0xfa5;
+ static constexpr Tagged_t kInfinityValue = 0xfb1;
+ static constexpr Tagged_t kMinusInfinityValue = 0xfbd;
+ static constexpr Tagged_t kMaxSafeInteger = 0xfc9;
+ static constexpr Tagged_t kMaxUInt32 = 0xfd5;
+ static constexpr Tagged_t kSmiMinValue = 0xfe1;
+ static constexpr Tagged_t kSmiMaxValuePlusOne = 0xfed;
+ static constexpr Tagged_t kPromiseFulfillReactionJobTaskMap = 0xff9;
+ static constexpr Tagged_t kPromiseRejectReactionJobTaskMap = 0x1021;
+ static constexpr Tagged_t kCallableTaskMap = 0x1049;
+ static constexpr Tagged_t kCallbackTaskMap = 0x1071;
+ static constexpr Tagged_t kPromiseResolveThenableJobTaskMap = 0x1099;
+ static constexpr Tagged_t kFunctionTemplateInfoMap = 0x10c1;
+ static constexpr Tagged_t kObjectTemplateInfoMap = 0x10e9;
+ static constexpr Tagged_t kAccessCheckInfoMap = 0x1111;
+ static constexpr Tagged_t kAccessorPairMap = 0x1139;
+ static constexpr Tagged_t kAliasedArgumentsEntryMap = 0x1161;
+ static constexpr Tagged_t kAllocationMementoMap = 0x1189;
+ static constexpr Tagged_t kArrayBoilerplateDescriptionMap = 0x11b1;
+ static constexpr Tagged_t kAsmWasmDataMap = 0x11d9;
+ static constexpr Tagged_t kAsyncGeneratorRequestMap = 0x1201;
+ static constexpr Tagged_t kBreakPointMap = 0x1229;
+ static constexpr Tagged_t kBreakPointInfoMap = 0x1251;
+ static constexpr Tagged_t kClassPositionsMap = 0x1279;
+ static constexpr Tagged_t kDebugInfoMap = 0x12a1;
+ static constexpr Tagged_t kErrorStackDataMap = 0x12c9;
+ static constexpr Tagged_t kFunctionTemplateRareDataMap = 0x12f1;
+ static constexpr Tagged_t kInterceptorInfoMap = 0x1319;
+ static constexpr Tagged_t kInterpreterDataMap = 0x1341;
+ static constexpr Tagged_t kModuleRequestMap = 0x1369;
+ static constexpr Tagged_t kPromiseCapabilityMap = 0x1391;
+ static constexpr Tagged_t kPromiseOnStackMap = 0x13b9;
+ static constexpr Tagged_t kPromiseReactionMap = 0x13e1;
+ static constexpr Tagged_t kPropertyDescriptorObjectMap = 0x1409;
+ static constexpr Tagged_t kPrototypeInfoMap = 0x1431;
+ static constexpr Tagged_t kRegExpBoilerplateDescriptionMap = 0x1459;
+ static constexpr Tagged_t kScriptMap = 0x1481;
+ static constexpr Tagged_t kScriptOrModuleMap = 0x14a9;
+ static constexpr Tagged_t kSourceTextModuleInfoEntryMap = 0x14d1;
+ static constexpr Tagged_t kStackFrameInfoMap = 0x14f9;
+ static constexpr Tagged_t kTemplateObjectDescriptionMap = 0x1521;
+ static constexpr Tagged_t kTuple2Map = 0x1549;
+ static constexpr Tagged_t kWasmExceptionTagMap = 0x1571;
+ static constexpr Tagged_t kWasmIndirectFunctionTableMap = 0x1599;
+ static constexpr Tagged_t kAllocationSiteWithWeakNextMap = 0x15c1;
+ static constexpr Tagged_t kAllocationSiteWithoutWeakNextMap = 0x15e9;
+ static constexpr Tagged_t kLoadHandler1Map = 0x1611;
+ static constexpr Tagged_t kLoadHandler2Map = 0x1639;
+ static constexpr Tagged_t kLoadHandler3Map = 0x1661;
+ static constexpr Tagged_t kStoreHandler0Map = 0x1689;
+ static constexpr Tagged_t kStoreHandler1Map = 0x16b1;
+ static constexpr Tagged_t kStoreHandler2Map = 0x16d9;
+ static constexpr Tagged_t kStoreHandler3Map = 0x1701;
+ static constexpr Tagged_t kUncompiledDataWithoutPreparseDataMap = 0x1729;
+ static constexpr Tagged_t kUncompiledDataWithPreparseDataMap = 0x1751;
+ static constexpr Tagged_t kUncompiledDataWithoutPreparseDataWithJobMap =
+ 0x1779;
+ static constexpr Tagged_t kUncompiledDataWithPreparseDataAndJobMap = 0x17a1;
+ static constexpr Tagged_t kOnHeapBasicBlockProfilerDataMap = 0x17c9;
+ static constexpr Tagged_t kTurbofanBitsetTypeMap = 0x17f1;
+ static constexpr Tagged_t kTurbofanUnionTypeMap = 0x1819;
+ static constexpr Tagged_t kTurbofanRangeTypeMap = 0x1841;
+ static constexpr Tagged_t kTurbofanHeapConstantTypeMap = 0x1869;
+ static constexpr Tagged_t kTurbofanOtherNumberConstantTypeMap = 0x1891;
+ static constexpr Tagged_t kTurboshaftWord32TypeMap = 0x18b9;
+ static constexpr Tagged_t kTurboshaftWord32RangeTypeMap = 0x18e1;
+ static constexpr Tagged_t kTurboshaftWord64TypeMap = 0x1909;
+ static constexpr Tagged_t kTurboshaftWord64RangeTypeMap = 0x1931;
+ static constexpr Tagged_t kTurboshaftFloat64TypeMap = 0x1959;
+ static constexpr Tagged_t kTurboshaftFloat64RangeTypeMap = 0x1981;
+ static constexpr Tagged_t kInternalClassMap = 0x19a9;
+ static constexpr Tagged_t kSmiPairMap = 0x19d1;
+ static constexpr Tagged_t kSmiBoxMap = 0x19f9;
+ static constexpr Tagged_t kExportedSubClassBaseMap = 0x1a21;
+ static constexpr Tagged_t kExportedSubClassMap = 0x1a49;
+ static constexpr Tagged_t kAbstractInternalClassSubclass1Map = 0x1a71;
+ static constexpr Tagged_t kAbstractInternalClassSubclass2Map = 0x1a99;
+ static constexpr Tagged_t kExportedSubClass2Map = 0x1ac1;
+ static constexpr Tagged_t kSortStateMap = 0x1ae9;
+ static constexpr Tagged_t kWasmStringViewIterMap = 0x1b11;
+ static constexpr Tagged_t kSloppyArgumentsElementsMap = 0x1b39;
+ static constexpr Tagged_t kStrongDescriptorArrayMap = 0x1b61;
+ static constexpr Tagged_t kTurboshaftWord32SetTypeMap = 0x1b89;
+ static constexpr Tagged_t kTurboshaftWord64SetTypeMap = 0x1bb1;
+ static constexpr Tagged_t kTurboshaftFloat64SetTypeMap = 0x1bd9;
+ static constexpr Tagged_t kInternalClassWithSmiElementsMap = 0x1c01;
+ static constexpr Tagged_t kInternalClassWithStructElementsMap = 0x1c29;
+ static constexpr Tagged_t kOrderedHashMapMap = 0x1c51;
+ static constexpr Tagged_t kOrderedHashSetMap = 0x1c79;
+ static constexpr Tagged_t kSimpleNumberDictionaryMap = 0x1ca1;
+ static constexpr Tagged_t kNameToIndexHashTableMap = 0x1cc9;
+ static constexpr Tagged_t kEmbedderDataArrayMap = 0x1cf1;
+ static constexpr Tagged_t kEphemeronHashTableMap = 0x1d19;
+ static constexpr Tagged_t kScriptContextTableMap = 0x1d41;
+ static constexpr Tagged_t kObjectBoilerplateDescriptionMap = 0x1d69;
+ static constexpr Tagged_t kCoverageInfoMap = 0x1d91;
+ static constexpr Tagged_t kSideEffectCallHandlerInfoMap = 0x1db9;
+ static constexpr Tagged_t kSideEffectFreeCallHandlerInfoMap = 0x1de1;
+ static constexpr Tagged_t kNextCallSideEffectFreeCallHandlerInfoMap = 0x1e09;
+ static constexpr Tagged_t kSourceTextModuleMap = 0x1e31;
+ static constexpr Tagged_t kSyntheticModuleMap = 0x1e59;
+ static constexpr Tagged_t kWasmApiFunctionRefMap = 0x1e81;
+ static constexpr Tagged_t kWasmCapiFunctionDataMap = 0x1ea9;
+ static constexpr Tagged_t kWasmExportedFunctionDataMap = 0x1ed1;
+ static constexpr Tagged_t kWasmInternalFunctionMap = 0x1ef9;
+ static constexpr Tagged_t kWasmJSFunctionDataMap = 0x1f21;
+ static constexpr Tagged_t kWasmResumeDataMap = 0x1f49;
+ static constexpr Tagged_t kWasmTypeInfoMap = 0x1f71;
+ static constexpr Tagged_t kWasmContinuationObjectMap = 0x1f99;
+ static constexpr Tagged_t kWasmNullMap = 0x1fc1;
+ static constexpr Tagged_t kWeakCellMap = 0x1fe9;
+ static constexpr Tagged_t kNoOpInterceptorInfo = 0x2011;
+ static constexpr Tagged_t kEmptyArrayList = 0x2039;
+ static constexpr Tagged_t kEmptyObjectBoilerplateDescription = 0x2045;
+ static constexpr Tagged_t kEmptyArrayBoilerplateDescription = 0x2051;
+ static constexpr Tagged_t kEmptyClosureFeedbackCellArray = 0x205d;
+ static constexpr Tagged_t kSingleCharacterStringTable = 0x2065;
+ static constexpr Tagged_t kdot_string = 0x274d;
+ static constexpr Tagged_t kzero_string = 0x276d;
+ static constexpr Tagged_t kone_string = 0x277d;
+ static constexpr Tagged_t kadoptText_string = 0x346d;
+ static constexpr Tagged_t kapproximatelySign_string = 0x3485;
+ static constexpr Tagged_t kbaseName_string = 0x34a5;
+ static constexpr Tagged_t kaccounting_string = 0x34b9;
+ static constexpr Tagged_t kbreakType_string = 0x34d1;
+ static constexpr Tagged_t kcalendars_string = 0x34e9;
+ static constexpr Tagged_t kcardinal_string = 0x3501;
+ static constexpr Tagged_t kcaseFirst_string = 0x3515;
+ static constexpr Tagged_t kceil_string = 0x352d;
+ static constexpr Tagged_t kcompare_string = 0x353d;
+ static constexpr Tagged_t kcollation_string = 0x3551;
+ static constexpr Tagged_t kcollations_string = 0x3569;
+ static constexpr Tagged_t kcompact_string = 0x3581;
+ static constexpr Tagged_t kcompactDisplay_string = 0x3595;
+ static constexpr Tagged_t kcurrency_string = 0x35b1;
+ static constexpr Tagged_t kcurrencyDisplay_string = 0x35c5;
+ static constexpr Tagged_t kcurrencySign_string = 0x35e1;
+ static constexpr Tagged_t kdateStyle_string = 0x35f9;
+ static constexpr Tagged_t kdateTimeField_string = 0x3611;
+ static constexpr Tagged_t kdayPeriod_string = 0x362d;
+ static constexpr Tagged_t kdaysDisplay_string = 0x3645;
+ static constexpr Tagged_t kdecimal_string = 0x365d;
+ static constexpr Tagged_t kdialect_string = 0x3671;
+ static constexpr Tagged_t kdigital_string = 0x3685;
+ static constexpr Tagged_t kdirection_string = 0x3699;
+ static constexpr Tagged_t kendRange_string = 0x36b1;
+ static constexpr Tagged_t kengineering_string = 0x36c5;
+ static constexpr Tagged_t kexceptZero_string = 0x36dd;
+ static constexpr Tagged_t kexpand_string = 0x36f5;
+ static constexpr Tagged_t kexponentInteger_string = 0x3709;
+ static constexpr Tagged_t kexponentMinusSign_string = 0x3725;
+ static constexpr Tagged_t kexponentSeparator_string = 0x3745;
+ static constexpr Tagged_t kfallback_string = 0x3765;
+ static constexpr Tagged_t kfirst_string = 0x3779;
+ static constexpr Tagged_t kfirstDay_string = 0x378d;
+ static constexpr Tagged_t kfloor_string = 0x37a1;
+ static constexpr Tagged_t kformat_string = 0x37b5;
+ static constexpr Tagged_t kfraction_string = 0x37c9;
+ static constexpr Tagged_t kfractionalDigits_string = 0x37dd;
+ static constexpr Tagged_t kfractionalSecond_string = 0x37f9;
+ static constexpr Tagged_t kfull_string = 0x3815;
+ static constexpr Tagged_t kgranularity_string = 0x3825;
+ static constexpr Tagged_t kgrapheme_string = 0x383d;
+ static constexpr Tagged_t kgroup_string = 0x3851;
+ static constexpr Tagged_t kh11_string = 0x3865;
+ static constexpr Tagged_t kh12_string = 0x3875;
+ static constexpr Tagged_t kh23_string = 0x3885;
+ static constexpr Tagged_t kh24_string = 0x3895;
+ static constexpr Tagged_t khalfCeil_string = 0x38a5;
+ static constexpr Tagged_t khalfEven_string = 0x38b9;
+ static constexpr Tagged_t khalfExpand_string = 0x38cd;
+ static constexpr Tagged_t khalfFloor_string = 0x38e5;
+ static constexpr Tagged_t khalfTrunc_string = 0x38fd;
+ static constexpr Tagged_t khour12_string = 0x3915;
+ static constexpr Tagged_t khourCycle_string = 0x3929;
+ static constexpr Tagged_t khourCycles_string = 0x3941;
+ static constexpr Tagged_t khoursDisplay_string = 0x3959;
+ static constexpr Tagged_t kideo_string = 0x3971;
+ static constexpr Tagged_t kignorePunctuation_string = 0x3981;
+ static constexpr Tagged_t kInvalid_Date_string = 0x39a1;
+ static constexpr Tagged_t kinteger_string = 0x39b9;
+ static constexpr Tagged_t kisWordLike_string = 0x39cd;
+ static constexpr Tagged_t kkana_string = 0x39e5;
+ static constexpr Tagged_t klanguage_string = 0x39f5;
+ static constexpr Tagged_t klanguageDisplay_string = 0x3a09;
+ static constexpr Tagged_t klessPrecision_string = 0x3a25;
+ static constexpr Tagged_t kletter_string = 0x3a41;
+ static constexpr Tagged_t klist_string = 0x3a55;
+ static constexpr Tagged_t kliteral_string = 0x3a65;
+ static constexpr Tagged_t klocale_string = 0x3a79;
+ static constexpr Tagged_t kloose_string = 0x3a8d;
+ static constexpr Tagged_t klower_string = 0x3aa1;
+ static constexpr Tagged_t kltr_string = 0x3ab5;
+ static constexpr Tagged_t kmaximumFractionDigits_string = 0x3ac5;
+ static constexpr Tagged_t kmaximumSignificantDigits_string = 0x3ae9;
+ static constexpr Tagged_t kmicrosecondsDisplay_string = 0x3b0d;
+ static constexpr Tagged_t kmillisecondsDisplay_string = 0x3b2d;
+ static constexpr Tagged_t kmin2_string = 0x3b4d;
+ static constexpr Tagged_t kminimalDays_string = 0x3b5d;
+ static constexpr Tagged_t kminimumFractionDigits_string = 0x3b75;
+ static constexpr Tagged_t kminimumIntegerDigits_string = 0x3b99;
+ static constexpr Tagged_t kminimumSignificantDigits_string = 0x3bb9;
+ static constexpr Tagged_t kminus_0 = 0x3bdd;
+ static constexpr Tagged_t kminusSign_string = 0x3bed;
+ static constexpr Tagged_t kminutesDisplay_string = 0x3c05;
+ static constexpr Tagged_t kmonthsDisplay_string = 0x3c21;
+ static constexpr Tagged_t kmorePrecision_string = 0x3c3d;
+ static constexpr Tagged_t knan_string = 0x3c59;
+ static constexpr Tagged_t knanosecondsDisplay_string = 0x3c69;
+ static constexpr Tagged_t knarrowSymbol_string = 0x3c89;
+ static constexpr Tagged_t knegative_string = 0x3ca1;
+ static constexpr Tagged_t knever_string = 0x3cb5;
+ static constexpr Tagged_t knone_string = 0x3cc9;
+ static constexpr Tagged_t knotation_string = 0x3cd9;
+ static constexpr Tagged_t knormal_string = 0x3ced;
+ static constexpr Tagged_t knumberingSystem_string = 0x3d01;
+ static constexpr Tagged_t knumberingSystems_string = 0x3d1d;
+ static constexpr Tagged_t knumeric_string = 0x3d39;
+ static constexpr Tagged_t kordinal_string = 0x3d4d;
+ static constexpr Tagged_t kpercentSign_string = 0x3d61;
+ static constexpr Tagged_t kplusSign_string = 0x3d79;
+ static constexpr Tagged_t kquarter_string = 0x3d8d;
+ static constexpr Tagged_t kregion_string = 0x3da1;
+ static constexpr Tagged_t krelatedYear_string = 0x3db5;
+ static constexpr Tagged_t kroundingMode_string = 0x3dcd;
+ static constexpr Tagged_t kroundingPriority_string = 0x3de5;
+ static constexpr Tagged_t krtl_string = 0x3e01;
+ static constexpr Tagged_t kscientific_string = 0x3e11;
+ static constexpr Tagged_t ksecondsDisplay_string = 0x3e29;
+ static constexpr Tagged_t ksegment_string = 0x3e45;
+ static constexpr Tagged_t kSegmentIterator_string = 0x3e59;
+ static constexpr Tagged_t kSegments_string = 0x3e75;
+ static constexpr Tagged_t ksensitivity_string = 0x3e89;
+ static constexpr Tagged_t ksep_string = 0x3ea1;
+ static constexpr Tagged_t kshared_string = 0x3eb1;
+ static constexpr Tagged_t ksignDisplay_string = 0x3ec5;
+ static constexpr Tagged_t kstandard_string = 0x3edd;
+ static constexpr Tagged_t kstartRange_string = 0x3ef1;
+ static constexpr Tagged_t kstrict_string = 0x3f09;
+ static constexpr Tagged_t kstripIfInteger_string = 0x3f1d;
+ static constexpr Tagged_t kstyle_string = 0x3f39;
+ static constexpr Tagged_t kterm_string = 0x3f4d;
+ static constexpr Tagged_t ktextInfo_string = 0x3f5d;
+ static constexpr Tagged_t ktimeStyle_string = 0x3f71;
+ static constexpr Tagged_t ktimeZones_string = 0x3f89;
+ static constexpr Tagged_t ktimeZoneName_string = 0x3fa1;
+ static constexpr Tagged_t ktrailingZeroDisplay_string = 0x3fb9;
+ static constexpr Tagged_t ktrunc_string = 0x3fd9;
+ static constexpr Tagged_t ktwo_digit_string = 0x3fed;
+ static constexpr Tagged_t ktype_string = 0x4001;
+ static constexpr Tagged_t kunknown_string = 0x4011;
+ static constexpr Tagged_t kupper_string = 0x4025;
+ static constexpr Tagged_t kusage_string = 0x4039;
+ static constexpr Tagged_t kuseGrouping_string = 0x404d;
+ static constexpr Tagged_t kunitDisplay_string = 0x4065;
+ static constexpr Tagged_t kweekday_string = 0x407d;
+ static constexpr Tagged_t kweekend_string = 0x4091;
+ static constexpr Tagged_t kweeksDisplay_string = 0x40a5;
+ static constexpr Tagged_t kweekInfo_string = 0x40bd;
+ static constexpr Tagged_t kyearName_string = 0x40d1;
+ static constexpr Tagged_t kyearsDisplay_string = 0x40e5;
+ static constexpr Tagged_t kadd_string = 0x40fd;
+ static constexpr Tagged_t kAggregateError_string = 0x410d;
+ static constexpr Tagged_t kalways_string = 0x4129;
+ static constexpr Tagged_t kanonymous_function_string = 0x413d;
+ static constexpr Tagged_t kanonymous_string = 0x415d;
+ static constexpr Tagged_t kapply_string = 0x4175;
+ static constexpr Tagged_t kArguments_string = 0x4189;
+ static constexpr Tagged_t karguments_string = 0x41a1;
+ static constexpr Tagged_t karguments_to_string = 0x41b9;
+ static constexpr Tagged_t kArray_string = 0x41d9;
+ static constexpr Tagged_t karray_to_string = 0x41ed;
+ static constexpr Tagged_t kArrayBuffer_string = 0x4209;
+ static constexpr Tagged_t kArrayIterator_string = 0x4221;
+ static constexpr Tagged_t kas_string = 0x423d;
+ static constexpr Tagged_t kassert_string = 0x424d;
+ static constexpr Tagged_t kasync_string = 0x4261;
+ static constexpr Tagged_t kAtomicsCondition_string = 0x4275;
+ static constexpr Tagged_t kAtomicsMutex_string = 0x4295;
+ static constexpr Tagged_t kauto_string = 0x42b1;
+ static constexpr Tagged_t kawait_string = 0x42c1;
+ static constexpr Tagged_t kBigInt_string = 0x42d5;
+ static constexpr Tagged_t kbigint_string = 0x42e9;
+ static constexpr Tagged_t kBigInt64Array_string = 0x42fd;
+ static constexpr Tagged_t kBigUint64Array_string = 0x4319;
+ static constexpr Tagged_t kbind_string = 0x4335;
+ static constexpr Tagged_t kblank_string = 0x4345;
+ static constexpr Tagged_t kBoolean_string = 0x4359;
+ static constexpr Tagged_t kboolean_string = 0x436d;
+ static constexpr Tagged_t kboolean_to_string = 0x4381;
+ static constexpr Tagged_t kbound__string = 0x439d;
+ static constexpr Tagged_t kbuffer_string = 0x43b1;
+ static constexpr Tagged_t kbyte_length_string = 0x43c5;
+ static constexpr Tagged_t kbyte_offset_string = 0x43dd;
+ static constexpr Tagged_t kCompileError_string = 0x43f5;
+ static constexpr Tagged_t kcalendar_string = 0x440d;
+ static constexpr Tagged_t kcallee_string = 0x4421;
+ static constexpr Tagged_t kcaller_string = 0x4435;
+ static constexpr Tagged_t kcause_string = 0x4449;
+ static constexpr Tagged_t kcharacter_string = 0x445d;
+ static constexpr Tagged_t kclosure_string = 0x4475;
+ static constexpr Tagged_t kcode_string = 0x448d;
+ static constexpr Tagged_t kcolumn_string = 0x449d;
+ static constexpr Tagged_t kcomputed_string = 0x44b1;
+ static constexpr Tagged_t kconjunction_string = 0x44c9;
+ static constexpr Tagged_t kconsole_string = 0x44e1;
+ static constexpr Tagged_t kconstrain_string = 0x44f5;
+ static constexpr Tagged_t kconstruct_string = 0x450d;
+ static constexpr Tagged_t kcurrent_string = 0x4525;
+ static constexpr Tagged_t kDate_string = 0x4539;
+ static constexpr Tagged_t kdate_to_string = 0x4549;
+ static constexpr Tagged_t kdateAdd_string = 0x4565;
+ static constexpr Tagged_t kdateFromFields_string = 0x4579;
+ static constexpr Tagged_t kdateUntil_string = 0x4595;
+ static constexpr Tagged_t kday_string = 0x45ad;
+ static constexpr Tagged_t kdayOfWeek_string = 0x45bd;
+ static constexpr Tagged_t kdayOfYear_string = 0x45d5;
+ static constexpr Tagged_t kdays_string = 0x45ed;
+ static constexpr Tagged_t kdaysInMonth_string = 0x45fd;
+ static constexpr Tagged_t kdaysInWeek_string = 0x4615;
+ static constexpr Tagged_t kdaysInYear_string = 0x462d;
+ static constexpr Tagged_t kdefault_string = 0x4645;
+ static constexpr Tagged_t kdefineProperty_string = 0x4659;
+ static constexpr Tagged_t kdeleteProperty_string = 0x4675;
+ static constexpr Tagged_t kdetached_string = 0x4691;
+ static constexpr Tagged_t kdisjunction_string = 0x46a5;
+ static constexpr Tagged_t kdone_string = 0x46bd;
+ static constexpr Tagged_t kdot_brand_string = 0x46cd;
+ static constexpr Tagged_t kdot_catch_string = 0x46e1;
+ static constexpr Tagged_t kdot_default_string = 0x46f5;
+ static constexpr Tagged_t kdot_for_string = 0x4709;
+ static constexpr Tagged_t kdot_generator_object_string = 0x4719;
+ static constexpr Tagged_t kdot_home_object_string = 0x4739;
+ static constexpr Tagged_t kdot_new_target_string = 0x4751;
+ static constexpr Tagged_t knew_target_string = 0x4751;
+ static constexpr Tagged_t kdot_result_string = 0x4769;
+ static constexpr Tagged_t kdot_repl_result_string = 0x477d;
+ static constexpr Tagged_t kdot_static_home_object_string = 0x4795;
+ static constexpr Tagged_t kdot_switch_tag_string = 0x47b5;
+ static constexpr Tagged_t kdotAll_string = 0x47cd;
+ static constexpr Tagged_t kError_string = 0x47e1;
+ static constexpr Tagged_t kEvalError_string = 0x47f5;
+ static constexpr Tagged_t kelement_string = 0x480d;
+ static constexpr Tagged_t kepochMicroseconds_string = 0x4821;
+ static constexpr Tagged_t kepochMilliseconds_string = 0x4841;
+ static constexpr Tagged_t kepochNanoseconds_string = 0x4861;
+ static constexpr Tagged_t kepochSeconds_string = 0x487d;
+ static constexpr Tagged_t kera_string = 0x4895;
+ static constexpr Tagged_t keraYear_string = 0x48a5;
+ static constexpr Tagged_t kerrors_string = 0x48b9;
+ static constexpr Tagged_t kerror_to_string = 0x48cd;
+ static constexpr Tagged_t keval_string = 0x48e9;
+ static constexpr Tagged_t kexception_string = 0x48f9;
+ static constexpr Tagged_t kexec_string = 0x4911;
+ static constexpr Tagged_t kfalse_string = 0x4921;
+ static constexpr Tagged_t kfields_string = 0x4935;
+ static constexpr Tagged_t kFinalizationRegistry_string = 0x4949;
+ static constexpr Tagged_t kflags_string = 0x4969;
+ static constexpr Tagged_t kFloat32Array_string = 0x497d;
+ static constexpr Tagged_t kFloat64Array_string = 0x4995;
+ static constexpr Tagged_t kfractionalSecondDigits_string = 0x49ad;
+ static constexpr Tagged_t kfrom_string = 0x49d1;
+ static constexpr Tagged_t kFunction_string = 0x49e1;
+ static constexpr Tagged_t kfunction_native_code_string = 0x49f5;
+ static constexpr Tagged_t kfunction_string = 0x4a21;
+ static constexpr Tagged_t kfunction_to_string = 0x4a35;
+ static constexpr Tagged_t kGenerator_string = 0x4a55;
+ static constexpr Tagged_t kget_space_string = 0x4a6d;
+ static constexpr Tagged_t kget_string = 0x4a7d;
+ static constexpr Tagged_t kgetOffsetNanosecondsFor_string = 0x4a8d;
+ static constexpr Tagged_t kgetOwnPropertyDescriptor_string = 0x4ab1;
+ static constexpr Tagged_t kgetPossibleInstantsFor_string = 0x4ad5;
+ static constexpr Tagged_t kgetPrototypeOf_string = 0x4af9;
+ static constexpr Tagged_t kglobal_string = 0x4b15;
+ static constexpr Tagged_t kglobalThis_string = 0x4b29;
+ static constexpr Tagged_t kgroups_string = 0x4b41;
+ static constexpr Tagged_t kgrowable_string = 0x4b55;
+ static constexpr Tagged_t khas_string = 0x4b69;
+ static constexpr Tagged_t khasIndices_string = 0x4b79;
+ static constexpr Tagged_t khour_string = 0x4b91;
+ static constexpr Tagged_t khours_string = 0x4ba1;
+ static constexpr Tagged_t khoursInDay_string = 0x4bb5;
+ static constexpr Tagged_t kignoreCase_string = 0x4bcd;
+ static constexpr Tagged_t kid_string = 0x4be5;
+ static constexpr Tagged_t killegal_access_string = 0x4bf5;
+ static constexpr Tagged_t killegal_argument_string = 0x4c11;
+ static constexpr Tagged_t kinLeapYear_string = 0x4c2d;
+ static constexpr Tagged_t kindex_string = 0x4c45;
+ static constexpr Tagged_t kindices_string = 0x4c59;
+ static constexpr Tagged_t kInfinity_string = 0x4c6d;
+ static constexpr Tagged_t kinfinity_string = 0x4c81;
+ static constexpr Tagged_t kinput_string = 0x4c95;
+ static constexpr Tagged_t kInt16Array_string = 0x4ca9;
+ static constexpr Tagged_t kInt32Array_string = 0x4cc1;
+ static constexpr Tagged_t kInt8Array_string = 0x4cd9;
+ static constexpr Tagged_t kisExtensible_string = 0x4cf1;
+ static constexpr Tagged_t kiso8601_string = 0x4d09;
+ static constexpr Tagged_t kisoDay_string = 0x4d1d;
+ static constexpr Tagged_t kisoHour_string = 0x4d31;
+ static constexpr Tagged_t kisoMicrosecond_string = 0x4d45;
+ static constexpr Tagged_t kisoMillisecond_string = 0x4d61;
+ static constexpr Tagged_t kisoMinute_string = 0x4d7d;
+ static constexpr Tagged_t kisoMonth_string = 0x4d95;
+ static constexpr Tagged_t kisoNanosecond_string = 0x4da9;
+ static constexpr Tagged_t kisoSecond_string = 0x4dc5;
+ static constexpr Tagged_t kisoYear_string = 0x4ddd;
+ static constexpr Tagged_t kjsMemoryEstimate_string = 0x4df1;
+ static constexpr Tagged_t kjsMemoryRange_string = 0x4e0d;
+ static constexpr Tagged_t kkeys_string = 0x4e29;
+ static constexpr Tagged_t klargestUnit_string = 0x4e39;
+ static constexpr Tagged_t klastIndex_string = 0x4e51;
+ static constexpr Tagged_t klet_string = 0x4e69;
+ static constexpr Tagged_t kline_string = 0x4e79;
+ static constexpr Tagged_t klinear_string = 0x4e89;
+ static constexpr Tagged_t kLinkError_string = 0x4e9d;
+ static constexpr Tagged_t klong_string = 0x4eb5;
+ static constexpr Tagged_t kMap_string = 0x4ec5;
+ static constexpr Tagged_t kMapIterator_string = 0x4ed5;
+ static constexpr Tagged_t kmax_byte_length_string = 0x4eed;
+ static constexpr Tagged_t kmedium_string = 0x4f09;
+ static constexpr Tagged_t kmergeFields_string = 0x4f1d;
+ static constexpr Tagged_t kmessage_string = 0x4f35;
+ static constexpr Tagged_t kmeta_string = 0x4f49;
+ static constexpr Tagged_t kminus_Infinity_string = 0x4f59;
+ static constexpr Tagged_t kmicrosecond_string = 0x4f71;
+ static constexpr Tagged_t kmicroseconds_string = 0x4f89;
+ static constexpr Tagged_t kmillisecond_string = 0x4fa1;
+ static constexpr Tagged_t kmilliseconds_string = 0x4fb9;
+ static constexpr Tagged_t kminute_string = 0x4fd1;
+ static constexpr Tagged_t kminutes_string = 0x4fe5;
+ static constexpr Tagged_t kModule_string = 0x4ff9;
+ static constexpr Tagged_t kmonth_string = 0x500d;
+ static constexpr Tagged_t kmonthDayFromFields_string = 0x5021;
+ static constexpr Tagged_t kmonths_string = 0x5041;
+ static constexpr Tagged_t kmonthsInYear_string = 0x5055;
+ static constexpr Tagged_t kmonthCode_string = 0x506d;
+ static constexpr Tagged_t kmultiline_string = 0x5085;
+ static constexpr Tagged_t kNaN_string = 0x509d;
+ static constexpr Tagged_t knanosecond_string = 0x50ad;
+ static constexpr Tagged_t knanoseconds_string = 0x50c5;
+ static constexpr Tagged_t knarrow_string = 0x50dd;
+ static constexpr Tagged_t knative_string = 0x50f1;
+ static constexpr Tagged_t kNFC_string = 0x5105;
+ static constexpr Tagged_t kNFD_string = 0x5115;
+ static constexpr Tagged_t kNFKC_string = 0x5125;
+ static constexpr Tagged_t kNFKD_string = 0x5135;
+ static constexpr Tagged_t knot_equal_string = 0x5145;
+ static constexpr Tagged_t knull_string = 0x515d;
+ static constexpr Tagged_t knull_to_string = 0x516d;
+ static constexpr Tagged_t kNumber_string = 0x5189;
+ static constexpr Tagged_t knumber_string = 0x519d;
+ static constexpr Tagged_t knumber_to_string = 0x51b1;
+ static constexpr Tagged_t kObject_string = 0x51cd;
+ static constexpr Tagged_t kobject_string = 0x51e1;
+ static constexpr Tagged_t kobject_to_string = 0x51f5;
+ static constexpr Tagged_t kObject_prototype_string = 0x5211;
+ static constexpr Tagged_t kof_string = 0x522d;
+ static constexpr Tagged_t koffset_string = 0x523d;
+ static constexpr Tagged_t koffsetNanoseconds_string = 0x5251;
+ static constexpr Tagged_t kok_string = 0x5271;
+ static constexpr Tagged_t kother_string = 0x5281;
+ static constexpr Tagged_t koverflow_string = 0x5295;
+ static constexpr Tagged_t kownKeys_string = 0x52a9;
+ static constexpr Tagged_t kpercent_string = 0x52bd;
+ static constexpr Tagged_t kplainDate_string = 0x52d1;
+ static constexpr Tagged_t kplainTime_string = 0x52e9;
+ static constexpr Tagged_t kposition_string = 0x5301;
+ static constexpr Tagged_t kpreventExtensions_string = 0x5315;
+ static constexpr Tagged_t kprivate_constructor_string = 0x5335;
+ static constexpr Tagged_t kPromise_string = 0x534d;
+ static constexpr Tagged_t kproto_string = 0x5361;
+ static constexpr Tagged_t kproxy_string = 0x5379;
+ static constexpr Tagged_t kProxy_string = 0x538d;
+ static constexpr Tagged_t kquery_colon_string = 0x53a1;
+ static constexpr Tagged_t kRangeError_string = 0x53b1;
+ static constexpr Tagged_t kraw_json_string = 0x53c9;
+ static constexpr Tagged_t kraw_string = 0x53dd;
+ static constexpr Tagged_t kReferenceError_string = 0x53ed;
+ static constexpr Tagged_t kReflectGet_string = 0x5409;
+ static constexpr Tagged_t kReflectHas_string = 0x5421;
+ static constexpr Tagged_t kRegExp_string = 0x5439;
+ static constexpr Tagged_t kregexp_to_string = 0x544d;
+ static constexpr Tagged_t kreject_string = 0x5469;
+ static constexpr Tagged_t krelativeTo_string = 0x547d;
+ static constexpr Tagged_t kresizable_string = 0x5495;
+ static constexpr Tagged_t kResizableArrayBuffer_string = 0x54ad;
+ static constexpr Tagged_t kreturn_string = 0x54cd;
+ static constexpr Tagged_t krevoke_string = 0x54e1;
+ static constexpr Tagged_t kroundingIncrement_string = 0x54f5;
+ static constexpr Tagged_t kRuntimeError_string = 0x5515;
+ static constexpr Tagged_t kWebAssemblyException_string = 0x552d;
+ static constexpr Tagged_t kScript_string = 0x5551;
+ static constexpr Tagged_t kscript_string = 0x5565;
+ static constexpr Tagged_t ksecond_string = 0x5579;
+ static constexpr Tagged_t kseconds_string = 0x558d;
+ static constexpr Tagged_t kshort_string = 0x55a1;
+ static constexpr Tagged_t kSet_string = 0x55b5;
+ static constexpr Tagged_t ksentence_string = 0x55c5;
+ static constexpr Tagged_t kset_space_string = 0x55d9;
+ static constexpr Tagged_t kset_string = 0x55e9;
+ static constexpr Tagged_t kSetIterator_string = 0x55f9;
+ static constexpr Tagged_t ksetPrototypeOf_string = 0x5611;
+ static constexpr Tagged_t kShadowRealm_string = 0x562d;
+ static constexpr Tagged_t kSharedArray_string = 0x5645;
+ static constexpr Tagged_t kSharedArrayBuffer_string = 0x565d;
+ static constexpr Tagged_t kSharedStruct_string = 0x567d;
+ static constexpr Tagged_t ksign_string = 0x5695;
+ static constexpr Tagged_t ksmallestUnit_string = 0x56a5;
+ static constexpr Tagged_t ksource_string = 0x56bd;
+ static constexpr Tagged_t ksourceText_string = 0x56d1;
+ static constexpr Tagged_t kstack_string = 0x56e9;
+ static constexpr Tagged_t kstackTraceLimit_string = 0x56fd;
+ static constexpr Tagged_t ksticky_string = 0x5719;
+ static constexpr Tagged_t kString_string = 0x572d;
+ static constexpr Tagged_t kstring_string = 0x5741;
+ static constexpr Tagged_t kstring_to_string = 0x5755;
+ static constexpr Tagged_t kSymbol_iterator_string = 0x5771;
+ static constexpr Tagged_t kSymbol_replace_string = 0x578d;
+ static constexpr Tagged_t ksymbol_species_string = 0x57a9;
+ static constexpr Tagged_t kSymbol_species_string = 0x57c5;
+ static constexpr Tagged_t kSymbol_string = 0x57e1;
+ static constexpr Tagged_t ksymbol_string = 0x57f5;
+ static constexpr Tagged_t kSyntaxError_string = 0x5809;
+ static constexpr Tagged_t ktarget_string = 0x5821;
+ static constexpr Tagged_t kthis_function_string = 0x5835;
+ static constexpr Tagged_t kthis_string = 0x5851;
+ static constexpr Tagged_t kthrow_string = 0x5861;
+ static constexpr Tagged_t ktimed_out_string = 0x5875;
+ static constexpr Tagged_t ktimeZone_string = 0x588d;
+ static constexpr Tagged_t ktoJSON_string = 0x58a1;
+ static constexpr Tagged_t ktoString_string = 0x58b5;
+ static constexpr Tagged_t ktrue_string = 0x58c9;
+ static constexpr Tagged_t ktotal_string = 0x58d9;
+ static constexpr Tagged_t kTypeError_string = 0x58ed;
+ static constexpr Tagged_t kUint16Array_string = 0x5905;
+ static constexpr Tagged_t kUint32Array_string = 0x591d;
+ static constexpr Tagged_t kUint8Array_string = 0x5935;
+ static constexpr Tagged_t kUint8ClampedArray_string = 0x594d;
+ static constexpr Tagged_t kundefined_string = 0x596d;
+ static constexpr Tagged_t kundefined_to_string = 0x5985;
+ static constexpr Tagged_t kunicode_string = 0x59a5;
+ static constexpr Tagged_t kunicodeSets_string = 0x59b9;
+ static constexpr Tagged_t kunit_string = 0x59d1;
+ static constexpr Tagged_t kURIError_string = 0x59e1;
+ static constexpr Tagged_t kUTC_string = 0x59f5;
+ static constexpr Tagged_t kvalueOf_string = 0x5a05;
+ static constexpr Tagged_t kWeakMap_string = 0x5a19;
+ static constexpr Tagged_t kWeakRef_string = 0x5a2d;
+ static constexpr Tagged_t kWeakSet_string = 0x5a41;
+ static constexpr Tagged_t kweek_string = 0x5a55;
+ static constexpr Tagged_t kweeks_string = 0x5a65;
+ static constexpr Tagged_t kweekOfYear_string = 0x5a79;
+ static constexpr Tagged_t kword_string = 0x5a91;
+ static constexpr Tagged_t kyearMonthFromFields_string = 0x5aa1;
+ static constexpr Tagged_t kyear_string = 0x5ac1;
+ static constexpr Tagged_t kyears_string = 0x5ad1;
+ static constexpr Tagged_t kUninitializedValue = 0x5af5;
+ static constexpr Tagged_t kArgumentsMarker = 0x5b2d;
+ static constexpr Tagged_t kTerminationException = 0x5b65;
+ static constexpr Tagged_t kException = 0x5ba5;
+ static constexpr Tagged_t kOptimizedOut = 0x5bc1;
+ static constexpr Tagged_t kStaleRegister = 0x5bf9;
+ static constexpr Tagged_t kSelfReferenceMarker = 0x5c31;
+ static constexpr Tagged_t kBasicBlockCountersMarker = 0x5c71;
+ static constexpr Tagged_t karray_buffer_wasm_memory_symbol = 0x5cb5;
+ static constexpr Tagged_t kcall_site_info_symbol = 0x5cc5;
+ static constexpr Tagged_t kconsole_context_id_symbol = 0x5cd5;
+ static constexpr Tagged_t kconsole_context_name_symbol = 0x5ce5;
+ static constexpr Tagged_t kclass_fields_symbol = 0x5cf5;
+ static constexpr Tagged_t kclass_positions_symbol = 0x5d05;
+ static constexpr Tagged_t kerror_end_pos_symbol = 0x5d15;
+ static constexpr Tagged_t kerror_script_symbol = 0x5d25;
+ static constexpr Tagged_t kerror_stack_symbol = 0x5d35;
+ static constexpr Tagged_t kerror_start_pos_symbol = 0x5d45;
+ static constexpr Tagged_t kfrozen_symbol = 0x5d55;
+ static constexpr Tagged_t kinterpreter_trampoline_symbol = 0x5d65;
+ static constexpr Tagged_t knative_context_index_symbol = 0x5d75;
+ static constexpr Tagged_t knonextensible_symbol = 0x5d85;
+ static constexpr Tagged_t kpromise_debug_marker_symbol = 0x5d95;
+ static constexpr Tagged_t kpromise_debug_message_symbol = 0x5da5;
+ static constexpr Tagged_t kpromise_forwarding_handler_symbol = 0x5db5;
+ static constexpr Tagged_t kpromise_handled_by_symbol = 0x5dc5;
+ static constexpr Tagged_t kpromise_awaited_by_symbol = 0x5dd5;
+ static constexpr Tagged_t kregexp_result_names_symbol = 0x5de5;
+ static constexpr Tagged_t kregexp_result_regexp_input_symbol = 0x5df5;
+ static constexpr Tagged_t kregexp_result_regexp_last_index_symbol = 0x5e05;
+ static constexpr Tagged_t ksealed_symbol = 0x5e15;
+ static constexpr Tagged_t kstrict_function_transition_symbol = 0x5e25;
+ static constexpr Tagged_t ktemplate_literal_function_literal_id_symbol =
+ 0x5e35;
+ static constexpr Tagged_t ktemplate_literal_slot_id_symbol = 0x5e45;
+ static constexpr Tagged_t kwasm_exception_tag_symbol = 0x5e55;
+ static constexpr Tagged_t kwasm_exception_values_symbol = 0x5e65;
+ static constexpr Tagged_t kwasm_uncatchable_symbol = 0x5e75;
+ static constexpr Tagged_t kwasm_wrapped_object_symbol = 0x5e85;
+ static constexpr Tagged_t kwasm_debug_proxy_cache_symbol = 0x5e95;
+ static constexpr Tagged_t kwasm_debug_proxy_names_symbol = 0x5ea5;
+ static constexpr Tagged_t kasync_iterator_symbol = 0x5eb5;
+ static constexpr Tagged_t kintl_fallback_symbol = 0x5ee5;
+ static constexpr Tagged_t kmatch_all_symbol = 0x5f1d;
+ static constexpr Tagged_t kmatch_symbol = 0x5f49;
+ static constexpr Tagged_t ksearch_symbol = 0x5f71;
+ static constexpr Tagged_t ksplit_symbol = 0x5f9d;
+ static constexpr Tagged_t kto_primitive_symbol = 0x5fc5;
+ static constexpr Tagged_t kunscopables_symbol = 0x5ff5;
+ static constexpr Tagged_t khas_instance_symbol = 0x6025;
+ static constexpr Tagged_t kto_string_tag_symbol = 0x6055;
+ static constexpr Tagged_t kconstructor_string = 0x60ad;
+ static constexpr Tagged_t knext_string = 0x60c5;
+ static constexpr Tagged_t kresolve_string = 0x60d5;
+ static constexpr Tagged_t kthen_string = 0x60e9;
+ static constexpr Tagged_t kiterator_symbol = 0x60f9;
+ static constexpr Tagged_t kreplace_symbol = 0x6109;
+ static constexpr Tagged_t kspecies_symbol = 0x6119;
+ static constexpr Tagged_t kis_concat_spreadable_symbol = 0x6129;
+ static constexpr Tagged_t kEmptySlowElementDictionary = 0x6139;
+ static constexpr Tagged_t kEmptySymbolTable = 0x615d;
+ static constexpr Tagged_t kEmptyOrderedHashMap = 0x6179;
+ static constexpr Tagged_t kEmptyOrderedHashSet = 0x618d;
+ static constexpr Tagged_t kEmptyFeedbackMetadata = 0x61a1;
+ static constexpr Tagged_t kGlobalThisBindingScopeInfo = 0x61ad;
+ static constexpr Tagged_t kEmptyFunctionScopeInfo = 0x61cd;
+ static constexpr Tagged_t kNativeScopeInfo = 0x61f1;
+ static constexpr Tagged_t kShadowRealmScopeInfo = 0x6209;
+ static constexpr Tagged_t kWasmNull = 0xfffd;
+};
+
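+// Editor's note: the pointer table below lists one compressed value per
+// read-only root, which is what InitFromStaticRootsTable(cage_base) in
+// roots.h consumes ("uncompress pointers ... and store them into the actual
+// roots table"). A hedged sketch of that idea, assuming V8's standard
+// pointer-compression scheme where a compressed tagged value is an offset
+// within the cage; the function name and loop are illustrative, not the
+// actual implementation:
+//
+//   void InitFromStaticRootsTableSketch(Address cage_base, Address* roots) {
+//     for (size_t i = 0; i < StaticReadOnlyRootsPointerTable.size(); ++i) {
+//       // full (tagged) pointer = cage base + compressed offset
+//       roots[i] = cage_base +
+//                  static_cast<Address>(StaticReadOnlyRootsPointerTable[i]);
+//     }
+//   }
+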
+static constexpr std::array<Tagged_t, 738> StaticReadOnlyRootsPointerTable = {
+ StaticReadOnlyRoot::kFreeSpaceMap,
+ StaticReadOnlyRoot::kOnePointerFillerMap,
+ StaticReadOnlyRoot::kTwoPointerFillerMap,
+ StaticReadOnlyRoot::kUninitializedValue,
+ StaticReadOnlyRoot::kUndefinedValue,
+ StaticReadOnlyRoot::kTheHoleValue,
+ StaticReadOnlyRoot::kNullValue,
+ StaticReadOnlyRoot::kTrueValue,
+ StaticReadOnlyRoot::kFalseValue,
+ StaticReadOnlyRoot::kempty_string,
+ StaticReadOnlyRoot::kMetaMap,
+ StaticReadOnlyRoot::kByteArrayMap,
+ StaticReadOnlyRoot::kFixedArrayMap,
+ StaticReadOnlyRoot::kFixedCOWArrayMap,
+ StaticReadOnlyRoot::kFixedDoubleArrayMap,
+ StaticReadOnlyRoot::kHashTableMap,
+ StaticReadOnlyRoot::kSymbolMap,
+ StaticReadOnlyRoot::kOneByteStringMap,
+ StaticReadOnlyRoot::kOneByteInternalizedStringMap,
+ StaticReadOnlyRoot::kScopeInfoMap,
+ StaticReadOnlyRoot::kSharedFunctionInfoMap,
+ StaticReadOnlyRoot::kInstructionStreamMap,
+ StaticReadOnlyRoot::kCellMap,
+ StaticReadOnlyRoot::kGlobalPropertyCellMap,
+ StaticReadOnlyRoot::kForeignMap,
+ StaticReadOnlyRoot::kHeapNumberMap,
+ StaticReadOnlyRoot::kTransitionArrayMap,
+ StaticReadOnlyRoot::kFeedbackVectorMap,
+ StaticReadOnlyRoot::kEmptyScopeInfo,
+ StaticReadOnlyRoot::kEmptyFixedArray,
+ StaticReadOnlyRoot::kEmptyDescriptorArray,
+ StaticReadOnlyRoot::kArgumentsMarker,
+ StaticReadOnlyRoot::kException,
+ StaticReadOnlyRoot::kTerminationException,
+ StaticReadOnlyRoot::kOptimizedOut,
+ StaticReadOnlyRoot::kStaleRegister,
+ StaticReadOnlyRoot::kScriptContextTableMap,
+ StaticReadOnlyRoot::kClosureFeedbackCellArrayMap,
+ StaticReadOnlyRoot::kFeedbackMetadataArrayMap,
+ StaticReadOnlyRoot::kArrayListMap,
+ StaticReadOnlyRoot::kBigIntMap,
+ StaticReadOnlyRoot::kObjectBoilerplateDescriptionMap,
+ StaticReadOnlyRoot::kBytecodeArrayMap,
+ StaticReadOnlyRoot::kCodeMap,
+ StaticReadOnlyRoot::kCoverageInfoMap,
+ StaticReadOnlyRoot::kGlobalDictionaryMap,
+ StaticReadOnlyRoot::kManyClosuresCellMap,
+ StaticReadOnlyRoot::kMegaDomHandlerMap,
+ StaticReadOnlyRoot::kModuleInfoMap,
+ StaticReadOnlyRoot::kNameDictionaryMap,
+ StaticReadOnlyRoot::kNoClosuresCellMap,
+ StaticReadOnlyRoot::kNumberDictionaryMap,
+ StaticReadOnlyRoot::kOneClosureCellMap,
+ StaticReadOnlyRoot::kOrderedHashMapMap,
+ StaticReadOnlyRoot::kOrderedHashSetMap,
+ StaticReadOnlyRoot::kNameToIndexHashTableMap,
+ StaticReadOnlyRoot::kRegisteredSymbolTableMap,
+ StaticReadOnlyRoot::kOrderedNameDictionaryMap,
+ StaticReadOnlyRoot::kPreparseDataMap,
+ StaticReadOnlyRoot::kPropertyArrayMap,
+ StaticReadOnlyRoot::kAccessorInfoMap,
+ StaticReadOnlyRoot::kSideEffectCallHandlerInfoMap,
+ StaticReadOnlyRoot::kSideEffectFreeCallHandlerInfoMap,
+ StaticReadOnlyRoot::kNextCallSideEffectFreeCallHandlerInfoMap,
+ StaticReadOnlyRoot::kSimpleNumberDictionaryMap,
+ StaticReadOnlyRoot::kSmallOrderedHashMapMap,
+ StaticReadOnlyRoot::kSmallOrderedHashSetMap,
+ StaticReadOnlyRoot::kSmallOrderedNameDictionaryMap,
+ StaticReadOnlyRoot::kSourceTextModuleMap,
+ StaticReadOnlyRoot::kSwissNameDictionaryMap,
+ StaticReadOnlyRoot::kSyntheticModuleMap,
+ StaticReadOnlyRoot::kWasmApiFunctionRefMap,
+ StaticReadOnlyRoot::kWasmCapiFunctionDataMap,
+ StaticReadOnlyRoot::kWasmExportedFunctionDataMap,
+ StaticReadOnlyRoot::kWasmInternalFunctionMap,
+ StaticReadOnlyRoot::kWasmJSFunctionDataMap,
+ StaticReadOnlyRoot::kWasmResumeDataMap,
+ StaticReadOnlyRoot::kWasmTypeInfoMap,
+ StaticReadOnlyRoot::kWasmContinuationObjectMap,
+ StaticReadOnlyRoot::kWasmNullMap,
+ StaticReadOnlyRoot::kWeakFixedArrayMap,
+ StaticReadOnlyRoot::kWeakArrayListMap,
+ StaticReadOnlyRoot::kEphemeronHashTableMap,
+ StaticReadOnlyRoot::kEmbedderDataArrayMap,
+ StaticReadOnlyRoot::kWeakCellMap,
+ StaticReadOnlyRoot::kStringMap,
+ StaticReadOnlyRoot::kConsOneByteStringMap,
+ StaticReadOnlyRoot::kConsStringMap,
+ StaticReadOnlyRoot::kThinStringMap,
+ StaticReadOnlyRoot::kSlicedStringMap,
+ StaticReadOnlyRoot::kSlicedOneByteStringMap,
+ StaticReadOnlyRoot::kExternalStringMap,
+ StaticReadOnlyRoot::kExternalOneByteStringMap,
+ StaticReadOnlyRoot::kUncachedExternalStringMap,
+ StaticReadOnlyRoot::kInternalizedStringMap,
+ StaticReadOnlyRoot::kExternalInternalizedStringMap,
+ StaticReadOnlyRoot::kExternalOneByteInternalizedStringMap,
+ StaticReadOnlyRoot::kUncachedExternalInternalizedStringMap,
+ StaticReadOnlyRoot::kUncachedExternalOneByteInternalizedStringMap,
+ StaticReadOnlyRoot::kUncachedExternalOneByteStringMap,
+ StaticReadOnlyRoot::kSharedOneByteStringMap,
+ StaticReadOnlyRoot::kSharedStringMap,
+ StaticReadOnlyRoot::kSharedExternalOneByteStringMap,
+ StaticReadOnlyRoot::kSharedExternalStringMap,
+ StaticReadOnlyRoot::kSharedUncachedExternalOneByteStringMap,
+ StaticReadOnlyRoot::kSharedUncachedExternalStringMap,
+ StaticReadOnlyRoot::kUndefinedMap,
+ StaticReadOnlyRoot::kTheHoleMap,
+ StaticReadOnlyRoot::kNullMap,
+ StaticReadOnlyRoot::kBooleanMap,
+ StaticReadOnlyRoot::kUninitializedMap,
+ StaticReadOnlyRoot::kArgumentsMarkerMap,
+ StaticReadOnlyRoot::kExceptionMap,
+ StaticReadOnlyRoot::kTerminationExceptionMap,
+ StaticReadOnlyRoot::kOptimizedOutMap,
+ StaticReadOnlyRoot::kStaleRegisterMap,
+ StaticReadOnlyRoot::kSelfReferenceMarkerMap,
+ StaticReadOnlyRoot::kBasicBlockCountersMarkerMap,
+ StaticReadOnlyRoot::kEmptyEnumCache,
+ StaticReadOnlyRoot::kEmptyPropertyArray,
+ StaticReadOnlyRoot::kEmptyByteArray,
+ StaticReadOnlyRoot::kEmptyObjectBoilerplateDescription,
+ StaticReadOnlyRoot::kEmptyArrayBoilerplateDescription,
+ StaticReadOnlyRoot::kEmptyClosureFeedbackCellArray,
+ StaticReadOnlyRoot::kEmptySlowElementDictionary,
+ StaticReadOnlyRoot::kEmptyOrderedHashMap,
+ StaticReadOnlyRoot::kEmptyOrderedHashSet,
+ StaticReadOnlyRoot::kEmptyFeedbackMetadata,
+ StaticReadOnlyRoot::kEmptyPropertyDictionary,
+ StaticReadOnlyRoot::kEmptyOrderedPropertyDictionary,
+ StaticReadOnlyRoot::kEmptySwissPropertyDictionary,
+ StaticReadOnlyRoot::kNoOpInterceptorInfo,
+ StaticReadOnlyRoot::kEmptyArrayList,
+ StaticReadOnlyRoot::kEmptyWeakFixedArray,
+ StaticReadOnlyRoot::kEmptyWeakArrayList,
+ StaticReadOnlyRoot::kInvalidPrototypeValidityCell,
+ StaticReadOnlyRoot::kNanValue,
+ StaticReadOnlyRoot::kHoleNanValue,
+ StaticReadOnlyRoot::kInfinityValue,
+ StaticReadOnlyRoot::kMinusZeroValue,
+ StaticReadOnlyRoot::kMinusInfinityValue,
+ StaticReadOnlyRoot::kMaxSafeInteger,
+ StaticReadOnlyRoot::kMaxUInt32,
+ StaticReadOnlyRoot::kSmiMinValue,
+ StaticReadOnlyRoot::kSmiMaxValuePlusOne,
+ StaticReadOnlyRoot::kSingleCharacterStringTable,
+ StaticReadOnlyRoot::kSelfReferenceMarker,
+ StaticReadOnlyRoot::kBasicBlockCountersMarker,
+ StaticReadOnlyRoot::kGlobalThisBindingScopeInfo,
+ StaticReadOnlyRoot::kEmptyFunctionScopeInfo,
+ StaticReadOnlyRoot::kNativeScopeInfo,
+ StaticReadOnlyRoot::kShadowRealmScopeInfo,
+ StaticReadOnlyRoot::kEmptySymbolTable,
+ StaticReadOnlyRoot::kHashSeed,
+ StaticReadOnlyRoot::kWasmNull,
+ StaticReadOnlyRoot::klength_string,
+ StaticReadOnlyRoot::kprototype_string,
+ StaticReadOnlyRoot::kname_string,
+ StaticReadOnlyRoot::kenumerable_string,
+ StaticReadOnlyRoot::kconfigurable_string,
+ StaticReadOnlyRoot::kvalue_string,
+ StaticReadOnlyRoot::kwritable_string,
+ StaticReadOnlyRoot::kadoptText_string,
+ StaticReadOnlyRoot::kapproximatelySign_string,
+ StaticReadOnlyRoot::kbaseName_string,
+ StaticReadOnlyRoot::kaccounting_string,
+ StaticReadOnlyRoot::kbreakType_string,
+ StaticReadOnlyRoot::kcalendars_string,
+ StaticReadOnlyRoot::kcardinal_string,
+ StaticReadOnlyRoot::kcaseFirst_string,
+ StaticReadOnlyRoot::kceil_string,
+ StaticReadOnlyRoot::kcompare_string,
+ StaticReadOnlyRoot::kcollation_string,
+ StaticReadOnlyRoot::kcollations_string,
+ StaticReadOnlyRoot::kcompact_string,
+ StaticReadOnlyRoot::kcompactDisplay_string,
+ StaticReadOnlyRoot::kcurrency_string,
+ StaticReadOnlyRoot::kcurrencyDisplay_string,
+ StaticReadOnlyRoot::kcurrencySign_string,
+ StaticReadOnlyRoot::kdateStyle_string,
+ StaticReadOnlyRoot::kdateTimeField_string,
+ StaticReadOnlyRoot::kdayPeriod_string,
+ StaticReadOnlyRoot::kdaysDisplay_string,
+ StaticReadOnlyRoot::kdecimal_string,
+ StaticReadOnlyRoot::kdialect_string,
+ StaticReadOnlyRoot::kdigital_string,
+ StaticReadOnlyRoot::kdirection_string,
+ StaticReadOnlyRoot::kendRange_string,
+ StaticReadOnlyRoot::kengineering_string,
+ StaticReadOnlyRoot::kexceptZero_string,
+ StaticReadOnlyRoot::kexpand_string,
+ StaticReadOnlyRoot::kexponentInteger_string,
+ StaticReadOnlyRoot::kexponentMinusSign_string,
+ StaticReadOnlyRoot::kexponentSeparator_string,
+ StaticReadOnlyRoot::kfallback_string,
+ StaticReadOnlyRoot::kfirst_string,
+ StaticReadOnlyRoot::kfirstDay_string,
+ StaticReadOnlyRoot::kfloor_string,
+ StaticReadOnlyRoot::kformat_string,
+ StaticReadOnlyRoot::kfraction_string,
+ StaticReadOnlyRoot::kfractionalDigits_string,
+ StaticReadOnlyRoot::kfractionalSecond_string,
+ StaticReadOnlyRoot::kfull_string,
+ StaticReadOnlyRoot::kgranularity_string,
+ StaticReadOnlyRoot::kgrapheme_string,
+ StaticReadOnlyRoot::kgroup_string,
+ StaticReadOnlyRoot::kh11_string,
+ StaticReadOnlyRoot::kh12_string,
+ StaticReadOnlyRoot::kh23_string,
+ StaticReadOnlyRoot::kh24_string,
+ StaticReadOnlyRoot::khalfCeil_string,
+ StaticReadOnlyRoot::khalfEven_string,
+ StaticReadOnlyRoot::khalfExpand_string,
+ StaticReadOnlyRoot::khalfFloor_string,
+ StaticReadOnlyRoot::khalfTrunc_string,
+ StaticReadOnlyRoot::khour12_string,
+ StaticReadOnlyRoot::khourCycle_string,
+ StaticReadOnlyRoot::khourCycles_string,
+ StaticReadOnlyRoot::khoursDisplay_string,
+ StaticReadOnlyRoot::kideo_string,
+ StaticReadOnlyRoot::kignorePunctuation_string,
+ StaticReadOnlyRoot::kInvalid_Date_string,
+ StaticReadOnlyRoot::kinteger_string,
+ StaticReadOnlyRoot::kisWordLike_string,
+ StaticReadOnlyRoot::kkana_string,
+ StaticReadOnlyRoot::klanguage_string,
+ StaticReadOnlyRoot::klanguageDisplay_string,
+ StaticReadOnlyRoot::klessPrecision_string,
+ StaticReadOnlyRoot::kletter_string,
+ StaticReadOnlyRoot::klist_string,
+ StaticReadOnlyRoot::kliteral_string,
+ StaticReadOnlyRoot::klocale_string,
+ StaticReadOnlyRoot::kloose_string,
+ StaticReadOnlyRoot::klower_string,
+ StaticReadOnlyRoot::kltr_string,
+ StaticReadOnlyRoot::kmaximumFractionDigits_string,
+ StaticReadOnlyRoot::kmaximumSignificantDigits_string,
+ StaticReadOnlyRoot::kmicrosecondsDisplay_string,
+ StaticReadOnlyRoot::kmillisecondsDisplay_string,
+ StaticReadOnlyRoot::kmin2_string,
+ StaticReadOnlyRoot::kminimalDays_string,
+ StaticReadOnlyRoot::kminimumFractionDigits_string,
+ StaticReadOnlyRoot::kminimumIntegerDigits_string,
+ StaticReadOnlyRoot::kminimumSignificantDigits_string,
+ StaticReadOnlyRoot::kminus_0,
+ StaticReadOnlyRoot::kminusSign_string,
+ StaticReadOnlyRoot::kminutesDisplay_string,
+ StaticReadOnlyRoot::kmonthsDisplay_string,
+ StaticReadOnlyRoot::kmorePrecision_string,
+ StaticReadOnlyRoot::knan_string,
+ StaticReadOnlyRoot::knanosecondsDisplay_string,
+ StaticReadOnlyRoot::knarrowSymbol_string,
+ StaticReadOnlyRoot::knegative_string,
+ StaticReadOnlyRoot::knever_string,
+ StaticReadOnlyRoot::knone_string,
+ StaticReadOnlyRoot::knotation_string,
+ StaticReadOnlyRoot::knormal_string,
+ StaticReadOnlyRoot::knumberingSystem_string,
+ StaticReadOnlyRoot::knumberingSystems_string,
+ StaticReadOnlyRoot::knumeric_string,
+ StaticReadOnlyRoot::kordinal_string,
+ StaticReadOnlyRoot::kpercentSign_string,
+ StaticReadOnlyRoot::kplusSign_string,
+ StaticReadOnlyRoot::kquarter_string,
+ StaticReadOnlyRoot::kregion_string,
+ StaticReadOnlyRoot::krelatedYear_string,
+ StaticReadOnlyRoot::kroundingMode_string,
+ StaticReadOnlyRoot::kroundingPriority_string,
+ StaticReadOnlyRoot::krtl_string,
+ StaticReadOnlyRoot::kscientific_string,
+ StaticReadOnlyRoot::ksecondsDisplay_string,
+ StaticReadOnlyRoot::ksegment_string,
+ StaticReadOnlyRoot::kSegmentIterator_string,
+ StaticReadOnlyRoot::kSegments_string,
+ StaticReadOnlyRoot::ksensitivity_string,
+ StaticReadOnlyRoot::ksep_string,
+ StaticReadOnlyRoot::kshared_string,
+ StaticReadOnlyRoot::ksignDisplay_string,
+ StaticReadOnlyRoot::kstandard_string,
+ StaticReadOnlyRoot::kstartRange_string,
+ StaticReadOnlyRoot::kstrict_string,
+ StaticReadOnlyRoot::kstripIfInteger_string,
+ StaticReadOnlyRoot::kstyle_string,
+ StaticReadOnlyRoot::kterm_string,
+ StaticReadOnlyRoot::ktextInfo_string,
+ StaticReadOnlyRoot::ktimeStyle_string,
+ StaticReadOnlyRoot::ktimeZones_string,
+ StaticReadOnlyRoot::ktimeZoneName_string,
+ StaticReadOnlyRoot::ktrailingZeroDisplay_string,
+ StaticReadOnlyRoot::ktrunc_string,
+ StaticReadOnlyRoot::ktwo_digit_string,
+ StaticReadOnlyRoot::ktype_string,
+ StaticReadOnlyRoot::kunknown_string,
+ StaticReadOnlyRoot::kupper_string,
+ StaticReadOnlyRoot::kusage_string,
+ StaticReadOnlyRoot::kuseGrouping_string,
+ StaticReadOnlyRoot::kunitDisplay_string,
+ StaticReadOnlyRoot::kweekday_string,
+ StaticReadOnlyRoot::kweekend_string,
+ StaticReadOnlyRoot::kweeksDisplay_string,
+ StaticReadOnlyRoot::kweekInfo_string,
+ StaticReadOnlyRoot::kyearName_string,
+ StaticReadOnlyRoot::kyearsDisplay_string,
+ StaticReadOnlyRoot::kadd_string,
+ StaticReadOnlyRoot::kAggregateError_string,
+ StaticReadOnlyRoot::kalways_string,
+ StaticReadOnlyRoot::kanonymous_function_string,
+ StaticReadOnlyRoot::kanonymous_string,
+ StaticReadOnlyRoot::kapply_string,
+ StaticReadOnlyRoot::kArguments_string,
+ StaticReadOnlyRoot::karguments_string,
+ StaticReadOnlyRoot::karguments_to_string,
+ StaticReadOnlyRoot::kArray_string,
+ StaticReadOnlyRoot::karray_to_string,
+ StaticReadOnlyRoot::kArrayBuffer_string,
+ StaticReadOnlyRoot::kArrayIterator_string,
+ StaticReadOnlyRoot::kas_string,
+ StaticReadOnlyRoot::kassert_string,
+ StaticReadOnlyRoot::kasync_string,
+ StaticReadOnlyRoot::kAtomicsCondition_string,
+ StaticReadOnlyRoot::kAtomicsMutex_string,
+ StaticReadOnlyRoot::kauto_string,
+ StaticReadOnlyRoot::kawait_string,
+ StaticReadOnlyRoot::kBigInt_string,
+ StaticReadOnlyRoot::kbigint_string,
+ StaticReadOnlyRoot::kBigInt64Array_string,
+ StaticReadOnlyRoot::kBigUint64Array_string,
+ StaticReadOnlyRoot::kbind_string,
+ StaticReadOnlyRoot::kblank_string,
+ StaticReadOnlyRoot::kBoolean_string,
+ StaticReadOnlyRoot::kboolean_string,
+ StaticReadOnlyRoot::kboolean_to_string,
+ StaticReadOnlyRoot::kbound__string,
+ StaticReadOnlyRoot::kbuffer_string,
+ StaticReadOnlyRoot::kbyte_length_string,
+ StaticReadOnlyRoot::kbyte_offset_string,
+ StaticReadOnlyRoot::kCompileError_string,
+ StaticReadOnlyRoot::kcalendar_string,
+ StaticReadOnlyRoot::kcallee_string,
+ StaticReadOnlyRoot::kcaller_string,
+ StaticReadOnlyRoot::kcause_string,
+ StaticReadOnlyRoot::kcharacter_string,
+ StaticReadOnlyRoot::kclosure_string,
+ StaticReadOnlyRoot::kcode_string,
+ StaticReadOnlyRoot::kcolumn_string,
+ StaticReadOnlyRoot::kcomputed_string,
+ StaticReadOnlyRoot::kconjunction_string,
+ StaticReadOnlyRoot::kconsole_string,
+ StaticReadOnlyRoot::kconstrain_string,
+ StaticReadOnlyRoot::kconstruct_string,
+ StaticReadOnlyRoot::kcurrent_string,
+ StaticReadOnlyRoot::kDate_string,
+ StaticReadOnlyRoot::kdate_to_string,
+ StaticReadOnlyRoot::kdateAdd_string,
+ StaticReadOnlyRoot::kdateFromFields_string,
+ StaticReadOnlyRoot::kdateUntil_string,
+ StaticReadOnlyRoot::kday_string,
+ StaticReadOnlyRoot::kdayOfWeek_string,
+ StaticReadOnlyRoot::kdayOfYear_string,
+ StaticReadOnlyRoot::kdays_string,
+ StaticReadOnlyRoot::kdaysInMonth_string,
+ StaticReadOnlyRoot::kdaysInWeek_string,
+ StaticReadOnlyRoot::kdaysInYear_string,
+ StaticReadOnlyRoot::kdefault_string,
+ StaticReadOnlyRoot::kdefineProperty_string,
+ StaticReadOnlyRoot::kdeleteProperty_string,
+ StaticReadOnlyRoot::kdetached_string,
+ StaticReadOnlyRoot::kdisjunction_string,
+ StaticReadOnlyRoot::kdone_string,
+ StaticReadOnlyRoot::kdot_brand_string,
+ StaticReadOnlyRoot::kdot_catch_string,
+ StaticReadOnlyRoot::kdot_default_string,
+ StaticReadOnlyRoot::kdot_for_string,
+ StaticReadOnlyRoot::kdot_generator_object_string,
+ StaticReadOnlyRoot::kdot_home_object_string,
+ StaticReadOnlyRoot::kdot_new_target_string,
+ StaticReadOnlyRoot::kdot_result_string,
+ StaticReadOnlyRoot::kdot_repl_result_string,
+ StaticReadOnlyRoot::kdot_static_home_object_string,
+ StaticReadOnlyRoot::kdot_string,
+ StaticReadOnlyRoot::kdot_switch_tag_string,
+ StaticReadOnlyRoot::kdotAll_string,
+ StaticReadOnlyRoot::kError_string,
+ StaticReadOnlyRoot::kEvalError_string,
+ StaticReadOnlyRoot::kelement_string,
+ StaticReadOnlyRoot::kepochMicroseconds_string,
+ StaticReadOnlyRoot::kepochMilliseconds_string,
+ StaticReadOnlyRoot::kepochNanoseconds_string,
+ StaticReadOnlyRoot::kepochSeconds_string,
+ StaticReadOnlyRoot::kera_string,
+ StaticReadOnlyRoot::keraYear_string,
+ StaticReadOnlyRoot::kerrors_string,
+ StaticReadOnlyRoot::kerror_to_string,
+ StaticReadOnlyRoot::keval_string,
+ StaticReadOnlyRoot::kexception_string,
+ StaticReadOnlyRoot::kexec_string,
+ StaticReadOnlyRoot::kfalse_string,
+ StaticReadOnlyRoot::kfields_string,
+ StaticReadOnlyRoot::kFinalizationRegistry_string,
+ StaticReadOnlyRoot::kflags_string,
+ StaticReadOnlyRoot::kFloat32Array_string,
+ StaticReadOnlyRoot::kFloat64Array_string,
+ StaticReadOnlyRoot::kfractionalSecondDigits_string,
+ StaticReadOnlyRoot::kfrom_string,
+ StaticReadOnlyRoot::kFunction_string,
+ StaticReadOnlyRoot::kfunction_native_code_string,
+ StaticReadOnlyRoot::kfunction_string,
+ StaticReadOnlyRoot::kfunction_to_string,
+ StaticReadOnlyRoot::kGenerator_string,
+ StaticReadOnlyRoot::kget_space_string,
+ StaticReadOnlyRoot::kget_string,
+ StaticReadOnlyRoot::kgetOffsetNanosecondsFor_string,
+ StaticReadOnlyRoot::kgetOwnPropertyDescriptor_string,
+ StaticReadOnlyRoot::kgetPossibleInstantsFor_string,
+ StaticReadOnlyRoot::kgetPrototypeOf_string,
+ StaticReadOnlyRoot::kglobal_string,
+ StaticReadOnlyRoot::kglobalThis_string,
+ StaticReadOnlyRoot::kgroups_string,
+ StaticReadOnlyRoot::kgrowable_string,
+ StaticReadOnlyRoot::khas_string,
+ StaticReadOnlyRoot::khasIndices_string,
+ StaticReadOnlyRoot::khour_string,
+ StaticReadOnlyRoot::khours_string,
+ StaticReadOnlyRoot::khoursInDay_string,
+ StaticReadOnlyRoot::kignoreCase_string,
+ StaticReadOnlyRoot::kid_string,
+ StaticReadOnlyRoot::killegal_access_string,
+ StaticReadOnlyRoot::killegal_argument_string,
+ StaticReadOnlyRoot::kinLeapYear_string,
+ StaticReadOnlyRoot::kindex_string,
+ StaticReadOnlyRoot::kindices_string,
+ StaticReadOnlyRoot::kInfinity_string,
+ StaticReadOnlyRoot::kinfinity_string,
+ StaticReadOnlyRoot::kinput_string,
+ StaticReadOnlyRoot::kInt16Array_string,
+ StaticReadOnlyRoot::kInt32Array_string,
+ StaticReadOnlyRoot::kInt8Array_string,
+ StaticReadOnlyRoot::kisExtensible_string,
+ StaticReadOnlyRoot::kiso8601_string,
+ StaticReadOnlyRoot::kisoDay_string,
+ StaticReadOnlyRoot::kisoHour_string,
+ StaticReadOnlyRoot::kisoMicrosecond_string,
+ StaticReadOnlyRoot::kisoMillisecond_string,
+ StaticReadOnlyRoot::kisoMinute_string,
+ StaticReadOnlyRoot::kisoMonth_string,
+ StaticReadOnlyRoot::kisoNanosecond_string,
+ StaticReadOnlyRoot::kisoSecond_string,
+ StaticReadOnlyRoot::kisoYear_string,
+ StaticReadOnlyRoot::kjsMemoryEstimate_string,
+ StaticReadOnlyRoot::kjsMemoryRange_string,
+ StaticReadOnlyRoot::kkeys_string,
+ StaticReadOnlyRoot::klargestUnit_string,
+ StaticReadOnlyRoot::klastIndex_string,
+ StaticReadOnlyRoot::klet_string,
+ StaticReadOnlyRoot::kline_string,
+ StaticReadOnlyRoot::klinear_string,
+ StaticReadOnlyRoot::kLinkError_string,
+ StaticReadOnlyRoot::klong_string,
+ StaticReadOnlyRoot::kMap_string,
+ StaticReadOnlyRoot::kMapIterator_string,
+ StaticReadOnlyRoot::kmax_byte_length_string,
+ StaticReadOnlyRoot::kmedium_string,
+ StaticReadOnlyRoot::kmergeFields_string,
+ StaticReadOnlyRoot::kmessage_string,
+ StaticReadOnlyRoot::kmeta_string,
+ StaticReadOnlyRoot::kminus_Infinity_string,
+ StaticReadOnlyRoot::kmicrosecond_string,
+ StaticReadOnlyRoot::kmicroseconds_string,
+ StaticReadOnlyRoot::kmillisecond_string,
+ StaticReadOnlyRoot::kmilliseconds_string,
+ StaticReadOnlyRoot::kminute_string,
+ StaticReadOnlyRoot::kminutes_string,
+ StaticReadOnlyRoot::kModule_string,
+ StaticReadOnlyRoot::kmonth_string,
+ StaticReadOnlyRoot::kmonthDayFromFields_string,
+ StaticReadOnlyRoot::kmonths_string,
+ StaticReadOnlyRoot::kmonthsInYear_string,
+ StaticReadOnlyRoot::kmonthCode_string,
+ StaticReadOnlyRoot::kmultiline_string,
+ StaticReadOnlyRoot::kNaN_string,
+ StaticReadOnlyRoot::knanosecond_string,
+ StaticReadOnlyRoot::knanoseconds_string,
+ StaticReadOnlyRoot::knarrow_string,
+ StaticReadOnlyRoot::knative_string,
+ StaticReadOnlyRoot::knew_target_string,
+ StaticReadOnlyRoot::kNFC_string,
+ StaticReadOnlyRoot::kNFD_string,
+ StaticReadOnlyRoot::kNFKC_string,
+ StaticReadOnlyRoot::kNFKD_string,
+ StaticReadOnlyRoot::knot_equal_string,
+ StaticReadOnlyRoot::knull_string,
+ StaticReadOnlyRoot::knull_to_string,
+ StaticReadOnlyRoot::kNumber_string,
+ StaticReadOnlyRoot::knumber_string,
+ StaticReadOnlyRoot::knumber_to_string,
+ StaticReadOnlyRoot::kObject_string,
+ StaticReadOnlyRoot::kobject_string,
+ StaticReadOnlyRoot::kobject_to_string,
+ StaticReadOnlyRoot::kObject_prototype_string,
+ StaticReadOnlyRoot::kof_string,
+ StaticReadOnlyRoot::koffset_string,
+ StaticReadOnlyRoot::koffsetNanoseconds_string,
+ StaticReadOnlyRoot::kok_string,
+ StaticReadOnlyRoot::kone_string,
+ StaticReadOnlyRoot::kother_string,
+ StaticReadOnlyRoot::koverflow_string,
+ StaticReadOnlyRoot::kownKeys_string,
+ StaticReadOnlyRoot::kpercent_string,
+ StaticReadOnlyRoot::kplainDate_string,
+ StaticReadOnlyRoot::kplainTime_string,
+ StaticReadOnlyRoot::kposition_string,
+ StaticReadOnlyRoot::kpreventExtensions_string,
+ StaticReadOnlyRoot::kprivate_constructor_string,
+ StaticReadOnlyRoot::kPromise_string,
+ StaticReadOnlyRoot::kproto_string,
+ StaticReadOnlyRoot::kproxy_string,
+ StaticReadOnlyRoot::kProxy_string,
+ StaticReadOnlyRoot::kquery_colon_string,
+ StaticReadOnlyRoot::kRangeError_string,
+ StaticReadOnlyRoot::kraw_json_string,
+ StaticReadOnlyRoot::kraw_string,
+ StaticReadOnlyRoot::kReferenceError_string,
+ StaticReadOnlyRoot::kReflectGet_string,
+ StaticReadOnlyRoot::kReflectHas_string,
+ StaticReadOnlyRoot::kRegExp_string,
+ StaticReadOnlyRoot::kregexp_to_string,
+ StaticReadOnlyRoot::kreject_string,
+ StaticReadOnlyRoot::krelativeTo_string,
+ StaticReadOnlyRoot::kresizable_string,
+ StaticReadOnlyRoot::kResizableArrayBuffer_string,
+ StaticReadOnlyRoot::kreturn_string,
+ StaticReadOnlyRoot::krevoke_string,
+ StaticReadOnlyRoot::kroundingIncrement_string,
+ StaticReadOnlyRoot::kRuntimeError_string,
+ StaticReadOnlyRoot::kWebAssemblyException_string,
+ StaticReadOnlyRoot::kScript_string,
+ StaticReadOnlyRoot::kscript_string,
+ StaticReadOnlyRoot::ksecond_string,
+ StaticReadOnlyRoot::kseconds_string,
+ StaticReadOnlyRoot::kshort_string,
+ StaticReadOnlyRoot::kSet_string,
+ StaticReadOnlyRoot::ksentence_string,
+ StaticReadOnlyRoot::kset_space_string,
+ StaticReadOnlyRoot::kset_string,
+ StaticReadOnlyRoot::kSetIterator_string,
+ StaticReadOnlyRoot::ksetPrototypeOf_string,
+ StaticReadOnlyRoot::kShadowRealm_string,
+ StaticReadOnlyRoot::kSharedArray_string,
+ StaticReadOnlyRoot::kSharedArrayBuffer_string,
+ StaticReadOnlyRoot::kSharedStruct_string,
+ StaticReadOnlyRoot::ksign_string,
+ StaticReadOnlyRoot::ksmallestUnit_string,
+ StaticReadOnlyRoot::ksource_string,
+ StaticReadOnlyRoot::ksourceText_string,
+ StaticReadOnlyRoot::kstack_string,
+ StaticReadOnlyRoot::kstackTraceLimit_string,
+ StaticReadOnlyRoot::ksticky_string,
+ StaticReadOnlyRoot::kString_string,
+ StaticReadOnlyRoot::kstring_string,
+ StaticReadOnlyRoot::kstring_to_string,
+ StaticReadOnlyRoot::kSymbol_iterator_string,
+ StaticReadOnlyRoot::kSymbol_replace_string,
+ StaticReadOnlyRoot::ksymbol_species_string,
+ StaticReadOnlyRoot::kSymbol_species_string,
+ StaticReadOnlyRoot::kSymbol_string,
+ StaticReadOnlyRoot::ksymbol_string,
+ StaticReadOnlyRoot::kSyntaxError_string,
+ StaticReadOnlyRoot::ktarget_string,
+ StaticReadOnlyRoot::kthis_function_string,
+ StaticReadOnlyRoot::kthis_string,
+ StaticReadOnlyRoot::kthrow_string,
+ StaticReadOnlyRoot::ktimed_out_string,
+ StaticReadOnlyRoot::ktimeZone_string,
+ StaticReadOnlyRoot::ktoJSON_string,
+ StaticReadOnlyRoot::ktoString_string,
+ StaticReadOnlyRoot::ktrue_string,
+ StaticReadOnlyRoot::ktotal_string,
+ StaticReadOnlyRoot::kTypeError_string,
+ StaticReadOnlyRoot::kUint16Array_string,
+ StaticReadOnlyRoot::kUint32Array_string,
+ StaticReadOnlyRoot::kUint8Array_string,
+ StaticReadOnlyRoot::kUint8ClampedArray_string,
+ StaticReadOnlyRoot::kundefined_string,
+ StaticReadOnlyRoot::kundefined_to_string,
+ StaticReadOnlyRoot::kunicode_string,
+ StaticReadOnlyRoot::kunicodeSets_string,
+ StaticReadOnlyRoot::kunit_string,
+ StaticReadOnlyRoot::kURIError_string,
+ StaticReadOnlyRoot::kUTC_string,
+ StaticReadOnlyRoot::kvalueOf_string,
+ StaticReadOnlyRoot::kWeakMap_string,
+ StaticReadOnlyRoot::kWeakRef_string,
+ StaticReadOnlyRoot::kWeakSet_string,
+ StaticReadOnlyRoot::kweek_string,
+ StaticReadOnlyRoot::kweeks_string,
+ StaticReadOnlyRoot::kweekOfYear_string,
+ StaticReadOnlyRoot::kword_string,
+ StaticReadOnlyRoot::kyearMonthFromFields_string,
+ StaticReadOnlyRoot::kyear_string,
+ StaticReadOnlyRoot::kyears_string,
+ StaticReadOnlyRoot::kzero_string,
+ StaticReadOnlyRoot::knot_mapped_symbol,
+ StaticReadOnlyRoot::kuninitialized_symbol,
+ StaticReadOnlyRoot::kmegamorphic_symbol,
+ StaticReadOnlyRoot::kelements_transition_symbol,
+ StaticReadOnlyRoot::kmega_dom_symbol,
+ StaticReadOnlyRoot::karray_buffer_wasm_memory_symbol,
+ StaticReadOnlyRoot::kcall_site_info_symbol,
+ StaticReadOnlyRoot::kconsole_context_id_symbol,
+ StaticReadOnlyRoot::kconsole_context_name_symbol,
+ StaticReadOnlyRoot::kclass_fields_symbol,
+ StaticReadOnlyRoot::kclass_positions_symbol,
+ StaticReadOnlyRoot::kerror_end_pos_symbol,
+ StaticReadOnlyRoot::kerror_script_symbol,
+ StaticReadOnlyRoot::kerror_stack_symbol,
+ StaticReadOnlyRoot::kerror_start_pos_symbol,
+ StaticReadOnlyRoot::kfrozen_symbol,
+ StaticReadOnlyRoot::kinterpreter_trampoline_symbol,
+ StaticReadOnlyRoot::knative_context_index_symbol,
+ StaticReadOnlyRoot::knonextensible_symbol,
+ StaticReadOnlyRoot::kpromise_debug_marker_symbol,
+ StaticReadOnlyRoot::kpromise_debug_message_symbol,
+ StaticReadOnlyRoot::kpromise_forwarding_handler_symbol,
+ StaticReadOnlyRoot::kpromise_handled_by_symbol,
+ StaticReadOnlyRoot::kpromise_awaited_by_symbol,
+ StaticReadOnlyRoot::kregexp_result_names_symbol,
+ StaticReadOnlyRoot::kregexp_result_regexp_input_symbol,
+ StaticReadOnlyRoot::kregexp_result_regexp_last_index_symbol,
+ StaticReadOnlyRoot::ksealed_symbol,
+ StaticReadOnlyRoot::kstrict_function_transition_symbol,
+ StaticReadOnlyRoot::ktemplate_literal_function_literal_id_symbol,
+ StaticReadOnlyRoot::ktemplate_literal_slot_id_symbol,
+ StaticReadOnlyRoot::kwasm_exception_tag_symbol,
+ StaticReadOnlyRoot::kwasm_exception_values_symbol,
+ StaticReadOnlyRoot::kwasm_uncatchable_symbol,
+ StaticReadOnlyRoot::kwasm_wrapped_object_symbol,
+ StaticReadOnlyRoot::kwasm_debug_proxy_cache_symbol,
+ StaticReadOnlyRoot::kwasm_debug_proxy_names_symbol,
+ StaticReadOnlyRoot::kasync_iterator_symbol,
+ StaticReadOnlyRoot::kintl_fallback_symbol,
+ StaticReadOnlyRoot::kmatch_all_symbol,
+ StaticReadOnlyRoot::kmatch_symbol,
+ StaticReadOnlyRoot::ksearch_symbol,
+ StaticReadOnlyRoot::ksplit_symbol,
+ StaticReadOnlyRoot::kto_primitive_symbol,
+ StaticReadOnlyRoot::kunscopables_symbol,
+ StaticReadOnlyRoot::khas_instance_symbol,
+ StaticReadOnlyRoot::kto_string_tag_symbol,
+ StaticReadOnlyRoot::kPromiseFulfillReactionJobTaskMap,
+ StaticReadOnlyRoot::kPromiseRejectReactionJobTaskMap,
+ StaticReadOnlyRoot::kCallableTaskMap,
+ StaticReadOnlyRoot::kCallbackTaskMap,
+ StaticReadOnlyRoot::kPromiseResolveThenableJobTaskMap,
+ StaticReadOnlyRoot::kFunctionTemplateInfoMap,
+ StaticReadOnlyRoot::kObjectTemplateInfoMap,
+ StaticReadOnlyRoot::kAccessCheckInfoMap,
+ StaticReadOnlyRoot::kAccessorPairMap,
+ StaticReadOnlyRoot::kAliasedArgumentsEntryMap,
+ StaticReadOnlyRoot::kAllocationMementoMap,
+ StaticReadOnlyRoot::kArrayBoilerplateDescriptionMap,
+ StaticReadOnlyRoot::kAsmWasmDataMap,
+ StaticReadOnlyRoot::kAsyncGeneratorRequestMap,
+ StaticReadOnlyRoot::kBreakPointMap,
+ StaticReadOnlyRoot::kBreakPointInfoMap,
+ StaticReadOnlyRoot::kCallSiteInfoMap,
+ StaticReadOnlyRoot::kClassPositionsMap,
+ StaticReadOnlyRoot::kDebugInfoMap,
+ StaticReadOnlyRoot::kEnumCacheMap,
+ StaticReadOnlyRoot::kErrorStackDataMap,
+ StaticReadOnlyRoot::kFunctionTemplateRareDataMap,
+ StaticReadOnlyRoot::kInterceptorInfoMap,
+ StaticReadOnlyRoot::kInterpreterDataMap,
+ StaticReadOnlyRoot::kModuleRequestMap,
+ StaticReadOnlyRoot::kPromiseCapabilityMap,
+ StaticReadOnlyRoot::kPromiseOnStackMap,
+ StaticReadOnlyRoot::kPromiseReactionMap,
+ StaticReadOnlyRoot::kPropertyDescriptorObjectMap,
+ StaticReadOnlyRoot::kPrototypeInfoMap,
+ StaticReadOnlyRoot::kRegExpBoilerplateDescriptionMap,
+ StaticReadOnlyRoot::kScriptMap,
+ StaticReadOnlyRoot::kScriptOrModuleMap,
+ StaticReadOnlyRoot::kSourceTextModuleInfoEntryMap,
+ StaticReadOnlyRoot::kStackFrameInfoMap,
+ StaticReadOnlyRoot::kTemplateObjectDescriptionMap,
+ StaticReadOnlyRoot::kTuple2Map,
+ StaticReadOnlyRoot::kWasmExceptionTagMap,
+ StaticReadOnlyRoot::kWasmIndirectFunctionTableMap,
+ StaticReadOnlyRoot::kSloppyArgumentsElementsMap,
+ StaticReadOnlyRoot::kDescriptorArrayMap,
+ StaticReadOnlyRoot::kStrongDescriptorArrayMap,
+ StaticReadOnlyRoot::kUncompiledDataWithoutPreparseDataMap,
+ StaticReadOnlyRoot::kUncompiledDataWithPreparseDataMap,
+ StaticReadOnlyRoot::kUncompiledDataWithoutPreparseDataWithJobMap,
+ StaticReadOnlyRoot::kUncompiledDataWithPreparseDataAndJobMap,
+ StaticReadOnlyRoot::kOnHeapBasicBlockProfilerDataMap,
+ StaticReadOnlyRoot::kTurbofanBitsetTypeMap,
+ StaticReadOnlyRoot::kTurbofanUnionTypeMap,
+ StaticReadOnlyRoot::kTurbofanRangeTypeMap,
+ StaticReadOnlyRoot::kTurbofanHeapConstantTypeMap,
+ StaticReadOnlyRoot::kTurbofanOtherNumberConstantTypeMap,
+ StaticReadOnlyRoot::kTurboshaftWord32TypeMap,
+ StaticReadOnlyRoot::kTurboshaftWord32RangeTypeMap,
+ StaticReadOnlyRoot::kTurboshaftWord32SetTypeMap,
+ StaticReadOnlyRoot::kTurboshaftWord64TypeMap,
+ StaticReadOnlyRoot::kTurboshaftWord64RangeTypeMap,
+ StaticReadOnlyRoot::kTurboshaftWord64SetTypeMap,
+ StaticReadOnlyRoot::kTurboshaftFloat64TypeMap,
+ StaticReadOnlyRoot::kTurboshaftFloat64RangeTypeMap,
+ StaticReadOnlyRoot::kTurboshaftFloat64SetTypeMap,
+ StaticReadOnlyRoot::kInternalClassMap,
+ StaticReadOnlyRoot::kSmiPairMap,
+ StaticReadOnlyRoot::kSmiBoxMap,
+ StaticReadOnlyRoot::kExportedSubClassBaseMap,
+ StaticReadOnlyRoot::kExportedSubClassMap,
+ StaticReadOnlyRoot::kAbstractInternalClassSubclass1Map,
+ StaticReadOnlyRoot::kAbstractInternalClassSubclass2Map,
+ StaticReadOnlyRoot::kInternalClassWithSmiElementsMap,
+ StaticReadOnlyRoot::kInternalClassWithStructElementsMap,
+ StaticReadOnlyRoot::kExportedSubClass2Map,
+ StaticReadOnlyRoot::kSortStateMap,
+ StaticReadOnlyRoot::kWasmStringViewIterMap,
+ StaticReadOnlyRoot::kAllocationSiteWithWeakNextMap,
+ StaticReadOnlyRoot::kAllocationSiteWithoutWeakNextMap,
+ StaticReadOnlyRoot::kconstructor_string,
+ StaticReadOnlyRoot::knext_string,
+ StaticReadOnlyRoot::kresolve_string,
+ StaticReadOnlyRoot::kthen_string,
+ StaticReadOnlyRoot::kiterator_symbol,
+ StaticReadOnlyRoot::kreplace_symbol,
+ StaticReadOnlyRoot::kspecies_symbol,
+ StaticReadOnlyRoot::kis_concat_spreadable_symbol,
+ StaticReadOnlyRoot::kLoadHandler1Map,
+ StaticReadOnlyRoot::kLoadHandler2Map,
+ StaticReadOnlyRoot::kLoadHandler3Map,
+ StaticReadOnlyRoot::kStoreHandler0Map,
+ StaticReadOnlyRoot::kStoreHandler1Map,
+ StaticReadOnlyRoot::kStoreHandler2Map,
+ StaticReadOnlyRoot::kStoreHandler3Map,
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_STATIC_ROOTS_BOOL
+#endif // V8_ROOTS_STATIC_ROOTS_H_
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index b4d3ff84e3..f64d9e5b1a 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -94,6 +94,25 @@ RUNTIME_FUNCTION(Runtime_ToBigInt) {
RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, x));
}
+RUNTIME_FUNCTION(Runtime_ToBigIntConvertNumber) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<Object> x = args.at(0);
+
+ if (x->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, x,
+ JSReceiver::ToPrimitive(isolate, Handle<JSReceiver>::cast(x),
+ ToPrimitiveHint::kNumber));
+ }
+
+ if (x->IsNumber()) {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, x));
+ } else {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, x));
+ }
+}
+
RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 2c3037e135..f52cf3a6bd 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -431,6 +431,12 @@ bool AddDescriptorsByTemplate(
name = isolate->factory()->InternalizeName(name);
ClassBoilerplate::AddToPropertiesTemplate(
isolate, properties_dictionary, name, key_index, value_kind, value);
+ if (name->IsInterestingSymbol()) {
+ // TODO(pthier): Add flags to swiss dictionaries.
+ if constexpr (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ properties_dictionary->set_may_have_interesting_symbols(true);
+ }
+ }
}
}
@@ -506,7 +512,7 @@ bool InitClassPrototype(Isolate* isolate,
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> properties_dictionary_template =
Handle<SwissNameDictionary>::cast(properties_template);
return AddDescriptorsByTemplate(
@@ -563,7 +569,7 @@ bool InitClassConstructor(Isolate* isolate,
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> properties_dictionary_template =
Handle<SwissNameDictionary>::cast(properties_template);
@@ -644,7 +650,7 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
MapEvent("InitialMap", empty_map, handle(constructor->map(), isolate),
"init class constructor",
SharedFunctionInfo::DebugName(
- handle(constructor->shared(), isolate))));
+ isolate, handle(constructor->shared(), isolate))));
LOG(isolate,
MapEvent("InitialMap", empty_map, handle(prototype->map(), isolate),
"init class prototype"));
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 3676f7814d..591f5c8d9d 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -25,7 +25,7 @@ void LogExecution(Isolate* isolate, Handle<JSFunction> function) {
if (!function->has_feedback_vector()) return;
if (!function->feedback_vector().log_next_execution()) return;
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
- Handle<String> name = SharedFunctionInfo::DebugName(sfi);
+ Handle<String> name = SharedFunctionInfo::DebugName(isolate, sfi);
DisallowGarbageCollection no_gc;
auto raw_sfi = *sfi;
std::string event_name = "first-execution";
@@ -85,7 +85,7 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
&is_compiled_scope);
{
DisallowGarbageCollection no_gc;
- CodeT baseline_code = sfi->baseline_code(kAcquireLoad);
+ Code baseline_code = sfi->baseline_code(kAcquireLoad);
function->set_code(baseline_code);
if V8_LIKELY (!v8_flags.log_function_events) return baseline_code;
}
@@ -126,7 +126,8 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
}
// As a pre- and post-condition of CompileOptimized, the function *must* be
- // compiled, i.e. the installed Code object must not be CompileLazy.
+ // compiled, i.e. the installed InstructionStream object must not be
+ // CompileLazy.
IsCompiledScope is_compiled_scope(function->shared(), isolate);
DCHECK(is_compiled_scope.is_compiled());
@@ -162,7 +163,7 @@ RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
DCHECK(function->shared().is_compiled());
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "Runtime_HealOptimizedCodeSlot");
+ isolate, function->shared(), "Runtime_HealOptimizedCodeSlot");
return function->code();
}
@@ -250,8 +251,8 @@ bool DeoptExitIsInsideOsrLoop(Isolate* isolate, JSFunction function,
bool TryGetOptimizedOsrCode(Isolate* isolate, FeedbackVector vector,
const interpreter::BytecodeArrayIterator& it,
- CodeT* code_out) {
- base::Optional<CodeT> maybe_code =
+ Code* code_out) {
+ base::Optional<Code> maybe_code =
vector.GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
if (maybe_code.has_value()) {
*code_out = maybe_code.value();
@@ -289,8 +290,8 @@ void DeoptAllOsrLoopsContainingDeoptExit(Isolate* isolate, JSFunction function,
deopt_exit_offset.ToInt());
FeedbackVector vector = function.feedback_vector();
- CodeT code;
- base::SmallVector<CodeT, 8> osr_codes;
+ Code code;
+ base::SmallVector<Code, 8> osr_codes;
// Visit before the first loop-with-deopt is found
for (; !it.done(); it.Advance()) {
// We're only interested in loop ranges.
@@ -384,7 +385,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
delete deoptimizer;
// Ensure the context register is updated for materialized objects.
- JavaScriptFrameIterator top_it(isolate);
+ JavaScriptStackFrameIterator top_it(isolate);
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
@@ -395,8 +396,8 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // Some eager deopts also don't invalidate Code (e.g. when preparing for OSR
- // from Maglev to Turbofan).
+ // Some eager deopts also don't invalidate InstructionStream (e.g. when
+ // preparing for OSR from Maglev to Turbofan).
if (IsDeoptimizationWithoutCodeInvalidation(deopt_reason)) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -414,11 +415,11 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
// the loop should pay for the deoptimization costs.
const BytecodeOffset osr_offset = optimized_code->osr_offset();
if (osr_offset.IsNone()) {
- Deoptimizer::DeoptimizeFunction(*function, ToCodeT(*optimized_code));
+ Deoptimizer::DeoptimizeFunction(*function, *optimized_code);
DeoptAllOsrLoopsContainingDeoptExit(isolate, *function, deopt_exit_offset);
} else if (DeoptExitIsInsideOsrLoop(isolate, *function, deopt_exit_offset,
osr_offset)) {
- Deoptimizer::DeoptimizeFunction(*function, ToCodeT(*optimized_code));
+ Deoptimizer::DeoptimizeFunction(*function, *optimized_code);
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -441,6 +442,14 @@ RUNTIME_FUNCTION(Runtime_VerifyType) {
return *obj;
}
+RUNTIME_FUNCTION(Runtime_CheckTurboshaftTypeOf) {
+ // %CheckTurboshaftTypeOf has no effect in the interpreter.
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> obj = args.at(0);
+ return *obj;
+}
+
namespace {
void GetOsrOffsetAndFunctionForOSR(Isolate* isolate, BytecodeOffset* osr_offset,
@@ -449,12 +458,12 @@ void GetOsrOffsetAndFunctionForOSR(Isolate* isolate, BytecodeOffset* osr_offset,
DCHECK(function->is_null());
// Determine the frame that triggered the OSR request.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
DCHECK_IMPLIES(frame->is_interpreted(),
- frame->LookupCodeT().is_interpreter_trampoline_builtin());
+ frame->LookupCode().is_interpreter_trampoline_builtin());
DCHECK_IMPLIES(frame->is_baseline(),
- frame->LookupCodeT().kind() == CodeKind::BASELINE);
+ frame->LookupCode().kind() == CodeKind::BASELINE);
*osr_offset = BytecodeOffset(frame->GetBytecodeOffset());
*function = handle(frame->function(), isolate);
@@ -471,7 +480,7 @@ Object CompileOptimizedOSR(Isolate* isolate, Handle<JSFunction> function,
? ConcurrencyMode::kConcurrent
: ConcurrencyMode::kSynchronous;
- Handle<CodeT> result;
+ Handle<Code> result;
if (!Compiler::CompileOptimizedOSR(isolate, function, osr_offset, mode)
.ToHandle(&result)) {
// An empty result can mean one of two things:
@@ -479,7 +488,7 @@ Object CompileOptimizedOSR(Isolate* isolate, Handle<JSFunction> function,
// 2) synchronous compilation failed for some reason.
if (!function->HasAttachedOptimizedCode()) {
- function->set_code(function->shared().GetCode(), kReleaseStore);
+ function->set_code(function->shared().GetCode(isolate));
}
return {};
@@ -535,11 +544,32 @@ RUNTIME_FUNCTION(Runtime_CompileOptimizedOSRFromMaglev) {
const BytecodeOffset osr_offset(args.positive_smi_value_at(0));
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
MaglevFrame* frame = MaglevFrame::cast(it.frame());
- DCHECK_EQ(frame->LookupCodeT().kind(), CodeKind::MAGLEV);
+ DCHECK_EQ(frame->LookupCode().kind(), CodeKind::MAGLEV);
Handle<JSFunction> function = handle(frame->function(), isolate);
+ // This path is only relevant for tests (all production configurations enable
+ // concurrent OSR). It's quite subtle; if you're interested, read on:
+ if (V8_UNLIKELY(!isolate->concurrent_recompilation_enabled() ||
+ !v8_flags.concurrent_osr)) {
+ // - Synchronous Turbofan compilation may trigger lazy deoptimization (e.g.
+ // through compilation dependency finalization actions).
+ // - Maglev (currently) disallows marking an opcode as both can_lazy_deopt
+ // and can_eager_deopt.
+ // - Maglev's JumpLoop opcode (the logical caller of this runtime function)
+ // is marked as can_eager_deopt since OSR'ing to Turbofan involves
+ // deoptimizing to Ignition under the hood.
+ // - Thus this runtime function *must not* trigger a lazy deopt, and
+ // therefore cannot trigger synchronous Turbofan compilation (see above).
+ //
+ // We solve this synchronous OSR case by bailing out early to Ignition, and
+ // letting it handle OSR. How do we trigger the early bailout? Returning
+ // any non-null InstructionStream from this function triggers the deopt in
+ // JumpLoop.
+ return function->code();
+ }
+
return CompileOptimizedOSR(isolate, function, osr_offset);
}
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 482e8bcadb..5a4277e3d3 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -12,7 +12,8 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
+ return *isolate->factory()->NewNumberFromInt64(
+ JSDate::CurrentTimeValue(isolate));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 671a995822..22bdd354f8 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -48,7 +48,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
isolate->debug()->set_return_value(*value);
// Get the top-most JavaScript frame.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
if (isolate->debug_execution_mode() == DebugInfo::kBreakpoints) {
isolate->debug()->Break(it.frame(),
handle(it.frame()->function(), isolate));
@@ -119,7 +119,7 @@ RUNTIME_FUNCTION(Runtime_DebugBreakAtEntry) {
DCHECK(function->shared().GetDebugInfo().BreakAtEntry());
// Get the top-most JavaScript frame. This is the debug target function.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
DCHECK_EQ(*function, it.frame()->function());
// Check whether the next JS frame is closer than the last API entry.
// if yes, then the call to the debug target came from JavaScript. Otherwise,
@@ -208,6 +208,12 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
if (iter.HasAccess()) {
iter.Advance();
Handle<Object> prototype = PrototypeIterator::GetCurrent(iter);
+ if (!iter.IsAtEnd() && iter.HasAccess() && object->IsJSGlobalProxy()) {
+ // Skip JSGlobalObject as the [[Prototype]].
+ DCHECK(prototype->IsJSGlobalObject());
+ iter.Advance();
+ prototype = PrototypeIterator::GetCurrent(iter);
+ }
if (!prototype->IsNull(isolate)) {
result = ArrayList::Add(
isolate, result,
@@ -905,6 +911,9 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
case v8::debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION:
return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
"LiveEdit failed: BLOCKED_BY_ACTIVE_FUNCTION"));
+ case v8::debug::LiveEditResult::BLOCKED_BY_TOP_LEVEL_ES_MODULE_CHANGE:
+ return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
+ "LiveEdit failed: BLOCKED_BY_TOP_LEVEL_ES_MODULE_CHANGE"));
case v8::debug::LiveEditResult::OK:
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index b46b2088e1..3587a827c9 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -45,7 +45,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(
Handle<JSFunction>::cast(function)->shared(), isolate);
- return *SharedFunctionInfo::GetSourceCode(shared);
+ return *SharedFunctionInfo::GetSourceCode(isolate, shared);
}
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index aafb9fe18f..c54debfed7 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -317,8 +317,22 @@ RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
DCHECK_EQ(1, args.length());
Handle<Object> object = args.at(0);
Handle<String> type = Object::TypeOf(isolate, object);
+ Handle<String> msg;
+ if (object->IsNull()) {
+ // "which is null"
+ msg = isolate->factory()->NewStringFromAsciiChecked("null");
+ } else if (isolate->factory()->object_string()->Equals(*type)) {
+ // "which is an object"
+ msg = isolate->factory()->NewStringFromAsciiChecked("an object");
+ } else {
+ // "which is a typeof arg"
+ msg = isolate->factory()
+ ->NewConsString(
+ isolate->factory()->NewStringFromAsciiChecked("a "), type)
+ .ToHandleChecked();
+ }
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
+ isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, msg));
}
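
The new branches above choose a human-readable suffix for the kApplyNonFunction error; per the inline comments, the template renders "... which is null", "... which is an object", or "... which is a <typeof result>". A rough standalone sketch of that selection, using plain std::string in place of the V8 handle and factory machinery (the function name here is illustrative):

    #include <string>

    // Builds the "which is ..." fragment appended to the "is not a function"
    // message, mirroring the three cases handled above.
    std::string WhichIsSuffix(bool is_null, const std::string& type_of) {
      if (is_null) return "null";                  // "... which is null"
      if (type_of == "object") return "an object"; // "... which is an object"
      return "a " + type_of;                       // e.g. "... which is a string"
    }
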
RUNTIME_FUNCTION(Runtime_StackGuard) {
@@ -367,7 +381,6 @@ Object BytecodeBudgetInterruptWithStackCheck(Isolate* isolate,
// We ideally wouldn't actually get StackOverflows here, since we stack
// check on bytecode entry, but it's possible that this check fires due to
// the runtime function call being what overflows the stack.
- // if our function entry
return isolate->StackOverflow();
} else if (check.InterruptRequested()) {
Object return_value = isolate->stack_guard()->HandleInterrupts();
@@ -411,6 +424,10 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt_Sparkplug) {
return BytecodeBudgetInterrupt(isolate, args, CodeKind::BASELINE);
}
+RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt_Maglev) {
+ return BytecodeBudgetInterrupt(isolate, args, CodeKind::MAGLEV);
+}
+
RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptWithStackCheck_Maglev) {
return BytecodeBudgetInterruptWithStackCheck(isolate, args, CodeKind::MAGLEV);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 56e58bea3e..ff1f1d0c9a 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -219,8 +219,10 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Finally, perform the map rollback.
receiver->set_map(*parent_map, kReleaseStore);
#if VERIFY_HEAP
- receiver->HeapObjectVerify(isolate);
- receiver->property_array().PropertyArrayVerify(isolate);
+ if (v8_flags.verify_heap) {
+ receiver->HeapObjectVerify(isolate);
+ receiver->property_array().PropertyArrayVerify(isolate);
+ }
#endif
// If the {descriptor} was "const" so far, we need to update the
@@ -700,7 +702,7 @@ RUNTIME_FUNCTION(Runtime_ObjectIsExtensible) {
Maybe<bool> result =
object->IsJSReceiver()
- ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+ ? JSReceiver::IsExtensible(isolate, Handle<JSReceiver>::cast(object))
: Just(false);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
@@ -711,8 +713,8 @@ RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsThrow) {
DCHECK_EQ(1, args.length());
Handle<JSReceiver> object = args.at<JSReceiver>(0);
- MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
- kThrowOnError),
+ MAYBE_RETURN(JSReceiver::PreventExtensions(
+ isolate, Handle<JSReceiver>::cast(object), kThrowOnError),
ReadOnlyRoots(isolate).exception());
return *object;
}
@@ -723,7 +725,7 @@ RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsDontThrow) {
Handle<JSReceiver> object = args.at<JSReceiver>(0);
Maybe<bool> result = JSReceiver::PreventExtensions(
- Handle<JSReceiver>::cast(object), kDontThrow);
+ isolate, Handle<JSReceiver>::cast(object), kDontThrow);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -909,32 +911,6 @@ RUNTIME_FUNCTION(Runtime_SetNamedProperty) {
StoreOrigin::kNamed));
}
-// Similar to DefineKeyedOwnPropertyInLiteral, but does not update feedback, and
-// and does not have a flags parameter for performing SetFunctionName().
-//
-// Currently, this is used for ObjectLiteral spread properties in CloneObjectIC
-// and for array literal creations in StoreInArrayLiteralIC.
-// TODO(v8:12548): merge this into DefineKeyedOwnPropertyInLiteral.
-RUNTIME_FUNCTION(Runtime_DefineKeyedOwnPropertyInLiteral_Simple) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
- Handle<JSReceiver> object = args.at<JSReceiver>(0);
- Handle<Object> key = args.at(1);
- Handle<Object> value = args.at(2);
-
- PropertyKey lookup_key(isolate, key);
- LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN);
-
- Maybe<bool> result = JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, value, NONE, Just(kDontThrow));
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- DCHECK(result.IsJust());
- USE(result);
-
- return *value;
-}
-
namespace {
// ES6 section 12.5.4.
@@ -1054,7 +1030,8 @@ RUNTIME_FUNCTION(Runtime_GetDerivedMap) {
Handle<Object> rab_gsab = args.at(2);
if (rab_gsab->IsTrue()) {
RETURN_RESULT_OR_FAILURE(
- isolate, JSFunction::GetDerivedRabGsabMap(isolate, target, new_target));
+ isolate, JSFunction::GetDerivedRabGsabTypedArrayMap(isolate, target,
+ new_target));
} else {
RETURN_RESULT_OR_FAILURE(
isolate, JSFunction::GetDerivedMap(isolate, target, new_target));
@@ -1113,23 +1090,44 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_SetFunctionName) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> value = args.at(0);
+ Handle<Name> name = args.at<Name>(1);
+ DCHECK(value->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(value);
+ DCHECK(!function->shared().HasSharedName());
+ Handle<Map> function_map(function->map(), isolate);
+ if (!JSFunction::SetName(function, name,
+ isolate->factory()->empty_string())) {
+ return ReadOnlyRoots(isolate).exception();
+ }
+ // Class constructors do not reserve in-object space for name field.
+ DCHECK_IMPLIES(!IsClassConstructor(function->shared().kind()),
+ *function_map == function->map());
+ return *value;
+}
+
RUNTIME_FUNCTION(Runtime_DefineKeyedOwnPropertyInLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
- Handle<JSObject> object = args.at<JSObject>(0);
- Handle<Name> name = args.at<Name>(1);
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ Handle<Object> name = args.at(1);
Handle<Object> value = args.at(2);
int flag = args.smi_value_at(3);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(4);
- int index = args.tagged_index_value_at(5);
if (!maybe_vector->IsUndefined()) {
+ int index = args.tagged_index_value_at(5);
+ DCHECK(name->IsName());
DCHECK(maybe_vector->IsFeedbackVector());
Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
if (name->IsUniqueName()) {
- nexus.ConfigureMonomorphic(name, handle(object->map(), isolate),
+ nexus.ConfigureMonomorphic(Handle<Name>::cast(name),
+ handle(object->map(), isolate),
MaybeObjectHandle());
} else {
nexus.ConfigureMegamorphic(IcCheckType::kProperty);
@@ -1142,32 +1140,32 @@ RUNTIME_FUNCTION(Runtime_DefineKeyedOwnPropertyInLiteral) {
}
DefineKeyedOwnPropertyInLiteralFlags flags(flag);
- PropertyAttributes attrs =
- (flags & DefineKeyedOwnPropertyInLiteralFlag::kDontEnum)
- ? PropertyAttributes::DONT_ENUM
- : PropertyAttributes::NONE;
if (flags & DefineKeyedOwnPropertyInLiteralFlag::kSetFunctionName) {
+ DCHECK(name->IsName());
DCHECK(value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(value);
DCHECK(!function->shared().HasSharedName());
Handle<Map> function_map(function->map(), isolate);
- if (!JSFunction::SetName(function, name,
+ if (!JSFunction::SetName(function, Handle<Name>::cast(name),
isolate->factory()->empty_string())) {
return ReadOnlyRoots(isolate).exception();
}
// Class constructors do not reserve in-object space for name field.
- CHECK_IMPLIES(!IsClassConstructor(function->shared().kind()),
- *function_map == function->map());
+ DCHECK_IMPLIES(!IsClassConstructor(function->shared().kind()),
+ *function_map == function->map());
}
PropertyKey key(isolate, name);
LookupIterator it(isolate, object, key, object, LookupIterator::OWN);
+
+ Maybe<bool> result = JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value, PropertyAttributes::NONE, Just(kDontThrow));
// Cannot fail since this should only be called when
// creating an object literal.
- CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
- Just(kDontThrow))
- .IsJust());
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ DCHECK(result.IsJust());
+ USE(result);
// Return the value so that
// BaselineCompiler::VisitDefineKeyedOwnPropertyInLiteral doesn't have to
@@ -1469,9 +1467,251 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception());
if (!found.FromJust()) return ReadOnlyRoots(isolate).undefined_value();
+ return *desc.ToObject(isolate);
+}
+
+// Returns a PropertyDescriptorObject (property-descriptor-object.h)
+RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptorObject) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ Handle<JSReceiver> object = args.at<JSReceiver>(0);
+ Handle<Name> name = args.at<Name>(1);
+
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, object, name, &desc);
+ MAYBE_RETURN(found, ReadOnlyRoots(isolate).exception());
+
+ if (!found.FromJust()) return ReadOnlyRoots(isolate).undefined_value();
return *desc.ToPropertyDescriptorObject(isolate);
}
+enum class PrivateMemberType {
+ kPrivateField,
+ kPrivateAccessor,
+ kPrivateMethod,
+};
+
+struct PrivateMember {
+ PrivateMemberType type;
+ // It's the class constructor for static methods/accessors,
+ // the brand symbol for instance methods/accessors,
+ // and the private name symbol for fields.
+ Handle<Object> brand_or_field_symbol;
+ Handle<Object> value;
+};
+
+namespace {
+void CollectPrivateMethodsAndAccessorsFromContext(
+ Isolate* isolate, Handle<Context> context, Handle<String> desc,
+ Handle<Object> brand, IsStaticFlag is_static_flag,
+ std::vector<PrivateMember>* results) {
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
+ VariableLookupResult lookup_result;
+ int context_index = scope_info->ContextSlotIndex(desc, &lookup_result);
+ if (context_index == -1 ||
+ !IsPrivateMethodOrAccessorVariableMode(lookup_result.mode) ||
+ lookup_result.is_static_flag != is_static_flag) {
+ return;
+ }
+
+ Handle<Object> slot_value(context->get(context_index), isolate);
+ DCHECK_IMPLIES(lookup_result.mode == VariableMode::kPrivateMethod,
+ slot_value->IsJSFunction());
+ DCHECK_IMPLIES(lookup_result.mode != VariableMode::kPrivateMethod,
+ slot_value->IsAccessorPair());
+ results->push_back({
+ lookup_result.mode == VariableMode::kPrivateMethod
+ ? PrivateMemberType::kPrivateMethod
+ : PrivateMemberType::kPrivateAccessor,
+ brand,
+ slot_value,
+ });
+}
+
+Maybe<bool> CollectPrivateMembersFromReceiver(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<String> desc,
+ std::vector<PrivateMember>* results) {
+ PropertyFilter key_filter =
+ static_cast<PropertyFilter>(PropertyFilter::PRIVATE_NAMES_ONLY);
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(isolate, receiver, KeyCollectionMode::kOwnOnly,
+ key_filter, GetKeysConversion::kConvertToString),
+ Nothing<bool>());
+
+ if (receiver->IsJSFunction()) {
+ Handle<JSFunction> func(JSFunction::cast(*receiver), isolate);
+ Handle<SharedFunctionInfo> shared(func->shared(), isolate);
+ if (shared->is_class_constructor() &&
+ shared->has_static_private_methods_or_accessors()) {
+ Handle<Context> receiver_context(JSFunction::cast(*receiver).context(),
+ isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(
+ isolate, receiver_context, desc, func, IsStaticFlag::kStatic,
+ results);
+ }
+ }
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> obj_key(keys->get(i), isolate);
+ Handle<Symbol> symbol(Symbol::cast(*obj_key), isolate);
+ CHECK(symbol->is_private_name());
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::GetProperty(isolate, receiver, symbol),
+ Nothing<bool>());
+
+ if (symbol->is_private_brand()) {
+ Handle<Context> value_context(Context::cast(*value), isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(
+ isolate, value_context, desc, symbol, IsStaticFlag::kNotStatic,
+ results);
+ } else {
+ Handle<String> symbol_desc(String::cast(symbol->description()), isolate);
+ if (symbol_desc->Equals(*desc)) {
+ results->push_back({
+ PrivateMemberType::kPrivateField,
+ symbol,
+ value,
+ });
+ }
+ }
+ }
+
+ return Just(true);
+}
+
+Maybe<bool> FindPrivateMembersFromReceiver(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<String> desc,
+ MessageTemplate not_found_message,
+ PrivateMember* result) {
+ std::vector<PrivateMember> results;
+ MAYBE_RETURN(
+ CollectPrivateMembersFromReceiver(isolate, receiver, desc, &results),
+ Nothing<bool>());
+
+ if (results.size() == 0) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate, NewError(not_found_message, desc),
+ Nothing<bool>());
+ } else if (results.size() > 1) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewError(MessageTemplate::kConflictingPrivateName, desc),
+ Nothing<bool>());
+ }
+
+ *result = results[0];
+ return Just(true);
+}
+} // namespace
+
+MaybeHandle<Object> Runtime::GetPrivateMember(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<String> desc) {
+ PrivateMember result;
+ MAYBE_RETURN_NULL(FindPrivateMembersFromReceiver(
+ isolate, receiver, desc, MessageTemplate::kInvalidPrivateMemberRead,
+ &result));
+
+ switch (result.type) {
+ case PrivateMemberType::kPrivateField:
+ case PrivateMemberType::kPrivateMethod: {
+ return result.value;
+ }
+ case PrivateMemberType::kPrivateAccessor: {
+ // The accessors are collected from the contexts, so there is no need to
+ // perform brand checks.
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(result.value);
+ if (pair->getter().IsNull()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewError(MessageTemplate::kInvalidPrivateGetterAccess, desc),
+ Object);
+ }
+ DCHECK(pair->getter().IsJSFunction());
+ Handle<JSFunction> getter(JSFunction::cast(pair->getter()), isolate);
+ return Execution::Call(isolate, getter, receiver, 0, nullptr);
+ }
+ }
+}
+
+MaybeHandle<Object> Runtime::SetPrivateMember(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<String> desc,
+ Handle<Object> value) {
+ PrivateMember result;
+ MAYBE_RETURN_NULL(FindPrivateMembersFromReceiver(
+ isolate, receiver, desc, MessageTemplate::kInvalidPrivateMemberRead,
+ &result));
+
+ switch (result.type) {
+ case PrivateMemberType::kPrivateField: {
+ Handle<Symbol> symbol =
+ Handle<Symbol>::cast(result.brand_or_field_symbol);
+ return Object::SetProperty(isolate, receiver, symbol, value,
+ StoreOrigin::kMaybeKeyed);
+ }
+ case PrivateMemberType::kPrivateMethod: {
+ THROW_NEW_ERROR(
+ isolate, NewError(MessageTemplate::kInvalidPrivateMethodWrite, desc),
+ Object);
+ }
+ case PrivateMemberType::kPrivateAccessor: {
+ // The accessors are collected from the contexts, so there is no need to
+ // perform brand checks.
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(result.value);
+ if (pair->setter().IsNull()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewError(MessageTemplate::kInvalidPrivateSetterAccess, desc),
+ Object);
+ }
+ DCHECK(pair->setter().IsJSFunction());
+ Handle<Object> argv[] = {value};
+ Handle<JSFunction> setter(JSFunction::cast(pair->setter()), isolate);
+ return Execution::Call(isolate, setter, receiver, arraysize(argv), argv);
+ }
+ }
+}
+
+RUNTIME_FUNCTION(Runtime_GetPrivateMember) {
+ HandleScope scope(isolate);
+ // TODO(chromium:1381806) support specifying scopes, or selecting the right
+ // one from the conflicting names.
+ DCHECK_EQ(args.length(), 2);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> desc = args.at<String>(1);
+ if (receiver->IsNullOrUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNonObjectPrivateNameAccess,
+ desc, receiver));
+ }
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Runtime::GetPrivateMember(
+ isolate, Handle<JSReceiver>::cast(receiver), desc));
+}
+
+RUNTIME_FUNCTION(Runtime_SetPrivateMember) {
+ HandleScope scope(isolate);
+ // TODO(chromium:1381806) support specifying scopes, or selecting the right
+ // one from the conflicting names.
+ DCHECK_EQ(args.length(), 3);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> desc = args.at<String>(1);
+ if (receiver->IsNullOrUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNonObjectPrivateNameAccess,
+ desc, receiver));
+ }
+ Handle<Object> value = args.at<Object>(2);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Runtime::SetPrivateMember(
+ isolate, Handle<JSReceiver>::cast(receiver), desc, value));
+}
+
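
Both runtime entry points above funnel through FindPrivateMembersFromReceiver, which accepts exactly one private member whose description matches the requested name and otherwise throws. A minimal sketch of that disambiguation rule, with standard-library types standing in for the V8 handles and error machinery (names here are illustrative):

    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Match {
      std::string description;
    };

    // Zero matches -> "not found", more than one -> "conflicting private name",
    // exactly one -> use it; mirrors the size checks in
    // FindPrivateMembersFromReceiver above.
    Match PickUniquePrivateMember(const std::vector<Match>& matches,
                                  const std::string& desc) {
      if (matches.empty()) {
        throw std::runtime_error("private member '" + desc + "' not found");
      }
      if (matches.size() > 1) {
        throw std::runtime_error("conflicting private name '" + desc + "'");
      }
      return matches.front();
    }
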
RUNTIME_FUNCTION(Runtime_LoadPrivateSetter) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 2fb47682a8..52cc7f653a 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -38,13 +38,14 @@ uint32_t GetArgcForReplaceCallable(uint32_t num_captures,
bool has_named_captures) {
const uint32_t kAdditionalArgsWithoutNamedCaptures = 2;
const uint32_t kAdditionalArgsWithNamedCaptures = 3;
- if (num_captures > Code::kMaxArguments) return -1;
+ if (num_captures > InstructionStream::kMaxArguments) return -1;
uint32_t argc = has_named_captures
? num_captures + kAdditionalArgsWithNamedCaptures
: num_captures + kAdditionalArgsWithoutNamedCaptures;
- static_assert(Code::kMaxArguments < std::numeric_limits<uint32_t>::max() -
- kAdditionalArgsWithNamedCaptures);
- return (argc > Code::kMaxArguments) ? -1 : argc;
+ static_assert(InstructionStream::kMaxArguments <
+ std::numeric_limits<uint32_t>::max() -
+ kAdditionalArgsWithNamedCaptures);
+ return (argc > InstructionStream::kMaxArguments) ? -1 : argc;
}
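
The helper above counts the arguments passed to a replace callable: the capture values plus two or three trailing arguments (by the spec's replacer protocol these are assumed to be the match position, the subject string, and, when named captures exist, the groups object), capped at the engine's argument limit. A self-contained sketch of the same arithmetic, where kMaxArguments = 65534 is an illustrative stand-in for InstructionStream::kMaxArguments:

    #include <cstdint>
    #include <limits>

    // Illustrative stand-in for InstructionStream::kMaxArguments.
    constexpr uint32_t kMaxArguments = 65534;

    // Returns uint32_t(-1) when the callable would need more arguments than the
    // limit allows, mirroring GetArgcForReplaceCallable above.
    uint32_t ArgcForReplaceCallable(uint32_t num_captures,
                                    bool has_named_captures) {
      constexpr uint32_t kExtraWithoutNamedCaptures = 2;
      constexpr uint32_t kExtraWithNamedCaptures = 3;
      if (num_captures > kMaxArguments) return static_cast<uint32_t>(-1);
      static_assert(kMaxArguments < std::numeric_limits<uint32_t>::max() -
                                        kExtraWithNamedCaptures);
      uint32_t argc =
          num_captures + (has_named_captures ? kExtraWithNamedCaptures
                                             : kExtraWithoutNamedCaptures);
      return (argc > kMaxArguments) ? static_cast<uint32_t>(-1) : argc;
    }
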
// Looks up the capture of the given name. Returns the (1-based) numbered
@@ -1166,8 +1167,7 @@ Handle<JSObject> ConstructNamedCaptureGroupsObject(
template <bool has_capture>
static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<JSRegExp> regexp,
- Handle<RegExpMatchInfo> last_match_array,
- Handle<JSArray> result_array) {
+ Handle<RegExpMatchInfo> last_match_array) {
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
DCHECK_NE(has_capture, regexp->capture_count() == 0);
DCHECK(subject->IsFlat());
@@ -1197,9 +1197,10 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
if (cached_answer.IsFixedArray()) {
int capture_registers = JSRegExp::RegistersForCaptureCount(capture_count);
- int32_t* last_match = NewArray<int32_t>(capture_registers);
+ std::unique_ptr<int32_t[]> last_match(new int32_t[capture_registers]);
+ int32_t* raw_last_match = last_match.get();
for (int i = 0; i < capture_registers; i++) {
- last_match[i] = Smi::ToInt(last_match_cache.get(i));
+ raw_last_match[i] = Smi::ToInt(last_match_cache.get(i));
}
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(cached_answer), isolate);
@@ -1207,26 +1208,16 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<FixedArray> copied_fixed_array =
isolate->factory()->CopyFixedArrayWithMap(
cached_fixed_array, isolate->factory()->fixed_array_map());
- JSArray::SetContent(result_array, copied_fixed_array);
RegExp::SetLastMatchInfo(isolate, last_match_array, subject,
- capture_count, last_match);
- DeleteArray(last_match);
- return *result_array;
+ capture_count, raw_last_match);
+ return *copied_fixed_array;
}
}
RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
- // Ensured in Runtime_RegExpExecMultiple.
- DCHECK(result_array->HasObjectElements());
- Handle<FixedArray> result_elements(FixedArray::cast(result_array->elements()),
- isolate);
- if (result_elements->length() < 16) {
- result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
- }
-
- FixedArrayBuilder builder(result_elements);
+ FixedArrayBuilder builder = FixedArrayBuilder::Lazy(isolate);
// Position to search from.
int match_start = -1;
@@ -1338,7 +1329,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
isolate, subject, handle(regexp->data(), isolate), copied_fixed_array,
last_match_cache, RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
}
- return *builder.ToJSArray(result_array);
+ return *builder.array();
} else {
return ReadOnlyRoots(isolate).null_value(); // No matches at all.
}
@@ -1465,26 +1456,24 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
// This is only called for StringReplaceGlobalRegExpWithFunction.
RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(3, args.length());
Handle<JSRegExp> regexp = args.at<JSRegExp>(0);
Handle<String> subject = args.at<String>(1);
Handle<RegExpMatchInfo> last_match_info = args.at<RegExpMatchInfo>(2);
- Handle<JSArray> result_array = args.at<JSArray>(3);
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
- CHECK(result_array->HasObjectElements());
subject = String::Flatten(isolate, subject);
CHECK(regexp->flags() & JSRegExp::kGlobal);
Object result;
if (regexp->capture_count() == 0) {
- result = SearchRegExpMultiple<false>(isolate, subject, regexp,
- last_match_info, result_array);
+ result =
+ SearchRegExpMultiple<false>(isolate, subject, regexp, last_match_info);
} else {
- result = SearchRegExpMultiple<true>(isolate, subject, regexp,
- last_match_info, result_array);
+ result =
+ SearchRegExpMultiple<true>(isolate, subject, regexp, last_match_info);
}
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
return result;
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 7d84df19a1..6be19892e9 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -347,7 +347,7 @@ namespace {
std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
int* total_argc) {
// Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
std::vector<SharedFunctionInfo> functions;
frame->GetFunctions(&functions);
diff --git a/deps/v8/src/runtime/runtime-shadow-realm.cc b/deps/v8/src/runtime/runtime-shadow-realm.cc
index 7db79cc409..9a4174563a 100644
--- a/deps/v8/src/runtime/runtime-shadow-realm.cc
+++ b/deps/v8/src/runtime/runtime-shadow-realm.cc
@@ -40,5 +40,17 @@ RUNTIME_FUNCTION(Runtime_ShadowRealmImportValue) {
return *inner_capability;
}
+RUNTIME_FUNCTION(Runtime_ShadowRealmThrow) {
+ DCHECK_EQ(2, args.length());
+ HandleScope scope(isolate);
+ int message_id_smi = args.smi_value_at(0);
+ Handle<Object> value = args.at(1);
+
+ MessageTemplate message_id = MessageTemplateFromInt(message_id_smi);
+
+ Handle<String> string = Object::NoSideEffectsToString(isolate, value);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message_id, string));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 1f2da1cd19..ac3f8da1b4 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -10,6 +10,7 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/strings/string-builder-inl.h"
+#include "src/strings/unicode-inl.h"
#if V8_ENABLE_WEBASSEMBLY
// TODO(chromium:1236668): Drop this when the "SaveAndClearThreadInWasmFlag"
@@ -228,41 +229,61 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
return Smi::FromInt(subject->Get(i));
}
+RUNTIME_FUNCTION(Runtime_StringCodePointAt) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
+
+ Handle<String> subject = args.at<String>(0);
+ uint32_t i = NumberToUint32(args[1]);
+
+ // Flatten the string. If someone wants to get a char at an index
+ // in a cons string, it is likely that more indices will be
+ // accessed.
+ subject = String::Flatten(isolate, subject);
+
+ if (i >= static_cast<uint32_t>(subject->length())) {
+ return ReadOnlyRoots(isolate).nan_value();
+ }
+
+ int first_code_point = subject->Get(i);
+ if ((first_code_point & 0xFC00) != 0xD800) {
+ return Smi::FromInt(first_code_point);
+ }
+
+ if (i + 1 >= static_cast<uint32_t>(subject->length())) {
+ return Smi::FromInt(first_code_point);
+ }
+
+ int second_code_point = subject->Get(i + 1);
+ if ((second_code_point & 0xFC00) != 0xDC00) {
+ return Smi::FromInt(first_code_point);
+ }
+
+ int surrogate_offset = 0x10000 - (0xD800 << 10) - 0xDC00;
+ return Smi::FromInt((first_code_point << 10) +
+ (second_code_point + surrogate_offset));
+}
+
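The surrogate arithmetic in Runtime_StringCodePointAt above folds the constant parts of the standard UTF-16 decoding formula 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00) into a single addend. A minimal standalone sketch of the same computation (illustrative only; the helper name is made up here):

// Illustrative only: decode a surrogate pair into a code point using the
// same folded-constant trick as the runtime function above.
static int DecodeSurrogatePairSketch(int lead, int trail) {
  constexpr int kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;
  return (lead << 10) + (trail + kSurrogateOffset);
}
// Example: U+1F600 is stored as the pair 0xD83D 0xDE00, and
// DecodeSurrogatePairSketch(0xD83D, 0xDE00) == 0x1F600.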
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<JSArray> array = args.at<JSArray>(0);
- int32_t array_length;
- if (!args[1].ToInt32(&array_length)) {
- THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
- }
- Handle<String> special = args.at<String>(2);
+ Handle<FixedArray> array = args.at<FixedArray>(0);
+
+ int array_length = args.smi_value_at(1);
- size_t actual_array_length = 0;
- CHECK(TryNumberToSize(array->length(), &actual_array_length));
- CHECK_GE(array_length, 0);
- CHECK(static_cast<size_t>(array_length) <= actual_array_length);
+ Handle<String> special = args.at<String>(2);
// This assumption is used by the slice encoding in one or two smis.
DCHECK_GE(Smi::kMaxValue, String::kMaxLength);
- CHECK(array->HasFastElements());
- JSObject::EnsureCanContainHeapObjectElements(array);
-
int special_length = special->length();
- if (!array->HasObjectElements()) {
- return isolate->Throw(ReadOnlyRoots(isolate).illegal_argument_string());
- }
int length;
bool one_byte = special->IsOneByteRepresentation();
{
DisallowGarbageCollection no_gc;
- FixedArray fixed_array = FixedArray::cast(array->elements());
- if (fixed_array.length() < array_length) {
- array_length = fixed_array.length();
- }
+ FixedArray fixed_array = *array;
if (array_length == 0) {
return ReadOnlyRoots(isolate).empty_string();
@@ -286,8 +307,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, answer, isolate->factory()->NewRawOneByteString(length));
DisallowGarbageCollection no_gc;
- StringBuilderConcatHelper(*special, answer->GetChars(no_gc),
- FixedArray::cast(array->elements()),
+ StringBuilderConcatHelper(*special, answer->GetChars(no_gc), *array,
array_length);
return *answer;
} else {
@@ -295,8 +315,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, answer, isolate->factory()->NewRawTwoByteString(length));
DisallowGarbageCollection no_gc;
- StringBuilderConcatHelper(*special, answer->GetChars(no_gc),
- FixedArray::cast(array->elements()),
+ StringBuilderConcatHelper(*special, answer->GetChars(no_gc), *array,
array_length);
return *answer;
}
@@ -409,6 +428,17 @@ RUNTIME_FUNCTION(Runtime_StringEqual) {
return isolate->heap()->ToBoolean(String::Equals(isolate, x, y));
}
+RUNTIME_FUNCTION(Runtime_StringCompare) {
+ CLEAR_THREAD_IN_WASM_SCOPE;
+ DCHECK_EQ(2, args.length());
+ HandleScope scope(isolate);
+ Handle<String> lhs(String::cast(args[0]), isolate);
+ Handle<String> rhs(String::cast(args[1]), isolate);
+ ComparisonResult result = String::Compare(isolate, lhs, rhs);
+ DCHECK_NE(result, ComparisonResult::kUndefined);
+ return Smi::FromInt(static_cast<int>(result));
+}
+
RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -471,5 +501,32 @@ RUNTIME_FUNCTION(Runtime_StringEscapeQuotes) {
return *builder.ToString().ToHandleChecked();
}
+RUNTIME_FUNCTION(Runtime_StringIsWellFormed) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<String> string = args.at<String>(0);
+ return isolate->heap()->ToBoolean(
+ String::IsWellFormedUnicode(isolate, string));
+}
+
+RUNTIME_FUNCTION(Runtime_StringToWellFormed) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<String> source = args.at<String>(0);
+ if (String::IsWellFormedUnicode(isolate, source)) return *source;
+ // String::IsWellFormedUnicode would have returned true above otherwise.
+ DCHECK(!String::IsOneByteRepresentationUnderneath(*source));
+ const int length = source->length();
+ Handle<SeqTwoByteString> dest =
+ isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
+ DisallowGarbageCollection no_gc;
+ String::FlatContent source_contents = source->GetFlatContent(no_gc);
+ DCHECK(source_contents.IsFlat());
+ const uint16_t* source_data = source_contents.ToUC16Vector().begin();
+ uint16_t* dest_data = dest->GetChars(no_gc);
+ unibrow::Utf16::ReplaceUnpairedSurrogates(source_data, dest_data, length);
+ return *dest;
+}
+
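For reference, the helper used above is expected to copy the two-byte contents while turning every unpaired surrogate into U+FFFD, which is what String.prototype.toWellFormed requires. A minimal sketch under that assumption (not the unibrow implementation):

#include <cstdint>

// Illustrative only: copy UTF-16 units, replacing lone surrogates with U+FFFD.
// Assumes `src` and `dst` both hold `length` 16-bit units.
static void ReplaceUnpairedSurrogatesSketch(const uint16_t* src, uint16_t* dst,
                                            int length) {
  constexpr uint16_t kReplacementChar = 0xFFFD;
  for (int i = 0; i < length; ++i) {
    const uint16_t c = src[i];
    const bool is_lead = (c & 0xFC00) == 0xD800;
    const bool is_trail = (c & 0xFC00) == 0xDC00;
    if (is_lead && i + 1 < length && (src[i + 1] & 0xFC00) == 0xDC00) {
      dst[i] = c;               // Valid pair: keep the lead...
      dst[i + 1] = src[i + 1];  // ...and its trail, then skip the trail.
      ++i;
    } else if (is_lead || is_trail) {
      dst[i] = kReplacementChar;  // Lone surrogate.
    } else {
      dst[i] = c;
    }
  }
}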
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test-wasm.cc b/deps/v8/src/runtime/runtime-test-wasm.cc
index f2a7c7ff42..1e4130903e 100644
--- a/deps/v8/src/runtime/runtime-test-wasm.cc
+++ b/deps/v8/src/runtime/runtime-test-wasm.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <cinttypes>
+
#include "include/v8-wasm.h"
#include "src/base/memory.h"
#include "src/base/platform/mutex.h"
@@ -131,7 +133,7 @@ int WasmStackSize(Isolate* isolate) {
// TODO(wasm): Fix this for mixed JS/Wasm stacks with both --trace and
// --trace-wasm.
int n = 0;
- for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
+ for (DebuggableStackFrameIterator it(isolate); !it.done(); it.Advance()) {
if (it.is_wasm()) n++;
}
return n;
@@ -146,7 +148,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
// Find the caller wasm frame.
wasm::WasmCodeRefScope wasm_code_ref_scope;
- StackTraceFrameIterator it(isolate);
+ DebuggableStackFrameIterator it(isolate);
DCHECK(!it.done());
DCHECK(it.is_wasm());
WasmFrame* frame = WasmFrame::cast(it.frame());
@@ -176,14 +178,14 @@ RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- auto value_addr_smi = Smi::cast(args[0]);
+ Smi return_addr_smi = Smi::cast(args[0]);
PrintIndentation(WasmStackSize(isolate));
PrintF("}");
// Find the caller wasm frame.
wasm::WasmCodeRefScope wasm_code_ref_scope;
- StackTraceFrameIterator it(isolate);
+ DebuggableStackFrameIterator it(isolate);
DCHECK(!it.done());
DCHECK(it.is_wasm());
WasmFrame* frame = WasmFrame::cast(it.frame());
@@ -192,26 +194,30 @@ RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
frame->wasm_instance().module()->functions[func_index].sig;
size_t num_returns = sig->return_count();
+ // If we have no returns, we should have passed {Smi::zero()}.
+ DCHECK_IMPLIES(num_returns == 0, return_addr_smi.IsZero());
if (num_returns == 1) {
wasm::ValueType return_type = sig->GetReturn(0);
switch (return_type.kind()) {
case wasm::kI32: {
- int32_t value = base::ReadUnalignedValue<int32_t>(value_addr_smi.ptr());
+ int32_t value =
+ base::ReadUnalignedValue<int32_t>(return_addr_smi.ptr());
PrintF(" -> %d\n", value);
break;
}
case wasm::kI64: {
- int64_t value = base::ReadUnalignedValue<int64_t>(value_addr_smi.ptr());
+ int64_t value =
+ base::ReadUnalignedValue<int64_t>(return_addr_smi.ptr());
PrintF(" -> %" PRId64 "\n", value);
break;
}
case wasm::kF32: {
- float value = base::ReadUnalignedValue<float>(value_addr_smi.ptr());
+ float value = base::ReadUnalignedValue<float>(return_addr_smi.ptr());
PrintF(" -> %f\n", value);
break;
}
case wasm::kF64: {
- double value = base::ReadUnalignedValue<double>(value_addr_smi.ptr());
+ double value = base::ReadUnalignedValue<double>(return_addr_smi.ptr());
PrintF(" -> %f\n", value);
break;
}
@@ -265,7 +271,7 @@ RUNTIME_FUNCTION(Runtime_IsWasmCode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
auto function = JSFunction::cast(args[0]);
- CodeT code = function.code();
+ Code code = function.code();
bool is_js_to_wasm = code.kind() == CodeKind::JS_TO_WASM_FUNCTION ||
(code.builtin_id() == Builtin::kGenericJSToWasmWrapper);
return isolate->heap()->ToBoolean(is_js_to_wasm);
@@ -407,7 +413,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
// Find the caller wasm frame.
wasm::WasmCodeRefScope wasm_code_ref_scope;
- StackTraceFrameIterator it(isolate);
+ DebuggableStackFrameIterator it(isolate);
DCHECK(!it.done());
DCHECK(it.is_wasm());
WasmFrame* frame = WasmFrame::cast(it.frame());
@@ -425,27 +431,47 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<WasmInstanceObject> instance = args.at<WasmInstanceObject>(0);
- int function_index = args.smi_value_at(1);
- wasm::TierUpNowForTesting(isolate, *instance, function_index);
+ DCHECK_EQ(1, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+ Handle<WasmExportedFunction> exp_fun =
+ Handle<WasmExportedFunction>::cast(function);
+ WasmInstanceObject instance = exp_fun->instance();
+ int func_index = exp_fun->function_index();
+ wasm::TierUpNowForTesting(isolate, instance, func_index);
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmTierDown) {
+RUNTIME_FUNCTION(Runtime_WasmEnterDebugging) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- wasm::GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
+ wasm::GetWasmEngine()->EnterDebuggingForIsolate(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmTierUp) {
+RUNTIME_FUNCTION(Runtime_WasmLeaveDebugging) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- wasm::GetWasmEngine()->TierUpAllModulesPerIsolate(isolate);
+ wasm::GetWasmEngine()->LeaveDebuggingForIsolate(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_IsWasmDebugFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+ Handle<WasmExportedFunction> exp_fun =
+ Handle<WasmExportedFunction>::cast(function);
+ wasm::NativeModule* native_module =
+ exp_fun->instance().module_object().native_module();
+ uint32_t func_index = exp_fun->function_index();
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmCode* code = native_module->GetCode(func_index);
+ return isolate->heap()->ToBoolean(code && code->is_liftoff() &&
+ code->for_debugging());
+}
+
RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -476,6 +502,19 @@ RUNTIME_FUNCTION(Runtime_IsTurboFanFunction) {
return isolate->heap()->ToBoolean(code && code->is_turbofan());
}
+RUNTIME_FUNCTION(Runtime_IsUncompiledWasmFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+ Handle<WasmExportedFunction> exp_fun =
+ Handle<WasmExportedFunction>::cast(function);
+ wasm::NativeModule* native_module =
+ exp_fun->instance().module_object().native_module();
+ uint32_t func_index = exp_fun->function_index();
+ return isolate->heap()->ToBoolean(!native_module->HasCode(func_index));
+}
+
RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
DCHECK_EQ(1, args.length());
DisallowGarbageCollection no_gc;
@@ -485,5 +524,30 @@ RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
return ReadOnlyRoots(isolate).undefined_value();
}
+// This runtime function enables WebAssembly GC through an embedder
+// callback and thereby bypasses the value in v8_flags.
+RUNTIME_FUNCTION(Runtime_SetWasmGCEnabled) {
+ DCHECK_EQ(1, args.length());
+ bool enable = args.at(0)->BooleanValue(isolate);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ WasmGCEnabledCallback enabled = [](v8::Local<v8::Context>) { return true; };
+ WasmGCEnabledCallback disabled = [](v8::Local<v8::Context>) { return false; };
+ v8_isolate->SetWasmGCEnabledCallback(enable ? enabled : disabled);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_FlushWasmCode) {
+ wasm::GetWasmEngine()->FlushCode();
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmCompiledExportWrappersCount) {
+ int count = isolate->counters()
+ ->wasm_compiled_export_wrapper()
+ ->GetInternalPointer()
+ ->load();
+ return Smi::FromInt(count);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index e3685cd59d..79aa8ccb40 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -22,6 +22,7 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
#include "src/execution/tiering-manager.h"
+#include "src/flags/flags.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/pretenuring-handler-inl.h"
#include "src/ic/stub-cache.h"
@@ -36,7 +37,6 @@
#include "src/profiler/heap-snapshot-generator.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/snapshot.h"
-#include "src/web-snapshot/web-snapshot.h"
#ifdef V8_ENABLE_MAGLEV
#include "src/maglev/maglev.h"
@@ -193,7 +193,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
Handle<JSFunction> function;
// Find the JavaScript function on the top of the stack.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return CrashUnlessFuzzing(isolate);
@@ -265,7 +265,8 @@ bool CanOptimizeFunction(CodeKind target_kind, Handle<JSFunction> function,
return CrashUnlessFuzzingReturnFalse(isolate);
}
- if (!v8_flags.turbofan) return false;
+ if (target_kind == CodeKind::TURBOFAN && !v8_flags.turbofan) return false;
+ if (target_kind == CodeKind::MAGLEV && !v8_flags.maglev) return false;
if (function->shared().optimization_disabled() &&
function->shared().disabled_optimization_reason() ==
@@ -325,11 +326,11 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
// function has.
if (!function->is_compiled()) {
DCHECK(function->shared().HasBytecodeArray());
- CodeT codet = *BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+ Code code = *BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
if (function->shared().HasBaselineCode()) {
- codet = function->shared().baseline_code(kAcquireLoad);
+ code = function->shared().baseline_code(kAcquireLoad);
}
- function->set_code(codet);
+ function->set_code(code);
}
TraceManualRecompile(*function, target_kind, concurrency_mode);
@@ -390,7 +391,7 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
return CrashUnlessFuzzing(isolate);
}
- return *function;
+ return ReadOnlyRoots(isolate).undefined_value();
}
// TODO(v8:7700): Remove this function once we no longer need it to measure
@@ -403,10 +404,10 @@ RUNTIME_FUNCTION(Runtime_BenchMaglev) {
Handle<JSFunction> function = args.at<JSFunction>(0);
int count = args.smi_value_at(1);
- Handle<CodeT> codet;
+ Handle<Code> code;
base::ElapsedTimer timer;
timer.Start();
- codet = Maglev::Compile(isolate, function).ToHandleChecked();
+ code = Maglev::Compile(isolate, function).ToHandleChecked();
for (int i = 1; i < count; ++i) {
HandleScope handle_scope(isolate);
Maglev::Compile(isolate, function);
@@ -414,7 +415,7 @@ RUNTIME_FUNCTION(Runtime_BenchMaglev) {
PrintF("Maglev compile time: %g ms!\n",
timer.Elapsed().InMillisecondsF() / count);
- function->set_code(*codet);
+ function->set_code(*code);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -471,7 +472,7 @@ RUNTIME_FUNCTION(Runtime_IsTurbofanEnabled) {
RUNTIME_FUNCTION(Runtime_CurrentFrameIsTurbofan) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 0);
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
return isolate->heap()->ToBoolean(it.frame()->is_turbofan());
}
@@ -490,7 +491,10 @@ RUNTIME_FUNCTION(Runtime_OptimizeMaglevOnNextCall) {
// TODO(jgruber): Rename to OptimizeTurbofanOnNextCall.
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
- return OptimizeFunctionOnNextCall(args, isolate, CodeKind::TURBOFAN);
+ return OptimizeFunctionOnNextCall(
+ args, isolate,
+ v8_flags.optimize_on_next_call_optimizes_to_maglev ? CodeKind::MAGLEV
+ : CodeKind::TURBOFAN);
}
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
@@ -554,10 +558,9 @@ void FinalizeOptimization(Isolate* isolate) {
#endif // V8_ENABLE_MAGLEV
}
-BytecodeOffset OffsetOfNextJumpLoop(Isolate* isolate, UnoptimizedFrame* frame) {
- Handle<BytecodeArray> bytecode_array(frame->GetBytecodeArray(), isolate);
- const int current_offset = frame->GetBytecodeOffset();
-
+BytecodeOffset OffsetOfNextJumpLoop(Isolate* isolate,
+ Handle<BytecodeArray> bytecode_array,
+ int current_offset) {
interpreter::BytecodeArrayIterator it(bytecode_array, current_offset);
// First, look for a loop that contains the current bytecode offset.
@@ -600,7 +603,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
}
// Find the JavaScript function on the top of the stack.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
while (!it.done() && stack_depth--) it.Advance();
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return CrashUnlessFuzzing(isolate);
@@ -624,14 +627,15 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
*function);
}
- if (function->HasAvailableOptimizedCode()) {
+ if (function->HasAvailableOptimizedCode() &&
+ !function->code().is_maglevved()) {
DCHECK(function->HasAttachedOptimizedCode() ||
function->ChecksTieringState());
// If function is already optimized, return.
return ReadOnlyRoots(isolate).undefined_value();
}
- if (!it.frame()->is_unoptimized()) {
+ if (!it.frame()->is_unoptimized() && !it.frame()->is_maglev()) {
// Nothing to be done.
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -652,8 +656,22 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// see the cached OSR code with a mismatched offset, and trigger
// non-concurrent OSR compilation and installation.
if (isolate->concurrent_recompilation_enabled() && v8_flags.concurrent_osr) {
- const BytecodeOffset osr_offset =
- OffsetOfNextJumpLoop(isolate, UnoptimizedFrame::cast(it.frame()));
+ BytecodeOffset osr_offset = BytecodeOffset::None();
+ if (it.frame()->is_unoptimized()) {
+ UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+ Handle<BytecodeArray> bytecode_array(frame->GetBytecodeArray(), isolate);
+ const int current_offset = frame->GetBytecodeOffset();
+ osr_offset =
+ OffsetOfNextJumpLoop(isolate, bytecode_array, current_offset);
+ } else {
+ MaglevFrame* frame = MaglevFrame::cast(it.frame());
+ Handle<BytecodeArray> bytecode_array(
+ frame->function().shared().GetBytecodeArray(isolate), isolate);
+ const int current_offset = frame->GetBytecodeOffsetForOSR().ToInt();
+ osr_offset =
+ OffsetOfNextJumpLoop(isolate, bytecode_array, current_offset);
+ }
+
if (osr_offset.IsNone()) {
// The loop may have been elided by bytecode generation (e.g. for
      // patterns such as `do { ... } while (false);`).
@@ -670,7 +688,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
USE(unused_result);
// Finalize again to finish the queued job. The next call into
- // Runtime::kCompileOptimizedOSR will pick up the cached Code object.
+ // Runtime::kCompileOptimizedOSR will pick up the cached InstructionStream
+ // object.
FinalizeOptimization(isolate);
}
@@ -682,7 +701,7 @@ RUNTIME_FUNCTION(Runtime_BaselineOsr) {
DCHECK_EQ(0, args.length());
// Find the JavaScript function on the top of the stack.
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
Handle<JSFunction> function = handle(it.frame()->function(), isolate);
if (function.is_null()) return CrashUnlessFuzzing(isolate);
if (!v8_flags.sparkplug || !v8_flags.use_osr) {
@@ -723,7 +742,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
isolate->lazy_compile_dispatcher()->FinishNow(sfi);
}
- sfi->DisableOptimization(BailoutReason::kNeverOptimize);
+ sfi->DisableOptimization(isolate, BailoutReason::kNeverOptimize);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -732,9 +751,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
DCHECK_EQ(args.length(), 1);
int status = 0;
- if (v8_flags.lite_mode || v8_flags.jitless) {
- // Both jitless and lite modes cannot optimize. Unit tests should handle
- // these the same way. In the future, the two flags may become synonyms.
+ if (v8_flags.lite_mode || v8_flags.jitless || !V8_ENABLE_TURBOFAN_BOOL) {
+ // These modes cannot optimize. Unit tests should handle these the same
+ // way.
status |= static_cast<int>(OptimizationStatus::kLiteMode);
}
if (!isolate->use_optimizer()) {
@@ -746,6 +765,10 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (v8_flags.deopt_every_n_times) {
status |= static_cast<int>(OptimizationStatus::kMaybeDeopted);
}
+ if (v8_flags.optimize_on_next_call_optimizes_to_maglev) {
+ status |= static_cast<int>(
+ OptimizationStatus::kOptimizeOnNextCallOptimizesToMaglev);
+ }
Handle<Object> function_object = args.at(0);
if (function_object->IsUndefined()) return Smi::FromInt(status);
@@ -773,7 +796,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
}
if (function->HasAttachedOptimizedCode()) {
- CodeT code = function->code();
+ Code code = function->code();
if (code.marked_for_deoptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForDeoptimization);
} else {
@@ -798,7 +821,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
// Additionally, detect activations of this frame on the stack, and report the
// status of the topmost frame.
JavaScriptFrame* frame = nullptr;
- JavaScriptFrameIterator it(isolate);
+ JavaScriptStackFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->function() == *function) {
frame = it.frame();
@@ -816,6 +839,8 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
static_cast<int>(OptimizationStatus::kTopmostFrameIsInterpreted);
} else if (frame->is_baseline()) {
status |= static_cast<int>(OptimizationStatus::kTopmostFrameIsBaseline);
+ } else if (frame->is_maglev()) {
+ status |= static_cast<int>(OptimizationStatus::kTopmostFrameIsMaglev);
}
}
@@ -972,7 +997,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
// We cannot rely on `space->limit()` to point to the end of the current page
  // in the case where inline allocations are disabled; it actually points to
// the current allocation pointer.
- DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
+ DCHECK_IMPLIES(!heap->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining = GetSpaceRemainingOnCurrentPage(space);
while (space_remaining > 0) {
@@ -1195,6 +1220,7 @@ RUNTIME_FUNCTION(Runtime_GlobalPrint) {
uint16_t character = stream.GetNext();
PrintF(output_stream, "%c", character);
}
+ fflush(output_stream);
return string;
}
@@ -1280,7 +1306,7 @@ namespace {
int StackSize(Isolate* isolate) {
int n = 0;
- for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
+ for (JavaScriptStackFrameIterator it(isolate); !it.done(); it.Advance()) n++;
return n;
}
@@ -1365,11 +1391,11 @@ RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
}
- PretenturingHandler* pretenuring_handler = heap->pretenuring_handler();
+ PretenuringHandler* pretenuring_handler = heap->pretenuring_handler();
AllocationMemento memento =
pretenuring_handler
- ->FindAllocationMemento<PretenturingHandler::kForRuntime>(
- object.map(), object);
+ ->FindAllocationMemento<PretenuringHandler::kForRuntime>(object.map(),
+ object);
if (memento.is_null())
return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
AllocationSite site = memento.GetAllocationSite();
@@ -1418,7 +1444,7 @@ RUNTIME_FUNCTION(Runtime_RegexpHasNativeCode) {
bool is_latin1 = Oddball::cast(args[1]).ToBool(isolate);
bool result;
if (regexp.type_tag() == JSRegExp::IRREGEXP) {
- result = regexp.code(is_latin1).IsCodeT();
+ result = regexp.code(is_latin1).IsCode();
} else {
result = false;
}
@@ -1642,7 +1668,8 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
void SetterCallbackEvent(Handle<Name> name, Address entry_point) final {}
void RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) final {}
- void CodeMoveEvent(AbstractCode from, AbstractCode to) final {}
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) final {}
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) final {}
void SharedFunctionInfoMoveEvent(Address from, Address to) final {}
void NativeContextMoveEvent(Address from, Address to) final {}
void CodeMovingGCEvent() final {}
diff --git a/deps/v8/src/runtime/runtime-trace.cc b/deps/v8/src/runtime/runtime-trace.cc
index 7d82f9d538..07641b3d4a 100644
--- a/deps/v8/src/runtime/runtime-trace.cc
+++ b/deps/v8/src/runtime/runtime-trace.cc
@@ -69,7 +69,8 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
// Print accumulator.
if ((is_input && interpreter::Bytecodes::ReadsAccumulator(bytecode)) ||
- (!is_input && interpreter::Bytecodes::WritesAccumulator(bytecode))) {
+ (!is_input &&
+ interpreter::Bytecodes::WritesOrClobbersAccumulator(bytecode))) {
os << " [ " << kAccumulator << kArrowDirection;
accumulator->ShortPrint(os);
os << " ]" << std::endl;
@@ -109,7 +110,7 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
return ReadOnlyRoots(isolate).undefined_value();
}
- JavaScriptFrameIterator frame_iterator(isolate);
+ JavaScriptStackFrameIterator frame_iterator(isolate);
UnoptimizedFrame* frame =
reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
@@ -159,7 +160,7 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeExit) {
return ReadOnlyRoots(isolate).undefined_value();
}
- JavaScriptFrameIterator frame_iterator(isolate);
+ JavaScriptStackFrameIterator frame_iterator(isolate);
UnoptimizedFrame* frame =
reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index bd966debf5..1b8526c690 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -36,7 +36,7 @@ static inline ObjectPair MakePair(Object x, Object y) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
return x.ptr() | (static_cast<ObjectPair>(y.ptr()) << 32);
#elif defined(V8_TARGET_BIG_ENDIAN)
- return y->ptr() | (static_cast<ObjectPair>(x->ptr()) << 32);
+ return y.ptr() | (static_cast<ObjectPair>(x.ptr()) << 32);
#else
#error Unknown endianness
#endif
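The 64-bit ObjectPair packs the two tagged values so that a runtime call can return both in one machine word, and the fix brings the big-endian branch in line with the little-endian one's value syntax (x.ptr() rather than x->ptr()). A tiny sketch of how the little-endian packing would be taken apart again (hypothetical helpers, not part of the patch):

#include <cstdint>

// Illustrative only: recover the two 32-bit halves of a little-endian
// ObjectPair as built by MakePair above (x in the low word, y in the high).
static inline uint32_t PairFirstSketch(uint64_t pair) {
  return static_cast<uint32_t>(pair);        // corresponds to x.ptr()
}
static inline uint32_t PairSecondSketch(uint64_t pair) {
  return static_cast<uint32_t>(pair >> 32);  // corresponds to y.ptr()
}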
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 94cb6b9f3d..8a772a3def 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -87,15 +87,18 @@ Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
class V8_NODISCARD ClearThreadInWasmScope {
public:
- explicit ClearThreadInWasmScope(Isolate* isolate) : isolate_(isolate) {
- DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
- trap_handler::IsThreadInWasm());
- trap_handler::ClearThreadInWasm();
+ explicit ClearThreadInWasmScope(Isolate* isolate)
+ : isolate_(isolate), is_thread_in_wasm_(trap_handler::IsThreadInWasm()) {
+    // In some cases we call this from Wasm code inlined into JavaScript,
+    // so the flag might not be set.
+ if (is_thread_in_wasm_) {
+ trap_handler::ClearThreadInWasm();
+ }
}
~ClearThreadInWasmScope() {
DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
!trap_handler::IsThreadInWasm());
- if (!isolate_->has_pending_exception()) {
+ if (!isolate_->has_pending_exception() && is_thread_in_wasm_) {
trap_handler::SetThreadInWasm();
}
// Otherwise we only want to set the flag if the exception is caught in
@@ -104,6 +107,7 @@ class V8_NODISCARD ClearThreadInWasmScope {
private:
Isolate* isolate_;
+ const bool is_thread_in_wasm_;
};
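The revised scope is essentially a save/clear/restore guard around the thread-in-wasm flag, restoring it only if it was set on entry and no exception is pending. Stripped of the exception handling, the pattern is roughly that of the {SaveAndClearThreadInWasmFlag} helper mentioned in a TODO further down; an illustrative sketch, not the actual helper:

// Illustrative RAII sketch: remember whether the thread-in-wasm flag was set,
// clear it for the duration of the scope, and restore it on destruction.
class SaveAndClearThreadInWasmFlagSketch {
 public:
  SaveAndClearThreadInWasmFlagSketch()
      : was_set_(trap_handler::IsThreadInWasm()) {
    if (was_set_) trap_handler::ClearThreadInWasm();
  }
  ~SaveAndClearThreadInWasmFlagSketch() {
    if (was_set_) trap_handler::SetThreadInWasm();
  }

 private:
  const bool was_set_;
};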
Object ThrowWasmError(Isolate* isolate, MessageTemplate message,
@@ -121,33 +125,33 @@ Object ThrowWasmError(Isolate* isolate, MessageTemplate message,
// type; if the check succeeds, returns the object in its wasm representation;
// otherwise throws a type error.
RUNTIME_FUNCTION(Runtime_WasmJSToWasmObject) {
- // This code is called from wrappers, so the "thread is wasm" flag is not set.
- DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
- !trap_handler::IsThreadInWasm());
+ // TODO(manoskouk): Use {SaveAndClearThreadInWasmFlag} in runtime-internal.cc
+ // and runtime-strings.cc.
+ bool thread_in_wasm = trap_handler::IsThreadInWasm();
+ if (thread_in_wasm) trap_handler::ClearThreadInWasm();
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
// 'raw_instance' can be either a WasmInstanceObject or undefined.
- Object raw_instance = args[0];
- Handle<Object> value(args[1], isolate);
+ Handle<Object> value(args[0], isolate);
// Make sure ValueType fits properly in a Smi.
static_assert(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
- int raw_type = args.smi_value_at(2);
+ int raw_type = args.smi_value_at(1);
- const wasm::WasmModule* module =
- raw_instance.IsWasmInstanceObject()
- ? WasmInstanceObject::cast(raw_instance).module()
- : nullptr;
-
- wasm::ValueType type = wasm::ValueType::FromRawBitField(raw_type);
+ wasm::ValueType expected_canonical =
+ wasm::ValueType::FromRawBitField(raw_type);
const char* error_message;
Handle<Object> result;
- bool success = internal::wasm::JSToWasmObject(isolate, module, value, type,
- &error_message)
+ bool success = internal::wasm::JSToWasmObject(
+ isolate, value, expected_canonical, &error_message)
.ToHandle(&result);
- if (success) return *result;
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kWasmTrapJSTypeError));
+ Object ret = success ? *result
+ : isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kWasmTrapJSTypeError));
+ if (thread_in_wasm && !isolate->has_pending_exception()) {
+ trap_handler::SetThreadInWasm();
+ }
+ return ret;
}
RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
@@ -203,6 +207,15 @@ RUNTIME_FUNCTION(Runtime_ThrowBadSuspenderError) {
return ThrowWasmError(isolate, MessageTemplate::kWasmTrapBadSuspender);
}
+RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ MessageTemplate message_id = MessageTemplateFromInt(args.smi_value_at(0));
+ Handle<Object> arg(args[1], isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message_id, arg));
+}
+
RUNTIME_FUNCTION(Runtime_WasmThrow) {
ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
@@ -261,7 +274,6 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
RUNTIME_FUNCTION(Runtime_WasmAllocateFeedbackVector) {
ClearThreadInWasmScope wasm_flag(isolate);
- DCHECK(v8_flags.wasm_speculative_inlining);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(args[0]),
@@ -270,6 +282,7 @@ RUNTIME_FUNCTION(Runtime_WasmAllocateFeedbackVector) {
wasm::NativeModule** native_module_stack_slot =
reinterpret_cast<wasm::NativeModule**>(args.address_of_arg_at(2));
wasm::NativeModule* native_module = instance->module_object().native_module();
+ DCHECK(native_module->enabled_features().has_inlining());
// We have to save the native_module on the stack, in case the allocation
// triggers a GC and we need the module to scan LiftoffSetupFrame stack frame.
*native_module_stack_slot = native_module;
@@ -279,8 +292,11 @@ RUNTIME_FUNCTION(Runtime_WasmAllocateFeedbackVector) {
const wasm::WasmModule* module = native_module->module();
int func_index = declared_func_index + module->num_imported_functions;
- Handle<FixedArray> vector = isolate->factory()->NewFixedArrayWithZeroes(
- NumFeedbackSlots(module, func_index));
+ int num_slots = native_module->enabled_features().has_inlining()
+ ? NumFeedbackSlots(module, func_index)
+ : 0;
+ Handle<FixedArray> vector =
+ isolate->factory()->NewFixedArrayWithZeroes(num_slots);
DCHECK_EQ(instance->feedback_vectors().get(declared_func_index), Smi::zero());
instance->feedback_vectors().set(declared_func_index, *vector);
return *vector;
@@ -288,14 +304,14 @@ RUNTIME_FUNCTION(Runtime_WasmAllocateFeedbackVector) {
namespace {
void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
- int function_index, Handle<CodeT> wrapper_code) {
+ int function_index, Handle<Code> wrapper_code) {
Handle<WasmInternalFunction> internal =
WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
function_index)
.ToHandleChecked();
Handle<WasmExternalFunction> exported_function =
handle(WasmExternalFunction::cast(internal->external()), isolate);
- exported_function->set_code(*wrapper_code, kReleaseStore);
+ exported_function->set_code(*wrapper_code);
WasmExportedFunctionData function_data =
exported_function->shared().wasm_exported_function_data();
function_data.set_wrapper_code(*wrapper_code);
@@ -330,7 +346,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
return ReadOnlyRoots(isolate).undefined_value();
}
- Handle<CodeT> wrapper_code =
+ Handle<Code> wrapper_code =
wasm::JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
isolate, sig, canonical_sig_index, module);
@@ -361,7 +377,12 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
// We're reusing this interrupt mechanism to interrupt long-running loops.
StackLimitCheck check(isolate);
- DCHECK(!check.JsHasOverflowed());
+ // We don't need to handle stack overflows here, because the function that
+ // performed this runtime call did its own stack check at its beginning.
+ // However, we can't DCHECK(!check.JsHasOverflowed()) here, because the
+ // additional stack space used by the CEntryStub and this runtime function
+ // itself might have pushed us above the limit where a stack check would
+ // fail.
if (check.InterruptRequested()) {
Object result = isolate->stack_guard()->HandleInterrupts();
if (result.IsException()) return result;
@@ -788,19 +809,23 @@ RUNTIME_FUNCTION(Runtime_WasmArrayNewSegment) {
instance->data_segment_starts().get(segment_index) + offset;
return *isolate->factory()->NewWasmArrayFromMemory(length, rtt, source);
} else {
- const wasm::WasmElemSegment* elem_segment =
+ Handle<Object> elem_segment_raw =
+ handle(instance->element_segments().get(segment_index), isolate);
+ const wasm::WasmElemSegment* module_elem_segment =
&instance->module()->elem_segments[segment_index];
- if (!base::IsInBounds<size_t>(
- offset, length,
- instance->dropped_elem_segments().get(segment_index)
- ? 0
- : elem_segment->entries.size())) {
+ // If the segment is initialized in the instance, we have to get its length
+ // from there, as it might have been dropped. If the segment is
+ // uninitialized, we need to fetch its length from the module.
+ int segment_length =
+ elem_segment_raw->IsFixedArray()
+ ? Handle<FixedArray>::cast(elem_segment_raw)->length()
+ : module_elem_segment->element_count;
+ if (!base::IsInBounds<size_t>(offset, length, segment_length)) {
return ThrowWasmError(
isolate, MessageTemplate::kWasmTrapElementSegmentOutOfBounds);
}
-
Handle<Object> result = isolate->factory()->NewWasmArrayFromElementSegment(
- instance, elem_segment, offset, length, rtt);
+ instance, segment_index, offset, length, rtt);
if (result->IsSmi()) {
return ThrowWasmError(
isolate, static_cast<MessageTemplate>(result->ToSmi().value()));
@@ -828,6 +853,7 @@ void SyncStackLimit(Isolate* isolate) {
}
uintptr_t limit = reinterpret_cast<uintptr_t>(stack->jmpbuf()->stack_limit);
isolate->stack_guard()->SetStackLimit(limit);
+ isolate->RecordStackSwitchForScanning();
}
} // namespace
@@ -940,8 +966,16 @@ RUNTIME_FUNCTION(Runtime_WasmStringNewWtf8) {
const base::Vector<const uint8_t> bytes{instance.memory_start() + offset,
size};
- RETURN_RESULT_OR_TRAP(
- isolate->factory()->NewStringFromUtf8(bytes, utf8_variant));
+ MaybeHandle<v8::internal::String> result_string =
+ isolate->factory()->NewStringFromUtf8(bytes, utf8_variant);
+ if (utf8_variant == unibrow::Utf8Variant::kUtf8NoTrap) {
+ DCHECK(!isolate->has_pending_exception());
+ if (result_string.is_null()) {
+ return *isolate->factory()->wasm_null();
+ }
+ return *result_string.ToHandleChecked();
+ }
+ RETURN_RESULT_OR_TRAP(result_string);
}
RUNTIME_FUNCTION(Runtime_WasmStringNewWtf8Array) {
@@ -957,8 +991,16 @@ RUNTIME_FUNCTION(Runtime_WasmStringNewWtf8Array) {
static_cast<uint32_t>(unibrow::Utf8Variant::kLastUtf8Variant));
auto utf8_variant = static_cast<unibrow::Utf8Variant>(utf8_variant_value);
- RETURN_RESULT_OR_TRAP(
- isolate->factory()->NewStringFromUtf8(array, start, end, utf8_variant));
+ MaybeHandle<v8::internal::String> result_string =
+ isolate->factory()->NewStringFromUtf8(array, start, end, utf8_variant);
+ if (utf8_variant == unibrow::Utf8Variant::kUtf8NoTrap) {
+ DCHECK(!isolate->has_pending_exception());
+ if (result_string.is_null()) {
+ return *isolate->factory()->wasm_null();
+ }
+ return *result_string.ToHandleChecked();
+ }
+ RETURN_RESULT_OR_TRAP(result_string);
}
RUNTIME_FUNCTION(Runtime_WasmStringNewWtf16) {
@@ -1340,5 +1382,40 @@ RUNTIME_FUNCTION(Runtime_WasmStringViewWtf8Slice) {
.ToHandleChecked();
}
+RUNTIME_FUNCTION(Runtime_WasmStringFromCodePoint) {
+ ClearThreadInWasmScope flag_scope(isolate);
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+
+ uint32_t code_point = NumberToUint32(args[0]);
+ if (code_point <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(code_point);
+ }
+ if (code_point > 0x10FFFF) {
+ return ThrowWasmError(isolate, MessageTemplate::kInvalidCodePoint,
+ handle(args[0], isolate));
+ }
+
+ base::uc16 char_buffer[] = {
+ unibrow::Utf16::LeadSurrogate(code_point),
+ unibrow::Utf16::TrailSurrogate(code_point),
+ };
+ Handle<SeqTwoByteString> result =
+ isolate->factory()
+ ->NewRawTwoByteString(arraysize(char_buffer))
+ .ToHandleChecked();
+ DisallowGarbageCollection no_gc;
+ CopyChars(result->GetChars(no_gc), char_buffer, arraysize(char_buffer));
+ return *result;
+}
+
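The lead/trail surrogate helpers used above implement standard UTF-16 encoding for code points above U+FFFF; a minimal sketch of the expected formulas (illustrative only):

#include <cstdint>

// Illustrative only: split a supplementary-plane code point into a surrogate
// pair, the inverse of the decoding in Runtime_StringCodePointAt.
static void EncodeSurrogatePairSketch(uint32_t code_point, uint16_t out[2]) {
  // Precondition: 0x10000 <= code_point && code_point <= 0x10FFFF.
  const uint32_t v = code_point - 0x10000;
  out[0] = static_cast<uint16_t>(0xD800 + (v >> 10));    // lead surrogate
  out[1] = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // trail surrogate
}
// Example: U+1F600 encodes to {0xD83D, 0xDE00}.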
+RUNTIME_FUNCTION(Runtime_WasmStringHash) {
+ ClearThreadInWasmScope flag_scope(isolate);
+ DCHECK_EQ(1, args.length());
+ String string(String::cast(args[0]));
+ uint32_t hash = string.EnsureHash();
+ return Smi::FromInt(static_cast<int>(hash));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 4e81f21a61..f94bfb47ef 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -80,7 +80,8 @@ namespace internal {
F(BigIntToBoolean, 1, 1) \
F(BigIntToNumber, 1, 1) \
F(BigIntUnaryOp, 2, 1) \
- F(ToBigInt, 1, 1)
+ F(ToBigInt, 1, 1) \
+ F(ToBigIntConvertNumber, 1, 1)
#define FOR_EACH_INTRINSIC_CLASSES(F, I) \
F(DefineClass, -1 /* >= 3 */, 1) \
@@ -118,7 +119,8 @@ namespace internal {
F(NotifyDeoptimized, 0, 1) \
F(ObserveNode, 1, 1) \
F(ResolvePossiblyDirectEval, 6, 1) \
- F(VerifyType, 1, 1)
+ F(VerifyType, 1, 1) \
+ F(CheckTurboshaftTypeOf, 2, 1)
#define FOR_EACH_INTRINSIC_DATE(F, I) F(DateCurrentTime, 0, 1)
@@ -229,6 +231,7 @@ namespace internal {
F(BytecodeBudgetInterruptWithStackCheck_Ignition, 1, 1) \
F(BytecodeBudgetInterrupt_Sparkplug, 1, 1) \
F(BytecodeBudgetInterruptWithStackCheck_Sparkplug, 1, 1) \
+ F(BytecodeBudgetInterrupt_Maglev, 1, 1) \
F(BytecodeBudgetInterruptWithStackCheck_Maglev, 1, 1) \
F(NewError, 2, 1) \
F(NewForeign, 0, 1) \
@@ -314,7 +317,9 @@ namespace internal {
F(GetDerivedMap, 2, 1) \
F(GetFunctionName, 1, 1) \
F(GetOwnPropertyDescriptor, 2, 1) \
+ F(GetOwnPropertyDescriptorObject, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
+ F(GetPrivateMember, 2, 1) \
F(GetProperty, -1 /* [2, 3] */, 1) \
F(HasFastPackedElements, 1, 1) \
F(HasInPrototypeChain, 2, 1) \
@@ -341,11 +346,11 @@ namespace internal {
F(ObjectValuesSkipFastPath, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
F(SetDataProperties, 2, 1) \
+ F(SetFunctionName, 2, 1) \
F(SetKeyedProperty, 3, 1) \
F(DefineObjectOwnProperty, 3, 1) \
F(SetNamedProperty, 3, 1) \
F(SetOwnPropertyIgnoreAttributes, 4, 1) \
- F(DefineKeyedOwnPropertyInLiteral_Simple, 3, 1) \
F(ShrinkNameDictionary, 1, 1) \
F(ShrinkSwissNameDictionary, 1, 1) \
F(ToFastProperties, 1, 1) \
@@ -356,6 +361,7 @@ namespace internal {
F(ToObject, 1, 1) \
F(ToString, 1, 1) \
F(TryMigrateInstance, 1, 1) \
+ F(SetPrivateMember, 3, 1) \
F(SwissTableAdd, 4, 1) \
F(SwissTableAllocate, 1, 1) \
F(SwissTableDelete, 2, 1) \
@@ -411,7 +417,7 @@ namespace internal {
F(RegExpExecTreatMatchAtEndAsFailure, 4, 1) \
F(RegExpExperimentalOneshotExec, 4, 1) \
F(RegExpExperimentalOneshotExecTreatMatchAtEndAsFailure, 4, 1) \
- F(RegExpExecMultiple, 4, 1) \
+ F(RegExpExecMultiple, 3, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpReplaceRT, 3, 1) \
F(RegExpSplit, 3, 1) \
@@ -445,7 +451,8 @@ namespace internal {
#define FOR_EACH_INTRINSIC_SHADOW_REALM(F, I) \
F(ShadowRealmWrappedFunctionCreate, 2, 1) \
- F(ShadowRealmImportValue, 1, 1)
+ F(ShadowRealmImportValue, 1, 1) \
+ F(ShadowRealmThrow, 2, 1)
#define FOR_EACH_INTRINSIC_STRINGS(F, I) \
F(FlattenString, 1, 1) \
@@ -454,17 +461,21 @@ namespace internal {
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringCharCodeAt, 2, 1) \
+ F(StringCodePointAt, 2, 1) \
+ F(StringCompare, 2, 1) \
F(StringEqual, 2, 1) \
F(StringEscapeQuotes, 1, 1) \
F(StringGreaterThan, 2, 1) \
F(StringGreaterThanOrEqual, 2, 1) \
+ F(StringIsWellFormed, 1, 1) \
F(StringLastIndexOf, 2, 1) \
F(StringLessThan, 2, 1) \
F(StringLessThanOrEqual, 2, 1) \
F(StringMaxLength, 0, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringSubstring, 3, 1) \
- F(StringToArray, 2, 1)
+ F(StringToArray, 2, 1) \
+ F(StringToWellFormed, 1, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F, I) \
F(CreatePrivateNameSymbol, 1, 1) \
@@ -612,6 +623,7 @@ namespace internal {
F(WasmThrow, 2, 1) \
F(WasmReThrow, 1, 1) \
F(WasmThrowJSTypeError, 0, 1) \
+ F(WasmThrowTypeError, 2, 1) \
F(WasmRefFunc, 1, 1) \
F(WasmFunctionTableGet, 3, 1) \
F(WasmFunctionTableSet, 4, 1) \
@@ -619,7 +631,7 @@ namespace internal {
F(WasmTableCopy, 6, 1) \
F(WasmTableGrow, 3, 1) \
F(WasmTableFill, 5, 1) \
- F(WasmJSToWasmObject, 3, 1) \
+ F(WasmJSToWasmObject, 2, 1) \
F(WasmCompileLazy, 2, 1) \
F(WasmAllocateFeedbackVector, 3, 1) \
F(WasmCompileWrapper, 2, 1) \
@@ -642,11 +654,14 @@ namespace internal {
F(WasmStringEncodeWtf8Array, 4, 1) \
F(WasmStringAsWtf8, 1, 1) \
F(WasmStringViewWtf8Encode, 6, 1) \
- F(WasmStringViewWtf8Slice, 3, 1)
+ F(WasmStringViewWtf8Slice, 3, 1) \
+ F(WasmStringFromCodePoint, 1, 1) \
+ F(WasmStringHash, 1, 1)
#define FOR_EACH_INTRINSIC_WASM_TEST(F, I) \
F(DeserializeWasmModule, 2, 1) \
F(DisallowWasmCodegen, 1, 1) \
+ F(FlushWasmCode, 0, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
F(GetWasmExceptionTagId, 2, 1) \
F(GetWasmExceptionValues, 1, 1) \
@@ -654,17 +669,21 @@ namespace internal {
F(IsAsmWasmCode, 1, 1) \
F(IsLiftoffFunction, 1, 1) \
F(IsTurboFanFunction, 1, 1) \
+ F(IsWasmDebugFunction, 1, 1) \
+ F(IsUncompiledWasmFunction, 1, 1) \
F(IsThreadInWasm, 0, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
F(SerializeWasmModule, 1, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
+ F(SetWasmGCEnabled, 1, 1) \
+ F(WasmCompiledExportWrappersCount, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumCodeSpaces, 1, 1) \
- F(WasmTierDown, 0, 1) \
- F(WasmTierUp, 0, 1) \
- F(WasmTierUpFunction, 2, 1) \
+ F(WasmEnterDebugging, 0, 1) \
+ F(WasmLeaveDebugging, 0, 1) \
+ F(WasmTierUpFunction, 1, 1) \
F(WasmTraceEnter, 0, 1) \
F(WasmTraceExit, 1, 1) \
F(WasmTraceMemory, 1, 1)
@@ -868,6 +887,29 @@ class Runtime : public AllStatic {
Handle<Object> receiver = Handle<Object>(),
bool* is_found = nullptr);
+  // Look up a private member with a name matching "desc" and return its
+  // value. "desc" should be a #-prefixed string; in the case of private fields,
+  // it should match the description of the private name symbol. Throw an error
+  // if the found private member is an accessor without a getter, if there is no
+  // matching private member, or if there is more than one matching private
+  // member (which would be ambiguous). If the found private member is an
+  // accessor with a getter, the getter will be called to get the value.
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ GetPrivateMember(Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<String> desc);
+
+  // Look up a private member with a name matching "desc" and set it to
+  // "value". "desc" should be a #-prefixed string; in the case of private
+  // fields, it should match the description of the private name symbol. Throw
+  // an error if the found private member is a private method, an accessor
+  // without a setter, if there is no matching private member, or if there is
+  // more than one matching private member (which would be ambiguous).
+  // If the found private member is an accessor with a setter, the setter will
+  // be called to set the value.
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ SetPrivateMember(Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<String> desc, Handle<Object> value);
+
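As a usage illustration of the two declarations above (hypothetical caller; the member name, the assumption that it holds a number, and the error handling are all made up for the example):

// Illustrative only: read a private field "#count" via Runtime::GetPrivateMember,
// add one, and write it back via Runtime::SetPrivateMember.
MaybeHandle<Object> BumpPrivateCounterSketch(Isolate* isolate,
                                             Handle<JSReceiver> receiver) {
  Handle<String> desc =
      isolate->factory()->NewStringFromAsciiChecked("#count");
  Handle<Object> current;
  if (!Runtime::GetPrivateMember(isolate, receiver, desc).ToHandle(&current)) {
    return {};  // No matching member (or it was ambiguous); an error was thrown.
  }
  // Assumes the member holds a number, purely for illustration.
  Handle<Object> next = isolate->factory()->NewNumber(current->Number() + 1);
  return Runtime::SetPrivateMember(isolate, receiver, desc, next);
}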
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> HasProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key);
@@ -944,6 +986,8 @@ enum class OptimizationStatus {
kTopmostFrameIsInterpreted = 1 << 16,
kTopmostFrameIsBaseline = 1 << 17,
kIsLazy = 1 << 18,
+ kTopmostFrameIsMaglev = 1 << 19,
+ kOptimizeOnNextCallOptimizesToMaglev = 1 << 20,
};
} // namespace internal
diff --git a/deps/v8/src/sandbox/external-pointer-inl.h b/deps/v8/src/sandbox/external-pointer-inl.h
index 7353f23db0..9ad36d66aa 100644
--- a/deps/v8/src/sandbox/external-pointer-inl.h
+++ b/deps/v8/src/sandbox/external-pointer-inl.h
@@ -34,51 +34,49 @@ template <ExternalPointerTag tag>
V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
Address value) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- ExternalPointerTable& table = GetExternalPointerTable<tag>(isolate);
- ExternalPointerHandle handle =
- table.AllocateAndInitializeEntry(isolate, value, tag);
- // Use a Release_Store to ensure that the store of the pointer into the
- // table is not reordered after the store of the handle. Otherwise, other
- // threads may access an uninitialized table entry and crash.
- auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
- base::AsAtomic32::Release_Store(location, handle);
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+ static_assert(tag != kExternalPointerNullTag);
+ ExternalPointerTable& table = GetExternalPointerTable<tag>(isolate);
+ ExternalPointerHandle handle =
+ table.AllocateAndInitializeEntry(isolate, value, tag);
+ // Use a Release_Store to ensure that the store of the pointer into the
+ // table is not reordered after the store of the handle. Otherwise, other
+ // threads may access an uninitialized table entry and crash.
+ auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
+ base::AsAtomic32::Release_Store(location, handle);
+#else
WriteExternalPointerField<tag>(field_address, isolate, value);
+#endif // V8_ENABLE_SANDBOX
}
template <ExternalPointerTag tag>
V8_INLINE Address ReadExternalPointerField(Address field_address,
const Isolate* isolate) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- // Handles may be written to objects from other threads so the handle needs
- // to be loaded atomically. We assume that the load from the table cannot
- // be reordered before the load of the handle due to the data dependency
- // between the two loads and therefore use relaxed memory ordering.
- auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
- ExternalPointerHandle handle = base::AsAtomic32::Relaxed_Load(location);
- return GetExternalPointerTable<tag>(isolate).Get(handle, tag);
- }
-#endif // V8_ENABLE_SANDBOX
+ static_assert(tag != kExternalPointerNullTag);
+ // Handles may be written to objects from other threads so the handle needs
+ // to be loaded atomically. We assume that the load from the table cannot
+ // be reordered before the load of the handle due to the data dependency
+ // between the two loads and therefore use relaxed memory ordering.
+ auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
+ ExternalPointerHandle handle = base::AsAtomic32::Relaxed_Load(location);
+ return GetExternalPointerTable<tag>(isolate).Get(handle, tag);
+#else
return ReadMaybeUnalignedValue<Address>(field_address);
+#endif // V8_ENABLE_SANDBOX
}
template <ExternalPointerTag tag>
V8_INLINE void WriteExternalPointerField(Address field_address,
Isolate* isolate, Address value) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- // See comment above for why this is a Relaxed_Load.
- auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
- ExternalPointerHandle handle = base::AsAtomic32::Relaxed_Load(location);
- GetExternalPointerTable<tag>(isolate).Set(handle, value, tag);
- return;
- }
-#endif // V8_ENABLE_SANDBOX
+ static_assert(tag != kExternalPointerNullTag);
+ // See comment above for why this is a Relaxed_Load.
+ auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
+ ExternalPointerHandle handle = base::AsAtomic32::Relaxed_Load(location);
+ GetExternalPointerTable<tag>(isolate).Set(handle, value, tag);
+#else
WriteMaybeUnalignedValue<Address>(field_address, value);
+#endif // V8_ENABLE_SANDBOX
}
template <ExternalPointerTag tag>
@@ -86,23 +84,22 @@ V8_INLINE void WriteLazilyInitializedExternalPointerField(Address field_address,
Isolate* isolate,
Address value) {
#ifdef V8_ENABLE_SANDBOX
- if (IsSandboxedExternalPointerType(tag)) {
- // See comment above for why this uses a Relaxed_Load and Release_Store.
- ExternalPointerTable& table = GetExternalPointerTable<tag>(isolate);
- auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
- ExternalPointerHandle handle = base::AsAtomic32::Relaxed_Load(location);
- if (handle == kNullExternalPointerHandle) {
- // Field has not been initialized yet.
- ExternalPointerHandle handle =
- table.AllocateAndInitializeEntry(isolate, value, tag);
- base::AsAtomic32::Release_Store(location, handle);
- } else {
- table.Set(handle, value, tag);
- }
- return;
+ static_assert(tag != kExternalPointerNullTag);
+ // See comment above for why this uses a Relaxed_Load and Release_Store.
+ ExternalPointerTable& table = GetExternalPointerTable<tag>(isolate);
+ auto location = reinterpret_cast<ExternalPointerHandle*>(field_address);
+ ExternalPointerHandle handle = base::AsAtomic32::Relaxed_Load(location);
+ if (handle == kNullExternalPointerHandle) {
+ // Field has not been initialized yet.
+ ExternalPointerHandle handle =
+ table.AllocateAndInitializeEntry(isolate, value, tag);
+ base::AsAtomic32::Release_Store(location, handle);
+ } else {
+ table.Set(handle, value, tag);
}
-#endif // V8_ENABLE_SANDBOX
+#else
WriteMaybeUnalignedValue<Address>(field_address, value);
+#endif // V8_ENABLE_SANDBOX
}
} // namespace internal
diff --git a/deps/v8/src/sandbox/external-pointer-table-inl.h b/deps/v8/src/sandbox/external-pointer-table-inl.h
index 6b8694b2ba..8768840ba7 100644
--- a/deps/v8/src/sandbox/external-pointer-table-inl.h
+++ b/deps/v8/src/sandbox/external-pointer-table-inl.h
@@ -159,7 +159,9 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
// values would not be the same. This scenario is unproblematic though as the
// new entry will already be marked as alive as it has just been allocated.
DCHECK(handle == kNullExternalPointerHandle ||
- handle == *reinterpret_cast<ExternalPointerHandle*>(handle_location));
+ handle ==
+ base::AsAtomic32::Acquire_Load(
+ reinterpret_cast<ExternalPointerHandle*>(handle_location)));
uint32_t index = HandleToIndex(handle);
@@ -176,9 +178,12 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
if (new_handle) {
DCHECK_LT(HandleToIndex(new_handle), current_start_of_evacuation_area);
uint32_t index = HandleToIndex(new_handle);
- // No need for an atomic store as the entry will only be accessed during
- // sweeping.
- Store(index, Entry::MakeEvacuationEntry(handle_location));
+ // Even though the new entry will only be accessed during sweeping, this
+ // still needs to be an atomic write as another thread may attempt (and
+ // fail) to allocate the same table entry, thereby causing a read from
+ // this memory location. Without an atomic store here, TSan would then
+ // complain about a data race.
+ RelaxedStore(index, Entry::MakeEvacuationEntry(handle_location));
#ifdef DEBUG
// Mark the handle as visited in debug builds to detect double
// initialization of external pointer fields.
diff --git a/deps/v8/src/sandbox/sandbox.cc b/deps/v8/src/sandbox/sandbox.cc
index 8738690f9c..0999ba71f8 100644
--- a/deps/v8/src/sandbox/sandbox.cc
+++ b/deps/v8/src/sandbox/sandbox.cc
@@ -10,6 +10,7 @@
#include "src/base/cpu.h"
#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/lazy-instance.h"
+#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
#include "src/base/virtual-address-space-page-allocator.h"
#include "src/base/virtual-address-space.h"
@@ -17,19 +18,15 @@
#include "src/sandbox/sandboxed-pointer.h"
#include "src/utils/allocation.h"
-#if defined(V8_OS_WIN)
-#include <windows.h>
-// This has to come after windows.h.
-#include <versionhelpers.h> // For IsWindows8Point1OrGreater().
-#endif
-
namespace v8 {
namespace internal {
#ifdef V8_ENABLE_SANDBOX
-// Best-effort helper function to determine the size of the userspace virtual
-// address space. Used to determine appropriate sandbox size and placement.
+// Best-effort function to determine the approximate size of the virtual
+// address space that can be addressed by this process. Used to determine
+// appropriate sandbox size and placement.
+// The value returned by this function will always be a power of two.
static Address DetermineAddressSpaceLimit() {
#ifndef V8_TARGET_ARCH_64_BIT
#error Unsupported target architecture.
@@ -41,11 +38,11 @@ static Address DetermineAddressSpaceLimit() {
constexpr unsigned kMinVirtualAddressBits = 36;
constexpr unsigned kMaxVirtualAddressBits = 64;
- Address virtual_address_bits = kDefaultVirtualAddressBits;
+ unsigned hardware_virtual_address_bits = kDefaultVirtualAddressBits;
#if defined(V8_TARGET_ARCH_X64)
base::CPU cpu;
if (cpu.exposes_num_virtual_address_bits()) {
- virtual_address_bits = cpu.num_virtual_address_bits();
+ hardware_virtual_address_bits = cpu.num_virtual_address_bits();
}
#endif // V8_TARGET_ARCH_X64
@@ -54,36 +51,39 @@ static Address DetermineAddressSpaceLimit() {
// userspace and kernel each) as that appears to be the most common
// configuration and there seems to be no easy way to retrieve the actual
// number of virtual address bits from the CPU in userspace.
- virtual_address_bits = 40;
+ hardware_virtual_address_bits = 40;
#endif
+ // Assume virtual address space is split 50/50 between userspace and kernel.
+ hardware_virtual_address_bits -= 1;
+
+  // Check if there is a software-imposed limit on the size of the address
+  // space. For example, older Windows versions limit the address space to 8TB:
+  // https://learn.microsoft.com/en-us/windows/win32/memory/memory-limits-for-windows-releases.
+ Address software_limit = base::SysInfo::AddressSpaceEnd();
+ // Compute the next power of two that is larger or equal to the limit.
+ unsigned software_virtual_address_bits =
+ 64 - base::bits::CountLeadingZeros(software_limit - 1);
+
+ // The available address space is the smaller of the two limits.
+ unsigned virtual_address_bits =
+ std::min(hardware_virtual_address_bits, software_virtual_address_bits);
+
// Guard against nonsensical values.
if (virtual_address_bits < kMinVirtualAddressBits ||
virtual_address_bits > kMaxVirtualAddressBits) {
virtual_address_bits = kDefaultVirtualAddressBits;
}
- // Assume virtual address space is split 50/50 between userspace and kernel.
- Address userspace_virtual_address_bits = virtual_address_bits - 1;
- Address address_space_limit = 1ULL << userspace_virtual_address_bits;
-
-#if defined(V8_OS_WIN_X64)
- if (!IsWindows8Point1OrGreater()) {
- // On Windows pre 8.1 userspace is limited to 8TB on X64. See
- // https://docs.microsoft.com/en-us/windows/win32/memory/memory-limits-for-windows-releases
- address_space_limit = 8ULL * TB;
- }
-#endif // V8_OS_WIN_X64
-
- return address_space_limit;
+ return 1ULL << virtual_address_bits;
}
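The new limit computation takes the smaller of the hardware limit (CPU-reported virtual address bits, halved for the user/kernel split) and the software limit rounded up to a power of two. A self-contained sketch of that arithmetic; the 8 TB figure is only an example of an OS-imposed cap:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Number of address bits needed to cover `limit` bytes, i.e. the exponent of
// the next power of two >= limit (mirrors 64 - CountLeadingZeros(limit - 1)).
unsigned AddressBitsFor(uint64_t limit) {
  unsigned bits = 0;
  while (bits < 64 && (uint64_t{1} << bits) < limit) bits++;
  return bits;
}

int main() {
  unsigned hardware_bits = 48 - 1;  // 48-bit VA, split 50/50 with the kernel
  uint64_t software_limit = uint64_t{8} << 40;              // e.g. an 8 TB cap
  unsigned software_bits = AddressBitsFor(software_limit);  // -> 43
  unsigned virtual_address_bits = std::min(hardware_bits, software_bits);
  printf("usable userspace address space: 2^%u bytes\n", virtual_address_bits);
  return 0;
}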
void Sandbox::Initialize(v8::VirtualAddressSpace* vas) {
- // Take the number of virtual address bits into account when determining the
- // size of the address space reservation backing the sandbox. For example, if
- // there are only 40 bits available, split evenly between userspace and
- // kernel, then userspace can only address 512GB and so we use a quarter of
- // that, 128GB, as maximum reservation size.
+ // Take the size of the virtual address space into account when determining
+ // the size of the address space reservation backing the sandbox. For
+ // example, if we only have a 40-bit address space, split evenly between
+ // userspace and kernel, then userspace can only address 512GB and so we use
+ // a quarter of that, 128GB, as maximum reservation size.
Address address_space_limit = DetermineAddressSpaceLimit();
// Note: this is technically the maximum reservation size excluding the guard
// regions (which are not created for partially-reserved sandboxes).
@@ -93,63 +93,50 @@ void Sandbox::Initialize(v8::VirtualAddressSpace* vas) {
// otherwise wouldn't always be able to allocate objects inside of it.
CHECK_LT(kSandboxSize, address_space_limit);
-#if defined(V8_OS_WIN)
- if (!IsWindows8Point1OrGreater()) {
- // On Windows pre 8.1, reserving virtual memory is an expensive operation,
- // apparently because the OS already charges for the memory required for
- // all page table entries. For example, a 1TB reservation increases private
- // memory usage by 2GB. As such, it is not possible to create a proper
- // sandbox there and so a partially reserved sandbox is created which
- // doesn't reserve most of the virtual memory, and so doesn't incur the
- // cost, but also doesn't provide the desired security benefits.
- max_reservation_size = kSandboxMinimumReservationSize;
- }
-#endif // V8_OS_WIN
-
if (!vas->CanAllocateSubspaces()) {
- // If we cannot create virtual memory subspaces, we also need to fall back
- // to creating a partially reserved sandbox. In practice, this should only
- // happen on Windows version before Windows 10, maybe including early
- // Windows 10 releases, where the necessary memory management APIs, in
- // particular, VirtualAlloc2, are not available. This check should also in
- // practice subsume the preceeding one for Windows 8 and earlier, but we'll
- // keep both just to be sure since there the partially reserved sandbox is
- // technically required for a different reason (large virtual memory
- // reservations being too expensive).
+ // If we cannot create virtual memory subspaces, we fall back to creating a
+ // partially reserved sandbox. This will happen for example on older
+ // Windows versions (before Windows 10) where the necessary memory
+ // management APIs, in particular, VirtualAlloc2, are not available.
+ // Since reserving virtual memory is an expensive operation on Windows
+ // before version 8.1 (reserving 1TB of address space will increase private
+ // memory usage by around 2GB), we only reserve the minimal amount of
+ // address space here. This way, we don't incur the cost of reserving
+ // virtual memory, but also don't get the desired security properties as
+ // unrelated mappings may end up inside the sandbox.
max_reservation_size = kSandboxMinimumReservationSize;
}
// If the maximum reservation size is less than the size of the sandbox, we
// can only create a partially-reserved sandbox.
- if (max_reservation_size < kSandboxSize) {
+ bool success;
+ size_t reservation_size = std::min(kSandboxSize, max_reservation_size);
+ DCHECK(base::bits::IsPowerOfTwo(reservation_size));
+ if (reservation_size < kSandboxSize) {
DCHECK_GE(max_reservation_size, kSandboxMinimumReservationSize);
- InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
- max_reservation_size);
+ success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
+ reservation_size);
} else {
+ DCHECK_EQ(kSandboxSize, reservation_size);
constexpr bool use_guard_regions = true;
- bool success = Initialize(vas, kSandboxSize, use_guard_regions);
-#ifdef V8_ENABLE_SANDBOX
- // If sandboxed pointers are enabled, we need the sandbox to be initialized,
- // so fall back to creating a partially reserved sandbox.
- if (!success) {
- // Try halving the size of the backing reservation until the minimum
- // reservation size is reached.
- size_t next_reservation_size = kSandboxSize / 2;
- while (!success &&
- next_reservation_size >= kSandboxMinimumReservationSize) {
- success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
- next_reservation_size);
- next_reservation_size /= 2;
- }
- }
-#endif // V8_ENABLE_SANDBOX
+ success = Initialize(vas, kSandboxSize, use_guard_regions);
+ }
+
+ // Fall back to creating a (smaller) partially reserved sandbox.
+ while (!success && reservation_size > kSandboxMinimumReservationSize) {
+ reservation_size /= 2;
+ DCHECK_GE(reservation_size, kSandboxMinimumReservationSize);
+ success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
+ reservation_size);
}
- if (!initialized_) {
+ if (!success) {
V8::FatalProcessOutOfMemory(
nullptr,
"Failed to reserve the virtual address space for the V8 sandbox");
}
+
+ DCHECK(initialized_);
}
bool Sandbox::Initialize(v8::VirtualAddressSpace* vas, size_t size,
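The rewritten initialization path boils down to: try the chosen reservation once, then repeatedly halve it and fall back to a partially reserved sandbox until the minimum size is reached. A compact sketch of that control flow with a stubbed-out reservation routine; the sizes and the TryReserve stub are invented for illustration:

#include <cstddef>
#include <cstdio>

// Stand-in for the real reservation call; pretend only 64 GB can be reserved.
bool TryReserve(size_t bytes) { return bytes <= (size_t{1} << 36); }

int main() {
  constexpr size_t kSandboxSize = size_t{1} << 40;                    // 1 TB
  constexpr size_t kSandboxMinimumReservationSize = size_t{1} << 32;  // 4 GB

  size_t reservation_size = kSandboxSize;
  bool success = TryReserve(reservation_size);
  // Fall back to a smaller, partially reserved sandbox by halving the backing
  // reservation until it succeeds or reaches the minimum reservation size.
  while (!success && reservation_size > kSandboxMinimumReservationSize) {
    reservation_size /= 2;
    success = TryReserve(reservation_size);
  }
  if (!success) {
    printf("failed to reserve address space for the sandbox\n");
    return 1;
  }
  printf("reserved %zu bytes\n", reservation_size);
  return 0;
}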
@@ -196,7 +183,7 @@ bool Sandbox::Initialize(v8::VirtualAddressSpace* vas, size_t size,
initialized_ = true;
- InitializeConstants();
+ FinishInitialization();
DCHECK(!is_partially_reserved());
return true;
@@ -259,18 +246,35 @@ bool Sandbox::InitializeAsPartiallyReservedSandbox(v8::VirtualAddressSpace* vas,
std::make_unique<base::VirtualAddressSpacePageAllocator>(
address_space_.get());
- InitializeConstants();
+ FinishInitialization();
DCHECK(is_partially_reserved());
return true;
}
+void Sandbox::FinishInitialization() {
+ // Reserve the last page in the sandbox. This way, we can place inaccessible
+ // "objects" (e.g. the empty backing store buffer) there that are guaranteed
+ // to cause a fault on any accidental access.
+ // Further, this also prevents the accidental construction of invalid
+ // SandboxedPointers: if an ArrayBuffer is placed right at the end of the
+ // sandbox, an ArrayBufferView could be constructed with byteLength=0 and
+ // offset=buffer.byteLength, which would lead to a pointer that points just
+ // outside of the sandbox.
+ size_t allocation_granularity = address_space_->allocation_granularity();
+ bool success = address_space_->AllocateGuardRegion(
+ end_ - allocation_granularity, allocation_granularity);
+ // If the sandbox is partially-reserved, this operation may fail, for example
+ // if the last page is outside of the mappable address space of the process.
+ CHECK(success || is_partially_reserved());
+
+ InitializeConstants();
+}
+
void Sandbox::InitializeConstants() {
-#ifdef V8_ENABLE_SANDBOX
// Place the empty backing store buffer at the end of the sandbox, so that any
// accidental access to it will most likely hit a guard page.
- constants_.set_empty_backing_store_buffer(base_ + size_ - 1);
-#endif
+ constants_.set_empty_backing_store_buffer(end_ - 1);
}
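To see why the last page is reserved, it helps to write out the addresses involved: the empty-backing-store constant lands inside the guard page, and the problematic ArrayBufferView case (offset == buffer.byteLength) would otherwise yield a pointer one byte past the sandbox. A quick sketch with hypothetical base, size, and granularity values:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical layout, chosen only to make the arithmetic concrete.
  const uint64_t base = 0x400000000000;     // sandbox base address
  const uint64_t size = uint64_t{1} << 40;  // 1 TB sandbox
  const uint64_t end = base + size;
  const uint64_t granularity = 64 * 1024;   // allocation granularity

  // The last page of the sandbox is reserved as an inaccessible guard region.
  const uint64_t guard_start = end - granularity;

  // The empty backing store buffer is placed at end - 1, inside that guard
  // page, so any accidental access faults.
  const uint64_t empty_backing_store_buffer = end - 1;
  assert(empty_backing_store_buffer >= guard_start);
  assert(empty_backing_store_buffer < end);

  // Without the reservation, an ArrayBuffer ending exactly at `end` could be
  // used to build a view with offset == buffer.byteLength, i.e. a pointer at
  // `end`, one byte outside the sandbox. Reserving the last page rules out
  // such a buffer placement.
  printf("guard page: [0x%llx, 0x%llx)\n",
         static_cast<unsigned long long>(guard_start),
         static_cast<unsigned long long>(end));
  return 0;
}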
void Sandbox::TearDown() {
@@ -284,17 +288,13 @@ void Sandbox::TearDown() {
reservation_base_ = kNullAddress;
reservation_size_ = 0;
initialized_ = false;
-#ifdef V8_ENABLE_SANDBOX
constants_.Reset();
-#endif
}
}
-#endif // V8_ENABLE_SANDBOX
-
-#ifdef V8_ENABLE_SANDBOX
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Sandbox, GetProcessWideSandbox)
-#endif
+
+#endif // V8_ENABLE_SANDBOX
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/sandbox/sandbox.h b/deps/v8/src/sandbox/sandbox.h
index 95d9cec2b7..6dac96ab2d 100644
--- a/deps/v8/src/sandbox/sandbox.h
+++ b/deps/v8/src/sandbox/sandbox.h
@@ -156,7 +156,6 @@ class V8_EXPORT_PRIVATE Sandbox {
return Contains(reinterpret_cast<Address>(ptr));
}
-#ifdef V8_ENABLE_SANDBOX
class SandboxedPointerConstants final {
public:
Address empty_backing_store_buffer() const {
@@ -175,7 +174,6 @@ class V8_EXPORT_PRIVATE Sandbox {
Address empty_backing_store_buffer_ = 0;
};
const SandboxedPointerConstants& constants() const { return constants_; }
-#endif
Address base_address() const { return reinterpret_cast<Address>(&base_); }
Address end_address() const { return reinterpret_cast<Address>(&end_); }
@@ -209,8 +207,11 @@ class V8_EXPORT_PRIVATE Sandbox {
size_t size,
size_t size_to_reserve);
- // Initialize the constant objects for this sandbox. Called by the Initialize
- // methods above.
+ // Performs final initialization steps after the sandbox address space has
+ // been initialized. Called from the two Initialize variants above.
+ void FinishInitialization();
+
+ // Initialize the constant objects for this sandbox.
void InitializeConstants();
Address base_ = kNullAddress;
@@ -231,18 +232,13 @@ class V8_EXPORT_PRIVATE Sandbox {
// The page allocator instance for this sandbox.
std::unique_ptr<v8::PageAllocator> sandbox_page_allocator_;
-#ifdef V8_ENABLE_SANDBOX
// Constant objects inside this sandbox.
SandboxedPointerConstants constants_;
-#endif
};
-#endif // V8_ENABLE_SANDBOX
-
-#ifdef V8_ENABLE_SANDBOX
-// This function is only available when the sandbox is actually used.
V8_EXPORT_PRIVATE Sandbox* GetProcessWideSandbox();
-#endif
+
+#endif // V8_ENABLE_SANDBOX
V8_INLINE void* EmptyBackingStoreBuffer() {
#ifdef V8_ENABLE_SANDBOX
diff --git a/deps/v8/src/sanitizer/OWNERS b/deps/v8/src/sanitizer/OWNERS
deleted file mode 100644
index 29f827d160..0000000000
--- a/deps/v8/src/sanitizer/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-file:../../INFRA_OWNERS
-
-clemensb@chromium.org
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 95352299f8..c83e8e4581 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -48,8 +48,7 @@ CodeSerializer::CodeSerializer(Isolate* isolate, uint32_t source_hash)
// static
ScriptCompiler::CachedData* CodeSerializer::Serialize(
- Handle<SharedFunctionInfo> info) {
- Isolate* isolate = info->GetIsolate();
+ Isolate* isolate, Handle<SharedFunctionInfo> info) {
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
NestedTimedHistogramScope histogram_timer(
isolate->counters()->compile_serialize());
@@ -108,28 +107,6 @@ AlignedCachedData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
-bool CodeSerializer::SerializeReadOnlyObject(
- HeapObject obj, const DisallowGarbageCollection& no_gc) {
- if (!ReadOnlyHeap::Contains(obj)) return false;
-
- // For objects on the read-only heap, never serialize the object, but instead
- // create a back reference that encodes the page number as the chunk_index and
- // the offset within the page as the chunk_offset.
- Address address = obj.address();
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
- uint32_t chunk_index = 0;
- ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
- for (ReadOnlyPage* page : read_only_space->pages()) {
- if (chunk == page) break;
- ++chunk_index;
- }
- uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
- sink_.Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
- sink_.PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex");
- sink_.PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset");
- return true;
-}
-
void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
ReadOnlyRoots roots(isolate());
InstanceType instance_type;
@@ -139,10 +116,10 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (SerializeHotObject(raw)) return;
if (SerializeRoot(raw)) return;
if (SerializeBackReference(raw)) return;
- if (SerializeReadOnlyObject(raw, no_gc)) return;
+ if (SerializeReadOnlyObjectReference(raw, &sink_)) return;
instance_type = raw.map().instance_type();
- CHECK(!InstanceTypeChecker::IsCode(instance_type));
+ CHECK(!InstanceTypeChecker::IsInstructionStream(instance_type));
if (ElideObject(raw)) {
AllowGarbageCollection allow_gc;
@@ -297,9 +274,9 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
INTERPRETER_DATA_TYPE, AllocationType::kOld));
interpreter_data->set_bytecode_array(info->GetBytecodeArray(isolate));
- interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
+ interpreter_data->set_interpreter_trampoline(*code);
if (info->HasBaselineCode()) {
- FromCodeT(info->baseline_code(kAcquireLoad))
+ info->baseline_code(kAcquireLoad)
.set_bytecode_or_interpreter_data(*interpreter_data);
} else {
info->set_interpreter_data(*interpreter_data);
@@ -429,7 +406,8 @@ void BaselineBatchCompileIfSparkplugCompiled(Isolate* isolate, Script script) {
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
Isolate* isolate, AlignedCachedData* cached_data, Handle<String> source,
- ScriptOriginOptions origin_options) {
+ ScriptOriginOptions origin_options,
+ MaybeHandle<Script> maybe_cached_script) {
if (v8_flags.stress_background_compile) {
StressOffThreadDeserializeThread thread(isolate, cached_data);
CHECK(thread.Start());
@@ -468,6 +446,22 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
if (v8_flags.profile_deserialization) PrintF("[Deserializing failed]\n");
return MaybeHandle<SharedFunctionInfo>();
}
+
+ // Check whether the newly deserialized data should be merged into an
+ // existing Script from the Isolate compilation cache. If so, perform
+ // the merge in a single-threaded manner since this deserialization was
+ // single-threaded.
+ if (Handle<Script> cached_script;
+ maybe_cached_script.ToHandle(&cached_script)) {
+ BackgroundMergeTask merge;
+ merge.SetUpOnMainThread(isolate, cached_script);
+ CHECK(merge.HasPendingBackgroundWork());
+ Handle<Script> new_script = handle(Script::cast(result->script()), isolate);
+ merge.BeginMergeInBackground(isolate->AsLocalIsolate(), new_script);
+ CHECK(merge.HasPendingForegroundWork());
+ result = merge.CompleteMergeInForeground(isolate, new_script);
+ }
+
BaselineBatchCompileIfSparkplugCompiled(isolate,
Script::cast(result->script()));
if (v8_flags.profile_deserialization) {
@@ -618,6 +612,9 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::FinishOffThreadDeserialize(
FinalizeDeserialization(isolate, result, timer);
+ DCHECK(!background_merge_task ||
+ !background_merge_task->HasPendingForegroundWork());
+
return scope.CloseAndEscape(result);
}
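The new maybe_cached_script path runs the usual background-merge steps synchronously on the main thread: set up against the cached script, perform the "background" merge inline against the freshly deserialized script, then complete the merge in the foreground. A schematic sketch of that three-phase shape with a hypothetical MergeTask type, not the real BackgroundMergeTask API:

#include <cstdio>
#include <string>

// Hypothetical three-phase merge task mirroring the shape of the flow above
// (set up on the main thread, do the mergeable work, finish on the main thread).
struct MergeTask {
  std::string cached, fresh;
  bool pending_background = false, pending_foreground = false;

  void SetUpOnMainThread(const std::string& cached_script) {
    cached = cached_script;
    pending_background = true;
  }
  void BeginMergeInBackground(const std::string& new_script) {
    fresh = new_script;           // work that could run off the main thread
    pending_background = false;
    pending_foreground = true;
  }
  std::string CompleteMergeInForeground() {
    pending_foreground = false;
    return cached + "+" + fresh;  // combine results back on the main thread
  }
};

int main() {
  MergeTask merge;
  merge.SetUpOnMainThread("cached script");
  merge.BeginMergeInBackground("deserialized script");  // run inline here
  printf("%s\n", merge.CompleteMergeInForeground().c_str());
  return 0;
}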
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 72d1f87a02..b6d9bec2f0 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -78,14 +78,15 @@ class CodeSerializer : public Serializer {
CodeSerializer(const CodeSerializer&) = delete;
CodeSerializer& operator=(const CodeSerializer&) = delete;
V8_EXPORT_PRIVATE static ScriptCompiler::CachedData* Serialize(
- Handle<SharedFunctionInfo> info);
+ Isolate* isolate, Handle<SharedFunctionInfo> info);
AlignedCachedData* SerializeSharedFunctionInfo(
Handle<SharedFunctionInfo> info);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
Isolate* isolate, AlignedCachedData* cached_data, Handle<String> source,
- ScriptOriginOptions origin_options);
+ ScriptOriginOptions origin_options,
+ MaybeHandle<Script> maybe_cached_script = {});
V8_WARN_UNUSED_RESULT static OffThreadDeserializeData
StartDeserializeOffThread(LocalIsolate* isolate,
@@ -110,9 +111,6 @@ class CodeSerializer : public Serializer {
private:
void SerializeObjectImpl(Handle<HeapObject> o) override;
- bool SerializeReadOnlyObject(HeapObject obj,
- const DisallowGarbageCollection& no_gc);
-
DISALLOW_GARBAGE_COLLECTION(no_gc_)
uint32_t source_hash_;
};
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index d249cf0d5a..7fbeb5e471 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -6,16 +6,19 @@
#include "src/api/api-inl.h"
#include "src/common/assert-scope.h"
+#include "src/logging/counters-scopes.h"
namespace v8 {
namespace internal {
+// static
MaybeHandle<Context> ContextDeserializer::DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
+ NestedTimedHistogramScope histogram_timer(
+ isolate->counters()->snapshot_deserialize_context());
ContextDeserializer d(isolate, data, can_rehash);
-
MaybeHandle<Object> maybe_result =
d.Deserialize(isolate, global_proxy, embedder_fields_deserializer);
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index cec20262ab..f9e2ab8935 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -18,17 +18,14 @@ namespace internal {
namespace {
// During serialization, puts the native context into a state understood by the
-// serializer (e.g. by clearing lists of Code objects). After serialization,
-// the original state is restored.
+// serializer (e.g. by clearing lists of InstructionStream objects). After
+// serialization, the original state is restored.
class V8_NODISCARD SanitizeNativeContextScope final {
public:
SanitizeNativeContextScope(Isolate* isolate, NativeContext native_context,
bool allow_active_isolate_for_testing,
const DisallowGarbageCollection& no_gc)
- : native_context_(native_context),
- optimized_code_list_(native_context.OptimizedCodeListHead()),
- deoptimized_code_list_(native_context.DeoptimizedCodeListHead()),
- no_gc_(no_gc) {
+ : native_context_(native_context), no_gc_(no_gc) {
#ifdef DEBUG
if (!allow_active_isolate_for_testing) {
// Microtasks.
@@ -37,24 +34,16 @@ class V8_NODISCARD SanitizeNativeContextScope final {
DCHECK(!microtask_queue->HasMicrotasksSuppressions());
DCHECK_EQ(0, microtask_queue->GetMicrotasksScopeDepth());
DCHECK(microtask_queue->DebugMicrotasksScopeDepthIsZero());
- // Code lists.
- DCHECK(optimized_code_list_.IsUndefined(isolate));
- DCHECK(deoptimized_code_list_.IsUndefined(isolate));
}
#endif
microtask_queue_external_pointer_ =
native_context
.RawExternalPointerField(NativeContext::kMicrotaskQueueOffset)
.GetAndClearContentForSerialization(no_gc);
- Object undefined = ReadOnlyRoots(isolate).undefined_value();
- native_context.SetOptimizedCodeListHead(undefined);
- native_context.SetDeoptimizedCodeListHead(undefined);
}
~SanitizeNativeContextScope() {
// Restore saved fields.
- native_context_.SetOptimizedCodeListHead(optimized_code_list_);
- native_context_.SetDeoptimizedCodeListHead(deoptimized_code_list_);
native_context_
.RawExternalPointerField(NativeContext::kMicrotaskQueueOffset)
.RestoreContentAfterSerialization(microtask_queue_external_pointer_,
@@ -64,8 +53,6 @@ class V8_NODISCARD SanitizeNativeContextScope final {
private:
NativeContext native_context_;
ExternalPointerSlot::RawContent microtask_queue_external_pointer_;
- const Object optimized_code_list_;
- const Object deoptimized_code_list_;
const DisallowGarbageCollection& no_gc_;
};
@@ -190,7 +177,7 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (closure.shared().HasBaselineCode()) {
closure.shared().FlushBaselineCode();
}
- closure.set_code(closure.shared().GetCode(), kReleaseStore);
+ closure.set_code(closure.shared().GetCode(isolate()), kReleaseStore);
}
}
}
@@ -207,10 +194,9 @@ bool ContextSerializer::ShouldBeInTheStartupObjectCache(HeapObject o) {
// contain a unique ID, and deserializing several context snapshots containing
// script would cause dupes.
return o.IsName() || o.IsScript() || o.IsSharedFunctionInfo() ||
- o.IsHeapNumber() ||
- (V8_EXTERNAL_CODE_SPACE_BOOL && o.IsCodeDataContainer()) ||
- o.IsCode() || o.IsScopeInfo() || o.IsAccessorInfo() ||
- o.IsTemplateInfo() || o.IsClassPositions() ||
+ o.IsHeapNumber() || o.IsCode() || o.IsInstructionStream() ||
+ o.IsScopeInfo() || o.IsAccessorInfo() || o.IsTemplateInfo() ||
+ o.IsClassPositions() ||
o.map() == ReadOnlyRoots(isolate()).fixed_cow_array_map();
}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 0fb3655949..e71af5266d 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -9,6 +9,7 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap-write-barrier.h"
@@ -195,6 +196,7 @@ Deserializer<IsolateT>::Deserializer(IsolateT* isolate,
: isolate_(isolate),
source_(payload),
magic_number_(magic_number),
+ new_descriptor_arrays_(isolate->heap()),
deserializing_user_code_(deserializing_user_code),
should_rehash_((v8_flags.rehash_snapshot && can_rehash) ||
deserializing_user_code) {
@@ -272,21 +274,13 @@ void Deserializer<IsolateT>::LogNewMapEvents() {
template <typename IsolateT>
void Deserializer<IsolateT>::WeakenDescriptorArrays() {
- DisallowGarbageCollection no_gc;
- Map descriptor_array_map = ReadOnlyRoots(isolate()).descriptor_array_map();
- for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
- DescriptorArray raw = *descriptor_array;
- DCHECK(raw.IsStrongDescriptorArray());
- raw.set_map_safe_transition(descriptor_array_map);
- WriteBarrier::Marking(raw, raw.number_of_descriptors());
- }
+ isolate()->heap()->WeakenDescriptorArrays(std::move(new_descriptor_arrays_));
}
template <typename IsolateT>
void Deserializer<IsolateT>::LogScriptEvents(Script script) {
DisallowGarbageCollection no_gc;
- LOG(isolate(),
- ScriptEvent(V8FileLogger::ScriptEventType::kDeserialize, script.id()));
+ LOG(isolate(), ScriptEvent(ScriptEventType::kDeserialize, script.id()));
LOG(isolate(), ScriptDetails(script));
}
@@ -362,8 +356,9 @@ void Deserializer<Isolate>::PostProcessNewJSReceiver(Map map,
SnapshotSpace space) {
DCHECK_EQ(map.instance_type(), instance_type);
- if (InstanceTypeChecker::IsJSDataView(instance_type)) {
- auto data_view = JSDataView::cast(*obj);
+ if (InstanceTypeChecker::IsJSDataView(instance_type) ||
+ InstanceTypeChecker::IsJSRabGsabDataView(instance_type)) {
+ auto data_view = JSDataViewOrRabGsabDataView::cast(*obj);
auto buffer = JSArrayBuffer::cast(data_view.buffer());
if (buffer.was_detached()) {
// Directly set the data pointer to point to the EmptyBackingStoreBuffer.
@@ -441,10 +436,10 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
if (space == SnapshotSpace::kReadOnlyHeap) {
- to_rehash_.push_back(obj);
+ PushObjectToRehash(obj);
}
} else if (raw_obj.NeedsRehashing(instance_type)) {
- to_rehash_.push_back(obj);
+ PushObjectToRehash(obj);
}
if (deserializing_user_code()) {
@@ -490,30 +485,23 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
}
}
- if (InstanceTypeChecker::IsCode(instance_type)) {
+ if (InstanceTypeChecker::IsInstructionStream(instance_type)) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
if (deserializing_user_code()) {
- new_code_objects_.push_back(Handle<Code>::cast(obj));
+ new_code_objects_.push_back(Handle<InstructionStream>::cast(obj));
}
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
- auto code_data_container = CodeDataContainer::cast(raw_obj);
- code_data_container.init_code_entry_point(main_thread_isolate(),
- kNullAddress);
-#ifdef V8_EXTERNAL_CODE_SPACE
- if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- code_data_container.is_off_heap_trampoline()) {
- Address entry = OffHeapInstructionStart(code_data_container,
- code_data_container.builtin_id());
- code_data_container.SetEntryPointForOffHeapBuiltin(main_thread_isolate(),
- entry);
+ } else if (InstanceTypeChecker::IsCode(instance_type)) {
+ Code code = Code::cast(raw_obj);
+ code.init_code_entry_point(main_thread_isolate(), kNullAddress);
+ if (!code.has_instruction_stream()) {
+ code.SetEntryPointForOffHeapBuiltin(main_thread_isolate(),
+ code.OffHeapInstructionStart());
} else {
- code_data_container.UpdateCodeEntryPoint(main_thread_isolate(),
- code_data_container.code());
+ code.UpdateCodeEntryPoint(main_thread_isolate(),
+ code.instruction_stream());
}
-#endif
} else if (InstanceTypeChecker::IsMap(instance_type)) {
if (v8_flags.log_maps) {
// Keep track of all seen Maps to log them later since they might be only
@@ -539,7 +527,7 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
} else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
- new_descriptor_arrays_.push_back(descriptors);
+ new_descriptor_arrays_.Push(*descriptors);
} else if (InstanceTypeChecker::IsNativeContext(instance_type)) {
NativeContext::cast(raw_obj).init_microtask_queue(main_thread_isolate(),
nullptr);
@@ -691,7 +679,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
PostProcessNewObject(map, obj, space);
#ifdef DEBUG
- if (obj->IsCode(cage_base)) {
+ if (obj->IsInstructionStream(cage_base)) {
DCHECK(space == SnapshotSpace::kCode ||
space == SnapshotSpace::kReadOnlyHeap);
} else {
@@ -742,11 +730,11 @@ class DeserializerRelocInfoVisitor {
DCHECK_EQ(current_object_, objects_->size());
}
- void VisitCodeTarget(Code host, RelocInfo* rinfo);
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
- void VisitExternalReference(Code host, RelocInfo* rinfo);
- void VisitInternalReference(Code host, RelocInfo* rinfo);
- void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
+ void VisitCodeTarget(RelocInfo* rinfo);
+ void VisitEmbeddedPointer(RelocInfo* rinfo);
+ void VisitExternalReference(RelocInfo* rinfo);
+ void VisitInternalReference(RelocInfo* rinfo);
+ void VisitOffHeapTarget(RelocInfo* rinfo);
private:
Isolate* isolate() { return deserializer_->isolate(); }
@@ -757,21 +745,19 @@ class DeserializerRelocInfoVisitor {
int current_object_;
};
-void DeserializerRelocInfoVisitor::VisitCodeTarget(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitCodeTarget(RelocInfo* rinfo) {
HeapObject object = *objects_->at(current_object_++);
- rinfo->set_target_address(Code::cast(object).raw_instruction_start());
+ rinfo->set_target_address(
+ InstructionStream::cast(object).instruction_start());
}
-void DeserializerRelocInfoVisitor::VisitEmbeddedPointer(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
HeapObject object = *objects_->at(current_object_++);
// Embedded object reference must be a strong one.
rinfo->set_target_object(isolate()->heap(), object);
}
-void DeserializerRelocInfoVisitor::VisitExternalReference(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitExternalReference(RelocInfo* rinfo) {
byte data = source().Get();
CHECK_EQ(data, Deserializer<Isolate>::kExternalReference);
@@ -780,32 +766,27 @@ void DeserializerRelocInfoVisitor::VisitExternalReference(Code host,
if (rinfo->IsCodedSpecially()) {
Address location_of_branch_data = rinfo->pc();
Assembler::deserialization_set_special_target_at(location_of_branch_data,
- host, address);
+ rinfo->code(), address);
} else {
WriteUnalignedValue(rinfo->target_address_address(), address);
}
}
-void DeserializerRelocInfoVisitor::VisitInternalReference(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitInternalReference(RelocInfo* rinfo) {
byte data = source().Get();
CHECK_EQ(data, Deserializer<Isolate>::kInternalReference);
- // Internal reference target is encoded as an offset from code entry.
+ // An internal reference target is encoded as an offset from code entry.
int target_offset = source().GetInt();
- // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
- // consider using raw_instruction_size() instead of raw_body_size() in the
- // future.
- static_assert(Code::kOnHeapBodyIsContiguous);
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
DCHECK_LT(static_cast<unsigned>(target_offset),
- static_cast<unsigned>(host.raw_body_size()));
- Address target = host.entry() + target_offset;
+ static_cast<unsigned>(rinfo->code().instruction_size()));
+ Address target = rinfo->code().InstructionStart() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
rinfo->pc(), target, rinfo->rmode());
}
-void DeserializerRelocInfoVisitor::VisitOffHeapTarget(Code host,
- RelocInfo* rinfo) {
+void DeserializerRelocInfoVisitor::VisitOffHeapTarget(RelocInfo* rinfo) {
// Currently we don't serialize code that contains near builtin entries.
DCHECK_NE(rinfo->rmode(), RelocInfo::NEAR_BUILTIN_ENTRY);
@@ -823,7 +804,7 @@ void DeserializerRelocInfoVisitor::VisitOffHeapTarget(Code host,
if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
Address location_of_branch_data = rinfo->pc();
Assembler::deserialization_set_special_target_at(location_of_branch_data,
- host, address);
+ rinfo->code(), address);
} else {
WriteUnalignedValue(rinfo->target_address_address(), address);
}
@@ -905,366 +886,490 @@ template <typename IsolateT>
template <typename SlotAccessor>
int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
SlotAccessor slot_accessor) {
- using TSlot = decltype(slot_accessor.slot());
-
switch (data) {
- // Deserialize a new object and write a pointer to it to the current
- // object.
- case CASE_RANGE_ALL_SPACES(kNewObject): {
- SnapshotSpace space = NewObject::Decode(data);
- // Save the reference type before recursing down into reading the object.
- HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
- Handle<HeapObject> heap_object = ReadObject(space);
- return slot_accessor.Write(heap_object, ref_type);
- }
-
- // Find a recently deserialized object using its offset from the current
- // allocation point and write a pointer to it to the current object.
- case kBackref: {
- Handle<HeapObject> heap_object = GetBackReferencedObject();
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
-
- // Reference an object in the read-only heap. This should be used when an
- // object is read-only, but is not a root.
- case kReadOnlyHeapRef: {
- DCHECK(isolate()->heap()->deserialization_complete());
- uint32_t chunk_index = source_.GetInt();
- uint32_t chunk_offset = source_.GetInt();
-
- ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
- ReadOnlyPage* page = read_only_space->pages()[chunk_index];
- Address address = page->OffsetToAddress(chunk_offset);
- HeapObject heap_object = HeapObject::FromAddress(address);
+ case CASE_RANGE_ALL_SPACES(kNewObject):
+ return ReadNewObject(data, slot_accessor);
+ case kBackref:
+ return ReadBackref(data, slot_accessor);
+ case kReadOnlyHeapRef:
+ return ReadReadOnlyHeapRef(data, slot_accessor);
+ case kRootArray:
+ return ReadRootArray(data, slot_accessor);
+ case kStartupObjectCache:
+ return ReadStartupObjectCache(data, slot_accessor);
+ case kReadOnlyObjectCache:
+ return ReadReadOnlyObjectCache(data, slot_accessor);
+ case kSharedHeapObjectCache:
+ return ReadSharedHeapObjectCache(data, slot_accessor);
+ case kNewMetaMap:
+ return ReadNewMetaMap(data, slot_accessor);
+ case kSandboxedExternalReference:
+ case kExternalReference:
+ return ReadExternalReference(data, slot_accessor);
+ case kSandboxedRawExternalReference:
+ case kRawExternalReference:
+ return ReadRawExternalReference(data, slot_accessor);
+ case kInternalReference:
+ case kOffHeapTarget:
+ // These bytecodes are expected only during RelocInfo iteration.
+ UNREACHABLE();
+ case kAttachedReference:
+ return ReadAttachedReference(data, slot_accessor);
+ case kNop:
+ return 0;
+ case kRegisterPendingForwardRef:
+ return ReadRegisterPendingForwardRef(data, slot_accessor);
+ case kResolvePendingForwardRef:
+ return ReadResolvePendingForwardRef(data, slot_accessor);
+ case kSynchronize:
+ // If we get here then that indicates that you have a mismatch between
+ // the number of GC roots when serializing and deserializing.
+ UNREACHABLE();
+ case kVariableRawData:
+ return ReadVariableRawData(data, slot_accessor);
+ case kCodeBody:
+ return ReadCodeBody(data, slot_accessor);
+ case kVariableRepeat:
+ return ReadVariableRepeat(data, slot_accessor);
+ case kOffHeapBackingStore:
+ case kOffHeapResizableBackingStore:
+ return ReadOffHeapBackingStore(data, slot_accessor);
+ case kSandboxedApiReference:
+ case kApiReference:
+ return ReadApiReference(data, slot_accessor);
+ case kClearedWeakReference:
+ return ReadClearedWeakReference(data, slot_accessor);
+ case kWeakPrefix:
+ return ReadWeakPrefix(data, slot_accessor);
+ case CASE_RANGE(kRootArrayConstants, 32):
+ return ReadRootArrayConstants(data, slot_accessor);
+ case CASE_RANGE(kHotObject, 8):
+ return ReadHotObject(data, slot_accessor);
+ case CASE_RANGE(kFixedRawData, 32):
+ return ReadFixedRawData(data, slot_accessor);
+ case CASE_RANGE(kFixedRepeat, 16):
+ return ReadFixedRepeat(data, slot_accessor);
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
+#ifdef DEBUG
+#define UNUSED_CASE(byte_code) \
+ case byte_code: \
+ UNREACHABLE();
+ UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
+#endif
+#undef UNUSED_CASE
+ }
- // Find an object in the roots array and write a pointer to it to the
- // current object.
- case kRootArray: {
- int id = source_.GetInt();
- RootIndex root_index = static_cast<RootIndex>(id);
- Handle<HeapObject> heap_object =
- Handle<HeapObject>::cast(isolate()->root_handle(root_index));
- hot_objects_.Add(heap_object);
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
+ // The above switch, including UNUSED_SERIALIZER_BYTE_CODES, covers all
+ // possible bytecodes, but clang doesn't realize this, so we have an explicit
+ // UNREACHABLE here too.
+ UNREACHABLE();
+}
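The refactor above turns one large switch into a thin dispatcher over per-bytecode Read* helpers that each return the number of slots written. A minimal sketch of that dispatch shape; the bytecode names and handlers here are invented, not V8's:

#include <cstdint>
#include <cstdio>

using byte = uint8_t;

enum Bytecode : byte { kNop = 0, kNewObject = 1, kBackref = 2 };

// Each handler returns the number of slots it wrote, like the Read* methods.
int ReadNop(byte) { return 0; }
int ReadNewObject(byte data) { printf("new object, space %d\n", data & 3); return 1; }
int ReadBackref(byte) { printf("backref\n"); return 1; }

int ReadSingleBytecodeData(byte data) {
  switch (data) {
    case kNop:       return ReadNop(data);
    case kNewObject: return ReadNewObject(data);
    case kBackref:   return ReadBackref(data);
  }
  return -1;  // unreachable for well-formed input
}

int main() {
  const byte stream[] = {kNewObject, kBackref, kNop};
  for (byte b : stream) ReadSingleBytecodeData(b);
  return 0;
}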
- // Find an object in the startup object cache and write a pointer to it to
- // the current object.
- case kStartupObjectCache: {
- int cache_index = source_.GetInt();
- // TODO(leszeks): Could we use the address of the startup_object_cache
- // entry as a Handle backing?
- HeapObject heap_object = HeapObject::cast(
- main_thread_isolate()->startup_object_cache()->at(cache_index));
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
+// Deserialize a new object and write a pointer to it to the current
+// object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadNewObject(byte data,
+ SlotAccessor slot_accessor) {
+ SnapshotSpace space = NewObject::Decode(data);
+ DCHECK_IMPLIES(V8_STATIC_ROOTS_BOOL, space != SnapshotSpace::kReadOnlyHeap);
+ // Save the reference type before recursing down into reading the object.
+ HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
+ Handle<HeapObject> heap_object = ReadObject(space);
+ return slot_accessor.Write(heap_object, ref_type);
+}
- // Find an object in the read-only object cache and write a pointer to it
- // to the current object.
- case kReadOnlyObjectCache: {
- int cache_index = source_.GetInt();
- // TODO(leszeks): Could we use the address of the cached_read_only_object
- // entry as a Handle backing?
- HeapObject heap_object = HeapObject::cast(
- isolate()->read_only_heap()->cached_read_only_object(cache_index));
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
+// Find a recently deserialized object using its offset from the current
+// allocation point and write a pointer to it to the current object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadBackref(byte data, SlotAccessor slot_accessor) {
+ Handle<HeapObject> heap_object = GetBackReferencedObject();
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
- // Find an object in the shared heap object cache and write a pointer to it
- // to the current object.
- case kSharedHeapObjectCache: {
- int cache_index = source_.GetInt();
- // TODO(leszeks): Could we use the address of the
- // shared_heap_object_cache entry as a Handle backing?
- HeapObject heap_object = HeapObject::cast(
- main_thread_isolate()->shared_heap_object_cache()->at(cache_index));
- DCHECK(
- SharedHeapSerializer::ShouldBeInSharedHeapObjectCache(heap_object));
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
+// Reference an object in the read-only heap. This should be used when an
+// object is read-only, but is not a root. With static roots, however, this
+// reference is always used to refer to read-only objects, since they are
+// created by loading a memory dump of the r/o space.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadReadOnlyHeapRef(byte data,
+ SlotAccessor slot_accessor) {
+ DCHECK(isolate()->heap()->deserialization_complete() || V8_STATIC_ROOTS_BOOL);
+ uint32_t chunk_index = source_.GetInt();
+ uint32_t chunk_offset = source_.GetInt();
+
+ ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
+ ReadOnlyPage* page = read_only_space->pages()[chunk_index];
+ Address address = page->OffsetToAddress(chunk_offset);
+ HeapObject heap_object = HeapObject::FromAddress(address);
+
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
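A read-only heap reference is just a (page index, offset within page) pair; the serializer walked the list of read-only pages to compute it, and the deserializer rebuilds the address by indexing the same page list. A tiny sketch of that round trip over a made-up page list:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

struct Page { uint64_t start; uint64_t size; };

// Encode an address as (page index, offset) over a list of pages.
std::pair<uint32_t, uint32_t> Encode(const std::vector<Page>& pages, uint64_t addr) {
  for (uint32_t i = 0; i < pages.size(); ++i) {
    if (addr >= pages[i].start && addr < pages[i].start + pages[i].size) {
      return {i, static_cast<uint32_t>(addr - pages[i].start)};
    }
  }
  return {~0u, 0};  // not found
}

// Decode it back by indexing the same page list.
uint64_t Decode(const std::vector<Page>& pages, uint32_t index, uint32_t offset) {
  return pages[index].start + offset;
}

int main() {
  std::vector<Page> pages = {{0x1000, 0x1000}, {0x8000, 0x1000}};  // made up
  uint64_t addr = 0x8040;
  auto [index, offset] = Encode(pages, addr);
  assert(Decode(pages, index, offset) == addr);
  return 0;
}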
- // Deserialize a new meta-map and write a pointer to it to the current
- // object.
- case kNewMetaMap: {
- Handle<HeapObject> heap_object = ReadMetaMap();
- return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
- }
+// Find an object in the roots array and write a pointer to it to the
+// current object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadRootArray(byte data,
+ SlotAccessor slot_accessor) {
+ int id = source_.GetInt();
+ RootIndex root_index = static_cast<RootIndex>(id);
+ Handle<HeapObject> heap_object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root_index));
+ hot_objects_.Add(heap_object);
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
- // Find an external reference and write a pointer to it to the current
- // object.
- case kSandboxedExternalReference:
- case kExternalReference: {
- DCHECK_IMPLIES(data == kSandboxedExternalReference,
- V8_ENABLE_SANDBOX_BOOL);
- Address address = ReadExternalReferenceCase();
- ExternalPointerTag tag = kExternalPointerNullTag;
- if (data == kSandboxedExternalReference) {
- tag = ReadExternalPointerTag();
- }
- return WriteExternalPointer(slot_accessor.external_pointer_slot(),
- address, tag);
- }
+// Find an object in the startup object cache and write a pointer to it to
+// the current object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadStartupObjectCache(byte data,
+ SlotAccessor slot_accessor) {
+ int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the startup_object_cache
+ // entry as a Handle backing?
+ HeapObject heap_object = HeapObject::cast(
+ main_thread_isolate()->startup_object_cache()->at(cache_index));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
- case kSandboxedRawExternalReference:
- case kRawExternalReference: {
- DCHECK_IMPLIES(data == kSandboxedExternalReference,
- V8_ENABLE_SANDBOX_BOOL);
- Address address;
- source_.CopyRaw(&address, kSystemPointerSize);
- ExternalPointerTag tag = kExternalPointerNullTag;
- if (data == kSandboxedRawExternalReference) {
- tag = ReadExternalPointerTag();
- }
- return WriteExternalPointer(slot_accessor.external_pointer_slot(),
- address, tag);
- }
+// Find an object in the read-only object cache and write a pointer to it
+// to the current object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadReadOnlyObjectCache(
+ byte data, SlotAccessor slot_accessor) {
+ DCHECK(!V8_STATIC_ROOTS_BOOL);
+ int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the cached_read_only_object
+ // entry as a Handle backing?
+ HeapObject heap_object = HeapObject::cast(
+ isolate()->read_only_heap()->cached_read_only_object(cache_index));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
- case kInternalReference:
- case kOffHeapTarget:
- // These bytecodes are expected only during RelocInfo iteration.
- UNREACHABLE();
+// Find an object in the shared heap object cache and write a pointer to it
+// to the current object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadSharedHeapObjectCache(
+ byte data, SlotAccessor slot_accessor) {
+ int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the
+ // shared_heap_object_cache entry as a Handle backing?
+ HeapObject heap_object = HeapObject::cast(
+ main_thread_isolate()->shared_heap_object_cache()->at(cache_index));
+ DCHECK(SharedHeapSerializer::ShouldBeInSharedHeapObjectCache(heap_object));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
- // Find an object in the attached references and write a pointer to it to
- // the current object.
- case kAttachedReference: {
- int index = source_.GetInt();
- Handle<HeapObject> heap_object = attached_objects_[index];
- return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
- }
+// Deserialize a new meta-map and write a pointer to it to the current
+// object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadNewMetaMap(byte data,
+ SlotAccessor slot_accessor) {
+ Handle<HeapObject> heap_object = ReadMetaMap();
+ return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
+}
- case kNop:
- return 0;
+// Find an external reference and write a pointer to it to the current
+// object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadExternalReference(byte data,
+ SlotAccessor slot_accessor) {
+ DCHECK_IMPLIES(data == kSandboxedExternalReference, V8_ENABLE_SANDBOX_BOOL);
+ Address address = ReadExternalReferenceCase();
+ ExternalPointerTag tag = kExternalPointerNullTag;
+ if (data == kSandboxedExternalReference) {
+ tag = ReadExternalPointerTag();
+ }
+ return WriteExternalPointer(slot_accessor.external_pointer_slot(), address,
+ tag);
+}
- case kRegisterPendingForwardRef: {
- HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
- unresolved_forward_refs_.emplace_back(slot_accessor.object(),
- slot_accessor.offset(), ref_type);
- num_unresolved_forward_refs_++;
- return 1;
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadRawExternalReference(
+ byte data, SlotAccessor slot_accessor) {
+ DCHECK_IMPLIES(data == kSandboxedExternalReference, V8_ENABLE_SANDBOX_BOOL);
+ Address address;
+ source_.CopyRaw(&address, kSystemPointerSize);
+ ExternalPointerTag tag = kExternalPointerNullTag;
+ if (data == kSandboxedRawExternalReference) {
+ tag = ReadExternalPointerTag();
+ }
+ return WriteExternalPointer(slot_accessor.external_pointer_slot(), address,
+ tag);
+}
- case kResolvePendingForwardRef: {
- // Pending forward refs can only be resolved after the heap object's map
- // field is deserialized; currently they only appear immediately after
- // the map field.
- DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
- Handle<HeapObject> obj = slot_accessor.object();
- int index = source_.GetInt();
- auto& forward_ref = unresolved_forward_refs_[index];
- SlotAccessorForHeapObject::ForSlotOffset(forward_ref.object,
- forward_ref.offset)
- .Write(*obj, forward_ref.ref_type);
- num_unresolved_forward_refs_--;
- if (num_unresolved_forward_refs_ == 0) {
- // If there's no more pending fields, clear the entire pending field
- // vector.
- unresolved_forward_refs_.clear();
- } else {
- // Otherwise, at least clear the pending field.
- forward_ref.object = Handle<HeapObject>();
- }
- return 0;
- }
+// Find an object in the attached references and write a pointer to it to
+// the current object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadAttachedReference(byte data,
+ SlotAccessor slot_accessor) {
+ int index = source_.GetInt();
+ Handle<HeapObject> heap_object = attached_objects_[index];
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+}
- case kSynchronize:
- // If we get here then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- UNREACHABLE();
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadRegisterPendingForwardRef(
+ byte data, SlotAccessor slot_accessor) {
+ HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
+ unresolved_forward_refs_.emplace_back(slot_accessor.object(),
+ slot_accessor.offset(), ref_type);
+ num_unresolved_forward_refs_++;
+ return 1;
+}
- // Deserialize raw data of variable length.
- case kVariableRawData: {
- // This operation is only supported for tagged-size slots, else we might
- // become misaligned.
- DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
- int size_in_tagged = source_.GetInt();
- // TODO(leszeks): Only copy slots when there are Smis in the serialized
- // data.
- source_.CopySlots(slot_accessor.slot().location(), size_in_tagged);
- return size_in_tagged;
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadResolvePendingForwardRef(
+ byte data, SlotAccessor slot_accessor) {
+ // Pending forward refs can only be resolved after the heap object's map
+ // field is deserialized; currently they only appear immediately after
+ // the map field.
+ DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
+ Handle<HeapObject> obj = slot_accessor.object();
+ int index = source_.GetInt();
+ auto& forward_ref = unresolved_forward_refs_[index];
+ SlotAccessorForHeapObject::ForSlotOffset(forward_ref.object,
+ forward_ref.offset)
+ .Write(*obj, forward_ref.ref_type);
+ num_unresolved_forward_refs_--;
+ if (num_unresolved_forward_refs_ == 0) {
+ // If there are no more pending fields, clear the entire pending field
+ // vector.
+ unresolved_forward_refs_.clear();
+ } else {
+ // Otherwise, at least clear the pending field.
+ forward_ref.object = Handle<HeapObject>();
+ }
+ return 0;
+}
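Forward references follow a register-then-resolve protocol: while the target object is not yet deserialized, the (object, offset) of the referring slot is recorded; once the target exists, a resolve bytecode patches the recorded slot. A stripped-down sketch of that bookkeeping, with plain pointers standing in for handles and slot accessors:

#include <cstdio>
#include <vector>

struct Object { int fields[4] = {0, 0, 0, 0}; };

struct PendingRef { Object* object; int offset; };

std::vector<PendingRef> unresolved_forward_refs;

// Bytecode "register pending forward ref": remember where the value must go.
void RegisterPendingForwardRef(Object* obj, int offset) {
  unresolved_forward_refs.push_back({obj, offset});
}

// Bytecode "resolve pending forward ref": patch a recorded slot with `value`.
void ResolvePendingForwardRef(int index, int value) {
  PendingRef& ref = unresolved_forward_refs[index];
  ref.object->fields[ref.offset] = value;
  ref.object = nullptr;  // clear the resolved entry, as the deserializer does
}

int main() {
  Object o;
  RegisterPendingForwardRef(&o, 2);  // slot refers to a not-yet-read object
  ResolvePendingForwardRef(0, 42);   // target now exists; patch the slot
  printf("field[2] = %d\n", o.fields[2]);
  return 0;
}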
- // Deserialize raw code directly into the body of the code object.
- case kCodeBody: {
- // This operation is only supported for tagged-size slots, else we might
- // become misaligned.
- DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
- // CodeBody can only occur right after the heap object header.
- DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
-
- int size_in_tagged = source_.GetInt();
- int size_in_bytes = size_in_tagged * kTaggedSize;
-
- {
- DisallowGarbageCollection no_gc;
- Code code = Code::cast(*slot_accessor.object());
-
- // First deserialize the code itself.
- source_.CopyRaw(
- reinterpret_cast<void*>(code.address() + Code::kDataStart),
- size_in_bytes);
- }
+// Deserialize raw data of variable length.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadVariableRawData(byte data,
+ SlotAccessor slot_accessor) {
+ // This operation is only supported for tagged-size slots, else we might
+ // become misaligned.
+ DCHECK_EQ(decltype(slot_accessor.slot())::kSlotDataSize, kTaggedSize);
+ int size_in_tagged = source_.GetInt();
+ // TODO(leszeks): Only copy slots when there are Smis in the serialized
+ // data.
+ source_.CopySlots(slot_accessor.slot().location(), size_in_tagged);
+ return size_in_tagged;
+}
- // Then deserialize the code header
- ReadData(slot_accessor.object(), HeapObject::kHeaderSize / kTaggedSize,
- Code::kDataStart / kTaggedSize);
+// Custom deserialization for a Code object and its associated InstructionStream
+// object.
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadCodeBody(byte data,
+ SlotAccessor slot_accessor) {
+ // This operation is only supported for tagged-size slots, else we might
+ // become misaligned.
+ DCHECK_EQ(decltype(slot_accessor.slot())::kSlotDataSize, kTaggedSize);
+ // CodeBody can only occur right after the heap object header.
+ DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
+
+ int size_in_tagged = source_.GetInt();
+ int size_in_bytes = size_in_tagged * kTaggedSize;
+
+ {
+ DisallowGarbageCollection no_gc;
+ InstructionStream istream =
+ InstructionStream::cast(*slot_accessor.object());
+
+ // First deserialize the untagged region of the InstructionStream object.
+ source_.CopyRaw(reinterpret_cast<void*>(istream.address() +
+ InstructionStream::kDataStart),
+ size_in_bytes);
+ }
- // Then deserialize the pre-serialized RelocInfo objects.
- std::vector<Handle<HeapObject>> preserialized_objects;
- while (source_.Peek() != kSynchronize) {
- Handle<HeapObject> obj = ReadObject();
- preserialized_objects.push_back(obj);
- }
- // Skip the synchronize bytecode.
- source_.Advance(1);
-
- // Finally iterate RelocInfos (the same way it was done by the serializer)
- // and deserialize respective data into RelocInfos. The RelocIterator
- // holds a raw pointer to the code, so we have to disable garbage
- // collection here. It's ok though, any objects it would have needed are
- // in the preserialized_objects vector.
- {
- DisallowGarbageCollection no_gc;
-
- Code code = Code::cast(*slot_accessor.object());
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- code.set_main_cage_base(isolate()->cage_base(), kRelaxedStore);
- }
- DeserializerRelocInfoVisitor visitor(this, &preserialized_objects);
- for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
- !it.done(); it.next()) {
- it.rinfo()->Visit(&visitor);
- }
- }
+ // Then deserialize the InstructionStream header
+ ReadData(slot_accessor.object(), HeapObject::kHeaderSize / kTaggedSize,
+ InstructionStream::kDataStart / kTaggedSize);
- // Advance to the end of the code object.
- return (int{Code::kDataStart} - HeapObject::kHeaderSize) / kTaggedSize +
- size_in_tagged;
+ // Then deserialize the pre-serialized RelocInfo objects.
+ std::vector<Handle<HeapObject>> preserialized_objects;
+ while (source_.Peek() != kSynchronize) {
+ Handle<HeapObject> obj = ReadObject();
+ preserialized_objects.push_back(obj);
+ }
+ // Skip the synchronize bytecode.
+ source_.Advance(1);
+
+ // Finally iterate RelocInfos (the same way it was done by the serializer)
+ // and deserialize respective data into RelocInfos. The RelocIterator
+ // holds a raw pointer to the code, so we have to disable garbage
+ // collection here. It's ok though, any objects it would have needed are
+ // in the preserialized_objects vector.
+ {
+ DisallowGarbageCollection no_gc;
+
+ InstructionStream istream =
+ InstructionStream::cast(*slot_accessor.object());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ istream.set_main_cage_base(isolate()->cage_base(), kRelaxedStore);
}
-
- case kVariableRepeat: {
- int repeats = VariableRepeatCount::Decode(source_.GetInt());
- return ReadRepeatedObject(slot_accessor, repeats);
+ Code code = istream.code(kAcquireLoad);
+ DeserializerRelocInfoVisitor visitor(this, &preserialized_objects);
+ for (RelocIterator it(code, istream, code.relocation_info(),
+ code.constant_pool(),
+ InstructionStream::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(&visitor);
}
+ }
- case kOffHeapBackingStore:
- case kOffHeapResizableBackingStore: {
- int byte_length = source_.GetInt();
- std::unique_ptr<BackingStore> backing_store;
- if (data == kOffHeapBackingStore) {
- backing_store = BackingStore::Allocate(
- main_thread_isolate(), byte_length, SharedFlag::kNotShared,
- InitializedFlag::kUninitialized);
- } else {
- int max_byte_length = source_.GetInt();
- size_t page_size, initial_pages, max_pages;
- Maybe<bool> result =
- JSArrayBuffer::GetResizableBackingStorePageConfiguration(
- nullptr, byte_length, max_byte_length, kDontThrow, &page_size,
- &initial_pages, &max_pages);
- DCHECK(result.FromJust());
- USE(result);
- backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
- main_thread_isolate(), byte_length, max_byte_length, page_size,
- initial_pages, max_pages, WasmMemoryFlag::kNotWasm,
- SharedFlag::kNotShared);
- }
- CHECK_NOT_NULL(backing_store);
- source_.CopyRaw(backing_store->buffer_start(), byte_length);
- backing_stores_.push_back(std::move(backing_store));
- return 0;
- }
+ // Advance to the end of the code object.
+ return (int{InstructionStream::kDataStart} - HeapObject::kHeaderSize) /
+ kTaggedSize +
+ size_in_tagged;
+}
- case kSandboxedApiReference:
- case kApiReference: {
- DCHECK_IMPLIES(data == kSandboxedExternalReference,
- V8_ENABLE_SANDBOX_BOOL);
- uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- Address address;
- if (main_thread_isolate()->api_external_references()) {
- DCHECK_WITH_MSG(reference_id < num_api_references_,
- "too few external references provided through the API");
- address = static_cast<Address>(
- main_thread_isolate()->api_external_references()[reference_id]);
- } else {
- address = reinterpret_cast<Address>(NoExternalReferencesCallback);
- }
- ExternalPointerTag tag = kExternalPointerNullTag;
- if (data == kSandboxedApiReference) {
- tag = ReadExternalPointerTag();
- }
- return WriteExternalPointer(slot_accessor.external_pointer_slot(),
- address, tag);
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadVariableRepeat(byte data,
+ SlotAccessor slot_accessor) {
+ int repeats = VariableRepeatCount::Decode(source_.GetInt());
+ return ReadRepeatedObject(slot_accessor, repeats);
+}
- case kClearedWeakReference:
- return slot_accessor.Write(HeapObjectReference::ClearedValue(isolate()));
-
- case kWeakPrefix: {
- // We shouldn't have two weak prefixes in a row.
- DCHECK(!next_reference_is_weak_);
- // We shouldn't have weak refs without a current object.
- DCHECK_NE(slot_accessor.object()->address(), kNullAddress);
- next_reference_is_weak_ = true;
- return 0;
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadOffHeapBackingStore(
+ byte data, SlotAccessor slot_accessor) {
+ int byte_length = source_.GetInt();
+ std::unique_ptr<BackingStore> backing_store;
+ if (data == kOffHeapBackingStore) {
+ backing_store = BackingStore::Allocate(main_thread_isolate(), byte_length,
+ SharedFlag::kNotShared,
+ InitializedFlag::kUninitialized);
+ } else {
+ int max_byte_length = source_.GetInt();
+ size_t page_size, initial_pages, max_pages;
+ Maybe<bool> result =
+ JSArrayBuffer::GetResizableBackingStorePageConfiguration(
+ nullptr, byte_length, max_byte_length, kDontThrow, &page_size,
+ &initial_pages, &max_pages);
+ DCHECK(result.FromJust());
+ USE(result);
+ backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
+ main_thread_isolate(), byte_length, max_byte_length, page_size,
+ initial_pages, max_pages, WasmMemoryFlag::kNotWasm,
+ SharedFlag::kNotShared);
+ }
+ CHECK_NOT_NULL(backing_store);
+ source_.CopyRaw(backing_store->buffer_start(), byte_length);
+ backing_stores_.push_back(std::move(backing_store));
+ return 0;
+}
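The off-heap backing-store bytecode carries a byte length (plus a maximum length in the resizable case), allocates a buffer of that size, and then copies the raw payload straight out of the byte stream. A reduced sketch of the fixed-size case, using a plain heap allocation and a toy one-byte length encoding instead of V8's BackingStore and SnapshotByteSource:

#include <cstdio>
#include <cstring>
#include <memory>

// Pretend serialized stream: a length prefix followed by the raw payload.
struct Source {
  const unsigned char* data;
  size_t pos;
  int GetInt() { return data[pos++]; }  // toy 1-byte length encoding
  void CopyRaw(void* dst, size_t n) { memcpy(dst, data + pos, n); pos += n; }
};

int main() {
  const unsigned char stream[] = {4, 'd', 'a', 't', 'a'};
  Source source{stream, 0};

  int byte_length = source.GetInt();
  auto backing_store = std::make_unique<unsigned char[]>(byte_length);
  source.CopyRaw(backing_store.get(), byte_length);

  // The deserializer keeps the allocation alive in backing_stores_ so later
  // JSArrayBuffer objects can attach to it; here we just print the payload.
  printf("%.*s\n", byte_length,
         reinterpret_cast<const char*>(backing_store.get()));
  return 0;
}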
- case CASE_RANGE(kRootArrayConstants, 32): {
- // First kRootArrayConstantsCount roots are guaranteed to be in
- // the old space.
- static_assert(static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) ==
- 0);
- static_assert(kRootArrayConstantsCount <=
- static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
-
- RootIndex root_index = RootArrayConstant::Decode(data);
- Handle<HeapObject> heap_object =
- Handle<HeapObject>::cast(isolate()->root_handle(root_index));
- return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadApiReference(byte data,
+ SlotAccessor slot_accessor) {
+ DCHECK_IMPLIES(data == kSandboxedExternalReference, V8_ENABLE_SANDBOX_BOOL);
+ uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
+ Address address;
+ if (main_thread_isolate()->api_external_references()) {
+ DCHECK_WITH_MSG(reference_id < num_api_references_,
+ "too few external references provided through the API");
+ address = static_cast<Address>(
+ main_thread_isolate()->api_external_references()[reference_id]);
+ } else {
+ address = reinterpret_cast<Address>(NoExternalReferencesCallback);
+ }
+ ExternalPointerTag tag = kExternalPointerNullTag;
+ if (data == kSandboxedApiReference) {
+ tag = ReadExternalPointerTag();
+ }
+ return WriteExternalPointer(slot_accessor.external_pointer_slot(), address,
+ tag);
+}
- case CASE_RANGE(kHotObject, 8): {
- int index = HotObject::Decode(data);
- Handle<HeapObject> hot_object = hot_objects_.Get(index);
- return slot_accessor.Write(hot_object, GetAndResetNextReferenceType());
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadClearedWeakReference(
+ byte data, SlotAccessor slot_accessor) {
+ return slot_accessor.Write(HeapObjectReference::ClearedValue(isolate()));
+}
- case CASE_RANGE(kFixedRawData, 32): {
- // Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
- int size_in_tagged = FixedRawDataWithSize::Decode(data);
- static_assert(TSlot::kSlotDataSize == kTaggedSize ||
- TSlot::kSlotDataSize == 2 * kTaggedSize);
- int size_in_slots = size_in_tagged / (TSlot::kSlotDataSize / kTaggedSize);
- // kFixedRawData can have kTaggedSize != TSlot::kSlotDataSize when
- // serializing Smi roots in pointer-compressed builds. In this case, the
- // size in bytes is unconditionally the (full) slot size.
- DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize, size_in_slots == 1);
- // TODO(leszeks): Only copy slots when there are Smis in the serialized
- // data.
- source_.CopySlots(slot_accessor.slot().location(), size_in_slots);
- return size_in_slots;
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadWeakPrefix(byte data,
+ SlotAccessor slot_accessor) {
+ // We shouldn't have two weak prefixes in a row.
+ DCHECK(!next_reference_is_weak_);
+ // We shouldn't have weak refs without a current object.
+ DCHECK_NE(slot_accessor.object()->address(), kNullAddress);
+ next_reference_is_weak_ = true;
+ return 0;
+}
- case CASE_RANGE(kFixedRepeat, 16): {
- int repeats = FixedRepeatWithCount::Decode(data);
- return ReadRepeatedObject(slot_accessor, repeats);
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadRootArrayConstants(byte data,
+ SlotAccessor slot_accessor) {
+ // The first kRootArrayConstantsCount roots are guaranteed to be in the
+ // old space.
+ static_assert(static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) == 0);
+ static_assert(kRootArrayConstantsCount <=
+ static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
+
+ RootIndex root_index = RootArrayConstant::Decode(data);
+ Handle<HeapObject> heap_object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root_index));
+ return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
+}
-#ifdef DEBUG
-#define UNUSED_CASE(byte_code) \
- case byte_code: \
- UNREACHABLE();
- UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
-#endif
-#undef UNUSED_CASE
- }
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadHotObject(byte data,
+ SlotAccessor slot_accessor) {
+ int index = HotObject::Decode(data);
+ Handle<HeapObject> hot_object = hot_objects_.Get(index);
+ return slot_accessor.Write(hot_object, GetAndResetNextReferenceType());
+}
- // The above switch, including UNUSED_SERIALIZER_BYTE_CODES, covers all
- // possible bytecodes; but, clang doesn't realize this, so we have an explicit
- // UNREACHABLE here too.
- UNREACHABLE();
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadFixedRawData(byte data,
+ SlotAccessor slot_accessor) {
+ using TSlot = decltype(slot_accessor.slot());
+
+ // Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
+ int size_in_tagged = FixedRawDataWithSize::Decode(data);
+ static_assert(TSlot::kSlotDataSize == kTaggedSize ||
+ TSlot::kSlotDataSize == 2 * kTaggedSize);
+ int size_in_slots = size_in_tagged / (TSlot::kSlotDataSize / kTaggedSize);
+ // kFixedRawData can have kTaggedSize != TSlot::kSlotDataSize when
+ // serializing Smi roots in pointer-compressed builds. In this case, the
+ // size in bytes is unconditionally the (full) slot size.
+ DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize, size_in_slots == 1);
+ // TODO(leszeks): Only copy slots when there are Smis in the serialized
+ // data.
+ source_.CopySlots(slot_accessor.slot().location(), size_in_slots);
+ return size_in_slots;
+}
+
+template <typename IsolateT>
+template <typename SlotAccessor>
+int Deserializer<IsolateT>::ReadFixedRepeat(byte data,
+ SlotAccessor slot_accessor) {
+ int repeats = FixedRepeatWithCount::Decode(data);
+ return ReadRepeatedObject(slot_accessor, repeats);
}
#undef CASE_RANGE_ALL_SPACES
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 97cb1da387..c2e8c58f57 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -11,6 +11,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/execution/local-isolate.h"
+#include "src/handles/global-handles.h"
#include "src/objects/allocation-site.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/backing-store.h"
@@ -82,7 +83,7 @@ class Deserializer : public SerializerDeserializer {
const std::vector<Handle<AllocationSite>>& new_allocation_sites() const {
return new_allocation_sites_;
}
- const std::vector<Handle<Code>>& new_code_objects() const {
+ const std::vector<Handle<InstructionStream>>& new_code_objects() const {
return new_code_objects_;
}
const std::vector<Handle<Map>>& new_maps() const { return new_maps_; }
@@ -96,10 +97,6 @@ class Deserializer : public SerializerDeserializer {
return new_scripts_;
}
- const std::vector<Handle<DescriptorArray>>& new_descriptor_arrays() const {
- return new_descriptor_arrays_;
- }
-
std::shared_ptr<BackingStore> backing_store(size_t i) {
DCHECK_LT(i, backing_stores_.size());
return backing_stores_[i];
@@ -108,6 +105,9 @@ class Deserializer : public SerializerDeserializer {
bool deserializing_user_code() const { return deserializing_user_code_; }
bool should_rehash() const { return should_rehash_; }
+ void PushObjectToRehash(Handle<HeapObject> object) {
+ to_rehash_.push_back(object);
+ }
void Rehash();
Handle<HeapObject> ReadObject();
@@ -169,6 +169,55 @@ class Deserializer : public SerializerDeserializer {
template <typename SlotAccessor>
int ReadSingleBytecodeData(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadNewObject(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadBackref(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadReadOnlyHeapRef(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadRootArray(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadStartupObjectCache(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadReadOnlyObjectCache(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadSharedHeapObjectCache(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadNewMetaMap(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadExternalReference(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadRawExternalReference(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadAttachedReference(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadRegisterPendingForwardRef(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadResolvePendingForwardRef(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadVariableRawData(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadCodeBody(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadVariableRepeat(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadOffHeapBackingStore(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadApiReference(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadClearedWeakReference(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadWeakPrefix(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadRootArrayConstants(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadHotObject(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadFixedRawData(byte data, SlotAccessor slot_accessor);
+ template <typename SlotAccessor>
+ int ReadFixedRepeat(byte data, SlotAccessor slot_accessor);
+
// A helper function for ReadData for reading external references.
inline Address ReadExternalReferenceCase();
@@ -205,13 +254,16 @@ class Deserializer : public SerializerDeserializer {
HotObjectsList hot_objects_;
std::vector<Handle<Map>> new_maps_;
std::vector<Handle<AllocationSite>> new_allocation_sites_;
- std::vector<Handle<Code>> new_code_objects_;
+ std::vector<Handle<InstructionStream>> new_code_objects_;
std::vector<Handle<AccessorInfo>> accessor_infos_;
std::vector<Handle<CallHandlerInfo>> call_handler_infos_;
std::vector<Handle<Script>> new_scripts_;
- std::vector<Handle<DescriptorArray>> new_descriptor_arrays_;
std::vector<std::shared_ptr<BackingStore>> backing_stores_;
+ // Kept in a GlobalHandleVector since these arrays are passed to Heap as
+ // roots; see WeakenDescriptorArrays().
+ GlobalHandleVector<DescriptorArray> new_descriptor_arrays_;
+
// Vector of allocated objects that can be accessed by a backref, by index.
std::vector<Handle<HeapObject>> back_refs_;
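The per-bytecode Read* helpers declared above take over the case bodies of the single large switch that previously lived in ReadSingleBytecodeData (the removed blocks are visible in the deserializer.cc hunk earlier in this diff). As a rough sketch of the resulting dispatch, using only bytecode constants and CASE_RANGE forms that appear above (the full case list in deserializer.cc is longer):

template <typename IsolateT>
template <typename SlotAccessor>
int Deserializer<IsolateT>::ReadSingleBytecodeData(byte data,
                                                   SlotAccessor slot_accessor) {
  switch (data) {
    case kApiReference:
    case kSandboxedApiReference:
      return ReadApiReference(data, slot_accessor);
    case kClearedWeakReference:
      return ReadClearedWeakReference(data, slot_accessor);
    case kWeakPrefix:
      return ReadWeakPrefix(data, slot_accessor);
    // ... one case (or case range) per bytecode, each forwarding to its
    // matching Read* helper ...
    case CASE_RANGE(kRootArrayConstants, 32):
      return ReadRootArrayConstants(data, slot_accessor);
    case CASE_RANGE(kHotObject, 8):
      return ReadHotObject(data, slot_accessor);
    case CASE_RANGE(kFixedRawData, 32):
      return ReadFixedRawData(data, slot_accessor);
    case CASE_RANGE(kFixedRepeat, 16):
      return ReadFixedRepeat(data, slot_accessor);
  }
  UNREACHABLE();
}

Each helper still returns the number of slots it wrote (0 for prefix-style bytecodes such as kWeakPrefix), so the caller's slot-advancing logic is unchanged.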
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 1260f1a642..d7193d02d2 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -99,9 +99,9 @@ Builtin OffHeapInstructionStream::TryLookupCode(Isolate* isolate,
// When shared pointer compression cage is enabled and it has the embedded
// code blob copy then it could have been used regardless of whether the
// isolate uses it or knows about it or not (see
- // Code::OffHeapInstructionStart()).
+ // InstructionStream::OffHeapInstructionStart()).
// So, this blob has to be checked too.
- CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange();
if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
builtin = i::TryLookupCode(EmbeddedData::FromBlob(code_range), address);
}
@@ -179,38 +179,6 @@ void OffHeapInstructionStream::FreeOffHeapOffHeapInstructionStream(
namespace {
-bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
- DCHECK(Builtins::IsIsolateIndependent(code.builtin_id()));
- switch (Builtins::KindOf(code.builtin_id())) {
- case Builtins::CPP:
- case Builtins::TFC:
- case Builtins::TFH:
- case Builtins::TFJ:
- case Builtins::TFS:
- break;
-
- // Bytecode handlers will only ever be used by the interpreter and so there
- // will never be a need to use trampolines with them.
- case Builtins::BCH:
- case Builtins::ASM:
- // TODO(jgruber): Extend checks to remaining kinds.
- return false;
- }
-
- static_assert(CallInterfaceDescriptor::ContextRegister() !=
- kOffHeapTrampolineRegister);
-
- Callable callable = Builtins::CallableFor(isolate, code.builtin_id());
- CallInterfaceDescriptor descriptor = callable.descriptor();
-
- for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
- Register reg = descriptor.GetRegisterParameter(i);
- if (reg == kOffHeapTrampolineRegister) return true;
- }
-
- return false;
-}
-
void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
static const int kRelocMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
@@ -219,7 +187,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
static_assert(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = FromCodeT(isolate->builtins()->code(builtin));
+ Code code = isolate->builtins()->code(builtin);
RelocIterator on_heap_it(code, kRelocMask);
RelocIterator off_heap_it(blob, code, kRelocMask);
@@ -230,18 +198,18 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.
- // See also: Code::IsIsolateIndependent.
+ // See also: InstructionStream::IsIsolateIndependent.
while (!on_heap_it.done()) {
DCHECK(!off_heap_it.done());
RelocInfo* rinfo = on_heap_it.rinfo();
DCHECK_EQ(rinfo->rmode(), off_heap_it.rinfo()->rmode());
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- CHECK(Builtins::IsIsolateIndependentBuiltin(target));
+ Code target_code = Code::FromTargetAddress(rinfo->target_address());
+ CHECK(Builtins::IsIsolateIndependentBuiltin(target_code));
// Do not emit write-barrier for off-heap writes.
off_heap_it.rinfo()->set_off_heap_target_address(
- blob->InstructionStartOfBuiltin(target.builtin_id()));
+ blob->InstructionStartOfBuiltin(target_code.builtin_id()));
on_heap_it.next();
off_heap_it.next();
@@ -257,15 +225,14 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
}
}
-void EnsureRelocatable(CodeT codet) {
- Code code = FromCodeT(codet);
+void EnsureRelocatable(Code code) {
if (code.relocation_size() == 0) return;
// On some architectures (arm) the builtin might have a non-empty reloc
// info containing a CONST_POOL entry. These entries don't have to be
- // updated when Code object is relocated, so it's safe to drop the reloc
- // info alltogether. If it wasn't the case then we'd have to store it
- // in the metadata.
+ // updated when an InstructionStream object is relocated, so it's safe to
+ // drop the reloc info altogether. If that weren't the case, we'd have to
+ // store it in the metadata.
for (RelocIterator it(code); !it.done(); it.next()) {
CHECK_EQ(it.rinfo()->rmode(), RelocInfo::CONST_POOL);
}
@@ -286,24 +253,17 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
static_assert(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = FromCodeT(builtins->code(builtin));
+ Code code = builtins->code(builtin);
- // Sanity-check that the given builtin is isolate-independent and does not
- // use the trampoline register in its calling convention.
+ // Sanity-check that the given builtin is isolate-independent.
if (!code.IsIsolateIndependent(isolate)) {
saw_unsafe_builtin = true;
fprintf(stderr, "%s is not isolate-independent.\n",
Builtins::name(builtin));
}
- if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
- Builtins::name(builtin));
- }
- uint32_t instruction_size =
- static_cast<uint32_t>(code.raw_instruction_size());
- uint32_t metadata_size = static_cast<uint32_t>(code.raw_metadata_size());
+ uint32_t instruction_size = static_cast<uint32_t>(code.instruction_size());
+ uint32_t metadata_size = static_cast<uint32_t>(code.metadata_size());
DCHECK_EQ(0, raw_code_size % kCodeAlignment);
{
@@ -345,7 +305,8 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
uint8_t* const blob_code = new uint8_t[blob_code_size]();
// Allocate space for the data section, value-initialized to 0.
- static_assert(IsAligned(FixedDataSize(), Code::kMetadataAlignment));
+ static_assert(
+ IsAligned(FixedDataSize(), InstructionStream::kMetadataAlignment));
const uint32_t blob_data_size = FixedDataSize() + raw_data_size;
uint8_t* const blob_data = new uint8_t[blob_data_size]();
@@ -371,14 +332,14 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
static_assert(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = FromCodeT(builtins->code(builtin));
+ Code code = builtins->code(builtin);
uint32_t offset =
layout_descriptions[static_cast<int>(builtin)].metadata_offset;
uint8_t* dst = raw_metadata_start + offset;
- DCHECK_LE(RawMetadataOffset() + offset + code.raw_metadata_size(),
+ DCHECK_LE(RawMetadataOffset() + offset + code.metadata_size(),
blob_data_size);
- std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_metadata_start()),
- code.raw_metadata_size());
+ std::memcpy(dst, reinterpret_cast<uint8_t*>(code.metadata_start()),
+ code.metadata_size());
}
CHECK_IMPLIES(
kMaxPCRelativeCodeRangeInMB,
@@ -389,14 +350,14 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
static_assert(Builtins::kAllBuiltinsAreIsolateIndependent);
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = FromCodeT(builtins->code(builtin));
+ Code code = builtins->code(builtin);
uint32_t offset =
layout_descriptions[static_cast<int>(builtin)].instruction_offset;
uint8_t* dst = raw_code_start + offset;
- DCHECK_LE(RawCodeOffset() + offset + code.raw_instruction_size(),
+ DCHECK_LE(RawCodeOffset() + offset + code.instruction_size(),
blob_code_size);
- std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_instruction_start()),
- code.raw_instruction_size());
+ std::memcpy(dst, reinterpret_cast<uint8_t*>(code.InstructionStart()),
+ code.instruction_size());
}
EmbeddedData d(blob_code, blob_code_size, blob_data, blob_data_size);
@@ -425,10 +386,10 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
if (DEBUG_BOOL) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- Code code = FromCodeT(builtins->code(builtin));
+ Code code = builtins->code(builtin);
- CHECK_EQ(d.InstructionSizeOfBuiltin(builtin), code.InstructionSize());
- CHECK_EQ(d.MetadataSizeOfBuiltin(builtin), code.MetadataSize());
+ CHECK_EQ(d.InstructionSizeOfBuiltin(builtin), code.instruction_size());
+ CHECK_EQ(d.MetadataSizeOfBuiltin(builtin), code.metadata_size());
CHECK_EQ(d.SafepointTableSizeOf(builtin), code.safepoint_table_size());
CHECK_EQ(d.HandlerTableSizeOf(builtin), code.handler_table_size());
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index 4c5a1f998a..ee752634e7 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-class Code;
+class InstructionStream;
class Isolate;
// Wraps an off-heap instruction stream.
@@ -106,9 +106,9 @@ class EmbeddedData final {
// When shared pointer compression cage is enabled and it has the embedded
// code blob copy then it could have been used regardless of whether the
// isolate uses it or knows about it or not (see
- // Code::OffHeapInstructionStart()).
+ // InstructionStream::OffHeapInstructionStart()).
// So, this blob has to be checked too.
- CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange();
if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
EmbeddedData remapped_d = EmbeddedData::FromBlob(code_range);
// If the pc does not belong to the embedded code blob we should be
@@ -183,7 +183,8 @@ class EmbeddedData final {
}
// Blob layout information for a single instruction stream. Corresponds
- // roughly to Code object layout (see the instruction and metadata area).
+ // roughly to InstructionStream object layout (see the instruction and
+ // metadata area).
struct LayoutDescription {
// The offset and (unpadded) length of this builtin's instruction area
// from the start of the embedded code section.
@@ -302,7 +303,7 @@ class EmbeddedData final {
static constexpr int PadAndAlignData(int size) {
// Ensure we have at least one byte trailing the actual builtin
// instructions which we can later fill with int3.
- return RoundUp<Code::kMetadataAlignment>(size);
+ return RoundUp<InstructionStream::kMetadataAlignment>(size);
}
void PrintStatistics() const;
@@ -313,8 +314,9 @@ class EmbeddedData final {
uint32_t code_size_;
// The data section contains both descriptions of the code section (hashes,
- // offsets, sizes) and metadata describing Code objects (see
- // Code::MetadataStart()). It is guaranteed to have read permissions.
+ // offsets, sizes) and metadata describing InstructionStream objects (see
+ // InstructionStream::MetadataStart()). It is guaranteed to have read
+ // permissions.
const uint8_t* data_;
uint32_t data_size_;
};
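For orientation, the copy loops in embedded-data.cc above suggest how lookups work on the reading side: each builtin's instruction and metadata areas are located via the per-builtin offsets recorded in LayoutDescription, relative to the blob's code and data sections. A hedged sketch only; the parameter and function names here are illustrative, not the actual EmbeddedData accessors:

// Hypothetical illustration of deriving a builtin's instruction start from
// the layout table; the real accessors may differ in naming and padding.
Address InstructionStartOfBuiltinSketch(const uint8_t* code_section,
                                        const LayoutDescription* layout,
                                        Builtin builtin) {
  const LayoutDescription& desc = layout[static_cast<int>(builtin)];
  return reinterpret_cast<Address>(code_section) + desc.instruction_offset;
}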
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h b/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h
index fd2b50897d..6d83e772bb 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer-interface.h
@@ -37,7 +37,7 @@ class EmbeddedFileWriterInterface {
virtual int GetExternallyCompiledFilenameCount() const = 0;
// The isolate will call the method below just prior to replacing the
- // compiled builtin Code objects with trampolines.
+ // compiled builtin InstructionStream objects with trampolines.
virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
virtual void PrepareBuiltinLabelInfoMap(int create_offset,
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 66cb66234d..9ce32f64e8 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -269,10 +269,7 @@ void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
// Retrieve the SourcePositionTable and copy it.
- Code code = FromCodeT(builtins->code(builtin));
- // Verify that the code object is still the "real code" and not a
- // trampoline (which wouldn't have source positions).
- DCHECK(!code.is_off_heap_trampoline());
+ Code code = builtins->code(builtin);
ByteArray source_position_table = code.source_position_table();
std::vector<unsigned char> data(source_position_table.GetDataStartAddress(),
source_position_table.GetDataEndAddress());
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 6519be7e2d..94bb8cc6f3 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -69,7 +69,7 @@ void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
}
void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
- static_assert((1 << 3) >= Code::kMetadataAlignment);
+ static_assert((1 << 3) >= InstructionStream::kMetadataAlignment);
fprintf(fp_, ".align 3\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index ad4c088124..4ec294734d 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -94,7 +94,7 @@ void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
// instructions are used to retrieve v8_Default_embedded_blob_ and/or
// v8_Default_embedded_blob_size_. The generated instructions require the
// load target to be aligned at 8 bytes (2^3).
- static_assert(8 >= Code::kMetadataAlignment);
+ static_assert(8 >= InstructionStream::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index b0cf92b21d..f092893c4c 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -80,7 +80,7 @@ void PlatformEmbeddedFileWriterMac::AlignToPageSizeIfNeeded() {
}
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
- static_assert(8 >= Code::kMetadataAlignment);
+ static_assert(8 >= InstructionStream::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
}
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index dd67969694..bea5b64d86 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -15,9 +15,11 @@
#include "src/base/platform/wrappers.h"
#include "src/base/vector.h"
#include "src/codegen/cpu-features.h"
+#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
#include "src/snapshot/snapshot.h"
+#include "src/snapshot/static-roots-gen.h"
namespace {
@@ -230,7 +232,7 @@ int main(int argc, char** argv) {
std::string usage = "Usage: " + std::string(argv[0]) +
" [--startup-src=file]" + " [--startup-blob=file]" +
" [--embedded-src=file]" + " [--embedded-variant=label]" +
- " [--target-arch=arch]" +
+ " [--static-roots-src=file]" + " [--target-arch=arch]" +
" [--target-os=os] [extras]\n\n";
int result = i::FlagList::SetFlagsFromCommandLine(
&argc, argv, true, HelpOptions(HelpOptions::kExit, usage.c_str()));
@@ -289,6 +291,10 @@ int main(int argc, char** argv) {
// is still alive (we called DisableEmbeddedBlobRefcounting above).
// That's fine as far as the embedded file writer is concerned.
WriteEmbeddedFile(&embedded_writer);
+
+ if (i::v8_flags.static_roots_src) {
+ i::StaticRootsTableGen::write(i_isolate, i::v8_flags.static_roots_src);
+ }
}
if (warmup_script) {
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 067dab0320..ea86170a84 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -5,15 +5,27 @@
#include "src/snapshot/read-only-deserializer.h"
#include "src/api/api.h"
+#include "src/common/globals.h"
#include "src/execution/v8threads.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/read-only-heap.h"
+#include "src/logging/counters-scopes.h"
#include "src/objects/slots.h"
+#include "src/roots/static-roots.h"
+#include "src/snapshot/snapshot-data.h"
namespace v8 {
namespace internal {
+ReadOnlyDeserializer::ReadOnlyDeserializer(Isolate* isolate,
+ const SnapshotData* data,
+ bool can_rehash)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
+ can_rehash) {}
+
void ReadOnlyDeserializer::DeserializeIntoIsolate() {
+ NestedTimedHistogramScope histogram_timer(
+ isolate()->counters()->snapshot_deserialize_rospace());
HandleScope scope(isolate());
ReadOnlyHeap* ro_heap = isolate()->read_only_heap();
@@ -31,31 +43,79 @@ void ReadOnlyDeserializer::DeserializeIntoIsolate() {
{
ReadOnlyRoots roots(isolate());
+ if (V8_STATIC_ROOTS_BOOL) {
+ // When static roots are enabled, RO space is deserialized as a verbatim
+ // byte copy without going through any normal deserializer logic.
+ ro_heap->read_only_space()->InitFromMemoryDump(isolate(), source());
+ roots.InitFromStaticRootsTable(isolate()->cage_base());
+ ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
+ } else {
+ roots.Iterate(this);
- roots.Iterate(this);
- ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
-
- // Deserialize the Read-only Object Cache.
- for (;;) {
- Object* object = ro_heap->ExtendReadOnlyObjectCache();
- // During deserialization, the visitor populates the read-only object
- // cache and eventually terminates the cache with undefined.
- VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
- FullObjectSlot(object));
- if (object->IsUndefined(roots)) break;
+ // Deserialize the Read-only Object Cache.
+ for (;;) {
+ Object* object = ro_heap->ExtendReadOnlyObjectCache();
+ // During deserialization, the visitor populates the read-only object
+ // cache and eventually terminates the cache with undefined.
+ VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
+ FullObjectSlot(object));
+ if (object->IsUndefined(roots)) break;
+ }
+ DeserializeDeferredObjects();
}
- DeserializeDeferredObjects();
+
#ifdef DEBUG
roots.VerifyNameForProtectors();
#endif
roots.VerifyNameForProtectorsPages();
}
+ PostProcessNewObjectsIfStaticRootsEnabled();
+
if (should_rehash()) {
isolate()->heap()->InitializeHashSeed();
Rehash();
}
}
+#ifdef V8_STATIC_ROOTS
+void ReadOnlyDeserializer::PostProcessNewObjectsIfStaticRootsEnabled() {
+ // Since we are not deserializing individual objects, we need to scan the
+ // heap and search for objects that need post-processing.
+ //
+ // See also Deserializer<IsolateT>::PostProcessNewObject.
+ //
+ // TODO(olivf): Make the V8_STATIC_ROOTS configuration use normal
+ // deserializer paths.
+ ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
+ PtrComprCageBase cage_base(isolate());
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
+ const InstanceType instance_type = object.map(cage_base).instance_type();
+
+ if (should_rehash()) {
+ if (InstanceTypeChecker::IsString(instance_type)) {
+ String str = String::cast(object);
+ str.set_raw_hash_field(Name::kEmptyHashField);
+ PushObjectToRehash(handle(str, isolate()));
+ } else if (object.NeedsRehashing(instance_type)) {
+ PushObjectToRehash(handle(object, isolate()));
+ }
+ }
+
+ if (InstanceTypeChecker::IsCode(instance_type)) {
+ Code code = Code::cast(object);
+ code.init_code_entry_point(main_thread_isolate(), kNullAddress);
+ // RO space only contains builtin Code objects which don't have an
+ // attached InstructionStream.
+ DCHECK(code.is_builtin());
+ DCHECK(!code.has_instruction_stream());
+ code.SetEntryPointForOffHeapBuiltin(main_thread_isolate(),
+ code.OffHeapInstructionStart());
+ }
+ }
+}
+#endif // V8_STATIC_ROOTS
+
} // namespace internal
} // namespace v8
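Because the hunk above interleaves old and new lines, the resulting control flow is easier to see condensed. A sketch of DeserializeIntoIsolate after this change, eliding the histogram scope, the read-only object cache loop, and the debug verification:

void ReadOnlyDeserializer::DeserializeIntoIsolate() {
  ReadOnlyHeap* ro_heap = isolate()->read_only_heap();
  ReadOnlyRoots roots(isolate());
  if (V8_STATIC_ROOTS_BOOL) {
    // RO space is restored as a verbatim byte copy of the serialized pages.
    ro_heap->read_only_space()->InitFromMemoryDump(isolate(), source());
    roots.InitFromStaticRootsTable(isolate()->cage_base());
    ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
  } else {
    roots.Iterate(this);  // Regular object-by-object deserialization.
    DeserializeDeferredObjects();
  }
  // With static roots, fix-ups (string hashes, Code entry points) are found
  // by scanning the RO heap afterwards; otherwise this is a no-op.
  PostProcessNewObjectsIfStaticRootsEnabled();
  if (should_rehash()) {
    isolate()->heap()->InitializeHashSeed();
    Rehash();
  }
}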
diff --git a/deps/v8/src/snapshot/read-only-deserializer.h b/deps/v8/src/snapshot/read-only-deserializer.h
index 05b4379169..8c2d4ceddb 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.h
+++ b/deps/v8/src/snapshot/read-only-deserializer.h
@@ -6,22 +6,28 @@
#define V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
-#include "src/snapshot/snapshot-data.h"
namespace v8 {
namespace internal {
+class SnapshotData;
+
// Deserializes the read-only blob, creating the read-only roots and the
// Read-only object cache used by the other deserializers.
class ReadOnlyDeserializer final : public Deserializer<Isolate> {
public:
- explicit ReadOnlyDeserializer(Isolate* isolate, const SnapshotData* data,
- bool can_rehash)
- : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
- can_rehash) {}
+ ReadOnlyDeserializer(Isolate* isolate, const SnapshotData* data,
+ bool can_rehash);
// Deserialize the snapshot into an empty heap.
void DeserializeIntoIsolate();
+
+ private:
+#ifdef V8_STATIC_ROOTS
+ void PostProcessNewObjectsIfStaticRootsEnabled();
+#else
+ void PostProcessNewObjectsIfStaticRootsEnabled() {}
+#endif // V8_STATIC_ROOTS
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index b05a0c0870..6315d9833a 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -24,6 +24,10 @@ ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate,
#endif
{
static_assert(RootIndex::kFirstReadOnlyRoot == RootIndex::kFirstRoot);
+ if (V8_STATIC_ROOTS_BOOL) {
+ // ... since RO heap pages are serialized verbatim:
+ set_serializer_tracks_serialization_statistics(false);
+ }
}
ReadOnlySerializer::~ReadOnlySerializer() {
@@ -33,6 +37,7 @@ ReadOnlySerializer::~ReadOnlySerializer() {
void ReadOnlySerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
CHECK(ReadOnlyHeap::Contains(*obj));
CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
+ DCHECK(!V8_STATIC_ROOTS_BOOL);
// There should be no references to the not_mapped_symbol except for the entry
// in the root table, so don't try to serialize a reference and rely on the
@@ -73,14 +78,94 @@ void ReadOnlySerializer::SerializeReadOnlyRoots() {
CHECK_IMPLIES(!allow_active_isolate_for_testing(),
isolate()->handle_scope_implementer()->blocks()->empty());
- ReadOnlyRoots(isolate()).Iterate(this);
+ if (!V8_STATIC_ROOTS_BOOL) {
+ ReadOnlyRoots(isolate()).Iterate(this);
+ if (reconstruct_read_only_and_shared_object_caches_for_testing()) {
+ ReconstructReadOnlyObjectCacheForTesting();
+ }
+ }
+}
- if (reconstruct_read_only_and_shared_object_caches_for_testing()) {
- ReconstructReadOnlyObjectCacheForTesting();
+#ifdef V8_STATIC_ROOTS
+void ReadOnlySerializer::WipeCodeEntryPointsForDeterministicSerialization(
+ ReadOnlySerializer::CodeEntryPointVector& saved_entry_points) {
+ // See also ObjectSerializer::OutputRawData.
+ ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
+ if (!object.IsCode()) continue;
+ Code code = Code::cast(object);
+ saved_entry_points.push_back(code.code_entry_point());
+ code.SetCodeEntryPointForSerialization(isolate(), kNullAddress);
}
}
+void ReadOnlySerializer::RestoreCodeEntryPoints(
+ const ReadOnlySerializer::CodeEntryPointVector& saved_entry_points) {
+ int i = 0;
+ ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
+ if (!object.IsCode()) continue;
+ Code code = Code::cast(object);
+ code.SetCodeEntryPointForSerialization(isolate(), saved_entry_points[i++]);
+ }
+}
+#endif // V8_STATIC_ROOTS
+
void ReadOnlySerializer::FinalizeSerialization() {
+#if V8_STATIC_ROOTS_BOOL
+ DCHECK(object_cache_empty());
+ DCHECK(deferred_objects_empty());
+ DCHECK_EQ(sink_.Position(), 0);
+
+ // Note that the memcpy-based serialization done here must also guarantee
+ // a deterministic serialized layout. See also ObjectSerializer::OutputRawData,
+ // which implements custom logic for this (but is not reached when
+ // serializing memcpy-style).
+
+ {
+ DisallowGarbageCollection no_gc;
+
+ isolate()->heap()->read_only_space()->Unseal();
+ CodeEntryPointVector saved_entry_points;
+ WipeCodeEntryPointsForDeterministicSerialization(saved_entry_points);
+
+ auto space = isolate()->read_only_heap()->read_only_space();
+ size_t num_pages = space->pages().size();
+ sink_.PutInt(num_pages, "num pages");
+ Tagged_t pos = V8HeapCompressionScheme::CompressAny(
+ reinterpret_cast<Address>(space->pages()[0]));
+ sink_.PutInt(pos, "first page offset");
+ // Unprotect and reprotect the payload of wasm null. The header is not
+ // protected.
+ Address wasm_null_payload = isolate()->factory()->wasm_null()->payload();
+ constexpr int kWasmNullPayloadSize = WasmNull::kSize - kTaggedSize;
+ SetPermissions(isolate()->page_allocator(), wasm_null_payload,
+ kWasmNullPayloadSize, PageAllocator::kRead);
+ for (auto p : space->pages()) {
+ // Pages are shrunk, but memory at the end of the area is still
+ // uninitialized and we do not want to include it in the snapshot.
+ size_t page_content_bytes = p->HighWaterMark() - p->area_start();
+ sink_.PutInt(page_content_bytes, "page content bytes");
+#ifdef MEMORY_SANITIZER
+ __msan_check_mem_is_initialized(reinterpret_cast<void*>(p->area_start()),
+ static_cast<int>(page_content_bytes));
+#endif
+ sink_.PutRaw(reinterpret_cast<const byte*>(p->area_start()),
+ static_cast<int>(page_content_bytes), "page");
+ }
+ // Mark the virtual page range as inaccessible, and allow the OS to reclaim
+ // the underlying physical pages. We do not want to protect the header (map
+ // word), as it needs to remain accessible.
+ isolate()->page_allocator()->DecommitPages(
+ reinterpret_cast<void*>(wasm_null_payload), kWasmNullPayloadSize);
+
+ RestoreCodeEntryPoints(saved_entry_points);
+ isolate()->heap()->read_only_space()->Seal(
+ ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
+ }
+#else // V8_STATIC_ROOTS_BOOL
// This comes right after serialization of the other snapshots, where we
// add entries to the read-only object cache. Add one entry with 'undefined'
// to terminate the read-only object cache.
@@ -88,21 +173,23 @@ void ReadOnlySerializer::FinalizeSerialization() {
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
FullObjectSlot(&undefined));
SerializeDeferredObjects();
- Pad();
#ifdef DEBUG
- // Check that every object on read-only heap is reachable (and was
- // serialized).
- ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
- for (HeapObject object = iterator.Next(); !object.is_null();
- object = iterator.Next()) {
- if (IsNotMappedSymbol(object)) {
- CHECK(did_serialize_not_mapped_symbol_);
- } else {
- CHECK_NOT_NULL(serialized_objects_.Find(object));
+ // Check that every object on read-only heap is reachable (and was
+ // serialized).
+ ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
+ if (IsNotMappedSymbol(object)) {
+ CHECK(did_serialize_not_mapped_symbol_);
+ } else {
+ CHECK_NOT_NULL(serialized_objects_.Find(object));
+ }
}
- }
-#endif
+#endif // DEBUG
+#endif // V8_STATIC_ROOTS_BOOL
+
+ Pad();
}
bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
@@ -122,13 +209,16 @@ bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
SnapshotByteSink* sink, Handle<HeapObject> obj) {
if (!ReadOnlyHeap::Contains(*obj)) return false;
- // Get the cache index and serialize it into the read-only snapshot if
- // necessary.
- int cache_index = SerializeInObjectCache(obj);
-
- // Writing out the cache entry into the calling serializer's sink.
- sink->Put(kReadOnlyObjectCache, "ReadOnlyObjectCache");
- sink->PutInt(cache_index, "read_only_object_cache_index");
+ if (V8_STATIC_ROOTS_BOOL) {
+ SerializeReadOnlyObjectReference(*obj, sink);
+ } else {
+ // Get the cache index and serialize it into the read-only snapshot if
+ // necessary.
+ int cache_index = SerializeInObjectCache(obj);
+ // Writing out the cache entry into the calling serializer's sink.
+ sink->Put(kReadOnlyObjectCache, "ReadOnlyObjectCache");
+ sink->PutInt(cache_index, "read_only_object_cache_index");
+ }
return true;
}
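Reduced to its essentials, the V8_STATIC_ROOTS path in FinalizeSerialization above brackets the raw page write with a wipe/restore of a non-deterministic raw field, the Code entry points (wasm-null handling and the first-page offset elided):

  CodeEntryPointVector saved_entry_points;
  WipeCodeEntryPointsForDeterministicSerialization(saved_entry_points);
  for (auto p : space->pages()) {
    size_t page_content_bytes = p->HighWaterMark() - p->area_start();
    sink_.PutInt(page_content_bytes, "page content bytes");
    sink_.PutRaw(reinterpret_cast<const byte*>(p->area_start()),
                 static_cast<int>(page_content_bytes), "page");
  }
  RestoreCodeEntryPoints(saved_entry_points);

Note that RestoreCodeEntryPoints restores positionally (saved_entry_points[i++]), so it relies on ReadOnlyHeapObjectIterator visiting the same Code objects in the same order as the wipe pass.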
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
index 7f9482f3b9..4e5062c7da 100644
--- a/deps/v8/src/snapshot/read-only-serializer.h
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -25,10 +25,16 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
void SerializeReadOnlyRoots();
- // Completes the serialization of the read-only object cache and serializes
- // any deferred objects.
+ // Completes the serialization of the read-only object cache (after it has
+ // been filled by other serializers) and serializes any deferred objects.
void FinalizeSerialization();
+ private:
+ void ReconstructReadOnlyObjectCacheForTesting();
+
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
+ bool MustBeDeferred(HeapObject object) override;
+
// If |obj| can be serialized in the read-only snapshot then add it to the
// read-only object cache if not already present and emit a
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
@@ -36,16 +42,21 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
Handle<HeapObject> obj);
- private:
- void ReconstructReadOnlyObjectCacheForTesting();
-
- void SerializeObjectImpl(Handle<HeapObject> o) override;
- bool MustBeDeferred(HeapObject object) override;
+#ifdef V8_STATIC_ROOTS
+ using CodeEntryPointVector = std::vector<Address>;
+ void WipeCodeEntryPointsForDeterministicSerialization(
+ CodeEntryPointVector& saved_entry_points);
+ void RestoreCodeEntryPoints(const CodeEntryPointVector& saved_entry_points);
+#endif // V8_STATIC_ROOTS
#ifdef DEBUG
IdentityMap<int, base::DefaultAllocationPolicy> serialized_objects_;
bool did_serialize_not_mapped_symbol_;
#endif
+
+ // For SerializeUsingReadOnlyObjectCache.
+ friend class SharedHeapSerializer;
+ friend class StartupSerializer;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index ac0636d8bc..04753df533 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -13,13 +13,14 @@
namespace v8 {
namespace internal {
+// Values must be contiguous and start at 0 since they're directly used as
+// array indices.
enum class SnapshotSpace : byte {
- kReadOnlyHeap,
- kOld,
- kCode,
+ kReadOnlyHeap = 0,
+ kOld = 1,
+ kCode = 2,
};
-static constexpr int kNumberOfSnapshotSpaces =
- static_cast<int>(SnapshotSpace::kCode) + 1;
+static constexpr int kNumberOfSnapshotSpaces = 3;
class SerializerReference {
private:
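The explicit enumerator values document an invariant the serializer relies on when it uses SnapshotSpace directly as an array index, as in the per-space statistics further down in this diff (declaration shown here only for illustration):

  size_t allocation_size_[kNumberOfSnapshotSpaces] = {0};
  // ...
  allocation_size_[static_cast<int>(SnapshotSpace::kOld)] += size;  // index 1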
diff --git a/deps/v8/src/snapshot/roots-serializer.h b/deps/v8/src/snapshot/roots-serializer.h
index 7a699a7645..739d8df903 100644
--- a/deps/v8/src/snapshot/roots-serializer.h
+++ b/deps/v8/src/snapshot/roots-serializer.h
@@ -46,6 +46,8 @@ class RootsSerializer : public Serializer {
// Serializes |object| if not previously seen and returns its cache index.
int SerializeInObjectCache(Handle<HeapObject> object);
+ bool object_cache_empty() { return object_cache_index_map_.size() == 0; }
+
private:
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;
diff --git a/deps/v8/src/snapshot/serializer-deserializer.h b/deps/v8/src/snapshot/serializer-deserializer.h
index 5afcd7e2b8..586e7f6f13 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -152,7 +152,8 @@ class SerializerDeserializer : public RootVisitor {
// register as the pending field. We could either hack around this, or
// simply introduce this new bytecode.
kNewMetaMap,
- // Special construction bytecode for Code object bodies, which have a more
+ // Special construction bytecode for InstructionStream object bodies, which
+ // have a more
// complex deserialization ordering and RelocInfo processing.
kCodeBody,
diff --git a/deps/v8/src/snapshot/serializer-inl.h b/deps/v8/src/snapshot/serializer-inl.h
index 681d90d60b..3767f192e5 100644
--- a/deps/v8/src/snapshot/serializer-inl.h
+++ b/deps/v8/src/snapshot/serializer-inl.h
@@ -14,13 +14,15 @@ namespace internal {
bool Serializer::IsNotMappedSymbol(HeapObject obj) const {
Object not_mapped_symbol = ReadOnlyRoots(isolate()).not_mapped_symbol();
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // It's possible that a Code object might have the same compressed value
- // as the not_mapped_symbol, so we must compare full pointers.
+ // It's possible that an InstructionStream object might have the same
+ // compressed value as the not_mapped_symbol, so we must compare full
+ // pointers.
// TODO(v8:11880): Avoid the need for this special case by never putting
- // Code references anywhere except the CodeDadaContainer objects.
- // In particular, the Code objects should not appear in serializer's
- // identity map. This should be possible once the IsolateData::builtins
- // table is migrated to contain CodeT references.
+ // InstructionStream references anywhere except the CodeDataContainer
+ // objects. In particular, the InstructionStream objects should not appear
+ // in the serializer's identity map. This should be possible once the
+ // IsolateData::builtins table is migrated to contain Code
+ // references.
return obj.ptr() == not_mapped_symbol.ptr();
}
return obj == not_mapped_symbol;
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index eeb552ba56..99f7ee1ceb 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -11,6 +11,7 @@
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/code.h"
+#include "src/objects/descriptor-array.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/map.h"
@@ -42,7 +43,7 @@ Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
stack_(isolate->heap())
#endif
{
-#ifdef OBJECT_PRINT
+#ifdef VERBOSE_SERIALIZATION_STATISTICS
if (v8_flags.serialization_statistics) {
for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) {
// Value-initialized to 0.
@@ -50,7 +51,7 @@ Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
instance_type_size_[space] = std::make_unique<size_t[]>(kInstanceTypes);
}
}
-#endif // OBJECT_PRINT
+#endif // VERBOSE_SERIALIZATION_STATISTICS
}
#ifdef DEBUG
@@ -62,11 +63,11 @@ void Serializer::CountAllocation(Map map, int size, SnapshotSpace space) {
const int space_number = static_cast<int>(space);
allocation_size_[space_number] += size;
-#ifdef OBJECT_PRINT
+#ifdef VERBOSE_SERIALIZATION_STATISTICS
int instance_type = map.instance_type();
instance_type_count_[space_number][instance_type]++;
instance_type_size_[space_number][instance_type] += size;
-#endif // OBJECT_PRINT
+#endif // VERBOSE_SERIALIZATION_STATISTICS
}
int Serializer::TotalAllocationSize() const {
@@ -77,39 +78,62 @@ int Serializer::TotalAllocationSize() const {
return sum;
}
+namespace {
+
+const char* ToString(SnapshotSpace space) {
+ switch (space) {
+ case SnapshotSpace::kReadOnlyHeap:
+ return "ReadOnlyHeap";
+ case SnapshotSpace::kOld:
+ return "Old";
+ case SnapshotSpace::kCode:
+ return "Code";
+ }
+}
+
+} // namespace
+
void Serializer::OutputStatistics(const char* name) {
if (!v8_flags.serialization_statistics) return;
PrintF("%s:\n", name);
+ if (!serializer_tracks_serialization_statistics()) {
+ PrintF(" <serialization statistics are not tracked>\n");
+ return;
+ }
PrintF(" Spaces (bytes):\n");
- for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
- PrintF("%16s",
- BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
+ static constexpr SnapshotSpace kAllSnapshotSpaces[] = {
+ SnapshotSpace::kReadOnlyHeap,
+ SnapshotSpace::kOld,
+ SnapshotSpace::kCode,
+ };
+
+ for (SnapshotSpace space : kAllSnapshotSpaces) {
+ PrintF("%16s", ToString(space));
}
PrintF("\n");
- for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
- PrintF("%16zu", allocation_size_[space]);
+ for (SnapshotSpace space : kAllSnapshotSpaces) {
+ PrintF("%16zu", allocation_size_[static_cast<int>(space)]);
}
+ PrintF("\n");
-#ifdef OBJECT_PRINT
+#ifdef VERBOSE_SERIALIZATION_STATISTICS
PrintF(" Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name) \
- for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) { \
- if (instance_type_count_[space][Name]) { \
- PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
- instance_type_size_[space][Name], \
- BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)), \
- #Name); \
- } \
+#define PRINT_INSTANCE_TYPE(Name) \
+ for (SnapshotSpace space : kAllSnapshotSpaces) { \
+ const int space_i = static_cast<int>(space); \
+ if (instance_type_count_[space_i][Name]) { \
+ PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space_i][Name], \
+ instance_type_size_[space_i][Name], ToString(space), #Name); \
+ } \
}
INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
-#endif // OBJECT_PRINT
-
PrintF("\n");
+#endif // VERBOSE_SERIALIZATION_STATISTICS
}
void Serializer::SerializeDeferredObjects() {
@@ -130,8 +154,8 @@ void Serializer::SerializeObject(Handle<HeapObject> obj) {
// indirection and serialize the actual string directly.
if (obj->IsThinString(isolate())) {
obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
- } else if (obj->IsCodeT(isolate())) {
- CodeT code = CodeT::cast(*obj);
+ } else if (obj->IsCode(isolate())) {
+ Code code = Code::cast(*obj);
if (code.kind() == CodeKind::BASELINE) {
// For now just serialize the BytecodeArray instead of baseline code.
// TODO(v8:11429,pthier): Handle Baseline code in cases we want to
@@ -396,16 +420,21 @@ void Serializer::InitializeCodeAddressMap() {
code_address_map_ = std::make_unique<CodeAddressMap>(isolate_);
}
-Code Serializer::CopyCode(Code code) {
+InstructionStream Serializer::CopyCode(InstructionStream code) {
code_buffer_.clear(); // Clear buffer without deleting backing store.
+ // Add InstructionStream padding which is usually added by the allocator.
+ // While this doesn't guarantee the exact same alignment, it's enough to
+ // fulfill the alignment requirements of writes during relocation.
+ code_buffer_.resize(InstructionStream::kCodeAlignmentMinusCodeHeader);
int size = code.CodeSize();
code_buffer_.insert(code_buffer_.end(),
reinterpret_cast<byte*>(code.address()),
reinterpret_cast<byte*>(code.address() + size));
// When pointer compression is enabled the checked cast will try to
- // decompress map field of off-heap Code object.
- return Code::unchecked_cast(HeapObject::FromAddress(
- reinterpret_cast<Address>(&code_buffer_.front())));
+ // decompress map field of off-heap InstructionStream object.
+ return InstructionStream::unchecked_cast(
+ HeapObject::FromAddress(reinterpret_cast<Address>(
+ &code_buffer_[InstructionStream::kCodeAlignmentMinusCodeHeader])));
}
void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
@@ -571,11 +600,11 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
uint32_t ref =
SerializeBackingStore(backing_store, byte_length, max_byte_length);
buffer.SetBackingStoreRefForSerialization(ref);
-
- // Ensure deterministic output by setting extension to null during
- // serialization.
- buffer.set_extension(nullptr);
}
+
+ // Ensure deterministic output by setting extension to null during
+ // serialization.
+ buffer.set_extension(nullptr);
}
SerializeObject();
{
@@ -742,9 +771,11 @@ void Serializer::ObjectSerializer::Serialize() {
return;
}
if (InstanceTypeChecker::IsScript(instance_type)) {
- // Clear cached line ends.
+ // Clear cached line ends & compiled lazy function positions.
Oddball undefined = ReadOnlyRoots(isolate()).undefined_value();
Handle<Script>::cast(object_)->set_line_ends(undefined);
+ Handle<Script>::cast(object_)->set_compiled_lazy_function_positions(
+ undefined);
}
// We don't expect fillers.
@@ -756,7 +787,7 @@ void Serializer::ObjectSerializer::Serialize() {
namespace {
SnapshotSpace GetSnapshotSpace(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- if (object.IsCode()) {
+ if (object.IsInstructionStream()) {
return SnapshotSpace::kCode;
} else if (ReadOnlyHeap::Contains(object)) {
return SnapshotSpace::kReadOnlyHeap;
@@ -843,9 +874,9 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
HeapObject raw = *object_;
UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), raw);
- if (raw.IsCode()) {
- // For code objects, perform a custom serialization.
- SerializeCode(map, size);
+ if (raw.IsInstructionStream()) {
+ // For InstructionStream objects, perform a custom serialization.
+ SerializeInstructionStream(map, size);
} else {
// For other objects, iterate references first.
raw.IterateBody(map, size, this);
@@ -928,9 +959,8 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
}
}
-void Serializer::ObjectSerializer::VisitCodePointer(HeapObject host,
+void Serializer::ObjectSerializer::VisitCodePointer(Code host,
CodeObjectSlot slot) {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
// A version of VisitPointers() customized for CodeObjectSlot.
HandleScope scope(isolate());
DisallowGarbageCollection no_gc;
@@ -943,13 +973,13 @@ void Serializer::ObjectSerializer::VisitCodePointer(HeapObject host,
Object contents = slot.load(code_cage_base);
if (contents.IsSmi()) {
// The contents of the CodeObjectSlot being a Smi means that the host
- // CodeDataContainer corresponds to Code-less embedded builtin trampoline,
- // the value will be serialized as a Smi.
+ // Code corresponds to a Code-less embedded builtin trampoline; the
+ // value will be serialized as a Smi.
DCHECK_EQ(contents, Smi::zero());
return;
}
DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(contents.ptr()));
- DCHECK(contents.IsCode());
+ DCHECK(contents.IsInstructionStream());
Handle<HeapObject> obj = handle(HeapObject::cast(contents), isolate());
if (!serializer_->SerializePendingObject(*obj)) {
@@ -1021,23 +1051,24 @@ class Serializer::ObjectSerializer::RelocInfoObjectPreSerializer {
explicit RelocInfoObjectPreSerializer(Serializer* serializer)
: serializer_(serializer) {}
- void VisitEmbeddedPointer(Code host, RelocInfo* target) {
+ void VisitEmbeddedPointer(RelocInfo* target) {
HeapObject object = target->target_object(isolate());
serializer_->SerializeObject(handle(object, isolate()));
num_serialized_objects_++;
}
- void VisitCodeTarget(Code host, RelocInfo* target) {
+ void VisitCodeTarget(RelocInfo* target) {
#ifdef V8_TARGET_ARCH_ARM
DCHECK(!RelocInfo::IsRelativeCodeTarget(target->rmode()));
#endif
- Code object = Code::GetCodeFromTargetAddress(target->target_address());
+ InstructionStream object =
+ InstructionStream::FromTargetAddress(target->target_address());
serializer_->SerializeObject(handle(object, isolate()));
num_serialized_objects_++;
}
- void VisitExternalReference(Code host, RelocInfo* rinfo) {}
- void VisitInternalReference(Code host, RelocInfo* rinfo) {}
- void VisitOffHeapTarget(Code host, RelocInfo* target) {}
+ void VisitExternalReference(RelocInfo* rinfo) {}
+ void VisitInternalReference(RelocInfo* rinfo) {}
+ void VisitOffHeapTarget(RelocInfo* target) {}
int num_serialized_objects() const { return num_serialized_objects_; }
@@ -1048,8 +1079,7 @@ class Serializer::ObjectSerializer::RelocInfoObjectPreSerializer {
int num_serialized_objects_ = 0;
};
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
- RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
// Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
// just track the pointer's existence as kTaggedSize in
// bytes_processed_so_far_.
@@ -1058,10 +1088,10 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
bytes_processed_so_far_ += kTaggedSize;
}
-void Serializer::ObjectSerializer::VisitExternalReference(Code host,
- RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
Address target = rinfo->target_external_reference();
- DCHECK_NE(target, kNullAddress); // Code does not reference null.
+ DCHECK_NE(target,
+ kNullAddress); // InstructionStream does not reference null.
DCHECK_IMPLIES(serializer_->EncodeExternalReference(target).is_from_api(),
!rinfo->IsCodedSpecially());
// Don't "sandboxify" external references embedded in the code.
@@ -1069,16 +1099,12 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
kExternalPointerNullTag);
}
-void Serializer::ObjectSerializer::VisitInternalReference(Code host,
- RelocInfo* rinfo) {
- Address entry = Handle<Code>::cast(object_)->entry();
+void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
+ Address entry = rinfo->code().InstructionStart();
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
- // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
- // consider using raw_instruction_size() instead of raw_body_size() in the
- // future.
- static_assert(Code::kOnHeapBodyIsContiguous);
- DCHECK_LE(target_offset, Handle<Code>::cast(object_)->raw_body_size());
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
+ DCHECK_LT(target_offset, rinfo->code().instruction_size());
sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
@@ -1118,13 +1144,11 @@ void Serializer::ObjectSerializer::VisitExternalPointer(
(InstanceTypeChecker::IsJSObject(instance_type) &&
JSObject::cast(host).GetEmbedderFieldCount() > 0) ||
// See ObjectSerializer::OutputRawData().
- (V8_EXTERNAL_CODE_SPACE_BOOL &&
- InstanceTypeChecker::IsCodeDataContainer(instance_type)));
+ InstanceTypeChecker::IsCode(instance_type));
}
}
-void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
- RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitOffHeapTarget(RelocInfo* rinfo) {
static_assert(EmbeddedData::kTableSize == Builtins::kBuiltinCount);
// Currently we don't serialize code that contains near builtin entries.
@@ -1141,8 +1165,7 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
sink_->PutInt(static_cast<int>(builtin), "builtin index");
}
-void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
- RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
// Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
// just track the pointer's existence as kTaggedSize in
// bytes_processed_so_far_.
@@ -1197,15 +1220,8 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
}
#ifdef MEMORY_SANITIZER
// Check that we do not serialize uninitialized memory.
- int msan_bytes_to_output = bytes_to_output;
- if (object_->IsSeqString()) {
- // SeqStrings may have uninitialized padding bytes. These padding
- // bytes are never read and serialized as 0s.
- msan_bytes_to_output -=
- SeqString::cast(*object_).GetDataAndPaddingSizes().padding_size;
- }
__msan_check_mem_is_initialized(
- reinterpret_cast<void*>(object_start + base), msan_bytes_to_output);
+ reinterpret_cast<void*>(object_start + base), bytes_to_output);
#endif // MEMORY_SANITIZER
PtrComprCageBase cage_base(isolate_);
if (object_->IsBytecodeArray(cage_base)) {
@@ -1219,20 +1235,20 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
} else if (object_->IsDescriptorArray(cage_base)) {
// The number of marked descriptors field can be changed by GC
// concurrently.
- static byte field_value[2] = {0};
- OutputRawWithCustomField(
- sink_, object_start, base, bytes_to_output,
- DescriptorArray::kRawNumberOfMarkedDescriptorsOffset,
- sizeof(field_value), field_value);
- } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
- object_->IsCodeDataContainer(cage_base)) {
+ const auto field_value = DescriptorArrayMarkingState::kInitialGCState;
+ static_assert(sizeof(field_value) == DescriptorArray::kSizeOfRawGcState);
+ OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
+ DescriptorArray::kRawGcStateOffset,
+ sizeof(field_value),
+ reinterpret_cast<const byte*>(&field_value));
+ } else if (object_->IsCode(cage_base)) {
// code_entry_point field contains a raw value that will be recomputed
// after deserialization, so write zeros to keep the snapshot
// deterministic.
static byte field_value[kSystemPointerSize] = {0};
OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
- CodeDataContainer::kCodeEntryPointOffset,
- sizeof(field_value), field_value);
+ Code::kCodeEntryPointOffset, sizeof(field_value),
+ field_value);
} else if (object_->IsSeqString()) {
// SeqStrings may contain padding. Serialize the padding bytes as 0s to
// make the snapshot content deterministic.
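Several of the branches above rely on OutputRawWithCustomField to keep the snapshot byte stream deterministic: the object's raw bytes are emitted as-is except for one field whose stored value can change at runtime (the descriptor array's GC marking state, the code entry point), and that field is replaced with a fixed value. A minimal standalone sketch of the idea; the names and the copy-based approach are chosen for illustration rather than taken from V8:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Emits |size| bytes starting at |object| into |sink|, but overrides the
// |field_size| bytes at |field_offset| with |field_value|, so the output
// does not depend on a field that can change at runtime.
void OutputRawWithOverriddenField(std::vector<uint8_t>* sink,
                                  const uint8_t* object, size_t size,
                                  size_t field_offset, size_t field_size,
                                  const uint8_t* field_value) {
  std::vector<uint8_t> copy(object, object + size);
  if (field_offset + field_size <= size) {
    std::memcpy(copy.data() + field_offset, field_value, field_size);
  }
  sink->insert(sink->end(), copy.begin(), copy.end());
}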
@@ -1250,7 +1266,8 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
}
}
-void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
+void Serializer::ObjectSerializer::SerializeInstructionStream(Map map,
+ int size) {
static const int kWipeOutModeMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
@@ -1261,33 +1278,39 @@ void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
- Handle<Code> on_heap_code = Handle<Code>::cast(object_);
+ Handle<InstructionStream> on_heap_istream =
+ Handle<InstructionStream>::cast(object_);
+ Handle<Code> code = handle(on_heap_istream->code(kAcquireLoad), isolate_);
// With enabled pointer compression normal accessors no longer work for
// off-heap objects, so we have to get the relocation info data via the
- // on-heap code object.
- ByteArray relocation_info = on_heap_code->unchecked_relocation_info();
-
- // To make snapshots reproducible, we make a copy of the code object
- // and wipe all pointers in the copy, which we then serialize.
- Code off_heap_code = serializer_->CopyCode(*on_heap_code);
- for (RelocIterator it(off_heap_code, relocation_info, kWipeOutModeMask);
+ // on-heap InstructionStream object.
+ // TODO(v8:13784): we can clean this up since we moved all data fields from
+ // InstructionStream to Code
+ ByteArray relocation_info = code->unchecked_relocation_info();
+
+ // To make snapshots reproducible, we make a copy of the InstructionStream
+ // object and wipe all pointers in the copy, which we then serialize.
+ InstructionStream off_heap_istream = serializer_->CopyCode(*on_heap_istream);
+ for (RelocIterator it(*code, off_heap_istream, relocation_info,
+ code->constant_pool(), kWipeOutModeMask);
!it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
rinfo->WipeOut();
}
// We need to wipe out the header fields *after* wiping out the
// relocations, because some of these fields are needed for the latter.
- off_heap_code.WipeOutHeader();
+ off_heap_istream.WipeOutHeader();
// Initially skip serializing the code header. We'll serialize it after the
- // Code body, so that the various fields the Code needs for iteration are
- // already valid.
+ // InstructionStream body, so that the various fields the InstructionStream
+ // needs for iteration are already valid.
+ // TODO(v8:13784): rename to kInstructionStreamBody
sink_->Put(kCodeBody, "kCodeBody");
- // Now serialize the wiped off-heap Code, as length + data.
- Address start = off_heap_code.address() + Code::kDataStart;
- int bytes_to_output = size - Code::kDataStart;
+ // Now serialize the wiped off-heap InstructionStream, as length + data.
+ Address start = off_heap_istream.address() + InstructionStream::kDataStart;
+ int bytes_to_output = size - InstructionStream::kDataStart;
DCHECK(IsAligned(bytes_to_output, kTaggedSize));
int tagged_to_output = bytes_to_output / kTaggedSize;
@@ -1298,17 +1321,20 @@ void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
__msan_check_mem_is_initialized(reinterpret_cast<void*>(start),
bytes_to_output);
#endif // MEMORY_SANITIZER
- sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");
+ sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output,
+ "InstructionStream");
- // Manually serialize the code header. We don't use Code::BodyDescriptor
- // here as we don't yet want to walk the RelocInfos.
+ // Manually serialize the code header. We don't use
+ // InstructionStream::BodyDescriptor here as we don't yet want to walk the
+ // RelocInfos.
DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
- VisitPointers(*on_heap_code, on_heap_code->RawField(HeapObject::kHeaderSize),
- on_heap_code->RawField(Code::kDataStart));
- DCHECK_EQ(bytes_processed_so_far_, Code::kDataStart);
+ VisitPointers(*on_heap_istream,
+ on_heap_istream->RawField(HeapObject::kHeaderSize),
+ on_heap_istream->RawField(InstructionStream::kDataStart));
+ DCHECK_EQ(bytes_processed_so_far_, InstructionStream::kDataStart);
// Now serialize RelocInfos. We can't allocate during a RelocInfo walk during
- // deserualization, so we have two passes for RelocInfo serialization:
+ // deserialization, so we have two passes for RelocInfo serialization:
// 1. A pre-serializer which serializes all allocatable objects in the
// RelocInfo, followed by a kSynchronize bytecode, and
// 2. A walk the RelocInfo with this serializer, serializing any objects
@@ -1319,19 +1345,21 @@ void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
// TODO(leszeks): We only really need to pre-serialize objects which need
// serialization, i.e. no backrefs or roots.
RelocInfoObjectPreSerializer pre_serializer(serializer_);
- for (RelocIterator it(*on_heap_code, relocation_info,
- Code::BodyDescriptor::kRelocModeMask);
+ for (RelocIterator it(*code, *on_heap_istream, relocation_info,
+ code->constant_pool(),
+ InstructionStream::BodyDescriptor::kRelocModeMask);
!it.done(); it.next()) {
it.rinfo()->Visit(&pre_serializer);
}
// Mark that the pre-serialization finished with a kSynchronize bytecode.
sink_->Put(kSynchronize, "PreSerializationFinished");
- // Finally serialize all RelocInfo objects in the on-heap Code, knowing that
- // we will not do a recursive serialization.
+ // Finally serialize all RelocInfo objects in the on-heap InstructionStream,
+ // knowing that we will not do a recursive serialization.
// TODO(leszeks): Add a scope that DCHECKs this.
- for (RelocIterator it(*on_heap_code, relocation_info,
- Code::BodyDescriptor::kRelocModeMask);
+ for (RelocIterator it(*code, *on_heap_istream, relocation_info,
+ code->constant_pool(),
+ InstructionStream::BodyDescriptor::kRelocModeMask);
!it.done(); it.next()) {
it.rinfo()->Visit(this);
}
@@ -1340,9 +1368,9 @@ void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
// serialization, so DCHECK that bytes_processed_so_far_ matches the expected
// number of bytes (i.e. the code header + a tagged size per pre-serialized
// object).
- DCHECK_EQ(
- bytes_processed_so_far_,
- Code::kDataStart + kTaggedSize * pre_serializer.num_serialized_objects());
+ DCHECK_EQ(bytes_processed_so_far_,
+ InstructionStream::kDataStart +
+ kTaggedSize * pre_serializer.num_serialized_objects());
}
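The rewritten SerializeInstructionStream above keeps snapshots reproducible by serializing a wiped copy of the code object: every relocation target holds an absolute address that differs between runs, so those spans are cleared in the copy while the referenced objects are emitted separately through the RelocInfo visitors. A standalone sketch of the wiping step, treating relocations as plain (offset, size) spans; the types and names here are illustrative:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Reloc {
  size_t offset;  // where the address is patched into the instruction bytes
  size_t size;    // how many bytes the patched address occupies
};

// Returns a copy of |code| with every relocation span zeroed, so the bytes
// no longer depend on where the code happened to be allocated.
std::vector<uint8_t> CopyAndWipeRelocations(const std::vector<uint8_t>& code,
                                            const std::vector<Reloc>& relocs) {
  std::vector<uint8_t> copy = code;
  for (const Reloc& r : relocs) {
    if (r.offset + r.size <= copy.size()) {
      std::memset(copy.data() + r.offset, 0, r.size);
    }
  }
  return copy;
}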
Serializer::HotObjectsList::HotObjectsList(Heap* heap) : heap_(heap) {
@@ -1370,5 +1398,28 @@ Handle<FixedArray> ObjectCacheIndexMap::Values(Isolate* isolate) {
return externals;
}
+bool Serializer::SerializeReadOnlyObjectReference(HeapObject obj,
+ SnapshotByteSink* sink) {
+ if (!ReadOnlyHeap::Contains(obj)) return false;
+
+ // For objects on the read-only heap, never serialize the object, but instead
+ // create a back reference that encodes the page number as the chunk_index and
+ // the offset within the page as the chunk_offset.
+ Address address = obj.address();
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
+ uint32_t chunk_index = 0;
+ ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
+ DCHECK(!read_only_space->writable());
+ for (ReadOnlyPage* page : read_only_space->pages()) {
+ if (chunk == page) break;
+ ++chunk_index;
+ }
+ uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
+ sink->Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
+ sink->PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex");
+ sink->PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset");
+ return true;
+}
+
} // namespace internal
} // namespace v8
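SerializeReadOnlyObjectReference above avoids re-serializing read-only objects by emitting a back reference built from the page index and the offset within that page. A self-contained sketch of that encoding, using plain structs instead of V8's heap classes (the Page and ReadOnlyRef types are stand-ins for illustration):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for a read-only page; illustration only.
struct Page {
  uintptr_t start;
  size_t size;
};

struct ReadOnlyRef {
  uint32_t chunk_index;   // which read-only page
  uint32_t chunk_offset;  // offset of the object within that page
};

// Returns true and fills |out| if |address| lies in one of |pages|,
// mirroring the chunk_index/chunk_offset encoding used above.
bool EncodeReadOnlyRef(const std::vector<Page>& pages, uintptr_t address,
                       ReadOnlyRef* out) {
  for (uint32_t index = 0; index < pages.size(); ++index) {
    const Page& page = pages[index];
    if (address >= page.start && address < page.start + page.size) {
      out->chunk_index = index;
      out->chunk_offset = static_cast<uint32_t>(address - page.start);
      return true;
    }
  }
  return false;  // Not a read-only object; serialize it normally.
}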
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index edd3dfea31..86a8cec8bd 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -29,7 +29,10 @@ class CodeAddressMap : public CodeEventLogger {
isolate_->v8_file_logger()->RemoveLogEventListener(this);
}
- void CodeMoveEvent(AbstractCode from, AbstractCode to) override {
+ void CodeMoveEvent(InstructionStream from, InstructionStream to) override {
+ address_to_name_map_.Move(from.address(), to.address());
+ }
+ void BytecodeMoveEvent(BytecodeArray from, BytecodeArray to) override {
address_to_name_map_.Move(from.address(), to.address());
}
@@ -115,10 +118,10 @@ class CodeAddressMap : public CodeEventLogger {
base::HashMap impl_;
};
- void LogRecordedBuffer(Handle<AbstractCode> code,
- MaybeHandle<SharedFunctionInfo>, const char* name,
- int length) override {
- address_to_name_map_.Insert(code->address(), name, length);
+ void LogRecordedBuffer(AbstractCode code, MaybeHandle<SharedFunctionInfo>,
+ const char* name, int length) override {
+ DisallowGarbageCollection no_gc;
+ address_to_name_map_.Insert(code.address(), name, length);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -186,7 +189,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
// The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
+ // tagged values except references to InstructionStream objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
@@ -266,6 +269,8 @@ class Serializer : public SerializerDeserializer {
return external_reference_encoder_.TryEncode(addr);
}
+ bool SerializeReadOnlyObjectReference(HeapObject obj, SnapshotByteSink* sink);
+
// GetInt reads 4 bytes at once, requiring padding at the end.
// Use padding_offset to specify the space you want to use after padding.
void Pad(int padding_offset = 0);
@@ -274,7 +279,7 @@ class Serializer : public SerializerDeserializer {
// of the serializer. Initialize it on demand.
void InitializeCodeAddressMap();
- Code CopyCode(Code code);
+ InstructionStream CopyCode(InstructionStream code);
void QueueDeferredObject(HeapObject obj) {
DCHECK_NULL(reference_map_.LookupReference(obj));
@@ -317,6 +322,16 @@ class Serializer : public SerializerDeserializer {
Snapshot::kReconstructReadOnlyAndSharedObjectCachesForTesting) != 0;
}
+ bool deferred_objects_empty() { return deferred_objects_.size() == 0; }
+
+ protected:
+ bool serializer_tracks_serialization_statistics() const {
+ return serializer_tracks_serialization_statistics_;
+ }
+ void set_serializer_tracks_serialization_statistics(bool v) {
+ serializer_tracks_serialization_statistics_ = v;
+ }
+
private:
// A circular queue of hot objects. This is added to in the same order as in
// Deserializer::HotObjectsList, but this stores the objects as an array of
@@ -404,12 +419,17 @@ class Serializer : public SerializerDeserializer {
int recursion_depth_ = 0;
const Snapshot::SerializerFlags flags_;
+ bool serializer_tracks_serialization_statistics_ = true;
size_t allocation_size_[kNumberOfSnapshotSpaces] = {0};
#ifdef OBJECT_PRINT
+// Verbose serialization_statistics output is only enabled conditionally.
+#define VERBOSE_SERIALIZATION_STATISTICS
+#endif
+#ifdef VERBOSE_SERIALIZATION_STATISTICS
static constexpr int kInstanceTypes = LAST_TYPE + 1;
std::unique_ptr<int[]> instance_type_count_[kNumberOfSnapshotSpaces];
std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSnapshotSpaces];
-#endif // OBJECT_PRINT
+#endif // VERBOSE_SERIALIZATION_STATISTICS
#ifdef DEBUG
GlobalHandleVector<HeapObject> back_refs_;
@@ -442,12 +462,12 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
ObjectSlot end) override;
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override;
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
- void VisitEmbeddedPointer(Code host, RelocInfo* target) override;
- void VisitExternalReference(Code host, RelocInfo* rinfo) override;
- void VisitInternalReference(Code host, RelocInfo* rinfo) override;
- void VisitCodeTarget(Code host, RelocInfo* target) override;
- void VisitOffHeapTarget(Code host, RelocInfo* target) override;
+ void VisitCodePointer(Code host, CodeObjectSlot slot) override;
+ void VisitEmbeddedPointer(RelocInfo* target) override;
+ void VisitExternalReference(RelocInfo* rinfo) override;
+ void VisitInternalReference(RelocInfo* rinfo) override;
+ void VisitCodeTarget(RelocInfo* target) override;
+ void VisitOffHeapTarget(RelocInfo* target) override;
void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
ExternalPointerTag tag) override;
@@ -465,7 +485,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void OutputExternalReference(Address target, int target_size, bool sandboxify,
ExternalPointerTag tag);
void OutputRawData(Address up_to);
- void SerializeCode(Map map, int size);
+ void SerializeInstructionStream(Map map, int size);
uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length,
Maybe<int32_t> max_byte_length);
void SerializeJSTypedArray();
diff --git a/deps/v8/src/snapshot/shared-heap-deserializer.cc b/deps/v8/src/snapshot/shared-heap-deserializer.cc
index d71220be73..f6cd57c904 100644
--- a/deps/v8/src/snapshot/shared-heap-deserializer.cc
+++ b/deps/v8/src/snapshot/shared-heap-deserializer.cc
@@ -12,7 +12,7 @@ namespace internal {
void SharedHeapDeserializer::DeserializeIntoIsolate() {
// Don't deserialize into client Isolates. If there are client Isolates, the
// shared heap object cache should already be populated.
- if (isolate()->has_shared_heap() && !isolate()->is_shared_space_isolate()) {
+ if (isolate()->has_shared_space() && !isolate()->is_shared_space_isolate()) {
DCHECK(!isolate()->shared_heap_object_cache()->empty());
return;
}
diff --git a/deps/v8/src/snapshot/shared-heap-serializer.cc b/deps/v8/src/snapshot/shared-heap-serializer.cc
index e8768f4ce1..6bd29774d7 100644
--- a/deps/v8/src/snapshot/shared-heap-serializer.cc
+++ b/deps/v8/src/snapshot/shared-heap-serializer.cc
@@ -95,7 +95,7 @@ bool SharedHeapSerializer::SerializeUsingSharedHeapObjectCache(
// not present in the startup snapshot to be serialized.
if (ShouldReconstructSharedHeapObjectCacheForTesting()) {
std::vector<Object>* existing_cache =
- isolate()->shared_heap_isolate()->shared_heap_object_cache();
+ isolate()->shared_space_isolate()->shared_heap_object_cache();
const size_t existing_cache_size = existing_cache->size();
// This is strictly < because the existing cache contains the terminating
// undefined value, which the reconstructed cache does not.
@@ -201,12 +201,12 @@ bool SharedHeapSerializer::ShouldReconstructSharedHeapObjectCacheForTesting()
// need to reconstruct the shared heap object cache because it is not actually
// shared.
return reconstruct_read_only_and_shared_object_caches_for_testing() &&
- isolate()->has_shared_heap();
+ isolate()->has_shared_space();
}
void SharedHeapSerializer::ReconstructSharedHeapObjectCacheForTesting() {
std::vector<Object>* cache =
- isolate()->shared_heap_isolate()->shared_heap_object_cache();
+ isolate()->shared_space_isolate()->shared_heap_object_cache();
// Don't reconstruct the final element, which is always undefined and marks
// the end of the cache, since serializing the live Isolate may extend the
// shared object cache.
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 3af67804cf..c444670ba0 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -316,7 +316,7 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
// Test serialization.
{
- SafepointKind safepoint_kind = isolate->has_shared_heap()
+ SafepointKind safepoint_kind = isolate->has_shared_space()
? SafepointKind::kGlobal
: SafepointKind::kIsolate;
SafepointScope safepoint_scope(isolate, safepoint_kind);
@@ -325,7 +325,7 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
Snapshot::SerializerFlags flags(
Snapshot::kAllowUnknownExternalReferencesForTesting |
Snapshot::kAllowActiveIsolateForTesting |
- ((isolate->has_shared_heap() || ReadOnlyHeap::IsReadOnlySpaceShared())
+ ((isolate->has_shared_space() || ReadOnlyHeap::IsReadOnlySpaceShared())
? Snapshot::kReconstructReadOnlyAndSharedObjectCachesForTesting
: 0));
serialized_data = Snapshot::Create(isolate, *default_context,
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index cd237d9e58..d8d81b6805 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -9,6 +9,8 @@
#include "src/execution/v8threads.h"
#include "src/handles/handles-inl.h"
#include "src/heap/paged-spaces-inl.h"
+#include "src/logging/counters-scopes.h"
+#include "src/logging/log.h"
#include "src/objects/oddball.h"
#include "src/roots/roots-inl.h"
@@ -16,6 +18,8 @@ namespace v8 {
namespace internal {
void StartupDeserializer::DeserializeIntoIsolate() {
+ NestedTimedHistogramScope histogram_timer(
+ isolate()->counters()->snapshot_deserialize_isolate());
HandleScope scope(isolate());
// No active threads.
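The NestedTimedHistogramScope added above measures the full isolate deserialization and reports the duration to a counter when the scope ends. The shape is a plain RAII timer; a generic sketch of that pattern (this is not V8's histogram machinery):

#include <chrono>
#include <cstdio>

// RAII scope that measures wall-clock time between construction and
// destruction and reports it, analogous in shape to a timed histogram scope.
class ScopedTimer {
 public:
  explicit ScopedTimer(const char* label)
      : label_(label), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto elapsed = std::chrono::steady_clock::now() - start_;
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(elapsed);
    std::printf("%s: %lld us\n", label_, static_cast<long long>(us.count()));
  }

 private:
  const char* label_;
  std::chrono::steady_clock::time_point start_;
};

void DeserializeIntoIsolate() {
  ScopedTimer timer("snapshot_deserialize_isolate");
  // ... deserialization work would go here ...
}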
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 2432d9e48b..f7af6a4ee6 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -5,7 +5,7 @@
#include "src/snapshot/startup-serializer.h"
#include "src/execution/v8threads.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/contexts.h"
@@ -85,16 +85,11 @@ StartupSerializer::~StartupSerializer() {
#ifdef DEBUG
namespace {
-bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
- if (!obj.IsCode()) return false;
-
- Code code = Code::cast(obj);
- if (code.kind() == CodeKind::REGEXP) return false;
- if (!code.is_builtin()) return true;
- if (code.is_off_heap_trampoline()) return false;
-
- // An on-heap builtin.
- return true;
+bool IsUnexpectedInstructionStreamObject(Isolate* isolate, HeapObject obj) {
+ if (!obj.IsInstructionStream()) return false;
+ // TODO(jgruber): Is REGEXP code still fully supported?
+ return InstructionStream::cast(obj).code(kAcquireLoad).kind() !=
+ CodeKind::REGEXP;
}
} // namespace
@@ -114,7 +109,7 @@ void StartupSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
{
DisallowGarbageCollection no_gc;
HeapObject raw = *obj;
- DCHECK(!IsUnexpectedCodeObject(isolate(), raw));
+ DCHECK(!IsUnexpectedInstructionStreamObject(isolate(), raw));
if (SerializeHotObject(raw)) return;
if (IsRootAndHasBeenSerialized(raw) && SerializeRoot(raw)) return;
}
diff --git a/deps/v8/src/snapshot/static-roots-gen.cc b/deps/v8/src/snapshot/static-roots-gen.cc
new file mode 100644
index 0000000000..33614c6401
--- /dev/null
+++ b/deps/v8/src/snapshot/static-roots-gen.cc
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/static-roots-gen.h"
+
+#include <fstream>
+
+#include "src/common/globals.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/objects-definitions.h"
+#include "src/objects/visitors.h"
+#include "src/roots/roots-inl.h"
+#include "src/roots/roots.h"
+
+namespace v8 {
+namespace internal {
+
+class StaticRootsTableGenImpl {
+ public:
+ explicit StaticRootsTableGenImpl(Isolate* isolate) {
+ // Collect all roots
+ ReadOnlyRoots ro_roots(isolate);
+ {
+ RootIndex pos = RootIndex::kFirstReadOnlyRoot;
+#define ADD_ROOT(_, value, CamelName) \
+ { \
+ Tagged_t ptr = V8HeapCompressionScheme::CompressObject( \
+ ro_roots.unchecked_##value().ptr()); \
+ sorted_roots_[ptr].push_back(pos); \
+ camel_names_[RootIndex::k##CamelName] = #CamelName; \
+ ++pos; \
+ }
+ READ_ONLY_ROOT_LIST(ADD_ROOT)
+#undef ADD_ROOT
+ }
+ }
+
+ const std::map<Tagged_t, std::list<RootIndex>>& sorted_roots() {
+ return sorted_roots_;
+ }
+
+ const std::string& camel_name(RootIndex idx) { return camel_names_.at(idx); }
+
+ private:
+ std::map<Tagged_t, std::list<RootIndex>> sorted_roots_;
+ std::unordered_map<RootIndex, std::string> camel_names_;
+};
+
+void StaticRootsTableGen::write(Isolate* isolate, const char* file) {
+ CHECK_WITH_MSG(!V8_STATIC_ROOTS_BOOL,
+ "Re-generating the table of roots is only supported in builds "
+ "with v8_enable_static_roots disabled");
+ CHECK(file);
+ static_assert(static_cast<int>(RootIndex::kFirstReadOnlyRoot) == 0);
+
+ std::ofstream out(file, std::ios::binary);
+
+ out << "// Copyright 2022 the V8 project authors. All rights reserved.\n"
+ << "// Use of this source code is governed by a BSD-style license "
+ "that can be\n"
+ << "// found in the LICENSE file.\n"
+ << "\n"
+ << "// This file is automatically generated by "
+ "`tools/dev/gen-static-roots.py`. Do\n// not edit manually.\n"
+ << "\n"
+ << "#ifndef V8_ROOTS_STATIC_ROOTS_H_\n"
+ << "#define V8_ROOTS_STATIC_ROOTS_H_\n"
+ << "\n"
+ << "#include \"src/common/globals.h\"\n"
+ << "\n"
+ << "#if V8_STATIC_ROOTS_BOOL\n"
+ << "\n"
+ << "#include \"src/objects/instance-type.h\"\n"
+ << "#include \"src/roots/roots.h\"\n"
+ << "\n"
+ << "// Disabling Wasm or Intl invalidates the contents of "
+ "static-roots.h.\n"
+ << "// TODO(olivf): To support static roots for multiple build "
+ "configurations we\n"
+ << "// will need to generate target specific versions of "
+ "this file.\n"
+ << "static_assert(V8_ENABLE_WEBASSEMBLY);\n"
+ << "static_assert(V8_INTL_SUPPORT);\n"
+ << "\n"
+ << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n"
+ << "struct StaticReadOnlyRoot {\n";
+
+ // Output a symbol for every root. Ordered by ptr to make it easier to see the
+ // memory layout of the read only page.
+ const auto size = static_cast<int>(RootIndex::kReadOnlyRootsCount);
+ StaticRootsTableGenImpl gen(isolate);
+
+ for (auto& entry : gen.sorted_roots()) {
+ Tagged_t ptr = entry.first;
+ const std::list<RootIndex>& roots = entry.second;
+
+ for (RootIndex root : roots) {
+ static const char* kPreString = " static constexpr Tagged_t k";
+ const std::string& name = gen.camel_name(root);
+ size_t ptr_len = ceil(log2(ptr) / 4.0);
+ // Full line is: "kPreString|name = 0x.....;"
+ size_t len = strlen(kPreString) + name.length() + 5 + ptr_len + 1;
+ out << kPreString << name << " =";
+ if (len > 80) out << "\n ";
+ out << " 0x" << std::hex << ptr << std::dec << ";\n";
+ }
+ }
+ out << "};\n";
+
+ // Output in order of roots table
+ out << "\nstatic constexpr std::array<Tagged_t, " << size
+ << "> StaticReadOnlyRootsPointerTable = {\n";
+
+ {
+#define ENTRY(_1, _2, CamelName) \
+ out << " StaticReadOnlyRoot::k" << #CamelName << ",\n";
+ READ_ONLY_ROOT_LIST(ENTRY)
+#undef ENTRY
+ out << "};\n";
+ }
+ out << "\n"
+ << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "#endif // V8_STATIC_ROOTS_BOOL\n"
+ << "#endif // V8_ROOTS_STATIC_ROOTS_H_\n";
+}
+
+} // namespace internal
+} // namespace v8
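For orientation, the generator above produces a header shaped roughly like the snippet below. The root names and compressed-pointer values are made up for illustration, and the Tagged_t alias stands in for V8's compressed tagged type:

#include <array>
#include <cstdint>

using Tagged_t = uint32_t;  // stand-in for V8's compressed pointer type

// Constants are emitted ordered by compressed address, so the struct also
// documents the layout of the read-only page...
struct StaticReadOnlyRoot {
  static constexpr Tagged_t kUndefinedValue = 0x11;
  static constexpr Tagged_t kNullValue = 0x2d;
};

// ...while the pointer table follows the read-only roots list order.
static constexpr std::array<Tagged_t, 2> StaticReadOnlyRootsPointerTable = {
    StaticReadOnlyRoot::kUndefinedValue,
    StaticReadOnlyRoot::kNullValue,
};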
diff --git a/deps/v8/src/snapshot/static-roots-gen.h b/deps/v8/src/snapshot/static-roots-gen.h
new file mode 100644
index 0000000000..2422df75d0
--- /dev/null
+++ b/deps/v8/src/snapshot/static-roots-gen.h
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_STATIC_ROOTS_GEN_H_
+#define V8_SNAPSHOT_STATIC_ROOTS_GEN_H_
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+class StaticRootsTableGen {
+ public:
+ static void write(Isolate* isolate, const char* file);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_STATIC_ROOTS_GEN_H_
diff --git a/deps/v8/src/strings/string-builder-inl.h b/deps/v8/src/strings/string-builder-inl.h
index b6ee62ce0a..0170415833 100644
--- a/deps/v8/src/strings/string-builder-inl.h
+++ b/deps/v8/src/strings/string-builder-inl.h
@@ -39,6 +39,10 @@ class FixedArrayBuilder {
explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity);
explicit FixedArrayBuilder(Handle<FixedArray> backing_store);
+ // Creates a FixedArrayBuilder which allocates its backing store lazily when
+ // EnsureCapacity is called.
+ static FixedArrayBuilder Lazy(Isolate* isolate);
+
bool HasCapacity(int elements);
void EnsureCapacity(Isolate* isolate, int elements);
@@ -51,9 +55,9 @@ class FixedArrayBuilder {
int capacity();
- Handle<JSArray> ToJSArray(Handle<JSArray> target_array);
-
private:
+ explicit FixedArrayBuilder(Isolate* isolate);
+
Handle<FixedArray> array_;
int length_;
bool has_non_smi_elements_;
@@ -257,26 +261,6 @@ class IncrementalStringBuilder {
};
template <typename DestChar>
- class NoExtendString : public NoExtend<DestChar> {
- public:
- NoExtendString(Handle<String> string, int required_length)
- : NoExtend<DestChar>(string, 0), string_(string) {
- DCHECK(string->length() >= required_length);
- }
-
- Handle<String> Finalize() {
- Handle<SeqString> string = Handle<SeqString>::cast(string_);
- int length = NoExtend<DestChar>::written();
- Handle<String> result = SeqString::Truncate(string, length);
- string_ = Handle<String>();
- return result;
- }
-
- private:
- Handle<String> string_;
- };
-
- template <typename DestChar>
class NoExtendBuilder : public NoExtend<DestChar> {
public:
NoExtendBuilder(IncrementalStringBuilder* builder, int required_length,
@@ -325,7 +309,7 @@ class IncrementalStringBuilder {
void ShrinkCurrentPart() {
DCHECK(current_index_ < part_length_);
set_current_part(SeqString::Truncate(
- Handle<SeqString>::cast(current_part()), current_index_));
+ isolate_, Handle<SeqString>::cast(current_part()), current_index_));
}
void AppendStringByCopy(Handle<String> string);
diff --git a/deps/v8/src/strings/string-builder.cc b/deps/v8/src/strings/string-builder.cc
index 9d1e3a9574..f6871d9e6a 100644
--- a/deps/v8/src/strings/string-builder.cc
+++ b/deps/v8/src/strings/string-builder.cc
@@ -119,6 +119,16 @@ FixedArrayBuilder::FixedArrayBuilder(Handle<FixedArray> backing_store)
DCHECK_GT(backing_store->length(), 0);
}
+FixedArrayBuilder::FixedArrayBuilder(Isolate* isolate)
+ : array_(isolate->factory()->empty_fixed_array()),
+ length_(0),
+ has_non_smi_elements_(false) {}
+
+// static
+FixedArrayBuilder FixedArrayBuilder::Lazy(Isolate* isolate) {
+ return FixedArrayBuilder(isolate);
+}
+
bool FixedArrayBuilder::HasCapacity(int elements) {
int length = array_->length();
int required_length = length_ + elements;
@@ -129,6 +139,13 @@ void FixedArrayBuilder::EnsureCapacity(Isolate* isolate, int elements) {
int length = array_->length();
int required_length = length_ + elements;
if (length < required_length) {
+ if (length == 0) {
+ constexpr int kInitialCapacityForLazy = 16;
+ array_ = isolate->factory()->NewFixedArrayWithHoles(
+ std::max(kInitialCapacityForLazy, elements));
+ return;
+ }
+
int new_length = length;
do {
new_length *= 2;
@@ -155,12 +172,6 @@ void FixedArrayBuilder::Add(Smi value) {
int FixedArrayBuilder::capacity() { return array_->length(); }
-Handle<JSArray> FixedArrayBuilder::ToJSArray(Handle<JSArray> target_array) {
- JSArray::SetContent(target_array, array_);
- target_array->set_length(Smi::FromInt(length_));
- return target_array;
-}
-
ReplacementStringBuilder::ReplacementStringBuilder(Heap* heap,
Handle<String> subject,
int estimated_part_count)
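With the change above, a lazily constructed FixedArrayBuilder starts with an empty backing store, allocates max(kInitialCapacityForLazy, elements) on the first EnsureCapacity call, and keeps the existing doubling strategy afterwards. A toy vector-backed builder showing the same growth policy (illustrative only, not V8's class):

#include <algorithm>
#include <vector>

// Mirrors the lazy-allocation growth policy above. As with the V8 API,
// callers must EnsureCapacity before Add.
class LazyBuilder {
 public:
  void EnsureCapacity(int elements) {
    int required = length_ + elements;
    if (static_cast<int>(store_.size()) >= required) return;
    if (store_.empty()) {
      // Lazy case: the first allocation gets max(16, elements).
      constexpr int kInitialCapacityForLazy = 16;
      store_.resize(std::max(kInitialCapacityForLazy, elements));
      return;
    }
    // Otherwise double until the requirement fits.
    int new_length = static_cast<int>(store_.size());
    do {
      new_length *= 2;
    } while (new_length < required);
    store_.resize(new_length);
  }

  void Add(int value) { store_[length_++] = value; }

 private:
  std::vector<int> store_;
  int length_ = 0;
};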
diff --git a/deps/v8/src/strings/unicode.cc b/deps/v8/src/strings/unicode.cc
index 0a9b3bbb3d..d98d946f16 100644
--- a/deps/v8/src/strings/unicode.cc
+++ b/deps/v8/src/strings/unicode.cc
@@ -239,6 +239,37 @@ bool Utf8::ValidateEncoding(const byte* bytes, size_t length) {
return state == State::kAccept;
}
+// static
+void Utf16::ReplaceUnpairedSurrogates(const uint16_t* source_code_units,
+ uint16_t* dest_code_units,
+ size_t length) {
+ // U+FFFD (REPLACEMENT CHARACTER)
+ constexpr uint16_t kReplacement = 0xFFFD;
+
+ for (size_t i = 0; i < length; i++) {
+ const uint16_t source_code_unit = source_code_units[i];
+ const size_t copy_index = i;
+ uint16_t dest_code_unit = source_code_unit;
+ if (IsLeadSurrogate(source_code_unit)) {
+ // The current code unit is a leading surrogate. If it's not followed by a
+ // trailing surrogate, replace it with the replacement character.
+ if (i == length - 1 || !IsTrailSurrogate(source_code_units[i + 1])) {
+ dest_code_unit = kReplacement;
+ } else {
+ // Copy the paired trailing surrogate. The paired leading surrogate will
+ // be copied below.
+ ++i;
+ dest_code_units[i] = source_code_units[i];
+ }
+ } else if (IsTrailSurrogate(source_code_unit)) {
+ // All paired trailing surrogates are skipped above, so this branch is
+ // only for those that are unpaired.
+ dest_code_unit = kReplacement;
+ }
+ dest_code_units[copy_index] = dest_code_unit;
+ }
+}
+
#if V8_ENABLE_WEBASSEMBLY
bool Wtf8::ValidateEncoding(const byte* bytes, size_t length) {
using State = GeneralizedUtf8DfaDecoder::State;
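The new Utf16::ReplaceUnpairedSurrogates above copies a UTF-16 buffer while substituting U+FFFD for any lone surrogate code unit. A self-contained version of the same loop with the standard surrogate ranges spelled out, plus a tiny usage check (the helper names are local to this example):

#include <cstdint>
#include <cstdio>

namespace {

bool IsLeadSurrogate(uint16_t u) { return u >= 0xD800 && u <= 0xDBFF; }
bool IsTrailSurrogate(uint16_t u) { return u >= 0xDC00 && u <= 0xDFFF; }

// Copies |length| UTF-16 code units from |src| to |dst|, replacing any
// unpaired surrogate with U+FFFD (REPLACEMENT CHARACTER).
void ReplaceUnpairedSurrogates(const uint16_t* src, uint16_t* dst,
                               size_t length) {
  constexpr uint16_t kReplacement = 0xFFFD;
  for (size_t i = 0; i < length; i++) {
    const uint16_t unit = src[i];
    const size_t copy_index = i;
    uint16_t out = unit;
    if (IsLeadSurrogate(unit)) {
      if (i == length - 1 || !IsTrailSurrogate(src[i + 1])) {
        out = kReplacement;  // lead surrogate with no trailing partner
      } else {
        ++i;
        dst[i] = src[i];  // copy the paired trail surrogate as-is
      }
    } else if (IsTrailSurrogate(unit)) {
      out = kReplacement;  // trail surrogate without a preceding lead
    }
    dst[copy_index] = out;
  }
}

}  // namespace

int main() {
  // 'A', unpaired lead surrogate, valid pair (U+10000), 'B'.
  const uint16_t in[] = {0x0041, 0xD800, 0xD800, 0xDC00, 0x0042};
  uint16_t out[5];
  ReplaceUnpairedSurrogates(in, out, 5);
  for (uint16_t u : out) std::printf("%04X ", static_cast<unsigned>(u));
  std::printf("\n");  // prints: 0041 FFFD D800 DC00 0042
}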
diff --git a/deps/v8/src/strings/unicode.h b/deps/v8/src/strings/unicode.h
index 4b73bd4c3c..01349c70be 100644
--- a/deps/v8/src/strings/unicode.h
+++ b/deps/v8/src/strings/unicode.h
@@ -121,7 +121,7 @@ class Utf16 {
// 4 bytes and the 3 bytes that were used to encode the lead surrogate
// can be reclaimed.
static const int kMaxExtraUtf8BytesForOneUtf16CodeUnit = 3;
- // One UTF-16 surrogate is endoded (illegally) as 3 UTF-8 bytes.
+ // One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
// The illegality stems from the surrogate not being part of a pair.
static const int kUtf8BytesToCodeASurrogate = 3;
static inline uint16_t LeadSurrogate(uint32_t char_code) {
@@ -132,6 +132,10 @@ class Utf16 {
}
static inline bool HasUnpairedSurrogate(const uint16_t* code_units,
size_t length);
+
+ static void ReplaceUnpairedSurrogates(const uint16_t* source_code_units,
+ uint16_t* dest_code_units,
+ size_t length);
};
class Latin1 {
@@ -155,10 +159,12 @@ class Latin1 {
enum class Utf8Variant : uint8_t {
#if V8_ENABLE_WEBASSEMBLY
- kUtf8, // UTF-8. Decoding an invalid byte sequence or encoding a
- // surrogate codepoint signals an error.
- kWtf8, // WTF-8: like UTF-8, but allows isolated (but not paired)
- // surrogate codepoints to be encoded and decoded.
+ kUtf8, // UTF-8. Decoding an invalid byte sequence or encoding a
+ // surrogate codepoint signals an error.
+ kUtf8NoTrap, // UTF-8. Decoding an invalid byte sequence or encoding a
+ // surrogate codepoint returns null.
+ kWtf8, // WTF-8: like UTF-8, but allows isolated (but not paired)
+ // surrogate codepoints to be encoded and decoded.
#endif
kLossyUtf8, // Lossy UTF-8: Any byte sequence can be decoded without
// error, replacing invalid UTF-8 with the replacement
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index d80993e38f..52e3e300b6 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -595,11 +595,13 @@ struct AssumeTypeImpossibleExpression : Expression {
struct NewExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(NewExpression)
NewExpression(SourcePosition pos, TypeExpression* type,
- std::vector<NameAndExpression> initializers, bool pretenured)
+ std::vector<NameAndExpression> initializers, bool pretenured,
+ bool clear_padding)
: Expression(kKind, pos),
type(type),
initializers(std::move(initializers)),
- pretenured(pretenured) {}
+ pretenured(pretenured),
+ clear_padding(clear_padding) {}
void VisitAllSubExpressions(VisitCallback callback) override {
for (auto& initializer : initializers) {
@@ -611,6 +613,7 @@ struct NewExpression : Expression {
TypeExpression* type;
std::vector<NameAndExpression> initializers;
bool pretenured;
+ bool clear_padding;
};
enum class ImplicitKind { kNoImplicit, kJSImplicit, kImplicit };
@@ -724,13 +727,10 @@ struct ReturnStatement : Statement {
struct DebugStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(DebugStatement)
- DebugStatement(SourcePosition pos, const std::string& reason,
- bool never_continues)
- : Statement(kKind, pos),
- reason(reason),
- never_continues(never_continues) {}
- std::string reason;
- bool never_continues;
+ enum class Kind { kUnreachable, kDebug };
+ DebugStatement(SourcePosition pos, Kind kind)
+ : Statement(kKind, pos), kind(kind) {}
+ Kind kind;
};
struct AssertStatement : Statement {
@@ -1083,10 +1083,13 @@ struct TorqueBuiltinDeclaration : BuiltinDeclaration {
bool javascript_linkage, Identifier* name,
ParameterList parameters,
TypeExpression* return_type,
+ bool has_custom_interface_descriptor,
base::Optional<Statement*> body)
: BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning, name,
std::move(parameters), return_type),
+ has_custom_interface_descriptor(has_custom_interface_descriptor),
body(body) {}
+ bool has_custom_interface_descriptor;
base::Optional<Statement*> body;
};
diff --git a/deps/v8/src/torque/cc-generator.cc b/deps/v8/src/torque/cc-generator.cc
index a1f4d496cf..53f1d368d1 100644
--- a/deps/v8/src/torque/cc-generator.cc
+++ b/deps/v8/src/torque/cc-generator.cc
@@ -328,10 +328,9 @@ void CCGenerator::EmitInstruction(const ReturnInstruction& instruction,
ReportError("Not supported in C++ output: Return");
}
-void CCGenerator::EmitInstruction(
- const PrintConstantStringInstruction& instruction,
- Stack<std::string>* stack) {
- out() << " std::cout << " << StringLiteralQuote(instruction.message)
+void CCGenerator::EmitInstruction(const PrintErrorInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " std::cerr << " << StringLiteralQuote(instruction.message)
<< ";\n";
}
diff --git a/deps/v8/src/torque/cfg.cc b/deps/v8/src/torque/cfg.cc
index 7ac1662cd4..379e2d3e97 100644
--- a/deps/v8/src/torque/cfg.cc
+++ b/deps/v8/src/torque/cfg.cc
@@ -144,7 +144,7 @@ void CfgAssembler::Poke(StackRange destination, StackRange origin,
}
void CfgAssembler::Print(std::string s) {
- Emit(PrintConstantStringInstruction{std::move(s)});
+ Emit(PrintErrorInstruction{std::move(s)});
}
void CfgAssembler::AssertionFailure(std::string message) {
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index e3ba0f55c0..c52897f7f4 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -124,6 +124,9 @@ static const char* const ANNOTATION_CPP_RELEASE_STORE = "@cppReleaseStore";
static const char* const ANNOTATION_CPP_ACQUIRE_LOAD = "@cppAcquireLoad";
// Generate BodyDescriptor using IterateCustomWeakPointers.
static const char* const ANNOTATION_CUSTOM_WEAK_MARKING = "@customWeakMarking";
+// Do not generate an interface descriptor for this builtin.
+static const char* const ANNOTATION_CUSTOM_INTERFACE_DESCRIPTOR =
+ "@customInterfaceDescriptor";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index adc86486c7..c151877d89 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -845,10 +845,9 @@ void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
out() << ");\n";
}
-void CSAGenerator::EmitInstruction(
- const PrintConstantStringInstruction& instruction,
- Stack<std::string>* stack) {
- out() << " CodeStubAssembler(state_).Print("
+void CSAGenerator::EmitInstruction(const PrintErrorInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " CodeStubAssembler(state_).PrintErr("
<< StringLiteralQuote(instruction.message) << ");\n";
}
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 10b0d09daf..faf0bf268f 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -16,8 +16,6 @@ namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(CurrentScope)
-
QualifiedName QualifiedName::Parse(std::string qualified_name) {
std::vector<std::string> qualifications;
while (true) {
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index f8c878d329..60110fec59 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -498,22 +498,30 @@ class Method : public TorqueMacro {
class Builtin : public Callable {
public:
enum Kind { kStub, kFixedArgsJavaScript, kVarArgsJavaScript };
+ enum class Flag { kNone = 0, kCustomInterfaceDescriptor = 1 << 0 };
+ using Flags = base::Flags<Flag>;
DECLARE_DECLARABLE_BOILERPLATE(Builtin, builtin)
Kind kind() const { return kind_; }
+ Flags flags() const { return flags_; }
bool IsStub() const { return kind_ == kStub; }
bool IsVarArgsJavaScript() const { return kind_ == kVarArgsJavaScript; }
bool IsFixedArgsJavaScript() const { return kind_ == kFixedArgsJavaScript; }
+ bool HasCustomInterfaceDescriptor() const {
+ return flags_ & Flag::kCustomInterfaceDescriptor;
+ }
private:
friend class Declarations;
Builtin(std::string external_name, std::string readable_name,
- Builtin::Kind kind, const Signature& signature,
+ Builtin::Kind kind, Flags flags, const Signature& signature,
base::Optional<Statement*> body)
: Callable(Declarable::kBuiltin, std::move(external_name),
std::move(readable_name), signature, body),
- kind_(kind) {}
+ kind_(kind),
+ flags_(flags) {}
Kind kind_;
+ Flags flags_;
};
class RuntimeFunction : public Callable {
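Builtin now carries a base::Flags bitset over the new Flag enum, so additional builtin attributes can be OR-ed together instead of being threaded through as separate booleans. A self-contained sketch of the bit-flag pattern itself (this mirrors the shape, not base::Flags' actual implementation):

#include <cstdint>

enum class BuiltinFlag : uint32_t {
  kNone = 0,
  kCustomInterfaceDescriptor = 1 << 0,
  // Further flags would use 1 << 1, 1 << 2, ...
};

using BuiltinFlags = uint32_t;

constexpr BuiltinFlags operator|(BuiltinFlags lhs, BuiltinFlag rhs) {
  return lhs | static_cast<BuiltinFlags>(rhs);
}

constexpr bool HasFlag(BuiltinFlags flags, BuiltinFlag flag) {
  return (flags & static_cast<BuiltinFlags>(flag)) != 0;
}

int main() {
  BuiltinFlags flags = static_cast<BuiltinFlags>(BuiltinFlag::kNone);
  flags = flags | BuiltinFlag::kCustomInterfaceDescriptor;
  return HasFlag(flags, BuiltinFlag::kCustomInterfaceDescriptor) ? 0 : 1;
}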
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 7e46ce59c2..c881c81f8f 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -68,6 +68,12 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Builtin::Kind kind = !javascript ? Builtin::kStub
: varargs ? Builtin::kVarArgsJavaScript
: Builtin::kFixedArgsJavaScript;
+ bool has_custom_interface_descriptor = false;
+ if (decl->kind == AstNode::Kind::kTorqueBuiltinDeclaration) {
+ has_custom_interface_descriptor =
+ static_cast<TorqueBuiltinDeclaration*>(decl)
+ ->has_custom_interface_descriptor;
+ }
if (varargs && !javascript) {
Error("Rest parameters require ", decl->name,
@@ -92,11 +98,24 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
}
for (size_t i = 0; i < signature.types().size(); ++i) {
- if (signature.types()[i]->StructSupertype()) {
+ const Type* parameter_type = signature.types()[i];
+ if (parameter_type->StructSupertype()) {
Error("Builtin do not support structs as arguments, but argument ",
signature.parameter_names[i], " has type ", *signature.types()[i],
".");
}
+ if (parameter_type->IsFloat32() || parameter_type->IsFloat64()) {
+ if (!has_custom_interface_descriptor) {
+ Error("Builtin ", external_name,
+ " needs a custom interface descriptor, "
+ "because it uses type ",
+ *parameter_type, " for argument ", signature.parameter_names[i],
+            ". One reason is "
+            "that the default descriptor defines xmm0 to be the first "
+            "floating point argument register, which is currently used as "
+            "scratch on ia32 and cannot be allocated.");
+ }
+ }
}
if (signature.return_type->StructSupertype() && javascript) {
@@ -110,9 +129,12 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Error("Builtins cannot have return type void.");
}
- Builtin* builtin = Declarations::CreateBuiltin(std::move(external_name),
- std::move(readable_name), kind,
- std::move(signature), body);
+ Builtin::Flags flags = Builtin::Flag::kNone;
+ if (has_custom_interface_descriptor)
+ flags |= Builtin::Flag::kCustomInterfaceDescriptor;
+ Builtin* builtin = Declarations::CreateBuiltin(
+ std::move(external_name), std::move(readable_name), kind, flags,
+ std::move(signature), body);
// TODO(v8:12261): Recheck this.
// builtin->SetIdentifierPosition(decl->name->pos);
return builtin;
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 6d349cb8a2..c4414af8a2 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -256,21 +256,20 @@ Intrinsic* Declarations::DeclareIntrinsic(const std::string& name,
Builtin* Declarations::CreateBuiltin(std::string external_name,
std::string readable_name,
- Builtin::Kind kind, Signature signature,
-
+ Builtin::Kind kind, Builtin::Flags flags,
+ Signature signature,
base::Optional<Statement*> body) {
return RegisterDeclarable(std::unique_ptr<Builtin>(
new Builtin(std::move(external_name), std::move(readable_name), kind,
- std::move(signature), body)));
+ flags, std::move(signature), body)));
}
Builtin* Declarations::DeclareBuiltin(const std::string& name,
- Builtin::Kind kind,
+ Builtin::Kind kind, Builtin::Flags flags,
const Signature& signature,
-
base::Optional<Statement*> body) {
CheckAlreadyDeclared<Builtin>(name, "builtin");
- return Declare(name, CreateBuiltin(name, name, kind, signature, body));
+ return Declare(name, CreateBuiltin(name, name, kind, flags, signature, body));
}
RuntimeFunction* Declarations::DeclareRuntimeFunction(
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 3bd50f6e48..4aa71e113b 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -123,9 +123,10 @@ class Declarations {
static Builtin* CreateBuiltin(std::string external_name,
std::string readable_name, Builtin::Kind kind,
- Signature signature,
+ Builtin::Flags flags, Signature signature,
base::Optional<Statement*> body);
static Builtin* DeclareBuiltin(const std::string& name, Builtin::Kind kind,
+ Builtin::Flags flags,
const Signature& signature,
base::Optional<Statement*> body);
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 6be44a619b..4190f38531 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -9,8 +9,8 @@
#include <memory>
#include <vector>
+#include "src/base/contextual.h"
#include "src/base/optional.h"
-#include "src/torque/contextual.h"
#include "src/torque/source-positions.h"
#include "src/torque/utils.h"
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
index 22dcf2b63d..f5405d5f4e 100644
--- a/deps/v8/src/torque/global-context.cc
+++ b/deps/v8/src/torque/global-context.cc
@@ -8,9 +8,6 @@ namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(GlobalContext)
-DEFINE_CONTEXTUAL_VARIABLE(TargetArchitecture)
-
GlobalContext::GlobalContext(Ast ast)
: collect_language_server_data_(false),
collect_kythe_data_(false),
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index c0945e575a..aab4da221a 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -8,9 +8,9 @@
#include <map>
#include <memory>
+#include "src/base/contextual.h"
#include "src/common/globals.h"
#include "src/torque/ast.h"
-#include "src/torque/contextual.h"
#include "src/torque/cpp-builder.h"
#include "src/torque/declarable.h"
@@ -18,7 +18,7 @@ namespace v8 {
namespace internal {
namespace torque {
-class GlobalContext : public ContextualClass<GlobalContext> {
+class GlobalContext : public base::ContextualClass<GlobalContext> {
public:
GlobalContext(GlobalContext&&) V8_NOEXCEPT = default;
GlobalContext& operator=(GlobalContext&&) V8_NOEXCEPT = default;
@@ -141,7 +141,7 @@ T* RegisterDeclarable(std::unique_ptr<T> d) {
return GlobalContext::Get().RegisterDeclarable(std::move(d));
}
-class TargetArchitecture : public ContextualClass<TargetArchitecture> {
+class TargetArchitecture : public base::ContextualClass<TargetArchitecture> {
public:
explicit TargetArchitecture(bool force_32bit);
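The Torque sources above drop their private contextual.h in favor of the shared src/base/contextual.h; the pattern behind both is a stack of scoped "current" values reachable through a static pointer, pushed and popped by RAII scopes. A simplified sketch of that pattern, omitting V8's thread-local storage and export macros:

#include <cassert>
#include <utility>

// Simplified contextual variable: constructing a Scope makes its value the
// current one; destroying it restores the previous value (RAII).
template <class Derived, class VarType>
class ContextualVariable {
 public:
  class Scope {
   public:
    template <class... Args>
    explicit Scope(Args&&... args)
        : value_(std::forward<Args>(args)...), previous_(top_) {
      top_ = this;
    }
    ~Scope() { top_ = previous_; }
    VarType& value() { return value_; }

   private:
    VarType value_;
    Scope* previous_;
  };

  static VarType& Get() {
    assert(top_ != nullptr);  // must be inside some Scope
    return top_->value();
  }

 private:
  static inline Scope* top_ = nullptr;
};

// Example contextual variable holding an int.
struct CurrentDepth : ContextualVariable<CurrentDepth, int> {};

int main() {
  CurrentDepth::Scope outer(1);
  assert(CurrentDepth::Get() == 1);
  {
    CurrentDepth::Scope inner(2);
    assert(CurrentDepth::Get() == 2);
  }
  assert(CurrentDepth::Get() == 1);
}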
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 1bfa898a1e..2dac36ea73 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -1171,19 +1171,28 @@ const Type* ImplementationVisitor::Visit(BlockStatement* block) {
}
const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
+ std::string reason;
+ const Type* return_type;
+ AbortInstruction::Kind kind;
+ switch (stmt->kind) {
+ case DebugStatement::Kind::kUnreachable:
+ // Use the same string as in C++ to simplify fuzzer pattern-matching.
+ reason = base::kUnreachableCodeMessage;
+ return_type = TypeOracle::GetNeverType();
+ kind = AbortInstruction::Kind::kUnreachable;
+ break;
+ case DebugStatement::Kind::kDebug:
+ reason = "debug break";
+ return_type = TypeOracle::GetVoidType();
+ kind = AbortInstruction::Kind::kDebugBreak;
+ break;
+ }
#if defined(DEBUG)
- assembler().Emit(PrintConstantStringInstruction{"halting because of '" +
- stmt->reason + "' at " +
- PositionAsString(stmt->pos)});
+ assembler().Emit(PrintErrorInstruction{"halting because of " + reason +
+ " at " + PositionAsString(stmt->pos)});
#endif
- assembler().Emit(AbortInstruction{stmt->never_continues
- ? AbortInstruction::Kind::kUnreachable
- : AbortInstruction::Kind::kDebugBreak});
- if (stmt->never_continues) {
- return TypeOracle::GetNeverType();
- } else {
- return TypeOracle::GetVoidType();
- }
+ assembler().Emit(AbortInstruction{kind});
+ return return_type;
}
namespace {
@@ -1478,7 +1487,9 @@ void ImplementationVisitor::InitializeClass(
InitializeClass(super, allocate_result, initializer_results, layout);
}
- for (Field f : class_type->fields()) {
+ for (const Field& f : class_type->fields()) {
+ // Support optional padding fields.
+ if (f.name_and_type.type->IsVoid()) continue;
VisitResult initializer_value =
initializer_results.field_value_map.at(f.name_and_type.name);
LocationReference field =
@@ -1682,6 +1693,8 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
allocate_arguments.parameters.push_back(object_map);
allocate_arguments.parameters.push_back(
GenerateBoolConstant(expr->pretenured));
+ allocate_arguments.parameters.push_back(
+ GenerateBoolConstant(expr->clear_padding));
VisitResult allocate_result = GenerateCall(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AllocateFromNew"),
allocate_arguments, {class_type}, false);
@@ -3403,12 +3416,6 @@ std::string ImplementationVisitor::ExternalParameterName(
return std::string("p_") + name;
}
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentFileStreams)
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)
-
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
size_t label_count) {
auto i = sig.parameter_types.types.begin() + sig.implicit_count;
@@ -3570,43 +3577,47 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
if (builtin->IsStub()) {
builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
<< builtin->ExternalName();
- std::string descriptor_name = builtin->ExternalName() + "Descriptor";
- bool has_context_parameter = builtin->signature().HasContextParameter();
- size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
- TypeVector return_types = LowerType(builtin->signature().return_type);
+ if (!builtin->HasCustomInterfaceDescriptor()) {
+ std::string descriptor_name = builtin->ExternalName() + "Descriptor";
+ bool has_context_parameter =
+ builtin->signature().HasContextParameter();
+ size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
+ TypeVector return_types = LowerType(builtin->signature().return_type);
- interface_descriptors << "class " << descriptor_name
- << " : public StaticCallInterfaceDescriptor<"
- << descriptor_name << "> {\n";
+ interface_descriptors << "class " << descriptor_name
+ << " : public StaticCallInterfaceDescriptor<"
+ << descriptor_name << "> {\n";
- interface_descriptors << " public:\n";
+ interface_descriptors << " public:\n";
- if (has_context_parameter) {
- interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS(";
- } else {
- interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
- }
- interface_descriptors << return_types.size();
- for (size_t i = kFirstNonContextParameter;
- i < builtin->parameter_names().size(); ++i) {
- Identifier* parameter = builtin->parameter_names()[i];
- interface_descriptors << ", k" << CamelifyString(parameter->value);
- }
- interface_descriptors << ")\n";
-
- interface_descriptors << " DEFINE_RESULT_AND_PARAMETER_TYPES(";
- PrintCommaSeparatedList(interface_descriptors, return_types,
- MachineTypeString);
- for (size_t i = kFirstNonContextParameter;
- i < builtin->parameter_names().size(); ++i) {
- const Type* type = builtin->signature().parameter_types.types[i];
- interface_descriptors << ", " << MachineTypeString(type);
- }
- interface_descriptors << ")\n";
+ if (has_context_parameter) {
+ interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS(";
+ } else {
+ interface_descriptors
+ << " DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
+ }
+ interface_descriptors << return_types.size();
+ for (size_t i = kFirstNonContextParameter;
+ i < builtin->parameter_names().size(); ++i) {
+ Identifier* parameter = builtin->parameter_names()[i];
+ interface_descriptors << ", k" << CamelifyString(parameter->value);
+ }
+ interface_descriptors << ")\n";
- interface_descriptors << " DECLARE_DEFAULT_DESCRIPTOR("
- << descriptor_name << ")\n";
- interface_descriptors << "};\n\n";
+ interface_descriptors << " DEFINE_RESULT_AND_PARAMETER_TYPES(";
+ PrintCommaSeparatedList(interface_descriptors, return_types,
+ MachineTypeString);
+ for (size_t i = kFirstNonContextParameter;
+ i < builtin->parameter_names().size(); ++i) {
+ const Type* type = builtin->signature().parameter_types.types[i];
+ interface_descriptors << ", " << MachineTypeString(type);
+ }
+ interface_descriptors << ")\n";
+
+ interface_descriptors << " DECLARE_DEFAULT_DESCRIPTOR("
+ << descriptor_name << ")\n";
+ interface_descriptors << "};\n\n";
+ }
} else {
builtin_definitions << "TFJ(" << builtin->ExternalName();
if (builtin->IsVarArgsJavaScript()) {
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
index 52f0f81976..a96a563c8f 100644
--- a/deps/v8/src/torque/instructions.cc
+++ b/deps/v8/src/torque/instructions.cc
@@ -662,10 +662,10 @@ void ReturnInstruction::RecomputeDefinitionLocations(
locations->PopMany(count);
}
-void PrintConstantStringInstruction::TypeInstruction(
- Stack<const Type*>* stack, ControlFlowGraph* cfg) const {}
+void PrintErrorInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {}
-void PrintConstantStringInstruction::RecomputeDefinitionLocations(
+void PrintErrorInstruction::RecomputeDefinitionLocations(
Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {}
void AbortInstruction::TypeInstruction(Stack<const Type*>* stack,
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 88736a4ace..6981759d54 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -51,7 +51,7 @@ class RuntimeFunction;
V(GotoExternalInstruction) \
V(MakeLazyNodeInstruction) \
V(ReturnInstruction) \
- V(PrintConstantStringInstruction) \
+ V(PrintErrorInstruction) \
V(AbortInstruction) \
V(UnsafeCastInstruction)
@@ -714,16 +714,16 @@ inline std::ostream& operator<<(std::ostream& os,
return os << "Return count: " << instruction.count;
}
-struct PrintConstantStringInstruction : InstructionBase {
+struct PrintErrorInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- explicit PrintConstantStringInstruction(std::string message)
+ explicit PrintErrorInstruction(std::string message)
: message(std::move(message)) {}
std::string message;
};
-inline std::ostream& operator<<(
- std::ostream& os, const PrintConstantStringInstruction& instruction) {
+inline std::ostream& operator<<(std::ostream& os,
+ const PrintErrorInstruction& instruction) {
return os << "PrintConstantString "
<< StringLiteralQuote(instruction.message);
}
diff --git a/deps/v8/src/torque/kythe-data.cc b/deps/v8/src/torque/kythe-data.cc
index 4ef6c2910a..afdc1ab24d 100644
--- a/deps/v8/src/torque/kythe-data.cc
+++ b/deps/v8/src/torque/kythe-data.cc
@@ -8,8 +8,6 @@ namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(KytheData)
-
namespace {
KythePosition MakeKythePosition(const SourcePosition& pos) {
diff --git a/deps/v8/src/torque/kythe-data.h b/deps/v8/src/torque/kythe-data.h
index c335d484ed..e8738ead5f 100644
--- a/deps/v8/src/torque/kythe-data.h
+++ b/deps/v8/src/torque/kythe-data.h
@@ -5,8 +5,8 @@
#ifndef V8_TORQUE_KYTHE_DATA_H_
#define V8_TORQUE_KYTHE_DATA_H_
+#include "src/base/contextual.h"
#include "src/torque/ast.h"
-#include "src/torque/contextual.h"
#include "src/torque/global-context.h"
#include "src/torque/implementation-visitor.h"
@@ -46,7 +46,7 @@ class KytheConsumer {
};
inline KytheConsumer::~KytheConsumer() = default;
-class KytheData : public ContextualClass<KytheData> {
+class KytheData : public base::ContextualClass<KytheData> {
public:
KytheData() = default;
diff --git a/deps/v8/src/torque/ls/globals.h b/deps/v8/src/torque/ls/globals.h
index df6589c146..124bd18759 100644
--- a/deps/v8/src/torque/ls/globals.h
+++ b/deps/v8/src/torque/ls/globals.h
@@ -6,7 +6,8 @@
#define V8_TORQUE_LS_GLOBALS_H_
#include <fstream>
-#include "src/torque/contextual.h"
+
+#include "src/base/contextual.h"
namespace v8 {
namespace internal {
@@ -16,7 +17,7 @@ namespace torque {
// used as the communication channel. For debugging purposes a simple
// Log class is added, that allows writing diagnostics to a file configurable
// via command line flag.
-class Logger : public ContextualClass<Logger> {
+class Logger : public base::ContextualClass<Logger> {
public:
Logger() : enabled_(false) {}
~Logger() {
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
index 66995c0c89..c1f34484b1 100644
--- a/deps/v8/src/torque/ls/message-handler.cc
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -13,14 +13,12 @@
#include "src/torque/source-positions.h"
#include "src/torque/torque-compiler.h"
+EXPORT_CONTEXTUAL_VARIABLE(v8::internal::torque::DiagnosticsFiles)
+
namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(Logger)
-DEFINE_CONTEXTUAL_VARIABLE(TorqueFileList)
-DEFINE_CONTEXTUAL_VARIABLE(DiagnosticsFiles)
-
namespace ls {
static const char kContentLength[] = "Content-Length: ";
diff --git a/deps/v8/src/torque/server-data.cc b/deps/v8/src/torque/server-data.cc
index 2911a2b4cd..853beb05f5 100644
--- a/deps/v8/src/torque/server-data.cc
+++ b/deps/v8/src/torque/server-data.cc
@@ -4,15 +4,16 @@
#include "src/torque/server-data.h"
+#include "src/base/macros.h"
#include "src/torque/declarable.h"
#include "src/torque/implementation-visitor.h"
+EXPORT_CONTEXTUAL_VARIABLE(v8::internal::torque::LanguageServerData)
+
namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(LanguageServerData)
-
void LanguageServerData::AddDefinition(SourcePosition token,
SourcePosition definition) {
Get().definitions_map_[token.source].emplace_back(token, definition);
diff --git a/deps/v8/src/torque/server-data.h b/deps/v8/src/torque/server-data.h
index b80d1b67f4..89d4308d41 100644
--- a/deps/v8/src/torque/server-data.h
+++ b/deps/v8/src/torque/server-data.h
@@ -36,7 +36,7 @@ using SymbolsMap = std::map<SourceId, Symbols>;
// This contextual class holds all the necessary data to answer incoming
// LSP requests. It is reset for each compilation step and all information
// is calculated eagerly during compilation.
-class LanguageServerData : public ContextualClass<LanguageServerData> {
+class LanguageServerData : public base::ContextualClass<LanguageServerData> {
public:
LanguageServerData() = default;
diff --git a/deps/v8/src/torque/source-positions.cc b/deps/v8/src/torque/source-positions.cc
index 94f2579749..4c94093e52 100644
--- a/deps/v8/src/torque/source-positions.cc
+++ b/deps/v8/src/torque/source-positions.cc
@@ -7,14 +7,13 @@
#include <fstream>
#include "src/torque/utils.h"
+EXPORT_CONTEXTUAL_VARIABLE(v8::internal::torque::CurrentSourceFile)
+EXPORT_CONTEXTUAL_VARIABLE(v8::internal::torque::SourceFileMap)
+
namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(CurrentSourceFile)
-DEFINE_CONTEXTUAL_VARIABLE(CurrentSourcePosition)
-DEFINE_CONTEXTUAL_VARIABLE(SourceFileMap)
-
// static
const std::string& SourceFileMap::PathFromV8Root(SourceId file) {
CHECK(file.IsValid());
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index 32f60e06e0..6799cf3bff 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -7,7 +7,7 @@
#include <iostream>
-#include "src/torque/contextual.h"
+#include "src/base/contextual.h"
namespace v8 {
namespace internal {
@@ -86,7 +86,8 @@ struct SourcePosition {
DECLARE_CONTEXTUAL_VARIABLE(CurrentSourceFile, SourceId);
DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition);
-class V8_EXPORT_PRIVATE SourceFileMap : public ContextualClass<SourceFileMap> {
+class V8_EXPORT_PRIVATE SourceFileMap
+ : public base::ContextualClass<SourceFileMap> {
public:
explicit SourceFileMap(std::string v8_root) : v8_root_(std::move(v8_root)) {}
static const std::string& PathFromV8Root(SourceId file);
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index 816e42f1da..42f54d65d0 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -5,8 +5,8 @@
#ifndef V8_TORQUE_TORQUE_COMPILER_H_
#define V8_TORQUE_TORQUE_COMPILER_H_
+#include "src/base/contextual.h"
#include "src/torque/ast.h"
-#include "src/torque/contextual.h"
#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 596cc0740d..2faee24880 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -21,8 +21,6 @@ namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(CurrentAst)
-
using TypeList = std::vector<TypeExpression*>;
struct ExpressionWithSource {
@@ -42,12 +40,18 @@ struct EnumEntry {
base::Optional<TypeExpression*> type;
};
-class BuildFlags : public ContextualClass<BuildFlags> {
+class BuildFlags : public base::ContextualClass<BuildFlags> {
public:
BuildFlags() {
build_flags_["V8_SFI_HAS_UNIQUE_ID"] = V8_SFI_HAS_UNIQUE_ID;
+ build_flags_["V8_SFI_NEEDS_PADDING"] = V8_SFI_NEEDS_PADDING;
build_flags_["V8_EXTERNAL_CODE_SPACE"] = V8_EXTERNAL_CODE_SPACE_BOOL;
build_flags_["TAGGED_SIZE_8_BYTES"] = TAGGED_SIZE_8_BYTES;
+#ifdef V8_INTL_SUPPORT
+ build_flags_["V8_INTL_SUPPORT"] = true;
+#else
+ build_flags_["V8_INTL_SUPPORT"] = false;
+#endif
build_flags_["V8_ENABLE_SWISS_NAME_DICTIONARY"] =
V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL;
#ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS
@@ -82,7 +86,6 @@ class BuildFlags : public ContextualClass<BuildFlags> {
private:
std::unordered_map<std::string, bool> build_flags_;
};
-DEFINE_CONTEXTUAL_VARIABLE(BuildFlags)
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<std::string>::id =
@@ -420,12 +423,13 @@ base::Optional<ParseResult> MakeMethodCall(ParseResultIterator* child_results) {
base::Optional<ParseResult> MakeNewExpression(
ParseResultIterator* child_results) {
bool pretenured = child_results->NextAs<bool>();
+ bool clear_padding = child_results->NextAs<bool>();
auto type = child_results->NextAs<TypeExpression*>();
auto initializers = child_results->NextAs<std::vector<NameAndExpression>>();
- Expression* result =
- MakeNode<NewExpression>(type, std::move(initializers), pretenured);
+ Expression* result = MakeNode<NewExpression>(type, std::move(initializers),
+ pretenured, clear_padding);
return ParseResult{result};
}
@@ -546,7 +550,9 @@ base::Optional<ParseResult> MakeDebugStatement(
ParseResultIterator* child_results) {
auto kind = child_results->NextAs<Identifier*>()->value;
DCHECK(kind == "unreachable" || kind == "debug");
- Statement* result = MakeNode<DebugStatement>(kind, kind == "unreachable");
+ Statement* result = MakeNode<DebugStatement>(
+ kind == "unreachable" ? DebugStatement::Kind::kUnreachable
+ : DebugStatement::Kind::kDebug);
return ParseResult{result};
}
@@ -662,6 +668,8 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
ParseResultIterator* child_results) {
+ const bool has_custom_interface_descriptor = HasAnnotation(
+ child_results, ANNOTATION_CUSTOM_INTERFACE_DESCRIPTOR, "builtin");
auto transitioning = child_results->NextAs<bool>();
auto javascript_linkage = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
@@ -676,7 +684,8 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
auto return_type = child_results->NextAs<TypeExpression*>();
auto body = child_results->NextAs<base::Optional<Statement*>>();
CallableDeclaration* declaration = MakeNode<TorqueBuiltinDeclaration>(
- transitioning, javascript_linkage, name, args, return_type, body);
+ transitioning, javascript_linkage, name, args, return_type,
+ has_custom_interface_descriptor, body);
Declaration* result = declaration;
if (generic_parameters.empty()) {
if (!body) ReportError("A non-generic declaration needs a body.");
@@ -2560,6 +2569,7 @@ struct TorqueGrammar : Grammar {
Symbol newExpression = {
Rule({Token("new"),
CheckIf(Sequence({Token("("), Token("Pretenured"), Token(")")})),
+ CheckIf(Sequence({Token("("), Token("ClearPadding"), Token(")")})),
&simpleType, &initializerList},
MakeNewExpression)};
@@ -2831,8 +2841,8 @@ struct TorqueGrammar : Grammar {
&parameterListNoVararg, &returnType, optionalLabelList,
&optionalBody},
AsSingletonVector<Declaration*, MakeTorqueMacroDeclaration>()),
- Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
- Token("builtin"), &name,
+ Rule({annotations, CheckIf(Token("transitioning")),
+ CheckIf(Token("javascript")), Token("builtin"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListAllowVararg, &returnType, &optionalBody},
AsSingletonVector<Declaration*, MakeTorqueBuiltinDeclaration>()),
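
The BuildFlags change above folds more preprocessor state (V8_SFI_NEEDS_PADDING and V8_INTL_SUPPORT) into the runtime string-to-bool table that the parser consults for annotation-driven conditional compilation in .tq files. A reduced sketch of that pattern, with an illustrative flag set rather than the full list V8 registers:

// build_flags_sketch.cc -- illustrative only; flag names and error handling
// are placeholders.
#include <cstdlib>
#include <iostream>
#include <string>
#include <unordered_map>

class BuildFlagsSketch {
 public:
  BuildFlagsSketch() {
    // Flags fixed at compile time of the binary become plain booleans.
#ifdef V8_INTL_SUPPORT
    flags_["V8_INTL_SUPPORT"] = true;
#else
    flags_["V8_INTL_SUPPORT"] = false;
#endif
    flags_["EXAMPLE_FLAG"] = false;  // placeholder entry
  }

  bool GetFlag(const std::string& name) const {
    auto it = flags_.find(name);
    if (it == flags_.end()) {
      std::cerr << "Unknown flag: " << name << "\n";
      std::abort();
    }
    return it->second;
  }

 private:
  std::unordered_map<std::string, bool> flags_;
};

int main() {
  BuildFlagsSketch flags;
  std::cout << std::boolalpha << flags.GetFlag("V8_INTL_SUPPORT") << "\n";
}
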
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
index acb998a7aa..7cd9dd2a52 100644
--- a/deps/v8/src/torque/type-oracle.cc
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -11,8 +11,6 @@ namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(TypeOracle)
-
// static
const std::vector<std::unique_ptr<AggregateType>>&
TypeOracle::GetAggregateTypes() {
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 7bcbd6a77b..0f9fda629a 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "src/torque/contextual.h"
+#include "src/base/contextual.h"
#include "src/torque/declarable.h"
#include "src/torque/declarations.h"
#include "src/torque/types.h"
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace torque {
-class TypeOracle : public ContextualClass<TypeOracle> {
+class TypeOracle : public base::ContextualClass<TypeOracle> {
public:
static const AbstractType* GetAbstractType(
const Type* parent, std::string name, AbstractTypeFlags flags,
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index e8a8e63ba6..e93fbab8f4 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -562,7 +562,9 @@ std::vector<Field> ClassType::ComputeHeaderFields() const {
std::vector<Field> result;
for (Field& field : ComputeAllFields()) {
if (field.index) break;
- DCHECK(*field.offset < header_size());
+ // The header is allowed to end with an optional padding field of size 0.
+ DCHECK(std::get<0>(field.GetFieldSizeInformation()) == 0 ||
+ *field.offset < header_size());
result.push_back(std::move(field));
}
return result;
@@ -572,7 +574,9 @@ std::vector<Field> ClassType::ComputeArrayFields() const {
std::vector<Field> result;
for (Field& field : ComputeAllFields()) {
if (!field.index) {
- DCHECK(*field.offset < header_size());
+ // The header is allowed to end with an optional padding field of size 0.
+ DCHECK(std::get<0>(field.GetFieldSizeInformation()) == 0 ||
+ *field.offset < header_size());
continue;
}
result.push_back(std::move(field));
@@ -605,6 +609,8 @@ void ComputeSlotKindsHelper(std::vector<ObjectSlotKind>* slots,
size_t offset = start_offset;
for (const Field& field : fields) {
size_t field_size = std::get<0>(field.GetFieldSizeInformation());
+ // Support optional padding fields.
+ if (field_size == 0) continue;
size_t slot_index = offset / TargetArchitecture::TaggedSize();
// Rounding-up division to find the number of slots occupied by all the
// fields up to and including the current one.
@@ -1040,7 +1046,8 @@ bool Signature::HasSameTypesAs(const Signature& other,
namespace {
bool FirstTypeIsContext(const std::vector<const Type*> parameter_types) {
return !parameter_types.empty() &&
- parameter_types[0] == TypeOracle::GetContextType();
+ (parameter_types[0] == TypeOracle::GetContextType() ||
+ parameter_types[0] == TypeOracle::GetNoContextType());
}
} // namespace
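
The ComputeSlotKindsHelper change above skips the new zero-size padding fields and keeps using rounding-up division to work out how many tagged slots the fields up to the current one cover. A scalar sketch of that arithmetic (kTaggedSize hard-coded to 8 here, assuming a 64-bit build; field sizes in bytes):

// slot_count_sketch.cc -- standalone model of the rounding arithmetic.
#include <cstddef>
#include <iostream>
#include <vector>

constexpr size_t kTaggedSize = 8;  // assumption: 64-bit tagged pointers

// Returns the number of tagged slots covered by fields laid out back to back
// starting at start_offset, skipping zero-size (padding) fields.
size_t CountCoveredSlots(const std::vector<size_t>& field_sizes,
                         size_t start_offset) {
  size_t offset = start_offset;
  size_t covered = 0;
  for (size_t field_size : field_sizes) {
    if (field_size == 0) continue;  // optional padding field
    // Rounding-up division: slots occupied by all fields up to and including
    // the current one.
    covered = (offset + field_size + kTaggedSize - 1) / kTaggedSize;
    offset += field_size;
  }
  return covered;
}

int main() {
  // Three 8-byte fields with a zero-size padding entry in between: 3 slots.
  std::cout << CountCoveredSlots({8, 0, 8, 8}, 0) << "\n";  // prints 3
}
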
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 701e4e3a16..f298fd627d 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -127,6 +127,8 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
return IsAbstractName(CONSTEXPR_BOOL_TYPE_STRING);
}
bool IsVoidOrNever() const { return IsVoid() || IsNever(); }
+ bool IsFloat32() const { return IsAbstractName(FLOAT32_TYPE_STRING); }
+ bool IsFloat64() const { return IsAbstractName(FLOAT64_TYPE_STRING); }
std::string GetGeneratedTypeName() const;
std::string GetGeneratedTNodeTypeName() const;
virtual bool IsConstexpr() const {
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 3795f907b1..dff55ad784 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -14,12 +14,12 @@
#include "src/torque/declarable.h"
#include "src/torque/utils.h"
+EXPORT_CONTEXTUAL_VARIABLE(v8::internal::torque::TorqueMessages)
+
namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(TorqueMessages)
-
std::string StringLiteralUnquote(const std::string& s) {
DCHECK(('"' == s.front() && '"' == s.back()) ||
('\'' == s.front() && '\'' == s.back()));
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index e7d34d0c46..63eb764c71 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -12,9 +12,9 @@
#include <string>
#include <unordered_set>
+#include "src/base/contextual.h"
#include "src/base/functional.h"
#include "src/base/optional.h"
-#include "src/torque/contextual.h"
#include "src/torque/source-positions.h"
namespace v8 {
diff --git a/deps/v8/src/tracing/DEPS b/deps/v8/src/tracing/DEPS
index fd3531bfcc..a189e1ee78 100644
--- a/deps/v8/src/tracing/DEPS
+++ b/deps/v8/src/tracing/DEPS
@@ -1,4 +1,5 @@
include_rules = [
- "+perfetto/tracing.h",
+ "+perfetto/tracing/track_event.h",
+ "+perfetto/tracing/track_event_legacy.h",
"+protos/perfetto"
]
diff --git a/deps/v8/src/tracing/trace-categories.cc b/deps/v8/src/tracing/trace-categories.cc
index 98c41e41e8..bbfca3d020 100644
--- a/deps/v8/src/tracing/trace-categories.cc
+++ b/deps/v8/src/tracing/trace-categories.cc
@@ -5,5 +5,6 @@
#include "src/tracing/trace-categories.h"
#if defined(V8_USE_PERFETTO)
-PERFETTO_TRACK_EVENT_STATIC_STORAGE();
+PERFETTO_TRACK_EVENT_STATIC_STORAGE_IN_NAMESPACE_WITH_ATTRS(v8,
+ V8_EXPORT_PRIVATE);
#endif
diff --git a/deps/v8/src/tracing/trace-categories.h b/deps/v8/src/tracing/trace-categories.h
index 0150963a88..7dc9933f09 100644
--- a/deps/v8/src/tracing/trace-categories.h
+++ b/deps/v8/src/tracing/trace-categories.h
@@ -9,24 +9,20 @@
#if defined(V8_USE_PERFETTO)
-// Exports tracks events into the v8 namespace to avoid conflicts with embedders
-// like Chrome.
-#define PERFETTO_TRACK_EVENT_NAMESPACE v8
-
-// Export trace categories and the track event data source in components builds.
-#define PERFETTO_COMPONENT_EXPORT V8_EXPORT_PRIVATE
-
// For now most of v8 uses legacy trace events.
#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
-#include "perfetto/tracing.h"
+#include "perfetto/tracing/track_event.h"
+#include "perfetto/tracing/track_event_legacy.h"
// Trace category prefixes used in tests.
PERFETTO_DEFINE_TEST_CATEGORY_PREFIXES("v8-cat", "cat", "v8.Test2");
// List of categories used by built-in V8 trace events.
// clang-format off
-PERFETTO_DEFINE_CATEGORIES(
+PERFETTO_DEFINE_CATEGORIES_IN_NAMESPACE_WITH_ATTRS(
+ v8,
+ V8_EXPORT_PRIVATE,
perfetto::Category("cppgc"),
perfetto::Category("v8"),
perfetto::Category("v8.console"),
@@ -59,6 +55,8 @@ PERFETTO_DEFINE_CATEGORIES(
TRACE_DISABLED_BY_DEFAULT("v8.stack_trace")));
// clang-format on
+PERFETTO_USE_CATEGORIES_FROM_NAMESPACE(v8);
+
#endif // defined(V8_USE_PERFETTO)
#endif // V8_TRACING_TRACE_CATEGORIES_H_
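
For an embedder following the same migration: instead of defining PERFETTO_TRACK_EVENT_NAMESPACE and PERFETTO_COMPONENT_EXPORT before including perfetto/tracing.h, the category list, its export attribute, and the static storage now name the namespace explicitly. A minimal sketch mirroring the macro usage in the hunk above, assuming the Perfetto client library is on the include path; the namespace, export macro, and categories here are placeholders, not V8's:

// my_trace_categories.h
#include "perfetto/tracing/track_event.h"

#define MY_TRACING_EXPORT  // empty here; V8 passes V8_EXPORT_PRIVATE instead

PERFETTO_DEFINE_CATEGORIES_IN_NAMESPACE_WITH_ATTRS(
    my_embedder, MY_TRACING_EXPORT,
    perfetto::Category("app"),
    perfetto::Category("app.detail"));

// Make TRACE_EVENT and friends in this component resolve to these categories.
PERFETTO_USE_CATEGORIES_FROM_NAMESPACE(my_embedder);

// my_trace_categories.cc
// PERFETTO_TRACK_EVENT_STATIC_STORAGE_IN_NAMESPACE_WITH_ATTRS(
//     my_embedder, MY_TRACING_EXPORT);
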
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index ed53d55a88..fbaa53e0f0 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -620,7 +620,7 @@ class CallStatsScopedTracer {
Data* p_data_;
Data data_;
};
-#endif // defined(V8_RUNTIME_CALL_STATS)
+#endif // V8_RUNTIME_CALL_STATS
} // namespace tracing
} // namespace internal
@@ -660,8 +660,9 @@ class CallStatsScopedTracer {
} PERFETTO_UID(scoped_event) { \
{ isolate, 0 } \
}
-
-#endif // defined(V8_RUNTIME_CALL_STATS)
+#else // V8_RUNTIME_CALL_STATS
+#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name)
+#endif // V8_RUNTIME_CALL_STATS
#endif // defined(V8_USE_PERFETTO)
#endif // V8_TRACING_TRACE_EVENT_H_
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index 3debacb548..feee3807c6 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -16,17 +16,32 @@ TracingCategoryObserver* TracingCategoryObserver::instance_ = nullptr;
void TracingCategoryObserver::SetUp() {
TracingCategoryObserver::instance_ = new TracingCategoryObserver();
+#if defined(V8_USE_PERFETTO)
+ TrackEvent::AddSessionObserver(instance_);
+ // Fire the observer if tracing is already in progress.
+ if (TrackEvent::IsEnabled()) instance_->OnStart({});
+#else
i::V8::GetCurrentPlatform()->GetTracingController()->AddTraceStateObserver(
TracingCategoryObserver::instance_);
+#endif
}
void TracingCategoryObserver::TearDown() {
+#if defined(V8_USE_PERFETTO)
+ TrackEvent::RemoveSessionObserver(TracingCategoryObserver::instance_);
+#else
i::V8::GetCurrentPlatform()->GetTracingController()->RemoveTraceStateObserver(
TracingCategoryObserver::instance_);
+#endif
delete TracingCategoryObserver::instance_;
}
+#if defined(V8_USE_PERFETTO)
+void TracingCategoryObserver::OnStart(
+ const perfetto::DataSourceBase::StartArgs&) {
+#else
void TracingCategoryObserver::OnTraceEnabled() {
+#endif
bool enabled = false;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"), &enabled);
@@ -66,7 +81,12 @@ void TracingCategoryObserver::OnTraceEnabled() {
}
}
+#if defined(V8_USE_PERFETTO)
+void TracingCategoryObserver::OnStop(
+ const perfetto::DataSourceBase::StopArgs&) {
+#else
void TracingCategoryObserver::OnTraceDisabled() {
+#endif
i::TracingFlags::runtime_stats.fetch_and(
~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING), std::memory_order_relaxed);
diff --git a/deps/v8/src/tracing/tracing-category-observer.h b/deps/v8/src/tracing/tracing-category-observer.h
index 858bf0bdf8..bf9687b5c2 100644
--- a/deps/v8/src/tracing/tracing-category-observer.h
+++ b/deps/v8/src/tracing/tracing-category-observer.h
@@ -6,11 +6,17 @@
#define V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
#include "include/v8-platform.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace tracing {
-class TracingCategoryObserver : public TracingController::TraceStateObserver {
+class TracingCategoryObserver
+#if defined(V8_USE_PERFETTO)
+ : public perfetto::TrackEventSessionObserver {
+#else
+ : public TracingController::TraceStateObserver {
+#endif
public:
enum Mode {
ENABLED_BY_NATIVE = 1 << 0,
@@ -21,9 +27,15 @@ class TracingCategoryObserver : public TracingController::TraceStateObserver {
static void SetUp();
static void TearDown();
+#if defined(V8_USE_PERFETTO)
+ // perfetto::TrackEventSessionObserver
+ void OnStart(const perfetto::DataSourceBase::StartArgs&) override;
+ void OnStop(const perfetto::DataSourceBase::StopArgs&) override;
+#else
// v8::TracingController::TraceStateObserver
void OnTraceEnabled() final;
void OnTraceDisabled() final;
+#endif
private:
static TracingCategoryObserver* instance_;
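
The non-Perfetto path keeps the v8::TracingController::TraceStateObserver interface with OnTraceEnabled/OnTraceDisabled. A minimal sketch of an observer that mirrors what SetUp()/TearDown() wire up, flipping an atomic flag instead of the runtime-stats bitfield; the class and member names are illustrative:

#include <atomic>

#include "include/v8-platform.h"

class TracingFlagObserver final
    : public v8::TracingController::TraceStateObserver {
 public:
  void OnTraceEnabled() final {
    enabled_.store(true, std::memory_order_relaxed);
  }
  void OnTraceDisabled() final {
    enabled_.store(false, std::memory_order_relaxed);
  }
  bool enabled() const { return enabled_.load(std::memory_order_relaxed); }

 private:
  std::atomic<bool> enabled_{false};
};

// Registration, mirroring SetUp()/TearDown() above:
//   controller->AddTraceStateObserver(&observer);
//   ...
//   controller->RemoveTraceStateObserver(&observer);
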
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index 40e2deff77..93115d0edd 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -1,8 +1,4 @@
ahaas@chromium.org
thibaudm@chromium.org
-
-# Changes to this directory should also be reviewed by:
-#
-# ahaas@chromium.org
-# mseaborn@chromium.org
-# mark@chromium.org
+mark@chromium.org
+mseaborn@chromium.org
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.cc b/deps/v8/src/trap-handler/handler-inside-posix.cc
index 17af3d75dc..39d6e0d3ae 100644
--- a/deps/v8/src/trap-handler/handler-inside-posix.cc
+++ b/deps/v8/src/trap-handler/handler-inside-posix.cc
@@ -91,7 +91,12 @@ class UnmaskOobSignalScope {
#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
// This is the address where we continue on a failed "ProbeMemory". It's defined
// in "handler-outside-simulator.cc".
-extern "C" char v8_probe_memory_continuation[];
+extern char probe_memory_continuation[]
+#if V8_OS_DARWIN
+ asm("_v8_simulator_probe_memory_continuation");
+#else
+ asm("v8_simulator_probe_memory_continuation");
+#endif
#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
@@ -149,7 +154,7 @@ bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
auto* return_reg = CONTEXT_REG(rax, RAX);
*return_reg = landing_pad;
// Continue at the memory probing continuation.
- *context_ip = reinterpret_cast<uintptr_t>(&v8_probe_memory_continuation);
+ *context_ip = reinterpret_cast<uintptr_t>(&probe_memory_continuation);
#else
if (!TryFindLandingPad(fault_addr, &landing_pad)) return false;
diff --git a/deps/v8/src/trap-handler/handler-inside-win.cc b/deps/v8/src/trap-handler/handler-inside-win.cc
index 3d7a2c416a..4956437aa6 100644
--- a/deps/v8/src/trap-handler/handler-inside-win.cc
+++ b/deps/v8/src/trap-handler/handler-inside-win.cc
@@ -58,7 +58,8 @@ struct TEB {
#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
// This is the address where we continue on a failed "ProbeMemory". It's defined
// in "handler-outside-simulator.cc".
-extern "C" char v8_probe_memory_continuation[];
+extern char probe_memory_continuation[] asm(
+ "v8_simulator_probe_memory_continuation");
#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
@@ -112,7 +113,7 @@ bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
exception->ContextRecord->Rax = landing_pad;
// Continue at the memory probing continuation.
exception->ContextRecord->Rip =
- reinterpret_cast<uintptr_t>(&v8_probe_memory_continuation);
+ reinterpret_cast<uintptr_t>(&probe_memory_continuation);
#else
if (!TryFindLandingPad(fault_addr, &landing_pad)) return false;
diff --git a/deps/v8/src/trap-handler/handler-outside-simulator.cc b/deps/v8/src/trap-handler/handler-outside-simulator.cc
index 5e58719e7f..955b3c0b7c 100644
--- a/deps/v8/src/trap-handler/handler-outside-simulator.cc
+++ b/deps/v8/src/trap-handler/handler-outside-simulator.cc
@@ -14,29 +14,29 @@
#define SYMBOL(name) #name
#endif // !V8_OS_DARWIN
-// Define the ProbeMemory function declared in trap-handler-simulators.h.
-asm(
- ".globl " SYMBOL(ProbeMemory) " \n"
- SYMBOL(ProbeMemory) ": \n"
+// Define the v8::internal::trap_handler::ProbeMemory function declared in
+// trap-handler-simulators.h.
+asm(".globl " SYMBOL(v8_internal_simulator_ProbeMemory) " \n"
+ SYMBOL(v8_internal_simulator_ProbeMemory) ": \n"
// First parameter (address) passed in %rdi on Linux/Mac, and %rcx on Windows.
// The second parameter (pc) is unused here. It is read by the trap handler
// instead.
#if V8_OS_WIN
- " movb (%rcx), %al \n"
+ " movb (%rcx), %al \n"
#else
- " movb (%rdi), %al \n"
+ " movb (%rdi), %al \n"
#endif // V8_OS_WIN
// Return 0 on success.
- " xorl %eax, %eax \n"
+ " xorl %eax, %eax \n"
// Place an additional "ret" here instead of falling through to the one
// below, because (some) toolchain(s) on Mac set ".subsections_via_symbols",
// which can cause the "ret" below to be placed elsewhere. An alternative
// prevention would be to add ".alt_entry" (see
// https://reviews.llvm.org/D79926), but just adding a "ret" is simpler.
- " ret \n"
- ".globl " SYMBOL(v8_probe_memory_continuation) "\n"
- SYMBOL(v8_probe_memory_continuation) ": \n"
+ " ret \n"
+ ".globl " SYMBOL(v8_simulator_probe_memory_continuation) " \n"
+ SYMBOL(v8_simulator_probe_memory_continuation) ": \n"
// If the trap handler continues here, it wrote the landing pad in %rax.
- " ret \n");
+ " ret \n");
#endif
diff --git a/deps/v8/src/trap-handler/trap-handler-simulator.h b/deps/v8/src/trap-handler/trap-handler-simulator.h
index bfceb49697..0ab80d202e 100644
--- a/deps/v8/src/trap-handler/trap-handler-simulator.h
+++ b/deps/v8/src/trap-handler/trap-handler-simulator.h
@@ -7,6 +7,8 @@
#include <cstdint>
+#include "include/v8config.h"
+
// This header defines the ProbeMemory function to be used by simulators to
// trigger a signal at a defined location, before doing an actual memory access.
@@ -16,9 +18,7 @@
#error "Do only include this file on simulator builds on x64."
#endif
-namespace v8 {
-namespace internal {
-namespace trap_handler {
+namespace v8::internal::trap_handler {
// Probe a memory address by doing a 1-byte read from the given address. If the
// address is not readable, this will cause a trap as usual, but the trap
@@ -28,10 +28,16 @@ namespace trap_handler {
// is not registered as a protected instruction, the signal will be propagated
// as usual.
// If the read at {address} succeeds, this function returns {0} instead.
-extern "C" uintptr_t ProbeMemory(uintptr_t address, uintptr_t pc);
+uintptr_t ProbeMemory(uintptr_t address, uintptr_t pc)
+// Specify an explicit symbol name (defined in
+// handler-outside-simulator.cc). Just {extern "C"} would produce
+// "ProbeMemory", but we want something more expressive on stack traces.
+#if V8_OS_DARWIN
+ asm("_v8_internal_simulator_ProbeMemory");
+#else
+ asm("v8_internal_simulator_ProbeMemory");
+#endif
-} // namespace trap_handler
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::trap_handler
#endif // V8_TRAP_HANDLER_TRAP_HANDLER_SIMULATOR_H_
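
Both the POSIX and Windows handlers now bind the C++ declarations to explicit assembly-level names via asm labels, so the simulator-only symbols carry clear v8_internal_simulator_/v8_simulator_ prefixes (with the extra leading underscore on Darwin). A self-contained sketch of the technique, assuming GCC or Clang on x86-64 Linux; the names here are made up:

#include <cstdio>

// The declaration carries an asm label, so calls are emitted against
// "my_square_impl" regardless of C++ name mangling; the definition lives in
// the top-level asm block below (System V calling convention: argument in
// %edi, result in %eax).
int Square(int) asm("my_square_impl");

asm(".globl my_square_impl      \n"
    "my_square_impl:            \n"
    "  movl  %edi, %eax         \n"
    "  imull %edi, %eax         \n"
    "  ret                      \n");

int main() { std::printf("%d\n", Square(7)); }  // prints 49
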
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index a585bfed68..e3ba9ad3a9 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -164,9 +164,9 @@ void* GetRandomMmapAddr() {
void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
size_t alignment, PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
- DCHECK_EQ(hint, AlignedAddress(hint, alignment));
+ DCHECK(IsAligned(reinterpret_cast<Address>(hint), alignment));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
- if (v8_flags.randomize_all_allocations) {
+ if (!hint && v8_flags.randomize_all_allocations) {
hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
}
void* result = nullptr;
@@ -240,7 +240,6 @@ bool VirtualMemory::SetPermissions(Address address, size_t size,
CHECK(InVM(address, size));
bool result = page_allocator_->SetPermissions(
reinterpret_cast<void*>(address), size, access);
- DCHECK(result);
return result;
}
@@ -249,7 +248,6 @@ bool VirtualMemory::RecommitPages(Address address, size_t size,
CHECK(InVM(address, size));
bool result = page_allocator_->RecommitPages(reinterpret_cast<void*>(address),
size, access);
- DCHECK(result);
return result;
}
@@ -312,21 +310,15 @@ VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
V8_NOEXCEPT {
+ base_ = other.base_;
+ size_ = other.size_;
page_allocator_ = std::move(other.page_allocator_);
reservation_ = std::move(other.reservation_);
+ other.base_ = kNullAddress;
+ other.size_ = 0;
return *this;
}
-namespace {
-inline Address VirtualMemoryCageStart(
- Address reservation_start,
- const VirtualMemoryCage::ReservationParams& params) {
- return RoundUp(reservation_start + params.base_bias_size,
- params.base_alignment) -
- params.base_bias_size;
-}
-} // namespace
-
bool VirtualMemoryCage::InitReservation(
const ReservationParams& params, base::AddressRegion existing_reservation) {
DCHECK(!reservation_.IsReserved());
@@ -334,9 +326,7 @@ bool VirtualMemoryCage::InitReservation(
const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
CHECK(IsAligned(params.reservation_size, allocate_page_size));
CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
- (IsAligned(params.base_alignment, allocate_page_size) &&
- IsAligned(params.base_bias_size, allocate_page_size)));
- CHECK_LE(params.base_bias_size, params.reservation_size);
+ IsAligned(params.base_alignment, allocate_page_size));
if (!existing_reservation.is_empty()) {
CHECK_EQ(existing_reservation.size(), params.reservation_size);
@@ -345,101 +335,28 @@ bool VirtualMemoryCage::InitReservation(
reservation_ =
VirtualMemory(params.page_allocator, existing_reservation.begin(),
existing_reservation.size());
- base_ = reservation_.address() + params.base_bias_size;
- } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment ||
- params.base_bias_size == 0) {
- // When the base doesn't need to be aligned or when the requested
- // base_bias_size is zero, the virtual memory reservation fails only
- // due to OOM.
- Address hint =
- RoundDown(params.requested_start_hint,
- RoundUp(params.base_alignment, allocate_page_size));
+ base_ = reservation_.address();
+ } else {
+ Address hint = params.requested_start_hint;
+ // Require the hint to be properly aligned because here it's not clear
+ // anymore whether it should be rounded up or down.
+ CHECK(IsAligned(hint, params.base_alignment));
VirtualMemory reservation(params.page_allocator, params.reservation_size,
reinterpret_cast<void*>(hint),
params.base_alignment, params.jit);
+ // The virtual memory reservation fails only due to OOM.
if (!reservation.IsReserved()) return false;
reservation_ = std::move(reservation);
- base_ = reservation_.address() + params.base_bias_size;
+ base_ = reservation_.address();
CHECK_EQ(reservation_.size(), params.reservation_size);
- } else {
- // Otherwise, we need to try harder by first overreserving in hopes of
- // finding a correctly aligned address within the larger reservation.
- size_t bias_size = RoundUp(params.base_bias_size, allocate_page_size);
- Address hint =
- RoundDown(params.requested_start_hint + bias_size,
- RoundUp(params.base_alignment, allocate_page_size)) -
- bias_size;
- // Alignments requring overreserving more than twice the requested size
- // are not supported (they are too expensive and shouldn't be necessary
- // in the first place).
- DCHECK_LE(params.base_alignment, params.reservation_size);
- const int kMaxAttempts = 4;
- for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
- // Reserve a region of twice the size so that there is an aligned address
- // within it that's usable as the cage base.
- VirtualMemory padded_reservation(
- params.page_allocator, params.reservation_size * 2,
- reinterpret_cast<void*>(hint), 1, params.jit);
- if (!padded_reservation.IsReserved()) return false;
-
- // Find properly aligned sub-region inside the reservation.
- Address address =
- VirtualMemoryCageStart(padded_reservation.address(), params);
- CHECK(padded_reservation.InVM(address, params.reservation_size));
-
-#if defined(V8_OS_FUCHSIA)
- // Fuchsia does not respect given hints so as a workaround we will use
- // overreserved address space region instead of trying to re-reserve
- // a subregion.
- bool overreserve = true;
-#else
- // For the last attempt use the overreserved region to avoid an OOM crash.
- // This case can happen if there are many isolates being created in
- // parallel that race for reserving the regions.
- bool overreserve = (attempt == kMaxAttempts - 1);
-#endif
-
- if (overreserve) {
- if (padded_reservation.InVM(address, params.reservation_size)) {
- reservation_ = std::move(padded_reservation);
- base_ = address + params.base_bias_size;
- break;
- }
- } else {
- // Now free the padded reservation and immediately try to reserve an
- // exact region at aligned address. We have to do this dancing because
- // the reservation address requirement is more complex than just a
- // certain alignment and not all operating systems support freeing parts
- // of reserved address space regions.
- padded_reservation.Free();
-
- VirtualMemory reservation(
- params.page_allocator, params.reservation_size,
- reinterpret_cast<void*>(address), 1, params.jit);
- if (!reservation.IsReserved()) return false;
-
- // The reservation could still be somewhere else but we can accept it
- // if it has the required alignment.
- Address start_address =
- VirtualMemoryCageStart(reservation.address(), params);
- if (reservation.address() == start_address) {
- reservation_ = std::move(reservation);
- base_ = start_address + params.base_bias_size;
- CHECK_EQ(reservation_.size(), params.reservation_size);
- break;
- }
- }
- }
}
CHECK_NE(base_, kNullAddress);
CHECK(IsAligned(base_, params.base_alignment));
const Address allocatable_base = RoundUp(base_, params.page_size);
- const size_t allocatable_size =
- RoundDown(params.reservation_size - (allocatable_base - base_) -
- params.base_bias_size,
- params.page_size);
+ const size_t allocatable_size = RoundDown(
+ params.reservation_size - (allocatable_base - base_), params.page_size);
size_ = allocatable_base + allocatable_size - base_;
const base::PageFreeingMode page_freeing_mode =
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 87e30b43bd..662e4b0fdb 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -259,15 +259,15 @@ class VirtualMemory final {
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
- V8_EXPORT_PRIVATE bool SetPermissions(Address address, size_t size,
- PageAllocator::Permission access);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT bool SetPermissions(
+ Address address, size_t size, PageAllocator::Permission access);
// Recommits discarded pages in the given range with given permissions.
// Discarded pages must be recommitted with their original permissions
// before they are used again. |address| and |size| must be multiples of
// CommitPageSize(). Returns true on success, otherwise false.
- V8_EXPORT_PRIVATE bool RecommitPages(Address address, size_t size,
- PageAllocator::Permission access);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT bool RecommitPages(
+ Address address, size_t size, PageAllocator::Permission access);
// Frees memory in the given [address, address + size) range. address and size
// should be operating system page-aligned. The next write to this
@@ -303,29 +303,28 @@ class VirtualMemory final {
// ranges (on platforms that require code ranges) and are configurable via
// ReservationParams.
//
-// +------------+-----------+------------ ~~~ --+- ~~~ -+
-// | ... | ... | ... | ... |
-// +------------+-----------+------------ ~~~ --+- ~~~ -+
-// ^ ^ ^
-// start cage base allocatable base
+// +-----------+------------ ~~~ --+- ~~~ -+
+// | ... | ... | ... |
+// +-----------+------------ ~~~ --+- ~~~ -+
+// ^ ^
+// cage base allocatable base
//
-// <------------> <------------------->
-// base bias size allocatable size
-// <------------------------------->
-// cage size
-// <---------------------------------------------------->
-// reservation size
+// <------------------->
+// allocatable size
+// <------------------------------->
+// cage size
+// <--------------------------------------->
+// reservation size
//
// - The reservation is made using ReservationParams::page_allocator.
-// - start is the start of the virtual memory reservation.
-// - cage base is the base address of the cage.
+// - cage base is the start of the virtual memory reservation and the base
+// address of the cage.
// - allocatable base is the cage base rounded up to the nearest
// ReservationParams::page_size, and is the start of the allocatable area for
// the BoundedPageAllocator.
// - cage size is the size of the area from cage base to the end of the
// allocatable area.
//
-// - The base bias is configured by ReservationParams::base_bias_size.
// - The reservation size is configured by ReservationParams::reservation_size
// but it might be actually bigger if we end up over-reserving the virtual
// address space.
@@ -336,15 +335,16 @@ class VirtualMemory final {
// - The page size of the BoundedPageAllocator is configured by
// ReservationParams::page_size.
// - A hint for the value of start can be passed by
-// ReservationParams::requested_start_hint.
+// ReservationParams::requested_start_hint and it must be aligned to
+// ReservationParams::base_alignment.
//
// The configuration is subject to the following alignment requirements.
// Below, AllocatePageSize is short for
// ReservationParams::page_allocator->AllocatePageSize().
//
// - The reservation size must be AllocatePageSize-aligned.
-// - If the base alignment is not kAnyBaseAlignment, both the base alignment
-// and the base bias size must be AllocatePageSize-aligned.
+// - If the base alignment is not kAnyBaseAlignment then the base alignment
+// must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
class VirtualMemoryCage {
@@ -361,6 +361,10 @@ class VirtualMemoryCage {
Address base() const { return base_; }
size_t size() const { return size_; }
+ base::AddressRegion region() const {
+ return base::AddressRegion{base_, size_};
+ }
+
base::BoundedPageAllocator* page_allocator() const {
return page_allocator_.get();
}
@@ -380,7 +384,6 @@ class VirtualMemoryCage {
// See diagram above.
size_t reservation_size;
size_t base_alignment;
- size_t base_bias_size;
size_t page_size;
Address requested_start_hint;
JitPermission jit;
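
With base_bias_size gone, the cage base coincides with the reservation start and only the page alignment of the allocatable area remains to compute. A standalone sketch of the arithmetic from InitReservation above (power-of-two alignments assumed; the helper and struct names are illustrative):

#include <cstddef>
#include <cstdint>
#include <iostream>

using Address = uintptr_t;

constexpr Address RoundUp(Address x, size_t alignment) {  // power-of-two only
  return (x + alignment - 1) & ~static_cast<Address>(alignment - 1);
}
constexpr Address RoundDown(Address x, size_t alignment) {
  return x & ~static_cast<Address>(alignment - 1);
}

struct CageLayout {
  Address base;              // == reservation start (no base bias any more)
  Address allocatable_base;  // base rounded up to the allocator's page size
  size_t cage_size;          // from base to the end of the allocatable area
};

CageLayout ComputeLayout(Address reservation_start, size_t reservation_size,
                         size_t page_size) {
  CageLayout layout;
  layout.base = reservation_start;
  layout.allocatable_base = RoundUp(layout.base, page_size);
  size_t allocatable_size = RoundDown(
      reservation_size - (layout.allocatable_base - layout.base), page_size);
  layout.cage_size = layout.allocatable_base + allocatable_size - layout.base;
  return layout;
}

int main() {
  CageLayout l = ComputeLayout(0x10000, 1 << 20, 0x10000);
  std::cout << std::hex << l.allocatable_base << " " << l.cage_size << "\n";
}
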
diff --git a/deps/v8/src/utils/ostreams.h b/deps/v8/src/utils/ostreams.h
index 0d0a0e9bdc..0b5bca95ea 100644
--- a/deps/v8/src/utils/ostreams.h
+++ b/deps/v8/src/utils/ostreams.h
@@ -162,7 +162,20 @@ template <typename T>
struct PrintIteratorRange {
T start;
T end;
+ const char* separator = ", ";
+ const char* startBracket = "[";
+ const char* endBracket = "]";
+
PrintIteratorRange(T start, T end) : start(start), end(end) {}
+ PrintIteratorRange& WithoutBrackets() {
+ startBracket = "";
+ endBracket = "";
+ return *this;
+ }
+ PrintIteratorRange& WithSeparator(const char* separator) {
+ this->separator = separator;
+ return *this;
+ }
};
// Print any collection which can be iterated via std::begin and std::end.
@@ -198,12 +211,12 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
template <typename T>
std::ostream& operator<<(std::ostream& os, const PrintIteratorRange<T>& range) {
- const char* comma = "";
- os << "[";
- for (T it = range.start; it != range.end; ++it, comma = ", ") {
- os << comma << *it;
+ const char* separator = "";
+ os << range.startBracket;
+ for (T it = range.start; it != range.end; ++it, separator = range.separator) {
+ os << separator << *it;
}
- os << "]";
+ os << range.endBracket;
return os;
}
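
The PrintIteratorRange extension above lets call sites drop the brackets or change the separator without introducing a second helper type. A standalone sketch of the same idea that compiles outside V8 (the type and function names here are not V8's):

#include <iostream>
#include <vector>

template <typename It>
struct RangePrinter {
  It start;
  It end;
  const char* separator = ", ";
  const char* open = "[";
  const char* close = "]";

  RangePrinter& WithoutBrackets() {
    open = "";
    close = "";
    return *this;
  }
  RangePrinter& WithSeparator(const char* sep) {
    separator = sep;
    return *this;
  }
};

template <typename It>
RangePrinter<It> PrintRange(It start, It end) {
  return {start, end};
}

template <typename It>
std::ostream& operator<<(std::ostream& os, const RangePrinter<It>& range) {
  const char* sep = "";
  os << range.open;
  for (It it = range.start; it != range.end; ++it, sep = range.separator) {
    os << sep << *it;
  }
  return os << range.close;
}

int main() {
  std::vector<int> v{1, 2, 3};
  std::cout << PrintRange(v.begin(), v.end()) << "\n";  // [1, 2, 3]
  std::cout << PrintRange(v.begin(), v.end()).WithoutBrackets().WithSeparator(
                   "\n")
            << "\n";  // one element per line, no brackets
}
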
diff --git a/deps/v8/src/wasm/DEPS b/deps/v8/src/wasm/DEPS
index 2d310c631c..d62661839b 100644
--- a/deps/v8/src/wasm/DEPS
+++ b/deps/v8/src/wasm/DEPS
@@ -13,4 +13,8 @@ specific_include_rules = {
"c-api\.h": [
"+third_party/wasm-api/wasm.hh",
],
+ "wasm-engine\.h": [
+ # The WasmEngine may cache common call descriptors.
+ "+src/compiler/wasm-call-descriptors.h",
+ ]
}
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 3f0ae37a40..c0feba5069 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -19,31 +19,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- return lt;
- case kSignedLessEqual:
- return le;
- case kSignedGreaterThan:
- return gt;
- case kSignedGreaterEqual:
- return ge;
- case kUnsignedLessThan:
- return lo;
- case kUnsignedLessEqual:
- return ls;
- case kUnsignedGreaterThan:
- return hi;
- case kUnsignedGreaterEqual:
- return hs;
- }
-}
-
// half
// slot Frame
// -----+--------------------+---------------------------
@@ -132,22 +107,22 @@ inline Register CalculateActualAddress(LiftoffAssembler* assm,
return actual_addr_reg;
}
-inline LiftoffCondition MakeUnsigned(LiftoffCondition cond) {
+inline Condition MakeUnsigned(Condition cond) {
switch (cond) {
- case kSignedLessThan:
+ case kLessThan:
return kUnsignedLessThan;
- case kSignedLessEqual:
- return kUnsignedLessEqual;
- case kSignedGreaterThan:
+ case kLessThanEqual:
+ return kUnsignedLessThanEqual;
+ case kGreaterThan:
return kUnsignedGreaterThan;
- case kSignedGreaterEqual:
- return kUnsignedGreaterEqual;
+ case kGreaterThanEqual:
+ return kUnsignedGreaterThanEqual;
case kEqual:
- case kUnequal:
+ case kNotEqual:
case kUnsignedLessThan:
- case kUnsignedLessEqual:
+ case kUnsignedLessThanEqual:
case kUnsignedGreaterThan:
- case kUnsignedGreaterEqual:
+ case kUnsignedGreaterThanEqual:
return cond;
default:
UNREACHABLE();
@@ -187,7 +162,7 @@ inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LeaveCC, al);
}
-template <void (TurboAssembler::*op)(Register, Register, Register, Register,
+template <void (MacroAssembler::*op)(Register, Register, Register, Register,
Register),
bool is_left_shift>
inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
@@ -209,7 +184,7 @@ inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
Register* later_src_reg = is_left_shift ? &src_low : &src_high;
if (*later_src_reg == clobbered_dst_reg) {
*later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
- assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg);
+ assm->MacroAssembler::Move(*later_src_reg, clobbered_dst_reg);
}
(assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped);
@@ -235,14 +210,14 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
MinOrMax min_or_max) {
DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8);
if (lhs == rhs) {
- assm->TurboAssembler::Move(dst, lhs);
+ assm->MacroAssembler::Move(dst, lhs);
return;
}
Label done, is_nan;
if (min_or_max == MinOrMax::kMin) {
- assm->TurboAssembler::FloatMin(dst, lhs, rhs, &is_nan);
+ assm->MacroAssembler::FloatMin(dst, lhs, rhs, &is_nan);
} else {
- assm->TurboAssembler::FloatMax(dst, lhs, rhs, &is_nan);
+ assm->MacroAssembler::FloatMax(dst, lhs, rhs, &is_nan);
}
assm->b(&done);
assm->bind(&is_nan);
@@ -508,13 +483,14 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
@@ -572,7 +548,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation);
// Now allocate the stack space. Note that this might do more than just
- // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
AllocateStackSpace(frame_size);
// Jump back to the start of the function, from {pc_offset()} to
@@ -605,18 +581,16 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return kind == kS128 || is_reference(kind);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
+ MacroAssembler::Move(reg.gp(), Operand(value.to_i32()));
break;
case kI64: {
- DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
- TurboAssembler::Move(reg.low_gp(), Operand(low_word));
- TurboAssembler::Move(reg.high_gp(), Operand(high_word));
+ MacroAssembler::Move(reg.low_gp(), Operand(low_word));
+ MacroAssembler::Move(reg.high_gp(), Operand(high_word));
break;
}
case kF32:
@@ -805,12 +779,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
// The write barrier.
- Label write_barrier;
Label exit;
- CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
- &write_barrier);
- b(&exit);
- bind(&write_barrier);
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask,
+ kZero, &exit);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
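
The rewritten sequence folds the old two-branch pattern into a single CheckPageFlag that jumps straight to the exit label when the destination page has no interesting outgoing pointers. In scalar form, the fast-path filter this code emits looks roughly like the following (a model of the emitted control flow with shortened flag names, not V8 heap API):

// Returns true only when the slow-path record-write stub must run.
bool NeedsRecordWrite(bool dst_page_has_interesting_outgoing_pointers,
                      bool value_is_smi,
                      bool value_page_has_interesting_incoming_pointers) {
  if (!dst_page_has_interesting_outgoing_pointers) return false;  // first check
  if (value_is_smi) return false;  // Smis are never heap pointers
  if (!value_page_has_interesting_incoming_pointers) return false;
  return true;
}
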
@@ -1108,7 +1079,8 @@ inline void I64Store(LiftoffAssembler* lasm, LiftoffRegister dst,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
- LoadType type, LiftoffRegList /* pinned */) {
+ LoadType type, LiftoffRegList /* pinned */,
+ bool /* i64_offset */) {
if (type.value() != LoadType::kI64Load) {
Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
dmb(ISH);
@@ -1136,7 +1108,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, src, {},
liftoff::I64Store);
@@ -1151,7 +1124,8 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
liftoff::I64Binop<&Assembler::add, &Assembler::adc>);
@@ -1163,7 +1137,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>);
@@ -1175,7 +1150,8 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
liftoff::I64Binop<&Assembler::and_, &Assembler::and_>);
@@ -1187,7 +1163,8 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
liftoff::I64Binop<&Assembler::orr, &Assembler::orr>);
@@ -1199,7 +1176,8 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
liftoff::I64Binop<&Assembler::eor, &Assembler::eor>);
@@ -1212,7 +1190,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
liftoff::I64Store);
@@ -1292,7 +1271,7 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicI64CompareExchange(this, dst_addr, offset_reg, offset_imm,
expected, new_value, result);
@@ -1466,7 +1445,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
DCHECK(kind == kI32 || is_reference(kind));
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
@@ -1844,7 +1823,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount);
+ liftoff::I64Shiftop<&MacroAssembler::LslPair, true>(this, dst, src, amount);
}
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@@ -1859,7 +1838,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount);
+ liftoff::I64Shiftop<&MacroAssembler::AsrPair, false>(this, dst, src, amount);
}
void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1874,7 +1853,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::I64Shiftop<&TurboAssembler::LsrPair, false>(this, dst, src, amount);
+ liftoff::I64Shiftop<&MacroAssembler::LsrPair, false>(this, dst, src, amount);
}
void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@@ -2101,7 +2080,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) {
switch (opcode) {
case kExprI32ConvertI64:
- TurboAssembler::Move(dst.gp(), src.low_gp());
+ MacroAssembler::Move(dst.gp(), src.low_gp());
return true;
case kExprI32SConvertF32: {
UseScratchRegisterScope temps(this);
@@ -2288,7 +2267,7 @@ void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Move(dst.low_gp(), src.low_gp());
+ MacroAssembler::Move(dst.low_gp(), src.low_gp());
mov(dst.high_gp(), Operand(src.low_gp(), ASR, 31));
}
@@ -2296,28 +2275,24 @@ void LiftoffAssembler::emit_jump(Label* label) { b(label); }
void LiftoffAssembler::emit_jump(Register target) { bx(target); }
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
-
if (rhs == no_reg) {
DCHECK_EQ(kind, kI32);
cmp(lhs, Operand(0));
} else {
- DCHECK(kind == kI32 || (is_reference(kind) && (liftoff_cond == kEqual ||
- liftoff_cond == kUnequal)));
+ DCHECK(kind == kI32 ||
+ (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
cmp(lhs, rhs);
}
b(label, cond);
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
cmp(lhs, Operand(imm));
b(label, cond);
}
@@ -2334,10 +2309,8 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
mov(dst, Operand(dst, LSR, kRegSizeInBitsLog2));
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
cmp(lhs, rhs);
mov(dst, Operand(0), LeaveCC);
mov(dst, Operand(1), LeaveCC, cond);
@@ -2349,15 +2322,13 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
mov(dst, Operand(dst, LSR, 5));
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
// For signed i64 comparisons, we still need to use unsigned comparison for
// the low word (the only bit carrying signedness information is the MSB in
// the high word).
- Condition cond = liftoff::ToCondition(liftoff_cond);
- Condition unsigned_cond =
- liftoff::ToCondition(liftoff::MakeUnsigned(liftoff_cond));
+ Condition unsigned_cond = liftoff::MakeUnsigned(cond);
Label set_cond;
Label cont;
LiftoffRegister dest = LiftoffRegister(dst);
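
The comment above is the key subtlety: when an i64 comparison is split into two 32-bit word comparisons, only the high word carries the sign, so the low word must always be compared unsigned. A scalar model of a signed 64-bit less-than built from halves:

#include <cstdint>

bool Int64LessThan(int32_t lhs_hi, uint32_t lhs_lo, int32_t rhs_hi,
                   uint32_t rhs_lo) {
  if (lhs_hi != rhs_hi) return lhs_hi < rhs_hi;  // signed compare, high word
  return lhs_lo < rhs_lo;                        // unsigned compare, low word
}
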
@@ -2393,10 +2364,9 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
}
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
VFPCompareAndSetFlags(liftoff::GetFloatRegister(lhs),
liftoff::GetFloatRegister(rhs));
mov(dst, Operand(0), LeaveCC);
@@ -2407,10 +2377,9 @@ void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
}
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
VFPCompareAndSetFlags(lhs, rhs);
mov(dst, Operand(0), LeaveCC);
mov(dst, Operand(1), LeaveCC, cond);
@@ -2498,7 +2467,7 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Int64()) {
vld1(Neon32, NeonListOperand(dst.low_fp()),
NeonMemOperand(actual_src_addr));
- TurboAssembler::Move(dst.high_fp(), dst.low_fp());
+ MacroAssembler::Move(dst.high_fp(), dst.low_fp());
}
}
}
@@ -2506,24 +2475,26 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool /* i64_offset */) {
UseScratchRegisterScope temps(this);
Register actual_src_addr = liftoff::CalculateActualAddress(
this, &temps, addr, offset_reg, offset_imm);
- TurboAssembler::Move(liftoff::GetSimd128Register(dst),
+ MacroAssembler::Move(liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(src));
*protected_load_pc = pc_offset();
LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
NeonListOperand dst_op =
NeonListOperand(load_params.low_op ? dst.low_fp() : dst.high_fp());
- TurboAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx,
+ MacroAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx,
NeonMemOperand(actual_src_addr));
}
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t laneidx,
- uint32_t* protected_store_pc) {
+ uint32_t* protected_store_pc,
+ bool /* i64_offset */) {
UseScratchRegisterScope temps(this);
Register actual_dst_addr =
liftoff::CalculateActualAddress(this, &temps, dst, offset, offset_imm);
@@ -2532,7 +2503,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
LoadStoreLaneParams store_params(type.mem_rep(), laneidx);
NeonListOperand src_op =
NeonListOperand(store_params.low_op ? src.low_fp() : src.high_fp());
- TurboAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx,
+ MacroAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx,
NeonMemOperand(actual_dst_addr));
}
@@ -2545,7 +2516,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
if (dst == lhs) {
// dst will be overwritten, so keep the table somewhere else.
QwNeonRegister tbl = temps.AcquireQ();
- TurboAssembler::Move(tbl, liftoff::GetSimd128Register(lhs));
+ MacroAssembler::Move(tbl, liftoff::GetSimd128Register(lhs));
table = NeonListOperand(tbl);
}
@@ -2590,8 +2561,8 @@ void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Move(dst.low_fp(), src.fp());
- TurboAssembler::Move(dst.high_fp(), src.fp());
+ MacroAssembler::Move(dst.low_fp(), src.fp());
+ MacroAssembler::Move(dst.high_fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
@@ -3601,14 +3572,40 @@ void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
+ QwNeonRegister dest = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+
+ vmull(NeonS8, scratch, left.low(), right.low());
+ vpadd(Neon16, dest.low(), scratch.low(), scratch.high());
+
+ vmull(NeonS8, scratch, left.high(), right.high());
+ vpadd(Neon16, dest.high(), scratch.low(), scratch.high());
}
void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
+ QwNeonRegister dest = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+ QwNeonRegister accu = liftoff::GetSimd128Register(acc);
+
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+
+ vmull(NeonS8, scratch, left.low(), right.low());
+ vpadd(Neon16, dest.low(), scratch.low(), scratch.high());
+
+ vmull(NeonS8, scratch, left.high(), right.high());
+ vpadd(Neon16, dest.high(), scratch.low(), scratch.high());
+
+ vpaddl(NeonS16, dest, dest);
+ vadd(Neon32, dest, dest, accu);
}
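
For reference, the lane semantics that the NEON sequences above target can be modelled with a small standalone scalar sketch (plain C++, not part of the patch; function names are invented here and the usual relaxed-SIMD definition of the i7x16 operand is assumed): each i16 lane is the dot product of a pair of adjacent signed i8 lanes, and the i32x4 variant pairwise-adds those i16 lanes and then adds the accumulator.

#include <cstdint>

// Scalar model of i16x8.dot_i8x16_i7x16_s: multiply adjacent i8 pairs and sum
// (the vmull + vpadd sequence above).
void I16x8DotI8x16S(const int8_t lhs[16], const int8_t rhs[16],
                    int16_t out[8]) {
  for (int i = 0; i < 8; ++i) {
    out[i] = static_cast<int16_t>(lhs[2 * i] * rhs[2 * i] +
                                  lhs[2 * i + 1] * rhs[2 * i + 1]);
  }
}

// Scalar model of i32x4.dot_i8x16_i7x16_add_s: pairwise-widen the i16 dot
// products (vpaddl above) and add the accumulator (vadd above).
void I32x4DotI8x16AddS(const int8_t lhs[16], const int8_t rhs[16],
                       const int32_t acc[4], int32_t out[4]) {
  int16_t dot[8];
  I16x8DotI8x16S(lhs, rhs, dot);
  for (int i = 0; i < 4; ++i) {
    out[i] = dot[2 * i] + dot[2 * i + 1] + acc[i];
  }
}
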
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -4232,28 +4229,40 @@ void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfma");
+ vmul(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src1),
+ liftoff::GetSimd128Register(src2));
+ vadd(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src3),
+ liftoff::GetSimd128Register(dst));
}
void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfms");
+ vmul(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src1),
+ liftoff::GetSimd128Register(src2));
+ vsub(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src3),
+ liftoff::GetSimd128Register(dst));
}
void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfma");
+ vmul(dst.low_fp(), src1.low_fp(), src2.low_fp());
+ vmul(dst.high_fp(), src1.high_fp(), src2.high_fp());
+ vadd(dst.low_fp(), src3.low_fp(), dst.low_fp());
+ vadd(dst.high_fp(), src3.high_fp(), dst.high_fp());
}
void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfms");
+ vmul(dst.low_fp(), src1.low_fp(), src2.low_fp());
+ vmul(dst.high_fp(), src1.high_fp(), src2.high_fp());
+ vsub(dst.low_fp(), src3.low_fp(), dst.low_fp());
+ vsub(dst.high_fp(), src3.high_fp(), dst.high_fp());
}
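
The qfma/qfms lowerings above rely on relaxed SIMD permitting an unfused result, so the ARM port emits a plain multiply followed by an add (qfma) or a reversed subtract (qfms). A minimal standalone scalar sketch of the intended per-lane semantics, shown for f64x2 (plain C++; names invented for illustration, the f32x4 variants are analogous):

void F64x2Qfma(const double a[2], const double b[2], const double c[2],
               double out[2]) {
  // vmul then vadd above: result = a * b + c, per lane, possibly unfused.
  for (int i = 0; i < 2; ++i) out[i] = a[i] * b[i] + c[i];
}

void F64x2Qfms(const double a[2], const double b[2], const double c[2],
               double out[2]) {
  // vmul then vsub(dst, src3, dst) above: result = c - a * b, per lane.
  for (int i = 0; i < 2; ++i) out[i] = c[i] - a[i] * b[i];
}
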
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -4269,7 +4278,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
// Asserts unreachable within the wasm code.
- TurboAssembler::AssertUnreachable(reason);
+ MacroAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index b6b06b2b9e..b53083e0ab 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -16,31 +16,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- return lt;
- case kSignedLessEqual:
- return le;
- case kSignedGreaterThan:
- return gt;
- case kSignedGreaterEqual:
- return ge;
- case kUnsignedLessThan:
- return lo;
- case kUnsignedLessEqual:
- return ls;
- case kUnsignedGreaterThan:
- return hi;
- case kUnsignedGreaterEqual:
- return hs;
- }
-}
-
// Liftoff Frames.
//
// slot Frame
@@ -148,12 +123,12 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
inline Register GetEffectiveAddress(LiftoffAssembler* assm,
UseScratchRegisterScope* temps,
Register addr, Register offset,
- uintptr_t offset_imm) {
+ uintptr_t offset_imm,
+ bool i64_offset = false) {
if (!offset.is_valid() && offset_imm == 0) return addr;
Register tmp = temps->AcquireX();
if (offset.is_valid()) {
- // TODO(clemensb): This needs adaption for memory64.
- assm->Add(tmp, addr, Operand(offset, UXTW));
+ assm->Add(tmp, addr, i64_offset ? Operand(offset) : Operand(offset, UXTW));
addr = tmp;
}
if (offset_imm != 0) assm->Add(tmp, addr, offset_imm);
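
A minimal standalone model of the address computation above (plain C++, not V8 code; names invented): with a 32-bit memory the offset register holds a zero-extended 32-bit value (the UXTW extend), while with memory64 ({i64_offset} true) the full 64-bit register is added, and the immediate offset is added on top in either case.

#include <cstdint>

uint64_t EffectiveAddress(uint64_t base, uint64_t offset_reg,
                          uint64_t offset_imm, bool i64_offset) {
  uint64_t offset =
      i64_offset ? offset_reg : static_cast<uint32_t>(offset_reg);  // UXTW
  return base + offset + offset_imm;
}
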
@@ -314,14 +289,15 @@ void LiftoffAssembler::AlignFrameSize() {
}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector, and an unused
// slot for alignment.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size = std::max(frame_size - 2 * kSystemPointerSize, 0);
}
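
As a sketch of the frame-size arithmetic above (illustrative plain C++, mirroring the arm64 variant only; the ia32 version later in the diff subtracts a single pointer): the frame marker and instance slot are already pushed during frame construction, and when a feedback-vector slot is in use the setup builtin additionally pushes the vector plus an alignment slot, so two more pointers are subtracted, clamped at zero.

#include <algorithm>

int AdjustedFrameSize(int total_frame_size, int system_pointer_size,
                      bool feedback_vector_slot) {
  int frame_size = total_frame_size - 2 * system_pointer_size;
  if (feedback_vector_slot) {
    frame_size = std::max(frame_size - 2 * system_pointer_size, 0);
  }
  return frame_size;
}
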
@@ -382,7 +358,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation);
// Now allocate the stack space. Note that this might do more than just
- // decrementing the SP; consult {TurboAssembler::Claim}.
+ // decrementing the SP; consult {MacroAssembler::Claim}.
Claim(frame_size, 1);
// Jump back to the start of the function, from {pc_offset()} to
@@ -416,20 +392,19 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return kind == kS128 || is_reference(kind);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- Mov(reg.gp().W(), Immediate(value.to_i32(), rmode));
+ Mov(reg.gp().W(), value.to_i32());
break;
case kI64:
- Mov(reg.gp().X(), Immediate(value.to_i64(), rmode));
+ Mov(reg.gp().X(), value.to_i64());
break;
case kF32:
- Fmov(reg.fp().S(), value.to_f32_boxed().get_scalar());
+ Fmov(reg.fp().S(), value.to_f32());
break;
case kF64:
- Fmov(reg.fp().D(), value.to_f64_boxed().get_scalar());
+ Fmov(reg.fp().D(), value.to_f64());
break;
default:
UNREACHABLE();
@@ -463,14 +438,14 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
DCHECK_LE(0, offset);
- LoadTaggedPointerField(dst, MemOperand{instance, offset});
+ LoadTaggedField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
int offset, ExternalPointerTag tag,
- Register isolate_root) {
+ Register /* scratch */) {
LoadExternalPointerField(dst, FieldMemOperand(instance, offset), tag,
- isolate_root);
+ kRootRegister);
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -486,7 +461,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
offset_imm, false, shift_amount);
- LoadTaggedPointerField(dst, src_op);
+ LoadTaggedField(dst, src_op);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -519,17 +494,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
// The write barrier.
- Label write_barrier;
Label exit;
- CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
- &write_barrier);
- b(&exit);
- bind(&write_barrier);
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask,
+ kZero, &exit);
JumpIfSmi(src.gp(), &exit);
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(src.gp(), src.gp());
- }
- CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
+ CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
CallRecordWriteStubSaveRegisters(dst_addr, offset_op, SaveFPRegsMode::kSave,
StubCallMode::kCallWasmRuntimeStub);
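
The rewritten barrier code folds the former write_barrier label away by inverting the first page-flag check, so both checks branch straight to the exit when the barrier is unnecessary. A standalone outline of the filtering that remains (illustrative plain C++; parameter names invented):

bool NeedsWriteBarrier(bool dst_page_pointers_from_here_interesting,
                       bool value_is_smi,
                       bool value_page_pointers_to_here_interesting) {
  if (!dst_page_pointers_from_here_interesting) return false;  // first check
  if (value_is_smi) return false;                              // JumpIfSmi
  return value_page_pointers_to_here_interesting;              // second check
}
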
@@ -659,75 +628,182 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
Register actual_addr = liftoff::CalculateActualAddress(
lasm, dst_addr, offset_reg, offset_imm, temps.AcquireX());
- // Allocate an additional {temp} register to hold the result that should be
- // stored to memory. Note that {temp} and {store_result} are not allowed to be
- // the same register.
- Register temp = temps.AcquireX();
+ if (CpuFeatures::IsSupported(LSE)) {
+ CpuFeatureScope scope(lasm, LSE);
+ switch (op) {
+ case Binop::kAnd:
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireW();
+ __ mvn(temp, value.gp().W());
+ __ ldclralb(temp, result.gp().W(), MemOperand(actual_addr));
+ break;
+ }
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireW();
+ __ mvn(temp, value.gp().W());
+ __ ldclralh(temp, result.gp().W(), MemOperand(actual_addr));
+ break;
+ }
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireW();
+ __ mvn(temp, value.gp().W());
+ __ ldclral(temp, result.gp().W(), MemOperand(actual_addr));
+ break;
+ }
+ case StoreType::kI64Store: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireX();
+ __ mvn(temp, value.gp());
+ __ ldclral(temp, result.gp(), MemOperand(actual_addr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case Binop::kSub:
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireW();
+ __ neg(temp, value.gp().W());
+ __ ldaddalb(temp, result.gp().W(), MemOperand(actual_addr));
+ break;
+ }
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireW();
+ __ neg(temp, value.gp().W());
+ __ ldaddalh(temp, result.gp().W(), MemOperand(actual_addr));
+ break;
+ }
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireW();
+ __ neg(temp, value.gp().W());
+ __ ldaddal(temp, result.gp().W(), MemOperand(actual_addr));
+ break;
+ }
+ case StoreType::kI64Store: {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = temps.AcquireX();
+ __ neg(temp, value.gp());
+ __ ldaddal(temp, result.gp(), MemOperand(actual_addr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+#define ATOMIC_BINOP_CASE(op, instr) \
+ case Binop::op: \
+ switch (type.value()) { \
+ case StoreType::kI64Store8: \
+ case StoreType::kI32Store8: \
+ __ instr##b(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
+ break; \
+ case StoreType::kI64Store16: \
+ case StoreType::kI32Store16: \
+ __ instr##h(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
+ break; \
+ case StoreType::kI64Store32: \
+ case StoreType::kI32Store: \
+ __ instr(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
+ break; \
+ case StoreType::kI64Store: \
+ __ instr(value.gp(), result.gp(), MemOperand(actual_addr)); \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
+ break;
+ ATOMIC_BINOP_CASE(kAdd, ldaddal)
+ ATOMIC_BINOP_CASE(kOr, ldsetal)
+ ATOMIC_BINOP_CASE(kXor, ldeoral)
+ ATOMIC_BINOP_CASE(kExchange, swpal)
+#undef ATOMIC_BINOP_CASE
+ }
+ } else {
+ // Allocate an additional {temp} register to hold the result that should be
+ // stored to memory. Note that {temp} and {store_result} are not allowed to
+ // be the same register.
+ Register temp = temps.AcquireX();
+
+ Label retry;
+ __ Bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ ldaxrb(result.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ ldaxrh(result.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ ldaxr(result.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ ldaxr(result.gp().X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
- Label retry;
- __ Bind(&retry);
- switch (type.value()) {
- case StoreType::kI64Store8:
- case StoreType::kI32Store8:
- __ ldaxrb(result.gp().W(), actual_addr);
- break;
- case StoreType::kI64Store16:
- case StoreType::kI32Store16:
- __ ldaxrh(result.gp().W(), actual_addr);
- break;
- case StoreType::kI64Store32:
- case StoreType::kI32Store:
- __ ldaxr(result.gp().W(), actual_addr);
- break;
- case StoreType::kI64Store:
- __ ldaxr(result.gp().X(), actual_addr);
- break;
- default:
- UNREACHABLE();
- }
+ switch (op) {
+ case Binop::kAdd:
+ __ add(temp, result.gp(), value.gp());
+ break;
+ case Binop::kSub:
+ __ sub(temp, result.gp(), value.gp());
+ break;
+ case Binop::kAnd:
+ __ and_(temp, result.gp(), value.gp());
+ break;
+ case Binop::kOr:
+ __ orr(temp, result.gp(), value.gp());
+ break;
+ case Binop::kXor:
+ __ eor(temp, result.gp(), value.gp());
+ break;
+ case Binop::kExchange:
+ __ mov(temp, value.gp());
+ break;
+ }
- switch (op) {
- case Binop::kAdd:
- __ add(temp, result.gp(), value.gp());
- break;
- case Binop::kSub:
- __ sub(temp, result.gp(), value.gp());
- break;
- case Binop::kAnd:
- __ and_(temp, result.gp(), value.gp());
- break;
- case Binop::kOr:
- __ orr(temp, result.gp(), value.gp());
- break;
- case Binop::kXor:
- __ eor(temp, result.gp(), value.gp());
- break;
- case Binop::kExchange:
- __ mov(temp, value.gp());
- break;
- }
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ stlxrb(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ stlxrh(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ stlxr(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ stlxr(store_result.W(), temp.X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
- switch (type.value()) {
- case StoreType::kI64Store8:
- case StoreType::kI32Store8:
- __ stlxrb(store_result.W(), temp.W(), actual_addr);
- break;
- case StoreType::kI64Store16:
- case StoreType::kI32Store16:
- __ stlxrh(store_result.W(), temp.W(), actual_addr);
- break;
- case StoreType::kI64Store32:
- case StoreType::kI32Store:
- __ stlxr(store_result.W(), temp.W(), actual_addr);
- break;
- case StoreType::kI64Store:
- __ stlxr(store_result.W(), temp.X(), actual_addr);
- break;
- default:
- UNREACHABLE();
+ __ Cbnz(store_result.W(), &retry);
}
-
- __ Cbnz(store_result.W(), &retry);
}
#undef __
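
The LSE fast path above leans on two identities, since the ARMv8.1 atomics provide bit-clear and add but no direct AND or SUB: AND is expressed as LDCLR of the complemented operand (the mvn + ldclral sequence), and SUB as LDADD of the negated operand (the neg + ldaddal sequence). A standalone sketch using portable atomics (plain C++, not V8 code):

#include <atomic>
#include <cstdint>

// LDCLR semantics: atomically mem &= ~operand, returning the old value.
uint64_t FetchClear(std::atomic<uint64_t>& mem, uint64_t operand) {
  return mem.fetch_and(~operand);
}

// AND via LDCLR of the complement: mem &= ~(~value) is mem &= value.
uint64_t AtomicAndViaClear(std::atomic<uint64_t>& mem, uint64_t value) {
  return FetchClear(mem, ~value);
}

// SUB via LDADD of the two's-complement negation.
uint64_t AtomicSubViaAdd(std::atomic<uint64_t>& mem, uint64_t value) {
  return mem.fetch_add(~value + 1);
}
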
@@ -735,7 +811,8 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList /* pinned */) {
+ LoadType type, LiftoffRegList /* pinned */,
+ bool /* i64_offset */) {
UseScratchRegisterScope temps(this);
Register src_reg = liftoff::CalculateActualAddress(
this, src_addr, offset_reg, offset_imm, temps.AcquireX());
@@ -762,8 +839,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type,
- LiftoffRegList /* pinned */) {
+ StoreType type, LiftoffRegList /* pinned */,
+ bool /* i64_offset */) {
UseScratchRegisterScope temps(this);
Register dst_reg = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
@@ -790,35 +867,40 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kXor);
}
@@ -826,7 +908,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kExchange);
}
@@ -834,7 +917,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool /* i64_offset */) {
LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value};
Register result_reg = result.gp();
@@ -847,46 +930,81 @@ void LiftoffAssembler::AtomicCompareExchange(
Register actual_addr = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
- Register store_result = temps.AcquireW();
+ if (CpuFeatures::IsSupported(LSE)) {
+ CpuFeatureScope scope(this, LSE);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ if (result.gp() != expected.gp()) {
+ mov(result.gp().W(), expected.gp().W());
+ }
+ casalb(result.gp().W(), new_value.gp().W(), MemOperand(actual_addr));
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ if (result.gp() != expected.gp()) {
+ mov(result.gp().W(), expected.gp().W());
+ }
+ casalh(result.gp().W(), new_value.gp().W(), MemOperand(actual_addr));
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ if (result.gp() != expected.gp()) {
+ mov(result.gp().W(), expected.gp().W());
+ }
+ casal(result.gp().W(), new_value.gp().W(), MemOperand(actual_addr));
+ break;
+ case StoreType::kI64Store:
+ if (result.gp() != expected.gp()) {
+ mov(result.gp().X(), expected.gp().X());
+ }
+ casal(result.gp().X(), new_value.gp().X(), MemOperand(actual_addr));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ Register store_result = temps.AcquireW();
+
+ Label retry;
+ Label done;
+ Bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ ldaxrb(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTB));
+ B(ne, &done);
+ stlxrb(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ ldaxrh(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTH));
+ B(ne, &done);
+ stlxrh(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ ldaxr(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTW));
+ B(ne, &done);
+ stlxr(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ ldaxr(result_reg.X(), actual_addr);
+ Cmp(result.gp().X(), Operand(expected.gp().X(), UXTX));
+ B(ne, &done);
+ stlxr(store_result.W(), new_value.gp().X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
- Label retry;
- Label done;
- Bind(&retry);
- switch (type.value()) {
- case StoreType::kI64Store8:
- case StoreType::kI32Store8:
- ldaxrb(result_reg.W(), actual_addr);
- Cmp(result.gp().W(), Operand(expected.gp().W(), UXTB));
- B(ne, &done);
- stlxrb(store_result.W(), new_value.gp().W(), actual_addr);
- break;
- case StoreType::kI64Store16:
- case StoreType::kI32Store16:
- ldaxrh(result_reg.W(), actual_addr);
- Cmp(result.gp().W(), Operand(expected.gp().W(), UXTH));
- B(ne, &done);
- stlxrh(store_result.W(), new_value.gp().W(), actual_addr);
- break;
- case StoreType::kI64Store32:
- case StoreType::kI32Store:
- ldaxr(result_reg.W(), actual_addr);
- Cmp(result.gp().W(), Operand(expected.gp().W(), UXTW));
- B(ne, &done);
- stlxr(store_result.W(), new_value.gp().W(), actual_addr);
- break;
- case StoreType::kI64Store:
- ldaxr(result_reg.X(), actual_addr);
- Cmp(result.gp().X(), Operand(expected.gp().X(), UXTX));
- B(ne, &done);
- stlxr(store_result.W(), new_value.gp().X(), actual_addr);
- break;
- default:
- UNREACHABLE();
+ Cbnz(store_result.W(), &retry);
+ Bind(&done);
}
- Cbnz(store_result.W(), &retry);
- Bind(&done);
-
if (result_reg != result.gp()) {
mov(result.gp(), result_reg);
}
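
Both strategies above implement the same compare-exchange contract: the LSE path issues a single CASAL, while the fallback loops on load-exclusive/store-exclusive until the store succeeds or the comparison fails. A minimal standalone equivalent (plain C++ atomics, illustrative only); compilers typically lower this to a CAS instruction when LSE is available and to an ldaxr/stlxr retry loop otherwise:

#include <atomic>
#include <cstdint>

uint32_t CompareExchange(std::atomic<uint32_t>& mem, uint32_t expected,
                         uint32_t new_value) {
  uint32_t observed = expected;
  // On success the memory held {expected}; on failure {observed} is updated
  // to the actual value. Either way the old memory value is returned.
  mem.compare_exchange_strong(observed, new_value, std::memory_order_seq_cst);
  return observed;
}
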
@@ -1573,11 +1691,10 @@ void LiftoffAssembler::emit_jump(Label* label) { B(label); }
void LiftoffAssembler::emit_jump(Register target) { Br(target); }
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
switch (kind) {
case kI32:
if (rhs.is_valid()) {
@@ -1590,7 +1707,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRefNull:
case kRtt:
DCHECK(rhs.is_valid());
- DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ DCHECK(cond == kEqual || cond == kNotEqual);
#if defined(V8_COMPRESS_POINTERS)
Cmp(lhs.W(), rhs.W());
#else
@@ -1610,11 +1727,9 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
B(label, cond);
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Cmp(lhs.W(), Operand(imm));
B(label, cond);
}
@@ -1631,10 +1746,8 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
Cset(dst.W(), eq);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
Cmp(lhs.W(), rhs.W());
Cset(dst.W(), cond);
}
@@ -1644,18 +1757,16 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
Cset(dst.W(), eq);
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Cmp(lhs.gp().X(), rhs.gp().X());
Cset(dst.W(), cond);
}
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Fcmp(lhs.S(), rhs.S());
Cset(dst.W(), cond);
if (cond != ne) {
@@ -1664,10 +1775,9 @@ void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
}
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Fcmp(lhs.D(), rhs.D());
Cset(dst.W(), cond);
if (cond != ne) {
@@ -1749,10 +1859,11 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
- MemOperand src_op{
- liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
+ MemOperand src_op{liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg,
+ offset_imm, i64_offset)};
MachineType mem_type = type.mem_type();
if (dst != src) {
@@ -1776,10 +1887,11 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
- MemOperand dst_op{
- liftoff::GetEffectiveAddress(this, &temps, dst, offset, offset_imm)};
+ MemOperand dst_op{liftoff::GetEffectiveAddress(this, &temps, dst, offset,
+ offset_imm, i64_offset)};
if (protected_store_pc) *protected_store_pc = pc_offset();
MachineRepresentation rep = type.mem_rep();
@@ -3238,34 +3350,53 @@ void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
Abs(dst.fp().V2D(), src.fp().V2D());
}
+#define EMIT_QFMOP(instr, format) \
+ if (dst == src3) { \
+ instr(dst.fp().V##format(), src1.fp().V##format(), src2.fp().V##format()); \
+ return; \
+ } \
+ if (dst != src1 && dst != src2) { \
+ Mov(dst.fp().V##format(), src3.fp().V##format()); \
+ instr(dst.fp().V##format(), src1.fp().V##format(), src2.fp().V##format()); \
+ return; \
+ } \
+ DCHECK(dst == src1 || dst == src2); \
+ UseScratchRegisterScope temps(this); \
+ VRegister tmp = temps.AcquireV(kFormat##format); \
+ Mov(tmp, src3.fp().V##format()); \
+ instr(tmp, src1.fp().V##format(), src2.fp().V##format()); \
+ Mov(dst.fp().V##format(), tmp);
+
void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfma");
+ EMIT_QFMOP(Fmla, 4S);
}
void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfms");
+ EMIT_QFMOP(Fmls, 4S);
}
void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfma");
+ EMIT_QFMOP(Fmla, 2D);
}
void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfms");
+ EMIT_QFMOP(Fmls, 2D);
}
+#undef EMIT_QFMOP
+
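
The EMIT_QFMOP macro above is essentially an aliasing policy for the destructive Fmla/Fmls forms: the accumulator (src3) must already live in the destination register before the instruction executes, and a scratch register is only needed when the destination also aliases one of the multiplicands. A tiny standalone sketch of that decision (illustrative; names invented, plain ints stand in for register codes):

enum class QfmaPlan { kInPlace, kMoveAccumulatorFirst, kViaScratch };

// Mirrors the three branches of EMIT_QFMOP.
QfmaPlan ChooseQfmaPlan(int dst, int src1, int src2, int src3) {
  if (dst == src3) return QfmaPlan::kInPlace;
  if (dst != src1 && dst != src2) return QfmaPlan::kMoveAccumulatorFirst;
  return QfmaPlan::kViaScratch;
}
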
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
Ldr(limit_address, MemOperand(limit_address));
Cmp(sp, limit_address);
@@ -3277,7 +3408,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- TurboAssembler::AssertUnreachable(reason);
+ MacroAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 3fcbf73976..c3ecfcaab7 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -24,31 +24,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return equal;
- case kUnequal:
- return not_equal;
- case kSignedLessThan:
- return less;
- case kSignedLessEqual:
- return less_equal;
- case kSignedGreaterThan:
- return greater;
- case kSignedGreaterEqual:
- return greater_equal;
- case kUnsignedLessThan:
- return below;
- case kUnsignedLessEqual:
- return below_equal;
- case kUnsignedGreaterThan:
- return above;
- case kUnsignedGreaterEqual:
- return above_equal;
- }
-}
-
// ebp-4 holds the stack marker, ebp-8 is the instance parameter.
constexpr int kInstanceOffset = 8;
constexpr int kFeedbackVectorOffset = 12; // ebp-12 is the feedback vector.
@@ -241,13 +216,14 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
DCHECK_EQ(0, frame_size % kSystemPointerSize);
@@ -313,7 +289,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation);
// Now allocate the stack space. Note that this might do more than just
- // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
AllocateStackSpace(frame_size);
// Jump back to the start of the function, from {pc_offset()} to
@@ -340,25 +316,23 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return is_reference(kind);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
+ MacroAssembler::Move(reg.gp(), Immediate(value.to_i32()));
break;
case kI64: {
- DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
- TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
- TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
+ MacroAssembler::Move(reg.low_gp(), Immediate(low_word));
+ MacroAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
case kF32:
- TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
case kF64:
- TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
UNREACHABLE();
@@ -430,13 +404,10 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
- MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
- &write_barrier, Label::kNear);
- jmp(&exit, Label::kNear);
- bind(&write_barrier);
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &exit,
+ Label::kNear);
JumpIfSmi(src.gp(), &exit, Label::kNear);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
@@ -450,7 +421,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, uint32_t* protected_load_pc,
- bool /* is_load_mem */, bool i64_offset,
+ bool /* is_load_mem */, bool /* i64_offset */,
bool needs_shift) {
// Offsets >=2GB are statically OOB on 32-bit systems.
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
@@ -598,7 +569,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
- LoadType type, LiftoffRegList /* pinned */) {
+ LoadType type, LiftoffRegList /* pinned */,
+ bool /* i64_offset */) {
if (type.value() != LoadType::kI64Load) {
Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
return;
@@ -617,7 +589,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool /* i64_offset */) {
DCHECK_NE(offset_reg, no_reg);
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
Operand dst_op = Operand(dst_addr, offset_reg, times_1, offset_imm);
@@ -957,7 +930,8 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicBinop64(this, liftoff::kAdd, dst_addr, offset_reg,
offset_imm, value, result);
@@ -970,7 +944,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicBinop64(this, liftoff::kSub, dst_addr, offset_reg,
offset_imm, value, result);
@@ -982,7 +957,8 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicBinop64(this, liftoff::kAnd, dst_addr, offset_reg,
offset_imm, value, result);
@@ -995,7 +971,8 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicBinop64(this, liftoff::kOr, dst_addr, offset_reg, offset_imm,
value, result);
@@ -1008,7 +985,8 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicBinop64(this, liftoff::kXor, dst_addr, offset_reg,
offset_imm, value, result);
@@ -1022,7 +1000,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
liftoff::AtomicBinop64(this, liftoff::kExchange, dst_addr, offset_reg,
offset_imm, value, result);
@@ -1036,7 +1015,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool /* i64_offset */) {
// We expect that the offset has already been added to {dst_addr}, and no
// {offset_reg} is provided. This is to save registers.
DCHECK_EQ(offset_reg, no_reg);
@@ -1721,7 +1700,7 @@ inline LiftoffRegister ReplaceInPair(LiftoffRegister pair, Register old_reg,
inline void Emit64BitShiftOperation(
LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
- Register amount, void (TurboAssembler::*emit_shift)(Register, Register)) {
+ Register amount, void (MacroAssembler::*emit_shift)(Register, Register)) {
// Temporary registers cannot overlap with {dst}.
LiftoffRegList pinned{dst};
@@ -1760,7 +1739,7 @@ inline void Emit64BitShiftOperation(
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount,
- &TurboAssembler::ShlPair_cl);
+ &MacroAssembler::ShlPair_cl);
}
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@@ -1779,7 +1758,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount,
- &TurboAssembler::SarPair_cl);
+ &MacroAssembler::SarPair_cl);
}
void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1798,7 +1777,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount,
- &TurboAssembler::ShrPair_cl);
+ &MacroAssembler::ShrPair_cl);
}
void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@@ -2042,10 +2021,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
- TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
+ MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
Andps(dst, liftoff::kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit - 1);
+ MacroAssembler::Move(dst, kSignBit - 1);
Andps(dst, src);
}
}
@@ -2053,10 +2032,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
- TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
+ MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
Xorps(dst, liftoff::kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit);
+ MacroAssembler::Move(dst, kSignBit);
Xorps(dst, src);
}
}
@@ -2179,10 +2158,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
- TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
+ MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
Andpd(dst, liftoff::kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit - 1);
+ MacroAssembler::Move(dst, kSignBit - 1);
Andpd(dst, src);
}
}
@@ -2190,10 +2169,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
- TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
+ MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
Xorpd(dst, liftoff::kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit);
+ MacroAssembler::Move(dst, kSignBit);
Xorpd(dst, src);
}
}
@@ -2483,17 +2462,16 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
switch (kind) {
case kRef:
case kRefNull:
case kRtt:
- DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ DCHECK(cond == kEqual || cond == kNotEqual);
V8_FALLTHROUGH;
case kI32:
cmp(lhs, rhs);
@@ -2509,10 +2487,9 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
j(cond, label);
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs, int imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
cmp(lhs, Immediate(imm));
j(cond, label);
}
@@ -2547,10 +2524,8 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
liftoff::setcc_32(this, equal, dst);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
cmp(lhs, rhs);
liftoff::setcc_32(this, cond, dst);
}
@@ -2568,28 +2543,26 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
}
namespace liftoff {
-inline LiftoffCondition cond_make_unsigned(LiftoffCondition cond) {
+inline Condition cond_make_unsigned(Condition cond) {
switch (cond) {
- case kSignedLessThan:
+ case kLessThan:
return kUnsignedLessThan;
- case kSignedLessEqual:
- return kUnsignedLessEqual;
- case kSignedGreaterThan:
+ case kLessThanEqual:
+ return kUnsignedLessThanEqual;
+ case kGreaterThan:
return kUnsignedGreaterThan;
- case kSignedGreaterEqual:
- return kUnsignedGreaterEqual;
+ case kGreaterThanEqual:
+ return kUnsignedGreaterThanEqual;
default:
return cond;
}
}
} // namespace liftoff
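
The cond_make_unsigned helper exists because the 64-bit comparison below is split across a register pair: the high words decide the result under the original (signed) condition, and only when they are equal do the low words decide it, using the unsigned variant. A standalone scalar illustration for the less-than case (plain C++, not V8 code):

#include <cstdint>

bool I64LessThan(int64_t lhs, int64_t rhs) {
  int32_t lhs_hi = static_cast<int32_t>(lhs >> 32);
  int32_t rhs_hi = static_cast<int32_t>(rhs >> 32);
  uint32_t lhs_lo = static_cast<uint32_t>(lhs);
  uint32_t rhs_lo = static_cast<uint32_t>(rhs);
  if (lhs_hi != rhs_hi) return lhs_hi < rhs_hi;  // signed on the high words
  return lhs_lo < rhs_lo;                        // unsigned on the low words
}
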
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- Condition unsigned_cond =
- liftoff::ToCondition(liftoff::cond_make_unsigned(liftoff_cond));
+ Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
// Get the tmp byte register out here, such that we don't conditionally spill
// (this cannot be reflected in the cache state).
@@ -2644,17 +2617,15 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
liftoff::EmitFloatSetCond<&Assembler::ucomiss>(this, cond, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
liftoff::EmitFloatSetCond<&Assembler::ucomisd>(this, cond, dst, lhs, rhs);
}
@@ -2764,7 +2735,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->cmov(zero, dst.gp(), tmp);
}
-template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedMacroAssemblerBase::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2832,7 +2803,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool /* i64_offset */) {
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
Operand src_op{addr, offset_reg, times_1, static_cast<int32_t>(offset_imm)};
*protected_load_pc = pc_offset();
@@ -2858,7 +2830,8 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
+ uint32_t* protected_store_pc,
+ bool /* i64_offset */) {
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
Operand dst_op = Operand(dst, offset, times_1, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
@@ -3304,14 +3277,14 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
memcpy(vals, imms, sizeof(vals));
- TurboAssembler::Move(dst.fp(), vals[0]);
+ MacroAssembler::Move(dst.fp(), vals[0]);
uint64_t high = vals[1];
Register tmp = GetUnusedRegister(RegClass::kGpReg, {}).gp();
- TurboAssembler::Move(tmp, Immediate(high & 0xffff'ffff));
+ MacroAssembler::Move(tmp, Immediate(high & 0xffff'ffff));
Pinsrd(dst.fp(), tmp, 2);
- TurboAssembler::Move(tmp, Immediate(high >> 32));
+ MacroAssembler::Move(tmp, Immediate(high >> 32));
Pinsrd(dst.fp(), tmp, 3);
}
@@ -3372,7 +3345,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -3508,7 +3481,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -3697,7 +3670,13 @@ void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
+ LiftoffRegister tmp1 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs});
+ LiftoffRegister tmp2 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs, tmp1});
+ I32x4DotI8x16I7x16AddS(dst.fp(), lhs.fp(), rhs.fp(), acc.fp(), tmp1.fp(),
+ tmp2.fp());
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3713,7 +3692,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -3885,7 +3864,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4610,7 +4589,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- TurboAssembler::AssertUnreachable(reason);
+ MacroAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 8a2881441d..d7329e2854 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -61,9 +61,10 @@ constexpr Register kLiftoffFrameSetupFunctionReg = t0;
#elif V8_TARGET_ARCH_LOONG64
// t6-t8 and s3-s4: scratch registers, s6: root
+// s8: pointer-compression-cage base
constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, a4, a5, a6,
a7, t0, t1, t2, t3, t4, t5,
- s0, s1, s2, s5, s7, s8};
+ s0, s1, s2, s5, s7};
// f29: zero, f30-f31: macro-assembler scratch float Registers.
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
@@ -151,7 +152,7 @@ constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs =
static_assert(kLiftoffFrameSetupFunctionReg != kWasmInstanceRegister);
static_assert(kLiftoffFrameSetupFunctionReg != kRootRegister);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
static_assert(kLiftoffFrameSetupFunctionReg != kPtrComprCageBaseRegister);
#endif
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index efcd583f4f..29120dd03b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -25,8 +25,7 @@ namespace wasm {
using VarState = LiftoffAssembler::VarState;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
-constexpr ValueKind LiftoffAssembler::kPointerKind;
-constexpr ValueKind LiftoffAssembler::kTaggedKind;
+constexpr ValueKind LiftoffAssembler::kIntPtrKind;
constexpr ValueKind LiftoffAssembler::kSmiKind;
namespace {
@@ -82,70 +81,75 @@ class StackTransferRecipe {
explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
StackTransferRecipe(const StackTransferRecipe&) = delete;
StackTransferRecipe& operator=(const StackTransferRecipe&) = delete;
- ~StackTransferRecipe() { Execute(); }
+ V8_INLINE ~StackTransferRecipe() { Execute(); }
- void Execute() {
+ V8_INLINE void Execute() {
// First, execute register moves. Then load constants and stack values into
// registers.
- ExecuteMoves();
+ if (!move_dst_regs_.is_empty()) ExecuteMoves();
DCHECK(move_dst_regs_.is_empty());
- ExecuteLoads();
+ if (!load_dst_regs_.is_empty()) ExecuteLoads();
DCHECK(load_dst_regs_.is_empty());
- }
-
- V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
- DCHECK(CheckCompatibleStackSlotTypes(dst.kind(), src.kind()));
- if (dst.is_reg()) {
- LoadIntoRegister(dst.reg(), src, src.offset());
- return;
- }
- if (dst.is_const()) {
+ // Tell the compiler that the StackTransferRecipe is empty after this, so it
+ // can eliminate a second {Execute} in the destructor.
+ bool all_done = move_dst_regs_.is_empty() && load_dst_regs_.is_empty();
+ V8_ASSUME(all_done);
+ USE(all_done);
+ }
+
+ V8_INLINE void Transfer(const VarState& dst, const VarState& src) {
+ DCHECK(CompatibleStackSlotTypes(dst.kind(), src.kind()));
+ if (dst.is_stack()) {
+ if (V8_UNLIKELY(!(src.is_stack() && src.offset() == dst.offset()))) {
+ TransferToStack(dst.offset(), src);
+ }
+ } else if (dst.is_reg()) {
+ LoadIntoRegister(dst.reg(), src);
+ } else {
+ DCHECK(dst.is_const());
DCHECK_EQ(dst.i32_const(), src.i32_const());
- return;
}
- DCHECK(dst.is_stack());
+ }
+
+ void TransferToStack(int dst_offset, const VarState& src) {
switch (src.loc()) {
case VarState::kStack:
- if (src.offset() != dst.offset()) {
- asm_->MoveStackValue(dst.offset(), src.offset(), src.kind());
+ if (src.offset() != dst_offset) {
+ asm_->MoveStackValue(dst_offset, src.offset(), src.kind());
}
break;
case VarState::kRegister:
- asm_->Spill(dst.offset(), src.reg(), src.kind());
+ asm_->Spill(dst_offset, src.reg(), src.kind());
break;
case VarState::kIntConst:
- asm_->Spill(dst.offset(), src.constant());
+ asm_->Spill(dst_offset, src.constant());
break;
}
}
V8_INLINE void LoadIntoRegister(LiftoffRegister dst,
- const LiftoffAssembler::VarState& src,
- uint32_t src_offset) {
- switch (src.loc()) {
- case VarState::kStack:
- LoadStackSlot(dst, src_offset, src.kind());
- break;
- case VarState::kRegister:
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
- break;
- case VarState::kIntConst:
- LoadConstant(dst, src.constant());
- break;
+ const LiftoffAssembler::VarState& src) {
+ if (src.is_reg()) {
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
+ } else if (src.is_stack()) {
+ LoadStackSlot(dst, src.offset(), src.kind());
+ } else {
+ DCHECK(src.is_const());
+ LoadConstant(dst, src.constant());
}
}
void LoadI64HalfIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
- int offset, RegPairHalf half) {
+ RegPairHalf half) {
// Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair);
DCHECK_EQ(kI64, src.kind());
switch (src.loc()) {
case VarState::kStack:
- LoadI64HalfStackSlot(dst, offset, half);
+ LoadI64HalfStackSlot(dst, src.offset(), half);
break;
case VarState::kRegister: {
LiftoffRegister src_half =
@@ -265,6 +269,9 @@ class StackTransferRecipe {
LiftoffRegList move_dst_regs_;
LiftoffRegList load_dst_regs_;
LiftoffAssembler* const asm_;
+ // Cache the last spill offset in case we need to spill for resolving move
+ // cycles.
+ int last_spill_offset_ = asm_->TopSpillOffset();
RegisterMove* register_move(LiftoffRegister reg) {
return reinterpret_cast<RegisterMove*>(&register_moves_) +
@@ -297,7 +304,7 @@ class StackTransferRecipe {
ExecuteMove(move->src);
}
- void ExecuteMoves() {
+ V8_NOINLINE V8_PRESERVE_MOST void ExecuteMoves() {
// Execute all moves whose {dst} is not being used as src in another move.
// If any src count drops to zero, also (transitively) execute the
// corresponding move to that register.
@@ -310,21 +317,20 @@ class StackTransferRecipe {
// All remaining moves are parts of a cycle. Just spill the first one, then
// process all remaining moves in that cycle. Repeat for all cycles.
- int last_spill_offset = asm_->TopSpillOffset();
while (!move_dst_regs_.is_empty()) {
// TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
- last_spill_offset += LiftoffAssembler::SlotSizeForType(move->kind);
+ last_spill_offset_ += LiftoffAssembler::SlotSizeForType(move->kind);
LiftoffRegister spill_reg = move->src;
- asm_->Spill(last_spill_offset, spill_reg, move->kind);
+ asm_->Spill(last_spill_offset_, spill_reg, move->kind);
// Remember to reload into the destination register later.
- LoadStackSlot(dst, last_spill_offset, move->kind);
+ LoadStackSlot(dst, last_spill_offset_, move->kind);
ClearExecutedMove(dst);
}
}
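
The cycle-breaking step above is the classic parallel-move trick: once only cycles remain, spilling one cycle member to a fresh stack slot turns the cycle into a chain, the remaining moves execute normally, and the spilled value is reloaded into its destination at the end. In miniature (illustrative plain C++), a two-register cycle degenerates to a swap through memory:

// Break the cycle {a <- b, b <- a} by spilling one member first.
void SwapViaSpill(int& a, int& b) {
  int spill = a;  // spill the first cycle member to a temporary slot
  a = b;          // the remaining move in the cycle can now execute
  b = spill;      // reload the spilled value into its destination
}
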
- void ExecuteLoads() {
+ V8_NOINLINE V8_PRESERVE_MOST void ExecuteLoads() {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
switch (load->load_kind) {
@@ -398,25 +404,50 @@ enum ReuseRegisters : bool {
kReuseRegisters = true,
kNoReuseRegisters = false
};
+// {InitMergeRegion} is a helper used by {MergeIntoNewState} to initialize
+// a part of the target stack ([target, target+count]) from [source,
+// source+count]. The parameters specify how to initialize the part. The goal is
+// to set up the region such that later merges (via {MergeStackWith} /
+// {MergeFullStackWith}) can successfully transfer their values to this new
+// state.
void InitMergeRegion(LiftoffAssembler::CacheState* state,
const VarState* source, VarState* target, uint32_t count,
MergeKeepStackSlots keep_stack_slots,
MergeAllowConstants allow_constants,
MergeAllowRegisters allow_registers,
- ReuseRegisters reuse_registers, LiftoffRegList used_regs) {
+ ReuseRegisters reuse_registers, LiftoffRegList used_regs,
+ int new_stack_offset, StackTransferRecipe& transfers) {
RegisterReuseMap register_reuse_map;
for (const VarState* source_end = source + count; source < source_end;
++source, ++target) {
- if ((source->is_stack() && keep_stack_slots) ||
- (source->is_const() && allow_constants)) {
+ if (source->is_stack() && keep_stack_slots) {
*target = *source;
+ // If {new_stack_offset} is set, we want to recompute stack offsets for
+ // the region we are initializing such that they are contiguous. If
+ // {new_stack_offset} is zero (which is an illegal stack offset), we just
+ // keep the source offsets.
+ if (new_stack_offset) {
+ new_stack_offset =
+ LiftoffAssembler::NextSpillOffset(source->kind(), new_stack_offset);
+ if (new_stack_offset != source->offset()) {
+ target->set_offset(new_stack_offset);
+ transfers.TransferToStack(new_stack_offset, *source);
+ }
+ }
+ continue;
+ }
+ if (source->is_const() && allow_constants) {
+ *target = *source;
+ DCHECK(!new_stack_offset);
continue;
}
base::Optional<LiftoffRegister> reg;
+ bool needs_reg_transfer = true;
if (allow_registers) {
// First try: Keep the same register, if it's free.
if (source->is_reg() && state->is_free(source->reg())) {
reg = source->reg();
+ needs_reg_transfer = false;
}
// Second try: Use the same register we used before (if we reuse
// registers).
@@ -429,51 +460,77 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
reg = state->unused_register(rc, used_regs);
}
}
- if (!reg) {
+ // See above: Recompute the stack offset if requested.
+ int target_offset = source->offset();
+ if (new_stack_offset) {
+ new_stack_offset =
+ LiftoffAssembler::NextSpillOffset(source->kind(), new_stack_offset);
+ target_offset = new_stack_offset;
+ }
+ if (reg) {
+ if (needs_reg_transfer) transfers.LoadIntoRegister(*reg, *source);
+ if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
+ state->inc_used(*reg);
+ *target = VarState(source->kind(), *reg, target_offset);
+ } else {
// No free register; make this a stack slot.
- *target = VarState(source->kind(), source->offset());
- continue;
+ *target = VarState(source->kind(), target_offset);
+ transfers.TransferToStack(target_offset, *source);
}
- if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
- state->inc_used(*reg);
- *target = VarState(source->kind(), *reg, source->offset());
}
}
} // namespace
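
The {new_stack_offset} chaining in {InitMergeRegion} walks the region slot by slot and re-derives each offset from the previous one, so a region whose predecessors were dropped becomes contiguous again. A minimal illustration of that chaining, under the simplifying assumption that every slot occupies a fixed 8-byte slot (the real {NextSpillOffset} also accounts for per-kind sizes and alignment):

#include <cstdio>
#include <vector>

constexpr int kStackSlotSize = 8;  // Simplification: all slots are 8 bytes.

// Simplified stand-in for LiftoffAssembler::NextSpillOffset.
int NextSpillOffset(int previous_offset) {
  return previous_offset + kStackSlotSize;
}

int main() {
  // Offsets of a region whose preceding slots were dropped, leaving a gap.
  std::vector<int> source_offsets = {24, 40, 56};
  int offset = 8;  // Offset of the last slot that stays in place.
  for (int src : source_offsets) {
    offset = NextSpillOffset(offset);
    if (offset != src) {
      std::printf("transfer stack value from offset %d to %d\n", src, offset);
    }
  }
}
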
-// TODO(clemensb): Don't copy the full parent state (this makes us N^2).
-void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
- uint32_t num_locals,
- uint32_t arity,
- uint32_t stack_depth) {
- // |------locals------|---(in between)----|--(discarded)--|----merge----|
- // <-- num_locals --> <-- stack_depth -->^stack_base <-- arity -->
+LiftoffAssembler::CacheState LiftoffAssembler::MergeIntoNewState(
+ uint32_t num_locals, uint32_t arity, uint32_t stack_depth) {
+ CacheState target{zone()};
+
+ // The source state looks like this:
+ // |------locals------|---(stack prefix)---|--(discarded)--|----merge----|
+ // <-- num_locals --> <-- stack_depth --> <-- arity -->
+ //
+ // We compute the following target state from it:
+ // |------locals------|---(stack prefix)----|----merge----|
+ // <-- num_locals --> <-- stack_depth --> <-- arity -->
+ //
+ // The target state drops the "(discarded)" region, and the "locals" and
+ // "merge" regions are modified to avoid constants and duplicate register
+ // uses. This ensures that later merges can successfully transfer into the
+ // target state.
+ // The "stack prefix" region will be identical for any source that merges into
+ // that state.
- if (source.cached_instance != no_reg) {
- SetInstanceCacheRegister(source.cached_instance);
+ if (cache_state_.cached_instance != no_reg) {
+ target.SetInstanceCacheRegister(cache_state_.cached_instance);
}
- if (source.cached_mem_start != no_reg) {
- SetMemStartCacheRegister(source.cached_mem_start);
+ if (cache_state_.cached_mem_start != no_reg) {
+ target.SetMemStartCacheRegister(cache_state_.cached_mem_start);
}
- uint32_t stack_base = stack_depth + num_locals;
- uint32_t target_height = stack_base + arity;
- uint32_t discarded = source.stack_height() - target_height;
- DCHECK(stack_state.empty());
+ uint32_t target_height = num_locals + stack_depth + arity;
- DCHECK_GE(source.stack_height(), stack_base);
- stack_state.resize_no_init(target_height);
+ target.stack_state.resize_no_init(target_height);
- const VarState* source_begin = source.stack_state.data();
- VarState* target_begin = stack_state.data();
+ const VarState* source_begin = cache_state_.stack_state.data();
+ VarState* target_begin = target.stack_state.data();
- // Try to keep locals and the merge region in their registers. Register used
+ // Compute the starts of the different regions, for source and target (see
+ // pictograms above).
+ const VarState* locals_source = source_begin;
+ const VarState* stack_prefix_source = source_begin + num_locals;
+ const VarState* discarded_source = stack_prefix_source + stack_depth;
+ const VarState* merge_source = cache_state_.stack_state.end() - arity;
+ VarState* locals_target = target_begin;
+ VarState* stack_prefix_target = target_begin + num_locals;
+ VarState* merge_target = target_begin + num_locals + stack_depth;
+
+ // Try to keep locals and the merge region in their registers. Registers used
// multiple times need to be copied to another free register. Compute the list
// of used registers.
LiftoffRegList used_regs;
- for (auto& src : base::VectorOf(source_begin, num_locals)) {
+ for (auto& src : base::VectorOf(locals_source, num_locals)) {
if (src.is_reg()) used_regs.set(src.reg());
}
// If there is more than one operand in the merge region, a stack-to-stack
@@ -483,47 +540,58 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
MergeAllowRegisters allow_registers =
arity <= 1 ? kRegistersAllowed : kRegistersNotAllowed;
if (allow_registers) {
- for (auto& src :
- base::VectorOf(source_begin + stack_base + discarded, arity)) {
+ for (auto& src : base::VectorOf(merge_source, arity)) {
if (src.is_reg()) used_regs.set(src.reg());
}
}
- // Initialize the merge region. If this region moves, try to turn stack slots
- // into registers since we need to load the value anyways.
- MergeKeepStackSlots keep_merge_stack_slots =
- discarded == 0 ? kKeepStackSlots : kTurnStackSlotsIntoRegisters;
- InitMergeRegion(this, source_begin + stack_base + discarded,
- target_begin + stack_base, arity, keep_merge_stack_slots,
- kConstantsNotAllowed, allow_registers, kNoReuseRegisters,
- used_regs);
- // Shift spill offsets down to keep slots contiguous.
- int offset = stack_base == 0 ? StaticStackFrameSize()
- : source.stack_state[stack_base - 1].offset();
- auto merge_region = base::VectorOf(target_begin + stack_base, arity);
- for (VarState& var : merge_region) {
- offset = LiftoffAssembler::NextSpillOffset(var.kind(), offset);
- var.set_offset(offset);
+ StackTransferRecipe transfers(this);
+
+ // The merge region is often empty, hence check for this before doing any
+ // work (even though not needed for correctness).
+ if (arity) {
+ // Initialize the merge region. If this region moves, try to turn stack
+ // slots into registers since we need to load the value anyways.
+ MergeKeepStackSlots keep_merge_stack_slots =
+ target_height == cache_state_.stack_height()
+ ? kKeepStackSlots
+ : kTurnStackSlotsIntoRegisters;
+ // Shift spill offsets down to keep slots contiguous. We place the merge
+ // region right after the "stack prefix", if it exists.
+ int merge_region_stack_offset = discarded_source == source_begin
+ ? StaticStackFrameSize()
+ : discarded_source[-1].offset();
+ InitMergeRegion(&target, merge_source, merge_target, arity,
+ keep_merge_stack_slots, kConstantsNotAllowed,
+ allow_registers, kNoReuseRegisters, used_regs,
+ merge_region_stack_offset, transfers);
}
// Initialize the locals region. Here, stack slots stay stack slots (because
// they do not move). Try to keep registers in registers, but avoid duplicates.
- InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
- kConstantsNotAllowed, kRegistersAllowed, kNoReuseRegisters,
- used_regs);
+ if (num_locals) {
+ InitMergeRegion(&target, locals_source, locals_target, num_locals,
+ kKeepStackSlots, kConstantsNotAllowed, kRegistersAllowed,
+ kNoReuseRegisters, used_regs, 0, transfers);
+ }
// Consistency check: All the {used_regs} are really in use now.
- DCHECK_EQ(used_regs, used_registers & used_regs);
+ DCHECK_EQ(used_regs, target.used_registers & used_regs);
- // Last, initialize the section in between. Here, constants are allowed, but
- // registers which are already used for the merge region or locals must be
+ // Last, initialize the "stack prefix" region. Here, constants are allowed,
+ // but registers which are already used for the merge region or locals must be
// moved to other registers or spilled. If a register appears twice in the
// source region, ensure to use the same register twice in the target region.
- InitMergeRegion(this, source_begin + num_locals, target_begin + num_locals,
- stack_depth, kKeepStackSlots, kConstantsAllowed,
- kRegistersAllowed, kReuseRegisters, used_regs);
+ if (stack_depth) {
+ InitMergeRegion(&target, stack_prefix_source, stack_prefix_target,
+ stack_depth, kKeepStackSlots, kConstantsAllowed,
+ kRegistersAllowed, kReuseRegisters, used_regs, 0,
+ transfers);
+ }
+
+ return target;
}
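
As a concrete instance of the pictograms above, the following standalone snippet computes the region boundaries for one example shape (2 locals, a 3-slot stack prefix, 4 slots to be discarded, merge arity 1); the real code holds VarState pointers instead of indices, but the arithmetic is the same.

#include <cstdio>

int main() {
  const unsigned num_locals = 2, stack_depth = 3, discarded = 4, arity = 1;
  const unsigned source_height = num_locals + stack_depth + discarded + arity;
  const unsigned target_height = num_locals + stack_depth + arity;

  // Region start indices in the source stack.
  const unsigned locals_source = 0;
  const unsigned stack_prefix_source = num_locals;
  const unsigned discarded_source = stack_prefix_source + stack_depth;
  const unsigned merge_source = source_height - arity;

  // Region start indices in the target stack (no "discarded" region).
  const unsigned locals_target = 0;
  const unsigned stack_prefix_target = num_locals;
  const unsigned merge_target = num_locals + stack_depth;

  std::printf("source: locals@%u prefix@%u discarded@%u merge@%u (height %u)\n",
              locals_source, stack_prefix_source, discarded_source,
              merge_source, source_height);
  std::printf("target: locals@%u prefix@%u merge@%u (height %u)\n",
              locals_target, stack_prefix_target, merge_target, target_height);
}
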
-void LiftoffAssembler::CacheState::Steal(const CacheState& source) {
+void LiftoffAssembler::CacheState::Steal(CacheState& source) {
// Just use the move assignment operator.
*this = std::move(source);
}
@@ -611,9 +679,11 @@ AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; }
} // namespace
-LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
- : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
- std::move(buffer)) {
+LiftoffAssembler::LiftoffAssembler(Zone* zone,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : MacroAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
+ std::move(buffer)),
+ cache_state_(zone) {
set_abort_hard(true); // Avoid calls to Abort.
}
@@ -623,21 +693,11 @@ LiftoffAssembler::~LiftoffAssembler() {
}
}
-LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
- LiftoffRegList pinned) {
- if (slot.is_reg()) return slot.reg();
+LiftoffRegister LiftoffAssembler::LoadToRegister_Slow(VarState slot,
+ LiftoffRegList pinned) {
+ DCHECK(!slot.is_reg());
LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned);
- return LoadToRegister(slot, reg);
-}
-
-LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
- LiftoffRegister reg) {
- if (slot.is_const()) {
- LoadConstant(reg, slot.constant());
- } else {
- DCHECK(slot.is_stack());
- Fill(reg, slot.offset(), slot.kind());
- }
+ LoadToFixedRegister(slot, reg);
return reg;
}
@@ -663,9 +723,7 @@ LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
LiftoffRegList pinned) {
DCHECK_LT(index, cache_state_.stack_state.size());
VarState& slot = cache_state_.stack_state.end()[-1 - index];
- if (slot.is_reg()) {
- return slot.reg();
- }
+ if (V8_LIKELY(slot.is_reg())) return slot.reg();
LiftoffRegister reg = LoadToRegister(slot, pinned);
cache_state_.inc_used(reg);
slot.MakeRegister(reg);
@@ -673,22 +731,36 @@ LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
}
void LiftoffAssembler::DropValues(int count) {
- for (int i = 0; i < count; ++i) {
- DCHECK(!cache_state_.stack_state.empty());
- VarState slot = cache_state_.stack_state.back();
- cache_state_.stack_state.pop_back();
+ DCHECK_GE(cache_state_.stack_state.size(), count);
+ for (VarState& slot :
+ base::VectorOf(cache_state_.stack_state.end() - count, count)) {
if (slot.is_reg()) {
cache_state_.dec_used(slot.reg());
}
}
+ cache_state_.stack_state.pop_back(count);
}
-void LiftoffAssembler::DropValue(int depth) {
- auto* dropped = cache_state_.stack_state.begin() + depth;
+void LiftoffAssembler::DropExceptionValueAtOffset(int offset) {
+ auto* dropped = cache_state_.stack_state.begin() + offset;
if (dropped->is_reg()) {
cache_state_.dec_used(dropped->reg());
}
- std::copy(dropped + 1, cache_state_.stack_state.end(), dropped);
+ // Compute the stack offset that the remaining slots are based on.
+ int stack_offset =
+ offset == 0 ? StaticStackFrameSize() : dropped[-1].offset();
+ // Move remaining slots down.
+ for (VarState *slot = dropped, *end = cache_state_.stack_state.end() - 1;
+ slot != end; ++slot) {
+ *slot = *(slot + 1);
+ stack_offset = NextSpillOffset(slot->kind(), stack_offset);
+ // Padding could allow us to exit early.
+ if (slot->offset() == stack_offset) break;
+ if (slot->is_stack()) {
+ MoveStackValue(stack_offset, slot->offset(), slot->kind());
+ }
+ slot->set_offset(stack_offset);
+ }
cache_state_.stack_state.pop_back();
}
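
A reduced model of the compaction loop in {DropExceptionValueAtOffset}: one slot is removed from the middle, the following slots shift down one position, and their offsets are re-derived so they stay contiguous. This sketch assumes uniform 8-byte slots and leaves out the register bookkeeping and the padding-based early exit of the real code.

#include <cstdio>
#include <vector>

constexpr int kSlotSize = 8;

struct Slot { int offset; };

// Remove the slot at {index}; shift the following slots down one position and
// re-assign contiguous offsets. The printf stands in for MoveStackValue.
void DropSlotAt(std::vector<Slot>& slots, size_t index) {
  int offset = index == 0 ? 0 : slots[index - 1].offset;
  for (size_t i = index; i + 1 < slots.size(); ++i) {
    slots[i] = slots[i + 1];
    offset += kSlotSize;
    if (slots[i].offset != offset) {
      std::printf("move stack value from offset %d to %d\n", slots[i].offset,
                  offset);
      slots[i].offset = offset;
    }
  }
  slots.pop_back();
}

int main() {
  std::vector<Slot> slots = {{8}, {16}, {24}, {32}, {40}};
  DropSlotAt(slots, 1);  // Drop the value that lived at offset 16.
  for (const Slot& slot : slots) std::printf("slot @%d\n", slot.offset);
}
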
@@ -779,25 +851,23 @@ bool SlotInterference(const VarState& a, base::Vector<const VarState> v) {
} // namespace
#endif
-void LiftoffAssembler::MergeFullStackWith(CacheState& target,
- const CacheState& source) {
- DCHECK_EQ(source.stack_height(), target.stack_height());
+void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
+ DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
// TODO(clemensb): Reuse the same StackTransferRecipe object to save some
// allocations.
StackTransferRecipe transfers(this);
- for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
- transfers.TransferStackSlot(target.stack_state[i], source.stack_state[i]);
+ for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
+ transfers.Transfer(target.stack_state[i], cache_state_.stack_state[i]);
DCHECK(!SlotInterference(target.stack_state[i],
- base::VectorOf(source.stack_state.data() + i + 1,
- source.stack_height() - i - 1)));
+ base::VectorOf(cache_state_.stack_state) + i + 1));
}
// Full stack merging is only done for forward jumps, so we can just clear the
// cache registers at the target in case of mismatch.
- if (source.cached_instance != target.cached_instance) {
+ if (cache_state_.cached_instance != target.cached_instance) {
target.ClearCachedInstanceRegister();
}
- if (source.cached_mem_start != target.cached_mem_start) {
+ if (cache_state_.cached_mem_start != target.cached_mem_start) {
target.ClearCachedMemStartRegister();
}
}
@@ -817,8 +887,7 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
for (uint32_t i = 0; i < target_stack_base; ++i) {
- transfers.TransferStackSlot(target.stack_state[i],
- cache_state_.stack_state[i]);
+ transfers.Transfer(target.stack_state[i], cache_state_.stack_state[i]);
DCHECK(!SlotInterference(
target.stack_state[i],
base::VectorOf(cache_state_.stack_state.data() + i + 1,
@@ -828,8 +897,8 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
base::VectorOf(cache_state_.stack_state.data() + stack_base, arity)));
}
for (uint32_t i = 0; i < arity; ++i) {
- transfers.TransferStackSlot(target.stack_state[target_stack_base + i],
- cache_state_.stack_state[stack_base + i]);
+ transfers.Transfer(target.stack_state[target_stack_base + i],
+ cache_state_.stack_state[stack_base + i]);
DCHECK(!SlotInterference(
target.stack_state[target_stack_base + i],
base::VectorOf(cache_state_.stack_state.data() + stack_base + i + 1,
@@ -860,7 +929,7 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
// If the source has the content but in the wrong register, execute a
// register move as part of the stack transfer.
transfers.MoveRegister(LiftoffRegister{*dst_reg},
- LiftoffRegister{src_reg}, kPointerKind);
+ LiftoffRegister{src_reg}, kIntPtrKind);
} else {
// Otherwise (the source state has no cached content), we reload later.
*reload = true;
@@ -952,7 +1021,7 @@ void LiftoffAssembler::ClearRegister(
if (reg != *use) continue;
if (replacement == no_reg) {
replacement = GetUnusedRegister(kGpReg, pinned).gp();
- Move(replacement, reg, kPointerKind);
+ Move(replacement, reg, kIntPtrKind);
}
// We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement;
@@ -978,7 +1047,7 @@ void PrepareStackTransfers(const ValueKindSig* sig,
const bool is_gp_pair = kNeedI64RegPair && kind == kI64;
const int num_lowered_params = is_gp_pair ? 2 : 1;
const VarState& slot = slots[param];
- const uint32_t stack_offset = slot.offset();
+ DCHECK(CompatibleStackSlotTypes(slot.kind(), kind));
// Process both halves of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack.
for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
@@ -995,15 +1064,14 @@ void PrepareStackTransfers(const ValueKindSig* sig,
LiftoffRegister::from_external_code(rc, kind, reg_code);
param_regs->set(reg);
if (is_gp_pair) {
- stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
- half);
+ stack_transfers->LoadI64HalfIntoRegister(reg, slot, half);
} else {
- stack_transfers->LoadIntoRegister(reg, slot, stack_offset);
+ stack_transfers->LoadIntoRegister(reg, slot);
}
} else {
DCHECK(loc.IsCallerFrameSlot());
int param_offset = -loc.GetLocation() - 1;
- stack_slots->Add(slot, stack_offset, half, param_offset);
+ stack_slots->Add(slot, slot.offset(), half, param_offset);
}
}
}
@@ -1033,38 +1101,25 @@ void LiftoffAssembler::PrepareBuiltinCall(
void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
- Register* target,
- Register* target_instance) {
+ Register* target, Register target_instance) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
- // Input 0 is the call target.
- constexpr size_t kInputShift = 1;
-
- // Spill all cache slots which are not being used as parameters.
- cache_state_.ClearAllCacheRegisters();
- for (VarState* it = cache_state_.stack_state.end() - 1 - num_params;
- it >= cache_state_.stack_state.begin() &&
- !cache_state_.used_registers.is_empty();
- --it) {
- if (!it->is_reg()) continue;
- Spill(it->offset(), it->reg(), it->kind());
- cache_state_.dec_used(it->reg());
- it->MakeStack();
- }
LiftoffStackSlots stack_slots(this);
StackTransferRecipe stack_transfers(this);
LiftoffRegList param_regs;
// Move the target instance (if supplied) into the correct instance register.
- compiler::LinkageLocation instance_loc =
- call_descriptor->GetInputLocation(kInputShift);
- DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
- Register instance_reg = Register::from_code(instance_loc.AsRegister());
+ Register instance_reg = wasm::kGpParamRegisters[0];
+ // Check that the call descriptor agrees. Input 0 is the call target, 1 is the
+ // instance.
+ DCHECK_EQ(
+ instance_reg,
+ Register::from_code(call_descriptor->GetInputLocation(1).AsRegister()));
param_regs.set(instance_reg);
- if (target_instance && *target_instance != instance_reg) {
+ if (target_instance == no_reg) target_instance = cache_state_.cached_instance;
+ if (target_instance != no_reg && target_instance != instance_reg) {
stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
- LiftoffRegister(*target_instance),
- kPointerKind);
+ LiftoffRegister(target_instance), kIntPtrKind);
}
int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
@@ -1083,29 +1138,46 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
- kPointerKind);
+ kIntPtrKind);
*target = new_target.gp();
} else {
- stack_slots.Add(VarState(kPointerKind, LiftoffRegister(*target), 0),
+ stack_slots.Add(VarState(kIntPtrKind, LiftoffRegister(*target), 0),
param_slots);
param_slots++;
*target = no_reg;
}
}
+ // After figuring out all register and stack moves, drop the parameter slots
+ // from the stack.
+ DropValues(num_params);
+
+ // Spill all remaining cache slots.
+ cache_state_.ClearAllCacheRegisters();
+ // Iterate backwards, spilling register slots until all registers are free.
+ if (!cache_state_.used_registers.is_empty()) {
+ for (auto* slot = cache_state_.stack_state.end() - 1;; --slot) {
+ DCHECK_LE(cache_state_.stack_state.begin(), slot);
+ if (!slot->is_reg()) continue;
+ Spill(slot->offset(), slot->reg(), slot->kind());
+ cache_state_.dec_used(slot->reg());
+ slot->MakeStack();
+ if (cache_state_.used_registers.is_empty()) break;
+ }
+ }
+ // All slots are either spilled to the stack or hold constants now.
+ DCHECK(std::all_of(
+ cache_state_.stack_state.begin(), cache_state_.stack_state.end(),
+ [](const VarState& slot) { return slot.is_stack() || slot.is_const(); }));
+
if (param_slots > 0) {
stack_slots.Construct(param_slots);
}
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
- // Pop parameters from the value stack.
- cache_state_.stack_state.pop_back(num_params);
-
- // Reset register use counters.
- cache_state_.reset_used_registers();
- // Reload the instance from the stack.
- if (!target_instance) {
+ // Reload the instance from the stack if we do not have it in a register.
+ if (target_instance == no_reg) {
LoadInstanceFromFrame(instance_reg);
}
}
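
The reordered PrepareCall above first records all parameter moves, then drops the parameter slots, and only afterwards spills whatever still sits in registers, walking the value stack from the top until no register uses remain. The backwards spill walk in isolation, with a toy slot representation (none of these types are the real VarState):

#include <cstdio>
#include <vector>

enum class Loc { kRegister, kStack, kConstant };
struct Slot {
  Loc loc;
  int reg = -1;  // Only meaningful for Loc::kRegister.
};

// Spill every slot that still lives in a register, starting from the top of
// the stack and stopping as soon as no register uses remain.
void SpillRemainingRegisters(std::vector<Slot>& stack, int used_registers) {
  if (used_registers == 0) return;
  for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
    if (it->loc != Loc::kRegister) continue;
    std::printf("spill r%d\n", it->reg);
    it->loc = Loc::kStack;
    it->reg = -1;
    if (--used_registers == 0) break;
  }
}

int main() {
  std::vector<Slot> stack = {{Loc::kConstant}, {Loc::kRegister, 3},
                             {Loc::kStack}, {Loc::kRegister, 5}};
  SpillRemainingRegisters(stack, /*used_registers=*/2);
  // Afterwards every slot is either a stack slot or a constant.
  for (const Slot& slot : stack) {
    std::printf("%s\n", slot.loc == Loc::kConstant ? "const" : "stack");
  }
}
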
@@ -1184,34 +1256,46 @@ void LiftoffAssembler::ParallelRegisterMove(
void LiftoffAssembler::MoveToReturnLocations(
const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
- StackTransferRecipe stack_transfers(this);
- if (sig->return_count() == 1) {
- ValueKind return_kind = sig->GetReturn(0).kind();
- // Defaults to a gp reg, will be set below if return kind is not gp.
- LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
-
- if (needs_gp_reg_pair(return_kind)) {
- return_reg = LiftoffRegister::ForPair(kGpReturnRegisters[0],
- kGpReturnRegisters[1]);
- } else if (needs_fp_reg_pair(return_kind)) {
- return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
- } else if (reg_class_for(return_kind) == kFpReg) {
- return_reg = LiftoffRegister(kFpReturnRegisters[0]);
- } else {
- DCHECK_EQ(kGpReg, reg_class_for(return_kind));
- }
- stack_transfers.LoadIntoRegister(return_reg,
- cache_state_.stack_state.back(),
- cache_state_.stack_state.back().offset());
+ DCHECK_LT(0, sig->return_count());
+ if (V8_UNLIKELY(sig->return_count() > 1)) {
+ MoveToReturnLocationsMultiReturn(sig, descriptor);
return;
}
- // Slow path for multi-return.
+ ValueKind return_kind = sig->GetReturn(0).kind();
+ // Defaults to a gp reg, will be set below if return kind is not gp.
+ LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
+
+ if (needs_gp_reg_pair(return_kind)) {
+ return_reg =
+ LiftoffRegister::ForPair(kGpReturnRegisters[0], kGpReturnRegisters[1]);
+ } else if (needs_fp_reg_pair(return_kind)) {
+ return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
+ } else if (reg_class_for(return_kind) == kFpReg) {
+ return_reg = LiftoffRegister(kFpReturnRegisters[0]);
+ } else {
+ DCHECK_EQ(kGpReg, reg_class_for(return_kind));
+ }
+ VarState& slot = cache_state_.stack_state.back();
+ if (V8_LIKELY(slot.is_reg())) {
+ if (slot.reg() != return_reg) {
+ Move(return_reg, slot.reg(), slot.kind());
+ }
+ } else {
+ LoadToFixedRegister(cache_state_.stack_state.back(), return_reg);
+ }
+}
+
+void LiftoffAssembler::MoveToReturnLocationsMultiReturn(
+ const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
+ DCHECK_LT(1, sig->return_count());
+ StackTransferRecipe stack_transfers(this);
+
// We sometimes allocate a register to perform stack-to-stack moves, which can
// cause a spill in the cache state. Conservatively save and restore the
// original state in case it is needed after the current instruction
// (conditional branch).
- CacheState saved_state;
+ CacheState saved_state{zone()};
#if DEBUG
uint32_t saved_state_frozenness = cache_state_.frozen;
cache_state_.frozen = 0;
@@ -1259,10 +1343,9 @@ void LiftoffAssembler::MoveToReturnLocations(
LiftoffRegister::from_external_code(rc, return_kind, reg_code);
VarState& slot = slots[i];
if (needs_gp_pair) {
- stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(),
- half);
+ stack_transfers.LoadI64HalfIntoRegister(reg, slot, half);
} else {
- stack_transfers.LoadIntoRegister(reg, slot, slot.offset());
+ stack_transfers.LoadIntoRegister(reg, slot);
}
}
}
@@ -1285,7 +1368,11 @@ void LiftoffRegList::Print() const {
bool LiftoffAssembler::ValidateCacheState() const {
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList used_regs;
+ int offset = StaticStackFrameSize();
for (const VarState& var : cache_state_.stack_state) {
+ // Check for contiguous stack offsets.
+ offset = NextSpillOffset(var.kind(), offset);
+ DCHECK_EQ(offset, var.offset());
if (!var.is_reg()) continue;
LiftoffRegister reg = var.reg();
if ((kNeedI64RegPair || kNeedS128RegPair) && reg.is_pair()) {
@@ -1322,10 +1409,16 @@ bool LiftoffAssembler::ValidateCacheState() const {
#endif
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates) {
- // Spill one cached value to free a register.
- LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates);
- SpillRegister(spill_reg);
- return spill_reg;
+ // Before spilling a regular stack slot, try to drop a "volatile" register
+ // (used for caching the memory start or the instance itself). Those can be
+ // reloaded without requiring a spill here.
+ if (cache_state_.has_volatile_register(candidates)) {
+ return cache_state_.take_volatile_register(candidates);
+ }
+
+ LiftoffRegister spilled_reg = cache_state_.GetNextSpillReg(candidates);
+ SpillRegister(spilled_reg);
+ return spilled_reg;
}
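
The new fallback order in SpillOneRegister prefers giving up a register that only caches reloadable data (the instance or the memory start) over spilling a register that holds a value: the cache can be re-materialized with a single load, while a spill costs a store now and a fill later. A stripped-down model of that preference (std::optional stands in for the cached-register fields; nothing here is the actual CacheState interface):

#include <cstdio>
#include <optional>

struct CacheState {
  std::optional<int> cached_instance_reg;
  std::optional<int> cached_mem_start_reg;

  // Give up one "volatile" cache register, if any is held.
  std::optional<int> TakeVolatileRegister() {
    for (auto* cached : {&cached_instance_reg, &cached_mem_start_reg}) {
      if (!cached->has_value()) continue;
      int reg = **cached;
      cached->reset();
      return reg;
    }
    return std::nullopt;
  }
};

int SpillOneRegister(CacheState& state) {
  if (std::optional<int> reg = state.TakeVolatileRegister()) {
    std::printf("dropped cache register r%d (no spill code emitted)\n", *reg);
    return *reg;
  }
  std::printf("spilling a value register\n");  // The expensive fallback.
  return 0;
}

int main() {
  CacheState state{7, 11};
  SpillOneRegister(state);  // Frees r7 without emitting a spill.
  SpillOneRegister(state);  // Frees r11.
  SpillOneRegister(state);  // Only now a real spill is needed.
}
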
LiftoffRegister LiftoffAssembler::SpillAdjacentFpRegisters(
@@ -1426,20 +1519,11 @@ std::ostream& operator<<(std::ostream& os, VarState slot) {
}
#if DEBUG
-bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b) {
- if (is_object_reference(a)) {
- // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
- // edges), we only care that pointer types stay amongst pointer types.
- // It's fine if ref/ref null overwrite each other.
- DCHECK(is_object_reference(b));
- } else if (is_rtt(a)) {
- // Same for rtt/rtt_with_depth.
- DCHECK(is_rtt(b));
- } else {
- // All other types (primitive numbers, bottom/stmt) must be equal.
- DCHECK_EQ(a, b);
- }
- return true; // Dummy so this can be called via DCHECK.
+bool CompatibleStackSlotTypes(ValueKind a, ValueKind b) {
+ // Since Liftoff doesn't do accurate type tracking (e.g. on loop back edges,
+ // ref.as_non_null/br_on_cast results), we only care that pointer types stay
+ // amongst pointer types. It's fine if ref/ref null overwrite each other.
+ return a == b || (is_object_reference(a) && is_object_reference(b));
}
#endif
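
The relaxed predicate above can be exercised in isolation; a self-contained version with a toy ValueKind enum (the real enum has many more kinds) behaves the same way:

#include <cstdio>

enum ValueKind { kI32, kI64, kF64, kRef, kRefNull };

constexpr bool is_object_reference(ValueKind kind) {
  return kind == kRef || kind == kRefNull;
}

// Primitive kinds must match exactly; any two reference kinds may overwrite
// each other on the value stack.
constexpr bool CompatibleStackSlotTypes(ValueKind a, ValueKind b) {
  return a == b || (is_object_reference(a) && is_object_reference(b));
}

static_assert(CompatibleStackSlotTypes(kRef, kRefNull), "refs stay refs");
static_assert(!CompatibleStackSlotTypes(kI32, kI64), "numbers must match");

int main() {
  std::printf("%d\n", CompatibleStackSlotTypes(kF64, kF64));  // 1
}
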
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 4218a232d5..aef63c647b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -30,68 +30,57 @@ class CallDescriptor;
namespace wasm {
-enum LiftoffCondition {
- kEqual,
- kEqualZero = kEqual, // When used in a unary operation.
- kUnequal,
- kNotEqualZero = kUnequal, // When used in a unary operation.
- kSignedLessThan,
- kSignedLessEqual,
- kSignedGreaterThan,
- kSignedGreaterEqual,
- kUnsignedLessThan,
- kUnsignedLessEqual,
- kUnsignedGreaterThan,
- kUnsignedGreaterEqual
-};
-
-inline constexpr LiftoffCondition Negate(LiftoffCondition cond) {
+inline constexpr Condition Negate(Condition cond) {
switch (cond) {
case kEqual:
- return kUnequal;
- case kUnequal:
+ return kNotEqual;
+ case kNotEqual:
return kEqual;
- case kSignedLessThan:
- return kSignedGreaterEqual;
- case kSignedLessEqual:
- return kSignedGreaterThan;
- case kSignedGreaterEqual:
- return kSignedLessThan;
- case kSignedGreaterThan:
- return kSignedLessEqual;
+ case kLessThan:
+ return kGreaterThanEqual;
+ case kLessThanEqual:
+ return kGreaterThan;
+ case kGreaterThanEqual:
+ return kLessThan;
+ case kGreaterThan:
+ return kLessThanEqual;
case kUnsignedLessThan:
- return kUnsignedGreaterEqual;
- case kUnsignedLessEqual:
+ return kUnsignedGreaterThanEqual;
+ case kUnsignedLessThanEqual:
return kUnsignedGreaterThan;
- case kUnsignedGreaterEqual:
+ case kUnsignedGreaterThanEqual:
return kUnsignedLessThan;
case kUnsignedGreaterThan:
- return kUnsignedLessEqual;
+ return kUnsignedLessThanEqual;
+ default:
+ UNREACHABLE();
}
}
-inline constexpr LiftoffCondition Flip(LiftoffCondition cond) {
+inline constexpr Condition Flip(Condition cond) {
switch (cond) {
case kEqual:
return kEqual;
- case kUnequal:
- return kUnequal;
- case kSignedLessThan:
- return kSignedGreaterThan;
- case kSignedLessEqual:
- return kSignedGreaterEqual;
- case kSignedGreaterEqual:
- return kSignedLessEqual;
- case kSignedGreaterThan:
- return kSignedLessThan;
+ case kNotEqual:
+ return kNotEqual;
+ case kLessThan:
+ return kGreaterThan;
+ case kLessThanEqual:
+ return kGreaterThanEqual;
+ case kGreaterThanEqual:
+ return kLessThanEqual;
+ case kGreaterThan:
+ return kLessThan;
case kUnsignedLessThan:
return kUnsignedGreaterThan;
- case kUnsignedLessEqual:
- return kUnsignedGreaterEqual;
- case kUnsignedGreaterEqual:
- return kUnsignedLessEqual;
+ case kUnsignedLessThanEqual:
+ return kUnsignedGreaterThanEqual;
+ case kUnsignedGreaterThanEqual:
+ return kUnsignedLessThanEqual;
case kUnsignedGreaterThan:
return kUnsignedLessThan;
+ default:
+ UNREACHABLE();
}
}
@@ -109,16 +98,15 @@ class FreezeCacheState {
#endif
};
-class LiftoffAssembler : public TurboAssembler {
+class LiftoffAssembler : public MacroAssembler {
public:
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8;
- static constexpr ValueKind kPointerKind =
+ static constexpr ValueKind kIntPtrKind =
kSystemPointerSize == kInt32Size ? kI32 : kI64;
- static constexpr ValueKind kTaggedKind =
- kTaggedSize == kInt32Size ? kI32 : kI64;
- static constexpr ValueKind kSmiKind = kTaggedKind;
+ // A tagged value known to be a Smi can be treated like a ptr-sized int.
+ static constexpr ValueKind kSmiKind = kTaggedSize == kInt32Size ? kI32 : kI64;
using ValueKindSig = Signature<ValueKind>;
@@ -223,8 +211,10 @@ class LiftoffAssembler : public TurboAssembler {
ASSERT_TRIVIALLY_COPYABLE(VarState);
struct CacheState {
- // Allow default construction, move construction, and move assignment.
- CacheState() = default;
+ explicit CacheState(Zone* zone)
+ : stack_state(ZoneAllocator<VarState>{zone}) {}
+
+ // Allow move construction and move assignment.
CacheState(CacheState&&) V8_NOEXCEPT = default;
CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;
// Disallow copy construction.
@@ -250,7 +240,7 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(jkummerow): Wrap all accesses to {stack_state} in accessors that
// check {frozen}.
- base::SmallVector<VarState, 8> stack_state;
+ base::SmallVector<VarState, 16, ZoneAllocator<VarState>> stack_state;
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList last_spilled_regs;
@@ -270,7 +260,6 @@ class LiftoffAssembler : public TurboAssembler {
kFpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
return available_regs.HasAdjacentFpRegsSet();
}
- DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return has_unused_register(candidates.MaskOut(pinned));
}
@@ -294,7 +283,6 @@ class LiftoffAssembler : public TurboAssembler {
DCHECK(is_free(LiftoffRegister::ForFpPair(low)));
return LiftoffRegister::ForFpPair(low);
}
- DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return unused_register(candidates, pinned);
}
@@ -465,11 +453,7 @@ class LiftoffAssembler : public TurboAssembler {
return reg;
}
- // TODO(clemensb): Don't copy the full parent state (this makes us N^2).
- void InitMerge(const CacheState& source, uint32_t num_locals,
- uint32_t arity, uint32_t stack_depth);
-
- void Steal(const CacheState& source);
+ void Steal(CacheState& source);
void Split(const CacheState& source);
@@ -482,18 +466,37 @@ class LiftoffAssembler : public TurboAssembler {
CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
};
- explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
+ explicit LiftoffAssembler(Zone*, std::unique_ptr<AssemblerBuffer>);
~LiftoffAssembler() override;
- LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
+ Zone* zone() const { return cache_state_.stack_state.get_allocator().zone(); }
+
+ // Load a cache slot to a free register.
+ V8_INLINE LiftoffRegister LoadToRegister(VarState slot,
+ LiftoffRegList pinned) {
+ if (V8_LIKELY(slot.is_reg())) return slot.reg();
+ return LoadToRegister_Slow(slot, pinned);
+ }
+
+ // Slow path called for the method above.
+ V8_NOINLINE V8_PRESERVE_MOST LiftoffRegister
+ LoadToRegister_Slow(VarState slot, LiftoffRegList pinned);
- LiftoffRegister LoadToRegister(VarState slot, LiftoffRegister dst);
+ // Load a non-register cache slot to a given (fixed) register.
+ void LoadToFixedRegister(VarState slot, LiftoffRegister reg) {
+ DCHECK(slot.is_const() || slot.is_stack());
+ if (slot.is_const()) {
+ LoadConstant(reg, slot.constant());
+ } else {
+ Fill(reg, slot.offset(), slot.kind());
+ }
+ }
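
LoadToRegister is now split into an inlinable fast path (value already in a register) and an out-of-line slow path marked V8_NOINLINE V8_PRESERVE_MOST, so the common case stays cheap at every call site. The shape of that pattern, reduced to standard C++ without the V8 attribute macros (all names below are illustrative):

#include <cstdio>

struct Slot {
  bool in_register;
  int reg_or_offset;  // Register number if in_register, stack offset otherwise.
};

// Rare case kept out of line: materialize the value from the stack. In V8 the
// equivalent helper additionally carries noinline / preserve_most annotations.
static int LoadToRegisterSlow(const Slot& slot) {
  std::printf("fill a register from stack offset %d\n", slot.reg_or_offset);
  return 9;  // Pretend register r9 was allocated.
}

// Hot path: small enough to inline everywhere; only the unlikely case calls
// into the slow helper.
inline int LoadToRegister(const Slot& slot) {
  if (slot.in_register) return slot.reg_or_offset;
  return LoadToRegisterSlow(slot);
}

int main() {
  Slot hot{true, 3}, cold{false, 24};
  std::printf("r%d\n", LoadToRegister(hot));   // r3, no code emitted.
  std::printf("r%d\n", LoadToRegister(cold));  // Falls back to the fill.
}
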
- LiftoffRegister PopToRegister(LiftoffRegList pinned = {}) {
+ V8_INLINE LiftoffRegister PopToRegister(LiftoffRegList pinned = {}) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
- if (slot.is_reg()) {
+ if (V8_LIKELY(slot.is_reg())) {
cache_state_.dec_used(slot.reg());
return slot.reg();
}
@@ -504,7 +507,7 @@ class LiftoffAssembler : public TurboAssembler {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
- if (slot.is_reg()) {
+ if (V8_LIKELY(slot.is_reg())) {
cache_state_.dec_used(slot.reg());
if (slot.reg() == reg) return;
if (cache_state_.is_used(reg)) SpillRegister(reg);
@@ -512,7 +515,7 @@ class LiftoffAssembler : public TurboAssembler {
return;
}
if (cache_state_.is_used(reg)) SpillRegister(reg);
- LoadToRegister(slot, reg);
+ LoadToFixedRegister(slot, reg);
}
// Use this to pop a value into a register that has no other uses, so it
@@ -540,9 +543,11 @@ class LiftoffAssembler : public TurboAssembler {
void DropValues(int count);
- // Careful: this indexes "from the other end", i.e. depth=0 is the value
- // at the bottom of the stack!
- void DropValue(int depth);
+ // Drop a specific value from the stack; this is an expensive operation which
+ // is currently only used for exceptions.
+ // Careful: this indexes "from the other end", i.e. offset=0 is the value at
+ // the bottom of the stack.
+ void DropExceptionValueAtOffset(int offset);
// Ensure that the loop inputs are either in a register or spilled to the
// stack, so that we can merge different values on the back-edge.
@@ -583,7 +588,7 @@ class LiftoffAssembler : public TurboAssembler {
}
void PushConstant(ValueKind kind, int32_t i32_const) {
- DCHECK(kind == kI32 || kind == kI64);
+ V8_ASSUME(kind == kI32 || kind == kI64);
cache_state_.stack_state.emplace_back(kind, i32_const,
NextSpillOffset(kind));
}
@@ -592,7 +597,7 @@ class LiftoffAssembler : public TurboAssembler {
cache_state_.stack_state.emplace_back(kind, NextSpillOffset(kind));
}
- void SpillRegister(LiftoffRegister);
+ V8_NOINLINE V8_PRESERVE_MOST void SpillRegister(LiftoffRegister);
uint32_t GetNumUses(LiftoffRegister reg) const {
return cache_state_.get_use_count(reg);
@@ -611,7 +616,8 @@ class LiftoffAssembler : public TurboAssembler {
return GetUnusedRegister(rc, pinned);
}
- // Get an unused register for class {rc}, potentially spilling to free one.
+ // Get an unused register for class {rc}, excluding registers from {pinned},
+ // potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
DCHECK(!cache_state_.frozen);
if (kNeedI64RegPair && rc == kGpRegPair) {
@@ -628,7 +634,6 @@ class LiftoffAssembler : public TurboAssembler {
DoubleRegister low_fp = SpillAdjacentFpRegisters(pinned).fp();
return LiftoffRegister::ForFpPair(low_fp);
}
- DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc).MaskOut(pinned);
return GetUnusedRegister(candidates);
}
@@ -637,12 +642,9 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister GetUnusedRegister(LiftoffRegList candidates) {
DCHECK(!cache_state_.frozen);
DCHECK(!candidates.is_empty());
- if (cache_state_.has_unused_register(candidates)) {
+ if (V8_LIKELY(cache_state_.has_unused_register(candidates))) {
return cache_state_.unused_register(candidates);
}
- if (cache_state_.has_volatile_register(candidates)) {
- return cache_state_.take_volatile_register(candidates);
- }
return SpillOneRegister(candidates);
}
@@ -651,8 +653,15 @@ class LiftoffAssembler : public TurboAssembler {
// avoids making each subsequent (conditional) branch repeat this work.
void PrepareForBranch(uint32_t arity, LiftoffRegList pinned);
+ // These methods handle control-flow merges. {MergeIntoNewState} is used to
+ // generate a new {CacheState} for a merge point, and also emits code to
+ // transfer values from the current state to the new merge state.
+ // {MergeFullStackWith} and {MergeStackWith} then later generate the code for
+ // more merges into an existing state.
+ V8_NODISCARD CacheState MergeIntoNewState(uint32_t num_locals, uint32_t arity,
+ uint32_t stack_depth);
+ void MergeFullStackWith(CacheState& target);
enum JumpDirection { kForwardJump, kBackwardJump };
- void MergeFullStackWith(CacheState& target, const CacheState& source);
void MergeStackWith(CacheState& target, uint32_t arity, JumpDirection);
void Spill(VarState* slot);
@@ -703,7 +712,7 @@ class LiftoffAssembler : public TurboAssembler {
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(const ValueKindSig*, compiler::CallDescriptor*,
Register* target = nullptr,
- Register* target_instance = nullptr);
+ Register target_instance = no_reg);
// Process return values of the call.
void FinishCall(const ValueKindSig*, compiler::CallDescriptor*);
@@ -729,8 +738,12 @@ class LiftoffAssembler : public TurboAssembler {
ParallelRegisterMove(base::VectorOf(moves));
}
- void MoveToReturnLocations(const FunctionSig*,
- compiler::CallDescriptor* descriptor);
+ // Move the top stack values into the expected return locations specified by
+ // the given call descriptor.
+ void MoveToReturnLocations(const FunctionSig*, compiler::CallDescriptor*);
+ // Slow path for multi-return, called from {MoveToReturnLocations}.
+ V8_NOINLINE V8_PRESERVE_MOST void MoveToReturnLocationsMultiReturn(
+ const FunctionSig*, compiler::CallDescriptor*);
#if DEBUG
void SetCacheStateFrozen() { cache_state_.frozen++; }
void UnfreezeCacheState() {
@@ -756,23 +769,22 @@ class LiftoffAssembler : public TurboAssembler {
inline void PrepareTailCall(int num_callee_stack_params,
int stack_param_delta);
inline void AlignFrameSize();
- inline void PatchPrepareStackFrame(int offset, SafepointTableBuilder*);
+ inline void PatchPrepareStackFrame(int offset, SafepointTableBuilder*,
+ bool feedback_vector_slot);
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
inline static int SlotSizeForType(ValueKind kind);
inline static bool NeedsAlignment(ValueKind kind);
- inline void LoadConstant(LiftoffRegister, WasmValue,
- RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ inline void LoadConstant(LiftoffRegister, WasmValue);
inline void LoadInstanceFromFrame(Register dst);
inline void LoadFromInstance(Register dst, Register instance, int offset,
int size);
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
inline void LoadExternalPointer(Register dst, Register instance, int offset,
- ExternalPointerTag tag,
- Register isolate_root);
+ ExternalPointerTag tag, Register scratch);
inline void SpillInstance(Register instance);
inline void ResetOSRTarget();
inline void LoadTaggedPointer(Register dst, Register src_addr,
@@ -820,40 +832,47 @@ class LiftoffAssembler : public TurboAssembler {
bool is_store_mem = false, bool i64_offset = false);
inline void AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList pinned);
+ LoadType type, LiftoffRegList pinned, bool i64_offset);
inline void AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned);
+ StoreType type, LiftoffRegList pinned,
+ bool i64_offset);
inline void AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type);
+ LiftoffRegister result, StoreType type,
+ bool i64_offset);
inline void AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type);
+ LiftoffRegister result, StoreType type,
+ bool i64_offset);
inline void AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type);
+ LiftoffRegister result, StoreType type,
+ bool i64_offset);
inline void AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type);
+ LiftoffRegister result, StoreType type, bool i64_offset);
inline void AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type);
+ LiftoffRegister result, StoreType type,
+ bool i64_offset);
inline void AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type);
+ LiftoffRegister result, StoreType type,
+ bool i64_offset);
inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister expected,
LiftoffRegister new_value,
- LiftoffRegister value, StoreType type);
+ LiftoffRegister value, StoreType type,
+ bool i64_offset);
inline void AtomicFence();
@@ -1001,7 +1020,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
- void emit_ptrsize_set_cond(LiftoffCondition condition, Register dst,
+ void emit_ptrsize_set_cond(Condition condition, Register dst,
LiftoffRegister lhs, LiftoffRegister rhs) {
if (kSystemPointerSize == 8) {
emit_i64_set_cond(condition, dst, lhs, rhs);
@@ -1081,24 +1100,23 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_jump(Label*);
inline void emit_jump(Register);
- inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
- Register lhs, Register rhs,
- const FreezeCacheState& frozen);
- inline void emit_i32_cond_jumpi(LiftoffCondition, Label*, Register lhs,
- int imm, const FreezeCacheState& frozen);
+ inline void emit_cond_jump(Condition, Label*, ValueKind value, Register lhs,
+ Register rhs, const FreezeCacheState& frozen);
+ inline void emit_i32_cond_jumpi(Condition, Label*, Register lhs, int imm,
+ const FreezeCacheState& frozen);
inline void emit_i32_subi_jump_negative(Register value, int subtrahend,
Label* result_negative,
const FreezeCacheState& frozen);
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_eqz(Register dst, Register src);
- inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
+ inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
Register rhs);
inline void emit_i64_eqz(Register dst, LiftoffRegister src);
- inline void emit_i64_set_cond(LiftoffCondition condition, Register dst,
+ inline void emit_i64_set_cond(Condition condition, Register dst,
LiftoffRegister lhs, LiftoffRegister rhs);
- inline void emit_f32_set_cond(LiftoffCondition condition, Register dst,
+ inline void emit_f32_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
- inline void emit_f64_set_cond(LiftoffCondition condition, Register dst,
+ inline void emit_f64_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
// Optional select support: Returns false if generic code (via branches)
@@ -1117,10 +1135,11 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t* protected_load_pc);
inline void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr,
Register offset_reg, uintptr_t offset_imm, LoadType type,
- uint8_t lane, uint32_t* protected_load_pc);
+ uint8_t lane, uint32_t* protected_load_pc,
+ bool i64_offset);
inline void StoreLane(Register dst, Register offset, uintptr_t offset_imm,
LiftoffRegister src, StoreType type, uint8_t lane,
- uint32_t* protected_store_pc);
+ uint32_t* protected_store_pc, bool i64_offset);
inline void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, const uint8_t shuffle[16],
bool is_swizzle);
@@ -1659,7 +1678,9 @@ class LiftoffAssembler : public TurboAssembler {
private:
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
+ // Spill one of the candidate registers.
+ V8_NOINLINE V8_PRESERVE_MOST LiftoffRegister
+ SpillOneRegister(LiftoffRegList candidates);
// Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
@@ -1852,7 +1873,7 @@ class LiftoffStackSlots {
};
#if DEBUG
-bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
+bool CompatibleStackSlotTypes(ValueKind a, ValueKind b);
#endif
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 9443ea7db1..f0887de7d2 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -34,9 +34,7 @@
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes-inl.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
+namespace v8::internal::wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst;
@@ -86,20 +84,19 @@ struct assert_field_size {
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
#ifdef V8_CODE_COMMENTS
-#define CODE_COMMENT(str) \
- do { \
- __ RecordComment(str); \
- } while (false)
+#define CODE_COMMENT(str) __ RecordComment(str)
+#define SCOPED_CODE_COMMENT(str) \
+ AssemblerBase::CodeComment scoped_comment_##__LINE__(&asm_, str)
#else
#define CODE_COMMENT(str) ((void)0)
+#define SCOPED_CODE_COMMENT(str) ((void)0)
#endif
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
-constexpr ValueKind kPointerKind = LiftoffAssembler::kPointerKind;
+constexpr ValueKind kIntPtrKind = LiftoffAssembler::kIntPtrKind;
constexpr ValueKind kSmiKind = LiftoffAssembler::kSmiKind;
-constexpr ValueKind kTaggedKind = LiftoffAssembler::kTaggedKind;
// Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...);
using MakeSig = FixedSizeSignature<ValueKind>;
@@ -107,22 +104,23 @@ using MakeSig = FixedSizeSignature<ValueKind>;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Moving labels would confuse the Assembler,
-// thus store the label on the heap and keep a unique_ptr.
+// thus store the label in the Zone.
class MovableLabel {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
- MovableLabel() : label_(new Label()) {}
+ explicit MovableLabel(Zone* zone) : label_(zone->New<Label>()) {}
- Label* get() { return label_.get(); }
+ Label* get() { return label_; }
private:
- std::unique_ptr<Label> label_;
+ Label* label_;
};
#else
// On all other platforms, just store the Label directly.
class MovableLabel {
public:
- MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
+ explicit MovableLabel(Zone*) {}
Label* get() { return &label_; }
@@ -138,28 +136,28 @@ compiler::CallDescriptor* GetLoweredCallDescriptor(
: call_desc;
}
-constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
+constexpr Condition GetCompareCondition(WasmOpcode opcode) {
switch (opcode) {
case kExprI32Eq:
return kEqual;
case kExprI32Ne:
- return kUnequal;
+ return kNotEqual;
case kExprI32LtS:
- return kSignedLessThan;
+ return kLessThan;
case kExprI32LtU:
return kUnsignedLessThan;
case kExprI32GtS:
- return kSignedGreaterThan;
+ return kGreaterThan;
case kExprI32GtU:
return kUnsignedGreaterThan;
case kExprI32LeS:
- return kSignedLessEqual;
+ return kLessThanEqual;
case kExprI32LeU:
- return kUnsignedLessEqual;
+ return kUnsignedLessThanEqual;
case kExprI32GeS:
- return kSignedGreaterEqual;
+ return kGreaterThanEqual;
case kExprI32GeU:
- return kUnsignedGreaterEqual;
+ return kUnsignedGreaterThanEqual;
default:
UNREACHABLE();
}
@@ -340,17 +338,17 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
class LiftoffCompiler {
public:
- using ValidationTag = Decoder::BooleanValidationTag;
-
+ using ValidationTag = Decoder::NoValidationTag;
using Value = ValueBase<ValidationTag>;
struct ElseState {
+ explicit ElseState(Zone* zone) : label(zone), state(zone) {}
MovableLabel label;
LiftoffAssembler::CacheState state;
};
struct TryInfo {
- TryInfo() = default;
+ explicit TryInfo(Zone* zone) : catch_state(zone) {}
LiftoffAssembler::CacheState catch_state;
Label catch_label;
bool catch_reached = false;
@@ -358,18 +356,20 @@ class LiftoffCompiler {
};
struct Control : public ControlBase<Value, ValidationTag> {
- std::unique_ptr<ElseState> else_state;
+ ElseState* else_state = nullptr;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
- std::unique_ptr<TryInfo> try_info;
+ TryInfo* try_info = nullptr;
// Number of exceptions on the stack below this control.
int num_exceptions = 0;
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
template <typename... Args>
- explicit Control(Args&&... args) V8_NOEXCEPT
- : ControlBase(std::forward<Args>(args)...) {}
+ explicit Control(Zone* zone, Args&&... args) V8_NOEXCEPT
+ : ControlBase(zone, std::forward<Args>(args)...),
+ label_state(zone),
+ label(zone) {}
};
using FullDecoder = WasmFullDecoder<ValidationTag, LiftoffCompiler>;
@@ -433,14 +433,14 @@ class LiftoffCompiler {
// Named constructors:
static OutOfLineCode Trap(
- WasmCode::RuntimeStubId s, WasmCodePosition pos,
+ Zone* zone, WasmCode::RuntimeStubId s, WasmCodePosition pos,
SpilledRegistersForInspection* spilled_registers,
OutOfLineSafepointInfo* safepoint_info, uint32_t pc,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
DCHECK_LT(0, pos);
return {
- {}, // label
- {}, // continuation
+ MovableLabel{zone}, // label
+ MovableLabel{zone}, // continuation
s, // stub
pos, // position
{}, // regs_to_save
@@ -452,13 +452,13 @@ class LiftoffCompiler {
};
}
static OutOfLineCode StackCheck(
- WasmCodePosition pos, LiftoffRegList regs_to_save,
+ Zone* zone, WasmCodePosition pos, LiftoffRegList regs_to_save,
Register cached_instance, SpilledRegistersForInspection* spilled_regs,
OutOfLineSafepointInfo* safepoint_info,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {
- {}, // label
- {}, // continuation
+ MovableLabel{zone}, // label
+ MovableLabel{zone}, // continuation
WasmCode::kWasmStackGuard, // stub
pos, // position
regs_to_save, // regs_to_save
@@ -470,13 +470,13 @@ class LiftoffCompiler {
};
}
static OutOfLineCode TierupCheck(
- WasmCodePosition pos, LiftoffRegList regs_to_save,
+ Zone* zone, WasmCodePosition pos, LiftoffRegList regs_to_save,
Register cached_instance, SpilledRegistersForInspection* spilled_regs,
OutOfLineSafepointInfo* safepoint_info,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {
- {}, // label
- {}, // continuation,
+ MovableLabel{zone}, // label
+ MovableLabel{zone}, // continuation,
WasmCode::kWasmTriggerTierUp, // stub
pos, // position
regs_to_save, // regs_to_save
@@ -490,26 +490,25 @@ class LiftoffCompiler {
};
LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
- CompilationEnv* env, Zone* compilation_zone,
+ CompilationEnv* env, Zone* zone,
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
const LiftoffOptions& options)
- : asm_(std::move(buffer)),
- descriptor_(
- GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
+ : asm_(zone, std::move(buffer)),
+ descriptor_(GetLoweredCallDescriptor(zone, call_descriptor)),
env_(env),
debug_sidetable_builder_(debug_sidetable_builder),
for_debugging_(options.for_debugging),
func_index_(options.func_index),
- out_of_line_code_(compilation_zone),
- source_position_table_builder_(compilation_zone),
- protected_instructions_(compilation_zone),
- compilation_zone_(compilation_zone),
- safepoint_table_builder_(compilation_zone_),
+ out_of_line_code_(zone),
+ source_position_table_builder_(zone),
+ protected_instructions_(zone),
+ zone_(zone),
+ safepoint_table_builder_(zone_),
next_breakpoint_ptr_(options.breakpoints.begin()),
next_breakpoint_end_(options.breakpoints.end()),
dead_breakpoint_(options.dead_breakpoint),
- handlers_(compilation_zone),
+ handlers_(zone),
max_steps_(options.max_steps),
nondeterminism_(options.nondeterminism) {
// We often see huge numbers of traps per function, so pre-reserve some
@@ -744,7 +743,7 @@ class LiftoffCompiler {
SpilledRegistersForInspection* spilled_regs = nullptr;
OutOfLineSafepointInfo* safepoint_info =
- compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
+ zone_->New<OutOfLineSafepointInfo>(zone_);
__ cache_state()->GetTaggedSlotsForOOLCode(
&safepoint_info->slots, &safepoint_info->spills,
for_debugging_
@@ -763,8 +762,8 @@ class LiftoffCompiler {
spilled_regs = GetSpilledRegistersForInspection();
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
- position, regs_to_save, __ cache_state()->cached_instance, spilled_regs,
- safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
+ zone_, position, regs_to_save, __ cache_state()->cached_instance,
+ spilled_regs, safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
OutOfLineCode& ool = out_of_line_code_.back();
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
@@ -776,7 +775,7 @@ class LiftoffCompiler {
// overflows in the budget calculation.
DCHECK_LE(1, budget_used);
- if (for_debugging_ != kNoDebugging) return;
+ if (for_debugging_ != kNotForDebugging) return;
CODE_COMMENT("tierup check");
// We never want to blow the entire budget at once.
const int kMax = v8_flags.wasm_tiering_budget / 4;
@@ -815,13 +814,13 @@ class LiftoffCompiler {
SpilledRegistersForInspection* spilled_regs = nullptr;
OutOfLineSafepointInfo* safepoint_info =
- compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
+ zone_->New<OutOfLineSafepointInfo>(zone_);
__ cache_state()->GetTaggedSlotsForOOLCode(
&safepoint_info->slots, &safepoint_info->spills,
LiftoffAssembler::CacheState::SpillLocation::kTopOfStack);
out_of_line_code_.push_back(OutOfLineCode::TierupCheck(
- position, regs_to_save, __ cache_state()->cached_instance, spilled_regs,
- safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
+ zone_, position, regs_to_save, __ cache_state()->cached_instance,
+ spilled_regs, safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
OutOfLineCode& ool = out_of_line_code_.back();
FREEZE_STATE(trapping);
__ emit_i32_subi_jump_negative(budget_reg.gp(), budget_used,
@@ -848,7 +847,7 @@ class LiftoffCompiler {
return false;
}
- void TraceFunctionEntry(FullDecoder* decoder) {
+ V8_NOINLINE V8_PRESERVE_MOST void TraceFunctionEntry(FullDecoder* decoder) {
CODE_COMMENT("trace function entry");
__ SpillAllRegisters();
source_position_table_builder_.AddPosition(
@@ -858,7 +857,7 @@ class LiftoffCompiler {
}
bool dynamic_tiering() {
- return env_->dynamic_tiering && for_debugging_ == kNoDebugging &&
+ return env_->dynamic_tiering && for_debugging_ == kNotForDebugging &&
(v8_flags.wasm_tier_up_filter == -1 ||
v8_flags.wasm_tier_up_filter == func_index_);
}
@@ -874,7 +873,7 @@ class LiftoffCompiler {
__ CodeEntry();
- if (v8_flags.wasm_speculative_inlining) {
+ if (decoder->enabled_.has_inlining()) {
CODE_COMMENT("frame setup");
int declared_func_index =
func_index_ - env_->module->num_imported_functions;
@@ -927,14 +926,22 @@ class LiftoffCompiler {
// Initialize all reference type locals with ref.null.
if (has_refs) {
- Register null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
- LoadNullValue(null_ref_reg, {});
+ LiftoffRegList pinned;
+ Register null_ref_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned).gp());
+ Register wasm_null_ref_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned).gp());
+ LoadNullValue(null_ref_reg, pinned, kWasmExternRef);
+ LoadNullValue(wasm_null_ref_reg, pinned, kWasmAnyRef);
for (uint32_t local_index = num_params; local_index < __ num_locals();
++local_index) {
- ValueKind kind = __ local_kind(local_index);
- if (is_reference(kind)) {
+ ValueType type = decoder->local_types_[local_index];
+ if (type.is_reference()) {
__ Spill(__ cache_state()->stack_state[local_index].offset(),
- LiftoffRegister(null_ref_reg), kind);
+ IsSubtypeOf(type, kWasmExternRef, decoder->module_)
+ ? LiftoffRegister(null_ref_reg)
+ : LiftoffRegister(wasm_null_ref_reg),
+ type.kind());
}
}
}
@@ -958,7 +965,15 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(decoder, 0);
- if (v8_flags.trace_wasm) TraceFunctionEntry(decoder);
+ if (V8_UNLIKELY(max_steps_)) {
+ // Subtract 16 steps for the function call itself (including the function
+ // prologue), plus 1 for each local (including parameters).
+ // Do this only *after* setting up the frame completely, even though the
+ // corresponding work already happened earlier.
+ CheckMaxSteps(decoder, 16 + __ num_locals());
+ }
+
+ if (V8_UNLIKELY(v8_flags.trace_wasm)) TraceFunctionEntry(decoder);
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
@@ -1064,7 +1079,8 @@ class LiftoffCompiler {
}
DCHECK_EQ(frame_size, __ GetTotalFrameSize());
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
- &safepoint_table_builder_);
+ &safepoint_table_builder_,
+ decoder->enabled_.has_inlining());
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
// Emit the handler table.
@@ -1080,11 +1096,12 @@ class LiftoffCompiler {
DidAssemblerBailout(decoder);
DCHECK_EQ(num_exceptions_, 0);
- if (v8_flags.wasm_speculative_inlining &&
+ if (decoder->enabled_.has_inlining() &&
!encountered_call_instructions_.empty()) {
// Update the call targets stored in the WasmModule.
TypeFeedbackStorage& type_feedback = env_->module->type_feedback;
- base::MutexGuard mutex_guard(&type_feedback.mutex);
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ &type_feedback.mutex);
base::OwnedVector<uint32_t>& call_targets =
type_feedback.feedback_for_function[func_index_].call_targets;
if (call_targets.empty()) {
@@ -1103,6 +1120,35 @@ class LiftoffCompiler {
asm_.AbortCompilation();
}
+ void CheckMaxSteps(FullDecoder* decoder, int steps_done = 1) {
+ DCHECK_LE(1, steps_done);
+ CODE_COMMENT("check max steps");
+ LiftoffRegList pinned;
+ LiftoffRegister max_steps = pinned.set(__ GetUnusedRegister(kGpReg, {}));
+ LiftoffRegister max_steps_addr =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ {
+ FREEZE_STATE(frozen);
+ __ LoadConstant(
+ max_steps_addr,
+ WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(max_steps_)));
+ __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load);
+ // Subtract first (and store the result), so the caller can see that
+ // max_steps went negative. Since we never subtract too much at once,
+ // this cannot underflow.
+ DCHECK_GE(kMaxInt / 16, steps_done); // An arbitrary limit.
+ __ emit_i32_subi(max_steps.gp(), max_steps.gp(), steps_done);
+ __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store,
+ pinned);
+ Label cont;
+ __ emit_i32_cond_jumpi(kGreaterThanEqual, &cont, max_steps.gp(), 0,
+ frozen);
+ // Abort.
+ Trap(decoder, kTrapUnreachable);
+ __ bind(&cont);
+ }
+ }
+
V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
DCHECK(for_debugging_);
if (!WasmOpcodes::IsBreakable(opcode)) return;
@@ -1143,11 +1189,11 @@ class LiftoffCompiler {
{});
FREEZE_STATE(frozen);
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
- __ emit_cond_jump(kNotEqualZero, &do_break, kI32, flag, no_reg, frozen);
+ __ emit_cond_jump(kNotZero, &do_break, kI32, flag, no_reg, frozen);
// Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
- __ emit_cond_jump(kEqualZero, &no_break, kI32, flag, no_reg, frozen);
+ __ emit_cond_jump(kZero, &no_break, kI32, flag, no_reg, frozen);
__ bind(&do_break);
EmitBreakpoint(decoder);
@@ -1166,27 +1212,7 @@ class LiftoffCompiler {
__ bind(&cont);
}
if (V8_UNLIKELY(max_steps_ != nullptr)) {
- CODE_COMMENT("check max steps");
- LiftoffRegList pinned;
- LiftoffRegister max_steps = __ GetUnusedRegister(kGpReg, {});
- pinned.set(max_steps);
- LiftoffRegister max_steps_addr = __ GetUnusedRegister(kGpReg, pinned);
- pinned.set(max_steps_addr);
- {
- FREEZE_STATE(frozen);
- __ LoadConstant(
- max_steps_addr,
- WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(max_steps_)));
- __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load);
- Label cont;
- __ emit_i32_cond_jumpi(kUnequal, &cont, max_steps.gp(), 0, frozen);
- // Abort.
- Trap(decoder, kTrapUnreachable);
- __ bind(&cont);
- }
- __ emit_i32_subi(max_steps.gp(), max_steps.gp(), 1);
- __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store,
- pinned);
+ CheckMaxSteps(decoder);
}
}
@@ -1198,9 +1224,15 @@ class LiftoffCompiler {
SLOW_DCHECK(__ ValidateCacheState());
CODE_COMMENT(WasmOpcodes::OpcodeName(
WasmOpcodes::IsPrefixOpcode(opcode)
- ? decoder->read_prefixed_opcode<Decoder::FullValidationTag>(
- decoder->pc())
+ ? decoder->read_prefixed_opcode<ValidationTag>(decoder->pc()).first
: opcode));
+
+ if (!has_outstanding_op() && decoder->control_at(0)->reachable()) {
+ // The decoder stack and the Liftoff stack have to be in sync if the
+ // current code path is reachable.
+ DCHECK_EQ(decoder->stack_size() + __ num_locals() + num_exceptions_,
+ __ cache_state()->stack_state.size());
+ }
}
void EmitBreakpoint(FullDecoder* decoder) {
@@ -1249,13 +1281,13 @@ class LiftoffCompiler {
}
void Try(FullDecoder* decoder, Control* block) {
- block->try_info = std::make_unique<TryInfo>();
+ block->try_info = zone_->New<TryInfo>(zone_);
PushControl(block);
}
// Load the property in {kReturnRegister0}.
- LiftoffRegister GetExceptionProperty(LiftoffAssembler::VarState& exception,
- RootIndex root_index) {
+ LiftoffRegister GetExceptionProperty(
+ const LiftoffAssembler::VarState& exception, RootIndex root_index) {
DCHECK(root_index == RootIndex::kwasm_exception_tag_symbol ||
root_index == RootIndex::kwasm_exception_values_symbol);
@@ -1267,12 +1299,11 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext, pinned);
- LiftoffAssembler::VarState tag_symbol(kPointerKind, tag_symbol_reg, 0);
- LiftoffAssembler::VarState context(kPointerKind, context_reg, 0);
+ LiftoffAssembler::VarState tag_symbol{kRef, tag_symbol_reg, 0};
+ LiftoffAssembler::VarState context{kRef, context_reg, 0};
CallRuntimeStub(WasmCode::kWasmGetOwnProperty,
- MakeSig::Returns(kPointerKind)
- .Params(kPointerKind, kPointerKind, kPointerKind),
+ MakeSig::Returns(kRef).Params(kRef, kRef, kRef),
{exception, tag_symbol, context}, kNoSourcePosition);
return LiftoffRegister(kReturnRegister0);
@@ -1321,7 +1352,7 @@ class LiftoffCompiler {
frozen);
// The tags don't match, merge the current state into the catch state and
// jump to the next handler.
- __ MergeFullStackWith(block->try_info->catch_state, *__ cache_state());
+ __ MergeFullStackWith(block->try_info->catch_state);
__ emit_jump(&block->try_info->catch_label);
__ bind(&caught);
}
@@ -1334,9 +1365,8 @@ class LiftoffCompiler {
void Rethrow(FullDecoder* decoder,
const LiftoffAssembler::VarState& exception) {
- DCHECK_EQ(exception.kind(), kRef);
- CallRuntimeStub(WasmCode::kWasmRethrow, MakeSig::Params(kPointerKind),
- {exception}, decoder->position());
+ CallRuntimeStub(WasmCode::kWasmRethrow, MakeSig::Params(kRef), {exception},
+ decoder->position());
}
void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
@@ -1352,14 +1382,14 @@ class LiftoffCompiler {
MaybeOSR();
} else {
DCHECK(target->is_incomplete_try());
- if (!target->try_info->catch_reached) {
- target->try_info->catch_state.InitMerge(
- *__ cache_state(), __ num_locals(), 1,
- target->stack_depth + target->num_exceptions);
+ if (target->try_info->catch_reached) {
+ __ MergeStackWith(target->try_info->catch_state, 1,
+ LiftoffAssembler::kForwardJump);
+ } else {
+ target->try_info->catch_state = __ MergeIntoNewState(
+ __ num_locals(), 1, target->stack_depth + target->num_exceptions);
target->try_info->catch_reached = true;
}
- __ MergeStackWith(target->try_info->catch_state, 1,
- LiftoffAssembler::kForwardJump);
__ emit_jump(&target->try_info->catch_label);
}
}
@@ -1387,24 +1417,26 @@ class LiftoffCompiler {
}
__ bind(&block->try_info->catch_label);
- __ cache_state()->Steal(block->try_info->catch_state);
+ __ cache_state()->Split(block->try_info->catch_state);
if (!block->try_info->in_handler) {
block->try_info->in_handler = true;
num_exceptions_++;
}
}
+ // Before emitting the conditional branch, {will_freeze} will be initialized
+ // to prevent cache state changes in conditionally executed code.
void JumpIfFalse(FullDecoder* decoder, Label* false_dst,
- std::unique_ptr<FreezeCacheState>& will_freeze) {
- LiftoffCondition cond =
- test_and_reset_outstanding_op(kExprI32Eqz) ? kNotEqualZero : kEqualZero;
+ base::Optional<FreezeCacheState>& will_freeze) {
+ DCHECK(!will_freeze.has_value());
+ Condition cond =
+ test_and_reset_outstanding_op(kExprI32Eqz) ? kNotZero : kZero;
if (!has_outstanding_op()) {
// Unary comparison.
Register value = __ PopToRegister().gp();
- will_freeze.reset(new FreezeCacheState(asm_));
- __ emit_cond_jump(cond, false_dst, kI32, value, no_reg,
- *will_freeze.get());
+ will_freeze.emplace(asm_);
+ __ emit_cond_jump(cond, false_dst, kI32, value, no_reg, *will_freeze);
return;
}
@@ -1417,8 +1449,8 @@ class LiftoffCompiler {
int32_t rhs_imm = rhs_slot.i32_const();
__ cache_state()->stack_state.pop_back();
Register lhs = __ PopToRegister().gp();
- will_freeze.reset(new FreezeCacheState(asm_));
- __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm, *will_freeze.get());
+ will_freeze.emplace(asm_);
+ __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm, *will_freeze);
return;
}
@@ -1429,16 +1461,15 @@ class LiftoffCompiler {
int32_t lhs_imm = lhs_slot.i32_const();
__ cache_state()->stack_state.pop_back();
// Flip the condition, because {lhs} and {rhs} are swapped.
- will_freeze.reset(new FreezeCacheState(asm_));
- __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm,
- *will_freeze.get());
+ will_freeze.emplace(asm_);
+ __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm, *will_freeze);
return;
}
// Compare two arbitrary values.
Register lhs = __ PopToRegister(LiftoffRegList{rhs}).gp();
- will_freeze.reset(new FreezeCacheState(asm_));
- __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs, *will_freeze.get());
+ will_freeze.emplace(asm_);
+ __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs, *will_freeze);
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
@@ -1446,10 +1477,10 @@ class LiftoffCompiler {
DCHECK(if_block->is_if());
// Allocate the else state.
- if_block->else_state = std::make_unique<ElseState>();
+ if_block->else_state = zone_->New<ElseState>(zone_);
// Test the condition on the value stack, jump to else if zero.
- std::unique_ptr<FreezeCacheState> frozen;
+ base::Optional<FreezeCacheState> frozen;
JumpIfFalse(decoder, if_block->else_state->label.get(), frozen);
frozen.reset();
@@ -1460,19 +1491,13 @@ class LiftoffCompiler {
}
void FallThruTo(FullDecoder* decoder, Control* c) {
- if (!c->end_merge.reached) {
- c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- c->end_merge.arity,
- c->stack_depth + c->num_exceptions);
- }
- DCHECK(!c->is_try_catchall());
- if (c->is_try_catch()) {
- // Drop the implicit exception ref if any. There may be none if this is a
- // catch-less try block.
+ DCHECK_IMPLIES(c->is_try_catchall(), !c->end_merge.reached);
+ if (c->end_merge.reached) {
__ MergeStackWith(c->label_state, c->br_merge()->arity,
LiftoffAssembler::kForwardJump);
} else {
- __ MergeFullStackWith(c->label_state, *__ cache_state());
+ c->label_state = __ MergeIntoNewState(__ num_locals(), c->end_merge.arity,
+ c->stack_depth + c->num_exceptions);
}
__ emit_jump(c->label.get());
TraceCacheState(decoder);
@@ -1484,30 +1509,29 @@ class LiftoffCompiler {
// Someone already merged to the end of the if. Merge both arms into that.
if (c->reachable()) {
// Merge the if state into the end state.
- __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ MergeFullStackWith(c->label_state);
__ emit_jump(c->label.get());
}
// Merge the else state into the end state. Set this state as the current
// state first so helper functions know which registers are in use.
__ bind(c->else_state->label.get());
__ cache_state()->Steal(c->else_state->state);
- __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ MergeFullStackWith(c->label_state);
__ cache_state()->Steal(c->label_state);
} else if (c->reachable()) {
// No merge yet at the end of the if, but we need to create a merge for
- // the both arms of this if. Thus init the merge point from the else
- // state, then merge the if state into that.
+ // both arms of this if. Thus init the merge point from the current
+ // state, then merge the else state into that.
DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
- c->label_state.InitMerge(c->else_state->state, __ num_locals(),
- c->start_merge.arity,
+ c->label_state =
+ __ MergeIntoNewState(__ num_locals(), c->start_merge.arity,
c->stack_depth + c->num_exceptions);
- __ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
// Merge the else state into the end state. Set this state as the current
// state first so helper functions know which registers are in use.
__ bind(c->else_state->label.get());
__ cache_state()->Steal(c->else_state->state);
- __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ MergeFullStackWith(c->label_state);
__ cache_state()->Steal(c->label_state);
} else {
// No merge needed, just continue with the else state.
@@ -1521,7 +1545,8 @@ class LiftoffCompiler {
if (!c->end_merge.reached) {
if (c->try_info->catch_reached) {
// Drop the implicit exception ref.
- __ DropValue(__ num_locals() + c->stack_depth + c->num_exceptions);
+ __ DropExceptionValueAtOffset(__ num_locals() + c->stack_depth +
+ c->num_exceptions);
}
// Else we did not enter the catch state, continue with the current state.
} else {
@@ -1547,7 +1572,7 @@ class LiftoffCompiler {
// There is a merge already. Merge our state into that, then continue with
// that state.
if (c->reachable()) {
- __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ MergeFullStackWith(c->label_state);
}
__ cache_state()->Steal(c->label_state);
} else {
@@ -1694,6 +1719,26 @@ class LiftoffCompiler {
__ PushRegister(dst_kind, dst);
}
+ void EmitIsNull(WasmOpcode opcode, ValueType type) {
+ LiftoffRegList pinned;
+ LiftoffRegister ref = pinned.set(__ PopToRegister());
+ LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
+ LoadNullValueForCompare(null.gp(), pinned, type);
+ // Prefer to overwrite one of the input registers with the result
+ // of the comparison.
+ LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
+#if defined(V8_COMPRESS_POINTERS)
+ // As the value in the {null} register is only the tagged pointer part,
+ // we may only compare 32 bits, not the full pointer size.
+ __ emit_i32_set_cond(opcode == kExprRefIsNull ? kEqual : kNotEqual,
+ dst.gp(), ref.gp(), null.gp());
+#else
+ __ emit_ptrsize_set_cond(opcode == kExprRefIsNull ? kEqual : kNotEqual,
+ dst.gp(), ref, null);
+#endif
+ __ PushRegister(kI32, dst);
+ }
+
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
@@ -1822,32 +1867,34 @@ class LiftoffCompiler {
case kExprRefIsNull:
// We abuse ref.as_non_null, which isn't otherwise used in this switch, as
// a sentinel for the negation of ref.is_null.
- case kExprRefAsNonNull: {
+ case kExprRefAsNonNull:
+ return EmitIsNull(opcode, value.type);
+ case kExprExternInternalize: {
+ LiftoffAssembler::VarState input_state =
+ __ cache_state()->stack_state.back();
+ CallRuntimeStub(WasmCode::kWasmExternInternalize,
+ MakeSig::Returns(kRefNull).Params(kRefNull),
+ {input_state}, decoder->position());
+ __ DropValues(1);
+ __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
+ return;
+ }
+ case kExprExternExternalize: {
LiftoffRegList pinned;
- LiftoffRegister ref = pinned.set(__ PopToRegister());
+ LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
- LoadNullValueForCompare(null.gp(), pinned);
- // Prefer to overwrite one of the input registers with the result
- // of the comparison.
- LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
-#if defined(V8_COMPRESS_POINTERS)
- // As the value in the {null} register is only the tagged pointer part,
- // we may only compare 32 bits, not the full pointer size.
- __ emit_i32_set_cond(opcode == kExprRefIsNull ? kEqual : kUnequal,
- dst.gp(), ref.gp(), null.gp());
-#else
- __ emit_ptrsize_set_cond(opcode == kExprRefIsNull ? kEqual : kUnequal,
- dst.gp(), ref, null);
-#endif
- __ PushRegister(kI32, dst);
+ LoadNullValueForCompare(null.gp(), pinned, kWasmAnyRef);
+ Label label;
+ {
+ FREEZE_STATE(frozen);
+ __ emit_cond_jump(kNotEqual, &label, kRefNull, ref.gp(), null.gp(),
+ frozen);
+ LoadNullValue(ref.gp(), pinned, kWasmExternRef);
+ __ bind(&label);
+ }
+ __ PushRegister(kRefNull, ref);
return;
}
- case kExprExternInternalize:
- // TODO(7748): Canonicalize heap numbers.
- return;
- case kExprExternExternalize:
- // This is a no-op.
- return;
default:
UNREACHABLE();
}
@@ -2031,37 +2078,37 @@ class LiftoffCompiler {
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kEqual));
case kExprI64Ne:
return EmitBinOp<kI64, kI32>(
- BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnequal));
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kNotEqual));
case kExprI64LtS:
return EmitBinOp<kI64, kI32>(
- BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessThan));
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kLessThan));
case kExprI64LtU:
return EmitBinOp<kI64, kI32>(
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThan));
case kExprI64GtS:
- return EmitBinOp<kI64, kI32>(BindFirst(
- &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterThan));
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kGreaterThan));
case kExprI64GtU:
return EmitBinOp<kI64, kI32>(BindFirst(
&LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThan));
case kExprI64LeS:
return EmitBinOp<kI64, kI32>(
- BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessEqual));
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kLessThanEqual));
case kExprI64LeU:
return EmitBinOp<kI64, kI32>(BindFirst(
- &LiftoffAssembler::emit_i64_set_cond, kUnsignedLessEqual));
+ &LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThanEqual));
case kExprI64GeS:
- return EmitBinOp<kI64, kI32>(BindFirst(
- &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterEqual));
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kGreaterThanEqual));
case kExprI64GeU:
return EmitBinOp<kI64, kI32>(BindFirst(
- &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterEqual));
+ &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThanEqual));
case kExprF32Eq:
return EmitBinOp<kF32, kI32>(
BindFirst(&LiftoffAssembler::emit_f32_set_cond, kEqual));
case kExprF32Ne:
return EmitBinOp<kF32, kI32>(
- BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnequal));
+ BindFirst(&LiftoffAssembler::emit_f32_set_cond, kNotEqual));
case kExprF32Lt:
return EmitBinOp<kF32, kI32>(
BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThan));
@@ -2070,16 +2117,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThan));
case kExprF32Le:
return EmitBinOp<kF32, kI32>(BindFirst(
- &LiftoffAssembler::emit_f32_set_cond, kUnsignedLessEqual));
+ &LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThanEqual));
case kExprF32Ge:
return EmitBinOp<kF32, kI32>(BindFirst(
- &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterEqual));
+ &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThanEqual));
case kExprF64Eq:
return EmitBinOp<kF64, kI32>(
BindFirst(&LiftoffAssembler::emit_f64_set_cond, kEqual));
case kExprF64Ne:
return EmitBinOp<kF64, kI32>(
- BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnequal));
+ BindFirst(&LiftoffAssembler::emit_f64_set_cond, kNotEqual));
case kExprF64Lt:
return EmitBinOp<kF64, kI32>(
BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThan));
@@ -2088,10 +2135,10 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThan));
case kExprF64Le:
return EmitBinOp<kF64, kI32>(BindFirst(
- &LiftoffAssembler::emit_f64_set_cond, kUnsignedLessEqual));
+ &LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThanEqual));
case kExprF64Ge:
return EmitBinOp<kF64, kI32>(BindFirst(
- &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterEqual));
+ &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThanEqual));
case kExprI32Shl:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shl,
&LiftoffAssembler::emit_i32_shli);
@@ -2274,7 +2321,7 @@ class LiftoffCompiler {
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
- LoadNullValue(null.gp(), {});
+ LoadNullValue(null.gp(), {}, type);
__ PushRegister(type.kind(), null);
}
@@ -2296,7 +2343,7 @@ class LiftoffCompiler {
void Drop(FullDecoder* decoder) { __ DropValues(1); }
- void TraceFunctionExit(FullDecoder* decoder) {
+ V8_NOINLINE V8_PRESERVE_MOST void TraceFunctionExit(FullDecoder* decoder) {
CODE_COMMENT("trace function exit");
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
@@ -2316,6 +2363,9 @@ class LiftoffCompiler {
}
DCHECK(return_slot.is_stack());
__ LoadSpillAddress(param_reg, return_slot.offset(), return_slot.kind());
+ } else {
+ // Make sure to pass a "valid" parameter (Smi::zero()).
+ LoadSmi(LiftoffRegister{param_reg}, 0);
}
source_position_table_builder_.AddPosition(
@@ -2344,7 +2394,7 @@ class LiftoffCompiler {
}
void ReturnImpl(FullDecoder* decoder, Register tmp1, Register tmp2) {
- if (v8_flags.trace_wasm) TraceFunctionExit(decoder);
+ if (V8_UNLIKELY(v8_flags.trace_wasm)) TraceFunctionExit(decoder);
if (dynamic_tiering()) {
TierupCheck(decoder, decoder->position(), __ pc_offset(), tmp1, tmp2);
}
@@ -2390,7 +2440,7 @@ class LiftoffCompiler {
state.dec_used(slot_reg);
dst_slot->MakeStack();
}
- DCHECK(CheckCompatibleStackSlotTypes(kind, __ local_kind(local_index)));
+ DCHECK(CompatibleStackSlotTypes(kind, __ local_kind(local_index)));
RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), kind);
@@ -2570,7 +2620,7 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(table_index_reg, WasmValue(imm.index));
- LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+ LiftoffAssembler::VarState table_index{kI32, table_index_reg, 0};
LiftoffAssembler::VarState index = __ cache_state()->stack_state.back();
@@ -2597,7 +2647,7 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(table_index_reg, WasmValue(imm.index));
- LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+ LiftoffAssembler::VarState table_index{kI32, table_index_reg, 0};
LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
@@ -2607,7 +2657,7 @@ class LiftoffCompiler {
auto stub =
is_funcref ? WasmCode::kWasmTableSetFuncRef : WasmCode::kWasmTableSet;
- CallRuntimeStub(stub, MakeSig::Params(kI32, kI32, type.kind()),
+ CallRuntimeStub(stub, MakeSig::Params(kI32, kI32, kRefNull),
{table_index, index, value}, decoder->position());
// Pop parameters from the value stack.
@@ -2636,14 +2686,14 @@ class LiftoffCompiler {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
- void AssertNullImpl(FullDecoder* decoder, const Value& arg, Value* result,
- LiftoffCondition cond) {
+ void AssertNullTypecheckImpl(FullDecoder* decoder, const Value& arg,
+ Value* result, Condition cond) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
- LoadNullValueForCompare(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned, arg.type);
{
FREEZE_STATE(trapping);
__ emit_cond_jump(cond, trap_label, kRefNull, obj.gp(), null.gp(),
@@ -2652,12 +2702,14 @@ class LiftoffCompiler {
__ PushRegister(kRefNull, obj);
}
- void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) {
- AssertNullImpl(decoder, arg, result, kUnequal);
+ void AssertNullTypecheck(FullDecoder* decoder, const Value& arg,
+ Value* result) {
+ AssertNullTypecheckImpl(decoder, arg, result, kNotEqual);
}
- void AssertNotNull(FullDecoder* decoder, const Value& arg, Value* result) {
- AssertNullImpl(decoder, arg, result, kEqual);
+ void AssertNotNullTypecheck(FullDecoder* decoder, const Value& arg,
+ Value* result) {
+ AssertNullTypecheckImpl(decoder, arg, result, kEqual);
}
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
@@ -2669,7 +2721,7 @@ class LiftoffCompiler {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
- DCHECK(CheckCompatibleStackSlotTypes(
+ DCHECK(CompatibleStackSlotTypes(
kind, __ cache_state()->stack_state.end()[-2].kind()));
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
@@ -2713,14 +2765,15 @@ class LiftoffCompiler {
// and found to not make a difference.
}
}
- if (!target->br_merge()->reached) {
- target->label_state.InitMerge(
- *__ cache_state(), __ num_locals(), target->br_merge()->arity,
- target->stack_depth + target->num_exceptions);
+ if (target->br_merge()->reached) {
+ __ MergeStackWith(target->label_state, target->br_merge()->arity,
+ target->is_loop() ? LiftoffAssembler::kBackwardJump
+ : LiftoffAssembler::kForwardJump);
+ } else {
+ target->label_state =
+ __ MergeIntoNewState(__ num_locals(), target->br_merge()->arity,
+ target->stack_depth + target->num_exceptions);
}
- __ MergeStackWith(target->label_state, target->br_merge()->arity,
- target->is_loop() ? LiftoffAssembler::kBackwardJump
- : LiftoffAssembler::kForwardJump);
__ jmp(target->label.get());
}
@@ -2767,7 +2820,7 @@ class LiftoffCompiler {
if (NeedsTierupCheck(decoder, depth)) AllocateTempRegisters(temps);
// Test the condition on the value stack, jump to {cont_false} if zero.
- std::unique_ptr<FreezeCacheState> frozen;
+ base::Optional<FreezeCacheState> frozen;
JumpIfFalse(decoder, &cont_false, frozen);
BrOrRetImpl(decoder, depth, temps.tmp1, temps.tmp2);
@@ -2778,14 +2831,16 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
- std::map<uint32_t, MovableLabel>* br_targets,
+ ZoneMap<uint32_t, MovableLabel>* br_targets,
Register tmp1, Register tmp2) {
- MovableLabel& label = (*br_targets)[br_depth];
- if (label.get()->is_bound()) {
- __ jmp(label.get());
- } else {
- __ bind(label.get());
+ auto [iterator, is_new_target] = br_targets->emplace(br_depth, zone_);
+ Label* label = iterator->second.get();
+ DCHECK_EQ(is_new_target, !label->is_bound());
+ if (is_new_target) {
+ __ bind(label);
BrOrRetImpl(decoder, br_depth, tmp1, tmp2);
+ } else {
+ __ jmp(label);
}
}
@@ -2794,7 +2849,7 @@ class LiftoffCompiler {
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
BranchTableIterator<ValidationTag>* table_iterator,
- std::map<uint32_t, MovableLabel>* br_targets,
+ ZoneMap<uint32_t, MovableLabel>* br_targets,
Register tmp1, Register tmp2,
const FreezeCacheState& frozen) {
DCHECK_LT(min, max);
@@ -2808,7 +2863,7 @@ class LiftoffCompiler {
uint32_t split = min + (max - min) / 2;
Label upper_half;
__ LoadConstant(tmp, WasmValue(split));
- __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(),
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, &upper_half, kI32, value.gp(),
tmp.gp(), frozen);
// Emit br table for lower half:
GenerateBrTable(decoder, tmp, value, min, split, table_iterator, br_targets,
@@ -2850,23 +2905,23 @@ class LiftoffCompiler {
{
// All targets must have the same arity (checked by validation), so
// we can just sample any of them to find that arity.
- uint32_t ignored_length;
- uint32_t sample_depth = decoder->read_u32v<Decoder::NoValidationTag>(
- imm.table, &ignored_length, "first depth");
+ auto [sample_depth, unused_length] =
+ decoder->read_u32v<Decoder::NoValidationTag>(imm.table,
+ "first depth");
__ PrepareForBranch(decoder->control_at(sample_depth)->br_merge()->arity,
pinned);
}
- BranchTableIterator<ValidationTag> table_iterator(decoder, imm);
- std::map<uint32_t, MovableLabel> br_targets;
+ BranchTableIterator<ValidationTag> table_iterator{decoder, imm};
+ ZoneMap<uint32_t, MovableLabel> br_targets{zone_};
if (imm.table_count > 0) {
LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
__ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
FREEZE_STATE(frozen);
Label case_default;
- __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(),
- tmp.gp(), frozen);
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, &case_default, kI32,
+ value.gp(), tmp.gp(), frozen);
GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
&br_targets, tmp1, tmp2, frozen);
@@ -2883,12 +2938,13 @@ class LiftoffCompiler {
void Else(FullDecoder* decoder, Control* c) {
if (c->reachable()) {
- if (!c->end_merge.reached) {
- c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- c->end_merge.arity,
+ if (c->end_merge.reached) {
+ __ MergeFullStackWith(c->label_state);
+ } else {
+ c->label_state =
+ __ MergeIntoNewState(__ num_locals(), c->end_merge.arity,
c->stack_depth + c->num_exceptions);
}
- __ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
}
__ bind(c->else_state->label.get());
@@ -2899,8 +2955,7 @@ class LiftoffCompiler {
DCHECK(for_debugging_);
// If we are generating debugging code, we really need to spill all
// registers to make them inspectable when stopping at the trap.
- auto* spilled = compilation_zone_->New<SpilledRegistersForInspection>(
- compilation_zone_);
+ auto* spilled = zone_->New<SpilledRegistersForInspection>(zone_);
for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) {
auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue;
@@ -2922,14 +2977,13 @@ class LiftoffCompiler {
// define a safepoint for traps that would preserve references on the
// stack. However, if this is debug code, then we have to preserve the
// references so that they can be inspected.
- safepoint_info =
- compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
+ safepoint_info = zone_->New<OutOfLineSafepointInfo>(zone_);
__ cache_state()->GetTaggedSlotsForOOLCode(
&safepoint_info->slots, &safepoint_info->spills,
LiftoffAssembler::CacheState::SpillLocation::kStackSlots);
}
out_of_line_code_.push_back(OutOfLineCode::Trap(
- stub, decoder->position(),
+ zone_, stub, decoder->position(),
V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
: nullptr,
safepoint_info, pc, RegisterOOLDebugSideTableEntry(decoder)));
@@ -2944,9 +2998,9 @@ class LiftoffCompiler {
Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint64_t offset, LiftoffRegister index,
LiftoffRegList pinned, ForceCheck force_check) {
- const bool statically_oob =
- !base::IsInBounds<uintptr_t>(offset, access_size,
- env_->max_memory_size);
+ // This is ensured by the decoder.
+ DCHECK(base::IsInBounds<uintptr_t>(offset, access_size,
+ env_->module->max_memory_size));
// After bounds checking, we know that the index must be ptrsize, hence only
// look at the lower word on 32-bit systems (the high word is bounds-checked
@@ -2962,8 +3016,7 @@ class LiftoffCompiler {
// Early return for trap handler.
DCHECK_IMPLIES(env_->module->is_memory64,
env_->bounds_checks == kExplicitBoundsChecks);
- if (!force_check && !statically_oob &&
- env_->bounds_checks == kTrapHandler) {
+ if (!force_check && env_->bounds_checks == kTrapHandler) {
// With trap handlers we should not have a register pair as input (we
// would only return the lower half).
DCHECK(index.is_gp());
@@ -2977,21 +3030,15 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, 0);
- if (V8_UNLIKELY(statically_oob)) {
- __ emit_jump(trap_label);
- decoder->SetSucceedingCodeDynamicallyUnreachable();
- return no_reg;
- }
-
// Convert the index to ptrsize, bounds-checking the high word on 32-bit
// systems for memory64.
if (!env_->module->is_memory64) {
__ emit_u32_to_uintptr(index_ptrsize, index_ptrsize);
} else if (kSystemPointerSize == kInt32Size) {
- DCHECK_GE(kMaxUInt32, env_->max_memory_size);
+ DCHECK_GE(kMaxUInt32, env_->module->max_memory_size);
FREEZE_STATE(trapping);
- __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp(),
- no_reg, trapping);
+ __ emit_cond_jump(kNotZero, trap_label, kI32, index.high_gp(), no_reg,
+ trapping);
}
uintptr_t end_offset = offset + access_size - 1u;
@@ -3008,8 +3055,8 @@ class LiftoffCompiler {
// If the end offset is larger than the smallest memory, dynamically check
// the end offset against the actual memory size, which is not known at
// compile time. Otherwise, only one check is required (see below).
- if (end_offset > env_->min_memory_size) {
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind,
+ if (end_offset > env_->module->min_memory_size) {
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, trap_label, kIntPtrKind,
end_offset_reg.gp(), mem_size.gp(), trapping);
}
@@ -3019,7 +3066,7 @@ class LiftoffCompiler {
__ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
end_offset_reg.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind,
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, trap_label, kIntPtrKind,
index_ptrsize, effective_size_reg.gp(), trapping);
return index_ptrsize;
}
@@ -3042,12 +3089,12 @@ class LiftoffCompiler {
// {emit_cond_jump} to use the "test" instruction without the "and" here.
// Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask);
- __ emit_cond_jump(kUnequal, trap_label, kI32, address, no_reg, trapping);
+ __ emit_cond_jump(kNotEqual, trap_label, kI32, address, no_reg, trapping);
} else {
// For alignment checks we only look at the lower 32 bits in {offset}.
__ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
__ emit_i32_andi(address, address, align_mask);
- __ emit_cond_jump(kUnequal, trap_label, kI32, address, no_reg, trapping);
+ __ emit_cond_jump(kNotEqual, trap_label, kI32, address, no_reg, trapping);
}
}
@@ -3062,11 +3109,23 @@ class LiftoffCompiler {
// Get one register for computing the effective offset (offset + index).
LiftoffRegister effective_offset =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- DCHECK_GE(kMaxUInt32, offset);
- __ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
- if (index != no_reg) {
- // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
- __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ bool is_memory64 = env_->module->is_memory64;
+ if (is_memory64 && !kNeedI64RegPair) {
+ __ LoadConstant(effective_offset,
+ WasmValue(static_cast<uint64_t>(offset)));
+ if (index != no_reg) {
+ __ emit_i64_add(effective_offset, effective_offset,
+ LiftoffRegister(index));
+ }
+ } else {
+ // The offset is actually a 32-bit number when 'kNeedI64RegPair'
+ // is true, so we just do 32-bit operations on it under memory64.
+ DCHECK_GE(kMaxUInt32, offset);
+ __ LoadConstant(effective_offset,
+ WasmValue(static_cast<uint32_t>(offset)));
+ if (index != no_reg) {
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ }
}
// Get a register to hold the stack slot for MemoryTracingInfo.
@@ -3079,7 +3138,7 @@ class LiftoffCompiler {
LiftoffRegister data = effective_offset;
// Now store all information into the MemoryTracingInfo struct.
- if (kSystemPointerSize == 8) {
+ if (kSystemPointerSize == 8 && !is_memory64) {
// Zero-extend the effective offset to u64.
CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset,
nullptr));
@@ -3100,7 +3159,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), kPointerKind);
+ __ Move(param_reg, info.gp(), kIntPtrKind);
}
source_position_table_builder_.AddPosition(__ pc_offset(),
@@ -3121,7 +3180,7 @@ class LiftoffCompiler {
if (effective_offset < index // overflow
|| !base::IsInBounds<uintptr_t>(effective_offset, access_size,
- env_->min_memory_size)) {
+ env_->module->min_memory_size)) {
return false;
}
@@ -3129,17 +3188,24 @@ class LiftoffCompiler {
return true;
}
- Register GetMemoryStart(LiftoffRegList pinned) {
+ V8_INLINE Register GetMemoryStart(LiftoffRegList pinned) {
Register memory_start = __ cache_state()->cached_mem_start;
- if (memory_start == no_reg) {
- memory_start = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(memory_start, MemoryStart, kSystemPointerSize,
- pinned);
+ if (V8_UNLIKELY(memory_start == no_reg)) {
+ memory_start = GetMemoryStart_Slow(pinned);
+ }
+ return memory_start;
+ }
+
+ V8_NOINLINE V8_PRESERVE_MOST Register
+ GetMemoryStart_Slow(LiftoffRegList pinned) {
+ DCHECK_EQ(no_reg, __ cache_state()->cached_mem_start);
+ SCOPED_CODE_COMMENT("load memory start");
+ Register memory_start = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LOAD_INSTANCE_FIELD(memory_start, MemoryStart, kSystemPointerSize, pinned);
#ifdef V8_ENABLE_SANDBOX
- __ DecodeSandboxedPointer(memory_start);
+ __ DecodeSandboxedPointer(memory_start);
#endif
- __ cache_state()->SetMemStartCacheRegister(memory_start);
- }
+ __ cache_state()->SetMemStartCacheRegister(memory_start);
return memory_start;
}
@@ -3162,7 +3228,7 @@ class LiftoffCompiler {
bool i64_offset = index_slot.kind() == kI64;
if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
__ cache_state()->stack_state.pop_back();
- CODE_COMMENT("load from memory (constant offset)");
+ SCOPED_CODE_COMMENT("load from memory (constant offset)");
LiftoffRegList pinned;
Register mem = pinned.set(GetMemoryStart(pinned));
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
@@ -3172,9 +3238,8 @@ class LiftoffCompiler {
LiftoffRegister full_index = __ PopToRegister();
index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
kDontForceCheck);
- if (index == no_reg) return;
- CODE_COMMENT("load from memory");
+ SCOPED_CODE_COMMENT("load from memory");
LiftoffRegList pinned{index};
// Load the memory start address only now to reduce register pressure
@@ -3216,7 +3281,6 @@ class LiftoffCompiler {
transform == LoadTransformationKind::kExtend ? 8 : type.size();
Register index = BoundsCheckMem(decoder, access_size, imm.offset,
full_index, {}, kDontForceCheck);
- if (index == no_reg) return;
uintptr_t offset = imm.offset;
LiftoffRegList pinned{index};
@@ -3255,7 +3319,9 @@ class LiftoffCompiler {
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDontForceCheck);
- if (index == no_reg) return;
+
+ DCHECK(_index.type.kind() == kI32 || _index.type.kind() == kI64);
+ bool i64_offset = _index.type.kind() == kI64;
uintptr_t offset = imm.offset;
pinned.set(index);
@@ -3265,7 +3331,7 @@ class LiftoffCompiler {
uint32_t protected_load_pc = 0;
__ LoadLane(result, value, addr, index, offset, type, laneidx,
- &protected_load_pc);
+ &protected_load_pc, i64_offset);
if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
@@ -3298,7 +3364,7 @@ class LiftoffCompiler {
bool i64_offset = index_slot.kind() == kI64;
if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
__ cache_state()->stack_state.pop_back();
- CODE_COMMENT("store to memory (constant offset)");
+ SCOPED_CODE_COMMENT("store to memory (constant offset)");
Register mem = pinned.set(GetMemoryStart(pinned));
__ Store(mem, no_reg, offset, value, type, pinned, nullptr, true,
i64_offset);
@@ -3306,10 +3372,9 @@ class LiftoffCompiler {
LiftoffRegister full_index = __ PopToRegister(pinned);
index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
pinned, kDontForceCheck);
- if (index == no_reg) return;
pinned.set(index);
- CODE_COMMENT("store to memory");
+ SCOPED_CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -3339,14 +3404,17 @@ class LiftoffCompiler {
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDontForceCheck);
- if (index == no_reg) return;
+
+ DCHECK(_index.type.kind() == kI32 || _index.type.kind() == kI64);
+ bool i64_offset = _index.type.kind() == kI64;
uintptr_t offset = imm.offset;
pinned.set(index);
CODE_COMMENT("store lane to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
- __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
+ __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc,
+ i64_offset);
if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
@@ -3388,14 +3456,14 @@ class LiftoffCompiler {
__ LoadConstant(result, WasmValue(int32_t{-1}));
if (kNeedI64RegPair) {
FREEZE_STATE(all_spilled_anyway);
- __ emit_cond_jump(kUnequal, &done, kI32, input.high_gp(), no_reg,
+ __ emit_cond_jump(kNotEqual, &done, kI32, input.high_gp(), no_reg,
all_spilled_anyway);
input = input.low();
} else {
LiftoffRegister high_word = __ GetUnusedRegister(kGpReg, pinned);
__ emit_i64_shri(high_word, input, 32);
FREEZE_STATE(all_spilled_anyway);
- __ emit_cond_jump(kUnequal, &done, kI32, high_word.gp(), no_reg,
+ __ emit_cond_jump(kNotEqual, &done, kI32, high_word.gp(), no_reg,
all_spilled_anyway);
}
}
@@ -3463,7 +3531,7 @@ class LiftoffCompiler {
? decoder->local_type(index)
: exception ? ValueType::Ref(HeapType::kAny)
: decoder->stack_value(decoder_stack_index--)->type;
- DCHECK(CheckCompatibleStackSlotTypes(slot.kind(), type.kind()));
+ DCHECK(CompatibleStackSlotTypes(slot.kind(), type.kind()));
value.type = type;
switch (slot.loc()) {
case kIntConst:
@@ -3567,11 +3635,11 @@ class LiftoffCompiler {
Register tmp = NeedsTierupCheck(decoder, depth)
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: no_reg;
- LoadNullValueForCompare(null, pinned);
+ LoadNullValueForCompare(null, pinned, ref_object.type);
{
FREEZE_STATE(frozen);
- __ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
- null, frozen);
+ __ emit_cond_jump(kNotEqual, &cont_false, ref_object.type.kind(),
+ ref.gp(), null, frozen);
BrOrRetImpl(decoder, depth, null, tmp);
}
__ bind(&cont_false);
@@ -3597,7 +3665,7 @@ class LiftoffCompiler {
Register tmp = NeedsTierupCheck(decoder, depth)
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: no_reg;
- LoadNullValueForCompare(null, pinned);
+ LoadNullValueForCompare(null, pinned, ref_object.type);
{
FREEZE_STATE(frozen);
__ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(),
@@ -4625,7 +4693,7 @@ class LiftoffCompiler {
}
void GetExceptionValues(FullDecoder* decoder,
- LiftoffAssembler::VarState& exception_var,
+ const LiftoffAssembler::VarState& exception_var,
const WasmTag* tag) {
LiftoffRegList pinned;
CODE_COMMENT("get exception values");
@@ -4642,7 +4710,7 @@ class LiftoffCompiler {
void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
if (decoder->current_catch() == -1) return;
- MovableLabel handler;
+ MovableLabel handler{zone_};
// If we return from the throwing code normally, just skip over the handler.
Label skip_handler;
@@ -4657,14 +4725,15 @@ class LiftoffCompiler {
Control* current_try =
decoder->control_at(decoder->control_depth_of_current_catch());
DCHECK_NOT_NULL(current_try->try_info);
- if (!current_try->try_info->catch_reached) {
- current_try->try_info->catch_state.InitMerge(
- *__ cache_state(), __ num_locals(), 1,
+ if (current_try->try_info->catch_reached) {
+ __ MergeStackWith(current_try->try_info->catch_state, 1,
+ LiftoffAssembler::kForwardJump);
+ } else {
+ current_try->try_info->catch_state = __ MergeIntoNewState(
+ __ num_locals(), 1,
current_try->stack_depth + current_try->num_exceptions);
current_try->try_info->catch_reached = true;
}
- __ MergeStackWith(current_try->try_info->catch_state, 1,
- LiftoffAssembler::kForwardJump);
__ emit_jump(&current_try->try_info->catch_label);
__ bind(&skip_handler);
@@ -4680,13 +4749,13 @@ class LiftoffCompiler {
int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.tag);
LiftoffRegister encoded_size_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(encoded_size_reg, WasmValue(encoded_size));
+ __ LoadConstant(encoded_size_reg, WasmValue::ForUintPtr(encoded_size));
// Call the WasmAllocateFixedArray builtin to create the values array.
CallRuntimeStub(WasmCode::kWasmAllocateFixedArray,
- MakeSig::Returns(kPointerKind).Params(kPointerKind),
+ MakeSig::Returns(kIntPtrKind).Params(kIntPtrKind),
{LiftoffAssembler::VarState{
- kSmiKind, LiftoffRegister{encoded_size_reg}, 0}},
+ kIntPtrKind, LiftoffRegister{encoded_size_reg}, 0}},
decoder->position());
MaybeOSR();
@@ -4718,9 +4787,9 @@ class LiftoffCompiler {
// Finally, call WasmThrow.
CallRuntimeStub(WasmCode::kWasmThrow,
- MakeSig::Params(kPointerKind, kPointerKind),
- {LiftoffAssembler::VarState{kPointerKind, exception_tag, 0},
- LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
+ MakeSig::Params(kIntPtrKind, kIntPtrKind),
+ {LiftoffAssembler::VarState{kIntPtrKind, exception_tag, 0},
+ LiftoffAssembler::VarState{kIntPtrKind, values_array, 0}},
decoder->position());
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
@@ -4734,10 +4803,10 @@ class LiftoffCompiler {
const MemoryAccessImmediate& imm) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
+ bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDoForceCheck);
- if (index == no_reg) return;
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4746,7 +4815,7 @@ class LiftoffCompiler {
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) outer_pinned.set(index);
- __ AtomicStore(addr, index, offset, value, type, outer_pinned);
+ __ AtomicStore(addr, index, offset, value, type, outer_pinned, i64_offset);
if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
TraceMemoryOperation(true, type.mem_rep(), index, offset,
decoder->position());
@@ -4756,10 +4825,10 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate& imm) {
ValueKind kind = type.value_type().kind();
+ bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
- if (index == no_reg) return;
LiftoffRegList pinned{index};
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4768,7 +4837,7 @@ class LiftoffCompiler {
Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
- __ AtomicLoad(value, addr, index, offset, type, pinned);
+ __ AtomicLoad(value, addr, index, offset, type, pinned, i64_offset);
__ PushRegister(kind, value);
if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
@@ -4781,8 +4850,8 @@ class LiftoffCompiler {
const MemoryAccessImmediate& imm,
void (LiftoffAssembler::*emit_fn)(Register, Register,
uintptr_t, LiftoffRegister,
- LiftoffRegister,
- StoreType)) {
+ LiftoffRegister, StoreType,
+ bool)) {
ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -4802,10 +4871,10 @@ class LiftoffCompiler {
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
#endif
+ bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDoForceCheck);
- if (index == no_reg) return;
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4814,7 +4883,7 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
Register addr = pinned.set(GetMemoryStart(pinned));
- (asm_.*emit_fn)(addr, index, offset, value, result, type);
+ (asm_.*emit_fn)(addr, index, offset, value, result, type, i64_offset);
__ PushRegister(result_kind, result);
}
@@ -4829,7 +4898,6 @@ class LiftoffCompiler {
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
- if (index == no_reg) return;
LiftoffRegList pinned{index};
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4845,6 +4913,7 @@ class LiftoffCompiler {
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
// Pop the index from the stack.
+ bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
__ DropValues(1);
LiftoffRegister result = expected;
@@ -4853,7 +4922,7 @@ class LiftoffCompiler {
// We already added the index to addr, so we can just pass no_reg to the
// assembler now.
__ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
- type);
+ type, i64_offset);
__ PushRegister(type.value_type().kind(), result);
return;
#else
@@ -4861,10 +4930,10 @@ class LiftoffCompiler {
LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
+ bool i64_offset = __ cache_state()->stack_state.back().kind() == kI64;
LiftoffRegister full_index = __ PopToRegister(pinned);
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, pinned, kDoForceCheck);
- if (index == no_reg) return;
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
@@ -4874,7 +4943,7 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
__ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
- type);
+ type, i64_offset);
__ PushRegister(result_kind, result);
#endif
}
@@ -4887,7 +4956,7 @@ class LiftoffCompiler {
auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(
RuntimeStubIdToBuiltinName(stub_id));
auto* call_descriptor = compiler::Linkage::GetStubCallDescriptor(
- compilation_zone_, // zone
+ zone_, // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
compiler::CallDescriptor::kNoFlags, // flags
@@ -4911,7 +4980,6 @@ class LiftoffCompiler {
Register index_reg =
BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
pinned, kDoForceCheck);
- if (index_reg == no_reg) return;
pinned.set(index_reg);
AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
pinned);
@@ -4927,7 +4995,7 @@ class LiftoffCompiler {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
- LiftoffAssembler::VarState index =
+ LiftoffAssembler::VarState& index =
__ cache_state()->stack_state.end()[-3];
// We replace the index on the value stack with the `index_plus_offset`
@@ -4937,7 +5005,8 @@ class LiftoffCompiler {
__ cache_state()->dec_used(full_index);
__ cache_state()->inc_used(LiftoffRegister(index_plus_offset));
}
- index.MakeRegister(LiftoffRegister(index_plus_offset));
+ index = LiftoffAssembler::VarState{
+ kIntPtrKind, LiftoffRegister{index_plus_offset}, index.offset()};
}
{
// Convert the top value of the stack (the timeout) from I64 to a BigInt,
@@ -4954,9 +5023,9 @@ class LiftoffCompiler {
__ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
- Register expected_reg = no_reg;
+ Register expected = no_reg;
if (kind == kI32) {
- expected_reg = __ PeekToRegister(1, {}).gp();
+ expected = __ PeekToRegister(1, {}).gp();
} else {
LiftoffAssembler::VarState i64_expected =
__ cache_state()->stack_state.end()[-2];
@@ -4964,21 +5033,21 @@ class LiftoffCompiler {
kNeedI64RegPair ? WasmCode::kI32PairToBigInt : WasmCode::kI64ToBigInt,
MakeSig::Returns(kRef).Params(kI64), {i64_expected},
decoder->position());
- expected_reg = kReturnRegister0;
+ expected = kReturnRegister0;
}
- LiftoffRegister expected(expected_reg);
+ ValueKind expected_kind = kind == kI32 ? kI32 : kRef;
LiftoffAssembler::VarState timeout =
__ cache_state()->stack_state.end()[-1];
- LiftoffAssembler::VarState expected_value(kRef, expected, 0);
+ LiftoffAssembler::VarState expected_value{expected_kind,
+ LiftoffRegister{expected}, 0};
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
auto target = kind == kI32 ? WasmCode::kWasmI32AtomicWait
: WasmCode::kWasmI64AtomicWait;
- CallRuntimeStub(
- target, MakeSig::Params(kPointerKind, kind == kI32 ? kI32 : kRef, kRef),
- {index, expected_value, timeout}, decoder->position());
+ CallRuntimeStub(target, MakeSig::Params(kIntPtrKind, expected_kind, kRef),
+ {index, expected_value, timeout}, decoder->position());
// Pop parameters from the value stack.
__ DropValues(3);
@@ -4991,7 +5060,6 @@ class LiftoffCompiler {
LiftoffRegister full_index = __ PeekToRegister(1, {});
Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
full_index, {}, kDoForceCheck);
- if (index_reg == no_reg) return;
LiftoffRegList pinned{index_reg};
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
@@ -5007,12 +5075,12 @@ class LiftoffCompiler {
}
LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
- LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
- index.MakeRegister(LiftoffRegister(index_plus_offset));
+ LiftoffAssembler::VarState index_plus_offset_input{
+ kIntPtrKind, LiftoffRegister{index_plus_offset}, 0};
CallRuntimeStub(WasmCode::kWasmAtomicNotify,
- MakeSig::Returns(kI32).Params(kPointerKind, kI32),
- {index, count}, decoder->position());
+ MakeSig::Returns(kI32).Params(kIntPtrKind, kI32),
+ {index_plus_offset_input, count}, decoder->position());
// Pop parameters from the value stack.
__ DropValues(2);
@@ -5174,7 +5242,7 @@ class LiftoffCompiler {
// For memory64 on 32-bit systems, combine all high words for a zero-check
// and only use the low words afterwards. This keeps the register pressure
// manageable.
- DCHECK_GE(kMaxUInt32, env_->max_memory_size);
+ DCHECK_GE(kMaxUInt32, env_->module->max_memory_size);
pinned->set(reg.low());
if (*high_word == no_reg) {
// Choose a register to hold the (combination of) high word(s). It cannot
@@ -5218,7 +5286,7 @@ class LiftoffCompiler {
if (mem_offsets_high_word != no_reg) {
// If any high word has bits set, jump to the OOB trap.
FREEZE_STATE(trapping);
- __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word,
+ __ emit_cond_jump(kNotZero, trap_label, kI32, mem_offsets_high_word,
no_reg, trapping);
pinned.clear(mem_offsets_high_word);
}
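
The zero-check on the combined high word relies on a size argument rather than per-access comparisons: on a 32-bit target the maximum memory size fits in 32 bits (the DCHECK above asserts max_memory_size <= kMaxUInt32), so any 64-bit index or offset whose high word is nonzero is out of bounds by construction. A minimal standalone sketch of that reasoning (plain C++, not V8 code):

#include <cstdint>
#include <vector>

// Combine the high words of all 64-bit indices with OR; a single
// "not zero" test then covers every operand, because an in-bounds
// index must have a zero high word when memory size <= 4 GiB.
bool AnyHighWordSet(const std::vector<uint64_t>& indices) {
  uint32_t combined_high = 0;
  for (uint64_t index : indices) {
    combined_high |= static_cast<uint32_t>(index >> 32);
  }
  return combined_high != 0;  // true => at least one index is out of bounds
}
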
@@ -5227,7 +5295,7 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(segment_index, WasmValue(imm.data_segment.index));
- auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32,
+ auto sig = MakeSig::Returns(kI32).Params(kIntPtrKind, kIntPtrKind, kI32,
kI32, kI32);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
@@ -5286,12 +5354,12 @@ class LiftoffCompiler {
if (mem_offsets_high_word != no_reg) {
// If any high word has bits set, jump to the OOB trap.
FREEZE_STATE(trapping);
- __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word,
+ __ emit_cond_jump(kNotZero, trap_label, kI32, mem_offsets_high_word,
no_reg, trapping);
}
- auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind,
- kPointerKind, kPointerKind);
+ auto sig = MakeSig::Returns(kI32).Params(kIntPtrKind, kIntPtrKind,
+ kIntPtrKind, kIntPtrKind);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
@@ -5325,12 +5393,12 @@ class LiftoffCompiler {
if (mem_offsets_high_word != no_reg) {
// If any high word has bits set, jump to the OOB trap.
FREEZE_STATE(trapping);
- __ emit_cond_jump(kNotEqualZero, trap_label, kI32, mem_offsets_high_word,
+ __ emit_cond_jump(kNotZero, trap_label, kI32, mem_offsets_high_word,
no_reg, trapping);
}
- auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32,
- kPointerKind);
+ auto sig = MakeSig::Returns(kI32).Params(kIntPtrKind, kIntPtrKind, kI32,
+ kIntPtrKind);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
@@ -5354,13 +5422,12 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_index_reg, imm.table.index);
- LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+ LiftoffAssembler::VarState table_index{kSmiKind, table_index_reg, 0};
LiftoffRegister segment_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(segment_index_reg, imm.element_segment.index);
- LiftoffAssembler::VarState segment_index(kPointerKind, segment_index_reg,
- 0);
+ LiftoffAssembler::VarState segment_index{kSmiKind, segment_index_reg, 0};
LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
@@ -5379,24 +5446,26 @@ class LiftoffCompiler {
void ElemDrop(FullDecoder* decoder, const IndexImmediate& imm) {
LiftoffRegList pinned;
- Register dropped_elem_segments =
+ Register element_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments,
- pinned);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(element_segments, ElementSegments, pinned);
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
seg_index,
- WasmValue(wasm::ObjectAccess::ElementOffsetInTaggedFixedUInt8Array(
- imm.index)));
+ WasmValue(
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index)));
- // Mark the segment as dropped by setting its value in the dropped
- // segments list to 1.
- LiftoffRegister one_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(one_reg, WasmValue(1));
- __ Store(dropped_elem_segments, seg_index.gp(), 0, one_reg,
- StoreType::kI32Store8, pinned);
+ // Mark the segment as dropped by setting it to the empty fixed array.
+ LiftoffRegister empty_fixed_array =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadFullPointer(
+ empty_fixed_array.gp(), kRootRegister,
+ IsolateData::root_slot_offset(RootIndex::kEmptyFixedArray));
+
+ __ StoreTaggedPointer(element_segments, seg_index.gp(), 0,
+ empty_fixed_array, pinned);
}
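
ElemDrop now drops a segment by overwriting its slot in the instance's element-segments array with the canonical empty FixedArray instead of setting a byte flag in a side table; a dropped segment then simply has length zero, which matches the spec's behavior for element.drop. A small model of that scheme (illustrative C++ only, not V8's data structures):

#include <cstddef>
#include <cstdint>
#include <vector>

// "Dropping" a segment shrinks it to length zero; a later table.init is
// then out of bounds for any nonzero length and a no-op for length zero.
struct ElementSegments {
  std::vector<std::vector<uint32_t>> segments;

  void Drop(size_t index) { segments[index].clear(); }

  bool InitWouldTrap(size_t index, size_t offset, size_t length) const {
    return offset + length > segments[index].size();
  }
};
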
void TableCopy(FullDecoder* decoder, const TableCopyImmediate& imm,
@@ -5406,14 +5475,14 @@ class LiftoffCompiler {
LiftoffRegister table_dst_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_dst_index_reg, imm.table_dst.index);
- LiftoffAssembler::VarState table_dst_index(kPointerKind,
- table_dst_index_reg, 0);
+ LiftoffAssembler::VarState table_dst_index{kSmiKind, table_dst_index_reg,
+ 0};
LiftoffRegister table_src_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_src_index_reg, imm.table_src.index);
- LiftoffAssembler::VarState table_src_index(kPointerKind,
- table_src_index_reg, 0);
+ LiftoffAssembler::VarState table_src_index{kSmiKind, table_src_index_reg,
+ 0};
LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
@@ -5437,15 +5506,14 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_index_reg, imm.index);
- LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+ LiftoffAssembler::VarState table_index(kSmiKind, table_index_reg, 0);
LiftoffAssembler::VarState delta = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
- CallRuntimeStub(
- WasmCode::kWasmTableGrow,
- MakeSig::Returns(kSmiKind).Params(kSmiKind, kI32, kTaggedKind),
- {table_index, delta, value}, decoder->position());
+ CallRuntimeStub(WasmCode::kWasmTableGrow,
+ MakeSig::Returns(kSmiKind).Params(kSmiKind, kI32, kRefNull),
+ {table_index, delta, value}, decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(2);
@@ -5487,14 +5555,14 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_index_reg, imm.index);
- LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
+ LiftoffAssembler::VarState table_index(kSmiKind, table_index_reg, 0);
LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState start = __ cache_state()->stack_state.end()[-3];
CallRuntimeStub(WasmCode::kWasmTableFill,
- MakeSig::Params(kSmiKind, kI32, kI32, kTaggedKind),
+ MakeSig::Params(kSmiKind, kI32, kI32, kRefNull),
{table_index, start, count, value}, decoder->position());
// Pop parameters from the value stack.
@@ -5527,16 +5595,22 @@ class LiftoffCompiler {
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
- ValueKind field_kind = imm.struct_type->field(i).kind();
+ ValueType field_type = imm.struct_type->field(i);
LiftoffRegister value = pinned.set(
initial_values_on_stack
? __ PopToRegister(pinned)
- : __ GetUnusedRegister(reg_class_for(field_kind), pinned));
+ : __ GetUnusedRegister(reg_class_for(field_type.kind()), pinned));
if (!initial_values_on_stack) {
- if (!CheckSupportedType(decoder, field_kind, "default value")) return;
- SetDefaultValue(value, field_kind, pinned);
+ if (!CheckSupportedType(decoder, field_type.kind(), "default value")) {
+ return;
+ }
+ SetDefaultValue(value, field_type, pinned);
}
- StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
+ // Skipping the write barrier is safe as long as:
+ // (1) {obj} is freshly allocated, and
+ // (2) {obj} is in new-space (not pretenured).
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned,
+ field_type.kind(), LiftoffAssembler::kSkipWriteBarrier);
pinned.clear(value);
}
// If this assert fails then initialization of padding field might be
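
Passing kSkipWriteBarrier for these initializing stores leans on the two conditions spelled out in the comment: the object was just allocated and is not pretenured, so it still sits in new space and none of its fields can create an old-to-new edge of the kind the generational barrier exists to record. A simplified sketch of that condition (ignoring the incremental-marking half of V8's real barrier; names are illustrative):

enum class Space { kNew, kOld };

// The generational barrier only needs to record pointers from old-space
// objects to new-space objects.
bool NeedsGenerationalBarrier(Space host_space, Space value_space) {
  return host_space == Space::kOld && value_space == Space::kNew;
}

// Conditions (1) and (2) from the comments in this patch.
bool MaySkipBarrierForInitializingStore(bool host_freshly_allocated,
                                        bool host_pretenured) {
  return host_freshly_allocated && !host_pretenured;
}
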
@@ -5597,7 +5671,8 @@ class LiftoffCompiler {
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
WasmArray::MaxLength(imm.array_type), trapping);
}
- ValueKind elem_kind = imm.array_type->element_type().kind();
+ ValueType elem_type = imm.array_type->element_type();
+ ValueKind elem_kind = elem_type.kind();
int elem_size = value_kind_size(elem_kind);
// Allocate the array.
{
@@ -5625,39 +5700,18 @@ class LiftoffCompiler {
__ PopToFixedRegister(value);
} else {
if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
- SetDefaultValue(value, elem_kind, pinned);
+ SetDefaultValue(value, elem_type, pinned);
}
- // Initialize the array's elements.
- LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(
- offset,
- WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
- LiftoffRegister end_offset = length;
- if (value_kind_size_log2(elem_kind) != 0) {
- __ emit_i32_shli(end_offset.gp(), length.gp(),
- value_kind_size_log2(elem_kind));
- }
- __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
- Label loop, done;
- __ bind(&loop);
- {
- // This is subtle: {StoreObjectField} can request a temp register, which
- // is precisely what {FREEZE_STATE} (with non-trivial live range) is
- // supposed to guard against. In this case it's fine though, because we've
- // just done a call, so there are plenty of recently-spilled unused
- // registers, so requesting a temp register won't actually cause any state
- // changes.
- // TODO(jkummerow): See if we can make this more elegant, e.g. by passing
- // a temp register to {StoreObjectField}.
- FREEZE_STATE(in_this_case_its_fine);
- __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
- end_offset.gp(), in_this_case_its_fine);
- }
- StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
- __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
- __ emit_jump(&loop);
- __ bind(&done);
+ LiftoffRegister index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(index, WasmValue(int32_t{0}));
+
+ // Initialize the array's elements.
+ // Skipping the write barrier is safe as long as:
+ // (1) {obj} is freshly allocated, and
+ // (2) {obj} is in new-space (not pretenured).
+ ArrayFillImpl(pinned, obj, index, value, length, elem_kind,
+ LiftoffAssembler::kSkipWriteBarrier);
__ PushRegister(kRef, obj);
}
@@ -5673,6 +5727,48 @@ class LiftoffCompiler {
ArrayNew(decoder, imm, rtt.type.kind(), false);
}
+ void ArrayFill(FullDecoder* decoder, ArrayIndexImmediate& imm,
+ const Value& array, const Value& /* index */,
+ const Value& /* value */, const Value& /* length */) {
+ {
+ // Null check.
+ LiftoffRegList pinned;
+ LiftoffRegister array_reg = pinned.set(__ PeekToRegister(3, pinned));
+ MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type);
+
+ // Bounds checks.
+ Label* trap_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayOutOfBounds);
+ LiftoffRegister array_length =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LoadObjectField(array_length, array_reg.gp(), no_reg,
+ ObjectAccess::ToTagged(WasmArray::kLengthOffset), kI32,
+ false, pinned);
+ LiftoffRegister index = pinned.set(__ PeekToRegister(2, pinned));
+ LiftoffRegister length = pinned.set(__ PeekToRegister(0, pinned));
+ LiftoffRegister index_plus_length =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ DCHECK(index_plus_length != array_length);
+ __ emit_i32_add(index_plus_length.gp(), length.gp(), index.gp());
+ FREEZE_STATE(frozen);
+ __ emit_cond_jump(kUnsignedGreaterThan, trap_label, kI32,
+ index_plus_length.gp(), array_length.gp(), frozen);
+ // Guard against overflow.
+ __ emit_cond_jump(kUnsignedGreaterThan, trap_label, kI32, index.gp(),
+ index_plus_length.gp(), frozen);
+ }
+
+ LiftoffRegList pinned;
+ LiftoffRegister length = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister index = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+
+ ArrayFillImpl(pinned, obj, index, value, length,
+ imm.array_type->element_type().kind(),
+ LiftoffAssembler::kNoSkipWriteBarrier);
+ }
+
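
The bounds check in ArrayFill is done in 32-bit arithmetic, so index + length may wrap; the first branch traps when the sum exceeds the array length, and the second ("guard against overflow") traps when the sum wrapped below index. In plain C++ (sketch only):

#include <cstdint>

// Example of the wrap-around case: index = 0xFFFFFFF0, length = 0x20 gives
// index + length = 0x10, which passes the first comparison (for any
// array_length >= 0x10) but is caught by the second.
bool ArrayFillInBounds(uint32_t index, uint32_t length,
                       uint32_t array_length) {
  uint32_t index_plus_length = index + length;         // may wrap
  if (index_plus_length > array_length) return false;  // out of bounds
  if (index > index_plus_length) return false;         // addition wrapped
  return true;
}
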
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate& imm, const Value& index_val,
bool is_signed, Value* result) {
@@ -5783,13 +5879,14 @@ class LiftoffCompiler {
for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
LiftoffRegList pinned{array};
LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
- LiftoffRegister offset_reg =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(offset_reg,
- WasmValue(i << value_kind_size_log2(elem_kind)));
- StoreObjectField(array.gp(), offset_reg.gp(),
- wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
- element, pinned, elem_kind);
+ int offset =
+ WasmArray::kHeaderSize + (i << value_kind_size_log2(elem_kind));
+ // Skipping the write barrier is safe as long as:
+ // (1) {array} is freshly allocated, and
+ // (2) {array} is in new-space (not pretenured).
+ StoreObjectField(array.gp(), no_reg, wasm::ObjectAccess::ToTagged(offset),
+ element, pinned, elem_kind,
+ LiftoffAssembler::kSkipWriteBarrier);
}
// Push the array onto the stack.
@@ -5826,18 +5923,19 @@ class LiftoffCompiler {
__ PushRegister(kRef, result);
}
- // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
- constexpr static int kI31To32BitSmiShift = 33;
-
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
- if (SmiValuesAre31Bits()) {
+ if constexpr (SmiValuesAre31Bits()) {
static_assert(kSmiTag == 0);
__ emit_i32_shli(dst.gp(), src.gp(), kSmiTagSize);
} else {
DCHECK(SmiValuesAre32Bits());
- __ emit_i64_shli(dst, src, kI31To32BitSmiShift);
+ // Set the topmost bit to sign-extend the second bit. This way,
+ // interpretation in JS (if this value escapes there) will be the same as
+ // i31.get_s.
+ __ emit_i64_shli(dst, src, kSmiTagSize + kSmiShiftSize + 1);
+ __ emit_i64_sari(dst, dst, 1);
}
__ PushRegister(kRef, dst);
}
@@ -5847,11 +5945,12 @@ class LiftoffCompiler {
LiftoffRegister src = pinned.set(__ PopToRegister());
MaybeEmitNullCheck(decoder, src.gp(), pinned, input.type);
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
- if (SmiValuesAre31Bits()) {
+ if constexpr (SmiValuesAre31Bits()) {
__ emit_i32_sari(dst.gp(), src.gp(), kSmiTagSize);
} else {
DCHECK(SmiValuesAre32Bits());
- __ emit_i64_sari(dst, src, kI31To32BitSmiShift);
+ // Topmost bit is already sign-extended.
+ __ emit_i64_sari(dst, src, kSmiTagSize + kSmiShiftSize);
}
__ PushRegister(kI32, dst);
}
@@ -5861,11 +5960,13 @@ class LiftoffCompiler {
LiftoffRegister src = pinned.set(__ PopToRegister());
MaybeEmitNullCheck(decoder, src.gp(), pinned, input.type);
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
- if (SmiValuesAre31Bits()) {
+ if constexpr (SmiValuesAre31Bits()) {
__ emit_i32_shri(dst.gp(), src.gp(), kSmiTagSize);
} else {
DCHECK(SmiValuesAre32Bits());
- __ emit_i64_shri(dst, src, kI31To32BitSmiShift);
+ // Remove topmost bit.
+ __ emit_i64_shli(dst, src, 1);
+ __ emit_i64_shri(dst, dst, kSmiTagSize + kSmiShiftSize + 1);
}
__ PushRegister(kI32, dst);
}
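
On targets with 32-bit Smi payloads the new i31 encoding shifts the 31-bit payload into bits 32..62 and copies bit 62 into bit 63, so the value reads as the same signed number from JavaScript as i31.get_s returns; get_s then needs a single arithmetic shift, and get_u first discards the duplicated top bit. A standalone sketch of the arithmetic, assuming kSmiTagSize == 1, kSmiShiftSize == 31 and C++20 signed-integer semantics:

#include <cstdint>

constexpr int kTag = 1, kShift = 31;  // assumed SmiValuesAre32Bits() layout

constexpr uint64_t I31New(uint32_t payload) {  // low 31 bits of payload used
  int64_t smi = static_cast<int64_t>(static_cast<uint64_t>(payload)
                                     << (kTag + kShift + 1));
  return static_cast<uint64_t>(smi >> 1);  // copy bit 62 into bit 63
}
constexpr int32_t I31GetS(uint64_t smi) {
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> (kTag + kShift));
}
constexpr uint32_t I31GetU(uint64_t smi) {
  return static_cast<uint32_t>((smi << 1) >> (kTag + kShift + 1));
}

// Worked example: payload 0x40000000 is -2^30 as a signed 31-bit value.
static_assert(I31New(0x40000000u) == 0xC000000000000000ull);
static_assert(I31GetS(I31New(0x40000000u)) == -(1 << 30));
static_assert(I31GetU(I31New(0x40000000u)) == 0x40000000u);
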
@@ -5921,44 +6022,50 @@ class LiftoffCompiler {
__ LoadMap(tmp1, obj_reg);
// {tmp1} now holds the object's map.
- // Check for rtt equality, and if not, check if the rtt is a struct/array
- // rtt.
- __ emit_cond_jump(kEqual, &match, rtt_type.kind(), tmp1, rtt_reg, frozen);
-
- if (is_cast_from_any) {
- // Check for map being a map for a wasm object (struct, array, func).
- __ Load(LiftoffRegister(scratch2), tmp1, no_reg,
- wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
- LoadType::kI32Load16U);
- __ emit_i32_subi(scratch2, scratch2, FIRST_WASM_OBJECT_TYPE);
- __ emit_i32_cond_jumpi(kUnsignedGreaterThan, no_match, scratch2,
- LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
- frozen);
- }
+ if (module->types[rtt_type.ref_index()].is_final) {
+ // In this case, simply check for map equality.
+ __ emit_cond_jump(kNotEqual, no_match, rtt_type.kind(), tmp1, rtt_reg,
+ frozen);
+ } else {
+ // Check for rtt equality, and if not, check if the rtt is a struct/array
+ // rtt.
+ __ emit_cond_jump(kEqual, &match, rtt_type.kind(), tmp1, rtt_reg, frozen);
+
+ if (is_cast_from_any) {
+ // Check for map being a map for a wasm object (struct, array, func).
+ __ Load(LiftoffRegister(scratch2), tmp1, no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U);
+ __ emit_i32_subi(scratch2, scratch2, FIRST_WASM_OBJECT_TYPE);
+ __ emit_i32_cond_jumpi(kUnsignedGreaterThan, no_match, scratch2,
+ LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
+ frozen);
+ }
- // Constant-time subtyping check: load exactly one candidate RTT from the
- // supertypes list.
- // Step 1: load the WasmTypeInfo into {tmp1}.
- constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
- Map::kConstructorOrBackPointerOrNativeContextOffset);
- __ LoadTaggedPointer(tmp1, tmp1, no_reg, kTypeInfoOffset);
- // Step 2: check the list's length if needed.
- uint32_t rtt_depth = GetSubtypingDepth(module, rtt_type.ref_index());
- if (rtt_depth >= kMinimumSupertypeArraySize) {
- LiftoffRegister list_length(scratch2);
- int offset =
- ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesLengthOffset);
- __ LoadSmiAsInt32(list_length, tmp1, offset);
- __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
- rtt_depth, frozen);
+ // Constant-time subtyping check: load exactly one candidate RTT from the
+ // supertypes list.
+ // Step 1: load the WasmTypeInfo into {tmp1}.
+ constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
+ Map::kConstructorOrBackPointerOrNativeContextOffset);
+ __ LoadTaggedPointer(tmp1, tmp1, no_reg, kTypeInfoOffset);
+ // Step 2: check the list's length if needed.
+ uint32_t rtt_depth = GetSubtypingDepth(module, rtt_type.ref_index());
+ if (rtt_depth >= kMinimumSupertypeArraySize) {
+ LiftoffRegister list_length(scratch2);
+ int offset =
+ ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesLengthOffset);
+ __ LoadSmiAsInt32(list_length, tmp1, offset);
+ __ emit_i32_cond_jumpi(kUnsignedLessThanEqual, no_match,
+ list_length.gp(), rtt_depth, frozen);
+ }
+ // Step 3: load the candidate list slot into {tmp1}, and compare it.
+ __ LoadTaggedPointer(
+ tmp1, tmp1, no_reg,
+ ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
+ rtt_depth * kTaggedSize));
+ __ emit_cond_jump(kNotEqual, no_match, rtt_type.kind(), tmp1, rtt_reg,
+ frozen);
}
- // Step 3: load the candidate list slot into {tmp1}, and compare it.
- __ LoadTaggedPointer(
- tmp1, tmp1, no_reg,
- ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
- rtt_depth * kTaggedSize));
- __ emit_cond_jump(kUnequal, no_match, rtt_type.kind(), tmp1, rtt_reg,
- frozen);
// Fall through to {match}.
__ bind(&match);
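
For non-final source types the rewritten check keeps the constant-time subtyping scheme visible above: each type's WasmTypeInfo stores its supertype chain as an array indexed by subtyping depth, so "is this object's type a subtype of the target?" is one bounded load plus one compare, with the length check only emitted when the target's depth might exceed the guaranteed minimum array size. A minimal model of that idea (plain C++, not V8's object layout):

#include <cstdint>

struct TypeInfo {
  const TypeInfo* supertypes[8];  // supertypes[d] = ancestor at depth d
  uint32_t supertypes_length;
};

// Constant-time check: cost is independent of how deep the hierarchy is.
bool IsSubtypeOf(const TypeInfo& obj_type, const TypeInfo& target,
                 uint32_t target_depth) {
  if (&obj_type == &target) return true;  // exact match, checked first
  if (target_depth >= obj_type.supertypes_length) return false;
  return obj_type.supertypes[target_depth] == &target;
}
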
@@ -5973,7 +6080,9 @@ class LiftoffCompiler {
Register scratch_null =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- if (obj.type.is_nullable()) LoadNullValueForCompare(scratch_null, pinned);
+ if (obj.type.is_nullable()) {
+ LoadNullValueForCompare(scratch_null, pinned, obj.type);
+ }
{
FREEZE_STATE(frozen);
@@ -6003,6 +6112,11 @@ class LiftoffCompiler {
return RefIsStruct(decoder, obj, result_val, null_succeeds);
case HeapType::kArray:
return RefIsArray(decoder, obj, result_val, null_succeeds);
+ case HeapType::kNone:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ return EmitIsNull(kExprRefIsNull, obj.type);
case HeapType::kAny:
// Any may never need a cast as it is either implicitly convertible or
// never convertible for any given type.
@@ -6026,7 +6140,9 @@ class LiftoffCompiler {
Register scratch_null =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- if (obj.type.is_nullable()) LoadNullValueForCompare(scratch_null, pinned);
+ if (obj.type.is_nullable()) {
+ LoadNullValueForCompare(scratch_null, pinned, obj.type);
+ }
{
FREEZE_STATE(frozen);
@@ -6049,6 +6165,11 @@ class LiftoffCompiler {
return RefAsStruct(decoder, obj, result_val, null_succeeds);
case HeapType::kArray:
return RefAsArray(decoder, obj, result_val, null_succeeds);
+ case HeapType::kNone:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ return AssertNullTypecheck(decoder, obj, result_val);
case HeapType::kAny:
// Any may never need a cast as it is either implicitly convertible or
// never convertible for any given type.
@@ -6058,7 +6179,8 @@ class LiftoffCompiler {
}
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* /* result_on_branch */, uint32_t depth) {
+ Value* /* result_on_branch */, uint32_t depth,
+ bool null_succeeds) {
// Avoid having sequences of branches do duplicate work.
if (depth != decoder->control_depth() - 1) {
__ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity, {});
@@ -6071,11 +6193,14 @@ class LiftoffCompiler {
Register scratch_null =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- if (obj.type.is_nullable()) LoadNullValue(scratch_null, pinned);
+ if (obj.type.is_nullable()) {
+ LoadNullValue(scratch_null, pinned, kWasmAnyRef);
+ }
FREEZE_STATE(frozen);
+ NullSucceeds null_handling = null_succeeds ? kNullSucceeds : kNullFails;
SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt_reg.gp(),
- rtt.type, scratch_null, scratch2, &cont_false, kNullFails,
+ rtt.type, scratch_null, scratch2, &cont_false, null_handling,
frozen);
BrOrRetImpl(decoder, depth, scratch_null, scratch2);
@@ -6084,7 +6209,8 @@ class LiftoffCompiler {
}
void BrOnCastFail(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* /* result_on_fallthrough */, uint32_t depth) {
+ Value* /* result_on_fallthrough */, uint32_t depth,
+ bool null_succeeds) {
// Avoid having sequences of branches do duplicate work.
if (depth != decoder->control_depth() - 1) {
__ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity, {});
@@ -6097,11 +6223,14 @@ class LiftoffCompiler {
Register scratch_null =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- if (obj.type.is_nullable()) LoadNullValue(scratch_null, pinned);
+ if (obj.type.is_nullable()) {
+ LoadNullValue(scratch_null, pinned, kWasmAnyRef);
+ }
FREEZE_STATE(frozen);
+ NullSucceeds null_handling = null_succeeds ? kNullSucceeds : kNullFails;
SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt_reg.gp(),
- rtt.type, scratch_null, scratch2, &cont_branch, kNullFails,
+ rtt.type, scratch_null, scratch2, &cont_branch, null_handling,
frozen);
__ emit_jump(&fallthrough);
@@ -6111,6 +6240,62 @@ class LiftoffCompiler {
__ bind(&fallthrough);
}
+ void BrOnCastAbstract(FullDecoder* decoder, const Value& obj, HeapType type,
+ Value* result_on_branch, uint32_t depth,
+ bool null_succeeds) {
+ switch (type.representation()) {
+ case HeapType::kEq:
+ return BrOnEq(decoder, obj, result_on_branch, depth, null_succeeds);
+ case HeapType::kI31:
+ return BrOnI31(decoder, obj, result_on_branch, depth, null_succeeds);
+ case HeapType::kStruct:
+ return BrOnStruct(decoder, obj, result_on_branch, depth, null_succeeds);
+ case HeapType::kArray:
+ return BrOnArray(decoder, obj, result_on_branch, depth, null_succeeds);
+ case HeapType::kNone:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ return BrOnNull(decoder, obj, depth, /*pass_null_along_branch*/ true,
+ nullptr);
+ case HeapType::kAny:
+ // Any may never need a cast as it is either implicitly convertible or
+ // never convertible for any given type.
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void BrOnCastFailAbstract(FullDecoder* decoder, const Value& obj,
+ HeapType type, Value* result_on_fallthrough,
+ uint32_t depth, bool null_succeeds) {
+ switch (type.representation()) {
+ case HeapType::kEq:
+ return BrOnNonEq(decoder, obj, result_on_fallthrough, depth,
+ null_succeeds);
+ case HeapType::kI31:
+ return BrOnNonI31(decoder, obj, result_on_fallthrough, depth,
+ null_succeeds);
+ case HeapType::kStruct:
+ return BrOnNonStruct(decoder, obj, result_on_fallthrough, depth,
+ null_succeeds);
+ case HeapType::kArray:
+ return BrOnNonArray(decoder, obj, result_on_fallthrough, depth,
+ null_succeeds);
+ case HeapType::kNone:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ return BrOnNonNull(decoder, obj, nullptr, depth,
+ /*drop_null_on_fallthrough*/ false);
+ case HeapType::kAny:
+ // Any may never need a cast as it is either implicitly convertible or
+ // never convertible for any given type.
+ default:
+ UNREACHABLE();
+ }
+ }
+
struct TypeCheck {
Register obj_reg = no_reg;
ValueType obj_type;
@@ -6130,7 +6315,7 @@ class LiftoffCompiler {
enum PopOrPeek { kPop, kPeek };
- void Initialize(TypeCheck& check, PopOrPeek pop_or_peek) {
+ void Initialize(TypeCheck& check, PopOrPeek pop_or_peek, ValueType type) {
LiftoffRegList pinned;
if (pop_or_peek == kPop) {
check.obj_reg = pinned.set(__ PopToRegister(pinned)).gp();
@@ -6140,7 +6325,7 @@ class LiftoffCompiler {
check.tmp1 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
check.tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
if (check.obj_type.is_nullable()) {
- LoadNullValue(check.null_reg(), pinned);
+ LoadNullValue(check.null_reg(), pinned, type);
}
}
void LoadInstanceType(TypeCheck& check, const FreezeCacheState& frozen,
@@ -6164,22 +6349,14 @@ class LiftoffCompiler {
void StructCheck(TypeCheck& check, const FreezeCacheState& frozen) {
LoadInstanceType(check, frozen, check.no_match);
LiftoffRegister instance_type(check.instance_type());
- if (!v8_flags.wasm_gc_structref_as_dataref) {
- __ emit_i32_cond_jumpi(kUnequal, check.no_match, check.instance_type(),
- WASM_STRUCT_TYPE, frozen);
- } else {
- Register tmp = check.instance_type();
- __ emit_i32_subi(tmp, tmp, FIRST_WASM_OBJECT_TYPE);
- __ emit_i32_cond_jumpi(kUnsignedGreaterThan, check.no_match, tmp,
- LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
- frozen);
- }
+ __ emit_i32_cond_jumpi(kNotEqual, check.no_match, check.instance_type(),
+ WASM_STRUCT_TYPE, frozen);
}
void ArrayCheck(TypeCheck& check, const FreezeCacheState& frozen) {
LoadInstanceType(check, frozen, check.no_match);
LiftoffRegister instance_type(check.instance_type());
- __ emit_i32_cond_jumpi(kUnequal, check.no_match, check.instance_type(),
+ __ emit_i32_cond_jumpi(kNotEqual, check.no_match, check.instance_type(),
WASM_ARRAY_TYPE, frozen);
}
@@ -6208,7 +6385,7 @@ class LiftoffCompiler {
void AbstractTypeCheck(const Value& object, bool null_succeeds) {
Label match, no_match, done;
TypeCheck check(object.type, &no_match, null_succeeds);
- Initialize(check, kPop);
+ Initialize(check, kPop, object.type);
LiftoffRegister result(check.tmp1);
{
FREEZE_STATE(frozen);
@@ -6259,7 +6436,7 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
TypeCheck check(object.type, trap_label, null_succeeds);
- Initialize(check, kPeek);
+ Initialize(check, kPeek, object.type);
FREEZE_STATE(frozen);
if (null_succeeds && check.obj_type.is_nullable()) {
@@ -6296,19 +6473,24 @@ class LiftoffCompiler {
template <TypeChecker type_checker>
void BrOnAbstractType(const Value& object, FullDecoder* decoder,
- uint32_t br_depth) {
- bool null_succeeds = false; // TODO(mliedtke): Use parameter.
+ uint32_t br_depth, bool null_succeeds) {
// Avoid having sequences of branches do duplicate work.
if (br_depth != decoder->control_depth() - 1) {
__ PrepareForBranch(decoder->control_at(br_depth)->br_merge()->arity, {});
}
- Label no_match;
+ Label no_match, match;
TypeCheck check(object.type, &no_match, null_succeeds);
- Initialize(check, kPeek);
+ Initialize(check, kPeek, object.type);
FREEZE_STATE(frozen);
+ if (null_succeeds && check.obj_type.is_nullable()) {
+ __ emit_cond_jump(kEqual, &match, kRefNull, check.obj_reg,
+ check.null_reg(), frozen);
+ }
+
(this->*type_checker)(check, frozen);
+ __ bind(&match);
BrOrRetImpl(decoder, br_depth, check.tmp1, check.tmp2);
__ bind(&no_match);
@@ -6316,8 +6498,7 @@ class LiftoffCompiler {
template <TypeChecker type_checker>
void BrOnNonAbstractType(const Value& object, FullDecoder* decoder,
- uint32_t br_depth) {
- bool null_succeeds = false; // TODO(mliedtke): Use parameter.
+ uint32_t br_depth, bool null_succeeds) {
// Avoid having sequences of branches do duplicate work.
if (br_depth != decoder->control_depth() - 1) {
__ PrepareForBranch(decoder->control_at(br_depth)->br_merge()->arity, {});
@@ -6325,9 +6506,14 @@ class LiftoffCompiler {
Label no_match, end;
TypeCheck check(object.type, &no_match, null_succeeds);
- Initialize(check, kPeek);
+ Initialize(check, kPeek, object.type);
FREEZE_STATE(frozen);
+ if (null_succeeds && check.obj_type.is_nullable()) {
+ __ emit_cond_jump(kEqual, &end, kRefNull, check.obj_reg, check.null_reg(),
+ frozen);
+ }
+
(this->*type_checker)(check, frozen);
__ emit_jump(&end);
@@ -6337,36 +6523,60 @@ class LiftoffCompiler {
__ bind(&end);
}
+ void BrOnEq(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnAbstractType<&LiftoffCompiler::EqCheck>(object, decoder, br_depth,
+ null_succeeds);
+ }
+
void BrOnStruct(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnAbstractType<&LiftoffCompiler::StructCheck>(object, decoder, br_depth);
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnAbstractType<&LiftoffCompiler::StructCheck>(object, decoder, br_depth,
+ null_succeeds);
}
void BrOnI31(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder, br_depth);
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder, br_depth,
+ null_succeeds);
}
void BrOnArray(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder, br_depth);
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder, br_depth,
+ null_succeeds);
}
void BrOnNonStruct(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
BrOnNonAbstractType<&LiftoffCompiler::StructCheck>(object, decoder,
- br_depth);
+ br_depth, null_succeeds);
}
void BrOnNonI31(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnNonAbstractType<&LiftoffCompiler::I31Check>(object, decoder, br_depth);
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnNonAbstractType<&LiftoffCompiler::I31Check>(object, decoder, br_depth,
+ null_succeeds);
}
void BrOnNonArray(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnNonAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder,
- br_depth);
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnNonAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder, br_depth,
+ null_succeeds);
+ }
+
+ void BrOnNonEq(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth,
+ bool null_succeeds) {
+ BrOnNonAbstractType<&LiftoffCompiler::EqCheck>(object, decoder, br_depth,
+ null_succeeds);
}
void StringNewWtf8(FullDecoder* decoder, const MemoryIndexImmediate& imm,
@@ -6386,7 +6596,7 @@ class LiftoffCompiler {
CallRuntimeStub(
WasmCode::kWasmStringNewWtf8,
- MakeSig::Returns(kRef).Params(kI32, kI32, kSmiKind, kSmiKind),
+ MakeSig::Returns(kRefNull).Params(kI32, kI32, kSmiKind, kSmiKind),
{
__ cache_state()->stack_state.end()[-2], // offset
__ cache_state()->stack_state.end()[-1], // size
@@ -6417,15 +6627,16 @@ class LiftoffCompiler {
LoadSmi(variant_reg, static_cast<int32_t>(variant));
LiftoffAssembler::VarState variant_var(kSmiKind, variant_reg, 0);
- CallRuntimeStub(WasmCode::kWasmStringNewWtf8Array,
- MakeSig::Returns(kRef).Params(kI32, kI32, kRef, kSmiKind),
- {
- __ cache_state()->stack_state.end()[-2], // start
- __ cache_state()->stack_state.end()[-1], // end
- array_var,
- variant_var,
- },
- decoder->position());
+ CallRuntimeStub(
+ WasmCode::kWasmStringNewWtf8Array,
+ MakeSig::Returns(kRefNull).Params(kI32, kI32, kRef, kSmiKind),
+ {
+ __ cache_state()->stack_state.end()[-2], // start
+ __ cache_state()->stack_state.end()[-1], // end
+ array_var,
+ variant_var,
+ },
+ decoder->position());
__ cache_state()->stack_state.pop_back(3);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
@@ -6518,6 +6729,8 @@ class LiftoffCompiler {
case unibrow::Utf8Variant::kWtf8:
stub_id = WasmCode::kWasmStringMeasureWtf8;
break;
+ case unibrow::Utf8Variant::kUtf8NoTrap:
+ UNREACHABLE();
}
CallRuntimeStub(stub_id, MakeSig::Returns(kI32).Params(kRef),
{
@@ -6727,26 +6940,26 @@ class LiftoffCompiler {
LiftoffRegister null = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
bool check_for_null = a.type.is_nullable() || b.type.is_nullable();
if (check_for_null) {
- LoadNullValueForCompare(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned, kWasmStringRef);
}
FREEZE_STATE(frozen);
// If values pointer-equal, result is 1.
__ LoadConstant(result_reg, WasmValue(int32_t{1}));
- __ emit_cond_jump(LiftoffCondition::kEqual, &done, kRefNull, a_reg.gp(),
- b_reg.gp(), frozen);
+ __ emit_cond_jump(kEqual, &done, kRefNull, a_reg.gp(), b_reg.gp(),
+ frozen);
// Otherwise if either operand is null, result is 0.
if (check_for_null) {
__ LoadConstant(result_reg, WasmValue(int32_t{0}));
if (a.type.is_nullable()) {
- __ emit_cond_jump(LiftoffCondition::kEqual, &done, kRefNull,
- a_reg.gp(), null.gp(), frozen);
+ __ emit_cond_jump(kEqual, &done, kRefNull, a_reg.gp(), null.gp(),
+ frozen);
}
if (b.type.is_nullable()) {
- __ emit_cond_jump(LiftoffCondition::kEqual, &done, kRefNull,
- b_reg.gp(), null.gp(), frozen);
+ __ emit_cond_jump(kEqual, &done, kRefNull, b_reg.gp(), null.gp(),
+ frozen);
}
}
@@ -6924,7 +7137,22 @@ class LiftoffCompiler {
}
void StringAsWtf16(FullDecoder* decoder, const Value& str, Value* result) {
- RefAsNonNull(decoder, str, result);
+ LiftoffRegList pinned;
+
+ LiftoffRegister str_reg = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, str_reg.gp(), pinned, str.type);
+ LiftoffAssembler::VarState str_var(kRef, str_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmStringAsWtf16,
+ MakeSig::Returns(kRef).Params(kRef),
+ {
+ str_var,
+ },
+ decoder->position());
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+
+ LiftoffRegister result_reg(kReturnRegister0);
+ __ PushRegister(kRef, result_reg);
}
void StringViewWtf16GetCodeUnit(FullDecoder* decoder, const Value& view,
@@ -7077,6 +7305,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
LiftoffRegister result_reg(kReturnRegister0);
+ __ DropValues(2);
__ PushRegister(kI32, result_reg);
}
@@ -7102,6 +7331,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
LiftoffRegister result_reg(kReturnRegister0);
+ __ DropValues(2);
__ PushRegister(kI32, result_reg);
}
@@ -7127,9 +7357,65 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
LiftoffRegister result_reg(kReturnRegister0);
+ __ DropValues(2);
__ PushRegister(kRef, result_reg);
}
+ void StringCompare(FullDecoder* decoder, const Value& lhs, const Value& rhs,
+ Value* result) {
+ LiftoffRegList pinned;
+ LiftoffRegister rhs_reg = pinned.set(
+ __ LoadToRegister(__ cache_state()->stack_state.end()[-1], pinned));
+ MaybeEmitNullCheck(decoder, rhs_reg.gp(), pinned, rhs.type);
+ LiftoffAssembler::VarState rhs_var(kRef, rhs_reg, 0);
+
+ LiftoffRegister lhs_reg = pinned.set(
+ __ LoadToRegister(__ cache_state()->stack_state.end()[-2], pinned));
+ MaybeEmitNullCheck(decoder, lhs_reg.gp(), pinned, lhs.type);
+ LiftoffAssembler::VarState lhs_var(kRef, lhs_reg, 0);
+
+ CallRuntimeStub(WasmCode::kStringCompare,
+ MakeSig::Returns(kSmiKind).Params(kRef, kRef),
+ {lhs_var, rhs_var}, decoder->position());
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+
+ LiftoffRegister result_reg(kReturnRegister0);
+ __ DropValues(2);
+ __ SmiToInt32(kReturnRegister0);
+ __ PushRegister(kI32, result_reg);
+ }
+
+ void StringFromCodePoint(FullDecoder* decoder, const Value& code_point,
+ Value* result) {
+ LiftoffAssembler::VarState& codepoint_var =
+ __ cache_state()->stack_state.end()[-1];
+
+ CallRuntimeStub(WasmCode::kWasmStringFromCodePoint,
+ MakeSig::Returns(kRef).Params(kI32), {codepoint_var},
+ decoder->position());
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+
+ LiftoffRegister result_reg(kReturnRegister0);
+ __ DropValues(1);
+ __ PushRegister(kRef, result_reg);
+ }
+
+ void StringHash(FullDecoder* decoder, const Value& string, Value* result) {
+ LiftoffRegList pinned;
+ LiftoffRegister string_reg = pinned.set(
+ __ LoadToRegister(__ cache_state()->stack_state.end()[-1], pinned));
+ MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, string.type);
+ LiftoffAssembler::VarState string_var(kRef, string_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmStringHash,
+ MakeSig::Returns(kI32).Params(kRef), {string_var},
+ decoder->position());
+
+ LiftoffRegister result_reg(kReturnRegister0);
+ __ DropValues(1);
+ __ PushRegister(kI32, result_reg);
+ }
+
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
// Nothing to do here.
}
@@ -7137,20 +7423,18 @@ class LiftoffCompiler {
private:
void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[], Value returns[], TailCall tail_call) {
- MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ MostlySmallValueKindSig sig(zone_, imm.sig);
for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
- auto call_descriptor =
- compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
- call_descriptor =
- GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(zone_, imm.sig);
+ call_descriptor = GetLoweredCallDescriptor(zone_, call_descriptor);
// One slot would be enough for call_direct, but would make index
// computations much more complicated.
size_t vector_slot = encountered_call_instructions_.size() * 2;
- if (v8_flags.wasm_speculative_inlining) {
+ if (decoder->enabled_.has_inlining()) {
encountered_call_instructions_.push_back(imm.index);
}
@@ -7176,7 +7460,7 @@ class LiftoffCompiler {
imported_function_ref, imported_function_refs, no_reg,
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index));
- Register* explicit_instance = &imported_function_ref;
+ Register explicit_instance = imported_function_ref;
__ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
@@ -7193,9 +7477,9 @@ class LiftoffCompiler {
} else {
// Inlining direct calls isn't speculative, but existence of the
// feedback vector currently depends on this flag.
- if (v8_flags.wasm_speculative_inlining) {
+ if (decoder->enabled_.has_inlining()) {
LiftoffRegister vector = __ GetUnusedRegister(kGpReg, {});
- __ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind);
+ __ Fill(vector, liftoff::kFeedbackVectorOffset, kIntPtrKind);
__ IncrementSmi(vector,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
static_cast<int>(vector_slot)));
@@ -7223,7 +7507,7 @@ class LiftoffCompiler {
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate& imm, TailCall tail_call) {
- MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ MostlySmallValueKindSig sig(zone_, imm.sig);
for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -7232,149 +7516,247 @@ class LiftoffCompiler {
LiftoffRegList pinned{index};
// Get all temporary registers unconditionally up front.
- Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ // We do not use temporary registers directly; instead we rename them as
+ // appropriate in each scope they are used.
+ Register tmp1 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register tmp3 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register indirect_function_table = no_reg;
- if (imm.table_imm.index != 0) {
- Register indirect_function_tables =
+ if (imm.table_imm.index > 0) {
+ indirect_function_table =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(indirect_function_tables,
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(indirect_function_table,
IndirectFunctionTables, pinned);
-
- indirect_function_table = indirect_function_tables;
__ LoadTaggedPointer(
- indirect_function_table, indirect_function_tables, no_reg,
+ indirect_function_table, indirect_function_table, no_reg,
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.table_imm.index));
}
-
- // Bounds check against the table size.
- Label* invalid_func_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapTableOutOfBounds);
-
- // Compare against table size stored in
- // {instance->indirect_function_table_size}.
- if (imm.table_imm.index == 0) {
- LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
- pinned);
- } else {
- __ Load(
- LiftoffRegister(tmp_const), indirect_function_table, no_reg,
- wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
- LoadType::kI32Load);
- }
{
- FREEZE_STATE(trapping);
- __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
- tmp_const, trapping);
+ CODE_COMMENT("Check index is in-bounds");
+ Register table_size = tmp1;
+ if (imm.table_imm.index == 0) {
+ LOAD_INSTANCE_FIELD(table_size, IndirectFunctionTableSize, kUInt32Size,
+ pinned);
+ } else {
+ __ Load(LiftoffRegister(table_size), indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kSizeOffset),
+ LoadType::kI32Load);
+ }
+
+ // Bounds check against the table size: Compare against table size stored
+ // in {instance->indirect_function_table_size}.
+ Label* out_of_bounds_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapTableOutOfBounds);
+ {
+ FREEZE_STATE(trapping);
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, out_of_bounds_label, kI32,
+ index, table_size, trapping);
+ }
}
- CODE_COMMENT("Check indirect call signature");
- // Load the signature from {instance->ift_sig_ids[key]}
- if (imm.table_imm.index == 0) {
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds,
+ ValueType table_type = decoder->module_->tables[imm.table_imm.index].type;
+ bool needs_type_check = !EquivalentTypes(
+ table_type.AsNonNull(), ValueType::Ref(imm.sig_imm.index),
+ decoder->module_, decoder->module_);
+ bool needs_null_check = table_type.is_nullable();
+
+ if (needs_type_check) {
+ CODE_COMMENT("Check indirect call signature");
+ Register real_sig_id = tmp1;
+ Register formal_sig_id = tmp2;
+
+ // Load the signature from {instance->ift_sig_ids[key]}
+ if (imm.table_imm.index == 0) {
+ LOAD_INSTANCE_FIELD(real_sig_id, IndirectFunctionTableSigIds,
+ kSystemPointerSize, pinned);
+ } else {
+ __ Load(LiftoffRegister(real_sig_id), indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kSigIdsOffset),
+ kPointerLoadType);
+ }
+ static_assert((1 << 2) == kInt32Size);
+ __ Load(LiftoffRegister(real_sig_id), real_sig_id, index, 0,
+ LoadType::kI32Load, nullptr, false, false, true);
+
+ // Compare against expected signature.
+ LOAD_INSTANCE_FIELD(formal_sig_id, IsorecursiveCanonicalTypes,
kSystemPointerSize, pinned);
- } else {
- __ Load(LiftoffRegister(table), indirect_function_table, no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmIndirectFunctionTable::kSigIdsOffset),
- kPointerLoadType);
- }
- static_assert((1 << 2) == kInt32Size);
- __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
- nullptr, false, false, true);
+ __ Load(LiftoffRegister(formal_sig_id), formal_sig_id, no_reg,
+ imm.sig_imm.index * kInt32Size, LoadType::kI32Load);
- // Compare against expected signature.
- LOAD_INSTANCE_FIELD(tmp_const, IsorecursiveCanonicalTypes,
- kSystemPointerSize, pinned);
- __ Load(LiftoffRegister(tmp_const), tmp_const, no_reg,
- imm.sig_imm.index * kInt32Size, LoadType::kI32Load);
+ Label* sig_mismatch_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
+ __ DropValues(1);
- Label* sig_mismatch_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
- __ DropValues(1);
- {
- FREEZE_STATE(trapping);
- __ emit_cond_jump(kUnequal, sig_mismatch_label, kPointerKind, scratch,
- tmp_const, trapping);
- }
+ if (decoder->enabled_.has_gc() &&
+ !decoder->module_->types[imm.sig_imm.index].is_final) {
+ Label success_label;
+ FREEZE_STATE(frozen);
+ __ emit_cond_jump(kEqual, &success_label, kI32, real_sig_id,
+ formal_sig_id, frozen);
+ if (needs_null_check) {
+ __ emit_i32_cond_jumpi(kEqual, sig_mismatch_label, real_sig_id, -1,
+ frozen);
+ }
+ Register real_rtt = tmp3;
+ __ LoadFullPointer(
+ real_rtt, kRootRegister,
+ IsolateData::root_slot_offset(RootIndex::kWasmCanonicalRtts));
+ __ LoadTaggedPointer(real_rtt, real_rtt, real_sig_id,
+ ObjectAccess::ToTagged(WeakArrayList::kHeaderSize),
+ true);
+ // Remove the weak reference tag.
+ if (kSystemPointerSize == 4) {
+ __ emit_i32_andi(real_rtt, real_rtt,
+ static_cast<int32_t>(~kWeakHeapObjectMask));
+ } else {
+ __ emit_i64_andi(LiftoffRegister(real_rtt), LiftoffRegister(real_rtt),
+ static_cast<int64_t>(~kWeakHeapObjectMask));
+ }
+ // Constant-time subtyping check: load exactly one candidate RTT from
+ // the supertypes list.
+ // Step 1: load the WasmTypeInfo.
+ constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
+ Map::kConstructorOrBackPointerOrNativeContextOffset);
+ Register type_info = real_rtt;
+ __ LoadTaggedPointer(type_info, real_rtt, no_reg, kTypeInfoOffset);
+ // Step 2: check the list's length if needed.
+ uint32_t rtt_depth =
+ GetSubtypingDepth(decoder->module_, imm.sig_imm.index);
+ if (rtt_depth >= kMinimumSupertypeArraySize) {
+ LiftoffRegister list_length(formal_sig_id);
+ int offset =
+ ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesLengthOffset);
+ __ LoadSmiAsInt32(list_length, type_info, offset);
+ __ emit_i32_cond_jumpi(kUnsignedLessThanEqual, sig_mismatch_label,
+ list_length.gp(), rtt_depth, frozen);
+ }
+ // Step 3: load the candidate list slot, and compare it.
+ Register maybe_match = type_info;
+ __ LoadTaggedPointer(
+ maybe_match, type_info, no_reg,
+ ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset +
+ rtt_depth * kTaggedSize));
+ Register formal_rtt = formal_sig_id;
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(formal_rtt, ManagedObjectMaps, pinned);
+ __ LoadTaggedPointer(
+ formal_rtt, formal_rtt, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
+ imm.sig_imm.index));
+ __ emit_cond_jump(kNotEqual, sig_mismatch_label, kRtt, formal_rtt,
+ maybe_match, frozen);
- CODE_COMMENT("Execute indirect call");
- // At this point {index} has already been multiplied by kTaggedSize.
+ __ bind(&success_label);
+ } else {
+ FREEZE_STATE(trapping);
+ __ emit_cond_jump(kNotEqual, sig_mismatch_label, kI32, real_sig_id,
+ formal_sig_id, trapping);
+ }
+ } else if (needs_null_check) {
+ CODE_COMMENT("Check indirect call element for nullity");
+ Register real_sig_id = tmp1;
+
+ // Load the signature from {instance->ift_sig_ids[key]}
+ if (imm.table_imm.index == 0) {
+ LOAD_INSTANCE_FIELD(real_sig_id, IndirectFunctionTableSigIds,
+ kSystemPointerSize, pinned);
+ } else {
+ __ Load(LiftoffRegister(real_sig_id), indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kSigIdsOffset),
+ kPointerLoadType);
+ }
+ static_assert((1 << 2) == kInt32Size);
+ __ Load(LiftoffRegister(real_sig_id), real_sig_id, index, 0,
+ LoadType::kI32Load, nullptr, false, false, true);
- // Load the instance from {instance->ift_instances[key]}
- if (imm.table_imm.index == 0) {
- LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
+ Label* sig_mismatch_label =
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
+ __ DropValues(1);
+
+ FREEZE_STATE(frozen);
+ __ emit_i32_cond_jumpi(kEqual, sig_mismatch_label, real_sig_id, -1,
+ frozen);
} else {
- __ LoadTaggedPointer(
- table, indirect_function_table, no_reg,
- wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset));
+ __ DropValues(1);
}
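
The signature check for call_indirect is now chosen statically from the table's declared element type: if that type already implies the exact (non-null) expected function type, no dynamic check is emitted at all; if it is merely nullable, only the uninitialized-slot (signature id -1) check remains; otherwise the canonical signature ids are compared, falling back to the constant-time supertype walk when the expected type is not final. A sketch of that decision (illustrative only; "exact_match" stands for the EquivalentTypes test above):

enum class IndirectCallCheck { kNone, kNullOnly, kFullSignature };

IndirectCallCheck RequiredCheck(bool table_type_exact_match,
                                bool table_is_nullable) {
  if (!table_type_exact_match) return IndirectCallCheck::kFullSignature;
  if (table_is_nullable) return IndirectCallCheck::kNullOnly;
  return IndirectCallCheck::kNone;
}
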
- __ LoadTaggedPointer(tmp_const, table, index,
- ObjectAccess::ElementOffsetInTaggedFixedArray(0),
- true);
+ {
+ CODE_COMMENT("Execute indirect call");
- Register* explicit_instance = &tmp_const;
+ Register function_instance = tmp1;
+ Register function_target = tmp2;
- // Load the target from {instance->ift_targets[key]}
- if (imm.table_imm.index == 0) {
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
- kSystemPointerSize, pinned);
- } else {
- __ Load(LiftoffRegister(table), indirect_function_table, no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmIndirectFunctionTable::kTargetsOffset),
- kPointerLoadType);
- }
- __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
- nullptr, false, false, true);
+ // Load the instance from {instance->ift_instances[key]}
+ if (imm.table_imm.index == 0) {
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(function_instance,
+ IndirectFunctionTableRefs, pinned);
+ } else {
+ __ LoadTaggedPointer(function_instance, indirect_function_table, no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kRefsOffset));
+ }
+ __ LoadTaggedPointer(function_instance, function_instance, index,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(0),
+ true);
+
+ // Load the target from {instance->ift_targets[key]}
+ if (imm.table_imm.index == 0) {
+ LOAD_INSTANCE_FIELD(function_target, IndirectFunctionTableTargets,
+ kSystemPointerSize, pinned);
+ } else {
+ __ Load(LiftoffRegister(function_target), indirect_function_table,
+ no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmIndirectFunctionTable::kTargetsOffset),
+ kPointerLoadType);
+ }
+ __ Load(LiftoffRegister(function_target), function_target, index, 0,
+ kPointerLoadType, nullptr, false, false, true);
- auto call_descriptor =
- compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
- call_descriptor =
- GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(zone_, imm.sig);
+ call_descriptor = GetLoweredCallDescriptor(zone_, call_descriptor);
- Register target = scratch;
- __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
- if (tail_call) {
- __ PrepareTailCall(
- static_cast<int>(call_descriptor->ParameterSlotCount()),
- static_cast<int>(
- call_descriptor->GetStackParameterDelta(descriptor_)));
- __ TailCallIndirect(target);
- } else {
- source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(&sig, call_descriptor, target);
+ __ PrepareCall(&sig, call_descriptor, &function_target,
+ function_instance);
+ if (tail_call) {
+ __ PrepareTailCall(
+ static_cast<int>(call_descriptor->ParameterSlotCount()),
+ static_cast<int>(
+ call_descriptor->GetStackParameterDelta(descriptor_)));
+ __ TailCallIndirect(function_target);
+ } else {
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+ __ CallIndirect(&sig, call_descriptor, function_target);
- FinishCall(decoder, &sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
+ }
}
}
void CallRef(FullDecoder* decoder, ValueType func_ref_type,
const FunctionSig* type_sig, TailCall tail_call) {
- MostlySmallValueKindSig sig(compilation_zone_, type_sig);
+ MostlySmallValueKindSig sig(zone_, type_sig);
for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
compiler::CallDescriptor* call_descriptor =
- compiler::GetWasmCallDescriptor(compilation_zone_, type_sig);
- call_descriptor =
- GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+ compiler::GetWasmCallDescriptor(zone_, type_sig);
+ call_descriptor = GetLoweredCallDescriptor(zone_, call_descriptor);
Register target_reg = no_reg, instance_reg = no_reg;
- if (v8_flags.wasm_speculative_inlining) {
- ValueKind kIntPtrKind = kPointerKind;
-
+ if (decoder->enabled_.has_inlining()) {
LiftoffRegList pinned;
LiftoffRegister func_ref = pinned.set(__ PopToRegister(pinned));
LiftoffRegister vector = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
LiftoffAssembler::VarState func_ref_var(kRef, func_ref, 0);
- __ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind);
- LiftoffAssembler::VarState vector_var(kPointerKind, vector, 0);
+ __ Fill(vector, liftoff::kFeedbackVectorOffset, kRef);
+ LiftoffAssembler::VarState vector_var{kRef, vector, 0};
LiftoffRegister index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
size_t vector_slot = encountered_call_instructions_.size() * 2;
encountered_call_instructions_.push_back(
@@ -7385,15 +7767,15 @@ class LiftoffCompiler {
// CallRefIC(vector: FixedArray, index: intptr,
// funcref: WasmInternalFunction)
CallRuntimeStub(WasmCode::kCallRefIC,
- MakeSig::Returns(kPointerKind, kPointerKind)
- .Params(kPointerKind, kIntPtrKind, kPointerKind),
+ MakeSig::Returns(kIntPtrKind, kIntPtrKind)
+ .Params(kRef, kIntPtrKind, kRef),
{vector_var, index_var, func_ref_var},
decoder->position());
target_reg = LiftoffRegister(kReturnRegister0).gp();
instance_reg = LiftoffRegister(kReturnRegister1).gp();
- } else { // v8_flags.wasm_speculative_inlining
+ } else { // decoder->enabled_.has_inlining()
// Non-feedback-collecting version.
// Executing a write barrier needs temp registers; doing this on a
// conditional branch confuses the LiftoffAssembler's register management.
@@ -7419,7 +7801,6 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
#ifdef V8_ENABLE_SANDBOX
- LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
__ LoadExternalPointer(target.gp(), func_ref.gp(),
WasmInternalFunction::kCallTargetOffset,
kWasmInternalFunctionCallTargetTag, temp.gp());
@@ -7435,18 +7816,13 @@ class LiftoffCompiler {
LiftoffRegister null_address = temp;
__ LoadConstant(null_address, WasmValue::ForUintPtr(0));
- __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
+ __ emit_cond_jump(kNotEqual, &perform_call, kIntPtrKind, target.gp(),
null_address.gp(), frozen);
// The cached target can only be null for WasmJSFunctions.
__ LoadTaggedPointer(
target.gp(), func_ref.gp(), no_reg,
wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset));
-#ifdef V8_EXTERNAL_CODE_SPACE
- __ LoadCodeDataContainerEntry(target.gp(), target.gp());
-#else
- __ emit_ptrsize_addi(target.gp(), target.gp(),
- wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
-#endif
+ __ LoadCodeEntry(target.gp(), target.gp());
// Fall through to {perform_call}.
__ bind(&perform_call);
@@ -7454,9 +7830,9 @@ class LiftoffCompiler {
// is in {instance}.
target_reg = target.gp();
instance_reg = instance.gp();
- } // v8_flags.wasm_speculative_inlining
+ } // decoder->enabled_.has_inlining()
- __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
+ __ PrepareCall(&sig, call_descriptor, &target_reg, instance_reg);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -7472,35 +7848,37 @@ class LiftoffCompiler {
}
}
- void LoadNullValue(Register null, LiftoffRegList pinned) {
- // TODO(13449): Use root register instead of isolate to retrieve null.
- LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
- __ LoadFullPointer(null, null,
- IsolateData::root_slot_offset(RootIndex::kNullValue));
+ void LoadNullValue(Register null, LiftoffRegList pinned, ValueType type) {
+ __ LoadFullPointer(
+ null, kRootRegister,
+ type == kWasmExternRef || type == kWasmNullExternRef
+ ? IsolateData::root_slot_offset(RootIndex::kNullValue)
+ : IsolateData::root_slot_offset(RootIndex::kWasmNull));
}
// Stores the null value representation in the passed register.
// If pointer compression is active, only the compressed tagged pointer
// will be stored. Any operations with this register therefore must
// not compare this against 64 bits using quadword instructions.
- void LoadNullValueForCompare(Register null, LiftoffRegList pinned) {
+ void LoadNullValueForCompare(Register null, LiftoffRegList pinned,
+ ValueType type) {
Tagged_t static_null =
- wasm::GetWasmEngine()->compressed_null_value_or_zero();
- if (static_null != 0) {
+ wasm::GetWasmEngine()->compressed_wasm_null_value_or_zero();
+ if (type != kWasmExternRef && type != kWasmNullExternRef &&
+ static_null != 0) {
// static_null is only set for builds with pointer compression.
DCHECK_LE(static_null, std::numeric_limits<uint32_t>::max());
__ LoadConstant(LiftoffRegister(null),
WasmValue(static_cast<uint32_t>(static_null)));
} else {
- LoadNullValue(null, pinned);
+ LoadNullValue(null, pinned, type);
}
}
void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
RootIndex root_index) {
- LOAD_INSTANCE_FIELD(dst, IsolateRoot, kSystemPointerSize, pinned);
- uint32_t offset_imm = IsolateData::root_slot_offset(root_index);
- __ LoadFullPointer(dst, dst, offset_imm);
+ __ LoadFullPointer(dst, kRootRegister,
+ IsolateData::root_slot_offset(root_index));
}
void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
@@ -7511,10 +7889,10 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
- LoadNullValueForCompare(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned, type);
FREEZE_STATE(trapping);
- __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kRefNull, object,
- null.gp(), trapping);
+ __ emit_cond_jump(kEqual, trap_label, kRefNull, object, null.gp(),
+ trapping);
}
void BoundsCheckArray(FullDecoder* decoder, LiftoffRegister array,
@@ -7527,8 +7905,8 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
__ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load);
FREEZE_STATE(trapping);
- __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32,
- index.gp(), length.gp(), trapping);
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, trap_label, kI32, index.gp(),
+ length.gp(), trapping);
}
int StructFieldOffset(const StructType* struct_type, int field_index) {
@@ -7550,9 +7928,12 @@ class LiftoffCompiler {
void StoreObjectField(Register obj, Register offset_reg, int offset,
LiftoffRegister value, LiftoffRegList pinned,
- ValueKind kind) {
+ ValueKind kind,
+ LiftoffAssembler::SkipWriteBarrier skip_write_barrier =
+ LiftoffAssembler::kNoSkipWriteBarrier) {
if (is_reference(kind)) {
- __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
+ __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned,
+ skip_write_barrier);
} else {
// Primitive kind.
StoreType store_type = StoreType::ForValueKind(kind);
@@ -7560,10 +7941,10 @@ class LiftoffCompiler {
}
}
- void SetDefaultValue(LiftoffRegister reg, ValueKind kind,
+ void SetDefaultValue(LiftoffRegister reg, ValueType type,
LiftoffRegList pinned) {
- DCHECK(is_defaultable(kind));
- switch (kind) {
+ DCHECK(is_defaultable(type.kind()));
+ switch (type.kind()) {
case kI8:
case kI16:
case kI32:
@@ -7578,7 +7959,7 @@ class LiftoffCompiler {
DCHECK(CpuFeatures::SupportsWasmSimd128());
return __ emit_s128_xor(reg, reg, reg);
case kRefNull:
- return LoadNullValue(reg.gp(), pinned);
+ return LoadNullValue(reg.gp(), pinned, type);
case kRtt:
case kVoid:
case kBottom:
@@ -7626,6 +8007,52 @@ class LiftoffCompiler {
tmp_s128, lane_kind);
}
+ void ArrayFillImpl(LiftoffRegList pinned, LiftoffRegister obj,
+ LiftoffRegister index, LiftoffRegister value,
+ LiftoffRegister length, ValueKind elem_kind,
+ LiftoffAssembler::SkipWriteBarrier skip_write_barrier) {
+ // initial_offset = WasmArray::kHeaderSize + index * elem_size.
+ LiftoffRegister offset = index;
+ if (value_kind_size_log2(elem_kind) != 0) {
+ __ emit_i32_shli(offset.gp(), index.gp(),
+ value_kind_size_log2(elem_kind));
+ }
+ __ emit_i32_addi(offset.gp(), offset.gp(),
+ wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
+
+ // end_offset = initial_offset + length * elem_size.
+ LiftoffRegister end_offset = length;
+ if (value_kind_size_log2(elem_kind) != 0) {
+ __ emit_i32_shli(end_offset.gp(), length.gp(),
+ value_kind_size_log2(elem_kind));
+ }
+ __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
+
+ Label loop, done;
+ __ bind(&loop);
+ {
+ // This is subtle: {StoreObjectField} can request a temp register, which
+ // is precisely what {FREEZE_STATE} (with non-trivial live range) is
+ // supposed to guard against. In this case it's fine though, because we
+ // are explicitly requesting {unused_and_unpinned} here, therefore we know
+ // a register is available, so requesting a temp register won't actually
+ // cause any state changes.
+ // TODO(jkummerow): See if we can make this more elegant, e.g. by passing
+ // a temp register to {StoreObjectField}.
+ LiftoffRegister unused_and_unpinned = __ GetUnusedRegister(pinned);
+ USE(unused_and_unpinned);
+ FREEZE_STATE(in_this_case_its_fine);
+ __ emit_cond_jump(kUnsignedGreaterThanEqual, &done, kI32, offset.gp(),
+ end_offset.gp(), in_this_case_its_fine);
+ }
+ StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind,
+ skip_write_barrier);
+ __ emit_i32_addi(offset.gp(), offset.gp(), value_kind_size(elem_kind));
+ __ emit_jump(&loop);
+
+ __ bind(&done);
+ }
+
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
}
@@ -7662,17 +8089,30 @@ class LiftoffCompiler {
__ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
}
- Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
+ // Return a register holding the instance, populating the "cached instance"
+ // register if possible. If no free register is available, the cache is not
+ // set and we use {fallback} instead. This can be freely overwritten by the
+ // caller then.
+ V8_INLINE Register LoadInstanceIntoRegister(LiftoffRegList pinned,
+ Register fallback) {
Register instance = __ cache_state()->cached_instance;
- if (instance == no_reg) {
- instance = __ cache_state()->TrySetCachedInstanceRegister(
- pinned | LiftoffRegList{fallback});
- if (instance == no_reg) instance = fallback;
- __ LoadInstanceFromFrame(instance);
+ if (V8_UNLIKELY(instance == no_reg)) {
+ instance = LoadInstanceIntoRegister_Slow(pinned, fallback);
}
return instance;
}
+ V8_NOINLINE V8_PRESERVE_MOST Register
+ LoadInstanceIntoRegister_Slow(LiftoffRegList pinned, Register fallback) {
+ DCHECK_EQ(no_reg, __ cache_state()->cached_instance);
+ SCOPED_CODE_COMMENT("load instance");
+ Register instance = __ cache_state()->TrySetCachedInstanceRegister(
+ pinned | LiftoffRegList{fallback});
+ if (instance == no_reg) instance = fallback;
+ __ LoadInstanceFromFrame(instance);
+ return instance;
+ }
+
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
// MVP:
@@ -7700,7 +8140,7 @@ class LiftoffCompiler {
// Zone used to store information during compilation. The result will be
// stored independently, such that this zone can die together with the
// LiftoffCompiler after compilation.
- Zone* compilation_zone_;
+ Zone* zone_;
SafepointTableBuilder safepoint_table_builder_;
// The pc offset of the instructions to reserve the stack frame. Needed to
// patch the actually needed stack size in the end.
@@ -7770,6 +8210,8 @@ WasmCompilationResult ExecuteLiftoffCompilation(
CompilationEnv* env, const FunctionBody& func_body,
const LiftoffOptions& compiler_options) {
DCHECK(compiler_options.is_initialized());
+ // Liftoff does not validate the code, so that should have run before.
+ DCHECK(env->module->function_was_validated(compiler_options.func_index));
base::TimeTicks start_time;
if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
start_time = base::TimeTicks::Now();
@@ -7790,7 +8232,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
compiler_options.for_debugging == kForDebugging);
WasmFeatures unused_detected_features;
- WasmFullDecoder<Decoder::BooleanValidationTag, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::NoValidationTag, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features,
compiler_options.detected_features ? compiler_options.detected_features
: &unused_detected_features,
@@ -7827,6 +8269,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
result.func_index = compiler_options.func_index;
result.result_tier = ExecutionTier::kLiftoff;
result.for_debugging = compiler_options.for_debugging;
+ result.frame_has_feedback_slot = env->enabled_features.has_inlining();
if (auto* debug_sidetable = compiler_options.debug_sidetable) {
*debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
}
@@ -7843,7 +8286,6 @@ WasmCompilationResult ExecuteLiftoffCompilation(
}
DCHECK(result.succeeded());
- env->module->set_function_validated(compiler_options.func_index);
return result;
}
@@ -7870,7 +8312,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
code->for_debugging() == kForStepping
? base::ArrayVector(kSteppingBreakpoints)
: base::Vector<const int>{};
- WasmFullDecoder<Decoder::BooleanValidationTag, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::NoValidationTag, LiftoffCompiler> decoder(
&zone, native_module->module(), env.enabled_features, &detected,
func_body, call_descriptor, &env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
@@ -7885,14 +8327,4 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
return debug_sidetable_builder.GenerateDebugSideTable();
}
-#undef __
-#undef TRACE
-#undef WASM_INSTANCE_OBJECT_FIELD_OFFSET
-#undef WASM_INSTANCE_OBJECT_FIELD_SIZE
-#undef LOAD_INSTANCE_FIELD
-#undef LOAD_TAGGED_PTR_INSTANCE_FIELD
-#undef CODE_COMMENT
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm
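Annotation (not part of the patch): the LoadInstanceIntoRegister change above splits the helper into an inlined fast path that only checks the cached-instance register and a V8_NOINLINE V8_PRESERVE_MOST slow path that repopulates the cache. Below is a minimal standalone sketch of that fast-path/slow-path pattern; the Cache struct and function names are invented for illustration, and GCC/Clang builtins stand in for the V8_UNLIKELY/V8_NOINLINE macros.

// Sketch only - not V8 code. Builds with g++/clang++ -O2 sketch.cc
#include <cstdio>

struct Cache {
  int cached_value = -1;  // -1 means "not cached", like cached_instance == no_reg.
};

// Out-of-line slow path: populate the cache and return the value.
__attribute__((noinline)) int LoadSlow(Cache* cache, int fallback) {
  cache->cached_value = fallback;  // stands in for LoadInstanceFromFrame().
  return cache->cached_value;
}

// Inlined fast path: hit the cache if possible, otherwise take the slow path.
inline int Load(Cache* cache, int fallback) {
  int value = cache->cached_value;
  if (__builtin_expect(value == -1, 0)) value = LoadSlow(cache, fallback);
  return value;
}

int main() {
  Cache cache;
  std::printf("%d %d\n", Load(&cache, 42), Load(&cache, 7));  // prints "42 42"
}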
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 13d332d8f0..a4e3c96566 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -56,7 +56,7 @@ enum LiftoffBailoutReason : int8_t {
struct LiftoffOptions {
int func_index = -1;
- ForDebugging for_debugging = kNoDebugging;
+ ForDebugging for_debugging = kNotForDebugging;
Counters* counters = nullptr;
AssemblerBufferCache* assembler_buffer_cache = nullptr;
WasmFeatures* detected_features = nullptr;
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index ea7eaec235..8818b1078d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -54,25 +54,35 @@ static inline constexpr bool needs_fp_reg_pair(ValueKind kind) {
}
static inline constexpr RegClass reg_class_for(ValueKind kind) {
- switch (kind) {
- case kF32:
- case kF64:
- return kFpReg;
- case kI8:
- case kI16:
- case kI32:
- return kGpReg;
- case kI64:
- return kNeedI64RegPair ? kGpRegPair : kGpReg;
- case kS128:
- return kNeedS128RegPair ? kFpRegPair : kFpReg;
- case kRef:
- case kRefNull:
- case kRtt:
- return kGpReg;
- default:
- return kNoReg; // unsupported kind
- }
+ // Statically generate an array that we use for lookup at runtime.
+ constexpr size_t kNumValueKinds = static_cast<size_t>(kBottom);
+ constexpr auto kRegClasses =
+ base::make_array<kNumValueKinds>([](std::size_t kind) {
+ switch (kind) {
+ case kF32:
+ case kF64:
+ return kFpReg;
+ case kI8:
+ case kI16:
+ case kI32:
+ return kGpReg;
+ case kI64:
+ return kNeedI64RegPair ? kGpRegPair : kGpReg;
+ case kS128:
+ return kNeedS128RegPair ? kFpRegPair : kFpReg;
+ case kRef:
+ case kRefNull:
+ case kRtt:
+ return kGpReg;
+ case kVoid:
+ return kNoReg; // unsupported kind
+ }
+ CONSTEXPR_UNREACHABLE();
+ });
+ V8_ASSUME(kind < kNumValueKinds);
+ RegClass rc = kRegClasses[kind];
+ V8_ASSUME(rc != kNoReg);
+ return rc;
}
// Description of LiftoffRegister code encoding.
@@ -446,13 +456,13 @@ class LiftoffRegList {
}
LiftoffRegister GetFirstRegSet() const {
- DCHECK(!is_empty());
+ V8_ASSUME(regs_ != 0);
int first_code = base::bits::CountTrailingZeros(regs_);
return LiftoffRegister::from_liftoff_code(first_code);
}
LiftoffRegister GetLastRegSet() const {
- DCHECK(!is_empty());
+ V8_ASSUME(regs_ != 0);
int last_code =
8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
return LiftoffRegister::from_liftoff_code(last_code);
@@ -526,7 +536,10 @@ LiftoffRegList::Iterator LiftoffRegList::end() const {
}
static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
- return rc == kFpReg ? kFpCacheRegList : kGpCacheRegList;
+ V8_ASSUME(rc == kFpReg || rc == kGpReg);
+ static_assert(kGpReg == 0 && kFpReg == 1);
+ constexpr LiftoffRegList kRegLists[2]{kGpCacheRegList, kFpCacheRegList};
+ return kRegLists[rc];
}
inline std::ostream& operator<<(std::ostream& os, LiftoffRegList reglist) {
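Annotation (not part of the patch): the reg_class_for rewrite above replaces a switch with a statically generated lookup table plus V8_ASSUME hints so the compiler can keep the lookup branch-free. A rough standalone approximation of the same technique in plain C++17 follows, using std::array instead of base::make_array; the trimmed ValueKind/RegClass enums are stand-ins, not the real Liftoff definitions.

// Sketch only - not V8 code. Requires C++17 (constexpr std::array writes).
#include <array>
#include <cstdio>

enum ValueKind { kI32, kI64, kF32, kF64, kRef, kBottom };
enum RegClass { kGpReg, kFpReg, kNoReg };

// Build the kind -> register class table once, at compile time.
constexpr std::array<RegClass, kBottom> MakeRegClassTable() {
  std::array<RegClass, kBottom> table{};
  for (int k = 0; k < kBottom; ++k) {
    switch (k) {
      case kI32: case kI64: case kRef: table[k] = kGpReg; break;
      case kF32: case kF64:            table[k] = kFpReg; break;
      default:                         table[k] = kNoReg; break;
    }
  }
  return table;
}

constexpr RegClass reg_class_for(ValueKind kind) {
  constexpr auto kTable = MakeRegClassTable();
  return kTable[kind];  // single indexed load instead of a switch
}

static_assert(reg_class_for(kF64) == kFpReg, "table lookup matches the switch");

int main() { std::printf("%d\n", reg_class_for(kI64)); }  // prints 0 (kGpReg)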
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
index 4dc1086fca..dbc2b35df4 100644
--- a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -16,31 +16,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- return lt;
- case kSignedLessEqual:
- return le;
- case kSignedGreaterThan:
- return gt;
- case kSignedGreaterEqual:
- return ge;
- case kUnsignedLessThan:
- return ult;
- case kUnsignedLessEqual:
- return ule;
- case kUnsignedGreaterThan:
- return ugt;
- case kUnsignedGreaterEqual:
- return uge;
- }
-}
-
// Liftoff Frames.
//
// slot Frame
@@ -234,20 +209,21 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(
+ MacroAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
@@ -334,20 +310,19 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return kind == kS128 || is_reference(kind);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
break;
case kI64:
- TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i64()));
break;
case kF32:
- TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
case kF64:
- TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
UNREACHABLE();
@@ -379,8 +354,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int32_t offset) {
- static_assert(kTaggedSize == kSystemPointerSize);
- Ld_d(dst, MemOperand(instance, offset));
+ LoadTaggedField(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -392,11 +366,10 @@ void LiftoffAssembler::ResetOSRTarget() {}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm, bool needs_shift) {
- static_assert(kTaggedSize == kInt64Size);
- unsigned shift_amount = !needs_shift ? 0 : 3;
+ unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
false, shift_amount);
- Ld_d(dst, src_op);
+ LoadTaggedField(dst, src_op);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -423,19 +396,16 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
offset_op = Operand(effective_offset);
}
if (offset_op.is_reg()) {
- St_d(src.gp(), MemOperand(dst_addr, offset_op.rm()));
+ StoreTaggedField(src.gp(), MemOperand(dst_addr, offset_op.rm()));
} else {
- St_d(src.gp(), MemOperand(dst_addr, offset_imm));
+ StoreTaggedField(src.gp(), MemOperand(dst_addr, offset_imm));
}
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
- Label write_barrier;
Label exit;
- CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
- &write_barrier);
- b(&exit);
- bind(&write_barrier);
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask,
+ kZero, &exit);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
@@ -465,27 +435,27 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
- TurboAssembler::Ld_hu(dst.gp(), src_op);
+ MacroAssembler::Ld_hu(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
case LoadType::kI64Load16S:
- TurboAssembler::Ld_h(dst.gp(), src_op);
+ MacroAssembler::Ld_h(dst.gp(), src_op);
break;
case LoadType::kI64Load32U:
- TurboAssembler::Ld_wu(dst.gp(), src_op);
+ MacroAssembler::Ld_wu(dst.gp(), src_op);
break;
case LoadType::kI32Load:
case LoadType::kI64Load32S:
- TurboAssembler::Ld_w(dst.gp(), src_op);
+ MacroAssembler::Ld_w(dst.gp(), src_op);
break;
case LoadType::kI64Load:
- TurboAssembler::Ld_d(dst.gp(), src_op);
+ MacroAssembler::Ld_d(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::Fld_s(dst.fp(), src_op);
+ MacroAssembler::Fld_s(dst.fp(), src_op);
break;
case LoadType::kF64Load:
- TurboAssembler::Fld_d(dst.fp(), src_op);
+ MacroAssembler::Fld_d(dst.fp(), src_op);
break;
case LoadType::kS128Load:
UNREACHABLE();
@@ -511,20 +481,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
- TurboAssembler::St_h(src.gp(), dst_op);
+ MacroAssembler::St_h(src.gp(), dst_op);
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
- TurboAssembler::St_w(src.gp(), dst_op);
+ MacroAssembler::St_w(src.gp(), dst_op);
break;
case StoreType::kI64Store:
- TurboAssembler::St_d(src.gp(), dst_op);
+ MacroAssembler::St_d(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::Fst_s(src.fp(), dst_op);
+ MacroAssembler::Fst_s(src.fp(), dst_op);
break;
case StoreType::kF64Store:
- TurboAssembler::Fst_d(src.fp(), dst_op);
+ MacroAssembler::Fst_d(src.fp(), dst_op);
break;
case StoreType::kS128Store:
UNREACHABLE();
@@ -536,9 +506,11 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList pinned) {
+ LoadType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
- MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, i64_offset);
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U: {
@@ -574,9 +546,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8: {
@@ -624,43 +598,44 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
dbar(0); \
} while (0)
-#define ATOMIC_BINOP_CASE(name, inst32, inst64, opcode) \
- void LiftoffAssembler::Atomic##name( \
- Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
- LiftoffRegister value, LiftoffRegister result, StoreType type) { \
- LiftoffRegList pinned{dst_addr, offset_reg, value, result}; \
- Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- MemOperand dst_op = \
- liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); \
- Add_d(temp0, dst_op.base(), dst_op.offset()); \
- switch (type.value()) { \
- case StoreType::kI64Store8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, inst64, 7); \
- break; \
- case StoreType::kI32Store8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, inst32, 3); \
- break; \
- case StoreType::kI64Store16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, inst64, 7); \
- break; \
- case StoreType::kI32Store16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, inst32, 3); \
- break; \
- case StoreType::kI64Store32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, inst64, 7); \
- break; \
- case StoreType::kI32Store: \
- am##opcode##_db_w(result.gp(), value.gp(), temp0); \
- break; \
- case StoreType::kI64Store: \
- am##opcode##_db_d(result.gp(), value.gp(), temp0); \
- break; \
- default: \
- UNREACHABLE(); \
- } \
+#define ATOMIC_BINOP_CASE(name, inst32, inst64, opcode) \
+ void LiftoffAssembler::Atomic##name( \
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
+ LiftoffRegister value, LiftoffRegister result, StoreType type, \
+ bool i64_offset) { \
+ LiftoffRegList pinned{dst_addr, offset_reg, value, result}; \
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ MemOperand dst_op = \
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset); \
+ Add_d(temp0, dst_op.base(), dst_op.offset()); \
+ switch (type.value()) { \
+ case StoreType::kI64Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, inst64, 7); \
+ break; \
+ case StoreType::kI32Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, inst32, 3); \
+ break; \
+ case StoreType::kI64Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, inst64, 7); \
+ break; \
+ case StoreType::kI32Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, inst32, 3); \
+ break; \
+ case StoreType::kI64Store32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, inst64, 7); \
+ break; \
+ case StoreType::kI32Store: \
+ am##opcode##_db_w(result.gp(), value.gp(), temp0); \
+ break; \
+ case StoreType::kI64Store: \
+ am##opcode##_db_d(result.gp(), value.gp(), temp0); \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
}
ATOMIC_BINOP_CASE(Add, Add_w, Add_d, add)
@@ -682,13 +657,15 @@ ATOMIC_BINOP_CASE(Xor, Xor, Xor, xor)
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
LiftoffRegList pinned{dst_addr, offset_reg, value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
Add_d(temp0, dst_op.base(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
@@ -740,12 +717,14 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
LiftoffRegList pinned{dst_addr, offset_reg, value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
Add_d(temp0, dst_op.base(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
@@ -815,12 +794,13 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool i64_offset) {
LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
Add_d(temp0, dst_op.base(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
@@ -901,14 +881,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
if (kind != kS128) {
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
} else {
UNREACHABLE();
}
@@ -931,7 +911,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
Fst_s(reg.fp(), dst);
break;
case kF64:
- TurboAssembler::Fst_d(reg.fp(), dst);
+ MacroAssembler::Fst_d(reg.fp(), dst);
break;
case kS128:
UNREACHABLE();
@@ -948,7 +928,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kI32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- TurboAssembler::li(scratch, Operand(value.to_i32()));
+ MacroAssembler::li(scratch, Operand(value.to_i32()));
St_w(scratch, dst);
break;
}
@@ -957,7 +937,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kRefNull: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- TurboAssembler::li(scratch, value.to_i64());
+ MacroAssembler::li(scratch, value.to_i64());
St_d(scratch, dst);
break;
}
@@ -985,7 +965,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
Fld_s(reg.fp(), src);
break;
case kF64:
- TurboAssembler::Fld_d(reg.fp(), src);
+ MacroAssembler::Fld_d(reg.fp(), src);
break;
case kS128:
UNREACHABLE();
@@ -1037,65 +1017,72 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
}
void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
- TurboAssembler::Clz_d(dst.gp(), src.gp());
+ MacroAssembler::Clz_d(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
- TurboAssembler::Ctz_d(dst.gp(), src.gp());
+ MacroAssembler::Ctz_d(dst.gp(), src.gp());
}
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+ MacroAssembler::Popcnt_d(dst.gp(), src.gp());
return true;
}
void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- SmiUntag(scratch, MemOperand(dst.gp(), offset));
- Add_d(scratch, scratch, Operand(1));
- SmiTag(scratch);
- St_d(scratch, MemOperand(dst.gp(), offset));
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(SmiValuesAre31Bits());
+ Ld_w(scratch, MemOperand(dst.gp(), offset));
+ Add_w(scratch, scratch, Operand(Smi::FromInt(1)));
+ St_w(scratch, MemOperand(dst.gp(), offset));
+ } else {
+ SmiUntag(scratch, MemOperand(dst.gp(), offset));
+ Add_d(scratch, scratch, Operand(1));
+ SmiTag(scratch);
+ St_d(scratch, MemOperand(dst.gp(), offset));
+ }
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
- TurboAssembler::Mul_w(dst, lhs, rhs);
+ MacroAssembler::Mul_w(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
- TurboAssembler::li(kScratchReg, 1);
- TurboAssembler::li(kScratchReg2, 1);
- TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
- TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+ MacroAssembler::li(kScratchReg, 1);
+ MacroAssembler::li(kScratchReg2, 1);
+ MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
add_d(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Div_w(dst, lhs, rhs);
+ MacroAssembler::Div_w(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Div_wu(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Div_wu(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Mod_w(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Mod_w(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Mod_wu(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Mod_wu(dst, lhs, rhs);
}
#define I32_BINOP(name, instruction) \
@@ -1131,15 +1118,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
- TurboAssembler::Clz_w(dst, src);
+ MacroAssembler::Clz_w(dst, src);
}
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
- TurboAssembler::Ctz_w(dst, src);
+ MacroAssembler::Ctz_w(dst, src);
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt_w(dst, src);
+ MacroAssembler::Popcnt_w(dst, src);
return true;
}
@@ -1164,55 +1151,55 @@ I32_SHIFTOP_I(shr, srl_w, srli_w)
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
- TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+ MacroAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
}
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
// Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
- TurboAssembler::li(kScratchReg, 1);
- TurboAssembler::li(kScratchReg2, 1);
- TurboAssembler::LoadZeroOnCondition(
+ MacroAssembler::li(kScratchReg, 1);
+ MacroAssembler::li(kScratchReg2, 1);
+ MacroAssembler::LoadZeroOnCondition(
kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
- TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
add_d(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
@@ -1270,32 +1257,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_s(dst, src);
+ MacroAssembler::Neg_s(dst, src);
}
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_d(dst, src);
+ MacroAssembler::Neg_d(dst, src);
}
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ MacroAssembler::Float32Min(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs);
bind(&done);
}
void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ MacroAssembler::Float32Max(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
bind(&done);
}
@@ -1307,22 +1294,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ MacroAssembler::Float64Min(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs);
bind(&done);
}
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ MacroAssembler::Float64Max(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
bind(&done);
}
@@ -1376,7 +1363,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) {
switch (opcode) {
case kExprI32ConvertI64:
- TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+ MacroAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
return true;
case kExprI32SConvertF32: {
LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@@ -1384,20 +1371,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_s(rounded.fp(), src.fp());
ftintrz_w_s(kScratchDoubleReg, rounded.fp());
movfr2gr_s(dst.gp(), kScratchDoubleReg);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
- TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
- TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
- TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+ MacroAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap.
movgr2fr_w(kScratchDoubleReg, dst.gp());
ffint_s_w(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32UConvertF32: {
@@ -1406,18 +1393,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_s(rounded.fp(), src.fp());
- TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ MacroAssembler::Trunc_s(rounded.fp(), src.fp());
+ MacroAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
- TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
- TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+ MacroAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
// Checking if trap.
- TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
fcvt_s_d(converted_back.fp(), converted_back.fp());
- TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32SConvertF64: {
@@ -1426,14 +1413,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_d(rounded.fp(), src.fp());
ftintrz_w_d(kScratchDoubleReg, rounded.fp());
movfr2gr_s(dst.gp(), kScratchDoubleReg);
// Checking if trap.
ffint_d_w(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32UConvertF64: {
@@ -1442,23 +1429,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_d(rounded.fp(), src.fp());
- TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ MacroAssembler::Trunc_d(rounded.fp(), src.fp());
+ MacroAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
// Checking if trap.
- TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
- TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32ReinterpretF32:
- TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ MacroAssembler::FmoveLow(dst.gp(), src.fp());
return true;
case kExprI64SConvertI32:
slli_w(dst.gp(), src.gp(), 0);
return true;
case kExprI64UConvertI32:
- TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+ MacroAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
return true;
case kExprI64SConvertF32: {
LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@@ -1466,29 +1453,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_s(rounded.fp(), src.fp());
ftintrz_l_s(kScratchDoubleReg, rounded.fp());
movfr2gr_d(dst.gp(), kScratchDoubleReg);
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection.
- TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
- TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
- TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+ MacroAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap.
movgr2fr_d(kScratchDoubleReg, dst.gp());
ffint_s_l(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI64UConvertF32: {
// Real conversion.
- TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ MacroAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
kScratchReg);
// Checking if trap.
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
return true;
}
case kExprI64SConvertF64: {
@@ -1497,29 +1484,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_d(rounded.fp(), src.fp());
ftintrz_l_d(kScratchDoubleReg, rounded.fp());
movfr2gr_d(dst.gp(), kScratchDoubleReg);
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection.
- TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
- TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
- TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+ MacroAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap.
movgr2fr_d(kScratchDoubleReg, dst.gp());
ffint_d_l(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI64UConvertF64: {
// Real conversion.
- TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ MacroAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
kScratchReg);
// Checking if trap.
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
return true;
}
case kExprI64ReinterpretF64:
@@ -1532,13 +1519,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprF32UConvertI32:
- TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+ MacroAssembler::Ffint_s_uw(dst.fp(), src.gp());
return true;
case kExprF32ConvertF64:
fcvt_s_d(dst.fp(), src.fp());
return true;
case kExprF32ReinterpretI32:
- TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ MacroAssembler::FmoveLow(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
@@ -1547,7 +1534,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprF64UConvertI32:
- TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+ MacroAssembler::Ffint_d_uw(dst.fp(), src.gp());
return true;
case kExprF64ConvertF32:
fcvt_d_s(dst.fp(), src.fp());
@@ -1562,7 +1549,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1576,7 +1563,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1590,7 +1577,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1604,7 +1591,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1640,109 +1627,120 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
}
void LiftoffAssembler::emit_jump(Label* label) {
- TurboAssembler::Branch(label);
+ MacroAssembler::Branch(label);
}
void LiftoffAssembler::emit_jump(Register target) {
- TurboAssembler::Jump(target);
+ MacroAssembler::Jump(target);
}
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK(kind == kI32 || kind == kI64);
- TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ if (kind == kI32) {
+ UseScratchRegisterScope temps(this);
+ Register scratch0 = temps.Acquire();
+ slli_w(scratch0, lhs, 0);
+ MacroAssembler::Branch(label, cond, scratch0, Operand(zero_reg));
+ } else {
+ DCHECK(kind == kI64);
+ MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
} else {
- DCHECK((kind == kI32 || kind == kI64) ||
- (is_reference(kind) &&
- (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
- TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ if (kind == kI64) {
+ MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ DCHECK((kind == kI32) || (kind == kRtt) || (kind == kRef) ||
+ (kind == kRefNull));
+ MacroAssembler::CompareTaggedAndBranch(label, cond, lhs, Operand(rhs));
+ }
}
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+ MacroAssembler::CompareTaggedAndBranch(label, cond, lhs, Operand(imm));
}
void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) {
- TurboAssembler::Sub_d(value, value, Operand(subtrahend));
- TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg));
+ MacroAssembler::Sub_w(value, value, Operand(subtrahend));
+ MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg));
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- sltui(dst, src, 1);
+ slli_w(dst, src, 0);
+ sltui(dst, dst, 1);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
Register tmp = dst;
if (dst == lhs || dst == rhs) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
- TurboAssembler::li(tmp, 1);
+ MacroAssembler::li(tmp, 1);
// If negative condition is true, write 0 as result.
Condition neg_cond = NegateCondition(cond);
- TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+ UseScratchRegisterScope temps(this);
+ Register scratch0 = temps.Acquire();
+ Register scratch1 = kScratchReg;
- // If tmp != dst, result will be moved.
- TurboAssembler::Move(dst, tmp);
+ slli_w(scratch0, lhs, 0);
+ slli_w(scratch1, rhs, 0);
+ // Write 1 as result.
+ MacroAssembler::li(dst, 1);
+ MacroAssembler::LoadZeroOnCondition(dst, scratch0, Operand(scratch1),
+ neg_cond);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
sltui(dst, src.gp(), 1);
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs.gp() || dst == rhs.gp()) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
- TurboAssembler::li(tmp, 1);
+ MacroAssembler::li(tmp, 1);
// If negative condition is true, write 0 as result.
Condition neg_cond = NegateCondition(cond);
- TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
neg_cond);
// If tmp != dst, result will be moved.
- TurboAssembler::Move(dst, tmp);
+ MacroAssembler::Move(dst, tmp);
}
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
bool* predicate) {
switch (condition) {
case kEqual:
*predicate = true;
return CEQ;
- case kUnequal:
+ case kNotEqual:
*predicate = false;
return CEQ;
case kUnsignedLessThan:
*predicate = true;
return CLT;
- case kUnsignedGreaterEqual:
+ case kUnsignedGreaterThanEqual:
*predicate = false;
return CLT;
- case kUnsignedLessEqual:
+ case kUnsignedLessThanEqual:
*predicate = true;
return CLE;
case kUnsignedGreaterThan:
@@ -1757,63 +1755,59 @@ inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
- TurboAssembler::CompareIsNanF32(lhs, rhs);
- TurboAssembler::BranchFalseF(&not_nan);
+ MacroAssembler::CompareIsNanF32(lhs, rhs);
+ MacroAssembler::BranchFalseF(&not_nan);
// If one of the operands is NaN, return 1 for f32.ne, else 0.
if (cond == ne) {
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
} else {
- TurboAssembler::Move(dst, zero_reg);
+ MacroAssembler::Move(dst, zero_reg);
}
- TurboAssembler::Branch(&cont);
+ MacroAssembler::Branch(&cont);
bind(&not_nan);
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond =
- liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
- TurboAssembler::CompareF32(lhs, rhs, fcond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ MacroAssembler::CompareF32(lhs, rhs, fcond);
if (predicate) {
- TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ MacroAssembler::LoadZeroIfNotFPUCondition(dst);
} else {
- TurboAssembler::LoadZeroIfFPUCondition(dst);
+ MacroAssembler::LoadZeroIfFPUCondition(dst);
}
bind(&cont);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
- TurboAssembler::CompareIsNanF64(lhs, rhs);
- TurboAssembler::BranchFalseF(&not_nan);
+ MacroAssembler::CompareIsNanF64(lhs, rhs);
+ MacroAssembler::BranchFalseF(&not_nan);
// If one of the operands is NaN, return 1 for f64.ne, else 0.
if (cond == ne) {
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
} else {
- TurboAssembler::Move(dst, zero_reg);
+ MacroAssembler::Move(dst, zero_reg);
}
- TurboAssembler::Branch(&cont);
+ MacroAssembler::Branch(&cont);
bind(&not_nan);
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond =
- liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
- TurboAssembler::CompareF64(lhs, rhs, fcond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ MacroAssembler::CompareF64(lhs, rhs, fcond);
if (predicate) {
- TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ MacroAssembler::LoadZeroIfNotFPUCondition(dst);
} else {
- TurboAssembler::LoadZeroIfFPUCondition(dst);
+ MacroAssembler::LoadZeroIfFPUCondition(dst);
}
bind(&cont);
@@ -1847,14 +1841,16 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
bailout(kSimd, "loadlane");
}
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
bailout(kSimd, "storelane");
}
@@ -3026,8 +3022,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
- TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+ MacroAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
+ MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -3061,7 +3057,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+ MacroAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
fp_regs.clear(reg);
offset += slot_size;
}
@@ -3074,7 +3070,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
unsigned fp_offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+ MacroAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
fp_regs.clear(reg);
fp_offset += 8;
}
@@ -3193,7 +3189,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
addi_d(sp, sp, -size);
- TurboAssembler::Move(addr, sp);
+ MacroAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
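// Most of the churn in this file, and in the mips64 and ppc ports below, is
// mechanical: upstream V8 merged TurboAssembler into MacroAssembler, so every
// explicitly qualified base-class call is renamed in place, e.g.
//
//   - TurboAssembler::Branch(&cont);
//   + MacroAssembler::Branch(&cont);
//
// Only the class name changes; the qualification and arguments stay the same,
// so these hunks should not change the generated code.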
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index f3b78299c8..96cba24c9d 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -16,31 +16,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- return lt;
- case kSignedLessEqual:
- return le;
- case kSignedGreaterThan:
- return gt;
- case kSignedGreaterEqual:
- return ge;
- case kUnsignedLessThan:
- return ult;
- case kUnsignedLessEqual:
- return ule;
- case kUnsignedGreaterThan:
- return ugt;
- case kUnsignedGreaterEqual:
- return uge;
- }
-}
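// The helper removed above is no longer needed because Liftoff now takes the
// platform Condition enum directly instead of its own LiftoffCondition.
// Roughly, the emit_* signatures in this file change from
//
//   void emit_cond_jump(LiftoffCondition liftoff_cond, ...);  // + liftoff::ToCondition()
//
// to
//
//   void emit_cond_jump(Condition cond, ...);
//
// and the enumerators are renamed to match (kUnequal -> kNotEqual,
// kUnsignedLessEqual -> kUnsignedLessThanEqual, and so on), as the hunks
// below show.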
-
// Liftoff Frames.
//
// slot Frame
@@ -201,19 +176,19 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
V8_FALLTHROUGH;
case LoadType::kI64Load32U:
- assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
+ assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
break;
case LoadType::kI32Load:
case LoadType::kI64Load32S:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break;
case LoadType::kI32Load16S:
case LoadType::kI64Load16S:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
- assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
break;
case LoadType::kF64Load:
is_float = true;
@@ -221,7 +196,7 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
V8_FALLTHROUGH;
case LoadType::kI64Load:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
break;
default:
UNREACHABLE();
@@ -256,10 +231,10 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
V8_FALLTHROUGH;
case StoreType::kI32Store:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break;
case StoreType::kI32Store16:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break;
case StoreType::kF64Store:
is_float = true;
@@ -267,13 +242,13 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
V8_FALLTHROUGH;
case StoreType::kI64Store:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
break;
case StoreType::kI64Store32:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break;
case StoreType::kI64Store16:
- assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break;
default:
UNREACHABLE();
@@ -352,20 +327,21 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
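// The frame-size adjustment above no longer reads v8_flags inside the
// assembler: the caller now passes feedback_vector_slot explicitly, so
// whether the frame-setup builtin pushed a feedback-vector slot is decided at
// the call site. Roughly (names as in this hunk):
//
//   int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
//   if (feedback_vector_slot) frame_size -= kSystemPointerSize;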
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(
+ MacroAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
@@ -450,20 +426,19 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return kind == kS128 || is_reference(kind);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
break;
case kI64:
- TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i64()));
break;
case kF32:
- TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
case kF64:
- TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
UNREACHABLE();
@@ -534,17 +509,12 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
- MemoryChunk::kPointersFromHereAreInterestingMask, ne,
- &write_barrier);
- Branch(&exit);
- bind(&write_barrier);
+ MemoryChunk::kPointersFromHereAreInterestingMask, kZero, &exit);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
- MemoryChunk::kPointersToHereAreInterestingMask, eq,
- &exit);
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
Daddu(scratch, dst_op.rm(), dst_op.offset());
CallRecordWriteStubSaveRegisters(dst_addr, scratch, SaveFPRegsMode::kSave,
StubCallMode::kCallWasmRuntimeStub);
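// The write-barrier control flow above is simplified: the old code branched
// to a separate write_barrier label when the "pointers from here are
// interesting" bit was set and jumped to &exit otherwise; the new code
// inverts the check (kZero) and jumps straight to &exit, making the barrier
// body the fall-through path. Sketch of the resulting fast path:
//
//   CheckPageFlag(dst_addr, scratch,
//                 MemoryChunk::kPointersFromHereAreInterestingMask, kZero,
//                 &exit);                      // skip barrier if bit is clear
//   JumpIfSmi(src.gp(), &exit);                // Smis never need a barrier
//   CheckPageFlag(src.gp(), scratch,
//                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
//   // slow path: CallRecordWriteStubSaveRegisters(...)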
@@ -572,30 +542,30 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
- TurboAssembler::Ulhu(dst.gp(), src_op);
+ MacroAssembler::Ulhu(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
case LoadType::kI64Load16S:
- TurboAssembler::Ulh(dst.gp(), src_op);
+ MacroAssembler::Ulh(dst.gp(), src_op);
break;
case LoadType::kI64Load32U:
- TurboAssembler::Ulwu(dst.gp(), src_op);
+ MacroAssembler::Ulwu(dst.gp(), src_op);
break;
case LoadType::kI32Load:
case LoadType::kI64Load32S:
- TurboAssembler::Ulw(dst.gp(), src_op);
+ MacroAssembler::Ulw(dst.gp(), src_op);
break;
case LoadType::kI64Load:
- TurboAssembler::Uld(dst.gp(), src_op);
+ MacroAssembler::Uld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ MacroAssembler::Ulwc1(dst.fp(), src_op, t8);
break;
case LoadType::kF64Load:
- TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ MacroAssembler::Uldc1(dst.fp(), src_op, t8);
break;
case LoadType::kS128Load:
- TurboAssembler::ld_b(dst.fp().toW(), src_op);
+ MacroAssembler::ld_b(dst.fp().toW(), src_op);
break;
default:
UNREACHABLE();
@@ -638,23 +608,23 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
- TurboAssembler::Ush(src.gp(), dst_op, t8);
+ MacroAssembler::Ush(src.gp(), dst_op, t8);
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
- TurboAssembler::Usw(src.gp(), dst_op);
+ MacroAssembler::Usw(src.gp(), dst_op);
break;
case StoreType::kI64Store:
- TurboAssembler::Usd(src.gp(), dst_op);
+ MacroAssembler::Usd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ MacroAssembler::Uswc1(src.fp(), dst_op, t8);
break;
case StoreType::kF64Store:
- TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ MacroAssembler::Usdc1(src.fp(), dst_op, t8);
break;
case StoreType::kS128Store:
- TurboAssembler::st_b(src.fp().toW(), dst_op);
+ MacroAssembler::st_b(src.fp().toW(), dst_op);
break;
default:
UNREACHABLE();
@@ -663,9 +633,11 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList pinned) {
+ LoadType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
- MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, i64_offset);
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U: {
@@ -701,9 +673,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8: {
@@ -763,43 +737,44 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
sync(); \
} while (0)
-#define ATOMIC_BINOP_CASE(name, inst32, inst64) \
- void LiftoffAssembler::Atomic##name( \
- Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
- LiftoffRegister value, LiftoffRegister result, StoreType type) { \
- LiftoffRegList pinned{dst_addr, offset_reg, value, result}; \
- Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
- MemOperand dst_op = \
- liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); \
- Daddu(temp0, dst_op.rm(), dst_op.offset()); \
- switch (type.value()) { \
- case StoreType::kI64Store8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 8, inst64, 7); \
- break; \
- case StoreType::kI32Store8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst32, 3); \
- break; \
- case StoreType::kI64Store16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 16, inst64, 7); \
- break; \
- case StoreType::kI32Store16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst32, 3); \
- break; \
- case StoreType::kI64Store32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 32, inst64, 7); \
- break; \
- case StoreType::kI32Store: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
- break; \
- case StoreType::kI64Store: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
- break; \
- default: \
- UNREACHABLE(); \
- } \
+#define ATOMIC_BINOP_CASE(name, inst32, inst64) \
+ void LiftoffAssembler::Atomic##name( \
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
+ LiftoffRegister value, LiftoffRegister result, StoreType type, \
+ bool i64_offset) { \
+ LiftoffRegList pinned{dst_addr, offset_reg, value, result}; \
+ Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
+ MemOperand dst_op = \
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset); \
+ Daddu(temp0, dst_op.rm(), dst_op.offset()); \
+ switch (type.value()) { \
+ case StoreType::kI64Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 8, inst64, 7); \
+ break; \
+ case StoreType::kI32Store8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst32, 3); \
+ break; \
+ case StoreType::kI64Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 16, inst64, 7); \
+ break; \
+ case StoreType::kI32Store16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst32, 3); \
+ break; \
+ case StoreType::kI64Store32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 32, inst64, 7); \
+ break; \
+ case StoreType::kI32Store: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case StoreType::kI64Store: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
}
ATOMIC_BINOP_CASE(Add, Addu, Daddu)
@@ -843,12 +818,14 @@ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
LiftoffRegList pinned{dst_addr, offset_reg, value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
Daddu(temp0, dst_op.rm(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
@@ -919,12 +896,13 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool i64_offset) {
LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
Daddu(temp0, dst_op.rm(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
@@ -1004,16 +982,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
if (kind != kS128) {
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
} else {
- TurboAssembler::move_v(dst.toW(), src.toW());
+ MacroAssembler::move_v(dst.toW(), src.toW());
}
}
@@ -1034,10 +1012,10 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
Swc1(reg.fp(), dst);
break;
case kF64:
- TurboAssembler::Sdc1(reg.fp(), dst);
+ MacroAssembler::Sdc1(reg.fp(), dst);
break;
case kS128:
- TurboAssembler::st_b(reg.fp().toW(), dst);
+ MacroAssembler::st_b(reg.fp().toW(), dst);
break;
default:
UNREACHABLE();
@@ -1049,14 +1027,14 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case kI32: {
- TurboAssembler::li(kScratchReg, Operand(value.to_i32()));
+ MacroAssembler::li(kScratchReg, Operand(value.to_i32()));
Sw(kScratchReg, dst);
break;
}
case kI64:
case kRef:
case kRefNull: {
- TurboAssembler::li(kScratchReg, value.to_i64());
+ MacroAssembler::li(kScratchReg, value.to_i64());
Sd(kScratchReg, dst);
break;
}
@@ -1082,10 +1060,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
Lwc1(reg.fp(), src);
break;
case kF64:
- TurboAssembler::Ldc1(reg.fp(), src);
+ MacroAssembler::Ldc1(reg.fp(), src);
break;
case kS128:
- TurboAssembler::ld_b(reg.fp().toW(), src);
+ MacroAssembler::ld_b(reg.fp().toW(), src);
break;
default:
UNREACHABLE();
@@ -1134,16 +1112,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
}
void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
- TurboAssembler::Dclz(dst.gp(), src.gp());
+ MacroAssembler::Dclz(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
- TurboAssembler::Dctz(dst.gp(), src.gp());
+ MacroAssembler::Dctz(dst.gp(), src.gp());
}
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Dpopcnt(dst.gp(), src.gp());
+ MacroAssembler::Dpopcnt(dst.gp(), src.gp());
return true;
}
@@ -1157,42 +1135,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
- TurboAssembler::Mul(dst, lhs, rhs);
+ MacroAssembler::Mul(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
- TurboAssembler::li(kScratchReg, 1);
- TurboAssembler::li(kScratchReg2, 1);
- TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
- TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+ MacroAssembler::li(kScratchReg, 1);
+ MacroAssembler::li(kScratchReg2, 1);
+ MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
daddu(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Div(dst, lhs, rhs);
+ MacroAssembler::Div(dst, lhs, rhs);
}
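// The unrepresentable-case check above is branch-light: each scratch register
// starts at 1 and is zeroed only if its condition holds, so the sum is zero
// exactly when lhs == kMinInt AND rhs == -1 (the only i32.div_s input that
// overflows). In pseudo-C:
//
//   int s1 = (lhs == kMinInt) ? 0 : 1;   // LoadZeroOnCondition(kScratchReg, ...)
//   int s2 = (rhs == -1)      ? 0 : 1;   // LoadZeroOnCondition(kScratchReg2, ...)
//   if (s1 + s2 == 0) goto trap_div_unrepresentable;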
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Divu(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Divu(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Mod(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Mod(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Modu(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Modu(dst, lhs, rhs);
}
#define I32_BINOP(name, instruction) \
@@ -1228,15 +1206,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
- TurboAssembler::Clz(dst, src);
+ MacroAssembler::Clz(dst, src);
}
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
- TurboAssembler::Ctz(dst, src);
+ MacroAssembler::Ctz(dst, src);
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt(dst, src);
+ MacroAssembler::Popcnt(dst, src);
return true;
}
@@ -1261,55 +1239,55 @@ I32_SHIFTOP_I(shr, srl)
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
- TurboAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm));
+ MacroAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm));
}
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
// Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
- TurboAssembler::li(kScratchReg, 1);
- TurboAssembler::li(kScratchReg2, 1);
- TurboAssembler::LoadZeroOnCondition(
+ MacroAssembler::li(kScratchReg, 1);
+ MacroAssembler::li(kScratchReg2, 1);
+ MacroAssembler::LoadZeroOnCondition(
kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
- TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
daddu(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
@@ -1371,32 +1349,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_s(dst, src);
+ MacroAssembler::Neg_s(dst, src);
}
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_d(dst, src);
+ MacroAssembler::Neg_d(dst, src);
}
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ MacroAssembler::Float32Min(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs);
bind(&done);
}
void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ MacroAssembler::Float32Max(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
bind(&done);
}
@@ -1427,22 +1405,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ MacroAssembler::Float64Min(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs);
bind(&done);
}
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
Label ool, done;
- TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ MacroAssembler::Float64Max(dst, lhs, rhs, &ool);
Branch(&done);
bind(&ool);
- TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
bind(&done);
}
@@ -1515,7 +1493,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) {
switch (opcode) {
case kExprI32ConvertI64:
- TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32);
+ MacroAssembler::Ext(dst.gp(), src.gp(), 0, 32);
return true;
case kExprI32SConvertF32: {
LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@@ -1523,20 +1501,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_s_s(rounded.fp(), src.fp());
trunc_w_s(kScratchDoubleReg, rounded.fp());
mfc1(dst.gp(), kScratchDoubleReg);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
- TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
- TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
- TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+ MacroAssembler::Addu(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap.
mtc1(dst.gp(), kScratchDoubleReg);
cvt_s_w(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
+ MacroAssembler::BranchFalseF(trap);
return true;
}
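// The i32.trunc_f32_s sequence above detects overflow and NaN without an
// explicit range check: the truncated result is converted back to float and
// compared against the rounded input, and any mismatch (out of range, or NaN,
// which never compares equal) falls through BranchFalseF into the trap label.
// The Addu/Slt/Movn trio first replaces the FPU's INT32_MAX overflow
// indicator with INT32_MIN, which is exactly representable as a float, so the
// round-trip compare cannot spuriously succeed for inputs just above
// INT32_MAX. The same truncate / convert-back / compare shape recurs in the
// u32, f64 and i64 conversion cases below.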
case kExprI32UConvertF32: {
@@ -1545,18 +1523,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
- TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ MacroAssembler::Trunc_s_s(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
- TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
- TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+ MacroAssembler::Addu(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
// Checking if trap.
- TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
+ MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
cvt_s_d(converted_back.fp(), converted_back.fp());
- TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32SConvertF64: {
@@ -1565,14 +1543,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_d_d(rounded.fp(), src.fp());
trunc_w_d(kScratchDoubleReg, rounded.fp());
mfc1(dst.gp(), kScratchDoubleReg);
// Checking if trap.
cvt_d_w(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32UConvertF64: {
@@ -1581,23 +1559,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
- TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ MacroAssembler::Trunc_d_d(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
// Checking if trap.
- TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
- TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
+ MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI32ReinterpretF32:
- TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ MacroAssembler::FmoveLow(dst.gp(), src.fp());
return true;
case kExprI64SConvertI32:
sll(dst.gp(), src.gp(), 0);
return true;
case kExprI64UConvertI32:
- TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32);
+ MacroAssembler::Dext(dst.gp(), src.gp(), 0, 32);
return true;
case kExprI64SConvertF32: {
LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@@ -1605,29 +1583,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_s_s(rounded.fp(), src.fp());
trunc_l_s(kScratchDoubleReg, rounded.fp());
dmfc1(dst.gp(), kScratchDoubleReg);
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection.
- TurboAssembler::Daddu(kScratchReg, dst.gp(), 1);
- TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
- TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+ MacroAssembler::Daddu(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap.
dmtc1(dst.gp(), kScratchDoubleReg);
cvt_s_l(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI64UConvertF32: {
// Real conversion.
- TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ MacroAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
kScratchReg);
// Checking if trap.
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
return true;
}
case kExprI64SConvertF64: {
@@ -1636,29 +1614,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion.
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
+ MacroAssembler::Trunc_d_d(rounded.fp(), src.fp());
trunc_l_d(kScratchDoubleReg, rounded.fp());
dmfc1(dst.gp(), kScratchDoubleReg);
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection.
- TurboAssembler::Daddu(kScratchReg, dst.gp(), 1);
- TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
- TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+ MacroAssembler::Daddu(kScratchReg, dst.gp(), 1);
+ MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap.
dmtc1(dst.gp(), kScratchDoubleReg);
cvt_d_l(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
+ MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
+ MacroAssembler::BranchFalseF(trap);
return true;
}
case kExprI64UConvertF64: {
// Real conversion.
- TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ MacroAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
kScratchReg);
// Checking if trap.
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
return true;
}
case kExprI64ReinterpretF64:
@@ -1671,13 +1649,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprF32UConvertI32:
- TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
+ MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
return true;
case kExprF32ConvertF64:
cvt_s_d(dst.fp(), src.fp());
return true;
case kExprF32ReinterpretI32:
- TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ MacroAssembler::FmoveLow(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
@@ -1686,7 +1664,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprF64UConvertI32:
- TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
+ MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
return true;
case kExprF64ConvertF32:
cvt_d_s(dst.fp(), src.fp());
@@ -1705,7 +1683,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF32(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
- TurboAssembler::Move(
+ MacroAssembler::Move(
kScratchDoubleReg,
static_cast<float>(std::numeric_limits<int32_t>::min()));
CompareF32(OLT, src.fp(), kScratchDoubleReg);
@@ -1719,7 +1697,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1736,7 +1714,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF64(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
- TurboAssembler::Move(
+ MacroAssembler::Move(
kScratchDoubleReg,
static_cast<double>(std::numeric_limits<int32_t>::min()));
CompareF64(OLT, src.fp(), kScratchDoubleReg);
@@ -1750,7 +1728,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1767,7 +1745,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF32(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
- TurboAssembler::Move(
+ MacroAssembler::Move(
kScratchDoubleReg,
static_cast<float>(std::numeric_limits<int64_t>::min()));
CompareF32(OLT, src.fp(), kScratchDoubleReg);
@@ -1781,7 +1759,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
@@ -1798,7 +1776,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF64(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
- TurboAssembler::Move(
+ MacroAssembler::Move(
kScratchDoubleReg,
static_cast<double>(std::numeric_limits<int64_t>::min()));
CompareF64(OLT, src.fp(), kScratchDoubleReg);
@@ -1812,7 +1790,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
- TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+ MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
@@ -1848,109 +1826,102 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
}
void LiftoffAssembler::emit_jump(Label* label) {
- TurboAssembler::Branch(label);
+ MacroAssembler::Branch(label);
}
void LiftoffAssembler::emit_jump(Register target) {
- TurboAssembler::Jump(target);
+ MacroAssembler::Jump(target);
}
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
DCHECK(kind == kI32 || kind == kI64);
- TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
DCHECK((kind == kI32 || kind == kI64) ||
- (is_reference(kind) &&
- (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
- TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
+ MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
}
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+ MacroAssembler::Branch(label, cond, lhs, Operand(imm));
}
void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) {
- TurboAssembler::Dsubu(value, value, Operand(subtrahend));
- TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg));
+ MacroAssembler::Dsubu(value, value, Operand(subtrahend));
+ MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg));
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
sltiu(dst, src, 1);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
Register tmp = dst;
if (dst == lhs || dst == rhs) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
- TurboAssembler::li(tmp, 1);
+ MacroAssembler::li(tmp, 1);
// If negative condition is true, write 0 as result.
Condition neg_cond = NegateCondition(cond);
- TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+ MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
// If tmp != dst, result will be moved.
- TurboAssembler::Move(dst, tmp);
+ MacroAssembler::Move(dst, tmp);
}
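// emit_i32_set_cond materializes the boolean without a branch: write 1, then
// clear it when the negated condition holds. Equivalent pseudo-C:
//
//   tmp = 1;
//   if (compare(lhs, rhs, NegateCondition(cond))) tmp = 0;  // LoadZeroOnCondition
//   dst = tmp;                                              // no-op when tmp == dst
//
// tmp differs from dst only when dst aliases lhs or rhs, so the early write
// of 1 cannot clobber an input before the comparison runs; emit_i64_set_cond
// below follows the same scheme.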
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
sltiu(dst, src.gp(), 1);
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs.gp() || dst == rhs.gp()) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
}
// Write 1 as result.
- TurboAssembler::li(tmp, 1);
+ MacroAssembler::li(tmp, 1);
// If negative condition is true, write 0 as result.
Condition neg_cond = NegateCondition(cond);
- TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
neg_cond);
// If tmp != dst, result will be moved.
- TurboAssembler::Move(dst, tmp);
+ MacroAssembler::Move(dst, tmp);
}
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
bool* predicate) {
switch (condition) {
case kEqual:
*predicate = true;
return EQ;
- case kUnequal:
+ case kNotEqual:
*predicate = false;
return EQ;
case kUnsignedLessThan:
*predicate = true;
return OLT;
- case kUnsignedGreaterEqual:
+ case kUnsignedGreaterThanEqual:
*predicate = false;
return OLT;
- case kUnsignedLessEqual:
+ case kUnsignedLessThanEqual:
*predicate = true;
return OLE;
case kUnsignedGreaterThan:
@@ -1985,63 +1956,59 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
- TurboAssembler::CompareIsNanF32(lhs, rhs);
- TurboAssembler::BranchFalseF(&not_nan);
+ MacroAssembler::CompareIsNanF32(lhs, rhs);
+ MacroAssembler::BranchFalseF(&not_nan);
// If one of the operands is NaN, return 1 for f32.ne, else 0.
if (cond == ne) {
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
} else {
- TurboAssembler::Move(dst, zero_reg);
+ MacroAssembler::Move(dst, zero_reg);
}
- TurboAssembler::Branch(&cont);
+ MacroAssembler::Branch(&cont);
bind(&not_nan);
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond =
- liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
- TurboAssembler::CompareF32(fcond, lhs, rhs);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ MacroAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
- TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ MacroAssembler::LoadZeroIfNotFPUCondition(dst);
} else {
- TurboAssembler::LoadZeroIfFPUCondition(dst);
+ MacroAssembler::LoadZeroIfFPUCondition(dst);
}
bind(&cont);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
- TurboAssembler::CompareIsNanF64(lhs, rhs);
- TurboAssembler::BranchFalseF(&not_nan);
+ MacroAssembler::CompareIsNanF64(lhs, rhs);
+ MacroAssembler::BranchFalseF(&not_nan);
// If one of the operands is NaN, return 1 for f64.ne, else 0.
if (cond == ne) {
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
} else {
- TurboAssembler::Move(dst, zero_reg);
+ MacroAssembler::Move(dst, zero_reg);
}
- TurboAssembler::Branch(&cont);
+ MacroAssembler::Branch(&cont);
bind(&not_nan);
- TurboAssembler::li(dst, 1);
+ MacroAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond =
- liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
- TurboAssembler::CompareF64(fcond, lhs, rhs);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ MacroAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
- TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ MacroAssembler::LoadZeroIfNotFPUCondition(dst);
} else {
- TurboAssembler::LoadZeroIfFPUCondition(dst);
+ MacroAssembler::LoadZeroIfFPUCondition(dst);
}
bind(&cont);
@@ -2135,21 +2102,25 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
- MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
+ MemOperand src_op =
+ liftoff::GetMemOp(this, addr, offset_reg, offset_imm, i64_offset);
*protected_load_pc = pc_offset();
LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
- TurboAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op);
+ MacroAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op);
}
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
- MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst, offset, offset_imm, i64_offset);
if (protected_store_pc) *protected_store_pc = pc_offset();
LoadStoreLaneParams store_params(type.mem_rep(), lane);
- TurboAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op);
+ MacroAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op);
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -2256,25 +2227,25 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::FmoveLow(kScratchReg, src.fp());
+ MacroAssembler::FmoveLow(kScratchReg, src.fp());
fill_w(dst.fp().toW(), kScratchReg);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Move(kScratchReg, src.fp());
+ MacroAssembler::Move(kScratchReg, src.fp());
fill_d(dst.fp().toW(), kScratchReg);
}
#define SIMD_BINOP(name1, name2, type) \
void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
- TurboAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \
+ MacroAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \
src2.fp().toW()); \
} \
void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
- TurboAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \
+ MacroAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \
src2.fp().toW()); \
}
@@ -2292,7 +2263,7 @@ SIMD_BINOP(i64x2, i32x4_u, MSAU32)
#define SIMD_BINOP(name1, name2, type) \
void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
LiftoffRegister dst, LiftoffRegister src) { \
- TurboAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \
+ MacroAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \
}
SIMD_BINOP(i16x8, i8x16_s, MSAS8)
@@ -3483,14 +3454,14 @@ void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx);
- TurboAssembler::FmoveLow(dst.fp(), kScratchReg);
+ MacroAssembler::FmoveLow(dst.fp(), kScratchReg);
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx);
- TurboAssembler::Move(dst.fp(), kScratchReg);
+ MacroAssembler::Move(dst.fp(), kScratchReg);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -3537,7 +3508,7 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- TurboAssembler::FmoveLow(kScratchReg, src2.fp());
+ MacroAssembler::FmoveLow(kScratchReg, src2.fp());
if (dst != src1) {
move_v(dst.fp().toW(), src1.fp().toW());
}
@@ -3548,7 +3519,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- TurboAssembler::Move(kScratchReg, src2.fp());
+ MacroAssembler::Move(kScratchReg, src2.fp());
if (dst != src1) {
move_v(dst.fp().toW(), src1.fp().toW());
}
@@ -3584,8 +3555,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- TurboAssembler::Uld(limit_address, MemOperand(limit_address));
- TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+ MacroAssembler::Uld(limit_address, MemOperand(limit_address));
+ MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -3620,9 +3591,9 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
if (IsEnabled(MIPS_SIMD)) {
- TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset));
+ MacroAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset));
} else {
- TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
+ MacroAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
}
fp_regs.clear(reg);
offset += slot_size;
@@ -3637,9 +3608,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
if (IsEnabled(MIPS_SIMD)) {
- TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset));
+ MacroAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset));
} else {
- TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
+ MacroAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
}
fp_regs.clear(reg);
fp_offset += (IsEnabled(MIPS_SIMD) ? 16 : 8);
@@ -3676,7 +3647,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint(
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
- TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
+ MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
void LiftoffAssembler::CallC(const ValueKindSig* sig,
@@ -3758,7 +3729,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
Daddu(sp, sp, -size);
- TurboAssembler::Move(addr, sp);
+ MacroAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 348db4d1af..808200fd47 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -62,47 +62,6 @@ inline MemOperand GetStackSlot(uint32_t offset) {
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- case kUnsignedLessThan:
- return lt;
- case kSignedLessEqual:
- case kUnsignedLessEqual:
- return le;
- case kSignedGreaterEqual:
- case kUnsignedGreaterEqual:
- return ge;
- case kSignedGreaterThan:
- case kUnsignedGreaterThan:
- return gt;
- }
-}
-
-inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- case kUnequal:
- case kSignedLessThan:
- case kSignedLessEqual:
- case kSignedGreaterThan:
- case kSignedGreaterEqual:
- return true;
- case kUnsignedLessThan:
- case kUnsignedLessEqual:
- case kUnsignedGreaterThan:
- case kUnsignedGreaterEqual:
- return false;
- default:
- UNREACHABLE();
- }
- return false;
-}
-
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -152,12 +111,13 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
int frame_size =
GetTotalFrameSize() -
(V8_EMBEDDED_CONSTANT_POOL_BOOL ? 3 : 2) * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
@@ -218,7 +178,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation);
// Now allocate the stack space. Note that this might do more than just
- // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
SubS64(sp, sp, Operand(frame_size), r0);
// Jump back to the start of the function, from {pc_offset()} to
@@ -254,14 +214,13 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return (kind == kS128 || is_reference(kind));
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- mov(reg.gp(), Operand(value.to_i32(), rmode));
+ mov(reg.gp(), Operand(value.to_i32()));
break;
case kI64:
- mov(reg.gp(), Operand(value.to_i64(), rmode));
+ mov(reg.gp(), Operand(value.to_i64()));
break;
case kF32: {
UseScratchRegisterScope temps(this);
@@ -307,7 +266,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
- LoadTaggedPointerField(dst, MemOperand(instance, offset), r0);
+ LoadTaggedField(dst, MemOperand(instance, offset), r0);
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -324,7 +283,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
offset_reg = ip;
}
- LoadTaggedPointerField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
+ LoadTaggedField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -343,16 +302,10 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, ip, MemoryChunk::kPointersFromHereAreInterestingMask,
- ne, &write_barrier);
- b(&exit);
- bind(&write_barrier);
+ to_condition(kZero), &exit);
JumpIfSmi(src.gp(), &exit);
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(src.gp(), src.gp());
- }
CheckPageFlag(src.gp(), ip, MemoryChunk::kPointersToHereAreInterestingMask,
eq, &exit);
mov(ip, Operand(offset_imm));
@@ -543,16 +496,19 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList /* pinned */) {
- Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
+ LoadType type, LiftoffRegList /* pinned */,
+ bool i64_offset) {
+ Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
lwsync();
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool i64_offset) {
lwsync();
- Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);
+ Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true,
+ i64_offset);
sync();
}
@@ -564,17 +520,19 @@ constexpr bool is_be = false;
#define ATOMIC_OP(instr) \
{ \
+ if (!i64_offset && offset_reg != no_reg) { \
+ ZeroExtWord32(ip, offset_reg); \
+ offset_reg = ip; \
+ } \
+ \
Register offset = r0; \
if (offset_imm != 0) { \
- mov(ip, Operand(offset_imm)); \
- if (offset_reg != no_reg) { \
- add(ip, ip, offset_reg); \
- } \
+ mov(offset, Operand(offset_imm)); \
+ if (offset_reg != no_reg) add(offset, offset, offset_reg); \
+ mr(ip, offset); \
offset = ip; \
- } else { \
- if (offset_reg != no_reg) { \
- offset = offset_reg; \
- } \
+ } else if (offset_reg != no_reg) { \
+ offset = offset_reg; \
} \
\
MemOperand dst = MemOperand(offset, dst_addr); \
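
The prologue shared by these atomic macros folds offset_reg and offset_imm into a single index register, zero-extending a 32-bit offset first. A standalone sketch of that selection logic with the registers modeled as plain integers (the function name and driver below are illustrative, not V8 code):

#include <cstdint>
#include <cstdio>

// Mirrors the offset selection in ATOMIC_OP: use r0 when an immediate is
// involved, otherwise the (possibly zero-extended) offset register.
uint64_t EffectiveIndex(uint64_t offset_reg, bool have_offset_reg,
                        uint64_t offset_imm, bool i64_offset) {
  if (!i64_offset && have_offset_reg) {
    offset_reg = static_cast<uint32_t>(offset_reg);  // ZeroExtWord32
  }
  uint64_t index = 0;  // plays the role of r0
  if (offset_imm != 0) {
    index = offset_imm;
    if (have_offset_reg) index += offset_reg;  // add(offset, offset, offset_reg)
  } else if (have_offset_reg) {
    index = offset_reg;
  }
  return index;  // MemOperand(index, dst_addr) then addresses dst_addr + index
}

int main() {
  std::printf("%llu\n",
              (unsigned long long)EffectiveIndex(0xFFFFFFFF00000010ull, true,
                                                 0x20, /*i64_offset=*/false));
  // Prints 48: the high half of the 32-bit offset is dropped before adding.
}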
@@ -651,55 +609,63 @@ constexpr bool is_be = false;
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
ATOMIC_OP(add);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
ATOMIC_OP(sub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
ATOMIC_OP(and_);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
ATOMIC_OP(orx);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
ATOMIC_OP(xor_);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
+ if (!i64_offset && offset_reg != no_reg) {
+ ZeroExtWord32(ip, offset_reg);
+ offset_reg = ip;
+ }
+
Register offset = r0;
if (offset_imm != 0) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- add(ip, ip, offset_reg);
- }
+ mov(offset, Operand(offset_imm));
+ if (offset_reg != no_reg) add(offset, offset, offset_reg);
+ mr(ip, offset);
offset = ip;
- } else {
- if (offset_reg != no_reg) {
- offset = offset_reg;
- }
+ } else if (offset_reg != no_reg) {
+ offset = offset_reg;
}
MemOperand dst = MemOperand(offset, dst_addr);
switch (type.value()) {
case StoreType::kI32Store8:
case StoreType::kI64Store8: {
- TurboAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp());
+ MacroAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp());
break;
}
case StoreType::kI32Store16:
@@ -709,10 +675,10 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
push(scratch);
ByteReverseU16(r0, value.gp(), scratch);
pop(scratch);
- TurboAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp());
+ MacroAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp());
ByteReverseU16(result.gp(), result.gp(), ip);
} else {
- TurboAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp());
+ MacroAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp());
}
break;
}
@@ -723,20 +689,20 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
push(scratch);
ByteReverseU32(r0, value.gp(), scratch);
pop(scratch);
- TurboAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp());
+ MacroAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp());
ByteReverseU32(result.gp(), result.gp(), ip);
} else {
- TurboAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp());
+ MacroAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp());
}
break;
}
case StoreType::kI64Store: {
if (is_be) {
ByteReverseU64(r0, value.gp());
- TurboAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp());
+ MacroAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp());
ByteReverseU64(result.gp(), result.gp());
} else {
- TurboAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp());
+ MacroAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp());
}
break;
}
@@ -748,24 +714,26 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool i64_offset) {
+ if (!i64_offset && offset_reg != no_reg) {
+ ZeroExtWord32(ip, offset_reg);
+ offset_reg = ip;
+ }
+
Register offset = r0;
if (offset_imm != 0) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- add(ip, ip, offset_reg);
- }
+ mov(offset, Operand(offset_imm));
+ if (offset_reg != no_reg) add(offset, offset, offset_reg);
+ mr(ip, offset);
offset = ip;
- } else {
- if (offset_reg != no_reg) {
- offset = offset_reg;
- }
+ } else if (offset_reg != no_reg) {
+ offset = offset_reg;
}
MemOperand dst = MemOperand(offset, dst_addr);
switch (type.value()) {
case StoreType::kI32Store8:
case StoreType::kI64Store8: {
- TurboAssembler::AtomicCompareExchange<uint8_t>(
+ MacroAssembler::AtomicCompareExchange<uint8_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
break;
}
@@ -779,12 +747,12 @@ void LiftoffAssembler::AtomicCompareExchange(
ByteReverseU16(new_value.gp(), new_value.gp(), scratch);
ByteReverseU16(expected.gp(), expected.gp(), scratch);
pop(scratch);
- TurboAssembler::AtomicCompareExchange<uint16_t>(
+ MacroAssembler::AtomicCompareExchange<uint16_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU16(result.gp(), result.gp(), r0);
Pop(new_value.gp(), expected.gp());
} else {
- TurboAssembler::AtomicCompareExchange<uint16_t>(
+ MacroAssembler::AtomicCompareExchange<uint16_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
}
break;
@@ -799,12 +767,12 @@ void LiftoffAssembler::AtomicCompareExchange(
ByteReverseU32(new_value.gp(), new_value.gp(), scratch);
ByteReverseU32(expected.gp(), expected.gp(), scratch);
pop(scratch);
- TurboAssembler::AtomicCompareExchange<uint32_t>(
+ MacroAssembler::AtomicCompareExchange<uint32_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU32(result.gp(), result.gp(), r0);
Pop(new_value.gp(), expected.gp());
} else {
- TurboAssembler::AtomicCompareExchange<uint32_t>(
+ MacroAssembler::AtomicCompareExchange<uint32_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
}
break;
@@ -814,12 +782,12 @@ void LiftoffAssembler::AtomicCompareExchange(
Push(new_value.gp(), expected.gp());
ByteReverseU64(new_value.gp(), new_value.gp());
ByteReverseU64(expected.gp(), expected.gp());
- TurboAssembler::AtomicCompareExchange<uint64_t>(
+ MacroAssembler::AtomicCompareExchange<uint64_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU64(result.gp(), result.gp());
Pop(new_value.gp(), expected.gp());
} else {
- TurboAssembler::AtomicCompareExchange<uint64_t>(
+ MacroAssembler::AtomicCompareExchange<uint64_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
}
break;
@@ -1642,12 +1610,11 @@ void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ bool use_signed = is_signed(cond);
if (rhs != no_reg) {
switch (kind) {
@@ -1661,7 +1628,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kRefNull:
case kRtt:
- DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ DCHECK(cond == kEqual || cond == kNotEqual);
#if defined(V8_COMPRESS_POINTERS)
if (use_signed) {
CmpS32(lhs, rhs);
@@ -1692,21 +1659,19 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
CmpS32(lhs, Operand::Zero(), r0);
}
- b(cond, label);
+ b(to_condition(cond), label);
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
- Condition cond = liftoff::ToCondition(liftoff_cond);
+ bool use_signed = is_signed(cond);
if (use_signed) {
CmpS32(lhs, Operand(imm), r0);
} else {
CmpU32(lhs, Operand(imm), r0);
}
- b(cond, label);
+ b(to_condition(cond), label);
}
void LiftoffAssembler::emit_i32_subi_jump_negative(
@@ -1725,10 +1690,9 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
bind(&done);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ bool use_signed = is_signed(cond);
if (use_signed) {
CmpS32(lhs, rhs);
} else {
@@ -1736,7 +1700,7 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
}
Label done;
mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ b(to_condition(to_condition(cond)), &done);
mov(dst, Operand::Zero());
bind(&done);
}
@@ -1750,10 +1714,10 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
bind(&done);
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ bool use_signed = is_signed(cond);
if (use_signed) {
CmpS64(lhs.gp(), rhs.gp());
} else {
@@ -1761,23 +1725,23 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
Label done;
mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ b(to_condition(to_condition(cond)), &done);
mov(dst, Operand::Zero());
bind(&done);
}
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
fcmpu(lhs, rhs, cr0);
Label nan, done;
bunordered(&nan, cr0);
mov(dst, Operand::Zero());
- b(NegateCondition(liftoff::ToCondition(liftoff_cond)), &done, cr0);
+ b(NegateCondition(to_condition(to_condition(cond))), &done, cr0);
mov(dst, Operand(1));
b(&done);
bind(&nan);
- if (liftoff_cond == kUnequal) {
+ if (cond == kNotEqual) {
mov(dst, Operand(1));
} else {
mov(dst, Operand::Zero());
@@ -1785,10 +1749,10 @@ void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
bind(&done);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- emit_f32_set_cond(liftoff_cond, dst, lhs, rhs);
+ emit_f32_set_cond(to_condition(cond), dst, lhs, rhs);
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -1798,71 +1762,73 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_add_sat_s, I16x8AddSatS) \
- V(i16x8_sub_sat_s, I16x8SubSatS) \
- V(i16x8_add_sat_u, I16x8AddSatU) \
- V(i16x8_sub_sat_u, I16x8SubSatU) \
- V(i16x8_sconvert_i32x4, I16x8SConvertI32x4) \
- V(i16x8_uconvert_i32x4, I16x8UConvertI32x4) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_add_sat_s, I8x16AddSatS) \
- V(i8x16_sub_sat_s, I8x16SubSatS) \
- V(i8x16_add_sat_u, I8x16AddSatU) \
- V(i8x16_sub_sat_u, I8x16SubSatU) \
- V(i8x16_sconvert_i16x8, I8x16SConvertI16x8) \
- V(i8x16_uconvert_i16x8, I8x16UConvertI16x8) \
- V(s128_and, S128And) \
- V(s128_or, S128Or) \
- V(s128_xor, S128Xor) \
+#define SIMD_BINOP_LIST(V) \
+ V(f64x2_add, F64x2Add) \
+ V(f64x2_sub, F64x2Sub) \
+ V(f64x2_mul, F64x2Mul) \
+ V(f64x2_div, F64x2Div) \
+ V(f64x2_eq, F64x2Eq) \
+ V(f64x2_lt, F64x2Lt) \
+ V(f64x2_le, F64x2Le) \
+ V(f32x4_add, F32x4Add) \
+ V(f32x4_sub, F32x4Sub) \
+ V(f32x4_mul, F32x4Mul) \
+ V(f32x4_div, F32x4Div) \
+ V(f32x4_min, F32x4Min) \
+ V(f32x4_max, F32x4Max) \
+ V(f32x4_eq, F32x4Eq) \
+ V(f32x4_lt, F32x4Lt) \
+ V(f32x4_le, F32x4Le) \
+ V(i64x2_add, I64x2Add) \
+ V(i64x2_sub, I64x2Sub) \
+ V(i64x2_eq, I64x2Eq) \
+ V(i64x2_gt_s, I64x2GtS) \
+ V(i32x4_add, I32x4Add) \
+ V(i32x4_sub, I32x4Sub) \
+ V(i32x4_mul, I32x4Mul) \
+ V(i32x4_min_s, I32x4MinS) \
+ V(i32x4_min_u, I32x4MinU) \
+ V(i32x4_max_s, I32x4MaxS) \
+ V(i32x4_max_u, I32x4MaxU) \
+ V(i32x4_eq, I32x4Eq) \
+ V(i32x4_gt_s, I32x4GtS) \
+ V(i32x4_gt_u, I32x4GtU) \
+ V(i16x8_add, I16x8Add) \
+ V(i16x8_sub, I16x8Sub) \
+ V(i16x8_mul, I16x8Mul) \
+ V(i16x8_min_s, I16x8MinS) \
+ V(i16x8_min_u, I16x8MinU) \
+ V(i16x8_max_s, I16x8MaxS) \
+ V(i16x8_max_u, I16x8MaxU) \
+ V(i16x8_eq, I16x8Eq) \
+ V(i16x8_gt_s, I16x8GtS) \
+ V(i16x8_gt_u, I16x8GtU) \
+ V(i16x8_add_sat_s, I16x8AddSatS) \
+ V(i16x8_sub_sat_s, I16x8SubSatS) \
+ V(i16x8_add_sat_u, I16x8AddSatU) \
+ V(i16x8_sub_sat_u, I16x8SubSatU) \
+ V(i16x8_sconvert_i32x4, I16x8SConvertI32x4) \
+ V(i16x8_uconvert_i32x4, I16x8UConvertI32x4) \
+ V(i16x8_rounding_average_u, I16x8RoundingAverageU) \
+ V(i8x16_add, I8x16Add) \
+ V(i8x16_sub, I8x16Sub) \
+ V(i8x16_min_s, I8x16MinS) \
+ V(i8x16_min_u, I8x16MinU) \
+ V(i8x16_max_s, I8x16MaxS) \
+ V(i8x16_max_u, I8x16MaxU) \
+ V(i8x16_eq, I8x16Eq) \
+ V(i8x16_gt_s, I8x16GtS) \
+ V(i8x16_gt_u, I8x16GtU) \
+ V(i8x16_add_sat_s, I8x16AddSatS) \
+ V(i8x16_sub_sat_s, I8x16SubSatS) \
+ V(i8x16_add_sat_u, I8x16AddSatU) \
+ V(i8x16_sub_sat_u, I8x16SubSatU) \
+ V(i8x16_sconvert_i16x8, I8x16SConvertI16x8) \
+ V(i8x16_uconvert_i16x8, I8x16UConvertI16x8) \
+ V(i8x16_rounding_average_u, I8x16RoundingAverageU) \
+ V(s128_and, S128And) \
+ V(s128_or, S128Or) \
+ V(s128_xor, S128Xor) \
V(s128_and_not, S128AndNot)
#define EMIT_SIMD_BINOP(name, op) \
@@ -1894,6 +1860,7 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U) \
V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
+ V(i32x4_dot_i16x8_s, I32x4DotI16x8S) \
V(i16x8_ne, I16x8Ne) \
V(i16x8_ge_s, I16x8GeS) \
V(i16x8_ge_u, I16x8GeU) \
@@ -1901,9 +1868,12 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U) \
V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U) \
+ V(i16x8_q15mulr_sat_s, I16x8Q15MulRSatS) \
+ V(i16x8_dot_i8x16_i7x16_s, I16x8DotI8x16S) \
V(i8x16_ne, I8x16Ne) \
V(i8x16_ge_s, I8x16GeS) \
- V(i8x16_ge_u, I8x16GeU)
+ V(i8x16_ge_u, I8x16GeU) \
+ V(i8x16_swizzle, I8x16Swizzle)
#define EMIT_SIMD_BINOP_WITH_SCRATCH(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
@@ -1969,12 +1939,15 @@ SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
V(f64x2_ceil, F64x2Ceil, true, bool) \
V(f64x2_floor, F64x2Floor, true, bool) \
V(f64x2_trunc, F64x2Trunc, true, bool) \
+ V(f64x2_promote_low_f32x4, F64x2PromoteLowF32x4, , void) \
V(f32x4_abs, F32x4Abs, , void) \
V(f32x4_neg, F32x4Neg, , void) \
V(f32x4_sqrt, F32x4Sqrt, , void) \
V(f32x4_ceil, F32x4Ceil, true, bool) \
V(f32x4_floor, F32x4Floor, true, bool) \
V(f32x4_trunc, F32x4Trunc, true, bool) \
+ V(f32x4_sconvert_i32x4, F32x4SConvertI32x4, , void) \
+ V(f32x4_uconvert_i32x4, F32x4UConvertI32x4, , void) \
V(i64x2_neg, I64x2Neg, , void) \
V(f64x2_convert_low_i32x4_s, F64x2ConvertLowI32x4S, , void) \
V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, , void) \
@@ -1982,6 +1955,7 @@ SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
V(i32x4_neg, I32x4Neg, , void) \
V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, , void) \
V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, , void) \
+ V(i32x4_uconvert_f32x4, I32x4UConvertF32x4, , void) \
V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, , void) \
V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, , void) \
V(i8x16_popcnt, I8x16Popcnt, , void) \
@@ -1997,12 +1971,16 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
-#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
- V(i64x2_abs, I64x2Abs, , void) \
- V(i32x4_abs, I32x4Abs, , void) \
- V(i16x8_abs, I16x8Abs, , void) \
- V(i16x8_neg, I16x8Neg, , void) \
- V(i8x16_abs, I8x16Abs, , void) \
+#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
+ V(f32x4_demote_f64x2_zero, F32x4DemoteF64x2Zero, , void) \
+ V(i64x2_abs, I64x2Abs, , void) \
+ V(i32x4_abs, I32x4Abs, , void) \
+ V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void) \
+ V(i32x4_trunc_sat_f64x2_s_zero, I32x4TruncSatF64x2SZero, , void) \
+ V(i32x4_trunc_sat_f64x2_u_zero, I32x4TruncSatF64x2UZero, , void) \
+ V(i16x8_abs, I16x8Abs, , void) \
+ V(i16x8_neg, I16x8Neg, , void) \
+ V(i8x16_abs, I8x16Abs, , void) \
V(i8x16_neg, I8x16Neg, , void)
#define EMIT_SIMD_UNOP_WITH_SCRATCH(name, op, return_val, return_type) \
@@ -2029,6 +2007,70 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
+#define SIMD_QFM_LIST(V) \
+ V(f64x2_qfma, F64x2Qfma) \
+ V(f64x2_qfms, F64x2Qfms) \
+ V(f32x4_qfma, F32x4Qfma) \
+ V(f32x4_qfms, F32x4Qfms)
+
+#define EMIT_SIMD_QFM(name, op) \
+ void LiftoffAssembler::emit_##name( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
+ LiftoffRegister src3) { \
+ op(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp().toSimd(), \
+ src3.fp().toSimd(), kScratchSimd128Reg); \
+ }
+SIMD_QFM_LIST(EMIT_SIMD_QFM)
+#undef EMIT_SIMD_QFM
+#undef SIMD_QFM_LIST
+
+#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
+ V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
+ V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
+ V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
+ V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
+#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ op(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg, \
+ kScratchSimd128Reg2); \
+ }
+SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
+#undef EMIT_SIMD_EXT_ADD_PAIRWISE
+#undef SIMD_EXT_ADD_PAIRWISE_LIST
+
+#define SIMD_RELAXED_BINOP_LIST(V) \
+ V(i8x16_relaxed_swizzle, i8x16_swizzle) \
+ V(f64x2_relaxed_min, f64x2_pmin) \
+ V(f64x2_relaxed_max, f64x2_pmax) \
+ V(f32x4_relaxed_min, f32x4_pmin) \
+ V(f32x4_relaxed_max, f32x4_pmax) \
+ V(i16x8_relaxed_q15mulr_s, i16x8_q15mulr_sat_s)
+
+#define SIMD_VISIT_RELAXED_BINOP(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ emit_##op(dst, lhs, rhs); \
+ }
+SIMD_RELAXED_BINOP_LIST(SIMD_VISIT_RELAXED_BINOP)
+#undef SIMD_VISIT_RELAXED_BINOP
+#undef SIMD_RELAXED_BINOP_LIST
+
+#define SIMD_RELAXED_UNOP_LIST(V) \
+ V(i32x4_relaxed_trunc_f32x4_s, i32x4_sconvert_f32x4) \
+ V(i32x4_relaxed_trunc_f32x4_u, i32x4_uconvert_f32x4) \
+ V(i32x4_relaxed_trunc_f64x2_s_zero, i32x4_trunc_sat_f64x2_s_zero) \
+ V(i32x4_relaxed_trunc_f64x2_u_zero, i32x4_trunc_sat_f64x2_u_zero)
+
+#define SIMD_VISIT_RELAXED_UNOP(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ emit_##op(dst, src); \
+ }
+SIMD_RELAXED_UNOP_LIST(SIMD_VISIT_RELAXED_UNOP)
+#undef SIMD_VISIT_RELAXED_UNOP
+#undef SIMD_RELAXED_UNOP_LIST
+
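These relaxed-SIMD tables only forward each relaxed operation to the existing deterministic implementation. A self-contained illustration of the X-macro pattern they rely on, using invented names rather than the Liftoff ones:

#include <cstdio>

// Minimal standalone X-macro example: the list macro is instantiated with a
// generator macro to stamp out one forwarding function per entry.
#define RELAXED_LIST(V)      \
  V(relaxed_add, strict_add) \
  V(relaxed_mul, strict_mul)

int strict_add(int a, int b) { return a + b; }
int strict_mul(int a, int b) { return a * b; }

#define VISIT_RELAXED(name, op) \
  int name(int a, int b) { return op(a, b); }
RELAXED_LIST(VISIT_RELAXED)
#undef VISIT_RELAXED
#undef RELAXED_LIST

int main() { std::printf("%d %d\n", relaxed_add(2, 3), relaxed_mul(2, 3)); }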
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
F64x2Splat(dst.fp().toSimd(), src.fp(), r0);
@@ -2200,7 +2242,42 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
- bailout(kSimd, "Load transform unimplemented");
+ MemOperand src_op = MemOperand(src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ LoadAndExtend8x8SLE(dst.fp().toSimd(), src_op, r0);
+ } else if (memtype == MachineType::Uint8()) {
+ LoadAndExtend8x8ULE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
+ } else if (memtype == MachineType::Int16()) {
+ LoadAndExtend16x4SLE(dst.fp().toSimd(), src_op, r0);
+ } else if (memtype == MachineType::Uint16()) {
+ LoadAndExtend16x4ULE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
+ } else if (memtype == MachineType::Int32()) {
+ LoadAndExtend32x2SLE(dst.fp().toSimd(), src_op, r0);
+ } else if (memtype == MachineType::Uint32()) {
+ LoadAndExtend32x2ULE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
+ }
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ LoadV32ZeroLE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ LoadV64ZeroLE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ LoadAndSplat8x16LE(dst.fp().toSimd(), src_op, r0);
+ } else if (memtype == MachineType::Int16()) {
+ LoadAndSplat16x8LE(dst.fp().toSimd(), src_op, r0);
+ } else if (memtype == MachineType::Int32()) {
+ LoadAndSplat32x4LE(dst.fp().toSimd(), src_op, r0);
+ } else if (memtype == MachineType::Int64()) {
+ LoadAndSplat64x2LE(dst.fp().toSimd(), src_op, r0);
+ }
+ }
}
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
@@ -2214,66 +2291,63 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
+ if (!i64_offset && offset_reg != no_reg) {
+ ZeroExtWord32(ip, offset_reg);
+ offset_reg = ip;
+ }
+ MemOperand src_op = MemOperand(addr, offset_reg, offset_imm);
+
+ MachineType mem_type = type.mem_type();
+ if (dst != src) {
+ vor(dst.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd());
+ }
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ if (mem_type == MachineType::Int8()) {
+ LoadLane8LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
+ } else if (mem_type == MachineType::Int16()) {
+ LoadLane16LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
+ } else if (mem_type == MachineType::Int32()) {
+ LoadLane32LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), mem_type);
+ LoadLane64LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
+ }
}
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
- bailout(kSimd, "store lane");
-}
-
-void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_swizzle");
-}
-
-void LiftoffAssembler::emit_i8x16_relaxed_swizzle(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kRelaxedSimd, "emit_i8x16_relaxed_swizzle");
-}
-
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_s");
-}
-
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_u");
-}
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
+ if (!i64_offset && offset != no_reg) {
+ ZeroExtWord32(ip, offset);
+ offset = ip;
+ }
+ MemOperand dst_op = MemOperand(dst, offset, offset_imm);
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero(
- LiftoffRegister dst, LiftoffRegister src) {
- bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_s_zero");
-}
+ if (protected_store_pc) *protected_store_pc = pc_offset();
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero(
- LiftoffRegister dst, LiftoffRegister src) {
- bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_u_zero");
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ StoreLane8LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
+ } else if (rep == MachineRepresentation::kWord16) {
+ StoreLane16LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
+ } else if (rep == MachineRepresentation::kWord32) {
+ StoreLane32LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ StoreLane64LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
+ }
}
void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- bailout(kRelaxedSimd, "emit_s128_relaxed_laneselect");
-}
-
-void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kRelaxedSimd, "emit_f64x2_relaxed_min");
-}
-
-void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kRelaxedSimd, "emit_f64x2_relaxed_max");
+ emit_s128_select(dst, src1, src2, mask);
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
@@ -2282,26 +2356,9 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
kScratchSimd128Reg);
}
-void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f64x2.promote_low_f32x4");
-}
-
-void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_relaxed_min");
-}
-
-void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32x4_relaxed_max");
-}
-
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ I64x2BitMask(dst.gp(), src.fp().toSimd(), r0, kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
@@ -2318,63 +2375,20 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4_bitmask");
-}
-
-void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kRelaxedSimd, "i32x4_dot_i16x8_s");
-}
-
-void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
-}
-
-void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+ I32x4BitMask(dst.gp(), src.fp().toSimd(), r0, kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8_bitmask");
-}
-
-void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
-}
-
-void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
-}
-
-void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8_q15mulr_sat_s");
-}
-
-void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kRelaxedSimd, "emit_i16x8_relaxed_q15mulr_s");
-}
-
-void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
+ I16x8BitMask(dst.gp(), src.fp().toSimd(), r0, kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
+ I32x4DotI8x16AddS(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),
+ acc.fp().toSimd());
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -2382,7 +2396,26 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "i8x16_shuffle");
+ // Remap the shuffle indices to match IBM lane numbering.
+ // TODO(miladfarca): Put this in a function and share it with the instruction
+ // selector.
+ int max_index = 15;
+ int total_lane_count = 2 * kSimd128Size;
+ uint8_t shuffle_remapped[kSimd128Size];
+ for (int i = 0; i < kSimd128Size; i++) {
+ uint8_t current_index = shuffle[i];
+ shuffle_remapped[i] = (current_index <= max_index
+ ? max_index - current_index
+ : total_lane_count - current_index + max_index);
+ }
+ uint64_t vals[2];
+ memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
+#ifdef V8_TARGET_BIG_ENDIAN
+ vals[0] = ByteReverse(vals[0]);
+ vals[1] = ByteReverse(vals[1]);
+#endif
+ I8x16Shuffle(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(), vals[1],
+ vals[0], r0, ip, kScratchSimd128Reg);
}
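
The remapping above mirrors each shuffle index within its 16-lane source vector to match the lane numbering the PPC permute expects (the comment calls it IBM lane numbering). A standalone check of that formula; the driver below is hypothetical, not part of V8:

#include <cstdint>
#include <cstdio>

// Same expression as in emit_i8x16_shuffle: indices into the 32 input lanes
// are mirrored within each 16-lane source vector.
uint8_t RemapLane(uint8_t current_index) {
  const int max_index = 15;
  const int total_lane_count = 32;  // 2 * kSimd128Size
  return current_index <= max_index
             ? max_index - current_index
             : total_lane_count - current_index + max_index;
}

int main() {
  for (int idx : {0, 5, 15, 16, 20, 31}) {
    std::printf("%2d -> %2d\n", idx, RemapLane(static_cast<uint8_t>(idx)));
    // 0 -> 15, 15 -> 0, 16 -> 31, 31 -> 16, ...
  }
}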
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
@@ -2392,44 +2425,26 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i8x16_bitmask");
+ I8x16BitMask(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg);
}
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kUnsupportedArchitecture, "emit_s128_const");
+ uint64_t vals[2];
+ memcpy(vals, imms, sizeof(vals));
+#ifdef V8_TARGET_BIG_ENDIAN
+ vals[0] = ByteReverse(vals[0]);
+ vals[1] = ByteReverse(vals[1]);
+#endif
+ S128Const(dst.fp().toSimd(), vals[1], vals[0], r0, ip);
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- bailout(kUnsupportedArchitecture, "emit_s128select");
-}
-
-void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4_sconvert_f32x4");
-}
-
-void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4_uconvert_f32x4");
-}
-
-void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4_sconvert_i32x4");
-}
-
-void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4_uconvert_i32x4");
-}
-
-void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f32x4.demote_f64x2_zero");
+ S128Select(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp().toSimd(),
+ mask.fp().toSimd());
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
@@ -2456,56 +2471,6 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
kScratchSimd128Reg);
}
-void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
-}
-
-void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
-}
-
-void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_rounding_average_u");
-}
-
-void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_rounding_average_u");
-}
-
-void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfma");
-}
-
-void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfms");
-}
-
-void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfma");
-}
-
-void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfms");
-}
-
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
LoadU64(limit_address, MemOperand(limit_address), r0);
CmpU64(sp, limit_address);
@@ -2738,7 +2703,11 @@ void LiftoffStackSlots::Construct(int param_slots) {
break;
}
case kS128: {
- asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->LoadSimd128(kScratchSimd128Reg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSimd128Size));
+ asm_->StoreSimd128(kScratchSimd128Reg, MemOperand(sp), r0);
break;
}
default:
@@ -2766,7 +2735,8 @@ void LiftoffStackSlots::Construct(int param_slots) {
asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
break;
case kS128: {
- asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ asm_->AddS64(sp, sp, Operand(-kSimd128Size), r0);
+ asm_->StoreSimd128(src.reg().fp().toSimd(), MemOperand(sp), r0);
break;
}
default:
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
index 903cb07ef8..52a024cd72 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
@@ -23,31 +23,6 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- return lt;
- case kSignedLessEqual:
- return le;
- case kSignedGreaterThan:
- return gt;
- case kSignedGreaterEqual:
- return ge;
- case kUnsignedLessThan:
- return ult;
- case kUnsignedLessEqual:
- return ule;
- case kUnsignedGreaterThan:
- return ugt;
- case kUnsignedGreaterEqual:
- return uge;
- }
-}
-
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
@@ -86,19 +61,20 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(
+ MacroAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
@@ -221,7 +197,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
DCHECK_LE(0, offset);
- LoadTaggedPointerField(dst, MemOperand{instance, offset});
+ LoadTaggedField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -231,21 +207,21 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_s(dst, src);
+ MacroAssembler::Neg_s(dst, src);
}
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_d(dst, src);
+ MacroAssembler::Neg_d(dst, src);
}
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float32Min(dst, lhs, rhs);
+ MacroAssembler::Float32Min(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float32Max(dst, lhs, rhs);
+ MacroAssembler::Float32Max(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
@@ -255,12 +231,12 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float64Min(dst, lhs, rhs);
+ MacroAssembler::Float64Min(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float64Max(dst, lhs, rhs);
+ MacroAssembler::Float64Max(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
@@ -303,17 +279,17 @@ FP_UNOP(f64_sqrt, fsqrt_d)
#undef FP_UNOP
#undef FP_UNOP_RETURN_TRUE
-static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) {
+static FPUCondition ConditionToConditionCmpFPU(Condition condition) {
switch (condition) {
case kEqual:
return EQ;
- case kUnequal:
+ case kNotEqual:
return NE;
case kUnsignedLessThan:
return LT;
- case kUnsignedGreaterEqual:
+ case kUnsignedGreaterThanEqual:
return GE;
- case kUnsignedLessEqual:
+ case kUnsignedLessThanEqual:
return LE;
case kUnsignedGreaterThan:
return GT;
@@ -323,18 +299,18 @@ static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) {
UNREACHABLE();
}
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
- TurboAssembler::CompareF32(dst, fcond, lhs, rhs);
+ FPUCondition fcond = ConditionToConditionCmpFPU(cond);
+ MacroAssembler::CompareF32(dst, fcond, lhs, rhs);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
- TurboAssembler::CompareF64(dst, fcond, lhs, rhs);
+ FPUCondition fcond = ConditionToConditionCmpFPU(cond);
+ MacroAssembler::CompareF64(dst, fcond, lhs, rhs);
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -354,24 +330,41 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
Branch(target, condition, scratch, Operand(zero_reg));
}
+// Implement vector popcnt following the scalar dense_popcnt below:
+// int dense_popcnt(uint32_t n)
+// {
+// int count = 32; // sizeof(uint32_t) * CHAR_BIT;
+// n ^= 0xFF'FF'FF'FF;
+// while(n)
+// {
+// --count;
+// n &= n - 1;
+// }
+// return count;
+// }
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
VRegister src_v = src.fp().toV();
VRegister dst_v = dst.fp().toV();
- Label t;
-
+ Label t, done;
VU.set(kScratchReg, E8, m1);
vmv_vv(kSimd128ScratchReg, src_v);
- vmv_vv(dst_v, kSimd128RegZero);
-
+ li(kScratchReg, 0xFF);
+ vxor_vx(kSimd128ScratchReg, kSimd128ScratchReg, kScratchReg);
+ vmv_vi(dst_v, 8);
+ vmv_vi(kSimd128RegZero, 0);
bind(&t);
- vmsne_vv(v0, kSimd128ScratchReg, kSimd128RegZero);
- vadd_vi(dst_v, dst_v, 1, Mask);
- vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, Mask);
- vand_vv(kSimd128ScratchReg, kSimd128ScratchReg, kSimd128ScratchReg2);
- // kScratchReg = -1 if kSimd128ScratchReg == 0 i.e. no active element
- vfirst_m(kScratchReg, kSimd128ScratchReg);
- bgez(kScratchReg, &t);
+ vmsne_vi(v0, kSimd128ScratchReg, 0);
+ VU.set(kScratchReg, E16, m1);
+ vmv_xs(kScratchReg, v0);
+ beqz(kScratchReg, &done);
+ VU.set(kScratchReg, E8, m1);
+ vadd_vi(dst_v, dst_v, -1, MaskType::Mask);
+ vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, MaskType::Mask);
+ vand_vv(kSimd128ScratchReg, kSimd128ScratchReg2, kSimd128ScratchReg,
+ MaskType::Mask);
+ Branch(&t);
+ bind(&done);
}
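
The vector loop above is, per 8-bit lane, the same scheme as the scalar dense_popcnt in the comment: start the count at the lane width, complement the lane, and keep clearing one bit of the complement per iteration under a mask until every lane's complement is zero. A scalar trace of a single lane, as a sketch only:

#include <cstdint>
#include <cstdio>

// Scalar, per-byte version of the dense_popcnt scheme used above.
int DensePopcnt8(uint8_t n) {
  int count = 8;  // lane width in bits (vmv_vi(dst_v, 8))
  n ^= 0xFF;      // vxor_vx with 0xFF
  while (n) {     // vmsne_vi / beqz loop condition
    --count;      // vadd_vi(dst_v, dst_v, -1, Mask)
    n &= n - 1;   // clear the lowest set bit of the complement
  }
  return count;
}

int main() {
  std::printf("%d %d %d\n", DensePopcnt8(0x00), DensePopcnt8(0x0B),
              DensePopcnt8(0xFF));  // prints: 0 3 8
}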
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -616,23 +609,11 @@ void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
vsmul_vv(dst.fp().toV(), src1.fp().toV(), src2.fp().toV());
}
-void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
-}
-
-void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs,
- LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
-}
-
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
VU.set(kScratchReg, E32, m1);
vmv_xs(dst.gp(), kSimd128ScratchReg);
@@ -988,20 +969,20 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E8, m1);
- Label alltrue;
- li(kScratchReg, -1);
- vmv_sx(kSimd128ScratchReg, kScratchReg);
+ Label notalltrue;
+ vmv_vi(kSimd128ScratchReg, -1);
vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
vmv_xs(dst.gp(), kSimd128ScratchReg);
- beqz(dst.gp(), &alltrue);
+ beqz(dst.gp(), &notalltrue);
li(dst.gp(), 1);
- bind(&alltrue);
+ bind(&notalltrue);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E8, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
VU.set(kScratchReg, E32, m1);
vmv_xs(dst.gp(), kSimd128ScratchReg);
@@ -1140,6 +1121,7 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E16, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
VU.set(kScratchReg, E32, m1);
vmv_xs(dst.gp(), kSimd128ScratchReg);
@@ -1284,6 +1266,7 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E32, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
vmv_xs(dst.gp(), kSimd128ScratchReg);
}
@@ -1406,6 +1389,67 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
}
+void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ VU.set(kScratchReg, E8, m1);
+ vwmul_vv(kSimd128ScratchReg3, lhs.fp().toV(), rhs.fp().toV());
+ VU.set(kScratchReg, E16, m2);
+
+ constexpr int32_t FIRST_INDEX = 0b0101010101010101;
+ constexpr int32_t SECOND_INDEX = 0b1010101010101010;
+ li(kScratchReg, FIRST_INDEX);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, kSimd128ScratchReg3, v0);
+
+ li(kScratchReg, SECOND_INDEX);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, kSimd128ScratchReg3, kSimd128ScratchReg2);
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ LiftoffRegister acc) {
+ VU.set(kScratchReg, E8, m1);
+ VRegister intermediate = kSimd128ScratchReg3;
+ vwmul_vv(intermediate, lhs.fp().toV(), rhs.fp().toV()); // i16*16 v8 v9
+
+ constexpr int32_t FIRST_INDEX = 0b0001000100010001;
+ constexpr int32_t SECOND_INDEX = 0b0010001000100010;
+ constexpr int32_t THIRD_INDEX = 0b0100010001000100;
+ constexpr int32_t FOURTH_INDEX = 0b1000100010001000;
+
+ VU.set(kScratchReg, E16, m2);
+ li(kScratchReg, FIRST_INDEX);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, intermediate, v0); // i16*4 a
+ li(kScratchReg, SECOND_INDEX);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, intermediate, kSimd128ScratchReg2); // i16*4 b
+
+ VU.set(kScratchReg, E16, m1);
+ vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0); // i32*4 c
+
+ VU.set(kScratchReg, E16, m2);
+ li(kScratchReg, THIRD_INDEX);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, intermediate, v0); // i16*4 a
+
+ li(kScratchReg, FOURTH_INDEX);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, intermediate, kSimd128ScratchReg2); // i16*4 b
+
+ VU.set(kScratchReg, E16, m1);
+ vwadd_vv(kSimd128ScratchReg3, kSimd128ScratchReg, v0); // i32*4 c
+
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), dst.fp().toV(), kSimd128ScratchReg3);
+ vadd_vv(dst.fp().toV(), dst.fp().toV(), acc.fp().toV());
+}
+
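Both dot-product helpers above widen the byte lanes with vwmul and then use vcompress with alternating lane masks to separate even- and odd-indexed products before adding them pairwise. A host-side reference model of the i16x8 case, written as an illustration under that reading of the masks (not the V8 code itself):

#include <array>
#include <cstdint>
#include <cstdio>

// Reference model for i16x8.dot_i8x16_i7x16_s as implemented above:
// widen-multiply 16 byte lanes, then add even/odd product pairs.
std::array<int16_t, 8> DotI8x16(const std::array<int8_t, 16>& a,
                                const std::array<int8_t, 16>& b) {
  int16_t products[16];
  for (int i = 0; i < 16; ++i) {
    products[i] = static_cast<int16_t>(a[i]) * static_cast<int16_t>(b[i]);
  }
  std::array<int16_t, 8> result{};
  for (int i = 0; i < 8; ++i) {
    // vcompress with mask 0b0101... packs products[0,2,4,...]; the
    // 0b1010... mask packs products[1,3,5,...]; vadd_vv sums the pairs.
    result[i] = static_cast<int16_t>(products[2 * i] + products[2 * i + 1]);
  }
  return result;
}

int main() {
  std::array<int8_t, 16> a{}, b{};
  for (int i = 0; i < 16; ++i) {
    a[i] = static_cast<int8_t>(i);
    b[i] = 2;
  }
  auto r = DotI8x16(a, b);
  std::printf("%d %d\n", r[0], r[7]);  // 2*(0+1)=2 and 2*(14+15)=58
}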
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
@@ -1723,8 +1767,9 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
VU.set(kScratchReg, E32, m1);
VU.set(FPURoundingMode::RTZ);
vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
- vfcvt_x_f_v(dst.fp().toV(), src.fp().toV(), Mask);
+ vfcvt_x_f_v(dst.fp().toV(), kSimd128ScratchReg, Mask);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -1732,8 +1777,9 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
VU.set(kScratchReg, E32, m1);
VU.set(FPURoundingMode::RTZ);
vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
- vfcvt_xu_f_v(dst.fp().toV(), src.fp().toV(), Mask);
+ vfcvt_xu_f_v(dst.fp().toV(), kSimd128ScratchReg, Mask);
}
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
@@ -1754,48 +1800,48 @@ void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E16, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E8, m1);
VU.set(FPURoundingMode::RNE);
- vnclip_vi(dst.fp().toV(), v26, 0);
+ vnclip_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E16, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E16, m2);
- vmax_vx(v26, v26, zero_reg);
+ vmax_vx(kSimd128ScratchReg, kSimd128ScratchReg, zero_reg);
VU.set(kScratchReg, E8, m1);
VU.set(FPURoundingMode::RNE);
- vnclipu_vi(dst.fp().toV(), v26, 0);
+ vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E16, m1);
VU.set(FPURoundingMode::RNE);
- vnclip_vi(dst.fp().toV(), v26, 0);
+ vnclip_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E32, m2);
- vmax_vx(v26, v26, zero_reg);
+ vmax_vx(kSimd128ScratchReg, kSimd128ScratchReg, zero_reg);
VU.set(kScratchReg, E16, m1);
VU.set(FPURoundingMode::RNE);
- vnclipu_vi(dst.fp().toV(), v26, 0);
+ vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
@@ -1890,6 +1936,7 @@ void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
VU.set(kScratchReg, E8, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -1899,6 +1946,7 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
VU.set(kScratchReg, E16, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -1908,6 +1956,7 @@ void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
VU.set(kScratchReg, E64, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -1917,6 +1966,7 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
VU.set(kScratchReg, E32, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -2062,33 +2112,41 @@ void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfma");
+ VU.set(kScratchReg, E32, m1);
+ vfmadd_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfms");
+ VU.set(kScratchReg, E32, m1);
+ vfnmsub_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfma");
+ VU.set(kScratchReg, E64, m1);
+ vfmadd_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfms");
+ VU.set(kScratchReg, E64, m1);
+ vfnmsub_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- TurboAssembler::LoadWord(limit_address, MemOperand(limit_address));
- TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+ MacroAssembler::LoadWord(limit_address, MemOperand(limit_address));
+ MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -2121,7 +2179,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
int32_t offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
+ MacroAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
fp_regs.clear(reg);
offset += sizeof(double);
}
@@ -2134,7 +2192,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
int32_t fp_offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
+ MacroAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
fp_regs.clear(reg);
fp_offset += sizeof(double);
}
@@ -2168,7 +2226,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint(
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
+ MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
@@ -2207,7 +2265,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
AddWord(sp, sp, Operand(-size));
- TurboAssembler::Move(addr, sp);
+ MacroAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h
index ed559941d4..00fd61cc22 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h
@@ -174,26 +174,24 @@ inline Register EnsureNoAlias(Assembler* assm, Register reg,
}
} // namespace liftoff
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
break;
case kI64: {
- DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
- TurboAssembler::li(reg.low_gp(), Operand(low_word));
- TurboAssembler::li(reg.high_gp(), Operand(high_word));
+ MacroAssembler::li(reg.low_gp(), Operand(low_word));
+ MacroAssembler::li(reg.high_gp(), Operand(high_word));
break;
}
case kF32:
- TurboAssembler::LoadFPRImmediate(reg.fp(),
+ MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f32_boxed().get_bits());
break;
case kF64:
- TurboAssembler::LoadFPRImmediate(reg.fp(),
+ MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f64_boxed().get_bits());
break;
default:
@@ -229,13 +227,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, kScratchReg,
- MemoryChunk::kPointersFromHereAreInterestingMask, ne,
- &write_barrier);
- Branch(&exit);
- bind(&write_barrier);
+ MemoryChunk::kPointersFromHereAreInterestingMask, kZero, &exit);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), kScratchReg,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
@@ -261,39 +255,39 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load8U:
Lbu(dst.low_gp(), src_op);
- TurboAssembler::mv(dst.high_gp(), zero_reg);
+ MacroAssembler::mv(dst.high_gp(), zero_reg);
break;
case LoadType::kI32Load8S:
Lb(dst.gp(), src_op);
break;
case LoadType::kI64Load8S:
Lb(dst.low_gp(), src_op);
- TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
+ MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
break;
case LoadType::kI32Load16U:
- TurboAssembler::Lhu(dst.gp(), src_op);
+ MacroAssembler::Lhu(dst.gp(), src_op);
break;
case LoadType::kI64Load16U:
- TurboAssembler::Lhu(dst.low_gp(), src_op);
- TurboAssembler::mv(dst.high_gp(), zero_reg);
+ MacroAssembler::Lhu(dst.low_gp(), src_op);
+ MacroAssembler::mv(dst.high_gp(), zero_reg);
break;
case LoadType::kI32Load16S:
- TurboAssembler::Lh(dst.gp(), src_op);
+ MacroAssembler::Lh(dst.gp(), src_op);
break;
case LoadType::kI64Load16S:
- TurboAssembler::Lh(dst.low_gp(), src_op);
- TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
+ MacroAssembler::Lh(dst.low_gp(), src_op);
+ MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
break;
case LoadType::kI64Load32U:
- TurboAssembler::Lw(dst.low_gp(), src_op);
- TurboAssembler::mv(dst.high_gp(), zero_reg);
+ MacroAssembler::Lw(dst.low_gp(), src_op);
+ MacroAssembler::mv(dst.high_gp(), zero_reg);
break;
case LoadType::kI64Load32S:
- TurboAssembler::Lw(dst.low_gp(), src_op);
- TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
+ MacroAssembler::Lw(dst.low_gp(), src_op);
+ MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
break;
case LoadType::kI32Load:
- TurboAssembler::Lw(dst.gp(), src_op);
+ MacroAssembler::Lw(dst.gp(), src_op);
break;
case LoadType::kI64Load: {
Lw(dst.low_gp(), src_op);
@@ -302,16 +296,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Lw(dst.high_gp(), src_op);
} break;
case LoadType::kF32Load:
- TurboAssembler::LoadFloat(dst.fp(), src_op);
+ MacroAssembler::LoadFloat(dst.fp(), src_op);
break;
case LoadType::kF64Load:
- TurboAssembler::LoadDouble(dst.fp(), src_op);
+ MacroAssembler::LoadDouble(dst.fp(), src_op);
break;
case LoadType::kS128Load: {
VU.set(kScratchReg, E8, m1);
Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
if (src_op.offset() != 0) {
- TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
+ MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
}
vl(dst.fp().toV(), src_reg, 0, E8);
break;
@@ -361,29 +355,29 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
Sb(src.low_gp(), dst_op);
break;
case StoreType::kI32Store16:
- TurboAssembler::Sh(src.gp(), dst_op);
+ MacroAssembler::Sh(src.gp(), dst_op);
break;
case StoreType::kI64Store16:
- TurboAssembler::Sh(src.low_gp(), dst_op);
+ MacroAssembler::Sh(src.low_gp(), dst_op);
break;
case StoreType::kI32Store:
- TurboAssembler::Sw(src.gp(), dst_op);
+ MacroAssembler::Sw(src.gp(), dst_op);
break;
case StoreType::kI64Store32:
- TurboAssembler::Sw(src.low_gp(), dst_op);
+ MacroAssembler::Sw(src.low_gp(), dst_op);
break;
case StoreType::kI64Store: {
- TurboAssembler::Sw(src.low_gp(), dst_op);
+ MacroAssembler::Sw(src.low_gp(), dst_op);
dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg,
offset_imm + kSystemPointerSize, scratch);
- TurboAssembler::Sw(src.high_gp(), dst_op);
+ MacroAssembler::Sw(src.high_gp(), dst_op);
break;
}
case StoreType::kF32Store:
- TurboAssembler::StoreFloat(src.fp(), dst_op);
+ MacroAssembler::StoreFloat(src.fp(), dst_op);
break;
case StoreType::kF64Store:
- TurboAssembler::StoreDouble(src.fp(), dst_op);
+ MacroAssembler::StoreDouble(src.fp(), dst_op);
break;
case StoreType::kS128Store: {
VU.set(kScratchReg, E8, m1);
@@ -548,7 +542,8 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList pinned) {
+ LoadType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
Register src_reg = liftoff::CalculateActualAddress(
this, src_addr, offset_reg, offset_imm, temps.Acquire());
@@ -600,7 +595,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
Register dst_reg = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.Acquire());
@@ -647,7 +643,8 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
if (type.value() == StoreType::kI64Store) {
bailout(kAtomics, "Atomic64");
}
@@ -670,7 +667,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
if (type.value() == StoreType::kI64Store) {
bailout(kAtomics, "Atomic64");
}
@@ -693,7 +691,8 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
if (type.value() == StoreType::kI64Store) {
bailout(kAtomics, "Atomic64");
}
@@ -715,7 +714,8 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
if (type.value() == StoreType::kI64Store) {
bailout(kAtomics, "Atomic64");
}
@@ -737,7 +737,8 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
if (type.value() == StoreType::kI64Store) {
bailout(kAtomics, "Atomic64");
}
@@ -760,7 +761,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
if (type.value() == StoreType::kI64Store) {
bailout(kAtomics, "Atomic64");
}
@@ -783,7 +785,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool i64_offset) {
ASM_CODE_COMMENT(this);
LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value, result};
@@ -917,14 +919,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
StoreFloat(kScratchDoubleReg, dst);
break;
case kF64:
- TurboAssembler::LoadDouble(kScratchDoubleReg, src);
- TurboAssembler::StoreDouble(kScratchDoubleReg, dst);
+ MacroAssembler::LoadDouble(kScratchDoubleReg, src);
+ MacroAssembler::StoreDouble(kScratchDoubleReg, dst);
break;
case kS128: {
VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) {
- TurboAssembler::AddWord(src_reg, src.rm(), src.offset());
+ MacroAssembler::AddWord(src_reg, src.rm(), src.offset());
}
vl(kSimd128ScratchReg, src_reg, 0, E8);
Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
@@ -942,16 +944,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
if (kind != kS128) {
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
} else {
- TurboAssembler::vmv_vv(dst.toV(), dst.toV());
+ MacroAssembler::vmv_vv(dst.toV(), dst.toV());
}
}
@@ -973,7 +975,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
StoreFloat(reg.fp(), dst);
break;
case kF64:
- TurboAssembler::StoreDouble(reg.fp(), dst);
+ MacroAssembler::StoreDouble(reg.fp(), dst);
break;
case kS128: {
VU.set(kScratchReg, E8, m1);
@@ -992,25 +994,23 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
+ UseScratchRegisterScope assembler_temps(this);
+ Register tmp = assembler_temps.Acquire();
switch (value.type().kind()) {
case kI32:
case kRef:
case kRefNull: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
- TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
- Sw(tmp.gp(), dst);
+ MacroAssembler::li(tmp, Operand(value.to_i32()));
+ Sw(tmp, dst);
break;
}
case kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
-
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
- TurboAssembler::li(tmp.low_gp(), Operand(low_word));
- TurboAssembler::li(tmp.high_gp(), Operand(high_word));
-
- Sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
- Sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
+ MacroAssembler::li(tmp, Operand(low_word));
+ Sw(tmp, liftoff::GetHalfStackSlot(offset, kLowWord));
+ MacroAssembler::li(tmp, Operand(high_word));
+ Sw(tmp, liftoff::GetHalfStackSlot(offset, kHighWord));
break;
break;
}
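On riscv32 an i64 constant is spilled as two 32-bit stack words; a portable sketch of the low/high split used above (helper name is illustrative):

    #include <cstdint>

    // Matches the li/Sw pairs above: the low word goes to kLowWord, the
    // arithmetically shifted high word to kHighWord.
    void SplitI64ForSpill(int64_t value, int32_t* low_word, int32_t* high_word) {
      *low_word = static_cast<int32_t>(value);
      *high_word = static_cast<int32_t>(value >> 32);
    }
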
@@ -1037,13 +1037,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
LoadFloat(reg.fp(), src);
break;
case kF64:
- TurboAssembler::LoadDouble(reg.fp(), src);
+ MacroAssembler::LoadDouble(reg.fp(), src);
break;
case kS128: {
VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) {
- TurboAssembler::AddWord(src_reg, src.rm(), src.offset());
+ MacroAssembler::AddWord(src_reg, src.rm(), src.offset());
}
vl(reg.fp().toV(), src_reg, 0, E8);
break;
@@ -1131,8 +1131,8 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
// Produce partial popcnts in the two dst registers.
Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();
- TurboAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg);
- TurboAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg);
+ MacroAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg);
+ MacroAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg);
// Now add the two into the lower dst reg and clear the higher dst reg.
AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp());
mv(dst.high_gp(), zero_reg);
@@ -1140,40 +1140,40 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
}
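The i64 popcount above is assembled from two 32-bit popcounts whose results are added into the low destination register while the high register is cleared; a portable equivalent:

    #include <bitset>
    #include <cstdint>

    // popcount(v) == popcount(low 32 bits) + popcount(high 32 bits); the sum
    // fits in the low result word, so the high result word is simply zeroed.
    uint64_t Popcnt64Via32(uint64_t v) {
      unsigned low = std::bitset<32>(static_cast<uint32_t>(v)).count();
      unsigned high = std::bitset<32>(static_cast<uint32_t>(v >> 32)).count();
      return low + high;
    }
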
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
- TurboAssembler::Mul(dst, lhs, rhs);
+ MacroAssembler::Mul(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
- TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
- TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
+ MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
+ MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
add(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Div(dst, lhs, rhs);
+ MacroAssembler::Div(dst, lhs, rhs);
}
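The signed-division guards above trap on a zero divisor and on the single unrepresentable case kMinInt / -1; the same checks in scalar form (types and names are illustrative):

    #include <climits>

    enum class DivStatus { kOk, kDivByZero, kUnrepresentable };

    // Equivalent of the Branch/CompareI sequence guarding Div above.
    DivStatus CheckedI32Div(int lhs, int rhs, int* quotient) {
      if (rhs == 0) return DivStatus::kDivByZero;
      if (lhs == INT_MIN && rhs == -1) return DivStatus::kUnrepresentable;
      *quotient = lhs / rhs;
      return DivStatus::kOk;
    }
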
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Divu(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Divu(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Mod(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Mod(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Modu(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Modu(dst, lhs, rhs);
}
#define I32_BINOP(name, instruction) \
@@ -1209,15 +1209,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
- TurboAssembler::Clz32(dst, src);
+ MacroAssembler::Clz32(dst, src);
}
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
- TurboAssembler::Ctz32(dst, src);
+ MacroAssembler::Ctz32(dst, src);
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src, kScratchReg);
+ MacroAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -1245,7 +1245,7 @@ I32_SHIFTOP_I(shr, srli)
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
+ MacroAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
kScratchReg, kScratchReg2);
}
@@ -1285,7 +1285,7 @@ inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) {
inline void Emit64BitShiftOperation(
LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
Register amount,
- void (TurboAssembler::*emit_shift)(Register, Register, Register, Register,
+ void (MacroAssembler::*emit_shift)(Register, Register, Register, Register,
Register, Register, Register)) {
LiftoffRegList pinned{dst, src, amount};
@@ -1304,8 +1304,8 @@ inline void Emit64BitShiftOperation(
kScratchReg2);
// Place result in destination register.
- assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp());
- assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp());
+ assm->MacroAssembler::Move(dst.high_gp(), tmp.high_gp());
+ assm->MacroAssembler::Move(dst.low_gp(), tmp.low_gp());
} else {
(assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
src.high_gp(), amount_capped, kScratchReg,
@@ -1316,7 +1316,7 @@ inline void Emit64BitShiftOperation(
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
+ MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
kScratchReg, kScratchReg2);
}
@@ -1330,16 +1330,16 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
// TODO(riscv32): are there some optimization we can make without
// materializing?
- TurboAssembler::li(imm_reg.low_gp(), imm_low_word);
- TurboAssembler::li(imm_reg.high_gp(), imm_high_word);
- TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
+ MacroAssembler::li(imm_reg.low_gp(), imm_low_word);
+ MacroAssembler::li(imm_reg.high_gp(), imm_high_word);
+ MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(),
kScratchReg, kScratchReg2);
}
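emit_i64_addi materializes the immediate as a register pair and relies on AddPair; the word-pair arithmetic it stands for, sketched portably:

    #include <cstdint>

    // 64-bit addition out of two 32-bit adds: the carry out of the low words
    // is folded into the high-word sum.
    void AddPair32(uint32_t lhs_lo, uint32_t lhs_hi,
                   uint32_t rhs_lo, uint32_t rhs_hi,
                   uint32_t* dst_lo, uint32_t* dst_hi) {
      uint32_t lo = lhs_lo + rhs_lo;
      uint32_t carry = lo < lhs_lo ? 1u : 0u;
      *dst_lo = lo;
      *dst_hi = lhs_hi + rhs_hi + carry;
    }
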
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
+ MacroAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
kScratchReg, kScratchReg2);
}
@@ -1348,7 +1348,7 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
ASM_CODE_COMMENT(this);
liftoff::Emit64BitShiftOperation(this, dst, src, amount,
- &TurboAssembler::ShlPair);
+ &MacroAssembler::ShlPair);
}
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@@ -1365,14 +1365,14 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
DCHECK_NE(dst.low_gp(), kScratchReg);
DCHECK_NE(dst.high_gp(), kScratchReg);
- TurboAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
+ MacroAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
amount, kScratchReg, kScratchReg2);
}
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount,
- &TurboAssembler::SarPair);
+ &MacroAssembler::SarPair);
}
void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1388,14 +1388,14 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
DCHECK_NE(dst.low_gp(), kScratchReg);
DCHECK_NE(dst.high_gp(), kScratchReg);
- TurboAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
+ MacroAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
amount, kScratchReg, kScratchReg2);
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount,
- &TurboAssembler::ShrPair);
+ &MacroAssembler::ShrPair);
}
void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@@ -1411,7 +1411,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
DCHECK_NE(dst.low_gp(), kScratchReg);
DCHECK_NE(dst.high_gp(), kScratchReg);
- TurboAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
+ MacroAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
amount, kScratchReg, kScratchReg2);
}
@@ -1432,7 +1432,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) {
switch (opcode) {
case kExprI32ConvertI64:
- TurboAssembler::Move(dst.gp(), src.low_gp());
+ MacroAssembler::Move(dst.gp(), src.low_gp());
return true;
case kExprI32SConvertF32:
case kExprI32UConvertF32:
@@ -1472,22 +1472,22 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
// Checking if trap.
if (trap != nullptr) {
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
}
return true;
}
case kExprI32ReinterpretF32:
- TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
+ MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
return true;
case kExprI64SConvertI32:
- TurboAssembler::Move(dst.low_gp(), src.gp());
- TurboAssembler::Move(dst.high_gp(), src.gp());
+ MacroAssembler::Move(dst.low_gp(), src.gp());
+ MacroAssembler::Move(dst.high_gp(), src.gp());
srai(dst.high_gp(), dst.high_gp(), 31);
return true;
case kExprI64UConvertI32:
- TurboAssembler::Move(dst.low_gp(), src.gp());
- TurboAssembler::Move(dst.high_gp(), zero_reg);
+ MacroAssembler::Move(dst.low_gp(), src.gp());
+ MacroAssembler::Move(dst.high_gp(), zero_reg);
return true;
case kExprI64ReinterpretF64:
SubWord(sp, sp, kDoubleSize);
@@ -1497,21 +1497,21 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
AddWord(sp, sp, kDoubleSize);
return true;
case kExprF32SConvertI32: {
- TurboAssembler::Cvt_s_w(dst.fp(), src.gp());
+ MacroAssembler::Cvt_s_w(dst.fp(), src.gp());
return true;
}
case kExprF32UConvertI32:
- TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
+ MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
return true;
case kExprF32ReinterpretI32:
fmv_w_x(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
- TurboAssembler::Cvt_d_w(dst.fp(), src.gp());
+ MacroAssembler::Cvt_d_w(dst.fp(), src.gp());
return true;
}
case kExprF64UConvertI32:
- TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
+ MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
return true;
case kExprF64ConvertF32:
fcvt_d_s(dst.fp(), src.fp());
@@ -1582,53 +1582,47 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
}
void LiftoffAssembler::emit_jump(Label* label) {
- TurboAssembler::Branch(label);
+ MacroAssembler::Branch(label);
}
void LiftoffAssembler::emit_jump(Register target) {
- TurboAssembler::Jump(target);
+ MacroAssembler::Jump(target);
}
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
DCHECK(kind == kI32);
- TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
DCHECK((kind == kI32) ||
- (is_reference(kind) &&
- (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
- TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
+ MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
}
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+ MacroAssembler::Branch(label, cond, lhs, Operand(imm));
}
void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) {
SubWord(value, value, Operand(subtrahend));
- TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg));
+ MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg));
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- TurboAssembler::Sltu(dst, src, 1);
+ MacroAssembler::Sltu(dst, src, 1);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
@@ -1639,34 +1633,32 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
}
namespace liftoff {
-inline LiftoffCondition cond_make_unsigned(LiftoffCondition cond) {
+inline Condition cond_make_unsigned(Condition cond) {
switch (cond) {
- case kSignedLessThan:
+ case kLessThan:
return kUnsignedLessThan;
- case kSignedLessEqual:
- return kUnsignedLessEqual;
- case kSignedGreaterThan:
+ case kLessThanEqual:
+ return kUnsignedLessThanEqual;
+ case kGreaterThan:
return kUnsignedGreaterThan;
- case kSignedGreaterEqual:
- return kUnsignedGreaterEqual;
+ case kGreaterThanEqual:
+ return kUnsignedGreaterThanEqual;
default:
return cond;
}
}
} // namespace liftoff
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
ASM_CODE_COMMENT(this);
- Condition cond = liftoff::ToCondition(liftoff_cond);
Label low, cont;
// For signed i64 comparisons, we still need to use unsigned comparison for
// the low word (the only bit carrying signedness information is the MSB in
// the high word).
- Condition unsigned_cond =
- liftoff::ToCondition(liftoff::cond_make_unsigned(liftoff_cond));
+ Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
Register tmp = dst;
if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
@@ -1674,7 +1666,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
// Write 1 initially in tmp register.
- TurboAssembler::li(tmp, 1);
+ MacroAssembler::li(tmp, 1);
// If high words are equal, then compare low words, else compare high.
Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));
@@ -1700,7 +1692,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
bind(&cont);
// Move result to dst register if needed.
- TurboAssembler::Move(dst, tmp);
+ MacroAssembler::Move(dst, tmp);
}
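The pair-wise i64 compare above applies the signed condition only to the high words and falls through to an unsigned compare of the low words when the high words are equal; a sketch for signed less-than:

    #include <cstdint>

    // Only the high word carries the sign; equal high words reduce the problem
    // to an unsigned comparison of the low words.
    bool I64SignedLessThan(int32_t lhs_hi, uint32_t lhs_lo,
                           int32_t rhs_hi, uint32_t rhs_lo) {
      if (lhs_hi != rhs_hi) return lhs_hi < rhs_hi;  // signed high-word compare
      return lhs_lo < rhs_lo;                        // unsigned low-word compare
    }
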
void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
@@ -1795,7 +1787,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
+ uint8_t laneidx, uint32_t* protected_load_pc,
+                                bool /* i64_offset */) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
MemOperand src_op =
@@ -1835,7 +1828,8 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
+ uint32_t* protected_store_pc,
+                                 bool /* i64_offset */) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm, scratch);
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h
index a04bd47790..1d6ae09e8b 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h
@@ -149,21 +149,20 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
} // namespace liftoff
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
break;
case kI64:
- TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ MacroAssembler::li(reg.gp(), Operand(value.to_i64()));
break;
case kF32:
- TurboAssembler::LoadFPRImmediate(reg.fp(),
+ MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f32_boxed().get_bits());
break;
case kF64:
- TurboAssembler::LoadFPRImmediate(reg.fp(),
+ MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f64_boxed().get_bits());
break;
default:
@@ -198,14 +197,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
StoreTaggedField(src.gp(), dst_op);
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
-
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
- MemoryChunk::kPointersFromHereAreInterestingMask, ne,
- &write_barrier);
- Branch(&exit);
- bind(&write_barrier);
+ MemoryChunk::kPointersFromHereAreInterestingMask, kZero, &exit);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
@@ -236,33 +230,33 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
- TurboAssembler::Lhu(dst.gp(), src_op);
+ MacroAssembler::Lhu(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
case LoadType::kI64Load16S:
- TurboAssembler::Lh(dst.gp(), src_op);
+ MacroAssembler::Lh(dst.gp(), src_op);
break;
case LoadType::kI64Load32U:
- TurboAssembler::Lwu(dst.gp(), src_op);
+ MacroAssembler::Lwu(dst.gp(), src_op);
break;
case LoadType::kI32Load:
case LoadType::kI64Load32S:
- TurboAssembler::Lw(dst.gp(), src_op);
+ MacroAssembler::Lw(dst.gp(), src_op);
break;
case LoadType::kI64Load:
- TurboAssembler::Ld(dst.gp(), src_op);
+ MacroAssembler::Ld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::LoadFloat(dst.fp(), src_op);
+ MacroAssembler::LoadFloat(dst.fp(), src_op);
break;
case LoadType::kF64Load:
- TurboAssembler::LoadDouble(dst.fp(), src_op);
+ MacroAssembler::LoadDouble(dst.fp(), src_op);
break;
case LoadType::kS128Load: {
VU.set(kScratchReg, E8, m1);
Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
if (src_op.offset() != 0) {
- TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
+ MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
}
vl(dst.fp().toV(), src_reg, 0, E8);
break;
@@ -309,20 +303,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
- TurboAssembler::Sh(src.gp(), dst_op);
+ MacroAssembler::Sh(src.gp(), dst_op);
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
- TurboAssembler::Sw(src.gp(), dst_op);
+ MacroAssembler::Sw(src.gp(), dst_op);
break;
case StoreType::kI64Store:
- TurboAssembler::Sd(src.gp(), dst_op);
+ MacroAssembler::Sd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::StoreFloat(src.fp(), dst_op);
+ MacroAssembler::StoreFloat(src.fp(), dst_op);
break;
case StoreType::kF64Store:
- TurboAssembler::StoreDouble(src.fp(), dst_op);
+ MacroAssembler::StoreDouble(src.fp(), dst_op);
break;
case StoreType::kS128Store: {
VU.set(kScratchReg, E8, m1);
@@ -460,7 +454,8 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList pinned) {
+ LoadType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
Register src_reg = liftoff::CalculateActualAddress(
this, src_addr, offset_reg, offset_imm, temps.Acquire());
@@ -491,7 +486,8 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned) {
+ StoreType type, LiftoffRegList pinned,
+ bool i64_offset) {
UseScratchRegisterScope temps(this);
Register dst_reg = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.Acquire());
@@ -522,35 +518,40 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kXor);
}
@@ -558,7 +559,8 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kExchange);
}
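The atomic read-modify-write operations routed through AtomicBinop above roughly correspond to the sequentially consistent C++ RMW primitives; for example, for a 32-bit cell:

    #include <atomic>
    #include <cstdint>

    // Both return the previous value of the cell, matching the `result`
    // register of the Liftoff ops.
    uint32_t AtomicAdd32(std::atomic<uint32_t>* cell, uint32_t value) {
      return cell->fetch_add(value);
    }

    uint32_t AtomicExchange32(std::atomic<uint32_t>* cell, uint32_t value) {
      return cell->exchange(value);
    }
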
@@ -603,7 +605,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool i64_offset) {
LiftoffRegList pinned{dst_addr, offset_reg, expected, new_value, result};
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
@@ -683,14 +685,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
StoreFloat(kScratchDoubleReg, dst);
break;
case kF64:
- TurboAssembler::LoadDouble(kScratchDoubleReg, src);
- TurboAssembler::StoreDouble(kScratchDoubleReg, dst);
+ MacroAssembler::LoadDouble(kScratchDoubleReg, src);
+ MacroAssembler::StoreDouble(kScratchDoubleReg, dst);
break;
case kS128: {
VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) {
- TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ MacroAssembler::Add64(src_reg, src.rm(), src.offset());
}
vl(kSimd128ScratchReg, src_reg, 0, E8);
Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
@@ -711,16 +713,17 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
if (kind != kS128) {
- TurboAssembler::Move(dst, src);
+ MacroAssembler::Move(dst, src);
} else {
- TurboAssembler::vmv_vv(dst.toV(), dst.toV());
+ VU.set(kScratchReg, E8, m1);
+ MacroAssembler::vmv_vv(dst.toV(), src.toV());
}
}
@@ -741,7 +744,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
StoreFloat(reg.fp(), dst);
break;
case kF64:
- TurboAssembler::StoreDouble(reg.fp(), dst);
+ MacroAssembler::StoreDouble(reg.fp(), dst);
break;
case kS128: {
VU.set(kScratchReg, E8, m1);
@@ -764,7 +767,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kI32: {
UseScratchRegisterScope temps(this);
Register tmp = temps.Acquire();
- TurboAssembler::li(tmp, Operand(value.to_i32()));
+ MacroAssembler::li(tmp, Operand(value.to_i32()));
Sw(tmp, dst);
break;
}
@@ -773,7 +776,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kRefNull: {
UseScratchRegisterScope temps(this);
Register tmp = temps.Acquire();
- TurboAssembler::li(tmp, value.to_i64());
+ MacroAssembler::li(tmp, value.to_i64());
Sd(tmp, dst);
break;
}
@@ -799,13 +802,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
LoadFloat(reg.fp(), src);
break;
case kF64:
- TurboAssembler::LoadDouble(reg.fp(), src);
+ MacroAssembler::LoadDouble(reg.fp(), src);
break;
case kS128: {
VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) {
- TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ MacroAssembler::Add64(src_reg, src.rm(), src.offset());
}
vl(reg.fp().toV(), src_reg, 0, E8);
break;
@@ -852,54 +855,54 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
}
void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
- TurboAssembler::Clz64(dst.gp(), src.gp());
+ MacroAssembler::Clz64(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
- TurboAssembler::Ctz64(dst.gp(), src.gp());
+ MacroAssembler::Ctz64(dst.gp(), src.gp());
}
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
+ MacroAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true;
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
- TurboAssembler::Mul32(dst, lhs, rhs);
+ MacroAssembler::Mul32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
- TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
- TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
+ MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
+ MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
add(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Div32(dst, lhs, rhs);
+ MacroAssembler::Div32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Divu32(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Divu32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Mod32(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Mod32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
- TurboAssembler::Modu32(dst, lhs, rhs);
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ MacroAssembler::Modu32(dst, lhs, rhs);
}
#define I32_BINOP(name, instruction) \
@@ -935,15 +938,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
- TurboAssembler::Clz32(dst, src);
+ MacroAssembler::Clz32(dst, src);
}
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
- TurboAssembler::Ctz32(dst, src);
+ MacroAssembler::Ctz32(dst, src);
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src, kScratchReg);
+ MacroAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -971,48 +974,48 @@ I32_SHIFTOP_I(shr, srliw)
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- TurboAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
// Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
- TurboAssembler::CompareI(kScratchReg, lhs.gp(),
+ MacroAssembler::CompareI(kScratchReg, lhs.gp(),
Operand(std::numeric_limits<int64_t>::min()), ne);
- TurboAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne);
+ MacroAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne);
add(kScratchReg, kScratchReg, kScratchReg2);
- TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg));
- TurboAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
- TurboAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
+ MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ MacroAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
@@ -1089,7 +1092,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
- TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
+ MacroAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
}
void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
ZeroExtendWord(dst, src);
@@ -1116,7 +1119,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
// According to WebAssembly spec, if I64 value does not fit the range of
// I32, the value is undefined. Therefore, We use sign extension to
// implement I64 to I32 truncation
- TurboAssembler::SignExtendWord(dst.gp(), src.gp());
+ MacroAssembler::SignExtendWord(dst.gp(), src.gp());
return true;
case kExprI32SConvertF32:
case kExprI32UConvertF32:
@@ -1163,39 +1166,39 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
// Checking if trap.
if (trap != nullptr) {
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
}
return true;
}
case kExprI32ReinterpretF32:
- TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
+ MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
return true;
case kExprI64SConvertI32:
- TurboAssembler::SignExtendWord(dst.gp(), src.gp());
+ MacroAssembler::SignExtendWord(dst.gp(), src.gp());
return true;
case kExprI64UConvertI32:
- TurboAssembler::ZeroExtendWord(dst.gp(), src.gp());
+ MacroAssembler::ZeroExtendWord(dst.gp(), src.gp());
return true;
case kExprI64ReinterpretF64:
fmv_x_d(dst.gp(), src.fp());
return true;
case kExprF32SConvertI32: {
- TurboAssembler::Cvt_s_w(dst.fp(), src.gp());
+ MacroAssembler::Cvt_s_w(dst.fp(), src.gp());
return true;
}
case kExprF32UConvertI32:
- TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
+ MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
return true;
case kExprF32ReinterpretI32:
fmv_w_x(dst.fp(), src.gp());
return true;
case kExprF64SConvertI32: {
- TurboAssembler::Cvt_d_w(dst.fp(), src.gp());
+ MacroAssembler::Cvt_d_w(dst.fp(), src.gp());
return true;
}
case kExprF64UConvertI32:
- TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
+ MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
return true;
case kExprF64ConvertF32:
fcvt_d_s(dst.fp(), src.fp());
@@ -1277,64 +1280,57 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
}
void LiftoffAssembler::emit_jump(Label* label) {
- TurboAssembler::Branch(label);
+ MacroAssembler::Branch(label);
}
void LiftoffAssembler::emit_jump(Register target) {
- TurboAssembler::Jump(target);
+ MacroAssembler::Jump(target);
}
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
DCHECK(kind == kI32 || kind == kI64);
- TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
DCHECK((kind == kI32 || kind == kI64) ||
- (is_reference(kind) &&
- (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
- TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
+ MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
}
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+ MacroAssembler::Branch(label, cond, lhs, Operand(imm));
}
void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) {
Sub64(value, value, Operand(subtrahend));
- TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg));
+ MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg));
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- TurboAssembler::Sltu(dst, src, 1);
+ MacroAssembler::Sltu(dst, src, 1);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- TurboAssembler::Sltu(dst, src.gp(), 1);
+ MacroAssembler::Sltu(dst, src.gp(), 1);
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond);
+ MacroAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond);
}
void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
@@ -1434,8 +1430,10 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
- MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
+ MemOperand src_op =
+ liftoff::GetMemOp(this, addr, offset_reg, offset_imm, i64_offset);
MachineType mem_type = type.mem_type();
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -1473,8 +1471,10 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
- MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, dst, offset, offset_imm, i64_offset);
if (protected_store_pc) *protected_store_pc = pc_offset();
MachineRepresentation rep = type.mem_rep();
if (rep == MachineRepresentation::kWord8) {
@@ -1573,6 +1573,8 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
+ vmv_vi(kSimd128ScratchReg, -1);
+ vmv_vi(kSimd128ScratchReg3, -1);
li(kScratchReg, 0x0006000400020000);
vmv_sx(kSimd128ScratchReg, kScratchReg);
li(kScratchReg, 0x0007000500030001);
@@ -1587,6 +1589,8 @@ void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
+ vmv_vi(kSimd128ScratchReg, -1);
+ vmv_vi(kSimd128ScratchReg3, -1);
li(kScratchReg, 0x0006000400020000);
vmv_sx(kSimd128ScratchReg, kScratchReg);
li(kScratchReg, 0x0007000500030001);
@@ -1601,6 +1605,8 @@ void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
+ vmv_vi(kSimd128ScratchReg, -1);
+ vmv_vi(kSimd128ScratchReg3, -1);
li(kScratchReg, 0x0E0C0A0806040200);
vmv_sx(kSimd128ScratchReg, kScratchReg);
li(kScratchReg, 0x0F0D0B0907050301);
@@ -1615,6 +1621,8 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
+ vmv_vi(kSimd128ScratchReg, -1);
+ vmv_vi(kSimd128ScratchReg3, -1);
li(kScratchReg, 0x0E0C0A0806040200);
vmv_sx(kSimd128ScratchReg, kScratchReg);
li(kScratchReg, 0x0F0D0B0907050301);
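The gather-plus-widening-add sequences above implement the extadd_pairwise family; as a scalar reference, the unsigned i8x16 to i16x8 variant sums each adjacent pair of zero-extended input lanes:

    #include <array>
    #include <cstdint>

    // Reference semantics for i16x8.extadd_pairwise_i8x16_u.
    std::array<uint16_t, 8> ExtAddPairwiseU8(const std::array<uint8_t, 16>& in) {
      std::array<uint16_t, 8> out{};
      for (int i = 0; i < 8; ++i) {
        out[i] = static_cast<uint16_t>(in[2 * i]) +
                 static_cast<uint16_t>(in[2 * i + 1]);
      }
      return out;
    }
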
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1aa8864f5d..c24f548570 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -18,47 +18,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- case kUnsignedLessThan:
- return lt;
- case kSignedLessEqual:
- case kUnsignedLessEqual:
- return le;
- case kSignedGreaterEqual:
- case kUnsignedGreaterEqual:
- return ge;
- case kSignedGreaterThan:
- case kUnsignedGreaterThan:
- return gt;
- }
-}
-
-inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- case kUnequal:
- case kSignedLessThan:
- case kSignedLessEqual:
- case kSignedGreaterThan:
- case kSignedGreaterEqual:
- return true;
- case kUnsignedLessThan:
- case kUnsignedLessEqual:
- case kUnsignedGreaterThan:
- case kUnsignedGreaterEqual:
- return false;
- default:
- UNREACHABLE();
- }
- return false;
-}
-
// half
// slot Frame
// -----+--------------------+---------------------------
@@ -142,10 +101,11 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
@@ -203,7 +163,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation);
// Now allocate the stack space. Note that this might do more than just
- // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
lay(sp, MemOperand(sp, -frame_size));
// Jump back to the start of the function, from {pc_offset()} to
@@ -235,25 +195,24 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return (kind == kS128 || is_reference(kind));
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- mov(reg.gp(), Operand(value.to_i32(), rmode));
+ mov(reg.gp(), Operand(value.to_i32()));
break;
case kI64:
- mov(reg.gp(), Operand(value.to_i64(), rmode));
+ mov(reg.gp(), Operand(value.to_i64()));
break;
case kF32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- LoadF32(reg.fp(), value.to_f32_boxed().get_scalar(), scratch);
+ LoadF32(reg.fp(), value.to_f32(), scratch);
break;
}
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- LoadF64(reg.fp(), value.to_f64_boxed().get_bits(), scratch);
+ LoadF64(reg.fp(), value.to_f64(), scratch);
break;
}
default:
@@ -287,7 +246,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
DCHECK_LE(0, offset);
- LoadTaggedPointerField(dst, MemOperand(instance, offset));
+ LoadTaggedField(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -305,7 +264,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
offset_reg = ip;
}
- LoadTaggedPointerField(
+ LoadTaggedField(
dst,
MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
}
@@ -328,16 +287,10 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, r1, MemoryChunk::kPointersFromHereAreInterestingMask,
- ne, &write_barrier);
- b(&exit);
- bind(&write_barrier);
+ to_condition(kZero), &exit);
JumpIfSmi(src.gp(), &exit);
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(src.gp(), src.gp());
- }
CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
eq, &exit);
lay(r1, dst_op);
@@ -449,27 +402,29 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
}
+#define PREP_MEM_OPERAND(offset_reg, offset_imm, scratch) \
+ if (offset_reg != no_reg && !i64_offset) { \
+ /* Clear the upper 32 bits of the 64 bit offset register.*/ \
+ llgfr(scratch, offset_reg); \
+ offset_reg = scratch; \
+ } \
+ if (!is_int20(offset_imm)) { \
+ if (offset_reg != no_reg) { \
+ mov(r0, Operand(offset_imm)); \
+ AddS64(r0, offset_reg); \
+ mov(scratch, r0); \
+ } else { \
+ mov(scratch, Operand(offset_imm)); \
+ } \
+ offset_reg = scratch; \
+ offset_imm = 0; \
+ }
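PREP_MEM_OPERAND folds an offset that does not fit the signed 20-bit s390 displacement into the index register; the same normalization in scalar form (struct and helper names are illustrative):

    #include <cstdint>

    bool IsInt20(int64_t v) {
      return v >= -(int64_t{1} << 19) && v < (int64_t{1} << 19);
    }

    struct MemParts {
      int64_t index;  // value that ends up in the (possibly scratch) index register
      int64_t disp;   // displacement encoded in the instruction
    };

    // Oversized displacements are added to the index and reset to zero,
    // mirroring the mov/AddS64 path of the macro.
    MemParts NormalizeDisplacement(int64_t index, int64_t disp) {
      if (IsInt20(disp)) return {index, disp};
      return {index + disp, 0};
    }
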
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList /* pinned */,
uint32_t* protected_store_pc, bool is_store_mem,
bool i64_offset) {
- if (offset_reg != no_reg && !i64_offset) {
- // Clear the upper 32 bits of the 64 bit offset register.
- llgfr(ip, offset_reg);
- offset_reg = ip;
- }
- if (!is_int20(offset_imm)) {
- if (offset_reg != no_reg) {
- mov(r0, Operand(offset_imm));
- AddS64(r0, offset_reg);
- mov(ip, r0);
- } else {
- mov(ip, Operand(offset_imm));
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
MemOperand dst_op =
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
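
The offset fix-up that every s390 memory accessor used to repeat inline is factored into the {PREP_MEM_OPERAND} macro above. A rough model of what it does, written as plain C++ over integer stand-ins for the registers and assuming the signed 20-bit displacement limit that {is_int20} checks:

    #include <cstdint>
    #include <optional>

    constexpr bool is_int20(int64_t v) {
      return v >= -(int64_t{1} << 19) && v < (int64_t{1} << 19);
    }

    struct MemOperandModel {
      std::optional<uint64_t> index;  // offset register contents, if any
      int64_t displacement;           // fits in a signed 20-bit field
    };

    MemOperandModel PrepMemOperand(std::optional<uint64_t> offset_reg,
                                   int64_t offset_imm, bool i64_offset) {
      if (offset_reg && !i64_offset) {
        // llgfr: only the low 32 bits of the offset register are meaningful.
        *offset_reg &= 0xFFFFFFFFull;
      }
      if (!is_int20(offset_imm)) {
        // Fold an out-of-range immediate into the (scratch) index register.
        offset_reg = static_cast<uint64_t>(offset_imm) + offset_reg.value_or(0);
        offset_imm = 0;
      }
      return {offset_reg, offset_imm};
    }
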
@@ -530,22 +485,16 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList /* pinned */) {
- Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
+ LoadType type, LiftoffRegList /* pinned */,
+ bool i64_offset) {
+ Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type,
- LiftoffRegList /* pinned */) {
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ StoreType type, LiftoffRegList /* pinned */,
+ bool i64_offset) {
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -600,7 +549,8 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
value, result})
.gp();
@@ -608,14 +558,7 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
value, result, tmp1})
.gp();
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -700,7 +643,8 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
value, result})
.gp();
@@ -708,14 +652,7 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
value, result, tmp1})
.gp();
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -800,7 +737,8 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
value, result})
.gp();
@@ -808,14 +746,7 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
value, result, tmp1})
.gp();
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -900,7 +831,8 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
value, result})
.gp();
@@ -908,14 +840,7 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
value, result, tmp1})
.gp();
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -1000,7 +925,8 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
value, result})
.gp();
@@ -1008,14 +934,7 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
value, result, tmp1})
.gp();
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -1101,15 +1020,9 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -1177,15 +1090,8 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ StoreType type, bool i64_offset) {
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -2169,12 +2075,11 @@ void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ bool use_signed = is_signed(cond);
if (rhs != no_reg) {
switch (kind) {
@@ -2188,7 +2093,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kRefNull:
case kRtt:
- DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ DCHECK(cond == kEqual || cond == kNotEqual);
#if defined(V8_COMPRESS_POINTERS)
if (use_signed) {
CmpS32(lhs, rhs);
@@ -2219,21 +2124,19 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
CmpS32(lhs, Operand::Zero());
}
- b(cond, label);
+ b(to_condition(cond), label);
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs,
- int32_t imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int32_t imm,
const FreezeCacheState& frozen) {
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
- Condition cond = liftoff::ToCondition(liftoff_cond);
+ bool use_signed = is_signed(cond);
if (use_signed) {
CmpS32(lhs, Operand(imm));
} else {
CmpU32(lhs, Operand(imm));
}
- b(cond, label);
+ b(to_condition(cond), label);
}
#define EMIT_EQZ(test, src) \
@@ -2266,48 +2169,47 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
bind(&done); \
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ bool use_signed = is_signed(cond);
if (use_signed) {
CmpS32(lhs, rhs);
} else {
CmpU32(lhs, rhs);
}
- EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
+ EMIT_SET_CONDITION(dst, to_condition(cond));
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
EMIT_EQZ(ltgr, src.gp());
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ bool use_signed = is_signed(cond);
if (use_signed) {
CmpS64(lhs.gp(), rhs.gp());
} else {
CmpU64(lhs.gp(), rhs.gp());
}
- EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
+ EMIT_SET_CONDITION(dst, to_condition(cond));
}
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
cebr(lhs, rhs);
- EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
+ EMIT_SET_CONDITION(dst, to_condition(cond));
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
cdbr(lhs, rhs);
- EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
+ EMIT_SET_CONDITION(dst, to_condition(cond));
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -2657,6 +2559,38 @@ SIMD_QFM_LIST(EMIT_SIMD_QFM)
#undef EMIT_SIMD_QFM
#undef SIMD_QFM_LIST
+#define SIMD_RELAXED_BINOP_LIST(V) \
+ V(i8x16_relaxed_swizzle, i8x16_swizzle) \
+ V(f64x2_relaxed_min, f64x2_pmin) \
+ V(f64x2_relaxed_max, f64x2_pmax) \
+ V(f32x4_relaxed_min, f32x4_pmin) \
+ V(f32x4_relaxed_max, f32x4_pmax) \
+ V(i16x8_relaxed_q15mulr_s, i16x8_q15mulr_sat_s)
+
+#define SIMD_VISIT_RELAXED_BINOP(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ emit_##op(dst, lhs, rhs); \
+ }
+SIMD_RELAXED_BINOP_LIST(SIMD_VISIT_RELAXED_BINOP)
+#undef SIMD_VISIT_RELAXED_BINOP
+#undef SIMD_RELAXED_BINOP_LIST
+
+#define SIMD_RELAXED_UNOP_LIST(V) \
+ V(i32x4_relaxed_trunc_f32x4_s, i32x4_sconvert_f32x4) \
+ V(i32x4_relaxed_trunc_f32x4_u, i32x4_uconvert_f32x4) \
+ V(i32x4_relaxed_trunc_f64x2_s_zero, i32x4_trunc_sat_f64x2_s_zero) \
+ V(i32x4_relaxed_trunc_f64x2_u_zero, i32x4_trunc_sat_f64x2_u_zero)
+
+#define SIMD_VISIT_RELAXED_UNOP(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ emit_##op(dst, src); \
+ }
+SIMD_RELAXED_UNOP_LIST(SIMD_VISIT_RELAXED_UNOP)
+#undef SIMD_VISIT_RELAXED_UNOP
+#undef SIMD_RELAXED_UNOP_LIST
+
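
The hand-written relaxed-SIMD forwarders removed further down are regenerated from these lists. A compilable toy version of the same X-macro pattern, with made-up emit functions standing in for the Liftoff ones:

    #include <cstdio>

    void emit_f32x4_pmin() { std::puts("pmin"); }
    void emit_f32x4_pmax() { std::puts("pmax"); }

    #define RELAXED_LIST(V)            \
      V(f32x4_relaxed_min, f32x4_pmin) \
      V(f32x4_relaxed_max, f32x4_pmax)

    // Each (name, op) pair expands into a forwarding function.
    #define DEFINE_FORWARDER(name, op) \
      void emit_##name() { emit_##op(); }
    RELAXED_LIST(DEFINE_FORWARDER)
    #undef DEFINE_FORWARDER
    #undef RELAXED_LIST

    int main() {
      emit_f32x4_relaxed_min();  // prints "pmin"
      emit_f32x4_relaxed_max();  // prints "pmax"
    }
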
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
@@ -2712,15 +2646,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset_reg != no_reg) {
- AddS64(ip, offset_reg);
- }
- offset_reg = ip;
- offset_imm = 0;
- }
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
+ PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
MemOperand src_op =
MemOperand(addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
@@ -2745,15 +2673,9 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
- if (!is_int20(offset_imm)) {
- mov(ip, Operand(offset_imm));
- if (offset != no_reg) {
- AddS64(ip, offset);
- }
- offset = ip;
- offset_imm = 0;
- }
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
+ PREP_MEM_OPERAND(offset, offset_imm, ip)
MemOperand dst_op =
MemOperand(dst, offset == no_reg ? r0 : offset, offset_imm);
@@ -2803,68 +2725,6 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
I8x16Swizzle(dest, src1, src2, r0, r1, kScratchDoubleReg, temp);
}
-void LiftoffAssembler::emit_i8x16_relaxed_swizzle(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Simd128Register src1 = lhs.fp();
- Simd128Register src2 = rhs.fp();
- Simd128Register dest = dst.fp();
- Simd128Register temp =
- GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp();
- I8x16Swizzle(dest, src1, src2, r0, r1, kScratchDoubleReg, temp);
-}
-
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst,
- LiftoffRegister src) {
- I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
-}
-
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst,
- LiftoffRegister src) {
- I32x4UConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
-}
-
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero(
- LiftoffRegister dst, LiftoffRegister src) {
- emit_i32x4_trunc_sat_f64x2_s_zero(dst, src);
-}
-
-void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero(
- LiftoffRegister dst, LiftoffRegister src) {
- emit_i32x4_trunc_sat_f64x2_u_zero(dst, src);
-}
-
-void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- LiftoffRegister mask) {
- emit_s128_select(dst, src1, src2, mask);
-}
-
-void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- emit_f64x2_pmin(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- emit_f64x2_pmax(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- emit_f32x4_pmin(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- emit_f32x4_pmax(dst, lhs, rhs);
-}
-
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src) {
F64x2ConvertLowI32x4S(dst.fp(), src.fp());
@@ -2915,31 +2775,21 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
I16x8Q15MulRSatS(dest, s1, s2, kScratchDoubleReg, temp1, temp2);
}
-void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- Simd128Register s1 = src1.fp();
- Simd128Register s2 = src2.fp();
- Simd128Register dest = dst.fp();
- // Make sure temp registers are unique.
- Simd128Register temp1 =
- GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2}).fp();
- Simd128Register temp2 =
- GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2, temp1}).fp();
- I16x8Q15MulRSatS(dest, s1, s2, kScratchDoubleReg, temp1, temp2);
-}
-
void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
+ I16x8DotI8x16S(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
+ // Make sure temp register is unique.
+ Simd128Register temp =
+ GetUnusedRegister(kFpReg, LiftoffRegList{dst, lhs, rhs}).fp();
+ I32x4DotI8x16AddS(dst.fp(), lhs.fp(), rhs.fp(), acc.fp(), kScratchDoubleReg,
+ temp);
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -3061,6 +2911,13 @@ void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg);
}
+void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ emit_s128_select(dst, src1, src2, mask);
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
LoadU64(limit_address, MemOperand(limit_address));
CmpU64(sp, limit_address);
@@ -3074,7 +2931,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
// Asserts unreachable within the wasm code.
- TurboAssembler::AssertUnreachable(reason);
+ MacroAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -3228,7 +3085,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
lay(sp, MemOperand(sp, -size));
- TurboAssembler::Move(addr, sp);
+ MacroAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 526be9fc68..d5106c0401 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -9,6 +9,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
+#include "src/codegen/x64/assembler-x64.h"
#include "src/codegen/x64/register-x64.h"
#include "src/flags/flags.h"
#include "src/heap/memory-chunk.h"
@@ -26,31 +27,6 @@ namespace wasm {
namespace liftoff {
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return equal;
- case kUnequal:
- return not_equal;
- case kSignedLessThan:
- return less;
- case kSignedLessEqual:
- return less_equal;
- case kSignedGreaterThan:
- return greater;
- case kSignedGreaterEqual:
- return greater_equal;
- case kUnsignedLessThan:
- return below;
- case kUnsignedLessEqual:
- return below_equal;
- case kUnsignedGreaterThan:
- return above;
- case kUnsignedGreaterEqual:
- return above_equal;
- }
-}
-
constexpr Register kScratchRegister2 = r11;
static_assert(kScratchRegister != kScratchRegister2, "collision");
static_assert((kLiftoffAssemblerGpCacheRegs &
@@ -90,7 +66,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr,
}
// Offset immediate does not fit in 31 bits.
Register scratch = kScratchRegister;
- assm->TurboAssembler::Move(scratch, offset_imm);
+ assm->MacroAssembler::Move(scratch, offset_imm);
if (offset_reg != no_reg) assm->addq(scratch, offset_reg);
return Operand(addr, scratch, scale_factor, 0);
}
@@ -226,13 +202,14 @@ void LiftoffAssembler::AlignFrameSize() {
}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
DCHECK_EQ(0, frame_size % kSystemPointerSize);
@@ -294,7 +271,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation);
// Now allocate the stack space. Note that this might do more than just
- // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
AllocateStackSpace(frame_size);
// Jump back to the start of the function, from {pc_offset()} to
@@ -321,28 +298,23 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
return is_reference(kind);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
- RelocInfo::Mode rmode) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type().kind()) {
case kI32:
- if (value.to_i32() == 0 && RelocInfo::IsNoInfo(rmode)) {
+ if (value.to_i32() == 0) {
xorl(reg.gp(), reg.gp());
} else {
- movl(reg.gp(), Immediate(value.to_i32(), rmode));
+ movl(reg.gp(), Immediate(value.to_i32()));
}
break;
case kI64:
- if (RelocInfo::IsNoInfo(rmode)) {
- TurboAssembler::Move(reg.gp(), value.to_i64());
- } else {
- movq(reg.gp(), Immediate64(value.to_i64(), rmode));
- }
+ MacroAssembler::Move(reg.gp(), value.to_i64());
break;
case kF32:
- TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
case kF64:
- TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
UNREACHABLE();
@@ -376,15 +348,14 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
DCHECK_LE(0, offset);
- LoadTaggedPointerField(dst, Operand(instance, offset));
+ LoadTaggedField(dst, Operand(instance, offset));
}
void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
int offset, ExternalPointerTag tag,
- Register isolate_root) {
- LoadExternalPointerField(dst, FieldOperand(instance, offset), tag,
- isolate_root,
- IsolateRootLocation::kInScratchRegister);
+ Register scratch) {
+ LoadExternalPointerField(dst, FieldOperand(instance, offset), tag, scratch,
+ IsolateRootLocation::kInRootRegister);
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -406,7 +377,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Operand src_op =
liftoff::GetMemOp(this, src_addr, offset_reg,
static_cast<uint32_t>(offset_imm), scale_factor);
- LoadTaggedPointerField(dst, src_op);
+ LoadTaggedField(dst, src_op);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -430,17 +401,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
if (skip_write_barrier || v8_flags.disable_write_barriers) return;
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
- MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
- &write_barrier, Label::kNear);
- jmp(&exit, Label::kNear);
- bind(&write_barrier);
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &exit,
+ Label::kNear);
JumpIfSmi(src.gp(), &exit, Label::kNear);
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(src.gp(), src.gp());
- }
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
@@ -453,8 +418,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
- LoadType type, LiftoffRegList /* pinned */) {
- Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);
+ LoadType type, LiftoffRegList /* pinned */,
+ bool i64_offset) {
+ Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -550,8 +516,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
- StoreType type,
- LiftoffRegList /* pinned */) {
+ StoreType type, LiftoffRegList /* pinned */,
+ bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Register src_reg = src.gp();
if (cache_state()->is_used(src)) {
@@ -581,7 +548,9 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
DCHECK(!cache_state()->is_used(result));
if (cache_state()->is_used(value)) {
// We cannot overwrite {value}, but the {value} register is changed in the
@@ -623,7 +592,9 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
LiftoffRegList dont_overwrite =
cache_state()->used_registers | LiftoffRegList{dst_addr, offset_reg};
DCHECK(!dont_overwrite.has(result));
@@ -680,7 +651,9 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opq)(Register, Register),
Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) __ AssertZeroExtended(offset_reg);
DCHECK(!__ cache_state()->is_used(result));
Register value_reg = value.gp();
// The cmpxchg instruction uses rax to store the old value of the
@@ -753,29 +726,34 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, &Assembler::andl, &Assembler::andq, dst_addr,
- offset_reg, offset_imm, value, result, type);
+ offset_reg, offset_imm, value, result, type, i64_offset);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, &Assembler::orl, &Assembler::orq, dst_addr,
- offset_reg, offset_imm, value, result, type);
+ offset_reg, offset_imm, value, result, type, i64_offset);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
liftoff::AtomicBinop(this, &Assembler::xorl, &Assembler::xorq, dst_addr,
- offset_reg, offset_imm, value, result, type);
+ offset_reg, offset_imm, value, result, type, i64_offset);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
- LiftoffRegister result, StoreType type) {
+ LiftoffRegister result, StoreType type,
+ bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
DCHECK(!cache_state()->is_used(result));
if (cache_state()->is_used(value)) {
// We cannot overwrite {value}, but the {value} register is changed in the
@@ -817,7 +795,8 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
- StoreType type) {
+ StoreType type, bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
Register value_reg = new_value.gp();
// The cmpxchg instruction uses rax to store the old value of the
// compare-exchange primitive. Therefore we have to spill the register and
@@ -1350,7 +1329,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
if (!is_int32(imm)) {
- TurboAssembler::Move(kScratchRegister, imm);
+ MacroAssembler::Move(kScratchRegister, imm);
if (lhs.gp() == dst.gp()) {
addq(dst.gp(), kScratchRegister);
} else {
@@ -1651,10 +1630,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+ MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1);
Andps(dst, kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit - 1);
+ MacroAssembler::Move(dst, kSignBit - 1);
Andps(dst, src);
}
}
@@ -1662,10 +1641,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ MacroAssembler::Move(kScratchDoubleReg, kSignBit);
Xorps(dst, kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit);
+ MacroAssembler::Move(dst, kSignBit);
Xorps(dst, src);
}
}
@@ -1784,10 +1763,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+ MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1);
Andpd(dst, kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit - 1);
+ MacroAssembler::Move(dst, kSignBit - 1);
Andpd(dst, src);
}
}
@@ -1795,10 +1774,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
- TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ MacroAssembler::Move(kScratchDoubleReg, kSignBit);
Xorpd(dst, kScratchDoubleReg);
} else {
- TurboAssembler::Move(dst, kSignBit);
+ MacroAssembler::Move(dst, kSignBit);
Xorpd(dst, src);
}
}
@@ -2167,11 +2146,10 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
-void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueKind kind,
- Register lhs, Register rhs,
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueKind kind, Register lhs,
+ Register rhs,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
switch (kind) {
case kI32:
@@ -2180,7 +2158,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRef:
case kRefNull:
case kRtt:
- DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ DCHECK(cond == kEqual || cond == kNotEqual);
#if defined(V8_COMPRESS_POINTERS)
// It's enough to do a 32-bit comparison. This is also necessary for
// null checks which only compare against a 32 bit value, not a full
@@ -2204,10 +2182,9 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
j(cond, label);
}
-void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
- Label* label, Register lhs, int imm,
+void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
+ Register lhs, int imm,
const FreezeCacheState& frozen) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
cmpl(lhs, Immediate(imm));
j(cond, label);
}
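
As on s390 earlier in this diff, the Liftoff-specific {LiftoffCondition} and its per-backend {ToCondition} table are dropped; the emitters take the shared codegen {Condition} directly, and backends that need it (s390) use the {is_signed(cond)} helper to pick between signed and unsigned compares. A toy model of that split, with stand-in names rather than the real enum:

    #include <cstdint>

    // Stand-ins; the real code uses the shared Condition enum.
    enum class Cond { kEqual, kNotEqual, kLessThan, kUnsignedLessThan };
    constexpr bool IsSigned(Cond c) { return c == Cond::kLessThan; }

    // Signed conditions take the signed-compare path (CmpS32 in the s390
    // code), everything else the unsigned one (CmpU32).
    bool Compare32(Cond c, int32_t lhs, int32_t rhs) {
      if (IsSigned(c)) return lhs < rhs;  // kLessThan
      uint32_t ul = static_cast<uint32_t>(lhs);
      uint32_t ur = static_cast<uint32_t>(rhs);
      switch (c) {
        case Cond::kEqual:            return ul == ur;
        case Cond::kNotEqual:         return ul != ur;
        case Cond::kUnsignedLessThan: return ul < ur;
        default:                      return false;
      }
    }
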
@@ -2225,10 +2202,8 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
movzxbl(dst, dst);
}
-void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, Register lhs,
- Register rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
cmpl(lhs, rhs);
setcc(cond, dst);
movzxbl(dst, dst);
@@ -2240,17 +2215,17 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
movzxbl(dst, dst);
}
-void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
LiftoffRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
cmpq(lhs.gp(), rhs.gp());
setcc(cond, dst);
movzxbl(dst, dst);
}
namespace liftoff {
-template <void (SharedTurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+template <void (SharedMacroAssemblerBase::*cmp_op)(DoubleRegister,
+ DoubleRegister)>
void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister rhs) {
Label cont;
@@ -2274,19 +2249,17 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs,
+ liftoff::EmitFloatSetCond<&MacroAssembler::Ucomiss>(this, cond, dst, lhs,
rhs);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- Condition cond = liftoff::ToCondition(liftoff_cond);
- liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs,
+ liftoff::EmitFloatSetCond<&MacroAssembler::Ucomisd>(this, cond, dst, lhs,
rhs);
}
@@ -2412,7 +2385,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->setcc(not_equal, dst.gp());
}
-template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedMacroAssemblerBase::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2475,7 +2448,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
- uint8_t laneidx, uint32_t* protected_load_pc) {
+ uint8_t laneidx, uint32_t* protected_load_pc,
+ bool i64_offset) {
+ if (offset_reg != no_reg && !i64_offset) AssertZeroExtended(offset_reg);
Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
MachineType mem_type = type.mem_type();
@@ -2494,7 +2469,9 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
- uint32_t* protected_store_pc) {
+ uint32_t* protected_store_pc,
+ bool i64_offset) {
+ if (offset != no_reg && !i64_offset) AssertZeroExtended(offset);
Operand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
MachineRepresentation rep = type.mem_rep();
@@ -2519,7 +2496,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
uint32_t imms[4];
// Shuffles that use just 1 operand are called swizzles, rhs can be ignored.
wasm::SimdShuffle::Pack16Lanes(imms, shuffle);
- TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
+ MacroAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
make_uint64(imms[1], imms[0]));
Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
return;
@@ -2532,7 +2509,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask1[j] <<= 8;
mask1[j] |= lane < kSimd128Size ? lane : 0x80;
}
- TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
+ MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2);
uint64_t mask2[2] = {};
@@ -2542,7 +2519,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask2[j] <<= 8;
mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
}
- TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
+ MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2);
Por(dst.fp(), kScratchDoubleReg);
@@ -2919,7 +2896,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
memcpy(vals, imms, sizeof(vals));
- TurboAssembler::Move(dst.fp(), vals[1], vals[0]);
+ MacroAssembler::Move(dst.fp(), vals[1], vals[0]);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2977,7 +2954,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -3102,7 +3079,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -3290,7 +3267,13 @@ void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
+ LiftoffRegister tmp1 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs});
+ LiftoffRegister tmp2 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList{dst, lhs, rhs, tmp1});
+ I32x4DotI8x16I7x16AddS(dst.fp(), lhs.fp(), rhs.fp(), acc.fp(), tmp1.fp(),
+ tmp2.fp());
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3306,7 +3289,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -3474,7 +3457,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+ liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4173,7 +4156,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- TurboAssembler::AssertUnreachable(reason);
+ MacroAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index df97940aa4..c930b588a6 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -434,6 +434,7 @@ struct ManagedData {
void StoreImpl::SetHostInfo(i::Handle<i::Object> object, void* info,
void (*finalizer)(void*)) {
+ v8::Isolate::Scope isolate_scope(isolate());
i::HandleScope scope(i_isolate());
// Ideally we would specify the total size kept alive by {info} here,
// but all we get from the embedder is a {void*}, so our best estimate
@@ -484,6 +485,7 @@ auto Store::make(Engine*) -> own<Store> {
// and hence must not be called by anything reachable via this file.
{
+ v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
// Create context.
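
The c-api.cc changes all apply one pattern: enter the {v8::Isolate} before opening a handle scope or touching V8 objects, so the C API behaves even when the embedder has not entered the isolate itself. A minimal sketch using only the public embedder API:

    #include <v8.h>

    // Sketch only: enter the isolate, then open a handle scope, then do work.
    void WithIsolateEntered(v8::Isolate* isolate) {
      v8::Isolate::Scope isolate_scope(isolate);
      v8::HandleScope handle_scope(isolate);
      // ... create contexts and handles, call into V8 ...
    }
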
@@ -887,6 +889,7 @@ class RefImpl {
RefImpl* self = new (std::nothrow) RefImpl();
if (!self) return nullptr;
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
self->val_ = isolate->global_handles()->Create(*obj);
return make_own(seal<Ref>(self));
}
@@ -1000,6 +1003,7 @@ auto Trap::copy() const -> own<Trap> { return impl(this)->copy(); }
auto Trap::make(Store* store_abs, const Message& message) -> own<Trap> {
auto store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope handle_scope(isolate);
i::Handle<i::String> string = VecToString(isolate, message);
i::Handle<i::JSObject> exception =
@@ -1012,6 +1016,7 @@ auto Trap::make(Store* store_abs, const Message& message) -> own<Trap> {
auto Trap::message() const -> Message {
auto isolate = impl(this)->isolate();
+ v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate());
i::HandleScope handle_scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -1085,6 +1090,7 @@ auto Foreign::copy() const -> own<Foreign> { return impl(this)->copy(); }
auto Foreign::make(Store* store_abs) -> own<Foreign> {
StoreImpl* store = impl(store_abs);
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
@@ -1116,6 +1122,7 @@ auto Module::validate(Store* store_abs, const vec<byte_t>& binary) -> bool {
auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module> {
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope scope(isolate);
CheckAndHandleInterrupts(isolate);
i::wasm::ModuleWireBytes bytes(
@@ -1173,13 +1180,13 @@ auto Module::exports() const -> ownvec<ExportType> {
return ExportsImpl(impl(this)->v8_object());
}
-// We serialize the state of the module when calling this method; an arbitrary
-// number of functions can be tiered up to TurboFan, and only those will be
-// serialized.
-// The caller is responsible for "warming up" the module before serializing.
+// We tier up all functions to TurboFan, and then serialize all TurboFan code.
+// If no TurboFan code existed before calling this function, then the call to
+// {serialize} may take a long time.
auto Module::serialize() const -> vec<byte_t> {
i::wasm::NativeModule* native_module =
impl(this)->v8_object()->native_module();
+ native_module->compilation_state()->TierUpAllFunctions();
v8::base::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
size_t binary_size = wire_bytes.size();
i::wasm::WasmSerializer serializer(native_module);
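
The serialized module produced here is an unsigned-LEB128 byte count followed by the wire bytes, with the TurboFan code appended when serialization succeeds. A standalone sketch of the LEB128 encoding that {LEBHelper::write_u64v} performs, assuming standard unsigned LEB128 as used throughout the wasm binary format:

    #include <cstdint>
    #include <vector>

    void WriteU64LEB(std::vector<uint8_t>* out, uint64_t value) {
      do {
        uint8_t byte = value & 0x7F;
        value >>= 7;
        if (value != 0) byte |= 0x80;  // continuation bit: more bytes follow
        out->push_back(byte);
      } while (value != 0);
    }
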
@@ -1194,8 +1201,10 @@ auto Module::serialize() const -> vec<byte_t> {
ptr += binary_size;
if (!serializer.SerializeNativeModule(
{reinterpret_cast<uint8_t*>(ptr), serial_size})) {
- // Serialization failed, because no TurboFan code is present yet. In this
- // case, the serialized module just contains the wire bytes.
+ // Serialization fails if no TurboFan code is present. This may happen
+ // because the module does not have any functions, or because another thread
+ // modifies the {NativeModule} concurrently. In this case, the serialized
+ // module just contains the wire bytes.
buffer = vec<byte_t>::make_uninitialized(size_size + binary_size);
byte_t* ptr = buffer.get();
i::wasm::LEBHelper::write_u64v(reinterpret_cast<uint8_t**>(&ptr),
@@ -1209,6 +1218,7 @@ auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
-> own<Module> {
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope handle_scope(isolate);
const byte_t* ptr = serialized.get();
uint64_t binary_size = ReadLebU64(&ptr);
@@ -1440,6 +1450,7 @@ constexpr i::wasm::ValueType SignatureHelper::kMarker;
auto make_func(Store* store_abs, FuncData* data) -> own<Func> {
auto store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope handle_scope(isolate);
CheckAndHandleInterrupts(isolate);
i::Handle<i::Managed<FuncData>> embedder_data =
@@ -1532,7 +1543,7 @@ void PrepareFunctionData(i::Isolate* isolate,
return;
}
// Compile wrapper code.
- i::Handle<i::CodeT> wrapper_code =
+ i::Handle<i::Code> wrapper_code =
i::compiler::CompileCWasmEntry(isolate, sig, module);
function_data->set_c_wrapper_code(*wrapper_code);
// Compute packed args size.
@@ -1649,6 +1660,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
auto func = impl(this);
auto store = func->store();
auto isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope handle_scope(isolate);
i::Object raw_function_data =
func->v8_object()->shared().function_data(v8::kAcquireLoad);
@@ -1668,7 +1680,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
const i::wasm::FunctionSig* sig =
instance->module()->functions[function_index].sig;
PrepareFunctionData(isolate, function_data, sig, instance->module());
- i::Handle<i::CodeT> wrapper_code(function_data->c_wrapper_code(), isolate);
+ i::Handle<i::Code> wrapper_code(function_data->c_wrapper_code(), isolate);
i::Address call_target = function_data->internal().call_target(isolate);
i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
@@ -1717,8 +1729,11 @@ i::Address FuncData::v8_callback(i::Address host_data_foreign,
i::Managed<FuncData>::cast(i::Object(host_data_foreign)).raw();
StoreImpl* store = impl(self->store);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope scope(isolate);
+ isolate->set_context(*v8::Utils::OpenHandle(*store->context()));
+
const ownvec<ValType>& param_types = self->type->params();
const ownvec<ValType>& result_types = self->type->results();
@@ -1816,6 +1831,7 @@ auto Global::copy() const -> own<Global> { return impl(this)->copy(); }
auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
-> own<Global> {
StoreImpl* store = impl(store_abs);
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
CheckAndHandleInterrupts(isolate);
@@ -1861,12 +1877,16 @@ auto Global::get() const -> Val {
// TODO(7748): Handle types other than funcref and externref if needed.
StoreImpl* store = impl(this)->store();
i::HandleScope scope(store->i_isolate());
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::Handle<i::Object> result = v8_global->GetRef();
if (result->IsWasmInternalFunction()) {
result =
handle(i::Handle<i::WasmInternalFunction>::cast(result)->external(),
v8_global->GetIsolate());
}
+ if (result->IsWasmNull()) {
+ result = v8_global->GetIsolate()->factory()->null_value();
+ }
return Val(V8RefValueToWasm(store, result));
}
case i::wasm::kS128:
@@ -1882,6 +1902,7 @@ auto Global::get() const -> Val {
}
void Global::set(const Val& val) {
+ v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate());
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
switch (val.kind()) {
case I32:
@@ -1926,6 +1947,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
-> own<Table> {
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope scope(isolate);
CheckAndHandleInterrupts(isolate);
@@ -2004,6 +2026,9 @@ auto Table::get(size_t index) const -> own<Ref> {
result = handle(
i::Handle<i::WasmInternalFunction>::cast(result)->external(), isolate);
}
+ if (result->IsWasmNull()) {
+ result = isolate->factory()->null_value();
+ }
DCHECK(result->IsNull(isolate) || result->IsJSReceiver());
return V8RefValueToWasm(impl(this)->store(), result);
}
@@ -2012,6 +2037,7 @@ auto Table::set(size_t index, const Ref* ref) -> bool {
i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
if (index >= static_cast<size_t>(table->current_length())) return false;
i::Isolate* isolate = table->GetIsolate();
+ v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate());
i::HandleScope handle_scope(isolate);
i::Handle<i::Object> obj = WasmRefToV8(isolate, ref);
const char* error_message;
@@ -2032,6 +2058,7 @@ auto Table::size() const -> size_t {
auto Table::grow(size_t delta, const Ref* ref) -> bool {
i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
i::Isolate* isolate = table->GetIsolate();
+ v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate());
i::HandleScope scope(isolate);
i::Handle<i::Object> obj = WasmRefToV8(isolate, ref);
const char* error_message;
@@ -2058,6 +2085,7 @@ auto Memory::copy() const -> own<Memory> { return impl(this)->copy(); }
auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> {
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope scope(isolate);
CheckAndHandleInterrupts(isolate);
@@ -2108,6 +2136,7 @@ auto Memory::size() const -> pages_t {
auto Memory::grow(pages_t delta) -> bool {
i::Handle<i::WasmMemoryObject> memory = impl(this)->v8_object();
i::Isolate* isolate = memory->GetIsolate();
+ v8::Isolate::Scope isolate_scope(impl(this)->store()->isolate());
i::HandleScope handle_scope(isolate);
int32_t old = i::WasmMemoryObject::Grow(isolate, memory, delta);
return old != -1;
@@ -2129,6 +2158,7 @@ own<Instance> Instance::make(Store* store_abs, const Module* module_abs,
StoreImpl* store = impl(store_abs);
const implement<Module>::type* module = impl(module_abs);
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope handle_scope(isolate);
CheckAndHandleInterrupts(isolate);
@@ -2199,6 +2229,7 @@ auto Instance::exports() const -> ownvec<Extern> {
const implement<Instance>::type* instance = impl(this);
StoreImpl* store = instance->store();
i::Isolate* isolate = store->i_isolate();
+ v8::Isolate::Scope isolate_scope(store->isolate());
i::HandleScope handle_scope(isolate);
CheckAndHandleInterrupts(isolate);
i::Handle<i::WasmInstanceObject> instance_obj = instance->v8_object();
diff --git a/deps/v8/src/wasm/canonical-types.cc b/deps/v8/src/wasm/canonical-types.cc
index c5cb34b54c..fc50d6ec54 100644
--- a/deps/v8/src/wasm/canonical-types.cc
+++ b/deps/v8/src/wasm/canonical-types.cc
@@ -15,11 +15,16 @@ TypeCanonicalizer* GetTypeCanonicalizer() {
}
void TypeCanonicalizer::AddRecursiveGroup(WasmModule* module, uint32_t size) {
+ AddRecursiveGroup(module, size,
+ static_cast<uint32_t>(module->types.size() - size));
+}
+
+void TypeCanonicalizer::AddRecursiveGroup(WasmModule* module, uint32_t size,
+ uint32_t start_index) {
// Multiple threads could try to register recursive groups concurrently.
// TODO(manoskouk): Investigate if we can fine-grain the synchronization.
base::MutexGuard mutex_guard(&mutex_);
- DCHECK_GE(module->types.size(), size);
- uint32_t start_index = static_cast<uint32_t>(module->types.size()) - size;
+ DCHECK_GE(module->types.size(), start_index + size);
CanonicalGroup group;
group.types.resize(size);
for (uint32_t i = 0; i < size; i++) {
@@ -63,7 +68,8 @@ uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) {
#endif
CanonicalGroup group;
group.types.resize(1);
- group.types[0].type_def = TypeDefinition(sig, kNoSuperType);
+ group.types[0].type_def =
+ TypeDefinition(sig, kNoSuperType, v8_flags.wasm_final_types);
group.types[0].is_relative_supertype = false;
int canonical_index = FindCanonicalGroup(group);
if (canonical_index < 0) {
@@ -75,7 +81,8 @@ uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) {
for (auto type : sig->returns()) builder.AddReturn(type);
for (auto type : sig->parameters()) builder.AddParam(type);
const FunctionSig* allocated_sig = builder.Build();
- group.types[0].type_def = TypeDefinition(allocated_sig, kNoSuperType);
+ group.types[0].type_def =
+ TypeDefinition(allocated_sig, kNoSuperType, v8_flags.wasm_final_types);
group.types[0].is_relative_supertype = false;
canonical_groups_.emplace(group, canonical_index);
canonical_supertypes_.emplace_back(kNoSuperType);
@@ -95,23 +102,28 @@ ValueType TypeCanonicalizer::CanonicalizeValueType(
module->isorecursive_canonical_type_ids[type.ref_index()]);
}
-bool TypeCanonicalizer::IsCanonicalSubtype(uint32_t sub_index,
- uint32_t super_index,
- const WasmModule* sub_module,
- const WasmModule* super_module) {
+bool TypeCanonicalizer::IsCanonicalSubtype(uint32_t canonical_sub_index,
+ uint32_t canonical_super_index) {
// Multiple threads could try to register and access recursive groups
// concurrently.
// TODO(manoskouk): Investigate if we can improve this synchronization.
base::MutexGuard mutex_guard(&mutex_);
+ while (canonical_sub_index != kNoSuperType) {
+ if (canonical_sub_index == canonical_super_index) return true;
+ canonical_sub_index = canonical_supertypes_[canonical_sub_index];
+ }
+ return false;
+}
+
+bool TypeCanonicalizer::IsCanonicalSubtype(uint32_t sub_index,
+ uint32_t super_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module) {
uint32_t canonical_super =
super_module->isorecursive_canonical_type_ids[super_index];
uint32_t canonical_sub =
sub_module->isorecursive_canonical_type_ids[sub_index];
- while (canonical_sub != kNoSuperType) {
- if (canonical_sub == canonical_super) return true;
- canonical_sub = canonical_supertypes_[canonical_sub];
- }
- return false;
+ return IsCanonicalSubtype(canonical_sub, canonical_super);
}
TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef(
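
The subtype check is now split in two: the module-level overload maps both indices through {isorecursive_canonical_type_ids} and defers to a new canonical-index overload, which walks the supertype chain. A plain-C++ sketch of that walk, with a vector standing in for {canonical_supertypes_} and a stand-in sentinel for {kNoSuperType}:

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kNoSuperTypeSentinel = ~uint32_t{0};  // stand-in value

    bool IsCanonicalSubtype(uint32_t sub, uint32_t super,
                            const std::vector<uint32_t>& canonical_supertypes) {
      while (sub != kNoSuperTypeSentinel) {
        if (sub == super) return true;
        sub = canonical_supertypes[sub];  // climb one level up the chain
      }
      return false;
    }
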
@@ -140,7 +152,8 @@ TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef(
builder.AddParam(
CanonicalizeValueType(module, param, recursive_group_start));
}
- result = TypeDefinition(builder.Build(), canonical_supertype);
+ result =
+ TypeDefinition(builder.Build(), canonical_supertype, type.is_final);
break;
}
case TypeDefinition::kStruct: {
@@ -149,9 +162,13 @@ TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef(
for (uint32_t i = 0; i < original_type->field_count(); i++) {
builder.AddField(CanonicalizeValueType(module, original_type->field(i),
recursive_group_start),
- original_type->mutability(i));
+ original_type->mutability(i),
+ original_type->field_offset(i));
}
- result = TypeDefinition(builder.Build(), canonical_supertype);
+ builder.set_total_fields_size(original_type->total_fields_size());
+ result = TypeDefinition(
+ builder.Build(StructType::Builder::kUseProvidedOffsets),
+ canonical_supertype, type.is_final);
break;
}
case TypeDefinition::kArray: {
@@ -159,7 +176,7 @@ TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef(
module, type.array_type->element_type(), recursive_group_start);
result = TypeDefinition(
zone_.New<ArrayType>(element_type, type.array_type->mutability()),
- canonical_supertype);
+ canonical_supertype, type.is_final);
break;
}
}
diff --git a/deps/v8/src/wasm/canonical-types.h b/deps/v8/src/wasm/canonical-types.h
index 91f0576ced..1750ebba9c 100644
--- a/deps/v8/src/wasm/canonical-types.h
+++ b/deps/v8/src/wasm/canonical-types.h
@@ -38,9 +38,13 @@ class TypeCanonicalizer {
TypeCanonicalizer(TypeCanonicalizer&& other) = delete;
TypeCanonicalizer& operator=(TypeCanonicalizer&& other) = delete;
- // Registers the last {size} types of {module} as a recursive group, and
- // possibly canonicalizes it if an identical one has been found.
- // Modifies {module->isorecursive_canonical_type_ids}.
+ // Registers {size} types of {module} as a recursive group, starting at
+ // {start_index}, and possibly canonicalizes it if an identical one has been
+ // found. Modifies {module->isorecursive_canonical_type_ids}.
+ V8_EXPORT_PRIVATE void AddRecursiveGroup(WasmModule* module, uint32_t size,
+ uint32_t start_index);
+
+ // Same as above, except it registers the last {size} types in the module.
V8_EXPORT_PRIVATE void AddRecursiveGroup(WasmModule* module, uint32_t size);
// Adds a module-independent signature as a recursive group, and canonicalizes
@@ -48,6 +52,11 @@ class TypeCanonicalizer {
// signature.
V8_EXPORT_PRIVATE uint32_t AddRecursiveGroup(const FunctionSig* sig);
+ // Returns if {canonical_sub_index} is a canonical subtype of
+ // {canonical_super_index}.
+ V8_EXPORT_PRIVATE bool IsCanonicalSubtype(uint32_t canonical_sub_index,
+ uint32_t canonical_super_index);
+
// Returns if the type at {sub_index} in {sub_module} is a subtype of the
// type at {super_index} in {super_module} after canonicalization.
V8_EXPORT_PRIVATE bool IsCanonicalSubtype(uint32_t sub_index,
@@ -74,6 +83,8 @@ class TypeCanonicalizer {
// TODO(manoskouk): Improve this.
size_t hash_value() const {
return base::hash_combine(base::hash_value(type_def.kind),
+ base::hash_value(type_def.supertype),
+ base::hash_value(type_def.is_final),
base::hash_value(is_relative_supertype));
}
};
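The hash now folds the supertype and the is_final flag into the key, so two type definitions that differ only in finality or supertype no longer hash identically by construction. A generic sketch of the same composite-hash idea with the standard library (base::hash_combine is V8-internal; the boost-style mix below is only an illustration):

#include <cstddef>
#include <cstdint>
#include <functional>

// Boost-style combine step; stands in for base::hash_combine here.
inline size_t HashCombine(size_t seed, size_t value) {
  return seed ^ (value + 0x9e3779b97f4a7c15ULL + (seed << 6) + (seed >> 2));
}

struct TypeKey {
  int kind;
  uint32_t supertype;
  bool is_final;
  bool is_relative_supertype;
};

size_t HashTypeKey(const TypeKey& key) {
  size_t h = std::hash<int>{}(key.kind);
  h = HashCombine(h, std::hash<uint32_t>{}(key.supertype));
  h = HashCombine(h, std::hash<bool>{}(key.is_final));
  h = HashCombine(h, std::hash<bool>{}(key.is_relative_supertype));
  return h;
}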
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
index a9e76e2f1f..bac1002a28 100644
--- a/deps/v8/src/wasm/code-space-access.cc
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -12,46 +12,18 @@ namespace v8 {
namespace internal {
namespace wasm {
-namespace {
-// For PKU and if MAP_JIT is available, the CodeSpaceWriteScope does not
-// actually make use of the supplied {NativeModule}. In fact, there are
-// situations where we can't provide a specific {NativeModule} to the scope. For
-// those situations, we use this dummy pointer instead.
-NativeModule* GetDummyNativeModule() {
- static struct alignas(NativeModule) DummyNativeModule {
- char content;
- } dummy_native_module;
- return reinterpret_cast<NativeModule*>(&dummy_native_module);
-}
-} // namespace
-
-thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
- nullptr;
+thread_local int CodeSpaceWriteScope::scope_depth_ = 0;
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
-CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
- : previous_native_module_(current_native_module_) {
- if (!native_module) {
- // Passing in a {nullptr} is OK if we don't use that pointer anyway.
- // Internally, we need a non-nullptr though to know whether a scope is
- // already open from looking at {current_native_module_}.
- DCHECK(!SwitchingPerNativeModule());
- native_module = GetDummyNativeModule();
- }
- if (previous_native_module_ == native_module) return;
- current_native_module_ = native_module;
- if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {
- SetWritable();
- }
+CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module) {
+ DCHECK_LE(0, scope_depth_);
+ if (++scope_depth_ == 1) SetWritable();
}
CodeSpaceWriteScope::~CodeSpaceWriteScope() {
- if (previous_native_module_ == current_native_module_) return;
- if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {
- SetExecutable();
- }
- current_native_module_ = previous_native_module_;
+ DCHECK_LT(0, scope_depth_);
+ if (--scope_depth_ == 0) SetExecutable();
}
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
@@ -64,17 +36,12 @@ void CodeSpaceWriteScope::SetExecutable() {
RwxMemoryWriteScope::SetExecutable();
}
-// static
-bool CodeSpaceWriteScope::SwitchingPerNativeModule() { return false; }
-
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
// static
void CodeSpaceWriteScope::SetWritable() {
if (WasmCodeManager::MemoryProtectionKeysEnabled()) {
RwxMemoryWriteScope::SetWritable();
- } else if (v8_flags.wasm_write_protect_code_memory) {
- current_native_module_->AddWriter();
}
}
@@ -83,17 +50,9 @@ void CodeSpaceWriteScope::SetExecutable() {
if (WasmCodeManager::MemoryProtectionKeysEnabled()) {
DCHECK(v8_flags.wasm_memory_protection_keys);
RwxMemoryWriteScope::SetExecutable();
- } else if (v8_flags.wasm_write_protect_code_memory) {
- current_native_module_->RemoveWriter();
}
}
-// static
-bool CodeSpaceWriteScope::SwitchingPerNativeModule() {
- return !WasmCodeManager::MemoryProtectionKeysEnabled() &&
- v8_flags.wasm_write_protect_code_memory;
-}
-
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
} // namespace wasm
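With the per-module bookkeeping gone, CodeSpaceWriteScope reduces to a thread-local nesting counter: only the outermost scope on a thread flips the permission, and nested scopes are no-ops. A self-contained sketch of that pattern, with SetWritable/SetExecutable as placeholders for whatever permission switch the platform provides (this mirrors the shape of the change, not the real V8 class):

#include <cassert>

// Placeholders for the platform-specific permission switches.
void SetWritable();
void SetExecutable();

class CodeWriteScope {
 public:
  CodeWriteScope() {
    assert(depth_ >= 0);
    if (++depth_ == 1) SetWritable();    // only the outermost scope switches
  }
  ~CodeWriteScope() {
    assert(depth_ > 0);
    if (--depth_ == 0) SetExecutable();  // restore on leaving the last scope
  }
  CodeWriteScope(const CodeWriteScope&) = delete;
  CodeWriteScope& operator=(const CodeWriteScope&) = delete;

  static bool IsInScope() { return depth_ != 0; }

 private:
  static thread_local int depth_;
};

thread_local int CodeWriteScope::depth_ = 0;

Opening two such scopes back to back on one thread calls SetWritable and SetExecutable exactly once each, which is the point of tracking a depth rather than a per-module pointer.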
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index 659361612c..fd02e428dc 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -49,27 +49,16 @@ class V8_NODISCARD CodeSpaceWriteScope final {
CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete;
CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
- static bool IsInScope() { return current_native_module_ != nullptr; }
+ static bool IsInScope() {
+ DCHECK_LE(0, scope_depth_);
+ return scope_depth_ != 0;
+ }
private:
- // The M1 implementation knows implicitly from the {MAP_JIT} flag during
- // allocation which region to switch permissions for. On non-M1 hardware
- // without memory protection key support, we need the code space from the
- // {NativeModule}.
- static thread_local NativeModule* current_native_module_;
+ static thread_local int scope_depth_;
- // {SetWritable} and {SetExecutable} implicitly operate on
- // {current_native_module_} (for mprotect-based protection).
static void SetWritable();
static void SetExecutable();
-
- // Returns {true} if switching permissions happens on a per-module level, and
- // not globally (like for MAP_JIT and PKU).
- static bool SwitchingPerNativeModule();
-
- // Save the previous module to put it back in {current_native_module_} when
- // exiting this scope.
- NativeModule* const previous_native_module_;
};
} // namespace v8::internal::wasm
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index be84dd58f7..63c2533dfa 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -64,14 +64,6 @@ struct CompilationEnv {
// be generated differently.
const RuntimeExceptionSupport runtime_exception_support;
- // The smallest size of any memory that could be used with this module, in
- // bytes.
- const uintptr_t min_memory_size;
-
- // The largest size of any memory that could be used with this module, in
- // bytes.
- const uintptr_t max_memory_size;
-
// Features enabled for this compilation.
const WasmFeatures enabled_features;
@@ -85,25 +77,8 @@ struct CompilationEnv {
: module(module),
bounds_checks(bounds_checks),
runtime_exception_support(runtime_exception_support),
- min_memory_size(MinPages(module) * kWasmPageSize),
- max_memory_size(MaxPages(module) * kWasmPageSize),
enabled_features(enabled_features),
dynamic_tiering(dynamic_tiering) {}
-
- static constexpr uintptr_t MinPages(const WasmModule* module) {
- if (!module) return 0;
- const uintptr_t platform_max_pages =
- module->is_memory64 ? kV8MaxWasmMemory64Pages : kV8MaxWasmMemory32Pages;
- return std::min(platform_max_pages, uintptr_t{module->initial_pages});
- }
-
- static constexpr uintptr_t MaxPages(const WasmModule* module) {
- if (!module) return kV8MaxWasmMemory32Pages;
- const uintptr_t platform_max_pages =
- module->is_memory64 ? kV8MaxWasmMemory64Pages : kV8MaxWasmMemory32Pages;
- if (!module->has_maximum_pages) return platform_max_pages;
- return std::min(platform_max_pages, uintptr_t{module->maximum_pages});
- }
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
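For reference, the removed MinPages/MaxPages helpers clamped the module's declared page counts against the per-address-space platform limit and converted them to bytes. A sketch of that computation, with illustrative constants and 64-bit arithmetic standing in for the real kV8MaxWasmMemory{32,64}Pages, kWasmPageSize and uintptr_t types:

#include <algorithm>
#include <cstdint>

// Illustrative values only; the real limits live in wasm-limits.h.
constexpr uint64_t kPageSize = 0x10000;       // 64 KiB wasm page
constexpr uint64_t kMaxPages32 = 65536;       // example 32-bit limit
constexpr uint64_t kMaxPages64 = uint64_t{1} << 32;  // example 64-bit limit

struct ModuleMemory {
  bool is_memory64;
  bool has_maximum_pages;
  uint32_t initial_pages;
  uint32_t maximum_pages;
};

uint64_t MinMemorySize(const ModuleMemory& m) {
  uint64_t platform_max = m.is_memory64 ? kMaxPages64 : kMaxPages32;
  return std::min(platform_max, uint64_t{m.initial_pages}) * kPageSize;
}

uint64_t MaxMemorySize(const ModuleMemory& m) {
  uint64_t platform_max = m.is_memory64 ? kMaxPages64 : kMaxPages32;
  uint64_t pages = m.has_maximum_pages
                       ? std::min(platform_max, uint64_t{m.maximum_pages})
                       : platform_max;
  return pages * kPageSize;
}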
@@ -124,7 +99,6 @@ enum class CompilationEvent : uint8_t {
kFinishedExportWrappers,
kFinishedCompilationChunk,
kFailedCompilation,
- kFinishedRecompilation
};
class V8_EXPORT_PRIVATE CompilationEventCallback {
@@ -173,9 +147,16 @@ class V8_EXPORT_PRIVATE CompilationState {
// Set a higher priority for the compilation job.
void SetHighPriority();
+ void TierUpAllFunctions();
+
+ // By default, only one top-tier compilation task will be executed for each
+ // function. These functions allow resetting that counter, to be used when
+ // optimized code is intentionally thrown away and should be re-created.
+ void AllowAnotherTopTierJob(uint32_t func_index);
+ void AllowAnotherTopTierJobForAllFunctions();
+
bool failed() const;
bool baseline_compilation_finished() const;
- bool recompilation_finished() const;
void set_compilation_id(int compilation_id);
diff --git a/deps/v8/src/wasm/constant-expression-interface.cc b/deps/v8/src/wasm/constant-expression-interface.cc
index 00f78a0f4e..e75bbfbb46 100644
--- a/deps/v8/src/wasm/constant-expression-interface.cc
+++ b/deps/v8/src/wasm/constant-expression-interface.cc
@@ -36,12 +36,39 @@ void ConstantExpressionInterface::F64Const(FullDecoder* decoder, Value* result,
}
void ConstantExpressionInterface::S128Const(FullDecoder* decoder,
- Simd128Immediate& imm,
+ const Simd128Immediate& imm,
Value* result) {
if (!generate_value()) return;
result->runtime_value = WasmValue(imm.value, kWasmS128);
}
+void ConstantExpressionInterface::UnOp(FullDecoder* decoder, WasmOpcode opcode,
+ const Value& input, Value* result) {
+ if (!generate_value()) return;
+ switch (opcode) {
+ case kExprExternExternalize: {
+ const char* error_message = nullptr;
+ result->runtime_value = WasmValue(
+ WasmToJSObject(isolate_, input.runtime_value.to_ref(),
+ input.type.heap_type(), &error_message)
+ .ToHandleChecked(),
+ ValueType::RefMaybeNull(HeapType::kExtern, input.type.nullability()));
+ break;
+ }
+ case kExprExternInternalize: {
+ const char* error_message = nullptr;
+ result->runtime_value = WasmValue(
+ JSToWasmObject(isolate_, input.runtime_value.to_ref(), kWasmAnyRef,
+ &error_message)
+ .ToHandleChecked(),
+ ValueType::RefMaybeNull(HeapType::kAny, input.type.nullability()));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
void ConstantExpressionInterface::BinOp(FullDecoder* decoder, WasmOpcode opcode,
const Value& lhs, const Value& rhs,
Value* result) {
@@ -79,7 +106,11 @@ void ConstantExpressionInterface::BinOp(FullDecoder* decoder, WasmOpcode opcode,
void ConstantExpressionInterface::RefNull(FullDecoder* decoder, ValueType type,
Value* result) {
if (!generate_value()) return;
- result->runtime_value = WasmValue(isolate_->factory()->null_value(), type);
+ result->runtime_value =
+ WasmValue(type == kWasmExternRef || type == kWasmNullExternRef
+ ? Handle<Object>::cast(isolate_->factory()->null_value())
+ : Handle<Object>::cast(isolate_->factory()->wasm_null()),
+ type);
}
void ConstantExpressionInterface::RefFunc(FullDecoder* decoder,
@@ -236,6 +267,9 @@ void ConstantExpressionInterface::ArrayNewFixed(
ValueType::Ref(HeapType(imm.index)));
}
+// TODO(7748): These expressions are non-constant for now. There are plans to
+// make them constant in the future, so we retain the required infrastructure
+// here.
void ConstantExpressionInterface::ArrayNewSegment(
FullDecoder* decoder, const ArrayIndexImmediate& array_imm,
const IndexImmediate& segment_imm, const Value& offset_value,
@@ -276,7 +310,7 @@ void ConstantExpressionInterface::ArrayNewSegment(
if (!base::IsInBounds<size_t>(
offset, length,
elem_segment->status == WasmElemSegment::kStatusPassive
- ? elem_segment->entries.size()
+ ? elem_segment->element_count
: 0)) {
error_ = MessageTemplate::kWasmTrapElementSegmentOutOfBounds;
return;
@@ -284,7 +318,7 @@ void ConstantExpressionInterface::ArrayNewSegment(
Handle<Object> array_object =
isolate_->factory()->NewWasmArrayFromElementSegment(
- instance_, elem_segment, offset, length,
+ instance_, segment_imm.index, offset, length,
Handle<Map>::cast(rtt.runtime_value.to_ref()));
if (array_object->IsSmi()) {
// A smi result stands for an error code.
@@ -306,9 +340,13 @@ void ConstantExpressionInterface::RttCanon(FullDecoder* decoder,
void ConstantExpressionInterface::I31New(FullDecoder* decoder,
const Value& input, Value* result) {
if (!generate_value()) return;
- Address raw = static_cast<Address>(input.runtime_value.to_i32());
- // 33 = 1 (Smi tag) + 31 (Smi shift) + 1 (i31ref high-bit truncation).
- Address shifted = raw << (SmiValuesAre31Bits() ? 1 : 33);
+ Address raw = input.runtime_value.to_i32();
+ // We have to craft the Smi manually because we accept out-of-bounds inputs.
+ // For 32-bit Smi builds, set the topmost bit to sign-extend the second bit.
+ // This way, interpretation in JS (if this value escapes there) will be the
+ // same as i31.get_s.
+ intptr_t shifted =
+ static_cast<intptr_t>(raw << (kSmiTagSize + kSmiShiftSize + 1)) >> 1;
result->runtime_value =
WasmValue(handle(Smi(shifted), isolate_), wasm::kWasmI31Ref.AsNonNull());
}
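The interesting part of the new I31New code is the sign handling: only the low 31 bits of the input are meaningful, and bit 30 has to be propagated into the upper bits so that a later i31.get_s (or a JS view of the resulting Smi) observes the same signed value. Stripped of the Smi tag and shift details, the trick is the usual shift-left-then-arithmetic-shift-right pair; a small illustration (assumes two's-complement arithmetic right shift, as the real code does):

#include <cassert>
#include <cstdint>

// Keep the low 31 bits of `raw` and sign-extend bit 30 into bit 31, which is
// what i31.get_s observes. The real code additionally folds in the Smi tag
// and shift; this shows only the sign-extension step.
int32_t SignExtendI31(int32_t raw) {
  return static_cast<int32_t>(static_cast<uint32_t>(raw) << 1) >> 1;
}

int main() {
  assert(SignExtendI31(5) == 5);
  assert(SignExtendI31(0x7fffffff) == -1);           // all 31 payload bits set
  assert(SignExtendI31(0x40000000) == -0x40000000);  // bit 30 set -> negative
  return 0;
}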
@@ -316,7 +354,8 @@ void ConstantExpressionInterface::I31New(FullDecoder* decoder,
void ConstantExpressionInterface::DoReturn(FullDecoder* decoder,
uint32_t /*drop_values*/) {
end_found_ = true;
- // End decoding on "end".
+ // End decoding on "end". Note: We need this because we do not know the length
+ // of a constant expression while decoding it.
decoder->set_end(decoder->pc() + 1);
if (generate_value()) {
computed_value_ = decoder->stack_value(1)->runtime_value;
diff --git a/deps/v8/src/wasm/constant-expression.cc b/deps/v8/src/wasm/constant-expression.cc
index 6b6c69ad75..0459d2240a 100644
--- a/deps/v8/src/wasm/constant-expression.cc
+++ b/deps/v8/src/wasm/constant-expression.cc
@@ -35,8 +35,11 @@ ValueOrError EvaluateConstantExpression(Zone* zone, ConstantExpression expr,
case ConstantExpression::kI32Const:
return WasmValue(expr.i32_value());
case ConstantExpression::kRefNull:
- return WasmValue(isolate->factory()->null_value(),
- ValueType::RefNull(expr.repr()));
+ return WasmValue(
+ expected == kWasmExternRef || expected == kWasmNullExternRef
+ ? Handle<Object>::cast(isolate->factory()->null_value())
+ : Handle<Object>::cast(isolate->factory()->wasm_null()),
+ ValueType::RefNull(expr.repr()));
case ConstantExpression::kRefFunc: {
uint32_t index = expr.index();
Handle<Object> value =
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 8e9830b447..3bfb9a1a91 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -38,6 +38,48 @@ namespace wasm {
// A {DecodeResult} only stores the failure / success status, but no data.
using DecodeResult = VoidResult;
+struct WasmFunction;
+
+class ITracer {
+ public:
+ static constexpr ITracer* NoTrace = nullptr;
+
+ // Hooks for extracting byte offsets of things.
+ virtual void TypeOffset(uint32_t offset) = 0;
+ virtual void ImportOffset(uint32_t offset) = 0;
+ virtual void ImportsDone() = 0;
+ virtual void TableOffset(uint32_t offset) = 0;
+ virtual void MemoryOffset(uint32_t offset) = 0;
+ virtual void TagOffset(uint32_t offset) = 0;
+ virtual void GlobalOffset(uint32_t offset) = 0;
+ virtual void StartOffset(uint32_t offset) = 0;
+ virtual void ElementOffset(uint32_t offset) = 0;
+ virtual void DataOffset(uint32_t offset) = 0;
+
+ // Hooks for annotated hex dumps.
+ virtual void Bytes(const byte* start, uint32_t count) = 0;
+
+ virtual void Description(const char* desc) = 0;
+ virtual void Description(const char* desc, size_t length) = 0;
+ virtual void Description(uint32_t number) = 0;
+ virtual void Description(ValueType type) = 0;
+ virtual void Description(HeapType type) = 0;
+ virtual void Description(const FunctionSig* sig) = 0;
+
+ virtual void NextLine() = 0;
+ virtual void NextLineIfFull() = 0;
+ virtual void NextLineIfNonEmpty() = 0;
+
+ virtual void InitializerExpression(const byte* start, const byte* end,
+ ValueType expected_type) = 0;
+ virtual void FunctionBody(const WasmFunction* func, const byte* start) = 0;
+ virtual void FunctionName(uint32_t func_index) = 0;
+ virtual void NameSection(const byte* start, const byte* end,
+ uint32_t offset) = 0;
+
+ virtual ~ITracer() = default;
+};
+
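The tracer hooks are now funneled through a single ITracer interface, and callers pass an ITracer* (possibly ITracer::NoTrace, i.e. nullptr) instead of a template parameter, so every call site null-checks before tracing. A reduced standalone sketch of that nullable-observer pattern, with a three-method interface standing in for the full one above:

#include <cstdint>
#include <cstdio>

using byte = uint8_t;

// Reduced stand-in for the ITracer interface shown in the diff.
class MiniTracer {
 public:
  static constexpr MiniTracer* NoTrace = nullptr;
  virtual void Bytes(const byte* start, uint32_t count) = 0;
  virtual void Description(const char* desc) = 0;
  virtual void NextLine() = 0;
  virtual ~MiniTracer() = default;
};

class StdoutTracer final : public MiniTracer {
 public:
  void Bytes(const byte*, uint32_t count) override {
    std::printf("<%u bytes> ", count);
  }
  void Description(const char* desc) override { std::printf("%s", desc); }
  void NextLine() override { std::printf("\n"); }
};

// Call sites guard every hook, so passing NoTrace costs only a null check.
void ConsumeU8(const byte* pc, MiniTracer* tracer) {
  if (tracer) {
    tracer->Bytes(pc, 1);
    tracer->Description("uint8_t");
    tracer->NextLine();
  }
  // ... actually consume the byte ...
}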
// A helper utility to decode bytes, integers, fields, varints, etc, from
// a buffer of bytes.
class Decoder {
@@ -117,82 +159,81 @@ class Decoder {
return read_little_endian<uint64_t, ValidationTag>(pc, msg);
}
- // Reads a variable-length unsigned integer (little endian).
+ // Reads a variable-length unsigned integer (little endian). Returns the read
+ // value and the number of bytes read.
template <typename ValidationTag>
- uint32_t read_u32v(const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "LEB32") {
- return read_leb<uint32_t, ValidationTag, kNoTrace>(pc, length, name);
+ std::pair<uint32_t, uint32_t> read_u32v(const byte* pc,
+ Name<ValidationTag> name = "LEB32") {
+ return read_leb<uint32_t, ValidationTag, kNoTrace>(pc, name);
}
- // Reads a variable-length signed integer (little endian).
+ // Reads a variable-length signed integer (little endian). Returns the read
+ // value and the number of bytes read.
template <typename ValidationTag>
- int32_t read_i32v(const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "signed LEB32") {
- return read_leb<int32_t, ValidationTag, kNoTrace>(pc, length, name);
+ std::pair<int32_t, uint32_t> read_i32v(
+ const byte* pc, Name<ValidationTag> name = "signed LEB32") {
+ return read_leb<int32_t, ValidationTag, kNoTrace>(pc, name);
}
- // Reads a variable-length unsigned integer (little endian).
+ // Reads a variable-length unsigned integer (little endian). Returns the read
+ // value and the number of bytes read.
template <typename ValidationTag>
- uint64_t read_u64v(const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "LEB64") {
- return read_leb<uint64_t, ValidationTag, kNoTrace>(pc, length, name);
+ std::pair<uint64_t, uint32_t> read_u64v(const byte* pc,
+ Name<ValidationTag> name = "LEB64") {
+ return read_leb<uint64_t, ValidationTag, kNoTrace>(pc, name);
}
- // Reads a variable-length signed integer (little endian).
+ // Reads a variable-length signed integer (little endian). Returns the read
+ // value and the number of bytes read.
template <typename ValidationTag>
- int64_t read_i64v(const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "signed LEB64") {
- return read_leb<int64_t, ValidationTag, kNoTrace>(pc, length, name);
+ std::pair<int64_t, uint32_t> read_i64v(
+ const byte* pc, Name<ValidationTag> name = "signed LEB64") {
+ return read_leb<int64_t, ValidationTag, kNoTrace>(pc, name);
}
- // Reads a variable-length 33-bit signed integer (little endian).
+ // Reads a variable-length 33-bit signed integer (little endian). Returns the
+ // read value and the number of bytes read.
template <typename ValidationTag>
- int64_t read_i33v(const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "signed LEB33") {
- return read_leb<int64_t, ValidationTag, kNoTrace, 33>(pc, length, name);
- }
-
- // Convenient overload for callers who don't care about length.
- template <typename ValidationTag>
- WasmOpcode read_prefixed_opcode(const byte* pc) {
- uint32_t len;
- return read_prefixed_opcode<ValidationTag>(pc, &len);
+ std::pair<int64_t, uint32_t> read_i33v(
+ const byte* pc, Name<ValidationTag> name = "signed LEB33") {
+ return read_leb<int64_t, ValidationTag, kNoTrace, 33>(pc, name);
}
// Reads a prefixed-opcode, possibly with variable-length index.
- // `length` is set to the number of bytes that make up this opcode,
+ // Returns the read opcode and the number of bytes that make up this opcode,
// *including* the prefix byte. For most opcodes, it will be 2.
template <typename ValidationTag>
- WasmOpcode read_prefixed_opcode(
- const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "prefixed opcode") {
- uint32_t index;
-
+ std::pair<WasmOpcode, uint32_t> read_prefixed_opcode(
+ const byte* pc, Name<ValidationTag> name = "prefixed opcode") {
// Prefixed opcodes all use LEB128 encoding.
- index = read_u32v<ValidationTag>(pc + 1, length, "prefixed opcode index");
- *length += 1; // Prefix byte.
+ auto [index, index_length] =
+ read_u32v<ValidationTag>(pc + 1, "prefixed opcode index");
+ uint32_t length = index_length + 1; // 1 for prefix byte.
// Only support opcodes that go up to 0xFFF (when decoded). Anything
// bigger will need more than 2 bytes, and the '<< 12' below will be wrong.
if (ValidationTag::validate && V8_UNLIKELY(index > 0xfff)) {
errorf(pc, "Invalid prefixed opcode %d", index);
- // If size validation fails.
- index = 0;
- *length = 0;
+ // On validation failure we return "unreachable" (opcode 0).
+ static_assert(kExprUnreachable == 0);
+ return {kExprUnreachable, 0};
}
- if (index > 0xff) return static_cast<WasmOpcode>((*pc) << 12 | index);
+ if (index > 0xff) {
+ return {static_cast<WasmOpcode>((*pc) << 12 | index), length};
+ }
- return static_cast<WasmOpcode>((*pc) << 8 | index);
+ return {static_cast<WasmOpcode>((*pc) << 8 | index), length};
}
// Reads an 8-bit unsigned integer (byte) and advances {pc_}.
uint8_t consume_u8(const char* name = "uint8_t") {
return consume_little_endian<uint8_t, kTrace>(name);
}
- template <class Tracer>
- uint8_t consume_u8(const char* name, Tracer& tracer) {
- tracer.Bytes(pc_, sizeof(uint8_t));
- tracer.Description(name);
+ uint8_t consume_u8(const char* name, ITracer* tracer) {
+ if (tracer) {
+ tracer->Bytes(pc_, sizeof(uint8_t));
+ tracer->Description(name);
+ }
return consume_little_endian<uint8_t, kNoTrace>(name);
}
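The read_* family now returns the value together with the number of bytes consumed instead of writing the length through an out-parameter, which lets call sites use structured bindings. A standalone sketch of the calling convention (the reader below is hypothetical and only handles single-byte LEBs to keep the example short):

#include <cstdint>
#include <utility>

using byte = uint8_t;

// Hypothetical reader: returns {value, bytes_read}. Multi-byte LEBs are out
// of scope for this toy version.
std::pair<uint32_t, uint32_t> read_u32v(const byte* pc) {
  if (!(*pc & 0x80)) return {*pc, 1};
  return {0, 0};  // treated as an error here
}

uint32_t consume_two(const byte* pc) {
  auto [first, len1] = read_u32v(pc);         // value and length in one call
  auto [second, len2] = read_u32v(pc + len1);
  return first + second + len1 + len2;
}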
@@ -202,58 +243,56 @@ class Decoder {
}
// Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
- template <class Tracer>
- uint32_t consume_u32(const char* name, Tracer& tracer) {
- tracer.Bytes(pc_, sizeof(uint32_t));
- tracer.Description(name);
+ uint32_t consume_u32(const char* name, ITracer* tracer) {
+ if (tracer) {
+ tracer->Bytes(pc_, sizeof(uint32_t));
+ tracer->Description(name);
+ }
return consume_little_endian<uint32_t, kNoTrace>(name);
}
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = "var_uint32") {
- uint32_t length = 0;
- uint32_t result =
- read_leb<uint32_t, FullValidationTag, kTrace>(pc_, &length, name);
+ auto [result, length] =
+ read_leb<uint32_t, FullValidationTag, kTrace>(pc_, name);
pc_ += length;
return result;
}
- template <class Tracer>
- uint32_t consume_u32v(const char* name, Tracer& tracer) {
- uint32_t length = 0;
- uint32_t result =
- read_leb<uint32_t, FullValidationTag, kNoTrace>(pc_, &length, name);
- tracer.Bytes(pc_, length);
- tracer.Description(name);
+ uint32_t consume_u32v(const char* name, ITracer* tracer) {
+ auto [result, length] =
+ read_leb<uint32_t, FullValidationTag, kNoTrace>(pc_, name);
+ if (tracer) {
+ tracer->Bytes(pc_, length);
+ tracer->Description(name);
+ }
pc_ += length;
return result;
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = "var_int32") {
- uint32_t length = 0;
- int32_t result =
- read_leb<int32_t, FullValidationTag, kTrace>(pc_, &length, name);
+ auto [result, length] =
+ read_leb<int32_t, FullValidationTag, kTrace>(pc_, name);
pc_ += length;
return result;
}
// Reads a LEB128 variable-length unsigned 64-bit integer and advances {pc_}.
- template <class Tracer>
- uint64_t consume_u64v(const char* name, Tracer& tracer) {
- uint32_t length = 0;
- uint64_t result =
- read_leb<uint64_t, FullValidationTag, kNoTrace>(pc_, &length, name);
- tracer.Bytes(pc_, length);
- tracer.Description(name);
+ uint64_t consume_u64v(const char* name, ITracer* tracer) {
+ auto [result, length] =
+ read_leb<uint64_t, FullValidationTag, kNoTrace>(pc_, name);
+ if (tracer) {
+ tracer->Bytes(pc_, length);
+ tracer->Description(name);
+ }
pc_ += length;
return result;
}
// Reads a LEB128 variable-length signed 64-bit integer and advances {pc_}.
int64_t consume_i64v(const char* name = "var_int64") {
- uint32_t length = 0;
- int64_t result =
- read_leb<int64_t, FullValidationTag, kTrace>(pc_, &length, name);
+ auto [result, length] =
+ read_leb<int64_t, FullValidationTag, kTrace>(pc_, name);
pc_ += length;
return result;
}
@@ -268,10 +307,11 @@ class Decoder {
pc_ = end_;
}
}
- template <class Tracer>
- void consume_bytes(uint32_t size, const char* name, Tracer& tracer) {
- tracer.Bytes(pc_, size);
- tracer.Description(name);
+ void consume_bytes(uint32_t size, const char* name, ITracer* tracer) {
+ if (tracer) {
+ tracer->Bytes(pc_, size);
+ tracer->Description(name);
+ }
consume_bytes(size, nullptr);
}
@@ -292,7 +332,7 @@ class Decoder {
// Use this for "boolean validation", i.e. if the error message is not used
// anyway.
- void V8_NOINLINE MarkError() {
+ void V8_NOINLINE V8_PRESERVE_MOST MarkError() {
if (!ok()) return;
error_ = {0, "validation failed"};
onFirstError();
@@ -300,35 +340,34 @@ class Decoder {
// Do not inline error methods. This has measurable impact on validation time,
// see https://crbug.com/910432.
- void V8_NOINLINE error(const char* msg) { errorf(pc_offset(), "%s", msg); }
- void V8_NOINLINE error(const uint8_t* pc, const char* msg) {
+ void V8_NOINLINE V8_PRESERVE_MOST error(const char* msg) {
+ errorf(pc_offset(), "%s", msg);
+ }
+ void V8_NOINLINE V8_PRESERVE_MOST error(const uint8_t* pc, const char* msg) {
errorf(pc_offset(pc), "%s", msg);
}
- void V8_NOINLINE error(uint32_t offset, const char* msg) {
+ void V8_NOINLINE V8_PRESERVE_MOST error(uint32_t offset, const char* msg) {
errorf(offset, "%s", msg);
}
- void V8_NOINLINE PRINTF_FORMAT(2, 3) errorf(const char* format, ...) {
- va_list args;
- va_start(args, format);
- verrorf(pc_offset(), format, args);
- va_end(args);
+ template <typename... Args>
+ void V8_NOINLINE V8_PRESERVE_MOST errorf(const char* format, Args... args) {
+ errorf(pc_offset(), format, args...);
}
- void V8_NOINLINE PRINTF_FORMAT(3, 4)
- errorf(uint32_t offset, const char* format, ...) {
- va_list args;
- va_start(args, format);
- verrorf(offset, format, args);
- va_end(args);
+ template <typename... Args>
+ void V8_NOINLINE V8_PRESERVE_MOST errorf(const uint8_t* pc,
+ const char* format, Args... args) {
+ errorf(pc_offset(pc), format, args...);
}
- void V8_NOINLINE PRINTF_FORMAT(3, 4)
- errorf(const uint8_t* pc, const char* format, ...) {
- va_list args;
- va_start(args, format);
- verrorf(pc_offset(pc), format, args);
- va_end(args);
+ template <typename... Args>
+ void V8_NOINLINE V8_PRESERVE_MOST errorf(uint32_t offset, const char* format,
+ Args... args) {
+ static_assert(
+ sizeof...(Args) > 0,
+ "Use error instead of errorf if the format string has no placeholders");
+ verrorf(offset, format, args...);
}
// Behavior triggered on first error, overridden in subclasses.
@@ -371,8 +410,8 @@ class Decoder {
Reset(bytes.begin(), bytes.end(), buffer_offset);
}
- bool ok() const { return error_.empty(); }
- bool failed() const { return !ok(); }
+ bool ok() const { return !failed(); }
+ bool failed() const { return error_.has_error(); }
bool more() const { return pc_ < end_; }
const WasmError& error() const { return error_; }
@@ -413,12 +452,16 @@ class Decoder {
WasmError error_;
private:
- void verrorf(uint32_t offset, const char* format, va_list args) {
+ void V8_NOINLINE PRINTF_FORMAT(3, 4)
+ verrorf(uint32_t offset, const char* format, ...) {
// Only report the first error.
if (!ok()) return;
constexpr int kMaxErrorMsg = 256;
base::EmbeddedVector<char, kMaxErrorMsg> buffer;
+ va_list args;
+ va_start(args, format);
int len = base::VSNPrintF(buffer, format, args);
+ va_end(args);
CHECK_LT(0, len);
error_ = {offset, {buffer.begin(), static_cast<size_t>(len)}};
onFirstError();
@@ -457,18 +500,19 @@ class Decoder {
return val;
}
+ // The implementation of LEB-decoding; returns the value and the number of
+ // bytes read.
template <typename IntType, typename ValidationTag, TraceFlag trace,
size_t size_in_bits = 8 * sizeof(IntType)>
- V8_INLINE IntType read_leb(const byte* pc, uint32_t* length,
- Name<ValidationTag> name = "varint") {
+ V8_INLINE std::pair<IntType, uint32_t> read_leb(
+ const byte* pc, Name<ValidationTag> name = "varint") {
static_assert(size_in_bits <= 8 * sizeof(IntType),
"leb does not fit in type");
TRACE_IF(trace, " +%u %-20s: ", pc_offset(),
implicit_cast<const char*>(name));
// Fast path for single-byte integers.
- if ((!ValidationTag::validate || V8_LIKELY(pc < end_)) && !(*pc & 0x80)) {
+ if (V8_LIKELY((!ValidationTag::validate || pc < end_) && !(*pc & 0x80))) {
TRACE_IF(trace, "%02x ", *pc);
- *length = 1;
IntType result = *pc;
if (std::is_signed<IntType>::value) {
// Perform sign extension.
@@ -478,25 +522,29 @@ class Decoder {
} else {
TRACE_IF(trace, "= %" PRIu64 "\n", static_cast<uint64_t>(result));
}
- return result;
+ return {result, 1};
}
- return read_leb_slowpath<IntType, ValidationTag, trace, size_in_bits>(
- pc, length, name);
+ auto [result, length] =
+ read_leb_slowpath<IntType, ValidationTag, trace, size_in_bits>(pc,
+ name);
+ V8_ASSUME(length >= 0 && length <= (size_in_bits + 6) / 7);
+ V8_ASSUME(ValidationTag::validate || length >= 1);
+ return {result, length};
}
template <typename IntType, typename ValidationTag, TraceFlag trace,
size_t size_in_bits = 8 * sizeof(IntType)>
- V8_NOINLINE IntType read_leb_slowpath(const byte* pc, uint32_t* length,
- Name<ValidationTag> name) {
+ V8_NOINLINE V8_PRESERVE_MOST std::pair<IntType, uint32_t> read_leb_slowpath(
+ const byte* pc, Name<ValidationTag> name) {
// Create an unrolled LEB decoding function per integer type.
return read_leb_tail<IntType, ValidationTag, trace, size_in_bits, 0>(
- pc, length, name, 0);
+ pc, name, 0);
}
template <typename IntType, typename ValidationTag, TraceFlag trace,
size_t size_in_bits, int byte_index>
- V8_INLINE IntType read_leb_tail(const byte* pc, uint32_t* length,
- Name<ValidationTag> name, IntType result) {
+ V8_INLINE std::pair<IntType, uint32_t> read_leb_tail(
+ const byte* pc, Name<ValidationTag> name, IntType intermediate_result) {
constexpr bool is_signed = std::is_signed<IntType>::value;
constexpr int kMaxLength = (size_in_bits + 6) / 7;
static_assert(byte_index < kMaxLength, "invalid template instantiation");
@@ -509,8 +557,8 @@ class Decoder {
b = *pc;
TRACE_IF(trace, "%02x ", b);
using Unsigned = typename std::make_unsigned<IntType>::type;
- result = result |
- (static_cast<Unsigned>(static_cast<IntType>(b) & 0x7f) << shift);
+ intermediate_result |=
+ (static_cast<Unsigned>(static_cast<IntType>(b) & 0x7f) << shift);
}
if (!is_last_byte && (b & 0x80)) {
// Make sure that we only instantiate the template for valid byte indexes.
@@ -518,9 +566,8 @@ class Decoder {
// following call is unreachable if is_last_byte is false.
constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
return read_leb_tail<IntType, ValidationTag, trace, size_in_bits,
- next_byte_index>(pc + 1, length, name, result);
+ next_byte_index>(pc + 1, name, intermediate_result);
}
- *length = byte_index + (at_end ? 0 : 1);
if (ValidationTag::validate && V8_UNLIKELY(at_end || (b & 0x80))) {
TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
if constexpr (ValidationTag::full_validation) {
@@ -528,8 +575,7 @@ class Decoder {
} else {
MarkError();
}
- result = 0;
- *length = 0;
+ return {0, 0};
}
if constexpr (is_last_byte) {
// A signed-LEB128 must sign-extend the final byte, excluding its
@@ -553,20 +599,21 @@ class Decoder {
} else {
MarkError();
}
- result = 0;
- *length = 0;
+ return {0, 0};
}
}
constexpr int sign_ext_shift =
is_signed ? std::max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
// Perform sign extension.
- result = (result << sign_ext_shift) >> sign_ext_shift;
+ intermediate_result =
+ (intermediate_result << sign_ext_shift) >> sign_ext_shift;
if (trace && is_signed) {
- TRACE("= %" PRIi64 "\n", static_cast<int64_t>(result));
+ TRACE("= %" PRIi64 "\n", static_cast<int64_t>(intermediate_result));
} else if (trace) {
- TRACE("= %" PRIu64 "\n", static_cast<uint64_t>(result));
+ TRACE("= %" PRIu64 "\n", static_cast<uint64_t>(intermediate_result));
}
- return result;
+ const uint32_t length = byte_index + 1;
+ return {intermediate_result, length};
}
};
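read_leb keeps a single-byte fast path inline and pushes the unrolled multi-byte decoding into a V8_PRESERVE_MOST slow path. Independent of the V8 attributes and templates, the shape of such a decoder, a branch on the continuation bit followed by a fallback, can be sketched like this (unsigned 32-bit only, no validation, loop instead of the unrolled tail calls):

#include <cstdint>
#include <utility>

using byte = uint8_t;

std::pair<uint32_t, uint32_t> ReadLebU32Slow(const byte* pc) {
  uint32_t result = 0;
  uint32_t length = 0;
  for (int shift = 0; shift < 35; shift += 7) {
    byte b = pc[length++];
    result |= uint32_t{b & 0x7f} << shift;
    if (!(b & 0x80)) return {result, length};
  }
  return {0, 0};  // malformed: more than 5 bytes
}

inline std::pair<uint32_t, uint32_t> ReadLebU32(const byte* pc) {
  // Fast path: the vast majority of LEBs in real modules are one byte.
  if (!(*pc & 0x80)) return {*pc, 1};
  return ReadLebU32Slow(pc);
}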
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index b92d93de65..dd9e3b0171 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -16,6 +16,7 @@
#include <optional>
+#include "src/base/bounds.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
#include "src/base/v8-fallthrough.h"
@@ -46,11 +47,20 @@ struct WasmTag;
// Return the evaluation of {condition} if {ValidationTag::validate} is true,
// DCHECK that it is true and always return true otherwise.
-#define VALIDATE(condition) \
- (ValidationTag::validate ? V8_LIKELY(condition) : [&] { \
- DCHECK(condition); \
- return true; \
- }())
+// Note that this needs to be a macro, because the "likely" annotation does not
+// survive inlining.
+#ifdef DEBUG
+#define VALIDATE(condition) \
+ (ValidationTag::validate ? V8_LIKELY(condition) \
+ : ValidateAssumeTrue(condition, #condition))
+
+V8_INLINE bool ValidateAssumeTrue(bool condition, const char* message) {
+ DCHECK_WITH_MSG(condition, message);
+ return true;
+}
+#else
+#define VALIDATE(condition) (!ValidationTag::validate || V8_LIKELY(condition))
+#endif
#define CHECK_PROTOTYPE_OPCODE(feat) \
DCHECK(this->module_->origin == kWasmOrigin); \
@@ -166,68 +176,56 @@ static constexpr StoreType GetStoreType(WasmOpcode opcode) {
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
-// Decoder error with explicit PC and format arguments.
+// Decoder error with explicit PC and optional format arguments.
+// Depending on the validation tag and the number of arguments, this forwards to
+// a V8_NOINLINE and V8_PRESERVE_MOST method of the decoder.
template <typename ValidationTag, typename... Args>
-void DecodeError(Decoder* decoder, const byte* pc, const char* str,
- Args&&... args) {
- if constexpr (!ValidationTag::validate) UNREACHABLE();
- static_assert(sizeof...(Args) > 0);
- if constexpr (ValidationTag::full_validation) {
- decoder->errorf(pc, str, std::forward<Args>(args)...);
- } else {
+V8_INLINE void DecodeError(Decoder* decoder, const byte* pc, const char* str,
+ Args&&... args) {
+ // Decode errors can only happen if we are validating; the compiler should
+ // know this e.g. from the VALIDATE macro, but this assumption tells it again
+ // that this path is impossible.
+ V8_ASSUME(ValidationTag::validate);
+ if constexpr (!ValidationTag::full_validation) {
decoder->MarkError();
- }
-}
-
-// Decoder error with explicit PC and no format arguments.
-template <typename ValidationTag>
-void DecodeError(Decoder* decoder, const byte* pc, const char* str) {
- if constexpr (!ValidationTag::validate) UNREACHABLE();
- if constexpr (ValidationTag::full_validation) {
+ } else if constexpr (sizeof...(Args) == 0) {
decoder->error(pc, str);
} else {
- decoder->MarkError();
+ decoder->errorf(pc, str, std::forward<Args>(args)...);
}
}
-// Decoder error without explicit PC, but with format arguments.
+// Decoder error without explicit PC and with optional format arguments.
+// Depending on the validation tag and the number of arguments, this forwards to
+// a V8_NOINLINE and V8_PRESERVE_MOST method of the decoder.
template <typename ValidationTag, typename... Args>
-void DecodeError(Decoder* decoder, const char* str, Args&&... args) {
- if constexpr (!ValidationTag::validate) UNREACHABLE();
- static_assert(sizeof...(Args) > 0);
- if constexpr (ValidationTag::full_validation) {
- decoder->errorf(str, std::forward<Args>(args)...);
- } else {
+V8_INLINE void DecodeError(Decoder* decoder, const char* str, Args&&... args) {
+ // Decode errors can only happen if we are validating; the compiler should
+ // know this e.g. from the VALIDATE macro, but this assumption tells it again
+ // that this path is impossible.
+ V8_ASSUME(ValidationTag::validate);
+ if constexpr (!ValidationTag::full_validation) {
decoder->MarkError();
- }
-}
-
-// Decoder error without explicit PC and without format arguments.
-template <typename ValidationTag>
-void DecodeError(Decoder* decoder, const char* str) {
- if constexpr (!ValidationTag::validate) UNREACHABLE();
- if constexpr (ValidationTag::full_validation) {
+ } else if constexpr (sizeof...(Args) == 0) {
decoder->error(str);
} else {
- decoder->MarkError();
+ decoder->errorf(str, std::forward<Args>(args)...);
}
}
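Both DecodeError overload families collapse into one variadic template that dispatches at compile time: MarkError for the cheap validation mode, error for a plain message, errorf when format arguments are present. The dispatch idiom itself, branching on a compile-time flag and on sizeof...(Args), can be shown in isolation (Reporter is a hypothetical stand-in for the decoder):

#include <cstdio>
#include <utility>

struct Reporter {
  void MarkError() { std::puts("<error>"); }
  void Error(const char* msg) { std::puts(msg); }
  template <typename... Args>
  void Errorf(const char* fmt, Args... args) { std::printf(fmt, args...); }
};

template <bool full_messages, typename... Args>
void Report(Reporter* r, const char* str, Args&&... args) {
  if constexpr (!full_messages) {
    r->MarkError();                    // cheap mode: no message formatting
  } else if constexpr (sizeof...(Args) == 0) {
    r->Error(str);                     // no placeholders: skip printf entirely
  } else {
    r->Errorf(str, std::forward<Args>(args)...);
  }
}

// Usage: Report<true>(&r, "bad index %u\n", 7u); Report<false>(&r, "ignored");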
namespace value_type_reader {
-// If {module} is not null, the read index will be checked against the module's
-// type capacity.
template <typename ValidationTag>
-HeapType read_heap_type(Decoder* decoder, const byte* pc,
- uint32_t* const length, const WasmFeatures& enabled) {
- int64_t heap_index =
- decoder->read_i33v<ValidationTag>(pc, length, "heap type");
+std::pair<HeapType, uint32_t> read_heap_type(Decoder* decoder, const byte* pc,
+ const WasmFeatures& enabled) {
+ auto [heap_index, length] =
+ decoder->read_i33v<ValidationTag>(pc, "heap type");
if (heap_index < 0) {
int64_t min_1_byte_leb128 = -64;
if (!VALIDATE(heap_index >= min_1_byte_leb128)) {
DecodeError<ValidationTag>(decoder, pc, "Unknown heap type %" PRId64,
heap_index);
- return HeapType(HeapType::kBottom);
+ return {HeapType(HeapType::kBottom), length};
}
uint8_t uint_7_mask = 0x7F;
uint8_t code = static_cast<ValueTypeCode>(heap_index) & uint_7_mask;
@@ -249,7 +247,7 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
V8_FALLTHROUGH;
case kExternRefCode:
case kFuncRefCode:
- return HeapType::from_code(code);
+ return {HeapType::from_code(code), length};
case kStringRefCode:
case kStringViewWtf8Code:
case kStringViewWtf16Code:
@@ -260,11 +258,11 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
"--experimental-wasm-stringref",
HeapType::from_code(code).name().c_str());
}
- return HeapType::from_code(code);
+ return {HeapType::from_code(code), length};
default:
DecodeError<ValidationTag>(decoder, pc, "Unknown heap type %" PRId64,
heap_index);
- return HeapType(HeapType::kBottom);
+ return {HeapType(HeapType::kBottom), length};
}
} else {
if (!VALIDATE(enabled.has_typed_funcref())) {
@@ -279,25 +277,21 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
"Type index %u is greater than the maximum number %zu "
"of type definitions supported by V8",
type_index, kV8MaxWasmTypes);
- return HeapType(HeapType::kBottom);
+ return {HeapType(HeapType::kBottom), length};
}
- return HeapType(type_index);
+ return {HeapType(type_index), length};
}
}
// Read a value type starting at address {pc} using {decoder}.
// No bytes are consumed.
-// The length of the read value type is written in {length}.
-// Registers an error for an invalid type only if {ValidationTag::validate} is
-// true.
+// Returns the read value type and the number of bytes read (a.k.a. length).
template <typename ValidationTag>
-ValueType read_value_type(Decoder* decoder, const byte* pc,
- uint32_t* const length, const WasmFeatures& enabled) {
- *length = 1;
+std::pair<ValueType, uint32_t> read_value_type(Decoder* decoder, const byte* pc,
+ const WasmFeatures& enabled) {
byte val = decoder->read_u8<ValidationTag>(pc, "value type opcode");
if (!VALIDATE(decoder->ok())) {
- *length = 0;
- return kWasmBottom;
+ return {kWasmBottom, 0};
}
ValueTypeCode code = static_cast<ValueTypeCode>(val);
switch (code) {
@@ -314,12 +308,12 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
decoder, pc,
"invalid value type '%sref', enable with --experimental-wasm-gc",
HeapType::from_code(code).name().c_str());
- return kWasmBottom;
+ return {kWasmBottom, 0};
}
V8_FALLTHROUGH;
case kExternRefCode:
case kFuncRefCode:
- return ValueType::RefNull(HeapType::from_code(code));
+ return {ValueType::RefNull(HeapType::from_code(code)), 1};
case kStringRefCode:
case kStringViewWtf8Code:
case kStringViewWtf16Code:
@@ -329,18 +323,18 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
"invalid value type '%sref', enable with "
"--experimental-wasm-stringref",
HeapType::from_code(code).name().c_str());
- return kWasmBottom;
+ return {kWasmBottom, 0};
}
- return ValueType::RefNull(HeapType::from_code(code));
+ return {ValueType::RefNull(HeapType::from_code(code)), 1};
}
case kI32Code:
- return kWasmI32;
+ return {kWasmI32, 1};
case kI64Code:
- return kWasmI64;
+ return {kWasmI64, 1};
case kF32Code:
- return kWasmF32;
+ return {kWasmF32, 1};
case kF64Code:
- return kWasmF64;
+ return {kWasmF64, 1};
case kRefCode:
case kRefNullCode: {
Nullability nullability = code == kRefNullCode ? kNullable : kNonNullable;
@@ -350,21 +344,21 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
"Invalid type '(ref%s <heaptype>)', enable with "
"--experimental-wasm-typed-funcref",
nullability == kNullable ? " null" : "");
- return kWasmBottom;
+ return {kWasmBottom, 0};
}
- HeapType heap_type =
- read_heap_type<ValidationTag>(decoder, pc + 1, length, enabled);
- *length += 1;
- return heap_type.is_bottom()
- ? kWasmBottom
- : ValueType::RefMaybeNull(heap_type, nullability);
+ auto [heap_type, length] =
+ read_heap_type<ValidationTag>(decoder, pc + 1, enabled);
+ ValueType type = heap_type.is_bottom()
+ ? kWasmBottom
+ : ValueType::RefMaybeNull(heap_type, nullability);
+ return {type, length + 1};
}
case kS128Code: {
if (!VALIDATE(CheckHardwareSupportsSimd())) {
DecodeError<ValidationTag>(decoder, pc, "Wasm SIMD unsupported");
- return kWasmBottom;
+ return {kWasmBottom, 0};
}
- return kWasmS128;
+ return {kWasmS128, 1};
}
// Although these codes are included in ValueTypeCode, they technically
// do not correspond to value types and are only used in specific
@@ -372,17 +366,13 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kVoidCode:
case kI8Code:
case kI16Code:
- if (ValidationTag::validate) {
- DecodeError<ValidationTag>(decoder, pc, "invalid value type 0x%x",
- code);
- }
- return kWasmBottom;
+ // Fall through to the error reporting below.
+ break;
}
// Anything that doesn't match an enumeration value is an invalid type code.
- if (ValidationTag::validate) {
- DecodeError<ValidationTag>(decoder, pc, "invalid value type 0x%x", code);
- }
- return kWasmBottom;
+ if constexpr (!ValidationTag::validate) UNREACHABLE();
+ DecodeError<ValidationTag>(decoder, pc, "invalid value type 0x%x", code);
+ return {kWasmBottom, 0};
}
template <typename ValidationTag>
@@ -392,8 +382,7 @@ bool ValidateHeapType(Decoder* decoder, const byte* pc,
// A {nullptr} module is accepted if we are not validating anyway (e.g. for
// opcode length computation).
if (!ValidationTag::validate && module == nullptr) return true;
- // We use capacity over size so this works mid-DecodeTypeSection.
- if (!VALIDATE(type.ref_index() < module->types.capacity())) {
+ if (!VALIDATE(type.ref_index() < module->types.size())) {
DecodeError<ValidationTag>(decoder, pc, "Type index %u is out of bounds",
type.ref_index());
return false;
@@ -419,7 +408,7 @@ struct ImmI32Immediate {
template <typename ValidationTag>
ImmI32Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
- value = decoder->read_i32v<ValidationTag>(pc, &length, "immi32");
+ std::tie(value, length) = decoder->read_i32v<ValidationTag>(pc, "immi32");
}
};
@@ -429,7 +418,7 @@ struct ImmI64Immediate {
template <typename ValidationTag>
ImmI64Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
- value = decoder->read_i64v<ValidationTag>(pc, &length, "immi64");
+ std::tie(value, length) = decoder->read_i64v<ValidationTag>(pc, "immi64");
}
};
@@ -479,7 +468,7 @@ struct IndexImmediate {
template <typename ValidationTag>
IndexImmediate(Decoder* decoder, const byte* pc, const char* name,
ValidationTag = {}) {
- index = decoder->read_u32v<ValidationTag>(pc, &length, name);
+ std::tie(index, length) = decoder->read_u32v<ValidationTag>(pc, name);
}
};
@@ -544,8 +533,9 @@ struct SelectTypeImmediate {
template <typename ValidationTag>
SelectTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
const byte* pc, ValidationTag = {}) {
- uint8_t num_types = decoder->read_u32v<ValidationTag>(
- pc, &length, "number of select types");
+ uint8_t num_types;
+ std::tie(num_types, length) =
+ decoder->read_u32v<ValidationTag>(pc, "number of select types");
if (!VALIDATE(num_types == 1)) {
DecodeError<ValidationTag>(
decoder, pc,
@@ -553,23 +543,35 @@ struct SelectTypeImmediate {
return;
}
uint32_t type_length;
- type = value_type_reader::read_value_type<ValidationTag>(
- decoder, pc + length, &type_length, enabled);
+ std::tie(type, type_length) =
+ value_type_reader::read_value_type<ValidationTag>(decoder, pc + length,
+ enabled);
length += type_length;
}
};
struct BlockTypeImmediate {
uint32_t length = 1;
- ValueType type = kWasmVoid;
- uint32_t sig_index = 0;
- const FunctionSig* sig = nullptr;
+ // After decoding, either {sig_index} is set XOR {sig} points to
+ // {single_return_sig_storage}.
+ uint32_t sig_index;
+ FunctionSig sig{0, 0, single_return_sig_storage};
+ // Internal field, potentially pointed to by {sig}. Do not access directly.
+ ValueType single_return_sig_storage[1];
+
+ // Do not copy or move, as {sig} might point to {single_return_sig_storage} so
+ // this cannot trivially be copied. If needed, define those operators later.
+ BlockTypeImmediate(const BlockTypeImmediate&) = delete;
+ BlockTypeImmediate(BlockTypeImmediate&&) = delete;
+ BlockTypeImmediate& operator=(const BlockTypeImmediate&) = delete;
+ BlockTypeImmediate& operator=(BlockTypeImmediate&&) = delete;
template <typename ValidationTag>
BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
const byte* pc, ValidationTag = {}) {
- int64_t block_type =
- decoder->read_i33v<ValidationTag>(pc, &length, "block type");
+ int64_t block_type;
+ std::tie(block_type, length) =
+ decoder->read_i33v<ValidationTag>(pc, "block type");
if (block_type < 0) {
// All valid negative types are 1 byte in length, so we check against the
// minimum 1-byte LEB128 value.
@@ -579,34 +581,26 @@ struct BlockTypeImmediate {
block_type);
return;
}
- if (static_cast<ValueTypeCode>(block_type & 0x7F) == kVoidCode) return;
- type = value_type_reader::read_value_type<ValidationTag>(
- decoder, pc, &length, enabled);
+ if (static_cast<ValueTypeCode>(block_type & 0x7F) != kVoidCode) {
+ sig = FunctionSig{1, 0, single_return_sig_storage};
+ std::tie(single_return_sig_storage[0], length) =
+ value_type_reader::read_value_type<ValidationTag>(decoder, pc,
+ enabled);
+ }
} else {
- type = kWasmBottom;
+ sig = FunctionSig{0, 0, nullptr};
sig_index = static_cast<uint32_t>(block_type);
}
}
uint32_t in_arity() const {
- if (type != kWasmBottom) return 0;
- return static_cast<uint32_t>(sig->parameter_count());
+ return static_cast<uint32_t>(sig.parameter_count());
}
uint32_t out_arity() const {
- if (type == kWasmVoid) return 0;
- if (type != kWasmBottom) return 1;
- return static_cast<uint32_t>(sig->return_count());
- }
- ValueType in_type(uint32_t index) {
- DCHECK_EQ(kWasmBottom, type);
- return sig->GetParam(index);
- }
- ValueType out_type(uint32_t index) {
- if (type == kWasmBottom) return sig->GetReturn(index);
- DCHECK_NE(kWasmVoid, type);
- DCHECK_EQ(0, index);
- return type;
+ return static_cast<uint32_t>(sig.return_count());
}
+ ValueType in_type(uint32_t index) { return sig.GetParam(index); }
+ ValueType out_type(uint32_t index) { return sig.GetReturn(index); }
};
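BlockTypeImmediate now owns a one-slot FunctionSig whose storage lives inside the immediate itself, which is why copy and move are deleted: a byte-wise copy would leave the new object's sig pointing into the old object's storage. The hazard and the defensive deletion can be illustrated with a small self-referential type (InlineSig below is hypothetical and unrelated to the real FunctionSig layout):

#include <cstddef>

// A signature view over externally provided storage, like FunctionSig.
struct SigView {
  size_t return_count;
  const int* returns;  // not owned
};

struct InlineSig {
  SigView sig{0, storage};  // points into this very object
  int storage[1] = {0};

  // Deleted: the default copy would duplicate `sig.returns` verbatim,
  // leaving the copy's view aimed at the original's `storage`.
  InlineSig() = default;
  InlineSig(const InlineSig&) = delete;
  InlineSig& operator=(const InlineSig&) = delete;
  InlineSig(InlineSig&&) = delete;
  InlineSig& operator=(InlineSig&&) = delete;
};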
struct BranchDepthImmediate {
@@ -615,7 +609,8 @@ struct BranchDepthImmediate {
template <typename ValidationTag>
BranchDepthImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
- depth = decoder->read_u32v<ValidationTag>(pc, &length, "branch depth");
+ std::tie(depth, length) =
+ decoder->read_u32v<ValidationTag>(pc, "branch depth");
}
};
@@ -653,8 +648,9 @@ struct BranchTableImmediate {
template <typename ValidationTag>
BranchTableImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
start = pc;
- uint32_t len = 0;
- table_count = decoder->read_u32v<ValidationTag>(pc, &len, "table count");
+ uint32_t len;
+ std::tie(table_count, len) =
+ decoder->read_u32v<ValidationTag>(pc, "table count");
table = pc + len;
}
};
@@ -668,9 +664,8 @@ class BranchTableIterator {
uint32_t next() {
DCHECK(has_next());
index_++;
- uint32_t length;
- uint32_t result =
- decoder_->read_u32v<ValidationTag>(pc_, &length, "branch table entry");
+ auto [result, length] =
+ decoder_->read_u32v<ValidationTag>(pc_, "branch table entry");
pc_ += length;
return result;
}
@@ -702,12 +697,19 @@ struct MemoryAccessImmediate {
uint32_t length = 0;
template <typename ValidationTag>
- MemoryAccessImmediate(Decoder* decoder, const byte* pc,
- uint32_t max_alignment, bool is_memory64,
- ValidationTag = {}) {
- uint32_t alignment_length;
- alignment =
- decoder->read_u32v<ValidationTag>(pc, &alignment_length, "alignment");
+ V8_INLINE MemoryAccessImmediate(Decoder* decoder, const byte* pc,
+ uint32_t max_alignment, bool is_memory64,
+ ValidationTag = {}) {
+ // Check for the fast path (two single-byte LEBs).
+ const bool two_bytes = !ValidationTag::validate || decoder->end() - pc >= 2;
+ const bool use_fast_path = two_bytes && !((pc[0] | pc[1]) & 0x80);
+ if (V8_LIKELY(use_fast_path)) {
+ alignment = pc[0];
+ offset = pc[1];
+ length = 2;
+ } else {
+ ConstructSlow<ValidationTag>(decoder, pc, max_alignment, is_memory64);
+ }
if (!VALIDATE(alignment <= max_alignment)) {
DecodeError<ValidationTag>(
decoder, pc,
@@ -715,11 +717,25 @@ struct MemoryAccessImmediate {
"actual alignment is %u",
max_alignment, alignment);
}
+ }
+
+ private:
+ template <typename ValidationTag>
+ V8_NOINLINE V8_PRESERVE_MOST void ConstructSlow(Decoder* decoder,
+ const byte* pc,
+ uint32_t max_alignment,
+ bool is_memory64) {
+ uint32_t alignment_length;
+ std::tie(alignment, alignment_length) =
+ decoder->read_u32v<ValidationTag>(pc, "alignment");
uint32_t offset_length;
- offset = is_memory64 ? decoder->read_u64v<ValidationTag>(
- pc + alignment_length, &offset_length, "offset")
- : decoder->read_u32v<ValidationTag>(
- pc + alignment_length, &offset_length, "offset");
+ if (is_memory64) {
+ std::tie(offset, offset_length) =
+ decoder->read_u64v<ValidationTag>(pc + alignment_length, "offset");
+ } else {
+ std::tie(offset, offset_length) =
+ decoder->read_u32v<ValidationTag>(pc + alignment_length, "offset");
+ }
length = alignment_length + offset_length;
}
};
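The memory-access immediate now special-cases the common encoding where both the alignment and the offset fit in one LEB byte each: if neither byte has the continuation bit set, both values are read directly and ConstructSlow is skipped. The continuation-bit test can be sketched on its own (ParseAlignmentAndOffset is a hypothetical helper, not the real immediate, and validation is omitted):

#include <cstdint>

using byte = uint8_t;

struct AlignOffset {
  uint32_t alignment;
  uint64_t offset;
  uint32_t length;
};

// Slow path placeholder: full LEB decoding of both fields.
AlignOffset ParseSlow(const byte* pc);

AlignOffset ParseAlignmentAndOffset(const byte* pc, const byte* end) {
  // Fast path: at least two bytes available and neither has bit 7 set,
  // i.e. both LEBs are single-byte.
  if (end - pc >= 2 && !((pc[0] | pc[1]) & 0x80)) {
    return {pc[0], pc[1], 2};
  }
  return ParseSlow(pc);
}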
@@ -800,14 +816,15 @@ struct TableCopyImmediate {
};
struct HeapTypeImmediate {
- uint32_t length = 1;
- HeapType type;
+ uint32_t length;
+ HeapType type{kBottom};
template <typename ValidationTag>
HeapTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, ValidationTag = {})
- : type(value_type_reader::read_heap_type<ValidationTag>(
- decoder, pc, &length, enabled)) {}
+ const byte* pc, ValidationTag = {}) {
+ std::tie(type, length) =
+ value_type_reader::read_heap_type<ValidationTag>(decoder, pc, enabled);
+ }
};
struct StringConstImmediate {
@@ -816,8 +833,8 @@ struct StringConstImmediate {
template <typename ValidationTag>
StringConstImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
- index = decoder->read_u32v<ValidationTag>(pc, &length,
- "stringref literal index");
+ std::tie(index, length) =
+ decoder->read_u32v<ValidationTag>(pc, "stringref literal index");
}
};
@@ -903,7 +920,8 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ControlBase);
- ControlBase(ControlKind kind, uint32_t stack_depth, uint32_t init_stack_depth,
+ ControlBase(Zone* /* unused in the base class */, ControlKind kind,
+ uint32_t stack_depth, uint32_t init_stack_depth,
const uint8_t* pc, Reachability reachability)
: PcForErrors<ValidationTag::full_validation>(pc),
kind(kind),
@@ -964,9 +982,10 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
F(I64Const, Value* result, int64_t value) \
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
- F(S128Const, Simd128Immediate& imm, Value* result) \
+ F(S128Const, const Simd128Immediate& imm, Value* result) \
F(GlobalGet, Value* result, const GlobalIndexImmediate& imm) \
F(DoReturn, uint32_t drop_values) \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
Value* result) \
F(RefNull, ValueType type, Value* result) \
@@ -997,7 +1016,6 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
F(FallThruTo, Control* c) \
F(PopControl, Control* block) \
/* Instructions: */ \
- F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
F(RefAsNonNull, const Value& arg, Value* result) \
F(Drop) \
F(LocalGet, Value* result, const IndexImmediate& imm) \
@@ -1046,7 +1064,6 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
F(SimdOp, WasmOpcode opcode, base::Vector<Value> args, Value* result) \
F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate& imm, \
const base::Vector<Value> inputs, Value* result) \
- F(S128Const, const Simd128Immediate& imm, Value* result) \
F(Simd8x16ShuffleOp, const Simd128Immediate& imm, const Value& input0, \
const Value& input1, Value* result) \
F(Throw, const TagIndexImmediate& imm, const base::Vector<Value>& args) \
@@ -1084,6 +1101,8 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
F(ArrayLen, const Value& array_obj, Value* result) \
F(ArrayCopy, const Value& src, const Value& src_index, const Value& dst, \
const Value& dst_index, const Value& length) \
+ F(ArrayFill, const ArrayIndexImmediate& imm, const Value& array, \
+ const Value& index, const Value& value, const Value& length) \
F(I31GetS, const Value& input, Value* result) \
F(I31GetU, const Value& input, Value* result) \
F(RefTest, const Value& obj, const Value& rtt, Value* result, \
@@ -1094,12 +1113,16 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
bool null_succeeds) \
F(RefCastAbstract, const Value& obj, HeapType type, Value* result, \
bool null_succeeds) \
- F(AssertNull, const Value& obj, Value* result) \
- F(AssertNotNull, const Value& obj, Value* result) \
+ F(AssertNullTypecheck, const Value& obj, Value* result) \
+ F(AssertNotNullTypecheck, const Value& obj, Value* result) \
F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
- uint32_t depth) \
+ uint32_t depth, bool null_succeeds) \
F(BrOnCastFail, const Value& obj, const Value& rtt, \
- Value* result_on_fallthrough, uint32_t depth) \
+ Value* result_on_fallthrough, uint32_t depth, bool null_succeeds) \
+ F(BrOnCastAbstract, const Value& obj, HeapType type, \
+ Value* result_on_branch, uint32_t depth, bool null_succeeds) \
+ F(BrOnCastFailAbstract, const Value& obj, HeapType type, \
+ Value* result_on_fallthrough, uint32_t depth, bool null_succeeds) \
F(RefIsStruct, const Value& object, Value* result) \
F(RefIsEq, const Value& object, Value* result) \
F(RefIsI31, const Value& object, Value* result) \
@@ -1108,15 +1131,17 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
F(RefAsI31, const Value& object, Value* result) \
F(RefAsArray, const Value& object, Value* result) \
F(BrOnStruct, const Value& object, Value* value_on_branch, \
- uint32_t br_depth) \
- F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(BrOnArray, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ uint32_t br_depth, bool null_succeeds) \
+ F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth, \
+ bool null_succeeds) \
+ F(BrOnArray, const Value& object, Value* value_on_branch, uint32_t br_depth, \
+ bool null_succeeds) \
F(BrOnNonStruct, const Value& object, Value* value_on_fallthrough, \
- uint32_t br_depth) \
+ uint32_t br_depth, bool null_succeeds) \
F(BrOnNonI31, const Value& object, Value* value_on_fallthrough, \
- uint32_t br_depth) \
+ uint32_t br_depth, bool null_succeeds) \
F(BrOnNonArray, const Value& object, Value* value_on_fallthrough, \
- uint32_t br_depth) \
+ uint32_t br_depth, bool null_succeeds) \
F(StringNewWtf8, const MemoryIndexImmediate& memory, \
const unibrow::Utf8Variant variant, const Value& offset, \
const Value& size, Value* result) \
@@ -1165,7 +1190,10 @@ struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
F(StringViewIterRewind, const Value& view, const Value& codepoints, \
Value* result) \
F(StringViewIterSlice, const Value& view, const Value& codepoints, \
- Value* result)
+ Value* result) \
+ F(StringCompare, const Value& lhs, const Value& rhs, Value* result) \
+ F(StringFromCodePoint, const Value& code_point, Value* result) \
+ F(StringHash, const Value& string, Value* result)
// This is a global constant invalid instruction trace, to be pointed at by
// the current instruction trace pointer in the default case
@@ -1176,6 +1204,11 @@ const std::pair<uint32_t, uint32_t> invalid_instruction_trace = {0, 0};
template <typename T>
class FastZoneVector {
public:
+ FastZoneVector() = default;
+ explicit FastZoneVector(int initial_size, Zone* zone) {
+ Grow(initial_size, zone);
+ }
+
#ifdef DEBUG
~FastZoneVector() {
// Check that {Reset} was called on this vector.
@@ -1250,7 +1283,7 @@ class FastZoneVector {
}
private:
- V8_NOINLINE void Grow(int slots_needed, Zone* zone) {
+ V8_NOINLINE V8_PRESERVE_MOST void Grow(int slots_needed, Zone* zone) {
size_t new_capacity = std::max(
size_t{8}, base::bits::RoundUpToPowerOfTwo(size() + slots_needed));
CHECK_GE(kMaxUInt32, new_capacity);
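A minimal stand-alone sketch (not part of the diff) of the capacity computation the Grow path above performs: round the required size up to the next power of two, with a floor of eight slots. The helper names are invented for illustration, and the GCC/Clang builtin stands in for base::bits::RoundUpToPowerOfTwo.

#include <algorithm>
#include <cstdint>

// Smallest power of two that is >= v (v == 0 or 1 maps to 1).
uint64_t RoundUpToPowerOfTwo(uint64_t v) {
  if (v <= 1) return 1;
  return uint64_t{1} << (64 - __builtin_clzll(v - 1));
}

// New capacity for a vector holding current_size elements that needs
// slots_needed more.
uint64_t NextCapacity(uint64_t current_size, uint64_t slots_needed) {
  return std::max<uint64_t>(8, RoundUpToPowerOfTwo(current_size + slots_needed));
}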
@@ -1287,7 +1320,7 @@ class WasmDecoder : public Decoder {
WasmFeatures* detected, const FunctionSig* sig, const byte* start,
const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, end, buffer_offset),
- compilation_zone_(zone),
+ zone_(zone),
module_(module),
enabled_(enabled),
detected_(detected),
@@ -1308,7 +1341,7 @@ class WasmDecoder : public Decoder {
}
}
- Zone* zone() const { return compilation_zone_; }
+ Zone* zone() const { return zone_; }
uint32_t num_locals() const { return num_locals_; }
@@ -1318,34 +1351,33 @@ class WasmDecoder : public Decoder {
}
// Decodes local definitions in the current decoder.
- // Writes the total length of decoded locals in {total_length}.
// The decoded locals will be appended to {this->local_types_}.
// The decoder's pc is not advanced.
- void DecodeLocals(const byte* pc, uint32_t* total_length) {
+ // The total length of decoded locals is returned.
+ uint32_t DecodeLocals(const byte* pc) {
DCHECK_NULL(local_types_);
DCHECK_EQ(0, num_locals_);
// In a first step, count the number of locals and store the decoded
// entries.
num_locals_ = static_cast<uint32_t>(this->sig_->parameter_count());
- uint32_t length;
- *total_length = 0;
// Decode local declarations, if any.
- uint32_t entries =
- read_u32v<ValidationTag>(pc, &length, "local decls count");
+ auto [entries, entries_length] =
+ read_u32v<ValidationTag>(pc, "local decls count");
+
if (!VALIDATE(ok())) {
- return DecodeError(pc + *total_length, "invalid local decls count");
+ DecodeError(pc, "invalid local decls count");
+ return 0;
}
- *total_length += length;
TRACE("local decls count: %u\n", entries);
// Do an early validity check, to avoid allocating too much memory below.
// Every entry needs at least two bytes (count plus type); if that many are
// not available any more, flag that as an error.
if (available_bytes() / 2 < entries) {
- return DecodeError(
- pc, "local decls count bigger than remaining function size");
+ DecodeError(pc, "local decls count bigger than remaining function size");
+ return 0;
}
struct DecodedLocalEntry {
@@ -1353,57 +1385,63 @@ class WasmDecoder : public Decoder {
ValueType type;
};
base::SmallVector<DecodedLocalEntry, 8> decoded_locals(entries);
+ uint32_t total_length = entries_length;
for (uint32_t entry = 0; entry < entries; ++entry) {
if (!VALIDATE(more())) {
- return DecodeError(
- end(), "expected more local decls but reached end of input");
+ DecodeError(end(),
+ "expected more local decls but reached end of input");
+ return 0;
}
- uint32_t count =
- read_u32v<ValidationTag>(pc + *total_length, &length, "local count");
+ auto [count, count_length] =
+ read_u32v<ValidationTag>(pc + total_length, "local count");
if (!VALIDATE(ok())) {
- return DecodeError(pc + *total_length, "invalid local count");
+ DecodeError(pc + total_length, "invalid local count");
+ return 0;
}
DCHECK_LE(num_locals_, kV8MaxWasmFunctionLocals);
if (!VALIDATE(count <= kV8MaxWasmFunctionLocals - num_locals_)) {
- return DecodeError(pc + *total_length, "local count too large");
+ DecodeError(pc + total_length, "local count too large");
+ return 0;
}
- *total_length += length;
+ total_length += count_length;
- ValueType type = value_type_reader::read_value_type<ValidationTag>(
- this, pc + *total_length, &length, enabled_);
- ValidateValueType(pc + *total_length, type);
- if (!VALIDATE(ok())) return;
- *total_length += length;
+ auto [type, type_length] =
+ value_type_reader::read_value_type<ValidationTag>(
+ this, pc + total_length, enabled_);
+ ValidateValueType(pc + total_length, type);
+ if (!VALIDATE(ok())) return 0;
+ total_length += type_length;
num_locals_ += count;
decoded_locals[entry] = DecodedLocalEntry{count, type};
}
DCHECK(ok());
- if (num_locals_ == 0) return;
-
- // Now build the array of local types from the parsed entries.
- local_types_ = compilation_zone_->NewArray<ValueType>(num_locals_);
- ValueType* locals_ptr = local_types_;
+ if (num_locals_ > 0) {
+ // Now build the array of local types from the parsed entries.
+ local_types_ = zone_->NewArray<ValueType>(num_locals_);
+ ValueType* locals_ptr = local_types_;
- if (sig_->parameter_count() > 0) {
- std::copy(sig_->parameters().begin(), sig_->parameters().end(),
- locals_ptr);
- locals_ptr += sig_->parameter_count();
- }
+ if (sig_->parameter_count() > 0) {
+ std::copy(sig_->parameters().begin(), sig_->parameters().end(),
+ locals_ptr);
+ locals_ptr += sig_->parameter_count();
+ }
- for (auto& entry : decoded_locals) {
- std::fill_n(locals_ptr, entry.count, entry.type);
- locals_ptr += entry.count;
+ for (auto& entry : decoded_locals) {
+ std::fill_n(locals_ptr, entry.count, entry.type);
+ locals_ptr += entry.count;
+ }
+ DCHECK_EQ(locals_ptr, local_types_ + num_locals_);
}
- DCHECK_EQ(locals_ptr, local_types_ + num_locals_);
+ return total_length;
}
// Shorthand that forwards to the {DecodeError} functions above, passing our
// {ValidationTag}.
template <typename... Args>
- void DecodeError(Args... args) {
+ V8_INLINE void DecodeError(Args... args) {
wasm::DecodeError<ValidationTag>(this, std::forward<Args>(args)...);
}
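The hunk above switches DecodeLocals and the read_u32v/read_value_type helpers from out-parameter style to returning the decoded value together with its encoded length, consumed via structured bindings. Below is a self-contained sketch of that calling convention for an unsigned-LEB128 reader; the function name and error handling are invented here and are not V8's actual decoder API.

#include <cstdint>
#include <cstdio>
#include <utility>

// Returns {decoded value, bytes consumed}; {0, 0} signals malformed input.
std::pair<uint32_t, uint32_t> ReadU32LEB(const uint8_t* pc, const uint8_t* end) {
  uint32_t result = 0;
  uint32_t length = 0;
  for (int shift = 0; shift < 35 && pc + length < end; shift += 7) {
    uint8_t byte = pc[length++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) return {result, length};
  }
  return {0, 0};  // Truncated input or longer than the 5-byte maximum.
}

int main() {
  const uint8_t code[] = {0xe5, 0x8e, 0x26};  // 624485 encoded as LEB128.
  // One structured binding replaces the old "uint32_t length; Read(..., &length)".
  auto [value, length] = ReadU32LEB(code, code + sizeof code);
  std::printf("value=%u, consumed=%u bytes\n", value, length);
}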
@@ -1412,7 +1450,8 @@ class WasmDecoder : public Decoder {
// position at the end of the vector represents possible assignments to
// the instance cache.
static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc,
- uint32_t locals_count, Zone* zone) {
+ uint32_t locals_count, Zone* zone,
+ bool* loop_is_innermost = nullptr) {
if (pc >= decoder->end()) return nullptr;
if (*pc != kExprLoop) return nullptr;
// The number of locals_count is augmented by 1 so that the 'locals_count'
@@ -1420,11 +1459,14 @@ class WasmDecoder : public Decoder {
BitVector* assigned = zone->New<BitVector>(locals_count + 1, zone);
int depth = -1; // We will increment the depth to 0 when we decode the
// starting 'loop' opcode.
+ if (loop_is_innermost) *loop_is_innermost = true;
// Iteratively process all AST nodes nested inside the loop.
while (pc < decoder->end() && VALIDATE(decoder->ok())) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
switch (opcode) {
case kExprLoop:
+ if (loop_is_innermost && depth >= 0) *loop_is_innermost = false;
+ V8_FALLTHROUGH;
case kExprIf:
case kExprBlock:
case kExprTry:
@@ -1440,7 +1482,6 @@ class WasmDecoder : public Decoder {
case kExprMemoryGrow:
case kExprCallFunction:
case kExprCallIndirect:
- case kExprCallRefDeprecated:
case kExprCallRef:
// Add instance cache to the assigned set.
assigned->Add(locals_count);
@@ -1458,10 +1499,12 @@ class WasmDecoder : public Decoder {
}
bool Validate(const byte* pc, TagIndexImmediate& imm) {
- if (!VALIDATE(imm.index < module_->tags.size())) {
+ size_t num_tags = module_->tags.size();
+ if (!VALIDATE(imm.index < num_tags)) {
DecodeError(pc, "Invalid tag index: %u", imm.index);
return false;
}
+ V8_ASSUME(imm.index < num_tags);
imm.tag = &module_->tags[imm.index];
return true;
}
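The pattern introduced above, and repeated for globals, functions, tables and element segments in the hunks below, is: validate the index against the container size, then restate the proven bound via V8_ASSUME so the optimizer can drop redundant checks on the element access that follows. A rough stand-alone rendering of the idea, with an invented ASSUME macro as a portable stand-in:

#include <cstddef>
#include <vector>

#if defined(__clang__)
#define ASSUME(cond) __builtin_assume(cond)
#elif defined(__GNUC__)
#define ASSUME(cond) do { if (!(cond)) __builtin_unreachable(); } while (false)
#else
#define ASSUME(cond) ((void)0)
#endif

// Returns -1 for an out-of-range index, otherwise the element.
int LookupChecked(const std::vector<int>& table, size_t index) {
  if (index >= table.size()) return -1;  // The VALIDATE step.
  // The early return already proves the bound; restating it lets the compiler
  // propagate the fact into later code it could not otherwise infer it for.
  ASSUME(index < table.size());
  return table[index];
}

In the decoder, the restated bound covers the &module_->tags[imm.index] access on the next line of the hunk.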
@@ -1469,13 +1512,15 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, GlobalIndexImmediate& imm) {
// We compare with the current size of the globals vector. This is important
// if we are decoding a constant expression in the global section.
- if (!VALIDATE(imm.index < module_->globals.size())) {
+ size_t num_globals = module_->globals.size();
+ if (!VALIDATE(imm.index < num_globals)) {
DecodeError(pc, "Invalid global index: %u", imm.index);
return false;
}
+ V8_ASSUME(imm.index < num_globals);
imm.global = &module_->globals[imm.index];
- if (decoding_mode == kConstantExpression) {
+ if constexpr (decoding_mode == kConstantExpression) {
if (!VALIDATE(!imm.global->mutability)) {
this->DecodeError(pc,
"mutable globals cannot be used in constant "
@@ -1540,10 +1585,12 @@ class WasmDecoder : public Decoder {
}
bool Validate(const byte* pc, CallFunctionImmediate& imm) {
- if (!VALIDATE(imm.index < module_->functions.size())) {
+ size_t num_functions = module_->functions.size();
+ if (!VALIDATE(imm.index < num_functions)) {
DecodeError(pc, "function index #%u is out of bounds", imm.index);
return false;
}
+ V8_ASSUME(imm.index < num_functions);
imm.sig = module_->functions[imm.index].sig;
return true;
}
@@ -1653,14 +1700,21 @@ class WasmDecoder : public Decoder {
}
bool Validate(const byte* pc, BlockTypeImmediate& imm) {
- if (!ValidateValueType(pc, imm.type)) return false;
- if (imm.type == kWasmBottom) {
+ if (imm.sig.all().begin() == nullptr) {
+ // Then use {sig_index} to initialize the signature.
if (!VALIDATE(module_->has_signature(imm.sig_index))) {
DecodeError(pc, "block type index %u is not a signature definition",
imm.sig_index);
return false;
}
- imm.sig = module_->signature(imm.sig_index);
+ imm.sig = *module_->signature(imm.sig_index);
+ } else {
+ // Then it's an MVP immediate with 0 parameters and 0-1 returns.
+ DCHECK_EQ(0, imm.sig.parameter_count());
+ DCHECK_GE(1, imm.sig.return_count());
+ if (imm.sig.return_count()) {
+ if (!ValidateValueType(pc, imm.sig.GetReturn(0))) return false;
+ }
}
return true;
}
@@ -1714,6 +1768,9 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, TableCopyImmediate& imm) {
if (!ValidateTable(pc, imm.table_src)) return false;
if (!ValidateTable(pc + imm.table_src.length, imm.table_dst)) return false;
+ size_t num_tables = module_->tables.size();
+ V8_ASSUME(imm.table_src.index < num_tables);
+ V8_ASSUME(imm.table_dst.index < num_tables);
ValueType src_type = module_->tables[imm.table_src.index].type;
if (!VALIDATE(IsSubtypeOf(
src_type, module_->tables[imm.table_dst.index].type, module_))) {
@@ -1738,18 +1795,22 @@ class WasmDecoder : public Decoder {
if (imm.index > 0 || imm.length > 1) {
this->detected_->Add(kFeature_reftypes);
}
- if (!VALIDATE(imm.index < module_->tables.size())) {
+ size_t num_tables = module_->tables.size();
+ if (!VALIDATE(imm.index < num_tables)) {
DecodeError(pc, "invalid table index: %u", imm.index);
return false;
}
+ V8_ASSUME(imm.index < num_tables);
return true;
}
bool ValidateElementSegment(const byte* pc, IndexImmediate& imm) {
- if (!VALIDATE(imm.index < module_->elem_segments.size())) {
+ size_t num_elem_segments = module_->elem_segments.size();
+ if (!VALIDATE(imm.index < num_elem_segments)) {
DecodeError(pc, "invalid element segment index: %u", imm.index);
return false;
}
+ V8_ASSUME(imm.index < num_elem_segments);
return true;
}
@@ -1778,10 +1839,12 @@ class WasmDecoder : public Decoder {
}
bool ValidateFunction(const byte* pc, IndexImmediate& imm) {
- if (!VALIDATE(imm.index < module_->functions.size())) {
+ size_t num_functions = module_->functions.size();
+ if (!VALIDATE(imm.index < num_functions)) {
DecodeError(pc, "function index #%u is out of bounds", imm.index);
return false;
}
+ V8_ASSUME(imm.index < num_functions);
if (decoding_mode == kFunctionBody &&
!VALIDATE(module_->functions[imm.index].declared)) {
DecodeError(pc, "undeclared reference to function #%u", imm.index);
@@ -1879,7 +1942,6 @@ class WasmDecoder : public Decoder {
(ios.CallIndirect(imm), ...);
return 1 + imm.length;
}
- case kExprCallRefDeprecated: // TODO(7748): Drop after grace period.
case kExprCallRef:
case kExprReturnCallRef: {
SigIndexImmediate imm(decoder, pc + 1, validate);
@@ -1974,8 +2036,9 @@ class WasmDecoder : public Decoder {
/********** Prefixed opcodes **********/
case kNumericPrefix: {
- uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length);
+ uint32_t length;
+ std::tie(opcode, length) =
+ decoder->read_prefixed_opcode<ValidationTag>(pc);
switch (opcode) {
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
@@ -2031,15 +2094,16 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
default:
- if (ValidationTag::validate) {
- decoder->DecodeError(pc, "invalid numeric opcode");
- }
+ // This path is only possible if we are validating.
+ V8_ASSUME(ValidationTag::validate);
+ decoder->DecodeError(pc, "invalid numeric opcode");
return length;
}
}
case kSimdPrefix: {
- uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length);
+ uint32_t length;
+ std::tie(opcode, length) =
+ decoder->read_prefixed_opcode<ValidationTag>(pc);
switch (opcode) {
// clang-format off
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
@@ -2079,16 +2143,16 @@ class WasmDecoder : public Decoder {
}
return length + kSimd128Size;
default:
- if (ValidationTag::validate) {
- decoder->DecodeError(pc, "invalid SIMD opcode");
- }
+ // This path is only possible if we are validating.
+ V8_ASSUME(ValidationTag::validate);
+ decoder->DecodeError(pc, "invalid SIMD opcode");
return length;
}
}
case kAtomicPrefix: {
- uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length,
- "atomic_index");
+ uint32_t length;
+ std::tie(opcode, length) =
+ decoder->read_prefixed_opcode<ValidationTag>(pc, "atomic_index");
switch (opcode) {
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE) {
MemoryAccessImmediate imm(decoder, pc + length, UINT32_MAX,
@@ -2101,16 +2165,16 @@ class WasmDecoder : public Decoder {
return length + 1;
}
default:
- if (ValidationTag::validate) {
- decoder->DecodeError(pc, "invalid Atomics opcode");
- }
+ // This path is only possible if we are validating.
+ V8_ASSUME(ValidationTag::validate);
+ decoder->DecodeError(pc, "invalid Atomics opcode");
return length;
}
}
case kGCPrefix: {
- uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length,
- "gc_index");
+ uint32_t length;
+ std::tie(opcode, length) =
+ decoder->read_prefixed_opcode<ValidationTag>(pc, "gc_index");
switch (opcode) {
case kExprStructNew:
case kExprStructNewDefault: {
@@ -2131,8 +2195,7 @@ class WasmDecoder : public Decoder {
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
- case kExprArraySet:
- case kExprArrayLenDeprecated: {
+ case kExprArraySet: {
ArrayIndexImmediate imm(decoder, pc + length, validate);
(ios.TypeIndex(imm), ...);
return length + imm.length;
@@ -2152,6 +2215,11 @@ class WasmDecoder : public Decoder {
(ios.ArrayCopy(dst_imm, src_imm), ...);
return length + dst_imm.length + src_imm.length;
}
+ case kExprArrayFill: {
+ ArrayIndexImmediate imm(decoder, pc + length, validate);
+ (ios.TypeIndex(imm), ...);
+ return length + imm.length;
+ }
case kExprArrayNewData:
case kExprArrayNewElem: {
ArrayIndexImmediate array_imm(decoder, pc + length, validate);
@@ -2188,7 +2256,18 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
case kExprBrOnCast:
- case kExprBrOnCastFail: {
+ case kExprBrOnCastNull:
+ case kExprBrOnCastFail:
+ case kExprBrOnCastFailNull: {
+ BranchDepthImmediate branch(decoder, pc + length, validate);
+ HeapTypeImmediate imm(WasmFeatures::All(), decoder,
+ pc + length + branch.length, validate);
+ (ios.BranchDepth(branch), ...);
+ (ios.HeapType(imm), ...);
+ return length + branch.length + imm.length;
+ }
+ case kExprBrOnCastFailDeprecated:
+ case kExprBrOnCastDeprecated: {
BranchDepthImmediate branch(decoder, pc + length, validate);
IndexImmediate index(decoder, pc + length + branch.length,
"type index", validate);
@@ -2210,6 +2289,7 @@ class WasmDecoder : public Decoder {
case kExprArrayLen:
return length;
case kExprStringNewUtf8:
+ case kExprStringNewUtf8Try:
case kExprStringNewLossyUtf8:
case kExprStringNewWtf8:
case kExprStringEncodeUtf8:
@@ -2233,6 +2313,7 @@ class WasmDecoder : public Decoder {
case kExprStringMeasureUtf8:
case kExprStringMeasureWtf8:
case kExprStringNewUtf8Array:
+ case kExprStringNewUtf8ArrayTry:
case kExprStringNewLossyUtf8Array:
case kExprStringNewWtf8Array:
case kExprStringEncodeUtf8Array:
@@ -2256,12 +2337,14 @@ class WasmDecoder : public Decoder {
case kExprStringViewIterSlice:
case kExprStringNewWtf16Array:
case kExprStringEncodeWtf16Array:
+ case kExprStringCompare:
+ case kExprStringFromCodePoint:
+ case kExprStringHash:
return length;
default:
- // This is unreachable except for malformed modules.
- if (ValidationTag::validate) {
- decoder->DecodeError(pc, "invalid gc opcode");
- }
+ // This path is only possible if we are validating.
+ V8_ASSUME(ValidationTag::validate);
+ decoder->DecodeError(pc, "invalid gc opcode");
return length;
}
}
@@ -2290,7 +2373,7 @@ class WasmDecoder : public Decoder {
}
// TODO(clemensb): This is only used by the interpreter; move there.
- std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
+ V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// Handle "simple" opcodes with a fixed signature first.
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -2370,7 +2453,7 @@ class WasmDecoder : public Decoder {
case kNumericPrefix:
case kAtomicPrefix:
case kSimdPrefix: {
- opcode = this->read_prefixed_opcode<ValidationTag>(pc);
+ opcode = this->read_prefixed_opcode<ValidationTag>(pc).first;
switch (opcode) {
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
return {1, 1};
@@ -2396,8 +2479,7 @@ class WasmDecoder : public Decoder {
}
}
case kGCPrefix: {
- uint32_t unused_length;
- opcode = this->read_prefixed_opcode<ValidationTag>(pc, &unused_length);
+ opcode = this->read_prefixed_opcode<ValidationTag>(pc).first;
switch (opcode) {
case kExprStructGet:
case kExprStructGetS:
@@ -2406,7 +2488,6 @@ class WasmDecoder : public Decoder {
case kExprI31GetS:
case kExprI31GetU:
case kExprArrayNewDefault:
- case kExprArrayLenDeprecated:
case kExprArrayLen:
case kExprRefTest:
case kExprRefTestNull:
@@ -2416,7 +2497,11 @@ class WasmDecoder : public Decoder {
case kExprRefCastDeprecated:
case kExprRefCastNop:
case kExprBrOnCast:
+ case kExprBrOnCastNull:
case kExprBrOnCastFail:
+ case kExprBrOnCastFailNull:
+ case kExprBrOnCastFailDeprecated:
+ case kExprBrOnCastDeprecated:
return {1, 1};
case kExprStructSet:
return {2, 0};
@@ -2431,6 +2516,8 @@ class WasmDecoder : public Decoder {
return {3, 0};
case kExprArrayCopy:
return {5, 0};
+ case kExprArrayFill:
+ return {4, 0};
case kExprStructNewDefault:
return {0, 1};
case kExprStructNew: {
@@ -2455,8 +2542,11 @@ class WasmDecoder : public Decoder {
case kExprStringAsIter:
case kExprStringViewWtf16Length:
case kExprStringViewIterNext:
+ case kExprStringFromCodePoint:
+ case kExprStringHash:
return { 1, 1 };
case kExprStringNewUtf8:
+ case kExprStringNewUtf8Try:
case kExprStringNewLossyUtf8:
case kExprStringNewWtf8:
case kExprStringNewWtf16:
@@ -2466,8 +2556,10 @@ class WasmDecoder : public Decoder {
case kExprStringViewIterAdvance:
case kExprStringViewIterRewind:
case kExprStringViewIterSlice:
+ case kExprStringCompare:
return { 2, 1 };
case kExprStringNewUtf8Array:
+ case kExprStringNewUtf8ArrayTry:
case kExprStringNewLossyUtf8Array:
case kExprStringNewWtf8Array:
case kExprStringNewWtf16Array:
@@ -2504,7 +2596,7 @@ class WasmDecoder : public Decoder {
static constexpr ValidationTag validate = {};
- Zone* const compilation_zone_;
+ Zone* const zone_;
ValueType* local_types_ = nullptr;
uint32_t num_locals_ = 0;
@@ -2564,25 +2656,26 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
: WasmDecoder<ValidationTag, decoding_mode>(
zone, module, enabled, detected, body.sig, body.start, body.end,
body.offset),
- interface_(std::forward<InterfaceArgs>(interface_args)...) {}
+ interface_(std::forward<InterfaceArgs>(interface_args)...),
+ stack_(16, zone),
+ control_(16, zone) {}
~WasmFullDecoder() {
- control_.Reset(this->compilation_zone_);
- stack_.Reset(this->compilation_zone_);
- locals_initializers_stack_.Reset(this->compilation_zone_);
+ control_.Reset(this->zone_);
+ stack_.Reset(this->zone_);
+ locals_initializers_stack_.Reset(this->zone_);
}
Interface& interface() { return interface_; }
- bool Decode() {
+ void Decode() {
DCHECK(stack_.empty());
DCHECK(control_.empty());
DCHECK_LE(this->pc_, this->end_);
DCHECK_EQ(this->num_locals(), 0);
locals_offset_ = this->pc_offset();
- uint32_t locals_length;
- this->DecodeLocals(this->pc(), &locals_length);
+ uint32_t locals_length = this->DecodeLocals(this->pc());
if (!VALIDATE(this->ok())) return TraceFailed();
this->consume_bytes(locals_length);
int non_defaultable = 0;
@@ -2596,7 +2689,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// Cannot use CALL_INTERFACE_* macros because control is empty.
interface().StartFunction(this);
DecodeFunctionBody();
- if (!VALIDATE(this->ok())) return TraceFailed();
+ // Decoding can fail even without validation, e.g. due to missing Liftoff
+ // support.
+ if (this->failed()) return TraceFailed();
if (!VALIDATE(control_.empty())) {
if (control_.size() > 1) {
@@ -2609,14 +2704,13 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
// Cannot use CALL_INTERFACE_* macros because control is empty.
interface().FinishFunction(this);
- if (!VALIDATE(this->ok())) return TraceFailed();
+ if (this->failed()) return TraceFailed();
+ DCHECK(stack_.empty());
TRACE("wasm-decode ok\n\n");
- return true;
}
- bool TraceFailed() {
- if constexpr (!ValidationTag::validate) UNREACHABLE();
+ void TraceFailed() {
if (this->error_.offset()) {
TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
this->GetBufferRelativeOffset(this->error_.offset()),
@@ -2624,7 +2718,6 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
} else {
TRACE("wasm-error: %s\n\n", this->error_.message().c_str());
}
- return false;
}
const char* SafeOpcodeNameAt(const byte* pc) {
@@ -2634,8 +2727,8 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
- opcode =
- this->template read_prefixed_opcode<Decoder::FullValidationTag>(pc);
+ opcode = this->template read_prefixed_opcode<Decoder::FullValidationTag>(pc)
+ .first;
return WasmOpcodes::OpcodeName(opcode);
}
@@ -2714,7 +2807,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
has_nondefaultable_locals_ = non_defaultable_locals > 0;
if (!has_nondefaultable_locals_) return;
initialized_locals_ =
- this->compilation_zone_->template NewArray<bool>(this->num_locals_);
+ this->zone_->template NewArray<bool>(this->num_locals_);
// Parameters are always initialized.
const size_t num_params = this->sig_->parameter_count();
std::fill_n(initialized_locals_, num_params, true);
@@ -2724,7 +2817,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
DCHECK(locals_initializers_stack_.empty());
locals_initializers_stack_.EnsureMoreCapacity(non_defaultable_locals,
- this->compilation_zone_);
+ this->zone_);
}
void DecodeFunctionBody() {
@@ -2737,11 +2830,11 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
DCHECK(control_.empty());
constexpr uint32_t kStackDepth = 0;
constexpr uint32_t kInitStackDepth = 0;
- control_.EnsureMoreCapacity(1, this->compilation_zone_);
- control_.emplace_back(kControlBlock, kStackDepth, kInitStackDepth,
- this->pc_, kReachable);
+ control_.EnsureMoreCapacity(1, this->zone_);
+ control_.emplace_back(this->zone_, kControlBlock, kStackDepth,
+ kInitStackDepth, this->pc_, kReachable);
Control* c = &control_.back();
- if (decoding_mode == kFunctionBody) {
+ if constexpr (decoding_mode == kFunctionBody) {
InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
InitMerge(&c->end_merge,
static_cast<uint32_t>(this->sig_->return_count()),
@@ -2765,7 +2858,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// and binary operations, local.get, constants, ...). Thus check that
// there is enough space for those operations centrally, and avoid any
// bounds checks in those operations.
- stack_.EnsureMoreCapacity(1, this->compilation_zone_);
+ stack_.EnsureMoreCapacity(1, this->zone_);
uint8_t first_byte = *this->pc_;
WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
@@ -2803,7 +2896,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// and binary operations, local.get, constants, ...). Thus check that
// there is enough space for those operations centrally, and avoid any
// bounds checks in those operations.
- stack_.EnsureMoreCapacity(1, this->compilation_zone_);
+ stack_.EnsureMoreCapacity(1, this->zone_);
uint8_t first_byte = *this->pc_;
WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
@@ -2813,7 +2906,10 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
}
- if (!VALIDATE(this->pc_ == this->end_)) {
+ // Even without validation, compilation could fail because of unsupported
+ // Liftoff operations. In that case, {pc_} did not necessarily advance until
+ // {end_}. Thus do not wrap the next check in {VALIDATE}.
+ if (this->pc_ != this->end_) {
this->DecodeError("Beyond end of code");
}
}
@@ -2865,8 +2961,8 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return true;
}
- MemoryAccessImmediate MakeMemoryAccessImmediate(uint32_t pc_offset,
- uint32_t max_alignment) {
+ V8_INLINE MemoryAccessImmediate
+ MakeMemoryAccessImmediate(uint32_t pc_offset, uint32_t max_alignment) {
return MemoryAccessImmediate(this, this->pc_ + pc_offset, max_alignment,
this->module_->is_memory64, validate);
}
@@ -2976,6 +3072,11 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return 0;
}
CALL_INTERFACE_IF_OK_AND_REACHABLE(NopForTestingUnsupportedInLiftoff);
+ // Return {0} if we failed, to not advance the pc past the end.
+ if (this->failed()) {
+ DCHECK_EQ(this->pc_, this->end_);
+ return 0;
+ }
return 1;
}
@@ -2986,7 +3087,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
#define BUILD_SIMPLE_OPCODE(op, _, sig, ...) \
DECODE(op) { \
- if (decoding_mode == kConstantExpression) { \
+ if constexpr (decoding_mode == kConstantExpression) { \
if (!VALIDATE(this->enabled_.has_extended_const())) { \
NonConstError(this, kExpr##op); \
return 0; \
@@ -3000,11 +3101,11 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
DECODE(Block) {
BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PeekArgs(imm.sig);
+ ArgVector args = PeekArgs(&imm.sig);
Control* block = PushControl(kControlBlock, args.length());
SetBlockType(block, imm, args.begin());
CALL_INTERFACE_IF_OK_AND_REACHABLE(Block, block);
- DropArgs(imm.sig);
+ DropArgs(&imm.sig);
PushMergeValues(block, &block->start_merge);
return 1 + imm.length;
}
@@ -3038,13 +3139,13 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
this->detected_->Add(kFeature_eh);
BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PeekArgs(imm.sig);
+ ArgVector args = PeekArgs(&imm.sig);
Control* try_block = PushControl(kControlTry, args.length());
SetBlockType(try_block, imm, args.begin());
try_block->previous_catch = current_catch_;
current_catch_ = static_cast<int>(control_depth() - 1);
CALL_INTERFACE_IF_OK_AND_REACHABLE(Try, try_block);
- DropArgs(imm.sig);
+ DropArgs(&imm.sig);
PushMergeValues(try_block, &try_block->start_merge);
return 1 + imm.length;
}
@@ -3072,7 +3173,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
RollbackLocalsInitialization(c);
const WasmTagSig* sig = imm.tag->sig;
stack_.EnsureMoreCapacity(static_cast<int>(sig->parameter_count()),
- this->compilation_zone_);
+ this->zone_);
for (ValueType type : sig->parameters()) Push(CreateValue(type));
base::Vector<Value> values(stack_.begin() + c->stack_depth,
sig->parameter_count());
@@ -3225,11 +3326,11 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
DECODE(Loop) {
BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- ArgVector args = PeekArgs(imm.sig);
+ ArgVector args = PeekArgs(&imm.sig);
Control* block = PushControl(kControlLoop, args.length());
SetBlockType(&control_.back(), imm, args.begin());
CALL_INTERFACE_IF_OK_AND_REACHABLE(Loop, block);
- DropArgs(imm.sig);
+ DropArgs(&imm.sig);
PushMergeValues(block, &block->start_merge);
return 1 + imm.length;
}
@@ -3238,13 +3339,13 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
Value cond = Peek(0, 0, kWasmI32);
- ArgVector args = PeekArgs(imm.sig, 1);
+ ArgVector args = PeekArgs(&imm.sig, 1);
if (!VALIDATE(this->ok())) return 0;
Control* if_block = PushControl(kControlIf, 1 + args.length());
SetBlockType(if_block, imm, args.begin());
CALL_INTERFACE_IF_OK_AND_REACHABLE(If, cond, if_block);
Drop(cond);
- DropArgs(imm.sig); // Drop {args}.
+ DropArgs(&imm.sig);
PushMergeValues(if_block, &if_block->start_merge);
return 1 + imm.length;
}
@@ -3273,7 +3374,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
DECODE(End) {
DCHECK(!control_.empty());
- if (decoding_mode == kFunctionBody) {
+ if constexpr (decoding_mode == kFunctionBody) {
Control* c = &control_.back();
if (c->is_incomplete_try()) {
// Catch-less try, fall through to the implicit catch-all.
@@ -3729,22 +3830,6 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return 1 + imm.length;
}
- // TODO(7748): After a certain grace period, drop this in favor of "CallRef".
- DECODE(CallRefDeprecated) {
- CHECK_PROTOTYPE_OPCODE(typed_funcref);
- SigIndexImmediate imm(this, this->pc_ + 1, validate);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value func_ref = Peek(0, 0, ValueType::RefNull(imm.index));
- ArgVector args = PeekArgs(imm.sig, 1);
- ReturnVector returns = CreateReturnValues(imm.sig);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(CallRef, func_ref, imm.sig, imm.index,
- args.begin(), returns.begin());
- Drop(func_ref);
- DropArgs(imm.sig);
- PushReturns(returns);
- return 1 + imm.length;
- }
-
DECODE(CallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
SigIndexImmediate imm(this, this->pc_ + 1, validate);
@@ -3776,9 +3861,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
DECODE(Numeric) {
- uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
- this->pc_, &opcode_length, "numeric index");
+ auto [full_opcode, opcode_length] =
+ this->template read_prefixed_opcode<ValidationTag>(this->pc_,
+ "numeric index");
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
this->detected_->Add(kFeature_reftypes);
@@ -3796,9 +3881,8 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
this->DecodeError("Wasm SIMD unsupported");
return 0;
}
- uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
- this->pc_, &opcode_length);
+ auto [full_opcode, opcode_length] =
+ this->template read_prefixed_opcode<ValidationTag>(this->pc_);
if (!VALIDATE(this->ok())) return 0;
trace_msg->AppendOpcode(full_opcode);
if (!CheckSimdFeatureFlagOpcode(full_opcode)) {
@@ -3809,19 +3893,24 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
DECODE(Atomic) {
this->detected_->Add(kFeature_threads);
- uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
- this->pc_, &opcode_length, "atomic index");
+ auto [full_opcode, opcode_length] =
+ this->template read_prefixed_opcode<ValidationTag>(this->pc_,
+ "atomic index");
trace_msg->AppendOpcode(full_opcode);
return DecodeAtomicOpcode(full_opcode, opcode_length);
}
DECODE(GC) {
- uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
- this->pc_, &opcode_length, "gc index");
+ auto [full_opcode, opcode_length] =
+ this->template read_prefixed_opcode<ValidationTag>(this->pc_,
+ "gc index");
trace_msg->AppendOpcode(full_opcode);
- if (full_opcode >= kExprStringNewUtf8) {
+ // If we are validating we could have read an illegal opcode. Handle that
+ // separately.
+ if (!VALIDATE(full_opcode != 0)) {
+ DCHECK(this->failed());
+ return 0;
+ } else if (full_opcode >= kExprStringNewUtf8) {
CHECK_PROTOTYPE_OPCODE(stringref);
return DecodeStringRefOpcode(full_opcode, opcode_length);
} else {
@@ -3863,13 +3952,13 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// Hence just list all implementations explicitly here, which also gives more
// freedom to use the same implementation for different opcodes.
#define DECODE_IMPL(opcode) DECODE_IMPL2(kExpr##opcode, opcode)
-#define DECODE_IMPL2(opcode, name) \
- if (idx == opcode) { \
- if (decoding_mode == kConstantExpression) { \
- return &WasmFullDecoder::NonConstError; \
- } else { \
- return &WasmFullDecoder::Decode##name; \
- } \
+#define DECODE_IMPL2(opcode, name) \
+ if (idx == opcode) { \
+ if constexpr (decoding_mode == kConstantExpression) { \
+ return &WasmFullDecoder::NonConstError; \
+ } else { \
+ return &WasmFullDecoder::Decode##name; \
+ } \
}
#define DECODE_IMPL_CONST(opcode) DECODE_IMPL_CONST2(kExpr##opcode, opcode)
#define DECODE_IMPL_CONST2(opcode, name) \
@@ -3932,7 +4021,6 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
DECODE_IMPL(CallIndirect);
DECODE_IMPL(ReturnCall);
DECODE_IMPL(ReturnCallIndirect);
- DECODE_IMPL(CallRefDeprecated);
DECODE_IMPL(CallRef);
DECODE_IMPL(ReturnCallRef);
DECODE_IMPL2(kNumericPrefix, Numeric);
@@ -4003,7 +4091,8 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return EnsureStackArguments_Slow(count, limit);
}
- V8_NOINLINE int EnsureStackArguments_Slow(int count, uint32_t limit) {
+ V8_NOINLINE V8_PRESERVE_MOST int EnsureStackArguments_Slow(int count,
+ uint32_t limit) {
if (!VALIDATE(control_.back().unreachable())) {
NotEnoughArgumentsError(count, stack_.size() - limit);
}
@@ -4014,7 +4103,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
int current_values = stack_.size() - limit;
int additional_values = count - current_values;
DCHECK_GT(additional_values, 0);
- stack_.EnsureMoreCapacity(additional_values, this->compilation_zone_);
+ stack_.EnsureMoreCapacity(additional_values, this->zone_);
Value unreachable_value = UnreachableValue(this->pc_);
for (int i = 0; i < additional_values; ++i) stack_.push(unreachable_value);
if (current_values > 0) {
@@ -4033,19 +4122,15 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// Peeks arguments as required by signature.
V8_INLINE ArgVector PeekArgs(const FunctionSig* sig, int depth = 0) {
- int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
- if (count == 0) return {};
- EnsureStackArguments(depth + count);
- ArgVector args(stack_value(depth + count), count);
- for (int i = 0; i < count; i++) {
- ValidateArgType(args, i, sig->GetParam(i));
- }
- return args;
+ return PeekArgs(
+ base::VectorOf(sig->parameters().begin(), sig->parameter_count()),
+ depth);
}
+
// Drops a number of stack elements equal to the {sig}'s parameter count (0 if
// {sig} is null), or all of them if less are present.
V8_INLINE void DropArgs(const FunctionSig* sig) {
- int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
+ int count = static_cast<int>(sig->parameter_count());
Drop(count);
}
@@ -4065,10 +4150,12 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Drop(static_cast<int>(type->field_count()));
}
- V8_INLINE ArgVector PeekArgs(base::Vector<ValueType> arg_types) {
+ V8_INLINE ArgVector PeekArgs(base::Vector<const ValueType> arg_types,
+ int depth = 0) {
int size = static_cast<int>(arg_types.size());
- EnsureStackArguments(size);
- ArgVector args(stack_value(size), arg_types.size());
+ if (size == 0) return {};
+ EnsureStackArguments(size + depth);
+ ArgVector args(stack_value(size + depth), arg_types.size());
for (int i = 0; i < size; i++) {
ValidateArgType(args, i, arg_types[i]);
}
@@ -4086,9 +4173,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
stack_.size() >= drop_values ? stack_.size() - drop_values : 0;
stack_depth = std::max(stack_depth, control_.back().stack_depth);
uint32_t init_stack_depth = this->locals_initialization_stack_depth();
- control_.EnsureMoreCapacity(1, this->compilation_zone_);
- control_.emplace_back(kind, stack_depth, init_stack_depth, this->pc_,
- reachability);
+ control_.EnsureMoreCapacity(1, this->zone_);
+ control_.emplace_back(this->zone_, kind, stack_depth, init_stack_depth,
+ this->pc_, reachability);
current_code_reachable_and_ok_ =
VALIDATE(this->ok()) && reachability == kReachable;
return &control_.back();
@@ -4129,7 +4216,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value index = Peek(0, 0, index_type);
Value result = CreateValue(type.value_type());
- CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadMem, type, imm, index, &result);
+ if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), imm.offset))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadMem, type, imm, index, &result);
+ }
Drop(index);
Push(result);
return prefix_len + imm.length;
@@ -4146,8 +4235,12 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value index = Peek(0, 0, index_type);
Value result = CreateValue(kWasmS128);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadTransform, type, transform, imm,
- index, &result);
+ uintptr_t op_size =
+ transform == LoadTransformationKind::kExtend ? 8 : type.size();
+ if (V8_LIKELY(!CheckStaticallyOutOfBounds(op_size, imm.offset))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadTransform, type, transform, imm,
+ index, &result);
+ }
Drop(index);
Push(result);
return opcode_length + imm.length;
@@ -4161,11 +4254,14 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
validate);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
Value v128 = Peek(0, 1, kWasmS128);
- Value index = Peek(1, 0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Peek(1, 0, index_type);
Value result = CreateValue(kWasmS128);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadLane, type, v128, index, mem_imm,
- lane_imm.lane, &result);
+ if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), mem_imm.offset))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(LoadLane, type, v128, index, mem_imm,
+ lane_imm.lane, &result);
+ }
Drop(2);
Push(result);
return opcode_length + mem_imm.length + lane_imm.length;
@@ -4180,14 +4276,27 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
validate);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
Value v128 = Peek(0, 1, kWasmS128);
- Value index = Peek(1, 0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Peek(1, 0, index_type);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreLane, type, mem_imm, index, v128,
- lane_imm.lane);
+ if (V8_LIKELY(!CheckStaticallyOutOfBounds(type.size(), mem_imm.offset))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreLane, type, mem_imm, index, v128,
+ lane_imm.lane);
+ }
Drop(2);
return opcode_length + mem_imm.length + lane_imm.length;
}
+ bool CheckStaticallyOutOfBounds(uintptr_t size, uintptr_t offset) {
+ const bool statically_oob = !base::IsInBounds<uintptr_t>(
+ offset, size, this->module_->max_memory_size);
+ if (V8_UNLIKELY(statically_oob)) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap, TrapReason::kTrapMemOutOfBounds);
+ SetSucceedingCodeDynamicallyUnreachable();
+ }
+ return statically_oob;
+ }
+
int DecodeStoreMem(StoreType store, int prefix_len = 1) {
MemoryAccessImmediate imm =
MakeMemoryAccessImmediate(prefix_len, store.size_log_2());
@@ -4195,7 +4304,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Value value = Peek(0, 1, store.value_type());
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value index = Peek(1, 0, index_type);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreMem, store, imm, index, value);
+ if (V8_LIKELY(!CheckStaticallyOutOfBounds(store.size(), imm.offset))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StoreMem, store, imm, index, value);
+ }
Drop(2);
return prefix_len + imm.length;
}
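The new CheckStaticallyOutOfBounds helper above turns memory accesses whose constant offset plus access size can never fit into the maximum memory size into an unconditional trap, and marks the succeeding code dynamically unreachable. The interesting detail is the overflow-safe containment test performed by base::IsInBounds; here is a stand-alone version of that test with invented names, not V8's actual helper.

#include <cstdint>
#include <cstdio>

// True iff [offset, offset + size) lies within a memory of max_memory_size
// bytes, written so the addition cannot wrap around.
bool AccessFitsInMemory(uint64_t offset, uint64_t size, uint64_t max_memory_size) {
  return size <= max_memory_size && offset <= max_memory_size - size;
}

int main() {
  const uint64_t k4GiB = uint64_t{1} << 32;
  std::printf("%d\n", AccessFitsInMemory(16, 8, k4GiB));            // 1: fits
  std::printf("%d\n", AccessFitsInMemory(k4GiB - 4, 8, k4GiB));     // 0: runs past the end
  std::printf("%d\n", AccessFitsInMemory(~uint64_t{0}, 8, k4GiB));  // 0: offset alone too large
}

Because the emitted trap is unconditional, the hunk also calls SetSucceedingCodeDynamicallyUnreachable so later interface calls are skipped while spec-level reachability is preserved.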
@@ -4251,7 +4362,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
uint32_t DecodeSimdOpcode(WasmOpcode opcode, uint32_t opcode_length) {
- if (decoding_mode == kConstantExpression) {
+ if constexpr (decoding_mode == kConstantExpression) {
// Currently, only s128.const is allowed in constant expressions.
if (opcode != kExprS128Const) {
this->DecodeError("opcode %s is not allowed in constant expressions",
@@ -4420,13 +4531,15 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
#define NON_CONST_ONLY \
- if (decoding_mode == kConstantExpression) { \
+ if constexpr (decoding_mode == kConstantExpression) { \
this->DecodeError("opcode %s is not allowed in constant expressions", \
this->SafeOpcodeNameAt(this->pc())); \
return 0; \
}
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
+ // Bigger GC opcodes are handled via {DecodeStringRefOpcode}, so we can
+ // assume here that opcodes are within [0xfb00, 0xfbff].
// This assumption might help the big switch below.
V8_ASSUME(opcode >> 8 == kGCPrefix);
switch (opcode) {
@@ -4577,6 +4690,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return opcode_length + imm.length;
}
case kExprArrayNewData: {
+ NON_CONST_ONLY
ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
validate);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
@@ -4619,6 +4733,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return opcode_length + array_imm.length + data_segment.length;
}
case kExprArrayNewElem: {
+ NON_CONST_ONLY
ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
validate);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
@@ -4732,18 +4847,6 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprArrayLenDeprecated: {
- NON_CONST_ONLY
- // Read but ignore an immediate array type index.
- // TODO(7748): Remove this once we are ready to make breaking changes.
- ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
- Value array_obj = Peek(0, 0, kWasmArrayRef);
- Value value = CreateValue(kWasmI32);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayLen, array_obj, &value);
- Drop(array_obj);
- Push(value);
- return opcode_length + imm.length;
- }
case kExprArrayCopy: {
NON_CONST_ONLY
ArrayIndexImmediate dst_imm(this, this->pc_ + opcode_length, validate);
@@ -4779,6 +4882,26 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Drop(5);
return opcode_length + dst_imm.length + src_imm.length;
}
+ case kExprArrayFill: {
+ NON_CONST_ONLY
+ ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
+ validate);
+ if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
+ if (!VALIDATE(array_imm.array_type->mutability())) {
+ this->DecodeError("array.init: immediate array type #%d is immutable",
+ array_imm.index);
+ return 0;
+ }
+
+ Value array = Peek(3, 0, ValueType::RefNull(array_imm.index));
+ Value offset = Peek(2, 1, kWasmI32);
+ Value value = Peek(1, 2, array_imm.array_type->element_type());
+ Value length = Peek(0, 3, kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayFill, array_imm, array, offset,
+ value, length);
+ Drop(4);
+ return opcode_length + array_imm.length;
+ }
case kExprArrayNewFixed: {
ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
validate);
@@ -4860,18 +4983,16 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
obj.type.is_bottom())) {
this->DecodeError(
obj.pc(),
- "Invalid types for ref.cast: %s of type %s has to "
+ "Invalid types for %s: %s of type %s has to "
"be in the same reference type hierarchy as (ref %s)",
- SafeOpcodeNameAt(obj.pc()), obj.type.name().c_str(),
- target_type.name().c_str());
+ WasmOpcodes::OpcodeName(opcode), SafeOpcodeNameAt(obj.pc()),
+ obj.type.name().c_str(), target_type.name().c_str());
return 0;
}
bool null_succeeds = opcode == kExprRefCastNull;
Value value = CreateValue(ValueType::RefMaybeNull(
- imm.type, (obj.type.is_bottom() || !null_succeeds)
- ? kNonNullable
- : obj.type.nullability()));
+ target_type, null_succeeds ? kNullable : kNonNullable));
if (current_code_reachable_and_ok_) {
// This logic ensures that code generation can assume that functions
// can only be cast to function types, and data objects to data types.
@@ -4882,7 +5003,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
CALL_INTERFACE(Drop);
}
if (obj.type.is_nullable() && !null_succeeds) {
- CALL_INTERFACE(AssertNotNull, obj, &value);
+ CALL_INTERFACE(AssertNotNullTypecheck, obj, &value);
} else {
CALL_INTERFACE(Forward, obj, &value);
}
@@ -4895,7 +5016,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// is null.
if (obj.type.is_nullable() && null_succeeds) {
// Drop rtt from the stack, then assert that obj is null.
- CALL_INTERFACE(AssertNull, obj, &value);
+ CALL_INTERFACE(AssertNullTypecheck, obj, &value);
} else {
CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast);
// We know that the following code is not reachable, but according
@@ -4942,10 +5063,10 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
obj.type.is_bottom())) {
this->DecodeError(
obj.pc(),
- "Invalid types for ref.test: %s of type %s has to "
+ "Invalid types for %s: %s of type %s has to "
"be in the same reference type hierarchy as (ref %s)",
- SafeOpcodeNameAt(obj.pc()), obj.type.name().c_str(),
- target_type.name().c_str());
+ WasmOpcodes::OpcodeName(opcode), SafeOpcodeNameAt(obj.pc()),
+ obj.type.name().c_str(), target_type.name().c_str());
return 0;
}
bool null_succeeds = opcode == kExprRefTestNull;
@@ -5037,30 +5158,33 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return opcode_length;
}
case kExprRefCastNop: {
+ NON_CONST_ONLY
// Temporary non-standard instruction, for performance experiments.
if (!VALIDATE(this->enabled_.has_ref_cast_nop())) {
this->DecodeError(
- "Invalid opcode 0xfb48 (enable with "
+ "Invalid opcode 0xfb4c (enable with "
"--experimental-wasm-ref-cast-nop)");
return 0;
}
- IndexImmediate imm(this, this->pc_ + opcode_length, "type index",
- validate);
- if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ HeapTypeImmediate imm(this->enabled_, this, this->pc_ + opcode_length,
+ validate);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
opcode_length += imm.length;
+ HeapType target_type = imm.type;
Value obj = Peek(0);
- if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmStructRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) ||
+ if (!VALIDATE((obj.type.is_object_reference() &&
+ IsSameTypeHierarchy(obj.type.heap_type(), target_type,
+ this->module_)) ||
obj.type.is_bottom())) {
- PopTypeError(0, obj,
- "subtype of (ref null func), (ref null struct) or (ref "
- "null array)");
+ this->DecodeError(
+ obj.pc(),
+ "Invalid types for %s: %s of type %s has to "
+ "be in the same reference type hierarchy as (ref %s)",
+ WasmOpcodes::OpcodeName(opcode), SafeOpcodeNameAt(obj.pc()),
+ obj.type.name().c_str(), target_type.name().c_str());
return 0;
}
- Value value = CreateValue(ValueType::RefMaybeNull(
- imm.index,
- obj.type.is_bottom() ? kNonNullable : obj.type.nullability()));
+ Value value = CreateValue(ValueType::Ref(target_type));
CALL_INTERFACE_IF_OK_AND_REACHABLE(Forward, obj, &value);
Drop(obj);
Push(value);
@@ -5104,7 +5228,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
if (obj.type.is_nullable()) {
// Drop rtt from the stack, then assert that obj is null.
CALL_INTERFACE(Drop);
- CALL_INTERFACE(AssertNull, obj, &value);
+ CALL_INTERFACE(AssertNullTypecheck, obj, &value);
} else {
CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast);
// We know that the following code is not reachable, but according
@@ -5120,7 +5244,121 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprBrOnCast: {
+ case kExprBrOnCast:
+ case kExprBrOnCastNull: {
+ NON_CONST_ONLY
+ BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
+ validate);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
+ return 0;
+ }
+ uint32_t pc_offset = opcode_length + branch_depth.length;
+
+ HeapTypeImmediate imm(this->enabled_, this, this->pc_ + pc_offset,
+ validate);
+ this->Validate(this->pc_ + opcode_length, imm);
+ if (!VALIDATE(this->ok())) return 0;
+ pc_offset += imm.length;
+
+ std::optional<Value> rtt;
+ HeapType target_type = imm.type;
+ if (imm.type.is_index()) {
+ rtt = CreateValue(ValueType::Rtt(imm.type.ref_index()));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.type.ref_index(),
+ &rtt.value());
+ // Differently to other instructions we don't push the RTT yet.
+ }
+
+ Value obj = Peek(0);
+
+ if (!VALIDATE((obj.type.is_object_reference() &&
+ IsSameTypeHierarchy(obj.type.heap_type(), target_type,
+ this->module_)) ||
+ obj.type.is_bottom())) {
+ this->DecodeError(
+ obj.pc(),
+ "Invalid types for %s: %s of type %s has to "
+ "be in the same reference type hierarchy as (ref %s)",
+ WasmOpcodes::OpcodeName(opcode), SafeOpcodeNameAt(obj.pc()),
+ obj.type.name().c_str(), target_type.name().c_str());
+ return 0;
+ }
+
+ Control* c = control_at(branch_depth.depth);
+ if (c->br_merge()->arity == 0) {
+ this->DecodeError("%s must target a branch of arity at least 1",
+ WasmOpcodes::OpcodeName(opcode));
+ return 0;
+ }
+ // Attention: contrary to most other instructions, we modify the
+ // stack before calling the interface function. This makes it
+ // significantly more convenient to pass around the values that
+ // will be on the stack when the branch is taken.
+ // TODO(jkummerow): Reconsider this choice.
+ Drop(obj);
+ bool null_succeeds = opcode == kExprBrOnCastNull;
+ Push(CreateValue(ValueType::RefMaybeNull(
+ target_type, null_succeeds ? kNullable : kNonNullable)));
+ // The {value_on_branch} parameter we pass to the interface must
+ // be pointer-identical to the object on the stack.
+ Value* value_on_branch = stack_value(1);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, target_type))) {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(Drop); // rtt
+ }
+ // The branch will still not be taken on null if not
+ // {null_succeeds}.
+ if (obj.type.is_nullable() && !null_succeeds) {
+ CALL_INTERFACE(BrOnNonNull, obj, value_on_branch,
+ branch_depth.depth, false);
+ } else {
+ CALL_INTERFACE(Forward, obj, value_on_branch);
+ CALL_INTERFACE(BrOrRet, branch_depth.depth, 0);
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
+ }
+ c->br_merge()->reached = true;
+ } else if (V8_LIKELY(!TypeCheckAlwaysFails(obj, target_type,
+ null_succeeds))) {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(BrOnCast, obj, rtt.value(), value_on_branch,
+ branch_depth.depth, null_succeeds);
+ } else {
+ CALL_INTERFACE(BrOnCastAbstract, obj, target_type,
+ value_on_branch, branch_depth.depth,
+ null_succeeds);
+ }
+ c->br_merge()->reached = true;
+ } else {
+ // Otherwise the types are unrelated. Do not branch.
+ if (rtt.has_value()) {
+ CALL_INTERFACE(Drop); // rtt
+ }
+ }
+ }
+
+ Drop(1); // value_on_branch
+ Push(obj); // Restore stack state on fallthrough.
+ if (current_code_reachable_and_ok_ && null_succeeds) {
+ // As null branches, the type on fallthrough will be the non-null
+ // variant of the input type.
+ // Note that this is handled differently for br_on_cast_fail for which
+ // the Forward is handled by TurboFan.
+ // TODO(mliedtke): This currently deviates from the spec and is
+ // discussed at
+ // https://github.com/WebAssembly/gc/issues/342#issuecomment-1354505307.
+ stack_value(1)->type = obj.type.AsNonNull();
+ CALL_INTERFACE(Forward, obj, stack_value(1));
+ }
+ return pc_offset;
+ }
+ case kExprBrOnCastDeprecated: {
NON_CONST_ONLY
BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
validate);
@@ -5181,18 +5419,127 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
c->br_merge()->reached = true;
} else if (V8_LIKELY(!TypeCheckAlwaysFails(obj, rtt))) {
+ bool null_succeeds = false;
CALL_INTERFACE(BrOnCast, obj, rtt, value_on_branch,
- branch_depth.depth);
+ branch_depth.depth, null_succeeds);
c->br_merge()->reached = true;
+ } else {
+ // Otherwise the types are unrelated. Do not branch.
+ CALL_INTERFACE(Drop); // rtt
}
- // Otherwise the types are unrelated. Do not branch.
}
Drop(1); // value_on_branch
Push(obj); // Restore stack state on fallthrough.
return pc_offset;
}
- case kExprBrOnCastFail: {
+ case kExprBrOnCastFail:
+ case kExprBrOnCastFailNull: {
+ NON_CONST_ONLY
+ BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
+ validate);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
+ return 0;
+ }
+ uint32_t pc_offset = opcode_length + branch_depth.length;
+ HeapTypeImmediate imm(this->enabled_, this, this->pc_ + pc_offset,
+ validate);
+ this->Validate(this->pc_ + opcode_length, imm);
+ if (!VALIDATE(this->ok())) return 0;
+ pc_offset += imm.length;
+
+ std::optional<Value> rtt;
+ HeapType target_type = imm.type;
+ if (imm.type.is_index()) {
+ rtt = CreateValue(ValueType::Rtt(imm.type.ref_index()));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.type.ref_index(),
+ &rtt.value());
+ // Differently to other instructions we don't push the RTT yet.
+ }
+
+ Value obj = Peek(0);
+
+ if (!VALIDATE((obj.type.is_object_reference() &&
+ IsSameTypeHierarchy(obj.type.heap_type(), target_type,
+ this->module_)) ||
+ obj.type.is_bottom())) {
+ this->DecodeError(
+ obj.pc(),
+ "Invalid types for %s: %s of type %s has to "
+ "be in the same reference type hierarchy as (ref %s)",
+ WasmOpcodes::OpcodeName(opcode), SafeOpcodeNameAt(obj.pc()),
+ obj.type.name().c_str(), target_type.name().c_str());
+ return 0;
+ }
+
+ Control* c = control_at(branch_depth.depth);
+ if (c->br_merge()->arity == 0) {
+ this->DecodeError("%s must target a branch of arity at least 1",
+ WasmOpcodes::OpcodeName(opcode));
+ return 0;
+ }
+
+ bool null_succeeds = opcode == kExprBrOnCastFailNull;
+ if (null_succeeds) {
+ // If null is treated as a successful cast, then the branch type is
+ // guaranteed to be non-null.
+ Drop(obj);
+ Push(CreateValue(obj.type.AsNonNull()));
+ }
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+
+ Value result_on_fallthrough = CreateValue(ValueType::RefMaybeNull(
+ target_type, (obj.type.is_bottom() || !null_succeeds)
+ ? kNonNullable
+ : obj.type.nullability()));
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ // This logic ensures that code generation can assume that functions
+ // can only be cast between compatible types.
+ if (V8_UNLIKELY(
+ TypeCheckAlwaysFails(obj, target_type, null_succeeds))) {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(Drop); // rtt
+ }
+ // The types are incompatible (i.e. neither of the two types is a
+ // subtype of the other). Always branch.
+ CALL_INTERFACE(Forward, obj, stack_value(1));
+ CALL_INTERFACE(BrOrRet, branch_depth.depth, 0);
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
+ c->br_merge()->reached = true;
+ } else if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, target_type))) {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(Drop); // rtt
+ }
+ // The branch can still be taken on null.
+ if (obj.type.is_nullable() && !null_succeeds) {
+ CALL_INTERFACE(BrOnNull, obj, branch_depth.depth, true,
+ &result_on_fallthrough);
+ c->br_merge()->reached = true;
+ }
+ // Otherwise, the type check always succeeds. Do not branch. Also,
+ // the object is already on the stack; do not manipulate the stack.
+ } else {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(BrOnCastFail, obj, rtt.value(),
+ &result_on_fallthrough, branch_depth.depth,
+ null_succeeds);
+ } else {
+ CALL_INTERFACE(BrOnCastFailAbstract, obj, target_type,
+ &result_on_fallthrough, branch_depth.depth,
+ null_succeeds);
+ }
+ c->br_merge()->reached = true;
+ }
+ }
+ // Make sure the correct value is in the stack state on fallthrough.
+ Drop(obj);
+ Push(result_on_fallthrough);
+ return pc_offset;
+ }
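// The case above chooses one of four code-generation paths for
// br_on_cast_fail / br_on_cast_fail_null. A standalone sketch of that
// decision, with illustrative (non-V8) names only:
enum class BrOnCastFailPath {
  kAlwaysBranch,   // types are unrelated: the cast always fails
  kNeverBranch,    // the cast statically succeeds
  kNullCheckOnly,  // cast succeeds except possibly on null
  kFullCastCheck   // emit BrOnCastFail / BrOnCastFailAbstract
};

inline BrOnCastFailPath ChooseBrOnCastFailPath(bool check_always_fails,
                                               bool check_always_succeeds,
                                               bool obj_is_nullable,
                                               bool null_succeeds) {
  if (check_always_fails) return BrOnCastFailPath::kAlwaysBranch;
  if (check_always_succeeds) {
    // Only a null value can still make the cast fail and take the branch.
    return (obj_is_nullable && !null_succeeds)
               ? BrOnCastFailPath::kNullCheckOnly
               : BrOnCastFailPath::kNeverBranch;
  }
  return BrOnCastFailPath::kFullCastCheck;
}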
+ case kExprBrOnCastFailDeprecated: {
NON_CONST_ONLY
BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
validate);
@@ -5222,12 +5569,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
"br_on_cast_fail must target a branch of arity at least 1");
return 0;
}
- // Attention: contrary to most other instructions, we modify the stack
- // before calling the interface function. This makes it significantly
- // more convenient to pass around the values that will be on the stack
- // when the branch is taken. In this case, we leave {obj} on the stack
- // to type check the branch.
- // TODO(jkummerow): Reconsider this choice.
+
if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
Value result_on_fallthrough = CreateValue(ValueType::Ref(imm.index));
if (V8_LIKELY(current_code_reachable_and_ok_)) {
@@ -5254,8 +5596,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// Otherwise, the type check always succeeds. Do not branch. Also,
// the object is already on the stack; do not manipulate the stack.
} else {
+ bool null_succeeds = false;
CALL_INTERFACE(BrOnCastFail, obj, rtt, &result_on_fallthrough,
- branch_depth.depth);
+ branch_depth.depth, null_succeeds);
c->br_merge()->reached = true;
}
}
@@ -5369,13 +5712,16 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// {result_on_branch} which was passed-by-value to {Push}.
Value* value_on_branch = stack_value(1);
if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ bool null_succeeds = false;
if (opcode == kExprBrOnStruct) {
- CALL_INTERFACE(BrOnStruct, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnStruct, obj, value_on_branch, branch_depth.depth,
+ null_succeeds);
} else if (opcode == kExprBrOnArray) {
- CALL_INTERFACE(BrOnArray, obj, value_on_branch, branch_depth.depth);
+ CALL_INTERFACE(BrOnArray, obj, value_on_branch, branch_depth.depth,
+ null_succeeds);
} else {
- CALL_INTERFACE(BrOnI31, obj, value_on_branch, branch_depth.depth);
+ CALL_INTERFACE(BrOnI31, obj, value_on_branch, branch_depth.depth,
+ null_succeeds);
}
c->br_merge()->reached = true;
}
@@ -5410,15 +5756,16 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Value value_on_fallthrough = CreateValue(ValueType::Ref(heap_type));
if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ bool null_succeeds = false;
if (opcode == kExprBrOnNonStruct) {
CALL_INTERFACE(BrOnNonStruct, obj, &value_on_fallthrough,
- branch_depth.depth);
+ branch_depth.depth, null_succeeds);
} else if (opcode == kExprBrOnNonArray) {
CALL_INTERFACE(BrOnNonArray, obj, &value_on_fallthrough,
- branch_depth.depth);
+ branch_depth.depth, null_succeeds);
} else {
CALL_INTERFACE(BrOnNonI31, obj, &value_on_fallthrough,
- branch_depth.depth);
+ branch_depth.depth, null_succeeds);
}
c->br_merge()->reached = true;
}
@@ -5459,12 +5806,14 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
int DecodeStringNewWtf8(unibrow::Utf8Variant variant,
uint32_t opcode_length) {
NON_CONST_ONLY
+ bool null_on_invalid = variant == unibrow::Utf8Variant::kUtf8NoTrap;
MemoryIndexImmediate memory(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, memory)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value offset = Peek(1, 0, addr_type);
Value size = Peek(0, 1, kWasmI32);
- Value result = CreateValue(ValueType::Ref(HeapType::kString));
+ Value result = CreateValue(ValueType::RefMaybeNull(
+ HeapType::kString, null_on_invalid ? kNullable : kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StringNewWtf8, memory, variant, offset,
size, &result);
Drop(2);
@@ -5527,7 +5876,9 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Value array = PeekPackedArray(2, 0, kWasmI8, WasmArrayAccess::kRead);
Value start = Peek(1, 1, kWasmI32);
Value end = Peek(0, 2, kWasmI32);
- Value result = CreateValue(ValueType::Ref(HeapType::kString));
+ bool null_on_invalid = variant == unibrow::Utf8Variant::kUtf8NoTrap;
+ Value result = CreateValue(ValueType::RefMaybeNull(
+ HeapType::kString, null_on_invalid ? kNullable : kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StringNewWtf8Array, variant, array,
start, end, &result);
Drop(3);
@@ -5550,11 +5901,19 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
int DecodeStringRefOpcode(WasmOpcode opcode, uint32_t opcode_length) {
- // This assumption might help the big switch below.
- V8_ASSUME(opcode >> 8 == kGCPrefix);
+ // Fast check for out-of-range opcodes (only allow 0xfbXX).
+ // This might help the big switch below.
+ if (!VALIDATE((opcode >> 8) == kGCPrefix)) {
+ this->DecodeError("invalid stringref opcode: %x", opcode);
+ return 0;
+ }
+
switch (opcode) {
case kExprStringNewUtf8:
return DecodeStringNewWtf8(unibrow::Utf8Variant::kUtf8, opcode_length);
+ case kExprStringNewUtf8Try:
+ return DecodeStringNewWtf8(unibrow::Utf8Variant::kUtf8NoTrap,
+ opcode_length);
case kExprStringNewLossyUtf8:
return DecodeStringNewWtf8(unibrow::Utf8Variant::kLossyUtf8,
opcode_length);
@@ -5803,6 +6162,10 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
CHECK_PROTOTYPE_OPCODE(gc);
return DecodeStringNewWtf8Array(unibrow::Utf8Variant::kUtf8,
opcode_length);
+ case kExprStringNewUtf8ArrayTry:
+ CHECK_PROTOTYPE_OPCODE(gc);
+ return DecodeStringNewWtf8Array(unibrow::Utf8Variant::kUtf8NoTrap,
+ opcode_length);
case kExprStringNewLossyUtf8Array:
CHECK_PROTOTYPE_OPCODE(gc);
return DecodeStringNewWtf8Array(unibrow::Utf8Variant::kLossyUtf8,
@@ -5849,6 +6212,35 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
Push(result);
return opcode_length;
}
+ case kExprStringCompare: {
+ NON_CONST_ONLY
+ Value lhs = Peek(1, 0, kWasmStringRef);
+ Value rhs = Peek(0, 1, kWasmStringRef);
+ Value result = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StringCompare, lhs, rhs, &result);
+ Drop(2);
+ Push(result);
+ return opcode_length;
+ }
+ case kExprStringFromCodePoint: {
+ NON_CONST_ONLY
+ Value code_point = Peek(0, 0, kWasmI32);
+ Value result = CreateValue(ValueType::Ref(HeapType::kString));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StringFromCodePoint, code_point,
+ &result);
+ Drop(1);
+ Push(result);
+ return opcode_length;
+ }
+ case kExprStringHash: {
+ NON_CONST_ONLY
+ Value string = Peek(0, 0, kWasmStringRef);
+ Value result = CreateValue(kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StringHash, string, &result);
+ Drop(1);
+ Push(result);
+ return opcode_length;
+ }
default:
this->DecodeError("invalid stringref opcode: %x", opcode);
return 0;
@@ -5891,6 +6283,8 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return 1 + opcode_length;
}
default:
+ // This path is only possible if we are validating.
+ V8_ASSUME(ValidationTag::validate);
this->DecodeError("invalid atomic opcode: 0x%x", opcode);
return 0;
}
@@ -5902,22 +6296,30 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
opcode_length, ElementSizeLog2Of(memtype.representation()));
if (!this->Validate(this->pc_ + opcode_length, imm)) return false;
- // TODO(10949): Fix this for memory64 (index type should be kWasmI64
- // then).
- CHECK(!this->module_->is_memory64);
- ArgVector args = PeekArgs(sig);
- if (sig->return_count() == 0) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
- imm, nullptr);
- DropArgs(sig);
- } else {
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ DCHECK_LE(1, parameter_count);
+ DCHECK_EQ(kWasmI32, sig->GetParam(0));
+ EnsureStackArguments(parameter_count);
+ ArgVector args(stack_value(parameter_count), parameter_count);
+ ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ ValidateArgType(args, 0, mem_type);
+ for (int i = 1; i < parameter_count; i++) {
+ ValidateArgType(args, i, sig->GetParam(i));
+ }
+
+ base::Optional<Value> result;
+ if (sig->return_count()) {
DCHECK_EQ(1, sig->return_count());
- Value result = CreateValue(sig->GetReturn());
- CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
- imm, &result);
- DropArgs(sig);
- Push(result);
+ result = CreateValue(sig->GetReturn());
}
+
+ if (V8_LIKELY(!CheckStaticallyOutOfBounds(memtype.MemSize(), imm.offset))) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(
+ AtomicOp, opcode, base::VectorOf(args), imm,
+ result.has_value() ? &result.value() : nullptr);
+ }
+ DropArgs(sig);
+ if (result.has_value()) Push(result.value());
return opcode_length + imm.length;
}
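// Standalone analogue (an assumed simplification, not V8 API) of the reworked
// atomic decoding above: the index operand follows the memory's index type
// (i64 for memory64), the result is optional, and a statically out-of-bounds
// access skips emitting the operation (the trap is generated elsewhere).
#include <cstdint>

enum class IndexTy { kI32, kI64 };

struct AtomicPlan {
  IndexTy index_type;
  bool has_result;
  bool emit_operation;
};

inline AtomicPlan PlanAtomicOp(bool is_memory64, bool has_result,
                               uint64_t offset, uint64_t access_size,
                               uint64_t max_memory_bytes) {
  // Mirrors the assumed intent of CheckStaticallyOutOfBounds: an access whose
  // [offset, offset + size) range cannot fit into the memory is never emitted.
  bool statically_oob = access_size > max_memory_bytes ||
                        offset > max_memory_bytes - access_size;
  return {is_memory64 ? IndexTy::kI64 : IndexTy::kI32, has_result,
          !statically_oob};
}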
@@ -6052,6 +6454,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
V8_INLINE Value CreateValue(ValueType type) { return Value{this->pc_, type}; }
+
V8_INLINE void Push(Value value) {
DCHECK_NE(kWasmVoid, value.type);
// {stack_.EnsureMoreCapacity} should have been called before, either in the
@@ -6061,7 +6464,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
}
void PushMergeValues(Control* c, Merge<Value>* merge) {
- if (decoding_mode == kConstantExpression) return;
+ if constexpr (decoding_mode == kConstantExpression) return;
DCHECK_EQ(c, &control_.back());
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
stack_.shrink_to(c->stack_depth);
@@ -6070,7 +6473,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
// central decoding loop.
stack_.push(merge->vals.first);
} else {
- stack_.EnsureMoreCapacity(merge->arity, this->compilation_zone_);
+ stack_.EnsureMoreCapacity(merge->arity, this->zone_);
for (uint32_t i = 0; i < merge->arity; i++) {
stack_.push(merge->vals.array[i]);
}
@@ -6086,29 +6489,32 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
return values;
}
V8_INLINE void PushReturns(ReturnVector values) {
- stack_.EnsureMoreCapacity(static_cast<int>(values.size()),
- this->compilation_zone_);
+ stack_.EnsureMoreCapacity(static_cast<int>(values.size()), this->zone_);
for (Value& value : values) Push(value);
}
// We do not inline these functions because doing so causes a large binary
// size increase. Not inlining them should not create a performance
// degradation, because their invocations are guarded by V8_LIKELY.
- V8_NOINLINE void PopTypeError(int index, Value val, const char* expected) {
+ V8_NOINLINE V8_PRESERVE_MOST void PopTypeError(int index, Value val,
+ const char* expected) {
this->DecodeError(val.pc(), "%s[%d] expected %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index, expected,
SafeOpcodeNameAt(val.pc()), val.type.name().c_str());
}
- V8_NOINLINE void PopTypeError(int index, Value val, std::string expected) {
+ V8_NOINLINE V8_PRESERVE_MOST void PopTypeError(int index, Value val,
+ std::string expected) {
PopTypeError(index, val, expected.c_str());
}
- V8_NOINLINE void PopTypeError(int index, Value val, ValueType expected) {
+ V8_NOINLINE V8_PRESERVE_MOST void PopTypeError(int index, Value val,
+ ValueType expected) {
PopTypeError(index, val, ("type " + expected.name()).c_str());
}
- V8_NOINLINE void NotEnoughArgumentsError(int needed, int actual) {
+ V8_NOINLINE V8_PRESERVE_MOST void NotEnoughArgumentsError(int needed,
+ int actual) {
DCHECK_LT(0, needed);
DCHECK_LE(0, actual);
DCHECK_LT(actual, needed);
@@ -6264,7 +6670,7 @@ class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
for (int i = arity - 1, depth = drop_values; i >= 0; --i, ++depth) {
Peek(depth, i, (*merge)[i].type);
}
- if (push_branch_values) {
+ if constexpr (push_branch_values) {
uint32_t inserted_value_count =
static_cast<uint32_t>(EnsureStackArguments(drop_values + arity));
if (inserted_value_count > 0) {
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 62d1cd552c..1d667f4952 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -7,6 +7,7 @@
#include "src/utils/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
@@ -25,14 +26,12 @@ bool DecodeLocalDecls(WasmFeatures enabled, BodyLocalDecls* decls,
constexpr FixedSizeSignature<ValueType, 0, 0> kNoSig;
WasmDecoder<ValidationTag> decoder(zone, module, enabled, &no_features,
&kNoSig, start, end);
- uint32_t length;
- decoder.DecodeLocals(decoder.pc(), &length);
+ decls->encoded_size = decoder.DecodeLocals(decoder.pc());
if (ValidationTag::validate && decoder.failed()) {
- decls->encoded_size = 0;
+ DCHECK_EQ(0, decls->encoded_size);
return false;
}
DCHECK(decoder.ok());
- decls->encoded_size = length;
// Copy the decoded locals types into {decls->local_types}.
DCHECK_NULL(decls->local_types);
decls->num_locals = decoder.num_locals_;
@@ -69,12 +68,13 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
if (pc_ > end_) pc_ = end_;
}
-DecodeResult ValidateFunctionBody(AccountingAllocator* allocator,
- const WasmFeatures& enabled,
+DecodeResult ValidateFunctionBody(const WasmFeatures& enabled,
const WasmModule* module,
WasmFeatures* detected,
const FunctionBody& body) {
- Zone zone(allocator, ZONE_NAME);
+ // Asm.js functions should never be validated; they are valid by design.
+ DCHECK_EQ(kWasmOrigin, module->origin);
+ Zone zone(GetWasmEngine()->allocator(), ZONE_NAME);
WasmFullDecoder<Decoder::FullValidationTag, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
decoder.Decode();
@@ -231,10 +231,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
if (opcode == kExprLoop || opcode == kExprIf || opcode == kExprBlock ||
opcode == kExprTry) {
if (i.pc()[1] & 0x80) {
- uint32_t temp_length;
- ValueType type =
+ auto [type, temp_length] =
value_type_reader::read_value_type<Decoder::NoValidationTag>(
- &decoder, i.pc() + 1, &temp_length, WasmFeatures::All());
+ &decoder, i.pc() + 1, WasmFeatures::All());
if (temp_length == 1) {
os << type.name() << ",";
} else {
@@ -323,12 +322,13 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, uint32_t num_locals,
- const byte* start, const byte* end) {
+ const byte* start, const byte* end,
+ bool* loop_is_innermost) {
WasmFeatures no_features = WasmFeatures::None();
WasmDecoder<Decoder::FullValidationTag> decoder(
zone, nullptr, no_features, &no_features, nullptr, start, end, 0);
return WasmDecoder<Decoder::FullValidationTag>::AnalyzeLoopAssignment(
- &decoder, start, num_locals, zone);
+ &decoder, start, num_locals, zone, loop_is_innermost);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 0b2073e2a9..5a91669cb6 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -42,9 +42,10 @@ struct FunctionBody {
enum class LoadTransformationKind : uint8_t { kSplat, kExtend, kZeroExtend };
-V8_EXPORT_PRIVATE DecodeResult ValidateFunctionBody(
- AccountingAllocator* allocator, const WasmFeatures& enabled,
- const WasmModule* module, WasmFeatures* detected, const FunctionBody& body);
+V8_EXPORT_PRIVATE DecodeResult ValidateFunctionBody(const WasmFeatures& enabled,
+ const WasmModule* module,
+ WasmFeatures* detected,
+ const FunctionBody& body);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
@@ -80,7 +81,8 @@ V8_EXPORT_PRIVATE bool ValidateAndDecodeLocalDeclsForTesting(
const byte* start, const byte* end, Zone* zone);
V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(
- Zone* zone, uint32_t num_locals, const byte* start, const byte* end);
+ Zone* zone, uint32_t num_locals, const byte* start, const byte* end,
+ bool* loop_is_innermost);
// Computes the length of the opcode at the given address.
V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
@@ -188,7 +190,8 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- return read_prefixed_opcode<Decoder::NoValidationTag>(pc_);
+ auto [opcode, length] = read_prefixed_opcode<Decoder::NoValidationTag>(pc_);
+ return opcode;
}
};
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 51eb935110..4972776fd9 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -14,7 +14,6 @@
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-debug.h"
-#include "src/wasm/wasm-engine.h"
namespace v8::internal::wasm {
@@ -47,7 +46,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
const FunctionSig* sig = env->module->functions[func_index_].sig;
// Assume the wrapper is going to be a JS function with matching arity at
// instantiation time.
- auto kind = compiler::kDefaultImportCallKind;
+ auto kind = kDefaultImportCallKind;
bool source_positions = is_asmjs_module(env->module);
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
env, kind, sig, source_positions,
@@ -80,6 +79,26 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
wasm_compile_function_time_scope.emplace(timed_histogram);
}
+ // Before executing compilation, make sure that the function was validated.
+ // Neither Liftoff nor TurboFan compilation performs validation, so they
+ // can only run on valid functions.
+ if (V8_UNLIKELY(!env->module->function_was_validated(func_index_))) {
+ // This code path can only be reached in
+ // - eager compilation mode,
+ // - with lazy validation,
+ // - with PGO (which compiles some functions eagerly), or
+ // - with compilation hints (which also compiles some functions eagerly).
+ DCHECK(!v8_flags.wasm_lazy_compilation || v8_flags.wasm_lazy_validation ||
+ v8_flags.experimental_wasm_pgo_from_file ||
+ v8_flags.experimental_wasm_compilation_hints);
+ if (ValidateFunctionBody(env->enabled_features, env->module, detected,
+ func_body)
+ .failed()) {
+ return {};
+ }
+ env->module->set_function_validated(func_index_);
+ }
+
if (v8_flags.trace_wasm_compiler) {
PrintF("Compiling wasm function %d with %s\n", func_index_,
ExecutionTierToString(tier_));
@@ -130,29 +149,22 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
V8_FALLTHROUGH;
case ExecutionTier::kTurbofan:
- // Before executing TurboFan compilation, make sure that the function was
- // validated (because TurboFan compilation assumes valid input).
- if (V8_UNLIKELY(!env->module->function_was_validated(func_index_))) {
- AccountingAllocator allocator;
- if (ValidateFunctionBody(&allocator, env->enabled_features, env->module,
- detected, func_body)
- .failed()) {
- return {};
- }
- env->module->set_function_validated(func_index_);
- }
- result = compiler::ExecuteTurbofanWasmCompilation(
- env, wire_bytes_storage, func_body, func_index_, counters,
- buffer_cache, detected);
+ compiler::WasmCompilationData data(func_body);
+ data.func_index = func_index_;
+ data.wire_bytes_storage = wire_bytes_storage;
+ data.buffer_cache = buffer_cache;
+ result = compiler::ExecuteTurbofanWasmCompilation(env, data, counters,
+ detected);
result.for_debugging = for_debugging_;
break;
}
+ DCHECK(result.succeeded());
return result;
}
// static
-void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
+void WasmCompilationUnit::CompileWasmFunction(Counters* counters,
NativeModule* native_module,
WasmFeatures* detected,
const WasmFunction* function,
@@ -164,15 +176,22 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
DCHECK_LE(native_module->num_imported_functions(), function->func_index);
DCHECK_LT(function->func_index, native_module->num_functions());
- WasmCompilationUnit unit(function->func_index, tier, kNoDebugging);
+ WasmCompilationUnit unit(function->func_index, tier, kNotForDebugging);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage().get(),
- isolate->counters(), nullptr, detected);
+ counters, nullptr, detected);
if (result.succeeded()) {
WasmCodeRefScope code_ref_scope;
+ // We need to extend the lifetime of {assumptions} beyond the
+ // {std::move(result)} statement.
+ // TODO(jkummerow): Refactor this: make {result} stack-allocated here
+ // and pass it by reference to other code that populates or consumes it.
+ AssumptionsJournal* assumptions = result.assumptions.release();
native_module->PublishCode(
- native_module->AddCompiledCode(std::move(result)));
+ native_module->AddCompiledCode(std::move(result)),
+ assumptions->empty() ? nullptr : assumptions);
+ delete assumptions;
} else {
native_module->compilation_state()->SetError();
}
@@ -240,7 +259,7 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
}
-Handle<CodeT> JSToWasmWrapperCompilationUnit::Finalize() {
+Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
if (use_generic_wrapper_) {
return isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
}
@@ -255,11 +274,11 @@ Handle<CodeT> JSToWasmWrapperCompilationUnit::Finalize() {
PROFILE(isolate_, CodeCreateEvent(LogEventListener::CodeTag::kStub,
Handle<AbstractCode>::cast(code), name));
}
- return ToCodeT(code, isolate_);
+ return code;
}
// static
-Handle<CodeT> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
const WasmModule* module, bool is_import) {
// Run the compilation unit synchronously.
@@ -272,7 +291,7 @@ Handle<CodeT> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
}
// static
-Handle<CodeT> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
const WasmModule* module) {
// Run the compilation unit synchronously.
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index fa70073291..6304f36670 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -33,6 +33,31 @@ class WasmCode;
class WasmEngine;
struct WasmFunction;
+// Stores assumptions that a Wasm compilation job made while executing,
+// so they can be checked for continued validity when the job finishes.
+class AssumptionsJournal {
+ public:
+ AssumptionsJournal() = default;
+
+ void RecordAssumption(uint32_t func_index, WellKnownImport status) {
+ imports_.push_back(std::make_pair(func_index, status));
+ }
+
+ const std::vector<std::pair<uint32_t, WellKnownImport>>& import_statuses() {
+ return imports_;
+ }
+
+ bool empty() const { return imports_.empty(); }
+
+ private:
+ // This is not particularly efficient, but it's probably good enough.
+ // For most compilations, this won't hold any entries. If it does
+ // hold entries, their number is expected to be small, because most
+ // functions don't call many imports, and many imports won't be
+ // specially recognized.
+ std::vector<std::pair<uint32_t, WellKnownImport>> imports_;
+};
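// Hedged usage sketch of the journal pattern above, as a standalone analogue
// with assumed names (not the real WellKnownImport enum): a compilation job
// records the import statuses it relied on, and the publisher re-checks them
// before committing the generated code.
#include <cstdint>
#include <utility>
#include <vector>

enum class ImportStatus { kGeneric, kStringToLowerCase };

struct JournalSketch {
  std::vector<std::pair<uint32_t, ImportStatus>> imports;
  void Record(uint32_t func_index, ImportStatus status) {
    imports.emplace_back(func_index, status);
  }
};

// Returns true if every recorded assumption still holds at publish time.
inline bool AssumptionsStillHold(
    const JournalSketch& journal,
    const std::vector<ImportStatus>& current_statuses) {
  for (const auto& [index, status] : journal.imports) {
    if (current_statuses[index] != status) return false;
  }
  return true;
}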
+
struct WasmCompilationResult {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
@@ -51,18 +76,24 @@ struct WasmCompilationResult {
uint32_t frame_slot_count = 0;
uint32_t tagged_parameter_slots = 0;
base::OwnedVector<byte> source_positions;
+ base::OwnedVector<byte> inlining_positions;
base::OwnedVector<byte> protected_instructions_data;
+ std::unique_ptr<AssumptionsJournal> assumptions;
int func_index = kAnonymousFuncIndex;
ExecutionTier requested_tier;
ExecutionTier result_tier;
Kind kind = kFunction;
- ForDebugging for_debugging = kNoDebugging;
+ ForDebugging for_debugging = kNotForDebugging;
+ bool frame_has_feedback_slot = false;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
public:
WasmCompilationUnit(int index, ExecutionTier tier, ForDebugging for_debugging)
- : func_index_(index), tier_(tier), for_debugging_(for_debugging) {}
+ : func_index_(index), tier_(tier), for_debugging_(for_debugging) {
+ DCHECK_IMPLIES(for_debugging != ForDebugging::kNotForDebugging,
+ tier_ == ExecutionTier::kLiftoff);
+ }
WasmCompilationResult ExecuteCompilation(CompilationEnv*,
const WireBytesStorage*, Counters*,
@@ -73,7 +104,7 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ForDebugging for_debugging() const { return for_debugging_; }
int func_index() const { return func_index_; }
- static void CompileWasmFunction(Isolate*, NativeModule*,
+ static void CompileWasmFunction(Counters*, NativeModule*,
WasmFeatures* detected, const WasmFunction*,
ExecutionTier);
@@ -112,22 +143,22 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
Isolate* isolate() const { return isolate_; }
void Execute();
- Handle<CodeT> Finalize();
+ Handle<Code> Finalize();
bool is_import() const { return is_import_; }
const FunctionSig* sig() const { return sig_; }
uint32_t canonical_sig_index() const { return canonical_sig_index_; }
// Run a compilation unit synchronously.
- static Handle<CodeT> CompileJSToWasmWrapper(Isolate* isolate,
- const FunctionSig* sig,
- uint32_t canonical_sig_index,
- const WasmModule* module,
- bool is_import);
+ static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ const FunctionSig* sig,
+ uint32_t canonical_sig_index,
+ const WasmModule* module,
+ bool is_import);
// Run a compilation unit synchronously, but ask for the specific
// wrapper.
- static Handle<CodeT> CompileSpecificJSToWasmWrapper(
+ static Handle<Code> CompileSpecificJSToWasmWrapper(
Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
const WasmModule* module);
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 4371d3a185..939b9f22d4 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -4,6 +4,7 @@
#include "src/wasm/graph-builder-interface.h"
+#include "src/base/vector.h"
#include "src/compiler/wasm-compiler-definitions.h"
#include "src/compiler/wasm-compiler.h"
#include "src/flags/flags.h"
@@ -16,6 +17,7 @@
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes-inl.h"
+#include "src/wasm/well-known-imports.h"
namespace v8 {
namespace internal {
@@ -25,11 +27,54 @@ namespace {
// Expose {compiler::Node} opaquely as {wasm::TFNode}.
using TFNode = compiler::Node;
+using LocalsAllocator = RecyclingZoneAllocator<TFNode*>;
+
+class LocalsVector {
+ public:
+ LocalsVector(LocalsAllocator* allocator, size_t size)
+ : allocator_(allocator), data_(allocator->allocate(size), size) {
+ std::fill(data_.begin(), data_.end(), nullptr);
+ }
+ LocalsVector(const LocalsVector& other) V8_NOEXCEPT
+ : allocator_(other.allocator_),
+ data_(allocator_->allocate(other.size()), other.size()) {
+ data_.OverwriteWith(other.data_);
+ }
+ LocalsVector(LocalsVector&& other) V8_NOEXCEPT
+ : allocator_(other.allocator_),
+ data_(other.data_.begin(), other.size()) {
+ other.data_.Truncate(0);
+ }
+ ~LocalsVector() { Clear(); }
+
+ LocalsVector& operator=(const LocalsVector& other) V8_NOEXCEPT {
+ allocator_ = other.allocator_;
+ if (!data_.size()) {
+ data_ = base::Vector<TFNode*>(allocator_->allocate(other.size()),
+ other.size());
+ }
+ data_.OverwriteWith(other.data_);
+ return *this;
+ }
+ TFNode*& operator[](size_t index) { return data_[index]; }
+ size_t size() const { return data_.size(); }
+
+ void Clear() {
+ if (size()) allocator_->deallocate(data_.begin(), size());
+ data_.Truncate(0);
+ }
+
+ private:
+ LocalsAllocator* allocator_ = nullptr;
+ base::Vector<TFNode*> data_;
+};
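// Standalone sketch of the recycling idea behind LocalsAllocator/LocalsVector
// above (simplified; the behavior of RecyclingZoneAllocator is assumed here):
// blocks released on Clear() go into per-size free lists and are handed out
// again for the next SsaEnv instead of growing the zone each time.
#include <cstddef>
#include <map>
#include <vector>

class RecyclingPoolSketch {
 public:
  void** Allocate(size_t n) {
    auto& bucket = free_lists_[n];
    if (!bucket.empty()) {
      void** block = bucket.back();
      bucket.pop_back();
      return block;
    }
    // Zero-initialized, like a fresh LocalsVector filling its slots with
    // nullptr.
    return new void*[n]();
  }
  void Deallocate(void** block, size_t n) { free_lists_[n].push_back(block); }

 private:
  // Deliberately never frees at shutdown to keep the sketch short; a real
  // zone allocator releases everything together with the zone.
  std::map<size_t, std::vector<void**>> free_lists_;
};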
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
// is reachable, has reached a control end, or has been merged.
+// Prefer to manage the lifetime of an SsaEnv via `ScopedSsaEnv` or via
+// `Control` (`block_env`, `false_env`, or `try_info->catch_env`).
struct SsaEnv : public ZoneObject {
enum State { kUnreachable, kReached, kMerged };
@@ -37,14 +82,14 @@ struct SsaEnv : public ZoneObject {
TFNode* control;
TFNode* effect;
compiler::WasmInstanceCacheNodes instance_cache;
- ZoneVector<TFNode*> locals;
+ LocalsVector locals;
- SsaEnv(Zone* zone, State state, TFNode* control, TFNode* effect,
+ SsaEnv(LocalsAllocator* alloc, State state, TFNode* control, TFNode* effect,
uint32_t locals_size)
: state(state),
control(control),
effect(effect),
- locals(locals_size, zone) {}
+ locals(alloc, locals_size) {}
SsaEnv(const SsaEnv& other) V8_NOEXCEPT = default;
SsaEnv(SsaEnv&& other) V8_NOEXCEPT : state(other.state),
@@ -57,12 +102,10 @@ struct SsaEnv : public ZoneObject {
void Kill() {
state = kUnreachable;
- for (TFNode*& local : locals) {
- local = nullptr;
- }
control = nullptr;
effect = nullptr;
instance_cache = {};
+ locals.Clear();
}
void SetNotMerged() {
if (state == kMerged) state = kReached;
@@ -74,7 +117,7 @@ class WasmGraphBuildingInterface {
using ValidationTag = Decoder::NoValidationTag;
using FullDecoder =
WasmFullDecoder<ValidationTag, WasmGraphBuildingInterface>;
- using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
+ using CheckForNull = compiler::CheckForNull;
struct Value : public ValueBase<ValidationTag> {
TFNode* node = nullptr;
@@ -100,21 +143,48 @@ class WasmGraphBuildingInterface {
struct Control : public ControlBase<Value, ValidationTag> {
SsaEnv* merge_env = nullptr; // merge environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
+ SsaEnv* block_env = nullptr; // environment that dies with this block.
TryInfo* try_info = nullptr; // information about try statements.
int32_t previous_catch = -1; // previous Control with a catch.
+ bool loop_innermost = false; // whether this loop can be innermost.
BitVector* loop_assignments = nullptr; // locals assigned in this loop.
TFNode* loop_node = nullptr; // loop header of this loop.
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
template <typename... Args>
explicit Control(Args&&... args) V8_NOEXCEPT
: ControlBase(std::forward<Args>(args)...) {}
+ Control(Control&& other) V8_NOEXCEPT
+ : ControlBase(std::move(other)),
+ merge_env(other.merge_env),
+ false_env(other.false_env),
+ block_env(other.block_env),
+ try_info(other.try_info),
+ previous_catch(other.previous_catch),
+ loop_innermost(other.loop_innermost),
+ loop_assignments(other.loop_assignments),
+ loop_node(other.loop_node) {
+ // The `control_` vector in WasmFullDecoder calls the destructor of this
+ // object when growing its capacity. Null out these pointers to avoid
+ // destroying environments that are still in use.
+ other.false_env = nullptr;
+ other.block_env = nullptr;
+ other.try_info = nullptr;
+ }
+ ~Control() {
+ if (false_env) false_env->Kill();
+ if (block_env) block_env->Kill();
+ if (try_info) try_info->catch_env->Kill();
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Control);
};
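// Standalone sketch of why the move constructor above nulls out its owning
// pointers (illustrative types only): when the decoder's `control_` vector
// grows, moved-from elements are destroyed, and a destructor that killed the
// environment unconditionally would tear down state the moved-to element
// still owns.
struct EnvSketch {
  bool alive = true;
  void Kill() { alive = false; }
};

struct ControlSketch {
  EnvSketch* env = nullptr;
  ControlSketch() = default;
  ControlSketch(ControlSketch&& other) : env(other.env) {
    other.env = nullptr;  // Without this, both copies would Kill() `env`.
  }
  ~ControlSketch() {
    if (env) env->Kill();
  }
};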
WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
- int func_index, InlinedStatus inlined_status)
- : builder_(builder),
+ int func_index, AssumptionsJournal* assumptions,
+ InlinedStatus inlined_status, Zone* zone)
+ : locals_allocator_(zone),
+ builder_(builder),
func_index_(func_index),
+ assumptions_(assumptions),
inlined_status_(inlined_status) {}
void StartFunction(FullDecoder* decoder) {
@@ -125,8 +195,8 @@ class WasmGraphBuildingInterface {
if (branch_hints_it != decoder->module_->branch_hints.end()) {
branch_hints_ = &branch_hints_it->second;
}
- TypeFeedbackStorage& feedbacks = decoder->module_->type_feedback;
- base::MutexGuard mutex_guard(&feedbacks.mutex);
+ const TypeFeedbackStorage& feedbacks = decoder->module_->type_feedback;
+ base::SharedMutexGuard<base::kShared> mutex_guard(&feedbacks.mutex);
auto feedback = feedbacks.feedback_for_function.find(func_index_);
if (feedback != feedbacks.feedback_for_function.end()) {
// This creates a copy of the vector, which is cheaper than holding on
@@ -148,7 +218,7 @@ class WasmGraphBuildingInterface {
builder_->Start(static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
uint32_t num_locals = decoder->num_locals();
SsaEnv* ssa_env = decoder->zone()->New<SsaEnv>(
- decoder->zone(), SsaEnv::kReached, effect(), control(), num_locals);
+ &locals_allocator_, SsaEnv::kReached, effect(), control(), num_locals);
SetEnv(ssa_env);
// Initialize local variables. Parameters are shifted by 1 because of the
@@ -165,7 +235,7 @@ class WasmGraphBuildingInterface {
DCHECK(type.is_reference());
// TODO(jkummerow): Consider using "the hole" instead, to make any
// illegal uses more obvious.
- node = builder_->SetType(builder_->RefNull(), type);
+ node = builder_->SetType(builder_->RefNull(type), type);
} else {
node = builder_->SetType(builder_->DefaultValue(type), type);
}
@@ -196,8 +266,8 @@ class WasmGraphBuildingInterface {
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
- void FinishFunction(FullDecoder*) {
- if (v8_flags.wasm_speculative_inlining) {
+ void FinishFunction(FullDecoder* decoder) {
+ if (decoder->enabled_.has_inlining()) {
DCHECK_EQ(feedback_instruction_index_, type_feedback_.size());
}
if (inlined_status_ == kRegularFunction) {
@@ -213,37 +283,19 @@ class WasmGraphBuildingInterface {
// The branch environment is the outer environment.
block->merge_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
+ block->block_env = ssa_env_;
}
void Loop(FullDecoder* decoder, Control* block) {
// This is the merge environment at the beginning of the loop.
SsaEnv* merge_env = Steal(decoder->zone(), ssa_env_);
- block->merge_env = merge_env;
+ block->merge_env = block->block_env = merge_env;
SetEnv(merge_env);
ssa_env_->state = SsaEnv::kMerged;
TFNode* loop_node = builder_->Loop(control());
- if (emit_loop_exits()) {
- uint32_t nesting_depth = 0;
- for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) {
- if (decoder->control_at(depth)->is_loop()) {
- nesting_depth++;
- }
- }
- // If this loop is nested, the parent loop's can_be_innermost field needs
- // to be false. If the last loop in loop_infos_ has less depth, it has to
- // be the parent loop. If it does not, it means another loop has been
- // found within the parent loop, and that loop will have set the parent's
- // can_be_innermost to false, so we do not need to do anything.
- if (nesting_depth > 0 &&
- loop_infos_.back().nesting_depth < nesting_depth) {
- loop_infos_.back().can_be_innermost = false;
- }
- loop_infos_.emplace_back(loop_node, nesting_depth, true);
- }
-
builder_->SetControl(loop_node);
decoder->control_at(0)->loop_node = loop_node;
@@ -252,8 +304,10 @@ class WasmGraphBuildingInterface {
builder_->TerminateLoop(effect(), control());
// Doing a preprocessing pass to analyze loop assignments seems to pay off
// compared to reallocating Nodes when rearranging Phis in Goto.
+ bool can_be_innermost = false;
BitVector* assigned = WasmDecoder<ValidationTag>::AnalyzeLoopAssignment(
- decoder, decoder->pc(), decoder->num_locals(), decoder->zone());
+ decoder, decoder->pc(), decoder->num_locals(), decoder->zone(),
+ &can_be_innermost);
if (decoder->failed()) return;
int instance_cache_index = decoder->num_locals();
// If the module has shared memory, the stack guard might reallocate the
@@ -264,6 +318,19 @@ class WasmGraphBuildingInterface {
DCHECK_NOT_NULL(assigned);
decoder->control_at(0)->loop_assignments = assigned;
+ if (emit_loop_exits()) {
+ uint32_t nesting_depth = 0;
+ for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) {
+ if (decoder->control_at(depth)->is_loop()) {
+ nesting_depth++;
+ }
+ }
+ loop_infos_.emplace_back(loop_node, nesting_depth, can_be_innermost);
+ // Only innermost loops can be unrolled. We can avoid allocating
+ // unnecessary nodes if this loop cannot be innermost.
+ decoder->control_at(0)->loop_innermost = can_be_innermost;
+ }
+
// Only introduce phis for variables assigned in this loop.
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
@@ -279,6 +346,7 @@ class WasmGraphBuildingInterface {
}
// Now we setup a new environment for the inside of the loop.
+ // TODO(choongwoo): Clear locals of the following SsaEnv after use.
SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->module_->has_shared_memory
? &ssa_env_->instance_cache
@@ -296,15 +364,15 @@ class WasmGraphBuildingInterface {
void Try(FullDecoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
- SsaEnv* catch_env = Split(decoder->zone(), outer_env);
- // Mark catch environment as unreachable, since only accessable
- // through catch unwinding (i.e. landing pads).
- catch_env->state = SsaEnv::kUnreachable;
- SsaEnv* try_env = Steal(decoder->zone(), outer_env);
+ SsaEnv* catch_env = Steal(decoder->zone(), outer_env);
+ // Steal catch_env to make catch_env unreachable and clear locals.
+ // The unreachable catch_env will create and copy locals in `Goto`.
+ SsaEnv* try_env = Steal(decoder->zone(), catch_env);
SetEnv(try_env);
TryInfo* try_info = decoder->zone()->New<TryInfo>(catch_env);
block->merge_env = outer_env;
block->try_info = try_info;
+ block->block_env = try_env;
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
@@ -332,6 +400,7 @@ class WasmGraphBuildingInterface {
true_env->control = if_true;
if_block->merge_env = merge_env;
if_block->false_env = false_env;
+ if_block->block_env = true_env;
SetEnv(true_env);
}
@@ -345,7 +414,7 @@ class WasmGraphBuildingInterface {
// However, if loop unrolling is enabled, we must create a loop exit and
// wrap the fallthru values on the stack.
if (block->is_loop()) {
- if (emit_loop_exits() && block->reachable()) {
+ if (emit_loop_exits() && block->reachable() && block->loop_innermost) {
BuildLoopExits(decoder, block);
WrapLocalsAtLoopExit(decoder, block);
uint32_t arity = block->end_merge.arity;
@@ -377,8 +446,8 @@ class WasmGraphBuildingInterface {
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
- SetAndTypeNode(result,
- builder_->Unop(opcode, value.node, decoder->position()));
+ SetAndTypeNode(result, builder_->Unop(opcode, value.node, value.type,
+ decoder->position()));
}
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
@@ -414,7 +483,7 @@ class WasmGraphBuildingInterface {
}
void RefNull(FullDecoder* decoder, ValueType type, Value* result) {
- SetAndTypeNode(result, builder_->RefNull());
+ SetAndTypeNode(result, builder_->RefNull(type));
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
@@ -422,7 +491,8 @@ class WasmGraphBuildingInterface {
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
- TFNode* cast_node = builder_->AssertNotNull(arg.node, decoder->position());
+ TFNode* cast_node =
+ builder_->AssertNotNull(arg.node, arg.type, decoder->position());
SetAndTypeNode(result, cast_node);
}
@@ -469,16 +539,19 @@ class WasmGraphBuildingInterface {
builder_->Trap(reason, decoder->position());
}
- void AssertNull(FullDecoder* decoder, const Value& obj, Value* result) {
+ void AssertNullTypecheck(FullDecoder* decoder, const Value& obj,
+ Value* result) {
builder_->TrapIfFalse(wasm::TrapReason::kTrapIllegalCast,
- builder_->IsNull(obj.node), decoder->position());
+ builder_->IsNull(obj.node, obj.type),
+ decoder->position());
Forward(decoder, obj, result);
}
- void AssertNotNull(FullDecoder* decoder, const Value& obj, Value* result) {
- builder_->TrapIfTrue(wasm::TrapReason::kTrapIllegalCast,
- builder_->IsNull(obj.node), decoder->position());
- Forward(decoder, obj, result);
+ void AssertNotNullTypecheck(FullDecoder* decoder, const Value& obj,
+ Value* result) {
+ SetAndTypeNode(
+ result, builder_->AssertNotNull(obj.node, obj.type, decoder->position(),
+ TrapReason::kTrapIllegalCast));
}
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {}
@@ -504,8 +577,9 @@ class WasmGraphBuildingInterface {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
NodeVector values(ret_count);
SsaEnv* internal_env = ssa_env_;
+ SsaEnv* exit_env = nullptr;
if (emit_loop_exits()) {
- SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
+ exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
auto stack_values = CopyStackValues(decoder, ret_count, drop_values);
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
@@ -521,6 +595,7 @@ class WasmGraphBuildingInterface {
builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
builder_->Return(base::VectorOf(values));
+ if (exit_env) exit_env->Kill();
SetEnv(internal_env);
}
@@ -530,15 +605,12 @@ class WasmGraphBuildingInterface {
} else {
Control* target = decoder->control_at(depth);
if (emit_loop_exits()) {
- SsaEnv* internal_env = ssa_env_;
- SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
- SetEnv(exit_env);
+ ScopedSsaEnv exit_env(this, Split(decoder->zone(), ssa_env_));
uint32_t value_count = target->br_merge()->arity;
auto stack_values = CopyStackValues(decoder, value_count, drop_values);
BuildNestedLoopExits(decoder, depth, true, stack_values);
MergeValuesInto(decoder, target, target->br_merge(),
stack_values.data());
- SetEnv(internal_env);
} else {
MergeValuesInto(decoder, target, target->br_merge(), drop_values);
}
@@ -565,9 +637,8 @@ class WasmGraphBuildingInterface {
break;
}
builder_->SetControl(fenv->control);
- SetEnv(tenv);
+ ScopedSsaEnv scoped_env(this, tenv);
BrOrRet(decoder, depth, 1);
- SetEnv(fenv);
}
void BrTable(FullDecoder* decoder, const BranchTableImmediate& imm,
@@ -579,23 +650,19 @@ class WasmGraphBuildingInterface {
return;
}
- SsaEnv* branch_env = ssa_env_;
// Build branches to the various blocks based on the table.
TFNode* sw = builder_->Switch(imm.table_count + 1, key.node);
- SsaEnv* copy = Steal(decoder->zone(), branch_env);
- SetEnv(copy);
BranchTableIterator<ValidationTag> iterator(decoder, imm);
while (iterator.has_next()) {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
- SetEnv(Split(decoder->zone(), copy));
+ ScopedSsaEnv env(this, Split(decoder->zone(), ssa_env_));
builder_->SetControl(i == imm.table_count ? builder_->IfDefault(sw)
: builder_->IfValue(i, sw));
BrOrRet(decoder, target, 1);
}
DCHECK(decoder->ok());
- SetEnv(branch_env);
}
void Else(FullDecoder* decoder, Control* if_block) {
@@ -658,14 +725,43 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_, decoder);
}
+ bool HandleWellKnownImport(FullDecoder* decoder, uint32_t index,
+ const Value args[], Value returns[]) {
+ if (!decoder->module_) return false; // Only needed for tests.
+ if (index >= decoder->module_->num_imported_functions) return false;
+ WellKnownImportsList& well_known_imports =
+ decoder->module_->type_feedback.well_known_imports;
+ using WKI = WellKnownImport;
+ WKI import = well_known_imports.get(index);
+ TFNode* result = nullptr;
+ switch (import) {
+ case WKI::kUninstantiated:
+ case WKI::kGeneric:
+ return false;
+ case WKI::kStringToLowerCaseStringref:
+ result = builder_->WellKnown_StringToLowerCaseStringref(
+ args[0].node, NullCheckFor(args[0].type));
+ break;
+ }
+ assumptions_->RecordAssumption(index, import);
+ SetAndTypeNode(&returns[0], result);
+ if (v8_flags.trace_wasm_inlining) {
+ PrintF("[function %d: import %d is well-known built-in %s]\n",
+ func_index_, index, WellKnownImportName(import));
+ }
+ return true;
+ }
+
void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[], Value returns[]) {
int maybe_call_count = -1;
- if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
+ if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
DCHECK_EQ(feedback.num_cases(), 1);
maybe_call_count = feedback.call_count(0);
}
+ // This must happen after the {next_call_feedback()} call.
+ if (HandleWellKnownImport(decoder, imm.index, args, returns)) return;
DoCall(decoder, CallInfo::CallDirect(imm.index, maybe_call_count), imm.sig,
args, returns);
}
@@ -673,7 +769,7 @@ class WasmGraphBuildingInterface {
void ReturnCall(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[]) {
int maybe_call_count = -1;
- if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
+ if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
DCHECK_EQ(feedback.num_cases(), 1);
maybe_call_count = feedback.call_count(0);
@@ -704,7 +800,7 @@ class WasmGraphBuildingInterface {
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
const CallSiteFeedback* feedback = nullptr;
- if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
+ if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
feedback = &next_call_feedback();
}
if (feedback == nullptr || feedback->num_cases() == 0) {
@@ -801,7 +897,7 @@ class WasmGraphBuildingInterface {
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
const CallSiteFeedback* feedback = nullptr;
- if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
+ if (decoder->enabled_.has_inlining() && type_feedback_.size() > 0) {
feedback = &next_call_feedback();
}
if (feedback == nullptr || feedback->num_cases() == 0) {
@@ -852,12 +948,13 @@ class WasmGraphBuildingInterface {
SsaEnv* false_env = ssa_env_;
SsaEnv* true_env = Split(decoder->zone(), false_env);
false_env->SetNotMerged();
- builder_->BrOnNull(ref_object.node, &true_env->control,
+ builder_->BrOnNull(ref_object.node, ref_object.type, &true_env->control,
&false_env->control);
builder_->SetControl(false_env->control);
- SetEnv(true_env);
- BrOrRet(decoder, depth, pass_null_along_branch ? 0 : 1);
- SetEnv(false_env);
+ {
+ ScopedSsaEnv scoped_env(this, true_env);
+ BrOrRet(decoder, depth, pass_null_along_branch ? 0 : 1);
+ }
SetAndTypeNode(
result_on_fallthrough,
builder_->TypeGuard(ref_object.node, result_on_fallthrough->type));
@@ -870,12 +967,11 @@ class WasmGraphBuildingInterface {
SsaEnv* false_env = ssa_env_;
SsaEnv* true_env = Split(decoder->zone(), false_env);
false_env->SetNotMerged();
- builder_->BrOnNull(ref_object.node, &false_env->control,
+ builder_->BrOnNull(ref_object.node, ref_object.type, &false_env->control,
&true_env->control);
builder_->SetControl(false_env->control);
- SetEnv(true_env);
+ ScopedSsaEnv scoped_env(this, true_env);
BrOrRet(decoder, depth, 0);
- SetEnv(false_env);
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, base::Vector<Value> args,
@@ -955,6 +1051,7 @@ class WasmGraphBuildingInterface {
SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_);
if_catch_env->control = if_catch;
block->try_info->catch_env = if_no_catch_env;
+ block->block_env = if_catch_env;
// If the tags match we extract the values from the exception object and
// push them onto the operand stack using the passed {values} vector.
@@ -1129,7 +1226,7 @@ class WasmGraphBuildingInterface {
SetAndTypeNode(result, builder_->ArrayNew(imm.index, imm.array_type,
length.node, initial_value.node,
rtt.node, decoder->position()));
- // array.new_with_rtt introduces a loop. Therefore, we have to mark the
+ // array.new(_default) introduces a loop. Therefore, we have to mark the
// immediately nesting loop (if any) as non-innermost.
if (!loop_infos_.empty()) loop_infos_.back().can_be_innermost = false;
}
@@ -1141,6 +1238,9 @@ class WasmGraphBuildingInterface {
SetAndTypeNode(result, builder_->ArrayNew(imm.index, imm.array_type,
length.node, initial_value,
rtt.node, decoder->position()));
+ // array.new(_default) introduces a loop. Therefore, we have to mark the
+ // immediately nesting loop (if any) as non-innermost.
+ if (!loop_infos_.empty()) loop_infos_.back().can_be_innermost = false;
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
@@ -1168,9 +1268,22 @@ class WasmGraphBuildingInterface {
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
- builder_->ArrayCopy(dst.node, dst_index.node, NullCheckFor(dst.type),
- src.node, src_index.node, NullCheckFor(src.type),
- length.node, decoder->position());
+ builder_->ArrayCopy(
+ dst.node, dst_index.node, NullCheckFor(dst.type), src.node,
+ src_index.node, NullCheckFor(src.type), length.node,
+ decoder->module_->types[src.type.ref_index()].array_type,
+ decoder->position());
+ }
+
+ void ArrayFill(FullDecoder* decoder, ArrayIndexImmediate& imm,
+ const Value& array, const Value& index, const Value& value,
+ const Value& length) {
+ builder_->ArrayFill(array.node, index.node, value.node, length.node,
+ imm.array_type, NullCheckFor(array.type),
+ decoder->position());
+ // array.fill introduces a loop. Therefore, we have to mark the immediately
+ // nesting loop (if any) as non-innermost.
+ if (!loop_infos_.empty()) loop_infos_.back().can_be_innermost = false;
}
void ArrayNewFixed(FullDecoder* decoder, const ArrayIndexImmediate& imm,
@@ -1227,8 +1340,9 @@ class WasmGraphBuildingInterface {
void RefTestAbstract(FullDecoder* decoder, const Value& object,
wasm::HeapType type, Value* result, bool null_succeeds) {
- SetAndTypeNode(result,
- builder_->RefTestAbstract(object.node, type, null_succeeds));
+ bool is_nullable = object.type.is_nullable();
+ SetAndTypeNode(result, builder_->RefTestAbstract(
+ object.node, type, is_nullable, null_succeeds));
}
void RefCast(FullDecoder* decoder, const Value& object, const Value& rtt,
@@ -1248,8 +1362,9 @@ class WasmGraphBuildingInterface {
wasm::HeapType type, Value* result, bool null_succeeds) {
TFNode* node = object.node;
if (!v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
+ bool is_nullable = object.type.is_nullable();
node = builder_->RefCastAbstract(object.node, type, decoder->position(),
- null_succeeds);
+ is_nullable, null_succeeds);
}
SetAndTypeNode(result, builder_->TypeGuard(node, result->type));
}
@@ -1259,13 +1374,16 @@ class WasmGraphBuildingInterface {
TFNode**)>
void BrOnCastAbs(FullDecoder* decoder, const Value& object, const Value& rtt,
Value* forwarding_value, uint32_t br_depth,
- bool branch_on_match) {
- // TODO(mliedtke): Add generic br_on_cast instructions where null succeeds.
- WasmTypeCheckConfig config = {object.type,
- !rtt.type.is_bottom()
- ? ValueType::Ref(rtt.type.ref_index())
- : kWasmBottom};
+ bool branch_on_match, bool null_succeeds) {
+ // If the type is bottom (used for abstract types), set HeapType to None.
+ // The heap type is not read but the null information is needed for the
+ // cast.
+ ValueType to_type = ValueType::RefMaybeNull(
+ !rtt.type.is_bottom() ? rtt.type.ref_index() : HeapType::kNone,
+ null_succeeds ? kNullable : kNonNullable);
+ WasmTypeCheckConfig config = {object.type, to_type};
SsaEnv* branch_env = Split(decoder->zone(), ssa_env_);
+ // TODO(choongwoo): Clear locals of `no_branch_env` after use.
SsaEnv* no_branch_env = Steal(decoder->zone(), ssa_env_);
no_branch_env->SetNotMerged();
SsaEnv* match_env = branch_on_match ? branch_env : no_branch_env;
@@ -1274,25 +1392,115 @@ class WasmGraphBuildingInterface {
&match_env->control, &match_env->effect,
&no_match_env->control, &no_match_env->effect);
builder_->SetControl(no_branch_env->control);
- SetEnv(branch_env);
- SetAndTypeNode(forwarding_value,
- builder_->TypeGuard(object.node, forwarding_value->type));
- // Currently, br_on_* instructions modify the value stack before calling
- // the interface function, so we don't need to drop any values here.
- BrOrRet(decoder, br_depth, 0);
- SetEnv(no_branch_env);
+
+ if (branch_on_match) {
+ ScopedSsaEnv scoped_env(this, branch_env, no_branch_env);
+ // Narrow type for the successful cast target branch.
+ Forward(decoder, object, forwarding_value);
+ // Currently, br_on_* instructions modify the value stack before calling
+ // the interface function, so we don't need to drop any values here.
+ BrOrRet(decoder, br_depth, 0);
+ // Note: Unlike the !{branch_on_match} case below, we do not Forward
+ // the value here to perform a TypeGuard. It can't be done here due to
+ // asymmetric decoder code. A Forward here would be popped from the stack
+ // and ignored by the decoder. Therefore the decoder has to call Forward
+ // itself.
+ } else {
+ {
+ ScopedSsaEnv scoped_env(this, branch_env, no_branch_env);
+ // In the {null_succeeds} case it is necessary to forward the value.
+ // This adds a TypeGuard to the non-null type (as the object is
+ // non-nullable in this case).
+ Forward(decoder, object, decoder->stack_value(1));
+ BrOrRet(decoder, br_depth, 0);
+ }
+ // Narrow type for the successful cast fallthrough branch.
+ Forward(decoder, object, forwarding_value);
+ }
}
void BrOnCast(FullDecoder* decoder, const Value& object, const Value& rtt,
- Value* value_on_branch, uint32_t br_depth) {
+ Value* value_on_branch, uint32_t br_depth, bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnCast>(
- decoder, object, rtt, value_on_branch, br_depth, true);
+ decoder, object, rtt, value_on_branch, br_depth, true, null_succeeds);
}
void BrOnCastFail(FullDecoder* decoder, const Value& object, const Value& rtt,
- Value* value_on_fallthrough, uint32_t br_depth) {
+ Value* value_on_fallthrough, uint32_t br_depth,
+ bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnCast>(
- decoder, object, rtt, value_on_fallthrough, br_depth, false);
+ decoder, object, rtt, value_on_fallthrough, br_depth, false,
+ null_succeeds);
+ }
+
+ void BrOnCastAbstract(FullDecoder* decoder, const Value& object,
+ HeapType type, Value* value_on_branch,
+ uint32_t br_depth, bool null_succeeds) {
+ switch (type.representation()) {
+ case HeapType::kEq:
+ return BrOnEq(decoder, object, value_on_branch, br_depth,
+ null_succeeds);
+ case HeapType::kI31:
+ return BrOnI31(decoder, object, value_on_branch, br_depth,
+ null_succeeds);
+ case HeapType::kStruct:
+ return BrOnStruct(decoder, object, value_on_branch, br_depth,
+ null_succeeds);
+ case HeapType::kArray:
+ return BrOnArray(decoder, object, value_on_branch, br_depth,
+ null_succeeds);
+ case HeapType::kNone:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ // This is needed for BrOnNull. {value_on_branch} is on the value stack
+ // and BrOnNull interacts with the values on the stack.
+ // TODO(7748): Ideally, the compiler shouldn't have to access the stack
+ // used by the decoder.
+ SetAndTypeNode(value_on_branch,
+ builder_->TypeGuard(object.node, value_on_branch->type));
+ return BrOnNull(decoder, object, br_depth, true, value_on_branch);
+ case HeapType::kAny:
+ // Any may never need a cast as it is either implicitly convertible or
+ // never convertible for any given type.
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void BrOnCastFailAbstract(FullDecoder* decoder, const Value& object,
+ HeapType type, Value* value_on_fallthrough,
+ uint32_t br_depth, bool null_succeeds) {
+ switch (type.representation()) {
+ case HeapType::kEq:
+ return BrOnNonEq(decoder, object, value_on_fallthrough, br_depth,
+ null_succeeds);
+ case HeapType::kI31:
+ return BrOnNonI31(decoder, object, value_on_fallthrough, br_depth,
+ null_succeeds);
+ case HeapType::kStruct:
+ return BrOnNonStruct(decoder, object, value_on_fallthrough, br_depth,
+ null_succeeds);
+ case HeapType::kArray:
+ return BrOnNonArray(decoder, object, value_on_fallthrough, br_depth,
+ null_succeeds);
+ case HeapType::kNone:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ DCHECK(null_succeeds);
+ // We need to store a node on the stack where the decoder has so far only
+ // pushed a value and expects `BrOnCastFailAbstract` to set it.
+ // TODO(7748): Ideally, the compiler shouldn't have to access the stack
+ // used by the decoder.
+ Forward(decoder, object, decoder->stack_value(1));
+ return BrOnNonNull(decoder, object, value_on_fallthrough, br_depth,
+ true);
+ case HeapType::kAny:
+ // Any may never need a cast as it is either implicitly convertible or
+ // never convertible for any given type.
+ default:
+ UNREACHABLE();
+ }
}
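
Taken together, the two dispatchers above lower every abstract-typed br_on_cast / br_on_cast_fail to one of the specialized BrOn* / BrOnNon* helpers, with null_succeeds deciding whether a null input counts as a successful cast. The standalone C++ sketch below only models that mapping; the enum, helper names, and values are illustrative stand-ins, not V8 types.

#include <cassert>
#include <string>

// Simplified stand-in for the abstract heap types handled above.
enum class AbstractType { kEq, kI31, kStruct, kArray, kNone };

// When null_succeeds is set (the "null" cast variants), a null input is
// treated as a member of the target type: br_on_cast branches on it and
// br_on_cast_fail falls through.
bool NullTakesSuccessPath(bool input_is_nullable, bool null_succeeds) {
  return input_is_nullable && null_succeeds;
}

// Mirrors the switch in BrOnCastAbstract: each abstract target type has a
// dedicated branch helper; the bottom types (none/noextern/nofunc) can only
// ever match null, so they reduce to a null check.
const char* SpecializedBranch(AbstractType type) {
  switch (type) {
    case AbstractType::kEq:     return "BrOnEq";
    case AbstractType::kI31:    return "BrOnI31";
    case AbstractType::kStruct: return "BrOnStruct";
    case AbstractType::kArray:  return "BrOnArray";
    case AbstractType::kNone:   return "BrOnNull";
  }
  return "unreachable";
}

int main() {
  assert(NullTakesSuccessPath(/*input_is_nullable=*/true,
                              /*null_succeeds=*/true));
  assert(!NullTakesSuccessPath(/*input_is_nullable=*/true,
                               /*null_succeeds=*/false));
  assert(std::string(SpecializedBranch(AbstractType::kNone)) == "BrOnNull");
}
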
void RefIsEq(FullDecoder* decoder, const Value& object, Value* result) {
@@ -1302,6 +1510,24 @@ class WasmGraphBuildingInterface {
null_succeeds));
}
+ void BrOnEq(FullDecoder* decoder, const Value& object, Value* value_on_branch,
+ uint32_t br_depth, bool null_succeeds) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnEq>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
+ true, null_succeeds);
+ }
+
+ void BrOnNonEq(FullDecoder* decoder, const Value& object,
+ Value* value_on_fallthrough, uint32_t br_depth,
+ bool null_succeeds) {
+ // TODO(7748): Merge BrOn* and BrOnNon* instructions as their only
+ // difference is a boolean flag passed to BrOnCastAbs. This could also be
+ // leveraged to merge BrOnCastFailAbstract and BrOnCastAbstract.
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnEq>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
+ br_depth, false, null_succeeds);
+ }
+
void RefIsStruct(FullDecoder* decoder, const Value& object, Value* result) {
bool null_succeeds = false;
SetAndTypeNode(result,
@@ -1319,17 +1545,19 @@ class WasmGraphBuildingInterface {
}
void BrOnStruct(FullDecoder* decoder, const Value& object,
- Value* value_on_branch, uint32_t br_depth) {
+ Value* value_on_branch, uint32_t br_depth,
+ bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnStruct>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
- true);
+ true, null_succeeds);
}
void BrOnNonStruct(FullDecoder* decoder, const Value& object,
- Value* value_on_fallthrough, uint32_t br_depth) {
+ Value* value_on_fallthrough, uint32_t br_depth,
+ bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnStruct>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
- br_depth, false);
+ br_depth, false, null_succeeds);
}
void RefIsArray(FullDecoder* decoder, const Value& object, Value* result) {
@@ -1349,17 +1577,19 @@ class WasmGraphBuildingInterface {
}
void BrOnArray(FullDecoder* decoder, const Value& object,
- Value* value_on_branch, uint32_t br_depth) {
+ Value* value_on_branch, uint32_t br_depth,
+ bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnArray>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
- true);
+ true, null_succeeds);
}
void BrOnNonArray(FullDecoder* decoder, const Value& object,
- Value* value_on_fallthrough, uint32_t br_depth) {
+ Value* value_on_fallthrough, uint32_t br_depth,
+ bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnArray>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
- br_depth, false);
+ br_depth, false, null_succeeds);
}
void RefIsI31(FullDecoder* decoder, const Value& object, Value* result) {
@@ -1376,17 +1606,18 @@ class WasmGraphBuildingInterface {
}
void BrOnI31(FullDecoder* decoder, const Value& object,
- Value* value_on_branch, uint32_t br_depth) {
+ Value* value_on_branch, uint32_t br_depth, bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnI31>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
- true);
+ true, null_succeeds);
}
void BrOnNonI31(FullDecoder* decoder, const Value& object,
- Value* value_on_fallthrough, uint32_t br_depth) {
+ Value* value_on_fallthrough, uint32_t br_depth,
+ bool null_succeeds) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnI31>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
- br_depth, false);
+ br_depth, false, null_succeeds);
}
void StringNewWtf8(FullDecoder* decoder, const MemoryIndexImmediate& memory,
@@ -1427,54 +1658,62 @@ class WasmGraphBuildingInterface {
Value* result) {
switch (variant) {
case unibrow::Utf8Variant::kUtf8:
- result->node = builder_->StringMeasureUtf8(
- str.node, NullCheckFor(str.type), decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringMeasureUtf8(
+ str.node, NullCheckFor(str.type), decoder->position()));
break;
case unibrow::Utf8Variant::kLossyUtf8:
case unibrow::Utf8Variant::kWtf8:
- result->node = builder_->StringMeasureWtf8(
- str.node, NullCheckFor(str.type), decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringMeasureWtf8(
+ str.node, NullCheckFor(str.type), decoder->position()));
break;
+ case unibrow::Utf8Variant::kUtf8NoTrap:
+ UNREACHABLE();
}
}
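
Most hunks in this section replace direct result->node assignments with SetAndTypeNode(result, ...). The helper's definition is elsewhere in graph-builder-interface.cc and not part of this diff; presumably it both stores the node and attaches the value's static wasm type to it, roughly along these lines (all types below are simplified placeholders):

struct TFNode {};     // placeholder graph node
struct ValueType {};  // placeholder wasm value type
struct Value {
  TFNode* node = nullptr;
  ValueType type;
};

struct GraphBuilderSketch {
  // Annotates a graph node with a wasm type and returns the same node.
  TFNode* SetType(TFNode* node, ValueType /*type*/) { return node; }
};

struct InterfaceSketch {
  GraphBuilderSketch* builder_;
  // Stores the freshly built node into the decoder's Value and tags it with
  // the Value's static type, so that later phases can assume every node
  // carries a type annotation.
  void SetAndTypeNode(Value* value, TFNode* node) {
    value->node = builder_->SetType(node, value->type);
  }
};

int main() {
  GraphBuilderSketch builder;
  InterfaceSketch iface{&builder};
  Value result;
  TFNode node;
  iface.SetAndTypeNode(&result, &node);
  return result.node == &node ? 0 : 1;
}
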
void StringMeasureWtf16(FullDecoder* decoder, const Value& str,
Value* result) {
- result->node = builder_->StringMeasureWtf16(
- str.node, NullCheckFor(str.type), decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringMeasureWtf16(str.node, NullCheckFor(str.type),
+ decoder->position()));
}
void StringEncodeWtf8(FullDecoder* decoder,
const MemoryIndexImmediate& memory,
const unibrow::Utf8Variant variant, const Value& str,
const Value& offset, Value* result) {
- result->node = builder_->StringEncodeWtf8(memory.index, variant, str.node,
- NullCheckFor(str.type),
- offset.node, decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringEncodeWtf8(memory.index, variant, str.node,
+ NullCheckFor(str.type), offset.node,
+ decoder->position()));
}
void StringEncodeWtf8Array(FullDecoder* decoder,
const unibrow::Utf8Variant variant,
const Value& str, const Value& array,
const Value& start, Value* result) {
- result->node = builder_->StringEncodeWtf8Array(
- variant, str.node, NullCheckFor(str.type), array.node,
- NullCheckFor(array.type), start.node, decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringEncodeWtf8Array(
+ variant, str.node, NullCheckFor(str.type), array.node,
+ NullCheckFor(array.type), start.node, decoder->position()));
}
void StringEncodeWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const Value& str, const Value& offset, Value* result) {
- result->node =
- builder_->StringEncodeWtf16(imm.index, str.node, NullCheckFor(str.type),
- offset.node, decoder->position());
+ SetAndTypeNode(result, builder_->StringEncodeWtf16(
+ imm.index, str.node, NullCheckFor(str.type),
+ offset.node, decoder->position()));
}
void StringEncodeWtf16Array(FullDecoder* decoder, const Value& str,
const Value& array, const Value& start,
Value* result) {
- result->node = builder_->StringEncodeWtf16Array(
- str.node, NullCheckFor(str.type), array.node, NullCheckFor(array.type),
- start.node, decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringEncodeWtf16Array(
+ str.node, NullCheckFor(str.type), array.node,
+ NullCheckFor(array.type), start.node, decoder->position()));
}
void StringConcat(FullDecoder* decoder, const Value& head, const Value& tail,
@@ -1486,15 +1725,16 @@ class WasmGraphBuildingInterface {
void StringEq(FullDecoder* decoder, const Value& a, const Value& b,
Value* result) {
- result->node =
- builder_->StringEqual(a.node, NullCheckFor(a.type), b.node,
- NullCheckFor(b.type), decoder->position());
+ SetAndTypeNode(result, builder_->StringEqual(a.node, NullCheckFor(a.type),
+ b.node, NullCheckFor(b.type),
+ decoder->position()));
}
void StringIsUSVSequence(FullDecoder* decoder, const Value& str,
Value* result) {
- result->node = builder_->StringIsUSVSequence(
- str.node, NullCheckFor(str.type), decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringIsUSVSequence(str.node, NullCheckFor(str.type),
+ decoder->position()));
}
void StringAsWtf8(FullDecoder* decoder, const Value& str, Value* result) {
@@ -1506,9 +1746,9 @@ class WasmGraphBuildingInterface {
void StringViewWtf8Advance(FullDecoder* decoder, const Value& view,
const Value& pos, const Value& bytes,
Value* result) {
- result->node = builder_->StringViewWtf8Advance(
- view.node, NullCheckFor(view.type), pos.node, bytes.node,
- decoder->position());
+ SetAndTypeNode(result, builder_->StringViewWtf8Advance(
+ view.node, NullCheckFor(view.type), pos.node,
+ bytes.node, decoder->position()));
}
void StringViewWtf8Encode(FullDecoder* decoder,
@@ -1521,6 +1761,8 @@ class WasmGraphBuildingInterface {
NullCheckFor(view.type), addr.node, pos.node,
bytes.node, &next_pos->node,
&bytes_written->node, decoder->position());
+ builder_->SetType(next_pos->node, next_pos->type);
+ builder_->SetType(bytes_written->node, bytes_written->type);
}
void StringViewWtf8Slice(FullDecoder* decoder, const Value& view,
@@ -1532,27 +1774,26 @@ class WasmGraphBuildingInterface {
}
void StringAsWtf16(FullDecoder* decoder, const Value& str, Value* result) {
- // Since we implement stringview_wtf16 as string, that's the type we'll
- // use for the Node. (The decoder's Value type must be stringview_wtf16
- // because static type validation relies on it.)
- result->node = builder_->SetType(
- builder_->AssertNotNull(str.node, decoder->position()),
- ValueType::Ref(HeapType::kString));
+ SetAndTypeNode(result,
+ builder_->StringAsWtf16(str.node, NullCheckFor(str.type),
+ decoder->position()));
}
void StringViewWtf16GetCodeUnit(FullDecoder* decoder, const Value& view,
const Value& pos, Value* result) {
- result->node = builder_->StringViewWtf16GetCodeUnit(
- view.node, NullCheckFor(view.type), pos.node, decoder->position());
+ SetAndTypeNode(result, builder_->StringViewWtf16GetCodeUnit(
+ view.node, NullCheckFor(view.type), pos.node,
+ decoder->position()));
}
void StringViewWtf16Encode(FullDecoder* decoder,
const MemoryIndexImmediate& imm, const Value& view,
const Value& offset, const Value& pos,
const Value& codeunits, Value* result) {
- result->node = builder_->StringViewWtf16Encode(
- imm.index, view.node, NullCheckFor(view.type), offset.node, pos.node,
- codeunits.node, decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringViewWtf16Encode(
+ imm.index, view.node, NullCheckFor(view.type), offset.node,
+ pos.node, codeunits.node, decoder->position()));
}
void StringViewWtf16Slice(FullDecoder* decoder, const Value& view,
@@ -1571,22 +1812,23 @@ class WasmGraphBuildingInterface {
void StringViewIterNext(FullDecoder* decoder, const Value& view,
Value* result) {
- result->node = builder_->StringViewIterNext(
- view.node, NullCheckFor(view.type), decoder->position());
+ SetAndTypeNode(
+ result, builder_->StringViewIterNext(view.node, NullCheckFor(view.type),
+ decoder->position()));
}
void StringViewIterAdvance(FullDecoder* decoder, const Value& view,
const Value& codepoints, Value* result) {
- result->node =
- builder_->StringViewIterAdvance(view.node, NullCheckFor(view.type),
- codepoints.node, decoder->position());
+ SetAndTypeNode(result, builder_->StringViewIterAdvance(
+ view.node, NullCheckFor(view.type),
+ codepoints.node, decoder->position()));
}
void StringViewIterRewind(FullDecoder* decoder, const Value& view,
const Value& codepoints, Value* result) {
- result->node =
- builder_->StringViewIterRewind(view.node, NullCheckFor(view.type),
- codepoints.node, decoder->position());
+ SetAndTypeNode(result, builder_->StringViewIterRewind(
+ view.node, NullCheckFor(view.type),
+ codepoints.node, decoder->position()));
}
void StringViewIterSlice(FullDecoder* decoder, const Value& view,
@@ -1596,6 +1838,24 @@ class WasmGraphBuildingInterface {
codepoints.node, decoder->position()));
}
+ void StringCompare(FullDecoder* decoder, const Value& lhs, const Value& rhs,
+ Value* result) {
+ SetAndTypeNode(result, builder_->StringCompare(
+ lhs.node, NullCheckFor(lhs.type), rhs.node,
+ NullCheckFor(rhs.type), decoder->position()));
+ }
+
+ void StringFromCodePoint(FullDecoder* decoder, const Value& code_point,
+ Value* result) {
+ SetAndTypeNode(result, builder_->StringFromCodePoint(code_point.node));
+ }
+
+ void StringHash(FullDecoder* decoder, const Value& string, Value* result) {
+ SetAndTypeNode(result,
+ builder_->StringHash(string.node, NullCheckFor(string.type),
+ decoder->position()));
+ }
+
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
if (from.type == to->type) {
to->node = from.node;
@@ -1604,21 +1864,45 @@ class WasmGraphBuildingInterface {
}
}
- std::vector<compiler::WasmLoopInfo> loop_infos() { return loop_infos_; }
+ std::vector<compiler::WasmLoopInfo>& loop_infos() { return loop_infos_; }
+ DanglingExceptions& dangling_exceptions() { return dangling_exceptions_; }
private:
+ LocalsAllocator locals_allocator_;
SsaEnv* ssa_env_ = nullptr;
compiler::WasmGraphBuilder* builder_;
int func_index_;
const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
+  // When inlining, tracks exception handlers that are left dangling and must
+  // be handled by the caller.
+ DanglingExceptions dangling_exceptions_;
+ AssumptionsJournal* assumptions_;
InlinedStatus inlined_status_;
// The entries in {type_feedback_} are indexed by the position of feedback-
// consuming instructions (currently only calls).
int feedback_instruction_index_ = 0;
std::vector<CallSiteFeedback> type_feedback_;
+ class V8_NODISCARD ScopedSsaEnv {
+ public:
+ ScopedSsaEnv(WasmGraphBuildingInterface* interface, SsaEnv* env,
+ SsaEnv* next_env = nullptr)
+ : interface_(interface),
+ next_env_(next_env ? next_env : interface->ssa_env_) {
+ interface_->SetEnv(env);
+ }
+ ~ScopedSsaEnv() {
+ interface_->ssa_env_->Kill();
+ interface_->SetEnv(next_env_);
+ }
+
+ private:
+ WasmGraphBuildingInterface* interface_;
+ SsaEnv* next_env_;
+ };
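
ScopedSsaEnv is an RAII helper: it activates an SSA environment for a nested region and, when the scope ends, kills that environment and restores either the previous one or an explicitly supplied follow-up environment (as CheckForException does below with success_env). A self-contained sketch of the same pattern, using stand-in types:

#include <cassert>

struct Env {
  bool alive = true;
  void Kill() { alive = false; }
};

class Builder {
 public:
  explicit Builder(Env* initial) : current_(initial) {}
  Env* current() const { return current_; }
  void SetEnv(Env* env) { current_ = env; }

  class ScopedEnv {
   public:
    ScopedEnv(Builder* builder, Env* env, Env* next_env = nullptr)
        : builder_(builder),
          next_env_(next_env ? next_env : builder->current()) {
      builder_->SetEnv(env);
    }
    ~ScopedEnv() {
      builder_->current()->Kill();  // The nested environment is done.
      builder_->SetEnv(next_env_);  // Continue in the designated environment.
    }

   private:
    Builder* builder_;
    Env* next_env_;
  };

 private:
  Env* current_;
};

int main() {
  Env outer, exceptional, success;
  Builder b(&outer);
  {
    // Mirrors CheckForException: build the exceptional path, then fall
    // through to the success environment instead of the previous one.
    Builder::ScopedEnv scope(&b, &exceptional, &success);
    assert(b.current() == &exceptional);
  }
  assert(!exceptional.alive && b.current() == &success);
}
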
+
TFNode* effect() { return builder_->effect(); }
TFNode* control() { return builder_->control(); }
@@ -1707,13 +1991,15 @@ class WasmGraphBuildingInterface {
return node;
}
+ // TODO(choongwoo): Clear locals of `success_env` after use.
SsaEnv* success_env = Steal(decoder->zone(), ssa_env_);
success_env->control = if_success;
SsaEnv* exception_env = Split(decoder->zone(), success_env);
exception_env->control = if_exception;
exception_env->effect = if_exception;
- SetEnv(exception_env);
+
+ ScopedSsaEnv scoped_env(this, exception_env, success_env);
// If the exceptional operation could have modified memory size, we need to
// reload the memory context into the exceptional control path.
@@ -1744,13 +2030,13 @@ class WasmGraphBuildingInterface {
}
} else {
DCHECK_EQ(inlined_status_, kInlinedHandledCall);
- // Leave the IfException/LoopExit node dangling. We will connect it during
- // inlining to the handler of the inlined call.
+ // We leave the IfException/LoopExit node dangling, and record the
+ // exception/effect/control here. We will connect them to the handler of
+ // the inlined call during inlining.
// Note: We have to generate the handler now since we have no way of
// generating a LoopExit if needed in the inlining code.
+ dangling_exceptions_.Add(if_exception, effect(), control());
}
-
- SetEnv(success_env);
return node;
}
@@ -1875,8 +2161,6 @@ class WasmGraphBuildingInterface {
ssa_env_->effect = effect();
}
SsaEnv* result = zone->New<SsaEnv>(std::move(*from));
- // Restore the length of {from->locals} after applying move-constructor.
- from->locals.resize(result->locals.size());
result->state = SsaEnv::kReached;
return result;
}
@@ -2087,7 +2371,7 @@ class WasmGraphBuildingInterface {
break;
}
}
- if (control != nullptr) {
+ if (control != nullptr && control->loop_innermost) {
BuildLoopExits(decoder, control);
for (Value& value : stack_values) {
if (value.node != nullptr) {
@@ -2122,17 +2406,18 @@ class WasmGraphBuildingInterface {
} // namespace
-DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- const WasmFeatures& enabled, const WasmModule* module,
- compiler::WasmGraphBuilder* builder,
- WasmFeatures* detected, const FunctionBody& body,
- std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins,
- int func_index, InlinedStatus inlined_status) {
+void BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
+ const WasmModule* module, compiler::WasmGraphBuilder* builder,
+ WasmFeatures* detected, const FunctionBody& body,
+ std::vector<compiler::WasmLoopInfo>* loop_infos,
+ DanglingExceptions* dangling_exceptions,
+ compiler::NodeOriginTable* node_origins, int func_index,
+ AssumptionsJournal* assumptions,
+ InlinedStatus inlined_status) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::NoValidationTag, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder, func_index,
- inlined_status);
+ &zone, module, enabled, detected, body, builder, func_index, assumptions,
+ inlined_status, &zone);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
@@ -2140,9 +2425,13 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
if (node_origins) {
builder->RemoveBytecodePositionDecorator();
}
- *loop_infos = decoder.interface().loop_infos();
-
- return decoder.toResult(nullptr);
+ *loop_infos = std::move(decoder.interface().loop_infos());
+ if (dangling_exceptions != nullptr) {
+ *dangling_exceptions = std::move(decoder.interface().dangling_exceptions());
+ }
+ // TurboFan does not run with validation, so graph building must always
+ // succeed.
+ CHECK(decoder.ok());
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index b733fc8b20..2af67b3bc3 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -18,6 +18,7 @@ namespace internal {
class AccountingAllocator;
namespace compiler { // external declarations from compiler.
+class Node;
class NodeOriginTable;
class WasmGraphBuilder;
struct WasmLoopInfo;
@@ -25,6 +26,7 @@ struct WasmLoopInfo;
namespace wasm {
+class AssumptionsJournal;
struct FunctionBody;
class WasmFeatures;
struct WasmModule;
@@ -39,13 +41,29 @@ enum InlinedStatus {
kRegularFunction
};
-V8_EXPORT_PRIVATE DecodeResult
-BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
- const WasmModule* module, compiler::WasmGraphBuilder* builder,
- WasmFeatures* detected, const FunctionBody& body,
- std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins, int func_index,
- InlinedStatus inlined_status);
+struct DanglingExceptions {
+ std::vector<compiler::Node*> exception_values;
+ std::vector<compiler::Node*> effects;
+ std::vector<compiler::Node*> controls;
+
+ void Add(compiler::Node* exception_value, compiler::Node* effect,
+ compiler::Node* control) {
+ exception_values.emplace_back(exception_value);
+ effects.emplace_back(effect);
+ controls.emplace_back(control);
+ }
+
+ size_t Size() const { return exception_values.size(); }
+};
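
BuildTFGraph now hands the dangling exceptional exits of an inlined function back to its caller through this struct instead of wiring them up itself. How the inliner consumes them is not part of this diff; the sketch below is only a guess at the shape of that step, with hypothetical merge helpers standing in for the real graph-construction API:

#include <cstddef>
#include <vector>

struct Node {};

struct DanglingExceptionsSketch {
  std::vector<Node*> exception_values;
  std::vector<Node*> effects;
  std::vector<Node*> controls;
  size_t Size() const { return exception_values.size(); }
};

// Hypothetical merge helpers standing in for real graph construction.
Node* MergeControls(const std::vector<Node*>&) { static Node n; return &n; }
Node* MergeEffects(const std::vector<Node*>&, Node*) { static Node n; return &n; }
Node* MergeValues(const std::vector<Node*>&, Node*) { static Node n; return &n; }

// Connects every dangling exceptional exit of an inlined callee to a single
// handler entry in the caller.
void ConnectToHandler(const DanglingExceptionsSketch& dangling,
                      Node** handler_exception, Node** handler_effect,
                      Node** handler_control) {
  if (dangling.Size() == 0) return;  // Nothing in the callee can throw.
  *handler_control = MergeControls(dangling.controls);
  *handler_effect = MergeEffects(dangling.effects, *handler_control);
  *handler_exception =
      MergeValues(dangling.exception_values, *handler_control);
}

int main() {
  DanglingExceptionsSketch dangling;
  Node exception, effect, control;
  dangling.exception_values.push_back(&exception);
  dangling.effects.push_back(&effect);
  dangling.controls.push_back(&control);
  Node *he = nullptr, *hf = nullptr, *hc = nullptr;
  ConnectToHandler(dangling, &he, &hf, &hc);
  return (he && hf && hc) ? 0 : 1;
}
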
+
+V8_EXPORT_PRIVATE void BuildTFGraph(
+ AccountingAllocator* allocator, const WasmFeatures& enabled,
+ const WasmModule* module, compiler::WasmGraphBuilder* builder,
+ WasmFeatures* detected, const FunctionBody& body,
+ std::vector<compiler::WasmLoopInfo>* loop_infos,
+ DanglingExceptions* dangling_exceptions,
+ compiler::NodeOriginTable* node_origins, int func_index,
+ AssumptionsJournal* assumptions, InlinedStatus inlined_status);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index e039eeed9b..76dac35768 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -10,6 +10,43 @@ namespace v8 {
namespace internal {
namespace wasm {
+void JumpTableAssembler::InitializeJumpsToLazyCompileTable(
+ Address base, uint32_t num_slots, Address lazy_compile_table_start) {
+ uint32_t jump_table_size = SizeForNumberOfSlots(num_slots);
+ JumpTableAssembler jtasm(base, jump_table_size + 256);
+
+ for (uint32_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+ // Make sure we write at the correct offset.
+ int slot_offset =
+ static_cast<int>(JumpTableAssembler::JumpSlotIndexToOffset(slot_index));
+
+ jtasm.SkipUntil(slot_offset);
+
+ Address target =
+ lazy_compile_table_start +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+
+ int offset_before_emit = jtasm.pc_offset();
+ // This function initializes the first jump table with jumps to the lazy
+ // compile table. Both get allocated in the constructor of the
+ // {NativeModule}, so they both should end up in the initial code space.
+ // Jumps within one code space can always be near jumps, so the following
+ // call to {EmitJumpSlot} should always succeed. If the call fails, then
+ // either the jump table allocation was changed incorrectly so that the lazy
+ // compile table was not within near-jump distance of the jump table
+ // anymore (e.g. the initial code space was too small to fit both tables),
+ // or the code space was allocated larger than the maximum near-jump
+ // distance.
+ CHECK(jtasm.EmitJumpSlot(target));
+ int written_bytes = jtasm.pc_offset() - offset_before_emit;
+ // We write nops here instead of skipping to avoid partial instructions in
+ // the jump table. Partial instructions can cause problems for the
+ // disassembler.
+ jtasm.NopBytes(kJumpTableSlotSize - written_bytes);
+ }
+ FlushInstructionCache(base, jump_table_size);
+}
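
The initializer walks the table slot by slot: skip to the slot's architecture-specific offset, emit a near jump to the matching lazy-compile slot, and pad the remainder of the slot with nops. The standalone sketch below illustrates only the offset bookkeeping; the slot sizes and addresses are made-up values, since the real ones are per-architecture constants:

#include <cassert>
#include <cstdint>

constexpr int kJumpTableSlotSize = 8;     // made-up; a per-architecture constant
constexpr int kLazyCompileSlotSize = 16;  // made-up; a per-architecture constant

constexpr int JumpSlotIndexToOffset(uint32_t index) {
  return static_cast<int>(index) * kJumpTableSlotSize;
}
constexpr int LazyCompileSlotIndexToOffset(uint32_t index) {
  return static_cast<int>(index) * kLazyCompileSlotSize;
}

int main() {
  const uintptr_t jump_table_base = 0x1000;  // made-up addresses
  const uintptr_t lazy_table_base = 0x2000;
  for (uint32_t slot = 0; slot < 4; ++slot) {
    // Where the jump gets written, and where it has to land.
    uintptr_t write_at = jump_table_base + JumpSlotIndexToOffset(slot);
    uintptr_t target = lazy_table_base + LazyCompileSlotIndexToOffset(slot);
    // The emitted jump plus nop padding must fill the slot exactly, so the
    // next iteration starts at write_at + kJumpTableSlotSize.
    assert((write_at - jump_table_base) % kJumpTableSlotSize == 0);
    assert(target > write_at);  // holds for these made-up layouts
  }
}
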
+
// The implementation is compact enough to implement it inline here. If it gets
// much bigger, we might want to split it in a separate file per architecture.
#if V8_TARGET_ARCH_X64
@@ -54,8 +91,12 @@ void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
}
void JumpTableAssembler::NopBytes(int bytes) {
- DCHECK_LE(0, bytes);
- Nop(bytes);
+ if (bytes) Nop(bytes);
+}
+
+void JumpTableAssembler::SkipUntil(int offset) {
+ DCHECK_GE(offset, pc_offset());
+ pc_ += offset - pc_offset();
}
#elif V8_TARGET_ARCH_IA32
@@ -80,8 +121,12 @@ void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
}
void JumpTableAssembler::NopBytes(int bytes) {
- DCHECK_LE(0, bytes);
- Nop(bytes);
+ if (bytes) Nop(bytes);
+}
+
+void JumpTableAssembler::SkipUntil(int offset) {
+ DCHECK_GE(offset, pc_offset());
+ pc_ += offset - pc_offset();
}
#elif V8_TARGET_ARCH_ARM
@@ -129,6 +174,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -142,14 +193,27 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
}
bool JumpTableAssembler::EmitJumpSlot(Address target) {
- if (!TurboAssembler::IsNearCallOffset(
- (reinterpret_cast<byte*>(target) - pc_) / kInstrSize)) {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ static constexpr ptrdiff_t kCodeEntryMarkerSize = kInstrSize;
+#else
+ static constexpr ptrdiff_t kCodeEntryMarkerSize = 0;
+#endif
+
+ byte* jump_pc = pc_ + kCodeEntryMarkerSize;
+ ptrdiff_t jump_distance = reinterpret_cast<byte*>(target) - jump_pc;
+ DCHECK_EQ(0, jump_distance % kInstrSize);
+ int64_t instr_offset = jump_distance / kInstrSize;
+ if (!MacroAssembler::IsNearCallOffset(instr_offset)) {
return false;
}
CodeEntry();
- Jump(target, RelocInfo::NO_INFO);
+ DCHECK_EQ(jump_pc, pc_);
+ DCHECK_EQ(instr_offset,
+ reinterpret_cast<Instr*>(target) - reinterpret_cast<Instr*>(pc_));
+ DCHECK(is_int26(instr_offset));
+ b(static_cast<int>(instr_offset));
return true;
}
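
The rewritten arm64 slot measures the jump distance from the jump instruction itself (after the optional code-entry marker emitted under control-flow integrity) and emits a direct b only if the distance fits the instruction's signed 26-bit offset, i.e. roughly plus or minus 128 MiB. A standalone sketch of that range check:

#include <cassert>
#include <cstdint>

constexpr int kInstrSize = 4;

// Signed 26-bit range: [-2^25, 2^25 - 1] instructions.
bool FitsInInt26(int64_t value) {
  return value >= -(int64_t{1} << 25) && value < (int64_t{1} << 25);
}

bool CanEmitNearJump(uintptr_t jump_pc, uintptr_t target) {
  int64_t byte_distance =
      static_cast<int64_t>(target) - static_cast<int64_t>(jump_pc);
  if (byte_distance % kInstrSize != 0) return false;  // must be aligned
  return FitsInInt26(byte_distance / kInstrSize);
}

int main() {
  assert(CanEmitNearJump(0x1000, 0x1000 + 128 * kInstrSize));
  // About 128 MiB is the limit for a 26-bit instruction offset; 1 GiB is out.
  assert(!CanEmitNearJump(0x1000, 0x1000 + (int64_t{1} << 30)));
}
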
@@ -199,6 +263,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_S390X
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -246,6 +316,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -265,7 +341,8 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
}
void JumpTableAssembler::EmitFarJumpSlot(Address target) {
- JumpToOffHeapInstructionStream(target);
+ li(t9, Operand(target, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t9);
}
// static
@@ -281,6 +358,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_LOONG64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -298,7 +381,8 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
return true;
}
void JumpTableAssembler::EmitFarJumpSlot(Address target) {
- JumpToOffHeapInstructionStream(target);
+ li(t7, Operand(target, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t7);
}
void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
UNREACHABLE();
@@ -311,6 +395,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -367,6 +457,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_RISCV64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -407,6 +503,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#elif V8_TARGET_ARCH_RISCV32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
@@ -447,6 +549,12 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+void JumpTableAssembler::SkipUntil(int offset) {
+ // On this platform the jump table is not zapped with valid instructions, so
+ // skipping over bytes is not allowed.
+ DCHECK_EQ(offset, pc_offset());
+}
+
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index c69cd9bc81..eeb399996b 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -127,6 +127,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
FlushInstructionCache(base, lazy_compile_table_size);
}
+ // Initializes the jump table starting at {base} with jumps to the lazy
+ // compile table starting at {lazy_compile_table_start}.
+ static void InitializeJumpsToLazyCompileTable(
+ Address base, uint32_t num_slots, Address lazy_compile_table_start);
+
static void GenerateFarJumpTable(Address base, Address* stub_targets,
int num_runtime_slots,
int num_function_slots) {
@@ -160,6 +165,9 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
JumpTableAssembler::PatchFarJumpSlot(far_jump_table_slot, target);
CHECK(jtasm.EmitJumpSlot(far_jump_table_slot));
}
+ // We write nops here instead of skipping to avoid partial instructions in
+ // the jump table. Partial instructions can cause problems for the
+ // disassembler.
jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
FlushInstructionCache(jump_table_slot, kJumpTableSlotSize);
}
@@ -262,6 +270,8 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static void PatchFarJumpSlot(Address slot, Address target);
void NopBytes(int bytes);
+
+ void SkipUntil(int offset);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index ab8b49027e..96abaa0a04 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -5,6 +5,7 @@
#include "src/wasm/module-compiler.h"
#include <algorithm>
+#include <memory>
#include <queue>
#include "src/api/api-inl.h"
@@ -96,10 +97,7 @@ class V8_NODISCARD BackgroundCompileScope {
std::shared_ptr<NativeModule> native_module_;
};
-enum CompileBaselineOnly : bool {
- kBaselineOnly = true,
- kBaselineOrTopTier = false
-};
+enum CompilationTier { kBaseline = 0, kTopTier = 1, kNumTiers = kTopTier + 1 };
// A set of work-stealing queues (vectors of units). Each background compile
// task owns one of the queues and steals from all others once its own queue
@@ -173,19 +171,15 @@ class CompilationUnitQueues {
return queues_[task_id].get();
}
- base::Optional<WasmCompilationUnit> GetNextUnit(
- Queue* queue, CompileBaselineOnly baseline_only) {
- // As long as any lower-tier units are outstanding we need to steal them
- // before executing own higher-tier units.
- int max_tier = baseline_only ? kBaseline : kTopTier;
- for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
- if (auto unit = GetNextUnitOfTier(queue, tier)) {
- size_t old_units_count =
- num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
- DCHECK_LE(1, old_units_count);
- USE(old_units_count);
- return unit;
- }
+ base::Optional<WasmCompilationUnit> GetNextUnit(Queue* queue,
+ CompilationTier tier) {
+ DCHECK_LT(tier, CompilationTier::kNumTiers);
+ if (auto unit = GetNextUnitOfTier(queue, tier)) {
+ size_t old_units_count =
+ num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
+ DCHECK_LE(1, old_units_count);
+ USE(old_units_count);
+ return unit;
}
return {};
}
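
With CompilationTier replacing CompileBaselineOnly, a compile job now drains only its own tier's queue instead of stealing lower-tier work first. The sketch below models that per-tier split with one queue and one relaxed atomic counter per tier; the Unit type and the missing locking are simplifications, not the real work-stealing structure:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <optional>
#include <queue>

enum Tier { kBaseline = 0, kTopTier = 1, kNumTiers = 2 };

struct Unit {
  int func_index;
  Tier tier;
};

class TierQueues {
 public:
  void Add(Unit unit) {
    queues_[unit.tier].push(unit);
    num_units_[unit.tier].fetch_add(1, std::memory_order_relaxed);
  }

  // A baseline job passes kBaseline, a top-tier job passes kTopTier; neither
  // job drains the other tier's queue any more.
  std::optional<Unit> GetNextUnit(Tier tier) {
    if (queues_[tier].empty()) return std::nullopt;
    Unit unit = queues_[tier].front();
    queues_[tier].pop();
    num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
    return unit;
  }

  // Momentary snapshot, like GetSizeForTier above.
  size_t GetSizeForTier(Tier tier) const {
    return num_units_[tier].load(std::memory_order_relaxed);
  }

 private:
  std::queue<Unit> queues_[kNumTiers];
  std::atomic<size_t> num_units_[kNumTiers] = {};
};

int main() {
  TierQueues queues;
  queues.Add({0, kBaseline});
  queues.Add({0, kTopTier});
  assert(queues.GetSizeForTier(kTopTier) == 1);
  assert(queues.GetNextUnit(kBaseline)->tier == kBaseline);
}
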
@@ -210,8 +204,9 @@ class CompilationUnitQueues {
base::MutexGuard guard(&queue->mutex);
base::Optional<base::MutexGuard> big_units_guard;
- for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
- std::make_pair(int{kTopTier}, top_tier_units)}) {
+ for (auto pair :
+ {std::make_pair(CompilationTier::kBaseline, baseline_units),
+ std::make_pair(CompilationTier::kTopTier, top_tier_units)}) {
int tier = pair.first;
base::Vector<WasmCompilationUnit> units = pair.second;
if (units.empty()) continue;
@@ -256,26 +251,29 @@ class CompilationUnitQueues {
queue->top_tier_priority_units.emplace(priority, unit);
}
num_priority_units_.fetch_add(1, std::memory_order_relaxed);
- num_units_[kTopTier].fetch_add(1, std::memory_order_relaxed);
+ num_units_[CompilationTier::kTopTier].fetch_add(1,
+ std::memory_order_relaxed);
}
- // Get the current total number of units in all queues. This is only a
+ // Get the current number of units in the queue for |tier|. This is only a
// momentary snapshot, it's not guaranteed that {GetNextUnit} returns a unit
// if this method returns non-zero.
- size_t GetTotalSize() const {
- size_t total = 0;
- for (auto& atomic_counter : num_units_) {
- total += atomic_counter.load(std::memory_order_relaxed);
+ size_t GetSizeForTier(CompilationTier tier) const {
+ DCHECK_LT(tier, CompilationTier::kNumTiers);
+ return num_units_[tier].load(std::memory_order_relaxed);
+ }
+
+ void AllowAnotherTopTierJob(uint32_t func_index) {
+ top_tier_compiled_[func_index].store(false, std::memory_order_relaxed);
+ }
+
+ void AllowAnotherTopTierJobForAllFunctions() {
+ for (int i = 0; i < num_declared_functions_; i++) {
+ AllowAnotherTopTierJob(i);
}
- return total;
}
private:
- // Store tier in int so we can easily loop over it:
- static constexpr int kBaseline = 0;
- static constexpr int kTopTier = 1;
- static constexpr int kNumTiers = kTopTier + 1;
-
// Functions bigger than {kBigUnitsLimit} will be compiled first, in ascending
// order of their function body size.
static constexpr size_t kBigUnitsLimit = 4096;
@@ -315,10 +313,10 @@ class CompilationUnitQueues {
base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
- std::atomic<bool> has_units[kNumTiers];
+ std::atomic<bool> has_units[CompilationTier::kNumTiers];
// Protected by {mutex}:
- std::priority_queue<BigUnit> units[kNumTiers];
+ std::priority_queue<BigUnit> units[CompilationTier::kNumTiers];
};
struct QueueImpl : public Queue {
@@ -334,7 +332,7 @@ class CompilationUnitQueues {
base::Mutex mutex;
// All fields below are protected by {mutex}.
- std::vector<WasmCompilationUnit> units[kNumTiers];
+ std::vector<WasmCompilationUnit> units[CompilationTier::kNumTiers];
std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
int next_steal_task_id;
};
@@ -344,19 +342,12 @@ class CompilationUnitQueues {
return next == static_cast<int>(num_queues) ? 0 : next;
}
- int GetLowestTierWithUnits() const {
- for (int tier = 0; tier < kNumTiers; ++tier) {
- if (num_units_[tier].load(std::memory_order_relaxed) > 0) return tier;
- }
- return kNumTiers;
- }
-
base::Optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
int tier) {
QueueImpl* queue = static_cast<QueueImpl*>(public_queue);
// First check whether there is a priority unit. Execute that first.
- if (tier == kTopTier) {
+ if (tier == CompilationTier::kTopTier) {
if (auto unit = GetTopTierPriorityUnit(queue)) {
return unit;
}
@@ -430,7 +421,8 @@ class CompilationUnitQueues {
true, std::memory_order_relaxed)) {
return unit;
}
- num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
+ num_units_[CompilationTier::kTopTier].fetch_sub(
+ 1, std::memory_order_relaxed);
}
steal_task_id = queue->next_steal_task_id;
}
@@ -504,7 +496,8 @@ class CompilationUnitQueues {
returned_unit = unit;
break;
}
- num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
+ num_units_[CompilationTier::kTopTier].fetch_sub(
+ 1, std::memory_order_relaxed);
}
}
base::MutexGuard guard(&queue->mutex);
@@ -520,7 +513,7 @@ class CompilationUnitQueues {
BigUnitsQueue big_units_queue_;
- std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<size_t> num_units_[CompilationTier::kNumTiers];
std::atomic<size_t> num_priority_units_{0};
std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
std::atomic<int> next_queue_to_add{0};
@@ -544,7 +537,12 @@ class CompilationStateImpl {
std::shared_ptr<Counters> async_counters,
DynamicTiering dynamic_tiering);
~CompilationStateImpl() {
- if (compile_job_->IsValid()) compile_job_->CancelAndDetach();
+ if (js_to_wasm_wrapper_job_->IsValid())
+ js_to_wasm_wrapper_job_->CancelAndDetach();
+ if (baseline_compile_job_->IsValid())
+ baseline_compile_job_->CancelAndDetach();
+ if (top_tier_compile_job_->IsValid())
+ top_tier_compile_job_->CancelAndDetach();
}
// Call right after the constructor, after the {compilation_state_} field in
@@ -575,8 +573,6 @@ class CompilationStateImpl {
int num_export_wrappers,
ProfileInformation* pgo_info);
- // Initialize the compilation progress after deserialization. This is needed
- // for recompilation (e.g. for tier down) to work later.
void InitializeCompilationProgressAfterDeserialization(
base::Vector<const int> lazy_functions,
base::Vector<const int> eager_functions);
@@ -591,14 +587,6 @@ class CompilationStateImpl {
// equivalent to {InitializeCompilationUnits}.
void AddCompilationUnit(CompilationUnitBuilder* builder, int func_index);
- // Initialize recompilation of the whole module: Setup compilation progress
- // for recompilation and add the respective compilation units. The callback is
- // called immediately if no recompilation is needed, or called later
- // otherwise.
- void InitializeRecompilation(TieringState new_tiering_state,
- std::unique_ptr<CompilationEventCallback>
- recompilation_finished_callback);
-
// Add the callback to be called on compilation events. Needs to be
// set before {CommitCompilationUnits} is run to ensure that it receives all
// events. The callback object must support being deleted from any thread.
@@ -616,7 +604,7 @@ class CompilationStateImpl {
CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
- CompilationUnitQueues::Queue*, CompileBaselineOnly);
+ CompilationUnitQueues::Queue*, CompilationTier tier);
std::shared_ptr<JSToWasmWrapperCompilationUnit>
GetNextJSToWasmWrapperCompilationUnit();
@@ -630,15 +618,21 @@ class CompilationStateImpl {
void SchedulePublishCompilationResults(
std::vector<std::unique_ptr<WasmCode>> unpublished_code);
- size_t NumOutstandingCompilations() const;
+ size_t NumOutstandingExportWrappers() const;
+ size_t NumOutstandingCompilations(CompilationTier tier) const;
void SetError();
void WaitForCompilationEvent(CompilationEvent event);
- void SetHighPriority() {
- // TODO(wasm): Keep a lower priority for TurboFan-only jobs.
- compile_job_->UpdatePriority(TaskPriority::kUserBlocking);
+ void TierUpAllFunctions();
+
+ void AllowAnotherTopTierJob(uint32_t func_index) {
+ compilation_unit_queues_.AllowAnotherTopTierJob(func_index);
+ }
+
+ void AllowAnotherTopTierJobForAllFunctions() {
+ compilation_unit_queues_.AllowAnotherTopTierJobForAllFunctions();
}
bool failed() const {
@@ -651,11 +645,6 @@ class CompilationStateImpl {
outstanding_export_wrappers_ == 0;
}
- bool recompilation_finished() const {
- base::MutexGuard guard(&callbacks_mutex_);
- return outstanding_recompilation_functions_ == 0;
- }
-
DynamicTiering dynamic_tiering() const { return dynamic_tiering_; }
Counters* counters() const { return async_counters_.get(); }
@@ -682,15 +671,14 @@ class CompilationStateImpl {
}
private:
- // Returns the potentially-updated {function_progress}.
- uint8_t AddCompilationUnitInternal(CompilationUnitBuilder* builder,
- int function_index,
- uint8_t function_progress);
+ void AddCompilationUnitInternal(CompilationUnitBuilder* builder,
+ int function_index,
+ uint8_t function_progress);
// Trigger callbacks according to the internal counters below
- // (outstanding_...), plus the given events.
+ // (outstanding_...).
// Hold the {callbacks_mutex_} when calling this method.
- void TriggerCallbacks(base::EnumSet<CompilationEvent> additional_events = {});
+ void TriggerCallbacks();
void PublishCompilationResults(
std::vector<std::unique_ptr<WasmCode>> unpublished_code);
@@ -726,9 +714,11 @@ class CompilationStateImpl {
// being accessed concurrently.
mutable base::Mutex mutex_;
- // The compile job handle, initialized right after construction of
+ // The compile job handles, initialized right after construction of
// {CompilationStateImpl}.
- std::unique_ptr<JobHandle> compile_job_;
+ std::unique_ptr<JobHandle> js_to_wasm_wrapper_job_;
+ std::unique_ptr<JobHandle> baseline_compile_job_;
+ std::unique_ptr<JobHandle> top_tier_compile_job_;
// The compilation id to identify trace events linked to this compilation.
static constexpr int kInvalidCompilationID = -1;
@@ -770,9 +760,6 @@ class CompilationStateImpl {
size_t bytes_since_last_chunk_ = 0;
std::vector<uint8_t> compilation_progress_;
- int outstanding_recompilation_functions_ = 0;
- TieringState tiering_state_ = kTieredUp;
-
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -785,7 +772,6 @@ class CompilationStateImpl {
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
- using MissingRecompilationField = base::BitField8<bool, 6, 1>;
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -805,7 +791,7 @@ bool BackgroundCompileScope::cancelled() const {
Impl(native_module_->compilation_state())->cancelled();
}
-void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
+void UpdateFeatureUseCounts(Isolate* isolate, WasmFeatures detected) {
using Feature = v8::Isolate::UseCounterFeature;
constexpr static std::pair<WasmFeature, Feature> kUseCounters[] = {
{kFeature_reftypes, Feature::kWasmRefTypes},
@@ -853,7 +839,17 @@ void CompilationState::AddCallback(
return Impl(this)->AddCallback(std::move(callback));
}
-void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); }
+void CompilationState::TierUpAllFunctions() {
+ Impl(this)->TierUpAllFunctions();
+}
+
+void CompilationState::AllowAnotherTopTierJob(uint32_t func_index) {
+ Impl(this)->AllowAnotherTopTierJob(func_index);
+}
+
+void CompilationState::AllowAnotherTopTierJobForAllFunctions() {
+ Impl(this)->AllowAnotherTopTierJobForAllFunctions();
+}
void CompilationState::InitializeAfterDeserialization(
base::Vector<const int> lazy_functions,
@@ -868,10 +864,6 @@ bool CompilationState::baseline_compilation_finished() const {
return Impl(this)->baseline_compilation_finished();
}
-bool CompilationState::recompilation_finished() const {
- return Impl(this)->recompilation_finished();
-}
-
void CompilationState::set_compilation_id(int compilation_id) {
Impl(this)->set_compilation_id(compilation_id);
}
@@ -920,7 +912,7 @@ const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
}
CompileStrategy GetCompileStrategy(const WasmModule* module,
- const WasmFeatures& enabled_features,
+ WasmFeatures enabled_features,
uint32_t func_index, bool lazy_module) {
if (lazy_module) return CompileStrategy::kLazy;
if (!enabled_features.has_compilation_hints()) {
@@ -945,20 +937,24 @@ struct ExecutionTierPair {
ExecutionTier top_tier;
};
+// Pass the debug state as a separate parameter to avoid data races: the debug
+// state may change between its use here and its use at the call site. To get
+// a consistent view of the debug state, the caller reads it once and then
+// passes it to this function.
ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
DynamicTiering dynamic_tiering,
+ DebugState is_in_debug_state,
bool lazy_module) {
const WasmModule* module = native_module->module();
if (is_asmjs_module(module)) {
return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan};
}
- // TODO(13224): Use lazy compilation for debug code.
- if (native_module->IsTieredDown()) {
- return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
- }
if (lazy_module) {
return {ExecutionTier::kNone, ExecutionTier::kNone};
}
+ if (is_in_debug_state) {
+ return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
+ }
ExecutionTier baseline_tier =
v8_flags.liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
bool eager_tier_up = !dynamic_tiering && v8_flags.wasm_tier_up;
@@ -968,14 +964,17 @@ ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
}
ExecutionTierPair GetLazyCompilationTiers(NativeModule* native_module,
- uint32_t func_index) {
+ uint32_t func_index,
+ DebugState is_in_debug_state) {
DynamicTiering dynamic_tiering =
Impl(native_module->compilation_state())->dynamic_tiering();
// For lazy compilation, get the tiers we would use if lazy compilation is
// disabled.
constexpr bool kNotLazy = false;
- ExecutionTierPair tiers =
- GetDefaultTiersPerModule(native_module, dynamic_tiering, kNotLazy);
+ ExecutionTierPair tiers = GetDefaultTiersPerModule(
+ native_module, dynamic_tiering, is_in_debug_state, kNotLazy);
+ // If we are in debug mode, we ignore compilation hints.
+ if (is_in_debug_state) return tiers;
// Check if compilation hints override default tiering behaviour.
if (native_module->enabled_features().has_compilation_hints()) {
@@ -1012,7 +1011,7 @@ class CompilationUnitBuilder {
void AddImportUnit(uint32_t func_index) {
DCHECK_GT(native_module_->module()->num_imported_functions, func_index);
baseline_units_.emplace_back(func_index, ExecutionTier::kNone,
- kNoDebugging);
+ kNotForDebugging);
}
void AddJSToWasmWrapperUnit(
@@ -1021,35 +1020,22 @@ class CompilationUnitBuilder {
}
void AddBaselineUnit(int func_index, ExecutionTier tier) {
- baseline_units_.emplace_back(func_index, tier, kNoDebugging);
+ baseline_units_.emplace_back(func_index, tier, kNotForDebugging);
}
void AddTopTierUnit(int func_index, ExecutionTier tier) {
- tiering_units_.emplace_back(func_index, tier, kNoDebugging);
+ tiering_units_.emplace_back(func_index, tier, kNotForDebugging);
}
- void AddDebugUnit(int func_index) {
- baseline_units_.emplace_back(func_index, ExecutionTier::kLiftoff,
- kForDebugging);
- }
-
- void AddRecompilationUnit(int func_index, ExecutionTier tier) {
- // For recompilation, just treat all units like baseline units.
- baseline_units_.emplace_back(
- func_index, tier,
- tier == ExecutionTier::kLiftoff ? kForDebugging : kNoDebugging);
- }
-
- bool Commit() {
+ void Commit() {
if (baseline_units_.empty() && tiering_units_.empty() &&
js_to_wasm_wrapper_units_.empty()) {
- return false;
+ return;
}
compilation_state()->CommitCompilationUnits(
base::VectorOf(baseline_units_), base::VectorOf(tiering_units_),
base::VectorOf(js_to_wasm_wrapper_units_));
Clear();
- return true;
}
void Clear() {
@@ -1072,38 +1058,20 @@ class CompilationUnitBuilder {
js_to_wasm_wrapper_units_;
};
-WasmError GetWasmErrorWithName(ModuleWireBytes wire_bytes,
- const WasmFunction* func,
- const WasmModule* module, WasmError error) {
- WasmName name = wire_bytes.GetNameOrNull(func, module);
- if (name.begin() == nullptr) {
- return WasmError(error.offset(), "Compiling function #%d failed: %s",
- func->func_index, error.message().c_str());
- } else {
- TruncatedUserString<> truncated_name(name);
- return WasmError(error.offset(),
- "Compiling function #%d:\"%.*s\" failed: %s",
- func->func_index, truncated_name.length(),
- truncated_name.start(), error.message().c_str());
- }
-}
-
-void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes,
- const WasmFunction* func, const WasmModule* module,
- WasmError error) {
- thrower->CompileFailed(GetWasmErrorWithName(std::move(wire_bytes), func,
- module, std::move(error)));
-}
-
DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
base::Vector<const uint8_t> code,
- AccountingAllocator* allocator,
WasmFeatures enabled_features) {
+  // Sometimes functions get validated unpredictably in the background, e.g.
+  // for debugging or when inlining one function into another. Check whether
+  // this function has already been validated, and exit early if so.
+ if (module->function_was_validated(func_index)) return {};
const WasmFunction* func = &module->functions[func_index];
FunctionBody body{func->sig, func->code.offset(), code.begin(), code.end()};
WasmFeatures detected_features;
- return ValidateFunctionBody(allocator, enabled_features, module,
- &detected_features, body);
+ DecodeResult result =
+ ValidateFunctionBody(enabled_features, module, &detected_features, body);
+ if (result.ok()) module->set_function_validated(func_index);
+ return result;
}
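
ValidateSingleFunction now consults a per-function "already validated" bit on the module, so functions that were validated opportunistically in the background (for debugging or inlining) are not validated again. A self-contained sketch of that validate-at-most-once pattern, with an atomic flag vector standing in for the module's real bookkeeping:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <functional>
#include <vector>

class ValidationCache {
 public:
  explicit ValidationCache(size_t num_functions) : validated_(num_functions) {}

  // Returns true if the function is (now) known to be valid. The validation
  // callback runs only the first time a given function index is seen.
  bool EnsureValidated(size_t func_index,
                       const std::function<bool(size_t)>& validate) {
    if (validated_[func_index].load(std::memory_order_acquire)) return true;
    if (!validate(func_index)) return false;  // Failures are not cached.
    validated_[func_index].store(true, std::memory_order_release);
    return true;
  }

 private:
  std::vector<std::atomic<bool>> validated_;
};

int main() {
  int validation_runs = 0;
  ValidationCache cache(4);
  auto validate = [&](size_t) {
    ++validation_runs;
    return true;
  };
  assert(cache.EnsureValidated(2, validate));
  assert(cache.EnsureValidated(2, validate));
  assert(validation_runs == 1);  // The second call hit the cached flag.
}
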
enum OnlyLazyFunctions : bool {
@@ -1111,37 +1079,6 @@ enum OnlyLazyFunctions : bool {
kOnlyLazyFunctions = true,
};
-void ValidateSequentially(
- const WasmModule* module, NativeModule* native_module, Counters* counters,
- AccountingAllocator* allocator, ErrorThrower* thrower,
- OnlyLazyFunctions only_lazy_functions = kAllFunctions) {
- DCHECK(!thrower->error());
- uint32_t start = module->num_imported_functions;
- uint32_t end = start + module->num_declared_functions;
- auto enabled_features = native_module->enabled_features();
- bool lazy_module = v8_flags.wasm_lazy_compilation;
- for (uint32_t func_index = start; func_index < end; func_index++) {
- // Skip non-lazy functions if requested.
- if (only_lazy_functions) {
- CompileStrategy strategy =
- GetCompileStrategy(module, enabled_features, func_index, lazy_module);
- if (strategy != CompileStrategy::kLazy &&
- strategy != CompileStrategy::kLazyBaselineEagerTopTier) {
- continue;
- }
- }
-
- ModuleWireBytes wire_bytes{native_module->wire_bytes()};
- const WasmFunction* func = &module->functions[func_index];
- base::Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
- DecodeResult result = ValidateSingleFunction(module, func_index, code,
- allocator, enabled_features);
- if (result.failed()) {
- SetCompileError(thrower, wire_bytes, func, module, result.error());
- }
- }
-}
-
bool IsLazyModule(const WasmModule* module) {
return v8_flags.wasm_lazy_compilation ||
(v8_flags.asm_wasm_lazy_compilation && is_asmjs_module(module));
@@ -1187,18 +1124,17 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
- base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported()
- ? base::ThreadTicks::Now()
- : base::ThreadTicks();
-
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
- ExecutionTierPair tiers = GetLazyCompilationTiers(native_module, func_index);
+ DebugState is_in_debug_state = native_module->IsInDebugState();
+ ExecutionTierPair tiers =
+ GetLazyCompilationTiers(native_module, func_index, is_in_debug_state);
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
- WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier,
- kNoDebugging};
+ WasmCompilationUnit baseline_unit{
+ func_index, tiers.baseline_tier,
+ is_in_debug_state ? kForDebugging : kNotForDebugging};
CompilationEnv env = native_module->CreateCompilationEnv();
// TODO(wasm): Use an assembler buffer cache for lazy compilation.
AssemblerBufferCache* assembler_buffer_cache = nullptr;
@@ -1207,11 +1143,6 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
&env, compilation_state->GetWireBytesStorage().get(), counters,
assembler_buffer_cache, &detected_features);
compilation_state->OnCompilationStopped(detected_features);
- if (!thread_ticks.IsNull()) {
- native_module->UpdateCPUDuration(
- (base::ThreadTicks::Now() - thread_ticks).InMicroseconds(),
- tiers.baseline_tier);
- }
// During lazy compilation, we can only get compilation errors when
// {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
@@ -1245,7 +1176,8 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
if (GetCompileStrategy(module, native_module->enabled_features(), func_index,
lazy_module) == CompileStrategy::kLazy &&
tiers.baseline_tier < tiers.top_tier) {
- WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
+ WasmCompilationUnit tiering_unit{func_index, tiers.top_tier,
+ kNotForDebugging};
compilation_state->CommitTopTierCompilationUnit(tiering_unit);
}
return true;
@@ -1262,15 +1194,15 @@ void ThrowLazyCompilationError(Isolate* isolate,
base::Vector<const uint8_t> code =
compilation_state->GetWireBytesStorage()->GetCode(func->code);
- WasmEngine* engine = GetWasmEngine();
auto enabled_features = native_module->enabled_features();
- DecodeResult decode_result = ValidateSingleFunction(
- module, func_index, code, engine->allocator(), enabled_features);
+ DecodeResult decode_result =
+ ValidateSingleFunction(module, func_index, code, enabled_features);
CHECK(decode_result.failed());
wasm::ErrorThrower thrower(isolate, nullptr);
- SetCompileError(&thrower, ModuleWireBytes(native_module->wire_bytes()), func,
- module, decode_result.error());
+ thrower.CompileFailed(GetWasmErrorWithName(native_module->wire_bytes(),
+ func_index, module,
+ std::move(decode_result).error()));
}
class TransitiveTypeFeedbackProcessor {
@@ -1321,7 +1253,9 @@ class TransitiveTypeFeedbackProcessor {
DisallowGarbageCollection no_gc_scope_;
WasmInstanceObject instance_;
const WasmModule* const module_;
- base::MutexGuard mutex_guard;
+ // TODO(jkummerow): Check if it makes a difference to apply any updates
+ // as a single batch at the end.
+ base::SharedMutexGuard<base::kExclusive> mutex_guard;
std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_for_function_;
std::set<int> queue_;
};
@@ -1455,12 +1389,13 @@ void TriggerTierUp(WasmInstanceObject instance, int func_index) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan,
- kNoDebugging};
+ kNotForDebugging};
const WasmModule* module = native_module->module();
int priority;
{
- base::MutexGuard mutex_guard(&module->type_feedback.mutex);
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ &module->type_feedback.mutex);
int array_index =
wasm::declared_function_index(instance.module(), func_index);
instance.tiering_budget_array()[array_index] = v8_flags.wasm_tiering_budget;
@@ -1477,7 +1412,7 @@ void TriggerTierUp(WasmInstanceObject instance, int func_index) {
// Before adding the tier-up unit or increasing priority, do process type
// feedback for best code generation.
- if (v8_flags.wasm_speculative_inlining) {
+ if (native_module->enabled_features().has_inlining()) {
// TODO(jkummerow): we could have collisions here if different instances
// of the same module have collected different feedback. If that ever
// becomes a problem, figure out a solution.
@@ -1489,21 +1424,21 @@ void TriggerTierUp(WasmInstanceObject instance, int func_index) {
void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance,
int func_index) {
- if (v8_flags.wasm_speculative_inlining) {
+ NativeModule* native_module = instance.module_object().native_module();
+ if (native_module->enabled_features().has_inlining()) {
TransitiveTypeFeedbackProcessor::Process(instance, func_index);
}
- auto* native_module = instance.module_object().native_module();
- wasm::GetWasmEngine()->CompileFunction(isolate, native_module, func_index,
+ wasm::GetWasmEngine()->CompileFunction(isolate->counters(), native_module,
+ func_index,
wasm::ExecutionTier::kTurbofan);
CHECK(!native_module->compilation_state()->failed());
}
namespace {
-void RecordStats(CodeT codet, Counters* counters) {
- if (codet.is_off_heap_trampoline()) return;
- Code code = FromCodeT(codet);
- counters->wasm_generated_code_size()->Increment(code.raw_body_size());
+void RecordStats(Code code, Counters* counters) {
+ if (!code.has_instruction_stream()) return;
+ counters->wasm_generated_code_size()->Increment(code.body_size());
counters->wasm_reloc_size()->Increment(code.relocation_info().length());
}
@@ -1570,17 +1505,8 @@ constexpr uint8_t kMainTaskId = 0;
// Run by the {BackgroundCompileJob} (on any thread).
CompilationExecutionResult ExecuteCompilationUnits(
std::weak_ptr<NativeModule> native_module, Counters* counters,
- JobDelegate* delegate, CompileBaselineOnly baseline_only) {
+ JobDelegate* delegate, CompilationTier tier) {
TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
-
- // Execute JS to Wasm wrapper units first, so that they are ready to be
- // finalized by the main thread when the kFinishedBaselineCompilation event is
- // triggered.
- if (ExecuteJSToWasmWrapperCompilationUnits(native_module, delegate) ==
- kYield) {
- return kYield;
- }
-
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
base::Optional<CompilationEnv> env;
@@ -1596,10 +1522,6 @@ CompilationExecutionResult ExecuteCompilationUnits(
WasmFeatures detected_features = WasmFeatures::None();
- base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported()
- ? base::ThreadTicks::Now()
- : base::ThreadTicks();
-
// Preparation (synchronized): Initialize the fields above and get the first
// compilation unit.
{
@@ -1609,8 +1531,8 @@ CompilationExecutionResult ExecuteCompilationUnits(
wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
queue = compile_scope.compilation_state()->GetQueueForCompileTask(task_id);
- unit = compile_scope.compilation_state()->GetNextCompilationUnit(
- queue, baseline_only);
+ unit =
+ compile_scope.compilation_state()->GetNextCompilationUnit(queue, tier);
if (!unit) return kNoMoreUnits;
}
TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id);
@@ -1659,12 +1581,7 @@ CompilationExecutionResult ExecuteCompilationUnits(
// Yield or get next unit.
if (yield ||
!(unit = compile_scope.compilation_state()->GetNextCompilationUnit(
- queue, baseline_only))) {
- if (!thread_ticks.IsNull()) {
- compile_scope.native_module()->UpdateCPUDuration(
- (base::ThreadTicks::Now() - thread_ticks).InMicroseconds(),
- current_tier);
- }
+ queue, tier))) {
std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope.native_module()->AddCompiledCode(
base::VectorOf(std::move(results_to_publish)));
@@ -1686,12 +1603,6 @@ CompilationExecutionResult ExecuteCompilationUnits(
bool liftoff_finished = unit->tier() != current_tier &&
unit->tier() == ExecutionTier::kTurbofan;
if (batch_full || liftoff_finished) {
- if (!thread_ticks.IsNull()) {
- base::ThreadTicks thread_ticks_now = base::ThreadTicks::Now();
- compile_scope.native_module()->UpdateCPUDuration(
- (thread_ticks_now - thread_ticks).InMicroseconds(), current_tier);
- thread_ticks = thread_ticks_now;
- }
std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope.native_module()->AddCompiledCode(
base::VectorOf(std::move(results_to_publish)));
@@ -1717,6 +1628,19 @@ int AddExportWrapperUnits(Isolate* isolate, NativeModule* native_module,
uint32_t canonical_type_index =
native_module->module()
->isorecursive_canonical_type_ids[function.sig_index];
+ int wrapper_index =
+ GetExportWrapperIndex(canonical_type_index, function.imported);
+ if (wrapper_index < isolate->heap()->js_to_wasm_wrappers().length()) {
+ MaybeObject existing_wrapper =
+ isolate->heap()->js_to_wasm_wrappers().Get(wrapper_index);
+ if (existing_wrapper.IsStrongOrWeak() &&
+ !existing_wrapper.GetHeapObject().IsUndefined()) {
+ // Skip wrapper compilation as the wrapper is already cached.
+ // Note that this does not guarantee that the wrapper is still cached
+ // at the moment at which the WasmInternalFunction is instantiated.
+ continue;
+ }
+ }
JSToWasmWrapperKey key(function.imported, canonical_type_index);
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
@@ -1740,15 +1664,12 @@ int AddImportWrapperUnits(NativeModule* native_module,
for (int func_index = 0; func_index < num_imported_functions; func_index++) {
const WasmFunction& function =
native_module->module()->functions[func_index];
- if (!IsJSCompatibleSignature(function.sig, native_module->module(),
- native_module->enabled_features())) {
- continue;
- }
+ if (!IsJSCompatibleSignature(function.sig)) continue;
uint32_t canonical_type_index =
native_module->module()
->isorecursive_canonical_type_ids[function.sig_index];
WasmImportWrapperCache::CacheKey key(
- compiler::kDefaultImportCallKind, canonical_type_index,
+ kDefaultImportCallKind, canonical_type_index,
static_cast<int>(function.sig->parameter_count()), kNoSuspend);
auto it = keys.insert(key);
if (it.second) {
@@ -1761,32 +1682,9 @@ int AddImportWrapperUnits(NativeModule* native_module,
return static_cast<int>(keys.size());
}
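
Aside: AddImportWrapperUnits relies on the usual set-insert idiom, scheduling a wrapper unit only when its cache key has not been seen before. A minimal, self-contained sketch of that deduplication step, with a plain std::pair standing in for the real cache key (names invented):

    #include <set>
    #include <utility>
    #include <vector>

    // Schedule each (kind, canonical signature) pair at most once; the return
    // value mirrors the "number of units added" result above.
    int AddUnitsDeduplicated(const std::vector<std::pair<int, int>>& imports,
                             std::vector<std::pair<int, int>>* units) {
      std::set<std::pair<int, int>> keys;
      for (const auto& key : imports) {
        if (keys.insert(key).second) units->push_back(key);  // first occurrence
      }
      return static_cast<int>(keys.size());
    }
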
-void InitializeLazyCompilation(NativeModule* native_module) {
- const bool lazy_module = IsLazyModule(native_module->module());
- auto* module = native_module->module();
-
- uint32_t start = module->num_imported_functions;
- uint32_t end = start + module->num_declared_functions;
- base::Optional<CodeSpaceWriteScope> lazy_code_space_write_scope;
- for (uint32_t func_index = start; func_index < end; func_index++) {
- CompileStrategy strategy = GetCompileStrategy(
- module, native_module->enabled_features(), func_index, lazy_module);
- if (strategy == CompileStrategy::kLazy ||
- strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
- // Open a single scope for all following calls to {UseLazyStub()}, instead
- // of flipping page permissions for each {func_index} individually.
- if (!lazy_code_space_write_scope.has_value()) {
- lazy_code_space_write_scope.emplace(native_module);
- }
- native_module->UseLazyStub(func_index);
- }
- }
-}
-
std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
Isolate* isolate, NativeModule* native_module,
ProfileInformation* pgo_info) {
- InitializeLazyCompilation(native_module);
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
auto builder = std::make_unique<CompilationUnitBuilder>(native_module);
@@ -1799,7 +1697,7 @@ std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
}
bool MayCompriseLazyFunctions(const WasmModule* module,
- const WasmFeatures& enabled_features) {
+ WasmFeatures enabled_features) {
if (IsLazyModule(module)) return true;
if (enabled_features.has_compilation_hints()) return true;
#ifdef ENABLE_SLOW_DCHECKS
@@ -1854,9 +1752,7 @@ class CompilationTimeCallback : public CompilationEventCallback {
true, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds(), // wall_clock_duration_in_us
- static_cast<int64_t>( // cpu_time_duration_in_us
- native_module->baseline_compilation_cpu_duration())};
+ duration.InMicroseconds()}; // wall_clock_duration_in_us
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
if (compilation_event == CompilationEvent::kFailedCompilation) {
@@ -1869,9 +1765,7 @@ class CompilationTimeCallback : public CompilationEventCallback {
false, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds(), // wall_clock_duration_in_us
- static_cast<int64_t>( // cpu_time_duration_in_us
- native_module->baseline_compilation_cpu_duration())};
+ duration.InMicroseconds()}; // wall_clock_duration_in_us
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
}
@@ -1885,26 +1779,44 @@ class CompilationTimeCallback : public CompilationEventCallback {
const CompileMode compile_mode_;
};
+WasmError ValidateFunctions(const WasmModule* module,
+ base::Vector<const uint8_t> wire_bytes,
+ WasmFeatures enabled_features,
+ OnlyLazyFunctions only_lazy_functions) {
+ DCHECK_EQ(module->origin, kWasmOrigin);
+ if (only_lazy_functions &&
+ !MayCompriseLazyFunctions(module, enabled_features)) {
+ return {};
+ }
+
+ std::function<bool(int)> filter; // Initially empty for "all functions".
+ if (only_lazy_functions) {
+ const bool is_lazy_module = IsLazyModule(module);
+ filter = [module, enabled_features, is_lazy_module](int func_index) {
+ CompileStrategy strategy = GetCompileStrategy(module, enabled_features,
+ func_index, is_lazy_module);
+ return strategy == CompileStrategy::kLazy ||
+ strategy == CompileStrategy::kLazyBaselineEagerTopTier;
+ };
+ }
+ // Call {ValidateFunctions} in the module decoder.
+ return ValidateFunctions(module, enabled_features, wire_bytes, filter);
+}
+
+WasmError ValidateFunctions(const NativeModule& native_module,
+ OnlyLazyFunctions only_lazy_functions) {
+ return ValidateFunctions(native_module.module(), native_module.wire_bytes(),
+ native_module.enabled_features(),
+ only_lazy_functions);
+}
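
Aside: both ValidateFunctions overloads funnel into a single entry point that takes an optional per-function filter, where an empty std::function means "validate all functions". A hedged standalone sketch of that convention; FakeModule, is_lazy and the printf output are invented for illustration:

    #include <cstdio>
    #include <functional>

    struct FakeModule {
      int num_functions = 4;
      bool is_lazy(int i) const { return i % 2 == 0; }  // demo predicate only
    };

    void ValidateAll(const FakeModule& m, std::function<bool(int)> filter = {}) {
      for (int i = 0; i < m.num_functions; ++i) {
        if (filter && !filter(i)) continue;  // skip functions outside the filter
        std::printf("validating function %d\n", i);
      }
    }

    int main() {
      FakeModule m;
      ValidateAll(m);                                       // all functions
      ValidateAll(m, [&](int i) { return m.is_lazy(i); });  // only lazy ones
    }
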
+
void CompileNativeModule(Isolate* isolate,
v8::metrics::Recorder::ContextId context_id,
- ErrorThrower* thrower, const WasmModule* wasm_module,
+ ErrorThrower* thrower,
std::shared_ptr<NativeModule> native_module,
ProfileInformation* pgo_info) {
CHECK(!v8_flags.jitless);
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- if (!v8_flags.wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
- MayCompriseLazyFunctions(wasm_module,
- native_module->enabled_features())) {
- // Validate wasm modules for lazy compilation if requested. Never validate
- // asm.js modules as these are valid by construction (additionally a CHECK
- // will catch this during lazy compilation).
- ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
- isolate->allocator(), thrower, kOnlyLazyFunctions);
- // On error: Return and leave the module in an unexecutable state.
- if (thrower->error()) return;
- }
-
- DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
+ const WasmModule* module = native_module->module();
// The callback captures a shared ptr to the semaphore.
auto* compilation_state = Impl(native_module->compilation_state());
@@ -1919,72 +1831,113 @@ void CompileNativeModule(Isolate* isolate,
InitializeCompilation(isolate, native_module.get(), pgo_info);
compilation_state->InitializeCompilationUnits(std::move(builder));
+ // Validate wasm modules for lazy compilation if requested. Never validate
+ // asm.js modules as these are valid by construction (additionally a CHECK
+ // will catch this during lazy compilation).
+ if (!v8_flags.wasm_lazy_validation && module->origin == kWasmOrigin) {
+ DCHECK(!thrower->error());
+ if (WasmError validation_error =
+ ValidateFunctions(*native_module, kOnlyLazyFunctions)) {
+ thrower->CompileFailed(std::move(validation_error));
+ return;
+ }
+ }
+
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedExportWrappers);
- if (compilation_state->failed()) {
- DCHECK_IMPLIES(IsLazyModule(wasm_module), !v8_flags.wasm_lazy_validation);
- ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
- isolate->allocator(), thrower);
- CHECK(thrower->error());
- return;
- }
-
- compilation_state->FinalizeJSToWasmWrappers(isolate, wasm_module);
+ if (!compilation_state->failed()) {
+ compilation_state->FinalizeJSToWasmWrappers(isolate, module);
- compilation_state->WaitForCompilationEvent(
- CompilationEvent::kFinishedBaselineCompilation);
+ compilation_state->WaitForCompilationEvent(
+ CompilationEvent::kFinishedBaselineCompilation);
- compilation_state->PublishDetectedFeatures(isolate);
+ compilation_state->PublishDetectedFeatures(isolate);
+ }
if (compilation_state->failed()) {
- DCHECK_IMPLIES(IsLazyModule(wasm_module), !v8_flags.wasm_lazy_validation);
- ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
- isolate->allocator(), thrower);
- CHECK(thrower->error());
+ DCHECK_IMPLIES(IsLazyModule(module), !v8_flags.wasm_lazy_validation);
+ WasmError validation_error =
+ ValidateFunctions(*native_module, kAllFunctions);
+ CHECK(validation_error.has_error());
+ thrower->CompileFailed(std::move(validation_error));
}
}
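
Aside: note the division of labour in the failure path above: background compilation only records that it failed, and a deterministic, readable error is recovered by re-validating all functions afterwards. A minimal sketch of that idiom, with the three steps passed in as stand-in callables rather than the real V8 interfaces:

    #include <cassert>
    #include <functional>
    #include <string>

    struct ValidationError {
      std::string message;
      bool has_error() const { return !message.empty(); }
    };

    // Compilation reports only pass/fail; on failure, full validation is re-run
    // to produce the error message that is reported to the embedder.
    void CompileOrReport(const std::function<bool()>& compilation_failed,
                         const std::function<ValidationError()>& validate_all,
                         const std::function<void(const ValidationError&)>& report) {
      if (!compilation_failed()) return;
      ValidationError error = validate_all();
      assert(error.has_error());  // validation must reproduce the failure
      report(error);
    }
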
+class AsyncCompileJSToWasmWrapperJob final : public JobTask {
+ public:
+ explicit AsyncCompileJSToWasmWrapperJob(
+ std::weak_ptr<NativeModule> native_module)
+ : native_module_(std::move(native_module)),
+ engine_barrier_(GetWasmEngine()->GetBarrierForBackgroundCompile()) {}
+
+ void Run(JobDelegate* delegate) override {
+ auto engine_scope = engine_barrier_->TryLock();
+ if (!engine_scope) return;
+ ExecuteJSToWasmWrapperCompilationUnits(native_module_, delegate);
+ }
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ BackgroundCompileScope compile_scope(native_module_);
+ if (compile_scope.cancelled()) return 0;
+ size_t flag_limit = static_cast<size_t>(
+ std::max(1, v8_flags.wasm_num_compilation_tasks.value()));
+ // NumOutstandingExportWrappers() does not reflect the units that running
+ // workers are processing, thus add the current worker count to that number.
+ return std::min(
+ flag_limit,
+ worker_count +
+ compile_scope.compilation_state()->NumOutstandingExportWrappers());
+ }
+
+ private:
+ std::weak_ptr<NativeModule> native_module_;
+ std::shared_ptr<OperationsBarrier> engine_barrier_;
+};
+
class BackgroundCompileJob final : public JobTask {
public:
explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters,
+ CompilationTier tier)
: native_module_(std::move(native_module)),
engine_barrier_(GetWasmEngine()->GetBarrierForBackgroundCompile()),
- async_counters_(std::move(async_counters)) {}
+ async_counters_(std::move(async_counters)),
+ tier_(tier) {}
void Run(JobDelegate* delegate) override {
auto engine_scope = engine_barrier_->TryLock();
if (!engine_scope) return;
ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
- kBaselineOrTopTier);
+ tier_);
}
size_t GetMaxConcurrency(size_t worker_count) const override {
BackgroundCompileScope compile_scope(native_module_);
if (compile_scope.cancelled()) return 0;
+ size_t flag_limit = static_cast<size_t>(
+ std::max(1, v8_flags.wasm_num_compilation_tasks.value()));
// NumOutstandingCompilations() does not reflect the units that running
// workers are processing, thus add the current worker count to that number.
- return std::min(
- static_cast<size_t>(v8_flags.wasm_num_compilation_tasks),
- worker_count +
- compile_scope.compilation_state()->NumOutstandingCompilations());
+ return std::min(flag_limit,
+ worker_count + compile_scope.compilation_state()
+ ->NumOutstandingCompilations(tier_));
}
private:
std::weak_ptr<NativeModule> native_module_;
std::shared_ptr<OperationsBarrier> engine_barrier_;
const std::shared_ptr<Counters> async_counters_;
+ const CompilationTier tier_;
};
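
Aside: both job classes above compute their desired concurrency the same way: the queued units plus the workers already running, clamped by the wasm_num_compilation_tasks flag with a minimum of one. A tiny standalone helper capturing that calculation (function name invented):

    #include <algorithm>
    #include <cstddef>

    // worker_count: threads currently executing this job.
    // outstanding_units: units queued but not yet picked up by any worker.
    // flag_limit: configured task count; values below 1 are clamped up to 1.
    size_t DesiredConcurrency(size_t worker_count, size_t outstanding_units,
                              int flag_limit) {
      size_t limit = static_cast<size_t>(std::max(1, flag_limit));
      return std::min(limit, worker_count + outstanding_units);
    }
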
} // namespace
std::shared_ptr<NativeModule> CompileToNativeModule(
- Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
- std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
+ Isolate* isolate, WasmFeatures enabled_features, ErrorThrower* thrower,
+ std::shared_ptr<const WasmModule> module, ModuleWireBytes wire_bytes,
int compilation_id, v8::metrics::Recorder::ContextId context_id,
ProfileInformation* pgo_info) {
- const WasmModule* wasm_module = module.get();
WasmEngine* engine = GetWasmEngine();
base::OwnedVector<uint8_t> wire_bytes_copy =
base::OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
@@ -1993,20 +1946,20 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
// bytes of the temporary key and the new key have the same base pointer and
// we can skip the full bytes comparison.
std::shared_ptr<NativeModule> native_module = engine->MaybeGetNativeModule(
- wasm_module->origin, wire_bytes_copy.as_vector(), isolate);
+ module->origin, wire_bytes_copy.as_vector(), isolate);
if (native_module) {
- CompileJsToWasmWrappers(isolate, wasm_module);
+ CompileJsToWasmWrappers(isolate, module.get());
return native_module;
}
base::Optional<TimedHistogramScope> wasm_compile_module_time_scope;
if (base::TimeTicks::IsHighResolution()) {
wasm_compile_module_time_scope.emplace(SELECT_WASM_COUNTER(
- isolate->counters(), wasm_module->origin, wasm_compile, module_time));
+ isolate->counters(), module->origin, wasm_compile, module_time));
}
// Embedder usage count for declared shared memories.
- if (wasm_module->has_shared_memory) {
+ if (module->has_shared_memory) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
@@ -2017,22 +1970,27 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
module.get(), include_liftoff,
DynamicTiering{v8_flags.wasm_dynamic_tiering.value()});
- native_module =
- engine->NewNativeModule(isolate, enabled, module, code_size_estimate);
+ native_module = engine->NewNativeModule(isolate, enabled_features, module,
+ code_size_estimate);
native_module->SetWireBytes(std::move(wire_bytes_copy));
native_module->compilation_state()->set_compilation_id(compilation_id);
- // Sync compilation is user blocking, so we increase the priority.
- native_module->compilation_state()->SetHighPriority();
- CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module,
- pgo_info);
- bool cache_hit = !engine->UpdateNativeModuleCache(thrower->error(),
- &native_module, isolate);
- if (thrower->error()) return {};
+ CompileNativeModule(isolate, context_id, thrower, native_module, pgo_info);
- if (cache_hit) {
- CompileJsToWasmWrappers(isolate, wasm_module);
- return native_module;
+ if (thrower->error()) {
+ engine->UpdateNativeModuleCache(true, std::move(native_module), isolate);
+ return {};
+ }
+
+ std::shared_ptr<NativeModule> cached_native_module =
+ engine->UpdateNativeModuleCache(false, native_module, isolate);
+
+ if (cached_native_module != native_module) {
+ // Do not use {module} or {native_module} any more; use
+ // {cached_native_module} instead.
+ module.reset();
+ native_module.reset();
+ return cached_native_module;
}
// Ensure that the code objects are logged before returning.
@@ -2041,56 +1999,18 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
return native_module;
}
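
Aside: the tail of CompileToNativeModule follows a publish-or-reuse pattern against the engine-wide cache: the freshly compiled module is offered to the cache, and if an equivalent entry already exists the local instance is dropped in favour of the cached one. A hedged sketch with a plain map standing in for the engine cache:

    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <utility>

    // Offer {fresh} under {key}; return the cached instance if one already
    // exists, otherwise the freshly inserted one. Callers switch to the result.
    template <typename T>
    std::shared_ptr<T> PublishOrReuse(
        std::unordered_map<std::string, std::shared_ptr<T>>& cache,
        const std::string& key, std::shared_ptr<T> fresh) {
      auto [it, inserted] = cache.try_emplace(key, fresh);
      return inserted ? std::move(fresh) : it->second;
    }
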
-void RecompileNativeModule(NativeModule* native_module,
- TieringState tiering_state) {
- // Install a callback to notify us once background recompilation finished.
- auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
- auto* compilation_state = Impl(native_module->compilation_state());
-
- class RecompilationFinishedCallback : public CompilationEventCallback {
- public:
- explicit RecompilationFinishedCallback(
- std::shared_ptr<base::Semaphore> recompilation_finished_semaphore)
- : recompilation_finished_semaphore_(
- std::move(recompilation_finished_semaphore)) {}
-
- void call(CompilationEvent event) override {
- DCHECK_NE(CompilationEvent::kFailedCompilation, event);
- if (event == CompilationEvent::kFinishedRecompilation) {
- recompilation_finished_semaphore_->Signal();
- }
- }
-
- private:
- std::shared_ptr<base::Semaphore> recompilation_finished_semaphore_;
- };
-
- // The callback captures a shared ptr to the semaphore.
- // Initialize the compilation units and kick off background compile tasks.
- compilation_state->InitializeRecompilation(
- tiering_state, std::make_unique<RecompilationFinishedCallback>(
- recompilation_finished_semaphore));
-
- constexpr JobDelegate* kNoDelegate = nullptr;
- ExecuteCompilationUnits(compilation_state->native_module_weak(),
- compilation_state->counters(), kNoDelegate,
- kBaselineOnly);
- recompilation_finished_semaphore->Wait();
- DCHECK(!compilation_state->failed());
-}
-
AsyncCompileJob::AsyncCompileJob(
- Isolate* isolate, const WasmFeatures& enabled,
- std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
+ Isolate* isolate, WasmFeatures enabled_features,
+ base::OwnedVector<const uint8_t> bytes, Handle<Context> context,
Handle<Context> incumbent_context, const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver, int compilation_id)
: isolate_(isolate),
api_method_name_(api_method_name),
- enabled_features_(enabled),
+ enabled_features_(enabled_features),
dynamic_tiering_(DynamicTiering{v8_flags.wasm_dynamic_tiering.value()}),
start_time_(base::TimeTicks::Now()),
- bytes_copy_(std::move(bytes_copy)),
- wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
+ bytes_copy_(std::move(bytes)),
+ wire_bytes_(bytes_copy_.as_vector()),
resolver_(std::move(resolver)),
compilation_id_(compilation_id) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
@@ -2119,13 +2039,124 @@ void AsyncCompileJob::Abort() {
GetWasmEngine()->RemoveCompileJob(this);
}
-class AsyncStreamingProcessor final : public StreamingProcessor {
+// {ValidateFunctionsStreamingJobData} holds information that is shared between
+// the {AsyncStreamingProcessor} and the {ValidateFunctionsStreamingJob}. It
+// lives in the {AsyncStreamingProcessor} and is updated from both classes.
+struct ValidateFunctionsStreamingJobData {
+ struct Unit {
+ // {func_index == -1} represents an "invalid" unit.
+ int func_index = -1;
+ base::Vector<const uint8_t> code;
+
+ // Check whether the unit is valid.
+ operator bool() const {
+ DCHECK_LE(-1, func_index);
+ return func_index >= 0;
+ }
+ };
+
+ void Initialize(int num_declared_functions) {
+ DCHECK_NULL(units);
+ units = base::OwnedVector<Unit>::NewForOverwrite(num_declared_functions);
+ // Initially {next == end}.
+ next_available_unit.store(units.begin(), std::memory_order_relaxed);
+ end_of_available_units.store(units.begin(), std::memory_order_relaxed);
+ }
+
+ void AddUnit(int declared_func_index, base::Vector<const uint8_t> code,
+ JobHandle* job_handle) {
+ DCHECK_NOT_NULL(units);
+ // Write new unit to {*end}, then increment {end}. There is only one thread
+ // adding new units, so no further synchronization needed.
+ Unit* ptr = end_of_available_units.load(std::memory_order_relaxed);
+ // Check invariant: {next <= end}.
+ DCHECK_LE(next_available_unit.load(std::memory_order_relaxed), ptr);
+ *ptr++ = {declared_func_index, code};
+ // Use release semantics, so whoever loads this pointer (using acquire
+ // semantics) sees all our previous stores.
+ end_of_available_units.store(ptr, std::memory_order_release);
+ size_t total_units_added = ptr - units.begin();
+ // Periodically notify concurrency increase. This has overhead, so avoid
+ // calling it too often. As long as threads are still running they will
+ // continue processing new units anyway, and if background threads validate
+ // faster than we can add units, then only notifying after increasingly long
+ // delays is the right thing to do to avoid too many small validation tasks.
+ // We notify on each power of two after 16 units, and every 16k units (just
+    // to have *some* upper limit and to avoid piling up too many units).
+ // Additionally, notify after receiving the last unit of the module.
+ if ((total_units_added >= 16 &&
+ base::bits::IsPowerOfTwo(total_units_added)) ||
+ (total_units_added % (16 * 1024)) == 0 || ptr == units.end()) {
+ job_handle->NotifyConcurrencyIncrease();
+ }
+ }
+
+ size_t NumOutstandingUnits() const {
+ Unit* next = next_available_unit.load(std::memory_order_relaxed);
+ Unit* end = end_of_available_units.load(std::memory_order_relaxed);
+ DCHECK_LE(next, end);
+ return end - next;
+ }
+
+ // Retrieve one unit to validate; returns an "invalid" unit if nothing is in
+ // the queue.
+ Unit GetUnit() {
+ // Use an acquire load to synchronize with the store in {AddUnit}. All units
+ // before this {end} are fully initialized and ready to execute.
+ Unit* end = end_of_available_units.load(std::memory_order_acquire);
+ Unit* next = next_available_unit.load(std::memory_order_relaxed);
+ while (next < end) {
+ if (next_available_unit.compare_exchange_weak(
+ next, next + 1, std::memory_order_relaxed)) {
+ return *next;
+ }
+ // Otherwise retry with updated {next} pointer.
+ }
+ return {};
+ }
+
+ base::OwnedVector<Unit> units;
+ std::atomic<Unit*> next_available_unit;
+ std::atomic<Unit*> end_of_available_units;
+ std::atomic<bool> found_error{false};
+};
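
Aside: the struct above is a single-producer / multi-consumer hand-off: the streaming thread appends a unit and publishes it with a release store of the end pointer, and validation workers claim units with a compare-and-swap on the next pointer, synchronizing via an acquire load of the end pointer. A minimal standalone sketch of the same memory-ordering pattern; the fixed-size array and int payload are simplifications:

    #include <array>
    #include <atomic>
    #include <cstddef>

    struct SpmcQueue {
      std::array<int, 1024> items{};
      std::atomic<size_t> next{0};  // first unclaimed slot
      std::atomic<size_t> end{0};   // one past the last published slot

      void Push(int value) {  // single producer only
        size_t e = end.load(std::memory_order_relaxed);
        items[e] = value;
        end.store(e + 1, std::memory_order_release);  // publish the write above
      }

      bool Pop(int* out) {  // any number of consumers
        size_t e = end.load(std::memory_order_acquire);
        size_t n = next.load(std::memory_order_relaxed);
        while (n < e) {
          if (next.compare_exchange_weak(n, n + 1, std::memory_order_relaxed)) {
            *out = items[n];
            return true;
          }
          // CAS failure refreshed {n}; retry with the updated value.
        }
        return false;
      }
    };
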
+
+class ValidateFunctionsStreamingJob final : public JobTask {
public:
- explicit AsyncStreamingProcessor(AsyncCompileJob* job,
- std::shared_ptr<Counters> counters,
- AccountingAllocator* allocator);
+ ValidateFunctionsStreamingJob(const WasmModule* module,
+ WasmFeatures enabled_features,
+ ValidateFunctionsStreamingJobData* data)
+ : module_(module), enabled_features_(enabled_features), data_(data) {}
- ~AsyncStreamingProcessor() override;
+ void Run(JobDelegate* delegate) override {
+ TRACE_EVENT0("v8.wasm", "wasm.ValidateFunctionsStreaming");
+ using Unit = ValidateFunctionsStreamingJobData::Unit;
+ while (Unit unit = data_->GetUnit()) {
+ DecodeResult result = ValidateSingleFunction(
+ module_, unit.func_index, unit.code, enabled_features_);
+
+ if (result.failed()) {
+ data_->found_error.store(true, std::memory_order_relaxed);
+ break;
+ }
+ // After validating one function, check if we should yield.
+ if (delegate->ShouldYield()) break;
+ }
+ }
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ return worker_count + data_->NumOutstandingUnits();
+ }
+
+ private:
+ const WasmModule* const module_;
+ const WasmFeatures enabled_features_;
+ ValidateFunctionsStreamingJobData* data_;
+};
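
Aside: the Run loop above is the matching consumer side: pull one unit at a time, stop on the first error, and check the delegate's yield signal between units so the platform can reclaim the thread. Sketched against the SpmcQueue from the previous aside, with validate and should_yield as stand-in callables:

    #include <atomic>
    #include <functional>

    void RunValidationWorker(SpmcQueue& queue,
                             const std::function<bool(int)>& validate,
                             const std::function<bool()>& should_yield,
                             std::atomic<bool>& found_error) {
      int unit;
      while (queue.Pop(&unit)) {
        if (!validate(unit)) {
          found_error.store(true, std::memory_order_relaxed);
          break;  // one failure is enough; stop this worker
        }
        if (should_yield()) break;  // hand the thread back to the scheduler
      }
    }
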
+
+class AsyncStreamingProcessor final : public StreamingProcessor {
+ public:
+ explicit AsyncStreamingProcessor(AsyncCompileJob* job);
bool ProcessModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) override;
@@ -2140,14 +2171,13 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
int code_section_start,
int code_section_length) override;
- void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
+ bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t offset) override;
void OnFinishedChunk() override;
- void OnFinishedStream(base::OwnedVector<uint8_t> bytes) override;
-
- void OnError(const WasmError&) override;
+ void OnFinishedStream(base::OwnedVector<const uint8_t> bytes,
+ bool after_error) override;
void OnAbort() override;
@@ -2155,11 +2185,6 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
base::Vector<const uint8_t> module_bytes) override;
private:
- enum ErrorLocation { kErrorInFunction, kErrorInSection };
- // Finishes the AsyncCompileJob with an error.
- void FinishAsyncCompileJobWithError(
- const WasmError&, ErrorLocation error_location = kErrorInSection);
-
void CommitCompilationUnits();
ModuleDecoder decoder_;
@@ -2168,20 +2193,19 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
int num_functions_ = 0;
bool prefix_cache_hit_ = false;
bool before_code_section_ = true;
- std::shared_ptr<Counters> async_counters_;
- AccountingAllocator* allocator_;
+ ValidateFunctionsStreamingJobData validate_functions_job_data_;
+ std::unique_ptr<JobHandle> validate_functions_job_handle_;
// Running hash of the wire bytes up to code section size, but excluding the
// code section itself. Used by the {NativeModuleCache} to detect potential
// duplicate modules.
- size_t prefix_hash_;
+ size_t prefix_hash_ = 0;
};
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
DCHECK_NULL(stream_);
stream_ = StreamingDecoder::CreateAsyncStreamingDecoder(
- std::make_unique<AsyncStreamingProcessor>(
- this, isolate_->async_counters(), isolate_->allocator()));
+ std::make_unique<AsyncStreamingProcessor>(this));
return stream_;
}
@@ -2195,9 +2219,7 @@ AsyncCompileJob::~AsyncCompileJob() {
}
// Tell the streaming decoder that the AsyncCompileJob is not available
// anymore.
- // TODO(ahaas): Is this notification really necessary? Check
- // https://crbug.com/888170.
- if (stream_) stream_->NotifyCompilationEnded();
+ if (stream_) stream_->NotifyCompilationDiscarded();
CancelPendingForegroundTask();
isolate_->global_handles()->Destroy(native_context_.location());
isolate_->global_handles()->Destroy(incumbent_context_.location());
@@ -2213,15 +2235,12 @@ void AsyncCompileJob::CreateNativeModule(
isolate_->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
- // TODO(wasm): Improve efficiency of storing module wire bytes. Only store
- // relevant sections, not function bodies
-
// Create the module object and populate with compiled functions and
// information needed at instantiation time.
native_module_ = GetWasmEngine()->NewNativeModule(
isolate_, enabled_features_, std::move(module), code_size_estimate);
- native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
+ native_module_->SetWireBytes(std::move(bytes_copy_));
native_module_->compilation_state()->set_compilation_id(compilation_id_);
}
@@ -2255,15 +2274,15 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.FinishAsyncCompile");
+ if (stream_) {
+ stream_->NotifyNativeModuleCreated(native_module_);
+ }
bool is_after_deserialization = !module_object_.is_null();
- auto compilation_state = Impl(native_module_->compilation_state());
if (!is_after_deserialization) {
- if (stream_) {
- stream_->NotifyNativeModuleCreated(native_module_);
- }
PrepareRuntimeObjects();
}
+ auto compilation_state = Impl(native_module_->compilation_state());
// Measure duration of baseline compilation or deserialization from cache.
if (base::TimeTicks::IsHighResolution()) {
base::TimeDelta duration = base::TimeTicks::Now() - start_time_;
@@ -2281,9 +2300,7 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
!compilation_state->failed(), // success
native_module_->turbofan_code_size(), // code_size_in_bytes
native_module_->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds(), // wall_clock_duration_in_us
- static_cast<int64_t>( // cpu_time_duration_in_us
- native_module_->baseline_compilation_cpu_duration())};
+ duration.InMicroseconds()}; // wall_clock_duration_in_us
isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
}
}
@@ -2321,51 +2338,37 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
// We can only update the feature counts once the entire compile is done.
compilation_state->PublishDetectedFeatures(isolate_);
- // We might need to recompile the module for debugging, if the debugger was
- // enabled while streaming compilation was running. Since handling this while
- // compiling via streaming is tricky, we just tier down now, before publishing
- // the module.
- if (native_module_->IsTieredDown()) native_module_->RecompileForTiering();
+ // We might need debug code for the module, if the debugger was enabled while
+ // streaming compilation was running. Since handling this while compiling via
+ // streaming is tricky, we just remove all code which may have been generated,
+ // and compile debug code lazily.
+ if (native_module_->IsInDebugState()) {
+ native_module_->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveNonDebugCode);
+ }
// Finally, log all generated code (it does not matter if this happens
// repeatedly in case the script is shared).
native_module_->LogWasmCodes(isolate_, module_object_->script());
- FinishModule();
+ FinishSuccessfully();
}
-void AsyncCompileJob::DecodeFailed(const WasmError& error) {
- ErrorThrower thrower(isolate_, api_method_name_);
- thrower.CompileFailed(error);
+void AsyncCompileJob::Failed() {
// {job} keeps the {this} pointer alive.
- std::shared_ptr<AsyncCompileJob> job =
+ std::unique_ptr<AsyncCompileJob> job =
GetWasmEngine()->RemoveCompileJob(this);
- resolver_->OnCompilationFailed(thrower.Reify());
-}
-void AsyncCompileJob::AsyncCompileFailed() {
+ // Revalidate the whole module to produce a deterministic error message.
+ constexpr bool kValidate = true;
+ ModuleResult result = DecodeWasmModule(
+ enabled_features_, wire_bytes_.module_bytes(), kValidate, kWasmOrigin);
+ CHECK(result.failed());
ErrorThrower thrower(isolate_, api_method_name_);
- DCHECK_EQ(native_module_->module()->origin, kWasmOrigin);
- ValidateSequentially(native_module_->module(), native_module_.get(),
- isolate_->counters(), isolate_->allocator(), &thrower);
- DCHECK(thrower.error());
- // {job} keeps the {this} pointer alive.
- std::shared_ptr<AsyncCompileJob> job =
- GetWasmEngine()->RemoveCompileJob(this);
+ thrower.CompileFailed(std::move(result).error());
resolver_->OnCompilationFailed(thrower.Reify());
}
-void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.OnCompilationSucceeded");
- // We have to make sure that an "incumbent context" is available in case
- // the module's start function calls out to Blink.
- Local<v8::Context> backup_incumbent_context =
- Utils::ToLocal(incumbent_context_);
- v8::Context::BackupIncumbentScope incumbent(backup_incumbent_context);
- resolver_->OnCompilationSucceeded(result);
-}
-
class AsyncCompileJob::CompilationStateCallback
: public CompilationEventCallback {
public:
@@ -2385,12 +2388,14 @@ class AsyncCompileJob::CompilationStateCallback
// Install the native module in the cache, or reuse a conflicting one.
// If we get a conflicting module, wait until we are back in the
// main thread to update {job_->native_module_} to avoid a data race.
- std::shared_ptr<NativeModule> native_module = job_->native_module_;
- bool cache_hit = !GetWasmEngine()->UpdateNativeModuleCache(
- false, &native_module, job_->isolate_);
- DCHECK_EQ(cache_hit, native_module != job_->native_module_);
- job_->DoSync<CompileFinished>(cache_hit ? std::move(native_module)
- : nullptr);
+ std::shared_ptr<NativeModule> cached_native_module =
+ GetWasmEngine()->UpdateNativeModuleCache(
+ false, job_->native_module_, job_->isolate_);
+ if (cached_native_module == job_->native_module_) {
+ // There was no cached module.
+ cached_native_module = nullptr;
+ }
+ job_->DoSync<FinishCompilation>(std::move(cached_native_module));
}
break;
case CompilationEvent::kFinishedCompilationChunk:
@@ -2403,16 +2408,11 @@ class AsyncCompileJob::CompilationStateCallback
if (job_->DecrementAndCheckFinisherCount(kCompilation)) {
// Don't update {job_->native_module_} to avoid data races with other
// compilation threads. Use a copy of the shared pointer instead.
- std::shared_ptr<NativeModule> native_module = job_->native_module_;
- GetWasmEngine()->UpdateNativeModuleCache(true, &native_module,
+ GetWasmEngine()->UpdateNativeModuleCache(true, job_->native_module_,
job_->isolate_);
- job_->DoSync<CompileFailed>();
+ job_->DoSync<Fail>();
}
break;
- case CompilationEvent::kFinishedRecompilation:
- // This event can happen out of order, hence don't remember this in
- // {last_event_}.
- return;
}
#ifdef DEBUG
last_event_ = event;
@@ -2549,38 +2549,6 @@ void AsyncCompileJob::NextStep(Args&&... args) {
step_.reset(new Step(std::forward<Args>(args)...));
}
-WasmError ValidateLazilyCompiledFunctions(const WasmModule* module,
- ModuleWireBytes wire_bytes,
- WasmFeatures enabled_features) {
- if (v8_flags.wasm_lazy_validation) return {};
- if (!MayCompriseLazyFunctions(module, enabled_features)) return {};
-
- auto allocator = GetWasmEngine()->allocator();
-
- // TODO(clemensb): Parallelize this.
- const bool is_lazy_module = IsLazyModule(module);
- for (const WasmFunction& function : module->declared_functions()) {
- if (module->function_was_validated(function.func_index)) continue;
- base::Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(&function);
-
- CompileStrategy strategy = GetCompileStrategy(
- module, enabled_features, function.func_index, is_lazy_module);
- if (strategy != CompileStrategy::kLazy &&
- strategy != CompileStrategy::kLazyBaselineEagerTopTier) {
- continue;
- }
- DecodeResult function_result = ValidateSingleFunction(
- module, function.func_index, code, allocator, enabled_features);
- if (function_result.failed()) {
- WasmError error = std::move(function_result).error();
- return GetWasmErrorWithName(wire_bytes, &function, module,
- std::move(error));
- }
- module->set_function_validated(function.func_index);
- }
- return {};
-}
-
//==========================================================================
// Step 1: (async) Decode the module.
//==========================================================================
@@ -2600,24 +2568,23 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.DecodeModule");
auto enabled_features = job->enabled_features_;
- result = DecodeWasmModule(
- enabled_features, job->wire_bytes_.start(), job->wire_bytes_.end(),
- false, kWasmOrigin, counters_, metrics_recorder_, job->context_id(),
- DecodingMethod::kAsync, GetWasmEngine()->allocator());
+ result =
+ DecodeWasmModule(enabled_features, job->wire_bytes_.module_bytes(),
+ false, kWasmOrigin, counters_, metrics_recorder_,
+ job->context_id(), DecodingMethod::kAsync);
// Validate lazy functions here if requested.
- if (result.ok()) {
+ if (result.ok() && !v8_flags.wasm_lazy_validation) {
const WasmModule* module = result.value().get();
- WasmError validation_error = ValidateLazilyCompiledFunctions(
- module, job->wire_bytes_, job->enabled_features_);
- if (validation_error.has_error()) {
+ if (WasmError validation_error =
+ ValidateFunctions(module, job->wire_bytes_.module_bytes(),
+ job->enabled_features_, kOnlyLazyFunctions))
result = ModuleResult{std::move(validation_error)};
- }
}
}
if (result.failed()) {
// Decoding failure; reject the promise and clean up.
- job->DoSync<DecodeFail>(std::move(result).error());
+ job->DoSync<Fail>();
} else {
// Decode passed.
std::shared_ptr<WasmModule> module = std::move(result).value();
@@ -2636,24 +2603,7 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
};
//==========================================================================
-// Step 1b: (sync) Fail decoding the module.
-//==========================================================================
-class AsyncCompileJob::DecodeFail : public CompileStep {
- public:
- explicit DecodeFail(WasmError error) : error_(std::move(error)) {}
-
- private:
- WasmError error_;
-
- void RunInForeground(AsyncCompileJob* job) override {
- TRACE_COMPILE("(1b) Decoding failed.\n");
- // {job_} is deleted in DecodeFailed, therefore the {return}.
- return job->DecodeFailed(error_);
- }
-};
-
-//==========================================================================
-// Step 2 (sync): Create heap-allocated data and start compile.
+// Step 2 (sync): Create heap-allocated data and start compilation.
//==========================================================================
class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
public:
@@ -2685,13 +2635,10 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// Note that we only need to validate lazily compiled functions, others
// will be validated during eager compilation.
DCHECK(start_compilation_);
- if (ValidateLazilyCompiledFunctions(
- module_.get(), ModuleWireBytes{job->native_module_->wire_bytes()},
- job->native_module_->enabled_features())
+ if (!v8_flags.wasm_lazy_validation &&
+ ValidateFunctions(*job->native_module_, kOnlyLazyFunctions)
.has_error()) {
- // TODO(clemensb): Use the error message instead of re-validation in
- // {AsyncCompileFailed}.
- job->AsyncCompileFailed();
+ job->Failed();
return;
}
}
@@ -2735,37 +2682,18 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
};
//==========================================================================
-// Step 3a (sync): Compilation failed.
-//==========================================================================
-class AsyncCompileJob::CompileFailed : public CompileStep {
- private:
- void RunInForeground(AsyncCompileJob* job) override {
- TRACE_COMPILE("(3a) Compilation failed\n");
- DCHECK(job->native_module_->compilation_state()->failed());
-
- // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job->AsyncCompileFailed();
- }
-};
-
-//==========================================================================
-// Step 3b (sync): Compilation finished.
+// Step 3 (sync): Compilation finished.
//==========================================================================
-class AsyncCompileJob::CompileFinished : public CompileStep {
+class AsyncCompileJob::FinishCompilation : public CompileStep {
public:
- explicit CompileFinished(std::shared_ptr<NativeModule> cached_native_module)
+ explicit FinishCompilation(std::shared_ptr<NativeModule> cached_native_module)
: cached_native_module_(std::move(cached_native_module)) {}
private:
void RunInForeground(AsyncCompileJob* job) override {
- TRACE_COMPILE("(3b) Compilation finished\n");
+ TRACE_COMPILE("(3) Compilation finished\n");
if (cached_native_module_) {
job->native_module_ = cached_native_module_;
- } else {
- DCHECK(!job->native_module_->compilation_state()->failed());
- // Sample the generated code size when baseline compilation finished.
- job->native_module_->SampleCodeSize(job->isolate_->counters(),
- NativeModule::kAfterBaseline);
}
// Then finalize and publish the generated module.
job->FinishCompile(cached_native_module_ != nullptr);
@@ -2774,80 +2702,44 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
std::shared_ptr<NativeModule> cached_native_module_;
};
-void AsyncCompileJob::FinishModule() {
+//==========================================================================
+// Step 4 (sync): Decoding or compilation failed.
+//==========================================================================
+class AsyncCompileJob::Fail : public CompileStep {
+ private:
+ void RunInForeground(AsyncCompileJob* job) override {
+ TRACE_COMPILE("(4) Async compilation failed.\n");
+ // {job_} is deleted in {Failed}, therefore the {return}.
+ return job->Failed();
+ }
+};
+
+void AsyncCompileJob::FinishSuccessfully() {
TRACE_COMPILE("(4) Finish module...\n");
- AsyncCompileSucceeded(module_object_);
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.OnCompilationSucceeded");
+ // We have to make sure that an "incumbent context" is available in case
+ // the module's start function calls out to Blink.
+ Local<v8::Context> backup_incumbent_context =
+ Utils::ToLocal(incumbent_context_);
+ v8::Context::BackupIncumbentScope incumbent(backup_incumbent_context);
+ resolver_->OnCompilationSucceeded(module_object_);
+ }
GetWasmEngine()->RemoveCompileJob(this);
}
-AsyncStreamingProcessor::AsyncStreamingProcessor(
- AsyncCompileJob* job, std::shared_ptr<Counters> async_counters,
- AccountingAllocator* allocator)
+AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
: decoder_(job->enabled_features_),
job_(job),
- compilation_unit_builder_(nullptr),
- async_counters_(async_counters),
- allocator_(allocator) {}
-
-AsyncStreamingProcessor::~AsyncStreamingProcessor() {
- if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
- // Clean up the temporary cache entry.
- GetWasmEngine()->StreamingCompilationFailed(prefix_hash_);
- }
-}
-
-void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
- const WasmError& error, ErrorLocation error_location) {
- DCHECK(error.has_error());
- // Make sure all background tasks stopped executing before we change the state
- // of the AsyncCompileJob to DecodeFail.
- job_->background_task_manager_.CancelAndWait();
-
- // Record event metrics.
- auto duration = base::TimeTicks::Now() - job_->start_time_;
- job_->metrics_event_.success = false;
- job_->metrics_event_.streamed = true;
- job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
- job_->metrics_event_.function_count = num_functions_;
- job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
- job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
- job_->context_id_);
-
- // Check if there is already a CompiledModule, in which case we have to clean
- // up the CompilationStateImpl as well.
- if (job_->native_module_) {
- CompilationStateImpl* impl =
- Impl(job_->native_module_->compilation_state());
-
- if (error_location == kErrorInFunction) {
- impl->SetError();
- }
- impl->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
- if (error_location == kErrorInSection) {
- job_->DoSync<AsyncCompileJob::DecodeFail,
- AsyncCompileJob::kUseExistingForegroundTask>(error);
- }
- // Clear the {compilation_unit_builder_} if it exists. This is needed
- // because there is a check in the destructor of the
- // {CompilationUnitBuilder} that it is empty.
- if (compilation_unit_builder_) compilation_unit_builder_->Clear();
- } else {
- job_->DoSync<AsyncCompileJob::DecodeFail>(error);
- }
-}
+ compilation_unit_builder_(nullptr) {}
// Process the module header.
bool AsyncStreamingProcessor::ProcessModuleHeader(
base::Vector<const uint8_t> bytes, uint32_t offset) {
TRACE_STREAMING("Process module header...\n");
- decoder_.StartDecoding(job_->isolate()->counters(),
- job_->isolate()->metrics_recorder(),
- job_->context_id(), GetWasmEngine()->allocator());
decoder_.DecodeModuleHeader(bytes, offset);
- if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding().error());
- return false;
- }
+ if (!decoder_.ok()) return false;
prefix_hash_ = GetWireBytesHash(bytes);
return true;
}
@@ -2870,10 +2762,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
if (section_code == SectionCode::kUnknownSectionCode) {
size_t bytes_consumed = ModuleDecoder::IdentifyUnknownSection(
&decoder_, bytes, offset, &section_code);
- if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding().error());
- return false;
- }
+ if (!decoder_.ok()) return false;
if (section_code == SectionCode::kUnknownSectionCode) {
// Skip unknown sections that we do not know how to handle.
return true;
@@ -2883,11 +2772,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
bytes = bytes.SubVector(bytes_consumed, bytes.size());
}
decoder_.DecodeSection(section_code, bytes, offset);
- if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding().error());
- return false;
- }
- return true;
+ return decoder_.ok();
}
// Start the code section.
@@ -2903,7 +2788,6 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
static_cast<uint32_t>(code_section_length));
if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(num_functions),
functions_mismatch_error_offset)) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding().error());
return false;
}
@@ -2944,7 +2828,7 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
}
// Process a function body.
-void AsyncStreamingProcessor::ProcessFunctionBody(
+bool AsyncStreamingProcessor::ProcessFunctionBody(
base::Vector<const uint8_t> bytes, uint32_t offset) {
TRACE_STREAMING("Process function body %d ...\n", num_functions_);
uint32_t func_index =
@@ -2957,15 +2841,7 @@ void AsyncStreamingProcessor::ProcessFunctionBody(
if (prefix_cache_hit_) {
// Don't compile yet if we might have a cache hit.
- return;
- }
-
- // Bail out after the {prefix_cache_hit_}, because if {prefix_cache_hit_} is
- // true, the native module does not exist.
- if (job_->native_module_->compilation_state()->failed()) {
- // There has already been an error, there is no need to do any more
- // validation or compiling.
- return;
+ return true;
}
const WasmModule* module = decoder_.module();
@@ -2979,20 +2855,24 @@ void AsyncStreamingProcessor::ProcessFunctionBody(
(strategy == CompileStrategy::kLazy ||
strategy == CompileStrategy::kLazyBaselineEagerTopTier);
if (validate_lazily_compiled_function) {
- // The native module does not own the wire bytes until {SetWireBytes} is
- // called in {OnFinishedStream}. Validation must use {bytes} parameter.
- DecodeResult result = ValidateSingleFunction(module, func_index, bytes,
- allocator_, enabled_features);
-
- if (result.failed()) {
- FinishAsyncCompileJobWithError(result.error(), kErrorInFunction);
- return;
+ // {bytes} is part of a section buffer owned by the streaming decoder. The
+ // streaming decoder is held alive by the {AsyncCompileJob}, so we can just
+ // use the {bytes} vector as long as the {AsyncCompileJob} is still running.
+ if (!validate_functions_job_handle_) {
+ validate_functions_job_data_.Initialize(module->num_declared_functions);
+ validate_functions_job_handle_ = V8::GetCurrentPlatform()->CreateJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<ValidateFunctionsStreamingJob>(
+ module, enabled_features, &validate_functions_job_data_));
}
+ validate_functions_job_data_.AddUnit(func_index, bytes,
+ validate_functions_job_handle_.get());
}
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->AddCompilationUnit(compilation_unit_builder_.get(),
func_index);
+ return true;
}
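
Aside: ProcessFunctionBody starts the validation job lazily: the job handle is created only when the first function that actually needs background validation arrives, and later functions just enqueue a unit against the existing handle. A small sketch of that deferred-start pattern (types invented):

    #include <memory>
    #include <vector>

    struct WorkHandle {
      std::vector<int> queued;
      void Notify() { /* wake workers; a no-op in this sketch */ }
    };

    void OnFunctionBody(int func_index, std::unique_ptr<WorkHandle>& handle) {
      if (!handle) handle = std::make_unique<WorkHandle>();  // created on demand
      handle->queued.push_back(func_index);
      handle->Notify();
    }
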
void AsyncStreamingProcessor::CommitCompilationUnits() {
@@ -3007,21 +2887,27 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
// Finish the processing of the stream.
void AsyncStreamingProcessor::OnFinishedStream(
- base::OwnedVector<uint8_t> bytes) {
+ base::OwnedVector<const uint8_t> bytes, bool after_error) {
TRACE_STREAMING("Finish stream...\n");
- DCHECK_EQ(NativeModuleCache::PrefixHash(bytes.as_vector()), prefix_hash_);
- ModuleResult result = decoder_.FinishDecoding();
- if (result.failed()) {
- FinishAsyncCompileJobWithError(result.error());
- return;
+ ModuleResult module_result = decoder_.FinishDecoding();
+ if (module_result.failed()) after_error = true;
+
+ if (validate_functions_job_handle_) {
+ // Wait for background validation to finish, then check if a validation
+ // error was found.
+ // TODO(13447): Do not block here; register validation as another finisher
+ // instead.
+ validate_functions_job_handle_->Join();
+ validate_functions_job_handle_.reset();
+ if (validate_functions_job_data_.found_error) after_error = true;
}
job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
- job_->bytes_copy_ = bytes.ReleaseData();
+ job_->bytes_copy_ = std::move(bytes);
// Record event metrics.
auto duration = base::TimeTicks::Now() - job_->start_time_;
- job_->metrics_event_.success = true;
+ job_->metrics_event_.success = !after_error;
job_->metrics_event_.streamed = true;
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
@@ -3029,15 +2915,39 @@ void AsyncStreamingProcessor::OnFinishedStream(
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
+ if (after_error) {
+ if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
+ // Clean up the temporary cache entry.
+ GetWasmEngine()->StreamingCompilationFailed(prefix_hash_);
+ }
+ // Calling {Failed} will invalidate the {AsyncCompileJob} and delete {this}.
+ job_->Failed();
+ return;
+ }
+
+ std::shared_ptr<WasmModule> module = std::move(module_result).value();
+
+ // At this point we identified the module as valid (except maybe for function
+ // bodies, if lazy validation is enabled).
+ // This DCHECK could be considered slow, but it only happens once per async
+ // module compilation, and we only re-decode the module structure, without
+  // validating function bodies. Overall this does not add a lot of overhead.
+ DCHECK(DecodeWasmModule(job_->enabled_features_,
+ job_->bytes_copy_.as_vector(),
+ /* validate functions */ false, kWasmOrigin)
+ .ok());
+
+ DCHECK_EQ(NativeModuleCache::PrefixHash(job_->wire_bytes_.module_bytes()),
+ prefix_hash_);
if (prefix_cache_hit_) {
// Restart as an asynchronous, non-streaming compilation. Most likely
// {PrepareAndStartCompile} will get the native module from the cache.
const bool include_liftoff = v8_flags.liftoff;
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
- result.value().get(), include_liftoff, job_->dynamic_tiering_);
+ module.get(), include_liftoff, job_->dynamic_tiering_);
job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(
- std::move(result).value(), true, code_size_estimate);
+ std::move(module), true, code_size_estimate);
return;
}
@@ -3047,10 +2957,15 @@ void AsyncStreamingProcessor::OnFinishedStream(
HandleScope scope(job_->isolate_);
SaveAndSwitchContext saved_context(job_->isolate_, *job_->native_context_);
- // Record the size of the wire bytes. In synchronous and asynchronous
- // (non-streaming) compilation, this happens in {DecodeWasmModule}.
- auto* histogram = job_->isolate_->counters()->wasm_wasm_module_size_bytes();
- histogram->AddSample(job_->wire_bytes_.module_bytes().length());
+ // Record the size of the wire bytes and the number of functions. In
+ // synchronous and asynchronous (non-streaming) compilation, this happens in
+ // {DecodeWasmModule}.
+ auto* module_size_histogram =
+ job_->isolate_->counters()->wasm_wasm_module_size_bytes();
+ module_size_histogram->AddSample(job_->wire_bytes_.module_bytes().length());
+ auto* num_functions_histogram =
+ job_->isolate_->counters()->wasm_functions_per_wasm_module();
+ num_functions_histogram->AddSample(static_cast<int>(num_functions_));
const bool has_code_section = job_->native_module_ != nullptr;
bool cache_hit = false;
@@ -3059,11 +2974,10 @@ void AsyncStreamingProcessor::OnFinishedStream(
// native module now (would otherwise happen in {PrepareAndStartCompile} or
// {ProcessCodeSectionHeader}).
constexpr size_t kCodeSizeEstimate = 0;
- cache_hit = job_->GetOrCreateNativeModule(std::move(result).value(),
- kCodeSizeEstimate);
+ cache_hit =
+ job_->GetOrCreateNativeModule(std::move(module), kCodeSizeEstimate);
} else {
- job_->native_module_->SetWireBytes(
- {std::move(job_->bytes_copy_), job_->wire_bytes_.length()});
+ job_->native_module_->SetWireBytes(std::move(job_->bytes_copy_));
}
const bool needs_finish =
job_->DecrementAndCheckFinisherCount(AsyncCompileJob::kStreamingDecoder);
@@ -3071,25 +2985,32 @@ void AsyncStreamingProcessor::OnFinishedStream(
if (needs_finish) {
const bool failed = job_->native_module_->compilation_state()->failed();
if (!cache_hit) {
- cache_hit = !GetWasmEngine()->UpdateNativeModuleCache(
- failed, &job_->native_module_, job_->isolate_);
+ auto* prev_native_module = job_->native_module_.get();
+ job_->native_module_ = GetWasmEngine()->UpdateNativeModuleCache(
+ failed, std::move(job_->native_module_), job_->isolate_);
+ cache_hit = prev_native_module != job_->native_module_.get();
}
+ // We finally call {Failed} or {FinishCompile}, which will invalidate the
+ // {AsyncCompileJob} and delete {this}.
if (failed) {
- job_->AsyncCompileFailed();
+ job_->Failed();
} else {
job_->FinishCompile(cache_hit);
}
}
}
-// Report an error detected in the StreamingDecoder.
-void AsyncStreamingProcessor::OnError(const WasmError& error) {
- TRACE_STREAMING("Stream error...\n");
- FinishAsyncCompileJobWithError(error);
-}
-
void AsyncStreamingProcessor::OnAbort() {
TRACE_STREAMING("Abort stream...\n");
+ if (validate_functions_job_handle_) {
+ validate_functions_job_handle_->Cancel();
+ validate_functions_job_handle_.reset();
+ }
+ if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
+ // Clean up the temporary cache entry.
+ GetWasmEngine()->StreamingCompilationFailed(prefix_hash_);
+ }
+ // {Abort} invalidates the {AsyncCompileJob}, which in turn deletes {this}.
job_->Abort();
}
@@ -3117,6 +3038,7 @@ bool AsyncStreamingProcessor::Deserialize(
job_->isolate_->global_handles()->Create(*result.ToHandleChecked());
job_->native_module_ = job_->module_object_->shared_native_module();
job_->wire_bytes_ = ModuleWireBytes(job_->native_module_->wire_bytes());
+ // Calling {FinishCompile} deletes the {AsyncCompileJob} and {this}.
job_->FinishCompile(false);
return true;
}
@@ -3131,12 +3053,22 @@ CompilationStateImpl::CompilationStateImpl(
dynamic_tiering_(dynamic_tiering) {}
void CompilationStateImpl::InitCompileJob() {
- DCHECK_NULL(compile_job_);
+ DCHECK_NULL(js_to_wasm_wrapper_job_);
+ DCHECK_NULL(baseline_compile_job_);
+ DCHECK_NULL(top_tier_compile_job_);
// Create the job, but don't spawn workers yet. This will happen on
// {NotifyConcurrencyIncrease}.
- compile_job_ = V8::GetCurrentPlatform()->CreateJob(
- TaskPriority::kUserVisible, std::make_unique<BackgroundCompileJob>(
- native_module_weak_, async_counters_));
+ js_to_wasm_wrapper_job_ = V8::GetCurrentPlatform()->CreateJob(
+ TaskPriority::kUserBlocking,
+ std::make_unique<AsyncCompileJSToWasmWrapperJob>(native_module_weak_));
+ baseline_compile_job_ = V8::GetCurrentPlatform()->CreateJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<BackgroundCompileJob>(
+ native_module_weak_, async_counters_, CompilationTier::kBaseline));
+ top_tier_compile_job_ = V8::GetCurrentPlatform()->CreateJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<BackgroundCompileJob>(
+ native_module_weak_, async_counters_, CompilationTier::kTopTier));
}
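
Aside: InitCompileJob now wires up three independent background jobs (export wrappers, baseline tier, top tier) so each can be throttled and joined separately. A rough standalone analogue of that split using plain threads instead of the platform job API:

    #include <thread>
    #include <utility>

    struct JobHandle {
      std::thread worker;
      void Join() {
        if (worker.joinable()) worker.join();
      }
    };

    template <typename Task>
    JobHandle PostJob(Task task) { return {std::thread(std::move(task))}; }

    int main() {
      JobHandle wrappers = PostJob([] { /* compile JS-to-Wasm wrappers */ });
      JobHandle baseline = PostJob([] { /* baseline (Liftoff) units */ });
      JobHandle top_tier = PostJob([] { /* top-tier (TurboFan) units */ });
      wrappers.Join();
      baseline.Join();
      top_tier.Join();
    }
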
void CompilationStateImpl::CancelCompilation(
@@ -3256,7 +3188,8 @@ void CompilationStateImpl::InitializeCompilationProgress(
// Compute the default compilation progress for all functions, and set it.
const ExecutionTierPair default_tiers = GetDefaultTiersPerModule(
- native_module_, dynamic_tiering_, IsLazyModule(module));
+ native_module_, dynamic_tiering_, native_module_->IsInDebugState(),
+ IsLazyModule(module));
const uint8_t default_progress =
RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
RequiredTopTierField::encode(default_tiers.top_tier) |
@@ -3289,7 +3222,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
TriggerCallbacks();
}
-uint8_t CompilationStateImpl::AddCompilationUnitInternal(
+void CompilationStateImpl::AddCompilationUnitInternal(
CompilationUnitBuilder* builder, int function_index,
uint8_t function_progress) {
ExecutionTier required_baseline_tier =
@@ -3300,26 +3233,6 @@ uint8_t CompilationStateImpl::AddCompilationUnitInternal(
ExecutionTier reached_tier =
CompilationStateImpl::ReachedTierField::decode(function_progress);
- if (v8_flags.experimental_wasm_gc && !v8_flags.wasm_lazy_compilation) {
- // The Turbofan optimizations we enable for WasmGC code can (for now)
- // take a very long time, so skip Turbofan compilation for super-large
- // functions.
- // Besides, module serialization currently requires that all functions
- // have been TF-compiled. By enabling this limit only for WasmGC, we
- // make sure that non-experimental modules can be serialize as usual.
- // TODO(jkummerow): This is a stop-gap solution to avoid excessive
- // compile times. We would like to replace this hard threshold with
- // a better solution (TBD) eventually.
- constexpr uint32_t kMaxWasmFunctionSizeForTurbofan = 500 * KB;
- uint32_t size = builder->module()->functions[function_index].code.length();
- if (size > kMaxWasmFunctionSizeForTurbofan) {
- required_baseline_tier = ExecutionTier::kLiftoff;
- if (required_top_tier == ExecutionTier::kTurbofan) {
- required_top_tier = ExecutionTier::kLiftoff;
- }
- }
- }
-
if (reached_tier < required_baseline_tier) {
builder->AddBaselineUnit(function_index, required_baseline_tier);
}
@@ -3327,28 +3240,18 @@ uint8_t CompilationStateImpl::AddCompilationUnitInternal(
required_baseline_tier != required_top_tier) {
builder->AddTopTierUnit(function_index, required_top_tier);
}
- return CompilationStateImpl::RequiredBaselineTierField::encode(
- required_baseline_tier) |
- CompilationStateImpl::RequiredTopTierField::encode(required_top_tier) |
- CompilationStateImpl::ReachedTierField::encode(reached_tier);
}
void CompilationStateImpl::InitializeCompilationUnits(
std::unique_ptr<CompilationUnitBuilder> builder) {
int offset = native_module_->module()->num_imported_functions;
- if (native_module_->IsTieredDown()) {
- for (size_t i = 0; i < compilation_progress_.size(); ++i) {
- int func_index = offset + static_cast<int>(i);
- builder->AddDebugUnit(func_index);
- }
- } else {
+ {
base::MutexGuard guard(&callbacks_mutex_);
- for (size_t i = 0; i < compilation_progress_.size(); ++i) {
+ for (size_t i = 0, e = compilation_progress_.size(); i < e; ++i) {
uint8_t function_progress = compilation_progress_[i];
int func_index = offset + static_cast<int>(i);
- compilation_progress_[i] = AddCompilationUnitInternal(
- builder.get(), func_index, function_progress);
+ AddCompilationUnitInternal(builder.get(), func_index, function_progress);
}
}
builder->Commit();
@@ -3356,10 +3259,6 @@ void CompilationStateImpl::InitializeCompilationUnits(
void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
int func_index) {
- if (native_module_->IsTieredDown()) {
- builder->AddDebugUnit(func_index);
- return;
- }
int offset = native_module_->module()->num_imported_functions;
int progress_index = func_index - offset;
uint8_t function_progress;
@@ -3373,14 +3272,7 @@ void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
base::MutexGuard guard(&callbacks_mutex_);
function_progress = compilation_progress_[progress_index];
}
- uint8_t updated_function_progress =
- AddCompilationUnitInternal(builder, func_index, function_progress);
- if (updated_function_progress != function_progress) {
- // This should happen very rarely (only for super-large functions), so we're
- // not worried about overhead.
- base::MutexGuard guard(&callbacks_mutex_);
- compilation_progress_[progress_index] = updated_function_progress;
- }
+ AddCompilationUnitInternal(builder, func_index, function_progress);
}
void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
@@ -3419,8 +3311,6 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
RequiredTopTierField::encode(ExecutionTier::kNone) |
ReachedTierField::encode(ExecutionTier::kNone);
for (auto func_index : lazy_functions) {
- native_module_->UseLazyStub(func_index);
-
compilation_progress_[declared_function_index(module, func_index)] =
kProgressForLazyFunctions;
}
@@ -3428,7 +3318,8 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
// Update compilation state for eagerly compiled functions.
constexpr bool kNotLazy = false;
ExecutionTierPair default_tiers =
- GetDefaultTiersPerModule(native_module_, dynamic_tiering_, kNotLazy);
+ GetDefaultTiersPerModule(native_module_, dynamic_tiering_,
+ native_module_->IsInDebugState(), kNotLazy);
uint8_t progress_for_eager_functions =
RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
RequiredTopTierField::encode(default_tiers.top_tier) |
@@ -3448,7 +3339,7 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
// that as finished already. Baseline compilation is done if we do not have
// any Liftoff functions to compile.
finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
- if (eager_functions.empty()) {
+ if (eager_functions.empty() || v8_flags.wasm_lazy_compilation) {
finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
}
}
@@ -3457,87 +3348,6 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
WaitForCompilationEvent(CompilationEvent::kFinishedBaselineCompilation);
}
-void CompilationStateImpl::InitializeRecompilation(
- TieringState new_tiering_state,
- std::unique_ptr<CompilationEventCallback> recompilation_finished_callback) {
- DCHECK(!failed());
-
- // Hold the mutex as long as possible, to synchronize between multiple
- // recompilations that are triggered at the same time (e.g. when the profiler
- // is disabled).
- base::Optional<base::MutexGuard> guard(&callbacks_mutex_);
-
- // As long as there are outstanding recompilation functions, take part in
- // compilation. This is to avoid recompiling for the same tier or for
- // different tiers concurrently. Note that the compilation unit queues can run
- // empty before {outstanding_recompilation_functions_} drops to zero. In this
- // case, we do not wait for the last running compilation threads to finish
- // their units, but just start our own recompilation already.
- while (outstanding_recompilation_functions_ > 0 &&
- compilation_unit_queues_.GetTotalSize() > 0) {
- guard.reset();
- constexpr JobDelegate* kNoDelegate = nullptr;
- ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
- kNoDelegate, kBaselineOrTopTier);
- guard.emplace(&callbacks_mutex_);
- }
-
- // Information about compilation progress is shared between this class and the
- // NativeModule. Before updating information here, consult the NativeModule to
- // find all functions that need recompilation.
- // Since the current tiering state is updated on the NativeModule before
- // triggering recompilation, it's OK if the information is slightly outdated.
- // If we compile functions twice, the NativeModule will ignore all redundant
- // code (or code compiled for the wrong tier).
- std::vector<int> recompile_function_indexes =
- native_module_->FindFunctionsToRecompile(new_tiering_state);
-
- callbacks_.emplace_back(std::move(recompilation_finished_callback));
- tiering_state_ = new_tiering_state;
-
- // If compilation progress is not initialized yet, then compilation didn't
- // start yet, and new code will be kept tiered-down from the start. For
- // streaming compilation, there is a special path to tier down later, when
- // the module is complete. In any case, we don't need to recompile here.
- base::Optional<CompilationUnitBuilder> builder;
- if (compilation_progress_.size() > 0) {
- builder.emplace(native_module_);
- const WasmModule* module = native_module_->module();
- DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
- DCHECK_GE(module->num_declared_functions,
- recompile_function_indexes.size());
- outstanding_recompilation_functions_ =
- static_cast<int>(recompile_function_indexes.size());
- // Restart recompilation if another recompilation is already happening.
- for (auto& progress : compilation_progress_) {
- progress = MissingRecompilationField::update(progress, false);
- }
- auto new_tier = new_tiering_state == kTieredDown ? ExecutionTier::kLiftoff
- : ExecutionTier::kTurbofan;
- int imported = module->num_imported_functions;
- // Generate necessary compilation units on the fly.
- for (int function_index : recompile_function_indexes) {
- DCHECK_LE(imported, function_index);
- int slot_index = function_index - imported;
- auto& progress = compilation_progress_[slot_index];
- progress = MissingRecompilationField::update(progress, true);
- builder->AddRecompilationUnit(function_index, new_tier);
- }
- }
-
- // Trigger callback if module needs no recompilation.
- if (outstanding_recompilation_functions_ == 0) {
- TriggerCallbacks(base::EnumSet<CompilationEvent>(
- {CompilationEvent::kFinishedRecompilation}));
- }
-
- if (builder.has_value()) {
- // Avoid holding lock while scheduling a compile job.
- guard.reset();
- builder->Commit();
- }
-}
-
void CompilationStateImpl::AddCallback(
std::unique_ptr<CompilationEventCallback> callback) {
base::MutexGuard callbacks_guard(&callbacks_mutex_);
@@ -3571,13 +3381,22 @@ void CompilationStateImpl::CommitCompilationUnits(
// are available to other threads doing an acquire load.
outstanding_js_to_wasm_wrappers_.store(js_to_wasm_wrapper_units.size(),
std::memory_order_release);
+ DCHECK(js_to_wasm_wrapper_job_->IsValid());
+ js_to_wasm_wrapper_job_->NotifyConcurrencyIncrease();
}
if (!baseline_units.empty() || !top_tier_units.empty()) {
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
}
ResetPKUPermissionsForThreadSpawning pku_reset_scope;
- compile_job_->NotifyConcurrencyIncrease();
+ if (!baseline_units.empty()) {
+ DCHECK(baseline_compile_job_->IsValid());
+ baseline_compile_job_->NotifyConcurrencyIncrease();
+ }
+ if (!top_tier_units.empty()) {
+ DCHECK(top_tier_compile_job_->IsValid());
+ top_tier_compile_job_->NotifyConcurrencyIncrease();
+ }
}
void CompilationStateImpl::CommitTopTierCompilationUnit(
@@ -3592,7 +3411,7 @@ void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
// {NotifyConcurrencyIncrease} can spawn new threads which could inherit PKU
// permissions (which would be a security issue).
DCHECK(!CodeSpaceWriteScope::IsInScope());
- compile_job_->NotifyConcurrencyIncrease();
+ top_tier_compile_job_->NotifyConcurrencyIncrease();
}
std::shared_ptr<JSToWasmWrapperCompilationUnit>
@@ -3626,12 +3445,18 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(Isolate* isolate,
CodePageCollectionMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
DCHECK_EQ(isolate, unit->isolate());
- Handle<CodeT> code = unit->Finalize();
+ // Note: The code is either the compiled signature-specific wrapper or the
+ // generic wrapper built-in.
+ Handle<Code> code = unit->Finalize();
uint32_t index =
GetExportWrapperIndex(unit->canonical_sig_index(), unit->is_import());
isolate->heap()->js_to_wasm_wrappers().Set(index,
MaybeObject::FromObject(*code));
- RecordStats(*code, isolate->counters());
+ if (!code->is_builtin()) {
+ // Do not increase code stats for non-jitted wrappers.
+ RecordStats(*code, isolate->counters());
+ isolate->counters()->wasm_compiled_export_wrapper()->Increment(1);
+ }
}
}
@@ -3642,8 +3467,8 @@ CompilationUnitQueues::Queue* CompilationStateImpl::GetQueueForCompileTask(
base::Optional<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit(
- CompilationUnitQueues::Queue* queue, CompileBaselineOnly baseline_only) {
- return compilation_unit_queues_.GetNextUnit(queue, baseline_only);
+ CompilationUnitQueues::Queue* queue, CompilationTier tier) {
+ return compilation_unit_queues_.GetNextUnit(queue, tier);
}
void CompilationStateImpl::OnFinishedUnits(
@@ -3662,8 +3487,6 @@ void CompilationStateImpl::OnFinishedUnits(
DCHECK_EQ(compilation_progress_.size(),
native_module_->module()->num_declared_functions);
- base::EnumSet<CompilationEvent> triggered_events;
-
for (size_t i = 0; i < code_vector.size(); i++) {
WasmCode* code = code_vector[i];
DCHECK_NOT_NULL(code);
@@ -3699,25 +3522,6 @@ void CompilationStateImpl::OnFinishedUnits(
bytes_since_last_chunk_ += code->instructions().size();
}
- if (V8_UNLIKELY(MissingRecompilationField::decode(function_progress))) {
- DCHECK_LT(0, outstanding_recompilation_functions_);
- // If tiering up, accept any TurboFan code. For tiering down, look at
- // the {for_debugging} flag. The tier can be Liftoff or TurboFan and is
- // irrelevant here. In particular, we want to ignore any outstanding
- // non-debugging units.
- bool matches = tiering_state_ == kTieredDown
- ? code->for_debugging()
- : code->tier() == ExecutionTier::kTurbofan;
- if (matches) {
- outstanding_recompilation_functions_--;
- compilation_progress_[slot_index] = MissingRecompilationField::update(
- compilation_progress_[slot_index], false);
- if (outstanding_recompilation_functions_ == 0) {
- triggered_events.Add(CompilationEvent::kFinishedRecompilation);
- }
- }
- }
-
// Update function's compilation progress.
if (code->tier() > reached_tier) {
compilation_progress_[slot_index] = ReachedTierField::update(
@@ -3727,7 +3531,7 @@ void CompilationStateImpl::OnFinishedUnits(
}
}
- TriggerCallbacks(triggered_events);
+ TriggerCallbacks();
}
void CompilationStateImpl::OnFinishedJSToWasmWrapperUnits(int num) {
@@ -3738,10 +3542,10 @@ void CompilationStateImpl::OnFinishedJSToWasmWrapperUnits(int num) {
TriggerCallbacks();
}
-void CompilationStateImpl::TriggerCallbacks(
- base::EnumSet<CompilationEvent> triggered_events) {
+void CompilationStateImpl::TriggerCallbacks() {
DCHECK(!callbacks_mutex_.TryLock());
+ base::EnumSet<CompilationEvent> triggered_events;
if (outstanding_export_wrappers_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedExportWrappers);
if (outstanding_baseline_units_ == 0) {
@@ -3767,11 +3571,9 @@ void CompilationStateImpl::TriggerCallbacks(
// Don't trigger past events again.
triggered_events -= finished_events_;
- // Recompilation can happen multiple times, thus do not store this. There can
- // also be multiple compilation chunks.
- finished_events_ |= triggered_events -
- CompilationEvent::kFinishedRecompilation -
- CompilationEvent::kFinishedCompilationChunk;
+ // There can be multiple compilation chunks, thus do not store this.
+ finished_events_ |=
+ triggered_events - CompilationEvent::kFinishedCompilationChunk;
for (auto event :
{std::make_pair(CompilationEvent::kFailedCompilation,
@@ -3781,9 +3583,7 @@ void CompilationStateImpl::TriggerCallbacks(
std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
"wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedCompilationChunk,
- "wasm.CompilationChunkFinished"),
- std::make_pair(CompilationEvent::kFinishedRecompilation,
- "wasm.RecompilationFinished")}) {
+ "wasm.CompilationChunkFinished")}) {
if (!triggered_events.contains(event.first)) continue;
DCHECK_NE(compilation_id_, kInvalidCompilationID);
TRACE_EVENT1("v8.wasm", event.second, "id", compilation_id_);
@@ -3792,8 +3592,7 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
- if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
- outstanding_recompilation_functions_ == 0) {
+ if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0) {
auto new_end = std::remove_if(
callbacks_.begin(), callbacks_.end(), [](const auto& callback) {
return callback->release_after_final_event();
@@ -3833,7 +3632,7 @@ void CompilationStateImpl::PublishCompilationResults(
native_module_->module()
->isorecursive_canonical_type_ids[function.sig_index];
WasmImportWrapperCache::CacheKey key(
- compiler::kDefaultImportCallKind, canonical_type_index,
+ kDefaultImportCallKind, canonical_type_index,
static_cast<int>(function.sig->parameter_count()), kNoSuspend);
// If two imported functions have the same key, only one of them should
// have been added as a compilation unit. So it is always the first time
@@ -3889,11 +3688,13 @@ void CompilationStateImpl::SchedulePublishCompilationResults(
}
}
-size_t CompilationStateImpl::NumOutstandingCompilations() const {
- size_t outstanding_wrappers =
- outstanding_js_to_wasm_wrappers_.load(std::memory_order_relaxed);
- size_t outstanding_functions = compilation_unit_queues_.GetTotalSize();
- return outstanding_wrappers + outstanding_functions;
+size_t CompilationStateImpl::NumOutstandingExportWrappers() const {
+ return outstanding_js_to_wasm_wrappers_.load(std::memory_order_relaxed);
+}
+
+size_t CompilationStateImpl::NumOutstandingCompilations(
+ CompilationTier tier) const {
+ return compilation_unit_queues_.GetSizeForTier(tier);
}
void CompilationStateImpl::SetError() {
@@ -3909,61 +3710,62 @@ void CompilationStateImpl::SetError() {
void CompilationStateImpl::WaitForCompilationEvent(
CompilationEvent expect_event) {
- class WaitForCompilationEventCallback : public CompilationEventCallback {
- public:
- WaitForCompilationEventCallback(std::shared_ptr<base::Semaphore> semaphore,
- std::shared_ptr<std::atomic<bool>> done,
- base::EnumSet<CompilationEvent> events)
- : semaphore_(std::move(semaphore)),
- done_(std::move(done)),
- events_(events) {}
-
- void call(CompilationEvent event) override {
- if (!events_.contains(event)) return;
- done_->store(true, std::memory_order_relaxed);
- semaphore_->Signal();
- }
-
- private:
- std::shared_ptr<base::Semaphore> semaphore_;
- std::shared_ptr<std::atomic<bool>> done_;
- base::EnumSet<CompilationEvent> events_;
- };
-
- auto semaphore = std::make_shared<base::Semaphore>(0);
- auto done = std::make_shared<std::atomic<bool>>(false);
+ switch (expect_event) {
+ case CompilationEvent::kFinishedExportWrappers:
+ break;
+ case CompilationEvent::kFinishedBaselineCompilation:
+ if (baseline_compile_job_->IsValid()) baseline_compile_job_->Join();
+ break;
+ default:
+ // Waiting on other CompilationEvents doesn't make sense.
+ UNREACHABLE();
+ }
+ if (js_to_wasm_wrapper_job_->IsValid()) js_to_wasm_wrapper_job_->Join();
+#ifdef DEBUG
base::EnumSet<CompilationEvent> events{expect_event,
CompilationEvent::kFailedCompilation};
- {
- base::MutexGuard callbacks_guard(&callbacks_mutex_);
- if (finished_events_.contains_any(events)) return;
- callbacks_.emplace_back(std::make_unique<WaitForCompilationEventCallback>(
- semaphore, done, events));
- }
-
- class WaitForEventDelegate final : public JobDelegate {
- public:
- explicit WaitForEventDelegate(std::shared_ptr<std::atomic<bool>> done)
- : done_(std::move(done)) {}
+ base::MutexGuard guard(&callbacks_mutex_);
+ DCHECK(finished_events_.contains_any(events));
+#endif
+}
- bool ShouldYield() override {
- return done_->load(std::memory_order_relaxed);
+void CompilationStateImpl::TierUpAllFunctions() {
+ const WasmModule* module = native_module_->module();
+ uint32_t num_wasm_functions = module->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ CompilationUnitBuilder builder(native_module_);
+ for (uint32_t i = 0; i < num_wasm_functions; ++i) {
+ int func_index = module->num_imported_functions + i;
+ WasmCode* code = native_module_->GetCode(func_index);
+ if (!code || !code->is_turbofan()) {
+ builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan);
}
+ }
+ builder.Commit();
+ // Join the compilation until no compilation units are left.
+ class DummyDelegate final : public JobDelegate {
+ bool ShouldYield() override { return false; }
bool IsJoiningThread() const override { return true; }
-
void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); }
-
uint8_t GetTaskId() override { return kMainTaskId; }
-
- private:
- std::shared_ptr<std::atomic<bool>> done_;
};
- WaitForEventDelegate delegate{done};
+ DummyDelegate delegate;
ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate,
- kBaselineOnly);
- semaphore->Wait();
+ CompilationTier::kTopTier);
+
+ // We cannot wait for other compilation threads to finish, so we explicitly
+ // compile all functions which are not yet available as TurboFan code.
+ for (uint32_t i = 0; i < num_wasm_functions; ++i) {
+ uint32_t func_index = module->num_imported_functions + i;
+ WasmCode* code = native_module_->GetCode(func_index);
+ if (!code || !code->is_turbofan()) {
+ wasm::GetWasmEngine()->CompileFunction(async_counters_.get(),
+ native_module_, func_index,
+ wasm::ExecutionTier::kTurbofan);
+ }
+ }
}
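TierUpAllFunctions replaces the old recompilation machinery with direct participation: it schedules top-tier units for every function lacking TurboFan code, helps drain the queue on the calling thread, and finally compiles any stragglers synchronously. The sketch below shows that two-phase shape; FakeModule, CompileUnit and compile_with_turbofan are placeholders, not V8 types.

#include <deque>
#include <functional>
#include <vector>

struct FakeModule {
  std::vector<bool> has_turbofan_code;  // indexed by declared function
};

using CompileUnit = std::function<void()>;

void TierUpAll(FakeModule& module, std::deque<CompileUnit>& queue,
               const std::function<void(int)>& compile_with_turbofan) {
  // Phase 1: schedule a top-tier unit for every function that is not yet
  // optimized, then help drain the shared queue on this thread (background
  // workers may be draining it concurrently in the real implementation).
  const int num_functions = static_cast<int>(module.has_turbofan_code.size());
  for (int i = 0; i < num_functions; ++i) {
    if (!module.has_turbofan_code[i]) {
      queue.push_back([&, i] {
        compile_with_turbofan(i);
        module.has_turbofan_code[i] = true;
      });
    }
  }
  while (!queue.empty()) {
    CompileUnit unit = std::move(queue.front());
    queue.pop_front();
    unit();
  }
  // Phase 2: other threads may still hold units they already took, so
  // explicitly compile anything that is still missing top-tier code.
  for (int i = 0; i < num_functions; ++i) {
    if (!module.has_turbofan_code[i]) compile_with_turbofan(i);
  }
}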
namespace {
@@ -4028,7 +3830,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module) {
module->isorecursive_canonical_type_ids[function.sig_index];
int wrapper_index =
GetExportWrapperIndex(canonical_type_index, function.imported);
- auto existing_wrapper =
+ MaybeObject existing_wrapper =
isolate->heap()->js_to_wasm_wrappers().Get(wrapper_index);
if (existing_wrapper.IsStrongOrWeak() &&
!existing_wrapper.GetHeapObject().IsUndefined()) {
@@ -4073,19 +3875,22 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module) {
JSToWasmWrapperKey key = pair.first;
JSToWasmWrapperCompilationUnit* unit = pair.second.get();
DCHECK_EQ(isolate, unit->isolate());
- Handle<CodeT> code = unit->Finalize();
+ Handle<Code> code = unit->Finalize();
int wrapper_index = GetExportWrapperIndex(key.second, key.first);
isolate->heap()->js_to_wasm_wrappers().Set(
wrapper_index, HeapObjectReference::Strong(*code));
- RecordStats(*code, isolate->counters());
+ if (!code->is_builtin()) {
+ // Do not increase code stats for non-jitted wrappers.
+ RecordStats(*code, isolate->counters());
+ isolate->counters()->wasm_compiled_export_wrapper()->Increment(1);
+ }
}
}
WasmCode* CompileImportWrapper(
- NativeModule* native_module, Counters* counters,
- compiler::WasmImportCallKind kind, const FunctionSig* sig,
- uint32_t canonical_type_index, int expected_arity, Suspend suspend,
- WasmImportWrapperCache::ModificationScope* cache_scope) {
+ NativeModule* native_module, Counters* counters, ImportCallKind kind,
+ const FunctionSig* sig, uint32_t canonical_type_index, int expected_arity,
+ Suspend suspend, WasmImportWrapperCache::ModificationScope* cache_scope) {
// Entry should exist, so that we don't insert a new one and invalidate
// other threads' iterators/references, but it should not have been compiled
// yet.
@@ -4106,7 +3911,7 @@ WasmCode* CompileImportWrapper(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
- ExecutionTier::kNone, kNoDebugging);
+ ExecutionTier::kNone, kNotForDebugging);
published_code = native_module->PublishCode(std::move(wasm_code));
}
(*cache_scope)[key] = published_code;
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 0e1c33b257..e984ad436c 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -56,14 +56,11 @@ struct WasmModule;
V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
- Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
- std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
+ Isolate* isolate, WasmFeatures enabled_features, ErrorThrower* thrower,
+ std::shared_ptr<const WasmModule> module, ModuleWireBytes wire_bytes,
int compilation_id, v8::metrics::Recorder::ContextId context_id,
ProfileInformation* pgo_info);
-void RecompileNativeModule(NativeModule* native_module,
- TieringState new_tiering_state);
-
V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module);
@@ -72,10 +69,9 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module);
// compiled yet.
V8_EXPORT_PRIVATE
WasmCode* CompileImportWrapper(
- NativeModule* native_module, Counters* counters,
- compiler::WasmImportCallKind kind, const FunctionSig* sig,
- uint32_t canonical_type_index, int expected_arity, Suspend suspend,
- WasmImportWrapperCache::ModificationScope* cache_scope);
+ NativeModule* native_module, Counters* counters, ImportCallKind kind,
+ const FunctionSig* sig, uint32_t canonical_type_index, int expected_arity,
+ Suspend suspend, WasmImportWrapperCache::ModificationScope* cache_scope);
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
@@ -138,8 +134,8 @@ class WrapperQueue {
// TODO(wasm): factor out common parts of this with the synchronous pipeline.
class AsyncCompileJob {
public:
- AsyncCompileJob(Isolate* isolate, const WasmFeatures& enabled_features,
- std::unique_ptr<byte[]> bytes_copy, size_t length,
+ AsyncCompileJob(Isolate* isolate, WasmFeatures enabled_features,
+ base::OwnedVector<const uint8_t> bytes,
Handle<Context> context, Handle<Context> incumbent_context,
const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver,
@@ -164,11 +160,25 @@ class AsyncCompileJob {
class CompilationStateCallback;
// States of the AsyncCompileJob.
- class DecodeModule; // Step 1 (async)
- class DecodeFail; // Step 1b (sync)
- class PrepareAndStartCompile; // Step 2 (sync)
- class CompileFailed; // Step 3a (sync)
- class CompileFinished; // Step 3b (sync)
+ // Step 1 (async). Decodes the wasm module.
+ // --> Fail on decoding failure,
+ // --> PrepareAndStartCompile on success.
+ class DecodeModule;
+
+ // Step 2 (sync). Prepares runtime objects and starts background compilation.
+ // --> finish directly on native module cache hit,
+ // --> finish directly on validation error,
+ // --> trigger eager compilation, if any; FinishCompile is triggered when
+ // done.
+ class PrepareAndStartCompile;
+
+ // Step 3 (sync). Compilation finished. Finalize the module and resolve the
+ // promise.
+ class FinishCompilation;
+
+ // Step 4 (sync). Decoding, validation or compilation failed. Reject the
+ // promise.
+ class Fail;
friend class AsyncStreamingProcessor;
@@ -217,12 +227,11 @@ class AsyncCompileJob {
void FinishCompile(bool is_after_cache_hit);
- void DecodeFailed(const WasmError&);
- void AsyncCompileFailed();
+ void Failed();
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
- void FinishModule();
+ void FinishSuccessfully();
void StartForegroundTask();
void ExecuteForegroundTaskImmediately();
@@ -263,7 +272,7 @@ class AsyncCompileJob {
base::TimeTicks start_time_;
// Copy of the module wire bytes, moved into the {native_module_} on its
// creation.
- std::unique_ptr<byte[]> bytes_copy_;
+ base::OwnedVector<const uint8_t> bytes_copy_;
// Reference to the wire bytes (held in {bytes_copy_} or as part of
// {native_module_}).
ModuleWireBytes wire_bytes_;
diff --git a/deps/v8/src/wasm/module-decoder-impl.h b/deps/v8/src/wasm/module-decoder-impl.h
index 9cecb23da3..9396fe11d6 100644
--- a/deps/v8/src/wasm/module-decoder-impl.h
+++ b/deps/v8/src/wasm/module-decoder-impl.h
@@ -17,53 +17,18 @@
#include "src/wasm/constant-expression-interface.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-subtyping.h"
+#include "src/wasm/well-known-imports.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
+namespace v8::internal::wasm {
#define TRACE(...) \
do { \
if (v8_flags.trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
-class NoTracer {
- public:
- // Hooks for extracting byte offsets of things.
- void TypeOffset(uint32_t offset) {}
- void ImportOffset(uint32_t offset) {}
- void ImportsDone() {}
- void TableOffset(uint32_t offset) {}
- void MemoryOffset(uint32_t offset) {}
- void TagOffset(uint32_t offset) {}
- void GlobalOffset(uint32_t offset) {}
- void StartOffset(uint32_t offset) {}
- void ElementOffset(uint32_t offset) {}
- void DataOffset(uint32_t offset) {}
-
- // Hooks for annotated hex dumps.
- void Bytes(const byte* start, uint32_t count) {}
-
- void Description(const char* desc) {}
- void Description(const char* desc, size_t length) {}
- void Description(uint32_t number) {}
- void Description(ValueType type) {}
- void Description(HeapType type) {}
- void Description(const FunctionSig* sig) {}
-
- void NextLine() {}
- void NextLineIfFull() {}
- void NextLineIfNonEmpty() {}
-
- void InitializerExpression(const byte* start, const byte* end,
- ValueType expected_type) {}
- void FunctionBody(const WasmFunction* func, const byte* start) {}
- void FunctionName(uint32_t func_index) {}
- void NameSection(const byte* start, const byte* end, uint32_t offset) {}
-};
-
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kInstTraceString[] = "metadata.code.trace_inst";
@@ -96,23 +61,26 @@ inline bool validate_utf8(Decoder* decoder, WireBytesRef string) {
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
-template <class Tracer>
inline WireBytesRef consume_string(Decoder* decoder,
unibrow::Utf8Variant grammar,
- const char* name, Tracer& tracer) {
- tracer.Description(name);
+ const char* name, ITracer* tracer) {
+ if (tracer) tracer->Description(name);
uint32_t length = decoder->consume_u32v(" length:", tracer);
- tracer.Description(length);
- tracer.NextLine();
+ if (tracer) {
+ tracer->Description(length);
+ tracer->NextLine();
+ }
uint32_t offset = decoder->pc_offset();
const byte* string_start = decoder->pc();
// Consume bytes before validation to guarantee that the string is not oob.
if (length > 0) {
- tracer.Bytes(decoder->pc(), length);
- tracer.Description(name);
- tracer.Description(": ");
- tracer.Description(reinterpret_cast<const char*>(decoder->pc()), length);
- tracer.NextLine();
+ if (tracer) {
+ tracer->Bytes(decoder->pc(), length);
+ tracer->Description(name);
+ tracer->Description(": ");
+ tracer->Description(reinterpret_cast<const char*>(decoder->pc()), length);
+ tracer->NextLine();
+ }
decoder->consume_bytes(length, name);
if (decoder->ok()) {
switch (grammar) {
@@ -128,6 +96,8 @@ inline WireBytesRef consume_string(Decoder* decoder,
decoder->errorf(string_start, "%s: no valid WTF-8 string", name);
}
break;
+ case unibrow::Utf8Variant::kUtf8NoTrap:
+ UNREACHABLE();
}
}
}
@@ -137,19 +107,16 @@ inline WireBytesRef consume_string(Decoder* decoder,
inline WireBytesRef consume_string(Decoder* decoder,
unibrow::Utf8Variant grammar,
const char* name) {
- NoTracer no_tracer;
- return consume_string(decoder, grammar, name, no_tracer);
+ return consume_string(decoder, grammar, name, ITracer::NoTrace);
}
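Tracing moves from a compile-time Tracer template parameter (with the empty NoTracer class removed above) to a runtime ITracer* that may simply be null; ITracer::NoTrace reads like a named nullptr, and every call site guards with "if (tracer)". A minimal sketch of that convention follows, with a heavily abbreviated interface (the real ITracer has many more hooks).

#include <cstdint>
#include <cstdio>

class ITracer {
 public:
  // Convention: pass this (i.e. nullptr) to disable tracing entirely.
  static constexpr ITracer* NoTrace = nullptr;

  virtual ~ITracer() = default;
  virtual void TypeOffset(uint32_t offset) = 0;
  virtual void Description(const char* desc) = 0;
  virtual void NextLine() = 0;
};

class PrintingTracer final : public ITracer {
 public:
  void TypeOffset(uint32_t offset) override { std::printf("@%u ", offset); }
  void Description(const char* desc) override { std::printf("%s", desc); }
  void NextLine() override { std::printf("\n"); }
};

// Call sites pay only a null check when tracing is off.
void DecodeSomething(ITracer* tracer) {
  if (tracer) {
    tracer->TypeOffset(0);
    tracer->Description("kind: func");
    tracer->NextLine();
  }
  // ...actual decoding...
}

The trade-off versus the template is one virtual call per hook when tracing is enabled, in exchange for a single non-templated ModuleDecoderImpl.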
-template <class Tracer>
inline WireBytesRef consume_utf8_string(Decoder* decoder, const char* name,
- Tracer& tracer) {
+ ITracer* tracer) {
return consume_string(decoder, unibrow::Utf8Variant::kUtf8, name, tracer);
}
-template <class Tracer>
inline SectionCode IdentifyUnknownSectionInternal(Decoder* decoder,
- Tracer& tracer) {
+ ITracer* tracer) {
WireBytesRef string = consume_utf8_string(decoder, "section name", tracer);
if (decoder->failed()) {
return kUnknownSectionCode;
@@ -185,10 +152,9 @@ inline SectionCode IdentifyUnknownSectionInternal(Decoder* decoder,
// An iterator over the sections in a wasm binary module.
// Automatically skips all unknown sections.
-template <class Tracer>
class WasmSectionIterator {
public:
- explicit WasmSectionIterator(Decoder* decoder, Tracer& tracer)
+ explicit WasmSectionIterator(Decoder* decoder, ITracer* tracer)
: decoder_(decoder),
tracer_(tracer),
section_code_(kUnknownSectionCode),
@@ -239,7 +205,7 @@ class WasmSectionIterator {
private:
Decoder* decoder_;
- Tracer& tracer_;
+ ITracer* tracer_;
SectionCode section_code_;
const byte* section_start_;
const byte* payload_start_;
@@ -253,21 +219,28 @@ class WasmSectionIterator {
return;
}
section_start_ = decoder_->pc();
- tracer_.NextLine(); // Empty line before next section.
+ // Empty line before next section.
+ if (tracer_) tracer_->NextLine();
uint8_t section_code = decoder_->consume_u8("section kind: ", tracer_);
- tracer_.Description(SectionName(static_cast<SectionCode>(section_code)));
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(SectionName(static_cast<SectionCode>(section_code)));
+ tracer_->NextLine();
+ }
// Read and check the section size.
uint32_t section_length = decoder_->consume_u32v("section length", tracer_);
- tracer_.Description(section_length);
- tracer_.NextLine();
-
+ if (tracer_) {
+ tracer_->Description(section_length);
+ tracer_->NextLine();
+ }
payload_start_ = decoder_->pc();
- if (decoder_->checkAvailable(section_length)) {
- // Get the limit of the section within the module.
- section_end_ = payload_start_ + section_length;
- } else {
- // The section would extend beyond the end of the module.
+ section_end_ = payload_start_ + section_length;
+ if (section_length > decoder_->available_bytes()) {
+ decoder_->errorf(
+ section_start_,
+ "section (code %u, \"%s\") extends past end of the module "
+ "(length %u, remaining bytes %u)",
+ section_code, SectionName(static_cast<SectionCode>(section_code)),
+ section_length, decoder_->available_bytes());
section_end_ = payload_start_;
}
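The section iterator now reports an explicit error when a section's declared length runs past the end of the module, instead of silently truncating the section. A small sketch of that bounds check with plain integers (no Decoder class) is below.

#include <cstddef>
#include <cstdio>

// Returns true if a section payload of {section_length} bytes starting at
// {payload_offset} fits inside a module of {module_size} bytes.
// Precondition: payload_offset <= module_size.
bool CheckSectionFits(size_t module_size, size_t payload_offset,
                      size_t section_length) {
  size_t remaining = module_size - payload_offset;
  if (section_length > remaining) {
    std::fprintf(stderr,
                 "section extends past end of the module "
                 "(length %zu, remaining bytes %zu)\n",
                 section_length, remaining);
    return false;
  }
  return true;
}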
@@ -299,35 +272,18 @@ class WasmSectionIterator {
}
};
-// Add an explicit template deduction guide for {WasmSectionIterator}.
-template <class T>
-WasmSectionIterator(Decoder*, T&) -> WasmSectionIterator<T>;
-
// The main logic for decoding the bytes of a module.
-template <class Tracer>
-class ModuleDecoderTemplate : public Decoder {
+class ModuleDecoderImpl : public Decoder {
public:
- explicit ModuleDecoderTemplate(const WasmFeatures& enabled,
- ModuleOrigin origin, Tracer& tracer)
- : Decoder(nullptr, nullptr),
- enabled_features_(enabled),
- tracer_(tracer),
- origin_(origin) {}
-
- ModuleDecoderTemplate(const WasmFeatures& enabled, const byte* module_start,
- const byte* module_end, ModuleOrigin origin,
- Tracer& tracer)
- : Decoder(module_start, module_end),
- enabled_features_(enabled),
- module_start_(module_start),
- module_end_(module_end),
- tracer_(tracer),
- origin_(origin) {
- if (end_ < start_) {
- error(start_, "end is less than start");
- end_ = start_;
- }
- }
+ ModuleDecoderImpl(WasmFeatures enabled_features,
+ base::Vector<const uint8_t> wire_bytes, ModuleOrigin origin,
+ ITracer* tracer = ITracer::NoTrace)
+ : Decoder(wire_bytes),
+ enabled_features_(enabled_features),
+ module_(std::make_shared<WasmModule>(origin)),
+ module_start_(wire_bytes.begin()),
+ module_end_(wire_bytes.end()),
+ tracer_(tracer) {}
void onFirstError() override {
pc_ = end_; // On error, terminate section decoding loop.
@@ -359,24 +315,13 @@ class ModuleDecoderTemplate : public Decoder {
}
}
- void StartDecoding(Counters* counters, AccountingAllocator* allocator) {
- CHECK_NULL(module_);
- SetCounters(counters);
- module_.reset(
- new WasmModule(std::make_unique<Zone>(allocator, "signatures")));
- module_->initial_pages = 0;
- module_->maximum_pages = 0;
- module_->mem_export = false;
- module_->origin = origin_;
- }
-
void DecodeModuleHeader(base::Vector<const uint8_t> bytes, uint8_t offset) {
if (failed()) return;
Reset(bytes, offset);
const byte* pos = pc_;
uint32_t magic_word = consume_u32("wasm magic", tracer_);
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
if (magic_word != kWasmMagic) {
errorf(pos,
@@ -388,7 +333,7 @@ class ModuleDecoderTemplate : public Decoder {
pos = pc_;
{
uint32_t magic_version = consume_u32("wasm version", tracer_);
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
if (magic_version != kWasmVersion) {
errorf(pos,
"expected version %02x %02x %02x %02x, "
@@ -447,9 +392,6 @@ class ModuleDecoderTemplate : public Decoder {
// Now check the ordering constraints of specific unordered sections.
switch (section_code) {
case kDataCountSectionCode:
- // If wasm-gc is enabled, we allow the data count section anywhere in
- // the module.
- if (enabled_features_.has_gc()) return true;
return check_order(kElementSectionCode, kCodeSectionCode);
case kTagSectionCode:
return check_order(kMemorySectionCode, kGlobalSectionCode);
@@ -601,54 +543,56 @@ class ModuleDecoderTemplate : public Decoder {
TypeDefinition consume_base_type_definition() {
DCHECK(enabled_features_.has_gc());
uint8_t kind = consume_u8(" kind: ", tracer_);
- tracer_.Description(TypeKindName(kind));
+ if (tracer_) tracer_->Description(TypeKindName(kind));
switch (kind) {
case kWasmFunctionTypeCode: {
- const FunctionSig* sig = consume_sig(module_->signature_zone.get());
- return {sig, kNoSuperType};
+ const FunctionSig* sig = consume_sig(&module_->signature_zone);
+ return {sig, kNoSuperType, v8_flags.wasm_final_types};
}
case kWasmStructTypeCode: {
- const StructType* type = consume_struct(module_->signature_zone.get());
- return {type, kNoSuperType};
+ const StructType* type = consume_struct(&module_->signature_zone);
+ return {type, kNoSuperType, v8_flags.wasm_final_types};
}
case kWasmArrayTypeCode: {
- const ArrayType* type = consume_array(module_->signature_zone.get());
- return {type, kNoSuperType};
+ const ArrayType* type = consume_array(&module_->signature_zone);
+ return {type, kNoSuperType, v8_flags.wasm_final_types};
}
default:
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
errorf(pc() - 1, "unknown type form: %d", kind);
return {};
}
}
- bool check_supertype(uint32_t supertype) {
- if (V8_UNLIKELY(supertype >= module_->types.size())) {
- errorf(pc(), "type %zu: forward-declared supertype %d",
- module_->types.size(), supertype);
- return false;
- }
- return true;
- }
-
TypeDefinition consume_subtype_definition() {
DCHECK(enabled_features_.has_gc());
uint8_t kind = read_u8<Decoder::FullValidationTag>(pc(), "type kind");
- if (kind == kWasmSubtypeCode) {
- consume_bytes(1, " subtype, ", tracer_);
+ if (kind == kWasmSubtypeCode || kind == kWasmSubtypeFinalCode) {
+ bool is_final =
+ v8_flags.wasm_final_types && kind == kWasmSubtypeFinalCode;
+ consume_bytes(1, is_final ? " subtype final, " : " subtype extensible, ",
+ tracer_);
constexpr uint32_t kMaximumSupertypes = 1;
uint32_t supertype_count =
consume_count("supertype count", kMaximumSupertypes);
- uint32_t supertype = supertype_count == 1
- ? consume_u32v("supertype", tracer_)
- : kNoSuperType;
+ uint32_t supertype = kNoSuperType;
if (supertype_count == 1) {
- tracer_.Description(supertype);
- tracer_.NextLine();
+ supertype = consume_u32v("supertype", tracer_);
+ if (supertype >= kV8MaxWasmTypes) {
+ errorf(
+ "supertype %u is greater than the maximum number of type "
+ "definitions %zu supported by V8",
+ supertype, kV8MaxWasmTypes);
+ return {};
+ }
+ if (tracer_) {
+ tracer_->Description(supertype);
+ tracer_->NextLine();
+ }
}
- if (!check_supertype(supertype)) return {};
TypeDefinition type = consume_base_type_definition();
type.supertype = supertype;
+ type.is_final = is_final;
return type;
} else {
return consume_base_type_definition();
@@ -661,29 +605,33 @@ class ModuleDecoderTemplate : public Decoder {
// Non wasm-gc type section decoding.
if (!enabled_features_.has_gc()) {
- module_->types.reserve(types_count);
+ module_->types.resize(types_count);
+ module_->isorecursive_canonical_type_ids.resize(types_count);
for (uint32_t i = 0; i < types_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
uint8_t opcode =
read_u8<FullValidationTag>(pc(), "signature definition");
- tracer_.Bytes(pc_, 1);
- tracer_.TypeOffset(pc_offset());
- tracer_.Description(" kind: ");
- tracer_.Description(TypeKindName(opcode));
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Bytes(pc_, 1);
+ tracer_->TypeOffset(pc_offset());
+ tracer_->Description(" kind: ");
+ tracer_->Description(TypeKindName(opcode));
+ tracer_->NextLine();
+ }
switch (opcode) {
case kWasmFunctionTypeCode: {
consume_bytes(1, "function");
- const FunctionSig* sig = consume_sig(module_->signature_zone.get());
+ const FunctionSig* sig = consume_sig(&module_->signature_zone);
if (!ok()) break;
- module_->add_signature(sig, kNoSuperType);
- type_canon->AddRecursiveGroup(module_.get(), 1);
+ module_->types[i] = {sig, kNoSuperType, v8_flags.wasm_final_types};
+ type_canon->AddRecursiveGroup(module_.get(), 1, i);
break;
}
case kWasmArrayTypeCode:
case kWasmStructTypeCode:
case kWasmSubtypeCode:
+ case kWasmSubtypeFinalCode:
case kWasmRecursiveTypeGroupCode:
errorf(
"Unknown type code 0x%02x, enable with --experimental-wasm-gc",
@@ -701,30 +649,39 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < types_count; ++i) {
TRACE("DecodeType[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
uint8_t kind = read_u8<Decoder::FullValidationTag>(pc(), "type kind");
+ size_t initial_size = module_->types.size();
if (kind == kWasmRecursiveTypeGroupCode) {
consume_bytes(1, "rec. group definition", tracer_);
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
uint32_t group_size =
consume_count("recursive group size", kV8MaxWasmTypes);
- if (module_->types.size() + group_size > kV8MaxWasmTypes) {
+ if (initial_size + group_size > kV8MaxWasmTypes) {
errorf(pc(), "Type definition count exceeds maximum %zu",
kV8MaxWasmTypes);
return;
}
- // Reserve space for the current recursive group, so we are
- // allowed to reference its elements.
- module_->types.reserve(module_->types.size() + group_size);
+ // We need to resize types before decoding the type definitions in this
+ // group, so that the correct type size is visible to type definitions.
+ module_->types.resize(initial_size + group_size);
+ module_->isorecursive_canonical_type_ids.resize(initial_size +
+ group_size);
for (uint32_t j = 0; j < group_size; j++) {
- tracer_.TypeOffset(pc_offset());
+ if (tracer_) tracer_->TypeOffset(pc_offset());
TypeDefinition type = consume_subtype_definition();
- if (ok()) module_->add_type(type);
+ if (ok()) module_->types[initial_size + j] = type;
+ }
+ if (ok()) {
+ type_canon->AddRecursiveGroup(module_.get(), group_size,
+ static_cast<uint32_t>(initial_size));
}
- if (ok()) type_canon->AddRecursiveGroup(module_.get(), group_size);
} else {
- tracer_.TypeOffset(pc_offset());
+ if (tracer_) tracer_->TypeOffset(pc_offset());
+ // Similarly to above, we need to resize types for a group of size 1.
+ module_->types.resize(initial_size + 1);
+ module_->isorecursive_canonical_type_ids.resize(initial_size + 1);
TypeDefinition type = consume_subtype_definition();
if (ok()) {
- module_->add_type(type);
+ module_->types[initial_size] = type;
type_canon->AddRecursiveGroup(module_.get(), 1);
}
}
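For wasm-gc type sections, the decoder now resizes the module's type and canonical-id vectors before decoding a recursive group, so members of the group can reference each other by index while still being decoded; previously it only reserved capacity. A small sketch of that pattern follows, with TypeDef and decode_one as placeholders for the real types and for consume_subtype_definition().

#include <cstdint>
#include <functional>
#include <vector>

struct TypeDef {
  uint32_t supertype = ~0u;  // placeholder payload
  bool decoded = false;
};

// Decode {group_size} type definitions that may refer to any index in
// [0, initial_size + group_size), including forward references inside the
// group.
void DecodeRecursiveGroup(std::vector<TypeDef>& types, uint32_t group_size,
                          const std::function<TypeDef(uint32_t)>& decode_one) {
  size_t initial_size = types.size();
  // Resize (not reserve) first: indices inside the group must already be
  // valid while the group's members are being decoded.
  types.resize(initial_size + group_size);
  for (uint32_t j = 0; j < group_size; ++j) {
    types[initial_size + j] =
        decode_one(static_cast<uint32_t>(initial_size) + j);
  }
}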
@@ -735,16 +692,28 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < module_->types.size(); ++i) {
uint32_t explicit_super = module_->supertype(i);
if (explicit_super == kNoSuperType) continue;
- // {consume_super_type} has checked this.
- DCHECK_LT(explicit_super, module_->types.size());
+ if (explicit_super >= module_->types.size()) {
+ errorf("type %u: supertype %u out of bounds", i, explicit_super);
+ continue;
+ }
+ if (explicit_super >= i) {
+ errorf("type %u: forward-declared supertype %u", i, explicit_super);
+ continue;
+ }
int depth = GetSubtypingDepth(module, i);
DCHECK_GE(depth, 0);
if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) {
- errorf("type %d: subtyping depth is greater than allowed", i);
+ errorf("type %u: subtyping depth is greater than allowed", i);
+ continue;
+ }
+ // This check is technically redundant; we include it for the improved
+ // error message.
+ if (module->types[explicit_super].is_final) {
+ errorf("type %u extends final type %u", i, explicit_super);
continue;
}
if (!ValidSubtypeDefinition(i, explicit_super, module, module)) {
- errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ errorf("type %u has invalid explicit supertype %u", i, explicit_super);
continue;
}
}
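Supertype checks now run after the whole type section is decoded and are stricter: the supertype index must be in bounds, must come before the subtype (no forward declarations), must not exceed the subtyping depth limit, and must not be final. A compact sketch of those rules over a flat type list is below; TypeEntry and the depth walk are simplifications of the real validation.

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint32_t kNoSuperType = ~0u;

struct TypeEntry {
  uint32_t supertype = kNoSuperType;
  bool is_final = false;
};

// Validate explicit supertypes of all types; returns false on the first error.
bool ValidateSupertypes(const std::vector<TypeEntry>& types,
                        uint32_t max_subtyping_depth) {
  for (uint32_t i = 0; i < types.size(); ++i) {
    uint32_t super = types[i].supertype;
    if (super == kNoSuperType) continue;
    if (super >= types.size()) {
      std::fprintf(stderr, "type %u: supertype %u out of bounds\n", i, super);
      return false;
    }
    if (super >= i) {
      std::fprintf(stderr, "type %u: forward-declared supertype %u\n", i,
                   super);
      return false;
    }
    if (types[super].is_final) {
      std::fprintf(stderr, "type %u extends final type %u\n", i, super);
      return false;
    }
    // Walk the supertype chain to bound the subtyping depth.
    uint32_t depth = 0;
    for (uint32_t s = super; s != kNoSuperType; s = types[s].supertype) ++depth;
    if (depth > max_subtyping_depth) {
      std::fprintf(stderr, "type %u: subtyping depth is greater than allowed\n",
                   i);
      return false;
    }
  }
  return true;
}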
@@ -757,7 +726,7 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
TRACE("DecodeImportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- tracer_.ImportOffset(pc_offset());
+ if (tracer_) tracer_->ImportOffset(pc_offset());
module_->import_table.push_back({
{0, 0}, // module_name
@@ -771,7 +740,7 @@ class ModuleDecoderTemplate : public Decoder {
import->field_name = consume_utf8_string(this, "field name", tracer_);
import->kind =
static_cast<ImportExportKindCode>(consume_u8("kind: ", tracer_));
- tracer_.Description(ExternalKindName(import->kind));
+ if (tracer_) tracer_->Description(ExternalKindName(import->kind));
switch (import->kind) {
case kExternalFunction: {
// ===== Imported function ===========================================
@@ -836,7 +805,7 @@ class ModuleDecoderTemplate : public Decoder {
if (global->mutability) {
module_->num_imported_mutable_globals++;
}
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
break;
}
case kExternalTag: {
@@ -845,8 +814,8 @@ class ModuleDecoderTemplate : public Decoder {
module_->num_imported_tags++;
const WasmTagSig* tag_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
- consume_tag_sig_index(module_.get(), &tag_sig);
- module_->tags.emplace_back(tag_sig);
+ uint32_t sig_index = consume_tag_sig_index(module_.get(), &tag_sig);
+ module_->tags.emplace_back(tag_sig, sig_index);
break;
}
default:
@@ -854,30 +823,37 @@ class ModuleDecoderTemplate : public Decoder {
break;
}
}
- tracer_.ImportsDone();
+ UpdateMemorySizes();
+ module_->type_feedback.well_known_imports.Initialize(
+ module_->num_imported_functions);
+ if (tracer_) tracer_->ImportsDone();
}
void DecodeFunctionSection() {
uint32_t functions_count =
- consume_count("functions count", kV8MaxWasmFunctions);
- if (counters_ != nullptr) {
- auto counter = SELECT_WASM_COUNTER(GetCounters(), origin_,
- wasm_functions_per, module);
- counter->AddSample(static_cast<int>(functions_count));
- }
+ consume_count("functions count", v8_flags.max_wasm_functions);
DCHECK_EQ(module_->functions.size(), module_->num_imported_functions);
uint32_t total_function_count =
module_->num_imported_functions + functions_count;
module_->functions.resize(total_function_count);
module_->num_declared_functions = functions_count;
+ // Also initialize the {validated_functions} bitset here, now that we know
+ // the number of declared functions.
DCHECK_NULL(module_->validated_functions);
module_->validated_functions =
std::make_unique<std::atomic<uint8_t>[]>((functions_count + 7) / 8);
+ if (is_asmjs_module(module_.get())) {
+ // Mark all asm.js functions as valid by design (it's faster to do this
+ // here than to check this in {WasmModule::function_was_validated}).
+ std::fill_n(module_->validated_functions.get(), (functions_count + 7) / 8,
+ 0xff);
+ }
+
for (uint32_t func_index = module_->num_imported_functions;
func_index < total_function_count; ++func_index) {
WasmFunction* function = &module_->functions[func_index];
function->func_index = func_index;
- tracer_.FunctionName(func_index);
+ if (tracer_) tracer_->FunctionName(func_index);
function->sig_index = consume_sig_index(module_.get(), &function->sig);
if (!ok()) return;
}
@@ -887,7 +863,7 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
for (uint32_t i = 0; ok() && i < table_count; i++) {
- tracer_.TableOffset(pc_offset());
+ if (tracer_) tracer_->TableOffset(pc_offset());
module_->tables.emplace_back();
WasmTable* table = &module_->tables.back();
const byte* type_position = pc();
@@ -930,7 +906,7 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t memory_count = consume_count("memory count", kV8MaxWasmMemories);
for (uint32_t i = 0; ok() && i < memory_count; i++) {
- tracer_.MemoryOffset(pc_offset());
+ if (tracer_) tracer_->MemoryOffset(pc_offset());
if (!AddMemory(module_.get())) break;
consume_memory_flags(&module_->has_shared_memory, &module_->is_memory64,
&module_->has_maximum_pages);
@@ -941,6 +917,20 @@ class ModuleDecoderTemplate : public Decoder {
module_->has_maximum_pages, max_pages, &module_->maximum_pages,
module_->is_memory64 ? k64BitLimits : k32BitLimits);
}
+ UpdateMemorySizes();
+ }
+
+ void UpdateMemorySizes() {
+ // Set min and max memory size.
+ const uintptr_t platform_max_pages = module_->is_memory64
+ ? kV8MaxWasmMemory64Pages
+ : kV8MaxWasmMemory32Pages;
+ module_->min_memory_size =
+ std::min(platform_max_pages, uintptr_t{module_->initial_pages}) *
+ kWasmPageSize;
+ module_->max_memory_size =
+ std::min(platform_max_pages, uintptr_t{module_->maximum_pages}) *
+ kWasmPageSize;
}
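UpdateMemorySizes derives the engine-facing byte limits from the declared page counts, clamped to the per-platform maximum (which differs for memory64), and it now runs both after the import section and after the memory section. The numeric sketch below assumes illustrative page caps; only kWasmPageSize = 64 KiB is fixed by the spec, the real kV8MaxWasmMemory{32,64}Pages limits are configuration-dependent.

#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kWasmPageSize = 64 * 1024;  // 64 KiB per wasm page
constexpr uintptr_t kMaxMemory32Pages = 65536;     // example: 4 GiB total
constexpr uintptr_t kMaxMemory64Pages = 1u << 24;  // example cap for memory64

struct MemorySizes {
  uintptr_t min_bytes;
  uintptr_t max_bytes;
};

MemorySizes ComputeMemorySizes(uintptr_t initial_pages, uintptr_t maximum_pages,
                               bool is_memory64) {
  uintptr_t platform_max_pages =
      is_memory64 ? kMaxMemory64Pages : kMaxMemory32Pages;
  return {std::min(platform_max_pages, initial_pages) * kWasmPageSize,
          std::min(platform_max_pages, maximum_pages) * kWasmPageSize};
}

int main() {
  // A module declaring 2 initial pages and 100 maximum pages.
  MemorySizes sizes = ComputeMemorySizes(2, 100, /*is_memory64=*/false);
  std::printf("min=%zu bytes, max=%zu bytes\n",
              static_cast<size_t>(sizes.min_bytes),
              static_cast<size_t>(sizes.max_bytes));
}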
void DecodeGlobalSection() {
@@ -951,7 +941,7 @@ class ModuleDecoderTemplate : public Decoder {
module_->globals.reserve(imported_globals + globals_count);
for (uint32_t i = 0; ok() && i < globals_count; ++i) {
TRACE("DecodeGlobal[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
- tracer_.GlobalOffset(pc_offset());
+ if (tracer_) tracer_->GlobalOffset(pc_offset());
ValueType type = consume_value_type();
bool mutability = consume_mutability();
if (failed()) break;
@@ -967,9 +957,11 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
TRACE("DecodeExportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- tracer_.Description("export #");
- tracer_.Description(i);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description("export #");
+ tracer_->Description(i);
+ tracer_->NextLine();
+ }
module_->export_table.push_back({
{0, 0}, // name
@@ -983,8 +975,10 @@ class ModuleDecoderTemplate : public Decoder {
const byte* pos = pc();
exp->kind =
static_cast<ImportExportKindCode>(consume_u8("kind: ", tracer_));
- tracer_.Description(ExternalKindName(exp->kind));
- tracer_.Description(" ");
+ if (tracer_) {
+ tracer_->Description(ExternalKindName(exp->kind));
+ tracer_->Description(" ");
+ }
switch (exp->kind) {
case kExternalFunction: {
WasmFunction* func = nullptr;
@@ -1032,10 +1026,11 @@ class ModuleDecoderTemplate : public Decoder {
errorf(pos, "invalid export kind 0x%02x", exp->kind);
break;
}
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
}
// Check for duplicate exports (except for asm.js).
- if (ok() && origin_ == kWasmOrigin && module_->export_table.size() > 1) {
+ if (ok() && module_->origin == kWasmOrigin &&
+ module_->export_table.size() > 1) {
std::vector<WasmExport> sorted_exports(module_->export_table);
auto cmp_less = [this](const WasmExport& a, const WasmExport& b) {
@@ -1066,11 +1061,11 @@ class ModuleDecoderTemplate : public Decoder {
}
void DecodeStartSection() {
- tracer_.StartOffset(pc_offset());
+ if (tracer_) tracer_->StartOffset(pc_offset());
WasmFunction* func;
const byte* pos = pc_;
module_->start_function_index = consume_func_index(module_.get(), &func);
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
if (func &&
(func->sig->parameter_count() > 0 || func->sig->return_count() > 0)) {
error(pos, "invalid start function: non-zero parameter or return count");
@@ -1082,23 +1077,17 @@ class ModuleDecoderTemplate : public Decoder {
consume_count("segment count", v8_flags.wasm_max_table_size);
for (uint32_t i = 0; i < segment_count; ++i) {
- tracer_.ElementOffset(pc_offset());
+ if (tracer_) tracer_->ElementOffset(pc_offset());
WasmElemSegment segment = consume_element_segment_header();
- tracer_.NextLineIfNonEmpty();
+ if (tracer_) tracer_->NextLineIfNonEmpty();
if (failed()) return;
DCHECK_NE(segment.type, kWasmBottom);
- uint32_t num_elem =
- consume_count("number of elements", max_table_init_entries());
-
- for (uint32_t j = 0; j < num_elem; j++) {
- ConstantExpression entry =
- segment.element_type == WasmElemSegment::kExpressionElements
- ? consume_init_expr(module_.get(), segment.type)
- : ConstantExpression::RefFunc(
- consume_element_func_index(segment.type));
+ for (uint32_t j = 0; j < segment.element_count; j++) {
+ // Just run validation on elements; do not store them anywhere. We will
+ // decode them again from wire bytes as needed.
+ consume_element_segment_entry(module_.get(), segment);
if (failed()) return;
- segment.entries.push_back(entry);
}
module_->elem_segments.push_back(std::move(segment));
}
@@ -1110,8 +1099,10 @@ class ModuleDecoderTemplate : public Decoder {
CalculateGlobalOffsets(module_.get());
uint32_t code_section_start = pc_offset();
uint32_t functions_count = consume_u32v("functions count", tracer_);
- tracer_.Description(functions_count);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(functions_count);
+ tracer_->NextLine();
+ }
CheckFunctionsCount(functions_count, code_section_start);
auto inst_traces_it = this->inst_traces_.begin();
@@ -1119,13 +1110,17 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < functions_count; ++i) {
int function_index = module_->num_imported_functions + i;
- tracer_.Description("function #");
- tracer_.FunctionName(function_index);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description("function #");
+ tracer_->FunctionName(function_index);
+ tracer_->NextLine();
+ }
const byte* pos = pc();
uint32_t size = consume_u32v("body size", tracer_);
- tracer_.Description(size);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(size);
+ tracer_->NextLine();
+ }
if (size > kV8MaxWasmFunctionSize) {
errorf(pos, "size %u > maximum function size %zu", size,
kV8MaxWasmFunctionSize);
@@ -1181,7 +1176,9 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t offset) {
WasmFunction* function = &module_->functions[func_index];
function->code = {offset, length};
- tracer_.FunctionBody(function, pc_ - (pc_offset() - offset));
+ if (tracer_) {
+ tracer_->FunctionBody(function, pc_ - (pc_offset() - offset));
+ }
}
bool CheckDataSegmentsCount(uint32_t data_segments_count) {
@@ -1204,7 +1201,7 @@ class ModuleDecoderTemplate : public Decoder {
const byte* pos = pc();
TRACE("DecodeDataSegment[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- tracer_.DataOffset(pc_offset());
+ if (tracer_) tracer_->DataOffset(pc_offset());
bool is_active;
uint32_t memory_index;
@@ -1224,8 +1221,10 @@ class ModuleDecoderTemplate : public Decoder {
}
uint32_t source_length = consume_u32v("source size", tracer_);
- tracer_.Description(source_length);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(source_length);
+ tracer_->NextLine();
+ }
uint32_t source_offset = pc_offset();
if (is_active) {
@@ -1236,9 +1235,11 @@ class ModuleDecoderTemplate : public Decoder {
WasmDataSegment* segment = &module_->data_segments.back();
- tracer_.Bytes(pc_, source_length);
- tracer_.Description("segment data");
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Bytes(pc_, source_length);
+ tracer_->Description("segment data");
+ tracer_->NextLine();
+ }
consume_bytes(source_length, "segment data");
if (failed()) break;
@@ -1247,8 +1248,10 @@ class ModuleDecoderTemplate : public Decoder {
}
void DecodeNameSection() {
- tracer_.NameSection(pc_, end_,
- buffer_offset_ + static_cast<uint32_t>(pc_ - start_));
+ if (tracer_) {
+ tracer_->NameSection(
+ pc_, end_, buffer_offset_ + static_cast<uint32_t>(pc_ - start_));
+ }
// TODO(titzer): find a way to report name errors as warnings.
// Ignore all but the first occurrence of name section.
if (!has_seen_unordered_section(kNameSectionCode)) {
@@ -1268,11 +1271,10 @@ class ModuleDecoderTemplate : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
- NoTracer tracing_already_done;
if (name_type == NameSectionKindCode::kModuleCode) {
WireBytesRef name =
consume_string(&inner, unibrow::Utf8Variant::kLossyUtf8,
- "module name", tracing_already_done);
+ "module name", ITracer::NoTrace);
if (inner.ok() && validate_utf8(&inner, name)) {
module_->name = name;
}
@@ -1550,18 +1552,18 @@ class ModuleDecoderTemplate : public Decoder {
void DecodeDataCountSection() {
module_->num_declared_data_segments =
consume_count("data segments count", kV8MaxWasmDataSegments);
- tracer_.NextLineIfNonEmpty();
+ if (tracer_) tracer_->NextLineIfNonEmpty();
}
void DecodeTagSection() {
uint32_t tag_count = consume_count("tag count", kV8MaxWasmTags);
for (uint32_t i = 0; ok() && i < tag_count; ++i) {
TRACE("DecodeTag[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
- tracer_.TagOffset(pc_offset());
+ if (tracer_) tracer_->TagOffset(pc_offset());
const WasmTagSig* tag_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
- consume_tag_sig_index(module_.get(), &tag_sig);
- module_->tags.emplace_back(tag_sig);
+ uint32_t sig_index = consume_tag_sig_index(module_.get(), &tag_sig);
+ module_->tags.emplace_back(tag_sig, sig_index);
}
}
@@ -1620,26 +1622,25 @@ class ModuleDecoderTemplate : public Decoder {
}
void ValidateAllFunctions() {
- DCHECK(ok());
-
- // Spawn a {ValidateFunctionsTask} and join it. The earliest error found
- // will be set on this decoder.
- std::unique_ptr<JobHandle> job_handle = V8::GetCurrentPlatform()->CreateJob(
- TaskPriority::kUserVisible,
- std::make_unique<ValidateFunctionsTask>(this));
- job_handle->Join();
+ DCHECK(!error_.has_error());
+ // Pass nullptr for an "empty" filter function.
+ error_ = ValidateFunctions(module_.get(), enabled_features_,
+ base::VectorOf(start_, end_ - start_), nullptr);
}
// Decodes an entire module.
- ModuleResult DecodeModule(Counters* counters, AccountingAllocator* allocator,
- bool validate_functions) {
- StartDecoding(counters, allocator);
- uint32_t offset = 0;
- base::Vector<const byte> orig_bytes(start(), end() - start());
- DecodeModuleHeader(base::VectorOf(start(), end() - start()), offset);
- if (failed()) {
- return FinishDecoding();
+ ModuleResult DecodeModule(bool validate_functions) {
+ base::Vector<const byte> wire_bytes(start_, end_ - start_);
+ size_t max_size = max_module_size();
+ if (wire_bytes.size() > max_size) {
+ return ModuleResult{WasmError{0, "size > maximum module size (%zu): %zu",
+ max_size, wire_bytes.size()}};
}
+
+ uint32_t offset = 0;
+ DecodeModuleHeader(wire_bytes, offset);
+ if (failed()) return toResult(nullptr);
+
// Size of the module header.
offset += 8;
Decoder decoder(start_ + offset, end_, offset);
@@ -1647,35 +1648,36 @@ class ModuleDecoderTemplate : public Decoder {
WasmSectionIterator section_iter(&decoder, tracer_);
while (ok()) {
- // Shift the offset by the section header length
+ // Shift the offset by the section header length.
offset += section_iter.payload_start() - section_iter.section_start();
if (section_iter.section_code() != SectionCode::kUnknownSectionCode) {
DecodeSection(section_iter.section_code(), section_iter.payload(),
offset);
}
- // Shift the offset by the remaining section payload
+ // Shift the offset by the remaining section payload.
offset += section_iter.payload_length();
if (!section_iter.more() || !ok()) break;
section_iter.advance(true);
}
if (ok() && validate_functions) {
- Reset(orig_bytes);
+ Reset(wire_bytes);
ValidateAllFunctions();
}
- if (v8_flags.dump_wasm_module) DumpModule(orig_bytes);
+ if (v8_flags.dump_wasm_module) DumpModule(wire_bytes);
if (decoder.failed()) {
- return decoder.toResult<std::shared_ptr<WasmModule>>(nullptr);
+ return decoder.toResult(nullptr);
}
return FinishDecoding();
}
// Decodes a single anonymous function starting at {start_}.
- FunctionResult DecodeSingleFunctionForTesting(
- Zone* zone, const ModuleWireBytes& wire_bytes, const WasmModule* module) {
+ FunctionResult DecodeSingleFunctionForTesting(Zone* zone,
+ ModuleWireBytes wire_bytes,
+ const WasmModule* module) {
DCHECK(ok());
pc_ = start_;
expect_u8("type form", kWasmFunctionTypeCode);
@@ -1685,13 +1687,11 @@ class ModuleDecoderTemplate : public Decoder {
if (!ok()) return FunctionResult{std::move(error_)};
- AccountingAllocator* allocator = zone->allocator();
-
FunctionBody body{function.sig, off(pc_), pc_, end_};
WasmFeatures unused_detected_features;
- DecodeResult result = ValidateFunctionBody(
- allocator, enabled_features_, module, &unused_detected_features, body);
+ DecodeResult result = ValidateFunctionBody(enabled_features_, module,
+ &unused_detected_features, body);
if (result.failed()) return FunctionResult{std::move(result).error()};
@@ -1710,48 +1710,21 @@ class ModuleDecoderTemplate : public Decoder {
return consume_init_expr(module_.get(), expected);
}
- const std::shared_ptr<WasmModule>& shared_module() const { return module_; }
-
- Counters* GetCounters() const {
- DCHECK_NOT_NULL(counters_);
- return counters_;
+ // Takes a module as a parameter so that wasm-disassembler.cc can pass its
+ // own module.
+ ConstantExpression consume_element_segment_entry(
+ WasmModule* module, const WasmElemSegment& segment) {
+ if (segment.element_type == WasmElemSegment::kExpressionElements) {
+ return consume_init_expr(module, segment.type);
+ } else {
+ return ConstantExpression::RefFunc(
+ consume_element_func_index(module, segment.type));
+ }
}
- void SetCounters(Counters* counters) {
- DCHECK_NULL(counters_);
- counters_ = counters;
- }
+ const std::shared_ptr<WasmModule>& shared_module() const { return module_; }
private:
- const WasmFeatures enabled_features_;
- std::shared_ptr<WasmModule> module_;
- const byte* module_start_ = nullptr;
- const byte* module_end_ = nullptr;
- Counters* counters_ = nullptr;
- Tracer& tracer_;
- // The type section is the first section in a module.
- uint8_t next_ordered_section_ = kFirstSectionInModule;
- // We store next_ordered_section_ as uint8_t instead of SectionCode so that
- // we can increment it. This static_assert should make sure that SectionCode
- // does not get bigger than uint8_t accidentally.
- static_assert(sizeof(ModuleDecoderTemplate::next_ordered_section_) ==
- sizeof(SectionCode),
- "type mismatch");
- uint32_t seen_unordered_sections_ = 0;
- static_assert(
- kBitsPerByte * sizeof(ModuleDecoderTemplate::seen_unordered_sections_) >
- kLastKnownModuleSection,
- "not enough bits");
- ModuleOrigin origin_;
- AccountingAllocator allocator_;
- Zone init_expr_zone_{&allocator_, "constant expr. zone"};
-
- // Instruction traces are decoded in DecodeInstTraceSection as a 3-tuple
- // of the function index, function offset, and mark_id. In DecodeCodeSection,
- // after the functions have been decoded this is translated to pairs of module
- // offsets and mark ids.
- std::vector<std::tuple<uint32_t, uint32_t, uint32_t>> inst_traces_;
-
bool has_seen_unordered_section(SectionCode section_code) {
return seen_unordered_sections_ & (1 << section_code);
}
@@ -1811,16 +1784,19 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t consume_sig_index(WasmModule* module, const FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
- tracer_.Bytes(pos, static_cast<uint32_t>(pc_ - pos));
+ if (tracer_) tracer_->Bytes(pos, static_cast<uint32_t>(pc_ - pos));
if (!module->has_signature(sig_index)) {
- errorf(pos, "signature index %u out of bounds (%d signatures)", sig_index,
- static_cast<int>(module->types.size()));
+ errorf(pos, "no signature at index %u (%d %s)", sig_index,
+ static_cast<int>(module->types.size()),
+ enabled_features_.has_gc() ? "types" : "signatures");
*sig = nullptr;
return 0;
}
*sig = module->signature(sig_index);
- tracer_.Description(*sig);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(*sig);
+ tracer_->NextLine();
+ }
return sig_index;
}
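
The hunks above replace the decoder's `Tracer&` reference with a nullable `ITracer*` (the new `ITracer* tracer_;` member appears further down), so tracing is disabled by passing `ITracer::NoTrace` and every call site guards with `if (tracer_)`. A minimal sketch of that pattern, using hypothetical names and no V8 internals:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical tracer interface mirroring the guarded-pointer pattern.
    struct ITracer {
      static constexpr ITracer* NoTrace = nullptr;  // null disables tracing
      virtual ~ITracer() = default;
      virtual void Description(uint32_t value) = 0;
      virtual void NextLine() = 0;
    };

    struct PrintingTracer final : ITracer {
      void Description(uint32_t value) override { std::printf("%u", value); }
      void NextLine() override { std::printf("\n"); }
    };

    class SketchDecoder {
     public:
      explicit SketchDecoder(ITracer* tracer) : tracer_(tracer) {}

      uint32_t consume_count(uint32_t raw_count) {
        // Each tracing call is guarded instead of dispatching through a
        // reference unconditionally.
        if (tracer_) {
          tracer_->Description(raw_count);
          tracer_->NextLine();
        }
        return raw_count;
      }

     private:
      ITracer* tracer_;  // may be ITracer::NoTrace
    };

Constructing `SketchDecoder{ITracer::NoTrace}` turns every trace into a cheap branch, which is what lets the production code drop the separate `NoTracer` class later in this diff.
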
@@ -1838,11 +1814,13 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t consume_count(const char* name, size_t maximum) {
const byte* p = pc_;
uint32_t count = consume_u32v(name, tracer_);
- tracer_.Description(count);
- if (count == 1) {
- tracer_.Description(": ");
- } else {
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(count);
+ if (count == 1) {
+ tracer_->Description(": ");
+ } else {
+ tracer_->NextLine();
+ }
}
if (count > maximum) {
errorf(p, "%s of %u exceeds internal limit of %zu", name, count, maximum);
@@ -1871,7 +1849,7 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t consume_index(const char* name, std::vector<T>* vector, T** ptr) {
const byte* pos = pc_;
uint32_t index = consume_u32v("index:", tracer_);
- tracer_.Description(index);
+ if (tracer_) tracer_->Description(index);
if (index >= vector->size()) {
errorf(pos, "%s index %u out of bounds (%d entr%s)", name, index,
static_cast<int>(vector->size()),
@@ -1884,10 +1862,14 @@ class ModuleDecoderTemplate : public Decoder {
}
void consume_table_flags(const char* name, bool* has_maximum_out) {
- tracer_.Bytes(pc_, 1);
+ if (tracer_) tracer_->Bytes(pc_, 1);
uint8_t flags = consume_u8("table limits flags");
- tracer_.Description(flags == kNoMaximum ? " no maximum" : " with maximum");
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(flags == kNoMaximum ? " no maximum"
+ : " with maximum");
+ tracer_->NextLine();
+ }
+
static_assert(kNoMaximum == 0 && kWithMaximum == 1);
*has_maximum_out = flags == kWithMaximum;
if (V8_UNLIKELY(flags > kWithMaximum)) {
@@ -1897,7 +1879,7 @@ class ModuleDecoderTemplate : public Decoder {
void consume_memory_flags(bool* is_shared_out, bool* is_memory64_out,
bool* has_maximum_out) {
- tracer_.Bytes(pc_, 1);
+ if (tracer_) tracer_->Bytes(pc_, 1);
uint8_t flags = consume_u8("memory limits flags");
// Flags 0..7 are valid (3 bits).
if (flags & ~0x7) {
@@ -1914,7 +1896,7 @@ class ModuleDecoderTemplate : public Decoder {
// V8 does not support shared memory without a maximum.
if (is_shared && !has_maximum) {
- errorf(pc() - 1, "shared memory must have a maximum defined");
+ error(pc() - 1, "shared memory must have a maximum defined");
}
if (is_memory64 && !enabled_features_.has_memory64()) {
@@ -1925,10 +1907,12 @@ class ModuleDecoderTemplate : public Decoder {
}
// Tracing.
- if (is_shared) tracer_.Description(" shared");
- if (is_memory64) tracer_.Description(" mem64");
- tracer_.Description(has_maximum ? " with maximum" : " no maximum");
- tracer_.NextLine();
+ if (tracer_) {
+ if (is_shared) tracer_->Description(" shared");
+ if (is_memory64) tracer_->Description(" mem64");
+ tracer_->Description(has_maximum ? " with maximum" : " no maximum");
+ tracer_->NextLine();
+ }
}
enum ResizableLimitsType : bool { k32BitLimits, k64BitLimits };
@@ -1949,8 +1933,10 @@ class ModuleDecoderTemplate : public Decoder {
name, initial_64, units, max_initial, units);
}
*initial = static_cast<uint32_t>(initial_64);
- tracer_.Description(*initial);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(*initial);
+ tracer_->NextLine();
+ }
if (has_maximum) {
pos = pc();
uint64_t maximum_64 = type == k64BitLimits
@@ -1968,8 +1954,10 @@ class ModuleDecoderTemplate : public Decoder {
name, maximum_64, units, *initial, units);
}
*maximum = static_cast<uint32_t>(maximum_64);
- tracer_.Description(*maximum);
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(*maximum);
+ tracer_->NextLine();
+ }
} else {
*maximum = max_initial;
}
@@ -1987,18 +1975,16 @@ class ModuleDecoderTemplate : public Decoder {
}
ConstantExpression consume_init_expr(WasmModule* module, ValueType expected) {
- uint32_t length;
-
// The error message mimics the one generated by the {WasmFullDecoder}.
#define TYPE_CHECK(found) \
- if (V8_UNLIKELY(!IsSubtypeOf(found, expected, module_.get()))) { \
+ if (V8_UNLIKELY(!IsSubtypeOf(found, expected, module))) { \
errorf(pc() + 1, \
"type error in constant expression[0] (expected %s, got %s)", \
expected.name().c_str(), found.name().c_str()); \
return {}; \
}
- tracer_.NextLineIfNonEmpty();
+ if (tracer_) tracer_->NextLineIfNonEmpty();
// To avoid initializing a {WasmFullDecoder} for the most common
// expressions, we replicate their decoding and validation here. The
// manually handled cases correspond to {ConstantExpression}'s kinds.
@@ -2011,48 +1997,55 @@ class ModuleDecoderTemplate : public Decoder {
}
switch (static_cast<WasmOpcode>(*pc())) {
case kExprI32Const: {
- int32_t value =
- read_i32v<FullValidationTag>(pc() + 1, &length, "i32.const");
+ auto [value, length] =
+ read_i32v<FullValidationTag>(pc() + 1, "i32.const");
if (V8_UNLIKELY(failed())) return {};
if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
TYPE_CHECK(kWasmI32)
- tracer_.InitializerExpression(pc_, pc_ + length + 2, kWasmI32);
+ if (tracer_) {
+ tracer_->InitializerExpression(pc_, pc_ + length + 2, kWasmI32);
+ }
consume_bytes(length + 2);
return ConstantExpression::I32Const(value);
}
break;
}
case kExprRefFunc: {
- uint32_t index =
- read_u32v<FullValidationTag>(pc() + 1, &length, "ref.func");
+ auto [index, length] =
+ read_u32v<FullValidationTag>(pc() + 1, "ref.func");
if (V8_UNLIKELY(failed())) return {};
if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
- if (V8_UNLIKELY(index >= module_->functions.size())) {
+ if (V8_UNLIKELY(index >= module->functions.size())) {
errorf(pc() + 1, "function index %u out of bounds", index);
return {};
}
ValueType type =
enabled_features_.has_typed_funcref()
- ? ValueType::Ref(module_->functions[index].sig_index)
+ ? ValueType::Ref(module->functions[index].sig_index)
: kWasmFuncRef;
TYPE_CHECK(type)
- module_->functions[index].declared = true;
- tracer_.InitializerExpression(pc_, pc_ + length + 2, type);
+ module->functions[index].declared = true;
+ if (tracer_) {
+ tracer_->InitializerExpression(pc_, pc_ + length + 2, type);
+ }
consume_bytes(length + 2);
return ConstantExpression::RefFunc(index);
}
break;
}
case kExprRefNull: {
- HeapType type = value_type_reader::read_heap_type<FullValidationTag>(
- this, pc() + 1, &length, enabled_features_);
- value_type_reader::ValidateHeapType<FullValidationTag>(
- this, pc_, module_.get(), type);
+ auto [type, length] =
+ value_type_reader::read_heap_type<FullValidationTag>(
+ this, pc() + 1, enabled_features_);
+ value_type_reader::ValidateHeapType<FullValidationTag>(this, pc_,
+ module, type);
if (V8_UNLIKELY(failed())) return {};
if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
TYPE_CHECK(ValueType::RefNull(type))
- tracer_.InitializerExpression(pc_, pc_ + length + 2,
- ValueType::RefNull(type));
+ if (tracer_) {
+ tracer_->InitializerExpression(pc_, pc_ + length + 2,
+ ValueType::RefNull(type));
+ }
consume_bytes(length + 2);
return ConstantExpression::RefNull(type.representation());
}
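
These hunks also reflect a change to the low-level readers: `read_i32v`, `read_u32v`, `read_heap_type` and `read_value_type` now return the decoded value together with its byte length as a pair, consumed with structured bindings, instead of filling a `uint32_t* length` out-parameter. A generic sketch of that refactor with a hypothetical LEB128 reader (not the V8 implementation):

    #include <cstdint>
    #include <utility>

    // Hypothetical unsigned-LEB128 reader: returns {value, bytes consumed}.
    std::pair<uint32_t, uint32_t> read_u32v_sketch(const uint8_t* pc) {
      uint32_t result = 0;
      uint32_t shift = 0;
      uint32_t length = 0;
      uint8_t b;
      do {
        b = pc[length++];
        result |= static_cast<uint32_t>(b & 0x7f) << shift;
        shift += 7;
      } while ((b & 0x80) != 0 && shift < 35);
      return {result, length};
    }

    uint32_t consume_example(const uint8_t* pc) {
      // Old style:  uint32_t length; uint32_t value = read_u32v(pc, &length);
      // New style: one expression yields both results.
      auto [value, length] = read_u32v_sketch(pc);
      return value + length;
    }

Returning the pair is what allows the surrounding hunks to delete the standalone `uint32_t length;` declarations.
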
@@ -2075,7 +2068,9 @@ class ModuleDecoderTemplate : public Decoder {
decoder.DecodeFunctionBody();
- tracer_.InitializerExpression(pc_, decoder.end(), expected);
+ if (tracer_) {
+ tracer_->InitializerExpression(pc_, decoder.end(), expected);
+ }
this->pc_ = decoder.end();
if (decoder.failed()) {
@@ -2094,37 +2089,30 @@ class ModuleDecoderTemplate : public Decoder {
// Read a mutability flag
bool consume_mutability() {
- tracer_.Bytes(pc_, 1);
+ if (tracer_) tracer_->Bytes(pc_, 1);
byte val = consume_u8("mutability");
- tracer_.Description(val == 0 ? " immutable"
- : val == 1 ? " mutable"
- : " invalid");
+ if (tracer_) {
+ tracer_->Description(val == 0 ? " immutable"
+ : val == 1 ? " mutable"
+ : " invalid");
+ }
if (val > 1) error(pc_ - 1, "invalid mutability");
return val != 0;
}
ValueType consume_value_type() {
- uint32_t type_length;
- ValueType result = value_type_reader::read_value_type<FullValidationTag>(
- this, pc_, &type_length,
- origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
- value_type_reader::ValidateValueType<FullValidationTag>(
- this, pc_, module_.get(), result);
- tracer_.Bytes(pc_, type_length);
- tracer_.Description(result);
- consume_bytes(type_length, "value type");
- return result;
- }
-
- HeapType consume_super_type() {
- uint32_t type_length;
- HeapType result = value_type_reader::read_heap_type<FullValidationTag>(
- this, pc_, &type_length, enabled_features_);
+ auto [result, length] =
+ value_type_reader::read_value_type<FullValidationTag>(
+ this, pc_,
+ module_->origin == kWasmOrigin ? enabled_features_
+ : WasmFeatures::None());
value_type_reader::ValidateValueType<FullValidationTag>(
this, pc_, module_.get(), result);
- tracer_.Bytes(pc_, type_length);
- tracer_.Description(result);
- consume_bytes(type_length, "heap type");
+ if (tracer_) {
+ tracer_->Bytes(pc_, length);
+ tracer_->Description(result);
+ }
+ consume_bytes(length, "value type");
return result;
}
@@ -2144,7 +2132,7 @@ class ModuleDecoderTemplate : public Decoder {
}
const FunctionSig* consume_sig(Zone* zone) {
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
// Parse parameter types.
uint32_t param_count =
consume_count("param count", kV8MaxWasmFunctionParams);
@@ -2152,9 +2140,9 @@ class ModuleDecoderTemplate : public Decoder {
std::vector<ValueType> params;
for (uint32_t i = 0; ok() && i < param_count; ++i) {
params.push_back(consume_value_type());
- tracer_.NextLineIfFull();
+ if (tracer_) tracer_->NextLineIfFull();
}
- tracer_.NextLineIfNonEmpty();
+ if (tracer_) tracer_->NextLineIfNonEmpty();
if (failed()) return nullptr;
// Parse return types.
@@ -2164,9 +2152,9 @@ class ModuleDecoderTemplate : public Decoder {
if (failed()) return nullptr;
for (uint32_t i = 0; ok() && i < return_count; ++i) {
returns.push_back(consume_value_type());
- tracer_.NextLineIfFull();
+ if (tracer_) tracer_->NextLineIfFull();
}
- tracer_.NextLineIfNonEmpty();
+ if (tracer_) tracer_->NextLineIfNonEmpty();
if (failed()) return nullptr;
// FunctionSig stores the return types first.
@@ -2187,17 +2175,20 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < field_count; ++i) {
fields[i] = consume_storage_type();
mutabilities[i] = consume_mutability();
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
}
if (failed()) return nullptr;
uint32_t* offsets = zone->NewArray<uint32_t>(field_count);
- return zone->New<StructType>(field_count, offsets, fields, mutabilities);
+ StructType* result =
+ zone->New<StructType>(field_count, offsets, fields, mutabilities);
+ result->InitializeOffsets();
+ return result;
}
const ArrayType* consume_array(Zone* zone) {
ValueType element_type = consume_storage_type();
bool mutability = consume_mutability();
- tracer_.NextLine();
+ if (tracer_) tracer_->NextLine();
if (failed()) return nullptr;
return zone->New<ArrayType>(element_type, mutability);
}
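
`consume_struct` above now calls `InitializeOffsets()` on the freshly built `StructType`, so field offsets are fixed at decode time rather than computed later. A rough sketch of what such an offset pass does, with made-up sizes and a naive power-of-two alignment rule (illustrative only, not V8's layout code):

    #include <cstdint>
    #include <vector>

    struct FieldLayout {
      uint32_t size;    // byte size of the field's storage type (power of two)
      uint32_t offset;  // filled in below
    };

    // Assign increasing offsets, aligning each field to its own size.
    uint32_t InitializeOffsetsSketch(std::vector<FieldLayout>& fields) {
      uint32_t offset = 0;
      for (FieldLayout& f : fields) {
        uint32_t align = f.size;  // assumes power-of-two field sizes
        offset = (offset + align - 1) & ~(align - 1);
        f.offset = offset;
        offset += f.size;
      }
      return offset;  // total payload size before trailing padding
    }
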
@@ -2206,7 +2197,7 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t consume_exception_attribute() {
const byte* pos = pc_;
uint32_t attribute = consume_u32v("exception attribute");
- tracer_.Bytes(pos, static_cast<uint32_t>(pc_ - pos));
+ if (tracer_) tracer_->Bytes(pos, static_cast<uint32_t>(pc_ - pos));
if (attribute != kExceptionAttribute) {
errorf(pos, "exception attribute %u not supported", attribute);
return 0;
@@ -2243,11 +2234,12 @@ class ModuleDecoderTemplate : public Decoder {
: WasmElemSegment::kStatusPassive
: WasmElemSegment::kStatusActive;
const bool is_active = status == WasmElemSegment::kStatusActive;
- // clang-format off
- tracer_.Description(status == WasmElemSegment::kStatusActive ? "active" :
- status == WasmElemSegment::kStatusPassive ? "passive," :
- "declarative,");
- // clang-format on
+ if (tracer_) {
+ tracer_->Description(status == WasmElemSegment::kStatusActive ? "active"
+ : status == WasmElemSegment::kStatusPassive
+ ? "passive,"
+ : "declarative,");
+ }
WasmElemSegment::ElementType element_type =
flag & kExpressionsAsElementsMask
@@ -2259,7 +2251,7 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t table_index = 0;
if (has_table_index) {
table_index = consume_u32v(", table index", tracer_);
- tracer_.Description(table_index);
+ if (tracer_) tracer_->Description(table_index);
}
if (V8_UNLIKELY(is_active && table_index >= module_->tables.size())) {
errorf(pos, "out of bounds%s table index %u",
@@ -2271,8 +2263,10 @@ class ModuleDecoderTemplate : public Decoder {
ConstantExpression offset;
if (is_active) {
- tracer_.Description(", offset:");
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(", offset:");
+ tracer_->NextLine();
+ }
offset = consume_init_expr(module_.get(), kWasmI32);
// Failed to parse offset initializer, return early.
if (failed()) return {};
@@ -2286,9 +2280,9 @@ class ModuleDecoderTemplate : public Decoder {
if (backwards_compatible_mode) {
type = kWasmFuncRef;
} else {
- tracer_.Description(" element type:");
+ if (tracer_) tracer_->Description(" element type:");
type = consume_value_type();
- if (type == kWasmBottom) return {};
+ if (failed()) return {};
}
if (V8_UNLIKELY(is_active &&
!IsSubtypeOf(type, table_type, this->module_.get()))) {
@@ -2334,10 +2328,14 @@ class ModuleDecoderTemplate : public Decoder {
}
}
+ uint32_t num_elem =
+ consume_count("number of elements", max_table_init_entries());
+
if (is_active) {
- return {type, table_index, std::move(offset), element_type};
+ return {type, table_index, std::move(offset),
+ element_type, num_elem, pc_offset()};
} else {
- return {type, status, element_type};
+ return {type, status, element_type, num_elem, pc_offset()};
}
}
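
The element-segment hunk above now stores the entry count and the current decoder offset in the segment (the extra `num_elem, pc_offset()` constructor arguments), which lets callers such as wasm-disassembler.cc re-decode the individual entries later through `consume_element_segment_entry`. A small sketch of that store-offset-and-count idea, with hypothetical types:

    #include <cstdint>
    #include <vector>

    // Hypothetical segment header: remember where the entries start and how
    // many there are instead of materializing them in the first pass.
    struct SegmentHeader {
      uint32_t entry_count;
      uint32_t entries_offset;  // byte offset into the wire bytes
    };

    struct Entry { uint32_t func_index; };

    // Hypothetical second pass: decode entries on demand from the stored
    // offset (single-byte indices only, to keep the sketch short).
    std::vector<Entry> DecodeEntriesSketch(const std::vector<uint8_t>& wire_bytes,
                                           const SegmentHeader& header) {
      std::vector<Entry> entries;
      entries.reserve(header.entry_count);
      uint32_t pos = header.entries_offset;
      for (uint32_t i = 0; i < header.entry_count; ++i) {
        entries.push_back(Entry{wire_bytes.at(pos++)});
      }
      return entries;
    }
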
@@ -2345,12 +2343,14 @@ class ModuleDecoderTemplate : public Decoder {
ConstantExpression* offset) {
const byte* pos = pc();
uint32_t flag = consume_u32v("flag: ", tracer_);
- tracer_.Description(flag == SegmentFlags::kActiveNoIndex ? "active no index"
- : flag == SegmentFlags::kPassive ? "passive"
- : flag == SegmentFlags::kActiveWithIndex
- ? "active with index"
- : "unknown");
- tracer_.NextLine();
+ if (tracer_) {
+ tracer_->Description(
+ flag == SegmentFlags::kActiveNoIndex ? "active no index"
+ : flag == SegmentFlags::kPassive ? "passive"
+ : flag == SegmentFlags::kActiveWithIndex ? "active with index"
+ : "unknown");
+ tracer_->NextLine();
+ }
// Some flag values are only valid for specific proposals.
if (flag != SegmentFlags::kActiveNoIndex &&
@@ -2375,21 +2375,21 @@ class ModuleDecoderTemplate : public Decoder {
if (flag == SegmentFlags::kActiveWithIndex) {
*is_active = true;
*index = consume_u32v("memory index", tracer_);
- tracer_.Description(*index);
+ if (tracer_) tracer_->Description(*index);
*offset = consume_init_expr(module_.get(), expected_type);
}
}
- uint32_t consume_element_func_index(ValueType expected) {
+ uint32_t consume_element_func_index(WasmModule* module, ValueType expected) {
WasmFunction* func = nullptr;
const byte* initial_pc = pc();
- uint32_t index = consume_func_index(module_.get(), &func);
- tracer_.NextLine();
+ uint32_t index = consume_func_index(module, &func);
+ if (tracer_) tracer_->NextLine();
if (failed()) return index;
DCHECK_NOT_NULL(func);
DCHECK_EQ(index, func->func_index);
ValueType entry_type = ValueType::Ref(func->sig_index);
- if (V8_UNLIKELY(!IsSubtypeOf(entry_type, expected, module_.get()))) {
+ if (V8_UNLIKELY(!IsSubtypeOf(entry_type, expected, module))) {
errorf(initial_pc,
"Invalid type in element entry: expected %s, got %s instead.",
expected.name().c_str(), entry_type.name().c_str());
@@ -2399,89 +2399,36 @@ class ModuleDecoderTemplate : public Decoder {
return index;
}
- // A task that validates multiple functions in parallel, storing the earliest
- // validation error in {this} decoder.
- class ValidateFunctionsTask : public JobTask {
- public:
- ValidateFunctionsTask(ModuleDecoderTemplate* decoder)
- : decoder_(decoder),
- next_function_(decoder->module_->num_imported_functions),
- after_last_function_(next_function_ +
- decoder->module_->num_declared_functions) {}
-
- void Run(JobDelegate* delegate) override {
- AccountingAllocator* allocator = decoder_->module_->allocator();
- do {
- // Get the index of the next function to validate.
- // {fetch_add} might overrun {after_last_function_} by a bit. Since the
- // number of functions is limited to a value much smaller than the
- // integer range, this is highly unlikely.
- static_assert(kV8MaxWasmFunctions < kMaxInt / 2);
- int func_index = next_function_.fetch_add(1, std::memory_order_relaxed);
- if (V8_UNLIKELY(func_index >= after_last_function_)) return;
- DCHECK_LE(0, func_index);
-
- if (!ValidateFunction(allocator, func_index)) {
- // No need to validate any more functions.
- next_function_.store(after_last_function_, std::memory_order_relaxed);
- return;
- }
- } while (!delegate->ShouldYield());
- }
-
- size_t GetMaxConcurrency(size_t /* worker_count */) const override {
- int next_func = next_function_.load(std::memory_order_relaxed);
- return std::max(0, after_last_function_ - next_func);
- }
-
- private:
- // Validate a single function; use {SetError} on errors.
- bool ValidateFunction(AccountingAllocator* allocator, int func_index) {
- DCHECK(!decoder_->module_->function_was_validated(func_index));
- WasmFeatures unused_detected_features;
- const WasmFunction& function = decoder_->module_->functions[func_index];
- FunctionBody body{function.sig, function.code.offset(),
- decoder_->start_ + function.code.offset(),
- decoder_->start_ + function.code.end_offset()};
- DecodeResult validation_result = ValidateFunctionBody(
- allocator, decoder_->enabled_features_, decoder_->module_.get(),
- &unused_detected_features, body);
- if (V8_UNLIKELY(validation_result.failed())) {
- SetError(func_index, std::move(validation_result).error());
- return false;
- }
- decoder_->module_->set_function_validated(func_index);
- return true;
- }
-
- // Set the error from the argument if it's earlier than the error we already
- // have (or if we have none yet). Thread-safe.
- void SetError(int func_index, WasmError error) {
- base::MutexGuard mutex_guard{&set_error_mutex_};
- if (decoder_->error_.empty() ||
- decoder_->error_.offset() > error.offset()) {
- // Wrap the error message from the function decoder.
- const WasmFunction& function = decoder_->module_->functions[func_index];
- WasmFunctionName func_name{
- &function,
- ModuleWireBytes{decoder_->start_, decoder_->end_}.GetNameOrNull(
- &function, decoder_->module_.get())};
- std::ostringstream error_msg;
- error_msg << "in function " << func_name << ": " << error.message();
- decoder_->error_ = WasmError{error.offset(), error_msg.str()};
- }
- DCHECK(!decoder_->ok());
- }
+ const WasmFeatures enabled_features_;
+ const std::shared_ptr<WasmModule> module_;
+ const byte* module_start_ = nullptr;
+ const byte* module_end_ = nullptr;
+ ITracer* tracer_;
+ // The type section is the first section in a module.
+ uint8_t next_ordered_section_ = kFirstSectionInModule;
+ // We store next_ordered_section_ as uint8_t instead of SectionCode so that
+ // we can increment it. This static_assert should make sure that SectionCode
+ // does not get bigger than uint8_t accidentally.
+ static_assert(sizeof(ModuleDecoderImpl::next_ordered_section_) ==
+ sizeof(SectionCode),
+ "type mismatch");
+ uint32_t seen_unordered_sections_ = 0;
+ static_assert(kBitsPerByte *
+ sizeof(ModuleDecoderImpl::seen_unordered_sections_) >
+ kLastKnownModuleSection,
+ "not enough bits");
+ AccountingAllocator allocator_;
+ Zone init_expr_zone_{&allocator_, "constant expr. zone"};
- ModuleDecoderTemplate* decoder_;
- base::Mutex set_error_mutex_;
- std::atomic<int> next_function_;
- const int after_last_function_;
- };
+ // Instruction traces are decoded in DecodeInstTraceSection as a 3-tuple
+ // of the function index, function offset, and mark_id. In DecodeCodeSection,
+ // after the functions have been decoded, these are translated to pairs of
+ // module offsets and mark ids.
+ std::vector<std::tuple<uint32_t, uint32_t, uint32_t>> inst_traces_;
};
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm
+
+#undef TRACE
#endif // V8_WASM_MODULE_DECODER_IMPL_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 21a5bf2f2f..82e224899a 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -5,6 +5,7 @@
#include "src/wasm/module-decoder.h"
#include "src/logging/metrics.h"
+#include "src/tracing/trace-event.h"
#include "src/wasm/constant-expression.h"
#include "src/wasm/decoder.h"
#include "src/wasm/module-decoder-impl.h"
@@ -69,86 +70,72 @@ const char* SectionName(SectionCode code) {
}
}
-// Ideally we'd just say:
-// using ModuleDecoderImpl = ModuleDecoderTemplate<NoTracer>
-// but that doesn't work with the forward declaration in the header file.
-class ModuleDecoderImpl : public ModuleDecoderTemplate<NoTracer> {
- public:
- ModuleDecoderImpl(const WasmFeatures& enabled, ModuleOrigin origin)
- : ModuleDecoderTemplate<NoTracer>(enabled, origin, no_tracer_) {}
-
- ModuleDecoderImpl(const WasmFeatures& enabled, const byte* module_start,
- const byte* module_end, ModuleOrigin origin)
- : ModuleDecoderTemplate<NoTracer>(enabled, module_start, module_end,
- origin, no_tracer_) {}
-
- private:
- NoTracer no_tracer_;
-};
-
ModuleResult DecodeWasmModule(
- const WasmFeatures& enabled, const byte* module_start,
- const byte* module_end, bool validate_functions, ModuleOrigin origin,
- Counters* counters, std::shared_ptr<metrics::Recorder> metrics_recorder,
- v8::metrics::Recorder::ContextId context_id, DecodingMethod decoding_method,
- AccountingAllocator* allocator) {
- size_t size = module_end - module_start;
- CHECK_LE(module_start, module_end);
+ WasmFeatures enabled_features, base::Vector<const uint8_t> wire_bytes,
+ bool validate_functions, ModuleOrigin origin, Counters* counters,
+ std::shared_ptr<metrics::Recorder> metrics_recorder,
+ v8::metrics::Recorder::ContextId context_id,
+ DecodingMethod decoding_method) {
size_t max_size = max_module_size();
- if (size > max_size) {
- return ModuleResult{
- WasmError{0, "size > maximum module size (%zu): %zu", max_size, size}};
+ if (wire_bytes.size() > max_size) {
+ return ModuleResult{WasmError{0, "size > maximum module size (%zu): %zu",
+ max_size, wire_bytes.size()}};
}
- // TODO(bradnelson): Improve histogram handling of size_t.
- auto size_counter =
- SELECT_WASM_COUNTER(counters, origin, wasm, module_size_bytes);
- size_counter->AddSample(static_cast<int>(size));
- // Signatures are stored in zone memory, which have the same lifetime
- // as the {module}.
- ModuleDecoderImpl decoder(enabled, module_start, module_end, origin);
+ if (counters) {
+ auto size_counter =
+ SELECT_WASM_COUNTER(counters, origin, wasm, module_size_bytes);
+ static_assert(kV8MaxWasmModuleSize < kMaxInt);
+ size_counter->AddSample(static_cast<int>(wire_bytes.size()));
+ }
+
v8::metrics::WasmModuleDecoded metrics_event;
base::ElapsedTimer timer;
timer.Start();
- base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported()
- ? base::ThreadTicks::Now()
- : base::ThreadTicks();
- ModuleResult result =
- decoder.DecodeModule(counters, allocator, validate_functions);
+ ModuleResult result = DecodeWasmModule(enabled_features, wire_bytes,
+ validate_functions, origin);
+ if (counters && result.ok()) {
+ auto counter =
+ SELECT_WASM_COUNTER(counters, origin, wasm_functions_per, module);
+ counter->AddSample(
+ static_cast<int>(result.value()->num_declared_functions));
+ }
// Record event metrics.
metrics_event.wall_clock_duration_in_us = timer.Elapsed().InMicroseconds();
timer.Stop();
- if (!thread_ticks.IsNull()) {
- metrics_event.cpu_duration_in_us =
- (base::ThreadTicks::Now() - thread_ticks).InMicroseconds();
- }
- metrics_event.success = decoder.ok() && result.ok();
+ metrics_event.success = result.ok();
metrics_event.async = decoding_method == DecodingMethod::kAsync ||
decoding_method == DecodingMethod::kAsyncStream;
metrics_event.streamed = decoding_method == DecodingMethod::kSyncStream ||
decoding_method == DecodingMethod::kAsyncStream;
if (result.ok()) {
metrics_event.function_count = result.value()->num_declared_functions;
- } else if (auto&& module = decoder.shared_module()) {
- metrics_event.function_count = module->num_declared_functions;
}
- metrics_event.module_size_in_bytes = size;
+ metrics_event.module_size_in_bytes = wire_bytes.size();
metrics_recorder->DelayMainThreadEvent(metrics_event, context_id);
return result;
}
-ModuleResult DecodeWasmModuleForDisassembler(const byte* module_start,
- const byte* module_end,
- AccountingAllocator* allocator) {
- constexpr bool validate_functions = false;
- ModuleDecoderImpl decoder(WasmFeatures::All(), module_start, module_end,
- kWasmOrigin);
- return decoder.DecodeModule(nullptr, allocator, validate_functions);
+ModuleResult DecodeWasmModule(WasmFeatures enabled_features,
+ base::Vector<const uint8_t> wire_bytes,
+ bool validate_functions, ModuleOrigin origin) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.DecodeWasmModule");
+ ModuleDecoderImpl decoder{enabled_features, wire_bytes, origin};
+ return decoder.DecodeModule(validate_functions);
+}
+
+ModuleResult DecodeWasmModuleForDisassembler(
+ base::Vector<const uint8_t> wire_bytes) {
+ constexpr bool kNoValidateFunctions = false;
+ ModuleDecoderImpl decoder{WasmFeatures::All(), wire_bytes, kWasmOrigin};
+ return decoder.DecodeModule(kNoValidateFunctions);
}
-ModuleDecoder::ModuleDecoder(const WasmFeatures& enabled)
- : enabled_features_(enabled) {}
+ModuleDecoder::ModuleDecoder(WasmFeatures enabled_features)
+ : impl_(std::make_unique<ModuleDecoderImpl>(
+ enabled_features, base::Vector<const uint8_t>{}, kWasmOrigin)) {}
ModuleDecoder::~ModuleDecoder() = default;
@@ -156,15 +143,6 @@ const std::shared_ptr<WasmModule>& ModuleDecoder::shared_module() const {
return impl_->shared_module();
}
-void ModuleDecoder::StartDecoding(
- Counters* counters, std::shared_ptr<metrics::Recorder> metrics_recorder,
- v8::metrics::Recorder::ContextId context_id, AccountingAllocator* allocator,
- ModuleOrigin origin) {
- DCHECK_NULL(impl_);
- impl_.reset(new ModuleDecoderImpl(enabled_features_, origin));
- impl_->StartDecoding(counters, allocator);
-}
-
void ModuleDecoder::DecodeModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) {
impl_->DecodeModuleHeader(bytes, offset);
@@ -198,43 +176,36 @@ size_t ModuleDecoder::IdentifyUnknownSection(ModuleDecoder* decoder,
SectionCode* result) {
if (!decoder->ok()) return 0;
decoder->impl_->Reset(bytes, offset);
- NoTracer no_tracer;
- *result = IdentifyUnknownSectionInternal(decoder->impl_.get(), no_tracer);
+ *result =
+ IdentifyUnknownSectionInternal(decoder->impl_.get(), ITracer::NoTrace);
return decoder->impl_->pc() - bytes.begin();
}
bool ModuleDecoder::ok() { return impl_->ok(); }
Result<const FunctionSig*> DecodeWasmSignatureForTesting(
- const WasmFeatures& enabled, Zone* zone, const byte* start,
- const byte* end) {
- ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
- return decoder.toResult(decoder.DecodeFunctionSignature(zone, start));
+ WasmFeatures enabled_features, Zone* zone,
+ base::Vector<const uint8_t> bytes) {
+ ModuleDecoderImpl decoder{enabled_features, bytes, kWasmOrigin};
+ return decoder.toResult(decoder.DecodeFunctionSignature(zone, bytes.begin()));
}
-ConstantExpression DecodeWasmInitExprForTesting(const WasmFeatures& enabled,
- const byte* start,
- const byte* end,
- ValueType expected) {
- ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
- AccountingAllocator allocator;
- decoder.StartDecoding(nullptr, &allocator);
+ConstantExpression DecodeWasmInitExprForTesting(
+ WasmFeatures enabled_features, base::Vector<const uint8_t> bytes,
+ ValueType expected) {
+ ModuleDecoderImpl decoder{enabled_features, bytes, kWasmOrigin};
return decoder.DecodeInitExprForTesting(expected);
}
FunctionResult DecodeWasmFunctionForTesting(
- const WasmFeatures& enabled, Zone* zone, const ModuleWireBytes& wire_bytes,
- const WasmModule* module, const byte* function_start,
- const byte* function_end, Counters* counters) {
- size_t size = function_end - function_start;
- CHECK_LE(function_start, function_end);
- if (size > kV8MaxWasmFunctionSize) {
- return FunctionResult{WasmError{0,
- "size > maximum function size (%zu): %zu",
- kV8MaxWasmFunctionSize, size}};
+ WasmFeatures enabled_features, Zone* zone, ModuleWireBytes wire_bytes,
+ const WasmModule* module, base::Vector<const uint8_t> function_bytes) {
+ if (function_bytes.size() > kV8MaxWasmFunctionSize) {
+ return FunctionResult{
+ WasmError{0, "size > maximum function size (%zu): %zu",
+ kV8MaxWasmFunctionSize, function_bytes.size()}};
}
- ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin);
- decoder.SetCounters(counters);
+ ModuleDecoderImpl decoder{enabled_features, function_bytes, kWasmOrigin};
return decoder.DecodeSingleFunctionForTesting(zone, wire_bytes, module);
}
@@ -294,9 +265,9 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(
return decoder.toResult(AsmJsOffsets{std::move(functions)});
}
-std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
- const byte* end) {
- Decoder decoder(start, end);
+std::vector<CustomSectionOffset> DecodeCustomSections(
+ base::Vector<const uint8_t> bytes) {
+ Decoder decoder(bytes);
decoder.consume_bytes(4, "wasm magic");
decoder.consume_bytes(4, "wasm version");
@@ -336,8 +307,7 @@ bool FindNameSection(Decoder* decoder) {
static constexpr int kModuleHeaderSize = 8;
decoder->consume_bytes(kModuleHeaderSize, "module header");
- NoTracer no_tracer;
- WasmSectionIterator section_iter(decoder, no_tracer);
+ WasmSectionIterator section_iter(decoder, ITracer::NoTrace);
while (decoder->ok() && section_iter.more() &&
section_iter.section_code() != kNameSectionCode) {
@@ -350,10 +320,10 @@ bool FindNameSection(Decoder* decoder) {
return true;
}
-enum EmptyNames : bool { kAllowEmptyNames, kSkipEmptyNames };
+enum class EmptyNames : bool { kAllow, kSkip };
-void DecodeNameMap(NameMap& target, Decoder& decoder,
- EmptyNames empty_names = kSkipEmptyNames) {
+void DecodeNameMapInternal(NameMap& target, Decoder& decoder,
+ EmptyNames empty_names = EmptyNames::kSkip) {
uint32_t count = decoder.consume_u32v("names count");
for (uint32_t i = 0; i < count; i++) {
uint32_t index = decoder.consume_u32v("index");
@@ -361,20 +331,35 @@ void DecodeNameMap(NameMap& target, Decoder& decoder,
consume_string(&decoder, unibrow::Utf8Variant::kLossyUtf8, "name");
if (!decoder.ok()) break;
if (index > NameMap::kMaxKey) continue;
- if (empty_names == kSkipEmptyNames && name.is_empty()) continue;
+ if (empty_names == EmptyNames::kSkip && name.is_empty()) continue;
if (!validate_utf8(&decoder, name)) continue;
target.Put(index, name);
}
target.FinishInitialization();
}
-void DecodeIndirectNameMap(IndirectNameMap& target, Decoder& decoder) {
+void DecodeNameMap(NameMap& target, Decoder& decoder,
+ uint32_t subsection_payload_length,
+ EmptyNames empty_names = EmptyNames::kSkip) {
+ if (target.is_set()) {
+ decoder.consume_bytes(subsection_payload_length);
+ return;
+ }
+ DecodeNameMapInternal(target, decoder, empty_names);
+}
+
+void DecodeIndirectNameMap(IndirectNameMap& target, Decoder& decoder,
+ uint32_t subsection_payload_length) {
+ if (target.is_set()) {
+ decoder.consume_bytes(subsection_payload_length);
+ return;
+ }
uint32_t outer_count = decoder.consume_u32v("outer count");
for (uint32_t i = 0; i < outer_count; ++i) {
uint32_t outer_index = decoder.consume_u32v("outer index");
if (outer_index > IndirectNameMap::kMaxKey) continue;
NameMap names;
- DecodeNameMap(names, decoder);
+ DecodeNameMapInternal(names, decoder);
target.Put(outer_index, std::move(names));
if (!decoder.ok()) break;
}
@@ -383,9 +368,9 @@ void DecodeIndirectNameMap(IndirectNameMap& target, Decoder& decoder) {
} // namespace
-void DecodeFunctionNames(const byte* module_start, const byte* module_end,
+void DecodeFunctionNames(base::Vector<const uint8_t> wire_bytes,
NameMap& names) {
- Decoder decoder(module_start, module_end);
+ Decoder decoder(wire_bytes);
if (FindNameSection(&decoder)) {
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -399,7 +384,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
continue;
}
// We need to allow empty function names for spec-conformant stack traces.
- DecodeNameMap(names, decoder, kAllowEmptyNames);
+ DecodeNameMapInternal(names, decoder, EmptyNames::kAllow);
// The spec allows only one occurrence of each subsection. We could be
// more permissive and allow repeated subsections; in that case we'd
// have to delay calling {target.FinishInitialization()} on the function
@@ -411,6 +396,150 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
+namespace {
+// A task that validates multiple functions in parallel, storing the earliest
+// validation error via the {error_out} pointer.
+class ValidateFunctionsTask : public JobTask {
+ public:
+ explicit ValidateFunctionsTask(base::Vector<const uint8_t> wire_bytes,
+ const WasmModule* module,
+ WasmFeatures enabled_features,
+ std::function<bool(int)> filter,
+ WasmError* error_out)
+ : wire_bytes_(wire_bytes),
+ module_(module),
+ enabled_features_(enabled_features),
+ filter_(std::move(filter)),
+ next_function_(module->num_imported_functions),
+ after_last_function_(next_function_ + module->num_declared_functions),
+ error_out_(error_out) {
+ DCHECK(!error_out->has_error());
+ }
+
+ void Run(JobDelegate* delegate) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.ValidateFunctionsTask");
+ do {
+ // Get the index of the next function to validate.
+ // {fetch_add} might overrun {after_last_function_} by a bit. Since the
+ // number of functions is limited to a value much smaller than the
+ // integer range, this is highly unlikely.
+ static_assert(kV8MaxWasmFunctions < kMaxInt / 2);
+ int func_index;
+ do {
+ func_index = next_function_.fetch_add(1, std::memory_order_relaxed);
+ if (V8_UNLIKELY(func_index >= after_last_function_)) return;
+ DCHECK_LE(0, func_index);
+ } while ((filter_ && !filter_(func_index)) ||
+ module_->function_was_validated(func_index));
+
+ if (!ValidateFunction(func_index)) {
+ // No need to validate any more functions.
+ next_function_.store(after_last_function_, std::memory_order_relaxed);
+ return;
+ }
+ } while (!delegate->ShouldYield());
+ }
+
+ size_t GetMaxConcurrency(size_t /* worker_count */) const override {
+ int next_func = next_function_.load(std::memory_order_relaxed);
+ return std::max(0, after_last_function_ - next_func);
+ }
+
+ private:
+ bool ValidateFunction(int func_index) {
+ WasmFeatures unused_detected_features;
+ const WasmFunction& function = module_->functions[func_index];
+ FunctionBody body{function.sig, function.code.offset(),
+ wire_bytes_.begin() + function.code.offset(),
+ wire_bytes_.begin() + function.code.end_offset()};
+ DecodeResult validation_result = ValidateFunctionBody(
+ enabled_features_, module_, &unused_detected_features, body);
+ if (V8_UNLIKELY(validation_result.failed())) {
+ SetError(func_index, std::move(validation_result).error());
+ return false;
+ }
+ module_->set_function_validated(func_index);
+ return true;
+ }
+
+ // Set the error from the argument if it's earlier than the error we already
+ // have (or if we have none yet). Thread-safe.
+ void SetError(int func_index, WasmError error) {
+ base::MutexGuard mutex_guard{&set_error_mutex_};
+ if (error_out_->has_error() && error_out_->offset() <= error.offset()) {
+ return;
+ }
+ *error_out_ = GetWasmErrorWithName(wire_bytes_, func_index, module_, error);
+ }
+
+ const base::Vector<const uint8_t> wire_bytes_;
+ const WasmModule* const module_;
+ const WasmFeatures enabled_features_;
+ const std::function<bool(int)> filter_;
+ std::atomic<int> next_function_;
+ const int after_last_function_;
+ base::Mutex set_error_mutex_;
+ WasmError* const error_out_;
+};
+} // namespace
+
+WasmError ValidateFunctions(const WasmModule* module,
+ WasmFeatures enabled_features,
+ base::Vector<const uint8_t> wire_bytes,
+ std::function<bool(int)> filter) {
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.ValidateFunctions", "num_declared_functions",
+ module->num_declared_functions, "has_filter", filter != nullptr);
+ DCHECK_EQ(kWasmOrigin, module->origin);
+
+ class NeverYieldDelegate final : public JobDelegate {
+ public:
+ bool ShouldYield() override { return false; }
+
+ bool IsJoiningThread() const override { UNIMPLEMENTED(); }
+ void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); }
+ uint8_t GetTaskId() override { UNIMPLEMENTED(); }
+ };
+
+ // Create a {ValidateFunctionsTask} to validate all functions. The earliest
+ // error found will be returned via {validation_error}.
+ WasmError validation_error;
+ std::unique_ptr<JobTask> validate_job =
+ std::make_unique<ValidateFunctionsTask>(
+ wire_bytes, module, enabled_features, std::move(filter),
+ &validation_error);
+
+ if (v8_flags.single_threaded) {
+ // In single-threaded mode, run the {ValidateFunctionsTask} synchronously.
+ NeverYieldDelegate delegate;
+ validate_job->Run(&delegate);
+ } else {
+ // Spawn the task and join it.
+ std::unique_ptr<JobHandle> job_handle = V8::GetCurrentPlatform()->CreateJob(
+ TaskPriority::kUserVisible, std::move(validate_job));
+ job_handle->Join();
+ }
+
+ return validation_error;
+}
+
+WasmError GetWasmErrorWithName(base::Vector<const uint8_t> wire_bytes,
+ int func_index, const WasmModule* module,
+ WasmError error) {
+ WasmName name = ModuleWireBytes{wire_bytes}.GetNameOrNull(func_index, module);
+ if (name.begin() == nullptr) {
+ return WasmError(error.offset(), "Compiling function #%d failed: %s",
+ func_index, error.message().c_str());
+ } else {
+ TruncatedUserString<> truncated_name(name);
+ return WasmError(error.offset(),
+ "Compiling function #%d:\"%.*s\" failed: %s", func_index,
+ truncated_name.length(), truncated_name.start(),
+ error.message().c_str());
+ }
+}
+
DecodedNameSection::DecodedNameSection(base::Vector<const uint8_t> wire_bytes,
WireBytesRef name_section) {
if (name_section.is_empty()) return; // No name section.
@@ -431,61 +560,47 @@ DecodedNameSection::DecodedNameSection(base::Vector<const uint8_t> wire_bytes,
decoder.consume_bytes(name_payload_len);
break;
case kLocalCode:
- if (local_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmFunctions <= IndirectNameMap::kMaxKey);
static_assert(kV8MaxWasmFunctionLocals <= NameMap::kMaxKey);
- DecodeIndirectNameMap(local_names_, decoder);
+ DecodeIndirectNameMap(local_names_, decoder, name_payload_len);
break;
case kLabelCode:
- if (label_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmFunctions <= IndirectNameMap::kMaxKey);
static_assert(kV8MaxWasmFunctionSize <= NameMap::kMaxKey);
- DecodeIndirectNameMap(label_names_, decoder);
+ DecodeIndirectNameMap(label_names_, decoder, name_payload_len);
break;
case kTypeCode:
- if (type_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmTypes <= NameMap::kMaxKey);
- DecodeNameMap(type_names_, decoder);
+ DecodeNameMap(type_names_, decoder, name_payload_len);
break;
case kTableCode:
- if (table_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmTables <= NameMap::kMaxKey);
- DecodeNameMap(table_names_, decoder);
+ DecodeNameMap(table_names_, decoder, name_payload_len);
break;
case kMemoryCode:
- if (memory_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmMemories <= NameMap::kMaxKey);
- DecodeNameMap(memory_names_, decoder);
+ DecodeNameMap(memory_names_, decoder, name_payload_len);
break;
case kGlobalCode:
- if (global_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmGlobals <= NameMap::kMaxKey);
- DecodeNameMap(global_names_, decoder);
+ DecodeNameMap(global_names_, decoder, name_payload_len);
break;
case kElementSegmentCode:
- if (element_segment_names_.is_set()) {
- decoder.consume_bytes(name_payload_len);
- }
static_assert(kV8MaxWasmTableInitEntries <= NameMap::kMaxKey);
- DecodeNameMap(element_segment_names_, decoder);
+ DecodeNameMap(element_segment_names_, decoder, name_payload_len);
break;
case kDataSegmentCode:
- if (data_segment_names_.is_set()) {
- decoder.consume_bytes(name_payload_len);
- }
static_assert(kV8MaxWasmDataSegments <= NameMap::kMaxKey);
- DecodeNameMap(data_segment_names_, decoder);
+ DecodeNameMap(data_segment_names_, decoder, name_payload_len);
break;
case kFieldCode:
- if (field_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmTypes <= IndirectNameMap::kMaxKey);
static_assert(kV8MaxWasmStructFields <= NameMap::kMaxKey);
- DecodeIndirectNameMap(field_names_, decoder);
+ DecodeIndirectNameMap(field_names_, decoder, name_payload_len);
break;
case kTagCode:
- if (tag_names_.is_set()) decoder.consume_bytes(name_payload_len);
static_assert(kV8MaxWasmTags <= NameMap::kMaxKey);
- DecodeNameMap(tag_names_, decoder);
+ DecodeNameMap(tag_names_, decoder, name_payload_len);
break;
}
}
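
The `ValidateFunctionsTask` added above hands function indices to worker threads with a relaxed `fetch_add`, skips indices rejected by the filter or already validated, and aborts remaining work by pushing the counter past the end once a failure is recorded. A self-contained sketch of that work-distribution scheme, using plain `std::thread` instead of the V8 job API:

    #include <atomic>
    #include <functional>
    #include <thread>
    #include <vector>

    // Hypothetical driver: validate indices [first, after_last) in parallel.
    void ValidateAllSketch(int first, int after_last, int num_workers,
                           const std::function<bool(int)>& validate_one) {
      std::atomic<int> next{first};
      auto worker = [&] {
        for (;;) {
          // Claim the next index; fetch_add may overrun after_last slightly.
          int index = next.fetch_add(1, std::memory_order_relaxed);
          if (index >= after_last) return;
          if (!validate_one(index)) {
            // Abort the remaining work by pushing the counter past the end.
            next.store(after_last, std::memory_order_relaxed);
            return;
          }
        }
      };
      std::vector<std::thread> workers;
      for (int i = 0; i < num_workers; ++i) workers.emplace_back(worker);
      for (auto& w : workers) w.join();
    }

The real task additionally reports the remaining work through `GetMaxConcurrency` and serializes error recording behind a mutex so that the lowest-offset error wins deterministically.
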
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 5123dc75be..96680020d8 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -84,34 +84,38 @@ enum class DecodingMethod {
kDeserialize
};
-// Decodes the bytes of a wasm module between {module_start} and {module_end}.
+// Decodes the bytes of a wasm module in {wire_bytes} while recording events and
+// updating counters.
V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(
- const WasmFeatures& enabled, const byte* module_start,
- const byte* module_end, bool verify_functions, ModuleOrigin origin,
- Counters* counters, std::shared_ptr<metrics::Recorder> metrics_recorder,
- v8::metrics::Recorder::ContextId context_id, DecodingMethod decoding_method,
- AccountingAllocator* allocator);
+ WasmFeatures enabled_features, base::Vector<const uint8_t> wire_bytes,
+ bool validate_functions, ModuleOrigin origin, Counters* counters,
+ std::shared_ptr<metrics::Recorder> metrics_recorder,
+ v8::metrics::Recorder::ContextId context_id,
+ DecodingMethod decoding_method);
+// Decodes the bytes of a wasm module in {wire_bytes} without recording events
+// or updating counters.
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(
+ WasmFeatures enabled_features, base::Vector<const uint8_t> wire_bytes,
+ bool validate_functions, ModuleOrigin origin);
// Stripped down version for disassembler needs.
-V8_EXPORT_PRIVATE ModuleResult DecodeWasmModuleForDisassembler(
- const byte* module_start, const byte* module_end,
- AccountingAllocator* allocator);
+V8_EXPORT_PRIVATE ModuleResult
+DecodeWasmModuleForDisassembler(base::Vector<const uint8_t> wire_bytes);
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone.
V8_EXPORT_PRIVATE Result<const FunctionSig*> DecodeWasmSignatureForTesting(
- const WasmFeatures& enabled, Zone* zone, const byte* start,
- const byte* end);
+ WasmFeatures enabled_features, Zone* zone,
+ base::Vector<const uint8_t> bytes);
-// Decodes the bytes of a wasm function between
-// {function_start} and {function_end}.
+// Decodes the bytes of a wasm function in {function_bytes} (part of
+// {wire_bytes}).
V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunctionForTesting(
- const WasmFeatures& enabled, Zone* zone, const ModuleWireBytes& wire_bytes,
- const WasmModule* module, const byte* function_start,
- const byte* function_end, Counters* counters);
+ WasmFeatures enabled_features, Zone* zone, ModuleWireBytes wire_bytes,
+ const WasmModule* module, base::Vector<const uint8_t> function_bytes);
-V8_EXPORT_PRIVATE ConstantExpression
-DecodeWasmInitExprForTesting(const WasmFeatures& enabled, const byte* start,
- const byte* end, ValueType expected);
+V8_EXPORT_PRIVATE ConstantExpression DecodeWasmInitExprForTesting(
+ WasmFeatures enabled_features, base::Vector<const uint8_t> bytes,
+ ValueType expected);
struct CustomSectionOffset {
WireBytesRef section;
@@ -120,7 +124,7 @@ struct CustomSectionOffset {
};
V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
- const byte* start, const byte* end);
+ base::Vector<const uint8_t> wire_bytes);
// Extracts the mapping from wasm byte offset to asm.js source position per
// function.
@@ -130,22 +134,28 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(
// Decode the function names from the name section. Returns the result as an
// unordered map. Only names with valid utf8 encoding are stored and conflicts
// are resolved by choosing the last name read.
-void DecodeFunctionNames(const byte* module_start, const byte* module_end,
+void DecodeFunctionNames(base::Vector<const uint8_t> wire_bytes,
NameMap& names);
+// Validate specific functions in the module. Return the first validation error
+// (deterministically), or an empty {WasmError} if all validated functions are
+// valid. {filter} determines which functions are validated. Pass an empty
+// function for "all functions". The {filter} callback needs to be thread-safe.
+V8_EXPORT_PRIVATE WasmError ValidateFunctions(
+ const WasmModule*, WasmFeatures enabled_features,
+ base::Vector<const uint8_t> wire_bytes, std::function<bool(int)> filter);
+
+WasmError GetWasmErrorWithName(base::Vector<const uint8_t> wire_bytes,
+ int func_index, const WasmModule* module,
+ WasmError error);
+
class ModuleDecoderImpl;
class ModuleDecoder {
public:
- explicit ModuleDecoder(const WasmFeatures& enabled);
+ explicit ModuleDecoder(WasmFeatures enabled_features);
~ModuleDecoder();
- void StartDecoding(Counters* counters,
- std::shared_ptr<metrics::Recorder> metrics_recorder,
- v8::metrics::Recorder::ContextId context_id,
- AccountingAllocator* allocator,
- ModuleOrigin origin = ModuleOrigin::kWasmOrigin);
-
void DecodeModuleHeader(base::Vector<const uint8_t> bytes, uint32_t offset);
void DecodeSection(SectionCode section_code,
@@ -175,7 +185,6 @@ class ModuleDecoder {
uint32_t offset, SectionCode* result);
private:
- const WasmFeatures enabled_features_;
std::unique_ptr<ModuleDecoderImpl> impl_;
};
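
The `ValidateFunctions` declaration above takes a `std::function<bool(int)>` filter: an empty function means "validate every declared function", while a callback restricts validation to selected indices and must be thread-safe because it runs on worker threads. A hypothetical call site inside `v8::internal::wasm`, assuming a decoded `module`, its `wire_bytes` and `enabled_features` are in scope (a usage sketch, not code from the tree):

    // Validate only a subset of functions; the callback is trivially
    // thread-safe since it captures nothing.
    WasmError error = ValidateFunctions(
        module, enabled_features, wire_bytes,
        [](int func_index) { return func_index >= 10; });
    if (error.has_error()) {
      // The lowest-offset error is reported deterministically, wrapped with
      // the function name by GetWasmErrorWithName().
    }

    // Passing nullptr (an empty std::function) validates everything, which is
    // how ValidateAllFunctions() in the module decoder now uses it.
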
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index d3dcda955c..6db5364f6a 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -7,6 +7,8 @@
#include "src/api/api-inl.h"
#include "src/asmjs/asm-js.h"
#include "src/base/atomicops.h"
+#include "src/codegen/compiler.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/numbers/conversions-inl.h"
@@ -17,6 +19,7 @@
#include "src/wasm/code-space-access.h"
#include "src/wasm/constant-expression-interface.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/module-decoder-impl.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
@@ -25,7 +28,6 @@
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes-inl.h"
#include "src/wasm/wasm-subtyping.h"
-#include "src/wasm/wasm-value.h"
#define TRACE(...) \
do { \
@@ -226,8 +228,310 @@ void CreateMapForType(Isolate* isolate, const WasmModule* module,
maps->set(type_index, *map);
}
+MachineRepresentation NormalizeFastApiRepresentation(const CTypeInfo& info) {
+ MachineType t = MachineType::TypeForCType(info);
+ // Wasm representation of bool is i32 instead of i1.
+ if (t.semantic() == MachineSemantic::kBool) {
+ return MachineRepresentation::kWord32;
+ }
+ return t.representation();
+}
+
+bool IsSupportedWasmFastApiFunction(Isolate* isolate,
+ const wasm::FunctionSig* expected_sig,
+ Handle<SharedFunctionInfo> shared) {
+ if (!shared->IsApiFunction()) {
+ return false;
+ }
+ if (shared->get_api_func_data().GetCFunctionsCount() == 0) {
+ return false;
+ }
+ if (!shared->get_api_func_data().accept_any_receiver()) {
+ return false;
+ }
+ if (!shared->get_api_func_data().signature().IsUndefined()) {
+ // TODO(wasm): CFunctionInfo* signature check.
+ return false;
+ }
+ const CFunctionInfo* info = shared->get_api_func_data().GetCSignature(0);
+ if (!compiler::IsFastCallSupportedSignature(info)) {
+ return false;
+ }
+
+ const auto log_imported_function_mismatch = [&shared,
+ isolate](const char* reason) {
+ if (v8_flags.trace_opt) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[disabled optimization for ");
+ shared->ShortPrint(scope.file());
+ PrintF(scope.file(),
+ ", reason: the signature of the imported function in the Wasm "
+ "module doesn't match that of the Fast API function (%s)]\n",
+ reason);
+ }
+ };
+
+ // C functions only have one return value.
+ if (expected_sig->return_count() > 1) {
+ // Here and below, we log when the function we call is declared as an API
+ // function but we cannot optimize the call, which might be unexpected. In
+ // that case we use the "slow" path, making a normal Wasm->JS call and
+ // calling the "slow" callback specified in FunctionTemplate::New().
+ log_imported_function_mismatch("too many return values");
+ return false;
+ }
+ CTypeInfo return_info = info->ReturnInfo();
+ // Unsupported if return type doesn't match.
+ if (expected_sig->return_count() == 0 &&
+ return_info.GetType() != CTypeInfo::Type::kVoid) {
+ log_imported_function_mismatch("too few return values");
+ return false;
+ }
+ // Unsupported if return type doesn't match.
+ if (expected_sig->return_count() == 1) {
+ if (return_info.GetType() == CTypeInfo::Type::kVoid) {
+ log_imported_function_mismatch("too many return values");
+ return false;
+ }
+ if (NormalizeFastApiRepresentation(return_info) !=
+ expected_sig->GetReturn(0).machine_type().representation()) {
+ log_imported_function_mismatch("mismatching return value");
+ return false;
+ }
+ }
+ // Unsupported if arity doesn't match.
+ if (expected_sig->parameter_count() != info->ArgumentCount() - 1) {
+ log_imported_function_mismatch("mismatched arity");
+ return false;
+ }
+ // Unsupported if any argument types don't match.
+ for (unsigned int i = 0; i < expected_sig->parameter_count(); i += 1) {
+ // Arg 0 is the receiver; skip over it since wasm doesn't have a concept
+ // of receivers.
+ CTypeInfo arg = info->ArgumentInfo(i + 1);
+ if (NormalizeFastApiRepresentation(arg) !=
+ expected_sig->GetParam(i).machine_type().representation()) {
+ log_imported_function_mismatch("parameter type mismatch");
+ return false;
+ }
+ }
+ return true;
+}
+
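
`IsSupportedWasmFastApiFunction` above compares the C fast-API signature with the wasm signature representation by representation, after `NormalizeFastApiRepresentation` widens a C `bool` to a 32-bit word because wasm has no i1 type. A standalone sketch of that comparison, with an illustrative enum standing in for V8's `MachineRepresentation`:

    // Illustrative only; not V8's MachineRepresentation/MachineSemantic.
    enum class Repr { kBit, kWord32, kWord64, kFloat32, kFloat64 };

    // C-level bools become 32-bit words before comparing against wasm types.
    constexpr Repr NormalizeForWasm(Repr r) {
      return r == Repr::kBit ? Repr::kWord32 : r;
    }

    constexpr bool ParamMatches(Repr c_api_param, Repr wasm_param) {
      return NormalizeForWasm(c_api_param) == wasm_param;
    }

    static_assert(ParamMatches(Repr::kBit, Repr::kWord32),
                  "a C bool argument matches a wasm i32 parameter");
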
+bool ResolveBoundJSFastApiFunction(const wasm::FunctionSig* expected_sig,
+ Handle<JSReceiver> callable) {
+ Handle<JSFunction> target;
+ if (callable->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> bound_target =
+ Handle<JSBoundFunction>::cast(callable);
+ // Nested bound functions and bound arguments are not supported yet.
+ if (bound_target->bound_arguments().length() > 0) {
+ return false;
+ }
+ if (bound_target->bound_target_function().IsJSBoundFunction()) {
+ return false;
+ }
+ Handle<JSReceiver> bound_target_function =
+ handle(bound_target->bound_target_function(), callable->GetIsolate());
+ if (!bound_target_function->IsJSFunction()) {
+ return false;
+ }
+ target = Handle<JSFunction>::cast(bound_target_function);
+ } else if (callable->IsJSFunction()) {
+ target = Handle<JSFunction>::cast(callable);
+ } else {
+ return false;
+ }
+
+ Isolate* isolate = target->GetIsolate();
+ Handle<SharedFunctionInfo> shared(target->shared(), isolate);
+ return IsSupportedWasmFastApiFunction(isolate, expected_sig, shared);
+}
+
+#if V8_INTL_SUPPORT
+namespace {
+
+bool IsStringRef(wasm::ValueType type) {
+ return type.is_reference_to(wasm::HeapType::kString);
+}
+
+} // namespace
+#endif
+
+// This detects imports of the form: `Function.prototype.call.bind(foo)`, where
+// `foo` is something that has a Builtin id.
+WellKnownImport CheckForWellKnownImport(Handle<JSReceiver> callable,
+ const wasm::FunctionSig* sig) {
+ WellKnownImport kGeneric = WellKnownImport::kGeneric; // ("using enum" is C++20.)
+ // First part: check that the callable is a bound function whose target
+ // is {Function.prototype.call}, and which only binds a receiver.
+ if (!callable->IsJSBoundFunction()) return kGeneric;
+ Handle<JSBoundFunction> bound = Handle<JSBoundFunction>::cast(callable);
+ if (bound->bound_arguments().length() != 0) return kGeneric;
+ if (!bound->bound_target_function().IsJSFunction()) return kGeneric;
+ SharedFunctionInfo sfi =
+ JSFunction::cast(bound->bound_target_function()).shared();
+ if (!sfi.HasBuiltinId()) return kGeneric;
+ if (sfi.builtin_id() != Builtin::kFunctionPrototypeCall) return kGeneric;
+ // Second part: check if the bound receiver is one of the builtins for which
+ // we have special-cased support.
+ Object bound_this = bound->bound_this();
+ if (!bound_this.IsJSFunction()) return kGeneric;
+ sfi = JSFunction::cast(bound_this).shared();
+ if (!sfi.HasBuiltinId()) return kGeneric;
+ switch (sfi.builtin_id()) {
+#if V8_INTL_SUPPORT
+ case Builtin::kStringPrototypeToLowerCaseIntl:
+ // TODO(jkummerow): Consider caching signatures to compare with, similar
+ // to {wasm::WasmOpcodes::Signature(...)}.
+ if (sig->parameter_count() == 1 && sig->return_count() == 1 &&
+ IsStringRef(sig->GetParam(0)) && IsStringRef(sig->GetReturn(0))) {
+ return WellKnownImport::kStringToLowerCaseStringref;
+ }
+ return kGeneric;
+#endif
+ default:
+ break;
+ }
+ return kGeneric;
+}
+
} // namespace
+WasmImportData::WasmImportData(Handle<JSReceiver> callable,
+ const wasm::FunctionSig* expected_sig,
+ uint32_t expected_canonical_type_index)
+ : callable_(callable) {
+ kind_ = ComputeKind(expected_sig, expected_canonical_type_index);
+}
+
+ImportCallKind WasmImportData::ComputeKind(
+ const wasm::FunctionSig* expected_sig,
+ uint32_t expected_canonical_type_index) {
+ Isolate* isolate = callable_->GetIsolate();
+ if (WasmExportedFunction::IsWasmExportedFunction(*callable_)) {
+ auto imported_function = Handle<WasmExportedFunction>::cast(callable_);
+ if (!imported_function->MatchesSignature(expected_canonical_type_index)) {
+ return ImportCallKind::kLinkError;
+ }
+ uint32_t func_index =
+ static_cast<uint32_t>(imported_function->function_index());
+ if (func_index >=
+ imported_function->instance().module()->num_imported_functions) {
+ return ImportCallKind::kWasmToWasm;
+ }
+ // Resolve the shortcut to the underlying callable and continue.
+ Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
+ ImportedFunctionEntry entry(instance, func_index);
+ callable_ = handle(entry.callable(), isolate);
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*callable_)) {
+ auto js_function = Handle<WasmJSFunction>::cast(callable_);
+ suspend_ = js_function->GetSuspend();
+ if (!js_function->MatchesSignature(expected_canonical_type_index)) {
+ return ImportCallKind::kLinkError;
+ }
+ // Resolve the shortcut to the underlying callable and continue.
+ callable_ = handle(js_function->GetCallable(), isolate);
+ }
+ if (WasmCapiFunction::IsWasmCapiFunction(*callable_)) {
+ auto capi_function = Handle<WasmCapiFunction>::cast(callable_);
+ if (!capi_function->MatchesSignature(expected_canonical_type_index)) {
+ return ImportCallKind::kLinkError;
+ }
+ return ImportCallKind::kWasmToCapi;
+ }
+ // Assuming we are calling to JS, check whether this would be a runtime error.
+ if (!wasm::IsJSCompatibleSignature(expected_sig)) {
+ return ImportCallKind::kRuntimeTypeError;
+ }
+ // Check if this can be a JS fast API call.
+ if (v8_flags.turbo_fast_api_calls &&
+ ResolveBoundJSFastApiFunction(expected_sig, callable_)) {
+ return ImportCallKind::kWasmToJSFastApi;
+ }
+ well_known_status_ = CheckForWellKnownImport(callable_, expected_sig);
+ // For JavaScript calls, determine whether the target has an arity match.
+ if (callable_->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable_);
+ Handle<SharedFunctionInfo> shared(function->shared(),
+ function->GetIsolate());
+
+// Check for math intrinsics.
+#define COMPARE_SIG_FOR_BUILTIN(name) \
+ { \
+ const wasm::FunctionSig* sig = \
+ wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
+ if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
+ DCHECK_NOT_NULL(sig); \
+ if (*expected_sig == *sig) { \
+ return ImportCallKind::k##name; \
+ } \
+ }
+#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
+ case Builtin::kMath##name: \
+ COMPARE_SIG_FOR_BUILTIN(F64##name); \
+ break;
+#define COMPARE_SIG_FOR_BUILTIN_F32_F64(name) \
+ case Builtin::kMath##name: \
+ COMPARE_SIG_FOR_BUILTIN(F64##name); \
+ COMPARE_SIG_FOR_BUILTIN(F32##name); \
+ break;
+
+ if (v8_flags.wasm_math_intrinsics && shared->HasBuiltinId()) {
+ switch (shared->builtin_id()) {
+ COMPARE_SIG_FOR_BUILTIN_F64(Acos);
+ COMPARE_SIG_FOR_BUILTIN_F64(Asin);
+ COMPARE_SIG_FOR_BUILTIN_F64(Atan);
+ COMPARE_SIG_FOR_BUILTIN_F64(Cos);
+ COMPARE_SIG_FOR_BUILTIN_F64(Sin);
+ COMPARE_SIG_FOR_BUILTIN_F64(Tan);
+ COMPARE_SIG_FOR_BUILTIN_F64(Exp);
+ COMPARE_SIG_FOR_BUILTIN_F64(Log);
+ COMPARE_SIG_FOR_BUILTIN_F64(Atan2);
+ COMPARE_SIG_FOR_BUILTIN_F64(Pow);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Min);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Max);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Abs);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Ceil);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Floor);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Sqrt);
+ case Builtin::kMathFround:
+ COMPARE_SIG_FOR_BUILTIN(F32ConvertF64);
+ break;
+ default:
+ break;
+ }
+ }
+
+#undef COMPARE_SIG_FOR_BUILTIN
+#undef COMPARE_SIG_FOR_BUILTIN_F64
+#undef COMPARE_SIG_FOR_BUILTIN_F32_F64
+
+ if (IsClassConstructor(shared->kind())) {
+ // Class constructor will throw anyway.
+ return ImportCallKind::kUseCallBuiltin;
+ }
+
+ if (shared->internal_formal_parameter_count_without_receiver() ==
+ expected_sig->parameter_count() - suspend_) {
+ return ImportCallKind::kJSFunctionArityMatch;
+ }
+
+ // If function isn't compiled, compile it now.
+ Isolate* isolate = callable_->GetIsolate();
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
+ if (!is_compiled_scope.is_compiled()) {
+ Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope);
+ }
+
+ return ImportCallKind::kJSFunctionArityMismatch;
+ }
+ // Unknown case. Use the call builtin.
+ return ImportCallKind::kUseCallBuiltin;
+}
+
// A helper class to simplify instantiating a module from a module object.
// It closes over the {Isolate}, the {ErrorThrower}, etc.
class InstanceBuilder {
@@ -264,6 +568,7 @@ class InstanceBuilder {
std::vector<Handle<WasmTagObject>> tags_wrappers_;
Handle<WasmExportedFunction> start_function_;
std::vector<SanitizedImport> sanitized_imports_;
+ std::vector<WellKnownImport> well_known_imports_;
// We pass this {Zone} to the temporary {WasmFullDecoder} we allocate during
// each call to {EvaluateConstantExpression}. This has been found to improve
// performance a bit over allocating a new {Zone} each time.
@@ -502,6 +807,7 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate,
memory_buffer_(memory_buffer),
init_expr_zone_(isolate_->allocator(), "constant expression zone") {
sanitized_imports_.reserve(module_->import_table.size());
+ well_known_imports_.reserve(module_->num_imported_functions);
}
// Build an instance, in all of its glory.
@@ -680,7 +986,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<WasmTableObject> table_obj = WasmTableObject::New(
isolate_, instance, table.type, table.initial_size,
table.has_maximum_size, table.maximum_size, nullptr,
- isolate_->factory()->null_value());
+ IsSubtypeOf(table.type, kWasmExternRef, module_)
+ ? Handle<Object>::cast(isolate_->factory()->null_value())
+ : Handle<Object>::cast(isolate_->factory()->wasm_null()));
tables->set(i, *table_obj);
}
instance->set_tables(*tables);
@@ -734,7 +1042,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Allocate the array that will hold type feedback vectors.
//--------------------------------------------------------------------------
- if (v8_flags.wasm_speculative_inlining) {
+ if (enabled_.has_inlining()) {
int num_functions = static_cast<int>(module_->num_declared_functions);
// Zero-fill the array so we can do a quick Smi-check to test if a given
// slot was initialized.
@@ -772,7 +1080,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Initialize non-defaultable tables.
//--------------------------------------------------------------------------
- if (v8_flags.experimental_wasm_typed_funcref) {
+ if (enabled_.has_typed_funcref()) {
SetTableInitialValues(instance);
}
@@ -790,6 +1098,25 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (thrower_->error()) return {};
//--------------------------------------------------------------------------
+ // Set up uninitialized element segments.
+ //--------------------------------------------------------------------------
+ if (!module_->elem_segments.empty()) {
+ Handle<FixedArray> elements = isolate_->factory()->NewFixedArray(
+ static_cast<int>(module_->elem_segments.size()));
+ for (int i = 0; i < static_cast<int>(module_->elem_segments.size()); i++) {
+ // Initialize declarative segments as empty. The rest remain
+ // uninitialized.
+ bool is_declarative = module_->elem_segments[i].status ==
+ WasmElemSegment::kStatusDeclarative;
+ elements->set(
+ i, is_declarative
+ ? Object::cast(*isolate_->factory()->empty_fixed_array())
+ : *isolate_->factory()->undefined_value());
+ }
+ instance->set_element_segments(*elements);
+ }
+
+ //--------------------------------------------------------------------------
// Load element segments into tables.
//--------------------------------------------------------------------------
if (table_count > 0) {
@@ -814,7 +1141,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
uint32_t canonical_sig_index =
module_->isorecursive_canonical_type_ids[module_->functions[start_index]
.sig_index];
- Handle<CodeT> wrapper_code =
+ Handle<Code> wrapper_code =
JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
isolate_, function.sig, canonical_sig_index, module_,
function.imported);
@@ -1125,16 +1452,19 @@ bool InstanceBuilder::ProcessImportedFunction(
}
auto js_receiver = Handle<JSReceiver>::cast(value);
const FunctionSig* expected_sig = module_->functions[func_index].sig;
- auto resolved = compiler::ResolveWasmImportCall(js_receiver, expected_sig,
- module_, enabled_);
- compiler::WasmImportCallKind kind = resolved.kind;
- js_receiver = resolved.callable;
+ uint32_t sig_index = module_->functions[func_index].sig_index;
+ uint32_t canonical_type_index =
+ module_->isorecursive_canonical_type_ids[sig_index];
+ WasmImportData resolved(js_receiver, expected_sig, canonical_type_index);
+ well_known_imports_.push_back(resolved.well_known_status());
+ ImportCallKind kind = resolved.kind();
+ js_receiver = resolved.callable();
switch (kind) {
- case compiler::WasmImportCallKind::kLinkError:
+ case ImportCallKind::kLinkError:
ReportLinkError("imported function does not match the expected type",
import_index, module_name, import_name);
return false;
- case compiler::WasmImportCallKind::kWasmToWasm: {
+ case ImportCallKind::kWasmToWasm: {
// The imported function is a Wasm function from another instance.
auto imported_function = Handle<WasmExportedFunction>::cast(js_receiver);
Handle<WasmInstanceObject> imported_instance(
@@ -1145,7 +1475,7 @@ bool InstanceBuilder::ProcessImportedFunction(
entry.SetWasmToWasm(*imported_instance, imported_target);
break;
}
- case compiler::WasmImportCallKind::kWasmToCapi: {
+ case ImportCallKind::kWasmToCapi: {
NativeModule* native_module = instance->module_object().native_module();
int expected_arity = static_cast<int>(expected_sig->parameter_count());
WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
@@ -1177,7 +1507,7 @@ bool InstanceBuilder::ProcessImportedFunction(
entry.SetWasmToJs(isolate_, js_receiver, wasm_code, kNoSuspend);
break;
}
- case compiler::WasmImportCallKind::kWasmToJSFastApi: {
+ case ImportCallKind::kWasmToJSFastApi: {
NativeModule* native_module = instance->module_object().native_module();
DCHECK(js_receiver->IsJSFunction() || js_receiver->IsJSBoundFunction());
WasmCodeRefScope code_ref_scope;
@@ -1191,7 +1521,7 @@ bool InstanceBuilder::ProcessImportedFunction(
// The imported function is a callable.
int expected_arity = static_cast<int>(expected_sig->parameter_count());
- if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
+ if (kind == ImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
expected_arity =
@@ -1203,16 +1533,16 @@ bool InstanceBuilder::ProcessImportedFunction(
module_->isorecursive_canonical_type_ids
[module_->functions[func_index].sig_index];
WasmCode* wasm_code = native_module->import_wrapper_cache()->Get(
- kind, canonical_type_index, expected_arity, resolved.suspend);
+ kind, canonical_type_index, expected_arity, resolved.suspend());
DCHECK_NOT_NULL(wasm_code);
ImportedFunctionEntry entry(instance, func_index);
if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
// Wasm to JS wrappers are treated specially in the import table.
- entry.SetWasmToJs(isolate_, js_receiver, wasm_code, resolved.suspend);
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code, resolved.suspend());
} else {
// Wasm math intrinsics are compiled as regular Wasm functions.
- DCHECK(kind >= compiler::WasmImportCallKind::kFirstMathIntrinsic &&
- kind <= compiler::WasmImportCallKind::kLastMathIntrinsic);
+ DCHECK(kind >= ImportCallKind::kFirstMathIntrinsic &&
+ kind <= ImportCallKind::kLastMathIntrinsic);
entry.SetWasmToWasm(*instance, wasm_code->instruction_start());
}
break;
@@ -1451,14 +1781,16 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
case kF64:
value = WasmValue(global_object->GetF64());
break;
- case kRtt:
+ case kS128:
+ value = WasmValue(global_object->GetS128RawBytes(), kWasmS128);
+ break;
case kRef:
case kRefNull:
value = WasmValue(global_object->GetRef(), global_object->type());
break;
case kVoid:
- case kS128:
case kBottom:
+ case kRtt:
case kI8:
case kI16:
UNREACHABLE();
@@ -1589,29 +1921,28 @@ void InstanceBuilder::CompileImportWrappers(
auto js_receiver = Handle<JSReceiver>::cast(value);
uint32_t func_index = module_->import_table[index].index;
const FunctionSig* sig = module_->functions[func_index].sig;
- auto resolved =
- compiler::ResolveWasmImportCall(js_receiver, sig, module_, enabled_);
- compiler::WasmImportCallKind kind = resolved.kind;
- if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
- kind == compiler::WasmImportCallKind::kLinkError ||
- kind == compiler::WasmImportCallKind::kWasmToCapi ||
- kind == compiler::WasmImportCallKind::kWasmToJSFastApi) {
+ uint32_t sig_index = module_->functions[func_index].sig_index;
+ uint32_t canonical_type_index =
+ module_->isorecursive_canonical_type_ids[sig_index];
+ WasmImportData resolved(js_receiver, sig, canonical_type_index);
+ ImportCallKind kind = resolved.kind();
+ if (kind == ImportCallKind::kWasmToWasm ||
+ kind == ImportCallKind::kLinkError ||
+ kind == ImportCallKind::kWasmToCapi ||
+ kind == ImportCallKind::kWasmToJSFastApi) {
continue;
}
int expected_arity = static_cast<int>(sig->parameter_count());
- if (resolved.kind ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.callable);
+ if (kind == ImportCallKind::kJSFunctionArityMismatch) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(resolved.callable());
SharedFunctionInfo shared = function->shared();
expected_arity =
shared.internal_formal_parameter_count_without_receiver();
}
- uint32_t canonical_type_index =
- module_->isorecursive_canonical_type_ids[module_->functions[func_index]
- .sig_index];
WasmImportWrapperCache::CacheKey key(kind, canonical_type_index,
- expected_arity, resolved.suspend);
+ expected_arity, resolved.suspend());
if (cache_scope[key] != nullptr) {
// Cache entry already exists, no need to compile it again.
continue;
@@ -1689,7 +2020,9 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
return -1;
}
Handle<WasmTagObject> imported_tag = Handle<WasmTagObject>::cast(value);
- if (!imported_tag->MatchesSignature(module_->tags[import.index].sig)) {
+ if (!imported_tag->MatchesSignature(
+ module_->isorecursive_canonical_type_ids
+ [module_->tags[import.index].sig_index])) {
ReportLinkError("imported tag does not match the expected type",
index, module_name, import_name);
return -1;
@@ -1704,6 +2037,15 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
UNREACHABLE();
}
}
+ if (num_imported_functions > 0) {
+ WellKnownImportsList::UpdateResult result =
+ module_->type_feedback.well_known_imports.Update(
+ base::VectorOf(well_known_imports_));
+ if (result == WellKnownImportsList::UpdateResult::kFoundIncompatibility) {
+ module_object_->native_module()->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveTurbofanCode);
+ }
+ }
return num_imported_functions;
}
@@ -1903,7 +2245,10 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<HeapObject> tag_object(
HeapObject::cast(instance->tags_table().get(exp.index)),
isolate_);
- wrapper = WasmTagObject::New(isolate_, tag.sig, tag_object);
+ uint32_t canonical_sig_index =
+ module_->isorecursive_canonical_type_ids[tag.sig_index];
+ wrapper = WasmTagObject::New(isolate_, tag.sig, canonical_sig_index,
+ tag_object);
tags_wrappers_[exp.index] = wrapper;
}
desc.set_value(wrapper);
@@ -1925,8 +2270,8 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
if (module_->origin == kWasmOrigin) {
- v8::Maybe<bool> success =
- JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
+ v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
+ isolate_, exports_object, FROZEN, kDontThrow);
DCHECK(success.FromMaybe(false));
USE(success);
}
@@ -1959,7 +2304,7 @@ V8_INLINE void SetFunctionTablePlaceholder(Isolate* isolate,
V8_INLINE void SetFunctionTableNullEntry(Isolate* isolate,
Handle<WasmTableObject> table_object,
uint32_t entry_index) {
- table_object->entries().set(entry_index, *isolate->factory()->null_value());
+ table_object->entries().set(entry_index, *isolate->factory()->wasm_null());
WasmTableObject::ClearDispatchTables(isolate, table_object, entry_index);
}
} // namespace
@@ -2002,84 +2347,181 @@ void InstanceBuilder::SetTableInitialValues(
}
namespace {
-// If the operation succeeds, returns an empty {Optional}. Otherwise, returns an
-// {Optional} containing the {MessageTemplate} code of the error.
-base::Optional<MessageTemplate> LoadElemSegmentImpl(
- Zone* zone, Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<WasmTableObject> table_object, uint32_t table_index,
- uint32_t segment_index, uint32_t dst, uint32_t src, size_t count) {
- DCHECK_LT(segment_index, instance->module()->elem_segments.size());
- auto& elem_segment = instance->module()->elem_segments[segment_index];
- // TODO(wasm): Move this functionality into wasm-objects, since it is used
- // for both instantiation and in the implementation of the table.init
- // instruction.
- if (!base::IsInBounds<uint64_t>(dst, count, table_object->current_length())) {
- return {MessageTemplate::kWasmTrapTableOutOfBounds};
- }
- if (!base::IsInBounds<uint64_t>(
- src, count,
- instance->dropped_elem_segments().get(segment_index) == 0
- ? elem_segment.entries.size()
- : 0)) {
- return {MessageTemplate::kWasmTrapElementSegmentOutOfBounds};
- }
-
- bool is_function_table =
- IsSubtypeOf(table_object->type(), kWasmFuncRef, instance->module());
-
- ErrorThrower thrower(isolate, "LoadElemSegment");
-
- for (size_t i = 0; i < count; ++i) {
- ConstantExpression entry = elem_segment.entries[src + i];
- int entry_index = static_cast<int>(dst + i);
- if (is_function_table && entry.kind() == ConstantExpression::kRefFunc) {
- SetFunctionTablePlaceholder(isolate, instance, table_object, entry_index,
- entry.index());
- } else if (is_function_table &&
- entry.kind() == ConstantExpression::kRefNull) {
- SetFunctionTableNullEntry(isolate, table_object, entry_index);
- } else {
- ValueOrError result = EvaluateConstantExpression(
- zone, entry, elem_segment.type, isolate, instance);
- if (is_error(result)) return to_error(result);
- WasmTableObject::Set(isolate, table_object, entry_index,
- to_value(result).to_ref());
+
+enum FunctionComputationMode { kLazyFunctionsAndNull, kStrictFunctionsAndNull };
+
+// If {function_mode == kLazyFunctionsAndNull}, may return a function index
+// instead of computing a function object, and {WasmValue(-1)} instead of null.
+// Assumes the underlying module is verified.
+ValueOrError ConsumeElementSegmentEntry(Zone* zone, Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ const WasmElemSegment& segment,
+ Decoder& decoder,
+ FunctionComputationMode function_mode) {
+ if (segment.element_type == WasmElemSegment::kFunctionIndexElements) {
+ uint32_t function_index = decoder.consume_u32v();
+ return function_mode == kStrictFunctionsAndNull
+ ? EvaluateConstantExpression(
+ zone, ConstantExpression::RefFunc(function_index),
+ segment.type, isolate, instance)
+ : ValueOrError(WasmValue(function_index));
+ }
+
+ switch (static_cast<WasmOpcode>(*decoder.pc())) {
+ case kExprRefFunc: {
+ auto [function_index, length] =
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.pc() + 1,
+ "ref.func");
+ if (V8_LIKELY(decoder.lookahead(1 + length, kExprEnd))) {
+ decoder.consume_bytes(length + 2);
+ return function_mode == kStrictFunctionsAndNull
+ ? EvaluateConstantExpression(
+ zone, ConstantExpression::RefFunc(function_index),
+ segment.type, isolate, instance)
+ : ValueOrError(WasmValue(function_index));
+ }
+ break;
+ }
+ case kExprRefNull: {
+ auto [heap_type, length] =
+ value_type_reader::read_heap_type<Decoder::FullValidationTag>(
+ &decoder, decoder.pc() + 1, WasmFeatures::All());
+ if (V8_LIKELY(decoder.lookahead(1 + length, kExprEnd))) {
+ decoder.consume_bytes(length + 2);
+ return function_mode == kStrictFunctionsAndNull
+ ? EvaluateConstantExpression(zone,
+ ConstantExpression::RefNull(
+ heap_type.representation()),
+ segment.type, isolate, instance)
+ : WasmValue(int32_t{-1});
+ }
+ break;
}
+ default:
+ break;
}
- return {};
+
+ auto sig = FixedSizeSignature<ValueType>::Returns(segment.type);
+ FunctionBody body(&sig, decoder.pc_offset(), decoder.pc(), decoder.end());
+ WasmFeatures detected;
+ // We use FullValidationTag so we do not have to create another template
+ // instance of WasmFullDecoder, which would cost us >50Kb binary code
+ // size.
+ WasmFullDecoder<Decoder::FullValidationTag, ConstantExpressionInterface,
+ kConstantExpression>
+ full_decoder(zone, instance->module(), WasmFeatures::All(), &detected,
+ body, instance->module(), isolate, instance);
+
+ full_decoder.DecodeFunctionBody();
+
+ decoder.consume_bytes(static_cast<int>(full_decoder.pc() - decoder.pc()));
+
+ return full_decoder.interface().has_error()
+ ? ValueOrError(full_decoder.interface().error())
+ : ValueOrError(full_decoder.interface().computed_value());
}
+
} // namespace
+base::Optional<MessageTemplate> InitializeElementSegment(
+ Zone* zone, Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t segment_index) {
+ if (!instance->element_segments().get(segment_index).IsUndefined()) return {};
+
+ const WasmElemSegment& elem_segment =
+ instance->module()->elem_segments[segment_index];
+
+ base::Vector<const byte> module_bytes =
+ instance->module_object().native_module()->wire_bytes();
+
+ Decoder decoder(module_bytes);
+ decoder.consume_bytes(elem_segment.elements_wire_bytes_offset);
+
+ Handle<FixedArray> result =
+ isolate->factory()->NewFixedArray(elem_segment.element_count);
+
+ for (size_t i = 0; i < elem_segment.element_count; ++i) {
+ ValueOrError value =
+ ConsumeElementSegmentEntry(zone, isolate, instance, elem_segment,
+ decoder, kStrictFunctionsAndNull);
+ if (is_error(value)) return {to_error(value)};
+ result->set(static_cast<int>(i), *to_value(value).to_ref());
+ }
+
+ instance->element_segments().set(segment_index, *result);
+
+ return {};
+}
+
void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
for (uint32_t segment_index = 0;
segment_index < module_->elem_segments.size(); ++segment_index) {
- auto& elem_segment = instance->module()->elem_segments[segment_index];
+ const WasmElemSegment& elem_segment =
+ instance->module()->elem_segments[segment_index];
// Passive segments are not copied during instantiation.
if (elem_segment.status != WasmElemSegment::kStatusActive) continue;
- uint32_t table_index = elem_segment.table_index;
- ValueOrError value = EvaluateConstantExpression(
+ const uint32_t table_index = elem_segment.table_index;
+ ValueOrError maybe_dst = EvaluateConstantExpression(
&init_expr_zone_, elem_segment.offset, kWasmI32, isolate_, instance);
- if (MaybeMarkError(value, thrower_)) return;
- uint32_t dst = std::get<WasmValue>(value).to_u32();
- if (thrower_->error()) return;
- uint32_t src = 0;
- size_t count = elem_segment.entries.size();
-
- base::Optional<MessageTemplate> opt_error = LoadElemSegmentImpl(
- &init_expr_zone_, isolate_, instance,
- handle(WasmTableObject::cast(
- instance->tables().get(elem_segment.table_index)),
- isolate_),
- table_index, segment_index, dst, src, count);
- // Set the active segments to being already dropped, since table.init on
- // a dropped passive segment and an active segment have the same behavior.
- instance->dropped_elem_segments().set(segment_index, 1);
- if (opt_error.has_value()) {
- thrower_->RuntimeError(
- "%s", MessageFormatter::TemplateString(opt_error.value()));
+ if (MaybeMarkError(maybe_dst, thrower_)) return;
+ const uint32_t dst = to_value(maybe_dst).to_u32();
+ const size_t count = elem_segment.element_count;
+
+ Handle<WasmTableObject> table_object = handle(
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate_);
+ if (!base::IsInBounds<size_t>(dst, count, table_object->current_length())) {
+ thrower_->RuntimeError("%s",
+ MessageFormatter::TemplateString(
+ MessageTemplate::kWasmTrapTableOutOfBounds));
return;
}
+
+ base::Vector<const byte> module_bytes =
+ instance->module_object().native_module()->wire_bytes();
+ Decoder decoder(module_bytes);
+ decoder.consume_bytes(elem_segment.elements_wire_bytes_offset);
+
+ bool is_function_table =
+ IsSubtypeOf(module_->tables[table_index].type, kWasmFuncRef, module_);
+
+ if (is_function_table) {
+ for (size_t i = 0; i < count; i++) {
+ int entry_index = static_cast<int>(dst + i);
+ ValueOrError computed_element = ConsumeElementSegmentEntry(
+ &init_expr_zone_, isolate_, instance, elem_segment, decoder,
+ kLazyFunctionsAndNull);
+ if (MaybeMarkError(computed_element, thrower_)) return;
+
+ WasmValue computed_value = to_value(computed_element);
+
+ if (computed_value.type() == kWasmI32) {
+ if (computed_value.to_i32() >= 0) {
+ SetFunctionTablePlaceholder(isolate_, instance, table_object,
+ entry_index, computed_value.to_i32());
+ } else {
+ SetFunctionTableNullEntry(isolate_, table_object, entry_index);
+ }
+ } else {
+ WasmTableObject::Set(isolate_, table_object, entry_index,
+ computed_value.to_ref());
+ }
+ }
+ } else {
+ for (size_t i = 0; i < count; i++) {
+ int entry_index = static_cast<int>(dst + i);
+ ValueOrError computed_element = ConsumeElementSegmentEntry(
+ &init_expr_zone_, isolate_, instance, elem_segment, decoder,
+ kStrictFunctionsAndNull);
+ if (MaybeMarkError(computed_element, thrower_)) return;
+ WasmTableObject::Set(isolate_, table_object, entry_index,
+ to_value(computed_element).to_ref());
+ }
+ }
+ // Active segments have to be set to empty after instance initialization
+ // (much like passive segments after dropping).
+ instance->element_segments().set(segment_index,
+ *isolate_->factory()->empty_fixed_array());
}
}
@@ -2092,21 +2534,6 @@ void InstanceBuilder::InitializeTags(Handle<WasmInstanceObject> instance) {
}
}
-base::Optional<MessageTemplate> LoadElemSegment(
- Isolate* isolate, Handle<WasmInstanceObject> instance, uint32_t table_index,
- uint32_t segment_index, uint32_t dst, uint32_t src, uint32_t count) {
- AccountingAllocator allocator;
- // This {Zone} will be used only by the temporary WasmFullDecoder allocated
- // down the line from this call. Therefore it is safe to stack-allocate it
- // here.
- Zone zone(&allocator, "LoadElemSegment");
- return LoadElemSegmentImpl(
- &zone, isolate, instance,
- handle(WasmTableObject::cast(instance->tables().get(table_index)),
- isolate),
- table_index, segment_index, dst, src, count);
-}
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-instantiate.h b/deps/v8/src/wasm/module-instantiate.h
index e407c96b0d..589b256ecb 100644
--- a/deps/v8/src/wasm/module-instantiate.h
+++ b/deps/v8/src/wasm/module-instantiate.h
@@ -14,6 +14,7 @@
#include "src/base/optional.h"
#include "src/common/message-template.h"
#include "src/wasm/wasm-value.h"
+#include "src/wasm/well-known-imports.h"
namespace v8 {
namespace internal {
@@ -31,21 +32,93 @@ template <typename T>
class MaybeHandle;
namespace wasm {
-class ConstantExpression;
class ErrorThrower;
+enum Suspend : bool;
+
+// Calls to Wasm imports are handled in several different ways, depending on the
+// type of the target function/callable and whether the signature matches the
+// argument arity.
+// TODO(jkummerow): Merge kJSFunctionArity{Match,Mismatch}, we don't really
+// need the distinction any more.
+enum class ImportCallKind : uint8_t {
+ kLinkError, // static Wasm->Wasm type error
+ kRuntimeTypeError, // runtime Wasm->JS type error
+ kWasmToCapi, // fast Wasm->C-API call
+ kWasmToJSFastApi, // fast Wasm->JS Fast API C call
+ kWasmToWasm, // fast Wasm->Wasm call
+ kJSFunctionArityMatch, // fast Wasm->JS call
+ kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
+ // Math functions imported from JavaScript that are intrinsified
+ kFirstMathIntrinsic,
+ kF64Acos = kFirstMathIntrinsic,
+ kF64Asin,
+ kF64Atan,
+ kF64Cos,
+ kF64Sin,
+ kF64Tan,
+ kF64Exp,
+ kF64Log,
+ kF64Atan2,
+ kF64Pow,
+ kF64Ceil,
+ kF64Floor,
+ kF64Sqrt,
+ kF64Min,
+ kF64Max,
+ kF64Abs,
+ kF32Min,
+ kF32Max,
+ kF32Abs,
+ kF32Ceil,
+ kF32Floor,
+ kF32Sqrt,
+ kF32ConvertF64,
+ kLastMathIntrinsic = kF32ConvertF64,
+ // For everything else, there's the call builtin.
+ kUseCallBuiltin
+};
+
+constexpr ImportCallKind kDefaultImportCallKind =
+ ImportCallKind::kJSFunctionArityMatch;
+
+// Resolves which import call wrapper is required for the given JS callable.
+// Provides the kind of wrapper needed, the ultimate target callable, and the
+// suspender object if applicable. Note that some callables (e.g. a
+// {WasmExportedFunction} or {WasmJSFunction}) just wrap another target, which
+// is why the ultimate target is provided as well.
+class WasmImportData {
+ public:
+ V8_EXPORT_PRIVATE WasmImportData(Handle<JSReceiver> callable,
+ const wasm::FunctionSig* sig,
+ uint32_t expected_canonical_type_index);
+
+ ImportCallKind kind() const { return kind_; }
+ WellKnownImport well_known_status() const { return well_known_status_; }
+ Suspend suspend() const { return suspend_; }
+ Handle<JSReceiver> callable() const { return callable_; }
+
+ private:
+ ImportCallKind ComputeKind(const wasm::FunctionSig* expected_sig,
+ uint32_t expected_canonical_type_index);
+
+ ImportCallKind kind_;
+ WellKnownImport well_known_status_{WellKnownImport::kGeneric};
+ Suspend suspend_{false};
+ Handle<JSReceiver> callable_;
+};
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory);
-// Loads a range of elements from element segment into a table.
-// Returns the empty {Optional} if the operation succeeds, or an {Optional} with
-// the error {MessageTemplate} if it fails.
-base::Optional<MessageTemplate> LoadElemSegment(
- Isolate* isolate, Handle<WasmInstanceObject> instance, uint32_t table_index,
- uint32_t segment_index, uint32_t dst, uint32_t src,
- uint32_t count) V8_WARN_UNUSED_RESULT;
+// Initializes a segment at index {segment_index} of the segment array of
+// {instance}. If successful, returns the empty {Optional}, otherwise an
+// {Optional} that contains the error message. Exits early if the segment is
+// already initialized.
+base::Optional<MessageTemplate> InitializeElementSegment(
+ Zone* zone, Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t segment_index);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/pgo.cc b/deps/v8/src/wasm/pgo.cc
index 5f17cf1b15..b8fd847c20 100644
--- a/deps/v8/src/wasm/pgo.cc
+++ b/deps/v8/src/wasm/pgo.cc
@@ -31,8 +31,8 @@ class ProfileGenerator {
private:
void SerializeTypeFeedback(ZoneBuffer& buffer) {
- std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_for_function =
- module_->type_feedback.feedback_for_function;
+ const std::unordered_map<uint32_t, FunctionTypeFeedback>&
+ feedback_for_function = module_->type_feedback.feedback_for_function;
// Get an ordered list of function indexes, so we generate deterministic
// data.
@@ -70,8 +70,8 @@ class ProfileGenerator {
}
void SerializeTieringInfo(ZoneBuffer& buffer) {
- std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_for_function =
- module_->type_feedback.feedback_for_function;
+ const std::unordered_map<uint32_t, FunctionTypeFeedback>&
+ feedback_for_function = module_->type_feedback.feedback_for_function;
const uint32_t initial_budget = v8_flags.wasm_tiering_budget;
for (uint32_t declared_index = 0;
declared_index < module_->num_declared_functions; ++declared_index) {
@@ -97,11 +97,13 @@ class ProfileGenerator {
const WasmModule* module_;
AccountingAllocator allocator_;
Zone zone_{&allocator_, "wasm::ProfileGenerator"};
- base::MutexGuard type_feedback_mutex_guard_;
+ base::SharedMutexGuard<base::kShared> type_feedback_mutex_guard_;
const uint32_t* const tiering_budget_array_;
};
void DeserializeTypeFeedback(Decoder& decoder, WasmModule* module) {
+ // TODO(clemensb): Guard this with a lock on {module->type_feedback.mutex}
+ // if this code can run in multi-threaded situations in the future.
std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_for_function =
module->type_feedback.feedback_for_function;
uint32_t num_entries = decoder.consume_u32v("num function entries");
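The pgo.cc change above replaces the exclusive base::MutexGuard with base::SharedMutexGuard<base::kShared> because the profile generator only reads feedback_for_function (hence the new const references). A minimal sketch of the same reader/writer split using the standard-library analogue, std::shared_mutex; the types and names below are illustrative, not V8's:

#include <cstdint>
#include <mutex>
#include <shared_mutex>
#include <unordered_map>
#include <vector>

struct TypeFeedbackStorage {
  mutable std::shared_mutex mutex;
  std::unordered_map<uint32_t, int> feedback_for_function;
};

// Reader (like the profile serializers): a shared lock is enough, and several
// readers can hold it concurrently.
std::vector<uint32_t> CollectFunctionIndexes(const TypeFeedbackStorage& storage) {
  std::shared_lock<std::shared_mutex> guard(storage.mutex);
  std::vector<uint32_t> indexes;
  for (const auto& entry : storage.feedback_for_function) {
    indexes.push_back(entry.first);
  }
  return indexes;
}

// Writer: recording new feedback still needs the exclusive lock.
void RecordFeedback(TypeFeedbackStorage& storage, uint32_t func_index,
                    int feedback) {
  std::unique_lock<std::shared_mutex> guard(storage.mutex);
  storage.feedback_for_function[func_index] = feedback;
}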
diff --git a/deps/v8/src/wasm/stacks.cc b/deps/v8/src/wasm/stacks.cc
index 3c1a697522..360b53551a 100644
--- a/deps/v8/src/wasm/stacks.cc
+++ b/deps/v8/src/wasm/stacks.cc
@@ -4,12 +4,17 @@
#include "src/wasm/stacks.h"
+#include "src/base/platform/platform.h"
+
namespace v8::internal::wasm {
// static
StackMemory* StackMemory::GetCurrentStackView(Isolate* isolate) {
- byte* limit = reinterpret_cast<byte*>(isolate->stack_guard()->real_jslimit());
- return new StackMemory(isolate, limit);
+ uintptr_t limit = isolate->stack_guard()->real_jslimit();
+ uintptr_t stack_start = base::Stack::GetStackStart();
+ DCHECK_LE(limit, stack_start);
+ size_t size = stack_start - limit;
+ return new StackMemory(isolate, reinterpret_cast<byte*>(limit), size);
}
StackMemory::~StackMemory() {
@@ -50,11 +55,8 @@ StackMemory::StackMemory(Isolate* isolate) : isolate_(isolate), owned_(true) {
}
// Overload to represent a view of the libc stack.
-StackMemory::StackMemory(Isolate* isolate, byte* limit)
- : isolate_(isolate),
- limit_(limit),
- size_(v8_flags.stack_size * KB),
- owned_(false) {
+StackMemory::StackMemory(Isolate* isolate, byte* limit, size_t size)
+ : isolate_(isolate), limit_(limit), size_(size), owned_(false) {
id_ = 0;
}
diff --git a/deps/v8/src/wasm/stacks.h b/deps/v8/src/wasm/stacks.h
index 995c6b53af..516d7bb426 100644
--- a/deps/v8/src/wasm/stacks.h
+++ b/deps/v8/src/wasm/stacks.h
@@ -63,7 +63,7 @@ class StackMemory {
explicit StackMemory(Isolate* isolate);
// Overload to represent a view of the libc stack.
- StackMemory(Isolate* isolate, byte* limit);
+ StackMemory(Isolate* isolate, byte* limit, size_t size);
Isolate* isolate_;
byte* limit_;
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 5d7f25fdda..2d28fd343f 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -28,16 +28,18 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
AsyncStreamingDecoder(const AsyncStreamingDecoder&) = delete;
AsyncStreamingDecoder& operator=(const AsyncStreamingDecoder&) = delete;
- // The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(base::Vector<const uint8_t> bytes) override;
void Finish(bool can_use_compiled_module) override;
void Abort() override;
- // Notify the StreamingDecoder that compilation ended and the
- // StreamingProcessor should not be called anymore.
- void NotifyCompilationEnded() override { Fail(); }
+ void NotifyCompilationDiscarded() override {
+ auto& active_processor = processor_ ? processor_ : failed_processor_;
+ active_processor.reset();
+ DCHECK_NULL(processor_);
+ DCHECK_NULL(failed_processor_);
+ }
void NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) override;
@@ -58,12 +60,12 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
bytes_(base::OwnedVector<uint8_t>::NewForOverwrite(
1 + length_bytes.length() + payload_length)),
payload_offset_(1 + length_bytes.length()) {
- bytes_.start()[0] = id;
- memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
+ bytes_.begin()[0] = id;
+ memcpy(bytes_.begin() + 1, &length_bytes.first(), length_bytes.length());
}
SectionCode section_code() const {
- return static_cast<SectionCode>(bytes_.start()[0]);
+ return static_cast<SectionCode>(bytes_.begin()[0]);
}
base::Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
@@ -154,14 +156,9 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
size_t length,
base::Vector<const uint8_t> length_bytes);
- std::unique_ptr<DecodingState> Error(const WasmError& error) {
- if (ok()) processor_->OnError(error);
+ std::unique_ptr<DecodingState> ToErrorState() {
Fail();
- return std::unique_ptr<DecodingState>(nullptr);
- }
-
- std::unique_ptr<DecodingState> Error(std::string message) {
- return Error(WasmError{module_offset_ - 1, std::move(message)});
+ return nullptr;
}
void ProcessModuleHeader() {
@@ -195,38 +192,67 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t module_offset) {
if (!ok()) return;
- processor_->ProcessFunctionBody(bytes, module_offset);
+ if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
}
void Fail() {
- // We reset the {processor_} field to represent failure. This also ensures
- // that we do not accidentally call further methods on the processor after
- // failure.
- processor_.reset();
+ // {Fail} cannot be called after {Finish}, {Abort}, {Fail}, or
+ // {NotifyCompilationDiscarded}.
+ DCHECK_EQ(processor_ == nullptr, failed_processor_ != nullptr);
+ if (processor_ != nullptr) failed_processor_ = std::move(processor_);
+ DCHECK_NULL(processor_);
+ DCHECK_NOT_NULL(failed_processor_);
}
- bool ok() const { return processor_ != nullptr; }
+ bool ok() const {
+ DCHECK_EQ(processor_ == nullptr, failed_processor_ != nullptr);
+ return processor_ != nullptr;
+ }
uint32_t module_offset() const { return module_offset_; }
+ // As long as we did not detect an invalid module, {processor_} will be set.
+ // On failure, the pointer is transferred to {failed_processor_} and will only
+ // be used for a final callback once all bytes have arrived. Finally, both
+ // {processor_} and {failed_processor_} will be null.
std::unique_ptr<StreamingProcessor> processor_;
+ std::unique_ptr<StreamingProcessor> failed_processor_;
std::unique_ptr<DecodingState> state_;
std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
bool code_section_processed_ = false;
uint32_t module_offset_ = 0;
- size_t total_size_ = 0;
- bool stream_finished_ = false;
- // We need wire bytes in an array for deserializing cached modules.
- std::vector<uint8_t> wire_bytes_for_deserializing_;
+ // Store the full wire bytes in a vector of vectors to avoid having to grow
+ // large vectors (measured up to 100ms delay in 2023-03).
+ // TODO(clemensb): Avoid holding the wire bytes live twice (here and in the
+ // section buffers).
+ std::vector<std::vector<uint8_t>> full_wire_bytes_{{}};
};
void AsyncStreamingDecoder::OnBytesReceived(base::Vector<const uint8_t> bytes) {
- if (deserializing()) {
- wire_bytes_for_deserializing_.insert(wire_bytes_for_deserializing_.end(),
- bytes.begin(), bytes.end());
- return;
- }
+ DCHECK(!full_wire_bytes_.empty());
+ // Fill the previous vector, growing up to 16kB. After that, allocate new
+ // vectors on overflow.
+ size_t remaining_capacity =
+ std::max(full_wire_bytes_.back().capacity(), size_t{16} * KB) -
+ full_wire_bytes_.back().size();
+ size_t bytes_for_existing_vector = std::min(remaining_capacity, bytes.size());
+ full_wire_bytes_.back().insert(full_wire_bytes_.back().end(), bytes.data(),
+ bytes.data() + bytes_for_existing_vector);
+ if (bytes.size() > bytes_for_existing_vector) {
+ // The previous vector's capacity is not enough to hold all new bytes, and
+ // it's bigger than 16kB, so expensive to copy. Allocate a new vector for
+ // the remaining bytes, growing exponentially.
+ size_t new_capacity = std::max(bytes.size() - bytes_for_existing_vector,
+ 2 * full_wire_bytes_.back().capacity());
+ full_wire_bytes_.emplace_back();
+ full_wire_bytes_.back().reserve(new_capacity);
+ full_wire_bytes_.back().insert(full_wire_bytes_.back().end(),
+ bytes.data() + bytes_for_existing_vector,
+ bytes.end());
+ }
+
+ if (deserializing()) return;
TRACE_STREAMING("OnBytesReceived(%zu bytes)\n", bytes.size());
@@ -240,7 +266,6 @@ void AsyncStreamingDecoder::OnBytesReceived(base::Vector<const uint8_t> bytes) {
state_ = state_->Next(this);
}
}
- total_size_ += bytes.size();
if (ok()) {
processor_->OnFinishedChunk();
}
@@ -258,57 +283,66 @@ size_t AsyncStreamingDecoder::DecodingState::ReadBytes(
void AsyncStreamingDecoder::Finish(bool can_use_compiled_module) {
TRACE_STREAMING("Finish\n");
- DCHECK(!stream_finished_);
- stream_finished_ = true;
- if (!ok()) return;
+ // {Finish} cannot be called after {Finish}, {Abort}, {Fail}, or
+ // {NotifyCompilationDiscarded}.
+ CHECK_EQ(processor_ == nullptr, failed_processor_ != nullptr);
+
+ // Create a final copy of the overall wire bytes; this will finally be
+ // transferred and stored in the NativeModule.
+ base::OwnedVector<const uint8_t> bytes_copy;
+ DCHECK_IMPLIES(full_wire_bytes_.back().empty(), full_wire_bytes_.size() == 1);
+ if (!full_wire_bytes_.back().empty()) {
+ size_t total_length = 0;
+ for (auto& bytes : full_wire_bytes_) total_length += bytes.size();
+ auto all_bytes = base::OwnedVector<uint8_t>::NewForOverwrite(total_length);
+ uint8_t* ptr = all_bytes.begin();
+ for (auto& bytes : full_wire_bytes_) {
+ memcpy(ptr, bytes.data(), bytes.size());
+ ptr += bytes.size();
+ }
+ DCHECK_EQ(all_bytes.end(), ptr);
+ bytes_copy = std::move(all_bytes);
+ }
- if (deserializing()) {
- base::Vector<const uint8_t> wire_bytes =
- base::VectorOf(wire_bytes_for_deserializing_);
+ if (ok() && deserializing()) {
// Try to deserialize the module from wire bytes and module bytes.
if (can_use_compiled_module &&
- processor_->Deserialize(compiled_module_bytes_, wire_bytes))
+ processor_->Deserialize(compiled_module_bytes_,
+ base::VectorOf(bytes_copy))) {
return;
+ }
// Compiled module bytes are invalidated by can_use_compiled_module = false
- // or the deserialization failed. Restart decoding using |wire_bytes|.
+ // or the deserialization failed. Restart decoding using |bytes_copy|.
+ // Reset {full_wire_bytes_} to a single empty vector.
+ full_wire_bytes_.assign({{}});
compiled_module_bytes_ = {};
DCHECK(!deserializing());
- OnBytesReceived(wire_bytes);
+ OnBytesReceived(base::VectorOf(bytes_copy));
// The decoder has received all wire bytes; fall through and finish.
}
- if (!state_->is_finishing_allowed()) {
+ if (ok() && !state_->is_finishing_allowed()) {
// The byte stream ended too early, we report an error.
- Error("unexpected end of stream");
- return;
+ Fail();
}
- base::OwnedVector<uint8_t> bytes =
- base::OwnedVector<uint8_t>::NewForOverwrite(total_size_);
- uint8_t* cursor = bytes.start();
- {
-#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
- uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
-#undef BYTES
- memcpy(cursor, module_header, arraysize(module_header));
- cursor += arraysize(module_header);
- }
- for (const auto& buffer : section_buffers_) {
- DCHECK_LE(cursor - bytes.start() + buffer->length(), total_size_);
- memcpy(cursor, buffer->bytes().begin(), buffer->length());
- cursor += buffer->length();
- }
- processor_->OnFinishedStream(std::move(bytes));
+ // Calling {OnFinishedStream} calls out to JS. Avoid further callbacks (by
+ // aborting the stream) by resetting the processor field before calling
+ // {OnFinishedStream}.
+ const bool failed = !ok();
+ std::unique_ptr<StreamingProcessor> processor =
+ failed ? std::move(failed_processor_) : std::move(processor_);
+ processor->OnFinishedStream(std::move(bytes_copy), failed);
}
void AsyncStreamingDecoder::Abort() {
TRACE_STREAMING("Abort\n");
- if (stream_finished_) return;
- stream_finished_ = true;
- if (!ok()) return; // Failed already.
- processor_->OnAbort();
+ // Ignore {Abort} after {Finish}.
+ if (!processor_ && !failed_processor_) return;
Fail();
+ failed_processor_->OnAbort();
+ failed_processor_.reset();
}
namespace {
@@ -461,7 +495,7 @@ class AsyncStreamingDecoder::DecodeSectionPayload : public DecodingState {
class AsyncStreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
public:
explicit DecodeNumberOfFunctions(SectionBuffer* section_buffer)
- : DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
+ : DecodeVarInt32(v8_flags.max_wasm_functions, "functions count"),
section_buffer_(section_buffer) {}
std::unique_ptr<DecodingState> NextWithValue(
@@ -537,7 +571,7 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
if (decoder.failed()) {
if (new_bytes == remaining_buf.size()) {
// We only report an error if we read all bytes.
- streaming->Error(decoder.error());
+ streaming->Fail();
}
set_offset(offset() + new_bytes);
return new_bytes;
@@ -561,12 +595,7 @@ std::unique_ptr<AsyncStreamingDecoder::DecodingState>
AsyncStreamingDecoder::DecodeVarInt32::Next(AsyncStreamingDecoder* streaming) {
if (!streaming->ok()) return nullptr;
- if (value_ > max_value_) {
- std::ostringstream oss;
- oss << "The value " << value_ << " for " << field_name_
- << " exceeds the maximum allowed value of " << max_value_;
- return streaming->Error(oss.str());
- }
+ if (value_ > max_value_) return streaming->ToErrorState();
return NextWithValue(streaming);
}
@@ -584,15 +613,11 @@ std::unique_ptr<AsyncStreamingDecoder::DecodingState>
AsyncStreamingDecoder::DecodeSectionID::Next(AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %u (%s)\n", id_,
SectionName(static_cast<SectionCode>(id_)));
- if (!IsValidSectionCode(id_)) return streaming->Error("invalid section code");
+ if (!IsValidSectionCode(id_)) return streaming->ToErrorState();
if (id_ == SectionCode::kCodeSectionCode) {
// Explicitly check for multiple code sections as module decoder never
// sees the code section and hence cannot track this section.
- if (streaming->code_section_processed_) {
- // TODO(wasm): This error message (and others in this class) is different
- // for non-streaming decoding. Bring them in sync and test.
- return streaming->Error("code section can only appear once");
- }
+ if (streaming->code_section_processed_) return streaming->ToErrorState();
streaming->code_section_processed_ = true;
}
return std::make_unique<DecodeSectionLength>(id_, module_offset_);
@@ -608,7 +633,7 @@ AsyncStreamingDecoder::DecodeSectionLength::NextWithValue(
DCHECK_NOT_NULL(buf);
if (value_ == 0) {
if (section_id_ == SectionCode::kCodeSectionCode) {
- return streaming->Error("code section cannot have size 0");
+ return streaming->ToErrorState();
}
// Process section without payload as well, to enforce section order and
// other feature checks specific to each individual section.
@@ -640,9 +665,7 @@ AsyncStreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
base::Vector<uint8_t> payload_buf = section_buffer_->payload();
- if (payload_buf.size() < bytes_consumed_) {
- return streaming->Error("invalid code section length");
- }
+ if (payload_buf.size() < bytes_consumed_) return streaming->ToErrorState();
memcpy(payload_buf.begin(), buffer().begin(), bytes_consumed_);
DCHECK_GE(kMaxInt, section_buffer_->module_offset() +
@@ -660,7 +683,7 @@ AsyncStreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
// {value} is the number of functions.
if (value_ == 0) {
if (payload_buf.size() != bytes_consumed_) {
- return streaming->Error("not all code section bytes were used");
+ return streaming->ToErrorState();
}
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
@@ -678,15 +701,15 @@ AsyncStreamingDecoder::DecodeFunctionLength::NextWithValue(
base::Vector<uint8_t> fun_length_buffer =
section_buffer_->bytes() + buffer_offset_;
if (fun_length_buffer.size() < bytes_consumed_) {
- return streaming->Error("read past code section end");
+ return streaming->ToErrorState();
}
memcpy(fun_length_buffer.begin(), buffer().begin(), bytes_consumed_);
// {value} is the length of the function.
- if (value_ == 0) return streaming->Error("invalid function length (0)");
+ if (value_ == 0) return streaming->ToErrorState();
if (buffer_offset_ + bytes_consumed_ + value_ > section_buffer_->length()) {
- return streaming->Error("not enough code section bytes");
+ return streaming->ToErrorState();
}
return std::make_unique<DecodeFunctionBody>(
@@ -708,7 +731,7 @@ AsyncStreamingDecoder::DecodeFunctionBody::Next(
}
// We just read the last function body. Continue with the next section.
if (end_offset != section_buffer_->length()) {
- return streaming->Error("not all code section bytes were used");
+ return streaming->ToErrorState();
}
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
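The wire-byte buffering introduced in AsyncStreamingDecoder::OnBytesReceived above avoids regrowing one large vector by keeping a list of chunks. A minimal stand-alone sketch of that policy (illustrative names, not V8 API): the current chunk is filled up to at least 16 KB, and any overflow starts a new chunk whose reserved capacity at least doubles.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t KB = 1024;

// Appends {data} to a chunk list without ever reallocating a large buffer.
void AppendBytes(std::vector<std::vector<uint8_t>>& chunks,
                 const uint8_t* data, size_t size) {
  // Invariant: {chunks} always holds at least one (possibly empty) vector.
  size_t remaining_capacity =
      std::max(chunks.back().capacity(), size_t{16} * KB) -
      chunks.back().size();
  size_t for_existing_chunk = std::min(remaining_capacity, size);
  chunks.back().insert(chunks.back().end(), data, data + for_existing_chunk);
  if (size > for_existing_chunk) {
    // Overflow: start a fresh chunk that at least doubles the capacity.
    size_t new_capacity =
        std::max(size - for_existing_chunk, 2 * chunks.back().capacity());
    chunks.emplace_back();
    chunks.back().reserve(new_capacity);
    chunks.back().insert(chunks.back().end(), data + for_existing_chunk,
                         data + size);
  }
}

For example, if the current chunk has 16 KB capacity and already holds 12 KB, receiving 10 KB appends 4 KB to that chunk and puts the remaining 6 KB into a new chunk reserved at 32 KB. Finish() later concatenates all chunks into the single OwnedVector handed to OnFinishedStream.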
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 229219ec22..fe552d4365 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -46,18 +46,17 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
int code_section_start,
int code_section_length) = 0;
- // Process a function body.
- virtual void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
+ // Process a function body. Returns true if the processing finished
+ // successfully and the decoding should continue.
+ virtual bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t offset) = 0;
// Report the end of a chunk.
virtual void OnFinishedChunk() = 0;
- // Report the end of the stream. If the stream was successful, all
- // received bytes are passed by parameter. If there has been an error, an
- // empty array is passed.
- virtual void OnFinishedStream(base::OwnedVector<uint8_t> bytes) = 0;
- // Report an error detected in the StreamingDecoder.
- virtual void OnError(const WasmError&) = 0;
+ // Report the end of the stream. This will be called even after an error has
+ // been detected. In any case, the parameter is the total received bytes.
+ virtual void OnFinishedStream(base::OwnedVector<const uint8_t> bytes,
+ bool after_error) = 0;
// Report the abortion of the stream.
virtual void OnAbort() = 0;
@@ -80,9 +79,9 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
virtual void Abort() = 0;
- // Notify the StreamingDecoder that compilation ended and the
+ // Notify the StreamingDecoder that the job was discarded and the
// StreamingProcessor should not be called anymore.
- virtual void NotifyCompilationEnded() = 0;
+ virtual void NotifyCompilationDiscarded() = 0;
// Caching support.
// Sets the callback that is called after a new chunk of the module is tiered
diff --git a/deps/v8/src/wasm/struct-types.h b/deps/v8/src/wasm/struct-types.h
index 187461d24a..f17ed10279 100644
--- a/deps/v8/src/wasm/struct-types.h
+++ b/deps/v8/src/wasm/struct-types.h
@@ -26,9 +26,7 @@ class StructType : public ZoneObject {
: field_count_(field_count),
field_offsets_(field_offsets),
reps_(reps),
- mutabilities_(mutabilities) {
- InitializeOffsets();
- }
+ mutabilities_(mutabilities) {}
uint32_t field_count() const { return field_count_; }
@@ -66,53 +64,125 @@ class StructType : public ZoneObject {
uint32_t field_offset(uint32_t index) const {
DCHECK_LT(index, field_count());
if (index == 0) return 0;
+ DCHECK(offsets_initialized_);
return field_offsets_[index - 1];
}
uint32_t total_fields_size() const {
return field_count() == 0 ? 0 : field_offsets_[field_count() - 1];
}
+ uint32_t Align(uint32_t offset, uint32_t alignment) {
+ return RoundUp(offset, std::min(alignment, uint32_t{kTaggedSize}));
+ }
+
void InitializeOffsets() {
if (field_count() == 0) return;
+ DCHECK(!offsets_initialized_);
uint32_t offset = field(0).value_kind_size();
+ // Optimization: we track the last gap that was introduced by alignment,
+ // and place any sufficiently-small fields in it.
+ // It's important that the algorithm that assigns offsets to fields is
+ // subtyping-safe, i.e. two lists of fields with a common prefix must
+ // always compute the same offsets for the fields in this common prefix.
+ uint32_t gap_position = 0;
+ uint32_t gap_size = 0;
for (uint32_t i = 1; i < field_count(); i++) {
uint32_t field_size = field(i).value_kind_size();
- // TODO(jkummerow): Don't round up to more than kTaggedSize-alignment.
- offset = RoundUp(offset, field_size);
+ if (field_size <= gap_size) {
+ uint32_t aligned_gap = Align(gap_position, field_size);
+ uint32_t gap_before = aligned_gap - gap_position;
+ uint32_t aligned_gap_size = gap_size - gap_before;
+ if (field_size <= aligned_gap_size) {
+ field_offsets_[i - 1] = aligned_gap;
+ uint32_t gap_after = aligned_gap_size - field_size;
+ if (gap_before > gap_after) {
+ // Keep old {gap_position}.
+ gap_size = gap_before;
+ } else {
+ gap_position = aligned_gap + field_size;
+ gap_size = gap_after;
+ }
+ continue; // Successfully placed the field in the gap.
+ }
+ }
+ uint32_t old_offset = offset;
+ offset = Align(offset, field_size);
+ uint32_t gap = offset - old_offset;
+ if (gap > gap_size) {
+ gap_size = gap;
+ gap_position = old_offset;
+ }
field_offsets_[i - 1] = offset;
offset += field_size;
}
offset = RoundUp(offset, kTaggedSize);
field_offsets_[field_count() - 1] = offset;
+#if DEBUG
+ offsets_initialized_ = true;
+#endif
}
// For incrementally building StructTypes.
class Builder {
public:
+ enum ComputeOffsets : bool {
+ kComputeOffsets = true,
+ kUseProvidedOffsets = false
+ };
+
Builder(Zone* zone, uint32_t field_count)
- : field_count_(field_count),
- zone_(zone),
+ : zone_(zone),
+ field_count_(field_count),
cursor_(0),
+ field_offsets_(zone_->NewArray<uint32_t>(field_count_)),
buffer_(zone->NewArray<ValueType>(static_cast<int>(field_count))),
mutabilities_(zone->NewArray<bool>(static_cast<int>(field_count))) {}
- void AddField(ValueType type, bool mutability) {
+ void AddField(ValueType type, bool mutability, uint32_t offset = 0) {
DCHECK_LT(cursor_, field_count_);
+ if (cursor_ > 0) {
+ field_offsets_[cursor_ - 1] = offset;
+ } else {
+ DCHECK_EQ(0, offset); // First field always has offset 0.
+ }
mutabilities_[cursor_] = mutability;
buffer_[cursor_++] = type;
}
- StructType* Build() {
+ void set_total_fields_size(uint32_t size) {
+ if (field_count_ == 0) {
+ DCHECK_EQ(0, size);
+ return;
+ }
+ field_offsets_[field_count_ - 1] = size;
+ }
+
+ StructType* Build(ComputeOffsets compute_offsets = kComputeOffsets) {
DCHECK_EQ(cursor_, field_count_);
- uint32_t* offsets = zone_->NewArray<uint32_t>(field_count_);
- return zone_->New<StructType>(field_count_, offsets, buffer_,
- mutabilities_);
+ StructType* result = zone_->New<StructType>(field_count_, field_offsets_,
+ buffer_, mutabilities_);
+ if (compute_offsets == kComputeOffsets) {
+ result->InitializeOffsets();
+ } else {
+#if DEBUG
+ bool offsets_specified = true;
+ for (uint32_t i = 0; i < field_count_; i++) {
+ if (field_offsets_[i] == 0) {
+ offsets_specified = false;
+ break;
+ }
+ }
+ result->offsets_initialized_ = offsets_specified;
+#endif
+ }
+ return result;
}
private:
- const uint32_t field_count_;
Zone* const zone_;
+ const uint32_t field_count_;
uint32_t cursor_;
+ uint32_t* field_offsets_;
ValueType* const buffer_;
bool* const mutabilities_;
};
@@ -122,6 +192,9 @@ class StructType : public ZoneObject {
private:
const uint32_t field_count_;
+#if DEBUG
+ bool offsets_initialized_ = false;
+#endif
uint32_t* const field_offsets_;
const ValueType* const reps_;
const bool* const mutabilities_;
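The gap-filling logic in StructType::InitializeOffsets above is easiest to follow on a concrete field list. Below is a self-contained sketch (not V8 code; it assumes kTaggedSize == 8, i.e. no pointer compression, and works on raw field sizes only) that mirrors the same placement rules and checks the offsets for the field list (i64, i8, i32, i8): the i32 is aligned to offset 12, leaving a 3-byte gap at offset 9, and the trailing i8 is placed into that gap instead of growing the struct.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

namespace sketch {

constexpr uint32_t kTaggedSize = 8;  // Assumption: no pointer compression.

uint32_t Align(uint32_t offset, uint32_t alignment) {
  uint32_t a = std::min(alignment, kTaggedSize);
  return (offset + a - 1) / a * a;  // RoundUp(offset, a)
}

// Returns one offset per field plus, as the last element, the total size
// (rounded up to kTaggedSize), mirroring StructType::total_fields_size().
std::vector<uint32_t> ComputeOffsets(const std::vector<uint32_t>& sizes) {
  std::vector<uint32_t> offsets(sizes.size() + 1, 0);
  if (sizes.empty()) return offsets;
  uint32_t offset = sizes[0];  // Field 0 always sits at offset 0.
  uint32_t gap_position = 0;
  uint32_t gap_size = 0;
  for (size_t i = 1; i < sizes.size(); i++) {
    uint32_t field_size = sizes[i];
    if (field_size <= gap_size) {
      // Try to place the field into the tracked alignment gap.
      uint32_t aligned_gap = Align(gap_position, field_size);
      uint32_t gap_before = aligned_gap - gap_position;
      uint32_t aligned_gap_size = gap_size - gap_before;
      if (field_size <= aligned_gap_size) {
        offsets[i] = aligned_gap;
        uint32_t gap_after = aligned_gap_size - field_size;
        if (gap_before > gap_after) {
          gap_size = gap_before;  // Keep the old gap_position.
        } else {
          gap_position = aligned_gap + field_size;
          gap_size = gap_after;
        }
        continue;
      }
    }
    // Place the field at the end, remembering any new alignment gap.
    uint32_t old_offset = offset;
    offset = Align(offset, field_size);
    uint32_t gap = offset - old_offset;
    if (gap > gap_size) {
      gap_size = gap;
      gap_position = old_offset;
    }
    offsets[i] = offset;
    offset += field_size;
  }
  offsets[sizes.size()] = Align(offset, kTaggedSize);
  return offsets;
}

}  // namespace sketch

int main() {
  // Field sizes for (i64, i8, i32, i8).
  std::vector<uint32_t> offsets = sketch::ComputeOffsets({8, 1, 4, 1});
  assert(offsets[0] == 0 && offsets[1] == 8 && offsets[2] == 12 &&
         offsets[3] == 9 && offsets[4] == 16);
  return 0;
}

With pointer compression (kTaggedSize == 4) the alignment cap and the final rounding change, so the concrete numbers would differ.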
diff --git a/deps/v8/src/wasm/sync-streaming-decoder.cc b/deps/v8/src/wasm/sync-streaming-decoder.cc
index ad0ecbdd7d..e447289ade 100644
--- a/deps/v8/src/wasm/sync-streaming-decoder.cc
+++ b/deps/v8/src/wasm/sync-streaming-decoder.cc
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
buffer_.clear();
}
- void NotifyCompilationEnded() override { buffer_.clear(); }
+ void NotifyCompilationDiscarded() override { buffer_.clear(); }
void NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>&) override {
diff --git a/deps/v8/src/wasm/value-type.cc b/deps/v8/src/wasm/value-type.cc
index da184941a6..f84de26a6a 100644
--- a/deps/v8/src/wasm/value-type.cc
+++ b/deps/v8/src/wasm/value-type.cc
@@ -12,21 +12,11 @@ namespace wasm {
base::Optional<wasm::ValueKind> WasmReturnTypeFromSignature(
const FunctionSig* wasm_signature) {
- if (wasm_signature->return_count() == 0) {
- return {};
- } else {
- DCHECK_EQ(wasm_signature->return_count(), 1);
- ValueType return_type = wasm_signature->GetReturn(0);
- switch (return_type.kind()) {
- case kI32:
- case kI64:
- case kF32:
- case kF64:
- return {return_type.kind()};
- default:
- UNREACHABLE();
- }
- }
+ if (wasm_signature->return_count() == 0) return {};
+
+ DCHECK_EQ(wasm_signature->return_count(), 1);
+ ValueType return_type = wasm_signature->GetReturn(0);
+ return {return_type.kind()};
}
#if DEBUG
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 22281a0e6f..e6a15eb81b 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -178,6 +178,8 @@ class HeapType {
return std::string("noextern");
case kNoFunc:
return std::string("nofunc");
+ case kBottom:
+ return std::string("<bot>");
default:
return std::to_string(representation_);
}
@@ -608,11 +610,20 @@ class ValueType {
break;
case kRefNull:
if (heap_type().is_generic()) {
- // TODO(mliedtke): Adapt short names:
- // noneref -> nullref
- // nofuncref -> nullfuncref
- // noexternref -> nullexternref
- buf << heap_type().name() << "ref";
+ switch (heap_type().representation()) {
+ case HeapType::kNone:
+ buf << "nullref";
+ break;
+ case HeapType::kNoExtern:
+ buf << "nullexternref";
+ break;
+ case HeapType::kNoFunc:
+ buf << "nullfuncref";
+ break;
+ default:
+ buf << heap_type().name() << "ref";
+ break;
+ }
} else {
buf << "(ref null " << heap_type().name() << ")";
}
@@ -722,9 +733,6 @@ constexpr ValueType kWasmNullFuncRef = ValueType::RefNull(HeapType::kNoFunc);
// Constants used by the generic js-to-wasm wrapper.
constexpr int kWasmValueKindBitsMask = (1u << ValueType::kKindBits) - 1;
-// This is used in wasm.tq.
-constexpr ValueType kWasmAnyNonNullableRef = ValueType::Ref(HeapType::kAny);
-
#define FOREACH_WASMVALUE_CTYPES(V) \
V(kI32, int32_t) \
V(kI64, int64_t) \
@@ -764,8 +772,8 @@ class LoadType {
: val_(val) {}
constexpr LoadTypeValue value() const { return val_; }
- constexpr unsigned size_log_2() const { return kLoadSizeLog2[val_]; }
- constexpr unsigned size() const { return 1 << size_log_2(); }
+ constexpr uint8_t size_log_2() const { return kLoadSizeLog2[val_]; }
+ constexpr uint8_t size() const { return kLoadSize[val_]; }
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineType mem_type() const { return kMemType[val_]; }
@@ -793,6 +801,15 @@ class LoadType {
private:
const LoadTypeValue val_;
+ static constexpr uint8_t kLoadSize[] = {
+ // MSVC wants a static_cast here.
+#define LOAD_SIZE(_, __, memtype) \
+ static_cast<uint8_t>( \
+ ElementSizeInBytes(MachineType::memtype().representation())),
+ FOREACH_LOAD_TYPE(LOAD_SIZE)
+#undef LOAD_SIZE
+ };
+
static constexpr uint8_t kLoadSizeLog2[] = {
// MSVC wants a static_cast here.
#define LOAD_SIZE(_, __, memtype) \
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 845384606d..5d4f0f5d62 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -42,6 +42,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
+#include "src/wasm/well-known-imports.h"
#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
@@ -58,6 +59,14 @@ namespace wasm {
using trap_handler::ProtectedInstructionData;
+// Check that {WasmCode} objects are sufficiently small. We create many of them,
+// often for rather small functions.
+// Increase the limit if needed, but first check if the size increase is
+// justified.
+#ifndef V8_GC_MOLE
+static_assert(sizeof(WasmCode) <= 88);
+#endif
+
base::AddressRegion DisjointAllocationPool::Merge(
base::AddressRegion new_region) {
// Find the possible insertion position by identifying the first region whose
@@ -505,24 +514,34 @@ void WasmCode::DecrementRefCount(base::Vector<WasmCode* const> code_vec) {
GetWasmEngine()->FreeDeadCode(dead_code);
}
-int WasmCode::GetSourcePositionBefore(int offset) {
- int position = kNoSourcePosition;
+SourcePosition WasmCode::GetSourcePositionBefore(int code_offset) {
+ SourcePosition position;
for (SourcePositionTableIterator iterator(source_positions());
- !iterator.done() && iterator.code_offset() < offset;
+ !iterator.done() && iterator.code_offset() < code_offset;
iterator.Advance()) {
- position = iterator.source_position().ScriptOffset();
+ position = iterator.source_position();
}
return position;
}
-// static
-constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
+int WasmCode::GetSourceOffsetBefore(int code_offset) {
+ return GetSourcePositionBefore(code_offset).ScriptOffset();
+}
+
+std::pair<int, SourcePosition> WasmCode::GetInliningPosition(
+ int inlining_id) const {
+ const size_t elem_size = sizeof(int) + sizeof(SourcePosition);
+ const byte* start = inlining_positions().begin() + elem_size * inlining_id;
+ DCHECK_LE(start, inlining_positions().end());
+ std::pair<int, SourcePosition> result;
+ std::memcpy(&result.first, start, sizeof result.first);
+ std::memcpy(&result.second, start + sizeof result.first,
+ sizeof result.second);
+ return result;
+}
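The new GetInliningPosition() reads from a flat byte blob in which each element is an int immediately followed by a SourcePosition, packed without padding. A minimal stand-alone sketch of that packing and unpacking, using an 8-byte stand-in for SourcePosition (an assumption for illustration, not the V8 type):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <utility>
#include <vector>

struct FakePosition { int64_t raw; };  // 8-byte stand-in for SourcePosition

static std::vector<uint8_t> Pack(
    const std::vector<std::pair<int, FakePosition>>& entries) {
  const size_t elem_size = sizeof(int) + sizeof(FakePosition);
  std::vector<uint8_t> blob(elem_size * entries.size());
  uint8_t* dst = blob.data();
  for (const auto& [id, pos] : entries) {
    std::memcpy(dst, &id, sizeof id);
    std::memcpy(dst + sizeof id, &pos, sizeof pos);
    dst += elem_size;
  }
  return blob;
}

static std::pair<int, FakePosition> Unpack(const std::vector<uint8_t>& blob,
                                           int inlining_id) {
  const size_t elem_size = sizeof(int) + sizeof(FakePosition);
  const uint8_t* start = blob.data() + elem_size * inlining_id;
  std::pair<int, FakePosition> result;
  std::memcpy(&result.first, start, sizeof result.first);
  std::memcpy(&result.second, start + sizeof result.first,
              sizeof result.second);
  return result;
}

int main() {
  std::vector<uint8_t> blob = Pack({{3, {100}}, {7, {250}}});
  auto [id, pos] = Unpack(blob, 1);
  std::printf("entry 1: %d @ %lld\n", id, static_cast<long long>(pos.raw));
  // Prints: entry 1: 7 @ 250
}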
WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
- : protect_code_memory_(!V8_HAS_PTHREAD_JIT_WRITE_PROTECT &&
- v8_flags.wasm_write_protect_code_memory &&
- !WasmCodeManager::MemoryProtectionKeysEnabled()),
- async_counters_(std::move(async_counters)) {
+ : async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
}
@@ -607,7 +626,8 @@ size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
return overhead;
}
-// Returns an estimate how much code space should be reserved.
+// Returns an estimate of how much code space should be reserved. This can be
+// smaller than the passed-in {code_size_estimate}, see comments in the code.
size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
size_t total_reserved) {
size_t overhead = OverheadPerCodeSpace(num_declared_functions);
@@ -616,91 +636,38 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
// a) needed size + overhead (this is the minimum needed)
// b) 2 * overhead (to not waste too much space by overhead)
// c) 1/4 of current total reservation size (to grow exponentially)
+ // For the minimum size we only take the overhead into account and not the
+ // code space estimate, for two reasons:
+ // - The code space estimate is only an estimate; we might actually need less
+ // space later.
+ // - When called at module construction time we pass the estimate for all
+ // code in the module; this can still be split up into multiple spaces
+ // later.
size_t minimum_size = 2 * overhead;
size_t suggested_size =
std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
minimum_size),
total_reserved / 4);
- if (V8_UNLIKELY(minimum_size > WasmCodeAllocator::kMaxCodeSpaceSize)) {
+ const size_t max_code_space_size =
+ size_t{v8_flags.wasm_max_code_space_size_mb} * MB;
+ if (V8_UNLIKELY(minimum_size > max_code_space_size)) {
auto oom_detail = base::FormattedString{}
<< "required reservation minimum (" << minimum_size
<< ") is bigger than supported maximum ("
- << WasmCodeAllocator::kMaxCodeSpaceSize << ")";
+ << max_code_space_size << ")";
V8::FatalProcessOutOfMemory(nullptr,
"Exceeding maximum wasm code space size",
oom_detail.PrintToArray().data());
UNREACHABLE();
}
- // Limit by the maximum supported code space size.
- size_t reserve_size =
- std::min(WasmCodeAllocator::kMaxCodeSpaceSize, suggested_size);
+ // Limit by the maximum code space size.
+ size_t reserve_size = std::min(max_code_space_size, suggested_size);
return reserve_size;
}
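The three-way size policy above (a/b/c) is easiest to see with numbers. Here is a stand-alone sketch with assumed values for the per-code-space overhead and the --wasm-max-code-space-size-mb limit (alignment rounding omitted; not V8 code):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t MB = 1024 * 1024;
  const size_t overhead = 64 * 1024;             // assumed jump-table overhead
  const size_t max_code_space_size = 1024 * MB;  // assumed flag value
  const size_t code_size_estimate = 3 * MB;
  const size_t total_reserved = 16 * MB;

  size_t minimum_size = 2 * overhead;
  size_t suggested_size =
      std::max({code_size_estimate + overhead,  // a) needed size + overhead
                minimum_size,                   // b) 2 * overhead
                total_reserved / 4});           // c) grow exponentially
  size_t reserve_size = std::min(max_code_space_size, suggested_size);
  std::printf("minimum %zu, suggested %zu, reserved %zu bytes\n",
              minimum_size, suggested_size, reserve_size);
  // Here c) wins: 16 MB / 4 = 4 MB > 3 MB + 64 KB, so 4 MB gets reserved even
  // though only ~3 MB of code were requested.
}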
-#ifdef DEBUG
-// Check postconditions when returning from this method:
-// 1) {region} must be fully contained in {writable_memory_};
-// 2) {writable_memory_} must be a maximally merged ordered set of disjoint
-// non-empty regions.
-class CheckWritableMemoryRegions {
- public:
- CheckWritableMemoryRegions(
- std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
- writable_memory,
- base::AddressRegion new_region, size_t& new_writable_memory)
- : writable_memory_(writable_memory),
- new_region_(new_region),
- new_writable_memory_(new_writable_memory),
- old_writable_size_(std::accumulate(
- writable_memory_.begin(), writable_memory_.end(), size_t{0},
- [](size_t old, base::AddressRegion region) {
- return old + region.size();
- })) {}
-
- ~CheckWritableMemoryRegions() {
- // {new_region} must be contained in {writable_memory_}.
- DCHECK(std::any_of(
- writable_memory_.begin(), writable_memory_.end(),
- [this](auto region) { return region.contains(new_region_); }));
-
- // The new total size of writable memory must have increased by
- // {new_writable_memory}.
- size_t total_writable_size = std::accumulate(
- writable_memory_.begin(), writable_memory_.end(), size_t{0},
- [](size_t old, auto region) { return old + region.size(); });
- DCHECK_EQ(old_writable_size_ + new_writable_memory_, total_writable_size);
-
- // There are no empty regions.
- DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
- [](auto region) { return region.is_empty(); }));
-
- // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
- // so USE is required to prevent build failures in debug builds).
- USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
- Address{0}, [](Address previous_end, auto region) {
- DCHECK_LT(previous_end, region.begin());
- return region.end();
- }));
- }
-
- private:
- const std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
- writable_memory_;
- const base::AddressRegion new_region_;
- const size_t& new_writable_memory_;
- const size_t old_writable_size_;
-};
-#else // !DEBUG
-class CheckWritableMemoryRegions {
- public:
- template <typename... Args>
- explicit CheckWritableMemoryRegions(Args...) {}
-};
-#endif // !DEBUG
-
// Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
// restriction on the region to allocate in.
constexpr base::AddressRegion kUnrestrictedRegion{
@@ -733,6 +700,14 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
size_t reserve_size = ReservationSize(
size, native_module->module()->num_declared_functions, total_reserved);
+ if (reserve_size < size) {
+ auto oom_detail = base::FormattedString{}
+ << "cannot reserve space for " << size
+                       << " bytes of code (maximum reservation size is "
+ << reserve_size << ")";
+ V8::FatalProcessOutOfMemory(nullptr, "Grow wasm code space",
+ oom_detail.PrintToArray().data());
+ }
VirtualMemory new_mem =
code_manager->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) {
@@ -751,15 +726,13 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
native_module->AddCodeSpaceLocked(new_region);
code_space = free_code_space_.Allocate(size);
- DCHECK(!code_space.is_empty());
+ CHECK(!code_space.is_empty());
+
async_counters_->wasm_module_num_code_spaces()->AddSample(
static_cast<int>(owned_code_space_.size()));
}
const Address commit_page_size = CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
- if (commit_start != code_space.begin()) {
- MakeWritable({commit_start - commit_page_size, commit_page_size});
- }
Address commit_end = RoundUp(code_space.end(), commit_page_size);
// {commit_start} will be either code_space.start or the start of the next
@@ -777,12 +750,8 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
committed_code_space_.fetch_add(commit_end - commit_start);
// Committed code cannot grow bigger than maximum code space size.
- DCHECK_LE(committed_code_space_.load(), v8_flags.wasm_max_code_space * MB);
- if (protect_code_memory_) {
- DCHECK_LT(0, writers_count_);
- InsertIntoWritableRegions({commit_start, commit_end - commit_start},
- false);
- }
+ DCHECK_LE(committed_code_space_.load(),
+ v8_flags.wasm_max_committed_code_mb * MB);
}
DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
@@ -792,52 +761,6 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
-// TODO(dlehmann): Ensure that {AddWriter()} is always paired up with a
-// {RemoveWriter}, such that eventually the code space is write protected.
-// One solution is to make the API foolproof by hiding {SetWritable()} and
-// allowing change of permissions only through {CodeSpaceWriteScope}.
-// TODO(dlehmann): Add tests that ensure the code space is eventually write-
-// protected.
-void WasmCodeAllocator::AddWriter() {
- DCHECK(protect_code_memory_);
- ++writers_count_;
-}
-
-void WasmCodeAllocator::RemoveWriter() {
- DCHECK(protect_code_memory_);
- DCHECK_GT(writers_count_, 0);
- if (--writers_count_ > 0) return;
-
- // Switch all memory to non-writable.
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- for (base::AddressRegion writable : writable_memory_) {
- for (base::AddressRegion split_range :
- SplitRangeByReservationsIfNeeded(writable, owned_code_space_)) {
- TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RX\n",
- split_range.begin(), split_range.end());
- CHECK(SetPermissions(page_allocator, split_range.begin(),
- split_range.size(), PageAllocator::kReadExecute));
- }
- }
- writable_memory_.clear();
-}
-
-void WasmCodeAllocator::MakeWritable(base::AddressRegion region) {
- if (!protect_code_memory_) return;
- DCHECK_LT(0, writers_count_);
- DCHECK(!region.is_empty());
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-
- // Align to commit page size.
- size_t commit_page_size = page_allocator->CommitPageSize();
- DCHECK(base::bits::IsPowerOfTwo(commit_page_size));
- Address begin = RoundDown(region.begin(), commit_page_size);
- Address end = RoundUp(region.end(), commit_page_size);
- region = base::AddressRegion(begin, end - begin);
-
- InsertIntoWritableRegions(region, true);
-}
-
void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
// Zap code area and collect freed code regions.
DisjointAllocationPool freed_regions;
@@ -882,84 +805,6 @@ size_t WasmCodeAllocator::GetNumCodeSpaces() const {
return owned_code_space_.size();
}
-void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region,
- bool switch_to_writable) {
- size_t new_writable_memory = 0;
-
- CheckWritableMemoryRegions check_on_return{writable_memory_, region,
- new_writable_memory};
-
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- // Subroutine to make a non-writable region writable (if {switch_to_writable}
- // is {true}) and insert it into {writable_memory_}.
- auto make_writable = [&](decltype(writable_memory_)::iterator insert_pos,
- base::AddressRegion region) {
- new_writable_memory += region.size();
- if (switch_to_writable) {
- for (base::AddressRegion split_range :
- SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
- TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RWX\n",
- split_range.begin(), split_range.end());
- CHECK(SetPermissions(page_allocator, split_range.begin(),
- split_range.size(),
- PageAllocator::kReadWriteExecute));
- }
- }
-
- // Insert {region} into {writable_memory_} before {insert_pos}, potentially
- // merging it with the surrounding regions.
- if (insert_pos != writable_memory_.begin()) {
- auto previous = insert_pos;
- --previous;
- if (previous->end() == region.begin()) {
- region = {previous->begin(), previous->size() + region.size()};
- writable_memory_.erase(previous);
- }
- }
- if (insert_pos != writable_memory_.end() &&
- region.end() == insert_pos->begin()) {
- region = {region.begin(), insert_pos->size() + region.size()};
- insert_pos = writable_memory_.erase(insert_pos);
- }
- writable_memory_.insert(insert_pos, region);
- };
-
- DCHECK(!region.is_empty());
- // Find a possible insertion position by identifying the first region whose
- // start address is not less than that of {new_region}, and the starting the
- // merge from the existing region before that.
- auto it = writable_memory_.lower_bound(region);
- if (it != writable_memory_.begin()) --it;
- for (;; ++it) {
- if (it == writable_memory_.end() || it->begin() >= region.end()) {
- // No overlap; add before {it}.
- make_writable(it, region);
- return;
- }
- if (it->end() <= region.begin()) continue; // Continue after {it}.
- base::AddressRegion overlap = it->GetOverlap(region);
- DCHECK(!overlap.is_empty());
- if (overlap.begin() == region.begin()) {
- if (overlap.end() == region.end()) return; // Fully contained already.
- // Remove overlap (which is already writable) and continue.
- region = {overlap.end(), region.end() - overlap.end()};
- continue;
- }
- if (overlap.end() == region.end()) {
- // Remove overlap (which is already writable), then make the remaining
- // region writable.
- region = {region.begin(), overlap.begin() - region.begin()};
- make_writable(it, region);
- return;
- }
- // Split {region}, make the split writable, and continue with the rest.
- base::AddressRegion split = {region.begin(),
- overlap.begin() - region.begin()};
- make_writable(it, split);
- region = {overlap.end(), region.end() - overlap.end()};
- }
-}
-
namespace {
BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
if (!v8_flags.wasm_bounds_checks) return kNoBoundsChecks;
@@ -1033,6 +878,8 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
JumpTableAssembler::SizeForNumberOfSlots(max_functions),
single_code_space_region);
code_space_data_[0].jump_table = main_jump_table_;
+ CodeSpaceWriteScope code_space_write_scope(this);
+ InitializeJumpTableForLazyCompilation(max_functions);
}
void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
@@ -1045,7 +892,8 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
Object url_obj = script.name();
DCHECK(url_obj.IsString() || url_obj.IsUndefined());
std::unique_ptr<char[]> source_url =
- url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr;
+ url_obj.IsString() ? String::cast(url_obj).ToCString()
+ : std::unique_ptr<char[]>(new char[1]{'\0'});
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
@@ -1069,26 +917,26 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
base::Vector<byte>{code->relocation_start(), relocation_size});
}
Handle<ByteArray> source_pos_table(code->source_position_table(),
- code->GetIsolate());
+ code->instruction_stream().GetIsolate());
base::OwnedVector<byte> source_pos =
base::OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
if (source_pos_table->length() > 0) {
- source_pos_table->copy_out(0, source_pos.start(),
+ source_pos_table->copy_out(0, source_pos.begin(),
source_pos_table->length());
}
- CHECK(!code->is_off_heap_trampoline());
- static_assert(Code::kOnHeapBodyIsContiguous);
+ static_assert(InstructionStream::kOnHeapBodyIsContiguous);
base::Vector<const byte> instructions(
- reinterpret_cast<byte*>(code->raw_body_start()),
- static_cast<size_t>(code->raw_body_size()));
+ reinterpret_cast<byte*>(code->body_start()),
+ static_cast<size_t>(code->body_size()));
const int stack_slots = code->stack_slots();
- // Metadata offsets in Code objects are relative to the start of the metadata
- // section, whereas WasmCode expects offsets relative to InstructionStart.
- const int base_offset = code->raw_instruction_size();
+ // Metadata offsets in InstructionStream objects are relative to the start of
+ // the metadata section, whereas WasmCode expects offsets relative to
+ // InstructionStart.
+ const int base_offset = code->instruction_size();
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
- // Code objects contains real offsets but WasmCode expects an offset of 0 to
- // mean 'empty'.
+  // InstructionStream objects contain real offsets but WasmCode expects an
+ // offset of 0 to mean 'empty'.
const int safepoint_table_offset =
code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
: 0;
@@ -1103,7 +951,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
- code->raw_instruction_start();
+ code->InstructionStart();
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
@@ -1129,6 +977,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Flush the i-cache after relocation.
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
+ // FIXME(mliedtke): Get inlining positions from input.
std::unique_ptr<WasmCode> new_code{
new WasmCode{this, // native_module
kAnonymousFuncIndex, // index
@@ -1143,16 +992,46 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
{}, // protected_instructions
reloc_info.as_vector(), // reloc_info
source_pos.as_vector(), // source positions
+ {}, // inlining positions
WasmCode::kWasmFunction, // kind
ExecutionTier::kNone, // tier
- kNoDebugging}}; // for_debugging
+ kNotForDebugging}}; // for_debugging
new_code->MaybePrint();
new_code->Validate();
return PublishCodeLocked(std::move(new_code));
}
-void NativeModule::UseLazyStub(uint32_t func_index) {
+void NativeModule::InitializeJumpTableForLazyCompilation(
+ uint32_t num_wasm_functions) {
+ if (!num_wasm_functions) return;
+ allocation_mutex_.AssertHeld();
+ DCHECK(CodeSpaceWriteScope::IsInScope());
+
+ DCHECK_NULL(lazy_compile_table_);
+ lazy_compile_table_ = CreateEmptyJumpTableLocked(
+ JumpTableAssembler::SizeForNumberOfLazyFunctions(num_wasm_functions));
+
+ DCHECK_EQ(1, code_space_data_.size());
+ const CodeSpaceData& code_space_data = code_space_data_[0];
+ DCHECK_NOT_NULL(code_space_data.jump_table);
+ DCHECK_NOT_NULL(code_space_data.far_jump_table);
+
+ Address compile_lazy_address =
+ code_space_data.far_jump_table->instruction_start() +
+ JumpTableAssembler::FarJumpSlotIndexToOffset(WasmCode::kWasmCompileLazy);
+
+ JumpTableAssembler::GenerateLazyCompileTable(
+ lazy_compile_table_->instruction_start(), num_wasm_functions,
+ module_->num_imported_functions, compile_lazy_address);
+
+ JumpTableAssembler::InitializeJumpsToLazyCompileTable(
+ code_space_data.jump_table->instruction_start(), num_wasm_functions,
+ lazy_compile_table_->instruction_start());
+}
+
+void NativeModule::UseLazyStubLocked(uint32_t func_index) {
+ allocation_mutex_.AssertHeld();
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
@@ -1160,20 +1039,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
// scope instead.
DCHECK(CodeSpaceWriteScope::IsInScope());
- base::RecursiveMutexGuard guard(&allocation_mutex_);
- if (!lazy_compile_table_) {
- uint32_t num_slots = module_->num_declared_functions;
- WasmCodeRefScope code_ref_scope;
- lazy_compile_table_ = CreateEmptyJumpTableLocked(
- JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
- Address compile_lazy_address = GetNearRuntimeStubEntry(
- WasmCode::kWasmCompileLazy,
- FindJumpTablesForRegionLocked(
- base::AddressRegionOf(lazy_compile_table_->instructions())));
- JumpTableAssembler::GenerateLazyCompileTable(
- lazy_compile_table_->instruction_start(), num_slots,
- module_->num_imported_functions, compile_lazy_address);
- }
+ DCHECK_NOT_NULL(lazy_compile_table_);
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = declared_function_index(module(), func_index);
@@ -1191,6 +1057,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
base::Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging) {
base::Vector<byte> code_space;
+ base::Vector<byte> inlining_positions;
NativeModule::JumpTablesRef jump_table_ref;
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
@@ -1198,9 +1065,13 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
jump_table_ref =
FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
}
+ // Only Liftoff code can have the {frame_has_feedback_slot} bit set.
+ DCHECK_NE(tier, ExecutionTier::kLiftoff);
+ bool frame_has_feedback_slot = false;
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
- source_position_table, kind, tier, for_debugging,
+ source_position_table, inlining_positions, kind,
+ tier, for_debugging, frame_has_feedback_slot,
code_space, jump_table_ref);
}
@@ -1208,9 +1079,11 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
int index, const CodeDesc& desc, int stack_slots,
uint32_t tagged_parameter_slots,
base::Vector<const byte> protected_instructions_data,
- base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ base::Vector<const byte> source_position_table,
+ base::Vector<const byte> inlining_positions, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging,
- base::Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
+ bool frame_has_feedback_slot, base::Vector<uint8_t> dst_code_bytes,
+ const JumpTablesRef& jump_tables) {
base::Vector<byte> reloc_info{
desc.buffer + desc.buffer_size - desc.reloc_size,
static_cast<size_t>(desc.reloc_size)};
@@ -1266,7 +1139,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, instr_size, protected_instructions_data, reloc_info,
- source_position_table, kind, tier, for_debugging}};
+ source_position_table, inlining_positions, kind, tier, for_debugging,
+ frame_has_feedback_slot}};
code->MaybePrint();
code->Validate();
@@ -1274,10 +1148,26 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
return code;
}
-WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
+WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code,
+ AssumptionsJournal* assumptions) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.PublishCode");
base::RecursiveMutexGuard lock(&allocation_mutex_);
+ if (assumptions != nullptr) {
+ // Acquiring the lock is expensive, so callers should only pass non-empty
+ // assumptions journals.
+ DCHECK(!assumptions->empty());
+ // Only Turbofan makes assumptions.
+ DCHECK_EQ(ExecutionTier::kTurbofan, code->tier());
+ WellKnownImportsList& current = module_->type_feedback.well_known_imports;
+ base::MutexGuard wki_lock(current.mutex());
+ for (auto [import_index, status] : assumptions->import_statuses()) {
+ if (current.get(import_index) != status) {
+ compilation_state_->AllowAnotherTopTierJob(code->index());
+ return nullptr;
+ }
+ }
+ }
CodeSpaceWriteScope code_space_write_scope(this);
return PublishCodeLocked(std::move(code));
}
@@ -1345,20 +1235,10 @@ WasmCode* NativeModule::PublishCodeLocked(
// code table of jump table). Otherwise, install code if it was compiled
// with a higher tier.
static_assert(
- kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
+ kForDebugging > kNotForDebugging && kWithBreakpoints > kForDebugging,
"for_debugging is ordered");
- const bool update_code_table =
- // Never install stepping code.
- code->for_debugging() != kForStepping &&
- (!prior_code ||
- (tiering_state_ == kTieredDown
- // Tiered down: Install breakpoints over normal debug code.
- ? prior_code->for_debugging() <= code->for_debugging()
- // Tiered up: Install if the tier is higher than before or we
- // replace debugging code with non-debugging code.
- : (prior_code->tier() < code->tier() ||
- (prior_code->for_debugging() && !code->for_debugging()))));
- if (update_code_table) {
+
+ if (should_update_code_table(code, prior_code)) {
code_table_[slot_idx] = code;
if (prior_code) {
WasmCodeRefScope::AddRef(prior_code);
@@ -1378,6 +1258,32 @@ WasmCode* NativeModule::PublishCodeLocked(
return code;
}
+bool NativeModule::should_update_code_table(WasmCode* new_code,
+ WasmCode* prior_code) const {
+ if (new_code->for_debugging() == kForStepping) {
+ // Never install stepping code.
+ return false;
+ }
+ if (debug_state_ == kDebugging) {
+ if (new_code->for_debugging() == kNotForDebugging) {
+ // In debug state, only install debug code.
+ return false;
+ }
+ if (prior_code && prior_code->for_debugging() > new_code->for_debugging()) {
+ // In debug state, install breakpoints over normal debug code.
+ return false;
+ }
+ }
+ // In kNoDebugging:
+ // Install if the tier is higher than before or we replace debugging code with
+ // non-debugging code.
+ if (prior_code && !prior_code->for_debugging() &&
+ prior_code->tier() > new_code->tier()) {
+ return false;
+ }
+ return true;
+}
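To make the precedence of the checks above explicit, here is a stand-alone restatement with simplified enums and plain integers for tiers (an illustration only, not the V8 types):

enum ForDebugging { kNotForDebugging = 0, kForDebugging, kWithBreakpoints, kForStepping };
enum DebugState { kNotDebugging, kDebugging };

// Returns whether the new code should replace the prior code table entry.
bool ShouldUpdate(DebugState state, ForDebugging new_dbg, int new_tier,
                  bool has_prior, ForDebugging prior_dbg, int prior_tier) {
  if (new_dbg == kForStepping) return false;  // never install stepping code
  if (state == kDebugging) {
    if (new_dbg == kNotForDebugging) return false;  // only debug code now
    if (has_prior && prior_dbg > new_dbg) return false;  // keep breakpoints
  }
  // Never replace non-debug code by a strictly lower tier.
  if (has_prior && prior_dbg == kNotForDebugging && prior_tier > new_tier) {
    return false;
  }
  return true;
}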
+
void NativeModule::ReinstallDebugCode(WasmCode* code) {
base::RecursiveMutexGuard lock(&allocation_mutex_);
@@ -1388,7 +1294,7 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
DCHECK_LT(code->index(), num_functions());
// If the module is tiered up by now, do not reinstall debug code.
- if (tiering_state_ != kTieredDown) return;
+ if (debug_state_ != kDebugging) return;
uint32_t slot_idx = declared_function_index(module(), code->index());
if (WasmCode* prior_code = code_table_[slot_idx]) {
@@ -1421,25 +1327,32 @@ std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
int code_comments_offset, int unpadded_binary_size,
base::Vector<const byte> protected_instructions_data,
base::Vector<const byte> reloc_info,
- base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ base::Vector<const byte> source_position_table,
+ base::Vector<const byte> inlining_positions, WasmCode::Kind kind,
ExecutionTier tier) {
- UpdateCodeSize(instructions.size(), tier, kNoDebugging);
+ UpdateCodeSize(instructions.size(), tier, kNotForDebugging);
return std::unique_ptr<WasmCode>{new WasmCode{
this, index, instructions, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, unpadded_binary_size, protected_instructions_data,
- reloc_info, source_position_table, kind, tier, kNoDebugging}};
+ reloc_info, source_position_table, inlining_positions, kind, tier,
+ kNotForDebugging}};
}
-std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
+std::pair<std::vector<WasmCode*>, std::vector<WellKnownImport>>
+NativeModule::SnapshotCodeTable() const {
base::RecursiveMutexGuard lock(&allocation_mutex_);
WasmCode** start = code_table_.get();
WasmCode** end = start + module_->num_declared_functions;
for (WasmCode* code : base::VectorOf(start, end - start)) {
if (code) WasmCodeRefScope::AddRef(code);
}
- return std::vector<WasmCode*>{start, end};
+ std::vector<WellKnownImport> import_statuses(module_->num_imported_functions);
+ for (uint32_t i = 0; i < module_->num_imported_functions; i++) {
+ import_statuses[i] = module_->type_feedback.well_known_imports.get(i);
+ }
+ return {std::vector<WasmCode*>{start, end}, std::move(import_statuses)};
}
std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
@@ -1494,7 +1407,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
base::Vector<uint8_t> code_space =
code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
- UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
+ UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNotForDebugging);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
new WasmCode{this, // native_module
@@ -1510,15 +1423,16 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
{}, // protected_instructions
{}, // reloc_info
{}, // source_pos
+ {}, // inlining pos
WasmCode::kJumpTable, // kind
ExecutionTier::kNone, // tier
- kNoDebugging}}; // for_debugging
+ kNotForDebugging}}; // for_debugging
return PublishCodeLocked(std::move(code));
}
void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
ForDebugging for_debugging) {
- if (for_debugging != kNoDebugging) return;
+ if (for_debugging != kNotForDebugging) return;
// Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
// this is shared code.
if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
@@ -1542,22 +1456,6 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
DCHECK_NOT_NULL(code_space_data.jump_table);
DCHECK_NOT_NULL(code_space_data.far_jump_table);
- // Jump tables are often allocated next to each other, so we can switch
- // permissions on both at the same time.
- if (code_space_data.jump_table->instructions().end() ==
- code_space_data.far_jump_table->instructions().begin()) {
- base::Vector<uint8_t> jump_tables_space = base::VectorOf(
- code_space_data.jump_table->instructions().begin(),
- code_space_data.jump_table->instructions().size() +
- code_space_data.far_jump_table->instructions().size());
- code_allocator_.MakeWritable(AddressRegionOf(jump_tables_space));
- } else {
- code_allocator_.MakeWritable(
- AddressRegionOf(code_space_data.jump_table->instructions()));
- code_allocator_.MakeWritable(
- AddressRegionOf(code_space_data.far_jump_table->instructions()));
- }
-
DCHECK_LT(slot_index, module_->num_declared_functions);
Address jump_table_slot =
code_space_data.jump_table->instruction_start() +
@@ -1656,6 +1554,10 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
+ if (is_first_code_space) {
+ InitializeJumpTableForLazyCompilation(num_wasm_functions);
+ }
+
if (jump_table && !is_first_code_space) {
// Patch the new jump table(s) with existing functions. If this is the first
// code space, there cannot be any functions that have been compiled yet.
@@ -1710,15 +1612,6 @@ void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {
}
}
-void NativeModule::UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier) {
- if (!compilation_state_->baseline_compilation_finished()) {
- baseline_compilation_cpu_duration_.fetch_add(cpu_duration,
- std::memory_order_relaxed);
- } else if (tier == ExecutionTier::kTurbofan) {
- tier_up_cpu_duration_.fetch_add(cpu_duration, std::memory_order_relaxed);
- }
-}
-
void NativeModule::AddLazyCompilationTimeSample(int64_t sample_in_micro_sec) {
num_lazy_compilations_.fetch_add(1, std::memory_order_relaxed);
sum_lazy_compilation_time_in_micro_sec_.fetch_add(sample_in_micro_sec,
@@ -1790,6 +1683,9 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
base::AddressRegion code_region) const {
allocation_mutex_.AssertHeld();
auto jump_table_usable = [code_region](const WasmCode* jump_table) {
+ // We only ever need to check for suitable jump tables if
+ // {kNeedsFarJumpsBetweenCodeSpaces} is true.
+ if constexpr (!kNeedsFarJumpsBetweenCodeSpaces) UNREACHABLE();
Address table_start = jump_table->instruction_start();
Address table_end = table_start + jump_table->instructions().size();
// Compute the maximum distance from anywhere in the code region to anywhere
@@ -1797,11 +1693,13 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
size_t max_distance = std::max(
code_region.end() > table_start ? code_region.end() - table_start : 0,
table_end > code_region.begin() ? table_end - code_region.begin() : 0);
- // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
- // every call or jump will target an address *within* the region, but never
- // exactly the end of the region. So all occuring offsets are actually
- // smaller than max_distance.
- return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
+ // kDefaultMaxWasmCodeSpaceSizeMb is <= the maximum near call distance on
+ // the current platform.
+ // We can allow a max_distance that is equal to
+ // kDefaultMaxWasmCodeSpaceSizeMb, because every call or jump will target an
+ // address *within* the region, but never exactly the end of the region. So
+  // all occurring offsets are actually smaller than max_distance.
+ return max_distance <= kDefaultMaxWasmCodeSpaceSizeMb * MB;
};
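A quick numeric illustration of the distance bound above, with made-up addresses and a 128 MB limit (the arm64 value that the removed kMaxCodeSpaceSize constant used); not V8 code:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t table_start = 0x10000000, table_end = 0x10002000;
  const uint64_t region_begin = 0x14000000, region_end = 0x15000000;
  uint64_t max_distance = std::max(
      region_end > table_start ? region_end - table_start : 0,
      table_end > region_begin ? table_end - region_begin : 0);
  const uint64_t kAssumedMaxCodeSpaceSize = 128ull * 1024 * 1024;
  std::printf("max distance: 0x%llx, jump table usable: %d\n",
              static_cast<unsigned long long>(max_distance),
              max_distance <= kAssumedMaxCodeSpaceSize);
  // 0x15000000 - 0x10000000 = 80 MB <= 128 MB, so this jump table is usable.
}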
for (auto& code_space_data : code_space_data_) {
@@ -1892,8 +1790,13 @@ NativeModule::~NativeModule() {
}
WasmCodeManager::WasmCodeManager()
- : max_committed_code_space_(v8_flags.wasm_max_code_space * MB),
- critical_committed_code_space_(max_committed_code_space_ / 2) {}
+ : max_committed_code_space_(v8_flags.wasm_max_committed_code_mb * MB),
+ critical_committed_code_space_(max_committed_code_space_ / 2) {
+ // Check that --wasm-max-code-space-size-mb is not set bigger than the default
+ // value. Otherwise we run into DCHECKs or other crashes later.
+ CHECK_GE(kDefaultMaxWasmCodeSpaceSizeMb,
+ v8_flags.wasm_max_code_space_size_mb);
+}
WasmCodeManager::~WasmCodeManager() {
// No more committed code space.
@@ -1932,23 +1835,13 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
break;
}
}
- // Even when we employ W^X with v8_flags.wasm_write_protect_code_memory ==
- // true, code pages need to be initially allocated with RWX permission because
- // of concurrent compilation/execution. For this reason there is no
- // distinction here based on v8_flags.wasm_write_protect_code_memory.
- // TODO(dlehmann): This allocates initially as writable and executable, and
- // as such is not safe-by-default. In particular, if
- // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
- // because no {CodeSpaceWriteScope} is created), the writable permission is
- // never withdrawn.
- // One potential fix is to allocate initially with kReadExecute only, which
- // forces all compilation threads to add the missing {CodeSpaceWriteScope}s
- // before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is
- // open when calling this method.
+ // Allocate with RWX permissions; this will be restricted via PKU if
+ // available and enabled.
PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
bool success = false;
if (MemoryProtectionKeysEnabled()) {
+ DCHECK(CodeSpaceWriteScope::IsInScope());
#if V8_HAS_PKU_JIT_WRITE_PROTECT
TRACE_HEAP(
"Setting rwx permissions and memory protection key for 0x%" PRIxPTR
@@ -1987,8 +1880,14 @@ void WasmCodeManager::Decommit(base::AddressRegion region) {
USE(old_committed);
TRACE_HEAP("Decommitting system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
region.begin(), region.end());
- CHECK(allocator->DecommitPages(reinterpret_cast<void*>(region.begin()),
- region.size()));
+ if (V8_UNLIKELY(!allocator->DecommitPages(
+ reinterpret_cast<void*>(region.begin()), region.size()))) {
+ // Decommit can fail in near-OOM situations.
+ auto oom_detail = base::FormattedString{} << "region size: "
+ << region.size();
+ V8::FatalProcessOutOfMemory(nullptr, "Decommit Wasm code space",
+ oom_detail.PrintToArray().data());
+ }
}
void WasmCodeManager::AssignRange(base::AddressRegion region,
@@ -2289,34 +2188,20 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
return ret;
}
-void NativeModule::SampleCodeSize(
- Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
- size_t code_size = sampling_time == kSampling
- ? code_allocator_.committed_code_space()
- : code_allocator_.generated_code_size();
+void NativeModule::SampleCodeSize(Counters* counters) const {
+ size_t code_size = code_allocator_.committed_code_space();
int code_size_mb = static_cast<int>(code_size / MB);
- Histogram* histogram = nullptr;
- switch (sampling_time) {
- case kAfterBaseline:
- histogram = counters->wasm_module_code_size_mb_after_baseline();
- break;
- case kSampling: {
- histogram = counters->wasm_module_code_size_mb();
- // If this is a wasm module of >= 2MB, also sample the freed code size,
- // absolute and relative. Code GC does not happen on asm.js modules, and
- // small modules will never trigger GC anyway.
- size_t generated_size = code_allocator_.generated_code_size();
- if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
- size_t freed_size = code_allocator_.freed_code_size();
- DCHECK_LE(freed_size, generated_size);
- int freed_percent = static_cast<int>(100 * freed_size / generated_size);
- counters->wasm_module_freed_code_size_percent()->AddSample(
- freed_percent);
- }
- break;
- }
+ counters->wasm_module_code_size_mb()->AddSample(code_size_mb);
+ // If this is a wasm module of >= 2MB, also sample the freed code size,
+ // absolute and relative. Code GC does not happen on asm.js
+ // modules, and small modules will never trigger GC anyway.
+ size_t generated_size = code_allocator_.generated_code_size();
+ if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
+ size_t freed_size = code_allocator_.freed_code_size();
+ DCHECK_LE(freed_size, generated_size);
+ int freed_percent = static_cast<int>(100 * freed_size / generated_size);
+ counters->wasm_module_freed_code_size_percent()->AddSample(freed_percent);
}
- histogram->AddSample(code_size_mb);
}
std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
@@ -2330,11 +2215,38 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.AddCompiledCode", "num", results.size());
DCHECK(!results.empty());
+ std::vector<std::unique_ptr<WasmCode>> generated_code;
+ generated_code.reserve(results.size());
+
// First, allocate code space for all the results.
+ // Never add more than half of a code space at once. This leaves some space
+ // for jump tables and other overhead. We could use {OverheadPerCodeSpace},
+ // but that's only an approximation, so we are conservative here and never use
+ // more than half a code space.
+ size_t max_code_batch_size = v8_flags.wasm_max_code_space_size_mb * MB / 2;
size_t total_code_space = 0;
for (auto& result : results) {
DCHECK(result.succeeded());
- total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
+ size_t new_code_space =
+ RoundUp<kCodeAlignment>(result.code_desc.instr_size);
+ if (total_code_space + new_code_space > max_code_batch_size) {
+ // Split off the first part of the {results} vector and process it
+ // separately. This method then continues with the rest.
+ size_t split_point = &result - results.begin();
+ CHECK_WITH_MSG(
+ split_point != 0,
+ "A single code object needs more than half of the code space size");
+ auto first_results = AddCompiledCode(results.SubVector(0, split_point));
+ generated_code.insert(generated_code.end(),
+ std::make_move_iterator(first_results.begin()),
+ std::make_move_iterator(first_results.end()));
+ // Continue processing the rest of the vector. This change to the
+ // {results} vector does not invalidate iterators (which are just
+ // pointers). In particular, the end pointer stays the same.
+ results += split_point;
+ total_code_space = 0;
+ }
+ total_code_space += new_code_space;
}
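With concrete numbers, the splitting above works as follows. A stand-alone sketch (not V8 code) with an assumed 32 MB code space size, so batches are capped at 16 MB:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t MB = 1024 * 1024;
  const size_t max_code_batch_size = 32 * MB / 2;  // half of a code space
  const std::vector<size_t> code_sizes = {6 * MB, 7 * MB, 5 * MB, 9 * MB};

  size_t total = 0, batch = 0;
  std::printf("batch %zu:", batch);
  for (size_t size : code_sizes) {
    if (total + size > max_code_batch_size) {
      // Flush the current batch and start a new one, mirroring the split of
      // the {results} vector above.
      total = 0;
      std::printf("\nbatch %zu:", ++batch);
    }
    total += size;
    std::printf(" %zuMB", size / MB);
  }
  std::printf("\n");
  // With the 16 MB cap this prints: batch 0: 6MB 7MB / batch 1: 5MB 9MB.
}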
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_tables;
@@ -2352,9 +2264,6 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
// {results} vector in smaller chunks).
CHECK(jump_tables.is_valid());
- std::vector<std::unique_ptr<WasmCode>> generated_code;
- generated_code.reserve(results.size());
-
// Now copy the generated code into the code space and relocate it.
for (auto& result : results) {
DCHECK_EQ(result.code_desc.buffer, result.instr_buffer->start());
@@ -2365,95 +2274,75 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), GetCodeKind(result),
- result.result_tier, result.for_debugging, this_code_space,
- jump_tables));
+ result.source_positions.as_vector(),
+ result.inlining_positions.as_vector(), GetCodeKind(result),
+ result.result_tier, result.for_debugging,
+ result.frame_has_feedback_slot, this_code_space, jump_tables));
}
DCHECK_EQ(0, code_space.size());
+ // Check that we added the expected amount of code objects, even if we split
+ // the {results} vector.
+ DCHECK_EQ(generated_code.capacity(), generated_code.size());
+
return generated_code;
}
-void NativeModule::SetTieringState(TieringState new_tiering_state) {
+void NativeModule::SetDebugState(DebugState new_debug_state) {
// Do not tier down asm.js (just never change the tiering state).
if (module()->origin != kWasmOrigin) return;
base::RecursiveMutexGuard lock(&allocation_mutex_);
- tiering_state_ = new_tiering_state;
-}
-
-bool NativeModule::IsTieredDown() {
- base::RecursiveMutexGuard lock(&allocation_mutex_);
- return tiering_state_ == kTieredDown;
+ debug_state_ = new_debug_state;
}
-void NativeModule::RecompileForTiering() {
- // If baseline compilation is not finished yet, we do not tier down now. This
- // would be tricky because not all code is guaranteed to be available yet.
- // Instead, we tier down after streaming compilation finished.
- if (!compilation_state_->baseline_compilation_finished()) return;
-
- // Read the tiering state under the lock, then trigger recompilation after
- // releasing the lock. If the tiering state was changed when the triggered
- // compilation units finish, code installation will handle that correctly.
- TieringState current_state;
- {
- base::RecursiveMutexGuard lock(&allocation_mutex_);
- current_state = tiering_state_;
-
- // Initialize {cached_code_} to signal that this cache should get filled
- // from now on.
- if (!cached_code_) {
- cached_code_ = std::make_unique<
- std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
- // Fill with existing code.
- for (auto& code_entry : owned_code_) {
- InsertToCodeCache(code_entry.second.get());
- }
- }
+namespace {
+bool ShouldRemoveCode(WasmCode* code, NativeModule::RemoveFilter filter) {
+ if (filter == NativeModule::RemoveFilter::kRemoveDebugCode &&
+ !code->for_debugging()) {
+ return false;
+ }
+ if (filter == NativeModule::RemoveFilter::kRemoveNonDebugCode &&
+ code->for_debugging()) {
+ return false;
+ }
+ if (filter == NativeModule::RemoveFilter::kRemoveLiftoffCode &&
+ !code->is_liftoff()) {
+ return false;
}
- RecompileNativeModule(this, current_state);
+ if (filter == NativeModule::RemoveFilter::kRemoveTurbofanCode &&
+ !code->is_turbofan()) {
+ return false;
+ }
+ return true;
}
+} // namespace
-std::vector<int> NativeModule::FindFunctionsToRecompile(
- TieringState new_tiering_state) {
- WasmCodeRefScope code_ref_scope;
+void NativeModule::RemoveCompiledCode(RemoveFilter filter) {
+ const uint32_t num_imports = module_->num_imported_functions;
+ const uint32_t num_functions = module_->num_declared_functions;
+ WasmCodeRefScope ref_scope;
+ CodeSpaceWriteScope write_scope(this);
base::RecursiveMutexGuard guard(&allocation_mutex_);
- // Get writable permission already here (and not inside the loop in
- // {PatchJumpTablesLocked}), to avoid switching for each slot individually.
- CodeSpaceWriteScope code_space_write_scope(this);
- std::vector<int> function_indexes;
- int imported = module()->num_imported_functions;
- int declared = module()->num_declared_functions;
- const bool tier_down = new_tiering_state == kTieredDown;
- for (int slot_index = 0; slot_index < declared; ++slot_index) {
- int function_index = imported + slot_index;
- WasmCode* old_code = code_table_[slot_index];
- bool code_is_good =
- tier_down ? old_code && old_code->for_debugging()
- : old_code && old_code->tier() == ExecutionTier::kTurbofan;
- if (code_is_good) continue;
- DCHECK_NOT_NULL(cached_code_);
- auto cache_it = cached_code_->find(std::make_pair(
- tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
- function_index));
- if (cache_it != cached_code_->end()) {
- WasmCode* cached_code = cache_it->second;
- if (old_code) {
- WasmCodeRefScope::AddRef(old_code);
- // The code is added to the current {WasmCodeRefScope}, hence the ref
- // count cannot drop to zero here.
- old_code->DecRefOnLiveCode();
- }
- code_table_[slot_index] = cached_code;
- PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
- cached_code->IncRef();
- continue;
+ for (uint32_t i = 0; i < num_functions; i++) {
+ WasmCode* code = code_table_[i];
+ if (code && ShouldRemoveCode(code, filter)) {
+ code_table_[i] = nullptr;
+ // Add the code to the {WasmCodeRefScope}, so the ref count cannot drop to
+ // zero here. It might in the {WasmCodeRefScope} destructor, though.
+ WasmCodeRefScope::AddRef(code);
+ code->DecRefOnLiveCode();
+ uint32_t func_index = i + num_imports;
+ UseLazyStubLocked(func_index);
}
- // Otherwise add the function to the set of functions to recompile.
- function_indexes.push_back(function_index);
}
- return function_indexes;
+ // When resuming optimized execution after a debugging session ends, or when
+ // discarding optimized code that made outdated assumptions, allow another
+ // tier-up task to get scheduled.
+ if (filter == RemoveFilter::kRemoveDebugCode ||
+ filter == RemoveFilter::kRemoveTurbofanCode) {
+ compilation_state_->AllowAnotherTopTierJobForAllFunctions();
+ }
}
void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 5dc13960f1..251d19b364 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -21,6 +21,7 @@
#include "src/base/macros.h"
#include "src/base/vector.h"
#include "src/builtins/builtins.h"
+#include "src/codegen/source-position.h"
#include "src/common/code-memory-access.h"
#include "src/handles/handles.h"
#include "src/tasks/operations-barrier.h"
@@ -34,12 +35,13 @@
namespace v8 {
namespace internal {
-class Code;
+class InstructionStream;
class CodeDesc;
class Isolate;
namespace wasm {
+class AssumptionsJournal;
class DebugInfo;
class NamesProvider;
class NativeModule;
@@ -47,6 +49,7 @@ struct WasmCompilationResult;
class WasmEngine;
class WasmImportWrapperCache;
struct WasmModule;
+enum class WellKnownImport : uint8_t;
// Convenience macro listing all wasm runtime stubs. Note that the first few
// elements of the list coincide with {compiler::TrapId}, order matters.
@@ -132,6 +135,7 @@ struct WasmModule;
V(WasmStringConcat) \
V(WasmStringEqual) \
V(WasmStringIsUSVSequence) \
+ V(WasmStringAsWtf16) \
V(WasmStringViewWtf16GetCodeUnit) \
V(WasmStringViewWtf16Encode) \
V(WasmStringViewWtf16Slice) \
@@ -148,8 +152,10 @@ struct WasmModule;
V(WasmStringViewIterAdvance) \
V(WasmStringViewIterRewind) \
V(WasmStringViewIterSlice) \
- V(WasmExternInternalize) \
- V(WasmExternExternalize)
+ V(StringCompare) \
+ V(WasmStringFromCodePoint) \
+ V(WasmStringHash) \
+ V(WasmExternInternalize)
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
@@ -279,6 +285,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
base::Vector<const byte> source_positions() const {
return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
}
+ base::Vector<const byte> inlining_positions() const {
+ return {source_positions().end(),
+ static_cast<size_t>(inlining_positions_size_)};
+ }
int index() const { return index_; }
// Anonymous functions are functions that don't carry an index.
@@ -385,7 +395,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
static void DecrementRefCount(base::Vector<WasmCode* const>);
// Returns the last source position before {offset}.
- int GetSourcePositionBefore(int offset);
+ SourcePosition GetSourcePositionBefore(int code_offset);
+ int GetSourceOffsetBefore(int code_offset);
+
+ std::pair<int, SourcePosition> GetInliningPosition(int inlining_id) const;
// Returns whether this code was generated for debugging. If this returns
// {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
@@ -394,6 +407,13 @@ class V8_EXPORT_PRIVATE WasmCode final {
return ForDebuggingField::decode(flags_);
}
+ // Returns {true} for Liftoff code that sets up a feedback vector slot in its
+ // stack frame.
+ // TODO(jkummerow): This can be dropped when we ship Wasm inlining.
+ bool frame_has_feedback_slot() const {
+ return FrameHasFeedbackSlotField::decode(flags_);
+ }
+
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
private:
@@ -406,17 +426,19 @@ class V8_EXPORT_PRIVATE WasmCode final {
int code_comments_offset, int unpadded_binary_size,
base::Vector<const byte> protected_instructions_data,
base::Vector<const byte> reloc_info,
- base::Vector<const byte> source_position_table, Kind kind,
- ExecutionTier tier, ForDebugging for_debugging)
+ base::Vector<const byte> source_position_table,
+ base::Vector<const byte> inlining_positions, Kind kind,
+ ExecutionTier tier, ForDebugging for_debugging,
+ bool frame_has_feedback_slot = false)
: native_module_(native_module),
instructions_(instructions.begin()),
- flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
- ForDebuggingField::encode(for_debugging)),
- meta_data_(ConcatenateBytes(
- {protected_instructions_data, reloc_info, source_position_table})),
+ meta_data_(
+ ConcatenateBytes({protected_instructions_data, reloc_info,
+ source_position_table, inlining_positions})),
instructions_size_(instructions.length()),
reloc_info_size_(reloc_info.length()),
source_positions_size_(source_position_table.length()),
+ inlining_positions_size_(inlining_positions.length()),
protected_instructions_size_(protected_instructions_data.length()),
index_(index),
constant_pool_offset_(constant_pool_offset),
@@ -425,7 +447,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
code_comments_offset_(code_comments_offset),
- unpadded_binary_size_(unpadded_binary_size) {
+ unpadded_binary_size_(unpadded_binary_size),
+ flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
+ ForDebuggingField::encode(for_debugging) |
+ FrameHasFeedbackSlotField::encode(frame_has_feedback_slot)) {
DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
DCHECK_LE(handler_table_offset, unpadded_binary_size);
DCHECK_LE(code_comments_offset, unpadded_binary_size);
@@ -439,8 +464,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
// back to the function index. Return value is guaranteed to not be empty.
std::string DebugName() const;
- // Code objects that have been registered with the global trap handler within
- // this process, will have a {trap_handler_index} associated with them.
+ // Code objects that have been registered with the global trap
+  // handler within this process will have a {trap_handler_index} associated
+ // with them.
int trap_handler_index() const {
CHECK(has_trap_handler_index());
return trap_handler_index_;
@@ -461,7 +487,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
NativeModule* const native_module_ = nullptr;
byte* const instructions_;
- const uint8_t flags_; // Bit field, see below.
// {meta_data_} contains several byte vectors concatenated into one:
// - protected instructions data of size {protected_instructions_size_}
// - relocation info of size {reloc_info_size_}
@@ -471,6 +496,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
const int instructions_size_;
const int reloc_info_size_;
const int source_positions_size_;
+ const int inlining_positions_size_;
const int protected_instructions_size_;
const int index_;
const int constant_pool_offset_;
@@ -487,10 +513,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
const int unpadded_binary_size_;
int trap_handler_index_ = -1;
+ const uint8_t flags_; // Bit field, see below.
// Bits encoded in {flags_}:
using KindField = base::BitField8<Kind, 0, 2>;
using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;
+ using FrameHasFeedbackSlotField = ForDebuggingField::Next<bool, 1>;
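The four bit fields above pack into the single byte {flags_} (2 + 2 + 2 + 1 = 7 bits), which is part of keeping sizeof(WasmCode) small. A stand-alone sketch of the same packing done with shifts and masks instead of base::BitField8 (an illustration, not V8's helper):

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t kind = 1, tier = 2, for_debugging = 0, has_feedback_slot = 1;
  // Bits 0-1: kind, bits 2-3: tier, bits 4-5: for_debugging, bit 6: slot flag.
  uint8_t flags = static_cast<uint8_t>(kind | (tier << 2) |
                                       (for_debugging << 4) |
                                       (has_feedback_slot << 6));
  std::printf("kind=%u tier=%u dbg=%u slot=%u\n", flags & 3u, (flags >> 2) & 3u,
              (flags >> 4) & 3u, (flags >> 6) & 1u);
  // Prints: kind=1 tier=2 dbg=0 slot=1
}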
// WasmCode is ref counted. Counters are held by:
// 1) The jump table / code table.
@@ -505,14 +533,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::atomic<int> ref_count_{1};
};
-// Check that {WasmCode} objects are sufficiently small. We create many of them,
-// often for rather small functions.
-// Increase the limit if needed, but first check if the size increase is
-// justified.
-#ifndef V8_GC_MOLE
-static_assert(sizeof(WasmCode) <= 88);
-#endif
-
WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
// Return a textual description of the kind.
@@ -521,20 +541,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
public:
-#if V8_TARGET_ARCH_ARM64
- // ARM64 only supports direct calls within a 128 MB range.
- static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
-#elif V8_TARGET_ARCH_PPC64
- // branches only takes 26 bits
- static constexpr size_t kMaxCodeSpaceSize = 32 * MB;
-#else
- // Use 1024 MB limit for code spaces on other platforms. This is smaller than
- // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
- // big reservations, and to ensure that distances within a code space fit
- // within a 32-bit signed integer.
- static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
-#endif
-
explicit WasmCodeAllocator(std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
@@ -561,20 +567,6 @@ class WasmCodeAllocator {
base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
base::AddressRegion);
- // Increases or decreases the {writers_count_} field. While there is at least
- // one writer, it is allowed to call {MakeWritable} to make regions writable.
- // When the last writer is removed, all code is switched back to
- // write-protected.
- // Hold the {NativeModule}'s {allocation_mutex_} when calling one of these
- // methods. The methods should only be called via {CodeSpaceWriteScope}.
- V8_EXPORT_PRIVATE void AddWriter();
- V8_EXPORT_PRIVATE void RemoveWriter();
-
- // Make a code region writable. Only allowed if there is at lease one writer
- // (see above).
- // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
-
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
void FreeCode(base::Vector<WasmCode* const>);
@@ -586,32 +578,21 @@ class WasmCodeAllocator {
Counters* counters() const { return async_counters_.get(); }
private:
- void InsertIntoWritableRegions(base::AddressRegion region,
- bool switch_to_writable);
-
//////////////////////////////////////////////////////////////////////////////
// These fields are protected by the mutex in {NativeModule}.
- // Code space that was reserved and is available for allocations (subset of
- // {owned_code_space_}).
+ // Code space that was reserved and is available for allocations
+ // (subset of {owned_code_space_}).
DisjointAllocationPool free_code_space_;
- // Code space that was allocated before but is dead now. Full pages within
- // this region are discarded. It's still a subset of {owned_code_space_}.
+ // Code space that was allocated before but is dead now. Full
+ // pages within this region are discarded. It's still a subset of
+ // {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
- // The following two fields are only used if {protect_code_memory_} is true.
- int writers_count_{0};
- std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>
- writable_memory_;
-
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
- // {protect_code_memory_} is true if traditional memory permission switching
- // is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is
- // being used, or protection is completely disabled.
- const bool protect_code_memory_;
std::atomic<size_t> committed_code_space_{0};
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
@@ -645,7 +626,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the
// given {WasmCode} object. Ownership is transferred to the {NativeModule}.
- WasmCode* PublishCode(std::unique_ptr<WasmCode>);
+ // Returns {nullptr} if the {AssumptionsJournal} is non-nullptr and contains
+ // invalid assumptions.
+ WasmCode* PublishCode(std::unique_ptr<WasmCode>,
+ AssumptionsJournal* = nullptr);
std::vector<WasmCode*> PublishCode(base::Vector<std::unique_ptr<WasmCode>>);
// ReinstallDebugCode does a subset of PublishCode: It installs the code in
@@ -672,20 +656,27 @@ class V8_EXPORT_PRIVATE NativeModule final {
int code_comments_offset, int unpadded_binary_size,
base::Vector<const byte> protected_instructions_data,
base::Vector<const byte> reloc_info,
- base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ base::Vector<const byte> source_position_table,
+ base::Vector<const byte> inlining_positions, WasmCode::Kind kind,
ExecutionTier tier);
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
- // Use {UseLazyStub} to setup lazy compilation per function. It will use the
- // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
- // table with trampolines accordingly.
- void UseLazyStub(uint32_t func_index);
+ // Allocates and initializes the {lazy_compile_table_} and initializes the
+ // first jump table with jumps to the {lazy_compile_table_}.
+ void InitializeJumpTableForLazyCompilation(uint32_t num_wasm_functions);
+
+ // Use {UseLazyStubLocked} to setup lazy compilation per function. It will use
+ // the existing {WasmCode::kWasmCompileLazy} runtime stub and populate the
+ // jump table with trampolines accordingly.
+ void UseLazyStubLocked(uint32_t func_index);
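
To make the lazy-compilation flow described above more concrete, here is a hedged, self-contained sketch: a toy dispatch table, not V8's jump table or the {WasmCode::kWasmCompileLazy} stub. Each slot initially targets a lazy-compile path, and the first call patches the slot to the compiled code.

```cpp
#include <cstdio>
#include <vector>

// Toy model (heavily simplified, names are illustrative) of a jump table whose
// entries start out pointing at a lazy-compile path and get patched on first use.
class ToyJumpTable {
 public:
  using Target = int (*)(int);

  explicit ToyJumpTable(size_t num_functions) : slots_(num_functions, nullptr) {}

  int Call(size_t func_index, int arg) {
    if (slots_[func_index] == nullptr) {
      // "Lazy compile": produce code for this function, then patch the slot
      // so that later calls dispatch directly to the compiled code.
      slots_[func_index] = Compile(func_index);
    }
    return slots_[func_index](arg);
  }

 private:
  static Target Compile(size_t func_index) {
    std::printf("lazily compiling function %zu\n", func_index);
    return [](int x) { return x + 1; };  // stand-in for generated machine code
  }

  std::vector<Target> slots_;  // nullptr == "still points at the lazy stub"
};

int main() {
  ToyJumpTable table(3);
  table.Call(1, 41);  // first call triggers "compilation" and patches slot 1
  table.Call(1, 41);  // second call goes straight through the patched slot
  return 0;
}
```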
- // Creates a snapshot of the current state of the code table. This is useful
+ // Creates a snapshot of the current state of the code table, along with the
+ // current import statuses that these code objects depend on. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
- std::vector<WasmCode*> SnapshotCodeTable() const;
+ std::pair<std::vector<WasmCode*>, std::vector<WellKnownImport>>
+ SnapshotCodeTable() const;
// Creates a snapshot of all {owned_code_}, will transfer new code (if any) to
// {owned_code_}.
std::vector<WasmCode*> SnapshotAllOwnedCode() const;
@@ -702,12 +693,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
: kNullAddress;
}
- // Finds the jump tables that should be used for given code region. This
- // information is then passed to {GetNearCallTargetForFunction} and
- // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
- // up there. Return an empty struct if no suitable jump tables exist.
- JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
-
// Get the call target in the jump table previously looked up via
// {FindJumpTablesForRegionLocked}.
Address GetNearCallTargetForFunction(uint32_t func_index,
@@ -722,21 +707,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
- void AddWriter() {
- base::RecursiveMutexGuard guard{&allocation_mutex_};
- code_allocator_.AddWriter();
- }
-
- void RemoveWriter() {
- base::RecursiveMutexGuard guard{&allocation_mutex_};
- code_allocator_.RemoveWriter();
- }
-
- void MakeWritable(base::AddressRegion region) {
- base::RecursiveMutexGuard guard{&allocation_mutex_};
- code_allocator_.MakeWritable(region);
- }
-
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ReserveCodeTableForTesting(uint32_t max_functions);
@@ -781,12 +751,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t turbofan_code_size() const {
return turbofan_code_size_.load(std::memory_order_relaxed);
}
- size_t baseline_compilation_cpu_duration() const {
- return baseline_compilation_cpu_duration_.load();
- }
- size_t tier_up_cpu_duration() const {
- return tier_up_cpu_duration_.load(std::memory_order_relaxed);
- }
void AddLazyCompilationTimeSample(int64_t sample);
@@ -819,7 +783,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
}
void SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes);
- void UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier);
void AddLiftoffBailout() {
liftoff_bailout_count_.fetch_add(1, std::memory_order_relaxed);
}
@@ -837,32 +800,33 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;
// Sample the current code size of this module to the given counters.
- enum CodeSamplingTime : int8_t { kAfterBaseline, kSampling };
- void SampleCodeSize(Counters*, CodeSamplingTime) const;
+ void SampleCodeSize(Counters*) const;
V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
WasmCompilationResult);
V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
base::Vector<WasmCompilationResult>);
- // Set a new tiering state, but don't trigger any recompilation yet; use
- // {RecompileForTiering} for that. The two steps are split because In some
- // scenarios we need to drop locks before triggering recompilation.
- void SetTieringState(TieringState);
+ // Set a new debugging state, but don't trigger any recompilation;
+ // recompilation happens lazily.
+ void SetDebugState(DebugState);
- // Check whether this modules is tiered down for debugging.
- bool IsTieredDown();
-
- // Fully recompile this module in the tier set previously via
- // {SetTieringState}. The calling thread contributes to compilation and only
- // returns once recompilation is done.
- void RecompileForTiering();
+ // Check whether this module is in debug state.
+ DebugState IsInDebugState() const {
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
+ return debug_state_;
+ }
- // Find all functions that need to be recompiled for a new tier. Note that
- // compilation jobs might run concurrently, so this method only considers the
- // compilation state of this native module at the time of the call.
- // Returns a vector of function indexes to recompile.
- std::vector<int> FindFunctionsToRecompile(TieringState);
+ enum class RemoveFilter {
+ kRemoveDebugCode,
+ kRemoveNonDebugCode,
+ kRemoveLiftoffCode,
+ kRemoveTurbofanCode,
+ kRemoveAllCode,
+ };
+ // Remove all compiled code from the {NativeModule} and replace it with
+ // {CompileLazy} builtins.
+ void RemoveCompiledCode(RemoveFilter filter);
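
The mapping from each {RemoveFilter} value to the code it affects can be sketched as a predicate; the {CodeInfo} struct, its fields, and the exact mapping below are assumptions inferred only from the enumerator names, not V8's implementation:

```cpp
enum class RemoveFilter {
  kRemoveDebugCode,
  kRemoveNonDebugCode,
  kRemoveLiftoffCode,
  kRemoveTurbofanCode,
  kRemoveAllCode,
};

struct CodeInfo {
  bool is_liftoff;     // compiled by the baseline (Liftoff) tier
  bool for_debugging;  // compiled with debug instrumentation
};

// Hypothetical predicate deciding which code objects a filter removes.
inline bool ShouldRemove(RemoveFilter filter, const CodeInfo& code) {
  switch (filter) {
    case RemoveFilter::kRemoveDebugCode:    return code.for_debugging;
    case RemoveFilter::kRemoveNonDebugCode: return !code.for_debugging;
    case RemoveFilter::kRemoveLiftoffCode:  return code.is_liftoff;
    case RemoveFilter::kRemoveTurbofanCode: return !code.is_liftoff;
    case RemoveFilter::kRemoveAllCode:      return true;
  }
  return false;  // unreachable for a valid filter
}
```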
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
@@ -883,7 +847,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Get or create the NamesProvider. Requires {HasWireBytes()}.
NamesProvider* GetNamesProvider();
- uint32_t* tiering_budget_array() { return tiering_budgets_.get(); }
+ uint32_t* tiering_budget_array() const { return tiering_budgets_.get(); }
Counters* counters() const { return code_allocator_.counters(); }
@@ -910,15 +874,23 @@ class V8_EXPORT_PRIVATE NativeModule final {
int index, const CodeDesc& desc, int stack_slots,
uint32_t tagged_parameter_slots,
base::Vector<const byte> protected_instructions_data,
- base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ base::Vector<const byte> source_position_table,
+ base::Vector<const byte> inlining_positions, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging,
- base::Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
+ bool frame_has_feedback_slot, base::Vector<uint8_t> code_space,
+ const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableLocked(int jump_table_size);
WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
base::AddressRegion);
+ // Finds the jump tables that should be used for a given code region. This
+ // information is then passed to {GetNearCallTargetForFunction} and
+ // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
+ // up there. Return an empty struct if no suitable jump tables exist.
+ JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
+
void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
// Hold the {allocation_mutex_} when calling one of these methods.
@@ -941,6 +913,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// not have code in the cache yet.
void InsertToCodeCache(WasmCode* code);
+ bool should_update_code_table(WasmCode* new_code, WasmCode* prior_code) const;
+
// -- Fields of {NativeModule} start here.
// Keep the engine alive as long as this NativeModule is alive. In its
@@ -1030,7 +1004,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<NamesProvider> names_provider_;
- TieringState tiering_state_ = kTieredUp;
+ DebugState debug_state_ = kNotDebugging;
// Cache both baseline and top-tier code if we are debugging, to speed up
// repeated enabling/disabling of the debugger or profiler.
@@ -1046,8 +1020,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::atomic<size_t> liftoff_bailout_count_{0};
std::atomic<size_t> liftoff_code_size_{0};
std::atomic<size_t> turbofan_code_size_{0};
- std::atomic<size_t> baseline_compilation_cpu_duration_{0};
- std::atomic<size_t> tier_up_cpu_duration_{0};
// Metrics for lazy compilation.
std::atomic<int> num_lazy_compilations_{0};
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 8d40b4646f..ef0d7f44ff 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -62,6 +62,7 @@ constexpr uint8_t kWasmFunctionTypeCode = 0x60;
constexpr uint8_t kWasmStructTypeCode = 0x5f;
constexpr uint8_t kWasmArrayTypeCode = 0x5e;
constexpr uint8_t kWasmSubtypeCode = 0x50;
+constexpr uint8_t kWasmSubtypeFinalCode = 0x4e;
constexpr uint8_t kWasmRecursiveTypeGroupCode = 0x4f;
// Binary encoding of import/export kinds.
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index d64a9c7032..6071774e1a 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -173,7 +173,7 @@ class DebugInfoImpl {
// is in the function of the given index.
int DeadBreakpoint(int func_index, base::Vector<const int> breakpoints,
Isolate* isolate) {
- StackTraceFrameIterator it(isolate);
+ DebuggableStackFrameIterator it(isolate);
if (it.done() || !it.is_wasm()) return 0;
auto* wasm_frame = WasmFrame::cast(it.frame());
if (static_cast<int>(wasm_frame->function_index()) != func_index) return 0;
@@ -209,7 +209,7 @@ class DebugInfoImpl {
// Recompile the function with Liftoff, setting the new breakpoints.
// Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv();
- auto* function = &native_module_->module()->functions[func_index];
+ auto* function = &env.module->functions[func_index];
base::Vector<const uint8_t> wire_bytes = native_module_->wire_bytes();
FunctionBody body{function->sig, function->code.offset(),
wire_bytes.begin() + function->code.offset(),
@@ -218,6 +218,17 @@ class DebugInfoImpl {
// Debug side tables for stepping are generated lazily.
bool generate_debug_sidetable = for_debugging == kWithBreakpoints;
+ // If lazy validation is on, we might need to lazily validate here.
+ if (V8_UNLIKELY(!env.module->function_was_validated(func_index))) {
+ WasmFeatures unused_detected_features;
+ DecodeResult validation_result = ValidateFunctionBody(
+ env.enabled_features, env.module, &unused_detected_features, body);
+ // Handling illegal modules here is tricky. As lazy validation is off by
+ // default anyway and this is for debugging only, we just crash for now.
+ CHECK_WITH_MSG(validation_result.ok(),
+ validation_result.error().message().c_str());
+ env.module->set_function_validated(func_index);
+ }
WasmCompilationResult result = ExecuteLiftoffCompilation(
&env, body,
LiftoffOptions{}
@@ -618,7 +629,7 @@ class DebugInfoImpl {
// The first return location is after the breakpoint, others are after wasm
// calls.
ReturnLocation return_location = kAfterBreakpoint;
- for (StackTraceFrameIterator it(isolate); !it.done();
+ for (DebuggableStackFrameIterator it(isolate); !it.done();
it.Advance(), return_location = kAfterWasmCall) {
// We still need the flooded function for stepping.
if (it.frame()->id() == stepping_frame) continue;
@@ -637,8 +648,8 @@ class DebugInfoImpl {
DCHECK_EQ(frame->function_index(), new_code->index());
DCHECK_EQ(frame->native_module(), new_code->native_module());
DCHECK(frame->wasm_code()->is_liftoff());
- Address new_pc =
- FindNewPC(frame, new_code, frame->byte_offset(), return_location);
+ Address new_pc = FindNewPC(frame, new_code, frame->generated_code_offset(),
+ return_location);
#ifdef DEBUG
int old_position = frame->position();
#endif
diff --git a/deps/v8/src/wasm/wasm-disassembler.cc b/deps/v8/src/wasm/wasm-disassembler.cc
index 3ab0805178..f12af425f4 100644
--- a/deps/v8/src/wasm/wasm-disassembler.cc
+++ b/deps/v8/src/wasm/wasm-disassembler.cc
@@ -167,8 +167,7 @@ void FunctionBodyDisassembler::DecodeAsWat(MultiLineStringBuilder& out,
indentation.increase();
// Decode and print locals.
- uint32_t locals_length;
- DecodeLocals(pc_, &locals_length);
+ uint32_t locals_length = DecodeLocals(pc_);
if (failed()) {
// TODO(jkummerow): Improve error handling.
out << "Failed to decode locals\n";
@@ -195,7 +194,9 @@ void FunctionBodyDisassembler::DecodeAsWat(MultiLineStringBuilder& out,
// Deal with indentation.
if (opcode == kExprEnd || opcode == kExprElse || opcode == kExprCatch ||
opcode == kExprCatchAll || opcode == kExprDelegate) {
- indentation.decrease();
+ if (indentation.current() >= base_indentation) {
+ indentation.decrease();
+ }
}
out << indentation;
if (opcode == kExprElse || opcode == kExprCatch ||
@@ -206,7 +207,9 @@ void FunctionBodyDisassembler::DecodeAsWat(MultiLineStringBuilder& out,
// Print the opcode and its immediates.
if (opcode == kExprEnd) {
- if (indentation.current() == base_indentation) {
+ if (indentation.current() < base_indentation) {
+ out << ";; Unexpected end byte";
+ } else if (indentation.current() == base_indentation) {
out << ")"; // End of the function.
} else {
out << "end";
@@ -254,8 +257,7 @@ void FunctionBodyDisassembler::DecodeGlobalInitializer(StringBuilder& out) {
WasmOpcode FunctionBodyDisassembler::GetOpcode() {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
if (!WasmOpcodes::IsPrefixOpcode(opcode)) return opcode;
- uint32_t opcode_length;
- return read_prefixed_opcode<ValidationTag>(pc_, &opcode_length);
+ return read_prefixed_opcode<ValidationTag>(pc_).first;
}
void FunctionBodyDisassembler::PrintHexNumber(StringBuilder& out,
@@ -309,16 +311,20 @@ class ImmediatesPrinter {
owner_->out_->PatchLabel(label_info, out_.start() + label_start_position);
}
- void BlockType(BlockTypeImmediate& imm) {
- if (imm.type == kWasmBottom) {
- const FunctionSig* sig = owner_->module_->signature(imm.sig_index);
+ void PrintSignature(uint32_t sig_index) {
+ if (owner_->module_->has_signature(sig_index)) {
+ const FunctionSig* sig = owner_->module_->signature(sig_index);
PrintSignatureOneLine(out_, sig, 0 /* ignored */, names(), false);
- } else if (imm.type == kWasmVoid) {
- // Just be silent.
} else {
- out_ << " (result ";
- names()->PrintValueType(out_, imm.type);
- out_ << ")";
+ out_ << " (signature: " << sig_index << " INVALID)";
+ }
+ }
+
+ void BlockType(BlockTypeImmediate& imm) {
+ if (imm.sig.all().begin() == nullptr) {
+ PrintSignature(imm.sig_index);
+ } else {
+ PrintSignatureOneLine(out_, &imm.sig, 0 /* ignored */, names(), false);
}
}
@@ -333,16 +339,14 @@ class ImmediatesPrinter {
void BranchTable(BranchTableImmediate& imm) {
const byte* pc = imm.table;
for (uint32_t i = 0; i <= imm.table_count; i++) {
- uint32_t length;
- uint32_t target = owner_->read_u32v<ValidationTag>(pc, &length);
+ auto [target, length] = owner_->read_u32v<ValidationTag>(pc);
PrintDepthAsLabel(target);
pc += length;
}
}
void CallIndirect(CallIndirectImmediate& imm) {
- const FunctionSig* sig = owner_->module_->signature(imm.sig_imm.index);
- PrintSignatureOneLine(out_, sig, 0 /* ignored */, names(), false);
+ PrintSignature(imm.sig_imm.index);
if (imm.table_imm.index != 0) TableIndex(imm.table_imm);
}
@@ -559,12 +563,12 @@ uint32_t FunctionBodyDisassembler::PrintImmediatesAndGetLength(
////////////////////////////////////////////////////////////////////////////////
// OffsetsProvider.
-class OffsetsProvider {
+class OffsetsProvider : public ITracer {
public:
OffsetsProvider() = default;
- void CollectOffsets(const WasmModule* module, const byte* start,
- const byte* end, AccountingAllocator* allocator) {
+ void CollectOffsets(const WasmModule* module,
+ base::Vector<const uint8_t> wire_bytes) {
num_imported_tables_ = module->num_imported_tables;
num_imported_globals_ = module->num_imported_globals;
num_imported_tags_ = module->num_imported_tags;
@@ -576,50 +580,58 @@ class OffsetsProvider {
element_offsets_.reserve(module->elem_segments.size());
data_offsets_.reserve(module->data_segments.size());
- using OffsetsCollectingDecoder = ModuleDecoderTemplate<OffsetsProvider>;
- OffsetsCollectingDecoder decoder(WasmFeatures::All(), start, end,
- kWasmOrigin, *this);
- constexpr bool verify_functions = false;
- decoder.DecodeModule(nullptr, allocator, verify_functions);
+ ModuleDecoderImpl decoder{WasmFeatures::All(), wire_bytes, kWasmOrigin,
+ this};
+ constexpr bool kNoVerifyFunctions = false;
+ decoder.DecodeModule(kNoVerifyFunctions);
enabled_ = true;
}
- void TypeOffset(uint32_t offset) { type_offsets_.push_back(offset); }
+ void TypeOffset(uint32_t offset) override { type_offsets_.push_back(offset); }
- void ImportOffset(uint32_t offset) { import_offsets_.push_back(offset); }
+ void ImportOffset(uint32_t offset) override {
+ import_offsets_.push_back(offset);
+ }
- void TableOffset(uint32_t offset) { table_offsets_.push_back(offset); }
+ void TableOffset(uint32_t offset) override {
+ table_offsets_.push_back(offset);
+ }
- void MemoryOffset(uint32_t offset) { memory_offset_ = offset; }
+ void MemoryOffset(uint32_t offset) override { memory_offset_ = offset; }
- void TagOffset(uint32_t offset) { tag_offsets_.push_back(offset); }
+ void TagOffset(uint32_t offset) override { tag_offsets_.push_back(offset); }
- void GlobalOffset(uint32_t offset) { global_offsets_.push_back(offset); }
+ void GlobalOffset(uint32_t offset) override {
+ global_offsets_.push_back(offset);
+ }
- void StartOffset(uint32_t offset) { start_offset_ = offset; }
+ void StartOffset(uint32_t offset) override { start_offset_ = offset; }
- void ElementOffset(uint32_t offset) { element_offsets_.push_back(offset); }
+ void ElementOffset(uint32_t offset) override {
+ element_offsets_.push_back(offset);
+ }
- void DataOffset(uint32_t offset) { data_offsets_.push_back(offset); }
+ void DataOffset(uint32_t offset) override { data_offsets_.push_back(offset); }
// Unused by this tracer:
- void ImportsDone() {}
- void Bytes(const byte* start, uint32_t count) {}
- void Description(const char* desc) {}
- void Description(const char* desc, size_t length) {}
- void Description(uint32_t number) {}
- void Description(ValueType type) {}
- void Description(HeapType type) {}
- void Description(const FunctionSig* sig) {}
- void NextLine() {}
- void NextLineIfFull() {}
- void NextLineIfNonEmpty() {}
+ void ImportsDone() override {}
+ void Bytes(const byte* start, uint32_t count) override {}
+ void Description(const char* desc) override {}
+ void Description(const char* desc, size_t length) override {}
+ void Description(uint32_t number) override {}
+ void Description(ValueType type) override {}
+ void Description(HeapType type) override {}
+ void Description(const FunctionSig* sig) override {}
+ void NextLine() override {}
+ void NextLineIfFull() override {}
+ void NextLineIfNonEmpty() override {}
void InitializerExpression(const byte* start, const byte* end,
- ValueType expected_type) {}
- void FunctionBody(const WasmFunction* func, const byte* start) {}
- void FunctionName(uint32_t func_index) {}
- void NameSection(const byte* start, const byte* end, uint32_t offset) {}
+ ValueType expected_type) override {}
+ void FunctionBody(const WasmFunction* func, const byte* start) override {}
+ void FunctionName(uint32_t func_index) override {}
+ void NameSection(const byte* start, const byte* end,
+ uint32_t offset) override {}
#define GETTER(name) \
uint32_t name##_offset(uint32_t index) { \
@@ -682,8 +694,7 @@ ModuleDisassembler::ModuleDisassembler(MultiLineStringBuilder& out,
offsets_(new OffsetsProvider()),
function_body_offsets_(function_body_offsets) {
if (function_body_offsets != nullptr) {
- offsets_->CollectOffsets(module, wire_bytes_.start(), wire_bytes_.end(),
- allocator);
+ offsets_->CollectOffsets(module, wire_bytes_.module_bytes());
}
}
@@ -703,12 +714,11 @@ void ModuleDisassembler::PrintTypeDefinition(uint32_t type_index,
// types; update this for isorecursive hybrid types.
out_ << (has_super ? " (array_subtype (field " : " (array (field ");
PrintMutableType(type->mutability(), type->element_type());
- out_ << ")";
+ out_ << ")"; // Closes `(field ...`
if (has_super) {
out_ << " ";
names_->PrintHeapType(out_, HeapType(module_->supertype(type_index)));
}
- out_ << ")";
} else if (module_->has_struct(type_index)) {
const StructType* type = module_->struct_type(type_index);
out_ << (has_super ? " (struct_subtype" : " (struct");
@@ -725,7 +735,6 @@ void ModuleDisassembler::PrintTypeDefinition(uint32_t type_index,
LineBreakOrSpace(break_lines, indentation, offset);
names_->PrintHeapType(out_, HeapType(module_->supertype(type_index)));
}
- out_ << ")";
} else if (module_->has_signature(type_index)) {
const FunctionSig* sig = module_->signature(type_index);
out_ << (has_super ? " (func_subtype" : " (func");
@@ -748,8 +757,8 @@ void ModuleDisassembler::PrintTypeDefinition(uint32_t type_index,
LineBreakOrSpace(break_lines, indentation, offset);
names_->PrintHeapType(out_, HeapType(module_->supertype(type_index)));
}
- out_ << ")";
}
+ out_ << "))"; // Closes "(type" and "(array" / "(struct" / "(func".
}
void ModuleDisassembler::PrintModule(Indentation indentation, size_t max_mb) {
@@ -918,7 +927,13 @@ void ModuleDisassembler::PrintModule(Indentation indentation, size_t max_mb) {
}
out_ << " ";
names_->PrintValueType(out_, elem.type);
- for (const ConstantExpression& entry : elem.entries) {
+
+ ModuleDecoderImpl decoder(WasmFeatures::All(), wire_bytes_.module_bytes(),
+ ModuleOrigin::kWasmOrigin);
+ decoder.consume_bytes(elem.elements_wire_bytes_offset);
+ for (size_t i = 0; i < elem.element_count; i++) {
+ ConstantExpression entry = decoder.consume_element_segment_entry(
+ const_cast<WasmModule*>(module_), elem);
PrintInitExpression(entry, elem.type);
}
out_ << ")";
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 065794d381..4a3372707a 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -6,6 +6,7 @@
#include "src/base/functional.h"
#include "src/base/platform/time.h"
+#include "src/base/small-vector.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/diagnostics/code-tracer.h"
@@ -237,7 +238,6 @@ bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
base::MutexGuard lock(&mutex_);
Key key{prefix_hash, {}};
- DCHECK_EQ(1, map_.count(key));
map_.erase(key);
cache_cv_.NotifyAll();
}
@@ -259,6 +259,10 @@ std::shared_ptr<NativeModule> NativeModuleCache::Update(
auto conflicting_module = it->second.value().lock();
if (conflicting_module != nullptr) {
DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
+ // This return might delete {native_module} if we were the last holder.
+ // That in turn can call {NativeModuleCache::Erase}, which takes the
+ // mutex. This is not a problem though, since the {MutexGuard} above is
+ // released before the {native_module}, per the definition order.
return conflicting_module;
}
}
@@ -386,8 +390,8 @@ struct WasmEngine::IsolateInfo {
const std::shared_ptr<Counters> async_counters;
- // Keep new modules in tiered down state.
- bool keep_tiered_down = false;
+ // Keep new modules in debug state.
+ bool keep_in_debug_state = false;
// Keep track whether we already added a sample for PKU support (we only want
// one sample per Isolate).
@@ -435,7 +439,7 @@ struct WasmEngine::NativeModuleInfo {
int8_t num_code_gcs_triggered = 0;
};
-WasmEngine::WasmEngine() = default;
+WasmEngine::WasmEngine() : call_descriptors_(&allocator_) {}
WasmEngine::~WasmEngine() {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
@@ -462,27 +466,20 @@ WasmEngine::~WasmEngine() {
}
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
- const ModuleWireBytes& bytes,
- std::string* error_message) {
+ ModuleWireBytes bytes) {
TRACE_EVENT0("v8.wasm", "wasm.SyncValidate");
- // TODO(titzer): remove dependency on the isolate.
- if (bytes.start() == nullptr || bytes.length() == 0) {
- if (error_message) *error_message = "empty module wire bytes";
- return false;
- }
+ if (bytes.length() == 0) return false;
+
auto result = DecodeWasmModule(
- enabled, bytes.start(), bytes.end(), true, kWasmOrigin,
- isolate->counters(), isolate->metrics_recorder(),
+ enabled, bytes.module_bytes(), true, kWasmOrigin, isolate->counters(),
+ isolate->metrics_recorder(),
isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
- DecodingMethod::kSync, allocator());
- if (result.failed() && error_message) {
- *error_message = result.error().message();
- }
+ DecodingMethod::kSync);
return result.ok();
}
MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Isolate* isolate, ErrorThrower* thrower, ModuleWireBytes bytes,
base::Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
int compilation_id = next_compilation_id_.fetch_add(1);
@@ -495,10 +492,10 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
// the context id in here.
v8::metrics::Recorder::ContextId context_id =
v8::metrics::Recorder::ContextId::Empty();
- ModuleResult result = DecodeWasmModule(
- WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(), false, origin,
- isolate->counters(), isolate->metrics_recorder(), context_id,
- DecodingMethod::kSync, allocator());
+ ModuleResult result =
+ DecodeWasmModule(WasmFeatures::ForAsmjs(), bytes.module_bytes(), false,
+ origin, isolate->counters(), isolate->metrics_recorder(),
+ context_id, DecodingMethod::kSync);
if (result.failed()) {
// This happens once in a while when we have missed some limit check
// in the asm parser. Output an error message to help diagnose, but crash.
@@ -532,7 +529,7 @@ Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
+ ModuleWireBytes bytes) {
int compilation_id = next_compilation_id_.fetch_add(1);
TRACE_EVENT1("v8.wasm", "wasm.SyncCompile", "id", compilation_id);
v8::metrics::Recorder::ContextId context_id =
@@ -540,9 +537,8 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
std::shared_ptr<WasmModule> module;
{
ModuleResult result = DecodeWasmModule(
- enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
- isolate->counters(), isolate->metrics_recorder(), context_id,
- DecodingMethod::kSync, allocator());
+ enabled, bytes.module_bytes(), false, kWasmOrigin, isolate->counters(),
+ isolate->metrics_recorder(), context_id, DecodingMethod::kSync);
if (result.failed()) {
thrower->CompileFailed(result.error());
return {};
@@ -638,9 +634,8 @@ void WasmEngine::AsyncInstantiate(
void WasmEngine::AsyncCompile(
Isolate* isolate, const WasmFeatures& enabled,
- std::shared_ptr<CompilationResultResolver> resolver,
- const ModuleWireBytes& bytes, bool is_shared,
- const char* api_method_name_for_errors) {
+ std::shared_ptr<CompilationResultResolver> resolver, ModuleWireBytes bytes,
+ bool is_shared, const char* api_method_name_for_errors) {
int compilation_id = next_compilation_id_.fetch_add(1);
TRACE_EVENT1("v8.wasm", "wasm.AsyncCompile", "id", compilation_id);
if (!v8_flags.wasm_async_compilation) {
@@ -671,19 +666,38 @@ void WasmEngine::AsyncCompile(
StartStreamingCompilation(
isolate, enabled, handle(isolate->context(), isolate),
api_method_name_for_errors, std::move(resolver));
- streaming_decoder->OnBytesReceived(bytes.module_bytes());
+
+ auto* rng = isolate->random_number_generator();
+ base::SmallVector<base::Vector<const uint8_t>, 16> ranges;
+ if (!bytes.module_bytes().empty()) ranges.push_back(bytes.module_bytes());
+ // Split into up to 16 ranges (2^4).
+ for (int round = 0; round < 4; ++round) {
+ for (auto it = ranges.begin(); it != ranges.end(); ++it) {
+ auto range = *it;
+ if (range.size() < 2 || !rng->NextBool()) continue; // Do not split.
+ // Choose split point within [1, range.size() - 1].
+ static_assert(kV8MaxWasmModuleSize <= kMaxInt);
+ size_t split_point =
+ 1 + rng->NextInt(static_cast<int>(range.size() - 1));
+ // Insert first sub-range *before* {it} and make {it} point after it.
+ it = ranges.insert(it, range.SubVector(0, split_point)) + 1;
+ *it = range.SubVectorFrom(split_point);
+ }
+ }
+ for (auto range : ranges) {
+ streaming_decoder->OnBytesReceived(range);
+ }
streaming_decoder->Finish();
return;
}
// Make a copy of the wire bytes in case the user program changes them
// during asynchronous compilation.
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
+ base::OwnedVector<const uint8_t> copy =
+ base::OwnedVector<const uint8_t>::Of(bytes.module_bytes());
AsyncCompileJob* job = CreateAsyncCompileJob(
- isolate, enabled, std::move(copy), bytes.length(),
- handle(isolate->context(), isolate), api_method_name_for_errors,
- std::move(resolver), compilation_id);
+ isolate, enabled, std::move(copy), handle(isolate->context(), isolate),
+ api_method_name_for_errors, std::move(resolver), compilation_id);
job->Start();
}
@@ -695,56 +709,61 @@ std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
TRACE_EVENT1("v8.wasm", "wasm.StartStreamingCompilation", "id",
compilation_id);
if (v8_flags.wasm_async_compilation) {
- AsyncCompileJob* job = CreateAsyncCompileJob(
- isolate, enabled, std::unique_ptr<byte[]>(nullptr), 0, context,
- api_method_name, std::move(resolver), compilation_id);
+ AsyncCompileJob* job =
+ CreateAsyncCompileJob(isolate, enabled, {}, context, api_method_name,
+ std::move(resolver), compilation_id);
return job->CreateStreamingDecoder();
}
return StreamingDecoder::CreateSyncStreamingDecoder(
isolate, enabled, context, api_method_name, std::move(resolver));
}
-void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
+void WasmEngine::CompileFunction(Counters* counters,
+ NativeModule* native_module,
uint32_t function_index, ExecutionTier tier) {
// Note we assume that "one-off" compilations can discard detected features.
WasmFeatures detected = WasmFeatures::None();
WasmCompilationUnit::CompileWasmFunction(
- isolate, native_module, &detected,
+ counters, native_module, &detected,
&native_module->module()->functions[function_index], tier);
}
-void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
+void WasmEngine::EnterDebuggingForIsolate(Isolate* isolate) {
std::vector<std::shared_ptr<NativeModule>> native_modules;
+ // {mutex_} gets taken both here and in {RemoveCompiledCode} in
+ // {AddPotentiallyDeadCode}. Therefore {RemoveCompiledCode} has to be
+ // called outside the lock.
{
base::MutexGuard lock(&mutex_);
- if (isolates_[isolate]->keep_tiered_down) return;
- isolates_[isolate]->keep_tiered_down = true;
+ if (isolates_[isolate]->keep_in_debug_state) return;
+ isolates_[isolate]->keep_in_debug_state = true;
for (auto* native_module : isolates_[isolate]->native_modules) {
- native_module->SetTieringState(kTieredDown);
DCHECK_EQ(1, native_modules_.count(native_module));
if (auto shared_ptr = native_modules_[native_module]->weak_ptr.lock()) {
native_modules.emplace_back(std::move(shared_ptr));
}
+ native_module->SetDebugState(kDebugging);
}
}
for (auto& native_module : native_modules) {
- native_module->RecompileForTiering();
+ native_module->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveNonDebugCode);
}
}
-void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
+void WasmEngine::LeaveDebuggingForIsolate(Isolate* isolate) {
// Only trigger recompilation after releasing the mutex, otherwise we risk
// deadlocks because of lock inversion. The bool tells whether the module
// needs recompilation for tier up.
std::vector<std::pair<std::shared_ptr<NativeModule>, bool>> native_modules;
{
base::MutexGuard lock(&mutex_);
- isolates_[isolate]->keep_tiered_down = false;
- auto test_can_tier_up = [this](NativeModule* native_module) {
+ isolates_[isolate]->keep_in_debug_state = false;
+ auto can_remove_debug_code = [this](NativeModule* native_module) {
DCHECK_EQ(1, native_modules_.count(native_module));
for (auto* isolate : native_modules_[native_module]->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
- if (isolates_[isolate]->keep_tiered_down) return false;
+ if (isolates_[isolate]->keep_in_debug_state) return false;
}
return true;
};
@@ -752,22 +771,25 @@ void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
DCHECK_EQ(1, native_modules_.count(native_module));
auto shared_ptr = native_modules_[native_module]->weak_ptr.lock();
if (!shared_ptr) continue; // The module is not used any more.
- if (!native_module->IsTieredDown()) continue;
+ if (!native_module->IsInDebugState()) continue;
// Only start tier-up if no other isolate needs this module in tiered
// down state.
- bool tier_up = test_can_tier_up(native_module);
- if (tier_up) native_module->SetTieringState(kTieredUp);
- native_modules.emplace_back(std::move(shared_ptr), tier_up);
+ bool remove_debug_code = can_remove_debug_code(native_module);
+ if (remove_debug_code) native_module->SetDebugState(kNotDebugging);
+ native_modules.emplace_back(std::move(shared_ptr), remove_debug_code);
}
}
for (auto& entry : native_modules) {
auto& native_module = entry.first;
- bool tier_up = entry.second;
+ bool remove_debug_code = entry.second;
// Remove all breakpoints set by this isolate.
if (native_module->HasDebugInfo()) {
native_module->GetDebugInfo()->RemoveIsolate(isolate);
}
- if (tier_up) native_module->RecompileForTiering();
+ if (remove_debug_code) {
+ native_module->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveDebugCode);
+ }
}
}
@@ -868,6 +890,7 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
ModuleWireBytes wire_bytes(native_module->wire_bytes());
Handle<Script> script =
GetOrCreateScript(isolate, shared_native_module, source_url);
+ native_module->LogWasmCodes(isolate, *script);
Handle<WasmModuleObject> module_object =
WasmModuleObject::New(isolate, std::move(shared_native_module), script);
{
@@ -883,6 +906,14 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
return module_object;
}
+void WasmEngine::FlushCode() {
+ for (auto& entry : native_modules_) {
+ NativeModule* native_module = entry.first;
+ native_module->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveLiftoffCode);
+ }
+}
+
std::shared_ptr<CompilationStatistics>
WasmEngine::GetOrCreateTurboStatistics() {
base::MutexGuard guard(&mutex_);
@@ -917,13 +948,13 @@ CodeTracer* WasmEngine::GetCodeTracer() {
AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
- std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
+ base::OwnedVector<const uint8_t> bytes, Handle<Context> context,
const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver, int compilation_id) {
Handle<Context> incumbent_context = isolate->GetIncumbentContext();
AsyncCompileJob* job = new AsyncCompileJob(
- isolate, enabled, std::move(bytes_copy), length, context,
- incumbent_context, api_method_name, std::move(resolver), compilation_id);
+ isolate, enabled, std::move(bytes), context, incumbent_context,
+ api_method_name, std::move(resolver), compilation_id);
// Pass ownership to the unique_ptr in {async_compile_jobs_}.
base::MutexGuard guard(&mutex_);
async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
@@ -1023,8 +1054,8 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
#if defined(V8_COMPRESS_POINTERS)
// The null value is not accessible on mksnapshot runs.
if (isolate->snapshot_available()) {
- null_tagged_compressed_ = V8HeapCompressionScheme::CompressTagged(
- isolate->factory()->null_value()->ptr());
+ wasm_null_tagged_compressed_ = V8HeapCompressionScheme::CompressObject(
+ isolate->factory()->wasm_null()->ptr());
}
#endif
@@ -1040,7 +1071,7 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
base::MutexGuard lock(&engine->mutex_);
DCHECK_EQ(1, engine->isolates_.count(isolate));
for (auto* native_module : engine->isolates_[isolate]->native_modules) {
- native_module->SampleCodeSize(counters, NativeModule::kSampling);
+ native_module->SampleCodeSize(counters);
}
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
@@ -1179,6 +1210,8 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled,
std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.NewNativeModule");
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
if (v8_flags.wasm_gdb_remote && !gdb_server_) {
gdb_server_ = gdb_server::GdbServer::Create();
@@ -1203,8 +1236,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
pair.first->second.get()->isolates.insert(isolate);
auto* isolate_info = isolates_[isolate].get();
isolate_info->native_modules.insert(native_module.get());
- if (isolate_info->keep_tiered_down) {
- native_module->SetTieringState(kTieredDown);
+ if (isolate_info->keep_in_debug_state) {
+ native_module->SetDebugState(kDebugging);
}
// Record memory protection key support.
@@ -1230,7 +1263,7 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
wire_bytes.size());
std::shared_ptr<NativeModule> native_module =
native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
- bool recompile_module = false;
+ bool remove_all_code = false;
if (native_module) {
TRACE_EVENT0("v8.wasm", "CacheHit");
base::MutexGuard guard(&mutex_);
@@ -1240,42 +1273,46 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
}
native_module_info->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
- if (isolates_[isolate]->keep_tiered_down) {
- native_module->SetTieringState(kTieredDown);
- recompile_module = true;
+ if (isolates_[isolate]->keep_in_debug_state &&
+ !native_module->IsInDebugState()) {
+ remove_all_code = true;
+ native_module->SetDebugState(kDebugging);
}
}
- // Potentially recompile the module for tier down, after releasing the mutex.
- if (recompile_module) native_module->RecompileForTiering();
+ if (remove_all_code) {
+ native_module->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveNonDebugCode);
+ }
return native_module;
}
-bool WasmEngine::UpdateNativeModuleCache(
- bool error, std::shared_ptr<NativeModule>* native_module,
+std::shared_ptr<NativeModule> WasmEngine::UpdateNativeModuleCache(
+ bool has_error, std::shared_ptr<NativeModule> native_module,
Isolate* isolate) {
- // Pass {native_module} by value here to keep it alive until at least after
- // we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
- // which would lock the mutex twice.
- auto prev = native_module->get();
- *native_module = native_module_cache_.Update(*native_module, error);
-
- if (prev == native_module->get()) return true;
-
- bool recompile_module = false;
+ // Keep the previous pointer, but as a `void*`, because we only want to use it
+ // later to compare pointers, and never need to dereference it.
+ void* prev = native_module.get();
+ native_module =
+ native_module_cache_.Update(std::move(native_module), has_error);
+ if (prev == native_module.get()) return native_module;
+ bool remove_all_code = false;
{
base::MutexGuard guard(&mutex_);
- DCHECK_EQ(1, native_modules_.count(native_module->get()));
- native_modules_[native_module->get()]->isolates.insert(isolate);
+ DCHECK_EQ(1, native_modules_.count(native_module.get()));
+ native_modules_[native_module.get()]->isolates.insert(isolate);
DCHECK_EQ(1, isolates_.count(isolate));
- isolates_[isolate]->native_modules.insert(native_module->get());
- if (isolates_[isolate]->keep_tiered_down) {
- native_module->get()->SetTieringState(kTieredDown);
- recompile_module = true;
+ isolates_[isolate]->native_modules.insert(native_module.get());
+ if (isolates_[isolate]->keep_in_debug_state &&
+ !native_module->IsInDebugState()) {
+ remove_all_code = true;
+ native_module->SetDebugState(kDebugging);
}
}
- // Potentially recompile the module for tier down, after releasing the mutex.
- if (recompile_module) native_module->get()->RecompileForTiering();
- return false;
+ if (remove_all_code) {
+ native_module->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveNonDebugCode);
+ }
+ return native_module;
}
bool WasmEngine::GetStreamingCompilationOwnership(size_t prefix_hash) {
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 5f721c3ad5..e941958e47 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -17,6 +17,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/compiler/wasm-call-descriptors.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/operations-barrier.h"
#include "src/wasm/canonical-types.h"
@@ -146,17 +147,15 @@ class V8_EXPORT_PRIVATE WasmEngine {
WasmEngine& operator=(const WasmEngine&) = delete;
~WasmEngine();
- // Synchronously validates the given bytes that represent an encoded Wasm
- // module. If validation fails and {error_msg} is present, it is set to the
- // validation error.
+ // Synchronously validates the given bytes. Returns whether the bytes
+ // represent a valid encoded Wasm module.
bool SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
- const ModuleWireBytes& bytes,
- std::string* error_message = nullptr);
+ ModuleWireBytes bytes);
// Synchronously compiles the given bytes that represent a translated
// asm.js module.
MaybeHandle<AsmWasmData> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Isolate* isolate, ErrorThrower* thrower, ModuleWireBytes bytes,
base::Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode);
Handle<WasmModuleObject> FinalizeTranslatedAsmJs(
@@ -168,7 +167,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
const WasmFeatures& enabled,
ErrorThrower* thrower,
- const ModuleWireBytes& bytes);
+ ModuleWireBytes bytes);
// Synchronously instantiate the given Wasm module with the given imports.
// If the module represents an asm.js module, then the supplied {memory}
@@ -184,7 +183,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// be shared across threads, i.e. could be concurrently modified.
void AsyncCompile(Isolate* isolate, const WasmFeatures& enabled,
std::shared_ptr<CompilationResultResolver> resolver,
- const ModuleWireBytes& bytes, bool is_shared,
+ ModuleWireBytes bytes, bool is_shared,
const char* api_method_name_for_errors);
// Begin an asynchronous instantiation of the given Wasm module.
@@ -201,11 +200,12 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Compiles the function with the given index at a specific compilation tier.
// Errors are stored internally in the CompilationState.
// This is mostly used for testing to force a function into a specific tier.
- void CompileFunction(Isolate* isolate, NativeModule* native_module,
+ void CompileFunction(Counters* counters, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier);
- void TierDownAllModulesPerIsolate(Isolate* isolate);
- void TierUpAllModulesPerIsolate(Isolate* isolate);
+ void EnterDebuggingForIsolate(Isolate* isolate);
+
+ void LeaveDebuggingForIsolate(Isolate* isolate);
// Exports the sharable parts of the given module object so that they can be
// transferred to a different Context/Isolate using the same engine.
@@ -218,6 +218,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
Isolate* isolate, std::shared_ptr<NativeModule> shared_module,
base::Vector<const char> source_url);
+ void FlushCode();
+
AccountingAllocator* allocator() { return &allocator_; }
// Compilation statistics for TurboFan compilations. Returns a shared_ptr
@@ -301,12 +303,13 @@ class V8_EXPORT_PRIVATE WasmEngine {
// To avoid a deadlock on the main thread between synchronous and streaming
// compilation, two compilation jobs might compile the same native module at
// the same time. In this case the first call to {UpdateNativeModuleCache}
- // will insert the native module in the cache, and the last call will discard
- // its {native_module} argument and replace it with the existing entry.
- // Return true in the former case, and false in the latter.
- bool UpdateNativeModuleCache(bool error,
- std::shared_ptr<NativeModule>* native_module,
- Isolate* isolate);
+ // will insert the native module in the cache, and the last call will receive
+ // the existing entry from the cache.
+ // Return the cached entry, or {native_module} if there was no previously
+ // cached module.
+ std::shared_ptr<NativeModule> UpdateNativeModuleCache(
+ bool has_error, std::shared_ptr<NativeModule> native_module,
+ Isolate* isolate);
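
The "first call inserts, later calls receive the existing entry" contract described above can be sketched with a minimal stand-alone cache. This is illustrative only; {ToyModule}, the hash key, and the locking are assumptions, not V8's {NativeModuleCache}:

```cpp
#include <cstddef>
#include <map>
#include <memory>
#include <mutex>

struct ToyModule {
  size_t wire_bytes_hash;
};

class ToyModuleCache {
 public:
  // The first caller's module is inserted and returned unchanged; later
  // callers with the same key get the previously cached entry instead.
  std::shared_ptr<ToyModule> Update(std::shared_ptr<ToyModule> module) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto [it, inserted] = cache_.try_emplace(module->wire_bytes_hash, module);
    return inserted ? module : it->second;
  }

 private:
  std::mutex mutex_;
  std::map<size_t, std::shared_ptr<ToyModule>> cache_;
};
```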
// Register this prefix hash for a streaming compilation job.
// If the hash is not in the cache yet, the function returns true and the
@@ -360,10 +363,14 @@ class V8_EXPORT_PRIVATE WasmEngine {
TypeCanonicalizer* type_canonicalizer() { return &type_canonicalizer_; }
+ compiler::WasmCallDescriptors* call_descriptors() {
+ return &call_descriptors_;
+ }
+
// Returns either the compressed tagged pointer representing a null value or
// 0 if pointer compression is not available.
- Tagged_t compressed_null_value_or_zero() const {
- return null_tagged_compressed_;
+ Tagged_t compressed_wasm_null_value_or_zero() const {
+ return wasm_null_tagged_compressed_;
}
// Call on process start and exit.
@@ -377,8 +384,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
AsyncCompileJob* CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
- std::unique_ptr<byte[]> bytes_copy, size_t length,
- Handle<Context> context, const char* api_method_name,
+ base::OwnedVector<const uint8_t> bytes, Handle<Context> context,
+ const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver, int compilation_id);
void TriggerGC(int8_t gc_sequence_index);
@@ -402,10 +409,12 @@ class V8_EXPORT_PRIVATE WasmEngine {
std::atomic<int> next_compilation_id_{0};
// Compressed tagged pointer to null value.
- std::atomic<Tagged_t> null_tagged_compressed_{0};
+ std::atomic<Tagged_t> wasm_null_tagged_compressed_{0};
TypeCanonicalizer type_canonicalizer_;
+ compiler::WasmCallDescriptors call_descriptors_;
+
// This mutex protects all information which is mutated concurrently or
// fields that are initialized lazily on the first access.
base::Mutex mutex_;
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index aad2f6aab8..ae6e49bbc4 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -565,7 +565,7 @@ void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
if (element_type.is_reference()) {
WasmInstanceObject instance =
WasmInstanceObject::cast(Object(raw_instance));
- Isolate* isolate = Isolate::FromRootAddress(instance.isolate_root());
+ Isolate* isolate = instance.GetIsolate();
ObjectSlot dst_slot = dst_array.ElementSlot(dst_index);
ObjectSlot src_slot = src_array.ElementSlot(src_index);
if (overlapping_ranges) {
@@ -588,16 +588,16 @@ void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
}
}
-void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
- uint32_t raw_type,
- Address initial_value_addr) {
+void array_fill_wrapper(Address raw_array, uint32_t index, uint32_t length,
+ uint32_t emit_write_barrier, uint32_t raw_type,
+ Address initial_value_addr) {
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
ValueType type = ValueType::FromRawBitField(raw_type);
int8_t* initial_element_address = reinterpret_cast<int8_t*>(
- ArrayElementAddress(raw_array, 0, type.value_kind_size()));
+ ArrayElementAddress(raw_array, index, type.value_kind_size()));
int64_t initial_value = *reinterpret_cast<int64_t*>(initial_value_addr);
- int bytes_to_set = length * type.value_kind_size();
+ const int bytes_to_set = length * type.value_kind_size();
// If the initial value is zero, we memset the array.
if (type.is_numeric() && initial_value == 0) {
@@ -606,7 +606,7 @@ void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
}
// We implement the general case by setting the first 8 bytes manually, then
- // filling the rest by exponentially growing {memmove}s.
+ // filling the rest by exponentially growing {memcpy}s.
DCHECK_GE(static_cast<size_t>(bytes_to_set), sizeof(int64_t));
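
The "exponentially growing {memcpy}s" mentioned in the comment above boil down to doubling an already-initialized prefix until the buffer is full. A stand-alone sketch of that idea (hypothetical helper, not the wrapper itself):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstring>

// Fill {total_bytes} of {dst} with copies of a {pattern_size}-byte pattern by
// writing one element and then doubling the initialized region with memcpy.
// Assumes total_bytes is a non-zero multiple of pattern_size.
void ExponentialFill(uint8_t* dst, size_t total_bytes,
                     const uint8_t* pattern, size_t pattern_size) {
  std::memcpy(dst, pattern, pattern_size);  // initialize the first element
  size_t filled = pattern_size;
  while (filled < total_bytes) {
    // Source and destination never overlap: we copy from the start of the
    // buffer into the not-yet-initialized tail, at most doubling each time.
    size_t to_copy = std::min(filled, total_bytes - filled);
    std::memcpy(dst + filled, dst, to_copy);
    filled += to_copy;
  }
}
```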
@@ -636,6 +636,7 @@ void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
break;
}
case kRefNull:
+ case kRef:
if constexpr (kTaggedSize == 4) {
int32_t* base = reinterpret_cast<int32_t*>(initial_element_address);
base[0] = base[1] = static_cast<int32_t>(initial_value);
@@ -645,7 +646,6 @@ void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
break;
case kS128:
case kRtt:
- case kRef:
case kVoid:
case kBottom:
UNREACHABLE();
@@ -663,6 +663,16 @@ void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
std::memcpy(initial_element_address + bytes_already_set,
initial_element_address, bytes_to_set - bytes_already_set);
}
+
+ if (emit_write_barrier) {
+ DCHECK(type.is_reference());
+ WasmArray array = WasmArray::cast(Object(raw_array));
+ Isolate* isolate = array.GetIsolate();
+ ObjectSlot start(reinterpret_cast<Address>(initial_element_address));
+ ObjectSlot end(
+ reinterpret_cast<Address>(initial_element_address + bytes_to_set));
+ isolate->heap()->WriteBarrierForRange(array, start, end);
+ }
}
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 4ae78ce537..7bbdcee31e 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -118,10 +118,11 @@ void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
uint32_t dst_index, Address raw_src_array,
uint32_t src_index, uint32_t length);
-// The initial value is passed as an int64_t on the stack.
-void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
- uint32_t raw_type,
- Address initial_value_addr);
+// The initial value is passed as an int64_t on the stack. Cannot handle s128
+// other than 0.
+void array_fill_wrapper(Address raw_array, uint32_t index, uint32_t length,
+ uint32_t emit_write_barrier, uint32_t raw_type,
+ Address initial_value_addr);
using WasmTrapCallbackForTesting = void (*)();
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 5b5b6003dc..095b6debff 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -29,6 +29,10 @@
/* V8 side owner: jkummerow */ \
V(gc, "garbage collection", false) \
\
+ /* Inlining of small wasm GC functions into JavaScript */ \
+ /* V8 side owner: mliedtke */ \
+ V(js_inlining, "inline small wasm functions into JS", false) \
+ \
/* Non-specified, V8-only experimental additions to the GC proposal */ \
/* V8 side owner: jkummerow */ \
V(assume_ref_cast_succeeds, \
@@ -46,16 +50,6 @@
/* V8 side owner: manoskouk */ \
V(typed_funcref, "typed function references", false) \
\
- /* Memory64 proposal. */ \
- /* https://github.com/WebAssembly/memory64 */ \
- /* V8 side owner: clemensb */ \
- V(memory64, "memory64", false) \
- \
- /* Relaxed SIMD proposal. */ \
- /* https://github.com/WebAssembly/relaxed-simd */ \
- /* V8 side owner: zhin */ \
- V(relaxed_simd, "relaxed simd", false) \
- \
/* Branch Hinting proposal. */ \
/* https://github.com/WebAssembly/branch-hinting */ \
/* V8 side owner: jkummerow */ \
@@ -66,11 +60,6 @@
/* V8 side owner: thibaudm, fgm */ \
V(stack_switching, "stack switching", false) \
\
- /* Extended Constant Expressions Proposal. */ \
- /* https://github.com/WebAssembly/extended-const */ \
- /* V8 side owner: manoskouk */ \
- V(extended_const, "extended constant expressions", false) \
- \
/* Reference-Typed Strings Proposal. */ \
/* https://github.com/WebAssembly/stringref */ \
/* V8 side owner: jkummerow */ \
@@ -81,26 +70,50 @@
// exposed as chrome://flags/#enable-experimental-webassembly-features). Staged
// features get limited fuzzer coverage, and should come with their own tests.
// They are not run through all fuzzers though and don't get much exposure in
-// the wild. Staged features do not necessarily be fully stabilized. They should
+// the wild. Staged features are not necessarily fully stabilized. They should
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Tail call / return call proposal. */ \
- /* https://github.com/webassembly/tail-call */ \
- /* V8 side owner: thibaudm */ \
- /* Staged in v8.7 * */ \
- V(return_call, "return call opcodes", false) \
- \
/* Type reflection proposal. */ \
/* https://github.com/webassembly/js-types */ \
/* V8 side owner: ahaas */ \
/* Staged in v7.8. */ \
- V(type_reflection, "wasm type reflection in JS", false)
+ V(type_reflection, "wasm type reflection in JS", false) \
+ \
+ /* Memory64 proposal. */ \
+ /* https://github.com/WebAssembly/memory64 */ \
+ /* V8 side owner: clemensb */ \
+ V(memory64, "memory64", false) \
+ \
+ /* Relaxed SIMD proposal. */ \
+ /* https://github.com/WebAssembly/relaxed-simd */ \
+ /* V8 side owner: gdeepti */ \
+ V(relaxed_simd, "relaxed simd", false) \
+ \
+ /* Extended Constant Expressions Proposal. */ \
+ /* https://github.com/WebAssembly/extended-const */ \
+ /* V8 side owner: manoskouk */ \
+ /* Staged in v11.3. */ \
+ V(extended_const, "extended constant expressions", false) \
+ \
+ /* Not user-visible, defined here so an Origin Trial can control it. */ \
+ /* V8 side owner: manoskouk, clemensb */ \
+ /* Staged in v11.3 */ \
+ /* Launch bug: https://crbug.com/1424350 */ \
+ V(inlining, "wasm-into-wasm inlining", false)
// #############################################################################
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
-#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */
+#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
+ /* Tail call / return call proposal. */ \
+ /* https://github.com/webassembly/tail-call */ \
+ /* V8 side owner: thibaudm */ \
+  /* Staged in v8.7. */ \
+  /* Shipped in v11.2. */ \
+ /* ITS: https://groups.google.com/a/chromium.org/g/blink-dev/c/6VEOK4WZ7Wk \
+ */ \
+ V(return_call, "return call opcodes", true)
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
@@ -108,4 +121,15 @@
FOREACH_WASM_STAGING_FEATURE_FLAG(V) \
FOREACH_WASM_SHIPPED_FEATURE_FLAG(V)
+// Consistency check: Experimental and staged features are off by default.
+#define CHECK_WASM_FEATURE_OFF_BY_DEFAULT(name, desc, enabled) \
+ static_assert(enabled == false);
+#define CHECK_WASM_FEATURE_ON_BY_DEFAULT(name, desc, enabled) \
+ static_assert(enabled == true);
+FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(CHECK_WASM_FEATURE_OFF_BY_DEFAULT)
+FOREACH_WASM_STAGING_FEATURE_FLAG(CHECK_WASM_FEATURE_OFF_BY_DEFAULT)
+FOREACH_WASM_SHIPPED_FEATURE_FLAG(CHECK_WASM_FEATURE_ON_BY_DEFAULT)
+#undef CHECK_WASM_FEATURE_OFF_BY_DEFAULT
+#undef CHECK_WASM_FEATURE_ON_BY_DEFAULT
+
#endif // V8_WASM_WASM_FEATURE_FLAGS_H_
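Aside (illustrative, not from the patch): the consistency check added above is an instance of the X-macro pattern, where a single feature list is expanded under different helper macros. A self-contained sketch of the same idea with made-up feature names:

    // One list, expanded multiple times with different helper macros.
    #define FOREACH_DEMO_FEATURE(V)      \
      V(foo, "demo feature foo", false)  \
      V(bar, "demo feature bar", false)

    // First expansion: declare a constant per feature.
    #define DECLARE_FLAG(name, desc, enabled) constexpr bool k_##name = enabled;
    FOREACH_DEMO_FEATURE(DECLARE_FLAG)
    #undef DECLARE_FLAG

    // Second expansion: enforce at compile time that every default is false.
    #define CHECK_OFF_BY_DEFAULT(name, desc, enabled) static_assert(!(enabled));
    FOREACH_DEMO_FEATURE(CHECK_OFF_BY_DEFAULT)
    #undef CHECK_OFF_BY_DEFAULT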
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index ab26412d89..b11b0ae809 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -33,6 +33,18 @@ WasmFeatures WasmFeatures::FromIsolate(Isolate* isolate) {
WasmFeatures WasmFeatures::FromContext(Isolate* isolate,
Handle<Context> context) {
WasmFeatures features = WasmFeatures::FromFlags();
+ if (isolate->IsWasmGCEnabled(context)) {
+ features.Add(kFeature_gc);
+ // Also enable typed function references, since the commandline flag
+ // implication won't do that for us in this case.
+ features.Add(kFeature_typed_funcref);
+ }
+ if (isolate->IsWasmStringRefEnabled(context)) {
+ features.Add(kFeature_stringref);
+ }
+ if (isolate->IsWasmInliningEnabled(context)) {
+ features.Add(kFeature_inlining);
+ }
// This space intentionally left blank for future Wasm origin trials.
return features;
}
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index bbe3a480e1..b600258d0b 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -22,7 +22,7 @@ WasmCode*& WasmImportWrapperCache::operator[](
return entry_map_[key];
}
-WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
+WasmCode* WasmImportWrapperCache::Get(ImportCallKind kind,
uint32_t canonical_type_index,
int expected_arity,
Suspend suspend) const {
@@ -34,7 +34,7 @@ WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
return it->second;
}
-WasmCode* WasmImportWrapperCache::MaybeGet(compiler::WasmImportCallKind kind,
+WasmCode* WasmImportWrapperCache::MaybeGet(ImportCallKind kind,
uint32_t canonical_type_index,
int expected_arity,
Suspend suspend) const {
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index c334ce009c..9ab568b91b 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -10,7 +10,7 @@
#define V8_WASM_WASM_IMPORT_WRAPPER_CACHE_H_
#include "src/base/platform/mutex.h"
-#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/module-instantiate.h"
namespace v8 {
namespace internal {
@@ -28,8 +28,8 @@ using FunctionSig = Signature<ValueType>;
class WasmImportWrapperCache {
public:
struct CacheKey {
- CacheKey(const compiler::WasmImportCallKind& kind,
- uint32_t canonical_type_index, int expected_arity, Suspend suspend)
+ CacheKey(ImportCallKind kind, uint32_t canonical_type_index,
+ int expected_arity, Suspend suspend)
: kind(kind),
canonical_type_index(canonical_type_index),
expected_arity(expected_arity),
@@ -41,7 +41,7 @@ class WasmImportWrapperCache {
expected_arity == rhs.expected_arity && suspend == rhs.suspend;
}
- compiler::WasmImportCallKind kind;
+ ImportCallKind kind;
uint32_t canonical_type_index;
int expected_arity;
Suspend suspend;
@@ -73,13 +73,12 @@ class WasmImportWrapperCache {
V8_EXPORT_PRIVATE WasmCode*& operator[](const CacheKey& key);
// Thread-safe. Assumes the key exists in the map.
- V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
+ V8_EXPORT_PRIVATE WasmCode* Get(ImportCallKind kind,
uint32_t canonical_type_index,
int expected_arity, Suspend suspend) const;
// Thread-safe. Returns nullptr if the key doesn't exist in the map.
- WasmCode* MaybeGet(compiler::WasmImportCallKind kind,
- uint32_t canonical_type_index, int expected_arity,
- Suspend suspend) const;
+ WasmCode* MaybeGet(ImportCallKind kind, uint32_t canonical_type_index,
+ int expected_arity, Suspend suspend) const;
~WasmImportWrapperCache();
diff --git a/deps/v8/src/wasm/wasm-init-expr.cc b/deps/v8/src/wasm/wasm-init-expr.cc
index 7c57f6d26a..2e04de5c68 100644
--- a/deps/v8/src/wasm/wasm-init-expr.cc
+++ b/deps/v8/src/wasm/wasm-init-expr.cc
@@ -21,7 +21,13 @@ ValueType WasmInitExpr::type(const WasmModule* module,
? module->globals[immediate().index].type
: kWasmBottom;
case kI32Const:
+ case kI32Add:
+ case kI32Sub:
+ case kI32Mul:
return kWasmI32;
+ case kI64Add:
+ case kI64Sub:
+ case kI64Mul:
case kI64Const:
return kWasmI64;
case kF32Const:
diff --git a/deps/v8/src/wasm/wasm-init-expr.h b/deps/v8/src/wasm/wasm-init-expr.h
index 8d5be635a2..fcdcd25003 100644
--- a/deps/v8/src/wasm/wasm-init-expr.h
+++ b/deps/v8/src/wasm/wasm-init-expr.h
@@ -35,6 +35,12 @@ class WasmInitExpr : public ZoneObject {
kF32Const,
kF64Const,
kS128Const,
+ kI32Add,
+ kI32Sub,
+ kI32Mul,
+ kI64Add,
+ kI64Sub,
+ kI64Mul,
kRefNullConst,
kRefFuncConst,
kStructNew,
@@ -74,6 +80,15 @@ class WasmInitExpr : public ZoneObject {
memcpy(immediate_.s128_const.data(), v, kSimd128Size);
}
+ static WasmInitExpr Binop(Zone* zone, Operator op, WasmInitExpr lhs,
+ WasmInitExpr rhs) {
+ DCHECK(op == kI32Add || op == kI32Sub || op == kI32Mul || op == kI64Add ||
+ op == kI64Sub || op == kI64Mul);
+ return WasmInitExpr(
+ op, zone->New<ZoneVector<WasmInitExpr>>(
+ std::initializer_list<WasmInitExpr>{lhs, rhs}, zone));
+ }
+
static WasmInitExpr GlobalGet(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kGlobalGet;
@@ -151,6 +166,14 @@ class WasmInitExpr : public ZoneObject {
return immediate().f32_const == other.immediate().f32_const;
case kF64Const:
return immediate().f64_const == other.immediate().f64_const;
+ case kI32Add:
+ case kI32Sub:
+ case kI32Mul:
+ case kI64Add:
+ case kI64Sub:
+ case kI64Mul:
+ return operands_[0] == other.operands_[0] &&
+ operands_[1] == other.operands_[1];
case kS128Const:
return immediate().s128_const == other.immediate().s128_const;
case kRefNullConst:
@@ -171,9 +194,7 @@ class WasmInitExpr : public ZoneObject {
}
return true;
case kI31New: {
- int32_t mask = int32_t{0x7fffffff};
- return (immediate().i32_const & mask) ==
- (other.immediate().i32_const & mask);
+ return operands_[0] == other.operands_[0];
}
}
}
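Aside (illustrative, not from the patch): the new kI32Add/kI32Sub/kI32Mul (and i64) kinds represent small constant-expression trees with two operands each. A toy sketch of such a tree and its evaluation, restricted to i32 and using wrapping arithmetic as wasm does; every name here is invented:

    #include <cstdint>
    #include <memory>

    struct ConstExpr {
      enum Kind { kI32Const, kI32Add, kI32Sub, kI32Mul } kind;
      int32_t value = 0;                    // Payload for kI32Const.
      std::unique_ptr<ConstExpr> lhs, rhs;  // Operands for the binops.
    };

    int32_t Evaluate(const ConstExpr& e) {
      // Evaluate in unsigned space so overflow wraps instead of being UB.
      auto u = [](const ConstExpr& x) { return static_cast<uint32_t>(Evaluate(x)); };
      switch (e.kind) {
        case ConstExpr::kI32Const: return e.value;
        case ConstExpr::kI32Add: return static_cast<int32_t>(u(*e.lhs) + u(*e.rhs));
        case ConstExpr::kI32Sub: return static_cast<int32_t>(u(*e.lhs) - u(*e.rhs));
        case ConstExpr::kI32Mul: return static_cast<int32_t>(u(*e.lhs) * u(*e.rhs));
      }
      return 0;  // Unreachable for well-formed trees.
    }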
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 6cbfdbc837..ba12cf19c9 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -485,6 +485,20 @@ bool EnforceUint32(T argument_name, Local<v8::Value> v, Local<Context> context,
*res = static_cast<uint32_t>(double_number);
return true;
}
+
+// The enum values need to match "WasmCompilationMethod" in
+// tools/metrics/histograms/enums.xml.
+enum CompilationMethod {
+ kSyncCompilation = 0,
+ kAsyncCompilation = 1,
+ kStreamingCompilation = 2,
+ kAsyncInstantiation = 3,
+ kStreamingInstantiation = 4,
+};
+
+void RecordCompilationMethod(i::Isolate* isolate, CompilationMethod method) {
+ isolate->counters()->wasm_compilation_method()->AddSample(method);
+}
} // namespace
// WebAssembly.compile(bytes) -> Promise
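Aside (illustrative, not from the patch): the hunk above tags each WebAssembly API entry point with an enum value and records one sample per call. A minimal stand-in for that bookkeeping, not using V8's Counters API; names are invented:

    #include <array>
    #include <atomic>

    enum CompilationMethod { kSync, kAsync, kStreaming, kNumMethods };

    // One relaxed atomic counter per entry point; sufficient for coarse metrics.
    std::array<std::atomic<int>, kNumMethods> g_samples{};

    void RecordCompilationMethod(CompilationMethod method) {
      g_samples[method].fetch_add(1, std::memory_order_relaxed);
    }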
@@ -492,6 +506,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
constexpr const char* kAPIMethodName = "WebAssembly.compile()";
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ RecordCompilationMethod(i_isolate, kAsyncCompilation);
HandleScope scope(isolate);
ScheduledErrorThrower thrower(i_isolate, kAPIMethodName);
@@ -561,6 +576,7 @@ void WebAssemblyCompileStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ RecordCompilationMethod(i_isolate, kStreamingCompilation);
HandleScope scope(isolate);
const char* const kAPIMethodName = "WebAssembly.compileStreaming()";
ScheduledErrorThrower thrower(i_isolate, kAPIMethodName);
@@ -680,6 +696,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (i_isolate->wasm_module_callback()(args)) return;
+ RecordCompilationMethod(i_isolate, kSyncCompilation);
HandleScope scope(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
@@ -795,6 +812,7 @@ void WebAssemblyModuleCustomSections(
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ RecordCompilationMethod(i_isolate, kAsyncInstantiation);
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
@@ -856,6 +874,7 @@ void WebAssemblyInstantiateStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ RecordCompilationMethod(i_isolate, kStreamingInstantiation);
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
@@ -1116,15 +1135,13 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
namespace {
i::Handle<i::Object> DefaultReferenceValue(i::Isolate* isolate,
i::wasm::ValueType type) {
- if (type.is_reference()) {
- // Use undefined for JS type (externref) but null for wasm types as wasm
- // does not know undefined.
- if (type.heap_representation() == i::wasm::HeapType::kExtern) {
- return isolate->factory()->undefined_value();
- }
- return isolate->factory()->null_value();
+ DCHECK(type.is_object_reference());
+ // Use undefined for JS type (externref) but null for wasm types as wasm does
+ // not know undefined.
+ if (type.heap_representation() == i::wasm::HeapType::kExtern) {
+ return isolate->factory()->undefined_value();
}
- UNREACHABLE();
+ return isolate->factory()->wasm_null();
}
} // namespace
@@ -1179,8 +1196,10 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
} else if (enabled_features.has_gc() &&
string->StringEquals(v8_str(isolate, "arrayref"))) {
type = i::wasm::kWasmArrayRef;
+ } else if (enabled_features.has_gc() &&
+ string->StringEquals(v8_str(isolate, "i31ref"))) {
+ type = i::wasm::kWasmI31Ref;
} else {
- // TODO(7748): Add "i31ref".
thrower.TypeError(
"Descriptor property 'element' must be a WebAssembly reference type");
return;
@@ -1336,7 +1355,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
i_isolate);
Maybe<bool> result =
- buffer->SetIntegrityLevel(buffer, i::FROZEN, i::kDontThrow);
+ buffer->SetIntegrityLevel(i_isolate, buffer, i::FROZEN, i::kDontThrow);
if (!result.FromJust()) {
thrower.TypeError(
"Status of setting SetIntegrityLevel of buffer is false.");
@@ -1390,9 +1409,11 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
} else if (enabled_features.has_gc() &&
string->StringEquals(v8_str(isolate, "arrayref"))) {
*type = i::wasm::kWasmArrayRef;
+ } else if (enabled_features.has_gc() &&
+ string->StringEquals(v8_str(isolate, "i31ref"))) {
+ *type = i::wasm::kWasmI31Ref;
} else {
// Unrecognized type.
- // TODO(7748): Add "i31ref".
*type = i::wasm::kWasmVoid;
}
return true;
@@ -1550,7 +1571,10 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
(args.Length() < 2) ? i_isolate->factory()->null_value()
: Utils::OpenHandle(*value);
const char* error_message;
- if (!i::wasm::JSToWasmObject(i_isolate, nullptr, value_handle, type,
+ // The JS API does not allow for indexed types.
+ // TODO(7748): Fix this if that changes.
+ DCHECK(!type.has_index());
+ if (!i::wasm::JSToWasmObject(i_isolate, value_handle, type,
&error_message)
.ToHandle(&value_handle)) {
thrower.TypeError("%s", error_message);
@@ -1645,8 +1669,12 @@ void WebAssemblyTag(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Set the tag index to 0. It is only used for debugging purposes, and has no
// meaningful value when declared outside of a wasm module.
auto tag = i::WasmExceptionTag::New(i_isolate, 0);
+
+ uint32_t canonical_type_index =
+ i::wasm::GetWasmEngine()->type_canonicalizer()->AddRecursiveGroup(&sig);
+
i::Handle<i::JSObject> tag_object =
- i::WasmTagObject::New(i_isolate, &sig, tag);
+ i::WasmTagObject::New(i_isolate, &sig, canonical_type_index, tag);
args.GetReturnValue().Set(Utils::ToLocal(tag_object));
}
@@ -1685,8 +1713,7 @@ uint32_t GetEncodedSize(i::Handle<i::WasmTagObject> tag_object) {
i::wasm::WasmTagSig sig{0, static_cast<size_t>(serialized_sig.length()),
reinterpret_cast<i::wasm::ValueType*>(
serialized_sig.GetDataStartAddress())};
- i::wasm::WasmTag tag(&sig);
- return i::WasmExceptionPackage::GetEncodedSize(&tag);
+ return i::WasmExceptionPackage::GetEncodedSize(&sig);
}
void EncodeExceptionValues(v8::Isolate* isolate,
@@ -2028,7 +2055,10 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool is_wasm_js_function = i::WasmJSFunction::IsWasmJSFunction(*callable);
if (is_wasm_exported_function && !suspend && !promise) {
- if (*i::Handle<i::WasmExportedFunction>::cast(callable)->sig() == *sig) {
+ uint32_t canonical_sig_index =
+ i::wasm::GetWasmEngine()->type_canonicalizer()->AddRecursiveGroup(sig);
+ if (i::Handle<i::WasmExportedFunction>::cast(callable)->MatchesSignature(
+ canonical_sig_index)) {
args.GetReturnValue().Set(Utils::ToLocal(callable));
return;
}
@@ -2040,7 +2070,10 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
if (is_wasm_js_function && !suspend && !promise) {
- if (i::Handle<i::WasmJSFunction>::cast(callable)->MatchesSignature(sig)) {
+ uint32_t canonical_sig_index =
+ i::wasm::GetWasmEngine()->type_canonicalizer()->AddRecursiveGroup(sig);
+ if (i::Handle<i::WasmJSFunction>::cast(callable)->MatchesSignature(
+ canonical_sig_index)) {
args.GetReturnValue().Set(Utils::ToLocal(callable));
return;
}
@@ -2067,7 +2100,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::WasmInstanceObject> instance(
i::WasmInstanceObject::cast(data.internal().ref()), i_isolate);
int func_index = data.function_index();
- i::Handle<i::CodeT> wrapper =
+ i::Handle<i::Code> wrapper =
BUILTIN_CODE(i_isolate, WasmReturnPromiseOnSuspend);
i::Handle<i::JSFunction> result = i::WasmExportedFunction::New(
i_isolate, instance, func_index,
@@ -2226,56 +2259,15 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
namespace {
void WasmObjectToJSReturnValue(v8::ReturnValue<v8::Value>& return_value,
i::Handle<i::Object> value,
- i::wasm::HeapType::Representation repr,
- const i::wasm::WasmModule* module,
- i::Isolate* isolate,
+ i::wasm::HeapType type, i::Isolate* isolate,
ScheduledErrorThrower* thrower) {
- switch (repr) {
- case i::wasm::HeapType::kExtern:
- case i::wasm::HeapType::kString:
- // TODO(7748): Make sure i31ref is compatible with Smi, or transform here.
- case i::wasm::HeapType::kI31:
- return_value.Set(Utils::ToLocal(value));
- return;
- case i::wasm::HeapType::kFunc: {
- if (!value->IsNull()) {
- DCHECK(value->IsWasmInternalFunction());
- value =
- handle(i::Handle<i::WasmInternalFunction>::cast(value)->external(),
- isolate);
- }
- return_value.Set(Utils::ToLocal(value));
- return;
- }
- case i::wasm::HeapType::kStringViewWtf8:
- thrower->TypeError("stringview_wtf8 has no JS representation");
- return;
- case i::wasm::HeapType::kStringViewWtf16:
- thrower->TypeError("stringview_wtf16 has no JS representation");
- return;
- case i::wasm::HeapType::kStringViewIter:
- thrower->TypeError("stringview_iter has no JS representation");
- return;
- case i::wasm::HeapType::kBottom:
- UNREACHABLE();
- case i::wasm::HeapType::kStruct:
- case i::wasm::HeapType::kArray:
- case i::wasm::HeapType::kEq:
- case i::wasm::HeapType::kAny: {
- return_value.Set(Utils::ToLocal(value));
- return;
- }
- default:
- if (module->has_signature(repr)) {
- if (!value->IsNull()) {
- DCHECK(value->IsWasmInternalFunction());
- value = handle(
- i::Handle<i::WasmInternalFunction>::cast(value)->external(),
- isolate);
- }
- }
- return_value.Set(Utils::ToLocal(value));
- return;
+ const char* error_message = nullptr;
+ i::MaybeHandle<i::Object> maybe_result =
+ i::wasm::WasmToJSObject(isolate, value, type, &error_message);
+ if (maybe_result.is_null()) {
+ thrower->TypeError("%s", error_message);
+ } else {
+ return_value.Set(Utils::ToLocal(maybe_result.ToHandleChecked()));
}
}
} // namespace
@@ -2315,12 +2307,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::WasmTableObject::Get(i_isolate, receiver, index);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- const i::wasm::WasmModule* module =
- receiver->instance().IsWasmInstanceObject()
- ? i::WasmInstanceObject::cast(receiver->instance()).module()
- : nullptr;
- WasmObjectToJSReturnValue(return_value, result,
- receiver->type().heap_representation(), module,
+ WasmObjectToJSReturnValue(return_value, result, receiver->type().heap_type(),
i_isolate, &thrower);
}
@@ -2346,6 +2333,13 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::Object> element;
if (args.Length() >= 2) {
element = Utils::OpenHandle(*args[1]);
+ const char* error_message;
+ if (!i::WasmTableObject::JSToWasmElement(i_isolate, table_object, element,
+ &error_message)
+ .ToHandle(&element)) {
+ thrower.TypeError("Argument 1 is invalid for table: %s", error_message);
+ return;
+ }
} else if (table_object->type().is_defaultable()) {
element = DefaultReferenceValue(i_isolate, table_object->type());
} else {
@@ -2354,14 +2348,6 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- const char* error_message;
- if (!i::WasmTableObject::JSToWasmElement(i_isolate, table_object, element,
- &error_message)
- .ToHandle(&element)) {
- thrower.TypeError("Argument 1 is invalid for table: %s", error_message);
- return;
- }
-
i::WasmTableObject::Set(i_isolate, table_object, index, element);
}
@@ -2435,7 +2421,7 @@ void WebAssemblyMemoryGetBuffer(
// buffer are out of sync, handle that here when bounds checks, and Grow
// are handled correctly.
Maybe<bool> result =
- buffer->SetIntegrityLevel(buffer, i::FROZEN, i::kDontThrow);
+ buffer->SetIntegrityLevel(i_isolate, buffer, i::FROZEN, i::kDontThrow);
if (!result.FromJust()) {
thrower.TypeError(
"Status of setting SetIntegrityLevel of buffer is false.");
@@ -2688,13 +2674,9 @@ void WebAssemblyGlobalGetValueCommon(
break;
case i::wasm::kRef:
case i::wasm::kRefNull: {
- const i::wasm::WasmModule* module =
- receiver->instance().IsWasmInstanceObject()
- ? i::WasmInstanceObject::cast(receiver->instance()).module()
- : nullptr;
WasmObjectToJSReturnValue(return_value, receiver->GetRef(),
- receiver->type().heap_representation(), module,
- i_isolate, &thrower);
+ receiver->type().heap_type(), i_isolate,
+ &thrower);
break;
}
case i::wasm::kRtt:
@@ -2967,6 +2949,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// TODO(7748): These built-ins should not be shipped with wasm GC.
// Either a new flag will be needed or the built-ins have to be deleted prior
// to shipping.
+ // TODO(13810): We should install these later, when we can query the
+ // isolate's wasm_gc_enabled_callback, to take the Origin Trial into account.
if (v8_flags.experimental_wasm_gc) {
SimpleInstallFunction(
isolate, webassembly, "experimentalConvertArrayToString",
@@ -3151,6 +3135,19 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<Context> context) {
// This space left blank for future origin trials.
}
+
+namespace wasm {
+// static
+std::unique_ptr<WasmStreaming> StartStreamingForTesting(
+ Isolate* isolate,
+ std::shared_ptr<wasm::CompilationResultResolver> resolver) {
+ return std::make_unique<WasmStreaming>(
+ std::make_unique<WasmStreaming::WasmStreamingImpl>(
+ reinterpret_cast<v8::Isolate*>(isolate), "StartStreamingForTesting",
+ resolver));
+}
+} // namespace wasm
+
#undef ASSIGN
#undef EXTRACT_THIS
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index ce50b2822a..526f5fc932 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -9,16 +9,25 @@
#ifndef V8_WASM_WASM_JS_H_
#define V8_WASM_WASM_JS_H_
+#include <memory>
+
#include "src/common/globals.h"
namespace v8 {
-namespace internal {
+class WasmStreaming;
+} // namespace v8
+
+namespace v8::internal {
class Context;
template <typename T>
class Handle;
namespace wasm {
+class CompilationResultResolver;
class StreamingDecoder;
+
+V8_EXPORT_PRIVATE std::unique_ptr<WasmStreaming> StartStreamingForTesting(
+ Isolate*, std::shared_ptr<wasm::CompilationResultResolver>);
} // namespace wasm
// Exposes a WebAssembly API to JavaScript through the V8 API.
@@ -31,7 +40,6 @@ class WasmJs {
Isolate* isolate, Handle<Context> context);
};
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal
#endif // V8_WASM_WASM_JS_H_
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index a083d606e0..5b82012159 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -16,9 +16,7 @@
#include "src/base/macros.h"
#include "src/wasm/wasm-constants.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
+namespace v8::internal::wasm {
// These constants limit the amount of *declared* memory. At runtime, memory can
// only grow up to kV8MaxWasmMemory{32,64}Pages.
@@ -99,10 +97,8 @@ inline uint64_t max_mem64_bytes() {
}
V8_EXPORT_PRIVATE uint32_t max_table_init_entries();
-size_t max_module_size();
+V8_EXPORT_PRIVATE size_t max_module_size();
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm
#endif // V8_WASM_WASM_LIMITS_H_
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 0e485bd09b..ec838f4b12 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -57,7 +57,7 @@ void WasmFunctionBuilder::EmitU32V(uint32_t val) { body_.write_u32v(val); }
void WasmFunctionBuilder::SetSignature(const FunctionSig* sig) {
DCHECK(!locals_.has_sig());
locals_.set_sig(sig);
- signature_index_ = builder_->AddSignature(sig);
+ signature_index_ = builder_->AddSignature(sig, true);
}
void WasmFunctionBuilder::SetSignature(uint32_t sig_index) {
@@ -305,38 +305,40 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
}
uint32_t WasmModuleBuilder::ForceAddSignature(const FunctionSig* sig,
+ bool is_final,
uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
signature_map_.emplace(*sig, index);
- types_.emplace_back(sig, supertype);
+ types_.emplace_back(sig, supertype, is_final);
return index;
}
-uint32_t WasmModuleBuilder::AddSignature(const FunctionSig* sig,
+uint32_t WasmModuleBuilder::AddSignature(const FunctionSig* sig, bool is_final,
uint32_t supertype) {
auto sig_entry = signature_map_.find(*sig);
if (sig_entry != signature_map_.end()) return sig_entry->second;
- return ForceAddSignature(sig, supertype);
+ return ForceAddSignature(sig, is_final, supertype);
}
uint32_t WasmModuleBuilder::AddException(const FunctionSig* type) {
DCHECK_EQ(0, type->return_count());
- int type_index = AddSignature(type);
+ int type_index = AddSignature(type, true);
uint32_t except_index = static_cast<uint32_t>(exceptions_.size());
exceptions_.push_back(type_index);
return except_index;
}
-uint32_t WasmModuleBuilder::AddStructType(StructType* type,
+uint32_t WasmModuleBuilder::AddStructType(StructType* type, bool is_final,
uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.emplace_back(type, supertype);
+ types_.emplace_back(type, supertype, is_final);
return index;
}
-uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, uint32_t supertype) {
+uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, bool is_final,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.emplace_back(type, supertype);
+ types_.emplace_back(type, supertype, is_final);
return index;
}
@@ -390,7 +392,7 @@ uint32_t WasmModuleBuilder::AddImport(base::Vector<const char> name,
FunctionSig* sig,
base::Vector<const char> module) {
DCHECK(adding_imports_allowed_);
- function_imports_.push_back({module, name, AddSignature(sig)});
+ function_imports_.push_back({module, name, AddSignature(sig, true)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
@@ -473,6 +475,48 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
buffer->write_u8(kExprS128Const & 0xFF);
buffer->write(init.immediate().s128_const.data(), kSimd128Size);
break;
+ case WasmInitExpr::kI32Add:
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
+ kWasmI32);
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[1],
+ kWasmI32);
+ buffer->write_u8(kExprI32Add);
+ break;
+ case WasmInitExpr::kI32Sub:
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
+ kWasmI32);
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[1],
+ kWasmI32);
+ buffer->write_u8(kExprI32Sub);
+ break;
+ case WasmInitExpr::kI32Mul:
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
+ kWasmI32);
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[1],
+ kWasmI32);
+ buffer->write_u8(kExprI32Mul);
+ break;
+ case WasmInitExpr::kI64Add:
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
+ kWasmI64);
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[1],
+ kWasmI64);
+ buffer->write_u8(kExprI64Add);
+ break;
+ case WasmInitExpr::kI64Sub:
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
+ kWasmI64);
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[1],
+ kWasmI64);
+ buffer->write_u8(kExprI64Sub);
+ break;
+ case WasmInitExpr::kI64Mul:
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
+ kWasmI64);
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[1],
+ kWasmI64);
+ buffer->write_u8(kExprI64Mul);
+ break;
case WasmInitExpr::kGlobalGet:
buffer->write_u8(kExprGlobalGet);
buffer->write_u32v(init.immediate().index);
@@ -613,9 +657,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
const TypeDefinition& type = types_[i];
if (type.supertype != kNoSuperType) {
- buffer->write_u8(kWasmSubtypeCode);
- buffer->write_u8(1); // The supertype count is always 1.
+ buffer->write_u8(type.is_final ? kWasmSubtypeFinalCode
+ : kWasmSubtypeCode);
+ buffer->write_u8(1);
buffer->write_u32v(type.supertype);
+ } else if (!type.is_final) {
+ buffer->write_u8(kWasmSubtypeCode);
+ buffer->write_u8(0);
}
switch (type.kind) {
case TypeDefinition::kFunction: {
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 09c0b68c92..dbf29bd429 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -334,14 +334,16 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// exceeded.
uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
// Adds the signature to the module if it does not already exist.
- uint32_t AddSignature(const FunctionSig* sig,
+ uint32_t AddSignature(const FunctionSig* sig, bool is_final,
uint32_t supertype = kNoSuperType);
// Does not deduplicate function signatures.
- uint32_t ForceAddSignature(const FunctionSig* sig,
+ uint32_t ForceAddSignature(const FunctionSig* sig, bool is_final,
uint32_t supertype = kNoSuperType);
uint32_t AddException(const FunctionSig* type);
- uint32_t AddStructType(StructType* type, uint32_t supertype = kNoSuperType);
- uint32_t AddArrayType(ArrayType* type, uint32_t supertype = kNoSuperType);
+ uint32_t AddStructType(StructType* type, bool is_final,
+ uint32_t supertype = kNoSuperType);
+ uint32_t AddArrayType(ArrayType* type, bool is_final,
+ uint32_t supertype = kNoSuperType);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 5ad67a1817..b0412620bc 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -14,6 +14,7 @@
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-init-expr.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h" // For {ZoneBuffer}.
@@ -47,11 +48,11 @@ template void NameMap::FinishInitialization();
template void IndirectNameMap::FinishInitialization();
WireBytesRef LazilyGeneratedNames::LookupFunctionName(
- const ModuleWireBytes& wire_bytes, uint32_t function_index) {
+ ModuleWireBytes wire_bytes, uint32_t function_index) {
base::MutexGuard lock(&mutex_);
if (!has_functions_) {
has_functions_ = true;
- DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(), function_names_);
+ DecodeFunctionNames(wire_bytes.module_bytes(), function_names_);
}
const WireBytesRef* result = function_names_.Get(function_index);
if (!result) return WireBytesRef();
@@ -194,14 +195,14 @@ WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
}
// Get a string stored in the module bytes representing a function name.
-WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
+WasmName ModuleWireBytes::GetNameOrNull(int func_index,
const WasmModule* module) const {
- return GetNameOrNull(module->lazily_generated_names.LookupFunctionName(
- *this, function->func_index));
+ return GetNameOrNull(
+ module->lazily_generated_names.LookupFunctionName(*this, func_index));
}
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
- os << "#" << name.function_->func_index;
+ os << "#" << name.func_index_;
if (!name.name_.empty()) {
if (name.name_.begin()) {
os << ":";
@@ -213,8 +214,9 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
return os;
}
-WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
- : signature_zone(std::move(signature_zone)) {}
+WasmModule::WasmModule(ModuleOrigin origin)
+ : signature_zone(GetWasmEngine()->allocator(), "signature zone"),
+ origin(origin) {}
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
@@ -555,7 +557,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
base::Vector<const uint8_t> wire_bytes =
module_object->native_module()->wire_bytes();
std::vector<CustomSectionOffset> custom_sections =
- DecodeCustomSections(wire_bytes.begin(), wire_bytes.end());
+ DecodeCustomSections(wire_bytes);
std::vector<Handle<Object>> matching_sections;
@@ -597,8 +599,8 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
return array_object;
}
-// Get the source position from a given function index and byte offset,
-// for either asm.js or pure Wasm modules.
+// Get the source position from a given function index and wire bytes offset
+// (relative to the function entry), for either asm.js or pure Wasm modules.
int GetSourcePosition(const WasmModule* module, uint32_t func_index,
uint32_t byte_offset, bool is_at_number_conversion) {
DCHECK_EQ(is_asmjs_module(module),
@@ -624,9 +626,7 @@ inline size_t VectorSize(const std::vector<T>& vector) {
size_t EstimateStoredSize(const WasmModule* module) {
return sizeof(WasmModule) + VectorSize(module->globals) +
- (module->signature_zone ? module->signature_zone->allocation_size()
- : 0) +
- VectorSize(module->types) +
+ module->signature_zone.allocation_size() + VectorSize(module->types) +
VectorSize(module->isorecursive_canonical_type_ids) +
VectorSize(module->functions) + VectorSize(module->data_segments) +
VectorSize(module->tables) + VectorSize(module->import_table) +
@@ -666,10 +666,8 @@ size_t GetWireBytesHash(base::Vector<const uint8_t> wire_bytes) {
}
int NumFeedbackSlots(const WasmModule* module, int func_index) {
- if (!v8_flags.wasm_speculative_inlining) return 0;
- // TODO(clemensb): Avoid the mutex once this ships, or at least switch to a
- // shared mutex.
- base::MutexGuard type_feedback_guard{&module->type_feedback.mutex};
+ base::SharedMutexGuard<base::kShared> type_feedback_guard{
+ &module->type_feedback.mutex};
auto it = module->type_feedback.feedback_for_function.find(func_index);
if (it == module->type_feedback.feedback_for_function.end()) return 0;
// The number of call instructions is capped by max function size.
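Aside (illustrative, not from the patch): the switch from an exclusive mutex guard to a shared one above lets several threads read type feedback concurrently while writers remain exclusive. The same idea expressed with the standard library (not V8's base::SharedMutex); all names are invented:

    #include <map>
    #include <mutex>
    #include <shared_mutex>

    class FeedbackStore {
     public:
      int NumSlots(int func_index) const {
        std::shared_lock<std::shared_mutex> guard(mutex_);  // many concurrent readers
        auto it = slots_.find(func_index);
        return it == slots_.end() ? 0 : it->second;
      }
      void SetSlots(int func_index, int count) {
        std::unique_lock<std::shared_mutex> guard(mutex_);  // single exclusive writer
        slots_[func_index] = count;
      }
     private:
      mutable std::shared_mutex mutex_;
      std::map<int, int> slots_;
    };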
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 235d9de4d1..bc96893274 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -24,6 +24,7 @@
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-init-expr.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/well-known-imports.h"
namespace v8::internal {
class WasmModuleObject;
@@ -35,6 +36,7 @@ using WasmName = base::Vector<const char>;
struct AsmJsOffsets;
class ErrorThrower;
+class WellKnownImportsList;
// Reference to a string in the wire bytes.
class WireBytesRef {
@@ -90,10 +92,12 @@ using WasmTagSig = FunctionSig;
// Static representation of a wasm tag type.
struct WasmTag {
- explicit WasmTag(const WasmTagSig* sig) : sig(sig) {}
+ explicit WasmTag(const WasmTagSig* sig, uint32_t sig_index)
+ : sig(sig), sig_index(sig_index) {}
const FunctionSig* ToFunctionSig() const { return sig; }
const WasmTagSig* sig; // type signature of the tag.
+ uint32_t sig_index;
};
// Static representation of a wasm literal stringref.
@@ -127,17 +131,26 @@ struct WasmElemSegment {
// Construct an active segment.
WasmElemSegment(ValueType type, uint32_t table_index,
- ConstantExpression offset, ElementType element_type)
+ ConstantExpression offset, ElementType element_type,
+ uint32_t element_count, uint32_t elements_wire_bytes_offset)
: status(kStatusActive),
type(type),
table_index(table_index),
offset(std::move(offset)),
- element_type(element_type) {}
+ element_type(element_type),
+ element_count(element_count),
+ elements_wire_bytes_offset(elements_wire_bytes_offset) {}
// Construct a passive or declarative segment, which has no table index or
// offset.
- WasmElemSegment(ValueType type, Status status, ElementType element_type)
- : status(status), type(type), table_index(0), element_type(element_type) {
+ WasmElemSegment(ValueType type, Status status, ElementType element_type,
+ uint32_t element_count, uint32_t elements_wire_bytes_offset)
+ : status(status),
+ type(type),
+ table_index(0),
+ element_type(element_type),
+ element_count(element_count),
+ elements_wire_bytes_offset(elements_wire_bytes_offset) {
DCHECK_NE(status, kStatusActive);
}
@@ -146,7 +159,9 @@ struct WasmElemSegment {
: status(kStatusActive),
type(kWasmBottom),
table_index(0),
- element_type(kFunctionIndexElements) {}
+ element_type(kFunctionIndexElements),
+ element_count(0),
+ elements_wire_bytes_offset(0) {}
WasmElemSegment(const WasmElemSegment&) = delete;
WasmElemSegment(WasmElemSegment&&) V8_NOEXCEPT = default;
@@ -158,7 +173,8 @@ struct WasmElemSegment {
uint32_t table_index;
ConstantExpression offset;
ElementType element_type;
- std::vector<ConstantExpression> entries;
+ uint32_t element_count;
+ uint32_t elements_wire_bytes_offset;
};
// Static representation of a wasm import.
@@ -286,7 +302,7 @@ struct ModuleWireBytes;
class V8_EXPORT_PRIVATE LazilyGeneratedNames {
public:
- WireBytesRef LookupFunctionName(const ModuleWireBytes& wire_bytes,
+ WireBytesRef LookupFunctionName(ModuleWireBytes wire_bytes,
uint32_t function_index);
void AddForTesting(int function_index, WireBytesRef name);
@@ -334,23 +350,30 @@ constexpr uint32_t kNoSuperType = std::numeric_limits<uint32_t>::max();
struct TypeDefinition {
enum Kind { kFunction, kStruct, kArray };
- TypeDefinition(const FunctionSig* sig, uint32_t supertype)
- : function_sig(sig), supertype(supertype), kind(kFunction) {}
- TypeDefinition(const StructType* type, uint32_t supertype)
- : struct_type(type), supertype(supertype), kind(kStruct) {}
- TypeDefinition(const ArrayType* type, uint32_t supertype)
- : array_type(type), supertype(supertype), kind(kArray) {}
+ TypeDefinition(const FunctionSig* sig, uint32_t supertype, bool is_final)
+ : function_sig(sig),
+ supertype(supertype),
+ kind(kFunction),
+ is_final(is_final) {}
+ TypeDefinition(const StructType* type, uint32_t supertype, bool is_final)
+ : struct_type(type),
+ supertype(supertype),
+ kind(kStruct),
+ is_final(is_final) {}
+ TypeDefinition(const ArrayType* type, uint32_t supertype, bool is_final)
+ : array_type(type),
+ supertype(supertype),
+ kind(kArray),
+ is_final(is_final) {}
TypeDefinition()
- : function_sig(nullptr), supertype(kNoSuperType), kind(kFunction) {}
-
- union {
- const FunctionSig* function_sig;
- const StructType* struct_type;
- const ArrayType* array_type;
- };
+ : function_sig(nullptr),
+ supertype(kNoSuperType),
+ kind(kFunction),
+ is_final(false) {}
bool operator==(const TypeDefinition& other) const {
- if (supertype != other.supertype || kind != other.kind) {
+ if (supertype != other.supertype || kind != other.kind ||
+ is_final != other.is_final) {
return false;
}
switch (kind) {
@@ -367,8 +390,14 @@ struct TypeDefinition {
return !(*this == other);
}
+ union {
+ const FunctionSig* function_sig;
+ const StructType* struct_type;
+ const ArrayType* array_type;
+ };
uint32_t supertype;
Kind kind;
+ bool is_final;
};
struct V8_EXPORT_PRIVATE WasmDebugSymbols {
@@ -473,16 +502,32 @@ struct FunctionTypeFeedback {
struct TypeFeedbackStorage {
std::unordered_map<uint32_t, FunctionTypeFeedback> feedback_for_function;
// Accesses to {feedback_for_function} are guarded by this mutex.
- mutable base::Mutex mutex;
+ // Multiple reads are allowed (shared lock), but only exclusive writes.
+ // Currently known users of the mutex are:
+ // - LiftoffCompiler: writes {call_targets}.
+ // - TransitiveTypeFeedbackProcessor: reads {call_targets},
+ // writes {feedback_vector}, reads {feedback_vector.size()}.
+ // - TriggerTierUp: increments {tierup_priority}.
+ // - WasmGraphBuilder: reads {feedback_vector}.
+ // - Feedback vector allocation: reads {call_targets.size()}.
+ // - PGO ProfileGenerator: reads everything.
+ // - PGO deserializer: writes everything, currently not locked, relies on
+ // being called before multi-threading enters the picture.
+ mutable base::SharedMutex mutex;
+
+ WellKnownImportsList well_known_imports;
};
struct WasmTable;
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
- std::unique_ptr<Zone> signature_zone;
+ // ================ Fields ===================================================
+ Zone signature_zone;
uint32_t initial_pages = 0; // initial size of the memory in 64k pages
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
+ uintptr_t min_memory_size = 0; // smallest size of any memory in bytes
+ uintptr_t max_memory_size = 0; // largest size of any memory in bytes
bool has_shared_memory = false; // true if memory is a SharedArrayBuffer
bool has_maximum_pages = false; // true if there is a maximum memory size
bool is_memory64 = false; // true if the memory is 64 bit
@@ -510,8 +555,49 @@ struct V8_EXPORT_PRIVATE WasmModule {
// ID and length).
WireBytesRef name_section = {0, 0};
- AccountingAllocator* allocator() const { return signature_zone->allocator(); }
+ std::vector<TypeDefinition> types; // by type index
+ // Maps each type index to its global (cross-module) canonical index as per
+ // isorecursive type canonicalization.
+ std::vector<uint32_t> isorecursive_canonical_type_ids;
+ std::vector<WasmFunction> functions;
+ std::vector<WasmGlobal> globals;
+ std::vector<WasmDataSegment> data_segments;
+ std::vector<WasmTable> tables;
+ std::vector<WasmImport> import_table;
+ std::vector<WasmExport> export_table;
+ std::vector<WasmTag> tags;
+ std::vector<WasmStringRefLiteral> stringref_literals;
+ std::vector<WasmElemSegment> elem_segments;
+ std::vector<WasmCompilationHint> compilation_hints;
+ BranchHintInfo branch_hints;
+ // Pairs of module offsets and mark id.
+ std::vector<std::pair<uint32_t, uint32_t>> inst_traces;
+
+ // This is the only member of {WasmModule} where we store dynamic information
+ // that's not a decoded representation of the wire bytes.
+ // TODO(jkummerow): Rename.
+ mutable TypeFeedbackStorage type_feedback;
+
+ const ModuleOrigin origin;
+ mutable LazilyGeneratedNames lazily_generated_names;
+ WasmDebugSymbols debug_symbols;
+
+ // Asm.js source position information. Only available for modules compiled
+ // from asm.js.
+ std::unique_ptr<AsmJsOffsetInformation> asm_js_offset_information;
+ // {validated_functions} is atomically updated when functions get validated
+ // (during compilation, streaming decoding, or via explicit validation).
+ static_assert(sizeof(std::atomic<uint8_t>) == 1);
+ static_assert(alignof(std::atomic<uint8_t>) == 1);
+ mutable std::unique_ptr<std::atomic<uint8_t>[]> validated_functions;
+
+ // ================ Constructors =============================================
+ explicit WasmModule(ModuleOrigin = kWasmOrigin);
+ WasmModule(const WasmModule&) = delete;
+ WasmModule& operator=(const WasmModule&) = delete;
+
+ // ================ Accessors ================================================
void add_type(TypeDefinition type) {
types.push_back(type);
// Isorecursive canonical type will be computed later.
@@ -520,9 +606,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool has_type(uint32_t index) const { return index < types.size(); }
- void add_signature(const FunctionSig* sig, uint32_t supertype) {
+ void add_signature(const FunctionSig* sig, uint32_t supertype,
+ bool is_final) {
DCHECK_NOT_NULL(sig);
- add_type(TypeDefinition(sig, supertype));
+ add_type(TypeDefinition(sig, supertype, is_final));
}
bool has_signature(uint32_t index) const {
return index < types.size() &&
@@ -530,35 +617,44 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
const FunctionSig* signature(uint32_t index) const {
DCHECK(has_signature(index));
+ size_t num_types = types.size();
+ V8_ASSUME(index < num_types);
return types[index].function_sig;
}
- void add_struct_type(const StructType* type, uint32_t supertype) {
+ void add_struct_type(const StructType* type, uint32_t supertype,
+ bool is_final) {
DCHECK_NOT_NULL(type);
- add_type(TypeDefinition(type, supertype));
+ add_type(TypeDefinition(type, supertype, is_final));
}
bool has_struct(uint32_t index) const {
return index < types.size() && types[index].kind == TypeDefinition::kStruct;
}
const StructType* struct_type(uint32_t index) const {
DCHECK(has_struct(index));
+ size_t num_types = types.size();
+ V8_ASSUME(index < num_types);
return types[index].struct_type;
}
- void add_array_type(const ArrayType* type, uint32_t supertype) {
+ void add_array_type(const ArrayType* type, uint32_t supertype,
+ bool is_final) {
DCHECK_NOT_NULL(type);
- add_type(TypeDefinition(type, supertype));
+ add_type(TypeDefinition(type, supertype, is_final));
}
bool has_array(uint32_t index) const {
return index < types.size() && types[index].kind == TypeDefinition::kArray;
}
const ArrayType* array_type(uint32_t index) const {
DCHECK(has_array(index));
+ size_t num_types = types.size();
+ V8_ASSUME(index < num_types);
return types[index].array_type;
}
uint32_t supertype(uint32_t index) const {
- DCHECK(index < types.size());
+ size_t num_types = types.size();
+ V8_ASSUME(index < num_types);
return types[index].supertype;
}
bool has_supertype(uint32_t index) const {
@@ -580,10 +676,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
DCHECK_LE(pos, num_declared_functions);
uint8_t byte =
validated_functions[pos >> 3].load(std::memory_order_relaxed);
+ DCHECK_IMPLIES(origin != kWasmOrigin, byte == 0xff);
return byte & (1 << (pos & 7));
}
void set_function_validated(int func_index) const {
+ DCHECK_EQ(kWasmOrigin, origin);
DCHECK_NOT_NULL(validated_functions);
DCHECK_LE(num_imported_functions, func_index);
int pos = func_index - num_imported_functions;
@@ -598,46 +696,19 @@ struct V8_EXPORT_PRIVATE WasmModule {
}
}
+ void set_all_functions_validated() const {
+ DCHECK_EQ(kWasmOrigin, origin);
+ if (num_declared_functions == 0) return;
+ DCHECK_NOT_NULL(validated_functions);
+ size_t num_words = (num_declared_functions + 7) / 8;
+ for (size_t i = 0; i < num_words; ++i) {
+ validated_functions[i].store(0xff, std::memory_order_relaxed);
+ }
+ }
+
base::Vector<const WasmFunction> declared_functions() const {
return base::VectorOf(functions) + num_imported_functions;
}
-
- std::vector<TypeDefinition> types; // by type index
- // Maps each type index to its global (cross-module) canonical index as per
- // isorecursive type canonicalization.
- std::vector<uint32_t> isorecursive_canonical_type_ids;
- std::vector<WasmFunction> functions;
- std::vector<WasmGlobal> globals;
- std::vector<WasmDataSegment> data_segments;
- std::vector<WasmTable> tables;
- std::vector<WasmImport> import_table;
- std::vector<WasmExport> export_table;
- std::vector<WasmTag> tags;
- std::vector<WasmStringRefLiteral> stringref_literals;
- std::vector<WasmElemSegment> elem_segments;
- std::vector<WasmCompilationHint> compilation_hints;
- BranchHintInfo branch_hints;
- // Pairs of module offsets and mark id.
- std::vector<std::pair<uint32_t, uint32_t>> inst_traces;
- mutable TypeFeedbackStorage type_feedback;
-
- ModuleOrigin origin = kWasmOrigin; // origin of the module
- mutable LazilyGeneratedNames lazily_generated_names;
- WasmDebugSymbols debug_symbols;
-
- // Asm.js source position information. Only available for modules compiled
- // from asm.js.
- std::unique_ptr<AsmJsOffsetInformation> asm_js_offset_information;
-
- // {validated_functions} is atomically updated when functions get validated
- // (during compilation, streaming decoding, or via explicit validation).
- static_assert(sizeof(std::atomic<uint8_t>) == 1);
- static_assert(alignof(std::atomic<uint8_t>) == 1);
- mutable std::unique_ptr<std::atomic<uint8_t>[]> validated_functions;
-
- explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
- WasmModule(const WasmModule&) = delete;
- WasmModule& operator=(const WasmModule&) = delete;
};
// Static representation of a wasm indirect call table.
@@ -689,6 +760,8 @@ V8_EXPORT_PRIVATE int GetSubtypingDepth(const WasmModule* module,
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
// this struct is alive.
+// As {ModuleWireBytes} is just a wrapper around a {base::Vector<const byte>},
+// it should generally be passed by value.
struct V8_EXPORT_PRIVATE ModuleWireBytes {
explicit ModuleWireBytes(base::Vector<const byte> module_bytes)
: module_bytes_(module_bytes) {}
@@ -701,8 +774,7 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
WasmName GetNameOrNull(WireBytesRef ref) const;
// Get a string stored in the module bytes representing a function name.
- WasmName GetNameOrNull(const WasmFunction* function,
- const WasmModule* module) const;
+ WasmName GetNameOrNull(int func_index, const WasmModule* module) const;
// Checks the given reference is contained within the module bytes.
bool BoundsCheck(WireBytesRef ref) const {
@@ -724,13 +796,14 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
private:
base::Vector<const byte> module_bytes_;
};
+ASSERT_TRIVIALLY_COPYABLE(ModuleWireBytes);
// A helper for printing out the names of functions.
struct WasmFunctionName {
- WasmFunctionName(const WasmFunction* function, WasmName name)
- : function_(function), name_(name) {}
+ WasmFunctionName(int func_index, WasmName name)
+ : func_index_(func_index), name_(name) {}
- const WasmFunction* function_;
+ const int func_index_;
const WasmName name_;
};
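Aside (illustrative, not from the patch): {validated_functions} above is a bit-per-function array of atomic bytes that multiple threads update without a lock. A condensed standalone sketch of that structure; the class name and API are invented:

    #include <atomic>
    #include <cstdint>
    #include <memory>

    class ValidatedBits {
     public:
      explicit ValidatedBits(int count)
          : bits_(new std::atomic<uint8_t>[(count + 7) / 8]()) {}  // zero-filled

      // Atomically set bit `i`; concurrent setters of other bits stay correct.
      void Set(int i) {
        bits_[i >> 3].fetch_or(uint8_t{1} << (i & 7), std::memory_order_relaxed);
      }

      bool Get(int i) const {
        return bits_[i >> 3].load(std::memory_order_relaxed) & (1 << (i & 7));
      }

     private:
      std::unique_ptr<std::atomic<uint8_t>[]> bits_;
    };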
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index a73bb9a76a..5d1f10642b 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -56,6 +56,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmContinuationObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmSuspenderObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmResumeData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmNull)
CAST_ACCESSOR(WasmInstanceObject)
@@ -125,7 +126,8 @@ int WasmGlobalObject::type_size() const { return type().value_kind_size(); }
Address WasmGlobalObject::address() const {
DCHECK_NE(type(), wasm::kWasmAnyRef);
DCHECK_LE(offset() + type_size(), untagged_buffer().byte_length());
- return Address(untagged_buffer().backing_store()) + offset();
+ return reinterpret_cast<Address>(untagged_buffer().backing_store()) +
+ offset();
}
int32_t WasmGlobalObject::GetI32() {
@@ -144,6 +146,10 @@ double WasmGlobalObject::GetF64() {
return base::ReadUnalignedValue<double>(address());
}
+byte* WasmGlobalObject::GetS128RawBytes() {
+ return reinterpret_cast<byte*>(address());
+}
+
Handle<Object> WasmGlobalObject::GetRef() {
// We use this getter for externref, funcref, and stringref.
DCHECK(type().is_reference());
@@ -175,8 +181,6 @@ void WasmGlobalObject::SetRef(Handle<Object> value) {
SANDBOXED_POINTER_ACCESSORS(WasmInstanceObject, memory_start, byte*,
kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
- kIsolateRootOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
kStackLimitAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, real_stack_limit_address, Address,
@@ -213,8 +217,8 @@ ACCESSORS(WasmInstanceObject, data_segment_starts, FixedAddressArray,
kDataSegmentStartsOffset)
ACCESSORS(WasmInstanceObject, data_segment_sizes, FixedUInt32Array,
kDataSegmentSizesOffset)
-ACCESSORS(WasmInstanceObject, dropped_elem_segments, FixedUInt8Array,
- kDroppedElemSegmentsOffset)
+ACCESSORS(WasmInstanceObject, element_segments, FixedArray,
+ kElementSegmentsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, break_on_entry, uint8_t,
kBreakOnEntryOffset)
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 4c2bfe0344..c881766723 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -280,9 +280,9 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate,
Handle<FixedArray> entries,
int entry_index,
Handle<Object> entry) {
- if (entry->IsNull(isolate)) {
+ if (entry->IsWasmNull(isolate)) {
ClearDispatchTables(isolate, table, entry_index); // Degenerate case.
- entries->set(entry_index, ReadOnlyRoots(isolate).null_value());
+ entries->set(entry_index, ReadOnlyRoots(isolate).wasm_null());
return;
}
Handle<Object> external =
@@ -307,6 +307,7 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate,
entries->set(entry_index, *entry);
}
+// TODO(manoskouk): Does this need to be handlified?
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t index, Handle<Object> entry) {
// Callers need to perform bounds checks, type check, and error handling.
@@ -327,6 +328,9 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
case wasm::HeapType::kArray:
case wasm::HeapType::kAny:
case wasm::HeapType::kI31:
+ case wasm::HeapType::kNone:
+ case wasm::HeapType::kNoFunc:
+ case wasm::HeapType::kNoExtern:
entries->set(entry_index, *entry);
return;
case wasm::HeapType::kFunc:
@@ -359,7 +363,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
Handle<Object> entry(entries->get(entry_index), isolate);
- if (entry->IsNull(isolate)) {
+ if (entry->IsWasmNull(isolate)) {
return entry;
}
@@ -374,6 +378,9 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
case wasm::HeapType::kStruct:
case wasm::HeapType::kArray:
case wasm::HeapType::kAny:
+ case wasm::HeapType::kNone:
+ case wasm::HeapType::kNoFunc:
+ case wasm::HeapType::kNoExtern:
return entry;
case wasm::HeapType::kFunc:
if (entry->IsWasmInternalFunction()) return entry;
@@ -520,7 +527,7 @@ void WasmTableObject::UpdateDispatchTables(
wasm::NativeModule* native_module =
instance->module_object().native_module();
wasm::WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
- auto kind = compiler::WasmImportCallKind::kWasmToCapi;
+ auto kind = wasm::ImportCallKind::kWasmToCapi;
uint32_t canonical_type_index =
wasm::GetTypeCanonicalizer()->AddRecursiveGroup(&sig);
wasm::WasmCode* wasm_code = cache->MaybeGet(kind, canonical_type_index,
@@ -592,7 +599,7 @@ void WasmTableObject::GetFunctionTableEntry(
*is_valid = true;
Handle<Object> element(table->entries().get(entry_index), isolate);
- *is_null = element->IsNull(isolate);
+ *is_null = element->IsWasmNull(isolate);
if (*is_null) return;
if (element->IsWasmInternalFunction()) {
@@ -783,16 +790,24 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(
// On 32-bit platforms we need a heuristic here to balance overall memory
// and address space consumption.
constexpr int kGBPages = 1024 * 1024 * 1024 / wasm::kWasmPageSize;
+ // We allocate the smallest of the following sizes, but at least the initial
+ // size:
+ // 1) the module-defined maximum;
+ // 2) 1GB;
+ // 3) the engine maximum;
+ int allocation_maximum = std::min(kGBPages, engine_maximum);
int heuristic_maximum;
if (initial > kGBPages) {
// We always allocate at least the initial size.
heuristic_maximum = initial;
} else if (has_maximum) {
- // We try to reserve the maximum, but at most 1GB to avoid OOMs.
- heuristic_maximum = std::min(maximum, kGBPages);
+ // We try to reserve the maximum, but at most the allocation_maximum to
+ // avoid OOMs.
+ heuristic_maximum = std::min(maximum, allocation_maximum);
} else if (shared == SharedFlag::kShared) {
- // If shared memory has no maximum, we use an implicit maximum of 1GB.
- heuristic_maximum = kGBPages;
+ // If shared memory has no maximum, we use the allocation_maximum as an
+ // implicit maximum.
+ heuristic_maximum = allocation_maximum;
} else {
// If non-shared memory has no maximum, we only allocate the initial size
// and then grow with realloc.
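As a rough check of the branch structure in the hunk above, here is a standalone sketch of the reservation heuristic with plain integers; the function name and parameters are illustrative, not the V8 API, and the 64 KiB wasm page size is assumed.

#include <algorithm>
#include <cstdint>

// Illustrative sketch of the heuristic above (all sizes in wasm pages).
uint32_t HeuristicMaximumPages(uint32_t initial, bool has_maximum,
                               uint32_t maximum, bool is_shared,
                               uint32_t engine_maximum) {
  constexpr uint32_t kGBPages = 1024 * 1024 * 1024 / (64 * 1024);  // 16384
  const uint32_t allocation_maximum = std::min(kGBPages, engine_maximum);
  if (initial > kGBPages) return initial;     // always cover the initial size
  if (has_maximum) return std::min(maximum, allocation_maximum);
  if (is_shared) return allocation_maximum;   // shared memory cannot realloc later
  return initial;                             // non-shared: grow via realloc on demand
}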
@@ -1144,16 +1159,12 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
FixedUInt32Array::New(isolate, num_data_segments);
instance->set_data_segment_sizes(*data_segment_sizes);
- int num_elem_segments = static_cast<int>(module->elem_segments.size());
- Handle<FixedUInt8Array> dropped_elem_segments =
- FixedUInt8Array::New(isolate, num_elem_segments);
- instance->set_dropped_elem_segments(*dropped_elem_segments);
+ instance->set_element_segments(*isolate->factory()->empty_fixed_array());
Handle<FixedArray> imported_function_refs =
isolate->factory()->NewFixedArray(num_imported_functions);
instance->set_imported_function_refs(*imported_function_refs);
- instance->set_isolate_root(isolate->isolate_root());
instance->set_stack_limit_address(
isolate->stack_guard()->address_of_jslimit());
instance->set_real_stack_limit_address(
@@ -1180,9 +1191,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_hook_on_function_call_address(
isolate->debug()->hook_on_function_call_address());
instance->set_managed_object_maps(*isolate->factory()->empty_fixed_array());
- // TODO(manoskouk): Initialize this array with zeroes, and check for zero in
- // wasm-compiler.
- Handle<FixedArray> functions = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> functions = isolate->factory()->NewFixedArrayWithZeroes(
static_cast<int>(module->functions.size()));
instance->set_wasm_internal_functions(*functions);
instance->set_feedback_vectors(*isolate->factory()->empty_fixed_array());
@@ -1202,7 +1211,6 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
}
InitDataSegmentArrays(instance, module_object);
- InitElemSegmentArrays(instance, module_object);
return instance;
}
@@ -1236,20 +1244,6 @@ void WasmInstanceObject::InitDataSegmentArrays(
}
}
-void WasmInstanceObject::InitElemSegmentArrays(
- Handle<WasmInstanceObject> instance,
- Handle<WasmModuleObject> module_object) {
- auto module = module_object->module();
- auto num_elem_segments = module->elem_segments.size();
- for (size_t i = 0; i < num_elem_segments; ++i) {
- instance->dropped_elem_segments().set(
- static_cast<int>(i), module->elem_segments[i].status ==
- wasm::WasmElemSegment::kStatusDeclarative
- ? 1
- : 0);
- }
-}
-
Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
wasm::NativeModule* native_module = module_object().native_module();
if (func_index < native_module->num_imported_functions()) {
@@ -1319,19 +1313,44 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
base::Optional<MessageTemplate> WasmInstanceObject::InitTableEntries(
Isolate* isolate, Handle<WasmInstanceObject> instance, uint32_t table_index,
uint32_t segment_index, uint32_t dst, uint32_t src, uint32_t count) {
- // Note that this implementation just calls through to module instantiation.
- // This is intentional, so that the runtime only depends on the object
- // methods, and not the module instantiation logic.
- return wasm::LoadElemSegment(isolate, instance, table_index, segment_index,
- dst, src, count);
+ AccountingAllocator allocator;
+ // This {Zone} will be used only by the temporary WasmFullDecoder allocated
+ // down the line from this call. Therefore it is safe to stack-allocate it
+ // here.
+ Zone zone(&allocator, "LoadElemSegment");
+
+ Handle<WasmTableObject> table_object = handle(
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
+
+ // If needed, try to lazily initialize the element segment.
+ base::Optional<MessageTemplate> opt_error =
+ wasm::InitializeElementSegment(&zone, isolate, instance, segment_index);
+ if (opt_error.has_value()) return opt_error;
+
+ Handle<FixedArray> elem_segment =
+ handle(FixedArray::cast(instance->element_segments().get(segment_index)),
+ isolate);
+ if (!base::IsInBounds<uint64_t>(dst, count, table_object->current_length())) {
+ return {MessageTemplate::kWasmTrapTableOutOfBounds};
+ }
+ if (!base::IsInBounds<uint64_t>(src, count, elem_segment->length())) {
+ return {MessageTemplate::kWasmTrapElementSegmentOutOfBounds};
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ WasmTableObject::Set(
+ isolate, table_object, static_cast<int>(dst + i),
+ handle(elem_segment->get(static_cast<int>(src + i)), isolate));
+ }
+
+ return {};
}
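The two bounds checks above rely on an overflow-safe comparison. A minimal standalone equivalent, in the spirit of base::IsInBounds<uint64_t> (not the actual implementation), looks like this:

#include <cstdint>

// Overflow-safe bounds test: true iff [index, index + count) fits inside
// [0, size). Widening to 64 bits keeps the addition from wrapping for
// 32-bit inputs.
bool IsInBounds64(uint32_t index, uint32_t count, uint32_t size) {
  return static_cast<uint64_t>(index) + count <= size;
}

// Example: IsInBounds64(0xffffffffu, 2, 0xffffffffu) is false, while a naive
// 32-bit `index + count <= size` would wrap around and wrongly succeed.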
MaybeHandle<WasmInternalFunction> WasmInstanceObject::GetWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index) {
Object val = instance->wasm_internal_functions().get(index);
- return val.IsWasmInternalFunction()
- ? handle(WasmInternalFunction::cast(val), isolate)
- : MaybeHandle<WasmInternalFunction>();
+ if (val.IsSmi()) return {};
+ return handle(WasmInternalFunction::cast(val), isolate);
}
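Because the internal-functions array is now created with NewFixedArrayWithZeroes, an empty slot holds a Smi zero and the lookup above only needs a Smi check. A minimal sketch of that sentinel pattern with plain pointers (names are illustrative):

#include <cstddef>
#include <optional>
#include <vector>

struct InternalFunction { int index; };

// Slots start as a null sentinel; Get() reports "not created yet" until the
// real object has been installed, mirroring the Smi-zero convention above.
class FunctionSlots {
 public:
  explicit FunctionSlots(std::size_t n) : slots_(n, nullptr) {}
  std::optional<InternalFunction*> Get(std::size_t i) const {
    if (slots_[i] == nullptr) return std::nullopt;
    return slots_[i];
  }
  void Install(std::size_t i, InternalFunction* f) { slots_[i] = f; }

 private:
  std::vector<InternalFunction*> slots_;
};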
Handle<WasmInternalFunction>
@@ -1355,10 +1374,10 @@ WasmInstanceObject::GetOrCreateWasmInternalFunction(
MaybeObject entry = isolate->heap()->js_to_wasm_wrappers().Get(wrapper_index);
- Handle<CodeT> wrapper;
- // {entry} can be cleared, {undefined}, or a ready {CodeT}.
- if (entry.IsStrongOrWeak() && entry.GetHeapObject().IsCodeT()) {
- wrapper = handle(CodeT::cast(entry.GetHeapObject()), isolate);
+ Handle<Code> wrapper;
+ // {entry} can be cleared, {undefined}, or a ready {Code}.
+ if (entry.IsStrongOrWeak() && entry.GetHeapObject().IsCode()) {
+ wrapper = handle(Code::cast(entry.GetHeapObject()), isolate);
} else {
// The wrapper may not exist yet if no function in the exports section has
// this signature. We compile it and store the wrapper in the module for
@@ -1418,18 +1437,14 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (sig_in_module != module_canonical_ids.end()) {
wasm::NativeModule* native_module =
instance->module_object().native_module();
- // TODO(wasm): Cache and reuse wrapper code, to avoid repeated compilation
- // and permissions switching.
- const wasm::WasmFeatures enabled = native_module->enabled_features();
- auto resolved = compiler::ResolveWasmImportCall(
- callable, sig, instance->module(), enabled);
- compiler::WasmImportCallKind kind = resolved.kind;
- callable = resolved.callable; // Update to ultimate target.
- DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
+ wasm::WasmImportData resolved(callable, sig, canonical_sig_index);
+ wasm::ImportCallKind kind = resolved.kind();
+ callable = resolved.callable(); // Update to ultimate target.
+ DCHECK_NE(wasm::ImportCallKind::kLinkError, kind);
wasm::CompilationEnv env = native_module->CreateCompilationEnv();
// {expected_arity} should only be used if kind != kJSFunctionArityMismatch.
int expected_arity = -1;
- if (kind == compiler::WasmImportCallKind ::kJSFunctionArityMismatch) {
+ if (kind == wasm::ImportCallKind ::kJSFunctionArityMismatch) {
expected_arity = Handle<JSFunction>::cast(callable)
->shared()
.internal_formal_parameter_count_without_receiver();
@@ -1443,7 +1458,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
wasm::WasmCode* published_code =
native_module->PublishCode(std::move(wasm_code));
isolate->counters()->wasm_generated_code_size()->Increment(
@@ -1582,6 +1597,7 @@ void WasmArray::SetTaggedElement(uint32_t index, Handle<Object> value,
// static
Handle<WasmTagObject> WasmTagObject::New(Isolate* isolate,
const wasm::FunctionSig* sig,
+ uint32_t canonical_type_index,
Handle<HeapObject> tag) {
Handle<JSFunction> tag_cons(isolate->native_context()->wasm_tag_constructor(),
isolate);
@@ -1601,45 +1617,52 @@ Handle<WasmTagObject> WasmTagObject::New(Isolate* isolate,
isolate->factory()->NewJSObject(tag_cons, AllocationType::kOld);
Handle<WasmTagObject> tag_wrapper = Handle<WasmTagObject>::cast(tag_object);
tag_wrapper->set_serialized_signature(*serialized_sig);
+ tag_wrapper->set_canonical_type_index(canonical_type_index);
tag_wrapper->set_tag(*tag);
return tag_wrapper;
}
-// TODO(9495): Update this if function type variance is introduced.
-bool WasmTagObject::MatchesSignature(const wasm::FunctionSig* sig) {
- DCHECK_EQ(0, sig->return_count());
- DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
- int sig_size = static_cast<int>(sig->parameter_count());
- if (sig_size != serialized_signature().length()) return false;
- for (int index = 0; index < sig_size; ++index) {
- if (sig->GetParam(index) != serialized_signature().get(index)) {
- return false;
- }
- }
- return true;
+bool WasmTagObject::MatchesSignature(uint32_t expected_canonical_type_index) {
+ return wasm::GetWasmEngine()->type_canonicalizer()->IsCanonicalSubtype(
+ this->canonical_type_index(), expected_canonical_type_index);
}
-// TODO(9495): Update this if function type variance is introduced.
-bool WasmCapiFunction::MatchesSignature(const wasm::FunctionSig* sig) const {
- // TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc.
- int param_count = static_cast<int>(sig->parameter_count());
- int result_count = static_cast<int>(sig->return_count());
+const wasm::FunctionSig* WasmCapiFunction::GetSignature(Zone* zone) const {
+ WasmCapiFunctionData function_data = shared().wasm_capi_function_data();
PodArray<wasm::ValueType> serialized_sig =
- shared().wasm_capi_function_data().serialized_signature();
- if (param_count + result_count + 1 != serialized_sig.length()) return false;
- int serialized_index = 0;
- for (int i = 0; i < result_count; i++, serialized_index++) {
- if (sig->GetReturn(i) != serialized_sig.get(serialized_index)) {
- return false;
- }
+ function_data.serialized_signature();
+ int sig_size = serialized_sig.length() - 1;
+ wasm::ValueType* types = zone->NewArray<wasm::ValueType>(sig_size);
+ int returns_size = 0;
+ int index = 0;
+ while (serialized_sig.get(index) != wasm::kWasmVoid) {
+ types[index] = serialized_sig.get(index);
+ index++;
}
- if (serialized_sig.get(serialized_index) != wasm::kWasmVoid) return false;
- serialized_index++;
- for (int i = 0; i < param_count; i++, serialized_index++) {
- if (sig->GetParam(i) != serialized_sig.get(serialized_index)) return false;
+ returns_size = index;
+ while (index < sig_size) {
+ types[index] = serialized_sig.get(index + 1);
+ index++;
}
- return true;
+
+ return zone->New<wasm::FunctionSig>(returns_size, sig_size - returns_size,
+ types);
+}
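GetSignature above decodes a flat PodArray whose layout is the return types, a single kWasmVoid separator, then the parameter types. A standalone sketch of that decoding with a stand-in type enum (illustrative only):

#include <cstddef>
#include <vector>

// Serialized layout decoded above:
//   [ret_0, ..., ret_{m-1}, kVoid, param_0, ..., param_{n-1}]
// The single kVoid entry splits returns from parameters, so the decoded
// signature has (length - 1) real types.
enum class ValType { kVoid, kI32, kI64, kF32, kF64, kRef };

struct Sig {
  std::vector<ValType> returns;
  std::vector<ValType> params;
};

Sig DecodeSerializedSig(const std::vector<ValType>& serialized) {
  Sig sig;
  std::size_t i = 0;
  while (serialized[i] != ValType::kVoid) sig.returns.push_back(serialized[i++]);
  ++i;  // skip the separator
  while (i < serialized.size()) sig.params.push_back(serialized[i++]);
  return sig;
}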
+
+bool WasmCapiFunction::MatchesSignature(
+ uint32_t other_canonical_sig_index) const {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ const wasm::FunctionSig* sig = GetSignature(&zone);
+#if DEBUG
+ // TODO(7748): Change this if indexed types are allowed.
+ for (wasm::ValueType type : sig->all()) CHECK(!type.has_index());
+#endif
+ // TODO(7748): Check for subtyping instead if C API functions can define
+ // signature supertype.
+ return wasm::GetWasmEngine()->type_canonicalizer()->AddRecursiveGroup(sig) ==
+ other_canonical_sig_index;
}
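The MatchesSignature rewrites in this hunk replace structural, module-relative comparison with a lookup of globally canonical type ids. A toy sketch of the idea: intern each signature once, then matching reduces to an integer comparison (the registry and names here are illustrative, not V8's TypeCanonicalizer, which additionally supports a subtype check on these ids):

#include <cstdint>
#include <map>
#include <string>

// Toy registry: each structural signature is interned once and gets a stable
// id, so "same signature" becomes "same id".
class TypeRegistry {
 public:
  uint32_t Intern(const std::string& structural_form) {
    auto it = ids_.find(structural_form);
    if (it != ids_.end()) return it->second;
    uint32_t id = static_cast<uint32_t>(ids_.size());
    ids_.emplace(structural_form, id);
    return id;
  }

 private:
  std::map<std::string, uint32_t> ids_;
};

bool MatchesSignature(TypeRegistry& registry, const std::string& sig,
                      uint32_t expected_canonical_id) {
  return registry.Intern(sig) == expected_canonical_id;
}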
// static
@@ -1808,7 +1831,11 @@ size_t ComputeEncodedElementSize(wasm::ValueType type) {
// static
uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTag* tag) {
- const wasm::WasmTagSig* sig = tag->sig;
+ return GetEncodedSize(tag->sig);
+}
+
+// static
+uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTagSig* sig) {
uint32_t encoded_size = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
switch (sig->GetParam(i).kind()) {
@@ -1844,7 +1871,7 @@ uint32_t WasmExceptionPackage::GetEncodedSize(const wasm::WasmTag* tag) {
bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
if (!object.IsJSFunction()) return false;
JSFunction js_function = JSFunction::cast(object);
- CodeT code = js_function.code();
+ Code code = js_function.code();
if (CodeKind::JS_TO_WASM_FUNCTION != code.kind() &&
code.builtin_id() != Builtin::kGenericJSToWasmWrapper &&
code.builtin_id() != Builtin::kWasmReturnPromiseOnSuspend) {
@@ -1878,8 +1905,6 @@ Handle<WasmCapiFunction> WasmCapiFunction::New(
// call target (which is an address pointing into the C++ binary).
call_target = ExternalReference::Create(call_target).address();
- // TODO(7748): Support proper typing for external functions. That requires
- // global (cross-module) canonicalization of signatures/RTTs.
Handle<Map> rtt = isolate->factory()->wasm_internal_function_map();
Handle<WasmCapiFunctionData> fun_data =
isolate->factory()->NewWasmCapiFunctionData(
@@ -1904,7 +1929,7 @@ int WasmExportedFunction::function_index() {
Handle<WasmExportedFunction> WasmExportedFunction::New(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
- int arity, Handle<CodeT> export_wrapper) {
+ int arity, Handle<Code> export_wrapper) {
DCHECK(
CodeKind::JS_TO_WASM_FUNCTION == export_wrapper->kind() ||
(export_wrapper->is_builtin() &&
@@ -1934,10 +1959,13 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
export_wrapper->builtin_id() == Builtin::kWasmReturnPromiseOnSuspend
? wasm::kPromise
: wasm::kNoPromise;
+ uint32_t sig_index = instance->module()->functions[func_index].sig_index;
+ uint32_t canonical_type_index =
+ instance->module()->isorecursive_canonical_type_ids[sig_index];
Handle<WasmExportedFunctionData> function_data =
factory->NewWasmExportedFunctionData(
export_wrapper, instance, call_target, ref, func_index, sig,
- wasm::kGenericWrapperBudget, rtt, promise);
+ canonical_type_index, wasm::kGenericWrapperBudget, rtt, promise);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -1997,20 +2025,10 @@ const wasm::FunctionSig* WasmExportedFunction::sig() {
}
bool WasmExportedFunction::MatchesSignature(
- const WasmModule* other_module, const wasm::FunctionSig* other_sig) {
- const wasm::FunctionSig* sig = this->sig();
- if (sig->parameter_count() != other_sig->parameter_count() ||
- sig->return_count() != other_sig->return_count()) {
- return false;
- }
-
- for (int i = 0; i < sig->all().size(); i++) {
- if (!wasm::EquivalentTypes(sig->all()[i], other_sig->all()[i],
- this->instance().module(), other_module)) {
- return false;
- }
- }
- return true;
+ uint32_t other_canonical_type_index) {
+ return wasm::GetWasmEngine()->type_canonicalizer()->IsCanonicalSubtype(
+ this->shared().wasm_exported_function_data().canonical_type_index(),
+ other_canonical_type_index);
}
// static
@@ -2020,7 +2038,7 @@ std::unique_ptr<char[]> WasmExportedFunction::GetDebugName(
// prefix + parameters + delimiter + returns + zero byte
size_t len = strlen(kPrefix) + sig->all().size() + 2;
auto buffer = base::OwnedVector<char>::New(len);
- memcpy(buffer.start(), kPrefix, strlen(kPrefix));
+ memcpy(buffer.begin(), kPrefix, strlen(kPrefix));
PrintSignature(buffer.as_vector() + strlen(kPrefix), sig);
return buffer.ReleaseData();
}
@@ -2047,9 +2065,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
// TODO(wasm): Think about caching and sharing the JS-to-JS wrappers per
// signature instead of compiling a new one for every instantiation.
- Handle<CodeT> wrapper_code = ToCodeT(
- compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked(),
- isolate);
+ Handle<Code> wrapper_code =
+ compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
// WasmJSFunctions use on-heap Code objects as call targets, so we can't
// cache the target address, unless the WasmJSFunction wraps a
@@ -2060,17 +2077,15 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
Factory* factory = isolate->factory();
- // TODO(7748): Support proper typing for external functions. That requires
- // global (cross-module) canonicalization of signatures/RTTs.
Handle<Map> rtt = factory->wasm_internal_function_map();
Handle<WasmJSFunctionData> function_data = factory->NewWasmJSFunctionData(
call_target, callable, return_count, parameter_count, serialized_sig,
wrapper_code, rtt, suspend, wasm::kNoPromise);
if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
- using CK = compiler::WasmImportCallKind;
+ using CK = wasm::ImportCallKind;
int expected_arity = parameter_count;
- CK kind = compiler::kDefaultImportCallKind;
+ CK kind = wasm::kDefaultImportCallKind;
if (callable->IsJSFunction()) {
SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
expected_arity =
@@ -2081,11 +2096,10 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
// TODO(wasm): Think about caching and sharing the wasm-to-JS wrappers per
// signature instead of compiling a new one for every instantiation.
- Handle<CodeT> wasm_to_js_wrapper_code =
- ToCodeT(compiler::CompileWasmToJSWrapper(isolate, sig, kind,
- expected_arity, suspend)
- .ToHandleChecked(),
- isolate);
+ Handle<Code> wasm_to_js_wrapper_code =
+ compiler::CompileWasmToJSWrapper(isolate, sig, kind, expected_arity,
+ suspend)
+ .ToHandleChecked();
function_data->internal().set_code(*wasm_to_js_wrapper_code);
}
@@ -2120,7 +2134,7 @@ wasm::Suspend WasmJSFunction::GetSuspend() const {
.suspend());
}
-const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
+const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) const {
WasmJSFunctionData function_data = shared().wasm_js_function_data();
int sig_size = function_data.serialized_signature().length();
wasm::ValueType* types = zone->NewArray<wasm::ValueType>(sig_size);
@@ -2132,21 +2146,19 @@ const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
return zone->New<wasm::FunctionSig>(return_count, parameter_count, types);
}
-// TODO(9495): Update this if function type variance is introduced.
-bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
- DCHECK_LE(sig->all().size(), kMaxInt);
- int sig_size = static_cast<int>(sig->all().size());
- int return_count = static_cast<int>(sig->return_count());
- int parameter_count = static_cast<int>(sig->parameter_count());
- DisallowHeapAllocation no_alloc;
- WasmJSFunctionData function_data = shared().wasm_js_function_data();
- if (return_count != function_data.serialized_return_count() ||
- parameter_count != function_data.serialized_parameter_count()) {
- return false;
- }
- if (sig_size == 0) return true; // Prevent undefined behavior.
- const wasm::ValueType* expected = sig->all().begin();
- return function_data.serialized_signature().matches(expected, sig_size);
+bool WasmJSFunction::MatchesSignature(
+ uint32_t other_canonical_sig_index) const {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ const wasm::FunctionSig* sig = GetSignature(&zone);
+#if DEBUG
+ // TODO(7748): Change this if indexed types are allowed.
+ for (wasm::ValueType type : sig->all()) CHECK(!type.has_index());
+#endif
+ // TODO(7748): Check for subtyping instead if WebAssembly.Function can define
+ // signature supertype.
+ return wasm::GetWasmEngine()->type_canonicalizer()->AddRecursiveGroup(sig) ==
+ other_canonical_sig_index;
}
PodArray<wasm::ValueType> WasmCapiFunction::GetSerializedSignature() const {
@@ -2199,193 +2211,265 @@ Handle<AsmWasmData> AsmWasmData::New(
return result;
}
+namespace {
+constexpr int32_t kInt31MaxValue = 0x3fffffff;
+constexpr int32_t kInt31MinValue = -kInt31MaxValue - 1;
+
+// Tries to canonicalize a HeapNumber to an i31ref Smi. Returns the original
+// HeapNumber if it fails.
+Handle<Object> CanonicalizeHeapNumber(Handle<Object> number, Isolate* isolate) {
+ double double_value = Handle<HeapNumber>::cast(number)->value();
+ if (double_value >= kInt31MinValue && double_value <= kInt31MaxValue &&
+ !IsMinusZero(double_value) &&
+ double_value == FastI2D(FastD2I(double_value))) {
+ return handle(Smi::FromInt(FastD2I(double_value)), isolate);
+ }
+ return number;
+}
+
+// Tries to canonicalize a Smi into an i31 Smi. Returns a HeapNumber if it
+// fails.
+Handle<Object> CanonicalizeSmi(Handle<Object> smi, Isolate* isolate) {
+ if constexpr (SmiValuesAre31Bits()) return smi;
+
+ int32_t value = Handle<Smi>::cast(smi)->value();
+
+ if (value <= kInt31MaxValue && value >= kInt31MinValue) {
+ return smi;
+ } else {
+ return isolate->factory()->NewHeapNumber(value);
+ }
+}
+} // namespace
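The helpers above fold JS numbers into the signed 31-bit i31ref range, rejecting -0 and non-integral values. A standalone version of the same test, assuming nothing beyond standard C++:

#include <cmath>
#include <cstdint>
#include <optional>

// A double becomes an i31 value only if it is an integral number in
// [-2^30, 2^30 - 1] and is not negative zero (NaN fails the range test).
std::optional<int32_t> TryCanonicalizeToI31(double v) {
  constexpr int32_t kMax = 0x3fffffff;   //  2^30 - 1
  constexpr int32_t kMin = -kMax - 1;    // -2^30
  if (!(v >= kMin && v <= kMax)) return std::nullopt;
  if (v == 0.0 && std::signbit(v)) return std::nullopt;          // reject -0
  int32_t truncated = static_cast<int32_t>(v);
  if (static_cast<double>(truncated) != v) return std::nullopt;  // fractional
  return truncated;
}

// Examples: 1073741823.0 and -1073741824.0 canonicalize; 1073741824.0, 0.5,
// and -0.0 do not.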
+
namespace wasm {
-MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
- Handle<Object> value, ValueType expected,
+MaybeHandle<Object> JSToWasmObject(Isolate* isolate, Handle<Object> value,
+ ValueType expected_canonical,
const char** error_message) {
- DCHECK(expected.is_reference());
- switch (expected.kind()) {
- case kRefNull:
- if (value->IsNull(isolate)) {
- HeapType::Representation repr = expected.heap_representation();
- switch (repr) {
- case HeapType::kStringViewWtf8:
- *error_message = "stringview_wtf8 has no JS representation";
- return {};
- case HeapType::kStringViewWtf16:
- *error_message = "stringview_wtf16 has no JS representation";
- return {};
- case HeapType::kStringViewIter:
- *error_message = "stringview_iter has no JS representation";
- return {};
- default:
- return value;
- }
+ DCHECK(expected_canonical.is_object_reference());
+ if (expected_canonical.kind() == kRefNull && value->IsNull(isolate)) {
+ switch (expected_canonical.heap_representation()) {
+ case HeapType::kStringViewWtf8:
+ *error_message = "stringview_wtf8 has no JS representation";
+ return {};
+ case HeapType::kStringViewWtf16:
+ *error_message = "stringview_wtf16 has no JS representation";
+ return {};
+ case HeapType::kStringViewIter:
+ *error_message = "stringview_iter has no JS representation";
+ return {};
+ default:
+ bool is_extern_subtype =
+ expected_canonical.heap_representation() == HeapType::kExtern ||
+ expected_canonical.heap_representation() == HeapType::kNoExtern;
+ return is_extern_subtype ? value : isolate->factory()->wasm_null();
+ }
+ }
+
+ // TODO(7748): Streamline interaction of undefined and (ref any).
+ switch (expected_canonical.heap_representation()) {
+ case HeapType::kFunc: {
+ if (!(WasmExternalFunction::IsWasmExternalFunction(*value) ||
+ WasmCapiFunction::IsWasmCapiFunction(*value))) {
+ *error_message =
+ "function-typed object must be null (if nullable) or a Wasm "
+ "function object";
+ return {};
}
- V8_FALLTHROUGH;
- case kRef: {
- // TODO(7748): Allow all in-range numbers for i31. Make sure to convert
- // Smis to i31refs if needed.
- // TODO(7748): Streamline interaction of undefined and (ref any).
- HeapType::Representation repr = expected.heap_representation();
- switch (repr) {
- case HeapType::kFunc: {
- if (!(WasmExternalFunction::IsWasmExternalFunction(*value) ||
- WasmCapiFunction::IsWasmCapiFunction(*value))) {
- *error_message =
- "function-typed object must be null (if nullable) or a Wasm "
- "function object";
- return {};
- }
- return MaybeHandle<Object>(Handle<JSFunction>::cast(value)
- ->shared()
- .wasm_function_data()
- .internal(),
- isolate);
- }
- case HeapType::kExtern: {
- if (!value->IsNull(isolate)) return value;
- *error_message = "null is not allowed for (ref extern)";
- return {};
- }
- case HeapType::kAny: {
- if (!value->IsNull(isolate)) return value;
- *error_message = "null is not allowed for (ref any)";
- return {};
- }
- case HeapType::kStruct: {
- if (value->IsWasmStruct() ||
- (value->IsWasmArray() && v8_flags.wasm_gc_structref_as_dataref)) {
- return value;
- }
+ return MaybeHandle<Object>(Handle<JSFunction>::cast(value)
+ ->shared()
+ .wasm_function_data()
+ .internal(),
+ isolate);
+ }
+ case HeapType::kExtern: {
+ if (!value->IsNull(isolate)) return value;
+ *error_message = "null is not allowed for (ref extern)";
+ return {};
+ }
+ case HeapType::kAny: {
+ if (value->IsSmi()) return CanonicalizeSmi(value, isolate);
+ if (value->IsHeapNumber()) {
+ return CanonicalizeHeapNumber(value, isolate);
+ }
+ if (!value->IsNull(isolate)) return value;
+ *error_message = "null is not allowed for (ref any)";
+ return {};
+ }
+ case HeapType::kStruct: {
+ if (value->IsWasmStruct()) {
+ return value;
+ }
+ *error_message =
+ "structref object must be null (if nullable) or a wasm struct";
+ return {};
+ }
+ case HeapType::kArray: {
+ if (value->IsWasmArray()) {
+ return value;
+ }
+ *error_message =
+ "arrayref object must be null (if nullable) or a wasm array";
+ return {};
+ }
+ case HeapType::kEq: {
+ if (value->IsSmi()) {
+ Handle<Object> truncated = CanonicalizeSmi(value, isolate);
+ if (truncated->IsSmi()) return truncated;
+ } else if (value->IsHeapNumber()) {
+ Handle<Object> truncated = CanonicalizeHeapNumber(value, isolate);
+ if (truncated->IsSmi()) return truncated;
+ } else if (value->IsWasmStruct() || value->IsWasmArray()) {
+ return value;
+ }
+ *error_message =
+ "eqref object must be null (if nullable), or a wasm "
+ "struct/array, or a Number that fits in i31ref range";
+ return {};
+ }
+ case HeapType::kI31: {
+ if (value->IsSmi()) {
+ Handle<Object> truncated = CanonicalizeSmi(value, isolate);
+ if (truncated->IsSmi()) return truncated;
+ } else if (value->IsHeapNumber()) {
+ Handle<Object> truncated = CanonicalizeHeapNumber(value, isolate);
+ if (truncated->IsSmi()) return truncated;
+ }
+ *error_message =
+ "i31ref object must be null (if nullable) or a Number that fits "
+ "in i31ref range";
+ return {};
+ }
+ case HeapType::kString:
+ if (value->IsString()) return value;
+ *error_message = "wrong type (expected a string)";
+ return {};
+ case HeapType::kStringViewWtf8:
+ *error_message = "stringview_wtf8 has no JS representation";
+ return {};
+ case HeapType::kStringViewWtf16:
+ *error_message = "stringview_wtf16 has no JS representation";
+ return {};
+ case HeapType::kStringViewIter:
+ *error_message = "stringview_iter has no JS representation";
+ return {};
+ case HeapType::kNoFunc:
+ case HeapType::kNoExtern:
+ case HeapType::kNone: {
+ *error_message = "only null allowed for null types";
+ return {};
+ }
+ default: {
+ auto type_canonicalizer = GetWasmEngine()->type_canonicalizer();
+
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ WasmExportedFunction function = WasmExportedFunction::cast(*value);
+ uint32_t real_type_index = function.shared()
+ .wasm_exported_function_data()
+ .canonical_type_index();
+ if (!type_canonicalizer->IsCanonicalSubtype(
+ real_type_index, expected_canonical.ref_index())) {
*error_message =
- "structref object must be null (if nullable) or a wasm struct";
+ "assigned exported function has to be a subtype of the "
+ "expected type";
return {};
}
- case HeapType::kArray: {
- if (value->IsWasmArray()) {
- return value;
- }
+ return WasmInternalFunction::FromExternal(value, isolate);
+ } else if (WasmJSFunction::IsWasmJSFunction(*value)) {
+ if (!WasmJSFunction::cast(*value).MatchesSignature(
+ expected_canonical.ref_index())) {
*error_message =
- "arrayref object must be null (if nullable) or a wasm array";
+ "assigned WebAssembly.Function has to be a subtype of the "
+ "expected type";
return {};
}
- case HeapType::kEq: {
- if (value->IsSmi() || value->IsWasmStruct() || value->IsWasmArray()) {
- return value;
- }
+ return WasmInternalFunction::FromExternal(value, isolate);
+ } else if (WasmCapiFunction::IsWasmCapiFunction(*value)) {
+ if (!WasmCapiFunction::cast(*value).MatchesSignature(
+ expected_canonical.ref_index())) {
*error_message =
- "eqref object must be null (if nullable) or a wasm "
- "i31/struct/array";
+ "assigned C API function has to be a subtype of the expected "
+ "type";
return {};
}
- case HeapType::kI31: {
- if (value->IsSmi()) return value;
- *error_message =
- "i31ref object must be null (if nullable) or a wasm i31";
+ return WasmInternalFunction::FromExternal(value, isolate);
+ } else if (value->IsWasmStruct() || value->IsWasmArray()) {
+ auto wasm_obj = Handle<WasmObject>::cast(value);
+ WasmTypeInfo type_info = wasm_obj->map().wasm_type_info();
+ uint32_t real_idx = type_info.type_index();
+ const WasmModule* real_module = type_info.instance().module();
+ uint32_t real_canonical_index =
+ real_module->isorecursive_canonical_type_ids[real_idx];
+ if (!type_canonicalizer->IsCanonicalSubtype(
+ real_canonical_index, expected_canonical.ref_index())) {
+ *error_message = "object is not a subtype of expected type";
return {};
}
- case HeapType::kString:
- if (value->IsString()) return value;
- *error_message = "wrong type (expected a string)";
- return {};
- case HeapType::kStringViewWtf8:
- *error_message = "stringview_wtf8 has no JS representation";
- return {};
- case HeapType::kStringViewWtf16:
- *error_message = "stringview_wtf16 has no JS representation";
- return {};
- case HeapType::kStringViewIter:
- *error_message = "stringview_iter has no JS representation";
- return {};
- default:
- if (module == nullptr) {
- *error_message =
- "an object defined in JavaScript cannot be compatible with a "
- "type defined in a Webassembly module";
- return {};
- }
- DCHECK(module->has_type(expected.ref_index()));
- if (module->has_signature(expected.ref_index())) {
- if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
- WasmExportedFunction function =
- WasmExportedFunction::cast(*value);
- const WasmModule* exporting_module = function.instance().module();
- ValueType real_type = ValueType::Ref(
- exporting_module->functions[function.function_index()]
- .sig_index);
- if (!IsSubtypeOf(real_type, expected, exporting_module, module)) {
- *error_message =
- "assigned exported function has to be a subtype of the "
- "expected type";
- return {};
- }
- } else if (WasmJSFunction::IsWasmJSFunction(*value)) {
- // Since a WasmJSFunction cannot refer to indexed types (definable
- // only in a module), we do not need full function subtyping.
- // TODO(manoskouk): Change this if wasm types can be exported.
- if (!WasmJSFunction::cast(*value).MatchesSignature(
- module->signature(expected.ref_index()))) {
- *error_message =
- "assigned WasmJSFunction has to be a subtype of the "
- "expected type";
- return {};
- }
- } else if (WasmCapiFunction::IsWasmCapiFunction(*value)) {
- // Since a WasmCapiFunction cannot refer to indexed types
- // (definable only in a module), we do not need full function
- // subtyping.
- // TODO(manoskouk): Change this if wasm types can be exported.
- if (!WasmCapiFunction::cast(*value).MatchesSignature(
- module->signature(expected.ref_index()))) {
- *error_message =
- "assigned WasmCapiFunction has to be a subtype of the "
- "expected type";
- return {};
- }
- } else {
- *error_message =
- "function-typed object must be null (if nullable) or a Wasm "
- "function object";
- return {};
- }
- return MaybeHandle<Object>(Handle<JSFunction>::cast(value)
- ->shared()
- .wasm_function_data()
- .internal(),
- isolate);
- } else {
- // A struct or array type with index is expected.
- DCHECK(module->has_struct(expected.ref_index()) ||
- module->has_array(expected.ref_index()));
- if (!value->IsWasmStruct() && !value->IsWasmArray()) {
- *error_message = "object incompatible with wasm type";
- return {};
- }
- auto wasm_obj = Handle<WasmObject>::cast(value);
- WasmTypeInfo type_info = wasm_obj->map().wasm_type_info();
- uint32_t actual_idx = type_info.type_index();
- const WasmModule* actual_module = type_info.instance().module();
- if (!IsHeapSubtypeOf(HeapType(actual_idx), expected.heap_type(),
- actual_module, module)) {
- *error_message = "object is not a subtype of element type";
- return {};
- }
- return value;
- }
+ return value;
+ } else {
+ *error_message = "JS object does not match expected wasm type";
+ return {};
}
}
- case kRtt:
- case kI8:
- case kI16:
- case kI32:
- case kI64:
- case kF32:
- case kF64:
- case kS128:
- case kVoid:
- case kBottom:
+ }
+}
+
+// Utility which canonicalizes {expected} in addition.
+MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
+ Handle<Object> value, ValueType expected,
+ const char** error_message) {
+ ValueType expected_canonical = expected;
+ if (expected_canonical.has_index()) {
+ uint32_t canonical_index =
+ module->isorecursive_canonical_type_ids[expected_canonical.ref_index()];
+ expected_canonical = ValueType::RefMaybeNull(
+ canonical_index, expected_canonical.nullability());
+ }
+ return JSToWasmObject(isolate, value, expected_canonical, error_message);
+}
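The wrapper above only remaps indexed reference types through the module's isorecursive_canonical_type_ids table before delegating to the module-independent overload. A toy illustration of that remapping (the struct and names are hypothetical stand-ins):

#include <cstdint>
#include <vector>

// Abstract types carry no index; indexed types carry a module-relative index
// that must be remapped to its canonical id.
struct RefType {
  bool has_index;
  uint32_t index;
  bool nullable;
};

RefType Canonicalize(const RefType& t,
                     const std::vector<uint32_t>& isorecursive_canonical_ids) {
  if (!t.has_index) return t;  // abstract heap types need no remapping
  return RefType{true, isorecursive_canonical_ids[t.index], t.nullable};
}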
+
+MaybeHandle<Object> WasmToJSObject(Isolate* isolate, Handle<Object> value,
+ HeapType type, const char** error_message) {
+ switch (type.representation()) {
+ case i::wasm::HeapType::kExtern:
+ case i::wasm::HeapType::kString:
+ case i::wasm::HeapType::kI31:
+ case i::wasm::HeapType::kStruct:
+ case i::wasm::HeapType::kArray:
+ case i::wasm::HeapType::kEq:
+ case i::wasm::HeapType::kAny:
+ return value->IsWasmNull() ? isolate->factory()->null_value() : value;
+ case i::wasm::HeapType::kFunc: {
+ if (value->IsWasmNull()) {
+ return isolate->factory()->null_value();
+ } else {
+ DCHECK(value->IsWasmInternalFunction());
+ return handle(
+ i::Handle<i::WasmInternalFunction>::cast(value)->external(),
+ isolate);
+ }
+ }
+ case i::wasm::HeapType::kStringViewWtf8:
+ *error_message = "stringview_wtf8 has no JS representation";
+ return {};
+ case i::wasm::HeapType::kStringViewWtf16:
+ *error_message = "stringview_wtf16 has no JS representation";
+ return {};
+ case i::wasm::HeapType::kStringViewIter:
+ *error_message = "stringview_iter has no JS representation";
+ return {};
+ case i::wasm::HeapType::kBottom:
UNREACHABLE();
+ default:
+ if (value->IsWasmNull()) {
+ return isolate->factory()->null_value();
+ } else if (value->IsWasmInternalFunction()) {
+ return handle(
+ i::Handle<i::WasmInternalFunction>::cast(value)->external(),
+ isolate);
+ } else {
+ return value;
+ }
}
}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index b3b99175a1..74cbdd4d9c 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -37,6 +37,7 @@ struct WasmFunction;
struct WasmGlobal;
struct WasmModule;
struct WasmTag;
+using WasmTagSig = FunctionSig;
class WasmValue;
class WireBytesRef;
} // namespace wasm
@@ -305,6 +306,7 @@ class WasmGlobalObject
inline int64_t GetI64();
inline float GetF32();
inline double GetF64();
+ inline byte* GetS128RawBytes();
inline Handle<Object> GetRef();
inline void SetI32(int32_t value);
@@ -347,7 +349,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(feedback_vectors, FixedArray)
DECL_SANDBOXED_POINTER_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
- DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(new_allocation_limit_address, Address*)
@@ -364,7 +365,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(tiering_budget_array, uint32_t*)
DECL_ACCESSORS(data_segment_starts, FixedAddressArray)
DECL_ACCESSORS(data_segment_sizes, FixedUInt32Array)
- DECL_ACCESSORS(dropped_elem_segments, FixedUInt8Array)
+ DECL_ACCESSORS(element_segments, FixedArray)
DECL_PRIMITIVE_ACCESSORS(break_on_entry, uint8_t)
// Clear uninitialized padding space. This ensures that the snapshot content
@@ -393,7 +394,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
V(kGlobalsStartOffset, kSystemPointerSize) \
- V(kIsolateRootOffset, kSystemPointerSize) \
V(kJumpTableStartOffset, kSystemPointerSize) \
/* End of often-accessed fields. */ \
/* Continue with system pointer size fields to maintain alignment. */ \
@@ -407,7 +407,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
/* Less than system pointer size aligned fields are below. */ \
V(kDataSegmentStartsOffset, kTaggedSize) \
V(kDataSegmentSizesOffset, kTaggedSize) \
- V(kDroppedElemSegmentsOffset, kTaggedSize) \
+ V(kElementSegmentsOffset, kTaggedSize) \
V(kModuleObjectOffset, kTaggedSize) \
V(kExportsObjectOffset, kTaggedSize) \
V(kNativeContextOffset, kTaggedSize) \
@@ -461,7 +461,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
kImportedFunctionTargetsOffset,
kDataSegmentStartsOffset,
kDataSegmentSizesOffset,
- kDroppedElemSegmentsOffset};
+ kElementSegmentsOffset};
const wasm::WasmModule* module();
@@ -541,8 +541,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
private:
static void InitDataSegmentArrays(Handle<WasmInstanceObject>,
Handle<WasmModuleObject>);
- static void InitElemSegmentArrays(Handle<WasmInstanceObject>,
- Handle<WasmModuleObject>);
};
// Representation of WebAssembly.Exception JavaScript-level object.
@@ -551,10 +549,11 @@ class WasmTagObject
public:
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this tag object.
- bool MatchesSignature(const wasm::FunctionSig* sig);
+ bool MatchesSignature(uint32_t expected_canonical_type_index);
static Handle<WasmTagObject> New(Isolate* isolate,
const wasm::FunctionSig* sig,
+ uint32_t canonical_type_index,
Handle<HeapObject> tag);
TQ_OBJECT_CONSTRUCTORS(WasmTagObject)
@@ -579,6 +578,7 @@ class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSObject {
Isolate* isolate, Handle<WasmExceptionPackage> exception_package);
// Determines the size of the array holding all encoded exception values.
+ static uint32_t GetEncodedSize(const wasm::WasmTagSig* tag);
static uint32_t GetEncodedSize(const wasm::WasmTag* tag);
DECL_CAST(WasmExceptionPackage)
@@ -612,14 +612,13 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE static Handle<WasmExportedFunction> New(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
- int arity, Handle<CodeT> export_wrapper);
+ int arity, Handle<Code> export_wrapper);
Address GetWasmCallTarget();
V8_EXPORT_PRIVATE const wasm::FunctionSig* sig();
- bool MatchesSignature(const wasm::WasmModule* other_module,
- const wasm::FunctionSig* other_sig);
+ bool MatchesSignature(uint32_t other_canonical_sig_index);
// Return a null-terminated string with the debug name in the form
// 'js-to-wasm:<sig>'.
@@ -644,8 +643,8 @@ class WasmJSFunction : public JSFunction {
wasm::Suspend GetSuspend() const;
// Deserializes the signature of this function using the provided zone. Note
// that lifetime of the signature is hence directly coupled to the zone.
- const wasm::FunctionSig* GetSignature(Zone* zone);
- bool MatchesSignature(const wasm::FunctionSig* sig);
+ const wasm::FunctionSig* GetSignature(Zone* zone) const;
+ bool MatchesSignature(uint32_t other_canonical_sig_index) const;
DECL_CAST(WasmJSFunction)
OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
@@ -663,7 +662,8 @@ class WasmCapiFunction : public JSFunction {
PodArray<wasm::ValueType> GetSerializedSignature() const;
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this C-API function object.
- bool MatchesSignature(const wasm::FunctionSig* sig) const;
+ bool MatchesSignature(uint32_t other_canonical_sig_index) const;
+ const wasm::FunctionSig* GetSignature(Zone* zone) const;
DECL_CAST(WasmCapiFunction)
OBJECT_CONSTRUCTORS(WasmCapiFunction, JSFunction);
@@ -777,7 +777,7 @@ class WasmJSFunctionData
: public TorqueGeneratedWasmJSFunctionData<WasmJSFunctionData,
WasmFunctionData> {
public:
- DECL_ACCESSORS(wasm_to_js_wrapper_code, CodeT)
+ DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
// Dispatched behavior.
DECL_PRINTER(WasmJSFunctionData)
@@ -1060,17 +1060,47 @@ class WasmSuspenderObject
TQ_OBJECT_CONSTRUCTORS(WasmSuspenderObject)
};
+class WasmNull : public TorqueGeneratedWasmNull<WasmNull, HeapObject> {
+ public:
+#if V8_STATIC_ROOTS_BOOL || V8_STATIC_ROOT_GENERATION_BOOL
+ // TODO(manoskouk): Make it smaller if able and needed.
+ static constexpr int kSize = 64 * KB + kTaggedSize;
+ // Payload should be a multiple of page size.
+ static_assert((kSize - kTaggedSize) % kMinimumOSPageSize == 0);
+ // Any wasm struct offset should fit in the object.
+ static_assert(kSize >= WasmStruct::kHeaderSize +
+ wasm::kV8MaxWasmStructFields * kSimd128Size);
+
+ Address payload() { return ptr() + kHeaderSize - kHeapObjectTag; }
+#else
+ static constexpr int kSize = kTaggedSize;
+#endif
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(WasmNull)
+};
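The static_asserts above pin down the WasmNull size: one tagged header word plus a 64 KiB payload that must be page-aligned and large enough to cover any wasm struct field offset. A quick compile-time check of the arithmetic, with commonly used values assumed for illustration (the real constants come from the build configuration):

#include <cstddef>

constexpr std::size_t kKB = 1024;
constexpr std::size_t kTaggedSize = 8;              // assumed; 4 with pointer compression
constexpr std::size_t kMinimumOSPageSize = 4 * kKB; // assumed typical OS page size
constexpr std::size_t kWasmNullSize = 64 * kKB + kTaggedSize;

// Payload is a whole number of OS pages: 64 KiB = 16 * 4 KiB.
static_assert((kWasmNullSize - kTaggedSize) % kMinimumOSPageSize == 0);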
+
#undef DECL_OPTIONAL_ACCESSORS
namespace wasm {
// Takes a {value} in the JS representation and typechecks it according to
// {expected}. If the typecheck succeeds, returns the wasm representation of the
// object; otherwise, returns the empty handle.
+MaybeHandle<Object> JSToWasmObject(Isolate* isolate, Handle<Object> value,
+ ValueType expected_canonical,
+ const char** error_message);
+
+// Utility which canonicalizes {expected} in addition.
MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
Handle<Object> value, ValueType expected,
const char** error_message);
-} // namespace wasm
+// Takes a {value} in the Wasm representation and tries to transform it to the
+// respective JS representation. If the transformation fails, the empty handle
+// is returned.
+MaybeHandle<Object> WasmToJSObject(Isolate* isolate, Handle<Object> value,
+ HeapType type, const char** error_message);
+} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index 3607621bbf..ea72ccb95e 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -18,7 +18,6 @@ extern class WasmInstanceObject extends JSObject;
// a sandboxed pointer, because that would require having access to the isolate
// root in the first place.
extern class WasmApiFunctionRef extends HeapObject {
- isolate_root: RawPtr;
native_context: NativeContext;
callable: JSReceiver|Undefined;
// Present when compiling JSFastApiCall wrappers, needed
@@ -40,8 +39,7 @@ extern class WasmInternalFunction extends HeapObject {
// The external (JS) representation of this function reference.
external: JSFunction|Undefined;
// This field is used when the call target is null.
- @if(V8_EXTERNAL_CODE_SPACE) code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) code: Code;
+ code: Code;
// The call target. Tagged with the kWasmInternalFunctionCallTargetTag
call_target: ExternalPointer;
}
@@ -53,8 +51,7 @@ extern class WasmFunctionData extends HeapObject {
// The wasm-internal representation of this function object.
internal: WasmInternalFunction;
// Used for calling this function from JavaScript.
- @if(V8_EXTERNAL_CODE_SPACE) wrapper_code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) wrapper_code: Code;
+ wrapper_code: Code;
// Encode the {promising} and {suspending} flags in a single smi.
js_promise_flags: Smi;
}
@@ -68,22 +65,20 @@ extern class WasmExportedFunctionData extends WasmFunctionData {
wrapper_budget: Smi;
// The next two fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
- @if(V8_EXTERNAL_CODE_SPACE) c_wrapper_code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) c_wrapper_code: Code;
+ c_wrapper_code: Code;
packed_args_size: Smi;
+ canonical_type_index: Smi;
sig: ExternalPointer; // wasm::FunctionSig*
}
extern class WasmJSFunctionData extends WasmFunctionData {
serialized_return_count: Smi;
serialized_parameter_count: Smi;
- // TODO(7748): Maybe store the canonical type index of the signature instead.
serialized_signature: PodArrayOfWasmValueType;
}
extern class WasmCapiFunctionData extends WasmFunctionData {
embedder_data: Foreign; // Managed<wasm::FuncData>
- // TODO(7748): Maybe store the canonical type index of the signature instead.
serialized_signature: PodArrayOfWasmValueType;
}
@@ -170,6 +165,7 @@ extern class WasmGlobalObject extends JSObject {
extern class WasmTagObject extends JSObject {
serialized_signature: PodArrayOfWasmValueType;
tag: HeapObject;
+ canonical_type_index: Smi;
}
type WasmExportedFunction extends JSFunction;
@@ -220,4 +216,12 @@ extern class WasmArray extends WasmObject {
class WasmStringViewIter extends HeapObject {
string: String;
offset: uint32; // Index into string.
+
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
}
+
+extern class WasmNull extends HeapObject {}
+
+extern macro WasmNullConstant(): WasmNull;
+const kWasmNull: WasmNull = WasmNullConstant();
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index cda2c11721..a30eadab44 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -28,18 +28,11 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
return os;
}
-// TODO(7748): Once we have a story for JS interaction of structs/arrays, this
-// function should become independent of module. Remove 'module' parameter in
-// this function as well as all transitive callees that no longer need it
-// (In essence, revert
-// https://chromium-review.googlesource.com/c/v8/v8/+/2413251).
-bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
- const WasmFeatures& enabled_features) {
+bool IsJSCompatibleSignature(const FunctionSig* sig) {
for (auto type : sig->all()) {
// Structs and arrays may only be passed via externref.
// Rtts are implicit and can not be used explicitly.
- if (type == kWasmS128 || type.is_rtt() ||
- (type.has_index() && !module->has_signature(type.ref_index()))) {
+ if (type == kWasmS128 || type.is_rtt()) {
return false;
}
if (type.is_object_reference()) {
@@ -47,11 +40,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
case HeapType::kStringViewWtf8:
case HeapType::kStringViewWtf16:
case HeapType::kStringViewIter:
- case HeapType::kNone:
- case HeapType::kNoFunc:
- case HeapType::kNoExtern:
- case HeapType::kAny:
- case HeapType::kI31:
return false;
default:
break;
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 685645a4f2..6d55228f8d 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -24,10 +24,9 @@ namespace wasm {
class WasmFeatures;
struct WasmModule;
-std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
- const WasmModule* module,
- const WasmFeatures&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const FunctionSig& function);
+V8_EXPORT_PRIVATE bool IsJSCompatibleSignature(const FunctionSig* sig);
// Format of all opcode macros: kExprName, binary, signature, wat name
@@ -62,7 +61,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(ReturnCallIndirect, 0x13, _, "return_call_indirect") \
V(CallRef, 0x14, _, "call_ref") /* typed_funcref prototype */ \
V(ReturnCallRef, 0x15, _, "return_call_ref") /* typed_funcref prototype */ \
- V(CallRefDeprecated, 0x17, _, "call_ref") /* temporary, for compat.*/ \
V(Drop, 0x1a, _, "drop") \
V(Select, 0x1b, _, "select") \
V(SelectWithType, 0x1c, _, "select") \
@@ -695,15 +693,14 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(ArrayGetS, 0xfb14, _, "array.get_s") \
V(ArrayGetU, 0xfb15, _, "array.get_u") \
V(ArraySet, 0xfb16, _, "array.set") \
- V(ArrayLenDeprecated, 0xfb17, _, "array.len") \
- V(ArrayCopy, 0xfb18, _, \
- "array.copy") /* not standardized - V8 experimental */ \
+ V(ArrayCopy, 0xfb18, _, "array.copy") \
V(ArrayLen, 0xfb19, _, "array.len") \
V(ArrayNewFixed, 0xfb1a, _, "array.new_fixed") \
V(ArrayNew, 0xfb1b, _, "array.new") \
V(ArrayNewDefault, 0xfb1c, _, "array.new_default") \
V(ArrayNewData, 0xfb1d, _, "array.new_data") \
V(ArrayNewElem, 0xfb1f, _, "array.new_elem") \
+ V(ArrayFill, 0xfb0f, _, "array.init") \
V(I31New, 0xfb20, _, "i31.new") \
V(I31GetS, 0xfb21, _, "i31.get_s") \
V(I31GetU, 0xfb22, _, "i31.get_u") \
@@ -713,8 +710,12 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RefCast, 0xfb41, _, "ref.cast") \
V(RefCastNull, 0xfb49, _, "ref.cast null") \
V(RefCastDeprecated, 0xfb45, _, "ref.cast") \
- V(BrOnCast, 0xfb46, _, "br_on_cast") \
- V(BrOnCastFail, 0xfb47, _, "br_on_cast_fail") \
+ V(BrOnCast, 0xfb42, _, "br_on_cast") \
+ V(BrOnCastNull, 0xfb4a, _, "br_on_cast null") \
+ V(BrOnCastDeprecated, 0xfb46, _, "br_on_cast") \
+ V(BrOnCastFail, 0xfb43, _, "br_on_cast_fail") \
+ V(BrOnCastFailNull, 0xfb4b, _, "br_on_cast_fail null") \
+ V(BrOnCastFailDeprecated, 0xfb47, _, "br_on_cast_fail") \
V(RefCastNop, 0xfb4c, _, "ref.cast_nop") \
V(RefIsStruct, 0xfb51, _, "ref.is_struct") \
V(RefIsI31, 0xfb52, _, "ref.is_i31") \
@@ -745,6 +746,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(StringNewWtf8, 0xfb8c, _, "string.new_wtf8") \
V(StringEncodeLossyUtf8, 0xfb8d, _, "string.encode_lossy_utf8") \
V(StringEncodeWtf8, 0xfb8e, _, "string.encode_wtf8") \
+ V(StringNewUtf8Try, 0xfb8f, _, "string.new_utf8_try") \
V(StringAsWtf8, 0xfb90, _, "string.as_wtf8") \
V(StringViewWtf8Advance, 0xfb91, _, "stringview_wtf8.advance") \
V(StringViewWtf8EncodeUtf8, 0xfb92, _, "stringview_wtf8.encode_utf8") \
@@ -762,6 +764,9 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(StringViewIterAdvance, 0xfba2, _, "stringview_iter.advance") \
V(StringViewIterRewind, 0xfba3, _, "stringview_iter.rewind") \
V(StringViewIterSlice, 0xfba4, _, "stringview_iter.slice") \
+ V(StringCompare, 0xfba8, _, "string.compare") \
+ V(StringFromCodePoint, 0xfba9, _, "string.from_code_point") \
+ V(StringHash, 0xfbaa, _, "string.hash") \
V(StringNewUtf8Array, 0xfbb0, _, "string.new_utf8_array") \
V(StringNewWtf16Array, 0xfbb1, _, "string.new_wtf16_array") \
V(StringEncodeUtf8Array, 0xfbb2, _, "string.encode_utf8_array") \
@@ -769,7 +774,8 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(StringNewLossyUtf8Array, 0xfbb4, _, "string.new_lossy_utf8_array") \
V(StringNewWtf8Array, 0xfbb5, _, "string.new_wtf8_array") \
V(StringEncodeLossyUtf8Array, 0xfbb6, _, "string.encode_lossy_utf8_array") \
- V(StringEncodeWtf8Array, 0xfbb7, _, "string.encode_wtf8_array")
+ V(StringEncodeWtf8Array, 0xfbb7, _, "string.encode_wtf8_array") \
+ V(StringNewUtf8ArrayTry, 0xfbb8, _, "string.new_utf8_array_try")
// All opcodes.
#define FOREACH_OPCODE(V) \
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 802326dd9e..8a53e96ac9 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -33,22 +33,26 @@ class V8_EXPORT_PRIVATE WasmError {
WasmError(uint32_t offset, std::string message)
: offset_(offset), message_(std::move(message)) {
- // The error message must not be empty, otherwise {empty()} would be true.
+ DCHECK_NE(kNoErrorOffset, offset);
DCHECK(!message_.empty());
}
PRINTF_FORMAT(3, 4)
WasmError(uint32_t offset, const char* format, ...) : offset_(offset) {
+ DCHECK_NE(kNoErrorOffset, offset);
va_list args;
va_start(args, format);
message_ = FormatError(format, args);
va_end(args);
- // The error message must not be empty, otherwise {empty()} would be true.
DCHECK(!message_.empty());
}
- bool empty() const { return message_.empty(); }
- bool has_error() const { return !message_.empty(); }
+ bool has_error() const {
+ DCHECK_EQ(offset_ == kNoErrorOffset, message_.empty());
+ return offset_ != kNoErrorOffset;
+ }
+
+ operator bool() const { return has_error(); }
uint32_t offset() const { return offset_; }
const std::string& message() const& { return message_; }
@@ -58,7 +62,8 @@ class V8_EXPORT_PRIVATE WasmError {
static std::string FormatError(const char* format, va_list args);
private:
- uint32_t offset_ = 0;
+ static constexpr uint32_t kNoErrorOffset = kMaxUInt32;
+ uint32_t offset_ = kNoErrorOffset;
std::string message_;
};
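The WasmError changes replace the old "empty message means no error" convention with a kNoErrorOffset sentinel and add an operator bool. A minimal usage sketch of the new convention on a simplified stand-in class (not the real WasmError):

#include <cassert>
#include <cstdint>
#include <string>

class Error {
 public:
  Error() = default;  // "no error"
  Error(uint32_t offset, std::string message)
      : offset_(offset), message_(std::move(message)) {
    assert(offset != kNoErrorOffset && !message_.empty());
  }
  bool has_error() const { return offset_ != kNoErrorOffset; }
  explicit operator bool() const { return has_error(); }

 private:
  static constexpr uint32_t kNoErrorOffset = UINT32_MAX;
  uint32_t offset_ = kNoErrorOffset;
  std::string message_;
};

int main() {
  Error ok;
  Error bad(17, "unexpected opcode");
  assert(!ok && bad);  // operator bool mirrors has_error()
}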
@@ -85,7 +90,16 @@ class Result {
explicit Result(WasmError error) : error_(std::move(error)) {}
- bool ok() const { return error_.empty(); }
+ // Implicitly convert a Result<T> to Result<U> if T implicitly converts to U.
+ // Only provide that for r-value references (i.e. temporary objects) though,
+ // to be used if passing or returning a result by value.
+ template <typename U,
+ typename = std::enable_if_t<std::is_assignable_v<U, T&&>>>
+ operator Result<U>() const&& {
+ return ok() ? Result<U>{std::move(value_)} : Result<U>{error_};
+ }
+
+ bool ok() const { return !failed(); }
bool failed() const { return error_.has_error(); }
const WasmError& error() const& { return error_; }
WasmError&& error() && { return std::move(error_); }
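The new converting operator lets a temporary Result<T> be returned where a Result<U> is expected whenever T is assignable to U. A stripped-down sketch of the same mechanism, without the error slot (class and names are illustrative):

#include <string>
#include <type_traits>
#include <utility>

template <typename T>
class SimpleResult {
 public:
  explicit SimpleResult(T value) : value_(std::move(value)) {}

  // R-value converting operator in the spirit of the one added above:
  // a temporary SimpleResult<T> converts to SimpleResult<U> when T assigns to U.
  template <typename U,
            typename = std::enable_if_t<std::is_assignable_v<U&, T&&>>>
  operator SimpleResult<U>() && {
    return SimpleResult<U>{std::move(value_)};
  }

  const T& value() const { return value_; }

 private:
  T value_;
};

SimpleResult<std::string> MakeName() {
  return SimpleResult<const char*>{"wasm"};  // converts via the operator above
}

int main() { return MakeName().value() == "wasm" ? 0 : 1; }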
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 8d6adeec00..7661ae8c05 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -21,6 +21,7 @@
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/well-known-imports.h"
namespace v8 {
namespace internal {
@@ -56,14 +57,17 @@ class Writer {
}
}
- void WriteVector(const base::Vector<const byte> v) {
- DCHECK_GE(current_size(), v.size());
- if (v.size() > 0) {
- memcpy(current_location(), v.begin(), v.size());
- pos_ += v.size();
+ template <typename T>
+ void WriteVector(const base::Vector<T> v) {
+ base::Vector<const byte> bytes = base::Vector<const byte>::cast(v);
+ DCHECK_GE(current_size(), bytes.size());
+ if (bytes.size() > 0) {
+ memcpy(current_location(), bytes.begin(), bytes.size());
+ pos_ += bytes.size();
}
if (v8_flags.trace_wasm_serialization) {
- StdoutStream{} << "wrote vector of " << v.size() << " elements"
+ StdoutStream{} << "wrote vector of " << v.size()
+ << " elements (total size " << bytes.size() << " bytes)"
<< std::endl;
}
}
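The templated WriteVector reinterprets a typed vector as raw bytes before copying, which is what later lets the serializer write the WellKnownImport statuses and the tiering budget directly. A standalone sketch of that byte-cast append, assuming trivially copyable elements (the buffer is a stand-in for the Writer):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <vector>

// Appends a typed span to a byte buffer by reinterpreting it as raw bytes,
// which is legal for trivially copyable element types.
template <typename T>
void AppendAsBytes(std::vector<uint8_t>& out, const T* data, std::size_t count) {
  static_assert(std::is_trivially_copyable_v<T>);
  const std::size_t bytes = count * sizeof(T);
  const std::size_t old_size = out.size();
  out.resize(old_size + bytes);
  if (bytes > 0) std::memcpy(out.data() + old_size, data, bytes);
}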
@@ -187,7 +191,8 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
#endif
}
-constexpr size_t kHeaderSize = sizeof(size_t); // total code size
+constexpr size_t kHeaderSize = sizeof(size_t) + // total code size
+ sizeof(bool); // all functions validated
constexpr size_t kCodeHeaderSize = sizeof(uint8_t) + // code kind
sizeof(int) + // offset of constant pool
@@ -200,6 +205,7 @@ constexpr size_t kCodeHeaderSize = sizeof(uint8_t) + // code kind
sizeof(int) + // code size
sizeof(int) + // reloc size
sizeof(int) + // source positions size
+ sizeof(int) + // inlining positions size
sizeof(int) + // protected instructions size
sizeof(WasmCode::Kind) + // code kind
sizeof(ExecutionTier); // tier
@@ -275,7 +281,8 @@ static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
- NativeModuleSerializer(const NativeModule*, base::Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModule*, base::Vector<WasmCode* const>,
+ base::Vector<WellKnownImport const>);
NativeModuleSerializer(const NativeModuleSerializer&) = delete;
NativeModuleSerializer& operator=(const NativeModuleSerializer&) = delete;
@@ -286,17 +293,22 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
size_t MeasureCode(const WasmCode*) const;
void WriteHeader(Writer*, size_t total_code_size);
void WriteCode(const WasmCode*, Writer*);
+ void WriteTieringBudget(Writer* writer);
const NativeModule* const native_module_;
const base::Vector<WasmCode* const> code_table_;
+ const base::Vector<WellKnownImport const> import_statuses_;
bool write_called_ = false;
size_t total_written_code_ = 0;
int num_turbofan_functions_ = 0;
};
NativeModuleSerializer::NativeModuleSerializer(
- const NativeModule* module, base::Vector<WasmCode* const> code_table)
- : native_module_(module), code_table_(code_table) {
+ const NativeModule* module, base::Vector<WasmCode* const> code_table,
+ base::Vector<WellKnownImport const> import_statuses)
+ : native_module_(module),
+ code_table_(code_table),
+ import_statuses_(import_statuses) {
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
@@ -310,6 +322,7 @@ size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
}
return kCodeHeaderSize + code->instructions().size() +
code->reloc_info().size() + code->source_positions().size() +
+ code->inlining_positions().size() +
code->protected_instructions_data().size();
}
@@ -318,6 +331,11 @@ size_t NativeModuleSerializer::Measure() const {
for (WasmCode* code : code_table_) {
size += MeasureCode(code);
}
+ // Add the size of the well-known imports status.
+ size += import_statuses_.size() * sizeof(WellKnownImport);
+ // Add the size of the tiering budget.
+ size += native_module_->module()->num_declared_functions * sizeof(uint32_t);
+
return size;
}
@@ -327,6 +345,22 @@ void NativeModuleSerializer::WriteHeader(Writer* writer,
// handler was used or not when serializing.
writer->Write(total_code_size);
+
+ // We do not ship lazy validation, so in most cases all functions will be
+ // validated. Thus only write out a single bit instead of serializing the
+ // information per function.
+ const bool fully_validated = !v8_flags.wasm_lazy_validation;
+ writer->Write(fully_validated);
+#ifdef DEBUG
+ if (fully_validated) {
+ const WasmModule* module = native_module_->module();
+ for (auto& function : module->declared_functions()) {
+ DCHECK(module->function_was_validated(function.func_index));
+ }
+ }
+#endif
+
+ writer->WriteVector(base::VectorOf(import_statuses_));
}
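// Round trip for the new validation bit (sketch, mirroring ReadHeader below):
//   write: writer->Write(!v8_flags.wasm_lazy_validation);
//   read:  all_functions_validated_ = reader->Read<bool>();
//          if (all_functions_validated_)
//            native_module_->module()->set_all_functions_validated();
// so fully validated modules skip per-function revalidation after
// deserialization.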
void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
@@ -367,6 +401,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->instructions().length());
writer->Write(code->reloc_info().length());
writer->Write(code->source_positions().length());
+ writer->Write(code->inlining_positions().length());
writer->Write(code->protected_instructions_data().length());
writer->Write(code->kind());
writer->Write(code->tier());
@@ -376,9 +411,11 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
byte* code_start = serialized_code_start;
size_t code_size = code->instructions().size();
writer->Skip(code_size);
- // Write the reloc info, source positions, and protected code.
+ // Write the reloc info, source positions, inlining positions and protected
+ // code.
writer->WriteVector(code->reloc_info());
writer->WriteVector(code->source_positions());
+ writer->WriteVector(code->inlining_positions());
writer->WriteVector(code->protected_instructions_data());
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || \
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_RISCV32 || \
@@ -446,6 +483,12 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
total_written_code_ += code_size;
}
+void NativeModuleSerializer::WriteTieringBudget(Writer* writer) {
+ writer->WriteVector(
+ base::VectorOf(native_module_->tiering_budget_array(),
+ native_module_->module()->num_declared_functions));
+}
+
bool NativeModuleSerializer::Write(Writer* writer) {
DCHECK(!write_called_);
write_called_ = true;
@@ -468,22 +511,24 @@ bool NativeModuleSerializer::Write(Writer* writer) {
// Make sure that the serialized total code size was correct.
CHECK_EQ(total_written_code_, total_code_size);
+ WriteTieringBudget(writer);
return true;
}
WasmSerializer::WasmSerializer(NativeModule* native_module)
- : native_module_(native_module),
- code_table_(native_module->SnapshotCodeTable()) {}
+ : native_module_(native_module) {
+ std::tie(code_table_, import_statuses_) = native_module->SnapshotCodeTable();
+}
size_t WasmSerializer::GetSerializedNativeModuleSize() const {
- NativeModuleSerializer serializer(native_module_,
- base::VectorOf(code_table_));
+ NativeModuleSerializer serializer(native_module_, base::VectorOf(code_table_),
+ base::VectorOf(import_statuses_));
return kHeaderSize + serializer.Measure();
}
bool WasmSerializer::SerializeNativeModule(base::Vector<byte> buffer) const {
- NativeModuleSerializer serializer(native_module_,
- base::VectorOf(code_table_));
+ NativeModuleSerializer serializer(native_module_, base::VectorOf(code_table_),
+ base::VectorOf(import_statuses_));
size_t measured_size = kHeaderSize + serializer.Measure();
if (buffer.size() < measured_size) return false;
@@ -561,6 +606,7 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
void ReadHeader(Reader* reader);
DeserializationUnit ReadCode(int fn_index, Reader* reader);
+ void ReadTieringBudget(Reader* reader);
void CopyAndRelocate(const DeserializationUnit& unit);
void Publish(std::vector<DeserializationUnit> batch);
@@ -571,6 +617,7 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
// Updated in {ReadCode}.
size_t remaining_code_size_ = 0;
+ bool all_functions_validated_ = false;
base::Vector<byte> current_code_space_;
NativeModule::JumpTablesRef current_jump_tables_;
std::vector<int> lazy_functions_;
@@ -653,6 +700,10 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
uint32_t total_fns = native_module_->num_functions();
uint32_t first_wasm_fn = native_module_->num_imported_functions();
+ if (all_functions_validated_) {
+ native_module_->module()->set_all_functions_validated();
+ }
+
WasmCodeRefScope wasm_code_ref_scope;
DeserializationQueue reloc_queue;
@@ -700,11 +751,21 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
// Wait for all tasks to finish, while participating in their work.
job_handle->Join();
+ ReadTieringBudget(reader);
return reader->current_size() == 0;
}
void NativeModuleDeserializer::ReadHeader(Reader* reader) {
remaining_code_size_ = reader->Read<size_t>();
+ all_functions_validated_ = reader->Read<bool>();
+
+ uint32_t imported = native_module_->module()->num_imported_functions;
+ if (imported > 0) {
+ base::Vector<const WellKnownImport> well_known_imports =
+ reader->ReadVector<WellKnownImport>(imported);
+ native_module_->module()->type_feedback.well_known_imports.Initialize(
+ well_known_imports);
+ }
}
DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
@@ -729,6 +790,7 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
int code_size = reader->Read<int>();
int reloc_size = reader->Read<int>();
int source_position_size = reader->Read<int>();
+ int inlining_position_size = reader->Read<int>();
int protected_instructions_size = reader->Read<int>();
WasmCode::Kind kind = reader->Read<WasmCode::Kind>();
ExecutionTier tier = reader->Read<ExecutionTier>();
@@ -738,19 +800,20 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
if (current_code_space_.size() < static_cast<size_t>(code_size)) {
// Allocate the next code space. Don't allocate more than 90% of
// {kMaxCodeSpaceSize}, to leave some space for jump tables.
- constexpr size_t kMaxReservation =
- RoundUp<kCodeAlignment>(WasmCodeAllocator::kMaxCodeSpaceSize * 9 / 10);
- size_t code_space_size = std::min(kMaxReservation, remaining_code_size_);
+ size_t max_reservation = RoundUp<kCodeAlignment>(
+ v8_flags.wasm_max_code_space_size_mb * MB * 9 / 10);
+ size_t code_space_size = std::min(max_reservation, remaining_code_size_);
std::tie(current_code_space_, current_jump_tables_) =
native_module_->AllocateForDeserializedCode(code_space_size);
DCHECK_EQ(current_code_space_.size(), code_space_size);
- DCHECK(current_jump_tables_.is_valid());
+ CHECK(current_jump_tables_.is_valid());
}
DeserializationUnit unit;
unit.src_code_buffer = reader->ReadVector<byte>(code_size);
auto reloc_info = reader->ReadVector<byte>(reloc_size);
auto source_pos = reader->ReadVector<byte>(source_position_size);
+ auto inlining_pos = reader->ReadVector<byte>(inlining_position_size);
auto protected_instructions =
reader->ReadVector<byte>(protected_instructions_size);
@@ -763,7 +826,7 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
fn_index, instructions, stack_slot_count, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comment_offset, unpadded_binary_size, protected_instructions,
- reloc_info, source_pos, kind, tier);
+ reloc_info, source_pos, inlining_pos, kind, tier);
unit.jump_tables = current_jump_tables_;
return unit;
}
@@ -823,6 +886,19 @@ void NativeModuleDeserializer::CopyAndRelocate(
unit.code->instructions().size());
}
+void NativeModuleDeserializer::ReadTieringBudget(Reader* reader) {
+ size_t size_of_tiering_budget =
+ native_module_->module()->num_declared_functions * sizeof(uint32_t);
+ if (size_of_tiering_budget > reader->current_size()) {
+ return;
+ }
+ base::Vector<const byte> serialized_budget =
+ reader->ReadVector<const byte>(size_of_tiering_budget);
+
+ memcpy(native_module_->tiering_budget_array(), serialized_budget.begin(),
+ size_of_tiering_budget);
+}
+
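// Tiering-budget round trip (sketch): writer and reader use the same byte
// count, and the reader tolerates snapshots that end before the budget:
//
//   size_t budget_bytes = num_declared_functions * sizeof(uint32_t);
//   write: WriteVector(base::VectorOf(tiering_budget_array(),
//                                     num_declared_functions));
//   read:  if (budget_bytes <= reader->current_size())
//            memcpy(tiering_budget_array(), serialized_budget.begin(),
//                   budget_bytes);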
void NativeModuleDeserializer::Publish(std::vector<DeserializationUnit> batch) {
DCHECK(!batch.empty());
std::vector<std::unique_ptr<WasmCode>> codes;
@@ -858,17 +934,17 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
auto owned_wire_bytes = base::OwnedVector<uint8_t>::Of(wire_bytes_vec);
// TODO(titzer): module features should be part of the serialization format.
- WasmEngine* wasm_engine = GetWasmEngine();
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
ModuleResult decode_result = DecodeWasmModule(
- enabled_features, owned_wire_bytes.start(), owned_wire_bytes.end(), false,
+ enabled_features, owned_wire_bytes.as_vector(), false,
i::wasm::kWasmOrigin, isolate->counters(), isolate->metrics_recorder(),
isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
- DecodingMethod::kDeserialize, wasm_engine->allocator());
+ DecodingMethod::kDeserialize);
if (decode_result.failed()) return {};
std::shared_ptr<WasmModule> module = std::move(decode_result).value();
CHECK_NOT_NULL(module);
+ WasmEngine* wasm_engine = GetWasmEngine();
auto shared_native_module = wasm_engine->MaybeGetNativeModule(
module->origin, owned_wire_bytes.as_vector(), isolate);
if (shared_native_module == nullptr) {
@@ -881,7 +957,7 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
isolate, enabled_features, std::move(module), code_size_estimate);
// We have to assign a compilation ID here, as it is required for a
// potential re-compilation, e.g. triggered by
- // {TierDownAllModulesPerIsolate}. The value is -2 so that it is different
+ // {EnterDebuggingForIsolate}. The value is -2 so that it is different
// than the compilation ID of actual compilations, and also different than
// the sentinel value of the CompilationState.
shared_native_module->compilation_state()->set_compilation_id(-2);
@@ -891,13 +967,13 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Reader reader(data + WasmSerializer::kHeaderSize);
bool error = !deserializer.Read(&reader);
if (error) {
- wasm_engine->UpdateNativeModuleCache(error, &shared_native_module,
- isolate);
+ wasm_engine->UpdateNativeModuleCache(
+ error, std::move(shared_native_module), isolate);
return {};
}
shared_native_module->compilation_state()->InitializeAfterDeserialization(
deserializer.lazy_functions(), deserializer.eager_functions());
- wasm_engine->UpdateNativeModuleCache(error, &shared_native_module, isolate);
+ wasm_engine->UpdateNativeModuleCache(error, shared_native_module, isolate);
}
Handle<Script> script =
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index dc33dcf5cc..9613e9951c 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE WasmSerializer {
// The {WasmCodeRefScope} keeps the pointers in {code_table_} alive.
WasmCodeRefScope code_ref_scope_;
std::vector<WasmCode*> code_table_;
+ std::vector<WellKnownImport> import_statuses_;
};
// Support for deserializing WebAssembly {NativeModule} objects.
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index 0b74af4c70..5a2b8f6a90 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -141,10 +141,11 @@ bool IsNullSentinel(HeapType type) {
bool ValidSubtypeDefinition(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
- TypeDefinition::Kind sub_kind = sub_module->types[subtype_index].kind;
- TypeDefinition::Kind super_kind = super_module->types[supertype_index].kind;
- if (sub_kind != super_kind) return false;
- switch (sub_kind) {
+ const TypeDefinition& subtype = sub_module->types[subtype_index];
+ const TypeDefinition& supertype = super_module->types[supertype_index];
+ if (subtype.kind != supertype.kind) return false;
+ if (supertype.is_final) return false;
+ switch (subtype.kind) {
case TypeDefinition::kFunction:
return ValidFunctionSubtypeDefinition(subtype_index, supertype_index,
sub_module, super_module);
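// Effect of the new {is_final} check (sketch): a declared subtype relation is
// now rejected whenever the supertype is final, independent of structural
// compatibility, e.g. for two struct types with identical fields:
//
//   super_module->types[supertype_index].is_final == true
//     => ValidSubtypeDefinition(subtype_index, supertype_index, ...) == false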
@@ -214,19 +215,11 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsHeapSubtypeOfImpl(
case HeapType::kI31:
case HeapType::kStruct:
case HeapType::kArray:
- if (v8_flags.wasm_gc_structref_as_dataref &&
- sub_heap.representation() == HeapType::kArray) {
- // TODO(7748): Remove temporary workaround for backwards compatibility.
- return super_heap == HeapType::kArray ||
- super_heap == HeapType::kStruct || super_heap == HeapType::kEq ||
- super_heap == HeapType::kAny;
- }
return super_heap == sub_heap || super_heap == HeapType::kEq ||
super_heap == HeapType::kAny;
case HeapType::kString:
- // stringref is a subtype of anyref under wasm-gc.
- return sub_heap == super_heap ||
- (v8_flags.experimental_wasm_gc && super_heap == HeapType::kAny);
+ // stringref is a subtype of anyref.
+ return sub_heap == super_heap || super_heap == HeapType::kAny;
case HeapType::kStringViewWtf8:
case HeapType::kStringViewWtf16:
case HeapType::kStringViewIter:
@@ -263,10 +256,7 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsHeapSubtypeOfImpl(
case HeapType::kFunc:
return sub_module->has_signature(sub_index);
case HeapType::kStruct:
- if (!v8_flags.wasm_gc_structref_as_dataref) {
- return sub_module->has_struct(sub_index);
- }
- V8_FALLTHROUGH;
+ return sub_module->has_struct(sub_index);
case HeapType::kEq:
case HeapType::kAny:
return !sub_module->has_signature(sub_index);
@@ -352,16 +342,17 @@ HeapType::Representation CommonAncestor(uint32_t type_index1,
}
switch (kind1) {
case TypeDefinition::kFunction:
- DCHECK_EQ(kind2, kind1);
- return HeapType::kFunc;
- case TypeDefinition::kStruct:
- if (v8_flags.wasm_gc_structref_as_dataref) {
- DCHECK_NE(kind2, TypeDefinition::kFunction);
- return HeapType::kStruct;
+ switch (kind2) {
+ case TypeDefinition::kFunction:
+ return HeapType::kFunc;
+ case TypeDefinition::kStruct:
+ case TypeDefinition::kArray:
+ return HeapType::kBottom;
}
+ case TypeDefinition::kStruct:
switch (kind2) {
case TypeDefinition::kFunction:
- UNREACHABLE();
+ return HeapType::kBottom;
case TypeDefinition::kStruct:
return HeapType::kStruct;
case TypeDefinition::kArray:
@@ -370,10 +361,9 @@ HeapType::Representation CommonAncestor(uint32_t type_index1,
case TypeDefinition::kArray:
switch (kind2) {
case TypeDefinition::kFunction:
- UNREACHABLE();
+ return HeapType::kBottom;
case TypeDefinition::kStruct:
- return v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
- : HeapType::kEq;
+ return HeapType::kEq;
case TypeDefinition::kArray:
return HeapType::kArray;
}
@@ -382,21 +372,67 @@ HeapType::Representation CommonAncestor(uint32_t type_index1,
// Returns the least common ancestor of a generic HeapType {heap1}, and
// another HeapType {heap2}.
-// TODO(7748): This function sometimes assumes that incompatible types cannot be
-// compared, in some cases explicitly and in others implicitly. Make it
-// consistent.
HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
HeapType heap2,
const WasmModule* module2) {
DCHECK(heap1.is_generic());
switch (heap1.representation()) {
- case HeapType::kFunc:
- DCHECK(IsHeapSubtypeOf(heap2, heap1, module2, module2));
- return HeapType::kFunc;
+ case HeapType::kFunc: {
+ if (heap2 == HeapType::kFunc || heap2 == HeapType::kNoFunc ||
+ (heap2.is_index() && module2->has_signature(heap2.ref_index()))) {
+ return HeapType::kFunc;
+ } else {
+ return HeapType::kBottom;
+ }
+ }
+ case HeapType::kAny: {
+ switch (heap2.representation()) {
+ case HeapType::kI31:
+ case HeapType::kNone:
+ case HeapType::kEq:
+ case HeapType::kStruct:
+ case HeapType::kArray:
+ case HeapType::kAny:
+ case HeapType::kString:
+ return HeapType::kAny;
+ case HeapType::kFunc:
+ case HeapType::kExtern:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
+ default:
+ return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
+ : HeapType::kAny;
+ }
+ }
case HeapType::kEq: {
- return IsHeapSubtypeOf(heap2, heap1, module2, module2)
- ? heap1.representation()
- : HeapType::kAny;
+ switch (heap2.representation()) {
+ case HeapType::kI31:
+ case HeapType::kNone:
+ case HeapType::kEq:
+ case HeapType::kStruct:
+ case HeapType::kArray:
+ return HeapType::kEq;
+ case HeapType::kAny:
+ case HeapType::kString:
+ return HeapType::kAny;
+ case HeapType::kFunc:
+ case HeapType::kExtern:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
+ default:
+ return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
+ : HeapType::kEq;
+ }
}
case HeapType::kI31:
switch (heap2.representation()) {
@@ -408,12 +444,17 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
case HeapType::kArray:
return HeapType::kEq;
case HeapType::kAny:
+ case HeapType::kString:
return HeapType::kAny;
case HeapType::kFunc:
case HeapType::kExtern:
case HeapType::kNoExtern:
case HeapType::kNoFunc:
- UNREACHABLE();
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
default:
return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
: HeapType::kEq;
@@ -424,23 +465,25 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
case HeapType::kNone:
return HeapType::kStruct;
case HeapType::kArray:
- return v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
- : HeapType::kEq;
case HeapType::kI31:
case HeapType::kEq:
return HeapType::kEq;
case HeapType::kAny:
+ case HeapType::kString:
return HeapType::kAny;
case HeapType::kFunc:
case HeapType::kExtern:
case HeapType::kNoExtern:
case HeapType::kNoFunc:
- UNREACHABLE();
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
default:
- return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
- : module2->has_struct(heap2.ref_index()) ? HeapType::kStruct
- : v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
- : HeapType::kEq;
+ return module2->has_struct(heap2.ref_index()) ? HeapType::kStruct
+ : module2->has_array(heap2.ref_index()) ? HeapType::kEq
+ : HeapType::kBottom;
}
case HeapType::kArray:
switch (heap2.representation()) {
@@ -448,33 +491,29 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
case HeapType::kNone:
return HeapType::kArray;
case HeapType::kStruct:
- return v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
- : HeapType::kEq;
case HeapType::kI31:
case HeapType::kEq:
return HeapType::kEq;
case HeapType::kAny:
+ case HeapType::kString:
return HeapType::kAny;
case HeapType::kFunc:
case HeapType::kExtern:
case HeapType::kNoExtern:
case HeapType::kNoFunc:
- UNREACHABLE();
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
default:
- return module2->has_array(heap2.ref_index()) ? HeapType::kArray
- : module2->has_struct(heap2.ref_index())
- ? (v8_flags.wasm_gc_structref_as_dataref
- ? HeapType::kStruct
- : HeapType::kEq)
- : HeapType::kBottom;
+ return module2->has_array(heap2.ref_index()) ? HeapType::kArray
+ : module2->has_struct(heap2.ref_index()) ? HeapType::kEq
+ : HeapType::kBottom;
}
- case HeapType::kAny:
- return HeapType::kAny;
case HeapType::kBottom:
return HeapType::kBottom;
case HeapType::kNone:
- return heap2.representation();
- case HeapType::kNoFunc:
switch (heap2.representation()) {
case HeapType::kArray:
case HeapType::kNone:
@@ -482,25 +521,63 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
case HeapType::kI31:
case HeapType::kEq:
case HeapType::kAny:
+ case HeapType::kString:
+ return heap2.representation();
case HeapType::kExtern:
case HeapType::kNoExtern:
- UNREACHABLE();
case HeapType::kNoFunc:
- return HeapType::kNoFunc;
case HeapType::kFunc:
- return HeapType::kFunc;
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
default:
return module2->has_signature(heap2.ref_index())
- ? heap2.representation()
- : HeapType::kBottom;
+ ? HeapType::kBottom
+ : heap2.representation();
}
+ case HeapType::kNoFunc:
+ return (heap2 == HeapType::kNoFunc || heap2 == HeapType::kFunc ||
+ (heap2.is_index() && module2->has_signature(heap2.ref_index())))
+ ? heap2.representation()
+ : HeapType::kBottom;
case HeapType::kNoExtern:
- return heap2.representation() == HeapType::kExtern ? HeapType::kExtern
- : HeapType::kNoExtern;
+ return heap2 == HeapType::kExtern || heap2 == HeapType::kNoExtern
+ ? heap2.representation()
+ : HeapType::kBottom;
case HeapType::kExtern:
- return HeapType::kExtern;
- case HeapType::kString:
+ return heap2 == HeapType::kExtern || heap2 == HeapType::kNoExtern
+ ? HeapType::kExtern
+ : HeapType::kBottom;
+ case HeapType::kString: {
+ switch (heap2.representation()) {
+ case HeapType::kI31:
+ case HeapType::kEq:
+ case HeapType::kStruct:
+ case HeapType::kArray:
+ case HeapType::kAny:
+ return HeapType::kAny;
+ case HeapType::kNone:
+ case HeapType::kString:
+ return HeapType::kString;
+ case HeapType::kFunc:
+ case HeapType::kExtern:
+ case HeapType::kNoExtern:
+ case HeapType::kNoFunc:
+ case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf8:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kBottom:
+ return HeapType::kBottom;
+ default:
+ return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
+ : HeapType::kAny;
+ }
+ }
case HeapType::kStringViewIter:
+ case HeapType::kStringViewWtf16:
+ case HeapType::kStringViewWtf8:
return heap1 == heap2 ? heap1.representation() : HeapType::kBottom;
default:
UNREACHABLE();
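// A few joins implied by the rewritten cases above (sketch; nullability is
// handled by the caller in Union below):
//   CommonAncestorWithGeneric(func,   extern, m) == kBottom   // disjoint
//   CommonAncestorWithGeneric(string, struct, m) == kAny
//   CommonAncestorWithGeneric(i31,    array,  m) == kEq
//   CommonAncestorWithGeneric(none,   any,    m) == kAny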
@@ -523,21 +600,23 @@ V8_EXPORT_PRIVATE TypeInModule Union(ValueType type1, ValueType type2,
if (heap1 == heap2 && module1 == module2) {
return {ValueType::RefMaybeNull(heap1, nullability), module1};
}
+ HeapType::Representation result_repr;
+ const WasmModule* result_module;
if (heap1.is_generic()) {
- return {ValueType::RefMaybeNull(
- CommonAncestorWithGeneric(heap1, heap2, module2), nullability),
- module1};
+ result_repr = CommonAncestorWithGeneric(heap1, heap2, module2);
+ result_module = module2;
} else if (heap2.is_generic()) {
- return {ValueType::RefMaybeNull(
- CommonAncestorWithGeneric(heap2, heap1, module1), nullability),
- module1};
+ result_repr = CommonAncestorWithGeneric(heap2, heap1, module1);
+ result_module = module1;
} else {
- return {ValueType::RefMaybeNull(
- CommonAncestor(heap1.ref_index(), heap2.ref_index(), module1,
- module2),
- nullability),
- module1};
+ result_repr =
+ CommonAncestor(heap1.ref_index(), heap2.ref_index(), module1, module2);
+ result_module = module1;
}
+ return {result_repr == HeapType::kBottom
+ ? kWasmBottom
+ : ValueType::RefMaybeNull(result_repr, nullability),
+ result_module};
}
TypeInModule Intersection(ValueType type1, ValueType type2,
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 07630118c2..b247a8ddf2 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -48,20 +48,18 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool EquivalentTypes(ValueType type1,
// - rtt1 <: rtt2 iff rtt1 ~ rtt2.
// For heap types, the following subtyping rules hold:
// - The abstract heap types form the following type hierarchies:
-// TODO(7748): abstract ref.data should become ref.struct.
//
-// any func extern
-// | | |
-// eq nofunc noextern
-// / \
-// i31 data
-// | |
-// | array
-// \ /
-// none
+// any func extern
+// / \ | |
+// eq \ nofunc noextern
+// / | \ \
+// i31 array struct string
+// \___|______|_____/
+// |
+// none
//
// - All functions are subtypes of func.
-// - All structs are subtypes of data.
+// - All structs are subtypes of struct.
// - All arrays are subtypes of array.
// - An indexed heap type h1 is a subtype of indexed heap type h2 if h2 is
// transitively an explicit canonical supertype of h1.
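// Examples implied by the updated hierarchy (sketch):
//   array <: eq <: any        struct <: eq        i31 <: eq
//   string <: any (but not <: eq)
//   none <: i31/array/struct/string (and transitively eq/any)
//   func and extern head their own hierarchies and share no supertype with
//   any.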
@@ -82,6 +80,13 @@ V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
return IsSubtypeOfImpl(subtype, supertype, module, module);
}
+V8_INLINE bool TypesUnrelated(ValueType type1, ValueType type2,
+ const WasmModule* module1,
+ const WasmModule* module2) {
+ return !IsSubtypeOf(type1, type2, module1, module2) &&
+ !IsSubtypeOf(type2, type1, module2, module1);
+}
+
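// Example (sketch): with the hierarchies above, TypesUnrelated holds for
// funcref vs. externref and for structref vs. arrayref, but not for arrayref
// vs. eqref, where one direction of IsSubtypeOf succeeds.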
V8_INLINE bool IsHeapSubtypeOf(HeapType subtype, HeapType supertype,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -142,6 +147,7 @@ inline std::ostream& operator<<(std::ostream& oss, TypeInModule type) {
<< reinterpret_cast<intptr_t>(type.module);
}
+// Returns {kWasmBottom} if the union of {type1} and {type2} is not defined.
V8_EXPORT_PRIVATE TypeInModule Union(ValueType type1, ValueType type2,
const WasmModule* module1,
const WasmModule* module2);
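// Usage sketch for the new bottom behavior (hypothetical call site):
//   TypeInModule u = Union(kWasmFuncRef, kWasmExternRef, module, module);
//   // u.type == kWasmBottom: the func and extern hierarchies share no common
//   // supertype; the old code DCHECKed that this combination never occurred
//   // instead of handling it.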
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index 51c0adedde..afd106b017 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -37,13 +37,13 @@ inline const char* ExecutionTierToString(ExecutionTier tier) {
// the code also contains breakpoints, and {kForStepping} for code that is
// flooded with breakpoints.
enum ForDebugging : int8_t {
- kNoDebugging = 0,
+ kNotForDebugging = 0,
kForDebugging,
kWithBreakpoints,
kForStepping
};
-enum TieringState : int8_t { kTieredUp, kTieredDown };
+enum DebugState : bool { kNotDebugging = false, kDebugging = true };
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 443784b3b2..5878732d0b 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -113,7 +113,8 @@ class WasmValue {
FOREACH_PRIMITIVE_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
- WasmValue(byte* raw_bytes, ValueType type) : type_(type), bit_pattern_{} {
+ WasmValue(const uint8_t* raw_bytes, ValueType type)
+ : type_(type), bit_pattern_{} {
DCHECK(type_.is_numeric());
memcpy(bit_pattern_, raw_bytes, type.value_kind_size());
}
diff --git a/deps/v8/src/wasm/well-known-imports.cc b/deps/v8/src/wasm/well-known-imports.cc
new file mode 100644
index 0000000000..beeb6b54aa
--- /dev/null
+++ b/deps/v8/src/wasm/well-known-imports.cc
@@ -0,0 +1,64 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/well-known-imports.h"
+
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8::internal::wasm {
+
+const char* WellKnownImportName(WellKnownImport wki) {
+ switch (wki) {
+ case WellKnownImport::kUninstantiated:
+ return "uninstantiated";
+ case WellKnownImport::kGeneric:
+ return "generic";
+ case WellKnownImport::kStringToLowerCaseStringref:
+ return "String.toLowerCase";
+ }
+}
+
+WellKnownImportsList::UpdateResult WellKnownImportsList::Update(
+ base::Vector<WellKnownImport> entries) {
+ DCHECK_EQ(entries.size(), static_cast<size_t>(size_));
+ {
+ base::MutexGuard lock(&mutex_);
+ for (size_t i = 0; i < entries.size(); i++) {
+ WellKnownImport entry = entries[i];
+ DCHECK(entry != WellKnownImport::kUninstantiated);
+ WellKnownImport old = statuses_[i].load(std::memory_order_relaxed);
+ if (old == WellKnownImport::kGeneric) continue;
+ if (old == entry) continue;
+ if (old == WellKnownImport::kUninstantiated) {
+ statuses_[i].store(entry, std::memory_order_relaxed);
+ } else {
+ // To avoid having to clear Turbofan code multiple times, we give up
+ // entirely once the first problem occurs.
+ // This is a heuristic; we could also choose to make finer-grained
+ // decisions and only set {statuses_[i] = kGeneric}. We expect that
+ // this case won't ever happen for production modules, so guarding
+ // against pathological cases seems more important than being lenient
+ // towards almost-well-behaved modules.
+ for (size_t j = 0; j < entries.size(); j++) {
+ statuses_[j].store(WellKnownImport::kGeneric,
+ std::memory_order_relaxed);
+ }
+ return UpdateResult::kFoundIncompatibility;
+ }
+ }
+ }
+ return UpdateResult::kOK;
+}
+
+void WellKnownImportsList::Initialize(
+ base::Vector<const WellKnownImport> entries) {
+ DCHECK_EQ(entries.size(), static_cast<size_t>(size_));
+ for (size_t i = 0; i < entries.size(); i++) {
+ DCHECK_EQ(WellKnownImport::kUninstantiated,
+ statuses_[i].load(std::memory_order_relaxed));
+ statuses_[i].store(entries[i], std::memory_order_relaxed);
+ }
+}
+
+} // namespace v8::internal::wasm
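// Behavior sketch for WellKnownImportsList::Update above: statuses only move
// away from kUninstantiated, and a single conflicting observation poisons the
// whole list:
//
//   Initialize(2);                        // {kUninstantiated, kUninstantiated}
//   Update({kStringToLowerCaseStringref, kGeneric})  -> kOK
//   Update({kStringToLowerCaseStringref, kStringToLowerCaseStringref})
//                                                    -> kOK (generic stays)
//   Update({kGeneric, kGeneric})  -> kFoundIncompatibility; every entry is
//                                    now kGeneric.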
diff --git a/deps/v8/src/wasm/well-known-imports.h b/deps/v8/src/wasm/well-known-imports.h
new file mode 100644
index 0000000000..11c7b46580
--- /dev/null
+++ b/deps/v8/src/wasm/well-known-imports.h
@@ -0,0 +1,82 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_WELL_KNOWN_IMPORTS_H_
+#define V8_WASM_WELL_KNOWN_IMPORTS_H_
+
+#include <memory>
+
+#include "src/base/atomicops.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/vector.h"
+#include "src/common/globals.h"
+
+namespace v8::internal::wasm {
+
+enum class WellKnownImport : uint8_t {
+ kUninstantiated,
+ kGeneric,
+ kStringToLowerCaseStringref,
+};
+
+class NativeModule;
+
+// For debugging/tracing.
+const char* WellKnownImportName(WellKnownImport wki);
+
+class WellKnownImportsList {
+ public:
+ enum class UpdateResult : bool { kFoundIncompatibility, kOK };
+
+ WellKnownImportsList() = default;
+
+ // Regular initialization. Allocates size-dependent internal data.
+ void Initialize(int size) {
+#if DEBUG
+ DCHECK_EQ(-1, size_);
+ size_ = size;
+#endif
+ static_assert(static_cast<int>(WellKnownImport::kUninstantiated) == 0);
+ statuses_ = std::make_unique<std::atomic<WellKnownImport>[]>(size);
+#if !defined(__cpp_lib_atomic_value_initialization) || \
+ __cpp_lib_atomic_value_initialization < 201911L
+ for (int i = 0; i < size; i++) {
+ std::atomic_init(&statuses_.get()[i], WellKnownImport::kUninstantiated);
+ }
+#endif
+ }
+
+ // Intended for deserialization. Does not check consistency with code.
+ void Initialize(base::Vector<const WellKnownImport> entries);
+
+ WellKnownImport get(int index) const {
+ DCHECK_LT(index, size_);
+ return statuses_[index].load(std::memory_order_relaxed);
+ }
+
+ V8_WARN_UNUSED_RESULT UpdateResult
+ Update(base::Vector<WellKnownImport> entries);
+
+ // If you need this mutex and the NativeModule's allocation_mutex_, always
+ // get the latter first.
+ base::Mutex* mutex() { return &mutex_; }
+
+ private:
+ // This mutex guards {statuses_}, for operations that need to ensure that
+ // they see a consistent view of {statuses_} for some period of time.
+ base::Mutex mutex_;
+ std::unique_ptr<std::atomic<WellKnownImport>[]> statuses_;
+
+#if DEBUG
+ int size_{-1};
+#endif
+};
+
+} // namespace v8::internal::wasm
+
+#endif // V8_WASM_WELL_KNOWN_IMPORTS_H_
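// Intended lifecycle (sketch; most call sites are outside this file):
//   Initialize(int size)   -- sized once per module, all kUninstantiated
//   Update(entries)        -- records observed import statuses; any conflict
//                             degrades the whole list to kGeneric
//   Initialize(entries)    -- deserialization path (see ReadHeader in
//                             wasm-serialization.cc)
//   get(index)             -- relaxed atomic read, usable without the mutex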
diff --git a/deps/v8/src/web-snapshot/OWNERS b/deps/v8/src/web-snapshot/OWNERS
deleted file mode 100644
index 5e08666da6..0000000000
--- a/deps/v8/src/web-snapshot/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-cbruni@chromium.org
-marja@chromium.org
-leszeks@chromium.org
-syg@chromium.org
-verwaest@chromium.org
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
deleted file mode 100644
index e686cec30f..0000000000
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ /dev/null
@@ -1,4289 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/web-snapshot/web-snapshot.h"
-
-#include <limits>
-
-#include "include/v8-isolate.h"
-#include "include/v8-local-handle.h"
-#include "include/v8-object.h"
-#include "include/v8-primitive.h"
-#include "include/v8-script.h"
-#include "src/api/api-inl.h"
-#include "src/handles/handles.h"
-#include "src/logging/runtime-call-stats-scope.h"
-#include "src/objects/bigint.h"
-#include "src/objects/contexts-inl.h"
-#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/script.h"
-
-namespace v8 {
-namespace internal {
-
-constexpr uint8_t WebSnapshotSerializerDeserializer::kMagicNumber[4];
-constexpr int WebSnapshotSerializerDeserializer::kBuiltinObjectCount;
-
-// When encountering an error during deserializing, we note down the error but
-// don't bail out from processing the snapshot further. This is to speed up
-// deserialization; the error case is now slower since we don't bail out, but
-// the non-error case is faster, since we don't repeatedly check for errors.
-// (Invariant: we might fill our internal data structures with arbitrary data,
-// but it shouldn't have an observable effect.)
-
-// This doesn't increase the complexity of processing the data in a robust and
-// secure way. We cannot trust the data anyway, so every upcoming byte can have
-// an arbitrary value, not depending on whether or not we've encountered an
-// error before.
-void WebSnapshotSerializerDeserializer::Throw(const char* message) {
- if (error_message_ != nullptr) {
- return;
- }
- error_message_ = message;
- if (!isolate_->has_pending_exception()) {
- isolate_->Throw(*factory()->NewError(
- MessageTemplate::kWebSnapshotError,
- factory()->NewStringFromAsciiChecked(error_message_)));
- }
-}
-
-void WebSnapshotSerializerDeserializer::IterateBuiltinObjects(
- std::function<void(Handle<String>, Handle<HeapObject>)> func) {
- // TODO(v8:11525): Add more builtins.
- auto roots = ReadOnlyRoots(isolate_);
-
- func(handle(roots.Error_string(), isolate_),
- handle(isolate_->context().error_function(), isolate_));
-
- func(factory()->NewStringFromAsciiChecked("Error.prototype"),
- handle(isolate_->context().error_function().instance_prototype(),
- isolate_));
-
- func(handle(roots.Object_string(), isolate_),
- handle(isolate_->context().object_function(), isolate_));
-
- func(factory()->NewStringFromAsciiChecked("Object.prototype"),
- handle(isolate_->context().initial_object_prototype(), isolate_));
-
- func(handle(roots.Function_string(), isolate_),
- handle(isolate_->context().function_function(), isolate_));
-
- func(factory()->NewStringFromAsciiChecked("Function.prototype"),
- handle(isolate_->context().function_prototype(), isolate_));
-
- // TODO(v8:11525): There are no obvious names for these, since AsyncFunction
- // etc are not properties of the global object.
-
- func(factory()->NewStringFromAsciiChecked("AsyncFunction"),
- handle(isolate_->context().async_function_constructor(), isolate_));
-
- func(
- factory()->NewStringFromAsciiChecked("AsyncFunction"),
- handle(
- isolate_->context().async_function_constructor().instance_prototype(),
- isolate_));
-
- auto generator_function =
- handle(JSFunction::cast(isolate_->context()
- .generator_function_map()
- .constructor_or_back_pointer()),
- isolate_);
- func(factory()->NewStringFromAsciiChecked("GeneratorFunction"),
- generator_function);
-
- func(factory()->NewStringFromAsciiChecked("GeneratorFunction.prototype"),
- handle(generator_function->instance_prototype(), isolate_));
-
- auto async_generator_function =
- handle(JSFunction::cast(isolate_->context()
- .async_generator_function_map()
- .constructor_or_back_pointer()),
- isolate_);
- func(factory()->NewStringFromAsciiChecked("AsyncGeneratorFunction"),
- async_generator_function);
-
- func(factory()->NewStringFromAsciiChecked("AsyncGeneratorFunction.prototype"),
- handle(async_generator_function->instance_prototype(), isolate_));
-
- static_assert(kBuiltinObjectCount == 12);
-}
-
-uint8_t WebSnapshotSerializerDeserializer::FunctionKindToFunctionFlags(
- FunctionKind kind) {
- // TODO(v8:11525): Support more function kinds.
- switch (kind) {
- case FunctionKind::kNormalFunction:
- case FunctionKind::kArrowFunction:
- case FunctionKind::kGeneratorFunction:
- case FunctionKind::kAsyncFunction:
- case FunctionKind::kAsyncArrowFunction:
- case FunctionKind::kAsyncGeneratorFunction:
- case FunctionKind::kBaseConstructor:
- case FunctionKind::kDefaultBaseConstructor:
- case FunctionKind::kDerivedConstructor:
- case FunctionKind::kDefaultDerivedConstructor:
- case FunctionKind::kConciseMethod:
- case FunctionKind::kAsyncConciseMethod:
- case FunctionKind::kStaticConciseMethod:
- case FunctionKind::kStaticAsyncConciseMethod:
- case FunctionKind::kStaticConciseGeneratorMethod:
- case FunctionKind::kStaticAsyncConciseGeneratorMethod:
- break;
- default:
- Throw("Unsupported function kind");
- }
- auto flags = AsyncFunctionBitField::encode(IsAsyncFunction(kind)) |
- GeneratorFunctionBitField::encode(IsGeneratorFunction(kind)) |
- ArrowFunctionBitField::encode(IsArrowFunction(kind)) |
- MethodBitField::encode(IsConciseMethod(kind)) |
- StaticBitField::encode(IsStatic(kind)) |
- ClassConstructorBitField::encode(IsClassConstructor(kind)) |
- DefaultConstructorBitField::encode(IsDefaultConstructor(kind)) |
- DerivedConstructorBitField::encode(IsDerivedConstructor(kind));
- return flags;
-}
-
-// TODO(v8:11525): Optionally, use an enum instead.
-FunctionKind WebSnapshotSerializerDeserializer::FunctionFlagsToFunctionKind(
- uint8_t flags) {
- FunctionKind kind;
- if (IsFunctionOrMethod(flags)) {
- if (ArrowFunctionBitField::decode(flags) && MethodBitField::decode(flags)) {
- kind = FunctionKind::kInvalid;
- } else {
- uint32_t index = AsyncFunctionBitField::decode(flags) << 0 |
- GeneratorFunctionBitField::decode(flags) << 1 |
- (ArrowFunctionBitField::decode(flags) ||
- StaticBitField::decode(flags))
- << 2 |
- MethodBitField::decode(flags) << 3;
- static const FunctionKind kFunctionKinds[] = {
- // kNormalFunction
- // is_generator = false
- FunctionKind::kNormalFunction, // is_async = false
- FunctionKind::kAsyncFunction, // is_async = true
- // is_generator = true
- FunctionKind::kGeneratorFunction, // is_async = false
- FunctionKind::kAsyncGeneratorFunction, // is_async = true
-
- // kArrowFunction
- // is_generator = false
- FunctionKind::kArrowFunction, // is_async = false
- FunctionKind::kAsyncArrowFunction, // is_async = true
- // is_generator = true
- FunctionKind::kInvalid, // is_async = false
- FunctionKind::kInvalid, // is_async = true
-
- // kNonStaticMethod
- // is_generator = false
- FunctionKind::kConciseMethod, // is_async = false
- FunctionKind::kAsyncConciseMethod, // is_async = true
- // is_generator = true
- // TODO(v8::11525) Support FunctionKind::kConciseGeneratorMethod.
- FunctionKind::kInvalid, // is_async = false
- // TODO(v8::11525) Support FunctionKind::kAsyncConciseGeneratorMethod.
- FunctionKind::kInvalid, // is_async = true
-
- // kStaticMethod
- // is_generator = false
- FunctionKind::kStaticConciseMethod, // is_async = false
- FunctionKind::kStaticAsyncConciseMethod, // is_async = true
- // is_generator = true
- FunctionKind::kStaticConciseGeneratorMethod, // is_async = false
- FunctionKind::kStaticAsyncConciseGeneratorMethod // is_async = true
- };
- kind = kFunctionKinds[index];
- }
- } else if (IsConstructor(flags)) {
- static const FunctionKind kFunctionKinds[] = {
- // is_derived = false
- FunctionKind::kBaseConstructor, // is_default = false
- FunctionKind::kDefaultBaseConstructor, // is_default = true
- // is_derived = true
- FunctionKind::kDerivedConstructor, // is_default = false
- FunctionKind::kDefaultDerivedConstructor // is_default = true
- };
- kind = kFunctionKinds[flags >> DefaultConstructorBitField::kShift];
- } else {
- kind = FunctionKind::kInvalid;
- }
- if (kind == FunctionKind::kInvalid) {
- Throw("Invalid function flags\n");
- }
- return kind;
-}
-
-bool WebSnapshotSerializerDeserializer::IsFunctionOrMethod(uint8_t flags) {
- uint32_t mask = AsyncFunctionBitField::kMask |
- GeneratorFunctionBitField::kMask |
- ArrowFunctionBitField::kMask | MethodBitField::kMask |
- StaticBitField::kMask;
- return (flags & mask) == flags;
-}
-
-bool WebSnapshotSerializerDeserializer::IsConstructor(uint8_t flags) {
- uint32_t mask = ClassConstructorBitField::kMask |
- DefaultConstructorBitField::kMask |
- DerivedConstructorBitField::kMask;
- return ClassConstructorBitField::decode(flags) && (flags & mask) == flags;
-}
-
-uint8_t WebSnapshotSerializerDeserializer::GetDefaultAttributeFlags() {
- auto flags = ReadOnlyBitField::encode(false) |
- ConfigurableBitField::encode(true) |
- EnumerableBitField::encode(true);
- return flags;
-}
-
-uint8_t WebSnapshotSerializerDeserializer::AttributesToFlags(
- PropertyDetails details) {
- auto flags = ReadOnlyBitField::encode(details.IsReadOnly()) |
- ConfigurableBitField::encode(details.IsConfigurable()) |
- EnumerableBitField::encode(details.IsEnumerable());
- return flags;
-}
-
-PropertyAttributes WebSnapshotSerializerDeserializer::FlagsToAttributes(
- uint8_t flags) {
- int attributes = ReadOnlyBitField::decode(flags) * READ_ONLY +
- !ConfigurableBitField::decode(flags) * DONT_DELETE +
- !EnumerableBitField::decode(flags) * DONT_ENUM;
- return PropertyAttributesFromInt(attributes);
-}
-
-WebSnapshotSerializer::WebSnapshotSerializer(v8::Isolate* isolate)
- : WebSnapshotSerializer(reinterpret_cast<v8::internal::Isolate*>(isolate)) {
-}
-
-WebSnapshotSerializer::WebSnapshotSerializer(Isolate* isolate)
- : WebSnapshotSerializerDeserializer(isolate),
- string_serializer_(isolate_, nullptr),
- symbol_serializer_(isolate_, nullptr),
- bigint_serializer_(isolate_, nullptr),
- map_serializer_(isolate_, nullptr),
- builtin_object_serializer_(isolate_, nullptr),
- context_serializer_(isolate_, nullptr),
- function_serializer_(isolate_, nullptr),
- class_serializer_(isolate_, nullptr),
- array_serializer_(isolate_, nullptr),
- typed_array_serializer_(isolate_, nullptr),
- array_buffer_serializer_(isolate_, nullptr),
- data_view_serializer_(isolate_, nullptr),
- object_serializer_(isolate_, nullptr),
- export_serializer_(isolate_, nullptr),
- external_object_ids_(isolate_->heap()),
- string_ids_(isolate_->heap()),
- symbol_ids_(isolate_->heap()),
- bigint_ids_(isolate_->heap()),
- map_ids_(isolate_->heap()),
- context_ids_(isolate_->heap()),
- function_ids_(isolate_->heap()),
- class_ids_(isolate_->heap()),
- array_ids_(isolate_->heap()),
- typed_array_ids_(isolate->heap()),
- array_buffer_ids_(isolate->heap()),
- data_view_ids_(isolate->heap()),
- object_ids_(isolate_->heap()),
- builtin_object_to_name_(isolate_->heap()),
- builtin_object_ids_(isolate_->heap()),
- all_strings_(isolate_->heap()) {
- auto empty_array_list = factory()->empty_array_list();
- strings_ = empty_array_list;
- symbols_ = empty_array_list;
- bigints_ = empty_array_list;
- maps_ = empty_array_list;
- contexts_ = empty_array_list;
- functions_ = empty_array_list;
- classes_ = empty_array_list;
- arrays_ = empty_array_list;
- array_buffers_ = empty_array_list;
- typed_arrays_ = empty_array_list;
- data_views_ = empty_array_list;
- objects_ = empty_array_list;
-}
-
-WebSnapshotSerializer::~WebSnapshotSerializer() {}
-
-bool WebSnapshotSerializer::TakeSnapshot(
- Handle<Object> object, MaybeHandle<FixedArray> maybe_externals,
- WebSnapshotData& data_out) {
- if (string_ids_.size() > 0) {
- Throw("Can't reuse WebSnapshotSerializer");
- return false;
- }
- if (!maybe_externals.is_null()) {
- ShallowDiscoverExternals(*maybe_externals.ToHandleChecked());
- }
-
- v8::Local<v8::Context> context =
- reinterpret_cast<v8::Isolate*>(isolate_)->GetCurrentContext();
- ShallowDiscoverBuiltinObjects(context);
-
- if (object->IsHeapObject()) Discover(Handle<HeapObject>::cast(object));
-
- ConstructSource();
- // The export is serialized with the empty string as name; we need to
- // "discover" the name here.
- DiscoverString(factory()->empty_string());
- SerializeExport(object, factory()->empty_string());
-
- WriteSnapshot(data_out.buffer, data_out.buffer_size);
-
- if (has_error()) {
- isolate_->ReportPendingMessages();
- return false;
- }
- return true;
-}
-
-bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
- v8::Local<v8::PrimitiveArray> exports,
- WebSnapshotData& data_out) {
- if (string_ids_.size() > 0) {
- Throw("Can't reuse WebSnapshotSerializer");
- return false;
- }
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
-
- ShallowDiscoverBuiltinObjects(context);
-
- Handle<FixedArray> export_objects =
- isolate_->factory()->NewFixedArray(exports->Length());
- for (int i = 0, length = exports->Length(); i < length; ++i) {
- v8::Local<v8::String> str =
- exports->Get(v8_isolate, i)->ToString(context).ToLocalChecked();
- if (str->Length() == 0) {
- continue;
- }
- // Discover the export name.
- DiscoverString(Handle<String>::cast(Utils::OpenHandle(*str)));
- v8::ScriptCompiler::Source source(str);
- auto script = ScriptCompiler::Compile(context, &source).ToLocalChecked();
- v8::MaybeLocal<v8::Value> script_result = script->Run(context);
- v8::Local<v8::Object> v8_object;
- if (script_result.IsEmpty() ||
- !script_result.ToLocalChecked()->ToObject(context).ToLocal(
- &v8_object)) {
- Throw("Exported object not found");
- return false;
- }
- auto object = Handle<JSObject>::cast(Utils::OpenHandle(*v8_object));
- export_objects->set(i, *object);
- Discover(object);
- // The error messages will be confusing if we continue running code when
- // already in the error state.
- if (has_error()) {
- isolate_->ReportPendingMessages();
- return false;
- }
- }
-
- ConstructSource();
-
- for (int i = 0, length = exports->Length(); i < length; ++i) {
- v8::Local<v8::String> str =
- exports->Get(v8_isolate, i)->ToString(context).ToLocalChecked();
- if (str->Length() == 0) {
- continue;
- }
- SerializeExport(handle(export_objects->get(i), isolate_),
- Handle<String>::cast(Utils::OpenHandle(*str)));
- }
-
- WriteSnapshot(data_out.buffer, data_out.buffer_size);
-
- if (has_error()) {
- isolate_->ReportPendingMessages();
- return false;
- }
- return true;
-}
-
-void WebSnapshotSerializer::SerializePendingItems() {
- // The information about string reference counts is now complete. The strings
- // in strings_ are not in place and can be serialized now. The in-place
- // strings will be serialized as part of their respective objects.
- for (int i = 0; i < strings_->Length(); ++i) {
- Handle<String> string = handle(String::cast(strings_->Get(i)), isolate_);
- SerializeString(string, string_serializer_);
- }
-
- for (int i = 0; i < symbols_->Length(); ++i) {
- Handle<Symbol> symbol = handle(Symbol::cast(symbols_->Get(i)), isolate_);
- SerializeSymbol(symbol);
- }
-
- for (int i = 0; i < bigints_->Length(); ++i) {
- Handle<BigInt> bigint = handle(BigInt::cast(bigints_->Get(i)), isolate_);
- SerializeBigInt(bigint);
- }
-
- for (int i = 0; i < maps_->Length(); ++i) {
- Handle<Map> map = handle(Map::cast(maps_->Get(i)), isolate_);
- SerializeMap(map);
- }
- for (auto name_id : builtin_objects_) {
- SerializeBuiltinObject(name_id);
- }
-
- for (int i = 0; i < contexts_->Length(); ++i) {
- Handle<Context> context =
- handle(Context::cast(contexts_->Get(i)), isolate_);
- SerializeContext(context, static_cast<uint32_t>(i));
- }
-
- // Serialize the items in the reverse order. The items at the end of the
- // functions_ etc get lower IDs and vice versa. IDs which items use for
- // referring to each other are reversed by Get<item>Id() functions.
- for (int i = functions_->Length() - 1; i >= 0; --i) {
- Handle<JSFunction> function =
- handle(JSFunction::cast(functions_->Get(i)), isolate_);
- SerializeFunction(function);
- }
- for (int i = classes_->Length() - 1; i >= 0; --i) {
- Handle<JSFunction> function =
- handle(JSFunction::cast(classes_->Get(i)), isolate_);
- SerializeClass(function);
- }
- for (int i = arrays_->Length() - 1; i >= 0; --i) {
- Handle<JSArray> array = handle(JSArray::cast(arrays_->Get(i)), isolate_);
- SerializeArray(array);
- }
- for (int i = array_buffers_->Length() - 1; i >= 0; --i) {
- Handle<JSArrayBuffer> array_buffer =
- handle(JSArrayBuffer::cast(array_buffers_->Get(i)), isolate_);
- SerializeArrayBuffer(array_buffer);
- }
- for (int i = typed_arrays_->Length() - 1; i >= 0; --i) {
- Handle<JSTypedArray> typed_array =
- handle(JSTypedArray::cast(typed_arrays_->Get(i)), isolate_);
- SerializeTypedArray(typed_array);
- }
- for (int i = data_views_->Length() - 1; i >= 0; --i) {
- Handle<JSDataView> data_view =
- handle(JSDataView::cast(data_views_->Get(i)), isolate_);
- SerializeDataView(data_view);
- }
- for (int i = objects_->Length() - 1; i >= 0; --i) {
- Handle<JSObject> object =
- handle(JSObject::cast(objects_->Get(i)), isolate_);
- SerializeObject(object);
- }
-}
-
-// Format (full snapshot):
-// - Magic number (4 bytes)
-// - String count
-// - For each string:
-// - Serialized string
-// - Symbol count
-// - For each symbol:
-// - Serialized symbol
-// - Builtin object count
-// - For each builtin object:
-// - Id of the builtin object name string
-// - Shape count
-// - For each shape:
-// - Serialized shape
-// - Context count
-// - For each context:
-// - Serialized context
-// - Function count
-// - For each function:
-// - Serialized function
-// - Object count
-// - For each object:
-// - Serialized object
-// - Export count
-// - For each export:
-// - Serialized export
-void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
- size_t& buffer_size) {
- if (has_error()) {
- return;
- }
- SerializePendingItems();
-
- ValueSerializer total_serializer(isolate_, nullptr);
- size_t needed_size =
- sizeof(kMagicNumber) + string_serializer_.buffer_size_ +
- symbol_serializer_.buffer_size_ + bigint_serializer_.buffer_size_ +
- builtin_object_serializer_.buffer_size_ + map_serializer_.buffer_size_ +
- context_serializer_.buffer_size_ + function_serializer_.buffer_size_ +
- class_serializer_.buffer_size_ + array_serializer_.buffer_size_ +
- array_buffer_serializer_.buffer_size_ +
- typed_array_serializer_.buffer_size_ +
- data_view_serializer_.buffer_size_ + object_serializer_.buffer_size_ +
- export_serializer_.buffer_size_ + 14 * sizeof(uint32_t);
- if (total_serializer.ExpandBuffer(needed_size).IsNothing()) {
- Throw("Out of memory");
- return;
- }
-
- total_serializer.WriteRawBytes(kMagicNumber, 4);
- WriteObjects(total_serializer, string_count(), string_serializer_, "strings");
- WriteObjects(total_serializer, symbol_count(), symbol_serializer_, "symbols");
- WriteObjects(total_serializer, bigint_count(), bigint_serializer_, "bigints");
- WriteObjects(total_serializer, builtin_object_count(),
- builtin_object_serializer_, "builtin_objects");
- WriteObjects(total_serializer, map_count(), map_serializer_, "maps");
- WriteObjects(total_serializer, context_count(), context_serializer_,
- "contexts");
- WriteObjects(total_serializer, function_count(), function_serializer_,
- "functions");
- WriteObjects(total_serializer, array_count(), array_serializer_, "arrays");
- WriteObjects(total_serializer, array_buffer_count(), array_buffer_serializer_,
- "array buffers");
- WriteObjects(total_serializer, typed_array_count(), typed_array_serializer_,
- "typed arrays");
- WriteObjects(total_serializer, data_view_count(), data_view_serializer_,
- "data views");
- WriteObjects(total_serializer, object_count(), object_serializer_, "objects");
- WriteObjects(total_serializer, class_count(), class_serializer_, "classes");
- WriteObjects(total_serializer, export_count_, export_serializer_, "exports");
-
- if (has_error()) {
- return;
- }
-
- auto result = total_serializer.Release();
- buffer = result.first;
- buffer_size = result.second;
-}
-void WebSnapshotSerializer::WriteObjects(ValueSerializer& destination,
- size_t count, ValueSerializer& source,
- const char* name) {
- if (count > std::numeric_limits<uint32_t>::max()) {
- Throw("Too many objects");
- return;
- }
- destination.WriteUint32(static_cast<uint32_t>(count));
- destination.WriteRawBytes(source.buffer_, source.buffer_size_);
-}
-
-bool WebSnapshotSerializer::InsertIntoIndexMap(ObjectCacheIndexMap& map,
- HeapObject heap_object,
- uint32_t& id) {
- DisallowGarbageCollection no_gc;
- int index_out;
- bool found = map.LookupOrInsert(heap_object, &index_out);
- id = static_cast<uint32_t>(index_out);
- return found;
-}
-
-// Format:
-// - Length
-// - Raw bytes (data)
-void WebSnapshotSerializer::SerializeString(Handle<String> string,
- ValueSerializer& serializer) {
- DisallowGarbageCollection no_gc;
- String::FlatContent flat = string->GetFlatContent(no_gc);
- DCHECK(flat.IsFlat());
- if (flat.IsOneByte()) {
- base::Vector<const uint8_t> chars = flat.ToOneByteVector();
- serializer.WriteUint32(chars.length());
- serializer.WriteRawBytes(chars.begin(), chars.length() * sizeof(uint8_t));
- } else if (flat.IsTwoByte()) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- v8::Local<v8::String> api_string = Utils::ToLocal(string);
- int length = api_string->Utf8Length(v8_isolate);
- std::unique_ptr<char[]> buffer(new char[length]);
- api_string->WriteUtf8(v8_isolate, buffer.get(), length);
- serializer.WriteUint32(length);
- serializer.WriteRawBytes(buffer.get(), length * sizeof(uint8_t));
- } else {
- UNREACHABLE();
- }
-}
-
-// Format (serialized symbol):
-// - 0 if the symbol is non-global and there's no description, 1 if the symbol
-// is non-global and there is a description, 2 if the symbol is global (there
-// must be a description).
-void WebSnapshotSerializer::SerializeSymbol(Handle<Symbol> symbol) {
- if (symbol->description().IsUndefined()) {
- CHECK(!symbol->is_in_public_symbol_table());
- symbol_serializer_.WriteUint32(SymbolType::kNonGlobalNoDesription);
- } else {
- symbol_serializer_.WriteUint32(symbol->is_in_public_symbol_table()
- ? SymbolType::kGlobal
- : SymbolType::kNonGlobal);
- WriteStringId(handle(String::cast(symbol->description()), isolate_),
- symbol_serializer_);
- }
-}
-
-// Format (serialized bigint)
-// - BigIntFlags, including sign and byte length.
-// - digit bytes.
-void WebSnapshotSerializer::SerializeBigInt(Handle<BigInt> bigint) {
- uint32_t flags = BigIntSignAndLengthToFlags(bigint);
- bigint_serializer_.WriteUint32(flags);
- int byte_length = BigIntLengthBitField::decode(flags);
- uint8_t* dest;
- if (bigint_serializer_.ReserveRawBytes(byte_length).To(&dest)) {
- bigint->SerializeDigits(dest);
- } else {
- Throw("Serialize BigInt failed");
- return;
- }
-}
-
-bool WebSnapshotSerializer::ShouldBeSerialized(Handle<Name> key) {
- // Don't serialize class_positions_symbol property in Class.
- if (key->Equals(*factory()->class_positions_symbol())) {
- return false;
- }
- return true;
-}
-
-// Format (serialized shape):
-// - PropertyAttributesType
-// - Property count
-// - For each property
-// - Name: STRING_ID + String id or SYMBOL_ID + Symbol id or in-place string
-// - If the PropertyAttributesType is CUSTOM: attributes
-// - __proto__: Serialized value
-void WebSnapshotSerializer::SerializeMap(Handle<Map> map) {
- DCHECK(!map->is_dictionary_map());
- int first_custom_index = -1;
- std::vector<Handle<Name>> keys;
- std::vector<uint8_t> attributes;
- keys.reserve(map->NumberOfOwnDescriptors());
- attributes.reserve(map->NumberOfOwnDescriptors());
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
-
- // If there are non-field properties in a map that doesn't allow them, i.e.,
- // a non-function map, DiscoverMap has already thrown.
- if (details.location() != PropertyLocation::kField) {
- continue;
- }
-
- Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
- isolate_);
- if (!ShouldBeSerialized(key)) {
- continue;
- }
- keys.push_back(key);
- if (first_custom_index >= 0 || details.IsReadOnly() ||
- !details.IsConfigurable() || details.IsDontEnum()) {
- if (first_custom_index == -1) first_custom_index = i.as_int();
- attributes.push_back(AttributesToFlags(details));
- }
- }
-
- map_serializer_.WriteUint32(first_custom_index == -1
- ? PropertyAttributesType::DEFAULT
- : PropertyAttributesType::CUSTOM);
-
- map_serializer_.WriteUint32(static_cast<uint32_t>(keys.size()));
-
- uint8_t default_flags = GetDefaultAttributeFlags();
- for (size_t i = 0; i < keys.size(); ++i) {
- if (keys[i]->IsString()) {
- WriteStringMaybeInPlace(Handle<String>::cast(keys[i]), map_serializer_);
- } else if (keys[i]->IsSymbol()) {
- map_serializer_.WriteByte(ValueType::SYMBOL_ID);
- map_serializer_.WriteUint32(GetSymbolId(Symbol::cast(*keys[i])));
- } else {
- // This error should've been recognized in the discovery phase.
- CHECK(false);
- }
- if (first_custom_index >= 0) {
- if (static_cast<int>(i) < first_custom_index) {
- map_serializer_.WriteByte(default_flags);
- } else {
- map_serializer_.WriteByte(attributes[i - first_custom_index]);
- }
- }
- }
-
- WriteValue(handle(map->prototype(), isolate_), map_serializer_);
-}
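
As an aside on the shape format above: per-property flag bytes are only emitted starting at the first property whose attributes differ from the defaults; earlier properties get the default flag byte, and if every property is default, no flags are written at all. A minimal standalone C++ sketch of that first_custom_index scheme, using plain STL containers and hypothetical flag values in place of V8's PropertyDetails / AttributesToFlags:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical per-property attributes standing in for PropertyDetails.
    struct Prop {
      bool read_only = false;
      bool dont_enum = false;
      bool configurable = true;
    };

    // Hypothetical flag encoding; the real one lives in AttributesToFlags().
    uint8_t ToFlags(const Prop& p) {
      return (p.read_only ? 1 : 0) | (p.dont_enum ? 2 : 0) |
             (p.configurable ? 0 : 4);
    }

    int main() {
      const uint8_t kDefaultFlags = 0;  // writable, enumerable, configurable
      std::vector<Prop> props = {{}, {}, {true, false, true}};  // only #2 custom

      // Collection phase, mirroring the loop over IterateOwnDescriptors().
      int first_custom_index = -1;
      std::vector<uint8_t> attributes;
      for (int i = 0; i < static_cast<int>(props.size()); ++i) {
        const Prop& p = props[i];
        if (first_custom_index >= 0 || p.read_only || p.dont_enum ||
            !p.configurable) {
          if (first_custom_index == -1) first_custom_index = i;
          attributes.push_back(ToFlags(p));
        }
      }

      // Emission phase: default flags before first_custom_index, recorded
      // flags from there on; nothing at all if every property is default.
      for (int i = 0;
           first_custom_index >= 0 && i < static_cast<int>(props.size()); ++i) {
        uint8_t flag = i < first_custom_index
                           ? kDefaultFlags
                           : attributes[i - first_custom_index];
        std::cout << "property " << i << " -> flags " << static_cast<int>(flag)
                  << "\n";
      }
      return 0;
    }
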
-
-void WebSnapshotSerializer::SerializeBuiltinObject(uint32_t name_id) {
- builtin_object_serializer_.WriteUint32(name_id);
-}
-
-// Construct the minimal source string to be included in the snapshot. Maintain
-// the "inner function is textually inside its outer function" relationship.
-// Example:
-// Input:
-// Full source: abcdefghijklmnopqrstuvwxyzåäö
-// Functions: 11111111 22222222 3
-// Inner functions: 44 55 666
-// Output:
-// Constructed source: defghijkstuvwxyzö
-// Functions: 11111111222222223
-// Inner functions 44 55 666
-void WebSnapshotSerializer::ConstructSource() {
- if (source_intervals_.empty()) {
- return;
- }
-
- Handle<String> source_string = factory()->empty_string();
- int current_interval_start = 0;
- int current_interval_end = 0;
- for (const auto& interval : source_intervals_) {
- DCHECK_LE(current_interval_start, interval.first); // Iterated in order.
- DCHECK_LE(interval.first, interval.second);
- if (interval.second <= current_interval_end) {
- // This interval is fully within the current interval. We don't need to
- // include any new source code, just record the position conversion.
- auto offset_within_parent = interval.first - current_interval_start;
- source_offset_to_compacted_source_offset_[interval.first] =
- source_offset_to_compacted_source_offset_[current_interval_start] +
- offset_within_parent;
- continue;
- }
- // Start a new interval.
- current_interval_start = interval.first;
- current_interval_end = interval.second;
- source_offset_to_compacted_source_offset_[current_interval_start] =
- source_string->length();
- MaybeHandle<String> new_source_string = factory()->NewConsString(
- source_string,
- factory()->NewSubString(full_source_, current_interval_start,
- current_interval_end));
- if (!new_source_string.ToHandle(&source_string)) {
- Throw("Cannot construct source string");
- return;
- }
- }
- DiscoverString(source_string);
- bool in_place = false;
- source_id_ = GetStringId(source_string, in_place);
- DCHECK(!in_place);
-}
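
The interval walk above can be illustrated with a self-contained sketch (C++17, plain STL types, data chosen to match the ASCII example in the comment): intervals are visited in order of start offset, nested intervals reuse the offset mapping of their enclosing interval, and each new interval appends its substring to the compacted source.

    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>

    int main() {
      // Intervals (start, end) over the full source, iterated in sorted order,
      // mirroring source_intervals_ in ConstructSource.
      std::string full_source = "abcdefghijklmnopqrstuvwxyz";
      std::set<std::pair<int, int>> intervals = {{3, 11}, {4, 6}, {18, 26}};

      std::string compacted;
      std::map<int, int> offset_map;  // original offset -> compacted offset
      int cur_start = 0, cur_end = 0;
      for (const auto& [start, end] : intervals) {
        if (end <= cur_end) {
          // Nested interval: no new source text, just record the mapping.
          offset_map[start] = offset_map[cur_start] + (start - cur_start);
          continue;
        }
        cur_start = start;
        cur_end = end;
        offset_map[cur_start] = static_cast<int>(compacted.size());
        compacted += full_source.substr(cur_start, cur_end - cur_start);
      }
      std::printf("compacted: %s\n", compacted.c_str());  // defghijkstuvwxyz
      for (const auto& [orig, comp] : offset_map)
        std::printf("offset %d -> %d\n", orig, comp);
      return 0;
    }
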
-
-void WebSnapshotSerializer::SerializeFunctionProperties(
- Handle<JSFunction> function, ValueSerializer& serializer) {
- Handle<Map> map(function->map(), isolate_);
- if (function->map() ==
- isolate_->context().get(function->shared().function_map_index())) {
- serializer.WriteUint32(0);
- return;
- } else {
- serializer.WriteUint32(GetMapId(function->map()) + 1);
- }
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- if (details.location() == PropertyLocation::kDescriptor) {
- continue;
- }
- if (!ShouldBeSerialized(
- handle(map->instance_descriptors().GetKey(i), isolate_))) {
- continue;
- }
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value = JSObject::FastPropertyAt(
- isolate_, function, details.representation(), field_index);
- WriteValue(value, serializer);
- }
-}
-
-void WebSnapshotSerializer::SerializeFunctionInfo(Handle<JSFunction> function,
- ValueSerializer& serializer) {
- if (!function->shared().HasSourceCode()) {
- Throw("Function without source code");
- return;
- }
-
- {
- DisallowGarbageCollection no_gc;
- Context context = function->context();
- if (context.IsNativeContext() || context.IsScriptContext()) {
- serializer.WriteUint32(0);
- } else {
- DCHECK(context.IsFunctionContext() || context.IsBlockContext());
- uint32_t context_id = GetContextId(context);
- serializer.WriteUint32(context_id + 1);
- }
- }
-
- serializer.WriteUint32(source_id_);
- Handle<Script> script =
- handle(Script::cast(function->shared().script()), isolate_);
- int start = function->shared().StartPosition();
- int end = function->shared().EndPosition();
- int final_offset =
- source_offset_to_compacted_source_offset_[script_offsets_[script->id()] +
- start];
- serializer.WriteUint32(final_offset);
- serializer.WriteUint32(end - start);
-
- serializer.WriteUint32(
- function->shared().internal_formal_parameter_count_without_receiver());
- serializer.WriteByte(FunctionKindToFunctionFlags(function->shared().kind()));
-
- if (function->has_prototype_slot() && function->has_instance_prototype()) {
- DisallowGarbageCollection no_gc;
- JSObject prototype = JSObject::cast(function->instance_prototype());
- uint32_t prototype_id = GetObjectId(prototype);
- serializer.WriteUint32(prototype_id + 1);
- } else {
- serializer.WriteUint32(0);
- }
-}
-
-void WebSnapshotSerializer::ShallowDiscoverExternals(FixedArray externals) {
- DisallowGarbageCollection no_gc;
- for (int i = 0; i < externals.length(); i++) {
- Object object = externals.get(i);
- if (!object.IsHeapObject()) continue;
- uint32_t unused_id = 0;
- InsertIntoIndexMap(external_object_ids_, HeapObject::cast(object),
- unused_id);
- }
-}
-
-void WebSnapshotSerializer::ShallowDiscoverBuiltinObjects(
- v8::Local<v8::Context> context) {
- // Fill in builtin_object_to_name_. Don't discover the builtin objects or
- // their names, so that they won't be included in the snapshot unless needed.
-
- builtin_object_name_strings_ =
- isolate_->factory()->NewFixedArray(kBuiltinObjectCount);
-
- int i = 0;
- IterateBuiltinObjects([&](Handle<String> name, Handle<HeapObject> object) {
- builtin_object_name_strings_->set(i, *name);
- uint32_t id;
- bool already_exists =
- InsertIntoIndexMap(builtin_object_to_name_, *object, id);
- CHECK(!already_exists);
- CHECK_EQ(static_cast<int>(id), i);
- ++i;
- });
- DCHECK_EQ(i, kBuiltinObjectCount);
-}
-
-void WebSnapshotSerializer::Discover(Handle<HeapObject> start_object) {
- // The object discovery phase assigns IDs for objects / functions / classes /
- // arrays and discovers outgoing references from them. This is needed so that
- // e.g., we know all functions upfront and can construct the source code that
- // covers them before serializing the functions.
-
- discovery_queue_.push(start_object);
-
- while (!discovery_queue_.empty()) {
- const Handle<HeapObject>& object = discovery_queue_.front();
- switch (object->map().instance_type()) {
- case JS_FUNCTION_TYPE:
- DiscoverFunction(Handle<JSFunction>::cast(object));
- break;
- case JS_CLASS_CONSTRUCTOR_TYPE:
- DiscoverClass(Handle<JSFunction>::cast(object));
- break;
- case JS_OBJECT_TYPE:
- case JS_OBJECT_PROTOTYPE_TYPE:
- DiscoverObject(Handle<JSObject>::cast(object));
- break;
- case JS_ARRAY_TYPE:
- DiscoverArray(Handle<JSArray>::cast(object));
- break;
- case SYMBOL_TYPE:
- DiscoverSymbol(Handle<Symbol>::cast(object));
- break;
- case BIGINT_TYPE:
- DiscoverBigInt(Handle<BigInt>::cast(object));
- break;
- case ODDBALL_TYPE:
- case HEAP_NUMBER_TYPE:
- // Can't contain references to other objects.
- break;
- case JS_PRIMITIVE_WRAPPER_TYPE: {
- Handle<JSPrimitiveWrapper> wrapper =
- Handle<JSPrimitiveWrapper>::cast(object);
- Handle<Object> value = handle(wrapper->value(), isolate_);
- if (value->IsHeapObject()) {
- discovery_queue_.push(Handle<HeapObject>::cast(value));
- }
- break;
- }
- case JS_REG_EXP_TYPE: {
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
- Handle<String> pattern = handle(regexp->source(), isolate_);
- DiscoverString(pattern);
- Handle<String> flags_string =
- JSRegExp::StringFromFlags(isolate_, regexp->flags());
- DiscoverString(flags_string);
- break;
- }
- case JS_ARRAY_BUFFER_TYPE: {
- Handle<JSArrayBuffer> array_buffer =
- Handle<JSArrayBuffer>::cast(object);
- DiscoverArrayBuffer(array_buffer);
- break;
- }
- case JS_TYPED_ARRAY_TYPE: {
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
- DiscoverTypedArray(typed_array);
- break;
- }
- case JS_DATA_VIEW_TYPE: {
- Handle<JSDataView> data_view = Handle<JSDataView>::cast(object);
- DiscoverDataView(data_view);
- break;
- }
- default:
- if (object->IsString()) {
- // These are array elements / object properties -> allow in place
- // strings.
- DiscoverString(Handle<String>::cast(object), AllowInPlace::Yes);
- break;
- } else if (external_object_ids_.size() > 0) {
- int unused_id;
- external_object_ids_.LookupOrInsert(*object, &unused_id);
- } else {
- Throw("Unsupported object");
- }
- }
- discovery_queue_.pop();
- }
-}
-
-void WebSnapshotSerializer::DiscoverPropertyKey(Handle<Name> key) {
- if (key->IsString()) {
- DiscoverString(Handle<String>::cast(key), AllowInPlace::Yes);
- } else if (key->IsSymbol()) {
- DiscoverSymbol(Handle<Symbol>::cast(key));
- } else {
- Throw("Property key is not a String / Symbol");
- return;
- }
-}
-
-void WebSnapshotSerializer::DiscoverMap(Handle<Map> map,
- bool allow_property_in_descriptor) {
- // Dictionary map object names get discovered in DiscoverObject.
- if (map->is_dictionary_map()) {
- return;
- }
-
- uint32_t id;
- if (InsertIntoIndexMap(map_ids_, *map, id)) {
- return;
- }
- DCHECK_EQ(id, maps_->Length());
- maps_ = ArrayList::Add(isolate_, maps_, map);
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- if (details.location() != PropertyLocation::kField) {
- if (!allow_property_in_descriptor) {
- Throw("Properties which are not fields not supported");
- return;
- } else {
- continue;
- }
- }
- Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
- isolate_);
- if (ShouldBeSerialized(key)) {
- DiscoverPropertyKey(key);
- }
- }
-}
-
-void WebSnapshotSerializer::DiscoverString(Handle<String> string,
- AllowInPlace can_be_in_place) {
- // Can't contain references to other objects. We only log the existence of the
- // string itself. Internalize the strings so that we can properly track which
- // String objects are the same string.
- string = factory()->InternalizeString(string);
- auto result = all_strings_.FindOrInsert(string);
- if (can_be_in_place == AllowInPlace::Yes && !result.already_exists) {
- // This is the only reference to the string so far. Don't generate an
- // ID for it yet; only generate it when another reference to the string is
- // found.
- return;
- }
- // The string is referred to from more than one place, or in-placing is not
- // allowed -> not a candidate for writing it in-place. Generate an ID for it.
-
- // TODO(v8:11525): Allow in-place strings in more places. Heuristics for
- // when to make them in place?
- uint32_t id;
- if (InsertIntoIndexMap(string_ids_, *string, id)) {
- return;
- }
- DCHECK_EQ(id, strings_->Length());
- strings_ = ArrayList::Add(isolate_, strings_, string);
-}
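
A minimal sketch of the deferred-ID idea used here, with plain STL containers standing in for all_strings_ / string_ids_ / strings_ (names and types are illustrative, not V8's): the first reference leaves the string as an in-place candidate, and an ID is assigned only when a second reference appears or in-placing is disallowed.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    struct StringTable {
      std::unordered_set<std::string> seen;           // mirrors all_strings_
      std::unordered_map<std::string, uint32_t> ids;  // mirrors string_ids_
      std::vector<std::string> table;                 // mirrors strings_

      void Discover(const std::string& s, bool allow_in_place) {
        bool already_seen = !seen.insert(s).second;
        if (allow_in_place && !already_seen) return;  // in-place candidate
        if (ids.count(s)) return;                     // already has an ID
        ids[s] = static_cast<uint32_t>(table.size());
        table.push_back(s);
      }
    };

    int main() {
      StringTable t;
      t.Discover("x", true);  // first reference: in-place candidate, no ID
      t.Discover("y", true);
      t.Discover("x", true);  // second reference: "x" gets ID 0
      std::cout << "IDs assigned: " << t.table.size() << "\n";  // prints 1
      return 0;
    }
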
-
-void WebSnapshotSerializer::DiscoverMapForFunction(
- Handle<JSFunction> function) {
- JSObject::MigrateSlowToFast(function, 0, "Web snapshot");
- // TODO(v8:11525): Support functions with so many properties that they can't
- // be in fast mode.
- if (!function->HasFastProperties()) {
- Throw("Unsupported function with dictionary map");
- return;
- }
- if (function->map() !=
- isolate_->context().get(function->shared().function_map_index())) {
- Handle<Map> map(function->map(), isolate_);
- // We only serialize properties which are fields of the function; properties
- // stored in descriptors will be set up in CreateJSFunction.
- DiscoverMap(map, true);
- discovery_queue_.push(handle(map->prototype(), isolate_));
- // Discover property values.
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- if (details.location() == PropertyLocation::kDescriptor) {
- continue;
- }
- if (!ShouldBeSerialized(
- handle(map->instance_descriptors().GetKey(i), isolate_))) {
- continue;
- }
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value = JSObject::FastPropertyAt(
- isolate_, function, details.representation(), field_index);
- if (!value->IsHeapObject()) continue;
- discovery_queue_.push(Handle<HeapObject>::cast(value));
- }
- }
-}
-
-void WebSnapshotSerializer::DiscoverFunction(Handle<JSFunction> function) {
- if (DiscoverIfBuiltinObject(function)) {
- return;
- }
-
- uint32_t id;
- if (InsertIntoIndexMap(function_ids_, *function, id)) {
- return;
- }
-
- DCHECK_EQ(id, functions_->Length());
- functions_ = ArrayList::Add(isolate_, functions_, function);
-
- DiscoverContextAndPrototype(function);
-
- DiscoverMapForFunction(function);
- DiscoverSource(function);
-}
-
-void WebSnapshotSerializer::DiscoverClass(Handle<JSFunction> function) {
- uint32_t id;
- if (InsertIntoIndexMap(class_ids_, *function, id)) {
- return;
- }
-
- DCHECK_EQ(id, classes_->Length());
- classes_ = ArrayList::Add(isolate_, classes_, function);
-
- DiscoverContextAndPrototype(function);
-
- DiscoverMapForFunction(function);
- // TODO(v8:11525): Support class members.
- DiscoverSource(function);
-}
-
-void WebSnapshotSerializer::DiscoverContextAndPrototype(
- Handle<JSFunction> function) {
- Handle<Context> context(function->context(), isolate_);
- if (context->IsFunctionContext() || context->IsBlockContext()) {
- DiscoverContext(context);
- }
-
- if (function->has_prototype_slot() &&
- function->map().has_non_instance_prototype()) {
- Throw("Functions with non-instance prototypes not supported");
- return;
- }
-
- if (function->has_prototype_slot() && function->has_instance_prototype()) {
- Handle<JSObject> prototype = Handle<JSObject>::cast(
- handle(function->instance_prototype(), isolate_));
- discovery_queue_.push(prototype);
- }
-
- discovery_queue_.push(handle(function->map().prototype(), isolate_));
-}
-
-void WebSnapshotSerializer::DiscoverContext(Handle<Context> context) {
- // Make sure the parent context (if any) gets a smaller ID. This ensures the
- // parent context references in the snapshot are not deferred.
- if (!context->previous().IsNativeContext() &&
- !context->previous().IsScriptContext()) {
- DiscoverContext(handle(context->previous(), isolate_));
- }
-
- uint32_t id;
- if (InsertIntoIndexMap(context_ids_, *context, id)) return;
-
- DCHECK_EQ(id, contexts_->Length());
- contexts_ = ArrayList::Add(isolate_, contexts_, context);
-
- Handle<ScopeInfo> scope_info = handle(context->scope_info(), isolate_);
- for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
- DiscoverString(handle(it->name(), isolate_));
- Object value =
- context->get(scope_info->ContextHeaderLength() + it->index());
- if (!value.IsHeapObject()) continue;
- discovery_queue_.push(handle(HeapObject::cast(value), isolate_));
- }
-}
-
-void WebSnapshotSerializer::DiscoverSource(Handle<JSFunction> function) {
- // A function may not have source code, e.g., when we discover the source of
- // a builtin function. SerializeFunctionInfo performs the same check, so we
- // throw the same error here to stay consistent with it.
- if (!function->shared().HasSourceCode()) {
- Throw("Function without source code");
- return;
- }
- // The functions may come from multiple scripts. Construct a combined source
- // for them by simply concatenating the sources (keeping track of where each
- // script's source starts); the combined source is later compacted by
- // ConstructSource.
- Handle<Script> script =
- handle(Script::cast(function->shared().script()), isolate_);
- Handle<String> function_script_source =
- handle(String::cast(script->source()), isolate_);
- int script_offset_int;
- if (full_source_.is_null()) {
- // This is the first script.
- script_offset_int = 0;
- full_source_ = function_script_source;
- script_offsets_.insert({script->id(), script_offset_int});
- } else {
- auto it = script_offsets_.find(script->id());
- if (it == script_offsets_.end()) {
- // This script hasn't been encountered yet and its source code has to be
- // added to full_source_.
- DCHECK(!full_source_.is_null());
- script_offset_int = full_source_->length();
- script_offsets_.insert({script->id(), script_offset_int});
- if (!factory()
- ->NewConsString(full_source_, function_script_source)
- .ToHandle(&full_source_)) {
- Throw("Can't construct source");
- return;
- }
- } else {
- // The script source is already somewhere in full_source_.
- script_offset_int = it->second;
- }
- }
- source_intervals_.emplace(
- script_offset_int + function->shared().StartPosition(),
- script_offset_int + function->shared().EndPosition());
-}
-
-void WebSnapshotSerializer::DiscoverArray(Handle<JSArray> array) {
- uint32_t id;
- if (InsertIntoIndexMap(array_ids_, *array, id)) {
- return;
- }
- DCHECK_EQ(id, arrays_->Length());
- arrays_ = ArrayList::Add(isolate_, arrays_, array);
-
- DiscoverElements(array);
-}
-
-void WebSnapshotSerializer::DiscoverElements(Handle<JSObject> object) {
- auto elements_kind = object->GetElementsKind();
-
- DisallowGarbageCollection no_gc;
-
- // TODO(v8:11525): Handle sealed & frozen elements correctly. (Also: handle
- // sealed & frozen objects.)
- switch (elements_kind) {
- case PACKED_SMI_ELEMENTS:
- case PACKED_ELEMENTS:
- case HOLEY_SMI_ELEMENTS:
- case HOLEY_ELEMENTS:
- case PACKED_SEALED_ELEMENTS:
- case PACKED_FROZEN_ELEMENTS:
- case HOLEY_SEALED_ELEMENTS:
- case HOLEY_FROZEN_ELEMENTS: {
- FixedArray elements = FixedArray::cast(object->elements());
- for (int i = 0; i < elements.length(); ++i) {
- Object object = elements.get(i);
- if (!object.IsHeapObject()) continue;
- discovery_queue_.push(handle(HeapObject::cast(object), isolate_));
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(object->element_dictionary(), isolate_);
- ReadOnlyRoots roots(isolate_);
- for (InternalIndex index : dict->IterateEntries()) {
- Handle<Object> key = handle(dict->KeyAt(index), isolate_);
- if (!dict->IsKey(roots, *key)) {
- continue;
- }
- DCHECK(key->IsNumber());
- if (key->Number() > std::numeric_limits<uint32_t>::max()) {
- // TODO(v8:11525): Support large element indices.
- Throw("Large element indices not supported");
- return;
- }
- Handle<Object> object = handle(dict->ValueAt(index), isolate_);
- if (!object->IsHeapObject()) continue;
- discovery_queue_.push(Handle<HeapObject>::cast(object));
- }
- break;
- }
- case PACKED_DOUBLE_ELEMENTS:
- case HOLEY_DOUBLE_ELEMENTS: {
- break;
- }
- default: {
- Throw("Unsupported elements");
- return;
- }
- }
-}
-
-void WebSnapshotSerializer::DiscoverArrayBuffer(
- Handle<JSArrayBuffer> array_buffer) {
- if (array_buffer->was_detached()) {
- CHECK_EQ(array_buffer->GetByteLength(), 0);
- }
- uint32_t id;
- if (InsertIntoIndexMap(array_buffer_ids_, *array_buffer, id)) {
- return;
- }
- DCHECK_EQ(id, array_buffers_->Length());
- array_buffers_ = ArrayList::Add(isolate_, array_buffers_, array_buffer);
-}
-
-void WebSnapshotSerializer::DiscoverDataView(Handle<JSDataView> data_view) {
- uint32_t id;
- if (InsertIntoIndexMap(data_view_ids_, *data_view, id)) {
- return;
- }
- DCHECK_EQ(id, data_views_->Length());
- data_views_ = ArrayList::Add(isolate_, data_views_, data_view);
- discovery_queue_.push(handle(data_view->buffer(), isolate_));
-}
-
-void WebSnapshotSerializer::DiscoverTypedArray(
- Handle<JSTypedArray> typed_array) {
- uint32_t id;
- if (InsertIntoIndexMap(typed_array_ids_, *typed_array, id)) {
- return;
- }
- DCHECK_EQ(id, typed_arrays_->Length());
- typed_arrays_ = ArrayList::Add(isolate_, typed_arrays_, typed_array);
- discovery_queue_.push(typed_array->GetBuffer());
-}
-
-template <typename T>
-void WebSnapshotSerializer::DiscoverObjectPropertiesWithDictionaryMap(T dict) {
- DisallowGarbageCollection no_gc;
-
- ReadOnlyRoots roots(isolate_);
- for (InternalIndex index : dict->IterateEntries()) {
- Handle<Object> key = handle(dict->KeyAt(index), isolate_);
- if (!dict->IsKey(roots, *key)) {
- // Ignore deleted entries.
- continue;
- }
- DiscoverPropertyKey(Handle<Name>::cast(key));
- Handle<Object> value = handle(dict->ValueAt(index), isolate_);
- if (!value->IsHeapObject()) {
- continue;
- } else {
- discovery_queue_.push(Handle<HeapObject>::cast(value));
- }
- }
-}
-
-void WebSnapshotSerializer::DiscoverObject(Handle<JSObject> object) {
- if (GetExternalId(*object)) {
- return;
- }
- if (DiscoverIfBuiltinObject(object)) {
- return;
- }
-
- uint32_t id;
- if (InsertIntoIndexMap(object_ids_, *object, id)) return;
-
- DCHECK_EQ(id, objects_->Length());
- objects_ = ArrayList::Add(isolate_, objects_, object);
-
- // TODO(v8:11525): Remove this once we allow "non-map" objects which are
- // small enough to have a fast map. Although we support objects with a
- // dictionary map now, we still check when deserializing a dictionary map
- // that the property count is bigger than kMaxNumberOfDescriptors, so
- // removing this would break deserializing prototype objects which have a
- // dictionary map with few properties.
- JSObject::MigrateSlowToFast(object, 0, "Web snapshot");
-
- Handle<Map> map(object->map(), isolate_);
- DiscoverMap(map);
-
- // Discover __proto__.
- discovery_queue_.push(handle(map->prototype(), isolate_));
-
- if (object->HasFastProperties()) {
- // Discover property values.
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value = JSObject::FastPropertyAt(
- isolate_, object, details.representation(), field_index);
- if (!value->IsHeapObject()) continue;
- discovery_queue_.push(Handle<HeapObject>::cast(value));
- }
- } else {
- ReadOnlyRoots roots(isolate_);
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- Handle<SwissNameDictionary> swiss_dictionary =
- handle(object->property_dictionary_swiss(), isolate_);
- DiscoverObjectPropertiesWithDictionaryMap(swiss_dictionary);
- } else {
- Handle<NameDictionary> dictionary =
- handle(object->property_dictionary(), isolate_);
- DiscoverObjectPropertiesWithDictionaryMap(dictionary);
- }
- }
-
- DiscoverElements(object);
-}
-
-bool WebSnapshotSerializer::DiscoverIfBuiltinObject(Handle<HeapObject> object) {
- // TODO(v8:11525): Consider speccing a set of fixed builtins (such as
- // Object.prototype) for objects which are almost always included in the
- // snapshot.
- uint32_t name_index;
- if (!GetBuiltinObjectNameIndex(*object, name_index)) {
- return false;
- }
- CHECK_LT(name_index, builtin_object_name_strings_->length());
- Handle<String> name_string = handle(
- String::cast(builtin_object_name_strings_->get(name_index)), isolate_);
- DiscoverString(name_string, AllowInPlace::No);
-
- // Ensure the builtin object reference gets included in the snapshot.
- uint32_t id;
- if (InsertIntoIndexMap(builtin_object_ids_, *object, id)) {
- // The builtin object is already referred to by something else.
- return true;
- }
- DCHECK_EQ(id, builtin_objects_.size());
-
- bool in_place = false;
- uint32_t name_id = GetStringId(name_string, in_place);
- DCHECK(!in_place);
- USE(in_place);
- builtin_objects_.push_back(name_id);
- return true;
-}
-
-void WebSnapshotSerializer::DiscoverSymbol(Handle<Symbol> symbol) {
- if (symbol->is_well_known_symbol()) {
- // TODO(v8:11525): Support well-known Symbols.
- Throw("Well known Symbols aren't supported");
- return;
- }
- uint32_t id;
- if (InsertIntoIndexMap(symbol_ids_, *symbol, id)) return;
-
- DCHECK_EQ(id, symbols_->Length());
- symbols_ = ArrayList::Add(isolate_, symbols_, symbol);
-
- if (!symbol->description().IsUndefined()) {
- DiscoverString(handle(String::cast(symbol->description()), isolate_));
- }
-}
-
-void WebSnapshotSerializer::DiscoverBigInt(Handle<BigInt> bigint) {
- uint32_t id;
- if (InsertIntoIndexMap(bigint_ids_, *bigint, id)) return;
-
- DCHECK_EQ(id, bigints_->Length());
- bigints_ = ArrayList::Add(isolate_, bigints_, bigint);
-}
-
-// Format (serialized function):
-// - 0 if there's no context, 1 + context id otherwise
-// - String id (source snippet)
-// - Start position in the source snippet
-// - Length in the source snippet
-// - Formal parameter count
-// - Flags (see FunctionFlags)
-// - 0 if there's no map, 1 + map id otherwise
-// - For each function property
-// - Serialized value
-// - Function prototype
-// TODO(v8:11525): Investigate whether the length is really needed.
-void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function) {
- SerializeFunctionInfo(function, function_serializer_);
- SerializeFunctionProperties(function, function_serializer_);
- WriteValue(handle(function->map().prototype(), isolate_),
- function_serializer_);
-}
-
-// Format (serialized class):
-// - 1 + context id
-// - String id (source snippet)
-// - Start position in the source snippet
-// - Length in the source snippet
-// - Formal parameter count
-// - Flags (see FunctionFlags)
-// - 0 if there's no map, 1 + map id otherwise
-// - For each function property
-// - Serialized value
-// - Function prototype
-void WebSnapshotSerializer::SerializeClass(Handle<JSFunction> function) {
- SerializeFunctionInfo(function, class_serializer_);
- SerializeFunctionProperties(function, class_serializer_);
- WriteValue(handle(function->map().prototype(), isolate_), class_serializer_);
- // TODO(v8:11525): Support class members.
-}
-
-// Format (serialized context):
-// - 0 if there's no parent context, 1 + parent context id otherwise
-// - Variable count
-// - For each variable:
-// - String id (name)
-// - For each variable:
-// - Serialized value
-void WebSnapshotSerializer::SerializeContext(Handle<Context> context,
- uint32_t id) {
- uint32_t parent_context_id = 0;
- if (!context->previous().IsNativeContext() &&
- !context->previous().IsScriptContext()) {
- parent_context_id = GetContextId(context->previous());
- DCHECK_LT(parent_context_id, id);
- ++parent_context_id; // 0 is reserved for "no parent context".
- }
-
- // TODO(v8:11525): Use less space for encoding the context type.
- if (context->IsFunctionContext()) {
- context_serializer_.WriteUint32(ContextType::FUNCTION);
- } else if (context->IsBlockContext()) {
- context_serializer_.WriteUint32(ContextType::BLOCK);
- } else {
- Throw("Unsupported context type");
- return;
- }
-
- context_serializer_.WriteUint32(parent_context_id);
-
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
- int count = scope_info->ContextLocalCount();
- context_serializer_.WriteUint32(count);
-
- for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
- // TODO(v8:11525): support parameters
- // TODO(v8:11525): distinguish variable modes
- WriteStringId(handle(it->name(), isolate_), context_serializer_);
- }
- for (auto it : ScopeInfo::IterateLocalNames(scope_info)) {
- Handle<Object> value(
- context->get(scope_info->ContextHeaderLength() + it->index()),
- isolate_);
- WriteValue(value, context_serializer_);
- }
-}
-
-template <typename T>
-void WebSnapshotSerializer::SerializeObjectPropertiesWithDictionaryMap(T dict) {
- DisallowGarbageCollection no_gc;
-
- std::vector<uint8_t> attributes;
- attributes.reserve(dict->NumberOfElements());
- HandleScope scope(isolate_);
- int first_custom_index = -1;
-
- ReadOnlyRoots roots(isolate_);
- for (InternalIndex index : dict->IterateEntries()) {
- if (!dict->IsKey(roots, dict->KeyAt(index))) {
- continue;
- }
- PropertyDetails details = dict->DetailsAt(index);
- if (first_custom_index >= 0 || details.IsReadOnly() ||
- !details.IsConfigurable() || details.IsDontEnum()) {
- if (first_custom_index == -1) first_custom_index = index.as_int();
- attributes.push_back(AttributesToFlags(details));
- }
- }
- object_serializer_.WriteUint32(first_custom_index == -1
- ? PropertyAttributesType::DEFAULT
- : PropertyAttributesType::CUSTOM);
- object_serializer_.WriteUint32(dict->NumberOfElements());
-
- uint8_t default_flags = GetDefaultAttributeFlags();
- for (InternalIndex index : dict->IterateEntries()) {
- Object key = dict->KeyAt(index);
- if (!dict->IsKey(roots, key)) {
- continue;
- }
- WriteValue(handle(key, isolate_), object_serializer_);
- WriteValue(handle(dict->ValueAt(index), isolate_), object_serializer_);
- if (first_custom_index >= 0) {
- if (index.as_int() < first_custom_index) {
- object_serializer_.WriteByte(default_flags);
- } else {
- object_serializer_.WriteByte(
- attributes[index.as_int() - first_custom_index]);
- }
- }
- }
-}
-
-// Format (serialized object):
-// - 0 if there's no shape (dictionary map), 1 + shape id otherwise
-// If has shape
-// - For each property:
-// - Serialized value
-// Else (dictionary map)
-// - PropertyAttributesType
-// - Property count
-// - For each property
-// - Name: STRING_ID + String id or SYMBOL_ID + Symbol id or in-place string
-// - Serialized value
-// - If the PropertyAttributesType is CUSTOM: attributes
-// - __proto__: serialized value
-// - Elements (see serialized array)
-// TODO(v8:11525): Support packed elements with a denser format.
-void WebSnapshotSerializer::SerializeObject(Handle<JSObject> object) {
- Handle<Map> map(object->map(), isolate_);
- if (map->is_dictionary_map()) {
- object_serializer_.WriteUint32(0);
- } else {
- uint32_t map_id = GetMapId(*map);
- object_serializer_.WriteUint32(map_id + 1);
- }
-
- if (object->HasFastProperties()) {
- // Properties.
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details =
- map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value = JSObject::FastPropertyAt(
- isolate_, object, details.representation(), field_index);
- WriteValue(value, object_serializer_);
- }
- } else {
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- Handle<SwissNameDictionary> swiss_dictionary =
- handle(object->property_dictionary_swiss(), isolate_);
- SerializeObjectPropertiesWithDictionaryMap(swiss_dictionary);
- } else {
- Handle<NameDictionary> dictionary =
- handle(object->property_dictionary(), isolate_);
- SerializeObjectPropertiesWithDictionaryMap(dictionary);
- }
- WriteValue(handle(map->prototype(), isolate_), object_serializer_);
- }
-
- // Elements.
- ElementsKind kind = object->GetElementsKind();
- // We only serialize the actual elements excluding the slack part.
- DCHECK(!IsDoubleElementsKind(kind));
- if (!IsDictionaryElementsKind(kind)) {
- uint32_t elements_length = object->elements().length();
- if (IsHoleyElementsKindForRead(kind)) {
- uint32_t max_element_index = 0;
- FixedArray elements = FixedArray::cast(object->elements());
- for (int i = elements_length - 1; i >= 0; i--) {
- if (!elements.is_the_hole(isolate_, i)) {
- max_element_index = i + 1;
- break;
- }
- }
- return SerializeElements(object, object_serializer_,
- Just(max_element_index));
- } else {
- return SerializeElements(object, object_serializer_,
- Just(elements_length));
- }
- }
- SerializeElements(object, object_serializer_, Nothing<uint32_t>());
-}
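
For holey fast elements, the loop above trims the serialized length to one past the last non-hole element rather than the full backing-store length. A small standalone sketch of that trim, modeling holes with std::optional (illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <vector>

    int main() {
      // Backing store with "holes" modeled as empty optionals.
      std::vector<std::optional<int>> elements = {
          std::nullopt, 7, std::nullopt, 42, std::nullopt, std::nullopt};
      uint32_t max_element_index = 0;
      for (int i = static_cast<int>(elements.size()) - 1; i >= 0; --i) {
        if (elements[i].has_value()) {
          max_element_index = i + 1;  // one past the last real element
          break;
        }
      }
      std::printf("serialize %u of %zu slots\n", max_element_index,
                  elements.size());  // serialize 4 of 6 slots
      return 0;
    }
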
-
-// Format (serialized array):
-// - Elements type (dense or sparse)
-// - Length
-// If dense array
-// - For each element:
-// - Serialized value
-// If sparse array
-// - For each element:
-// - Element index
-// - Serialized value
-void WebSnapshotSerializer::SerializeArray(Handle<JSArray> array) {
- uint32_t length;
- if (!array->length().ToUint32(&length)) {
- Throw("Invalid array length");
- return;
- }
- SerializeElements(array, array_serializer_, Just(length));
-}
-
-void WebSnapshotSerializer::SerializeElements(Handle<JSObject> object,
- ValueSerializer& serializer,
- Maybe<uint32_t> length) {
- // TODO(v8:11525): Handle sealed & frozen elements correctly. (Also: handle
- // sealed & frozen objects.)
-
- // TODO(v8:11525): Sometimes it would make sense to serialize dictionary
- // mode elements as dense (the number of elements is large but the array is
- // densely filled).
-
- // TODO(v8:11525): Sometimes it would make sense to serialize packed mode
- // elements as sparse (if there are a considerable amount of holes in it).
- ReadOnlyRoots roots(isolate_);
- auto elements_kind = object->GetElementsKind();
- switch (elements_kind) {
- case PACKED_SMI_ELEMENTS:
- case PACKED_ELEMENTS:
- case HOLEY_SMI_ELEMENTS:
- case HOLEY_ELEMENTS:
- case PACKED_FROZEN_ELEMENTS:
- case PACKED_SEALED_ELEMENTS:
- case HOLEY_FROZEN_ELEMENTS:
- case HOLEY_SEALED_ELEMENTS: {
- serializer.WriteUint32(ElementsType::kDense);
- Handle<FixedArray> elements =
- handle(FixedArray::cast(object->elements()), isolate_);
- serializer.WriteUint32(length.ToChecked());
- for (uint32_t i = 0; i < length.ToChecked(); ++i) {
- WriteValue(handle(elements->get(i), isolate_), serializer);
- }
- break;
- }
- case PACKED_DOUBLE_ELEMENTS:
- case HOLEY_DOUBLE_ELEMENTS: {
- serializer.WriteUint32(ElementsType::kDense);
- Handle<FixedDoubleArray> elements =
- handle(FixedDoubleArray::cast(object->elements()), isolate_);
- serializer.WriteUint32(length.ToChecked());
- for (uint32_t i = 0; i < length.ToChecked(); ++i) {
- if (!elements->is_the_hole(i)) {
- double double_value = elements->get_scalar(i);
- Handle<Object> element_value =
- isolate_->factory()->NewNumber(double_value);
- WriteValue(element_value, serializer);
- } else {
- WriteValue(handle(roots.the_hole_value(), isolate_), serializer);
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- DisallowGarbageCollection no_gc;
- serializer.WriteUint32(ElementsType::kSparse);
-
- auto dict = object->element_dictionary();
- serializer.WriteUint32(dict.NumberOfElements());
-
- for (InternalIndex index : dict.IterateEntries()) {
- Object key = dict.KeyAt(index);
- if (!dict.IsKey(roots, key)) {
- continue;
- }
- CHECK(key.IsNumber());
- // This case is checked by DiscoverElements.
- // TODO(v8:11525): Support large element indices.
- CHECK_LE(key.Number(), std::numeric_limits<uint32_t>::max());
- uint32_t element_index = static_cast<uint32_t>(key.Number());
- serializer.WriteUint32(element_index);
- WriteValue(handle(dict.ValueAt(index), isolate_), serializer);
- }
- break;
- }
- default: {
- Throw("Unsupported elements");
- return;
- }
- }
-}
-
-uint8_t WebSnapshotSerializerDeserializer::ArrayBufferKindToFlags(
- Handle<JSArrayBuffer> array_buffer) {
- return DetachedBitField::encode(array_buffer->was_detached()) |
- SharedBitField::encode(array_buffer->is_shared()) |
- ResizableBitField::encode(array_buffer->is_resizable_by_js());
-}
-
-uint32_t WebSnapshotSerializerDeserializer::BigIntSignAndLengthToFlags(
- Handle<BigInt> bigint) {
- uint32_t bitfield = bigint->GetBitfieldForSerialization();
- int byte_length = BigInt::DigitsByteLengthForBitfield(bitfield);
- int sign = BigInt::SignBits::decode(bitfield);
-
- return BigIntSignBitField::encode(sign) |
- BigIntLengthBitField::encode(byte_length);
-}
-
-uint32_t WebSnapshotSerializerDeserializer::BigIntFlagsToBitField(
- uint32_t flags) {
- int byte_length = BigIntLengthBitField::decode(flags);
- int sign = BigIntSignBitField::decode(flags);
- return BigInt::SignBits::encode(sign) |
- BigInt::LengthBits::encode(byte_length);
-}
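
A tiny self-contained sketch of the sign/length flag packing these two helpers convert between, with an assumed 1-bit sign at bit 0 and the byte length in the remaining bits (the real field widths are whatever BigIntSignBitField / BigIntLengthBitField define):

    #include <cassert>
    #include <cstdint>

    // Illustrative packing of (sign, byte_length) into one uint32_t flags word.
    constexpr uint32_t EncodeBigIntFlags(bool sign, uint32_t byte_length) {
      return (sign ? 1u : 0u) | (byte_length << 1);
    }

    constexpr bool DecodeSign(uint32_t flags) { return (flags & 1u) != 0; }
    constexpr uint32_t DecodeByteLength(uint32_t flags) { return flags >> 1; }

    int main() {
      uint32_t flags = EncodeBigIntFlags(/*sign=*/true, /*byte_length=*/16);
      assert(DecodeSign(flags));
      assert(DecodeByteLength(flags) == 16);
      return 0;
    }
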
-
-// Format (serialized array buffer):
-// - ArrayBufferFlags, including was_detached, is_shared and is_resizable_by_js.
-// - Byte length
-// - if is_resizable_by_js
-// - Max byte length
-// - Raw bytes
-void WebSnapshotSerializer::SerializeArrayBuffer(
- Handle<JSArrayBuffer> array_buffer) {
- size_t byte_length = array_buffer->GetByteLength();
- if (byte_length > std::numeric_limits<uint32_t>::max()) {
- Throw("Too large array buffer");
- return;
- }
- array_buffer_serializer_.WriteByte(ArrayBufferKindToFlags(array_buffer));
-
- array_buffer_serializer_.WriteUint32(static_cast<uint32_t>(byte_length));
- if (array_buffer->is_resizable_by_js()) {
- size_t max_byte_length = array_buffer->max_byte_length();
- if (max_byte_length > std::numeric_limits<uint32_t>::max()) {
- Throw("Too large resizable array buffer");
- return;
- }
- array_buffer_serializer_.WriteUint32(
- static_cast<uint32_t>(array_buffer->max_byte_length()));
- }
- array_buffer_serializer_.WriteRawBytes(array_buffer->backing_store(),
- byte_length);
-}
-
-uint8_t WebSnapshotSerializerDeserializer::ArrayBufferViewKindToFlags(
- Handle<JSArrayBufferView> array_buffer_view) {
- return LengthTrackingBitField::encode(
- array_buffer_view->is_length_tracking());
-}
-
-// static
-ExternalArrayType
-WebSnapshotSerializerDeserializer::TypedArrayTypeToExternalArrayType(
- TypedArrayType type) {
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case k##Type##Array: \
- return kExternal##Type##Array;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- }
- UNREACHABLE();
-}
-
-// static
-WebSnapshotSerializerDeserializer::TypedArrayType
-WebSnapshotSerializerDeserializer::ExternalArrayTypeToTypedArrayType(
- ExternalArrayType type) {
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case kExternal##Type##Array: \
- return TypedArrayType::k##Type##Array;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- }
- UNREACHABLE();
-}
-
-// Format (serialized array buffer view):
-// - Serialized ArrayBuffer
-// - ArrayBufferViewFlags, including is_length_tracking
-// - Byte offset
-// - If not is_length_tracking
-// - Byte length
-void WebSnapshotSerializer::SerializeArrayBufferView(
- Handle<JSArrayBufferView> array_buffer_view, ValueSerializer& serializer) {
- WriteValue(handle(array_buffer_view->buffer(), isolate_), serializer);
- // TODO(v8:11525): Implement WriteByte.
- serializer.WriteByte(ArrayBufferViewKindToFlags(array_buffer_view));
- if (array_buffer_view->byte_offset() > std::numeric_limits<uint32_t>::max()) {
- Throw("Too large byte offset in TypedArray");
- return;
- }
- serializer.WriteUint32(
- static_cast<uint32_t>(array_buffer_view->byte_offset()));
- if (array_buffer_view->byte_length() > std::numeric_limits<uint32_t>::max()) {
- Throw("Too large byte length in TypedArray");
- return;
- }
- if (!array_buffer_view->is_length_tracking()) {
- serializer.WriteUint32(
- static_cast<uint32_t>(array_buffer_view->byte_length()));
- }
-}
-
-// Format (serialized typed array):
-// - TypedArrayType
-// - ArrayBufferView
-void WebSnapshotSerializer::SerializeTypedArray(
- Handle<JSTypedArray> typed_array) {
- TypedArrayType typed_array_type =
- ExternalArrayTypeToTypedArrayType(typed_array->type());
- typed_array_serializer_.WriteUint32(typed_array_type);
- SerializeArrayBufferView(typed_array, typed_array_serializer_);
-}
-
-// Format (serialized data view):
-// - ArrayBufferView
-void WebSnapshotSerializer::SerializeDataView(Handle<JSDataView> data_view) {
- SerializeArrayBufferView(data_view, data_view_serializer_);
-}
-
-// Format (serialized export):
-// - String id (export name)
-// - Serialized value (export value)
-void WebSnapshotSerializer::SerializeExport(Handle<Object> object,
- Handle<String> export_name) {
- ++export_count_;
- WriteStringId(export_name, export_serializer_);
- if (object->IsJSPrimitiveWrapper()) {
- Handle<JSPrimitiveWrapper> wrapper =
- Handle<JSPrimitiveWrapper>::cast(object);
- Handle<Object> export_value = handle(wrapper->value(), isolate_);
- WriteValue(export_value, export_serializer_);
- } else {
- WriteValue(object, export_serializer_);
- }
-}
-
-// Format (serialized value):
-// - Type id (ValueType enum)
-// - Value or id (interpretation depends on the type)
-void WebSnapshotSerializer::WriteValue(Handle<Object> object,
- ValueSerializer& serializer) {
- if (object->IsSmi()) {
- serializer.WriteByte(ValueType::INTEGER);
- serializer.WriteZigZag<int32_t>(Smi::cast(*object).value());
- return;
- }
-
- uint32_t id;
- if (GetExternalId(HeapObject::cast(*object), &id)) {
- serializer.WriteByte(ValueType::EXTERNAL_ID);
- serializer.WriteUint32(id);
- return;
- }
-
- if (GetBuiltinObjectId(HeapObject::cast(*object), id)) {
- serializer.WriteByte(ValueType::BUILTIN_OBJECT_ID);
- serializer.WriteUint32(id);
- return;
- }
-
- DCHECK(object->IsHeapObject());
- Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- switch ((*heap_object).map().instance_type()) {
- case ODDBALL_TYPE:
- switch (Oddball::cast(*heap_object).kind()) {
- case Oddball::kFalse:
- serializer.WriteByte(ValueType::FALSE_CONSTANT);
- return;
- case Oddball::kTrue:
- serializer.WriteByte(ValueType::TRUE_CONSTANT);
- return;
- case Oddball::kNull:
- serializer.WriteByte(ValueType::NULL_CONSTANT);
- return;
- case Oddball::kUndefined:
- serializer.WriteByte(ValueType::UNDEFINED_CONSTANT);
- return;
- case Oddball::kTheHole:
- serializer.WriteByte(ValueType::NO_ELEMENT_CONSTANT);
- return;
- default:
- UNREACHABLE();
- }
- case HEAP_NUMBER_TYPE:
- // TODO(v8:11525): Handle possible endianness mismatch.
- serializer.WriteByte(ValueType::DOUBLE);
- serializer.WriteDouble(HeapNumber::cast(*heap_object).value());
- break;
- case JS_FUNCTION_TYPE:
- serializer.WriteByte(ValueType::FUNCTION_ID);
- serializer.WriteUint32(GetFunctionId(JSFunction::cast(*heap_object)));
- break;
- case JS_CLASS_CONSTRUCTOR_TYPE:
- serializer.WriteByte(ValueType::CLASS_ID);
- serializer.WriteUint32(GetClassId(JSFunction::cast(*heap_object)));
- break;
- case JS_OBJECT_TYPE:
- serializer.WriteByte(ValueType::OBJECT_ID);
- serializer.WriteUint32(GetObjectId(JSObject::cast(*heap_object)));
- break;
- case JS_ARRAY_TYPE:
- serializer.WriteByte(ValueType::ARRAY_ID);
- serializer.WriteUint32(GetArrayId(JSArray::cast(*heap_object)));
- break;
- case SYMBOL_TYPE:
- serializer.WriteByte(ValueType::SYMBOL_ID);
- serializer.WriteUint32(GetSymbolId(Symbol::cast(*heap_object)));
- break;
- case BIGINT_TYPE:
- serializer.WriteByte(ValueType::BIGINT_ID);
- serializer.WriteUint32(GetBigIntId(BigInt::cast(*heap_object)));
- break;
- case JS_REG_EXP_TYPE: {
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(heap_object);
- if (regexp->map() != isolate_->regexp_function()->initial_map()) {
- Throw("Unsupported RegExp map");
- return;
- }
- serializer.WriteByte(ValueType::REGEXP);
- Handle<String> pattern = handle(regexp->source(), isolate_);
- WriteStringId(pattern, serializer);
- Handle<String> flags_string =
- JSRegExp::StringFromFlags(isolate_, regexp->flags());
- WriteStringId(flags_string, serializer);
- break;
- }
- case JS_ARRAY_BUFFER_TYPE: {
- Handle<JSArrayBuffer> array_buffer =
- Handle<JSArrayBuffer>::cast(heap_object);
- serializer.WriteByte(ValueType::ARRAY_BUFFER_ID);
- serializer.WriteUint32(GetArrayBufferId(*array_buffer));
- break;
- }
- case JS_TYPED_ARRAY_TYPE: {
- Handle<JSTypedArray> typed_array =
- Handle<JSTypedArray>::cast(heap_object);
- serializer.WriteByte(ValueType::TYPED_ARRAY_ID);
- serializer.WriteUint32(GetTypedArrayId(*typed_array));
- break;
- }
- case JS_DATA_VIEW_TYPE: {
- Handle<JSDataView> data_view = Handle<JSDataView>::cast(heap_object);
- serializer.WriteUint32(ValueType::DATA_VIEW_ID);
- serializer.WriteUint32(GetDataViewId(*data_view));
- break;
- }
- default:
- if (heap_object->IsString()) {
- // Write strings which are referred to only once as in-place strings.
- WriteStringMaybeInPlace(Handle<String>::cast(heap_object), serializer);
- } else {
- Throw("Unsupported object");
- }
- }
- // TODO(v8:11525): Support more types.
-}
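
WriteZigZag<int32_t> above presumably uses the standard zigzag mapping, which folds small negative and positive integers into small unsigned values before varint encoding. An illustrative, self-contained version of just that mapping (not V8's implementation):

    #include <cassert>
    #include <cstdint>

    // Standard zigzag mapping: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
    uint32_t ZigZagEncode(int32_t v) {
      return (static_cast<uint32_t>(v) << 1) ^ static_cast<uint32_t>(v >> 31);
    }

    int32_t ZigZagDecode(uint32_t v) {
      return static_cast<int32_t>(v >> 1) ^ -static_cast<int32_t>(v & 1);
    }

    int main() {
      for (int32_t v : {0, -1, 1, -123456, 123456}) {
        assert(ZigZagDecode(ZigZagEncode(v)) == v);
      }
      return 0;
    }
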
-
-void WebSnapshotSerializer::WriteStringMaybeInPlace(
- Handle<String> string, ValueSerializer& serializer) {
- // If the string is only referred to by one location, write it in-place.
- bool in_place = false;
- uint32_t id = GetStringId(string, in_place);
- if (in_place) {
- serializer.WriteByte(ValueType::IN_PLACE_STRING_ID);
- SerializeString(string, serializer);
- } else {
- serializer.WriteByte(ValueType::STRING_ID);
- serializer.WriteUint32(id);
- }
-}
-
-void WebSnapshotSerializer::WriteStringId(Handle<String> string,
- ValueSerializer& serializer) {
- bool in_place = false;
- uint32_t id = GetStringId(string, in_place);
- CHECK(!in_place); // The string must have an ID.
- serializer.WriteUint32(id);
-}
-
-uint32_t WebSnapshotSerializer::GetStringId(Handle<String> string,
- bool& in_place) {
- // Internalize strings so that they're unique.
- string = factory()->InternalizeString(string);
-
- // Strings referred to from more than one place are inserted into string_ids_.
- // Strings referred to from only one place aren't.
-#ifdef DEBUG
- auto result = all_strings_.FindOrInsert(string);
- DCHECK(result.already_exists);
-#endif
- int id = 0;
- in_place = !string_ids_.Lookup(*string, &id);
- return static_cast<uint32_t>(id);
-}
-
-uint32_t WebSnapshotSerializer::GetSymbolId(Symbol symbol) {
- int id;
- bool return_value = symbol_ids_.Lookup(symbol, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(id);
-}
-
-uint32_t WebSnapshotSerializer::GetBigIntId(BigInt bigint) {
- int id;
- bool return_value = bigint_ids_.Lookup(bigint, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(id);
-}
-
-uint32_t WebSnapshotSerializer::GetMapId(Map map) {
- int id;
- bool return_value = map_ids_.Lookup(map, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(id);
-}
-
-uint32_t WebSnapshotSerializer::GetFunctionId(JSFunction function) {
- int id;
- bool return_value = function_ids_.Lookup(function, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(function_ids_.size() - 1 - id);
-}
-
-uint32_t WebSnapshotSerializer::GetClassId(JSFunction function) {
- int id;
- bool return_value = class_ids_.Lookup(function, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(class_ids_.size() - 1 - id);
-}
-
-uint32_t WebSnapshotSerializer::GetContextId(Context context) {
- int id;
- bool return_value = context_ids_.Lookup(context, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(id);
-}
-
-uint32_t WebSnapshotSerializer::GetArrayId(JSArray array) {
- int id;
- bool return_value = array_ids_.Lookup(array, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(array_ids_.size() - 1 - id);
-}
-
-uint32_t WebSnapshotSerializer::GetTypedArrayId(JSTypedArray typed_array) {
- int id;
- bool return_value = typed_array_ids_.Lookup(typed_array, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(typed_array_ids_.size() - 1 - id);
-}
-
-uint32_t WebSnapshotSerializer::GetDataViewId(JSDataView data_view) {
- int id;
- bool return_value = data_view_ids_.Lookup(data_view, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(data_view_ids_.size() - 1 - id);
-}
-
-uint32_t WebSnapshotSerializer::GetArrayBufferId(JSArrayBuffer array_buffer) {
- int id;
- bool return_value = array_buffer_ids_.Lookup(array_buffer, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(array_buffer_ids_.size() - 1 - id);
-}
-
-uint32_t WebSnapshotSerializer::GetObjectId(JSObject object) {
- int id;
- bool return_value = object_ids_.Lookup(object, &id);
- DCHECK(return_value);
- USE(return_value);
- return static_cast<uint32_t>(object_ids_.size() - 1 - id);
-}
-
-bool WebSnapshotSerializer::GetExternalId(HeapObject object, uint32_t* id) {
- int id_int;
- bool return_value = external_object_ids_.Lookup(object, &id_int);
- if (id != nullptr) {
- *id = static_cast<uint32_t>(id_int);
- }
- return return_value;
-}
-
-bool WebSnapshotSerializer::GetBuiltinObjectNameIndex(HeapObject object,
- uint32_t& index) {
- int index_int = 0;
- bool return_value = builtin_object_to_name_.Lookup(object, &index_int);
- index = static_cast<uint32_t>(index_int);
- return return_value;
-}
-
-bool WebSnapshotSerializer::GetBuiltinObjectId(HeapObject object,
- uint32_t& id) {
- int id_int;
- bool return_value = builtin_object_ids_.Lookup(object, &id_int);
- id = static_cast<uint32_t>(id_int);
- return return_value;
-}
-
-Handle<FixedArray> WebSnapshotSerializer::GetExternals() {
- return external_object_ids_.Values(isolate_);
-}
-
-WebSnapshotDeserializer::WebSnapshotDeserializer(v8::Isolate* isolate,
- const uint8_t* data,
- size_t buffer_size)
- : WebSnapshotDeserializer(reinterpret_cast<i::Isolate*>(isolate),
- Handle<Object>(), {data, buffer_size}) {}
-
-WebSnapshotDeserializer::WebSnapshotDeserializer(
- Isolate* isolate, Handle<Script> snapshot_as_script)
- : WebSnapshotSerializerDeserializer(isolate),
- script_name_(handle(snapshot_as_script->name(), isolate_)),
- roots_(isolate) {
- auto [data, buffer_size, buffer_owned] =
- ExtractScriptBuffer(isolate, snapshot_as_script);
- deserializer_.reset(new ValueDeserializer(isolate_, data, buffer_size));
- if (buffer_owned) {
- owned_data_.reset(data);
- }
-}
-
-WebSnapshotDeserializer::WebSnapshotDeserializer(
- Isolate* isolate, Handle<Object> script_name,
- base::Vector<const uint8_t> buffer)
- : WebSnapshotSerializerDeserializer(isolate),
- script_name_(script_name),
- deserializer_(
- new ValueDeserializer(isolate_, buffer.data(), buffer.length())),
- roots_(isolate) {
- Handle<FixedArray> empty_array = factory()->empty_fixed_array();
- strings_handle_ = empty_array;
- symbols_handle_ = empty_array;
- bigints_handle_ = empty_array;
- builtin_objects_handle_ = empty_array;
- maps_handle_ = empty_array;
- contexts_handle_ = empty_array;
- functions_handle_ = empty_array;
- classes_handle_ = empty_array;
- arrays_handle_ = empty_array;
- array_buffers_handle_ = empty_array;
- typed_arrays_handle_ = empty_array;
- data_views_handle_ = empty_array;
- objects_handle_ = empty_array;
- external_references_handle_ = empty_array;
- isolate_->heap()->AddGCEpilogueCallback(UpdatePointersCallback,
- v8::kGCTypeAll, this);
-}
-
-WebSnapshotDeserializer::~WebSnapshotDeserializer() {
- isolate_->heap()->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
-}
-
-void WebSnapshotDeserializer::UpdatePointers() {
- strings_ = *strings_handle_;
- symbols_ = *symbols_handle_;
- bigints_ = *bigints_handle_;
- builtin_objects_ = *builtin_objects_handle_;
- maps_ = *maps_handle_;
- contexts_ = *contexts_handle_;
- functions_ = *functions_handle_;
- classes_ = *classes_handle_;
- arrays_ = *arrays_handle_;
- array_buffers_ = *array_buffers_handle_;
- typed_arrays_ = *typed_arrays_handle_;
- data_views_ = *data_views_handle_;
- objects_ = *objects_handle_;
- external_references_ = *external_references_handle_;
-}
-
-// static
-std::tuple<const uint8_t*, uint32_t, bool>
-WebSnapshotDeserializer::ExtractScriptBuffer(
- Isolate* isolate, Handle<Script> snapshot_as_script) {
- Handle<String> source =
- handle(String::cast(snapshot_as_script->source()), isolate);
- if (source->IsExternalOneByteString()) {
- const v8::String::ExternalOneByteStringResource* resource =
- ExternalOneByteString::cast(*source).resource();
- return std::make_tuple(reinterpret_cast<const uint8_t*>(resource->data()),
- resource->length(), false);
- } else if (source->IsSeqOneByteString()) {
- SeqOneByteString source_as_seq = SeqOneByteString::cast(*source);
- size_t length = source_as_seq.length();
- std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
- {
- DisallowGarbageCollection no_gc;
- uint8_t* data = source_as_seq.GetChars(no_gc);
- memcpy(data_copy.get(), data, length);
- }
- return std::make_tuple(data_copy.release(), length, true);
- } else if (source->IsExternalTwoByteString()) {
- // TODO(v8:11525): Implement end-to-end snapshot processing which gets rid
- // of the need to copy the data here.
- const v8::String::ExternalStringResource* resource =
- ExternalTwoByteString::cast(*source).resource();
- size_t length = resource->length();
- std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
- {
- DisallowGarbageCollection no_gc;
- const uint16_t* data = resource->data();
- uint8_t* data_copy_ptr = data_copy.get();
- for (size_t i = 0; i < length; ++i) {
- data_copy_ptr[i] = static_cast<uint8_t>(data[i]);
- }
- }
- return std::make_tuple(data_copy.release(), length, true);
- } else if (source->IsSeqTwoByteString()) {
- SeqTwoByteString source_as_seq = SeqTwoByteString::cast(*source);
- size_t length = source_as_seq.length();
- std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
- {
- DisallowGarbageCollection no_gc;
- uint16_t* data = source_as_seq.GetChars(no_gc);
- uint8_t* data_copy_ptr = data_copy.get();
- for (size_t i = 0; i < length; ++i) {
- data_copy_ptr[i] = static_cast<uint8_t>(data[i]);
- }
- }
- return std::make_tuple(data_copy.release(), length, true);
- }
- UNREACHABLE();
-}
-
-void WebSnapshotDeserializer::Throw(const char* message) {
- string_count_ = 0;
- symbol_count_ = 0;
- bigint_count_ = 0;
- map_count_ = 0;
- builtin_object_count_ = 0;
- context_count_ = 0;
- class_count_ = 0;
- function_count_ = 0;
- object_count_ = 0;
- deferred_references_->SetLength(0);
-
- // Make sure we don't read any more data
- deserializer_->position_ = deserializer_->end_;
-
- WebSnapshotSerializerDeserializer::Throw(message);
-}
-
-bool WebSnapshotDeserializer::Deserialize(
- MaybeHandle<FixedArray> external_references, bool skip_exports) {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize);
- if (external_references.ToHandle(&external_references_handle_)) {
- external_references_ = *external_references_handle_;
- } else {
- external_references_handle_ = roots_.empty_fixed_array_handle();
- }
-
- if (deserialized_) {
- Throw("Can't reuse WebSnapshotDeserializer");
- return false;
- }
- deserialized_ = true;
- auto buffer_size = deserializer_->end_ - deserializer_->position_;
-
- base::ElapsedTimer timer;
- if (v8_flags.trace_web_snapshot) {
- timer.Start();
- }
- if (!DeserializeSnapshot(skip_exports)) {
- return false;
- }
- if (!DeserializeScript()) {
- return false;
- }
-
- if (v8_flags.trace_web_snapshot) {
- double ms = timer.Elapsed().InMillisecondsF();
- PrintF("[Deserializing snapshot (%zu bytes) took %0.3f ms]\n", buffer_size,
- ms);
- }
- return true;
-}
-
-#ifdef VERIFY_HEAP
-void WebSnapshotDeserializer::VerifyObjects() {
- for (int i = 0; i < strings_.length(); i++) {
- String::cast(strings_.get(i)).StringVerify(isolate_);
- }
- for (int i = 0; i < symbols_.length(); i++) {
- Symbol::cast(symbols_.get(i)).SymbolVerify(isolate_);
- }
- for (int i = 0; i < builtin_objects_.length(); i++) {
- builtin_objects_.get(i).ObjectVerify(isolate_);
- }
- for (int i = 0; i < maps_.length(); i++) {
- Map::cast(maps_.get(i)).MapVerify(isolate_);
- }
- for (int i = 0; i < contexts_.length(); i++) {
- Context::cast(contexts_.get(i)).ContextVerify(isolate_);
- }
- for (int i = 0; i < functions_.length(); i++) {
- JSFunction::cast(functions_.get(i)).JSFunctionVerify(isolate_);
- }
- for (int i = 0; i < arrays_.length(); i++) {
- JSArray::cast(arrays_.get(i)).JSArrayVerify(isolate_);
- }
- for (int i = 0; i < array_buffers_.length(); i++) {
- JSArrayBuffer::cast(array_buffers_.get(i)).JSArrayBufferVerify(isolate_);
- }
- for (int i = 0; i < typed_arrays_.length(); i++) {
- JSTypedArray::cast(typed_arrays_.get(i)).JSTypedArrayVerify(isolate_);
- }
- for (int i = 0; i < data_views_.length(); i++) {
- JSDataView::cast(data_views_.get(i)).JSDataViewVerify(isolate_);
- }
- for (int i = 0; i < objects_.length(); i++) {
- JSObject::cast(objects_.get(i)).JSObjectVerify(isolate_);
- }
- for (int i = 0; i < classes_.length(); i++) {
- JSFunction::cast(classes_.get(i)).JSFunctionVerify(isolate_);
- }
-}
-#endif
-
-bool WebSnapshotDeserializer::DeserializeSnapshot(bool skip_exports) {
- CollectBuiltinObjects();
-
- deferred_references_ = ArrayList::New(isolate_, 30);
-
- const void* magic_bytes;
- if (!deserializer_->ReadRawBytes(sizeof(kMagicNumber), &magic_bytes) ||
- memcmp(magic_bytes, kMagicNumber, sizeof(kMagicNumber)) != 0) {
- Throw("Invalid magic number");
- return false;
- }
-
- DeserializeStrings();
- DeserializeSymbols();
- DeserializeBigInts();
- DeserializeBuiltinObjects();
- DeserializeMaps();
- DeserializeContexts();
- DeserializeFunctions();
- DeserializeArrays();
- DeserializeArrayBuffers();
- DeserializeTypedArrays();
- DeserializeDataViews();
- DeserializeObjects();
- DeserializeClasses();
- ProcessDeferredReferences();
- DeserializeExports(skip_exports);
- DCHECK_EQ(0, deferred_references_->Length());
-
-#ifdef VERIFY_HEAP
- // Verify the objects we produced during deserializing snapshot.
- if (v8_flags.verify_heap && !has_error()) {
- VerifyObjects();
- }
-#endif
-
- return !has_error();
-}
-
-void WebSnapshotDeserializer::CollectBuiltinObjects() {
- // TODO(v8:11525): Look up the builtin objects from the global object.
- builtin_object_name_to_object_ =
- ObjectHashTable::New(isolate_, kBuiltinObjectCount);
-#if DEBUG
- int i = 0;
-#endif
- IterateBuiltinObjects([&](Handle<String> name, Handle<HeapObject> object) {
- auto new_builtin_object_name_to_object =
- ObjectHashTable::Put(builtin_object_name_to_object_, name, object);
- USE(new_builtin_object_name_to_object);
- // We preallocated the correct size, so the hash table doesn't grow.
- DCHECK_EQ(*new_builtin_object_name_to_object,
- *builtin_object_name_to_object_);
-#if DEBUG
- ++i;
-#endif
- });
- DCHECK_EQ(i, kBuiltinObjectCount);
-}
-
-bool WebSnapshotDeserializer::DeserializeScript() {
- // If there is more data, treat it as normal JavaScript.
- DCHECK_LE(deserializer_->position_, deserializer_->end_);
- auto remaining_bytes = deserializer_->end_ - deserializer_->position_;
- if (remaining_bytes > 0 && remaining_bytes < v8::String::kMaxLength) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- v8::Local<v8::String> source =
- v8::String::NewFromUtf8(
- v8_isolate, reinterpret_cast<const char*>(deserializer_->position_),
- NewStringType::kNormal, static_cast<int>(remaining_bytes))
- .ToLocalChecked();
-
- ScriptOrigin origin(v8_isolate, Utils::ToLocal(script_name_));
-
- ScriptCompiler::Source script_source(source, origin);
- Local<UnboundScript> script;
- if (!ScriptCompiler::CompileUnboundScript(v8_isolate, &script_source)
- .ToLocal(&script)) {
- // The exception has already been reported.
- DCHECK(!isolate_->has_pending_exception());
- return false;
- }
- Local<Value> result;
- if (!script->BindToCurrentContext()
- ->Run(v8_isolate->GetCurrentContext())
- .ToLocal(&result)) {
- // The exception has already been reported.
- DCHECK(!isolate_->has_pending_exception());
- return false;
- }
- }
-
- return !has_error();
-}
-
-void WebSnapshotDeserializer::DeserializeStrings() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Strings);
- if (!ReadCount(string_count_)) {
- Throw("Malformed string table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- strings_handle_ = factory()->NewFixedArray(string_count_);
- strings_ = *strings_handle_;
- for (uint32_t i = 0; i < string_count_; ++i) {
- MaybeHandle<String> maybe_string =
- deserializer_->ReadUtf8String(AllocationType::kOld);
- Handle<String> string;
- if (!maybe_string.ToHandle(&string)) {
- Throw("Malformed string");
- return;
- }
- strings_.set(i, *string);
- }
-}
-
-String WebSnapshotDeserializer::ReadString(
- InternalizeStrings internalize_strings) {
- DCHECK(!strings_handle_->is_null());
- uint32_t string_id;
- if (!deserializer_->ReadUint32(&string_id) || string_id >= string_count_) {
- Throw("malformed string id\n");
- return roots_.empty_string();
- }
- String string = String::cast(strings_.get(string_id));
- if (internalize_strings == InternalizeStrings::kYes &&
- !string.IsInternalizedString(isolate_)) {
- string = *factory()->InternalizeString(handle(string, isolate_));
- strings_.set(string_id, string);
- }
- return string;
-}
-
-String WebSnapshotDeserializer::ReadInPlaceString(
- InternalizeStrings internalize_strings) {
- MaybeHandle<String> maybe_string =
- deserializer_->ReadUtf8String(AllocationType::kOld);
- Handle<String> string;
- if (!maybe_string.ToHandle(&string)) {
- Throw("Malformed string");
- return roots_.empty_string();
- }
- if (internalize_strings == InternalizeStrings::kYes) {
- string = factory()->InternalizeString(string);
- }
- return *string;
-}
-
-Object WebSnapshotDeserializer::ReadSymbol() {
- DCHECK(!symbols_handle_->is_null());
- uint32_t symbol_id;
- if (!deserializer_->ReadUint32(&symbol_id) || symbol_id >= symbol_count_) {
- Throw("malformed symbol id\n");
- return roots_.undefined_value();
- }
- return symbols_.get(symbol_id);
-}
-
-Object WebSnapshotDeserializer::ReadBigInt() {
- DCHECK(!bigints_handle_->is_null());
- uint32_t bigint_id;
- if (!deserializer_->ReadUint32(&bigint_id) || bigint_id >= bigint_count_) {
- Throw("malformed bigint id\n");
- return roots_.undefined_value();
- }
- return bigints_.get(bigint_id);
-}
-
-void WebSnapshotDeserializer::DeserializeBigInts() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_BigInts);
- if (!ReadCount(bigint_count_)) {
- Throw("Malformed bigint table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- bigints_handle_ = factory()->NewFixedArray(bigint_count_);
- bigints_ = *bigints_handle_;
- for (uint32_t i = 0; i < bigint_count_; ++i) {
- uint32_t flags;
- if (!deserializer_->ReadUint32(&flags)) {
- Throw("malformed bigint flag");
- return;
- }
- int byte_length = BigIntLengthBitField::decode(flags);
- base::Vector<const uint8_t> digits_storage;
- if (!deserializer_->ReadRawBytes(byte_length).To(&digits_storage)) {
- Throw("malformed bigint");
- return;
- }
- Handle<BigInt> bigint;
- // BigIntFlags are browser-independent, so we explicitly convert BigIntFlags
- // to the BigInt bitfield here, even though the two are currently identical.
- if (!BigInt::FromSerializedDigits(isolate_, BigIntFlagsToBitField(flags),
- digits_storage)
- .ToHandle(&bigint)) {
- Throw("malformed bigint");
- return;
- }
- bigints_.set(i, *bigint);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeSymbols() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Symbols);
- if (!ReadCount(symbol_count_)) {
- Throw("Malformed symbol table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- symbols_handle_ = factory()->NewFixedArray(symbol_count_);
- symbols_ = *symbols_handle_;
- for (uint32_t i = 0; i < symbol_count_; ++i) {
- uint32_t symbol_type;
- if (!deserializer_->ReadUint32(&symbol_type) || symbol_type > 2) {
- Throw("malformed symbol\n");
- return;
- }
-
- Handle<Symbol> symbol;
- if (symbol_type == SymbolType::kNonGlobalNoDesription) {
- symbol = factory()->NewSymbol();
- } else { // Symbol with description
- uint32_t string_id;
- if (!deserializer_->ReadUint32(&string_id) ||
- string_id >= string_count_) {
- Throw("malformed string id\n");
- return;
- }
- if (symbol_type == SymbolType::kNonGlobal) {
- symbol = factory()->NewSymbol();
- symbol->set_description(String::cast(strings_.get(string_id)));
- } else {
- DCHECK_EQ(SymbolType::kGlobal, symbol_type);
- symbol = isolate_->SymbolFor(
- RootIndex::kPublicSymbolTable,
- handle(String::cast(strings_.get(string_id)), isolate_), false);
- }
- }
- symbols_.set(i, *symbol);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeMaps() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Maps);
- if (!ReadCount(map_count_)) {
- Throw("Malformed shape table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- maps_handle_ = factory()->NewFixedArray(map_count_);
- maps_ = *maps_handle_;
- for (uint32_t i = 0; i < map_count_; ++i) {
- bool has_custom_property_attributes = ReadMapType();
-
- uint32_t property_count;
- if (!deserializer_->ReadUint32(&property_count)) {
- Throw("Malformed shape");
- return;
- }
- // TODO(v8:11525): Consider passing the upper bound as a param and
- // systematically enforcing it on the ValueSerializer side.
- // TODO(v8:11525): Allow "objects with map" which need to be turned to
- // dictionary mode objects.
- // TODO(v8:11525): Create map trees.
- if (property_count > kMaxNumberOfDescriptors) {
- Throw("Malformed shape: too many properties");
- return;
- }
-
- if (property_count == 0) {
- Handle<Map> map = DeserializeObjectPrototypeAndCreateEmptyMap();
- maps_.set(i, *map);
- continue;
- }
-
- Handle<DescriptorArray> descriptors =
- factory()->NewDescriptorArray(property_count, 0);
- for (InternalIndex i : InternalIndex::Range(property_count)) {
- // No deferred references here, since strings and symbols have already
- // been deserialized.
- Object key = std::get<0>(
- ReadValue(Handle<HeapObject>(), 0, InternalizeStrings::kYes));
- DisallowGarbageCollection no_gc;
- if (!key.IsName()) {
- Throw("Invalid map key");
- return;
- }
- PropertyAttributes attributes = PropertyAttributes::NONE;
- if (has_custom_property_attributes) {
- uint8_t flags;
- if (!deserializer_->ReadByte(&flags)) {
- Throw("Malformed property attributes");
- return;
- }
- attributes = FlagsToAttributes(flags);
- }
- // Use the "none" representation until we see the first object having this
- // map. At that point, modify the representation.
- Descriptor desc =
- Descriptor::DataField(isolate_, handle(Name::cast(key), isolate_),
- i.as_int(), attributes, Representation::None());
- descriptors->Set(i, &desc);
- }
- DCHECK_EQ(descriptors->number_of_descriptors(), property_count);
- descriptors->Sort();
-
- Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize,
- HOLEY_ELEMENTS, 0);
- map->InitializeDescriptors(isolate_, *descriptors);
- // TODO(v8:11525): Set 'constructor'.
- DeserializeObjectPrototype(map);
- maps_.set(i, *map);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeBuiltinObjects() {
- RCS_SCOPE(isolate_,
- RuntimeCallCounterId::kWebSnapshotDeserialize_BuiltinObjects);
- if (!ReadCount(builtin_object_count_)) {
- Throw("Malformed builtin object table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- builtin_objects_handle_ = factory()->NewFixedArray(builtin_object_count_);
- builtin_objects_ = *builtin_objects_handle_;
- for (uint32_t i = 0; i < builtin_object_count_; ++i) {
- Handle<String> name = handle(ReadString(), isolate_);
- builtin_objects_.set(static_cast<int>(i),
- builtin_object_name_to_object_->Lookup(name));
- }
-}
-
-void WebSnapshotDeserializer::DeserializeContexts() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Contexts);
- if (!ReadCount(context_count_)) {
- Throw("Malformed context table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- contexts_handle_ = factory()->NewFixedArray(context_count_);
- contexts_ = *contexts_handle_;
- for (uint32_t i = 0; i < context_count_; ++i) {
- uint32_t context_type;
- if (!deserializer_->ReadUint32(&context_type)) {
- Throw("Malformed context type");
- return;
- }
-
- uint32_t parent_context_id;
- // Parent context is serialized before child context. Note: not >= on
- // purpose, we're going to subtract 1 later.
- if (!deserializer_->ReadUint32(&parent_context_id) ||
- parent_context_id > i) {
- Throw("Malformed context");
- return;
- }
-
- uint32_t variable_count;
- if (!deserializer_->ReadUint32(&variable_count)) {
- Throw("Malformed context");
- return;
- }
- const bool has_inlined_local_names =
- variable_count < kScopeInfoMaxInlinedLocalNamesSize;
- // TODO(v8:11525): Enforce upper limit for variable count.
- Handle<ScopeInfo> scope_info = CreateScopeInfo(
- variable_count, parent_context_id > 0,
- static_cast<ContextType>(context_type), has_inlined_local_names);
-
- Handle<Context> parent_context;
- if (parent_context_id > 0) {
- parent_context =
- handle(Context::cast(contexts_.get(parent_context_id - 1)), isolate_);
- scope_info->set_outer_scope_info(parent_context->scope_info());
- } else {
- parent_context = handle(isolate_->context(), isolate_);
- }
-
- const int local_names_container_size =
- has_inlined_local_names ? variable_count : 1;
- const int context_local_base = ScopeInfo::kVariablePartIndex;
- const int context_local_info_base =
- context_local_base + local_names_container_size;
-
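- // Layout of the ScopeInfo variable part: the local-name container (either
- // the inlined names or a single slot for the hash table) is followed by one
- // Smi-encoded info slot per local, both filled in below.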
- for (int variable_index = 0;
- variable_index < static_cast<int>(variable_count); ++variable_index) {
- {
- String name = ReadString(InternalizeStrings::kYes);
- if (has_inlined_local_names) {
- scope_info->set(context_local_base + variable_index, name);
- } else {
- Handle<NameToIndexHashTable> local_names_hashtable(
- scope_info->context_local_names_hashtable(), isolate_);
-
- Handle<NameToIndexHashTable> new_table =
- NameToIndexHashTable::Add(isolate_, local_names_hashtable,
- handle(name, isolate_), variable_index);
- // The hash table didn't grow, since it was preallocated to
- // be large enough in CreateScopeInfo.
- DCHECK_EQ(*new_table, *local_names_hashtable);
- USE(new_table);
- }
- }
- // TODO(v8:11525): Support variable modes etc.
- uint32_t info =
- ScopeInfo::VariableModeBits::encode(VariableMode::kLet) |
- ScopeInfo::InitFlagBit::encode(
- InitializationFlag::kNeedsInitialization) |
- ScopeInfo::MaybeAssignedFlagBit::encode(
- MaybeAssignedFlag::kMaybeAssigned) |
- ScopeInfo::ParameterNumberBits::encode(
- ScopeInfo::ParameterNumberBits::kMax) |
- ScopeInfo::IsStaticFlagBit::encode(IsStaticFlag::kNotStatic);
- scope_info->set(context_local_info_base + variable_index,
- Smi::FromInt(info));
- }
-
- // Allocate the FunctionContext after setting up the ScopeInfo to avoid
- // pointing to a ScopeInfo which is not set up yet.
- Handle<Context> context;
- switch (context_type) {
- case ContextType::FUNCTION:
- context = factory()->NewFunctionContext(parent_context, scope_info);
- break;
- case ContextType::BLOCK:
- context = factory()->NewBlockContext(parent_context, scope_info);
- break;
- default:
- Throw("Unsupported context type");
- return;
- }
- for (int variable_index = 0;
- variable_index < static_cast<int>(variable_count); ++variable_index) {
- int context_index = scope_info->ContextHeaderLength() + variable_index;
- Object value = std::get<0>(ReadValue(context, context_index));
- context->set(context_index, value);
- }
- contexts_.set(i, *context);
- }
-}
-
-Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
- uint32_t variable_count, bool has_parent, ContextType context_type,
- bool has_inlined_local_names) {
- // TODO(v8:11525): Decide how to handle language modes. (The code below sets
- // the language mode to strict.)
- // TODO(v8:11525): Support (context-allocating) receiver.
- // TODO(v8:11525): Support function variable & function name.
- // TODO(v8:11525): Support classes.
-
- ScopeType scope_type;
- int flags =
- ScopeInfo::SloppyEvalCanExtendVarsBit::encode(false) |
- ScopeInfo::LanguageModeBit::encode(LanguageMode::kStrict) |
- ScopeInfo::DeclarationScopeBit::encode(false) |
- ScopeInfo::ReceiverVariableBits::encode(VariableAllocationInfo::NONE) |
- ScopeInfo::ClassScopeHasPrivateBrandBit::encode(false) |
- ScopeInfo::HasSavedClassVariableBit::encode(false) |
- ScopeInfo::HasNewTargetBit::encode(false) |
- ScopeInfo::FunctionVariableBits::encode(VariableAllocationInfo::NONE) |
- ScopeInfo::HasInferredFunctionNameBit::encode(false) |
- ScopeInfo::IsAsmModuleBit::encode(false) |
- ScopeInfo::HasSimpleParametersBit::encode(false) |
- ScopeInfo::FunctionKindBits::encode(FunctionKind::kNormalFunction) |
- ScopeInfo::HasOuterScopeInfoBit::encode(has_parent) |
- ScopeInfo::IsDebugEvaluateScopeBit::encode(false) |
- ScopeInfo::ForceContextAllocationBit::encode(false) |
- ScopeInfo::PrivateNameLookupSkipsOuterClassBit::encode(false) |
- ScopeInfo::HasContextExtensionSlotBit::encode(false) |
- ScopeInfo::IsReplModeScopeBit::encode(false) |
- ScopeInfo::HasLocalsBlockListBit::encode(false);
- switch (context_type) {
- case ContextType::FUNCTION:
- scope_type = ScopeType::FUNCTION_SCOPE;
- flags |= ScopeInfo::DeclarationScopeBit::encode(true) |
- ScopeInfo::HasSimpleParametersBit::encode(true);
- break;
- case ContextType::BLOCK:
- scope_type = ScopeType::CLASS_SCOPE;
- flags |= ScopeInfo::ForceContextAllocationBit::encode(true);
- break;
- default:
- // Default to a CLASS_SCOPE, so that the rest of the code can be executed
- // without failures.
- scope_type = ScopeType::CLASS_SCOPE;
- Throw("Unsupported context type");
- }
- flags |= ScopeInfo::ScopeTypeBits::encode(scope_type);
- const int local_names_container_size =
- has_inlined_local_names ? variable_count : 1;
- const int length = ScopeInfo::kVariablePartIndex +
- (ScopeInfo::NeedsPositionInfo(scope_type)
- ? ScopeInfo::kPositionInfoEntries
- : 0) +
- (has_parent ? 1 : 0) + local_names_container_size +
- variable_count;
- Handle<NameToIndexHashTable> local_names_hashtable;
- if (!has_inlined_local_names) {
- local_names_hashtable = NameToIndexHashTable::New(isolate_, variable_count,
- AllocationType::kOld);
- }
- Handle<ScopeInfo> scope_info = factory()->NewScopeInfo(length);
- {
- DisallowGarbageCollection no_gc;
- ScopeInfo raw = *scope_info;
-
- raw.set_flags(flags);
- DCHECK(!raw.IsEmpty());
-
- raw.set_context_local_count(variable_count);
- // TODO(v8:11525): Support parameters.
- raw.set_parameter_count(0);
- if (raw.HasPositionInfo()) {
- raw.SetPositionInfo(0, 0);
- }
- if (!has_inlined_local_names) {
- raw.set_context_local_names_hashtable(*local_names_hashtable);
- }
- }
- return scope_info;
-}
-
-Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
- int shared_function_info_index, uint32_t start_position, uint32_t length,
- uint32_t parameter_count, uint8_t flags, uint32_t context_id) {
- // TODO(v8:11525): Deduplicate the SFIs for class methods.
- FunctionKind kind = FunctionFlagsToFunctionKind(flags);
- Handle<SharedFunctionInfo> shared = factory()->NewSharedFunctionInfo(
- factory()->empty_string(), MaybeHandle<Code>(), Builtin::kCompileLazy,
- kind);
- Handle<UncompiledData> uncompiled_data =
- factory()->NewUncompiledDataWithoutPreparseData(
- roots_.empty_string_handle(), start_position,
- start_position + length);
- {
- DisallowGarbageCollection no_gc;
- SharedFunctionInfo raw = *shared;
- if (IsConciseMethod(kind)) {
- raw.set_syntax_kind(FunctionSyntaxKind::kAccessorOrMethod);
- }
- raw.set_script(*script_);
- raw.set_function_literal_id(shared_function_info_index);
- raw.set_internal_formal_parameter_count(JSParameterCount(parameter_count));
- // TODO(v8:11525): Decide how to handle language modes.
- raw.set_language_mode(LanguageMode::kStrict);
- raw.set_uncompiled_data(*uncompiled_data);
- raw.set_allows_lazy_compilation(true);
- shared_function_infos_.Set(shared_function_info_index,
- HeapObjectReference::Weak(raw));
- }
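- // Record the start position -> SFI index mapping so that lazy compilation
- // can later find the matching SharedFunctionInfo for this function.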
- shared_function_info_table_ = ObjectHashTable::Put(
- shared_function_info_table_,
- handle(Smi::FromInt(start_position), isolate_),
- handle(Smi::FromInt(shared_function_info_index), isolate_));
-
- Handle<JSFunction> function =
- Factory::JSFunctionBuilder(isolate_, shared, isolate_->native_context())
- .Build();
- if (context_id > 0) {
- DCHECK_LT(context_id - 1, context_count_);
- // Guards raw pointer "context" below.
- DisallowHeapAllocation no_heap_access;
- Context context = Context::cast(contexts_.get(context_id - 1));
- function->set_context(context);
- shared->set_outer_scope_info(context.scope_info());
- }
- return function;
-}
-
-void WebSnapshotDeserializer::DeserializeFunctionProperties(
- Handle<JSFunction> function) {
- uint32_t map_id;
- if (!deserializer_->ReadUint32(&map_id) || map_id >= map_count_ + 1) {
- Throw("Malformed function");
- return;
- }
-
- if (map_id > 0) {
- map_id--; // Subtract 1 to get the real map_id.
- Handle<Map> map(Map::cast(maps_.get(map_id)), isolate_);
- int no_properties = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descriptors =
- handle(map->instance_descriptors(kRelaxedLoad), isolate_);
- Handle<PropertyArray> property_array =
- DeserializePropertyArray(descriptors, no_properties);
- // If this function map has already been deserialized completely, it can be
- // used directly.
- auto iter = deserialized_function_maps_.find(map_id);
- if (iter != deserialized_function_maps_.end()) {
- function->set_map(*iter->second, kReleaseStore);
- function->set_raw_properties_or_hash(*property_array);
- } else {
- // TODO(v8:11525): In-object properties.
- Handle<Map> function_map = Map::Copy(
- isolate_, handle(function->map(), isolate_), "Web Snapshot");
- Map::EnsureDescriptorSlack(isolate_, function_map,
- descriptors->number_of_descriptors());
- {
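- // Copy each data-field descriptor from the snapshot map into the copied
- // function map.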
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- Descriptor d = Descriptor::DataField(
- isolate_, handle(descriptors->GetKey(i), isolate_),
- descriptors->GetDetails(i).field_index(),
- descriptors->GetDetails(i).attributes(),
- descriptors->GetDetails(i).representation());
- function_map->instance_descriptors().Append(&d);
- if (d.GetKey()->IsInterestingSymbol()) {
- function_map->set_may_have_interesting_symbols(true);
- }
- }
- function_map->SetNumberOfOwnDescriptors(
- function_map->NumberOfOwnDescriptors() +
- descriptors->number_of_descriptors());
- function->set_map(*function_map, kReleaseStore);
- function->set_raw_properties_or_hash(*property_array);
- }
- deserialized_function_maps_.insert(std::make_pair(map_id, function_map));
- }
- }
-}
-
-void WebSnapshotDeserializer::DeserializeFunctions() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Functions);
- if (!ReadCount(function_count_)) {
- Throw("Malformed function table");
- return;
- }
- static_assert(kMaxItemCount + 1 <= FixedArray::kMaxLength);
- functions_handle_ = factory()->NewFixedArray(function_count_);
- functions_ = *functions_handle_;
-
- // Overallocate the array for SharedFunctionInfos; functions which we
- // deserialize soon will create more SharedFunctionInfos when called.
- shared_function_infos_handle_ = factory()->NewWeakFixedArray(
- WeakArrayList::CapacityForLength(function_count_ + 1),
- AllocationType::kOld);
- shared_function_infos_ = *shared_function_infos_handle_;
- shared_function_info_table_ = ObjectHashTable::New(isolate_, function_count_);
- script_ = factory()->NewScript(factory()->empty_string());
- {
- DisallowGarbageCollection no_gc;
- Script raw = *script_;
- raw.set_type(Script::TYPE_WEB_SNAPSHOT);
- raw.set_shared_function_infos(shared_function_infos_);
- raw.set_shared_function_info_table(*shared_function_info_table_);
- }
-
- for (; current_function_count_ < function_count_; ++current_function_count_) {
- uint32_t context_id;
- // Note: > (not >= on purpose, we will subtract 1).
- if (!deserializer_->ReadUint32(&context_id) ||
- context_id > context_count_) {
- Throw("Malformed function");
- return;
- }
- {
- String source = ReadString();
- DisallowGarbageCollection no_gc;
- if (current_function_count_ == 0) {
- script_->set_source(source);
- } else {
- // TODO(v8:11525): Support multiple source snippets.
- DCHECK_EQ(script_->source(), source);
- }
- }
-
- uint32_t start_position;
- uint32_t length;
- uint32_t parameter_count;
- uint8_t flags;
- if (!deserializer_->ReadUint32(&start_position) ||
- !deserializer_->ReadUint32(&length) ||
- !deserializer_->ReadUint32(&parameter_count) ||
- !deserializer_->ReadByte(&flags)) {
- Throw("Malformed function");
- return;
- }
-
- // Index 0 is reserved for top-level shared function info (which web
- // snapshot scripts don't have).
- Handle<JSFunction> function =
- CreateJSFunction(current_function_count_ + 1, start_position, length,
- parameter_count, flags, context_id);
- functions_.set(current_function_count_, *function);
-
- ReadFunctionPrototype(function);
- DeserializeFunctionProperties(function);
- DeserializeObjectPrototypeForFunction(function);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeClasses() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Classes);
- if (!ReadCount(class_count_)) {
- Throw("Malformed class table");
- return;
- }
- static_assert(kMaxItemCount + 1 <= FixedArray::kMaxLength);
- classes_handle_ = factory()->NewFixedArray(class_count_);
- classes_ = *classes_handle_;
-
- // Grow the array for SharedFunctionInfos.
- shared_function_infos_handle_ = WeakFixedArray::EnsureSpace(
- isolate_, shared_function_infos_handle_,
- WeakArrayList::CapacityForLength(function_count_ + 1 + class_count_));
- shared_function_infos_ = *shared_function_infos_handle_;
- script_->set_shared_function_infos(shared_function_infos_);
-
- for (; current_class_count_ < class_count_; ++current_class_count_) {
- uint32_t context_id;
- // Note: > (not >= on purpose, we will subtract 1).
- if (!deserializer_->ReadUint32(&context_id) ||
- context_id > context_count_) {
- Throw("Malformed class");
- return;
- }
-
- {
- String source = ReadString();
- if (current_function_count_ + current_class_count_ == 0) {
- script_->set_source(source);
- } else {
- // TODO(v8:11525): Support multiple source snippets.
- DCHECK_EQ(script_->source(), source);
- }
- }
-
- uint32_t start_position;
- uint32_t length;
- uint32_t parameter_count;
- uint8_t flags;
- if (!deserializer_->ReadUint32(&start_position) ||
- !deserializer_->ReadUint32(&length) ||
- !deserializer_->ReadUint32(&parameter_count) ||
- !deserializer_->ReadByte(&flags)) {
- Throw("Malformed class");
- return;
- }
-
- // Index 0 is reserved for top-level shared function info (which web
- // snapshot scripts don't have).
- Handle<JSFunction> function = CreateJSFunction(
- function_count_ + current_class_count_ + 1, start_position, length,
- parameter_count, flags, context_id);
-
- ReadFunctionPrototype(function);
- // TODO(v8:11525): Use serialized start_position and length to add
- // ClassPositions property to class.
- DeserializeFunctionProperties(function);
- DeserializeObjectPrototypeForFunction(function);
- classes_.set(current_class_count_, *function);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeObjectPrototype(Handle<Map> map) {
- auto result = ReadValue(map, 0, InternalizeStrings::kNo);
- Object prototype = std::get<0>(result);
- bool was_deferred = std::get<1>(result);
- if (!was_deferred) {
- SetPrototype(map, handle(prototype, isolate_));
- }
-}
-
-bool WebSnapshotDeserializer::IsInitialFunctionPrototype(Object prototype) {
- return prototype == isolate_->context().function_prototype() ||
- // AsyncFunction prototype.
- prototype == isolate_->context()
- .async_function_constructor()
- .instance_prototype() ||
- // GeneratorFunction prototype.
- prototype == JSFunction::cast(isolate_->context()
- .generator_function_map()
- .constructor_or_back_pointer())
- .instance_prototype() ||
- // AsyncGeneratorFunction prototype
- prototype == JSFunction::cast(isolate_->context()
- .async_generator_function_map()
- .constructor_or_back_pointer())
- .instance_prototype();
-}
-
-void WebSnapshotDeserializer::DeserializeObjectPrototypeForFunction(
- Handle<JSFunction> function) {
- Handle<Map> map(function->map(), isolate_);
- // If the function prototype is not the initial function prototype, the map
- // cannot be one of the canonical maps, because we already copied it when
- // deserializing the map for the function, so there is no need to copy it
- // again here.
- // TODO(v8:11525): Ensure we create the same map tree as for non-websnapshot
- // functions + add a test.
- auto result = ReadValue(map, 0, InternalizeStrings::kNo);
- Object prototype = std::get<0>(result);
- bool was_deferred = std::get<1>(result);
- // If we got a deferred reference, the prototype cannot be a builtin; those
- // references aren't deferred.
- // TODO(v8:11525): if the object order is relaxed, it's possible to have a
- // deferred reference to Function.prototype, and we'll need to recognize and
- // handle that case.
- if (IsInitialFunctionPrototype(prototype)) {
- DCHECK(IsInitialFunctionPrototype(function->map().prototype()));
- return;
- }
- if (!was_deferred) {
- SetPrototype(map, handle(prototype, isolate_));
- }
-}
-
-Handle<Map>
-WebSnapshotDeserializer::DeserializeObjectPrototypeAndCreateEmptyMap() {
- Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize,
- HOLEY_ELEMENTS, 0);
- auto result = ReadValue(map, 0, InternalizeStrings::kNo);
- Object prototype = std::get<0>(result);
- bool was_deferred = std::get<1>(result);
- // If we got a deferred reference, the prototype cannot be a builtin; those
- // references aren't deferred.
- // TODO(v8:11525): if the object order is relaxed, it's possible to have a
- // deferred reference to Object.prototype, and we'll need to recognize and
- // handle that case.
- if (prototype == isolate_->context().initial_object_prototype()) {
- // TODO(v8:11525): Avoid map creation (above) in this case.
- // TODO(v8:11525): Should the __proto__ be a property of the map or a
- // property of the object? Investigate which solution is better for the
- // other JS engines.
- return handle(isolate_->native_context()->object_function().initial_map(),
- isolate_);
- }
- if (!was_deferred) {
- SetPrototype(map, handle(prototype, isolate_));
- }
- return map;
-}
-
-void WebSnapshotDeserializer::SetPrototype(Handle<Map> map,
- Handle<Object> prototype) {
- if (prototype->IsJSObject()) {
- HeapObject::cast(*prototype).map().set_is_prototype_map(true);
- Map::SetPrototype(isolate_, map, Handle<JSObject>::cast(prototype));
- } else if (prototype->IsNull(isolate_)) {
- map->set_prototype(HeapObject::cast(*prototype));
- } else {
- Throw("Invalid prototype");
- }
-}
-
-template <typename T>
-void WebSnapshotDeserializer::DeserializeObjectPropertiesWithDictionaryMap(
- T dict, uint32_t property_count, bool has_custom_property_attributes) {
- for (uint32_t i = 0; i < property_count; i++) {
- Handle<Object> key(std::get<0>(ReadValue(Handle<HeapObject>(), 0,
- InternalizeStrings::kYes)),
- isolate_);
- if (!key->IsName()) {
- Throw("Invalid map key");
- return;
- }
- Handle<Object> value(std::get<0>(ReadValue()), isolate_);
- PropertyAttributes attributes = PropertyAttributes::NONE;
- if (has_custom_property_attributes) {
- uint8_t flags;
- if (!deserializer_->ReadByte(&flags)) {
- Throw("Malformed property attributes");
- return;
- }
- attributes = FlagsToAttributes(flags);
- }
-
- PropertyDetails details(PropertyKind::kData, attributes,
- PropertyDetails::kConstIfDictConstnessTracking);
- auto new_dict =
- dict->Add(isolate_, dict, Handle<Name>::cast(key), value, details);
- // The dictionary didn't grow, since it was preallocated to be large enough
- // in DeserializeObjects.
- DCHECK_EQ(*new_dict, *dict);
- USE(new_dict);
- }
-}
-
-bool WebSnapshotDeserializer::ReadMapType() {
- uint32_t map_type;
- if (!deserializer_->ReadUint32(&map_type)) {
- Throw("Malformed shape");
- return false;
- }
-
- switch (map_type) {
- case PropertyAttributesType::DEFAULT:
- return false;
- case PropertyAttributesType::CUSTOM:
- return true;
- default:
- Throw("Unsupported map type");
- return false;
- }
-}
-
-Handle<PropertyArray> WebSnapshotDeserializer::DeserializePropertyArray(
- Handle<DescriptorArray> descriptors, int no_properties) {
- Handle<PropertyArray> property_array =
- factory()->NewPropertyArray(no_properties);
- for (int i = 0; i < no_properties; ++i) {
- Object value = std::get<0>(ReadValue(property_array, i));
- DisallowGarbageCollection no_gc;
- // Read the representation from the map.
- DescriptorArray raw_descriptors = *descriptors;
- PropertyDetails details = raw_descriptors.GetDetails(InternalIndex(i));
- CHECK_EQ(details.location(), PropertyLocation::kField);
- CHECK_EQ(PropertyKind::kData, details.kind());
- Representation r = details.representation();
- if (r.IsNone()) {
- // Switch over to the wanted representation (Tagged).
- details = details.CopyWithRepresentation(Representation::Tagged());
- raw_descriptors.SetDetails(InternalIndex(i), details);
- } else if (!r.Equals(Representation::Tagged())) {
- // TODO(v8:11525): Support this case too.
- UNREACHABLE();
- }
- property_array->set(i, value);
- }
- return property_array;
-}
-
-void WebSnapshotDeserializer::DeserializeObjects() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Objects);
- if (!ReadCount(object_count_)) {
- Throw("Malformed objects table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- objects_handle_ = factory()->NewFixedArray(object_count_);
- objects_ = *objects_handle_;
- bool map_from_snapshot = false;
- for (; current_object_count_ < object_count_; ++current_object_count_) {
- uint32_t map_id;
- if (!deserializer_->ReadUint32(&map_id) || map_id >= map_count_ + 1) {
- Throw("Malformed object");
- return;
- }
- Handle<JSObject> object;
- if (map_id > 0) {
- map_id--; // Subtract 1 to get the real map_id.
- Map raw_map = Map::cast(maps_.get(map_id));
- map_from_snapshot = true;
- Handle<DescriptorArray> descriptors =
- handle(raw_map.instance_descriptors(kRelaxedLoad), isolate_);
- int no_properties = raw_map.NumberOfOwnDescriptors();
- // TODO(v8:11525): In-object properties.
- Handle<Map> map(raw_map, isolate_);
- Handle<PropertyArray> property_array =
- DeserializePropertyArray(descriptors, no_properties);
- object = factory()->NewJSObjectFromMap(map);
- object->set_raw_properties_or_hash(*property_array, kRelaxedStore);
- } else {
- Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize,
- HOLEY_ELEMENTS, 0);
- map->set_may_have_interesting_symbols(true);
- map->set_is_dictionary_map(true);
-
- bool has_custom_property_attributes = ReadMapType();
-
- uint32_t property_count;
- if (!deserializer_->ReadUint32(&property_count)) {
- Throw("Malformed object");
- return;
- }
- // TODO(v8:11525): Allow "non-map" objects which are small enough to have
- // a fast map.
- if (property_count <= kMaxNumberOfDescriptors) {
- Throw("Malformed object: too few properties for 'no map' object");
- return;
- }
-
- if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- Handle<SwissNameDictionary> swiss_dictionary =
- isolate_->factory()->NewSwissNameDictionary(property_count);
- DeserializeObjectPropertiesWithDictionaryMap(
- swiss_dictionary, property_count, has_custom_property_attributes);
- object = factory()->NewJSObjectFromMap(map);
- object->SetProperties(*swiss_dictionary);
- } else {
- Handle<NameDictionary> dictionary =
- isolate_->factory()->NewNameDictionary(property_count);
- DeserializeObjectPropertiesWithDictionaryMap(
- dictionary, property_count, has_custom_property_attributes);
- object = factory()->NewJSObjectFromMap(map);
- object->SetProperties(*dictionary);
- }
- DeserializeObjectPrototype(map);
- }
- DCHECK(!object->is_null());
-
- DeserializeObjectElements(object, map_from_snapshot);
- objects_.set(static_cast<int>(current_object_count_), *object);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeObjectElements(
- Handle<JSObject> object, bool map_from_snapshot) {
- auto [elements, elements_kind, length] = DeserializeElements();
- USE(length);
- // Ensure objects always get HOLEY_ELEMENTS or DICTIONARY_ELEMENTS: don't
- // change the elements kind if it's holey.
- DCHECK(object->HasHoleyElements());
- if (IsDictionaryElementsKind(elements_kind)) {
- DCHECK_GT(length, 0);
- Handle<Map> map(object->map(), isolate_);
- if (map_from_snapshot) {
- // Copy the map so that we don't end up modifying the maps coming from
- // the web snapshot, since they might get reused.
- // TODO(v8:11525): Is it reasonable to encode the elements kind to the
- // map? Investigate what other JS engines do.
- // TODO(v8:11525): Add a test where two objects share the map but have
- // different elements kinds.
- map = Map::Copy(isolate_, map, "Web Snapshot");
- object->set_map(*map, kReleaseStore);
- }
- map->set_elements_kind(elements_kind);
- }
- object->set_elements(*elements);
- DCHECK(object->HasHoleyElements() || object->HasDictionaryElements());
-}
-
-WebSnapshotDeserializer::ElementsType
-WebSnapshotDeserializer::ReadElementsType() {
- uint32_t elements_type;
- if (!deserializer_->ReadUint32(&elements_type)) {
- Throw("Malformed elements type");
- return ElementsType::kDense;
- }
- if (elements_type != ElementsType::kDense &&
- elements_type != ElementsType::kSparse) {
- Throw("Unknown elements type");
- return ElementsType::kDense;
- }
- return static_cast<ElementsType>(elements_type);
-}
-
-std::tuple<Handle<FixedArrayBase>, ElementsKind, uint32_t>
-WebSnapshotDeserializer::DeserializeElements() {
- uint32_t length;
- ElementsType elements_type = ReadElementsType();
- if (!deserializer_->ReadUint32(&length) || length > kMaxItemCount) {
- Throw("Malformed elements");
- return std::make_tuple(factory()->NewFixedArray(0), PACKED_SMI_ELEMENTS, 0);
- }
- if (elements_type == ElementsType::kDense) {
- // TODO(v8:11525): we need to convert the elements to dictionary mode if
- // there are too many elements for packed elements.
- return ReadDenseElements(length);
- } else {
- // TODO(v8:11525): we need to convert sparse elements to packed elements
- // (including double elements) if the elements fit into packed elements
- // kind.
- return ReadSparseElements(length);
- }
-}
-
-std::tuple<Handle<FixedArrayBase>, ElementsKind, uint32_t>
-WebSnapshotDeserializer::ReadDenseElements(uint32_t length) {
- Handle<FixedArray> elements = factory()->NewFixedArray(length);
- ElementsKind elements_kind = PACKED_SMI_ELEMENTS;
- bool has_hole = false;
- bool has_non_number = false;
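- // Infer the most specific elements kind while reading: start from
- // PACKED_SMI_ELEMENTS and widen as non-Smi values and holes are encountered.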
- for (uint32_t i = 0; i < length; ++i) {
- Object value = std::get<0>(ReadValue(elements, i));
- DisallowGarbageCollection no_gc;
- if (!value.IsSmi()) {
- elements_kind = PACKED_ELEMENTS;
- }
- if (!value.IsNumber()) {
- has_non_number = true;
- }
- if (value.IsTheHole()) {
- has_hole = true;
- }
- elements->set(static_cast<int>(i), value);
- }
- if (has_hole) {
- elements_kind =
- elements_kind == PACKED_ELEMENTS ? HOLEY_ELEMENTS : HOLEY_SMI_ELEMENTS;
- }
- // If all elements are numbers but not all of them are Smis, we can convert
- // the array to a double array.
- if (!has_non_number && !IsSmiElementsKind(elements_kind)) {
- DCHECK(IsObjectElementsKind(elements_kind));
- ElementsKind new_elements_kind =
- has_hole ? HOLEY_DOUBLE_ELEMENTS : PACKED_DOUBLE_ELEMENTS;
- Handle<FixedArrayBase> new_elements =
- isolate_->factory()->NewFixedDoubleArray(length);
- ElementsAccessor* element_accessor =
- ElementsAccessor::ForKind(new_elements_kind);
- element_accessor->CopyElements(isolate_, elements, elements_kind,
- new_elements, length);
- return std::make_tuple(new_elements, new_elements_kind, length);
- }
- return std::make_tuple(elements, elements_kind, length);
-}
-
-std::tuple<Handle<FixedArrayBase>, ElementsKind, uint32_t>
-WebSnapshotDeserializer::ReadSparseElements(uint32_t length) {
- Handle<NumberDictionary> dict = NumberDictionary::New(isolate_, length);
- uint32_t max_element_index = 0;
- for (uint32_t i = 0; i < length; ++i) {
- uint32_t element_index;
- if (!deserializer_->ReadUint32(&element_index)) {
- Throw("Malformed element index in sparse elements");
- return std::make_tuple(dict, DICTIONARY_ELEMENTS, 0);
- }
- Object value = std::get<0>(ReadValue(dict, element_index));
- Handle<NumberDictionary> new_dict =
- dict->Set(isolate_, dict, element_index, handle(value, isolate_));
- // The number dictionary didn't grow, since it was preallocated to be
- // large enough before.
- DCHECK_EQ(*new_dict, *dict);
- USE(new_dict);
- if (element_index > max_element_index) {
- max_element_index = element_index;
- }
- }
- // This bypasses JSObject::RequireSlowElements, which is fine while we're
- // setting up objects from the web snapshot.
- dict->UpdateMaxNumberKey(max_element_index, Handle<JSObject>());
- return std::make_tuple(dict, DICTIONARY_ELEMENTS, max_element_index + 1);
-}
-
-void WebSnapshotDeserializer::DeserializeArrays() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Arrays);
- if (!ReadCount(array_count_)) {
- Throw("Malformed array table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- arrays_handle_ = factory()->NewFixedArray(array_count_);
- arrays_ = *arrays_handle_;
- for (; current_array_count_ < array_count_; ++current_array_count_) {
- auto [elements, elements_kind, length] = DeserializeElements();
- Handle<JSArray> array;
-
- if (IsDictionaryElementsKind(elements_kind)) {
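- // Sparse case: create an empty array, then install the dictionary
- // elements, the length and the transitioned map directly.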
- array = isolate_->factory()->NewJSArray(0);
-
- Handle<Object> array_length =
- isolate_->factory()->NewNumberFromUint(length);
- Handle<Map> map =
- JSObject::GetElementsTransitionMap(array, DICTIONARY_ELEMENTS);
- array->set_length(*array_length);
- array->set_elements(*elements);
- array->set_map(*map, kReleaseStore);
- } else {
- array =
- factory()->NewJSArrayWithElements(elements, elements_kind, length);
- }
- DCHECK(!array->is_null());
- arrays_.set(static_cast<int>(current_array_count_), *array);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeArrayBuffers() {
- RCS_SCOPE(isolate_,
- RuntimeCallCounterId::kWebSnapshotDeserialize_ArrayBuffers);
- if (!ReadCount(array_buffer_count_)) {
- Throw("Malformed array buffer table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- array_buffers_handle_ = factory()->NewFixedArray(array_buffer_count_);
- array_buffers_ = *array_buffers_handle_;
- for (; current_array_buffer_count_ < array_buffer_count_;
- ++current_array_buffer_count_) {
- uint8_t flags;
- uint32_t byte_length;
- if (!deserializer_->ReadByte(&flags) ||
- !deserializer_->ReadUint32(&byte_length) ||
- byte_length > static_cast<size_t>(deserializer_->end_ -
- deserializer_->position_)) {
- Throw("Malformed array buffer");
- return;
- }
-
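- // Reject array buffers whose flags contain bits outside the known
- // bit fields.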
- uint32_t mask = DetachedBitField::kMask | SharedBitField::kMask |
- ResizableBitField::kMask;
- if ((flags | mask) != mask) {
- Throw("Malformed array buffer");
- return;
- }
- bool was_detached = DetachedBitField::decode(flags);
- CHECK_IMPLIES(was_detached, (byte_length == 0));
- SharedFlag shared = SharedBitField::decode(flags) ? SharedFlag::kShared
- : SharedFlag::kNotShared;
- CHECK_IMPLIES(was_detached, (shared == SharedFlag::kNotShared));
- ResizableFlag resizable = ResizableBitField::decode(flags)
- ? ResizableFlag::kResizable
- : ResizableFlag::kNotResizable;
- uint32_t max_byte_length = byte_length;
- if (resizable == ResizableFlag::kResizable) {
- if (!deserializer_->ReadUint32(&max_byte_length)) {
- Throw("Malformed array buffer");
- return;
- }
- CHECK_GE(max_byte_length, byte_length);
- }
-
- Handle<Map> map;
- if (shared == SharedFlag::kNotShared) {
- map = handle(
- isolate_->raw_native_context().array_buffer_fun().initial_map(),
- isolate_);
- } else {
- map = handle(isolate_->raw_native_context()
- .shared_array_buffer_fun()
- .initial_map(),
- isolate_);
- }
- Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, AllocationType::kYoung));
- array_buffer->Setup(shared, resizable, nullptr, isolate_);
-
- std::unique_ptr<BackingStore> backing_store;
- if (was_detached) {
- array_buffer->set_was_detached(true);
- } else {
- if (resizable == ResizableFlag::kNotResizable) {
- backing_store = BackingStore::Allocate(isolate_, byte_length, shared,
- InitializedFlag::kUninitialized);
- } else {
- size_t page_size, initial_pages, max_pages;
- if (JSArrayBuffer::GetResizableBackingStorePageConfiguration(
- isolate_, byte_length, max_byte_length, kThrowOnError,
- &page_size, &initial_pages, &max_pages)
- .IsNothing()) {
- Throw("Create array buffer failed");
- return;
- }
- backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
- isolate_, byte_length, max_byte_length, page_size, initial_pages,
- max_pages, WasmMemoryFlag::kNotWasm, shared);
- }
- if (!backing_store) {
- Throw("Create array buffer failed");
- return;
- }
- array_buffer->Attach(std::move(backing_store));
- }
-
- array_buffer->set_max_byte_length(max_byte_length);
-
- if (byte_length > 0) {
- memcpy(array_buffer->backing_store(), deserializer_->position_,
- byte_length);
- }
- deserializer_->position_ += byte_length;
- array_buffers_.set(static_cast<int>(current_array_buffer_count_),
- *array_buffer);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeDataViews() {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_DataViews);
- if (!ReadCount(data_view_count_)) {
- Throw("Malformed data view table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- data_views_handle_ = factory()->NewFixedArray(data_view_count_);
- data_views_ = *data_views_handle_;
- for (; current_data_view_count_ < data_view_count_;
- ++current_data_view_count_) {
- Handle<JSArrayBuffer> array_buffer(
- JSArrayBuffer::cast(std::get<0>(ReadValue())), isolate_);
- uint32_t byte_offset = 0;
- uint8_t flags = 0;
- if (!deserializer_->ReadByte(&flags) ||
- !deserializer_->ReadUint32(&byte_offset)) {
- Throw("Malformed data view");
- return;
- }
-
- Handle<Map> map(
- isolate_->raw_native_context().data_view_fun().initial_map(), isolate_);
- uint32_t mask = LengthTrackingBitField::kMask;
- if ((flags | mask) != mask) {
- Throw("Malformed data view");
- return;
- }
-
- uint32_t byte_length = 0;
- bool is_length_tracking = LengthTrackingBitField::decode(flags);
-
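- // A length-tracking view gets its length from the resizable buffer;
- // otherwise an explicit byte length follows in the snapshot.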
- if (is_length_tracking) {
- CHECK(array_buffer->is_resizable_by_js());
- } else {
- if (!deserializer_->ReadUint32(&byte_length)) {
- Throw("Malformed data view");
- return;
- }
- }
-
- Handle<JSDataView> data_view =
- Handle<JSDataView>::cast(factory()->NewJSArrayBufferView(
- map, factory()->empty_fixed_array(), array_buffer, byte_offset,
- byte_length));
-
- {
- DisallowGarbageCollection no_gc;
- JSDataView raw_data_view = *data_view;
- JSArrayBuffer raw_array_buffer = *array_buffer;
- raw_data_view.set_data_pointer(
- isolate_, static_cast<uint8_t*>(raw_array_buffer.backing_store()) +
- byte_offset);
- raw_data_view.set_is_length_tracking(is_length_tracking);
- raw_data_view.set_is_backed_by_rab(!raw_array_buffer.is_shared() &&
- raw_array_buffer.is_resizable_by_js());
- }
-
- data_views_.set(static_cast<int>(current_data_view_count_), *data_view);
- }
-}
-
-bool WebSnapshotDeserializer::ReadCount(uint32_t& count) {
- return deserializer_->ReadUint32(&count) && count <= kMaxItemCount;
-}
-
-void WebSnapshotDeserializer::DeserializeTypedArrays() {
- RCS_SCOPE(isolate_,
- RuntimeCallCounterId::kWebSnapshotDeserialize_TypedArrays);
- if (!ReadCount(typed_array_count_)) {
- Throw("Malformed typed array table");
- return;
- }
- static_assert(kMaxItemCount <= FixedArray::kMaxLength);
- typed_arrays_handle_ = factory()->NewFixedArray(typed_array_count_);
- typed_arrays_ = *typed_arrays_handle_;
- for (; current_typed_array_count_ < typed_array_count_;
- ++current_typed_array_count_) {
- uint32_t typed_array_type;
- if (!deserializer_->ReadUint32(&typed_array_type)) {
- Throw("Malformed array buffer");
- return;
- }
- Handle<JSArrayBuffer> array_buffer(
- JSArrayBuffer::cast(std::get<0>(ReadValue())), isolate_);
- uint32_t byte_offset = 0;
- uint8_t flags = 0;
- if (!deserializer_->ReadByte(&flags) ||
- !deserializer_->ReadUint32(&byte_offset)) {
- Throw("Malformed typed array");
- return;
- }
- size_t element_size = 0;
- ElementsKind element_kind = UINT8_ELEMENTS;
- JSTypedArray::ForFixedTypedArray(
- TypedArrayTypeToExternalArrayType(
- static_cast<TypedArrayType>(typed_array_type)),
- &element_size, &element_kind);
-
- Handle<Map> map(
- isolate_->raw_native_context().TypedArrayElementsKindToCtorMap(
- element_kind),
- isolate_);
- uint32_t mask = LengthTrackingBitField::kMask;
- if ((flags | mask) != mask) {
- Throw("Malformed typed array");
- return;
- }
-
- if (byte_offset % element_size != 0) {
- Throw("Malformed typed array");
- return;
- }
-
- uint32_t byte_length = 0;
- size_t length = 0;
- bool is_length_tracking = LengthTrackingBitField::decode(flags);
-
- if (is_length_tracking) {
- CHECK(array_buffer->is_resizable_by_js());
- } else {
- if (!deserializer_->ReadUint32(&byte_length)) {
- Throw("Malformed typed array");
- return;
- }
- if (byte_length % element_size != 0) {
- Throw("Malformed typed array");
- return;
- }
- length = byte_length / element_size;
- if (length > JSTypedArray::kMaxLength) {
- Throw("Too large typed array");
- return;
- }
- }
-
- bool rabGsab = array_buffer->is_resizable_by_js() &&
- (!array_buffer->is_shared() || is_length_tracking);
- if (rabGsab) {
- map = handle(
- isolate_->raw_native_context().TypedArrayElementsKindToRabGsabCtorMap(
- element_kind),
- isolate_);
- }
-
- Handle<JSTypedArray> typed_array =
- Handle<JSTypedArray>::cast(factory()->NewJSArrayBufferView(
- map, factory()->empty_byte_array(), array_buffer, byte_offset,
- byte_length));
-
- {
- DisallowGarbageCollection no_gc;
- JSTypedArray raw = *typed_array;
- raw.set_length(length);
- raw.SetOffHeapDataPtr(isolate_, array_buffer->backing_store(),
- byte_offset);
- raw.set_is_length_tracking(is_length_tracking);
- raw.set_is_backed_by_rab(array_buffer->is_resizable_by_js() &&
- !array_buffer->is_shared());
- }
-
- typed_arrays_.set(static_cast<int>(current_typed_array_count_),
- *typed_array);
- }
-}
-
-void WebSnapshotDeserializer::DeserializeExports(bool skip_exports) {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Exports);
- uint32_t count;
- if (!ReadCount(count)) {
- Throw("Malformed export table");
- return;
- }
-
- if (skip_exports) {
- // In skip_exports mode, we read the exports but don't do anything with
- // them. This is useful for stress testing; otherwise the GlobalDictionary
- // handling below would dominate.
- for (uint32_t i = 0; i < count; ++i) {
- Handle<String> export_name(ReadString(InternalizeStrings::kYes),
- isolate_);
- // No deferred references should occur at this point, since all objects
- // have been deserialized.
- Object export_value = std::get<0>(ReadValue());
-#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap) {
- export_value.ObjectVerify(isolate_);
- }
-#endif
- USE(export_name);
- USE(export_value);
- }
- return;
- }
-
- // Pre-reserve the space for the properties we're going to add to the global
- // object.
- Handle<JSGlobalObject> global = isolate_->global_object();
- Handle<GlobalDictionary> dictionary(
- global->global_dictionary(isolate_, kAcquireLoad), isolate_);
-
- dictionary = GlobalDictionary::EnsureCapacity(
- isolate_, dictionary, dictionary->NumberOfElements() + count,
- AllocationType::kYoung);
- bool has_exported_values = false;
-
- // TODO(v8:11525): The code below skips checks, in particular
- // LookupIterator::UpdateProtectors and
- // LookupIterator::ExtendingNonExtensible.
- InternalIndex entry = InternalIndex::NotFound();
- for (uint32_t i = 0; i < count; ++i) {
- Handle<String> export_name(ReadString(InternalizeStrings::kYes), isolate_);
- // No deferred references should occur at this point, since all objects have
- // been deserialized.
- Object export_value = std::get<0>(ReadValue());
-#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap) {
- export_value.ObjectVerify(isolate_);
- }
-#endif
-
- if (export_name->length() == 0 && i == 0) {
- // Hack: treat the first empty-string-named export value as a return value
- // from the deserializer.
- CHECK_EQ(i, 0);
- return_value_ = handle(export_value, isolate_);
- continue;
- }
-
- DisallowGarbageCollection no_gc;
- // Check for the correctness of the snapshot (thus far) before producing
- // something observable. TODO(v8:11525): Strictly speaking, we should
- // produce observable effects only when we know that the whole snapshot is
- // correct.
- if (has_error()) return;
-
- PropertyDetails property_details =
- PropertyDetails(PropertyKind::kData, NONE,
- PropertyCell::InitialType(isolate_, export_value));
- Handle<Object> export_value_handle(export_value, isolate_);
- AllowGarbageCollection allow_gc;
- Handle<PropertyCell> transition_cell = factory()->NewPropertyCell(
- export_name, property_details, export_value_handle);
- dictionary =
- GlobalDictionary::Add(isolate_, dictionary, export_name,
- transition_cell, property_details, &entry);
- has_exported_values = true;
- }
-
- if (!has_exported_values) return;
-
- global->set_global_dictionary(*dictionary, kReleaseStore);
- JSObject::InvalidatePrototypeChains(global->map(isolate_));
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadValue(
- Handle<HeapObject> container, uint32_t container_index,
- InternalizeStrings internalize_strings) {
- uint8_t value_type;
- if (!deserializer_->ReadByte(&value_type)) {
- Throw("Malformed variable");
- // Return a placeholder "value" so that the "keep on trucking" error
- // handling won't fail.
- return std::make_tuple(Smi::zero(), false);
- }
- switch (value_type) {
- case ValueType::FALSE_CONSTANT:
- return std::make_tuple(roots_.false_value(), false);
- case ValueType::TRUE_CONSTANT:
- return std::make_tuple(roots_.true_value(), false);
- case ValueType::NULL_CONSTANT:
- return std::make_tuple(roots_.null_value(), false);
- case ValueType::UNDEFINED_CONSTANT:
- return std::make_tuple(roots_.undefined_value(), false);
- case ValueType::NO_ELEMENT_CONSTANT:
- return std::make_tuple(roots_.the_hole_value(), false);
- case ValueType::INTEGER:
- return std::make_tuple(ReadInteger(), false);
- case ValueType::DOUBLE:
- return std::make_tuple(ReadNumber(), false);
- case ValueType::STRING_ID:
- return std::make_tuple(ReadString(internalize_strings), false);
- case ValueType::ARRAY_ID:
- return ReadArray(container, container_index);
- case ValueType::OBJECT_ID:
- return ReadObject(container, container_index);
- case ValueType::FUNCTION_ID:
- return ReadFunction(container, container_index);
- case ValueType::CLASS_ID:
- return ReadClass(container, container_index);
- case ValueType::REGEXP:
- return std::make_tuple(ReadRegexp(), false);
- case ValueType::SYMBOL_ID:
- return std::make_tuple(ReadSymbol(), false);
- case ValueType::BIGINT_ID:
- return std::make_tuple(ReadBigInt(), false);
- case ValueType::EXTERNAL_ID:
- return std::make_tuple(ReadExternalReference(), false);
- case ValueType::BUILTIN_OBJECT_ID:
- return std::make_tuple(ReadBuiltinObjectReference(), false);
- case ValueType::IN_PLACE_STRING_ID:
- return std::make_tuple(ReadInPlaceString(internalize_strings), false);
- case ValueType::ARRAY_BUFFER_ID:
- return ReadArrayBuffer(container, container_index);
- case ValueType::TYPED_ARRAY_ID:
- return ReadTypedArray(container, container_index);
- case ValueType::DATA_VIEW_ID:
- return ReadDataView(container, container_index);
- default:
- // TODO(v8:11525): Handle other value types.
- Throw("Unsupported value type");
- return std::make_tuple(Smi::zero(), false);
- }
-}
-
-Object WebSnapshotDeserializer::ReadInteger() {
- Maybe<int32_t> number = deserializer_->ReadZigZag<int32_t>();
- if (number.IsNothing()) {
- Throw("Malformed integer");
- return Smi::zero();
- }
- return *factory()->NewNumberFromInt(number.FromJust());
-}
-
-Object WebSnapshotDeserializer::ReadNumber() {
- double number;
- if (!deserializer_->ReadDouble(&number)) {
- Throw("Malformed double");
- return Smi::zero();
- }
- return *factory()->NewNumber(number);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadArray(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t array_id;
- if (!deserializer_->ReadUint32(&array_id) || array_id >= kMaxItemCount) {
- Throw("Malformed variable");
- return std::make_tuple(Smi::zero(), false);
- }
- if (array_id < current_array_count_) {
- return std::make_tuple(arrays_.get(array_id), false);
- }
- // The array hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, ARRAY_ID, array_id), true);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadArrayBuffer(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t array_buffer_id;
- if (!deserializer_->ReadUint32(&array_buffer_id) ||
- array_buffer_id >= kMaxItemCount) {
- Throw("Malformed variable");
- return std::make_tuple(Smi::zero(), false);
- }
- if (array_buffer_id < current_array_buffer_count_) {
- return std::make_tuple(array_buffers_.get(array_buffer_id), false);
- }
- // The array buffer hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, ARRAY_BUFFER_ID, array_buffer_id),
- true);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadTypedArray(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t typed_array_id;
- if (!deserializer_->ReadUint32(&typed_array_id) ||
- typed_array_id >= kMaxItemCount) {
- Throw("Malformed variable");
- return std::make_tuple(Smi::zero(), false);
- }
- if (typed_array_id < current_typed_array_count_) {
- return std::make_tuple(typed_arrays_.get(typed_array_id), false);
- }
- // The typed array hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, TYPED_ARRAY_ID, typed_array_id),
- true);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadDataView(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t data_view_id;
- if (!deserializer_->ReadUint32(&data_view_id) ||
- data_view_id >= kMaxItemCount) {
- Throw("Malformed variable");
- return std::make_tuple(Smi::zero(), false);
- }
- if (data_view_id < current_data_view_count_) {
- return std::make_tuple(data_views_.get(data_view_id), false);
- }
- // The data view hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, DATA_VIEW_ID, data_view_id), true);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadObject(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t object_id;
- if (!deserializer_->ReadUint32(&object_id) || object_id > kMaxItemCount) {
- Throw("Malformed variable");
- return std::make_tuple(Smi::zero(), false);
- }
- if (object_id < current_object_count_) {
- return std::make_tuple(objects_.get(object_id), false);
- }
- // The object hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, OBJECT_ID, object_id), true);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadFunction(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t function_id;
- if (!deserializer_->ReadUint32(&function_id)) {
- Throw("Malformed object property");
- return std::make_tuple(Smi::zero(), false);
- }
- if (function_id < current_function_count_) {
- return std::make_tuple(functions_.get(function_id), false);
- }
- // The function hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, FUNCTION_ID, function_id), true);
-}
-
-std::tuple<Object, bool> WebSnapshotDeserializer::ReadClass(
- Handle<HeapObject> container, uint32_t index) {
- uint32_t class_id;
- if (!deserializer_->ReadUint32(&class_id) || class_id >= kMaxItemCount) {
- Throw("Malformed object property");
- return std::make_tuple(Smi::zero(), false);
- }
- if (class_id < current_class_count_) {
- return std::make_tuple(classes_.get(class_id), false);
- }
- // The class hasn't been deserialized yet.
- return std::make_tuple(
- AddDeferredReference(container, index, CLASS_ID, class_id), true);
-}
-
-Object WebSnapshotDeserializer::ReadRegexp() {
- Handle<String> pattern(ReadString(), isolate_);
- Handle<String> flags_string(ReadString(), isolate_);
- base::Optional<JSRegExp::Flags> flags =
- JSRegExp::FlagsFromString(isolate_, flags_string);
- if (!flags.has_value()) {
- Throw("Malformed flags in regular expression");
- return Smi::zero();
- }
- MaybeHandle<JSRegExp> maybe_regexp =
- JSRegExp::New(isolate_, pattern, flags.value());
- Handle<JSRegExp> regexp;
- if (!maybe_regexp.ToHandle(&regexp)) {
- Throw("Malformed RegExp");
- return Smi::zero();
- }
- return *regexp;
-}
-
-Object WebSnapshotDeserializer::ReadExternalReference() {
- uint32_t ref_id;
- if (!deserializer_->ReadUint32(&ref_id) ||
- ref_id >= static_cast<uint32_t>(external_references_.length())) {
- Throw("Invalid external reference");
- return Smi::zero();
- }
- return external_references_.get(ref_id);
-}
-
-Object WebSnapshotDeserializer::ReadBuiltinObjectReference() {
- uint32_t ref_id;
- if (!deserializer_->ReadUint32(&ref_id) ||
- ref_id >= static_cast<uint32_t>(builtin_objects_.length())) {
- Throw("Invalid builtin object reference");
- return Smi::zero();
- }
- return builtin_objects_.get(ref_id);
-}
-
-void WebSnapshotDeserializer::ReadFunctionPrototype(
- Handle<JSFunction> function) {
- uint32_t object_id;
-
- if (!deserializer_->ReadUint32(&object_id) || object_id > kMaxItemCount + 1) {
- Throw("Malformed class / function");
- return;
- }
- if (object_id == 0) {
- // No prototype.
- return;
- }
- --object_id;
- if (object_id < current_object_count_) {
- if (!SetFunctionPrototype(*function,
- JSReceiver::cast(objects_.get(object_id)))) {
- Throw("Can't reuse function prototype");
- return;
- }
- } else {
- // The object hasn't been deserialized yet.
- AddDeferredReference(function, 0, OBJECT_ID, object_id);
- }
-}
-
-bool WebSnapshotDeserializer::SetFunctionPrototype(JSFunction function,
- JSReceiver prototype) {
- DisallowGarbageCollection no_gc;
- // TODO(v8:11525): Enforce the invariant that no two prototypes share a map.
- Map map = prototype.map();
- map.set_is_prototype_map(true);
- if (!map.constructor_or_back_pointer().IsNullOrUndefined(isolate_)) {
- return false;
- }
- map.set_constructor_or_back_pointer(function);
- function.set_prototype_or_initial_map(prototype, kReleaseStore);
- return true;
-}
-
-HeapObject WebSnapshotDeserializer::AddDeferredReference(
- Handle<HeapObject> container, uint32_t index, ValueType target_type,
- uint32_t target_index) {
- if (container.is_null()) {
- const char* message = "Invalid reference";
- switch (target_type) {
- case ARRAY_ID:
- message = "Invalid array reference";
- break;
- case OBJECT_ID:
- message = "Invalid object reference";
- break;
- case CLASS_ID:
- message = "Invalid class reference";
- break;
- case FUNCTION_ID:
- message = "Invalid function reference";
- break;
- default:
- break;
- }
- Throw(message);
- return roots_.undefined_value();
- }
- DCHECK(container->IsPropertyArray() || container->IsContext() ||
- container->IsFixedArray() || container->IsJSFunction() ||
- container->IsMap());
- deferred_references_ = ArrayList::Add(
- isolate_, deferred_references_, container, Smi::FromInt(index),
- Smi::FromInt(target_type), Smi::FromInt(target_index));
- // Use HeapObject as placeholder since this might break elements kinds.
- return roots_.undefined_value();
-}
-
-void WebSnapshotDeserializer::ProcessDeferredReferences() {
- // Check for error now, since the FixedArrays below might not have been
- // created if there was an error.
- if (has_error()) return;
-
- DisallowGarbageCollection no_gc;
- ArrayList raw_deferred_references = *deferred_references_;
-
- // Deferred references are stored as (object, index, target type, target index)
- // tuples.
- for (int i = 0; i < raw_deferred_references.Length() - 3; i += 4) {
- HeapObject container = HeapObject::cast(raw_deferred_references.Get(i));
- int index = raw_deferred_references.Get(i + 1).ToSmi().value();
- ValueType target_type = static_cast<ValueType>(
- raw_deferred_references.Get(i + 2).ToSmi().value());
- int target_index = raw_deferred_references.Get(i + 3).ToSmi().value();
- Object target;
- switch (target_type) {
- case FUNCTION_ID:
- if (static_cast<uint32_t>(target_index) >= function_count_) {
- // Throw can allocate, but it's ok, since we're not using the raw
- // pointers after that.
- AllowGarbageCollection allow_gc;
- Throw("Invalid function reference");
- return;
- }
- target = functions_.get(target_index);
- break;
- case CLASS_ID:
- if (static_cast<uint32_t>(target_index) >= class_count_) {
- AllowGarbageCollection allow_gc;
- Throw("Invalid class reference");
- return;
- }
- target = classes_.get(target_index);
- break;
- case ARRAY_ID:
- if (static_cast<uint32_t>(target_index) >= array_count_) {
- AllowGarbageCollection allow_gc;
- Throw("Invalid array reference");
- return;
- }
- target = arrays_.get(target_index);
- break;
- case ARRAY_BUFFER_ID:
- if (static_cast<uint32_t>(target_index) >= array_buffer_count_) {
- AllowGarbageCollection allow_gc;
- Throw("Invalid array buffer reference");
- return;
- }
- target = array_buffers_.get(target_index);
- break;
- case TYPED_ARRAY_ID:
- if (static_cast<uint32_t>(target_index) >= typed_array_count_) {
- AllowGarbageCollection allow_gc;
- Throw("Invalid typed array reference");
- return;
- }
- target = typed_arrays_.get(target_index);
- break;
- case DATA_VIEW_ID:
- if (static_cast<uint32_t>(target_index) >= data_view_count_) {
- AllowGarbageCollection allow_gc;
- Throw("Invalid data view reference");
- return;
- }
- target = data_views_.get(target_index);
- break;
- case OBJECT_ID:
- if (static_cast<uint32_t>(target_index) >= object_count_) {
- AllowGarbageCollection allow_gc;
- Throw("Invalid object reference");
- return;
- }
- target = objects_.get(target_index);
- break;
- default:
- UNREACHABLE();
- }
- InstanceType instance_type = container.map().instance_type();
- if (InstanceTypeChecker::IsPropertyArray(instance_type)) {
- PropertyArray::cast(container).set(index, target);
- } else if (InstanceTypeChecker::IsContext(instance_type)) {
- Context::cast(container).set(index, target);
- } else if (InstanceTypeChecker::IsNumberDictionary(instance_type)) {
- // NumberDictionary::Set may need to create a HeapNumber for the index.
- AllowGarbageCollection allow_gc;
- Handle<NumberDictionary> new_container = NumberDictionary::Set(
- isolate_, handle(NumberDictionary::cast(container), isolate_), index,
- handle(target, isolate_));
- // The number dictionary didn't grow, since it was preallocated to be
- // large enough in DeserializeArrays.
- DCHECK_EQ(*new_container, container);
- USE(new_container);
- // We also need to reload raw_deferred_references because
- // NumberDictionary::Set may allocate.
- raw_deferred_references = *deferred_references_;
- } else if (InstanceTypeChecker::IsFixedArray(instance_type)) {
- FixedArray::cast(container).set(index, target);
- } else if (InstanceTypeChecker::IsJSFunction(instance_type)) {
- // The only deferred reference allowed for a JSFunction is the function
- // prototype.
- DCHECK_EQ(index, 0);
- DCHECK(target.IsJSReceiver());
- if (!SetFunctionPrototype(JSFunction::cast(container),
- JSReceiver::cast(target))) {
- AllowGarbageCollection allow_gc;
- Throw("Can't reuse function prototype");
- return;
- }
- } else if (InstanceTypeChecker::IsMap(instance_type)) {
- // The only deferred reference allowed for a Map is the __proto__.
- DCHECK_EQ(index, 0);
- DCHECK(target.IsJSReceiver());
- AllowGarbageCollection allow_gc;
- SetPrototype(handle(Map::cast(container), isolate_),
- handle(target, isolate_));
- raw_deferred_references = *deferred_references_;
- } else {
- UNREACHABLE();
- }
- }
- deferred_references_->SetLength(0);
-}
-
-} // namespace internal
-} // namespace v8
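
The deleted deserializer above resolves forward references lazily: when a read hits an id that has not been materialized yet, AddDeferredReference records a (container, index, target type, target index) tuple, and ProcessDeferredReferences patches the slots once every table is populated. Below is a minimal standalone sketch of that fixup pattern using plain std::vector containers and hypothetical names, not V8's heap types.

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // One recorded forward reference: which container slot to patch, and the
    // id of the item that was not yet deserialized when the slot was written.
    struct DeferredRef {
      size_t container;
      size_t slot;
      uint32_t target_id;
    };

    // Patch all recorded slots once the target table is fully populated,
    // rejecting ids that never materialized (the "Invalid ... reference" case).
    void ProcessDeferred(std::vector<std::vector<int>>& containers,
                         const std::vector<int>& targets,
                         const std::vector<DeferredRef>& deferred) {
      for (const DeferredRef& ref : deferred) {
        if (ref.target_id >= targets.size()) {
          throw std::runtime_error("invalid deferred reference");
        }
        containers[ref.container][ref.slot] = targets[ref.target_id];
      }
    }
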
diff --git a/deps/v8/src/web-snapshot/web-snapshot.h b/deps/v8/src/web-snapshot/web-snapshot.h
deleted file mode 100644
index 1de4efa46b..0000000000
--- a/deps/v8/src/web-snapshot/web-snapshot.h
+++ /dev/null
@@ -1,669 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
-#define V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
-
-#include <queue>
-
-#include "src/handles/handles.h"
-#include "src/objects/bigint.h"
-#include "src/objects/value-serializer.h"
-#include "src/snapshot/serializer.h" // For ObjectCacheIndexMap
-
-namespace v8 {
-
-class Context;
-class Isolate;
-
-template <typename T>
-class Local;
-
-namespace internal {
-
-class Context;
-class Map;
-class Object;
-class String;
-
-struct WebSnapshotData : public std::enable_shared_from_this<WebSnapshotData> {
- uint8_t* buffer = nullptr;
- size_t buffer_size = 0;
- WebSnapshotData() = default;
- WebSnapshotData(const WebSnapshotData&) = delete;
- WebSnapshotData& operator=(const WebSnapshotData&) = delete;
- ~WebSnapshotData() { free(buffer); }
-};
-
-class WebSnapshotSerializerDeserializer {
- public:
- inline bool has_error() const { return error_message_ != nullptr; }
- const char* error_message() const { return error_message_; }
-
- enum ValueType : uint8_t {
- FALSE_CONSTANT,
- TRUE_CONSTANT,
- NULL_CONSTANT,
- UNDEFINED_CONSTANT,
- // It corresponds to the hole value.
- NO_ELEMENT_CONSTANT,
- INTEGER,
- DOUBLE,
- REGEXP,
- STRING_ID,
- ARRAY_ID,
- OBJECT_ID,
- FUNCTION_ID,
- CLASS_ID,
- SYMBOL_ID,
- EXTERNAL_ID,
- BUILTIN_OBJECT_ID,
- IN_PLACE_STRING_ID,
- ARRAY_BUFFER_ID,
- TYPED_ARRAY_ID,
- DATA_VIEW_ID,
- BIGINT_ID
- };
-
- enum SymbolType : uint8_t {
- kNonGlobalNoDesription = 0,
- kNonGlobal = 1,
- kGlobal = 2
- };
-
- enum ElementsType : uint8_t { kDense = 0, kSparse = 1 };
-
- enum TypedArrayType : uint8_t {
- kInt8Array,
- kUint8Array,
- kUint8ClampedArray,
- kInt16Array,
- kUint16Array,
- kInt32Array,
- kUint32Array,
- kFloat32Array,
- kFloat64Array,
- kBigInt64Array,
- kBigUint64Array,
- };
-
- static inline ExternalArrayType TypedArrayTypeToExternalArrayType(
- TypedArrayType type);
- static inline TypedArrayType ExternalArrayTypeToTypedArrayType(
- ExternalArrayType type);
-
- static constexpr uint8_t kMagicNumber[4] = {'+', '+', '+', ';'};
-
- enum ContextType : uint8_t { FUNCTION, BLOCK };
-
- enum PropertyAttributesType : uint8_t { DEFAULT, CUSTOM };
-
- uint8_t FunctionKindToFunctionFlags(FunctionKind kind);
- FunctionKind FunctionFlagsToFunctionKind(uint8_t flags);
- bool IsFunctionOrMethod(uint8_t flags);
- bool IsConstructor(uint8_t flags);
-
- uint8_t GetDefaultAttributeFlags();
- uint8_t AttributesToFlags(PropertyDetails details);
- PropertyAttributes FlagsToAttributes(uint8_t flags);
-
- uint8_t ArrayBufferViewKindToFlags(
- Handle<JSArrayBufferView> array_buffer_view);
-
- uint8_t ArrayBufferKindToFlags(Handle<JSArrayBuffer> array_buffer);
-
- uint32_t BigIntSignAndLengthToFlags(Handle<BigInt> bigint);
- uint32_t BigIntFlagsToBitField(uint32_t flags);
- // The maximum count of items for each value type (strings, objects etc.)
- static constexpr uint32_t kMaxItemCount =
- static_cast<uint32_t>(FixedArray::kMaxLength - 1);
- // This ensures indices and lengths can be converted between uint32_t and int
- // without problems:
- static_assert(kMaxItemCount <
- static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
-
- protected:
- explicit WebSnapshotSerializerDeserializer(Isolate* isolate)
- : isolate_(isolate) {}
- // Not virtual, on purpose (because it doesn't need to be).
- void Throw(const char* message);
-
- void IterateBuiltinObjects(
- std::function<void(Handle<String>, Handle<HeapObject>)> func);
-
- static constexpr int kBuiltinObjectCount = 12;
-
- inline Factory* factory() const { return isolate_->factory(); }
-
- Isolate* isolate_;
- const char* error_message_ = nullptr;
-
- // Encode JSArrayBufferFlags, including was_detached, is_shared, is_resizable.
- // DetachedBitField indicates whether the ArrayBuffer was detached.
- using DetachedBitField = base::BitField<bool, 0, 1, uint8_t>;
- // SharedBitField indicates whether the ArrayBuffer is SharedArrayBuffer.
- using SharedBitField = DetachedBitField::Next<bool, 1>;
- // ResizableBitField indicates whether the ArrayBuffer is ResizableArrayBuffer
- // or GrowableSharedArrayBuffer.
- using ResizableBitField = SharedBitField::Next<bool, 1>;
-
- // Encode JSArrayBufferViewFlags, including is_length_tracking, see
- // https://github.com/tc39/proposal-resizablearraybuffer.
- // LengthTrackingBitField indicates whether the ArrayBufferView should track
- // the length of the backing buffer, that is whether the ArrayBufferView is
- // constructed without the specified length argument.
- using LengthTrackingBitField = base::BitField<bool, 0, 1, uint8_t>;
-
- // Encode BigInt's sign and digits length.
- using BigIntSignBitField = base::BitField<bool, 0, 1>;
- using BigIntLengthBitField =
- BigIntSignBitField::Next<int, BigInt::kLengthFieldBits>;
- static_assert(BigIntLengthBitField::kSize == BigInt::LengthBits::kSize);
-
- private:
- WebSnapshotSerializerDeserializer(const WebSnapshotSerializerDeserializer&) =
- delete;
- WebSnapshotSerializerDeserializer& operator=(
- const WebSnapshotSerializerDeserializer&) = delete;
-
- using AsyncFunctionBitField = base::BitField<bool, 0, 1, uint8_t>;
- using GeneratorFunctionBitField = AsyncFunctionBitField::Next<bool, 1>;
- using ArrowFunctionBitField = GeneratorFunctionBitField::Next<bool, 1>;
- using MethodBitField = ArrowFunctionBitField::Next<bool, 1>;
- using StaticBitField = MethodBitField::Next<bool, 1>;
- using ClassConstructorBitField = StaticBitField::Next<bool, 1>;
- using DefaultConstructorBitField = ClassConstructorBitField::Next<bool, 1>;
- using DerivedConstructorBitField = DefaultConstructorBitField::Next<bool, 1>;
-
- using ReadOnlyBitField = base::BitField<bool, 0, 1, uint8_t>;
- using ConfigurableBitField = ReadOnlyBitField::Next<bool, 1>;
- using EnumerableBitField = ConfigurableBitField::Next<bool, 1>;
-};
-
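The BitField aliases in the class above pack several booleans into one byte (for the JSArrayBuffer flags: bit 0 was_detached, bit 1 is_shared, bit 2 is_resizable). The following is a plain-shift illustration of that layout only; it is not the base::BitField API itself, which provides encode/decode helpers.

    #include <cstdint>

    // Same bit layout as DetachedBitField / SharedBitField / ResizableBitField,
    // written with explicit shifts for illustration.
    constexpr uint8_t kDetachedBit = 1 << 0;
    constexpr uint8_t kSharedBit = 1 << 1;
    constexpr uint8_t kResizableBit = 1 << 2;

    inline uint8_t EncodeArrayBufferFlags(bool detached, bool shared,
                                          bool resizable) {
      return (detached ? kDetachedBit : 0) | (shared ? kSharedBit : 0) |
             (resizable ? kResizableBit : 0);
    }

    inline bool DecodeDetached(uint8_t flags) {
      return (flags & kDetachedBit) != 0;
    }
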
-class V8_EXPORT WebSnapshotSerializer
- : public WebSnapshotSerializerDeserializer {
- public:
- explicit WebSnapshotSerializer(v8::Isolate* isolate);
- explicit WebSnapshotSerializer(Isolate* isolate);
-
- ~WebSnapshotSerializer();
-
- bool TakeSnapshot(v8::Local<v8::Context> context,
- v8::Local<v8::PrimitiveArray> exports,
- WebSnapshotData& data_out);
- bool TakeSnapshot(Handle<Object> object, MaybeHandle<FixedArray> block_list,
- WebSnapshotData& data_out);
-
- // For inspecting the state after taking a snapshot.
- uint32_t string_count() const {
- return static_cast<uint32_t>(string_ids_.size());
- }
-
- uint32_t symbol_count() const {
- return static_cast<uint32_t>(symbol_ids_.size());
- }
-
- uint32_t bigint_count() const {
- return static_cast<uint32_t>(bigint_ids_.size());
- }
-
- uint32_t map_count() const { return static_cast<uint32_t>(map_ids_.size()); }
-
- uint32_t builtin_object_count() const {
- return static_cast<uint32_t>(builtin_object_ids_.size());
- }
-
- uint32_t context_count() const {
- return static_cast<uint32_t>(context_ids_.size());
- }
-
- uint32_t function_count() const {
- return static_cast<uint32_t>(function_ids_.size());
- }
-
- uint32_t class_count() const {
- return static_cast<uint32_t>(class_ids_.size());
- }
-
- uint32_t array_count() const {
- return static_cast<uint32_t>(array_ids_.size());
- }
-
- uint32_t array_buffer_count() const {
- return static_cast<uint32_t>(array_buffer_ids_.size());
- }
-
- uint32_t typed_array_count() const {
- return static_cast<uint32_t>(typed_array_ids_.size());
- }
-
- uint32_t data_view_count() const {
- return static_cast<uint32_t>(data_view_ids_.size());
- }
-
- uint32_t object_count() const {
- return static_cast<uint32_t>(object_ids_.size());
- }
-
- uint32_t external_object_count() const {
- return static_cast<uint32_t>(external_object_ids_.size());
- }
-
- Handle<FixedArray> GetExternals();
-
- private:
- WebSnapshotSerializer(const WebSnapshotSerializer&) = delete;
- WebSnapshotSerializer& operator=(const WebSnapshotSerializer&) = delete;
-
- enum class AllowInPlace {
- No,  // This reference cannot be replaced with an in-place item.
- Yes, // This reference can be replaced with an in-place item.
- };
-
- void SerializePendingItems();
- void WriteSnapshot(uint8_t*& buffer, size_t& buffer_size);
- void WriteObjects(ValueSerializer& destination, size_t count,
- ValueSerializer& source, const char* name);
-
- // Returns true if the object was already in the map, false if it was added.
- bool InsertIntoIndexMap(ObjectCacheIndexMap& map, HeapObject heap_object,
- uint32_t& id);
-
- void ShallowDiscoverExternals(FixedArray externals);
- void ShallowDiscoverBuiltinObjects(v8::Local<v8::Context> context);
- void Discover(Handle<HeapObject> object);
- void DiscoverString(Handle<String> string,
- AllowInPlace can_be_in_place = AllowInPlace::No);
- void DiscoverSymbol(Handle<Symbol> symbol);
- void DiscoverBigInt(Handle<BigInt> bigint);
- void DiscoverMap(Handle<Map> map, bool allow_property_in_descriptor = false);
- void DiscoverPropertyKey(Handle<Name> key);
- void DiscoverMapForFunction(Handle<JSFunction> function);
- void DiscoverFunction(Handle<JSFunction> function);
- void DiscoverClass(Handle<JSFunction> function);
- void DiscoverContextAndPrototype(Handle<JSFunction> function);
- void DiscoverContext(Handle<Context> context);
- void DiscoverArray(Handle<JSArray> array);
- void DiscoverTypedArray(Handle<JSTypedArray> typed_array);
- void DiscoverDataView(Handle<JSDataView> data_view);
- void DiscoverArrayBuffer(Handle<JSArrayBuffer> array_buffer);
- void DiscoverElements(Handle<JSObject> object);
- void DiscoverObject(Handle<JSObject> object);
- bool DiscoverIfBuiltinObject(Handle<HeapObject> object);
- void DiscoverSource(Handle<JSFunction> function);
- template <typename T>
- void DiscoverObjectPropertiesWithDictionaryMap(T dict);
- bool ShouldBeSerialized(Handle<Name> key);
- void ConstructSource();
-
- void SerializeFunctionInfo(Handle<JSFunction> function,
- ValueSerializer& serializer);
- void SerializeFunctionProperties(Handle<JSFunction> function,
- ValueSerializer& serializer);
- void SerializeString(Handle<String> string, ValueSerializer& serializer);
- void SerializeSymbol(Handle<Symbol> symbol);
- void SerializeBigInt(Handle<BigInt> bigint);
- void SerializeMap(Handle<Map> map);
- void SerializeBuiltinObject(uint32_t name_id);
- void SerializeObjectPrototype(Handle<Map> map, ValueSerializer& serializer);
-
- template <typename T>
- void SerializeObjectPropertiesWithDictionaryMap(T dict);
- void SerializeFunction(Handle<JSFunction> function);
- void SerializeClass(Handle<JSFunction> function);
- void SerializeContext(Handle<Context> context, uint32_t id);
- void SerializeArray(Handle<JSArray> array);
- void SerializeElements(Handle<JSObject> object, ValueSerializer& serializer,
- Maybe<uint32_t> length);
- void SerializeObject(Handle<JSObject> object);
- void SerializeArrayBufferView(Handle<JSArrayBufferView> array_buffer_view,
- ValueSerializer& serializer);
- void SerializeArrayBuffer(Handle<JSArrayBuffer> array_buffer);
- void SerializeTypedArray(Handle<JSTypedArray> typed_array);
- void SerializeDataView(Handle<JSDataView> data_view);
-
- void SerializeExport(Handle<Object> object, Handle<String> export_name);
- void WriteValue(Handle<Object> object, ValueSerializer& serializer);
- void WriteStringMaybeInPlace(Handle<String> string,
- ValueSerializer& serializer);
- void WriteStringId(Handle<String> string, ValueSerializer& serializer);
-
- uint32_t GetStringId(Handle<String> string, bool& in_place);
- uint32_t GetSymbolId(Symbol symbol);
- uint32_t GetBigIntId(BigInt bigint);
- uint32_t GetMapId(Map map);
- uint32_t GetFunctionId(JSFunction function);
- uint32_t GetClassId(JSFunction function);
- uint32_t GetContextId(Context context);
- uint32_t GetArrayId(JSArray array);
- uint32_t GetTypedArrayId(JSTypedArray typed_array);
- uint32_t GetDataViewId(JSDataView data_view);
- uint32_t GetArrayBufferId(JSArrayBuffer array_buffer);
- uint32_t GetObjectId(JSObject object);
- bool GetExternalId(HeapObject object, uint32_t* id = nullptr);
- // Returns index into builtin_object_name_strings_.
- bool GetBuiltinObjectNameIndex(HeapObject object, uint32_t& index);
- bool GetBuiltinObjectId(HeapObject object, uint32_t& id);
-
- ValueSerializer string_serializer_;
- ValueSerializer symbol_serializer_;
- ValueSerializer bigint_serializer_;
- ValueSerializer map_serializer_;
- ValueSerializer builtin_object_serializer_;
- ValueSerializer context_serializer_;
- ValueSerializer function_serializer_;
- ValueSerializer class_serializer_;
- ValueSerializer array_serializer_;
- ValueSerializer typed_array_serializer_;
- ValueSerializer array_buffer_serializer_;
- ValueSerializer data_view_serializer_;
- ValueSerializer object_serializer_;
- ValueSerializer export_serializer_;
-
- // These are needed for being able to serialize items in order.
- Handle<ArrayList> strings_;
- Handle<ArrayList> symbols_;
- Handle<ArrayList> bigints_;
- Handle<ArrayList> maps_;
- Handle<ArrayList> contexts_;
- Handle<ArrayList> functions_;
- Handle<ArrayList> classes_;
- Handle<ArrayList> arrays_;
- Handle<ArrayList> typed_arrays_;
- Handle<ArrayList> array_buffers_;
- Handle<ArrayList> data_views_;
- Handle<ArrayList> objects_;
-
- // IndexMap to keep track of explicitly blocked external objects and
- // non-serializable/not-supported objects (e.g. API Objects).
- ObjectCacheIndexMap external_object_ids_;
-
- // ObjectCacheIndexMap implements fast lookup item -> id. Some items (context,
- // function, class, array, object) can point to other items and we serialize
- // them in the reverse order. This ensures that the items this item points to
- // have a lower ID and will be deserialized first.
- ObjectCacheIndexMap string_ids_;
- ObjectCacheIndexMap symbol_ids_;
- ObjectCacheIndexMap bigint_ids_;
- ObjectCacheIndexMap map_ids_;
- ObjectCacheIndexMap context_ids_;
- ObjectCacheIndexMap function_ids_;
- ObjectCacheIndexMap class_ids_;
- ObjectCacheIndexMap array_ids_;
- ObjectCacheIndexMap typed_array_ids_;
- ObjectCacheIndexMap array_buffer_ids_;
- ObjectCacheIndexMap data_view_ids_;
- ObjectCacheIndexMap object_ids_;
- uint32_t export_count_ = 0;
-
- // For handling references to builtin objects:
- // --------------------------------
- // String objects for the names of all known builtins.
- Handle<FixedArray> builtin_object_name_strings_;
-
- // Map object -> index in builtin_object_name_strings_ for all known builtins.
- ObjectCacheIndexMap builtin_object_to_name_;
-
- // Map object -> index in builtins_. Includes only builtins which will be
- // included in the snapshot.
- ObjectCacheIndexMap builtin_object_ids_;
-
- // For creating the Builtin wrappers in the snapshot. Includes only builtins
- // which will be included in the snapshot. Each element is the id of the
- // builtin name string in the snapshot.
- std::vector<uint32_t> builtin_objects_;
- // --------------------------------
-
- std::queue<Handle<HeapObject>> discovery_queue_;
-
- // For keeping track of which strings have exactly one reference. Strings are
- // inserted here when the first reference is discovered, and never removed.
- // Strings which have more than one reference get an ID and are inserted to
- // strings_.
- IdentityMap<int, base::DefaultAllocationPolicy> all_strings_;
-
- // For constructing the minimal, "compacted", source string to cover all
- // function bodies.
- // --------------------------------
- // Script id -> offset of the script source code in full_source_.
- std::map<int, int> script_offsets_;
- Handle<String> full_source_;
- uint32_t source_id_;
- // Ordered set of (start, end) pairs of all functions we've discovered.
- std::set<std::pair<int, int>> source_intervals_;
- // Maps function positions in the real source code into the function positions
- // in the constructed source code (which we'll include in the web snapshot).
- std::unordered_map<int, int> source_offset_to_compacted_source_offset_;
- // --------------------------------
-};
-
-class V8_EXPORT WebSnapshotDeserializer
- : public WebSnapshotSerializerDeserializer {
- public:
- WebSnapshotDeserializer(v8::Isolate* v8_isolate, const uint8_t* data,
- size_t buffer_size);
- WebSnapshotDeserializer(Isolate* isolate, Handle<Script> snapshot_as_script);
- ~WebSnapshotDeserializer();
- bool Deserialize(MaybeHandle<FixedArray> external_references = {},
- bool skip_exports = false);
-
- // For inspecting the state after deserializing a snapshot.
- uint32_t string_count() const { return string_count_; }
- uint32_t symbol_count() const { return symbol_count_; }
- uint32_t map_count() const { return map_count_; }
- uint32_t builtin_object_count() const { return builtin_object_count_; }
- uint32_t context_count() const { return context_count_; }
- uint32_t function_count() const { return function_count_; }
- uint32_t class_count() const { return class_count_; }
- uint32_t array_count() const { return array_count_; }
- uint32_t object_count() const { return object_count_; }
-
- static void UpdatePointersCallback(v8::Isolate* isolate, v8::GCType type,
- v8::GCCallbackFlags flags,
- void* deserializer) {
- reinterpret_cast<WebSnapshotDeserializer*>(deserializer)->UpdatePointers();
- }
-
- void UpdatePointers();
-
- MaybeHandle<Object> value() const { return return_value_; }
-
- private:
- enum class InternalizeStrings {
- kNo,
- kYes,
- };
-
- WebSnapshotDeserializer(Isolate* isolate, Handle<Object> script_name,
- base::Vector<const uint8_t> buffer);
- // Return value: {data, length, data_owned}.
- std::tuple<const uint8_t*, uint32_t, bool> ExtractScriptBuffer(
- Isolate* isolate, Handle<Script> snapshot_as_script);
- bool DeserializeSnapshot(bool skip_exports);
- void CollectBuiltinObjects();
- bool DeserializeScript();
-
- WebSnapshotDeserializer(const WebSnapshotDeserializer&) = delete;
- WebSnapshotDeserializer& operator=(const WebSnapshotDeserializer&) = delete;
-
- void DeserializeStrings();
- void DeserializeSymbols();
- void DeserializeBigInts();
- void DeserializeMaps();
- void DeserializeBuiltinObjects();
- void DeserializeContexts();
- Handle<ScopeInfo> CreateScopeInfo(uint32_t variable_count, bool has_parent,
- ContextType context_type,
- bool has_inlined_local_names);
- Handle<JSFunction> CreateJSFunction(int index, uint32_t start,
- uint32_t length, uint32_t parameter_count,
- uint8_t flags, uint32_t context_id);
- void DeserializeFunctionData(uint32_t count, uint32_t current_count);
- void DeserializeFunctions();
- void DeserializeClasses();
- void DeserializeArrays();
- void DeserializeArrayBuffers();
- void DeserializeTypedArrays();
- void DeserializeDataViews();
- void DeserializeObjects();
- void DeserializeObjectElements(Handle<JSObject> object,
- bool map_from_snapshot);
- void DeserializeExports(bool skip_exports);
- void DeserializeObjectPrototype(Handle<Map> map);
- Handle<Map> DeserializeObjectPrototypeAndCreateEmptyMap();
- void DeserializeObjectPrototypeForFunction(Handle<JSFunction> function);
- void SetPrototype(Handle<Map> map, Handle<Object> prototype);
- void DeserializeFunctionProperties(Handle<JSFunction> function);
- bool ReadCount(uint32_t& count);
-
- bool IsInitialFunctionPrototype(Object prototype);
-
- template <typename T>
- void DeserializeObjectPropertiesWithDictionaryMap(
- T dict, uint32_t property_count, bool has_custom_property_attributes);
-
- Handle<PropertyArray> DeserializePropertyArray(
- Handle<DescriptorArray> descriptors, int no_properties);
-
- // Return value: (object, was_deferred)
- std::tuple<Object, bool> ReadValue(
- Handle<HeapObject> object_for_deferred_reference = Handle<HeapObject>(),
- uint32_t index_for_deferred_reference = 0,
- InternalizeStrings internalize_strings = InternalizeStrings::kNo);
-
- Object ReadInteger();
- Object ReadNumber();
- String ReadString(
- InternalizeStrings internalize_strings = InternalizeStrings::kNo);
- String ReadInPlaceString(
- InternalizeStrings internalize_strings = InternalizeStrings::kNo);
- Object ReadSymbol();
- Object ReadBigInt();
- std::tuple<Object, bool> ReadArray(Handle<HeapObject> container,
- uint32_t container_index);
- std::tuple<Object, bool> ReadArrayBuffer(Handle<HeapObject> container,
- uint32_t container_index);
- std::tuple<Object, bool> ReadTypedArray(Handle<HeapObject> container,
- uint32_t container_index);
- std::tuple<Object, bool> ReadDataView(Handle<HeapObject> container,
- uint32_t container_index);
- std::tuple<Object, bool> ReadObject(Handle<HeapObject> container,
- uint32_t container_index);
- std::tuple<Object, bool> ReadFunction(Handle<HeapObject> container,
- uint32_t container_index);
- std::tuple<Object, bool> ReadClass(Handle<HeapObject> container,
- uint32_t container_index);
- Object ReadRegexp();
- Object ReadBuiltinObjectReference();
- Object ReadExternalReference();
- bool ReadMapType();
- std::tuple<Handle<FixedArrayBase>, ElementsKind, uint32_t>
- DeserializeElements();
- ElementsType ReadElementsType();
- std::tuple<Handle<FixedArrayBase>, ElementsKind, uint32_t> ReadDenseElements(
- uint32_t length);
- std::tuple<Handle<FixedArrayBase>, ElementsKind, uint32_t> ReadSparseElements(
- uint32_t length);
-
- void ReadFunctionPrototype(Handle<JSFunction> function);
- bool SetFunctionPrototype(JSFunction function, JSReceiver prototype);
-
- HeapObject AddDeferredReference(Handle<HeapObject> container, uint32_t index,
- ValueType target_type,
- uint32_t target_object_index);
- void ProcessDeferredReferences();
- // Not virtual, on purpose (because it doesn't need to be).
- void Throw(const char* message);
- void VerifyObjects();
-
- Handle<FixedArray> strings_handle_;
- FixedArray strings_;
-
- Handle<FixedArray> symbols_handle_;
- FixedArray symbols_;
-
- Handle<FixedArray> bigints_handle_;
- FixedArray bigints_;
-
- Handle<FixedArray> builtin_objects_handle_;
- FixedArray builtin_objects_;
-
- Handle<FixedArray> maps_handle_;
- FixedArray maps_;
- std::map<int, Handle<Map>> deserialized_function_maps_;
-
- Handle<FixedArray> contexts_handle_;
- FixedArray contexts_;
-
- Handle<FixedArray> functions_handle_;
- FixedArray functions_;
-
- Handle<FixedArray> classes_handle_;
- FixedArray classes_;
-
- Handle<FixedArray> arrays_handle_;
- FixedArray arrays_;
-
- Handle<FixedArray> array_buffers_handle_;
- FixedArray array_buffers_;
-
- Handle<FixedArray> typed_arrays_handle_;
- FixedArray typed_arrays_;
-
- Handle<FixedArray> data_views_handle_;
- FixedArray data_views_;
-
- Handle<FixedArray> objects_handle_;
- FixedArray objects_;
-
- Handle<FixedArray> external_references_handle_;
- FixedArray external_references_;
-
- // Map: String -> builtin object.
- Handle<ObjectHashTable> builtin_object_name_to_object_;
-
- Handle<ArrayList> deferred_references_;
-
- Handle<WeakFixedArray> shared_function_infos_handle_;
- WeakFixedArray shared_function_infos_;
-
- Handle<ObjectHashTable> shared_function_info_table_;
-
- Handle<Script> script_;
- Handle<Object> script_name_;
-
- Handle<Object> return_value_;
-
- uint32_t string_count_ = 0;
- uint32_t symbol_count_ = 0;
- uint32_t bigint_count_ = 0;
- uint32_t map_count_ = 0;
- uint32_t builtin_object_count_ = 0;
- uint32_t context_count_ = 0;
- uint32_t function_count_ = 0;
- uint32_t current_function_count_ = 0;
- uint32_t class_count_ = 0;
- uint32_t current_class_count_ = 0;
- uint32_t array_count_ = 0;
- uint32_t current_array_count_ = 0;
- uint32_t array_buffer_count_ = 0;
- uint32_t current_array_buffer_count_ = 0;
- uint32_t typed_array_count_ = 0;
- uint32_t current_typed_array_count_ = 0;
- uint32_t data_view_count_ = 0;
- uint32_t current_data_view_count_ = 0;
- uint32_t object_count_ = 0;
- uint32_t current_object_count_ = 0;
-
- std::unique_ptr<ValueDeserializer> deserializer_;
- std::unique_ptr<const uint8_t[]> owned_data_;
- ReadOnlyRoots roots_;
-
- bool deserialized_ = false;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WEB_SNAPSHOT_WEB_SNAPSHOT_H_
diff --git a/deps/v8/src/zone/type-stats.cc b/deps/v8/src/zone/type-stats.cc
index af4777a8ef..f92439aee6 100644
--- a/deps/v8/src/zone/type-stats.cc
+++ b/deps/v8/src/zone/type-stats.cc
@@ -4,12 +4,13 @@
#ifdef V8_ENABLE_PRECISE_ZONE_STATS
-#if defined(__clang__) || defined(__GLIBCXX__)
+#if (defined(__clang__) || defined(__GLIBCXX__)) && !defined(_MSC_VER)
#include <cxxabi.h>
#endif // __GLIBCXX__
#include <cinttypes>
#include <cstdio>
+#include "src/base/platform/memory.h"
#include "src/base/platform/wrappers.h"
#include "src/utils/utils.h"
#include "src/zone/type-stats.h"
@@ -32,7 +33,7 @@ class Demangler {
}
const char* demangle(std::type_index type_id) {
-#if defined(__clang__) || defined(__GLIBCXX__)
+#if (defined(__clang__) || defined(__GLIBCXX__)) && !defined(_MSC_VER)
int status = -1;
char* result =
abi::__cxa_demangle(type_id.name(), buffer_, &buffer_len_, &status);
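The tightened guard matters because clang-cl defines __clang__ while MSVC's toolchain ships no <cxxabi.h>. Below is a small standalone sketch of the same pattern; DemangledName is a hypothetical helper, not part of this patch. Note that abi::__cxa_demangle returns a malloc'ed buffer that the caller must free on success.

    #include <cstdlib>
    #include <typeinfo>
    #if (defined(__clang__) || defined(__GLIBCXX__)) && !defined(_MSC_VER)
    #include <cxxabi.h>
    #endif

    // Returns a readable name for the given type, falling back to the mangled
    // name when no demangler is available (e.g. clang-cl on Windows).
    const char* DemangledName(const std::type_info& info) {
    #if (defined(__clang__) || defined(__GLIBCXX__)) && !defined(_MSC_VER)
      int status = -1;
      char* result = abi::__cxa_demangle(info.name(), nullptr, nullptr, &status);
      if (status == 0 && result != nullptr) return result;  // caller frees
    #endif
      return info.name();
    }
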
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index ed8b544ad1..f78948e2dd 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -8,6 +8,7 @@
#include <deque>
#include <forward_list>
#include <initializer_list>
+#include <iterator>
#include <list>
#include <map>
#include <queue>
@@ -15,7 +16,6 @@
#include <stack>
#include <unordered_map>
#include <unordered_set>
-#include <vector>
#include "src/base/functional.h"
#include "src/zone/zone-allocator.h"
@@ -23,42 +23,571 @@
namespace v8 {
namespace internal {
-// A wrapper subclass for std::vector to make it easy to construct one
-// that uses a zone allocator.
+// A drop-in replacement for std::vector that uses a Zone for its allocations,
+// and (contrary to a std::vector subclass with custom allocator) gives us
+// precise control over its implementation and performance characteristics.
+//
+// When working on this code, keep the following rules of thumb in mind:
+// - Everything between {data_} and {end_} (exclusive) is a live instance of T.
+// When writing to these slots, use the {CopyingOverwrite} or
+// {MovingOverwrite} helpers.
+// - Everything between {end_} (inclusive) and {capacity_} (exclusive) is
+// considered uninitialized memory. When writing to these slots, use the
+// {CopyToNewStorage} or {MoveToNewStorage} helpers. Obviously, also use
+// these helpers to initialize slots in newly allocated backing stores.
+// - When shrinking, call ~T on all slots between the new and the old position
+// of {end_} to maintain the above invariant. Also call ~T on all slots in
+// discarded backing stores.
+// - The interface offered by {ZoneVector} should be a subset of
+// {std::vector}'s API, so that calling code doesn't need to be aware of
+// ZoneVector's implementation details and can assume standard C++ behavior.
+// (It's okay if we don't support everything that std::vector supports; we
+// can fill such gaps when use cases arise.)
template <typename T>
-class ZoneVector : public std::vector<T, ZoneAllocator<T>> {
+class ZoneVector {
public:
+ using iterator = T*;
+ using const_iterator = const T*;
+ using reverse_iterator = std::reverse_iterator<T*>;
+ using const_reverse_iterator = std::reverse_iterator<const T*>;
+ using value_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using size_type = size_t;
+
// Constructs an empty vector.
- explicit ZoneVector(Zone* zone)
- : std::vector<T, ZoneAllocator<T>>(ZoneAllocator<T>(zone)) {}
+ explicit ZoneVector(Zone* zone) : zone_(zone) {}
// Constructs a new vector and fills it with {size} elements, each
// constructed via the default constructor.
- ZoneVector(size_t size, Zone* zone)
- : std::vector<T, ZoneAllocator<T>>(size, T(), ZoneAllocator<T>(zone)) {}
+ ZoneVector(size_t size, Zone* zone) : zone_(zone) {
+ data_ = size > 0 ? zone->NewArray<T>(size) : nullptr;
+ end_ = capacity_ = data_ + size;
+ for (T* p = data_; p < end_; p++) emplace(p);
+ }
// Constructs a new vector and fills it with {size} elements, each
// having the value {def}.
- ZoneVector(size_t size, T def, Zone* zone)
- : std::vector<T, ZoneAllocator<T>>(size, def, ZoneAllocator<T>(zone)) {}
+ ZoneVector(size_t size, T def, Zone* zone) : zone_(zone) {
+ data_ = size > 0 ? zone->NewArray<T>(size) : nullptr;
+ end_ = capacity_ = data_ + size;
+ for (T* p = data_; p < end_; p++) emplace(p, def);
+ }
// Constructs a new vector and fills it with the contents of the given
// initializer list.
- ZoneVector(std::initializer_list<T> list, Zone* zone)
- : std::vector<T, ZoneAllocator<T>>(list, ZoneAllocator<T>(zone)) {}
+ ZoneVector(std::initializer_list<T> list, Zone* zone) : zone_(zone) {
+ size_t size = list.size();
+ if (size > 0) {
+ data_ = zone->NewArray<T>(size);
+ CopyToNewStorage(data_, list.begin(), list.end());
+ } else {
+ data_ = nullptr;
+ }
+ end_ = capacity_ = data_ + size;
+ }
+
+ // Constructs a new vector and fills it with the contents of the range
+ // [first, last).
+ template <class It,
+ typename = typename std::iterator_traits<It>::iterator_category>
+ ZoneVector(It first, It last, Zone* zone) : zone_(zone) {
+ if constexpr (std::is_base_of_v<
+ std::random_access_iterator_tag,
+ typename std::iterator_traits<It>::iterator_category>) {
+ size_t size = last - first;
+ data_ = size > 0 ? zone->NewArray<T>(size) : nullptr;
+ end_ = capacity_ = data_ + size;
+ for (T* p = data_; p < end_; p++) emplace(p, *first++);
+ } else {
+ while (first != last) push_back(*first++);
+ }
+ DCHECK_EQ(first, last);
+ }
+
+ ZoneVector(const ZoneVector& other) V8_NOEXCEPT : zone_(other.zone_) {
+ *this = other;
+ }
+
+ ZoneVector(ZoneVector&& other) V8_NOEXCEPT { *this = std::move(other); }
+
+ ~ZoneVector() {
+ for (T* p = data_; p < end_; p++) p->~T();
+ if (data_) zone_->DeleteArray(data_, capacity());
+ }
+
+ // Assignment operators.
+ ZoneVector& operator=(const ZoneVector& other) V8_NOEXCEPT {
+ // Self-assignment would cause undefined behavior in the !copy_assignable
+ // branch, but likely indicates a bug in calling code anyway.
+ DCHECK_NE(this, &other);
+ T* src = other.data_;
+ if (capacity() >= other.size() && zone_ == other.zone_) {
+ T* dst = data_;
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ size_t size = other.size();
+ if (size) memcpy(dst, src, size * sizeof(T));
+ end_ = dst + size;
+ } else if constexpr (std::is_copy_assignable_v<T>) {
+ while (dst < end_ && src < other.end_) *dst++ = *src++;
+ while (src < other.end_) emplace(dst++, *src++);
+ T* old_end = end_;
+ end_ = dst;
+ for (T* p = end_; p < old_end; p++) p->~T();
+ } else {
+ for (T* p = data_; p < end_; p++) p->~T();
+ while (src < other.end_) emplace(dst++, *src++);
+ end_ = dst;
+ }
+ } else {
+ for (T* p = data_; p < end_; p++) p->~T();
+ if (data_) zone_->DeleteArray(data_, capacity());
+ size_t new_cap = other.capacity();
+ if (new_cap > 0) {
+ data_ = zone_->NewArray<T>(new_cap);
+ CopyToNewStorage(data_, other.data_, other.end_);
+ } else {
+ data_ = nullptr;
+ }
+ capacity_ = data_ + new_cap;
+ end_ = data_ + other.size();
+ }
+ return *this;
+ }
+
+ ZoneVector& operator=(ZoneVector&& other) V8_NOEXCEPT {
+ // Self-assignment would cause undefined behavior, and is probably a bug.
+ DCHECK_NE(this, &other);
+ // Move-assigning vectors from different zones would have surprising
+ // lifetime semantics regardless of how we choose to implement it (keep
+ // the old zone? Take the new zone?).
+ if (zone_ == nullptr) {
+ zone_ = other.zone_;
+ } else {
+ DCHECK_EQ(zone_, other.zone_);
+ }
+ for (T* p = data_; p < end_; p++) p->~T();
+ if (data_) zone_->DeleteArray(data_, capacity());
+ data_ = other.data_;
+ end_ = other.end_;
+ capacity_ = other.capacity_;
+ // {other.zone_} may stay.
+ other.data_ = other.end_ = other.capacity_ = nullptr;
+ return *this;
+ }
ZoneVector& operator=(std::initializer_list<T> ilist) {
- std::vector<T, ZoneAllocator<T>>::operator=(ilist);
+ clear();
+ EnsureCapacity(ilist.size());
+ CopyToNewStorage(data_, ilist.begin(), ilist.end());
+ end_ = data_ + ilist.size();
return *this;
}
- // Constructs a new vector and fills it with the contents of the range
- // [first, last).
- template <class InputIt>
- ZoneVector(InputIt first, InputIt last, Zone* zone)
- : std::vector<T, ZoneAllocator<T>>(first, last, ZoneAllocator<T>(zone)) {}
+ void swap(ZoneVector<T>& other) noexcept {
+ DCHECK_EQ(zone_, other.zone_);
+ std::swap(data_, other.data_);
+ std::swap(end_, other.end_);
+ std::swap(capacity_, other.capacity_);
+ }
+
+ void resize(size_t new_size) {
+ EnsureCapacity(new_size);
+ T* new_end = data_ + new_size;
+ for (T* p = end_; p < new_end; p++) emplace(p);
+ for (T* p = new_end; p < end_; p++) p->~T();
+ end_ = new_end;
+ }
+
+ void resize(size_t new_size, const T& value) {
+ EnsureCapacity(new_size);
+ T* new_end = data_ + new_size;
+ for (T* p = end_; p < new_end; p++) emplace(p, value);
+ for (T* p = new_end; p < end_; p++) p->~T();
+ end_ = new_end;
+ }
+
+ void assign(size_t new_size, const T& value) {
+ if (capacity() >= new_size) {
+ T* new_end = data_ + new_size;
+ T* assignable = data_ + std::min(size(), new_size);
+ for (T* p = data_; p < assignable; p++) CopyingOverwrite(p, &value);
+ for (T* p = assignable; p < new_end; p++) CopyToNewStorage(p, &value);
+ for (T* p = new_end; p < end_; p++) p->~T();
+ end_ = new_end;
+ } else {
+ clear();
+ EnsureCapacity(new_size);
+ T* new_end = data_ + new_size;
+ for (T* p = data_; p < new_end; p++) emplace(p, value);
+ end_ = new_end;
+ }
+ }
+
+ void clear() {
+ for (T* p = data_; p < end_; p++) p->~T();
+ end_ = data_;
+ }
+
+ size_t size() const { return end_ - data_; }
+ bool empty() const { return end_ == data_; }
+ size_t capacity() const { return capacity_ - data_; }
+ void reserve(size_t new_cap) { EnsureCapacity(new_cap); }
+ T* data() { return data_; }
+ const T* data() const { return data_; }
+ Zone* zone() const { return zone_; }
+
+ T& at(size_t pos) {
+ DCHECK_LT(pos, size());
+ return data_[pos];
+ }
+ const T& at(size_t pos) const {
+ DCHECK_LT(pos, size());
+ return data_[pos];
+ }
+
+ T& operator[](size_t pos) { return at(pos); }
+ const T& operator[](size_t pos) const { return at(pos); }
+
+ T& front() {
+ DCHECK_GT(end_, data_);
+ return *data_;
+ }
+ const T& front() const {
+ DCHECK_GT(end_, data_);
+ return *data_;
+ }
+
+ T& back() {
+ DCHECK_GT(end_, data_);
+ return *(end_ - 1);
+ }
+ const T& back() const {
+ DCHECK_GT(end_, data_);
+ return *(end_ - 1);
+ }
+
+ T* begin() V8_NOEXCEPT { return data_; }
+ const T* begin() const V8_NOEXCEPT { return data_; }
+ const T* cbegin() const V8_NOEXCEPT { return data_; }
+
+ T* end() V8_NOEXCEPT { return end_; }
+ const T* end() const V8_NOEXCEPT { return end_; }
+ const T* cend() const V8_NOEXCEPT { return end_; }
+
+ reverse_iterator rbegin() V8_NOEXCEPT {
+ return std::make_reverse_iterator(end());
+ }
+ const_reverse_iterator rbegin() const V8_NOEXCEPT {
+ return std::make_reverse_iterator(end());
+ }
+ const_reverse_iterator crbegin() const V8_NOEXCEPT {
+ return std::make_reverse_iterator(cend());
+ }
+ reverse_iterator rend() V8_NOEXCEPT {
+ return std::make_reverse_iterator(begin());
+ }
+ const_reverse_iterator rend() const V8_NOEXCEPT {
+ return std::make_reverse_iterator(begin());
+ }
+ const_reverse_iterator crend() const V8_NOEXCEPT {
+ return std::make_reverse_iterator(cbegin());
+ }
+
+ void push_back(const T& value) {
+ EnsureOneMoreCapacity();
+ emplace(end_++, value);
+ }
+ void push_back(T&& value) { emplace_back(std::move(value)); }
+
+ void pop_back() {
+ DCHECK_GT(end_, data_);
+ (--end_)->~T();
+ }
+
+ template <typename... Args>
+ T& emplace_back(Args&&... args) {
+ EnsureOneMoreCapacity();
+ T* ptr = end_++;
+ new (ptr) T(std::forward<Args>(args)...);
+ return *ptr;
+ }
+
+ template <class It,
+ typename = typename std::iterator_traits<It>::iterator_category>
+ T* insert(const T* pos, It first, It last) {
+ T* position;
+ if constexpr (std::is_base_of_v<
+ std::random_access_iterator_tag,
+ typename std::iterator_traits<It>::iterator_category>) {
+ DCHECK_LE(0, last - first);
+ size_t count = last - first;
+ size_t assignable;
+ position = PrepareForInsertion(pos, count, &assignable);
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ if (count > 0) memcpy(position, first, count * sizeof(T));
+ } else {
+ CopyingOverwrite(position, first, first + assignable);
+ CopyToNewStorage(position + assignable, first + assignable, last);
+ }
+ } else if (pos == end()) {
+ position = end_;
+ while (first != last) {
+ EnsureOneMoreCapacity();
+ emplace(end_++, *first++);
+ }
+ } else {
+ UNIMPLEMENTED();
+ // We currently have no users of this case.
+ // It could be implemented inefficiently as a combination of the two
+ // cases above: while (first != last) { PrepareForInsertion(_, 1, _); }.
+ // A more efficient approach would be to accumulate the input iterator's
+ // results into a temporary vector first, then grow {this} only once
+ // (by calling PrepareForInsertion(_, count, _)), then copy over the
+ // accumulated elements.
+ }
+ return position;
+ }
+ T* insert(const T* pos, size_t count, const T& value) {
+ size_t assignable;
+ T* position = PrepareForInsertion(pos, count, &assignable);
+ T* dst = position;
+ T* stop = dst + assignable;
+ while (dst < stop) {
+ CopyingOverwrite(dst++, &value);
+ }
+ stop = position + count;
+ while (dst < stop) emplace(dst++, value);
+ return position;
+ }
+
+ T* erase(const T* pos) {
+ DCHECK(data_ <= pos && pos <= end());
+ if (pos == end()) return const_cast<T*>(pos);
+ return erase(pos, 1);
+ }
+ T* erase(const T* first, const T* last) {
+ DCHECK(data_ <= first && first <= last && last <= end());
+ if (first == last) return const_cast<T*>(first);
+ return erase(first, last - first);
+ }
+
+ private:
+ static constexpr size_t kMinCapacity = 2;
+ size_t NewCapacity(size_t minimum) {
+ // We can ignore possible overflow here: on 32-bit platforms, if the
+ // multiplication overflows, there's no better way to handle it than
+ // relying on the "new_capacity < minimum" check; in particular, a
+ // saturating multiplication would make no sense. On 64-bit platforms,
+ // overflow is effectively impossible anyway.
+ size_t new_capacity = data_ == capacity_ ? kMinCapacity : capacity() * 2;
+ return new_capacity < minimum ? minimum : new_capacity;
+ }
+
+ void EnsureOneMoreCapacity() {
+ if (end_ < capacity_) return;
+ Grow(capacity() + 1);
+ }
+
+ void EnsureCapacity(size_t minimum) {
+ if (minimum <= capacity()) return;
+ Grow(minimum);
+ }
+
+ V8_INLINE void CopyToNewStorage(T* dst, const T* src) { emplace(dst, *src); }
+
+ V8_INLINE void MoveToNewStorage(T* dst, T* src) {
+ if constexpr (std::is_move_constructible_v<T>) {
+ emplace(dst, std::move(*src));
+ } else {
+ CopyToNewStorage(dst, src);
+ }
+ }
+
+ V8_INLINE void CopyingOverwrite(T* dst, const T* src) {
+ if constexpr (std::is_copy_assignable_v<T>) {
+ *dst = *src;
+ } else {
+ dst->~T();
+ CopyToNewStorage(dst, src);
+ }
+ }
+
+ V8_INLINE void MovingOverwrite(T* dst, T* src) {
+ if constexpr (std::is_move_assignable_v<T>) {
+ *dst = std::move(*src);
+ } else {
+ CopyingOverwrite(dst, src);
+ }
+ }
+
+#define EMIT_TRIVIAL_CASE(memcpy_function) \
+ DCHECK_LE(src, src_end); \
+ if constexpr (std::is_trivially_copyable_v<T>) { \
+ size_t count = src_end - src; \
+ /* Add V8_ASSUME to silence gcc null check warning. */ \
+ V8_ASSUME(src != nullptr); \
+ memcpy_function(dst, src, count * sizeof(T)); \
+ return; \
+ }
+
+ V8_INLINE void CopyToNewStorage(T* dst, const T* src, const T* src_end) {
+ EMIT_TRIVIAL_CASE(memcpy)
+ for (; src < src_end; dst++, src++) {
+ CopyToNewStorage(dst, src);
+ }
+ }
+
+ V8_INLINE void MoveToNewStorage(T* dst, T* src, const T* src_end) {
+ EMIT_TRIVIAL_CASE(memcpy)
+ for (; src < src_end; dst++, src++) {
+ MoveToNewStorage(dst, src);
+ src->~T();
+ }
+ }
+
+ V8_INLINE void CopyingOverwrite(T* dst, const T* src, const T* src_end) {
+ EMIT_TRIVIAL_CASE(memmove)
+ for (; src < src_end; dst++, src++) {
+ CopyingOverwrite(dst, src);
+ }
+ }
+
+ V8_INLINE void MovingOverwrite(T* dst, T* src, const T* src_end) {
+ EMIT_TRIVIAL_CASE(memmove)
+ for (; src < src_end; dst++, src++) {
+ MovingOverwrite(dst, src);
+ }
+ }
+
+#undef EMIT_TRIVIAL_CASE
+
+ void Grow(size_t minimum) {
+ T* old_data = data_;
+ T* old_end = end_;
+ size_t old_size = size();
+ size_t new_capacity = NewCapacity(minimum);
+ data_ = zone_->NewArray<T>(new_capacity);
+ end_ = data_ + old_size;
+ if (old_data) {
+ MoveToNewStorage(data_, old_data, old_end);
+ zone_->DeleteArray(old_data, capacity_ - old_data);
+ }
+ capacity_ = data_ + new_capacity;
+ }
+
+ T* PrepareForInsertion(const T* pos, size_t count, size_t* assignable) {
+ DCHECK(data_ <= pos && pos <= end_);
+ CHECK(std::numeric_limits<size_t>::max() - size() >= count);
+ size_t index = pos - data_;
+ size_t to_shift = end() - pos;
+ DCHECK_EQ(index + to_shift, size());
+ if (capacity() < size() + count) {
+ *assignable = 0; // Fresh memory is not assignable (must be constructed).
+ T* old_data = data_;
+ T* old_end = end_;
+ size_t old_size = size();
+ size_t new_capacity = NewCapacity(old_size + count);
+ data_ = zone_->NewArray<T>(new_capacity);
+ end_ = data_ + old_size + count;
+ if (old_data) {
+ MoveToNewStorage(data_, old_data, pos);
+ MoveToNewStorage(data_ + index + count, const_cast<T*>(pos), old_end);
+ zone_->DeleteArray(old_data, capacity_ - old_data);
+ }
+ capacity_ = data_ + new_capacity;
+ } else {
+ // There are two interesting cases: we're inserting more elements
+ // than we're shifting (top), or the other way round (bottom).
+ //
+ // Old: [ABCDEFGHIJ___________]
+ // <--used--><--empty-->
+ //
+ // Case 1: index=7, count=8, to_shift=3
+ // New: [ABCDEFGaaacccccHIJ___]
+ // <-><------>
+ // ↑ ↑ to be in-place constructed
+ // ↑
+ // assignable_slots
+ //
+ // Case 2: index=3, count=3, to_shift=7
+ // New: [ABCaaaDEFGHIJ________]
+ // <-----><->
+ // ↑ ↑ to be in-place constructed
+ // ↑
+ // This range can be assigned. We report the first 3
+ // as {assignable_slots} to the caller, and use the other 4
+ // in the loop below.
+ // Observe that the number of old elements that are moved to the
+ // new end by in-place construction always equals {assignable_slots}.
+ size_t assignable_slots = std::min(to_shift, count);
+ *assignable = assignable_slots;
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ if (to_shift > 0) {
+ // Add V8_ASSUME to silence gcc null check warning.
+ V8_ASSUME(pos != nullptr);
+ memmove(const_cast<T*>(pos + count), pos, to_shift * sizeof(T));
+ }
+ end_ += count;
+ return data_ + index;
+ }
+ // Construct elements in previously-unused area ("HIJ" in the example
+ // above). This frees up assignable slots.
+ T* dst = end_ + count;
+ T* src = end_;
+ for (T* stop = dst - assignable_slots; dst > stop;) {
+ MoveToNewStorage(--dst, --src);
+ }
+ // Move (by assignment) elements into previously used area. This is
+ // "DEFG" in "case 2" in the example above.
+ DCHECK_EQ(src > pos, to_shift > count);
+ DCHECK_IMPLIES(src > pos, dst == end_);
+ while (src > pos) MovingOverwrite(--dst, --src);
+ // Not destructing {src} here because that'll happen either in a
+ // future iteration (when that spot becomes {dst}) or in {insert()}.
+ end_ += count;
+ }
+ return data_ + index;
+ }
+
+ T* erase(const T* first, size_t count) {
+ DCHECK(data_ <= first && first <= end());
+ DCHECK_LE(count, end() - first);
+ T* position = const_cast<T*>(first);
+ MovingOverwrite(position, position + count, end());
+ T* old_end = end();
+ end_ -= count;
+ for (T* p = end_; p < old_end; p++) p->~T();
+ return position;
+ }
+
+ template <typename... Args>
+ void emplace(T* target, Args&&... args) {
+ new (target) T(std::forward<Args>(args)...);
+ }
+
+ Zone* zone_{nullptr};
+ T* data_{nullptr};
+ T* end_{nullptr};
+ T* capacity_{nullptr};
};
+template <class T>
+bool operator==(const ZoneVector<T>& lhs, const ZoneVector<T>& rhs) {
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+}
+
+template <class T>
+bool operator!=(const ZoneVector<T>& lhs, const ZoneVector<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <class T>
+bool operator<(const ZoneVector<T>& lhs, const ZoneVector<T>& rhs) {
+ return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
+ rhs.end());
+}
+
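Because the new ZoneVector mirrors std::vector's interface, existing call sites should keep working unchanged. The snippet below is a hypothetical usage sketch, assuming V8's AccountingAllocator and Zone as declared in src/zone/zone.h and the CHECK_EQ macro from src/base/logging.h.

    v8::internal::AccountingAllocator allocator;
    v8::internal::Zone zone(&allocator, "zone-vector-example");
    v8::internal::ZoneVector<int> v(&zone);
    v.push_back(1);
    v.push_back(2);
    v.insert(v.begin() + 1, 3, 7);  // insert three 7s at index 1 -> {1,7,7,7,2}
    v.erase(v.begin());             // drop the leading element   -> {7,7,7,2}
    CHECK_EQ(4u, v.size());
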
// A wrapper subclass for std::deque to make it easy to construct one
// that uses a zone allocator.
template <typename T>
@@ -201,7 +730,6 @@ class ZoneMultimap
};
// Typedefs to shorten commonly used vectors.
-using BoolVector = ZoneVector<bool>;
using IntVector = ZoneVector<int>;
} // namespace internal
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 66039f5368..d5275d2c16 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -49,15 +49,15 @@ void* Zone::AsanNew(size_t size) {
size = RoundUp(size, kAlignmentInBytes);
// Check if the requested size is available without expanding.
- Address result = position_;
-
const size_t size_with_redzone = size + kASanRedzoneBytes;
DCHECK_LE(position_, limit_);
- if (size_with_redzone > limit_ - position_) {
- result = NewExpand(size_with_redzone);
- } else {
- position_ += size_with_redzone;
+ if (V8_UNLIKELY(size_with_redzone > limit_ - position_)) {
+ Expand(size_with_redzone);
}
+ DCHECK_LE(size_with_redzone, limit_ - position_);
+
+ Address result = position_;
+ position_ += size_with_redzone;
Address redzone_position = result + size;
DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_);
@@ -128,7 +128,7 @@ void Zone::ReleaseSegment(Segment* segment) {
allocator_->ReturnSegment(segment, supports_compression());
}
-Address Zone::NewExpand(size_t size) {
+void Zone::Expand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
@@ -178,15 +178,10 @@ Address Zone::NewExpand(size_t size) {
allocator_->TraceAllocateSegment(segment);
// Recompute 'top' and 'limit' based on the new segment.
- Address result = RoundUp(segment->start(), kAlignmentInBytes);
- position_ = result + size;
- // Check for address overflow.
- // (Should not happen since the segment is guaranteed to accommodate
- // size bytes + header and alignment padding)
- DCHECK(position_ >= result);
+ position_ = RoundUp(segment->start(), kAlignmentInBytes);
limit_ = segment->end();
- DCHECK(position_ <= limit_);
- return result;
+ DCHECK_LE(position_, limit_);
+ DCHECK_LE(size, limit_ - position_);
}
ZoneScope::ZoneScope(Zone* zone)
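
The refactoring above simplifies the allocation fast path: Expand (formerly NewExpand) now only grows the free region, and the caller always performs the bump, so the hot path has a single exit. The class below is a standalone sketch of that bump-pointer shape under simplified assumptions; segment bookkeeping and freeing of old blocks are omitted.

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    class BumpRegion {
     public:
      void* Allocate(size_t size) {
        size = (size + kAlign - 1) & ~(kAlign - 1);  // round up to alignment
        if (size > static_cast<size_t>(limit_ - position_)) Expand(size);
        assert(size <= static_cast<size_t>(limit_ - position_));
        char* result = position_;  // fast path: always bump
        position_ += size;
        return result;
      }

     private:
      static constexpr size_t kAlign = 8;
      // Slow path: only grows the free region; real zones keep a segment list,
      // this sketch just grabs a fresh block and leaks the old one.
      void Expand(size_t size) {
        size_t segment_size = size < 4096 ? 4096 : size;
        position_ = static_cast<char*>(std::malloc(segment_size));
        limit_ = position_ + segment_size;
      }
      char* position_ = nullptr;
      char* limit_ = nullptr;
    };
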
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index df1e3c09ef..3ddda6b2df 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -67,13 +67,16 @@ class V8_EXPORT_PRIVATE Zone final {
}
allocation_size_for_tracing_ += size;
#endif
- Address result = position_;
if (V8_UNLIKELY(size > limit_ - position_)) {
- result = NewExpand(size);
- } else {
- position_ += size;
+ Expand(size);
}
- return reinterpret_cast<void*>(result);
+
+ DCHECK_LE(position_, limit_);
+ DCHECK_LE(size, limit_ - position_);
+ DCHECK_EQ(0, position_ % kAlignmentInBytes);
+ void* result = reinterpret_cast<void*>(position_);
+ position_ += size;
+ return result;
#endif // V8_USE_ADDRESS_SANITIZER
}
@@ -107,6 +110,7 @@ class V8_EXPORT_PRIVATE Zone final {
// associated with the T type.
template <typename T, typename... Args>
T* New(Args&&... args) {
+ static_assert(alignof(T) <= kAlignmentInBytes);
void* memory = Allocate<T>(sizeof(T));
return new (memory) T(std::forward<Args>(args)...);
}
@@ -119,6 +123,7 @@ class V8_EXPORT_PRIVATE Zone final {
// distinguishable between each other.
template <typename T, typename TypeTag = T[]>
T* NewArray(size_t length) {
+ static_assert(alignof(T) <= kAlignmentInBytes);
DCHECK_IMPLIES(is_compressed_pointer<T>::value, supports_compression());
DCHECK_LT(length, std::numeric_limits<size_t>::max() / sizeof(T));
return static_cast<T*>(Allocate<TypeTag>(length * sizeof(T)));
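The new static_asserts turn over-aligned types into compile errors instead of silently handing out under-aligned zone memory (kAlignmentInBytes is the zone's fixed allocation alignment). A hypothetical illustration, assuming the USE macro from src/base/macros.h:

    struct Plain { int x; };             // alignof(Plain) fits the zone alignment
    struct alignas(64) Wide { int x; };  // exceeds the zone alignment

    void AlignmentExample(v8::internal::Zone* zone) {
      Plain* ok = zone->New<Plain>();    // compiles as before
      USE(ok);
      // Wide* bad = zone->New<Wide>();  // would now fail the static_assert
    }
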
@@ -228,11 +233,9 @@ class V8_EXPORT_PRIVATE Zone final {
// the zone.
std::atomic<size_t> segment_bytes_allocated_ = {0};
- // Expand the Zone to hold at least 'size' more bytes and allocate
- // the bytes. Returns the address of the newly allocated chunk of
- // memory in the Zone. Should only be called if there isn't enough
- // room in the Zone already.
- Address NewExpand(size_t size);
+ // Expand the Zone to hold at least 'size' more bytes.
+ // Should only be called if there is not enough room in the Zone already.
+ V8_NOINLINE V8_PRESERVE_MOST void Expand(size_t size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
@@ -252,7 +255,7 @@ class V8_EXPORT_PRIVATE Zone final {
std::atomic<size_t> allocation_size_for_tracing_ = {0};
// The number of bytes freed in this zone so far.
- stdd::atomic<size_t> freed_size_for_tracing_ = {0};
+ std::atomic<size_t> freed_size_for_tracing_ = {0};
#endif
friend class ZoneScope;
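
The static_assert added to New and NewArray above turns an over-aligned element type into a compile-time error instead of a silent under-alignment. A hypothetical example of the kind of type it would now reject, assuming a build where kAlignmentInBytes is 8:

// Illustrative only (not from the patch): an over-aligned payload.
struct alignas(32) SimdBox {
  double lanes[4];
};

// With the new guard, a call like zone->New<SimdBox>() no longer compiles on
// builds where kAlignmentInBytes is 8; previously it would have returned
// memory that is only 8-byte aligned.
static_assert(alignof(SimdBox) == 32, "alignas raises the type's alignment");
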
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 0f455b641a..6a04cbca45 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -42,7 +42,7 @@ group("gn_all") {
"benchmarks/cpp:gn_all",
"cctest:cctest",
"unittests:generate-bytecode-expectations",
- "unittests:unittests",
+ "unittests:v8_unittests",
]
}
}
@@ -72,6 +72,20 @@ group("v8_perf") {
]
}
+group("d8_pgo") {
+ testonly = true
+
+ data_deps = [
+ "..:d8",
+ "..:v8_python_base",
+ ]
+
+ data = [
+ "../tools/builtins-pgo/profile_only.py",
+ "../tools/builtins-pgo/get_hints.py",
+ ]
+}
+
group("v8_bot_default") {
testonly = true
@@ -84,7 +98,7 @@ group("v8_bot_default") {
"message:v8_message",
"mjsunit:v8_mjsunit",
"mkgrokdump:mkgrokdump",
- "unittests:unittests",
+ "unittests:v8_unittests",
"webkit:v8_webkit",
]
@@ -109,7 +123,7 @@ group("v8_default") {
"message:v8_message",
"mjsunit:v8_mjsunit",
"mkgrokdump:mkgrokdump",
- "unittests:unittests",
+ "unittests:v8_unittests",
]
if (v8_enable_webassembly) {
diff --git a/deps/v8/test/OWNERS b/deps/v8/test/OWNERS
index 3c70cea2fd..c04255f03e 100644
--- a/deps/v8/test/OWNERS
+++ b/deps/v8/test/OWNERS
@@ -1 +1,2 @@
file:../COMMON_OWNERS
+file:../INFRA_OWNERS
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 022a47a0b5..a75bfb87b4 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -36,6 +36,7 @@
'kraken/imaging-gaussian-blur': [PASS, SLOW],
'octane/box2d': [PASS, SLOW],
'octane/regexp': [PASS, SLOW],
+ 'octane/splay': [PASS, SLOW],
'octane/typescript': [PASS, SLOW],
# https://crbug.com/v8/11905
@@ -51,6 +52,8 @@
'kraken/ai-astar': [PASS, SLOW],
'kraken/audio-beat-detection': [SLOW],
'kraken/audio-dft': [PASS, SLOW],
+ 'kraken/audio-fft': [PASS, SLOW],
+ 'kraken/audio-oscillator': [PASS, SLOW],
'kraken/imaging-darkroom': [PASS, SLOW],
'kraken/imaging-desaturate': [PASS, SLOW],
'octane/code-load': [PASS, SLOW],
diff --git a/deps/v8/test/benchmarks/cpp/BUILD.gn b/deps/v8/test/benchmarks/cpp/BUILD.gn
index 07eeb94f15..631d92dd95 100644
--- a/deps/v8/test/benchmarks/cpp/BUILD.gn
+++ b/deps/v8/test/benchmarks/cpp/BUILD.gn
@@ -30,4 +30,17 @@ if (v8_enable_google_benchmark) {
"//third_party/google_benchmark:benchmark_main",
]
}
+
+ v8_executable("dtoa_benchmark") {
+ testonly = true
+
+ configs = []
+
+ sources = [ "dtoa.cc" ]
+
+ deps = [
+ "//:v8_libbase",
+ "//third_party/google_benchmark:benchmark_main",
+ ]
+ }
}
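
The new dtoa_benchmark target builds the dtoa.cc file added below against google_benchmark. As a rough, hypothetical sketch of how such a harness typically drives FastDtoa (this is not the actual contents of dtoa.cc; the FastDtoa signature is assumed from src/base/numbers/fast-dtoa.h, and kSamples here is just the first few values of the dumped table):

// Sketch, not the real dtoa.cc: run FastDtoa over a table of doubles under
// google_benchmark and count how often the fast path fails, i.e. how often a
// caller would have to fall back to a slower printing algorithm.
#include "src/base/numbers/fast-dtoa.h"
#include "src/base/vector.h"
#include "third_party/google_benchmark/src/include/benchmark/benchmark.h"

namespace {

constexpr double kSamples[] = {468.1588596224, 480.3152423710, 523.3453890260};

void BM_FastDtoaShortest(benchmark::State& state) {
  char digits[v8::base::kFastDtoaMaximalLength + 1];
  int length = 0;
  int decimal_point = 0;
  size_t failures = 0;
  for (auto _ : state) {
    for (double d : kSamples) {
      // FastDtoa returns false when it cannot guarantee the shortest
      // representation for this value.
      bool ok = v8::base::FastDtoa(
          d, v8::base::FAST_DTOA_SHORTEST, 0,
          v8::base::Vector<char>(digits, sizeof(digits)), &length,
          &decimal_point);
      if (!ok) ++failures;
      benchmark::DoNotOptimize(length);
    }
  }
  state.counters["fallbacks"] = static_cast<double>(failures);
}

BENCHMARK(BM_FastDtoaShortest);

}  // namespace

FastDtoa is allowed to fail on some inputs, which is exactly why the dumped table below deliberately includes values that take that fallback path.
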
diff --git a/deps/v8/test/benchmarks/cpp/dtoa.cc b/deps/v8/test/benchmarks/cpp/dtoa.cc
new file mode 100644
index 0000000000..7f9a97f253
--- /dev/null
+++ b/deps/v8/test/benchmarks/cpp/dtoa.cc
@@ -0,0 +1,1069 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/macros.h"
+#include "src/base/numbers/fast-dtoa.h"
+#include "src/base/vector.h"
+#include "third_party/google_benchmark/src/include/benchmark/benchmark.h"
+
+using v8::base::FAST_DTOA_PRECISION;
+using v8::base::FAST_DTOA_SHORTEST;
+using v8::base::kFastDtoaMaximalLength;
+using v8::base::Vector;
+
+// This is a dump from a benchmark (the MotionMark suite).
+// It contains a large number of nontrivial doubles,
+// including ones for which the fast-dtoa algorithm fails.
+constexpr const double kTestDoubles[4096] = {
+ 468.1588596224, 480.3152423710, 523.3453890260, 570.1679798156,
+ 190.0465907999, 562.4923651186, 373.9153690274, 570.5984547204,
+ 515.4506822597, 308.7429754814, 176.5603162753, 43.9027116524,
+ 481.3434789318, 41.0993040511, 461.0124155912, 228.7679668768,
+ 491.6391940140, 733.0830583525, 227.6475280451, 493.0394209479,
+ 279.5408924144, 735.1125461811, 492.0691872806, 737.3663743301,
+ 87.1932548647, 762.5608311158, 300.9207923423, 246.1835506766,
+ 181.2835066099, 691.5725161284, 375.5178018165, 509.0501544704,
+ 207.1629680427, 371.9653871617, 310.2547970842, 279.4268876632,
+ 272.2559833257, 354.8581094830, 292.4814681094, 572.6136458742,
+ 291.1816687366, 835.3018571642, 117.5735765227, 584.8558183250,
+ 375.5703242000, 96.1422415615, 465.7228320626, 230.3613560100,
+ 558.1811795368, 586.1937948580, 193.3071465316, 164.7315231297,
+ 550.2162216151, 205.9741458774, 484.2569807606, 776.4126345078,
+ 374.4623562300, 290.1958843022, 453.9585512747, 465.4893101328,
+ 548.2565749399, 237.3136547446, 508.7985270223, 524.6502722544,
+ 193.4088935622, 77.9520729860, 160.5456417440, 54.6766152489,
+ 780.4914417607, 426.0727237702, 516.4995632726, 467.0438681707,
+ 827.7248524368, 275.1848233277, 506.1141300144, 73.0874965955,
+ 452.7129190767, 458.2850162522, 649.4299442894, 174.0508265151,
+ 270.2334743447, 162.0554894773, 844.6592777163, 277.4507392173,
+ 245.2313072573, 213.7873144789, 438.9344755736, 49.8126353642,
+ 187.4533692831, 531.3201674381, 425.8655536173, 379.4700465341,
+ 791.5863745783, 487.3973446329, 402.3151309389, 689.4106297947,
+ 223.7827699703, 846.7327281835, 237.1972576348, 614.7105084412,
+ 250.9736372060, 183.0601289549, 441.3473549917, 730.2877659622,
+ 176.3644984393, 372.8873484144, 343.4431735106, 866.6693864452,
+ 550.6362287984, 308.0283033251, 452.0509174680, 756.5892672550,
+ 260.9771943310, 130.1919787891, 476.7450943186, 440.7273213825,
+ 292.7602563236, 356.8079870938, 291.4262464090, 80.4305399214,
+ 703.7380547633, 261.3531254844, 215.4715027252, 431.7450361498,
+ 26.0419129206, 308.5045893580, 588.9014064061, 463.5525083473,
+ 821.7421101484, 228.4247100458, 766.0971221778, 339.4377738174,
+ 412.6341570736, 120.4403656706, 746.5716149113, 556.4490946118,
+ 797.5920347736, 241.5991630926, 612.1756970343, 229.2432469916,
+ 436.6797336511, 314.9449072017, 678.8403882783, 265.8133004467,
+ 284.4977102105, 443.3688720197, 751.8110258991, 288.6424820345,
+ 826.3144706325, 511.0279680708, 266.0506115043, 468.0653613444,
+ 539.4045651487, 468.7310158448, 81.0144695011, 400.0658987168,
+ 775.2842887593, 214.8558022547, 637.9186211297, 97.4338479295,
+ 202.7853649035, 152.1996706865, 637.6331366756, 292.7410837653,
+ 744.7716440842, 224.1139979479, 571.5668345547, 360.4495549501,
+ 782.1958542804, 188.0105462824, 38.0868154957, 465.4173187423,
+ 494.2204254255, 62.0640839692, 193.0405623360, 69.3528878075,
+ 356.0777562379, 661.2374952557, 228.0128880273, 325.3917825379,
+ 454.5339908012, 486.1457130016, 126.7946914065, 78.4242259984,
+ 266.2578038947, 574.9997956419, 150.2799424673, 808.8567313210,
+ 129.1252583358, 884.5000000000, 486.5468173868, 119.5950037048,
+ 488.4131881457, 584.3793369729, 84.9875334191, 203.6093123783,
+ 182.2859868258, 494.7116078570, 458.2190888610, 61.4774560550,
+ 402.3898069806, 728.1262779382, 347.0235583124, 829.6507381468,
+ 471.2398388466, 169.7708006324, 479.0151953503, 652.4946064187,
+ 348.6151379498, 79.7056834059, 243.6418719160, 722.8087194220,
+ 466.1883324660, 790.3884648281, 581.5648934640, 150.8295394505,
+ 462.6093111947, 42.2227945536, 179.4090043741, 372.8065515741,
+ 230.9919588033, 389.1472213205, 436.2594879588, 709.6601569093,
+ 88.1027017046, 22.5000000000, 403.3002883813, 833.5137523583,
+ 414.1482937256, 416.9911612710, 42.1947903320, 346.9305632705,
+ 540.2680792693, 573.6009903024, 353.5650585340, 703.3171894339,
+ 167.7476576376, 474.5765114232, 278.1135582025, 255.8693864644,
+ 360.8026986553, 84.9254816069, 295.6691419007, 259.6184858094,
+ 512.4030535233, 498.9198620968, 758.7456517081, 244.9206499767,
+ 16.7980179552, 75.8480444114, 135.1264626047, 438.8486465591,
+ 406.8157704753, 184.2350004046, 182.0958427683, 466.9882405134,
+ 508.4265398356, 261.8592275943, 559.6191545164, 197.7835796500,
+ 682.2163750726, 308.2233956586, 813.6172917328, 465.0575642925,
+ 88.8299171865, 135.5335850727, 254.5339945217, 463.6714454391,
+ 812.0230595122, 258.7931153396, 677.8951299782, 232.0559746924,
+ 34.8982603843, 519.1818744522, 527.4875215610, 495.0556590611,
+ 127.2060663755, 190.7010077010, 547.1644761968, 180.0473016614,
+ 676.3706880534, 461.8524783928, 213.8277535164, 97.8010744851,
+ 850.2959113846, 360.7417196351, 277.9543674996, 178.5236196748,
+ 90.4217541687, 198.3290708395, 277.5007351598, 350.7035905150,
+ 67.1082096846, 396.5931702061, 877.9275197004, 462.8396392321,
+ 77.0916598052, 420.1914155606, 724.3312211248, 180.8459986492,
+ 882.2705245878, 193.7235160745, 59.0411850211, 331.4489310045,
+ 91.5465590356, 365.0733424801, 227.9788120772, 250.7546503651,
+ 240.7035174334, 135.0715690820, 255.9093611959, 103.0732465910,
+ 583.5000000000, 467.7582573911, 253.8132344735, 382.0047947124,
+ 286.7705161157, 430.8770132382, 557.3196136475, 467.2204275684,
+ 47.9794737917, 137.7357904719, 232.7129390863, 329.7287725700,
+ 185.8077710098, 548.2746613969, 288.3794649303, 276.1350352103,
+ 434.9596003850, 245.3556661520, 226.2046116105, 529.3626355976,
+ 71.5386842779, 748.1890120862, 526.6588344006, 439.8470831188,
+ 432.0930332308, 665.2821626850, 261.5584726101, 572.4952718185,
+ 354.2129324571, 275.2556654028, 343.5661185854, 439.0880701817,
+ 111.4999318329, 470.3676929109, 456.3160712012, 569.8699369857,
+ 47.2841506100, 739.1812859068, 19.6172667580, 458.7706302263,
+ 219.0526785488, 391.5257563787, 74.8714927980, 463.4834484495,
+ 775.0867976781, 146.5856110625, 491.7471713388, 138.0562846286,
+ 597.7429202676, 352.6364405246, 192.0631840033, 177.7225067993,
+ 209.3954931344, 269.4641946244, 57.2247292912, 568.4658236656,
+ 857.2830795107, 529.4285984529, 335.0138259073, 114.7556280053,
+ 356.5974421783, 464.3530212797, 120.0515315526, 233.6829120313,
+ 489.9658934763, 125.4273019459, 732.9703056942, 572.7137286249,
+ 612.6597962940, 406.8909817441, 34.6886847306, 443.9002144852,
+ 627.9462826211, 471.2208432799, 278.5056764723, 377.5707253631,
+ 450.1937072957, 189.6392490642, 736.7944841153, 27.8749537844,
+ 523.6884842045, 251.6608864242, 743.5831047430, 504.9621433452,
+ 809.4955333103, 273.2013142694, 849.0200482558, 431.0941276276,
+ 146.4527203661, 219.8787653843, 326.0590586927, 506.0773400813,
+ 750.6237219131, 529.2881719903, 142.8579560342, 213.2316477743,
+ 688.4728314852, 535.5787206238, 461.6623032178, 475.0944398827,
+ 175.5966807158, 335.1151819121, 471.2136094302, 467.8323086700,
+ 396.5173883633, 250.8460236858, 706.4894229601, 153.0950909286,
+ 788.5334476657, 478.3854390729, 139.3264699593, 533.1816475667,
+ 648.4910609145, 327.0099230131, 155.3251635863, 266.0311408651,
+ 240.0511731779, 810.6179678204, 329.2489588901, 201.5497396899,
+ 470.5755719123, 174.3237777035, 428.4787954110, 102.4009615461,
+ 337.8621567202, 430.4749120479, 677.5211397605, 311.5311951225,
+ 374.5576534741, 289.8332025061, 662.2893894385, 452.2515202318,
+ 399.3249017949, 481.9510838776, 61.3584044335, 341.8616690931,
+ 513.1620612293, 184.3003039998, 264.3987906182, 165.9855167055,
+ 729.1875250790, 520.9548057848, 205.1872074459, 290.9794891538,
+ 186.2846129908, 507.0961311202, 787.7005538004, 452.4821885632,
+ 461.5744575612, 474.8184657202, 235.1909837669, 577.4070807754,
+ 261.8480066013, 100.7294297995, 38.9045159166, 299.8782935684,
+ 675.8971036095, 114.4202522220, 641.0206514540, 561.0036522169,
+ 258.3074994420, 228.3066891185, 592.4152753285, 68.8376708090,
+ 578.3508909872, 91.5645298205, 316.1097113152, 501.3930207871,
+ 138.0715853373, 766.5588975894, 294.7349323019, 489.4035963387,
+ 261.2447152829, 87.0689345250, 190.5698862432, 49.1672796667,
+ 227.7897786652, 568.9529130837, 272.7899442553, 485.8919869450,
+ 371.5790500621, 285.6709471653, 258.0152365968, 803.4392535146,
+ 533.3594896129, 223.6525494198, 548.3431153405, 866.5712777370,
+ 167.6691830755, 538.2955434401, 71.3453181470, 220.4777226597,
+ 576.6866092503, 205.1444302019, 182.4833507986, 95.6127185811,
+ 125.9495877082, 736.9705595895, 295.4486766628, 764.3920529565,
+ 552.9833484801, 78.9335944363, 116.3325984505, 201.5321001957,
+ 29.7625281794, 533.8622328568, 498.4066442650, 382.0897436055,
+ 549.3293274395, 740.3585239321, 301.7123158343, 592.3586141814,
+ 260.2621594400, 684.8418391077, 278.8280194020, 800.3029298065,
+ 298.4474494276, 329.0443873758, 226.7235313434, 406.2821026208,
+ 383.2683666591, 756.1296472501, 407.0649153487, 209.3483952462,
+ 444.0504015055, 71.2084800261, 342.5365403474, 516.4893129090,
+ 340.5325402108, 470.4869220887, 321.7571713083, 142.5171713727,
+ 338.1453265880, 382.0030245468, 384.2816594972, 545.1176011152,
+ 170.0987264769, 323.8026240543, 337.6365363991, 433.7411830353,
+ 58.9071151611, 200.3050385152, 778.5624426703, 500.0402479571,
+ 614.5538989565, 466.6254749803, 776.7269195474, 279.8259676875,
+ 705.9453211331, 463.9647624701, 299.2772177283, 347.9224715474,
+ 128.0395006953, 169.5937409096, 150.2891311846, 314.2536711159,
+ 652.5786055030, 366.0362876005, 606.1980299473, 500.7979020236,
+ 738.8533700241, 135.3760081455, 180.8784060345, 369.9019068674,
+ 714.7990591272, 509.9282102240, 550.3499421783, 468.1426558391,
+ 708.4982398855, 208.2682402528, 862.5315972697, 507.3673299905,
+ 18.1837093974, 306.4385916327, 278.7680825049, 466.7728485341,
+ 572.7996353839, 51.4281915470, 833.9190829209, 299.4227212449,
+ 576.9270604306, 363.0907760628, 447.7349074443, 283.3384815038,
+ 576.0852243676, 407.1559358485, 100.8463616601, 531.2206661099,
+ 416.0873967507, 520.8621499277, 308.5074777197, 515.8182766043,
+ 209.5000000000, 281.5000000000, 301.2559464406, 28.2901967445,
+ 831.8578966754, 527.5110229639, 445.3392869505, 133.6636777893,
+ 285.7982464996, 426.7475115510, 820.4371608704, 385.8079181592,
+ 585.7686680200, 336.7421089350, 872.7675647593, 446.3788737769,
+ 43.7120670459, 408.8272032431, 668.3648205564, 257.3771685896,
+ 679.5836110410, 307.7914440437, 428.3165579405, 252.6325482303,
+ 482.5793703218, 100.6855582836, 207.4137953397, 407.3575940149,
+ 468.5702351823, 588.8198027905, 531.3937365422, 131.3452137392,
+ 337.5826651754, 587.8247253992, 141.8864393869, 500.3923373054,
+ 400.3824396347, 140.0292419999, 283.4309146650, 667.8284309031,
+ 136.2600610558, 651.6271618361, 88.8318022357, 705.2303488064,
+ 159.8192021109, 683.2739937035, 215.2590065046, 640.2248726813,
+ 454.6189018461, 420.5719657854, 256.5664877796, 450.6886607958,
+ 287.5182407384, 682.0706665826, 217.7748582067, 39.0462660741,
+ 218.0137796065, 690.3888402986, 399.5977155380, 522.2067408689,
+ 405.3929532686, 194.9571483695, 467.8223289389, 715.9477754993,
+ 503.9679170843, 555.4753218466, 232.5318901928, 648.2472989182,
+ 428.6519404099, 45.9176844812, 136.2083425854, 293.1534368051,
+ 502.2837156053, 77.6927673004, 106.6850139442, 533.7446860820,
+ 254.4722719900, 417.3978184098, 381.6664451305, 704.4862018271,
+ 62.2545296088, 824.3719233222, 193.4864805574, 329.2407371221,
+ 163.5322776383, 796.7210636123, 171.6914849939, 256.5245967811,
+ 206.5842943697, 190.4650117966, 188.2565124745, 236.8750691960,
+ 420.4499316864, 144.5791427940, 639.3404979349, 385.3774752320,
+ 289.6684408534, 300.7427922815, 839.4766391814, 367.2516463203,
+ 68.3391568038, 99.6397056812, 309.4246710336, 537.2201564735,
+ 266.8936390506, 521.8190024603, 453.7911268652, 458.0707963476,
+ 135.5849998035, 178.7634931151, 259.5244315697, 34.2095838459,
+ 44.7987144504, 485.7495653064, 397.9448422609, 620.1809336586,
+ 498.2755076242, 133.5842536272, 244.5420554029, 210.5000000000,
+ 292.5000000000, 862.7752443774, 503.3170338345, 254.5400063431,
+ 548.0580175711, 282.3785158570, 78.9363752101, 274.0648191677,
+ 361.5054580825, 53.0979410000, 168.3850551026, 124.3500138723,
+ 304.7403704201, 584.2848998438, 263.4182004292, 287.4060904930,
+ 473.9934867930, 747.5017294939, 494.7767506123, 758.0980992971,
+ 560.8375556194, 499.7643463347, 246.6144125194, 194.6530032918,
+ 462.7918545647, 590.3917824714, 76.1490604474, 761.4594941092,
+ 229.9590116142, 237.7216925434, 462.5416184124, 791.6091001192,
+ 341.6526669076, 694.7741153341, 342.4983112695, 874.7225037844,
+ 142.6932143650, 680.7617714576, 291.8889080467, 337.7761963087,
+ 173.5227898154, 753.4118264946, 347.5892316583, 37.1323302305,
+ 273.8842276086, 777.5700758822, 518.1281889964, 673.7333639160,
+ 420.1741423182, 782.0488501019, 558.7848647662, 530.2529043146,
+ 252.9386017490, 672.0937479287, 285.9481572407, 835.2416004811,
+ 275.4505364534, 717.8100379298, 469.9631374820, 255.7848202930,
+ 145.3416661405, 850.9668680853, 94.1424719797, 254.4382481370,
+ 545.9119241282, 218.0397349072, 461.7377315965, 493.5858479262,
+ 171.1829693461, 143.4035159950, 195.2955625324, 306.0363645425,
+ 527.8843452805, 264.0526432374, 225.0262634147, 614.7373051793,
+ 140.1848355582, 511.2186393811, 513.2622498121, 339.6202149649,
+ 691.4260162803, 467.3378165290, 737.8219655995, 511.9396363182,
+ 400.2261887241, 854.1265612233, 504.8166034375, 262.9535618309,
+ 368.8766143811, 854.7044628921, 262.5679887032, 618.6465757897,
+ 262.9288832251, 777.7371988437, 568.5895172149, 174.4038038022,
+ 322.5258367003, 43.3879958970, 525.2912657241, 205.2986145212,
+ 471.7944228973, 28.4864594264, 148.2680004014, 70.9671883417,
+ 286.1578590144, 848.9011540980, 172.7805372774, 739.0631967819,
+ 254.3833249540, 192.2193077486, 387.7539229651, 377.0736156979,
+ 468.2789335717, 212.9810710998, 175.2770963625, 191.4299492662,
+ 871.0973766117, 138.1930311023, 364.4465024501, 268.2995271577,
+ 50.0573317927, 85.0211963818, 709.2693182540, 318.8823647056,
+ 321.3891414538, 567.9909805701, 293.9262330922, 209.1693842534,
+ 388.3073296410, 276.2101740683, 129.5233779433, 231.1911141741,
+ 36.1288236386, 89.6986558110, 369.1544941697, 327.5464872991,
+ 694.9703922781, 571.8497945682, 116.9997643099, 359.5581217427,
+ 268.5540905282, 363.7561550590, 843.6559608929, 464.9217682606,
+ 706.5054704490, 218.5967421246, 616.8789115065, 509.7600616188,
+ 756.5735077613, 546.6671256922, 289.7920870280, 352.6618603814,
+ 256.9773574380, 32.3198602236, 67.8690647572, 173.4520458228,
+ 251.7249089952, 40.4063490479, 594.7155837988, 432.4352871055,
+ 690.0915213980, 569.5000000000, 524.1544658652, 99.7283618264,
+ 219.5023697487, 485.4921899664, 409.9171843010, 267.4082593047,
+ 468.1151653831, 116.5519069329, 786.2313222331, 208.1760202457,
+ 226.3384974514, 85.2516819250, 821.9152706716, 271.1511271268,
+ 99.2400871788, 160.1187061929, 673.8728006889, 528.3738603332,
+ 863.3655508756, 389.6525361521, 210.4946526249, 236.6573848722,
+ 287.4943900458, 478.0444318993, 690.7528394870, 268.7242220686,
+ 393.6968634645, 479.5384847316, 458.5275879153, 564.1632344203,
+ 312.3686878252, 566.2994731457, 540.4954651066, 367.5094235910,
+ 475.9528367239, 470.6413644948, 207.6207706791, 356.8466040436,
+ 343.8816713458, 284.1276873616, 438.2709379779, 135.2227234382,
+ 286.5286205499, 521.8363754746, 410.2154883310, 426.6126009068,
+ 196.5551699579, 160.6805929085, 130.8439878141, 842.5565067767,
+ 216.7822113738, 794.3837124605, 569.3424678821, 336.1298141590,
+ 179.3982047096, 465.1517246928, 457.3796671871, 401.1234959443,
+ 277.4389526747, 772.4376483746, 512.2008758501, 677.7574168765,
+ 262.4721382441, 728.7541737614, 496.2177922305, 593.3531537968,
+ 289.9357128571, 156.5889073426, 354.3561756940, 613.8436794870,
+ 365.7281833317, 311.3245121907, 180.1830827583, 708.9987672354,
+ 314.1925660067, 717.8754424625, 191.8786209926, 486.1984529918,
+ 313.0376050418, 507.1099697999, 352.2004124087, 682.9994749358,
+ 883.5000000000, 307.4665035438, 402.7246498011, 252.1727884551,
+ 201.9964021317, 352.6667080436, 503.7060975335, 133.3259296711,
+ 381.1705078209, 454.0079253219, 243.5229164288, 295.9468141297,
+ 522.9381820463, 468.0092614532, 315.7988060748, 519.1580244670,
+ 446.8296427816, 186.7458967200, 549.5105396416, 250.3640355736,
+ 362.6994617827, 110.3426640140, 810.6153527642, 388.1700447076,
+ 210.3128242438, 397.8234765337, 662.1656271164, 498.3018861448,
+ 515.3035400457, 415.9115392726, 207.7522414561, 115.0259911807,
+ 545.9738676699, 147.9242373528, 225.9106686001, 367.3280794708,
+ 186.8762162703, 81.5795323365, 258.3589765767, 765.8755776601,
+ 548.6569667969, 575.6547637085, 387.0642522961, 241.0527573657,
+ 488.3961429240, 612.7536354696, 167.5595899166, 726.8797539869,
+ 362.8982697343, 366.8281253310, 458.2081846738, 184.4297445035,
+ 570.6829415347, 511.1117090802, 415.0961157671, 79.0786428283,
+ 62.5987022676, 728.7552573685, 409.7322237339, 330.1573847349,
+ 447.6867603332, 690.8333836351, 431.6067659820, 461.7902870783,
+ 98.7703028951, 438.9602177470, 788.0190191680, 367.9005631974,
+ 219.6557567253, 350.8358816065, 611.0851104215, 187.7432799668,
+ 422.5181486429, 295.0444133733, 734.2135079282, 566.9354930587,
+ 597.8150541492, 471.7973378895, 178.5669050127, 516.3655411452,
+ 114.0201916903, 256.7351163300, 475.0269804957, 15.9220345287,
+ 855.1252311366, 371.0423872806, 843.6231739541, 315.0946227072,
+ 154.0490129717, 406.4608296453, 205.4032316465, 272.3001730444,
+ 582.2830940913, 321.2979670244, 123.8868090828, 466.3162106480,
+ 784.7537847714, 412.6532150046, 603.7291904494, 342.1567176252,
+ 350.8393002913, 431.9584962862, 574.0378810373, 400.5168007575,
+ 766.0987030318, 95.2596953735, 528.4785628807, 518.0928150913,
+ 688.5505286801, 452.7707744142, 525.6830313694, 195.3260897864,
+ 654.7126790660, 189.3564345662, 202.7833858379, 162.6396481576,
+ 283.2758016528, 459.3589198996, 796.4225497331, 111.9306275450,
+ 855.6433236088, 569.3886881454, 379.4976756277, 223.9758299780,
+ 643.5471365034, 475.9767134930, 152.0159053512, 300.2940150457,
+ 530.6271220744, 145.9132440772, 829.2393324711, 577.6466540231,
+ 737.8570105686, 397.3556483163, 399.3082291353, 145.9903281782,
+ 695.8723148653, 269.0676973940, 794.8246878235, 504.4767373779,
+ 823.9027902144, 33.1129793449, 106.1114250991, 644.3271739174,
+ 399.8622557185, 835.2883024657, 230.7827104657, 271.8432046810,
+ 183.1359546153, 405.0234361727, 262.9962924577, 743.9638403691,
+ 234.7159241609, 206.1149731500, 298.6392163131, 347.2768487679,
+ 465.0581530652, 301.2258394456, 387.3345340403, 792.9681911540,
+ 31.5222335622, 705.9833262148, 485.0164399441, 546.5870455869,
+ 79.7786416294, 156.7352766156, 122.8819110847, 180.0172556246,
+ 100.6590612908, 160.0635769155, 238.9659552802, 461.6806673060,
+ 423.7312877116, 422.0361318989, 156.4565121335, 292.3718813264,
+ 496.1575763630, 73.2750477168, 238.2577463900, 204.3475286185,
+ 789.8394384835, 277.0531168080, 86.7694115905, 481.3905974738,
+ 442.3778631057, 107.4294161372, 164.3873452004, 271.2929608816,
+ 598.9600285823, 169.9145236252, 300.8834530512, 59.9505203401,
+ 677.8611141117, 250.6133976711, 414.3517397671, 99.6412370083,
+ 280.8243937194, 396.8588793816, 116.4831446507, 283.2005206267,
+ 413.1890532706, 352.0075373283, 588.1567507478, 346.7968591542,
+ 171.0537739146, 297.3150393830, 627.1189430648, 161.0262533014,
+ 403.6271624373, 263.5929096777, 792.0677989841, 281.8524517075,
+ 171.4949906028, 90.7137287168, 653.3474963643, 86.9035764491,
+ 533.4506907631, 510.7062719042, 338.1751256562, 226.8047049356,
+ 320.8573242544, 318.9760575659, 783.9048189300, 415.0791907493,
+ 102.0715788589, 148.9586061654, 728.4715837811, 479.0901046147,
+ 477.3074938055, 337.2936144520, 681.5745404755, 124.4842998940,
+ 649.1077975027, 338.1546274125, 628.0957066934, 328.0620049883,
+ 78.1336855944, 132.2461366362, 721.0391229960, 321.0970804251,
+ 214.6768025237, 115.5620149203, 239.5568456449, 29.4005953937,
+ 617.1124408854, 488.6639750961, 264.8137175777, 223.8434411151,
+ 217.4008174993, 509.7268600883, 446.3885897390, 36.2968130759,
+ 256.9440449766, 147.6743838878, 131.7339062035, 200.8968257679,
+ 608.8711619247, 474.0859426513, 86.2278026839, 502.0188829816,
+ 306.0514988556, 453.5222792915, 583.7210453858, 480.8630202978,
+ 853.5872191107, 241.0319124627, 835.7507560770, 16.0142377840,
+ 65.9820483645, 393.7981386785, 804.7255034818, 567.9135808335,
+ 271.9276191800, 431.9240042273, 305.4355579799, 349.6993806196,
+ 318.2134308971, 577.0133648724, 344.0023856657, 142.9814354957,
+ 236.3610111198, 584.7133997682, 318.9543854822, 498.1032076625,
+ 130.8350539739, 437.3222433167, 473.6244130367, 880.7534924951,
+ 173.3144748988, 254.9129620672, 118.8027959959, 352.9246822862,
+ 396.5842569310, 415.4595461803, 170.4477758971, 693.5793207437,
+ 103.7661690469, 771.9352848048, 181.0642497450, 94.0131635726,
+ 325.6151051977, 190.3146479500, 18.9358605650, 607.3678996354,
+ 336.2950504142, 62.7847433589, 86.2955726397, 492.3905855669,
+ 164.6482762581, 242.5591549361, 87.1268649314, 701.1910670681,
+ 125.6335729449, 772.6878846047, 355.4895634121, 333.5668840897,
+ 562.2918480534, 207.1744050555, 73.6124394554, 435.1354334485,
+ 162.0273297741, 174.0715844129, 156.8382210070, 513.5303834188,
+ 188.3049739764, 72.9653274678, 414.5107077640, 75.5323587813,
+ 283.3754873063, 722.7639409791, 451.5807510887, 261.5572188451,
+ 185.9303060573, 859.1958787550, 345.5369863027, 181.1500902664,
+ 169.7486257404, 648.6493267968, 156.6828582407, 595.7828247249,
+ 126.7257921465, 561.3786238267, 456.7255906462, 787.4769628176,
+ 316.4717916024, 576.4954887114, 481.9516765202, 98.3852105308,
+ 106.6958446791, 128.4697231089, 415.5746797222, 491.8544632501,
+ 244.6621124884, 393.5563442187, 292.6430324604, 524.5735396489,
+ 303.6274209201, 248.3698244627, 340.7506056615, 359.6998869403,
+ 452.2304793817, 714.4791394360, 536.1939805491, 855.7400286777,
+ 384.5683644231, 148.8349768451, 275.0436014235, 271.5836040640,
+ 192.7838282995, 405.1654318004, 533.8833628471, 23.3741755182,
+ 685.3279760120, 445.1578480594, 819.6715237954, 75.4706799279,
+ 842.1356927879, 462.5810227394, 463.3954917359, 89.1797527902,
+ 103.5596584963, 300.2196626721, 497.6877487648, 471.8223906459,
+ 275.7389866135, 428.8810382767, 788.6937121280, 449.8869768867,
+ 425.8306414562, 69.1077412316, 113.7450707104, 186.0948785140,
+ 623.2285861214, 366.6074253820, 611.5712530825, 131.1815976730,
+ 791.5768088041, 294.1364744979, 319.0274615942, 137.0737469103,
+ 738.5117671213, 292.7950225960, 851.7377034731, 184.4367269742,
+ 827.6791673684, 508.6797675759, 487.2673155814, 241.0827156115,
+ 91.7702682068, 377.0564448291, 865.0706515513, 463.6753131137,
+ 119.6486625631, 562.8566835627, 745.9217690354, 255.7003110682,
+ 131.1657108043, 232.7006190461, 354.6950222506, 574.5000000000,
+ 632.1491477362, 521.1436929601, 382.8243129962, 341.8374733072,
+ 240.6895366531, 249.5232551785, 498.7326333800, 451.3195565247,
+ 504.1926489476, 507.2433581105, 373.3474018846, 354.0111108066,
+ 183.3174326692, 449.3793051641, 222.3730232202, 172.5697177124,
+ 663.3902179576, 565.1119021759, 290.2210361178, 494.1515501767,
+ 354.2432551599, 225.3282196811, 376.1381774230, 416.4367637754,
+ 484.8711418033, 244.8043972161, 370.0948930112, 390.5394732947,
+ 860.4591347643, 509.6543083551, 99.4088531227, 297.4524216205,
+ 480.2172527878, 459.5124211693, 635.9605141371, 341.2882017997,
+ 598.2865279087, 478.5792901954, 631.7512778931, 57.0456718615,
+ 675.7512592586, 357.4073051822, 619.1392863800, 558.1232315106,
+ 44.5604506824, 18.6170476304, 573.2349973447, 264.5690128899,
+ 263.3709831900, 407.3232645569, 198.2954827585, 460.8600193450,
+ 221.9092048252, 309.1631456608, 276.8306060741, 466.5677340383,
+ 701.0243920902, 256.6905686123, 558.5256358887, 325.5375967570,
+ 417.6498822274, 189.2853096387, 151.0522779992, 37.5319905031,
+ 502.6456016026, 450.0090436676, 43.0355671315, 344.9825352878,
+ 479.7423126905, 279.1487531746, 697.3759237683, 94.6749258549,
+ 347.4984444655, 233.1269132477, 436.9890878920, 471.1372573852,
+ 668.4979558937, 445.7891710020, 716.3860146454, 73.3773269427,
+ 358.5678369783, 255.1757369973, 433.6632150040, 183.9160248621,
+ 178.6681088478, 410.6899738731, 592.7190955499, 281.2532707342,
+ 445.7289609102, 260.4412642042, 101.6539006852, 28.4766843788,
+ 467.7894737248, 427.6108606825, 70.1620609522, 514.6641879247,
+ 266.2187253232, 594.4049810482, 154.1867783030, 721.7397761869,
+ 449.6797869184, 738.7187680109, 106.4121817265, 324.5565347026,
+ 523.4025554365, 248.2706134183, 324.1129666971, 63.5958401275,
+ 539.4046284602, 451.9338750698, 368.6715013241, 581.3084315401,
+ 399.2252521727, 81.0872475128, 125.2380676561, 211.5000000000,
+ 286.5000000000, 199.8000000000, 617.1345240048, 398.7470344886,
+ 816.6566005273, 426.4620777677, 81.7730059360, 540.3510983785,
+ 230.3255602236, 390.9088217108, 119.0843418558, 508.0553256868,
+ 590.1622881156, 575.2697050364, 764.0784346893, 273.3560447025,
+ 54.0053571655, 548.7097261571, 27.1464837007, 124.7022750840,
+ 798.2907011588, 161.7439768037, 500.6506923992, 324.0666999976,
+ 777.6925747468, 173.9867669276, 638.2929223595, 297.1079412667,
+ 391.5089986157, 365.3517510920, 724.7902793611, 466.2979329074,
+ 220.1349765090, 111.2532443083, 371.9139550393, 413.0123818425,
+ 405.9516422014, 335.5379122198, 441.9032353935, 235.6508779687,
+ 258.6107615216, 225.8433674684, 427.5058819938, 149.1947740122,
+ 518.7161075246, 356.8129449187, 689.7947209709, 382.2697419271,
+ 749.9207006495, 432.9427378684, 475.6347139448, 375.7253169158,
+ 767.3298468392, 482.9307523328, 405.2837229566, 246.2996342638,
+ 481.1576192966, 488.7145265121, 308.3553896594, 390.9148400883,
+ 608.0689320032, 534.2692418426, 637.4550249259, 469.6628722431,
+ 480.2400521559, 390.1622014838, 723.1782101568, 821.8691153992,
+ 565.7902341478, 280.1099992622, 455.0816599007, 713.9366820431,
+ 514.2638436133, 468.9775564592, 249.8152260218, 568.6020935759,
+ 363.6102929840, 600.9733456407, 36.3333223028, 412.3749006933,
+ 541.3359807271, 559.1681372604, 237.0305790769, 642.0592900481,
+ 103.5086677276, 166.5248613748, 500.4085911325, 680.5072765542,
+ 331.3671316612, 353.5142561657, 196.5979766942, 745.2230163799,
+ 579.1686524131, 299.7810709040, 572.2637190203, 105.1305464250,
+ 547.5428497583, 440.4240992946, 96.3351560476, 329.2525228890,
+ 443.5293168834, 277.6774474055, 170.4522118287, 398.8490739766,
+ 314.5283504396, 696.4486491772, 352.7735765342, 484.1554384273,
+ 181.1305533033, 535.3067965676, 362.7590371444, 466.5027996042,
+ 375.9256162151, 294.9245917401, 116.9826253714, 275.7516448441,
+ 487.9683271461, 587.1071339153, 168.6939718363, 592.5140477638,
+ 470.3620754360, 645.9817343733, 303.4203161733, 471.9007680660,
+ 88.0180427593, 717.0018954270, 197.7700918055, 187.1765337052,
+ 352.9661219642, 379.4075446081, 116.0330910326, 88.1299487699,
+ 448.0318870231, 616.3663592460, 178.6581819847, 408.6012397757,
+ 64.1066498653, 273.4842523538, 546.8743571346, 182.1255452755,
+ 337.2196356431, 330.9481130211, 217.7072835209, 58.6205147667,
+ 217.5765654719, 234.3650152382, 139.1616368622, 711.3591303221,
+ 223.1772024093, 617.8434926936, 521.3114202029, 111.8417003645,
+ 26.0401544026, 533.5068715122, 138.0938002922, 854.3010033671,
+ 512.8487594599, 702.4077058194, 413.2787142184, 111.9795351790,
+ 510.4897615346, 693.9546592149, 399.5936924686, 446.9625524573,
+ 224.2022278066, 390.9749517372, 221.4081692155, 588.1693657874,
+ 255.8528824560, 577.2685457817, 555.9943927087, 843.4297029468,
+ 144.7309432567, 495.5155437215, 380.2405180398, 316.4933372033,
+ 43.2355659653, 322.1193449007, 484.6982041905, 762.8293378319,
+ 436.4157401314, 820.1565904443, 241.1122399686, 44.3253512905,
+ 151.0054741300, 535.4099188455, 375.2561701674, 70.0932516325,
+ 22.4945177050, 756.0819272375, 431.4131141517, 345.5659978806,
+ 114.1310633857, 537.3910625057, 68.8699456026, 244.1316675011,
+ 436.9886854833, 782.5528184277, 238.7464441257, 149.9871312866,
+ 452.3464640662, 707.3389312016, 424.0107455285, 329.0428834720,
+ 357.3548931927, 840.4534026430, 500.9222569192, 247.7282284196,
+ 307.0425526440, 784.4468128957, 367.1359281482, 334.3618577969,
+ 95.0300966135, 216.6028938665, 364.1888751982, 533.5888822885,
+ 538.9575355661, 230.7740894302, 274.0711933107, 821.5272717879,
+ 396.4063831836, 564.0669940378, 578.3641142724, 212.5000000000,
+ 273.5000000000, 820.0122886408, 555.6441957642, 544.6095127602,
+ 367.9171191072, 733.4979407705, 497.6453112891, 863.5777134207,
+ 140.6335870532, 122.5864976638, 228.2182590682, 219.2866803697,
+ 468.0218121968, 78.0860242602, 224.9203496790, 868.5706573787,
+ 458.6096365585, 404.1962428607, 20.9601048299, 755.6544303219,
+ 319.3065169835, 498.0275750978, 110.7229155098, 124.3796084156,
+ 833.9526495150, 405.9198141142, 42.3816242309, 447.5194281556,
+ 733.9780528636, 65.7819571538, 549.1365815381, 251.7842174555,
+ 483.9250948131, 424.1581441105, 282.6202036456, 565.5220643584,
+ 291.9099133332, 401.9494943701, 43.4324938265, 732.9596093199,
+ 224.7270341756, 861.0311828169, 566.1714866784, 517.3143874751,
+ 235.1543815522, 449.8916663542, 334.2308732691, 241.8435817212,
+ 236.8998313258, 85.3274896510, 326.3908471987, 538.4608238521,
+ 377.0585843765, 506.3569303093, 254.0365051101, 450.6165250379,
+ 313.5175257344, 129.2076914051, 448.8140960936, 832.7234256223,
+ 248.6701133515, 539.4725597540, 63.7030533210, 197.2716401810,
+ 35.6674016419, 210.1133999827, 555.4045969667, 558.5485089541,
+ 472.6772249609, 467.8482775472, 299.7377586393, 862.7443177454,
+ 79.8512193246, 774.3161094769, 118.8607165825, 68.3732659053,
+ 403.2431159762, 773.3756953393, 147.0714116586, 43.1045140676,
+ 573.1696317320, 731.9232767438, 265.0362378182, 407.1644241441,
+ 194.9247525665, 132.4382508724, 323.0820752663, 733.7974753387,
+ 512.8359665480, 591.5025046873, 409.4926464484, 553.4780624292,
+ 241.9875152814, 711.6112498269, 478.3878084228, 656.3634248594,
+ 462.1695846649, 696.0853249341, 497.0467208213, 554.7031003443,
+ 304.9246163174, 55.9464976204, 551.3082876723, 56.4664004567,
+ 328.1924288460, 130.7025244276, 547.3318959153, 650.3410002831,
+ 309.8006127593, 861.6416591564, 512.0162476207, 124.6500056126,
+ 198.1825928890, 665.0717430877, 304.8915263086, 735.8899504389,
+ 106.0823698256, 812.2910019411, 110.7982996666, 463.4787724542,
+ 309.9724209044, 553.6546283933, 132.9408170282, 96.7427047632,
+ 425.6288735435, 252.3577097322, 153.6808765436, 784.9745774513,
+ 192.4232796647, 752.5786316723, 475.4278806700, 648.9595777480,
+ 180.9303327890, 191.0058320470, 302.1505831014, 74.5647233443,
+ 91.4888158492, 366.0549904063, 390.7990577672, 593.1326095856,
+ 28.5231303220, 748.3165691962, 569.2256750820, 101.6806720557,
+ 249.1967564920, 152.1106740210, 68.7467502549, 283.8971522497,
+ 582.5000000000, 303.8995041955, 106.4620089364, 632.7337006300,
+ 191.0337664066, 137.4560796890, 522.6319190513, 202.9777711829,
+ 177.7567476824, 790.1770104507, 269.3972528234, 471.7426143450,
+ 260.4208609575, 718.5311401667, 448.4462208776, 623.6338043186,
+ 493.9571869715, 666.9041998929, 276.6538101544, 810.7676975034,
+ 534.4293873087, 511.2109221469, 45.6439802064, 300.8462525029,
+ 268.4733035993, 802.1163360305, 341.1041517586, 431.3360358407,
+ 190.5287868457, 378.8977419205, 538.6010498317, 834.5826726426,
+ 403.5847961197, 442.3115047168, 256.6090144763, 605.6859151090,
+ 502.2374621196, 325.1284461894, 490.0636660628, 16.5616244561,
+ 258.8580094289, 166.7016039937, 403.4872588976, 832.9022671071,
+ 488.2308664746, 317.1605945904, 219.2012808291, 137.7677915224,
+ 374.0709933583, 793.9652734656, 385.3933696637, 119.4308309853,
+ 465.8102467744, 155.4171889478, 47.3819871075, 340.0441502992,
+ 273.1819437793, 399.1326663173, 369.2615209586, 241.6866288990,
+ 116.4237353218, 415.7258805321, 645.8358232090, 363.0567234473,
+ 616.4154709448, 47.2631595834, 715.2576638377, 361.2738056875,
+ 667.3659825598, 364.1596613149, 136.2090333856, 501.8444879129,
+ 474.2117502075, 85.6465137404, 496.5722219167, 423.7165434603,
+ 631.4146508718, 257.0359324680, 568.3283992839, 430.1890487553,
+ 849.0570672061, 336.9809920106, 616.4930023936, 308.4555988029,
+ 94.6727563054, 197.3500077200, 685.8162722829, 266.3218838992,
+ 790.5071925146, 211.8405166628, 153.9353945475, 198.2987850168,
+ 105.8400840138, 462.6211968464, 72.6948719059, 548.6399432755,
+ 350.8140519788, 296.4936184049, 412.7432486520, 58.5524094825,
+ 679.8730216753, 273.1860239338, 792.9334776230, 284.9730967782,
+ 240.2619127888, 170.6848350989, 739.9034195220, 365.2006365440,
+ 517.3401691139, 479.6784149287, 159.6680764644, 453.2585038032,
+ 563.5840623868, 331.3817796395, 698.7596466375, 474.4226649795,
+ 802.6948532213, 423.0905851001, 728.9704573600, 465.2963674908,
+ 178.2330616554, 126.2695801971, 502.0058474182, 426.4710232866,
+ 255.9294567300, 338.2385348779, 463.5051086898, 364.4270978572,
+ 278.7846838824, 74.9631356998, 275.3495311776, 164.4888501161,
+ 120.3797405583, 120.3758860356, 411.2007643865, 485.4601057221,
+ 174.1518964709, 452.7277549246, 476.0381678037, 75.0584592209,
+ 279.7349354546, 312.4578845292, 510.5893281052, 204.0214100872,
+ 563.4097461538, 385.8370690586, 624.6749222238, 295.4713256612,
+ 663.7880425547, 51.5816679115, 569.9398904689, 262.1684312840,
+ 664.2597825044, 23.6330945481, 843.3271913581, 219.2823974596,
+ 198.0103968065, 530.0209164462, 352.2188302359, 459.6559220224,
+ 804.4356343060, 305.0557069800, 266.0826555510, 123.2205074375,
+ 184.3680600332, 554.6558995900, 570.1684695561, 281.7190801233,
+ 265.8167667751, 72.5323325674, 467.3523934680, 299.3436529355,
+ 455.1864314794, 32.9249257685, 518.9660948062, 421.8482832955,
+ 386.5796087808, 564.4169280481, 118.9128387110, 317.0567633211,
+ 522.9375822318, 274.5201104622, 170.5958563611, 305.1473507801,
+ 493.2088258255, 555.0617400394, 440.9890293994, 286.9802675196,
+ 220.1566403481, 139.1801439648, 872.8817135451, 326.4084712280,
+ 626.7888452281, 310.5599335124, 672.5662248681, 176.0204107885,
+ 239.6338921283, 463.9961003601, 752.5372792141, 554.7794580535,
+ 549.5263118178, 494.1900262260, 553.1578313827, 482.4209129022,
+ 71.2752382695, 298.5904609802, 848.5336379824, 158.0406534268,
+ 96.8968333706, 451.4128294532, 367.9871499940, 139.2989965877,
+ 820.9057445254, 241.8758180630, 698.1282618086, 148.5866664082,
+ 370.3715146267, 28.5609838542, 679.2115523990, 231.6128235294,
+ 378.0460553063, 547.8262887434, 679.3052063661, 217.2083623843,
+ 148.0030292990, 435.3883592755, 206.8864260212, 132.4902518323,
+ 487.4480917049, 247.9192607195, 176.4678789080, 463.7572033540,
+ 141.0355333844, 489.6539810522, 641.9832696858, 288.0710916449,
+ 335.3422537871, 78.8636788882, 528.1270993334, 574.0186671587,
+ 416.5973977800, 511.3758525297, 804.6863258241, 580.0620838780,
+ 521.0440793815, 377.6676862790, 676.6170834492, 473.5745359166,
+ 620.6241985837, 231.1849983211, 685.9271365622, 556.0851565518,
+ 859.6657376327, 358.8167365801, 136.3586794804, 163.8969073261,
+ 733.7681814531, 53.8773492345, 678.1702253311, 449.5554703206,
+ 670.6167476759, 146.8945721839, 397.5690796564, 634.8460860811,
+ 206.5207845028, 77.1924728309, 342.6994827842, 275.6117647958,
+ 379.9530372841, 668.4370109985, 106.5991597995, 153.4439933831,
+ 192.7305745511, 877.0548658269, 435.9426104523, 24.5000000000,
+ 501.6220720342, 451.0916395189, 204.9122725365, 675.8959990659,
+ 398.9252014618, 397.9139233175, 415.3026437605, 321.4488022608,
+ 497.5559144013, 451.1625454729, 30.8416731100, 262.7639600646,
+ 396.4590008739, 695.1985550973, 485.5548357416, 590.0004591485,
+ 367.6432675867, 25.5000000000, 121.8931692009, 399.1714358643,
+ 551.4524474367, 58.0282540324, 285.8393116341, 161.9047656748,
+ 813.0377926386, 574.4507409234, 564.3339957197, 57.9355725949,
+ 416.9388029036, 154.3584194770, 157.7953679478, 406.3841191719,
+ 253.4391657052, 328.4346192438, 288.7654682945, 419.6682533698,
+ 99.2819336678, 560.9980791282, 834.1221373133, 102.4432571150,
+ 432.9320402355, 392.5558269246, 726.1716208182, 393.4953941008,
+ 166.0364645213, 96.5590757181, 508.0901288703, 473.4311831216,
+ 596.7313484868, 525.7741072382, 472.6074271817, 52.4470997604,
+ 92.8122861625, 352.8121598620, 737.6950240014, 261.4202996522,
+ 162.9354943663, 548.0228479964, 118.8157814339, 239.0240404960,
+ 701.4547611806, 281.0496425970, 20.2036613828, 478.4218473664,
+ 40.8175813658, 234.2887555326, 295.4102360799, 345.0520543952,
+ 66.1373049179, 367.2430249964, 101.8281250780, 68.6939198898,
+ 264.1018304231, 488.3919908541, 400.3765470046, 41.6361996841,
+ 720.9329661743, 351.8349205587, 695.8180724599, 344.2839180875,
+ 742.0074324399, 144.2949753193, 355.8627098786, 230.2807597251,
+ 190.2278269455, 560.1908955403, 233.8885842781, 393.1100523023,
+ 324.0251160008, 380.4823948328, 619.9501893282, 422.4802500630,
+ 299.0421732770, 124.9216808573, 511.3420047711, 454.1305288391,
+ 89.4746433224, 144.1039666230, 277.1124231808, 262.7666754971,
+ 803.1732327787, 448.8140239406, 583.2639892430, 198.5905077460,
+ 83.1114666410, 370.3084085660, 872.2085512676, 476.4340749267,
+ 484.9116106220, 360.8573016915, 772.6057426368, 282.5237814987,
+ 507.0042091214, 287.1073533868, 567.4828967717, 77.6638236072,
+ 691.1121680399, 456.7582312892, 121.3765148875, 132.1427164631,
+ 263.7887606281, 467.5290794598, 713.2332481745, 209.0405124103,
+ 512.0876058071, 438.5789167409, 246.6029602691, 384.2014472740,
+ 721.4169417444, 72.9779419625, 608.6473249425, 328.0311011960,
+ 313.4962762107, 459.4197606034, 542.0705413935, 322.8168425219,
+ 629.2394460797, 23.6288793215, 653.5719153655, 360.4089140851,
+ 582.2139991666, 163.8328830829, 621.3713390844, 497.3343778797,
+ 871.8811445324, 238.6704807058, 204.0570714829, 826.7269314280,
+ 255.1804404453, 523.9480584399, 449.5299969837, 335.4295995321,
+ 149.7863918270, 454.7429014134, 102.3244946742, 502.6301853078,
+ 180.3489733917, 332.6747864283, 259.6430604050, 230.8772845444,
+ 560.4429621060, 296.4176472799, 219.0217740151, 116.3082515091,
+ 257.5868649798, 554.9964123956, 453.6092829338, 172.3277598091,
+ 437.1656535443, 33.2912818699, 519.6016003523, 269.8884015175,
+ 348.3783328032, 338.8614039587, 764.8175087663, 254.1132427399,
+ 849.6462157379, 481.8001836843, 778.1360778019, 245.7504044664,
+ 68.5839537093, 195.2958974697, 768.7059655018, 71.9757617704,
+ 774.3291194745, 130.3677109490, 367.4284956499, 461.2664125059,
+ 162.0689102483, 230.0654948111, 371.5185411738, 235.1079770159,
+ 182.9111909426, 415.8328965689, 279.3676699127, 48.4862395811,
+ 112.2859858350, 85.4527716332, 190.3006181641, 466.8324650799,
+ 296.9772299695, 436.7872589982, 649.3853741424, 160.6637209782,
+ 726.1933457741, 185.6285572668, 179.5126695824, 754.4497487399,
+ 166.8606597707, 711.1544132514, 513.4264797779, 716.7615520789,
+ 325.4252504193, 802.8131853606, 61.7555448309, 299.3589952048,
+ 202.5493178688, 362.5807019437, 252.7866297237, 178.5983602950,
+ 351.7999857042, 232.6682340856, 408.2613972378, 820.5174221858,
+ 151.1805944706, 442.3805853850, 278.8521684476, 587.8779826082,
+ 558.2107509045, 610.5912241546, 214.7978010602, 880.5000000000,
+ 407.3620010627, 424.3382600225, 504.4861445521, 571.8620738460,
+ 282.6388077858, 298.4593183289, 223.5049699746, 427.7934864283,
+ 453.9395727332, 786.9816148269, 406.9181668460, 465.2963000458,
+ 441.6367461912, 597.2897407766, 450.6501420868, 794.0888075935,
+ 485.3491176714, 646.1344765308, 307.3999172242, 522.9371011051,
+ 439.0671352442, 596.4051597772, 271.9569527638, 704.8304456469,
+ 116.4877670719, 679.0757769097, 208.8706470673, 478.4900202739,
+ 567.6677374937, 244.9922597238, 176.3144633075, 398.3576770003,
+ 274.3748793873, 156.4203561054, 169.5461561150, 195.6236713418,
+ 437.3724799878, 68.7288123631, 222.8184819547, 260.9504574946,
+ 221.4449380550, 747.2548169790, 551.0003540676, 79.3797054021,
+ 188.4105439389, 525.6107520488, 373.8820214634, 230.5689308501,
+ 562.4786591538, 719.1384150168, 241.5176103456, 150.7132025240,
+ 32.1273031231, 697.5549188394, 310.7529082863, 315.9274164218,
+ 267.0188643646, 206.4161182735, 166.5483236937, 140.1703483119,
+ 533.4942556566, 610.9678897418, 415.1746886915, 847.5281064699,
+ 561.9031764623, 385.4993156304, 557.6038920327, 621.4236088643,
+ 489.3500352106, 662.7090995610, 310.3478538342, 543.1513947513,
+ 53.3405795089, 470.1885162133, 537.0309770600, 845.8579155872,
+ 150.6076098027, 598.9326206872, 426.7003687590, 844.4283095903,
+ 578.0585406062, 569.6676116209, 406.4738281011, 584.5066893307,
+ 280.7221352719, 777.4371794192, 362.8772955575, 769.9491265178,
+ 226.4691002450, 384.3182527322, 269.3935827160, 291.8315730741,
+ 438.3627363506, 504.7212415182, 84.8146096720, 289.0463800053,
+ 169.1661213981, 513.8783466274, 31.5868844984, 480.0234430000,
+ 252.6084815577, 128.2478175486, 527.7429819700, 230.4971954513,
+ 458.7293877910, 448.7501021552, 447.0734982846, 714.1541582896,
+ 559.0492298435, 526.9326737146, 442.1734023097, 449.9457136017,
+ 205.6449656386, 357.9133121917, 361.9923214523, 273.8675591290,
+ 131.8544719048, 720.0681714723, 98.5333374848, 707.6394793132,
+ 115.0750208184, 211.2449241526, 485.1515593592, 387.8769971161,
+ 77.3226980078, 696.3914721127, 522.4537022390, 759.6986003192,
+ 315.7546196269, 134.8750148341, 195.9596258054, 149.4126448810,
+ 547.1512838903, 44.8077658242, 169.4172520820, 422.6260278045,
+ 564.4525421854, 180.5627179055, 340.3380289137, 878.5994563229,
+ 410.8579710060, 28.1807605279, 521.9307046183, 347.6646177391,
+ 549.2969670442, 213.6811320029, 175.7525603627, 503.1614426497,
+ 289.3958670593, 559.7726822634, 245.7456637139, 669.4705086475,
+ 546.4973907248, 377.8501055922, 483.9154513833, 557.6708731062,
+ 260.2124729198, 724.9855566955, 120.9375490930, 194.3462122359,
+ 347.0916845236, 757.9861160159, 434.1933606188, 422.4407069819,
+ 178.8088591436, 318.7096392550, 562.1119046258, 591.3938802173,
+ 521.5541188908, 771.5465580195, 73.4220522701, 343.1665570209,
+ 91.5394245617, 441.0202341279, 161.9707982915, 592.3953172522,
+ 142.5788297051, 66.1476981022, 37.7847340182, 842.9089913289,
+ 384.7781932026, 510.7847191763, 521.4284183935, 748.0048359080,
+ 254.3234004735, 394.6246904320, 188.2032436761, 850.5507490889,
+ 518.5071111191, 307.5369830419, 495.1689335475, 856.7662521290,
+ 361.7922064361, 351.9881588743, 364.6879206350, 18.5000000000,
+ 284.8621338164, 379.3257641680, 511.5744214132, 315.2766621377,
+ 96.8721562430, 631.2576917132, 418.8692970904, 229.6010010539,
+ 297.6511615586, 213.5000000000, 310.5000000000, 219.8192913005,
+ 829.8679219845, 319.0204437574, 283.6043698835, 336.2804998924,
+ 879.2507655801, 529.8661845092, 441.3812785854, 342.0056963690,
+ 416.3384018026, 564.9176945171, 17.0523443546, 256.7579219868,
+ 616.8194226233, 497.0303294030, 551.6397150881, 40.8397690965,
+ 794.7046375849, 385.4151014198, 677.5994220622, 192.3252471994,
+ 296.6336881878, 246.9982858891, 212.4154712524, 181.9230000300,
+ 718.7420763636, 462.1795420080, 750.3990285575, 404.0127711366,
+ 444.5237202150, 496.7932608167, 603.4406944826, 402.6714288921,
+ 414.4628263685, 320.0082332440, 640.1466027347, 297.0574674004,
+ 702.5912420635, 389.0127441152, 768.2868333738, 123.1951411596,
+ 374.9437158722, 486.3861117598, 44.7580069713, 357.1053550234,
+ 822.1069283867, 348.6579641508, 552.1221039741, 371.2073480687,
+ 408.4737592379, 109.1873134054, 430.1180898159, 333.9661151048,
+ 264.7220697013, 423.3111369182, 446.7984650890, 301.9076616378,
+ 816.2163497698, 245.9560277175, 404.8804266839, 374.9654249335,
+ 526.4642292975, 535.2303192588, 510.0722775435, 323.6336032238,
+ 253.3729940081, 436.1105475384, 433.6089531661, 404.8002473087,
+ 271.3825959154, 455.7133666318, 321.7995266819, 459.1267308515,
+ 179.8802368948, 94.6784300716, 372.1960287151, 569.0751497886,
+ 241.8947157229, 126.9567354394, 594.0259343061, 327.9182244169,
+ 583.5948754673, 373.2619511911, 513.1057775771, 68.7346142823,
+ 610.9349253144, 333.4269710134, 647.9065302285, 534.1577378502,
+ 141.0643783326, 407.4254584021, 855.3510727711, 279.7737839324,
+ 598.2586831597, 85.7992260046, 781.2640444420, 352.7639318957,
+ 243.1718153278, 292.2133949181, 836.6131401916, 133.1416785951,
+ 644.8878495363, 287.5637983082, 675.6127287214, 309.7105877587,
+ 176.8293660523, 224.7238416567, 578.2828430248, 531.7846927899,
+ 311.3692643718, 387.0643126167, 788.2080577208, 563.2561894045,
+ 765.4155935505, 129.2067174785, 460.8340228593, 424.8139457242,
+ 176.8742336457, 383.9455640422, 634.9894813039, 572.1060772182,
+ 160.6356244616, 436.5253575350, 95.8308132361, 90.5814022304,
+ 232.1697849059, 485.1747605468, 697.7059777731, 327.1194614442,
+ 370.0169975264, 567.0292960128, 719.2591473022, 499.6266974463,
+ 641.6680864807, 269.3278178414, 604.5986430529, 125.6702708434,
+ 352.7242636428, 382.5140900795, 647.8618031754, 279.0539325605,
+ 634.8861203698, 400.0217457803, 115.0518954726, 414.0739686365,
+ 653.9408926088, 65.4721766181, 828.7682231072, 224.7976491950,
+ 164.4039144643, 457.8286974731, 660.6199094824, 460.5918126618,
+ 858.7326879641, 160.2739107841, 831.7857756192, 437.2727526565,
+ 557.4618095516, 336.4779827311, 221.5153373008, 363.5379913736,
+ 183.1480998833, 445.2882523887, 286.3505780145, 245.8494462972,
+ 378.6519558128, 543.3393395211, 319.1672117713, 106.8274715755,
+ 531.8519313069, 332.9228224957, 199.0145146414, 262.2811728528,
+ 466.2729028214, 618.3849432178, 561.4070490292, 237.6409371623,
+ 267.5312433816, 233.2993495004, 368.0862767341, 580.7995401801,
+ 483.8229744608, 669.7469936788, 268.0951516727, 835.3332562313,
+ 353.9319947761, 448.6291595480, 86.0106151162, 360.0569631053,
+ 484.3813319895, 761.1529135012, 530.9333820874, 683.7260092305,
+ 454.8580778705, 814.9194069509, 486.5977870945, 94.6865546229,
+ 318.5775683960, 124.3285159886, 388.4165851435, 648.9205602789,
+ 130.1682438100, 278.2651922385, 118.0866762952, 82.5988208700,
+ 263.4975108346, 277.1914025654, 306.4621987841, 452.0447216317,
+ 565.4839299172, 427.5720574702, 378.2298464275, 98.5897547299,
+ 451.7487111435, 357.0478190024, 434.3880396244, 18.0897036234,
+ 560.9467083696, 504.5615039879, 235.7270650065, 178.5736328995,
+ 865.7728303724, 199.4691903023, 504.3603146142, 178.2560057852,
+ 323.0695956460, 487.8186837672, 691.9835864077, 352.2030700755,
+ 242.1982782113, 530.7352619668, 243.1200070816, 482.9276494686,
+ 270.1682580181, 101.1920769771, 529.3450506807, 451.8198467988,
+ 567.2773639348, 412.9646467172, 113.3044189439, 61.6239719360,
+ 19.1814321231, 160.6861796441, 224.4432294818, 132.6211320776,
+ 563.1526755407, 373.4178588396, 722.1069520230, 472.0747703094,
+ 869.9741209536, 335.6690180733, 215.8055158962, 226.8494509352,
+ 520.0947921137, 340.9958926679, 401.8708503244, 506.2681259310,
+ 380.9185544124, 93.8695440132, 619.9938435830, 300.2598889143,
+ 73.4496746840, 755.9005052153, 478.5176270985, 389.0028055071,
+ 480.4503848303, 728.4615452025, 76.5972729853, 386.1860827966,
+ 458.7013400464, 558.9650097048, 418.8396962155, 468.0048012483,
+ 444.7963432104, 721.5116282243, 419.6537896420, 614.0495625983,
+ 333.0274204198, 321.4449434791, 64.6965337603, 256.3318098322,
+ 390.7162246578, 388.8666931108, 302.4619598750, 792.8384088608,
+ 378.8583179243, 586.4166082123, 462.5337990518, 472.1744771545,
+ 271.0364972190, 580.5029075631, 342.5525664712, 717.1356684896,
+ 520.0615909669, 783.7389093081, 319.5557161677, 87.3707607263,
+ 527.3019215117, 83.5394074991, 130.9424463231, 787.5984417501,
+ 322.3847299648, 530.3221724212, 25.4459814518, 823.5926392050,
+ 409.8333800241, 214.5000000000, 279.5000000000, 358.7528776265,
+ 388.7208251683, 249.4155882029, 566.5000000000, 116.1057216746,
+ 188.5149031870, 232.8430013640, 449.0264370512, 274.3594464981,
+ 485.9560968261, 215.5095523995, 494.9943624393, 739.8479898735,
+ 336.7773839412, 476.3681730020, 176.0296313486, 256.6838187028,
+ 348.1229250706, 198.0666195662, 225.7909368261, 540.0316656605,
+ 553.2028052570, 220.9857551897, 424.5784348066, 47.6498367151,
+ 285.2312708240, 290.7119995935, 339.0030043263, 868.1064323252,
+ 503.5545522068, 681.3130518774, 421.3803077462, 565.8837894874,
+ 498.0496664484, 840.7491286861, 356.9871255465, 622.2089029465,
+ 142.3444002694, 437.1102872897, 197.2545073019, 43.7520564229,
+ 364.0923112161, 366.7483653682, 460.6192254920, 805.1323664987,
+ 425.8514923567, 677.1976240404, 283.8235922772, 518.2831833527,
+ 423.3123394140, 635.5201567753, 117.7158672888, 217.4629803272,
+ 461.4853970574, 426.6192228800, 226.5385378286, 562.6231974689,
+ 137.1113468128, 109.7095852140, 652.7399764249, 125.9602264292,
+ 182.6772600497, 95.3666357239, 326.6147341515, 173.9147147181,
+ 789.1249341787, 567.1161752205, 784.7111941190, 409.3437928284,
+ 522.8689743010, 337.4548631909, 461.6893176408, 78.6783348838,
+ 273.6633931358, 164.7204461124, 452.1618082564, 335.7813960725,
+ 263.3402679425, 345.0466393462, 364.2999156028, 388.7877328651,
+ 579.9134483355, 73.5940142001, 57.1567595976, 689.6136570600,
+ 267.5663478495, 679.4321238051, 180.0124723676, 532.0821706496,
+ 465.3902242265, 171.0914485359, 358.8634117482, 483.7389017240,
+ 490.7345272295, 477.2577879147, 463.9150216846, 528.3002005336,
+ 475.7477497260, 235.9645423258, 370.9086380255, 624.3686639412,
+ 190.4190817821, 37.1538215588, 163.7875441968, 281.7841730888,
+ 438.2780576391, 612.2018979684, 485.1061600847, 57.8798459013,
+ 203.8961308219, 72.0171112333, 525.4281834727, 603.4193530731,
+ 48.4597356202, 218.8730557501, 58.8451777196, 815.5005122179,
+ 276.5418342923, 231.0253535361, 176.8651683562, 447.5521775985,
+ 111.5091755825, 530.5373342108, 109.8497127001, 123.2327753974,
+ 310.6533568218, 295.1947544784, 390.4765697552, 352.1354289605,
+ 678.0538599638, 462.6107041793, 497.2383300955, 502.7988578218,
+ 677.1447412705, 151.4490411245, 302.7941329942, 170.8569591749,
+ 831.5889660036, 446.8142201578, 275.9830758086, 542.8406387836,
+ 275.5720122183, 458.3099125265, 685.8560826033, 389.0600963430,
+ 865.2117063625, 505.8001919745, 53.0697539456, 119.1974389274,
+ 414.3244671932, 172.9594672762, 833.5938371685, 514.5060210595,
+ 753.3553141131, 171.4892995390, 408.9293825100, 481.8294861833,
+ 150.5026648787, 374.0646366811, 59.5392883877, 188.3021796069,
+ 403.4492596246, 87.9648361362, 149.2817765509, 514.0240592069,
+ 400.3664425610, 282.7576273982, 490.9563636103, 864.4245125385,
+ 434.5025880835, 241.5939598859, 518.7482550566, 554.9144226227,
+ 418.9121090454, 176.3882196235, 501.6985827069, 717.8677221435,
+ 571.9711578413, 401.0524033215, 141.4036484201, 588.0335255417,
+ 518.6805706101, 46.1587447894, 185.9624450373, 628.5603237622,
+ 300.2353549779, 423.9310416875, 199.7668624213, 613.3138386735,
+ 168.5643779804, 770.0024877010, 134.4493610041, 506.0683819264,
+ 228.4669408101, 505.9756011107, 477.9580082725, 162.8023298529,
+ 578.0247069374, 220.8188696274, 780.6795433503, 487.0282920016,
+ 106.7505252062, 483.1833892297, 397.4235290323, 347.4835716058,
+ 379.0327006647, 335.7327639906, 435.2132943048, 460.3424282287,
+ 518.8354407434, 517.6422788506, 467.6563286575, 263.5152826410,
+ 67.3551502855, 262.1841622870, 272.6981295776, 557.0255003429,
+ 414.4081907078, 143.5780738203, 100.3176985943, 795.4839653411,
+ 258.2283411414, 725.9987875695, 171.3567065047, 742.2261042927,
+ 225.2902180486, 402.3605163850, 526.6176819152, 839.6252701217,
+ 86.0318001252, 644.5951362234, 312.5659219941, 216.9940605356,
+ 142.0552798749, 858.1362817072, 447.8740414717, 303.2645310663,
+ 190.5418717542, 583.8351784847, 460.6315474618, 661.0525331185,
+ 333.0271541029, 78.8238696870, 332.9181519373, 195.7248335329,
+ 26.0462914306, 49.9150358444, 348.7996921311, 367.1460722295,
+ 350.5750548702, 219.0303896976, 494.4970443295, 134.7993101833,
+ 454.0343325569, 459.7026291882, 610.4249553913, 275.9512817573,
+ 683.2942962369, 58.9218351202, 437.6072320145, 343.7579681065,
+ 217.2355881337, 187.6818455207, 780.2653050177, 54.1755620275,
+ 793.8638427958, 450.5591062561, 531.9581929737, 492.0213106321,
+ 473.0929801757, 59.3165422792, 725.6902487556, 255.0409546586,
+ 469.0802752041, 532.8418307729, 585.1618077951, 227.1127440639,
+ 445.6244598018, 263.7505388510, 542.6848313010, 314.5808597109,
+ 72.9027957178, 530.4753520951, 837.6080986158, 553.6094776227,
+ 133.8763190409, 198.2323133225, 492.4103065085, 428.9136537528,
+ 602.2810166767, 194.9065868559, 530.4821728677, 137.7881339182,
+ 671.3187715175, 340.0909063250, 274.9026436766, 158.5145919703,
+ 836.5795611316, 137.3222895207, 379.1497640784, 358.5904202584,
+ 679.0985494207, 191.1898320175, 440.1149969998, 298.3514698987,
+ 558.9174708628, 584.5875327235, 221.3779897711, 86.8817440628,
+ 639.4426229757, 543.2461101990, 731.4848363021, 331.1177709625,
+ 278.5014953865, 543.3677154586, 348.6873214038, 426.4291866668,
+ 205.5936429703, 118.0021599486, 780.1625854310, 336.1810324376,
+ 137.8852730576, 409.7864898255, 542.6187551700, 298.0789056286,
+ 427.5498459576, 433.5019084787, 72.6173691788, 399.3427105588,
+ 493.9939226899, 446.0814658246, 447.2346855519, 384.8084874401,
+ 121.3450288318, 716.9525378394, 379.5457463985, 303.8299315447,
+ 243.0927131019, 49.5854072703, 275.7770738745, 796.0438289275,
+ 512.8677904100, 771.7122317101, 187.3678727966, 783.9540477555,
+ 300.4906396052, 784.5413192732, 262.8273687195, 439.6668097272,
+ 224.8500098633, 570.6177534739, 180.7027376160, 593.1336304975,
+ 211.1059122177, 873.6102597045, 357.0731789842, 537.8849393372,
+ 324.9480864529, 599.5344512555, 378.4374648412, 610.2423383379,
+ 305.6506987114, 755.7390731148, 323.9257038562, 80.9324668293,
+ 550.1750907106, 25.3309136931, 329.0835899583, 797.2324071972,
+ 408.6889731146, 338.2281069443, 231.2765966915, 771.7815694667,
+ 424.9742779581, 775.9336105414, 168.0736784145, 565.7008876448,
+ 553.2851158248, 280.1042384662, 199.1224839809, 630.2602215701,
+ 287.3623955789, 853.7023318239, 317.1056761779, 636.0580374750,
+ 571.8755823217, 331.7892213117, 381.9852190492, 621.7007497512,
+ 302.2345596074, 306.2269843534, 66.5921896287, 464.4967898812,
+ 532.2438737668, 183.1994094074, 316.6446194318, 175.8102868542,
+ 32.1352499828, 175.6654098571, 636.4167862189, 384.1478000458,
+ 717.7256520875, 556.0747247524, 863.5540879634, 433.6879274570,
+ 695.3558134566, 408.7520546666, 464.7390727976, 510.7960732776,
+ 673.0642576082, 288.0066208795, 19.5000000000, 488.8484768999,
+ 246.3412718259, 176.0165460784, 630.4117789350, 377.5102343936,
+ 678.0506901675, 15.5000000000, 72.1881430350, 381.9918826176,
+ 522.3594221552, 76.3496554036, 719.0175853927, 421.6416859266,
+ 684.7743103355, 389.3870585218, 701.2154826542, 212.3269034922,
+ 45.4672139538, 230.6467364247, 392.1410648813, 211.3692877433,
+ 399.4318298072, 573.5114844734, 266.8353896072, 772.9894593883,
+ 174.5686341804, 629.3357392140, 522.8820859739, 471.8201133092,
+ 136.9431253078, 676.9585206233, 360.4750963941, 180.8082333832,
+ 133.6779694128, 464.7791760786, 115.4268068377, 683.0119870870,
+ 351.1414877226, 673.4231361268, 493.0454027643, 566.3273941574,
+ 543.2104582197, 694.5355922615, 397.0710473770, 466.0099153688,
+ 174.6305680378, 676.1875599859, 398.6634201945, 798.2353761535,
+ 242.2452906890, 34.2117485749, 460.6185991377, 553.7760466875,
+ 264.3035302257, 745.8959476089, 196.5336503989, 569.6717130333,
+ 313.5398134898, 653.4711706942, 312.3949365585, 553.1122941669,
+ 115.8350015418, 88.0574934860, 103.4593209967, 364.0813191909,
+ 490.2777855596, 669.1207734137, 340.4291641039, 215.5000000000,
+ 274.5000000000, 746.5026681196, 553.7963466366, 26.7085449647,
+ 303.5995250593, 408.2975324992, 417.2433095204, 105.3473696010,
+ 480.6906717391, 160.0593397081, 461.0607457886, 185.6792037312,
+ 32.2396839867, 448.9970814988, 856.0966303491, 475.6476857503,
+ 69.9660367550, 157.8302024485, 590.9213687486, 516.7408039169,
+ 337.2162556826, 385.1186778216, 695.6608332389, 19.4499167373,
+ 282.3832085524, 32.2450298994, 200.4395416672, 303.6404747500,
+ 70.5972773791, 264.8476323248, 474.9727702731, 190.0488457880,
+ 127.2391344988, 265.3736935060, 391.0818052628, 512.0873774802,
+ 281.7494029347, 238.3533851500, 789.4460645963, 421.8497393456,
+ 22.1855441364, 533.9091797419, 102.7870196793, 455.6423139238,
+ 820.5033574157, 521.6115331319, 120.9198875244, 67.7898642342,
+ 800.8973815545, 248.0620845540, 259.8932647738, 303.9826156374,
+ 43.8833694777, 174.3642853301, 827.2571618618, 325.1443791417,
+ 293.4852992538, 128.6428235168, 534.4013130503, 218.3014223875,
+ 370.2143285379, 467.8494000091, 148.4781111710, 514.1674244971,
+ 472.6572843767, 37.3922894762, 178.0935177833, 786.3026900309,
+ 463.5154387526, 368.3829433204, 457.7574769459, 510.3503920779,
+ 156.3121483915, 189.5198039577, 102.1391107593, 441.3128081982,
+ 420.4639684905, 746.5789998924, 208.5448051527, 680.3741890779,
+ 509.2523542884, 65.2866664593, 480.7189856753, 604.6095532971,
+ 278.5945589181, 864.1182822955, 508.5575766684, 785.4607473781,
+ 106.1162659222, 574.8807764325, 703.8781509075, 205.8461055811,
+ 542.5155223050, 269.7478936500, 534.1703443158, 397.6297207975,
+ 452.9409507873, 197.8211739759, 290.9268959730, 130.2649070452,
+ 271.4592984322, 115.7620372215, 508.7370017948, 354.3265658258,
+ 728.9520641391, 87.6634423925, 860.5706105563, 427.8758900894,
+ 419.9614509366, 89.9419220725, 93.7652622808, 388.9694433531,
+ 541.3453748904, 302.5857944090, 218.2587148128, 206.2042086365,
+ 805.5836531263, 158.9517994226, 864.5907589867, 260.5363881423,
+ 654.2560149092, 570.6199396471, 736.4839207001, 536.3317688372,
+ 779.9421124414, 302.8388899700, 660.4579529197, 484.8698019568,
+ 712.3589048614, 255.6707380298, 220.0546148816, 86.2740485189,
+ 674.2090670580, 428.7823455118, 595.1775407582, 534.8346698538,
+ 593.8955419385, 473.2078894912, 754.4565171143, 485.5991308067,
+ 669.5212716891, 497.2124802650, 508.9254389097, 80.6353488656,
+ 183.0943366161, 468.6811580547, 250.5363121991, 197.1434826458,
+ 214.4619622635, 555.4838359226, 389.7104658252, 147.8724064640,
+ 108.0244165238, 154.5041215787, 79.3847400707, 578.5000000000,
+ 260.0954987798, 119.1325263275, 798.9060071514, 152.3710273647,
+ 197.2946655195, 241.2763514495, 731.7269156878, 140.5079025632,
+ 410.3093326534, 571.2967969259, 167.6601606171, 60.9824058064,
+ 619.3476886445, 429.3357373956, 80.1028783289, 101.2553053161,
+ 616.8625687653, 516.8461972703, 710.0863554522, 501.7760521122,
+ 395.6627703700, 240.5758198466, 44.5154529353, 29.7220972992,
+ 612.2248605113, 164.9844215241, 776.2003527600, 106.5517711142,
+ 819.4281050454, 376.9814872720, 867.2210419883, 424.6259861651,
+ 531.5974366318, 381.3418709936, 463.5177109660, 518.9244576151,
+ 513.1795568652, 300.0025238808, 142.1159764892, 598.4406485025,
+ 513.8170038465, 519.3135261766, 491.8084854426, 674.7759008561,
+ 427.7987199186, 666.7386767578, 197.3058629777, 554.0481755838,
+ 359.9676814439, 497.4111502035, 650.1813219703, 500.9723127985,
+ 56.7890056053, 138.0389096226, 393.1157721859, 127.4267516156,
+ 111.2729713874, 105.5476989533, 232.8685725590, 260.0734857710,
+ 756.7445754986, 453.7315661727, 15.5000000000, 148.4549198025,
+ 115.2930740179, 42.3781986239, 229.7821548449, 86.8004186516,
+ 541.1132487513, 362.8120287381, 462.4939629415, 163.9362700178,
+ 183.0625951734, 241.3089983627, 156.0916461744, 299.6626012989,
+ 369.6107118368, 296.5680685652, 543.5218249451, 343.8130818413,
+ 875.8471521557, 365.0622574914, 50.6177152562, 382.2182579173,
+ 730.2532011224, 61.6786619302, 177.8494992886, 344.0165727931,
+ 646.5022870950, 429.2585430205, 240.4225504872, 509.3935871602,
+ 675.5861489945, 277.7655613210, 690.9251859464, 134.0803331939,
+ 150.0871678765, 361.2223364883, 243.8221538920, 105.1701142597,
+ 549.2739159634, 330.9405173159, 479.7383886459, 434.3402482106,
+ 770.2443285355, 467.9541838377, 724.7967923487, 446.1956607321,
+ 204.3244604265, 381.9975669919, 113.5594017090, 119.7454225899,
+ 25.0772489069, 466.9139323451, 627.1422566789, 424.3096410917,
+ 428.8829324688, 323.5587618743, 590.0320692829, 551.9451632706,
+ 618.6722696777, 533.0018739627, 119.8157006362, 542.7684242160,
+ 748.0763351995, 201.1109168381, 360.2361873150, 420.0204705020,
+ 110.6970754974, 401.3238092364, 150.9786226366, 546.0651999149,
+ 575.6487710569, 489.7393153702, 463.5902900009, 381.2264570629,
+ 381.3519498130, 468.9550175341, 65.8429731956, 513.3232940532,
+ 296.5213211026, 437.2442060434, 602.3995546970, 490.5037130965,
+ 762.9563299703, 134.4482185861, 541.2329701179, 421.0550673570,
+ 133.9958692155, 67.5731862276, 260.2322582214, 321.3165620647,
+ 32.5359518436, 390.7611331661, 67.7878643893, 335.6357316084,
+ 57.2038399880, 570.4850123525, 395.7596298285, 453.7440230126,
+ 298.3423528999, 97.5580347904, 694.1430164731, 226.3156096574,
+ 213.4850713901, 403.6321322793, 674.6308053819, 393.6867655443,
+ 450.6327651048, 535.4263045545, 142.7621194139, 538.9608085365,
+ 248.2871043044, 409.6079345149, 879.5000000000, 554.7375547988,
+ 448.3602507163, 544.6371450247, 468.4004631370, 530.1418366052,
+ 558.4232383210, 127.1736955139, 556.8700916592, 371.1191850872,
+ 350.7274604791, 539.1114140665, 123.5348812343, 558.1790593755,
+ 143.2078328499, 72.7237933446, 330.6975344240, 316.6235446090,
+ 328.2535489741, 340.9819356600, 429.7848416560, 276.3433330278,
+ 565.2458743246, 373.2939327828, 646.3155367202, 120.1339632853,
+ 767.2785329583, 341.5023597143, 376.5313624643, 420.0295265532,
+ 119.0352955174, 170.2550568440, 57.0202714641, 363.4738424333,
+ 735.8373374471, 209.7488790124, 262.0315724226, 61.0560075765,
+ 291.1736333538, 779.5938215932, 394.6647903298, 604.8498191345,
+ 218.3934755542, 15.5000000000, 238.4578676452, 705.0211274192,
+ 500.5341159618, 860.1755874539, 324.9194277072, 625.7899272373,
+ 144.7715791478, 381.4098951796, 353.0407163407, 183.9024647622,
+ 370.2693663855, 444.8860934928, 19.5000000000, 96.4429276975,
+ 398.9284313491, 671.4878663694, 581.9897862474, 419.3431211343,
+ 73.6716672886, 288.6345426315, 38.0989022262, 598.3286301691,
+ 68.6619082704, 333.4881127218, 150.2681618239, 616.4146794535,
+ 207.5400256830, 853.6829090152, 520.7111376439, 225.8491170605,
+ 474.8372490251, 216.7074524422, 439.9207118976, 871.5523254278,
+ 482.0818377889, 741.8900132312, 116.5788505778, 308.4623957870,
+ 324.2964853847, 518.6788697929, 386.0083426565, 464.8217571706,
+ 350.2567502019, 630.4676624524, 290.0632181526, 450.9299106771,
+ 465.6364070670, 500.4819880923, 566.0829786957, 807.8969160238,
+ 508.7832847058, 308.3340467926, 472.0807216480, 208.2522088705,
+ 160.3348812317, 271.4448074473, 69.8604365866, 570.1429122598,
+ 17.7932910504, 349.5132219245, 554.6141544949, 16.3753699184,
+ 143.4715562756, 748.1783990569, 350.3075431281, 725.8674600354,
+ 68.0541953008, 656.9876681725, 535.4170555010, 777.7861015686,
+ 568.1950006959, 292.7121747782, 270.9829747699, 291.7444687249,
+ 554.6621580750, 273.6367441197, 413.8266367177, 460.7526079064,
+ 99.2402415281, 298.8843950059, 717.9300151853, 333.7016662859,
+ 558.0190182033, 490.0813238413, 699.7345112405, 444.3735707052,
+ 549.4983827314, 169.5988928769, 115.2268875348, 542.8788183586,
+ 683.8933533579, 582.1892664432, 794.1042808091, 35.8553295109,
+ 812.3534504557, 310.8231029075, 176.9218246968, 73.5025929049,
+ 176.6711959107, 464.3164187485, 603.4666201797, 501.8954779097,
+ 148.9590191351, 391.9491798683, 486.0789261363, 388.1346759791,
+ 154.3906032807, 563.0551475333, 41.0161776026, 486.3997879309,
+ 240.3293604000, 29.5000000000, 587.7097513708, 331.4551787975,
+ 399.5133639019, 104.0939785438, 493.2747600594, 421.9757190320,
+ 376.3849210118, 497.4798687642, 867.1707574019, 345.2324905678,
+ 170.3254845631, 29.9862749355, 330.6913235816, 472.8136788569,
+ 402.2753562357, 822.2664722862, 556.6313996475, 596.1599229005,
+ 430.4066994492, 205.9707740383, 249.9608922555, 632.6439743160,
+ 234.7543688641, 798.6721001433, 471.9341437757, 745.3371430623,
+ 461.7654986283, 507.4056335594, 34.7341023261, 583.5105584534,
+ 478.0911288795, 105.8708225812, 113.2945618175, 465.4508595407,
+ 76.1354456035, 260.4710832581, 443.7009205743, 833.0821693411,
+ 292.4649297815, 447.3373085623, 82.0738116621, 627.8421788274,
+ 118.4762591417, 678.1343345532, 43.2483075677, 375.2241985385,
+ 381.8079300672, 582.9694839542, 174.1525323256, 866.9098579740,
+ 257.5338737504, 452.7134192941, 368.5827060843, 882.5000000000,
+ 182.2791872190, 672.3559204976, 435.4648218918, 744.1443766993,
+ 457.3179593166, 506.2703527984, 176.3208122804, 184.0787102660,
+ 256.2352099146, 722.1472752339, 192.1047547049, 694.3446908093,
+ 119.0161509320, 375.9585538793, 365.6496456167, 813.7726813953,
+ 411.9223138008, 503.3723515549, 360.9005498177, 852.1393750181,
+ 32.5000000000, 272.2517225641, 137.1954956714, 549.4801022975,
+ 326.2355741962, 249.6379857265, 599.5052733640, 473.4999527023,
+ 672.0437251685, 469.8457838967, 400.4405564972, 464.2546016065,
+ 768.0746958263, 230.7405981069, 709.2667591636, 241.2720723952,
+ 207.7743405267, 114.4490129585, 286.4096326438, 291.4627764319,
+ 747.6702041385, 582.4579433732, 543.1455371940, 200.9219963863,
+ 481.6300101363, 106.9856005277, 353.9937739625, 268.8836441253,
+ 572.1937805884, 116.9085139112, 216.5000000000, 266.5000000000,
+ 537.1949982742, 219.4512886424, 47.2106190185, 276.4239617768,
+ 649.1814880145, 172.3727345118, 281.9842787650, 441.3006750090,
+ 374.5762745067, 249.9389456452, 490.5823926519, 165.6396752409,
+ 859.9391568174, 328.1299759847, 507.8928061342, 235.9452700838,
+ 333.4729011926, 533.7907613021, 419.3509155797, 561.0126469936,
+ 61.6991671716, 476.0309479589, 413.2237918443, 469.4217608485,
+ 502.3156249516, 397.7587362444, 115.4950960109, 472.7524859332,
+ 589.6447793806, 315.4569410762, 220.7615822172, 326.8262785184,
+ 329.3257178164, 155.9035135277, 77.6787072028, 110.0956584715,
+ 161.3792718595, 426.4708600001, 845.7098527058, 511.2764121502,
+ 50.6250960631, 543.0741197817, 128.2047507023, 566.2798288414,
+ 345.2116531184, 302.4887409686, 485.4189121748, 245.4848603889,
+ 73.7158838558, 199.1618264821, 475.7640859392, 43.4889116967,
+ 142.5322709821, 221.0105162101, 767.1556720643, 75.3337802797,
+ 838.8156251957, 482.2085061759, 842.4450739249, 346.3385774260,
+ 570.1952457927, 428.0257649900, 109.0554286871, 306.8699653424,
+ 32.0398159778, 665.7692493874, 472.2959059029, 407.7800096974,
+ 224.1146084330, 684.6686923233, 483.1730281795, 214.1621186899,
+ 460.2779001753, 631.7300027209, 315.1900754254, 528.8657617724,
+ 466.9507227357, 17.2210983519, 181.0893944837, 188.6156741443,
+ 160.5097262899, 258.2838382355, 179.9887974339, 677.0419725309,
+ 217.3165725380, 708.9312197163, 238.3548651561, 130.2945546992,
+ 126.0970054917, 693.9045991032, 512.2156884277, 252.0933563879,
+ 119.8960288897, 64.0827193263, 89.0092556558, 624.5584140593,
+ 327.8781051636, 597.5486552104, 49.7224619985, 645.0500365476,
+ 451.4149822954, 535.2117252211, 440.7713227294, 690.3713855146,
+ 368.6896739299, 400.9769871139, 117.7526615978, 162.4247615392,
+ 462.6795451567, 491.0262810999, 476.4983924674, 626.9438819228,
+ 142.7699090896, 823.2390348783, 438.0843238300, 212.2648917517,
+ 188.6091087472, 645.3535190376, 88.4503918831, 64.1293184761,
+ 344.6456070430, 343.5234451691, 167.8290145273, 350.7692553422};
+
+static void BM_DtoaShortest(benchmark::State& state) {
+ char output[kFastDtoaMaximalLength + 10];
+ Vector<char> buffer(output, sizeof(output));
+ int length, decimal_point;
+ unsigned idx = 0;
+ for (auto _ : state) {
+ bool ok = FastDtoa(kTestDoubles[idx++ % 4096], FAST_DTOA_SHORTEST, 0,
+ buffer, &length, &decimal_point);
+ USE(ok);
+ }
+}
+
+static void BM_DtoaSixDigits(benchmark::State& state) {
+ char output[kFastDtoaMaximalLength + 10];
+ Vector<char> buffer(output, sizeof(output));
+ int length, decimal_point;
+ unsigned idx = 0;
+ for (auto _ : state) {
+ bool ok = FastDtoa(kTestDoubles[idx++ % 4096], FAST_DTOA_PRECISION, 6,
+ buffer, &length, &decimal_point);
+ USE(ok);
+ }
+}
+
+BENCHMARK(BM_DtoaShortest);
+BENCHMARK(BM_DtoaSixDigits);
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 59965d6e0c..3451d833ed 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -91,38 +91,6 @@ v8_source_set("cctest_sources") {
"../common/value-helper.h",
"cctest-utils.h",
"collector.h",
- "compiler/codegen-tester.cc",
- "compiler/codegen-tester.h",
- "compiler/function-tester.cc",
- "compiler/function-tester.h",
- "compiler/test-atomic-load-store-codegen.cc",
- "compiler/test-basic-block-profiler.cc",
- "compiler/test-branch-combine.cc",
- "compiler/test-calls-with-arraylike-or-spread.cc",
- "compiler/test-code-assembler.cc",
- "compiler/test-code-generator.cc",
- "compiler/test-concurrent-shared-function-info.cc",
- "compiler/test-gap-resolver.cc",
- "compiler/test-graph-visualizer.cc",
- "compiler/test-instruction-scheduler.cc",
- "compiler/test-instruction.cc",
- "compiler/test-js-constant-cache.cc",
- "compiler/test-js-context-specialization.cc",
- "compiler/test-js-typed-lowering.cc",
- "compiler/test-jump-threading.cc",
- "compiler/test-linkage.cc",
- "compiler/test-loop-analysis.cc",
- "compiler/test-machine-operator-reducer.cc",
- "compiler/test-node.cc",
- "compiler/test-operator.cc",
- "compiler/test-representation-change.cc",
- "compiler/test-run-calls-to-external-references.cc",
- "compiler/test-run-load-store.cc",
- "compiler/test-run-machops.cc",
- "compiler/test-run-stackcheck.cc",
- "compiler/test-run-unwinding-info.cc",
- "compiler/test-run-variables.cc",
- "compiler/test-verify-type.cc",
"expression-type-collector-macros.h",
"feedback-vector-helper.h",
"heap/heap-tester.h",
@@ -150,9 +118,6 @@ v8_source_set("cctest_sources") {
"print-extension.h",
"profiler-extension.cc",
"profiler-extension.h",
- "setup-isolate-for-tests.cc",
- "setup-isolate-for-tests.h",
- "test-accessor-assembler.cc",
"test-accessors.cc",
"test-allocation.cc",
"test-api-array-buffer.cc",
@@ -161,12 +126,10 @@ v8_source_set("cctest_sources") {
"test-api-typed-array.cc",
"test-api.cc",
"test-api.h",
- "test-code-stub-assembler.cc",
"test-constantpool.cc",
"test-cpu-profiler.cc",
"test-debug-helper.cc",
"test-debug.cc",
- "test-descriptor-array.cc",
"test-disasm-regex-helper.cc",
"test-disasm-regex-helper.h",
"test-field-type-tracking.cc",
@@ -184,12 +147,11 @@ v8_source_set("cctest_sources") {
"test-property-details.cc",
"test-ptr-compr-cage.cc",
"test-random-number-generator.cc",
+ "test-regexp.cc",
"test-sampler-api.cc",
- "test-serialize.cc",
"test-shared-strings.cc",
"test-smi-lexicographic-compare.cc",
"test-strings.cc",
- "test-swiss-name-dictionary-csa.cc",
"test-swiss-name-dictionary-infra.cc",
"test-swiss-name-dictionary.cc",
"test-trace-event.cc",
@@ -201,13 +163,58 @@ v8_source_set("cctest_sources") {
"test-usecounters.cc",
"test-utils.cc",
"test-verifiers.cc",
- "torque/test-torque.cc",
"trace-extension.cc",
"trace-extension.h",
]
+ if (v8_enable_turbofan) {
+ sources += [
+ "compiler/codegen-tester.cc",
+ "compiler/codegen-tester.h",
+ "compiler/function-tester.cc",
+ "compiler/function-tester.h",
+ "compiler/test-atomic-load-store-codegen.cc",
+ "compiler/test-basic-block-profiler.cc",
+ "compiler/test-branch-combine.cc",
+ "compiler/test-calls-with-arraylike-or-spread.cc",
+ "compiler/test-code-assembler.cc",
+ "compiler/test-code-generator.cc",
+ "compiler/test-concurrent-shared-function-info.cc",
+ "compiler/test-gap-resolver.cc",
+ "compiler/test-graph-visualizer.cc",
+ "compiler/test-instruction-scheduler.cc",
+ "compiler/test-instruction.cc",
+ "compiler/test-js-constant-cache.cc",
+ "compiler/test-js-context-specialization.cc",
+ "compiler/test-js-typed-lowering.cc",
+ "compiler/test-jump-threading.cc",
+ "compiler/test-linkage.cc",
+ "compiler/test-loop-analysis.cc",
+ "compiler/test-machine-operator-reducer.cc",
+ "compiler/test-node.cc",
+ "compiler/test-operator.cc",
+ "compiler/test-representation-change.cc",
+ "compiler/test-run-calls-to-external-references.cc",
+ "compiler/test-run-load-store.cc",
+ "compiler/test-run-machops.cc",
+ "compiler/test-run-stackcheck.cc",
+ "compiler/test-run-unwinding-info.cc",
+ "compiler/test-run-variables.cc",
+ "compiler/test-verify-type.cc",
+ "setup-isolate-for-tests.cc",
+ "setup-isolate-for-tests.h",
+ "test-accessor-assembler.cc",
+ "test-code-stub-assembler.cc",
+ "test-descriptor-array.cc",
+ "test-serialize.cc",
+ "test-swiss-name-dictionary-csa.cc",
+ "torque/test-torque.cc",
+ ]
+ }
+
if (v8_current_cpu == "arm") {
- sources += [ ### gcmole(arch:arm) ###
+ sources += [
+ ### gcmole(arm) ###
"assembler-helper-arm.cc",
"assembler-helper-arm.h",
"test-assembler-arm.cc",
@@ -215,7 +222,8 @@ v8_source_set("cctest_sources") {
"test-sync-primitives-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
- sources += [ ### gcmole(arch:arm64) ###
+ sources += [
+ ### gcmole(arm64) ###
"test-assembler-arm64.cc",
"test-fuzz-arm64.cc",
"test-javascript-arm64.cc",
@@ -228,48 +236,60 @@ v8_source_set("cctest_sources") {
sources += [ "test-stack-unwinding-win64.cc" ]
}
} else if (v8_current_cpu == "x86") {
- sources += [ ### gcmole(arch:ia32) ###
+ sources += [
+ ### gcmole(ia32) ###
"test-assembler-ia32.cc",
"test-log-stack-tracer.cc",
]
} else if (v8_current_cpu == "mips64") {
- sources += [ ### gcmole(arch:mips64) ###
+ sources += [
+ ### gcmole(mips64) ###
"test-assembler-mips64.cc",
"test-macro-assembler-mips64.cc",
]
} else if (v8_current_cpu == "mips64el") {
- sources += [ ### gcmole(arch:mips64el) ###
+ sources += [
+ ### gcmole(mips64el) ###
"test-assembler-mips64.cc",
"test-macro-assembler-mips64.cc",
]
} else if (v8_current_cpu == "x64") {
sources += [
- ### gcmole(arch:x64) ###
+ ### gcmole(x64) ###
"test-log-stack-tracer.cc",
]
if (is_win) {
sources += [ "test-stack-unwinding-win64.cc" ]
}
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
- sources += [ "test-assembler-ppc.cc" ] ### gcmole(arch:ppc) ###
+ sources += [
+ ### gcmole(ppc) ###
+ "test-assembler-ppc.cc",
+ ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += [ "test-assembler-s390.cc" ] ### gcmole(arch:s390) ###
+ sources += [
+ ### gcmole(s390) ###
+ "test-assembler-s390.cc",
+ ]
} else if (v8_current_cpu == "riscv64") {
- sources += [ ### gcmole(arch:riscv64) ###
+ sources += [
+ ### gcmole(riscv64) ###
"test-assembler-riscv64.cc",
"test-helper-riscv64.cc",
"test-macro-assembler-riscv64.cc",
"test-simple-riscv64.cc",
]
} else if (v8_current_cpu == "riscv32") {
- sources += [ ### gcmole(arch:riscv32) ###
+ sources += [
+ ### gcmole(riscv32) ###
"test-assembler-riscv32.cc",
"test-helper-riscv32.cc",
"test-macro-assembler-riscv32.cc",
"test-simple-riscv32.cc",
]
} else if (v8_current_cpu == "loong64") {
- sources += [ ### gcmole(arch:loong64) ###
+ sources += [
+ ### gcmole(loong64) ###
"test-assembler-loong64.cc",
"test-macro-assembler-loong64.cc",
]
@@ -352,11 +372,12 @@ v8_source_set("cctest_sources") {
deps = [
"../..:run_torque",
"../..:v8_shared_internal_headers",
+ "../..:v8_tracing",
]
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
- public_deps += [ "//third_party/icu" ]
+ public_deps += [ v8_icu_path ]
}
cflags = []
diff --git a/deps/v8/test/cctest/assembler-helper-arm.h b/deps/v8/test/cctest/assembler-helper-arm.h
index ec31d719e6..fbef7ce902 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.h
+++ b/deps/v8/test/cctest/assembler-helper-arm.h
@@ -28,7 +28,7 @@ template <typename Signature>
GeneratedCode<Signature> AssembleCode(
Isolate* isolate, std::function<void(MacroAssembler&)> assemble) {
return GeneratedCode<Signature>::FromCode(
- *AssembleCodeImpl(isolate, assemble));
+ isolate, *AssembleCodeImpl(isolate, assemble));
}
} // namespace internal
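
Editor's note: the hunk above changes GeneratedCode<Signature>::FromCode to take the Isolate as an additional first argument. As a rough standalone illustration of the wrapper idea only (not V8's implementation, which binds real JIT-generated code), the sketch below wires a typed call interface to a raw entry point; a plain C++ function stands in for assembled code and the Isolate parameter is omitted.

#include <cstdio>
#include <utility>

// Minimal stand-in for a GeneratedCode<Signature>-style wrapper: it binds an
// entry point to a typed Call() interface via a partial specialization on the
// function signature.
template <typename Signature>
class GeneratedStub;

template <typename R, typename... Args>
class GeneratedStub<R(Args...)> {
 public:
  static GeneratedStub FromEntry(R (*entry)(Args...)) {
    return GeneratedStub(entry);
  }
  R Call(Args... args) { return entry_(std::forward<Args>(args)...); }

 private:
  explicit GeneratedStub(R (*entry)(Args...)) : entry_(entry) {}
  R (*entry_)(Args...);
};

static int AddOne(int x) { return x + 1; }  // stands in for assembled code

int main() {
  auto stub = GeneratedStub<int(int)>::FromEntry(&AddOne);
  std::printf("%d\n", stub.Call(41));  // prints 42
  return 0;
}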
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 8722f72a8e..bbadc6849b 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -44,7 +44,10 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/common/globals.h"
+#include "src/init/v8.h"
+#ifdef V8_ENABLE_TURBOFAN
#include "src/compiler/pipeline.h"
+#endif // V8_ENABLE_TURBOFAN
#include "src/flags/flags.h"
#include "src/objects/objects-inl.h"
#include "src/trap-handler/trap-handler.h"
@@ -117,7 +120,7 @@ void CcTest::Run(const char* snapshot_directory) {
} else {
platform = std::move(underlying_default_platform);
}
- v8::V8::InitializePlatform(platform.get());
+ i::V8::InitializePlatformForTesting(platform.get());
cppgc::InitializeProcess(platform->GetPageAllocator());
// Allow changing flags in cctests.
@@ -197,40 +200,30 @@ void CcTest::AddGlobalFunction(v8::Local<v8::Context> env, const char* name,
env->Global()->Set(env, v8_str(name), func).FromJust();
}
-void CcTest::CollectGarbage(i::AllocationSpace space, i::Isolate* isolate,
- i::Heap::ScanStackMode mode) {
+void CcTest::CollectGarbage(i::AllocationSpace space, i::Isolate* isolate) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
-void CcTest::CollectAllGarbage(i::Isolate* isolate,
- i::Heap::ScanStackMode mode) {
+void CcTest::CollectAllGarbage(i::Isolate* isolate) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
}
-void CcTest::CollectAllAvailableGarbage(i::Isolate* isolate,
- i::Heap::ScanStackMode mode) {
+void CcTest::CollectAllAvailableGarbage(i::Isolate* isolate) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
}
-void CcTest::PreciseCollectAllGarbage(i::Isolate* isolate,
- i::Heap::ScanStackMode mode) {
+void CcTest::PreciseCollectAllGarbage(i::Isolate* isolate) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
}
-void CcTest::CollectSharedGarbage(i::Isolate* isolate,
- i::Heap::ScanStackMode mode) {
+void CcTest::CollectSharedGarbage(i::Isolate* isolate) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectGarbageShared(iso->main_thread_local_heap(),
i::GarbageCollectionReason::kTesting);
}
@@ -321,6 +314,7 @@ HandleAndZoneScope::HandleAndZoneScope(bool support_zone_compression)
HandleAndZoneScope::~HandleAndZoneScope() = default;
+#ifdef V8_ENABLE_TURBOFAN
i::Handle<i::JSFunction> Optimize(
i::Handle<i::JSFunction> function, i::Zone* zone, i::Isolate* isolate,
uint32_t flags, std::unique_ptr<i::compiler::JSHeapBroker>* out_broker) {
@@ -343,14 +337,13 @@ i::Handle<i::JSFunction> Optimize(
CHECK(info.shared_info()->HasBytecodeArray());
i::JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
- i::Handle<i::CodeT> code = i::ToCodeT(
+ i::Handle<i::Code> code =
i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
- .ToHandleChecked(),
- isolate);
- info.native_context().AddOptimizedCode(*code);
+ .ToHandleChecked();
function->set_code(*code, v8::kReleaseStore);
return function;
}
+#endif // V8_ENABLE_TURBOFAN
static void PrintTestList() {
int test_num = 0;
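
Editor's note: the cctest.cc hunks above wrap both the src/compiler/pipeline.h include and the Optimize() helper in #ifdef V8_ENABLE_TURBOFAN, matching the v8_enable_turbofan gating of the compiler test sources in BUILD.gn earlier in this diff. The sketch below shows only the general guard pattern in a self-contained program; ENABLE_OPTIMIZER is a made-up macro standing in for V8_ENABLE_TURBOFAN, which in the real build is defined (or not) by GN.

#include <cstdio>

// Pass -DENABLE_OPTIMIZER to the compiler to flip the branch; by default the
// optimizing path is compiled out entirely, just as Optimize() is above.
#ifdef ENABLE_OPTIMIZER
static int Optimize(int x) { return x * 2; }  // pretend optimizing tier
#endif

int main() {
#ifdef ENABLE_OPTIMIZER
  std::printf("optimized result: %d\n", Optimize(21));
#else
  std::printf("built without the optimizing tier\n");
#endif
  return 0;
}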
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 598124de35..203ba968f6 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -170,22 +170,12 @@ class CcTest {
static void AddGlobalFunction(v8::Local<v8::Context> env, const char* name,
v8::FunctionCallback callback);
- // By default, the GC methods do not scan the stack conservatively.
- static void CollectGarbage(
- i::AllocationSpace space, i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone);
- static void CollectAllGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone);
- static void CollectAllAvailableGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone);
- static void PreciseCollectAllGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone);
- static void CollectSharedGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone);
+ static void CollectGarbage(i::AllocationSpace space,
+ i::Isolate* isolate = nullptr);
+ static void CollectAllGarbage(i::Isolate* isolate = nullptr);
+ static void CollectAllAvailableGarbage(i::Isolate* isolate = nullptr);
+ static void PreciseCollectAllGarbage(i::Isolate* isolate = nullptr);
+ static void CollectSharedGarbage(i::Isolate* isolate = nullptr);
static i::Handle<i::String> MakeString(const char* str);
static i::Handle<i::String> MakeName(const char* str, int suffix);
@@ -784,12 +774,20 @@ class SimulatorHelper {
state->sp = reinterpret_cast<void*>(simulator_->sp());
state->fp = reinterpret_cast<void*>(simulator_->fp());
state->lr = reinterpret_cast<void*>(simulator_->lr());
-#elif V8_TARGET_ARCH_MIPS64
+#elif V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
state->pc = reinterpret_cast<void*>(simulator_->get_pc());
state->sp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::sp));
state->fp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::fp));
+#elif V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::fp));
+ state->lr = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::ra));
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
state->pc = reinterpret_cast<void*>(simulator_->get_pc());
state->sp = reinterpret_cast<void*>(
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index fdaa1bca45..73fbd95cec 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -95,10 +95,15 @@
# Slow tests.
'test-api/InternalFieldsSubclassing': [PASS, SLOW],
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
+ 'test-heap/TestInternalWeakLists': [PASS, SLOW],
'test-heap-profiler/ManyLocalsInSharedContext': [PASS, SLOW],
'test-jump-table-assembler/JumpTablePatchingStress': [PASS, SLOW],
- 'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
+ 'test-ptr-compr-cage/SharedPtrComprCageRace': [PASS, SLOW],
'test-serialize/CustomSnapshotDataBlobImmortalImmovableRoots': [PASS, ['mode == debug', SKIP]],
+ 'test-serialize/SharedStrings': [PASS, SLOW],
+ 'test-serialize/StartupSerializerOnce': [PASS, SLOW],
+ 'test-serialize/StartupSerializerTwice': [PASS, SLOW],
+ 'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
# Tests that need to run sequentially (e.g. due to memory consumption).
'test-accessors/HandleScopePop': [PASS, HEAVY],
@@ -114,6 +119,7 @@
'test-serialize/SnapshotCompression': [PASS, HEAVY],
'test-serialize/StartupSerializerOnceRunScript': [PASS, HEAVY],
'test-serialize/StartupSerializerTwiceRunScript': [PASS, HEAVY],
+ 'test-serialize/StaticRootsPredictableSnapshot': [PASS, HEAVY],
'test-strings/StringOOMNewStringFromOneByte': [PASS, HEAVY],
'test-strings/StringOOMNewStringFromUtf8': [PASS, HEAVY],
'test-strings/Traverse': [PASS, HEAVY],
@@ -125,6 +131,9 @@
['mode == debug', {
# BUG(v8:10996): Flaky on Linux64 - debug
'test-cpu-profiler/StartProfilingAfterOsr': [SKIP],
+
+ 'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadParksAndUnparks': [PASS, SLOW],
+ 'test-concurrent-allocation/ConcurrentAllocationInLargeSpace': [PASS, SLOW],
}],
##############################################################################
@@ -280,11 +289,6 @@
# BUG(v8:4642).
'test-lockers/LockAndUnlockDifferentIsolates': [PASS, NO_VARIANTS],
- # BUG(10107): Failing flakily
- 'test-cpu-profiler/Inlining2': ['arch == ia32 and mode == debug', SKIP],
- 'test-cpu-profiler/CrossScriptInliningCallerLineNumbers': ['arch == ia32 and mode == debug', SKIP],
- 'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': ['arch == ia32 and mode == debug', SKIP],
-
# BUG(v8:10996): Flaky on Win32
'test-cpu-profiler/StartProfilingAfterOsr': ['arch == ia32', SKIP],
}], # 'system == windows'
@@ -297,7 +301,9 @@
'test-serialize/StartupSerializerOnce': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
+ 'test-serialize/StaticRootsPredictableSnapshot': [SKIP],
'test-serialize/StartupSerializerTwice': [SKIP],
+
}], # 'system == windows and arch == x64 and mode == debug'
##############################################################################
@@ -510,9 +516,7 @@
# TODO(v8:7777): Change this once wasm is supported in jitless mode.
['not has_webassembly or variant == jitless', {
'test-api/TurboAsmDisablesDetach': [SKIP],
- 'test-api/WasmI32AtomicWaitCallback': [SKIP],
- 'test-api/WasmI64AtomicWaitCallback': [SKIP],
- 'test-api/WasmSetJitCodeEventHandler': [SKIP],
+ 'test-api/Wasm*': [SKIP],
'test-api-array-buffer/ArrayBuffer_NonDetachableWasDetached': [SKIP],
'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP],
'test-c-wasm-entry/*': [SKIP],
@@ -578,6 +582,16 @@
}], # lite_mode or variant == jitless
##############################################################################
+['jitless_build_mode', {
+ # invocation_count maintenance is disabled.
+ 'test-debug/DebugCoverage*': [SKIP],
+ # Feedback collection maintenance is (mostly) disabled.
+ 'test-heap/IncrementalMarkingPreservesMonomorphicCallIC': [SKIP],
+ # WebAssembly not included.
+ 'test-api/Threading8': [SKIP],
+}], # jitless_build_mode
+
+##############################################################################
['lite_mode', {
# TODO(v8:8510): Tests that currently fail with lazy source positions.
'test-cpu-profiler/Inlining2': [SKIP],
@@ -778,6 +792,7 @@
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
+ 'test-serialize/StaticRootsPredictableSnapshot': [SKIP],
'test-serialize/WeakArraySerializationInSnapshot': [SKIP],
'test-typedarrays/SpeciesConstructor': [SKIP],
'test-typedarrays/SpeciesConstructorAccessor': [SKIP],
@@ -919,18 +934,6 @@
'test-debug/TerminateOnResumeFromOtherThread': [SKIP],
'test-debug/TerminateOnResumeRunJavaScriptAtBreakpoint': [SKIP],
'test-debug/TerminateOnResumeRunMicrotaskAtBreakpoint': [SKIP],
- 'test-embedder-tracing/BasicTracedReference': [SKIP],
- 'test-embedder-tracing/GarbageCollectionForTesting': [SKIP],
- 'test-embedder-tracing/NotifyEmptyStack': [SKIP],
- 'test-embedder-tracing/TracedReferenceCopyReferences': [SKIP],
- 'test-embedder-tracing/TracedReferenceCopy': [SKIP],
- 'test-embedder-tracing/TracedReferenceHandlesDoNotLeak': [SKIP],
- 'test-embedder-tracing/TracedReferenceHandlesMarking': [SKIP],
- 'test-embedder-tracing/TracedReferenceMove': [SKIP],
- 'test-embedder-tracing/TracedReferenceToUnmodifiedJSObjectDiesOnMarkSweep': [SKIP],
- 'test-embedder-tracing/TracingInEphemerons': [SKIP],
- 'test-embedder-tracing/TracingInRevivedSubgraph': [SKIP],
- 'test-embedder-tracing/V8RegisteringEmbedderReference': [SKIP],
'test-external-string-tracker/ExternalString_ExternalBackingStoreSizeIncreasesAfterExternalization': [SKIP],
'test-external-string-tracker/ExternalString_ExternalBackingStoreSizeIncreasesMarkCompact': [SKIP],
'test-heap-profiler/HeapSnapshotDeleteDuringTakeSnapshot': [SKIP],
@@ -1090,13 +1093,11 @@
'test-concurrent-shared-function-info/TestConcurrentSharedFunctionInfo': [SKIP],
}], # variant == assert_types
-['arch != ia32 and arch != x64', {
- # TODO(12284): Implement relaxed SIMD in Liftoff on missing architectures.
- 'test-run-wasm-relaxed-simd/RunWasm_F32x4Qfma_liftoff': [SKIP],
- 'test-run-wasm-relaxed-simd/RunWasm_F32x4Qfms_liftoff': [SKIP],
- 'test-run-wasm-relaxed-simd/RunWasm_F64x2Qfma_liftoff': [SKIP],
- 'test-run-wasm-relaxed-simd/RunWasm_F64x2Qfms_liftoff': [SKIP],
- 'test-run-wasm-relaxed-simd/RunWasm_RegressFmaReg_liftoff': [SKIP],
-}],
+##############################################################################
+['variant == turboshaft', {
+ # TODO(v8:12783, nicohartmann@): FastApiCalls temporarily disabled on
+ # Turboshaft until support has been properly ported.
+ 'test-api/FastApiCalls': [SKIP],
+}], # variant == turboshaft
]
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 64637cfea2..219650def3 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -77,21 +77,17 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
return code_.ToHandleChecked();
}
- Handle<CodeT> GetCodeT() { return ToCodeT(GetCode(), main_isolate()); }
-
protected:
Address Generate() override {
if (code_.is_null()) {
- Schedule* schedule = this->ExportForTest();
- auto call_descriptor = this->call_descriptor();
- Graph* graph = this->graph();
+ Schedule* schedule = ExportForTest();
OptimizedCompilationInfo info(base::ArrayVector("testing"), main_zone(),
kind_);
code_ = Pipeline::GenerateCodeForTesting(
- &info, main_isolate(), call_descriptor, graph,
+ &info, main_isolate(), call_descriptor(), graph(),
AssemblerOptions::Default(main_isolate()), schedule);
}
- return this->code_.ToHandleChecked()->entry();
+ return code_.ToHandleChecked()->code_entry_point();
}
private:
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 5d104467c0..f3bfd5bb11 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -45,8 +45,9 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count)
NewFunction(BuildFunction(param_count).c_str()))),
flags_(0) {
CHECK(!code.is_null());
+ CHECK(code->IsCode());
Compile(function);
- function->set_code(ToCodeT(*code), kReleaseStore);
+ function->set_code(*code, kReleaseStore);
}
FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {}
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.h b/deps/v8/test/cctest/compiler/serializer-tester.h
index 8d314f7240..d530ca1c54 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.h
+++ b/deps/v8/test/cctest/compiler/serializer-tester.h
@@ -35,7 +35,7 @@ class SerializerTester : public HandleAndZoneScope {
private:
CanonicalHandleScope canonical_;
- base::Optional<JSFunctionRef> function_;
+ OptionalJSFunctionRef function_;
std::unique_ptr<JSHeapBroker> broker_;
};
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index 218944bbd1..3ffcd3c5bd 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -61,13 +61,13 @@ TEST(ProfileDiamond) {
m.GenerateCode();
{
- uint32_t expected[] = {0, 0, 0, 0, 0, 0};
+ uint32_t expected[] = {0, 0, 0, 0, 0, 0, 0};
m.Expect(arraysize(expected), expected);
}
m.Call(0);
{
- uint32_t expected[] = {1, 1, 1, 0, 0, 1};
+ uint32_t expected[] = {1, 1, 1, 0, 0, 1, 0};
m.Expect(arraysize(expected), expected);
}
@@ -75,28 +75,34 @@ TEST(ProfileDiamond) {
m.Call(1);
{
- uint32_t expected[] = {1, 0, 0, 1, 1, 1};
+ uint32_t expected[] = {1, 0, 0, 1, 1, 1, 0};
m.Expect(arraysize(expected), expected);
}
m.Call(0);
{
- uint32_t expected[] = {2, 1, 1, 1, 1, 2};
+ uint32_t expected[] = {2, 1, 1, 1, 1, 2, 0};
m.Expect(arraysize(expected), expected);
}
// Set the counters very high, to verify that they saturate rather than
// overflowing.
- uint32_t near_overflow[] = {UINT32_MAX - 1, UINT32_MAX - 1, UINT32_MAX - 1,
- UINT32_MAX - 1, UINT32_MAX - 1, UINT32_MAX - 1};
+ uint32_t near_overflow[] = {UINT32_MAX - 1,
+ UINT32_MAX - 1,
+ UINT32_MAX - 1,
+ UINT32_MAX - 1,
+ UINT32_MAX - 1,
+ UINT32_MAX - 1,
+ 0};
m.SetCounts(arraysize(near_overflow), near_overflow);
m.Expect(arraysize(near_overflow), near_overflow);
m.Call(0);
m.Call(0);
{
- uint32_t expected[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX,
- UINT32_MAX - 1, UINT32_MAX - 1, UINT32_MAX};
+ uint32_t expected[] = {
+ UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX - 1,
+ UINT32_MAX - 1, UINT32_MAX, 0};
m.Expect(arraysize(expected), expected);
}
}
@@ -121,7 +127,7 @@ TEST(ProfileLoop) {
m.GenerateCode();
{
- uint32_t expected[] = {0, 0, 0, 0, 0, 0};
+ uint32_t expected[] = {0, 0, 0, 0, 0, 0, 0};
m.Expect(arraysize(expected), expected);
}
@@ -129,7 +135,7 @@ TEST(ProfileLoop) {
for (size_t i = 0; i < arraysize(runs); i++) {
m.ResetCounts();
CHECK_EQ(1, m.Call(static_cast<int>(runs[i])));
- uint32_t expected[] = {1, runs[i] + 1, runs[i], runs[i], 1, 1};
+ uint32_t expected[] = {1, runs[i] + 1, runs[i], runs[i], 1, 1, 0};
m.Expect(arraysize(expected), expected);
}
}
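
Editor's note: the expected-counter arrays above grow from six to seven entries (a trailing 0 for an additional counted block), and the near-overflow case checks that counters pin at UINT32_MAX rather than wrapping. A minimal standalone sketch of that saturating-counter behaviour, under the assumption that the profiler's counters behave like the 32-bit saturating add below, is:

#include <cassert>
#include <cstdint>

// Saturating increment for a 32-bit block counter: once the counter reaches
// UINT32_MAX it stays there instead of wrapping to 0, which is the behaviour
// ProfileDiamond checks after seeding the counts near the limit.
static uint32_t SaturatingIncrement(uint32_t counter) {
  return counter == UINT32_MAX ? counter : counter + 1;
}

int main() {
  uint32_t c = UINT32_MAX - 1;
  c = SaturatingIncrement(c);  // reaches UINT32_MAX
  c = SaturatingIncrement(c);  // stays at UINT32_MAX, no wrap-around
  assert(c == UINT32_MAX);
  return 0;
}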
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index d37ec78c34..09e70c9b02 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -148,12 +148,11 @@ Handle<JSFunction> CreateSumAllArgumentsFunction(FunctionTester* ft) {
TEST(SimpleCallJSFunction0Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- const int kContextOffset = kNumParams + 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeAssembler m(asm_tester.state());
{
auto function = m.Parameter<JSFunction>(1);
- auto context = m.Parameter<Context>(kContextOffset);
+ auto context = m.GetJSContextParameter();
auto receiver = SmiTag(&m, m.IntPtrConstant(42));
@@ -171,12 +170,11 @@ TEST(SimpleCallJSFunction0Arg) {
TEST(SimpleCallJSFunction1Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- const int kContextOffset = kNumParams + 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeAssembler m(asm_tester.state());
{
auto function = m.Parameter<JSFunction>(1);
- auto context = m.Parameter<Context>(kContextOffset);
+ auto context = m.GetJSContextParameter();
Node* receiver = SmiTag(&m, m.IntPtrConstant(42));
Node* a = SmiTag(&m, m.IntPtrConstant(13));
@@ -195,12 +193,11 @@ TEST(SimpleCallJSFunction1Arg) {
TEST(SimpleCallJSFunction2Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- const int kContextOffset = kNumParams + 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeAssembler m(asm_tester.state());
{
auto function = m.Parameter<JSFunction>(1);
- auto context = m.Parameter<Context>(kContextOffset);
+ auto context = m.GetJSContextParameter();
Node* receiver = SmiTag(&m, m.IntPtrConstant(42));
Node* a = SmiTag(&m, m.IntPtrConstant(13));
@@ -446,7 +443,7 @@ TEST(TestOutOfScopeVariable) {
TEST(ExceptionHandler) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeAssembler m(asm_tester.state());
TVariable<Object> var(m.SmiConstant(0), &m);
@@ -471,7 +468,7 @@ TEST(TestCodeAssemblerCodeComment) {
i::v8_flags.code_comments = true;
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeAssembler m(asm_tester.state());
m.Comment("Comment1");
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 26ca83a475..7d86c2e359 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -2,9 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
@@ -34,13 +37,12 @@ namespace {
enum MoveMode { kParallelMoves, kSequentialMoves };
-ParallelMove* CopyMoves(ParallelMove* moves, Zone* zone) {
- ParallelMove* copy = zone->New<ParallelMove>(zone);
- for (auto m : *moves) {
- copy->AddMove(m->source(), m->destination());
- }
- return copy;
-}
+// Whether the layout before and after the moves must be the same.
+enum LayoutMode {
+ kPreserveLayout,
+ kChangeLayout,
+};
+enum OperandLifetime { kInput, kOutput };
int GetSlotSizeInBytes(MachineRepresentation rep) {
switch (rep) {
@@ -60,9 +62,9 @@ int GetSlotSizeInBytes(MachineRepresentation rep) {
}
// Forward declaration.
-Handle<Code> BuildTeardownFunction(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::vector<AllocatedOperand> parameters);
+Handle<Code> BuildTeardownFunction(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ const std::vector<AllocatedOperand>& parameters);
// Build the `setup` function. It takes a code object and a FixedArray as
// parameters and calls the former while passing it each element of the array as
@@ -90,23 +92,27 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
// | | | 128-bit vector. |
//
Handle<Code> BuildSetupFunction(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::vector<AllocatedOperand> parameters) {
- CodeAssemblerTester tester(isolate, 3, CodeKind::BUILTIN,
- "setup"); // Include receiver.
+ CallDescriptor* test_call_descriptor,
+ CallDescriptor* teardown_call_descriptor,
+ std::vector<AllocatedOperand> parameters,
+ const std::vector<AllocatedOperand>& results) {
+ CodeAssemblerTester tester(isolate, JSParameterCount(2), CodeKind::BUILTIN,
+ "setup");
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
params.push_back(__ Parameter<Object>(1));
- params.push_back(__ HeapConstant(ToCodeT(
- BuildTeardownFunction(isolate, call_descriptor, parameters), isolate)));
+ // The parameters of the teardown function are the results of the test
+ // function.
+ params.push_back(__ HeapConstant(
+ BuildTeardownFunction(isolate, teardown_call_descriptor, results)));
// First allocate the FixedArray which will hold the final results. Here we
// should take care of all allocations, meaning we allocate HeapNumbers and
// FixedArrays representing Simd128 values.
TNode<FixedArray> state_out =
- __ AllocateZeroedFixedArray(__ IntPtrConstant(parameters.size()));
- for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
- switch (parameters[i].representation()) {
+ __ AllocateZeroedFixedArray(__ IntPtrConstant(results.size()));
+ for (int i = 0; i < static_cast<int>(results.size()); i++) {
+ switch (results[i].representation()) {
case MachineRepresentation::kTagged:
break;
case MachineRepresentation::kFloat32:
@@ -165,7 +171,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
}
__ Return(
__ UncheckedCast<Object>(tester.raw_assembler_for_testing()->AddNode(
- tester.raw_assembler_for_testing()->common()->Call(call_descriptor),
+ tester.raw_assembler_for_testing()->common()->Call(
+ test_call_descriptor),
static_cast<int>(params.size()), params.data())));
return tester.GenerateCodeCloseAndEscape();
}
@@ -212,9 +219,9 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
// UNSAFE_SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may
// clobber the top 64 bits of Simd128 registers. This is the case on x64, ia32
// and Arm64 for example.
-Handle<Code> BuildTeardownFunction(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::vector<AllocatedOperand> parameters) {
+Handle<Code> BuildTeardownFunction(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ const std::vector<AllocatedOperand>& parameters) {
CodeAssemblerTester tester(isolate, call_descriptor, "teardown");
CodeStubAssembler assembler(tester.state());
auto result_array = __ Parameter<FixedArray>(1);
@@ -397,10 +404,11 @@ class TestEnvironment : public HandleAndZoneScope {
static constexpr int kFloatConstantCount = 4;
static constexpr int kDoubleConstantCount = 4;
- TestEnvironment()
+ explicit TestEnvironment(LayoutMode layout_mode = kPreserveLayout)
: blocks_(1, NewBlock(main_zone(), RpoNumber::FromInt(0)), main_zone()),
instructions_(main_isolate(), main_zone(), &blocks_),
rng_(CcTest::random_number_generator()),
+ layout_mode_(layout_mode),
supported_reps_({MachineRepresentation::kTagged,
MachineRepresentation::kFloat32,
MachineRepresentation::kFloat64}) {
@@ -424,20 +432,23 @@ class TestEnvironment : public HandleAndZoneScope {
LocationSignature::Builder test_signature(
main_zone(), 1,
2 + kGeneralRegisterCount + kDoubleRegisterCount + stack_slot_count_);
+ LocationSignature::Builder teardown_signature(
+ main_zone(), 1,
+ 2 + kGeneralRegisterCount + kDoubleRegisterCount + stack_slot_count_);
- // The first parameter will be the code object of the "teardown"
- // function. This way, the "test" function can tail-call to it.
- test_signature.AddParam(LinkageLocation::ForRegister(
- kReturnRegister0.code(), MachineType::AnyTagged()));
-
- // The second parameter will be a pre-allocated FixedArray that the
- // "teardown" function will fill with result and then return. We place this
- // parameter on the first stack argument slot which is always -1. And
- // therefore slots to perform moves on start at -2.
- test_signature.AddParam(
- LinkageLocation::ForCallerFrameSlot(-1, MachineType::AnyTagged()));
- int slot_parameter_n = -2;
- const int kTotalStackParameterCount = stack_slot_count_ + 1;
+ for (auto* sig : {&test_signature, &teardown_signature}) {
+ // The first parameter will be the code object of the "teardown"
+ // function. This way, the "test" function can tail-call to it.
+ sig->AddParam(LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::AnyTagged()));
+
+ // The second parameter will be a pre-allocated FixedArray that the
+ // "teardown" function will fill with result and then return. We place
+ // this parameter on the first stack argument slot which is always -1. And
+ // therefore slots to perform moves on start at -2.
+ sig->AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1, MachineType::AnyTagged()));
+ }
// Initialise registers.
@@ -449,6 +460,60 @@ class TestEnvironment : public HandleAndZoneScope {
DCHECK_LE(kGeneralRegisterCount,
GetRegConfig()->num_allocatable_general_registers() - 2);
+ GenerateLayout(setup_layout_, allocated_slots_in_, &test_signature);
+ test_descriptor_ = MakeCallDescriptor(test_signature.Build());
+
+ if (layout_mode_ == kChangeLayout) {
+ GenerateLayout(teardown_layout_, allocated_slots_out_,
+ &teardown_signature);
+ teardown_descriptor_ = MakeCallDescriptor(teardown_signature.Build());
+ }
+ // Else, we just reuse the layout and signature of the setup function for
+ // the teardown function since they are the same.
+ }
+
+ void AddStackSlots(
+ std::vector<AllocatedOperand>& layout,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots,
+ LocationSignature::Builder* sig) {
+ // The first stack slot is the FixedArray, start at -2.
+ int slot_parameter_n = -2;
+ std::map<MachineRepresentation, int> slot_count = {
+ {MachineRepresentation::kTagged, kTaggedSlotCount},
+ {MachineRepresentation::kFloat32, kFloat32SlotCount},
+ {MachineRepresentation::kFloat64, kFloat64SlotCount}};
+ if (TestSimd128Moves()) {
+ slot_count.emplace(MachineRepresentation::kSimd128, kSimd128SlotCount);
+ }
+
+ // Allocate new slots until we run out of them.
+ while (std::any_of(slot_count.cbegin(), slot_count.cend(),
+ [](const std::pair<MachineRepresentation, int>& entry) {
+ // True if there are slots left to allocate for this
+ // representation.
+ return entry.second > 0;
+ })) {
+ // Pick a random MachineRepresentation from supported_reps_.
+ MachineRepresentation rep = CreateRandomMachineRepresentation();
+ auto entry = slot_count.find(rep);
+ DCHECK_NE(entry, slot_count.end());
+ // We may have picked a representation for which all slots have already
+ // been allocated.
+ if (entry->second > 0) {
+ // Keep a map of (MachineRepresentation . std::vector<int>) with
+ // allocated slots to pick from for each representation.
+ int slot = slot_parameter_n;
+ slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
+ AddStackSlot(layout, slots, sig, rep, slot);
+ entry->second--;
+ }
+ }
+ }
+
+ void GenerateLayout(
+ std::vector<AllocatedOperand>& layout,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots,
+ LocationSignature::Builder* sig) {
RegList general_mask =
RegList::FromBits(GetRegConfig()->allocatable_general_codes_mask());
// kReturnRegister0 is used to hold the "teardown" code object, do not
@@ -459,7 +524,7 @@ class TestEnvironment : public HandleAndZoneScope {
for (int i = 0; i < kGeneralRegisterCount; i++) {
int code = registers->GetAllocatableGeneralCode(i);
- AddRegister(&test_signature, MachineRepresentation::kTagged, code);
+ AddRegister(layout, sig, MachineRepresentation::kTagged, code);
}
// We assume that Double, Float and Simd128 registers alias, depending on
// kSimpleFPAliasing. For this reason, we allocate a Float, Double and
@@ -472,12 +537,12 @@ class TestEnvironment : public HandleAndZoneScope {
if (kFPAliasing != AliasingKind::kCombine) {
// Allocate three registers at once if kSimd128 is supported, else
// allocate in pairs.
- AddRegister(&test_signature, MachineRepresentation::kFloat32,
+ AddRegister(layout, sig, MachineRepresentation::kFloat32,
registers->GetAllocatableFloatCode(i));
- AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ AddRegister(layout, sig, MachineRepresentation::kFloat64,
registers->GetAllocatableDoubleCode(i + 1));
if (TestSimd128Moves()) {
- AddRegister(&test_signature, MachineRepresentation::kSimd128,
+ AddRegister(layout, sig, MachineRepresentation::kSimd128,
registers->GetAllocatableSimd128Code(i + 2));
i++;
}
@@ -499,52 +564,19 @@ class TestEnvironment : public HandleAndZoneScope {
"Arm has a q8 and a d16 register but no overlapping s32 register.");
int first_simd128 = registers->GetAllocatableSimd128Code(i);
int second_simd128 = registers->GetAllocatableSimd128Code(i + 1);
- AddRegister(&test_signature, MachineRepresentation::kFloat32,
+ AddRegister(layout, sig, MachineRepresentation::kFloat32,
first_simd128 * 4);
- AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ AddRegister(layout, sig, MachineRepresentation::kFloat64,
second_simd128 * 2);
if (TestSimd128Moves()) {
int third_simd128 = registers->GetAllocatableSimd128Code(i + 2);
- AddRegister(&test_signature, MachineRepresentation::kSimd128,
+ AddRegister(layout, sig, MachineRepresentation::kSimd128,
third_simd128);
i++;
}
}
}
- // Initialise stack slots.
-
- std::map<MachineRepresentation, int> slots = {
- {MachineRepresentation::kTagged, kTaggedSlotCount},
- {MachineRepresentation::kFloat32, kFloat32SlotCount},
- {MachineRepresentation::kFloat64, kFloat64SlotCount}};
- if (TestSimd128Moves()) {
- slots.emplace(MachineRepresentation::kSimd128, kSimd128SlotCount);
- }
-
- // Allocate new slots until we run out of them.
- while (std::any_of(slots.cbegin(), slots.cend(),
- [](const std::pair<MachineRepresentation, int>& entry) {
- // True if there are slots left to allocate for this
- // representation.
- return entry.second > 0;
- })) {
- // Pick a random MachineRepresentation from supported_reps_.
- MachineRepresentation rep = CreateRandomMachineRepresentation();
- auto entry = slots.find(rep);
- DCHECK(entry != slots.end());
- // We may have picked a representation for which all slots have already
- // been allocated.
- if (entry->second > 0) {
- // Keep a map of (MachineRepresentation . std::vector<int>) with
- // allocated slots to pick from for each representation.
- int slot = slot_parameter_n;
- slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
- AddStackSlot(&test_signature, rep, slot);
- entry->second--;
- }
- }
-
// Initialise random constants.
// While constants do not know about Smis, we need to be able to
@@ -574,15 +606,19 @@ class TestEnvironment : public HandleAndZoneScope {
}
// The "teardown" function returns a FixedArray with the resulting state.
- test_signature.AddReturn(LinkageLocation::ForRegister(
- kReturnRegister0.code(), MachineType::AnyTagged()));
+ sig->AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::AnyTagged()));
+ AddStackSlots(layout, slots, sig);
+ }
- test_descriptor_ = main_zone()->New<CallDescriptor>(
+ CallDescriptor* MakeCallDescriptor(LocationSignature* sig) {
+ const int kTotalStackParameterCount = stack_slot_count_ + 1;
+ return main_zone()->New<CallDescriptor>(
CallDescriptor::kCallCodeObject, // kind
MachineType::AnyTagged(), // target MachineType
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
- test_signature.Build(), // location_sig
+ sig, // location_sig
kTotalStackParameterCount, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
@@ -613,10 +649,11 @@ class TestEnvironment : public HandleAndZoneScope {
// from on `teardown`. Therefore they are part of the environment's layout,
// and are parameters of the `test` function.
- void AddRegister(LocationSignature::Builder* test_signature,
+ void AddRegister(std::vector<AllocatedOperand>& layout,
+ LocationSignature::Builder* test_signature,
MachineRepresentation rep, int code) {
AllocatedOperand operand(AllocatedOperand::REGISTER, rep, code);
- layout_.push_back(operand);
+ layout.push_back(operand);
test_signature->AddParam(LinkageLocation::ForRegister(
code, MachineType::TypeForRepresentation(rep)));
auto entry = allocated_registers_.find(rep);
@@ -627,15 +664,17 @@ class TestEnvironment : public HandleAndZoneScope {
}
}
- void AddStackSlot(LocationSignature::Builder* test_signature,
- MachineRepresentation rep, int slot) {
+ void AddStackSlot(
+ std::vector<AllocatedOperand>& layout,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots,
+ LocationSignature::Builder* sig, MachineRepresentation rep, int slot) {
AllocatedOperand operand(AllocatedOperand::STACK_SLOT, rep, slot);
- layout_.push_back(operand);
- test_signature->AddParam(LinkageLocation::ForCallerFrameSlot(
+ layout.push_back(operand);
+ sig->AddParam(LinkageLocation::ForCallerFrameSlot(
slot, MachineType::TypeForRepresentation(rep)));
- auto entry = allocated_slots_.find(rep);
- if (entry == allocated_slots_.end()) {
- allocated_slots_.emplace(rep, std::vector<AllocatedOperand>{operand});
+ auto entry = slots.find(rep);
+ if (entry == slots.end()) {
+ slots.emplace(rep, std::vector<AllocatedOperand>{operand});
} else {
entry->second.push_back(operand);
}
@@ -646,9 +685,9 @@ class TestEnvironment : public HandleAndZoneScope {
// environment.
Handle<FixedArray> GenerateInitialState() {
Handle<FixedArray> state = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(setup_layout_.size()));
for (int i = 0; i < state->length(); i++) {
- switch (layout_[i].representation()) {
+ switch (setup_layout_[i].representation()) {
case MachineRepresentation::kTagged:
state->set(i, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
break;
@@ -687,7 +726,7 @@ class TestEnvironment : public HandleAndZoneScope {
// return a new resulting state.
Handle<FixedArray> Run(Handle<Code> test, Handle<FixedArray> state_in) {
Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(TeardownLayout().size()));
{
#ifdef ENABLE_SLOW_DCHECKS
// The "setup" and "teardown" functions are relatively big, and with
@@ -696,8 +735,9 @@ class TestEnvironment : public HandleAndZoneScope {
bool old_enable_slow_asserts = v8_flags.enable_slow_asserts;
v8_flags.enable_slow_asserts = false;
#endif
- Handle<Code> setup =
- BuildSetupFunction(main_isolate(), test_descriptor_, layout_);
+ Handle<Code> setup = BuildSetupFunction(main_isolate(), test_descriptor_,
+ TeardownCallDescriptor(),
+ setup_layout_, TeardownLayout());
#ifdef ENABLE_SLOW_DCHECKS
v8_flags.enable_slow_asserts = old_enable_slow_asserts;
#endif
@@ -705,72 +745,109 @@ class TestEnvironment : public HandleAndZoneScope {
// return value will be freed along with it. Copy the result into
// state_out.
FunctionTester ft(setup, 2);
- Handle<FixedArray> result =
- ft.CallChecked<FixedArray>(ToCodeT(test, main_isolate()), state_in);
+ Handle<FixedArray> result = ft.CallChecked<FixedArray>(test, state_in);
CHECK_EQ(result->length(), state_in->length());
result->CopyTo(0, *state_out, 0, result->length());
}
return state_out;
}
+ std::vector<AllocatedOperand>& TeardownLayout() {
+ return layout_mode_ == kPreserveLayout ? setup_layout_ : teardown_layout_;
+ }
+
+ CallDescriptor* TeardownCallDescriptor() {
+ return layout_mode_ == kPreserveLayout ? test_descriptor_
+ : teardown_descriptor_;
+ }
+
// For a given operand representing either a register or a stack slot, return
// what position it should live in inside a FixedArray state.
- int OperandToStatePosition(const AllocatedOperand& operand) const {
+ int OperandToStatePosition(std::vector<AllocatedOperand>& layout,
+ const AllocatedOperand& operand) const {
// Search `layout` for `operand`.
- auto it = std::find_if(layout_.cbegin(), layout_.cend(),
+ auto it = std::find_if(layout.cbegin(), layout.cend(),
[operand](const AllocatedOperand& this_operand) {
return this_operand.Equals(operand);
});
- DCHECK_NE(it, layout_.cend());
- return static_cast<int>(std::distance(layout_.cbegin(), it));
+ DCHECK_NE(it, layout.cend());
+ return static_cast<int>(std::distance(layout.cbegin(), it));
}
- // Perform the given list of moves on `state_in` and return a newly allocated
- // state with the results.
- Handle<FixedArray> SimulateMoves(ParallelMove* moves,
- Handle<FixedArray> state_in,
- MoveMode move_mode) {
+ Object GetMoveSource(Handle<FixedArray> state, MoveOperands* move) {
+ InstructionOperand from = move->source();
+ if (from.IsConstant()) {
+ Constant constant = instructions_.GetConstant(
+ ConstantOperand::cast(from).virtual_register());
+ Handle<Object> constant_value;
+ switch (constant.type()) {
+ case Constant::kInt32:
+ constant_value =
+ Handle<Smi>(Smi(static_cast<Address>(
+ static_cast<intptr_t>(constant.ToInt32()))),
+ main_isolate());
+ break;
+ case Constant::kInt64:
+ constant_value = Handle<Smi>(
+ Smi(static_cast<Address>(constant.ToInt64())), main_isolate());
+ break;
+ case Constant::kFloat32:
+ constant_value = main_isolate()->factory()->NewHeapNumber(
+ static_cast<double>(constant.ToFloat32()));
+ break;
+ case Constant::kFloat64:
+ constant_value = main_isolate()->factory()->NewHeapNumber(
+ constant.ToFloat64().value());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return *constant_value;
+ } else {
+ int from_index =
+ OperandToStatePosition(setup_layout_, AllocatedOperand::cast(from));
+ return state->get(from_index);
+ }
+ }
+
+ // Perform the given list of sequential moves on `state_in` and return a newly
+ // allocated state with the results.
+ Handle<FixedArray> SimulateSequentialMoves(ParallelMove* moves,
+ Handle<FixedArray> state_in) {
Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(setup_layout_.size()));
// We do not want to modify `state_in` in place so perform the moves on a
// copy.
state_in->CopyTo(0, *state_out, 0, state_in->length());
+ DCHECK_EQ(kPreserveLayout, layout_mode_);
for (auto move : *moves) {
- int to_index =
- OperandToStatePosition(AllocatedOperand::cast(move->destination()));
- InstructionOperand from = move->source();
- if (from.IsConstant()) {
- Constant constant = instructions_.GetConstant(
- ConstantOperand::cast(from).virtual_register());
- Handle<Object> constant_value;
- switch (constant.type()) {
- case Constant::kInt32:
- constant_value =
- Handle<Smi>(Smi(static_cast<Address>(
- static_cast<intptr_t>(constant.ToInt32()))),
- main_isolate());
- break;
- case Constant::kInt64:
- constant_value = Handle<Smi>(
- Smi(static_cast<Address>(constant.ToInt64())), main_isolate());
- break;
- case Constant::kFloat32:
- constant_value = main_isolate()->factory()->NewHeapNumber(
- static_cast<double>(constant.ToFloat32()));
- break;
- case Constant::kFloat64:
- constant_value = main_isolate()->factory()->NewHeapNumber(
- constant.ToFloat64().value());
- break;
- default:
- UNREACHABLE();
- }
- state_out->set(to_index, *constant_value);
- } else {
- int from_index = OperandToStatePosition(AllocatedOperand::cast(from));
- state_out->set(to_index, move_mode == kParallelMoves
- ? state_in->get(from_index)
- : state_out->get(from_index));
+ int to_index = OperandToStatePosition(
+ TeardownLayout(), AllocatedOperand::cast(move->destination()));
+ Object source = GetMoveSource(state_out, move);
+ state_out->set(to_index, source);
+ }
+ return state_out;
+ }
+
+ // Perform the given list of parallel moves on `state_in` and return a newly
+ // allocated state with the results.
+ Handle<FixedArray> SimulateParallelMoves(ParallelMove* moves,
+ Handle<FixedArray> state_in) {
+ Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
+ static_cast<int>(teardown_layout_.size()));
+ for (auto move : *moves) {
+ int to_index = OperandToStatePosition(
+ TeardownLayout(), AllocatedOperand::cast(move->destination()));
+ Object source = GetMoveSource(state_in, move);
+ state_out->set(to_index, source);
+ }
+ // If we generated redundant moves, they were eliminated automatically and
+ // don't appear in the parallel move. Simulate them now.
+ for (auto& operand : teardown_layout_) {
+ int to_index = OperandToStatePosition(TeardownLayout(), operand);
+ if (state_out->get(to_index).IsUndefined()) {
+ int from_index = OperandToStatePosition(setup_layout_, operand);
+ state_out->set(to_index, state_in->get(from_index));
}
}
return state_out;
@@ -781,15 +858,15 @@ class TestEnvironment : public HandleAndZoneScope {
Handle<FixedArray> SimulateSwaps(ParallelMove* swaps,
Handle<FixedArray> state_in) {
Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(setup_layout_.size()));
// We do not want to modify `state_in` in place so perform the swaps on a
// copy.
state_in->CopyTo(0, *state_out, 0, state_in->length());
for (auto swap : *swaps) {
- int lhs_index =
- OperandToStatePosition(AllocatedOperand::cast(swap->destination()));
- int rhs_index =
- OperandToStatePosition(AllocatedOperand::cast(swap->source()));
+ int lhs_index = OperandToStatePosition(
+ setup_layout_, AllocatedOperand::cast(swap->destination()));
+ int rhs_index = OperandToStatePosition(
+ setup_layout_, AllocatedOperand::cast(swap->source()));
Handle<Object> lhs{state_out->get(lhs_index), main_isolate()};
Handle<Object> rhs{state_out->get(rhs_index), main_isolate()};
state_out->set(lhs_index, *rhs);
@@ -800,16 +877,17 @@ class TestEnvironment : public HandleAndZoneScope {
// Compare the given state with a reference.
void CheckState(Handle<FixedArray> actual, Handle<FixedArray> expected) {
- for (int i = 0; i < static_cast<int>(layout_.size()); i++) {
+ for (int i = 0; i < static_cast<int>(TeardownLayout().size()); i++) {
Handle<Object> actual_value{actual->get(i), main_isolate()};
Handle<Object> expected_value{expected->get(i), main_isolate()};
if (!CompareValues(actual_value, expected_value,
- layout_[i].representation())) {
+ TeardownLayout()[i].representation())) {
std::ostringstream expected_str;
PrintStateValue(expected_str, main_isolate(), expected_value,
- layout_[i]);
+ TeardownLayout()[i]);
std::ostringstream actual_str;
- PrintStateValue(actual_str, main_isolate(), actual_value, layout_[i]);
+ PrintStateValue(actual_str, main_isolate(), actual_value,
+ TeardownLayout()[i]);
FATAL("Expected: '%s' but got '%s'", expected_str.str().c_str(),
actual_str.str().c_str());
}
@@ -846,48 +924,40 @@ class TestEnvironment : public HandleAndZoneScope {
kCannotBeConstant
};
- // Generate parallel moves at random.
- // In sequential mode, they can be incompatible between each other as this
- // doesn't matter to the code generator.
- // In parallel mode, ensure that two destinations can't conflict with each
- // other, and pick sources among the compatible destinations if any, to
- // increase the number of dependencies and stress the gap resolver.
+ // Generate sequential moves at random. Note that they may not be compatible
+ // with each other; this doesn't matter to the code generator.
ParallelMove* GenerateRandomMoves(int size, MoveMode move_mode) {
ParallelMove* parallel_move = main_zone()->New<ParallelMove>(main_zone());
- std::map<MachineRepresentation, std::vector<InstructionOperand*>>
- destinations;
for (int i = 0; i < size;) {
MachineRepresentation rep = CreateRandomMachineRepresentation();
- InstructionOperand source;
- if (move_mode == kParallelMoves && !destinations[rep].empty()) {
- // Try reusing a destination.
- source = *destinations[rep][rng_->NextInt(
- static_cast<int>(destinations[rep].size()))];
- } else {
- source = CreateRandomOperand(kNone, rep);
- }
- MoveOperands mo(source, CreateRandomOperand(kCannotBeConstant, rep));
+ InstructionOperand source = CreateRandomOperand(kNone, rep, kInput);
+ MoveOperands mo(source,
+ CreateRandomOperand(kCannotBeConstant, rep, kOutput));
// It isn't valid to call `AssembleMove` and `AssembleSwap` with redundant
// moves.
if (mo.IsRedundant()) continue;
- // Do not generate parallel moves with conflicting destinations.
- if (move_mode == kParallelMoves) {
- bool conflict = std::any_of(
- destinations.begin(), destinations.end(), [&mo](auto& p) {
- return std::any_of(
- p.second.begin(), p.second.end(), [&mo](auto& dest) {
- return dest->InterferesWith(mo.destination());
- });
- });
-
- if (conflict) continue;
- }
- MoveOperands* operands =
- parallel_move->AddMove(mo.source(), mo.destination());
- // Iterate only when a move was created.
+ parallel_move->AddMove(mo.source(), mo.destination());
i++;
- destinations[rep].push_back(&operands->destination());
+ }
+
+ return parallel_move;
+ }
+
+ // Generate parallel moves at random. Generate exactly one move for each
+ // available destination operand. Since the output layout is different from
+ // the input layout, this ensures that each destination operand is initialized
+ // with one of the values in the input fixed array.
+ ParallelMove* GenerateRandomParallelMoves() {
+ ParallelMove* parallel_move = main_zone()->New<ParallelMove>(main_zone());
+ std::vector<AllocatedOperand> destinations = teardown_layout_;
+ std::shuffle(destinations.begin(), destinations.end(), *rng_);
+
+ for (size_t i = 0; i < destinations.size(); ++i) {
+ MachineRepresentation rep = destinations[i].representation();
+ InstructionOperand source = CreateRandomOperand(kNone, rep, kInput);
+ MoveOperands mo(source, destinations[i]);
+ parallel_move->AddMove(mo.source(), mo.destination());
}
return parallel_move;
@@ -898,8 +968,10 @@ class TestEnvironment : public HandleAndZoneScope {
for (int i = 0; i < size;) {
MachineRepresentation rep = CreateRandomMachineRepresentation();
- InstructionOperand lhs = CreateRandomOperand(kCannotBeConstant, rep);
- InstructionOperand rhs = CreateRandomOperand(kCannotBeConstant, rep);
+ InstructionOperand lhs =
+ CreateRandomOperand(kCannotBeConstant, rep, kOutput);
+ InstructionOperand rhs =
+ CreateRandomOperand(kCannotBeConstant, rep, kInput);
MoveOperands mo(lhs, rhs);
// It isn't valid to call `AssembleMove` and `AssembleSwap` with redundant
// moves.
@@ -923,7 +995,8 @@ class TestEnvironment : public HandleAndZoneScope {
}
InstructionOperand CreateRandomOperand(OperandConstraint constraint,
- MachineRepresentation rep) {
+ MachineRepresentation rep,
+ OperandLifetime operand_lifetime) {
// Only generate a Constant if the operand is a source and we have a
// constant with a compatible representation in stock.
bool generate_constant =
@@ -931,7 +1004,7 @@ class TestEnvironment : public HandleAndZoneScope {
(allocated_constants_.find(rep) != allocated_constants_.end());
switch (rng_->NextInt(generate_constant ? 3 : 2)) {
case 0:
- return CreateRandomStackSlotOperand(rep);
+ return CreateRandomStackSlotOperand(rep, operand_lifetime);
case 1:
return CreateRandomRegisterOperand(rep);
case 2:
@@ -946,9 +1019,29 @@ class TestEnvironment : public HandleAndZoneScope {
return allocated_registers_[rep][index];
}
- AllocatedOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
- int index = rng_->NextInt(static_cast<int>(allocated_slots_[rep].size()));
- return allocated_slots_[rep][index];
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>&
+ AllocatedSlotsIn() {
+ return allocated_slots_in_;
+ }
+
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>&
+ AllocatedSlotsOut() {
+ return layout_mode_ == kPreserveLayout ? allocated_slots_in_
+ : allocated_slots_out_;
+ }
+
+ AllocatedOperand CreateRandomStackSlotOperand(
+ MachineRepresentation rep,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots) {
+ int index = rng_->NextInt(static_cast<int>(slots[rep].size()));
+ return slots[rep][index];
+ }
+
+ AllocatedOperand CreateRandomStackSlotOperand(
+ MachineRepresentation rep, OperandLifetime operand_lifetime) {
+ return CreateRandomStackSlotOperand(rep, operand_lifetime == kInput
+ ? AllocatedSlotsIn()
+ : AllocatedSlotsOut());
}
ConstantOperand CreateRandomConstant(MachineRepresentation rep) {
@@ -967,14 +1060,18 @@ class TestEnvironment : public HandleAndZoneScope {
InstructionSequence* instructions() { return &instructions_; }
CallDescriptor* test_descriptor() { return test_descriptor_; }
int stack_slot_count() const { return stack_slot_count_; }
+ LayoutMode layout_mode() const { return layout_mode_; }
private:
ZoneVector<InstructionBlock*> blocks_;
InstructionSequence instructions_;
v8::base::RandomNumberGenerator* rng_;
// The layout describes the type of each element in the environment, in order.
- std::vector<AllocatedOperand> layout_;
+ const LayoutMode layout_mode_;
+ std::vector<AllocatedOperand> setup_layout_;
+ std::vector<AllocatedOperand> teardown_layout_;
CallDescriptor* test_descriptor_;
+ CallDescriptor* teardown_descriptor_;
// Allocated constants, registers and stack slots that we can generate moves
// with. Each per compatible representation.
std::vector<MachineRepresentation> supported_reps_;
@@ -983,7 +1080,9 @@ class TestEnvironment : public HandleAndZoneScope {
std::map<MachineRepresentation, std::vector<AllocatedOperand>>
allocated_registers_;
std::map<MachineRepresentation, std::vector<AllocatedOperand>>
- allocated_slots_;
+ allocated_slots_in_;
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>
+ allocated_slots_out_;
int stack_slot_count_;
};
@@ -998,8 +1097,8 @@ constexpr int TestEnvironment::kSmiConstantCount;
constexpr int TestEnvironment::kFloatConstantCount;
constexpr int TestEnvironment::kDoubleConstantCount;
-// Wrapper around the CodeGenerator. Code generated by this can only be called
-// using the given `TestEnvironment`.
+// Wrapper around the CodeGenerator. Code generated by this can
+// only be called using the given `TestEnvironment`.
class CodeGeneratorTester {
public:
explicit CodeGeneratorTester(TestEnvironment* environment,
@@ -1016,24 +1115,26 @@ class CodeGeneratorTester {
// CheckAssembleSwap, we'll transparently make use of local spill slots
// instead of stack parameters for those that were picked. This allows us to
// test negative, positive, far and near ranges.
- for (int i = 0; i < (environment->stack_slot_count() / 2);) {
- MachineRepresentation rep =
- environment->CreateRandomMachineRepresentation();
- LocationOperand old_slot =
- LocationOperand::cast(environment->CreateRandomStackSlotOperand(rep));
- // Do not pick the same slot twice.
- if (GetSpillSlot(&old_slot) != spill_slots_.end()) {
- continue;
- }
- LocationOperand new_slot =
- AllocatedOperand(LocationOperand::STACK_SLOT, rep,
- frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
- // Artificially create space on the stack by allocating a new slot.
- if (extra_stack_space > 0) {
- frame_.AllocateSpillSlot(extra_stack_space);
+ if (environment->layout_mode() == kPreserveLayout) {
+ for (int i = 0; i < (environment->stack_slot_count() / 2);) {
+ MachineRepresentation rep =
+ environment->CreateRandomMachineRepresentation();
+ LocationOperand old_slot = LocationOperand::cast(
+ environment->CreateRandomStackSlotOperand(rep, kInput));
+ // Do not pick the same slot twice.
+ if (GetSpillSlot(&old_slot) != spill_slots_.end()) {
+ continue;
+ }
+ LocationOperand new_slot =
+ AllocatedOperand(LocationOperand::STACK_SLOT, rep,
+ frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
+ // Artificially create space on the stack by allocating a new slot.
+ if (extra_stack_space > 0) {
+ frame_.AllocateSpillSlot(extra_stack_space);
+ }
+ spill_slots_.emplace_back(old_slot, new_slot);
+ i++;
}
- spill_slots_.emplace_back(old_slot, new_slot);
- i++;
}
constexpr size_t kMaxUnoptimizedFrameHeight = 0;
@@ -1047,7 +1148,7 @@ class CodeGeneratorTester {
Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight,
kMaxPushedArgumentCount);
- generator_->tasm()->CodeEntry();
+ generator_->masm()->CodeEntry();
// Force a frame to be created.
generator_->frame_access_state()->MarkHasFrame(true);
@@ -1139,10 +1240,10 @@ class CodeGeneratorTester {
void CheckAssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- int start = generator_->tasm()->pc_offset();
+ int start = generator_->masm()->pc_offset();
generator_->AssembleMove(MaybeTranslateSlot(source),
MaybeTranslateSlot(destination));
- CHECK(generator_->tasm()->pc_offset() > start);
+ CHECK(generator_->masm()->pc_offset() > start);
}
void CheckAssembleMoves(ParallelMove* moves) {
@@ -1155,15 +1256,15 @@ class CodeGeneratorTester {
void CheckAssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- int start = generator_->tasm()->pc_offset();
+ int start = generator_->masm()->pc_offset();
generator_->AssembleSwap(MaybeTranslateSlot(source),
MaybeTranslateSlot(destination));
- CHECK(generator_->tasm()->pc_offset() > start);
+ CHECK(generator_->masm()->pc_offset() > start);
}
Handle<Code> Finalize() {
generator_->FinishCode();
- generator_->safepoints()->Emit(generator_->tasm(),
+ generator_->safepoints()->Emit(generator_->masm(),
frame_.GetTotalFrameSlotCount());
generator_->MaybeEmitOutOfLineConstantPool();
@@ -1240,7 +1341,7 @@ class CodeGeneratorTester {
//
// And finally, we are able to compare the resulting FixedArray against a
// reference, computed with a simulation of AssembleMove and AssembleSwap. See
-// SimulateMoves and SimulateSwaps.
+// SimulateSequentialMoves, SimulateParallelMoves and SimulateSwaps.
// Allocate space between slots to increase coverage of moves with larger
// ranges. Note that this affects how much stack is allocated when running the
@@ -1258,8 +1359,7 @@ TEST(FuzzAssembleMove) {
Handle<FixedArray> state_in = env.GenerateInitialState();
ParallelMove* moves = env.GenerateRandomMoves(1000, kSequentialMoves);
- Handle<FixedArray> expected =
- env.SimulateMoves(moves, state_in, kSequentialMoves);
+ Handle<FixedArray> expected = env.SimulateSequentialMoves(moves, state_in);
// Test small and potentially large ranges separately.
for (int extra_space : {0, kExtraSpace}) {
@@ -1281,41 +1381,25 @@ TEST(FuzzAssembleMove) {
// Test integration with the gap resolver by resolving parallel moves first.
TEST(FuzzAssembleParallelMove) {
- TestEnvironment env;
+ TestEnvironment env(kChangeLayout);
- // Generate a sequence of N parallel moves of M moves each.
- constexpr int N = 100;
- constexpr int M = 10;
Handle<FixedArray> state_in = env.GenerateInitialState();
- Handle<FixedArray> state_out =
- env.main_isolate()->factory()->NewFixedArray(state_in->length());
- state_in->CopyTo(0, *state_out, 0, state_in->length());
- ParallelMove* moves[N];
- for (int i = 0; i < N; ++i) {
- moves[i] = env.GenerateRandomMoves(M, kParallelMoves);
- state_out = env.SimulateMoves(moves[i], state_out, kParallelMoves);
- }
-
- // Test small and potentially large ranges separately.
- for (int extra_space : {0, kExtraSpace}) {
- CodeGeneratorTester c(&env, extra_space);
+ ParallelMove* moves = env.GenerateRandomParallelMoves();
+ Handle<FixedArray> state_out = env.SimulateParallelMoves(moves, state_in);
- for (int i = 0; i < N; ++i) {
- // The gap resolver modifies the parallel move in-place. Copy and restore
- // it after assembling.
- ParallelMove* save_moves = CopyMoves(moves[i], env.main_zone());
- c.CheckAssembleMoves(moves[i]);
- moves[i] = save_moves;
- }
+ CodeGeneratorTester c(&env);
- Handle<Code> test = c.FinalizeForExecuting();
- if (v8_flags.print_code) {
- test->Print();
- }
+ // The gap resolver modifies the parallel move in-place. That is fine here:
+ // the expected state was already computed by SimulateParallelMoves above,
+ // and the move is only assembled once.
+ c.CheckAssembleMoves(moves);
- Handle<FixedArray> actual = env.Run(test, state_in);
- env.CheckState(actual, state_out);
+ Handle<Code> test = c.FinalizeForExecuting();
+ if (v8_flags.print_code) {
+ test->Print();
}
+
+ Handle<FixedArray> actual = env.Run(test, state_in);
+ env.CheckState(actual, state_out);
}
TEST(FuzzAssembleSwap) {
@@ -1361,7 +1445,7 @@ TEST(FuzzAssembleMoveAndSwap) {
// Randomly alternate between swaps and moves.
if (env.rng()->NextInt(2) == 0) {
ParallelMove* move = env.GenerateRandomMoves(1, kSequentialMoves);
- expected = env.SimulateMoves(move, expected, kSequentialMoves);
+ expected = env.SimulateSequentialMoves(move, expected);
c.CheckAssembleMove(&move->at(0)->source(),
&move->at(0)->destination());
} else {
@@ -1579,8 +1663,8 @@ TEST(Regress_1171759) {
AssemblerOptions::Default(handles.main_isolate()), m.ExportForTest())
.ToHandleChecked();
- std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
- handles.main_isolate(), code->raw_instruction_size());
+ std::shared_ptr<wasm::NativeModule> module =
+ AllocateNativeModule(handles.main_isolate(), code->InstructionSize());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
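
Note on the simulation used by this test: the environment keeps an ordered layout of operands and treats a FixedArray as the machine state, so every register or stack slot maps to the index it occupies in that layout. SimulateSequentialMoves lets later moves observe earlier writes, while SimulateParallelMoves reads every source from the unmodified input state. A minimal standalone sketch of the two behaviours, with illustrative types rather than the V8 classes:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Hypothetical stand-ins: an operand is just an id, and the "FixedArray
  // state" is a vector indexed by the operand's position in the layout.
  struct Operand { int id; };
  struct Move { Operand source, destination; };

  // Which state slot `op` owns, i.e. its index in `layout`.
  int PositionOf(const std::vector<Operand>& layout, Operand op) {
    for (size_t i = 0; i < layout.size(); ++i) {
      if (layout[i].id == op.id) return static_cast<int>(i);
    }
    assert(false && "operand not in layout");
    return -1;
  }

  // Sequential semantics: each move sees the effect of the previous ones.
  std::vector<int64_t> SimulateSequential(const std::vector<Operand>& layout,
                                          const std::vector<Move>& moves,
                                          std::vector<int64_t> state) {
    for (const Move& m : moves) {
      state[PositionOf(layout, m.destination)] =
          state[PositionOf(layout, m.source)];
    }
    return state;
  }

  // Parallel semantics: every source is read from the unmodified input state,
  // so the moves cannot observe each other's writes.
  std::vector<int64_t> SimulateParallel(const std::vector<Operand>& layout,
                                        const std::vector<Move>& moves,
                                        const std::vector<int64_t>& state_in) {
    std::vector<int64_t> state_out = state_in;
    for (const Move& m : moves) {
      state_out[PositionOf(layout, m.destination)] =
          state_in[PositionOf(layout, m.source)];
    }
    return state_out;
  }
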
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index d9aabbe0c9..87a80ce331 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -36,14 +36,14 @@ void ExpectSharedFunctionInfoState(SharedFunctionInfo sfi,
switch (expectedState) {
case SfiState::Compiled:
CHECK(function_data.IsBytecodeArray() ||
- (function_data.IsCodeT() &&
- CodeT::cast(function_data).kind() == CodeKind::BASELINE));
+ (function_data.IsCode() &&
+ Code::cast(function_data).kind() == CodeKind::BASELINE));
CHECK(script_or_debug_info.IsScript());
break;
case SfiState::DebugInfo:
CHECK(function_data.IsBytecodeArray() ||
- (function_data.IsCodeT() &&
- CodeT::cast(function_data).kind() == CodeKind::BASELINE));
+ (function_data.IsCode() &&
+ Code::cast(function_data).kind() == CodeKind::BASELINE));
CHECK(script_or_debug_info.IsDebugInfo());
{
DebugInfo debug_info = DebugInfo::cast(script_or_debug_info);
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index 998fa10f38..e79ccec7ec 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -41,6 +41,12 @@ void GetCanonicalOperands(const InstructionOperand& op,
}
}
+// Fake frame size. The test's stack operand indices should be below this value.
+// Stack slots above this value correspond to temporaries pushed by the gap
+// resolver to resolve move cycles, and are ignored when comparing interpreter
+// states.
+constexpr int kLastFrameSlotId = 1000;
+
// The state of our move interpreter is the mapping of operands to values. Note
// that the actual values don't really matter, all we care about is equality.
class InterpreterState {
@@ -73,6 +79,13 @@ class InterpreterState {
}
}
+ void ExecuteMove(Zone* zone, InstructionOperand* source,
+ InstructionOperand* dest) {
+ ParallelMove* moves = zone->New<ParallelMove>(zone);
+ moves->AddMove(*source, *dest);
+ ExecuteInParallel(moves);
+ }
+
void MoveToTempLocation(InstructionOperand& source) {
scratch_ = KeyFor(source);
}
@@ -103,6 +116,20 @@ class InterpreterState {
return values_ == other.values_;
}
+ // Clear stack operands above kLastFrameSlotId. They correspond to temporaries
+ // pushed by the gap resolver to break cycles.
+ void ClearTemps() {
+ auto it = values_.begin();
+ while (it != values_.end()) {
+ if (it->first.kind == LocationOperand::STACK_SLOT &&
+ it->first.index >= kLastFrameSlotId) {
+ it = values_.erase(it);
+ } else {
+ it++;
+ }
+ }
+ }
+
private:
// struct for mapping operands to a unique value, that makes it easier to
// detect illegal parallel moves, and to evaluate moves for equivalence. This
@@ -217,7 +244,33 @@ class MoveInterpreter : public GapResolver::Assembler {
public:
explicit MoveInterpreter(Zone* zone) : zone_(zone) {}
- void MoveToTempLocation(InstructionOperand* source) final {
+ AllocatedOperand Push(InstructionOperand* source) override {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = ElementSizeInPointers(rep);
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep,
+ kLastFrameSlotId + sp_delta_ + new_slots);
+ ParallelMove* moves = zone_->New<ParallelMove>(zone_);
+ moves->AddMove(*source, stack_slot);
+ state_.ExecuteMove(zone_, source, &stack_slot);
+ sp_delta_ += new_slots;
+ return stack_slot;
+ }
+
+ void Pop(InstructionOperand* dest, MachineRepresentation rep) override {
+ int new_slots = ElementSizeInPointers(rep);
+ int temp_slot = kLastFrameSlotId + sp_delta_ + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, temp_slot);
+ state_.ExecuteMove(zone_, &stack_slot, dest);
+ sp_delta_ -= new_slots;
+ }
+
+ void PopTempStackSlots() override {
+ sp_delta_ = 0;
+ state_.ClearTemps();
+ }
+
+ void MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) final {
state_.MoveToTempLocation(*source);
}
void MoveTempLocationTo(InstructionOperand* dest,
@@ -247,6 +300,7 @@ class MoveInterpreter : public GapResolver::Assembler {
private:
Zone* const zone_;
InterpreterState state_;
+ int sp_delta_ = 0;
};
class ParallelMoveCreator : public HandleAndZoneScope {
@@ -419,7 +473,8 @@ void RunTest(ParallelMove* pm, Zone* zone) {
GapResolver resolver(&mi2);
resolver.Resolve(pm);
- CHECK_EQ(mi1.state(), mi2.state());
+ auto mi2_state = mi2.state();
+ CHECK_EQ(mi1.state(), mi2_state);
}
TEST(Aliasing) {
@@ -534,6 +589,75 @@ TEST(Aliasing) {
}
}
+// Test parallel moves that change the frame layout. These typically happen when
+// preparing tail-calls.
+TEST(ComplexParallelMoves) {
+ ParallelMoveCreator pmc;
+ Zone* zone = pmc.main_zone();
+
+ auto w64_2 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord64, 2);
+ auto w64_5 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord64, 5);
+ auto s128_1 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, 1);
+ auto s128_4 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, 4);
+ auto s128_5 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, 5);
+ auto s128_2 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, 2);
+ auto w64_3 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord64, 3);
+ auto w64_0 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord64, 0);
+ auto s128_6 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, 6);
+ auto w64_6 = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord64, 6);
+ auto s128_reg = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kSimd128, 0);
+
+ {
+ // A parallel move with multiple cycles that requires > 1 temporary
+ // location.
+ std::vector<InstructionOperand> moves = {
+ w64_2, w64_5, // -
+ s128_1, s128_4, // -
+ s128_5, s128_2, // -
+ w64_3, w64_0 // -
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // Regression test for https://crbug.com/1335537.
+ {
+ std::vector<InstructionOperand> moves = {
+ s128_5, s128_6, // -
+ s128_1, s128_6, // -
+ w64_6, w64_0 // -
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // A cycle with 2 moves that should not use a swap, because the
+ // interfering operands don't have the same base address.
+ {
+ std::vector<InstructionOperand> moves = {
+ s128_1, s128_reg, // -
+ s128_reg, s128_2 // -
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // Another cycle with 2 moves that should not use a swap, because the
+ // interfering operands don't have the same representation.
+ {
+ std::vector<InstructionOperand> moves = {
+ s128_2, s128_5, // -
+ w64_2, w64_5 // -
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+}
+
TEST(FuzzResolver) {
ParallelMoveCreator pmc;
for (int size = 0; size < 80; ++size) {
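
The Push/Pop/PopTempStackSlots overrides above model how the gap resolver breaks a move cycle: one operand is spilled to a fresh temporary slot, the remaining moves of the cycle are performed, and the temporary is popped into the last destination. A standalone sketch of that rotation with a single temporary (illustrative signature, not the GapResolver::Assembler interface):

  #include <cstdint>
  #include <vector>

  // Rotate the values of a move cycle d0 <- d1 <- ... <- dn-1 <- d0 using one
  // temporary, mirroring the push/pop of a scratch stack slot.
  void ResolveCycle(std::vector<int64_t>& state, const std::vector<int>& cycle) {
    if (cycle.empty()) return;
    // "Push": save the first destination's old value, which the last move
    // still needs.
    int64_t temp = state[cycle.front()];
    // Perform the remaining moves in order; each destination has already been
    // read (or saved in temp) by the time it is overwritten.
    for (size_t i = 0; i + 1 < cycle.size(); ++i) {
      state[cycle[i]] = state[cycle[i + 1]];
    }
    // "Pop": the last destination receives the saved value.
    state[cycle.back()] = temp;
  }
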
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 3713ef14ca..560e324495 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -39,7 +39,8 @@ class JSConstantCacheTester : public HandleAndZoneScope,
JSGraph(main_isolate(), &main_graph_, &main_common_, &main_javascript_,
nullptr, &main_machine_),
canonical_(main_isolate()),
- broker_(main_isolate(), main_zone()) {
+ broker_(main_isolate(), main_zone()),
+ current_broker_(&broker_) {
main_graph_.SetStart(main_graph_.NewNode(common()->Start(0)));
main_graph_.SetEnd(
main_graph_.NewNode(common()->End(1), main_graph_.start()));
@@ -56,6 +57,7 @@ class JSConstantCacheTester : public HandleAndZoneScope,
private:
CanonicalHandleScope canonical_;
JSHeapBroker broker_;
+ CurrentHeapBrokerScope current_broker_;
};
@@ -193,8 +195,8 @@ TEST(HeapNumbers) {
Handle<Object> num = T.factory()->NewNumber(value);
Handle<HeapNumber> heap = T.factory()->NewHeapNumber(value);
Node* node1 = T.Constant(value);
- Node* node2 = T.Constant(MakeRef(T.broker(), num));
- Node* node3 = T.Constant(MakeRef(T.broker(), heap));
+ Node* node2 = T.Constant(MakeRef(T.broker(), num), T.broker());
+ Node* node3 = T.Constant(MakeRef(T.broker(), heap), T.broker());
CHECK_EQ(node1, node2);
CHECK_EQ(node1, node3);
}
@@ -204,20 +206,15 @@ TEST(HeapNumbers) {
TEST(OddballHandle) {
JSConstantCacheTester T;
- CHECK_EQ(
- T.UndefinedConstant(),
- T.Constant(MakeRef<Object>(T.broker(), T.factory()->undefined_value())));
- CHECK_EQ(
- T.TheHoleConstant(),
- T.Constant(MakeRef<Object>(T.broker(), T.factory()->the_hole_value())));
- CHECK_EQ(T.TrueConstant(),
- T.Constant(MakeRef<Object>(T.broker(), T.factory()->true_value())));
+ CHECK_EQ(T.UndefinedConstant(),
+ T.Constant(T.broker()->undefined_value(), T.broker()));
+ CHECK_EQ(T.TheHoleConstant(),
+ T.Constant(T.broker()->the_hole_value(), T.broker()));
+ CHECK_EQ(T.TrueConstant(), T.Constant(T.broker()->true_value(), T.broker()));
CHECK_EQ(T.FalseConstant(),
- T.Constant(MakeRef<Object>(T.broker(), T.factory()->false_value())));
- CHECK_EQ(T.NullConstant(),
- T.Constant(MakeRef<Object>(T.broker(), T.factory()->null_value())));
- CHECK_EQ(T.NaNConstant(),
- T.Constant(MakeRef<Object>(T.broker(), T.factory()->nan_value())));
+ T.Constant(T.broker()->false_value(), T.broker()));
+ CHECK_EQ(T.NullConstant(), T.Constant(T.broker()->null_value(), T.broker()));
+ CHECK_EQ(T.NaNConstant(), T.Constant(T.broker()->nan_value(), T.broker()));
}
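
The OddballHandle and HeapNumbers checks rely on constant canonicalization: requesting a constant node for the same value twice must return the identical node. A minimal sketch of that memoization pattern under assumed, simplified types (the real cache is keyed on heap objects and number bit patterns, which this ignores):

  #include <cstdint>
  #include <unordered_map>

  struct Node { int64_t value; };

  // Memoizing constant factory: equal keys always yield the same Node*.
  class ConstantCache {
   public:
    Node* Constant(int64_t value) {
      auto it = cache_.find(value);
      if (it != cache_.end()) return it->second;
      Node* node = new Node{value};  // Leaks in this sketch; the real cache
                                     // allocates nodes in a graph zone.
      cache_.emplace(value, node);
      return node;
    }

   private:
    std::unordered_map<int64_t, Node*> cache_;
  };

  // Usage: two requests for the same value compare pointer-equal, which is
  // what CHECK_EQ(node1, node2) asserts above.
  // ConstantCache cache;
  // assert(cache.Constant(42) == cache.Constant(42));
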
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 8201b26680..c8eadddf9a 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -36,6 +36,7 @@ class ContextSpecializationTester : public HandleAndZoneScope {
&machine_),
reducer_(main_zone(), graph(), &tick_counter_, &js_heap_broker_),
js_heap_broker_(main_isolate(), main_zone()),
+ current_broker_(&js_heap_broker_),
spec_(&reducer_, jsgraph(), &js_heap_broker_, context,
MaybeHandle<JSFunction>()) {}
@@ -68,6 +69,7 @@ class ContextSpecializationTester : public HandleAndZoneScope {
JSGraph jsgraph_;
GraphReducer reducer_;
JSHeapBroker js_heap_broker_;
+ CurrentHeapBrokerScope current_broker_;
JSContextSpecialization spec_;
};
@@ -138,9 +140,10 @@ TEST(ReduceJSLoadContext0) {
const int slot = 5;
native->set(slot, *expected);
- Node* const_context = t.jsgraph()->Constant(MakeRef(t.broker(), native));
+ Node* const_context =
+ t.jsgraph()->Constant(MakeRef(t.broker(), native), t.broker());
Node* deep_const_context =
- t.jsgraph()->Constant(MakeRef(t.broker(), subcontext2));
+ t.jsgraph()->Constant(MakeRef(t.broker(), subcontext2), t.broker());
Node* param_context = t.graph()->NewNode(t.common()->Parameter(0), start);
{
@@ -202,7 +205,7 @@ TEST(ReduceJSLoadContext1) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
+ ScopeInfoRef empty = t.broker()->empty_scope_info();
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -272,7 +275,7 @@ TEST(ReduceJSLoadContext2) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
+ ScopeInfoRef empty = t.broker()->empty_scope_info();
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -285,7 +288,8 @@ TEST(ReduceJSLoadContext2) {
context_object0->set_extension(*slot_value0);
context_object1->set_extension(*slot_value1);
- Node* context0 = t.jsgraph()->Constant(MakeRef(t.broker(), context_object1));
+ Node* context0 =
+ t.jsgraph()->Constant(MakeRef(t.broker(), context_object1), t.broker());
Node* context1 =
t.graph()->NewNode(create_function_context, context0, start, start);
Node* context2 =
@@ -366,7 +370,7 @@ TEST(ReduceJSLoadContext3) {
Node* start = t.graph()->NewNode(t.common()->Start(2));
t.graph()->SetStart(start);
- ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
+ ScopeInfoRef empty = t.broker()->empty_scope_info();
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -441,9 +445,10 @@ TEST(ReduceJSStoreContext0) {
const int slot = 5;
native->set(slot, *expected);
- Node* const_context = t.jsgraph()->Constant(MakeRef(t.broker(), native));
+ Node* const_context =
+ t.jsgraph()->Constant(MakeRef(t.broker(), native), t.broker());
Node* deep_const_context =
- t.jsgraph()->Constant(MakeRef(t.broker(), subcontext2));
+ t.jsgraph()->Constant(MakeRef(t.broker(), subcontext2), t.broker());
Node* param_context = t.graph()->NewNode(t.common()->Parameter(0), start);
{
@@ -496,7 +501,7 @@ TEST(ReduceJSStoreContext1) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
+ ScopeInfoRef empty = t.broker()->empty_scope_info();
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -540,7 +545,7 @@ TEST(ReduceJSStoreContext2) {
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
- ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
+ ScopeInfoRef empty = t.broker()->empty_scope_info();
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
@@ -553,7 +558,8 @@ TEST(ReduceJSStoreContext2) {
context_object0->set_extension(*slot_value0);
context_object1->set_extension(*slot_value1);
- Node* context0 = t.jsgraph()->Constant(MakeRef(t.broker(), context_object1));
+ Node* context0 =
+ t.jsgraph()->Constant(MakeRef(t.broker(), context_object1), t.broker());
Node* context1 =
t.graph()->NewNode(create_function_context, context0, start, start);
Node* context2 =
@@ -606,7 +612,7 @@ TEST(ReduceJSStoreContext3) {
Node* start = t.graph()->NewNode(t.common()->Start(2));
t.graph()->SetStart(start);
- ScopeInfoRef empty = MakeRef(t.broker(), ScopeInfo::Empty(t.main_isolate()));
+ ScopeInfoRef empty = t.broker()->empty_scope_info();
const i::compiler::Operator* create_function_context =
t.javascript()->CreateFunctionContext(empty, 42, FUNCTION_SCOPE);
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 82649b59ca..0b18caeaaa 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -29,6 +29,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
isolate(main_isolate()),
canonical(isolate),
js_heap_broker(isolate, main_zone()),
+ current_broker(&js_heap_broker),
binop(nullptr),
unop(nullptr),
javascript(main_zone()),
@@ -48,6 +49,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
TickCounter tick_counter;
CanonicalHandleScope canonical;
JSHeapBroker js_heap_broker;
+ CurrentHeapBrokerScope current_broker;
const Operator* binop;
const Operator* unop;
JSOperatorBuilder javascript;
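
Several testers in this patch gain a CurrentHeapBrokerScope member declared directly after the broker, so it is constructed after the broker and destroyed before it. The underlying idiom is an RAII scope that installs a thread-local "current" pointer and restores the previous value on destruction; a generic sketch of that pattern (not the V8 class itself):

  // RAII scope that installs `instance` as the thread-local current object
  // for its lifetime and restores the previous one on destruction.
  template <typename T>
  class CurrentScope {
   public:
    explicit CurrentScope(T* instance) : previous_(current_) {
      current_ = instance;
    }
    ~CurrentScope() { current_ = previous_; }
    CurrentScope(const CurrentScope&) = delete;
    CurrentScope& operator=(const CurrentScope&) = delete;

    static T* Current() { return current_; }

   private:
    T* const previous_;
    static thread_local T* current_;
  };

  template <typename T>
  thread_local T* CurrentScope<T>::current_ = nullptr;

Declaring the scope as the member immediately after the broker ties the installation to the tester's lifetime and guarantees the broker outlives the scope.
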
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index b10c336851..cf0def37b1 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -129,7 +129,9 @@ class ReducerTester : public HandleAndZoneScope {
void CheckFoldBinop(T expect, Node* a, Node* b) {
CHECK(binop);
Node* n = CreateBinopNode(a, b);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_NE(n, reduction.replacement());
@@ -149,7 +151,9 @@ class ReducerTester : public HandleAndZoneScope {
void CheckBinop(Node* expect, Node* a, Node* b) {
CHECK(binop);
Node* n = CreateBinopNode(a, b);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_EQ(expect, reduction.replacement());
@@ -161,7 +165,9 @@ class ReducerTester : public HandleAndZoneScope {
Node* right) {
CHECK(binop);
Node* n = CreateBinopNode(left, right);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_EQ(binop, reduction.replacement()->op());
@@ -176,7 +182,9 @@ class ReducerTester : public HandleAndZoneScope {
Node* right_expect, Node* left, Node* right) {
CHECK(binop);
Node* n = CreateBinopNode(left, right);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction r = reducer.Reduce(n);
CHECK(r.Changed());
CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
@@ -191,7 +199,9 @@ class ReducerTester : public HandleAndZoneScope {
T right_expect, Node* left, Node* right) {
CHECK(binop);
Node* n = CreateBinopNode(left, right);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction r = reducer.Reduce(n);
CHECK(r.Changed());
CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
@@ -210,7 +220,9 @@ class ReducerTester : public HandleAndZoneScope {
Node* k = Constant<T>(constant);
{
Node* n = CreateBinopNode(k, p);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed() || reduction.replacement() == n);
CHECK_EQ(p, n->InputAt(0));
@@ -218,7 +230,9 @@ class ReducerTester : public HandleAndZoneScope {
}
{
Node* n = CreateBinopNode(p, k);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed());
CHECK_EQ(p, n->InputAt(0));
@@ -234,7 +248,9 @@ class ReducerTester : public HandleAndZoneScope {
Node* p = Parameter();
Node* k = Constant<T>(constant);
Node* n = CreateBinopNode(k, p);
- MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed());
CHECK_EQ(k, n->InputAt(0));
@@ -791,7 +807,9 @@ TEST(ReduceLoadStore) {
index, R.graph.start(), R.graph.start());
{
- MachineOperatorReducer reducer(&R.graph_reducer, &R.jsgraph);
+ MachineOperatorReducer reducer(
+ &R.graph_reducer, &R.jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(load);
CHECK(!reduction.Changed()); // loads should not be reduced.
}
@@ -801,7 +819,9 @@ TEST(ReduceLoadStore) {
R.graph.NewNode(R.machine.Store(StoreRepresentation(
MachineRepresentation::kWord32, kNoWriteBarrier)),
base, index, load, load, R.graph.start());
- MachineOperatorReducer reducer(&R.graph_reducer, &R.jsgraph);
+ MachineOperatorReducer reducer(
+ &R.graph_reducer, &R.jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
Reduction reduction = reducer.Reduce(store);
CHECK(!reduction.Changed()); // stores should not be reduced.
}
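
ReducerTester builds tiny graphs and asserts that a binop whose inputs are both constants is folded by the MachineOperatorReducer into a constant, and left alone otherwise. A minimal sketch of that kind of fold for 32-bit addition, doing the arithmetic in unsigned to get two's-complement wrap-around without signed-overflow UB (illustrative helper, not the reducer's API):

  #include <cstdint>
  #include <optional>

  // Fold lhs + rhs when both inputs are known constants; std::nullopt means
  // "no reduction", i.e. leave the node unchanged.
  std::optional<int32_t> TryFoldInt32Add(std::optional<int32_t> lhs,
                                         std::optional<int32_t> rhs) {
    if (!lhs.has_value() || !rhs.has_value()) return std::nullopt;
    uint32_t sum = static_cast<uint32_t>(*lhs) + static_cast<uint32_t>(*rhs);
    return static_cast<int32_t>(sum);  // Two's-complement wrap-around.
  }
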
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index c21ddff33f..9c65ee1582 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -120,7 +120,7 @@ Node* ToInt32(RawMachineAssembler* m, MachineType type, Node* a) {
std::shared_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
size_t code_size) {
- std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule());
+ auto module = std::make_shared<wasm::WasmModule>(wasm::kWasmOrigin);
module->num_declared_functions = 1;
// We have to add the code object to a NativeModule, because the
// WasmCallDescriptor assumes that code is on the native heap and not
@@ -185,7 +185,7 @@ void TestReturnMultipleValues(MachineType type, int min_count, int max_count) {
}
std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
- handles.main_isolate(), code->raw_instruction_size());
+ handles.main_isolate(), code->instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start =
module->AddCodeForTesting(code)->instructions().begin();
@@ -216,9 +216,9 @@ void TestReturnMultipleValues(MachineType type, int min_count, int max_count) {
}
mt.Return(ToInt32(&mt, type, ret));
#ifdef ENABLE_DISASSEMBLER
- Handle<Code> code2 = mt.GetCode();
if (v8_flags.print_code) {
StdoutStream os;
+ Handle<Code> code2 = mt.GetCode();
code2->Disassemble("multi_value_call", os, handles.main_isolate());
}
#endif
@@ -281,8 +281,8 @@ void ReturnLastValue(MachineType type) {
m.ExportForTest())
.ToHandleChecked();
- std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
- handles.main_isolate(), code->raw_instruction_size());
+ std::shared_ptr<wasm::NativeModule> module =
+ AllocateNativeModule(handles.main_isolate(), code->instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
@@ -344,8 +344,8 @@ void ReturnSumOfReturns(MachineType type) {
m.ExportForTest())
.ToHandleChecked();
- std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
- handles.main_isolate(), code->raw_instruction_size());
+ std::shared_ptr<wasm::NativeModule> module =
+ AllocateNativeModule(handles.main_isolate(), code->instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index b1e1d51d2a..38cab3452b 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -29,6 +29,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
jsgraph_(main_isolate(), main_graph_, &main_common_, &javascript_,
&main_simplified_, &main_machine_),
broker_(main_isolate(), main_zone()),
+ current_broker_(&broker_),
canonical_(main_isolate()),
changer_(&jsgraph_, &broker_, nullptr) {
Node* s = graph()->NewNode(common()->Start(num_parameters));
@@ -38,6 +39,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
JSOperatorBuilder javascript_;
JSGraph jsgraph_;
JSHeapBroker broker_;
+ CurrentHeapBrokerScope current_broker_;
CanonicalHandleScope canonical_;
RepresentationChanger changer_;
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index f445eda5b9..fa97076a1a 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -11,6 +11,7 @@
#include "src/base/overflowing-math.h"
#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/builtins/builtins.h"
#include "src/common/ptr-compr-inl.h"
#include "src/objects/objects-inl.h"
#include "src/utils/boxed-float.h"
@@ -937,12 +938,83 @@ TEST(RunInt64SubWithOverflowInBranchP) {
}
}
+TEST(RunInt64MulWithOverflowImm) {
+ int64_t actual_val = -1, expected_val = 0;
+ FOR_INT64_INPUTS(i) {
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
+ Node* mul = m.Int64MulWithOverflow(m.Int64Constant(i), m.Parameter(0));
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ FOR_INT64_INPUTS(j) {
+ int expected_ovf = base::bits::SignedMulOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
+ Node* mul = m.Int64MulWithOverflow(m.Parameter(0), m.Int64Constant(i));
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ FOR_INT64_INPUTS(j) {
+ int expected_ovf = base::bits::SignedMulOverflow64(j, i, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ FOR_INT64_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* mul =
+ m.Int64MulWithOverflow(m.Int64Constant(i), m.Int64Constant(j));
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ int expected_ovf = base::bits::SignedMulOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+TEST(RunInt64MulWithOverflowInBranchP) {
+ int constant = 911999;
+ RawMachineLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int64BinopTester bt(&m);
+ Node* mul = m.Int64MulWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, mul);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Int64Constant(constant));
+ m.Bind(&blockb);
+ Node* val = m.Projection(0, mul);
+ Node* truncated = m.TruncateInt64ToInt32(val);
+ bt.AddReturn(truncated);
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int32_t expected = constant;
+ int64_t result;
+ if (!base::bits::SignedMulOverflow64(i, j, &result)) {
+ expected = static_cast<int32_t>(result);
+ }
+ CHECK_EQ(expected, static_cast<int32_t>(bt.call(i, j)));
+ }
+ }
+}
+
static int64_t RunInt64AddShift(bool is_left, int64_t add_left,
int64_t add_right, int64_t shift_left,
int64_t shift_right) {
RawMachineAssemblerTester<int64_t> m;
- Node* shift = m.Word64Shl(m.Int64Constant(4), m.Int64Constant(2));
- Node* add = m.Int64Add(m.Int64Constant(20), m.Int64Constant(22));
+ Node* shift =
+ m.Word64Shl(m.Int64Constant(shift_left), m.Int64Constant(shift_right));
+ Node* add = m.Int64Add(m.Int64Constant(add_left), m.Int64Constant(add_right));
Node* dlsa = is_left ? m.Int64Add(shift, add) : m.Int64Add(add, shift);
m.Return(dlsa);
return m.Call();
@@ -963,10 +1035,12 @@ TEST(RunInt64AddShift) {
const size_t tc_size = sizeof(tc) / sizeof(Test_case);
for (size_t i = 0; i < tc_size; ++i) {
- CHECK_EQ(58, RunInt64AddShift(false, tc[i].add_left, tc[i].add_right,
- tc[i].shift_left, tc[i].shift_right));
- CHECK_EQ(58, RunInt64AddShift(true, tc[i].add_left, tc[i].add_right,
- tc[i].shift_left, tc[i].shift_right));
+ CHECK_EQ(tc[i].expected,
+ RunInt64AddShift(false, tc[i].add_left, tc[i].add_right,
+ tc[i].shift_left, tc[i].shift_right));
+ CHECK_EQ(tc[i].expected,
+ RunInt64AddShift(true, tc[i].add_left, tc[i].add_right,
+ tc[i].shift_left, tc[i].shift_right));
}
}
@@ -6248,7 +6322,7 @@ TEST(RunFloat64Cos) {
m.Return(m.Float64Cos(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::cos(i), m.Call(i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(COS_IMPL(i), m.Call(i)); }
}
TEST(RunFloat64Cosh) {
@@ -6358,7 +6432,7 @@ TEST(RunFloat64Sin) {
m.Return(m.Float64Sin(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::sin(i), m.Call(i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(SIN_IMPL(i), m.Call(i)); }
}
TEST(RunFloat64Sinh) {
@@ -7461,10 +7535,10 @@ TEST(RunComputedCodeObject) {
RawMachineLabel merge;
r.Branch(r.Parameter(0), &tlabel, &flabel);
r.Bind(&tlabel);
- Node* fa = r.HeapConstant(a.GetCodeT());
+ Node* fa = r.HeapConstant(a.GetCode());
r.Goto(&merge);
r.Bind(&flabel);
- Node* fb = r.HeapConstant(b.GetCodeT());
+ Node* fb = r.HeapConstant(b.GetCode());
r.Goto(&merge);
r.Bind(&merge);
Node* phi = r.Phi(MachineRepresentation::kWord32, fa, fb);
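
The new Int64MulWithOverflow tests compare generated code against base::bits::SignedMulOverflow64. A reference implementation of that check can use a 128-bit intermediate product; a sketch assuming __int128 support (GCC/Clang), not the V8 helper itself:

  #include <cstdint>

  // Returns true if lhs * rhs overflows int64_t. The wrapped low 64 bits of
  // the product are stored in *result either way, like a machine multiply.
  bool SignedMulOverflows64(int64_t lhs, int64_t rhs, int64_t* result) {
    __int128 wide = static_cast<__int128>(lhs) * static_cast<__int128>(rhs);
    *result = static_cast<int64_t>(static_cast<uint64_t>(wide));
    return wide > INT64_MAX || wide < INT64_MIN;
  }

GCC and Clang also provide __builtin_mul_overflow, which performs the same check and writes the wrapped product in one step.
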
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index cf55b622e9..1fee7933c0 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -241,8 +241,8 @@ class Int32Signature : public MachineSignature {
}
};
-Handle<CodeT> CompileGraph(const char* name, CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule = nullptr) {
+Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
OptimizedCompilationInfo info(base::ArrayVector("testing"), graph->zone(),
CodeKind::FOR_TESTING);
@@ -256,12 +256,12 @@ Handle<CodeT> CompileGraph(const char* name, CallDescriptor* call_descriptor,
code->Disassemble(name, os, isolate);
}
#endif
- return ToCodeT(code, isolate);
+ return code;
}
-Handle<CodeT> WrapWithCFunction(Handle<CodeT> inner,
- CallDescriptor* call_descriptor) {
- Zone zone(inner->GetIsolate()->allocator(), ZONE_NAME, kCompressGraphZone);
+Handle<Code> WrapWithCFunction(Isolate* isolate, Handle<Code> inner,
+ CallDescriptor* call_descriptor) {
+ Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
int param_count = static_cast<int>(call_descriptor->ParameterCount());
GraphAndBuilders caller(&zone);
{
@@ -424,7 +424,7 @@ class Computer {
CHECK_LE(num_params, kMaxParamCount);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- Handle<CodeT> inner;
+ Handle<Code> inner;
{
// Build the graph for the computation.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -439,7 +439,7 @@ class Computer {
{
// constant mode.
- Handle<CodeT> wrapper;
+ Handle<Code> wrapper;
{
// Wrap the above code with a callable function that passes constants.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -473,7 +473,7 @@ class Computer {
{
// buffer mode.
- Handle<CodeT> wrapper;
+ Handle<Code> wrapper;
{
// Wrap the above code with a callable function that loads from {input}.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -533,8 +533,8 @@ static void TestInt32Sub(CallDescriptor* desc) {
b.graph()->SetEnd(ret);
}
- Handle<CodeT> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
- Handle<CodeT> wrapper = WrapWithCFunction(inner_code, desc);
+ Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
+ Handle<Code> wrapper = WrapWithCFunction(isolate, inner_code, desc);
MachineSignature* msig = desc->GetMachineSignature(&zone);
CodeRunner<int32_t> runnable(isolate, wrapper,
CSignature::FromMachine(&zone, msig));
@@ -556,7 +556,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
int32_t output[kNumParams];
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- Handle<CodeT> inner;
+ Handle<Code> inner;
{
// Writes all parameters into the output buffer.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -573,7 +573,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
}
CSignatureOf<int32_t> csig;
- Handle<CodeT> wrapper;
+ Handle<Code> wrapper;
{
// Loads parameters from the input buffer and calls the above code.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
@@ -582,7 +582,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
RawMachineAssembler raw(isolate, &graph, cdesc);
Node* base = raw.PointerConstant(input);
Node* target = raw.HeapConstant(inner);
- Node** inputs = zone.NewArray<Node*>(kNumParams + 1);
+ Node** inputs = zone.NewArray<Node*>(JSParameterCount(kNumParams));
int input_count = 0;
inputs[input_count++] = target;
for (int i = 0; i < kNumParams; i++) {
@@ -939,7 +939,7 @@ TEST(Float64Select_stack_params_return_reg) {
template <typename CType, int which>
static void Build_Select_With_Call(CallDescriptor* desc,
RawMachineAssembler* raw) {
- Handle<CodeT> inner;
+ Handle<Code> inner;
int num_params = ParamCount(desc);
CHECK_LE(num_params, kMaxParamCount);
{
@@ -951,7 +951,7 @@ static void Build_Select_With_Call(CallDescriptor* desc,
r.Return(r.Parameter(which));
inner = CompileGraph("Select-indirection", desc, &graph, r.ExportForTest());
CHECK(!inner.is_null());
- CHECK(inner->IsCodeT());
+ CHECK(inner->IsCode());
}
{
@@ -1038,7 +1038,7 @@ void MixedParamTest(int start) {
MachineSignature* sig = builder.Build();
CallDescriptor* desc = config.Create(&zone, sig);
- Handle<CodeT> select;
+ Handle<Code> select;
{
// build the select.
Zone select_zone(&allocator, ZONE_NAME, kCompressGraphZone);
@@ -1050,7 +1050,7 @@ void MixedParamTest(int start) {
{
// call the select.
- Handle<CodeT> wrapper;
+ Handle<Code> wrapper;
int32_t expected_ret;
char bytes[kDoubleSize];
alignas(8) char output[kDoubleSize];
@@ -1157,7 +1157,7 @@ void TestStackSlot(MachineType slot_type, T expected) {
// Create inner function g. g has lots of parameters so that they are passed
// over the stack.
- Handle<CodeT> inner;
+ Handle<Code> inner;
Graph graph(&zone);
RawMachineAssembler g(isolate, &graph, desc);
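The same migration runs through test-run-native-calls.cc: every Handle<CodeT> becomes Handle<Code>, the ToCodeT conversion disappears, and WrapWithCFunction now receives the Isolate* explicitly because the hunk drops the inner->GetIsolate() lookup. Condensed from the hunks above, the updated wrapping sequence is:

  Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
  // The isolate is passed in rather than recovered from the handle.
  Handle<Code> wrapper = WrapWithCFunction(isolate, inner_code, desc);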
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 578c5b25b9..89075df06b 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -31,7 +31,6 @@
V(InvalidatedSlotsResetObjectRegression) \
V(InvalidatedSlotsRightTrimFixedArray) \
V(InvalidatedSlotsRightTrimLargeFixedArray) \
- V(InvalidatedSlotsLeftTrimFixedArray) \
V(InvalidatedSlotsFastToSlow) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(TestNewSpaceRefsInCopiedCode) \
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index ac422f37ea..53f2100c9d 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -264,7 +264,7 @@ void FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled, it actually points to
// the current allocation pointer.
- DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
+ DCHECK_IMPLIES(!space->heap()->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining = GetSpaceRemainingOnCurrentPage(space);
CHECK(space_remaining >= extra_bytes);
@@ -292,7 +292,6 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
// If minor incremental marking is running, we need to finalize it first
// because of the AdvanceForTesting call in this function which is currently
// only possible for MajorMC.
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kFinalizeMinorMC);
}
@@ -334,7 +333,6 @@ void AbandonCurrentlyFreeMemory(PagedSpace* space) {
}
void GcAndSweep(Heap* heap, AllocationSpace space) {
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
if (heap->sweeping_in_progress()) {
IsolateSafepointScope scope(heap);
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
index d1dffd3fe2..abaa10c7ca 100644
--- a/deps/v8/test/cctest/heap/heap-utils.h
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -8,29 +8,7 @@
#include "src/api/api-inl.h"
#include "src/heap/heap.h"
-namespace v8 {
-namespace internal {
-namespace heap {
-
-START_ALLOW_USE_DEPRECATED()
-
-class V8_NODISCARD TemporaryEmbedderHeapTracerScope {
- public:
- TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
- v8::EmbedderHeapTracer* tracer)
- : isolate_(isolate) {
- isolate_->SetEmbedderHeapTracer(tracer);
- }
-
- ~TemporaryEmbedderHeapTracerScope() {
- isolate_->SetEmbedderHeapTracer(nullptr);
- }
-
- private:
- v8::Isolate* const isolate_;
-};
-
-END_ALLOW_USE_DEPRECATED()
+namespace v8::internal::heap {
void SealCurrentObjects(Heap* heap);
@@ -91,8 +69,6 @@ bool InCorrectGeneration(v8::Isolate* isolate,
return InCorrectGeneration(*v8::Utils::OpenHandle(*tmp));
}
-} // namespace heap
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::heap
#endif // HEAP_HEAP_UTILS_H_
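Besides dropping the deprecated TemporaryEmbedderHeapTracerScope, the header switches to a C++17 nested namespace definition, a purely syntactic change that several of the files below adopt as well. For reference, the two spellings are equivalent (declaration taken from the header above):

  // Pre-C++17:
  namespace v8 { namespace internal { namespace heap {
  void SealCurrentObjects(Heap* heap);
  }}}  // namespace v8::internal::heap

  // C++17 nested namespace definition, as used above:
  namespace v8::internal::heap {
  void SealCurrentObjects(Heap* heap);
  }  // namespace v8::internal::heap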
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 763b25e3dd..a6facdd101 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -57,6 +57,8 @@ TEST(ArrayBuffer_OnlyMC) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
ArrayBufferExtension* extension;
{
@@ -84,6 +86,8 @@ TEST(ArrayBuffer_OnlyScavenge) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
ArrayBufferExtension* extension;
{
@@ -110,6 +114,8 @@ TEST(ArrayBuffer_ScavengeAndMC) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
ArrayBufferExtension* extension;
{
@@ -217,6 +223,8 @@ TEST(ArrayBuffer_NonLivePromotion) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
{
v8::HandleScope handle_scope(isolate);
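Each of the ArrayBuffer tracker tests above now opens a DisableConservativeStackScanningScopeForTesting before allocating. This matches the conservative-stack-scanning work elsewhere in this diff: with conservative scanning enabled, on-stack pointers could presumably keep the extensions alive, making the exact liveness assertions unreliable. The pattern, as it appears in the hunks:

  i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
      CcTest::heap());
  // ... allocate ArrayBuffers, run GCs, assert on ExternalBackingStoreBytes ...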
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
index 5c4ba35974..846d2bf6dc 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -132,7 +132,14 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
+#ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
v8_flags.max_old_space_size = 4;
+#else
+ // With CSS, it is expected that the GCs triggered by concurrent allocation
+ // will reclaim less memory. If this test fails, this limit should probably
+ // be further increased.
+ v8_flags.max_old_space_size = 10;
+#endif
v8_flags.stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -144,8 +151,6 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
const int kThreads = 4;
{
- ScanStackModeScopeForTesting no_stack_scanning(i_isolate->heap(),
- Heap::ScanStackMode::kNone);
ParkedScope scope(i_isolate->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
@@ -164,9 +169,17 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
+#ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
v8_flags.max_old_space_size = 4;
+#else
+ // With CSS, it is expected that the GCs triggered by concurrent allocation
+ // will reclaim less memory. If this test fails, this limit should probably
+ // be further increased.
+ v8_flags.max_old_space_size = 10;
+#endif
v8_flags.stress_concurrent_allocation = false;
v8_flags.incremental_marking = false;
+ i::FlagList::EnforceFlagImplications();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -177,9 +190,6 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
const int kThreads = 4;
{
- ScanStackModeScopeForTesting no_stack_scanning(i_isolate->heap(),
- Heap::ScanStackMode::kNone);
-
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
@@ -204,9 +214,17 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
}
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
+#ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
v8_flags.max_old_space_size = 4;
+#else
+ // With CSS, it is expected that the GCs triggered by concurrent allocation
+ // will reclaim less memory. If this test fails, this limit should probably
+ // be further increased.
+ v8_flags.max_old_space_size = 10;
+#endif
v8_flags.stress_concurrent_allocation = false;
v8_flags.incremental_marking = false;
+ i::FlagList::EnforceFlagImplications();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -217,9 +235,6 @@ UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
const int kThreads = 4;
{
- ScanStackModeScopeForTesting no_stack_scanning(i_isolate->heap(),
- Heap::ScanStackMode::kNone);
-
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
@@ -385,9 +400,9 @@ UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
HeapObject object = HeapObject::FromAddress(address);
if (i < kWhiteIterations * kObjectsAllocatedPerIteration) {
- CHECK(heap->marking_state()->IsWhite(object));
+ CHECK(heap->marking_state()->IsUnmarked(object));
} else {
- CHECK(heap->marking_state()->IsBlack(object));
+ CHECK(heap->marking_state()->IsMarked(object));
}
}
@@ -441,7 +456,7 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
}
heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
- CHECK(heap->marking_state()->IsWhite(value));
+ CHECK(heap->marking_state()->IsUnmarked(value));
auto thread =
std::make_unique<ConcurrentWriteBarrierThread>(heap, fixed_array, value);
@@ -468,7 +483,7 @@ class ConcurrentRecordRelocSlotThread final : public v8::base::Thread {
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
LocalHeap local_heap(heap_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&local_heap);
- // Modification of Code object requires write access.
+ // Modification of InstructionStream object requires write access.
RwxMemoryWriteScopeForTesting rwx_write_scope;
int mode_mask = RelocInfo::EmbeddedObjectModeMask();
for (RelocIterator it(code_, mode_mask); !it.done(); it.next()) {
@@ -527,7 +542,7 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
}
heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
- CHECK(heap->marking_state()->IsWhite(value));
+ CHECK(heap->marking_state()->IsUnmarked(value));
{
// TODO(v8:13023): remove ResetPKUPermissionsForThreadSpawning in the
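test-concurrent-allocation.cc shows the flag side of the same work: under V8_ENABLE_CONSERVATIVE_STACK_SCANNING the tests run with a larger old-space budget, flag implications are enforced explicitly, and the marking checks move from the colour-based IsWhite/IsBlack predicates to IsUnmarked/IsMarked. Condensed from the hunks above:

  #ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
    v8_flags.max_old_space_size = 4;
  #else
    // With CSS the GCs triggered by concurrent allocation are expected to
    // reclaim less memory, hence the larger limit.
    v8_flags.max_old_space_size = 10;
  #endif
    v8_flags.stress_concurrent_allocation = false;
    v8_flags.incremental_marking = false;
    i::FlagList::EnforceFlagImplications();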
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index e57fa68f37..280ecb11aa 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -5,102 +5,11 @@
#include <stdlib.h>
#include "src/heap/concurrent-marking.h"
-#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/marking-worklist-inl.h"
-#include "src/heap/marking-worklist.h"
-#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
-namespace v8 {
-namespace internal {
-namespace heap {
-
-void PublishSegment(MarkingWorklist& worklist, HeapObject object) {
- MarkingWorklist::Local local(worklist);
- for (size_t i = 0; i < MarkingWorklist::kMinSegmentSizeForTesting; i++) {
- local.Push(object);
- }
- local.Publish();
-}
-
-TEST(ConcurrentMarking) {
- if (!i::v8_flags.concurrent_marking) return;
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- CcTest::CollectAllGarbage();
- if (!heap->incremental_marking()->IsStopped()) return;
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
-
- WeakObjects weak_objects;
- ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &weak_objects);
- MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
- PublishSegment(*collector->marking_worklists()->shared(),
- ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
- concurrent_marking->Join();
- delete concurrent_marking;
-}
-
-TEST(ConcurrentMarkingReschedule) {
- if (!i::v8_flags.concurrent_marking) return;
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- CcTest::CollectAllGarbage();
- if (!heap->incremental_marking()->IsStopped()) return;
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
-
- WeakObjects weak_objects;
- ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &weak_objects);
- MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
- PublishSegment(*collector->marking_worklists()->shared(),
- ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
- concurrent_marking->Join();
- PublishSegment(*collector->marking_worklists()->shared(),
- ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->RescheduleJobIfNeeded(GarbageCollector::MARK_COMPACTOR);
- concurrent_marking->Join();
- delete concurrent_marking;
-}
-
-TEST(ConcurrentMarkingPreemptAndReschedule) {
- if (!i::v8_flags.concurrent_marking) return;
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- CcTest::CollectAllGarbage();
- if (!heap->incremental_marking()->IsStopped()) return;
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
-
- WeakObjects weak_objects;
- ConcurrentMarking* concurrent_marking =
- new ConcurrentMarking(heap, &weak_objects);
- MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
- for (int i = 0; i < 5000; i++)
- PublishSegment(*collector->marking_worklists()->shared(),
- ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->ScheduleJob(GarbageCollector::MARK_COMPACTOR);
- concurrent_marking->Pause();
- for (int i = 0; i < 5000; i++)
- PublishSegment(*collector->marking_worklists()->shared(),
- ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->RescheduleJobIfNeeded(GarbageCollector::MARK_COMPACTOR);
- concurrent_marking->Join();
- delete concurrent_marking;
-}
+namespace v8::internal::heap {
TEST(ConcurrentMarkingMarkedBytes) {
if (!v8_flags.incremental_marking) return;
@@ -150,6 +59,4 @@ UNINITIALIZED_TEST(ConcurrentMarkingStoppedOnTeardown) {
isolate->Dispose();
}
-} // namespace heap
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::heap
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
index df32d6ec4a..d9b240a945 100644
--- a/deps/v8/test/cctest/heap/test-external-string-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -75,6 +75,8 @@ TEST(ExternalString_ExternalBackingStoreSizeDecreases) {
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
+
const size_t backing_store_before =
heap->old_space()->ExternalBackingStoreBytes(type);
@@ -104,6 +106,8 @@ TEST(ExternalString_ExternalBackingStoreSizeIncreasesMarkCompact) {
heap::AbandonCurrentlyFreeMemory(heap->old_space());
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
+
const size_t backing_store_before =
heap->old_space()->ExternalBackingStoreBytes(type);
@@ -139,6 +143,8 @@ TEST(ExternalString_ExternalBackingStoreSizeIncreasesAfterExternalization) {
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
size_t old_backing_store_before = 0, new_backing_store_before = 0;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
+
{
v8::HandleScope handle_scope(isolate);
@@ -166,9 +172,9 @@ TEST(ExternalString_ExternalBackingStoreSizeIncreasesAfterExternalization) {
}
heap::GcAndSweep(heap, OLD_SPACE);
-
- CHECK_EQ(0, heap->old_space()->ExternalBackingStoreBytes(type) -
- old_backing_store_before);
+ const size_t backing_store_after =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+ CHECK_EQ(0, backing_store_after - old_backing_store_before);
}
TEST(ExternalString_PromotedThinString) {
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 46516735d2..ad00fc13d6 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -34,6 +34,7 @@
#include "src/base/strings.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/script-details.h"
#include "src/common/globals.h"
@@ -200,8 +201,8 @@ void CheckEmbeddedObjectsAreEqual(Isolate* isolate, Handle<Code> lhs,
CHECK(lhs_it.done() == rhs_it.done());
}
-static void CheckFindCodeObject(Isolate* isolate) {
- // Test FindCodeObject
+static void CheckGcSafeFindCodeForInnerPointer(Isolate* isolate) {
+ // Test GcSafeFindCodeForInnerPointer
#define __ assm.
Assembler assm(AssemblerOptions{});
@@ -212,28 +213,33 @@ static void CheckFindCodeObject(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- CHECK(code->IsCode(cage_base));
+ Handle<InstructionStream> code(
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
+ .Build()
+ ->instruction_stream(),
+ isolate);
+ CHECK(code->IsInstructionStream(cage_base));
HeapObject obj = HeapObject::cast(*code);
Address obj_addr = obj.address();
for (int i = 0; i < obj.Size(cage_base); i += kTaggedSize) {
- CodeLookupResult lookup_result = isolate->FindCodeObject(obj_addr + i);
- CHECK_EQ(*code, lookup_result.code());
+ Code lookup_result = isolate->heap()->FindCodeForInnerPointer(obj_addr + i);
+ CHECK_EQ(*code, lookup_result.instruction_stream());
}
- Handle<Code> copy =
- Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ Handle<InstructionStream> copy(
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
+ .Build()
+ ->instruction_stream(),
+ isolate);
HeapObject obj_copy = HeapObject::cast(*copy);
- CodeLookupResult not_right = isolate->FindCodeObject(
+ Code not_right = isolate->heap()->FindCodeForInnerPointer(
obj_copy.address() + obj_copy.Size(cage_base) / 2);
- CHECK_NE(not_right.code(), *code);
- CHECK_EQ(not_right.code(), *copy);
+ CHECK_NE(not_right.instruction_stream(), *code);
+ CHECK_EQ(not_right.instruction_stream(), *copy);
}
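The rewritten helper above illustrates the Code/InstructionStream split that test-heap.cc is adapted to throughout this diff: CodeBuilder's Build() still yields a Code, the test now keeps a handle to its instruction_stream(), and inner pointers are resolved through Heap::FindCodeForInnerPointer instead of Isolate::FindCodeObject. A condensed sketch of the new lookup, taken from the hunk above:

  Handle<InstructionStream> code(
      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
          .Build()
          ->instruction_stream(),
      isolate);
  CHECK(code->IsInstructionStream(cage_base));
  Code lookup_result =
      isolate->heap()->FindCodeForInnerPointer(code->address());
  CHECK_EQ(*code, lookup_result.instruction_stream());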
-
TEST(HandleNull) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -325,7 +331,7 @@ TEST(HeapObjects) {
// Check ToString for Numbers
CheckNumber(isolate, 1.1, "1.1");
- CheckFindCodeObject(isolate);
+ CheckGcSafeFindCodeForInnerPointer(isolate);
}
TEST(Tagging) {
@@ -498,6 +504,8 @@ TEST(WeakGlobalUnmodifiedApiHandlesScavenge) {
LocalContext context;
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
WeakPointerCleared = false;
@@ -537,6 +545,8 @@ TEST(WeakGlobalHandlesMark) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
WeakPointerCleared = false;
@@ -578,6 +588,8 @@ TEST(DeleteWeakGlobalHandle) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
WeakPointerCleared = false;
Handle<Object> h;
@@ -1021,10 +1033,10 @@ static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
for (int i = 0; i < size; i++) {
- // V8_EXTERNAL_CODE_SPACE specific: we might be comparing Code object
- // with non-Code object here and it might produce false positives because
- // operator== for tagged values compares only lower 32 bits when pointer
- // compression is enabled.
+ // V8_EXTERNAL_CODE_SPACE specific: we might be comparing
+ // InstructionStream object with non-InstructionStream object here and it
+ // might produce false positives because operator== for tagged values
+ // compares only lower 32 bits when pointer compression is enabled.
if (objs[i]->ptr() == obj.ptr()) {
found_count++;
}
@@ -1073,11 +1085,11 @@ TEST(Iteration) {
}
TEST(TestBytecodeFlushing) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
v8_flags.turbofan = false;
v8_flags.always_turbofan = false;
i::v8_flags.optimize_for_size = false;
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
#if ENABLE_SPARKPLUG
v8_flags.always_sparkplug = false;
#endif // ENABLE_SPARKPLUG
@@ -1088,6 +1100,8 @@ TEST(TestBytecodeFlushing) {
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
Factory* factory = i_isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
{
v8::HandleScope scope(isolate);
@@ -1136,14 +1150,17 @@ TEST(TestBytecodeFlushing) {
}
}
-TEST(TestMultiReferencedBytecodeFlushing) {
-#ifndef V8_LITE_MODE
+static void TestMultiReferencedBytecodeFlushing(bool sparkplug_compile) {
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
v8_flags.turbofan = false;
v8_flags.always_turbofan = false;
i::v8_flags.optimize_for_size = false;
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
#if ENABLE_SPARKPLUG
v8_flags.always_sparkplug = false;
+ v8_flags.flush_baseline_code = true;
+#else
+ if (sparkplug_compile) return;
#endif // ENABLE_SPARKPLUG
i::v8_flags.flush_bytecode = true;
i::v8_flags.allow_natives_syntax = true;
@@ -1152,6 +1169,8 @@ TEST(TestMultiReferencedBytecodeFlushing) {
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
Factory* factory = i_isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
{
v8::HandleScope scope(isolate);
@@ -1184,6 +1203,13 @@ TEST(TestMultiReferencedBytecodeFlushing) {
Handle<SharedFunctionInfo> copy =
i_isolate->factory()->CloneSharedFunctionInfo(shared);
+ if (sparkplug_compile) {
+ v8::HandleScope baseline_compilation_scope(isolate);
+ IsCompiledScope is_compiled_scope = copy->is_compiled_scope(i_isolate);
+ Compiler::CompileSharedWithBaseline(
+ i_isolate, copy, Compiler::CLEAR_EXCEPTION, &is_compiled_scope);
+ }
+
// Simulate several GCs that use full marking.
const int kAgingThreshold = 7;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -1202,12 +1228,22 @@ TEST(TestMultiReferencedBytecodeFlushing) {
}
}
+TEST(TestMultiReferencedBytecodeFlushing) {
+ TestMultiReferencedBytecodeFlushing(/*sparkplug_compile=*/false);
+}
+
+TEST(TestMultiReferencedBytecodeFlushingWithSparkplug) {
+ TestMultiReferencedBytecodeFlushing(/*sparkplug_compile=*/true);
+}
+
HEAP_TEST(Regress10560) {
i::v8_flags.flush_bytecode = true;
i::v8_flags.allow_natives_syntax = true;
// Disable flags that allocate a feedback vector eagerly.
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
i::v8_flags.turbofan = false;
i::v8_flags.always_turbofan = false;
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
#if ENABLE_SPARKPLUG
v8_flags.always_sparkplug = false;
#endif // ENABLE_SPARKPLUG
@@ -1306,6 +1342,7 @@ UNINITIALIZED_TEST(Regress10843) {
for (int i = 0; i < 100; i++) {
arrays.push_back(factory->NewFixedArray(10000));
}
+ CcTest::CollectAllGarbage(i_isolate);
CHECK(callback_was_invoked);
}
isolate->Dispose();
@@ -1376,8 +1413,7 @@ UNINITIALIZED_TEST(Regress12777) {
isolate->Dispose();
}
-#ifndef V8_LITE_MODE
-
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
if (v8_flags.single_generation) return;
v8_flags.turbofan = true;
@@ -1394,6 +1430,8 @@ TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8::HandleScope outer_scope(CcTest::isolate());
const char* source =
"function foo() {"
@@ -1460,8 +1498,7 @@ TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
CHECK(function->shared().is_compiled());
CHECK(function->is_compiled());
}
-
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
if (!v8_flags.incremental_marking) return;
@@ -1519,6 +1556,8 @@ void CompilationCacheCachingBehavior(bool retain_script) {
Factory* factory = isolate->factory();
CompilationCache* compilation_cache = isolate->compilation_cache();
LanguageMode language_mode = LanguageMode::kSloppy;
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8::HandleScope outer_scope(CcTest::isolate());
const char* raw_source = retain_script ? "function foo() {"
@@ -1625,6 +1664,8 @@ void CompilationCacheRegeneration(bool retain_root_sfi, bool flush_root_sfi,
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
const char* source =
"({"
@@ -1859,6 +1900,8 @@ TEST(TestInternalWeakLists) {
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];
if (!isolate->use_optimizer()) return;
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
CHECK_EQ(0, CountNativeContexts());
@@ -1944,6 +1987,7 @@ TEST(TestSizeOfRegExpCode) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = CcTest::heap();
HandleScope scope(isolate);
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
LocalContext context;
@@ -1998,6 +2042,8 @@ HEAP_TEST(TestSizeOfObjects) {
v8_flags.stress_concurrent_allocation = false;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = CcTest::heap();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
// Disable LAB, such that calculations with SizeOfObjects() and object size
// are correct.
heap->DisableInlineAllocation();
@@ -2307,6 +2353,8 @@ static int NumberOfGlobalObjects() {
// Test that we don't embed maps from foreign contexts into
// optimized code.
TEST(LeakNativeContextViaMap) {
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8_flags.allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
@@ -2357,6 +2405,8 @@ TEST(LeakNativeContextViaMap) {
// Test that we don't embed functions from foreign contexts into
// optimized code.
TEST(LeakNativeContextViaFunction) {
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8_flags.allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
@@ -2405,6 +2455,8 @@ TEST(LeakNativeContextViaFunction) {
TEST(LeakNativeContextViaMapKeyed) {
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8_flags.allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
@@ -2453,6 +2505,8 @@ TEST(LeakNativeContextViaMapKeyed) {
TEST(LeakNativeContextViaMapProto) {
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8_flags.allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
@@ -2550,7 +2604,7 @@ TEST(InstanceOfStubWriteBarrier) {
MarkingState* marking_state = CcTest::heap()->marking_state();
const double kStepSizeInMs = 100;
- while (!marking_state->IsBlack(f->code())) {
+ while (!marking_state->IsMarked(f->code())) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
CHECK(!marking->IsMajorMarkingComplete());
@@ -2619,10 +2673,9 @@ HEAP_TEST(Regress845060) {
Local<Value> str = CompileRun("var str = (new Array(10000)).join('x'); str");
CHECK(Heap::InYoungGeneration(*v8::Utils::OpenHandle(*str)));
- // Idle incremental marking sets the "kReduceMemoryFootprint" flag, which
- // causes from_space to be unmapped after scavenging.
- heap->StartIdleIncrementalMarking(GarbageCollectionReason::kTesting);
- CHECK(heap->ShouldReduceMemory());
+ // Use kReduceMemoryFootprintMask to unmap from space after scavenging.
+ heap->StartIncrementalMarking(i::Heap::kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kTesting);
// Run the test (which allocates results) until the original string was
// promoted to old space. Unmapping of from_space causes accesses to any
@@ -2652,10 +2705,12 @@ TEST(IdleNotificationFinishMarking) {
// The next idle notification has to finish incremental marking.
const double kLongIdleTime = 1000.0;
+ START_ALLOW_USE_DEPRECATED();
CcTest::isolate()->IdleNotificationDeadline(
(v8::base::TimeTicks::Now().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
kLongIdleTime);
+ END_ALLOW_USE_DEPRECATED();
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count + 1);
}
@@ -3113,13 +3168,15 @@ TEST(Regress1465) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
i::Isolate* i_isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> ctx = isolate->GetCurrentContext();
static const int transitions_count = 256;
CompileRun("function F() {}");
{
- AlwaysAllocateScopeForTesting always_allocate(CcTest::i_isolate()->heap());
+ AlwaysAllocateScopeForTesting always_allocate(heap);
for (int i = 0; i < transitions_count; i++) {
base::EmbeddedVector<char, 64> buffer;
base::SNPrintF(buffer, "var o = new F; o.prop%d = %d;", i, i);
@@ -3137,7 +3194,7 @@ TEST(Regress1465) {
CompileRun("%DebugPrint(root);");
CHECK_EQ(transitions_count, transitions_before);
- heap::SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(heap);
CcTest::CollectAllGarbage();
// Count number of live transitions after marking. Note that one transition
@@ -3274,10 +3331,10 @@ TEST(ReleaseOverReservedPages) {
if (!v8_flags.compact) return;
v8_flags.trace_gc = true;
// The optimizer can allocate stuff, messing up the test.
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
v8_flags.turbofan = false;
v8_flags.always_turbofan = false;
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
// - Parallel compaction increases fragmentation, depending on how existing
// memory is distributed. Since this is non-deterministic because of
// concurrent sweeping, we disable it for this test.
@@ -3610,6 +3667,9 @@ void ReleaseStackTraceDataTest(v8::Isolate* isolate, const char* source,
// resource's callback is fired when the external string is GC'ed.
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::HandleScope scope(isolate);
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
+
SourceResource* resource = new SourceResource(i::StrDup(source));
{
v8::HandleScope new_scope(isolate);
@@ -3811,9 +3871,9 @@ TEST(DetailedErrorStackTraceBuiltinExit) {
TEST(Regress169928) {
v8_flags.allow_natives_syntax = true;
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
v8_flags.turbofan = false;
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
LocalContext env;
@@ -4110,6 +4170,8 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
GlobalHandles* global_handles = isolate->global_handles();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
if (!isolate->use_optimizer()) return;
@@ -4148,7 +4210,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
CHECK_EQ(dependency.length(), DependentCode::kSlotsPerEntry);
MaybeObject code = dependency.Get(0 + DependentCode::kCodeSlotOffset);
CHECK(code->IsWeak());
- CHECK_EQ(bar_handle->code(), CodeT::cast(code->GetHeapObjectAssumeWeak()));
+ CHECK_EQ(bar_handle->code(), Code::cast(code->GetHeapObjectAssumeWeak()));
Smi groups = dependency.Get(0 + DependentCode::kGroupsSlotOffset).ToSmi();
CHECK_EQ(static_cast<DependentCode::DependencyGroups>(groups.value()),
DependentCode::kAllocationSiteTransitionChangedGroup |
@@ -4274,6 +4336,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
@@ -4301,7 +4364,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- code = handle(FromCodeT(bar->code()), isolate);
+ code = handle(bar->code(), isolate);
code = scope.CloseAndEscape(code);
}
@@ -4321,6 +4384,8 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
@@ -4346,7 +4411,7 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- code = handle(FromCodeT(bar->code()), isolate);
+ code = handle(bar->code(), isolate);
code = scope.CloseAndEscape(code);
}
@@ -4366,6 +4431,8 @@ TEST(NewSpaceObjectsInOptimizedCode) {
v8_flags.allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
if (!isolate->use_optimizer()) return;
HandleScope outer_scope(isolate);
@@ -4410,7 +4477,7 @@ TEST(NewSpaceObjectsInOptimizedCode) {
HeapVerifier::VerifyHeap(CcTest::heap());
#endif
CHECK(!bar->code().marked_for_deoptimization());
- code = handle(FromCodeT(bar->code()), isolate);
+ code = handle(bar->code(), isolate);
code = scope.CloseAndEscape(code);
}
@@ -4429,6 +4496,8 @@ TEST(ObjectsInEagerlyDeoptimizedCodeAreWeak) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
@@ -4455,7 +4524,7 @@ TEST(ObjectsInEagerlyDeoptimizedCodeAreWeak) {
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- code = handle(FromCodeT(bar->code()), isolate);
+ code = handle(bar->code(), isolate);
code = scope.CloseAndEscape(code);
}
@@ -4470,97 +4539,7 @@ TEST(ObjectsInEagerlyDeoptimizedCodeAreWeak) {
CHECK(code->embedded_objects_cleared());
}
-static Handle<JSFunction> OptimizeDummyFunction(v8::Isolate* isolate,
- const char* name) {
- base::EmbeddedVector<char, 256> source;
- base::SNPrintF(source,
- "function %s() { return 0; }"
- "%%PrepareFunctionForOptimization(%s);"
- "%s(); %s();"
- "%%OptimizeFunctionOnNextCall(%s);"
- "%s();",
- name, name, name, name, name, name);
- CompileRun(source.begin());
- i::Handle<JSFunction> fun = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- CcTest::global()
- ->Get(isolate->GetCurrentContext(), v8_str(name))
- .ToLocalChecked())));
- return fun;
-}
-
-static int GetCodeChainLength(Code code) {
- int result = 0;
- while (code.next_code_link().IsCodeT()) {
- result++;
- code = FromCodeT(CodeT::cast(code.next_code_link()));
- }
- return result;
-}
-
-
-TEST(NextCodeLinkIsWeak) {
- v8_flags.always_turbofan = false;
- v8_flags.allow_natives_syntax = true;
- v8_flags.stress_concurrent_inlining =
- false; // Test needs deterministic timing.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::internal::Heap* heap = CcTest::heap();
-
- if (!isolate->use_optimizer()) return;
- HandleScope outer_scope(heap->isolate());
- Handle<Code> code;
- CcTest::CollectAllAvailableGarbage();
- int code_chain_length_before, code_chain_length_after;
- {
- HandleScope scope(heap->isolate());
- Handle<JSFunction> mortal =
- OptimizeDummyFunction(CcTest::isolate(), "mortal");
- Handle<JSFunction> immortal =
- OptimizeDummyFunction(CcTest::isolate(), "immortal");
- CHECK_EQ(immortal->code().next_code_link(), mortal->code());
- code_chain_length_before = GetCodeChainLength(FromCodeT(immortal->code()));
- // Keep the immortal code and let the mortal code die.
- code = handle(FromCodeT(immortal->code()), isolate);
- code = scope.CloseAndEscape(code);
- CompileRun("mortal = null; immortal = null;");
- }
- CcTest::CollectAllAvailableGarbage();
- // Now mortal code should be dead.
- code_chain_length_after = GetCodeChainLength(*code);
- CHECK_EQ(code_chain_length_before - 1, code_chain_length_after);
-}
-
-TEST(NextCodeLinkInCodeDataContainerIsCleared) {
- v8_flags.always_turbofan = false;
- v8_flags.allow_natives_syntax = true;
- v8_flags.stress_concurrent_inlining =
- false; // Test needs deterministic timing.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::internal::Heap* heap = CcTest::heap();
-
- if (!isolate->use_optimizer()) return;
- HandleScope outer_scope(heap->isolate());
- Handle<CodeDataContainer> code_data_container;
- {
- HandleScope scope(heap->isolate());
- Handle<JSFunction> mortal1 =
- OptimizeDummyFunction(CcTest::isolate(), "mortal1");
- Handle<JSFunction> mortal2 =
- OptimizeDummyFunction(CcTest::isolate(), "mortal2");
- CHECK_EQ(mortal2->code().next_code_link(), mortal1->code());
- code_data_container =
- handle(CodeDataContainerFromCodeT(mortal2->code()), isolate);
- code_data_container = scope.CloseAndEscape(code_data_container);
- CompileRun("mortal1 = null; mortal2 = null;");
- }
- CcTest::CollectAllAvailableGarbage();
- CHECK(code_data_container->next_code_link().IsUndefined(isolate));
-}
-
-static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
+static Handle<InstructionStream> DummyOptimizedCode(Isolate* isolate) {
i::byte buffer[i::Assembler::kDefaultBufferSize];
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
@@ -4576,44 +4555,16 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
#endif
masm.Drop(2);
masm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::TURBOFAN)
- .set_self_reference(masm.CodeObject())
- .Build();
- CHECK(code->IsCode());
+ Handle<InstructionStream> code(
+ Factory::CodeBuilder(isolate, desc, CodeKind::TURBOFAN)
+ .set_self_reference(masm.CodeObject())
+ .Build()
+ ->instruction_stream(),
+ isolate);
+ CHECK(code->IsInstructionStream());
return code;
}
-
-TEST(NextCodeLinkIsWeak2) {
- v8_flags.allow_natives_syntax = true;
- v8_flags.stress_concurrent_inlining =
- false; // Test needs deterministic timing.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::internal::Heap* heap = CcTest::heap();
-
- if (!isolate->use_optimizer()) return;
- HandleScope outer_scope(heap->isolate());
- CcTest::CollectAllAvailableGarbage();
- Handle<NativeContext> context(
- NativeContext::cast(heap->native_contexts_list()), isolate);
- Handle<Code> new_head;
- Handle<Object> old_head(context->get(Context::OPTIMIZED_CODE_LIST), isolate);
- {
- HandleScope scope(heap->isolate());
- Handle<Code> immortal = DummyOptimizedCode(isolate);
- Handle<Code> mortal = DummyOptimizedCode(isolate);
- mortal->set_next_code_link(*old_head);
- immortal->set_next_code_link(ToCodeT(*mortal));
- context->SetOptimizedCodeListHead(ToCodeT(*immortal));
- new_head = scope.CloseAndEscape(immortal);
- }
- CcTest::CollectAllAvailableGarbage();
- // Now mortal code should be dead.
- CHECK_EQ(*old_head, new_head->next_code_link());
-}
-
-
static bool weak_ic_cleared = false;
static void ClearWeakIC(
@@ -4633,6 +4584,8 @@ TEST(WeakFunctionInConstructor) {
v8::Isolate* isolate = CcTest::isolate();
LocalContext env;
v8::HandleScope scope(isolate);
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
CompileRun(
"function createObj(obj) {"
" return new obj();"
@@ -4694,6 +4647,8 @@ void CheckWeakness(const char* source) {
v8_flags.allow_natives_syntax = true;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
LocalContext env;
v8::HandleScope scope(isolate);
v8::Persistent<v8::Object> garbage;
@@ -5250,6 +5205,8 @@ TEST(Regress3877) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
HandleScope scope(isolate);
CompileRun("function cls() { this.x = 10; }");
Handle<WeakFixedArray> weak_prototype_holder = factory->NewWeakFixedArray(1);
@@ -5297,6 +5254,8 @@ void CheckMapRetainingFor(int n) {
v8_flags.retain_maps_for_n_gc = n;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8::Local<v8::Context> ctx = v8::Context::New(CcTest::isolate());
Handle<Context> context = Utils::OpenHandle(*ctx);
CHECK(context->IsNativeContext());
@@ -5371,7 +5330,7 @@ TEST(PreprocessStackTrace) {
Object::GetProperty(isolate, exception, key).ToHandleChecked();
Handle<Object> code =
Object::GetElement(isolate, stack_trace, 3).ToHandleChecked();
- CHECK(code->IsCode());
+ CHECK(code->IsInstructionStream());
CcTest::CollectAllAvailableGarbage();
@@ -5384,7 +5343,7 @@ TEST(PreprocessStackTrace) {
for (int i = 0; i < array_length; i++) {
Handle<Object> element =
Object::GetElement(isolate, stack_trace, i).ToHandleChecked();
- CHECK(!element->IsCode());
+ CHECK(!element->IsInstructionStream());
}
}
@@ -5614,7 +5573,7 @@ bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
bool result = heap->code_space()->EnsureAllocation(
size_in_bytes, AllocationAlignment::kTaggedAligned,
AllocationOrigin::kRuntime, nullptr);
- heap->code_space()->UpdateInlineAllocationLimit(0);
+ heap->code_space()->UpdateInlineAllocationLimit();
return result;
}
@@ -5820,10 +5779,10 @@ TEST(Regress598319) {
CHECK(heap->lo_space()->Contains(arr.get()));
IncrementalMarking* marking = heap->incremental_marking();
MarkingState* marking_state = heap->marking_state();
- CHECK(marking_state->IsWhite(arr.get()));
+ CHECK(marking_state->IsUnmarked(arr.get()));
for (int i = 0; i < arr.get().length(); i++) {
HeapObject arr_value = HeapObject::cast(arr.get().get(i));
- CHECK(marking_state->IsWhite(arr_value));
+ CHECK(marking_state->IsUnmarked(arr_value));
}
// Start incremental marking.
@@ -5837,7 +5796,7 @@ TEST(Regress598319) {
// Check that we have not marked the interesting array during root scanning.
for (int i = 0; i < arr.get().length(); i++) {
HeapObject arr_value = HeapObject::cast(arr.get().get(i));
- CHECK(marking_state->IsWhite(arr_value));
+ CHECK(marking_state->IsUnmarked(arr_value));
}
// Now we search for a state where we are in incremental marking and have
@@ -5874,7 +5833,7 @@ TEST(Regress598319) {
// progress bar, we would fail here.
for (int i = 0; i < arr.get().length(); i++) {
HeapObject arr_value = HeapObject::cast(arr.get().get(i));
- CHECK(marking_state->IsBlack(arr_value));
+ CHECK(arr_value.InReadOnlySpace() || marking_state->IsMarked(arr_value));
}
}
@@ -6022,115 +5981,6 @@ TEST(Regress631969) {
}
}
-TEST(LeftTrimFixedArrayInBlackArea) {
- if (!v8_flags.incremental_marking) return;
- v8_flags.stress_concurrent_allocation = false; // For SimulateFullSpace.
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Heap* heap = CcTest::heap();
- Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage();
-
- i::IncrementalMarking* marking = heap->incremental_marking();
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
- CHECK(marking->IsMarking() || marking->IsStopped());
- if (marking->IsStopped()) {
- heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
- i::GarbageCollectionReason::kTesting);
- }
- CHECK(marking->IsMarking());
- CHECK(marking->black_allocation());
-
- // Ensure that we allocate a new page, set up a bump pointer area, and
- // perform the allocation in a black area.
- heap::SimulateFullSpace(heap->old_space());
- isolate->factory()->NewFixedArray(4, AllocationType::kOld);
- Handle<FixedArray> array =
- isolate->factory()->NewFixedArray(50, AllocationType::kOld);
- CHECK(heap->old_space()->Contains(*array));
- MarkingState* marking_state = heap->marking_state();
- CHECK(marking_state->IsBlack(*array));
-
- // Now left trim the allocated black area. A filler has to be installed
- // for the trimmed area and all mark bits of the trimmed area have to be
- // cleared.
- FixedArrayBase trimmed = heap->LeftTrimFixedArray(*array, 10);
- CHECK(marking_state->IsBlack(trimmed));
-
- heap::GcAndSweep(heap, OLD_SPACE);
-}
-
-TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
- if (!v8_flags.incremental_marking) return;
- v8_flags.stress_concurrent_allocation = false; // For SimulateFullSpace.
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Heap* heap = CcTest::heap();
- Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage();
-
- i::IncrementalMarking* marking = heap->incremental_marking();
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
- CHECK(marking->IsMarking() || marking->IsStopped());
- if (marking->IsStopped()) {
- heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
- i::GarbageCollectionReason::kTesting);
- }
- CHECK(marking->IsMarking());
- CHECK(marking->black_allocation());
-
- // Ensure that we allocate a new page, set up a bump pointer area, and
- // perform the allocation in a black area.
- heap::SimulateFullSpace(heap->old_space());
- isolate->factory()->NewFixedArray(10, AllocationType::kOld);
-
- // Allocate the fixed array that will be trimmed later.
- Handle<FixedArray> array =
- isolate->factory()->NewFixedArray(100, AllocationType::kOld);
- Address start_address = array->address();
- Address end_address = start_address + array->Size();
- Page* page = Page::FromAddress(start_address);
- NonAtomicMarkingState* marking_state = heap->non_atomic_marking_state();
- CHECK(marking_state->IsBlack(*array));
- CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
- page->AddressToMarkbitIndex(start_address),
- page->AddressToMarkbitIndex(end_address)));
- CHECK(heap->old_space()->Contains(*array));
-
- FixedArrayBase previous = *array;
- FixedArrayBase trimmed;
-
- // First trim in one word steps.
- for (int i = 0; i < 10; i++) {
- trimmed = heap->LeftTrimFixedArray(previous, 1);
- HeapObject filler = HeapObject::FromAddress(previous.address());
- CHECK(filler.IsFreeSpaceOrFiller());
- CHECK(marking_state->IsBlack(trimmed));
- CHECK(marking_state->IsBlack(previous));
- previous = trimmed;
- }
-
- // Then trim in two and three word steps.
- for (int i = 2; i <= 3; i++) {
- for (int j = 0; j < 10; j++) {
- trimmed = heap->LeftTrimFixedArray(previous, i);
- HeapObject filler = HeapObject::FromAddress(previous.address());
- CHECK(filler.IsFreeSpaceOrFiller());
- CHECK(marking_state->IsBlack(trimmed));
- CHECK(marking_state->IsBlack(previous));
- previous = trimmed;
- }
- }
-
- heap::GcAndSweep(heap, OLD_SPACE);
-}
-
TEST(ContinuousRightTrimFixedArrayInBlackArea) {
if (!v8_flags.incremental_marking) return;
v8_flags.stress_concurrent_allocation = false; // For SimulateFullSpace.
@@ -6165,7 +6015,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
NonAtomicMarkingState* marking_state = heap->non_atomic_marking_state();
- CHECK(marking_state->IsBlack(*array));
+ CHECK(marking_state->IsMarked(*array));
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(start_address),
page->AddressToMarkbitIndex(end_address)));
@@ -6186,7 +6036,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
isolate->heap()->RightTrimFixedArray(*array, i);
filler = HeapObject::FromAddress(previous);
CHECK(filler.IsFreeSpaceOrFiller());
- CHECK(marking_state->IsWhite(filler));
+ CHECK(marking_state->IsUnmarked(filler));
}
}
@@ -6600,7 +6450,7 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
Handle<Map> map;
{
AlwaysAllocateScopeForTesting always_allocate(heap);
- map = isolate->factory()->NewMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
+ map = isolate->factory()->NewMap(BIGINT_TYPE, HeapNumber::kSize);
}
CHECK(heap->incremental_marking()->black_allocation());
Handle<HeapObject> object;
@@ -6935,32 +6785,38 @@ UNINITIALIZED_TEST(RestoreHeapLimit) {
reinterpret_cast<Isolate*>(v8::Isolate::New(create_params));
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
- OutOfMemoryState state;
- state.heap = heap;
- state.oom_triggered = false;
- heap->AddNearHeapLimitCallback(NearHeapLimitCallback, &state);
- heap->AutomaticallyRestoreInitialHeapLimit(0.5);
- const int kFixedArrayLength = 1000000;
+
{
- HandleScope handle_scope(isolate);
- while (!state.oom_triggered) {
- factory->NewFixedArray(kFixedArrayLength);
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
+
+ OutOfMemoryState state;
+ state.heap = heap;
+ state.oom_triggered = false;
+ heap->AddNearHeapLimitCallback(NearHeapLimitCallback, &state);
+ heap->AutomaticallyRestoreInitialHeapLimit(0.5);
+ const int kFixedArrayLength = 1000000;
+ {
+ HandleScope handle_scope(isolate);
+ while (!state.oom_triggered) {
+ factory->NewFixedArray(kFixedArrayLength);
+ }
}
- }
- heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
- state.oom_triggered = false;
- {
- HandleScope handle_scope(isolate);
- while (!state.oom_triggered) {
- factory->NewFixedArray(kFixedArrayLength);
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
+ state.oom_triggered = false;
+ {
+ HandleScope handle_scope(isolate);
+ while (!state.oom_triggered) {
+ factory->NewFixedArray(kFixedArrayLength);
+ }
}
+ CHECK_EQ(state.current_heap_limit, state.initial_heap_limit);
}
- CHECK_EQ(state.current_heap_limit, state.initial_heap_limit);
+
reinterpret_cast<v8::Isolate*>(isolate)->Dispose();
}
void HeapTester::UncommitUnusedMemory(Heap* heap) {
- if (!v8_flags.minor_mc) heap->new_space()->Shrink();
+ if (!v8_flags.minor_mc) SemiSpaceNewSpace::From(heap->new_space())->Shrink();
heap->memory_allocator()->unmapper()->EnsureUnmappingCompleted();
}
@@ -7038,7 +6894,7 @@ HEAP_TEST(MemoryReducerActivationForSmallHeaps) {
LocalContext env;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- CHECK_EQ(heap->memory_reducer()->state_.action, MemoryReducer::Action::kDone);
+ CHECK_EQ(heap->memory_reducer()->state_.id(), MemoryReducer::kDone);
HandleScope scope(isolate);
const size_t kActivationThreshold = 1 * MB;
size_t initial_capacity = heap->OldGenerationCapacity();
@@ -7046,7 +6902,7 @@ HEAP_TEST(MemoryReducerActivationForSmallHeaps) {
initial_capacity + kActivationThreshold) {
isolate->factory()->NewFixedArray(1 * KB, AllocationType::kOld);
}
- CHECK_EQ(heap->memory_reducer()->state_.action, MemoryReducer::Action::kWait);
+ CHECK_EQ(heap->memory_reducer()->state_.id(), MemoryReducer::kWait);
}
TEST(AllocateExternalBackingStore) {
@@ -7070,7 +6926,7 @@ TEST(CodeObjectRegistry) {
Heap* heap = isolate->heap();
CodePageCollectionMemoryModificationScopeForTesting code_scope(heap);
- Handle<Code> code1;
+ Handle<InstructionStream> code1;
HandleScope outer_scope(heap->isolate());
Address code2_address;
{
@@ -7078,7 +6934,7 @@ TEST(CodeObjectRegistry) {
CHECK(HeapTester::CodeEnsureLinearAllocationArea(
heap, MemoryChunkLayout::MaxRegularCodeObjectSize()));
code1 = DummyOptimizedCode(isolate);
- Handle<Code> code2 = DummyOptimizedCode(isolate);
+ Handle<InstructionStream> code2 = DummyOptimizedCode(isolate);
code2_address = code2->address();
CHECK_EQ(MemoryChunk::FromHeapObject(*code1),
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index f6fd45b229..97eabfb5c1 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -288,38 +288,6 @@ HEAP_TEST(InvalidatedSlotsRightTrimLargeFixedArray) {
CcTest::CollectGarbage(i::OLD_SPACE);
}
-HEAP_TEST(InvalidatedSlotsLeftTrimFixedArray) {
- if (!v8_flags.incremental_marking) return;
- v8_flags.manual_evacuation_candidates_selection = true;
- v8_flags.parallel_compaction = false;
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- Heap* heap = CcTest::heap();
- HandleScope scope(isolate);
- PagedSpace* old_space = heap->old_space();
- // Allocate a dummy page to be swept be the sweeper during evacuation.
- AllocateArrayOnFreshPage(isolate, old_space, 1);
- Handle<FixedArray> evacuated =
- AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
- Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
- heap::SimulateIncrementalMarking(heap);
- for (int i = 0; i + 1 < trimmed->length(); i++) {
- trimmed->set(i, *evacuated);
- }
- {
- HandleScope new_scope(isolate);
- Handle<HeapObject> dead = factory->NewFixedArray(1);
- for (int i = 1; i < trimmed->length(); i++) {
- trimmed->set(i, *dead);
- }
- heap->LeftTrimFixedArray(*trimmed, trimmed->length() - 1);
- }
- CcTest::CollectGarbage(i::NEW_SPACE);
- CcTest::CollectGarbage(i::OLD_SPACE);
-}
-
HEAP_TEST(InvalidatedSlotsFastToSlow) {
if (!v8_flags.incremental_marking) return;
v8_flags.manual_evacuation_candidates_selection = true;
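The removal above mirrors the deletions of LeftTrimFixedArrayInBlackArea in test-heap.cc and of the corresponding entry in heap-tester.h: left-trimming of FixedArrays is no longer exercised by these cctests, while the right-trimming path is kept and its assertions updated to the new marking predicates. A condensed sketch of the surviving right-trim check, using names from the hunks above:

  isolate->heap()->RightTrimFixedArray(*array, i);
  HeapObject filler = HeapObject::FromAddress(previous);
  CHECK(filler.IsFreeSpaceOrFiller());
  CHECK(marking_state->IsUnmarked(filler));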
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index d4d62728fb..6dd9ec471a 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -224,119 +224,6 @@ HEAP_TEST(DoNotEvacuatePinnedPages) {
}
}
-HEAP_TEST(ObjectStartBitmap) {
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope sc(CcTest::isolate());
-
- Heap* heap = isolate->heap();
- heap::SealCurrentObjects(heap);
-
- auto* factory = isolate->factory();
-
- Handle<HeapObject> h1 = factory->NewStringFromStaticChars("hello");
- Handle<HeapObject> h2 = factory->NewStringFromStaticChars("world");
-
- HeapObject obj1 = *h1;
- HeapObject obj2 = *h2;
- Page* page1 = Page::FromHeapObject(obj1);
- Page* page2 = Page::FromHeapObject(obj2);
-
- CHECK(page1->object_start_bitmap()->CheckBit(obj1.address()));
- CHECK(page2->object_start_bitmap()->CheckBit(obj2.address()));
-
- {
- // We need a safepoint for calling FindBasePtr.
- IsolateSafepointScope scope(heap);
-
- for (int k = 0; k < obj1.Size(); ++k) {
- Address obj1_inner_ptr = obj1.address() + k;
- CHECK_EQ(obj1.address(),
- page1->object_start_bitmap()->FindBasePtr(obj1_inner_ptr));
- }
- for (int k = 0; k < obj2.Size(); ++k) {
- Address obj2_inner_ptr = obj2.address() + k;
- CHECK_EQ(obj2.address(),
- page2->object_start_bitmap()->FindBasePtr(obj2_inner_ptr));
- }
- }
-
- // TODO(v8:12851): Patch the location of handle h2 with an inner pointer.
- // For now, garbage collection doesn't work with inner pointers in handles,
- // so we're sticking to a zero offset.
- const size_t offset = 0;
- h2.PatchValue(String::FromAddress(h2->address() + offset));
-
- CcTest::CollectAllGarbage();
-
- obj1 = *h1;
- obj2 = HeapObject::FromAddress(h2->address() - offset);
- page1 = Page::FromHeapObject(obj1);
- page2 = Page::FromHeapObject(obj2);
-
- CHECK(obj1.IsString());
- CHECK(obj2.IsString());
-
- // Bits set in the object_start_bitmap are not preserved when objects are
- // evacuated.
- CHECK(!page1->object_start_bitmap()->CheckBit(obj1.address()));
- CHECK(!page2->object_start_bitmap()->CheckBit(obj2.address()));
-
- {
- // We need a safepoint for calling FindBasePtr.
- IsolateSafepointScope scope(heap);
-
- // After FindBasePtr, the bits should be properly set again.
- for (int k = 0; k < obj1.Size(); ++k) {
- Address obj1_inner_ptr = obj1.address() + k;
- CHECK_EQ(obj1.address(),
- page1->object_start_bitmap()->FindBasePtr(obj1_inner_ptr));
- }
- CHECK(page1->object_start_bitmap()->CheckBit(obj1.address()));
- for (int k = obj2.Size() - 1; k >= 0; --k) {
- Address obj2_inner_ptr = obj2.address() + k;
- CHECK_EQ(obj2.address(),
- page2->object_start_bitmap()->FindBasePtr(obj2_inner_ptr));
- }
- CHECK(page2->object_start_bitmap()->CheckBit(obj2.address()));
- }
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-}
-
-// TODO(1600): compaction of map space is temporary removed from GC.
-#if 0
-static Handle<Map> CreateMap(Isolate* isolate) {
- return isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-}
-
-TEST(MapCompact) {
- v8_flags.max_map_space_pages = 16;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
-
- {
- v8::HandleScope sc;
- // keep allocating maps while pointers are still encodable and thus
- // mark compact is permitted.
- Handle<JSObject> root = factory->NewJSObjectFromMap(CreateMap());
- do {
- Handle<Map> map = CreateMap();
- map->set_prototype(*root);
- root = factory->NewJSObjectFromMap(map);
- } while (CcTest::heap()->map_space()->MapPointersEncodable());
- }
- // Now, as we don't have any handles to just allocated maps, we should
- // be able to trigger map compaction.
- // To give an additional chance to fail, try to force compaction which
- // should be impossible right now.
- CcTest::CollectAllGarbage(Heap::kForceCompactionMask);
- // And now map pointers should be encodable again.
- CHECK(CcTest::heap()->map_space()->MapPointersEncodable());
-}
-#endif
-
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#define V8_WITH_ASAN 1
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 6192eee2dc..5ed8781d13 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -136,9 +136,10 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
memory_allocator->AllocateLargePage(space, area_size, executable);
size_t reserved_size =
((executable == EXECUTABLE))
- ? allocatable_memory_area_offset +
- RoundUp(area_size, page_allocator->CommitPageSize()) +
- guard_size
+ ? RoundUp(allocatable_memory_area_offset +
+ RoundUp(area_size, page_allocator->CommitPageSize()) +
+ guard_size,
+ page_allocator->CommitPageSize())
: RoundUp(allocatable_memory_area_offset + area_size,
page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
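Aside: the adjusted expectation here rounds the whole executable reservation (header offset plus page-aligned area plus guard pages) up to the commit page size, instead of page-aligning only the area. A standalone sketch of that arithmetic, with invented sizes; the real values come from MemoryChunkLayout and the platform page allocator:

#include <cstddef>
#include <cstdio>

// Round x up to the next multiple of `multiple`.
static size_t RoundUp(size_t x, size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  // Illustrative numbers only.
  const size_t commit_page_size = 4096;
  const size_t header_offset = 320;   // allocatable_memory_area_offset
  const size_t area_size = 10000;     // requested code area
  const size_t guard_size = 2 * commit_page_size;

  // Old expectation: only the area itself was page-aligned.
  size_t old_reserved =
      header_offset + RoundUp(area_size, commit_page_size) + guard_size;
  // New expectation: the whole reservation is page-aligned as well.
  size_t new_reserved = RoundUp(old_reserved, commit_page_size);

  printf("old=%zu new=%zu\n", old_reserved, new_reserved);  // old=20800 new=24576
  return 0;
}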
@@ -318,6 +319,7 @@ TEST(SemiSpaceNewSpace) {
TEST(PagedNewSpace) {
if (v8_flags.single_generation) return;
+ ManualGCScope manual_gc_scope;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
@@ -664,7 +666,8 @@ HEAP_TEST(Regress777177) {
old_space->AllocateRaw(max_object_size, kTaggedAligned);
HeapObject obj = result.ToObjectChecked();
// Simulate allocation folding moving the top pointer back.
- old_space->SetTopAndLimit(obj.address(), old_space->limit());
+ old_space->SetTopAndLimit(obj.address(), old_space->limit(),
+ old_space->limit());
}
{
@@ -913,17 +916,19 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
// Allocated objects size.
CHECK_EQ(faked_space->Size(), 16);
- size_t committed_memory = RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
- MemoryAllocator::GetCommitPageSize());
+ size_t committed_memory =
+ RoundUp(MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage() +
+ faked_space->Size(),
+ MemoryAllocator::GetCommitPageSize());
// Amount of OS allocated memory.
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
// Capacity will be one OS page minus the page header.
- CHECK_EQ(faked_space->Capacity(),
- committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
+ CHECK_EQ(
+ faked_space->Capacity(),
+ committed_memory - MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage());
}
TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
@@ -968,16 +973,18 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
// with pointer compression.
CHECK_EQ(faked_space->Size(), expected_size);
- size_t committed_memory = RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space->Size(),
- MemoryAllocator::GetCommitPageSize());
+ size_t committed_memory =
+ RoundUp(MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage() +
+ faked_space->Size(),
+ MemoryAllocator::GetCommitPageSize());
CHECK_EQ(faked_space->CommittedMemory(), committed_memory);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), committed_memory);
// Capacity will be 3 OS pages minus the page header.
- CHECK_EQ(faked_space->Capacity(),
- committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
+ CHECK_EQ(
+ faked_space->Capacity(),
+ committed_memory - MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage());
}
TEST(ReadOnlySpaceMetrics_TwoPages) {
@@ -1016,9 +1023,9 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
CHECK_EQ(faked_space->Size(), object_size * 2);
// Amount of OS allocated memory.
- size_t committed_memory_per_page =
- RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
- MemoryAllocator::GetCommitPageSize());
+ size_t committed_memory_per_page = RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage() + object_size,
+ MemoryAllocator::GetCommitPageSize());
CHECK_EQ(faked_space->CommittedMemory(), 2 * committed_memory_per_page);
CHECK_EQ(faked_space->CommittedPhysicalMemory(),
2 * committed_memory_per_page);
@@ -1026,9 +1033,10 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
// Capacity will be the space up to the amount of committed memory minus the
// page headers.
size_t capacity_per_page =
- RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
- MemoryAllocator::GetCommitPageSize()) -
- MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage() + object_size,
+ MemoryAllocator::GetCommitPageSize()) -
+ MemoryChunkLayout::ObjectStartOffsetInReadOnlyPage();
CHECK_EQ(faked_space->Capacity(), 2 * capacity_per_page);
}
diff --git a/deps/v8/test/cctest/heap/test-unmapper.cc b/deps/v8/test/cctest/heap/test-unmapper.cc
index cbc1fa5af8..c31a2ad227 100644
--- a/deps/v8/test/cctest/heap/test-unmapper.cc
+++ b/deps/v8/test/cctest/heap/test-unmapper.cc
@@ -45,6 +45,7 @@ UNINITIALIZED_TEST(EagerUnmappingInCollectAllAvailableGarbage) {
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
+ v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = CcTest::NewContext(isolate);
v8::Context::Scope context_scope(context);
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 23fa52f553..8e24eb8bb1 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -31,6 +31,8 @@ TEST(WeakReferencesBasic) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
HandleScope outer_scope(isolate);
Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
@@ -51,10 +53,9 @@ TEST(WeakReferencesBasic) {
assm.nop(); // supported on all architectures
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<CodeT> code = ToCodeT(
- Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(),
- isolate);
- CHECK(code->IsCodeT());
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ CHECK(code->IsCode());
lh->set_data1(HeapObjectReference::Weak(*code));
HeapObject code_heap_object;
@@ -185,6 +186,7 @@ TEST(ObjectMovesBeforeClearingWeakField) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
@@ -356,6 +358,7 @@ TEST(WeakArraysBasic) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
const int length = 4;
@@ -427,6 +430,7 @@ TEST(WeakArrayListBasic) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
Handle<WeakArrayList> array(ReadOnlyRoots(heap).empty_weak_array_list(),
@@ -723,6 +727,7 @@ TEST(PrototypeUsersCompacted) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
Handle<WeakArrayList> array(ReadOnlyRoots(heap).empty_weak_array_list(),
diff --git a/deps/v8/test/cctest/heap/test-write-barrier.cc b/deps/v8/test/cctest/heap/test-write-barrier.cc
index 1608a5f716..051476bfb6 100644
--- a/deps/v8/test/cctest/heap/test-write-barrier.cc
+++ b/deps/v8/test/cctest/heap/test-write-barrier.cc
@@ -41,19 +41,18 @@ HEAP_TEST(WriteBarrier_Marking) {
FixedArray host = FixedArray::cast(objects->get(0));
HeapObject value1 = HeapObject::cast(objects->get(1));
HeapObject value2 = HeapObject::cast(objects->get(2));
- CHECK(heap->marking_state()->IsWhite(host));
- CHECK(heap->marking_state()->IsWhite(value1));
+ CHECK(heap->marking_state()->IsUnmarked(host));
+ CHECK(heap->marking_state()->IsUnmarked(value1));
WriteBarrier::Marking(host, host.RawFieldOfElementAt(0), value1);
- CHECK_EQ(V8_CONCURRENT_MARKING_BOOL, heap->marking_state()->IsGrey(value1));
- heap->marking_state()->WhiteToGrey(host);
- heap->marking_state()->GreyToBlack(host);
- CHECK(heap->marking_state()->IsWhite(value2));
+ CHECK(heap->marking_state()->IsGrey(value1));
+ heap->marking_state()->TryMarkAndAccountLiveBytes(host);
+ CHECK(heap->marking_state()->IsUnmarked(value2));
WriteBarrier::Marking(host, host.RawFieldOfElementAt(0), value2);
CHECK(heap->marking_state()->IsGrey(value2));
heap::SimulateIncrementalMarking(CcTest::heap(), true);
- CHECK(heap->marking_state()->IsBlack(host));
- CHECK(heap->marking_state()->IsBlack(value1));
- CHECK(heap->marking_state()->IsBlack(value2));
+ CHECK(heap->marking_state()->IsMarked(host));
+ CHECK(heap->marking_state()->IsMarked(value1));
+ CHECK(heap->marking_state()->IsMarked(value2));
}
HEAP_TEST(WriteBarrier_MarkingExtension) {
@@ -75,16 +74,16 @@ HEAP_TEST(WriteBarrier_MarkingExtension) {
}
heap::SimulateIncrementalMarking(CcTest::heap(), false);
JSArrayBuffer host = JSArrayBuffer::cast(objects->get(0));
- CHECK(heap->marking_state()->IsWhite(host));
+ CHECK(heap->marking_state()->IsUnmarked(host));
CHECK(!extension->IsMarked());
WriteBarrier::Marking(host, extension);
// Concurrent marking barrier should mark this object.
- CHECK_EQ(V8_CONCURRENT_MARKING_BOOL, extension->IsMarked());
+ CHECK(extension->IsMarked());
// Keep object alive using the global handle.
v8::Global<ArrayBuffer> global_host(CcTest::isolate(),
Utils::ToLocal(handle(host, isolate)));
heap::SimulateIncrementalMarking(CcTest::heap(), true);
- CHECK(heap->marking_state()->IsBlack(host));
+ CHECK(heap->marking_state()->IsMarked(host));
CHECK(extension->IsMarked());
}
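Aside: the renames in these two tests track the marking-state API change visible above: IsWhite becomes IsUnmarked, IsBlack becomes IsMarked, and the explicit WhiteToGrey/GreyToBlack steps collapse into TryMarkAndAccountLiveBytes. A toy standalone model, only to illustrate what the renamed predicates assert; this is not V8's MarkingState:

#include <cassert>

// Two-state model: an object is either unmarked or marked; the "grey" objects
// in the test are marked objects that are still on the marking worklist.
struct Obj {
  bool marked = false;
};

bool IsUnmarked(const Obj& o) { return !o.marked; }
bool IsMarked(const Obj& o) { return o.marked; }

// Stand-in for TryMarkAndAccountLiveBytes: marks the object, returning false
// if it was already marked.
bool TryMark(Obj& o) {
  if (o.marked) return false;
  o.marked = true;
  return true;
}

int main() {
  Obj host;
  assert(IsUnmarked(host));
  assert(TryMark(host));   // First marking attempt succeeds.
  assert(IsMarked(host));
  assert(!TryMark(host));  // A second attempt is a no-op.
  return 0;
}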
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.cc b/deps/v8/test/cctest/setup-isolate-for-tests.cc
index 8aae2de769..fd2823effd 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.cc
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.cc
@@ -4,20 +4,22 @@
#include "test/cctest/setup-isolate-for-tests.h"
+// Almost identical to setup-isolate-full.cc. The difference is that, while
+// testing, the embedded snapshot blob can be missing.
+
namespace v8 {
namespace internal {
-void SetupIsolateDelegateForTests::SetupBuiltins(Isolate* isolate) {
- if (create_heap_objects_) {
- SetupBuiltinsInternal(isolate);
- }
+bool SetupIsolateDelegateForTests::SetupHeap(Isolate* isolate,
+ bool create_heap_objects) {
+ if (!create_heap_objects) return true;
+ return SetupHeapInternal(isolate);
}
-bool SetupIsolateDelegateForTests::SetupHeap(Heap* heap) {
- if (create_heap_objects_) {
- return SetupHeapInternal(heap);
- }
- return true;
+void SetupIsolateDelegateForTests::SetupBuiltins(Isolate* isolate,
+ bool compile_builtins) {
+ if (!compile_builtins) return;
+ SetupBuiltinsInternal(isolate);
}
} // namespace internal
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.h b/deps/v8/test/cctest/setup-isolate-for-tests.h
index f9335338a3..409bfb0d93 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.h
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.h
@@ -12,13 +12,10 @@ namespace internal {
class SetupIsolateDelegateForTests : public SetupIsolateDelegate {
public:
- explicit SetupIsolateDelegateForTests(bool create_heap_objects)
- : SetupIsolateDelegate(create_heap_objects) {}
- ~SetupIsolateDelegateForTests() override = default;
+ SetupIsolateDelegateForTests() = default;
- void SetupBuiltins(Isolate* isolate) override;
-
- bool SetupHeap(Heap* heap) override;
+ bool SetupHeap(Isolate* isolate, bool create_heap_objects) override;
+ void SetupBuiltins(Isolate* isolate, bool compile_builtins) override;
};
} // namespace internal
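Aside: taken together, the two files above move the test delegate to a stateless interface; the create_heap_objects and compile_builtins decisions are now per-call arguments rather than constructor state. A hypothetical delegate following the same shape (V8-internal types, so this is a sketch rather than a standalone program):

// Hypothetical delegate mirroring the new signatures shown in the diff above.
class SketchSetupIsolateDelegate : public v8::internal::SetupIsolateDelegate {
 public:
  SketchSetupIsolateDelegate() = default;

  bool SetupHeap(v8::internal::Isolate* isolate,
                 bool create_heap_objects) override {
    if (!create_heap_objects) return true;  // Heap comes from a snapshot.
    return SetupHeapInternal(isolate);
  }

  void SetupBuiltins(v8::internal::Isolate* isolate,
                     bool compile_builtins) override {
    if (!compile_builtins) return;  // Builtins come from the embedded blob.
    SetupBuiltinsInternal(isolate);
  }
};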
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 5c45090370..4cba63b0ac 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -23,7 +23,7 @@ namespace {
void TestStubCacheOffsetCalculation(StubCache::Table table) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester data(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester data(isolate, JSParameterCount(kNumParams));
AccessorAssembler m(data.state());
{
@@ -59,16 +59,11 @@ void TestStubCacheOffsetCalculation(StubCache::Table table) {
};
Handle<Map> maps[] = {
- factory->cell_map(),
- Map::Create(isolate, 0),
- factory->meta_map(),
- factory->code_map(),
- Map::Create(isolate, 0),
- factory->hash_table_map(),
- factory->symbol_map(),
- factory->string_map(),
- Map::Create(isolate, 0),
- factory->sloppy_arguments_elements_map(),
+ factory->cell_map(), Map::Create(isolate, 0),
+ factory->meta_map(), factory->instruction_stream_map(),
+ Map::Create(isolate, 0), factory->hash_table_map(),
+ factory->symbol_map(), factory->string_map(),
+ Map::Create(isolate, 0), factory->sloppy_arguments_elements_map(),
};
for (size_t name_index = 0; name_index < arraysize(names); name_index++) {
@@ -119,7 +114,7 @@ TEST(TryProbeStubCache) {
using Label = CodeStubAssembler::Label;
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester data(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester data(isolate, JSParameterCount(kNumParams));
AccessorAssembler m(data.state());
StubCache stub_cache(isolate);
@@ -215,8 +210,7 @@ TEST(TryProbeStubCache) {
Handle<Name> name = names[index % names.size()];
Handle<JSObject> receiver = receivers[index % receivers.size()];
Handle<Code> handler = handlers[index % handlers.size()];
- stub_cache.Set(*name, receiver->map(),
- MaybeObject::FromObject(ToCodeT(*handler)));
+ stub_cache.Set(*name, receiver->map(), MaybeObject::FromObject(*handler));
}
// Perform some queries.
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 08a4459817..d51559316e 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -533,7 +533,7 @@ static void StackCheck(Local<String> name,
for (int i = 0; !iter.done(); i++) {
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
- i::CodeT code = frame->LookupCodeT().ToCodeT();
+ i::Code code = frame->LookupCode();
CHECK(code.contains(isolate, frame->pc()));
iter.Advance();
}
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index ad30146664..5977ecd230 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -6,6 +6,7 @@
#include "src/base/strings.h"
#include "src/objects/js-array-buffer-inl.h"
#include "test/cctest/test-api.h"
+#include "test/common/flag-utils.h"
using ::v8::Array;
using ::v8::Context;
@@ -448,11 +449,29 @@ THREADED_TEST(ArrayBuffer_NewBackingStore) {
std::shared_ptr<v8::BackingStore> backing_store =
v8::ArrayBuffer::NewBackingStore(isolate, 100);
CHECK(!backing_store->IsShared());
+ CHECK(!backing_store->IsResizableByUserJavaScript());
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, backing_store);
CHECK_EQ(backing_store.get(), ab->GetBackingStore().get());
CHECK_EQ(backing_store->Data(), ab->Data());
}
+THREADED_TEST(ArrayBuffer_NewResizableBackingStore) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ std::shared_ptr<v8::BackingStore> backing_store =
+ v8::ArrayBuffer::NewResizableBackingStore(32, 1024);
+ CHECK(!backing_store->IsShared());
+ CHECK(backing_store->IsResizableByUserJavaScript());
+ CHECK_EQ(1024, backing_store->MaxByteLength());
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, backing_store);
+ CHECK_EQ(backing_store.get(), ab->GetBackingStore().get());
+ CHECK_EQ(backing_store->Data(), ab->Data());
+ CHECK_EQ(backing_store->MaxByteLength(), ab->MaxByteLength());
+}
+
THREADED_TEST(SharedArrayBuffer_NewBackingStore) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -460,6 +479,7 @@ THREADED_TEST(SharedArrayBuffer_NewBackingStore) {
std::shared_ptr<v8::BackingStore> backing_store =
v8::SharedArrayBuffer::NewBackingStore(isolate, 100);
CHECK(backing_store->IsShared());
+ CHECK(!backing_store->IsResizableByUserJavaScript());
Local<v8::SharedArrayBuffer> ab =
v8::SharedArrayBuffer::New(isolate, backing_store);
CHECK_EQ(backing_store.get(), ab->GetBackingStore().get());
@@ -810,3 +830,48 @@ TEST(BackingStore_ReallocateShared) {
v8::BackingStore::Reallocate(isolate, std::move(backing_store), 10);
CHECK(new_backing_store->IsShared());
}
+
+TEST(ArrayBuffer_Resizable) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ const char rab_source[] = "new ArrayBuffer(32, { maxByteLength: 1024 });";
+ v8::Local<v8::ArrayBuffer> rab = CompileRun(rab_source).As<v8::ArrayBuffer>();
+ CHECK(rab->GetBackingStore()->IsResizableByUserJavaScript());
+ CHECK_EQ(32, rab->ByteLength());
+ CHECK_EQ(1024, rab->MaxByteLength());
+
+ const char gsab_source[] =
+ "new SharedArrayBuffer(32, { maxByteLength: 1024 });";
+ v8::Local<v8::SharedArrayBuffer> gsab =
+ CompileRun(gsab_source).As<v8::SharedArrayBuffer>();
+ CHECK(gsab->GetBackingStore()->IsResizableByUserJavaScript());
+ CHECK_EQ(32, gsab->ByteLength());
+ CHECK_EQ(1024, gsab->MaxByteLength());
+ CHECK_EQ(gsab->MaxByteLength(), gsab->GetBackingStore()->MaxByteLength());
+}
+
+TEST(ArrayBuffer_FixedLength) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+  // A fixed-length ArrayBuffer's byte length is equal to its max byte length.
+ v8::Local<v8::ArrayBuffer> ab =
+ CompileRun("new ArrayBuffer(32);").As<v8::ArrayBuffer>();
+ CHECK(!ab->GetBackingStore()->IsResizableByUserJavaScript());
+ CHECK_EQ(32, ab->ByteLength());
+ CHECK_EQ(32, ab->MaxByteLength());
+ CHECK_EQ(ab->MaxByteLength(), ab->GetBackingStore()->MaxByteLength());
+ v8::Local<v8::SharedArrayBuffer> sab =
+ CompileRun("new SharedArrayBuffer(32);").As<v8::SharedArrayBuffer>();
+ CHECK(!sab->GetBackingStore()->IsResizableByUserJavaScript());
+ CHECK_EQ(32, sab->ByteLength());
+ CHECK_EQ(32, sab->MaxByteLength());
+ CHECK_EQ(sab->MaxByteLength(), sab->GetBackingStore()->MaxByteLength());
+}
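Aside: these new tests exercise the resizable (RAB/GSAB) buffer surface: NewResizableBackingStore, MaxByteLength, and IsResizableByUserJavaScript. A minimal embedder-style sketch in the same cctest style, assuming the harmony_rab_gsab flag is enabled as in the tests:

void CreateResizableBuffer(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);

  // Backing store that starts at 32 bytes and may grow up to 1024 bytes.
  std::shared_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewResizableBackingStore(32, 1024);
  CHECK(store->IsResizableByUserJavaScript());
  CHECK_EQ(1024u, store->MaxByteLength());

  v8::Local<v8::ArrayBuffer> ab =
      v8::ArrayBuffer::New(isolate, std::move(store));
  CHECK_EQ(32u, ab->ByteLength());
  CHECK_EQ(1024u, ab->MaxByteLength());  // Also visible to JS code.
}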
diff --git a/deps/v8/test/cctest/test-api-typed-array.cc b/deps/v8/test/cctest/test-api-typed-array.cc
index 42402d5d86..866f979442 100644
--- a/deps/v8/test/cctest/test-api-typed-array.cc
+++ b/deps/v8/test/cctest/test-api-typed-array.cc
@@ -5,6 +5,7 @@
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-buffer.h"
#include "test/cctest/test-api.h"
using ::v8::Array;
@@ -454,7 +455,7 @@ THREADED_TEST(DataView) {
// TODO(v8:11111): Use API functions for testing these, once they're exposed
// via the API.
- i::Handle<i::JSDataView> i_dv = v8::Utils::OpenHandle(*dv);
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv = v8::Utils::OpenHandle(*dv);
CHECK(!i_dv->is_length_tracking());
CHECK(!i_dv->is_backed_by_rab());
}
@@ -531,7 +532,7 @@ THREADED_TEST(SharedDataView) {
// TODO(v8:11111): Use API functions for testing these, once they're exposed
// via the API.
- i::Handle<i::JSDataView> i_dv = v8::Utils::OpenHandle(*dv);
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv = v8::Utils::OpenHandle(*dv);
CHECK(!i_dv->is_length_tracking());
CHECK(!i_dv->is_backed_by_rab());
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 2fd28f0689..af7dfbf03c 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -80,6 +80,7 @@
#include "test/common/flag-utils.h"
#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-engine.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
@@ -531,6 +532,9 @@ class TestOneByteResource : public String::ExternalOneByteStringResource {
THREADED_TEST(ScriptUsingStringResource) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
int dispose_count = 0;
const char* c_source = "1 + 2 * 3";
uint16_t* two_byte_source = AsciiToTwoByteString(c_source);
@@ -563,6 +567,9 @@ THREADED_TEST(ScriptUsingStringResource) {
THREADED_TEST(ScriptUsingOneByteStringResource) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
int dispose_count = 0;
const char* c_source = "1 + 2 * 3";
{
@@ -596,6 +603,9 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
THREADED_TEST(ScriptMakingExternalString) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
int dispose_count = 0;
uint16_t* two_byte_source = AsciiToTwoByteString("1 + 2 * 3");
{
@@ -630,6 +640,9 @@ THREADED_TEST(ScriptMakingExternalString) {
THREADED_TEST(ScriptMakingExternalOneByteString) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
int dispose_count = 0;
const char* c_source = "1 + 2 * 3";
{
@@ -665,31 +678,29 @@ TEST(MakingExternalStringConditions) {
CcTest::CollectGarbage(i::NEW_SPACE);
}
- uint16_t* two_byte_string = AsciiToTwoByteString("s1");
- Local<String> tiny_local_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string)
- .ToLocalChecked();
- i::DeleteArray(two_byte_string);
+ Local<String> tiny_local_string = v8_str("\xCF\x80");
+ Local<String> local_string = v8_str("s1234\xCF\x80");
- two_byte_string = AsciiToTwoByteString("s1234");
- Local<String> local_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string)
- .ToLocalChecked();
- i::DeleteArray(two_byte_string);
+ CHECK(!tiny_local_string->IsOneByte());
+ CHECK(!local_string->IsOneByte());
if (!i::v8_flags.single_generation) {
// We should refuse to externalize new space strings.
- CHECK(!local_string->CanMakeExternal());
+ CHECK(!local_string->CanMakeExternal(String::Encoding::TWO_BYTE_ENCODING));
// Trigger full GC so that the newly allocated string moves to old gen.
CcTest::CollectGarbage(i::OLD_SPACE);
}
// Old space strings should be accepted.
- CHECK(local_string->CanMakeExternal());
+ CHECK(local_string->CanMakeExternal(String::Encoding::TWO_BYTE_ENCODING));
// Tiny strings are not in-place externalizable when pointer compression is
// enabled, but they are if the sandbox is enabled.
- CHECK_EQ(V8_ENABLE_SANDBOX_BOOL || i::kTaggedSize == i::kSystemPointerSize,
- tiny_local_string->CanMakeExternal());
+ CHECK_EQ(
+ V8_ENABLE_SANDBOX_BOOL || i::kTaggedSize == i::kSystemPointerSize,
+ tiny_local_string->CanMakeExternal(String::Encoding::TWO_BYTE_ENCODING));
+
+ // Change of representation is not allowed.
+ CHECK(!local_string->CanMakeExternal(String::Encoding::ONE_BYTE_ENCODING));
}
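Aside: CanMakeExternal now takes the target encoding, and a change of representation is rejected, as the checks above show. A small sketch of externalizing a one-byte string under the new signature; the resource class and string contents are illustrative only:

class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
 public:
  StaticOneByteResource(const char* data, size_t length)
      : data_(data), length_(length) {}
  const char* data() const override { return data_; }
  size_t length() const override { return length_; }

 private:
  const char* data_;
  size_t length_;
};

void TryExternalize(v8::Local<v8::String> str) {
  // Ask only for the encoding the string already has; a representation change
  // is rejected by the encoding-aware CanMakeExternal.
  if (str->IsOneByte() &&
      str->CanMakeExternal(v8::String::Encoding::ONE_BYTE_ENCODING)) {
    // The resource must outlive the string; static storage keeps the sketch
    // simple.
    static StaticOneByteResource resource("externalized contents", 21);
    CHECK(str->MakeExternal(&resource));
  }
}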
@@ -706,18 +717,26 @@ TEST(MakingExternalOneByteStringConditions) {
Local<String> tiny_local_string = v8_str("s");
Local<String> local_string = v8_str("s1234");
+ CHECK(tiny_local_string->IsOneByte());
+ CHECK(local_string->IsOneByte());
+
// Single-character strings should not be externalized because they
// are always in the RO-space.
- CHECK(!tiny_local_string->CanMakeExternal());
+ CHECK(
+ !tiny_local_string->CanMakeExternal(String::Encoding::ONE_BYTE_ENCODING));
if (!i::v8_flags.single_generation) {
// We should refuse to externalize new space strings.
- CHECK(!local_string->CanMakeExternal());
+ CHECK(!local_string->CanMakeExternal(String::Encoding::ONE_BYTE_ENCODING));
// Trigger full GC so that the newly allocated string moves to old gen.
CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(!tiny_local_string->CanMakeExternal());
+ CHECK(!tiny_local_string->CanMakeExternal(
+ String::Encoding::ONE_BYTE_ENCODING));
}
// Old space strings should be accepted.
- CHECK(local_string->CanMakeExternal());
+ CHECK(local_string->CanMakeExternal(String::Encoding::ONE_BYTE_ENCODING));
+
+ // Change of representation is not allowed.
+ CHECK(!local_string->CanMakeExternal(String::Encoding::TWO_BYTE_ENCODING));
}
@@ -752,8 +771,6 @@ TEST(MakingExternalUnalignedOneByteString) {
// Trigger GCs and force evacuation.
CcTest::CollectAllGarbage();
- i::ScanStackModeScopeForTesting no_stack_scanning(
- CcTest::heap(), i::Heap::ScanStackMode::kNone);
CcTest::heap()->CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask,
i::GarbageCollectionReason::kTesting);
}
@@ -855,6 +872,9 @@ TEST(ScavengeExternalString) {
ManualGCScope manual_gc_scope;
i::v8_flags.stress_compaction = false;
i::v8_flags.gc_global = false;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
int dispose_count = 0;
bool in_young_generation = false;
{
@@ -880,6 +900,9 @@ TEST(ScavengeExternalOneByteString) {
ManualGCScope manual_gc_scope;
i::v8_flags.stress_compaction = false;
i::v8_flags.gc_global = false;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
int dispose_count = 0;
bool in_young_generation = false;
{
@@ -925,6 +948,9 @@ int TestOneByteResourceWithDisposeControl::dispose_calls = 0;
TEST(ExternalStringWithDisposeHandling) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
const char* c_source = "1 + 2 * 3";
// Use a stack allocated external string resource allocated object.
@@ -2431,7 +2457,6 @@ THREADED_TEST(TestObjectTemplateReflectConstruct) {
fun_B->SetClassName(class_name);
v8::Local<v8::String> subclass_name = v8_str("C");
- v8::Local<v8::Object> b_proto;
v8::Local<v8::Object> c_proto;
// Perform several iterations to make sure the cache doesn't break
// subclassing.
@@ -3091,15 +3116,17 @@ THREADED_TEST(SetAlignedPointerInInternalFields) {
delete[] heap_allocated_2;
}
-static void CheckAlignedPointerInEmbedderData(LocalContext* env, int index,
- void* value) {
+static void CheckAlignedPointerInEmbedderData(LocalContext* env,
+ v8::Local<v8::Object> some_obj,
+ int index, void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
(*env)->SetAlignedPointerInEmbedderData(index, value);
CcTest::CollectAllGarbage();
CHECK_EQ(value, (*env)->GetAlignedPointerFromEmbedderData(index));
+ CHECK_EQ(value,
+ some_obj->GetAlignedPointerFromEmbedderDataInCreationContext(index));
}
-
static void* AlignedTestPointer(int i) {
return reinterpret_cast<void*>(i * 1234);
}
@@ -3107,24 +3134,27 @@ static void* AlignedTestPointer(int i) {
THREADED_TEST(EmbedderDataAlignedPointers) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- CheckAlignedPointerInEmbedderData(&env, 0, nullptr);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+
+ CheckAlignedPointerInEmbedderData(&env, obj, 0, nullptr);
CHECK_EQ(1, (*env)->GetNumberOfEmbedderDataFields());
int* heap_allocated = new int[100];
- CheckAlignedPointerInEmbedderData(&env, 1, heap_allocated);
+ CheckAlignedPointerInEmbedderData(&env, obj, 1, heap_allocated);
CHECK_EQ(2, (*env)->GetNumberOfEmbedderDataFields());
delete[] heap_allocated;
int stack_allocated[100];
- CheckAlignedPointerInEmbedderData(&env, 2, stack_allocated);
+ CheckAlignedPointerInEmbedderData(&env, obj, 2, stack_allocated);
CHECK_EQ(3, (*env)->GetNumberOfEmbedderDataFields());
// The aligned pointer must have the top bits be zero on 64-bit machines (at
// least if the sandboxed external pointers are enabled).
void* huge = reinterpret_cast<void*>(0x0000fffffffffffe);
- CheckAlignedPointerInEmbedderData(&env, 3, huge);
+ CheckAlignedPointerInEmbedderData(&env, obj, 3, huge);
CHECK_EQ(4, (*env)->GetNumberOfEmbedderDataFields());
// Test growing of the embedder data's backing store.
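Aside: the helper above now also reads the pointer back through the new v8::Object accessor, which resolves the object's creation context first. A minimal sketch of that round trip, with cctest-style checks:

void StoreAndReadBack(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);

  static int payload = 42;  // Any suitably aligned, long-lived pointee.
  constexpr int kIndex = 1;
  context->SetAlignedPointerInEmbedderData(kIndex, &payload);

  // Objects created inside `context` report it as their creation context, so
  // the pointer can be fetched without holding the context itself.
  v8::Local<v8::Object> obj = v8::Object::New(isolate);
  CHECK_EQ(static_cast<void*>(&payload),
           obj->GetAlignedPointerFromEmbedderDataInCreationContext(kIndex));
}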
@@ -4199,6 +4229,8 @@ void FirstPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
TEST(TwoPassPhantomCallbacks) {
auto isolate = CcTest::isolate();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
GCCallbackMetadata metadata;
const size_t kLength = 20;
for (size_t i = 0; i < kLength; ++i) {
@@ -4213,6 +4245,8 @@ TEST(TwoPassPhantomCallbacks) {
TEST(TwoPassPhantomCallbacksNestedGc) {
auto isolate = CcTest::isolate();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
GCCallbackMetadata metadata;
const size_t kLength = 20;
TwoPassCallbackData* array[kLength];
@@ -4233,6 +4267,8 @@ TEST(TwoPassPhantomCallbacksNestedGc) {
// the second pass callback can still execute JS as per its API contract.
TEST(TwoPassPhantomCallbacksTriggeredByStringAlloc) {
auto isolate = CcTest::isolate();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
GCCallbackMetadata metadata;
auto data = new TwoPassCallbackData(isolate, &metadata);
data->SetWeak();
@@ -7600,8 +7636,11 @@ static void SetFlag(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
static void IndependentWeakHandle(bool global_gc, bool interlinked) {
ManualGCScope manual_gc_scope;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
// Parallel scavenge introduces too much fragmentation.
i::v8_flags.parallel_scavenge = false;
+
v8::Isolate* iso = CcTest::isolate();
v8::HandleScope scope(iso);
v8::Local<Context> context = Context::New(iso);
@@ -7706,6 +7745,8 @@ void InternalFieldCallback(bool global_gc) {
// which prevents it from being reclaimed and the callbacks from being
// executed.
ManualGCScope manual_gc_scope;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -7741,6 +7782,7 @@ void InternalFieldCallback(bool global_gc) {
handle.SetWeak<v8::Persistent<v8::Object>>(
&handle, CheckInternalFields, v8::WeakCallbackType::kInternalFields);
}
+
if (i::v8_flags.single_generation || global_gc) {
CcTest::CollectAllGarbage();
} else {
@@ -7847,6 +7889,8 @@ THREADED_TEST(GCFromWeakCallbacks) {
v8::HandleScope scope(isolate);
v8::Local<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
if (i::v8_flags.single_generation) {
FlagAndPersistent object;
@@ -13009,12 +13053,13 @@ bool ApiTestFuzzer::NextThread() {
void ApiTestFuzzer::Run() {
- // When it is our turn...
+ // Wait until it is our turn.
gate_.Wait();
{
- // ... get the V8 lock
+ // Get the V8 lock.
v8::Locker locker(CcTest::isolate());
- // ... and start running the test.
+ // Start running the test, which will enter the isolate and exit it when it
+ // finishes.
CallTest();
}
// This test finished.
@@ -13055,11 +13100,18 @@ static void CallTestNumber(int test_number) {
void ApiTestFuzzer::RunAllTests() {
+  // This method is called when running each THREADED_TEST, which is an
+  // initialized test that has entered the isolate at this point. We need to
+  // exit the isolate, so that the fuzzer threads can enter it in turn while
+  // running their tests.
+ CcTest::isolate()->Exit();
// Set off the first test.
current_ = -1;
NextThread();
// Wait till they are all done.
all_tests_done_.Wait();
+ // We enter the isolate again, to prepare for teardown.
+ CcTest::isolate()->Enter();
}
@@ -13077,10 +13129,16 @@ int ApiTestFuzzer::GetNextTestNumber() {
void ApiTestFuzzer::ContextSwitch() {
// If the new thread is the same as the current thread there is nothing to do.
if (NextThread()) {
- // Now it can start.
- v8::Unlocker unlocker(CcTest::isolate());
- // Wait till someone starts us again.
- gate_.Wait();
+ // Exit the isolate from this thread.
+ CcTest::i_isolate()->Exit();
+ {
+ // Now the new thread can start.
+ v8::Unlocker unlocker(CcTest::isolate());
+ // Wait till someone starts us again.
+ gate_.Wait();
+ }
+ // Enter the isolate from this thread again.
+ CcTest::i_isolate()->Enter();
// And we're off.
}
}
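Aside: the fuzzer now explicitly exits the isolate on the yielding thread before unlocking, and re-enters it afterwards, so the isolate is not left entered on this thread while another fuzzer thread enters it under the lock. The same hand-off pattern with the public API, as a sketch:

// Sketch of the cooperative hand-off used above (the fuzzer goes through the
// internal isolate pointer, which is equivalent here).
void YieldIsolateToOtherThread(v8::Isolate* isolate) {
  isolate->Exit();  // Stop being the thread that has entered the isolate.
  {
    v8::Unlocker unlocker(isolate);  // Let another locker thread take over.
    // ... block here until it is this thread's turn again ...
  }
  isolate->Enter();  // Re-enter once we hold the lock again.
}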
@@ -13295,6 +13353,8 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
int count = GetGlobalObjectsCount();
@@ -13346,6 +13406,8 @@ static void WeakApiCallback(
TEST(WeakCallbackApi) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
i::GlobalHandles* globals =
@@ -13521,7 +13583,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
env->Exit();
}
-static v8::base::HashMap* code_map = nullptr;
+static v8::base::HashMap* instruction_stream_map = nullptr;
static v8::base::HashMap* jitcode_line_info = nullptr;
static int saw_bar = 0;
static int move_events = 0;
@@ -13564,7 +13626,7 @@ static bool FunctionNameIs(const char* expected,
static void event_handler(const v8::JitCodeEvent* event) {
CHECK_NOT_NULL(event);
- CHECK_NOT_NULL(code_map);
+ CHECK_NOT_NULL(instruction_stream_map);
CHECK_NOT_NULL(jitcode_line_info);
class DummyJitCodeLineInfo {
@@ -13575,7 +13637,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
CHECK_NOT_NULL(event->code_start);
CHECK_NE(0, static_cast<int>(event->code_len));
CHECK_NOT_NULL(event->name.str);
- v8::base::HashMap::Entry* entry = code_map->LookupOrInsert(
+ v8::base::HashMap::Entry* entry = instruction_stream_map->LookupOrInsert(
event->code_start, i::ComputePointerHash(event->code_start));
entry->value = reinterpret_cast<void*>(event->code_len);
@@ -13595,14 +13657,14 @@ static void event_handler(const v8::JitCodeEvent* event) {
// calculations can cause a GC, which can move the newly created code
// before its existence can be logged.
v8::base::HashMap::Entry* entry =
- code_map->Lookup(event->code_start, hash);
+ instruction_stream_map->Lookup(event->code_start, hash);
if (entry != nullptr) {
++move_events;
CHECK_EQ(reinterpret_cast<void*>(event->code_len), entry->value);
- code_map->Remove(event->code_start, hash);
+ instruction_stream_map->Remove(event->code_start, hash);
- entry = code_map->LookupOrInsert(
+ entry = instruction_stream_map->LookupOrInsert(
event->new_code_start,
i::ComputePointerHash(event->new_code_start));
entry->value = reinterpret_cast<void*>(event->code_len);
@@ -13663,6 +13725,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::v8_flags.baseline_batch_compilation = false;
#endif
if (!i::v8_flags.compact) return;
+ i::FlagList::EnforceFlagImplications();
const char* script =
"function bar() {"
" var sum = 0;"
@@ -13687,7 +13750,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
{
v8::HandleScope scope(isolate);
v8::base::HashMap code;
- code_map = &code;
+ instruction_stream_map = &code;
v8::base::HashMap lineinfo;
jitcode_line_info = &lineinfo;
@@ -13734,7 +13797,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
CHECK_LE(kIterations, saw_bar);
CHECK_LT(0, move_events);
- code_map = nullptr;
+ instruction_stream_map = nullptr;
jitcode_line_info = nullptr;
}
@@ -13754,7 +13817,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// Now get code through initial iteration.
v8::base::HashMap code;
- code_map = &code;
+ instruction_stream_map = &code;
v8::base::HashMap lineinfo;
jitcode_line_info = &lineinfo;
@@ -13770,7 +13833,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// with EnumExisting.
CHECK_LT(0u, code.occupancy());
- code_map = nullptr;
+ instruction_stream_map = nullptr;
}
isolate->Exit();
@@ -13805,12 +13868,10 @@ static void wasm_event_handler(const v8::JitCodeEvent* event) {
}
}
-namespace v8 {
-namespace internal {
-namespace wasm {
+namespace v8::internal::wasm {
TEST(WasmSetJitCodeEventHandler) {
v8::base::HashMap code;
- code_map = &code;
+ instruction_stream_map = &code;
v8::base::HashMap lineinfo;
jitcode_line_info = &lineinfo;
@@ -13822,15 +13883,21 @@ TEST(WasmSetJitCodeEventHandler) {
v8_isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault,
wasm_event_handler);
+  // Add (unreached) endless recursion to prevent "f" from being fully
+  // inlined. Otherwise we won't have source positions and will miss the
+  // {CODE_END_LINE_INFO_RECORDING} event.
TestSignatures sigs;
auto& f = r.NewFunction(sigs.i_i(), "f");
- BUILD(f, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ f.Build({WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_LOCAL_SET(0, WASM_CALL_FUNCTION(f.function_index(),
+ WASM_LOCAL_GET(0)))),
+ WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
LocalContext env;
- BUILD(r,
- WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION(f.function_index(),
- WASM_LOCAL_GET(1))));
+ r.Build(
+ {WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION(f.function_index(),
+ WASM_LOCAL_GET(1)))});
Handle<JSFunction> func = r.builder().WrapCode(0);
CHECK(env->Global()
@@ -13841,11 +13908,8 @@ TEST(WasmSetJitCodeEventHandler) {
)";
CompileRun(script);
CHECK(saw_wasm_main);
- saw_wasm_main = false;
}
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm
#endif // V8_ENABLE_WEBASSEMBLY
TEST(ExternalAllocatedMemory) {
@@ -16604,13 +16668,16 @@ TEST(TestIdleNotification) {
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
if (i < 10 && CcTest::heap()->incremental_marking()->IsStopped()) {
- CcTest::heap()->StartIdleIncrementalMarking(
+ CcTest::heap()->StartIncrementalMarking(
+ i::Heap::kReduceMemoryFootprintMask,
i::GarbageCollectionReason::kTesting);
}
+ START_ALLOW_USE_DEPRECATED();
finished = env->GetIsolate()->IdleNotificationDeadline(
(v8::base::TimeTicks::Now().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
IdlePauseInSeconds);
+ END_ALLOW_USE_DEPRECATED();
if (CcTest::heap()->sweeping_in_progress()) {
CcTest::heap()->EnsureSweepingCompleted(
i::Heap::SweepingForcedFinalizationMode::kV8Only);
@@ -16625,11 +16692,13 @@ TEST(TestMemorySavingsMode) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ START_ALLOW_USE_DEPRECATED();
CHECK(!i_isolate->IsMemorySavingsModeActive());
isolate->EnableMemorySavingsMode();
CHECK(i_isolate->IsMemorySavingsModeActive());
isolate->DisableMemorySavingsMode();
CHECK(!i_isolate->IsMemorySavingsModeActive());
+ END_ALLOW_USE_DEPRECATED();
}
TEST(Regress2333) {
@@ -16776,6 +16845,8 @@ TEST(GetHeapSpaceStatistics) {
}
TEST(NumberOfNativeContexts) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
static const size_t kNumTestContexts = 10;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
@@ -16800,6 +16871,8 @@ TEST(NumberOfNativeContexts) {
}
TEST(NumberOfDetachedContexts) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
static const size_t kNumTestContexts = 10;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
@@ -17027,6 +17100,8 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
TEST(ExternalInternalizedStringCollectedAtGC) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
int destroyed = 0;
{ LocalContext env;
v8::HandleScope handle_scope(env->GetIsolate());
@@ -17221,6 +17296,9 @@ TEST(Regress528) {
v8::Local<Context> other_context;
int gc_count;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
// Create a context used to keep the code from aging in the compilation
// cache.
other_context = Context::New(isolate);
@@ -19608,6 +19686,8 @@ static int CountLiveMapsInMapCache(i::Context context) {
THREADED_TEST(Regress1516) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
// Object with 20 properties is not a common case, so it should be removed
// from the cache after GC.
@@ -21088,198 +21168,6 @@ TEST(AccessCheckThrows) {
isolate->SetFailedAccessCheckCallbackFunction(nullptr);
}
-namespace {
-
-const char kOneByteSubjectString[] = {
- 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
- 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
- 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', '\0'};
-const uint16_t kTwoByteSubjectString[] = {
- 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
- 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
- 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', '\0'};
-
-const int kSubjectStringLength = arraysize(kOneByteSubjectString) - 1;
-static_assert(arraysize(kOneByteSubjectString) ==
- arraysize(kTwoByteSubjectString));
-
-OneByteVectorResource one_byte_string_resource(v8::base::Vector<const char>(
- &kOneByteSubjectString[0], kSubjectStringLength));
-UC16VectorResource two_byte_string_resource(
- v8::base::Vector<const v8::base::uc16>(&kTwoByteSubjectString[0],
- kSubjectStringLength));
-
-class RegExpInterruptTest {
- public:
- RegExpInterruptTest()
- : i_thread(this),
- env_(),
- isolate_(env_->GetIsolate()),
- sem_(0),
- ran_test_body_(false),
- ran_to_completion_(false) {}
-
- void RunTest(v8::InterruptCallback test_body_fn) {
- v8::HandleScope handle_scope(isolate_);
-
- i_thread.SetTestBody(test_body_fn);
- CHECK(i_thread.Start());
-
- TestBody();
-
- i_thread.Join();
- }
-
- static void CollectAllGarbage(v8::Isolate* isolate, void* data) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::ScanStackModeScopeForTesting no_stack_scanning(
- CcTest::heap(), i::Heap::ScanStackMode::kNone);
- i_isolate->heap()->PreciseCollectAllGarbage(
- i::Heap::kNoGCFlags, i::GarbageCollectionReason::kRuntime);
- }
-
- static void MakeSubjectOneByteExternal(v8::Isolate* isolate, void* data) {
- auto instance = reinterpret_cast<RegExpInterruptTest*>(data);
-
- v8::HandleScope scope(isolate);
- v8::Local<v8::String> string =
- v8::Local<v8::String>::New(isolate, instance->string_handle_);
- CHECK(string->CanMakeExternal());
- string->MakeExternal(&one_byte_string_resource);
- }
-
- static void MakeSubjectTwoByteExternal(v8::Isolate* isolate, void* data) {
- auto instance = reinterpret_cast<RegExpInterruptTest*>(data);
-
- v8::HandleScope scope(isolate);
- v8::Local<v8::String> string =
- v8::Local<v8::String>::New(isolate, instance->string_handle_);
- CHECK(string->CanMakeExternal());
- string->MakeExternal(&two_byte_string_resource);
- }
-
- private:
- static void SignalSemaphore(v8::Isolate* isolate, void* data) {
- reinterpret_cast<RegExpInterruptTest*>(data)->sem_.Signal();
- }
-
- void CreateTestStrings() {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
-
- // The string must be in old space to support externalization.
- i::Handle<i::String> i_string =
- i_isolate->factory()->NewStringFromAsciiChecked(
- &kOneByteSubjectString[0], i::AllocationType::kOld);
- v8::Local<v8::String> string = v8::Utils::ToLocal(i_string);
-
- env_->Global()->Set(env_.local(), v8_str("a"), string).FromJust();
-
- string_handle_.Reset(env_->GetIsolate(), string);
- }
-
- void TestBody() {
- CHECK(!ran_test_body_.load());
- CHECK(!ran_to_completion_.load());
-
- CreateTestStrings();
-
- v8::TryCatch try_catch(env_->GetIsolate());
-
- isolate_->RequestInterrupt(&SignalSemaphore, this);
- CompileRun("/((a*)*)*b/.exec(a)");
-
- CHECK(try_catch.HasTerminated());
- CHECK(ran_test_body_.load());
- CHECK(ran_to_completion_.load());
- }
-
- class InterruptThread : public v8::base::Thread {
- public:
- explicit InterruptThread(RegExpInterruptTest* test)
- : Thread(Options("RegExpInterruptTest")), test_(test) {}
-
- void Run() override {
- CHECK_NOT_NULL(test_body_fn_);
-
- // Wait for JS execution to start.
- test_->sem_.Wait();
-
- // Sleep for a bit to allow irregexp execution to start up, then run the
- // test body.
- v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(50));
- test_->isolate_->RequestInterrupt(&RunTestBody, test_);
- test_->isolate_->RequestInterrupt(&SignalSemaphore, test_);
-
- // Wait for the scheduled interrupt to signal.
- test_->sem_.Wait();
-
- // Sleep again to resume irregexp execution, then terminate.
- v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(50));
- test_->ran_to_completion_.store(true);
- test_->isolate_->TerminateExecution();
- }
-
- static void RunTestBody(v8::Isolate* isolate, void* data) {
- auto instance = reinterpret_cast<RegExpInterruptTest*>(data);
- instance->i_thread.test_body_fn_(isolate, data);
- instance->ran_test_body_.store(true);
- }
-
- void SetTestBody(v8::InterruptCallback callback) {
- test_body_fn_ = callback;
- }
-
- private:
- v8::InterruptCallback test_body_fn_;
- RegExpInterruptTest* test_;
- };
-
- InterruptThread i_thread;
-
- LocalContext env_;
- v8::Isolate* isolate_;
- v8::base::Semaphore sem_; // Coordinates between main and interrupt threads.
-
- v8::Persistent<v8::String> string_handle_;
-
- std::atomic<bool> ran_test_body_;
- std::atomic<bool> ran_to_completion_;
-};
-
-} // namespace
-
-TEST(RegExpInterruptAndCollectAllGarbage) {
- // Move all movable objects on GC.
- i::v8_flags.compact_on_every_full_gc = true;
- // We want to be stuck regexp execution, so no fallback to linear-time
- // engine.
- // TODO(mbid,v8:10765): Find a way to test interrupt support of the
- // experimental engine.
- i::v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks = false;
- RegExpInterruptTest test;
- test.RunTest(RegExpInterruptTest::CollectAllGarbage);
-}
-
-TEST(RegExpInterruptAndMakeSubjectOneByteExternal) {
- // We want to be stuck regexp execution, so no fallback to linear-time
- // engine.
- // TODO(mbid,v8:10765): Find a way to test interrupt support of the
- // experimental engine.
- i::v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks = false;
- RegExpInterruptTest test;
- test.RunTest(RegExpInterruptTest::MakeSubjectOneByteExternal);
-}
-
-TEST(RegExpInterruptAndMakeSubjectTwoByteExternal) {
- // We want to be stuck regexp execution, so no fallback to linear-time
- // engine.
- // TODO(mbid,v8:10765): Find a way to test interrupt support of the
- // experimental engine.
- i::v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks = false;
- RegExpInterruptTest test;
- test.RunTest(RegExpInterruptTest::MakeSubjectTwoByteExternal);
-}
-
class RequestInterruptTestBase {
public:
RequestInterruptTestBase()
@@ -21834,7 +21722,6 @@ TEST(EscapableHandleScope) {
}
}
for (int i = 0; i < runs; i++) {
- Local<String> expected;
if (i != 0) {
CHECK(v8_str("escape value")
->Equals(context.local(), values[i])
@@ -23058,8 +22945,7 @@ void SourceURLHelper(v8::Isolate* isolate, const char* source_text,
Local<Value>(), // source map URL
false, // is opaque
false, // is WASM
- true // is ES Module
- );
+ true); // is ES Module
v8::ScriptCompiler::Source source(source_str, origin, nullptr);
Local<v8::Module> module =
@@ -23896,12 +23782,6 @@ TEST(StreamingWithIsolateScriptCache) {
// Variant of the above test which evicts the root SharedFunctionInfo from the
// Isolate script cache but still reuses the same Script.
TEST(StreamingWithIsolateScriptCacheClearingRootSFI) {
- // TODO(v8:12808): Remove this check once background compilation is capable of
- // reusing an existing Script.
- if (i::v8_flags.stress_background_compile) {
- return;
- }
-
StreamingWithIsolateScriptCache(true);
}
@@ -24769,7 +24649,7 @@ TEST(StringConcatOverflow) {
}
TEST(TurboAsmDisablesDetach) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
i::v8_flags.turbofan = true;
i::v8_flags.allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
@@ -24803,7 +24683,7 @@ TEST(TurboAsmDisablesDetach) {
result = CompileRun(store).As<v8::ArrayBuffer>();
CHECK(!result->IsDetachable());
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
}
TEST(ClassPrototypeCreationContext) {
@@ -25694,8 +25574,8 @@ TEST(MemoryPressure) {
WeakCallCounter counter(1234);
// Conservative stack scanning might break results.
- i::ScanStackModeScopeForTesting no_stack_scanning(
- CcTest::heap(), i::Heap::ScanStackMode::kNone);
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
// Check that critical memory pressure notification sets GC interrupt.
auto garbage = CreateGarbageWithWeakCallCounter(isolate, &counter);
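Aside: ScanStackModeScopeForTesting(heap, ScanStackMode::kNone) is replaced here, and throughout these tests, by the dedicated DisableConservativeStackScanningScopeForTesting. Judging by where the scope is added, the usage pattern the tests rely on looks like this sketch:

// While this scope is alive, a forced GC does not have to treat the C++ stack
// conservatively, so objects reachable only from raw stack values can be
// reclaimed and weak callbacks can fire deterministically.
{
  i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
      CcTest::heap());
  CcTest::CollectAllGarbage();
}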
@@ -26857,9 +26737,9 @@ TEST(WasmI32AtomicWaitCallback) {
WasmRunner<int32_t, int32_t, int32_t, double> r(TestExecutionTier::kTurbofan);
r.builder().AddMemory(kWasmPageSize, SharedFlag::kShared);
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_WAIT(kExprI32AtomicWait, WASM_LOCAL_GET(0),
+ r.Build({WASM_ATOMICS_WAIT(kExprI32AtomicWait, WASM_LOCAL_GET(0),
WASM_LOCAL_GET(1),
- WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(2)), 4));
+ WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(2)), 4)});
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -26892,9 +26772,9 @@ TEST(WasmI64AtomicWaitCallback) {
WasmRunner<int32_t, int32_t, double, double> r(TestExecutionTier::kTurbofan);
r.builder().AddMemory(kWasmPageSize, SharedFlag::kShared);
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_WAIT(kExprI64AtomicWait, WASM_LOCAL_GET(0),
+ r.Build({WASM_ATOMICS_WAIT(kExprI64AtomicWait, WASM_LOCAL_GET(0),
WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(1)),
- WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(2)), 8));
+ WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(2)), 8)});
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -27559,8 +27439,6 @@ static void CallIsolate2(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context>::New(isolate_2, context_2);
v8::Context::Scope context_scope(context);
i::Heap* heap_2 = reinterpret_cast<i::Isolate*>(isolate_2)->heap();
- i::ScanStackModeScopeForTesting no_stack_scanning(
- heap_2, i::Heap::ScanStackMode::kNone);
heap_2->CollectAllGarbage(i::Heap::kForcedGC,
i::GarbageCollectionReason::kTesting);
CompileRun("f2() //# sourceURL=isolate2b");
@@ -27669,7 +27547,7 @@ UNINITIALIZED_TEST(NestedIsolates) {
#undef THREADED_PROFILED_TEST
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
namespace {
#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
@@ -28379,10 +28257,10 @@ void CheckDynamicTypeInfo() {
CHECK_EQ(c_func.ReturnInfo().GetType(), v8::CTypeInfo::Type::kVoid);
}
} // namespace
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
TEST(FastApiStackSlot) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
if (i::v8_flags.jitless) return;
i::v8_flags.turbofan = true;
@@ -28430,11 +28308,11 @@ TEST(FastApiStackSlot) {
int32_t slow_value_typed = checker.slow_value_.ToChecked();
CHECK_EQ(slow_value_typed, test_value);
CHECK_EQ(checker.fast_value_, test_value);
-#endif
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
}
TEST(FastApiCalls) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
if (i::v8_flags.jitless) return;
i::v8_flags.turbofan = true;
@@ -28901,10 +28779,10 @@ TEST(FastApiCalls) {
// TODO(mslekova): Restructure the tests so that the fast optimized calls
// are compared against the slow optimized calls.
// TODO(mslekova): Add tests for FTI that requires access check.
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
}
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
namespace {
static Trivial* UnwrapTrivialObject(Local<Object> object) {
i::Address addr = *reinterpret_cast<i::Address*>(*object);
@@ -28981,10 +28859,10 @@ void SequenceSlowCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
} // namespace
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
TEST(FastApiSequenceOverloads) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
if (i::v8_flags.jitless) return;
i::v8_flags.turbofan = true;
@@ -29038,11 +28916,11 @@ TEST(FastApiSequenceOverloads) {
CompileRun("const ta = new Int32Array([1, 2, 3, 4]);"
"func(4, ta);"));
CHECK_EQ(4, rcv->x());
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
}
TEST(FastApiOverloadResolution) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
if (i::v8_flags.jitless) return;
i::v8_flags.turbofan = true;
@@ -29086,7 +28964,7 @@ TEST(FastApiOverloadResolution) {
CHECK_EQ(v8::CFunction::OverloadResolution::kAtCompileTime,
typed_array_callback.GetOverloadResolution(&diff_arity_callback));
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
}
THREADED_TEST(Recorder_GetContext) {
@@ -29096,6 +28974,8 @@ THREADED_TEST(Recorder_GetContext) {
// Set up isolate and context.
v8::Isolate* iso = CcTest::isolate();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
v8::metrics::Recorder::ContextId original_id;
std::vector<v8::metrics::Recorder::ContextId> ids;
{
@@ -29179,6 +29059,9 @@ TEST(TriggerMainThreadMetricsEvent) {
using v8::Local;
using v8::MaybeLocal;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
// Set up isolate and context.
v8::Isolate* iso = CcTest::isolate();
i::Isolate* i_iso = reinterpret_cast<i::Isolate*>(iso);
@@ -29218,6 +29101,9 @@ TEST(TriggerDelayedMainThreadMetricsEvent) {
using v8::MaybeLocal;
i::v8_flags.stress_concurrent_allocation = false;
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
// Set up isolate and context.
v8::Isolate* iso = CcTest::isolate();
i::Isolate* i_iso = reinterpret_cast<i::Isolate*>(iso);
@@ -29571,3 +29457,653 @@ UNINITIALIZED_TEST(OOMDetailsAreMovableAndCopyable) {
UNINITIALIZED_TEST(JitCodeEventIsMovableAndCopyable) {
TestCopyAndMoveConstructionAndAssignment<v8::JitCodeEvent>();
}
+
+#if V8_ENABLE_WEBASSEMBLY
+TEST(WasmAbortStreamingAfterContextDisposal) {
+ // This is a regression test for https://crbug.com/1403531.
+
+ class Resolver final : public i::wasm::CompilationResultResolver {
+ public:
+ void OnCompilationSucceeded(
+ i::Handle<i::WasmModuleObject> result) override {
+ UNREACHABLE();
+ }
+ void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ UNREACHABLE();
+ }
+ };
+
+ auto resolver = std::make_shared<Resolver>();
+
+ std::unique_ptr<v8::WasmStreaming> wasm_streaming;
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ {
+ v8::HandleScope scope(isolate);
+ LocalContext context;
+
+ wasm_streaming =
+ i::wasm::StartStreamingForTesting(i_isolate, std::move(resolver));
+ isolate->ContextDisposedNotification(false);
+ }
+
+ wasm_streaming->Abort({});
+ wasm_streaming.reset();
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+TEST(DeepFreezeIncompatibleTypes) {
+ const int numCases = 7;
+ struct {
+ const char* script;
+ const char* exception;
+ } test_cases[numCases] = {
+ {
+ R"(
+ "use strict"
+ let foo = 1;
+ )",
+ "TypeError: Cannot DeepFreeze non-const value foo"},
+ {
+ R"(
+ "use strict"
+ const foo = 1;
+ const generator = function*() {
+ yield 1;
+ yield 2;
+ }
+ const gen = generator();
+ )",
+ "TypeError: Cannot DeepFreeze object of type Generator"},
+ {
+ R"(
+ "use strict"
+ const incrementer = (function() {
+ let a = 1;
+ return function() { a += 1; return a; };
+ })();
+ )",
+ "TypeError: Cannot DeepFreeze non-const value a"},
+ {
+ R"(
+ let a = new Number();
+ )",
+ "TypeError: Cannot DeepFreeze non-const value a"},
+ {
+ R"(
+ const a = [0, 1, 2, 3, 4, 5];
+ var it = a[Symbol.iterator]();
+ function foo() {
+ return it.next().value;
+ }
+ foo();
+ )",
+ "TypeError: Cannot DeepFreeze object of type Array Iterator"},
+ {
+ R"(
+ const a = "0123456789";
+ var it = a[Symbol.iterator]();
+ function foo() {
+ return it.next().value;
+ }
+ foo();
+ )",
+ "TypeError: Cannot DeepFreeze object of type Object"},
+ {R"(
+ const a = "0123456789";
+ var it = a.matchAll(/\d/g);
+ function foo() {
+ return it.next().value;
+ }
+ foo();
+ )",
+ "TypeError: Cannot DeepFreeze object of type Object"},
+ };
+
+ for (int idx = 0; idx < numCases; idx++) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ v8::Maybe<void> maybe_success = v8::Nothing<void>();
+ CompileRun(context, test_cases[idx].script);
+ v8::TryCatch tc(isolate);
+ maybe_success = context->DeepFreeze(nullptr);
+ CHECK(maybe_success.IsNothing());
+ CHECK(tc.HasCaught());
+ v8::String::Utf8Value uS(isolate, tc.Exception());
+ std::string exception(*uS, uS.length());
+ CHECK_EQ(std::string(test_cases[idx].exception), exception);
+ }
+}
+
+TEST(DeepFreezeIsFrozen) {
+ const int numCases = 10;
+ struct {
+ const char* script;
+ const char* exception;
+ int32_t expected;
+ } test_cases[numCases] = {
+ {// Closure
+ R"(
+ const incrementer = (function() {
+ const a = {b: 1};
+ return function() { a.b += 1; return a.b; };
+ })();
+ const foo = function() { return incrementer(); }
+ foo();
+ )",
+ nullptr, 2},
+ {
+ R"(
+ const incrementer = (function() {
+ const a = {b: 1};
+ return function() { a.b += 1; return a.b; };
+ })();
+ const foo = function() { return incrementer(); }
+ foo();
+ )",
+ nullptr, 2},
+ {// Array
+ R"(
+ const a = [0, -1, -2];
+ const foo = function() { a[0] += 1; return a[0]; }
+ )",
+ nullptr, 0},
+ {
+ R"(
+ const a = [0, -1, -2];
+ const foo = function() { a[0] += 1; return a[0]; }
+ )",
+ nullptr, 0},
+ {// Wrapper Objects
+ R"(
+ const a = {b: new Number()};
+ const foo = function() {
+ a.b = new Number(a.b + 1);
+ return a.b.valueOf();
+ }
+ )",
+ nullptr, 0},
+ {// Functions
+ // Assignment to constant doesn't work.
+ R"(
+ const foo = function() {
+ foo = function() { return 2;}
+ return 1;
+ }
+ )",
+ "TypeError: Assignment to constant variable.", 0},
+ {
+ R"(
+ const a = {b: {c: {d: {e: {f: 1}}}}};
+ const foo = function() {
+ a.b.c.d.e.f += 1;
+ return a.b.c.d.e.f;
+ }
+ )",
+ nullptr, 1},
+ {
+ R"(
+ const foo = function() {
+ if (!('count' in globalThis))
+ globalThis.count = 1;
+ ++count;
+ return count;
+ }
+ )",
+ "ReferenceError: count is not defined", 0},
+ {
+ R"(
+ const countPrototype = {
+ get() {
+ return 1;
+ },
+ };
+ const count = Object.create(countPrototype);
+ function foo() {
+ const curr_count = count.get();
+ count.prototype = { get() { return curr_count + 1; }};
+ return count.get();
+ }
+ )",
+ nullptr, 1},
+ {
+ R"(
+ const a = (function(){
+ function A(){};
+ A.o = 1;
+ return new A();
+ })();
+ function foo() {
+ a.constructor.o++;
+ return a.constructor.o;
+ }
+ )",
+ nullptr, 1},
+ };
+ for (int idx = 0; idx < numCases; idx++) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ v8::Maybe<void> maybe_success = v8::Nothing<void>();
+ v8::TryCatch tc(isolate);
+ v8::MaybeLocal<v8::Value> status =
+ CompileRun(context, test_cases[idx].script);
+ CHECK(!status.IsEmpty());
+ CHECK(!tc.HasCaught());
+
+ maybe_success = context->DeepFreeze(nullptr);
+ CHECK(!tc.HasCaught());
+ status = CompileRun(context, "foo()");
+
+ if (test_cases[idx].exception) {
+ CHECK(tc.HasCaught());
+ v8::String::Utf8Value uS(isolate, tc.Exception());
+ std::string exception(*uS, uS.length());
+ CHECK_EQ(std::string(test_cases[idx].exception), exception);
+ } else {
+ CHECK(!tc.HasCaught());
+ CHECK(!status.IsEmpty());
+ ExpectInt32("foo()", test_cases[idx].expected);
+ }
+ }
+}
+
+TEST(DeepFreezeAllowsSyntax) {
+ const int numCases = 2;
+ struct {
+ const char* script;
+ int32_t expected;
+ } test_cases[numCases] = {
+ {
+ R"(
+ const a = 1;
+ function foo() {
+ let b = 4;
+ b += 1;
+ return a + b;
+ }
+ )",
+ 6,
+ },
+ {
+ R"(
+ var a = 1;
+ function foo() {
+ let b = 4;
+ b += 1;
+ return a + b;
+ }
+ )",
+ 6,
+ }}; // TODO(behamilton): Add more cases that should be supported.
+ for (int idx = 0; idx < numCases; idx++) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ v8::Maybe<void> maybe_success = v8::Nothing<void>();
+ v8::MaybeLocal<v8::Value> status =
+ CompileRun(context, test_cases[idx].script);
+ CHECK(!status.IsEmpty());
+ maybe_success = context->DeepFreeze(nullptr);
+ CHECK(!maybe_success.IsNothing());
+ ExpectInt32("foo()", test_cases[idx].expected);
+ }
+}
+
+namespace {
+void DoNothing(const v8::FunctionCallbackInfo<v8::Value>& ignored) {}
+
+class AllowEmbedderObjects : public v8::Context::DeepFreezeDelegate {
+ public:
+ bool FreezeEmbedderObjectAndGetChildren(
+ v8::Local<v8::Object> obj,
+ std::vector<v8::Local<v8::Object>>& children_out) override {
+ return true;
+ }
+};
+
+} // namespace
+
+TEST(DeepFreezesJSApiObjectWithDelegate) {
+ const int numCases = 3;
+ struct {
+ const char* script;
+ std::function<void()> run_check;
+ } test_cases[numCases] = {
+ {
+ R"(
+ globalThis.jsApiObject.foo = {test: 4};
+ function foo() {
+ globalThis.jsApiObject.foo.test++;
+ return globalThis.jsApiObject.foo.test;
+ }
+ foo();
+ )",
+ []() { ExpectInt32("foo()", 5); }},
+ {
+ R"(
+ function foo() {
+ if (!('foo' in globalThis.jsApiObject))
+ globalThis.jsApiObject.foo = {test: 4}
+ globalThis.jsApiObject.foo.test++;
+ return globalThis.jsApiObject.foo.test;
+ }
+ foo();
+ )",
+ []() { ExpectInt32("foo()", 5); }},
+ {
+ R"(
+ function foo() {
+ if (!('foo' in globalThis.jsApiObject))
+ globalThis.jsApiObject.foo = 4
+ globalThis.jsApiObject.foo++;
+ return globalThis.jsApiObject.foo;
+ }
+ )",
+ []() { ExpectUndefined("foo()"); }},
+ };
+
+ for (int idx = 0; idx < numCases; idx++) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> v8_template =
+ v8::FunctionTemplate::New(isolate, &DoNothing);
+ v8_template->RemovePrototype();
+ global_template->Set(v8_str("jsApiObject"), v8_template);
+
+ LocalContext env(isolate, /*extensions=*/nullptr, global_template);
+ v8::Local<v8::Context> context = env.local();
+
+ v8::TryCatch tc(isolate);
+ v8::MaybeLocal<v8::Value> status =
+ CompileRun(context, test_cases[idx].script);
+ CHECK(!tc.HasCaught());
+ CHECK(!status.IsEmpty());
+
+ AllowEmbedderObjects delegate;
+ v8::Maybe<void> maybe_success = context->DeepFreeze(&delegate);
+ CHECK(!tc.HasCaught());
+ CHECK(!maybe_success.IsNothing());
+
+ test_cases[idx].run_check();
+ }
+}
+
+namespace {
+
+class MyObject {
+ public:
+ bool Freeze() {
+ was_frozen_ = true;
+ return true;
+ }
+
+ bool was_frozen_ = false;
+ v8::Local<v8::Object> internal_data_;
+};
+
+class HiddenDataDelegate : public v8::Context::DeepFreezeDelegate {
+ public:
+ explicit HiddenDataDelegate(v8::Local<v8::External> my_object)
+ : my_object_(my_object) {}
+
+ bool FreezeEmbedderObjectAndGetChildren(
+ v8::Local<v8::Object> obj,
+ std::vector<v8::Local<v8::Object>>& children_out) override {
+ int fields = obj->InternalFieldCount();
+ for (int idx = 0; idx < fields; idx++) {
+ v8::Local<v8::Value> child_value = obj->GetInternalField(idx);
+ if (child_value->IsExternal()) {
+ if (!FreezeExternal(v8::Local<v8::External>::Cast(child_value),
+ children_out)) {
+ return false;
+ }
+ }
+ }
+ if (obj->IsExternal()) {
+ return FreezeExternal(v8::Local<v8::External>::Cast(obj), children_out);
+ }
+ return true;
+ }
+
+ private:
+ bool FreezeExternal(v8::Local<v8::External> ext,
+ std::vector<v8::Local<v8::Object>>& children_out) {
+ if (ext->Value() == my_object_->Value()) {
+ MyObject* my_obj = static_cast<MyObject*>(ext->Value());
+ if (my_obj->Freeze()) {
+ children_out.push_back(my_obj->internal_data_);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ v8::Local<v8::External> my_object_;
+};
+
+} // namespace
+
+TEST(DeepFreezeDoesntFreezeJSApiObjectFunctionData) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ MyObject foo;
+ v8::Local<v8::External> v8_foo = v8::External::New(isolate, &foo);
+
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> v8_template =
+ v8::FunctionTemplate::New(isolate, &DoNothing, /*data=*/v8_foo);
+ v8_template->RemovePrototype();
+ global_template->Set(v8_str("jsApiObject"), v8_template);
+
+ LocalContext env(isolate, /*extensions=*/nullptr, global_template);
+ v8::Local<v8::Context> context = env.local();
+
+ foo = {false, v8::Object::New(isolate)};
+
+ HiddenDataDelegate hdd{v8_foo};
+ v8::TryCatch tc(isolate);
+
+ v8::Maybe<void> maybe_success = context->DeepFreeze(&hdd);
+
+ CHECK(!maybe_success.IsNothing());
+ CHECK(!foo.was_frozen_);
+
+ v8::Local<v8::String> param_list[] = {v8_str("obj")};
+ v8::Local<v8::Value> params[] = {
+ v8::Local<v8::Value>::Cast(foo.internal_data_)};
+ v8::ScriptCompiler::Source source{v8_str("return Object.isFrozen(obj)")};
+ v8::Local<v8::Function> is_frozen =
+ v8::ScriptCompiler::CompileFunction(context, &source, 1, param_list)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result =
+ is_frozen->Call(context, context->Global(), 1, params);
+
+ CHECK(!result.IsEmpty());
+ CHECK(result.ToLocalChecked()->IsFalse());
+}
+
+TEST(DeepFreezeForbidsJSApiObjectWithoutDelegate) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> v8_template = v8::ObjectTemplate::New(isolate);
+ v8_template->SetInternalFieldCount(1);
+ global_template->Set(v8_str("jsApiObject"), v8_template);
+
+ LocalContext env(isolate, /*extensions=*/nullptr, global_template);
+ v8::Local<v8::Context> context = env.local();
+
+ MyObject foo{false, v8::Object::New(isolate)};
+ v8::Local<v8::External> v8_foo = v8::External::New(isolate, &foo);
+
+ v8::Local<v8::Value> val =
+ context->Global()->Get(context, v8_str("jsApiObject")).ToLocalChecked();
+ CHECK(val->IsObject());
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(val);
+ CHECK_EQ(1, obj->InternalFieldCount());
+ obj->SetInternalField(0, v8_foo);
+
+ v8::TryCatch tc(isolate);
+ v8::Maybe<void> maybe_success = context->DeepFreeze(nullptr);
+
+ CHECK(tc.HasCaught());
+ v8::String::Utf8Value uS(isolate, tc.Exception());
+ std::string exception(*uS, uS.length());
+ CHECK_EQ(std::string("TypeError: Cannot DeepFreeze object of type Object"),
+ exception);
+ CHECK(maybe_success.IsNothing());
+}
+
+TEST(DeepFreezeFreezesJSApiObjectData) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> v8_template = v8::ObjectTemplate::New(isolate);
+ v8_template->SetInternalFieldCount(1);
+ global_template->Set(v8_str("jsApiObject"), v8_template);
+
+ LocalContext env(isolate, /*extensions=*/nullptr, global_template);
+ v8::Local<v8::Context> context = env.local();
+
+ MyObject foo{false, v8::Object::New(isolate)};
+ v8::Local<v8::External> v8_foo = v8::External::New(isolate, &foo);
+
+ v8::Local<v8::Value> val =
+ context->Global()->Get(context, v8_str("jsApiObject")).ToLocalChecked();
+ CHECK(val->IsObject());
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(val);
+ CHECK_EQ(1, obj->InternalFieldCount());
+ obj->SetInternalField(0, v8_foo);
+
+ HiddenDataDelegate hdd{v8_foo};
+
+ v8::TryCatch tc(isolate);
+
+ v8::Maybe<void> maybe_success = context->DeepFreeze(&hdd);
+
+ CHECK(!maybe_success.IsNothing());
+ CHECK(foo.was_frozen_);
+
+ v8::Local<v8::String> param_list[] = {v8_str("obj")};
+ v8::Local<v8::Value> params[] = {
+ v8::Local<v8::Value>::Cast(foo.internal_data_)};
+ v8::ScriptCompiler::Source source{v8_str("return Object.isFrozen(obj)")};
+ v8::Local<v8::Function> is_frozen =
+ v8::ScriptCompiler::CompileFunction(context, &source, 1, param_list)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result =
+ is_frozen->Call(context, context->Global(), 1, params);
+
+ CHECK(!result.IsEmpty());
+ CHECK(result.ToLocalChecked()->IsTrue());
+}
+
+TEST(DeepFreezeFreezesExternalObjectData) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+
+ MyObject foo{false, v8::Object::New(isolate)};
+ v8::Local<v8::External> v8_foo = v8::External::New(isolate, &foo);
+ v8::Maybe<bool> success =
+ context->Global()->CreateDataProperty(context, v8_str("foo"), v8_foo);
+ CHECK(!success.IsNothing() && success.FromJust());
+
+ HiddenDataDelegate hdd{v8_foo};
+
+ v8::Maybe<void> maybe_success = context->DeepFreeze(&hdd);
+
+ CHECK(!maybe_success.IsNothing());
+ CHECK(foo.was_frozen_);
+
+ v8::Local<v8::String> param_list[] = {v8_str("obj")};
+ v8::Local<v8::Value> params[] = {
+ v8::Local<v8::Value>::Cast(foo.internal_data_)};
+ v8::ScriptCompiler::Source source{v8_str("return Object.isFrozen(obj)")};
+ v8::Local<v8::Function> is_frozen =
+ v8::ScriptCompiler::CompileFunction(context, &source, 1, param_list)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result =
+ is_frozen->Call(context, context->Global(), 1, params);
+
+ CHECK(!result.IsEmpty());
+ CHECK(result.ToLocalChecked()->IsTrue());
+}
+
+namespace {
+void handle_property(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_num(900));
+}
+
+void handle_property_2(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_num(902));
+}
+
+void handle_property(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK_EQ(0, info.Length());
+ info.GetReturnValue().Set(v8_num(907));
+}
+
+} // namespace
+
+TEST(DeepFreezeInstantiatesAccessors) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
+ Local<v8::FunctionTemplate> getter_templ =
+ v8::FunctionTemplate::New(isolate, handle_property);
+ getter_templ->SetLength(0);
+ fun_templ->SetAccessorProperty(v8_str("bar"), getter_templ);
+ fun_templ->SetNativeDataProperty(v8_str("instance_foo"), handle_property);
+ fun_templ->SetNativeDataProperty(v8_str("object_foo"), handle_property_2);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("Fun"), fun).FromJust());
+
+ v8::Local<v8::Context> context = env.local();
+ v8::Maybe<void> maybe_success = context->DeepFreeze(nullptr);
+ CHECK(maybe_success.IsNothing());
+}
+
+namespace {
+void handle_object_property(v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<Value>& info) {
+ info.GetReturnValue().Set(v8_num(909));
+}
+} // namespace
+
+TEST(DeepFreezeInstantiatesAccessors2) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::ObjectTemplate> fun_templ = v8::ObjectTemplate::New(isolate);
+ fun_templ->SetAccessor(v8_str("foo"), handle_object_property);
+ Local<v8::FunctionTemplate> getter_templ =
+ v8::FunctionTemplate::New(isolate, handle_property);
+ getter_templ->SetLength(0);
+ fun_templ->SetAccessorProperty(v8_str("bar"), getter_templ);
+ fun_templ->SetNativeDataProperty(v8_str("instance_foo"), handle_property);
+ fun_templ->SetNativeDataProperty(v8_str("object_foo"), handle_property_2);
+ Local<Object> fun = fun_templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("Fun"), fun).FromJust());
+
+ v8::Local<v8::Context> context = env.local();
+ v8::Maybe<void> maybe_success = context->DeepFreeze(nullptr);
+ CHECK(maybe_success.IsNothing());
+}
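The DeepFreeze tests above all drive the new v8::Context::DeepFreeze(DeepFreezeDelegate*) API. As a hedged sketch (not part of the patch) of how an embedder might call the same interface, using only the types visible in the hunks above and a hypothetical PermissiveDelegate name:

  // Delegate that accepts every embedder object without reporting children,
  // mirroring AllowEmbedderObjects in the tests above.
  class PermissiveDelegate : public v8::Context::DeepFreezeDelegate {
   public:
    bool FreezeEmbedderObjectAndGetChildren(
        v8::Local<v8::Object> obj,
        std::vector<v8::Local<v8::Object>>& children_out) override {
      return true;
    }
  };

  // Freezing a context: a Nothing result means the freeze failed and an
  // exception (a TypeError in the tests above) is pending on the isolate.
  PermissiveDelegate delegate;
  v8::Maybe<void> result = context->DeepFreeze(&delegate);
  if (result.IsNothing()) { /* handle the pending exception */ }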
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 48f3cd2c44..0604deb9af 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -72,6 +72,14 @@ struct ConvertJSValue<uint32_t> {
}
};
+template <>
+struct ConvertJSValue<std::nullptr_t> {
+ static v8::Maybe<std::nullptr_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->IsNull() ? v8::Just(nullptr) : v8::Nothing<std::nullptr_t>();
+ }
+};
+
// NaNs and +/-Infinity should be 0, otherwise (modulo 2^64) - 2^63.
// Step 8 - 12 of https://heycam.github.io/webidl/#abstract-opdef-converttoint
// The int64_t and uint64_t implementations below are copied from Blink:
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 210f6bf29c..d511f2d289 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -64,7 +64,7 @@ TEST(0) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(7, res);
@@ -100,7 +100,7 @@ TEST(1) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(100, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(5050, res);
@@ -145,7 +145,7 @@ TEST(2) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(10, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(3628800, res);
@@ -191,7 +191,7 @@ TEST(3) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.i = 100000;
t.c = 10;
t.s = 1000;
@@ -321,7 +321,7 @@ TEST(4) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -383,7 +383,7 @@ TEST(5) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
@@ -414,7 +414,7 @@ TEST(6) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
@@ -481,7 +481,7 @@ static void TestRoundingMode(VCVTTypes types,
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("res = %d\n", res);
CHECK_EQ(expected, res);
@@ -663,7 +663,7 @@ TEST(8) {
StdoutStream os;
code->Print(os);
#endif
- auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto fn = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -772,7 +772,7 @@ TEST(9) {
StdoutStream os;
code->Print(os);
#endif
- auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto fn = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -877,7 +877,7 @@ TEST(10) {
StdoutStream os;
code->Print(os);
#endif
- auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto fn = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -971,7 +971,7 @@ TEST(11) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
f.Call(&i, 0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0xABCD0001), i.a);
@@ -1096,7 +1096,7 @@ TEST(13) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -1168,7 +1168,7 @@ TEST(14) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.left = base::bit_cast<double>(kHoleNanInt64);
t.right = 1;
t.add_result = 0;
@@ -2172,7 +2172,7 @@ TEST(15) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x21222324;
@@ -2476,7 +2476,7 @@ TEST(16) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x11121300;
@@ -2554,7 +2554,7 @@ TEST(sdiv) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
TEST_SDIV(0, kMinInt, 0);
TEST_SDIV(0, 1024, 0);
TEST_SDIV(1073741824, kMinInt, -2);
@@ -2614,7 +2614,7 @@ TEST(udiv) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
TEST_UDIV(0u, 0, 0);
TEST_UDIV(0u, 1024, 0);
TEST_UDIV(5u, 10, 2);
@@ -2642,7 +2642,7 @@ TEST(smmla) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
f.Call(&r, x, y, z, 0);
@@ -2667,7 +2667,7 @@ TEST(smmul) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
f.Call(&r, x, y, 0, 0);
@@ -2692,7 +2692,7 @@ TEST(sxtb) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
f.Call(&r, x, 0, 0, 0);
@@ -2717,7 +2717,7 @@ TEST(sxtab) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
f.Call(&r, x, y, 0, 0);
@@ -2742,7 +2742,7 @@ TEST(sxth) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
f.Call(&r, x, 0, 0, 0);
@@ -2767,7 +2767,7 @@ TEST(sxtah) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
f.Call(&r, x, y, 0, 0);
@@ -2792,7 +2792,7 @@ TEST(uxtb) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
f.Call(&r, x, 0, 0, 0);
@@ -2817,7 +2817,7 @@ TEST(uxtab) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
f.Call(&r, x, y, 0, 0);
@@ -2842,7 +2842,7 @@ TEST(uxth) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
f.Call(&r, x, 0, 0, 0);
@@ -2867,7 +2867,7 @@ TEST(uxtah) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
f.Call(&r, x, y, 0, 0);
@@ -2910,7 +2910,7 @@ TEST(rbit) {
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
TEST_RBIT(0xFFFFFFFF, 0xFFFFFFFF);
TEST_RBIT(0x00000000, 0x00000000);
TEST_RBIT(0xFFFF0000, 0x0000FFFF);
@@ -2987,7 +2987,7 @@ TEST(code_relative_offset) {
Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
.set_self_reference(code_object)
.Build();
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(21, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(42, res);
@@ -3030,7 +3030,7 @@ TEST(msr_mrs) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_ippii>::FromCode(*code);
+ auto f = GeneratedCode<F_ippii>::FromCode(isolate, *code);
#define CHECK_MSR_MRS(n, z, c, v) \
do { \
@@ -3127,7 +3127,7 @@ TEST(ARMv8_float32_vrintX) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
t.input = input_val; \
@@ -3228,7 +3228,7 @@ TEST(ARMv8_vrintX) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
t.input = input_val; \
@@ -3365,7 +3365,7 @@ TEST(ARMv8_vsel) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_ippii>::FromCode(*code);
+ auto f = GeneratedCode<F_ippii>::FromCode(isolate, *code);
static_assert(kResultPass == -kResultFail);
#define CHECK_VSEL(n, z, c, v, vseleq, vselge, vselgt, vselvs) \
@@ -3456,7 +3456,7 @@ TEST(ARMv8_vminmax_f64) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto f = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
do { \
@@ -3538,7 +3538,7 @@ TEST(ARMv8_vminmax_f32) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto f = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
do { \
@@ -3673,7 +3673,7 @@ static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(
StdoutStream os;
code->Print(os);
#endif
- return GeneratedCode<F_ppiii>::FromCode(*code);
+ return GeneratedCode<F_ppiii>::FromCode(assm.isolate(), *code);
}
TEST(macro_float_minmax_f64) {
@@ -3848,7 +3848,7 @@ TEST(unaligned_loads) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto f = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
#ifndef V8_TARGET_LITTLE_ENDIAN
#error This test assumes a little-endian layout.
@@ -3891,7 +3891,7 @@ TEST(unaligned_stores) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto f = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
#ifndef V8_TARGET_LITTLE_ENDIAN
#error This test assumes a little-endian layout.
@@ -3991,7 +3991,7 @@ TEST(vswp) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(minus_one, t.vswp_d0);
CHECK_EQ(one, t.vswp_d1);
@@ -4203,7 +4203,7 @@ TEST(split_add_immediate) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
uint32_t res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("f() = 0x%x\n", res);
CHECK_EQ(0x12345678, res);
@@ -4223,7 +4223,7 @@ TEST(split_add_immediate) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
uint32_t res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("f() = 0x%x\n", res);
CHECK_EQ(0x12345678, res);
@@ -4246,7 +4246,7 @@ TEST(split_add_immediate) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
uint32_t res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("f() = 0x%x\n", res);
CHECK_EQ(0x12345678, res);
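The test-assembler-arm.cc hunks above are a mechanical migration: GeneratedCode<Signature>::FromCode now takes the owning Isolate as its first argument. A minimal sketch of the updated call pattern, restating TEST(0) from the hunks above (isolate and code are set up by the surrounding test as before):

  // Bind the freshly built Code object to a callable wrapper; the isolate is
  // now passed explicitly as the first argument.
  auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
  int res = reinterpret_cast<int>(f.Call(3, 4, 0, 0, 0));
  CHECK_EQ(7, res);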
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index efcc5ebad1..829b124fd6 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -110,11 +110,21 @@ static void InitializeVM() {
#define BUF_SIZE 8192
#define SETUP() SETUP_SIZE(BUF_SIZE)
-#define INIT_V8() \
- CcTest::InitializeVM(); \
+#define INIT_V8() CcTest::InitializeVM();
+
+// Declare that a test will use an optional feature, which means execution needs
+// to be behind CAN_RUN().
+#define SETUP_FEATURE(feature) \
+ const bool can_run = CpuFeatures::IsSupported(feature); \
+ USE(can_run); \
+ CpuFeatureScope feature_scope(&masm, feature, \
+ CpuFeatureScope::kDontCheckSupported)
#ifdef USE_SIMULATOR
+// The simulator can always run the code even when IsSupported(f) is false.
+#define CAN_RUN() true
+
// Run tests with the simulator.
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
@@ -156,7 +166,8 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() simulator.RunFrom(reinterpret_cast<Instruction*>(code->entry()))
+#define RUN() \
+ simulator.RunFrom(reinterpret_cast<Instruction*>(code->code_entry_point()))
#define END() \
__ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
@@ -171,6 +182,9 @@ static void InitializeVM() {
}
#else // ifdef USE_SIMULATOR.
+
+#define CAN_RUN() can_run
+
// Run the test on real hardware or models.
#define SETUP_SIZE(buf_size) \
Isolate* isolate = CcTest::i_isolate(); \
@@ -203,7 +217,7 @@ static void InitializeVM() {
{ \
/* Reset the scope and thus make the buffer executable. */ \
rw_buffer_scope.reset(); \
- auto f = GeneratedCode<void>::FromCode(*code); \
+ auto f = GeneratedCode<void>::FromCode(isolate, *code); \
f.Call(); \
}
@@ -12407,7 +12421,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
case PushPopByFour:
// Push high-numbered registers first (to the highest addresses).
for (i = reg_count; i >= 4; i -= 4) {
- __ Push<TurboAssembler::kDontStoreLR>(r[i - 1], r[i - 2], r[i - 3],
+ __ Push<MacroAssembler::kDontStoreLR>(r[i - 1], r[i - 2], r[i - 3],
r[i - 4]);
}
// Finish off the leftovers.
@@ -12432,7 +12446,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
case PushPopByFour:
// Pop low-numbered registers first (from the lowest addresses).
for (i = 0; i <= (reg_count-4); i += 4) {
- __ Pop<TurboAssembler::kDontLoadLR>(r[i], r[i + 1], r[i + 2],
+ __ Pop<MacroAssembler::kDontLoadLR>(r[i], r[i + 1], r[i + 2],
r[i + 3]);
}
// Finish off the leftovers.
@@ -12974,7 +12988,7 @@ TEST(copy_double_words_downwards_even) {
__ SlotAddress(x5, 12);
__ SlotAddress(x6, 11);
__ Mov(x7, 12);
- __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+ __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst);
__ Pop(xzr, x4, x5, x6);
__ Pop(x7, x8, x9, x10);
@@ -13028,7 +13042,7 @@ TEST(copy_double_words_downwards_odd) {
__ SlotAddress(x5, 13);
__ SlotAddress(x6, 12);
__ Mov(x7, 13);
- __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+ __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst);
__ Pop(xzr, x4);
__ Pop(x5, x6, x7, x8);
@@ -13084,13 +13098,13 @@ TEST(copy_noop) {
__ SlotAddress(x5, 3);
__ SlotAddress(x6, 2);
__ Mov(x7, 0);
- __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+ __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst);
// dst < src, count == 0
__ SlotAddress(x5, 2);
__ SlotAddress(x6, 3);
__ Mov(x7, 0);
- __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kDstLessThanSrc);
+ __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kDstLessThanSrc);
__ Pop(x1, x2, x3, x4);
__ Pop(x5, x6, x7, x8);
@@ -14230,6 +14244,871 @@ TEST(barriers) {
RUN();
}
+TEST(cas_casa_casl_casal_w) {
+ uint64_t data1 = 0x0123456789abcdef;
+ uint64_t data2 = 0x0123456789abcdef;
+ uint64_t data3 = 0x0123456789abcdef;
+ uint64_t data4 = 0x0123456789abcdef;
+ uint64_t data5 = 0x0123456789abcdef;
+ uint64_t data6 = 0x0123456789abcdef;
+ uint64_t data7 = 0x0123456789abcdef;
+ uint64_t data8 = 0x0123456789abcdef;
+
+ INIT_V8();
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(&data1) + 0);
+ __ Mov(x22, reinterpret_cast<uintptr_t>(&data2) + 0);
+ __ Mov(x23, reinterpret_cast<uintptr_t>(&data3) + 4);
+ __ Mov(x24, reinterpret_cast<uintptr_t>(&data4) + 4);
+ __ Mov(x25, reinterpret_cast<uintptr_t>(&data5) + 0);
+ __ Mov(x26, reinterpret_cast<uintptr_t>(&data6) + 0);
+ __ Mov(x27, reinterpret_cast<uintptr_t>(&data7) + 4);
+ __ Mov(x28, reinterpret_cast<uintptr_t>(&data8) + 4);
+
+ __ Mov(x0, 0xffffffff);
+
+ __ Mov(x1, 0xfedcba9876543210);
+ __ Mov(x2, 0x0123456789abcdef);
+ __ Mov(x3, 0xfedcba9876543210);
+ __ Mov(x4, 0x89abcdef01234567);
+ __ Mov(x5, 0xfedcba9876543210);
+ __ Mov(x6, 0x0123456789abcdef);
+ __ Mov(x7, 0xfedcba9876543210);
+ __ Mov(x8, 0x89abcdef01234567);
+
+ __ Cas(w1, w0, MemOperand(x21));
+ __ Cas(w2, w0, MemOperand(x22));
+ __ Casa(w3, w0, MemOperand(x23));
+ __ Casa(w4, w0, MemOperand(x24));
+ __ Casl(w5, w0, MemOperand(x25));
+ __ Casl(w6, w0, MemOperand(x26));
+ __ Casal(w7, w0, MemOperand(x27));
+ __ Casal(w8, w0, MemOperand(x28));
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(0x89abcdef, x1);
+ CHECK_EQUAL_64(0x89abcdef, x2);
+ CHECK_EQUAL_64(0x01234567, x3);
+ CHECK_EQUAL_64(0x01234567, x4);
+ CHECK_EQUAL_64(0x89abcdef, x5);
+ CHECK_EQUAL_64(0x89abcdef, x6);
+ CHECK_EQUAL_64(0x01234567, x7);
+ CHECK_EQUAL_64(0x01234567, x8);
+
+ CHECK_EQUAL_64(0x0123456789abcdef, data1);
+ CHECK_EQUAL_64(0x01234567ffffffff, data2);
+ CHECK_EQUAL_64(0x0123456789abcdef, data3);
+ CHECK_EQUAL_64(0xffffffff89abcdef, data4);
+ CHECK_EQUAL_64(0x0123456789abcdef, data5);
+ CHECK_EQUAL_64(0x01234567ffffffff, data6);
+ CHECK_EQUAL_64(0x0123456789abcdef, data7);
+ CHECK_EQUAL_64(0xffffffff89abcdef, data8);
+ }
+}
+
+TEST(cas_casa_casl_casal_x) {
+ uint64_t data1 = 0x0123456789abcdef;
+ uint64_t data2 = 0x0123456789abcdef;
+ uint64_t data3 = 0x0123456789abcdef;
+ uint64_t data4 = 0x0123456789abcdef;
+ uint64_t data5 = 0x0123456789abcdef;
+ uint64_t data6 = 0x0123456789abcdef;
+ uint64_t data7 = 0x0123456789abcdef;
+ uint64_t data8 = 0x0123456789abcdef;
+
+ INIT_V8();
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(&data1));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(&data2));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(&data3));
+ __ Mov(x24, reinterpret_cast<uintptr_t>(&data4));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(&data5));
+ __ Mov(x26, reinterpret_cast<uintptr_t>(&data6));
+ __ Mov(x27, reinterpret_cast<uintptr_t>(&data7));
+ __ Mov(x28, reinterpret_cast<uintptr_t>(&data8));
+
+ __ Mov(x0, 0xffffffffffffffff);
+
+ __ Mov(x1, 0xfedcba9876543210);
+ __ Mov(x2, 0x0123456789abcdef);
+ __ Mov(x3, 0xfedcba9876543210);
+ __ Mov(x4, 0x0123456789abcdef);
+ __ Mov(x5, 0xfedcba9876543210);
+ __ Mov(x6, 0x0123456789abcdef);
+ __ Mov(x7, 0xfedcba9876543210);
+ __ Mov(x8, 0x0123456789abcdef);
+
+ __ Cas(x1, x0, MemOperand(x21));
+ __ Cas(x2, x0, MemOperand(x22));
+ __ Casa(x3, x0, MemOperand(x23));
+ __ Casa(x4, x0, MemOperand(x24));
+ __ Casl(x5, x0, MemOperand(x25));
+ __ Casl(x6, x0, MemOperand(x26));
+ __ Casal(x7, x0, MemOperand(x27));
+ __ Casal(x8, x0, MemOperand(x28));
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(0x0123456789abcdef, x1);
+ CHECK_EQUAL_64(0x0123456789abcdef, x2);
+ CHECK_EQUAL_64(0x0123456789abcdef, x3);
+ CHECK_EQUAL_64(0x0123456789abcdef, x4);
+ CHECK_EQUAL_64(0x0123456789abcdef, x5);
+ CHECK_EQUAL_64(0x0123456789abcdef, x6);
+ CHECK_EQUAL_64(0x0123456789abcdef, x7);
+ CHECK_EQUAL_64(0x0123456789abcdef, x8);
+
+ CHECK_EQUAL_64(0x0123456789abcdef, data1);
+ CHECK_EQUAL_64(0xffffffffffffffff, data2);
+ CHECK_EQUAL_64(0x0123456789abcdef, data3);
+ CHECK_EQUAL_64(0xffffffffffffffff, data4);
+ CHECK_EQUAL_64(0x0123456789abcdef, data5);
+ CHECK_EQUAL_64(0xffffffffffffffff, data6);
+ CHECK_EQUAL_64(0x0123456789abcdef, data7);
+ CHECK_EQUAL_64(0xffffffffffffffff, data8);
+ }
+}
+
+TEST(casb_casab_caslb_casalb) {
+ uint32_t data1 = 0x01234567;
+ uint32_t data2 = 0x01234567;
+ uint32_t data3 = 0x01234567;
+ uint32_t data4 = 0x01234567;
+ uint32_t data5 = 0x01234567;
+ uint32_t data6 = 0x01234567;
+ uint32_t data7 = 0x01234567;
+ uint32_t data8 = 0x01234567;
+
+ INIT_V8();
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(&data1) + 0);
+ __ Mov(x22, reinterpret_cast<uintptr_t>(&data2) + 0);
+ __ Mov(x23, reinterpret_cast<uintptr_t>(&data3) + 1);
+ __ Mov(x24, reinterpret_cast<uintptr_t>(&data4) + 1);
+ __ Mov(x25, reinterpret_cast<uintptr_t>(&data5) + 2);
+ __ Mov(x26, reinterpret_cast<uintptr_t>(&data6) + 2);
+ __ Mov(x27, reinterpret_cast<uintptr_t>(&data7) + 3);
+ __ Mov(x28, reinterpret_cast<uintptr_t>(&data8) + 3);
+
+ __ Mov(x0, 0xff);
+
+ __ Mov(x1, 0x76543210);
+ __ Mov(x2, 0x01234567);
+ __ Mov(x3, 0x76543210);
+ __ Mov(x4, 0x67012345);
+ __ Mov(x5, 0x76543210);
+ __ Mov(x6, 0x45670123);
+ __ Mov(x7, 0x76543210);
+ __ Mov(x8, 0x23456701);
+
+ __ Casb(w1, w0, MemOperand(x21));
+ __ Casb(w2, w0, MemOperand(x22));
+ __ Casab(w3, w0, MemOperand(x23));
+ __ Casab(w4, w0, MemOperand(x24));
+ __ Caslb(w5, w0, MemOperand(x25));
+ __ Caslb(w6, w0, MemOperand(x26));
+ __ Casalb(w7, w0, MemOperand(x27));
+ __ Casalb(w8, w0, MemOperand(x28));
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(0x00000067, x1);
+ CHECK_EQUAL_64(0x00000067, x2);
+ CHECK_EQUAL_64(0x00000045, x3);
+ CHECK_EQUAL_64(0x00000045, x4);
+ CHECK_EQUAL_64(0x00000023, x5);
+ CHECK_EQUAL_64(0x00000023, x6);
+ CHECK_EQUAL_64(0x00000001, x7);
+ CHECK_EQUAL_64(0x00000001, x8);
+
+ CHECK_EQUAL_64(0x01234567, data1);
+ CHECK_EQUAL_64(0x012345ff, data2);
+ CHECK_EQUAL_64(0x01234567, data3);
+ CHECK_EQUAL_64(0x0123ff67, data4);
+ CHECK_EQUAL_64(0x01234567, data5);
+ CHECK_EQUAL_64(0x01ff4567, data6);
+ CHECK_EQUAL_64(0x01234567, data7);
+ CHECK_EQUAL_64(0xff234567, data8);
+ }
+}
+
+TEST(cash_casah_caslh_casalh) {
+ uint64_t data1 = 0x0123456789abcdef;
+ uint64_t data2 = 0x0123456789abcdef;
+ uint64_t data3 = 0x0123456789abcdef;
+ uint64_t data4 = 0x0123456789abcdef;
+ uint64_t data5 = 0x0123456789abcdef;
+ uint64_t data6 = 0x0123456789abcdef;
+ uint64_t data7 = 0x0123456789abcdef;
+ uint64_t data8 = 0x0123456789abcdef;
+
+ INIT_V8();
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(&data1) + 0);
+ __ Mov(x22, reinterpret_cast<uintptr_t>(&data2) + 0);
+ __ Mov(x23, reinterpret_cast<uintptr_t>(&data3) + 2);
+ __ Mov(x24, reinterpret_cast<uintptr_t>(&data4) + 2);
+ __ Mov(x25, reinterpret_cast<uintptr_t>(&data5) + 4);
+ __ Mov(x26, reinterpret_cast<uintptr_t>(&data6) + 4);
+ __ Mov(x27, reinterpret_cast<uintptr_t>(&data7) + 6);
+ __ Mov(x28, reinterpret_cast<uintptr_t>(&data8) + 6);
+
+ __ Mov(x0, 0xffff);
+
+ __ Mov(x1, 0xfedcba9876543210);
+ __ Mov(x2, 0x0123456789abcdef);
+ __ Mov(x3, 0xfedcba9876543210);
+ __ Mov(x4, 0xcdef0123456789ab);
+ __ Mov(x5, 0xfedcba9876543210);
+ __ Mov(x6, 0x89abcdef01234567);
+ __ Mov(x7, 0xfedcba9876543210);
+ __ Mov(x8, 0x456789abcdef0123);
+
+ __ Cash(w1, w0, MemOperand(x21));
+ __ Cash(w2, w0, MemOperand(x22));
+ __ Casah(w3, w0, MemOperand(x23));
+ __ Casah(w4, w0, MemOperand(x24));
+ __ Caslh(w5, w0, MemOperand(x25));
+ __ Caslh(w6, w0, MemOperand(x26));
+ __ Casalh(w7, w0, MemOperand(x27));
+ __ Casalh(w8, w0, MemOperand(x28));
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(0x0000cdef, x1);
+ CHECK_EQUAL_64(0x0000cdef, x2);
+ CHECK_EQUAL_64(0x000089ab, x3);
+ CHECK_EQUAL_64(0x000089ab, x4);
+ CHECK_EQUAL_64(0x00004567, x5);
+ CHECK_EQUAL_64(0x00004567, x6);
+ CHECK_EQUAL_64(0x00000123, x7);
+ CHECK_EQUAL_64(0x00000123, x8);
+
+ CHECK_EQUAL_64(0x0123456789abcdef, data1);
+ CHECK_EQUAL_64(0x0123456789abffff, data2);
+ CHECK_EQUAL_64(0x0123456789abcdef, data3);
+ CHECK_EQUAL_64(0x01234567ffffcdef, data4);
+ CHECK_EQUAL_64(0x0123456789abcdef, data5);
+ CHECK_EQUAL_64(0x0123ffff89abcdef, data6);
+ CHECK_EQUAL_64(0x0123456789abcdef, data7);
+ CHECK_EQUAL_64(0xffff456789abcdef, data8);
+ }
+}
+
+TEST(casp_caspa_caspl_caspal_w) {
+ uint64_t data1[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data2[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data3[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data4[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data5[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data6[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data7[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+ uint64_t data8[] = {0x7766554433221100, 0xffeeddccbbaa9988};
+
+ INIT_V8();
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1) + 0);
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2) + 0);
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3) + 8);
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4) + 8);
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5) + 8);
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6) + 8);
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7) + 0);
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8) + 0);
+
+ __ Mov(x0, 0xfff00fff);
+ __ Mov(x1, 0xfff11fff);
+
+ __ Mov(x2, 0x77665544);
+ __ Mov(x3, 0x33221100);
+ __ Mov(x4, 0x33221100);
+ __ Mov(x5, 0x77665544);
+
+ __ Mov(x6, 0xffeeddcc);
+ __ Mov(x7, 0xbbaa9988);
+ __ Mov(x8, 0xbbaa9988);
+ __ Mov(x9, 0xffeeddcc);
+
+ __ Mov(x10, 0xffeeddcc);
+ __ Mov(x11, 0xbbaa9988);
+ __ Mov(x12, 0xbbaa9988);
+ __ Mov(x13, 0xffeeddcc);
+
+ __ Mov(x14, 0x77665544);
+ __ Mov(x15, 0x33221100);
+ __ Mov(x16, 0x33221100);
+ __ Mov(x17, 0x77665544);
+
+ __ Casp(w2, w3, w0, w1, MemOperand(x21));
+ __ Casp(w4, w5, w0, w1, MemOperand(x22));
+ __ Caspa(w6, w7, w0, w1, MemOperand(x23));
+ __ Caspa(w8, w9, w0, w1, MemOperand(x24));
+ __ Caspl(w10, w11, w0, w1, MemOperand(x25));
+ __ Caspl(w12, w13, w0, w1, MemOperand(x26));
+ __ Caspal(w14, w15, w0, w1, MemOperand(x27));
+ __ Caspal(w16, w17, w0, w1, MemOperand(x28));
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(0x33221100, x2);
+ CHECK_EQUAL_64(0x77665544, x3);
+ CHECK_EQUAL_64(0x33221100, x4);
+ CHECK_EQUAL_64(0x77665544, x5);
+ CHECK_EQUAL_64(0xbbaa9988, x6);
+ CHECK_EQUAL_64(0xffeeddcc, x7);
+ CHECK_EQUAL_64(0xbbaa9988, x8);
+ CHECK_EQUAL_64(0xffeeddcc, x9);
+ CHECK_EQUAL_64(0xbbaa9988, x10);
+ CHECK_EQUAL_64(0xffeeddcc, x11);
+ CHECK_EQUAL_64(0xbbaa9988, x12);
+ CHECK_EQUAL_64(0xffeeddcc, x13);
+ CHECK_EQUAL_64(0x33221100, x14);
+ CHECK_EQUAL_64(0x77665544, x15);
+ CHECK_EQUAL_64(0x33221100, x16);
+ CHECK_EQUAL_64(0x77665544, x17);
+
+ CHECK_EQUAL_64(0x7766554433221100, data1[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data1[1]);
+ CHECK_EQUAL_64(0xfff11ffffff00fff, data2[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data2[1]);
+ CHECK_EQUAL_64(0x7766554433221100, data3[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data3[1]);
+ CHECK_EQUAL_64(0x7766554433221100, data4[0]);
+ CHECK_EQUAL_64(0xfff11ffffff00fff, data4[1]);
+ CHECK_EQUAL_64(0x7766554433221100, data5[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data5[1]);
+ CHECK_EQUAL_64(0x7766554433221100, data6[0]);
+ CHECK_EQUAL_64(0xfff11ffffff00fff, data6[1]);
+ CHECK_EQUAL_64(0x7766554433221100, data7[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data7[1]);
+ CHECK_EQUAL_64(0xfff11ffffff00fff, data8[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data8[1]);
+ }
+}
+
+TEST(casp_caspa_caspl_caspal_x) {
+ alignas(kXRegSize * 2)
+ uint64_t data1[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data2[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data3[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data4[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data5[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data6[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data7[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+ alignas(kXRegSize * 2)
+ uint64_t data8[] = {0x7766554433221100, 0xffeeddccbbaa9988,
+ 0xfedcba9876543210, 0x0123456789abcdef};
+
+ INIT_V8();
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1) + 0);
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2) + 0);
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3) + 16);
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4) + 16);
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5) + 16);
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6) + 16);
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7) + 0);
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8) + 0);
+
+ __ Mov(x0, 0xfffffff00fffffff);
+ __ Mov(x1, 0xfffffff11fffffff);
+
+ __ Mov(x2, 0xffeeddccbbaa9988);
+ __ Mov(x3, 0x7766554433221100);
+ __ Mov(x4, 0x7766554433221100);
+ __ Mov(x5, 0xffeeddccbbaa9988);
+
+ __ Mov(x6, 0x0123456789abcdef);
+ __ Mov(x7, 0xfedcba9876543210);
+ __ Mov(x8, 0xfedcba9876543210);
+ __ Mov(x9, 0x0123456789abcdef);
+
+ __ Mov(x10, 0x0123456789abcdef);
+ __ Mov(x11, 0xfedcba9876543210);
+ __ Mov(x12, 0xfedcba9876543210);
+ __ Mov(x13, 0x0123456789abcdef);
+
+ __ Mov(x14, 0xffeeddccbbaa9988);
+ __ Mov(x15, 0x7766554433221100);
+ __ Mov(x16, 0x7766554433221100);
+ __ Mov(x17, 0xffeeddccbbaa9988);
+
+ __ Casp(x2, x3, x0, x1, MemOperand(x21));
+ __ Casp(x4, x5, x0, x1, MemOperand(x22));
+ __ Caspa(x6, x7, x0, x1, MemOperand(x23));
+ __ Caspa(x8, x9, x0, x1, MemOperand(x24));
+ __ Caspl(x10, x11, x0, x1, MemOperand(x25));
+ __ Caspl(x12, x13, x0, x1, MemOperand(x26));
+ __ Caspal(x14, x15, x0, x1, MemOperand(x27));
+ __ Caspal(x16, x17, x0, x1, MemOperand(x28));
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(0x7766554433221100, x2);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, x3);
+ CHECK_EQUAL_64(0x7766554433221100, x4);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, x5);
+
+ CHECK_EQUAL_64(0xfedcba9876543210, x6);
+ CHECK_EQUAL_64(0x0123456789abcdef, x7);
+ CHECK_EQUAL_64(0xfedcba9876543210, x8);
+ CHECK_EQUAL_64(0x0123456789abcdef, x9);
+
+ CHECK_EQUAL_64(0xfedcba9876543210, x10);
+ CHECK_EQUAL_64(0x0123456789abcdef, x11);
+ CHECK_EQUAL_64(0xfedcba9876543210, x12);
+ CHECK_EQUAL_64(0x0123456789abcdef, x13);
+
+ CHECK_EQUAL_64(0x7766554433221100, x14);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, x15);
+ CHECK_EQUAL_64(0x7766554433221100, x16);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, x17);
+
+ CHECK_EQUAL_64(0x7766554433221100, data1[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data1[1]);
+ CHECK_EQUAL_64(0xfedcba9876543210, data1[2]);
+ CHECK_EQUAL_64(0x0123456789abcdef, data1[3]);
+
+ CHECK_EQUAL_64(0xfffffff00fffffff, data2[0]);
+ CHECK_EQUAL_64(0xfffffff11fffffff, data2[1]);
+ CHECK_EQUAL_64(0xfedcba9876543210, data2[2]);
+ CHECK_EQUAL_64(0x0123456789abcdef, data2[3]);
+
+ CHECK_EQUAL_64(0x7766554433221100, data3[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data3[1]);
+ CHECK_EQUAL_64(0xfedcba9876543210, data3[2]);
+ CHECK_EQUAL_64(0x0123456789abcdef, data3[3]);
+
+ CHECK_EQUAL_64(0x7766554433221100, data4[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data4[1]);
+ CHECK_EQUAL_64(0xfffffff00fffffff, data4[2]);
+ CHECK_EQUAL_64(0xfffffff11fffffff, data4[3]);
+
+ CHECK_EQUAL_64(0x7766554433221100, data5[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data5[1]);
+ CHECK_EQUAL_64(0xfedcba9876543210, data5[2]);
+ CHECK_EQUAL_64(0x0123456789abcdef, data5[3]);
+
+ CHECK_EQUAL_64(0x7766554433221100, data6[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data6[1]);
+ CHECK_EQUAL_64(0xfffffff00fffffff, data6[2]);
+ CHECK_EQUAL_64(0xfffffff11fffffff, data6[3]);
+
+ CHECK_EQUAL_64(0x7766554433221100, data7[0]);
+ CHECK_EQUAL_64(0xffeeddccbbaa9988, data7[1]);
+ CHECK_EQUAL_64(0xfedcba9876543210, data7[2]);
+ CHECK_EQUAL_64(0x0123456789abcdef, data7[3]);
+
+ CHECK_EQUAL_64(0xfffffff00fffffff, data8[0]);
+ CHECK_EQUAL_64(0xfffffff11fffffff, data8[1]);
+ CHECK_EQUAL_64(0xfedcba9876543210, data8[2]);
+ CHECK_EQUAL_64(0x0123456789abcdef, data8[3]);
+ }
+}
+
+typedef void (MacroAssembler::*AtomicMemoryLoadSignature)(
+ const Register& rs, const Register& rt, const MemOperand& src);
+typedef void (MacroAssembler::*AtomicMemoryStoreSignature)(
+ const Register& rs, const MemOperand& src);
+
+static void AtomicMemoryWHelper(AtomicMemoryLoadSignature* load_funcs,
+ AtomicMemoryStoreSignature* store_funcs,
+ uint64_t arg1, uint64_t arg2, uint64_t expected,
+ uint64_t result_mask) {
+ uint64_t data0[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data1[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data2[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data3[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data4[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data5[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x20, reinterpret_cast<uintptr_t>(data0));
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3));
+
+ __ Mov(x0, arg1);
+ __ Mov(x1, arg1);
+ __ Mov(x2, arg1);
+ __ Mov(x3, arg1);
+
+ (masm.*(load_funcs[0]))(w0, w10, MemOperand(x20));
+ (masm.*(load_funcs[1]))(w1, w11, MemOperand(x21));
+ (masm.*(load_funcs[2]))(w2, w12, MemOperand(x22));
+ (masm.*(load_funcs[3]))(w3, w13, MemOperand(x23));
+
+ if (store_funcs != NULL) {
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5));
+ __ Mov(x4, arg1);
+ __ Mov(x5, arg1);
+
+ (masm.*(store_funcs[0]))(w4, MemOperand(x24));
+ (masm.*(store_funcs[1]))(w5, MemOperand(x25));
+ }
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ uint64_t stored_value = arg2 & result_mask;
+ CHECK_EQUAL_64(stored_value, x10);
+ CHECK_EQUAL_64(stored_value, x11);
+ CHECK_EQUAL_64(stored_value, x12);
+ CHECK_EQUAL_64(stored_value, x13);
+
+    // The data fields already contain arg2, so only the bits masked by
+    // result_mask are overwritten.
+ uint64_t final_expected = (arg2 & ~result_mask) | (expected & result_mask);
+ CHECK_EQUAL_64(final_expected, data0[0]);
+ CHECK_EQUAL_64(final_expected, data1[0]);
+ CHECK_EQUAL_64(final_expected, data2[0]);
+ CHECK_EQUAL_64(final_expected, data3[0]);
+
+ if (store_funcs != NULL) {
+ CHECK_EQUAL_64(final_expected, data4[0]);
+ CHECK_EQUAL_64(final_expected, data5[0]);
+ }
+ }
+}
+
+static void AtomicMemoryXHelper(AtomicMemoryLoadSignature* load_funcs,
+ AtomicMemoryStoreSignature* store_funcs,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t expected) {
+ uint64_t data0[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data1[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data2[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data3[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data4[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+ uint64_t data5[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+
+ SETUP();
+ SETUP_FEATURE(LSE);
+
+ START();
+
+ __ Mov(x20, reinterpret_cast<uintptr_t>(data0));
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3));
+
+ __ Mov(x0, arg1);
+ __ Mov(x1, arg1);
+ __ Mov(x2, arg1);
+ __ Mov(x3, arg1);
+
+ (masm.*(load_funcs[0]))(x0, x10, MemOperand(x20));
+ (masm.*(load_funcs[1]))(x1, x11, MemOperand(x21));
+ (masm.*(load_funcs[2]))(x2, x12, MemOperand(x22));
+ (masm.*(load_funcs[3]))(x3, x13, MemOperand(x23));
+
+ if (store_funcs != NULL) {
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5));
+ __ Mov(x4, arg1);
+ __ Mov(x5, arg1);
+
+ (masm.*(store_funcs[0]))(x4, MemOperand(x24));
+ (masm.*(store_funcs[1]))(x5, MemOperand(x25));
+ }
+
+ END();
+
+ if (CAN_RUN()) {
+ RUN();
+
+ CHECK_EQUAL_64(arg2, x10);
+ CHECK_EQUAL_64(arg2, x11);
+ CHECK_EQUAL_64(arg2, x12);
+ CHECK_EQUAL_64(arg2, x13);
+
+ CHECK_EQUAL_64(expected, data0[0]);
+ CHECK_EQUAL_64(expected, data1[0]);
+ CHECK_EQUAL_64(expected, data2[0]);
+ CHECK_EQUAL_64(expected, data3[0]);
+
+ if (store_funcs != NULL) {
+ CHECK_EQUAL_64(expected, data4[0]);
+ CHECK_EQUAL_64(expected, data5[0]);
+ }
+ }
+}
+
+// clang-format off
+#define MAKE_LOADS(NAME) \
+ {&MacroAssembler::Ld##NAME, \
+ &MacroAssembler::Ld##NAME##a, \
+ &MacroAssembler::Ld##NAME##l, \
+ &MacroAssembler::Ld##NAME##al}
+#define MAKE_STORES(NAME) \
+ {&MacroAssembler::St##NAME, &MacroAssembler::St##NAME##l}
+
+#define MAKE_B_LOADS(NAME) \
+ {&MacroAssembler::Ld##NAME##b, \
+ &MacroAssembler::Ld##NAME##ab, \
+ &MacroAssembler::Ld##NAME##lb, \
+ &MacroAssembler::Ld##NAME##alb}
+#define MAKE_B_STORES(NAME) \
+ {&MacroAssembler::St##NAME##b, &MacroAssembler::St##NAME##lb}
+
+#define MAKE_H_LOADS(NAME) \
+ {&MacroAssembler::Ld##NAME##h, \
+ &MacroAssembler::Ld##NAME##ah, \
+ &MacroAssembler::Ld##NAME##lh, \
+ &MacroAssembler::Ld##NAME##alh}
+#define MAKE_H_STORES(NAME) \
+ {&MacroAssembler::St##NAME##h, &MacroAssembler::St##NAME##lh}
+// clang-format on
+
+TEST(atomic_memory_add) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(add);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(add);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(add);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(add);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(add);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(add);
+
+ // The arguments are chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t arg1 = 0x0100001000100101;
+ uint64_t arg2 = 0x0200002000200202;
+ uint64_t expected = arg1 + arg2;
+
+ INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
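The W-sized checks above compare memory against (arg2 & ~result_mask) | (expected & result_mask): a byte/halfword/word atomic only read-modify-writes its own lane, and the rest of the in-memory doubleword must keep its original contents. A self-contained sketch of that arithmetic for the byte case — the 0xff assigned to kByteMask here is an assumption mirroring the mask name used by the tests:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kByteMask = 0xff;           // assumed mask value
  const uint64_t arg1 = 0x0100001000100101;  // register operand
  const uint64_t arg2 = 0x0200002000200202;  // initial memory contents
  const uint64_t expected = arg1 + arg2;     // full-width result of the op

  // Only the low byte is updated by the byte-sized form; the rest of the
  // doubleword is preserved.
  const uint64_t final_expected =
      (arg2 & ~kByteMask) | (expected & kByteMask);
  std::printf("final_expected = %016" PRIx64 "\n", final_expected);
  return 0;
}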
+TEST(atomic_memory_clr) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(clr);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(clr);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(clr);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(clr);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(clr);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(clr);
+
+ uint64_t arg1 = 0x0300003000300303;
+ uint64_t arg2 = 0x0500005000500505;
+ uint64_t expected = arg2 & ~arg1;
+
+ INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_eor) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(eor);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(eor);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(eor);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(eor);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(eor);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(eor);
+
+ uint64_t arg1 = 0x0300003000300303;
+ uint64_t arg2 = 0x0500005000500505;
+ uint64_t expected = arg1 ^ arg2;
+
+ INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_set) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(set);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(set);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(set);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(set);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(set);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(set);
+
+ uint64_t arg1 = 0x0300003000300303;
+ uint64_t arg2 = 0x0500005000500505;
+  uint64_t expected = arg1 | arg2;
+
+  INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_smax) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(smax);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(smax);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(smax);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(smax);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(smax);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(smax);
+
+ uint64_t arg1 = 0x8100000080108181;
+ uint64_t arg2 = 0x0100001000100101;
+ uint64_t expected = 0x0100001000100101;
+
+ INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_smin) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(smin);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(smin);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(smin);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(smin);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(smin);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(smin);
+
+ uint64_t arg1 = 0x8100000080108181;
+ uint64_t arg2 = 0x0100001000100101;
+ uint64_t expected = 0x8100000080108181;
+
+ INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_umax) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(umax);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(umax);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(umax);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(umax);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(umax);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(umax);
+
+ uint64_t arg1 = 0x8100000080108181;
+ uint64_t arg2 = 0x0100001000100101;
+  uint64_t expected = 0x8100000080108181;
+
+  INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_umin) {
+ AtomicMemoryLoadSignature loads[] = MAKE_LOADS(umin);
+ AtomicMemoryStoreSignature stores[] = MAKE_STORES(umin);
+ AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(umin);
+ AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(umin);
+ AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(umin);
+ AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(umin);
+
+ uint64_t arg1 = 0x8100000080108181;
+ uint64_t arg2 = 0x0100001000100101;
+ uint64_t expected = 0x0100001000100101;
+
+ INIT_V8();
+
+ AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
+}
+
+TEST(atomic_memory_swp) {
+ AtomicMemoryLoadSignature loads[] = {
+ &MacroAssembler::Swp, &MacroAssembler::Swpa, &MacroAssembler::Swpl,
+ &MacroAssembler::Swpal};
+ AtomicMemoryLoadSignature b_loads[] = {
+ &MacroAssembler::Swpb, &MacroAssembler::Swpab, &MacroAssembler::Swplb,
+ &MacroAssembler::Swpalb};
+ AtomicMemoryLoadSignature h_loads[] = {
+ &MacroAssembler::Swph, &MacroAssembler::Swpah, &MacroAssembler::Swplh,
+ &MacroAssembler::Swpalh};
+
+ uint64_t arg1 = 0x0100001000100101;
+ uint64_t arg2 = 0x0200002000200202;
+ uint64_t expected = 0x0100001000100101;
+
+ INIT_V8();
+
+  // SWP functions have signatures equivalent to the Atomic Memory LD
+  // functions, so we can reuse the same helpers; SWP has no ST alias, hence
+  // the NULL store lists below.
+ AtomicMemoryWHelper(b_loads, NULL, arg1, arg2, expected, kByteMask);
+ AtomicMemoryWHelper(h_loads, NULL, arg1, arg2, expected, kHalfWordMask);
+ AtomicMemoryWHelper(loads, NULL, arg1, arg2, expected, kWordMask);
+ AtomicMemoryXHelper(loads, NULL, arg1, arg2, expected);
+}
+
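As the comment in the SWP test notes, the load helpers are reused with NULL store lists. A hypothetical model of a single swap, to make the expected values easier to follow — ModelSwp is illustration only, not a V8 API: the destination register receives the old memory contents and memory receives the source register, which is why the memory words are checked against arg1 while the result registers are checked against arg2.

#include <cstdint>
#include <cstdio>

// Illustration-only model of SWP: rt <- [mem], [mem] <- rs.
void ModelSwp(uint64_t rs, uint64_t* rt, uint64_t* mem) {
  uint64_t old_mem = *mem;
  *mem = rs;      // memory receives the source register (arg1)
  *rt = old_mem;  // destination register receives the old memory value (arg2)
}

int main() {
  uint64_t mem = 0x0200002000200202;  // arg2: initial memory contents
  uint64_t rt = 0;
  ModelSwp(0x0100001000100101 /* arg1 */, &rt, &mem);
  std::printf("mem = %016llx, rt = %016llx\n",
              static_cast<unsigned long long>(mem),
              static_cast<unsigned long long>(rt));
  return 0;
}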
TEST(process_nan_double) {
INIT_V8();
// Make sure that NaN propagation works correctly.
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 644b2f51d6..3996b71cec 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -68,7 +68,7 @@ TEST(AssemblerIa320) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
auto res = f.Call(3, 4);
::printf("f() = %d\n", res);
CHECK_EQ(7, res);
@@ -106,7 +106,7 @@ TEST(AssemblerIa321) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int res = f.Call(100);
::printf("f() = %d\n", res);
CHECK_EQ(5050, res);
@@ -147,7 +147,7 @@ TEST(AssemblerIa322) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int res = f.Call(10);
::printf("f() = %d\n", res);
CHECK_EQ(3628800, res);
@@ -176,7 +176,7 @@ TEST(AssemblerIa323) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
int res = f.Call(-3.1415f);
::printf("f() = %d\n", res);
CHECK_EQ(-3, res);
@@ -205,7 +205,7 @@ TEST(AssemblerIa324) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F4>::FromCode(*code);
+ auto f = GeneratedCode<F4>::FromCode(isolate, *code);
int res = f.Call(2.718281828);
::printf("f() = %d\n", res);
CHECK_EQ(2, res);
@@ -229,7 +229,7 @@ TEST(AssemblerIa325) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
int res = f.Call();
CHECK_EQ(42, res);
}
@@ -266,7 +266,7 @@ TEST(AssemblerIa326) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F5>::FromCode(*code);
+ auto f = GeneratedCode<F5>::FromCode(isolate, *code);
double res = f.Call(2.2, 1.1);
::printf("f() = %f\n", res);
CHECK(2.29 < res && res < 2.31);
@@ -298,7 +298,7 @@ TEST(AssemblerIa328) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F6>::FromCode(*code);
+ auto f = GeneratedCode<F6>::FromCode(isolate, *code);
double res = f.Call(12);
::printf("f() = %f\n", res);
@@ -379,7 +379,7 @@ TEST(AssemblerMultiByteNop) {
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
CHECK(code->IsCode());
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
int res = f.Call();
CHECK_EQ(42, res);
}
@@ -429,7 +429,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
int res = f.Call();
args.GetReturnValue().Set(v8::Integer::New(CcTest::isolate(), res));
}
@@ -498,7 +498,7 @@ TEST(AssemblerIa32Extractps) {
code->Print(os);
#endif
- auto f = GeneratedCode<F4>::FromCode(*code);
+ auto f = GeneratedCode<F4>::FromCode(isolate, *code);
uint64_t value1 = 0x1234'5678'8765'4321;
CHECK_EQ(0x12345678, f.Call(base::uint64_to_double(value1)));
uint64_t value2 = 0x8765'4321'1234'5678;
@@ -538,7 +538,7 @@ TEST(AssemblerIa32SSE) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
CHECK_EQ(2, f.Call(1.0, 2.0));
}
@@ -571,7 +571,7 @@ TEST(AssemblerIa32SSE3) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
CHECK_EQ(4, f.Call(1.0, 2.0));
}
@@ -799,7 +799,7 @@ TEST(AssemblerX64FMA_sd) {
code->Print(os);
#endif
- auto f = GeneratedCode<F9>::FromCode(*code);
+ auto f = GeneratedCode<F9>::FromCode(isolate, *code);
CHECK_EQ(
0, f.Call(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
}
@@ -1028,7 +1028,7 @@ TEST(AssemblerX64FMA_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F10>::FromCode(*code);
+ auto f = GeneratedCode<F10>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call(9.26621069e-05f, -2.4607749f, -1.09587872f));
}
@@ -1136,7 +1136,7 @@ TEST(AssemblerIa32BMI1) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -1184,7 +1184,7 @@ TEST(AssemblerIa32LZCNT) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -1232,7 +1232,7 @@ TEST(AssemblerIa32POPCNT) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -1378,7 +1378,7 @@ TEST(AssemblerIa32BMI2) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -1421,7 +1421,7 @@ TEST(AssemblerIa32JumpTables1) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int res = f.Call(i);
::printf("f(%d) = %d\n", i, res);
@@ -1469,7 +1469,7 @@ TEST(AssemblerIa32JumpTables2) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int res = f.Call(i);
::printf("f(%d) = %d\n", i, res);
@@ -1514,7 +1514,7 @@ TEST(Regress621926) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(1, f.Call());
}
diff --git a/deps/v8/test/cctest/test-assembler-loong64.cc b/deps/v8/test/cctest/test-assembler-loong64.cc
index 0bc18b5fd2..142b746da3 100644
--- a/deps/v8/test/cctest/test-assembler-loong64.cc
+++ b/deps/v8/test/cctest/test-assembler-loong64.cc
@@ -66,7 +66,7 @@ TEST(LA0) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0, 0, 0, 0));
CHECK_EQ(0xABCL, res);
}
@@ -100,7 +100,7 @@ TEST(LA1) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275L, res);
}
@@ -146,7 +146,7 @@ TEST(LA2) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -275,7 +275,7 @@ TEST(LA3) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -395,7 +395,7 @@ TEST(LA4) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -510,7 +510,7 @@ TEST(LA5) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -608,7 +608,7 @@ TEST(LA6) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.si1 = 0x11223344;
t.si2 = 0x99AABBCC;
t.si3 = 0x1122334455667788;
@@ -741,7 +741,7 @@ TEST(LA7) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.si1 = 0x11223344;
t.si2 = 0x99AABBCC;
t.si3 = 0x1122334455667788;
@@ -796,7 +796,7 @@ TEST(LDPTR_STPTR) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test[0] = 0x11223344;
test[1] = 0x99AABBCC;
test[2] = 0x1122334455667788;
@@ -962,7 +962,7 @@ TEST(LA8) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
@@ -1160,7 +1160,7 @@ TEST(LA9) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.input = 0x51F4B764A26E7412;
f.Call(&t, 0, 0, 0, 0);
@@ -1325,7 +1325,7 @@ TEST(LA10) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.si1 = 0x51F4B764A26E7412;
t.si2 = 0x81F25A87C423B891;
f.Call(&t, 0, 0, 0, 0);
@@ -1513,7 +1513,7 @@ TEST(LA11) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.si1 = 0x10C021098B710CDE;
t.si2 = 0xFB8017FF781A15C3;
f.Call(&t, 0, 0, 0, 0);
@@ -1593,7 +1593,7 @@ uint64_t run_beq(int64_t value1, int64_t value2, int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
return res;
@@ -1667,7 +1667,7 @@ uint64_t run_bne(int64_t value1, int64_t value2, int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
return res;
@@ -1741,7 +1741,7 @@ uint64_t run_blt(int64_t value1, int64_t value2, int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
return res;
@@ -1816,7 +1816,7 @@ uint64_t run_bge(uint64_t value1, uint64_t value2, int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
return res;
@@ -1890,7 +1890,7 @@ uint64_t run_bltu(int64_t value1, int64_t value2, int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
return res;
@@ -1964,7 +1964,7 @@ uint64_t run_bgeu(int64_t value1, int64_t value2, int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
return res;
@@ -2038,7 +2038,7 @@ uint64_t run_beqz(int64_t value, int32_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
return res;
@@ -2112,7 +2112,7 @@ uint64_t run_bnez_b(int64_t value, int32_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
return res;
@@ -2187,7 +2187,7 @@ uint64_t run_bl(int32_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
@@ -2326,7 +2326,7 @@ TEST(PCADD) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -2376,7 +2376,7 @@ uint64_t run_jirl(int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
@@ -2495,7 +2495,7 @@ TEST(LA12) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
// Double test values.
t.a = 1.5e14;
t.b = -2.75e11;
@@ -2607,7 +2607,7 @@ TEST(LA13) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
// Float test values.
t.a = 1.5e6;
t.b = -2.75e4;
@@ -2868,7 +2868,7 @@ TEST(FCMP_COND) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.dTrue = 1234.0;
test.dFalse = 0.0;
test.fTrue = 12.0;
@@ -3060,7 +3060,7 @@ TEST(FCVT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.fcsr = kRoundToNearest;
test.fcvt_d_s_in = -0.51;
@@ -3136,7 +3136,7 @@ TEST(FFINT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.fcsr = kRoundToNearest;
test.ffint_s_w_in = -1;
@@ -3273,7 +3273,7 @@ TEST(FTINT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
@@ -3345,7 +3345,7 @@ TEST(FTINTRM) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_d[i];
test.b = inputs_s[i];
@@ -3414,7 +3414,7 @@ TEST(FTINTRP) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_d[i];
test.b = inputs_s[i];
@@ -3483,7 +3483,7 @@ TEST(FTINTRZ) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_d[i];
test.b = inputs_s[i];
@@ -3552,7 +3552,7 @@ TEST(FTINTRNE) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_d[i];
test.b = inputs_s[i];
@@ -3758,7 +3758,7 @@ TEST(FRINT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
@@ -3824,7 +3824,7 @@ TEST(FMOV) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -3878,7 +3878,7 @@ TEST(LA14) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -3951,7 +3951,7 @@ uint64_t run_bceqz(int fcc_value, int32_t offset) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
@@ -4032,7 +4032,7 @@ uint64_t run_bcnez(int fcc_value, int32_t offset) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
return res;
@@ -4118,7 +4118,7 @@ TEST(jump_tables1) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -4181,7 +4181,7 @@ TEST(jump_tables2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -4251,7 +4251,7 @@ TEST(jump_tables3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
Handle<Object> result(
Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
@@ -4286,7 +4286,7 @@ uint64_t run_li_macro(int64_t imm, LiFlags mode, int32_t num_instr = 0) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -4427,7 +4427,7 @@ TEST(FMIN_FMAX) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -4513,7 +4513,7 @@ TEST(FMINA_FMAXA) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -4566,7 +4566,7 @@ TEST(FADD) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.a = 2.0;
test.b = 3.0;
test.d = 2.0;
@@ -4659,7 +4659,7 @@ TEST(FSUB) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
@@ -4717,7 +4717,7 @@ TEST(FMUL) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
@@ -4765,7 +4765,7 @@ TEST(FDIV) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&test, 0, 0, 0, 0);
const int test_size = 3;
// clang-format off
@@ -4857,7 +4857,7 @@ TEST(FABS) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.a = -2.0;
test.b = -2.0;
f.Call(&test, 0, 0, 0, 0);
@@ -4972,7 +4972,7 @@ void helper_fmadd_fmsub_fnmadd_fnmsub(F func) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
TestCaseMaddMsub<T> tc;
@@ -5084,8 +5084,9 @@ TEST(FSQRT_FRSQRT_FRECIP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc,
-CodeKind::STUB).Build(); auto f = GeneratedCode<F3>::FromCode(*code);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
float f1;
@@ -5140,7 +5141,7 @@ TEST(LA15) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
f.Call(1, 1, 0, 0, 0);
}
@@ -5168,7 +5169,7 @@ TEST(Trampoline) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
CHECK_EQ(0, res);
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 6e57ca3308..fecd5779b2 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -65,7 +65,7 @@ TEST(MIPS0) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0xABCL, res);
}
@@ -100,7 +100,7 @@ TEST(MIPS1) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275L, res);
}
@@ -245,7 +245,7 @@ TEST(MIPS2) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -346,7 +346,7 @@ TEST(MIPS3) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
// Double test values.
t.a = 1.5e14;
t.b = 2.75e11;
@@ -438,7 +438,7 @@ TEST(MIPS4) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.a = 1.5e22;
t.b = 2.75e11;
t.c = 17.17;
@@ -502,7 +502,7 @@ TEST(MIPS5) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.a = 1.5e4;
t.b = 2.75e8;
t.i = 12345678;
@@ -570,7 +570,7 @@ TEST(MIPS6) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
f.Call(&t, 0, 0, 0, 0);
@@ -656,7 +656,7 @@ TEST(MIPS7) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.a = 1.5e14;
t.b = 2.75e11;
t.c = 2.0;
@@ -752,7 +752,7 @@ TEST(MIPS8) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
@@ -873,7 +873,7 @@ TEST(MIPS10) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.a = 2.147483647e9; // 0x7FFFFFFF -> 0x41DFFFFFFFC00000 as double.
t.b_long_hi = 0x000000FF; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
t.b_long_lo = 0x00FF00FF;
@@ -1007,7 +1007,7 @@ TEST(MIPS11) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
@@ -1131,7 +1131,7 @@ TEST(MIPS12) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.x = 1;
t.y = 2;
t.y1 = 3;
@@ -1184,7 +1184,7 @@ TEST(MIPS13) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.cvt_big_in = 0xFFFFFFFF;
t.cvt_small_in = 333;
@@ -1304,7 +1304,7 @@ TEST(MIPS14) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.round_up_in = 123.51;
t.round_down_in = 123.49;
@@ -1432,7 +1432,7 @@ TEST(MIPS16) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.ui = 0x44332211;
t.si = 0x99AABBCC;
t.r1 = 0x5555555555555555;
@@ -1559,7 +1559,7 @@ TEST(seleqz_selnez) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&test, 0, 0, 0, 0);
@@ -1674,7 +1674,7 @@ TEST(min_max) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -1782,7 +1782,7 @@ TEST(rint_d) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -1829,7 +1829,7 @@ TEST(sel) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
const int test_size = 3;
const int input_size = 5;
@@ -1961,7 +1961,7 @@ TEST(rint_s) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2046,7 +2046,7 @@ TEST(mina_maxa) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -2127,7 +2127,7 @@ TEST(trunc_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -2207,7 +2207,7 @@ TEST(movz_movn) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
@@ -2307,7 +2307,7 @@ TEST(movt_movd) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&test, 0, 0, 0, 0);
CHECK_EQ(test.dstf, outputs_S[i]);
@@ -2393,7 +2393,7 @@ TEST(cvt_w_d) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
@@ -2460,7 +2460,7 @@ TEST(trunc_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -2529,7 +2529,7 @@ TEST(round_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -2599,7 +2599,7 @@ TEST(round_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -2671,7 +2671,7 @@ TEST(sub) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
@@ -2743,7 +2743,7 @@ TEST(sqrt_rsqrt_recip) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
float f1;
@@ -2821,7 +2821,7 @@ TEST(neg) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
@@ -2879,7 +2879,7 @@ TEST(mul) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
@@ -2934,7 +2934,7 @@ TEST(mov) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
@@ -3001,7 +3001,7 @@ TEST(floor_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -3071,7 +3071,7 @@ TEST(floor_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -3142,7 +3142,7 @@ TEST(ceil_w) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -3212,7 +3212,7 @@ TEST(ceil_l) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
@@ -3282,7 +3282,7 @@ TEST(jump_tables1) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -3347,7 +3347,7 @@ TEST(jump_tables2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -3422,7 +3422,7 @@ TEST(jump_tables3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
Handle<Object> result(
Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
@@ -3492,7 +3492,7 @@ TEST(BITSWAP) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.r1 = 0x00102100781A15C3;
t.r2 = 0x001021008B71FCDE;
t.r3 = 0xFF8017FF781A15C3;
@@ -3634,7 +3634,7 @@ TEST(class_fmt) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
// Double test values.
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
@@ -3727,7 +3727,7 @@ TEST(ABS) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.a = -2.0;
test.b = -2.0;
f.Call(&test, 0, 0, 0, 0);
@@ -3820,7 +3820,7 @@ TEST(ADD_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.a = 2.0;
test.b = 3.0;
test.fa = 2.0;
@@ -3974,7 +3974,7 @@ TEST(C_COND_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
test.fOp1 = 2.0;
@@ -4174,7 +4174,7 @@ TEST(CMP_COND_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
uint32_t fTrue = 0xFFFFFFFF;
@@ -4352,7 +4352,7 @@ TEST(CVT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
test.cvt_d_s_in = -0.51;
test.cvt_d_w_in = -1;
@@ -4523,7 +4523,7 @@ TEST(DIV_FMT) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&test, 0, 0, 0, 0);
@@ -4615,7 +4615,7 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F4>::FromCode(*code);
+ auto f = GeneratedCode<F4>::FromCode(isolate, *code);
uint64_t res =
reinterpret_cast<uint64_t>(f.Call(rs_value, rt_value, 0, 0, 0));
@@ -4670,7 +4670,7 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F4>::FromCode(*code);
+ auto f = GeneratedCode<F4>::FromCode(isolate, *code);
uint64_t res =
reinterpret_cast<uint64_t>(f.Call(rs_value, rt_value, 0, 0, 0));
@@ -4730,8 +4730,8 @@ uint64_t run_aluipc(int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
- PC = (uint64_t)code->entry(); // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
+ PC = (uint64_t)code->code_entry_point(); // Set the program counter.
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -4783,8 +4783,8 @@ uint64_t run_auipc(int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
- PC = (uint64_t)code->entry(); // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
+ PC = (uint64_t)code->code_entry_point(); // Set the program counter.
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -4837,7 +4837,7 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -4861,7 +4861,7 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -4885,7 +4885,7 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -4909,7 +4909,7 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5014,7 +5014,7 @@ uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5224,7 +5224,7 @@ uint64_t run_lwpc(int offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5301,7 +5301,7 @@ uint64_t run_lwupc(int offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5381,7 +5381,7 @@ uint64_t run_jic(int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5452,7 +5452,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
@@ -5560,7 +5560,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
if (branched) {
@@ -5728,7 +5728,7 @@ uint64_t run_jialc(int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5779,8 +5779,8 @@ uint64_t run_addiupc(int32_t imm19) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
- PC = (uint64_t)code->entry(); // Set the program counter.
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
+ PC = (uint64_t)code->code_entry_point(); // Set the program counter.
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5854,7 +5854,7 @@ uint64_t run_ldpc(int offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -5942,7 +5942,7 @@ int64_t run_bc(int32_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6023,7 +6023,7 @@ int64_t run_balc(int32_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6072,7 +6072,7 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F4>::FromCode(*code);
+ auto f = GeneratedCode<F4>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt_value, 0, 0, 0, 0));
@@ -6129,7 +6129,7 @@ uint64_t run_bal(int16_t offset) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6182,7 +6182,7 @@ TEST(Trampoline) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
CHECK_EQ(0, res);
@@ -6195,11 +6195,11 @@ TEST(Trampoline_with_massive_unbound_labels) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const int kNumSlots =
- TurboAssembler::kMaxBranchOffset / TurboAssembler::kTrampolineSlotsSize;
+ MacroAssembler::kMaxBranchOffset / MacroAssembler::kTrampolineSlotsSize;
Label labels[kNumSlots];
{
- TurboAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm);
+ MacroAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm);
for (int i = 0; i < kNumSlots; i++) {
__ Branch(&labels[i]);
}
@@ -6218,12 +6218,12 @@ TEST(Call_with_trampoline) {
int next_buffer_check_ = v8_flags.force_long_branches
? kMaxInt
- : TurboAssembler::kMaxBranchOffset -
- TurboAssembler::kTrampolineSlotsSize * 16;
+ : MacroAssembler::kMaxBranchOffset -
+ MacroAssembler::kTrampolineSlotsSize * 16;
Label done;
__ Branch(&done);
- next_buffer_check_ -= TurboAssembler::kTrampolineSlotsSize;
+ next_buffer_check_ -= MacroAssembler::kTrampolineSlotsSize;
int num_nops = (next_buffer_check_ - __ pc_offset()) / kInstrSize - 1;
for (int i = 0; i < num_nops; i++) {
@@ -6309,7 +6309,7 @@ void helper_madd_msub_maddf_msubf(F func) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
TestCaseMaddMsub<T> tc;
@@ -6395,7 +6395,7 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6478,7 +6478,7 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6571,7 +6571,7 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6631,7 +6631,7 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6699,7 +6699,7 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6785,7 +6785,7 @@ TEST(MSA_fill_copy) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&t, 0, 0, 0, 0);
@@ -6848,7 +6848,7 @@ TEST(MSA_fill_copy_2) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F5>::FromCode(*code);
+ auto f = GeneratedCode<F5>::FromCode(isolate, *code);
f.Call(&t[0], &t[1], 0, 0, 0);
@@ -6901,7 +6901,7 @@ TEST(MSA_fill_copy_3) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F5>::FromCode(*code);
+ auto f = GeneratedCode<F5>::FromCode(isolate, *code);
f.Call(&t[0], &t[1], 0, 0, 0);
@@ -6950,7 +6950,7 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(w, 0, 0, 0, 0);
}
@@ -7060,7 +7060,7 @@ void run_msa_ctc_cfc(uint64_t value) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
uint64_t res;
f.Call(&res, 0, 0, 0, 0);
@@ -7110,7 +7110,7 @@ TEST(MSA_move_v) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&t[i].wd_lo, 0, 0, 0, 0);
CHECK_EQ(t[i].ws_lo, t[i].wd_lo);
CHECK_EQ(t[i].ws_hi, t[i].wd_hi);
@@ -7155,7 +7155,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res[0], 0, 0, 0, 0);
GenerateExpectedResult(reinterpret_cast<uint8_t*>(&t[i].ws_lo),
reinterpret_cast<uint8_t*>(&t[i].wd_lo));
@@ -7315,7 +7315,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -7519,7 +7519,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -7945,7 +7945,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -8995,7 +8995,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -9083,7 +9083,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -9556,7 +9556,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -9634,7 +9634,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F5>::FromCode(*code);
+ auto f = GeneratedCode<F5>::FromCode(isolate, *code);
f.Call(in_array_middle, out_array_middle, 0, 0, 0);
@@ -9713,7 +9713,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
@@ -10720,7 +10720,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&res, 0, 0, 0, 0);
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 0ee531a8bc..b1c4a6b90a 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -65,7 +65,7 @@ TEST(0) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(7, static_cast<int>(res));
@@ -101,7 +101,7 @@ TEST(1) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(100, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(5050, static_cast<int>(res));
@@ -149,7 +149,7 @@ TEST(2) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(10, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(3628800, static_cast<int>(res));
@@ -218,7 +218,7 @@ TEST(3) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.i = 100000;
t.c = 10;
t.s = 1000;
@@ -333,7 +333,7 @@ TEST(4) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
@@ -393,7 +393,7 @@ TEST(5) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
@@ -428,7 +428,7 @@ TEST(6) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
@@ -503,7 +503,7 @@ static void TestRoundingMode(VCVTTypes types,
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
int res = reinterpret_cast<int>(f.Call(0, 0, 0, 0, 0));
::printf("res = %d\n", res);
CHECK_EQ(expected, res);
@@ -690,7 +690,7 @@ TEST(8) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto fn = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -805,7 +805,7 @@ TEST(9) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto fn = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -916,7 +916,7 @@ TEST(10) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto fn = GeneratedCode<F_ppiii>::FromCode(*code);
+ auto fn = GeneratedCode<F_ppiii>::FromCode(isolate, *code);
d.a = 1.1;
d.b = 2.2;
d.c = 3.3;
@@ -1013,7 +1013,7 @@ TEST(11) {
#ifdef DEBUG
Code::cast(code)->Print();
#endif
- auto f = GeneratedCode<F_piiii>::FromCode(*code);
+ auto f = GeneratedCode<F_piiii>::FromCode(isolate, *code);
f.Call(&i, 0, 0, 0, 0);
CHECK_EQ(0xABCD0001, i.a);
@@ -1077,7 +1077,7 @@ TEST(WordSizedVectorInstructions) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ auto f = GeneratedCode<F_iiiii>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(173, static_cast<int>(res));
diff --git a/deps/v8/test/cctest/test-assembler-riscv32.cc b/deps/v8/test/cctest/test-assembler-riscv32.cc
index a72834e578..0bcbd5fae4 100644
--- a/deps/v8/test/cctest/test-assembler-riscv32.cc
+++ b/deps/v8/test/cctest/test-assembler-riscv32.cc
@@ -374,10 +374,10 @@ UTEST_R2_FORM_WITH_OP(sra, int32_t, -0x12340000, 17, >>)
// -- CSR --
UTEST_CSRI(csr_frm, DYN, RUP)
UTEST_CSRI(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
-UTEST_CSRI(csr_fcsr, kDivideByZero | kOverflow, kUnderflow)
+UTEST_CSRI(csr_fcsr, kDivideByZero | kFPUOverflow, kUnderflow)
UTEST_CSR(csr_frm, DYN, RUP)
UTEST_CSR(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
-UTEST_CSR(csr_fcsr, kDivideByZero | kOverflow | (RDN << kFcsrFrmShift),
+UTEST_CSR(csr_fcsr, kDivideByZero | kFPUOverflow | (RDN << kFcsrFrmShift),
kUnderflow | (RNE << kFcsrFrmShift))
// -- RV32M Standard Extension --
@@ -476,6 +476,40 @@ UTEST_R1_FORM_WITH_RES_F(fneg_s, float, 23.5f, -23.5f)
// UTEST_R1_FORM_WITH_RES_F(fabs_d, double, -23.5, 23.5)
// UTEST_R1_FORM_WITH_RES_F(fneg_d, double, 23.5, -23.5)
+// Test fmv_d
+TEST(RISCV_UTEST_fmv_d_double) {
+ CcTest::InitializeVM();
+
+ double src = base::bit_cast<double>(0xC037800000000000); // -23.5
+ double dst;
+ auto fn = [](MacroAssembler& assm) {
+ __ fld(ft0, a0, 0);
+ __ fmv_d(fa0, ft0);
+ __ fsd(fa0, a1, 0);
+ };
+ GenAndRunTest<int32_t, double*>(&src, &dst, fn);
+ CHECK_EQ(base::bit_cast<int64_t>(0xC037800000000000),
+ base::bit_cast<int64_t>(dst));
+}
+
+// Test fmv_d
+// double not a canonical NaN
+TEST(RISCV_UTEST_fmv_d_double_NAN_BOX) {
+ CcTest::InitializeVM();
+
+ int64_t src = base::bit_cast<int64_t>(0x7ff4000000000000);
+ int64_t dst;
+ auto fn = [](MacroAssembler& assm) {
+ __ fld(ft0, a0, 0);
+ __ fmv_d(fa0, ft0);
+ __ fsd(fa0, a1, 0);
+ };
+
+ GenAndRunTest<int32_t, int64_t*>(&src, &dst, fn);
+ CHECK_EQ(base::bit_cast<int64_t>(0x7ff4000000000000),
+ base::bit_cast<int64_t>(dst));
+}
+
// Test LI
TEST(RISCV0) {
CcTest::InitializeVM();
@@ -690,7 +724,7 @@ TEST(RISCV3) {
__ fsqrt_s(ft5, ft4);
__ fsw(ft5, a0, offsetof(T, fg));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
// Double test values.
t.a = 1.5e14;
@@ -762,7 +796,7 @@ TEST(RISCV4) {
__ sw(a4, a0, offsetof(T, e));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -813,7 +847,7 @@ TEST(RISCV5) {
__ fcvt_d_w(fa1, a5);
__ fsd(fa1, a0, offsetof(T, b));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e4;
t.b = 2.75e4;
@@ -871,7 +905,7 @@ TEST(RISCV6) {
__ lhu(t1, a0, offsetof(T, si));
__ sh(t1, a0, offsetof(T, r6));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -989,7 +1023,7 @@ TEST(RISCV7) {
__ bind(&outa_here);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -1039,6 +1073,17 @@ TEST(NAN_BOX) {
CHECK_EQ((uint32_t)base::bit_cast<uint32_t>(1234.56f), res);
}
+ // Test NaN boxing in FMV.S
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ fmv_w_x(fa0, a0);
+ __ fmv_s(ft1, fa0);
+ __ fmv_s(fa0, ft1);
+ };
+ auto res = GenAndRunTest<uint32_t>(0x7f400000, fn);
+ CHECK_EQ((uint32_t)base::bit_cast<uint32_t>(0x7f400000), res);
+ }
+
// Test FLW and FSW
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1057,7 +1102,7 @@ TEST(NAN_BOX) {
// Check only transfer low 32bits when fsw
__ fsw(fa0, a0, offsetof(T, res));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = -123.45;
t.box = 0;
@@ -1239,7 +1284,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ add(a3, a1, a2);
__ c_sw(a3, a0, offsetof(S, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
s.a = 1;
s.b = 2;
@@ -1355,7 +1400,7 @@ TEST(RVC_CB_BRANCH) {
__ bind(&outa_here);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -1568,7 +1613,7 @@ TEST(jump_tables1) {
CHECK_EQ(0, assm.UnboundLabelsCount());
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
int32_t res = reinterpret_cast<int32_t>(f.Call(i, 0, 0, 0, 0));
@@ -1618,7 +1663,7 @@ TEST(jump_tables2) {
__ Lw(ra, MemOperand(sp));
__ addi(sp, sp, 4);
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
int32_t res = reinterpret_cast<int32_t>(f.Call(i, 0, 0, 0, 0));
@@ -1678,7 +1723,7 @@ TEST(jump_tables3) {
__ Lw(ra, MemOperand(sp));
__ addi(sp, sp, 4);
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
Handle<Object> result(
@@ -1705,7 +1750,7 @@ TEST(li_estimate) {
Label a;
assm.bind(&a);
assm.RV_li(t0, p);
- int expected_count = assm.li_estimate(p, true);
+ int expected_count = assm.RV_li_count(p, true);
int count = assm.InstructionsGeneratedSince(&a);
CHECK_EQ(count, expected_count);
}
diff --git a/deps/v8/test/cctest/test-assembler-riscv64.cc b/deps/v8/test/cctest/test-assembler-riscv64.cc
index 898929b16d..c5a937d0e6 100644
--- a/deps/v8/test/cctest/test-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-assembler-riscv64.cc
@@ -381,10 +381,10 @@ UTEST_R2_FORM_WITH_OP(sra, int64_t, -0x1234'5678'0000'0000LL, 33, >>)
// -- CSR --
UTEST_CSRI(csr_frm, DYN, RUP)
UTEST_CSRI(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
-UTEST_CSRI(csr_fcsr, kDivideByZero | kOverflow, kUnderflow)
+UTEST_CSRI(csr_fcsr, kDivideByZero | kFPUOverflow, kUnderflow)
UTEST_CSR(csr_frm, DYN, RUP)
UTEST_CSR(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
-UTEST_CSR(csr_fcsr, kDivideByZero | kOverflow | (RDN << kFcsrFrmShift),
+UTEST_CSR(csr_fcsr, kDivideByZero | kFPUOverflow | (RDN << kFcsrFrmShift),
kUnderflow | (RNE << kFcsrFrmShift))
// -- RV64I --
@@ -598,6 +598,40 @@ TEST(RISCV0) {
}
}
+TEST(RISCVLi) {
+ CcTest::InitializeVM();
+
+ FOR_INT64_INPUTS(i) {
+ auto fn = [i](MacroAssembler& assm) { __ RecursiveLi(a0, i); };
+ auto res = GenAndRunTest(fn);
+ CHECK_EQ(i, res);
+ }
+ for (int i = 0; i < 64; i++) {
+ auto fn = [i](MacroAssembler& assm) { __ RecursiveLi(a0, 1 << i); };
+ auto res = GenAndRunTest(fn);
+ CHECK_EQ(1 << i, res);
+ }
+}
+
+TEST(RISCVLiEstimate) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ FOR_INT64_INPUTS(i) {
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label a, b;
+ assm.bind(&a);
+ assm.RecordComment("V8 RV_li");
+ assm.RV_li(a0, i);
+ int count_a = assm.InstructionsGeneratedSince(&a);
+ assm.bind(&b);
+ assm.RecordComment("LLVM li");
+ assm.RecursiveLi(a0, i);
+ int count_b = assm.InstructionsGeneratedSince(&b);
+ CHECK_LE(count_a, count_b);
+ }
+}
+
TEST(RISCV1) {
CcTest::InitializeVM();
@@ -825,7 +859,7 @@ TEST(RISCV3) {
__ fsqrt_s(ft5, ft4);
__ fsw(ft5, a0, offsetof(T, fg));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
// Double test values.
t.a = 1.5e14;
@@ -898,7 +932,7 @@ TEST(RISCV4) {
__ sd(a4, a0, offsetof(T, e));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -949,7 +983,7 @@ TEST(RISCV5) {
__ fcvt_d_l(fa1, a5);
__ fsd(fa1, a0, offsetof(T, b));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e4;
t.b = 2.75e8;
@@ -1007,7 +1041,7 @@ TEST(RISCV6) {
__ lhu(t1, a0, offsetof(T, si));
__ sh(t1, a0, offsetof(T, r6));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -1123,7 +1157,7 @@ TEST(RISCV7) {
__ bind(&outa_here);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -1180,6 +1214,28 @@ TEST(NAN_BOX) {
CHECK_EQ((uint64_t)base::bit_cast<uint32_t>(1234.56f), res);
}
+ // Test NaN boxing in FMV.S
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ fmv_w_x(fa0, a0);
+ __ fmv_s(ft1, fa0);
+ __ fmv_s(fa0, ft1);
+ };
+ auto res = GenAndRunTest<uint32_t>(0x7f400000, fn);
+ CHECK_EQ((uint32_t)base::bit_cast<uint32_t>(0x7f400000), res);
+ }
+
+ // Test NaN boxing in FMV.D
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ fmv_d_x(fa0, a0);
+ __ fmv_d(ft1, fa0);
+ __ fmv_d(fa0, ft1);
+ };
+ auto res = GenAndRunTest<uint64_t>(0x7ff4000000000000, fn);
+ CHECK_EQ((uint64_t)base::bit_cast<uint64_t>(0x7ff4000000000000), res);
+ }
+
// Test FLW and FSW
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1198,7 +1254,7 @@ TEST(NAN_BOX) {
// Check only transfer low 32bits when fsw
__ fsw(fa0, a0, offsetof(T, res));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = -123.45;
t.box = 0;
@@ -1417,7 +1473,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ fadd_d(fa2, fa1, fa0);
__ c_fsd(fa2, a0, offsetof(T, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 1.5e14;
@@ -1442,7 +1498,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ add(a3, a1, a2);
__ c_sw(a3, a0, offsetof(S, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
s.a = 1;
s.b = 2;
@@ -1466,7 +1522,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ add(a3, a1, a2);
__ c_sd(a3, a0, offsetof(U, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
u.a = 1;
u.b = 2;
@@ -1582,7 +1638,7 @@ TEST(RVC_CB_BRANCH) {
__ bind(&outa_here);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -1833,7 +1889,7 @@ TEST(jump_tables1) {
CHECK_EQ(0, assm.UnboundLabelsCount());
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
@@ -1883,7 +1939,7 @@ TEST(jump_tables2) {
__ Ld(ra, MemOperand(sp));
__ addi(sp, sp, 8);
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
@@ -1943,7 +1999,7 @@ TEST(jump_tables3) {
__ Ld(ra, MemOperand(sp));
__ addi(sp, sp, 8);
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
Handle<Object> result(
@@ -1961,7 +2017,7 @@ TEST(li_estimate) {
std::vector<int64_t> immediates = {
-256, -255, 0, 255, 8192, 0x7FFFFFFF,
INT32_MIN, INT32_MAX / 2, INT32_MAX, UINT32_MAX, INT64_MAX, INT64_MAX / 2,
- INT64_MIN};
+ INT64_MIN, 12312874234};
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1970,7 +2026,7 @@ TEST(li_estimate) {
Label a;
assm.bind(&a);
assm.RV_li(t0, p);
- int expected_count = assm.li_estimate(p, true);
+ int expected_count = assm.RV_li_count(p, true);
int count = assm.InstructionsGeneratedSince(&a);
CHECK_EQ(count, expected_count);
}
@@ -2254,11 +2310,14 @@ UTEST_RVV_VI_VX_FORM_WITH_FN(vminu_vx, 32, ARRAY_INT32, std::min<uint32_t>)
#define UTEST_RVV_VF_VV_FORM_WITH_OP(instr_name, tested_op) \
UTEST_RVV_VF_VV_FORM_WITH_RES(instr_name, ((rs1_fval)tested_op(rs2_fval)))
-#define UTEST_RVV_VF_VF_FORM_WITH_OP(instr_name, tested_op) \
- UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, ((rs1_fval)tested_op(rs2_fval)))
+#define UTEST_RVV_VF_VF_FORM_WITH_OP(instr_name, array, tested_op) \
+ UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, array, \
+ ((rs1_fval)tested_op(rs2_fval)))
+
+#define ARRAY_FLOAT compiler::ValueHelper::GetVector<float>()
UTEST_RVV_VF_VV_FORM_WITH_OP(vfadd_vv, +)
-// UTEST_RVV_VF_VF_FORM_WITH_OP(vfadd_vf, ARRAY_FLOAT, +)
+UTEST_RVV_VF_VF_FORM_WITH_OP(vfadd_vf, ARRAY_FLOAT, +)
UTEST_RVV_VF_VV_FORM_WITH_OP(vfsub_vv, -)
// UTEST_RVV_VF_VF_FORM_WITH_OP(vfsub_vf, ARRAY_FLOAT, -)
UTEST_RVV_VF_VV_FORM_WITH_OP(vfmul_vv, *)
@@ -2832,6 +2891,30 @@ UTEST_VCPOP_M_WITH_WIDTH(32)
UTEST_VCPOP_M_WITH_WIDTH(16)
UTEST_VCPOP_M_WITH_WIDTH(8)
+TEST(RISCV_UTEST_WasmRvvS128const) {
+ if (!CpuFeatures::IsSupported(RISCV_SIMD)) return;
+ CcTest::InitializeVM();
+ for (uint64_t x : compiler::ValueHelper::GetVector<int64_t>()) {
+ for (uint64_t y : compiler::ValueHelper::GetVector<int64_t>()) {
+ uint64_t src[2] = {x, y};
+ uint8_t vals[16];
+ volatile uint64_t result[kRvvVLEN / 64] = {0};
+ memcpy(vals, src, sizeof(vals));
+ auto fn = [vals, &result](MacroAssembler& assm) {
+ __ Push(kScratchReg);
+ __ WasmRvvS128const(v10, vals);
+ __ li(t1, Operand(int64_t(result)));
+ __ VU.set(t0, VSew::E64, Vlmul::m1);
+ __ vs(v10, t1, 0, VSew::E64);
+ __ Pop(kScratchReg);
+ };
+ GenAndRunTest(fn);
+ CHECK_EQ(result[0], x);
+ CHECK_EQ(result[1], y);
+ }
+ }
+}
+
#undef UTEST_VCPOP_M_WITH_WIDTH
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index a69bf64988..69b22e6907 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -68,7 +68,7 @@ TEST(0) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(7, static_cast<int>(res));
@@ -107,7 +107,7 @@ TEST(1) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(100, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(5050, static_cast<int>(res));
@@ -158,7 +158,7 @@ TEST(2) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(10, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(3628800, static_cast<int>(res));
@@ -254,7 +254,7 @@ TEST(4) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(
f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
@@ -282,7 +282,7 @@ TEST(5) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
intptr_t res =
reinterpret_cast<intptr_t>(f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
@@ -316,7 +316,7 @@ TEST(6) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
intptr_t res =
reinterpret_cast<intptr_t>(f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
@@ -348,7 +348,7 @@ TEST(7) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
intptr_t res =
reinterpret_cast<intptr_t>(f.Call(3, 4, 3, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
@@ -379,7 +379,7 @@ TEST(8) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res =
reinterpret_cast<intptr_t>(f.Call(100, 0,
0, 0, 0));
@@ -406,7 +406,7 @@ TEST(9) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res =
reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
@@ -491,7 +491,7 @@ TEST(10) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(3, 4, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -545,7 +545,7 @@ TEST(11) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -599,7 +599,7 @@ TEST(12) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -663,7 +663,7 @@ TEST(13) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(50, 250, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -754,7 +754,7 @@ TEST(14) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -844,7 +844,7 @@ TEST(15) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -891,7 +891,7 @@ TEST(16) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -965,7 +965,7 @@ TEST(17) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0x2, 0x30, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
CHECK_EQ(0, static_cast<int>(res));
@@ -1057,7 +1057,7 @@ TEST(18) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
// f.Call(reg2, reg3, reg4, reg5, reg6) -> set the register value
intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
::printf("f() = %" V8PRIxPTR "\n", res);
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index d1e6f08d0a..5efe5281c2 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -56,7 +56,7 @@ TEST(CallCFunction) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -90,7 +90,7 @@ TEST(CallCFunctionWithCallerSavedRegisters) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -119,7 +119,7 @@ TEST(NumberToString) {
Factory* factory = isolate->factory();
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -199,14 +199,14 @@ TEST(ToUint32) {
Factory* factory = isolate->factory();
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
- const int kContextOffset = 3;
- auto context = m.Parameter<Context>(kNumParams + kContextOffset);
- auto input = m.Parameter<Object>(1);
- m.Return(m.ToUint32(context, input));
-
+ {
+ auto context = m.GetJSContextParameter();
+ auto input = m.Parameter<Object>(1);
+ m.Return(m.ToUint32(context, input));
+ }
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
// clang-format off
@@ -275,7 +275,7 @@ TEST(ToUint32) {
namespace {
void IsValidPositiveSmiCase(Isolate* isolate, intptr_t value) {
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
m.Return(
@@ -320,7 +320,7 @@ TEST(ConvertAndClampRelativeIndex) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
enum Result { kFound, kNotFound };
@@ -470,7 +470,7 @@ TEST(DecodeWordFromWord32) {
TEST(JSFunction) {
const int kNumParams = 2; // left, right.
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
m.Return(m.SmiFromInt32(m.Int32Add(m.SmiToInt32(m.Parameter<Smi>(1)),
m.SmiToInt32(m.Parameter<Smi>(2)))));
@@ -485,7 +485,7 @@ TEST(JSFunction) {
TEST(ComputeIntegerHash) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
m.Return(m.SmiFromInt32(m.UncheckedCast<Int32T>(
@@ -510,10 +510,9 @@ TEST(ComputeIntegerHash) {
TEST(ToString) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
- m.Return(m.ToStringImpl(m.Parameter<Context>(kNumParams + 3),
- m.Parameter<Object>(1)));
+ m.Return(m.ToStringImpl(m.GetJSContextParameter(), m.Parameter<Object>(1)));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -569,7 +568,7 @@ TEST(TryToName) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
enum Result { kKeyIsIndex, kKeyIsUnique, kBailout };
@@ -792,7 +791,7 @@ void TestEntryToIndex() {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<IntPtrT> entry = m.SmiUntag(m.Parameter<Smi>(1));
@@ -824,7 +823,7 @@ void TestNameDictionaryLookup() {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 4;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
enum Result { kFound, kNotFound };
@@ -928,7 +927,7 @@ TEST(NumberDictionaryLookup) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 4;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
enum Result { kFound, kNotFound };
@@ -1013,7 +1012,7 @@ TEST(TransitionLookup) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 4;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
enum Result { kFound, kNotFound };
@@ -1192,7 +1191,7 @@ TEST(TryHasOwnProperty) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
enum Result { kFound, kNotFound, kBailout };
@@ -1384,7 +1383,7 @@ TEST(TryGetOwnProperty) {
Factory* factory = isolate->factory();
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
Handle<Symbol> not_found_symbol = factory->NewSymbol();
@@ -1392,7 +1391,7 @@ TEST(TryGetOwnProperty) {
{
auto object = m.Parameter<JSReceiver>(1);
auto unique_name = m.Parameter<Name>(2);
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
TVariable<Object> var_value(&m);
Label if_found(&m), if_not_found(&m), if_bailout(&m);
@@ -1605,7 +1604,7 @@ TEST(TryLookupElement) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
enum Result { kFound, kAbsent, kNotFound, kBailout };
@@ -1835,7 +1834,7 @@ TEST(AllocateJSObjectFromMap) {
Factory* factory = isolate->factory();
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -1917,8 +1916,8 @@ TEST(AllocationFoldingCSA) {
const int kNumParams = 1;
const int kNumArrays = 7;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1,
- CodeKind::FOR_TESTING); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams),
+ CodeKind::FOR_TESTING);
CodeStubAssembler m(asm_tester.state());
{
@@ -1986,7 +1985,7 @@ void TestDictionaryAllocation(CSAAllocator<Dictionary> csa_alloc,
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -2060,10 +2059,10 @@ TEST(PopAndReturnFromJSBuiltinWithStackParameters) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumStackParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumStackParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumStackParams));
{
CodeStubAssembler m(asm_tester.state());
- m.PopAndReturn(m.SmiUntag(m.Parameter<Smi>(0)),
+ m.PopAndReturn(m.SmiUntag(m.Parameter<Smi>(1)),
m.SmiConstant(Smi::FromInt(1234)));
}
@@ -2149,7 +2148,8 @@ void CallFunctionWithStackPointerChecks(Isolate* isolate,
CSA_CHECK(
&m, m.TaggedEqual(result, MakeConstantNode(m, expected_result)));
},
- 1, CodeStubAssembler::IndexAdvanceMode::kPost);
+ 1, CodeStubAssembler::LoopUnrollingMode::kNo,
+ CodeStubAssembler::IndexAdvanceMode::kPost);
#ifdef V8_CC_GNU
TNode<WordT> stack_pointer1 =
@@ -2158,7 +2158,7 @@ void CallFunctionWithStackPointerChecks(Isolate* isolate,
#endif
m.Return(m.SmiConstant(42));
}
- FunctionTester ft(asm_tester.GenerateCode(), 1); // Include receiver.
+ FunctionTester ft(asm_tester.GenerateCode(), 1);
Handle<Object> result;
for (int test_count = 0; test_count < 100; ++test_count) {
@@ -2241,7 +2241,7 @@ TEST(OneToTwoByteStringCopy) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
@@ -2274,7 +2274,7 @@ TEST(OneToOneByteStringCopy) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
@@ -2307,7 +2307,7 @@ TEST(OneToOneByteStringCopyNonZeroStart) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
@@ -2337,7 +2337,7 @@ TEST(TwoToTwoByteStringCopy) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
StringBuiltinsAssembler m(asm_tester.state());
m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
@@ -2479,7 +2479,7 @@ TEST(IsDebugActive) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
Label if_active(&m), if_not_active(&m);
@@ -2545,14 +2545,13 @@ TEST(CallBuiltin) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
{
auto receiver = m.Parameter<Object>(1);
auto name = m.Parameter<Name>(2);
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
auto value = m.CallBuiltin(Builtin::kGetProperty, context, receiver, name);
m.Return(value);
@@ -2574,14 +2573,13 @@ TEST(TailCallBuiltin) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
{
auto receiver = m.Parameter<Object>(1);
auto name = m.Parameter<Name>(2);
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
m.TailCallBuiltin(Builtin::kGetProperty, context, receiver, name);
}
@@ -2651,7 +2649,7 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
static void TestAppendJSArray(Isolate* isolate, ElementsKind kind, Object o1,
Object o2, Object o3, Object o4,
int initial_size, int result_size) {
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, i::JSParameterCount(kNumParams));
AppendJSArrayCodeStubAssembler m(asm_tester.state(), kind);
m.TestAppendJSArrayImpl(
isolate, &asm_tester, Handle<Object>(o1, isolate),
@@ -2745,7 +2743,7 @@ TEST(IsPromiseHookEnabled) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
m.Return(
@@ -2770,10 +2768,10 @@ TEST(NewJSPromise) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 2);
+ auto context = m.GetJSContextParameter();
const TNode<JSPromise> promise = m.NewJSPromise(context);
m.Return(promise);
@@ -2787,10 +2785,10 @@ TEST(NewJSPromise2) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 2);
+ auto context = m.GetJSContextParameter();
const TNode<JSPromise> promise =
m.NewJSPromise(context, v8::Promise::kRejected, m.SmiConstant(1));
m.Return(promise);
@@ -2809,7 +2807,7 @@ TEST(IsSymbol) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
auto symbol = m.Parameter<HeapObject>(1);
@@ -2828,7 +2826,7 @@ TEST(IsPrivateSymbol) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
auto symbol = m.Parameter<HeapObject>(1);
@@ -2850,10 +2848,10 @@ TEST(PromiseHasHandler) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 2);
+ auto context = m.GetJSContextParameter();
const TNode<JSPromise> promise =
m.NewJSPromise(context, m.UndefinedConstant());
m.Return(m.SelectBooleanConstant(m.PromiseHasHandler(promise)));
@@ -2868,10 +2866,10 @@ TEST(CreatePromiseResolvingFunctionsContext) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- const auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<JSPromise> promise =
m.NewJSPromise(context, m.UndefinedConstant());
@@ -2895,10 +2893,10 @@ TEST(CreatePromiseResolvingFunctions) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 2);
+ auto context = m.GetJSContextParameter();
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<JSPromise> promise =
m.NewJSPromise(context, m.UndefinedConstant());
@@ -2924,12 +2922,13 @@ TEST(CreatePromiseResolvingFunctions) {
TEST(NewElementsCapacity) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 2);
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
m.Return(m.SmiTag(
m.CalculateNewElementsCapacity(m.SmiUntag(m.Parameter<Smi>(1)))));
- FunctionTester ft(asm_tester.GenerateCode(), 1);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Smi> test_value = Handle<Smi>(Smi::FromInt(1), isolate);
Handle<Smi> result_obj = ft.CallChecked<Smi>(test_value);
CHECK_EQ(
@@ -2954,11 +2953,12 @@ TEST(NewElementsCapacity) {
TEST(NewElementsCapacitySmi) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 2);
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
m.Return(m.CalculateNewElementsCapacity(m.UncheckedParameter<Smi>(1)));
- FunctionTester ft(asm_tester.GenerateCode(), 1);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Smi> test_value = Handle<Smi>(Smi::FromInt(0), isolate);
Handle<Smi> result_obj = ft.CallChecked<Smi>(test_value);
CHECK_EQ(
@@ -2985,10 +2985,10 @@ TEST(AllocateFunctionWithMapAndContext) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- const auto context = m.Parameter<Context>(kNumParams + 2);
+ const auto context = m.GetJSContextParameter();
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<JSPromise> promise =
m.NewJSPromise(context, m.UndefinedConstant());
@@ -3016,7 +3016,7 @@ TEST(AllocateFunctionWithMapAndContext) {
fun->shared());
CHECK_EQ(isolate->factory()
->promise_capability_default_resolve_shared_fun()
- ->GetCode(),
+ ->GetCode(isolate),
fun->code());
}
@@ -3024,10 +3024,10 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 2);
+ auto context = m.GetJSContextParameter();
TNode<NativeContext> native_context = m.LoadNativeContext(context);
TNode<PromiseCapability> capability = m.CreatePromiseCapability(
@@ -3053,11 +3053,10 @@ TEST(NewPromiseCapability) {
{ // Builtin Promise
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<Object> promise_constructor =
m.LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
@@ -3104,11 +3103,10 @@ TEST(NewPromiseCapability) {
{ // Custom Promise
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
PromiseBuiltinsAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
auto constructor = m.Parameter<Object>(1);
const TNode<Oddball> debug_event = m.TrueConstant();
@@ -3170,7 +3168,7 @@ TEST(DirectMemoryTest8BitWord32Immediate) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
int8_t buffer[] = {1, 2, 4, 8, 17, 33, 65, 127};
const int element_count = 8;
@@ -3202,7 +3200,7 @@ TEST(DirectMemoryTest8BitWord32Immediate) {
TEST(DirectMemoryTest16BitWord32Immediate) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
int16_t buffer[] = {156, 2234, 4544, 8444, 1723, 3888, 658, 1278};
const int element_count = 8;
@@ -3234,7 +3232,7 @@ TEST(DirectMemoryTest16BitWord32Immediate) {
TEST(DirectMemoryTest8BitWord32) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
int8_t buffer[] = {1, 2, 4, 8, 17, 33, 65, 127, 67, 38};
const int element_count = 10;
@@ -3278,7 +3276,7 @@ TEST(DirectMemoryTest8BitWord32) {
TEST(DirectMemoryTest16BitWord32) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
int16_t buffer[] = {1, 2, 4, 8, 12345, 33, 65, 255, 67, 3823};
const int element_count = 10;
@@ -3334,10 +3332,10 @@ TEST(DirectMemoryTest16BitWord32) {
TEST(LoadJSArrayElementsMap) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
TNode<NativeContext> native_context = m.LoadNativeContext(context);
TNode<Int32T> kind = m.SmiToInt32(m.Parameter<Smi>(1));
m.Return(m.LoadJSArrayElementsMap(kind, native_context));
@@ -3359,7 +3357,7 @@ TEST(IsWhiteSpaceOrLineTerminator) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{ // Returns true if whitespace, false otherwise.
CodeStubAssembler m(asm_tester.state());
@@ -3388,7 +3386,7 @@ TEST(BranchIfNumberRelationalComparison) {
Isolate* isolate(CcTest::InitIsolateOnce());
Factory* f = isolate->factory();
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
Label return_true(&m), return_false(&m);
@@ -3418,7 +3416,7 @@ TEST(BranchIfNumberRelationalComparison) {
TEST(IsNumberArrayIndex) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
auto number = m.Parameter<Number>(1);
@@ -3466,16 +3464,14 @@ TEST(IsNumberArrayIndex) {
TEST(NumberMinMax) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester_min(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester_min(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester_min.state());
m.Return(m.NumberMin(m.Parameter<Number>(1), m.Parameter<Number>(2)));
}
FunctionTester ft_min(asm_tester_min.GenerateCode(), kNumParams);
- CodeAssemblerTester asm_tester_max(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester_max(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester_max.state());
m.Return(m.NumberMax(m.Parameter<Number>(1), m.Parameter<Number>(2)));
@@ -3524,16 +3520,14 @@ TEST(NumberMinMax) {
TEST(NumberAddSub) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester_add(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester_add(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester_add.state());
m.Return(m.NumberAdd(m.Parameter<Number>(1), m.Parameter<Number>(2)));
}
FunctionTester ft_add(asm_tester_add.GenerateCode(), kNumParams);
- CodeAssemblerTester asm_tester_sub(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester_sub(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester_sub.state());
m.Return(m.NumberSub(m.Parameter<Number>(1), m.Parameter<Number>(2)));
@@ -3570,7 +3564,7 @@ TEST(NumberAddSub) {
TEST(CloneEmptyFixedArray) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
m.Return(m.CloneFixedArray(m.Parameter<FixedArrayBase>(1)));
@@ -3587,7 +3581,7 @@ TEST(CloneEmptyFixedArray) {
TEST(CloneFixedArray) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
m.Return(m.CloneFixedArray(m.Parameter<FixedArrayBase>(1)));
@@ -3609,7 +3603,7 @@ TEST(CloneFixedArray) {
TEST(CloneFixedArrayCOW) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
m.Return(m.CloneFixedArray(m.Parameter<FixedArrayBase>(1)));
@@ -3627,7 +3621,7 @@ TEST(CloneFixedArrayCOW) {
TEST(ExtractFixedArrayCOWForceCopy) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
CodeStubAssembler::ExtractFixedArrayFlags flags;
@@ -3657,7 +3651,7 @@ TEST(ExtractFixedArrayCOWForceCopy) {
TEST(ExtractFixedArraySimple) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
CodeStubAssembler::ExtractFixedArrayFlags flags;
@@ -3686,7 +3680,7 @@ TEST(ExtractFixedArraySimple) {
TEST(ExtractFixedArraySimpleSmiConstant) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
CodeStubAssembler::ExtractFixedArrayFlags flags;
@@ -3712,7 +3706,7 @@ TEST(ExtractFixedArraySimpleSmiConstant) {
TEST(ExtractFixedArraySimpleIntPtrConstant) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
CodeStubAssembler::ExtractFixedArrayFlags flags;
@@ -3738,7 +3732,7 @@ TEST(ExtractFixedArraySimpleIntPtrConstant) {
TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
base::Optional<TNode<IntPtrT>> constant_1(m.IntPtrConstant(1));
@@ -3762,7 +3756,7 @@ TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
TEST(ExtractFixedArraySimpleIntPtrParameters) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
base::Optional<TNode<IntPtrT>> p1_untagged(m.SmiUntag(m.Parameter<Smi>(2)));
@@ -3803,7 +3797,7 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
TEST(SingleInputPhiElimination) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
TVariable<Smi> temp1(&m);
@@ -3812,8 +3806,7 @@ TEST(SingleInputPhiElimination) {
Label end_label(&m, {&temp1, &temp2});
temp1 = m.Parameter<Smi>(1);
temp2 = m.Parameter<Smi>(1);
- m.Branch(m.TaggedEqual(m.UncheckedParameter<Object>(0),
- m.UncheckedParameter<Object>(1)),
+ m.Branch(m.TaggedEqual(m.Parameter<Object>(0), m.Parameter<Object>(1)),
&end_label, &temp_label);
m.BIND(&temp_label);
temp1 = m.Parameter<Smi>(2);
@@ -3830,7 +3823,7 @@ TEST(SingleInputPhiElimination) {
TEST(SmallOrderedHashMapAllocate) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
auto capacity = m.Parameter<Smi>(1);
@@ -3868,7 +3861,7 @@ TEST(SmallOrderedHashMapAllocate) {
TEST(SmallOrderedHashSetAllocate) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(asm_tester.state());
auto capacity = m.Parameter<Smi>(1);
@@ -3906,7 +3899,7 @@ TEST(SmallOrderedHashSetAllocate) {
TEST(IsDoubleElementsKind) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester ft_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester ft_tester(isolate, JSParameterCount(kNumParams));
{
CodeStubAssembler m(ft_tester.state());
m.Return(m.SmiFromInt32(m.UncheckedCast<Int32T>(
@@ -3952,22 +3945,39 @@ TEST(IsDoubleElementsKind) {
0);
}
-TEST(TestCallBuiltinAbsolute) {
+namespace {
+
+enum CallJumpMode { kCall, kTailCall };
+
+void TestCallJumpBuiltin(CallJumpMode mode,
+ BuiltinCallJumpMode builtin_call_jump_mode) {
Isolate* isolate(CcTest::InitIsolateOnce());
+ if (builtin_call_jump_mode == BuiltinCallJumpMode::kPCRelative &&
+ !isolate->is_short_builtin_calls_enabled()) {
+ // PC-relative mode requires short builtin calls to be enabled.
+ return;
+ }
+
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
- const int kContextOffset = 3;
- auto str = m.Parameter<String>(1);
- auto context = m.Parameter<Context>(kNumParams + kContextOffset);
+ {
+ auto str = m.Parameter<String>(1);
+ auto context = m.GetJSContextParameter();
- TNode<Smi> index = m.SmiConstant(2);
+ TNode<Smi> index = m.SmiConstant(2);
- m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtin::kStringRepeat),
- context, str, index));
+ Callable callable = Builtins::CallableFor(isolate, Builtin::kStringRepeat);
+ if (mode == kCall) {
+ m.Return(m.CallStub(callable, context, str, index));
+ } else {
+ DCHECK_EQ(mode, kTailCall);
+ m.TailCallStub(callable, context, str, index);
+ }
+ }
AssemblerOptions options = AssemblerOptions::Default(isolate);
- options.builtin_call_jump_mode = BuiltinCallJumpMode::kAbsolute;
+ options.builtin_call_jump_mode = builtin_call_jump_mode;
options.isolate_independent_code = false;
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
MaybeHandle<Object> result = ft.Call(CcTest::MakeString("abcdef"));
@@ -3975,54 +3985,30 @@ TEST(TestCallBuiltinAbsolute) {
Handle<String>::cast(result.ToHandleChecked())));
}
-DISABLED_TEST(TestCallBuiltinPCRelative) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- if (!isolate->is_short_builtin_calls_enabled()) return;
-
- const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
-
- const int kContextOffset = 2;
- auto str = m.Parameter<String>(0);
- auto context = m.Parameter<Context>(kNumParams + kContextOffset);
+} // namespace
- TNode<Smi> index = m.SmiConstant(2);
+TEST(TestCallBuiltinAbsolute) {
+ TestCallJumpBuiltin(kCall, BuiltinCallJumpMode::kAbsolute);
+}
- m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtin::kStringRepeat),
- context, str, index));
- AssemblerOptions options = AssemblerOptions::Default(isolate);
- options.builtin_call_jump_mode = BuiltinCallJumpMode::kPCRelative;
- options.isolate_independent_code = false;
- FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
- MaybeHandle<Object> result = ft.Call(CcTest::MakeString("abcdef"));
- CHECK(String::Equals(isolate, CcTest::MakeString("abcdefabcdef"),
- Handle<String>::cast(result.ToHandleChecked())));
+TEST(TestCallBuiltinPCRelative) {
+ TestCallJumpBuiltin(kCall, BuiltinCallJumpMode::kPCRelative);
}
-// TODO(v8:9821): Remove the option to disable inlining off-heap trampolines
-// along with this test.
-DISABLED_TEST(TestCallBuiltinIndirect) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
+TEST(TestCallBuiltinIndirect) {
+ TestCallJumpBuiltin(kCall, BuiltinCallJumpMode::kIndirect);
+}
- const int kContextOffset = 2;
- auto str = m.Parameter<String>(0);
- auto context = m.Parameter<Context>(kNumParams + kContextOffset);
+TEST(TestTailCallBuiltinAbsolute) {
+ TestCallJumpBuiltin(kTailCall, BuiltinCallJumpMode::kAbsolute);
+}
- TNode<Smi> index = m.SmiConstant(2);
+TEST(TestTailCallBuiltinPCRelative) {
+ TestCallJumpBuiltin(kTailCall, BuiltinCallJumpMode::kPCRelative);
+}
- m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtin::kStringRepeat),
- context, str, index));
- AssemblerOptions options = AssemblerOptions::Default(isolate);
- options.builtin_call_jump_mode = BuiltinCallJumpMode::kIndirect;
- options.isolate_independent_code = true;
- FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
- MaybeHandle<Object> result = ft.Call(CcTest::MakeString("abcdef"));
- CHECK(String::Equals(isolate, CcTest::MakeString("abcdefabcdef"),
- Handle<String>::cast(result.ToHandleChecked())));
+TEST(TestTailCallBuiltinIndirect) {
+ TestCallJumpBuiltin(kTailCall, BuiltinCallJumpMode::kIndirect);
}
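The six one-line TESTs above replace three near-duplicate bodies with a single parameterized helper covering the {call, tail call} x {absolute, pc-relative, indirect} matrix. cctest registers each TEST individually, so the table-driven loop below is only an illustration of that matrix, assuming it sits in the same anonymous namespace as `TestCallJumpBuiltin`:

    // Illustration only: the same 2x3 matrix the TESTs above cover, written
    // as a table-driven loop over the helper defined earlier in this hunk.
    struct CallJumpCase {
      CallJumpMode mode;
      BuiltinCallJumpMode builtin_mode;
    };

    void RunAllCallJumpCases() {
      const CallJumpCase cases[] = {
          {kCall, BuiltinCallJumpMode::kAbsolute},
          {kCall, BuiltinCallJumpMode::kPCRelative},
          {kCall, BuiltinCallJumpMode::kIndirect},
          {kTailCall, BuiltinCallJumpMode::kAbsolute},
          {kTailCall, BuiltinCallJumpMode::kPCRelative},
          {kTailCall, BuiltinCallJumpMode::kIndirect},
      };
      for (const CallJumpCase& c : cases) {
        TestCallJumpBuiltin(c.mode, c.builtin_mode);
      }
    }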
TEST(InstructionSchedulingCallerSavedRegisters) {
@@ -4034,7 +4020,7 @@ TEST(InstructionSchedulingCallerSavedRegisters) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -4086,7 +4072,7 @@ TEST(WasmInt32ToHeapNumber) {
const int kNumParams = 1;
for (size_t i = 0; i < arraysize(test_values); ++i) {
int32_t test_value = test_values[i];
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
const TNode<Int32T> arg = m.Int32Constant(test_value);
const TNode<Object> call_result = m.CallBuiltin(
@@ -4130,9 +4116,9 @@ TEST(WasmTaggedNonSmiToInt32) {
};
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
const auto arg = m.Parameter<Object>(1);
int32_t result = 0;
Node* base = m.IntPtrConstant(reinterpret_cast<intptr_t>(&result));
@@ -4172,7 +4158,7 @@ TEST(WasmFloat32ToNumber) {
const int kNumParams = 1;
for (size_t i = 0; i < arraysize(test_values); ++i) {
double test_value = test_values[i];
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
const TNode<Float32T> arg = m.Float32Constant(test_value);
const TNode<Object> call_result = m.CallBuiltin(
@@ -4212,7 +4198,7 @@ TEST(WasmFloat64ToNumber) {
const int kNumParams = 1;
for (size_t i = 0; i < arraysize(test_values); ++i) {
double test_value = test_values[i];
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
const TNode<Float64T> arg = m.Float64Constant(test_value);
const TNode<Object> call_result = m.CallBuiltin(
@@ -4266,9 +4252,9 @@ TEST(WasmTaggedToFloat64) {
};
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
- auto context = m.Parameter<Context>(kNumParams + 3);
+ auto context = m.GetJSContextParameter();
const auto arg = m.Parameter<Object>(1);
double result = 0;
Node* base = m.IntPtrConstant(reinterpret_cast<intptr_t>(&result));
@@ -4294,7 +4280,7 @@ TEST(WasmTaggedToFloat64) {
TEST(SmiUntagLeftShiftOptimization) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -4314,7 +4300,7 @@ TEST(SmiUntagLeftShiftOptimization) {
TEST(SmiUntagComparisonOptimization) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
{
@@ -4421,7 +4407,7 @@ TEST(IntPtrMulHigh) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
TNode<IntPtrT> a = m.IntPtrConstant(std::numeric_limits<intptr_t>::min());
@@ -4460,7 +4446,7 @@ TEST(UintPtrMulHigh) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
TNode<IntPtrT> a = m.IntPtrConstant(std::numeric_limits<intptr_t>::min());
@@ -4505,8 +4491,7 @@ TEST(IntPtrMulWithOverflow) {
const int kNumParams = 1;
{
- CodeAssemblerTester asm_tester(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
TNode<IntPtrT> a = m.IntPtrConstant(std::numeric_limits<intptr_t>::min());
@@ -4531,8 +4516,7 @@ TEST(IntPtrMulWithOverflow) {
}
{
- CodeAssemblerTester asm_tester(isolate,
- kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
CodeStubAssembler m(asm_tester.state());
TNode<IntPtrT> a = m.IntPtrConstant(std::numeric_limits<intptr_t>::max());
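A second recurring change in this file: instead of hand-computing the context's parameter index (`m.Parameter<Context>(kNumParams + kContextOffset)`), tests now call `m.GetJSContextParameter()`. The self-contained snippet below is not CSA code; it only illustrates why the magic-offset style was fragile, and every number in it is an assumption made for the example:

    #include <cstdio>

    // One place owns the stub's slot layout; callers never repeat "+ 3".
    struct StubLayout {
      int declared_params;  // kNumParams in the tests
      int extra_slots;      // receiver, argc, etc. -- subject to change
      int ContextIndex() const { return declared_params + extra_slots; }
    };

    int main() {
      StubLayout layout{/*declared_params=*/1, /*extra_slots=*/3};
      // The old style repeated this arithmetic in every test body; the new
      // accessor corresponds to asking the layout object instead.
      std::printf("context parameter index: %d\n", layout.ContextIndex());
      return 0;
    }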
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e6991ba142..09b0d0541f 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -94,7 +94,7 @@ TEST(StartStop) {
CodeEntryStorage storage;
CpuProfilesCollection profiles(isolate);
ProfilerCodeObserver code_observer(isolate, storage);
- Symbolizer symbolizer(code_observer.code_map());
+ Symbolizer symbolizer(code_observer.instruction_stream_map());
std::unique_ptr<ProfilerEventsProcessor> processor(
new SamplingEventsProcessor(
isolate, &symbolizer, &code_observer, &profiles,
@@ -178,7 +178,8 @@ TEST(CodeEvents) {
CodeEntryStorage storage;
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer.instruction_stream_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
isolate, symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
@@ -197,9 +198,17 @@ TEST(CodeEvents) {
comment_code, "comment");
profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kBuiltin,
comment2_code, "comment2");
- profiler_listener.CodeMoveEvent(*comment2_code, *moved_code);
PtrComprCageBase cage_base(isolate);
+ if (comment2_code->IsBytecodeArray(cage_base)) {
+ profiler_listener.BytecodeMoveEvent(comment2_code->GetBytecodeArray(),
+ moved_code->GetBytecodeArray());
+ } else {
+ profiler_listener.CodeMoveEvent(
+ comment2_code->GetCode().instruction_stream(),
+ moved_code->GetCode().instruction_stream());
+ }
+
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(processor, aaa_code->InstructionStart(cage_base));
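Move events are now reported per object kind: bytecode arrays go through `BytecodeMoveEvent`, everything else through `CodeMoveEvent` on the instruction streams. Condensed from the hunk above, as a fragment assuming the same `profiler_listener`, `comment2_code`, and `moved_code` fixtures:

    // Report a code move, dispatching on the underlying object kind.
    PtrComprCageBase cage_base(isolate);
    if (comment2_code->IsBytecodeArray(cage_base)) {
      profiler_listener.BytecodeMoveEvent(comment2_code->GetBytecodeArray(),
                                          moved_code->GetBytecodeArray());
    } else {
      profiler_listener.CodeMoveEvent(
          comment2_code->GetCode().instruction_stream(),
          moved_code->GetCode().instruction_stream());
    }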
@@ -207,20 +216,20 @@ TEST(CodeEvents) {
processor->StopSynchronously();
// Check the state of the symbolizer.
- CodeEntry* aaa =
- symbolizer->code_map()->FindEntry(aaa_code->InstructionStart(cage_base));
+ CodeEntry* aaa = symbolizer->instruction_stream_map()->FindEntry(
+ aaa_code->InstructionStart(cage_base));
CHECK(aaa);
CHECK_EQ(0, strcmp(aaa_str, aaa->name()));
- CodeEntry* comment = symbolizer->code_map()->FindEntry(
+ CodeEntry* comment = symbolizer->instruction_stream_map()->FindEntry(
comment_code->InstructionStart(cage_base));
CHECK(comment);
CHECK_EQ(0, strcmp("comment", comment->name()));
- CHECK(!symbolizer->code_map()->FindEntry(
+ CHECK(!symbolizer->instruction_stream_map()->FindEntry(
comment2_code->InstructionStart(cage_base)));
- CodeEntry* comment2 = symbolizer->code_map()->FindEntry(
+ CodeEntry* comment2 = symbolizer->instruction_stream_map()->FindEntry(
moved_code->InstructionStart(cage_base));
CHECK(comment2);
CHECK_EQ(0, strcmp("comment2", comment2->name()));
@@ -245,7 +254,8 @@ TEST(TickEvents) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver* code_observer =
new ProfilerCodeObserver(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer->instruction_stream_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
CcTest::i_isolate(), symbolizer, code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
@@ -315,15 +325,15 @@ TEST(CodeMapClearedBetweenProfilesWithLazyLogging) {
CHECK(profile);
// Check that the code map is empty.
- CodeMap* code_map = profiler.code_map_for_test();
- CHECK_EQ(code_map->size(), 0);
+ InstructionStreamMap* instruction_stream_map = profiler.code_map_for_test();
+ CHECK_EQ(instruction_stream_map->size(), 0);
profiler.DeleteProfile(profile);
// Create code between profiles. This should not be logged yet.
i::Handle<i::AbstractCode> code2(CreateCode(isolate, &env), isolate);
- CHECK(!code_map->FindEntry(code2->InstructionStart(isolate)));
+ CHECK(!instruction_stream_map->FindEntry(code2->InstructionStart(isolate)));
}
TEST(CodeMapNotClearedBetweenProfilesWithEagerLogging) {
@@ -343,33 +353,35 @@ TEST(CodeMapNotClearedBetweenProfilesWithEagerLogging) {
PtrComprCageBase cage_base(isolate);
// Check that our code is still in the code map.
- CodeMap* code_map = profiler.code_map_for_test();
+ InstructionStreamMap* instruction_stream_map = profiler.code_map_for_test();
CodeEntry* code1_entry =
- code_map->FindEntry(code1->InstructionStart(cage_base));
+ instruction_stream_map->FindEntry(code1->InstructionStart(cage_base));
CHECK(code1_entry);
CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
profiler.DeleteProfile(profile);
// We should still have an entry in kEagerLogging mode.
- code1_entry = code_map->FindEntry(code1->InstructionStart(cage_base));
+ code1_entry =
+ instruction_stream_map->FindEntry(code1->InstructionStart(cage_base));
CHECK(code1_entry);
CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
// Create code between profiles. This should be logged too.
i::Handle<i::AbstractCode> code2(CreateCode(isolate, &env), isolate);
- CHECK(code_map->FindEntry(code2->InstructionStart(cage_base)));
+ CHECK(instruction_stream_map->FindEntry(code2->InstructionStart(cage_base)));
profiler.StartProfiling("");
CpuProfile* profile2 = profiler.StopProfiling("");
CHECK(profile2);
// Check that we still have code map entries for both code objects.
- code1_entry = code_map->FindEntry(code1->InstructionStart(cage_base));
+ code1_entry =
+ instruction_stream_map->FindEntry(code1->InstructionStart(cage_base));
CHECK(code1_entry);
CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
CodeEntry* code2_entry =
- code_map->FindEntry(code2->InstructionStart(cage_base));
+ instruction_stream_map->FindEntry(code2->InstructionStart(cage_base));
CHECK(code2_entry);
CHECK_EQ(0, strcmp("function_2", code2_entry->name()));
@@ -377,10 +389,12 @@ TEST(CodeMapNotClearedBetweenProfilesWithEagerLogging) {
// Check that we still have code map entries for both code objects, even after
// the last profile is deleted.
- code1_entry = code_map->FindEntry(code1->InstructionStart(cage_base));
+ code1_entry =
+ instruction_stream_map->FindEntry(code1->InstructionStart(cage_base));
CHECK(code1_entry);
CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
- code2_entry = code_map->FindEntry(code2->InstructionStart(cage_base));
+ code2_entry =
+ instruction_stream_map->FindEntry(code2->InstructionStart(cage_base));
CHECK(code2_entry);
CHECK_EQ(0, strcmp("function_2", code2_entry->name()));
}
@@ -411,7 +425,8 @@ TEST(Issue1398) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver* code_observer =
new ProfilerCodeObserver(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer->instruction_stream_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
CcTest::i_isolate(), symbolizer, code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
@@ -996,11 +1011,11 @@ class TestApiCallbacks {
void Wait() {
if (is_warming_up_) return;
v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
- double start = platform->CurrentClockTimeMillis();
- double duration = 0;
+ int64_t start = platform->CurrentClockTimeMilliseconds();
+ int64_t duration = 0;
while (duration < min_duration_ms_) {
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
- duration = platform->CurrentClockTimeMillis() - start;
+ duration = platform->CurrentClockTimeMilliseconds() - start;
}
}
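`Platform::CurrentClockTimeMillis()` (a `double`) gives way to `CurrentClockTimeMilliseconds()` (an `int64_t`), so the busy-wait is now whole-millisecond integer arithmetic. The updated `Wait()` loop, reassembled from the interleaved lines above (a fragment assuming the test's `min_duration_ms_` member):

    // Spin until at least min_duration_ms_ wall-clock milliseconds have passed.
    v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
    int64_t start = platform->CurrentClockTimeMilliseconds();
    int64_t duration = 0;
    while (duration < min_duration_ms_) {
      v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
      duration = platform->CurrentClockTimeMilliseconds() - start;
    }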
@@ -1236,13 +1251,13 @@ TEST(BoundFunctionCall) {
// This tests checks distribution of the samples through the source lines.
static void TickLines(bool optimize) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
v8_flags.turbofan = optimize;
#ifdef V8_ENABLE_MAGLEV
// TODO(v8:7700): Also test maglev here.
v8_flags.maglev = false;
#endif // V8_ENABLE_MAGLEV
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
CcTest::InitializeVM();
LocalContext env;
i::v8_flags.allow_natives_syntax = true;
@@ -1299,7 +1314,8 @@ static void TickLines(bool optimize) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver* code_observer =
new ProfilerCodeObserver(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer->instruction_stream_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
CcTest::i_isolate(), symbolizer, code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
@@ -1333,7 +1349,8 @@ static void TickLines(bool optimize) {
CHECK(profile);
// Check the state of the symbolizer.
- CodeEntry* func_entry = symbolizer->code_map()->FindEntry(code_address);
+ CodeEntry* func_entry =
+ symbolizer->instruction_stream_map()->FindEntry(code_address);
CHECK(func_entry);
CHECK_EQ(0, strcmp(func_name, func_entry->name()));
const i::SourcePositionTable* line_info = func_entry->line_info();
@@ -3448,8 +3465,12 @@ TEST(MultipleThreadsSingleIsolate) {
env, "YieldIsolate", [](const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Isolate* isolate = info.GetIsolate();
if (!info[0]->IsTrue()) return;
- v8::Unlocker unlocker(isolate);
- v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
+ isolate->Exit();
+ {
+ v8::Unlocker unlocker(isolate);
+ v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
+ }
+ isolate->Enter();
});
CompileRun(varying_frame_size_script);
@@ -3461,11 +3482,13 @@ TEST(MultipleThreadsSingleIsolate) {
// For good measure, profile on our own thread
UnlockingThread::Profile(env, 0);
+ isolate->Exit();
{
v8::Unlocker unlocker(isolate);
thread1.Join();
thread2.Join();
}
+ isolate->Enter();
}
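Both `v8::Unlocker` sites in this test now exit the isolate before unlocking and re-enter afterwards; the ArchiveRestoreThread hunk later in this diff spells out why (re-entering refreshes the stack start used by conservative stack scanning). The pattern, as a fragment assuming `isolate` is the currently entered isolate:

    // Yield the isolate to other threads: exit, unlock inside a scope, re-enter.
    isolate->Exit();
    {
      v8::Unlocker unlocker(isolate);
      // Other threads may lock and use the isolate here.
      v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
    }
    isolate->Enter();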
// Tests that StopProfiling doesn't wait for the next sample tick in order to
@@ -3479,11 +3502,11 @@ TEST(FastStopProfiling) {
profiler->StartProfiling("", {kLeafNodeLineNumbers});
v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
- double start = platform->CurrentClockTimeMillis();
+ int64_t start = platform->CurrentClockTimeMilliseconds();
profiler->StopProfiling("");
- double duration = platform->CurrentClockTimeMillis() - start;
+ int64_t duration = platform->CurrentClockTimeMilliseconds() - start;
- CHECK_LT(duration, kWaitThreshold.InMillisecondsF());
+ CHECK_LT(duration, kWaitThreshold.InMilliseconds());
}
// Tests that when current_profiles->size() is greater than the max allowable
@@ -3539,7 +3562,7 @@ TEST(LowPrecisionSamplingStartStopInternal) {
CodeEntryStorage storage;
CpuProfilesCollection profiles(isolate);
ProfilerCodeObserver code_observer(isolate, storage);
- Symbolizer symbolizer(code_observer.code_map());
+ Symbolizer symbolizer(code_observer.instruction_stream_map());
std::unique_ptr<ProfilerEventsProcessor> processor(
new SamplingEventsProcessor(
isolate, &symbolizer, &code_observer, &profiles,
@@ -3667,7 +3690,8 @@ TEST(ProflilerSubsampling) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver* code_observer =
new ProfilerCodeObserver(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer->instruction_stream_map());
ProfilerEventsProcessor* processor =
new SamplingEventsProcessor(isolate, symbolizer, code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(1),
@@ -3713,7 +3737,8 @@ TEST(DynamicResampling) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver* code_observer =
new ProfilerCodeObserver(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer->instruction_stream_map());
ProfilerEventsProcessor* processor =
new SamplingEventsProcessor(isolate, symbolizer, code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(1),
@@ -3785,7 +3810,8 @@ TEST(DynamicResamplingWithBaseInterval) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver* code_observer =
new ProfilerCodeObserver(isolate, storage);
- Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
+ Symbolizer* symbolizer =
+ new Symbolizer(code_observer->instruction_stream_map());
ProfilerEventsProcessor* processor =
new SamplingEventsProcessor(isolate, symbolizer, code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(1),
@@ -4260,7 +4286,7 @@ int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source,
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*CompileRun(source)));
if (function->ActiveTierIsIgnition()) return -1;
- i::Handle<i::Code> code(i::FromCodeT(function->code()), isolate);
+ i::Handle<i::Code> code(function->code(), isolate);
i::SourcePositionTableIterator iterator(
ByteArray::cast(code->source_position_table()));
@@ -4463,7 +4489,8 @@ TEST(CanStartStopProfilerWithTitlesAndIds) {
}
TEST(FastApiCPUProfiler) {
-#if !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR)
+#if !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR) && \
+ defined(V8_ENABLE_TURBOFAN)
// None of the following configurations include JSCallReducer.
if (i::v8_flags.jitless) return;
@@ -4539,11 +4566,12 @@ TEST(FastApiCPUProfiler) {
// Check that the CodeEntry is the expected one, i.e. the fast callback.
CodeEntry* code_entry =
reinterpret_cast<const ProfileNode*>(api_func_node)->entry();
- CodeMap* code_map = reinterpret_cast<CpuProfile*>(profile)
- ->cpu_profiler()
- ->code_map_for_test();
- CodeEntry* expected_code_entry =
- code_map->FindEntry(reinterpret_cast<Address>(c_func.GetAddress()));
+ InstructionStreamMap* instruction_stream_map =
+ reinterpret_cast<CpuProfile*>(profile)
+ ->cpu_profiler()
+ ->code_map_for_test();
+ CodeEntry* expected_code_entry = instruction_stream_map->FindEntry(
+ reinterpret_cast<Address>(c_func.GetAddress()));
CHECK_EQ(code_entry, expected_code_entry);
int foo_ticks = foo_node->GetHitCount();
@@ -4559,15 +4587,16 @@ TEST(FastApiCPUProfiler) {
CHECK_GE(api_func_ticks, 800);
profile->Delete();
-#endif
+#endif // !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR) &&
+ // defined(V8_ENABLE_TURBOFAN)
}
TEST(BytecodeFlushEventsEagerLogging) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
v8_flags.turbofan = false;
v8_flags.always_turbofan = false;
v8_flags.optimize_for_size = false;
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
#if ENABLE_SPARKPLUG
v8_flags.always_sparkplug = false;
#endif // ENABLE_SPARKPLUG
@@ -4581,9 +4610,11 @@ TEST(BytecodeFlushEventsEagerLogging) {
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
Factory* factory = i_isolate->factory();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
CpuProfiler profiler(i_isolate, kDebugNaming, kEagerLogging);
- CodeMap* code_map = profiler.code_map_for_test();
+ InstructionStreamMap* instruction_stream_map = profiler.code_map_for_test();
{
v8::HandleScope scope(isolate);
@@ -4615,7 +4646,7 @@ TEST(BytecodeFlushEventsEagerLogging) {
function->shared().GetBytecodeArray(i_isolate);
i::Address bytecode_start = compiled_data.GetFirstBytecodeAddress();
- CHECK(code_map->FindEntry(bytecode_start));
+ CHECK(instruction_stream_map->FindEntry(bytecode_start));
// The code will survive at least two GCs.
CcTest::CollectAllGarbage();
@@ -4632,7 +4663,7 @@ TEST(BytecodeFlushEventsEagerLogging) {
CHECK(!function->shared().is_compiled());
CHECK(!function->is_compiled());
- CHECK(!code_map->FindEntry(bytecode_start));
+ CHECK(!instruction_stream_map->FindEntry(bytecode_start));
}
}
@@ -4642,6 +4673,8 @@ TEST(ClearUnusedWithEagerLogging) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
CodeEntryStorage storage;
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
@@ -4651,8 +4684,8 @@ TEST(ClearUnusedWithEagerLogging) {
CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging, profiles, nullptr,
nullptr, code_observer);
- CodeMap* code_map = profiler.code_map_for_test();
- size_t initial_size = code_map->size();
+ InstructionStreamMap* instruction_stream_map = profiler.code_map_for_test();
+ size_t initial_size = instruction_stream_map->size();
size_t profiler_size = profiler.GetEstimatedMemoryUsage();
{
@@ -4664,7 +4697,7 @@ TEST(ClearUnusedWithEagerLogging) {
CompileRun(
"function some_func() {}"
"some_func();");
- CHECK_GT(code_map->size(), initial_size);
+ CHECK_GT(instruction_stream_map->size(), initial_size);
CHECK_GT(profiler.GetEstimatedMemoryUsage(), profiler_size);
CHECK_GT(profiler.GetAllProfilersMemorySize(isolate), profiler_size);
}
@@ -4675,8 +4708,8 @@ TEST(ClearUnusedWithEagerLogging) {
CcTest::CollectAllGarbage();
- // Verify that the CodeMap's size is unchanged post-GC.
- CHECK_EQ(code_map->size(), initial_size);
+ // Verify that the InstructionStreamMap's size is unchanged post-GC.
+ CHECK_EQ(instruction_stream_map->size(), initial_size);
CHECK_EQ(profiler.GetEstimatedMemoryUsage(), profiler_size);
CHECK_EQ(profiler.GetAllProfilersMemorySize(isolate), profiler_size);
}
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index 3d0fb228d6..e8b4f3365b 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -228,19 +228,22 @@ TEST(GetObjectProperties) {
// its properties should match what we read last time.
d::ObjectPropertiesResultPtr props2;
{
- heap_addresses.read_only_space_first_page = 0;
+ d::HeapAddresses heap_addresses_without_ro_space = heap_addresses;
+ heap_addresses_without_ro_space.read_only_space_first_page = 0;
uintptr_t map_ptr = props->properties[0]->address;
uintptr_t map_map_ptr = *reinterpret_cast<i::Tagged_t*>(map_ptr);
#if V8_MAP_PACKING
map_map_ptr = reinterpret_cast<i::MapWord*>(&map_map_ptr)->ToMap().ptr();
#endif
uintptr_t map_address =
- d::GetObjectProperties(map_map_ptr, &ReadMemory, heap_addresses)
+ d::GetObjectProperties(map_map_ptr, &ReadMemory,
+ heap_addresses_without_ro_space)
->properties[0]
->address;
MemoryFailureRegion failure(map_address, map_address + i::Map::kSize);
props2 = d::GetObjectProperties(second_string_address, &ReadMemory,
- heap_addresses, "v8::internal::String");
+ heap_addresses_without_ro_space,
+ "v8::internal::String");
if (COMPRESS_POINTERS_BOOL) {
// The first page of each heap space can be automatically detected when
// pointer compression is active, so we expect to use known maps instead
@@ -349,6 +352,7 @@ TEST(GetObjectProperties) {
props = d::GetObjectProperties(
ReadProp<i::Tagged_t>(*props, "instance_descriptors"), &ReadMemory,
heap_addresses);
+ CHECK_EQ(props->num_properties, 6);
// It should have at least two descriptors (possibly plus slack).
CheckProp(*props->properties[1], "uint16_t", "number_of_all_descriptors");
uint16_t number_of_all_descriptors =
@@ -356,7 +360,7 @@ TEST(GetObjectProperties) {
CHECK_GE(number_of_all_descriptors, 2);
// The "descriptors" property should describe the struct layout for each
// element in the array.
- const d::ObjectProperty& descriptors = *props->properties[6];
+ const d::ObjectProperty& descriptors = *props->properties[5];
// No C++ type is reported directly because there may not be an actual C++
// struct with this layout, hence the empty string in this check.
CheckProp(descriptors, /*type=*/"", "descriptors",
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 1f55824e3a..f8f8e6a8a6 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -165,6 +165,8 @@ void CheckDebuggerUnloaded() {
CHECK(!CcTest::i_isolate()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
@@ -253,8 +255,9 @@ class DebugEventBreak : public v8::debug::DebugDelegate {
}
};
+v8::debug::BreakReasons break_right_now_reasons = {};
static void BreakRightNow(v8::Isolate* isolate, void*) {
- v8::debug::BreakRightNow(isolate);
+ v8::debug::BreakRightNow(isolate, break_right_now_reasons);
}
// Debug event handler which re-issues a debug break until a limit has been
@@ -580,8 +583,6 @@ TEST(BreakPointApiIntrinsics) {
DebugEventCounter delegate;
v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
- v8::Local<v8::Function> builtin;
-
// === Test that using API-exposed functions won't trigger breakpoints ===
{
v8::Local<v8::Function> weakmap_get =
@@ -2716,7 +2717,6 @@ TEST(DebugStepWith) {
v8::Object::New(env->GetIsolate()))
.FromJust());
v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
- v8::Local<v8::Value> result;
SetBreakPoint(foo, 8); // "var a = {};"
run_step.set_step_action(StepInto);
@@ -3828,6 +3828,12 @@ void DebugBreakLoop(const char* loop_header, const char** loop_bodies,
TestDebugBreakInLoop(loop_header, loop_bodies, loop_footer);
+ // Also test with "Scheduled" break reason.
+ break_right_now_reasons =
+ v8::debug::BreakReasons{v8::debug::BreakReason::kScheduled};
+ TestDebugBreakInLoop(loop_header, loop_bodies, loop_footer);
+ break_right_now_reasons = v8::debug::BreakReasons{};
+
// Get rid of the debug event listener.
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
@@ -4202,6 +4208,12 @@ class ArchiveRestoreThread : public v8::base::Thread,
// child.GetBreakCount() will return 1 if the debugger fails to stop
// on the `next()` line after the grandchild thread returns.
CHECK_EQ(child.GetBreakCount(), 5);
+
+ // This test on purpose unlocks the isolate without exiting and
+ // re-entering. It must however update the stack start, which would have
+ // been done automatically if the isolate was properly re-entered.
+ reinterpret_cast<i::Isolate*>(isolate_)->heap()->SetStackStart(
+ v8::base::Stack::GetStackStart());
}
}
@@ -4565,7 +4577,7 @@ TEST(BuiltinsExceptionPrediction) {
bool fail = false;
for (i::Builtin builtin = i::Builtins::kFirst; builtin <= i::Builtins::kLast;
++builtin) {
- i::CodeT code = builtins->code(builtin);
+ i::Code code = builtins->code(builtin);
if (code.kind() != i::CodeKind::BUILTIN) continue;
auto prediction = code.GetBuiltinCatchPrediction();
USE(prediction);
@@ -4963,7 +4975,8 @@ TEST(GetPrivateFields) {
.ToLocalChecked());
std::vector<v8::Local<v8::Value>> names;
std::vector<v8::Local<v8::Value>> values;
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+ int filter = static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateFields);
+ CHECK(v8::debug::GetPrivateMembers(context, object, filter, &names, &values));
CHECK_EQ(names.size(), 2);
for (int i = 0; i < 2; i++) {
@@ -4995,7 +5008,7 @@ TEST(GetPrivateFields) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "x"))
.ToLocalChecked());
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+ CHECK(v8::debug::GetPrivateMembers(context, object, filter, &names, &values));
CHECK_EQ(names.size(), 3);
for (int i = 0; i < 3; i++) {
@@ -5030,7 +5043,7 @@ TEST(GetPrivateFields) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "x"))
.ToLocalChecked());
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+ CHECK(v8::debug::GetPrivateMembers(context, object, filter, &names, &values));
CHECK_EQ(names.size(), 2);
for (int i = 0; i < 2; i++) {
@@ -5069,31 +5082,46 @@ TEST(GetPrivateMethodsAndAccessors) {
.ToLocalChecked());
std::vector<v8::Local<v8::Value>> names;
std::vector<v8::Local<v8::Value>> values;
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
- CHECK_EQ(names.size(), 4);
- for (int i = 0; i < 4; i++) {
+ int accessor_filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateAccessors);
+ int method_filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateMethods);
+
+ CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
+ CHECK(name->IsString());
+ CHECK(v8_str("#method")->Equals(context, name.As<v8::String>()).FromJust());
+ CHECK(value->IsFunction());
+ }
+
+ names.clear();
+ values.clear();
+ CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 3);
+ for (int i = 0; i < 3; i++) {
v8::Local<v8::Value> name = names[i];
v8::Local<v8::Value> value = values[i];
CHECK(name->IsString());
std::string name_str = FromString(v8_isolate, name.As<v8::String>());
- if (name_str == "#method") {
- CHECK(value->IsFunction());
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#accessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#readOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
} else {
- CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
- v8::Local<v8::debug::AccessorPair> accessors =
- value.As<v8::debug::AccessorPair>();
- if (name_str == "#accessor") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsFunction());
- } else if (name_str == "#readOnly") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsNull());
- } else {
- CHECK_EQ(name_str, "#writeOnly");
- CHECK(accessors->getter()->IsNull());
- CHECK(accessors->setter()->IsFunction());
- }
+ CHECK_EQ(name_str, "#writeOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
}
}
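`v8::debug::GetPrivateMembers` now takes a filter argument, so these tests query private methods and private accessors in two passes instead of branching on the name inside one loop. The call shape, condensed from the hunks above (a fragment assuming the test's `context` and `object`):

    std::vector<v8::Local<v8::Value>> names;
    std::vector<v8::Local<v8::Value>> values;

    // Pass 1: private methods only (e.g. "#method").
    int method_filter =
        static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateMethods);
    CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
                                       &values));

    // Pass 2: private accessors only ("#accessor", "#readOnly", "#writeOnly").
    names.clear();
    values.clear();
    int accessor_filter =
        static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateAccessors);
    CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
                                       &values));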
@@ -5115,31 +5143,41 @@ TEST(GetPrivateMethodsAndAccessors) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "x"))
.ToLocalChecked());
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
- CHECK_EQ(names.size(), 4);
- for (int i = 0; i < 4; i++) {
+ CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
+ CHECK(name->IsString());
+ CHECK(v8_str("#method")->Equals(context, name.As<v8::String>()).FromJust());
+ CHECK(value->IsFunction());
+ }
+
+ names.clear();
+ values.clear();
+ CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 3);
+ for (int i = 0; i < 3; i++) {
v8::Local<v8::Value> name = names[i];
v8::Local<v8::Value> value = values[i];
CHECK(name->IsString());
std::string name_str = FromString(v8_isolate, name.As<v8::String>());
- if (name_str == "#method") {
- CHECK(value->IsFunction());
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#accessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#readOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
} else {
- CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
- v8::Local<v8::debug::AccessorPair> accessors =
- value.As<v8::debug::AccessorPair>();
- if (name_str == "#accessor") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsFunction());
- } else if (name_str == "#readOnly") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsNull());
- } else {
- CHECK_EQ(name_str, "#writeOnly");
- CHECK(accessors->getter()->IsNull());
- CHECK(accessors->setter()->IsFunction());
- }
+ CHECK_EQ(name_str, "#writeOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
}
}
@@ -5162,24 +5200,34 @@ TEST(GetPrivateMethodsAndAccessors) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "x"))
.ToLocalChecked());
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
- CHECK_EQ(names.size(), 2);
- for (int i = 0; i < 2; i++) {
- v8::Local<v8::Value> name = names[i];
- v8::Local<v8::Value> value = values[i];
+ CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
CHECK(name->IsString());
- std::string name_str = FromString(v8_isolate, name.As<v8::String>());
- if (name_str == "#method") {
- CHECK(value->IsFunction());
- } else {
- CHECK_EQ(name_str, "#accessor");
- CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
- v8::Local<v8::debug::AccessorPair> accessors =
- value.As<v8::debug::AccessorPair>();
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsFunction());
- }
+ CHECK(v8_str("#method")->Equals(context, name.As<v8::String>()).FromJust());
+ CHECK(value->IsFunction());
+ }
+
+ names.clear();
+ values.clear();
+ CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
+ CHECK(name->IsString());
+ CHECK(
+ v8_str("#accessor")->Equals(context, name.As<v8::String>()).FromJust());
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
}
}
@@ -5204,31 +5252,48 @@ TEST(GetPrivateStaticMethodsAndAccessors) {
.ToLocalChecked());
std::vector<v8::Local<v8::Value>> names;
std::vector<v8::Local<v8::Value>> values;
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
- CHECK_EQ(names.size(), 4);
- for (int i = 0; i < 4; i++) {
+ int accessor_filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateAccessors);
+ int method_filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateMethods);
+
+ CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
+ CHECK(name->IsString());
+ CHECK(v8_str("#staticMethod")
+ ->Equals(context, name.As<v8::String>())
+ .FromJust());
+ CHECK(value->IsFunction());
+ }
+
+ names.clear();
+ values.clear();
+ CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 3);
+ for (int i = 0; i < 3; i++) {
v8::Local<v8::Value> name = names[i];
v8::Local<v8::Value> value = values[i];
CHECK(name->IsString());
std::string name_str = FromString(v8_isolate, name.As<v8::String>());
- if (name_str == "#staticMethod") {
- CHECK(value->IsFunction());
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#staticAccessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#staticReadOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
} else {
- CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
- v8::Local<v8::debug::AccessorPair> accessors =
- value.As<v8::debug::AccessorPair>();
- if (name_str == "#staticAccessor") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsFunction());
- } else if (name_str == "#staticReadOnly") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsNull());
- } else {
- CHECK_EQ(name_str, "#staticWriteOnly");
- CHECK(accessors->getter()->IsNull());
- CHECK(accessors->setter()->IsFunction());
- }
+ CHECK_EQ(name_str, "#staticWriteOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
}
}
}
@@ -5260,31 +5325,47 @@ TEST(GetPrivateStaticAndInstanceMethodsAndAccessors) {
.ToLocalChecked());
std::vector<v8::Local<v8::Value>> names;
std::vector<v8::Local<v8::Value>> values;
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+ int accessor_filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateAccessors);
+ int method_filter =
+ static_cast<int>(v8::debug::PrivateMemberFilter::kPrivateMethods);
+
+ CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
+ CHECK(name->IsString());
+ CHECK(v8_str("#staticMethod")
+ ->Equals(context, name.As<v8::String>())
+ .FromJust());
+ CHECK(value->IsFunction());
+ }
- CHECK_EQ(names.size(), 4);
- for (int i = 0; i < 4; i++) {
+ names.clear();
+ values.clear();
+ CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 3);
+ for (int i = 0; i < 3; i++) {
v8::Local<v8::Value> name = names[i];
v8::Local<v8::Value> value = values[i];
CHECK(name->IsString());
std::string name_str = FromString(v8_isolate, name.As<v8::String>());
- if (name_str == "#staticMethod") {
- CHECK(value->IsFunction());
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#staticAccessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#staticReadOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
} else {
- CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
- v8::Local<v8::debug::AccessorPair> accessors =
- value.As<v8::debug::AccessorPair>();
- if (name_str == "#staticAccessor") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsFunction());
- } else if (name_str == "#staticReadOnly") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsNull());
- } else {
- CHECK_EQ(name_str, "#staticWriteOnly");
- CHECK(accessors->getter()->IsNull());
- CHECK(accessors->setter()->IsFunction());
- }
+ CHECK_EQ(name_str, "#staticWriteOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
}
}
@@ -5294,31 +5375,40 @@ TEST(GetPrivateStaticAndInstanceMethodsAndAccessors) {
env->Global()
->Get(context, v8_str(env->GetIsolate(), "x"))
.ToLocalChecked());
- CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+ CHECK(v8::debug::GetPrivateMembers(context, object, method_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 1);
+ {
+ v8::Local<v8::Value> name = names[0];
+ v8::Local<v8::Value> value = values[0];
+ CHECK(name->IsString());
+ CHECK(v8_str("#method")->Equals(context, name.As<v8::String>()).FromJust());
+ CHECK(value->IsFunction());
+ }
- CHECK_EQ(names.size(), 4);
- for (int i = 0; i < 4; i++) {
+ names.clear();
+ values.clear();
+ CHECK(v8::debug::GetPrivateMembers(context, object, accessor_filter, &names,
+ &values));
+ CHECK_EQ(names.size(), 3);
+ for (int i = 0; i < 3; i++) {
v8::Local<v8::Value> name = names[i];
v8::Local<v8::Value> value = values[i];
CHECK(name->IsString());
std::string name_str = FromString(v8_isolate, name.As<v8::String>());
- if (name_str == "#method") {
- CHECK(value->IsFunction());
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#accessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#readOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
} else {
- CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
- v8::Local<v8::debug::AccessorPair> accessors =
- value.As<v8::debug::AccessorPair>();
- if (name_str == "#accessor") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsFunction());
- } else if (name_str == "#readOnly") {
- CHECK(accessors->getter()->IsFunction());
- CHECK(accessors->setter()->IsNull());
- } else {
- CHECK_EQ(name_str, "#writeOnly");
- CHECK(accessors->getter()->IsNull());
- CHECK(accessors->setter()->IsFunction());
- }
+ CHECK_EQ(name_str, "#writeOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
}
}
}
@@ -5924,8 +6014,8 @@ class ScopeListener : public v8::debug::DebugDelegate {
const std::vector<v8::debug::BreakpointId>&,
v8::debug::BreakReasons break_reasons) override {
i::Isolate* isolate = CcTest::i_isolate();
- i::StackTraceFrameIterator iterator_(isolate,
- isolate->debug()->break_frame_id());
+ i::DebuggableStackFrameIterator iterator_(
+ isolate, isolate->debug()->break_frame_id());
// Go up one frame so we are on the script level.
iterator_.Advance();
@@ -5944,8 +6034,6 @@ class ScopeListener : public v8::debug::DebugDelegate {
} // namespace
TEST(ScopeIteratorDoesNotCreateBlocklistForScriptScope) {
- i::v8_flags.experimental_reuse_locals_blocklists = true;
-
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -5987,8 +6075,6 @@ class DebugEvaluateListener : public v8::debug::DebugDelegate {
// scope nested inside an eval scope with the exact same source positions.
// This can confuse the blocklist mechanism if not handled correctly.
TEST(DebugEvaluateInWrappedScript) {
- i::v8_flags.experimental_reuse_locals_blocklists = true;
-
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -6012,3 +6098,70 @@ TEST(DebugEvaluateInWrappedScript) {
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
}
+
+namespace {
+
+class ConditionListener : public v8::debug::DebugDelegate {
+ public:
+ void BreakpointConditionEvaluated(
+ v8::Local<v8::Context> context, v8::debug::BreakpointId breakpoint_id_arg,
+ bool exception_thrown_arg, v8::Local<v8::Value> exception_arg) override {
+ breakpoint_id = breakpoint_id_arg;
+ exception_thrown = exception_thrown_arg;
+ exception = exception_arg;
+ }
+
+ void BreakProgramRequested(v8::Local<v8::Context> context,
+ const std::vector<v8::debug::BreakpointId>&,
+ v8::debug::BreakReasons break_reasons) override {
+ break_point_hit_count++;
+ }
+
+ v8::debug::BreakpointId breakpoint_id;
+ bool exception_thrown = false;
+ v8::Local<v8::Value> exception;
+};
+
+} // namespace
+
+TEST(SuccessfulBreakpointConditionEvaluationEvent) {
+ break_point_hit_count = 0;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ ConditionListener delegate;
+ v8::debug::SetDebugDelegate(isolate, &delegate);
+
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo() { const x = 5; }", "foo");
+
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0, "true");
+ foo->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
+ CHECK_EQ(1, break_point_hit_count);
+ CHECK_EQ(bp->id(), delegate.breakpoint_id);
+ CHECK(!delegate.exception_thrown);
+ CHECK(delegate.exception.IsEmpty());
+}
+
+// Checks that SyntaxErrors in breakpoint conditions are reported to the
+// DebugDelegate.
+TEST(FailedBreakpointConditoinEvaluationEvent) {
+ break_point_hit_count = 0;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ ConditionListener delegate;
+ v8::debug::SetDebugDelegate(isolate, &delegate);
+
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo() { const x = 5; }", "foo");
+
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0, "bar().");
+ foo->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
+ CHECK_EQ(0, break_point_hit_count);
+ CHECK_EQ(bp->id(), delegate.breakpoint_id);
+ CHECK(delegate.exception_thrown);
+ CHECK(!delegate.exception.IsEmpty());
+}
diff --git a/deps/v8/test/cctest/test-descriptor-array.cc b/deps/v8/test/cctest/test-descriptor-array.cc
index 8642174d18..db70c4f6a0 100644
--- a/deps/v8/test/cctest/test-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-descriptor-array.cc
@@ -125,8 +125,7 @@ Handle<JSFunction> CreateCsaDescriptorArrayLookup(Isolate* isolate) {
const int kNumParams = 2;
compiler::CodeAssemblerTester asm_tester(
- isolate, kNumParams + 1, // +1 to include receiver.
- CodeKind::FOR_TESTING);
+ isolate, JSParameterCount(kNumParams), CodeKind::FOR_TESTING);
{
CodeStubAssembler m(asm_tester.state());
@@ -170,8 +169,7 @@ Handle<JSFunction> CreateCsaTransitionArrayLookup(Isolate* isolate) {
const int kNumParams = 2;
compiler::CodeAssemblerTester asm_tester(
- isolate, kNumParams + 1, // +1 to include receiver.
- CodeKind::FOR_TESTING);
+ isolate, JSParameterCount(kNumParams), CodeKind::FOR_TESTING);
{
CodeStubAssembler m(asm_tester.state());
diff --git a/deps/v8/test/cctest/test-disasm-regex-helper.cc b/deps/v8/test/cctest/test-disasm-regex-helper.cc
index e0ac994044..8c3b251d15 100644
--- a/deps/v8/test/cctest/test-disasm-regex-helper.cc
+++ b/deps/v8/test/cctest/test-disasm-regex-helper.cc
@@ -21,9 +21,9 @@ std::string DisassembleFunction(const char* function) {
CcTest::global()->Get(context, v8_str(function)).ToLocalChecked())));
Isolate* isolate = CcTest::i_isolate();
- Handle<Code> code(FromCodeT(f->code()), isolate);
- Address begin = code->raw_instruction_start();
- Address end = code->raw_instruction_end();
+ Handle<Code> code(f->code(), isolate);
+ Address begin = code->InstructionStart();
+ Address end = code->InstructionEnd();
std::ostringstream os;
Disassembler::Decode(isolate, os, reinterpret_cast<byte*>(begin),
reinterpret_cast<byte*>(end), CodeReference(code));
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index b87e67d884..6a69fa5771 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -2907,10 +2907,10 @@ void TestStoreToConstantField_NaN(const char* store_func_source,
Handle<Object> nan1 = isolate->factory()->NewNumber(nan_double1);
Handle<Object> nan2 = isolate->factory()->NewNumber(nan_double2);
- // NaNs with different bit patters are treated as equal upon stores.
+ // NaNs with different bit patterns are not treated as equal upon stores.
TestStoreToConstantField(store_func_source, nan1, nan2,
- Representation::Double(), PropertyConstness::kConst,
- store_repetitions);
+ Representation::Double(),
+ PropertyConstness::kMutable, store_repetitions);
}
} // namespace
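The flipped comment and expectation above rely on two values that are both NaN yet differ bit-for-bit; storing such a NaN over a `kConst` double field now demotes the field to `kMutable`. A small self-contained example of constructing two such NaNs (the payload values are arbitrary choices for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint64_t bits1 = 0x7FF8000000000000ULL;  // canonical quiet NaN
      uint64_t bits2 = 0x7FF8000000000001ULL;  // quiet NaN, extra payload bit
      double nan1, nan2;
      std::memcpy(&nan1, &bits1, sizeof nan1);
      std::memcpy(&nan2, &bits2, sizeof nan2);
      // Both are NaN numerically, but their bit patterns differ -- which is
      // the distinction the store-to-constant-field tracking now observes.
      std::printf("nan1 is NaN: %d, nan2 is NaN: %d, same bits: %d\n",
                  nan1 != nan1, nan2 != nan2, bits1 == bits2);
      return 0;
    }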
@@ -2926,10 +2926,8 @@ TEST(StoreToConstantField_PlusMinusZero) {
"}";
TestStoreToConstantField_PlusMinusZero(store_func_source, 1);
- TestStoreToConstantField_PlusMinusZero(store_func_source, 3);
TestStoreToConstantField_NaN(store_func_source, 1);
- TestStoreToConstantField_NaN(store_func_source, 2);
}
TEST(StoreToConstantField_ObjectDefineProperty) {
@@ -2946,10 +2944,8 @@ TEST(StoreToConstantField_ObjectDefineProperty) {
"}";
TestStoreToConstantField_PlusMinusZero(store_func_source, 1);
- TestStoreToConstantField_PlusMinusZero(store_func_source, 3);
TestStoreToConstantField_NaN(store_func_source, 1);
- TestStoreToConstantField_NaN(store_func_source, 2);
}
TEST(StoreToConstantField_ReflectSet) {
@@ -2962,10 +2958,8 @@ TEST(StoreToConstantField_ReflectSet) {
"}";
TestStoreToConstantField_PlusMinusZero(store_func_source, 1);
- TestStoreToConstantField_PlusMinusZero(store_func_source, 3);
TestStoreToConstantField_NaN(store_func_source, 1);
- TestStoreToConstantField_NaN(store_func_source, 2);
}
TEST(StoreToConstantField_StoreIC) {
@@ -2978,10 +2972,8 @@ TEST(StoreToConstantField_StoreIC) {
"}";
TestStoreToConstantField_PlusMinusZero(store_func_source, 1);
- TestStoreToConstantField_PlusMinusZero(store_func_source, 3);
TestStoreToConstantField_NaN(store_func_source, 1);
- TestStoreToConstantField_NaN(store_func_source, 2);
}
TEST(NormalizeToMigrationTarget) {
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index cda65e0835..d9d363eeae 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -1260,8 +1260,8 @@ static TestStatsStream GetHeapStatsUpdate(
TEST(HeapSnapshotObjectsStats) {
// Concurrent allocation and conservative stack scanning might break results.
i::v8_flags.stress_concurrent_allocation = false;
- i::ScanStackModeScopeForTesting no_stack_scanning(
- CcTest::heap(), i::Heap::ScanStackMode::kNone);
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
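Several tests in this diff (CPU profiler flush tests, debug teardown, and this heap-profiler test) now guard GC-dependent assertions with `DisableConservativeStackScanningScopeForTesting`, replacing the older `ScanStackModeScopeForTesting(..., kNone)`. It is a plain RAII scope; a fragment assuming the cctest helpers used above:

    // With conservative stack scanning disabled for this scope, objects that
    // are only reachable from the machine stack do not survive the forced GC,
    // which keeps the reclamation-dependent checks deterministic.
    i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
        CcTest::heap());
    CcTest::CollectAllGarbage();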
@@ -1609,8 +1609,6 @@ class EmbedderGraphBuilder : public v8::PersistentHandleVisitor {
graph->AddNode(std::unique_ptr<Group>(new Group("ccc-group")));
}
- START_ALLOW_USE_DEPRECATED()
-
static void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
void* data) {
EmbedderGraphBuilder builder(isolate, graph);
@@ -1619,8 +1617,6 @@ class EmbedderGraphBuilder : public v8::PersistentHandleVisitor {
->IterateAllRootsForTesting(&builder);
}
- END_ALLOW_USE_DEPRECATED()
-
void VisitPersistentHandle(v8::Persistent<v8::Value>* value,
uint16_t class_id) override {
v8::Local<v8::Value> wrapper = v8::Local<v8::Value>::New(
@@ -2641,11 +2637,11 @@ TEST(AllocationSitesAreVisible) {
const v8::HeapGraphNode* vector = GetProperty(
env->GetIsolate(), feedback_cell, v8::HeapGraphEdge::kInternal, "value");
CHECK_EQ(v8::HeapGraphNode::kCode, vector->GetType());
- CHECK_EQ(4, vector->GetChildrenCount());
+ CHECK_EQ(5, vector->GetChildrenCount());
// The last value in the feedback vector should be the boilerplate,
// found in AllocationSite.transition_info.
- const v8::HeapGraphEdge* prop = vector->GetChild(3);
+ const v8::HeapGraphEdge* prop = vector->GetChild(4);
const v8::HeapGraphNode* allocation_site = prop->GetToNode();
v8::String::Utf8Value name(env->GetIsolate(), allocation_site->GetName());
CHECK_EQ(0, strcmp("system / AllocationSite", *name));
@@ -2737,11 +2733,7 @@ TEST(CheckCodeNames) {
const char* builtin_path1[] = {
"::(GC roots)",
"::(Builtins)",
-#ifdef V8_EXTERNAL_CODE_SPACE
"::(KeyedLoadIC_PolymorphicName builtin handle)",
-#else
- "::(KeyedLoadIC_PolymorphicName builtin)",
-#endif
};
const v8::HeapGraphNode* node = GetNodeByPath(
env->GetIsolate(), snapshot, builtin_path1, arraysize(builtin_path1));
@@ -2750,21 +2742,13 @@ TEST(CheckCodeNames) {
const char* builtin_path2[] = {
"::(GC roots)",
"::(Builtins)",
-#ifdef V8_EXTERNAL_CODE_SPACE
"::(CompileLazy builtin handle)",
-#else
- "::(CompileLazy builtin)",
-#endif
};
node = GetNodeByPath(env->GetIsolate(), snapshot, builtin_path2,
arraysize(builtin_path2));
CHECK(node);
v8::String::Utf8Value node_name(env->GetIsolate(), node->GetName());
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- CHECK_EQ(0, strcmp("(CompileLazy builtin handle)", *node_name));
- } else {
- CHECK_EQ(0, strcmp("(CompileLazy builtin)", *node_name));
- }
+ CHECK_EQ(0, strcmp("(CompileLazy builtin handle)", *node_name));
}
@@ -2972,7 +2956,7 @@ TEST(TrackBumpPointerAllocations) {
// Now check that not all allocations are tracked if we manually reenable
// inline allocations.
CHECK(i::v8_flags.single_generation ||
- !CcTest::heap()->new_space()->IsInlineAllocationEnabled());
+ !CcTest::heap()->IsInlineAllocationEnabled());
CcTest::heap()->EnableInlineAllocation();
CompileRun(inline_heap_allocation_source);
@@ -4098,10 +4082,11 @@ TEST(WeakReference) {
i_isolate);
i::Handle<i::ClosureFeedbackCellArray> feedback_cell_array =
i::ClosureFeedbackCellArray::New(i_isolate, shared_function);
- i::Handle<i::FeedbackVector> fv =
- factory->NewFeedbackVector(shared_function, feedback_cell_array);
+ i::Handle<i::FeedbackVector> fv = factory->NewFeedbackVector(
+ shared_function, feedback_cell_array,
+ handle(i::JSFunction::cast(*obj).raw_feedback_cell(), i_isolate));
- // Create a Code.
+ // Create a Code object.
i::Assembler assm(i::AssemblerOptions{});
assm.nop(); // supported on all architectures
i::CodeDesc desc;
@@ -4113,8 +4098,7 @@ TEST(WeakReference) {
// Manually inlined version of FeedbackVector::SetOptimizedCode (needed due
// to the FOR_TESTING code kind).
- fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(ToCodeT(*code)),
- v8::kReleaseStore);
+ fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code));
fv->set_flags(
i::FeedbackVector::MaybeHasTurbofanCodeBit::encode(true) |
i::FeedbackVector::TieringStateBits::encode(i::TieringState::kNone));
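
A recurring pattern first visible in the heap-profiler hunks above: tests that depend on precise reclamation of unreachable objects now disable conservative stack scanning through a RAII scope rather than the removed ScanStackModeScopeForTesting. A minimal sketch of the pattern, assuming the CcTest fixture used throughout these files:

    // Sketch: suppress conservative stack scanning for the scope of a test body.
    i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
        CcTest::heap());
    // ... allocate objects, drop the last strong references, trigger GC, and
    // assert that the objects were actually collected ...
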
diff --git a/deps/v8/test/cctest/test-helper-riscv32.cc b/deps/v8/test/cctest/test-helper-riscv32.cc
index 0e9738a7b7..79468c7373 100644
--- a/deps/v8/test/cctest/test-helper-riscv32.cc
+++ b/deps/v8/test/cctest/test-helper-riscv32.cc
@@ -24,12 +24,11 @@ int32_t GenAndRunTest(Func test_generator) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<int32_t()>::FromCode(*code);
+ auto f = GeneratedCode<int32_t()>::FromCode(isolate, *code);
return f.Call();
}
-Handle<Code> AssembleCodeImpl(Func assemble) {
- Isolate* isolate = CcTest::i_isolate();
+Handle<Code> AssembleCodeImpl(Isolate* isolate, Func assemble) {
MacroAssembler assm(isolate, CodeObjectRequired::kYes);
assemble(assm);
diff --git a/deps/v8/test/cctest/test-helper-riscv32.h b/deps/v8/test/cctest/test-helper-riscv32.h
index b5c2f7730b..91fe1835dc 100644
--- a/deps/v8/test/cctest/test-helper-riscv32.h
+++ b/deps/v8/test/cctest/test-helper-riscv32.h
@@ -70,7 +70,7 @@ OUTPUT_T GenAndRunTest(INPUT_T input0, Func test_generator) {
typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
int64_t>::type>::type;
- auto f = GeneratedCode<OINT_T(IINT_T)>::FromCode(*code);
+ auto f = GeneratedCode<OINT_T(IINT_T)>::FromCode(isolate, *code);
auto res = f.Call(base::bit_cast<IINT_T>(input0));
return base::bit_cast<OUTPUT_T>(res);
@@ -116,7 +116,7 @@ OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, Func test_generator) {
std::is_integral<INPUT_T>::value, INPUT_T,
typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
int64_t>::type>::type;
- auto f = GeneratedCode<OINT_T(IINT_T, IINT_T)>::FromCode(*code);
+ auto f = GeneratedCode<OINT_T(IINT_T, IINT_T)>::FromCode(isolate, *code);
auto res =
f.Call(base::bit_cast<IINT_T>(input0), base::bit_cast<IINT_T>(input1));
@@ -165,7 +165,8 @@ OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, INPUT_T input2,
std::is_integral<INPUT_T>::value, INPUT_T,
typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
int64_t>::type>::type;
- auto f = GeneratedCode<OINT_T(IINT_T, IINT_T, IINT_T)>::FromCode(*code);
+ auto f =
+ GeneratedCode<OINT_T(IINT_T, IINT_T, IINT_T)>::FromCode(isolate, *code);
auto res =
f.Call(base::bit_cast<IINT_T>(input0), base::bit_cast<IINT_T>(input1),
@@ -206,7 +207,8 @@ void GenAndRunTestForLoadStore(T value, Func test_generator) {
std::is_integral<T>::value, T,
typename std::conditional<sizeof(T) == 4, int32_t, int64_t>::type>::type;
- auto f = GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(*code);
+ auto f =
+ GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(isolate, *code);
int64_t tmp = 0;
auto res = f.Call(&tmp, base::bit_cast<INT_T>(value));
@@ -253,7 +255,8 @@ void GenAndRunTestForLRSC(T value, Func test_generator) {
typename std::conditional<sizeof(T) == 4, int32_t, int64_t>::type;
T tmp = 0;
- auto f = GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(*code);
+ auto f =
+ GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(isolate, *code);
auto res = f.Call(&tmp, base::bit_cast<T>(value));
CHECK_EQ(base::bit_cast<T>(res), static_cast<T>(0));
}
@@ -312,18 +315,19 @@ OUTPUT_T GenAndRunTestForAMO(INPUT_T input0, INPUT_T input1,
code->Print();
#endif
OUTPUT_T tmp = 0;
- auto f =
- GeneratedCode<OUTPUT_T(void* base, INPUT_T, INPUT_T)>::FromCode(*code);
+ auto f = GeneratedCode<OUTPUT_T(void* base, INPUT_T, INPUT_T)>::FromCode(
+ isolate, *code);
auto res = f.Call(&tmp, base::bit_cast<INPUT_T>(input0),
base::bit_cast<INPUT_T>(input1));
return base::bit_cast<OUTPUT_T>(res);
}
-Handle<Code> AssembleCodeImpl(Func assemble);
+Handle<Code> AssembleCodeImpl(Isolate* isolate, Func assemble);
template <typename Signature>
-GeneratedCode<Signature> AssembleCode(Func assemble) {
- return GeneratedCode<Signature>::FromCode(*AssembleCodeImpl(assemble));
+GeneratedCode<Signature> AssembleCode(Isolate* isolate, Func assemble) {
+ return GeneratedCode<Signature>::FromCode(
+ isolate, *AssembleCodeImpl(isolate, assemble));
}
template <typename T>
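
The helper changes above all follow one shape: GeneratedCode<...>::FromCode and AssembleCode/AssembleCodeImpl now take the Isolate explicitly instead of fetching it internally. A condensed sketch of the updated flow, using only calls that appear in these hunks; the HandleScope and the generator body are assumed from the surrounding test fixture:

    // Sketch: build and run a test stub with the isolate passed explicitly.
    Isolate* isolate = CcTest::i_isolate();
    HandleScope scope(isolate);
    MacroAssembler assm(isolate, CodeObjectRequired::kYes);
    // ... emit instructions into assm via a test generator ...
    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    Handle<Code> code =
        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
    // The isolate is now an explicit argument (was FromCode(*code)).
    auto f = GeneratedCode<int32_t()>::FromCode(isolate, *code);
    int32_t result = f.Call();
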
diff --git a/deps/v8/test/cctest/test-helper-riscv64.cc b/deps/v8/test/cctest/test-helper-riscv64.cc
index 75263d35f5..e4cf6bd29b 100644
--- a/deps/v8/test/cctest/test-helper-riscv64.cc
+++ b/deps/v8/test/cctest/test-helper-riscv64.cc
@@ -23,12 +23,11 @@ int64_t GenAndRunTest(Func test_generator) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<int64_t()>::FromCode(*code);
+ auto f = GeneratedCode<int64_t()>::FromCode(isolate, *code);
return f.Call();
}
-Handle<Code> AssembleCodeImpl(Func assemble) {
- Isolate* isolate = CcTest::i_isolate();
+Handle<Code> AssembleCodeImpl(Isolate* isolate, Func assemble) {
MacroAssembler assm(isolate, CodeObjectRequired::kYes);
assemble(assm);
diff --git a/deps/v8/test/cctest/test-helper-riscv64.h b/deps/v8/test/cctest/test-helper-riscv64.h
index 6bd2c996e5..a6914e20c2 100644
--- a/deps/v8/test/cctest/test-helper-riscv64.h
+++ b/deps/v8/test/cctest/test-helper-riscv64.h
@@ -67,7 +67,7 @@ OUTPUT_T GenAndRunTest(INPUT_T input0, Func test_generator) {
typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
int64_t>::type>::type;
- auto f = GeneratedCode<OINT_T(IINT_T)>::FromCode(*code);
+ auto f = GeneratedCode<OINT_T(IINT_T)>::FromCode(isolate, *code);
auto res = f.Call(base::bit_cast<IINT_T>(input0));
return base::bit_cast<OUTPUT_T>(res);
@@ -114,7 +114,7 @@ OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, Func test_generator) {
std::is_integral<INPUT_T>::value, INPUT_T,
typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
int64_t>::type>::type;
- auto f = GeneratedCode<OINT_T(IINT_T, IINT_T)>::FromCode(*code);
+ auto f = GeneratedCode<OINT_T(IINT_T, IINT_T)>::FromCode(isolate, *code);
auto res =
f.Call(base::bit_cast<IINT_T>(input0), base::bit_cast<IINT_T>(input1));
@@ -165,7 +165,8 @@ OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, INPUT_T input2,
std::is_integral<INPUT_T>::value, INPUT_T,
typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
int64_t>::type>::type;
- auto f = GeneratedCode<OINT_T(IINT_T, IINT_T, IINT_T)>::FromCode(*code);
+ auto f =
+ GeneratedCode<OINT_T(IINT_T, IINT_T, IINT_T)>::FromCode(isolate, *code);
auto res =
f.Call(base::bit_cast<IINT_T>(input0), base::bit_cast<IINT_T>(input1),
@@ -211,7 +212,8 @@ void GenAndRunTestForLoadStore(T value, Func test_generator) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(*code);
+ auto f =
+ GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(isolate, *code);
int64_t tmp = 0;
auto res = f.Call(&tmp, base::bit_cast<INT_T>(value));
@@ -258,7 +260,8 @@ void GenAndRunTestForLRSC(T value, Func test_generator) {
typename std::conditional<sizeof(T) == 4, int32_t, int64_t>::type;
T tmp = 0;
- auto f = GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(*code);
+ auto f =
+ GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(isolate, *code);
auto res = f.Call(&tmp, base::bit_cast<T>(value));
CHECK_EQ(base::bit_cast<T>(res), static_cast<T>(0));
}
@@ -318,18 +321,19 @@ OUTPUT_T GenAndRunTestForAMO(INPUT_T input0, INPUT_T input1,
code->Print();
#endif
OUTPUT_T tmp = 0;
- auto f =
- GeneratedCode<OUTPUT_T(void* base, INPUT_T, INPUT_T)>::FromCode(*code);
+ auto f = GeneratedCode<OUTPUT_T(void* base, INPUT_T, INPUT_T)>::FromCode(
+ isolate, *code);
auto res = f.Call(&tmp, base::bit_cast<INPUT_T>(input0),
base::bit_cast<INPUT_T>(input1));
return base::bit_cast<OUTPUT_T>(res);
}
-Handle<Code> AssembleCodeImpl(Func assemble);
+Handle<Code> AssembleCodeImpl(Isolate* isolate, Func assemble);
template <typename Signature>
-GeneratedCode<Signature> AssembleCode(Func assemble) {
- return GeneratedCode<Signature>::FromCode(*AssembleCodeImpl(assemble));
+GeneratedCode<Signature> AssembleCode(Isolate* isolate, Func assemble) {
+ return GeneratedCode<Signature>::FromCode(
+ isolate, *AssembleCodeImpl(isolate, assemble));
}
template <typename T>
diff --git a/deps/v8/test/cctest/test-js-to-wasm.cc b/deps/v8/test/cctest/test-js-to-wasm.cc
index fe32b8d786..867a725b22 100644
--- a/deps/v8/test/cctest/test-js-to-wasm.cc
+++ b/deps/v8/test/cctest/test-js-to-wasm.cc
@@ -42,12 +42,21 @@ template <>
bool CheckType<v8::Local<v8::BigInt>>(v8::Local<v8::Value> result) {
return result->IsBigInt();
}
+template <>
+bool CheckType<v8::Local<v8::String>>(v8::Local<v8::Value> result) {
+ return result->IsString();
+}
+
+template <>
+bool CheckType<std::nullptr_t>(v8::Local<v8::Value> result) {
+ return result->IsNull();
+}
static TestSignatures sigs;
struct ExportedFunction {
std::string name;
- FunctionSig* signature;
+ const FunctionSig* signature;
std::vector<ValueType> locals;
std::vector<uint8_t> code;
@@ -80,6 +89,16 @@ DECLARE_EXPORTED_FUNCTION(i64_square, sigs.l_l(),
WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
kExprI64Mul}))
+DECLARE_EXPORTED_FUNCTION(externref_null_id, sigs.a_a(),
+ WASM_CODE({WASM_LOCAL_GET(0)}))
+
+static constexpr ValueType extern_extern_types[] = {kWasmExternRef.AsNonNull(),
+ kWasmExternRef.AsNonNull()};
+static constexpr FunctionSig sig_extern_extern(1, 1, extern_extern_types);
+
+DECLARE_EXPORTED_FUNCTION(externref_id, &sig_extern_extern,
+ WASM_CODE({WASM_LOCAL_GET(0)}))
+
DECLARE_EXPORTED_FUNCTION(f32_square, sigs.f_f(),
WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
kExprF32Mul}))
@@ -266,6 +285,7 @@ class FastJSWasmCallTester {
: allocator_(),
zone_(&allocator_, ZONE_NAME),
builder_(zone_.New<WasmModuleBuilder>(&zone_)) {
+ i::v8_flags.experimental_wasm_typed_funcref = true;
i::v8_flags.allow_natives_syntax = true;
i::v8_flags.turbo_inline_js_wasm_calls = true;
i::v8_flags.stress_background_compile = false;
@@ -308,8 +328,12 @@ class FastJSWasmCallTester {
env, exported_function_name, args, test_lazy_deopt);
CHECK(CheckType<T>(result_value));
- T result = ConvertJSValue<T>::Get(result_value, env.local()).ToChecked();
- CHECK_EQ(result, expected_result);
+ if constexpr (std::is_convertible_v<T, decltype(result_value)>) {
+ CHECK_EQ(result_value, expected_result);
+ } else {
+ T result = ConvertJSValue<T>::Get(result_value, env.local()).ToChecked();
+ CHECK_EQ(result, expected_result);
+ }
}
// Executes a test function that returns NaN.
@@ -806,6 +830,30 @@ TEST(TestFastJSWasmCall_I64NegativeResult) {
"i64_add", {v8_bigint(1ll), v8_bigint(-2ll)}, v8_bigint(-1ll));
}
+TEST(TestFastJSWasmCall_ExternrefNullArg) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_externref_null_id);
+ Local<Primitive> v8_null = v8::Null(CcTest::isolate());
+ tester.CallAndCheckWasmFunction("externref_null_id", {v8_null}, nullptr);
+ tester.CallAndCheckWasmFunction("externref_null_id", {v8_num(42)}, 42);
+ tester.CallAndCheckWasmFunctionBigInt("externref_null_id", {v8_bigint(42)},
+ v8_bigint(42));
+ auto str = v8_str("test");
+ tester.CallAndCheckWasmFunction("externref_null_id", {str}, str);
+}
+
+TEST(TestFastJSWasmCall_ExternrefArg) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_externref_id);
+ tester.CallAndCheckWasmFunction("externref_id", {v8_num(42)}, 42);
+ tester.CallAndCheckWasmFunctionBigInt("externref_id", {v8_bigint(42)},
+ v8_bigint(42));
+ auto str = v8_str("test");
+ tester.CallAndCheckWasmFunction("externref_id", {str}, str);
+}
+
TEST(TestFastJSWasmCall_MultipleArgs) {
v8::HandleScope scope(CcTest::isolate());
FastJSWasmCallTester tester;
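
For the js-to-wasm hunks above: externref arguments and results are now exercised directly, so the tester compares V8 handles (or checks for null via the new std::nullptr_t specialization of CheckType) instead of routing every result through ConvertJSValue. A short sketch condensing the new externref round trip, using only names from this file's hunks:

    // Sketch: null and string externrefs round-trip through the identity export.
    Local<Primitive> v8_null = v8::Null(CcTest::isolate());
    tester.CallAndCheckWasmFunction("externref_null_id", {v8_null}, nullptr);
    auto str = v8_str("test");
    tester.CallAndCheckWasmFunction("externref_null_id", {str}, str);
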
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 2ac7a1dcf9..eb44c3c890 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -687,6 +687,8 @@ TEST(TestJSWeakRef) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
HandleScope outer_scope(isolate);
Handle<JSWeakRef> weak_ref;
{
@@ -720,6 +722,7 @@ TEST(TestJSWeakRefIncrementalMarking) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
Handle<JSWeakRef> weak_ref;
{
@@ -750,6 +753,9 @@ TEST(TestJSWeakRefKeepDuringJob) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ CcTest::heap());
+
HandleScope outer_scope(isolate);
Handle<JSWeakRef> weak_ref = MakeWeakRefAndKeepDuringJob(isolate);
CHECK(!weak_ref->target().IsUndefined(isolate));
@@ -792,6 +798,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
Handle<JSWeakRef> weak_ref = MakeWeakRefAndKeepDuringJob(isolate);
@@ -880,6 +887,7 @@ TEST(JSWeakRefScavengedInWorklist) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
{
HandleScope outer_scope(isolate);
@@ -931,6 +939,7 @@ TEST(JSWeakRefTenuredInWorklist) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
HandleScope outer_scope(isolate);
Handle<JSWeakRef> weak_ref;
@@ -984,6 +993,7 @@ TEST(UnregisterTokenHeapVerifier) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Heap* heap = CcTest::heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
v8::HandleScope outer_scope(isolate);
{
@@ -1032,6 +1042,7 @@ TEST(UnregisteredAndUnclearedCellHeapVerifier) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Heap* heap = CcTest::heap();
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap);
v8::HandleScope outer_scope(isolate);
{
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index c33c94a818..b75e68bc0a 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -871,7 +871,9 @@ TEST(LockUnlockLockDefaultIsolateMultithreaded) {
threads.push_back(new LockUnlockLockDefaultIsolateThread(context));
}
}
+ CcTest::isolate()->Exit();
StartJoinAndDeleteThreads(threads);
+ CcTest::isolate()->Enter();
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index f66653a647..cce64f0080 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -152,7 +152,7 @@ TEST(ExtractLane) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&t, 0, 0, 0, 0);
for (int i = 0; i < 4; i++) {
CHECK_EQ(i, t.i32x4_low[i]);
@@ -283,7 +283,7 @@ TEST(ReplaceLane) {
StdoutStream os;
code->Print(os);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&t, 0, 0, 0, 0);
for (int i = 0; i < 4; i++) {
CHECK_EQ(i, t.i32x4_low[i]);
diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
index 83b61966d0..3939279993 100644
--- a/deps/v8/test/cctest/test-macro-assembler-loong64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
@@ -106,7 +106,7 @@ TEST(BYTESWAP) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (size_t i = 0; i < arraysize(test_values); i++) {
int32_t in_s4 = static_cast<int32_t>(test_values[i]);
@@ -151,7 +151,7 @@ TEST(LoadConstants) {
// Load constant.
__ li(a5, Operand(refConstants[i]));
__ St_d(a5, MemOperand(a4, zero_reg));
- __ Add_d(a4, a4, Operand(kPointerSize));
+ __ Add_d(a4, a4, Operand(kSystemPointerSize));
}
__ jirl(zero_reg, ra, 0);
@@ -161,7 +161,7 @@ TEST(LoadConstants) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
// Check results.
for (int i = 0; i < 64; i++) {
@@ -220,7 +220,7 @@ TEST(jump_tables4) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -304,7 +304,7 @@ TEST(jump_tables6) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kSwitchTableCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -327,7 +327,7 @@ static uint64_t run_alsl_w(uint32_t rj, uint32_t rk, int8_t sa) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(rj, rk, 0, 0, 0));
@@ -405,7 +405,7 @@ static uint64_t run_alsl_d(uint64_t rj, uint64_t rk, int8_t sa) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(rj, rk, 0, 0, 0));
@@ -553,10 +553,10 @@ RET_TYPE run_CVT(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc,
- CodeKind::FOR_TESTING).Build();
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ auto f = GeneratedCode<F_CVT>::FromCode(isolate, *code);
return reinterpret_cast<RET_TYPE>(f.Call(x, 0, 0, 0, 0));
}
@@ -745,7 +745,7 @@ TEST(OverflowInstructions) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.lhs = ii;
t.rhs = jj;
f.Call(&t, 0, 0, 0, 0);
@@ -840,6 +840,9 @@ TEST(min_max_nan) {
Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan;
__ Push(s6);
+#ifdef V8_COMPRESS_POINTERS
+ __ Push(s8);
+#endif
__ InitializeRootRegister();
__ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
__ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
@@ -857,6 +860,9 @@ TEST(min_max_nan) {
__ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, d)));
__ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g)));
__ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h)));
+#ifdef V8_COMPRESS_POINTERS
+ __ Pop(s8);
+#endif
__ Pop(s6);
__ jirl(zero_reg, ra, 0);
@@ -869,7 +875,7 @@ TEST(min_max_nan) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -903,7 +909,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ auto f = GeneratedCode<F_CVT>::FromCode(isolate, *code);
MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
f.Call(memory_buffer, 0, 0, 0, 0);
@@ -1392,7 +1398,7 @@ bool run_Sltu(uint64_t rj, uint64_t rk, Func GenerateSltuInstructionFunc) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ auto f = GeneratedCode<F_CVT>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(rj, rk, 0, 0, 0));
return res == 1;
}
@@ -1489,7 +1495,7 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
StdoutStream os;
code->Print(os);
#endif
- return GeneratedCode<F4>::FromCode(*code);
+ return GeneratedCode<F4>::FromCode(masm->isolate(), *code);
}
TEST(macro_float_minmax_f32) {
@@ -1636,7 +1642,7 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
StdoutStream os;
code->Print(os);
#endif
- return GeneratedCode<F4>::FromCode(*code);
+ return GeneratedCode<F4>::FromCode(masm->isolate(), *code);
}
TEST(macro_float_minmax_f64) {
@@ -1733,7 +1739,7 @@ uint64_t run_Sub_w(uint64_t imm, int32_t num_instr) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -1817,7 +1823,7 @@ uint64_t run_Sub_d(uint64_t imm, int32_t num_instr) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -1930,7 +1936,7 @@ TEST(Move) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
f.Call(&t, 0, 0, 0, 0);
CHECK_EQ(t.a, t.result_a);
CHECK_EQ(t.b, t.result_b);
@@ -2003,7 +2009,7 @@ TEST(Movz_Movn) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_W[i];
@@ -2120,7 +2126,7 @@ TEST(macro_instructions1) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -2212,7 +2218,7 @@ TEST(macro_instructions2) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -2378,7 +2384,7 @@ TEST(macro_instructions3) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
@@ -2470,7 +2476,7 @@ TEST(Rotr_w) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.input = 0x12345678;
f.Call(&t, 0, 0, 0, 0);
@@ -2581,7 +2587,7 @@ TEST(Rotr_d) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.input = 0x0123456789ABCDEF;
f.Call(&t, 0, 0, 0, 0);
@@ -2726,7 +2732,7 @@ TEST(macro_instructions4) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
t.a = inputs_d[i];
t.b = inputs_s[i];
@@ -2760,7 +2766,7 @@ uint64_t run_ExtractBits(uint64_t source, int pos, int size, bool sign_extend) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(source, pos, 0, 0, 0));
return res;
}
@@ -2811,7 +2817,7 @@ uint64_t run_InsertBits(uint64_t dest, uint64_t source, int pos, int size) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(dest, source, pos, 0, 0));
return res;
}
@@ -2879,7 +2885,7 @@ TEST(Popcnt) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
for (size_t i = 0; i < nr_test_cases; ++i) {
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index dead68ee36..6b821931ec 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -111,7 +111,7 @@ TEST(BYTESWAP) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (size_t i = 0; i < arraysize(test_values); i++) {
int32_t in_s4 = static_cast<int32_t>(test_values[i]);
@@ -167,7 +167,7 @@ TEST(LoadConstants) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
// Check results.
for (int i = 0; i < 64; i++) {
@@ -210,7 +210,7 @@ TEST(LoadAddress) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
(void)f.Call(0, 0, 0, 0, 0);
// Check results.
}
@@ -269,7 +269,7 @@ TEST(jump_tables4) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -339,7 +339,7 @@ TEST(jump_tables5) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -428,7 +428,7 @@ TEST(jump_tables6) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kSwitchTableCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -451,7 +451,7 @@ static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt, rs, 0, 0, 0));
@@ -531,7 +531,7 @@ static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt, rs, 0, 0, 0));
@@ -681,7 +681,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ auto f = GeneratedCode<F_CVT>::FromCode(isolate, *code);
return reinterpret_cast<RET_TYPE>(f.Call(x, 0, 0, 0, 0));
}
@@ -855,7 +855,7 @@ TEST(OverflowInstructions) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
t.lhs = ii;
t.rhs = jj;
f.Call(&t, 0, 0, 0, 0);
@@ -978,7 +978,7 @@ TEST(min_max_nan) {
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
@@ -1014,7 +1014,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ auto f = GeneratedCode<F_CVT>::FromCode(isolate, *code);
MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
f.Call(memory_buffer, 0, 0, 0, 0);
@@ -1378,7 +1378,7 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ auto f = GeneratedCode<F_CVT>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(rs, rd, 0, 0, 0));
return res == 1;
}
@@ -1476,7 +1476,7 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
StdoutStream os;
code->Print(os);
#endif
- return GeneratedCode<F4>::FromCode(*code);
+ return GeneratedCode<F4>::FromCode(masm->isolate(), *code);
}
TEST(macro_float_minmax_f32) {
@@ -1624,7 +1624,7 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
StdoutStream os;
code->Print(os);
#endif
- return GeneratedCode<F4>::FromCode(*code);
+ return GeneratedCode<F4>::FromCode(masm->isolate(), *code);
}
TEST(macro_float_minmax_f64) {
diff --git a/deps/v8/test/cctest/test-macro-assembler-riscv32.cc b/deps/v8/test/cctest/test-macro-assembler-riscv32.cc
index 64928a5eba..533e1ac043 100644
--- a/deps/v8/test/cctest/test-macro-assembler-riscv32.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-riscv32.cc
@@ -69,7 +69,7 @@ static uint32_t run_CalcScaledAddress(uint32_t rt, uint32_t rs, int8_t sa) {
auto fn = [sa](MacroAssembler& masm) {
__ CalcScaledAddress(a0, a0, a1, sa);
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(rt, rs, 0, 0, 0));
@@ -86,7 +86,7 @@ VTYPE run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
GenerateUnalignedInstructionFunc](MacroAssembler& masm) {
GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
};
- auto f = AssembleCode<int32_t(char*)>(fn);
+ auto f = AssembleCode<int32_t(char*)>(isolate, fn);
MemCopy(memory_buffer + in_offset, &value, sizeof(VTYPE));
f.Call(memory_buffer);
@@ -130,7 +130,7 @@ TEST(LoadConstants) {
__ AddWord(a4, a4, Operand(kSystemPointerSize));
}
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
(void)f.Call(reinterpret_cast<int32_t>(result), 0, 0, 0, 0);
// Check results.
@@ -175,7 +175,7 @@ TEST(LoadAddress) {
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
(void)f.Call(0, 0, 0, 0, 0);
// Check results.
@@ -232,7 +232,7 @@ TEST(jump_tables4) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int32_t res = reinterpret_cast<int32_t>(f.Call(i, 0, 0, 0, 0));
// ::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -319,7 +319,7 @@ TEST(jump_tables6) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kSwitchTableCases; ++i) {
int32_t res = reinterpret_cast<int32_t>(f.Call(i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId32 "\n", i, res);
@@ -522,7 +522,7 @@ TEST(OverflowInstructions) {
__ Sw(t0, MemOperand(a0, offsetof(T, output_mul2)));
__ Sw(a1, MemOperand(a0, offsetof(T, overflow_mul2)));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.lhs = ii;
t.rhs = jj;
@@ -614,7 +614,7 @@ TEST(min_max_nan) {
__ StoreFloat(fa0, MemOperand(a0, offsetof(TestFloat, h)));
__ pop(s6);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -890,7 +890,7 @@ TEST(macro_float_minmax_f32) {
};
auto f = AssembleCode<F4>(
- GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>);
+ isolate, GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
@@ -990,7 +990,7 @@ TEST(macro_float_minmax_f64) {
};
auto f = AssembleCode<F4>(
- GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>);
+ isolate, GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
@@ -1247,7 +1247,7 @@ TEST(Popcnt) {
__ Sw(a5, MemOperand(a4));
__ AddWord(a4, a4, Operand(kSystemPointerSize));
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
(void)f.Call(reinterpret_cast<uint32_t>(result), 0, 0, 0, 0);
// Check results.
diff --git a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
index 6769223a08..d88b2912f7 100644
--- a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
@@ -67,7 +67,7 @@ static uint64_t run_CalcScaledAddress(uint64_t rt, uint64_t rs, int8_t sa) {
auto fn = [sa](MacroAssembler& masm) {
__ CalcScaledAddress(a0, a0, a1, sa);
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt, rs, 0, 0, 0));
@@ -84,7 +84,7 @@ VTYPE run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
GenerateUnalignedInstructionFunc](MacroAssembler& masm) {
GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
};
- auto f = AssembleCode<int32_t(char*)>(fn);
+ auto f = AssembleCode<int32_t(char*)>(isolate, fn);
MemCopy(memory_buffer + in_offset, &value, sizeof(VTYPE));
f.Call(memory_buffer);
@@ -128,7 +128,7 @@ TEST(LoadConstants) {
__ Add64(a4, a4, Operand(kSystemPointerSize));
}
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
// Check results.
@@ -173,7 +173,7 @@ TEST(LoadAddress) {
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<FV>::FromCode(*code);
+ auto f = GeneratedCode<FV>::FromCode(isolate, *code);
(void)f.Call(0, 0, 0, 0, 0);
// Check results.
@@ -230,7 +230,7 @@ TEST(jump_tables4) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
// ::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -317,7 +317,7 @@ TEST(jump_tables6) {
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kSwitchTableCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
// ::printf("f(%d) = %" PRId64 "\n", i, res);
@@ -582,7 +582,7 @@ TEST(OverflowInstructions) {
__ Sd(t0, MemOperand(a0, offsetof(T, output_mul2)));
__ Sd(a1, MemOperand(a0, offsetof(T, overflow_mul2)));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.lhs = ii;
t.rhs = jj;
@@ -674,7 +674,7 @@ TEST(min_max_nan) {
__ StoreFloat(fa0, MemOperand(a0, offsetof(TestFloat, h)));
__ pop(s6);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1054,7 +1054,7 @@ TEST(macro_float_minmax_f32) {
};
auto f = AssembleCode<F4>(
- GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>);
+ isolate, GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
@@ -1154,7 +1154,7 @@ TEST(macro_float_minmax_f64) {
};
auto f = AssembleCode<F4>(
- GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>);
+ isolate, GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>);
#define CHECK_MINMAX(src1, src2, min, max) \
do { \
@@ -1433,7 +1433,7 @@ TEST(Dpopcnt) {
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
// Check results.
@@ -1485,7 +1485,7 @@ TEST(Popcnt) {
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
};
- auto f = AssembleCode<FV>(fn);
+ auto f = AssembleCode<FV>(isolate, fn);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
// Check results.
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index ccaa1c733f..1fd892815a 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -53,9 +53,11 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
Object(new_space->top() + kHeapObjectTag));
memento.set_map_after_allocation(ReadOnlyRoots(heap).allocation_memento_map(),
SKIP_WRITE_BARRIER);
- memento.set_allocation_site(
- AllocationSite::unchecked_cast(Object(kHeapObjectTag)),
- SKIP_WRITE_BARRIER);
+
+ // Using this accessor because set_allocation_site expects an Object and not
+ // a MaybeObject.
+ TaggedField<MaybeObject, AllocationMemento::kAllocationSiteOffset>::store(
+ memento, MaybeObject(kHeapObjectTag));
}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index e0164a7d8e..e34554951c 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -313,7 +313,7 @@ static inline void* ToPointer(int n) { return reinterpret_cast<void*>(n); }
TEST(CodeMapAddCode) {
CodeEntryStorage storage;
- CodeMap code_map(storage);
+ InstructionStreamMap instruction_stream_map(storage);
CodeEntry* entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* entry2 =
@@ -322,61 +322,65 @@ TEST(CodeMapAddCode) {
storage.Create(i::LogEventListener::CodeTag::kFunction, "ccc");
CodeEntry* entry4 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "ddd");
- code_map.AddCode(ToAddress(0x1500), entry1, 0x200);
- code_map.AddCode(ToAddress(0x1700), entry2, 0x100);
- code_map.AddCode(ToAddress(0x1900), entry3, 0x50);
- code_map.AddCode(ToAddress(0x1950), entry4, 0x10);
- CHECK(!code_map.FindEntry(0));
- CHECK(!code_map.FindEntry(ToAddress(0x1500 - 1)));
- CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1500)));
- CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1500 + 0x100)));
- CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1500 + 0x200 - 1)));
- CHECK_EQ(entry2, code_map.FindEntry(ToAddress(0x1700)));
- CHECK_EQ(entry2, code_map.FindEntry(ToAddress(0x1700 + 0x50)));
- CHECK_EQ(entry2, code_map.FindEntry(ToAddress(0x1700 + 0x100 - 1)));
- CHECK(!code_map.FindEntry(ToAddress(0x1700 + 0x100)));
- CHECK(!code_map.FindEntry(ToAddress(0x1900 - 1)));
- CHECK_EQ(entry3, code_map.FindEntry(ToAddress(0x1900)));
- CHECK_EQ(entry3, code_map.FindEntry(ToAddress(0x1900 + 0x28)));
- CHECK_EQ(entry4, code_map.FindEntry(ToAddress(0x1950)));
- CHECK_EQ(entry4, code_map.FindEntry(ToAddress(0x1950 + 0x7)));
- CHECK_EQ(entry4, code_map.FindEntry(ToAddress(0x1950 + 0x10 - 1)));
- CHECK(!code_map.FindEntry(ToAddress(0x1950 + 0x10)));
- CHECK(!code_map.FindEntry(ToAddress(0xFFFFFFFF)));
+ instruction_stream_map.AddCode(ToAddress(0x1500), entry1, 0x200);
+ instruction_stream_map.AddCode(ToAddress(0x1700), entry2, 0x100);
+ instruction_stream_map.AddCode(ToAddress(0x1900), entry3, 0x50);
+ instruction_stream_map.AddCode(ToAddress(0x1950), entry4, 0x10);
+ CHECK(!instruction_stream_map.FindEntry(0));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1500 - 1)));
+ CHECK_EQ(entry1, instruction_stream_map.FindEntry(ToAddress(0x1500)));
+ CHECK_EQ(entry1, instruction_stream_map.FindEntry(ToAddress(0x1500 + 0x100)));
+ CHECK_EQ(entry1,
+ instruction_stream_map.FindEntry(ToAddress(0x1500 + 0x200 - 1)));
+ CHECK_EQ(entry2, instruction_stream_map.FindEntry(ToAddress(0x1700)));
+ CHECK_EQ(entry2, instruction_stream_map.FindEntry(ToAddress(0x1700 + 0x50)));
+ CHECK_EQ(entry2,
+ instruction_stream_map.FindEntry(ToAddress(0x1700 + 0x100 - 1)));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1700 + 0x100)));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1900 - 1)));
+ CHECK_EQ(entry3, instruction_stream_map.FindEntry(ToAddress(0x1900)));
+ CHECK_EQ(entry3, instruction_stream_map.FindEntry(ToAddress(0x1900 + 0x28)));
+ CHECK_EQ(entry4, instruction_stream_map.FindEntry(ToAddress(0x1950)));
+ CHECK_EQ(entry4, instruction_stream_map.FindEntry(ToAddress(0x1950 + 0x7)));
+ CHECK_EQ(entry4,
+ instruction_stream_map.FindEntry(ToAddress(0x1950 + 0x10 - 1)));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1950 + 0x10)));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0xFFFFFFFF)));
}
TEST(CodeMapMoveAndDeleteCode) {
CodeEntryStorage storage;
- CodeMap code_map(storage);
+ InstructionStreamMap instruction_stream_map(storage);
CodeEntry* entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* entry2 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "bbb");
- code_map.AddCode(ToAddress(0x1500), entry1, 0x200);
- code_map.AddCode(ToAddress(0x1700), entry2, 0x100);
- CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1500)));
- CHECK_EQ(entry2, code_map.FindEntry(ToAddress(0x1700)));
- code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700)); // Deprecate bbb.
- CHECK(!code_map.FindEntry(ToAddress(0x1500)));
- CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1700)));
+ instruction_stream_map.AddCode(ToAddress(0x1500), entry1, 0x200);
+ instruction_stream_map.AddCode(ToAddress(0x1700), entry2, 0x100);
+ CHECK_EQ(entry1, instruction_stream_map.FindEntry(ToAddress(0x1500)));
+ CHECK_EQ(entry2, instruction_stream_map.FindEntry(ToAddress(0x1700)));
+ instruction_stream_map.MoveCode(ToAddress(0x1500),
+ ToAddress(0x1700)); // Deprecate bbb.
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1500)));
+ CHECK_EQ(entry1, instruction_stream_map.FindEntry(ToAddress(0x1700)));
}
TEST(CodeMapClear) {
CodeEntryStorage storage;
- CodeMap code_map(storage);
+ InstructionStreamMap instruction_stream_map(storage);
CodeEntry* entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* entry2 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "bbb");
- code_map.AddCode(ToAddress(0x1500), entry1, 0x200);
- code_map.AddCode(ToAddress(0x1700), entry2, 0x100);
+ instruction_stream_map.AddCode(ToAddress(0x1500), entry1, 0x200);
+ instruction_stream_map.AddCode(ToAddress(0x1700), entry2, 0x100);
- code_map.Clear();
- CHECK(!code_map.FindEntry(ToAddress(0x1500)));
- CHECK(!code_map.FindEntry(ToAddress(0x1700)));
+ instruction_stream_map.Clear();
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1500)));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1700)));
// Check that Clear() doesn't cause issues if called twice.
- code_map.Clear();
+ instruction_stream_map.Clear();
}
namespace {
@@ -398,17 +402,19 @@ class TestSetup {
TEST(SymbolizeTickSample) {
TestSetup test_setup;
CodeEntryStorage storage;
- CodeMap code_map(storage);
- Symbolizer symbolizer(&code_map);
+ InstructionStreamMap instruction_stream_map(storage);
+ Symbolizer symbolizer(&instruction_stream_map);
CodeEntry* entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* entry2 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "bbb");
CodeEntry* entry3 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "ccc");
- symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
- symbolizer.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
- symbolizer.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1500), entry1,
+ 0x200);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1700), entry2,
+ 0x100);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
// We are building the following calls tree:
// -> aaa - sample1
@@ -471,17 +477,19 @@ TEST(SampleIds) {
ProfilerId id =
profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers}).id;
CodeEntryStorage storage;
- CodeMap code_map(storage);
- Symbolizer symbolizer(&code_map);
+ InstructionStreamMap instruction_stream_map(storage);
+ Symbolizer symbolizer(&instruction_stream_map);
CodeEntry* entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* entry2 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "bbb");
CodeEntry* entry3 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "ccc");
- symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
- symbolizer.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
- symbolizer.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1500), entry1,
+ 0x200);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1700), entry2,
+ 0x100);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
// We are building the following calls tree:
// -> aaa #3 - sample1
@@ -641,8 +649,8 @@ TEST_WITH_PLATFORM(MaxSamplesCallback, MockPlatform) {
.id;
CodeEntryStorage storage;
- CodeMap code_map(storage);
- Symbolizer symbolizer(&code_map);
+ InstructionStreamMap instruction_stream_map(storage);
+ Symbolizer symbolizer(&instruction_stream_map);
TickSample sample1;
sample1.timestamp = v8::base::TimeTicks::Now();
sample1.pc = ToPointer(0x1600);
@@ -685,11 +693,12 @@ TEST(NoSamples) {
profiles.set_cpu_profiler(&profiler);
ProfilerId id = profiles.StartProfiling().id;
CodeEntryStorage storage;
- CodeMap code_map(storage);
- Symbolizer symbolizer(&code_map);
+ InstructionStreamMap instruction_stream_map(storage);
+ Symbolizer symbolizer(&instruction_stream_map);
CodeEntry* entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
- symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
+ symbolizer.instruction_stream_map()->AddCode(ToAddress(0x1500), entry1,
+ 0x200);
// We are building the following calls tree:
// (root)#1 -> aaa #2 -> aaa #3 - sample1
@@ -858,12 +867,13 @@ static const char* line_number_test_source_profile_time_functions =
int GetFunctionLineNumber(CpuProfiler* profiler, LocalContext* env,
i::Isolate* isolate, const char* name) {
- CodeMap* code_map = profiler->symbolizer()->code_map();
+ InstructionStreamMap* instruction_stream_map =
+ profiler->symbolizer()->instruction_stream_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
(*env)->Global()->Get(env->local(), v8_str(name)).ToLocalChecked())));
PtrComprCageBase cage_base(isolate);
- CodeEntry* func_entry = code_map->FindEntry(
+ CodeEntry* func_entry = instruction_stream_map->FindEntry(
func->abstract_code(isolate).InstructionStart(cage_base));
if (!func_entry) FATAL("%s", name);
return func_entry->line_number();
@@ -900,7 +910,7 @@ TEST(LineNumber) {
}
TEST(BailoutReason) {
-#ifndef V8_LITE_MODE
+#if !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
i::v8_flags.allow_natives_syntax = true;
i::v8_flags.always_turbofan = false;
i::v8_flags.turbofan = true;
@@ -944,7 +954,7 @@ TEST(BailoutReason) {
CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK(
!strcmp("Optimization is always disabled", current->GetBailoutReason()));
-#endif // V8_LITE_MODE
+#endif // !defined(V8_LITE_MODE) && defined(V8_ENABLE_TURBOFAN)
}
TEST(NodeSourceTypes) {
@@ -1008,13 +1018,13 @@ TEST(NodeSourceTypes) {
TEST(CodeMapRemoveCode) {
CodeEntryStorage storage;
- CodeMap code_map(storage);
+ InstructionStreamMap instruction_stream_map(storage);
CodeEntry* entry =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
- code_map.AddCode(ToAddress(0x1000), entry, 0x100);
- CHECK(code_map.RemoveCode(entry));
- CHECK(!code_map.FindEntry(ToAddress(0x1000)));
+ instruction_stream_map.AddCode(ToAddress(0x1000), entry, 0x100);
+ CHECK(instruction_stream_map.RemoveCode(entry));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1000)));
// Test that when two entries share the same address, we remove only the
// entry that we desired to.
@@ -1022,19 +1032,20 @@ TEST(CodeMapRemoveCode) {
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* colliding_entry2 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
- code_map.AddCode(ToAddress(0x1000), colliding_entry1, 0x100);
- code_map.AddCode(ToAddress(0x1000), colliding_entry2, 0x100);
+ instruction_stream_map.AddCode(ToAddress(0x1000), colliding_entry1, 0x100);
+ instruction_stream_map.AddCode(ToAddress(0x1000), colliding_entry2, 0x100);
- CHECK(code_map.RemoveCode(colliding_entry1));
- CHECK_EQ(code_map.FindEntry(ToAddress(0x1000)), colliding_entry2);
+ CHECK(instruction_stream_map.RemoveCode(colliding_entry1));
+ CHECK_EQ(instruction_stream_map.FindEntry(ToAddress(0x1000)),
+ colliding_entry2);
- CHECK(code_map.RemoveCode(colliding_entry2));
- CHECK(!code_map.FindEntry(ToAddress(0x1000)));
+ CHECK(instruction_stream_map.RemoveCode(colliding_entry2));
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1000)));
}
TEST(CodeMapMoveOverlappingCode) {
CodeEntryStorage storage;
- CodeMap code_map(storage);
+ InstructionStreamMap instruction_stream_map(storage);
CodeEntry* colliding_entry1 =
storage.Create(i::LogEventListener::CodeTag::kFunction, "aaa");
CodeEntry* colliding_entry2 =
@@ -1042,22 +1053,22 @@ TEST(CodeMapMoveOverlappingCode) {
CodeEntry* after_entry =
storage.Create(i::LogEventListener::CodeTag::kFunction, "ccc");
- code_map.AddCode(ToAddress(0x1400), colliding_entry1, 0x200);
- code_map.AddCode(ToAddress(0x1400), colliding_entry2, 0x200);
- code_map.AddCode(ToAddress(0x1800), after_entry, 0x200);
+ instruction_stream_map.AddCode(ToAddress(0x1400), colliding_entry1, 0x200);
+ instruction_stream_map.AddCode(ToAddress(0x1400), colliding_entry2, 0x200);
+ instruction_stream_map.AddCode(ToAddress(0x1800), after_entry, 0x200);
CHECK_EQ(colliding_entry1->instruction_start(), ToAddress(0x1400));
CHECK_EQ(colliding_entry2->instruction_start(), ToAddress(0x1400));
CHECK_EQ(after_entry->instruction_start(), ToAddress(0x1800));
- CHECK(code_map.FindEntry(ToAddress(0x1400)));
- CHECK_EQ(code_map.FindEntry(ToAddress(0x1800)), after_entry);
+ CHECK(instruction_stream_map.FindEntry(ToAddress(0x1400)));
+ CHECK_EQ(instruction_stream_map.FindEntry(ToAddress(0x1800)), after_entry);
- code_map.MoveCode(ToAddress(0x1400), ToAddress(0x1600));
+ instruction_stream_map.MoveCode(ToAddress(0x1400), ToAddress(0x1600));
- CHECK(!code_map.FindEntry(ToAddress(0x1400)));
- CHECK(code_map.FindEntry(ToAddress(0x1600)));
- CHECK_EQ(code_map.FindEntry(ToAddress(0x1800)), after_entry);
+ CHECK(!instruction_stream_map.FindEntry(ToAddress(0x1400)));
+ CHECK(instruction_stream_map.FindEntry(ToAddress(0x1600)));
+ CHECK_EQ(instruction_stream_map.FindEntry(ToAddress(0x1800)), after_entry);
CHECK_EQ(colliding_entry1->instruction_start(), ToAddress(0x1600));
CHECK_EQ(colliding_entry2->instruction_start(), ToAddress(0x1600));
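
The profile-generator hunks above are a mechanical rename: CodeMap becomes InstructionStreamMap and Symbolizer::code_map() becomes instruction_stream_map(), with the call surface otherwise unchanged. A hedged before/after sketch using only identifiers from these hunks:

    // Before the rename:
    CodeMap code_map(storage);
    code_map.AddCode(ToAddress(0x1500), entry1, 0x200);
    CodeEntry* found_old = code_map.FindEntry(ToAddress(0x1500));
    // After the rename (same behavior, new names):
    InstructionStreamMap instruction_stream_map(storage);
    instruction_stream_map.AddCode(ToAddress(0x1500), entry1, 0x200);
    CodeEntry* found_new = instruction_stream_map.FindEntry(ToAddress(0x1500));
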
diff --git a/deps/v8/test/cctest/test-ptr-compr-cage.cc b/deps/v8/test/cctest/test-ptr-compr-cage.cc
index 549b389384..10cacc45b8 100644
--- a/deps/v8/test/cctest/test-ptr-compr-cage.cc
+++ b/deps/v8/test/cctest/test-ptr-compr-cage.cc
@@ -21,18 +21,14 @@ UNINITIALIZED_TEST(PtrComprCageAndIsolateRoot) {
v8::Isolate* isolate2 = v8::Isolate::New(create_params);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- CHECK_EQ(i_isolate1->isolate_root(), i_isolate1->cage_base());
- CHECK_EQ(i_isolate2->isolate_root(), i_isolate2->cage_base());
- CHECK_NE(i_isolate1->cage_base(), i_isolate2->cage_base());
-#endif
-
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- CHECK_NE(i_isolate1->isolate_root(), i_isolate1->cage_base());
- CHECK_NE(i_isolate2->isolate_root(), i_isolate2->cage_base());
+#ifdef V8_COMPRESS_POINTERS
CHECK_NE(i_isolate1->isolate_root(), i_isolate2->isolate_root());
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
CHECK_EQ(i_isolate1->cage_base(), i_isolate2->cage_base());
-#endif
+#else
+ CHECK_NE(i_isolate1->cage_base(), i_isolate2->cage_base());
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#endif // V8_COMPRESS_POINTERS
isolate1->Dispose();
isolate2->Dispose();
@@ -129,17 +125,21 @@ UNINITIALIZED_TEST(SharedPtrComprCageRace) {
// Make a bunch of Isolates concurrently as a smoke test against races during
// initialization and de-initialization.
- std::vector<std::unique_ptr<IsolateAllocatingThread>> threads;
- constexpr int kThreads = 10;
+ // Repeat twice to enforce multiple initializations of CodeRange instances.
+ constexpr int kRepeats = 2;
+ for (int repeat = 0; repeat < kRepeats; repeat++) {
+ std::vector<std::unique_ptr<IsolateAllocatingThread>> threads;
+ constexpr int kThreads = 10;
- for (int i = 0; i < kThreads; i++) {
- auto thread = std::make_unique<IsolateAllocatingThread>();
- CHECK(thread->Start());
- threads.push_back(std::move(thread));
- }
+ for (int i = 0; i < kThreads; i++) {
+ auto thread = std::make_unique<IsolateAllocatingThread>();
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
- for (auto& thread : threads) {
- thread->Join();
+ for (auto& thread : threads) {
+ thread->Join();
+ }
}
}
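
The reworked SharedPtrComprCageRace test above boils down to a simple embedder-level pattern: create and dispose isolates on several threads, and repeat the whole thing so CodeRange setup and teardown both get exercised more than once. A minimal sketch of that pattern using only the public API follows; the thread and repeat counts are illustrative assumptions, and V8/platform initialization is assumed to have happened already.

#include <thread>
#include <vector>

#include "include/v8.h"

// Smoke-test sketch: isolates created and disposed concurrently, twice, so
// shared-cage / CodeRange initialization and de-initialization overlap.
void SharedCageSmokeTest(v8::ArrayBuffer::Allocator* allocator) {
  constexpr int kRepeats = 2;
  constexpr int kThreads = 10;
  for (int repeat = 0; repeat < kRepeats; repeat++) {
    std::vector<std::thread> threads;
    for (int i = 0; i < kThreads; i++) {
      threads.emplace_back([allocator] {
        v8::Isolate::CreateParams params;
        params.array_buffer_allocator = allocator;
        v8::Isolate* isolate = v8::Isolate::New(params);
        isolate->Dispose();
      });
    }
    for (auto& thread : threads) thread.join();
  }
}
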
@@ -158,8 +158,8 @@ UNINITIALIZED_TEST(SharedPtrComprCageImpliesSharedReadOnlyHeap) {
// Spot check that some read-only roots are the same.
CHECK_EQ(ReadOnlyRoots(i_isolate1).the_hole_value(),
ReadOnlyRoots(i_isolate2).the_hole_value());
- CHECK_EQ(ReadOnlyRoots(i_isolate1).code_map(),
- ReadOnlyRoots(i_isolate2).code_map());
+ CHECK_EQ(ReadOnlyRoots(i_isolate1).instruction_stream_map(),
+ ReadOnlyRoots(i_isolate2).instruction_stream_map());
CHECK_EQ(ReadOnlyRoots(i_isolate1).exception(),
ReadOnlyRoots(i_isolate2).exception());
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
new file mode 100644
index 0000000000..81e75b9d7c
--- /dev/null
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -0,0 +1,263 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-function.h"
+#include "include/v8-regexp.h"
+#include "src/api/api-inl.h"
+#include "src/execution/frames-inl.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8;
+
+namespace {
+
+const char kOneByteSubjectString[] = {
+ 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
+ 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
+ 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', '\0'};
+const uint16_t kTwoByteSubjectString[] = {
+ 0xCF80, 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
+ 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a',
+ 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', '\0'};
+
+const int kSubjectStringLength = arraysize(kOneByteSubjectString) - 1;
+static_assert(arraysize(kOneByteSubjectString) ==
+ arraysize(kTwoByteSubjectString));
+
+class OneByteVectorResource : public String::ExternalOneByteStringResource {
+ public:
+ explicit OneByteVectorResource(base::Vector<const char> vector)
+ : data_(vector) {}
+ ~OneByteVectorResource() override = default;
+ size_t length() const override { return data_.length(); }
+ const char* data() const override { return data_.begin(); }
+ void Dispose() override {}
+
+ private:
+ base::Vector<const char> data_;
+};
+
+class UC16VectorResource : public String::ExternalStringResource {
+ public:
+ explicit UC16VectorResource(base::Vector<const base::uc16> vector)
+ : data_(vector) {}
+ ~UC16VectorResource() override = default;
+ size_t length() const override { return data_.length(); }
+ const base::uc16* data() const override { return data_.begin(); }
+ void Dispose() override {}
+
+ private:
+ base::Vector<const base::uc16> data_;
+};
+
+OneByteVectorResource one_byte_string_resource(
+ base::Vector<const char>(&kOneByteSubjectString[0], kSubjectStringLength));
+UC16VectorResource two_byte_string_resource(base::Vector<const base::uc16>(
+ &kTwoByteSubjectString[0], kSubjectStringLength));
+
+class InterruptTest {
+ public:
+ InterruptTest()
+ : i_thread(this),
+ env_(),
+ isolate_(env_->GetIsolate()),
+ sem_(0),
+ ran_test_body_(false),
+ ran_to_completion_(false) {}
+
+ void RunTest(InterruptCallback test_body_fn) {
+ HandleScope handle_scope(isolate_);
+ i_thread.SetTestBody(test_body_fn);
+ CHECK(i_thread.Start());
+ TestBody();
+ i_thread.Join();
+ }
+
+ static void CollectAllGarbage(Isolate* isolate, void* data) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->heap()->PreciseCollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
+ }
+
+ static void MakeSubjectOneByteExternal(Isolate* isolate, void* data) {
+ auto instance = reinterpret_cast<InterruptTest*>(data);
+ HandleScope scope(isolate);
+ Local<String> string =
+ Local<String>::New(isolate, instance->one_byte_string_handle_);
+ CHECK(string->CanMakeExternal(String::Encoding::ONE_BYTE_ENCODING));
+ string->MakeExternal(&one_byte_string_resource);
+ }
+
+ static void MakeSubjectTwoByteExternal(Isolate* isolate, void* data) {
+ auto instance = reinterpret_cast<InterruptTest*>(data);
+ HandleScope scope(isolate);
+ Local<String> string =
+ Local<String>::New(isolate, instance->two_byte_string_handle_);
+ CHECK(string->CanMakeExternal(String::Encoding::TWO_BYTE_ENCODING));
+ string->MakeExternal(&two_byte_string_resource);
+ }
+
+ static void IterateStack(Isolate* isolate, void* data) {
+ HandleScope scope(isolate);
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::RegisterState state;
+#if defined(USE_SIMULATOR)
+ SimulatorHelper simulator_helper;
+ if (!simulator_helper.Init(isolate)) return;
+ simulator_helper.FillRegisters(&state);
+#else
+ state.pc = nullptr;
+ state.fp = &state;
+ state.sp = &state;
+#endif
+
+ i::StackFrameIteratorForProfiler it(
+ i_isolate, reinterpret_cast<i::Address>(state.pc),
+ reinterpret_cast<i::Address>(state.fp),
+ reinterpret_cast<i::Address>(state.sp),
+ reinterpret_cast<i::Address>(state.lr), i_isolate->js_entry_sp());
+
+ for (; !it.done(); it.Advance()) {
+ // Ideally we'd access the frame a bit (doesn't matter how); but this
+ // iterator is very limited in what it may access, and prints run into
+ // DCHECKs. So we can't do this:
+ // it.frame()->Print(&accumulator, i::StackFrame::OVERVIEW,
+ // frame_index++);
+ }
+ }
+
+ private:
+ static void SignalSemaphore(Isolate* isolate, void* data) {
+ reinterpret_cast<InterruptTest*>(data)->sem_.Signal();
+ }
+
+ void CreateTestStrings() {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+
+ // The string must be in old space to support externalization.
+ i::Handle<i::String> i_one_byte_string =
+ i_isolate->factory()->NewStringFromAsciiChecked(
+ &kOneByteSubjectString[0], i::AllocationType::kOld);
+ Local<String> one_byte_string = Utils::ToLocal(i_one_byte_string);
+
+ i::Handle<i::String> i_two_byte_string =
+ i_isolate->factory()
+ ->NewStringFromTwoByte(
+ base::Vector<const base::uc16>(&kTwoByteSubjectString[0],
+ kSubjectStringLength),
+ i::AllocationType::kOld)
+ .ToHandleChecked();
+ Local<String> two_byte_string = Utils::ToLocal(i_two_byte_string);
+
+ env_->Global()
+ ->Set(env_.local(), v8_str("subject_string"), one_byte_string)
+ .FromJust();
+ env_->Global()
+ ->Set(env_.local(), v8_str("I 8 some \xCF\x80"), two_byte_string)
+ .FromJust();
+
+ one_byte_string_handle_.Reset(env_->GetIsolate(), one_byte_string);
+ two_byte_string_handle_.Reset(env_->GetIsolate(), two_byte_string);
+ }
+
+ void TestBody() {
+ CHECK(!ran_test_body_.load());
+ CHECK(!ran_to_completion_.load());
+
+ CreateTestStrings();
+
+ TryCatch try_catch(env_->GetIsolate());
+
+ isolate_->RequestInterrupt(&SignalSemaphore, this);
+ CompileRun("/((a*)*)*b/.exec(subject_string)");
+
+ CHECK(try_catch.HasTerminated());
+ CHECK(ran_test_body_.load());
+ CHECK(ran_to_completion_.load());
+ }
+
+ class InterruptThread : public base::Thread {
+ public:
+ explicit InterruptThread(InterruptTest* test)
+ : Thread(Options("InterruptTest")), test_(test) {}
+
+ void Run() override {
+ CHECK_NOT_NULL(test_body_fn_);
+
+ // Wait for JS execution to start.
+ test_->sem_.Wait();
+
+ // Sleep for a bit to allow irregexp execution to start up, then run the
+ // test body.
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
+ test_->isolate_->RequestInterrupt(&RunTestBody, test_);
+ test_->isolate_->RequestInterrupt(&SignalSemaphore, test_);
+
+ // Wait for the scheduled interrupt to signal.
+ test_->sem_.Wait();
+
+ // Sleep again to resume irregexp execution, then terminate.
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
+ test_->ran_to_completion_.store(true);
+ test_->isolate_->TerminateExecution();
+ }
+
+ static void RunTestBody(Isolate* isolate, void* data) {
+ auto instance = reinterpret_cast<InterruptTest*>(data);
+ instance->i_thread.test_body_fn_(isolate, data);
+ instance->ran_test_body_.store(true);
+ }
+
+ void SetTestBody(InterruptCallback callback) { test_body_fn_ = callback; }
+
+ private:
+ InterruptCallback test_body_fn_;
+ InterruptTest* test_;
+ };
+
+ InterruptThread i_thread;
+
+ LocalContext env_;
+ Isolate* isolate_;
+ base::Semaphore sem_; // Coordinates between main and interrupt threads.
+
+ Persistent<String> one_byte_string_handle_;
+ Persistent<String> two_byte_string_handle_;
+
+ std::atomic<bool> ran_test_body_;
+ std::atomic<bool> ran_to_completion_;
+};
+
+void SetCommonV8FlagsForInterruptTests() {
+ // Interrupt tests rely on quirks of the backtracking engine to trigger
+ // pattern execution long enough s.t. we can reliably trigger an interrupt
+ // while the regexp code is still executing.
+ i::v8_flags.enable_experimental_regexp_engine_on_excessive_backtracks = false;
+}
+
+} // namespace
+
+TEST(InterruptAndCollectAllGarbage) {
+ // Move all movable objects on GC.
+ i::v8_flags.compact_on_every_full_gc = true;
+ SetCommonV8FlagsForInterruptTests();
+ InterruptTest{}.RunTest(InterruptTest::CollectAllGarbage);
+}
+
+TEST(InterruptAndMakeSubjectOneByteExternal) {
+ SetCommonV8FlagsForInterruptTests();
+ InterruptTest{}.RunTest(InterruptTest::MakeSubjectOneByteExternal);
+}
+
+TEST(InterruptAndMakeSubjectTwoByteExternal) {
+ SetCommonV8FlagsForInterruptTests();
+ InterruptTest{}.RunTest(InterruptTest::MakeSubjectTwoByteExternal);
+}
+
+TEST(InterruptAndIterateStack) {
+ i::v8_flags.regexp_tier_up = false;
+ SetCommonV8FlagsForInterruptTests();
+ InterruptTest{}.RunTest(InterruptTest::IterateStack);
+}
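
The new interrupt tests above all rest on the same embedder-visible mechanism: v8::Isolate::RequestInterrupt schedules a callback that V8 runs at a safe point while script (including irregexp code) is still executing, and TerminateExecution later aborts the intentionally catastrophic pattern. A minimal sketch of that mechanism, assuming an isolate that is busy evaluating a runaway regexp on another thread:

#include "include/v8.h"

// Runs on the isolate's thread at a safe point during regexp execution.
void OnRegExpInterrupt(v8::Isolate* isolate, void* data) {
  // A real test body would trigger a GC or externalize the subject string
  // here, as the callbacks in test-regexp.cc do.
  *static_cast<bool*>(data) = true;
}

// Called from a second thread while the isolate evaluates something like
// /((a*)*)*b/ against a non-matching subject.
void InterruptRunawayRegExp(v8::Isolate* isolate, bool* interrupted) {
  isolate->RequestInterrupt(&OnRegExpInterrupt, interrupted);
  // Once the callback has run, stop the backtracking search for good.
  isolate->TerminateExecution();
}
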
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 79f6d4e723..eb4a9db631 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -91,14 +91,11 @@ class TestSerializer {
public:
static v8::Isolate* NewIsolateInitialized() {
const bool kEnableSerializer = true;
- const bool kGenerateHeap = true;
- const bool kIsShared = false;
DisableEmbeddedBlobRefcounting();
- v8::Isolate* v8_isolate =
- NewIsolate(kEnableSerializer, kGenerateHeap, kIsShared);
+ v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(nullptr, nullptr, nullptr, false);
+ isolate->InitWithoutSnapshot();
return v8_isolate;
}
@@ -107,10 +104,7 @@ class TestSerializer {
// the production Isolate class has one or the other behavior baked in.
static v8::Isolate* NewIsolate(const v8::Isolate::CreateParams& params) {
const bool kEnableSerializer = false;
- const bool kGenerateHeap = params.snapshot_blob == nullptr;
- const bool kIsShared = false;
- v8::Isolate* v8_isolate =
- NewIsolate(kEnableSerializer, kGenerateHeap, kIsShared);
+ v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer);
v8::Isolate::Initialize(v8_isolate, params);
return v8_isolate;
}
@@ -120,57 +114,23 @@ class TestSerializer {
SnapshotData read_only_snapshot(blobs.read_only);
SnapshotData shared_space_snapshot(blobs.shared_space);
const bool kEnableSerializer = false;
- const bool kGenerateHeap = false;
- const bool kIsShared = false;
- v8::Isolate* v8_isolate =
- NewIsolate(kEnableSerializer, kGenerateHeap, kIsShared);
+ v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(&startup_snapshot, &read_only_snapshot,
- &shared_space_snapshot, false);
+ isolate->InitWithSnapshot(&startup_snapshot, &read_only_snapshot,
+ &shared_space_snapshot, false);
return v8_isolate;
}
- static void InitializeProcessWideSharedIsolateFromBlob(
- const StartupBlobs& blobs) {
- base::MutexGuard guard(
- i::Isolate::process_wide_shared_isolate_mutex_.Pointer());
- CHECK_NULL(i::Isolate::process_wide_shared_isolate_);
-
- SnapshotData startup_snapshot(blobs.startup);
- SnapshotData read_only_snapshot(blobs.read_only);
- SnapshotData shared_space_snapshot(blobs.shared_space);
- const bool kEnableSerializer = false;
- const bool kGenerateHeap = false;
- const bool kIsShared = true;
- v8::Isolate* v8_isolate =
- NewIsolate(kEnableSerializer, kGenerateHeap, kIsShared);
- v8::Isolate::Scope isolate_scope(v8_isolate);
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(&startup_snapshot, &read_only_snapshot,
- &shared_space_snapshot, false);
- i::Isolate::process_wide_shared_isolate_ = isolate;
- }
-
- static void DeleteProcessWideSharedIsolate() {
- i::Isolate::DeleteProcessWideSharedIsolate();
- }
-
private:
// Creates an Isolate instance configured for testing.
- static v8::Isolate* NewIsolate(bool with_serializer, bool generate_heap,
- bool is_shared) {
- i::Isolate* isolate;
- if (is_shared) {
- isolate = i::Isolate::Allocate(true);
- } else {
- isolate = i::Isolate::New();
- }
+ static v8::Isolate* NewIsolate(bool with_serializer) {
+ i::Isolate* isolate = i::Isolate::New();
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
if (with_serializer) isolate->enable_serializer();
isolate->set_array_buffer_allocator(CcTest::array_buffer_allocator());
- isolate->setup_delegate_ = new SetupIsolateDelegateForTests(generate_heap);
+ isolate->setup_delegate_ = new SetupIsolateDelegateForTests;
return v8_isolate;
}
@@ -2863,10 +2823,70 @@ TEST(Regress503552) {
heap::SimulateIncrementalMarking(isolate->heap());
v8::ScriptCompiler::CachedData* cache_data =
- CodeSerializer::Serialize(shared);
+ CodeSerializer::Serialize(isolate, shared);
delete cache_data;
}
+static void CodeSerializerMergeDeserializedScript(bool retain_toplevel_sfi) {
+ v8_flags.stress_background_compile = false;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+
+ HandleScope outer_scope(isolate);
+ Handle<String> source = isolate->factory()->NewStringFromAsciiChecked(
+ "(function () {return 123;})");
+ AlignedCachedData* cached_data = nullptr;
+ Handle<Script> script;
+ {
+ HandleScope first_compilation_scope(isolate);
+ Handle<SharedFunctionInfo> shared = CompileScriptAndProduceCache(
+ isolate, source, ScriptDetails(), &cached_data,
+ v8::ScriptCompiler::kNoCompileOptions);
+ Handle<BytecodeArray> bytecode =
+ handle(shared->GetBytecodeArray(isolate), isolate);
+ for (int i = 0; i <= v8_flags.bytecode_old_age; ++i) {
+ bytecode->MakeOlder();
+ }
+ Handle<Script> local_script =
+ handle(Script::cast(shared->script()), isolate);
+ script = first_compilation_scope.CloseAndEscape(local_script);
+ }
+
+ Handle<HeapObject> retained_toplevel_sfi;
+ if (retain_toplevel_sfi) {
+ retained_toplevel_sfi =
+ handle(script->shared_function_infos().Get(0).GetHeapObjectAssumeWeak(),
+ isolate);
+ }
+
+ // GC twice in case incremental marking had already marked the bytecode array.
+ // After this, the Isolate compilation cache contains a weak reference to the
+ // Script but not the top-level SharedFunctionInfo.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+
+ Handle<SharedFunctionInfo> copy =
+ CompileScript(isolate, source, ScriptDetails(), cached_data,
+ v8::ScriptCompiler::kConsumeCodeCache);
+ delete cached_data;
+
+ // The existing Script was reused.
+ CHECK_EQ(*script, copy->script());
+
+ // The existing top-level SharedFunctionInfo was also reused.
+ if (retain_toplevel_sfi) {
+ CHECK_EQ(*retained_toplevel_sfi, *copy);
+ }
+}
+
+TEST(CodeSerializerMergeDeserializedScript) {
+ CodeSerializerMergeDeserializedScript(/*retain_toplevel_sfi=*/false);
+}
+
+TEST(CodeSerializerMergeDeserializedScriptRetainingToplevelSfi) {
+ CodeSerializerMergeDeserializedScript(/*retain_toplevel_sfi=*/true);
+}
+
UNINITIALIZED_TEST(SnapshotCreatorBlobNotCreated) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
@@ -5080,7 +5100,7 @@ void CheckObjectsAreInSharedHeap(Isolate* isolate) {
heap->MustBeInSharedOldSpace(obj) ||
(obj.IsString() && String::IsInPlaceInternalizable(String::cast(obj)));
if (expected_in_shared_old) {
- CHECK(obj.InSharedHeap());
+ CHECK(obj.InAnySharedSpace());
}
}
}
@@ -5103,10 +5123,6 @@ UNINITIALIZED_TEST(SharedStrings) {
v8_flags.shared_string_table = true;
- if (!v8_flags.shared_space) {
- TestSerializer::InitializeProcessWideSharedIsolateFromBlob(blobs);
- }
-
v8::Isolate* isolate1 = TestSerializer::NewIsolateFromBlob(blobs);
v8::Isolate* isolate2 = TestSerializer::NewIsolateFromBlob(blobs);
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
@@ -5132,10 +5148,6 @@ UNINITIALIZED_TEST(SharedStrings) {
}
isolate1->Dispose();
- if (!v8_flags.shared_space) {
- TestSerializer::DeleteProcessWideSharedIsolate();
- }
-
blobs.Dispose();
FreeCurrentEmbeddedBlob();
}
@@ -5245,5 +5257,36 @@ UNINITIALIZED_TEST(BreakPointAccessorContextSnapshot) {
FreeCurrentEmbeddedBlob();
}
+// These two flags are preconditions for static roots to work. We don't check
+// for V8_STATIC_ROOTS_BOOL since the test targets mksnapshot built without
+// static roots, to be able to generate the static-roots.h file.
+#if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && defined(V8_SHARED_RO_HEAP)
+UNINITIALIZED_TEST(StaticRootsPredictableSnapshot) {
+ if (v8_flags.random_seed == 0) {
+ return;
+ }
+
+ v8::Isolate* isolate1 = TestSerializer::NewIsolateInitialized();
+ StartupBlobs blobs1 = Serialize(isolate1);
+ isolate1->Dispose();
+
+ v8::Isolate* isolate2 = TestSerializer::NewIsolateInitialized();
+ StartupBlobs blobs2 = Serialize(isolate2);
+ isolate2->Dispose();
+
+ // We want to ensure that setup-heap-internal.cc creates a predictable heap.
+ // For static roots it would be sufficient to check that the root pointers
+ // relative to the cage base are identical. However, we can't test this, since
+ // when we create two isolates in the same process, the offsets will actually
+ // be different.
+ CHECK_EQ(blobs1.read_only, blobs2.read_only);
+
+ blobs1.Dispose();
+ blobs2.Dispose();
+ FreeCurrentEmbeddedBlob();
+}
+#endif // defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) &&
+ // defined(V8_SHARED_RO_HEAP)
+
} // namespace internal
} // namespace v8
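
The new CodeSerializerMergeDeserializedScript helper above drives the code cache through internal entry points; the equivalent round trip through the public API looks roughly like the sketch below. Isolate, context, and handle scopes are assumed to be set up by the caller, and the function names are illustrative.

#include "include/v8.h"

// Compile once and serialize the result into a cache blob.
v8::ScriptCompiler::CachedData* ProduceCache(v8::Isolate* isolate,
                                             v8::Local<v8::String> code) {
  v8::ScriptCompiler::Source source(code);
  v8::Local<v8::UnboundScript> script =
      v8::ScriptCompiler::CompileUnboundScript(isolate, &source)
          .ToLocalChecked();
  return v8::ScriptCompiler::CreateCodeCache(script);
}

// Compile the same source again, consuming the cache. On a hit, V8 can merge
// the deserialized script with an already-compiled Script, which is what the
// test above checks at the internal level.
bool ConsumeCache(v8::Isolate* isolate, v8::Local<v8::String> code,
                  v8::ScriptCompiler::CachedData* cache) {
  v8::ScriptCompiler::Source source(code, cache);  // Source takes ownership.
  v8::ScriptCompiler::CompileUnboundScript(
      isolate, &source, v8::ScriptCompiler::kConsumeCodeCache)
      .ToLocalChecked();
  return !source.GetCachedData()->rejected;  // false if the cache was stale.
}
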
diff --git a/deps/v8/test/cctest/test-shared-strings.cc b/deps/v8/test/cctest/test-shared-strings.cc
index 41ff4ef054..cd5ebb276f 100644
--- a/deps/v8/test/cctest/test-shared-strings.cc
+++ b/deps/v8/test/cctest/test-shared-strings.cc
@@ -4,15 +4,21 @@
#include "include/v8-initialization.h"
#include "src/api/api-inl.h"
+#include "src/api/api.h"
#include "src/base/strings.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/parked-scope.h"
#include "src/heap/remembered-set.h"
+#include "src/heap/safepoint.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-weak-refs.h"
#include "src/objects/objects-inl.h"
#include "src/objects/string-forwarding-table-inl.h"
#include "test/cctest/cctest.h"
@@ -57,9 +63,13 @@ class MultiClientIsolateTest {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
main_isolate_ = v8::Isolate::New(create_params);
+ i_main_isolate()->Enter();
}
- ~MultiClientIsolateTest() { main_isolate_->Dispose(); }
+ ~MultiClientIsolateTest() {
+ i_main_isolate()->Exit();
+ main_isolate_->Dispose();
+ }
v8::Isolate* main_isolate() const { return main_isolate_; }
@@ -347,30 +357,50 @@ class ConcurrentInternalizationThread final
namespace {
+Handle<String> CreateSharedOneByteString(Isolate* isolate, Factory* factory,
+ int length, bool internalize) {
+ char* ascii = new char[length + 1];
+ // Don't make single character strings, which will end up deduplicating to
+ // an RO string and mess up the string table hit test.
+ CHECK_GT(length, 1);
+ for (int j = 0; j < length; j++) ascii[j] = 'a';
+ ascii[length] = '\0';
+ if (internalize) {
+ // When testing concurrent string table hits, pre-internalize a string
+ // of the same contents so all subsequent internalizations are hits.
+ factory->InternalizeString(factory->NewStringFromAsciiChecked(ascii));
+ }
+ Handle<String> string = String::Share(
+ isolate, factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld));
+ delete[] ascii;
+ CHECK(string->IsShared());
+ string->EnsureHash();
+ return string;
+}
+
Handle<FixedArray> CreateSharedOneByteStrings(Isolate* isolate,
Factory* factory, int count,
- int min_length = 2,
+ int lo_count, int min_length = 2,
bool internalize = false) {
Handle<FixedArray> shared_strings =
- factory->NewFixedArray(count, AllocationType::kSharedOld);
- for (int i = 0; i < count; i++) {
- char* ascii = new char[i + min_length + 1];
- // Don't make single character strings, which will end up deduplicating to
- // an RO string and mess up the string table hit test.
- for (int j = 0; j < i + min_length; j++) ascii[j] = 'a';
- ascii[i + min_length] = '\0';
- if (internalize) {
- // When testing concurrent string table hits, pre-internalize a string of
- // the same contents so all subsequent internalizations are hits.
- factory->InternalizeString(factory->NewStringFromAsciiChecked(ascii));
+ factory->NewFixedArray(count + lo_count, AllocationType::kSharedOld);
+ {
+ // Create strings in their own scope to be able to delete and GC them.
+ HandleScope scope(isolate);
+ for (int i = 0; i < count; i++) {
+ int length = i + min_length + 1;
+ Handle<String> string =
+ CreateSharedOneByteString(isolate, factory, length, internalize);
+ shared_strings->set(i, *string);
+ }
+ int min_lo_length =
+ isolate->heap()->MaxRegularHeapObjectSize(AllocationType::kOld) + 1;
+ for (int i = 0; i < lo_count; i++) {
+ int length = i + min_lo_length + 1;
+ Handle<String> string =
+ CreateSharedOneByteString(isolate, factory, length, internalize);
+ shared_strings->set(count + i, *string);
}
- Handle<String> string = String::Share(
- isolate,
- factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld));
- CHECK(string->IsShared());
- string->EnsureHash();
- shared_strings->set(i, *string);
- delete[] ascii;
}
return shared_strings;
}
@@ -382,6 +412,7 @@ void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) {
constexpr int kThreads = 4;
constexpr int kStrings = 4096;
+ constexpr int kLOStrings = 16;
MultiClientIsolateTest test;
Isolate* i_isolate = test.i_main_isolate();
@@ -389,8 +420,9 @@ void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) {
HandleScope scope(i_isolate);
- Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
- i_isolate, factory, kStrings, 2, hit_or_miss == kTestHit);
+ Handle<FixedArray> shared_strings =
+ CreateSharedOneByteStrings(i_isolate, factory, kStrings - kLOStrings,
+ kLOStrings, 2, hit_or_miss == kTestHit);
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_start(0);
@@ -465,6 +497,7 @@ UNINITIALIZED_TEST(ConcurrentStringTableLookup) {
constexpr int kTotalThreads = 4;
constexpr int kInternalizationThreads = 1;
constexpr int kStrings = 4096;
+ constexpr int kLOStrings = 16;
MultiClientIsolateTest test;
Isolate* i_isolate = test.i_main_isolate();
@@ -472,8 +505,8 @@ UNINITIALIZED_TEST(ConcurrentStringTableLookup) {
HandleScope scope(i_isolate);
- Handle<FixedArray> shared_strings =
- CreateSharedOneByteStrings(i_isolate, factory, kStrings, 2, false);
+ Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
+ i_isolate, factory, kStrings - kLOStrings, kLOStrings, 2, false);
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_start(0);
@@ -537,6 +570,7 @@ class OneByteResource : public v8::String::ExternalOneByteStringResource {
const char* data() const override { return data_; }
size_t length() const override { return length_; }
void Dispose() override {
+ CHECK(!IsDisposed());
i::DeleteArray(data_);
data_ = nullptr;
}
@@ -792,7 +826,6 @@ UNINITIALIZED_TEST(PromotionMarkCompact) {
// In-place-internalizable strings are promoted into the shared heap when
// sharing.
- CHECK_IMPLIES(!v8_flags.shared_space, !heap->Contains(*one_byte_seq));
CHECK(heap->SharedHeapContains(*one_byte_seq));
}
}
@@ -1029,7 +1062,7 @@ UNINITIALIZED_TEST(PagePromotionRecordingOldToShared) {
Handle<String> shared_string = factory->NewStringFromAsciiChecked(
raw_one_byte, AllocationType::kSharedOld);
- CHECK(shared_string->InSharedWritableHeap());
+ CHECK(shared_string->InWritableSharedSpace());
young_object->set(0, *shared_string);
@@ -1052,8 +1085,10 @@ UNINITIALIZED_TEST(InternalizedSharedStringsTransitionDuringGC) {
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
v8_flags.shared_string_table = true;
+ v8_flags.transition_strings_during_gc_with_stack = true;
constexpr int kStrings = 4096;
+ constexpr int kLOStrings = 16;
MultiClientIsolateTest test;
Isolate* i_isolate = test.i_main_isolate();
@@ -1063,8 +1098,8 @@ UNINITIALIZED_TEST(InternalizedSharedStringsTransitionDuringGC) {
// Run two times to test that everything is reset correctly during GC.
for (int run = 0; run < 2; run++) {
- Handle<FixedArray> shared_strings =
- CreateSharedOneByteStrings(i_isolate, factory, kStrings, 2, run == 0);
+ Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
+ i_isolate, factory, kStrings - kLOStrings, kLOStrings, 2, run == 0);
// Check strings are in the forwarding table after internalization.
for (int i = 0; i < shared_strings->length(); i++) {
@@ -1195,11 +1230,13 @@ UNINITIALIZED_TEST(ExternalizedSharedStringsTransitionDuringGC) {
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
v8_flags.shared_string_table = true;
+ v8_flags.transition_strings_during_gc_with_stack = true;
ExternalResourceFactory resource_factory;
MultiClientIsolateTest test;
constexpr int kStrings = 4096;
+ constexpr int kLOStrings = 16;
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
@@ -1209,7 +1246,8 @@ UNINITIALIZED_TEST(ExternalizedSharedStringsTransitionDuringGC) {
// Run two times to test that everything is reset correctly during GC.
for (int run = 0; run < 2; run++) {
Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
- i_isolate, factory, kStrings, ExternalString::kUncachedSize, run == 0);
+ i_isolate, factory, kStrings - kLOStrings, kLOStrings,
+ ExternalString::kUncachedSize, run == 0);
// Check strings are in the forwarding table after internalization.
for (int i = 0; i < shared_strings->length(); i++) {
@@ -1303,10 +1341,6 @@ UNINITIALIZED_TEST(ExternalizeInternalizedString) {
// Check that API calls return the resource from the forwarding table.
CheckExternalStringResource(one_byte_intern, one_byte_res);
CheckExternalStringResource(two_byte_intern, two_byte_res);
-
- // API calls to the ThinStrings should also return the correct resource.
- CheckExternalStringResource(one_byte, one_byte_res);
- CheckExternalStringResource(two_byte, two_byte_res);
}
UNINITIALIZED_TEST(InternalizeSharedExternalString) {
@@ -1314,6 +1348,7 @@ UNINITIALIZED_TEST(InternalizeSharedExternalString) {
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
v8_flags.shared_string_table = true;
+ v8_flags.transition_strings_during_gc_with_stack = true;
ExternalResourceFactory resource_factory;
MultiClientIsolateTest test;
@@ -1562,10 +1597,12 @@ namespace {
void CreateExternalResources(Isolate* i_isolate, Handle<FixedArray> strings,
std::vector<OneByteResource*>& resources,
ExternalResourceFactory& resource_factory) {
+ HandleScope scope(i_isolate);
resources.reserve(strings->length());
for (int i = 0; i < strings->length(); i++) {
Handle<String> input_string(String::cast(strings->get(i)), i_isolate);
- CHECK(Utils::ToLocal(input_string)->CanMakeExternal());
+ CHECK(Utils::ToLocal(input_string)
+ ->CanMakeExternal(v8::String::Encoding::ONE_BYTE_ENCODING));
const int length = input_string->length();
char* buffer = new char[length + 1];
String::WriteToFlat(*input_string, reinterpret_cast<uint8_t*>(buffer), 0,
@@ -1574,18 +1611,62 @@ void CreateExternalResources(Isolate* i_isolate, Handle<FixedArray> strings,
}
}
+void CheckStringAndResource(
+ String string, int index, bool should_be_alive, String deleted_string,
+ bool check_transition, bool shared_resources,
+ const std::vector<std::unique_ptr<ConcurrentExternalizationThread>>&
+ threads) {
+ if (check_transition) {
+ if (should_be_alive) {
+ CHECK(string.IsExternalString());
+ } else {
+ CHECK_EQ(string, deleted_string);
+ }
+ }
+ int alive_resources = 0;
+ for (size_t t = 0; t < threads.size(); t++) {
+ ConcurrentExternalizationThread* thread = threads[t].get();
+ if (!thread->Resource(index)->IsDisposed()) {
+ alive_resources++;
+ }
+ }
+
+ // Check exact alive resources only if the string has transitioned, otherwise
+ // there can still be multiple resource instances in the forwarding table.
+ // Only check no resource is alive if the string is dead.
+ const bool check_alive = check_transition || !should_be_alive;
+ if (check_alive) {
+ size_t expected_alive;
+ if (should_be_alive) {
+ if (shared_resources) {
+ // Since we share the same resource for all threads, we accounted for it
+ // in every thread.
+ expected_alive = threads.size();
+ } else {
+ // Check that exactly one resource is alive.
+ expected_alive = 1;
+ }
+ } else {
+ expected_alive = 0;
+ }
+ CHECK_EQ(alive_resources, expected_alive);
+ }
+}
+
} // namespace
void TestConcurrentExternalization(bool share_resources) {
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
v8_flags.shared_string_table = true;
+ v8_flags.transition_strings_during_gc_with_stack = true;
ExternalResourceFactory resource_factory;
MultiClientIsolateTest test;
constexpr int kThreads = 4;
constexpr int kStrings = 4096;
+ constexpr int kLOStrings = 16;
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
@@ -1593,7 +1674,8 @@ void TestConcurrentExternalization(bool share_resources) {
HandleScope scope(i_isolate);
Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
- i_isolate, factory, kStrings, ExternalString::kUncachedSize, false);
+ i_isolate, factory, kStrings - kLOStrings, kLOStrings,
+ ExternalString::kUncachedSize, false);
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_start(0);
@@ -1639,23 +1721,8 @@ void TestConcurrentExternalization(bool share_resources) {
Handle<String> input_string(String::cast(shared_strings->get(i)),
i_isolate);
String string = *input_string;
- CHECK(string.IsExternalString());
- int alive_resources = 0;
- for (int t = 0; t < kThreads; t++) {
- ConcurrentExternalizationThread* thread = threads[t].get();
- if (!thread->Resource(i)->IsDisposed()) {
- alive_resources++;
- }
- }
-
- if (share_resources) {
- // Since we share the same resource for all threads, we accounted for it
- // in every thread.
- CHECK_EQ(alive_resources, kThreads);
- } else {
- // Check that exaclty one resource is alive.
- CHECK_EQ(alive_resources, 1);
- }
+ CheckStringAndResource(string, i, true, String{}, true, share_resources,
+ threads);
}
ParkedScope parked(local_isolate);
@@ -1672,11 +1739,140 @@ UNINITIALIZED_TEST(ConcurrentExternalizationWithSharedResources) {
TestConcurrentExternalization(true);
}
+void TestConcurrentExternalizationWithDeadStrings(bool share_resources,
+ bool transition_with_stack) {
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+
+ v8_flags.shared_string_table = true;
+ v8_flags.transition_strings_during_gc_with_stack = transition_with_stack;
+
+ ExternalResourceFactory resource_factory;
+ MultiClientIsolateTest test;
+
+ constexpr int kThreads = 4;
+ constexpr int kStrings = 12;
+ constexpr int kLOStrings = 2;
+
+ Isolate* i_isolate = test.i_main_isolate();
+ Factory* factory = i_isolate->factory();
+
+ HandleScope scope(i_isolate);
+
+ Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
+ i_isolate, factory, kStrings - kLOStrings, kLOStrings,
+ ExternalString::kUncachedSize, false);
+
+ ParkingSemaphore sema_ready(0);
+ ParkingSemaphore sema_execute_start(0);
+ ParkingSemaphore sema_execute_complete(0);
+ std::vector<std::unique_ptr<ConcurrentExternalizationThread>> threads;
+ std::vector<OneByteResource*> shared_resources;
+
+ if (share_resources) {
+ CreateExternalResources(i_isolate, shared_strings, shared_resources,
+ resource_factory);
+ }
+
+ for (int i = 0; i < kThreads; i++) {
+ std::vector<OneByteResource*> local_resources;
+ if (share_resources) {
+ local_resources = shared_resources;
+ } else {
+ CreateExternalResources(i_isolate, shared_strings, local_resources,
+ resource_factory);
+ }
+ auto thread = std::make_unique<ConcurrentExternalizationThread>(
+ &test, shared_strings, local_resources, share_resources, &sema_ready,
+ &sema_execute_start, &sema_execute_complete);
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ LocalIsolate* local_isolate = i_isolate->main_thread_local_isolate();
+ for (int i = 0; i < kThreads; i++) {
+ sema_ready.ParkedWait(local_isolate);
+ }
+ for (int i = 0; i < kThreads; i++) {
+ sema_execute_start.Signal();
+ }
+ for (int i = 0; i < kThreads; i++) {
+ sema_execute_complete.ParkedWait(local_isolate);
+ }
+
+ Handle<String> empty_string =
+ handle(ReadOnlyRoots(i_isolate->heap()).empty_string(), i_isolate);
+ for (int i = 0; i < shared_strings->length(); i++) {
+ Handle<String> input_string(String::cast(shared_strings->get(i)),
+ i_isolate);
+ // Patch every third string to empty. The next GC will dispose the external
+ // resources.
+ if (i % 3 == 0) {
+ input_string.PatchValue(*empty_string);
+ shared_strings->set(i, *input_string);
+ }
+ }
+
+ i_isolate->heap()->CollectGarbageShared(i_isolate->main_thread_local_heap(),
+ GarbageCollectionReason::kTesting);
+
+ for (int i = 0; i < shared_strings->length(); i++) {
+ Handle<String> input_string(String::cast(shared_strings->get(i)),
+ i_isolate);
+ const bool should_be_alive = i % 3 != 0;
+ String string = *input_string;
+ CheckStringAndResource(string, i, should_be_alive, *empty_string,
+ transition_with_stack, share_resources, threads);
+ }
+
+ // If we didn't test transitions during GC with stack, trigger another GC
+ // (allowing transitions with stack) to ensure everything is handled
+ // correctly.
+ if (!transition_with_stack) {
+ v8_flags.transition_strings_during_gc_with_stack = true;
+
+ i_isolate->heap()->CollectGarbageShared(i_isolate->main_thread_local_heap(),
+ GarbageCollectionReason::kTesting);
+
+ for (int i = 0; i < shared_strings->length(); i++) {
+ Handle<String> input_string(String::cast(shared_strings->get(i)),
+ i_isolate);
+ const bool should_be_alive = i % 3 != 0;
+ String string = *input_string;
+ CheckStringAndResource(string, i, should_be_alive, *empty_string, true,
+ share_resources, threads);
+ }
+ }
+
+ ParkedScope parked(local_isolate);
+ for (auto& thread : threads) {
+ thread->ParkedJoin(parked);
+ }
+}
+
+UNINITIALIZED_TEST(
+ ExternalizationWithDeadStringsAndUniqueResourcesTransitionWithStack) {
+ TestConcurrentExternalizationWithDeadStrings(false, true);
+}
+
+UNINITIALIZED_TEST(
+ ExternalizationWithDeadStringsAndSharedResourcesTransitionWithStack) {
+ TestConcurrentExternalizationWithDeadStrings(true, true);
+}
+
+UNINITIALIZED_TEST(ExternalizationWithDeadStringsAndUniqueResources) {
+ TestConcurrentExternalizationWithDeadStrings(false, false);
+}
+
+UNINITIALIZED_TEST(ExternalizationWithDeadStringsAndSharedResources) {
+ TestConcurrentExternalizationWithDeadStrings(true, false);
+}
+
void TestConcurrentExternalizationAndInternalization(
TestHitOrMiss hit_or_miss) {
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
v8_flags.shared_string_table = true;
+ v8_flags.transition_strings_during_gc_with_stack = true;
ExternalResourceFactory resource_factory;
MultiClientIsolateTest test;
@@ -1686,6 +1882,7 @@ void TestConcurrentExternalizationAndInternalization(
constexpr int kTotalThreads =
kInternalizationThreads + kExternalizationThreads;
constexpr int kStrings = 4096;
+ constexpr int kLOStrings = 16;
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
@@ -1693,8 +1890,8 @@ void TestConcurrentExternalizationAndInternalization(
HandleScope scope(i_isolate);
Handle<FixedArray> shared_strings = CreateSharedOneByteStrings(
- i_isolate, factory, kStrings, ExternalString::kUncachedSize,
- hit_or_miss == kTestHit);
+ i_isolate, factory, kStrings - kLOStrings, kLOStrings,
+ ExternalString::kUncachedSize, hit_or_miss == kTestHit);
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_start(0);
@@ -1774,6 +1971,557 @@ UNINITIALIZED_TEST(ConcurrentExternalizationAndInternalizationHit) {
TestConcurrentExternalizationAndInternalization(kTestHit);
}
+UNINITIALIZED_TEST(SharedStringInGlobalHandle) {
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+
+ v8_flags.shared_string_table = true;
+
+ MultiClientIsolateTest test;
+ Isolate* i_isolate = test.i_main_isolate();
+ Factory* factory = i_isolate->factory();
+
+ HandleScope handle_scope(i_isolate);
+ Handle<String> shared_string =
+ factory->NewStringFromAsciiChecked("foobar", AllocationType::kSharedOld);
+ CHECK(shared_string->InWritableSharedSpace());
+ v8::Local<v8::String> lh_shared_string =
+ Utils::Convert<String, v8::String>(shared_string);
+ v8::Global<v8::String> gh_shared_string(test.main_isolate(),
+ lh_shared_string);
+ gh_shared_string.SetWeak();
+
+ CcTest::CollectGarbage(OLD_SPACE, i_isolate);
+
+ CHECK(!gh_shared_string.IsEmpty());
+}
+
+class WakeupTask : public CancelableTask {
+ public:
+ explicit WakeupTask(Isolate* isolate) : CancelableTask(isolate) {}
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override {}
+};
+
+class WorkerIsolateThread : public v8::base::Thread {
+ public:
+ WorkerIsolateThread(const char* name, MultiClientIsolateTest* test,
+ std::atomic<bool>* done)
+ : v8::base::Thread(base::Thread::Options(name)),
+ test_(test),
+ done_(done) {}
+
+ void Run() override {
+ v8::Isolate* client = test_->NewClientIsolate();
+ Isolate* i_client = reinterpret_cast<Isolate*>(client);
+ Factory* factory = i_client->factory();
+
+ v8::Global<v8::String> gh_shared_string;
+
+ {
+ HandleScope handle_scope(i_client);
+ Handle<String> shared_string = factory->NewStringFromAsciiChecked(
+ "foobar", AllocationType::kSharedOld);
+ CHECK(shared_string->InWritableSharedSpace());
+ v8::Local<v8::String> lh_shared_string =
+ Utils::Convert<String, v8::String>(shared_string);
+ gh_shared_string.Reset(test_->main_isolate(), lh_shared_string);
+ gh_shared_string.SetWeak();
+ }
+
+ {
+ // Disable CSS for the shared heap and all clients.
+ // DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ // i_client->shared_space_isolate()->heap());
+
+ Isolate* gc_isolate = i_client->shared_space_isolate();
+ gc_isolate->heap()->ForceSharedGCWithEmptyStackForTesting();
+ i_client->heap()->CollectGarbageShared(i_client->main_thread_local_heap(),
+ GarbageCollectionReason::kTesting);
+ }
+
+ CHECK(gh_shared_string.IsEmpty());
+ client->Dispose();
+
+ *done_ = true;
+
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(test_->main_isolate())
+ ->PostTask(std::make_unique<WakeupTask>(test_->i_main_isolate()));
+ }
+
+ private:
+ MultiClientIsolateTest* test_;
+ std::atomic<bool>* done_;
+};
+
+UNINITIALIZED_TEST(SharedStringInClientGlobalHandle) {
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+
+ v8_flags.shared_string_table = true;
+
+ MultiClientIsolateTest test;
+ std::atomic<bool> done = false;
+ WorkerIsolateThread thread("worker", &test, &done);
+ CHECK(thread.Start());
+
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ thread.Join();
+}
+
+class ClientIsolateThreadForPagePromotions : public v8::base::Thread {
+ public:
+ ClientIsolateThreadForPagePromotions(const char* name,
+ MultiClientIsolateTest* test,
+ std::atomic<bool>* done,
+ Handle<String>* shared_string)
+ : v8::base::Thread(base::Thread::Options(name)),
+ test_(test),
+ done_(done),
+ shared_string_(shared_string) {}
+
+ void Run() override {
+ CHECK(v8_flags.minor_mc);
+ v8::Isolate* client = test_->NewClientIsolate();
+ Isolate* i_client = reinterpret_cast<Isolate*>(client);
+ Factory* factory = i_client->factory();
+ Heap* heap = i_client->heap();
+
+ {
+ HandleScope scope(i_client);
+
+ Handle<FixedArray> young_object =
+ factory->NewFixedArray(1, AllocationType::kYoung);
+ CHECK(Heap::InYoungGeneration(*young_object));
+ Address young_object_address = young_object->address();
+
+ std::vector<Handle<FixedArray>> handles;
+ // Make the whole page transition from new->old, getting the buffers
+ // processed in the sweeper (relying on marking information) instead of
+ // processing during newspace evacuation.
+ heap::FillCurrentPage(heap->new_space(), &handles);
+
+ CHECK(!heap->Contains(**shared_string_));
+ CHECK(heap->SharedHeapContains(**shared_string_));
+ young_object->set(0, **shared_string_);
+
+ CcTest::CollectGarbage(NEW_SPACE, i_client);
+ heap->CompleteSweepingFull();
+
+ // Object should get promoted using page promotion, so address should
+ // remain the same.
+ CHECK(!Heap::InYoungGeneration(*young_object));
+ CHECK(heap->Contains(*young_object));
+ CHECK_EQ(young_object_address, young_object->address());
+
+ // Since the GC promoted the array holding that shared string into old
+ // space, it also needs to create an OLD_TO_SHARED slot.
+ ObjectSlot slot = young_object->GetFirstElementAddress();
+ CHECK(RememberedSet<OLD_TO_SHARED>::Contains(
+ MemoryChunk::FromHeapObject(*young_object), slot.address()));
+ }
+
+ client->Dispose();
+
+ *done_ = true;
+
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(test_->main_isolate())
+ ->PostTask(std::make_unique<WakeupTask>(test_->i_main_isolate()));
+ }
+
+ private:
+ MultiClientIsolateTest* test_;
+ std::atomic<bool>* done_;
+ Handle<String>* shared_string_;
+};
+
+UNINITIALIZED_TEST(RegisterOldToSharedForPromotedPageFromClient) {
+ if (v8_flags.single_generation) return;
+ if (!v8_flags.minor_mc) return;
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+
+ v8_flags.stress_concurrent_allocation = false; // For SealCurrentObjects.
+ v8_flags.shared_string_table = true;
+ v8_flags.manual_evacuation_candidates_selection = true;
+
+ MultiClientIsolateTest test;
+ std::atomic<bool> done = false;
+
+ Isolate* i_isolate = test.i_main_isolate();
+ Isolate* shared_isolate = i_isolate->shared_space_isolate();
+ Heap* shared_heap = shared_isolate->heap();
+
+ HandleScope scope(i_isolate);
+
+ const char raw_one_byte[] = "foo";
+ Handle<String> shared_string =
+ i_isolate->factory()->NewStringFromAsciiChecked(
+ raw_one_byte, AllocationType::kSharedOld);
+ CHECK(shared_heap->Contains(*shared_string));
+
+ ClientIsolateThreadForPagePromotions thread("worker", &test, &done,
+ &shared_string);
+ CHECK(thread.Start());
+
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ thread.Join();
+}
+
+UNINITIALIZED_TEST(
+ RegisterOldToSharedForPromotedPageFromClientDuringIncrementalMarking) {
+ if (v8_flags.single_generation) return;
+ if (!v8_flags.minor_mc) return;
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+
+ v8_flags.stress_concurrent_allocation = false; // For SealCurrentObjects.
+ v8_flags.shared_string_table = true;
+ v8_flags.manual_evacuation_candidates_selection = true;
+ v8_flags.incremental_marking_task =
+ false; // Prevent the incremental GC from finishing and finalizing in a
+ // task.
+
+ MultiClientIsolateTest test;
+ std::atomic<bool> done = false;
+
+ Isolate* i_isolate = test.i_main_isolate();
+ Isolate* shared_isolate = i_isolate->shared_space_isolate();
+ Heap* shared_heap = shared_isolate->heap();
+
+ HandleScope scope(i_isolate);
+
+ const char raw_one_byte[] = "foo";
+ Handle<String> shared_string =
+ i_isolate->factory()->NewStringFromAsciiChecked(
+ raw_one_byte, AllocationType::kSharedOld);
+ CHECK(shared_heap->Contains(*shared_string));
+
+ // Start an incremental shared GC such that shared_string resides on an
+ // evacuation candidate.
+ ManualGCScope manual_gc_scope(shared_isolate);
+ heap::ForceEvacuationCandidate(Page::FromHeapObject(*shared_string));
+ i::IncrementalMarking* marking = shared_heap->incremental_marking();
+ CHECK(marking->IsStopped());
+ {
+ IsolateSafepointScope safepoint_scope(shared_heap);
+ shared_heap->tracer()->StartCycle(
+ GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector cctest", GCTracer::MarkingType::kIncremental);
+ marking->Start(GarbageCollector::MARK_COMPACTOR,
+ i::GarbageCollectionReason::kTesting);
+ }
+
+ ClientIsolateThreadForPagePromotions thread("worker", &test, &done,
+ &shared_string);
+ CHECK(thread.Start());
+
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ thread.Join();
+}
+
+class ClientIsolateThreadForRetainingByRememberedSet : public v8::base::Thread {
+ public:
+ ClientIsolateThreadForRetainingByRememberedSet(
+ const char* name, MultiClientIsolateTest* test, std::atomic<bool>* done,
+ Persistent<v8::String>* weak_ref)
+ : v8::base::Thread(base::Thread::Options(name)),
+ test_(test),
+ done_(done),
+ weak_ref_(weak_ref) {}
+
+ void Run() override {
+ CHECK(v8_flags.minor_mc);
+ client_isolate_ = test_->NewClientIsolate();
+ Isolate* i_client = reinterpret_cast<Isolate*>(client_isolate_);
+ Factory* factory = i_client->factory();
+ Heap* heap = i_client->heap();
+ ManualGCScope manual_gc_scope(i_client);
+
+ {
+ HandleScope scope(i_client);
+
+ Handle<FixedArray> young_object =
+ factory->NewFixedArray(1, AllocationType::kYoung);
+ CHECK(Heap::InYoungGeneration(*young_object));
+ Address young_object_address = young_object->address();
+
+ std::vector<Handle<FixedArray>> handles;
+ // Make the whole page transition from new->old, getting the buffers
+ // processed in the sweeper (relying on marking information) instead of
+ // processing during newspace evacuation.
+ heap::FillCurrentPage(heap->new_space(), &handles);
+
+ // Create a new to shared reference.
+ CHECK(!weak_ref_->IsEmpty());
+ Handle<String> shared_string = Utils::OpenHandle<v8::String, String>(
+ weak_ref_->Get(client_isolate_));
+ CHECK(!heap->Contains(*shared_string));
+ CHECK(heap->SharedHeapContains(*shared_string));
+ young_object->set(0, *shared_string);
+
+ CcTest::CollectGarbage(NEW_SPACE, i_client);
+
+ // Object should get promoted using page promotion, so address should
+ // remain the same.
+ CHECK(!Heap::InYoungGeneration(*young_object));
+ CHECK(heap->Contains(*young_object));
+ CHECK_EQ(young_object_address, young_object->address());
+
+ // GC should still be in progress (unless heap verification is enabled).
+ CHECK_IMPLIES(!v8_flags.verify_heap, heap->sweeping_in_progress());
+
+ // Inform main thread that the client is set up and is doing a GC.
+ *done_ = true;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(test_->main_isolate())
+ ->PostTask(std::make_unique<WakeupTask>(test_->i_main_isolate()));
+
+ // Wait for main thread to do a shared GC.
+ while (*done_) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ // Since the GC promoted the array holding that shared string into old
+ // space, it also needs to create an OLD_TO_SHARED slot.
+ ObjectSlot slot = young_object->GetFirstElementAddress();
+ CHECK(RememberedSet<OLD_TO_SHARED>::Contains(
+ MemoryChunk::FromHeapObject(*young_object), slot.address()));
+ }
+
+ client_isolate_->Dispose();
+
+ // Inform main thread that client is finished.
+ *done_ = true;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(test_->main_isolate())
+ ->PostTask(std::make_unique<WakeupTask>(test_->i_main_isolate()));
+ }
+
+ v8::Isolate* isolate() const {
+ DCHECK_NOT_NULL(client_isolate_);
+ return client_isolate_;
+ }
+
+ private:
+ MultiClientIsolateTest* test_;
+ std::atomic<bool>* done_;
+ Persistent<v8::String>* weak_ref_;
+ v8::Isolate* client_isolate_;
+};
+
+UNINITIALIZED_TEST(SharedObjectRetainedByClientRememberedSet) {
+ if (v8_flags.single_generation) return;
+ if (!v8_flags.minor_mc) return;
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+
+ v8_flags.stress_concurrent_allocation = false; // For SealCurrentObjects.
+ v8_flags.shared_string_table = true;
+ v8_flags.manual_evacuation_candidates_selection = true;
+
+ MultiClientIsolateTest test;
+ std::atomic<bool> done = false;
+
+ v8::Isolate* isolate = test.main_isolate();
+ Isolate* i_isolate = test.i_main_isolate();
+ Isolate* shared_isolate = i_isolate->shared_space_isolate();
+ Heap* shared_heap = shared_isolate->heap();
+
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ shared_heap);
+
+ // Create two weak references to Strings. One should die, the other should be
+ // kept alive by the client isolate.
+ Persistent<v8::String> live_weak_ref;
+ Persistent<v8::String> dead_weak_ref;
+ {
+ HandleScope scope(i_isolate);
+ const char raw_one_byte[] = "foo";
+
+ Handle<String> live_shared_string =
+ i_isolate->factory()->NewStringFromAsciiChecked(
+ raw_one_byte, AllocationType::kSharedOld);
+ CHECK(shared_heap->Contains(*live_shared_string));
+ live_weak_ref.Reset(isolate, Utils::ToLocal(live_shared_string));
+ live_weak_ref.SetWeak();
+
+ Handle<String> dead_shared_string =
+ i_isolate->factory()->NewStringFromAsciiChecked(
+ raw_one_byte, AllocationType::kSharedOld);
+ CHECK(shared_heap->Contains(*dead_shared_string));
+ dead_weak_ref.Reset(isolate, Utils::ToLocal(dead_shared_string));
+ dead_weak_ref.SetWeak();
+ }
+
+ ClientIsolateThreadForRetainingByRememberedSet thread("worker", &test, &done,
+ &live_weak_ref);
+ CHECK(thread.Start());
+
+ // Wait for client isolate to allocate objects and start a GC.
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ // Do shared GC. The live weak ref should be kept alive via a OLD_TO_SHARED
+ // slot in the client isolate.
+ CHECK(!live_weak_ref.IsEmpty());
+ CHECK(!dead_weak_ref.IsEmpty());
+ CcTest::CollectSharedGarbage(i_isolate);
+ CHECK(!live_weak_ref.IsEmpty());
+ CHECK(dead_weak_ref.IsEmpty());
+
+ // Inform client that shared GC is finished.
+ done = false;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(thread.isolate())
+ ->PostTask(std::make_unique<WakeupTask>(
+ reinterpret_cast<Isolate*>(thread.isolate())));
+
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ thread.Join();
+}
+
+class Regress1424955ClientIsolateThread : public v8::base::Thread {
+ public:
+ Regress1424955ClientIsolateThread(const char* name,
+ MultiClientIsolateTest* test,
+ std::atomic<bool>* done)
+ : v8::base::Thread(base::Thread::Options(name)),
+ test_(test),
+ done_(done) {}
+
+ void Run() override {
+ client_isolate_ = test_->NewClientIsolate();
+ Isolate* i_client = reinterpret_cast<Isolate*>(client_isolate_);
+ Heap* i_client_heap = i_client->heap();
+ Factory* factory = i_client->factory();
+
+ {
+ // Allocate an object so that there is work for the sweeper. Otherwise,
+ // starting a minor GC after a full GC may finalize sweeping since it is
+ // out of work.
+ HandleScope handle_scope(i_client);
+ Handle<FixedArray> array =
+ factory->NewFixedArray(64, AllocationType::kOld);
+ USE(array);
+
+ // Start sweeping.
+ i_client_heap->CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kTesting);
+ CHECK(i_client_heap->sweeping_in_progress());
+
+ // Inform the initiator thread it's time to request a global safepoint.
+ *done_ = true;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(test_->main_isolate())
+ ->PostTask(std::make_unique<WakeupTask>(test_->i_main_isolate()));
+
+ // Wait for the initiator thread to request a global safepoint.
+ while (!i_client->shared_space_isolate()
+ ->global_safepoint()
+ ->IsRequestedForTesting()) {
+ v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
+ }
+
+ // Start a minor GC. This will cause this client isolate to join the
+ // global safepoint, at which point the initiator isolate will try to
+ // finalize sweeping on behalf of this client isolate.
+ i_client_heap->CollectGarbage(NEW_SPACE,
+ GarbageCollectionReason::kTesting);
+ }
+
+ // Wait for the initiator isolate to finish the shared GC.
+ while (*done_) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), client_isolate_,
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ client_isolate_->Dispose();
+
+ *done_ = true;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(test_->main_isolate())
+ ->PostTask(std::make_unique<WakeupTask>(test_->i_main_isolate()));
+ }
+
+ v8::Isolate* isolate() const {
+ DCHECK_NOT_NULL(client_isolate_);
+ return client_isolate_;
+ }
+
+ private:
+ MultiClientIsolateTest* test_;
+ std::atomic<bool>* done_;
+ v8::Isolate* client_isolate_;
+};
+
+UNINITIALIZED_TEST(Regress1424955) {
+ if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
+ if (v8_flags.single_generation) return;
+ // When heap verification is enabled, sweeping is finalized in the atomic
+ // pause. This issue requires that sweeping is still in progress after the
+ // atomic pause is finished.
+ if (v8_flags.verify_heap) return;
+ v8_flags.shared_string_table = true;
+
+ ManualGCScope manual_gc_scope;
+
+ MultiClientIsolateTest test;
+ std::atomic<bool> done = false;
+ Regress1424955ClientIsolateThread thread("worker", &test, &done);
+ CHECK(thread.Start());
+
+ // Wait for client thread to start sweeping.
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ // Client isolate waits for this isolate to request a global safepoint and
+ // then triggers a minor GC.
+ CcTest::CollectSharedGarbage(test.i_main_isolate());
+ done = false;
+ V8::GetCurrentPlatform()
+ ->GetForegroundTaskRunner(thread.isolate())
+ ->PostTask(std::make_unique<WakeupTask>(
+ reinterpret_cast<Isolate*>(thread.isolate())));
+
+ // Wait for client isolate to finish the minor GC and dispose of its isolate.
+ while (!done) {
+ v8::platform::PumpMessageLoop(
+ i::V8::GetCurrentPlatform(), test.main_isolate(),
+ v8::platform::MessageLoopBehavior::kWaitForWork);
+ }
+
+ thread.Join();
+}
+
} // namespace test_shared_strings
} // namespace internal
} // namespace v8
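
Most of the new shared-heap tests above coordinate a worker isolate with the main isolate through the same wait/wake pattern: the worker flips an atomic flag and posts a no-op task so the main thread drops out of PumpMessageLoop(..., kWaitForWork). A minimal sketch of that pattern with the public platform API; the WakeupTask here is a plain v8::Task rather than the cctest helper of the same name, and platform/isolate setup is assumed.

#include <atomic>
#include <memory>

#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
#include "include/v8.h"

// Wakes the main thread's message loop; the task itself does nothing.
class WakeupTask : public v8::Task {
 public:
  void Run() override {}
};

// Main thread: block in the message loop until the worker signals completion.
void WaitForWorker(v8::Platform* platform, v8::Isolate* main_isolate,
                   std::atomic<bool>* done) {
  while (!done->load()) {
    v8::platform::PumpMessageLoop(
        platform, main_isolate,
        v8::platform::MessageLoopBehavior::kWaitForWork);
  }
}

// Worker thread: set the flag, then post a task so PumpMessageLoop returns.
void SignalMainThread(v8::Platform* platform, v8::Isolate* main_isolate,
                      std::atomic<bool>* done) {
  done->store(true);
  platform->GetForegroundTaskRunner(main_isolate)
      ->PostTask(std::make_unique<WakeupTask>());
}
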
diff --git a/deps/v8/test/cctest/test-simple-riscv32.cc b/deps/v8/test/cctest/test-simple-riscv32.cc
index 7a81643d42..be8919f0c7 100644
--- a/deps/v8/test/cctest/test-simple-riscv32.cc
+++ b/deps/v8/test/cctest/test-simple-riscv32.cc
@@ -64,7 +64,7 @@ TEST(RISCV_SIMPLE0) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int32_t res = reinterpret_cast<int32_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0xABCL, res);
}
@@ -84,7 +84,7 @@ TEST(RISCV_SIMPLE1) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int32_t res = reinterpret_cast<int32_t>(f.Call(100, 0, 0, 0, 0));
CHECK_EQ(99L, res);
}
@@ -118,7 +118,7 @@ TEST(RISCV_SIMPLE2) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int32_t res = reinterpret_cast<int32_t>(f.Call(100, 0, 0, 0, 0));
CHECK_EQ(5050, res);
}
@@ -139,7 +139,7 @@ TEST(RISCV_SIMPLE3) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int32_t res = reinterpret_cast<int32_t>(f.Call(255, 0, 0, 0, 0));
CHECK_EQ(-1, res);
}
@@ -179,7 +179,7 @@ TEST(LI) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int32_t res = reinterpret_cast<int32_t>(f.Call(0xDEADBEEF, 0, 0, 0, 0));
CHECK_EQ(0L, res);
}
@@ -218,7 +218,7 @@ TEST(LI_CONST) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int32_t res = reinterpret_cast<int32_t>(f.Call(0xDEADBEEF, 0, 0, 0, 0));
CHECK_EQ(0L, res);
}
diff --git a/deps/v8/test/cctest/test-simple-riscv64.cc b/deps/v8/test/cctest/test-simple-riscv64.cc
index c5feda47df..3cb1dddcec 100644
--- a/deps/v8/test/cctest/test-simple-riscv64.cc
+++ b/deps/v8/test/cctest/test-simple-riscv64.cc
@@ -64,7 +64,7 @@ TEST(RISCV_SIMPLE0) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F2>::FromCode(*code);
+ auto f = GeneratedCode<F2>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0xABCL, res);
}
@@ -84,7 +84,7 @@ TEST(RISCV_SIMPLE1) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(100, 0, 0, 0, 0));
CHECK_EQ(99L, res);
}
@@ -118,7 +118,7 @@ TEST(RISCV_SIMPLE2) {
#ifdef DEBUG
code->Print();
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(100, 0, 0, 0, 0));
CHECK_EQ(5050, res);
}
@@ -139,7 +139,7 @@ TEST(RISCV_SIMPLE3) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(255, 0, 0, 0, 0));
CHECK_EQ(-1, res);
}
@@ -191,7 +191,7 @@ TEST(LI) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xDEADBEEF, 0, 0, 0, 0));
CHECK_EQ(0L, res);
}
@@ -242,7 +242,7 @@ TEST(LI_CONST) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xDEADBEEF, 0, 0, 0, 0));
CHECK_EQ(0L, res);
}
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 837d42669a..f070118bd8 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1393,6 +1393,49 @@ TEST(InternalizeExternal) {
CcTest::CollectGarbage(i::OLD_SPACE);
}
+TEST(Regress1402187) {
+ CcTest::InitializeVM();
+ i::Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ // This won't leak; the external string mechanism will call Dispose() on it.
+ const char ext_string_content[] = "prop-1234567890asdf";
+ OneByteVectorResource* resource =
+ new OneByteVectorResource(v8::base::Vector<const char>(
+ ext_string_content, strlen(ext_string_content)));
+ const uint32_t fake_hash =
+ String::CreateHashFieldValue(4711, String::HashFieldType::kHash);
+ {
+ v8::HandleScope scope(CcTest::isolate());
+ // Internalize a string with the same hash to ensure collision.
+ Handle<String> intern = isolate->factory()->NewStringFromAsciiChecked(
+ "internalized1234567", AllocationType::kOld);
+ intern->set_raw_hash_field(fake_hash);
+ factory->InternalizeName(intern);
+ CHECK(intern->IsInternalizedString());
+
+ v8::Local<v8::String> ext_string =
+ v8::String::NewFromUtf8Literal(CcTest::isolate(), ext_string_content);
+ ext_string->MakeExternal(resource);
+ Handle<String> string = v8::Utils::OpenHandle(*ext_string);
+ string->set_raw_hash_field(fake_hash);
+ CHECK(string->IsExternalString());
+ CHECK(!StringShape(*string).IsUncachedExternal());
+ CHECK(!string->IsInternalizedString());
+ CHECK(!String::Equals(isolate, string, intern));
+ CHECK_EQ(string->hash(), intern->hash());
+ CHECK_EQ(string->length(), intern->length());
+
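+    // The string table lookup must miss: the string shares the fake hash with
+    // the internalized string, but its contents differ.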
+ CHECK_EQ(isolate->string_table()->TryStringToIndexOrLookupExisting(
+ isolate, string->ptr()),
+ Smi::FromInt(ResultSentinel::kNotFound).ptr());
+ string = factory->InternalizeString(string);
+ CHECK(string->IsExternalString());
+ CHECK(string->IsInternalizedString());
+ }
+ CcTest::CollectGarbage(i::OLD_SPACE);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+}
+
TEST(SliceFromExternal) {
if (!v8_flags.string_slices) return;
CcTest::InitializeVM();
@@ -1682,8 +1725,8 @@ TEST(FormatMessage) {
Handle<String> arg1 = isolate->factory()->NewStringFromAsciiChecked("arg1");
Handle<String> arg2 = isolate->factory()->NewStringFromAsciiChecked("arg2");
Handle<String> result =
- MessageFormatter::Format(isolate, MessageTemplate::kPropertyNotFunction,
- arg0, arg1, arg2)
+ MessageFormatter::TryFormat(
+ isolate, MessageTemplate::kPropertyNotFunction, arg0, arg1, arg2)
.ToHandleChecked();
Handle<String> expected = isolate->factory()->NewStringFromAsciiChecked(
"'arg0' returned for property 'arg1' of object 'arg2' is not a function");
@@ -1893,7 +1936,6 @@ TEST(Regress876759) {
// The grandparent string becomes one-byte, but the child strings are still
// two-byte.
CHECK(grandparent->IsOneByteRepresentation());
- CHECK(parent->IsTwoByteRepresentation());
CHECK(sliced->IsTwoByteRepresentation());
// The *Underneath version returns the correct representation.
CHECK(String::IsOneByteRepresentationUnderneath(*sliced));
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
index ebb5ea0c42..9017c8c0ff 100644
--- a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
@@ -266,11 +266,11 @@ Handle<Code> CSATestRunner::create_find_entry(Isolate* isolate) {
// TODO(v8:11330): Remove once CSA implementation has a fallback for
// non-SSSE3/AVX configurations.
if (!IsEnabled()) {
- return FromCodeT(isolate->builtins()->code_handle(Builtin::kIllegal),
- isolate);
+ return isolate->builtins()->code_handle(Builtin::kIllegal);
}
static_assert(kFindEntryParams == 2); // (table, key)
- compiler::CodeAssemblerTester asm_tester(isolate, kFindEntryParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kFindEntryParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
@@ -293,7 +293,8 @@ Handle<Code> CSATestRunner::create_find_entry(Isolate* isolate) {
Handle<Code> CSATestRunner::create_get_data(Isolate* isolate) {
static_assert(kGetDataParams == 2); // (table, entry)
- compiler::CodeAssemblerTester asm_tester(isolate, kGetDataParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kGetDataParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
@@ -316,7 +317,8 @@ Handle<Code> CSATestRunner::create_get_data(Isolate* isolate) {
Handle<Code> CSATestRunner::create_put(Isolate* isolate) {
static_assert(kPutParams == 4); // (table, entry, value, details)
- compiler::CodeAssemblerTester asm_tester(isolate, kPutParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kPutParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
@@ -339,11 +341,11 @@ Handle<Code> CSATestRunner::create_delete(Isolate* isolate) {
// TODO(v8:11330): Remove once CSA implementation has a fallback for
// non-SSSE3/AVX configurations.
if (!IsEnabled()) {
- return FromCodeT(isolate->builtins()->code_handle(Builtin::kIllegal),
- isolate);
+ return isolate->builtins()->code_handle(Builtin::kIllegal);
}
static_assert(kDeleteParams == 2); // (table, entry)
- compiler::CodeAssemblerTester asm_tester(isolate, kDeleteParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kDeleteParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
@@ -365,11 +367,11 @@ Handle<Code> CSATestRunner::create_add(Isolate* isolate) {
// TODO(v8:11330): Remove once CSA implementation has a fallback for
// non-SSSE3/AVX configurations.
if (!IsEnabled()) {
- return FromCodeT(isolate->builtins()->code_handle(Builtin::kIllegal),
- isolate);
+ return isolate->builtins()->code_handle(Builtin::kIllegal);
}
static_assert(kAddParams == 4); // (table, key, value, details)
- compiler::CodeAssemblerTester asm_tester(isolate, kAddParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kAddParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
@@ -393,7 +395,8 @@ Handle<Code> CSATestRunner::create_add(Isolate* isolate) {
Handle<Code> CSATestRunner::create_allocate(Isolate* isolate) {
static_assert(kAllocateParams == 1); // (capacity)
- compiler::CodeAssemblerTester asm_tester(isolate, kAllocateParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kAllocateParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<IntPtrT> capacity = m.SmiToIntPtr(m.Parameter<Smi>(1));
@@ -408,7 +411,8 @@ Handle<Code> CSATestRunner::create_allocate(Isolate* isolate) {
Handle<Code> CSATestRunner::create_get_counts(Isolate* isolate) {
static_assert(kGetCountsParams == 1); // (table)
- compiler::CodeAssemblerTester asm_tester(isolate, kGetCountsParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kGetCountsParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
@@ -441,7 +445,8 @@ Handle<Code> CSATestRunner::create_get_counts(Isolate* isolate) {
Handle<Code> CSATestRunner::create_copy(Isolate* isolate) {
static_assert(kCopyParams == 1); // (table)
- compiler::CodeAssemblerTester asm_tester(isolate, kCopyParams + 1);
+ compiler::CodeAssemblerTester asm_tester(isolate,
+ JSParameterCount(kCopyParams));
CodeStubAssembler m(asm_tester.state());
{
TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index 6efec4948e..dbba048339 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -205,7 +205,7 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
TestData t = initial_data;
- Simulator::current(isolate)->Call<void>(code->entry(), &t);
+ Simulator::current(isolate)->Call<void>(code->code_entry_point(), &t);
int res = Simulator::current(isolate)->wreg(0);
CHECK_EQ(expected_res, res);
@@ -275,7 +275,7 @@ int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- Simulator::current(isolate)->Call<void>(code->entry(), test_data);
+ Simulator::current(isolate)->Call<void>(code->code_entry_point(), test_data);
return Simulator::current(isolate)->wreg(0);
}
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index 86479697a6..fe72a45d89 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -168,7 +168,7 @@ TEST(Unwind_BuiltinPCInMiddle_Success_CodePagesAPI) {
register_state.fp = stack;
// Put the current PC inside of a valid builtin.
- CodeT builtin = *BUILTIN_CODE(i_isolate, StringEqual);
+ Code builtin = *BUILTIN_CODE(i_isolate, StringEqual);
const uintptr_t offset = 40;
CHECK_LT(offset, builtin.InstructionSize());
register_state.pc =
@@ -225,7 +225,7 @@ TEST(Unwind_BuiltinPCAtStart_Success_CodePagesAPI) {
// Put the current PC at the start of a valid builtin, so that we are setting
// up the frame.
- CodeT builtin = *BUILTIN_CODE(i_isolate, StringEqual);
+ Code builtin = *BUILTIN_CODE(i_isolate, StringEqual);
register_state.pc = reinterpret_cast<void*>(builtin.InstructionStart());
bool unwound = v8::Unwinder::TryUnwindV8Frames(
@@ -296,17 +296,16 @@ TEST(Unwind_CodeObjectPCInMiddle_Success_CodePagesAPI) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*local_foo));
// Put the current PC inside of the created code object.
- CodeT codet = foo->code();
+ Code code = foo->code();
// We don't produce optimized code when run with --no-turbofan and
// --no-maglev.
- if (!codet.is_optimized_code()) return;
+ if (!code.is_optimized_code()) return;
- Code code = FromCodeT(codet);
// We don't want the offset too early or it could be the `push rbp`
// instruction (which is not at the start of generated code, because the lazy
// deopt check happens before frame setup).
- const uintptr_t offset = code.InstructionSize() - 20;
- CHECK_LT(offset, code.InstructionSize());
+ const uintptr_t offset = code.instruction_size() - 20;
+ CHECK_LT(offset, code.instruction_size());
Address pc = code.InstructionStart() + offset;
register_state.pc = reinterpret_cast<void*>(pc);
@@ -456,7 +455,7 @@ TEST(Unwind_JSEntry_Fail_CodePagesAPI) {
CHECK_LE(pages_length, arraysize(code_pages));
RegisterState register_state;
- CodeT js_entry = *BUILTIN_CODE(i_isolate, JSEntry);
+ Code js_entry = *BUILTIN_CODE(i_isolate, JSEntry);
byte* start = reinterpret_cast<byte*>(js_entry.InstructionStart());
register_state.pc = start + 10;
@@ -638,7 +637,7 @@ TEST(PCIsInV8_InJSEntryRange_CodePagesAPI) {
isolate->CopyCodePages(arraysize(code_pages), code_pages);
CHECK_LE(pages_length, arraysize(code_pages));
- CodeT js_entry = *BUILTIN_CODE(i_isolate, JSEntry);
+ Code js_entry = *BUILTIN_CODE(i_isolate, JSEntry);
byte* start = reinterpret_cast<byte*>(js_entry.InstructionStart());
size_t length = js_entry.InstructionSize();
@@ -676,7 +675,8 @@ TEST(PCIsInV8_LargeCodeObject_CodePagesAPI) {
Handle<Code> foo_code =
Factory::CodeBuilder(i_isolate, desc, CodeKind::WASM_FUNCTION).Build();
- CHECK(i_isolate->heap()->InSpace(*foo_code, CODE_LO_SPACE));
+ CHECK(i_isolate->heap()->InSpace(foo_code->instruction_stream(),
+ CODE_LO_SPACE));
byte* start = reinterpret_cast<byte*>(foo_code->InstructionStart());
MemoryRange code_pages[v8::Isolate::kMinCodePagesBufferSize];
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 1eebfeacf6..48707059ae 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -37,107 +37,116 @@ class TestTorqueAssembler : public CodeStubAssembler {
TEST(TestConstexpr1) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestConstexpr1();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestConstexprIf) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestConstexprIf();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestConstexprReturn) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestConstexprReturn();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestGotoLabel) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestGotoLabel()); }
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.CheckCall(ft.true_value());
}
TEST(TestGotoLabelWithOneParameter) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestGotoLabelWithOneParameter()); }
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.CheckCall(ft.true_value());
}
TEST(TestGotoLabelWithTwoParameters) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestGotoLabelWithTwoParameters()); }
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.CheckCall(ft.true_value());
}
TEST(TestPartiallyUnusedLabel) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestPartiallyUnusedLabel()); }
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.CheckCall(ft.true_value());
}
TEST(TestBuiltinSpecialization) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestBuiltinSpecialization();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestMacroSpecialization) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestMacroSpecialization();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestFunctionPointers) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
- TNode<Context> context = m.UncheckedParameter<Context>(kNumParams + 2);
+ auto context = m.GetJSContextParameter();
m.Return(m.TestFunctionPointers(context));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -147,10 +156,10 @@ TEST(TestFunctionPointers) {
TEST(TestTernaryOperator) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
- TNode<Smi> arg = m.UncheckedParameter<Smi>(1);
+ TNode<Smi> arg = m.Parameter<Smi>(1);
m.Return(m.TestTernaryOperator(arg));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -164,19 +173,21 @@ TEST(TestTernaryOperator) {
TEST(TestFunctionPointerToGeneric) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestFunctionPointerToGeneric();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestUnsafeCast) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
TNode<Object> temp = m.SmiConstant(0);
@@ -184,55 +195,59 @@ TEST(TestUnsafeCast) {
m.Return(m.TestUnsafeCast(m.UncheckedCast<Context>(temp),
m.UncheckedCast<Number>(n)));
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.CheckCall(ft.true_value());
}
TEST(TestHexLiteral) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestHexLiteral();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestModuleConstBindings) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestModuleConstBindings();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestLocalConstBindings) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestLocalConstBindings();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestForLoop) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestForLoop();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -242,13 +257,14 @@ TEST(TestTypeswitch) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestTypeswitch(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -258,13 +274,14 @@ TEST(TestGenericOverload) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestGenericOverload(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -274,31 +291,34 @@ TEST(TestEquality) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestEquality(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestLogicalOperators) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestLogicalOperators();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
TEST(TestOtherwiseAndLabels) {
Isolate* isolate(CcTest::InitIsolateOnce());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestOtherwiseWithCode1();
@@ -307,7 +327,7 @@ TEST(TestOtherwiseAndLabels) {
m.TestForwardLabel();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -317,7 +337,8 @@ TEST(TestCatch1) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
TNode<Smi> result =
@@ -326,7 +347,7 @@ TEST(TestCatch1) {
CSA_DCHECK(&m, m.TaggedEqual(result, m.SmiConstant(1)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -336,7 +357,8 @@ TEST(TestCatch2) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
TNode<Smi> result =
@@ -345,7 +367,7 @@ TEST(TestCatch2) {
CSA_DCHECK(&m, m.TaggedEqual(result, m.SmiConstant(2)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -355,7 +377,8 @@ TEST(TestCatch3) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
TNode<Smi> result =
@@ -364,7 +387,7 @@ TEST(TestCatch3) {
CSA_DCHECK(&m, m.TaggedEqual(result, m.SmiConstant(2)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -374,13 +397,14 @@ TEST(TestLookup) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 0);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestQualifiedAccess(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -616,7 +640,8 @@ TEST(TestBranchOnBoolOptimization) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestBranchOnBoolOptimization(
@@ -632,7 +657,7 @@ TEST(TestBitFieldLoad) {
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
const int kNumParams = 5;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
// Untag all of the parameters to get plain integer values.
@@ -672,7 +697,7 @@ TEST(TestBitFieldStore) {
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
// Untag the parameters to get a plain integer value.
@@ -695,7 +720,7 @@ TEST(TestBitFieldInit) {
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
const int kNumParams = 4;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
// Untag all of the parameters to get plain integer values.
@@ -732,7 +757,7 @@ TEST(TestBitFieldUintptrOps) {
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
const int kNumParams = 2;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
// Untag the parameters to get a plain integer value.
@@ -758,7 +783,7 @@ TEST(TestBitFieldMultipleFlags) {
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
TNode<BoolT> a =
@@ -779,7 +804,8 @@ TEST(TestTestParentFrameArguments) {
i::HandleScope scope(isolate);
Handle<Context> context =
Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestParentFrameArguments(
@@ -793,10 +819,11 @@ TEST(TestFullyGeneratedClassFromCpp) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestFullyGeneratedClassFromCpp()); }
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<ExportedSubClass> result =
Handle<ExportedSubClass>::cast(ft.Call().ToHandleChecked());
CHECK_EQ(result->c_field(), 7);
@@ -808,13 +835,14 @@ TEST(TestFullyGeneratedClassWithElements) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestFullyGeneratedClassWithElements();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -822,7 +850,8 @@ TEST(TestGeneratedCastOperators) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
Handle<Context> context =
@@ -831,7 +860,7 @@ TEST(TestGeneratedCastOperators) {
m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -839,7 +868,8 @@ TEST(TestNewPretenured) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
Handle<Context> context =
@@ -847,7 +877,7 @@ TEST(TestNewPretenured) {
m.TestNewPretenured(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -855,13 +885,14 @@ TEST(TestWord8Phi) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestWord8Phi();
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -869,7 +900,8 @@ TEST(TestOffHeapSlice) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
std::string data = "Hello World!";
{
@@ -877,7 +909,7 @@ TEST(TestOffHeapSlice) {
m.IntPtrConstant(data.size()));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -885,7 +917,8 @@ TEST(TestCallMultiReturnBuiltin) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
- CodeAssemblerTester asm_tester(isolate, 1);
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
Handle<Context> context =
@@ -894,7 +927,7 @@ TEST(TestCallMultiReturnBuiltin) {
m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
- FunctionTester ft(asm_tester.GenerateCode(), 0);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call();
}
@@ -904,7 +937,7 @@ TEST(TestRunLazyTwice) {
i::HandleScope scope(isolate);
const int kNumParams = 0;
int lazyNumber = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
CodeStubAssembler::LazyNode<Smi> lazy = [&]() {
@@ -923,7 +956,7 @@ TEST(TestCreateLazyNodeFromTorque) {
Isolate* isolate(CcTest::i_isolate());
i::HandleScope scope(isolate);
const int kNumParams = 0;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssemblerTester asm_tester(isolate, JSParameterCount(kNumParams));
TestTorqueAssembler m(asm_tester.state());
{
m.TestCreateLazyNodeFromTorque();
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 0b12ef3174..4871dad766 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -80,7 +80,7 @@ class CWasmEntryArgTester {
Isolate* isolate_;
std::function<ReturnType(Args...)> expected_fn_;
const FunctionSig* sig_;
- Handle<CodeT> c_wasm_entry_;
+ Handle<Code> c_wasm_entry_;
WasmCode* wasm_code_;
};
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index 395d756ea7..bddfe64e8f 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -81,23 +81,24 @@ class WasmGCTester {
}
byte DefineStruct(std::initializer_list<F> fields,
- uint32_t supertype = kNoSuperType) {
+ uint32_t supertype = kNoSuperType, bool is_final = false) {
StructType::Builder type_builder(&zone_,
static_cast<uint32_t>(fields.size()));
for (F field : fields) {
type_builder.AddField(field.first, field.second);
}
- return builder_.AddStructType(type_builder.Build(), supertype);
+ return builder_.AddStructType(type_builder.Build(), is_final, supertype);
}
byte DefineArray(ValueType element_type, bool mutability,
- uint32_t supertype = kNoSuperType) {
+ uint32_t supertype = kNoSuperType, bool is_final = false) {
return builder_.AddArrayType(zone_.New<ArrayType>(element_type, mutability),
- supertype);
+ is_final, supertype);
}
- byte DefineSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType) {
- return builder_.AddSignature(sig, supertype);
+ byte DefineSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType,
+ bool is_final = false) {
+ return builder_.ForceAddSignature(sig, is_final, supertype);
}
byte DefineTable(ValueType type, uint32_t min_size, uint32_t max_size) {
@@ -241,7 +242,7 @@ class WasmGCTester {
NativeModule* native_module = instance_->module_object().native_module();
Address wasm_call_target = instance_->GetCallTarget(function_index);
Handle<Object> object_ref = instance_;
- Handle<CodeT> c_wasm_entry =
+ Handle<Code> c_wasm_entry =
compiler::CompileCWasmEntry(isolate_, sig, native_module->module());
Execution::CallWasm(isolate_, c_wasm_entry, wasm_call_target, object_ref,
packer->argv());
@@ -318,6 +319,10 @@ WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
WASM_LOCAL_GET(j_local_index)),
kExprEnd});
+ const byte kNullDereference = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_STRUCT_GET(type_index, 0, WASM_REF_NULL(type_index)), kExprEnd});
+
tester.CompileModule();
tester.CheckResult(kGet1, 42);
@@ -330,6 +335,7 @@ WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
.ToHandleChecked()
->IsWasmStruct());
tester.CheckResult(kSet, -99);
+ tester.CheckHasThrown(kNullDereference);
}
// Test struct.get, ref.as_non_null and ref-typed globals.
@@ -385,7 +391,7 @@ WASM_COMPILED_EXEC_TEST(WasmRefAsNonNullSkipCheck) {
tester.CompileModule();
Handle<Object> result = tester.GetResultObject(kFunc).ToHandleChecked();
// Without null checks, ref.as_non_null can actually return null.
- CHECK(result->IsNull());
+ CHECK(result->IsWasmNull());
}
WASM_COMPILED_EXEC_TEST(WasmBrOnNull) {
@@ -543,8 +549,6 @@ WASM_COMPILED_EXEC_TEST(RefCastNoChecks) {
const byte supertype_index = tester.DefineStruct({F(kWasmI32, true)});
const byte subtype1_index = tester.DefineStruct(
{F(kWasmI32, true), F(kWasmF32, true)}, supertype_index);
- const byte subtype2_index = tester.DefineStruct(
- {F(kWasmI32, true), F(kWasmI64, false)}, supertype_index);
const byte kTestSuccessful = tester.DefineFunction(
tester.sigs.i_v(), {ValueType::RefNull(supertype_index)},
@@ -553,16 +557,8 @@ WASM_COMPILED_EXEC_TEST(RefCastNoChecks) {
WASM_REF_CAST(WASM_LOCAL_GET(0), subtype1_index)),
WASM_END});
- const byte kTestFailed = tester.DefineFunction(
- tester.sigs.i_v(), {ValueType::RefNull(supertype_index)},
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT(subtype1_index)),
- WASM_STRUCT_GET(subtype2_index, 0,
- WASM_REF_CAST(WASM_LOCAL_GET(0), subtype2_index)),
- WASM_END});
-
tester.CompileModule();
tester.CheckResult(kTestSuccessful, 0);
- tester.CheckResult(kTestFailed, 0);
}
WASM_COMPILED_EXEC_TEST(BrOnCast) {
@@ -575,7 +571,7 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
{WASM_BLOCK_R(
ValueType::RefNull(type_index), WASM_LOCAL_SET(0, WASM_I32V(111)),
// Pipe a struct through a local so it's statically typed
- // as dataref.
+ // as structref.
WASM_LOCAL_SET(1, WASM_STRUCT_NEW(other_type_index, WASM_F32(1.0))),
WASM_LOCAL_GET(1),
// The type check fails, so this branch isn't taken.
@@ -592,6 +588,29 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
WASM_GC_OP(kExprStructGet), type_index, 0, WASM_LOCAL_GET(0),
kExprI32Add, kExprEnd});
+ const byte kTestStructStaticNull = tester.DefineFunction(
+ tester.sigs.i_v(), {kWasmI32, kWasmStructRef},
+ {WASM_BLOCK_R(
+ ValueType::RefNull(type_index), WASM_LOCAL_SET(0, WASM_I32V(111)),
+ // Pipe a struct through a local so it's statically typed as
+ // structref.
+ WASM_LOCAL_SET(1, WASM_STRUCT_NEW(other_type_index, WASM_F32(1.0))),
+ WASM_LOCAL_GET(1),
+ // The type check fails, so this branch isn't taken.
+ WASM_BR_ON_CAST(0, type_index), WASM_DROP,
+
+ WASM_LOCAL_SET(0, WASM_I32V(221)), // (Final result) - 1
+ WASM_LOCAL_SET(1, WASM_STRUCT_NEW(type_index, WASM_I32V(1))),
+ WASM_LOCAL_GET(1),
+ // This branch is taken.
+ WASM_BR_ON_CAST_NULL(0, type_index), WASM_GC_OP(kExprRefCast),
+ type_index,
+
+ // Not executed due to the branch.
+ WASM_LOCAL_SET(0, WASM_I32V(333))),
+ WASM_GC_OP(kExprStructGet), type_index, 0, WASM_LOCAL_GET(0),
+ kExprI32Add, kExprEnd});
+
const byte kTestNullDeprecated = tester.DefineFunction(
tester.sigs.i_v(), {kWasmI32, kWasmStructRef},
{WASM_BLOCK_R(ValueType::RefNull(type_index),
@@ -614,6 +633,17 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
type_index), // Traps
WASM_DROP, WASM_LOCAL_GET(0), kExprEnd});
+ // "br_on_cast null" also branches on null, treating it as a successful cast.
+ const byte kTestNullNull = tester.DefineFunction(
+ tester.sigs.i_v(), {kWasmI32, kWasmStructRef},
+ {WASM_BLOCK_R(ValueType::RefNull(type_index),
+ WASM_LOCAL_SET(0, WASM_I32V(111)),
+ WASM_LOCAL_GET(1), // Put a nullref onto the value stack.
+ // Taken for nullref with br_on_cast null.
+ WASM_BR_ON_CAST_NULL(0, type_index),
+ WASM_GC_OP(kExprRefCast), type_index),
+ WASM_DROP, WASM_LOCAL_GET(0), kExprEnd});
+
const byte kTypedAfterBranch = tester.DefineFunction(
tester.sigs.i_v(), {kWasmI32, kWasmStructRef},
{WASM_LOCAL_SET(1, WASM_STRUCT_NEW(type_index, WASM_I32V(42))),
@@ -631,8 +661,10 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
tester.CompileModule();
tester.CheckResult(kTestStructStatic, 222);
+ tester.CheckResult(kTestStructStaticNull, 222);
tester.CheckResult(kTestNullDeprecated, 222);
tester.CheckHasThrown(kTestNull);
+ tester.CheckResult(kTestNullNull, 111);
tester.CheckResult(kTypedAfterBranch, 42);
}
@@ -839,12 +871,6 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
{WASM_ARRAY_LEN(WASM_ARRAY_NEW(type_index, WASM_I32V(0), WASM_I32V(42))),
kExprEnd});
- const byte kGetLengthDeprecated = tester.DefineFunction(
- tester.sigs.i_v(), {},
- {WASM_ARRAY_NEW(type_index, WASM_I32V(0), WASM_I32V(42)),
- WASM_GC_OP(kExprArrayLenDeprecated), /*dummy type immediate*/ 0,
- kExprEnd});
-
// Create an array of length 2, initialized to [42, 42].
const byte kAllocate = tester.DefineFunction(
&sig_q_v, {},
@@ -901,7 +927,6 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
tester.CheckHasThrown(kGetElem, 3);
tester.CheckHasThrown(kGetElem, -1);
tester.CheckResult(kGetLength, 42);
- tester.CheckResult(kGetLengthDeprecated, 42);
tester.CheckResult(kImmutable, 42);
tester.CheckResult(kTestFpArray, static_cast<int32_t>(result_value));
@@ -1125,7 +1150,7 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
{
Handle<Object> result5 =
tester.GetResultObject(kCopyRef, 5).ToHandleChecked();
- CHECK(result5->IsNull());
+ CHECK(result5->IsWasmNull());
for (int i = 6; i <= 9; i++) {
Handle<Object> res =
tester.GetResultObject(kCopyRef, i).ToHandleChecked();
@@ -1136,7 +1161,7 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
}
CHECK(tester.GetResultObject(kCopyRefOverlapping, 6)
.ToHandleChecked()
- ->IsNull());
+ ->IsWasmNull());
Handle<Object> res0 =
tester.GetResultObject(kCopyRefOverlapping, 0).ToHandleChecked();
CHECK(res0->IsWasmArray());
@@ -1293,21 +1318,21 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCastsStatic) {
tester.sigs.i_v(), {refNull(subtype_index)},
{WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT(subtype_index)),
WASM_BLOCK_R(refNull(sig_index), WASM_LOCAL_GET(0),
- WASM_BR_ON_CAST(0, sig_index), WASM_DROP,
+ WASM_BR_ON_CAST_DEPRECATED(0, sig_index), WASM_DROP,
WASM_RETURN(WASM_I32V(0))),
WASM_DROP, WASM_I32V(1), WASM_END});
const byte kBrOnCastUnrelatedNull = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_BLOCK_R(refNull(sig_index), WASM_REF_NULL(subtype_index),
- WASM_BR_ON_CAST(0, sig_index), WASM_DROP,
+ WASM_BR_ON_CAST_DEPRECATED(0, sig_index), WASM_DROP,
WASM_RETURN(WASM_I32V(0))),
WASM_DROP, WASM_I32V(1), WASM_END});
const byte kBrOnCastUnrelatedNonNullable = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_BLOCK_R(refNull(sig_index), WASM_STRUCT_NEW_DEFAULT(subtype_index),
- WASM_BR_ON_CAST(0, sig_index), WASM_DROP,
+ WASM_BR_ON_CAST_DEPRECATED(0, sig_index), WASM_DROP,
WASM_RETURN(WASM_I32V(0))),
WASM_DROP, WASM_I32V(1), WASM_END});
@@ -1337,14 +1362,14 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCastsStatic) {
tester.sigs.i_v(), {refNull(subtype_index)},
{WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT(subtype_index)),
WASM_BLOCK_R(refNull(subtype_index), WASM_LOCAL_GET(0),
- WASM_BR_ON_CAST_FAIL(0, sig_index), WASM_DROP,
+ WASM_BR_ON_CAST_FAIL_DEPRECATED(0, sig_index), WASM_DROP,
WASM_RETURN(WASM_I32V(0))),
WASM_DROP, WASM_I32V(1), WASM_END});
const byte kBrOnCastFailUnrelatedNull = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_BLOCK_R(refNull(subtype_index), WASM_REF_NULL(subtype_index),
- WASM_BR_ON_CAST_FAIL(0, sig_index), WASM_DROP,
+ WASM_BR_ON_CAST_FAIL_DEPRECATED(0, sig_index), WASM_DROP,
WASM_RETURN(WASM_I32V(0))),
WASM_DROP, WASM_I32V(1), WASM_END});
@@ -1352,7 +1377,7 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCastsStatic) {
tester.sigs.i_v(), {},
{WASM_BLOCK_R(refNull(subtype_index),
WASM_STRUCT_NEW_DEFAULT(subtype_index),
- WASM_BR_ON_CAST_FAIL(0, sig_index), WASM_DROP,
+ WASM_BR_ON_CAST_FAIL_DEPRECATED(0, sig_index), WASM_DROP,
WASM_RETURN(WASM_I32V(0))),
WASM_DROP, WASM_I32V(1), WASM_END});
@@ -1547,8 +1572,8 @@ WASM_COMPILED_EXEC_TEST(FunctionRefs) {
Handle<WasmInternalFunction>::cast(result_cast_reference)->external(),
tester.isolate()));
- CHECK_EQ(cast_function->code().raw_instruction_start(),
- cast_function_reference->code().raw_instruction_start());
+ CHECK_EQ(cast_function->code().InstructionStart(),
+ cast_function_reference->code().InstructionStart());
tester.CheckResult(test_deprecated, 1);
tester.CheckResult(test_fail_deprecated, 0);
@@ -1950,6 +1975,7 @@ WASM_COMPILED_EXEC_TEST(GlobalInitReferencingGlobal) {
WASM_COMPILED_EXEC_TEST(GCTables) {
WasmGCTester tester(execution_tier);
+ tester.builder()->StartRecursiveTypeGroup();
byte super_struct = tester.DefineStruct({F(kWasmI32, false)});
byte sub_struct = tester.DefineStruct({F(kWasmI32, false), F(kWasmI32, true)},
super_struct);
@@ -1959,6 +1985,8 @@ WASM_COMPILED_EXEC_TEST(GCTables) {
FunctionSig* sub_sig =
FunctionSig::Build(tester.zone(), {kWasmI32}, {refNull(super_struct)});
byte sub_sig_index = tester.DefineSignature(sub_sig, super_sig_index);
+ byte unrelated_sig_index = tester.DefineSignature(sub_sig, super_sig_index);
+ tester.builder()->EndRecursiveTypeGroup();
tester.DefineTable(refNull(super_sig_index), 10, 10);
@@ -1976,8 +2004,8 @@ WASM_COMPILED_EXEC_TEST(GCTables) {
tester.sigs.i_v(), {},
{WASM_TABLE_SET(0, WASM_I32V(0), WASM_REF_NULL(super_sig_index)),
WASM_TABLE_SET(0, WASM_I32V(1), WASM_REF_FUNC(super_func)),
- WASM_TABLE_SET(0, WASM_I32V(2), WASM_REF_FUNC(sub_func)), WASM_I32V(0),
- WASM_END});
+ WASM_TABLE_SET(0, WASM_I32V(2), WASM_REF_FUNC(sub_func)), // --
+ WASM_I32V(0), WASM_END});
byte super_struct_producer = tester.DefineFunction(
FunctionSig::Build(tester.zone(), {ref(super_struct)}, {}), {},
@@ -2009,12 +2037,20 @@ WASM_COMPILED_EXEC_TEST(GCTables) {
WASM_CALL_FUNCTION0(super_struct_producer),
WASM_I32V(2)),
WASM_END});
+  // Calling with a signature that is a subtype of the table's type should
+  // work, provided the entry's type is a subtype of the declared signature.
+ byte call_table_subtype_entry_subtype = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_CALL_INDIRECT(super_sig_index,
+ WASM_CALL_FUNCTION0(sub_struct_producer),
+ WASM_I32V(2)),
+ WASM_END});
// Calling with a signature that is mismatched to that of the entry should
// trap.
byte call_type_mismatch = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_CALL_INDIRECT(super_sig_index,
- WASM_CALL_FUNCTION0(sub_struct_producer),
+ {WASM_CALL_INDIRECT(unrelated_sig_index,
+ WASM_CALL_FUNCTION0(super_struct_producer),
WASM_I32V(2)),
WASM_END});
// Getting a table element and then calling it with call_ref should work.
@@ -2036,6 +2072,7 @@ WASM_COMPILED_EXEC_TEST(GCTables) {
tester.CheckHasThrown(call_null);
tester.CheckResult(call_same_type, 18);
tester.CheckResult(call_subtype, -5);
+ tester.CheckResult(call_table_subtype_entry_subtype, 7);
tester.CheckHasThrown(call_type_mismatch);
tester.CheckResult(table_get_and_call_ref, 7);
}
@@ -2050,11 +2087,11 @@ WASM_COMPILED_EXEC_TEST(JsAccess) {
FunctionSig sig_i_super(1, 1, kSupertypeToI);
tester.DefineExportedFunction(
- "disallowed", &sig_t_v,
+ "typed_producer", &sig_t_v,
{WASM_STRUCT_NEW(type_index, WASM_I32V(42)), kExprEnd});
// Same code, different signature.
tester.DefineExportedFunction(
- "producer", &sig_super_v,
+ "untyped_producer", &sig_super_v,
{WASM_STRUCT_NEW(type_index, WASM_I32V(42)), kExprEnd});
tester.DefineExportedFunction(
"consumer", &sig_i_super,
@@ -2065,41 +2102,37 @@ WASM_COMPILED_EXEC_TEST(JsAccess) {
tester.CompileModule();
Isolate* isolate = tester.isolate();
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- MaybeHandle<Object> maybe_result =
- tester.CallExportedFunction("disallowed", 0, nullptr);
- CHECK(maybe_result.is_null());
- CHECK(try_catch.HasCaught());
- try_catch.Reset();
- isolate->clear_pending_exception();
-
- maybe_result = tester.CallExportedFunction("producer", 0, nullptr);
- if (maybe_result.is_null()) {
- FATAL("Calling 'producer' failed: %s",
- *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
- try_catch.Message()->Get()));
- }
- {
- Handle<Object> args[] = {maybe_result.ToHandleChecked()};
- maybe_result = tester.CallExportedFunction("consumer", 1, args);
- }
- if (maybe_result.is_null()) {
- FATAL("Calling 'consumer' failed: %s",
- *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
- try_catch.Message()->Get()));
- }
- Handle<Object> result = maybe_result.ToHandleChecked();
- CHECK(result->IsSmi());
- CHECK_EQ(42, Smi::cast(*result).value());
- // Calling {consumer} with any other object (e.g. the Smi we just got as
- // {result}) should trap.
- {
- Handle<Object> args[] = {result};
- maybe_result = tester.CallExportedFunction("consumer", 1, args);
+ for (const char* producer : {"typed_producer", "untyped_producer"}) {
+ MaybeHandle<Object> maybe_result =
+ tester.CallExportedFunction(producer, 0, nullptr);
+ if (maybe_result.is_null()) {
+ FATAL("Calling %s failed: %s", producer,
+ *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
+ try_catch.Message()->Get()));
+ }
+ {
+ Handle<Object> args[] = {maybe_result.ToHandleChecked()};
+ maybe_result = tester.CallExportedFunction("consumer", 1, args);
+ }
+ if (maybe_result.is_null()) {
+ FATAL("Calling 'consumer' failed: %s",
+ *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
+ try_catch.Message()->Get()));
+ }
+ Handle<Object> result = maybe_result.ToHandleChecked();
+ CHECK(result->IsSmi());
+ CHECK_EQ(42, Smi::cast(*result).value());
+ // Calling {consumer} with any other object (e.g. the Smi we just got as
+ // {result}) should trap.
+ {
+ Handle<Object> args[] = {result};
+ maybe_result = tester.CallExportedFunction("consumer", 1, args);
+ }
+ CHECK(maybe_result.is_null());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+ isolate->clear_pending_exception();
}
- CHECK(maybe_result.is_null());
- CHECK(try_catch.HasCaught());
- try_catch.Reset();
- isolate->clear_pending_exception();
}
WASM_COMPILED_EXEC_TEST(WasmExternInternalize) {
diff --git a/deps/v8/test/cctest/wasm/test-grow-memory.cc b/deps/v8/test/cctest/wasm/test-grow-memory.cc
index a984fc9706..b796e8ff9a 100644
--- a/deps/v8/test/cctest/wasm/test-grow-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-grow-memory.cc
@@ -114,8 +114,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
handle(memory_object->array_buffer(), isolate));
// Grow using an internal Wasm bytecode.
- result = testing::CallWasmFunctionForTesting(isolate, instance, "main", 0,
- nullptr);
+ result = testing::CallWasmFunctionForTesting(isolate, instance, "main", {});
CHECK_EQ(26, result);
CHECK(external2.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, external2.buffer_->byte_length());
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 274278007c..0453464c08 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -44,9 +44,10 @@ constexpr size_t kThunkBufferSize = 64 * KB;
// is not reliable enough to guarantee that we can always achieve this with
// separate allocations, so we generate all code in a single
// kMaxCodeMemory-sized chunk.
-constexpr size_t kAssemblerBufferSize = WasmCodeAllocator::kMaxCodeSpaceSize;
+constexpr size_t kAssemblerBufferSize =
+ size_t{kDefaultMaxWasmCodeSpaceSizeMb} * MB;
constexpr uint32_t kAvailableBufferSlots =
- (WasmCodeAllocator::kMaxCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
+ (kAssemblerBufferSize - kJumpTableSize) / kThunkBufferSize;
constexpr uint32_t kBufferSlotStartOffset =
RoundUp<kThunkBufferSize>(kJumpTableSize);
#else
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc b/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc
index 3843089dd8..205e981ee7 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc
@@ -17,7 +17,7 @@ namespace test_liftoff_for_fuzzing {
TEST(MaxSteps) {
WasmRunner<uint32_t> r(TestExecutionTier::kLiftoffForFuzzing);
- BUILD(r, WASM_LOOP(WASM_BR(0)), WASM_I32V(23));
+ r.Build({WASM_LOOP(WASM_BR(0)), WASM_I32V(23)});
r.SetMaxSteps(10);
r.CheckCallViaJSTraps();
}
@@ -25,7 +25,7 @@ TEST(MaxSteps) {
TEST(NondeterminismUnopF32) {
WasmRunner<float> r(TestExecutionTier::kLiftoffForFuzzing);
- BUILD(r, WASM_F32_ABS(WASM_F32(std::nanf(""))));
+ r.Build({WASM_F32_ABS(WASM_F32(std::nanf("")))});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(std::nanf(""));
CHECK(r.HasNondeterminism());
@@ -34,7 +34,7 @@ TEST(NondeterminismUnopF32) {
TEST(NondeterminismUnopF64) {
WasmRunner<double> r(TestExecutionTier::kLiftoffForFuzzing);
- BUILD(r, WASM_F64_ABS(WASM_F64(std::nan(""))));
+ r.Build({WASM_F64_ABS(WASM_F64(std::nan("")))});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(std::nan(""));
CHECK(r.HasNondeterminism());
@@ -44,10 +44,9 @@ TEST(NondeterminismUnopF32x4AllNaN) {
WasmRunner<int32_t, float> r(TestExecutionTier::kLiftoffForFuzzing);
byte value = 0;
- BUILD(r,
- WASM_SIMD_UNOP(kExprF32x4Ceil,
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
- kExprDrop, WASM_ONE);
+ r.Build({WASM_SIMD_UNOP(kExprF32x4Ceil,
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
+ kExprDrop, WASM_ONE});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, 0.0);
CHECK(!r.HasNondeterminism());
@@ -58,9 +57,9 @@ TEST(NondeterminismUnopF32x4AllNaN) {
TEST(NondeterminismUnopF32x4OneNaN) {
for (byte lane = 0; lane < 4; ++lane) {
WasmRunner<int32_t, float> r(TestExecutionTier::kLiftoffForFuzzing);
- BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_F32(0)), WASM_LOCAL_GET(0),
- WASM_SIMD_OP(kExprF32x4ReplaceLane), lane,
- WASM_SIMD_OP(kExprF32x4Ceil), kExprDrop, WASM_ONE);
+ r.Build({WASM_SIMD_F32x4_SPLAT(WASM_F32(0)), WASM_LOCAL_GET(0),
+ WASM_SIMD_OP(kExprF32x4ReplaceLane), lane,
+ WASM_SIMD_OP(kExprF32x4Ceil), kExprDrop, WASM_ONE});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, 0.0);
CHECK(!r.HasNondeterminism());
@@ -73,10 +72,9 @@ TEST(NondeterminismUnopF64x2AllNaN) {
WasmRunner<int32_t, double> r(TestExecutionTier::kLiftoffForFuzzing);
byte value = 0;
- BUILD(r,
- WASM_SIMD_UNOP(kExprF64x2Ceil,
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
- kExprDrop, WASM_ONE);
+ r.Build({WASM_SIMD_UNOP(kExprF64x2Ceil,
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
+ kExprDrop, WASM_ONE});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, 0.0);
CHECK(!r.HasNondeterminism());
@@ -87,9 +85,9 @@ TEST(NondeterminismUnopF64x2AllNaN) {
TEST(NondeterminismUnopF64x2OneNaN) {
for (byte lane = 0; lane < 2; ++lane) {
WasmRunner<int32_t, double> r(TestExecutionTier::kLiftoffForFuzzing);
- BUILD(r, WASM_SIMD_F64x2_SPLAT(WASM_F64(0)), WASM_LOCAL_GET(0),
- WASM_SIMD_OP(kExprF64x2ReplaceLane), lane,
- WASM_SIMD_OP(kExprF64x2Ceil), kExprDrop, WASM_ONE);
+ r.Build({WASM_SIMD_F64x2_SPLAT(WASM_F64(0)), WASM_LOCAL_GET(0),
+ WASM_SIMD_OP(kExprF64x2ReplaceLane), lane,
+ WASM_SIMD_OP(kExprF64x2Ceil), kExprDrop, WASM_ONE});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, 0.0);
CHECK(!r.HasNondeterminism());
@@ -101,7 +99,7 @@ TEST(NondeterminismUnopF64x2OneNaN) {
TEST(NondeterminismBinop) {
WasmRunner<float> r(TestExecutionTier::kLiftoffForFuzzing);
- BUILD(r, WASM_F32_ADD(WASM_F32(std::nanf("")), WASM_F32(0)));
+ r.Build({WASM_F32_ADD(WASM_F32(std::nanf("")), WASM_F32(0))});
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(std::nanf(""));
CHECK(r.HasNondeterminism());
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index 4f43001d43..96f06d7786 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -21,12 +21,12 @@ class LiftoffCompileEnvironment {
: isolate_(CcTest::InitIsolateOnce()),
handle_scope_(isolate_),
zone_(isolate_->allocator(), ZONE_NAME),
- wasm_runner_(nullptr, TestExecutionTier::kLiftoff, 0,
+ wasm_runner_(nullptr, kWasmOrigin, TestExecutionTier::kLiftoff, 0,
kRuntimeExceptionSupport) {
// Add a table of length 1, for indirect calls.
wasm_runner_.builder().AddIndirectFunctionTable(nullptr, 1);
// Set tiered down such that we generate debugging code.
- wasm_runner_.builder().SetTieredDown();
+ wasm_runner_.builder().SetDebugState();
}
struct TestFunction {
@@ -135,7 +135,7 @@ class LiftoffCompileEnvironment {
// Compile the function so we can get the WasmCode* which is later used to
// generate the debug side table lazily.
auto& func_compiler = wasm_runner_.NewFunction(sig, "f");
- func_compiler.Build(function_bytes.begin(), function_bytes.end());
+ func_compiler.Build(base::VectorOf(function_bytes));
WasmCode* code =
wasm_runner_.builder().GetFunctionCode(func_compiler.function_index());
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 7960743944..5cb74db51c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -26,7 +26,7 @@ WASM_EXEC_TEST(I64Const) {
WasmRunner<int64_t> r(execution_tier);
const int64_t kExpectedValue = 0x1122334455667788LL;
// return(kExpectedValue)
- BUILD(r, WASM_I64V_9(kExpectedValue));
+ r.Build({WASM_I64V_9(kExpectedValue)});
CHECK_EQ(kExpectedValue, r.Call());
}
@@ -36,7 +36,7 @@ WASM_EXEC_TEST(I64Const_many) {
WasmRunner<int64_t> r(execution_tier);
const int64_t kExpectedValue = (static_cast<uint64_t>(i) << 32) | cntr;
// return(kExpectedValue)
- BUILD(r, WASM_I64V(kExpectedValue));
+ r.Build({WASM_I64V(kExpectedValue)});
CHECK_EQ(kExpectedValue, r.Call());
cntr++;
}
@@ -45,14 +45,14 @@ WASM_EXEC_TEST(I64Const_many) {
WASM_EXEC_TEST(Return_I64) {
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_RETURN(WASM_LOCAL_GET(0)));
+ r.Build({WASM_RETURN(WASM_LOCAL_GET(0))});
FOR_INT64_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(I64Add) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(base::AddWithWraparound(i, j), r.Call(i, j));
@@ -67,17 +67,16 @@ const int64_t kHasBit33On = 0x100000000;
WASM_EXEC_TEST(Regress5800_Add) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_ADD(
- WASM_I64V(0), WASM_I64V(kHasBit33On)))),
- WASM_RETURN(WASM_I32V(0))),
- WASM_I32V(0));
+ r.Build({WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_ADD(
+ WASM_I64V(0), WASM_I64V(kHasBit33On)))),
+ WASM_RETURN(WASM_I32V(0))),
+ WASM_I32V(0)});
CHECK_EQ(0, r.Call());
}
WASM_EXEC_TEST(I64Sub) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(base::SubWithWraparound(i, j), r.Call(i, j));
@@ -87,18 +86,17 @@ WASM_EXEC_TEST(I64Sub) {
WASM_EXEC_TEST(Regress5800_Sub) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_SUB(
- WASM_I64V(0), WASM_I64V(kHasBit33On)))),
- WASM_RETURN(WASM_I32V(0))),
- WASM_I32V(0));
+ r.Build({WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_SUB(
+ WASM_I64V(0), WASM_I64V(kHasBit33On)))),
+ WASM_RETURN(WASM_I32V(0))),
+ WASM_I32V(0)});
CHECK_EQ(0, r.Call());
}
WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_I64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(static_cast<int32_t>(base::AddWithWraparound(i, j)),
@@ -109,8 +107,8 @@ WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_I64_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(static_cast<int32_t>(base::SubWithWraparound(i, j)),
@@ -121,8 +119,8 @@ WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_I64_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(static_cast<int32_t>(base::MulWithWraparound(i, j)),
@@ -133,8 +131,8 @@ WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
int32_t expected = static_cast<int32_t>(base::ShlWithWraparound(i, j));
@@ -145,8 +143,8 @@ WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
int32_t expected = static_cast<int32_t>((i) >> (j & 0x3F));
@@ -157,8 +155,8 @@ WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
int32_t expected = static_cast<int32_t>((i) >> (j & 0x3F));
@@ -169,7 +167,7 @@ WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
WASM_EXEC_TEST(I64DivS) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
if (j == 0) {
@@ -185,7 +183,7 @@ WASM_EXEC_TEST(I64DivS) {
WASM_EXEC_TEST(I64DivS_Trap) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(0, r.Call(int64_t{0}, int64_t{100}));
CHECK_TRAP64(r.Call(int64_t{100}, int64_t{0}));
CHECK_TRAP64(r.Call(int64_t{-1001}, int64_t{0}));
@@ -196,7 +194,7 @@ WASM_EXEC_TEST(I64DivS_Trap) {
WASM_EXEC_TEST(I64DivS_Byzero_Const) {
for (int8_t denom = -2; denom < 8; denom++) {
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_I64V_1(denom)));
+ r.Build({WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_I64V_1(denom))});
for (int64_t val = -7; val < 8; val++) {
if (denom == 0) {
CHECK_TRAP64(r.Call(val));
@@ -209,7 +207,7 @@ WASM_EXEC_TEST(I64DivS_Byzero_Const) {
WASM_EXEC_TEST(I64DivU) {
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_DIVU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
if (j == 0) {
@@ -223,7 +221,7 @@ WASM_EXEC_TEST(I64DivU) {
WASM_EXEC_TEST(I64DivU_Trap) {
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_DIVU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(0, r.Call(uint64_t{0}, uint64_t{100}));
CHECK_TRAP64(r.Call(uint64_t{100}, uint64_t{0}));
CHECK_TRAP64(r.Call(uint64_t{1001}, uint64_t{0}));
@@ -233,7 +231,7 @@ WASM_EXEC_TEST(I64DivU_Trap) {
WASM_EXEC_TEST(I64DivU_Byzero_Const) {
for (uint64_t denom = 0xFFFFFFFFFFFFFFFE; denom < 8; denom++) {
WasmRunner<uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVU(WASM_LOCAL_GET(0), WASM_I64V_1(denom)));
+ r.Build({WASM_I64_DIVU(WASM_LOCAL_GET(0), WASM_I64V_1(denom))});
for (uint64_t val = 0xFFFFFFFFFFFFFFF0; val < 8; val++) {
if (denom == 0) {
@@ -247,7 +245,7 @@ WASM_EXEC_TEST(I64DivU_Byzero_Const) {
WASM_EXEC_TEST(I64RemS) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
if (j == 0) {
@@ -263,7 +261,7 @@ WASM_EXEC_TEST(I64RemS) {
WASM_EXEC_TEST(I64RemS_Trap) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(33, r.Call(int64_t{133}, int64_t{100}));
CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), int64_t{-1}));
CHECK_TRAP64(r.Call(int64_t{100}, int64_t{0}));
@@ -273,7 +271,7 @@ WASM_EXEC_TEST(I64RemS_Trap) {
WASM_EXEC_TEST(I64RemU) {
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_REMU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_REMU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
if (j == 0) {
@@ -287,7 +285,7 @@ WASM_EXEC_TEST(I64RemU) {
WASM_EXEC_TEST(I64RemU_Trap) {
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_REMU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_REMU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(17, r.Call(uint64_t{217}, uint64_t{100}));
CHECK_TRAP64(r.Call(uint64_t{100}, uint64_t{0}));
CHECK_TRAP64(r.Call(uint64_t{1001}, uint64_t{0}));
@@ -296,7 +294,7 @@ WASM_EXEC_TEST(I64RemU_Trap) {
WASM_EXEC_TEST(I64And) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_AND(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_AND(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((i) & (j), r.Call(i, j)); }
}
@@ -304,7 +302,7 @@ WASM_EXEC_TEST(I64And) {
WASM_EXEC_TEST(I64Ior) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_IOR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_IOR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((i) | (j), r.Call(i, j)); }
}
@@ -312,7 +310,7 @@ WASM_EXEC_TEST(I64Ior) {
WASM_EXEC_TEST(I64Xor) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_XOR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((i) ^ (j), r.Call(i, j)); }
}
@@ -321,7 +319,7 @@ WASM_EXEC_TEST(I64Xor) {
WASM_EXEC_TEST(I64Shl) {
{
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -332,22 +330,22 @@ WASM_EXEC_TEST(I64Shl) {
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(0)));
+ r.Build({WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(0))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 0, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(32)));
+ r.Build({WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(32))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 32, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(20)));
+ r.Build({WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(20))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 20, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(40)));
+ r.Build({WASM_I64_SHL(WASM_LOCAL_GET(0), WASM_I64V_1(40))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 40, r.Call(i)); }
}
}
@@ -355,7 +353,7 @@ WASM_EXEC_TEST(I64Shl) {
WASM_EXEC_TEST(I64ShrU) {
{
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -366,22 +364,22 @@ WASM_EXEC_TEST(I64ShrU) {
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(0)));
+ r.Build({WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(0))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 0, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(32)));
+ r.Build({WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(32))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 32, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(20)));
+ r.Build({WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(20))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 20, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(40)));
+ r.Build({WASM_I64_SHR(WASM_LOCAL_GET(0), WASM_I64V_1(40))});
FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 40, r.Call(i)); }
}
}
@@ -389,7 +387,7 @@ WASM_EXEC_TEST(I64ShrU) {
WASM_EXEC_TEST(I64ShrS) {
{
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -400,29 +398,29 @@ WASM_EXEC_TEST(I64ShrS) {
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(0)));
+ r.Build({WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(0))});
FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 0, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(32)));
+ r.Build({WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(32))});
FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 32, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(20)));
+ r.Build({WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(20))});
FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 20, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(40)));
+ r.Build({WASM_I64_SAR(WASM_LOCAL_GET(0), WASM_I64V_1(40))});
FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 40, r.Call(i)); }
}
}
WASM_EXEC_TEST(I64Eq) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_EQ(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_EQ(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i == j ? 1 : 0, r.Call(i, j)); }
}
@@ -430,7 +428,7 @@ WASM_EXEC_TEST(I64Eq) {
WASM_EXEC_TEST(I64Ne) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_NE(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_NE(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i != j ? 1 : 0, r.Call(i, j)); }
}
@@ -438,7 +436,7 @@ WASM_EXEC_TEST(I64Ne) {
WASM_EXEC_TEST(I64LtS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_LTS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_LTS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i < j ? 1 : 0, r.Call(i, j)); }
}
@@ -446,7 +444,7 @@ WASM_EXEC_TEST(I64LtS) {
WASM_EXEC_TEST(I64LeS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_LES(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_LES(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i <= j ? 1 : 0, r.Call(i, j)); }
}
@@ -454,7 +452,7 @@ WASM_EXEC_TEST(I64LeS) {
WASM_EXEC_TEST(I64LtU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_LTU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_LTU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(i < j ? 1 : 0, r.Call(i, j)); }
}
@@ -462,7 +460,7 @@ WASM_EXEC_TEST(I64LtU) {
WASM_EXEC_TEST(I64LeU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_LEU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_LEU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(i <= j ? 1 : 0, r.Call(i, j)); }
}
@@ -470,7 +468,7 @@ WASM_EXEC_TEST(I64LeU) {
WASM_EXEC_TEST(I64GtS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_GTS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_GTS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i > j ? 1 : 0, r.Call(i, j)); }
}
@@ -478,7 +476,7 @@ WASM_EXEC_TEST(I64GtS) {
WASM_EXEC_TEST(I64GeS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_GES(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_GES(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i >= j ? 1 : 0, r.Call(i, j)); }
}
@@ -486,7 +484,7 @@ WASM_EXEC_TEST(I64GeS) {
WASM_EXEC_TEST(I64GtU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_GTU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_GTU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(i > j ? 1 : 0, r.Call(i, j)); }
}
@@ -494,7 +492,7 @@ WASM_EXEC_TEST(I64GtU) {
WASM_EXEC_TEST(I64GeU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_GEU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_GEU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(i >= j ? 1 : 0, r.Call(i, j)); }
}
@@ -503,20 +501,20 @@ WASM_EXEC_TEST(I64GeU) {
WASM_EXEC_TEST(I32ConvertI64) {
FOR_INT64_INPUTS(i) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(i)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_I64V(i))});
CHECK_EQ(static_cast<int32_t>(i), r.Call());
}
}
WASM_EXEC_TEST(I64SConvertI32) {
WasmRunner<int64_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I64_SCONVERT_I32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SCONVERT_I32(WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(i), r.Call(i)); }
}
WASM_EXEC_TEST(I64UConvertI32) {
WasmRunner<int64_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_I64_UCONVERT_I32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_UCONVERT_I32(WASM_LOCAL_GET(0))});
FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(i), r.Call(i)); }
}
@@ -531,7 +529,7 @@ WASM_EXEC_TEST(I64Popcnt) {
{38, 0xFFEDCBA09EDCBA09}};
WasmRunner<int64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_POPCNT(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_POPCNT(WASM_LOCAL_GET(0))});
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
@@ -539,7 +537,7 @@ WASM_EXEC_TEST(I64Popcnt) {
WASM_EXEC_TEST(F32SConvertI64) {
WasmRunner<float, int64_t> r(execution_tier);
- BUILD(r, WASM_F32_SCONVERT_I64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_SCONVERT_I64(WASM_LOCAL_GET(0))});
FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(i), r.Call(i)); }
}
@@ -626,7 +624,7 @@ WASM_EXEC_TEST(F32UConvertI64) {
{0x20000020000001, 0x5a000001},
{0xFFFFFe8000000001, 0x5f7FFFFF}};
WasmRunner<float, uint64_t> r(execution_tier);
- BUILD(r, WASM_F32_UCONVERT_I64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_UCONVERT_I64(WASM_LOCAL_GET(0))});
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(base::bit_cast<float>(values[i].expected),
r.Call(values[i].input));
@@ -635,7 +633,7 @@ WASM_EXEC_TEST(F32UConvertI64) {
WASM_EXEC_TEST(F64SConvertI64) {
WasmRunner<double, int64_t> r(execution_tier);
- BUILD(r, WASM_F64_SCONVERT_I64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_SCONVERT_I64(WASM_LOCAL_GET(0))});
FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(i), r.Call(i)); }
}
@@ -719,7 +717,7 @@ WASM_EXEC_TEST(F64UConvertI64) {
{0x8000000000000400, 0x43E0000000000000},
{0x8000000000000401, 0x43E0000000000001}};
WasmRunner<double, uint64_t> r(execution_tier);
- BUILD(r, WASM_F64_UCONVERT_I64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_UCONVERT_I64(WASM_LOCAL_GET(0))});
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(base::bit_cast<double>(values[i].expected),
r.Call(values[i].input));
@@ -728,7 +726,7 @@ WASM_EXEC_TEST(F64UConvertI64) {
WASM_EXEC_TEST(I64SConvertF32) {
WasmRunner<int64_t, float> r(execution_tier);
- BUILD(r, WASM_I64_SCONVERT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SCONVERT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
if (base::IsValueInRangeForNumericType<int64_t>(i)) {
@@ -741,7 +739,7 @@ WASM_EXEC_TEST(I64SConvertF32) {
WASM_EXEC_TEST(I64SConvertSatF32) {
WasmRunner<int64_t, float> r(execution_tier);
- BUILD(r, WASM_I64_SCONVERT_SAT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SCONVERT_SAT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
int64_t expected;
if (base::IsValueInRangeForNumericType<int64_t>(i)) {
@@ -760,7 +758,7 @@ WASM_EXEC_TEST(I64SConvertSatF32) {
WASM_EXEC_TEST(I64SConvertF64) {
WasmRunner<int64_t, double> r(execution_tier);
- BUILD(r, WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SCONVERT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
if (base::IsValueInRangeForNumericType<int64_t>(i)) {
@@ -773,7 +771,7 @@ WASM_EXEC_TEST(I64SConvertF64) {
WASM_EXEC_TEST(I64SConvertSatF64) {
WasmRunner<int64_t, double> r(execution_tier);
- BUILD(r, WASM_I64_SCONVERT_SAT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SCONVERT_SAT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
if (base::IsValueInRangeForNumericType<int64_t>(i)) {
@@ -792,7 +790,7 @@ WASM_EXEC_TEST(I64SConvertSatF64) {
WASM_EXEC_TEST(I64UConvertF32) {
WasmRunner<uint64_t, float> r(execution_tier);
- BUILD(r, WASM_I64_UCONVERT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_UCONVERT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
@@ -806,7 +804,7 @@ WASM_EXEC_TEST(I64UConvertF32) {
WASM_EXEC_TEST(I64UConvertSatF32) {
WasmRunner<int64_t, float> r(execution_tier);
- BUILD(r, WASM_I64_UCONVERT_SAT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_UCONVERT_SAT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
uint64_t expected;
if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
@@ -826,7 +824,7 @@ WASM_EXEC_TEST(I64UConvertSatF32) {
WASM_EXEC_TEST(I64UConvertF64) {
WasmRunner<uint64_t, double> r(execution_tier);
- BUILD(r, WASM_I64_UCONVERT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_UCONVERT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
@@ -840,7 +838,7 @@ WASM_EXEC_TEST(I64UConvertF64) {
WASM_EXEC_TEST(I64UConvertSatF64) {
WasmRunner<int64_t, double> r(execution_tier);
- BUILD(r, WASM_I64_UCONVERT_SAT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_UCONVERT_SAT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
@@ -869,23 +867,21 @@ WASM_EXEC_TEST(CallI64Parameter) {
WasmRunner<int32_t> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& t = r.NewFunction(&sig);
- BUILD(t, WASM_LOCAL_GET(i));
+ t.Build({WASM_LOCAL_GET(i)});
// Build the calling function.
- BUILD(
- r,
- WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
- t.function_index(), WASM_I64V_9(0xBCD12340000000B),
- WASM_I64V_9(0xBCD12340000000C), WASM_I32V_1(0xD),
- WASM_I32_CONVERT_I64(WASM_I64V_9(0xBCD12340000000E)),
- WASM_I64V_9(0xBCD12340000000F), WASM_I64V_10(0xBCD1234000000010),
- WASM_I64V_10(0xBCD1234000000011), WASM_I64V_10(0xBCD1234000000012),
- WASM_I64V_10(0xBCD1234000000013), WASM_I64V_10(0xBCD1234000000014),
- WASM_I64V_10(0xBCD1234000000015), WASM_I64V_10(0xBCD1234000000016),
- WASM_I64V_10(0xBCD1234000000017), WASM_I64V_10(0xBCD1234000000018),
- WASM_I64V_10(0xBCD1234000000019), WASM_I64V_10(0xBCD123400000001A),
- WASM_I64V_10(0xBCD123400000001B), WASM_I64V_10(0xBCD123400000001C),
- WASM_I64V_10(0xBCD123400000001D))));
+ r.Build({WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
+ t.function_index(), WASM_I64V_9(0xBCD12340000000B),
+ WASM_I64V_9(0xBCD12340000000C), WASM_I32V_1(0xD),
+ WASM_I32_CONVERT_I64(WASM_I64V_9(0xBCD12340000000E)),
+ WASM_I64V_9(0xBCD12340000000F), WASM_I64V_10(0xBCD1234000000010),
+ WASM_I64V_10(0xBCD1234000000011), WASM_I64V_10(0xBCD1234000000012),
+ WASM_I64V_10(0xBCD1234000000013), WASM_I64V_10(0xBCD1234000000014),
+ WASM_I64V_10(0xBCD1234000000015), WASM_I64V_10(0xBCD1234000000016),
+ WASM_I64V_10(0xBCD1234000000017), WASM_I64V_10(0xBCD1234000000018),
+ WASM_I64V_10(0xBCD1234000000019), WASM_I64V_10(0xBCD123400000001A),
+ WASM_I64V_10(0xBCD123400000001B), WASM_I64V_10(0xBCD123400000001C),
+ WASM_I64V_10(0xBCD123400000001D)))});
CHECK_EQ(i + 0xB, r.Call());
}
@@ -900,11 +896,11 @@ WASM_EXEC_TEST(CallI64Return) {
WasmRunner<int64_t> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& t = r.NewFunction(&sig);
- BUILD(t, WASM_LOCAL_GET(0), WASM_I32V(7));
+ t.Build({WASM_LOCAL_GET(0), WASM_I32V(7)});
// Build the first calling function.
- BUILD(r, WASM_CALL_FUNCTION(t.function_index(), WASM_I64V(0xBCD12340000000B)),
- WASM_DROP);
+ r.Build({WASM_CALL_FUNCTION(t.function_index(), WASM_I64V(0xBCD12340000000B)),
+ WASM_DROP});
CHECK_EQ(0xBCD12340000000B, r.Call());
}
@@ -914,13 +910,13 @@ void TestI64Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
{
WasmRunner<int64_t> r(execution_tier);
// return K op K
- BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
+ r.Build({WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
// return a op b
- BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(expected, r.Call(a, b));
}
}
@@ -930,13 +926,13 @@ void TestI64Cmp(TestExecutionTier execution_tier, WasmOpcode opcode,
{
WasmRunner<int32_t> r(execution_tier);
// return K op K
- BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
+ r.Build({WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
// return a op b
- BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(expected, r.Call(a, b));
}
}
@@ -1044,7 +1040,7 @@ WASM_EXEC_TEST(I64Clz) {
{64, 0x0000000000000000}};
WasmRunner<int64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_CLZ(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_CLZ(WASM_LOCAL_GET(0))});
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
@@ -1089,7 +1085,7 @@ WASM_EXEC_TEST(I64Ctz) {
{0, 0x000000009AFDBC81}};
WasmRunner<int64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_CTZ(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_CTZ(WASM_LOCAL_GET(0))});
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
@@ -1106,7 +1102,7 @@ WASM_EXEC_TEST(I64Popcnt2) {
{38, 0xFFEDCBA09EDCBA09}};
WasmRunner<int64_t, uint64_t> r(execution_tier);
- BUILD(r, WASM_I64_POPCNT(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_POPCNT(WASM_LOCAL_GET(0))});
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
@@ -1117,25 +1113,25 @@ WASM_EXEC_TEST(I64Popcnt2) {
WASM_EXEC_TEST(I64WasmRunner) {
FOR_INT64_INPUTS(i) {
WasmRunner<int64_t> r(execution_tier);
- BUILD(r, WASM_I64V(i));
+ r.Build({WASM_I64V(i)});
CHECK_EQ(i, r.Call());
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_LOCAL_GET(0));
+ r.Build({WASM_LOCAL_GET(0)});
FOR_INT64_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_XOR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(i ^ j, r.Call(i, j)); }
}
}
{
WasmRunner<int64_t, int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_LOCAL_GET(0),
- WASM_I64_XOR(WASM_LOCAL_GET(1), WASM_LOCAL_GET(2))));
+ r.Build({WASM_I64_XOR(WASM_LOCAL_GET(0),
+ WASM_I64_XOR(WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(i ^ j ^ j, r.Call(i, j, j));
@@ -1146,10 +1142,10 @@ WASM_EXEC_TEST(I64WasmRunner) {
}
{
WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_LOCAL_GET(0),
- WASM_I64_XOR(WASM_LOCAL_GET(1),
- WASM_I64_XOR(WASM_LOCAL_GET(2),
- WASM_LOCAL_GET(3)))));
+ r.Build({WASM_I64_XOR(
+ WASM_LOCAL_GET(0),
+ WASM_I64_XOR(WASM_LOCAL_GET(1),
+ WASM_I64_XOR(WASM_LOCAL_GET(2), WASM_LOCAL_GET(3))))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(i ^ j ^ j ^ j, r.Call(i, j, j, j));
@@ -1166,11 +1162,11 @@ WASM_EXEC_TEST(Call_Int64Sub) {
// Build the target function.
TestSignatures sigs;
WasmFunctionCompiler& t = r.NewFunction(sigs.l_ll());
- BUILD(t, WASM_I64_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t.Build({WASM_I64_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
// Build the caller function.
- BUILD(r, WASM_CALL_FUNCTION(t.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1)));
+ r.Build({WASM_CALL_FUNCTION(t.function_index(), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1))});
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -1227,8 +1223,8 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
- BUILD(r, WASM_I64_REINTERPRET_F64(
- WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
+ r.Build({WASM_I64_REINTERPRET_F64(
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO))});
FOR_INT32_INPUTS(i) {
int64_t expected =
@@ -1240,8 +1236,8 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
WasmRunner<int64_t> r(execution_tier);
- BUILD(r, WASM_I64_REINTERPRET_F64(WASM_SEQ(kExprF64Const, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xF4, 0x7F)));
+ r.Build({WASM_I64_REINTERPRET_F64(WASM_SEQ(kExprF64Const, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xF4, 0x7F))});
// This is a signalling nan.
CHECK_EQ(0x7FF4000000000000, r.Call());
@@ -1252,10 +1248,9 @@ WASM_EXEC_TEST(F64ReinterpretI64) {
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
- BUILD(r,
- WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
- WASM_F64_REINTERPRET_I64(WASM_LOCAL_GET(0))),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
+ WASM_F64_REINTERPRET_I64(WASM_LOCAL_GET(0))),
+ WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) {
int64_t expected =
@@ -1271,7 +1266,7 @@ WASM_EXEC_TEST(LoadMemI64) {
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
r.builder().RandomizeMemory(1111);
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO));
+ r.Build({WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO)});
r.builder().WriteMemory<int64_t>(&memory[0], 0x1ABBCCDD00112233LL);
CHECK_EQ(0x1ABBCCDD00112233LL, r.Call());
@@ -1290,8 +1285,8 @@ WASM_EXEC_TEST(LoadMemI64_alignment) {
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
r.builder().RandomizeMemory(1111);
- BUILD(r,
- WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, alignment));
+ r.Build(
+ {WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, alignment)});
r.builder().WriteMemory<int64_t>(&memory[0], 0x1ABBCCDD00112233LL);
CHECK_EQ(0x1ABBCCDD00112233LL, r.Call());
@@ -1311,17 +1306,16 @@ WASM_EXEC_TEST(MemI64_Sum) {
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
const byte kSum = r.AllocateLocal(kWasmI64);
- BUILD(
- r,
- WASM_WHILE(
- WASM_LOCAL_GET(0),
- WASM_BLOCK(WASM_LOCAL_SET(
- kSum, WASM_I64_ADD(WASM_LOCAL_GET(kSum),
- WASM_LOAD_MEM(MachineType::Int64(),
- WASM_LOCAL_GET(0)))),
- WASM_LOCAL_SET(
- 0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(8))))),
- WASM_LOCAL_GET(1));
+ r.Build(
+ {WASM_WHILE(
+ WASM_LOCAL_GET(0),
+ WASM_BLOCK(WASM_LOCAL_SET(
+ kSum, WASM_I64_ADD(WASM_LOCAL_GET(kSum),
+ WASM_LOAD_MEM(MachineType::Int64(),
+ WASM_LOCAL_GET(0)))),
+ WASM_LOCAL_SET(
+ 0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(8))))),
+ WASM_LOCAL_GET(1)});
// Run 4 trials.
for (int i = 0; i < 3; i++) {
@@ -1342,10 +1336,9 @@ WASM_EXEC_TEST(StoreMemI64_alignment) {
WasmRunner<int64_t, int64_t> r(execution_tier);
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
- BUILD(r,
- WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, i,
- WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, i,
+ WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(0)});
r.builder().RandomizeMemory(1111);
r.builder().WriteMemory<int64_t>(&memory[0], 0);
@@ -1358,11 +1351,10 @@ WASM_EXEC_TEST(I64Global) {
WasmRunner<int32_t, int32_t> r(execution_tier);
int64_t* global = r.builder().AddGlobal<int64_t>();
// global = global + p0
- BUILD(r,
- WASM_GLOBAL_SET(0,
- WASM_I64_AND(WASM_GLOBAL_GET(0),
- WASM_I64_SCONVERT_I32(WASM_LOCAL_GET(0)))),
- WASM_ZERO);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_I64_AND(WASM_GLOBAL_GET(0),
+ WASM_I64_SCONVERT_I32(WASM_LOCAL_GET(0)))),
+ WASM_ZERO});
r.builder().WriteMemory<int64_t>(global, 0xFFFFFFFFFFFFFFFFLL);
for (int i = 9; i < 444444; i += 111111) {
@@ -1374,7 +1366,7 @@ WASM_EXEC_TEST(I64Global) {
WASM_EXEC_TEST(I64Eqz) {
WasmRunner<int32_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_EQZ(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_EQZ(WASM_LOCAL_GET(0))});
FOR_INT64_INPUTS(i) {
int32_t result = i == 0 ? 1 : 0;
@@ -1384,7 +1376,7 @@ WASM_EXEC_TEST(I64Eqz) {
WASM_EXEC_TEST(I64Ror) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_ROR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_ROR(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -1396,7 +1388,7 @@ WASM_EXEC_TEST(I64Ror) {
WASM_EXEC_TEST(I64Rol) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_ROL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_ROL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -1420,10 +1412,9 @@ WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
byte* memory = r.builder().AddMemoryElems<byte>(num_bytes);
r.builder().RandomizeMemory(1119 + static_cast<int>(m));
- BUILD(r,
- WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_LOCAL_GET(0),
- WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
- WASM_ZERO);
+ r.Build({WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_LOCAL_GET(0),
+ WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
+ WASM_ZERO});
byte memsize = machineTypes[m].MemSize();
uint32_t boundary = num_bytes - 8 - memsize;
@@ -1448,10 +1439,10 @@ WASM_EXEC_TEST(Store_i64_narrowed) {
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
constexpr uint64_t kPattern = 0x0123456789abcdef;
- BUILD(r, WASM_LOCAL_GET(0), // index
- WASM_LOCAL_GET(1), // value
- opcode, ZERO_ALIGNMENT, ZERO_OFFSET, // store
- WASM_ZERO); // return value
+ r.Build({WASM_LOCAL_GET(0), // index
+ WASM_LOCAL_GET(1), // value
+ opcode, ZERO_ALIGNMENT, ZERO_OFFSET, // store
+ WASM_ZERO}); // return value
for (int i = 0; i <= kBytes - stored_size_in_bytes; ++i) {
uint64_t pattern = base::bits::RotateLeft64(kPattern, i % 64);
@@ -1469,16 +1460,16 @@ WASM_EXEC_TEST(Store_i64_narrowed) {
WASM_EXEC_TEST(UnalignedInt64Load) {
WasmRunner<uint64_t> r(execution_tier);
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
- BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_ONE, 3));
+ r.Build({WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_ONE, 3)});
r.Call();
}
WASM_EXEC_TEST(UnalignedInt64Store) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(int64_t));
- BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ONE, 3,
+ r.Build({WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ONE, 3,
WASM_I64V_1(1)),
- WASM_I32V_1(12)));
+ WASM_I32V_1(12))});
r.Call();
}
@@ -1503,7 +1494,7 @@ static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
ADD_CODE(code, kExprI32Const, 0);
ADD_CODE(code, kExprCallIndirect, 1, TABLE_ZERO);
- t.Build(&code[0], &code[0] + code.size());
+ t.Build(base::VectorOf(code));
}
}
@@ -1541,7 +1532,7 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
b.AddParam(ValueType::For(memtypes[i]));
}
WasmFunctionCompiler& f = r.NewFunction(b.Build());
- BUILD(f, WASM_LOCAL_GET(which));
+ f.Build({WASM_LOCAL_GET(which)});
// =========================================================================
// Build the calling function.
@@ -1595,11 +1586,11 @@ WASM_EXEC_TEST(Regress5874) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
- BUILD(r, kExprI64Const, 0x00, // --
- kExprI32ConvertI64, // --
- kExprI64Const, 0x00, // --
- kExprI64StoreMem, 0x03, 0x00, // --
- kExprI32Const, 0x00); // --
+ r.Build({kExprI64Const, 0x00, // --
+ kExprI32ConvertI64, // --
+ kExprI64Const, 0x00, // --
+ kExprI64StoreMem, 0x03, 0x00, // --
+ kExprI32Const, 0x00}); // --
r.Call();
}
@@ -1607,7 +1598,7 @@ WASM_EXEC_TEST(Regress5874) {
WASM_EXEC_TEST(Regression_6858) {
// WasmRunner with 5 params and returns, which is the maximum.
WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I64_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
int64_t dividend = 15;
int64_t divisor = 0;
int64_t filler = 34;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 2e71a86a1d..286578a580 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -31,9 +31,9 @@ namespace wasm {
void RunWasm_##name(TestExecutionTier execution_tier)
ASMJS_EXEC_TEST(Int32AsmjsDivS) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_BINOP(kExprI32AsmjsDivS, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build(
+ {WASM_BINOP(kExprI32AsmjsDivS, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
CHECK_EQ(0, r.Call(100, 0));
@@ -43,9 +43,9 @@ ASMJS_EXEC_TEST(Int32AsmjsDivS) {
}
ASMJS_EXEC_TEST(Int32AsmjsRemS) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_BINOP(kExprI32AsmjsRemS, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build(
+ {WASM_BINOP(kExprI32AsmjsRemS, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(33, r.Call(133, 100));
CHECK_EQ(0, r.Call(kMin, -1));
@@ -55,9 +55,9 @@ ASMJS_EXEC_TEST(Int32AsmjsRemS) {
}
ASMJS_EXEC_TEST(Int32AsmjsDivU) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_BINOP(kExprI32AsmjsDivU, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build(
+ {WASM_BINOP(kExprI32AsmjsDivU, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
CHECK_EQ(0, r.Call(kMin, -1));
@@ -67,9 +67,9 @@ ASMJS_EXEC_TEST(Int32AsmjsDivU) {
}
ASMJS_EXEC_TEST(Int32AsmjsRemU) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_BINOP(kExprI32AsmjsRemU, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build(
+ {WASM_BINOP(kExprI32AsmjsRemU, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(17, r.Call(217, 100));
CHECK_EQ(0, r.Call(100, 0));
@@ -79,9 +79,8 @@ ASMJS_EXEC_TEST(Int32AsmjsRemU) {
}
ASMJS_EXEC_TEST(I32AsmjsSConvertF32) {
- WasmRunner<int32_t, float> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_LOCAL_GET(0)));
+ WasmRunner<int32_t, float> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build({WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
int32_t expected = DoubleToInt32(i);
@@ -90,9 +89,8 @@ ASMJS_EXEC_TEST(I32AsmjsSConvertF32) {
}
ASMJS_EXEC_TEST(I32AsmjsSConvertF64) {
- WasmRunner<int32_t, double> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_LOCAL_GET(0)));
+ WasmRunner<int32_t, double> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build({WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
int32_t expected = DoubleToInt32(i);
@@ -101,9 +99,8 @@ ASMJS_EXEC_TEST(I32AsmjsSConvertF64) {
}
ASMJS_EXEC_TEST(I32AsmjsUConvertF32) {
- WasmRunner<uint32_t, float> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_LOCAL_GET(0)));
+ WasmRunner<uint32_t, float> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build({WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
uint32_t expected = DoubleToUint32(i);
@@ -112,9 +109,8 @@ ASMJS_EXEC_TEST(I32AsmjsUConvertF32) {
}
ASMJS_EXEC_TEST(I32AsmjsUConvertF64) {
- WasmRunner<uint32_t, double> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_LOCAL_GET(0)));
+ WasmRunner<uint32_t, double> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build({WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
uint32_t expected = DoubleToUint32(i);
@@ -123,12 +119,11 @@ ASMJS_EXEC_TEST(I32AsmjsUConvertF64) {
}
ASMJS_EXEC_TEST(LoadMemI32_oob_asm) {
- WasmRunner<int32_t, uint32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
+ WasmRunner<int32_t, uint32_t> r(execution_tier, kAsmJsSloppyOrigin);
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
r.builder().RandomizeMemory(1112);
- BUILD(r, WASM_UNOP(kExprI32AsmjsLoadMem, WASM_LOCAL_GET(0)));
+ r.Build({WASM_UNOP(kExprI32AsmjsLoadMem, WASM_LOCAL_GET(0))});
memory[0] = 999999;
CHECK_EQ(999999, r.Call(0u));
@@ -143,12 +138,11 @@ ASMJS_EXEC_TEST(LoadMemI32_oob_asm) {
}
ASMJS_EXEC_TEST(LoadMemF32_oob_asm) {
- WasmRunner<float, uint32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
+ WasmRunner<float, uint32_t> r(execution_tier, kAsmJsSloppyOrigin);
float* memory = r.builder().AddMemoryElems<float>(8);
r.builder().RandomizeMemory(1112);
- BUILD(r, WASM_UNOP(kExprF32AsmjsLoadMem, WASM_LOCAL_GET(0)));
+ r.Build({WASM_UNOP(kExprF32AsmjsLoadMem, WASM_LOCAL_GET(0))});
memory[0] = 9999.5f;
CHECK_EQ(9999.5f, r.Call(0u));
@@ -163,12 +157,11 @@ ASMJS_EXEC_TEST(LoadMemF32_oob_asm) {
}
ASMJS_EXEC_TEST(LoadMemF64_oob_asm) {
- WasmRunner<double, uint32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
+ WasmRunner<double, uint32_t> r(execution_tier, kAsmJsSloppyOrigin);
double* memory = r.builder().AddMemoryElems<double>(8);
r.builder().RandomizeMemory(1112);
- BUILD(r, WASM_UNOP(kExprF64AsmjsLoadMem, WASM_LOCAL_GET(0)));
+ r.Build({WASM_UNOP(kExprF64AsmjsLoadMem, WASM_LOCAL_GET(0))});
memory[0] = 9799.5;
CHECK_EQ(9799.5, r.Call(0u));
@@ -185,13 +178,12 @@ ASMJS_EXEC_TEST(LoadMemF64_oob_asm) {
}
ASMJS_EXEC_TEST(StoreMemI32_oob_asm) {
- WasmRunner<int32_t, uint32_t, uint32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
+ WasmRunner<int32_t, uint32_t, uint32_t> r(execution_tier, kAsmJsSloppyOrigin);
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
r.builder().RandomizeMemory(1112);
- BUILD(r, WASM_BINOP(kExprI32AsmjsStoreMem, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1)));
+ r.Build({WASM_BINOP(kExprI32AsmjsStoreMem, WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1))});
memory[0] = 7777;
CHECK_EQ(999999, r.Call(0u, 999999));
@@ -208,9 +200,8 @@ ASMJS_EXEC_TEST(StoreMemI32_oob_asm) {
ASMJS_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_I32_ASMJS_DIVS(WASM_LOCAL_GET(0), WASM_I32V_1(denom)));
+ WasmRunner<int32_t, int32_t> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build({WASM_I32_ASMJS_DIVS(WASM_LOCAL_GET(0), WASM_I32V_1(denom))});
FOR_INT32_INPUTS(i) {
if (denom == 0) {
CHECK_EQ(0, r.Call(i));
@@ -225,9 +216,8 @@ ASMJS_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
ASMJS_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_I32_ASMJS_REMS(WASM_LOCAL_GET(0), WASM_I32V_1(denom)));
+ WasmRunner<int32_t, int32_t> r(execution_tier, kAsmJsSloppyOrigin);
+ r.Build({WASM_I32_ASMJS_REMS(WASM_LOCAL_GET(0), WASM_I32V_1(denom))});
FOR_INT32_INPUTS(i) {
if (denom == 0) {
CHECK_EQ(0, r.Call(i));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index a83738eb87..205bf9badc 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -17,8 +17,8 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t initial = i;
@@ -45,8 +45,8 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord16));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t initial = i;
@@ -72,8 +72,8 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord8));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t initial = i;
@@ -98,9 +98,9 @@ WASM_EXEC_TEST(I32AtomicCompareExchange) {
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(
- kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t initial = i;
@@ -118,10 +118,9 @@ WASM_EXEC_TEST(I32AtomicCompareExchange16U) {
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange16U,
- WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1),
- MachineRepresentation::kWord16));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange16U, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t initial = i;
@@ -138,10 +137,9 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r,
- WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange8U, WASM_I32V_1(0),
- WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- MachineRepresentation::kWord8));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange8U, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t initial = i;
@@ -159,9 +157,9 @@ WASM_EXEC_TEST(I32AtomicCompareExchange_fail) {
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(
- kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord32)});
// The original value at the memory location.
uint32_t old_val = 4;
@@ -180,8 +178,8 @@ WASM_EXEC_TEST(I32AtomicLoad) {
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t expected = i;
@@ -195,8 +193,8 @@ WASM_EXEC_TEST(I32AtomicLoad16U) {
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
- MachineRepresentation::kWord16));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
+ MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t expected = i;
@@ -209,8 +207,8 @@ WASM_EXEC_TEST(I32AtomicLoad8U) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
- MachineRepresentation::kWord8));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
+ MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t expected = i;
@@ -225,11 +223,11 @@ WASM_EXEC_TEST(I32AtomicStoreLoad) {
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
- MachineRepresentation::kWord32),
- WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
- MachineRepresentation::kWord32));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord32),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t expected = i;
@@ -244,12 +242,11 @@ WASM_EXEC_TEST(I32AtomicStoreLoad16U) {
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(
- r,
- WASM_ATOMICS_STORE_OP(kExprI32AtomicStore16U, WASM_ZERO,
- WASM_LOCAL_GET(0), MachineRepresentation::kWord16),
- WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
- MachineRepresentation::kWord16));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI32AtomicStore16U, WASM_ZERO,
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord16),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad16U, WASM_ZERO,
+ MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t expected = i;
@@ -263,11 +260,11 @@ WASM_EXEC_TEST(I32AtomicStoreLoad8U) {
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI32AtomicStore8U, WASM_ZERO,
- WASM_LOCAL_GET(0), MachineRepresentation::kWord8),
- WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
- MachineRepresentation::kWord8));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI32AtomicStore8U, WASM_ZERO,
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord8),
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
+ MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t expected = i;
@@ -282,11 +279,11 @@ WASM_EXEC_TEST(I32AtomicStoreParameter) {
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
r.builder().SetHasSharedMemory();
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
- MachineRepresentation::kWord8),
- WASM_ATOMICS_BINOP(kExprI32AtomicAdd, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord32));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord8),
+ WASM_ATOMICS_BINOP(kExprI32AtomicAdd, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord32)});
CHECK_EQ(10, r.Call(10));
CHECK_EQ(20, r.builder().ReadMemory(&memory[0]));
}
@@ -298,7 +295,7 @@ WASM_EXEC_TEST(AtomicFence) {
// modules which declare no memory, or a non-shared memory, without causing a
// validation error.
- BUILD(r, WASM_ATOMICS_FENCE, WASM_ZERO);
+ r.Build({WASM_ATOMICS_FENCE, WASM_ZERO});
CHECK_EQ(0, r.Call());
}
@@ -308,10 +305,11 @@ WASM_EXEC_TEST(AtomicStoreNoConsideredEffectful) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
- WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_I32V_1(20),
- MachineRepresentation::kWord32),
- kExprI64Eqz);
+ r.Build(
+ {WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_I32V_1(20),
+ MachineRepresentation::kWord32),
+ kExprI64Eqz});
CHECK_EQ(1, r.Call());
}
@@ -321,10 +319,10 @@ void RunNoEffectTest(TestExecutionTier execution_tier, WasmOpcode wasm_op) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
- WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I32V_1(20),
- MachineRepresentation::kWord32),
- WASM_DROP, kExprI64Eqz);
+ r.Build({WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I32V_1(20),
+ MachineRepresentation::kWord32),
+ WASM_DROP, kExprI64Eqz});
CHECK_EQ(1, r.Call());
}
@@ -342,11 +340,11 @@ WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO),
- WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange, WASM_ZERO,
- WASM_ZERO, WASM_I32V_1(30),
- MachineRepresentation::kWord32),
- WASM_DROP, kExprI32Eqz);
+ r.Build({WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO),
+ WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange, WASM_ZERO,
+ WASM_ZERO, WASM_I32V_1(30),
+ MachineRepresentation::kWord32),
+ WASM_DROP, kExprI32Eqz});
CHECK_EQ(1, r.Call());
}
@@ -354,8 +352,8 @@ WASM_EXEC_TEST(I32AtomicLoad_trap) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_I32V_3(kWasmPageSize),
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_I32V_3(kWasmPageSize),
+ MachineRepresentation::kWord32)});
CHECK_TRAP(r.Call());
}
@@ -363,8 +361,8 @@ WASM_EXEC_TEST(I64AtomicLoad_trap) {
WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_I32V_3(kWasmPageSize),
- MachineRepresentation::kWord64));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_I32V_3(kWasmPageSize),
+ MachineRepresentation::kWord64)});
CHECK_TRAP64(r.Call());
}
@@ -372,10 +370,10 @@ WASM_EXEC_TEST(I32AtomicStore_trap) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
r.builder().AddMemory(kWasmPageSize);
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_I32V_3(kWasmPageSize),
- WASM_ZERO, MachineRepresentation::kWord32),
- WASM_ZERO);
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_I32V_3(kWasmPageSize),
+ WASM_ZERO, MachineRepresentation::kWord32),
+ WASM_ZERO});
CHECK_TRAP(r.Call());
}
@@ -383,10 +381,10 @@ WASM_EXEC_TEST(I64AtomicStore_trap) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
r.builder().AddMemory(kWasmPageSize);
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_I32V_3(kWasmPageSize),
- WASM_ZERO64, MachineRepresentation::kWord64),
- WASM_ZERO);
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_I32V_3(kWasmPageSize),
+ WASM_ZERO64, MachineRepresentation::kWord64),
+ WASM_ZERO});
CHECK_TRAP(r.Call());
}
@@ -394,10 +392,10 @@ WASM_EXEC_TEST(I32AtomicLoad_NotOptOut) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_I32_AND(
- WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_I32V_3(kWasmPageSize),
- MachineRepresentation::kWord32),
- WASM_ZERO));
+ r.Build({WASM_I32_AND(
+ WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad, WASM_I32V_3(kWasmPageSize),
+ MachineRepresentation::kWord32),
+ WASM_ZERO)});
CHECK_TRAP(r.Call());
}
@@ -406,8 +404,8 @@ void RunU32BinOp_OOB(TestExecutionTier execution_tier, WasmOpcode wasm_op) {
r.builder().AddMemory(kWasmPageSize);
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_3(kWasmPageSize),
- WASM_ZERO, MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_3(kWasmPageSize), WASM_ZERO,
+ MachineRepresentation::kWord32)});
CHECK_TRAP(r.Call());
}
@@ -424,8 +422,8 @@ void RunU64BinOp_OOB(TestExecutionTier execution_tier, WasmOpcode wasm_op) {
r.builder().AddMemory(kWasmPageSize);
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_3(kWasmPageSize),
- WASM_ZERO64, MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_3(kWasmPageSize), WASM_ZERO64,
+ MachineRepresentation::kWord32)});
CHECK_TRAP64(r.Call());
}
@@ -442,10 +440,9 @@ WASM_EXEC_TEST(I32AtomicCompareExchange_trap) {
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(
- kExprI32AtomicCompareExchange, WASM_I32V_3(kWasmPageSize),
- WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange, WASM_I32V_3(kWasmPageSize),
+ WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t initial = i;
@@ -460,10 +457,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchange_trap) {
WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange, WASM_I32V_3(kWasmPageSize),
- WASM_ZERO64, WASM_ZERO64,
- MachineRepresentation::kWord64));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_3(kWasmPageSize), WASM_ZERO64,
+ WASM_ZERO64, MachineRepresentation::kWord64)});
CHECK_TRAP64(r.Call());
}
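
The hunks above, and those in the files that follow, all apply the same mechanical change: the variadic BUILD(runner, ...) macro is replaced by a Build() method that takes a braced initializer list of encoded function-body bytes. A minimal stand-in for that interface — using a hypothetical FakeRunner rather than the real WasmRunner, so this is only a sketch of the pattern, not V8's implementation — could look like:

#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the test runner: Build() stores the raw wasm
// function-body bytes handed to it as one braced list.
class FakeRunner {
 public:
  void Build(std::initializer_list<uint8_t> code) {
    body_.assign(code.begin(), code.end());
  }
  std::size_t body_size() const { return body_.size(); }

 private:
  std::vector<uint8_t> body_;
};

int main() {
  FakeRunner r;
  // The braced list replaces the old variadic macro arguments.
  r.Build({0x41, 0x00, 0x0b});  // i32.const 0 ; end
  std::cout << r.body_size() << " bytes\n";  // prints "3 bytes"
}

Passing the bytes as a single initializer_list argument sidesteps the comma-handling quirks of variadic macros and keeps the body as an ordinary function argument the compiler can type-check.
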
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 7b1d58e9d3..8c1fe8d063 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -17,8 +17,8 @@ void RunU64BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord64));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord64)});
FOR_UINT64_INPUTS(i) {
uint64_t initial = i;
@@ -45,8 +45,8 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t initial = i;
@@ -73,8 +73,8 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord16));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t initial = i;
@@ -100,8 +100,8 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord8));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t initial = i;
@@ -126,9 +126,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchange) {
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord64));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord64)});
FOR_UINT64_INPUTS(i) {
uint64_t initial = i;
@@ -146,10 +146,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32U) {
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange32U,
- WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1),
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange32U, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t initial = i;
@@ -167,10 +166,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchange16U) {
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange16U,
- WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1),
- MachineRepresentation::kWord16));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange16U, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t initial = i;
@@ -187,10 +185,9 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r,
- WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange8U, WASM_I32V_1(0),
- WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- MachineRepresentation::kWord8));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange8U, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t initial = i;
FOR_UINT8_INPUTS(j) {
@@ -207,8 +204,8 @@ WASM_EXEC_TEST(I64AtomicLoad) {
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
- MachineRepresentation::kWord64));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord64)});
FOR_UINT64_INPUTS(i) {
uint64_t expected = i;
@@ -222,8 +219,8 @@ WASM_EXEC_TEST(I64AtomicLoad32U) {
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad32U, WASM_ZERO,
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad32U, WASM_ZERO,
+ MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t expected = i;
@@ -237,8 +234,8 @@ WASM_EXEC_TEST(I64AtomicLoad16U) {
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad16U, WASM_ZERO,
- MachineRepresentation::kWord16));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad16U, WASM_ZERO,
+ MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t expected = i;
@@ -251,8 +248,8 @@ WASM_EXEC_TEST(I64AtomicLoad8U) {
WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
- MachineRepresentation::kWord8));
+ r.Build({WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
+ MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t expected = i;
@@ -267,11 +264,11 @@ WASM_EXEC_TEST(I64AtomicStoreLoad) {
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
- MachineRepresentation::kWord64),
- WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
- MachineRepresentation::kWord64));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord64),
+ WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord64)});
FOR_UINT64_INPUTS(i) {
uint64_t expected = i;
@@ -286,12 +283,11 @@ WASM_EXEC_TEST(I64AtomicStoreLoad32U) {
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(
- r,
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore32U, WASM_ZERO,
- WASM_LOCAL_GET(0), MachineRepresentation::kWord32),
- WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad32U, WASM_ZERO,
- MachineRepresentation::kWord32));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI64AtomicStore32U, WASM_ZERO,
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord32),
+ WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad32U, WASM_ZERO,
+ MachineRepresentation::kWord32)});
FOR_UINT32_INPUTS(i) {
uint32_t expected = i;
@@ -306,12 +302,11 @@ WASM_EXEC_TEST(I64AtomicStoreLoad16U) {
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
- BUILD(
- r,
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore16U, WASM_ZERO,
- WASM_LOCAL_GET(0), MachineRepresentation::kWord16),
- WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad16U, WASM_ZERO,
- MachineRepresentation::kWord16));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI64AtomicStore16U, WASM_ZERO,
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord16),
+ WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad16U, WASM_ZERO,
+ MachineRepresentation::kWord16)});
FOR_UINT16_INPUTS(i) {
uint16_t expected = i;
@@ -325,11 +320,11 @@ WASM_EXEC_TEST(I64AtomicStoreLoad8U) {
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore8U, WASM_ZERO,
- WASM_LOCAL_GET(0), MachineRepresentation::kWord8),
- WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
- MachineRepresentation::kWord8));
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI64AtomicStore8U, WASM_ZERO,
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord8),
+ WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
+ MachineRepresentation::kWord8)});
FOR_UINT8_INPUTS(i) {
uint8_t expected = i;
@@ -347,10 +342,9 @@ void RunDropTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r,
- WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord64),
- WASM_DROP, WASM_LOCAL_GET(0));
+ r.Build({WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord64),
+ WASM_DROP, WASM_LOCAL_GET(0)});
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
r.builder().WriteMemory(&memory[0], initial);
@@ -372,10 +366,10 @@ WASM_EXEC_TEST(I64AtomicSub16UDrop) {
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
r.builder().SetHasSharedMemory();
- BUILD(r,
- WASM_ATOMICS_BINOP(kExprI64AtomicSub16U, WASM_I32V_1(0),
- WASM_LOCAL_GET(0), MachineRepresentation::kWord16),
- WASM_DROP, WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_ATOMICS_BINOP(kExprI64AtomicSub16U, WASM_I32V_1(0),
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord16),
+ WASM_DROP, WASM_LOCAL_GET(0)});
uint16_t initial = 0x7, local = 0xffe0;
r.builder().WriteMemory(&memory[0], initial);
@@ -389,11 +383,10 @@ WASM_EXEC_TEST(I64AtomicCompareExchangeDrop) {
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r,
- WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_I32V_1(0),
- WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- MachineRepresentation::kWord64),
- WASM_DROP, WASM_LOCAL_GET(1));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord64),
+ WASM_DROP, WASM_LOCAL_GET(1)});
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
r.builder().WriteMemory(&memory[0], initial);
@@ -408,12 +401,12 @@ WASM_EXEC_TEST(I64AtomicStoreLoadDrop) {
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r,
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
- MachineRepresentation::kWord64),
- WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
+ r.Build(
+ {WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_LOCAL_GET(0),
MachineRepresentation::kWord64),
- WASM_DROP, WASM_LOCAL_GET(1));
+ WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
+ MachineRepresentation::kWord64),
+ WASM_DROP, WASM_LOCAL_GET(1)});
uint64_t store_value = 0x1111111111111111, expected = 0xC0DE;
CHECK_EQ(expected, r.Call(store_value, expected));
@@ -426,10 +419,10 @@ WASM_EXEC_TEST(I64AtomicAddConvertDrop) {
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r,
- WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- MachineRepresentation::kWord64),
- kExprI32ConvertI64, WASM_DROP, WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ MachineRepresentation::kWord64),
+ kExprI32ConvertI64, WASM_DROP, WASM_LOCAL_GET(0)});
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
r.builder().WriteMemory(&memory[0], initial);
@@ -444,8 +437,8 @@ WASM_EXEC_TEST(I64AtomicLoadConvertDrop) {
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
- kExprI64AtomicLoad, WASM_ZERO, MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
+ kExprI64AtomicLoad, WASM_ZERO, MachineRepresentation::kWord64))});
uint64_t initial = 0x1111222233334444;
r.builder().WriteMemory(&memory[0], initial);
@@ -461,9 +454,8 @@ void RunConvertTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_LOCAL_GET(0),
- MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
+ wasm_op, WASM_ZERO, WASM_LOCAL_GET(0), MachineRepresentation::kWord64))});
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
r.builder().WriteMemory(&memory[0], initial);
@@ -485,9 +477,9 @@ WASM_EXEC_TEST(I64AtomicConvertCompareExchange) {
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord64))});
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
r.builder().WriteMemory(&memory[0], initial);
@@ -505,9 +497,9 @@ void RunNonConstIndexTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
- wasm_op, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
- WASM_LOCAL_GET(0), MachineRepresentation::kWord32)));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_ATOMICS_BINOP(wasm_op, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
+ WASM_LOCAL_GET(0), MachineRepresentation::kWord32))});
uint64_t initial = 0x1111222233334444, local = 0x5555666677778888;
r.builder().WriteMemory(&memory[0], initial);
@@ -538,10 +530,9 @@ WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) {
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange16U,
- WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord16)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange16U, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
+ WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), MachineRepresentation::kWord16))});
uint64_t initial = 0x4444333322221111, local = 0x9999888877776666;
r.builder().WriteMemory(&memory[0], initial);
@@ -556,10 +547,9 @@ WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchange) {
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange,
- WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord16)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
+ WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), MachineRepresentation::kWord16))});
uint64_t initial = 4444333322221111, local = 0x9999888877776666;
r.builder().WriteMemory(&memory[0], initial);
@@ -573,9 +563,9 @@ WASM_EXEC_TEST(I64AtomicNonConstIndexLoad8U) {
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
- kExprI64AtomicLoad8U, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
- MachineRepresentation::kWord8)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
+ kExprI64AtomicLoad8U, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
+ MachineRepresentation::kWord8))});
uint64_t expected = 0xffffeeeeddddcccc;
r.builder().WriteMemory(&memory[0], expected);
@@ -587,9 +577,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchangeFail) {
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), MachineRepresentation::kWord64));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord64)});
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111,
test = 0x2222222222222222;
@@ -604,10 +594,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32UFail) {
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
- BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange32U,
- WASM_I32V_1(0), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1),
- MachineRepresentation::kWord32));
+ r.Build({WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange32U, WASM_I32V_1(0), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), MachineRepresentation::kWord32)});
uint64_t initial = 0x1111222233334444, test = 0xffffffff, local = 0xeeeeeeee;
r.builder().WriteMemory(&memory[0], initial);
@@ -622,10 +611,10 @@ WASM_EXEC_TEST(AtomicStoreNoConsideredEffectful) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
- WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_I64V(20),
- MachineRepresentation::kWord64),
- kExprI64Eqz);
+ r.Build({WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_I64V(20),
+ MachineRepresentation::kWord64),
+ kExprI64Eqz});
CHECK_EQ(1, r.Call());
}
@@ -635,10 +624,10 @@ void RunNoEffectTest(TestExecutionTier execution_tier, WasmOpcode wasm_op) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
- WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I64V(20),
- MachineRepresentation::kWord64),
- WASM_DROP, kExprI64Eqz);
+ r.Build({WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I64V(20),
+ MachineRepresentation::kWord64),
+ WASM_DROP, kExprI64Eqz});
CHECK_EQ(1, r.Call());
}
@@ -656,11 +645,11 @@ WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
- WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_ZERO,
- WASM_I64V(0), WASM_I64V(30),
- MachineRepresentation::kWord64),
- WASM_DROP, kExprI64Eqz);
+ r.Build({WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_ZERO,
+ WASM_I64V(0), WASM_I64V(30),
+ MachineRepresentation::kWord64),
+ WASM_DROP, kExprI64Eqz});
CHECK_EQ(1, r.Call());
}
@@ -672,9 +661,8 @@ WASM_EXEC_TEST(I64AtomicLoadUseOnlyLowWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the low word of an I64AtomicLoad.
- BUILD(r,
- WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
- kExprI64AtomicLoad, WASM_I32V(8), MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
+ kExprI64AtomicLoad, WASM_I32V(8), MachineRepresentation::kWord64))});
CHECK_EQ(0x90abcdef, r.Call());
}
@@ -686,10 +674,10 @@ WASM_EXEC_TEST(I64AtomicLoadUseOnlyHighWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the high word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_ROR(
- WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_I32V(8),
- MachineRepresentation::kWord64),
- WASM_I64V(32))));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_I64_ROR(WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_I32V(8),
+ MachineRepresentation::kWord64),
+ WASM_I64V(32)))});
CHECK_EQ(0x12345678, r.Call());
}
@@ -701,9 +689,9 @@ WASM_EXEC_TEST(I64AtomicAddUseOnlyLowWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the low word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(
- WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V(8), WASM_I64V(1),
- MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V(8), WASM_I64V(1),
+ MachineRepresentation::kWord64))});
CHECK_EQ(0x90abcdef, r.Call());
}
@@ -715,10 +703,10 @@ WASM_EXEC_TEST(I64AtomicAddUseOnlyHighWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the high word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_ROR(
- WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V(8), WASM_I64V(1),
- MachineRepresentation::kWord64),
- WASM_I64V(32))));
+ r.Build({WASM_I32_CONVERT_I64(WASM_I64_ROR(
+ WASM_ATOMICS_BINOP(kExprI64AtomicAdd, WASM_I32V(8), WASM_I64V(1),
+ MachineRepresentation::kWord64),
+ WASM_I64V(32)))});
CHECK_EQ(0x12345678, r.Call());
}
@@ -730,9 +718,9 @@ WASM_EXEC_TEST(I64AtomicCompareExchangeUseOnlyLowWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the low word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange, WASM_I32V(8), WASM_I64V(1),
- WASM_I64V(memory[1]), MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V(8), WASM_I64V(1),
+ WASM_I64V(memory[1]), MachineRepresentation::kWord64))});
CHECK_EQ(0x90abcdef, r.Call());
}
@@ -744,11 +732,11 @@ WASM_EXEC_TEST(I64AtomicCompareExchangeUseOnlyHighWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the high word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_ROR(
- WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange, WASM_I32V(8), WASM_I64V(1),
- WASM_I64V(memory[1]), MachineRepresentation::kWord64),
- WASM_I64V(32))));
+ r.Build({WASM_I32_CONVERT_I64(WASM_I64_ROR(
+ WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_I32V(8),
+ WASM_I64V(1), WASM_I64V(memory[1]),
+ MachineRepresentation::kWord64),
+ WASM_I64V(32)))});
CHECK_EQ(0x12345678, r.Call());
}
@@ -760,9 +748,9 @@ WASM_EXEC_TEST(I64AtomicExchangeUseOnlyLowWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the low word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
- kExprI64AtomicExchange, WASM_I32V(8), WASM_I64V(1),
- MachineRepresentation::kWord64)));
+ r.Build({WASM_I32_CONVERT_I64(
+ WASM_ATOMICS_BINOP(kExprI64AtomicExchange, WASM_I32V(8), WASM_I64V(1),
+ MachineRepresentation::kWord64))});
CHECK_EQ(0x90abcdef, r.Call());
}
@@ -774,10 +762,10 @@ WASM_EXEC_TEST(I64AtomicExchangeUseOnlyHighWord) {
r.builder().WriteMemory(&memory[1], initial);
r.builder().SetHasSharedMemory();
// Test that we can use just the high word of an I64AtomicLoad.
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_ROR(
- WASM_ATOMICS_BINOP(kExprI64AtomicExchange, WASM_I32V(8),
- WASM_I64V(1), MachineRepresentation::kWord64),
- WASM_I64V(32))));
+ r.Build({WASM_I32_CONVERT_I64(WASM_I64_ROR(
+ WASM_ATOMICS_BINOP(kExprI64AtomicExchange, WASM_I32V(8), WASM_I64V(1),
+ MachineRepresentation::kWord64),
+ WASM_I64V(32)))});
CHECK_EQ(0x12345678, r.Call());
}
@@ -789,10 +777,10 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32UZeroExtended) {
r.builder().SetHasSharedMemory();
// Test that the high word of the expected value is cleared in the return
// value.
- BUILD(r, WASM_I64_EQZ(WASM_ATOMICS_TERNARY_OP(
- kExprI64AtomicCompareExchange32U, WASM_I32V(8),
- WASM_I64V(0x1234567800000000), WASM_I64V(0),
- MachineRepresentation::kWord32)));
+ r.Build({WASM_I64_EQZ(
+ WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange32U, WASM_I32V(8),
+ WASM_I64V(0x1234567800000000), WASM_I64V(0),
+ MachineRepresentation::kWord32))});
CHECK_EQ(1, r.Call());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index f8523b114c..dd59b48ef5 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -51,10 +51,9 @@ WASM_EXEC_TEST(MemoryInit) {
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
r.builder().AddPassiveDataSegment(base::ArrayVector(data));
- BUILD(r,
- WASM_MEMORY_INIT(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_INIT(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
// All zeroes.
CheckMemoryEqualsZero(&r.builder(), 0, kWasmPageSize);
@@ -87,10 +86,9 @@ WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
r.builder().AddPassiveDataSegment(base::ArrayVector(data));
- BUILD(r,
- WASM_MEMORY_INIT(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_INIT(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
const uint32_t last_5_bytes = kWasmPageSize - 5;
@@ -108,10 +106,9 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
r.builder().AddMemory(kWasmPageSize);
const byte data[kWasmPageSize] = {};
r.builder().AddPassiveDataSegment(base::ArrayVector(data));
- BUILD(r,
- WASM_MEMORY_INIT(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_INIT(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
// OK, copy the full data segment to memory.
r.Call(0, 0, kWasmPageSize);
@@ -137,10 +134,9 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
WASM_EXEC_TEST(MemoryCopy) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
byte* mem = r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
const byte initial[] = {0, 11, 22, 33, 44, 55, 66, 77};
memcpy(mem, initial, sizeof(initial));
@@ -165,10 +161,9 @@ WASM_EXEC_TEST(MemoryCopy) {
WASM_EXEC_TEST(MemoryCopyOverlapping) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
byte* mem = r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
const byte initial[] = {10, 20, 30};
memcpy(mem, initial, sizeof(initial));
@@ -187,10 +182,9 @@ WASM_EXEC_TEST(MemoryCopyOverlapping) {
WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
byte* mem = r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
const byte data[] = {11, 22, 33, 44, 55, 66, 77, 88};
memcpy(mem, data, sizeof(data));
@@ -215,10 +209,9 @@ WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_COPY(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
// Copy full range is OK.
CHECK_EQ(0, r.Call(0, 0, kWasmPageSize));
@@ -244,10 +237,9 @@ WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
WASM_EXEC_TEST(MemoryFill) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
CHECK_EQ(0, r.Call(1, 33, 5));
CheckMemoryEqualsFollowedByZeroes(&r.builder(), {0, 33, 33, 33, 33, 33});
@@ -267,10 +259,9 @@ WASM_EXEC_TEST(MemoryFill) {
WASM_EXEC_TEST(MemoryFillValueWrapsToByte) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
CHECK_EQ(0, r.Call(0, 1000, 3));
const byte expected = 1000 & 255;
CheckMemoryEqualsFollowedByZeroes(&r.builder(),
@@ -280,10 +271,9 @@ WASM_EXEC_TEST(MemoryFillValueWrapsToByte) {
WASM_EXEC_TEST(MemoryFillOutOfBoundsData) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
const byte v = 123;
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize - 5, v, 999));
CheckMemoryEquals(&r.builder(), kWasmPageSize - 6, {0, 0, 0, 0, 0, 0});
@@ -292,10 +282,9 @@ WASM_EXEC_TEST(MemoryFillOutOfBoundsData) {
WASM_EXEC_TEST(MemoryFillOutOfBounds) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_MEMORY_FILL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
const byte v = 123;
@@ -316,7 +305,7 @@ WASM_EXEC_TEST(DataDropTwice) {
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0};
r.builder().AddPassiveDataSegment(base::ArrayVector(data));
- BUILD(r, WASM_DATA_DROP(0), kExprI32Const, 0);
+ r.Build({WASM_DATA_DROP(0), kExprI32Const, 0});
CHECK_EQ(0, r.Call());
CHECK_EQ(0, r.Call());
@@ -327,9 +316,9 @@ WASM_EXEC_TEST(DataDropThenMemoryInit) {
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
r.builder().AddPassiveDataSegment(base::ArrayVector(data));
- BUILD(r, WASM_DATA_DROP(0),
- WASM_MEMORY_INIT(0, WASM_I32V_1(0), WASM_I32V_1(1), WASM_I32V_1(2)),
- kExprI32Const, 0);
+ r.Build({WASM_DATA_DROP(0),
+ WASM_MEMORY_INIT(0, WASM_I32V_1(0), WASM_I32V_1(1), WASM_I32V_1(2)),
+ kExprI32Const, 0});
CHECK_EQ(0xDEADBEEF, r.Call());
}
@@ -342,10 +331,9 @@ void TestTableCopyInbounds(TestExecutionTier execution_tier, int table_dst,
for (int i = 0; i < 10; ++i) {
r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
}
- BUILD(r,
- WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
for (uint32_t i = 0; i <= kTableSize; ++i) {
r.CheckCallViaJS(0, 0, 0, i); // nop
@@ -394,151 +382,6 @@ void CheckTableCall(Isolate* isolate, Handle<WasmTableObject> table,
}
} // namespace
-void TestTableInitElems(TestExecutionTier execution_tier, int table_index) {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- TestSignatures sigs;
- WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
- const uint32_t kTableSize = 5;
- std::vector<uint32_t> function_indexes;
- const uint32_t sig_index = r.builder().AddSignature(sigs.i_v());
-
- for (uint32_t i = 0; i < kTableSize; ++i) {
- WasmFunctionCompiler& fn = r.NewFunction(sigs.i_v(), "f");
- BUILD(fn, WASM_I32V_1(i));
- fn.SetSigIndex(sig_index);
- function_indexes.push_back(fn.function_index());
- }
-
- // Add 10 function tables, even though we only test one table.
- for (int i = 0; i < 10; ++i) {
- r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
- }
- // Passive element segment has [f0, f1, f2, f3, f4].
- r.builder().AddPassiveElementSegment(function_indexes);
-
- WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
- BUILD(call,
- WASM_CALL_INDIRECT_TABLE(table_index, sig_index, WASM_LOCAL_GET(0)));
- const uint32_t call_index = call.function_index();
-
- BUILD(r,
- WASM_TABLE_INIT(table_index, 0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
-
- auto table =
- handle(WasmTableObject::cast(
- r.builder().instance_object()->tables().get(table_index)),
- isolate);
- const double null = 0xDEADBEEF;
-
- CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
-
- // 0 count is ok in bounds, and at end of regions.
- r.CheckCallViaJS(0, 0, 0, 0);
- r.CheckCallViaJS(0, kTableSize, 0, 0);
- r.CheckCallViaJS(0, 0, kTableSize, 0);
-
- // Test actual writes.
- r.CheckCallViaJS(0, 0, 0, 1);
- CheckTableCall(isolate, table, &r, call_index, 0.0, null, null, null, null);
- r.CheckCallViaJS(0, 0, 0, 2);
- CheckTableCall(isolate, table, &r, call_index, 0.0, 1.0, null, null, null);
- r.CheckCallViaJS(0, 0, 0, 3);
- CheckTableCall(isolate, table, &r, call_index, 0.0, 1.0, 2.0, null, null);
- r.CheckCallViaJS(0, 3, 0, 2);
- CheckTableCall(isolate, table, &r, call_index, 0.0, 1.0, 2.0, 0.0, 1.0);
- r.CheckCallViaJS(0, 3, 1, 2);
- CheckTableCall(isolate, table, &r, call_index, 0.0, 1.0, 2.0, 1.0, 2.0);
- r.CheckCallViaJS(0, 3, 2, 2);
- CheckTableCall(isolate, table, &r, call_index, 0.0, 1.0, 2.0, 2.0, 3.0);
- r.CheckCallViaJS(0, 3, 3, 2);
- CheckTableCall(isolate, table, &r, call_index, 0.0, 1.0, 2.0, 3.0, 4.0);
-}
-
-WASM_COMPILED_EXEC_TEST(TableInitElems0) {
- TestTableInitElems(execution_tier, 0);
-}
-WASM_COMPILED_EXEC_TEST(TableInitElems7) {
- TestTableInitElems(execution_tier, 7);
-}
-WASM_COMPILED_EXEC_TEST(TableInitElems9) {
- TestTableInitElems(execution_tier, 9);
-}
-
-void TestTableInitOob(TestExecutionTier execution_tier, int table_index) {
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- TestSignatures sigs;
- WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
- const uint32_t kTableSize = 5;
- std::vector<uint32_t> function_indexes;
- const uint32_t sig_index = r.builder().AddSignature(sigs.i_v());
-
- for (uint32_t i = 0; i < kTableSize; ++i) {
- WasmFunctionCompiler& fn = r.NewFunction(sigs.i_v(), "f");
- BUILD(fn, WASM_I32V_1(i));
- fn.SetSigIndex(sig_index);
- function_indexes.push_back(fn.function_index());
- }
-
- for (int i = 0; i < 10; ++i) {
- r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
- }
- r.builder().AddPassiveElementSegment(function_indexes);
-
- WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
- BUILD(call,
- WASM_CALL_INDIRECT_TABLE(table_index, sig_index, WASM_LOCAL_GET(0)));
- const uint32_t call_index = call.function_index();
-
- BUILD(r,
- WASM_TABLE_INIT(table_index, 0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
-
- auto table =
- handle(WasmTableObject::cast(
- r.builder().instance_object()->tables().get(table_index)),
- isolate);
- const double null = 0xDEADBEEF;
-
- CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
-
- // Out-of-bounds table.init should not have any effect.
- r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
- CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
-
- r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
- CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
-
- // 0-count is still oob if target is invalid.
- r.CheckCallViaJS(0xDEADBEEF, kTableSize + 1, 0, 0);
- r.CheckCallViaJS(0xDEADBEEF, 0, kTableSize + 1, 0);
-
- r.CheckCallViaJS(0xDEADBEEF, 0, 0, 6);
- r.CheckCallViaJS(0xDEADBEEF, 0, 1, 5);
- r.CheckCallViaJS(0xDEADBEEF, 0, 2, 4);
- r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
- r.CheckCallViaJS(0xDEADBEEF, 0, 4, 2);
- r.CheckCallViaJS(0xDEADBEEF, 0, 5, 1);
-
- r.CheckCallViaJS(0xDEADBEEF, 0, 0, 6);
- r.CheckCallViaJS(0xDEADBEEF, 1, 0, 5);
- r.CheckCallViaJS(0xDEADBEEF, 2, 0, 4);
- r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
- r.CheckCallViaJS(0xDEADBEEF, 4, 0, 2);
- r.CheckCallViaJS(0xDEADBEEF, 5, 0, 1);
-
- r.CheckCallViaJS(0xDEADBEEF, 10, 0, 1);
- r.CheckCallViaJS(0xDEADBEEF, 0, 10, 1);
-}
-
-WASM_COMPILED_EXEC_TEST(TableInitOob0) { TestTableInitOob(execution_tier, 0); }
-WASM_COMPILED_EXEC_TEST(TableInitOob7) { TestTableInitOob(execution_tier, 7); }
-WASM_COMPILED_EXEC_TEST(TableInitOob9) { TestTableInitOob(execution_tier, 9); }
-
void TestTableCopyElems(TestExecutionTier execution_tier, int table_dst,
int table_src) {
Isolate* isolate = CcTest::InitIsolateOnce();
@@ -551,7 +394,7 @@ void TestTableCopyElems(TestExecutionTier execution_tier, int table_dst,
for (uint32_t i = 0; i < kTableSize; ++i) {
WasmFunctionCompiler& fn = r.NewFunction(sigs.i_v(), "f");
- BUILD(fn, WASM_I32V_1(i));
+ fn.Build({WASM_I32V_1(i)});
fn.SetSigIndex(sig_index);
function_indexes[i] = fn.function_index();
}
@@ -560,10 +403,9 @@ void TestTableCopyElems(TestExecutionTier execution_tier, int table_dst,
r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
}
- BUILD(r,
- WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
r.builder().InitializeWrapperCache();
@@ -629,7 +471,7 @@ void TestTableCopyCalls(TestExecutionTier execution_tier, int table_dst,
for (uint32_t i = 0; i < kTableSize; ++i) {
WasmFunctionCompiler& fn = r.NewFunction(sigs.i_v(), "f");
- BUILD(fn, WASM_I32V_1(i));
+ fn.Build({WASM_I32V_1(i)});
fn.SetSigIndex(sig_index);
function_indexes[i] = fn.function_index();
}
@@ -639,14 +481,13 @@ void TestTableCopyCalls(TestExecutionTier execution_tier, int table_dst,
}
WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
- BUILD(call,
- WASM_CALL_INDIRECT_TABLE(table_dst, sig_index, WASM_LOCAL_GET(0)));
+ call.Build(
+ {WASM_CALL_INDIRECT_TABLE(table_dst, sig_index, WASM_LOCAL_GET(0))});
const uint32_t call_index = call.function_index();
- BUILD(r,
- WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
auto table =
handle(WasmTableObject::cast(
@@ -700,7 +541,7 @@ void TestTableCopyOobWrites(TestExecutionTier execution_tier, int table_dst,
for (uint32_t i = 0; i < kTableSize; ++i) {
WasmFunctionCompiler& fn = r.NewFunction(sigs.i_v(), "f");
- BUILD(fn, WASM_I32V_1(i));
+ fn.Build({WASM_I32V_1(i)});
fn.SetSigIndex(sig_index);
function_indexes[i] = fn.function_index();
}
@@ -709,10 +550,9 @@ void TestTableCopyOobWrites(TestExecutionTier execution_tier, int table_dst,
r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
}
- BUILD(r,
- WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
r.builder().InitializeWrapperCache();
@@ -769,10 +609,9 @@ void TestTableCopyOob1(TestExecutionTier execution_tier, int table_dst,
r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
}
- BUILD(r,
- WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
- kExprI32Const, 0);
+ r.Build({WASM_TABLE_COPY(table_dst, table_src, WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)),
+ kExprI32Const, 0});
r.CheckCallViaJS(0, 0, 0, 1); // nop
r.CheckCallViaJS(0, 0, 0, kTableSize); // nop
@@ -815,29 +654,6 @@ WASM_COMPILED_EXEC_TEST(TableCopyOob1From6To6) {
TestTableCopyOob1(execution_tier, 6, 6);
}
-WASM_COMPILED_EXEC_TEST(ElemDropTwice) {
- WasmRunner<uint32_t> r(execution_tier);
- r.builder().AddIndirectFunctionTable(nullptr, 1);
- r.builder().AddPassiveElementSegment({});
- BUILD(r, WASM_ELEM_DROP(0), kExprI32Const, 0);
-
- r.CheckCallViaJS(0);
- r.CheckCallViaJS(0);
-}
-
-WASM_COMPILED_EXEC_TEST(ElemDropThenTableInit) {
- WasmRunner<uint32_t, uint32_t> r(execution_tier);
- r.builder().AddIndirectFunctionTable(nullptr, 1);
- r.builder().AddPassiveElementSegment({});
- BUILD(
- r, WASM_ELEM_DROP(0),
- WASM_TABLE_INIT(0, 0, WASM_I32V_1(0), WASM_I32V_1(0), WASM_LOCAL_GET(0)),
- kExprI32Const, 0);
-
- r.CheckCallViaJS(0, 0);
- r.CheckCallViaJS(0xDEADBEEF, 1);
-}
-
} // namespace test_run_wasm_bulk_memory
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index a100bc3d49..d7be4a1480 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -21,11 +21,11 @@ WASM_EXEC_TEST(TryCatchThrow) {
constexpr uint32_t kResult1 = 42;
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_T(kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_THROW(except))),
- WASM_STMTS(WASM_I32V(kResult0)), except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except))),
+ WASM_STMTS(WASM_I32V(kResult0)), except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -45,12 +45,12 @@ WASM_EXEC_TEST(TryCatchThrowWithValue) {
constexpr uint32_t kResult1 = 42;
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_I32V(kResult0), WASM_THROW(except))),
- WASM_STMTS(kExprNop), except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_I32V(kResult0),
+ WASM_THROW(except))),
+ WASM_STMTS(kExprNop), except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -72,14 +72,14 @@ WASM_EXEC_TEST(TryMultiCatchThrow) {
constexpr uint32_t kResult2 = 51;
// Build the main test function.
- BUILD(
- r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
- WASM_STMTS(WASM_I32V(kResult2),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_THROW(except2))),
- kExprCatch, except1, WASM_STMTS(WASM_I32V(kResult0)), kExprCatch, except2,
- WASM_STMTS(WASM_I32V(kResult1)), kExprEnd);
+ r.Build(
+ {kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult2),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_THROW(except2))),
+ kExprCatch, except1, WASM_STMTS(WASM_I32V(kResult0)), kExprCatch,
+ except2, WASM_STMTS(WASM_I32V(kResult1)), kExprEnd});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -101,10 +101,11 @@ WASM_EXEC_TEST(TryCatchAllThrow) {
constexpr uint32_t kResult1 = 42;
// Build the main test function.
- BUILD(r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
- WASM_STMTS(WASM_I32V(kResult1), WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_THROW(except))),
- kExprCatchAll, WASM_I32V(kResult0), kExprEnd);
+ r.Build(
+ {kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except))),
+ kExprCatchAll, WASM_I32V(kResult0), kExprEnd});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -126,14 +127,14 @@ WASM_EXEC_TEST(TryCatchCatchAllThrow) {
constexpr uint32_t kResult2 = 51;
// Build the main test function.
- BUILD(
- r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
- WASM_STMTS(WASM_I32V(kResult2),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_THROW(except2))),
- kExprCatch, except1, WASM_I32V(kResult0), kExprCatchAll,
- WASM_I32V(kResult1), kExprEnd);
+ r.Build(
+ {kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult2),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_THROW(except2))),
+ kExprCatch, except1, WASM_I32V(kResult0), kExprCatchAll,
+ WASM_I32V(kResult1), kExprEnd});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -157,15 +158,14 @@ WASM_EXEC_TEST(TryImplicitRethrow) {
constexpr uint32_t kResult2 = 51;
// Build the main test function.
- BUILD(r,
- WASM_TRY_CATCH_T(
- kWasmI32,
- WASM_TRY_CATCH_T(kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_THROW(except2))),
- WASM_STMTS(WASM_I32V(kResult2)), except1),
- WASM_I32V(kResult0), except2));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_TRY_CATCH_T(kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except2))),
+ WASM_STMTS(WASM_I32V(kResult2)), except1),
+ WASM_I32V(kResult0), except2)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -185,15 +185,14 @@ WASM_EXEC_TEST(TryDelegate) {
constexpr uint32_t kResult1 = 42;
// Build the main test function.
- BUILD(r,
- WASM_TRY_CATCH_T(kWasmI32,
- WASM_TRY_DELEGATE_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_THROW(except))),
- 0),
- WASM_I32V(kResult0), except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_TRY_DELEGATE_T(kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ 0),
+ WASM_I32V(kResult0), except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -209,11 +208,10 @@ WASM_EXEC_TEST(TestCatchlessTry) {
TestSignatures sigs;
WasmRunner<uint32_t> r(execution_tier);
byte except = r.builder().AddException(sigs.v_i());
- BUILD(r,
- WASM_TRY_CATCH_T(
- kWasmI32,
- WASM_TRY_T(kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(except))),
- WASM_NOP, except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_TRY_T(kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(except))),
+ WASM_NOP, except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
r.CheckCallViaJS(0);
} else {
@@ -231,19 +229,18 @@ WASM_EXEC_TEST(TryCatchRethrow) {
constexpr uint32_t kUnreachable = 51;
// Build the main test function.
- BUILD(r,
- WASM_TRY_CATCH_CATCH_T(
- kWasmI32,
- WASM_TRY_CATCH_T(
- kWasmI32, WASM_THROW(except2),
- WASM_TRY_CATCH_T(
- kWasmI32, WASM_THROW(except1),
- WASM_STMTS(WASM_I32V(kUnreachable),
- WASM_IF_ELSE(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_RETHROW(1), WASM_RETHROW(2))),
- except1),
- except2),
- except1, WASM_I32V(kResult0), except2, WASM_I32V(kResult1)));
+ r.Build({WASM_TRY_CATCH_CATCH_T(
+ kWasmI32,
+ WASM_TRY_CATCH_T(
+ kWasmI32, WASM_THROW(except2),
+ WASM_TRY_CATCH_T(
+ kWasmI32, WASM_THROW(except1),
+ WASM_STMTS(WASM_I32V(kUnreachable),
+ WASM_IF_ELSE(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_RETHROW(1), WASM_RETHROW(2))),
+ except1),
+ except2),
+ except1, WASM_I32V(kResult0), except2, WASM_I32V(kResult1))});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -263,15 +260,14 @@ WASM_EXEC_TEST(TryDelegateToCaller) {
constexpr uint32_t kResult1 = 42;
// Build the main test function.
- BUILD(r,
- WASM_TRY_CATCH_T(kWasmI32,
- WASM_TRY_DELEGATE_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_THROW(except))),
- 1),
- WASM_I32V(kResult0), except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_TRY_DELEGATE_T(kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ 1),
+ WASM_I32V(kResult0), except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -294,18 +290,18 @@ WASM_EXEC_TEST(TryCatchCallDirect) {
// Build a throwing helper function.
WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
- BUILD(throw_func, WASM_THROW(except));
+ throw_func.Build({WASM_THROW(except)});
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_STMTS(WASM_CALL_FUNCTION(
- throw_func.function_index(),
- WASM_I32V(7), WASM_I32V(9)),
- WASM_DROP))),
- WASM_STMTS(WASM_I32V(kResult0)), except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(throw_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_I32V(kResult0)), except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -326,18 +322,18 @@ WASM_EXEC_TEST(TryCatchAllCallDirect) {
// Build a throwing helper function.
WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
- BUILD(throw_func, WASM_THROW(except));
+ throw_func.Build({WASM_THROW(except)});
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_STMTS(WASM_CALL_FUNCTION(
- throw_func.function_index(),
- WASM_I32V(7), WASM_I32V(9)),
- WASM_DROP))),
- WASM_STMTS(WASM_I32V(kResult0))));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(throw_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_I32V(kResult0)))});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -358,7 +354,7 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
// Build a throwing helper function.
WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
- BUILD(throw_func, WASM_THROW(except));
+ throw_func.Build({WASM_THROW(except)});
// Add an indirect function table.
uint16_t indirect_function_table[] = {
@@ -367,16 +363,15 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
arraysize(indirect_function_table));
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_T(
- kWasmI32,
- WASM_STMTS(
- WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_STMTS(WASM_CALL_INDIRECT(
- throw_func.sig_index(), WASM_I32V(7),
- WASM_I32V(9), WASM_LOCAL_GET(0)),
- WASM_DROP))),
- WASM_I32V(kResult0), except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_INDIRECT(
+ throw_func.sig_index(), WASM_I32V(7),
+ WASM_I32V(9), WASM_LOCAL_GET(0)),
+ WASM_DROP))),
+ WASM_I32V(kResult0), except)});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -397,7 +392,7 @@ WASM_EXEC_TEST(TryCatchAllCallIndirect) {
// Build a throwing helper function.
WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
- BUILD(throw_func, WASM_THROW(except));
+ throw_func.Build({WASM_THROW(except)});
// Add an indirect function table.
uint16_t indirect_function_table[] = {
@@ -406,16 +401,15 @@ WASM_EXEC_TEST(TryCatchAllCallIndirect) {
arraysize(indirect_function_table));
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_STMTS(
- WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_STMTS(WASM_CALL_INDIRECT(
- throw_func.sig_index(), WASM_I32V(7),
- WASM_I32V(9), WASM_LOCAL_GET(0)),
- WASM_DROP))),
- WASM_I32V(kResult0)));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_INDIRECT(
+ throw_func.sig_index(), WASM_I32V(7),
+ WASM_I32V(9), WASM_LOCAL_GET(0)),
+ WASM_DROP))),
+ WASM_I32V(kResult0))});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -435,21 +429,20 @@ WASM_COMPILED_EXEC_TEST(TryCatchCallExternal) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
ManuallyImportedJSFunction import = {sigs.i_ii(), js_function};
- WasmRunner<uint32_t, uint32_t> r(execution_tier, &import);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, kWasmOrigin, &import);
constexpr uint32_t kResult0 = 23;
constexpr uint32_t kResult1 = 42;
constexpr uint32_t kJSFunc = 0;
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_STMTS(
- WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
- WASM_I32V(9)),
- WASM_DROP))),
- WASM_I32V(kResult0)));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
+ WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_I32V(kResult0))});
// Need to call through JS to allow for creation of stack traces.
r.CheckCallViaJS(kResult0, 0);
@@ -464,21 +457,20 @@ WASM_COMPILED_EXEC_TEST(TryCatchAllCallExternal) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
ManuallyImportedJSFunction import = {sigs.i_ii(), js_function};
- WasmRunner<uint32_t, uint32_t> r(execution_tier, &import);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, kWasmOrigin, &import);
constexpr uint32_t kResult0 = 23;
constexpr uint32_t kResult1 = 42;
constexpr uint32_t kJSFunc = 0;
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_STMTS(
- WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
- WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
- WASM_I32V(9)),
- WASM_DROP))),
- WASM_I32V(kResult0)));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
+ WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_I32V(kResult0))});
// Need to call through JS to allow for creation of stack traces.
r.CheckCallViaJS(kResult0, 0);
@@ -490,7 +482,7 @@ namespace {
void TestTrapNotCaught(byte* code, size_t code_size,
TestExecutionTier execution_tier) {
TestSignatures sigs;
- WasmRunner<uint32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<uint32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport);
r.builder().AddMemory(kWasmPageSize);
constexpr uint32_t kResultSuccess = 23;
@@ -502,16 +494,16 @@ void TestTrapNotCaught(byte* code, size_t code_size,
// Build a trapping helper function.
WasmFunctionCompiler& trap_func = r.NewFunction(sigs.i_ii());
- trap_func.Build(code, code + code_size);
+ trap_func.Build(base::VectorOf(code, code_size));
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(kResultSuccess),
- WASM_CALL_FUNCTION(trap_func.function_index(),
- WASM_I32V(7), WASM_I32V(9)),
- WASM_DROP),
- WASM_STMTS(WASM_I32V(kResultCaught))));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResultSuccess),
+ WASM_CALL_FUNCTION(trap_func.function_index(), WASM_I32V(7),
+ WASM_I32V(9)),
+ WASM_DROP),
+ WASM_STMTS(WASM_I32V(kResultCaught)))});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -585,23 +577,21 @@ UNINITIALIZED_WASM_EXEC_TEST(TestStackOverflowNotCaught) {
IsolateScope isolate_scope;
LocalContext context(isolate_scope.isolate());
- WasmRunner<uint32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<uint32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport, kMemory32,
isolate_scope.i_isolate());
// Build a function that calls itself until stack overflow.
WasmFunctionCompiler& stack_overflow = r.NewFunction(sigs.v_v());
- byte stack_overflow_code[] = {
- kExprCallFunction, static_cast<byte>(stack_overflow.function_index())};
- stack_overflow.Build(stack_overflow_code,
- stack_overflow_code + arraysize(stack_overflow_code));
+ stack_overflow.Build(
+ {kExprCallFunction, static_cast<byte>(stack_overflow.function_index())});
// Build the main test function.
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(1), kExprCallFunction,
- static_cast<byte>(stack_overflow.function_index())),
- WASM_STMTS(WASM_I32V(1))));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(1), kExprCallFunction,
+ static_cast<byte>(stack_overflow.function_index())),
+ WASM_STMTS(WASM_I32V(1)))});
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -617,11 +607,11 @@ TEST(Regress1180457) {
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
constexpr uint32_t kResult0 = 23;
constexpr uint32_t kUnreachable = 42;
- BUILD(r, WASM_TRY_CATCH_ALL_T(
- kWasmI32,
- WASM_TRY_DELEGATE_T(
- kWasmI32, WASM_STMTS(WASM_I32V(kResult0), WASM_BR(0)), 0),
- WASM_I32V(kUnreachable)));
+ r.Build({WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_TRY_DELEGATE_T(kWasmI32, WASM_STMTS(WASM_I32V(kResult0), WASM_BR(0)),
+ 0),
+ WASM_I32V(kUnreachable))});
CHECK_EQ(kResult0, r.CallInterpreter());
}
@@ -631,8 +621,8 @@ TEST(Regress1187896) {
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
byte try_sig = r.builder().AddSignature(sigs.v_i());
constexpr uint32_t kResult = 23;
- BUILD(r, kExprI32Const, 0, kExprTry, try_sig, kExprDrop, kExprCatchAll,
- kExprNop, kExprEnd, kExprI32Const, kResult);
+ r.Build({kExprI32Const, 0, kExprTry, try_sig, kExprDrop, kExprCatchAll,
+ kExprNop, kExprEnd, kExprI32Const, kResult});
CHECK_EQ(kResult, r.CallInterpreter());
}
@@ -640,8 +630,8 @@ TEST(Regress1190291) {
TestSignatures sigs;
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
byte try_sig = r.builder().AddSignature(sigs.v_i());
- BUILD(r, kExprUnreachable, kExprTry, try_sig, kExprCatchAll, kExprEnd,
- kExprI32Const, 0);
+ r.Build({kExprUnreachable, kExprTry, try_sig, kExprCatchAll, kExprEnd,
+ kExprI32Const, 0});
r.CallInterpreter();
}
@@ -649,16 +639,15 @@ TEST(Regress1186795) {
TestSignatures sigs;
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
byte except = r.builder().AddException(sigs.v_i());
- BUILD(r,
- WASM_TRY_CATCH_T(
- kWasmI32,
- WASM_STMTS(WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
- WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
- WASM_TRY_T(kWasmI32,
- WASM_STMTS(WASM_I32V(0), WASM_THROW(except))),
- WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
- WASM_DROP, WASM_DROP),
- WASM_NOP, except));
+ r.Build({WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
+ WASM_I32V(0), WASM_I32V(0),
+ WASM_TRY_T(kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(except))),
+ WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
+ WASM_DROP),
+ WASM_NOP, except)});
CHECK_EQ(0, r.CallInterpreter());
}
@@ -667,9 +656,9 @@ TEST(Regress1197408) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(
TestExecutionTier::kInterpreter);
byte sig_id = r.builder().AddSignature(sigs.i_iii());
- BUILD(r, WASM_STMTS(WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), kExprTry,
+ r.Build({WASM_STMTS(WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), kExprTry,
sig_id, kExprTry, sig_id, kExprCallFunction, 0,
- kExprDelegate, 0, kExprDelegate, 0));
+ kExprDelegate, 0, kExprDelegate, 0)});
CHECK_EQ(0, r.CallInterpreter(0, 0, 0));
}
@@ -677,16 +666,16 @@ TEST(Regress1212396) {
TestSignatures sigs;
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
byte except = r.builder().AddException(sigs.v_v());
- BUILD(r, kExprTry, kVoidCode, kExprTry, kVoidCode, kExprI32Const, 0,
- kExprThrow, except, kExprDelegate, 0, kExprCatch, except, kExprEnd,
- kExprI32Const, 42);
+ r.Build({kExprTry, kVoidCode, kExprTry, kVoidCode, kExprI32Const, 0,
+ kExprThrow, except, kExprDelegate, 0, kExprCatch, except, kExprEnd,
+ kExprI32Const, 42});
CHECK_EQ(42, r.CallInterpreter());
}
TEST(Regress1219746) {
TestSignatures sigs;
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, kExprTry, kVoidCode, kExprI32Const, 0, kExprEnd);
+ r.Build({kExprTry, kVoidCode, kExprI32Const, 0, kExprEnd});
CHECK_EQ(0, r.CallInterpreter());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index d1526ac245..7728138cea 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -25,21 +25,21 @@ TEST(Run_WasmInt8Const_i) {
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
const byte kExpectedValue = 109;
// return(kExpectedValue)
- BUILD(r, WASM_I32V_2(kExpectedValue));
+ r.Build({WASM_I32V_2(kExpectedValue)});
CHECK_EQ(kExpectedValue, r.Call());
}
TEST(Run_WasmIfElse) {
WasmRunner<int32_t, int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_IF_ELSE_I(WASM_LOCAL_GET(0), WASM_I32V_1(9), WASM_I32V_1(10)));
+ r.Build({WASM_IF_ELSE_I(WASM_LOCAL_GET(0), WASM_I32V_1(9), WASM_I32V_1(10))});
CHECK_EQ(10, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
TEST(Run_WasmIfReturn) {
WasmRunner<int32_t, int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_RETURN(WASM_I32V_2(77))),
- WASM_I32V_2(65));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_RETURN(WASM_I32V_2(77))),
+ WASM_I32V_2(65)});
CHECK_EQ(65, r.Call(0));
CHECK_EQ(77, r.Call(1));
}
@@ -130,12 +130,10 @@ TEST(Run_WasmBlockBreakN) {
TEST(Run_Wasm_nested_ifs_i) {
WasmRunner<int32_t, int32_t, int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(
- r,
- WASM_IF_ELSE_I(
- WASM_LOCAL_GET(0),
- WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(11), WASM_I32V_1(12)),
- WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(13), WASM_I32V_1(14))));
+ r.Build({WASM_IF_ELSE_I(
+ WASM_LOCAL_GET(0),
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(11), WASM_I32V_1(12)),
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(13), WASM_I32V_1(14)))});
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -164,16 +162,15 @@ TEST(Run_Wasm_returnCallFactorial) {
WasmFunctionCompiler& fact_aux_fn =
r.NewFunction<int32_t, int32_t, int32_t>("fact_aux");
- BUILD(r, WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
- WASM_LOCAL_GET(0), WASM_I32V(1)));
+ r.Build({WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
+ WASM_LOCAL_GET(0), WASM_I32V(1))});
- BUILD(fact_aux_fn,
- WASM_IF_ELSE_I(
- WASM_I32_EQ(WASM_I32V(1), WASM_LOCAL_GET(0)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_FUNCTION(
- fact_aux_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))));
+ fact_aux_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_EQ(WASM_I32V(1), WASM_LOCAL_GET(0)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_FUNCTION(
+ fact_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))))});
// Runs out of stack space without using return call.
uint32_t test_values[] = {1, 2, 5, 10, 20, 20000};
@@ -193,17 +190,16 @@ TEST(Run_Wasm_returnCallFactorial64) {
WasmFunctionCompiler& fact_aux_fn =
r.NewFunction<int64_t, int32_t, int64_t>("fact_aux");
- BUILD(r, WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
- WASM_LOCAL_GET(0), WASM_I64V(1)));
+ r.Build({WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
+ WASM_LOCAL_GET(0), WASM_I64V(1))});
- BUILD(fact_aux_fn,
- WASM_IF_ELSE_L(
- WASM_I32_EQ(WASM_I32V(1), WASM_LOCAL_GET(0)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_FUNCTION(
- fact_aux_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I64_MUL(WASM_I64_SCONVERT_I32(WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(1)))));
+ fact_aux_fn.Build({WASM_IF_ELSE_L(
+ WASM_I32_EQ(WASM_I32V(1), WASM_LOCAL_GET(0)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_FUNCTION(
+ fact_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I64_MUL(WASM_I64_SCONVERT_I32(WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(1))))});
for (int32_t v : test_values) {
CHECK_EQ(factorial<int64_t>(v), r.Call(v));
@@ -226,17 +222,15 @@ TEST(Run_Wasm_returnCallIndirectFactorial) {
r.builder().AddIndirectFunctionTable(indirect_function_table,
arraysize(indirect_function_table));
- BUILD(r, WASM_RETURN_CALL_INDIRECT(fact_aux_fn.sig_index(), WASM_LOCAL_GET(0),
- WASM_I32V(1), WASM_ZERO));
+ r.Build({WASM_RETURN_CALL_INDIRECT(fact_aux_fn.sig_index(), WASM_LOCAL_GET(0),
+ WASM_I32V(1), WASM_ZERO)});
- BUILD(
- fact_aux_fn,
- WASM_IF_ELSE_I(
- WASM_I32_EQ(WASM_I32V(1), WASM_LOCAL_GET(0)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_INDIRECT(
- fact_aux_fn.sig_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), WASM_ZERO)));
+ fact_aux_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_EQ(WASM_I32V(1), WASM_LOCAL_GET(0)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_INDIRECT(
+ fact_aux_fn.sig_index(),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), WASM_ZERO))});
uint32_t test_values[] = {1, 2, 5, 10, 20};
@@ -310,14 +304,14 @@ TEST(MemoryGrow) {
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
- BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
+ r.Build({WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
CHECK_EQ(1, r.Call(1));
}
{
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
- BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
+ r.Build({WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
CHECK_EQ(-1, r.Call(11));
}
}
@@ -327,11 +321,10 @@ TEST(MemoryGrowPreservesData) {
int32_t value = 2335;
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
- BUILD(
- r,
- WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value)),
- WASM_MEMORY_GROW(WASM_LOCAL_GET(0)), WASM_DROP,
- WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index)));
+ r.Build(
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value)),
+ WASM_MEMORY_GROW(WASM_LOCAL_GET(0)), WASM_DROP,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index))});
CHECK_EQ(value, r.Call(1));
}
@@ -339,26 +332,26 @@ TEST(MemoryGrowInvalidSize) {
// Grow memory by an invalid amount without initial memory.
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
+ r.Build({WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
CHECK_EQ(-1, r.Call(1048575));
}
TEST(ReferenceTypeLocals) {
{
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_REF_IS_NULL(WASM_REF_NULL(kAnyRefCode)));
+ r.Build({WASM_REF_IS_NULL(WASM_REF_NULL(kAnyRefCode))});
CHECK_EQ(1, r.Call());
}
{
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
r.AllocateLocal(kWasmAnyRef);
- BUILD(r, WASM_REF_IS_NULL(WASM_LOCAL_GET(0)));
+ r.Build({WASM_REF_IS_NULL(WASM_LOCAL_GET(0))});
CHECK_EQ(1, r.Call());
}
{
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
r.AllocateLocal(kWasmAnyRef);
- BUILD(r, WASM_REF_IS_NULL(WASM_LOCAL_TEE(0, WASM_REF_NULL(kAnyRefCode))));
+ r.Build({WASM_REF_IS_NULL(WASM_LOCAL_TEE(0, WASM_REF_NULL(kAnyRefCode)))});
CHECK_EQ(1, r.Call());
}
}
@@ -366,7 +359,7 @@ TEST(ReferenceTypeLocals) {
TEST(TestPossibleNondeterminism) {
{
WasmRunner<int32_t, float> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_I32_REINTERPRET_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_REINTERPRET_F32(WASM_LOCAL_GET(0))});
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
@@ -374,7 +367,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<int64_t, double> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_I64_REINTERPRET_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_REINTERPRET_F64(WASM_LOCAL_GET(0))});
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -382,7 +375,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<float, float> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F32_COPYSIGN(WASM_F32(42.0f), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_COPYSIGN(WASM_F32(42.0f), WASM_LOCAL_GET(0))});
r.Call(16.0f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -390,7 +383,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<double, double> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F64_COPYSIGN(WASM_F64(42.0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_COPYSIGN(WASM_F64(42.0), WASM_LOCAL_GET(0))});
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -400,10 +393,9 @@ TEST(TestPossibleNondeterminism) {
int32_t index = 16;
WasmRunner<int32_t, float> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r,
- WASM_STORE_MEM(MachineType::Float32(), WASM_I32V(index),
- WASM_LOCAL_GET(0)),
- WASM_I32V(index));
+ r.Build({WASM_STORE_MEM(MachineType::Float32(), WASM_I32V(index),
+ WASM_LOCAL_GET(0)),
+ WASM_I32V(index)});
r.Call(1345.3456f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
@@ -413,10 +405,9 @@ TEST(TestPossibleNondeterminism) {
int32_t index = 16;
WasmRunner<int32_t, double> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r,
- WASM_STORE_MEM(MachineType::Float64(), WASM_I32V(index),
- WASM_LOCAL_GET(0)),
- WASM_I32V(index));
+ r.Build({WASM_STORE_MEM(MachineType::Float64(), WASM_I32V(index),
+ WASM_LOCAL_GET(0)),
+ WASM_I32V(index)});
r.Call(1345.3456);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -424,7 +415,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<float, float> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
@@ -432,7 +423,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<double, double> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -440,7 +431,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<int32_t, float> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F32_EQ(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_EQ(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
@@ -448,7 +439,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<int32_t, double> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F64_EQ(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_EQ(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -456,7 +447,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<float, float> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F32_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<float>::quiet_NaN());
@@ -464,7 +455,7 @@ TEST(TestPossibleNondeterminism) {
}
{
WasmRunner<double, double> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_F64_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
r.Call(std::numeric_limits<double>::quiet_NaN());
@@ -475,7 +466,7 @@ TEST(TestPossibleNondeterminism) {
TEST(InterpreterLoadWithoutMemory) {
WasmRunner<int32_t, int32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(0);
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0))});
CHECK_TRAP32(r.Call(0));
}
@@ -483,28 +474,28 @@ TEST(Regress1111015) {
EXPERIMENTAL_FLAG_SCOPE(return_call);
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
WasmFunctionCompiler& f = r.NewFunction<int32_t>("f");
- BUILD(r, WASM_BLOCK_I(WASM_RETURN_CALL_FUNCTION0(f.function_index()),
- kExprDrop));
- BUILD(f, WASM_I32V(0));
+ r.Build({WASM_BLOCK_I(WASM_RETURN_CALL_FUNCTION0(f.function_index()),
+ kExprDrop)});
+ f.Build({WASM_I32V(0)});
}
TEST(Regress1092130) {
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
TestSignatures sigs;
byte sig_v_i = r.builder().AddSignature(sigs.v_i());
- BUILD(r, WASM_I32V(0),
- WASM_IF_ELSE_I(
- WASM_I32V(0),
- WASM_SEQ(WASM_UNREACHABLE, WASM_BLOCK_X(sig_v_i, WASM_NOP)),
- WASM_I32V(0)),
- WASM_DROP);
+ r.Build({WASM_I32V(0),
+ WASM_IF_ELSE_I(
+ WASM_I32V(0),
+ WASM_SEQ(WASM_UNREACHABLE, WASM_BLOCK_X(sig_v_i, WASM_NOP)),
+ WASM_I32V(0)),
+ WASM_DROP});
r.Call();
}
TEST(Regress1247119) {
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, kExprLoop, 0, kExprTry, 0, kExprUnreachable, kExprDelegate, 0,
- kExprEnd);
+ r.Build({kExprLoop, 0, kExprTry, 0, kExprUnreachable, kExprDelegate, 0,
+ kExprEnd});
r.Call();
}
@@ -513,16 +504,16 @@ TEST(Regress1246712) {
TestSignatures sigs;
const int kExpected = 1;
uint8_t except = r.builder().AddException(sigs.v_v());
- BUILD(r, kExprTry, kWasmI32.value_type_code(), kExprTry,
- kWasmI32.value_type_code(), kExprThrow, except, kExprEnd, kExprCatchAll,
- kExprI32Const, kExpected, kExprEnd);
+ r.Build({kExprTry, kWasmI32.value_type_code(), kExprTry,
+ kWasmI32.value_type_code(), kExprThrow, except, kExprEnd,
+ kExprCatchAll, kExprI32Const, kExpected, kExprEnd});
CHECK_EQ(kExpected, r.Call());
}
TEST(Regress1249306) {
WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, kExprTry, kVoid, kExprCatchAll, kExprTry, kVoid, kExprDelegate, 0,
- kExprEnd, kExprI32Const, 0);
+ r.Build({kExprTry, kVoid, kExprCatchAll, kExprTry, kVoid, kExprDelegate, 0,
+ kExprEnd, kExprI32Const, 0});
r.Call();
}
@@ -532,11 +523,32 @@ TEST(Regress1251845) {
ValueType reps[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32};
FunctionSig sig_iii_i(1, 3, reps);
byte sig = r.builder().AddSignature(&sig_iii_i);
- BUILD(r, kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0, kExprTry, sig,
- kExprI32Const, 0, kExprTry, 0, kExprTry, 0, kExprI32Const, 0, kExprTry,
- sig, kExprUnreachable, kExprTry, 0, kExprUnreachable, kExprEnd,
- kExprTry, sig, kExprUnreachable, kExprEnd, kExprEnd, kExprUnreachable,
- kExprEnd, kExprEnd, kExprUnreachable, kExprEnd);
+ r.Build({
+ // clang-format off
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprTry, sig,
+ kExprI32Const, 0,
+ kExprTry, 0,
+ kExprTry, 0,
+ kExprI32Const, 0,
+ kExprTry, sig,
+ kExprUnreachable,
+ kExprTry, 0,
+ kExprUnreachable,
+ kExprEnd,
+ kExprTry, sig,
+ kExprUnreachable,
+ kExprEnd,
+ kExprEnd,
+ kExprUnreachable,
+ kExprEnd,
+ kExprEnd,
+ kExprUnreachable,
+ kExprEnd
+ // clang-format on
+ });
r.Call(0, 0, 0);
}
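
The hunks above all apply one mechanical migration: the variadic BUILD(runner, ...) macro becomes a Build() call that takes the opcode bytes as a single braced list, and the pointer-pair Build(begin, end) form becomes Build(base::VectorOf(...)). A minimal standalone sketch of that shape — ToyCompiler and BUILD_TOY are made-up stand-ins for illustration, not V8's real test classes:

// Sketch only: "ToyCompiler" and "BUILD_TOY" are hypothetical stand-ins for
// the test helpers touched above; they are not V8 APIs.
#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <vector>

class ToyCompiler {
 public:
  // New style: the caller passes one braced list of opcode bytes.
  void Build(std::initializer_list<uint8_t> code) {
    code_.assign(code.begin(), code.end());
  }
  const std::vector<uint8_t>& code() const { return code_; }

 private:
  std::vector<uint8_t> code_;
};

// Old style: a variadic macro flattened the byte sequence into arguments.
#define BUILD_TOY(compiler, ...) (compiler).Build({__VA_ARGS__})

int main() {
  ToyCompiler old_way, new_way;
  BUILD_TOY(old_way, 0x41, 0x2A, 0x0B);  // i32.const 42; end
  new_way.Build({0x41, 0x2A, 0x0B});     // same bytes, no macro
  assert(old_way.code() == new_way.code());
}
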
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 2d51f0fe9d..04ab378ace 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -68,7 +68,7 @@ ManuallyImportedJSFunction CreateJSSelector(FunctionSig* sig, int which) {
WASM_COMPILED_EXEC_TEST(Run_Int32Sub_jswrapped) {
WasmRunner<int, int, int> r(execution_tier);
- BUILD(r, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
r.CheckCallViaJS(33, 44, 11);
r.CheckCallViaJS(-8723487, -8000000, 723487);
@@ -76,7 +76,7 @@ WASM_COMPILED_EXEC_TEST(Run_Int32Sub_jswrapped) {
WASM_COMPILED_EXEC_TEST(Run_Float32Div_jswrapped) {
WasmRunner<float, float, float> r(execution_tier);
- BUILD(r, WASM_F32_DIV(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F32_DIV(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
r.CheckCallViaJS(92, 46, 0.5);
r.CheckCallViaJS(64, -16, -0.25);
@@ -84,7 +84,7 @@ WASM_COMPILED_EXEC_TEST(Run_Float32Div_jswrapped) {
WASM_COMPILED_EXEC_TEST(Run_Float64Add_jswrapped) {
WasmRunner<double, double, double> r(execution_tier);
- BUILD(r, WASM_F64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F64_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
r.CheckCallViaJS(3, 2, 1);
r.CheckCallViaJS(-5.5, -5.25, -0.25);
@@ -92,7 +92,7 @@ WASM_COMPILED_EXEC_TEST(Run_Float64Add_jswrapped) {
WASM_COMPILED_EXEC_TEST(Run_I32Popcount_jswrapped) {
WasmRunner<int, int> r(execution_tier);
- BUILD(r, WASM_I32_POPCNT(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_POPCNT(WASM_LOCAL_GET(0))});
r.CheckCallViaJS(2, 9);
r.CheckCallViaJS(3, 11);
@@ -107,9 +107,9 @@ WASM_COMPILED_EXEC_TEST(Run_CallJS_Add_jswrapped) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
ManuallyImportedJSFunction import = {sigs.i_i(), js_function};
- WasmRunner<int, int> r(execution_tier, &import);
+ WasmRunner<int, int> r(execution_tier, kWasmOrigin, &import);
uint32_t js_index = 0;
- BUILD(r, WASM_CALL_FUNCTION(js_index, WASM_LOCAL_GET(0)));
+ r.Build({WASM_CALL_FUNCTION(js_index, WASM_LOCAL_GET(0))});
r.CheckCallViaJS(101, 2);
r.CheckCallViaJS(199, 100);
@@ -127,7 +127,7 @@ void RunJSSelectTest(TestExecutionTier tier, int which) {
FunctionSig sig(1, num_params, types);
ManuallyImportedJSFunction import = CreateJSSelector(&sig, which);
- WasmRunner<void> r(tier, &import);
+ WasmRunner<void> r(tier, kWasmOrigin, &import);
uint32_t js_index = 0;
WasmFunctionCompiler& t = r.NewFunction(&sig);
@@ -143,7 +143,7 @@ void RunJSSelectTest(TestExecutionTier tier, int which) {
size_t end = code.size();
code.push_back(0);
- t.Build(&code[0], &code[end]);
+ t.Build(base::VectorOf(code.data(), end));
}
double expected = inputs.arg_d(which);
@@ -203,7 +203,7 @@ void RunWASMSelectTest(TestExecutionTier tier, int which) {
WasmRunner<void> r(tier);
WasmFunctionCompiler& t = r.NewFunction(&sig);
- BUILD(t, WASM_LOCAL_GET(which));
+ t.Build({WASM_LOCAL_GET(which)});
Handle<Object> args[] = {
isolate->factory()->NewNumber(inputs.arg_d(0)),
@@ -275,7 +275,7 @@ void RunWASMSelectAlignTest(TestExecutionTier tier, int num_args,
for (int which = 0; which < num_params; which++) {
WasmRunner<void> r(tier);
WasmFunctionCompiler& t = r.NewFunction(&sig);
- BUILD(t, WASM_LOCAL_GET(which));
+ t.Build({WASM_LOCAL_GET(which)});
Handle<Object> args[] = {isolate->factory()->NewNumber(inputs.arg_d(0)),
isolate->factory()->NewNumber(inputs.arg_d(1)),
@@ -387,9 +387,9 @@ void RunJSSelectAlignTest(TestExecutionTier tier, int num_args,
for (int which = 0; which < num_params; which++) {
HandleScope scope(isolate);
ManuallyImportedJSFunction import = CreateJSSelector(&sig, which);
- WasmRunner<void> r(tier, &import);
+ WasmRunner<void> r(tier, kWasmOrigin, &import);
WasmFunctionCompiler& t = r.NewFunction(&sig);
- t.Build(&code[0], &code[end]);
+ t.Build(base::VectorOf(code.data(), end));
Handle<Object> args[] = {
factory->NewNumber(inputs.arg_d(0)),
@@ -490,7 +490,7 @@ void RunPickerTest(TestExecutionTier tier, bool indirect) {
ManuallyImportedJSFunction import = {sigs.i_iii(), js_function};
- WasmRunner<int32_t, int32_t> r(tier, &import);
+ WasmRunner<int32_t, int32_t> r(tier, kWasmOrigin, &import);
const uint32_t js_index = 0;
const int32_t left = -2;
@@ -505,13 +505,12 @@ void RunPickerTest(TestExecutionTier tier, bool indirect) {
r.builder().AddIndirectFunctionTable(indirect_function_table,
arraysize(indirect_function_table));
- BUILD(rc_fn, WASM_RETURN_CALL_INDIRECT(sig_index, WASM_I32V(left),
- WASM_I32V(right), WASM_LOCAL_GET(0),
- WASM_I32V(js_index)));
+ rc_fn.Build(
+ {WASM_RETURN_CALL_INDIRECT(sig_index, WASM_I32V(left), WASM_I32V(right),
+ WASM_LOCAL_GET(0), WASM_I32V(js_index))});
} else {
- BUILD(rc_fn,
- WASM_RETURN_CALL_FUNCTION(js_index, WASM_I32V(left), WASM_I32V(right),
- WASM_LOCAL_GET(0)));
+ rc_fn.Build({WASM_RETURN_CALL_FUNCTION(
+ js_index, WASM_I32V(left), WASM_I32V(right), WASM_LOCAL_GET(0))});
}
Handle<Object> args_left[] = {isolate->factory()->NewNumber(1)};
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
index 73a670a6b3..633dc309f7 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
@@ -17,9 +17,9 @@ template <typename ReturnType, typename... ParamTypes>
class Memory64Runner : public WasmRunner<ReturnType, ParamTypes...> {
public:
explicit Memory64Runner(TestExecutionTier execution_tier)
- : WasmRunner<ReturnType, ParamTypes...>(execution_tier, nullptr, "main",
- kNoRuntimeExceptionSupport,
- kMemory64) {
+ : WasmRunner<ReturnType, ParamTypes...>(
+ execution_tier, kWasmOrigin, nullptr, "main",
+ kNoRuntimeExceptionSupport, kMemory64) {
this->builder().EnableFeature(kFeature_memory64);
}
};
@@ -32,7 +32,7 @@ WASM_EXEC_TEST(Load) {
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0))});
CHECK_EQ(0, r.Call(0));
@@ -94,7 +94,7 @@ WASM_EXEC_TEST(MemorySize) {
constexpr int kNumPages = 13;
r.builder().AddMemoryElems<uint8_t>(kNumPages * kWasmPageSize);
- BUILD(r, WASM_MEMORY_SIZE);
+ r.Build({WASM_MEMORY_SIZE});
CHECK_EQ(kNumPages, r.Call());
}
@@ -107,7 +107,7 @@ WASM_EXEC_TEST(MemoryGrow) {
r.builder().SetMaxMemPages(13);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
+ r.Build({WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
CHECK_EQ(1, r.Call(6));
CHECK_EQ(7, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 8de6b4224d..e46be43703 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -138,7 +138,7 @@ TEST(Run_WasmModule_CompilationHintsLazy) {
isolate, &thrower, module.ToHandleChecked(), {}, {});
CHECK(!instance.is_null());
int32_t result = testing::CallWasmFunctionForTesting(
- isolate, instance.ToHandleChecked(), "main", 0, nullptr);
+ isolate, instance.ToHandleChecked(), "main", {});
CHECK_EQ(kReturnValue, result);
// Lazy function was invoked and therefore compiled.
@@ -578,7 +578,7 @@ TEST(TestInterruptLoop) {
InterruptThread thread(isolate, memory_array);
CHECK(thread.Start());
- testing::CallWasmFunctionForTesting(isolate, instance, "main", 0, nullptr);
+ testing::CallWasmFunctionForTesting(isolate, instance, "main", {});
Address address = reinterpret_cast<Address>(
&memory_array[InterruptThread::interrupt_location_]);
CHECK_EQ(InterruptThread::interrupt_value_,
@@ -658,16 +658,17 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
// Initial memory size is 16 pages, should trap till index > MemSize on
// consecutive GrowMem calls
for (uint32_t i = 1; i < 5; i++) {
- Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(i), isolate)};
+ Handle<Object> params[1] = {handle(Smi::FromInt(i), isolate)};
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- testing::CallWasmFunctionForTesting(isolate, instance, "main", 1, params);
+ testing::CallWasmFunctionForTesting(isolate, instance, "main",
+ base::ArrayVector(params));
CHECK(try_catch.HasCaught());
isolate->clear_pending_exception();
}
- Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(1), isolate)};
- int32_t result = testing::CallWasmFunctionForTesting(isolate, instance,
- "main", 1, params);
+ Handle<Object> params[1] = {handle(Smi::FromInt(1), isolate)};
+ int32_t result = testing::CallWasmFunctionForTesting(
+ isolate, instance, "main", base::ArrayVector(params));
CHECK_EQ(0xACED, result);
}
Cleanup();
@@ -708,23 +709,24 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
Handle<Object> params[1] = {
Handle<Object>(Smi::FromInt((16 + i) * kPageSize - 3), isolate)};
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- testing::CallWasmFunctionForTesting(isolate, instance, "main", 1, params);
+ testing::CallWasmFunctionForTesting(isolate, instance, "main",
+ base::ArrayVector(params));
CHECK(try_catch.HasCaught());
isolate->clear_pending_exception();
}
for (int i = 1; i < 5; i++) {
Handle<Object> params[1] = {
- Handle<Object>(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
- int32_t result = testing::CallWasmFunctionForTesting(isolate, instance,
- "main", 1, params);
+ handle(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
+ int32_t result = testing::CallWasmFunctionForTesting(
+ isolate, instance, "main", base::ArrayVector(params));
CHECK_EQ(0xACED, result);
}
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- Handle<Object> params[1] = {
- Handle<Object>(Smi::FromInt(25 * kPageSize), isolate)};
- testing::CallWasmFunctionForTesting(isolate, instance, "main", 1, params);
+ Handle<Object> params[1] = {handle(Smi::FromInt(25 * kPageSize), isolate)};
+ testing::CallWasmFunctionForTesting(isolate, instance, "main",
+ base::ArrayVector(params));
CHECK(try_catch.HasCaught());
isolate->clear_pending_exception();
}
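
In the module tests above, CallWasmFunctionForTesting likewise drops its explicit (count, pointer) argument pair in favour of a single base::Vector built with base::ArrayVector(params). A standalone analogue of that calling convention, using std::span (C++20) in place of V8's base::Vector and a made-up CallForTesting helper:

// Analogy only: std::span stands in for v8::base::Vector, and CallForTesting
// is a hypothetical helper, not the real CallWasmFunctionForTesting.
#include <cstdio>
#include <span>

int CallForTesting(const char* name, std::span<const int> params) {
  int sum = 0;
  for (int p : params) sum += p;  // the length travels with the view
  std::printf("%s called with %zu params\n", name, params.size());
  return sum;
}

int main() {
  int params[] = {7, 9};
  // Old shape:  CallForTesting("main", 2, params);  // count can drift from
  // the array. New shape: the view deduces the count from the array itself.
  return CallForTesting("main", params) == 16 ? 0 : 1;
}
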
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
index 41f4fa4cf0..acaf8b7b05 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
@@ -38,8 +38,6 @@ namespace test_run_wasm_relaxed_simd {
} \
void RunWasm_##name##_Impl(TestExecutionTier execution_tier)
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
// Only used for qfma and qfms tests below.
// FMOperation holds the params (a, b, c) for a Multiply-Add or
@@ -66,23 +64,23 @@ constexpr double large_n<double> = 1e200;
template <>
constexpr float large_n<float> = 1e20;
-// Fused Multiply-Add performs a + b * c.
+// Fused Multiply-Add performs a * b + c.
template <typename T>
static constexpr FMOperation<T> qfma_array[] = {
- {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
- // fused: a + b * c = -inf + (positive overflow) = -inf
- // unfused: a + b * c = -inf + inf = NaN
- {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ {2.0f, 3.0f, 1.0f, 7.0f, 7.0f},
+ // fused: a * b + c = (positive overflow) + -inf = -inf
+ // unfused: a * b + c = inf + -inf = NaN
+ {large_n<T>, large_n<T>, -std::numeric_limits<T>::infinity(),
-std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // fused: a + b * c = inf + (negative overflow) = inf
- // unfused: a + b * c = inf + -inf = NaN
- {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ // fused: a * b + c = (negative overflow) + inf = inf
+ // unfused: a * b + c = -inf + inf = NaN
+ {-large_n<T>, large_n<T>, std::numeric_limits<T>::infinity(),
std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
// NaN
- {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ {2.0f, 3.0f, std::numeric_limits<T>::quiet_NaN(),
std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
// -NaN
- {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ {2.0f, 3.0f, -std::numeric_limits<T>::quiet_NaN(),
std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
template <typename T>
@@ -90,23 +88,23 @@ static constexpr base::Vector<const FMOperation<T>> qfma_vector() {
return base::ArrayVector(qfma_array<T>);
}
-// Fused Multiply-Subtract performs a - b * c.
+// Fused Multiply-Subtract performs -(a * b) + c.
template <typename T>
static constexpr FMOperation<T> qfms_array[]{
- {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
- // fused: a - b * c = inf - (positive overflow) = inf
- // unfused: a - b * c = inf - inf = NaN
- {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ {2.0f, 3.0f, 1.0f, -5.0f, -5.0f},
+ // fused: -(a * b) + c = - (positive overflow) + inf = inf
+ // unfused: -(a * b) + c = - inf + inf = NaN
+ {large_n<T>, large_n<T>, std::numeric_limits<T>::infinity(),
std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // fused: a - b * c = -inf - (negative overflow) = -inf
- // unfused: a - b * c = -inf - -inf = NaN
- {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ // fused: -(a * b) + c = (negative overflow) + -inf = -inf
+ // unfused: -(a * b) + c = -inf - -inf = NaN
+ {-large_n<T>, large_n<T>, -std::numeric_limits<T>::infinity(),
-std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
// NaN
- {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ {2.0f, 3.0f, std::numeric_limits<T>::quiet_NaN(),
std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
// -NaN
- {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ {2.0f, 3.0f, -std::numeric_limits<T>::quiet_NaN(),
std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
template <typename T>
@@ -114,35 +112,38 @@ static constexpr base::Vector<const FMOperation<T>> qfms_vector() {
return base::ArrayVector(qfms_array<T>);
}
-// Fused results only when fma3 feature is enabled, and running on TurboFan or
-// Liftoff (which can fall back to TurboFan if FMA is not implemented).
bool ExpectFused(TestExecutionTier tier) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ // Fused results only when fma3 feature is enabled, and running on TurboFan or
+ // Liftoff (which can fall back to TurboFan if FMA is not implemented).
return CpuFeatures::IsSupported(FMA3) &&
(tier == TestExecutionTier::kTurbofan ||
tier == TestExecutionTier::kLiftoff);
+#elif V8_TARGET_ARCH_ARM
+ // Consistent feature detection for Neonv2 is required before emitting
+ // fused instructions on Arm32. Not all Neon enabled Arm32 devices have
+ // FMA instructions.
+ return false;
#else
+ // All ARM64 Neon enabled devices have support for FMA instructions, only the
+ // Liftoff/Turbofan tiers emit codegen for fused results.
return (tier == TestExecutionTier::kTurbofan ||
tier == TestExecutionTier::kLiftoff);
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
- // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
WASM_RELAXED_SIMD_TEST(F32x4Qfma) {
WasmRunner<int32_t, float, float, float> r(execution_tier);
// Set up global to hold mask output.
float* g = r.builder().AddGlobal<float>(kWasmS128);
// Build fn to splat test values, perform compare op, and write the result.
byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE});
for (FMOperation<float> x : qfma_vector<float>()) {
r.Call(x.a, x.b, x.c);
@@ -161,12 +162,12 @@ WASM_RELAXED_SIMD_TEST(F32x4Qfms) {
float* g = r.builder().AddGlobal<float>(kWasmS128);
// Build fn to splat test values, perform compare op, and write the result.
byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE});
for (FMOperation<float> x : qfms_vector<float>()) {
r.Call(x.a, x.b, x.c);
@@ -185,12 +186,12 @@ WASM_RELAXED_SIMD_TEST(F64x2Qfma) {
double* g = r.builder().AddGlobal<double>(kWasmS128);
// Build fn to splat test values, perform compare op, and write the result.
byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE});
for (FMOperation<double> x : qfma_vector<double>()) {
r.Call(x.a, x.b, x.c);
@@ -209,12 +210,12 @@ WASM_RELAXED_SIMD_TEST(F64x2Qfms) {
double* g = r.builder().AddGlobal<double>(kWasmS128);
// Build fn to splat test values, perform compare op, and write the result.
byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE});
for (FMOperation<double> x : qfms_vector<double>()) {
r.Call(x.a, x.b, x.c);
@@ -235,16 +236,16 @@ TEST(RunWasm_RegressFmaReg_liftoff) {
byte local = r.AllocateLocal(kWasmS128);
float* g = r.builder().AddGlobal<float>(kWasmS128);
byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- // Get the first arg from a local so that the register is blocked even
- // after the arguments have been popped off the stack. This ensures that
- // the first source register is not also the destination.
- WASM_LOCAL_SET(local, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
- WASM_LOCAL_GET(local),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
+ r.Build(
+ {// Get the first arg from a local so that the register is blocked even
+ // after the arguments have been popped off the stack. This ensures that
+ // the first source register is not also the destination.
+ WASM_LOCAL_SET(local, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
+ WASM_LOCAL_GET(local),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE});
for (FMOperation<float> x : qfma_vector<float>()) {
r.Call(x.a, x.b, x.c);
@@ -256,8 +257,6 @@ TEST(RunWasm_RegressFmaReg_liftoff) {
}
}
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
- // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_RISCV64
namespace {
// Helper to convert an array of T into an array of uint8_t to be used a v128
@@ -280,11 +279,10 @@ void RelaxedLaneSelectTest(TestExecutionTier execution_tier, const T v1[kElems],
auto mask = as_uint8<T>(s);
WasmRunner<int32_t> r(execution_tier);
T* dst = r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_OPN(laneselect, WASM_SIMD_CONSTANT(lhs),
- WASM_SIMD_CONSTANT(rhs),
- WASM_SIMD_CONSTANT(mask))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_OPN(laneselect, WASM_SIMD_CONSTANT(lhs),
+ WASM_SIMD_CONSTANT(rhs),
+ WASM_SIMD_CONSTANT(mask))),
+ WASM_ONE});
CHECK_EQ(1, r.Call());
for (int i = 0; i < kElems; i++) {
@@ -373,11 +371,10 @@ void IntRelaxedTruncFloatTest(TestExecutionTier execution_tier,
constexpr int lanes = kSimd128Size / sizeof(FloatType);
// global[0] = trunc(splat(local[0])).
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(trunc_op,
- WASM_SIMD_UNOP(splat_op, WASM_LOCAL_GET(0)))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(trunc_op,
+ WASM_SIMD_UNOP(splat_op, WASM_LOCAL_GET(0)))),
+ WASM_ONE});
for (FloatType x : compiler::ValueHelper::GetVector<FloatType>()) {
if (ShouldSkipTestingConstant<IntType>(x)) continue;
@@ -417,11 +414,10 @@ WASM_RELAXED_SIMD_TEST(I8x16RelaxedSwizzle) {
uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
uint8_t* src = r.builder().AddGlobal<uint8_t>(kWasmS128);
uint8_t* indices = r.builder().AddGlobal<uint8_t>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI8x16RelaxedSwizzle, WASM_GLOBAL_GET(1),
- WASM_GLOBAL_GET(2))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(kExprI8x16RelaxedSwizzle, WASM_GLOBAL_GET(1),
+ WASM_GLOBAL_GET(2))),
+ WASM_ONE});
for (int i = 0; i < kElems; i++) {
LANE(src, i) = kElems - i - 1;
LANE(indices, i) = kElems - i - 1;
@@ -440,12 +436,12 @@ WASM_RELAXED_SIMD_TEST(I16x8RelaxedQ15MulRS) {
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI16x8RelaxedQ15MulRS, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(kExprI16x8RelaxedQ15MulRS,
+ WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
for (int16_t x : compiler::ValueHelper::GetVector<int16_t>()) {
for (int16_t y : compiler::ValueHelper::GetVector<int16_t>()) {
@@ -465,19 +461,18 @@ WASM_RELAXED_SIMD_TEST(I16x8RelaxedQ15MulRS) {
}
}
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
WASM_RELAXED_SIMD_TEST(I16x8DotI8x16I7x16S) {
WasmRunner<int32_t, int8_t, int8_t> r(execution_tier);
int16_t* g = r.builder().template AddGlobal<int16_t>(kWasmS128);
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI16x8DotI8x16I7x16S, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(kExprI16x8DotI8x16I7x16S,
+ WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
for (int8_t x : compiler::ValueHelper::GetVector<int8_t>()) {
for (int8_t y : compiler::ValueHelper::GetVector<int8_t>()) {
@@ -490,9 +485,7 @@ WASM_RELAXED_SIMD_TEST(I16x8DotI8x16I7x16S) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-#if V8_TARGET_ARCH_ARM64
WASM_RELAXED_SIMD_TEST(I32x4DotI8x16I7x16AddS) {
WasmRunner<int32_t, int8_t, int8_t, int32_t> r(execution_tier);
int32_t* g = r.builder().template AddGlobal<int32_t>(kWasmS128);
@@ -500,21 +493,21 @@ WASM_RELAXED_SIMD_TEST(I32x4DotI8x16I7x16AddS) {
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(
- r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value3))),
- WASM_GLOBAL_SET(0, WASM_SIMD_TERNOP(
- kExprI32x4DotI8x16I7x16AddS, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2), WASM_LOCAL_GET(temp3))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp3, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value3))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_TERNOP(kExprI32x4DotI8x16I7x16AddS,
+ WASM_LOCAL_GET(temp1), WASM_LOCAL_GET(temp2),
+ WASM_LOCAL_GET(temp3))),
+ WASM_ONE});
for (int8_t x : compiler::ValueHelper::GetVector<int8_t>()) {
for (int8_t y : compiler::ValueHelper::GetVector<int8_t>()) {
for (int32_t z : compiler::ValueHelper::GetVector<int32_t>()) {
- r.Call(x, y & 0x7F, z);
int32_t expected = base::AddWithWraparound(
base::MulWithWraparound(x * (y & 0x7F), 4), z);
+ r.Call(x, y & 0x7F, z);
for (int i = 0; i < 4; i++) {
CHECK_EQ(expected, LANE(g, i));
}
@@ -522,7 +515,6 @@ WASM_RELAXED_SIMD_TEST(I32x4DotI8x16I7x16AddS) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM64
#undef WASM_RELAXED_SIMD_TEST
} // namespace test_run_wasm_relaxed_simd
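
The qfma/qfms tables above are rewritten so the operation reads a * b + c (and -(a * b) + c), and the comments spell out where fused and unfused evaluation diverge: the fused form keeps the exact product, so an overflowing a * b does not round to infinity before c is added. A small scalar illustration of that divergence with std::fma — plain C++ reusing the table's 1e20 float constant, not the SIMD test itself:

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  const float large_n = 1e20f;  // as in the qfma/qfms tables above
  const float neg_inf = -std::numeric_limits<float>::infinity();

  // Unfused: a * b is rounded to float first, overflowing to +inf, and
  // +inf + -inf is NaN.  (volatile forces the intermediate float rounding.)
  volatile float product = large_n * large_n;
  float unfused = product + neg_inf;

  // Fused: the exact product (1e40) is kept internally, so adding -inf
  // yields -inf -- there is no intermediate rounding to +inf.
  float fused = std::fma(large_n, large_n, neg_inf);

  std::printf("unfused: %f (NaN? %d)\n", unfused, std::isnan(unfused));
  std::printf("fused:   %f\n", fused);  // -inf
  return 0;
}
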
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc b/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
index d886279070..2e4d405e14 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
@@ -12,7 +12,7 @@ namespace wasm {
WASM_EXEC_TEST(I32SExtendI8) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_SIGN_EXT_I8(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_SIGN_EXT_I8(WASM_LOCAL_GET(0))});
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
@@ -22,7 +22,7 @@ WASM_EXEC_TEST(I32SExtendI8) {
WASM_EXEC_TEST(I32SExtendI16) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_SIGN_EXT_I16(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_SIGN_EXT_I16(WASM_LOCAL_GET(0))});
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
@@ -32,7 +32,7 @@ WASM_EXEC_TEST(I32SExtendI16) {
WASM_EXEC_TEST(I64SExtendI8) {
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SIGN_EXT_I8(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SIGN_EXT_I8(WASM_LOCAL_GET(0))});
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
@@ -42,7 +42,7 @@ WASM_EXEC_TEST(I64SExtendI8) {
WASM_EXEC_TEST(I64SExtendI16) {
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SIGN_EXT_I16(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SIGN_EXT_I16(WASM_LOCAL_GET(0))});
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
@@ -52,7 +52,7 @@ WASM_EXEC_TEST(I64SExtendI16) {
WASM_EXEC_TEST(I64SExtendI32) {
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_SIGN_EXT_I32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I64_SIGN_EXT_I32(WASM_LOCAL_GET(0))});
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
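
The sign-extension hunks only change how the body bytes are handed to the runner; the operations under test (i32.extend8_s and friends) still take the low bits of the operand and propagate the sign bit into the upper bits. The same arithmetic in plain C++, with an int8_t cast standing in for the wasm opcode and hand-picked assert values (not the test's own constants):

#include <cassert>
#include <cstdint>

// Reinterpret the low 8 bits as a signed byte and widen back to 32 bits,
// assuming two's-complement wraparound on the cast.
int32_t SignExtend8To32(int32_t x) {
  return static_cast<int32_t>(static_cast<int8_t>(x & 0xFF));
}

int main() {
  assert(SignExtend8To32(0) == 0);
  assert(SignExtend8To32(1) == 1);
  assert(SignExtend8To32(-1) == -1);      // 0xFF -> -1
  assert(SignExtend8To32(0x7F) == 127);   // high bit clear: unchanged
  assert(SignExtend8To32(0x80) == -128);  // high bit set: sign-extended
  return 0;
}
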
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc
index 573cedb90a..1f6ddd519f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc
@@ -24,7 +24,7 @@ namespace test_run_wasm_simd_liftoff {
TEST(S128Local) {
WasmRunner<int32_t> r(TestExecutionTier::kLiftoff);
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_LOCAL_GET(temp1)), WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_LOCAL_GET(temp1)), WASM_ONE});
CHECK_EQ(1, r.Call());
}
@@ -33,7 +33,7 @@ TEST(S128Global) {
int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(1, WASM_GLOBAL_GET(0)), WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(1, WASM_GLOBAL_GET(0)), WASM_ONE});
int32_t expected = 0x1234;
for (int i = 0; i < 4; i++) {
@@ -56,10 +56,10 @@ TEST(S128Param) {
// Liftoff does not support any SIMD operations.
byte temp1 = r.AllocateLocal(kWasmS128);
WasmFunctionCompiler& simd_func = r.NewFunction(sigs.i_s());
- BUILD(simd_func, WASM_ONE);
+ simd_func.Build({WASM_ONE});
- BUILD(r,
- WASM_CALL_FUNCTION(simd_func.function_index(), WASM_LOCAL_GET(temp1)));
+ r.Build(
+ {WASM_CALL_FUNCTION(simd_func.function_index(), WASM_LOCAL_GET(temp1))});
CHECK_EQ(1, r.Call());
}
@@ -70,10 +70,10 @@ TEST(S128Return) {
TestSignatures sigs;
WasmFunctionCompiler& simd_func = r.NewFunction(sigs.s_i());
byte temp1 = simd_func.AllocateLocal(kWasmS128);
- BUILD(simd_func, WASM_LOCAL_GET(temp1));
+ simd_func.Build({WASM_LOCAL_GET(temp1)});
- BUILD(r, WASM_CALL_FUNCTION(simd_func.function_index(), WASM_ONE), kExprDrop,
- WASM_ONE);
+ r.Build({WASM_CALL_FUNCTION(simd_func.function_index(), WASM_ONE), kExprDrop,
+ WASM_ONE});
CHECK_EQ(1, r.Call());
}
@@ -89,11 +89,11 @@ TEST(REGRESS_1088273) {
TestSignatures sigs;
WasmFunctionCompiler& simd_func = r.NewFunction(sigs.s_i());
byte temp1 = simd_func.AllocateLocal(kWasmS128);
- BUILD(simd_func, WASM_LOCAL_GET(temp1));
+ simd_func.Build({WASM_LOCAL_GET(temp1)});
- BUILD(r, WASM_SIMD_SPLAT(I8x16, WASM_I32V(0x80)),
- WASM_SIMD_SPLAT(I8x16, WASM_I32V(0x92)),
- WASM_SIMD_I16x8_EXTRACT_LANE_U(0, WASM_SIMD_OP(kExprI64x2Mul)));
+ r.Build({WASM_SIMD_SPLAT(I8x16, WASM_I32V(0x80)),
+ WASM_SIMD_SPLAT(I8x16, WASM_I32V(0x92)),
+ WASM_SIMD_I16x8_EXTRACT_LANE_U(0, WASM_SIMD_OP(kExprI64x2Mul))});
CHECK_EQ(18688, r.Call());
}
@@ -125,13 +125,14 @@ TEST(I8x16Shuffle) {
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31}};
// Set up locals so shuffle is called with non-adjacent registers v2 and v0.
- BUILD(r, WASM_LOCAL_SET(local0, WASM_GLOBAL_GET(1)), // local0 is in v0
- WASM_LOCAL_SET(local1, WASM_GLOBAL_GET(0)), // local1 is in v1
- WASM_GLOBAL_GET(0), // global0 is in v2
- WASM_LOCAL_GET(local0), // local0 is in v0
- WASM_GLOBAL_SET(2, WASM_SIMD_I8x16_SHUFFLE_OP(
- kExprI8x16Shuffle, pattern, WASM_NOP, WASM_NOP)),
- WASM_ONE);
+ r.Build(
+ {WASM_LOCAL_SET(local0, WASM_GLOBAL_GET(1)), // local0 is in v0
+ WASM_LOCAL_SET(local1, WASM_GLOBAL_GET(0)), // local1 is in v1
+ WASM_GLOBAL_GET(0), // global0 is in v2
+ WASM_LOCAL_GET(local0), // local0 is in v0
+ WASM_GLOBAL_SET(2, WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, pattern,
+ WASM_NOP, WASM_NOP)),
+ WASM_ONE});
r.Call();
@@ -163,11 +164,12 @@ TEST(I8x16Shuffle_SingleOperand) {
{31, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}};
// Set up locals so shuffle is called with non-adjacent registers v2 and v0.
- BUILD(r, WASM_LOCAL_SET(local0, WASM_GLOBAL_GET(0)), WASM_LOCAL_GET(local0),
- WASM_LOCAL_GET(local0),
- WASM_GLOBAL_SET(1, WASM_SIMD_I8x16_SHUFFLE_OP(
- kExprI8x16Shuffle, pattern, WASM_NOP, WASM_NOP)),
- WASM_ONE);
+ r.Build(
+ {WASM_LOCAL_SET(local0, WASM_GLOBAL_GET(0)), WASM_LOCAL_GET(local0),
+ WASM_LOCAL_GET(local0),
+ WASM_GLOBAL_SET(1, WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, pattern,
+ WASM_NOP, WASM_NOP)),
+ WASM_ONE});
r.Call();
@@ -198,17 +200,41 @@ TEST(FillStackSlotsWithZero_CheckStartOffset) {
// remainder, 8 in this case, so we hit the case where we use str.
simd_func.AllocateLocal(kWasmS128);
simd_func.AllocateLocal(kWasmI64);
- BUILD(simd_func, WASM_I64V_1(1));
-
- BUILD(r, WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
- WASM_CALL_FUNCTION0(simd_func.function_index()));
+ simd_func.Build({WASM_I64V_1(1)});
+
+ r.Build({WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_I64V_1(1),
+ WASM_CALL_FUNCTION0(simd_func.function_index())});
CHECK_EQ(1, r.Call());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 2ada29591b..c353d8ec49 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -238,7 +238,7 @@ WASM_EXEC_TEST(S128Globals) {
// Set up a global to hold input and output vectors.
int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(1, WASM_GLOBAL_GET(0)), WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(1, WASM_GLOBAL_GET(0)), WASM_ONE});
FOR_INT32_INPUTS(x) {
for (int i = 0; i < 4; i++) {
@@ -258,8 +258,8 @@ WASM_EXEC_TEST(F32x4Splat) {
// Set up a global to hold output vector.
float* g = r.builder().AddGlobal<float>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(param1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(param1))),
+ WASM_ONE});
FOR_FLOAT32_INPUTS(x) {
r.Call(x);
@@ -281,16 +281,16 @@ WASM_EXEC_TEST(F32x4ReplaceLane) {
float* g = r.builder().AddGlobal<float>(kWasmS128);
// Build function to replace each lane with its (FP) index.
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_F32(3.14159f))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
- 0, WASM_LOCAL_GET(temp1), WASM_F32(0.0f))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
- 1, WASM_LOCAL_GET(temp1), WASM_F32(1.0f))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
- 2, WASM_LOCAL_GET(temp1), WASM_F32(2.0f))),
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(
- 3, WASM_LOCAL_GET(temp1), WASM_F32(3.0f))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_F32(3.14159f))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(temp1), WASM_F32(0.0f))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(temp1), WASM_F32(1.0f))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
+ 2, WASM_LOCAL_GET(temp1), WASM_F32(2.0f))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(
+ 3, WASM_LOCAL_GET(temp1), WASM_F32(3.0f))),
+ WASM_ONE});
r.Call();
for (int i = 0; i < 4; i++) {
@@ -307,12 +307,12 @@ WASM_EXEC_TEST(F32x4ConvertI32x4) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4, WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(
- 1, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4,
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT32_INPUTS(x) {
r.Call(x);
@@ -344,15 +344,15 @@ void RunF128CompareOpConstImmTest(
WriteLittleEndianValue<FloatType>(
base::bit_cast<FloatType*>(&const_buffer[0]) + i, x);
}
- BUILD(r,
- WASM_LOCAL_SET(temp,
- WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(cmp_opcode, WASM_SIMD_CONSTANT(const_buffer),
- WASM_LOCAL_GET(temp))),
- WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(cmp_opcode, WASM_LOCAL_GET(temp),
- WASM_SIMD_CONSTANT(const_buffer))),
- WASM_ONE);
+ r.Build(
+ {WASM_LOCAL_SET(temp,
+ WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(cmp_opcode, WASM_SIMD_CONSTANT(const_buffer),
+ WASM_LOCAL_GET(temp))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(cmp_opcode, WASM_LOCAL_GET(temp),
+ WASM_SIMD_CONSTANT(const_buffer))),
+ WASM_ONE});
for (FloatType y : compiler::ValueHelper::GetVector<FloatType>()) {
if (!PlatformCanRepresent(y)) continue;
FloatType diff = x - y; // Model comparison as subtraction.
@@ -462,21 +462,22 @@ void RunShiftAddTestSequence(TestExecutionTier execution_tier,
auto expected_fn = [shift_fn](ScalarType x, ScalarType y, uint32_t imm) {
return base::AddWithWraparound(x, shift_fn(y, imm));
};
- BUILD(
- r,
- WASM_LOCAL_SET(temp1, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(param))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(param))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(add_opcode,
- WASM_SIMD_BINOP(shiftr_opcode,
- WASM_LOCAL_GET(temp2),
- WASM_I32V(imm)),
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(add_opcode, WASM_LOCAL_GET(temp1),
- WASM_SIMD_BINOP(shiftr_opcode,
- WASM_LOCAL_GET(temp2),
- WASM_I32V(imm)))),
-
- WASM_ONE);
+ r.Build(
+ {WASM_LOCAL_SET(temp1,
+ WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(param))),
+ WASM_LOCAL_SET(temp2,
+ WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(param))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(add_opcode,
+ WASM_SIMD_BINOP(shiftr_opcode,
+ WASM_LOCAL_GET(temp2),
+ WASM_I32V(imm)),
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(add_opcode, WASM_LOCAL_GET(temp1),
+ WASM_SIMD_BINOP(shiftr_opcode,
+ WASM_LOCAL_GET(temp2),
+ WASM_I32V(imm)))),
+
+ WASM_ONE});
for (ScalarType x : compiler::ValueHelper::GetVector<ScalarType>()) {
r.Call(x);
ScalarType expected = expected_fn(x, x, imm);
@@ -522,8 +523,8 @@ WASM_EXEC_TEST(I64x2Splat) {
// Set up a global to hold output vector.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(param1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(param1))),
+ WASM_ONE});
FOR_INT64_INPUTS(x) {
r.Call(x);
@@ -539,12 +540,11 @@ WASM_EXEC_TEST(I64x2ExtractLane) {
WasmRunner<int64_t> r(execution_tier);
r.AllocateLocal(kWasmI64);
r.AllocateLocal(kWasmS128);
- BUILD(
- r,
- WASM_LOCAL_SET(0, WASM_SIMD_I64x2_EXTRACT_LANE(
- 0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(0xFFFFFFFFFF)))),
- WASM_LOCAL_SET(1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
- WASM_SIMD_I64x2_EXTRACT_LANE(1, WASM_LOCAL_GET(1)));
+ r.Build({WASM_LOCAL_SET(
+ 0, WASM_SIMD_I64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(0xFFFFFFFFFF)))),
+ WASM_LOCAL_SET(1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_SIMD_I64x2_EXTRACT_LANE(1, WASM_LOCAL_GET(1))});
CHECK_EQ(0xFFFFFFFFFF, r.Call());
}
@@ -554,12 +554,12 @@ WASM_EXEC_TEST(I64x2ReplaceLane) {
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
// Build function to replace each lane with its index.
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
- 0, WASM_LOCAL_GET(temp1), WASM_I64V(0))),
- WASM_GLOBAL_SET(0, WASM_SIMD_I64x2_REPLACE_LANE(
- 1, WASM_LOCAL_GET(temp1), WASM_I64V(1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(temp1), WASM_I64V(0))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_I64x2_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(temp1), WASM_I64V(1))),
+ WASM_ONE});
r.Call();
for (int64_t i = 0; i < 2; i++) {
@@ -651,15 +651,15 @@ void RunICompareOpConstImmTest(TestExecutionTier execution_tier,
WriteLittleEndianValue<ScalarType>(
base::bit_cast<ScalarType*>(&const_buffer[0]) + i, x);
}
- BUILD(r,
- WASM_LOCAL_SET(temp,
- WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(cmp_opcode, WASM_SIMD_CONSTANT(const_buffer),
- WASM_LOCAL_GET(temp))),
- WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(cmp_opcode, WASM_LOCAL_GET(temp),
- WASM_SIMD_CONSTANT(const_buffer))),
- WASM_ONE);
+ r.Build(
+ {WASM_LOCAL_SET(temp,
+ WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(cmp_opcode, WASM_SIMD_CONSTANT(const_buffer),
+ WASM_LOCAL_GET(temp))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(cmp_opcode, WASM_LOCAL_GET(temp),
+ WASM_SIMD_CONSTANT(const_buffer))),
+ WASM_ONE});
for (ScalarType y : compiler::ValueHelper::GetVector<ScalarType>()) {
r.Call(y);
ScalarType expected1 = expected_op(x, y);
@@ -709,8 +709,8 @@ WASM_EXEC_TEST(F64x2Splat) {
// Set up a global to hold output vector.
double* g = r.builder().AddGlobal<double>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(param1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(param1))),
+ WASM_ONE});
FOR_FLOAT64_INPUTS(x) {
r.Call(x);
@@ -731,12 +731,11 @@ WASM_EXEC_TEST(F64x2ExtractLane) {
byte param1 = 0;
byte temp1 = r.AllocateLocal(kWasmF64);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r,
- WASM_LOCAL_SET(temp1,
- WASM_SIMD_F64x2_EXTRACT_LANE(
- 0, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(param1)))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(temp1))),
- WASM_SIMD_F64x2_EXTRACT_LANE(1, WASM_LOCAL_GET(temp2)));
+ r.Build({WASM_LOCAL_SET(
+ temp1, WASM_SIMD_F64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(param1)))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(temp1))),
+ WASM_SIMD_F64x2_EXTRACT_LANE(1, WASM_LOCAL_GET(temp2))});
FOR_FLOAT64_INPUTS(x) {
double actual = r.Call(x);
double expected = x;
@@ -755,14 +754,14 @@ WASM_EXEC_TEST(F64x2ReplaceLane) {
double* g1 = r.builder().AddGlobal<double>(kWasmS128);
// Build function to replace each lane with its (FP) index.
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e100))),
- // Replace lane 0.
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_REPLACE_LANE(
- 0, WASM_LOCAL_GET(temp1), WASM_F64(0.0f))),
- // Replace lane 1.
- WASM_GLOBAL_SET(1, WASM_SIMD_F64x2_REPLACE_LANE(
- 1, WASM_LOCAL_GET(temp1), WASM_F64(1.0f))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e100))),
+ // Replace lane 0.
+ WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(temp1), WASM_F64(0.0f))),
+ // Replace lane 1.
+ WASM_GLOBAL_SET(1, WASM_SIMD_F64x2_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(temp1), WASM_F64(1.0f))),
+ WASM_ONE});
r.Call();
CHECK_EQ(0., LANE(g0, 0));
@@ -773,21 +772,21 @@ WASM_EXEC_TEST(F64x2ReplaceLane) {
WASM_EXEC_TEST(F64x2ExtractLaneWithI64x2) {
WasmRunner<int64_t> r(execution_tier);
- BUILD(r, WASM_IF_ELSE_L(
- WASM_F64_EQ(WASM_SIMD_F64x2_EXTRACT_LANE(
- 0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(1e15))),
- WASM_F64_REINTERPRET_I64(WASM_I64V(1e15))),
- WASM_I64V(1), WASM_I64V(0)));
+ r.Build({WASM_IF_ELSE_L(
+ WASM_F64_EQ(WASM_SIMD_F64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(1e15))),
+ WASM_F64_REINTERPRET_I64(WASM_I64V(1e15))),
+ WASM_I64V(1), WASM_I64V(0))});
CHECK_EQ(1, r.Call());
}
WASM_EXEC_TEST(I64x2ExtractWithF64x2) {
WasmRunner<int64_t> r(execution_tier);
- BUILD(r, WASM_IF_ELSE_L(
- WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
- 0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
- WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
- WASM_I64V(1), WASM_I64V(0)));
+ r.Build(
+ {WASM_IF_ELSE_L(WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
+ WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
+ WASM_I64V(1), WASM_I64V(0))});
CHECK_EQ(1, r.Call());
}
@@ -824,15 +823,14 @@ void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
WasmOpcode opcode) {
WasmRunner<int32_t, SrcType> r(execution_tier);
double* g = r.builder().template AddGlobal<double>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0,
- WASM_SIMD_UNOP(
- opcode,
- // Set top lane of i64x2 == set top 2 lanes of i32x4.
- WASM_SIMD_I64x2_REPLACE_LANE(
- 1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)), WASM_ZERO64))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(
+ opcode,
+ // Set top lane of i64x2 == set top 2 lanes of i32x4.
+ WASM_SIMD_I64x2_REPLACE_LANE(
+ 1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
+ WASM_ZERO64))),
+ WASM_ONE});
for (SrcType x : compiler::ValueHelper::GetVector<SrcType>()) {
r.Call(x);
@@ -859,11 +857,9 @@ void RunI32x4TruncSatF64x2Test(TestExecutionTier execution_tier,
WasmOpcode opcode) {
WasmRunner<int32_t, double> r(execution_tier);
SrcType* g = r.builder().AddGlobal<SrcType>(kWasmS128);
- BUILD(
- r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(opcode, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_SIMD_F64x2_SPLAT(
+ WASM_LOCAL_GET(0)))),
+ WASM_ONE});
FOR_FLOAT64_INPUTS(x) {
r.Call(x);
@@ -892,11 +888,10 @@ WASM_EXEC_TEST(I32x4TruncSatF64x2UZero) {
WASM_EXEC_TEST(F32x4DemoteF64x2Zero) {
WasmRunner<int32_t, double> r(execution_tier);
float* g = r.builder().AddGlobal<float>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprF32x4DemoteF64x2Zero,
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF32x4DemoteF64x2Zero,
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_ONE});
FOR_FLOAT64_INPUTS(x) {
r.Call(x);
@@ -915,11 +910,10 @@ WASM_EXEC_TEST(F32x4DemoteF64x2Zero) {
WASM_EXEC_TEST(F64x2PromoteLowF32x4) {
WasmRunner<int32_t, float> r(execution_tier);
double* g = r.builder().AddGlobal<double>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_ONE});
FOR_FLOAT32_INPUTS(x) {
r.Call(x);
@@ -948,12 +942,11 @@ WASM_EXEC_TEST(F64x2PromoteLowF32x4WithS128Load64Zero) {
r.builder().WriteMemory(&memory[3], 8.0f);
// Load at 4 (index) + 4 (offset) bytes, which is 2 floats.
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
- WASM_SIMD_LOAD_OP_OFFSET(kExprS128Load64Zero,
- WASM_I32V(4), 4))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
+ WASM_SIMD_LOAD_OP_OFFSET(kExprS128Load64Zero,
+ WASM_I32V(4), 4))),
+ WASM_ONE});
r.Call();
CHECK_EQ(5.0f, LANE(g, 0));
@@ -965,12 +958,11 @@ WASM_EXEC_TEST(F64x2PromoteLowF32x4WithS128Load64Zero) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddGlobal<double>(kWasmS128);
r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
- WASM_SIMD_LOAD_OP(kExprS128Load64Zero,
- WASM_I32V(kWasmPageSize)))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
+ WASM_SIMD_LOAD_OP(
+ kExprS128Load64Zero,
+ WASM_I32V(kWasmPageSize)))),
+ WASM_ONE});
CHECK_TRAP(r.Call());
}
@@ -1071,8 +1063,8 @@ WASM_EXEC_TEST(I32x4Splat) {
// Set up a global to hold output vector.
int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(param1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(param1))),
+ WASM_ONE});
FOR_INT32_INPUTS(x) {
r.Call(x);
@@ -1090,16 +1082,16 @@ WASM_EXEC_TEST(I32x4ReplaceLane) {
int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
// Build function to replace each lane with its index.
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(-1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
- 0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
- 1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
- 2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_I32x4_REPLACE_LANE(
- 3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(-1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_I32x4_REPLACE_LANE(
+ 3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
+ WASM_ONE});
r.Call();
for (int32_t i = 0; i < 4; i++) {
@@ -1112,8 +1104,8 @@ WASM_EXEC_TEST(I16x8Splat) {
// Set up a global to hold output vector.
int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(param1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(param1))),
+ WASM_ONE});
FOR_INT16_INPUTS(x) {
r.Call(x);
@@ -1141,24 +1133,24 @@ WASM_EXEC_TEST(I16x8ReplaceLane) {
int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
// Build function to replace each lane with its index.
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_I32V(-1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 4, WASM_LOCAL_GET(temp1), WASM_I32V(4))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 5, WASM_LOCAL_GET(temp1), WASM_I32V(5))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
- 6, WASM_LOCAL_GET(temp1), WASM_I32V(6))),
- WASM_GLOBAL_SET(0, WASM_SIMD_I16x8_REPLACE_LANE(
- 7, WASM_LOCAL_GET(temp1), WASM_I32V(7))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_I32V(-1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 4, WASM_LOCAL_GET(temp1), WASM_I32V(4))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 5, WASM_LOCAL_GET(temp1), WASM_I32V(5))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 6, WASM_LOCAL_GET(temp1), WASM_I32V(6))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_I16x8_REPLACE_LANE(
+ 7, WASM_LOCAL_GET(temp1), WASM_I32V(7))),
+ WASM_ONE});
r.Call();
for (int16_t i = 0; i < 8; i++) {
@@ -1170,12 +1162,13 @@ WASM_EXEC_TEST(I8x16BitMask) {
WasmRunner<int32_t, int32_t> r(execution_tier);
byte value1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_REPLACE_LANE(
- 0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_REPLACE_LANE(
- 1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
- WASM_SIMD_UNOP(kExprI8x16BitMask, WASM_LOCAL_GET(value1)));
+ r.Build(
+ {WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
+ WASM_SIMD_UNOP(kExprI8x16BitMask, WASM_LOCAL_GET(value1))});
FOR_INT8_INPUTS(x) {
int32_t actual = r.Call(x);
@@ -1189,12 +1182,13 @@ WASM_EXEC_TEST(I16x8BitMask) {
WasmRunner<int32_t, int32_t> r(execution_tier);
byte value1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_REPLACE_LANE(
- 0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_REPLACE_LANE(
- 1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
- WASM_SIMD_UNOP(kExprI16x8BitMask, WASM_LOCAL_GET(value1)));
+ r.Build(
+ {WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
+ WASM_SIMD_UNOP(kExprI16x8BitMask, WASM_LOCAL_GET(value1))});
FOR_INT16_INPUTS(x) {
int32_t actual = r.Call(x);
@@ -1208,12 +1202,13 @@ WASM_EXEC_TEST(I32x4BitMask) {
WasmRunner<int32_t, int32_t> r(execution_tier);
byte value1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_REPLACE_LANE(
- 0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_REPLACE_LANE(
- 1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
- WASM_SIMD_UNOP(kExprI32x4BitMask, WASM_LOCAL_GET(value1)));
+ r.Build(
+ {WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(value1), WASM_I32V(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(value1), WASM_I32V(-1))),
+ WASM_SIMD_UNOP(kExprI32x4BitMask, WASM_LOCAL_GET(value1))});
FOR_INT32_INPUTS(x) {
int32_t actual = r.Call(x);
@@ -1227,10 +1222,11 @@ WASM_EXEC_TEST(I64x2BitMask) {
WasmRunner<int32_t, int64_t> r(execution_tier);
byte value1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(value1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
- WASM_LOCAL_SET(value1, WASM_SIMD_I64x2_REPLACE_LANE(
- 0, WASM_LOCAL_GET(value1), WASM_I64V_1(0))),
- WASM_SIMD_UNOP(kExprI64x2BitMask, WASM_LOCAL_GET(value1)));
+ r.Build(
+ {WASM_LOCAL_SET(value1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_LOCAL_SET(value1, WASM_SIMD_I64x2_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(value1), WASM_I64V_1(0))),
+ WASM_SIMD_UNOP(kExprI64x2BitMask, WASM_LOCAL_GET(value1))});
for (int64_t x : compiler::ValueHelper::GetVector<int64_t>()) {
int32_t actual = r.Call(x);
@@ -1245,8 +1241,8 @@ WASM_EXEC_TEST(I8x16Splat) {
// Set up a global to hold output vector.
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(param1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(param1))),
+ WASM_ONE});
FOR_INT8_INPUTS(x) {
r.Call(x);
@@ -1274,40 +1270,40 @@ WASM_EXEC_TEST(I8x16ReplaceLane) {
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
// Build function to replace each lane with its index.
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_I32V(-1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 4, WASM_LOCAL_GET(temp1), WASM_I32V(4))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 5, WASM_LOCAL_GET(temp1), WASM_I32V(5))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 6, WASM_LOCAL_GET(temp1), WASM_I32V(6))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 7, WASM_LOCAL_GET(temp1), WASM_I32V(7))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 8, WASM_LOCAL_GET(temp1), WASM_I32V(8))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 9, WASM_LOCAL_GET(temp1), WASM_I32V(9))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 10, WASM_LOCAL_GET(temp1), WASM_I32V(10))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 11, WASM_LOCAL_GET(temp1), WASM_I32V(11))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 12, WASM_LOCAL_GET(temp1), WASM_I32V(12))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 13, WASM_LOCAL_GET(temp1), WASM_I32V(13))),
- WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
- 14, WASM_LOCAL_GET(temp1), WASM_I32V(14))),
- WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_REPLACE_LANE(
- 15, WASM_LOCAL_GET(temp1), WASM_I32V(15))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_I32V(-1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 0, WASM_LOCAL_GET(temp1), WASM_I32V(0))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 1, WASM_LOCAL_GET(temp1), WASM_I32V(1))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 2, WASM_LOCAL_GET(temp1), WASM_I32V(2))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 3, WASM_LOCAL_GET(temp1), WASM_I32V(3))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 4, WASM_LOCAL_GET(temp1), WASM_I32V(4))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 5, WASM_LOCAL_GET(temp1), WASM_I32V(5))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 6, WASM_LOCAL_GET(temp1), WASM_I32V(6))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 7, WASM_LOCAL_GET(temp1), WASM_I32V(7))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 8, WASM_LOCAL_GET(temp1), WASM_I32V(8))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 9, WASM_LOCAL_GET(temp1), WASM_I32V(9))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 10, WASM_LOCAL_GET(temp1), WASM_I32V(10))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 11, WASM_LOCAL_GET(temp1), WASM_I32V(11))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 12, WASM_LOCAL_GET(temp1), WASM_I32V(12))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 13, WASM_LOCAL_GET(temp1), WASM_I32V(13))),
+ WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 14, WASM_LOCAL_GET(temp1), WASM_I32V(14))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_REPLACE_LANE(
+ 15, WASM_LOCAL_GET(temp1), WASM_I32V(15))),
+ WASM_ONE});
r.Call();
for (int8_t i = 0; i < 16; i++) {
@@ -1338,12 +1334,12 @@ WASM_EXEC_TEST(I32x4ConvertF32x4) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4, WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(
- 1, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4,
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_FLOAT32_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
@@ -1368,16 +1364,16 @@ WASM_EXEC_TEST(I32x4ConvertI16x8) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
- WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT16_INPUTS(x) {
r.Call(x);
@@ -1403,16 +1399,16 @@ WASM_EXEC_TEST(I64x2ConvertI32x4) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4High,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4Low,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4High,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4Low,
- WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4High,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4Low,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4High,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4Low,
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT32_INPUTS(x) {
r.Call(x);
@@ -1448,11 +1444,10 @@ void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
WasmRunner<int32_t, Narrow, Narrow> r(execution_tier);
Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
- BUILD(r,
- WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, interleaving_shuffle,
- WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
- WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1))),
- WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE);
+ r.Build({WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, interleaving_shuffle,
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1))),
+ WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE});
auto v = compiler::ValueHelper::GetVector<Narrow>();
// Iterate vector from both ends to try and splat two different values.
@@ -1553,23 +1548,21 @@ void RunS128ConstBinOpTest(TestExecutionTier execution_tier,
}
switch (const_side) {
case kConstLeft:
- BUILD(
- r,
- WASM_LOCAL_SET(temp,
- WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(binop_opcode,
- WASM_SIMD_CONSTANT(const_buffer),
- WASM_LOCAL_GET(temp))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(
+ temp, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(binop_opcode,
+ WASM_SIMD_CONSTANT(const_buffer),
+ WASM_LOCAL_GET(temp))),
+ WASM_ONE});
break;
case kConstRight:
- BUILD(r,
- WASM_LOCAL_SET(
- temp, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(binop_opcode, WASM_LOCAL_GET(temp),
- WASM_SIMD_CONSTANT(const_buffer))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(
+ temp, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(binop_opcode, WASM_LOCAL_GET(temp),
+ WASM_SIMD_CONSTANT(const_buffer))),
+ WASM_ONE});
break;
}
for (ScalarType y : compiler::ValueHelper::GetVector<ScalarType>()) {
@@ -1733,16 +1726,16 @@ WASM_EXEC_TEST(I16x8ConvertI8x16) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
- WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(3, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT8_INPUTS(x) {
r.Call(x);
@@ -1766,14 +1759,14 @@ WASM_EXEC_TEST(I16x8ConvertI32x4) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(
- 1, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4,
+ WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4,
+ WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT32_INPUTS(x) {
r.Call(x);
@@ -1934,15 +1927,14 @@ void RunExtMulTest(TestExecutionTier execution_tier, WasmOpcode opcode,
int lane_to_zero = half == MulHalf::kLow ? 1 : 0;
T* g = r.builder().template AddGlobal<T>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(
- opcode,
- WASM_SIMD_I64x2_REPLACE_LANE(
- lane_to_zero, WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
- WASM_I64V_1(0)),
- WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1)))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(opcode,
+ WASM_SIMD_I64x2_REPLACE_LANE(
+ lane_to_zero,
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
+ WASM_I64V_1(0)),
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1)))),
+ WASM_ONE});
constexpr int lanes = kSimd128Size / sizeof(T);
for (S x : compiler::ValueHelper::GetVector<S>()) {
@@ -2039,14 +2031,14 @@ void RunExtMulAddOptimizationTest(TestExecutionTier execution_tier,
// add(
// splat(local[1]),
// extmul(splat(local[0]), splat(local[0])))
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(
- wide_add, WASM_SIMD_UNOP(wide_splat, WASM_LOCAL_GET(1)),
- WASM_SIMD_BINOP(
- ext_mul, WASM_SIMD_UNOP(narrow_splat, WASM_LOCAL_GET(0)),
- WASM_SIMD_UNOP(narrow_splat, WASM_LOCAL_GET(0))))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(
+ wide_add, WASM_SIMD_UNOP(wide_splat, WASM_LOCAL_GET(1)),
+ WASM_SIMD_BINOP(
+ ext_mul, WASM_SIMD_UNOP(narrow_splat, WASM_LOCAL_GET(0)),
+ WASM_SIMD_UNOP(narrow_splat, WASM_LOCAL_GET(0))))),
+ WASM_ONE});
constexpr int lanes = kSimd128Size / sizeof(T);
for (S x : compiler::ValueHelper::GetVector<S>()) {
@@ -2102,12 +2094,12 @@ WASM_EXEC_TEST(I32x4DotI16x8S) {
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI32x4DotI16x8S, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(kExprI32x4DotI16x8S, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
for (int16_t x : compiler::ValueHelper::GetVector<int16_t>()) {
for (int16_t y : compiler::ValueHelper::GetVector<int16_t>()) {
@@ -2159,10 +2151,10 @@ WASM_EXEC_TEST(I8x16Popcnt) {
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(kExprI8x16Popcnt, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprI8x16Popcnt, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_UINT8_INPUTS(x) {
r.Call(x);
@@ -2182,14 +2174,14 @@ WASM_EXEC_TEST(I8x16ConvertI16x8) {
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp1))),
- WASM_GLOBAL_SET(
- 1, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8,
+ WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8,
+ WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT16_INPUTS(x) {
r.Call(x);
@@ -2345,37 +2337,37 @@ WASM_EXEC_TEST(I8x16ShiftAdd) {
// Test Select by making a mask where the 0th and 3rd lanes are true and the
// rest false, and comparing for non-equality with zero to convert to a boolean
// vector.
-#define WASM_SIMD_SELECT_TEST(format) \
- WASM_EXEC_TEST(S##format##Select) { \
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier); \
- byte val1 = 0; \
- byte val2 = 1; \
- byte src1 = r.AllocateLocal(kWasmS128); \
- byte src2 = r.AllocateLocal(kWasmS128); \
- byte zero = r.AllocateLocal(kWasmS128); \
- byte mask = r.AllocateLocal(kWasmS128); \
- BUILD(r, \
- WASM_LOCAL_SET(src1, \
- WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val1))), \
- WASM_LOCAL_SET(src2, \
- WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val2))), \
- WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
- WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
- 1, WASM_LOCAL_GET(zero), WASM_I32V(-1))), \
- WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
- 2, WASM_LOCAL_GET(mask), WASM_I32V(-1))), \
- WASM_LOCAL_SET( \
- mask, \
- WASM_SIMD_SELECT( \
- format, WASM_LOCAL_GET(src1), WASM_LOCAL_GET(src2), \
- WASM_SIMD_BINOP(kExprI##format##Ne, WASM_LOCAL_GET(mask), \
- WASM_LOCAL_GET(zero)))), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 1), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 2), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE); \
- \
- CHECK_EQ(1, r.Call(0x12, 0x34)); \
+#define WASM_SIMD_SELECT_TEST(format) \
+ WASM_EXEC_TEST(S##format##Select) { \
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier); \
+ byte val1 = 0; \
+ byte val2 = 1; \
+ byte src1 = r.AllocateLocal(kWasmS128); \
+ byte src2 = r.AllocateLocal(kWasmS128); \
+ byte zero = r.AllocateLocal(kWasmS128); \
+ byte mask = r.AllocateLocal(kWasmS128); \
+ r.Build( \
+ {WASM_LOCAL_SET(src1, \
+ WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val1))), \
+ WASM_LOCAL_SET(src2, \
+ WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val2))), \
+ WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
+ WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
+ 1, WASM_LOCAL_GET(zero), WASM_I32V(-1))), \
+ WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
+ 2, WASM_LOCAL_GET(mask), WASM_I32V(-1))), \
+ WASM_LOCAL_SET( \
+ mask, \
+ WASM_SIMD_SELECT( \
+ format, WASM_LOCAL_GET(src1), WASM_LOCAL_GET(src2), \
+ WASM_SIMD_BINOP(kExprI##format##Ne, WASM_LOCAL_GET(mask), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 1), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val1, 2), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE}); \
+ \
+ CHECK_EQ(1, r.Call(0x12, 0x34)); \
}
WASM_SIMD_SELECT_TEST(32x4)
@@ -2384,35 +2376,35 @@ WASM_SIMD_SELECT_TEST(8x16)
// Test Select by making a mask where the 0th and 3rd lanes are non-zero and the
// rest 0. The mask is not the result of a comparison op.
-#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
- WASM_EXEC_TEST(S##format##NonCanonicalSelect) { \
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier); \
- byte val1 = 0; \
- byte val2 = 1; \
- byte combined = 2; \
- byte src1 = r.AllocateLocal(kWasmS128); \
- byte src2 = r.AllocateLocal(kWasmS128); \
- byte zero = r.AllocateLocal(kWasmS128); \
- byte mask = r.AllocateLocal(kWasmS128); \
- BUILD(r, \
- WASM_LOCAL_SET(src1, \
- WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val1))), \
- WASM_LOCAL_SET(src2, \
- WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val2))), \
- WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
- WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
- 1, WASM_LOCAL_GET(zero), WASM_I32V(0xF))), \
- WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
- 2, WASM_LOCAL_GET(mask), WASM_I32V(0xF))), \
- WASM_LOCAL_SET(mask, WASM_SIMD_SELECT(format, WASM_LOCAL_GET(src1), \
- WASM_LOCAL_GET(src2), \
- WASM_LOCAL_GET(mask))), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 1), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 2), \
- WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE); \
- \
- CHECK_EQ(1, r.Call(0x12, 0x34, 0x32)); \
+#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
+ WASM_EXEC_TEST(S##format##NonCanonicalSelect) { \
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier); \
+ byte val1 = 0; \
+ byte val2 = 1; \
+ byte combined = 2; \
+ byte src1 = r.AllocateLocal(kWasmS128); \
+ byte src2 = r.AllocateLocal(kWasmS128); \
+ byte zero = r.AllocateLocal(kWasmS128); \
+ byte mask = r.AllocateLocal(kWasmS128); \
+ r.Build( \
+ {WASM_LOCAL_SET(src1, \
+ WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val1))), \
+ WASM_LOCAL_SET(src2, \
+ WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(val2))), \
+ WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
+ WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
+ 1, WASM_LOCAL_GET(zero), WASM_I32V(0xF))), \
+ WASM_LOCAL_SET(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
+ 2, WASM_LOCAL_GET(mask), WASM_I32V(0xF))), \
+ WASM_LOCAL_SET(mask, WASM_SIMD_SELECT(format, WASM_LOCAL_GET(src1), \
+ WASM_LOCAL_GET(src2), \
+ WASM_LOCAL_GET(mask))), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 0), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 1), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, combined, 2), \
+ WASM_SIMD_CHECK_LANE_S(I##format, mask, I32, val2, 3), WASM_ONE}); \
+ \
+ CHECK_EQ(1, r.Call(0x12, 0x34, 0x32)); \
}
WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
@@ -2434,16 +2426,14 @@ void RunBinaryLaneOpTest(
LANE(src1, i) = kElems + i;
}
if (simd_op == kExprI8x16Shuffle) {
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SHUFFLE_OP(simd_op, expected,
- WASM_GLOBAL_GET(0),
- WASM_GLOBAL_GET(1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SHUFFLE_OP(simd_op, expected,
+ WASM_GLOBAL_GET(0),
+ WASM_GLOBAL_GET(1))),
+ WASM_ONE});
} else {
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(simd_op, WASM_GLOBAL_GET(0),
- WASM_GLOBAL_GET(1))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(simd_op, WASM_GLOBAL_GET(0),
+ WASM_GLOBAL_GET(1))),
+ WASM_ONE});
}
CHECK_EQ(1, r.Call());
@@ -2658,11 +2648,11 @@ WASM_EXEC_TEST(I8x16ShuffleWithZeroInput) {
2, 0, 0, 0, 3, 0, 0, 0};
constexpr std::array<int8_t, 16> zeros = {0};
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SHUFFLE_OP(
- kExprI8x16Shuffle, shuffle,
- WASM_SIMD_CONSTANT(zeros), WASM_GLOBAL_GET(1))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, shuffle,
+ WASM_SIMD_CONSTANT(zeros),
+ WASM_GLOBAL_GET(1))),
+ WASM_ONE});
CHECK_EQ(1, r.Call());
for (int i = 0; i < kElems; i++) {
CHECK_EQ(LANE(dst, i), expected[i]);
@@ -2704,11 +2694,10 @@ WASM_EXEC_TEST(I8x16Swizzle) {
uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
uint8_t* src1 = r.builder().AddGlobal<uint8_t>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(0,
- WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
- WASM_GLOBAL_GET(2))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
+ WASM_GLOBAL_GET(2))),
+ WASM_ONE});
for (SwizzleTestArgs si : swizzle_test_vector) {
for (int i = 0; i < kElems; i++) {
@@ -2730,11 +2719,10 @@ WASM_EXEC_TEST(I8x16Swizzle) {
WasmRunner<int32_t> r(execution_tier);
uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
- WASM_SIMD_CONSTANT(si.indices))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
+ WASM_SIMD_CONSTANT(si.indices))),
+ WASM_ONE});
for (int i = 0; i < kSimd128Size; i++) {
LANE(src0, i) = si.input[i];
@@ -2858,75 +2846,76 @@ WASM_EXEC_TEST(S8x16MultiShuffleFuzz) {
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
// test inputs. Test inputs with all true, all false, one true, and one false.
-#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
- WASM_EXEC_TEST(ReductionTest##lanes) { \
- WasmRunner<int32_t> r(execution_tier); \
- if (lanes == 2) return; \
- byte zero = r.AllocateLocal(kWasmS128); \
- byte one_one = r.AllocateLocal(kWasmS128); \
- byte reduced = r.AllocateLocal(kWasmI32); \
- BUILD(r, WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(int_type(0))), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
- WASM_SIMD_BINOP(kExprI##format##Eq, \
- WASM_LOCAL_GET(zero), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
- WASM_SIMD_BINOP(kExprI##format##Ne, \
- WASM_LOCAL_GET(zero), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
- WASM_SIMD_BINOP(kExprI##format##Eq, \
- WASM_LOCAL_GET(zero), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
- WASM_SIMD_BINOP(kExprI##format##Ne, \
- WASM_LOCAL_GET(zero), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET(one_one, \
- WASM_SIMD_I##format##_REPLACE_LANE( \
- lanes - 1, WASM_LOCAL_GET(zero), int_type(1))), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
- WASM_SIMD_BINOP(kExprI##format##Eq, \
- WASM_LOCAL_GET(one_one), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
- WASM_SIMD_BINOP(kExprI##format##Ne, \
- WASM_LOCAL_GET(one_one), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
- WASM_SIMD_BINOP(kExprI##format##Eq, \
- WASM_LOCAL_GET(one_one), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
- WASM_SIMD_BINOP(kExprI##format##Ne, \
- WASM_LOCAL_GET(one_one), \
- WASM_LOCAL_GET(zero)))), \
- WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN(WASM_ZERO)), \
- WASM_ONE); \
- CHECK_EQ(1, r.Call()); \
+#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
+ WASM_EXEC_TEST(ReductionTest##lanes) { \
+ WasmRunner<int32_t> r(execution_tier); \
+ if (lanes == 2) return; \
+ byte zero = r.AllocateLocal(kWasmS128); \
+ byte one_one = r.AllocateLocal(kWasmS128); \
+ byte reduced = r.AllocateLocal(kWasmI32); \
+ r.Build( \
+ {WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(int_type(0))), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Eq, \
+ WASM_LOCAL_GET(zero), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Ne, \
+ WASM_LOCAL_GET(zero), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Eq, \
+ WASM_LOCAL_GET(zero), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Ne, \
+ WASM_LOCAL_GET(zero), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET(one_one, \
+ WASM_SIMD_I##format##_REPLACE_LANE( \
+ lanes - 1, WASM_LOCAL_GET(zero), int_type(1))), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Eq, \
+ WASM_LOCAL_GET(one_one), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Ne, \
+ WASM_LOCAL_GET(one_one), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Eq, \
+ WASM_LOCAL_GET(one_one), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_LOCAL_SET( \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
+ WASM_SIMD_BINOP(kExprI##format##Ne, \
+ WASM_LOCAL_GET(one_one), \
+ WASM_LOCAL_GET(zero)))), \
+ WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
+ WASM_RETURN(WASM_ZERO)), \
+ WASM_ONE}); \
+ CHECK_EQ(1, r.Call()); \
}
WASM_SIMD_BOOL_REDUCTION_TEST(64x2, 2, WASM_I64V)
@@ -2936,21 +2925,21 @@ WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16, WASM_I32V)
WASM_EXEC_TEST(SimdI32x4ExtractWithF32x4) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_IF_ELSE_I(
- WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
- WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
- WASM_I32V(1), WASM_I32V(0)));
+ r.Build(
+ {WASM_IF_ELSE_I(WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
+ WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
+ WASM_I32V(1), WASM_I32V(0))});
CHECK_EQ(1, r.Call());
}
WASM_EXEC_TEST(SimdF32x4ExtractWithI32x4) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
- 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
- WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
- WASM_I32V(1), WASM_I32V(0)));
+ r.Build(
+ {WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
+ WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
+ WASM_I32V(1), WASM_I32V(0))});
CHECK_EQ(1, r.Call());
}
@@ -2958,11 +2947,10 @@ WASM_EXEC_TEST(SimdF32x4ExtractLane) {
WasmRunner<float> r(execution_tier);
r.AllocateLocal(kWasmF32);
r.AllocateLocal(kWasmS128);
- BUILD(r,
- WASM_LOCAL_SET(0, WASM_SIMD_F32x4_EXTRACT_LANE(
- 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5)))),
- WASM_LOCAL_SET(1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0))),
- WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)));
+ r.Build({WASM_LOCAL_SET(0, WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5)))),
+ WASM_LOCAL_SET(1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1))});
CHECK_EQ(30.5, r.Call());
}
@@ -2972,40 +2960,37 @@ WASM_EXEC_TEST(SimdF32x4AddWithI32x4) {
const int kOne = 0x3F800000;
const int kTwo = 0x40000000;
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_IF_ELSE_I(
- WASM_F32_EQ(
- WASM_SIMD_F32x4_EXTRACT_LANE(
- 0, WASM_SIMD_BINOP(kExprF32x4Add,
- WASM_SIMD_I32x4_SPLAT(WASM_I32V(kOne)),
- WASM_SIMD_I32x4_SPLAT(WASM_I32V(kTwo)))),
- WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(kOne)),
- WASM_F32_REINTERPRET_I32(WASM_I32V(kTwo)))),
- WASM_I32V(1), WASM_I32V(0)));
+ r.Build({WASM_IF_ELSE_I(
+ WASM_F32_EQ(
+ WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_BINOP(kExprF32x4Add,
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(kOne)),
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(kTwo)))),
+ WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(kOne)),
+ WASM_F32_REINTERPRET_I32(WASM_I32V(kTwo)))),
+ WASM_I32V(1), WASM_I32V(0))});
CHECK_EQ(1, r.Call());
}
WASM_EXEC_TEST(SimdI32x4AddWithF32x4) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_IF_ELSE_I(
- WASM_I32_EQ(
- WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_BINOP(kExprI32x4Add,
- WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
- WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
- WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
- WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
- WASM_I32V(1), WASM_I32V(0)));
+ r.Build({WASM_IF_ELSE_I(
+ WASM_I32_EQ(
+ WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_BINOP(kExprI32x4Add,
+ WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
+ WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
+ WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
+ WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
+ WASM_I32V(1), WASM_I32V(0))});
CHECK_EQ(1, r.Call());
}
WASM_EXEC_TEST(SimdI32x4Local) {
WasmRunner<int32_t> r(execution_tier);
r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
-
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOCAL_SET(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(0))});
CHECK_EQ(31, r.Call());
}
@@ -3013,11 +2998,10 @@ WASM_EXEC_TEST(SimdI32x4SplatFromExtract) {
WasmRunner<int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
- BUILD(r,
- WASM_LOCAL_SET(0, WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
- WASM_LOCAL_SET(1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0))),
- WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)));
+ r.Build({WASM_LOCAL_SET(0, WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
+ WASM_LOCAL_SET(1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1))});
CHECK_EQ(76, r.Call());
}
@@ -3025,33 +3009,32 @@ WASM_EXEC_TEST(SimdI32x4For) {
WasmRunner<int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
- BUILD(r,
-
- WASM_LOCAL_SET(1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
- WASM_LOCAL_SET(1, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_LOCAL_GET(1),
- WASM_I32V(53))),
- WASM_LOCAL_SET(1, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_LOCAL_GET(1),
- WASM_I32V(23))),
- WASM_LOCAL_SET(0, WASM_I32V(0)),
- WASM_LOOP(
- WASM_LOCAL_SET(
- 1, WASM_SIMD_BINOP(kExprI32x4Add, WASM_LOCAL_GET(1),
- WASM_SIMD_I32x4_SPLAT(WASM_I32V(1)))),
- WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(5)), WASM_BR(1))),
- WASM_LOCAL_SET(0, WASM_I32V(1)),
- WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(1)),
- WASM_I32V(36)),
- WASM_LOCAL_SET(0, WASM_I32V(0))),
- WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)),
- WASM_I32V(58)),
- WASM_LOCAL_SET(0, WASM_I32V(0))),
- WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_LOCAL_GET(1)),
- WASM_I32V(28)),
- WASM_LOCAL_SET(0, WASM_I32V(0))),
- WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_LOCAL_GET(1)),
- WASM_I32V(36)),
- WASM_LOCAL_SET(0, WASM_I32V(0))),
- WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_LOCAL_SET(1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
+ WASM_LOCAL_SET(1, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_LOCAL_GET(1),
+ WASM_I32V(53))),
+ WASM_LOCAL_SET(1, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_LOCAL_GET(1),
+ WASM_I32V(23))),
+ WASM_LOCAL_SET(0, WASM_I32V(0)),
+ WASM_LOOP(
+ WASM_LOCAL_SET(1,
+ WASM_SIMD_BINOP(kExprI32x4Add, WASM_LOCAL_GET(1),
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(1)))),
+ WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(5)), WASM_BR(1))),
+ WASM_LOCAL_SET(0, WASM_I32V(1)),
+ WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(1)),
+ WASM_I32V(36)),
+ WASM_LOCAL_SET(0, WASM_I32V(0))),
+ WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_LOCAL_GET(1)),
+ WASM_I32V(58)),
+ WASM_LOCAL_SET(0, WASM_I32V(0))),
+ WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_LOCAL_GET(1)),
+ WASM_I32V(28)),
+ WASM_LOCAL_SET(0, WASM_I32V(0))),
+ WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_LOCAL_GET(1)),
+ WASM_I32V(36)),
+ WASM_LOCAL_SET(0, WASM_I32V(0))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(1, r.Call());
}
@@ -3059,23 +3042,24 @@ WASM_EXEC_TEST(SimdF32x4For) {
WasmRunner<int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
- WASM_LOCAL_SET(1, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_LOCAL_GET(1),
- WASM_F32(19.5))),
- WASM_LOCAL_SET(0, WASM_I32V(0)),
- WASM_LOOP(
- WASM_LOCAL_SET(
- 1, WASM_SIMD_BINOP(kExprF32x4Add, WASM_LOCAL_GET(1),
- WASM_SIMD_F32x4_SPLAT(WASM_F32(2.0)))),
- WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(3)), WASM_BR(1))),
- WASM_LOCAL_SET(0, WASM_I32V(1)),
- WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(1)),
- WASM_F32(27.25)),
- WASM_LOCAL_SET(0, WASM_I32V(0))),
- WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_LOCAL_GET(1)),
- WASM_F32(25.5)),
- WASM_LOCAL_SET(0, WASM_I32V(0))),
- WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_LOCAL_SET(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
+ WASM_LOCAL_SET(1, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_LOCAL_GET(1),
+ WASM_F32(19.5))),
+ WASM_LOCAL_SET(0, WASM_I32V(0)),
+ WASM_LOOP(
+ WASM_LOCAL_SET(
+ 1, WASM_SIMD_BINOP(kExprF32x4Add, WASM_LOCAL_GET(1),
+ WASM_SIMD_F32x4_SPLAT(WASM_F32(2.0)))),
+ WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(3)), WASM_BR(1))),
+ WASM_LOCAL_SET(0, WASM_I32V(1)),
+ WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(1)),
+ WASM_F32(27.25)),
+ WASM_LOCAL_SET(0, WASM_I32V(0))),
+ WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_LOCAL_GET(1)),
+ WASM_F32(25.5)),
+ WASM_LOCAL_SET(0, WASM_I32V(0))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(1, r.Call());
}
@@ -3103,21 +3087,21 @@ WASM_EXEC_TEST(SimdI32x4GetGlobal) {
int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
SetVectorByLanes(global, {{0, 1, 2, 3}});
r.AllocateLocal(kWasmI32);
- BUILD(
- r, WASM_LOCAL_SET(1, WASM_I32V(1)),
- WASM_IF(WASM_I32_NE(WASM_I32V(0),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GLOBAL_GET(4))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_IF(WASM_I32_NE(WASM_I32V(1),
- WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GLOBAL_GET(4))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_IF(WASM_I32_NE(WASM_I32V(2),
- WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GLOBAL_GET(4))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_IF(WASM_I32_NE(WASM_I32V(3),
- WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GLOBAL_GET(4))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_LOCAL_GET(1));
+ r.Build(
+ {WASM_LOCAL_SET(1, WASM_I32V(1)),
+ WASM_IF(WASM_I32_NE(WASM_I32V(0),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GLOBAL_GET(4))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_IF(WASM_I32_NE(WASM_I32V(1),
+ WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GLOBAL_GET(4))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_IF(WASM_I32_NE(WASM_I32V(2),
+ WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GLOBAL_GET(4))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_IF(WASM_I32_NE(WASM_I32V(3),
+ WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GLOBAL_GET(4))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_LOCAL_GET(1)});
CHECK_EQ(1, r.Call(0));
}
@@ -3129,14 +3113,14 @@ WASM_EXEC_TEST(SimdI32x4SetGlobal) {
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
- WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GLOBAL_GET(4),
- WASM_I32V(34))),
- WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GLOBAL_GET(4),
- WASM_I32V(45))),
- WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GLOBAL_GET(4),
- WASM_I32V(56))),
- WASM_I32V(1));
+ r.Build({WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
+ WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(
+ 1, WASM_GLOBAL_GET(4), WASM_I32V(34))),
+ WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(
+ 2, WASM_GLOBAL_GET(4), WASM_I32V(45))),
+ WASM_GLOBAL_SET(4, WASM_SIMD_I32x4_REPLACE_LANE(
+ 3, WASM_GLOBAL_GET(4), WASM_I32V(56))),
+ WASM_I32V(1)});
CHECK_EQ(1, r.Call(0));
CHECK_EQ(GetScalar(global, 0), 23);
CHECK_EQ(GetScalar(global, 1), 34);
@@ -3149,35 +3133,35 @@ WASM_EXEC_TEST(SimdF32x4GetGlobal) {
float* global = r.builder().AddGlobal<float>(kWasmS128);
SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
r.AllocateLocal(kWasmI32);
- BUILD(
- r, WASM_LOCAL_SET(1, WASM_I32V(1)),
- WASM_IF(WASM_F32_NE(WASM_F32(0.0),
- WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GLOBAL_GET(0))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_IF(WASM_F32_NE(WASM_F32(1.5),
- WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GLOBAL_GET(0))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_IF(WASM_F32_NE(WASM_F32(2.25),
- WASM_SIMD_F32x4_EXTRACT_LANE(2, WASM_GLOBAL_GET(0))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_IF(WASM_F32_NE(WASM_F32(3.5),
- WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GLOBAL_GET(0))),
- WASM_LOCAL_SET(1, WASM_I32V(0))),
- WASM_LOCAL_GET(1));
+ r.Build(
+ {WASM_LOCAL_SET(1, WASM_I32V(1)),
+ WASM_IF(WASM_F32_NE(WASM_F32(0.0),
+ WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GLOBAL_GET(0))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_IF(WASM_F32_NE(WASM_F32(1.5),
+ WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GLOBAL_GET(0))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_IF(WASM_F32_NE(WASM_F32(2.25),
+ WASM_SIMD_F32x4_EXTRACT_LANE(2, WASM_GLOBAL_GET(0))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_IF(WASM_F32_NE(WASM_F32(3.5),
+ WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GLOBAL_GET(0))),
+ WASM_LOCAL_SET(1, WASM_I32V(0))),
+ WASM_LOCAL_GET(1)});
CHECK_EQ(1, r.Call(0));
}
WASM_EXEC_TEST(SimdF32x4SetGlobal) {
WasmRunner<int32_t, int32_t> r(execution_tier);
float* global = r.builder().AddGlobal<float>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GLOBAL_GET(0),
- WASM_F32(45.5))),
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(2, WASM_GLOBAL_GET(0),
- WASM_F32(32.25))),
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GLOBAL_GET(0),
- WASM_F32(65.0))),
- WASM_I32V(1));
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(
+ 1, WASM_GLOBAL_GET(0), WASM_F32(45.5))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(
+ 2, WASM_GLOBAL_GET(0), WASM_F32(32.25))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_REPLACE_LANE(
+ 3, WASM_GLOBAL_GET(0), WASM_F32(65.0))),
+ WASM_I32V(1)});
CHECK_EQ(1, r.Call(0));
CHECK_EQ(GetScalar(global, 0), 13.5f);
CHECK_EQ(GetScalar(global, 1), 45.5f);
@@ -3185,6 +3169,49 @@ WASM_EXEC_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
+WASM_EXEC_TEST(F32x4AddRevec) {
+ WasmRunner<float, int32_t, int32_t> r(execution_tier);
+ float* memory =
+ r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
+ byte param1 = 0;
+ byte param2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ byte temp3 = r.AllocateLocal(kWasmS128);
+ byte temp4 = r.AllocateLocal(kWasmS128);
+ byte temp5 = r.AllocateLocal(kWasmF32);
+ byte temp6 = r.AllocateLocal(kWasmF32);
+ constexpr byte offset = 16;
+
+  // Add a splatted constant to eight consecutive floats (two F32x4 halves)
+  // and store the results to another array.
+ r.Build(
+ {WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_F32(10.0f))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_LOAD_MEM(WASM_LOCAL_GET(param1))),
+ WASM_LOCAL_SET(temp3,
+ WASM_SIMD_BINOP(kExprF32x4Add, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_LOCAL_SET(
+ temp2, WASM_SIMD_LOAD_MEM_OFFSET(offset, WASM_LOCAL_GET(param1))),
+ WASM_LOCAL_SET(temp4,
+ WASM_SIMD_BINOP(kExprF32x4Add, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_SIMD_STORE_MEM(WASM_LOCAL_GET(param2), WASM_LOCAL_GET(temp3)),
+ WASM_SIMD_STORE_MEM_OFFSET(offset, WASM_LOCAL_GET(param2),
+ WASM_LOCAL_GET(temp4)),
+ WASM_LOCAL_SET(temp5,
+ WASM_SIMD_F32x4_EXTRACT_LANE(
+ 1, WASM_SIMD_LOAD_MEM(WASM_LOCAL_GET(param2)))),
+ WASM_LOCAL_SET(temp6, WASM_SIMD_F32x4_EXTRACT_LANE(
+ 2, WASM_SIMD_LOAD_MEM_OFFSET(
+ offset, WASM_LOCAL_GET(param2)))),
+ WASM_BINOP(kExprF32Add, WASM_LOCAL_GET(temp5), WASM_LOCAL_GET(temp6))});
+
+ r.builder().WriteMemory(&memory[1], 1.0f);
+ r.builder().WriteMemory(&memory[6], 2.0f);
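+  // Expect lane 1 of the first sum (10.0f + 1.0f) plus lane 2 of the second
+  // sum (10.0f + 2.0f), i.e. 11.0f + 12.0f == 23.0f.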
+ CHECK_EQ(23.0f, r.Call(0, 32));
+}
+
WASM_EXEC_TEST(SimdLoadStoreLoad) {
{
WasmRunner<int32_t> r(execution_tier);
@@ -3192,9 +3219,9 @@ WASM_EXEC_TEST(SimdLoadStoreLoad) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
// Load memory, store it, then reload it and extract the first lane. Use a
// non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
- BUILD(r,
- WASM_SIMD_STORE_MEM(WASM_I32V(8), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(8))));
+ r.Build(
+ {WASM_SIMD_STORE_MEM(WASM_I32V(8), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(8)))});
FOR_INT32_INPUTS(i) {
int32_t expected = i;
@@ -3207,8 +3234,8 @@ WASM_EXEC_TEST(SimdLoadStoreLoad) {
// OOB tests for loads.
WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_LOAD_MEM(WASM_LOCAL_GET(0))));
+ r.Build({WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_LOAD_MEM(WASM_LOCAL_GET(0)))});
for (uint32_t offset = kWasmPageSize - (kSimd128Size - 1);
offset < kWasmPageSize; ++offset) {
@@ -3220,9 +3247,9 @@ WASM_EXEC_TEST(SimdLoadStoreLoad) {
// OOB tests for stores.
WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r,
- WASM_SIMD_STORE_MEM(WASM_LOCAL_GET(0), WASM_SIMD_LOAD_MEM(WASM_ZERO)),
- WASM_ONE);
+ r.Build(
+ {WASM_SIMD_STORE_MEM(WASM_LOCAL_GET(0), WASM_SIMD_LOAD_MEM(WASM_ZERO)),
+ WASM_ONE});
for (uint32_t offset = kWasmPageSize - (kSimd128Size - 1);
offset < kWasmPageSize; ++offset) {
@@ -3241,12 +3268,11 @@ WASM_EXEC_TEST(SimdLoadStoreLoadMemargOffset) {
// Load from memory at offset_1, store to offset_2, load from offset_2, and
// extract first lane. We use non-zero memarg offsets to test offset
// decoding.
- BUILD(r,
- WASM_SIMD_STORE_MEM_OFFSET(
- offset_2, WASM_ZERO,
- WASM_SIMD_LOAD_MEM_OFFSET(offset_1, WASM_ZERO)),
- WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_LOAD_MEM_OFFSET(offset_2, WASM_ZERO)));
+ r.Build({WASM_SIMD_STORE_MEM_OFFSET(
+ offset_2, WASM_ZERO,
+ WASM_SIMD_LOAD_MEM_OFFSET(offset_1, WASM_ZERO)),
+ WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_LOAD_MEM_OFFSET(offset_2, WASM_ZERO))});
FOR_INT32_INPUTS(i) {
int32_t expected = i;
@@ -3262,8 +3288,8 @@ WASM_EXEC_TEST(SimdLoadStoreLoadMemargOffset) {
offset < kWasmPageSize; ++offset) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_LOAD_MEM_OFFSET(U32V_3(offset), WASM_ZERO)));
+ r.Build({WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_LOAD_MEM_OFFSET(U32V_3(offset), WASM_ZERO))});
CHECK_TRAP(r.Call());
}
}
@@ -3274,10 +3300,9 @@ WASM_EXEC_TEST(SimdLoadStoreLoadMemargOffset) {
offset < kWasmPageSize; ++offset) {
WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r,
- WASM_SIMD_STORE_MEM_OFFSET(U32V_3(offset), WASM_ZERO,
- WASM_SIMD_LOAD_MEM(WASM_ZERO)),
- WASM_ONE);
+ r.Build({WASM_SIMD_STORE_MEM_OFFSET(U32V_3(offset), WASM_ZERO,
+ WASM_SIMD_LOAD_MEM(WASM_ZERO)),
+ WASM_ONE});
CHECK_TRAP(r.Call(offset));
}
}
@@ -3291,11 +3316,10 @@ WASM_EXEC_TEST(S128Load8SplatOffset) {
WasmRunner<int32_t> r(execution_tier);
int8_t* memory = r.builder().AddMemoryElems<int8_t>(kWasmPageSize);
int8_t* global = r.builder().AddGlobal<int8_t>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_LOAD_OP_OFFSET(kExprS128Load8Splat, WASM_I32V(0),
- U32V_2(offset))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_LOAD_OP_OFFSET(kExprS128Load8Splat, WASM_I32V(0),
+ U32V_2(offset))),
+ WASM_ONE});
  // We don't really care about all valid values, so just test a single value.
int8_t x = 7;
@@ -3314,8 +3338,8 @@ void RunLoadSplatTest(TestExecutionTier execution_tier, WasmOpcode op) {
WasmRunner<int32_t> r(execution_tier);
T* memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
T* global = r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
+ WASM_ONE});
for (T x : compiler::ValueHelper::GetVector<T>()) {
// 16-th byte in memory is lanes-th element (size T) of memory.
@@ -3333,8 +3357,8 @@ void RunLoadSplatTest(TestExecutionTier execution_tier, WasmOpcode op) {
r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
+ WASM_ONE});
// Load splats load sizeof(T) bytes.
for (uint32_t offset = kWasmPageSize - (sizeof(T) - 1);
@@ -3372,10 +3396,9 @@ void RunLoadExtendTest(TestExecutionTier execution_tier, WasmOpcode op) {
WasmRunner<int32_t> r(execution_tier);
S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
T* global = r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_ALIGNMENT(
- op, WASM_I32V(mem_index), alignment)),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_ALIGNMENT(
+ op, WASM_I32V(mem_index), alignment)),
+ WASM_ONE});
for (S x : compiler::ValueHelper::GetVector<S>()) {
for (int i = 0; i < lanes_s; i++) {
@@ -3395,9 +3418,9 @@ void RunLoadExtendTest(TestExecutionTier execution_tier, WasmOpcode op) {
S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
T* global = r.builder().AddGlobal<T>(kWasmS128);
constexpr byte offset = sizeof(S);
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, offset)),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, offset)),
+ WASM_ONE});
    // Let max_s be the max value for type S; we set up the memory as follows:
// memory = [max_s, max_s - 1, ... max_s - (lane_s - 1)].
@@ -3423,8 +3446,8 @@ void RunLoadExtendTest(TestExecutionTier execution_tier, WasmOpcode op) {
r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
+ WASM_ONE});
// Load extends load 8 bytes, so should trap from -7.
for (uint32_t offset = kWasmPageSize - 7; offset < kWasmPageSize;
@@ -3479,8 +3502,8 @@ void RunLoadZeroTest(TestExecutionTier execution_tier, WasmOpcode op) {
WasmRunner<int32_t> r(execution_tier);
std::tie(memory, global) = initialize_builder(&r);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
+ WASM_ONE});
r.Call();
// Only first lane is set to sentinel.
@@ -3496,10 +3519,9 @@ void RunLoadZeroTest(TestExecutionTier execution_tier, WasmOpcode op) {
WasmRunner<int32_t> r(execution_tier);
std::tie(memory, global) = initialize_builder(&r);
- BUILD(
- r,
- WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, mem_index)),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, mem_index)),
+ WASM_ONE});
r.Call();
// Only first lane is set to sentinel.
@@ -3516,8 +3538,8 @@ void RunLoadZeroTest(TestExecutionTier execution_tier, WasmOpcode op) {
r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
r.builder().AddGlobal<S>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_LOCAL_GET(0))),
+ WASM_ONE});
// Load extends load sizeof(S) bytes.
for (uint32_t offset = kWasmPageSize - (sizeof(S) - 1);
@@ -3543,7 +3565,7 @@ void RunLoadLaneTest(TestExecutionTier execution_tier, WasmOpcode load_op,
constexpr byte lanes_s = kSimd128Size / sizeof(T);
constexpr int mem_index = 16; // Load from mem index 16 (bytes).
- constexpr int splat_value = 33;
+ constexpr byte splat_value = 33;
T sentinel = T{-1};
T* memory;
@@ -3557,9 +3579,9 @@ void RunLoadLaneTest(TestExecutionTier execution_tier, WasmOpcode load_op,
r.builder().WriteMemory(&memory[lanes_s], sentinel);
// Splat splat_value, then only load and replace a single lane with the
// sentinel value.
- BUILD(r, WASM_I32V(mem_index), const_op, splat_value,
- WASM_SIMD_OP(splat_op), WASM_SIMD_OP(load_op), alignment, offset,
- lane, kExprGlobalSet, 0, WASM_ONE);
+ r.Build({WASM_I32V(mem_index), const_op, splat_value,
+ WASM_SIMD_OP(splat_op), WASM_SIMD_OP(load_op), alignment, offset,
+ lane, kExprGlobalSet, 0, WASM_ONE});
};
auto check_results = [=](T* global, int sentinel_lane = 0) {
@@ -3602,9 +3624,9 @@ void RunLoadLaneTest(TestExecutionTier execution_tier, WasmOpcode load_op,
r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r, WASM_LOCAL_GET(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
- WASM_SIMD_OP(load_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, kExprGlobalSet,
- 0, WASM_ONE);
+ r.Build({WASM_LOCAL_GET(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
+ WASM_SIMD_OP(load_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0,
+ kExprGlobalSet, 0, WASM_ONE});
    // Load lane loads sizeof(T) bytes.
for (uint32_t index = kWasmPageSize - (sizeof(T) - 1);
@@ -3638,7 +3660,7 @@ void RunStoreLaneTest(TestExecutionTier execution_tier, WasmOpcode store_op,
WasmOpcode splat_op) {
constexpr byte lanes = kSimd128Size / sizeof(T);
constexpr int mem_index = 16; // Store to mem index 16 (bytes).
- constexpr int splat_value = 33;
+ constexpr byte splat_value = 33;
byte const_op = static_cast<byte>(
splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const);
@@ -3648,9 +3670,9 @@ void RunStoreLaneTest(TestExecutionTier execution_tier, WasmOpcode store_op,
byte lane_index, byte alignment, byte offset) {
memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
    // Splat splat_value, then store only a single lane to memory.
- BUILD(r, WASM_I32V(mem_index), const_op, splat_value,
- WASM_SIMD_OP(splat_op), WASM_SIMD_OP(store_op), alignment, offset,
- lane_index, WASM_ONE);
+ r.Build({WASM_I32V(mem_index), const_op, splat_value,
+ WASM_SIMD_OP(splat_op), WASM_SIMD_OP(store_op), alignment, offset,
+ lane_index, WASM_ONE});
r.builder().BlankMemory();
};
@@ -3695,8 +3717,8 @@ void RunStoreLaneTest(TestExecutionTier execution_tier, WasmOpcode store_op,
WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
- BUILD(r, WASM_LOCAL_GET(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
- WASM_SIMD_OP(store_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, WASM_ONE);
+ r.Build({WASM_LOCAL_GET(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
+ WASM_SIMD_OP(store_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, WASM_ONE});
// StoreLane stores sizeof(T) bytes.
for (uint32_t index = kWasmPageSize - (sizeof(T) - 1);
@@ -3726,18 +3748,17 @@ WASM_EXEC_TEST(S128Store64Lane) {
kExprI64x2Splat);
}
-#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
- WASM_EXEC_TEST(S##format##AnyTrue) { \
- WasmRunner<int32_t, param_type> r(execution_tier); \
- if (lanes == 2) return; \
- byte simd = r.AllocateLocal(kWasmS128); \
- BUILD( \
- r, \
- WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
- WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd))); \
- CHECK_EQ(1, r.Call(max)); \
- CHECK_EQ(1, r.Call(5)); \
- CHECK_EQ(0, r.Call(0)); \
+#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
+ WASM_EXEC_TEST(S##format##AnyTrue) { \
+ WasmRunner<int32_t, param_type> r(execution_tier); \
+ if (lanes == 2) return; \
+ byte simd = r.AllocateLocal(kWasmS128); \
+ r.Build( \
+ {WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
+ WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd))}); \
+ CHECK_EQ(1, r.Call(max)); \
+ CHECK_EQ(1, r.Call(5)); \
+ CHECK_EQ(0, r.Call(0)); \
}
WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff, int32_t)
@@ -3749,24 +3770,23 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
WASM_EXEC_TEST(V128AnytrueWithNegativeZero) {
WasmRunner<int32_t, int64_t> r(execution_tier);
byte simd = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
- WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd)));
+ r.Build({WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd))});
CHECK_EQ(1, r.Call(0x8000000000000000));
CHECK_EQ(0, r.Call(0x0000000000000000));
}
-#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
- WASM_EXEC_TEST(I##format##AllTrue) { \
- WasmRunner<int32_t, param_type> r(execution_tier); \
- if (lanes == 2) return; \
- byte simd = r.AllocateLocal(kWasmS128); \
- BUILD( \
- r, \
- WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
- WASM_SIMD_UNOP(kExprI##format##AllTrue, WASM_LOCAL_GET(simd))); \
- CHECK_EQ(1, r.Call(max)); \
- CHECK_EQ(1, r.Call(0x1)); \
- CHECK_EQ(0, r.Call(0)); \
+#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
+ WASM_EXEC_TEST(I##format##AllTrue) { \
+ WasmRunner<int32_t, param_type> r(execution_tier); \
+ if (lanes == 2) return; \
+ byte simd = r.AllocateLocal(kWasmS128); \
+ r.Build( \
+ {WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
+ WASM_SIMD_UNOP(kExprI##format##AllTrue, WASM_LOCAL_GET(simd))}); \
+ CHECK_EQ(1, r.Call(max)); \
+ CHECK_EQ(1, r.Call(0x1)); \
+ CHECK_EQ(0, r.Call(0)); \
}
WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
@@ -3776,13 +3796,12 @@ WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
WASM_EXEC_TEST(BitSelect) {
WasmRunner<int32_t, int32_t> r(execution_tier);
byte simd = r.AllocateLocal(kWasmS128);
- BUILD(r,
- WASM_LOCAL_SET(
- simd,
- WASM_SIMD_SELECT(32x4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(0x01020304)),
- WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
- WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(simd)));
+ r.Build({WASM_LOCAL_SET(
+ simd, WASM_SIMD_SELECT(
+ 32x4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(0x01020304)),
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
+ WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_LOCAL_GET(simd))});
CHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
}
@@ -3791,7 +3810,7 @@ void RunSimdConstTest(TestExecutionTier execution_tier,
WasmRunner<uint32_t> r(execution_tier);
byte temp1 = r.AllocateLocal(kWasmS128);
uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(temp1, WASM_SIMD_CONSTANT(expected)), WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(temp1, WASM_SIMD_CONSTANT(expected)), WASM_ONE});
CHECK_EQ(1, r.Call());
for (size_t i = 0; i < expected.size(); i++) {
CHECK_EQ(LANE(src0, i), expected[i]);
@@ -3870,8 +3889,8 @@ WASM_EXEC_TEST(I16x8ExtractLaneU_I8x16Splat) {
// Test that we are correctly signed/unsigned extending when extracting.
WasmRunner<int32_t, int32_t> r(execution_tier);
byte simd_val = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(simd_val, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0))),
- WASM_SIMD_I16x8_EXTRACT_LANE_U(0, WASM_LOCAL_GET(simd_val)));
+ r.Build({WASM_LOCAL_SET(simd_val, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(0))),
+ WASM_SIMD_I16x8_EXTRACT_LANE_U(0, WASM_LOCAL_GET(simd_val))});
CHECK_EQ(0xfafa, r.Call(0xfa));
}
@@ -3897,24 +3916,22 @@ void RunAddExtAddPairwiseTest(
switch (extAddSide) {
case LEFT:
// x = add(extadd_pairwise_s(y), x)
- BUILD(r,
- WASM_GLOBAL_SET(
- 0,
- WASM_SIMD_BINOP(
- addOpcode, WASM_SIMD_UNOP(extAddOpcode, WASM_GLOBAL_GET(1)),
- WASM_GLOBAL_GET(0))),
-
- WASM_ONE);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(
+ addOpcode,
+ WASM_SIMD_UNOP(extAddOpcode, WASM_GLOBAL_GET(1)),
+ WASM_GLOBAL_GET(0))),
+
+ WASM_ONE});
break;
case RIGHT:
// x = add(x, extadd_pairwise_s(y))
- BUILD(r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_BINOP(
- addOpcode, WASM_GLOBAL_GET(0),
- WASM_SIMD_UNOP(extAddOpcode, WASM_GLOBAL_GET(1)))),
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(
+ addOpcode, WASM_GLOBAL_GET(0),
+ WASM_SIMD_UNOP(extAddOpcode, WASM_GLOBAL_GET(1)))),
- WASM_ONE);
+ WASM_ONE});
break;
}
r.Call();
@@ -3974,15 +3991,14 @@ WASM_EXEC_TEST(Regress_12237) {
byte value = 0;
byte temp = r.AllocateLocal(kWasmS128);
int64_t local = 123;
- BUILD(r,
- WASM_LOCAL_SET(temp,
- WASM_SIMD_OPN(kExprI64x2Splat, WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(
- 0,
- WASM_SIMD_BINOP(kExprI64x2GtS, WASM_LOCAL_GET(temp),
- WASM_SIMD_BINOP(kExprI64x2Sub, WASM_LOCAL_GET(temp),
- WASM_LOCAL_GET(temp)))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(
+ temp, WASM_SIMD_OPN(kExprI64x2Splat, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(
+ kExprI64x2GtS, WASM_LOCAL_GET(temp),
+ WASM_SIMD_BINOP(kExprI64x2Sub, WASM_LOCAL_GET(temp),
+ WASM_LOCAL_GET(temp)))),
+ WASM_ONE});
r.Call(local);
int64_t expected = Greater(local, local - local);
for (size_t i = 0; i < kSimd128Size / sizeof(int64_t); i++) {
@@ -3990,41 +4006,40 @@ WASM_EXEC_TEST(Regress_12237) {
}
}
-#define WASM_EXTRACT_I16x8_TEST(Sign, Type) \
- WASM_EXEC_TEST(I16X8ExtractLane##Sign) { \
- WasmRunner<int32_t, int32_t> r(execution_tier); \
- byte int_val = r.AllocateLocal(kWasmI32); \
- byte simd_val = r.AllocateLocal(kWasmS128); \
- BUILD(r, \
- WASM_LOCAL_SET(simd_val, \
- WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(int_val))), \
- WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 0), \
- WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 2), \
- WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 4), \
- WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 6), WASM_ONE); \
- FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); } \
+#define WASM_EXTRACT_I16x8_TEST(Sign, Type) \
+ WASM_EXEC_TEST(I16X8ExtractLane##Sign) { \
+ WasmRunner<int32_t, int32_t> r(execution_tier); \
+ byte int_val = r.AllocateLocal(kWasmI32); \
+ byte simd_val = r.AllocateLocal(kWasmS128); \
+ r.Build({WASM_LOCAL_SET(simd_val, \
+ WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(int_val))), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 0), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 2), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 4), \
+ WASM_SIMD_CHECK_LANE_U(I16x8, simd_val, I32, int_val, 6), \
+ WASM_ONE}); \
+ FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); } \
}
WASM_EXTRACT_I16x8_TEST(S, UINT16) WASM_EXTRACT_I16x8_TEST(I, INT16)
#undef WASM_EXTRACT_I16x8_TEST
-#define WASM_EXTRACT_I8x16_TEST(Sign, Type) \
- WASM_EXEC_TEST(I8x16ExtractLane##Sign) { \
- WasmRunner<int32_t, int32_t> r(execution_tier); \
- byte int_val = r.AllocateLocal(kWasmI32); \
- byte simd_val = r.AllocateLocal(kWasmS128); \
- BUILD(r, \
- WASM_LOCAL_SET(simd_val, \
- WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(int_val))), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 1), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 3), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 5), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 7), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 9), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 10), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 11), \
- WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 13), \
- WASM_ONE); \
- FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); } \
+#define WASM_EXTRACT_I8x16_TEST(Sign, Type) \
+ WASM_EXEC_TEST(I8x16ExtractLane##Sign) { \
+ WasmRunner<int32_t, int32_t> r(execution_tier); \
+ byte int_val = r.AllocateLocal(kWasmI32); \
+ byte simd_val = r.AllocateLocal(kWasmS128); \
+ r.Build({WASM_LOCAL_SET(simd_val, \
+ WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(int_val))), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 1), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 3), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 5), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 7), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 9), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 10), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 11), \
+ WASM_SIMD_CHECK_LANE_U(I8x16, simd_val, I32, int_val, 13), \
+ WASM_ONE}); \
+ FOR_##Type##_INPUTS(x) { CHECK_EQ(1, r.Call(x)); } \
}
WASM_EXTRACT_I8x16_TEST(S, UINT8) WASM_EXTRACT_I8x16_TEST(I, INT8)
#undef WASM_EXTRACT_I8x16_TEST
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
index 4f9dae4333..c0d1bfb46d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
@@ -32,12 +32,12 @@ Handle<WasmInstanceObject> CompileModule(Zone* zone, Isolate* isolate,
return maybe_instance.ToHandleChecked();
}
-bool IsGeneric(CodeT wrapper) {
+bool IsGeneric(Code wrapper) {
return wrapper.is_builtin() &&
wrapper.builtin_id() == Builtin::kGenericJSToWasmWrapper;
}
-bool IsSpecific(CodeT wrapper) {
+bool IsSpecific(Code wrapper) {
return wrapper.kind() == CodeKind::JS_TO_WASM_FUNCTION;
}
@@ -161,7 +161,7 @@ TEST(WrapperReplacement) {
// Call the exported Wasm function as many times as required to almost
// exhaust the remaining budget for using the generic wrapper.
- Handle<CodeT> wrapper_before_call;
+ Handle<Code> wrapper_before_call;
for (int i = remaining_budget; i > 0; --i) {
// Verify that the wrapper to be used is the generic one.
wrapper_before_call = handle(main_function_data->wrapper_code(), isolate);
@@ -174,7 +174,7 @@ TEST(WrapperReplacement) {
}
// Get the wrapper-code object after the wrapper replacement.
- CodeT wrapper_after_call = main_function_data->wrapper_code();
+ Code wrapper_after_call = main_function_data->wrapper_code();
// Verify that the budget has been exhausted.
CHECK_EQ(main_function_data->wrapper_budget(), 0);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 9b967e239f..1cfb1bb1d8 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -31,7 +31,7 @@ WASM_EXEC_TEST(Int32Const) {
WasmRunner<int32_t> r(execution_tier);
const int32_t kExpectedValue = 0x11223344;
// return(kExpectedValue)
- BUILD(r, WASM_I32V_5(kExpectedValue));
+ r.Build({WASM_I32V_5(kExpectedValue)});
CHECK_EQ(kExpectedValue, r.Call());
}
@@ -40,7 +40,7 @@ WASM_EXEC_TEST(Int32Const_many) {
WasmRunner<int32_t> r(execution_tier);
const int32_t kExpectedValue = i;
// return(kExpectedValue)
- BUILD(r, WASM_I32V(kExpectedValue));
+ r.Build({WASM_I32V(kExpectedValue)});
CHECK_EQ(kExpectedValue, r.Call());
}
}
@@ -48,50 +48,50 @@ WASM_EXEC_TEST(Int32Const_many) {
WASM_EXEC_TEST(GraphTrimming) {
// This WebAssembly code requires graph trimming in the TurboFan compiler.
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, kExprLocalGet, 0, kExprLocalGet, 0, kExprLocalGet, 0, kExprI32RemS,
- kExprI32Eq, kExprLocalGet, 0, kExprI32DivS, kExprUnreachable);
+ r.Build({kExprLocalGet, 0, kExprLocalGet, 0, kExprLocalGet, 0, kExprI32RemS,
+ kExprI32Eq, kExprLocalGet, 0, kExprI32DivS, kExprUnreachable});
r.Call(1);
}
WASM_EXEC_TEST(Int32Param0) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// return(local[0])
- BUILD(r, WASM_LOCAL_GET(0));
+ r.Build({WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Int32Param0_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// local[0]
- BUILD(r, WASM_LOCAL_GET(0));
+ r.Build({WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Int32Param1) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// local[1]
- BUILD(r, WASM_LOCAL_GET(1));
+ r.Build({WASM_LOCAL_GET(1)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(-111, i)); }
}
WASM_EXEC_TEST(Int32Add) {
WasmRunner<int32_t> r(execution_tier);
// 11 + 44
- BUILD(r, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(44)));
+ r.Build({WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(44))});
CHECK_EQ(55, r.Call());
}
WASM_EXEC_TEST(Int32Add_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
- BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_ADD(WASM_I32V_1(13), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(i, 13), r.Call(i)); }
}
WASM_EXEC_TEST(Int32Add_P_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
- BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_ADD(WASM_I32V_1(13), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(i, 13), r.Call(i)); }
}
@@ -101,7 +101,7 @@ static void RunInt32AddTest(TestExecutionTier execution_tier, const byte* code,
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().AddSignature(sigs.ii_v());
r.builder().AddSignature(sigs.iii_v());
- r.Build(code, code + size);
+ r.Build(base::VectorOf(code, size));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(i) +
@@ -142,15 +142,16 @@ WASM_EXEC_TEST(Int32Add_multi_if) {
WASM_EXEC_TEST(Float32Add) {
WasmRunner<int32_t> r(execution_tier);
// int(11.5f + 44.5f)
- BUILD(r,
- WASM_I32_SCONVERT_F32(WASM_F32_ADD(WASM_F32(11.5f), WASM_F32(44.5f))));
+ r.Build(
+ {WASM_I32_SCONVERT_F32(WASM_F32_ADD(WASM_F32(11.5f), WASM_F32(44.5f)))});
CHECK_EQ(56, r.Call());
}
WASM_EXEC_TEST(Float64Add) {
WasmRunner<int32_t> r(execution_tier);
// return int(13.5d + 43.5d)
- BUILD(r, WASM_I32_SCONVERT_F64(WASM_F64_ADD(WASM_F64(13.5), WASM_F64(43.5))));
+ r.Build(
+ {WASM_I32_SCONVERT_F64(WASM_F64_ADD(WASM_F64(13.5), WASM_F64(43.5)))});
CHECK_EQ(57, r.Call());
}
@@ -163,14 +164,14 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(j) {
WasmRunner<ctype> r(execution_tier);
// Apply {opcode} on two constants.
- BUILD(r, WASM_BINOP(opcode, WASM_I32V(i), WASM_I32V(j)));
+ r.Build({WASM_BINOP(opcode, WASM_I32V(i), WASM_I32V(j))});
CHECK_EQ(expected(i, j), r.Call());
}
}
{
WasmRunner<ctype, ctype, ctype> r(execution_tier);
// Apply {opcode} on two parameters.
- BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
CHECK_EQ(expected(i, j), r.Call(i, j));
@@ -180,7 +181,7 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(i) {
WasmRunner<ctype, ctype> r(execution_tier);
// Apply {opcode} on constant and parameter.
- BUILD(r, WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0)));
+ r.Build({WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(j) {
CHECK_EQ(expected(i, j), r.Call(j));
}
@@ -188,7 +189,7 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(j) {
WasmRunner<ctype, ctype> r(execution_tier);
// Apply {opcode} on parameter and constant.
- BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j)));
+ r.Build({WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j))});
FOR_INT32_INPUTS(i) {
CHECK_EQ(expected(i, j), r.Call(i));
}
@@ -199,9 +200,9 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(i) {
WasmRunner<ctype, ctype> r(execution_tier);
// Apply {opcode} on constant and parameter, followed by {if}.
- BUILD(r, WASM_IF(WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0)),
+ r.Build({WASM_IF(WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0)),
WASM_RETURN(WASM_ONE)),
- WASM_ZERO);
+ WASM_ZERO});
FOR_INT32_INPUTS(j) {
CHECK_EQ(to_bool(expected(i, j)), r.Call(j));
}
@@ -209,9 +210,9 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(j) {
WasmRunner<ctype, ctype> r(execution_tier);
// Apply {opcode} on parameter and constant, followed by {if}.
- BUILD(r, WASM_IF(WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j)),
+ r.Build({WASM_IF(WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j)),
WASM_RETURN(WASM_ONE)),
- WASM_ZERO);
+ WASM_ZERO});
FOR_INT32_INPUTS(i) {
CHECK_EQ(to_bool(expected(i, j)), r.Call(i));
}
@@ -219,9 +220,9 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(i) {
WasmRunner<ctype, ctype> r(execution_tier);
// Apply {opcode} on constant and parameter, followed by {br_if}.
- BUILD(r, WASM_BR_IFD(0, WASM_ONE,
+ r.Build({WASM_BR_IFD(0, WASM_ONE,
WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0))),
- WASM_ZERO);
+ WASM_ZERO});
FOR_INT32_INPUTS(j) {
CHECK_EQ(to_bool(expected(i, j)), r.Call(j));
}
@@ -229,9 +230,9 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(j) {
WasmRunner<ctype, ctype> r(execution_tier);
// Apply {opcode} on parameter and constant, followed by {br_if}.
- BUILD(r, WASM_BR_IFD(0, WASM_ONE,
+ r.Build({WASM_BR_IFD(0, WASM_ONE,
WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j))),
- WASM_ZERO);
+ WASM_ZERO});
FOR_INT32_INPUTS(i) {
CHECK_EQ(to_bool(expected(i, j)), r.Call(i));
}
@@ -281,13 +282,13 @@ void TestInt32Unop(TestExecutionTier execution_tier, WasmOpcode opcode,
{
WasmRunner<int32_t> r(execution_tier);
// return op K
- BUILD(r, WASM_UNOP(opcode, WASM_I32V(a)));
+ r.Build({WASM_UNOP(opcode, WASM_I32V(a))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, int32_t> r(execution_tier);
// return op a
- BUILD(r, WASM_UNOP(opcode, WASM_LOCAL_GET(0)));
+ r.Build({WASM_UNOP(opcode, WASM_LOCAL_GET(0))});
CHECK_EQ(expected, r.Call(a));
}
}
@@ -383,7 +384,7 @@ WASM_EXEC_TEST(I32Eqz) {
WASM_EXEC_TEST(Int32DivS_trap) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I32_DIVS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
CHECK_TRAP(r.Call(100, 0));
@@ -394,7 +395,7 @@ WASM_EXEC_TEST(Int32DivS_trap) {
WASM_EXEC_TEST(Int32RemS_trap) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I32_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(33, r.Call(133, 100));
CHECK_EQ(0, r.Call(kMin, -1));
@@ -405,7 +406,7 @@ WASM_EXEC_TEST(Int32RemS_trap) {
WASM_EXEC_TEST(Int32DivU_trap) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_DIVU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I32_DIVU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
CHECK_EQ(0, r.Call(kMin, -1));
@@ -416,7 +417,7 @@ WASM_EXEC_TEST(Int32DivU_trap) {
WASM_EXEC_TEST(Int32RemU_trap) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_REMU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_I32_REMU(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(17, r.Call(217, 100));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_TRAP(r.Call(100, 0));
@@ -428,7 +429,7 @@ WASM_EXEC_TEST(Int32RemU_trap) {
WASM_EXEC_TEST(Int32DivS_byzero_const) {
for (int8_t denom = -2; denom < 8; ++denom) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_DIVS(WASM_LOCAL_GET(0), WASM_I32V_1(denom)));
+ r.Build({WASM_I32_DIVS(WASM_LOCAL_GET(0), WASM_I32V_1(denom))});
for (int32_t val = -7; val < 8; ++val) {
if (denom == 0) {
CHECK_TRAP(r.Call(val));
@@ -442,7 +443,7 @@ WASM_EXEC_TEST(Int32DivS_byzero_const) {
WASM_EXEC_TEST(Int32DivU_byzero_const) {
for (uint32_t denom = 0xFFFFFFFE; denom < 8; ++denom) {
WasmRunner<uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_I32_DIVU(WASM_LOCAL_GET(0), WASM_I32V_1(denom)));
+ r.Build({WASM_I32_DIVU(WASM_LOCAL_GET(0), WASM_I32V_1(denom))});
for (uint32_t val = 0xFFFFFFF0; val < 8; ++val) {
if (denom == 0) {
@@ -458,18 +459,16 @@ WASM_EXEC_TEST(Int32DivS_trap_effect) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_IF_ELSE_I(
- WASM_LOCAL_GET(0),
- WASM_I32_DIVS(
- WASM_BLOCK_I(WASM_STORE_MEM(MachineType::Int8(), WASM_ZERO,
- WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(1)),
- WASM_I32_DIVS(
- WASM_BLOCK_I(WASM_STORE_MEM(MachineType::Int8(), WASM_ZERO,
- WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(1))));
+ r.Build({WASM_IF_ELSE_I(
+ WASM_LOCAL_GET(0),
+ WASM_I32_DIVS(WASM_BLOCK_I(WASM_STORE_MEM(MachineType::Int8(), WASM_ZERO,
+ WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(1)),
+ WASM_I32_DIVS(WASM_BLOCK_I(WASM_STORE_MEM(MachineType::Int8(), WASM_ZERO,
+ WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(1)))});
CHECK_EQ(0, r.Call(0, 100));
CHECK_TRAP(r.Call(8, 0));
CHECK_TRAP(r.Call(4, 0));
@@ -481,13 +480,13 @@ void TestFloat32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
{
WasmRunner<int32_t> r(execution_tier);
// return K op K
- BUILD(r, WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b)));
+ r.Build({WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, float, float> r(execution_tier);
// return a op b
- BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(expected, r.Call(a, b));
}
}
@@ -498,15 +497,15 @@ void TestFloat32BinopWithConvert(TestExecutionTier execution_tier,
{
WasmRunner<int32_t> r(execution_tier);
// return int(K op K)
- BUILD(r,
- WASM_I32_SCONVERT_F32(WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b))));
+ r.Build(
+ {WASM_I32_SCONVERT_F32(WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b)))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, float, float> r(execution_tier);
// return int(a op b)
- BUILD(r, WASM_I32_SCONVERT_F32(
- WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_SCONVERT_F32(
+ WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
CHECK_EQ(expected, r.Call(a, b));
}
}
@@ -516,13 +515,13 @@ void TestFloat32UnopWithConvert(TestExecutionTier execution_tier,
{
WasmRunner<int32_t> r(execution_tier);
// return int(op(K))
- BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_F32(a))));
+ r.Build({WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_F32(a)))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, float> r(execution_tier);
// return int(op(a))
- BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_LOCAL_GET(0))));
+ r.Build({WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_LOCAL_GET(0)))});
CHECK_EQ(expected, r.Call(a));
}
}
@@ -532,13 +531,13 @@ void TestFloat64Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
{
WasmRunner<int32_t> r(execution_tier);
// return K op K
- BUILD(r, WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b)));
+ r.Build({WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, double, double> r(execution_tier);
// return a op b
- BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
CHECK_EQ(expected, r.Call(a, b));
}
}
@@ -549,14 +548,14 @@ void TestFloat64BinopWithConvert(TestExecutionTier execution_tier,
{
WasmRunner<int32_t> r(execution_tier);
// return int(K op K)
- BUILD(r,
- WASM_I32_SCONVERT_F64(WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b))));
+ r.Build(
+ {WASM_I32_SCONVERT_F64(WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b)))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, double, double> r(execution_tier);
- BUILD(r, WASM_I32_SCONVERT_F64(
- WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_I32_SCONVERT_F64(
+ WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
CHECK_EQ(expected, r.Call(a, b));
}
}
@@ -566,13 +565,13 @@ void TestFloat64UnopWithConvert(TestExecutionTier execution_tier,
{
WasmRunner<int32_t> r(execution_tier);
// return int(op(K))
- BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_F64(a))));
+ r.Build({WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_F64(a)))});
CHECK_EQ(expected, r.Call());
}
{
WasmRunner<int32_t, double> r(execution_tier);
// return int(op(a))
- BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_LOCAL_GET(0))));
+ r.Build({WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_LOCAL_GET(0)))});
CHECK_EQ(expected, r.Call(a));
}
}
@@ -622,7 +621,7 @@ WASM_EXEC_TEST(Float64Unops) {
WASM_EXEC_TEST(Float32Neg) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_NEG(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_NEG(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
CHECK_EQ(0x80000000,
@@ -632,7 +631,7 @@ WASM_EXEC_TEST(Float32Neg) {
WASM_EXEC_TEST(Float64Neg) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_NEG(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_NEG(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
CHECK_EQ(0x8000000000000000,
@@ -643,9 +642,9 @@ WASM_EXEC_TEST(Float64Neg) {
WASM_EXEC_TEST(IfElse_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// if (p0) return 11; else return 22;
- BUILD(r, WASM_IF_ELSE_I(WASM_LOCAL_GET(0), // --
- WASM_I32V_1(11), // --
- WASM_I32V_1(22))); // --
+ r.Build({WASM_IF_ELSE_I(WASM_LOCAL_GET(0), // --
+ WASM_I32V_1(11), // --
+ WASM_I32V_1(22))}); // --
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 11 : 22;
CHECK_EQ(expected, r.Call(i));
@@ -654,45 +653,45 @@ WASM_EXEC_TEST(IfElse_P) {
WASM_EXEC_TEST(If_empty1) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_LOCAL_GET(0), kExprIf, kVoidCode, kExprEnd, WASM_LOCAL_GET(1));
+ r.Build({WASM_LOCAL_GET(0), kExprIf, kVoidCode, kExprEnd, WASM_LOCAL_GET(1)});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 9, i)); }
}
WASM_EXEC_TEST(IfElse_empty1) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_LOCAL_GET(0), kExprIf, kVoidCode, kExprElse, kExprEnd,
- WASM_LOCAL_GET(1));
+ r.Build({WASM_LOCAL_GET(0), kExprIf, kVoidCode, kExprElse, kExprEnd,
+ WASM_LOCAL_GET(1)});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 8, i)); }
}
WASM_EXEC_TEST(IfElse_empty2) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_LOCAL_GET(0), kExprIf, kVoidCode, WASM_NOP, kExprElse, kExprEnd,
- WASM_LOCAL_GET(1));
+ r.Build({WASM_LOCAL_GET(0), kExprIf, kVoidCode, WASM_NOP, kExprElse, kExprEnd,
+ WASM_LOCAL_GET(1)});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 7, i)); }
}
WASM_EXEC_TEST(IfElse_empty3) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_LOCAL_GET(0), kExprIf, kVoidCode, kExprElse, WASM_NOP, kExprEnd,
- WASM_LOCAL_GET(1));
+ r.Build({WASM_LOCAL_GET(0), kExprIf, kVoidCode, kExprElse, WASM_NOP, kExprEnd,
+ WASM_LOCAL_GET(1)});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 6, i)); }
}
WASM_EXEC_TEST(If_chain1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
  // if (p0) nop; if (p0) nop; 15
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_NOP),
- WASM_IF(WASM_LOCAL_GET(0), WASM_NOP), WASM_I32V_1(15));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_NOP),
+ WASM_IF(WASM_LOCAL_GET(0), WASM_NOP), WASM_I32V_1(15)});
FOR_INT32_INPUTS(i) { CHECK_EQ(15, r.Call(i)); }
}
WASM_EXEC_TEST(If_chain_set) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// if (p0) p1 = 73; if (p0) p1 = 74; p1
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_LOCAL_SET(1, WASM_I32V_2(73))),
- WASM_IF(WASM_LOCAL_GET(0), WASM_LOCAL_SET(1, WASM_I32V_2(74))),
- WASM_LOCAL_GET(1));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_LOCAL_SET(1, WASM_I32V_2(73))),
+ WASM_IF(WASM_LOCAL_GET(0), WASM_LOCAL_SET(1, WASM_I32V_2(74))),
+ WASM_LOCAL_GET(1)});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 74 : i;
CHECK_EQ(expected, r.Call(i, i));
@@ -702,39 +701,39 @@ WASM_EXEC_TEST(If_chain_set) {
WASM_EXEC_TEST(IfElse_Unreachable1) {
WasmRunner<int32_t> r(execution_tier);
// 0 ? unreachable : 27
- BUILD(r, WASM_IF_ELSE_I(WASM_ZERO, // --
- WASM_UNREACHABLE, // --
- WASM_I32V_1(27))); // --
+ r.Build({WASM_IF_ELSE_I(WASM_ZERO, // --
+ WASM_UNREACHABLE, // --
+ WASM_I32V_1(27))}); // --
CHECK_EQ(27, r.Call());
}
WASM_EXEC_TEST(IfElse_Unreachable2) {
WasmRunner<int32_t> r(execution_tier);
// 1 ? 28 : unreachable
- BUILD(r, WASM_IF_ELSE_I(WASM_I32V_1(1), // --
- WASM_I32V_1(28), // --
- WASM_UNREACHABLE)); // --
+ r.Build({WASM_IF_ELSE_I(WASM_I32V_1(1), // --
+ WASM_I32V_1(28), // --
+ WASM_UNREACHABLE)}); // --
CHECK_EQ(28, r.Call());
}
WASM_EXEC_TEST(Return12) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, RET_I8(12));
+ r.Build({RET_I8(12)});
CHECK_EQ(12, r.Call());
}
WASM_EXEC_TEST(Return17) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK(RET_I8(17)), WASM_ZERO);
+ r.Build({WASM_BLOCK(RET_I8(17)), WASM_ZERO});
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(Return_I32) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, RET(WASM_LOCAL_GET(0)));
+ r.Build({RET(WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
@@ -742,7 +741,7 @@ WASM_EXEC_TEST(Return_I32) {
WASM_EXEC_TEST(Return_F32) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, RET(WASM_LOCAL_GET(0)));
+ r.Build({RET(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
float expect = i;
@@ -758,7 +757,7 @@ WASM_EXEC_TEST(Return_F32) {
WASM_EXEC_TEST(Return_F64) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, RET(WASM_LOCAL_GET(0)));
+ r.Build({RET(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
double expect = i;
@@ -773,8 +772,8 @@ WASM_EXEC_TEST(Return_F64) {
WASM_EXEC_TEST(Select_float_parameters) {
WasmRunner<float, float, float, int32_t> r(execution_tier);
- BUILD(r,
- WASM_SELECT(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)));
+ r.Build(
+ {WASM_SELECT(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2))});
CHECK_FLOAT_EQ(2.0f, r.Call(2.0f, 1.0f, 1));
}
@@ -788,10 +787,10 @@ WASM_EXEC_TEST(Select_s128_parameters) {
LANE(g0, i) = i;
LANE(g1, i) = i + 4;
}
- BUILD(r,
- WASM_GLOBAL_SET(2, WASM_SELECT(WASM_GLOBAL_GET(0), WASM_GLOBAL_GET(1),
- WASM_LOCAL_GET(0))),
- WASM_ONE);
+ r.Build(
+ {WASM_GLOBAL_SET(2, WASM_SELECT(WASM_GLOBAL_GET(0), WASM_GLOBAL_GET(1),
+ WASM_LOCAL_GET(0))),
+ WASM_ONE});
r.Call(1);
for (int i = 0; i < 4; i++) {
CHECK_EQ(i, LANE(output, i));
@@ -800,15 +799,15 @@ WASM_EXEC_TEST(Select_s128_parameters) {
WASM_EXEC_TEST(SelectWithType_float_parameters) {
WasmRunner<float, float, float, int32_t> r(execution_tier);
- BUILD(r,
- WASM_SELECT_F(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2)));
+ r.Build(
+ {WASM_SELECT_F(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2))});
CHECK_FLOAT_EQ(2.0f, r.Call(2.0f, 1.0f, 1));
}
WASM_EXEC_TEST(Select) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// return select(11, 22, a);
- BUILD(r, WASM_SELECT(WASM_I32V_1(11), WASM_I32V_1(22), WASM_LOCAL_GET(0)));
+ r.Build({WASM_SELECT(WASM_I32V_1(11), WASM_I32V_1(22), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 11 : 22;
CHECK_EQ(expected, r.Call(i));
@@ -818,7 +817,7 @@ WASM_EXEC_TEST(Select) {
WASM_EXEC_TEST(SelectWithType) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// return select(11, 22, a);
- BUILD(r, WASM_SELECT_I(WASM_I32V_1(11), WASM_I32V_1(22), WASM_LOCAL_GET(0)));
+ r.Build({WASM_SELECT_I(WASM_I32V_1(11), WASM_I32V_1(22), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 11 : 22;
CHECK_EQ(expected, r.Call(i));
@@ -828,22 +827,20 @@ WASM_EXEC_TEST(SelectWithType) {
WASM_EXEC_TEST(Select_strict1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// select(a=0, a=1, a=2); return a
- BUILD(r,
- WASM_SELECT(WASM_LOCAL_TEE(0, WASM_ZERO),
- WASM_LOCAL_TEE(0, WASM_I32V_1(1)),
- WASM_LOCAL_TEE(0, WASM_I32V_1(2))),
- WASM_DROP, WASM_LOCAL_GET(0));
+ r.Build({WASM_SELECT(WASM_LOCAL_TEE(0, WASM_ZERO),
+ WASM_LOCAL_TEE(0, WASM_I32V_1(1)),
+ WASM_LOCAL_TEE(0, WASM_I32V_1(2))),
+ WASM_DROP, WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(i)); }
}
WASM_EXEC_TEST(SelectWithType_strict1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// select(a=0, a=1, a=2); return a
- BUILD(r,
- WASM_SELECT_I(WASM_LOCAL_TEE(0, WASM_ZERO),
- WASM_LOCAL_TEE(0, WASM_I32V_1(1)),
- WASM_LOCAL_TEE(0, WASM_I32V_1(2))),
- WASM_DROP, WASM_LOCAL_GET(0));
+ r.Build({WASM_SELECT_I(WASM_LOCAL_TEE(0, WASM_ZERO),
+ WASM_LOCAL_TEE(0, WASM_I32V_1(1)),
+ WASM_LOCAL_TEE(0, WASM_I32V_1(2))),
+ WASM_DROP, WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(i)); }
}
@@ -852,8 +849,8 @@ WASM_EXEC_TEST(Select_strict2) {
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
// select(b=5, c=6, a)
- BUILD(r, WASM_SELECT(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
- WASM_LOCAL_TEE(2, WASM_I32V_1(6)), WASM_LOCAL_GET(0)));
+ r.Build({WASM_SELECT(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
+ WASM_LOCAL_TEE(2, WASM_I32V_1(6)), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 5 : 6;
CHECK_EQ(expected, r.Call(i));
@@ -865,8 +862,9 @@ WASM_EXEC_TEST(SelectWithType_strict2) {
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
// select(b=5, c=6, a)
- BUILD(r, WASM_SELECT_I(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
- WASM_LOCAL_TEE(2, WASM_I32V_1(6)), WASM_LOCAL_GET(0)));
+ r.Build(
+ {WASM_SELECT_I(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
+ WASM_LOCAL_TEE(2, WASM_I32V_1(6)), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 5 : 6;
CHECK_EQ(expected, r.Call(i));
@@ -878,9 +876,9 @@ WASM_EXEC_TEST(Select_strict3) {
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
// select(b=5, c=6, a=b)
- BUILD(r, WASM_SELECT(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
+ r.Build({WASM_SELECT(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
WASM_LOCAL_TEE(2, WASM_I32V_1(6)),
- WASM_LOCAL_TEE(0, WASM_LOCAL_GET(1))));
+ WASM_LOCAL_TEE(0, WASM_LOCAL_GET(1)))});
FOR_INT32_INPUTS(i) {
int32_t expected = 5;
CHECK_EQ(expected, r.Call(i));
@@ -892,9 +890,9 @@ WASM_EXEC_TEST(SelectWithType_strict3) {
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
// select(b=5, c=6, a=b)
- BUILD(r, WASM_SELECT_I(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
+ r.Build({WASM_SELECT_I(WASM_LOCAL_TEE(1, WASM_I32V_1(5)),
WASM_LOCAL_TEE(2, WASM_I32V_1(6)),
- WASM_LOCAL_TEE(0, WASM_LOCAL_GET(1))));
+ WASM_LOCAL_TEE(0, WASM_LOCAL_GET(1)))});
FOR_INT32_INPUTS(i) {
int32_t expected = 5;
CHECK_EQ(expected, r.Call(i));
@@ -903,18 +901,18 @@ WASM_EXEC_TEST(SelectWithType_strict3) {
WASM_EXEC_TEST(BrIf_strict) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_LOCAL_GET(0),
- WASM_LOCAL_TEE(0, WASM_I32V_2(99)))));
+ r.Build({WASM_BLOCK_I(
+ WASM_BRV_IF(0, WASM_LOCAL_GET(0), WASM_LOCAL_TEE(0, WASM_I32V_2(99))))});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Br_height) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(
- WASM_BLOCK(WASM_BRV_IFD(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)),
- WASM_RETURN(WASM_I32V_1(9))),
- WASM_BRV(0, WASM_I32V_1(8))));
+ r.Build({WASM_BLOCK_I(
+ WASM_BLOCK(WASM_BRV_IFD(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)),
+ WASM_RETURN(WASM_I32V_1(9))),
+ WASM_BRV(0, WASM_I32V_1(8)))});
for (int32_t i = 0; i < 5; i++) {
int32_t expected = i != 0 ? 8 : 9;
@@ -925,33 +923,32 @@ WASM_EXEC_TEST(Br_height) {
WASM_EXEC_TEST(Regression_660262) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, kExprI32Const, 0x00, kExprI32Const, 0x00, kExprI32LoadMem, 0x00,
- 0x0F, kExprBrTable, 0x00, 0x80, 0x00); // entries=0
+ r.Build({kExprI32Const, 0x00, kExprI32Const, 0x00, kExprI32LoadMem, 0x00,
+ 0x0F, kExprBrTable, 0x00, 0x80, 0x00}); // entries=0
r.Call();
}
WASM_EXEC_TEST(BrTable0a) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, B1(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 0, BR_TARGET(0)))),
- WASM_I32V_2(91));
+ r.Build({B1(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 0, BR_TARGET(0)))),
+ WASM_I32V_2(91)});
FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(i)); }
}
WASM_EXEC_TEST(BrTable0b) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- B1(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 1, BR_TARGET(0), BR_TARGET(0)))),
- WASM_I32V_2(92));
+ r.Build(
+ {B1(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 1, BR_TARGET(0), BR_TARGET(0)))),
+ WASM_I32V_2(92)});
FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(i)); }
}
WASM_EXEC_TEST(BrTable0c) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(
- r,
- B1(B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 1, BR_TARGET(0), BR_TARGET(1))),
- RET_I8(76))),
- WASM_I32V_2(77));
+ r.Build({B1(B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 1, BR_TARGET(0),
+ BR_TARGET(1))),
+ RET_I8(76))),
+ WASM_I32V_2(77)});
FOR_INT32_INPUTS(i) {
int32_t expected = i == 0 ? 76 : 77;
CHECK_EQ(expected, r.Call(i));
@@ -960,17 +957,17 @@ WASM_EXEC_TEST(BrTable0c) {
WASM_EXEC_TEST(BrTable1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 0, BR_TARGET(0))), RET_I8(93));
+ r.Build({B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 0, BR_TARGET(0))), RET_I8(93)});
FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(i)); }
}
WASM_EXEC_TEST(BrTable_loop) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- B2(B1(WASM_LOOP(WASM_BR_TABLE(WASM_INC_LOCAL_BYV(0, 1), 2, BR_TARGET(2),
- BR_TARGET(1), BR_TARGET(0)))),
- RET_I8(99)),
- WASM_I32V_2(98));
+ r.Build(
+ {B2(B1(WASM_LOOP(WASM_BR_TABLE(WASM_INC_LOCAL_BYV(0, 1), 2, BR_TARGET(2),
+ BR_TARGET(1), BR_TARGET(0)))),
+ RET_I8(99)),
+ WASM_I32V_2(98)});
CHECK_EQ(99, r.Call(0));
CHECK_EQ(98, r.Call(-1));
CHECK_EQ(98, r.Call(-2));
@@ -980,10 +977,10 @@ WASM_EXEC_TEST(BrTable_loop) {
WASM_EXEC_TEST(BrTable_br) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 1, BR_TARGET(1), BR_TARGET(0))),
- RET_I8(91)),
- WASM_I32V_2(99));
+ r.Build(
+ {B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 1, BR_TARGET(1), BR_TARGET(0))),
+ RET_I8(91)),
+ WASM_I32V_2(99)});
CHECK_EQ(99, r.Call(0));
CHECK_EQ(91, r.Call(1));
CHECK_EQ(91, r.Call(2));
@@ -993,13 +990,12 @@ WASM_EXEC_TEST(BrTable_br) {
WASM_EXEC_TEST(BrTable_br2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- B2(B2(B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 3, BR_TARGET(1),
- BR_TARGET(2), BR_TARGET(3), BR_TARGET(0))),
- RET_I8(85)),
- RET_I8(86)),
- RET_I8(87)),
- WASM_I32V_2(88));
+ r.Build({B2(B2(B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 3, BR_TARGET(1),
+ BR_TARGET(2), BR_TARGET(3), BR_TARGET(0))),
+ RET_I8(85)),
+ RET_I8(86)),
+ RET_I8(87)),
+ WASM_I32V_2(88)});
CHECK_EQ(86, r.Call(0));
CHECK_EQ(87, r.Call(1));
CHECK_EQ(88, r.Call(2));
@@ -1013,18 +1009,17 @@ WASM_EXEC_TEST(BrTable4) {
for (int t = 0; t < 4; ++t) {
uint32_t cases[] = {0, 1, 2, 3};
cases[i] = t;
- byte code[] = {B2(B2(B2(B2(B1(WASM_BR_TABLE(
- WASM_LOCAL_GET(0), 3, BR_TARGET(cases[0]),
- BR_TARGET(cases[1]), BR_TARGET(cases[2]),
- BR_TARGET(cases[3]))),
- RET_I8(70)),
- RET_I8(71)),
- RET_I8(72)),
- RET_I8(73)),
- WASM_I32V_2(75)};
WasmRunner<int32_t, int32_t> r(execution_tier);
- r.Build(code, code + arraysize(code));
+ r.Build({B2(B2(B2(B2(B1(WASM_BR_TABLE(
+ WASM_LOCAL_GET(0), 3, BR_TARGET(cases[0]),
+ BR_TARGET(cases[1]), BR_TARGET(cases[2]),
+ BR_TARGET(cases[3]))),
+ RET_I8(70)),
+ RET_I8(71)),
+ RET_I8(72)),
+ RET_I8(73)),
+ WASM_I32V_2(75)});
for (int x = -3; x < 50; ++x) {
int index = (x > 3 || x < 0) ? 3 : x;
@@ -1042,19 +1037,17 @@ WASM_EXEC_TEST(BrTable4x4) {
for (byte d = 0; d < 4; ++d) {
for (int i = 0; i < 4; ++i) {
uint32_t cases[] = {a, b, c, d};
- byte code[] = {
- B2(B2(B2(B2(B1(WASM_BR_TABLE(
- WASM_LOCAL_GET(0), 3, BR_TARGET(cases[0]),
- BR_TARGET(cases[1]), BR_TARGET(cases[2]),
- BR_TARGET(cases[3]))),
- RET_I8(50)),
- RET_I8(51)),
- RET_I8(52)),
- RET_I8(53)),
- WASM_I32V_2(55)};
WasmRunner<int32_t, int32_t> r(execution_tier);
- r.Build(code, code + arraysize(code));
+ r.Build({B2(B2(B2(B2(B1(WASM_BR_TABLE(
+ WASM_LOCAL_GET(0), 3, BR_TARGET(cases[0]),
+ BR_TARGET(cases[1]), BR_TARGET(cases[2]),
+ BR_TARGET(cases[3]))),
+ RET_I8(50)),
+ RET_I8(51)),
+ RET_I8(52)),
+ RET_I8(53)),
+ WASM_I32V_2(55)});
for (int x = -6; x < 47; ++x) {
int index = (x > 3 || x < 0) ? 3 : x;
@@ -1069,17 +1062,15 @@ WASM_EXEC_TEST(BrTable4x4) {
}
WASM_EXEC_TEST(BrTable4_fallthru) {
- byte code[] = {
- B2(B2(B2(B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 3, BR_TARGET(0),
- BR_TARGET(1), BR_TARGET(2), BR_TARGET(3))),
- WASM_INC_LOCAL_BY(1, 1)),
- WASM_INC_LOCAL_BY(1, 2)),
- WASM_INC_LOCAL_BY(1, 4)),
- WASM_INC_LOCAL_BY(1, 8)),
- WASM_LOCAL_GET(1)};
-
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- r.Build(code, code + arraysize(code));
+ r.Build(
+ {B2(B2(B2(B2(B1(WASM_BR_TABLE(WASM_LOCAL_GET(0), 3, BR_TARGET(0),
+ BR_TARGET(1), BR_TARGET(2), BR_TARGET(3))),
+ WASM_INC_LOCAL_BY(1, 1)),
+ WASM_INC_LOCAL_BY(1, 2)),
+ WASM_INC_LOCAL_BY(1, 4)),
+ WASM_INC_LOCAL_BY(1, 8)),
+ WASM_LOCAL_GET(1)});
CHECK_EQ(15, r.Call(0, 0));
CHECK_EQ(14, r.Call(1, 0));
@@ -1095,13 +1086,11 @@ WASM_EXEC_TEST(BrTable4_fallthru) {
}
WASM_EXEC_TEST(BrTable_loop_target) {
- byte code[] = {
- WASM_LOOP_I(WASM_BLOCK(WASM_BR_TABLE(WASM_LOCAL_GET(0), 2, BR_TARGET(0),
- BR_TARGET(1), BR_TARGET(1))),
- WASM_ONE)};
-
WasmRunner<int32_t, int32_t> r(execution_tier);
- r.Build(code, code + arraysize(code));
+ r.Build(
+ {WASM_LOOP_I(WASM_BLOCK(WASM_BR_TABLE(WASM_LOCAL_GET(0), 2, BR_TARGET(0),
+ BR_TARGET(1), BR_TARGET(1))),
+ WASM_ONE)});
CHECK_EQ(1, r.Call(0));
}
@@ -1111,8 +1100,8 @@ WASM_EXEC_TEST(I32ReinterpretF32) {
float* memory =
r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
- BUILD(r, WASM_I32_REINTERPRET_F32(
- WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)));
+ r.Build({WASM_I32_REINTERPRET_F32(
+ WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO))});
FOR_FLOAT32_INPUTS(i) {
float input = i;
@@ -1127,8 +1116,8 @@ WASM_EXEC_TEST(F32ReinterpretI32) {
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_F32_REINTERPRET_I32(
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
+ r.Build({WASM_F32_REINTERPRET_I32(
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))});
FOR_INT32_INPUTS(i) {
int32_t input = i;
@@ -1150,8 +1139,8 @@ WASM_EXEC_TEST(F32ReinterpretI32) {
WASM_EXEC_TEST(SignallingNanSurvivesI32ReinterpretF32) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_I32_REINTERPRET_F32(
- WASM_SEQ(kExprF32Const, 0x00, 0x00, 0xA0, 0x7F)));
+ r.Build({WASM_I32_REINTERPRET_F32(
+ WASM_SEQ(kExprF32Const, 0x00, 0x00, 0xA0, 0x7F))});
// This is a signalling nan.
CHECK_EQ(0x7FA00000, r.Call());
@@ -1163,9 +1152,9 @@ WASM_EXEC_TEST(LoadMaxUint32Offset) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), // type
+ r.Build({WASM_LOAD_MEM_OFFSET(MachineType::Int32(), // type
U32V_5(0xFFFFFFFF), // offset
- WASM_ZERO)); // index
+ WASM_ZERO)}); // index
CHECK_TRAP32(r.Call());
}
@@ -1175,9 +1164,9 @@ WASM_EXEC_TEST(LoadStoreLoad) {
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ r.Build({WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)});
FOR_INT32_INPUTS(i) {
int32_t expected = i;
@@ -1189,48 +1178,48 @@ WASM_EXEC_TEST(LoadStoreLoad) {
WASM_EXEC_TEST(UnalignedFloat32Load) {
WasmRunner<float> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Float32(), WASM_ONE, 2));
+ r.Build({WASM_LOAD_MEM_ALIGNMENT(MachineType::Float32(), WASM_ONE, 2)});
r.Call();
}
WASM_EXEC_TEST(UnalignedFloat64Load) {
WasmRunner<double> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Float64(), WASM_ONE, 3));
+ r.Build({WASM_LOAD_MEM_ALIGNMENT(MachineType::Float64(), WASM_ONE, 3)});
r.Call();
}
WASM_EXEC_TEST(UnalignedInt32Load) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_ONE, 2));
+ r.Build({WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_ONE, 2)});
r.Call();
}
WASM_EXEC_TEST(UnalignedInt32Store) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ONE, 2,
+ r.Build({WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ONE, 2,
WASM_I32V_1(1)),
- WASM_I32V_1(12)));
+ WASM_I32V_1(12))});
r.Call();
}
WASM_EXEC_TEST(UnalignedFloat32Store) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Float32(), WASM_ONE,
+ r.Build({WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Float32(), WASM_ONE,
2, WASM_F32(1.0)),
- WASM_I32V_1(12)));
+ WASM_I32V_1(12))});
r.Call();
}
WASM_EXEC_TEST(UnalignedFloat64Store) {
WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Float64(), WASM_ONE,
+ r.Build({WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Float64(), WASM_ONE,
3, WASM_F64(1.0)),
- WASM_I32V_1(12)));
+ WASM_I32V_1(12))});
r.Call();
}
@@ -1240,11 +1229,11 @@ WASM_EXEC_TEST(VoidReturn1) {
// Build the test function.
WasmFunctionCompiler& test_func = r.NewFunction<void>();
- BUILD(test_func, kExprNop);
+ test_func.Build({kExprNop});
// Build the calling function.
- BUILD(r, WASM_CALL_FUNCTION0(test_func.function_index()),
- WASM_I32V_3(kExpected));
+ r.Build({WASM_CALL_FUNCTION0(test_func.function_index()),
+ WASM_I32V_3(kExpected)});
// Call and check.
int32_t result = r.Call();
@@ -1257,11 +1246,11 @@ WASM_EXEC_TEST(VoidReturn2) {
// Build the test function.
WasmFunctionCompiler& test_func = r.NewFunction<void>();
- BUILD(test_func, WASM_RETURN0);
+ test_func.Build({WASM_RETURN0});
// Build the calling function.
- BUILD(r, WASM_CALL_FUNCTION0(test_func.function_index()),
- WASM_I32V_3(kExpected));
+ r.Build({WASM_CALL_FUNCTION0(test_func.function_index()),
+ WASM_I32V_3(kExpected)});
// Call and check.
int32_t result = r.Call();
@@ -1270,71 +1259,71 @@ WASM_EXEC_TEST(VoidReturn2) {
WASM_EXEC_TEST(BrEmpty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BRV(0, WASM_LOCAL_GET(0)));
+ r.Build({WASM_BRV(0, WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(BrIfEmpty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BRV_IF(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_BRV_IF(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, kExprBlock, kVoidCode, kExprEnd, WASM_LOCAL_GET(0));
+ r.Build({kExprBlock, kVoidCode, kExprEnd, WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty_br1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, B1(WASM_BR(0)), WASM_LOCAL_GET(0));
+ r.Build({B1(WASM_BR(0)), WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty_brif1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_ZERO)), WASM_LOCAL_GET(0));
+ r.Build({WASM_BLOCK(WASM_BR_IF(0, WASM_ZERO)), WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty_brif2) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_LOCAL_GET(1))), WASM_LOCAL_GET(0));
+ r.Build({WASM_BLOCK(WASM_BR_IF(0, WASM_LOCAL_GET(1))), WASM_LOCAL_GET(0)});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i, i + 1)); }
}
WASM_EXEC_TEST(Block_i) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_LOCAL_GET(0)));
+ r.Build({WASM_BLOCK_I(WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_f) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_BLOCK_F(WASM_LOCAL_GET(0)));
+ r.Build({WASM_BLOCK_F(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_d) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_BLOCK_D(WASM_LOCAL_GET(0)));
+ r.Build({WASM_BLOCK_D(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_br2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_LOCAL_GET(0))));
+ r.Build({WASM_BLOCK_I(WASM_BRV(0, WASM_LOCAL_GET(0)))});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, static_cast<uint32_t>(r.Call(i))); }
}
WASM_EXEC_TEST(Block_If_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// block { if (p0) break 51; 52; }
- BUILD(r, WASM_BLOCK_I( // --
- WASM_IF(WASM_LOCAL_GET(0), // --
- WASM_BRV(1, WASM_I32V_1(51))), // --
- WASM_I32V_1(52))); // --
+ r.Build({WASM_BLOCK_I( // --
+ WASM_IF(WASM_LOCAL_GET(0), // --
+ WASM_BRV(1, WASM_I32V_1(51))), // --
+ WASM_I32V_1(52))}); // --
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 51 : 52;
CHECK_EQ(expected, r.Call(i));
@@ -1343,50 +1332,50 @@ WASM_EXEC_TEST(Block_If_P) {
WASM_EXEC_TEST(Loop_empty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, kExprLoop, kVoidCode, kExprEnd, WASM_LOCAL_GET(0));
+ r.Build({kExprLoop, kVoidCode, kExprEnd, WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_i) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_LOOP_I(WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOOP_I(WASM_LOCAL_GET(0))});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_f) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_LOOP_F(WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOOP_F(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_d) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_LOOP_D(WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOOP_D(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_empty_br1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, B1(WASM_LOOP(WASM_BR(1))), WASM_LOCAL_GET(0));
+ r.Build({B1(WASM_LOOP(WASM_BR(1))), WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_empty_brif1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, B1(WASM_LOOP(WASM_BR_IF(1, WASM_ZERO))), WASM_LOCAL_GET(0));
+ r.Build({B1(WASM_LOOP(WASM_BR_IF(1, WASM_ZERO))), WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_empty_brif2) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_LOOP_I(WASM_BRV_IF(1, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))));
+ r.Build({WASM_LOOP_I(WASM_BRV_IF(1, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))});
FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i, i + 1)); }
}
WASM_EXEC_TEST(Loop_empty_brif3) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
- BUILD(r, WASM_LOOP(WASM_BRV_IFD(1, WASM_LOCAL_GET(2), WASM_LOCAL_GET(0))),
- WASM_LOCAL_GET(1));
+ r.Build({WASM_LOOP(WASM_BRV_IFD(1, WASM_LOCAL_GET(2), WASM_LOCAL_GET(0))),
+ WASM_LOCAL_GET(1)});
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
CHECK_EQ(i, r.Call(0, i, j));
@@ -1397,8 +1386,8 @@ WASM_EXEC_TEST(Loop_empty_brif3) {
WASM_EXEC_TEST(Block_BrIf_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(51), WASM_LOCAL_GET(0)),
- WASM_I32V_1(52)));
+ r.Build({WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(51), WASM_LOCAL_GET(0)),
+ WASM_I32V_1(52))});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 51 : 52;
CHECK_EQ(expected, r.Call(i));
@@ -1408,11 +1397,10 @@ WASM_EXEC_TEST(Block_BrIf_P) {
WASM_EXEC_TEST(Block_IfElse_P_assign) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// { if (p0) p0 = 71; else p0 = 72; return p0; }
- BUILD(r, // --
- WASM_IF_ELSE(WASM_LOCAL_GET(0), // --
- WASM_LOCAL_SET(0, WASM_I32V_2(71)), // --
- WASM_LOCAL_SET(0, WASM_I32V_2(72))), // --
- WASM_LOCAL_GET(0));
+ r.Build({WASM_IF_ELSE(WASM_LOCAL_GET(0), // --
+ WASM_LOCAL_SET(0, WASM_I32V_2(71)), // --
+ WASM_LOCAL_SET(0, WASM_I32V_2(72))), // --
+ WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 71 : 72;
CHECK_EQ(expected, r.Call(i));
@@ -1422,11 +1410,10 @@ WASM_EXEC_TEST(Block_IfElse_P_assign) {
WASM_EXEC_TEST(Block_IfElse_P_return) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// if (p0) return 81; else return 82;
- BUILD(r, // --
- WASM_IF_ELSE(WASM_LOCAL_GET(0), // --
- RET_I8(81), // --
- RET_I8(82)), // --
- WASM_ZERO); // --
+ r.Build({WASM_IF_ELSE(WASM_LOCAL_GET(0), // --
+ RET_I8(81), // --
+ RET_I8(82)), // --
+ WASM_ZERO}); // --
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 81 : 82;
CHECK_EQ(expected, r.Call(i));
@@ -1436,8 +1423,8 @@ WASM_EXEC_TEST(Block_IfElse_P_return) {
WASM_EXEC_TEST(Block_If_P_assign) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// { if (p0) p0 = 61; p0; }
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_LOCAL_SET(0, WASM_I32V_1(61))),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_LOCAL_SET(0, WASM_I32V_1(61))),
+ WASM_LOCAL_GET(0)});
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 61 : i;
CHECK_EQ(expected, r.Call(i));
@@ -1447,16 +1434,16 @@ WASM_EXEC_TEST(Block_If_P_assign) {
WASM_EXEC_TEST(DanglingAssign) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// { return 0; p0 = 0; }
- BUILD(r, WASM_BLOCK_I(RET_I8(99), WASM_LOCAL_TEE(0, WASM_ZERO)));
+ r.Build({WASM_BLOCK_I(RET_I8(99), WASM_LOCAL_TEE(0, WASM_ZERO))});
CHECK_EQ(99, r.Call(1));
}
WASM_EXEC_TEST(ExprIf_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 ? 11 : 22;
- BUILD(r, WASM_IF_ELSE_I(WASM_LOCAL_GET(0), // --
- WASM_I32V_1(11), // --
- WASM_I32V_1(22))); // --
+ r.Build({WASM_IF_ELSE_I(WASM_LOCAL_GET(0), // --
+ WASM_I32V_1(11), // --
+ WASM_I32V_1(22))}); // --
FOR_INT32_INPUTS(i) {
int32_t expected = i ? 11 : 22;
CHECK_EQ(expected, r.Call(i));
@@ -1465,12 +1452,11 @@ WASM_EXEC_TEST(ExprIf_P) {
WASM_EXEC_TEST(CountDown) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- WASM_LOOP(WASM_IF(
- WASM_LOCAL_GET(0),
- WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(1))),
- WASM_BR(1))),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0),
+ WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0),
+ WASM_I32V_1(1))),
+ WASM_BR(1))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
@@ -1478,13 +1464,12 @@ WASM_EXEC_TEST(CountDown) {
WASM_EXEC_TEST(CountDown_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(
- r,
- WASM_LOOP(
- WASM_IF(WASM_NOT(WASM_LOCAL_GET(0)), WASM_BRV(2, WASM_LOCAL_GET(0))),
- WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(1))),
- WASM_CONTINUE(0)),
- WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_LOOP(
+ WASM_IF(WASM_NOT(WASM_LOCAL_GET(0)), WASM_BRV(2, WASM_LOCAL_GET(0))),
+ WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(1))),
+ WASM_CONTINUE(0)),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
@@ -1492,11 +1477,10 @@ WASM_EXEC_TEST(CountDown_fallthru) {
WASM_EXEC_TEST(WhileCountDown) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- WASM_WHILE(
- WASM_LOCAL_GET(0),
- WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(1)))),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_WHILE(WASM_LOCAL_GET(0),
+ WASM_LOCAL_SET(
+ 0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(1)))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
@@ -1504,10 +1488,9 @@ WASM_EXEC_TEST(WhileCountDown) {
WASM_EXEC_TEST(Loop_if_break1) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r,
- WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0), WASM_BRV(2, WASM_LOCAL_GET(1))),
- WASM_LOCAL_SET(0, WASM_I32V_2(99))),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0), WASM_BRV(2, WASM_LOCAL_GET(1))),
+ WASM_LOCAL_SET(0, WASM_I32V_2(99))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(99, r.Call(0, 11));
CHECK_EQ(65, r.Call(3, 65));
CHECK_EQ(10001, r.Call(10000, 10001));
@@ -1516,10 +1499,9 @@ WASM_EXEC_TEST(Loop_if_break1) {
WASM_EXEC_TEST(Loop_if_break2) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r,
- WASM_LOOP(WASM_BRV_IF(1, WASM_LOCAL_GET(1), WASM_LOCAL_GET(0)),
- WASM_DROP, WASM_LOCAL_SET(0, WASM_I32V_2(99))),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_LOOP(WASM_BRV_IF(1, WASM_LOCAL_GET(1), WASM_LOCAL_GET(0)),
+ WASM_DROP, WASM_LOCAL_SET(0, WASM_I32V_2(99))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(99, r.Call(0, 33));
CHECK_EQ(3, r.Call(1, 3));
CHECK_EQ(10000, r.Call(99, 10000));
@@ -1528,10 +1510,9 @@ WASM_EXEC_TEST(Loop_if_break2) {
WASM_EXEC_TEST(Loop_if_break_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- B1(WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0), WASM_BR(2)),
- WASM_LOCAL_SET(0, WASM_I32V_2(93)))),
- WASM_LOCAL_GET(0));
+ r.Build({B1(WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0), WASM_BR(2)),
+ WASM_LOCAL_SET(0, WASM_I32V_2(93)))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(93, r.Call(0));
CHECK_EQ(3, r.Call(3));
CHECK_EQ(10001, r.Call(10001));
@@ -1540,10 +1521,9 @@ WASM_EXEC_TEST(Loop_if_break_fallthru) {
WASM_EXEC_TEST(Loop_if_break_fallthru2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r,
- B1(B1(WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0), WASM_BR(2)),
- WASM_LOCAL_SET(0, WASM_I32V_2(93))))),
- WASM_LOCAL_GET(0));
+ r.Build({B1(B1(WASM_LOOP(WASM_IF(WASM_LOCAL_GET(0), WASM_BR(2)),
+ WASM_LOCAL_SET(0, WASM_I32V_2(93))))),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(93, r.Call(0));
CHECK_EQ(3, r.Call(3));
CHECK_EQ(10001, r.Call(10001));
@@ -1552,8 +1532,8 @@ WASM_EXEC_TEST(Loop_if_break_fallthru2) {
WASM_EXEC_TEST(IfBreak1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_SEQ(WASM_BR(0), WASM_UNREACHABLE)),
- WASM_I32V_2(91));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_SEQ(WASM_BR(0), WASM_UNREACHABLE)),
+ WASM_I32V_2(91)});
CHECK_EQ(91, r.Call(0));
CHECK_EQ(91, r.Call(1));
CHECK_EQ(91, r.Call(-8734));
@@ -1561,8 +1541,8 @@ WASM_EXEC_TEST(IfBreak1) {
WASM_EXEC_TEST(IfBreak2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_SEQ(WASM_BR(0), RET_I8(77))),
- WASM_I32V_2(81));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_SEQ(WASM_BR(0), RET_I8(77))),
+ WASM_I32V_2(81)});
CHECK_EQ(81, r.Call(0));
CHECK_EQ(81, r.Call(1));
CHECK_EQ(81, r.Call(-8734));
@@ -1574,7 +1554,7 @@ WASM_EXEC_TEST(LoadMemI32) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
+ r.Build({WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)});
r.builder().WriteMemory(&memory[0], 99999999);
CHECK_EQ(99999999, r.Call(0));
@@ -1593,8 +1573,8 @@ WASM_EXEC_TEST(LoadMemI32_alignment) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
- BUILD(r,
- WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, alignment));
+ r.Build(
+ {WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, alignment)});
r.builder().WriteMemory(&memory[0], 0x1A2B3C4D);
CHECK_EQ(0x1A2B3C4D, r.Call(0));
@@ -1613,7 +1593,7 @@ WASM_EXEC_TEST(LoadMemI32_oob) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0))});
r.builder().WriteMemory(&memory[0], 88888888);
CHECK_EQ(88888888, r.Call(0u));
@@ -1644,8 +1624,8 @@ WASM_EXEC_TEST(LoadMem_offset_oob) {
constexpr byte kOffset = 8;
uint32_t boundary = num_bytes - kOffset - machineTypes[m].MemSize();
- BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], kOffset, WASM_LOCAL_GET(0)),
- WASM_DROP, WASM_ZERO);
+ r.Build({WASM_LOAD_MEM_OFFSET(machineTypes[m], kOffset, WASM_LOCAL_GET(0)),
+ WASM_DROP, WASM_ZERO});
CHECK_EQ(0, r.Call(boundary)); // in bounds.
@@ -1661,7 +1641,7 @@ WASM_EXEC_TEST(LoadMemI32_offset) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
- BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), 4, WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM_OFFSET(MachineType::Int32(), 4, WASM_LOCAL_GET(0))});
r.builder().WriteMemory(&memory[0], 66666666);
r.builder().WriteMemory(&memory[1], 77777777);
@@ -1692,8 +1672,8 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
r.builder().AddMemoryElems<byte>(kWasmPageSize);
r.builder().RandomizeMemory();
- BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset,
- WASM_I32V_3(index)));
+ r.Build({WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset,
+ WASM_I32V_3(index))});
if (offset + index + sizeof(int32_t) <= kWasmPageSize) {
CHECK_EQ(r.builder().raw_val_at<int32_t>(offset + index), r.Call());
@@ -1715,8 +1695,8 @@ WASM_EXEC_TEST(LoadMemI32_const_oob) {
r.builder().AddMemoryElems<byte>(kWasmPageSize);
r.builder().RandomizeMemory();
- BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset,
- WASM_I32V_3(index)));
+ r.Build({WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset,
+ WASM_I32V_3(index))});
if (offset + index + sizeof(int32_t) <= kWasmPageSize) {
CHECK_EQ(r.builder().raw_val_at<int32_t>(offset + index), r.Call());
@@ -1734,10 +1714,9 @@ WASM_EXEC_TEST(StoreMemI32_alignment) {
WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r,
- WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, i,
- WASM_LOCAL_GET(0)),
- WASM_LOCAL_GET(0));
+ r.Build({WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, i,
+ WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(0)});
r.builder().RandomizeMemory(1111);
memory[0] = 0;
@@ -1752,10 +1731,9 @@ WASM_EXEC_TEST(StoreMemI32_offset) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
const int32_t kWritten = 0xAABBCCDD;
- BUILD(r,
- WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_LOCAL_GET(0),
- WASM_I32V_5(kWritten)),
- WASM_I32V_5(kWritten));
+ r.Build({WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_LOCAL_GET(0),
+ WASM_I32V_5(kWritten)),
+ WASM_I32V_5(kWritten)});
for (int i = 0; i < 2; ++i) {
r.builder().RandomizeMemory(1111);
@@ -1786,10 +1764,9 @@ WASM_EXEC_TEST(StoreMem_offset_oob) {
r.builder().RandomizeMemory(1119 + static_cast<int>(m));
- BUILD(r,
- WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_LOCAL_GET(0),
- WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
- WASM_ZERO);
+ r.Build({WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_LOCAL_GET(0),
+ WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
+ WASM_ZERO});
byte memsize = machineTypes[m].MemSize();
uint32_t boundary = num_bytes - 8 - memsize;
@@ -1814,10 +1791,10 @@ WASM_EXEC_TEST(Store_i32_narrowed) {
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
constexpr uint32_t kPattern = 0x12345678;
- BUILD(r, WASM_LOCAL_GET(0), // index
- WASM_LOCAL_GET(1), // value
- opcode, ZERO_ALIGNMENT, ZERO_OFFSET, // store
- WASM_ZERO); // return value
+ r.Build({WASM_LOCAL_GET(0), // index
+ WASM_LOCAL_GET(1), // value
+ opcode, ZERO_ALIGNMENT, ZERO_OFFSET, // store
+ WASM_ZERO}); // return value
for (int i = 0; i <= kBytes - stored_size_in_bytes; ++i) {
uint32_t pattern = base::bits::RotateLeft32(kPattern, i % 32);
@@ -1839,7 +1816,7 @@ WASM_EXEC_TEST(LoadMemI32_P) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(2222);
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0))});
for (int i = 0; i < kNumElems; ++i) {
CHECK_EQ(r.builder().ReadMemory(&memory[i]), r.Call(i * 4));
@@ -1853,17 +1830,16 @@ WASM_EXEC_TEST(MemI32_Sum) {
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(int32_t));
const byte kSum = r.AllocateLocal(kWasmI32);
- BUILD(
- r,
- WASM_WHILE(
- WASM_LOCAL_GET(0),
- WASM_BLOCK(WASM_LOCAL_SET(
- kSum, WASM_I32_ADD(WASM_LOCAL_GET(kSum),
- WASM_LOAD_MEM(MachineType::Int32(),
- WASM_LOCAL_GET(0)))),
- WASM_LOCAL_SET(
- 0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(4))))),
- WASM_LOCAL_GET(1));
+ r.Build(
+ {WASM_WHILE(
+ WASM_LOCAL_GET(0),
+ WASM_BLOCK(WASM_LOCAL_SET(
+ kSum, WASM_I32_ADD(WASM_LOCAL_GET(kSum),
+ WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_LOCAL_GET(0)))),
+ WASM_LOCAL_SET(
+ 0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(4))))),
+ WASM_LOCAL_GET(1)});
// Run 4 trials.
for (int i = 0; i < 3; ++i) {
@@ -1882,24 +1858,27 @@ WASM_EXEC_TEST(CheckMachIntsZero) {
WasmRunner<uint32_t, int32_t> r(execution_tier);
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
- BUILD(r, // --
- /**/ kExprLoop, kVoidCode, // --
- /* */ kExprLocalGet, 0, // --
- /* */ kExprIf, kVoidCode, // --
- /* */ kExprLocalGet, 0, // --
- /* */ kExprI32LoadMem, 0, 0, // --
- /* */ kExprIf, kVoidCode, // --
- /* */ kExprI32Const, 127, // --
- /* */ kExprReturn, // --
- /* */ kExprEnd, // --
- /* */ kExprLocalGet, 0, // --
- /* */ kExprI32Const, 4, // --
- /* */ kExprI32Sub, // --
- /* */ kExprLocalTee, 0, // --
- /* */ kExprBr, DEPTH_0, // --
- /* */ kExprEnd, // --
- /**/ kExprEnd, // --
- /**/ kExprI32Const, 0); // --
+ r.Build({
+ // clang-format off
+ kExprLoop, kVoidCode,
+ kExprLocalGet, 0,
+ kExprIf, kVoidCode,
+ kExprLocalGet, 0,
+ kExprI32LoadMem, 0, 0,
+ kExprIf, kVoidCode,
+ kExprI32Const, 127,
+ kExprReturn,
+ kExprEnd,
+ kExprLocalGet, 0,
+ kExprI32Const, 4,
+ kExprI32Sub,
+ kExprLocalTee, 0,
+ kExprBr, DEPTH_0,
+ kExprEnd,
+ kExprEnd,
+ kExprI32Const, 0
+ // clang-format on
+ });
r.builder().BlankMemory();
CHECK_EQ(0, r.Call((kNumElems - 1) * 4));
@@ -1917,18 +1896,17 @@ WASM_EXEC_TEST(MemF32_Sum) {
r.builder().WriteMemory(&buffer[4], 5555.25f);
const byte kSum = r.AllocateLocal(kWasmF32);
- BUILD(r,
- WASM_WHILE(
- WASM_LOCAL_GET(0),
- WASM_BLOCK(
- WASM_LOCAL_SET(
- kSum, WASM_F32_ADD(WASM_LOCAL_GET(kSum),
- WASM_LOAD_MEM(MachineType::Float32(),
- WASM_LOCAL_GET(0)))),
- WASM_LOCAL_SET(
- 0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(4))))),
- WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_LOCAL_GET(kSum)),
- WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_WHILE(WASM_LOCAL_GET(0),
+ WASM_BLOCK(WASM_LOCAL_SET(
+ kSum, WASM_F32_ADD(
+ WASM_LOCAL_GET(kSum),
+ WASM_LOAD_MEM(MachineType::Float32(),
+ WASM_LOCAL_GET(0)))),
+ WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0),
+ WASM_I32V_1(4))))),
+ WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_LOCAL_GET(kSum)),
+ WASM_LOCAL_GET(0)});
CHECK_EQ(0, r.Call(4 * (kSize - 1)));
CHECK_NE(-99.25f, r.builder().ReadMemory(&buffer[0]));
@@ -1947,18 +1925,18 @@ T GenerateAndRunFold(TestExecutionTier execution_tier, WasmOpcode binop,
}
const byte kAccum = r.AllocateLocal(astType);
- BUILD(
- r, WASM_LOCAL_SET(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
- WASM_WHILE(
- WASM_LOCAL_GET(0),
- WASM_BLOCK(WASM_LOCAL_SET(
- kAccum,
- WASM_BINOP(binop, WASM_LOCAL_GET(kAccum),
- WASM_LOAD_MEM(memType, WASM_LOCAL_GET(0)))),
- WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0),
- WASM_I32V_1(sizeof(T)))))),
- WASM_STORE_MEM(memType, WASM_ZERO, WASM_LOCAL_GET(kAccum)),
- WASM_LOCAL_GET(0));
+ r.Build(
+ {WASM_LOCAL_SET(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
+ WASM_WHILE(
+ WASM_LOCAL_GET(0),
+ WASM_BLOCK(WASM_LOCAL_SET(
+ kAccum, WASM_BINOP(binop, WASM_LOCAL_GET(kAccum),
+ WASM_LOAD_MEM(memType,
+ WASM_LOCAL_GET(0)))),
+ WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0),
+ WASM_I32V_1(sizeof(T)))))),
+ WASM_STORE_MEM(memType, WASM_ZERO, WASM_LOCAL_GET(kAccum)),
+ WASM_LOCAL_GET(0)});
r.Call(static_cast<int>(sizeof(T) * (size - 1)));
return r.builder().ReadMemory(&memory[0]);
}
@@ -1975,7 +1953,7 @@ WASM_EXEC_TEST(MemF64_Mul) {
WASM_EXEC_TEST(Build_Wasm_Infinite_Loop) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// Only build the graph and compile, don't run.
- BUILD(r, WASM_INFINITE_LOOP, WASM_ZERO);
+ r.Build({WASM_INFINITE_LOOP, WASM_ZERO});
}
WASM_EXEC_TEST(Build_Wasm_Infinite_Loop_effect) {
@@ -1983,89 +1961,91 @@ WASM_EXEC_TEST(Build_Wasm_Infinite_Loop_effect) {
r.builder().AddMemory(kWasmPageSize);
// Only build the graph and compile, don't run.
- BUILD(r, WASM_LOOP(WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO), WASM_DROP),
- WASM_ZERO);
+ r.Build({WASM_LOOP(WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO), WASM_DROP),
+ WASM_ZERO});
}
WASM_EXEC_TEST(Unreachable0a) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(9)), RET(WASM_LOCAL_GET(0))));
+ r.Build({WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(9)), RET(WASM_LOCAL_GET(0)))});
CHECK_EQ(9, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
WASM_EXEC_TEST(Unreachable0b) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(7)), WASM_UNREACHABLE));
+ r.Build({WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(7)), WASM_UNREACHABLE)});
CHECK_EQ(7, r.Call(0));
CHECK_EQ(7, r.Call(1));
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_UNREACHABLE);
+ r.Build({WASM_UNREACHABLE});
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE);
+ r.Build({WASM_UNREACHABLE, WASM_UNREACHABLE});
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable3) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE, WASM_UNREACHABLE);
+ r.Build({WASM_UNREACHABLE, WASM_UNREACHABLE, WASM_UNREACHABLE});
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_UnreachableIf1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_UNREACHABLE,
- WASM_IF(WASM_LOCAL_GET(0), WASM_SEQ(WASM_LOCAL_GET(0), WASM_DROP)),
- WASM_ZERO);
+ r.Build({WASM_UNREACHABLE,
+ WASM_IF(WASM_LOCAL_GET(0), WASM_SEQ(WASM_LOCAL_GET(0), WASM_DROP)),
+ WASM_ZERO});
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_UnreachableIf2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_UNREACHABLE,
- WASM_IF_ELSE_I(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0), WASM_UNREACHABLE));
+ r.Build(
+ {WASM_UNREACHABLE,
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0), WASM_UNREACHABLE)});
}
WASM_EXEC_TEST(Unreachable_Load) {
WasmRunner<int32_t, int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_LOCAL_GET(0)),
- WASM_LOAD_MEM(MachineType::Int8(), WASM_LOCAL_GET(0))));
+ r.Build(
+ {WASM_BLOCK_I(WASM_BRV(0, WASM_LOCAL_GET(0)),
+ WASM_LOAD_MEM(MachineType::Int8(), WASM_LOCAL_GET(0)))});
CHECK_EQ(11, r.Call(11));
CHECK_EQ(21, r.Call(21));
}
WASM_EXEC_TEST(BrV_Fallthrough) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BLOCK(WASM_BRV(1, WASM_I32V_1(42))),
- WASM_I32V_1(22)));
+ r.Build({WASM_BLOCK_I(WASM_BLOCK(WASM_BRV(1, WASM_I32V_1(42))),
+ WASM_I32V_1(22))});
CHECK_EQ(42, r.Call());
}
WASM_EXEC_TEST(Infinite_Loop_not_taken1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_INFINITE_LOOP), WASM_I32V_1(45));
+ r.Build({WASM_IF(WASM_LOCAL_GET(0), WASM_INFINITE_LOOP), WASM_I32V_1(45)});
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(0));
}
WASM_EXEC_TEST(Infinite_Loop_not_taken2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(
- WASM_IF_ELSE(WASM_LOCAL_GET(0), WASM_BRV(1, WASM_I32V_1(45)),
- WASM_INFINITE_LOOP),
- WASM_ZERO));
+ r.Build({WASM_BLOCK_I(
+ WASM_IF_ELSE(WASM_LOCAL_GET(0), WASM_BRV(1, WASM_I32V_1(45)),
+ WASM_INFINITE_LOOP),
+ WASM_ZERO)});
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
WASM_EXEC_TEST(Infinite_Loop_not_taken2_brif) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_I32V_1(45), WASM_LOCAL_GET(0)),
- WASM_INFINITE_LOOP));
+ r.Build({WASM_BLOCK_I(WASM_BRV_IF(0, WASM_I32V_1(45), WASM_LOCAL_GET(0)),
+ WASM_INFINITE_LOOP)});
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
@@ -2127,7 +2107,7 @@ WASM_EXEC_TEST(Int32LoadInt8_signext) {
int8_t* memory = r.builder().AddMemoryElems<int8_t>(kNumElems);
r.builder().RandomizeMemory();
memory[0] = -1;
- BUILD(r, WASM_LOAD_MEM(MachineType::Int8(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Int8(), WASM_LOCAL_GET(0))});
for (int i = 0; i < kNumElems; ++i) {
CHECK_EQ(memory[i], r.Call(i));
@@ -2140,7 +2120,7 @@ WASM_EXEC_TEST(Int32LoadInt8_zeroext) {
byte* memory = r.builder().AddMemory(kNumElems);
r.builder().RandomizeMemory(77);
memory[0] = 255;
- BUILD(r, WASM_LOAD_MEM(MachineType::Uint8(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Uint8(), WASM_LOCAL_GET(0))});
for (int i = 0; i < kNumElems; ++i) {
CHECK_EQ(memory[i], r.Call(i));
@@ -2153,7 +2133,7 @@ WASM_EXEC_TEST(Int32LoadInt16_signext) {
byte* memory = r.builder().AddMemory(kNumBytes);
r.builder().RandomizeMemory(888);
memory[1] = 200;
- BUILD(r, WASM_LOAD_MEM(MachineType::Int16(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Int16(), WASM_LOCAL_GET(0))});
for (int i = 0; i < kNumBytes; i += 2) {
int32_t expected = static_cast<int16_t>(memory[i] | (memory[i + 1] << 8));
@@ -2167,7 +2147,7 @@ WASM_EXEC_TEST(Int32LoadInt16_zeroext) {
byte* memory = r.builder().AddMemory(kNumBytes);
r.builder().RandomizeMemory(9999);
memory[1] = 204;
- BUILD(r, WASM_LOAD_MEM(MachineType::Uint16(), WASM_LOCAL_GET(0)));
+ r.Build({WASM_LOAD_MEM(MachineType::Uint16(), WASM_LOCAL_GET(0))});
for (int i = 0; i < kNumBytes; i += 2) {
int32_t expected = memory[i] | (memory[i + 1] << 8);
@@ -2179,9 +2159,9 @@ WASM_EXEC_TEST(Int32Global) {
WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* global = r.builder().AddGlobal<int32_t>();
// global = global + p0
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_I32_ADD(WASM_GLOBAL_GET(0), WASM_LOCAL_GET(0))),
- WASM_ZERO);
+ r.Build(
+ {WASM_GLOBAL_SET(0, WASM_I32_ADD(WASM_GLOBAL_GET(0), WASM_LOCAL_GET(0))),
+ WASM_ZERO});
*global = 116;
for (int i = 9; i < 444444; i += 111111) {
@@ -2200,10 +2180,9 @@ WASM_EXEC_TEST(Int32Globals_DontAlias) {
r.builder().AddGlobal<int32_t>(),
r.builder().AddGlobal<int32_t>()};
- BUILD(
- r,
- WASM_GLOBAL_SET(g, WASM_I32_ADD(WASM_GLOBAL_GET(g), WASM_LOCAL_GET(0))),
- WASM_GLOBAL_GET(g));
+ r.Build({WASM_GLOBAL_SET(
+ g, WASM_I32_ADD(WASM_GLOBAL_GET(g), WASM_LOCAL_GET(0))),
+ WASM_GLOBAL_GET(g)});
// Check that reading/writing global number {g} doesn't alter the others.
*(globals[g]) = 116 * g;
@@ -2225,11 +2204,10 @@ WASM_EXEC_TEST(Float32Global) {
WasmRunner<int32_t, int32_t> r(execution_tier);
float* global = r.builder().AddGlobal<float>();
// global = global + p0
- BUILD(r,
- WASM_GLOBAL_SET(0,
- WASM_F32_ADD(WASM_GLOBAL_GET(0),
- WASM_F32_SCONVERT_I32(WASM_LOCAL_GET(0)))),
- WASM_ZERO);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_F32_ADD(WASM_GLOBAL_GET(0),
+ WASM_F32_SCONVERT_I32(WASM_LOCAL_GET(0)))),
+ WASM_ZERO});
*global = 1.25;
for (int i = 9; i < 4444; i += 1111) {
@@ -2243,11 +2221,10 @@ WASM_EXEC_TEST(Float64Global) {
WasmRunner<int32_t, int32_t> r(execution_tier);
double* global = r.builder().AddGlobal<double>();
// global = global + p0
- BUILD(r,
- WASM_GLOBAL_SET(0,
- WASM_F64_ADD(WASM_GLOBAL_GET(0),
- WASM_F64_SCONVERT_I32(WASM_LOCAL_GET(0)))),
- WASM_ZERO);
+ r.Build({WASM_GLOBAL_SET(
+ 0, WASM_F64_ADD(WASM_GLOBAL_GET(0),
+ WASM_F64_SCONVERT_I32(WASM_LOCAL_GET(0)))),
+ WASM_ZERO});
*global = 1.25;
for (int i = 9; i < 4444; i += 1111) {
@@ -2268,11 +2245,11 @@ WASM_EXEC_TEST(MixedGlobals) {
float* var_float = r.builder().AddGlobal<float>();
double* var_double = r.builder().AddGlobal<double>();
- BUILD(r, WASM_GLOBAL_SET(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
- WASM_GLOBAL_SET(2, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
- WASM_GLOBAL_SET(3, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
- WASM_GLOBAL_SET(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
- WASM_ZERO);
+ r.Build({WASM_GLOBAL_SET(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_GLOBAL_SET(2, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
+ WASM_GLOBAL_SET(3, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
+ WASM_GLOBAL_SET(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
+ WASM_ZERO});
memory[0] = 0xAA;
memory[1] = 0xCC;
@@ -2298,10 +2275,10 @@ WASM_EXEC_TEST(CallEmpty) {
// Build the target function.
WasmFunctionCompiler& target_func = r.NewFunction<int>();
- BUILD(target_func, WASM_I32V_3(kExpected));
+ target_func.Build({WASM_I32V_3(kExpected)});
// Build the calling function.
- BUILD(r, WASM_CALL_FUNCTION0(target_func.function_index()));
+ r.Build({WASM_CALL_FUNCTION0(target_func.function_index())});
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
@@ -2315,16 +2292,15 @@ WASM_EXEC_TEST(CallF32StackParameter) {
for (int i = 0; i < 20; ++i) param_types[i] = kWasmF32;
FunctionSig sig(1, 19, param_types);
WasmFunctionCompiler& t = r.NewFunction(&sig);
- BUILD(t, WASM_LOCAL_GET(17));
+ t.Build({WASM_LOCAL_GET(17)});
// Build the calling function.
- BUILD(r, WASM_CALL_FUNCTION(
- t.function_index(), WASM_F32(1.0f), WASM_F32(2.0f),
- WASM_F32(4.0f), WASM_F32(8.0f), WASM_F32(16.0f), WASM_F32(32.0f),
- WASM_F32(64.0f), WASM_F32(128.0f), WASM_F32(256.0f),
- WASM_F32(1.5f), WASM_F32(2.5f), WASM_F32(4.5f), WASM_F32(8.5f),
- WASM_F32(16.5f), WASM_F32(32.5f), WASM_F32(64.5f),
- WASM_F32(128.5f), WASM_F32(256.5f), WASM_F32(512.5f)));
+ r.Build({WASM_CALL_FUNCTION(
+ t.function_index(), WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
+ WASM_F32(8.0f), WASM_F32(16.0f), WASM_F32(32.0f), WASM_F32(64.0f),
+ WASM_F32(128.0f), WASM_F32(256.0f), WASM_F32(1.5f), WASM_F32(2.5f),
+ WASM_F32(4.5f), WASM_F32(8.5f), WASM_F32(16.5f), WASM_F32(32.5f),
+ WASM_F32(64.5f), WASM_F32(128.5f), WASM_F32(256.5f), WASM_F32(512.5f))});
float result = r.Call();
CHECK_EQ(256.5f, result);
@@ -2338,16 +2314,15 @@ WASM_EXEC_TEST(CallF64StackParameter) {
for (int i = 0; i < 20; ++i) param_types[i] = kWasmF64;
FunctionSig sig(1, 19, param_types);
WasmFunctionCompiler& t = r.NewFunction(&sig);
- BUILD(t, WASM_LOCAL_GET(17));
+ t.Build({WASM_LOCAL_GET(17)});
// Build the calling function.
- BUILD(r, WASM_CALL_FUNCTION(t.function_index(), WASM_F64(1.0), WASM_F64(2.0),
- WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
- WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
- WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
- WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5),
- WASM_F64(32.5), WASM_F64(64.5), WASM_F64(128.5),
- WASM_F64(256.5), WASM_F64(512.5)));
+ r.Build({WASM_CALL_FUNCTION(
+ t.function_index(), WASM_F64(1.0), WASM_F64(2.0), WASM_F64(4.0),
+ WASM_F64(8.0), WASM_F64(16.0), WASM_F64(32.0), WASM_F64(64.0),
+ WASM_F64(128.0), WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
+ WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5), WASM_F64(32.5),
+ WASM_F64(64.5), WASM_F64(128.5), WASM_F64(256.5), WASM_F64(512.5))});
float result = r.Call();
CHECK_EQ(256.5, result);
@@ -2365,12 +2340,12 @@ WASM_EXEC_TEST(CallVoid) {
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory();
WasmFunctionCompiler& t = r.NewFunction(sigs.v_v());
- BUILD(t, WASM_STORE_MEM(MachineType::Int32(), WASM_I32V_1(kMemOffset),
- WASM_I32V_3(kExpected)));
+ t.Build({WASM_STORE_MEM(MachineType::Int32(), WASM_I32V_1(kMemOffset),
+ WASM_I32V_3(kExpected))});
// Build the calling function.
- BUILD(r, WASM_CALL_FUNCTION0(t.function_index()),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(kMemOffset)));
+ r.Build({WASM_CALL_FUNCTION0(t.function_index()),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(kMemOffset))});
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
@@ -2383,11 +2358,11 @@ WASM_EXEC_TEST(Call_Int32Add) {
// Build the target function.
WasmFunctionCompiler& t = r.NewFunction<int32_t, int32_t, int32_t>();
- BUILD(t, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t.Build({WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
// Build the caller function.
- BUILD(r, WASM_CALL_FUNCTION(t.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1)));
+ r.Build({WASM_CALL_FUNCTION(t.function_index(), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1))});
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -2403,11 +2378,11 @@ WASM_EXEC_TEST(Call_Float32Sub) {
// Build the target function.
WasmFunctionCompiler& target_func = r.NewFunction<float, float, float>();
- BUILD(target_func, WASM_F32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ target_func.Build({WASM_F32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
// Build the caller function.
- BUILD(r, WASM_CALL_FUNCTION(target_func.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1)));
+ r.Build({WASM_CALL_FUNCTION(target_func.function_index(), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1))});
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(i - j, r.Call(i, j)); }
@@ -2419,12 +2394,12 @@ WASM_EXEC_TEST(Call_Float64Sub) {
double* memory =
r.builder().AddMemoryElems<double>(kWasmPageSize / sizeof(double));
- BUILD(r, WASM_STORE_MEM(
- MachineType::Float64(), WASM_ZERO,
- WASM_F64_SUB(
- WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
- WASM_LOAD_MEM(MachineType::Float64(), WASM_I32V_1(8)))),
- WASM_I32V_2(107));
+ r.Build(
+ {WASM_STORE_MEM(
+ MachineType::Float64(), WASM_ZERO,
+ WASM_F64_SUB(WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_I32V_1(8)))),
+ WASM_I32V_2(107)});
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
@@ -2463,16 +2438,14 @@ WASM_EXEC_TEST(Regular_Factorial) {
WasmFunctionCompiler& fact_aux_fn =
r.NewFunction<uint32_t, uint32_t, uint32_t>("fact_aux");
- BUILD(r, WASM_CALL_FUNCTION(fact_aux_fn.function_index(), WASM_LOCAL_GET(0),
- WASM_I32V(1)));
+ r.Build({WASM_CALL_FUNCTION(fact_aux_fn.function_index(), WASM_LOCAL_GET(0),
+ WASM_I32V(1))});
- BUILD(fact_aux_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
- WASM_CALL_FUNCTION(
- fact_aux_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))));
+ fact_aux_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
+ WASM_CALL_FUNCTION(fact_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))))});
uint32_t test_values[] = {1, 2, 5, 10, 20};
@@ -2519,22 +2492,21 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_Factorial) {
IsolateScope isolate_scope;
LocalContext current(isolate_scope.isolate());
- WasmRunner<uint32_t, uint32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport, kMemory32,
isolate_scope.i_isolate());
WasmFunctionCompiler& fact_aux_fn =
r.NewFunction<uint32_t, uint32_t, uint32_t>("fact_aux");
- BUILD(r, WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
- WASM_LOCAL_GET(0), WASM_I32V(1)));
+ r.Build({WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
+ WASM_LOCAL_GET(0), WASM_I32V(1))});
- BUILD(fact_aux_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_FUNCTION(
- fact_aux_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))));
+ fact_aux_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_FUNCTION(
+ fact_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))))});
uint32_t test_values[] = {1, 2, 5, 10, 20, 2000};
@@ -2557,31 +2529,28 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_MutualFactorial) {
IsolateScope isolate_scope;
LocalContext current(isolate_scope.isolate());
- WasmRunner<uint32_t, uint32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport, kMemory32,
isolate_scope.i_isolate());
WasmFunctionCompiler& f_fn = r.NewFunction<uint32_t, uint32_t, uint32_t>("f");
WasmFunctionCompiler& g_fn = r.NewFunction<uint32_t, uint32_t, uint32_t>("g");
- BUILD(r, WASM_RETURN_CALL_FUNCTION(f_fn.function_index(), WASM_LOCAL_GET(0),
- WASM_I32V(1)));
-
- BUILD(f_fn,
- WASM_IF_ELSE_I(WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_FUNCTION(
- g_fn.function_index(),
- WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)))));
-
- BUILD(g_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LES(WASM_LOCAL_GET(1), WASM_I32V(1)), WASM_LOCAL_GET(0),
- WASM_RETURN_CALL_FUNCTION(
- f_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(1), WASM_I32V(1)),
- WASM_I32_MUL(WASM_LOCAL_GET(1), WASM_LOCAL_GET(0)))));
+ r.Build({WASM_RETURN_CALL_FUNCTION(f_fn.function_index(), WASM_LOCAL_GET(0),
+ WASM_I32V(1))});
+
+ f_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_FUNCTION(
+ g_fn.function_index(),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1))))});
+
+ g_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LES(WASM_LOCAL_GET(1), WASM_I32V(1)), WASM_LOCAL_GET(0),
+ WASM_RETURN_CALL_FUNCTION(
+ f_fn.function_index(), WASM_I32_SUB(WASM_LOCAL_GET(1), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_LOCAL_GET(1), WASM_LOCAL_GET(0))))});
uint32_t test_values[] = {1, 2, 5, 10, 20, 2000};
@@ -2604,7 +2573,7 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_IndirectFactorial) {
IsolateScope isolate_scope;
LocalContext current(isolate_scope.isolate());
- WasmRunner<uint32_t, uint32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport, kMemory32,
isolate_scope.i_isolate());
@@ -2622,17 +2591,16 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_IndirectFactorial) {
r.builder().AddIndirectFunctionTable(indirect_function_table,
arraysize(indirect_function_table));
- BUILD(r,
- WASM_RETURN_CALL_FUNCTION(f_ind_fn.function_index(), WASM_LOCAL_GET(0),
- WASM_I32V(1), WASM_I32V(f_ind_index)));
+ r.Build(
+ {WASM_RETURN_CALL_FUNCTION(f_ind_fn.function_index(), WASM_LOCAL_GET(0),
+ WASM_I32V(1), WASM_I32V(f_ind_index))});
- BUILD(f_ind_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_INDIRECT(
- sig_index, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)),
- WASM_LOCAL_GET(2), WASM_LOCAL_GET(2))));
+ f_ind_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LES(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_INDIRECT(
+ sig_index, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), WASM_LOCAL_GET(2),
+ WASM_LOCAL_GET(2)))});
uint32_t test_values[] = {1, 2, 5, 10, 10000};
@@ -2653,22 +2621,21 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_Sum) {
IsolateScope isolate_scope;
LocalContext current(isolate_scope.isolate());
- WasmRunner<int32_t, int32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<int32_t, int32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport, kMemory32,
isolate_scope.i_isolate());
TestSignatures sigs;
WasmFunctionCompiler& sum_aux_fn = r.NewFunction(sigs.i_ii(), "sum_aux");
- BUILD(r, WASM_RETURN_CALL_FUNCTION(sum_aux_fn.function_index(),
- WASM_LOCAL_GET(0), WASM_I32V(0)));
+ r.Build({WASM_RETURN_CALL_FUNCTION(sum_aux_fn.function_index(),
+ WASM_LOCAL_GET(0), WASM_I32V(0))});
- BUILD(sum_aux_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_FUNCTION(
- sum_aux_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))));
+ sum_aux_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_FUNCTION(
+ sum_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))))});
int32_t test_values[] = {1, 2, 5, 10, 1000};
@@ -2695,7 +2662,7 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_Bounce_Sum) {
IsolateScope isolate_scope;
LocalContext current(isolate_scope.isolate());
- WasmRunner<int32_t, int32_t> r(execution_tier, nullptr, "main",
+ WasmRunner<int32_t, int32_t> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport, kMemory32,
isolate_scope.i_isolate());
TestSignatures sigs;
@@ -2705,34 +2672,28 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_Bounce_Sum) {
WasmFunctionCompiler& b3_fn =
r.NewFunction<int32_t, int32_t, int32_t, int32_t, int32_t>("b3");
- BUILD(r, WASM_RETURN_CALL_FUNCTION(b1_fn.function_index(), WASM_LOCAL_GET(0),
- WASM_I32V(0)));
-
- BUILD(
- b1_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
- WASM_RETURN_CALL_FUNCTION(
- b2_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(0),
- WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)))));
-
- BUILD(b2_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(2),
- WASM_RETURN_CALL_FUNCTION(
- b3_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(2)))));
-
- BUILD(b3_fn,
- WASM_IF_ELSE_I(
- WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(3),
- WASM_RETURN_CALL_FUNCTION(
- b1_fn.function_index(),
- WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
- WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(3)))));
+ r.Build({WASM_RETURN_CALL_FUNCTION(b1_fn.function_index(), WASM_LOCAL_GET(0),
+ WASM_I32V(0))});
+
+ b1_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(1),
+ WASM_RETURN_CALL_FUNCTION(
+ b2_fn.function_index(), WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_LOCAL_GET(0),
+ WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))))});
+
+ b2_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(2),
+ WASM_RETURN_CALL_FUNCTION(
+ b3_fn.function_index(), WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(2))))});
+
+ b3_fn.Build({WASM_IF_ELSE_I(
+ WASM_I32_LTS(WASM_LOCAL_GET(0), WASM_I32V(1)), WASM_LOCAL_GET(3),
+ WASM_RETURN_CALL_FUNCTION(
+ b1_fn.function_index(), WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(3))))});
int32_t test_values[] = {1, 2, 5, 10, 1000};
@@ -2771,7 +2732,7 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
b.AddParam(ValueType::For(memtypes[i]));
}
WasmFunctionCompiler& f = r.NewFunction(b.Build());
- BUILD(f, WASM_LOCAL_GET(which));
+ f.Build({WASM_LOCAL_GET(which)});
// =========================================================================
// Build the calling function.
@@ -2798,7 +2759,7 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
// Return the expected value.
ADD_CODE(code, WASM_I32V_2(kExpected));
- r.Build(&code[0], &code[0] + code.size());
+ r.Build(base::VectorOf(code));
// Run the code.
for (int t = 0; t < 10; ++t) {
@@ -2824,15 +2785,15 @@ WASM_EXEC_TEST(MixedCall_3) { Run_WasmMixedCall_N(execution_tier, 3); }
WASM_EXEC_TEST(AddCall) {
WasmRunner<int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction<int32_t, int32_t, int32_t>();
- BUILD(t1, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t1.Build({WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
byte local = r.AllocateLocal(kWasmI32);
- BUILD(r, WASM_LOCAL_SET(local, WASM_I32V_2(99)),
- WASM_I32_ADD(
- WASM_CALL_FUNCTION(t1.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(0)),
- WASM_CALL_FUNCTION(t1.function_index(), WASM_LOCAL_GET(local),
- WASM_LOCAL_GET(local))));
+ r.Build({WASM_LOCAL_SET(local, WASM_I32V_2(99)),
+ WASM_I32_ADD(
+ WASM_CALL_FUNCTION(t1.function_index(), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(0)),
+ WASM_CALL_FUNCTION(t1.function_index(), WASM_LOCAL_GET(local),
+ WASM_LOCAL_GET(local)))});
CHECK_EQ(198, r.Call(0));
CHECK_EQ(200, r.Call(1));
@@ -2845,10 +2806,10 @@ WASM_EXEC_TEST(MultiReturnSub) {
ValueType storage[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32};
FunctionSig sig_ii_ii(2, 2, storage);
WasmFunctionCompiler& t1 = r.NewFunction(&sig_ii_ii);
- BUILD(t1, WASM_LOCAL_GET(1), WASM_LOCAL_GET(0));
+ t1.Build({WASM_LOCAL_GET(1), WASM_LOCAL_GET(0)});
- BUILD(r, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
- WASM_CALL_FUNCTION0(t1.function_index()), kExprI32Sub);
+ r.Build({WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_CALL_FUNCTION0(t1.function_index()), kExprI32Sub});
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -2873,20 +2834,18 @@ void RunMultiReturnSelect(TestExecutionTier execution_tier, const T* inputs) {
WasmRunner<T, T, T, T, T> r(execution_tier);
WasmFunctionCompiler& r1 = r.NewFunction(&sig);
- BUILD(r1, WASM_LOCAL_GET(i), WASM_LOCAL_GET(j));
+ r1.Build({WASM_LOCAL_GET(i), WASM_LOCAL_GET(j)});
if (k == 0) {
- BUILD(r,
- WASM_CALL_FUNCTION(r1.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
- WASM_LOCAL_GET(3)),
- WASM_DROP);
+ r.Build({WASM_CALL_FUNCTION(r1.function_index(), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
+ WASM_LOCAL_GET(3)),
+ WASM_DROP});
} else {
- BUILD(r,
- WASM_CALL_FUNCTION(r1.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
- WASM_LOCAL_GET(3)),
- kExprLocalSet, 0, WASM_DROP, WASM_LOCAL_GET(0));
+ r.Build({WASM_CALL_FUNCTION(r1.function_index(), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
+ WASM_LOCAL_GET(3)),
+ kExprLocalSet, 0, WASM_DROP, WASM_LOCAL_GET(0)});
}
T expected = inputs[k == 0 ? i : j];
@@ -2922,39 +2881,39 @@ WASM_EXEC_TEST(MultiReturnSelect_f64) {
WASM_EXEC_TEST(ExprBlock2a) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_LOCAL_GET(0), WASM_BRV(1, WASM_I32V_1(1))),
- WASM_I32V_1(1)));
+ r.Build({WASM_BLOCK_I(WASM_IF(WASM_LOCAL_GET(0), WASM_BRV(1, WASM_I32V_1(1))),
+ WASM_I32V_1(1))});
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock2b) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_LOCAL_GET(0), WASM_BRV(1, WASM_I32V_1(1))),
- WASM_I32V_1(2)));
+ r.Build({WASM_BLOCK_I(WASM_IF(WASM_LOCAL_GET(0), WASM_BRV(1, WASM_I32V_1(1))),
+ WASM_I32V_1(2))});
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock2c) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(1), WASM_LOCAL_GET(0)),
- WASM_I32V_1(1)));
+ r.Build({WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(1), WASM_LOCAL_GET(0)),
+ WASM_I32V_1(1))});
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock2d) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(1), WASM_LOCAL_GET(0)),
- WASM_I32V_1(2)));
+ r.Build({WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(1), WASM_LOCAL_GET(0)),
+ WASM_I32V_1(2))});
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(1)),
+ r.Build({WASM_BLOCK_I(WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(1)),
WASM_BRV(1, WASM_I32V_1(11))),
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(2)),
WASM_BRV(1, WASM_I32V_1(12))),
@@ -2964,7 +2923,7 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
WASM_BRV(1, WASM_I32V_1(14))),
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(5)),
WASM_BRV(1, WASM_I32V_1(15))),
- WASM_I32V_2(99)));
+ WASM_I32V_2(99))});
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(12, r.Call(2));
@@ -2976,18 +2935,18 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(
- WASM_BRV_IFD(0, WASM_I32V_1(11),
- WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(1))),
- WASM_BRV_IFD(0, WASM_I32V_1(12),
- WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(2))),
- WASM_BRV_IFD(0, WASM_I32V_1(13),
- WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(3))),
- WASM_BRV_IFD(0, WASM_I32V_1(14),
- WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(4))),
- WASM_BRV_IFD(0, WASM_I32V_1(15),
- WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(5))),
- WASM_I32V_2(99)));
+ r.Build({WASM_BLOCK_I(
+ WASM_BRV_IFD(0, WASM_I32V_1(11),
+ WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(1))),
+ WASM_BRV_IFD(0, WASM_I32V_1(12),
+ WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(2))),
+ WASM_BRV_IFD(0, WASM_I32V_1(13),
+ WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(3))),
+ WASM_BRV_IFD(0, WASM_I32V_1(14),
+ WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(4))),
+ WASM_BRV_IFD(0, WASM_I32V_1(15),
+ WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V_1(5))),
+ WASM_I32V_2(99))});
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(12, r.Call(2));
@@ -3000,12 +2959,10 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
WASM_EXEC_TEST(If_nested) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(
- r,
- WASM_IF_ELSE_I(
- WASM_LOCAL_GET(0),
- WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(11), WASM_I32V_1(12)),
- WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(13), WASM_I32V_1(14))));
+ r.Build({WASM_IF_ELSE_I(
+ WASM_LOCAL_GET(0),
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(11), WASM_I32V_1(12)),
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_I32V_1(13), WASM_I32V_1(14)))});
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -3016,9 +2973,9 @@ WASM_EXEC_TEST(If_nested) {
WASM_EXEC_TEST(ExprBlock_if) {
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_IF_ELSE_I(WASM_LOCAL_GET(0),
+ r.Build({WASM_BLOCK_I(WASM_IF_ELSE_I(WASM_LOCAL_GET(0),
WASM_BRV(0, WASM_I32V_1(11)),
- WASM_BRV(1, WASM_I32V_1(14)))));
+ WASM_BRV(1, WASM_I32V_1(14))))});
CHECK_EQ(11, r.Call(1));
CHECK_EQ(14, r.Call(0));
@@ -3027,12 +2984,12 @@ WASM_EXEC_TEST(ExprBlock_if) {
WASM_EXEC_TEST(ExprBlock_nested_ifs) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK_I(WASM_IF_ELSE_I(
- WASM_LOCAL_GET(0),
- WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_BRV(0, WASM_I32V_1(11)),
- WASM_BRV(1, WASM_I32V_1(12))),
- WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_BRV(0, WASM_I32V_1(13)),
- WASM_BRV(1, WASM_I32V_1(14))))));
+ r.Build({WASM_BLOCK_I(WASM_IF_ELSE_I(
+ WASM_LOCAL_GET(0),
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_BRV(0, WASM_I32V_1(11)),
+ WASM_BRV(1, WASM_I32V_1(12))),
+ WASM_IF_ELSE_I(WASM_LOCAL_GET(1), WASM_BRV(0, WASM_I32V_1(13)),
+ WASM_BRV(1, WASM_I32V_1(14)))))});
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -3045,11 +3002,11 @@ WASM_EXEC_TEST(SimpleCallIndirect) {
WasmRunner<int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
- BUILD(t1, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t1.Build({WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
t1.SetSigIndex(1);
WasmFunctionCompiler& t2 = r.NewFunction(sigs.i_ii());
- BUILD(t2, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t2.Build({WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
t2.SetSigIndex(1);
// Signature table.
@@ -3065,8 +3022,8 @@ WASM_EXEC_TEST(SimpleCallIndirect) {
arraysize(indirect_function_table));
// Build the caller function.
- BUILD(r, WASM_CALL_INDIRECT(1, WASM_I32V_2(66), WASM_I32V_1(22),
- WASM_LOCAL_GET(0)));
+ r.Build({WASM_CALL_INDIRECT(1, WASM_I32V_2(66), WASM_I32V_1(22),
+ WASM_LOCAL_GET(0))});
CHECK_EQ(88, r.Call(0));
CHECK_EQ(44, r.Call(1));
@@ -3078,11 +3035,11 @@ WASM_EXEC_TEST(MultipleCallIndirect) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
- BUILD(t1, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t1.Build({WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
t1.SetSigIndex(1);
WasmFunctionCompiler& t2 = r.NewFunction(sigs.i_ii());
- BUILD(t2, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t2.Build({WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
t2.SetSigIndex(1);
// Signature table.
@@ -3098,11 +3055,11 @@ WASM_EXEC_TEST(MultipleCallIndirect) {
arraysize(indirect_function_table));
// Build the caller function.
- BUILD(r,
- WASM_I32_ADD(WASM_CALL_INDIRECT(1, WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
- WASM_LOCAL_GET(0)),
- WASM_CALL_INDIRECT(1, WASM_LOCAL_GET(2), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1))));
+ r.Build(
+ {WASM_I32_ADD(WASM_CALL_INDIRECT(1, WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
+ WASM_LOCAL_GET(0)),
+ WASM_CALL_INDIRECT(1, WASM_LOCAL_GET(2), WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1)))});
CHECK_EQ(5, r.Call(0, 1, 2));
CHECK_EQ(19, r.Call(0, 1, 9));
@@ -3121,7 +3078,7 @@ WASM_EXEC_TEST(CallIndirect_EmptyTable) {
// One function.
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
- BUILD(t1, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t1.Build({WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
t1.SetSigIndex(1);
// Signature table.
@@ -3130,8 +3087,8 @@ WASM_EXEC_TEST(CallIndirect_EmptyTable) {
r.builder().AddIndirectFunctionTable(nullptr, 0);
// Build the caller function.
- BUILD(r, WASM_CALL_INDIRECT(1, WASM_I32V_2(66), WASM_I32V_1(22),
- WASM_LOCAL_GET(0)));
+ r.Build({WASM_CALL_INDIRECT(1, WASM_I32V_2(66), WASM_I32V_1(22),
+ WASM_LOCAL_GET(0))});
CHECK_TRAP(r.Call(0));
CHECK_TRAP(r.Call(1));
@@ -3143,13 +3100,13 @@ WASM_EXEC_TEST(CallIndirect_canonical) {
WasmRunner<int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
- BUILD(t1, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t1.Build({WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
WasmFunctionCompiler& t2 = r.NewFunction(sigs.i_ii());
- BUILD(t2, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t2.Build({WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
WasmFunctionCompiler& t3 = r.NewFunction(sigs.f_ff());
- BUILD(t3, WASM_F32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ t3.Build({WASM_F32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
// Function table.
uint16_t i1 = static_cast<uint16_t>(t1.function_index());
@@ -3161,8 +3118,8 @@ WASM_EXEC_TEST(CallIndirect_canonical) {
arraysize(indirect_function_table));
// Build the caller function.
- BUILD(r, WASM_CALL_INDIRECT(1, WASM_I32V_2(77), WASM_I32V_1(11),
- WASM_LOCAL_GET(0)));
+ r.Build({WASM_CALL_INDIRECT(1, WASM_I32V_2(77), WASM_I32V_1(11),
+ WASM_LOCAL_GET(0))});
CHECK_EQ(88, r.Call(0));
CHECK_EQ(66, r.Call(1));
@@ -3180,13 +3137,13 @@ WASM_EXEC_TEST(Regress_PushReturns) {
WasmRunner<int32_t> r(execution_tier);
WasmFunctionCompiler& f1 = r.NewFunction(&sig);
- BUILD(f1, WASM_I32V(1), WASM_I32V(2), WASM_I32V(3), WASM_I32V(4),
- WASM_I32V(5), WASM_I32V(6), WASM_I32V(7), WASM_I32V(8), WASM_I32V(9),
- WASM_I32V(10), WASM_I32V(11), WASM_I32V(12));
+ f1.Build({WASM_I32V(1), WASM_I32V(2), WASM_I32V(3), WASM_I32V(4),
+ WASM_I32V(5), WASM_I32V(6), WASM_I32V(7), WASM_I32V(8),
+ WASM_I32V(9), WASM_I32V(10), WASM_I32V(11), WASM_I32V(12)});
- BUILD(r, WASM_CALL_FUNCTION0(f1.function_index()), WASM_DROP, WASM_DROP,
- WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
- WASM_DROP, WASM_DROP, WASM_DROP);
+ r.Build({WASM_CALL_FUNCTION0(f1.function_index()), WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP, WASM_DROP});
CHECK_EQ(1, r.Call());
}
@@ -3198,44 +3155,44 @@ WASM_EXEC_TEST(Regress_EnsureArguments) {
WasmRunner<int32_t> r(execution_tier);
WasmFunctionCompiler& f2 = r.NewFunction(&sig);
- BUILD(f2, kExprReturn);
+ f2.Build({kExprReturn});
- BUILD(r, WASM_I32V(42), kExprReturn,
- WASM_CALL_FUNCTION(f2.function_index(), WASM_I32V(1)));
+ r.Build({WASM_I32V(42), kExprReturn,
+ WASM_CALL_FUNCTION(f2.function_index(), WASM_I32V(1))});
CHECK_EQ(42, r.Call());
}
WASM_EXEC_TEST(Regress_PushControl) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_I32V(42),
- WASM_IF(WASM_I32V(0), WASM_UNREACHABLE, kExprIf, kVoidCode, kExprEnd));
+ r.Build({WASM_I32V(42), WASM_IF(WASM_I32V(0), WASM_UNREACHABLE, kExprIf,
+ kVoidCode, kExprEnd)});
CHECK_EQ(42, r.Call());
}
WASM_EXEC_TEST(F32Floor) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_FLOOR(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_FLOOR(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32Ceil) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_CEIL(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_CEIL(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32Trunc) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_TRUNC(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_TRUNC(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32NearestInt) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_NEARESTINT(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_NEARESTINT(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
float value = nearbyintf(i);
@@ -3248,28 +3205,28 @@ WASM_EXEC_TEST(F32NearestInt) {
WASM_EXEC_TEST(F64Floor) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_FLOOR(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_FLOOR(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64Ceil) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_CEIL(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_CEIL(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64Trunc) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_TRUNC(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_TRUNC(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(trunc(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64NearestInt) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_NEARESTINT(WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_NEARESTINT(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
double value = nearbyint(i);
@@ -3282,7 +3239,7 @@ WASM_EXEC_TEST(F64NearestInt) {
WASM_EXEC_TEST(F32Min) {
WasmRunner<float, float, float> r(execution_tier);
- BUILD(r, WASM_F32_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F32_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(i, j), r.Call(i, j)); }
@@ -3291,14 +3248,14 @@ WASM_EXEC_TEST(F32Min) {
WASM_EXEC_TEST(F32MinSameValue) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
float result = r.Call(5.0f);
CHECK_FLOAT_EQ(5.0f, result);
}
WASM_EXEC_TEST(F64Min) {
WasmRunner<double, double, double> r(execution_tier);
- BUILD(r, WASM_F64_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F64_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(i, j), r.Call(i, j)); }
@@ -3307,14 +3264,14 @@ WASM_EXEC_TEST(F64Min) {
WASM_EXEC_TEST(F64MinSameValue) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_MIN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
double result = r.Call(5.0);
CHECK_DOUBLE_EQ(5.0, result);
}
WASM_EXEC_TEST(F32Max) {
WasmRunner<float, float, float> r(execution_tier);
- BUILD(r, WASM_F32_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F32_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(JSMax(i, j), r.Call(i, j)); }
@@ -3323,14 +3280,14 @@ WASM_EXEC_TEST(F32Max) {
WASM_EXEC_TEST(F32MaxSameValue) {
WasmRunner<float, float> r(execution_tier);
- BUILD(r, WASM_F32_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F32_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
float result = r.Call(5.0f);
CHECK_FLOAT_EQ(5.0f, result);
}
WASM_EXEC_TEST(F64Max) {
WasmRunner<double, double, double> r(execution_tier);
- BUILD(r, WASM_F64_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F64_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
@@ -3342,14 +3299,14 @@ WASM_EXEC_TEST(F64Max) {
WASM_EXEC_TEST(F64MaxSameValue) {
WasmRunner<double, double> r(execution_tier);
- BUILD(r, WASM_F64_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+ r.Build({WASM_F64_MAX(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))});
double result = r.Call(5.0);
CHECK_DOUBLE_EQ(5.0, result);
}
WASM_EXEC_TEST(I32SConvertF32) {
WasmRunner<int32_t, float> r(execution_tier);
- BUILD(r, WASM_I32_SCONVERT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_SCONVERT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
if (is_inbounds<int32_t>(i)) {
@@ -3362,7 +3319,7 @@ WASM_EXEC_TEST(I32SConvertF32) {
WASM_EXEC_TEST(I32SConvertSatF32) {
WasmRunner<int32_t, float> r(execution_tier);
- BUILD(r, WASM_I32_SCONVERT_SAT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_SCONVERT_SAT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
int32_t expected =
@@ -3378,7 +3335,7 @@ WASM_EXEC_TEST(I32SConvertSatF32) {
WASM_EXEC_TEST(I32SConvertF64) {
WasmRunner<int32_t, double> r(execution_tier);
- BUILD(r, WASM_I32_SCONVERT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_SCONVERT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
if (is_inbounds<int32_t>(i)) {
@@ -3391,7 +3348,7 @@ WASM_EXEC_TEST(I32SConvertF64) {
WASM_EXEC_TEST(I32SConvertSatF64) {
WasmRunner<int32_t, double> r(execution_tier);
- BUILD(r, WASM_I32_SCONVERT_SAT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_SCONVERT_SAT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
int32_t expected =
is_inbounds<int32_t>(i)
@@ -3406,7 +3363,7 @@ WASM_EXEC_TEST(I32SConvertSatF64) {
WASM_EXEC_TEST(I32UConvertF32) {
WasmRunner<uint32_t, float> r(execution_tier);
- BUILD(r, WASM_I32_UCONVERT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_UCONVERT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
if (is_inbounds<uint32_t>(i)) {
CHECK_EQ(static_cast<uint32_t>(i), r.Call(i));
@@ -3418,7 +3375,7 @@ WASM_EXEC_TEST(I32UConvertF32) {
WASM_EXEC_TEST(I32UConvertSatF32) {
WasmRunner<uint32_t, float> r(execution_tier);
- BUILD(r, WASM_I32_UCONVERT_SAT_F32(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_UCONVERT_SAT_F32(WASM_LOCAL_GET(0))});
FOR_FLOAT32_INPUTS(i) {
int32_t expected =
is_inbounds<uint32_t>(i)
@@ -3433,7 +3390,7 @@ WASM_EXEC_TEST(I32UConvertSatF32) {
WASM_EXEC_TEST(I32UConvertF64) {
WasmRunner<uint32_t, double> r(execution_tier);
- BUILD(r, WASM_I32_UCONVERT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_UCONVERT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
if (is_inbounds<uint32_t>(i)) {
CHECK_EQ(static_cast<uint32_t>(i), r.Call(i));
@@ -3445,7 +3402,7 @@ WASM_EXEC_TEST(I32UConvertF64) {
WASM_EXEC_TEST(I32UConvertSatF64) {
WasmRunner<uint32_t, double> r(execution_tier);
- BUILD(r, WASM_I32_UCONVERT_SAT_F64(WASM_LOCAL_GET(0)));
+ r.Build({WASM_I32_UCONVERT_SAT_F64(WASM_LOCAL_GET(0))});
FOR_FLOAT64_INPUTS(i) {
int32_t expected =
is_inbounds<uint32_t>(i)
@@ -3460,7 +3417,7 @@ WASM_EXEC_TEST(I32UConvertSatF64) {
WASM_EXEC_TEST(F64CopySign) {
WasmRunner<double, double, double> r(execution_tier);
- BUILD(r, WASM_F64_COPYSIGN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F64_COPYSIGN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(copysign(i, j), r.Call(i, j)); }
@@ -3469,7 +3426,7 @@ WASM_EXEC_TEST(F64CopySign) {
WASM_EXEC_TEST(F32CopySign) {
WasmRunner<float, float, float> r(execution_tier);
- BUILD(r, WASM_F32_COPYSIGN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
+ r.Build({WASM_F32_COPYSIGN(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(copysignf(i, j), r.Call(i, j)); }
@@ -3497,7 +3454,7 @@ static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
ADD_CODE(code, kExprI32Const, 0);
ADD_CODE(code, kExprCallIndirect, 1, TABLE_ZERO);
- t.Build(&code[0], &code[0] + code.size());
+ t.Build(base::VectorOf(code));
}
}
@@ -3515,8 +3472,8 @@ WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_f64) {
WASM_EXEC_TEST(Int32RemS_dead) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
- BUILD(r, WASM_I32_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), WASM_DROP,
- WASM_ZERO);
+ r.Build({WASM_I32_REMS(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), WASM_DROP,
+ WASM_ZERO});
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(133, 100));
CHECK_EQ(0, r.Call(kMin, -1));
@@ -3529,120 +3486,119 @@ WASM_EXEC_TEST(Int32RemS_dead) {
WASM_EXEC_TEST(BrToLoopWithValue) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// Subtracts <1> times 3 from <0> and returns the result.
- BUILD(r,
- // loop i32
- kExprLoop, kI32Code,
- // decrement <0> by 3.
- WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(3))),
- // decrement <1> by 1.
- WASM_LOCAL_SET(1, WASM_I32_SUB(WASM_LOCAL_GET(1), WASM_ONE)),
- // load return value <0>, br_if will drop if if the branch is taken.
- WASM_LOCAL_GET(0),
- // continue loop if <1> is != 0.
- WASM_BR_IF(0, WASM_LOCAL_GET(1)),
- // end of loop, value loaded above is the return value.
- kExprEnd);
+ r.Build({// loop i32
+ kExprLoop, kI32Code,
+ // decrement <0> by 3.
+ WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(3))),
+ // decrement <1> by 1.
+ WASM_LOCAL_SET(1, WASM_I32_SUB(WASM_LOCAL_GET(1), WASM_ONE)),
+          // load return value <0>, br_if will drop it if the branch is taken.
+ WASM_LOCAL_GET(0),
+ // continue loop if <1> is != 0.
+ WASM_BR_IF(0, WASM_LOCAL_GET(1)),
+ // end of loop, value loaded above is the return value.
+ kExprEnd});
CHECK_EQ(12, r.Call(27, 5));
}
WASM_EXEC_TEST(BrToLoopWithoutValue) {
// This was broken in the interpreter, see http://crbug.com/715454
WasmRunner<int32_t, int32_t> r(execution_tier);
- BUILD(
- r, kExprLoop, kI32Code, // loop i32
- WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_ONE)), // dec <0>
- WASM_BR_IF(0, WASM_LOCAL_GET(0)), // br_if <0> != 0
- kExprUnreachable, // unreachable
- kExprEnd); // end
+ r.Build(
+ {kExprLoop, kI32Code, // loop i32
+ WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_ONE)), // dec <0>
+ WASM_BR_IF(0, WASM_LOCAL_GET(0)), // br_if <0> != 0
+ kExprUnreachable, // unreachable
+ kExprEnd}); // end
CHECK_TRAP32(r.Call(2));
}
WASM_EXEC_TEST(LoopsWithValues) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_LOOP_I(WASM_LOOP_I(WASM_ONE), WASM_ONE, kExprI32Add));
+ r.Build({WASM_LOOP_I(WASM_LOOP_I(WASM_ONE), WASM_ONE, kExprI32Add)});
CHECK_EQ(2, r.Call());
}
WASM_EXEC_TEST(InvalidStackAfterUnreachable) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, kExprUnreachable, kExprI32Add);
+ r.Build({kExprUnreachable, kExprI32Add});
CHECK_TRAP32(r.Call());
}
WASM_EXEC_TEST(InvalidStackAfterBr) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_BRV(0, WASM_I32V_1(27)), kExprI32Add);
+ r.Build({WASM_BRV(0, WASM_I32V_1(27)), kExprI32Add});
CHECK_EQ(27, r.Call());
}
WASM_EXEC_TEST(InvalidStackAfterReturn) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_RETURN(WASM_I32V_1(17)), kExprI32Add);
+ r.Build({WASM_RETURN(WASM_I32V_1(17)), kExprI32Add});
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(BranchOverUnreachableCode) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- // Start a block which breaks in the middle (hence unreachable code
- // afterwards) and continue execution after this block.
- WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(17)), kExprI32Add),
- // Add one to the 17 returned from the block.
- WASM_ONE, kExprI32Add);
+ r.Build({// Start a block which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this block.
+ WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(17)), kExprI32Add),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add});
CHECK_EQ(18, r.Call());
}
WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop0) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_BLOCK_I(
- // Start a loop which breaks in the middle (hence unreachable code
- // afterwards) and continue execution after this loop.
- // This should validate even though there is no value on the stack
- // at the end of the loop.
- WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)))),
- // Add one to the 17 returned from the block.
- WASM_ONE, kExprI32Add);
+ r.Build(
+ {WASM_BLOCK_I(
+ // Start a loop which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this loop.
+ // This should validate even though there is no value on the stack
+ // at the end of the loop.
+ WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)))),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add});
CHECK_EQ(18, r.Call());
}
WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop1) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_BLOCK_I(
- // Start a loop which breaks in the middle (hence unreachable code
- // afterwards) and continue execution after this loop.
- // Even though unreachable, the loop leaves one value on the stack.
- WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)), WASM_ONE)),
- // Add one to the 17 returned from the block.
- WASM_ONE, kExprI32Add);
+ r.Build(
+ {WASM_BLOCK_I(
+ // Start a loop which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this loop.
+ // Even though unreachable, the loop leaves one value on the stack.
+ WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)), WASM_ONE)),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add});
CHECK_EQ(18, r.Call());
}
WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop2) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r,
- WASM_BLOCK_I(
- // Start a loop which breaks in the middle (hence unreachable code
- // afterwards) and continue execution after this loop.
- // The unreachable code is allowed to pop non-existing values off
- // the stack and push back the result.
- WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)), kExprI32Add)),
- // Add one to the 17 returned from the block.
- WASM_ONE, kExprI32Add);
+ r.Build(
+ {WASM_BLOCK_I(
+ // Start a loop which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this loop.
+ // The unreachable code is allowed to pop non-existing values off
+ // the stack and push back the result.
+ WASM_LOOP_I(WASM_BRV(1, WASM_I32V_1(17)), kExprI32Add)),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add});
CHECK_EQ(18, r.Call());
}
WASM_EXEC_TEST(BlockInsideUnreachable) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_RETURN(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0)));
+ r.Build({WASM_RETURN(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0))});
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(IfInsideUnreachable) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_RETURN(WASM_I32V_1(17)),
- WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN(WASM_ONE)));
+ r.Build(
+ {WASM_RETURN(WASM_I32V_1(17)),
+ WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN(WASM_ONE))});
CHECK_EQ(17, r.Call());
}
@@ -3652,7 +3608,7 @@ WASM_EXEC_TEST(IndirectNull) {
byte sig_index = r.builder().AddSignature(&sig);
r.builder().AddIndirectFunctionTable(nullptr, 1);
- BUILD(r, WASM_CALL_INDIRECT(sig_index, WASM_I32V(0)));
+ r.Build({WASM_CALL_INDIRECT(sig_index, WASM_I32V(0))});
CHECK_TRAP(r.Call());
}
@@ -3664,7 +3620,7 @@ WASM_EXEC_TEST(IndirectNullTyped) {
r.builder().AddIndirectFunctionTable(nullptr, 1,
ValueType::RefNull(sig_index));
- BUILD(r, WASM_CALL_INDIRECT(sig_index, WASM_I32V(0)));
+ r.Build({WASM_CALL_INDIRECT(sig_index, WASM_I32V(0))});
CHECK_TRAP(r.Call());
}
@@ -3717,7 +3673,7 @@ void BinOpOnDifferentRegisters(
WASM_ZERO);
code.insert(code.end(), write_locals_code.begin(),
write_locals_code.end());
- r.Build(code.data(), code.data() + code.size());
+ r.Build(base::VectorOf(code));
for (ctype lhs_value : inputs) {
for (ctype rhs_value : inputs) {
if (lhs == rhs) lhs_value = rhs_value;
@@ -3918,67 +3874,25 @@ WASM_EXEC_TEST(I64RemUOnDifferentRegisters) {
});
}
-TEST(Liftoff_tier_up) {
- WasmRunner<int32_t, int32_t, int32_t> r(TestExecutionTier::kLiftoff);
-
- WasmFunctionCompiler& add = r.NewFunction<int32_t, int32_t, int32_t>("add");
- BUILD(add, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
-
- WasmFunctionCompiler& sub = r.NewFunction<int32_t, int32_t, int32_t>("sub");
- BUILD(sub, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)));
-
- // Create the main function, which shall call {add}.
- BUILD(r, WASM_CALL_FUNCTION(add.function_index(), WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1)));
-
- NativeModule* native_module =
- r.builder().instance_object()->module_object().native_module();
-
- // This test only works if we managed to compile with Liftoff.
- if (!native_module->GetCode(add.function_index())->is_liftoff()) return;
-
- // First run should execute {add}.
- CHECK_EQ(18, r.Call(11, 7));
-
- // Now make a copy of the {sub} function, and add it to the native module at
- // the index of {add}.
- CodeDesc desc;
- memset(&desc, 0, sizeof(CodeDesc));
- WasmCode* sub_code = native_module->GetCode(sub.function_index());
- size_t sub_size = sub_code->instructions().size();
- std::unique_ptr<byte[]> buffer(new byte[sub_code->instructions().size()]);
- memcpy(buffer.get(), sub_code->instructions().begin(), sub_size);
- desc.buffer = buffer.get();
- desc.instr_size = static_cast<int>(sub_size);
- {
- CodeSpaceWriteScope write_scope(native_module);
- std::unique_ptr<WasmCode> new_code = native_module->AddCode(
- add.function_index(), desc, 0, 0, {}, {}, WasmCode::kWasmFunction,
- ExecutionTier::kTurbofan, kNoDebugging);
- native_module->PublishCode(std::move(new_code));
- }
-
- // Second run should now execute {sub}.
- CHECK_EQ(4, r.Call(11, 7));
-}
-
TEST(Regression_1085507) {
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
TestSignatures sigs;
uint32_t sig_v_i = r.builder().AddSignature(sigs.v_i());
- BUILD(r, WASM_I32V_1(0), kExprIf, kVoidCode, WASM_UNREACHABLE,
- WASM_BLOCK_X(sig_v_i, kExprDrop), kExprElse, kExprEnd, WASM_I32V_1(0));
+ r.Build({WASM_I32V_1(0), kExprIf, kVoidCode, WASM_UNREACHABLE,
+ WASM_BLOCK_X(sig_v_i, kExprDrop), kExprElse, kExprEnd,
+ WASM_I32V_1(0)});
}
TEST(Regression_1185323_1185492) {
WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddIndirectFunctionTable(nullptr, 1);
- BUILD(r, WASM_I32V_1(0),
- // Use a long leb128 encoding of kExprTableSize instruction.
- // This exercises a bug in the interpreter which tries to read the
- // immediate at pc+2 (it should be pc+4).
- kNumericPrefix, 0x90, 0x80, 0x00, 0x00, // table.size 0.
- WASM_UNREACHABLE, kExprTableSet, 0x00); // Hits a DCHECK if reached.
+ r.Build({WASM_I32V_1(0),
+ // Use a long leb128 encoding of kExprTableSize instruction.
+ // This exercises a bug in the interpreter which tries to read the
+ // immediate at pc+2 (it should be pc+4).
+ kNumericPrefix, 0x90, 0x80, 0x00, 0x00, // table.size 0.
+ WASM_UNREACHABLE, kExprTableSet,
+ 0x00}); // Hits a DCHECK if reached.
r.Call();
}
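
Every hunk in the file above follows the same mechanical change: the variadic BUILD(r, ...) macro becomes a Build() call taking a braced byte list, and pointer-pair calls become Build(base::VectorOf(code)). A minimal sketch of why the braced form absorbs macro expansions unchanged, using a hypothetical ByteCodeBuilder class rather than V8's actual WasmRunner:

#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <vector>

// Stand-in for the WASM_* macros: expands to several comma-separated bytes.
#define EMIT_I32_ADD 0x20, 0x01, 0x6a

class ByteCodeBuilder {
 public:
  // One overload replaces the old variadic macro: any brace-enclosed byte
  // sequence (including macro expansions) flattens into the list.
  void Build(std::initializer_list<uint8_t> code) {
    code_.assign(code.begin(), code.end());
  }
  std::size_t size() const { return code_.size(); }

 private:
  std::vector<uint8_t> code_;
};

int main() {
  ByteCodeBuilder b;
  b.Build({0x20, 0x00, EMIT_I32_ADD});  // same shape as r.Build({WASM_...})
  return b.size() == 5 ? 0 : 1;
}
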
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 393a088adf..84bc7cea2e 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -195,8 +195,9 @@ class StreamTester {
Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ WasmFeatures features = WasmFeatures::FromIsolate(i_isolate);
stream_ = GetWasmEngine()->StartStreamingCompilation(
- i_isolate, WasmFeatures::All(), v8::Utils::OpenHandle(*context),
+ i_isolate, features, v8::Utils::OpenHandle(*context),
"WebAssembly.compileStreaming()",
std::make_shared<TestResolver>(i_isolate, &state_, &error_message_,
&module_object_));
@@ -324,7 +325,7 @@ ZoneBuffer GetValidCompiledModuleBytes(v8::Isolate* isolate, Zone* zone,
}
while (true) {
WasmCodeRefScope code_ref_scope;
- std::vector<WasmCode*> all_code = native_module->SnapshotCodeTable();
+ std::vector<WasmCode*> all_code = native_module->SnapshotCodeTable().first;
if (std::all_of(all_code.begin(), all_code.end(), [](const WasmCode* code) {
return code && code->tier() == ExecutionTier::kTurbofan;
})) {
@@ -381,13 +382,10 @@ STREAM_TEST(TestAllBytesArriveAOTCompilerFinishesFirst) {
CHECK(tester.IsPromiseFulfilled());
}
-size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
- size_t size, size_t index) {
- ModuleResult result = DecodeWasmModule(
- WasmFeatures::All(), buffer, buffer + size, false,
- ModuleOrigin::kWasmOrigin, isolate->counters(),
- isolate->metrics_recorder(), v8::metrics::Recorder::ContextId::Empty(),
- DecodingMethod::kSyncStream, GetWasmEngine()->allocator());
+size_t GetFunctionOffset(i::Isolate* isolate, base::Vector<const uint8_t> bytes,
+ size_t index) {
+ ModuleResult result = DecodeWasmModule(WasmFeatures::All(), bytes, false,
+ ModuleOrigin::kWasmOrigin);
CHECK(result.ok());
const WasmFunction* func = &result.value()->functions[index];
return func->code.offset();
@@ -400,8 +398,7 @@ STREAM_TEST(TestCutAfterOneFunctionStreamFinishesFirst) {
ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- size_t offset =
- GetFunctionOffset(i_isolate, buffer.begin(), buffer.size(), 1);
+ size_t offset = GetFunctionOffset(i_isolate, base::VectorOf(buffer), 1);
tester.OnBytesReceived(buffer.begin(), offset);
tester.RunCompilerTasks();
CHECK(tester.IsPromisePending());
@@ -419,8 +416,7 @@ STREAM_TEST(TestCutAfterOneFunctionCompilerFinishesFirst) {
ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- size_t offset =
- GetFunctionOffset(i_isolate, buffer.begin(), buffer.size(), 1);
+ size_t offset = GetFunctionOffset(i_isolate, base::VectorOf(buffer), 1);
tester.OnBytesReceived(buffer.begin(), offset);
tester.RunCompilerTasks();
CHECK(tester.IsPromisePending());
@@ -1606,7 +1602,7 @@ STREAM_TEST(TierDownWithError) {
builder.WriteTo(&buffer);
}
- GetWasmEngine()->TierDownAllModulesPerIsolate(i_isolate);
+ GetWasmEngine()->EnterDebuggingForIsolate(i_isolate);
tester.OnBytesReceived(buffer.begin(), buffer.size());
tester.FinishStream();
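
The GetFunctionOffset change above applies the same cleanup to an interface: a separate (buffer, size) pair collapses into a single base::Vector view built with base::VectorOf. A rough equivalent using std::span (C++20) as a stand-in for V8's base::Vector, with hypothetical function names:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <span>
#include <vector>

// Old shape: the length travels separately from the pointer.
std::size_t CountNonZero(const uint8_t* data, std::size_t size) {
  std::size_t n = 0;
  for (std::size_t i = 0; i < size; ++i) n += data[i] != 0;
  return n;
}

// New shape: one view parameter carries both, so call sites cannot pass a
// mismatched (pointer, size) pair.
std::size_t CountNonZero(std::span<const uint8_t> bytes) {
  std::size_t n = 0;
  for (uint8_t b : bytes) n += (b != 0);
  return n;
}

int main() {
  std::vector<uint8_t> buffer = {0, 7, 0, 42};
  std::printf("%zu %zu\n", CountNonZero(buffer.data(), buffer.size()),
              CountNonZero(std::span<const uint8_t>(buffer)));
  return 0;
}
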
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 86a4602516..a070dab4e4 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -116,9 +116,9 @@ class BreakHandler : public debug::DebugDelegate {
CHECK_GT(expected_breaks_.size(), count_);
// Check the current position.
- StackTraceFrameIterator frame_it(isolate_);
+ DebuggableStackFrameIterator frame_it(isolate_);
auto summ = FrameSummary::GetTop(frame_it.frame()).AsWasm();
- CHECK_EQ(expected_breaks_[count_].position, summ.byte_offset());
+ CHECK_EQ(expected_breaks_[count_].position, summ.code_offset());
expected_breaks_[count_].pre_action();
Action next_action = expected_breaks_[count_].action;
@@ -140,7 +140,7 @@ class BreakHandler : public debug::DebugDelegate {
Handle<BreakPoint> SetBreakpoint(WasmRunnerBase* runner, int function_index,
int byte_offset,
int expected_set_byte_offset = -1) {
- runner->TierDown();
+ runner->SwitchToDebug();
int func_offset =
runner->builder().GetFunctionAt(function_index)->code.offset();
int code_offset = func_offset + byte_offset;
@@ -231,7 +231,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
HandleScope handles(isolate_);
- StackTraceFrameIterator frame_it(isolate_);
+ DebuggableStackFrameIterator frame_it(isolate_);
WasmFrame* frame = WasmFrame::cast(frame_it.frame());
DebugInfo* debug_info = frame->native_module()->GetDebugInfo();
@@ -284,7 +284,7 @@ int GetIntReturnValue(MaybeHandle<Object> retval) {
WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
WasmRunner<int> runner(execution_tier);
- BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
+ runner.Build({WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE)});
WasmInstanceObject instance = *runner.builder().instance_object();
NativeModule* native_module = instance.module_object().native_module();
@@ -312,7 +312,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
- BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3)));
+ runner.Build({WASM_NOP, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3))});
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -330,7 +330,7 @@ WASM_COMPILED_EXEC_TEST(WasmNonBreakablePosition) {
WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
- BUILD(runner, WASM_RETURN(WASM_I32V_2(1024)));
+ runner.Build({WASM_RETURN(WASM_I32V_2(1024))});
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -346,7 +346,7 @@ WASM_COMPILED_EXEC_TEST(WasmNonBreakablePosition) {
WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
WasmRunner<int> runner(execution_tier);
- BUILD(runner, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3)));
+ runner.Build({WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3))});
Isolate* isolate = runner.main_isolate();
Handle<JSFunction> main_fun_wrapper =
@@ -370,7 +370,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
WasmRunner<int, int> runner(execution_tier);
- runner.TierDown();
+ runner.SwitchToDebug();
WasmFunctionCompiler& f2 = runner.NewFunction<void>();
f2.AllocateLocal(kWasmI32);
@@ -379,15 +379,14 @@ WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
// functions in the code section matches the function indexes.
// return arg0
- BUILD(runner, WASM_RETURN(WASM_LOCAL_GET(0)));
+ runner.Build({WASM_RETURN(WASM_LOCAL_GET(0))});
// for (int i = 0; i < 10; ++i) { f2(i); }
- BUILD(f2, WASM_LOOP(
- WASM_BR_IF(0, WASM_BINOP(kExprI32GeU, WASM_LOCAL_GET(0),
- WASM_I32V_1(10))),
- WASM_LOCAL_SET(
- 0, WASM_BINOP(kExprI32Sub, WASM_LOCAL_GET(0), WASM_ONE)),
- WASM_CALL_FUNCTION(runner.function_index(), WASM_LOCAL_GET(0)),
- WASM_DROP, WASM_BR(1)));
+ f2.Build({WASM_LOOP(
+ WASM_BR_IF(0,
+ WASM_BINOP(kExprI32GeU, WASM_LOCAL_GET(0), WASM_I32V_1(10))),
+ WASM_LOCAL_SET(0, WASM_BINOP(kExprI32Sub, WASM_LOCAL_GET(0), WASM_ONE)),
+ WASM_CALL_FUNCTION(runner.function_index(), WASM_LOCAL_GET(0)), WASM_DROP,
+ WASM_BR(1))});
Isolate* isolate = runner.main_isolate();
Handle<JSFunction> main_fun_wrapper =
@@ -415,14 +414,14 @@ WASM_COMPILED_EXEC_TEST(WasmGetLocalsAndStack) {
runner.AllocateLocal(kWasmF32);
runner.AllocateLocal(kWasmF64);
- BUILD(runner,
- // set [1] to 17
- WASM_LOCAL_SET(1, WASM_I64V_1(17)),
- // set [2] to <arg0> = 7
- WASM_LOCAL_SET(2, WASM_F32_SCONVERT_I32(WASM_LOCAL_GET(0))),
- // set [3] to <arg1>/2 = 8.5
- WASM_LOCAL_SET(3, WASM_F64_DIV(WASM_F64_SCONVERT_I64(WASM_LOCAL_GET(1)),
- WASM_F64(2))));
+ runner.Build(
+ {// set [1] to 17
+ WASM_LOCAL_SET(1, WASM_I64V_1(17)),
+ // set [2] to <arg0> = 7
+ WASM_LOCAL_SET(2, WASM_F32_SCONVERT_I32(WASM_LOCAL_GET(0))),
+ // set [3] to <arg1>/2 = 8.5
+ WASM_LOCAL_SET(3, WASM_F64_DIV(WASM_F64_SCONVERT_I64(WASM_LOCAL_GET(1)),
+ WASM_F64(2)))});
Isolate* isolate = runner.main_isolate();
Handle<JSFunction> main_fun_wrapper =
@@ -458,8 +457,8 @@ WASM_COMPILED_EXEC_TEST(WasmRemoveBreakPoint) {
WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
- BUILD(runner, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP,
- WASM_I32V_1(14));
+ runner.Build(
+ {WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_I32V_1(14)});
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -489,8 +488,8 @@ WASM_COMPILED_EXEC_TEST(WasmRemoveLastBreakPoint) {
WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
- BUILD(runner, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP,
- WASM_I32V_1(14));
+ runner.Build(
+ {WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_I32V_1(14)});
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -517,8 +516,8 @@ WASM_COMPILED_EXEC_TEST(WasmRemoveAllBreakPoint) {
WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
- BUILD(runner, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP,
- WASM_I32V_1(14));
+ runner.Build(
+ {WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_NOP, WASM_I32V_1(14)});
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -558,9 +557,9 @@ WASM_COMPILED_EXEC_TEST(WasmBreakInPostMVP) {
constexpr int kReturn = 13;
constexpr int kIgnored = 23;
- BUILD(runner,
- WASM_BLOCK_X(sig_idx, WASM_I32V_1(kReturn), WASM_I32V_1(kIgnored)),
- WASM_DROP);
+ runner.Build(
+ {WASM_BLOCK_X(sig_idx, WASM_I32V_1(kReturn), WASM_I32V_1(kIgnored)),
+ WASM_DROP});
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -578,7 +577,7 @@ WASM_COMPILED_EXEC_TEST(WasmBreakInPostMVP) {
WASM_COMPILED_EXEC_TEST(Regress10889) {
FLAG_SCOPE(print_wasm_code);
WasmRunner<int> runner(execution_tier);
- BUILD(runner, WASM_I32V_1(0));
+ runner.Build({WASM_I32V_1(0)});
SetBreakpoint(&runner, runner.function_index(), 1, 1);
}
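
In the breakpoint tests above, SetBreakpoint receives a byte offset relative to the function body and adds the function's code offset before installing the breakpoint, while the handler now compares against FrameSummary::code_offset(). A toy illustration of that translation, with made-up types:

#include <cassert>

// Hypothetical bookkeeping: a breakpoint is requested relative to a function
// body, but the engine-side check sees an offset into the whole module.
struct FunctionRange {
  int code_start;  // where the function body begins within the module code
};

int ToModuleOffset(const FunctionRange& fn, int byte_offset_in_function) {
  return fn.code_start + byte_offset_in_function;
}

int main() {
  FunctionRange f{0x40};
  assert(ToModuleOffset(f, 3) == 0x43);  // what a code_offset() check compares
  return 0;
}
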
diff --git a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
index 9e11ab13e0..0c9120355f 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
@@ -19,7 +19,7 @@ namespace wasm {
namespace test_wasm_import_wrapper_cache {
std::shared_ptr<NativeModule> NewModule(Isolate* isolate) {
- std::shared_ptr<WasmModule> module(new WasmModule);
+ auto module = std::make_shared<WasmModule>(kWasmOrigin);
constexpr size_t kCodeSizeEstimate = 16384;
auto native_module = GetWasmEngine()->NewNativeModule(
isolate, WasmFeatures::All(), std::move(module), kCodeSizeEstimate);
@@ -35,7 +35,7 @@ TEST(CacheHit) {
WasmImportWrapperCache::ModificationScope cache_scope(
module->import_wrapper_cache());
- auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+ auto kind = ImportCallKind::kJSFunctionArityMatch;
auto sig = sigs.i_i();
uint32_t canonical_type_index =
GetTypeCanonicalizer()->AddRecursiveGroup(sig);
@@ -63,7 +63,7 @@ TEST(CacheMissSig) {
WasmImportWrapperCache::ModificationScope cache_scope(
module->import_wrapper_cache());
- auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+ auto kind = ImportCallKind::kJSFunctionArityMatch;
auto sig1 = sigs.i_i();
int expected_arity1 = static_cast<int>(sig1->parameter_count());
uint32_t canonical_type_index1 =
@@ -94,8 +94,8 @@ TEST(CacheMissKind) {
WasmImportWrapperCache::ModificationScope cache_scope(
module->import_wrapper_cache());
- auto kind1 = compiler::WasmImportCallKind::kJSFunctionArityMatch;
- auto kind2 = compiler::WasmImportCallKind::kJSFunctionArityMismatch;
+ auto kind1 = ImportCallKind::kJSFunctionArityMatch;
+ auto kind2 = ImportCallKind::kJSFunctionArityMismatch;
auto sig = sigs.i_i();
int expected_arity = static_cast<int>(sig->parameter_count());
uint32_t canonical_type_index =
@@ -122,7 +122,7 @@ TEST(CacheHitMissSig) {
WasmImportWrapperCache::ModificationScope cache_scope(
module->import_wrapper_cache());
- auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+ auto kind = ImportCallKind::kJSFunctionArityMatch;
auto sig1 = sigs.i_i();
int expected_arity1 = static_cast<int>(sig1->parameter_count());
uint32_t canonical_type_index1 =
diff --git a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
index e686668d2c..397d37089a 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
@@ -312,9 +312,6 @@ COMPILE_TEST(TestEventMetrics) {
recorder->module_decoded_.back().module_size_in_bytes);
CHECK_EQ(1, recorder->module_decoded_.back().function_count);
CHECK_LE(0, recorder->module_decoded_.back().wall_clock_duration_in_us);
- CHECK_IMPLIES(
- v8::base::ThreadTicks::IsSupported() && !i::v8_flags.wasm_test_streaming,
- recorder->module_decoded_.back().cpu_duration_in_us > 0);
CHECK_EQ(1, recorder->module_compiled_.size());
CHECK(recorder->module_compiled_.back().success);
@@ -335,12 +332,6 @@ COMPILE_TEST(TestEventMetrics) {
CHECK_GE(native_module->generated_code_size(),
recorder->module_compiled_.back().code_size_in_bytes);
CHECK_LE(0, recorder->module_compiled_.back().wall_clock_duration_in_us);
- CHECK_EQ(native_module->baseline_compilation_cpu_duration(),
- recorder->module_compiled_.back().cpu_duration_in_us);
- CHECK_IMPLIES(v8::base::ThreadTicks::IsSupported() &&
- !i::v8_flags.wasm_test_streaming &&
- !i::v8_flags.wasm_lazy_compilation,
- recorder->module_compiled_.back().cpu_duration_in_us > 0);
CHECK_EQ(1, recorder->module_instantiated_.size());
CHECK(recorder->module_instantiated_.back().success);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 0afa5fe1c4..b23bf65e5a 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -39,9 +39,14 @@ class WasmSerializationTest {
WasmModuleBuilder* builder = zone->New<WasmModuleBuilder>(zone);
TestSignatures sigs;
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
- byte code[] = {WASM_LOCAL_GET(0), kExprI32Const, 1, kExprI32Add, kExprEnd};
- f->EmitCode(code, sizeof(code));
+ // Generate 3 functions, and export the last one with the name "increment".
+ WasmFunctionBuilder* f;
+ for (int i = 0; i < 3; ++i) {
+ f = builder->AddFunction(sigs.i_i());
+ byte code[] = {WASM_LOCAL_GET(0), kExprI32Const, 1, kExprI32Add,
+ kExprEnd};
+ f->EmitCode(code, sizeof(code));
+ }
builder->AddExport(base::CStrVector(kFunctionName), f);
builder->WriteTo(buffer);
@@ -60,6 +65,11 @@ class WasmSerializationTest {
memset(const_cast<uint8_t*>(wire_bytes_.data()), 0, wire_bytes_.size() / 2);
}
+ void PartlyDropTieringBudget() {
+ serialized_bytes_ = {serialized_bytes_.data(),
+ serialized_bytes_.size() - 1};
+ }
+
MaybeHandle<WasmModuleObject> Deserialize(
base::Vector<const char> source_url = {}) {
return DeserializeNativeModule(CcTest::i_isolate(),
@@ -86,10 +96,10 @@ class WasmSerializationTest {
Handle<JSReceiver>::null(),
MaybeHandle<JSArrayBuffer>())
.ToHandleChecked();
- Handle<Object> params[1] = {
- Handle<Object>(Smi::FromInt(41), CcTest::i_isolate())};
+ Handle<Object> params[1] = {handle(Smi::FromInt(41), CcTest::i_isolate())};
int32_t result = testing::CallWasmFunctionForTesting(
- CcTest::i_isolate(), instance, kFunctionName, 1, params);
+ CcTest::i_isolate(), instance, kFunctionName,
+ base::ArrayVector(params));
CHECK_EQ(42, result);
}
@@ -99,6 +109,8 @@ class WasmSerializationTest {
CcTest::CollectAllAvailableGarbage();
}
+ v8::MemorySpan<const uint8_t> wire_bytes() const { return wire_bytes_; }
+
private:
Zone* zone() { return &zone_; }
@@ -119,6 +131,7 @@ class WasmSerializationTest {
// serialization (when the isolate is disposed).
std::weak_ptr<NativeModule> weak_native_module;
{
+ v8::Isolate::Scope isolate_scope(serialization_v8_isolate);
HandleScope scope(serialization_isolate);
v8::Local<v8::Context> serialization_context =
v8::Context::New(serialization_v8_isolate);
@@ -158,7 +171,7 @@ class WasmSerializationTest {
CHECK_EQ(0, data_.size);
while (data_.size == 0) {
testing::CallWasmFunctionForTesting(serialization_isolate, instance,
- kFunctionName, 0, nullptr);
+ kFunctionName, {});
data_ = compiled_module.Serialize();
}
CHECK_LT(0, data_.size);
@@ -269,6 +282,7 @@ UNINITIALIZED_TEST(CompiledWasmModulesTransfer) {
std::vector<v8::CompiledWasmModule> store;
std::shared_ptr<NativeModule> original_native_module;
{
+ v8::Isolate::Scope isolate_scope(from_isolate);
v8::HandleScope scope(from_isolate);
LocalContext env(from_isolate);
@@ -292,6 +306,7 @@ UNINITIALIZED_TEST(CompiledWasmModulesTransfer) {
{
v8::Isolate* to_isolate = v8::Isolate::New(create_params);
{
+ v8::Isolate::Scope isolate_scope(to_isolate);
v8::HandleScope scope(to_isolate);
LocalContext env(to_isolate);
@@ -319,18 +334,19 @@ TEST(TierDownAfterDeserialization) {
CHECK(test.Deserialize().ToHandle(&module_object));
auto* native_module = module_object->native_module();
- CHECK_EQ(1, native_module->module()->functions.size());
+ CHECK_EQ(3, native_module->module()->functions.size());
WasmCodeRefScope code_ref_scope;
// The deserialized code must be TurboFan (we wait for tier-up before
// serializing).
- auto* turbofan_code = native_module->GetCode(0);
+ auto* turbofan_code = native_module->GetCode(2);
CHECK_NOT_NULL(turbofan_code);
CHECK_EQ(ExecutionTier::kTurbofan, turbofan_code->tier());
- GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
+ GetWasmEngine()->EnterDebuggingForIsolate(isolate);
- auto* liftoff_code = native_module->GetCode(0);
- CHECK_EQ(ExecutionTier::kLiftoff, liftoff_code->tier());
+ // Entering debugging should delete all code, so that debug code gets compiled
+ // lazily.
+ CHECK_NULL(native_module->GetCode(0));
}
TEST(SerializeLiftoffModuleFails) {
@@ -363,4 +379,54 @@ TEST(SerializeLiftoffModuleFails) {
CHECK(!wasm_serializer.SerializeNativeModule({buffer.get(), buffer_size}));
}
+TEST(SerializeTieringBudget) {
+ WasmSerializationTest test;
+
+ Isolate* isolate = CcTest::i_isolate();
+ v8::OwnedBuffer serialized_bytes;
+ uint32_t mock_budget[3]{1, 2, 3};
+ {
+ HandleScope scope(isolate);
+ Handle<WasmModuleObject> module_object;
+ CHECK(test.Deserialize().ToHandle(&module_object));
+
+ auto* native_module = module_object->native_module();
+ memcpy(native_module->tiering_budget_array(), mock_budget,
+ arraysize(mock_budget) * sizeof(uint32_t));
+ v8::Local<v8::Object> v8_module_obj =
+ v8::Utils::ToLocal(Handle<JSObject>::cast(module_object));
+ CHECK(v8_module_obj->IsWasmModuleObject());
+
+ v8::Local<v8::WasmModuleObject> v8_module_object =
+ v8_module_obj.As<v8::WasmModuleObject>();
+ serialized_bytes = v8_module_object->GetCompiledModule().Serialize();
+
+ // Change one entry in the tiering budget after serialization to make sure
+ // the module gets deserialized and not just loaded from the module cache.
+ native_module->tiering_budget_array()[0]++;
+ }
+ test.CollectGarbage();
+ HandleScope scope(isolate);
+ Handle<WasmModuleObject> module_object;
+ CHECK(DeserializeNativeModule(isolate,
+ base::VectorOf(serialized_bytes.buffer.get(),
+ serialized_bytes.size),
+ base::VectorOf(test.wire_bytes()), {})
+ .ToHandle(&module_object));
+
+ auto* native_module = module_object->native_module();
+ for (size_t i = 0; i < arraysize(mock_budget); ++i) {
+ CHECK_EQ(mock_budget[i], native_module->tiering_budget_array()[i]);
+ }
+}
+
+TEST(DeserializeTieringBudgetPartlyMissing) {
+ WasmSerializationTest test;
+ {
+ HandleScope scope(CcTest::i_isolate());
+ test.PartlyDropTieringBudget();
+ CHECK(test.Deserialize().is_null());
+ }
+ test.CollectGarbage();
+}
} // namespace v8::internal::wasm
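
The two new tests above exercise the tiering-budget bytes in the serialized module: a round trip must preserve them, and a truncated buffer must make deserialization fail cleanly rather than crash. A self-contained sketch of the truncation idea with a toy length-prefixed format (not V8's serialization layout):

#include <cstdint>
#include <optional>
#include <vector>

// Toy payload standing in for a serialized module: a count byte followed by
// that many budget bytes. Deserialization rejects a truncated buffer.
std::vector<uint8_t> Serialize(const std::vector<uint8_t>& budget) {
  std::vector<uint8_t> out;
  out.push_back(static_cast<uint8_t>(budget.size()));
  out.insert(out.end(), budget.begin(), budget.end());
  return out;
}

std::optional<std::vector<uint8_t>> Deserialize(
    const std::vector<uint8_t>& bytes) {
  if (bytes.empty() || bytes.size() != 1u + bytes[0]) return std::nullopt;
  return std::vector<uint8_t>(bytes.begin() + 1, bytes.end());
}

int main() {
  std::vector<uint8_t> serialized = Serialize({1, 2, 3});
  serialized.pop_back();  // like PartlyDropTieringBudget: drop the last byte
  return Deserialize(serialized).has_value() ? 1 : 0;  // expect failure -> 0
}
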
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 29db1efe95..d02c8ff6ad 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -34,12 +34,14 @@ class SharedEngineIsolate {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate::Initialize(isolate_, create_params);
+ v8_isolate()->Enter();
v8::HandleScope handle_scope(v8_isolate());
v8::Context::New(v8_isolate())->Enter();
testing::SetupIsolateForWasmModule(isolate());
zone_.reset(new Zone(isolate()->allocator(), ZONE_NAME));
}
~SharedEngineIsolate() {
+ v8_isolate()->Exit();
zone_.reset();
isolate_->Dispose();
}
@@ -71,8 +73,7 @@ class SharedEngineIsolate {
}
int32_t Run(Handle<WasmInstanceObject> instance) {
- return testing::CallWasmFunctionForTesting(isolate(), instance, "main", 0,
- nullptr);
+ return testing::CallWasmFunctionForTesting(isolate(), instance, "main", {});
}
private:
@@ -299,7 +300,7 @@ TEST(SharedEngineRunThreadedTierUp) {
Handle<WasmInstanceObject> instance = isolate->ImportInstance(module);
WasmFeatures detected = WasmFeatures::None();
WasmCompilationUnit::CompileWasmFunction(
- isolate->isolate(), module.get(), &detected,
+ isolate->isolate()->counters(), module.get(), &detected,
&module->module()->functions[0], ExecutionTier::kTurbofan);
CHECK_EQ(23, isolate->Run(instance));
});
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index e5086553ea..5ca8716325 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -130,14 +130,14 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
*v8::Local<v8::Function>::Cast(CompileRun(source))));
ManuallyImportedJSFunction import = {sigs.v_v(), js_function};
uint32_t js_throwing_index = 0;
- WasmRunner<void> r(execution_tier, &import);
+ WasmRunner<void> r(execution_tier, kWasmOrigin, &import);
// Add a nop such that we don't always get position 1.
- BUILD(r, WASM_NOP, WASM_CALL_FUNCTION0(js_throwing_index));
+ r.Build({WASM_NOP, WASM_CALL_FUNCTION0(js_throwing_index)});
uint32_t wasm_index_1 = r.function()->func_index;
WasmFunctionCompiler& f2 = r.NewFunction<void>("call_main");
- BUILD(f2, WASM_CALL_FUNCTION0(wasm_index_1));
+ f2.Build({WASM_CALL_FUNCTION0(wasm_index_1)});
uint32_t wasm_index_2 = f2.function_index();
Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index_2);
@@ -171,13 +171,14 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
// Trigger a trap in wasm, stack should contain a source url.
WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<int> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
+ WasmRunner<int> r(execution_tier, kWasmOrigin, nullptr, "main",
+ kRuntimeExceptionSupport);
std::vector<byte> trap_code(1, kExprUnreachable);
r.Build(trap_code.data(), trap_code.data() + trap_code.size());
WasmFunctionCompiler& f = r.NewFunction<int>("call_main");
- BUILD(f, WASM_CALL_FUNCTION0(0));
+ f.Build({WASM_CALL_FUNCTION0(0)});
uint32_t wasm_index = f.function_index();
Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index);
@@ -232,7 +233,7 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
int unreachable_pos = 1 << (8 * pos_shift);
TestSignatures sigs;
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<int> r(execution_tier, nullptr, "main",
+ WasmRunner<int> r(execution_tier, kWasmOrigin, nullptr, "main",
kRuntimeExceptionSupport);
std::vector<byte> trap_code(unreachable_pos + 1, kExprNop);
@@ -242,7 +243,7 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
uint32_t wasm_index_1 = r.function()->func_index;
WasmFunctionCompiler& f2 = r.NewFunction<int>("call_main");
- BUILD(f2, WASM_CALL_FUNCTION0(0));
+ f2.Build({WASM_CALL_FUNCTION0(0)});
uint32_t wasm_index_2 = f2.function_index();
Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index_2);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 9d3c158f50..826ad92875 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -68,10 +68,11 @@ void CheckExceptionInfos(v8::internal::Isolate* isolate, Handle<Object> exc,
// Trigger a trap for executing unreachable.
WASM_COMPILED_EXEC_TEST(Unreachable) {
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<void> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
+ WasmRunner<void> r(execution_tier, kWasmOrigin, nullptr, "main",
+ kRuntimeExceptionSupport);
TestSignatures sigs;
- BUILD(r, WASM_UNREACHABLE);
+ r.Build({WASM_UNREACHABLE});
uint32_t wasm_index = r.function()->func_index;
Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index);
@@ -101,19 +102,20 @@ WASM_COMPILED_EXEC_TEST(Unreachable) {
// Trigger a trap for loading from out-of-bounds.
WASM_COMPILED_EXEC_TEST(IllegalLoad) {
- WasmRunner<void> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
+ WasmRunner<void> r(execution_tier, kWasmOrigin, nullptr, "main",
+ kRuntimeExceptionSupport);
TestSignatures sigs;
r.builder().AddMemory(0L);
- BUILD(r, WASM_IF(WASM_ONE, WASM_SEQ(WASM_LOAD_MEM(MachineType::Int32(),
- WASM_I32V_1(-3)),
- WASM_DROP)));
+ r.Build({WASM_IF(
+ WASM_ONE, WASM_SEQ(WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-3)),
+ WASM_DROP))});
uint32_t wasm_index_1 = r.function()->func_index;
WasmFunctionCompiler& f2 = r.NewFunction<void>("call_main");
// Insert a NOP such that the position of the call is not one.
- BUILD(f2, WASM_NOP, WASM_CALL_FUNCTION0(wasm_index_1));
+ f2.Build({WASM_NOP, WASM_CALL_FUNCTION0(wasm_index_1)});
uint32_t wasm_index_2 = f2.function_index();
Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index_2);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index d6aacab5c3..ee0f88ab82 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -18,6 +18,7 @@
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
@@ -44,10 +45,10 @@ bool IsSameNan(double expected, double actual) {
}
TestingModuleBuilder::TestingModuleBuilder(
- Zone* zone, ManuallyImportedJSFunction* maybe_import,
+ Zone* zone, ModuleOrigin origin, ManuallyImportedJSFunction* maybe_import,
TestExecutionTier tier, RuntimeExceptionSupport exception_support,
TestingModuleMemoryType mem_type, Isolate* isolate)
- : test_module_(std::make_shared<WasmModule>()),
+ : test_module_(std::make_shared<WasmModule>(origin)),
isolate_(isolate ? isolate : CcTest::InitIsolateOnce()),
enabled_features_(WasmFeatures::FromIsolate(isolate_)),
execution_tier_(tier),
@@ -75,15 +76,14 @@ TestingModuleBuilder::TestingModuleBuilder(
if (maybe_import) {
// Manually compile an import wrapper and insert it into the instance.
- auto resolved = compiler::ResolveWasmImportCall(
- maybe_import->js_function, maybe_import->sig,
- instance_object_->module(), enabled_features_);
- compiler::WasmImportCallKind kind = resolved.kind;
- Handle<JSReceiver> callable = resolved.callable;
- WasmImportWrapperCache::ModificationScope cache_scope(
- native_module_->import_wrapper_cache());
uint32_t canonical_type_index =
GetTypeCanonicalizer()->AddRecursiveGroup(maybe_import->sig);
+ WasmImportData resolved(maybe_import->js_function, maybe_import->sig,
+ canonical_type_index);
+ ImportCallKind kind = resolved.kind();
+ Handle<JSReceiver> callable = resolved.callable();
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ native_module_->import_wrapper_cache());
WasmImportWrapperCache::CacheKey key(
kind, canonical_type_index,
static_cast<int>(maybe_import->sig->parameter_count()), kNoSuspend);
@@ -98,7 +98,7 @@ TestingModuleBuilder::TestingModuleBuilder(
}
ImportedFunctionEntry(instance_object_, maybe_import_index)
- .SetWasmToJs(isolate_, callable, import_wrapper, resolved.suspend);
+ .SetWasmToJs(isolate_, callable, import_wrapper, resolved.suspend());
}
if (tier == TestExecutionTier::kInterpreter) {
@@ -125,6 +125,8 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
? test_module_->maximum_pages
: initial_pages;
test_module_->has_memory = true;
+ test_module_->min_memory_size = initial_pages * kWasmPageSize;
+ test_module_->max_memory_size = maximum_pages * kWasmPageSize;
// Create the WasmMemoryObject.
Handle<WasmMemoryObject> memory_object =
@@ -155,6 +157,12 @@ uint32_t TestingModuleBuilder::AddFunction(const FunctionSig* sig,
DCHECK_NULL(test_module_->validated_functions);
test_module_->validated_functions =
std::make_unique<std::atomic<uint8_t>[]>((kMaxFunctions + 7) / 8);
+ if (is_asmjs_module(test_module_.get())) {
+ // All asm.js functions are valid by design.
+ std::fill_n(test_module_->validated_functions.get(),
+ (kMaxFunctions + 7) / 8, 0xff);
+ }
+ test_module_->type_feedback.well_known_imports.Initialize(kMaxFunctions);
}
uint32_t index = static_cast<uint32_t>(test_module_->functions.size());
test_module_->functions.push_back({sig, // sig
@@ -185,7 +193,7 @@ uint32_t TestingModuleBuilder::AddFunction(const FunctionSig* sig,
}
DCHECK_LT(index, kMaxFunctions); // limited for testing.
if (!instance_object_.is_null()) {
- Handle<FixedArray> funcs = isolate_->factory()->NewFixedArray(
+ Handle<FixedArray> funcs = isolate_->factory()->NewFixedArrayWithZeroes(
static_cast<int>(test_module_->functions.size()));
instance_object_->set_wasm_internal_functions(*funcs);
}
@@ -235,10 +243,12 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance_object(), table_index, table_size);
- Handle<WasmTableObject> table_obj =
- WasmTableObject::New(isolate_, instance, table.type, table.initial_size,
- table.has_maximum_size, table.maximum_size, nullptr,
- isolate_->factory()->null_value());
+ Handle<WasmTableObject> table_obj = WasmTableObject::New(
+ isolate_, instance, table.type, table.initial_size,
+ table.has_maximum_size, table.maximum_size, nullptr,
+ IsSubtypeOf(table.type, kWasmExternRef, test_module_.get())
+ ? Handle<Object>::cast(isolate_->factory()->null_value())
+ : Handle<Object>::cast(isolate_->factory()->wasm_null()));
WasmTableObject::AddDispatchTable(isolate_, table_obj, instance_object_,
table_index);
@@ -273,13 +283,13 @@ uint32_t TestingModuleBuilder::AddBytes(base::Vector<const byte> bytes) {
base::OwnedVector<uint8_t> new_bytes =
base::OwnedVector<uint8_t>::New(new_size);
if (old_size > 0) {
- memcpy(new_bytes.start(), old_bytes.begin(), old_size);
+ memcpy(new_bytes.begin(), old_bytes.begin(), old_size);
} else {
// Set the unused byte. It is never decoded, but the bytes are used as the
// key in the native module cache.
new_bytes[0] = 0;
}
- memcpy(new_bytes.start() + bytes_offset, bytes.begin(), bytes.length());
+ memcpy(new_bytes.begin() + bytes_offset, bytes.begin(), bytes.length());
native_module_->SetWireBytes(std::move(new_bytes));
return bytes_offset;
}
@@ -287,7 +297,7 @@ uint32_t TestingModuleBuilder::AddBytes(base::Vector<const byte> bytes) {
uint32_t TestingModuleBuilder::AddException(const FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
uint32_t index = static_cast<uint32_t>(test_module_->tags.size());
- test_module_->tags.push_back(WasmTag{sig});
+ test_module_->tags.emplace_back(sig, AddSignature(sig));
Handle<WasmExceptionTag> tag = WasmExceptionTag::New(isolate_, index);
Handle<FixedArray> table(instance_object_->tags_table(), isolate_);
table = isolate_->factory()->CopyFixedArrayAndGrow(table, 1);
@@ -346,29 +356,6 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(
return index;
}
-uint32_t TestingModuleBuilder::AddPassiveElementSegment(
- const std::vector<uint32_t>& entries) {
- uint32_t index = static_cast<uint32_t>(test_module_->elem_segments.size());
- DCHECK_EQ(index, dropped_elem_segments_.size());
-
- test_module_->elem_segments.emplace_back(
- kWasmFuncRef, WasmElemSegment::kStatusPassive,
- WasmElemSegment::kFunctionIndexElements);
- auto& elem_segment = test_module_->elem_segments.back();
- for (uint32_t entry : entries) {
- elem_segment.entries.emplace_back(ConstantExpression::RefFunc(entry));
- }
-
- // The vector pointers may have moved, so update the instance object.
- dropped_elem_segments_.push_back(0);
- uint32_t size = static_cast<uint32_t>(dropped_elem_segments_.size());
- Handle<FixedUInt8Array> dropped_elem_segments =
- FixedUInt8Array::New(isolate_, size);
- dropped_elem_segments->copy_in(0, dropped_elem_segments_.data(), size);
- instance_object_->set_dropped_elem_segments(*dropped_elem_segments);
- return index;
-}
-
CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
return {test_module_.get(), native_module_->bounds_checks(),
runtime_exception_support_, enabled_features_, kNoDynamicTiering};
@@ -408,8 +395,11 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
native_module_->ReserveCodeTableForTesting(kMaxFunctions);
auto instance = WasmInstanceObject::New(isolate_, module_object);
- instance->set_tags_table(*isolate_->factory()->empty_fixed_array());
+ instance->set_tags_table(ReadOnlyRoots{isolate_}.empty_fixed_array());
instance->set_globals_start(globals_data_);
+ Handle<FixedArray> feedback_vector =
+ isolate_->factory()->NewFixedArrayWithZeroes(kMaxFunctions);
+ instance->set_feedback_vectors(*feedback_vector);
return instance;
}
@@ -419,23 +409,9 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
WasmFeatures unused_detected_features;
FunctionBody body(sig, 0, start, end);
std::vector<compiler::WasmLoopInfo> loops;
- DecodeResult result = BuildTFGraph(
- zone->allocator(), WasmFeatures::All(), nullptr, builder,
- &unused_detected_features, body, &loops, nullptr, 0, kRegularFunction);
- if (result.failed()) {
-#ifdef DEBUG
- if (!v8_flags.trace_wasm_decoder) {
- // Retry the compilation with the tracing flag on, to help in debugging.
- v8_flags.trace_wasm_decoder = true;
- result = BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr,
- builder, &unused_detected_features, body, &loops,
- nullptr, 0, kRegularFunction);
- }
-#endif
-
- FATAL("Verification failed; pc = +%x, msg = %s", result.error().offset(),
- result.error().message().c_str());
- }
+ BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
+ &unused_detected_features, body, &loops, nullptr, nullptr, 0,
+ nullptr, kRegularFunction);
builder->LowerInt64(compiler::WasmGraphBuilder::kCalledFromWasm);
}
@@ -544,7 +520,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode(Isolate* isolate) {
rep_builder.AddParam(MachineRepresentation::kWord32);
}
compiler::Int64Lowering r(graph(), machine(), common(), simplified(),
- zone(), nullptr, rep_builder.Build());
+ zone(), rep_builder.Build());
r.LowerGraph();
}
@@ -570,28 +546,26 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode(Isolate* isolate) {
// This struct is just a type tag for Zone::NewArray<T>(size_t) call.
struct WasmFunctionCompilerBuffer {};
-void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
+void WasmFunctionCompiler::Build(base::Vector<const uint8_t> bytes) {
size_t locals_size = local_decls.Size();
- size_t total_size = end - start + locals_size + 1;
+ size_t total_size = bytes.size() + locals_size + 1;
byte* buffer = zone()->NewArray<byte, WasmFunctionCompilerBuffer>(total_size);
// Prepend the local decls to the code.
local_decls.Emit(buffer);
// Emit the code.
- memcpy(buffer + locals_size, start, end - start);
+ memcpy(buffer + locals_size, bytes.begin(), bytes.size());
// Append an extra end opcode.
buffer[total_size - 1] = kExprEnd;
- start = buffer;
- end = buffer + total_size;
+ bytes = base::VectorOf(buffer, total_size);
- CHECK_GE(kMaxInt, end - start);
- int len = static_cast<int>(end - start);
- function_->code = {builder_->AddBytes(base::Vector<const byte>(start, len)),
- static_cast<uint32_t>(len)};
+ function_->code = {builder_->AddBytes(bytes),
+ static_cast<uint32_t>(bytes.size())};
if (interpreter_) {
// Add the code to the interpreter; do not generate compiled code.
- interpreter_->SetFunctionCodeForTesting(function_, start, end);
+ interpreter_->SetFunctionCodeForTesting(function_, bytes.begin(),
+ bytes.end());
return;
}
@@ -610,7 +584,19 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
NativeModule* native_module =
builder_->instance_object()->module_object().native_module();
ForDebugging for_debugging =
- native_module->IsTieredDown() ? kForDebugging : kNoDebugging;
+ native_module->IsInDebugState() ? kForDebugging : kNotForDebugging;
+
+ WasmFeatures unused_detected_features;
+ // Validate Wasm modules; asm.js is assumed to be always valid.
+ if (env.module->origin == kWasmOrigin) {
+ DecodeResult validation_result = ValidateFunctionBody(
+ env.enabled_features, env.module, &unused_detected_features, func_body);
+ if (validation_result.failed()) {
+ FATAL("Validation failed: %s",
+ validation_result.error().message().c_str());
+ }
+ env.module->set_function_validated(function_->func_index);
+ }
base::Optional<WasmCompilationResult> result;
if (builder_->test_execution_tier() ==
@@ -625,11 +611,11 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
} else {
WasmCompilationUnit unit(function_->func_index, builder_->execution_tier(),
for_debugging);
- WasmFeatures unused_detected_features;
result.emplace(unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage().get(),
nullptr, nullptr, &unused_detected_features));
}
+ CHECK(result->succeeded());
WasmCode* code = native_module->PublishCode(
native_module->AddCompiledCode(std::move(*result)));
DCHECK_NOT_NULL(code);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index fc96870157..506159f471 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -82,12 +82,6 @@ using compiler::Node;
#define WASM_WRAPPER_RETURN_VALUE 8754
-#define BUILD(r, ...) \
- do { \
- byte __code[] = {__VA_ARGS__}; \
- r.Build(__code, __code + arraysize(__code)); \
- } while (false)
-
#define ADD_CODE(vec, ...) \
do { \
byte __buf[] = {__VA_ARGS__}; \
@@ -111,13 +105,11 @@ bool IsSameNan(double expected, double actual);
// the interpreter.
class TestingModuleBuilder {
public:
- TestingModuleBuilder(Zone*, ManuallyImportedJSFunction*, TestExecutionTier,
- RuntimeExceptionSupport, TestingModuleMemoryType,
- Isolate* isolate);
+ TestingModuleBuilder(Zone*, ModuleOrigin origin, ManuallyImportedJSFunction*,
+ TestExecutionTier, RuntimeExceptionSupport,
+ TestingModuleMemoryType, Isolate* isolate);
~TestingModuleBuilder();
- void ChangeOriginToAsmjs() { test_module_->origin = kAsmJsSloppyOrigin; }
-
byte* AddMemory(uint32_t size, SharedFlag shared = SharedFlag::kNotShared);
size_t CodeTableLength() const { return native_module_->num_functions(); }
@@ -134,8 +126,9 @@ class TestingModuleBuilder {
return reinterpret_cast<T*>(globals_data_ + global->offset);
}
+ // TODO(7748): Allow selecting type finality.
byte AddSignature(const FunctionSig* sig) {
- test_module_->add_signature(sig, kNoSuperType);
+ test_module_->add_signature(sig, kNoSuperType, v8_flags.wasm_final_types);
GetTypeCanonicalizer()->AddRecursiveGroup(test_module_.get(), 1);
instance_object_->set_isorecursive_canonical_types(
test_module_->isorecursive_canonical_type_ids.data());
@@ -225,7 +218,6 @@ class TestingModuleBuilder {
uint32_t AddException(const FunctionSig* sig);
uint32_t AddPassiveDataSegment(base::Vector<const byte> bytes);
- uint32_t AddPassiveElementSegment(const std::vector<uint32_t>& entries);
WasmFunction* GetFunctionAt(int index) {
return &test_module_->functions[index];
@@ -244,14 +236,15 @@ class TestingModuleBuilder {
return reinterpret_cast<Address>(globals_data_);
}
- void SetTieredDown() {
- native_module_->SetTieringState(kTieredDown);
+ void SetDebugState() {
+ native_module_->SetDebugState(kDebugging);
execution_tier_ = TestExecutionTier::kLiftoff;
}
- void TierDown() {
- SetTieredDown();
- native_module_->RecompileForTiering();
+ void SwitchToDebug() {
+ SetDebugState();
+ native_module_->RemoveCompiledCode(
+ NativeModule::RemoveFilter::kRemoveNonDebugCode);
}
CompilationEnv CreateCompilationEnv();
@@ -374,7 +367,10 @@ class WasmFunctionCompiler : public compiler::GraphAndBuilders {
uint32_t function_index() { return function_->func_index; }
uint32_t sig_index() { return function_->sig_index; }
- void Build(const byte* start, const byte* end);
+ void Build(std::initializer_list<const uint8_t> bytes) {
+ Build(base::VectorOf(bytes));
+ }
+ void Build(base::Vector<const uint8_t> bytes);
byte AllocateLocal(ValueType type) {
uint32_t index = local_decls.AddLocals(1, type);
@@ -406,7 +402,7 @@ class WasmFunctionCompiler : public compiler::GraphAndBuilders {
// code, and run that code.
class WasmRunnerBase : public InitializedHandleScope {
public:
- WasmRunnerBase(ManuallyImportedJSFunction* maybe_import,
+ WasmRunnerBase(ManuallyImportedJSFunction* maybe_import, ModuleOrigin origin,
TestExecutionTier execution_tier, int num_params,
RuntimeExceptionSupport runtime_exception_support =
kNoRuntimeExceptionSupport,
@@ -414,7 +410,7 @@ class WasmRunnerBase : public InitializedHandleScope {
Isolate* isolate = nullptr)
: InitializedHandleScope(isolate),
zone_(&allocator_, ZONE_NAME, kCompressGraphZone),
- builder_(&zone_, maybe_import, execution_tier,
+ builder_(&zone_, origin, maybe_import, execution_tier,
runtime_exception_support, mem_type, isolate),
wrapper_(&zone_, num_params) {}
@@ -430,10 +426,16 @@ class WasmRunnerBase : public InitializedHandleScope {
// Builds a graph from the given Wasm code and generates the machine
// code and call wrapper for that graph. This method must not be called
// more than once.
- void Build(const byte* start, const byte* end) {
+ void Build(const uint8_t* start, const uint8_t* end) {
+ Build(base::VectorOf(start, end - start));
+ }
+ void Build(std::initializer_list<const uint8_t> bytes) {
+ Build(base::VectorOf(bytes));
+ }
+ void Build(base::Vector<const uint8_t> bytes) {
CHECK(!compiled_);
compiled_ = true;
- functions_[0]->Build(start, end);
+ functions_[0]->Build(bytes);
}
// Resets the state for building the next function.
@@ -471,7 +473,7 @@ class WasmRunnerBase : public InitializedHandleScope {
bool interpret() { return builder_.interpret(); }
- void TierDown() { builder_.TierDown(); }
+ void SwitchToDebug() { builder_.SwitchToDebug(); }
template <typename ReturnType, typename... ParamTypes>
FunctionSig* CreateSig() {
@@ -574,14 +576,16 @@ template <typename ReturnType, typename... ParamTypes>
class WasmRunner : public WasmRunnerBase {
public:
explicit WasmRunner(TestExecutionTier execution_tier,
+ ModuleOrigin origin = kWasmOrigin,
ManuallyImportedJSFunction* maybe_import = nullptr,
const char* main_fn_name = "main",
RuntimeExceptionSupport runtime_exception_support =
kNoRuntimeExceptionSupport,
TestingModuleMemoryType mem_type = kMemory32,
Isolate* isolate = nullptr)
- : WasmRunnerBase(maybe_import, execution_tier, sizeof...(ParamTypes),
- runtime_exception_support, mem_type, isolate) {
+ : WasmRunnerBase(maybe_import, origin, execution_tier,
+ sizeof...(ParamTypes), runtime_exception_support,
+ mem_type, isolate) {
WasmFunctionCompiler& main_fn =
NewFunction<ReturnType, ParamTypes...>(main_fn_name);
// Non-zero if there is an import.
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
index 5d280d6a58..44dc6ce075 100644
--- a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
@@ -30,9 +30,9 @@ void RunI8x16UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT8_INPUTS(x) {
r.Call(x);
@@ -53,11 +53,11 @@ void RunI8x16BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
for (T x : compiler::ValueHelper::GetVector<T>()) {
for (T y : compiler::ValueHelper::GetVector<T>()) {
@@ -88,14 +88,14 @@ void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value = 0;
byte simd = r.AllocateLocal(kWasmS128);
// Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(
+ 1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE});
r.builder().WriteMemory(&memory[0], shift);
FOR_INT8_INPUTS(x) {
@@ -116,11 +116,11 @@ void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3))});
CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
r.Call(0xff, 0x7fff));
@@ -138,9 +138,9 @@ void RunI16x8UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT16_INPUTS(x) {
r.Call(x);
@@ -161,11 +161,11 @@ void RunI16x8BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
for (T x : compiler::ValueHelper::GetVector<T>()) {
for (T y : compiler::ValueHelper::GetVector<T>()) {
@@ -195,14 +195,14 @@ void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value = 0;
byte simd = r.AllocateLocal(kWasmS128);
// Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(
+ 1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE});
r.builder().WriteMemory(&memory[0], shift);
FOR_INT16_INPUTS(x) {
@@ -223,11 +223,11 @@ void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3))});
CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
r.Call(0xffff, 0x7fffffff));
@@ -245,9 +245,9 @@ void RunI32x4UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT32_INPUTS(x) {
r.Call(x);
@@ -267,11 +267,11 @@ void RunI32x4BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
FOR_INT32_INPUTS(x) {
FOR_INT32_INPUTS(y) {
@@ -295,14 +295,14 @@ void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value = 0;
byte simd = r.AllocateLocal(kWasmS128);
// Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(
+ 1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE});
r.builder().WriteMemory(&memory[0], shift);
FOR_INT32_INPUTS(x) {
@@ -324,9 +324,9 @@ void RunI64x2UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_INT64_INPUTS(x) {
r.Call(x);
@@ -346,11 +346,11 @@ void RunI64x2BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
FOR_INT64_INPUTS(x) {
FOR_INT64_INPUTS(y) {
@@ -374,14 +374,14 @@ void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value = 0;
byte simd = r.AllocateLocal(kWasmS128);
// Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(
+ 1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE});
r.builder().WriteMemory(&memory[0], shift);
FOR_INT64_INPUTS(x) {
@@ -448,9 +448,9 @@ void RunF32x4UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_FLOAT32_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
@@ -508,11 +508,11 @@ void RunF32x4BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
FOR_FLOAT32_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
@@ -556,11 +556,11 @@ void RunF32x4CompareOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
FOR_FLOAT32_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
@@ -630,9 +630,9 @@ void RunF64x2UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
// Build fn to splat test value, perform unop, and write the result.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE});
FOR_FLOAT64_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
@@ -675,11 +675,11 @@ void RunF64x2BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
FOR_FLOAT64_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
@@ -724,14 +724,14 @@ void RunF64x2CompareOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
byte temp2 = r.AllocateLocal(kWasmS128);
// Make the lanes of each temp compare differently:
// temp1 = y, x and temp2 = y, y.
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp1,
- WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
+ r.Build({WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp1,
+ WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE});
FOR_FLOAT64_INPUTS(x) {
if (!PlatformCanRepresent(x)) continue;
diff --git a/deps/v8/test/common/call-tester.h b/deps/v8/test/common/call-tester.h
index 81fe72f619..6659dc4caf 100644
--- a/deps/v8/test/common/call-tester.h
+++ b/deps/v8/test/common/call-tester.h
@@ -53,18 +53,17 @@ Object CallHelper<Object>::Call(Params... args) {
template <typename T>
class CodeRunner : public CallHelper<T> {
public:
- CodeRunner(Isolate* isolate, Handle<Code> code, MachineSignature* csig)
+ CodeRunner(Isolate* isolate, Handle<InstructionStream> code,
+ MachineSignature* csig)
: CallHelper<T>(isolate, csig), code_(code) {}
-#ifdef V8_EXTERNAL_CODE_SPACE
- CodeRunner(Isolate* isolate, Handle<CodeT> code, MachineSignature* csig)
- : CallHelper<T>(isolate, csig), code_(FromCodeT(*code), isolate) {}
-#endif // V8_EXTERNAL_CODE_SPACE
+ CodeRunner(Isolate* isolate, Handle<Code> code, MachineSignature* csig)
+ : CallHelper<T>(isolate, csig), code_(FromCode(*code), isolate) {}
~CodeRunner() override = default;
Address Generate() override { return code_->entry(); }
private:
- Handle<Code> code_;
+ Handle<InstructionStream> code_;
};
} // namespace compiler
diff --git a/deps/v8/test/common/code-assembler-tester.h b/deps/v8/test/common/code-assembler-tester.h
index b7f021dade..9a0fca885e 100644
--- a/deps/v8/test/common/code-assembler-tester.h
+++ b/deps/v8/test/common/code-assembler-tester.h
@@ -36,11 +36,14 @@ class CodeAssemblerTester {
const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
- state_(isolate, &zone_, parameter_count, kind, name) {}
+ state_(isolate, &zone_, parameter_count, kind, name) {
+ // Parameter count must include at least the receiver.
+ DCHECK_LE(1, parameter_count);
+ }
CodeAssemblerTester(Isolate* isolate, CodeKind kind,
const char* name = "test")
- : CodeAssemblerTester(isolate, 0, kind, name) {}
+ : CodeAssemblerTester(isolate, 1, kind, name) {}
CodeAssemblerTester(Isolate* isolate, CallDescriptor* call_descriptor,
const char* name = "test")
diff --git a/deps/v8/test/common/types-fuzz.h b/deps/v8/test/common/types-fuzz.h
index ba7c4ce702..eaed1ed364 100644
--- a/deps/v8/test/common/types-fuzz.h
+++ b/deps/v8/test/common/types-fuzz.h
@@ -31,6 +31,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
@@ -41,7 +42,10 @@ namespace compiler {
class Types {
public:
Types(Zone* zone, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
- : zone_(zone), js_heap_broker_(isolate, zone), rng_(rng) {
+ : zone_(zone),
+ js_heap_broker_(isolate, zone),
+ current_broker_(&js_heap_broker_),
+ rng_(rng) {
#define DECLARE_TYPE(name, value) \
name = Type::name(); \
types.push_back(name);
@@ -209,6 +213,7 @@ class Types {
private:
Zone* zone_;
JSHeapBroker js_heap_broker_;
+ CurrentHeapBrokerScope current_broker_;
v8::base::RandomNumberGenerator* rng_;
};
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index d8a5318833..085355c88f 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -751,6 +751,14 @@ class SideTable : public ZoneObject {
max_exception_arity, static_cast<int>(tag.sig->parameter_count()));
}
}
+ WasmFeatures unused_detected_features;
+ WasmDecoder<Decoder::NoValidationTag> decoder{zone,
+ module,
+ WasmFeatures::All(),
+ &unused_detected_features,
+ code->function->sig,
+ code->start,
+ code->end};
for (BytecodeIterator i(code->start, code->end, &code->locals, zone);
i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
@@ -794,11 +802,9 @@ class SideTable : public ZoneObject {
case kExprBlock:
case kExprLoop: {
bool is_loop = opcode == kExprLoop;
- BlockTypeImmediate imm(WasmFeatures::All(), &i, i.pc() + 1,
+ BlockTypeImmediate imm(WasmFeatures::All(), &decoder, i.pc() + 1,
kNoValidate);
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
+ CHECK(decoder.Validate(i.pc() + 1, imm));
TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
DCHECK_IMPLIES(!unreachable,
@@ -816,11 +822,9 @@ class SideTable : public ZoneObject {
break;
}
case kExprIf: {
- BlockTypeImmediate imm(WasmFeatures::All(), &i, i.pc() + 1,
+ BlockTypeImmediate imm(WasmFeatures::All(), &decoder, i.pc() + 1,
kNoValidate);
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
+ CHECK(decoder.Validate(i.pc() + 1, imm));
TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
imm.in_arity(), imm.out_arity());
DCHECK_IMPLIES(!unreachable,
@@ -877,11 +881,9 @@ class SideTable : public ZoneObject {
break;
}
case kExprTry: {
- BlockTypeImmediate imm(WasmFeatures::All(), &i, i.pc() + 1,
+ BlockTypeImmediate imm(WasmFeatures::All(), &decoder, i.pc() + 1,
kNoValidate);
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
+ CHECK(decoder.Validate(i.pc() + 1, imm));
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
imm.in_arity(), imm.out_arity());
int target_stack_height = stack_height - imm.in_arity();
@@ -901,7 +903,7 @@ class SideTable : public ZoneObject {
break;
}
case kExprRethrow: {
- BranchDepthImmediate imm(&i, i.pc() + 1, kNoValidate);
+ BranchDepthImmediate imm(&decoder, i.pc() + 1, kNoValidate);
int index = static_cast<int>(control_stack.size()) - 1 - imm.depth;
rethrow_map_.emplace(i.pc() - i.start(), index);
break;
@@ -912,7 +914,7 @@ class SideTable : public ZoneObject {
// Only pop the exception stack once when we enter the first catch.
exception_stack.pop_back();
}
- TagIndexImmediate imm(&i, i.pc() + 1, kNoValidate);
+ TagIndexImmediate imm(&decoder, i.pc() + 1, kNoValidate);
Control* c = &control_stack.back();
copy_unreachable();
TRACE("control @%u: Catch\n", i.pc_offset());
@@ -980,7 +982,7 @@ class SideTable : public ZoneObject {
break;
}
case kExprDelegate: {
- BranchDepthImmediate imm(&i, i.pc() + 1, kNoValidate);
+ BranchDepthImmediate imm(&decoder, i.pc() + 1, kNoValidate);
TRACE("control @%u: Delegate[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack.back();
const size_t new_stack_size = control_stack.size() - 1;
@@ -1018,21 +1020,21 @@ class SideTable : public ZoneObject {
break;
}
case kExprBr: {
- BranchDepthImmediate imm(&i, i.pc() + 1, kNoValidate);
+ BranchDepthImmediate imm(&decoder, i.pc() + 1, kNoValidate);
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
- BranchDepthImmediate imm(&i, i.pc() + 1, kNoValidate);
+ BranchDepthImmediate imm(&decoder, i.pc() + 1, kNoValidate);
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrTable: {
- BranchTableImmediate imm(&i, i.pc() + 1, kNoValidate);
+ BranchTableImmediate imm(&decoder, i.pc() + 1, kNoValidate);
BranchTableIterator<Decoder::NoValidationTag> iterator(&i, imm);
TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
imm.table_count);
@@ -1208,7 +1210,7 @@ V8_INLINE bool has_nondeterminism<double>(double val) {
class WasmInterpreterInternals {
public:
WasmInterpreterInternals(Zone* zone, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
+ ModuleWireBytes wire_bytes,
Handle<WasmInstanceObject> instance_object)
: module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
codemap_(module, module_bytes_.data(), zone),
@@ -1910,7 +1912,8 @@ class WasmInterpreterInternals {
IndexImmediate imm(decoder, code->at(pc + *len),
"element segment index", kNoValidate);
*len += imm.length;
- instance_object_->dropped_elem_segments().set(imm.index, 1);
+ instance_object_->element_segments().set(
+ imm.index, *isolate_->factory()->empty_fixed_array());
return true;
}
case kExprTableCopy: {
@@ -2863,18 +2866,19 @@ class WasmInterpreterInternals {
REDUCTION_CASE(I16x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(I8x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
-#define QFM_CASE(op, name, stype, count, operation) \
- case kExpr##op: { \
- stype c = Pop().to_s128().to_##name(); \
- stype b = Pop().to_s128().to_##name(); \
- stype a = Pop().to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; i++) { \
- res.val[LANE(i, res)] = \
- a.val[LANE(i, a)] operation(b.val[LANE(i, b)] * c.val[LANE(i, c)]); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define QFM_CASE(op, name, stype, count, operation) \
+ case kExpr##op: { \
+ stype c = Pop().to_s128().to_##name(); \
+ stype b = Pop().to_s128().to_##name(); \
+ stype a = Pop().to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; i++) { \
+ res.val[LANE(i, res)] = \
+ operation(a.val[LANE(i, a)] * b.val[LANE(i, b)]) + \
+ c.val[LANE(i, c)]; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
@@ -3321,11 +3325,10 @@ class WasmInterpreterInternals {
WasmOpcode opcode = static_cast<WasmOpcode>(orig);
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- uint32_t prefixed_opcode_length = 0;
- opcode = decoder.read_prefixed_opcode<Decoder::NoValidationTag>(
- code->at(pc), &prefixed_opcode_length);
// read_prefixed_opcode includes the prefix byte, overwrite len.
- len = prefixed_opcode_length;
+ std::tie(opcode, len) =
+ decoder.read_prefixed_opcode<Decoder::NoValidationTag>(
+ code->at(pc));
}
// If max is 0, break. If max is positive (a limit is set), decrement it.
@@ -4147,7 +4150,7 @@ Handle<WasmInstanceObject> MakeWeak(
// Implementation of the public interface of the interpreter.
//============================================================================
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
+ ModuleWireBytes wire_bytes,
Handle<WasmInstanceObject> instance_object)
: zone_(isolate->allocator(), ZONE_NAME),
internals_(new WasmInterpreterInternals(
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.h b/deps/v8/test/common/wasm/wasm-interpreter.h
index 10d6f74593..41fe50ba43 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.h
+++ b/deps/v8/test/common/wasm/wasm-interpreter.h
@@ -71,7 +71,7 @@ class WasmInterpreter {
enum ExceptionHandlingResult { HANDLED, UNWOUND };
WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
+ ModuleWireBytes wire_bytes,
Handle<WasmInstanceObject> instance);
~WasmInterpreter();
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 65115917a9..eaae6ea17f 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -525,18 +525,34 @@ inline uint16_t ExtractPrefixedOpcodeBytes(WasmOpcode opcode) {
ref, WASM_GC_OP(kExprRefTestDeprecated), static_cast<byte>(typeidx)
#define WASM_REF_TEST(ref, typeidx) \
ref, WASM_GC_OP(kExprRefTest), static_cast<byte>(typeidx)
+#define WASM_REF_TEST_NULL(ref, typeidx) \
+ ref, WASM_GC_OP(kExprRefTestNull), static_cast<byte>(typeidx)
#define WASM_REF_CAST_DEPRECATED(ref, typeidx) \
ref, WASM_GC_OP(kExprRefCastDeprecated), static_cast<byte>(typeidx)
#define WASM_REF_CAST(ref, typeidx) \
ref, WASM_GC_OP(kExprRefCast), static_cast<byte>(typeidx)
+#define WASM_REF_CAST_NULL(ref, typeidx) \
+ ref, WASM_GC_OP(kExprRefCastNull), static_cast<byte>(typeidx)
// Takes a reference value from the value stack to allow sequences of
// conditional branches.
#define WASM_BR_ON_CAST(depth, typeidx) \
WASM_GC_OP(kExprBrOnCast), static_cast<byte>(depth), \
static_cast<byte>(typeidx)
+#define WASM_BR_ON_CAST_NULL(depth, typeidx) \
+ WASM_GC_OP(kExprBrOnCastNull), static_cast<byte>(depth), \
+ static_cast<byte>(typeidx)
+#define WASM_BR_ON_CAST_DEPRECATED(depth, typeidx) \
+ WASM_GC_OP(kExprBrOnCastDeprecated), static_cast<byte>(depth), \
+ static_cast<byte>(typeidx)
#define WASM_BR_ON_CAST_FAIL(depth, typeidx) \
WASM_GC_OP(kExprBrOnCastFail), static_cast<byte>(depth), \
static_cast<byte>(typeidx)
+#define WASM_BR_ON_CAST_FAIL_NULL(depth, typeidx) \
+ WASM_GC_OP(kExprBrOnCastFailNull), static_cast<byte>(depth), \
+ static_cast<byte>(typeidx)
+#define WASM_BR_ON_CAST_FAIL_DEPRECATED(depth, typeidx) \
+ WASM_GC_OP(kExprBrOnCastFailDeprecated), static_cast<byte>(depth), \
+ static_cast<byte>(typeidx)
#define WASM_GC_INTERNALIZE(extern) extern, WASM_GC_OP(kExprExternInternalize)
#define WASM_GC_EXTERNALIZE(ref) ref, WASM_GC_OP(kExprExternExternalize)
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 797239270e..bab0241c70 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -25,7 +25,7 @@ namespace testing {
MaybeHandle<WasmModuleObject> CompileForTesting(Isolate* isolate,
ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
+ ModuleWireBytes bytes) {
auto enabled_features = WasmFeatures::FromIsolate(isolate);
MaybeHandle<WasmModuleObject> module =
GetWasmEngine()->SyncCompile(isolate, enabled_features, thrower, bytes);
@@ -34,7 +34,7 @@ MaybeHandle<WasmModuleObject> CompileForTesting(Isolate* isolate,
}
MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+ Isolate* isolate, ErrorThrower* thrower, ModuleWireBytes bytes) {
MaybeHandle<WasmModuleObject> module =
CompileForTesting(isolate, thrower, bytes);
if (module.is_null()) return {};
@@ -106,6 +106,8 @@ base::OwnedVector<Handle<Object>> MakeDefaultArguments(Isolate* isolate,
arguments[i] = isolate->factory()->null_value();
break;
case kRef:
+ arguments[i] = isolate->factory()->undefined_value();
+ break;
case kRtt:
case kI8:
case kI16:
@@ -128,7 +130,7 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
return -1;
}
return CallWasmFunctionForTesting(isolate, instance.ToHandleChecked(), "main",
- 0, nullptr);
+ {});
}
WasmInterpretationResult InterpretWasmModule(
@@ -143,8 +145,7 @@ WasmInterpretationResult InterpretWasmModule(
CHECK(func->exported);
// This would normally be handled by export wrappers.
- if (!IsJSCompatibleSignature(func->sig, instance->module(),
- WasmFeatures::FromIsolate(isolate))) {
+ if (!IsJSCompatibleSignature(func->sig)) {
return WasmInterpretationResult::Trapped(false);
}
@@ -217,9 +218,10 @@ MaybeHandle<WasmExportedFunction> GetExportedFunction(
int32_t CallWasmFunctionForTesting(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- const char* name, int argc,
- Handle<Object> argv[], bool* exception) {
- if (exception) *exception = false;
+ const char* name,
+ base::Vector<Handle<Object>> args,
+ std::unique_ptr<const char[]>* exception) {
+ DCHECK_IMPLIES(exception != nullptr, *exception == nullptr);
MaybeHandle<WasmExportedFunction> maybe_export =
GetExportedFunction(isolate, instance, name);
Handle<WasmExportedFunction> main_export;
@@ -229,14 +231,18 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate,
// Call the JS function.
Handle<Object> undefined = isolate->factory()->undefined_value();
- MaybeHandle<Object> retval =
- Execution::Call(isolate, main_export, undefined, argc, argv);
+ MaybeHandle<Object> retval = Execution::Call(isolate, main_export, undefined,
+ args.length(), args.begin());
// The result should be a number.
if (retval.is_null()) {
DCHECK(isolate->has_pending_exception());
+ if (exception) {
+ Handle<String> exception_string = Object::NoSideEffectsToString(
+ isolate, handle(isolate->pending_exception(), isolate));
+ *exception = exception_string->ToCString();
+ }
isolate->clear_pending_exception();
- if (exception) *exception = true;
return -1;
}
Handle<Object> result = retval.ToHandleChecked();
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index 027ce4ec10..00ddca519f 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -30,13 +30,12 @@ MaybeHandle<WasmExportedFunction> GetExportedFunction(
// Call an exported wasm function by name. Returns -1 if the export does not
// exist or throws an error. Errors are cleared from the isolate before
-// returning. {exception} is set to to true if an exception happened during
-// execution of the wasm function.
-int32_t CallWasmFunctionForTesting(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- const char* name, int argc,
- Handle<Object> argv[],
- bool* exception = nullptr);
+// returning. {exception} is set to a string representation of the exception (if
+// set and an exception occurs).
+int32_t CallWasmFunctionForTesting(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, const char* name,
+ base::Vector<Handle<Object>> args,
+ std::unique_ptr<const char[]>* exception = nullptr);
// Decode, verify, and run the function labeled "main" in the
// given encoded module. The module should have no imports.
@@ -46,11 +45,11 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
// Decode and compile the given module with no imports.
MaybeHandle<WasmModuleObject> CompileForTesting(Isolate* isolate,
ErrorThrower* thrower,
- const ModuleWireBytes& bytes);
+ ModuleWireBytes bytes);
// Decode, compile, and instantiate the given module with no imports.
MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+ Isolate* isolate, ErrorThrower* thrower, ModuleWireBytes bytes);
class WasmInterpretationResult {
public:
diff --git a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
index 6a5011f790..fa6e32529d 100644
--- a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
+++ b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
@@ -17,19 +17,21 @@ function create_builder(delta = 0) {
return builder;
}
-function checkTieredDown(instance) {
+function checkDebugCode(instance) {
for (let i = 0; i < num_functions; ++i) {
- assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
+ // Call the function once because of lazy compilation.
+ instance.exports['f' + i]();
+ assertTrue(%IsWasmDebugFunction(instance.exports['f' + i]));
}
}
-function waitForTieredUp(instance) {
- // Busy waiting until all functions are tiered up.
+function waitForNoDebugCode(instance) {
+ // Busy waiting until all functions left debug mode.
let num_liftoff_functions = 0;
while (true) {
num_liftoff_functions = 0;
for (let i = 0; i < num_functions; ++i) {
- if (%IsLiftoffFunction(instance.exports['f' + i])) {
+ if (%IsWasmDebugFunction(instance.exports['f' + i])) {
num_liftoff_functions++;
}
}
@@ -39,37 +41,37 @@ function waitForTieredUp(instance) {
const Debug = new DebugWrapper();
-(function testTierDownToLiftoff() {
+(function testEnterDebugMode() {
// In the 'isolates' test, this test runs in parallel to itself on two
// isolates. All checks below should still hold.
const instance = create_builder(0).instantiate();
Debug.enable();
- checkTieredDown(instance);
+ checkDebugCode(instance);
const instance2 = create_builder(1).instantiate();
- checkTieredDown(instance2);
+ checkDebugCode(instance2);
Debug.disable();
- // Eventually the instances will be completely tiered up again.
- waitForTieredUp(instance);
- waitForTieredUp(instance2);
+ // Eventually the instances will have completely left debug mode again.
+ waitForNoDebugCode(instance);
+ waitForNoDebugCode(instance2);
})();
// Test async compilation.
-assertPromiseResult((async function testTierDownToLiftoffAsync() {
+assertPromiseResult((async function testEnterDebugModeAsync() {
// First test: enable the debugger *after* compiling the module.
const instance = await create_builder(2).asyncInstantiate();
Debug.enable();
- checkTieredDown(instance);
+ checkDebugCode(instance);
const instance2 = await create_builder(3).asyncInstantiate();
- checkTieredDown(instance2);
+ checkDebugCode(instance2);
Debug.disable();
- waitForTieredUp(instance);
- waitForTieredUp(instance2);
+ waitForNoDebugCode(instance);
+ waitForNoDebugCode(instance2);
// Second test: enable the debugger *while* compiling the module.
const instancePromise = create_builder(4).asyncInstantiate();
Debug.enable();
const instance3 = await instancePromise;
- checkTieredDown(instance3);
+ checkDebugCode(instance3);
Debug.disable();
- waitForTieredUp(instance3);
+ waitForNoDebugCode(instance3);
})());
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index 1f3e49d24f..fb7169bdfa 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -55,7 +55,7 @@ class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = self._source_files
- if self._test_config.isolates:
+ if self.test_config.isolates:
files = files + ['--isolate'] + files
return files
diff --git a/deps/v8/test/debugging/testcfg.py b/deps/v8/test/debugging/testcfg.py
index f5d15a9f93..4976768b2f 100644
--- a/deps/v8/test/debugging/testcfg.py
+++ b/deps/v8/test/debugging/testcfg.py
@@ -23,7 +23,11 @@ class PYTestCase(testcase.TestCase):
return super(PYTestCase, self).get_command()
def _get_cmd_params(self):
- return self._get_files_params() + ['--', os.path.join(self._test_config.shell_dir, 'd8')] + self._get_source_flags()
+ return (
+ self._get_files_params() +
+ ['--', os.path.join(self.test_config.shell_dir, 'd8')] +
+ self._get_source_flags()
+ )
def _get_shell_flags(self):
return []
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 62419539ed..e54e095cb1 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -134,7 +134,7 @@ CallDescriptor* CreateRandomCallDescriptor(Zone* zone, size_t return_count,
std::shared_ptr<wasm::NativeModule> AllocateNativeModule(i::Isolate* isolate,
size_t code_size) {
- std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule);
+ auto module = std::make_shared<wasm::WasmModule>(wasm::kWasmOrigin);
module->num_declared_functions = 1;
// We have to add the code object to a NativeModule, because the
@@ -160,7 +160,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
InputProvider input(data, size);
// Create randomized descriptor.
size_t param_count = input.NumNonZeroBytes(0, kNumTypes);
- if (param_count > Code::kMaxArguments) return 0;
+ if (param_count > InstructionStream::kMaxArguments) return 0;
size_t return_count = input.NumNonZeroBytes(param_count + 1, kNumTypes);
if (return_count > wasm::kV8MaxWasmFunctionReturns) return 0;
@@ -246,7 +246,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
.ToHandleChecked();
std::shared_ptr<wasm::NativeModule> module =
- AllocateNativeModule(i_isolate, code->raw_instruction_size());
+ AllocateNativeModule(i_isolate, code->InstructionSize());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
// Generate wrapper.
@@ -291,7 +291,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
AssemblerOptions::Default(i_isolate), caller.ExportForTest())
.ToHandleChecked();
- auto fn = GeneratedCode<int32_t>::FromCode(*wrapper_code);
+ auto fn = GeneratedCode<int32_t>::FromCode(i_isolate, *wrapper_code);
int result = fn.Call();
CHECK_EQ(expect, result);
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 2e6cdfb4e1..2d939f6fb0 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -183,6 +183,9 @@ std::string PickRandomPresetPattern(FuzzerArgs* args) {
"\\p{General_Category=Decimal_Number}", "\\P{gc=Decimal_Number}",
"\\p{gc=Nd}", "\\P{Decimal_Number}", "\\p{Nd}", "\\P{Any}",
"\\p{Changes_When_NFKC_Casefolded}",
+ "[\\p{Script_Extensions=Greek}--[α-γ]]",
+ "[\\p{Script_Extensions=Mongolian}&&\\p{Number}]",
+ "[\\q{abc|def|0|5}--\\d]",
};
static constexpr int preset_pattern_count = arraysize(preset_patterns);
static_assert(preset_pattern_count < 0xFF);
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index a18914e2a1..488047d1d7 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -17,29 +17,29 @@
#include "test/fuzzer/fuzzer-support.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-namespace v8 {
-namespace internal {
+namespace v8::internal {
class WasmModuleObject;
+}
-namespace wasm {
-namespace fuzzer {
+namespace v8::internal::wasm::fuzzer {
-class AsyncFuzzerResolver : public i::wasm::CompilationResultResolver {
+class AsyncFuzzerResolver : public CompilationResultResolver {
public:
- AsyncFuzzerResolver(i::Isolate* isolate, bool* done)
+ AsyncFuzzerResolver(Isolate* isolate, bool* done)
: isolate_(isolate), done_(done) {}
- void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> module) override {
+ void OnCompilationSucceeded(Handle<WasmModuleObject> module) override {
*done_ = true;
- InterpretAndExecuteModule(isolate_, module);
+ ExecuteAgainstReference(isolate_, module,
+ kDefaultMaxFuzzerExecutedInstructions);
}
- void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ void OnCompilationFailed(Handle<Object> error_reason) override {
*done_ = true;
}
private:
- i::Isolate* isolate_;
+ Isolate* isolate_;
bool* done_;
};
@@ -52,7 +52,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8_flags.wasm_max_mem_pages = 32;
v8_flags.wasm_max_table_size = 100;
- i::Isolate* i_isolate = reinterpret_cast<v8::internal::Isolate*>(isolate);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
if (i_isolate->has_pending_exception()) {
@@ -61,19 +61,18 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- i::HandleScope internal_scope(i_isolate);
v8::Context::Scope context_scope(support->GetContext());
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- OneTimeEnableStagedWasmFeatures(isolate);
+ // We explicitly enable staged/experimental WebAssembly features here to
+ // increase fuzzer coverage. For libfuzzer fuzzers it is not possible that the
+ // fuzzer enables the flag by itself.
+ EnableExperimentalWasmFeatures(isolate);
TryCatch try_catch(isolate);
testing::SetupIsolateForWasmModule(i_isolate);
bool done = false;
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+ auto enabled_features = WasmFeatures::FromIsolate(i_isolate);
constexpr const char* kAPIMethodName = "WasmAsyncFuzzer.compile";
GetWasmEngine()->AsyncCompile(
i_isolate, enabled_features,
@@ -88,7 +87,4 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return 0;
}
-} // namespace fuzzer
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm::fuzzer
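Several of the fuzzer files in this change collapse four nested namespace blocks into a single C++17 nested namespace definition. A minimal sketch of the two equivalent spellings, using placeholder namespace names:

// Pre-C++17: one block and one closing brace per level.
namespace outer { namespace middle { namespace inner {
int f() { return 1; }
}  // namespace inner
}  // namespace middle
}  // namespace outer

// C++17 nested namespace definition: the same scope, one block.
namespace outer::middle::inner {
int g() { return f() + 1; }
}  // namespace outer::middle::inner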
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index d32617dc7e..558958c544 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -10,15 +10,12 @@
#include "test/common/wasm/test-signatures.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace fuzzer {
+namespace v8::internal::wasm::fuzzer {
class WasmCodeFuzzer : public WasmExecutionFuzzer {
bool GenerateModule(Isolate* isolate, Zone* zone,
- base::Vector<const uint8_t> data, ZoneBuffer* buffer,
- bool liftoff_as_reference) override {
+ base::Vector<const uint8_t> data,
+ ZoneBuffer* buffer) override {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
@@ -38,7 +35,4 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return 0;
}
-} // namespace fuzzer
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm::fuzzer
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index ddc3a87a46..cdd7201012 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -19,10 +19,7 @@
#include "test/common/wasm/test-signatures.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace fuzzer {
+namespace v8::internal::wasm::fuzzer {
namespace {
@@ -104,8 +101,7 @@ enum NonNullables { kAllowNonNullables, kDisallowNonNullables };
enum PackedTypes { kIncludePackedTypes, kExcludePackedTypes };
enum Generics { kIncludeGenerics, kExcludeGenerics };
-ValueType GetValueTypeHelper(DataRange* data, bool liftoff_as_reference,
- uint32_t num_nullable_types,
+ValueType GetValueTypeHelper(DataRange* data, uint32_t num_nullable_types,
uint32_t num_non_nullable_types,
NonNullables allow_non_nullable,
PackedTypes include_packed_types,
@@ -114,11 +110,6 @@ ValueType GetValueTypeHelper(DataRange* data, bool liftoff_as_reference,
std::vector<ValueType> types{kWasmI32, kWasmI64, kWasmF32, kWasmF64,
kWasmS128};
- if (!liftoff_as_reference) {
- return types[data->get<uint8_t>() % types.size()];
- }
-
- // If {liftoff_as_reference}, include wasm-gc types.
if (include_packed_types == kIncludePackedTypes) {
types.insert(types.end(), {kWasmI8, kWasmI16});
}
@@ -158,11 +149,9 @@ ValueType GetValueTypeHelper(DataRange* data, bool liftoff_as_reference,
return types[id];
}
-ValueType GetValueType(DataRange* data, bool liftoff_as_reference,
- uint32_t num_types) {
- return GetValueTypeHelper(data, liftoff_as_reference, num_types, num_types,
- kAllowNonNullables, kExcludePackedTypes,
- kIncludeGenerics);
+ValueType GetValueType(DataRange* data, uint32_t num_types) {
+ return GetValueTypeHelper(data, num_types, num_types, kAllowNonNullables,
+ kExcludePackedTypes, kIncludeGenerics);
}
class WasmGenerator {
@@ -203,7 +192,8 @@ class WasmGenerator {
builder.AddReturn(type);
}
FunctionSig* sig = builder.Build();
- int sig_id = gen->builder_->builder()->AddSignature(sig);
+ int sig_id = gen->builder_->builder()->AddSignature(
+ sig, v8_flags.wasm_final_types);
gen->builder_->EmitI32V(sig_id);
}
@@ -369,10 +359,6 @@ class WasmGenerator {
DCHECK(!blocks_.empty());
const uint32_t target_block = data->get<uint32_t>() % blocks_.size();
const auto break_types = base::VectorOf(blocks_[target_block]);
- if (!liftoff_as_reference_) {
- Generate<wanted_kind>(data);
- return;
- }
Generate(break_types, data);
GenerateRef(HeapType(HeapType::kAny), data);
builder_->EmitWithI32V(
@@ -566,9 +552,8 @@ class WasmGenerator {
}
void drop(DataRange* data) {
- Generate(GetValueType(data, liftoff_as_reference_,
- static_cast<uint32_t>(functions_.size()) +
- num_structs_ + num_arrays_),
+ Generate(GetValueType(data, static_cast<uint32_t>(functions_.size()) +
+ num_structs_ + num_arrays_),
data);
builder_->Emit(kExprDrop);
}
@@ -587,11 +572,7 @@ class WasmGenerator {
template <ValueKind wanted_kind>
void call_ref(DataRange* data) {
- if (liftoff_as_reference_) {
- call(data, ValueType::Primitive(wanted_kind), kCallRef);
- } else {
- Generate<wanted_kind>(data);
- }
+ call(data, ValueType::Primitive(wanted_kind), kCallRef);
}
void Convert(ValueType src, ValueType dst) {
@@ -634,8 +615,7 @@ class WasmGenerator {
// Emit call.
// If the return types of the callee happen to match the return types of the
// caller, generate a tail call.
- // TODO(thibaudm): Re-enable when crbug.com/1269989 is fixed.
- bool use_return_call = false;
+ bool use_return_call = random_byte > 127;
if (use_return_call &&
std::equal(sig->returns().begin(), sig->returns().end(),
builder_->signature()->returns().begin(),
@@ -853,7 +833,7 @@ class WasmGenerator {
}
bool new_object(HeapType type, DataRange* data, Nullability nullable) {
- DCHECK(liftoff_as_reference_ && type.is_index());
+ DCHECK(type.is_index());
uint32_t index = type.ref_index();
bool new_default = data->get<bool>();
@@ -1030,10 +1010,6 @@ class WasmGenerator {
}
void i31_get(DataRange* data) {
- if (!liftoff_as_reference_) {
- Generate(kWasmI32, data);
- return;
- }
GenerateRef(HeapType(HeapType::kI31), data);
builder_->Emit(kExprRefAsNonNull);
if (data->get<bool>()) {
@@ -1156,10 +1132,6 @@ class WasmGenerator {
}
void ref_eq(DataRange* data) {
- if (!liftoff_as_reference_) {
- Generate(kWasmI32, data);
- return;
- }
GenerateRef(HeapType(HeapType::kEq), data);
GenerateRef(HeapType(HeapType::kEq), data);
builder_->Emit(kExprRefEq);
@@ -1230,15 +1202,13 @@ class WasmGenerator {
WasmGenerator(WasmFunctionBuilder* fn, const std::vector<uint32_t>& functions,
const std::vector<ValueType>& globals,
const std::vector<uint8_t>& mutable_globals,
- uint32_t num_structs, uint32_t num_arrays, DataRange* data,
- bool liftoff_as_reference)
+ uint32_t num_structs, uint32_t num_arrays, DataRange* data)
: builder_(fn),
functions_(functions),
globals_(globals),
mutable_globals_(mutable_globals),
num_structs_(num_structs),
- num_arrays_(num_arrays),
- liftoff_as_reference_(liftoff_as_reference) {
+ num_arrays_(num_arrays) {
const FunctionSig* sig = fn->signature();
blocks_.emplace_back();
for (size_t i = 0; i < sig->return_count(); ++i) {
@@ -1249,9 +1219,9 @@ class WasmGenerator {
uint32_t num_types =
static_cast<uint32_t>(functions_.size()) + num_structs_ + num_arrays_;
for (ValueType& local : locals_) {
- local = GetValueTypeHelper(data, liftoff_as_reference_, num_types,
- num_types, kDisallowNonNullables,
- kExcludePackedTypes, kIncludeGenerics);
+ local =
+ GetValueTypeHelper(data, num_types, num_types, kDisallowNonNullables,
+ kExcludePackedTypes, kIncludeGenerics);
fn->AddLocal(local);
}
}
@@ -1291,7 +1261,6 @@ class WasmGenerator {
bool has_simd_;
uint32_t num_structs_;
uint32_t num_arrays_;
- bool liftoff_as_reference_;
static constexpr uint32_t kMaxRecursionDepth = 64;
bool recursion_limit_reached() {
@@ -2016,6 +1985,31 @@ void WasmGenerator::Generate<kS128>(DataRange* data) {
&WasmGenerator::simd_lane_memop<kExprS128Load16Lane, 8, kS128>,
&WasmGenerator::simd_lane_memop<kExprS128Load32Lane, 4, kS128>,
&WasmGenerator::simd_lane_memop<kExprS128Load64Lane, 2, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI8x16RelaxedSwizzle, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16RelaxedLaneSelect, kS128, kS128,
+ kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8RelaxedLaneSelect, kS128, kS128,
+ kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4RelaxedLaneSelect, kS128, kS128,
+ kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2RelaxedLaneSelect, kS128, kS128,
+ kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Qfma, kS128, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Qfms, kS128, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Qfma, kS128, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Qfms, kS128, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4RelaxedMin, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4RelaxedMax, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2RelaxedMin, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2RelaxedMax, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4RelaxedTruncF32x4S, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4RelaxedTruncF32x4U, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4RelaxedTruncF64x2SZero, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4RelaxedTruncF64x2UZero, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8DotI8x16I7x16S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4DotI8x16I7x16AddS, kS128, kS128,
+ kS128>,
};
GenerateOneOf(alternatives, data);
@@ -2080,14 +2074,6 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
switch (type.representation()) {
// For abstract types, sometimes generate one of their subtypes.
case HeapType::kAny: {
- // Note: It is possible we land here even without {liftoff_as_reference_}.
- // In this case, we do not support any subtyping, and just fall back to
- // directly generating anyref.
- if (!liftoff_as_reference_) {
- DCHECK(nullability);
- GenerateOneOf(alternatives_func_any, type, data, nullability);
- return;
- }
// Weighed according to the types in the module:
// If there are D data types and F function types, the relative
// frequencies for dataref is D, for funcref F, and for i31ref and falling
@@ -2103,7 +2089,6 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
// In order to know which alternative to fall back to in case
// GenerateOneOf failed, the random variable is recomputed.
if (random >= num_data_types + emit_i31ref) {
- DCHECK(liftoff_as_reference_);
if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
return;
}
@@ -2119,7 +2104,6 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
return;
}
case HeapType::kArray: {
- DCHECK(liftoff_as_reference_);
constexpr uint8_t fallback_to_dataref = 1;
uint8_t random =
data->get<uint8_t>() % (num_arrays_ + fallback_to_dataref);
@@ -2133,7 +2117,6 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
return;
}
case HeapType::kStruct: {
- DCHECK(liftoff_as_reference_);
constexpr uint8_t fallback_to_dataref = 2;
uint8_t random =
data->get<uint8_t>() % (num_structs_ + fallback_to_dataref);
@@ -2149,7 +2132,6 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
return;
}
case HeapType::kEq: {
- DCHECK(liftoff_as_reference_);
const uint8_t num_types = num_arrays_ + num_structs_;
const uint8_t emit_i31ref = 2;
constexpr uint8_t fallback_to_eqref = 1;
@@ -2180,19 +2162,12 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
}
random = data->get<uint32_t>() % functions_.size();
}
- if (liftoff_as_reference_) {
- // Only reduce to indexed type with liftoff as reference.
- uint32_t signature_index = functions_[random];
- DCHECK(builder_->builder()->IsSignature(signature_index));
- GenerateRef(HeapType(signature_index), data, nullability);
- } else {
- // If interpreter is used as reference, generate a ref.func directly.
- builder_->EmitWithU32V(kExprRefFunc, random);
- }
+ uint32_t signature_index = functions_[random];
+ DCHECK(builder_->builder()->IsSignature(signature_index));
+ GenerateRef(HeapType(signature_index), data, nullability);
return;
}
case HeapType::kI31: {
- DCHECK(liftoff_as_reference_);
// Try generating one of the alternatives
// and continue to the rest of the methods in case it fails.
if (data->get<bool>() &&
@@ -2213,7 +2188,6 @@ void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
default:
// Indexed type.
DCHECK(type.is_index());
- DCHECK(liftoff_as_reference_);
GenerateOneOf(alternatives_indexed_type, type, data, nullability);
return;
}
@@ -2225,7 +2199,7 @@ std::vector<ValueType> WasmGenerator::GenerateTypes(DataRange* data) {
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
for (int i = 0; i < num_params; ++i) {
types.push_back(GetValueType(
- data, liftoff_as_reference_,
+ data,
num_structs_ + num_arrays_ + static_cast<uint32_t>(functions_.size())));
}
return types;
@@ -2326,7 +2300,7 @@ void WasmGenerator::ConsumeAndGenerate(
enum SigKind { kFunctionSig, kExceptionSig };
FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
- bool liftoff_as_reference, int num_types) {
+ int num_types) {
// Generate enough parameters to spill some to the stack.
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
int num_returns = sig_kind == kFunctionSig
@@ -2335,19 +2309,20 @@ FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
FunctionSig::Builder builder(zone, num_returns, num_params);
for (int i = 0; i < num_returns; ++i) {
- builder.AddReturn(GetValueType(data, liftoff_as_reference, num_types));
+ builder.AddReturn(GetValueType(data, num_types));
}
for (int i = 0; i < num_params; ++i) {
- builder.AddParam(GetValueType(data, liftoff_as_reference, num_types));
+ builder.AddParam(GetValueType(data, num_types));
}
return builder.Build();
}
-WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
- ValueType type,
+WasmInitExpr GenerateInitExpr(Zone* zone, DataRange& range,
+ WasmModuleBuilder* builder, ValueType type,
uint32_t num_struct_and_array_types);
-WasmInitExpr GenerateStructNewInitExpr(Zone* zone, WasmModuleBuilder* builder,
+WasmInitExpr GenerateStructNewInitExpr(Zone* zone, DataRange& range,
+ WasmModuleBuilder* builder,
uint32_t index,
uint32_t num_struct_and_array_types) {
const StructType* struct_type = builder->GetStructType(index);
@@ -2355,25 +2330,62 @@ WasmInitExpr GenerateStructNewInitExpr(Zone* zone, WasmModuleBuilder* builder,
zone->New<ZoneVector<WasmInitExpr>>(zone);
int field_count = struct_type->field_count();
for (int field_index = 0; field_index < field_count; field_index++) {
- elements->push_back(GenerateInitExpr(zone, builder,
+ elements->push_back(GenerateInitExpr(zone, range, builder,
struct_type->field(field_index),
num_struct_and_array_types));
}
return WasmInitExpr::StructNew(index, elements);
}
-WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
- ValueType type,
+// TODO(manoskouk): Generate a variety of expressions for all cases.
+WasmInitExpr GenerateInitExpr(Zone* zone, DataRange& range,
+ WasmModuleBuilder* builder, ValueType type,
uint32_t num_struct_and_array_types) {
switch (type.kind()) {
case kRefNull:
return WasmInitExpr::RefNullConst(type.heap_type().representation());
case kI8:
case kI16:
- case kI32:
- return WasmInitExpr(int32_t{0});
- case kI64:
- return WasmInitExpr(int64_t{0});
+ case kI32: {
+ // 50% to generate a constant, 50% to generate a binary operator.
+ byte choice = range.get<byte>() % 6;
+ switch (choice) {
+ case 0:
+ case 1:
+ case 2:
+ return WasmInitExpr(range.get<int32_t>());
+ default:
+ WasmInitExpr::Operator op = choice == 3 ? WasmInitExpr::kI32Add
+ : choice == 4 ? WasmInitExpr::kI32Sub
+ : WasmInitExpr::kI32Mul;
+ return WasmInitExpr::Binop(
+ zone, op,
+ GenerateInitExpr(zone, range, builder, kWasmI32,
+ num_struct_and_array_types),
+ GenerateInitExpr(zone, range, builder, kWasmI32,
+ num_struct_and_array_types));
+ }
+ }
+ case kI64: {
+ // 50% to generate a constant, 50% to generate a binary operator.
+ byte choice = range.get<byte>() % 6;
+ switch (choice) {
+ case 0:
+ case 1:
+ case 2:
+ return WasmInitExpr(range.get<int64_t>());
+ default:
+ WasmInitExpr::Operator op = choice == 3 ? WasmInitExpr::kI64Add
+ : choice == 4 ? WasmInitExpr::kI64Sub
+ : WasmInitExpr::kI64Mul;
+ return WasmInitExpr::Binop(
+ zone, op,
+ GenerateInitExpr(zone, range, builder, kWasmI64,
+ num_struct_and_array_types),
+ GenerateInitExpr(zone, range, builder, kWasmI64,
+ num_struct_and_array_types));
+ }
+ }
case kF32:
return WasmInitExpr(0.0f);
case kF64:
@@ -2390,7 +2402,7 @@ WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
// We materialize all these types with a struct because they are all
// its supertypes.
DCHECK(builder->IsStructType(0));
- return GenerateStructNewInitExpr(zone, builder, 0,
+ return GenerateStructNewInitExpr(zone, range, builder, 0,
num_struct_and_array_types);
}
case HeapType::kFunc:
@@ -2400,15 +2412,16 @@ WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
default: {
uint32_t index = type.ref_index();
if (builder->IsStructType(index)) {
- return GenerateStructNewInitExpr(zone, builder, index,
+ return GenerateStructNewInitExpr(zone, range, builder, index,
num_struct_and_array_types);
}
if (builder->IsArrayType(index)) {
ZoneVector<WasmInitExpr>* elements =
zone->New<ZoneVector<WasmInitExpr>>(zone);
- elements->push_back(GenerateInitExpr(
- zone, builder, builder->GetArrayType(index)->element_type(),
- num_struct_and_array_types));
+ elements->push_back(
+ GenerateInitExpr(zone, range, builder,
+ builder->GetArrayType(index)->element_type(),
+ num_struct_and_array_types));
return WasmInitExpr::ArrayNewFixed(index, elements);
}
if (builder->IsSignature(index)) {
@@ -2430,8 +2443,8 @@ WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
class WasmCompileFuzzer : public WasmExecutionFuzzer {
bool GenerateModule(Isolate* isolate, Zone* zone,
- base::Vector<const uint8_t> data, ZoneBuffer* buffer,
- bool liftoff_as_reference) override {
+ base::Vector<const uint8_t> data,
+ ZoneBuffer* buffer) override {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
@@ -2444,72 +2457,68 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
// Currently, WasmGenerator assumes this order for struct/array/signature
// definitions.
- uint8_t num_structs = 0;
- uint8_t num_arrays = 0;
static_assert(kMaxFunctions >= 1, "need min. 1 function");
uint8_t num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
- uint16_t num_types = num_functions;
-
- if (liftoff_as_reference) {
- // We need at least one struct/array in order to support WasmInitExpr
- // for kData, kAny and kEq.
- num_structs = 1 + range.get<uint8_t>() % kMaxStructs;
- num_arrays = range.get<uint8_t>() % (kMaxArrays + 1);
- num_types += num_structs + num_arrays;
-
- for (int struct_index = 0; struct_index < num_structs; struct_index++) {
- uint8_t num_fields = range.get<uint8_t>() % (kMaxStructFields + 1);
- StructType::Builder struct_builder(zone, num_fields);
- for (int field_index = 0; field_index < num_fields; field_index++) {
- // Notes:
- // - We allow a type to only have non-nullable fields of types that
- // are defined earlier. This way we avoid infinite non-nullable
- // constructions. Also relevant for arrays and functions.
- // - Currently, we also allow nullable fields to only reference types
- // that are defined earlier. The reason is that every type can only
- // reference types in its own or earlier recursive groups, and we do
- // not support recursive groups yet. Also relevant for arrays and
- // functions. TODO(7748): Change the number of nullable types once
- // we support rec. groups.
- // - We exclude the generics types anyref, dataref, and eqref from the
- // fields of struct 0. This is because in GenerateInitExpr we
- // materialize these types with (ref 0), and having such fields in
- // struct 0 would produce an infinite recursion.
- ValueType type = GetValueTypeHelper(
- &range, true, builder.NumTypes(), builder.NumTypes(),
- kAllowNonNullables, kIncludePackedTypes,
- struct_index != 0 ? kIncludeGenerics : kExcludeGenerics);
-
- bool mutability = range.get<bool>();
- struct_builder.AddField(type, mutability);
- }
- StructType* struct_fuz = struct_builder.Build();
- builder.AddStructType(struct_fuz);
+
+ // We need at least one struct/array in order to support WasmInitExpr
+ // for kData, kAny and kEq.
+ uint8_t num_structs = 1 + range.get<uint8_t>() % kMaxStructs;
+ uint8_t num_arrays = range.get<uint8_t>() % (kMaxArrays + 1);
+ uint16_t num_types = num_functions + num_structs + num_arrays;
+
+ for (int struct_index = 0; struct_index < num_structs; struct_index++) {
+ uint8_t num_fields = range.get<uint8_t>() % (kMaxStructFields + 1);
+ StructType::Builder struct_builder(zone, num_fields);
+ for (int field_index = 0; field_index < num_fields; field_index++) {
+ // Notes:
+ // - We allow a type to only have non-nullable fields of types that
+ // are defined earlier. This way we avoid infinite non-nullable
+ // constructions. Also relevant for arrays and functions.
+ // - Currently, we also allow nullable fields to only reference types
+ // that are defined earlier. The reason is that every type can only
+ // reference types in its own or earlier recursive groups, and we do
+ // not support recursive groups yet. Also relevant for arrays and
+ // functions. TODO(7748): Change the number of nullable types once
+ // we support rec. groups.
+ // - We exclude the generics types anyref, dataref, and eqref from the
+ // fields of struct 0. This is because in GenerateInitExpr we
+ // materialize these types with (ref 0), and having such fields in
+ // struct 0 would produce an infinite recursion.
+ ValueType type = GetValueTypeHelper(
+ &range, builder.NumTypes(), builder.NumTypes(), kAllowNonNullables,
+ kIncludePackedTypes,
+ struct_index != 0 ? kIncludeGenerics : kExcludeGenerics);
+
+ bool mutability = range.get<bool>();
+ struct_builder.AddField(type, mutability);
}
+ StructType* struct_fuz = struct_builder.Build();
+ builder.AddStructType(struct_fuz, false);
+ }
for (int array_index = 0; array_index < num_arrays; array_index++) {
ValueType type = GetValueTypeHelper(
- &range, true, builder.NumTypes(), builder.NumTypes(),
- kAllowNonNullables, kIncludePackedTypes, kIncludeGenerics);
+ &range, builder.NumTypes(), builder.NumTypes(), kAllowNonNullables,
+ kIncludePackedTypes, kIncludeGenerics);
ArrayType* array_fuz = zone->New<ArrayType>(type, true);
- builder.AddArrayType(array_fuz);
+ builder.AddArrayType(array_fuz, false);
}
- }
// We keep the signature for the first (main) function constant.
- function_signatures.push_back(builder.ForceAddSignature(sigs.i_iii()));
-
- for (uint8_t i = 1; i < num_functions; i++) {
- FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig,
- liftoff_as_reference, builder.NumTypes());
- uint32_t signature_index = builder.ForceAddSignature(sig);
- function_signatures.push_back(signature_index);
- }
+ function_signatures.push_back(
+ builder.ForceAddSignature(sigs.i_iii(), v8_flags.wasm_final_types));
+
+ for (uint8_t i = 1; i < num_functions; i++) {
+ FunctionSig* sig =
+ GenerateSig(zone, &range, kFunctionSig, builder.NumTypes());
+ uint32_t signature_index =
+ builder.ForceAddSignature(sig, v8_flags.wasm_final_types);
+ function_signatures.push_back(signature_index);
+ }
int num_exceptions = 1 + (range.get<uint8_t>() % kMaxExceptions);
for (int i = 0; i < num_exceptions; ++i) {
- FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig,
- liftoff_as_reference, num_types);
+ FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig, num_types);
builder.AddException(sig);
}
@@ -2517,16 +2526,13 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
// have typed-function tables.
std::vector<WasmFunctionBuilder*> functions;
for (uint8_t i = 0; i < num_functions; i++) {
- const FunctionSig* sig = builder.GetSignature(function_signatures[i]);
// If we are using wasm-gc, we cannot allow signature normalization
// performed by adding a function by {FunctionSig}, because we emit
// everything in one recursive group which blocks signature
// canonicalization.
// TODO(7748): Relax this when we implement proper recursive-group
// support.
- functions.push_back(liftoff_as_reference
- ? builder.AddFunction(function_signatures[i])
- : builder.AddFunction(sig));
+ functions.push_back(builder.AddFunction(function_signatures[i]));
}
int num_globals = range.get<uint8_t>() % (kMaxGlobals + 1);
@@ -2536,15 +2542,15 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
mutable_globals.reserve(num_globals);
for (int i = 0; i < num_globals; ++i) {
- ValueType type = GetValueTypeHelper(
- &range, liftoff_as_reference, num_types, num_types,
- kAllowNonNullables, kExcludePackedTypes, kIncludeGenerics);
+ ValueType type =
+ GetValueTypeHelper(&range, num_types, num_types, kAllowNonNullables,
+ kExcludePackedTypes, kIncludeGenerics);
// 1/8 of globals are immutable.
const bool mutability = (range.get<uint8_t>() % 8) != 0;
builder.AddGlobal(
type, mutability,
- GenerateInitExpr(zone, &builder, type,
+ GenerateInitExpr(zone, range, &builder, type,
static_cast<uint32_t>(num_structs + num_arrays)));
globals.push_back(type);
if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
@@ -2591,8 +2597,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmFunctionBuilder* f = functions[i];
DataRange function_range = range.split();
WasmGenerator gen(f, function_signatures, globals, mutable_globals,
- num_structs, num_arrays, &function_range,
- liftoff_as_reference);
+ num_structs, num_arrays, &function_range);
const FunctionSig* sig = f->signature();
base::Vector<const ValueType> return_types(sig->returns().begin(),
sig->return_count());
@@ -2610,13 +2615,9 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
constexpr bool require_valid = true;
- EXPERIMENTAL_FLAG_SCOPE(typed_funcref);
- EXPERIMENTAL_FLAG_SCOPE(gc);
+ EXPERIMENTAL_FLAG_SCOPE(relaxed_simd);
WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
return 0;
}
-} // namespace fuzzer
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm::fuzzer
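GenerateInitExpr now threads the fuzzer's DataRange through and uses it to choose, per integer initializer, between a random constant and a recursively built add/sub/mul expression. Below is a standalone sketch of that weighted-choice recursion over a plain byte stream; ByteStream and the textual output are stand-ins for the real DataRange and WasmInitExpr, and the explicit depth cap is an assumption added here to keep the toy version finite:

#include <cstdint>
#include <string>
#include <vector>

// Stand-in for the fuzzer's DataRange: hands out bytes until exhausted.
struct ByteStream {
  std::vector<uint8_t> bytes;
  size_t pos = 0;
  uint8_t next() { return pos < bytes.size() ? bytes[pos++] : 0; }
};

// Half of the byte values (0..2 of 6) yield a constant; the other half yield
// an add/sub/mul node with two recursively generated operands.
std::string GenInitExpr(ByteStream& in, int depth = 0) {
  uint8_t choice = in.next() % 6;
  if (choice < 3 || depth > 3) {  // depth cap: assumption for this sketch
    return std::to_string(static_cast<int32_t>(in.next()));
  }
  const char* op = choice == 3 ? "i32.add" : choice == 4 ? "i32.sub" : "i32.mul";
  return "(" + std::string(op) + " " + GenInitExpr(in, depth + 1) + " " +
         GenInitExpr(in, depth + 1) + ")";
}

int main() {
  ByteStream in{{5, 7, 1, 42, 0, 9, 3}};
  return GenInitExpr(in).empty() ? 1 : 0;
}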
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index c44d011111..a85a74a2ff 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -15,6 +15,7 @@
#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/module-decoder-impl.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-feature-flags.h"
@@ -28,42 +29,39 @@
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace fuzzer {
+namespace v8::internal::wasm::fuzzer {
// Compile a baseline module. We pass a pointer to a max step counter and a
// nondeterminism flag that are updated during execution by Liftoff.
-Handle<WasmModuleObject> CompileReferenceModule(Zone* zone, Isolate* isolate,
- ModuleWireBytes wire_bytes,
- ErrorThrower* thrower,
- int32_t* max_steps,
- int32_t* nondeterminism) {
+Handle<WasmModuleObject> CompileReferenceModule(
+ Isolate* isolate, base::Vector<const uint8_t> wire_bytes,
+ int32_t* max_steps, int32_t* nondeterminism) {
// Create the native module.
std::shared_ptr<NativeModule> native_module;
constexpr bool kNoVerifyFunctions = false;
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(isolate);
- ModuleResult module_res = DecodeWasmModule(
- enabled_features, wire_bytes.start(), wire_bytes.end(),
- kNoVerifyFunctions, ModuleOrigin::kWasmOrigin, isolate->counters(),
- isolate->metrics_recorder(), v8::metrics::Recorder::ContextId::Empty(),
- DecodingMethod::kSync, GetWasmEngine()->allocator());
+ auto enabled_features = WasmFeatures::FromIsolate(isolate);
+ ModuleResult module_res =
+ DecodeWasmModule(enabled_features, wire_bytes, kNoVerifyFunctions,
+ ModuleOrigin::kWasmOrigin);
CHECK(module_res.ok());
std::shared_ptr<WasmModule> module = module_res.value();
CHECK_NOT_NULL(module);
native_module =
GetWasmEngine()->NewNativeModule(isolate, enabled_features, module, 0);
- native_module->SetWireBytes(
- base::OwnedVector<uint8_t>::Of(wire_bytes.module_bytes()));
+ native_module->SetWireBytes(base::OwnedVector<uint8_t>::Of(wire_bytes));
+ // The module is known to be valid at this point (it was compiled by the
+ // caller before).
+ module->set_all_functions_validated();
// Compile all functions with Liftoff.
WasmCodeRefScope code_ref_scope;
auto env = native_module->CreateCompilationEnv();
+ ModuleWireBytes wire_bytes_accessor{wire_bytes};
for (size_t i = module->num_imported_functions; i < module->functions.size();
++i) {
auto& func = module->functions[i];
- base::Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
+ base::Vector<const uint8_t> func_code =
+ wire_bytes_accessor.GetFunctionBytes(&func);
FunctionBody func_body(func.sig, func.code.offset(), func_code.begin(),
func_code.end());
auto result =
@@ -73,6 +71,11 @@ Handle<WasmModuleObject> CompileReferenceModule(Zone* zone, Isolate* isolate,
.set_for_debugging(kForDebugging)
.set_max_steps(max_steps)
.set_nondeterminism(nondeterminism));
+ if (!result.succeeded()) {
+ FATAL(
+ "Liftoff compilation failed on a valid module. Run with "
+ "--trace-wasm-decoder (in a debug build) to see why.");
+ }
native_module->PublishCode(
native_module->AddCompiledCode(std::move(result)));
}
@@ -86,27 +89,29 @@ Handle<WasmModuleObject> CompileReferenceModule(Zone* zone, Isolate* isolate,
return WasmModuleObject::New(isolate, std::move(native_module), script);
}
-void InterpretAndExecuteModule(i::Isolate* isolate,
- Handle<WasmModuleObject> module_object,
- Handle<WasmModuleObject> module_ref,
- int32_t* max_steps, int32_t* nondeterminism) {
+void ExecuteAgainstReference(Isolate* isolate,
+ Handle<WasmModuleObject> module_object,
+ int32_t max_executed_instructions) {
// We do not instantiate the module if there is a start function, because a
// start function can contain an infinite loop which we cannot handle.
if (module_object->module()->start_function_index >= 0) return;
+ int32_t max_steps = max_executed_instructions;
+ int32_t nondeterminism = 0;
+
HandleScope handle_scope(isolate); // Avoid leaking handles.
+ Zone reference_module_zone(isolate->allocator(), "wasm reference module");
+ Handle<WasmModuleObject> module_ref = CompileReferenceModule(
+ isolate, module_object->native_module()->wire_bytes(), &max_steps,
+ &nondeterminism);
Handle<WasmInstanceObject> instance_ref;
- // Try to instantiate the reference instance, return if it fails. Use
- // {module_ref} if provided (for "Liftoff as reference"), {module_object}
- // otherwise (for "interpreter as reference").
+ // Try to instantiate the reference instance, return if it fails.
{
- ErrorThrower thrower(isolate, "InterpretAndExecuteModule");
+ ErrorThrower thrower(isolate, "ExecuteAgainstReference");
if (!GetWasmEngine()
- ->SyncInstantiate(
- isolate, &thrower,
- module_ref.is_null() ? module_object : module_ref, {},
- {}) // no imports & memory
+ ->SyncInstantiate(isolate, &thrower, module_ref, {},
+ {}) // no imports & memory
.ToHandle(&instance_ref)) {
isolate->clear_pending_exception();
thrower.Reset(); // Ignore errors.
@@ -123,50 +128,20 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
base::OwnedVector<Handle<Object>> compiled_args =
testing::MakeDefaultArguments(isolate, main_function->sig());
- bool exception_ref = false;
- int32_t result_ref = 0;
-
- if (module_ref.is_null()) {
- // Use the interpreter as reference.
- base::OwnedVector<WasmValue> arguments =
- testing::MakeDefaultInterpreterArguments(isolate, main_function->sig());
-
- testing::WasmInterpretationResult interpreter_result =
- testing::InterpretWasmModule(isolate, instance_ref,
- main_function->function_index(),
- arguments.begin());
- if (interpreter_result.failed()) return;
-
- // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit can make the difference between an infinite loop and
- // terminating code. With possible non-determinism we cannot guarantee that
- // the generated code will not go into an infinite loop and cause a timeout
- // in Clusterfuzz. Therefore we do not execute the generated code if the
- // result may be non-deterministic.
- if (interpreter_result.possible_nondeterminism()) return;
- if (interpreter_result.finished()) {
- result_ref = interpreter_result.result();
- } else {
- DCHECK(interpreter_result.trapped());
- exception_ref = true;
- }
- } else {
- // Use Liftoff code as reference.
- result_ref = testing::CallWasmFunctionForTesting(
- isolate, instance_ref, "main", static_cast<int>(compiled_args.size()),
- compiled_args.begin(), &exception_ref);
- // Reached max steps, do not try to execute the test module as it might
- // never terminate.
- if (*max_steps == 0) return;
- // If there is nondeterminism, we cannot guarantee the behavior of the test
- // module, and in particular it may not terminate.
- if (*nondeterminism != 0) return;
- }
+ std::unique_ptr<const char[]> exception_ref;
+ int32_t result_ref = testing::CallWasmFunctionForTesting(
+ isolate, instance_ref, "main", compiled_args.as_vector(), &exception_ref);
+ // Reached max steps, do not try to execute the test module as it might
+ // never terminate.
+ if (max_steps < 0) return;
+ // If there is nondeterminism, we cannot guarantee the behavior of the test
+ // module, and in particular it may not terminate.
+ if (nondeterminism != 0) return;
// Instantiate a fresh instance for the actual (non-ref) execution.
Handle<WasmInstanceObject> instance;
{
- ErrorThrower thrower(isolate, "InterpretAndExecuteModule (second)");
+ ErrorThrower thrower(isolate, "ExecuteAgainstReference (second)");
// We instantiated before, so the second instantiation must also succeed.
if (!GetWasmEngine()
->SyncInstantiate(isolate, &thrower, module_object, {},
@@ -185,15 +160,14 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
DCHECK(!thrower.error());
}
- bool exception = false;
+ std::unique_ptr<const char[]> exception;
int32_t result = testing::CallWasmFunctionForTesting(
- isolate, instance, "main", static_cast<int>(compiled_args.size()),
- compiled_args.begin(), &exception);
+ isolate, instance, "main", compiled_args.as_vector(), &exception);
- if (exception_ref != exception) {
- const char* exception_text[] = {"no exception", "exception"};
- FATAL("expected: %s; got: %s", exception_text[exception_ref],
- exception_text[exception]);
+ if ((exception_ref != nullptr) != (exception != nullptr)) {
+ FATAL("Exception mismatch! Expected: <%s>; got: <%s>",
+ exception_ref ? exception_ref.get() : "<no exception>",
+ exception ? exception.get() : "<no exception>");
}
if (!exception) {
@@ -396,6 +370,32 @@ class InitExprInterface {
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
const Value& rhs, Value* result) {
+ switch (opcode) {
+ case kExprI32Add:
+ os_ << "kExprI32Add, ";
+ break;
+ case kExprI32Sub:
+ os_ << "kExprI32Sub, ";
+ break;
+ case kExprI32Mul:
+ os_ << "kExprI32Mul, ";
+ break;
+ case kExprI64Add:
+ os_ << "kExprI64Add, ";
+ break;
+ case kExprI64Sub:
+ os_ << "kExprI64Sub, ";
+ break;
+ case kExprI64Mul:
+ os_ << "kExprI64Mul, ";
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
+ Value* result) {
// TODO(12089): Implement.
UNIMPLEMENTED();
}
@@ -506,12 +506,10 @@ void DecodeAndAppendInitExpr(StdoutStream& os, Zone* zone,
void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
bool compiles) {
constexpr bool kVerifyFunctions = false;
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(isolate);
- ModuleResult module_res = DecodeWasmModule(
- enabled_features, wire_bytes.start(), wire_bytes.end(), kVerifyFunctions,
- ModuleOrigin::kWasmOrigin, isolate->counters(),
- isolate->metrics_recorder(), v8::metrics::Recorder::ContextId::Empty(),
- DecodingMethod::kSync, GetWasmEngine()->allocator());
+ auto enabled_features = WasmFeatures::FromIsolate(isolate);
+ ModuleResult module_res =
+ DecodeWasmModule(enabled_features, wire_bytes.module_bytes(),
+ kVerifyFunctions, ModuleOrigin::kWasmOrigin);
CHECK_WITH_MSG(module_res.ok(), module_res.error().message().c_str());
WasmModule* module = module_res.value().get();
CHECK_NOT_NULL(module);
@@ -538,6 +536,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
"// found in the LICENSE file.\n"
"\n"
"// Flags: --wasm-staging --experimental-wasm-gc\n"
+ "// Flags: --experimental-wasm-relaxed-simd\n"
"\n"
"d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');\n"
"\n"
@@ -643,14 +642,20 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
os << ", ";
}
os << "[";
- for (uint32_t i = 0; i < elem_segment.entries.size(); i++) {
+ ModuleDecoderImpl decoder(WasmFeatures::All(),
+ wire_bytes.module_bytes().SubVectorFrom(
+ elem_segment.elements_wire_bytes_offset),
+ ModuleOrigin::kWasmOrigin);
+ for (uint32_t i = 0; i < elem_segment.element_count; i++) {
+ ConstantExpression expr =
+ decoder.consume_element_segment_entry(module, elem_segment);
if (elem_segment.element_type == WasmElemSegment::kExpressionElements) {
- DecodeAndAppendInitExpr(os, &zone, module, wire_bytes,
- elem_segment.entries[i], elem_segment.type);
+ DecodeAndAppendInitExpr(os, &zone, module, wire_bytes, expr,
+ elem_segment.type);
} else {
- os << elem_segment.entries[i].index();
+ os << expr.index();
}
- if (i < elem_segment.entries.size() - 1) os << ", ";
+ if (i < elem_segment.element_count - 1) os << ", ";
}
os << "], "
<< (elem_segment.element_type == WasmElemSegment::kExpressionElements
@@ -717,18 +722,29 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
-void OneTimeEnableStagedWasmFeatures(v8::Isolate* isolate) {
- struct EnableStagedWasmFeatures {
- explicit EnableStagedWasmFeatures(v8::Isolate* isolate) {
-#define ENABLE_STAGED_FEATURES(feat, desc, val) \
+void EnableExperimentalWasmFeatures(v8::Isolate* isolate) {
+ struct EnableExperimentalWasmFeatures {
+ explicit EnableExperimentalWasmFeatures(v8::Isolate* isolate) {
+ // Enable all staged features.
+#define ENABLE_STAGED_FEATURES(feat, ...) \
v8_flags.experimental_wasm_##feat = true;
FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
#undef ENABLE_STAGED_FEATURES
+
+ // Enable non-staged experimental features that we also want to fuzz.
+ v8_flags.experimental_wasm_gc = true;
+
+ // Enforce implications from enabling features.
+ FlagList::EnforceFlagImplications();
+
+ // Last, install any conditional features. Implications are handled
+ // implicitly.
isolate->InstallConditionalFeatures(isolate->GetCurrentContext());
}
};
// The compiler will properly synchronize the constructor call.
- static EnableStagedWasmFeatures one_time_enable_staged_features(isolate);
+ static EnableExperimentalWasmFeatures one_time_enable_experimental_features(
+ isolate);
}
void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
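EnableExperimentalWasmFeatures above relies on a function-local static whose constructor runs exactly once (and is synchronized by the compiler), so the flag flipping happens only on the first call. A minimal sketch of that one-time-initialization pattern with placeholder flags:

#include <cstdio>

namespace {
bool g_feature_a = false;  // placeholders for the real v8_flags fields
bool g_feature_b = false;
}  // namespace

void EnableFeaturesOnce() {
  struct Enabler {
    Enabler() {  // runs exactly once; thread-safe since C++11
      g_feature_a = true;
      g_feature_b = true;
      std::puts("features enabled");
    }
  };
  static Enabler enable_once;  // later calls skip the constructor
}

int main() {
  EnableFeaturesOnce();
  EnableFeaturesOnce();  // second call prints nothing
  return (g_feature_a && g_feature_b) ? 0 : 1;
}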
@@ -741,7 +757,7 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
// respect that limit.
if (data.size() > max_input_size()) return;
- i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
i_isolate->clear_pending_exception();
@@ -753,7 +769,7 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
// We explicitly enable staged WebAssembly features here to increase fuzzer
// coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
// the flag by itself.
- OneTimeEnableStagedWasmFeatures(isolate);
+ EnableExperimentalWasmFeatures(isolate);
v8::TryCatch try_catch(isolate);
HandleScope scope(i_isolate);
@@ -782,32 +798,26 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
}
// Note: After dividing by 3 four times, configuration_byte is within [0, 3].
-// Control whether Liftoff or the interpreter will be used as the reference
-// tier.
-// TODO(thibaudm): Port nondeterminism detection to arm.
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_X86) || \
- defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_ARM)
- bool liftoff_as_reference = configuration_byte & 1;
-#else
- bool liftoff_as_reference = false;
-#endif
FlagScope<bool> turbo_mid_tier_regalloc(
&v8_flags.turbo_force_mid_tier_regalloc, configuration_byte == 0);
- if (!GenerateModule(i_isolate, &zone, data, &buffer, liftoff_as_reference)) {
+ if (!GenerateModule(i_isolate, &zone, data, &buffer)) {
return;
}
testing::SetupIsolateForWasmModule(i_isolate);
- ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
- if (require_valid && v8_flags.wasm_fuzzer_gen_test) {
- GenerateTestCase(i_isolate, wire_bytes, true);
+ auto enabled_features = WasmFeatures::FromIsolate(i_isolate);
+
+ bool valid =
+ GetWasmEngine()->SyncValidate(i_isolate, enabled_features, wire_bytes);
+
+ if (v8_flags.wasm_fuzzer_gen_test) {
+ GenerateTestCase(i_isolate, wire_bytes, valid);
}
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
MaybeHandle<WasmModuleObject> compiled_module;
{
// Explicitly enable Liftoff, disable tiering and set the tier_mask. This
@@ -818,38 +828,22 @@ void WasmExecutionFuzzer::FuzzWasmModule(base::Vector<const uint8_t> data,
tier_mask);
FlagScope<int> debug_mask_scope(&v8_flags.wasm_debug_mask_for_testing,
debug_mask);
- compiled_module = GetWasmEngine()->SyncCompile(
- i_isolate, enabled_features, &interpreter_thrower, wire_bytes);
- }
- bool compiles = !compiled_module.is_null();
- if (!require_valid && v8_flags.wasm_fuzzer_gen_test) {
- GenerateTestCase(i_isolate, wire_bytes, compiles);
+ ErrorThrower thrower(i_isolate, "WasmFuzzerSyncCompile");
+ compiled_module = GetWasmEngine()->SyncCompile(i_isolate, enabled_features,
+ &thrower, wire_bytes);
+ CHECK_EQ(valid, !compiled_module.is_null());
+ CHECK_EQ(!valid, thrower.error());
+ if (require_valid && !valid) {
+ FATAL("Generated module should validate, but got: %s",
+ thrower.error_msg());
+ }
+ thrower.Reset();
}
- std::string error_message;
- bool result = GetWasmEngine()->SyncValidate(i_isolate, enabled_features,
- wire_bytes, &error_message);
-
- CHECK_EQ(compiles, result);
- CHECK_WITH_MSG(
- !require_valid || result,
- ("Generated module should validate, but got: " + error_message).c_str());
-
- if (!compiles) return;
-
- int32_t max_steps = 16 * 1024;
- int32_t nondeterminism = false;
- Handle<WasmModuleObject> module_ref;
- if (liftoff_as_reference) {
- module_ref = CompileReferenceModule(&zone, i_isolate, wire_bytes,
- &interpreter_thrower, &max_steps,
- &nondeterminism);
+ if (valid) {
+ ExecuteAgainstReference(i_isolate, compiled_module.ToHandleChecked(),
+ kDefaultMaxFuzzerExecutedInstructions);
}
- InterpretAndExecuteModule(i_isolate, compiled_module.ToHandleChecked(),
- module_ref, &max_steps, &nondeterminism);
}
-} // namespace fuzzer
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm::fuzzer
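The rewritten ExecuteAgainstReference runs a Liftoff-compiled reference module with an instruction budget and a nondeterminism flag, bails out if the budget is exhausted or nondeterminism was observed, and only then executes the test module and compares results. A standalone control-flow sketch of that differential pattern; RunReference and RunTestModule are placeholders for the real instrumented executions:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Placeholders for the reference and test executions; in the fuzzer these are
// the Liftoff-compiled reference module and the freshly compiled test module.
int32_t RunReference(int32_t* steps_left, bool* nondeterminism) {
  *steps_left -= 10;        // pretend the run consumed some budget
  *nondeterminism = false;  // pretend no NaN-sign nondeterminism was seen
  return 42;
}
int32_t RunTestModule() { return 42; }

void ExecuteAgainstReference(int32_t max_steps) {
  int32_t steps_left = max_steps;
  bool nondeterminism = false;
  int32_t expected = RunReference(&steps_left, &nondeterminism);
  // Budget exhausted: the test module might not terminate, so skip it.
  if (steps_left < 0) return;
  // Nondeterminism (e.g. NaN sign bits): results may legitimately differ.
  if (nondeterminism) return;
  int32_t actual = RunTestModule();
  if (actual != expected) {
    std::fprintf(stderr, "result mismatch: %d vs %d\n", expected, actual);
    std::abort();
  }
}

int main() { ExecuteAgainstReference(16'000); return 0; }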
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 36dc073f4b..e2fd428cc2 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -13,28 +13,33 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-module-builder.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace fuzzer {
-
-// First instantiates and interprets the "main" function within module_object if
-// possible. If the interpretation finishes within kMaxSteps steps,
-// module_object is instantiated again and the compiled "main" function is
-// executed.
-void InterpretAndExecuteModule(
- Isolate* isolate, Handle<WasmModuleObject> module_object,
- Handle<WasmModuleObject> module_ref = Handle<WasmModuleObject>::null(),
- int32_t* max_steps = nullptr, int32_t* nondeterminism = nullptr);
+namespace v8::internal::wasm::fuzzer {
+
+// A default value for {max_executed_instructions} in {ExecuteAgainstReference}.
+#ifdef USE_SIMULATOR
+constexpr int kDefaultMaxFuzzerExecutedInstructions = 16'000;
+#else
+constexpr int kDefaultMaxFuzzerExecutedInstructions = 1'000'000;
+#endif
+
+// First creates a reference module fully compiled with Liftoff, with
+// instrumentation to stop after a given number of steps and to record any
+// nondeterminism while executing. If execution finishes within {max_steps},
+// {module_object} is instantiated, its "main" function is executed, and the
+// result is compared against the reference execution. If non-determinism was
+// detected during the reference execution, the result is allowed to differ.
+void ExecuteAgainstReference(Isolate* isolate,
+ Handle<WasmModuleObject> module_object,
+ int32_t max_executed_instructions);
void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
bool compiles);
-// On the first call, enables all staged wasm features. All subsequent calls are
-// no-ops. This avoids race conditions with threads reading the flags. Fuzzers
-// are executed in their own process anyway, so this should not interfere with
-// anything.
-void OneTimeEnableStagedWasmFeatures(v8::Isolate* isolate);
+// On the first call, enables all staged wasm features and experimental features
+// that are ready for fuzzing. All subsequent calls are no-ops. This avoids race
+// conditions with threads reading the flags. Fuzzers are executed in their own
+// process anyway, so this should not interfere with anything.
+void EnableExperimentalWasmFeatures(v8::Isolate* isolate);
class WasmExecutionFuzzer {
public:
@@ -47,12 +52,8 @@ class WasmExecutionFuzzer {
protected:
virtual bool GenerateModule(Isolate* isolate, Zone* zone,
base::Vector<const uint8_t> data,
- ZoneBuffer* buffer,
- bool liftoff_as_reference) = 0;
+ ZoneBuffer* buffer) = 0;
};
-} // namespace fuzzer
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm::fuzzer
#endif // WASM_FUZZER_COMMON_H_
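The new kDefaultMaxFuzzerExecutedInstructions constant picks a much smaller budget when running on a simulator, using C++14 digit separators for readability. A tiny standalone sketch of the same conditional-constant idiom; SLOW_TARGET is a placeholder macro, not a real V8 define:

#include <cstdio>

// Mirrors the USE_SIMULATOR split above; SLOW_TARGET is a placeholder.
#ifdef SLOW_TARGET
constexpr int kMaxExecutedInstructions = 16'000;
#else
constexpr int kMaxExecutedInstructions = 1'000'000;
#endif

int main() {
  std::printf("budget: %d instructions\n", kMaxExecutedInstructions);
  return 0;
}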
diff --git a/deps/v8/test/fuzzer/wasm-streaming.cc b/deps/v8/test/fuzzer/wasm-streaming.cc
index a257add472..23a282a553 100644
--- a/deps/v8/test/fuzzer/wasm-streaming.cc
+++ b/deps/v8/test/fuzzer/wasm-streaming.cc
@@ -14,7 +14,7 @@
#include "test/fuzzer/fuzzer-support.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-namespace v8::internal::wasm {
+namespace v8::internal::wasm::fuzzer {
// Some properties of the compilation result to check. Extend if needed.
struct CompilationResult {
@@ -40,14 +40,14 @@ struct CompilationResult {
class TestResolver : public CompilationResultResolver {
public:
- explicit TestResolver(i::Isolate* isolate) : isolate_(isolate) {}
+ explicit TestResolver(Isolate* isolate) : isolate_(isolate) {}
- void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> module) override {
+ void OnCompilationSucceeded(Handle<WasmModuleObject> module) override {
done_ = true;
native_module_ = module->shared_native_module();
}
- void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ void OnCompilationFailed(Handle<Object> error_reason) override {
done_ = true;
failed_ = true;
Handle<String> str =
@@ -66,7 +66,7 @@ class TestResolver : public CompilationResultResolver {
const std::string& error_message() const { return error_message_; }
private:
- i::Isolate* isolate_;
+ Isolate* isolate_;
bool done_ = false;
bool failed_ = false;
std::string error_message_;
@@ -145,21 +145,21 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
- i::Isolate* i_isolate = reinterpret_cast<v8::internal::Isolate*>(isolate);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
v8::Isolate::Scope isolate_scope(isolate);
- i::HandleScope handle_scope(i_isolate);
+ v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- fuzzer::OneTimeEnableStagedWasmFeatures(isolate);
+ // We explicitly enable staged/experimental WebAssembly features here to
+ // increase fuzzer coverage. For libfuzzer fuzzers it is not possible that the
+ // fuzzer enables the flag by itself.
+ EnableExperimentalWasmFeatures(isolate);
// Limit the maximum module size to avoid OOM.
v8_flags.wasm_max_module_size = 256 * KB;
- WasmFeatures enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+ WasmFeatures enabled_features = WasmFeatures::FromIsolate(i_isolate);
base::Vector<const uint8_t> data_vec{data, size - 1};
uint8_t config = data[size - 1];
@@ -196,4 +196,4 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return 0;
}
-} // namespace v8::internal::wasm
+} // namespace v8::internal::wasm::fuzzer
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index b4d14f271f..ad4d5163d0 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -19,7 +19,7 @@
#include "test/fuzzer/fuzzer-support.h"
#include "test/fuzzer/wasm-fuzzer-common.h"
-namespace i = v8::internal;
+namespace v8::internal::wasm::fuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
@@ -27,10 +27,10 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// We reduce the maximum memory size and table size of WebAssembly instances
// to avoid OOMs in the fuzzer.
- i::v8_flags.wasm_max_mem_pages = 32;
- i::v8_flags.wasm_max_table_size = 100;
+ v8_flags.wasm_max_mem_pages = 32;
+ v8_flags.wasm_max_table_size = 100;
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
if (i_isolate->has_pending_exception()) {
@@ -41,30 +41,31 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- i::wasm::fuzzer::OneTimeEnableStagedWasmFeatures(isolate);
+ // We explicitly enable staged/experimental WebAssembly features here to
+ // increase fuzzer coverage. For libfuzzer fuzzers it is not possible that the
+ // fuzzer enables the flag by itself.
+ EnableExperimentalWasmFeatures(isolate);
v8::TryCatch try_catch(isolate);
- i::wasm::testing::SetupIsolateForWasmModule(i_isolate);
- i::wasm::ModuleWireBytes wire_bytes(data, data + size);
+ testing::SetupIsolateForWasmModule(i_isolate);
+ ModuleWireBytes wire_bytes(data, data + size);
- i::HandleScope scope(i_isolate);
- i::wasm::ErrorThrower thrower(i_isolate, "wasm fuzzer");
- i::Handle<i::WasmModuleObject> module_object;
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+ HandleScope scope(i_isolate);
+ ErrorThrower thrower(i_isolate, "wasm fuzzer");
+ Handle<WasmModuleObject> module_object;
+ auto enabled_features = WasmFeatures::FromIsolate(i_isolate);
bool compiles =
- i::wasm::GetWasmEngine()
+ GetWasmEngine()
->SyncCompile(i_isolate, enabled_features, &thrower, wire_bytes)
.ToHandle(&module_object);
- if (i::v8_flags.wasm_fuzzer_gen_test) {
- i::wasm::fuzzer::GenerateTestCase(i_isolate, wire_bytes, compiles);
+ if (v8_flags.wasm_fuzzer_gen_test) {
+ GenerateTestCase(i_isolate, wire_bytes, compiles);
}
if (compiles) {
- i::wasm::fuzzer::InterpretAndExecuteModule(i_isolate, module_object);
+ ExecuteAgainstReference(i_isolate, module_object,
+ kDefaultMaxFuzzerExecutedInstructions);
}
// Pump the message loop and run micro tasks, e.g. GC finalization tasks.
@@ -72,3 +73,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
isolate->PerformMicrotaskCheckpoint();
return 0;
}
+
+} // namespace v8::internal::wasm::fuzzer
diff --git a/deps/v8/test/fuzzer/wasm/regress-1404619.wasm b/deps/v8/test/fuzzer/wasm/regress-1404619.wasm
new file mode 100644
index 0000000000..0904cc4866
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm/regress-1404619.wasm
Binary files differ
diff --git a/deps/v8/test/fuzzer/wasm_async/regress-1405322.wasm b/deps/v8/test/fuzzer/wasm_async/regress-1405322.wasm
new file mode 100644
index 0000000000..04309a8330
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_async/regress-1405322.wasm
Binary files differ
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
index a71c5d0ec5..6cdea4819c 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
@@ -49,14 +49,14 @@ Paused #2
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #3
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- - [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- - [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
+ - [1] {"functionName":"callDebugger","function_lineNumber":0,"function_columnNumber":105,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":0,"function_columnNumber":110,"lineNumber":8,"columnNumber":6}
- [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #4
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
- - [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- - [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
+ - [1] {"functionName":"callDebugger","function_lineNumber":0,"function_columnNumber":105,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":0,"function_columnNumber":110,"lineNumber":8,"columnNumber":6}
- [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
index ac4cfa1485..4f4b101a54 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
@@ -9,8 +9,8 @@ Running test: runTestFunction
Script nr 2 parsed!
Paused #1
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- - [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- - [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
+ - [1] {"functionName":"callDebugger","function_lineNumber":0,"function_columnNumber":105,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":0,"function_columnNumber":110,"lineNumber":8,"columnNumber":6}
- [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
First time paused, setting breakpoints!
@@ -63,8 +63,8 @@ Script nr 3 parsed!
Resuming...
Paused #2
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
- - [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- - [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
+ - [1] {"functionName":"callDebugger","function_lineNumber":0,"function_columnNumber":105,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":0,"function_columnNumber":110,"lineNumber":8,"columnNumber":6}
- [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 4 parsed!
diff --git a/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt b/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
index e028f2a595..df92b1516a 100644
--- a/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
@@ -2,8 +2,8 @@ Tests that asm-js scripts produce correct stack
Paused on 'debugger;'
Number of frames: 5
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- - [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- - [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
+ - [1] {"functionName":"callDebugger","function_lineNumber":0,"function_columnNumber":105,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":0,"function_columnNumber":110,"lineNumber":8,"columnNumber":6}
- [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Getting v8-generated stack trace...
diff --git a/deps/v8/test/inspector/debugger/break-on-exception-async-gen-expected.txt b/deps/v8/test/inspector/debugger/break-on-exception-async-gen-expected.txt
new file mode 100644
index 0000000000..0e529170ae
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/break-on-exception-async-gen-expected.txt
@@ -0,0 +1,45 @@
+Check that "break on exceptions" works for async generators.
+
+Running test: testSimpleGeneratorThrowCaught
+Paused for CAUGHT exception.
+
+Running test: testSimpleGeneratorThrowUncaught
+Paused for UNCAUGHT exception.
+
+Running test: testYieldBeforeThrowCaught
+Paused for CAUGHT exception.
+
+Running test: testYieldBeforeThrowUncaught
+Paused for UNCAUGHT exception.
+
+Running test: testAwaitBeforeThrowCaught
+Paused for CAUGHT exception.
+
+Running test: testAwaitBeforeThrowUncaught
+Paused for UNCAUGHT exception.
+
+Running test: testYieldBeforeThrowWithAwaitCaught
+Paused for CAUGHT exception.
+
+Running test: testYieldBeforeThrowWithAwaitUncaught
+Paused for UNCAUGHT exception.
+
+Running test: testAwaitBeforeThrowWithYieldCaught
+Paused for CAUGHT exception.
+
+Running test: testAwaitBeforeThrowWithYieldUncaught
+Paused for UNCAUGHT exception.
+
+Running test: testYieldThrowsCaught
+Paused for CAUGHT exception.
+Paused for CAUGHT exception.
+
+Running test: testYieldThrowsUncaught
+Paused for UNCAUGHT exception.
+Paused for UNCAUGHT exception.
+
+Running test: testAwaitThrowsCaught
+Paused for CAUGHT exception.
+
+Running test: testAwaitThrowsUncaught
+Paused for UNCAUGHT exception.
diff --git a/deps/v8/test/inspector/debugger/break-on-exception-async-gen.js b/deps/v8/test/inspector/debugger/break-on-exception-async-gen.js
new file mode 100644
index 0000000000..7fd0c0c791
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/break-on-exception-async-gen.js
@@ -0,0 +1,118 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Check that "break on exceptions" works for async generators.');
+
+contextGroup.addScript(`
+async function* simpleThrow() {
+ throw new Error();
+}
+
+async function* yieldBeforeThrow() {
+ yield 1;
+ throw new Error();
+}
+
+async function* awaitBeforeThrow() {
+ await 1;
+ throw new Error();
+}
+
+async function* yieldBeforeThrowWithAwait() {
+ await 1;
+ yield 2;
+ throw new Error();
+}
+
+async function* awaitBeforeThrowWithYield() {
+ yield 1;
+ await 2;
+ throw new Error();
+}
+
+async function* yieldThrows() {
+ yield 1;
+ yield thrower();
+}
+
+async function* awaitThrows() {
+ yield 1;
+ await thrower();
+}
+
+async function runGenWithCatch(gen) {
+ try {
+ for await (const val of gen());
+ } catch {}
+}
+
+async function runGenWithoutCatch(gen) {
+ for await (const val of gen());
+}
+
+async function thrower() {
+ await 1; // Suspend once.
+ throw new Error();
+}`);
+
+Protocol.Debugger.onPaused(async ({ params: { data }}) => {
+ const caughtOrUncaught = data.uncaught ? 'UNCAUGHT' : 'CAUGHT';
+ InspectorTest.log(`Paused for ${caughtOrUncaught} exception.`);
+ await Protocol.Debugger.resume();
+});
+
+async function runTest(expression) {
+ await Promise.all([
+ Protocol.Debugger.enable(),
+ Protocol.Debugger.setPauseOnExceptions({state: 'all'}),
+ ]);
+
+ await Protocol.Runtime.evaluate({ expression, awaitPromise: true });
+ await Protocol.Debugger.disable();
+}
+
+InspectorTest.runAsyncTestSuite([
+ async function testSimpleGeneratorThrowCaught() {
+ await runTest('runGenWithCatch(simpleThrow)');
+ },
+ async function testSimpleGeneratorThrowUncaught() {
+ await runTest('runGenWithoutCatch(simpleThrow)');
+ },
+ async function testYieldBeforeThrowCaught() {
+ await runTest('runGenWithCatch(yieldBeforeThrow)');
+ },
+ async function testYieldBeforeThrowUncaught() {
+ await runTest('runGenWithoutCatch(yieldBeforeThrow)');
+ },
+ async function testAwaitBeforeThrowCaught() {
+ await runTest('runGenWithCatch(awaitBeforeThrow)');
+ },
+ async function testAwaitBeforeThrowUncaught() {
+ await runTest('runGenWithoutCatch(awaitBeforeThrow)');
+ },
+ async function testYieldBeforeThrowWithAwaitCaught() {
+ await runTest('runGenWithCatch(yieldBeforeThrowWithAwait)');
+ },
+ async function testYieldBeforeThrowWithAwaitUncaught() {
+ await runTest('runGenWithoutCatch(yieldBeforeThrowWithAwait)');
+ },
+ async function testAwaitBeforeThrowWithYieldCaught() {
+ await runTest('runGenWithCatch(awaitBeforeThrowWithYield)');
+ },
+ async function testAwaitBeforeThrowWithYieldUncaught() {
+ await runTest('runGenWithoutCatch(awaitBeforeThrowWithYield)');
+ },
+ async function testYieldThrowsCaught() {
+ await runTest('runGenWithCatch(yieldThrows)');
+ },
+ async function testYieldThrowsUncaught() {
+ await runTest('runGenWithoutCatch(yieldThrows)');
+ },
+ async function testAwaitThrowsCaught() {
+ await runTest('runGenWithCatch(awaitThrows)');
+ },
+ async function testAwaitThrowsUncaught() {
+ await runTest('runGenWithoutCatch(awaitThrows)');
+ },
+]);
diff --git a/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt b/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt
index e9c6a95626..b302e1d465 100644
--- a/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-fields-scopes-expected.txt
@@ -53,7 +53,7 @@ Running test: testScopesPaused
objectId : <objectId>
type : object
}
- url :
+ url :
}
[1] : {
callFrameId : <callFrameId>
@@ -106,7 +106,7 @@ Running test: testScopesPaused
objectId : <objectId>
type : object
}
- url :
+ url :
}
[2] : {
callFrameId : <callFrameId>
@@ -116,7 +116,7 @@ Running test: testScopesPaused
lineNumber : 0
scriptId : <scriptId>
}
- functionName :
+ functionName :
location : {
columnNumber : 0
lineNumber : 0
@@ -139,6 +139,6 @@ Running test: testScopesPaused
objectId : <objectId>
type : object
}
- url :
+ url :
}
]
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt
index cb77ef1343..82bb85cf52 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt
@@ -1,4 +1,7 @@
Test empty inner classes with private instance methods in the outer class
Running test: testScopesPaused
+privateProperties from Runtime.getProperties()
+undefined
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
undefined
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js
index 5a687480be..8d595ceb95 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test empty inner classes with private instance methods in the outer class');
@@ -32,10 +34,7 @@ InspectorTest.runAsyncTestSuite([async function testScopesPaused() {
await Protocol.Debugger.oncePaused(); // inside fn()
let frame = callFrames[0];
- let {result} =
- await Protocol.Runtime.getProperties({objectId: frame.this.objectId});
-
- InspectorTest.logObject(result.privateProperties);
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
Protocol.Debugger.resume();
Protocol.Debugger.disable();
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
index 21da2d844c..91e9daba4f 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
@@ -1,18 +1,9 @@
Test private class methods
Running test: testScopesPaused
-private properties on the base class instance
+privateProperties from Runtime.getProperties()
[
[0] : {
- name : #inc
- value : {
- className : Function
- description : #inc() { this.#field++; return this.#field; }
- objectId : <objectId>
- type : function
- }
- }
- [1] : {
name : #writeOnly
set : {
className : Function
@@ -21,7 +12,7 @@ private properties on the base class instance
type : function
}
}
- [2] : {
+ [1] : {
get : {
className : Function
description : get #readOnly() { return this.#field; }
@@ -30,7 +21,7 @@ private properties on the base class instance
}
name : #readOnly
}
- [3] : {
+ [2] : {
get : {
className : Function
description : get #accessor() { return this.#field; }
@@ -45,7 +36,7 @@ private properties on the base class instance
type : function
}
}
- [4] : {
+ [3] : {
name : #field
value : {
description : 2
@@ -54,7 +45,37 @@ private properties on the base class instance
}
}
]
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
+ }
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #inc() { this.#field++; return this.#field; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
private accessors properties on the base class instance
+privateProperties from Runtime.getProperties()
[
[0] : {
name : #writeOnly
@@ -130,18 +151,10 @@ Evaluating write-only accessor
value : 0
}
}
-privateProperties on the subclass instance
+private members on the subclass instance
+privateProperties from Runtime.getProperties()
[
[0] : {
- name : #inc
- value : {
- className : Function
- description : #inc() { this.#field++; return this.#field; }
- objectId : <objectId>
- type : function
- }
- }
- [1] : {
name : #writeOnly
set : {
className : Function
@@ -150,7 +163,7 @@ privateProperties on the subclass instance
type : function
}
}
- [2] : {
+ [1] : {
get : {
className : Function
description : get #readOnly() { return this.#field; }
@@ -159,7 +172,7 @@ privateProperties on the subclass instance
}
name : #readOnly
}
- [3] : {
+ [2] : {
get : {
className : Function
description : get #accessor() { return this.#field; }
@@ -174,7 +187,7 @@ privateProperties on the subclass instance
type : function
}
}
- [4] : {
+ [3] : {
name : #field
value : {
description : 2
@@ -182,25 +195,64 @@ privateProperties on the subclass instance
value : 2
}
}
- [5] : {
- name : #subclassMethod
- value : {
- className : Function
- description : #subclassMethod() { return 'subclassMethod'; }
- objectId : <objectId>
- type : function
- }
+]
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[3]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
}
- [6] : {
- name : #inc
- value : {
- className : Function
- description : #inc() { return 'subclass #inc'; }
- objectId : <objectId>
- type : function
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #inc() { this.#field++; return this.#field; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
}
- }
-]
+ [1] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 1
+ value : {
+ className : Object
+ description : #subclassMethod() { return 'subclassMethod'; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ [2] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 2
+ value : {
+ className : Object
+ description : #inc() { return 'subclass #inc'; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
Evaluating private methods in the base class from the subclass
{
exceptionDetails : {
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt
index 4e1b681a19..3d9587b109 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-nested-super-expected.txt
@@ -1,30 +1,42 @@
Test getting private class methods from an instance that calls nested super()
Running test: testScopesPaused
-properties after super() is called in IIFE
-[
- [0] : {
- name : #b
- value : {
- className : Function
- description : #b() {}
- objectId : <objectId>
- type : function
- }
+private members after super() is called in IIFE
+privateProperties from Runtime.getProperties()
+undefined
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
}
-]
-privateProperties after super() is called in arrow function
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #b() {}
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
+private members after super() is called in arrow function
+privateProperties from Runtime.getProperties()
[
[0] : {
- name : #b
- value : {
- className : Function
- description : #b() {}
- objectId : <objectId>
- type : function
- }
- }
- [1] : {
get : {
className : Function
description : get #c() {}
@@ -34,18 +46,39 @@ privateProperties after super() is called in arrow function
name : #c
}
]
-privateProperties after super() is called in eval()
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
+ }
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #b() {}
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
+private members after super() is called in eval()
+privateProperties from Runtime.getProperties()
[
[0] : {
- name : #b
- value : {
- className : Function
- description : #b() {}
- objectId : <objectId>
- type : function
- }
- }
- [1] : {
get : {
className : Function
description : get #c() {}
@@ -54,7 +87,7 @@ privateProperties after super() is called in eval()
}
name : #c
}
- [2] : {
+ [1] : {
name : #d
set : {
className : Function
@@ -64,3 +97,32 @@ privateProperties after super() is called in eval()
}
}
]
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
+ }
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #b() {}
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js b/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js
index 5a8452f55c..d3c04ef9a8 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-nested-super.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
let { session, contextGroup, Protocol } = InspectorTest.start(
"Test getting private class methods from an instance that calls nested super()"
);
@@ -47,31 +49,25 @@ InspectorTest.runAsyncTestSuite([
params: { callFrames }
} = await Protocol.Debugger.oncePaused(); // inside B constructor
let frame = callFrames[0];
- let { result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- });
- InspectorTest.log('properties after super() is called in IIFE');
- InspectorTest.logMessage(result.privateProperties);
+ InspectorTest.log('private members after super() is called in IIFE');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
+
Protocol.Debugger.resume();
({ params: { callFrames } }
= await Protocol.Debugger.oncePaused()); // inside C constructor
frame = callFrames[0];
- ({ result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- }));
- InspectorTest.log('privateProperties after super() is called in arrow function');
- InspectorTest.logMessage(result.privateProperties);
+ InspectorTest.log('private members after super() is called in arrow function');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
+
Protocol.Debugger.resume();
({ params: { callFrames } }
= await Protocol.Debugger.oncePaused()); // inside D constructor
frame = callFrames[0];
- ({ result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- }));
- InspectorTest.log('privateProperties after super() is called in eval()');
- InspectorTest.logMessage(result.privateProperties);
+
+ InspectorTest.log('private members after super() is called in eval()');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
Protocol.Debugger.resume();
Protocol.Debugger.disable();
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-preview-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-preview-expected.txt
index 01f8a7b363..b6029a133a 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-preview-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-preview-expected.txt
@@ -7,11 +7,6 @@ expression: new class extends class { constructor() { return new Proxy({}, {});
expression: new class { #method() { return 1; } get #accessor() { } set #accessor(val) { } }
[
[0] : {
- name : #method
- type : function
- value :
- }
- [1] : {
name : #accessor
type : accessor
}
@@ -19,11 +14,6 @@ expression: new class { #method() { return 1; } get #accessor() { } set #accesso
expression: new class extends class { #method() { return 1; } } { get #accessor() { } set #accessor(val) { } }
[
[0] : {
- name : #method
- type : function
- value :
- }
- [1] : {
name : #accessor
type : accessor
}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
index 1da6f6d264..43b5b5e948 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
@@ -1,7 +1,8 @@
Test static private class methods
Running test: testScopesPaused
-privateProperties on the base class
+private members on the base class
+privateProperties from Runtime.getProperties()
[
[0] : {
name : #writeOnly
@@ -37,15 +38,6 @@ privateProperties on the base class
}
}
[3] : {
- name : #inc
- value : {
- className : Function
- description : #inc() { return ++A.#accessor; }
- objectId : <objectId>
- type : function
- }
- }
- [4] : {
name : #field
value : {
description : 2
@@ -54,6 +46,35 @@ privateProperties on the base class
}
}
]
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
+ }
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #inc() { return ++A.#accessor; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
Evaluating A.#inc();
{
result : {
@@ -94,7 +115,8 @@ Evaluating this.#writeOnly = 0; this.#field;
value : 0
}
}
-privateProperties on the subclass
+private members on the subclass
+privateProperties from Runtime.getProperties()
[
[0] : {
get : {
@@ -105,16 +127,36 @@ privateProperties on the subclass
}
name : #accessor
}
- [1] : {
- name : #subclassMethod
- value : {
- className : Function
- description : #subclassMethod() { return B.#accessor; }
- objectId : <objectId>
- type : function
- }
- }
]
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
+ }
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #subclassMethod() { return B.#accessor; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
Evaluating this.#inc(); from the base class
{
exceptionDetails : {
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
index cb12a7446b..b47bf7e8e7 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
@@ -1,17 +1,40 @@
Test static private class methods
Running test: testScopesPaused
-privateProperties on class A
-[
- [0] : {
- name : #method
- value : {
- className : Function
- description : #method() { debugger; }
- objectId : <objectId>
- type : function
- }
+private members on class A
+privateProperties from Runtime.getProperties()
+undefined
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
}
-]
-privateProperties on class B
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #method() { debugger; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
+private members on class B
+privateProperties from Runtime.getProperties()
+undefined
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
undefined
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js b/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
index db43d867f0..0d67f819d2 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
let { session, contextGroup, Protocol } = InspectorTest.start(
"Test static private class methods"
);
@@ -32,26 +34,20 @@ InspectorTest.runAsyncTestSuite([
// Do not await here, instead oncePaused should be awaited.
Protocol.Runtime.evaluate({ expression: 'run()' });
- InspectorTest.log('privateProperties on class A');
+ InspectorTest.log('private members on class A');
let {
params: { callFrames }
} = await Protocol.Debugger.oncePaused(); // inside A.#method()
let frame = callFrames[0];
- let { result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- });
- InspectorTest.logMessage(result.privateProperties);
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
Protocol.Debugger.resume();
({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // B.test();
frame = callFrames[0];
- InspectorTest.log('privateProperties on class B');
- ({ result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- }));
- InspectorTest.logObject(result.privateProperties);
+ InspectorTest.log('private members on class B');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
Protocol.Debugger.resume();
Protocol.Debugger.disable();
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static.js b/deps/v8/test/inspector/debugger/class-private-methods-static.js
index 57afd27456..8f1584a241 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
let { session, contextGroup, Protocol } = InspectorTest.start(
"Test static private class methods"
);
@@ -45,21 +47,19 @@ InspectorTest.runAsyncTestSuite([
// Do not await here, instead oncePaused should be awaited.
Protocol.Runtime.evaluate({ expression: 'run()' });
- InspectorTest.log('privateProperties on the base class');
+ InspectorTest.log('private members on the base class');
let {
params: { callFrames }
} = await Protocol.Debugger.oncePaused(); // inside A.test()
let frame = callFrames[0];
- let { result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- });
- InspectorTest.logMessage(result.privateProperties);
+
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
InspectorTest.log('Evaluating A.#inc();');
- ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ let { result } = await Protocol.Debugger.evaluateOnCallFrame({
expression: 'A.#inc();',
callFrameId: callFrames[0].callFrameId
- }));
+ });
InspectorTest.logObject(result);
InspectorTest.log('Evaluating this.#inc();');
@@ -94,11 +94,8 @@ InspectorTest.runAsyncTestSuite([
({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // B.test();
frame = callFrames[0];
- InspectorTest.log('privateProperties on the subclass');
- ({ result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- }));
- InspectorTest.logMessage(result.privateProperties);
+ InspectorTest.log('private members on the subclass');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
InspectorTest.log('Evaluating this.#inc(); from the base class');
({ result } = await Protocol.Debugger.evaluateOnCallFrame({
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
index 04a4f5aa0f..aef4e9a06b 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
@@ -1,18 +1,38 @@
Test accessing unused private methods at runtime
Running test: testScopesPaused
-Get privateProperties of A in testStatic()
-[
- [0] : {
- name : #staticMethod
- value : {
- className : Function
- description : #staticMethod() { return 1; }
- objectId : <objectId>
- type : function
- }
+private members of A in testStatic()
+privateProperties from Runtime.getProperties()
+undefined
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
}
-]
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #staticMethod() { return 1; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
Access A.#staticMethod() in testStatic()
{
exceptionDetails : {
@@ -61,18 +81,38 @@ Access this.#staticMethod() in testStatic()
type : object
}
}
-get privateProperties of a in testInstance()
-[
- [0] : {
- name : #instanceMethod
- value : {
- className : Function
- description : #instanceMethod() { return 2; }
- objectId : <objectId>
- type : function
- }
+private members of a in testInstance()
+privateProperties from Runtime.getProperties()
+undefined
+[[PrivateMethods]] in internalProperties from Runtime.getProperties()
+{
+ name : [[PrivateMethods]]
+ value : {
+ className : Array
+ description : PrivateMethods[1]
+ objectId : <objectId>
+ subtype : internal#privateMethodList
+ type : object
}
-]
+}
+{
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : #instanceMethod() { return 2; }
+ objectId : <objectId>
+ subtype : internal#privateMethod
+ type : object
+ }
+ writable : true
+ }
+ ]
+}
Evaluating this.#instanceMethod() in testInstance()
{
result : {
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused.js b/deps/v8/test/inspector/debugger/class-private-methods-unused.js
index 5ab79f2843..270bec428f 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-unused.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
let { session, contextGroup, Protocol } = InspectorTest.start(
"Test accessing unused private methods at runtime"
);
@@ -28,15 +30,12 @@ InspectorTest.runAsyncTestSuite([
// Do not await here, instead oncePaused should be awaited.
Protocol.Runtime.evaluate({ expression: 'run()' });
- InspectorTest.log('Get privateProperties of A in testStatic()');
+ InspectorTest.log('private members of A in testStatic()');
let {
params: { callFrames }
} = await Protocol.Debugger.oncePaused(); // inside A.testStatic()
let frame = callFrames[0];
- let { result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- });
- InspectorTest.logMessage(result.privateProperties);
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
// Variables not referenced in the source code are currently
// considered "optimized away".
@@ -58,11 +57,8 @@ InspectorTest.runAsyncTestSuite([
({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // a.testInstatnce();
frame = callFrames[0];
- InspectorTest.log('get privateProperties of a in testInstance()');
- ({ result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- }));
- InspectorTest.logMessage(result.privateProperties);
+ InspectorTest.log('private members of a in testInstance()');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
InspectorTest.log('Evaluating this.#instanceMethod() in testInstance()');
({ result } = await Protocol.Debugger.evaluateOnCallFrame({
diff --git a/deps/v8/test/inspector/debugger/class-private-methods.js b/deps/v8/test/inspector/debugger/class-private-methods.js
index 86839f87d6..584ecb9b8e 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
let { session, contextGroup, Protocol } = InspectorTest.start(
"Test private class methods"
);
@@ -52,25 +54,18 @@ InspectorTest.runAsyncTestSuite([
params: { callFrames }
} = await Protocol.Debugger.oncePaused(); // inside a.fn()
let frame = callFrames[0];
- let { result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- });
-
- InspectorTest.log('private properties on the base class instance');
- InspectorTest.logMessage(result.privateProperties);
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
- ({ result } = await Protocol.Runtime.getProperties({
+ InspectorTest.log('private accessors properties on the base class instance');
+ await printPrivateMembers(Protocol, InspectorTest, {
objectId: frame.this.objectId,
accessorPropertiesOnly: true,
- }));
-
- InspectorTest.log('private accessors properties on the base class instance');
- InspectorTest.logMessage(result.privateProperties);
+ });
- ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ let { result } = await Protocol.Debugger.evaluateOnCallFrame({
expression: 'this.#inc();',
callFrameId: callFrames[0].callFrameId
- }));
+ });
InspectorTest.log('Evaluating private methods');
InspectorTest.logObject(result);
@@ -111,11 +106,8 @@ InspectorTest.runAsyncTestSuite([
({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // b.fn();
frame = callFrames[0];
- ({ result } = await Protocol.Runtime.getProperties({
- objectId: frame.this.objectId
- }));
- InspectorTest.log('privateProperties on the subclass instance');
- InspectorTest.logMessage(result.privateProperties);
+ InspectorTest.log('private members on the subclass instance');
+ await printPrivateMembers(Protocol, InspectorTest, { objectId: frame.this.objectId });
({ result } = await Protocol.Debugger.evaluateOnCallFrame({
expression: 'this.#subclassMethod();',
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict-expected.txt
new file mode 100644
index 0000000000..5b406d0a4d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict-expected.txt
@@ -0,0 +1,55 @@
+Evaluate conflicting private class member out of class scope in Debugger.evaluateOnCallFrame()
+
+class Klass {
+ #name = "string";
+}
+class ClassWithField extends Klass {
+ #name = "child";
+}
+class ClassWithMethod extends Klass {
+ #name() {}
+}
+class ClassWithAccessor extends Klass {
+ get #name() {}
+ set #name(val) {}
+}
+class StaticClass extends Klass {
+ static #name = "child";
+}
+debugger;
+
+Running test: evaluatePrivateMembers
+Debugger.evaluateOnCallFrame: `(new ClassWithField).#name`
+{
+ className : Error
+ description : Error: Operation is ambiguous because there are more than one private name'#name' on the object at eval (eval at <anonymous> (:18:1), <anonymous>:1:2) at <anonymous>:18:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `(new ClassWithMethod).#name`
+{
+ className : Error
+ description : Error: Operation is ambiguous because there are more than one private name'#name' on the object at eval (eval at <anonymous> (:18:1), <anonymous>:1:2) at <anonymous>:18:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `(new ClassWithAccessor).#name`
+{
+ className : Error
+ description : Error: Operation is ambiguous because there are more than one private name'#name' on the object at eval (eval at <anonymous> (:18:1), <anonymous>:1:2) at <anonymous>:18:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `StaticClass.#name`
+{
+ type : string
+ value : child
+}
+Debugger.evaluateOnCallFrame: `(new StaticClass).#name`
+{
+ type : string
+ value : string
+}
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict.js
new file mode 100644
index 0000000000..8dae59021c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-conflict.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
+const options = {
+ type: 'private-conflicting-member',
+ testRuntime: false,
+ message: `Evaluate conflicting private class member out of class scope in Debugger.evaluateOnCallFrame()`
+};
+PrivateClassMemberInspectorTest.runTest(InspectorTest, options);
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-expected.txt
new file mode 100644
index 0000000000..651306c9cd
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-expected.txt
@@ -0,0 +1,231 @@
+Evaluate private class member out of class scope in Debugger.evaluateOnCallFrame()
+
+class Klass {
+ #field = "string";
+ get #getterOnly() { return "getterOnly"; }
+ set #setterOnly(val) { this.#field = "setterOnlyCalled"; }
+ get #accessor() { return this.#field }
+ set #accessor(val) { this.#field = val; }
+ #method() { return "method"; }
+}
+const obj = new Klass();
+debugger;
+
+Running test: evaluatePrivateMembers
+Checking private fields
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ type : string
+ value : string
+}
+Debugger.evaluateOnCallFrame: `obj.#field = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#field++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Debugger.evaluateOnCallFrame: `++obj.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Debugger.evaluateOnCallFrame: `obj.#field -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private getter-only accessors
+Debugger.evaluateOnCallFrame: `obj.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Debugger.evaluateOnCallFrame: `obj.#getterOnly = 1`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at eval (eval at <anonymous> (:11:1), <anonymous>:1:17) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#getterOnly++`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at eval (eval at <anonymous> (:11:1), <anonymous>:1:16) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#getterOnly -= 3`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at eval (eval at <anonymous> (:11:1), <anonymous>:1:17) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Checking private setter-only accessors
+Debugger.evaluateOnCallFrame: `obj.#setterOnly`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at eval (eval at <anonymous> (:11:1), <anonymous>:1:1) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#setterOnly = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#setterOnly++`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at eval (eval at <anonymous> (:11:1), <anonymous>:1:1) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#setterOnly -= 3`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at eval (eval at <anonymous> (:11:1), <anonymous>:1:1) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Checking private accessors
+Debugger.evaluateOnCallFrame: `obj.#accessor`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Debugger.evaluateOnCallFrame: `obj.#accessor = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#accessor++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Debugger.evaluateOnCallFrame: `++obj.#accessor`
+{
+ type : undefined
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Debugger.evaluateOnCallFrame: `obj.#accessor -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private methods
+Debugger.evaluateOnCallFrame: `obj.#method`
+{
+ className : Function
+ description : #method() { return "method"; }
+ objectId : <objectId>
+ type : function
+}
+Debugger.evaluateOnCallFrame: `obj.#method = 1`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:13) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#method++`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:12) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `++obj.#method`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:7) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `obj.#method -= 3`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:13) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module-expected.txt
new file mode 100644
index 0000000000..dd0cad227a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module-expected.txt
@@ -0,0 +1,16 @@
+Evaluate private class member out of class scope in Debugger.evaluateOnCallFrame() in module
+
+class Klass {
+ #field = 1;
+}
+const obj = new Klass;
+debugger;
+
+
+Running test: evaluatePrivateMembers
+Debugger.evaluateOnCallFrame: `obj.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module.js
new file mode 100644
index 0000000000..98b86f12bf
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-in-module.js
@@ -0,0 +1,32 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { contextGroup, Protocol } = InspectorTest.start(
+ 'Evaluate private class member out of class scope in Debugger.evaluateOnCallFrame() in module'
+);
+
+Protocol.Debugger.enable();
+const source = `
+class Klass {
+ #field = 1;
+}
+const obj = new Klass;
+debugger;
+`;
+
+InspectorTest.log(source);
+contextGroup.addModule(source, 'module');
+
+InspectorTest.runAsyncTestSuite([async function evaluatePrivateMembers() {
+ const { params: { callFrames } } = await Protocol.Debugger.oncePaused();
+ const frame = callFrames[0];
+ const expression = 'obj.#field';
+ InspectorTest.log(`Debugger.evaluateOnCallFrame: \`${expression}\``);
+ const { result: { result } } =
+ await Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: frame.callFrameId,
+ expression
+ });
+ InspectorTest.logMessage(result);
+}]);
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static-expected.txt
new file mode 100644
index 0000000000..28122d3999
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static-expected.txt
@@ -0,0 +1,231 @@
+Evaluate static private class member out of class scope in Debugger.evaluateOnCallFrame()
+
+class Klass {
+ static #field = "string";
+ static get #getterOnly() { return "getterOnly"; }
+ static set #setterOnly(val) { this.#field = "setterOnlyCalled"; }
+ static get #accessor() { return this.#field }
+ static set #accessor(val) { this.#field = val; }
+ static #method() { return "method"; }
+}
+const obj = new Klass();
+debugger;
+
+Running test: evaluatePrivateMembers
+Checking private fields
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ type : string
+ value : string
+}
+Debugger.evaluateOnCallFrame: `Klass.#field = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#field++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Debugger.evaluateOnCallFrame: `++Klass.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Debugger.evaluateOnCallFrame: `Klass.#field -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private getter-only accessors
+Debugger.evaluateOnCallFrame: `Klass.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Debugger.evaluateOnCallFrame: `Klass.#getterOnly = 1`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at eval (eval at <anonymous> (:11:1), <anonymous>:1:19) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#getterOnly++`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at eval (eval at <anonymous> (:11:1), <anonymous>:1:18) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#getterOnly -= 3`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at eval (eval at <anonymous> (:11:1), <anonymous>:1:19) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Checking private setter-only accessors
+Debugger.evaluateOnCallFrame: `Klass.#setterOnly`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at eval (eval at <anonymous> (:11:1), <anonymous>:1:1) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#setterOnly = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#setterOnly++`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at eval (eval at <anonymous> (:11:1), <anonymous>:1:1) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#setterOnly -= 3`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at eval (eval at <anonymous> (:11:1), <anonymous>:1:1) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Checking private accessors
+Debugger.evaluateOnCallFrame: `Klass.#accessor`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Debugger.evaluateOnCallFrame: `Klass.#accessor = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#accessor++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Debugger.evaluateOnCallFrame: `++Klass.#accessor`
+{
+ type : undefined
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Debugger.evaluateOnCallFrame: `Klass.#accessor -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Debugger.evaluateOnCallFrame: `Klass.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private methods
+Debugger.evaluateOnCallFrame: `Klass.#method`
+{
+ className : Function
+ description : #method() { return "method"; }
+ objectId : <objectId>
+ type : function
+}
+Debugger.evaluateOnCallFrame: `Klass.#method = 1`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:15) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#method++`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:14) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `++Klass.#method`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:9) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Debugger.evaluateOnCallFrame: `Klass.#method -= 3`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at eval (eval at <anonymous> (:11:1), <anonymous>:1:15) at <anonymous>:11:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static.js
new file mode 100644
index 0000000000..d01e88de5b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-static.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
+const options = {
+ type: 'private-static-member',
+ testRuntime: false,
+ message: `Evaluate static private class member out of class scope in Debugger.evaluateOnCallFrame()`
+};
+PrivateClassMemberInspectorTest.runTest(InspectorTest, options);
diff --git a/deps/v8/test/mjsunit/web-snapshot-helpers.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-super-expected.txt
index e69de29bb2..e69de29bb2 100644
--- a/deps/v8/test/mjsunit/web-snapshot-helpers.js
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member-super-expected.txt
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member.js
new file mode 100644
index 0000000000..667877920e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-private-class-member.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
+const options = {
+ type: 'private-instance-member',
+ testRuntime: false,
+ message: `Evaluate private class member out of class scope in Debugger.evaluateOnCallFrame()`
+};
+PrivateClassMemberInspectorTest.runTest(InspectorTest, options);
diff --git a/deps/v8/test/inspector/debugger/instrumentation-multiple-sessions-expected.txt b/deps/v8/test/inspector/debugger/instrumentation-multiple-sessions-expected.txt
new file mode 100644
index 0000000000..1b8131848f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/instrumentation-multiple-sessions-expected.txt
@@ -0,0 +1,64 @@
+Checks instrumentation pause with multiple sessions
+
+Running test: testTwoInstrumentationBreaksResume
+Created two sessions.
+Paused 1: instrumentation
+Paused 2: instrumentation
+Resumed session 1
+Resumed session 2
+Evaluation result: 42
+Evaluation finished
+
+Running test: testInstrumentedSessionNotification
+Created two sessions.
+Session 1 paused (instrumentation)
+Session 2 paused (other)
+Resumed session 1
+Resumed session 2
+Evaluation result: 42
+Evaluation finished
+
+Running test: testNonInstrumentedSessionCannotResumeInstrumentationPause
+Created two sessions.
+Session 1 paused (instrumentation)
+Session 2 paused (other)
+Called "resume" on session 2
+Called "resume" on session 1
+Resumed session 1
+Resumed session 2
+Evaluation result: 42
+Evaluation finished
+
+Running test: testEvaluationFromNonInstrumentedSession
+Created two sessions.
+Session 1 paused (instrumentation)
+Session 2 paused (other)
+Called "resume" on session 1
+Resumed session 1
+Resumed session 2
+Evaluation result: 42
+Evaluation finished
+
+Running test: testTransparentEvaluationFromNonInstrumentedSessionDuringPause
+Created two sessions.
+Session 1 paused (instrumentation)
+Session 2 paused (other)
+Resumed session 1
+Session 2 evaluation result: 42
+
+Running test: testInstrumentationStopResumesWithOtherSessions
+Created two sessions.
+Session 1 paused (instrumentation)
+Stopped session 1
+Resumed session 2
+Session 2 evaluation result: 42
+
+Running test: testInstrumentationPauseAndNormalPause
+Created two sessions.
+Session 1 paused (instrumentation)
+Session 2 paused (other)
+Session 2 pause requested
+Session 1 instrumentation resume requested
+Session 2 paused (other)
+Session 2 resumed
+Session 1 evaluation result: 42
diff --git a/deps/v8/test/inspector/debugger/instrumentation-multiple-sessions.js b/deps/v8/test/inspector/debugger/instrumentation-multiple-sessions.js
new file mode 100644
index 0000000000..79022dda49
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/instrumentation-multiple-sessions.js
@@ -0,0 +1,292 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.log('Checks instrumentation pause with multiple sessions');
+
+InspectorTest.runAsyncTestSuite([
+ async function testTwoInstrumentationBreaksResume() {
+ // Initialize two sessions with instrumentation breakpoints.
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+ await Protocol2.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ InspectorTest.log('Created two sessions.');
+
+ // Expect both sessions pausing on instrumentation breakpoint.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ const paused2 = Protocol2.Debugger.oncePaused();
+ const evaluationFinished =
+ Protocol1.Runtime.evaluate({expression: '42'})
+ .then(
+ r => InspectorTest.log(
+ `Evaluation result: ${r.result.result.value}`));
+
+ // Verify that the instrumentation breakpoint paused the sessions.
+ InspectorTest.log(`Paused 1: ${(await paused1).params.reason}`);
+ InspectorTest.log(`Paused 2: ${(await paused2).params.reason}`);
+
+ // Let us call resume in the first session and make sure that this
+ // does not resume the instrumentation pause (the instrumentation
+ // pause should only resume once all sessions ask for resumption).
+ //
+ // Unfortunately, we cannot check for absence of resumptions, so
+ // let's just give the evaluation chance to finish early by calling
+ // 'resume' on the first session multiple times.
+ for (let i = 0; i < 20; i++) {
+ await Protocol1.Debugger.resume();
+ }
+ InspectorTest.log('Resumed session 1');
+
+ // Resuming the second session should allow the evaluation to
+ // finish.
+ await Protocol2.Debugger.resume();
+ InspectorTest.log('Resumed session 2');
+
+ await evaluationFinished;
+ InspectorTest.log('Evaluation finished');
+ },
+ async function testInstrumentedSessionNotification() {
+ // Initialize two debugger sessions - one with instrumentation
+ // breakpoints, one without.
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+ InspectorTest.log('Created two sessions.');
+
+ // Verify that the instrumented session sees the instrumentation pause.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ const paused2 = Protocol2.Debugger.oncePaused();
+ const evaluationFinished =
+ Protocol1.Runtime.evaluate({expression: '42'})
+ .then(
+ r => InspectorTest.log(
+ `Evaluation result: ${r.result.result.value}`));
+ InspectorTest.log(`Session 1 paused (${(await paused1).params.reason})`);
+ InspectorTest.log(`Session 2 paused (${(await paused2).params.reason})`);
+
+ const onResume1 = Protocol1.Debugger.onceResumed();
+ const onResume2 = Protocol2.Debugger.onceResumed();
+ await Protocol1.Debugger.resume();
+ await onResume1;
+ InspectorTest.log('Resumed session 1');
+ await onResume2;
+ InspectorTest.log('Resumed session 2');
+
+ await evaluationFinished;
+ InspectorTest.log('Evaluation finished');
+ },
+ async function testNonInstrumentedSessionCannotResumeInstrumentationPause() {
+ // Initialize two debugger sessions - one with instrumentation
+ // breakpoints, one without.
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+ InspectorTest.log('Created two sessions.');
+
+ // Make sure the non-instrumented session is not resumed while the
+ // instrumentation pause is active.
+ Protocol2.Debugger.onResumed(
+ m => InspectorTest.log('[Unexpected] Session 2 resumed'));
+
+ // Induce instrumentation pause.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ const paused2 = Protocol2.Debugger.oncePaused();
+ const evaluationFinished =
+ Protocol1.Runtime.evaluate({expression: '42'})
+ .then(
+ r => InspectorTest.log(
+ `Evaluation result: ${r.result.result.value}`));
+ InspectorTest.log(`Session 1 paused (${(await paused1).params.reason})`);
+ InspectorTest.log(`Session 2 paused (${(await paused2).params.reason})`);
+
+ // Calling 'resume' on the non-instrumented session should not have any
+ // effect on the session in the instrumentation pause.
+ for (let i = 0; i < 10; i++) {
+ await Protocol2.Debugger.resume();
+ }
+ InspectorTest.log('Called "resume" on session 2');
+
+ const onResume1 = Protocol1.Debugger.onceResumed();
+ const onResume2 = Protocol2.Debugger.onceResumed();
+ await Protocol1.Debugger.resume();
+ InspectorTest.log('Called "resume" on session 1');
+ await onResume1;
+ InspectorTest.log('Resumed session 1');
+ await onResume2;
+ InspectorTest.log('Resumed session 2');
+
+ await evaluationFinished;
+ InspectorTest.log('Evaluation finished');
+ },
+ async function testEvaluationFromNonInstrumentedSession() {
+ // Initialize two debugger sessions - one with instrumentation
+ // breakpoints, one without.
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+ InspectorTest.log('Created two sessions.');
+
+ // Start evaluation in the non-instrumentation session and expect that
+ // the instrumentation session is paused.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ const paused2 = Protocol2.Debugger.oncePaused();
+ const evaluationFinished =
+ Protocol2.Runtime.evaluate({expression: '42'})
+ .then(
+ r => InspectorTest.log(
+ `Evaluation result: ${r.result.result.value}`));
+ InspectorTest.log(`Session 1 paused (${(await paused1).params.reason})`);
+ InspectorTest.log(`Session 2 paused (${(await paused2).params.reason})`);
+
+ const onResume1 = Protocol1.Debugger.onceResumed();
+ const onResume2 = Protocol2.Debugger.onceResumed();
+ await Protocol1.Debugger.resume();
+ InspectorTest.log('Called "resume" on session 1');
+ await onResume1;
+ InspectorTest.log('Resumed session 1');
+ await onResume2;
+ InspectorTest.log('Resumed session 2');
+
+ await evaluationFinished;
+ InspectorTest.log('Evaluation finished');
+ },
+ async function
+ testTransparentEvaluationFromNonInstrumentedSessionDuringPause() {
+ // Initialize two debugger sessions - one with instrumentation
+ // breakpoints, one without.
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+ InspectorTest.log('Created two sessions.');
+
+ // Enter instrumentation pause.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ const paused2 = Protocol2.Debugger.oncePaused();
+ Protocol1.Runtime.evaluate({expression: 'null'});
+ InspectorTest.log(
+ `Session 1 paused (${(await paused1).params.reason})`);
+ InspectorTest.log(
+ `Session 2 paused (${(await paused2).params.reason})`);
+
+ // Start evaluation in session 2.
+ const evaluation = Protocol2.Runtime.evaluate({expression: '42'});
+
+ await Protocol1.Debugger.resume();
+ InspectorTest.log('Resumed session 1');
+
+ // Make sure the evaluation finished.
+ InspectorTest.log(`Session 2 evaluation result: ${
+ (await evaluation).result.result.value}`);
+ },
+ async function testInstrumentationStopResumesWithOtherSessions() {
+ // Initialize two debugger sessions - one with instrumentation
+ // breakpoints, one without.
+ let contextGroup = new InspectorTest.ContextGroup();
+
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+
+ InspectorTest.log('Created two sessions.');
+
+ // Enter instrumentation pause.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ Protocol1.Runtime.evaluate({expression: 'null'});
+ InspectorTest.log(`Session 1 paused (${(await paused1).params.reason})`);
+
+ // Start evaluation in session 2.
+ const evaluation = Protocol2.Runtime.evaluate({expression: '42'});
+
+ // Stop the first session.
+ const onResume2 = Protocol2.Debugger.onceResumed();
+ session1.stop();
+ InspectorTest.log('Stopped session 1');
+
+ await onResume2;
+ InspectorTest.log('Resumed session 2');
+
+ // Make sure the second session gets the evaluation result.
+ InspectorTest.log(`Session 2 evaluation result: ${
+ (await evaluation).result.result.value}`);
+ },
+ async function testInstrumentationPauseAndNormalPause() {
+ // Initialize two debugger sessions - one with instrumentation
+ // breakpoints, one without.
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ Protocol1.Debugger.enable();
+ await Protocol1.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+
+ InspectorTest.log('Created two sessions.');
+
+ // Enter instrumentation pause.
+ const paused1 = Protocol1.Debugger.oncePaused();
+ const instrumentationPaused2 = Protocol2.Debugger.oncePaused();
+ const evaluation = Protocol1.Runtime.evaluate({expression: '42'});
+ InspectorTest.log(`Session 1 paused (${(await paused1).params.reason})`);
+ InspectorTest.log(
+ `Session 2 paused (${(await instrumentationPaused2).params.reason})`);
+
+ await Protocol2.Debugger.pause();
+ InspectorTest.log('Session 2 pause requested');
+ await Protocol1.Debugger.resume();
+ InspectorTest.log('Session 1 instrumentation resume requested');
+
+ // Check that the second session pauses and resumes correctly.
+ const userPaused2 = Protocol2.Debugger.oncePaused();
+ InspectorTest.log(
+ `Session 2 paused (${(await userPaused2).params.reason})`);
+
+ const resumed2 = Protocol2.Debugger.onceResumed();
+ Protocol2.Debugger.resume();
+ await resumed2;
+ InspectorTest.log('Session 2 resumed');
+
+ InspectorTest.log(`Session 1 evaluation result: ${
+ (await evaluation).result.result.value}`);
+ }
+]);
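Not part of the patch: a minimal sketch, reusing only the InspectorTest helpers already used above, of the resume rule these multi-session tests pin down: an instrumentation pause ends only after every session that set the instrumentation breakpoint has asked to resume.

  const contextGroup = new InspectorTest.ContextGroup();
  const s1 = contextGroup.connect();
  const s2 = contextGroup.connect();
  await s1.Protocol.Debugger.enable();
  await s2.Protocol.Debugger.enable();
  await s1.Protocol.Debugger.setInstrumentationBreakpoint(
      {instrumentation: 'beforeScriptExecution'});
  await s2.Protocol.Debugger.setInstrumentationBreakpoint(
      {instrumentation: 'beforeScriptExecution'});
  const done = s1.Protocol.Runtime.evaluate({expression: '1 + 1'});
  await s1.Protocol.Debugger.oncePaused();  // both instrumented sessions pause
  await s2.Protocol.Debugger.oncePaused();
  await s1.Protocol.Debugger.resume();      // not sufficient on its own
  await s2.Protocol.Debugger.resume();      // last instrumented session releases the pause
  await done;                               // evaluation can now finish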
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index 1bdef8231b..60fa264522 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -384,6 +384,11 @@ expression: /123/
type : boolean
value : false
}
+{
+ name : unicodeSets
+ type : boolean
+ value : false
+}
expression: ({})
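Not part of the patch: the new `unicodeSets` internal property mirrors the RegExp `v` flag in JavaScript; a quick sketch of the language-level behaviour the preview reflects.

  /123/.unicodeSets;                   // false - no `v` flag, as previewed above
  new RegExp('123', 'v').unicodeSets;  // true
  new RegExp('123', 'v').flags;        // 'v'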
diff --git a/deps/v8/test/inspector/debugger/pause-on-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/pause-on-instrumentation-expected.txt
index 0a24b69c8f..67dda7889d 100644
--- a/deps/v8/test/inspector/debugger/pause-on-instrumentation-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-instrumentation-expected.txt
@@ -8,3 +8,9 @@ Resumed.
Paused at foo.js with reason "other".
Resumed.
Done.
+
+Running test: testInstrumentationRemoveDuringInstrumentationPause
+Paused at with reason "instrumentation".
+Removed instrumentation breakpoint
+Resumed
+Evaluation result: 42
diff --git a/deps/v8/test/inspector/debugger/pause-on-instrumentation.js b/deps/v8/test/inspector/debugger/pause-on-instrumentation.js
index d3b22eaaf8..cde65195fc 100644
--- a/deps/v8/test/inspector/debugger/pause-on-instrumentation.js
+++ b/deps/v8/test/inspector/debugger/pause-on-instrumentation.js
@@ -53,4 +53,27 @@ async function testPauseDuringInstrumentationPause() {
await Protocol.Debugger.disable();
}
-InspectorTest.runAsyncTestSuite([testPauseDuringInstrumentationPause]);
+async function testInstrumentationRemoveDuringInstrumentationPause() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+
+ const {result: {breakpointId}} =
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const pause = Protocol.Debugger.oncePaused();
+ Protocol.Runtime.evaluate({expression: 'console.log(\'Hi\')'});
+ logPause(await pause);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ InspectorTest.log('Removed instrumentation breakpoint');
+ await Protocol.Debugger.resume();
+ InspectorTest.log('Resumed');
+
+ const {result: {result: {value}}} =
+ await Protocol.Runtime.evaluate({expression: '42'});
+ InspectorTest.log(`Evaluation result: ${value}`);
+}
+
+InspectorTest.runAsyncTestSuite([
+ testPauseDuringInstrumentationPause,
+ testInstrumentationRemoveDuringInstrumentationPause
+]);
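Not part of the patch: a minimal sketch, assuming the single-session Protocol binding used in this file, of the flow the new test exercises: hit the instrumentation pause, remove the breakpoint while still paused, then resume.

  const {result: {breakpointId}} =
      await Protocol.Debugger.setInstrumentationBreakpoint(
          {instrumentation: 'beforeScriptExecution'});
  const paused = Protocol.Debugger.oncePaused();
  Protocol.Runtime.evaluate({expression: '1'});
  await paused;                                         // instrumentation pause
  await Protocol.Debugger.removeBreakpoint({breakpointId});
  await Protocol.Debugger.resume();
  await Protocol.Runtime.evaluate({expression: '42'});  // runs without pausing again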
diff --git a/deps/v8/test/inspector/debugger/restore-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/restore-breakpoint-expected.txt
index ac23487bf8..3de4c54f79 100644
--- a/deps/v8/test/inspector/debugger/restore-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/restore-breakpoint-expected.txt
@@ -8,6 +8,79 @@ function foo() {
#boo();
}
+Running test: testSameSourceDuplicateLines
+function foo() {
+boo();
+// something
+#boo();
+}
+function foo() {
+boo();
+// something
+#boo();
+}
+
+Running test: testSameSourceDuplicateLinesLongLineBetween
+function foo() {
+boo();
+/////////////////////////////////////////////////////////////////////////////...
+#boo();
+}
+function foo() {
+boo();
+/////////////////////////////////////////////////////////////////////////////...
+#boo();
+}
+
+Running test: testSameSourceLongCommentBefore
+/////////////////////////////////////////////////////////////////////////////...
+function foo() {
+bad();
+#boo();
+}
+/////////////////////////////////////////////////////////////////////////////...
+function foo() {
+bad();
+#boo();
+}
+
+Running test: testInsertNewLineWithLongCommentBefore
+/////////////////////////////////////////////////////////////////////////////...
+function foo() {
+boo();
+#boo();
+}
+/////////////////////////////////////////////////////////////////////////////...
+function foo() {
+boo();
+
+#boo();
+}
+
+Running test: testSameSourceBreakAfterReturnWithWhitespace
+function baz() {
+}
+
+function foo() {
+return 1;# }
+function baz() {
+}
+
+function foo() {
+return 1;# }
+
+Running test: testSameSourceDuplicateLinesDifferentPrefix
+function foo() {
+boo();
+// something
+#boo();
+}
+function foo() {
+#boo();
+// somethinX
+boo();
+}
+
Running test: testOneLineOffset
function foo() {
#boo();
diff --git a/deps/v8/test/inspector/debugger/restore-breakpoint.js b/deps/v8/test/inspector/debugger/restore-breakpoint.js
index 020143f6d1..809f609150 100644
--- a/deps/v8/test/inspector/debugger/restore-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/restore-breakpoint.js
@@ -9,31 +9,68 @@ var finishedTests = 0;
InspectorTest.runTestSuite([
function testSameSource(next) {
var source = 'function foo() {\nboo();\n}';
- test(source, source, { lineNumber: 1, columnNumber: 0 }, next);
+ test(source, source, {lineNumber: 1, columnNumber: 0}, next);
+ },
+
+ function testSameSourceDuplicateLines(next) {
+ var source = 'function foo() {\nboo();\n// something\nboo();\n}';
+ test(source, source, {lineNumber: 2, columnNumber: 0}, next);
+ },
+
+ function testSameSourceDuplicateLinesLongLineBetween(next) {
+ var longComment = '/'.repeat(1e4);
+ var source = `function foo() {\nboo();\n${longComment}\nboo();\n}`;
+ test(source, source, {lineNumber: 2, columnNumber: 0}, next);
+ },
+
+ function testSameSourceLongCommentBefore(next) {
+ var longComment = '/'.repeat(1e3);
+ var source = `${longComment}\nfunction foo() {\nbad();\nboo();\n}`;
+ test(source, source, {lineNumber: 3, columnNumber: 0}, next);
+ },
+
+ function testInsertNewLineWithLongCommentBefore(next) {
+ var longComment = '/'.repeat(1e3);
+ var source = `${longComment}\nfunction foo() {\nboo();\nboo();\n}`;
+ var newSource = `${longComment}\nfunction foo() {\nboo();\n\nboo();\n}`;
+ test(source, newSource, {lineNumber: 3, columnNumber: 0}, next);
+ },
+
+ function testSameSourceBreakAfterReturnWithWhitespace(next) {
+ var whitespace = ' '.repeat(30);
+ var source =
+ `function baz() {\n}\n\nfunction foo() {\nreturn 1;${whitespace}}`;
+ test(source, source, {lineNumber: 4, columnNumber: 9}, next);
+ },
+
+ function testSameSourceDuplicateLinesDifferentPrefix(next) {
+ var source = 'function foo() {\nboo();\n// something\nboo();\n}';
+ var newSource = 'function foo() {\nboo();\n// somethinX\nboo();\n}';
+ test(source, newSource, {lineNumber: 2, columnNumber: 0}, next);
},
function testOneLineOffset(next) {
var source = 'function foo() {\nboo();\n}';
var newSource = 'function foo() {\nboo();\nboo();\n}';
- test(source, newSource, { lineNumber: 1, columnNumber: 0 }, next);
+ test(source, newSource, {lineNumber: 1, columnNumber: 0}, next);
},
function testTwoSimilarLinesCloseToOriginalLocation1(next) {
var source = 'function foo() {\n\n\nboo();\n}';
var newSource = 'function foo() {\nboo();\n\nnewCode();\nboo();\n\n\n\nboo();\n}';
- test(source, newSource, { lineNumber: 3, columnNumber: 0 }, next);
+ test(source, newSource, {lineNumber: 3, columnNumber: 0}, next);
},
function testTwoSimilarLinesCloseToOriginalLocation2(next) {
var source = 'function foo() {\n\n\nboo();\n}';
var newSource = 'function foo() {\nboo();\nnewLongCode();\nnewCode();\nboo();\n\n\n\nboo();\n}';
- test(source, newSource, { lineNumber: 3, columnNumber: 0 }, next);
+ test(source, newSource, {lineNumber: 3, columnNumber: 0}, next);
},
function testHintIgnoreWhiteSpaces(next) {
var source = 'function foo() {\n\n\n\nboo();\n}';
var newSource = 'function foo() {\nfoo();\n\n\nboo();\n}';
- test(source, newSource, { lineNumber: 1, columnNumber: 0 }, next);
+ test(source, newSource, {lineNumber: 1, columnNumber: 0}, next);
},
function testCheckOnlyLimitedOffsets(next) {
diff --git a/deps/v8/test/inspector/debugger/session-stop-expected.txt b/deps/v8/test/inspector/debugger/session-stop-expected.txt
new file mode 100644
index 0000000000..9896c36f38
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/session-stop-expected.txt
@@ -0,0 +1,20 @@
+Checks V8InspectorSession::stop
+
+Running test: testSessionStopResumesPause
+Evaluation returned: 42
+
+Running test: testSessionStopResumesInstrumentationPause
+Paused: instrumentation
+Evaluation returned: 42
+
+Running test: testSessionStopDisablesDebugger
+Pause error(?): Debugger agent is not enabled
+
+Running test: testSessionStopDisallowsReenabling
+Pause error(?) after stop: Debugger agent is not enabled
+Pause error(?) after re-enable: Debugger agent is not enabled
+
+Running test: testSessionStopDoesNotDisableOtherSessions
+Session 1 pause error after stop: Debugger agent is not enabled
+Session 2 paused: other
+Session 2 evaluation: 42
\ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/session-stop.js b/deps/v8/test/inspector/debugger/session-stop.js
new file mode 100644
index 0000000000..5c667ee93d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/session-stop.js
@@ -0,0 +1,86 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.log('Checks V8InspectorSession::stop');
+
+InspectorTest.runAsyncTestSuite([
+ async function testSessionStopResumesPause() {
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session = contextGroup.connect();
+ let Protocol = session.Protocol;
+
+ Protocol.Debugger.enable();
+ await Protocol.Debugger.pause();
+ const result = Protocol.Runtime.evaluate({expression: '42'});
+ session.stop();
+ InspectorTest.log(
+ `Evaluation returned: ${(await result).result.result.value}`);
+ },
+ async function testSessionStopResumesInstrumentationPause() {
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session = contextGroup.connect();
+ let Protocol = session.Protocol;
+
+ Protocol.Debugger.enable();
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'});
+ const paused = Protocol.Debugger.oncePaused();
+ const result = Protocol.Runtime.evaluate({expression: '42'});
+ InspectorTest.log(`Paused: ${(await paused).params.reason}`);
+ session.stop();
+ InspectorTest.log(
+ `Evaluation returned: ${(await result).result.result.value}`);
+ },
+ async function testSessionStopDisablesDebugger() {
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session = contextGroup.connect();
+ let Protocol = session.Protocol;
+
+ await Protocol.Debugger.enable();
+ session.stop();
+ const pauseResult = await Protocol.Debugger.pause();
+ InspectorTest.log(`Pause error(?): ${pauseResult?.error?.message}`);
+ },
+ async function testSessionStopDisallowsReenabling() {
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session = contextGroup.connect();
+ let Protocol = session.Protocol;
+
+ await Protocol.Debugger.enable();
+ session.stop();
+ const pauseResultAfterStop = await Protocol.Debugger.pause();
+ InspectorTest.log(
+ `Pause error(?) after stop: ${pauseResultAfterStop?.error?.message}`);
+ await Protocol.Debugger.enable();
+ const pauseResult = await Protocol.Debugger.pause();
+ InspectorTest.log(
+ `Pause error(?) after re-enable: ${pauseResult?.error?.message}`);
+ },
+ async function testSessionStopDoesNotDisableOtherSessions() {
+ let contextGroup = new InspectorTest.ContextGroup();
+
+ let session1 = contextGroup.connect();
+ let Protocol1 = session1.Protocol;
+ await Protocol1.Debugger.enable();
+
+ let session2 = contextGroup.connect();
+ let Protocol2 = session2.Protocol;
+ await Protocol2.Debugger.enable();
+
+ session1.stop();
+ const pauseResult1 = await Protocol1.Debugger.pause();
+ InspectorTest.log(
+ `Session 1 pause error after stop: ${pauseResult1?.error?.message}`);
+
+ await Protocol2.Debugger.pause();
+
+ const paused = Protocol2.Debugger.oncePaused();
+ const result = Protocol2.Runtime.evaluate({expression: '42'});
+ InspectorTest.log(`Session 2 paused: ${(await paused).params.reason}`);
+ await Protocol2.Debugger.resume();
+
+ InspectorTest.log(
+ `Session 2 evaluation: ${(await result).result.result.value}`);
+ },
+]);
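Not part of the patch: a minimal sketch of the V8InspectorSession::stop semantics these tests check, reusing the same ContextGroup/session helpers: stopping a session releases any pause it holds and disables its agents, without affecting other sessions in the group.

  const contextGroup = new InspectorTest.ContextGroup();
  const session = contextGroup.connect();
  const Protocol = session.Protocol;
  await Protocol.Debugger.enable();
  await Protocol.Debugger.pause();
  const result = Protocol.Runtime.evaluate({expression: '6 * 7'});
  session.stop();                      // releases the pending pause
  (await result).result.result.value;  // 42
  // A later Protocol.Debugger.pause() now answers with
  // "Debugger agent is not enabled", even after calling enable() again.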
diff --git a/deps/v8/test/inspector/debugger/set-breakpoints-active-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoints-active-expected.txt
new file mode 100644
index 0000000000..1e9009350e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoints-active-expected.txt
@@ -0,0 +1,36 @@
+Tests for set-breakpoints-active
+
+Running test: testDeactivatedBreakpointsAfterReconnect
+Breakpoints activated.
+Paused. (reason: other)
+Debugger break executed.
+Breakpoints deactivated.
+Reconnected.
+Debugger break executed.
+
+Running test: testDeactivatedBreakpointsAfterDisableEnable
+Breakpoints activated.
+Paused. (reason: other)
+Debugger break executed.
+Breakpoints deactivated.
+Disabled.
+Enabled.
+Debugger break executed.
+
+Running test: testDeactivateBreakpointsWhileDisabled
+Breakpoints activated.
+Paused. (reason: other)
+Debugger break executed.
+Disabled.
+Breakpoints deactivated.
+Enabled.
+Debugger break executed.
+
+Running test: testActivateBreakpointsWhileDisabled
+Breakpoints deactivated.
+Debugger break executed.
+Disabled.
+Breakpoints activated.
+Enabled.
+Paused. (reason: other)
+Debugger break executed.
diff --git a/deps/v8/test/inspector/debugger/set-breakpoints-active.js b/deps/v8/test/inspector/debugger/set-breakpoints-active.js
new file mode 100644
index 0000000000..40dc4beb11
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoints-active.js
@@ -0,0 +1,69 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, Protocol} =
+ InspectorTest.start('Tests for set-breakpoints-active');
+
+Protocol.Debugger.enable();
+Protocol.Debugger.onPaused(({params}) => {
+ InspectorTest.log(`Paused. (reason: ${params.reason})`);
+ Protocol.Debugger.resume();
+});
+
+InspectorTest.runAsyncTestSuite([
+ async function testDeactivatedBreakpointsAfterReconnect() {
+ await Protocol.Debugger.setBreakpointsActive({active: true});
+ InspectorTest.log('Breakpoints activated.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ await Protocol.Debugger.setBreakpointsActive({active: false});
+ InspectorTest.log('Breakpoints deactivated.');
+ session.reconnect();
+ InspectorTest.log('Reconnected.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ },
+ async function testDeactivatedBreakpointsAfterDisableEnable() {
+ await Protocol.Debugger.setBreakpointsActive({active: true});
+ InspectorTest.log('Breakpoints activated.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ await Protocol.Debugger.setBreakpointsActive({active: false});
+ InspectorTest.log('Breakpoints deactivated.');
+ await Protocol.Debugger.disable();
+ InspectorTest.log('Disabled.');
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Enabled.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ },
+ async function testDeactivateBreakpointsWhileDisabled() {
+ await Protocol.Debugger.setBreakpointsActive({active: true});
+ InspectorTest.log('Breakpoints activated.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ await Protocol.Debugger.disable();
+ InspectorTest.log('Disabled.');
+ await Protocol.Debugger.setBreakpointsActive({active: false});
+ InspectorTest.log('Breakpoints deactivated.');
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Enabled.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ },
+ async function testActivateBreakpointsWhileDisabled() {
+ await Protocol.Debugger.setBreakpointsActive({active: false});
+ InspectorTest.log('Breakpoints deactivated.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ await Protocol.Debugger.disable();
+ InspectorTest.log('Disabled.');
+ await Protocol.Debugger.setBreakpointsActive({active: true});
+ InspectorTest.log('Breakpoints activated.');
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Enabled.');
+ await Protocol.Runtime.evaluate({expression: 'debugger'});
+ InspectorTest.log('Debugger break executed.');
+ },
+]);
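Not part of the patch: a minimal sketch of the state machine the four tests above pin down, using the same Protocol binding: the Debugger.setBreakpointsActive flag is remembered across disable/enable and can be flipped while the agent is disabled.

  await Protocol.Debugger.setBreakpointsActive({active: false});
  await Protocol.Runtime.evaluate({expression: 'debugger'});     // no pause
  await Protocol.Debugger.disable();
  await Protocol.Debugger.setBreakpointsActive({active: true});  // while disabled
  await Protocol.Debugger.enable();
  await Protocol.Runtime.evaluate({expression: 'debugger'});     // pauses again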
diff --git a/deps/v8/test/inspector/debugger/set-script-source-es-module-expected.txt b/deps/v8/test/inspector/debugger/set-script-source-es-module-expected.txt
new file mode 100644
index 0000000000..152643a9c5
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-script-source-es-module-expected.txt
@@ -0,0 +1,9 @@
+Test that live editing the top-level function of an ES module does not work [crbug.com/1413447]
+Uncommenting the import line should fail:
+{
+ status : BlockedByTopLevelEsModuleChange
+}
+Uncommenting the console.log line should work:
+{
+ status : Ok
+}
diff --git a/deps/v8/test/inspector/debugger/set-script-source-es-module.js b/deps/v8/test/inspector/debugger/set-script-source-es-module.js
new file mode 100644
index 0000000000..a307c78658
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-script-source-es-module.js
@@ -0,0 +1,37 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test that live editing the top-level function of an ES module does not work [crbug.com/1413447]');
+
+const inputSnippet = `
+//$1 import {b} from "./b.js";
+
+export function foo() {
+ console.log('foo');
+}
+
+export function bar() {
+ //$2 console.log('bar');
+}
+`;
+
+(async () => {
+ await Protocol.Debugger.enable();
+
+ contextGroup.addModule(inputSnippet, 'a.js', 3, 8);
+ const { params: { scriptId } } = await Protocol.Debugger.onceScriptParsed();
+
+ InspectorTest.log('Uncommenting the import line should fail:');
+ const changedTopLevelModule = inputSnippet.replace('//$1 ', '');
+ const response1 = await Protocol.Debugger.setScriptSource({ scriptId, scriptSource: changedTopLevelModule });
+ InspectorTest.logMessage(response1.result);
+
+ InspectorTest.log('Uncommenting the console.log line should work:');
+ const changedFunction = inputSnippet.replace('//$2 ', '');
+ const response2 = await Protocol.Debugger.setScriptSource({ scriptId, scriptSource: changedFunction });
+ InspectorTest.logMessage(response2.result);
+
+ InspectorTest.completeTest();
+})();
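Not part of the patch: a minimal sketch of the distinction the test checks via Debugger.setScriptSource; `moduleSourceWithNewImport` and `moduleSourceWithEditedBody` are placeholder names for the two edited sources built above.

  const blocked = await Protocol.Debugger.setScriptSource(
      {scriptId, scriptSource: moduleSourceWithNewImport});
  // blocked.result.status === 'BlockedByTopLevelEsModuleChange'
  const ok = await Protocol.Debugger.setScriptSource(
      {scriptId, scriptSource: moduleSourceWithEditedBody});
  // ok.result.status === 'Ok'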
diff --git a/deps/v8/test/inspector/debugger/set-script-source-top-frame-expected.txt b/deps/v8/test/inspector/debugger/set-script-source-top-frame-expected.txt
index 8f39a070fd..88a6cde245 100644
--- a/deps/v8/test/inspector/debugger/set-script-source-top-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-script-source-top-frame-expected.txt
@@ -6,7 +6,7 @@ function testExpression(a, b) {
Paused at (after live edit):
function testExpression(a, b) {
- return a * b;#
+ #return a * b;
}
Result:
diff --git a/deps/v8/test/inspector/debugger/suspended-generator-scopes.js b/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
index 4ed4796583..55a1fd57ca 100644
--- a/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
+++ b/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-experimental-remove-internal-scopes-property
-
let {session, contextGroup, Protocol} = InspectorTest.start('Tests that suspended generators produce scopes');
contextGroup.addScript(`
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-anyref-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-anyref-expected.txt
index 4b627d5372..37585580b9 100644
--- a/deps/v8/test/inspector/debugger/wasm-gc-anyref-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-gc-anyref-expected.txt
@@ -8,31 +8,32 @@ Module instantiated.
Tables populated.
Setting breakpoint
{
- columnNumber : 246
+ columnNumber : 316
lineNumber : 0
scriptId : <scriptId>
}
Paused:
-Script wasm://wasm/739f5f0a byte offset 246: Wasm opcode 0x01 (kExprNop)
+Script wasm://wasm/2633f626 byte offset 316: Wasm opcode 0x01 (kExprNop)
Scope:
-at $main (0:246):
+at $main (0:316):
- scope (wasm-expression-stack):
stack:
- scope (local):
$anyref_local: Struct ((ref $type0))
$anyref_local2: Array ((ref $type1))
- $anyref_local_i31: null (anyref)
+ $anyref_local_i31: 30 (anyref)
$anyref_local_null: null (anyref)
- scope (module):
- instance: exports: "exported_ref_table" (Table), "exported_func_table" (Table), "fill_tables" (Function), "main" (Function)
+ instance: exports: "exported_ref_table" (Table), "exported_func_table" (Table), "exported_i31_table" (Table), "fill_tables" (Function), "main" (Function)
module: Module
functions: "$my_func": (Function), "$fill_tables": (Function), "$main": (Function)
globals: "$global0": function $my_func() { [native code] } (funcref)
tables:
- $import.any_table: 0: Array(2) (anyref), 1: Struct ((ref $type0)), 2: undefined (anyref)
- $import.func_table: 0: function () { [native code] } (funcref), 1: function $my_func() { [native code] } (funcref), 2: undefined (funcref)
- $exported_ref_table: 0: Struct ((ref $type0)), 1: Array ((ref $type1)), 2: undefined (anyref), 3: undefined (anyref)
- $exported_func_table: 0: function external_fct() { [native code] } (funcref), 1: function $my_func() { [native code] } (funcref), 2: undefined (funcref)
+ $import.any_table: 0: Array(2) (anyref), 1: 321 (anyref), 2: null (anyref), 3: null (anyref)
+ $import.func_table: 0: function () { [native code] } (funcref), 1: function $my_func() { [native code] } (funcref), 2: null (funcref)
+ $exported_ref_table: 0: Struct ((ref $type0)), 1: Array ((ref $type1)), 2: 30 (anyref), 3: null (anyref)
+ $exported_func_table: 0: function external_fct() { [native code] } (funcref), 1: function $my_func() { [native code] } (funcref), 2: null (funcref)
+ $exported_i31_table: 0: 123456 (i31ref), 1: -123 (i31ref), 2: null (i31ref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-anyref.js b/deps/v8/test/inspector/debugger/wasm-gc-anyref.js
index 04750908cf..aa835e82e4 100644
--- a/deps/v8/test/inspector/debugger/wasm-gc-anyref.js
+++ b/deps/v8/test/inspector/debugger/wasm-gc-anyref.js
@@ -66,16 +66,19 @@ async function printPauseLocationsAndContinue(msg) {
async function instantiateWasm() {
var builder = new WasmModuleBuilder();
+ builder.startRecGroup();
let struct_type = builder.addStruct([makeField(kWasmI32, false)]);
let array_type = builder.addArray(kWasmI32);
let imported_ref_table =
- builder.addImportedTable('import', 'any_table', 3, 3, kWasmAnyRef);
+ builder.addImportedTable('import', 'any_table', 4, 4, kWasmAnyRef);
let imported_func_table =
builder.addImportedTable('import', 'func_table', 3, 3, kWasmFuncRef);
let ref_table = builder.addTable(kWasmAnyRef, 4)
.exportAs('exported_ref_table');
let func_table = builder.addTable(kWasmFuncRef, 3)
.exportAs('exported_func_table');
+ let i31ref_table = builder.addTable(kWasmI31Ref, 3)
+ .exportAs('exported_i31_table');
let func = builder.addFunction('my_func', kSig_v_v).addBody([kExprNop]);
// Make the function "declared".
@@ -88,19 +91,18 @@ async function instantiateWasm() {
...wasmI32Const(1), ...wasmI32Const(20), ...wasmI32Const(21),
kGCPrefix, kExprArrayNewFixed, array_type, 2,
kExprTableSet, ref_table.index,
- // TODO(7748): Reactivate this test when JS interop between i31refs and
- // JS SMIs is fixed. The problem right now is the 33-bit shift for i31ref
- // values on non-pointer-compressed platforms, which means i31refs and
- // Smis have different encodings there but it's impossible to tell them
- // apart.
- // ...wasmI32Const(2), ...wasmI32Const(30),
- // kGCPrefix, kExprI31New, kExprTableSet, ref_table.index,
+ ...wasmI32Const(2), ...wasmI32Const(30),
+ kGCPrefix, kExprI31New, kExprTableSet, ref_table.index,
// Fill imported any table.
...wasmI32Const(1),
...wasmI32Const(123), kGCPrefix, kExprStructNew, struct_type,
kExprTableSet, imported_ref_table,
+ ...wasmI32Const(1),
+ ...wasmI32Const(321), kGCPrefix, kExprI31New,
+ kExprTableSet, imported_ref_table,
+
// Fill imported func table.
...wasmI32Const(1),
kExprRefFunc, func.index,
@@ -110,6 +112,15 @@ async function instantiateWasm() {
...wasmI32Const(1),
kExprRefFunc, func.index,
kExprTableSet, func_table.index,
+
+ // Fill i31 table.
+ ...wasmI32Const(0),
+ ...wasmI32Const(123456), kGCPrefix, kExprI31New,
+ kExprTableSet, i31ref_table.index,
+
+ ...wasmI32Const(1),
+ ...wasmI32Const(-123), kGCPrefix, kExprI31New,
+ kExprTableSet, i31ref_table.index,
]).exportFunc();
let body = [
@@ -121,12 +132,9 @@ async function instantiateWasm() {
...wasmI32Const(21),
kGCPrefix, kExprArrayNewFixed, array_type, 1,
kExprLocalSet, 1,
- // Set local anyref_local_i31.
- // TODO(7748): Reactivate this test when JS interop between i31refs and JS
- // SMIs is fixed (same issue as above).
- // ...wasmI32Const(30),
- // kGCPrefix, kExprI31New,
- // kExprLocalSet, 2,
+ ...wasmI32Const(30),
+ kGCPrefix, kExprI31New,
+ kExprLocalSet, 2,
kExprNop,
];
let main = builder.addFunction('main', kSig_v_v)
@@ -136,7 +144,7 @@ async function instantiateWasm() {
.addLocals(kWasmAnyRef, 1, ['anyref_local_null'])
.addBody(body)
.exportFunc();
-
+ builder.endRecGroup();
var module_bytes = builder.toArray();
breakpointLocation = main.body_offset + body.length - 1;
@@ -144,7 +152,7 @@ async function instantiateWasm() {
let imports = `{'import' : {
'any_table': (() => {
let js_table =
- new WebAssembly.Table({element: 'anyref', initial: 3, maximum: 3});
+ new WebAssembly.Table({element: 'anyref', initial: 4, maximum: 4});
js_table.set(0, ['JavaScript', 'value']);
return js_table;
})(),
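Not part of the patch: a minimal sketch, using the same WasmModuleBuilder helpers as this test, of how an i31ref table is declared, filled and then shown in the scope view (indices and values taken from the test above; the fill function name is illustrative).

  let i31ref_table = builder.addTable(kWasmI31Ref, 3)
                            .exportAs('exported_i31_table');
  builder.addFunction('fill_i31', kSig_v_v).addBody([
    ...wasmI32Const(0), ...wasmI32Const(123456),
    kGCPrefix, kExprI31New, kExprTableSet, i31ref_table.index,
    ...wasmI32Const(1), ...wasmI32Const(-123),
    kGCPrefix, kExprI31New, kExprTableSet, i31ref_table.index,
  ]).exportFunc();
  // The debugger then lists the table as
  // "$exported_i31_table: 0: 123456 (i31ref), 1: -123 (i31ref), 2: null (i31ref)".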
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index 3db7ebda51..f7e758bf41 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -15,7 +15,7 @@ Script wasm://wasm/e33badc2 byte offset 169: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at $C (interpreted) (0:169):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -26,8 +26,8 @@ at $C (interpreted) (0:169):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 0 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $B (liftoff) (0:158):
- scope (wasm-expression-stack):
stack: "0": 42 (i32), "1": 3 (i32)
@@ -44,11 +44,11 @@ at $B (liftoff) (0:158):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 0 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -57,8 +57,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 0 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -79,8 +79,8 @@ at $C (interpreted) (0:171):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 0 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $B (liftoff) (0:158):
- scope (wasm-expression-stack):
stack: "0": 42 (i32), "1": 3 (i32)
@@ -97,11 +97,11 @@ at $B (liftoff) (0:158):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 0 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -110,8 +110,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 0 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -121,7 +121,7 @@ Script wasm://wasm/e33badc2 byte offset 173: Wasm opcode 0x41 (kExprI32Const)
Scope:
at $C (interpreted) (0:173):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -132,8 +132,8 @@ at $C (interpreted) (0:173):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $B (liftoff) (0:158):
- scope (wasm-expression-stack):
stack: "0": 42 (i32), "1": 3 (i32)
@@ -150,11 +150,11 @@ at $B (liftoff) (0:158):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -163,8 +163,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -185,8 +185,8 @@ at $C (interpreted) (0:175):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $B (liftoff) (0:158):
- scope (wasm-expression-stack):
stack: "0": 42 (i32), "1": 3 (i32)
@@ -203,11 +203,11 @@ at $B (liftoff) (0:158):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -216,8 +216,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -227,7 +227,7 @@ Script wasm://wasm/e33badc2 byte offset 177: Wasm opcode 0x0b (kExprEnd)
Scope:
at $C (interpreted) (0:177):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 47 (i32)
@@ -238,8 +238,8 @@ at $C (interpreted) (0:177):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $B (liftoff) (0:158):
- scope (wasm-expression-stack):
stack: "0": 42 (i32), "1": 3 (i32)
@@ -256,11 +256,11 @@ at $B (liftoff) (0:158):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -269,8 +269,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -294,11 +294,11 @@ at $B (liftoff) (0:160):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -307,8 +307,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -332,11 +332,11 @@ at $B (liftoff) (0:161):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -345,8 +345,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -356,7 +356,7 @@ Script wasm://wasm/e33badc2 byte offset 162: Wasm opcode 0x0b (kExprEnd)
Scope:
at $B (liftoff) (0:162):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -370,11 +370,11 @@ at $B (liftoff) (0:162):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at $A (liftoff) (0:128):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -383,8 +383,8 @@ at $A (liftoff) (0:128):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -394,7 +394,7 @@ Script wasm://wasm/e33badc2 byte offset 130: Wasm opcode 0x0b (kExprEnd)
Scope:
at $A (liftoff) (0:130):
- scope (wasm-expression-stack):
- stack:
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -403,8 +403,8 @@ at $A (liftoff) (0:130):
functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
globals: "$exported_global": 42 (i32)
memories: "$exported_memory": (Memory)
- tables:
- $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: undefined (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
+ tables:
+ $exported_table: 0: function js_func() { [native code] } (funcref), 1: function () { [native code] } (funcref), 2: null (funcref), 3: function $A (liftoff)() { [native code] } (funcref)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
index dd9a73bd45..7fe4f19bc6 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
@@ -3,8 +3,8 @@ Running testFunction with generated wasm bytes...
Paused on 'debugger;'
Number of frames: 5
- [0] {"functionName":"call_debugger","function_lineNumber":1,"function_columnNumber":24,"lineNumber":2,"columnNumber":4}
- - [1] {"functionName":"$call_func","lineNumber":0,"columnNumber":55}
- - [2] {"functionName":"$main","lineNumber":0,"columnNumber":62}
+ - [1] {"functionName":"$call_func","function_lineNumber":0,"function_columnNumber":54,"lineNumber":0,"columnNumber":55}
+ - [2] {"functionName":"$main","function_lineNumber":0,"function_columnNumber":59,"lineNumber":0,"columnNumber":62}
- [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":14,"columnNumber":19}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Getting v8-generated stack trace...
diff --git a/deps/v8/test/inspector/debugger/wasm-step-a-lot.js b/deps/v8/test/inspector/debugger/wasm-step-a-lot.js
index e6e57830e0..4eee55bcdc 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-a-lot.js
+++ b/deps/v8/test/inspector/debugger/wasm-step-a-lot.js
@@ -4,7 +4,7 @@
// Lower the maximum code space size to detect missed garbage collection
// earlier.
-// Flags: --wasm-max-code-space=2
+// Flags: --wasm-max-committed-code-mb=2
utils.load('test/inspector/wasm-inspector-test.js');
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index dd78836f3a..dfc50b2522 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -73,6 +73,8 @@ class UtilsExtension : public InspectorIsolateData::SetupGlobalTask {
utils->Set(isolate, "cancelPauseOnNextStatement",
v8::FunctionTemplate::New(
isolate, &UtilsExtension::CancelPauseOnNextStatement));
+ utils->Set(isolate, "stop",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Stop));
utils->Set(isolate, "setLogConsoleApiMessageCalls",
v8::FunctionTemplate::New(
isolate, &UtilsExtension::SetLogConsoleApiMessageCalls));
@@ -104,6 +106,9 @@ class UtilsExtension : public InspectorIsolateData::SetupGlobalTask {
utils->Set(isolate, "interruptForMessages",
v8::FunctionTemplate::New(
isolate, &UtilsExtension::InterruptForMessages));
+ utils->Set(
+ isolate, "waitForDebugger",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::WaitForDebugger));
global->Set(isolate, "utils", utils);
}
@@ -272,6 +277,16 @@ class UtilsExtension : public InspectorIsolateData::SetupGlobalTask {
});
}
+ static void Stop(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ FATAL("Internal error: stop(session_id).");
+ }
+ int session_id = args[0].As<v8::Int32>()->Value();
+ RunSyncTask(backend_runner_, [&session_id](InspectorIsolateData* data) {
+ data->Stop(session_id);
+ });
+ }
+
static void SetLogConsoleApiMessageCalls(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsBoolean()) {
@@ -404,6 +419,19 @@ class UtilsExtension : public InspectorIsolateData::SetupGlobalTask {
backend_runner_->InterruptForMessages();
}
+ static void WaitForDebugger(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsInt32() || !args[1]->IsFunction()) {
+ FATAL("Internal error: waitForDebugger(context_group_id, callback).");
+ }
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ RunSimpleAsyncTask(
+ backend_runner_,
+ [context_group_id](InspectorIsolateData* data) {
+ data->WaitForDebugger(context_group_id);
+ },
+ args[1].As<v8::Function>());
+ }
+
static std::map<int, std::unique_ptr<FrontendChannelImpl>> channels_;
};
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index d20b996e60..764af63046 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -62,6 +62,7 @@ InspectorIsolateData::InspectorIsolateData(
&InspectorIsolateData::PromiseRejectHandler);
inspector_ = v8_inspector::V8Inspector::create(isolate_.get(), this);
}
+ v8::Isolate::Scope isolate_scope(isolate_.get());
v8::HandleScope handle_scope(isolate_.get());
not_inspectable_private_.Reset(
isolate_.get(),
@@ -163,9 +164,12 @@ int InspectorIsolateData::ConnectSession(
v8_inspector::V8Inspector::Channel* channel) {
v8::SealHandleScope seal_handle_scope(isolate());
int session_id = ++last_session_id_;
- sessions_[session_id] =
- inspector_->connect(context_group_id, channel, state,
- v8_inspector::V8Inspector::kFullyTrusted);
+ sessions_[session_id] = inspector_->connect(
+ context_group_id, channel, state,
+ v8_inspector::V8Inspector::kFullyTrusted,
+ waiting_for_debugger_
+ ? v8_inspector::V8Inspector::kWaitingForDebugger
+ : v8_inspector::V8Inspector::kNotWaitingForDebugger);
context_group_by_session_[sessions_[session_id].get()] = context_group_id;
return session_id;
}
@@ -197,6 +201,12 @@ void InspectorIsolateData::BreakProgram(
}
}
+void InspectorIsolateData::Stop(int session_id) {
+ v8::SealHandleScope seal_handle_scope(isolate());
+ auto it = sessions_.find(session_id);
+ if (it != sessions_.end()) it->second->stop();
+}
+
void InspectorIsolateData::SchedulePauseOnNextStatement(
int context_group_id, const v8_inspector::StringView& reason,
const v8_inspector::StringView& details) {
@@ -406,7 +416,7 @@ void InspectorIsolateData::SetCurrentTimeMS(double time) {
double InspectorIsolateData::currentTimeMS() {
if (current_time_set_) return current_time_;
- return V8::GetCurrentPlatform()->CurrentClockTimeMillis();
+ return V8::GetCurrentPlatform()->CurrentClockTimeMillisecondsHighResolution();
}
void InspectorIsolateData::SetMemoryInfo(v8::Local<v8::Value> memory_info) {
@@ -438,6 +448,10 @@ void InspectorIsolateData::runMessageLoopOnPause(int) {
task_runner_->RunMessageLoop(true);
}
+void InspectorIsolateData::runIfWaitingForDebugger(int) {
+ quitMessageLoopOnPause();
+}
+
void InspectorIsolateData::quitMessageLoopOnPause() {
v8::SealHandleScope seal_handle_scope(isolate());
task_runner_->QuitMessageLoop();
@@ -507,6 +521,13 @@ bool InspectorIsolateData::AssociateExceptionData(
this->isolate()->GetCurrentContext(), exception, key, value);
}
+void InspectorIsolateData::WaitForDebugger(int context_group_id) {
+ DCHECK(!waiting_for_debugger_);
+ waiting_for_debugger_ = true;
+ runMessageLoopOnPause(context_group_id);
+ waiting_for_debugger_ = false;
+}
+
namespace {
class StringBufferImpl : public v8_inspector::StringBuffer {
public:
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index b942d255de..7a953e6b3f 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -11,7 +11,9 @@
#include "include/v8-array-buffer.h"
#include "include/v8-inspector.h"
#include "include/v8-local-handle.h"
+#include "include/v8-locker.h"
#include "include/v8-script.h"
+#include "src/base/optional.h"
namespace v8 {
@@ -47,7 +49,9 @@ class InspectorIsolateData : public v8_inspector::V8InspectorClient {
~InspectorIsolateData() override {
// Enter the isolate before destructing this InspectorIsolateData, so that
// destructors that run before the Isolate's destructor still see it as
- // entered.
+ // entered. Use a v8::Locker, in case the thread destroying the isolate is
+ // not the last one that entered it.
+ locker_.emplace(isolate());
isolate()->Enter();
}
@@ -74,6 +78,7 @@ class InspectorIsolateData : public v8_inspector::V8InspectorClient {
void BreakProgram(int context_group_id,
const v8_inspector::StringView& reason,
const v8_inspector::StringView& details);
+ void Stop(int session_id);
void SchedulePauseOnNextStatement(int context_group_id,
const v8_inspector::StringView& reason,
const v8_inspector::StringView& details);
@@ -106,6 +111,7 @@ class InspectorIsolateData : public v8_inspector::V8InspectorClient {
bool AssociateExceptionData(v8::Local<v8::Value> exception,
v8::Local<v8::Name> key,
v8::Local<v8::Value> value);
+ void WaitForDebugger(int context_group_id);
private:
static v8::MaybeLocal<v8::Module> ModuleResolveCallback(
@@ -126,6 +132,7 @@ class InspectorIsolateData : public v8_inspector::V8InspectorClient {
v8::MaybeLocal<v8::Value> memoryInfo(v8::Isolate* isolate,
v8::Local<v8::Context>) override;
void runMessageLoopOnPause(int context_group_id) override;
+ void runIfWaitingForDebugger(int context_group_id) override;
void quitMessageLoopOnPause() override;
void installAdditionalCommandLineAPI(v8::Local<v8::Context>,
v8::Local<v8::Object>) override;
@@ -157,6 +164,9 @@ class InspectorIsolateData : public v8_inspector::V8InspectorClient {
SetupGlobalTasks setup_global_tasks_;
std::unique_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_;
std::unique_ptr<v8::Isolate, IsolateDeleter> isolate_;
+ // The locker_ field has to come after isolate_ because the locker has to
+ // outlive the isolate.
+ base::Optional<v8::Locker> locker_;
std::unique_ptr<v8_inspector::V8Inspector> inspector_;
int last_context_group_id_ = 0;
std::map<int, std::vector<v8::Global<v8::Context>>> contexts_;
@@ -169,6 +179,7 @@ class InspectorIsolateData : public v8_inspector::V8InspectorClient {
double current_time_ = 0.0;
bool log_console_api_message_calls_ = false;
bool log_max_async_call_stack_depth_changed_ = false;
+ bool waiting_for_debugger_ = false;
v8::Global<v8::Private> not_inspectable_private_;
v8::Global<v8::String> resource_name_prefix_;
v8::Global<v8::String> additional_console_api_;
diff --git a/deps/v8/test/inspector/private-class-member-inspector-test.js b/deps/v8/test/inspector/private-class-member-inspector-test.js
new file mode 100644
index 0000000000..6cdc988cd3
--- /dev/null
+++ b/deps/v8/test/inspector/private-class-member-inspector-test.js
@@ -0,0 +1,195 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+PrivateClassMemberInspectorTest = {};
+
+function getTestReceiver(type) {
+ return type === 'private-instance-member' ? 'obj' : 'Klass';
+}
+
+function getSetupScript({ type, testRuntime }) {
+ const pause = testRuntime ? '' : 'debugger;';
+ if (type === 'private-instance-member' || type === 'private-static-member') {
+ const isStatic = type === 'private-static-member';
+ const prefix = isStatic ? 'static' : '';
+ return `
+class Klass {
+ ${prefix} #field = "string";
+ ${prefix} get #getterOnly() { return "getterOnly"; }
+ ${prefix} set #setterOnly(val) { this.#field = "setterOnlyCalled"; }
+ ${prefix} get #accessor() { return this.#field }
+ ${prefix} set #accessor(val) { this.#field = val; }
+ ${prefix} #method() { return "method"; }
+}
+const obj = new Klass();
+${pause}`;
+ }
+
+ if (type !== 'private-conflicting-member') {
+ throw new Error('unknown test type');
+ }
+
+ return `
+class Klass {
+ #name = "string";
+}
+class ClassWithField extends Klass {
+ #name = "child";
+}
+class ClassWithMethod extends Klass {
+ #name() {}
+}
+class ClassWithAccessor extends Klass {
+ get #name() {}
+ set #name(val) {}
+}
+class StaticClass extends Klass {
+ static #name = "child";
+}
+${pause}`;
+}
+
+async function testAllPrivateMembers(type, runAndLog) {
+ const receiver = getTestReceiver(type);
+ InspectorTest.log('Checking private fields');
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`${receiver}.#field = 1`);
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`${receiver}.#field++`);
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`++${receiver}.#field`);
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`${receiver}.#field -= 3`);
+ await runAndLog(`${receiver}.#field`);
+
+ InspectorTest.log('Checking private getter-only accessors');
+ await runAndLog(`${receiver}.#getterOnly`);
+ await runAndLog(`${receiver}.#getterOnly = 1`);
+ await runAndLog(`${receiver}.#getterOnly++`);
+ await runAndLog(`${receiver}.#getterOnly -= 3`);
+ await runAndLog(`${receiver}.#getterOnly`);
+
+ InspectorTest.log('Checking private setter-only accessors');
+ await runAndLog(`${receiver}.#setterOnly`);
+ await runAndLog(`${receiver}.#setterOnly = 1`);
+ await runAndLog(`${receiver}.#setterOnly++`);
+ await runAndLog(`${receiver}.#setterOnly -= 3`);
+ await runAndLog(`${receiver}.#field`);
+
+ InspectorTest.log('Checking private accessors');
+ await runAndLog(`${receiver}.#accessor`);
+ await runAndLog(`${receiver}.#accessor = 1`);
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`${receiver}.#accessor++`);
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`++${receiver}.#accessor`);
+ await runAndLog(`${receiver}.#field`);
+ await runAndLog(`${receiver}.#accessor -= 3`);
+ await runAndLog(`${receiver}.#field`);
+
+ InspectorTest.log('Checking private methods');
+ await runAndLog(`${receiver}.#method`);
+ await runAndLog(`${receiver}.#method = 1`);
+ await runAndLog(`${receiver}.#method++`);
+ await runAndLog(`++${receiver}.#method`);
+ await runAndLog(`${receiver}.#method -= 3`);
+}
+
+async function testConflictingPrivateMembers(runAndLog) {
+ await runAndLog(`(new ClassWithField).#name`);
+ await runAndLog(`(new ClassWithMethod).#name`);
+ await runAndLog(`(new ClassWithAccessor).#name`);
+ await runAndLog(`StaticClass.#name`);
+ await runAndLog(`(new StaticClass).#name`);
+}
+
+async function runPrivateClassMemberTest(Protocol, { type, testRuntime }) {
+ let runAndLog;
+
+ if (testRuntime) {
+ runAndLog = async function runAndLog(expression) {
+ InspectorTest.log(`Runtime.evaluate: \`${expression}\``);
+ const { result: { result } } =
+ await Protocol.Runtime.evaluate({ expression, replMode: true });
+ InspectorTest.logMessage(result);
+ }
+ } else {
+ const { params: { callFrames } } = await Protocol.Debugger.oncePaused();
+ const frame = callFrames[0];
+
+ runAndLog = async function runAndLog(expression) {
+ InspectorTest.log(`Debugger.evaluateOnCallFrame: \`${expression}\``);
+ const { result: { result } } =
+ await Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: frame.callFrameId,
+ expression
+ });
+ InspectorTest.logMessage(result);
+ }
+ }
+
+ switch (type) {
+ case 'private-instance-member':
+ case 'private-static-member': {
+ await testAllPrivateMembers(type, runAndLog);
+ break;
+ }
+ case 'private-conflicting-member': {
+ await testConflictingPrivateMembers(runAndLog);
+ break;
+ }
+ default:
+ throw new Error('unknown test type');
+ }
+ await Protocol.Debugger.resume();
+}
+
+PrivateClassMemberInspectorTest.runTest = function (InspectorTest, options) {
+ const { contextGroup, Protocol } = InspectorTest.start(options.message);
+
+ if (options.testRuntime) {
+ Protocol.Runtime.enable();
+ } else {
+ Protocol.Debugger.enable();
+ }
+ const source = getSetupScript(options);
+ InspectorTest.log(source);
+ if (options.module) {
+ contextGroup.addModule(source, 'module');
+ } else {
+ contextGroup.addScript(source);
+ }
+
+ InspectorTest.runAsyncTestSuite([async function evaluatePrivateMembers() {
+ await runPrivateClassMemberTest(Protocol, options);
+ }]);
+}
+
+async function printPrivateMembers(Protocol, InspectorTest, options) {
+ let { result } = await Protocol.Runtime.getProperties(options);
+ InspectorTest.log('privateProperties from Runtime.getProperties()');
+ if (result.privateProperties === undefined) {
+ InspectorTest.logObject(result.privateProperties);
+ } else {
+ InspectorTest.logMessage(result.privateProperties);
+ }
+
+ // This can happen for accessorPropertiesOnly requests.
+ if (result.internalProperties === undefined) {
+ return;
+ }
+
+ InspectorTest.log('[[PrivateMethods]] in internalProperties from Runtime.getProperties()');
+ let privateMethods = result.internalProperties.find((i) => i.name === '[[PrivateMethods]]');
+ if (privateMethods === undefined) {
+ InspectorTest.logObject(privateMethods);
+ return;
+ }
+
+ InspectorTest.logMessage(privateMethods);
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: privateMethods.value.objectId
+ }));
+ InspectorTest.logMessage(result);
+}
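(Illustration, not part of the diff: the helper above also covers a Debugger path; with testRuntime set to false it pauses on the injected 'debugger;' statement and evaluates on the top call frame. A hypothetical caller, using the same options shape as the Runtime-based tests added later in this diff, could look like this.)

utils.load('test/inspector/private-class-member-inspector-test.js');

PrivateClassMemberInspectorTest.runTest(InspectorTest, {
  type: 'private-instance-member',
  testRuntime: false,  // evaluate via Debugger.evaluateOnCallFrame at the 'debugger;' pause
  message: 'Evaluate private class member on a paused call frame'
});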
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 0e430d8afc..a5e9505b82 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -36,7 +36,8 @@ InspectorTest.logMessage = function(originalMessage) {
const nonStableFields = new Set([
'objectId', 'scriptId', 'exceptionId', 'timestamp', 'executionContextId',
'callFrameId', 'breakpointId', 'bindRemoteObjectFunctionId',
- 'formatterObjectId', 'debuggerId', 'bodyGetterId', 'uniqueId'
+ 'formatterObjectId', 'debuggerId', 'bodyGetterId', 'uniqueId',
+ 'executionContextUniqueId'
]);
const message = JSON.parse(JSON.stringify(originalMessage, replacer.bind(null, Symbol(), nonStableFields)));
if (message.id)
@@ -142,6 +143,12 @@ InspectorTest.ContextGroup = class {
this.id = utils.createContextGroup();
}
+ waitForDebugger() {
+ return new Promise(resolve => {
+ utils.waitForDebugger(this.id, resolve);
+ });
+ }
+
createContext(name) {
utils.createContext(this.id, name || '');
}
@@ -258,6 +265,10 @@ InspectorTest.Session = class {
utils.sendMessageToBackend(this.id, command);
}
+ stop() {
+ utils.stop(this.id);
+ }
+
setupScriptMap() {
if (this._scriptMap)
return;
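(Illustration, not part of the diff: a minimal sketch of how a test can drive the waitForDebugger() and stop() wrappers added above, assuming the standard InspectorTest harness; the run-if-waiting-for-debugger.js test further down does essentially this with two sessions.)

(async () => {
  const contextGroup = new InspectorTest.ContextGroup();
  // Resolves once the context group is released via Runtime.runIfWaitingForDebugger.
  const resumed = contextGroup.waitForDebugger()
      .then(() => InspectorTest.log('execution resumed'));
  const session = contextGroup.connect();
  await session.Protocol.Runtime.runIfWaitingForDebugger();
  await resumed;
  session.stop();  // force-stops the session through the new utils.stop(session_id) binding
  InspectorTest.completeTest();
})();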
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1220203.js b/deps/v8/test/inspector/regress/regress-crbug-1220203.js
index dcaa5bfef0..90508e684e 100644
--- a/deps/v8/test/inspector/regress/regress-crbug-1220203.js
+++ b/deps/v8/test/inspector/regress/regress-crbug-1220203.js
@@ -32,9 +32,12 @@ InspectorTest.runAsyncTestSuite([
const {params: {callFrames, data}} = await pausedPromise;
InspectorTest.log(`${data.uncaught ? 'Uncaught' : 'Caught'} exception at`);
await session.logSourceLocation(callFrames[0].location);
+
+ await Protocol.Debugger.resume();
+ // Wait on this before the Promise.all to ensure we didn't break twice (crbug.com/1270780).
+ await evalPromise;
+
await Promise.all([
- Protocol.Debugger.resume(),
- evalPromise,
Protocol.Runtime.disable(),
Protocol.Debugger.disable(),
]);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1246896.js b/deps/v8/test/inspector/regress/regress-crbug-1246896.js
index 822a194ce1..6c70d44362 100644
--- a/deps/v8/test/inspector/regress/regress-crbug-1246896.js
+++ b/deps/v8/test/inspector/regress/regress-crbug-1246896.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-experimental-remove-internal-scopes-property
-
const {Protocol} = InspectorTest.start('Don\'t crash when getting the properties of a native function');
(async () => {
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1270780-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1270780-expected.txt
new file mode 100644
index 0000000000..da4e8a05a9
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1270780-expected.txt
@@ -0,0 +1,22 @@
+Regression test for crbug.com/1270780.
+
+Running test: testThrowingGenerator
+Not pausing for uncaught exception: SUCCESS
+Pause no. 1 for caught exception: SUCCESS
+
+Running test: testThrowingNext
+Not pausing for uncaught exception: SUCCESS
+Pause no. 1 for caught exception: SUCCESS
+
+Running test: testThrowingReturn
+Not pausing for uncaught exception: SUCCESS
+Pause no. 1 for caught exception: SUCCESS
+
+Running test: testThrowingLoopBody
+Not pausing for uncaught exception: SUCCESS
+Pause no. 1 for caught exception: SUCCESS
+
+Running test: testThrowingLoopBodyAndThrowingReturn
+Not pausing for uncaught exception: SUCCESS
+Pause no. 1 for caught exception: SUCCESS
+Pause no. 2 for caught exception: SUCCESS
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1270780.js b/deps/v8/test/inspector/regress/regress-crbug-1270780.js
new file mode 100644
index 0000000000..0be18f4230
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1270780.js
@@ -0,0 +1,107 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Regression test for crbug.com/1270780.');
+
+contextGroup.addScript(`
+function* gen() {
+ yield 1;
+ throw new Error();
+}
+
+function callGen() {
+ try {
+ for (const x of gen()) {}
+ } catch {}
+}
+
+const throwNextIter = () => ({
+ values: [1, 2],
+ next() {
+ // Throw an exception once 'values' is empty.
+ if (this.values.length === 0) {
+ throw new Error();
+ }
+ return {value: this.values.shift(), done: false};
+ },
+ [Symbol.iterator]() { return this; }
+});
+
+const throwReturnIter = () => ({
+ values: [1, 2],
+ next() {
+ if (this.values.length === 0) { return {done: true}};
+ return {value: this.values.shift(), done: false};
+ },
+ return() {
+ throw new Error();
+ },
+ [Symbol.iterator]() { return this; }
+});
+
+function iterateIterator(iter) {
+ try {
+ for (const x of iter) {}
+ } catch {}
+}
+
+function breakingLoopBody(iter) {
+ try {
+ for (const x of iter) {
+ break; // Trigger '.return'.
+ }
+ } catch {}
+}
+
+function throwingLoopBody(iter) {
+ try {
+ for (const x of iter) {
+ throw new Error();
+ }
+ } catch {}
+}`);
+
+async function runTest(expression, expectedNumberOfPauses = 1) {
+ await Promise.all([
+ Protocol.Debugger.enable(),
+ Protocol.Debugger.setPauseOnExceptions({state: 'uncaught'}),
+ ]);
+
+ // Don't pause.
+ await Protocol.Runtime.evaluate({ expression, replMode: true });
+ InspectorTest.log('Not pausing for uncaught exception: SUCCESS');
+
+ // Run the same expression but with 'caught' and expect a pause.
+ await Protocol.Debugger.setPauseOnExceptions({ state: 'caught' });
+ const evalPromise = Protocol.Runtime.evaluate({ expression, replMode: true });
+
+ for (let x = 1; x <= expectedNumberOfPauses; ++x) {
+ await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`Pause no. ${x} for caught exception: SUCCESS`);
+ await Protocol.Debugger.resume();
+ }
+
+ await Promise.all([
+ Protocol.Debugger.disable(),
+ evalPromise,
+ ]);
+}
+
+InspectorTest.runAsyncTestSuite([
+ async function testThrowingGenerator() {
+ await runTest('callGen()');
+ },
+ async function testThrowingNext() {
+ await runTest('iterateIterator(throwNextIter())');
+ },
+ async function testThrowingReturn() {
+ await runTest('breakingLoopBody(throwReturnIter())');
+ },
+ async function testThrowingLoopBody() {
+ await runTest('throwingLoopBody([1, 2])');
+ },
+ async function testThrowingLoopBodyAndThrowingReturn() {
+ await runTest('throwingLoopBody(throwReturnIter())', 2);
+ },
+]);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1401674-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1401674-expected.txt
new file mode 100644
index 0000000000..fe03f46744
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1401674-expected.txt
@@ -0,0 +1,5 @@
+Regression test for crbug.com/1401674. Properly step through single statement loops.
+Expecting debugger to pause after the step ...
+SUCCESS
+Stepping to the same statement but in the next iteration ...
+SUCCESS
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1401674.js b/deps/v8/test/inspector/regress/regress-crbug-1401674.js
new file mode 100644
index 0000000000..986a837109
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1401674.js
@@ -0,0 +1,38 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {Protocol} = InspectorTest.start('Regression test for crbug.com/1401674. Properly step through single statement loops.');
+
+(async () => {
+ await Protocol.Debugger.enable();
+
+ Protocol.Runtime.evaluate({
+ expression: `
+ function f() {
+ let i = 0;
+ debugger;
+ while (true) {i++}
+ }
+
+ f();
+ `});
+
+ await Protocol.Debugger.oncePaused();
+
+ Protocol.Debugger.stepInto();
+ await Protocol.Debugger.oncePaused();
+
+ InspectorTest.log('Expecting debugger to pause after the step ...');
+ Protocol.Debugger.stepInto();
+ await Protocol.Debugger.oncePaused();
+
+ InspectorTest.log('SUCCESS');
+ InspectorTest.log('Stepping to the same statement but in the next iteration ...');
+
+ Protocol.Debugger.stepInto();
+ await Protocol.Debugger.oncePaused();
+
+ InspectorTest.log('SUCCESS');
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
index f98fc43bf9..e05a203c7d 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
@@ -208,11 +208,50 @@ Running test: testEvaluateOnExecutionContext
}
}
+Running test: testEvaluateOnUniqueExecutionContext
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 70
+ type : number
+ value : 70
+ }
+ }
+}
+
Running test: testPassingBothObjectIdAndExecutionContextId
{
error : {
- code : -32000
- message : ObjectId must not be specified together with executionContextId
+ code : -32602
+ message : ObjectId, executionContextId and uniqueContextId must mutually exclude each other
+ }
+ id : <messageId>
+}
+
+Running test: testPassingBothObjectIdAndExecutionContextUniqueId
+{
+ error : {
+ code : -32602
+ message : ObjectId, executionContextId and uniqueContextId must mutually exclude each other
+ }
+ id : <messageId>
+}
+
+Running test: testPassingTwoExecutionContextIds
+{
+ error : {
+ code : -32602
+ message : ObjectId, executionContextId and uniqueContextId must mutually exclude each other
+ }
+ id : <messageId>
+}
+
+Running test: testPassingNeitherContextIdNorObjectId
+{
+ error : {
+ code : -32602
+ message : Either objectId or executionContextId or uniqueContextId must be specified
}
id : <messageId>
}
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async.js b/deps/v8/test/inspector/runtime/call-function-on-async.js
index 70f823c52c..18c60a288c 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async.js
+++ b/deps/v8/test/inspector/runtime/call-function-on-async.js
@@ -8,10 +8,12 @@ let callFunctionOn = Protocol.Runtime.callFunctionOn.bind(Protocol.Runtime);
let remoteObject1;
let remoteObject2;
let executionContextId;
+let executionContextUniqueId;
Protocol.Runtime.enable();
Protocol.Runtime.onExecutionContextCreated(messageObject => {
executionContextId = messageObject.params.context.id;
+ executionContextUniqueId = messageObject.params.context.uniqueId;
InspectorTest.runAsyncTestSuite(testSuite);
});
@@ -135,15 +137,52 @@ let testSuite = [
}));
},
+ async function testEvaluateOnUniqueExecutionContext() {
+ InspectorTest.logMessage(await callFunctionOn({
+ uniqueContextId: executionContextUniqueId,
+ functionDeclaration: '(function(arg) { return this.globalObjectProperty + arg; })',
+ arguments: prepareArguments([ 28 ]),
+ returnByValue: true,
+ generatePreview: false,
+ awaitPromise: false
+ }));
+ },
+
async function testPassingBothObjectIdAndExecutionContextId() {
InspectorTest.logMessage(await callFunctionOn({
executionContextId,
objectId: remoteObject1.objectId,
functionDeclaration: '(function() { return 42; })',
arguments: prepareArguments([]),
- returnByValue: true,
- generatePreview: false,
- awaitPromise: false
+ returnByValue: true
+ }));
+ },
+
+ async function testPassingBothObjectIdAndExecutionContextUniqueId() {
+ InspectorTest.logMessage(await callFunctionOn({
+ uniqueContextId: executionContextUniqueId,
+ objectId: remoteObject1.objectId,
+ functionDeclaration: '(function() { return 42; })',
+ arguments: prepareArguments([]),
+ returnByValue: true
+ }));
+ },
+
+ async function testPassingTwoExecutionContextIds() {
+ InspectorTest.logMessage(await callFunctionOn({
+ executionContextId,
+ uniqueContextId: executionContextUniqueId,
+ functionDeclaration: '(function() { return 42; })',
+ arguments: prepareArguments([]),
+ returnByValue: true
+ }));
+ },
+
+ async function testPassingNeitherContextIdNorObjectId() {
+ InspectorTest.logMessage(await callFunctionOn({
+ functionDeclaration: '(function() { return 42; })',
+ arguments: prepareArguments([]),
+ returnByValue: true
}));
},
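(Illustration, not part of the diff: the parameter rules these tests pin down. Runtime.callFunctionOn now accepts uniqueContextId as a third, mutually exclusive way to pick a target, taken from context.uniqueId of the executionContextCreated event.)

// Exactly one of objectId, executionContextId or uniqueContextId must be given.
const { result } = await Protocol.Runtime.callFunctionOn({
  uniqueContextId: executionContextUniqueId,  // captured from executionContextCreated
  functionDeclaration: '(function() { return 42; })',
  returnByValue: true
});
// Passing two of them (or none) fails with error -32602, e.g.
// "ObjectId, executionContextId and uniqueContextId must mutually exclude each other".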
diff --git a/deps/v8/test/inspector/runtime/console-spec-expected.txt b/deps/v8/test/inspector/runtime/console-spec-expected.txt
index 48cbc70fe2..a7146d5370 100644
--- a/deps/v8/test/inspector/runtime/console-spec-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-spec-expected.txt
@@ -18,7 +18,7 @@ Running test: prototypeChainMustBeCorrect
true
Running test: consoleToString
-[object Object]
+[object console]
Running test: consoleMethodPropertyDescriptor
{
diff --git a/deps/v8/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt b/deps/v8/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt
index 9a5e1708c1..381cf88c2e 100644
--- a/deps/v8/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt
+++ b/deps/v8/test/inspector/runtime/context-destroyed-on-context-collected-expected.txt
@@ -3,5 +3,6 @@ Tests that contextDestroyed notification is fired when context is collected.
method : Runtime.executionContextDestroyed
params : {
executionContextId : <executionContextId>
+ executionContextUniqueId : <executionContextUniqueId>
}
}
diff --git a/deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict-expected.txt b/deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict-expected.txt
new file mode 100644
index 0000000000..5b92d1c6b4
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict-expected.txt
@@ -0,0 +1,55 @@
+Evaluate conflicting private class member out of class scope in Runtime.evaluate()
+
+class Klass {
+ #name = "string";
+}
+class ClassWithField extends Klass {
+ #name = "child";
+}
+class ClassWithMethod extends Klass {
+ #name() {}
+}
+class ClassWithAccessor extends Klass {
+ get #name() {}
+ set #name(val) {}
+}
+class StaticClass extends Klass {
+ static #name = "child";
+}
+
+
+Running test: evaluatePrivateMembers
+Runtime.evaluate: `(new ClassWithField).#name`
+{
+ className : Error
+ description : Error: Operation is ambiguous because there are more than one private name'#name' on the object at <anonymous>:1:2
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `(new ClassWithMethod).#name`
+{
+ className : Error
+ description : Error: Operation is ambiguous because there are more than one private name'#name' on the object at <anonymous>:1:2
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `(new ClassWithAccessor).#name`
+{
+ className : Error
+ description : Error: Operation is ambiguous because there are more than one private name'#name' on the object at <anonymous>:1:2
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `StaticClass.#name`
+{
+ type : string
+ value : child
+}
+Runtime.evaluate: `(new StaticClass).#name`
+{
+ type : string
+ value : string
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict.js b/deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict.js
new file mode 100644
index 0000000000..95cc6237c0
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-private-class-member-conflict.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
+const options = {
+ type: 'private-conflicting-member',
+ testRuntime: true,
+ message: `Evaluate conflicting private class member out of class scope in Runtime.evaluate()`
+};
+PrivateClassMemberInspectorTest.runTest(InspectorTest, options);
diff --git a/deps/v8/test/inspector/runtime/evaluate-private-class-member-expected.txt b/deps/v8/test/inspector/runtime/evaluate-private-class-member-expected.txt
new file mode 100644
index 0000000000..9a81f57cf7
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-private-class-member-expected.txt
@@ -0,0 +1,231 @@
+Evaluate private class member out of class scope in Runtime.evaluate()
+
+class Klass {
+ #field = "string";
+ get #getterOnly() { return "getterOnly"; }
+ set #setterOnly(val) { this.#field = "setterOnlyCalled"; }
+ get #accessor() { return this.#field }
+ set #accessor(val) { this.#field = val; }
+ #method() { return "method"; }
+}
+const obj = new Klass();
+
+
+Running test: evaluatePrivateMembers
+Checking private fields
+Runtime.evaluate: `obj.#field`
+{
+ type : string
+ value : string
+}
+Runtime.evaluate: `obj.#field = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#field++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Runtime.evaluate: `++obj.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Runtime.evaluate: `obj.#field -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private getter-only accessors
+Runtime.evaluate: `obj.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Runtime.evaluate: `obj.#getterOnly = 1`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at <anonymous>:1:17
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#getterOnly++`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at <anonymous>:1:16
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#getterOnly -= 3`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at <anonymous>:1:17
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Checking private setter-only accessors
+Runtime.evaluate: `obj.#setterOnly`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#setterOnly = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#setterOnly++`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#setterOnly -= 3`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#field`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Checking private accessors
+Runtime.evaluate: `obj.#accessor`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Runtime.evaluate: `obj.#accessor = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#accessor++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Runtime.evaluate: `++obj.#accessor`
+{
+ type : undefined
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Runtime.evaluate: `obj.#accessor -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Runtime.evaluate: `obj.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private methods
+Runtime.evaluate: `obj.#method`
+{
+ className : Function
+ description : #method() { return "method"; }
+ objectId : <objectId>
+ type : function
+}
+Runtime.evaluate: `obj.#method = 1`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:13
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#method++`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:12
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `++obj.#method`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:7
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `obj.#method -= 3`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:13
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-private-class-member-static-expected.txt b/deps/v8/test/inspector/runtime/evaluate-private-class-member-static-expected.txt
new file mode 100644
index 0000000000..d52f4960fd
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-private-class-member-static-expected.txt
@@ -0,0 +1,231 @@
+Evaluate static private class member out of class scope in Runtime.evaluate()
+
+class Klass {
+ static #field = "string";
+ static get #getterOnly() { return "getterOnly"; }
+ static set #setterOnly(val) { this.#field = "setterOnlyCalled"; }
+ static get #accessor() { return this.#field }
+ static set #accessor(val) { this.#field = val; }
+ static #method() { return "method"; }
+}
+const obj = new Klass();
+
+
+Running test: evaluatePrivateMembers
+Checking private fields
+Runtime.evaluate: `Klass.#field`
+{
+ type : string
+ value : string
+}
+Runtime.evaluate: `Klass.#field = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#field++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Runtime.evaluate: `++Klass.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Runtime.evaluate: `Klass.#field -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private getter-only accessors
+Runtime.evaluate: `Klass.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Runtime.evaluate: `Klass.#getterOnly = 1`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at <anonymous>:1:19
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#getterOnly++`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at <anonymous>:1:18
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#getterOnly -= 3`
+{
+ className : Error
+ description : Error: '#getterOnly' was defined without a setter at <anonymous>:1:19
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#getterOnly`
+{
+ type : string
+ value : getterOnly
+}
+Checking private setter-only accessors
+Runtime.evaluate: `Klass.#setterOnly`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#setterOnly = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#setterOnly++`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#setterOnly -= 3`
+{
+ className : Error
+ description : Error: '#setterOnly' was defined without a getter at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#field`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Checking private accessors
+Runtime.evaluate: `Klass.#accessor`
+{
+ type : string
+ value : setterOnlyCalled
+}
+Runtime.evaluate: `Klass.#accessor = 1`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#accessor++`
+{
+ description : 1
+ type : number
+ value : 1
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 2
+ type : number
+ value : 2
+}
+Runtime.evaluate: `++Klass.#accessor`
+{
+ type : undefined
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 3
+ type : number
+ value : 3
+}
+Runtime.evaluate: `Klass.#accessor -= 3`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Runtime.evaluate: `Klass.#field`
+{
+ description : 0
+ type : number
+ value : 0
+}
+Checking private methods
+Runtime.evaluate: `Klass.#method`
+{
+ className : Function
+ description : #method() { return "method"; }
+ objectId : <objectId>
+ type : function
+}
+Runtime.evaluate: `Klass.#method = 1`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:15
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#method++`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:14
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `++Klass.#method`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:9
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+Runtime.evaluate: `Klass.#method -= 3`
+{
+ className : Error
+ description : Error: Private method '#method' is not writable at <anonymous>:1:15
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-private-class-member-static.js b/deps/v8/test/inspector/runtime/evaluate-private-class-member-static.js
new file mode 100644
index 0000000000..1a8b5ed141
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-private-class-member-static.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
+const options = {
+ type: 'private-static-member',
+ testRuntime: true,
+ message: `Evaluate static private class member out of class scope in Runtime.evaluate()`
+};
+PrivateClassMemberInspectorTest.runTest(InspectorTest, options);
diff --git a/deps/v8/test/inspector/runtime/evaluate-private-class-member.js b/deps/v8/test/inspector/runtime/evaluate-private-class-member.js
new file mode 100644
index 0000000000..dd22e790fb
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-private-class-member.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/private-class-member-inspector-test.js');
+
+const options = {
+ type: 'private-instance-member',
+ testRuntime: true,
+ message: `Evaluate private class member out of class scope in Runtime.evaluate()`
+};
+PrivateClassMemberInspectorTest.runTest(InspectorTest, options);
diff --git a/deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions-expected.txt b/deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions-expected.txt
new file mode 100644
index 0000000000..50fffb0575
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions-expected.txt
@@ -0,0 +1,24 @@
+Check that throwing breakpoint conditions report exceptions via Runtime.exceptionThrown.
+
+Running test: setUp
+
+Running test: testSyntaxError
+Exception thrown: SyntaxError: Unexpected end of input
+ at smallFnWithLogpoint (test.js:3:17)
+ at <anonymous>:1:1
+
+Running test: testRepeatedErrorsOnlyCauseOneEvent
+Exception thrown: SyntaxError: Unexpected end of input
+ at smallFnWithLogpoint (test.js:3:17)
+ at <anonymous>:1:29
+
+Running test: testSporadicThrowing
+Exception thrown: ReferenceError: y is not defined
+ at eval (eval at smallFnWithLogpoint (test.js:3:17), <anonymous>:1:1)
+ at smallFnWithLogpoint (test.js:3:17)
+ at <anonymous>:1:1
+Paused on conditional logpoint
+Exception thrown: ReferenceError: y is not defined
+ at eval (eval at smallFnWithLogpoint (test.js:3:17), <anonymous>:1:1)
+ at smallFnWithLogpoint (test.js:3:17)
+ at <anonymous>:1:22
diff --git a/deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions.js b/deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions.js
new file mode 100644
index 0000000000..e569eea8a0
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/exception-thrown-breakpoint-conditions.js
@@ -0,0 +1,71 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { session, contextGroup, Protocol } = InspectorTest.start("Check that throwing breakpoint conditions report exceptions via Runtime.exceptionThrown.");
+
+contextGroup.addScript(`
+function smallFnWithLogpoint(x) {
+ return x + 42;
+}
+`, 0, 0, 'test.js');
+
+Protocol.Runtime.onExceptionThrown(({ params: { exceptionDetails } }) => {
+ const { description } = exceptionDetails.exception;
+ InspectorTest.log(`Exception thrown: ${description}`);
+});
+
+async function testSyntaxError() {
+ const { result: { breakpointId } } = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 3,
+ url: 'test.js',
+ condition: 'x ===',
+ });
+ await Protocol.Runtime.evaluate({ expression: 'smallFnWithLogpoint(5)' });
+ await Protocol.Debugger.removeBreakpoint({ breakpointId });
+}
+
+async function testRepeatedErrorsOnlyCauseOneEvent() {
+ const { result: { breakpointId } } = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 3,
+ url: 'test.js',
+ condition: 'x ===',
+ });
+ await Protocol.Runtime.evaluate({
+ expression: 'for (let i = 0; i < 5; ++i) smallFnWithLogpoint(5);' });
+ await Protocol.Debugger.removeBreakpoint({ breakpointId });
+}
+
+async function testSporadicThrowing() {
+ // Tests that a breakpoint condition going from throwing -> succeeding -> throwing
+ // logs two events.
+ const { result: { breakpointId } } = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 3,
+ url: 'test.js',
+ condition: 'y === 42',
+ });
+ // Causes a reference error as `y` is not defined.
+ await Protocol.Runtime.evaluate({ expression: 'smallFnWithLogpoint(5)' });
+
+ // Introduce y and trigger breakpoint again.
+ const evalPromise = Protocol.Runtime.evaluate({ expression: 'globalThis.y = 42; smallFnWithLogpoint(5)' });
+ await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused on conditional logpoint');
+ await Promise.all([Protocol.Debugger.resume(), evalPromise]);
+
+ // Delete 'y' again, trigger breakpoint and expect an exception event.
+ await Protocol.Runtime.evaluate({ expression: 'delete globalThis.y; smallFnWithLogpoint(5)' });
+
+ await Protocol.Debugger.removeBreakpoint({ breakpointId });
+}
+
+InspectorTest.runAsyncTestSuite([
+ async function setUp() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.enable();
+ await Protocol.Debugger.onceScriptParsed();
+ },
+ testSyntaxError,
+ testRepeatedErrorsOnlyCauseOneEvent,
+ testSporadicThrowing,
+]);
diff --git a/deps/v8/test/inspector/runtime/function-scopes.js b/deps/v8/test/inspector/runtime/function-scopes.js
index c382ccda47..bda069bd9a 100644
--- a/deps/v8/test/inspector/runtime/function-scopes.js
+++ b/deps/v8/test/inspector/runtime/function-scopes.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-experimental-remove-internal-scopes-property
-
let {session, contextGroup, Protocol} = InspectorTest.start('Checks [[Scopes]] for functions');
contextGroup.addScript(`
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 2e32e48116..a200aab34f 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -111,6 +111,9 @@ Running test: testArrayBuffer
Symbol(Symbol.toStringTag) own string ArrayBuffer
byteLength own no value, getter
constructor own function undefined
+ maxByteLength own no value, getter
+ resizable own no value, getter
+ resize own function undefined
slice own function undefined
Internal properties
[[Prototype]] object undefined
@@ -156,6 +159,9 @@ Running test: testArrayBufferFromWebAssemblyMemory
Symbol(Symbol.toStringTag) own string ArrayBuffer
byteLength own no value, getter
constructor own function undefined
+ maxByteLength own no value, getter
+ resizable own no value, getter
+ resize own function undefined
slice own function undefined
Internal properties
[[Prototype]] object undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
index a0437f4af6..dbb506dcd3 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
@@ -5,10 +5,10 @@ Testing regular Proxy
result : {
result : {
className : Object
- description : Proxy
+ description : Proxy(Object)
objectId : <objectId>
preview : {
- description : Proxy
+ description : Proxy(Object)
overflow : false
properties : [
[0] : {
@@ -65,10 +65,10 @@ Testing revocable Proxy
result : {
result : {
className : Object
- description : Proxy
+ description : Proxy(Object)
objectId : <objectId>
preview : {
- description : Proxy
+ description : Proxy(Object)
overflow : false
properties : [
[0] : {
diff --git a/deps/v8/test/inspector/runtime/internal-properties-prototype-chain-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-prototype-chain-expected.txt
new file mode 100644
index 0000000000..17a026ef61
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/internal-properties-prototype-chain-expected.txt
@@ -0,0 +1,61 @@
+Checks that only one of JSGlobalObject/JSGlobalProxy shows up in the prototype chain
+Prototype chain for "globalThis":
+{
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+}
+{
+ name : [[Prototype]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+}
+{
+ name : [[Prototype]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+}
+
+Prototype chain for "var weird = {}; weird.__proto__ = globalThis; weird;":
+{
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+}
+{
+ name : [[Prototype]]
+ value : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+}
+{
+ name : [[Prototype]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+}
+{
+ name : [[Prototype]]
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/internal-properties-prototype-chain.js b/deps/v8/test/inspector/runtime/internal-properties-prototype-chain.js
new file mode 100644
index 0000000000..ce48727437
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/internal-properties-prototype-chain.js
@@ -0,0 +1,39 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start('Checks that only one of JSGlobalObject/JSGlobalProxy shows up in the prototype chain');
+
+function findPrototypeObjectId(response) {
+ const { result: { internalProperties } } = response;
+ for (const prop of internalProperties || []) {
+ if (prop.name === '[[Prototype]]') {
+ return prop;
+ }
+ }
+}
+
+async function logPrototypeChain(objectId) {
+ while (true) {
+ const response = await Protocol.Runtime.getProperties({ objectId });
+ const prototype = findPrototypeObjectId(response);
+ if (!prototype) break;
+
+ InspectorTest.logMessage(prototype);
+ objectId = prototype.value.objectId;
+ }
+}
+
+(async () => {
+ InspectorTest.log('Prototype chain for "globalThis":');
+ const { result: { result } } = await Protocol.Runtime.evaluate({ expression: 'globalThis' });
+ InspectorTest.logMessage(result);
+ await logPrototypeChain(result.objectId);
+
+ InspectorTest.log('\nPrototype chain for "var weird = {}; weird.__proto__ = globalThis; weird;":')
+ const { result: { result: result2 } } = await Protocol.Runtime.evaluate({ expression: 'var weird = {}; weird.__proto__ = globalThis; weird;' });
+ InspectorTest.logMessage(result2);
+ await logPrototypeChain(result2.objectId);
+
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/internal-properties.js b/deps/v8/test/inspector/runtime/internal-properties.js
index 3e3ce4a197..b4b0bc47fb 100644
--- a/deps/v8/test/inspector/runtime/internal-properties.js
+++ b/deps/v8/test/inspector/runtime/internal-properties.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-experimental-remove-internal-scopes-property
-
let {session, contextGroup, Protocol} = InspectorTest.start('Checks internal properties in Runtime.getProperties output');
contextGroup.addScript(`
diff --git a/deps/v8/test/inspector/runtime/remote-object-expected.txt b/deps/v8/test/inspector/runtime/remote-object-expected.txt
index 14f584831e..765f8e26ef 100644
--- a/deps/v8/test/inspector/runtime/remote-object-expected.txt
+++ b/deps/v8/test/inspector/runtime/remote-object-expected.txt
@@ -484,6 +484,16 @@ Running test: testRegExp
type : object
}
}
+'/w+/v', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/v
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
'/w+/dgimsuy', returnByValue: false, generatePreview: false
{
result : {
@@ -494,6 +504,16 @@ Running test: testRegExp
type : object
}
}
+'/w+/dgimsvy', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/dgimsvy
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
'new RegExp('\w+', 'g')', returnByValue: false, generatePreview: false
{
result : {
@@ -526,6 +546,18 @@ Running test: testRegExp
type : object
}
}
+'var re = /./dgimsvy;
+ re.toString = () => 'foo';
+ re', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /./dgimsvy
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
'var re = new RegExp('\w+', 'g');
re.prop = 32;
re', returnByValue: false, generatePreview: true
@@ -1598,7 +1630,7 @@ Running test: testProxy
{
result : {
className : Object
- description : Proxy
+ description : Proxy(Object)
objectId : <objectId>
subtype : proxy
type : object
@@ -1608,7 +1640,7 @@ Running test: testProxy
{
result : {
className : Object
- description : Proxy
+ description : Proxy(Error)
objectId : <objectId>
subtype : proxy
type : object
@@ -1626,10 +1658,10 @@ Running test: testProxy
{
result : {
className : Object
- description : Proxy
+ description : Proxy(Object)
objectId : <objectId>
preview : {
- description : Proxy
+ description : Proxy(Object)
overflow : false
properties : [
[0] : {
@@ -1659,11 +1691,57 @@ Running test: testProxy
name : e
subtype : proxy
type : object
- value : Proxy
+ value : Proxy(Object)
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+}
+'new Proxy([1, 2], {})', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Proxy(Array)
+ objectId : <objectId>
+ preview : {
+ description : Proxy(Array)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 1
}
+ [1] : {
+ name : 1
+ type : number
+ value : 2
+ }
+ ]
+ subtype : proxy
+ type : object
+ }
+ subtype : proxy
+ type : object
+ }
+}
+'revocable = Proxy.revocable({}, {}); revocable.revoke(); revocable.proxy', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ preview : {
+ description : Proxy
+ overflow : false
+ properties : [
]
+ subtype : proxy
type : object
}
+ subtype : proxy
type : object
}
}
@@ -2405,6 +2483,16 @@ Running test: testArrayBuffer
type : number
value : 0
}
+ [1] : {
+ name : maxByteLength
+ type : number
+ value : 0
+ }
+ [2] : {
+ name : resizable
+ type : boolean
+ value : false
+ }
]
subtype : arraybuffer
type : object
@@ -2428,6 +2516,16 @@ Running test: testArrayBuffer
type : number
value : 400
}
+ [1] : {
+ name : maxByteLength
+ type : number
+ value : 400
+ }
+ [2] : {
+ name : resizable
+ type : boolean
+ value : false
+ }
]
subtype : arraybuffer
type : object
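
The new `maxByteLength` and `resizable` entries in the expected internal properties correspond to the resizable ArrayBuffer API. A minimal standalone sketch (assuming an engine that ships resizable ArrayBuffers, such as recent V8); the sizes are illustrative only:

    // A fixed buffer reports resizable === false and maxByteLength === byteLength,
    // matching the 400/false values in the expected output above.
    const fixed = new ArrayBuffer(400);
    console.log(fixed.resizable, fixed.maxByteLength);       // false 400

    // A resizable buffer exposes its cap and can grow up to it.
    const growable = new ArrayBuffer(8, { maxByteLength: 64 });
    console.log(growable.resizable, growable.maxByteLength); // true 64
    growable.resize(32);
    console.log(growable.byteLength);                        // 32
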
diff --git a/deps/v8/test/inspector/runtime/remote-object.js b/deps/v8/test/inspector/runtime/remote-object.js
index 9eb38f8a52..ed4e764e43 100644
--- a/deps/v8/test/inspector/runtime/remote-object.js
+++ b/deps/v8/test/inspector/runtime/remote-object.js
@@ -235,9 +235,15 @@ InspectorTest.runAsyncTestSuite([
expression: '/\w+/y'
})).result);
InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/v'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
expression: '/\w+/dgimsuy'
})).result);
InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/dgimsvy'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
expression: `new RegExp('\\w+', 'g')`,
})).result);
InspectorTest.logMessage((await evaluate({
@@ -249,6 +255,11 @@ InspectorTest.runAsyncTestSuite([
re`
})).result);
InspectorTest.logMessage((await evaluate({
+ expression: `var re = /./dgimsvy;
+ re.toString = () => 'foo';
+ re`
+ })).result);
+ InspectorTest.logMessage((await evaluate({
expression: `var re = new RegExp('\\w+', 'g');
re.prop = 32;
re`,
@@ -490,6 +501,14 @@ InspectorTest.runAsyncTestSuite([
expression: '({e: new Proxy({a: 1}, {b: 2})})',
generatePreview: true
})).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Proxy([1, 2], {})',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'revocable = Proxy.revocable({}, {}); revocable.revoke(); revocable.proxy',
+ generatePreview: true
+ })).result);
},
async function testPromise() {
InspectorTest.logMessage((await evaluate({
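
The added cases exercise the RegExp `v` (unicodeSets) flag and a revoked proxy. A small sketch of the underlying language behavior, assuming an engine that already supports the `v` flag:

    const re = /\w+/v;
    console.log(re.flags, re.unicodeSets);   // "v" true

    const { proxy, revoke } = Proxy.revocable({}, {});
    revoke();
    // Every trap on a revoked proxy throws, so no target type can be reported
    // and the description stays plain "Proxy" in the expected output above.
    try { proxy.x; } catch (e) { console.log(e instanceof TypeError); } // true
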
diff --git a/deps/v8/test/inspector/runtime/run-if-waiting-for-debugger-expected.txt b/deps/v8/test/inspector/runtime/run-if-waiting-for-debugger-expected.txt
new file mode 100644
index 0000000000..e90b003193
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/run-if-waiting-for-debugger-expected.txt
@@ -0,0 +1,12 @@
+
+Running test: testTwoSessions
+Tests Runtime.runIfWaitingForDebugger
+session 1 resumed
+session 2 resumed
+execution resumed
+
+Running test: testSessionDisconnect
+Tests Runtime.runIfWaitingForDebugger
+session 1 resumed
+session 2 disconnected
+execution resumed
diff --git a/deps/v8/test/inspector/runtime/run-if-waiting-for-debugger.js b/deps/v8/test/inspector/runtime/run-if-waiting-for-debugger.js
new file mode 100644
index 0000000000..db3036e244
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/run-if-waiting-for-debugger.js
@@ -0,0 +1,35 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.runAsyncTestSuite([
+ async function testTwoSessions() {
+ InspectorTest.log('Tests Runtime.runIfWaitingForDebugger');
+
+ const contextGroup = new InspectorTest.ContextGroup();
+ const resumed = contextGroup.waitForDebugger().then(() => InspectorTest.log('execution resumed'));
+
+ const session1 = contextGroup.connect();
+ const session2 = contextGroup.connect();
+ await session1.Protocol.Runtime.runIfWaitingForDebugger();
+ InspectorTest.log('session 1 resumed');
+ await session2.Protocol.Runtime.runIfWaitingForDebugger();
+ InspectorTest.log('session 2 resumed');
+ await resumed;
+ },
+
+ async function testSessionDisconnect() {
+ InspectorTest.log('Tests Runtime.runIfWaitingForDebugger');
+
+ const contextGroup = new InspectorTest.ContextGroup();
+ const resumed = contextGroup.waitForDebugger().then(() => InspectorTest.log('execution resumed'));
+
+ const session1 = contextGroup.connect();
+ const session2 = contextGroup.connect();
+ await session1.Protocol.Runtime.runIfWaitingForDebugger();
+ InspectorTest.log('session 1 resumed');
+ session2.disconnect();
+ InspectorTest.log('session 2 disconnected');
+ await resumed;
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/terminate-execution-expected.txt b/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
index 614dc6da1e..ccd85fad3b 100644
--- a/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
+++ b/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
@@ -70,4 +70,4 @@ Terminate execution with pending microtasks
result : {
}
}
-
+Terminate execution does not crash on destroy
diff --git a/deps/v8/test/inspector/runtime/terminate-execution.js b/deps/v8/test/inspector/runtime/terminate-execution.js
index 8af28e4787..3fd6b60242 100644
--- a/deps/v8/test/inspector/runtime/terminate-execution.js
+++ b/deps/v8/test/inspector/runtime/terminate-execution.js
@@ -66,7 +66,19 @@ let {session, contextGroup, Protocol} =
await paused2;
Protocol.Runtime.terminateExecution().then(InspectorTest.logMessage);
await Protocol.Debugger.resume();
-
await Protocol.Runtime.disable();
+
+ InspectorTest.log('Terminate execution does not crash on destroy');
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({
+ expression: `
+ while(true) {
+ let p = new Promise(resolve => setTimeout(resolve, 0));
+ await p;
+ }`
+ });
+ Protocol.Runtime.terminateExecution();
+ await Protocol.Debugger.disable();
+
InspectorTest.completeTest();
})();
diff --git a/deps/v8/test/inspector/sessions/create-session-expected.txt b/deps/v8/test/inspector/sessions/create-session-expected.txt
index 4459f4d19c..b8f9c5f828 100644
--- a/deps/v8/test/inspector/sessions/create-session-expected.txt
+++ b/deps/v8/test/inspector/sessions/create-session-expected.txt
@@ -70,6 +70,7 @@ From session 2
method : Runtime.executionContextDestroyed
params : {
executionContextId : <executionContextId>
+ executionContextUniqueId : <executionContextUniqueId>
}
}
id matching: true
@@ -78,6 +79,7 @@ From session 1
method : Runtime.executionContextDestroyed
params : {
executionContextId : <executionContextId>
+ executionContextUniqueId : <executionContextUniqueId>
}
}
id matching: true
@@ -86,6 +88,7 @@ From session 3
method : Runtime.executionContextDestroyed
params : {
executionContextId : <executionContextId>
+ executionContextUniqueId : <executionContextUniqueId>
}
}
id matching: true
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index b47b44e859..ce4c25cb05 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -68,7 +68,8 @@ void TaskRunner::Run() {
void TaskRunner::RunMessageLoop(bool only_protocol) {
int loop_number = ++nested_loop_count_;
- while (nested_loop_count_ == loop_number && !is_terminated_) {
+ while (nested_loop_count_ == loop_number && !is_terminated_ &&
+ !isolate()->IsExecutionTerminating()) {
std::unique_ptr<TaskRunner::Task> task = GetNext(only_protocol);
if (!task) return;
v8::Isolate::Scope isolate_scope(isolate());
@@ -88,7 +89,8 @@ void TaskRunner::RunMessageLoop(bool only_protocol) {
// This can be removed once https://crbug.com/v8/10747 is fixed.
// TODO(10748): Enable --stress-incremental-marking after the existing
// tests are fixed.
- if (!i::v8_flags.stress_incremental_marking) {
+ if (!i::v8_flags.stress_incremental_marking &&
+ !isolate()->IsExecutionTerminating()) {
while (v8::platform::PumpMessageLoop(
v8::internal::V8::GetCurrentPlatform(), isolate(),
isolate()->HasPendingBackgroundTasks()
diff --git a/deps/v8/test/inspector/tasks.cc b/deps/v8/test/inspector/tasks.cc
index 9a01555b2f..669a1e100c 100644
--- a/deps/v8/test/inspector/tasks.cc
+++ b/deps/v8/test/inspector/tasks.cc
@@ -14,6 +14,90 @@
namespace v8 {
namespace internal {
+void RunSyncTask(TaskRunner* task_runner,
+ std::function<void(InspectorIsolateData*)> callback) {
+ class SyncTask : public TaskRunner::Task {
+ public:
+ SyncTask(v8::base::Semaphore* ready_semaphore,
+ std::function<void(InspectorIsolateData*)> callback)
+ : ready_semaphore_(ready_semaphore), callback_(callback) {}
+ ~SyncTask() override = default;
+ bool is_priority_task() final { return true; }
+
+ private:
+ void Run(InspectorIsolateData* data) override {
+ callback_(data);
+ if (ready_semaphore_) ready_semaphore_->Signal();
+ }
+
+ v8::base::Semaphore* ready_semaphore_;
+ std::function<void(InspectorIsolateData*)> callback_;
+ };
+
+ v8::base::Semaphore ready_semaphore(0);
+ task_runner->Append(std::make_unique<SyncTask>(&ready_semaphore, callback));
+ ready_semaphore.Wait();
+}
+
+void RunSimpleAsyncTask(TaskRunner* task_runner,
+ std::function<void(InspectorIsolateData* data)> task,
+ v8::Local<v8::Function> callback) {
+ class DispatchResponseTask : public TaskRunner::Task {
+ public:
+ explicit DispatchResponseTask(v8::Local<v8::Function> callback)
+ : context_(callback->GetIsolate(),
+ callback->GetIsolate()->GetCurrentContext()),
+ client_callback_(callback->GetIsolate(), callback) {}
+ ~DispatchResponseTask() override = default;
+
+ private:
+ bool is_priority_task() final { return true; }
+ void Run(InspectorIsolateData* data) override {
+ v8::HandleScope handle_scope(data->isolate());
+ v8::Local<v8::Context> context = context_.Get(data->isolate());
+ v8::MicrotasksScope microtasks_scope(context,
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::Context::Scope context_scope(context);
+ USE(client_callback_.Get(data->isolate())
+ ->Call(context, context->Global(), 0, nullptr));
+ }
+ v8::Global<v8::Context> context_;
+ v8::Global<v8::Function> client_callback_;
+ };
+
+ using TaskCallback = std::function<void(InspectorIsolateData * data)>;
+
+ class TaskWrapper : public TaskRunner::Task {
+ public:
+ TaskWrapper(TaskCallback task, TaskRunner* client_task_runner,
+ std::unique_ptr<TaskRunner::Task> response_task)
+ : task_(std::move(task)),
+ client_task_runner_(client_task_runner),
+ response_task_(std::move(response_task)) {}
+
+ ~TaskWrapper() override = default;
+
+ private:
+ bool is_priority_task() final { return true; }
+ void Run(InspectorIsolateData* data) override {
+ task_(data);
+ client_task_runner_->Append(std::move(response_task_));
+ }
+
+ TaskCallback task_;
+ TaskRunner* client_task_runner_;
+ std::unique_ptr<TaskRunner::Task> response_task_;
+ };
+
+ v8::Local<v8::Context> context = callback->GetIsolate()->GetCurrentContext();
+ TaskRunner* response_task_runner =
+ InspectorIsolateData::FromContext(context)->task_runner();
+
+ auto response_task = std::make_unique<DispatchResponseTask>(callback);
+ task_runner->Append(std::make_unique<TaskWrapper>(
+ std::move(task), response_task_runner, std::move(response_task)));
+}
+
void ExecuteStringTask::Run(InspectorIsolateData* data) {
v8::HandleScope handle_scope(data->isolate());
v8::Local<v8::Context> context = data->GetDefaultContext(context_group_id_);
diff --git a/deps/v8/test/inspector/tasks.h b/deps/v8/test/inspector/tasks.h
index e178c93766..9b909eb12c 100644
--- a/deps/v8/test/inspector/tasks.h
+++ b/deps/v8/test/inspector/tasks.h
@@ -20,29 +20,11 @@
namespace v8 {
namespace internal {
-template <typename T>
-void RunSyncTask(TaskRunner* task_runner, T callback) {
- class SyncTask : public TaskRunner::Task {
- public:
- SyncTask(v8::base::Semaphore* ready_semaphore, T callback)
- : ready_semaphore_(ready_semaphore), callback_(callback) {}
- ~SyncTask() override = default;
- bool is_priority_task() final { return true; }
-
- private:
- void Run(InspectorIsolateData* data) override {
- callback_(data);
- if (ready_semaphore_) ready_semaphore_->Signal();
- }
-
- v8::base::Semaphore* ready_semaphore_;
- T callback_;
- };
-
- v8::base::Semaphore ready_semaphore(0);
- task_runner->Append(std::make_unique<SyncTask>(&ready_semaphore, callback));
- ready_semaphore.Wait();
-}
+void RunSyncTask(TaskRunner* task_runner,
+ std::function<void(InspectorIsolateData*)> callback);
+void RunSimpleAsyncTask(TaskRunner* task_runner,
+ std::function<void(InspectorIsolateData* data)> task,
+ v8::Local<v8::Function> callback);
class SendMessageToBackendTask : public TaskRunner::Task {
public:
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index f7325aa23b..4419169e5c 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -11,6 +11,7 @@ from testrunner.outproc import base as outproc
PROTOCOL_TEST_JS = "protocol-test.js"
WASM_INSPECTOR_JS = "wasm-inspector-test.js"
+PRIVATE_MEMBER_TEST_JS = "private-class-member-inspector-test.js"
EXPECTED_SUFFIX = "-expected.txt"
RESOURCES_FOLDER = "resources"
@@ -18,7 +19,7 @@ RESOURCES_FOLDER = "resources"
class TestLoader(testsuite.JSTestLoader):
@property
def excluded_files(self):
- return {PROTOCOL_TEST_JS, WASM_INSPECTOR_JS}
+ return {PROTOCOL_TEST_JS, WASM_INSPECTOR_JS, PRIVATE_MEMBER_TEST_JS}
@property
def excluded_dirs(self):
@@ -54,8 +55,9 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'inspector-test'
- def _get_resources(self):
- return [
+ def get_android_resources(self):
+ super_resources = super().get_android_resources()
+ return super_resources + [
os.path.join('test', 'inspector', 'debugger', 'resources',
'break-locations.js'),
os.path.join('test', 'inspector', WASM_INSPECTOR_JS),
@@ -66,4 +68,4 @@ class TestCase(testcase.TestCase):
return outproc.ExpectedOutProc(
self.expected_outcomes,
os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX,
- self.suite.test_config.regenerate_expected_files)
+ self.test_config.regenerate_expected_files)
diff --git a/deps/v8/test/inspector/wasm-inspector-test.js b/deps/v8/test/inspector/wasm-inspector-test.js
index 3e68e0689a..9729c3e0f4 100644
--- a/deps/v8/test/inspector/wasm-inspector-test.js
+++ b/deps/v8/test/inspector/wasm-inspector-test.js
@@ -160,7 +160,11 @@ async function dumpTables(tablesObj) {
let referencedObj = await Protocol.Runtime.getProperties(
{objectId: entry.value.objectId});
let value = referencedObj.result.result
- .filter(prop => prop.name == "value")[0].value.description;
+ .filter(prop => prop.name == "value")[0].value;
+ // If the value doesn't have a description, fall back to its value
+ // property. (For null this makes sure to print "null", as the null
+ // value doesn't have a description.)
+ value = value.description ?? value.value;
description = `${value} (${description})`;
}
functions.push(`${entry.name}: ${description}`);
diff --git a/deps/v8/test/intl/assert.js b/deps/v8/test/intl/assert.js
index 2a4f630eaf..41d578acf2 100644
--- a/deps/v8/test/intl/assert.js
+++ b/deps/v8/test/intl/assert.js
@@ -292,3 +292,17 @@ function assertLanguageTag(child, parent) {
fail(child, parent, 'language tag comparison');
}
}
+
+function assertArrayEquals(expected, found, name_opt) {
+ var start = "";
+ if (name_opt) {
+ start = name_opt + " - ";
+ }
+ assertEquals(expected.length, found.length, start + "array length");
+ if (expected.length === found.length) {
+ for (var i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], found[i],
+ start + "array element at index " + i);
+ }
+ }
+}
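
A hedged usage sketch of the new helper, assuming the harness's assertEquals from this same assert.js is in scope; the array values are made up. Comparing against a spread copy keeps the expected side independent of the in-place Array.prototype.sort:

    const collations = ['big5han', 'compat', 'emoji'];
    // Checks that the list is already in sorted order.
    assertArrayEquals([...collations].sort(), collations, 'collations sorted');
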
diff --git a/deps/v8/test/intl/date-format/en-format-range-to-parts.js b/deps/v8/test/intl/date-format/en-format-range-to-parts.js
index 9d9b2b8193..41162f1a5c 100644
--- a/deps/v8/test/intl/date-format/en-format-range-to-parts.js
+++ b/deps/v8/test/intl/date-format/en-format-range-to-parts.js
@@ -15,7 +15,7 @@ const expected1 = [
{type: "month", value: "Jan", source: "shared"},
{type: "literal", value: " ", source: "shared"},
{type: "day", value: "3", source: "startRange"},
- {type: "literal", value: " – ", source: "shared"},
+ {type: "literal", value: "\u2009–\u2009", source: "shared"},
{type: "day", value: "5", source: "endRange"},
{type: "literal", value: ", ", source: "shared"},
{type: "year", value: "2019", source: "shared"}
@@ -37,9 +37,9 @@ const expected2 = [
{type: "day", value: "5", source: "shared"},
{type: "literal", value: ", ", source: "shared"},
{type: "hour", value: "7", source: "startRange"},
- {type: "literal", value: " – ", source: "shared"},
+ {type: "literal", value: "\u2009–\u2009", source: "shared"},
{type: "hour", value: "10", source: "endRange"},
- {type: "literal", value: " ", source: "shared"},
+ {type: "literal", value: "\u202f", source: "shared"},
{type: "dayPeriod", value: "PM", source: "shared"}
];
dtf = new Intl.DateTimeFormat(["en"], {month: "short", day: "numeric", hour: "numeric"});
diff --git a/deps/v8/test/intl/date-format/format-range.js b/deps/v8/test/intl/date-format/format-range.js
index a5ac8df71d..e45501238f 100644
--- a/deps/v8/test/intl/date-format/format-range.js
+++ b/deps/v8/test/intl/date-format/format-range.js
@@ -23,20 +23,20 @@ assertThrows(() => dtf.formatRange(date1, "2019-5-4"), RangeError);
assertDoesNotThrow(() =>dtf.formatRange(date2, date1));
assertDoesNotThrow(() =>dtf.formatRange(date1, date2));
-assertEquals("1/3/2019 – 1/5/2019", dtf.formatRange(date1, date2));
-assertEquals("1/3/2019 – 3/4/2019", dtf.formatRange(date1, date3));
-assertEquals("1/3/2019 – 3/4/2020", dtf.formatRange(date1, date4));
-assertEquals("1/5/2019 – 3/4/2019", dtf.formatRange(date2, date3));
-assertEquals("1/5/2019 – 3/4/2020", dtf.formatRange(date2, date4));
-assertEquals("3/4/2019 – 3/4/2020", dtf.formatRange(date3, date4));
+assertEquals("1/3/2019\u2009–\u20091/5/2019", dtf.formatRange(date1, date2));
+assertEquals("1/3/2019\u2009–\u20093/4/2019", dtf.formatRange(date1, date3));
+assertEquals("1/3/2019\u2009–\u20093/4/2020", dtf.formatRange(date1, date4));
+assertEquals("1/5/2019\u2009–\u20093/4/2019", dtf.formatRange(date2, date3));
+assertEquals("1/5/2019\u2009–\u20093/4/2020", dtf.formatRange(date2, date4));
+assertEquals("3/4/2019\u2009–\u20093/4/2020", dtf.formatRange(date3, date4));
dtf = new Intl.DateTimeFormat(["en"], {year: "numeric", month: "short", day: "numeric"});
-assertEquals("Jan 3 – 5, 2019", dtf.formatRange(date1, date2));
-assertEquals("Jan 3 – Mar 4, 2019", dtf.formatRange(date1, date3));
-assertEquals("Jan 3, 2019 – Mar 4, 2020", dtf.formatRange(date1, date4));
-assertEquals("Jan 5 – Mar 4, 2019", dtf.formatRange(date2, date3));
-assertEquals("Jan 5, 2019 – Mar 4, 2020", dtf.formatRange(date2, date4));
-assertEquals("Mar 4, 2019 – Mar 4, 2020", dtf.formatRange(date3, date4));
+assertEquals("Jan 3\u2009–\u20095, 2019", dtf.formatRange(date1, date2));
+assertEquals("Jan 3\u2009–\u2009Mar 4, 2019", dtf.formatRange(date1, date3));
+assertEquals("Jan 3, 2019\u2009–\u2009Mar 4, 2020", dtf.formatRange(date1, date4));
+assertEquals("Jan 5\u2009–\u2009Mar 4, 2019", dtf.formatRange(date2, date3));
+assertEquals("Jan 5, 2019\u2009–\u2009Mar 4, 2020", dtf.formatRange(date2, date4));
+assertEquals("Mar 4, 2019\u2009–\u2009Mar 4, 2020", dtf.formatRange(date3, date4));
// Test the sequence of ToNumber and TimeClip
var secondDateAccessed = false;
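
The expected strings now wrap the en dash in U+2009 (thin space), and the dayPeriod separator becomes U+202F, matching newer CLDR/ICU data (roughly ICU 72 and later). A quick way to inspect the exact code points an engine actually produces; output depends on the bundled ICU version:

    const dtf = new Intl.DateTimeFormat('en', { year: 'numeric', month: 'numeric', day: 'numeric' });
    const range = dtf.formatRange(new Date(2019, 0, 3), new Date(2019, 0, 5));
    console.log(range);
    // Dump code points to see whether the separator is " – " or "\u2009–\u2009".
    console.log([...range].map(c => 'U+' + c.codePointAt(0).toString(16).padStart(4, '0')).join(' '));
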
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index d062dfe9a2..6dbb9d3e83 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -28,14 +28,15 @@
[
################################################################################
[ALWAYS, {
-# TODO(ftang,jshin): The following test is flaky.
- 'overrides/caching': [PASS, FAIL],
+ # TODO(v8:13649): The following test is flaky and slow.
+ 'overrides/caching': [SKIP],
}], # ALWAYS
################################################################################
['mode == debug', {
# Skip slow tests in debug mode.
'general/empty-handle': [SKIP],
+ 'string-localecompare': [SKIP],
}], # mode == debug
##############################################################################
diff --git a/deps/v8/test/intl/locale/locale-collations.js b/deps/v8/test/intl/locale/locale-collations.js
index cc8bdbaeb8..96c1998930 100644
--- a/deps/v8/test/intl/locale/locale-collations.js
+++ b/deps/v8/test/intl/locale/locale-collations.js
@@ -15,5 +15,6 @@ for (var i = 0; i < a_to_z.length; i++) {
locale + ".collations [" + locale.collations +
"] does not meet 'type: alphanum{3,8}(sep alphanum{3,8})*'");
});
+ assertArrayEquals(locale.collations, locale.collations.sort());
}
}
diff --git a/deps/v8/test/intl/number-format/unified/style-unit.js b/deps/v8/test/intl/number-format/unified/style-unit.js
index 757c0093c2..5d313c347b 100644
--- a/deps/v8/test/intl/number-format/unified/style-unit.js
+++ b/deps/v8/test/intl/number-format/unified/style-unit.js
@@ -48,6 +48,7 @@ const validUnits = [
'megabit',
'megabyte',
'meter',
+ 'microsecond',
'mile-scandinavian',
'mile',
'millimeter',
@@ -55,6 +56,7 @@ const validUnits = [
'millisecond',
'minute',
'month',
+ 'nanosecond',
'ounce',
'petabyte',
'pound',
@@ -140,7 +142,6 @@ assertThrows(() => c('meter-per-second-squared'), RangeError);
assertThrows(() => c('metric-ton'), RangeError);
assertThrows(() => c('microgram'), RangeError);
assertThrows(() => c('micrometer'), RangeError);
-assertThrows(() => c('microsecond'), RangeError);
assertThrows(() => c('mile-per-gallon-imperial'), RangeError);
assertThrows(() => c('milliampere'), RangeError);
assertThrows(() => c('millibar'), RangeError);
@@ -151,7 +152,6 @@ assertThrows(() => c('millimole-per-liter'), RangeError);
assertThrows(() => c('milliwatt'), RangeError);
assertThrows(() => c('month-person'), RangeError);
assertThrows(() => c('nanometer'), RangeError);
-assertThrows(() => c('nanosecond'), RangeError);
assertThrows(() => c('nautical-mile'), RangeError);
assertThrows(() => c('ohm'), RangeError);
assertThrows(() => c('ounce-troy'), RangeError);
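
With this change, 'microsecond' and 'nanosecond' move from the rejected list to the sanctioned unit list. A sketch of the difference, assuming an engine whose ICU accepts the new units; the exact short symbols come from CLDR:

    const us = new Intl.NumberFormat('en', { style: 'unit', unit: 'microsecond' });
    const ns = new Intl.NumberFormat('en', { style: 'unit', unit: 'nanosecond', unitDisplay: 'long' });
    console.log(us.format(3));    // e.g. "3 μs"
    console.log(ns.format(250));  // e.g. "250 nanoseconds"

    // Units outside the sanctioned list still throw, as the remaining assertions expect.
    try { new Intl.NumberFormat('en', { style: 'unit', unit: 'furlong' }); }
    catch (e) { console.log(e instanceof RangeError); }  // true
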
diff --git a/deps/v8/test/intl/regress-9912.js b/deps/v8/test/intl/regress-9912.js
index 63639e91c5..4d8cd195e7 100644
--- a/deps/v8/test/intl/regress-9912.js
+++ b/deps/v8/test/intl/regress-9912.js
@@ -9,18 +9,18 @@ let d1 = new Date(2019, 3, 4);
let d2 = new Date(2019, 4, 5);
assertEquals(df.format(d1), df.formatRange(d1, d1));
assertEquals(df.format(d2), df.formatRange(d2, d2));
-assertEquals("4/4/2019 – 5/5/2019", df.formatRange(d1, d2));
+assertEquals("4/4/2019\u2009–\u20095/5/2019", df.formatRange(d1, d2));
// An old date just before the Julian / Gregorian switch
let d3 = new Date(1582, 8, 13);
let d4 = new Date(1582, 9, 14);
assertEquals(df.format(d3), df.formatRange(d3, d3));
assertEquals(df.format(d4), df.formatRange(d4, d4));
-assertEquals("9/13/1582 – 10/14/1582", df.formatRange(d3, d4));
+assertEquals("9/13/1582\u2009–\u200910/14/1582", df.formatRange(d3, d4));
// An older date
let d5 = new Date(1000, 0, 1);
let d6 = new Date(1001, 1, 2);
assertEquals(df.format(d5), df.formatRange(d5, d5));
assertEquals(df.format(d6), df.formatRange(d6, d6));
-assertEquals("1/1/1000 – 2/2/1001", df.formatRange(d5, d6));
+assertEquals("1/1/1000\u2009–\u20092/2/1001", df.formatRange(d5, d6));
diff --git a/deps/v8/test/intl/relative-time-format/format-en.js b/deps/v8/test/intl/relative-time-format/format-en.js
index d802902587..278b537b89 100644
--- a/deps/v8/test/intl/relative-time-format/format-en.js
+++ b/deps/v8/test/intl/relative-time-format/format-en.js
@@ -162,77 +162,77 @@ assertEquals('in 345 yr.', shortAuto.format(345, 'year'));
let narrowAuto = new Intl.RelativeTimeFormat(
"en", {style: "narrow", localeMatcher: 'lookup', numeric: 'auto'});
-assertEquals('3 sec. ago', narrowAuto.format(-3, 'second'));
-assertEquals('2 sec. ago', narrowAuto.format(-2, 'second'));
-assertEquals('1 sec. ago', narrowAuto.format(-1, 'second'));
+assertEquals('3s ago', narrowAuto.format(-3, 'second'));
+assertEquals('2s ago', narrowAuto.format(-2, 'second'));
+assertEquals('1s ago', narrowAuto.format(-1, 'second'));
assertEquals('now', narrowAuto.format(0, 'second'));
assertEquals('now', narrowAuto.format(-0, 'second'));
-assertEquals('in 1 sec.', narrowAuto.format(1, 'second'));
-assertEquals('in 2 sec.', narrowAuto.format(2, 'second'));
-assertEquals('in 345 sec.', narrowAuto.format(345, 'second'));
+assertEquals('in 1s', narrowAuto.format(1, 'second'));
+assertEquals('in 2s', narrowAuto.format(2, 'second'));
+assertEquals('in 345s', narrowAuto.format(345, 'second'));
-assertEquals('3 min. ago', narrowAuto.format(-3, 'minute'));
-assertEquals('2 min. ago', narrowAuto.format(-2, 'minute'));
-assertEquals('1 min. ago', narrowAuto.format(-1, 'minute'));
+assertEquals('3m ago', narrowAuto.format(-3, 'minute'));
+assertEquals('2m ago', narrowAuto.format(-2, 'minute'));
+assertEquals('1m ago', narrowAuto.format(-1, 'minute'));
assertEquals('this minute', narrowAuto.format(0, 'minute'));
assertEquals('this minute', narrowAuto.format(-0, 'minute'));
-assertEquals('in 1 min.', narrowAuto.format(1, 'minute'));
-assertEquals('in 2 min.', narrowAuto.format(2, 'minute'));
-assertEquals('in 345 min.', narrowAuto.format(345, 'minute'));
+assertEquals('in 1m', narrowAuto.format(1, 'minute'));
+assertEquals('in 2m', narrowAuto.format(2, 'minute'));
+assertEquals('in 345m', narrowAuto.format(345, 'minute'));
-assertEquals('3 hr. ago', narrowAuto.format(-3, 'hour'));
-assertEquals('2 hr. ago', narrowAuto.format(-2, 'hour'));
-assertEquals('1 hr. ago', narrowAuto.format(-1, 'hour'));
+assertEquals('3h ago', narrowAuto.format(-3, 'hour'));
+assertEquals('2h ago', narrowAuto.format(-2, 'hour'));
+assertEquals('1h ago', narrowAuto.format(-1, 'hour'));
assertEquals('this hour', narrowAuto.format(0, 'hour'));
assertEquals('this hour', narrowAuto.format(-0, 'hour'));
-assertEquals('in 1 hr.', narrowAuto.format(1, 'hour'));
-assertEquals('in 2 hr.', narrowAuto.format(2, 'hour'));
-assertEquals('in 345 hr.', narrowAuto.format(345, 'hour'));
+assertEquals('in 1h', narrowAuto.format(1, 'hour'));
+assertEquals('in 2h', narrowAuto.format(2, 'hour'));
+assertEquals('in 345h', narrowAuto.format(345, 'hour'));
-assertEquals('3 days ago', narrowAuto.format(-3, 'day'));
-assertEquals('2 days ago', narrowAuto.format(-2, 'day'));
+assertEquals('3d ago', narrowAuto.format(-3, 'day'));
+assertEquals('2d ago', narrowAuto.format(-2, 'day'));
assertEquals('yesterday', narrowAuto.format(-1, 'day'));
assertEquals('today', narrowAuto.format(0, 'day'));
assertEquals('today', narrowAuto.format(-0, 'day'));
assertEquals('tomorrow', narrowAuto.format(1, 'day'));
-assertEquals('in 2 days', narrowAuto.format(2, 'day'));
-assertEquals('in 345 days', narrowAuto.format(345, 'day'));
+assertEquals('in 2d', narrowAuto.format(2, 'day'));
+assertEquals('in 345d', narrowAuto.format(345, 'day'));
-assertEquals('3 wk. ago', narrowAuto.format(-3, 'week'));
-assertEquals('2 wk. ago', narrowAuto.format(-2, 'week'));
+assertEquals('3w ago', narrowAuto.format(-3, 'week'));
+assertEquals('2w ago', narrowAuto.format(-2, 'week'));
assertEquals('last wk.', narrowAuto.format(-1, 'week'));
assertEquals('this wk.', narrowAuto.format(0, 'week'));
assertEquals('this wk.', narrowAuto.format(-0, 'week'));
assertEquals('next wk.', narrowAuto.format(1, 'week'));
-assertEquals('in 2 wk.', narrowAuto.format(2, 'week'));
-assertEquals('in 345 wk.', narrowAuto.format(345, 'week'));
+assertEquals('in 2w', narrowAuto.format(2, 'week'));
+assertEquals('in 345w', narrowAuto.format(345, 'week'));
-assertEquals('3 mo. ago', narrowAuto.format(-3, 'month'));
-assertEquals('2 mo. ago', narrowAuto.format(-2, 'month'));
+assertEquals('3mo ago', narrowAuto.format(-3, 'month'));
+assertEquals('2mo ago', narrowAuto.format(-2, 'month'));
assertEquals('last mo.', narrowAuto.format(-1, 'month'));
assertEquals('this mo.', narrowAuto.format(0, 'month'));
assertEquals('this mo.', narrowAuto.format(-0, 'month'));
assertEquals('next mo.', narrowAuto.format(1, 'month'));
-assertEquals('in 2 mo.', narrowAuto.format(2, 'month'));
-assertEquals('in 345 mo.', narrowAuto.format(345, 'month'));
+assertEquals('in 2mo', narrowAuto.format(2, 'month'));
+assertEquals('in 345mo', narrowAuto.format(345, 'month'));
-assertEquals('3 qtrs. ago', narrowAuto.format(-3, 'quarter'));
-assertEquals('2 qtrs. ago', narrowAuto.format(-2, 'quarter'));
+assertEquals('3q ago', narrowAuto.format(-3, 'quarter'));
+assertEquals('2q ago', narrowAuto.format(-2, 'quarter'));
assertEquals('last qtr.', narrowAuto.format(-1, 'quarter'));
assertEquals('this qtr.', narrowAuto.format(0, 'quarter'));
assertEquals('this qtr.', narrowAuto.format(-0, 'quarter'));
assertEquals('next qtr.', narrowAuto.format(1, 'quarter'));
-assertEquals('in 2 qtrs.', narrowAuto.format(2, 'quarter'));
-assertEquals('in 345 qtrs.', narrowAuto.format(345, 'quarter'));
+assertEquals('in 2q', narrowAuto.format(2, 'quarter'));
+assertEquals('in 345q', narrowAuto.format(345, 'quarter'));
-assertEquals('3 yr. ago', narrowAuto.format(-3, 'year'));
-assertEquals('2 yr. ago', narrowAuto.format(-2, 'year'));
+assertEquals('3y ago', narrowAuto.format(-3, 'year'));
+assertEquals('2y ago', narrowAuto.format(-2, 'year'));
assertEquals('last yr.', narrowAuto.format(-1, 'year'));
assertEquals('this yr.', narrowAuto.format(0, 'year'));
assertEquals('this yr.', narrowAuto.format(-0, 'year'));
assertEquals('next yr.', narrowAuto.format(1, 'year'));
-assertEquals('in 2 yr.', narrowAuto.format(2, 'year'));
-assertEquals('in 345 yr.', narrowAuto.format(345, 'year'));
+assertEquals('in 2y', narrowAuto.format(2, 'year'));
+assertEquals('in 345y', narrowAuto.format(345, 'year'));
let longAlways = new Intl.RelativeTimeFormat(
"en", {style: "long", localeMatcher: 'lookup', numeric: 'always'});
@@ -388,77 +388,77 @@ assertEquals('in 345 yr.', shortAlways.format(345, 'year'));
let narrowAlways = new Intl.RelativeTimeFormat(
"en", {style: "narrow", localeMatcher: 'lookup', numeric: 'always'});
-assertEquals('3 sec. ago', narrowAlways.format(-3, 'second'));
-assertEquals('2 sec. ago', narrowAlways.format(-2, 'second'));
-assertEquals('1 sec. ago', narrowAlways.format(-1, 'second'));
-assertEquals('in 0 sec.', narrowAlways.format(0, 'second'));
-assertEquals('0 sec. ago', narrowAlways.format(-0, 'second'));
-assertEquals('in 1 sec.', narrowAlways.format(1, 'second'));
-assertEquals('in 2 sec.', narrowAlways.format(2, 'second'));
-assertEquals('in 345 sec.', narrowAlways.format(345, 'second'));
-
-assertEquals('3 min. ago', narrowAlways.format(-3, 'minute'));
-assertEquals('2 min. ago', narrowAlways.format(-2, 'minute'));
-assertEquals('1 min. ago', narrowAlways.format(-1, 'minute'));
-assertEquals('in 0 min.', narrowAlways.format(0, 'minute'));
-assertEquals('0 min. ago', narrowAlways.format(-0, 'minute'));
-assertEquals('in 1 min.', narrowAlways.format(1, 'minute'));
-assertEquals('in 2 min.', narrowAlways.format(2, 'minute'));
-assertEquals('in 345 min.', narrowAlways.format(345, 'minute'));
-
-assertEquals('3 hr. ago', narrowAlways.format(-3, 'hour'));
-assertEquals('2 hr. ago', narrowAlways.format(-2, 'hour'));
-assertEquals('1 hr. ago', narrowAlways.format(-1, 'hour'));
-assertEquals('in 0 hr.', narrowAlways.format(0, 'hour'));
-assertEquals('0 hr. ago', narrowAlways.format(-0, 'hour'));
-assertEquals('in 1 hr.', narrowAlways.format(1, 'hour'));
-assertEquals('in 2 hr.', narrowAlways.format(2, 'hour'));
-assertEquals('in 345 hr.', narrowAlways.format(345, 'hour'));
-
-assertEquals('3 days ago', narrowAlways.format(-3, 'day'));
-assertEquals('2 days ago', narrowAlways.format(-2, 'day'));
-assertEquals('1 day ago', narrowAlways.format(-1, 'day'));
-assertEquals('in 0 days', narrowAlways.format(0, 'day'));
-assertEquals('0 days ago', narrowAlways.format(-0, 'day'));
-assertEquals('in 1 day', narrowAlways.format(1, 'day'));
-assertEquals('in 2 days', narrowAlways.format(2, 'day'));
-assertEquals('in 345 days', narrowAlways.format(345, 'day'));
-
-assertEquals('3 wk. ago', narrowAlways.format(-3, 'week'));
-assertEquals('2 wk. ago', narrowAlways.format(-2, 'week'));
-assertEquals('1 wk. ago', narrowAlways.format(-1, 'week'));
-assertEquals('in 0 wk.', narrowAlways.format(0, 'week'));
-assertEquals('0 wk. ago', narrowAlways.format(-0, 'week'));
-assertEquals('in 1 wk.', narrowAlways.format(1, 'week'));
-assertEquals('in 2 wk.', narrowAlways.format(2, 'week'));
-assertEquals('in 345 wk.', narrowAlways.format(345, 'week'));
-
-assertEquals('3 mo. ago', narrowAlways.format(-3, 'month'));
-assertEquals('2 mo. ago', narrowAlways.format(-2, 'month'));
-assertEquals('1 mo. ago', narrowAlways.format(-1, 'month'));
-assertEquals('in 0 mo.', narrowAlways.format(0, 'month'));
-assertEquals('0 mo. ago', narrowAlways.format(-0, 'month'));
-assertEquals('in 1 mo.', narrowAlways.format(1, 'month'));
-assertEquals('in 2 mo.', narrowAlways.format(2, 'month'));
-assertEquals('in 345 mo.', narrowAlways.format(345, 'month'));
-
-assertEquals('3 qtrs. ago', narrowAlways.format(-3, 'quarter'));
-assertEquals('2 qtrs. ago', narrowAlways.format(-2, 'quarter'));
-assertEquals('1 qtr. ago', narrowAlways.format(-1, 'quarter'));
-assertEquals('in 0 qtrs.', narrowAlways.format(0, 'quarter'));
-assertEquals('0 qtrs. ago', narrowAlways.format(-0, 'quarter'));
-assertEquals('in 1 qtr.', narrowAlways.format(1, 'quarter'));
-assertEquals('in 2 qtrs.', narrowAlways.format(2, 'quarter'));
-assertEquals('in 345 qtrs.', narrowAlways.format(345, 'quarter'));
-
-assertEquals('3 yr. ago', narrowAlways.format(-3, 'year'));
-assertEquals('2 yr. ago', narrowAlways.format(-2, 'year'));
-assertEquals('1 yr. ago', narrowAlways.format(-1, 'year'));
-assertEquals('in 0 yr.', narrowAlways.format(0, 'year'));
-assertEquals('0 yr. ago', narrowAlways.format(-0, 'year'));
-assertEquals('in 1 yr.', narrowAlways.format(1, 'year'));
-assertEquals('in 2 yr.', narrowAlways.format(2, 'year'));
-assertEquals('in 345 yr.', narrowAlways.format(345, 'year'));
+assertEquals('3s ago', narrowAlways.format(-3, 'second'));
+assertEquals('2s ago', narrowAlways.format(-2, 'second'));
+assertEquals('1s ago', narrowAlways.format(-1, 'second'));
+assertEquals('in 0s', narrowAlways.format(0, 'second'));
+assertEquals('0s ago', narrowAlways.format(-0, 'second'));
+assertEquals('in 1s', narrowAlways.format(1, 'second'));
+assertEquals('in 2s', narrowAlways.format(2, 'second'));
+assertEquals('in 345s', narrowAlways.format(345, 'second'));
+
+assertEquals('3m ago', narrowAlways.format(-3, 'minute'));
+assertEquals('2m ago', narrowAlways.format(-2, 'minute'));
+assertEquals('1m ago', narrowAlways.format(-1, 'minute'));
+assertEquals('in 0m', narrowAlways.format(0, 'minute'));
+assertEquals('0m ago', narrowAlways.format(-0, 'minute'));
+assertEquals('in 1m', narrowAlways.format(1, 'minute'));
+assertEquals('in 2m', narrowAlways.format(2, 'minute'));
+assertEquals('in 345m', narrowAlways.format(345, 'minute'));
+
+assertEquals('3h ago', narrowAlways.format(-3, 'hour'));
+assertEquals('2h ago', narrowAlways.format(-2, 'hour'));
+assertEquals('1h ago', narrowAlways.format(-1, 'hour'));
+assertEquals('in 0h', narrowAlways.format(0, 'hour'));
+assertEquals('0h ago', narrowAlways.format(-0, 'hour'));
+assertEquals('in 1h', narrowAlways.format(1, 'hour'));
+assertEquals('in 2h', narrowAlways.format(2, 'hour'));
+assertEquals('in 345h', narrowAlways.format(345, 'hour'));
+
+assertEquals('3d ago', narrowAlways.format(-3, 'day'));
+assertEquals('2d ago', narrowAlways.format(-2, 'day'));
+assertEquals('1d ago', narrowAlways.format(-1, 'day'));
+assertEquals('in 0d', narrowAlways.format(0, 'day'));
+assertEquals('0d ago', narrowAlways.format(-0, 'day'));
+assertEquals('in 1d', narrowAlways.format(1, 'day'));
+assertEquals('in 2d', narrowAlways.format(2, 'day'));
+assertEquals('in 345d', narrowAlways.format(345, 'day'));
+
+assertEquals('3w ago', narrowAlways.format(-3, 'week'));
+assertEquals('2w ago', narrowAlways.format(-2, 'week'));
+assertEquals('1w ago', narrowAlways.format(-1, 'week'));
+assertEquals('in 0w', narrowAlways.format(0, 'week'));
+assertEquals('0w ago', narrowAlways.format(-0, 'week'));
+assertEquals('in 1w', narrowAlways.format(1, 'week'));
+assertEquals('in 2w', narrowAlways.format(2, 'week'));
+assertEquals('in 345w', narrowAlways.format(345, 'week'));
+
+assertEquals('3mo ago', narrowAlways.format(-3, 'month'));
+assertEquals('2mo ago', narrowAlways.format(-2, 'month'));
+assertEquals('1mo ago', narrowAlways.format(-1, 'month'));
+assertEquals('in 0mo', narrowAlways.format(0, 'month'));
+assertEquals('0mo ago', narrowAlways.format(-0, 'month'));
+assertEquals('in 1mo', narrowAlways.format(1, 'month'));
+assertEquals('in 2mo', narrowAlways.format(2, 'month'));
+assertEquals('in 345mo', narrowAlways.format(345, 'month'));
+
+assertEquals('3q ago', narrowAlways.format(-3, 'quarter'));
+assertEquals('2q ago', narrowAlways.format(-2, 'quarter'));
+assertEquals('1q ago', narrowAlways.format(-1, 'quarter'));
+assertEquals('in 0q', narrowAlways.format(0, 'quarter'));
+assertEquals('0q ago', narrowAlways.format(-0, 'quarter'));
+assertEquals('in 1q', narrowAlways.format(1, 'quarter'));
+assertEquals('in 2q', narrowAlways.format(2, 'quarter'));
+assertEquals('in 345q', narrowAlways.format(345, 'quarter'));
+
+assertEquals('3y ago', narrowAlways.format(-3, 'year'));
+assertEquals('2y ago', narrowAlways.format(-2, 'year'));
+assertEquals('1y ago', narrowAlways.format(-1, 'year'));
+assertEquals('in 0y', narrowAlways.format(0, 'year'));
+assertEquals('0y ago', narrowAlways.format(-0, 'year'));
+assertEquals('in 1y', narrowAlways.format(1, 'year'));
+assertEquals('in 2y', narrowAlways.format(2, 'year'));
+assertEquals('in 345y', narrowAlways.format(345, 'year'));
var styleNumericCombinations = [
longAuto, shortAuto, narrowAuto, longAlways,
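
The updated expectations reflect genuinely narrow relative-time strings ("3s ago") instead of duplicates of the short style. A sketch whose exact output depends on the CLDR data bundled with the engine:

    const short_ = new Intl.RelativeTimeFormat('en', { style: 'short' });
    const narrow = new Intl.RelativeTimeFormat('en', { style: 'narrow' });
    console.log(short_.format(-3, 'second'));  // "3 sec. ago"
    console.log(narrow.format(-3, 'second'));  // "3s ago" with newer CLDR, "3 sec. ago" with older data
    console.log(narrow.format(2, 'quarter'));  // "in 2q"
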
diff --git a/deps/v8/test/intl/string-localecompare.js b/deps/v8/test/intl/string-localecompare.js
index 54508d83eb..794a2d13d8 100644
--- a/deps/v8/test/intl/string-localecompare.js
+++ b/deps/v8/test/intl/string-localecompare.js
@@ -50,8 +50,11 @@ function isOptimized(fun) {
}
assertFalse(isOptimized(check));
-while (true) {
- var optimized = isOptimized(check);
- check();
- if (optimized) break;
-}
+%PrepareFunctionForOptimization(check);
+check();
+check();
+check();
+%OptimizeFunctionOnNextCall(check);
+check();
+check();
+assertTrue(isOptimized(check));
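
The rewritten test uses V8's standard natives pattern for deterministic tier-up instead of looping until optimization happens to occur. A minimal sketch of that pattern; the % intrinsics are V8 test-only and require --allow-natives-syntax (e.g. under d8), and the function name is illustrative:

    // d8 --allow-natives-syntax pattern.js
    function hot(a, b) { return a + b; }

    %PrepareFunctionForOptimization(hot);
    hot(1, 2);   // warm-up calls collect type feedback
    hot(3, 4);
    %OptimizeFunctionOnNextCall(hot);
    hot(5, 6);   // this call runs through the optimizing tier
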
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 1b0c36e0c0..d8acad5033 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -80,7 +80,7 @@ class TestCase(testcase.D8TestCase):
]
]
- if self._test_config.isolates:
+ if self.test_config.isolates:
files += ['--isolate'] + files
return files
diff --git a/deps/v8/test/js-perf-test/BigInt/add-no-opt.js b/deps/v8/test/js-perf-test/BigInt/add-no-opt.js
deleted file mode 100644
index 2c0fddda45..0000000000
--- a/deps/v8/test/js-perf-test/BigInt/add-no-opt.js
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-"use strict";
-
-d8.file.execute('bigint-util.js');
-
-let random_bigints = [];
-
-// This dummy ensures that the feedback for benchmark.run() in the Measure
-// function from base.js is not monomorphic, thereby preventing the benchmarks
-// below from being inlined. This ensures consistent behavior and comparable
-// results.
-new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
- new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
-]);
-
-
-new BenchmarkSuite(`Add-Small`, [1000], [
- new Benchmark(`Add-Small`, true, false, 0, TestAdd,
- () => SetUpRandomBigInts(32))
-]);
-
-
-new BenchmarkSuite(`Add-Large`, [1000], [
- new Benchmark(`Add-Large`, true, false, 0, TestAdd,
- () => SetUpRandomBigInts(8192))
-]);
-
-
-new BenchmarkSuite(`Add-LargerThanSmall`, [1000], [
- new Benchmark(`Add-LargerThanSmall`, true, false, 0, TestAdd,
- () => SetUpRandomBigInts(68))
-]);
-
-
-new BenchmarkSuite(`Add-Random`, [1000], [
- new Benchmark(`Add-Random`, true, false, 0, TestAdd,
- SetUpTestAddRandom)
-]);
-
-
-function SetUpRandomBigInts(bits) {
- random_bigints = [];
- // RandomBigIntWithBits needs multiples of 4 bits.
- bits = Math.floor(bits / 4) * 4;
- for (let i = 0; i < TEST_ITERATIONS; ++i) {
- const bigint = RandomBigIntWithBits(bits);
- random_bigints.push(Math.random() < 0.5 ? -bigint : bigint);
- }
-}
-
-
-function SetUpTestAddRandom() {
- random_bigints = [];
- // RandomBigIntWithBits needs multiples of 4 bits.
- const max_in_4bits = RANDOM_BIGINTS_MAX_BITS / 4;
- for (let i = 0; i < TEST_ITERATIONS; ++i) {
- const bits = Math.floor(Math.random() * max_in_4bits) * 4;
- const bigint = RandomBigIntWithBits(bits);
- random_bigints.push(Math.random() < 0.5 ? -bigint : bigint);
- }
-}
-
-
-function TestAdd() {
- let sum = 0n;
-
- for (let i = 0; i < TEST_ITERATIONS - 1; ++i) {
- sum += random_bigints[i] + random_bigints[i + 1];
- }
-
- return sum;
-}
diff --git a/deps/v8/test/js-perf-test/BigInt/add.js b/deps/v8/test/js-perf-test/BigInt/add.js
index 09e68b61fb..1b55413440 100644
--- a/deps/v8/test/js-perf-test/BigInt/add.js
+++ b/deps/v8/test/js-perf-test/BigInt/add.js
@@ -19,12 +19,6 @@ new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
]);
-new BenchmarkSuite('Add-TypeError', [10000], [
- new Benchmark('Add-TypeError', true, false, 0, TestAddTypeError,
- SetUpTestAddTypeError)
-]);
-
-
new BenchmarkSuite('Add-Zero', [1000], [
new Benchmark('Add-Zero', true, false, 0, TestAddZero, SetUpTestAddZero)
]);
@@ -52,24 +46,6 @@ new BenchmarkSuite('Add-Random', [1000], [
]);
-function SetUpTestAddTypeError() {
- initial_sum = 42n;
-}
-
-
-function TestAddTypeError() {
- let sum = initial_sum;
- for (let i = 0; i < SLOW_TEST_ITERATIONS; ++i) {
- try {
- sum = 0 + sum;
- }
- catch(e) {
- }
- }
- return sum;
-}
-
-
function SetUpTestAddZero() {
initial_sum = 42n;
}
diff --git a/deps/v8/test/js-perf-test/BigInt/bigint-util.js b/deps/v8/test/js-perf-test/BigInt/bigint-util.js
index 187de3bddc..92e5c1b767 100644
--- a/deps/v8/test/js-perf-test/BigInt/bigint-util.js
+++ b/deps/v8/test/js-perf-test/BigInt/bigint-util.js
@@ -7,10 +7,7 @@
// Test configuration.
const TEST_ITERATIONS = 1000;
const SLOW_TEST_ITERATIONS = 50;
-const SMALL_BITS_CASES = [32, 64, 128, 256];
-const MEDIUM_BITS_CASES = [512, 1024];
-const BIG_BITS_CASES = [2048, 4096, 8192];
-const BITS_CASES = [32, 64, 128, 256, 512, 1024, 2048, 4096, 8192];
+const BITS_CASES = [32, 64, 1024, 8192];
const RANDOM_BIGINTS_MAX_BITS = 64 * 100;
const BIGINT_MAX_BITS = %BigIntMaxLengthBits();
diff --git a/deps/v8/test/js-perf-test/BigInt/exponentiate.js b/deps/v8/test/js-perf-test/BigInt/exponentiate.js
deleted file mode 100644
index 41d0b7a692..0000000000
--- a/deps/v8/test/js-perf-test/BigInt/exponentiate.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-"use strict";
-
-d8.file.execute('bigint-util.js');
-
-let random_exponents = [];
-
-// This dummy ensures that the feedback for benchmark.run() in the Measure
-// function from base.js is not monomorphic, thereby preventing the benchmarks
-// below from being inlined. This ensures consistent behavior and comparable
-// results.
-new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
- new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
-]);
-
-
-new BenchmarkSuite('Exponentiate-Base-Two', [10000], [
- new Benchmark('Exponentiate-Base-Two', true, false, 0,
- TestExponentiateBaseTwo, SetUpTestExponentiateBaseTwo)
-]);
-
-
-function SetUpTestExponentiateBaseTwo() {
- random_exponents = [];
- // Restrict the maximum length of exponents to 20 bits so that the durations
- // are reasonable and BigIntTooBig exceptions can be avoided.
- const max_in_4bits = 20 / 4;
- for (let i = 0; i < TEST_ITERATIONS; ++i) {
- const bits = Math.floor(Math.random() * max_in_4bits) * 4;
- const bigint = RandomBigIntWithBits(bits);
- // Exponents are non-negative.
- random_exponents.push(bigint);
- }
-}
-
-
-function TestExponentiateBaseTwo() {
- let sum = 0n;
-
- for (let i = 0; i < TEST_ITERATIONS; ++i) {
- sum += 2n ** random_exponents[i];
- }
-
- return sum;
-}
diff --git a/deps/v8/test/js-perf-test/BigInt/shift.js b/deps/v8/test/js-perf-test/BigInt/shift.js
index fa110fb868..cf6b91d83f 100644
--- a/deps/v8/test/js-perf-test/BigInt/shift.js
+++ b/deps/v8/test/js-perf-test/BigInt/shift.js
@@ -18,16 +18,6 @@ new BenchmarkSuite('Prevent-Inline-Dummy', [100], [
new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
]);
-new BenchmarkSuite('ShiftLeft-ShiftTooBig', [1000], [
- new Benchmark('ShiftLeft-ShiftTooBig', true, false, 0,
- TestShiftLeftShiftTooBig, SetUpTestShiftLeftShiftTooBig)
-]);
-
-new BenchmarkSuite('ShiftLeft-ResultTooBig', [1000], [
- new Benchmark('ShiftLeft-ResultTooBig', true, false, 0,
- TestShiftLeftResultTooBig, SetUpTestShiftLeftResultTooBig)
-]);
-
new BenchmarkSuite('ShiftLeft-Small', [1000], [
new Benchmark('ShiftLeft-Small', true, false, 0,
TestShiftLeftSmall, SetUpTestShiftLeftSmall)
@@ -81,43 +71,6 @@ function SetUpRandomBigInts() {
}
-function SetUpTestShiftLeftShiftTooBig() {
- // Left shifting by 2^80 is throwing an exception.
- a = SmallRandomBigIntWithBits(80);
- SetUpRandomBigInts();
-}
-
-function TestShiftLeftShiftTooBig() {
- let result = 0n;
- for (let i = 0; i < SLOW_TEST_ITERATIONS; ++i) {
- try {
- result = random_bigints[i] << a;
- } catch(e) {
- }
- }
- return result;
-}
-
-
-function SetUpTestShiftLeftResultTooBig() {
- a = BigInt(BIGINT_MAX_BITS - 4);
- for (let i = 0; i < SLOW_TEST_ITERATIONS; ++i) {
- random_bigints[i] = RandomBigIntWithBits(64);
- }
-}
-
-function TestShiftLeftResultTooBig() {
- let result = 0n;
- for (let i = 0; i < SLOW_TEST_ITERATIONS; ++i) {
- try {
- result = random_bigints[i] << a;
- } catch(e) {
- }
- }
- return result;
-}
-
-
function SetUpTestShiftLeftSmall() {
random_bigints = [];
// Set up all values such that the left shifted values still fit into one
diff --git a/deps/v8/test/js-perf-test/BigInt/subtract.js b/deps/v8/test/js-perf-test/BigInt/subtract.js
index a229ddcfe8..ee7c8cd126 100644
--- a/deps/v8/test/js-perf-test/BigInt/subtract.js
+++ b/deps/v8/test/js-perf-test/BigInt/subtract.js
@@ -19,12 +19,6 @@ new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
]);
-new BenchmarkSuite('Subtract-TypeError', [10000], [
- new Benchmark('Subtract-TypeError', true, false, 0, TestSubtractTypeError,
- SetUpTestSubtractTypeError)
-]);
-
-
new BenchmarkSuite('Subtract-Zero', [1000], [
new Benchmark('Subtract-Zero', true, false, 0, TestSubtractZero,
SetUpTestSubtractZero)
@@ -114,24 +108,6 @@ function TestSubtractRandom() {
}
-function SetUpTestSubtractTypeError() {
- initial_diff = 42n;
-}
-
-
-function TestSubtractTypeError() {
- let diff = initial_diff;
- for (let i = 0; i < SLOW_TEST_ITERATIONS; ++i) {
- try {
- diff = 0 - diff;
- }
- catch(e) {
- }
- }
- return diff;
-}
-
-
function SetUpTestSubtractZero() {
initial_diff = 42n;
}
diff --git a/deps/v8/test/js-perf-test/JSTests1.json b/deps/v8/test/js-perf-test/JSTests1.json
index e9d90ba467..417116c158 100644
--- a/deps/v8/test/js-perf-test/JSTests1.json
+++ b/deps/v8/test/js-perf-test/JSTests1.json
@@ -73,8 +73,6 @@
"test_flags": ["shift"],
"results_regexp": "^BigInt\\-%s\\(Score\\): (.+)$",
"tests": [
- { "name": "ShiftLeft-ShiftTooBig" },
- { "name": "ShiftLeft-ResultTooBig" },
{ "name": "ShiftLeft-Small" },
{ "name": "ShiftLeft-Random" },
{ "name": "ShiftLeft-Growing" },
@@ -105,44 +103,19 @@
"test_flags": ["add"],
"results_regexp": "^BigInt\\-%s\\(Score\\): (.+)$",
"tests": [
- { "name": "Add-TypeError" },
{ "name": "Add-Zero" },
{ "name": "Add-SameSign-32" },
{ "name": "Add-DifferentSign-32" },
{ "name": "Add-SameSign-64" },
{ "name": "Add-DifferentSign-64" },
- { "name": "Add-SameSign-128" },
- { "name": "Add-DifferentSign-128" },
- { "name": "Add-SameSign-256" },
- { "name": "Add-DifferentSign-256" },
- { "name": "Add-SameSign-512" },
- { "name": "Add-DifferentSign-512" },
{ "name": "Add-SameSign-1024" },
{ "name": "Add-DifferentSign-1024" },
- { "name": "Add-SameSign-2048" },
- { "name": "Add-DifferentSign-2048" },
- { "name": "Add-SameSign-4096" },
- { "name": "Add-DifferentSign-4096" },
{ "name": "Add-SameSign-8192" },
{ "name": "Add-DifferentSign-8192" },
{ "name": "Add-Random" }
]
},
{
- "name": "Add-No-Opt",
- "main": "run.js",
- "flags": ["--allow-natives-syntax", "--no-turbofan"],
- "resources": ["add-no-opt.js", "bigint-util.js"],
- "test_flags": ["add-no-opt"],
- "results_regexp": "^BigInt\\-%s\\(Score\\): (.+)$",
- "tests": [
- { "name": "Add-Small" },
- { "name": "Add-Large" },
- { "name": "Add-LargerThanSmall" },
- { "name": "Add-Random" }
- ]
- },
- {
"name": "Subtract",
"main": "run.js",
"flags": ["--allow-natives-syntax"],
@@ -150,24 +123,13 @@
"test_flags": ["subtract"],
"results_regexp": "^BigInt\\-%s\\(Score\\): (.+)$",
"tests": [
- { "name": "Subtract-TypeError" },
{ "name": "Subtract-Zero" },
{ "name": "Subtract-SameSign-32" },
{ "name": "Subtract-DifferentSign-32" },
{ "name": "Subtract-SameSign-64" },
{ "name": "Subtract-DifferentSign-64" },
- { "name": "Subtract-SameSign-128" },
- { "name": "Subtract-DifferentSign-128" },
- { "name": "Subtract-SameSign-256" },
- { "name": "Subtract-DifferentSign-256" },
- { "name": "Subtract-SameSign-512" },
- { "name": "Subtract-DifferentSign-512" },
{ "name": "Subtract-SameSign-1024" },
{ "name": "Subtract-DifferentSign-1024" },
- { "name": "Subtract-SameSign-2048" },
- { "name": "Subtract-DifferentSign-2048" },
- { "name": "Subtract-SameSign-4096" },
- { "name": "Subtract-DifferentSign-4096" },
{ "name": "Subtract-SameSign-8192" },
{ "name": "Subtract-DifferentSign-8192" },
{ "name": "Subtract-Random" }
@@ -216,17 +178,6 @@
]
},
{
- "name": "Exponentiate",
- "main": "run.js",
- "flags": ["--allow-natives-syntax"],
- "resources": ["exponentiate.js", "bigint-util.js"],
- "test_flags": ["exponentiate"],
- "results_regexp": "^BigInt\\-%s\\(Score\\): (.+)$",
- "tests": [
- { "name": "Exponentiate-Base-Two" }
- ]
- },
- {
"name": "AsUintN",
"main": "run.js",
"flags": ["--allow-natives-syntax"],
diff --git a/deps/v8/test/js-perf-test/JSTests2.json b/deps/v8/test/js-perf-test/JSTests2.json
index 0933c7da07..4f2d4a1e36 100644
--- a/deps/v8/test/js-perf-test/JSTests2.json
+++ b/deps/v8/test/js-perf-test/JSTests2.json
@@ -54,13 +54,14 @@
{
"name": "Array",
"path": ["Array"],
- "timeout": 180,
+ "timeout": 200,
"timeout_arm64": 360,
"main": "run.js",
"resources": [
"filter.js", "map.js", "every.js", "join.js", "some.js", "reduce.js",
"reduce-right.js", "to-string.js", "find.js", "find-index.js",
- "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js"
+ "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js",
+ "at.js"
],
"flags": [
"--allow-natives-syntax"
diff --git a/deps/v8/test/js-perf-test/Keys/keys.js b/deps/v8/test/js-perf-test/Keys/keys.js
index 63ed0ebd7e..7654f3a696 100644
--- a/deps/v8/test/js-perf-test/Keys/keys.js
+++ b/deps/v8/test/js-perf-test/Keys/keys.js
@@ -65,6 +65,7 @@ var array_int_holey_50 = HoleyIntArray(50);
var empty_proto_5_10 = ObjectWithKeys(5);
empty_proto_5_10.__proto__ = ObjectWithProtoKeys(10, 0);
+var empty_proto_5_0 = ObjectWithKeys(5);
var empty_proto_5_5_slow = ObjectWithKeys(5);
empty_proto_5_5_slow.__proto__ = ObjectWithProtoKeys(5, 0, false);
@@ -145,6 +146,18 @@ var TestFunctions = {
});
return [result, count];
}),
+ "for-in-multi-objects": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ for (var key in object) {
+ count++;
+ result = object[key];
+ if (empty_proto_5_0[key]) {
+ count++;
+ }
+ }
+ return [result, count];
+ }),
}
var TestFunctionsArrays = {
diff --git a/deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.js b/deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.js
new file mode 100644
index 0000000000..31f2ca505d
--- /dev/null
+++ b/deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ "\123";
+ "use strict";
+ abc 1;
+}
diff --git a/deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.out b/deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.out
new file mode 100644
index 0000000000..8e60e801db
--- /dev/null
+++ b/deps/v8/test/message/fail/strict-prior-octal-escape-use-strict-before.out
@@ -0,0 +1,9 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:6: SyntaxError: Octal escape sequences are not allowed in strict mode.
+ "\123";
+ ^^
+SyntaxError: Octal escape sequences are not allowed in strict mode.
+
diff --git a/deps/v8/test/message/fail/strict-prior-octal-escape.js b/deps/v8/test/message/fail/strict-prior-octal-escape.js
new file mode 100644
index 0000000000..63319f79a5
--- /dev/null
+++ b/deps/v8/test/message/fail/strict-prior-octal-escape.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ "use strict";
+ "\123";
+ abc 1;
+}
diff --git a/deps/v8/test/message/fail/strict-prior-octal-escape.out b/deps/v8/test/message/fail/strict-prior-octal-escape.out
new file mode 100644
index 0000000000..d573790c90
--- /dev/null
+++ b/deps/v8/test/message/fail/strict-prior-octal-escape.out
@@ -0,0 +1,9 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: SyntaxError: Octal escape sequences are not allowed in strict mode.
+ "\123";
+ ^^
+SyntaxError: Octal escape sequences are not allowed in strict mode.
+
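
These message tests cover octal escapes both before and after a later "use strict" directive: any string literal in a directive prologue that ends up strict is an early SyntaxError, even when the offending literal precedes the directive. A standalone sketch of the same early error, without the test harness:

    // "\123" is an octal escape; it is rejected anywhere in a strict directive prologue.
    for (const body of ['"\\123"; "use strict";', '"use strict"; "\\123";']) {
      try {
        new Function(body);
      } catch (e) {
        console.log(e instanceof SyntaxError, e.message);
      }
    }
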
diff --git a/deps/v8/test/message/fail/strict-prior-octal-literal.js b/deps/v8/test/message/fail/strict-prior-octal-literal.js
new file mode 100644
index 0000000000..455b3a7f9b
--- /dev/null
+++ b/deps/v8/test/message/fail/strict-prior-octal-literal.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ "use strict";
+ 01;
+ abc 1;
+}
diff --git a/deps/v8/test/message/fail/strict-prior-octal-literal.out b/deps/v8/test/message/fail/strict-prior-octal-literal.out
new file mode 100644
index 0000000000..75009a0218
--- /dev/null
+++ b/deps/v8/test/message/fail/strict-prior-octal-literal.out
@@ -0,0 +1,9 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: SyntaxError: Octal literals are not allowed in strict mode.
+ 01;
+ ^^
+SyntaxError: Octal literals are not allowed in strict mode.
+
diff --git a/deps/v8/test/message/fail/var-prior-conflict.js b/deps/v8/test/message/fail/var-prior-conflict.js
new file mode 100644
index 0000000000..7f2ee3bd28
--- /dev/null
+++ b/deps/v8/test/message/fail/var-prior-conflict.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{
+ let a;
+ var a;
+ abc 1;
+}
diff --git a/deps/v8/test/message/fail/var-prior-conflict.out b/deps/v8/test/message/fail/var-prior-conflict.out
new file mode 100644
index 0000000000..7558dbcf99
--- /dev/null
+++ b/deps/v8/test/message/fail/var-prior-conflict.out
@@ -0,0 +1,9 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: SyntaxError: Identifier 'a' has already been declared
+ var a;
+ ^
+SyntaxError: Identifier 'a' has already been declared
+
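
The var-prior-conflict case is the early error for a var binding that clashes with an earlier lexical binding in the same block. Reproduced standalone:

    try {
      // var hoists out of the block, but it still conflicts with the block-scoped `let a`.
      new Function('{ let a; var a; }');
    } catch (e) {
      console.log(e instanceof SyntaxError, e.message);  // true "Identifier 'a' has already been declared"
    }
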
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 7bde812162..f04aff4cf9 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -40,6 +40,7 @@
# Skip Liftoff tests on platforms that don't support Liftoff.
['arch != x64 and arch != ia32 and arch != arm64 and arch != arm and arch != s390x', {
'wasm-trace-memory-liftoff': [SKIP],
+ 'wasm-trace-memory64-liftoff': [SKIP],
'wasm-trace-liftoff': [SKIP],
}], # arch != x64 and arch != ia32 and arch != arm64 and arch != arm and arch != s390x
@@ -51,8 +52,17 @@
['no_i18n == True', {
'fail/list-format*': [SKIP],
+ # Needs the StringPrototypeToLowerCaseIntl builtin.
+ 'wasm-recognize-imports': [SKIP],
}], # no_i18n == True
+################################################################################
+['mode == release', {
+ # Slow tests in release mode.
+ 'fail/map-grow-failed': [PASS, SLOW],
+ 'fail/set-grow-failed': [PASS, SLOW],
+}],
+
##############################################################################
# TODO(v8:7777): Change this once wasm is supported in jitless mode.
['not has_webassembly or variant == jitless', {
@@ -72,12 +82,15 @@
['arch == mips64el or arch == riscv64 or arch == loong64', {
# Tests that require Simd enabled.
'wasm-trace-memory': [SKIP],
+ 'wasm-trace-memory64': [SKIP],
}], # arch == mips64el or arch == riscv64 or arch == loong64
##############################################################################
['no_simd_hardware == True', {
'wasm-trace-memory': [SKIP],
'wasm-trace-memory-liftoff': [SKIP],
+ 'wasm-trace-memory64': [SKIP],
+ 'wasm-trace-memory64-liftoff': [SKIP],
}], # no_simd_hardware == True
################################################################################
@@ -98,4 +111,12 @@
['arch == riscv32', {
'wasm-trace-turbofan':[SKIP],
}], # (arch == riscv32)
+
+##############################################################################
+# 32-bit platforms
+['arch in (ia32, arm, riscv32)', {
+ # Needs >4GB of available contiguous memory.
+ 'wasm-trace-memory64': [SKIP],
+ 'wasm-trace-memory64-liftoff': [SKIP],
+}], # 'arch in (ia32, arm, riscv32)'
]
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index dda414c8a4..00d60a6bd9 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -98,4 +98,4 @@ class TestCase(testcase.D8TestCase):
self._base_path,
self._expected_fail(),
self._base_path + '.out',
- self.suite.test_config.regenerate_expected_files)
+ self.test_config.regenerate_expected_files)
diff --git a/deps/v8/test/message/wasm-recognize-imports.js b/deps/v8/test/message/wasm-recognize-imports.js
new file mode 100644
index 0000000000..70d7e053e9
--- /dev/null
+++ b/deps/v8/test/message/wasm-recognize-imports.js
@@ -0,0 +1,45 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-stringref --allow-natives-syntax
+// Flags: --trace-wasm-inlining --liftoff
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+let sig_w_w = makeSig([kWasmStringRef], [kWasmStringRef]);
+let toLowerCase = builder.addImport("m", "toLowerCase", sig_w_w);
+
+builder.addFunction('call_tolower', sig_w_w).exportFunc().addBody([
+ kExprLocalGet, 0,
+ kExprCallFunction, toLowerCase,
+]);
+
+let module = builder.toModule();
+
+let recognizable = Function.prototype.call.bind(String.prototype.toLowerCase);
+let recognizable_imports = { m: { toLowerCase: recognizable } };
+
+let instance1 = new WebAssembly.Instance(module, recognizable_imports);
+let call_tolower = instance1.exports.call_tolower;
+call_tolower("ABC");
+%WasmTierUpFunction(call_tolower);
+call_tolower("ABC");
+
+// Creating a second instance with identical imports should not cause
+// recompilation.
+console.log("Second instance.");
+let instance2 = new WebAssembly.Instance(module, recognizable_imports);
+let call_tolower2 = instance2.exports.call_tolower;
+call_tolower2("DEF");
+console.log("Still optimized: " + %IsTurboFanFunction(call_tolower2));
+
+// Creating a third instance with different imports must not reuse the
+// existing optimized code.
+console.log("Third instance.");
+let other_imports = { m: { toLowerCase: () => "foo" } };
+let instance3 = new WebAssembly.Instance(module, other_imports);
+let call_tolower3 = instance3.exports.call_tolower;
+call_tolower3("GHI");
+console.log("Still optimized: " + %IsTurboFanFunction(call_tolower3));
diff --git a/deps/v8/test/message/wasm-recognize-imports.out b/deps/v8/test/message/wasm-recognize-imports.out
new file mode 100644
index 0000000000..972daf3cdf
--- /dev/null
+++ b/deps/v8/test/message/wasm-recognize-imports.out
@@ -0,0 +1,5 @@
+[function 1: import 0 is well-known built-in String.toLowerCase]
+Second instance.
+Still optimized: true
+Third instance.
+Still optimized: false
diff --git a/deps/v8/test/message/wasm-trace-liftoff.js b/deps/v8/test/message/wasm-trace-liftoff.js
index 4cf5d88f78..c0e53d6c7d 100644
--- a/deps/v8/test/message/wasm-trace-liftoff.js
+++ b/deps/v8/test/message/wasm-trace-liftoff.js
@@ -41,8 +41,11 @@ let kCallIdentityFunction = builder.addFunction('call_identity', kSig_i_v)
])
.exportFunc()
.index;
+let kVoidFunction =
+ builder.addFunction('void', kSig_v_v).addBody([]).exportFunc().index;
builder.addFunction('main', kSig_v_v)
.addBody([
+ kExprCallFunction, kVoidFunction, // -
kExprCallFunction, kCall23Function, kExprDrop, // -
kExprCallFunction, kUnnamedFunction, kExprDrop, // -
kExprCallFunction, kRet0Function, kExprDrop, // -
diff --git a/deps/v8/test/message/wasm-trace-liftoff.out b/deps/v8/test/message/wasm-trace-liftoff.out
index 1332b9d4c1..1538715ae5 100644
--- a/deps/v8/test/message/wasm-trace-liftoff.out
+++ b/deps/v8/test/message/wasm-trace-liftoff.out
@@ -1,4 +1,6 @@
- 1: ~wasm-function[8] "main" {
+ 1: ~wasm-function[9] "main" {
+ 2: ~wasm-function[8] "void" {
+ 2: }
2: ~wasm-function[1] "call_23" {
3: ~wasm-function[0] "ret_23" {
3: } -> 23
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.js b/deps/v8/test/message/wasm-trace-memory-liftoff.js
index bdad488e4c..248aad0149 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.js
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.js
@@ -4,6 +4,4 @@
// Flags: --trace-wasm-memory --liftoff --no-wasm-tier-up
-// Force enable sse3 and sse4-1, since that will determine which execution tier
-// we use, and thus the expected output message will differ.
d8.file.execute("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory64-liftoff.js b/deps/v8/test/message/wasm-trace-memory64-liftoff.js
new file mode 100644
index 0000000000..84bbc010eb
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory64-liftoff.js
@@ -0,0 +1,7 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --trace-wasm-memory --liftoff --no-wasm-tier-up --experimental-wasm-memory64
+
+d8.file.execute("test/message/wasm-trace-memory64.js");
diff --git a/deps/v8/test/message/wasm-trace-memory64-liftoff.out b/deps/v8/test/message/wasm-trace-memory64-liftoff.out
new file mode 100644
index 0000000000..2cc0ff5ba4
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory64-liftoff.out
@@ -0,0 +1,28 @@
+liftoff func: 0:0x151 load from 0000004294970004 val: i32:0 / 00000000
+liftoff func: 1:0x15e load from 0000004294970001 val: i8:0 / 00
+liftoff func: 3:0x17a store to 0000004294970004 val: i32:305419896 / 12345678
+liftoff func: 0:0x151 load from 0000004294970002 val: i32:1450704896 / 56780000
+liftoff func: 1:0x15e load from 0000004294970006 val: i8:52 / 34
+liftoff func: 2:0x16b load from 0000004294970002 val: f32:68169720922112.000000 / 56780000
+liftoff func: 4:0x188 store to 0000004294970004 val: i8:171 / ab
+liftoff func: 0:0x151 load from 0000004294970002 val: i32:1454047232 / 56ab0000
+liftoff func: 2:0x16b load from 0000004294970002 val: f32:94008244174848.000000 / 56ab0000
+liftoff func: 6:0x1a6 store to 0000004294970004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+liftoff func: 5:0x194 load from 0000004294970002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+liftoff func: 7:0x1b3 load from 0000004294970004 val: i16:48879 / beef
+liftoff func: 8:0x1c0 load from 0000004294970002 val: i64:-4688528683866062848 / beef0000beef0000
+liftoff func: 9:0x1cd load from 0000004294970002 val: f64:-0.000015 / beef0000beef0000
+liftoff func: 10:0x1da load from 0000004294970004 val: i32:48879 / 0000beef
+liftoff func: 11:0x1e3 load from 0000004294970001 val: i8:0 / 00
+liftoff func: 13:0x1f7 store to 0000004294970004 val: i32:305419896 / 12345678
+liftoff func: 10:0x1da load from 0000004294970002 val: i32:1450704896 / 56780000
+liftoff func: 11:0x1e3 load from 0000004294970006 val: i8:52 / 34
+liftoff func: 12:0x1ec load from 0000004294970002 val: f32:68169720922112.000000 / 56780000
+liftoff func: 14:0x201 store to 0000004294970004 val: i8:171 / ab
+liftoff func: 10:0x1da load from 0000004294970002 val: i32:1454047232 / 56ab0000
+liftoff func: 12:0x1ec load from 0000004294970002 val: f32:94008244174848.000000 / 56ab0000
+liftoff func: 16:0x217 store to 0000004294970004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+liftoff func: 15:0x209 load from 0000004294970002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+liftoff func: 17:0x220 load from 0000004294970004 val: i16:48879 / beef
+liftoff func: 18:0x229 load from 0000004294970002 val: i64:-4688528683866062848 / beef0000beef0000
+liftoff func: 19:0x232 load from 0000004294970002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/message/wasm-trace-memory64.js b/deps/v8/test/message/wasm-trace-memory64.js
new file mode 100644
index 0000000000..339ce112db
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory64.js
@@ -0,0 +1,116 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --trace-wasm-memory --no-liftoff --experimental-wasm-memory64
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+const GB = 1024 * 1024 * 1024;
+const BIG_OFFSET = 4294970000n; // 0x100000a90n
+const BIG_OFFSET_LEB = [0x90, 0x95, 0x80, 0x80, 0x10];
+
+var builder = new WasmModuleBuilder();
+builder.addMemory64(5 * GB / kPageSize);
+
+// Functions for testing big offsets.
+builder.addFunction('load', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+builder.addFunction('load8', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem8U, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+builder.addFunction('loadf', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprF32LoadMem, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+builder.addFunction('store', kSig_v_li)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, ...BIG_OFFSET_LEB])
+ .exportFunc();
+builder.addFunction('store8', kSig_v_li)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem8, 0, ...BIG_OFFSET_LEB])
+ .exportFunc();
+builder.addFunction('load128', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kSimdPrefix, kExprS128LoadMem, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+// SIMD is not exposed to JS, so use splat to construct a s128 value.
+builder.addFunction('store128', kSig_v_li)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kSimdPrefix, kExprI32x4Splat, kSimdPrefix, kExprS128StoreMem, 0, ...BIG_OFFSET_LEB])
+ .exportFunc();
+// We add these functions at the end rather than in sorted order, so that
+// earlier function indices stay stable and the .out changes remain small.
+builder.addFunction('load16', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem16U, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+builder.addFunction('load64', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI64LoadMem, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+builder.addFunction('loadf64', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprF64LoadMem, 0, ...BIG_OFFSET_LEB, kExprDrop])
+ .exportFunc();
+
+// Functions for testing big indexes.
+builder.addFunction('load_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('load8_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem8U, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('loadf_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprF32LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('store_L', kSig_v_li)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0])
+ .exportFunc();
+builder.addFunction('store8_L', kSig_v_li)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem8, 0, 0])
+ .exportFunc();
+builder.addFunction('load128_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kSimdPrefix, kExprS128LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+// SIMD is not exposed to JS, so use splat to construct a s128 value.
+builder.addFunction('store128_L', kSig_v_li)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kSimdPrefix, kExprI32x4Splat, kSimdPrefix, kExprS128StoreMem, 0, 0])
+ .exportFunc();
+// We add these functions at the end rather than in sorted order, so that
+// earlier function indices stay stable and the .out changes remain small.
+builder.addFunction('load16_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem16U, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('load64_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprI64LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('loadf64_L', kSig_v_l)
+ .addBody([kExprLocalGet, 0, kExprF64LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+
+var module = builder.instantiate();
+
+module.exports.load(4n);
+module.exports.load8(1n);
+module.exports.store(4n, 0x12345678);
+module.exports.load(2n);
+module.exports.load8(6n);
+module.exports.loadf(2n);
+module.exports.store8(4n, 0xab);
+module.exports.load(2n);
+module.exports.loadf(2n);
+module.exports.store128(4n, 0xbeef);
+module.exports.load128(2n);
+module.exports.load16(4n);
+module.exports.load64(2n);
+module.exports.loadf64(2n);
+
+module.exports.load_L(BIG_OFFSET + 4n);
+module.exports.load8_L(BIG_OFFSET + 1n);
+module.exports.store_L(BIG_OFFSET + 4n, 0x12345678);
+module.exports.load_L(BIG_OFFSET + 2n);
+module.exports.load8_L(BIG_OFFSET + 6n);
+module.exports.loadf_L(BIG_OFFSET + 2n);
+module.exports.store8_L(BIG_OFFSET + 4n, 0xab);
+module.exports.load_L(BIG_OFFSET + 2n);
+module.exports.loadf_L(BIG_OFFSET + 2n);
+module.exports.store128_L(BIG_OFFSET + 4n, 0xbeef);
+module.exports.load128_L(BIG_OFFSET + 2n);
+module.exports.load16_L(BIG_OFFSET + 4n);
+module.exports.load64_L(BIG_OFFSET + 2n);
+module.exports.loadf64_L(BIG_OFFSET + 2n);
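The BIG_OFFSET_LEB bytes used throughout the test above are the unsigned LEB128 encoding of BIG_OFFSET (4294970000, i.e. 0x100000a90), which is how wasm memory offsets are stored in the binary. A minimal sketch of how such a byte array can be derived, using only standard BigInt operators (the helper name is illustrative, not part of the patch):

function toUnsignedLeb128(value) {
  // Emit 7 bits per byte, least-significant group first; bit 7 is the
  // continuation flag and is set on every byte except the last.
  const bytes = [];
  do {
    let byte = Number(value & 0x7fn);
    value >>= 7n;
    if (value !== 0n) byte |= 0x80;
    bytes.push(byte);
  } while (value !== 0n);
  return bytes;
}
console.log(toUnsignedLeb128(4294970000n).map(b => '0x' + b.toString(16)));
// ['0x90', '0x95', '0x80', '0x80', '0x10'], matching BIG_OFFSET_LEB.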
diff --git a/deps/v8/test/message/wasm-trace-memory64.out b/deps/v8/test/message/wasm-trace-memory64.out
new file mode 100644
index 0000000000..8f6cf28223
--- /dev/null
+++ b/deps/v8/test/message/wasm-trace-memory64.out
@@ -0,0 +1,28 @@
+turbofan func: 0:0x151 load from 0000004294970004 val: i32:0 / 00000000
+turbofan func: 1:0x15e load from 0000004294970001 val: i8:0 / 00
+turbofan func: 3:0x17a store to 0000004294970004 val: i32:305419896 / 12345678
+turbofan func: 0:0x151 load from 0000004294970002 val: i32:1450704896 / 56780000
+turbofan func: 1:0x15e load from 0000004294970006 val: i8:52 / 34
+turbofan func: 2:0x16b load from 0000004294970002 val: f32:68169720922112.000000 / 56780000
+turbofan func: 4:0x188 store to 0000004294970004 val: i8:171 / ab
+turbofan func: 0:0x151 load from 0000004294970002 val: i32:1454047232 / 56ab0000
+turbofan func: 2:0x16b load from 0000004294970002 val: f32:94008244174848.000000 / 56ab0000
+turbofan func: 6:0x1a6 store to 0000004294970004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+turbofan func: 5:0x194 load from 0000004294970002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+turbofan func: 7:0x1b3 load from 0000004294970004 val: i16:48879 / beef
+turbofan func: 8:0x1c0 load from 0000004294970002 val: i64:-4688528683866062848 / beef0000beef0000
+turbofan func: 9:0x1cd load from 0000004294970002 val: f64:-0.000015 / beef0000beef0000
+turbofan func: 10:0x1da load from 0000004294970004 val: i32:48879 / 0000beef
+turbofan func: 11:0x1e3 load from 0000004294970001 val: i8:0 / 00
+turbofan func: 13:0x1f7 store to 0000004294970004 val: i32:305419896 / 12345678
+turbofan func: 10:0x1da load from 0000004294970002 val: i32:1450704896 / 56780000
+turbofan func: 11:0x1e3 load from 0000004294970006 val: i8:52 / 34
+turbofan func: 12:0x1ec load from 0000004294970002 val: f32:68169720922112.000000 / 56780000
+turbofan func: 14:0x201 store to 0000004294970004 val: i8:171 / ab
+turbofan func: 10:0x1da load from 0000004294970002 val: i32:1454047232 / 56ab0000
+turbofan func: 12:0x1ec load from 0000004294970002 val: f32:94008244174848.000000 / 56ab0000
+turbofan func: 16:0x217 store to 0000004294970004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+turbofan func: 15:0x209 load from 0000004294970002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+turbofan func: 17:0x220 load from 0000004294970004 val: i16:48879 / beef
+turbofan func: 18:0x229 load from 0000004294970002 val: i64:-4688528683866062848 / beef0000beef0000
+turbofan func: 19:0x232 load from 0000004294970002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/message/wasm-trace-turbofan.out b/deps/v8/test/message/wasm-trace-turbofan.out
index 40a6a1cdf4..efb762b79b 100644
--- a/deps/v8/test/message/wasm-trace-turbofan.out
+++ b/deps/v8/test/message/wasm-trace-turbofan.out
@@ -1,4 +1,6 @@
- 1: *wasm-function[8] "main" {
+ 1: *wasm-function[9] "main" {
+ 2: *wasm-function[8] "void" {
+ 2: }
2: *wasm-function[1] "call_23" {
3: *wasm-function[0] "ret_23" {
3: } -> 23
diff --git a/deps/v8/test/mjsunit/array-reverse.js b/deps/v8/test/mjsunit/array-reverse.js
index 11aeb60cac..a2918a9bd6 100644
--- a/deps/v8/test/mjsunit/array-reverse.js
+++ b/deps/v8/test/mjsunit/array-reverse.js
@@ -10,6 +10,8 @@ assertArrayEquals(["str4", "str3", "str2"], ["str2", "str3", "str4"].reverse());
assertArrayEquals([4,3,,1], [1,,3,4].reverse());
assertArrayEquals([4,,2,1], [1,2,,4].reverse());
assertArrayEquals([5,,3,,1], [1,,3,,5].reverse());
+assertArrayEquals([0.5,,0.3,,0.1], [0.1,,0.3,,0.5].reverse());
+assertArrayEquals(["5",,"3",,"1"], ["1",,"3",,"5"].reverse());
function TestReverseWithObject() {
let obj = { length: 5 };
diff --git a/deps/v8/test/mjsunit/array-tostring.js b/deps/v8/test/mjsunit/array-tostring.js
index 382bf8d7a0..d4d50127a1 100644
--- a/deps/v8/test/mjsunit/array-tostring.js
+++ b/deps/v8/test/mjsunit/array-tostring.js
@@ -172,8 +172,13 @@ assertEquals("42,42,42", (42).arrayToLocaleString());
String.prototype.toLocaleString = pushArgs("String");
Object.prototype.toLocaleString = pushArgs("Object");
- [42, "foo", {}].toLocaleString();
- assertEquals(["Number", [], "String", [], "Object", []], log);
+ // According to the ECMA-402 specification, the optional arguments locales
+ // and options must be passed. Without the ECMA-402 internationalization
+ // API, the optional arguments must not be passed.
+ const noArgs = (typeof Intl !== "object") ? [] : [undefined, undefined];
+ const result = [42, null, "foo", {}, undefined].toLocaleString();
+ assertEquals("2,,4,6,", result);
+ assertEquals(["Number", noArgs, "String", noArgs, "Object", noArgs], log);
Number.prototype.toLocaleString = NumberToLocaleString;
String.prototype.toLocaleString = StringToLocaleString;
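The noArgs distinction introduced in the hunk above follows from the specifications: ECMA-402 redefines Array.prototype.toLocaleString to forward its locales and options arguments (both undefined here) to each element, while the plain ECMA-262 version invokes each element's toLocaleString with no arguments at all. A small standalone sketch of that behavior, independent of the test's pushArgs helpers:

const seenArgCounts = [];
const original = Number.prototype.toLocaleString;
Number.prototype.toLocaleString = function(...args) {
  seenArgCounts.push(args.length);  // how many arguments were forwarded
  return original.apply(this, args);
};
[1, 2].toLocaleString();
Number.prototype.toLocaleString = original;
console.log(seenArgCounts);  // [2, 2] with the ECMA-402 API built in, [0, 0] without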
diff --git a/deps/v8/test/mjsunit/asm/regress-1395401.js b/deps/v8/test/mjsunit/asm/regress-1395401.js
new file mode 100644
index 0000000000..5898061ff4
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-1395401.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+eval(`
+ function asmModule() {
+ "use asm";
+ function x(v) {
+ v = v | 0;
+ }
+ return x;
+ }
+ asmModule();
+ `);
+
+console.profile();
diff --git a/deps/v8/test/mjsunit/code-stats-flag.js b/deps/v8/test/mjsunit/code-stats-flag.js
new file mode 100644
index 0000000000..fddfefb333
--- /dev/null
+++ b/deps/v8/test/mjsunit/code-stats-flag.js
@@ -0,0 +1,7 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc --code-stats
+
+gc();
diff --git a/deps/v8/test/mjsunit/compiler/bigint-bitwise-and.js b/deps/v8/test/mjsunit/compiler/bigint-bitwise-and.js
index 7884bba0c8..db99276624 100644
--- a/deps/v8/test/mjsunit/compiler/bigint-bitwise-and.js
+++ b/deps/v8/test/mjsunit/compiler/bigint-bitwise-and.js
@@ -27,6 +27,7 @@ function OptimizeAndTest(fn) {
assertEquals(0b1000n, fn(0b1100n, -0b110n));
// The result grows out of one digit
assertEquals(-(2n ** 64n), fn(-(2n ** 63n + 1n), -(2n ** 63n)));
+ assertOptimized(fn);
assertEquals(0b1000, fn(0b1100, 0b1010));
assertUnoptimized(fn);
diff --git a/deps/v8/test/mjsunit/compiler/bigint-bitwise-or.js b/deps/v8/test/mjsunit/compiler/bigint-bitwise-or.js
new file mode 100644
index 0000000000..3cd526e9b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-bitwise-or.js
@@ -0,0 +1,31 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+(function OptimizeAndTest() {
+ function fn(a, b) {
+ return a | b;
+ }
+ %PrepareFunctionForOptimization(fn);
+ assertEquals(0b1110n, fn(0b1100n, 0b1010n));
+ assertEquals(-0b0010n, fn(-0b100n, -0b110n));
+ assertEquals(-0b0010n, fn(-0b100n, 0b1010n));
+ assertEquals(-0b0010n, fn(0b1100n, -0b110n));
+ assertEquals(-(2n ** 64n) + 1n, fn(-(2n ** 64n) + 1n, -(2n ** 64n)));
+
+ %OptimizeFunctionOnNextCall(fn);
+ fn(0b1100n, 0b1010n);
+ assertOptimized(fn);
+
+ assertEquals(0b1110n, fn(0b1100n, 0b1010n));
+ assertEquals(-0b0010n, fn(-0b100n, -0b110n));
+ assertEquals(-0b0010n, fn(-0b100n, 0b1010n));
+ assertEquals(-0b0010n, fn(0b1100n, -0b110n));
+ assertEquals(-(2n ** 64n) + 1n, fn(-(2n ** 64n) + 1n, -(2n ** 64n)));
+ assertOptimized(fn);
+
+ assertEquals(0b1110, fn(0b1100, 0b1010));
+ assertUnoptimized(fn);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/bigint-bitwise-xor.js b/deps/v8/test/mjsunit/compiler/bigint-bitwise-xor.js
new file mode 100644
index 0000000000..13fe0492ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-bitwise-xor.js
@@ -0,0 +1,31 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+(function OptimizeAndTest() {
+ function fn(a, b) {
+ return a ^ b;
+ }
+ %PrepareFunctionForOptimization(fn);
+ assertEquals(0b0110n, fn(0b1100n, 0b1010n));
+ assertEquals(0b0110n, fn(-0b100n, -0b110n));
+ assertEquals(-0b1010n, fn(-0b100n, 0b1010n));
+ assertEquals(-0b1010n, fn(0b1100n, -0b110n));
+ assertEquals(1n, fn(-(2n ** 64n) + 1n, -(2n ** 64n)));
+
+ %OptimizeFunctionOnNextCall(fn);
+ fn(0b1100n, 0b1010n);
+ assertOptimized(fn);
+
+ assertEquals(0b0110n, fn(0b1100n, 0b1010n));
+ assertEquals(0b0110n, fn(-0b100n, -0b110n));
+ assertEquals(-0b1010n, fn(-0b100n, 0b1010n));
+ assertEquals(-0b1010n, fn(0b1100n, -0b110n));
+ assertEquals(1n, fn(-(2n ** 64n) + 1n, -(2n ** 64n)));
+ assertOptimized(fn);
+
+ assertEquals(0b0110, fn(0b1100, 0b1010));
+ assertUnoptimized(fn);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/bigint-compare.js b/deps/v8/test/mjsunit/compiler/bigint-compare.js
new file mode 100644
index 0000000000..65fac50428
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-compare.js
@@ -0,0 +1,61 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+function LessThan(x, y) {
+ return x < y;
+}
+
+function LessThanOrEqual(x, y) {
+ return x <= y;
+}
+
+function GreaterThan(x, y) {
+ return x > y;
+}
+
+function GreaterThanOrEqual(x, y) {
+ return x >= y;
+}
+
+function Test(f, large, lt, eq) {
+ assertEquals(lt, f(1n, 2n));
+ assertEquals(!lt, f(0n, -1n));
+ assertEquals(eq, f(-42n, -42n));
+ assertEquals(!lt, f(-(2n ** 62n), -(2n ** 63n) + 1n));
+ assertEquals(lt, f(-(2n ** 63n) + 1n, (2n ** 63n) - 1n));
+ if (large) {
+ assertEquals(lt, f(2n ** 63n - 1n, 2n ** 63n));
+ assertEquals(!lt, f(-(2n ** 63n) + 1n, -(2n ** 63n)));
+ assertEquals(lt, f(-(13n ** 70n), 13n ** 70n)); // Different signs
+ assertEquals(!lt, f(13n ** 70n, -(13n ** 70n)));
+ assertEquals(lt, f(13n ** 80n, 13n ** 90n)); // Different lengths
+ assertEquals(!lt, f(-(13n ** 70n), -(13n ** 80n))); // Same length
+ assertEquals(eq, f(13n ** 70n, 13n ** 70n));
+ }
+}
+
+function OptAndTest(f, large) {
+ const lt = f === LessThan || f === LessThanOrEqual;
+ const eq = f === LessThanOrEqual || f === GreaterThanOrEqual;
+ %PrepareFunctionForOptimization(f);
+ Test(f, large, lt, eq);
+ assertUnoptimized(f);
+ %OptimizeFunctionOnNextCall(f);
+ Test(f, large, lt, eq);
+ assertOptimized(f);
+}
+
+OptAndTest(LessThan, false);
+OptAndTest(LessThanOrEqual, false);
+OptAndTest(GreaterThan, false);
+OptAndTest(GreaterThanOrEqual, false);
+if (%Is64Bit()) {
+ // Should deopt on large bigints and there should not be deopt loops.
+ OptAndTest(LessThan, true);
+ OptAndTest(LessThanOrEqual, true);
+ OptAndTest(GreaterThan, true);
+ OptAndTest(GreaterThanOrEqual, true);
+}
diff --git a/deps/v8/test/mjsunit/compiler/bigint-constructor.js b/deps/v8/test/mjsunit/compiler/bigint-constructor.js
new file mode 100644
index 0000000000..7a6438c6fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-constructor.js
@@ -0,0 +1,107 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+(function () {
+ function ToBigInt(x) {
+ return BigInt(x);
+ }
+
+ %PrepareFunctionForOptimization(ToBigInt);
+ assertEquals(0n, ToBigInt(0));
+ %OptimizeFunctionOnNextCall(ToBigInt);
+
+ // Test the builtin ToBigIntConvertNumber.
+ assertThrows(() => ToBigInt(undefined), TypeError);
+
+ assertEquals(0n, ToBigInt(false));
+ assertEquals(1n, ToBigInt(true));
+
+ assertEquals(42n, ToBigInt(42n));
+
+ assertEquals(3n, ToBigInt(3));
+ assertEquals(0xdeadbeefn, ToBigInt(0xdeadbeef));
+ assertEquals(-0xdeadbeefn, ToBigInt(-0xdeadbeef));
+
+ assertEquals(2n, ToBigInt("2"));
+ assertEquals(0xdeadbeefdeadbeefdn, ToBigInt("0xdeadbeefdeadbeefd"));
+ assertThrows(() => ToBigInt("-0x10"), SyntaxError);
+
+ assertThrows(() => ToBigInt(Symbol("foo")), TypeError);
+ assertOptimized(ToBigInt);
+})();
+
+{
+ // Test constants to BigInts.
+ function OptimizeAndTest(expected, fun) {
+ %PrepareFunctionForOptimization(fun);
+ assertEquals(expected, fun());
+ %OptimizeFunctionOnNextCall(fun);
+ assertEquals(expected, fun());
+ assertOptimized(fun);
+ }
+
+ OptimizeAndTest(42n, () => BigInt(42n));
+
+ // MinusZero
+ OptimizeAndTest(0n, () => BigInt(-0));
+ OptimizeAndTest(0n, () => BigInt.asIntN(32, BigInt(-0)));
+ OptimizeAndTest(0n, () => BigInt.asUintN(32, BigInt(-0)));
+ OptimizeAndTest(0n, () => 0n + BigInt(-0));
+
+ // Smi
+ OptimizeAndTest(42n, () => BigInt(42));
+ OptimizeAndTest(42n, () => BigInt.asIntN(32, BigInt(42)));
+ OptimizeAndTest(42n, () => BigInt.asUintN(32, BigInt(42)));
+ OptimizeAndTest(42n, () => 0n + BigInt(42));
+
+ // Signed32
+ OptimizeAndTest(-0x80000000n, () => BigInt(-0x80000000));
+ OptimizeAndTest(-0x80000000n, () => BigInt.asIntN(32, BigInt(-0x80000000)));
+ OptimizeAndTest(0x80000000n, () => BigInt.asUintN(32, BigInt(-0x80000000)));
+ OptimizeAndTest(-0x80000000n, () => 0n + BigInt(-0x80000000));
+
+ // Unsigned32
+ OptimizeAndTest(0x80000000n, () => BigInt(0x80000000));
+ OptimizeAndTest(-0x80000000n, () => BigInt.asIntN(32, BigInt(0x80000000)));
+ OptimizeAndTest(0x80000000n, () => BigInt.asUintN(32, BigInt(0x80000000)));
+ OptimizeAndTest(0x80000000n, () => 0n + BigInt(0x80000000));
+}
+
+(function () {
+ function SmiToBigInt(arr) {
+ return BigInt(arr[0]);
+ }
+
+ // Element kind: PACKED_SMI_ELEMENTS
+ const numbers = [0x3fffffff, 0, -0x40000000];
+ %PrepareFunctionForOptimization(SmiToBigInt);
+ assertEquals(0x3fffffffn, SmiToBigInt(numbers));
+ %OptimizeFunctionOnNextCall(SmiToBigInt);
+ assertEquals(0x3fffffffn, SmiToBigInt(numbers));
+ assertOptimized(SmiToBigInt);
+
+ // Change the map of {numbers}.
+ numbers[1] = 0x80000000;
+ assertEquals(0x3fffffffn, SmiToBigInt(numbers));
+ assertUnoptimized(SmiToBigInt);
+})();
+
+(function () {
+ function ToBigInt() {
+ return BigInt(123);
+ }
+
+ %PrepareFunctionForOptimization(ToBigInt);
+ assertEquals(123n, ToBigInt());
+ %OptimizeFunctionOnNextCall(ToBigInt);
+ assertEquals(123n, ToBigInt());
+ assertOptimized(ToBigInt);
+
+ // Replace the global BigInt object.
+ BigInt = () => 42;
+ assertUnoptimized(ToBigInt);
+ assertEquals(42, ToBigInt());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/bigint-equal.js b/deps/v8/test/mjsunit/compiler/bigint-equal.js
new file mode 100644
index 0000000000..dbbd745afa
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-equal.js
@@ -0,0 +1,44 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+const bi = 42n;
+
+function Equal(x, y) {
+ return x == y;
+}
+
+function StrictEqual(x, y) {
+ return x === y;
+}
+
+function Test(f, large) {
+ assertEquals(false, f(1n, 2n));
+ assertEquals(false, f(1n, -1n));
+ assertEquals(true, f(-1n, -1n));
+ assertEquals(true, f(bi, bi));
+ assertEquals(false, f(2n ** 63n - 1n, -(2n ** 63n) + 1n));
+ if (large) {
+ assertEquals(false, f(2n ** 63n, -(2n ** 63n)));
+ assertEquals(true, f(13n ** 70n, 13n ** 70n));
+ }
+}
+
+function OptAndTest(f, large) {
+ %PrepareFunctionForOptimization(f);
+ Test(f, large);
+ assertUnoptimized(f);
+ %OptimizeFunctionOnNextCall(f);
+ Test(f, large);
+ assertOptimized(f);
+}
+
+OptAndTest(Equal, false);
+OptAndTest(StrictEqual, false);
+if (%Is64Bit()) {
+ // Should deopt on large bigints and there should not be deopt loops.
+ OptAndTest(Equal, true);
+ OptAndTest(StrictEqual, true);
+}
diff --git a/deps/v8/test/mjsunit/compiler/bigint-shift-left.js b/deps/v8/test/mjsunit/compiler/bigint-shift-left.js
new file mode 100644
index 0000000000..1f17eeba6d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-shift-left.js
@@ -0,0 +1,110 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+(function OptimizeAndTest() {
+ function ShiftLeft(a, b) {
+ return a << b;
+ }
+ %PrepareFunctionForOptimization(ShiftLeft);
+ assertEquals(0n, ShiftLeft(0n, 42n));
+ assertEquals(-42n, ShiftLeft(-42n, 0n));
+ assertEquals(2n ** 42n, ShiftLeft(1n, 42n));
+ assertEquals(-2n, ShiftLeft(-(2n ** 512n), -511n));
+ assertEquals(-1n, ShiftLeft(-(2n ** 512n), -513n));
+
+ %OptimizeFunctionOnNextCall(ShiftLeft);
+ assertEquals(0n, ShiftLeft(0n, 42n));
+ assertEquals(-42n, ShiftLeft(-42n, 0n));
+ assertEquals(2n ** 42n, ShiftLeft(1n, 42n));
+ assertEquals(-2n, ShiftLeft(-(2n ** 512n), -511n));
+ assertEquals(-1n, ShiftLeft(-(2n ** 512n), -513n));
+ assertOptimized(ShiftLeft);
+
+ assertThrows(() => ShiftLeft(1n, 2n ** 30n), RangeError);
+ assertUnoptimized(ShiftLeft);
+})();
+
+(function OptimizeAndTest() {
+ function ShiftLeftByPositive(a) {
+ return BigInt.asIntN(62, a << 42n);
+ }
+ %PrepareFunctionForOptimization(ShiftLeftByPositive);
+ assertEquals(0n, ShiftLeftByPositive(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftLeftByPositive);
+ assertEquals(0n, ShiftLeftByPositive(0n));
+ assertEquals(2n ** 42n, ShiftLeftByPositive(1n));
+ assertEquals(2n ** 42n, ShiftLeftByPositive(1n + 2n ** 62n));
+ assertEquals(-(2n ** 42n), ShiftLeftByPositive(-1n - 2n ** 64n));
+ assertOptimized(ShiftLeftByPositive);
+
+ assertThrows(() => ShiftLeftByPositive(0), TypeError);
+ assertUnoptimized(ShiftLeftByPositive);
+})();
+
+(function OptimizeAndTest() {
+ const minus42 = -42n;
+ function ShiftLeftByNegative(a) {
+ return BigInt.asIntN(62, BigInt.asUintN(64, a) << minus42);
+ }
+ %PrepareFunctionForOptimization(ShiftLeftByNegative);
+ assertEquals(0n, ShiftLeftByNegative(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftLeftByNegative);
+ assertEquals(0n, ShiftLeftByNegative(42n));
+ assertEquals(4194303n, ShiftLeftByNegative(-42n));
+ assertEquals(2n ** 20n, ShiftLeftByNegative(1n + 2n ** 62n));
+ assertEquals(3145727n, ShiftLeftByNegative(-1n - 2n ** 62n - 2n ** 64n));
+ assertOptimized(ShiftLeftByNegative);
+
+ assertThrows(() => ShiftLeftByNegative(0), TypeError);
+ if (%Is64Bit()) {
+    // BigInt truncation is not inlined on 32-bit platforms so there are no
+ // checks for BigInt, thus deopt will not be triggered.
+ assertUnoptimized(ShiftLeftByNegative);
+ }
+})();
+
+(function OptimizeAndTest() {
+ function ShiftLeftBy64(a) {
+ return BigInt.asIntN(62, a << 64n);
+ }
+ %PrepareFunctionForOptimization(ShiftLeftBy64);
+ assertEquals(0n, ShiftLeftBy64(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftLeftBy64);
+ assertEquals(0n, ShiftLeftBy64(0n));
+ assertEquals(0n, ShiftLeftBy64(1n));
+ assertEquals(0n, ShiftLeftBy64(1n + 2n ** 62n));
+ assertEquals(0n, ShiftLeftBy64(-1n - 2n ** 64n));
+ assertOptimized(ShiftLeftBy64);
+
+ assertThrows(() => ShiftLeftBy64(0), TypeError);
+ assertUnoptimized(ShiftLeftBy64);
+})();
+
+(function OptimizeAndTest() {
+ const bi = 2n ** 64n;
+ function ShiftLeftByLarge(a) {
+ return BigInt.asIntN(62, a << bi);
+ }
+ %PrepareFunctionForOptimization(ShiftLeftByLarge);
+ assertEquals(0n, ShiftLeftByLarge(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftLeftByLarge);
+ assertEquals(0n, ShiftLeftByLarge(0n));
+ if (%Is64Bit()) {
+ // After optimization, a truncated left shift will not throw a
+    // BigIntTooBig exception, just like truncated addition.
+ assertEquals(0n, ShiftLeftByLarge(1n));
+ assertEquals(0n, ShiftLeftByLarge(1n + 2n ** 62n));
+ assertEquals(0n, ShiftLeftByLarge(-1n - 2n ** 64n));
+ }
+ assertOptimized(ShiftLeftByLarge);
+
+ assertThrows(() => ShiftLeftByLarge(0), TypeError);
+ assertUnoptimized(ShiftLeftByLarge);
+})();
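Every case above wraps its result in BigInt.asIntN(62, ...), a two's-complement truncation to 62 bits, and the in-place comment notes that once optimized such a truncated shift no longer throws a BigIntTooBig RangeError. A short sketch of the truncation rule itself, assuming only standard BigInt operators (the helper is illustrative, not part of the patch):

function asIntN(bits, x) {
  const mod = 1n << BigInt(bits);
  let r = ((x % mod) + mod) % mod;  // reduce into [0, 2**bits)
  if (r >= mod >> 1n) r -= mod;     // reinterpret the top bit as the sign
  return r;
}
const v = (1n + 2n ** 62n) << 42n;
console.log(asIntN(62, v) === BigInt.asIntN(62, v));  // true, both are 2n ** 42n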
diff --git a/deps/v8/test/mjsunit/compiler/bigint-shift-right.js b/deps/v8/test/mjsunit/compiler/bigint-shift-right.js
new file mode 100644
index 0000000000..badfcdd7d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-shift-right.js
@@ -0,0 +1,140 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+(function OptimizeAndTest() {
+ function ShiftRight(a, b) {
+ return a >> b;
+ }
+ %PrepareFunctionForOptimization(ShiftRight);
+ assertEquals(0n, ShiftRight(0n, 42n));
+ assertEquals(-42n, ShiftRight(-42n, 0n));
+ assertEquals(-3n, ShiftRight(-5n, 1n));
+ assertEquals(0n, ShiftRight(42n, 2n ** 64n));
+ assertEquals(-1n, ShiftRight(-42n, 64n));
+ assertEquals(-1n, ShiftRight(-42n, 2n ** 64n));
+
+ %OptimizeFunctionOnNextCall(ShiftRight);
+ assertEquals(0n, ShiftRight(0n, 42n));
+ assertEquals(-42n, ShiftRight(-42n, 0n));
+ assertEquals(-3n, ShiftRight(-5n, 1n));
+ assertEquals(0n, ShiftRight(42n, 2n ** 64n));
+ assertEquals(-1n, ShiftRight(-42n, 64n));
+ assertEquals(-1n, ShiftRight(-42n, 2n ** 64n));
+ assertOptimized(ShiftRight);
+
+ assertThrows(() => ShiftRight(1n, -(2n ** 30n)), RangeError);
+ assertUnoptimized(ShiftRight);
+})();
+
+(function OptimizeAndTest() {
+ function ShiftRightUnsignedByPositive(a) {
+ return BigInt.asIntN(62, BigInt.asUintN(64, a) >> 42n);
+ }
+ %PrepareFunctionForOptimization(ShiftRightUnsignedByPositive);
+ assertEquals(0n, ShiftRightUnsignedByPositive(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftRightUnsignedByPositive);
+ assertEquals(0n, ShiftRightUnsignedByPositive(42n));
+ assertEquals(4194303n, ShiftRightUnsignedByPositive(-42n));
+ assertEquals(2n ** 20n, ShiftRightUnsignedByPositive(1n + 2n ** 62n));
+ assertEquals(3145727n,
+ ShiftRightUnsignedByPositive(-1n - 2n ** 62n - 2n ** 64n));
+ assertOptimized(ShiftRightUnsignedByPositive);
+
+ assertThrows(() => ShiftRightUnsignedByPositive(0), TypeError);
+ if (%Is64Bit()) {
+    // BigInt truncation is not inlined on 32-bit platforms so there are no
+ // checks for BigInt, thus deopt will not be triggered.
+ assertUnoptimized(ShiftRightUnsignedByPositive);
+ }
+})();
+
+(function OptimizeAndTest() {
+ function ShiftRightSignedByPositive(a) {
+ return BigInt.asIntN(62, BigInt.asIntN(64, a) >> 42n);
+ }
+ %PrepareFunctionForOptimization(ShiftRightSignedByPositive);
+ assertEquals(0n, ShiftRightSignedByPositive(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftRightSignedByPositive);
+ assertEquals(0n, ShiftRightSignedByPositive(42n));
+ assertEquals(-1n, ShiftRightSignedByPositive(-42n));
+ assertEquals(2n ** 20n, ShiftRightSignedByPositive(1n + 2n ** 62n));
+ assertEquals(-(2n ** 20n),
+ ShiftRightSignedByPositive(-(2n ** 62n) - 2n ** 64n));
+ assertOptimized(ShiftRightSignedByPositive);
+
+ assertThrows(() => ShiftRightSignedByPositive(0), TypeError);
+ if (%Is64Bit()) {
+    // BigInt truncation is not inlined on 32-bit platforms so there are no
+ // checks for BigInt, thus deopt will not be triggered.
+ assertUnoptimized(ShiftRightSignedByPositive);
+ }
+})();
+
+(function OptimizeAndTest() {
+ const minus42 = -42n;
+ function ShiftRightByNegative(a) {
+ return BigInt.asIntN(62, a >> minus42);
+ }
+ %PrepareFunctionForOptimization(ShiftRightByNegative);
+ assertEquals(0n, ShiftRightByNegative(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftRightByNegative);
+ assertEquals(0n, ShiftRightByNegative(0n));
+ assertEquals(2n ** 42n, ShiftRightByNegative(1n));
+ assertEquals(2n ** 42n, ShiftRightByNegative(1n + 2n ** 62n));
+ assertEquals(-(2n ** 42n), ShiftRightByNegative(-1n - 2n ** 64n));
+ assertOptimized(ShiftRightByNegative);
+
+ assertThrows(() => ShiftRightByNegative(0), TypeError);
+ assertUnoptimized(ShiftRightByNegative);
+})();
+
+(function OptimizeAndTest() {
+ function ShiftRightBy64(a) {
+ return BigInt.asIntN(62, BigInt.asUintN(64, a) >> 64n);
+ }
+ %PrepareFunctionForOptimization(ShiftRightBy64);
+ assertEquals(0n, ShiftRightBy64(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftRightBy64);
+ assertEquals(0n, ShiftRightBy64(0n));
+ assertEquals(0n, ShiftRightBy64(1n));
+ assertEquals(0n, ShiftRightBy64(1n + 2n ** 62n));
+ assertEquals(0n, ShiftRightBy64(-1n - 2n ** 64n));
+ assertOptimized(ShiftRightBy64);
+
+ assertThrows(() => ShiftRightBy64(0), TypeError);
+ if (%Is64Bit()) {
+    // BigInt truncation is not inlined on 32-bit platforms so there are no
+ // checks for BigInt, thus deopt will not be triggered.
+ assertUnoptimized(ShiftRightBy64);
+ }
+})();
+
+(function OptimizeAndTest() {
+ const bi = 2n ** 64n;
+ function ShiftRightByLarge(a) {
+ return BigInt.asIntN(62, BigInt.asIntN(64, a) >> bi);
+ }
+ %PrepareFunctionForOptimization(ShiftRightByLarge);
+ assertEquals(0n, ShiftRightByLarge(0n));
+
+ %OptimizeFunctionOnNextCall(ShiftRightByLarge);
+ assertEquals(0n, ShiftRightByLarge(0n));
+ assertEquals(-1n, ShiftRightByLarge(-1n));
+ assertEquals(0n, ShiftRightByLarge(1n + 2n ** 62n));
+ assertEquals(-1n, ShiftRightByLarge(-1n - 2n ** 64n));
+ assertOptimized(ShiftRightByLarge);
+
+ assertThrows(() => ShiftRightByLarge(0), TypeError);
+ if (%Is64Bit()) {
+    // BigInt truncation is not inlined on 32-bit platforms so there are no
+ // checks for BigInt, thus deopt will not be triggered.
+ assertUnoptimized(ShiftRightByLarge);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/compiler/bigint64-array.js b/deps/v8/test/mjsunit/compiler/bigint64-array.js
new file mode 100644
index 0000000000..2e4fc40850
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint64-array.js
@@ -0,0 +1,71 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+const bi = 18446744073709551615n; // 2n ** 64n - 1n
+
+function storeAndLoad(x) {
+ let buffer = new ArrayBuffer(16);
+ let biArray = new BigInt64Array(buffer);
+ biArray[0] = bi;
+ biArray[1] = x;
+ return biArray[0] + biArray[1];
+}
+
+%PrepareFunctionForOptimization(storeAndLoad);
+assertEquals(-1n, storeAndLoad(0n));
+assertEquals(41n, storeAndLoad(2n ** 64n + 42n));
+assertEquals(0n, storeAndLoad(-bi));
+assertEquals(-2n, storeAndLoad(bi));
+%OptimizeFunctionOnNextCall(storeAndLoad);
+assertEquals(-1n, storeAndLoad(0n));
+assertEquals(41n, storeAndLoad(2n ** 64n + 42n));
+assertEquals(0n, storeAndLoad(-bi));
+assertEquals(-2n, storeAndLoad(bi));
+assertOptimized(storeAndLoad);
+
+assertEquals(-1n, storeAndLoad(false));
+if (%Is64Bit()) {
+ assertUnoptimized(storeAndLoad);
+}
+
+%PrepareFunctionForOptimization(storeAndLoad);
+assertEquals(-1n, storeAndLoad(0n));
+%OptimizeFunctionOnNextCall(storeAndLoad);
+assertEquals(0n, storeAndLoad(true));
+// TODO(panq): Uncomment the assertion once the deopt loop is eliminated.
+// assertOptimized(storeAndLoad);
+
+function storeAndLoadUnsigned(x) {
+ let buffer = new ArrayBuffer(16);
+ let biArray = new BigUint64Array(buffer);
+ biArray[0] = bi;
+ biArray[1] = x;
+ return biArray[0] + biArray[1];
+}
+
+%PrepareFunctionForOptimization(storeAndLoadUnsigned);
+assertEquals(bi, storeAndLoadUnsigned(0n));
+assertEquals(bi + 42n, storeAndLoadUnsigned(2n ** 64n + 42n));
+assertEquals(bi + 1n, storeAndLoadUnsigned(-bi));
+assertEquals(bi * 2n, storeAndLoadUnsigned(bi));
+%OptimizeFunctionOnNextCall(storeAndLoadUnsigned);
+assertEquals(bi, storeAndLoadUnsigned(0n));
+assertEquals(bi + 42n, storeAndLoadUnsigned(2n ** 64n + 42n));
+assertEquals(bi + 1n, storeAndLoadUnsigned(-bi));
+assertEquals(bi * 2n, storeAndLoadUnsigned(bi));
+assertOptimized(storeAndLoadUnsigned);
+
+assertEquals(bi, storeAndLoadUnsigned(false));
+if (%Is64Bit()) {
+ assertUnoptimized(storeAndLoadUnsigned);
+}
+
+%PrepareFunctionForOptimization(storeAndLoadUnsigned);
+assertEquals(bi, storeAndLoadUnsigned(0n));
+%OptimizeFunctionOnNextCall(storeAndLoadUnsigned);
+assertEquals(bi + 1n, storeAndLoadUnsigned(true));
+// TODO(panq): Uncomment the assertion once the deopt loop is eliminated.
+// assertOptimized(storeAndLoadUnsigned);
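The expected sums above follow from how the typed arrays truncate on store: a BigInt64Array element holds BigInt.asIntN(64, x) and a BigUint64Array element holds BigInt.asUintN(64, x), so storing 2n ** 64n + 42n reads back as 42n. An illustrative check using nothing beyond standard JavaScript:

const signed = new BigInt64Array(1);
signed[0] = 2n ** 64n + 42n;                                    // wraps modulo 2**64
console.log(signed[0] === BigInt.asIntN(64, 2n ** 64n + 42n));  // true, both are 42n

const unsigned = new BigUint64Array(1);
unsigned[0] = -1n;                                              // stored as 2**64 - 1
console.log(unsigned[0] === BigInt.asUintN(64, -1n));           // true, both are 2n ** 64n - 1n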
diff --git a/deps/v8/test/mjsunit/compiler/deopt-pretenure.js b/deps/v8/test/mjsunit/compiler/deopt-pretenure.js
index 3178c0c947..ed432f5671 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-pretenure.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-pretenure.js
@@ -9,9 +9,10 @@
function CheckOptimizationStatus(func, expectedOptimizationStatus) {
let opt_status = %GetOptimizationStatus(func);
- assertTrue ((opt_status & expectedOptimizationStatus) !== 0,
- "Expected flag " + expectedOptimizationStatus +
- " to be set in optimization status");
+ assertTrue(
+ (opt_status & expectedOptimizationStatus) !== 0,
+ 'Expected flag 0x' + expectedOptimizationStatus.toString(16) +
+ ' to be set in optimization status');
}
// Trigger pretenuring decision change at entry, deopting at bytecode offset -1.
@@ -30,14 +31,19 @@ DeoptEntry(V8OptimizationStatus.kTopmostFrameIsInterpreted
%OptimizeFunctionOnNextCall(DeoptEntry);
// Force the allocation site to be pretenured.
assertTrue(%PretenureAllocationSite(empty));
-// This call should deopt at entry because of the pretenuring decision change.
-DeoptEntry(V8OptimizationStatus.kTopmostFrameIsInterpreted
- | V8OptimizationStatus.kTopmostFrameIsBaseline);
+// This call should deopt Turbofan at entry because of the pretenuring decision
+// change. Maglev doesn't currently implement this optimization/deopt.
+DeoptEntry(
+ V8OptimizationStatus.kTopmostFrameIsInterpreted |
+ V8OptimizationStatus.kTopmostFrameIsBaseline |
+ V8OptimizationStatus.kTopmostFrameIsMaglev);
%PrepareFunctionForOptimization(DeoptEntry);
%OptimizeFunctionOnNextCall(DeoptEntry);
// Function should be compiled now.
-DeoptEntry(V8OptimizationStatus.kTopmostFrameIsTurboFanned);
+DeoptEntry(
+ V8OptimizationStatus.kTopmostFrameIsTurboFanned |
+ V8OptimizationStatus.kTopmostFrameIsMaglev);
// Trigger pretenuring decision change during OSR.
function createSource(name, fillCnt) {
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-calls-pointer.js b/deps/v8/test/mjsunit/compiler/fast-api-calls-pointer.js
new file mode 100644
index 0000000000..8a1da1d91b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/fast-api-calls-pointer.js
@@ -0,0 +1,140 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exercises basic fast API calls and enables fuzzing of this
+// functionality.
+
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --turbofan
+// --always-turbofan is disabled because we rely on particular feedback for
+// optimizing to the fastest path.
+// Flags: --no-always-turbofan
+// The test relies on optimizing/deoptimizing at predictable moments, so
+// it's not suitable for deoptimization fuzzing.
+// Flags: --deopt-every-n-times=0
+
+assertThrows(() => d8.test.FastCAPI());
+const fast_c_api = new d8.test.FastCAPI();
+
+// ---------- Test external pointer passing -----------
+
+function reset_counts() {
+ return fast_c_api.reset_counts();
+}
+
+function fast_call_count() {
+ return fast_c_api.fast_call_count();
+}
+
+function slow_call_count() {
+ return fast_c_api.slow_call_count();
+}
+
+function assertIsExternal(pointer) {
+ return fast_c_api.assert_is_external(pointer);
+}
+
+function get_pointer_a() {
+ return fast_c_api.get_pointer();
+}
+
+function get_pointer_b() {
+ return fast_c_api.get_null_pointer();
+}
+
+function pass_pointer(pointer) {
+ return fast_c_api.pass_pointer(pointer);
+}
+
+function compare_pointers(pointer_a, pointer_b) {
+ return fast_c_api.compare_pointers(pointer_a, pointer_b);
+}
+
+%PrepareFunctionForOptimization(get_pointer_a);
+reset_counts();
+const external_a_slow = get_pointer_a();
+const external_a_slow_clone = get_pointer_a();
+assertEquals(slow_call_count(), 2);
+assertEquals(fast_call_count(), 0);
+assertIsExternal(external_a_slow);
+assertIsExternal(external_a_slow_clone);
+
+// Slow call that returns the same pointer from a new `External::New()`
+// will still create a new / different object.
+// Note that we cannot use `assertEquals(external_a_slow, external_a_slow_clone)`
+// as it's a deep equality comparison and will return true for all empty object comparisons.
+assertFalse(external_a_slow === external_a_slow_clone);
+
+%PrepareFunctionForOptimization(pass_pointer);
+reset_counts();
+const external_a_slow_passed = pass_pointer(external_a_slow);
+// If slow call returns the same External object, then object identity is
+// preserved.
+assertEquals(slow_call_count(), 1);
+assertEquals(fast_call_count(), 0);
+assertTrue(external_a_slow_passed === external_a_slow);
+%OptimizeFunctionOnNextCall(pass_pointer);
+const external_a_fast_passed = pass_pointer(external_a_slow);
+assertEquals(slow_call_count(), 1);
+assertEquals(fast_call_count(), 1);
+assertIsExternal(external_a_slow);
+assertIsExternal(external_a_fast_passed);
+// Fast call always creates a new External object, as they cannot
+// return the same External object given that they do not see it.
+assertFalse(external_a_fast_passed === external_a_slow);
+
+// An object that looks like an External is still not an External.
+const emptyObject = Object.create(null);
+// An object that internally carries a pointer is still not an External.
+const alsoInternallyPointer = new Uint8Array();
+assertThrows(() => pass_pointer(emptyObject));
+assertThrows(() => pass_pointer(alsoInternallyPointer));
+
+// Show off deep equality comparisons between various External objects and
+// the empty object to show that all Externals work properly as objects.
+assertEquals(external_a_slow, external_a_fast_passed);
+assertEquals(external_a_fast_passed, emptyObject);
+
+%OptimizeFunctionOnNextCall(get_pointer_a);
+reset_counts();
+const external_a_fast = get_pointer_a();
+assertEquals(slow_call_count(), 0);
+assertEquals(fast_call_count(), 1);
+assertIsExternal(external_a_fast);
+assertFalse(external_a_fast === external_a_slow);
+
+%PrepareFunctionForOptimization(get_pointer_b);
+fast_c_api.reset_counts();
+const external_b_slow = get_pointer_b();
+assertEquals(slow_call_count(), 1);
+assertEquals(fast_call_count(), 0);
+assertEquals(external_b_slow, null);
+%OptimizeFunctionOnNextCall(get_pointer_b);
+const external_b_fast = get_pointer_b();
+assertEquals(slow_call_count(), 1);
+assertEquals(fast_call_count(), 1);
+assertEquals(external_b_fast, null);
+
+const external_b_fast_passed = pass_pointer(external_b_slow);
+assertEquals(external_b_fast_passed, null);
+assertTrue(external_b_fast_passed === external_b_slow);
+
+%PrepareFunctionForOptimization(compare_pointers);
+assertUnoptimized(compare_pointers);
+reset_counts();
+assertFalse(compare_pointers(external_a_slow, external_b_slow));
+assertEquals(slow_call_count(), 1);
+assertEquals(fast_call_count(), 0);
+%OptimizeFunctionOnNextCall(compare_pointers);
+assertFalse(compare_pointers(external_a_slow, external_b_slow));
+assertEquals(slow_call_count(), 1);
+assertEquals(fast_call_count(), 1);
+assertTrue(compare_pointers(external_a_slow, external_a_slow));
+assertTrue(compare_pointers(external_a_slow, external_a_fast));
+assertTrue(compare_pointers(external_b_slow, external_b_slow));
+assertTrue(compare_pointers(external_b_slow, external_b_fast));
+assertTrue(compare_pointers(external_b_slow, external_b_fast_passed));
+
+// Assert that the ComparePointers call can safely be called with non-Externals
+// and that it will throw an error instead of crashing.
+assertThrows(() => compare_pointers(123, "foo"));
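The identity checks above rely on one property of the mjsunit asserts: assertEquals compares objects structurally, so any two objects with no own properties compare equal, and only === can tell distinct External wrappers apart. A plain-JS illustration, with Object.create(null) standing in for an External wrapper (assumption: Externals expose no own properties to JS):

const extA = Object.create(null);  // stand-in for one External wrapper
const extB = Object.create(null);  // a distinct wrapper around the same pointer
console.log(extA === extB);                                          // false: different identities
console.log(Object.keys(extA).length === Object.keys(extB).length);  // true: structurally indistinguishable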
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-clamp-annotations.js b/deps/v8/test/mjsunit/compiler/fast-api-clamp-annotations.js
index d774f941fc..b875beb00b 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-clamp-annotations.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-clamp-annotations.js
@@ -125,3 +125,4 @@ is_in_range_u64(false, 2 ** 64 + 3.15, Number.MAX_SAFE_INTEGER);
// ---------- invalid arguments for clamp_compare ---------
fast_c_api.clamp_compare_i32(true);
fast_c_api.clamp_compare_i32(true, 753801, -2147483650);
+fast_c_api.clamp_compare_u32(-2147483648, NaN, -5e-324);
diff --git a/deps/v8/test/mjsunit/compiler/misc-ensure-no-deopt.js b/deps/v8/test/mjsunit/compiler/misc-ensure-no-deopt.js
index 5cc13d383f..76e0129cb3 100644
--- a/deps/v8/test/mjsunit/compiler/misc-ensure-no-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/misc-ensure-no-deopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbofan
function throwsRepeated(fn, ErrorType, required_compilation_count) {
for (let j = 0; j < (required_compilation_count ?? 1); j++) {
@@ -13,9 +13,8 @@ function throwsRepeated(fn, ErrorType, required_compilation_count) {
%OptimizeFunctionOnNextCall(fn);
assertThrows(fn, ErrorType);
}
- // If the function isn't optimized / turbofan tier not available,
- // a deopt happened on the call above.
- assertEquals(%IsTurbofanEnabled(), %ActiveTierIsTurbofan(fn));
+ // If the function isn't optimized, a deopt happened on the call above.
+ assertOptimized(fn);
}
function repeated(fn) {
@@ -25,9 +24,8 @@ function repeated(fn) {
// Force compilation and run.
%OptimizeFunctionOnNextCall(fn);
fn();
- // If the function isn't optimized / turbofan tier not available,
- // a deopt happened on the call above.
- assertEquals(%IsTurbofanEnabled(), %ActiveTierIsTurbofan(fn));
+ // If the function isn't optimized, a deopt happened on the call above.
+ assertOptimized(fn);
}
repeated(() => { for (let p of "abc") { } });
diff --git a/deps/v8/test/mjsunit/compiler/omit-default-ctors-array-iterator.js b/deps/v8/test/mjsunit/compiler/omit-default-ctors-array-iterator.js
index 0c52b0e45e..f6bab65700 100644
--- a/deps/v8/test/mjsunit/compiler/omit-default-ctors-array-iterator.js
+++ b/deps/v8/test/mjsunit/compiler/omit-default-ctors-array-iterator.js
@@ -27,7 +27,7 @@
// C default ctor doing "...args" and B default ctor doing "...args".
assertEquals(4, iterationCount);
- assertTrue(isTurboFanned(C)); // No deopt.
+ assertOptimized(C); // No deopt.
Array.prototype[Symbol.iterator] = oldIterator;
})();
diff --git a/deps/v8/test/mjsunit/compiler/omit-default-ctors.js b/deps/v8/test/mjsunit/compiler/omit-default-ctors.js
index 2f1c5c18c0..5d64f207e3 100644
--- a/deps/v8/test/mjsunit/compiler/omit-default-ctors.js
+++ b/deps/v8/test/mjsunit/compiler/omit-default-ctors.js
@@ -13,7 +13,7 @@
%OptimizeFunctionOnNextCall(B);
const o = new B();
assertSame(B.prototype, o.__proto__);
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
})();
(function OmitDefaultDerivedCtor() {
@@ -25,7 +25,7 @@
%OptimizeFunctionOnNextCall(C);
const o = new C();
assertSame(C.prototype, o.__proto__);
- assertTrue(isTurboFanned(C)); // No deopt.
+ assertOptimized(C); // No deopt.
})();
(function OmitDefaultBaseAndDerivedCtor() {
@@ -37,7 +37,7 @@
%OptimizeFunctionOnNextCall(C);
const o = new C();
assertSame(C.prototype, o.__proto__);
- assertTrue(isTurboFanned(C)); // No deopt.
+ assertOptimized(C); // No deopt.
})();
(function OmitDefaultBaseCtorWithExplicitSuper() {
@@ -48,7 +48,7 @@
%OptimizeFunctionOnNextCall(B);
const o = new B();
assertSame(B.prototype, o.__proto__);
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
})();
(function OmitDefaultDerivedCtorWithExplicitSuper() {
@@ -60,7 +60,7 @@
%OptimizeFunctionOnNextCall(C);
const o = new C();
assertSame(C.prototype, o.__proto__);
- assertTrue(isTurboFanned(C)); // No deopt.
+ assertOptimized(C); // No deopt.
})();
(function OmitDefaultBaseAndDerivedCtorWithExplicitSuper() {
@@ -72,7 +72,7 @@
%OptimizeFunctionOnNextCall(C);
const o = new C();
assertSame(C.prototype, o.__proto__);
- assertTrue(isTurboFanned(C)); // No deopt.
+ assertOptimized(C); // No deopt.
})();
(function OmitDefaultBaseCtorWithExplicitSuperAndNonFinalSpread() {
@@ -84,7 +84,7 @@
const o = new B(3, 4);
assertSame(B.prototype, o.__proto__);
// See https://bugs.chromium.org/p/v8/issues/detail?id=13310
- // assertTrue(isTurboFanned(B)); // No deopt.
+ // assertOptimized(B); // No deopt.
// This assert will fail when the above bug is fixed:
assertFalse(isTurboFanned(B));
})();
@@ -99,7 +99,7 @@
const o = new C(3, 4);
assertSame(C.prototype, o.__proto__);
// See https://bugs.chromium.org/p/v8/issues/detail?id=13310
- // assertTrue(isTurboFanned(C)); // No deopt.
+ // assertOptimized(C); // No deopt.
// This assert will fail when the above bug is fixed:
assertFalse(isTurboFanned(C));
})();
@@ -114,7 +114,7 @@
const o = new C(3, 4);
assertSame(C.prototype, o.__proto__);
// See https://bugs.chromium.org/p/v8/issues/detail?id=13310
- // assertTrue(isTurboFanned(C)); // No deopt.
+ // assertOptimized(C); // No deopt.
// This assert will fail when the above bug is fixed:
assertFalse(isTurboFanned(C));
})();
@@ -138,7 +138,7 @@
assertEquals(2, ctorCallCount);
assertEquals([1, 2, 3], lastArgs);
assertTrue(a.baseTagged);
- assertTrue(isTurboFanned(A)); // No deopt.
+ assertOptimized(A); // No deopt.
// 'A' default ctor will be omitted.
class B1 extends A {};
@@ -149,7 +149,7 @@
assertEquals(4, ctorCallCount);
assertEquals([4, 5, 6], lastArgs);
assertTrue(b1.baseTagged);
- assertTrue(isTurboFanned(B1)); // No deopt.
+ assertOptimized(B1); // No deopt.
// The same test with non-final spread; 'A' default ctor will be omitted.
class B2 extends A {
@@ -163,7 +163,7 @@
assertEquals([1, 4, 5, 6, 2], lastArgs);
assertTrue(b2.baseTagged);
// See https://bugs.chromium.org/p/v8/issues/detail?id=13310
- // assertTrue(isTurboFanned(B2)); // No deopt.
+ // assertOptimized(B2); // No deopt.
// This assert will fail when the above bug is fixed:
assertFalse(isTurboFanned(B2)); // No deopt.
})();
@@ -189,7 +189,7 @@
assertEquals(2, ctorCallCount);
assertEquals([1, 2, 3], lastArgs);
assertTrue(a.derivedTagged);
- assertTrue(isTurboFanned(A)); // No deopt.
+ assertOptimized(A); // No deopt.
// 'A' default ctor will be omitted.
class B1 extends A {};
@@ -200,7 +200,7 @@
assertEquals(4, ctorCallCount);
assertEquals([4, 5, 6], lastArgs);
assertTrue(b1.derivedTagged);
- assertTrue(isTurboFanned(B1)); // No deopt.
+ assertOptimized(B1); // No deopt.
// The same test with non-final spread. 'A' default ctor will be omitted.
class B2 extends A {
@@ -214,7 +214,7 @@
assertEquals([1, 4, 5, 6, 2], lastArgs);
assertTrue(b2.derivedTagged);
// See https://bugs.chromium.org/p/v8/issues/detail?id=13310
- // assertTrue(isTurboFanned(B2)); // No deopt.
+ // assertOptimized(B2); // No deopt.
// This assert will fail when the above bug is fixed:
assertFalse(isTurboFanned(B2)); // No deopt.
})();
@@ -233,7 +233,7 @@
const a1 = new A1();
assertEquals(2, baseFunctionCallCount);
assertTrue(a1.baseTagged);
- assertTrue(isTurboFanned(A1)); // No deopt.
+ assertOptimized(A1); // No deopt.
class A2 extends BaseFunction {
constructor(...args) { super(1, ...args, 2); }
@@ -244,7 +244,7 @@
const a2 = new A2();
assertEquals(4, baseFunctionCallCount);
assertTrue(a2.baseTagged);
- assertTrue(isTurboFanned(A2)); // No deopt.
+ assertOptimized(A2); // No deopt.
})();
(function NonSuperclassCtor() {
@@ -438,7 +438,7 @@
const b = new B();
assertTrue(b.isA());
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
%PrepareFunctionForOptimization(C1);
new C1();
@@ -446,7 +446,7 @@
const c1 = new C1();
assertTrue(c1.isA());
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -454,7 +454,7 @@
const c2 = new C2();
assertTrue(c2.isA());
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function DerivedPrivateField() {
@@ -474,7 +474,7 @@
const c1 = new C1();
assertTrue(c1.isB());
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -482,7 +482,7 @@
const c2 = new C2();
assertTrue(c2.isB());
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function BasePrivateMethod() {
@@ -502,7 +502,7 @@
const b = new B();
assertEquals('private', b.callPrivate());
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
%PrepareFunctionForOptimization(C1);
new C1();
@@ -510,7 +510,7 @@
const c1 = new C1();
assertEquals('private', c1.callPrivate());
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -518,7 +518,7 @@
const c2 = new C2();
assertEquals('private', c2.callPrivate());
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function DerivedPrivateMethod() {
@@ -538,7 +538,7 @@
const c1 = new C1();
assertEquals('private', c1.callPrivate());
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -546,7 +546,7 @@
const c2 = new C2();
assertEquals('private', c2.callPrivate());
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function BasePrivateGetter() {
@@ -566,7 +566,7 @@
const b = new B();
assertEquals('private', b.getPrivate());
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
%PrepareFunctionForOptimization(C1);
new C1();
@@ -574,7 +574,7 @@
const c1 = new C1();
assertEquals('private', c1.getPrivate());
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -582,7 +582,7 @@
const c2 = new C2();
assertEquals('private', c2.getPrivate());
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function DerivedPrivateGetter() {
@@ -602,7 +602,7 @@
const c1 = new C1();
assertEquals('private', c1.getPrivate());
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -610,7 +610,7 @@
const c2 = new C2();
assertEquals('private', c2.getPrivate());
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function BasePrivateSetter() {
@@ -639,7 +639,7 @@
const c1 = new C1();
c1.setPrivate();
assertEquals('private', c1.secret);
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -648,7 +648,7 @@
const c2 = new C2();
c2.setPrivate();
assertEquals('private', c2.secret);
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function DerivedPrivateSetter() {
@@ -669,7 +669,7 @@
const c1 = new C1();
c1.setPrivate();
assertEquals('private', c1.secret);
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -678,7 +678,7 @@
const c2 = new C2();
c2.setPrivate();
assertEquals('private', c2.secret);
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function BaseClassFields() {
@@ -702,7 +702,7 @@
const c1 = new C1();
assertTrue(c1.aField);
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -710,7 +710,7 @@
const c2 = new C2();
assertTrue(c2.aField);
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function DerivedClassFields() {
@@ -727,7 +727,7 @@
const c1 = new C1();
assertTrue(c1.bField);
- assertTrue(isTurboFanned(C1)); // No deopt.
+ assertOptimized(C1); // No deopt.
%PrepareFunctionForOptimization(C2);
new C2();
@@ -735,7 +735,7 @@
const c2 = new C2();
assertTrue(c2.bField);
- assertTrue(isTurboFanned(C2)); // No deopt.
+ assertOptimized(C2); // No deopt.
})();
(function SuperInTryCatchDefaultCtor() {
@@ -756,7 +756,7 @@
const b = new B();
assertSame(B.prototype, b.__proto__);
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
})();
(function SuperInTryCatchNonDefaultCtor() {
@@ -777,5 +777,5 @@
const b = new B();
assertSame(B.prototype, b.__proto__);
- assertTrue(isTurboFanned(B)); // No deopt.
+ assertOptimized(B); // No deopt.
})();
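
Note: the hunks above only swap the harness helper used to check that no deopt happened — assertTrue(isTurboFanned(X)) becomes assertOptimized(X) — the test logic itself is unchanged. A minimal sketch of the pattern these tests exercise, assuming d8 with --allow-natives-syntax and the mjsunit helpers (assertSame, assertOptimized):

  class A { constructor() {} }
  class B extends A {}               // default derived ctor that may be omitted
  %PrepareFunctionForOptimization(B);
  new B();                           // warm up
  %OptimizeFunctionOnNextCall(B);
  const o = new B();
  assertSame(B.prototype, o.__proto__);
  assertOptimized(B);                // fails if constructing B caused a deopt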
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1399490.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1399490.js
new file mode 100644
index 0000000000..84f982a9c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1399490.js
@@ -0,0 +1,24 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboshaft
+// Flags: --stress-concurrent-inlining --expose-fast-api
+
+const fast_c_api = new d8.test.FastCAPI();
+
+function foo() {
+ fast_c_api.enforce_range_compare_u64(undefined, "", "/0/");
+}
+
+%PrepareFunctionForOptimization(foo);
+try {
+ foo();
+} catch {
+}
+
+%OptimizeFunctionOnNextCall(foo);
+try {
+ foo();
+} catch {
+}
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1399626.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1399626.js
new file mode 100644
index 0000000000..9bdc6d13bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1399626.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+proto = Object.getPrototypeOf(0);
+
+function foo(v) {
+ properties = Object.getOwnPropertyNames(proto);
+ if (properties.includes("constructor") &&
+ v.constructor.hasOwnProperty()) {
+ }
+}
+
+function bar(n) {
+ if (n > 5000) return;
+ foo(0) ;
+ bar(n+1);
+}
+bar(1);
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1399627.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1399627.js
new file mode 100644
index 0000000000..c61a6feeea
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1399627.js
@@ -0,0 +1,18 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(arr, value) {
+ if (arr !== value) throw new Error('bad value: ' + arr);
+}
+function slice_array(arr) {
+ return arr.slice();
+}
+for (var i = 0; i < 1e5; ++i) {
+ var arr = [];
+ var sliced = slice_array(arr);
+ foo(arr !== sliced, true);
+ try {
+ foo(sliced.length);
+ } catch (e) {}
+}
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1408013.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1408013.js
new file mode 100644
index 0000000000..c665a669bf
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1408013.js
@@ -0,0 +1,13 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-turbo-loop-variable
+
+function opt() {
+ "abcdefgh".startsWith("abcdefgh");
+}
+%PrepareFunctionForOptimization(opt);
+opt();
+%OptimizeFunctionOnNextCall(opt);
+opt();
diff --git a/deps/v8/test/mjsunit/compiler/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/compiler/typedarray-resizablearraybuffer.js
index 2cd42de7c2..b2a72363d9 100644
--- a/deps/v8/test/mjsunit/compiler/typedarray-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/compiler/typedarray-resizablearraybuffer.js
@@ -451,6 +451,83 @@ assertEquals(9, ByteLength(dv));
assertOptimized(ByteLength);
})();
+const dataview_data_sizes = ['Int8', 'Uint8', 'Int16', 'Uint16', 'Int32',
+ 'Uint32', 'Float32', 'Float64', 'BigInt64',
+ 'BigUint64'];
+
+// Global variable used for DataViews; this is important for triggering some
+// optimizations.
+var dv;
+(function() {
+for (let use_global_var of [true, false]) {
+ for (let shared of [false, true]) {
+ for (let length_tracking of [false, true]) {
+ for (let with_offset of [false, true]) {
+ for (let data_size of dataview_data_sizes) {
+ const test_case = `Testing: Get_${
+ data_size}_${
+ shared ? 'GSAB' : 'RAB'}_${
+ length_tracking ?
+ 'LengthTracking' :
+ 'FixedLength'}${with_offset ? 'WithOffset' : ''}_${
+ use_global_var ? 'UseGlobalVar' : ''}_DataView`;
+ // console.log(test_case);
+ const is_bigint = data_size.startsWith('Big');
+ const expected_value = is_bigint ? 0n : 0;
+
+ const get_code = 'return dv.get' + data_size + '(0); // ' + test_case;
+ const Get = use_global_var ?
+ new Function(get_code) : new Function('dv', get_code);
+
+ const offset = with_offset ? 8 : 0;
+
+ let blen = 8; // Enough for one element.
+ const fixed_blen = length_tracking ? undefined : blen;
+ const ab = CreateBuffer(shared, 8*10, 8*20);
+ // Assign to the global var.
+ dv = new DataView(ab, offset, fixed_blen);
+ const Resize = MakeResize(DataView, shared, offset, fixed_blen);
+
+ assertUnoptimized(Get);
+ %PrepareFunctionForOptimization(Get);
+ assertEquals(expected_value, Get(dv));
+ assertEquals(expected_value, Get(dv));
+ %OptimizeFunctionOnNextCall(Get);
+ assertEquals(expected_value, Get(dv));
+ assertOptimized(Get);
+
+ // Enough for one element or more (even with offset).
+ blen = Resize(ab, 8 + offset);
+ assertEquals(expected_value, Get(dv));
+ assertOptimized(Get);
+
+ blen = Resize(ab, 0); // Not enough for one element.
+ if (shared) {
+ assertEquals(expected_value, Get(dv));
+ } else {
+ if (!length_tracking || with_offset) {
+ // DataView is out of bounds.
+ assertThrows(() => { Get(dv); }, TypeError);
+ } else {
+ // DataView is valid, the index is out of bounds.
+ assertThrows(() => { Get(dv); }, RangeError);
+ }
+ }
+
+ blen = Resize(ab, 64);
+ assertEquals(expected_value, Get(dv));
+
+ if (!shared) {
+ %ArrayBufferDetach(ab);
+ assertThrows(() => { Get(dv); }, TypeError);
+ }
+ }
+ }
+ }
+ }
+}
+})();
+
(function() {
function Read_TA_RAB_LengthTracking_Mixed(ta, index) {
return ta[index];
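
The added block above drives DataView getters over resizable (RAB) and growable shared (GSAB) buffers through the optimizer. A standalone sketch of the underlying behaviour, without the harness, assuming an engine with resizable ArrayBuffer support:

  const rab = new ArrayBuffer(8, {maxByteLength: 16});
  const dv = new DataView(rab);   // no explicit length -> length-tracking view
  dv.getUint8(0);                 // 0 (fresh memory is zero-filled)
  rab.resize(0);                  // view stays in bounds but now has 0 bytes
  // dv.getUint8(0) now throws a RangeError (index out of range)
  rab.resize(16);
  dv.getUint8(0);                 // 0 again; grown memory is zero-filled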
diff --git a/deps/v8/test/mjsunit/const-dict-tracking.js b/deps/v8/test/mjsunit/const-dict-tracking.js
deleted file mode 100644
index 63b6160ddd..0000000000
--- a/deps/v8/test/mjsunit/const-dict-tracking.js
+++ /dev/null
@@ -1,734 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
-// Flags: --no-stress-flush-code --concurrent-recompilation
-//
-// Tests tracking of constness of properties stored in dictionary
-// mode prototypes.
-
-
-var unique_id = 0;
-// Creates a function with unique SharedFunctionInfo to ensure the feedback
-// vector is unique for each test case.
-function MakeFunctionWithUniqueSFI(...args) {
- assertTrue(args.length > 0);
- var body = `/* Unique comment: ${unique_id++} */ ` + args.pop();
- return new Function(...args, body);
-}
-
-// Invalidation by store handler.
-(function() {
- var proto = Object.create(null);
- proto.z = 1;
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proto);
-
- function read_z() {
- return o.z;
- }
- function update_z(new_value) {
- proto.z = new_value;
- }
-
- // Allocate feedback vector, but we don't want to optimize the function.
- %PrepareFunctionForOptimization(read_z);
- for (var i = 0; i < 4; i++) {
- read_z();
- }
- assertTrue(%HasOwnConstDataProperty(proto, "z"));
-
- // Allocate feedback vector, but we don't want to optimize the function.
- %PrepareFunctionForOptimization(update_z);
- for (var i = 0; i < 4; i++) {
- // Overwriting with same value maintains const-ness.
- update_z(1);
- }
-
-
- assertTrue(%HasOwnConstDataProperty(proto, "z"));
-
- update_z(2);
-
- assertFalse(%HasOwnConstDataProperty(proto, "z"));
- assertEquals(2, read_z());
-})();
-
-// Properties become const when dict mode object becomes prototype.
-(function() {
- var proto = Object.create(null);
- var proto_shadow = Object.create(null);
-
- proto.z = 1;
- proto_shadow.z = 1;
-
- // Make sure that z is marked as mutable.
- proto.z = 2;
- proto_shadow.z = 2;
-
- assertFalse(%HasFastProperties(proto));
- assertTrue(%HaveSameMap(proto, proto_shadow));
-
- var o = Object.create(proto);
-
- assertFalse(%HasFastProperties(proto));
- // proto must have received new map.
- assertFalse(%HaveSameMap(proto, proto_shadow));
- assertEquals(%IsDictPropertyConstTrackingEnabled(),
- %HasOwnConstDataProperty(proto, "z"));
-})();
-
-// Properties become const when fast mode object becomes prototype.
-(function() {
- var proto = {}
- var proto_shadow = {};
-
- proto.z = 1;
- proto_shadow.z = 1;
-
- // Make sure that z is marked as mutable.
- proto.z = 2;
- proto_shadow.z = 2;
-
- assertTrue(%HasFastProperties(proto));
- assertTrue(%HaveSameMap(proto, proto_shadow));
-
- var o = Object.create(proto);
-
- assertFalse(%HasFastProperties(proto));
- // proto must have received new map.
- assertFalse(%HaveSameMap(proto, proto_shadow));
- assertEquals(%IsDictPropertyConstTrackingEnabled(),
- %HasOwnConstDataProperty(proto, "z"));
-})();
-
-function testbench(o, proto, update_proto, check_constness) {
- var check_z = MakeFunctionWithUniqueSFI("obj", "return obj.z;");
-
- if (check_constness && %IsDictPropertyConstTrackingEnabled())
- assertTrue(%HasOwnConstDataProperty(proto, "z"));
-
- // Allocate feedback vector, but we don't want to optimize the function.
- %PrepareFunctionForOptimization(check_z);
- for (var i = 0; i < 4; i++) {
- check_z(o);
- }
-
- update_proto();
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- if (check_constness)
- assertFalse(%HasOwnConstDataProperty(proto, "z"));
- assertFalse(%HasFastProperties(proto));
- }
-
- assertEquals("2", check_z(o));
-}
-
-// Simple update.
-(function() {
- var proto = Object.create(null);
- proto.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proto);
-
- function update_z() {
- proto.z = "2";
- }
-
- testbench(o, proto, update_z, true);
-})();
-
-// Update using Object.assign.
-(function() {
- var proto = Object.create(null);
- proto.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proto);
-
- function update_z() {
- Object.assign(proto, {z: "2"});
- }
-
- testbench(o, proto, update_z, true);
-})();
-
-// Update using Object.defineProperty
-(function() {
- var proto = Object.create(null);
- proto.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proto);
-
- function update_z() {
- Object.defineProperty(proto, 'z', {
- value: "2",
- configurable: true,
- enumerable: true,
- writable: true
- });
- }
-
- testbench(o, proto, update_z, true);
-})();
-
-
-// Update using setter
-(function() {
- var proto = Object.create(null);
- Object.defineProperty(proto, "z", {
- get : function () {return this.z_val;},
- set : function (new_z) {this.z_val = new_z;}
- });
-
- proto.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proto);
-
- function update_z() {
- proto.z = "2";
- }
-
- testbench(o, proto, update_z, false);
-})();
-
-// Proxy test 1: Update via proxy.
-(function() {
- var proto = Object.create(null);
-
- var proxy = new Proxy(proto, {});
-
- proxy.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proxy);
-
- function update_z() {
- proxy.z = "2";
- }
-
- testbench(o, proto, update_z, false);
-})();
-
-// Proxy test 2: Update on proto.
-(function() {
- var proto = Object.create(null);
-
- var proxy = new Proxy(proto, {});
-
- proto.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proxy);
-
- function update_z() {
- proto.z = "2";
- }
-
- testbench(o, proto, update_z, false);
-})();
-
-// Proxy test 3: Update intercepted.
-(function() {
- var proto = Object.create(null);
-
- var handler = {
- get: function(target, prop) {
- return target.the_value;
- },
- set: function(target, prop, value) {
- return target.the_value = value;
- }
- };
-
- var proxy = new Proxy(proto, handler);
-
- proxy.z = "1";
- assertFalse(%HasFastProperties(proto));
-
- var o = Object.create(proxy);
-
- function update_z() {
- proxy.z = "2";
- }
-
- testbench(o, proto, update_z, false);
-
-})();
-
-//
-// Below: Testing TF optimization of accessing constants in dictionary mode
-// prototypes.
-//
-
-// Test inlining with fast mode receiver.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
-
- // Test that we inlined the access:
- var dummy = {x : 123};
- read_x(dummy);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
- assertUnoptimized(read_x);
- }
-
-})();
-
-// Test inlining with dictionary mode receiver that is a prototype.
-
-(function() {
-
- var proto1 = Object.create(null);
- proto1.x = 1;
- var proto2 = Object.create(null);
- var o = Object.create(proto1);
- Object.setPrototypeOf(proto1, proto2);
-
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto1));
- assertFalse(%HasFastProperties(proto2));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(proto1));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(proto1));
- assertOptimized(read_x);
-
- // Test that we inlined the access:
- var dummy = {x : 123};
- read_x(dummy);
-
- // TODO(v8:11457) This test doesn't work yet, see TODO in
- // AccessInfoFactory::TryLoadPropertyDetails. Currently, we can't inline
- // accesses with dictionary mode receivers.
- // if (%IsDictPropertyConstTrackingEnabled()) {
- // assertTrue(%HasFastProperties(o));
- // assertFalse(%HasFastProperties(proto1));
- // assertFalse(%HasFastProperties(proto2));
- // assertUnoptimized(read_x);
- // }
-})();
-
-// The machinery we use for detecting the invalidation of constants held by
-// dictionary mode objects (related to the prototype validity cell mechanism) is
-// specific to prototypes. This means that for non-prototype dictionary mode
-// objects, we have no way of detecting changes invalidating folded
-// constants. Therefore, we must not fold constants held by non-prototype
-// dictionary mode objects. This is tested here.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var o = Object.create(null);
- Object.setPrototypeOf(o, proto);
- assertFalse(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
- var dummy = {x : 123};
- read_x(dummy);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
-    // We never inlined the access, so it's still optimized.
- assertOptimized(read_x);
- }
-})();
-
-// Test inlining of accessor.
-(function() {
- var proto = Object.create(null);
- proto.x_val = 1;
- Object.defineProperty(proto, "x", {
- get : function () {return this.x_val;}
- });
-
- var o = Object.create(proto);
- assertFalse(%HasFastProperties(proto))
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
- // Test that we inlined the access:
- var dummy = {x : 123};
- read_x(dummy);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
- assertUnoptimized(read_x);
- }
-})();
-
-// Invalidation by adding same property to receiver.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
- o.x = 2;
-
- assertEquals(2, read_x(o));
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
- assertUnoptimized(read_x);
- }
-
-})();
-
-// Invalidation by adding property to intermediate prototype.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var in_between = Object.create(null);
- Object.setPrototypeOf(in_between, proto);
-
- var o = Object.create(in_between);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(in_between));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
- in_between.x = 2;
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(in_between));
- assertFalse(%HasFastProperties(proto));
- assertUnoptimized(read_x);
- }
-
- assertEquals(2, read_x(o));
-})();
-
-// Invalidation by changing prototype of receiver.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var other_proto = Object.create(null);
- other_proto.x = 2;
-
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
-
- Object.setPrototypeOf(o, other_proto);
- assertEquals(2, read_x(o));
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertFalse(%HasFastProperties(other_proto));
- assertUnoptimized(read_x);
- }
-})();
-
-// Invalidation by changing [[Prototype]] of a prototype on the chain from the
-// receiver to the holder.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var other_proto = Object.create(null);
- other_proto.x = 2;
- var in_between = Object.create(null);
- Object.setPrototypeOf(in_between, proto);
-
- var o = Object.create(in_between);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(in_between));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
- Object.setPrototypeOf(in_between, other_proto);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(in_between));
- assertFalse(%HasFastProperties(proto));
- assertFalse(%HasFastProperties(other_proto));
- assertUnoptimized(read_x);
- }
-
- assertEquals(2, read_x(o));
-})();
-
-// Invalidation by changing property on prototype itself.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- assertEquals(1, read_x(o));
- %OptimizeFunctionOnNextCall(read_x);
- assertEquals(1, read_x(o));
- assertOptimized(read_x);
-
- proto.x = 2;
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertUnoptimized(read_x);
- }
-
- assertEquals(2, read_x(o));
-})();
-
-// Invalidation by deleting property on prototype.
-(function() {
- var proto = Object.create(null);
- proto.x = 1;
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_x(arg_o) {
- return arg_o.x;
- }
-
- %PrepareFunctionForOptimization(read_x);
- read_x(o);
- %OptimizeFunctionOnNextCall(read_x);
- read_x(o);
-
- delete proto.x;
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertUnoptimized(read_x);
- }
-
- assertEquals(undefined, read_x(o));
-})();
-
-// Storing the same value does not invalidate const-ness. Store done from
-// runtime/without feedback.
-(function() {
- var proto = Object.create(null);
- var some_object = {bla: 123};
- proto.x = 1;
- proto.y = some_object
-
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_xy(arg_o) {
- return [arg_o.x, arg_o.y];
- }
-
- %PrepareFunctionForOptimization(read_xy);
- assertEquals([1, some_object], read_xy(o));
- %OptimizeFunctionOnNextCall(read_xy);
- assertEquals([1, some_object], read_xy(o));
- assertOptimized(read_xy);
-
- // Build value 1 without re-using proto.x.
- var x2 = 0;
- for(var i = 0; i < 5; ++i) {
- x2 += 0.2;
- }
-
- // Storing the same values for x and y again:
- proto.x = x2;
- proto.y = some_object;
- assertEquals(x2, proto.x);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertTrue(%HasOwnConstDataProperty(proto, "x"));
- assertOptimized(read_xy);
- }
-
- proto.x = 2;
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertFalse(%HasOwnConstDataProperty(proto, "x"));
- assertUnoptimized(read_xy);
- }
-
- assertEquals(2, read_xy(o)[0]);
-})();
-
-// Storing the same value does not invalidate const-ness. Store done by IC
-// handler.
-(function() {
- var proto = Object.create(null);
- var some_object = {bla: 123};
- proto.x = 1;
- proto.y = some_object
-
- var o = Object.create(proto);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto));
-
- function read_xy(arg_o) {
- return [arg_o.x, arg_o.y];
- }
-
- %PrepareFunctionForOptimization(read_xy);
- assertEquals([1, some_object], read_xy(o));
- %OptimizeFunctionOnNextCall(read_xy);
- assertEquals([1, some_object], read_xy(o));
- assertOptimized(read_xy);
-
- // Build value 1 without re-using proto.x.
- var x2 = 0;
- for(var i = 0; i < 5; ++i) {
- x2 += 0.2;
- }
-
- function change_xy(obj, x, y) {
- obj.x = x;
- obj.y = y;
- }
-
- %PrepareFunctionForOptimization(change_xy);
- // Storing the same values for x and y again:
- change_xy(proto, 1, some_object);
- change_xy(proto, 1, some_object);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertTrue(%HasOwnConstDataProperty(proto, "x"));
- assertOptimized(read_xy);
- }
-
- change_xy(proto, 2, some_object);
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto));
- assertFalse(%HasOwnConstDataProperty(proto, "x"));
- assertUnoptimized(read_xy);
- }
-
- assertEquals(2, read_xy(o)[0]);
-})();
-
-// Invalidation by replacing a prototype. Just like the old prototype, the new
-// prototype owns the property as an accessor, but in the form of an
-// AccessorInfo rather than an AccessorPair.
-(function() {
- var proto1 = Object.create(null);
- Object.defineProperty(proto1, 'length', {get() {return 1}});
- var proto2 = Object.create(proto1);
- var o = Object.create(proto2);
- assertTrue(%HasFastProperties(o));
- assertFalse(%HasFastProperties(proto1));
- assertFalse(%HasFastProperties(proto2));
-
- function read_length(arg_o) {
- return arg_o.length;
- }
-
- %PrepareFunctionForOptimization(read_length);
- assertEquals(1, read_length(o));
- %DisableOptimizationFinalization();
- %OptimizeFunctionOnNextCall(read_length, "concurrent");
- assertEquals(1, read_length(o));
- assertUnoptimized(read_length);
-
- %WaitForBackgroundOptimization();
- var other_proto1 = [];
- Object.setPrototypeOf(proto2, other_proto1);
- %FinalizeOptimization();
-
- assertUnoptimized(read_length);
- assertEquals(0, read_length(o));
-
- if (%IsDictPropertyConstTrackingEnabled()) {
- assertFalse(%HasFastProperties(proto1));
- assertFalse(%HasFastProperties(proto2));
- assertFalse(%HasFastProperties(other_proto1));
- assertUnoptimized(read_length);
- }
-})();
diff --git a/deps/v8/test/mjsunit/const-field-tracking.js b/deps/v8/test/mjsunit/const-field-tracking.js
deleted file mode 100644
index dc0bb7a8e7..0000000000
--- a/deps/v8/test/mjsunit/const-field-tracking.js
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
-
-var global = this;
-var unique_id = 0;
-// Creates a function with unique SharedFunctionInfo to ensure the feedback
-// vector is unique for each test case.
-function MakeFunctionWithUniqueSFI(...args) {
- assertTrue(args.length > 0);
- var body = `/* Unique comment: ${unique_id++} */ ` + args.pop();
- return new Function(...args, body);
-}
-
-
-//
-// Load constant field from constant object directly.
-//
-function TestLoadFromConstantFieldOfAConstantObject(the_value, other_value) {
- function A(v) { this.v = v; }
- function O() { this.a = new A(the_value); }
- var the_object = new O();
-
- // Ensure that {the_object.a}'s map is not stable to complicate compiler's
- // life.
- new A(the_value).blah = 0;
-
-  // Ensure that constant tracking is enabled for {constant_object}.
- delete global.constant_object;
- global.constant_object = the_object;
- assertEquals(the_object, constant_object);
-
- assertTrue(%HasFastProperties(the_object));
-
- // {constant_object} is known to the compiler via global property cell
- // tracking.
- var load = MakeFunctionWithUniqueSFI("return constant_object.a.v;");
- %PrepareFunctionForOptimization(load);
- load();
- load();
- %OptimizeFunctionOnNextCall(load);
- assertEquals(the_value, load());
- assertOptimized(load);
- var a = new A(other_value);
- assertTrue(%HaveSameMap(a, the_object.a));
- // Make constant field mutable by assigning another value
- // to some other instance of A.
- new A(the_value).v = other_value;
- assertTrue(%HaveSameMap(a, new A(the_value)));
- assertTrue(%HaveSameMap(a, the_object.a));
- assertUnoptimized(load);
- assertEquals(the_value, load());
- assertUnoptimized(load);
- assertEquals(the_value, load());
-}
-
-// Test constant tracking with Smi value.
-(function() {
- var the_value = 42;
- var other_value = 153;
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with double value.
-(function() {
- var the_value = 0.9;
- var other_value = 0.42;
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with function value.
-(function() {
- var the_value = function V() {};
- var other_value = function W() {};
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with heap object value.
-(function() {
- function V() {}
- var the_value = new V();
- var other_value = new V();
- TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
-})();
-
-
-//
-// Load constant field from a prototype.
-//
-function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
- function Proto() { this.v = the_value; }
- var the_prototype = new Proto();
-
- function O() {}
- O.prototype = the_prototype;
- var the_object = new O();
-
- // Ensure O.prototype is in fast mode by loading from its field.
- function warmup() { return new O().v; }
- %EnsureFeedbackVectorForFunction(warmup);
- warmup(); warmup(); warmup();
- if (!%IsDictPropertyConstTrackingEnabled())
- assertTrue(%HasFastProperties(O.prototype));
-
- // The parameter object is not constant but all the values have the same
- // map and therefore the compiler knows the prototype object and can
- // optimize load of "v".
- var load = MakeFunctionWithUniqueSFI("o", "return o.v;");
- %PrepareFunctionForOptimization(load);
- load(new O());
- load(new O());
- %OptimizeFunctionOnNextCall(load);
- assertEquals(the_value, load(new O()));
- assertOptimized(load);
- // Invalidation of mutability should trigger deoptimization with a
- // "field-owner" reason.
- the_prototype.v = other_value;
- assertUnoptimized(load);
-}
-
-// Test constant tracking with Smi value.
-(function() {
- var the_value = 42;
- var other_value = 153;
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-// Test constant tracking with double value.
-(function() {
- var the_value = 0.9;
- var other_value = 0.42;
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-// Test constant tracking with function value.
-(function() {
- var the_value = function V() {};
- var other_value = function W() {};
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-// Test constant tracking with heap object value.
-(function() {
- function V() {}
- var the_value = new V();
- var other_value = new V();
- TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
-})();
-
-
-//
-// Store to constant field of a constant object.
-//
-function TestStoreToConstantFieldOfConstantObject(the_value, other_value) {
- function A(v) { this.v = v; }
- function O() { this.a = new A(the_value); }
- var the_object = new O();
-
- // Ensure that {the_object.a}'s map is not stable to complicate compiler's
- // life.
- new A(the_value).blah = 0;
-
-  // Ensure that constant tracking is enabled for {constant_object}.
- delete global.constant_object;
- global.constant_object = the_object;
- assertEquals(the_object, constant_object);
-
- assertTrue(%HasFastProperties(the_object));
-
- // {constant_object} is known to the compiler via global property cell
- // tracking.
- var store = MakeFunctionWithUniqueSFI("v", "constant_object.a.v = v;");
- %PrepareFunctionForOptimization(store);
- store(the_value);
- store(the_value);
- %OptimizeFunctionOnNextCall(store);
- store(the_value);
- assertEquals(the_value, constant_object.a.v);
- assertOptimized(store);
- // Storing of the same value does not deoptimize.
- store(the_value);
- assertEquals(the_value, constant_object.a.v);
- assertOptimized(store);
-
- var a = new A(other_value);
-
- if (typeof the_value == "function" || typeof the_value == "object") {
- // For heap object fields "field-owner" dependency is installed for
- // any access of the field, therefore making constant field mutable by
- // assigning other value to some other instance of A should already
- // trigger deoptimization.
- assertTrue(%HaveSameMap(a, the_object.a));
- new A(the_value).v = other_value;
- assertTrue(%HaveSameMap(a, new A(the_value)));
- assertTrue(%HaveSameMap(a, the_object.a));
- assertUnoptimized(store);
- } else {
- assertOptimized(store);
- }
- // Storing other value deoptimizes because of failed value check.
- store(other_value);
- assertUnoptimized(store);
- assertEquals(other_value, constant_object.a.v);
-}
-
-// Test constant tracking with Smi values.
-(function() {
- var the_value = 42;
- var other_value = 153;
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with double values.
-(function() {
- var the_value = 0.9;
- var other_value = 0.42
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with function values.
-(function() {
- var the_value = function V() {};
- var other_value = function W() {};
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
-
-// Test constant tracking with heap object values.
-(function() {
- function V() {}
- var the_value = new V();
- var other_value = new V();
- TestStoreToConstantFieldOfConstantObject(the_value, other_value);
-})();
diff --git a/deps/v8/test/mjsunit/dataview-resizablearraybuffer.js b/deps/v8/test/mjsunit/dataview-resizablearraybuffer.js
index 83b8729619..13e0813704 100644
--- a/deps/v8/test/mjsunit/dataview-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/dataview-resizablearraybuffer.js
@@ -432,3 +432,32 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
TypeError);
}
})();
+
+(function DataViewsAndRabGsabDataViews() {
+ // Internally we differentiate between JSDataView and JSRabGsabDataView. Test
+ // that they're indistinguishable externally.
+ const ab = new ArrayBuffer(10);
+ const rab = new ArrayBuffer(10, {maxByteLength: 20});
+
+ const dv1 = new DataView(ab);
+ const dv2 = new DataView(rab);
+
+ assertEquals(DataView.prototype, dv1.__proto__);
+ assertEquals(DataView.prototype, dv2.__proto__);
+ assertEquals(DataView, dv1.constructor);
+ assertEquals(DataView, dv2.constructor);
+
+ class MyDataView extends DataView {
+ constructor(buffer) {
+ super(buffer);
+ }
+ }
+
+ const dv3 = new MyDataView(ab);
+ const dv4 = new MyDataView(rab);
+
+ assertEquals(MyDataView.prototype, dv3.__proto__);
+ assertEquals(MyDataView.prototype, dv4.__proto__);
+ assertEquals(MyDataView, dv3.constructor);
+ assertEquals(MyDataView, dv4.constructor);
+})();
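
As the comment in the new test says, JSDataView and JSRabGsabDataView are an internal distinction only. A quick way to see that they look identical from JavaScript (sketch):

  const plain = new DataView(new ArrayBuffer(8));
  const tracking = new DataView(new ArrayBuffer(8, {maxByteLength: 16}));
  plain instanceof DataView;                                // true
  tracking instanceof DataView;                             // true
  Object.getPrototypeOf(tracking) === DataView.prototype;   // true
  Object.prototype.toString.call(tracking);                 // "[object DataView]"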
diff --git a/deps/v8/test/mjsunit/ensure-growing-store-learns.js b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
index 2b4fa749af..b0bed32007 100644
--- a/deps/v8/test/mjsunit/ensure-growing-store-learns.js
+++ b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
@@ -36,7 +36,11 @@
// is a dictionary mode prototypes on the prototype chain. Therefore, if
// v8_dict_property_const_tracking is enabled, the optimized code only
// contains a call to the IC handler and doesn't get deopted.
- assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(foo));
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertOptimized(foo);
+ } else {
+ assertUnoptimized(foo);
+ }
assertTrue(%HasDictionaryElements(a));
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/es6/array-concat-arraylike-string-length.js b/deps/v8/test/mjsunit/es6/array-concat-arraylike-string-length.js
index 471dcb99eb..0263113019 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-arraylike-string-length.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-arraylike-string-length.js
@@ -12,6 +12,6 @@ var obj = {
obj[Symbol.isConcatSpreadable] = true;
var obj2 = { length: 3, "0": "0", "1": "1", "2": "2" };
var arr = ["X", "Y", "Z"];
-assertEquals([void 0, "A", void 0, "B", void 0, "C",
+assertEquals([, "A", , "B", , "C",
{ "length": 3, "0": "0", "1": "1", "2": "2" },
"X", "Y", "Z"], Array.prototype.concat.call(obj, obj2, arr));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-arraylike.js b/deps/v8/test/mjsunit/es6/array-concat-arraylike.js
index 0ad3eb5293..371d0f6e21 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-arraylike.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-arraylike.js
@@ -12,6 +12,6 @@ var obj = {
obj[Symbol.isConcatSpreadable] = true;
var obj2 = { length: 3, "0": "0", "1": "1", "2": "2" };
var arr = ["X", "Y", "Z"];
-assertEquals([void 0, "A", void 0, "B", void 0, "C",
+assertEquals([, "A", , "B", , "C",
{ "length": 3, "0": "0", "1": "1", "2": "2" },
"X", "Y", "Z"], Array.prototype.concat.call(obj, obj2, arr));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-holey-array.js b/deps/v8/test/mjsunit/es6/array-concat-holey-array.js
index 31c2ba1bd9..19989b42c7 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-holey-array.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-holey-array.js
@@ -7,5 +7,5 @@ var arr = [];
arr[4] = "Item 4";
arr[8] = "Item 8";
var arr2 = [".", "!", "?"];
-assertEquals([void 0, void 0, void 0, void 0, "Item 4", void 0, void 0,
- void 0, "Item 8", ".", "!", "?"], arr.concat(arr2));
+assertEquals(
+ [, , , , 'Item 4', , , , 'Item 8', '.', '!', '?'], arr.concat(arr2));
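
The expectation changes in these concat tests switch from explicit void 0 entries to elisions: indices that are absent on a spreadable source become holes in the result, not own undefined properties. A harness-free sketch of that distinction:

  const obj = {length: 3, 1: "A"};
  obj[Symbol.isConcatSpreadable] = true;
  const out = [].concat(obj);
  out.length;              // 3
  out.hasOwnProperty(0);   // false -> index 0 is a hole
  out[1];                  // "A"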
diff --git a/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-holey.js b/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-holey.js
index ac160fc1f3..711a450bdf 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-holey.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-holey.js
@@ -5,4 +5,4 @@
var args = (function(a) { return arguments; })(1,2,3);
delete args[1];
args[Symbol.isConcatSpreadable] = true;
-assertEquals([1, void 0, 3, 1, void 0, 3], [].concat(args, args));
+assertEquals([1, , 3, 1, , 3], [].concat(args, args));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-with-dupes.js b/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-with-dupes.js
index 4634cac705..fed914780a 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-with-dupes.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments-with-dupes.js
@@ -7,4 +7,4 @@ args[Symbol.isConcatSpreadable] = true;
assertEquals([1, 2, 3, 1, 2, 3], [].concat(args, args));
Object.defineProperty(args, "length", { value: 6 });
-assertEquals([1, 2, 3, void 0, void 0, void 0], [].concat(args));
+assertEquals([1, 2, 3, , , ,], [].concat(args));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments.js b/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments.js
index b77833f885..346d07e870 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-sloppy-arguments.js
@@ -7,4 +7,4 @@ args[Symbol.isConcatSpreadable] = true;
assertEquals([1, 2, 3, 1, 2, 3], [].concat(args, args));
Object.defineProperty(args, "length", { value: 6 });
-assertEquals([1, 2, 3, void 0, void 0, void 0], [].concat(args));
+assertEquals([1, 2, 3, , , ,], [].concat(args));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-spreadable-function.js b/deps/v8/test/mjsunit/es6/array-concat-spreadable-function.js
index 9b64fc63aa..1fea92b7e2 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-spreadable-function.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-spreadable-function.js
@@ -14,7 +14,7 @@ assertEquals([1, 2, 3], [].concat(fn));
Function.prototype[Symbol.isConcatSpreadable] = true;
// Functions may be concat-spreadable
-assertEquals([void 0, void 0, void 0], [].concat(function(a,b,c) {}));
+assertEquals(new Array(3), [].concat(function(a,b,c) {}));
Function.prototype[0] = 1;
Function.prototype[1] = 2;
Function.prototype[2] = 3;
diff --git a/deps/v8/test/mjsunit/es6/array-concat-spreadable-nonarraylike-proxy.js b/deps/v8/test/mjsunit/es6/array-concat-spreadable-nonarraylike-proxy.js
index ecbe4488d7..f32c0e1d9b 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-spreadable-nonarraylike-proxy.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-spreadable-nonarraylike-proxy.js
@@ -36,7 +36,7 @@ assertEquals(["get", target, "length", obj], log[1]);
target.length = 3;
log.length = 0;
-assertEquals(["a", "b", undefined], [].concat(obj));
+assertEquals(["a", "b", ,], [].concat(obj));
assertEquals(7, log.length);
for (var i in log) assertSame(target, log[i][1]);
assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
@@ -48,7 +48,7 @@ assertEquals(["get", target, "1", obj], log[5]);
assertEquals(["has", target, "2"], log[6]);
log.length = 0;
-assertEquals(["a", "b", undefined], Array.prototype.concat.apply(obj));
+assertEquals(["a", "b", ,], Array.prototype.concat.apply(obj));
assertEquals(7, log.length);
for (var i in log) assertSame(target, log[i][1]);
assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
diff --git a/deps/v8/test/mjsunit/es6/array-concat-spreadable-regexp.js b/deps/v8/test/mjsunit/es6/array-concat-spreadable-regexp.js
index 73a61e4d88..54b9372caf 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-spreadable-regexp.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-spreadable-regexp.js
@@ -16,7 +16,7 @@ assertEquals([1, 2, 3], [].concat(re));
RegExp.prototype[Symbol.isConcatSpreadable] = true;
RegExp.prototype.length = 3;
-assertEquals([void 0, void 0, void 0], [].concat(/abc/));
+assertEquals(new Array(3), [].concat(/abc/));
RegExp.prototype[0] = 1;
RegExp.prototype[1] = 2;
RegExp.prototype[2] = 3;
diff --git a/deps/v8/test/mjsunit/es6/array-concat-spreadable-sparse-object.js b/deps/v8/test/mjsunit/es6/array-concat-spreadable-sparse-object.js
index c10b002403..5227e7d6b7 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-spreadable-sparse-object.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-spreadable-sparse-object.js
@@ -5,7 +5,7 @@
"use strict";
var obj = { length: 5 };
obj[Symbol.isConcatSpreadable] = true;
-assertEquals([void 0, void 0, void 0, void 0, void 0], [].concat(obj));
+assertEquals(new Array(5), [].concat(obj));
obj.length = 4000;
assertEquals(new Array(4000), [].concat(obj));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-strict-arguments.js b/deps/v8/test/mjsunit/es6/array-concat-strict-arguments.js
index 4180a94a2e..106883168c 100644
--- a/deps/v8/test/mjsunit/es6/array-concat-strict-arguments.js
+++ b/deps/v8/test/mjsunit/es6/array-concat-strict-arguments.js
@@ -7,4 +7,4 @@ args[Symbol.isConcatSpreadable] = true;
assertEquals([1, 2, 3, 1, 2, 3], [].concat(args, args));
Object.defineProperty(args, "length", { value: 6 });
-assertEquals([1, 2, 3, void 0, void 0, void 0], [].concat(args));
+assertEquals([1, 2, 3, , , ,], [].concat(args));
diff --git a/deps/v8/test/mjsunit/es6/array-from.js b/deps/v8/test/mjsunit/es6/array-from.js
index 02a599d4ca..34fe9dfe92 100644
--- a/deps/v8/test/mjsunit/es6/array-from.js
+++ b/deps/v8/test/mjsunit/es6/array-from.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
(function() {
assertEquals(1, Array.from.length);
@@ -184,3 +186,22 @@ assertEquals(true, xlength.enumerable);
assertEquals(true, xlength.configurable);
})();
+
+(function testElementsKind() {
+ // Check that Array.from returns PACKED elements.
+ var arr = Array.from([1,2,3]);
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasSmiElements(arr));
+
+ var arr = Array.from({length: 3});
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasObjectElements(arr));
+
+ var arr = Array.from({length: 3}, (x) => 1);
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasSmiElements(arr));
+
+ var arr = Array.from({length: 3}, (x) => 1.5);
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasDoubleElements(arr));
+})();
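
The new testElementsKind block checks elements kinds with natives; the externally observable part is simply that Array.from writes every index, so its result is dense even when the source is not (sketch):

  const a = Array.from({length: 3});
  a.hasOwnProperty(0);     // true -> an own undefined slot, not a hole
  const b = Array.from([1, , 3]);
  b.hasOwnProperty(1);     // true -> the hole came through as undefined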
diff --git a/deps/v8/test/mjsunit/es6/array-of.js b/deps/v8/test/mjsunit/es6/array-of.js
index 40bc890da7..258e476939 100644
--- a/deps/v8/test/mjsunit/es6/array-of.js
+++ b/deps/v8/test/mjsunit/es6/array-of.js
@@ -4,6 +4,7 @@
// Based on Mozilla Array.of() tests at http://dxr.mozilla.org/mozilla-central/source/js/src/jit-test/tests/collections
+// Flags: --allow-natives-syntax
// Array.of makes real arrays.
@@ -210,3 +211,18 @@ assertThrows(function() { new Array.of() }, TypeError); // not a constructor
assertEquals(true, xlength.enumerable);
assertEquals(true, xlength.configurable);
})();
+
+(function testElementsKind() {
+ // Check that Array.of returns PACKED elements.
+ var arr = Array.of(1, 2, 3);
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasSmiElements(arr));
+
+ var arr = Array.of(1, 2.5, 3);
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasDoubleElements(arr));
+
+ var arr = Array.of.call(Array, Array(65536));
+ assertTrue(%HasFastPackedElements(arr));
+ assertTrue(%HasObjectElements(arr));
+})();
diff --git a/deps/v8/test/mjsunit/es6/array-spread-holey.js b/deps/v8/test/mjsunit/es6/array-spread-holey.js
index 7d95e51b29..bea58e4952 100644
--- a/deps/v8/test/mjsunit/es6/array-spread-holey.js
+++ b/deps/v8/test/mjsunit/es6/array-spread-holey.js
@@ -6,7 +6,7 @@
var a = [, 2];
-assertEquals([, 2], [...a]);
+assertEquals([undefined, 2], [...a]);
assertTrue([...a].hasOwnProperty(0));
assertTrue([2, ...a].hasOwnProperty(1));
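
This expectation change reflects that spreading goes through the array iterator, which reads holes as undefined, so the copy is dense (sketch):

  const a = [, 2];
  a.hasOwnProperty(0);     // false (hole in the source)
  const b = [...a];
  b.hasOwnProperty(0);     // true, and b[0] === undefined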
diff --git a/deps/v8/test/mjsunit/es6/block-sloppy-function.js b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
index 00f35d53d1..1dd758b10d 100644
--- a/deps/v8/test/mjsunit/es6/block-sloppy-function.js
+++ b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
@@ -470,7 +470,8 @@
assertEquals(4, f());
}
- assertEquals(4, f());
+ // B.3.2.6 Changes to BlockDeclarationInstantiation
+ assertEquals(5, f());
})();
// B.3.5 interacts with B.3.3 to allow this.
diff --git a/deps/v8/test/mjsunit/es6/super.js b/deps/v8/test/mjsunit/es6/super.js
index a101ea896b..a747fed33c 100644
--- a/deps/v8/test/mjsunit/es6/super.js
+++ b/deps/v8/test/mjsunit/es6/super.js
@@ -2245,3 +2245,15 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
assertThrows(C.prototype.named, TypeError);
assertThrows(C.prototype.keyed, TypeError);
})();
+
+(function TestDeleteSuperPropertyEvaluationOrder() {
+ var i = 0;
+ class Base {}
+ class Derived extends Base {
+ test() {
+ delete super[i++];
+ }
+ }
+ assertThrows(Derived.prototype.test, ReferenceError);
+ assertEquals(1, i);
+})();
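
The added TestDeleteSuperPropertyEvaluationOrder checks that the property key is evaluated before deleting a super reference throws. Outside the harness that looks like (sketch):

  let i = 0;
  class Base {}
  class Derived extends Base {
    test() { delete super[i++]; }   // deleting a super property throws
  }
  try { (new Derived()).test(); } catch (e) { /* ReferenceError */ }
  // i is now 1: the key expression i++ ran before the throw.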
diff --git a/deps/v8/test/mjsunit/es6/typedarray-tostring.js b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
index f388881494..0ae210eddc 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-tostring.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
@@ -91,8 +91,12 @@ for (var constructor of typedArrayConstructors) {
let NumberToLocaleString = Number.prototype.toLocaleString;
Number.prototype.toLocaleString = pushArgs("Number");
+ // According to the ECMA-402 specification, the optional arguments locales
+ // and options must be passed. Without the ECMA-402 internationalization
+ // API, the optional arguments must not be passed.
+ const noArgs = (typeof Intl !== "object") ? [] : [undefined, undefined];
(new constructor([1, 2])).toLocaleString();
- assertEquals(["Number", [], "Number", []], log);
+ assertEquals(["Number", noArgs, "Number", noArgs], log);
Number.prototype.toLocaleString = NumberToLocaleString;
})();
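
The comment added above is the whole story: with ECMA-402 present, toLocaleString forwards its (possibly undefined) locales and options to each element, and without it the elements are called with no arguments. The test observes this by monkey-patching Number.prototype.toLocaleString; a trimmed-down sketch:

  const seen = [];
  const original = Number.prototype.toLocaleString;
  Number.prototype.toLocaleString = function(...args) {
    seen.push(args.length);
    return original.apply(this, args);
  };
  new Int8Array([1, 2]).toLocaleString();
  Number.prototype.toLocaleString = original;
  // seen is [2, 2] when Intl is available, [0, 0] otherwise.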
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js b/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js
index cd6496f944..fdabfe831e 100644
--- a/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-groupby.js
@@ -142,7 +142,7 @@ var group = () => {
}
assertEquals(group(), [
- ['undefined', [,]],
+ ['undefined', [undefined]],
]);
array.__proto__.push(6);
diff --git a/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js b/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js
index 265e84181f..84ac13cf8a 100644
--- a/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js
+++ b/deps/v8/test/mjsunit/harmony/array-prototype-groupbytomap.js
@@ -147,7 +147,7 @@ var groupToMap = () => {
}
assertEquals(groupToMap(), [
- [undefined, [,]],
+ [undefined, [undefined]],
]);
array.__proto__.push(6);
assertEquals(groupToMap(), [
diff --git a/deps/v8/test/mjsunit/harmony/array-to-reversed.js b/deps/v8/test/mjsunit/harmony/array-to-reversed.js
index caae4079ab..6ad0084e2c 100644
--- a/deps/v8/test/mjsunit/harmony/array-to-reversed.js
+++ b/deps/v8/test/mjsunit/harmony/array-to-reversed.js
@@ -31,6 +31,36 @@ assertEquals("toReversed", Array.prototype.toReversed.name);
assertFalse(a === r);
})();
+(function TestSmiHoley() {
+ let a = [1,,3,4];
+ let r = a.toReversed();
+ assertEquals([1,,3,4], a);
+ assertEquals([4,3,undefined,1], r);
+ assertFalse(a.hasOwnProperty(1));
+ assertTrue(r.hasOwnProperty(2));
+ assertFalse(a === r);
+})();
+
+(function TestDoubleHoley() {
+ let a = [1.1,,3.3,4.4];
+ let r = a.toReversed();
+ assertEquals([1.1,,3.3,4.4], a);
+ assertEquals([4.4,3.3,undefined,1.1], r);
+ assertFalse(a.hasOwnProperty(1));
+ assertTrue(r.hasOwnProperty(2));
+ assertFalse(a === r);
+})();
+
+(function TestHoley() {
+ let a = [true,false,,1,42.42];
+ let r = a.toReversed();
+ assertEquals([true,false,,1,42.42], a);
+ assertEquals([42.42,1,undefined,false,true], r);
+ assertFalse(a.hasOwnProperty(2));
+ assertTrue(r.hasOwnProperty(2));
+ assertFalse(a === r);
+})();
+
(function TestGeneric() {
let a = { length: 4,
get "0"() { return "hello"; },
diff --git a/deps/v8/test/mjsunit/harmony/array-to-sorted.js b/deps/v8/test/mjsunit/harmony/array-to-sorted.js
index e5ea813fb8..d32523a7fa 100644
--- a/deps/v8/test/mjsunit/harmony/array-to-sorted.js
+++ b/deps/v8/test/mjsunit/harmony/array-to-sorted.js
@@ -7,32 +7,39 @@
assertEquals(1, Array.prototype.toSorted.length);
assertEquals("toSorted", Array.prototype.toSorted.name);
-function TerribleCopy(input) {
+function TerribleCopy(input, fillHoles) {
let copy;
if (Array.isArray(input)) {
- copy = [...input];
+ copy = new Array(input.length);
} else {
copy = { length: input.length };
- for (let i = 0; i < input.length; i++) {
+ }
+ for (let i = 0; i < input.length; ++i) {
+ if (i in input) {
copy[i] = input[i];
+ } else if (fillHoles) {
+ copy[i] = undefined;
}
}
return copy;
}
function AssertToSortedAndSortSameResult(input, ...args) {
- const orig = TerribleCopy(input);
+ const orig = TerribleCopy(input, false);
const s = Array.prototype.toSorted.apply(input, args);
- const copy = TerribleCopy(input);
+ const copy = TerribleCopy(input, true);
Array.prototype.sort.apply(copy, args);
// The in-place sorted version should be pairwise equal to the toSorted,
- // modulo being an actual Array if the input is generic.
+ // modulo being an actual Array if the input is generic, and holes should
+ // be filled with undefined.
if (Array.isArray(input)) {
assertEquals(copy, s);
} else {
assertEquals(copy.length, s.length);
for (let i = 0; i < copy.length; i++) {
+ assertTrue(i in copy);
+ assertTrue(i in s);
assertEquals(copy[i], s[i]);
}
}
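
The reworked TerribleCopy/AssertToSortedAndSortSameResult pair encodes the same rule for toSorted: the result is dense, with holes read as undefined and sorted to the end like explicit undefineds (sketch):

  const a = [3, , 1];
  const s = a.toSorted();
  s.length;                // 3
  s.hasOwnProperty(2);     // true -> s is [1, 3, undefined]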
diff --git a/deps/v8/test/mjsunit/harmony/arraybuffer-transfer.js b/deps/v8/test/mjsunit/harmony/arraybuffer-transfer.js
index 85b3fd4177..15932f4dd9 100644
--- a/deps/v8/test/mjsunit/harmony/arraybuffer-transfer.js
+++ b/deps/v8/test/mjsunit/harmony/arraybuffer-transfer.js
@@ -2,136 +2,171 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rab-gsab --allow-natives-syntax
+// Flags: --harmony-rab-gsab --harmony-rab-gsab-transfer --allow-natives-syntax
-assertEquals(0, ArrayBuffer.prototype.transfer.length);
-assertEquals("transfer", ArrayBuffer.prototype.transfer.name);
+function TestTransfer(method) {
+ assertEquals(0, ArrayBuffer.prototype[method].length);
+ assertEquals(method, ArrayBuffer.prototype[method].name);
-function AssertDetached(ab) {
- assertEquals(0, ab.byteLength);
- assertThrows(() => (new Uint8Array(ab)).sort(), TypeError);
-}
+ function AssertDetached(ab) {
+ assertTrue(ab.detached);
+ assertEquals(0, ab.byteLength);
+ assertThrows(() => (new Uint8Array(ab)).sort(), TypeError);
+ }
-const IndicesAndValuesForTesting = [
- [1, 4],
- [4, 18],
- [17, 255],
- [518, 48]
-];
-function WriteTestData(ab) {
- let u8 = new Uint8Array(ab);
- for (let [idx, val] of IndicesAndValuesForTesting) {
- u8[idx] = val;
+ function AssertResizable(originalResizable, newResizable) {
+ if (method === 'transfer') {
+ assertEquals(originalResizable, newResizable);
+ } else {
+ assertFalse(newResizable);
+ }
}
-}
-function AssertBufferContainsTestData(ab) {
- let u8 = new Uint8Array(ab);
- for (let [idx, val] of IndicesAndValuesForTesting) {
- if (idx < u8.length) {
- assertEquals(val, u8[idx]);
+
+ const IndicesAndValuesForTesting = [[1, 4], [4, 18], [17, 255], [518, 48]];
+ function WriteTestData(ab) {
+ let u8 = new Uint8Array(ab);
+ for (let [idx, val] of IndicesAndValuesForTesting) {
+ u8[idx] = val;
+ }
+ }
+ function AssertBufferContainsTestData(ab) {
+ let u8 = new Uint8Array(ab);
+ for (let [idx, val] of IndicesAndValuesForTesting) {
+ if (idx < u8.length) {
+ assertEquals(val, u8[idx]);
+ }
}
}
-}
-function TestSameLength(len, opts) {
- let ab = new ArrayBuffer(len, opts);
- WriteTestData(ab);
- const xfer = ab.transfer();
- assertEquals(len, xfer.byteLength);
- assertFalse(xfer.resizable);
- AssertBufferContainsTestData(xfer);
- AssertDetached(ab);
-}
-TestSameLength(1024);
-TestSameLength(1024, { maxByteLength: 2048 });
-
-function TestGrow(len, opts) {
- let ab = new ArrayBuffer(len);
- WriteTestData(ab);
- const newLen = len * 2 + 128; // +128 to ensure newLen is never 0
- const xfer = ab.transfer(newLen);
- assertEquals(newLen, xfer.byteLength);
- assertFalse(xfer.resizable);
- if (len > 0) AssertBufferContainsTestData(xfer);
- AssertDetached(ab);
-
- // The new memory should be zeroed.
- let u8 = new Uint8Array(xfer);
- for (let i = len; i < newLen; i++) {
- assertEquals(0, u8[i]);
+ function TestSameLength(len, opts) {
+ let ab = new ArrayBuffer(len, opts);
+ WriteTestData(ab);
+ const resizable = ab.resizable;
+ const xfer = ab[method]();
+ assertEquals(len, xfer.byteLength);
+ AssertResizable(resizable, xfer.resizable);
+ AssertBufferContainsTestData(xfer);
+ AssertDetached(ab);
}
-}
-TestGrow(1024);
-TestGrow(1024, { maxByteLength: 2048 });
-TestGrow(0);
-TestGrow(0, { maxByteLength: 2048 });
-
-function TestNonGrow(len, opts) {
- for (let newLen of [len / 2, // shrink
- 0 // 0 special case
- ]) {
- let ab = new ArrayBuffer(len, opts);
+ TestSameLength(1024);
+ TestSameLength(1024, {maxByteLength: 2048});
+
+ function TestGrow(len, opts) {
+ let ab = new ArrayBuffer(len, opts);
+ const resizable = ab.resizable;
WriteTestData(ab);
- const xfer = ab.transfer(newLen);
+ const newLen = len * 2 + 128; // +128 to ensure newLen is never 0
+ const xfer = ab[method](newLen);
assertEquals(newLen, xfer.byteLength);
- assertFalse(xfer.resizable);
+ AssertResizable(resizable, xfer.resizable);
if (len > 0) AssertBufferContainsTestData(xfer);
AssertDetached(ab);
+
+ // The new memory should be zeroed.
+ let u8 = new Uint8Array(xfer);
+ for (let i = len; i < newLen; i++) {
+ assertEquals(0, u8[i]);
+ }
}
-}
-TestNonGrow(1024);
-TestNonGrow(1024, { maxByteLength: 2048 });
-TestNonGrow(0);
-TestNonGrow(0, { maxByteLength: 2048 });
-
-(function TestParameterConversion() {
- const len = 1024;
- {
- let ab = new ArrayBuffer(len);
- const detach = { valueOf() { %ArrayBufferDetach(ab); return len; } };
- assertThrows(() => ab.transfer(detach), TypeError);
+ TestGrow(1024);
+ TestGrow(0);
+ if (method === 'transfer') {
+ // Cannot transfer to a new byte length > max byte length.
+ assertThrows(() => TestGrow(1024, {maxByteLength: 2048}), RangeError);
+ } else {
+ TestGrow(1024, {maxByteLength: 2048});
}
-
- {
- let ab = new ArrayBuffer(len, { maxByteLength: len * 4 });
- const shrink = { valueOf() { ab.resize(len / 2); return len; } };
- const xfer = ab.transfer(shrink);
- assertFalse(xfer.resizable);
- assertEquals(len, xfer.byteLength);
+ TestGrow(0, {maxByteLength: 2048});
+
+ function TestNonGrow(len, opts) {
+ for (let newLen
+ of [len / 2, // shrink
+ 0 // 0 special case
+ ]) {
+ let ab = new ArrayBuffer(len, opts);
+ WriteTestData(ab);
+ const resizable = ab.resizable;
+ const xfer = ab[method](newLen);
+ assertEquals(newLen, xfer.byteLength);
+ AssertResizable(resizable, xfer.resizable);
+ if (len > 0) AssertBufferContainsTestData(xfer);
+ AssertDetached(ab);
+ }
}
+ TestNonGrow(1024);
+ TestNonGrow(1024, {maxByteLength: 2048});
+ TestNonGrow(0);
+ TestNonGrow(0, {maxByteLength: 2048});
+
+ (function TestParameterConversion() {
+ const len = 1024;
+ {
+ let ab = new ArrayBuffer(len);
+ const detach = {
+ valueOf() {
+ %ArrayBufferDetach(ab);
+ return len;
+ }
+ };
+ assertThrows(() => ab[method](detach), TypeError);
+ }
- {
- let ab = new ArrayBuffer(len, { maxByteLength: len * 4 });
- const grow = { valueOf() { ab.resize(len * 2); return len; } };
- const xfer = ab.transfer(grow);
- assertFalse(xfer.resizable);
- assertEquals(len, xfer.byteLength);
- }
-})();
+ {
+ let ab = new ArrayBuffer(len, {maxByteLength: len * 4});
+ const resizable = ab.resizable;
+ const shrink = {
+ valueOf() {
+ ab.resize(len / 2);
+ return len;
+ }
+ };
+ const xfer = ab[method](shrink);
+ AssertResizable(resizable, xfer.resizable);
+ assertEquals(len, xfer.byteLength);
+ }
-(function TestCannotBeSAB() {
- const len = 1024;
- let sab = new SharedArrayBuffer(1024);
- let gsab = new SharedArrayBuffer(len, { maxByteLength: len * 4 });
+ {
+ let ab = new ArrayBuffer(len, {maxByteLength: len * 4});
+ const resizable = ab.resizable;
+ const grow = {
+ valueOf() {
+ ab.resize(len * 2);
+ return len;
+ }
+ };
+ const xfer = ab[method](grow);
+ AssertResizable(resizable, xfer.resizable);
+ assertEquals(len, xfer.byteLength);
+ }
+ })();
- assertThrows(() => ArrayBuffer.prototype.transfer.call(sab), TypeError);
- assertThrows(() => ArrayBuffer.prototype.transfer.call(gsab), TypeError);
-})();
+ (function TestCannotBeSAB() {
+ const len = 1024;
+ let sab = new SharedArrayBuffer(1024);
+ let gsab = new SharedArrayBuffer(len, {maxByteLength: len * 4});
-(function TestInvalidLength() {
- for (let newLen of [-1024, Number.MAX_SAFE_INTEGER + 1]) {
- let ab = new ArrayBuffer(1024);
- assertThrows(() => ab.transfer(newLen), RangeError);
- }
-})();
+ assertThrows(() => ArrayBuffer.prototype[method].call(sab), TypeError);
+ assertThrows(() => ArrayBuffer.prototype[method].call(gsab), TypeError);
+ })();
-(function TestEmptySourceStore() {
- let ab = new ArrayBuffer();
- let xfer = ab.transfer().transfer(1024);
-})();
+ (function TestInvalidLength() {
+ for (let newLen of [-1024, Number.MAX_SAFE_INTEGER + 1]) {
+ let ab = new ArrayBuffer(1024);
+ assertThrows(() => ab[method](newLen), RangeError);
+ }
+ })();
-if (typeof WebAssembly !== 'undefined') {
- // WebAssembly buffers cannot be detached.
- const memory = new WebAssembly.Memory({ initial: 1 });
- assertThrows(() => memory.buffer.transfer(), TypeError);
+ (function TestEmptySourceStore() {
+ let ab = new ArrayBuffer();
+ let xfer = ab[method]()[method](1024);
+ })();
+
+ if (typeof WebAssembly !== 'undefined') {
+ // WebAssembly buffers cannot be detached.
+ const memory = new WebAssembly.Memory({initial: 1});
+ assertThrows(() => memory.buffer[method](), TypeError);
+ }
}
+
+TestTransfer('transfer');
+TestTransfer('transferToFixedLength');
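// A minimal sketch (hypothetical buffers) of the split the test parameterizes
// over, assuming the --harmony-rab-gsab-transfer flag used above: transfer()
// preserves resizability, transferToFixedLength() always yields a fixed-length
// buffer, and both detach the source.
const rab = new ArrayBuffer(8, {maxByteLength: 16});
const moved = rab.transfer();                  // moved.resizable === true
// rab.detached === true, rab.byteLength === 0
const rab2 = new ArrayBuffer(8, {maxByteLength: 16});
const fixed = rab2.transferToFixedLength();    // fixed.resizable === false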
diff --git a/deps/v8/test/mjsunit/harmony/iterator-constructor.js b/deps/v8/test/mjsunit/harmony/iterator-constructor.js
new file mode 100644
index 0000000000..61333f8619
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/iterator-constructor.js
@@ -0,0 +1,31 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-iterator-helpers
+
+assertEquals('function', typeof Iterator);
+assertEquals(0, Iterator.length);
+assertEquals('Iterator', Iterator.name);
+
+// Abstract base class, can't be instantiated.
+assertThrows(() => new Iterator(), TypeError);
+
+// Can be used as superclass though.
+class MyIterator extends Iterator {
+ next() {
+ return {value: 42, done: false};
+ }
+}
+const myIter = new MyIterator();
+assertTrue(myIter instanceof MyIterator);
+
+function* gen() {
+ yield 42;
+}
+const genIter = gen();
+assertTrue(genIter instanceof Iterator);
+assertSame(
+ Object.getPrototypeOf(
+ Object.getPrototypeOf(Object.getPrototypeOf(genIter))),
+ Iterator.prototype);
diff --git a/deps/v8/test/mjsunit/harmony/iterator-from.js b/deps/v8/test/mjsunit/harmony/iterator-from.js
new file mode 100644
index 0000000000..90a3d95b02
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/iterator-from.js
@@ -0,0 +1,78 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-iterator-helpers
+
+assertEquals('function', typeof Iterator.from);
+assertEquals(1, Iterator.from.length);
+assertEquals('from', Iterator.from.name);
+
+function TestValidIteratorWrapperSurface(iter) {
+ const proto = Object.getPrototypeOf(iter);
+ assertTrue(Object.hasOwn(proto, 'next'));
+ assertTrue(Object.hasOwn(proto, 'return'));
+ assertEquals('function', typeof proto.next);
+ assertEquals('function', typeof proto.return);
+ assertEquals(0, proto.next.length);
+ assertEquals(0, proto.return.length);
+ assertEquals('next', proto.next.name);
+ assertEquals('return', proto.return.name);
+ assertNotSame(iter, Iterator.prototype);
+ assertSame(Object.getPrototypeOf(proto), Iterator.prototype);
+}
+
+(function TestIteratorFromString() {
+ let iter = Iterator.from('abc');
+ assertEquals({value:'a', done:false}, iter.next());
+ assertEquals({value:'b', done:false}, iter.next());
+ assertEquals({value:'c', done:false}, iter.next());
+ assertEquals({value:undefined, done:true}, iter.next());
+})();
+
+(function TestIteratorFromManual() {
+ // Make the result objects so their identities can be used for testing
+ // passthrough of next().
+ let nextResult = { value: 42, done: false };
+ let returnResult = { value: 'ha ha ha... yes!' };
+ let iter = {
+ next() { return nextResult; },
+ ['return']() { return returnResult; }
+ };
+ let wrapper = Iterator.from(iter);
+ TestValidIteratorWrapperSurface(wrapper);
+ assertSame(iter.next(), wrapper.next());
+ assertSame(iter.return(), wrapper.return());
+})();
+
+(function TestIteratorFromNotWrapped() {
+ let obj = {
+ *[Symbol.iterator]() {
+ yield 42;
+ yield 'ha ha ha... yes';
+ }
+ };
+ // Objects that have iterators aren't wrapped.
+ let gen = obj[Symbol.iterator]();
+ let wrapper = Iterator.from(obj);
+ assertSame(Object.getPrototypeOf(gen), Object.getPrototypeOf(wrapper));
+ assertEquals({value: 42, done: false }, wrapper.next());
+ assertEquals({value: 'ha ha ha... yes', done: false }, wrapper.next());
+ assertEquals({value: undefined, done: true }, wrapper.next());
+})();
+
+assertThrows(() => {
+ Iterator.from({[Symbol.iterator]: "not callable"});
+}, TypeError);
+
+assertThrows(() => {
+ Iterator.from({[Symbol.iterator]() {
+ return "not an object";
+ }});
+}, TypeError);
+
+assertThrows(() => {
+ Iterator.from({[Symbol.iterator]() {
+ return { next: "not callable" };
+ }});
+}, TypeError);
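// A minimal sketch (hypothetical iterator) of what the tests above cover,
// assuming --harmony-iterator-helpers as in the flag line: Iterator.from wraps
// plain iterator-like objects so the iterator helpers become available, while
// values already inheriting from Iterator.prototype are returned unwrapped.
const plain = {i: 0, next() { return {value: this.i++, done: this.i > 3}; }};
const wrapped = Iterator.from(plain);
// wrapped.next() => {value: 0, done: false}, and helpers such as wrapped.map()
// now work because the wrapper's prototype chain includes Iterator.prototype.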
diff --git a/deps/v8/test/mjsunit/harmony/iterator-helpers.js b/deps/v8/test/mjsunit/harmony/iterator-helpers.js
new file mode 100644
index 0000000000..9de592e134
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/iterator-helpers.js
@@ -0,0 +1,320 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-iterator-helpers
+
+function* gen() {
+ yield 42;
+ yield 43;
+}
+
+function* longerGen() {
+ yield 42;
+ yield 43;
+ yield 44;
+ yield 45;
+}
+
+function TestHelperPrototypeSurface(helper) {
+ const proto = Object.getPrototypeOf(helper);
+ assertEquals('Iterator Helper', proto[Symbol.toStringTag]);
+ assertTrue(Object.hasOwn(proto, 'next'));
+ assertTrue(Object.hasOwn(proto, 'return'));
+ assertEquals('function', typeof proto.next);
+ assertEquals('function', typeof proto.return);
+ assertEquals(0, proto.next.length);
+ assertEquals(0, proto.return.length);
+ assertEquals('next', proto.next.name);
+ assertEquals('return', proto.return.name);
+}
+
+// --- Test Map helper
+
+(function TestMap() {
+ const iter = gen();
+ assertEquals('function', typeof iter.map);
+ assertEquals(1, iter.map.length);
+ assertEquals('map', iter.map.name);
+ let counters = [];
+ const mapIter = iter.map((x, i) => {
+ counters.push(i);
+ return x*2;
+ });
+ TestHelperPrototypeSurface(mapIter);
+ assertEquals({value: 84, done: false }, mapIter.next());
+ assertEquals({value: 86, done: false }, mapIter.next());
+ assertEquals([0,1], counters);
+ assertEquals({value: undefined, done: true }, mapIter.next());
+})();
+
+// --- Test Filter helper
+
+(function TestFilter() {
+ const iter = gen();
+ assertEquals('function', typeof iter.filter);
+ assertEquals(1, iter.filter.length);
+ assertEquals('filter', iter.filter.name);
+ const filterIter = iter.filter((x, i) => {
+ return x%2 == 0;
+ });
+ TestHelperPrototypeSurface(filterIter);
+ assertEquals({value: 42, done: false }, filterIter.next());
+ assertEquals({value: undefined, done: true }, filterIter.next());
+})();
+
+(function TestFilterLastElement() {
+ const iter = gen();
+ const filterIter = iter.filter((x, i) => {
+ return x == 43;
+ });
+ TestHelperPrototypeSurface(filterIter);
+ assertEquals({value: 43, done: false }, filterIter.next());
+ assertEquals({value: undefined, done: true }, filterIter.next());
+})();
+
+(function TestFilterAllElement() {
+ const iter = gen();
+ const filterIter = iter.filter((x, i) => {
+ return x == x;
+ });
+ TestHelperPrototypeSurface(filterIter);
+ assertEquals({value: 42, done: false }, filterIter.next());
+ assertEquals({value: 43, done: false }, filterIter.next());
+ assertEquals({value: undefined, done: true }, filterIter.next());
+})();
+
+(function TestFilterNoElement() {
+ const iter = gen();
+ const filterIter = iter.filter((x, i) => {
+ return x == 0;
+ });
+ TestHelperPrototypeSurface(filterIter);
+ assertEquals({value: undefined, done: true }, filterIter.next());
+})();
+
+// --- Test Take helper
+
+(function TestTake() {
+ const iter = gen();
+ assertEquals('function', typeof iter.take);
+ assertEquals(1, iter.take.length);
+ assertEquals('take', iter.take.name);
+ const takeIter = iter.take(1);
+ TestHelperPrototypeSurface(takeIter);
+ assertEquals({value: 42, done: false }, takeIter.next());
+ assertEquals({value: undefined, done: true }, takeIter.next());
+})();
+
+(function TestTakeAllElements() {
+ const iter = gen();
+ const takeIter = iter.take(2);
+ TestHelperPrototypeSurface(takeIter);
+ assertEquals({value: 42, done: false }, takeIter.next());
+ assertEquals({value: 43, done: false }, takeIter.next());
+ assertEquals({value: undefined, done: true }, takeIter.next());
+})();
+
+(function TestTakeNoElements() {
+ const iter = gen();
+ const takeIter = iter.take(0);
+ TestHelperPrototypeSurface(takeIter);
+ assertEquals({value: undefined, done: true }, takeIter.next());
+})();
+
+(function TestTakeMoreElements() {
+ const iter = gen();
+ const takeIter = iter.take(4);
+ TestHelperPrototypeSurface(takeIter);
+ assertEquals({value: 42, done: false }, takeIter.next());
+ assertEquals({value: 43, done: false }, takeIter.next());
+ assertEquals({value: undefined, done: true }, takeIter.next());
+})();
+
+(function TestTakeNegativeLimit() {
+ const iter = gen();
+ assertThrows(() => {iter.take(-3);});
+})();
+
+(function TestTakeInfinityLimit() {
+ const iter = gen();
+ const takeIter = iter.take(Number.POSITIVE_INFINITY);
+ TestHelperPrototypeSurface(takeIter);
+ assertEquals({value: 42, done: false }, takeIter.next());
+ assertEquals({value: 43, done: false }, takeIter.next());
+ assertEquals({value: undefined, done: true }, takeIter.next());
+})();
+
+(function TestTakeReturnInNormalIterator() {
+ const NormalIterator = {
+ i: 1,
+ next() {
+ if (this.i <= 3) {
+ return {value: this.i++, done: false};
+ } else {
+ return {value: undefined, done: true};
+ }
+ },
+ return() {return {value: undefined, done: true};},
+ };
+
+ Object.setPrototypeOf(NormalIterator, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));
+ const takeIter = NormalIterator.take(1);
+ TestHelperPrototypeSurface(takeIter);
+ assertEquals({value: 1, done: false }, takeIter.next());
+ assertEquals({value: undefined, done: true }, takeIter.next());
+})();
+
+(function TestTakeNoReturnInIterator() {
+ const IteratorNoReturn = {
+ i: 1,
+ next() {
+ if (this.i <= 3) {
+ return {value: this.i++, done: false};
+ } else {
+ return {value: undefined, done: true};
+ }
+ },
+ };
+
+ Object.setPrototypeOf(
+ IteratorNoReturn,
+ Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));
+ assertThrows(() => {iter.take(1);});
+})();
+
+(function TestTakeNonObjectReturnInIterator() {
+ const IteratorThrowReturn = {
+ i: 1,
+ next() {
+ if (this.i <= 3) {
+ return {value: this.i++, done: false};
+ } else {
+ return {value: undefined, done: true};
+ }
+ },
+ return () {
+ throw new Error('Non-object return');
+ },
+ };
+
+ Object.setPrototypeOf(
+ IteratorThrowReturn,
+ Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));
+ assertThrows(() => {iter.take(1);});
+})();
+
+// --- Test Drop helper
+
+(function TestDrop() {
+ const iter = longerGen();
+ assertEquals('function', typeof iter.drop);
+ assertEquals(1, iter.drop.length);
+ assertEquals('drop', iter.drop.name);
+ const dropIter = iter.drop(1);
+ TestHelperPrototypeSurface(dropIter);
+ assertEquals({value: 43, done: false}, dropIter.next());
+ assertEquals({value: 44, done: false}, dropIter.next());
+ assertEquals({value: 45, done: false}, dropIter.next());
+ assertEquals({value: undefined, done: true}, dropIter.next());
+})();
+
+(function TestDropAllElements() {
+ const iter = longerGen();
+ const dropIter = iter.drop(4);
+ TestHelperPrototypeSurface(dropIter);
+ assertEquals({value: undefined, done: true}, dropIter.next());
+})();
+
+(function TestDropNoElements() {
+ const iter = longerGen();
+ const dropIter = iter.drop(0);
+ TestHelperPrototypeSurface(dropIter);
+ assertEquals({value: 42, done: false}, dropIter.next());
+ assertEquals({value: 43, done: false}, dropIter.next());
+ assertEquals({value: 44, done: false}, dropIter.next());
+ assertEquals({value: 45, done: false}, dropIter.next());
+ assertEquals({value: undefined, done: true}, dropIter.next());
+})();
+
+(function TestDropNegativeLimit() {
+ const iter = longerGen();
+ assertThrows(() => {
+ iter.drop(-3);
+ });
+})();
+
+(function TestDropInfinityLimit() {
+ const iter = longerGen();
+ const dropIter = iter.drop(Number.POSITIVE_INFINITY);
+ TestHelperPrototypeSurface(dropIter);
+ assertEquals({value: undefined, done: true}, dropIter.next());
+})();
+
+(function TestDropReturnInNormalIterator() {
+ const NormalIterator = {
+ i: 1,
+ next() {
+ if (this.i <= 3) {
+ return {value: this.i++, done: false};
+ } else {
+ return {value: undefined, done: true};
+ }
+ },
+ return () {
+ return {value: undefined, done: true};
+ },
+ };
+
+ Object.setPrototypeOf(
+ NormalIterator,
+ Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));
+ const dropIter = NormalIterator.drop(1);
+ TestHelperPrototypeSurface(dropIter);
+ assertEquals({value: 2, done: false}, dropIter.next());
+ assertEquals({value: 3, done: false}, dropIter.next());
+ assertEquals({value: undefined, done: true}, dropIter.next());
+})();
+
+(function TestDropNoReturnInIterator() {
+ const IteratorNoReturn = {
+ i: 1,
+ next() {
+ if (this.i <= 3) {
+ return {value: this.i++, done: false};
+ } else {
+ return {value: undefined, done: true};
+ }
+ },
+ };
+
+ Object.setPrototypeOf(
+ IteratorNoReturn,
+ Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));
+ assertThrows(() => {
+ iter.drop(4);
+ });
+})();
+
+(function TestDropNonObjectReturnInIterator() {
+ const IteratorThrowReturn = {
+ i: 1,
+ next() {
+ if (this.i <= 3) {
+ return {value: this.i++, done: false};
+ } else {
+ return {value: undefined, done: true};
+ }
+ },
+ return () {
+ throw new Error('Non-object return');
+ },
+ };
+
+ Object.setPrototypeOf(
+ IteratorThrowReturn,
+ Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));
+ assertThrows(() => {
+ iter.drop(4);
+ });
+})();
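// A short sketch (hypothetical generator) of how the helpers above compose;
// they are lazy, so the source is only pulled as far as needed. Assumes
// --harmony-iterator-helpers as in the tests.
function* nums() { for (let i = 1; i <= 10; i++) yield i; }
const helper = nums().filter(x => x % 2 === 0)  // 2, 4, 6, ...
                     .map(x => x * x)           // 4, 16, 36, ...
                     .take(2);                  // stop after two results
// helper.next() => {value: 4, done: false}
// helper.next() => {value: 16, done: false}
// helper.next() => {value: undefined, done: true}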
diff --git a/deps/v8/test/mjsunit/harmony/json-parse-with-source-snapshot.js b/deps/v8/test/mjsunit/harmony/json-parse-with-source-snapshot.js
new file mode 100644
index 0000000000..8f5565bd62
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/json-parse-with-source-snapshot.js
@@ -0,0 +1,92 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-json-parse-with-source
+
+const replacements = [42,
+ ['foo'],
+ {foo:'bar'},
+ 'foo'];
+
+function TestArrayForwardModify(replacement) {
+ let alreadyReplaced = false;
+ let expectedKeys = ['0','1',''];
+ // lol who designed reviver semantics
+ if (typeof replacement === 'object') {
+ expectedKeys.splice(1, 0, ...Object.keys(replacement));
+ }
+ const o = JSON.parse('[1, 2]', function (k, v, { source }) {
+ assertEquals(expectedKeys.shift(), k);
+ if (k === '0') {
+ if (!alreadyReplaced) {
+ this[1] = replacement;
+ alreadyReplaced = true;
+ }
+ } else if (k !== '') {
+ assertSame(undefined, source);
+ }
+ return this[k];
+ });
+ assertEquals(0, expectedKeys.length);
+ assertEquals([1, replacement], o);
+}
+
+function TestObjectForwardModify(replacement) {
+ let alreadyReplaced = false;
+ let expectedKeys = ['p','q',''];
+ if (typeof replacement === 'object') {
+ expectedKeys.splice(1, 0, ...Object.keys(replacement));
+ }
+ const o = JSON.parse('{"p":1, "q":2}', function (k, v, { source }) {
+ assertEquals(expectedKeys.shift(), k);
+ if (k === 'p') {
+ if (!alreadyReplaced) {
+ this.q = replacement;
+ alreadyReplaced = true;
+ }
+ } else if (k !== '') {
+ assertSame(undefined, source);
+ }
+ return this[k];
+ });
+ assertEquals(0, expectedKeys.length);
+ assertEquals({p:1, q:replacement}, o);
+}
+
+for (const r of replacements) {
+ TestArrayForwardModify(r);
+ TestObjectForwardModify(r);
+}
+
+(function TestArrayAppend() {
+ let log = [];
+ const o = JSON.parse('[1,[]]', function (k, v, { source }) {
+ log.push([k, v, source]);
+ if (v === 1) {
+ this[1].push('barf');
+ }
+ return this[k];
+ });
+ assertEquals([['0', 1, '1'],
+ ['0', 'barf', undefined],
+ ['1', ['barf'], undefined],
+ ['', [1, ['barf']], undefined]],
+ log);
+})();
+
+(function TestObjectAddProperty() {
+ let log = [];
+ const o = JSON.parse('{"p":1,"q":{}}', function (k, v, { source }) {
+ log.push([k, v, source]);
+ if (v === 1) {
+ this.q.added = 'barf';
+ }
+ return this[k];
+ });
+ assertEquals([['p', 1, '1'],
+ ['added', 'barf', undefined],
+ ['q', {added:'barf'}, undefined],
+ ['', {p:1, q:{added:'barf'}}, undefined]],
+ log);
+})();
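// A minimal sketch (hypothetical input) of the reviver signature exercised
// above, assuming --harmony-json-parse-with-source: the third argument exposes
// the raw source text for primitive values, and the snapshot test shows it is
// dropped (undefined) once the reviver mutates values ahead of the walk.
const parsed = JSON.parse('{"big": 9007199254740993}', (key, value, {source}) => {
  // For "big", source is the exact text '9007199254740993', so the precision
  // lost when the value was converted to a Number can be recovered.
  return key === 'big' ? BigInt(source) : value;
});
// parsed.big === 9007199254740993n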
diff --git a/deps/v8/test/mjsunit/harmony/json-parse-with-source.js b/deps/v8/test/mjsunit/harmony/json-parse-with-source.js
index 9699082529..ae78ed3f2b 100644
--- a/deps/v8/test/mjsunit/harmony/json-parse-with-source.js
+++ b/deps/v8/test/mjsunit/harmony/json-parse-with-source.js
@@ -250,7 +250,7 @@ function assertIsRawJson(rawJson, expectedRawJsonValue) {
} else if (key == 'b') {
this.c = 3;
assertEquals(2, value);
- assertEquals('1', source);
+ assertEquals(undefined, source);
} else if (key == 'c') {
assertEquals(3, value);
assertEquals(undefined, source);
@@ -271,11 +271,11 @@ function assertIsRawJson(rawJson, expectedRawJsonValue) {
} else if (key == '1') {
this[2] = 4;
assertEquals(3, value);
- assertEquals('2', source);
+ assertEquals(undefined, source);
} else if(key == '2') {
this[3] = 5;
assertEquals(4, value);
- assertEquals('3', source);
+ assertEquals(undefined, source);
} else if(key == '5'){
assertEquals(5, value);
assertEquals(undefined, source);
diff --git a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
index 8ab1e31848..d732f73568 100644
--- a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
@@ -166,6 +166,16 @@
}
{
+ let x = 'c';
+ class C {
+ [x] = function() { return 1 };
+ }
+
+ let c = new C;
+ assertEquals('c', c.c.name);
+}
+
+{
let d = function() { return new.target; }
class C {
c = d;
diff --git a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
index 0c7a3e5516..aa4918ae0c 100644
--- a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
@@ -144,6 +144,15 @@
}
{
+ let x = 'c';
+ class C {
+ static [x] = function() { return 1 };
+ }
+
+ assertEquals('c', C.c.name);
+}
+
+{
let d = function() { return new.target; }
class C {
static c = d;
@@ -463,8 +472,39 @@ y()();
let q = { ["z"]: class { static y = this.name } }
assertEquals(q.z.y, 'z');
+ let r = { ["z"]: class { static y = this.name; static name = "zz" } }
+ let r_z_name_desc = Object.getOwnPropertyDescriptor(r.z, "name");
+ assertEquals(r.z.y, 'z');
+ assertEquals(r_z_name_desc, {
+ value: 'zz', enumerable: true, writable: true, configurable: true
+ });
+
+ let s = { ["z"]: class Y { static y = this.name } }
+ assertEquals(s.z.y, 'Y');
+
const C = class {
static x = this.name;
}
assertEquals(C.x, 'C');
}
+
+{
+ let p = class { static z = class { static y = this.name } }
+ assertEquals(p.z.y, 'z');
+
+ let q = class { static ["z"] = class { static y = this.name } }
+ assertEquals(q.z.y, 'z');
+
+ let r = class {
+ static ["z"] =
+ class { static y = this.name; static name = "zz" }
+ }
+ let r_z_name_desc = Object.getOwnPropertyDescriptor(r.z, "name");
+ assertEquals(r.z.y, 'z');
+ assertEquals(r_z_name_desc, {
+ value: 'zz', enumerable: true, writable: true, configurable: true
+ });
+
+ let s = class { static ["z"] = class Y { static y = this.name } }
+ assertEquals(s.z.y, 'Y');
+}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-unicode-sets.js b/deps/v8/test/mjsunit/harmony/regexp-unicode-sets.js
index 19f95bbffb..fb17068f64 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-unicode-sets.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-unicode-sets.js
@@ -48,10 +48,41 @@ assertEarlyError('/[~~]/v');
assertEarlyError('/[a&&&]/v');
assertEarlyError('/[&&&a]/v');
+// Unterminated string disjunction.
+assertEarlyError('/[\\q{foo]/v');
+assertEarlyError('/[\\q{foo|]/v');
+
+// Negating classes containing strings is not allowed.
+assertEarlyError('/[^\\q{foo}]/v');
+assertEarlyError('/[^\\q{}]/v'); // Empty string counts as string.
+assertEarlyError('/[^[\\q{foo}]]/v');
+assertEarlyError('/[^[\\p{Basic_Emoji}]/v');
+assertEarlyError('/[^\\q{foo}&&\\q{bar}]/v');
+assertEarlyError('/[^\\q{foo}--\\q{bar}]/v');
+// Exceptions when negating the class is allowed:
+// The "string" contains only single characters.
+/[^\q{a|b|c}]/v;
+// Not all operands of an intersection contain strings.
+/[^\q{foo}&&\q{bar}&&a]/v;
+// The first operand of a subtraction doesn't contain strings.
+/[^a--\q{foo}--\q{bar}]/v;
+
+// Negated properties of strings are not allowed.
+assertEarlyError('/\\P{Basic_Emoji}/v');
+assertEarlyError('/\\P{Emoji_Keycap_Sequence}/v');
+assertEarlyError('/\\P{RGI_Emoji_Modifier_Sequence}/v');
+assertEarlyError('/\\P{RGI_Emoji_Flag_Sequence}/v');
+assertEarlyError('/\\P{RGI_Emoji_Tag_Sequence}/v');
+assertEarlyError('/\\P{RGI_Emoji_ZWJ_Sequence}/v');
+assertEarlyError('/\\P{RGI_Emoji}/v');
+
+// Invalid identity escape in string disjunction.
+assertEarlyError('/[\\q{\\w}]/v');
+
const allAscii = Array.from(
{length: 127}, (v, i) => { return String.fromCharCode(i); });
-function check(re, expectMatch, expectNoMatch) {
+function check(re, expectMatch, expectNoMatch = [], negationValid = true) {
if (expectNoMatch === undefined) {
const expectSet = new Set(expectMatch.map(val => {
return (typeof val == 'number') ? String(val) : val; }));
@@ -63,14 +94,22 @@ function check(re, expectMatch, expectNoMatch) {
for (const noMatch of expectNoMatch) {
assertFalse(re.test(noMatch), `${re}.test(${noMatch})`);
}
- // Nest the current RegExp in a negated class and check expectations are
- // inversed.
- const inverted = new RegExp(`[^${re.source}]`, re.flags);
- for (const match of expectMatch) {
- assertFalse(inverted.test(match), `${inverted}.test(${match})`);
- }
- for (const noMatch of expectNoMatch) {
- assertTrue(inverted.test(noMatch), `${inverted}.test(${noMatch})`);
+ if (!negationValid) {
+ // Negation of classes containing strings is an error.
+ const negated = `[^${re.source}]`;
+ assertThrows(() => { new RegExp(negated, `${re.flags}`); }, SyntaxError,
+ `Invalid regular expression: /${negated}/${re.flags}: ` +
+ `Negated character class may contain strings`);
+ } else {
+ // Nest the current RegExp in a negated class and check expectations are
+ // inversed.
+ const inverted = new RegExp(`[^${re.source}]`, re.flags);
+ for (const match of expectMatch) {
+ assertFalse(inverted.test(match), `${inverted}.test(${match})`);
+ }
+ for (const noMatch of expectNoMatch) {
+ assertTrue(inverted.test(noMatch), `${inverted}.test(${noMatch})`);
+ }
}
}
@@ -126,18 +165,87 @@ check(/[Ā-č]/v, Array.from('ĀāĂ㥹Ćć'), Array.from('abc'));
check(/[ĀĂĄĆ]/vi, Array.from('ĀāĂ㥹Ćć'), Array.from('abc'));
check(/[āăąć]/vi, Array.from('ĀāĂ㥹Ćć'), Array.from('abc'));
+// String disjunctions
+check(/[\q{foo|bar|0|5}]/v, ['foo', 'bar', 0, 5], ['fo', 'baz'], false)
+check(/[\q{foo|bar}[05]]/v, ['foo', 'bar', 0, 5], ['fo', 'baz'], false)
+check(/[\q{foo|bar|0|5}&&\q{bar}]/v, ['bar'], ['foo', 0, 5, 'fo', 'baz'], false)
+// The second operand of the intersection doesn't contain strings, so the result
+// will not contain strings and therefore negation is valid.
+check(/[\q{foo|bar|0|5}&&\d]/v, [0, 5], ['foo', 'bar', 'fo', 'baz'], true)
+check(/[\q{foo|bar|0|5}--\q{foo}]/v, ['bar', 0, 5], ['foo', 'fo', 'baz'], false)
+check(/[\q{foo|bar|0|5}--\d]/v, ['foo', 'bar'], [0, 5, 'fo', 'baz'], false)
+
+check(
+ /[\q{foo|bar|0|5}&&\q{bAr}]/vi, ['bar', 'bAr', 'BAR'],
+ ['foo', 0, 5, 'fo', 'baz'], false)
+check(
+ /[\q{foo|bar|0|5}--\q{FoO}]/vi, ['bar', 'bAr', 'BAR', 0, 5],
+ ['foo', 'FOO', 'fo', 'baz'], false)
+
+check(/[\q{ĀĂĄĆ|AaAc}&&\q{āăąć}]/vi, ['ĀĂĄĆ', 'āăąć'], ['AaAc'], false);
+check(
+ /[\q{ĀĂĄĆ|AaAc}--\q{āăąć}]/vi, ['AaAc', 'aAaC'], ['ĀĂĄĆ', 'āăąć'],
+ false);
+
+// Empty nested classes.
+check(/[a-c\q{foo|bar}[]]/v, ['a','b','c','foo','bar'], [], false);
+check(/[[a-c\q{foo|bar}]&&[]]/v, [], ['a','b','c','foo','bar'], true);
+check(/[[a-c\q{foo|bar}]--[]]/v, ['a','b','c','foo','bar'], [], false);
+check(/[[]&&[a-c\q{foo|bar}]]/v, [], ['a','b','c','foo','bar'], true);
+check(/[[]--[a-c\q{foo|bar}]]/v, [], ['a','b','c','foo','bar'], true);
+
+// An empty string disjunction matches nothing, but succeeds.
+let res = /[\q{}]/v.exec('foo');
+assertNotNull(res);
+assertEquals(1, res.length);
+assertEquals('', res[0]);
+
+// Ensure longest strings are matched first.
+assertEquals(['xyz'], /[a-c\q{W|xy|xyz}]/v.exec('xyzabc'))
+assertEquals(['xyz'], /[a-c\q{W|xyz|xy}]/v.exec('xyzabc'))
+assertEquals(['xyz'], /[\q{W|xyz|xy}a-c]/v.exec('xyzabc'))
+// Empty string is last.
+assertEquals(['a'], /[\q{W|}a-c]/v.exec('abc'))
+
// Some more sophisticated tests taken from
// https://v8.dev/features/regexp-v-flag
+assertTrue(/^\p{RGI_Emoji}$/v.test('⚽'));
+assertTrue(/^\p{RGI_Emoji}$/v.test('👨🏾‍⚕️'));
assertFalse(/[\p{Script_Extensions=Greek}--π]/v.test('π'));
assertFalse(/[\p{Script_Extensions=Greek}--[αβγ]]/v.test('α'));
assertFalse(/[\p{Script_Extensions=Greek}--[α-γ]]/v.test('β'));
assertTrue(/[\p{Decimal_Number}--[0-9]]/v.test('𑜹'));
assertFalse(/[\p{Decimal_Number}--[0-9]]/v.test('4'));
+assertTrue(
+ /^\p{RGI_Emoji_Tag_Sequence}$/v.test('🏴󠁧󠁢󠁳󠁣󠁴󠁿'));
+assertFalse(
+ /^[\p{RGI_Emoji_Tag_Sequence}--\q{🏴󠁧󠁢󠁳󠁣󠁴󠁿}]$/v.test(
+ '🏴󠁧󠁢󠁳󠁣󠁴󠁿'));
assertTrue(/[\p{Script_Extensions=Greek}&&\p{Letter}]/v.test('π'));
assertFalse(/[\p{Script_Extensions=Greek}&&\p{Letter}]/v.test('𐆊'));
assertTrue(/[\p{White_Space}&&\p{ASCII}]/v.test('\n'));
assertFalse(/[\p{White_Space}&&\p{ASCII}]/v.test('\u2028'));
assertTrue(/[\p{Script_Extensions=Mongolian}&&\p{Number}]/v.test('᠗'));
assertFalse(/[\p{Script_Extensions=Mongolian}&&\p{Number}]/v.test('ᠴ'));
+assertTrue(/^[\p{Emoji_Keycap_Sequence}\p{ASCII}\q{🇧🇪|abc}xyz0-9]$/v.test(
+ '4️⃣'));
+assertTrue(
+ /^[\p{Emoji_Keycap_Sequence}\p{ASCII}\q{🇧🇪|abc}xyz0-9]$/v.test('_'));
+assertTrue(
+ /^[\p{Emoji_Keycap_Sequence}\p{ASCII}\q{🇧🇪|abc}xyz0-9]$/v.test('🇧🇪'));
+assertTrue(/^[\p{Emoji_Keycap_Sequence}\p{ASCII}\q{🇧🇪|abc}xyz0-9]$/v.test(
+ 'abc'));
+assertTrue(
+ /^[\p{Emoji_Keycap_Sequence}\p{ASCII}\q{🇧🇪|abc}xyz0-9]$/v.test('x'));
+assertTrue(
+ /^[\p{Emoji_Keycap_Sequence}\p{ASCII}\q{🇧🇪|abc}xyz0-9]$/v.test('4'));
+assertTrue(
+ /[\p{RGI_Emoji_Flag_Sequence}\p{RGI_Emoji_Tag_Sequence}]/v.test('🇧🇪'));
+assertTrue(/[\p{RGI_Emoji_Flag_Sequence}\p{RGI_Emoji_Tag_Sequence}]/v.test(
+ '🏴󠁧󠁢󠁥󠁮󠁧󠁿'));
+assertTrue(
+ /[\p{RGI_Emoji_Flag_Sequence}\p{RGI_Emoji_Tag_Sequence}]/v.test('🇨🇭'));
+assertTrue(/[\p{RGI_Emoji_Flag_Sequence}\p{RGI_Emoji_Tag_Sequence}]/v.test(
+ '🏴󠁧󠁢󠁷󠁬󠁳󠁿'));
assertEquals('XXXXXX4#', 'aAbBcC4#'.replaceAll(/\p{Lowercase_Letter}/giv, 'X'));
assertEquals('XXXXXX4#', 'aAbBcC4#'.replaceAll(/[^\P{Lowercase_Letter}]/giv, 'X'));
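// A compact sketch (hypothetical patterns) of the v-flag syntax the new cases
// cover, assuming --harmony-regexp-unicode-sets: \q{...} matches whole strings
// inside a class, and -- / && are set subtraction / intersection.
const words = /^[\q{foo|bar}0-9]$/v;
// words.test('foo') === true, words.test('7') === true, words.test('fo') === false
const consonants = /[[a-z]--[aeiou]]/v;
// consonants.test('b') === true, consonants.test('a') === false
// Negating a class that can match strings is a SyntaxError, e.g. /[^\q{foo}]/v.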
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1410963.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1410963.js
new file mode 100644
index 0000000000..6c41f70567
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1410963.js
@@ -0,0 +1,8 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-unicode-sets
+
+assertTrue(/[a-cB]/vi.test('b'));
+assertTrue(/[a-cB]/vi.test('B'));
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1422812.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1422812.js
new file mode 100644
index 0000000000..6f7e0d682a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1422812.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-json-parse-with-source
+
+assertThrows(() => JSON.rawJSON(this), SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1423310.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1423310.js
new file mode 100644
index 0000000000..c9721d6867
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-1423310.js
@@ -0,0 +1,13 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-json-parse-with-source
+
+const v2 = JSON.rawJSON(-2025014085);
+const o3 = {
+ "type": v2,
+ __proto__: v2,
+};
+const v6 = [o3];
+Reflect.apply(JSON.stringify, this, v6);
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-callsite-throw.js b/deps/v8/test/mjsunit/harmony/shadowrealm-callsite-throw.js
new file mode 100644
index 0000000000..75a09da23f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-callsite-throw.js
@@ -0,0 +1,114 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-shadow-realm
+
+// Test that CallSite#getFunction and CallSite#getThis throw inside ShadowRealms
+// and cannot access objects from the outside, as otherwise we could violate the
+// callable boundary invariant.
+(function testInside() {
+ const shadowRealm = new ShadowRealm();
+
+ // The ShadowRealm won't have assertThrows, so use try-catch and accumulate a
+ // message string.
+ const wrapped = shadowRealm.evaluate(`
+Error.prepareStackTrace = function(err, frames) {
+ let a = [];
+ for (let i = 0; i < frames.length; i++) {
+ try {
+ a.push(frames[i].getFunction());
+ } catch (e) {
+ a.push("getFunction threw");
+ }
+ try {
+ a.push(frames[i].getThis());
+ } catch (e) {
+ a.push("getThis threw");
+ }
+ }
+ return a.join(' ');
+};
+
+function inner() {
+ try {
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+}
+
+inner;
+`);
+
+ (function outer() {
+ // There are 4 frames, youngest to oldest:
+ //
+ // inner
+ // outer
+ // testInside
+ // top-level
+ //
+ // So getFunction/getThis should throw 4 times since the prepareStackTrace
+ // hook is executing inside the ShadowRealm.
+ assertEquals("getFunction threw getThis threw " +
+ "getFunction threw getThis threw " +
+ "getFunction threw getThis threw " +
+ "getFunction threw getThis threw", wrapped());
+ })();
+})();
+
+// Test that CallSite#getFunction and CallSite#getThis throw for ShadowRealm
+// objects from the outside, as otherwise we can also violate the callable
+// boundary.
+(function testOutside() {
+ Error.prepareStackTrace = function(err, frames) {
+ let a = [];
+ for (let i = 0; i < frames.length; i++) {
+ try {
+ frames[i].getFunction();
+ a.push(`functionName: ${frames[i].getFunctionName()}`);
+ } catch (e) {
+ a.push(`${frames[i].getFunctionName()} threw`);
+ }
+ try {
+ frames[i].getThis();
+ a.push("t");
+ } catch (e) {
+ a.push("getThis threw");
+ }
+ }
+ return JSON.stringify(a);
+ };
+ const shadowRealm = new ShadowRealm();
+ const wrap = shadowRealm.evaluate(`
+function trampolineMaker(callback) {
+ return function trampoline() { return callback(); };
+}
+trampolineMaker;
+`);
+ const wrapped = wrap(function callback() {
+ try {
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+ });
+
+
+ // There are 4 frames, youngest to oldest:
+ //
+ // callback (in outer realm)
+ // trampoline (in ShadowRealm)
+ // testOutside (in outer realm)
+ // top-level (in outer realm)
+ //
+ // The frame corresponding to trampoline should throw, since the outer realm
+ // should not get references to ShadowRealm objects.
+ assertEquals(JSON.stringify(
+ ["functionName: callback", "t",
+ "trampoline threw", "getThis threw",
+ "functionName: testOutside", "t",
+ "functionName: null", "t"]), wrapped());
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js b/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
index cabad58e7e..8288963428 100644
--- a/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-evaluate.js
@@ -59,4 +59,23 @@ assertThrows(() => shadowRealm.evaluate(`
var revocable = Proxy.revocable(() => 1, {});
revocable.revoke();
revocable.proxy;
-`), TypeError, "Cannot wrap target callable");
+`), TypeError, "Cannot wrap target callable (TypeError: Cannot perform 'getOwnPropertyDescriptor' on a proxy that has been revoked)");
+
+// no-side-effects inspection on thrown error
+assertThrows(() => shadowRealm.evaluate(`
+throw new Error('foo');
+`), TypeError, "ShadowRealm evaluate threw (Error: foo)");
+
+// no-side-effects inspection on thrown error
+assertThrows(() => shadowRealm.evaluate(`
+globalThis.messageAccessed = false;
+const err = new Error('foo');
+Object.defineProperty(err, 'message', {
+ get: function() {
+ globalThis.messageAccessed = true;
+ return 'bar';
+ },
+});
+throw err;
+`), TypeError, "ShadowRealm evaluate threw (Error)");
+assertFalse(shadowRealm.evaluate('globalThis.messageAccessed'));
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-importvalue.js b/deps/v8/test/mjsunit/harmony/shadowrealm-importvalue.js
index 656201e76a..54007c8190 100644
--- a/deps/v8/test/mjsunit/harmony/shadowrealm-importvalue.js
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-importvalue.js
@@ -52,14 +52,22 @@ globalThis.foobar = 'outer-scope';
const promise = shadowRealm.importValue('./shadowrealm-skip-not-found.mjs', 'foo');
// Promise is created in caller realm.
assertInstanceof(promise, Promise);
- assertThrowsAsync(promise, TypeError, 'Cannot import in the ShadowRealm');
+ assertThrowsAsync(promise, TypeError, /Cannot import in ShadowRealm \(Error: .+shadowrealm-skip-not-found\.mjs\)/);
}
{
const promise = shadowRealm.importValue('./shadowrealm-skip-2-throw.mjs', 'foo');
// Promise is created in caller realm.
assertInstanceof(promise, Promise);
- assertThrowsAsync(promise, TypeError, 'Cannot import in the ShadowRealm');
+ assertThrowsAsync(promise, TypeError, 'Cannot import in ShadowRealm (Error: foobar)');
+}
+
+// no-side-effects inspection on thrown error
+{
+ const promise = shadowRealm.importValue('./shadowrealm-skip-3-throw-object.mjs', 'foo');
+ // Promise is created in caller realm.
+ assertInstanceof(promise, Promise);
+ assertThrowsAsync(promise, TypeError, 'Cannot import in ShadowRealm ([object Object])');
}
// Invalid args
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs b/deps/v8/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs
new file mode 100644
index 0000000000..cc6c3fdd73
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-skip-3-throw-object.mjs
@@ -0,0 +1,2 @@
+export const foo = 'bar';
+throw { message: 'foobar' };
diff --git a/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js
index 5c39a71f5b..782167b318 100644
--- a/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js
+++ b/deps/v8/test/mjsunit/harmony/shadowrealm-wrapped-function.js
@@ -28,3 +28,26 @@ var proxy = revocable.proxy;
assertEquals(proxy(), 1);
revocable.revoke();
assertThrows(() => proxy(), TypeError, "Cannot perform 'apply' on a proxy that has been revoked");
+
+// no-side-effects inspection on thrown error
+var wrapped = shadowRealm.evaluate(`() => {
+ throw new Error('foo');
+}`);
+assertThrows(() => wrapped(), TypeError, "WrappedFunction threw (Error: foo)");
+
+// no-side-effects inspection on thrown error
+var wrapped = shadowRealm.evaluate(`
+globalThis.messageAccessed = false;
+() => {
+ const err = new Error('foo');
+ Object.defineProperty(err, 'message', {
+ get: function() {
+ globalThis.messageAccessed = true;
+ return 'bar';
+ },
+ });
+ throw err;
+}
+`);
+assertThrows(() => wrapped(), TypeError, "WrappedFunction threw (Error)");
+assertFalse(shadowRealm.evaluate('globalThis.messageAccessed'));
diff --git a/deps/v8/test/mjsunit/harmony/string-iswellformed-external-uncached.js b/deps/v8/test/mjsunit/harmony/string-iswellformed-external-uncached.js
new file mode 100644
index 0000000000..193294e394
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/string-iswellformed-external-uncached.js
@@ -0,0 +1,42 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-externalize-string --harmony-string-is-well-formed
+
+(function TestIsWellFormed() {
+ const short2ByteWellFormed = '\u1234';
+ const short2ByteIllFormed = '\uD83D';
+
+ assertTrue(short2ByteWellFormed.isWellFormed());
+ assertFalse(short2ByteIllFormed.isWellFormed());
+
+ try {
+ // Turn the strings into uncached external strings to hit the slow runtime
+ // path.
+ externalizeString(short2ByteWellFormed, true);
+ externalizeString(short2ByteIllFormed, true);
+ } catch (e) {}
+
+ assertTrue(short2ByteWellFormed.isWellFormed());
+ assertFalse(short2ByteIllFormed.isWellFormed());
+})();
+
+(function TestToWellFormed() {
+ const short2ByteWellFormed = '\u1234';
+ const short2ByteIllFormed = '\uD83D';
+
+ assertTrue(short2ByteWellFormed.isWellFormed());
+ assertFalse(short2ByteIllFormed.isWellFormed());
+
+ try {
+ // Turn the strings into uncached external strings to hit the slow runtime
+ // path.
+ externalizeString(short2ByteWellFormed, true);
+ externalizeString(short2ByteIllFormed, true);
+ } catch (e) {}
+
+ assertEquals('\u1234', short2ByteWellFormed.toWellFormed());
+ // U+FFFD (REPLACEMENT CHARACTER)
+ assertEquals('\uFFFD', short2ByteIllFormed.toWellFormed());
+})();
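// A tiny sketch of the API the externalized-string test exercises, assuming
// --harmony-string-is-well-formed: lone surrogates make a string ill-formed,
// and toWellFormed() replaces each of them with U+FFFD.
'\u{1F600}'.isWellFormed();   // true  (a properly paired surrogate pair)
'\uD83D'.isWellFormed();      // false (a lone lead surrogate)
'\uD83Dabc'.toWellFormed();   // '\uFFFDabc'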
diff --git a/deps/v8/test/mjsunit/harmony/string-iswellformed-flat-indirect.js b/deps/v8/test/mjsunit/harmony/string-iswellformed-flat-indirect.js
new file mode 100644
index 0000000000..282944f821
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/string-iswellformed-flat-indirect.js
@@ -0,0 +1,35 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-is-well-formed
+
+(function TestSliced() {
+ // toString on the function returns a sliced string on the script source,
+ // which is 2-byte.
+ const fooString = foo.toString();
+ assertTrue(fooString.isWellFormed());
+ assertEquals(fooString, fooString.toWellFormed());
+})();
+
+function TestCons(a, b) {
+ const s = a + b;
+ // Flatten it before calling isWellFormed to get a flat cons.
+ s.endsWith('a');
+ assertTrue(s.isWellFormed());
+ assertEquals(s, s.toWellFormed());
+}
+TestCons('�', '�');
+
+function TestThin(a, b) {
+ const s = a + b;
+ const o = {};
+ o[s];
+ assertTrue(s.isWellFormed());
+ assertEquals(s, s.toWellFormed());
+}
+TestThin('�', '�');
+
+function foo() {}
+// Make this source file 2-byte.
+'�';
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
index 960ab89487..38f7c4b30b 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
@@ -4,31 +4,36 @@
// Flags: --expose-gc --noincremental-marking
-let r = Realm.create();
+(async function () {
-let cleanup = Realm.eval(r, "var stored_global; function cleanup() { stored_global = globalThis; } cleanup");
-let realm_global_this = Realm.eval(r, "globalThis");
+ let r = Realm.create();
-let fg = new FinalizationRegistry(cleanup);
+ let cleanup = Realm.eval(r, "var stored_global; function cleanup() { stored_global = globalThis; } cleanup");
+ let realm_global_this = Realm.eval(r, "globalThis");
-// Create an object and a register it in the FinalizationRegistry. The object needs
-// to be inside a closure so that we can reliably kill them!
-let weak_cell;
+ let fg = new FinalizationRegistry(cleanup);
-(function() {
- let object = {};
- fg.register(object, {});
+ // Create an object and register it in the FinalizationRegistry. The object needs
+ // to be inside a closure so that we can reliably kill it!
+ (function () {
+ let object = {};
+ fg.register(object, {});
+ // Object goes out of scope.
+ })();
- // object goes out of scope.
-})();
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
-gc();
+ // Assert that the cleanup function was called in its Realm.
+ let timeout_func = function () {
+ let stored_global = Realm.eval(r, "stored_global;");
+ assertNotEquals(stored_global, globalThis);
+ assertEquals(stored_global, realm_global_this);
+ }
-// Assert that the cleanup function was called in its Realm.
-let timeout_func = function() {
- let stored_global = Realm.eval(r, "stored_global;");
- assertNotEquals(stored_global, globalThis);
- assertEquals(stored_global, realm_global_this);
-}
+ setTimeout(timeout_func, 0);
-setTimeout(timeout_func, 0);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
index 4e760144e6..6c4082dfcf 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
@@ -2,60 +2,55 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --expose-gc --noincremental-marking
-// This test asserts that the cleanup function call, scheduled by GC, is a
-// microtask and not a normal task.
+// This test asserts that the cleanup function call, scheduled by GC, is
+// not a microtask but a normal task.
-// Inside a microtask, cause GC (which should schedule the cleanup as
-// microtask). lso schedule another microtask. Assert that the cleanup
-// function ran before the other microtask.
+(async function () {
-let cleanedUp = false;
+ let microtaskInvoked = false;
+ const microtask = () => {
+ assertFalse(cleanedUp);
+ assertFalse(microtaskInvoked);
+ microtaskInvoked = true;
+ };
-function scheduleMicrotask(func) {
- Promise.resolve().then(func);
-}
+ let cleanedUp = false;
+ const cleanup = (holdings) => {
+ assertFalse(cleanedUp);
+ assertTrue(microtaskInvoked);
+ cleanedUp = true;
+ };
-let log = [];
+ const fg = new FinalizationRegistry(cleanup);
-let cleanup = (holdings) => {
- cleanedUp = true;
-}
+ (function() {
+ // Use a closure here to avoid other references to the object which might keep
+ // it alive (e.g., stack frames pointing to it).
+ const object = {};
+ fg.register(object, {});
+ })();
-let fg = new FinalizationRegistry(cleanup);
-let o = null;
+ // The GC will schedule the cleanup as a regular task.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
-(function() {
- // Use a closure here to avoid other references to o which might keep it alive
- // (e.g., stack frames pointing to it).
- o = {};
- fg.register(o, {});
-})();
-
-let microtask = function() {
- log.push("first_microtask");
-
- // cause GC during a microtask
- o = null;
- gc();
-}
+ assertFalse(cleanedUp);
-assertFalse(cleanedUp);
+ // Schedule the microtask.
+ Promise.resolve().then(microtask);
-// enqueue microtask that triggers GC
-Promise.resolve().then(microtask);
+ // Nothing else has been called yet, as we're still in synchronous
+ // execution.
+ assertFalse(microtaskInvoked);
+ assertFalse(cleanedUp);
-// but cleanup callback hasn't been called yet, as we're still in
-// synchronous execution
-assertFalse(cleanedUp);
+ // The microtask and the cleanup callbacks will verify that these two are
+ // invoked in the right order: microtask -> cleanup.
+ setTimeout(() => { assertTrue(cleanedUp); }, 0);
-// flush the microtask queue to run the microtask that triggers GC
-%PerformMicrotaskCheckpoint();
-
-// still no cleanup callback, because it runs after as a separate task
-assertFalse(cleanedUp);
-
-setTimeout(() => {
- assertTrue(cleanedUp);
-}, 0);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
index 11a9b3099d..6d46af6c82 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
@@ -4,35 +4,66 @@
// Flags: --expose-gc --noincremental-marking
-let cleanedUp = false;
-let r = Realm.create();
-let FG = Realm.eval(r, "FinalizationRegistry");
-Realm.detachGlobal(r);
-
-let fg_not_run = new FG(() => {
- assertUnreachable();
-});
-(() => {
- fg_not_run.register({});
-})();
+(async function () {
-gc();
+ const r = Realm.create();
+ const FG = Realm.eval(r, "FinalizationRegistry");
+ Realm.detachGlobal(r);
-// Disposing the realm cancels the already scheduled fg_not_run's finalizer.
-Realm.dispose(r);
+ const cleanup_not_run = function (holdings) {
+ assertUnreachable();
+ }
+ let fg_not_run = new FG(cleanup_not_run);
-let fg = new FG(()=> {
- cleanedUp = true;
-});
+ (function () {
+ const object = {};
+ fg_not_run.register(object, "first");
+ // Object becomes unreachable.
+ })();
-// FGs that are alive after disposal can still schedule tasks.
-(() => {
- let object = {};
- fg.register(object, {});
+ let cleanedUp = false;
+ let fg_run;
- // object becomes unreachable.
-})();
+ // Schedule a GC, which will schedule fg_not_run for cleanup.
+ // Here and below, we need to invoke GC asynchronously and wait for it to
+ // finish, so that it doesn't need to scan the stack. Otherwise, the objects
+ // may not be reclaimed because of conservative stack scanning and the test
+ // may not work as intended.
+ let task_1_gc = (async function () {
+ await gc({ type: 'major', execution: 'async' });
+
+ // Disposing the realm cancels the already scheduled fg_not_run's finalizer.
+ Realm.dispose(r);
+
+ const cleanup = function (holdings) {
+ assertEquals(holdings, "second");
+ assertFalse(cleanedUp);
+ cleanedUp = true;
+ }
+ fg_run = new FG(cleanup);
+
+ // FGs that are alive after disposal can still schedule tasks.
+ (function () {
+ const object = {};
+ fg_run.register(object, "second");
+ // Object becomes unreachable.
+ })();
+ })();
-gc();
+ // Schedule a second GC for execution after that, which will now schedule
+ // fg_run for cleanup.
+ let task_2_gc = (async function () {
+ await gc({ type: 'major', execution: 'async' });
-setTimeout(function() { assertTrue(cleanedUp); }, 0);
+ // Check that the cleanup task has not had the chance to run yet.
+ assertFalse(cleanedUp);
+ })();
+
+ // Wait for the two GCs to be executed.
+ await task_1_gc;
+ await task_2_gc;
+
+ // Give the cleanup task a chance to run and check it worked correctly.
+ setTimeout(function () { assertTrue(cleanedUp); }, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
index a824bd9d85..5eb15a4414 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
@@ -4,31 +4,39 @@
// Flags: --expose-gc --noincremental-marking
-let r = Realm.create();
+(async function () {
-let cleanup = Realm.eval(r, "var stored_global; let cleanup = new Proxy(function() { stored_global = globalThis;}, {}); cleanup");
-let realm_global_this = Realm.eval(r, "globalThis");
+ let r = Realm.create();
-let fg = new FinalizationRegistry(cleanup);
+ let cleanup = Realm.eval(r, "var stored_global; let cleanup = new Proxy(function() { stored_global = globalThis;}, {}); cleanup");
+ let realm_global_this = Realm.eval(r, "globalThis");
-// Create an object and register it in the FinalizationRegistry. The object needs
-// to be inside a closure so that we can reliably kill them!
-let weak_cell;
+ let fg = new FinalizationRegistry(cleanup);
-(function() {
- let object = {};
- fg.register(object, "holdings");
+ // Create an object and register it in the FinalizationRegistry. The object needs
+ // to be inside a closure so that we can reliably kill it!
+ let weak_cell;
- // object goes out of scope.
-})();
+ (function () {
+ let object = {};
+ fg.register(object, "holdings");
+
+ // object goes out of scope.
+ })();
-gc();
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
-// Assert that the cleanup function was called in its Realm.
-let timeout_func = function() {
- let stored_global = Realm.eval(r, "stored_global;");
- assertNotEquals(stored_global, globalThis);
- assertEquals(stored_global, realm_global_this);
-}
+ // Assert that the cleanup function was called in its Realm.
+ let timeout_func = function () {
+ let stored_global = Realm.eval(r, "stored_global;");
+ assertNotEquals(stored_global, globalThis);
+ assertEquals(stored_global, realm_global_this);
+ }
-setTimeout(timeout_func, 0);
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
index 730312cba5..f89b646d13 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
@@ -4,48 +4,56 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_called = 0;
-let holdings_list = [];
-let cleanup = function(holdings) {
- holdings_list.push(holdings);
- cleanup_called++;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let o1 = {};
-let o2 = {};
-
-// Ignition holds references to objects in temporary registers. These will be
-// released when the function exits. So only access o inside a function to
-// prevent any references to objects in temporary registers when a gc is
-(function() {
- fg.register(o1, 1);
- fg.register(o2, 2);
-})();
-
-gc();
-assertEquals(cleanup_called, 0);
+(async function () {
-// Drop the last references to o1 and o2.
-(function() {
- o1 = null;
- o2 = null;
-})();
+ let cleanup_called = 0;
+ let holdings_list = [];
+ let cleanup = function (holdings) {
+ holdings_list.push(holdings);
+ cleanup_called++;
+ }
-// GC will reclaim the target objects; the cleanup function will be called the
-// next time we enter the event loop.
-gc();
-assertEquals(cleanup_called, 0);
-
-let timeout_func = function() {
- assertEquals(cleanup_called, 2);
- assertEquals(holdings_list.length, 2);
- if (holdings_list[0] == 1) {
- assertEquals(holdings_list[1], 2);
- } else {
- assertEquals(holdings_list[0], 2);
- assertEquals(holdings_list[1], 1);
+ let fg = new FinalizationRegistry(cleanup);
+ let o1 = {};
+ let o2 = {};
+
+ // Ignition holds references to objects in temporary registers. These will be
+ // released when the function exits. So only access o inside a function to
+ // prevent any references to objects in temporary registers when a gc is
+ (function () {
+ fg.register(o1, 1);
+ fg.register(o2, 2);
+ })();
+
+ // Here and below, we need to invoke GC asynchronously and wait for it to
+ // finish, so that it doesn't need to scan the stack. Otherwise, the objects
+ // may not be reclaimed because of conservative stack scanning and the test
+ // may not work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(cleanup_called, 0);
+
+ // Drop the last references to o1 and o2.
+ (function () {
+ o1 = null;
+ o2 = null;
+ })();
+
+ // GC will reclaim the target objects; the cleanup function will be called the
+ // next time we enter the event loop.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(cleanup_called, 0);
+
+ let timeout_func = function () {
+ assertEquals(cleanup_called, 2);
+ assertEquals(holdings_list.length, 2);
+ if (holdings_list[0] == 1) {
+ assertEquals(holdings_list[1], 2);
+ } else {
+ assertEquals(holdings_list[0], 2);
+ assertEquals(holdings_list[1], 1);
+ }
}
-}
-setTimeout(timeout_func, 0);
+ setTimeout(timeout_func, 0);
+
+})();
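The hunks above, and most of those that follow, apply the same mechanical change: the test body moves into an async IIFE and each synchronous gc() call becomes an awaited asynchronous major GC, so the collection runs from an empty JS stack and conservative stack scanning cannot keep the targets alive. A minimal standalone sketch of that pattern (not part of the diff), assuming a d8 build with --expose-gc where gc() accepts an options object and returns a promise for async execution, as these tests rely on:

// Minimal sketch of the async-GC pattern used throughout these tests.
(async function () {
  let cleanup_ran = false;
  const registry = new FinalizationRegistry(() => { cleanup_ran = true; });

  // Allocate the target inside a closure so no stack slot of this frame
  // keeps it alive once the closure returns.
  (function () {
    registry.register({}, 'holdings');
  })();

  // A synchronous gc() may conservatively scan the stack and keep the target
  // alive; the awaited asynchronous form runs the collection from a task with
  // an empty JS stack, so the target can be reclaimed reliably.
  await gc({ type: 'major', execution: 'async' });

  // Cleanup callbacks are scheduled as a separate task, so observe them later.
  setTimeout(() => { console.log('cleanup ran:', cleanup_ran); }, 0);
})();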
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
index f7fe196c78..356fb39b8c 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
@@ -4,26 +4,34 @@
// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking --allow-natives-syntax
-let cleanup_count = 0;
-let cleanup_holdings = [];
-let cleanup = function(holdings) {
- cleanup_holdings.push(holdings);
- ++cleanup_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-(function() {
- let o = {};
- fg.register(o, "holdings");
-
- assertEquals(0, cleanup_count);
-})();
+(async function () {
+
+ let cleanup_count = 0;
+ let cleanup_holdings = [];
+ let cleanup = function (holdings) {
+ cleanup_holdings.push(holdings);
+ ++cleanup_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ (function () {
+ let o = {};
+ fg.register(o, "holdings");
-// GC will detect o as dead.
-gc();
+ assertEquals(0, cleanup_count);
+ })();
-// passing no callback, should trigger cleanup function
-fg.cleanupSome();
-assertEquals(1, cleanup_count);
-assertEquals(1, cleanup_holdings.length);
-assertEquals("holdings", cleanup_holdings[0]);
+ // GC will detect o as dead.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+
+ // passing no callback, should trigger cleanup function
+ fg.cleanupSome();
+ assertEquals(1, cleanup_count);
+ assertEquals(1, cleanup_holdings.length);
+ assertEquals("holdings", cleanup_holdings[0]);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
index c96e30763c..56844c758d 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
@@ -4,31 +4,39 @@
// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking
-let cleanup_count = 0;
-let cleanup_holdings = [];
-let cleanup = function(holdings) {
- cleanup_holdings.push(holdings);
- ++cleanup_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key = {"k": "this is the key"};
-(function() {
- let o = {};
- weak_cell = fg.register(o, "holdings", key);
-
- // cleanupSome won't do anything since there are no reclaimed targets.
- fg.cleanupSome();
- assertEquals(0, cleanup_count);
- return o;
-})();
-
-// GC will detect the WeakCell as dirty.
-gc();
+(async function () {
+
+ let cleanup_count = 0;
+ let cleanup_holdings = [];
+ let cleanup = function (holdings) {
+ cleanup_holdings.push(holdings);
+ ++cleanup_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let key = { "k": "this is the key" };
+ (function () {
+ let o = {};
+ weak_cell = fg.register(o, "holdings", key);
+
+ // cleanupSome won't do anything since there are no reclaimed targets.
+ fg.cleanupSome();
+ assertEquals(0, cleanup_count);
+ return o;
+ })();
+
+ // GC will detect the WeakCell as dirty.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+
+ // Unregister the tracked object just before calling cleanupSome.
+ fg.unregister(key);
-// Unregister the tracked object just before calling cleanupSome.
-fg.unregister(key);
+ fg.cleanupSome();
-fg.cleanupSome();
+ assertEquals(0, cleanup_count);
-assertEquals(0, cleanup_count);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
index 480ec4e2ad..835f184aeb 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
@@ -4,31 +4,39 @@
// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking --allow-natives-syntax
-let cleanup_count = 0;
-let cleanup_holdings = [];
-let cleanup = function(holdings) {
- %AbortJS("shouldn't be called");
-}
-
-let cleanup2 = function(holdings) {
- cleanup_holdings.push(holdings);
- ++cleanup_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-(function() {
- let o = {};
- fg.register(o, "holdings");
-
- // cleanupSome won't do anything since there are no reclaimed targets.
- fg.cleanupSome(cleanup2);
- assertEquals(0, cleanup_count);
-})();
+(async function () {
+
+ let cleanup_count = 0;
+ let cleanup_holdings = [];
+ let cleanup = function (holdings) {
+ %AbortJS("shouldn't be called");
+ }
+
+ let cleanup2 = function (holdings) {
+ cleanup_holdings.push(holdings);
+ ++cleanup_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ (function () {
+ let o = {};
+ fg.register(o, "holdings");
-// GC will detect o as dead.
-gc();
+ // cleanupSome won't do anything since there are no reclaimed targets.
+ fg.cleanupSome(cleanup2);
+ assertEquals(0, cleanup_count);
+ })();
-fg.cleanupSome(cleanup2);
-assertEquals(1, cleanup_count);
-assertEquals(1, cleanup_holdings.length);
-assertEquals("holdings", cleanup_holdings[0]);
+ // GC will detect o as dead.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+
+ fg.cleanupSome(cleanup2);
+ assertEquals(1, cleanup_count);
+ assertEquals(1, cleanup_holdings.length);
+ assertEquals("holdings", cleanup_holdings[0]);
+
+})();
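The cleanupsome-* hunks above exercise FinalizationRegistry.prototype.cleanupSome, which is gated behind --harmony-weak-refs-with-cleanup-some and is not part of standard JavaScript. A short sketch of its observable behaviour as these tests use it, under the same d8 gc() assumptions:

// Sketch only; cleanupSome is a proposal-stage API exposed by a d8 flag.
(async function () {
  let via_registry = 0;
  let via_override = 0;
  const fg = new FinalizationRegistry(() => { via_registry++; });

  (function () { fg.register({}, 'holdings'); })();

  // cleanupSome does nothing while no registered target has been reclaimed.
  fg.cleanupSome();
  console.log(via_registry);  // 0

  await gc({ type: 'major', execution: 'async' });

  // With no argument it drains reclaimed cells through the registry's own
  // callback; with an argument, the passed callback is used for this call.
  fg.cleanupSome((holdings) => { via_override++; });
  console.log(via_registry, via_override);  // expected: 0 1
})();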
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
index a6cda82485..c80ebb4ec0 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
@@ -4,39 +4,46 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_called = false;
-let cleanup = function(holdings) {
- assertFalse(cleanup_called);
- let holdings_list = [];
- holdings_list.push(holdings);
- assertEquals(1, holdings_list.length);
- assertEquals("holdings", holdings_list[0]);
- cleanup_called = true;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let weak_ref;
-(function() {
- let o = {};
- weak_ref = new WeakRef(o);
- fg.register(o, "holdings");
-})();
+(async function () {
-// Since the WeakRef was created during this turn, it is not cleared by GC. The
-// pointer inside the FinalizationRegistry is not cleared either, since the WeakRef
-// keeps the target object alive.
-gc();
-(function() {
- assertNotEquals(undefined, weak_ref.deref());
-})();
+ let cleanup_called = false;
+ const cleanup = function(holdings) {
+ assertFalse(cleanup_called);
+ assertEquals("holdings", holdings);
+ cleanup_called = true;
+ }
-// Trigger gc in next task
-setTimeout(() => {
+ const fg = new FinalizationRegistry(cleanup);
+ let weak_ref;
+ (function() {
+ const o = {};
+ weak_ref = new WeakRef(o);
+ fg.register(o, "holdings");
+ })();
+
+ // Since the WeakRef was created during this turn, it is not cleared by GC. The
+ // pointer inside the FinalizationRegistry is not cleared either, since the WeakRef
+ // keeps the target object alive.
+ // Here we invoke GC synchronously and, with conservative stack scanning, there is
+ // a chance that the object is not reclaimed now. In any case, the WeakRef should
+ // not be cleared.
gc();
- // Check that cleanup callback was called in a follow up task
- setTimeout(() => {
- assertTrue(cleanup_called);
- assertEquals(undefined, weak_ref.deref());
- }, 0);
-}, 0);
+ assertNotEquals(undefined, weak_ref.deref());
+ assertFalse(cleanup_called);
+
+ // Trigger GC in next task. Now the WeakRef is cleared but the cleanup has
+ // not been called yet.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+
+ assertEquals(undefined, weak_ref.deref());
+ assertFalse(cleanup_called);
+
+ // Check that the cleanup callback was called in a follow up task.
+ setTimeout(() => { assertTrue(cleanup_called); }, 0);
+
+})();
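The hunk above leans on the liveness rule for WeakRef: a target is kept alive at least until the end of the turn in which the WeakRef was created or deref() was called, so only a collection in a later task can clear it. A compact sketch of that behaviour, mirroring the assertions above and assuming the same d8 gc() interface:

// Sketch only.
(async function () {
  let ref;
  (function () { ref = new WeakRef({ payload: 1 }); })();

  // Same turn as the WeakRef's creation: the target cannot be collected yet,
  // so even a synchronous gc() leaves deref() returning the object.
  gc();
  console.log(ref.deref() !== undefined);  // true

  // Awaiting the asynchronous GC yields to the event loop first, so this
  // collection runs in a later task and is allowed to clear the WeakRef.
  await gc({ type: 'major', execution: 'async' });
  console.log(ref.deref());  // expected: undefined
})();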
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime-multiple.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime-multiple.js
index 4eb54166ea..02c4e196df 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime-multiple.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime-multiple.js
@@ -4,29 +4,49 @@
// Flags: --expose-gc --noincremental-marking --no-concurrent-inlining
-let cleanup_called = false;
-function cleanup(holdings) {
- cleanup_called = true;
-};
-let cleanup_called_2 = false;
-function cleanup2(holdings) {
- cleanup_called_2 = true;
-};
-let fg = new FinalizationRegistry(cleanup);
-(function() {
- let fg2 = new FinalizationRegistry(cleanup2);
- (function() {
- fg.register({}, {});
- fg2.register({}, {});
+(async function () {
+
+ let cleanup_called = false;
+ function cleanup(holdings) {
+ cleanup_called = true;
+ };
+
+ let cleanup_called_2 = false;
+ function cleanup2(holdings) {
+ cleanup_called_2 = true;
+ };
+
+ const fg = new FinalizationRegistry(cleanup);
+
+ let task_1_gc = (async function () {
+ const fg2 = new FinalizationRegistry(cleanup2);
+
+ (function () {
+ fg.register({}, "holdings1");
+ fg2.register({}, "holdings2");
+ })();
+
+ // Schedule fg and fg2 for cleanup.
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanup_called);
+ assertFalse(cleanup_called_2);
+ })();
+
+ // Schedule a task to collect fg2, but fg is still alive.
+ let task_2_gc = (async function () {
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanup_called);
+ assertFalse(cleanup_called_2);
})();
- // Schedule fg and fg2 for cleanup.
- gc();
-})();
-// Collect fg2, but fg is still alive.
-gc();
+ // Wait for the two GC tasks to be executed.
+ await task_1_gc;
+ await task_2_gc;
-setTimeout(function() {
- assertTrue(cleanup_called);
- assertFalse(cleanup_called_2);
-}, 0);
+ // Check that only the cleanup for fg will be called.
+ setTimeout(function() {
+ assertTrue(cleanup_called);
+ assertFalse(cleanup_called_2);
+ }, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
index 4496419966..a1744ead9a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
@@ -4,22 +4,38 @@
// Flags: --expose-gc --noincremental-marking --no-concurrent-recompilation
-let cleanup_called = false;
-function cleanup(holdings) {
- cleanup_called = true;
-};
-(function() {
- let fg = new FinalizationRegistry(cleanup);
- (function() {
- let x = {};
- fg.register(x, {});
- x = null;
+(async function () {
+
+ let cleanup_called = false;
+ function cleanup(holdings) {
+ cleanup_called = true;
+ };
+
+ let task_1_gc = (async function () {
+ const fg = new FinalizationRegistry(cleanup);
+
+ (function () {
+ let x = {};
+ fg.register(x, "holdings");
+ x = null;
+ })();
+
+ // Schedule fg for cleanup.
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanup_called);
})();
- // Schedule fg for cleanup.
- gc();
-})();
-// Collect fg, which should result in cleanup not called.
-gc();
+ // Schedule a task to collect fg, which should result in cleanup not called.
+ let task_2_gc = (async function () {
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanup_called);
+ })();
+
+ // Wait for the two GC tasks to be executed.
+ await task_1_gc;
+ await task_2_gc;
-setTimeout(function() { assertFalse(cleanup_called); }, 0);
+ // Check that the cleanup will not be called.
+ setTimeout(function () { assertFalse(cleanup_called); }, 0);
+
+})();
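The two finalizationregistry-independent-lifetime* hunks above share a second pattern: two asynchronous GCs are started as separate async functions before either is awaited, so the second collection can reclaim a FinalizationRegistry that only the first task kept alive. A sketch of that structure, with the same gc() assumptions; the names task_1 and task_2 are illustrative:

// Sketch only, mirroring the structure of the hunks above.
(async function () {
  let cleanup_called = false;

  const task_1 = (async function () {
    // The registry is only reachable from this async function.
    const fg = new FinalizationRegistry(() => { cleanup_called = true; });
    (function () { fg.register({}, 'holdings'); })();
    // Schedules fg for cleanup; fg itself dies when this function completes.
    await gc({ type: 'major', execution: 'async' });
  })();

  const task_2 = (async function () {
    // A later collection can reclaim fg itself, dropping its pending cleanup.
    await gc({ type: 'major', execution: 'async' });
  })();

  await task_1;
  await task_2;

  // As in the tests above, the cleanup is expected not to run once the
  // registry has been reclaimed.
  setTimeout(() => { console.log('cleanup called:', cleanup_called); }, 0);
})();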
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
index 7a09273ca7..ab88cc940e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
@@ -4,40 +4,48 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_called = false;
-let holdings_list = [];
-let cleanup = function(holdings) {
+(async function () {
+
+ let cleanup_called = false;
+ let holdings_list = [];
+ let cleanup = function (holdings) {
+ assertFalse(cleanup_called);
+ holdings_list.push(holdings);
+ cleanup_called = true;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let o1 = {};
+ let holdings = { 'a': 'this is the holdings object' };
+
+ // Ignition holds references to objects in temporary registers. These will be
+ // released when the function exits. So only access o inside a function to
+ // prevent any references to objects in temporary registers when a gc is
+ // triggered.
+ (() => { fg.register(o1, holdings); })()
+
+ // Here and below, we need to invoke GC asynchronously and wait for it to
+ // finish, so that it doesn't need to scan the stack. Otherwise, the objects
+ // may not be reclaimed because of conservative stack scanning and the test
+ // may not work as intended.
+ await gc({ type: 'major', execution: 'async' });
assertFalse(cleanup_called);
- holdings_list.push(holdings);
- cleanup_called = true;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let o1 = {};
-let holdings = {'a': 'this is the holdings object'};
-
-// Ignition holds references to objects in temporary registers. These will be
-// released when the function exits. So only access o inside a function to
-// prevent any references to objects in temporary registers when a gc is
-// triggered.
-(() => {fg.register(o1, holdings);})()
-
-gc();
-assertFalse(cleanup_called);
-
-// Drop the last references to o1.
-(() => {o1 = null;})()
-
-// Drop the last reference to the holdings. The FinalizationRegistry keeps it
-// alive, so the cleanup function will be called as normal.
-holdings = null;
-gc();
-assertFalse(cleanup_called);
-
-let timeout_func = function() {
- assertTrue(cleanup_called);
- assertEquals(holdings_list.length, 1);
- assertEquals(holdings_list[0].a, "this is the holdings object");
-}
-
-setTimeout(timeout_func, 0);
+
+ // Drop the last references to o1.
+ (() => { o1 = null; })()
+
+ // Drop the last reference to the holdings. The FinalizationRegistry keeps it
+ // alive, so the cleanup function will be called as normal.
+ holdings = null;
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanup_called);
+
+ let timeout_func = function () {
+ assertTrue(cleanup_called);
+ assertEquals(holdings_list.length, 1);
+ assertEquals(holdings_list[0].a, "this is the holdings object");
+ }
+
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
index 3262442b2f..8db031b8bd 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
@@ -5,63 +5,78 @@
// Flags: --expose-gc --noincremental-marking
// Flags: --no-stress-flush-code
-let cleanup0_call_count = 0;
-let cleanup0_holdings_count = 0;
+(async function () {
-let cleanup1_call_count = 0;
-let cleanup1_holdings_count = 0;
+ let cleanup0_call_count = 0;
+ let cleanup1_call_count = 0;
-let cleanup0 = function(holdings) {
- ++cleanup0_holdings_count;
- ++cleanup0_call_count;
-}
+ let cleanup0 = function (holdings) {
+ ++cleanup0_call_count;
+ }
-let cleanup1 = function(holdings) {
- ++cleanup1_holdings_count;
- ++cleanup1_call_count;
-}
+ let cleanup1 = function (holdings) {
+ ++cleanup1_call_count;
+ }
-let fg0 = new FinalizationRegistry(cleanup0);
-let fg1 = new FinalizationRegistry(cleanup1);
+ let fg0 = new FinalizationRegistry(cleanup0);
+ let fg1 = new FinalizationRegistry(cleanup1);
-// Register 1 weak reference for each FinalizationRegistry and kill the objects they point to.
-(function() {
- // The objects need to be inside a closure so that we can reliably kill them.
- let objects = [];
- objects[0] = {};
- objects[1] = {};
+ // Register 1 weak reference for each FinalizationRegistry and kill the
+ // objects they point to.
+ (function () {
+ // The objects need to be inside a closure so that we can reliably kill
+ // them.
+ let objects = [];
+ objects[0] = {};
+ objects[1] = {};
+ fg0.register(objects[0], "holdings0-0");
+ fg1.register(objects[1], "holdings1-0");
+ // Drop the references to the objects.
+ objects = [];
+ })();
- fg0.register(objects[0], "holdings0-0");
- fg1.register(objects[1], "holdings1-0");
+ // Schedule a GC, which will schedule both fg0 and fg1 for cleanup.
+ // Here and below, we need to invoke GC asynchronously and wait for it to
+ // finish, so that it doesn't need to scan the stack. Otherwise, the objects
+ // may not be reclaimed because of conservative stack scanning and the test
+ // may not work as intended.
+ let task_1_gc = (async function () {
+ await gc({ type: 'major', execution: 'async' });
- // Drop the references to the objects.
- objects = [];
-})();
+ // Before the cleanup task has a chance to run, do the same thing again, so
+ // both FinalizationRegistries are (again) scheduled for cleanup. This has to
+ // be a IIFE function (so that we can reliably kill the objects) so we cannot
+ // use the same function as before.
+ (function () {
+ let objects = [];
+ objects[0] = {};
+ objects[1] = {};
+ fg0.register(objects[0], "holdings0-1");
+ fg1.register(objects[1], "holdings1-1");
+ objects = [];
+ })();
+ })();
-// Will schedule both fg0 and fg1 for cleanup.
-gc();
+ // Schedule a second GC for execution after that, which will again schedule
+ // both fg0 and fg1 for cleanup.
+ let task_2_gc = (async function () {
+ await gc({ type: 'major', execution: 'async' });
-// Before the cleanup task has a chance to run, do the same thing again, so both
-// FinalizationRegistries are (again) scheduled for cleanup. This has to be a IIFE function
-// (so that we can reliably kill the objects) so we cannot use the same function
-// as before.
-(function() {
- let objects = [];
- objects[0] = {};
- objects[1] = {};
- fg0.register(objects[0], "holdings0-1");
- fg1.register(objects[1], "holdings1-1");
- objects = [];
-})();
+ // Check that no cleanup task has had the chance to run yet.
+ assertEquals(0, cleanup0_call_count);
+ assertEquals(0, cleanup1_call_count);
+ })();
-gc();
+ // Wait for the two GCs to be executed.
+ await task_1_gc;
+ await task_2_gc;
-let timeout_func = function() {
- assertEquals(2, cleanup0_call_count);
- assertEquals(2, cleanup0_holdings_count);
- assertEquals(2, cleanup1_call_count);
- assertEquals(2, cleanup1_holdings_count);
-}
+ let timeout_func = function () {
+ assertEquals(2, cleanup0_call_count);
+ assertEquals(2, cleanup1_call_count);
+ }
-// Give the cleanup task a chance to run.
-setTimeout(timeout_func, 0);
+ // Give the cleanup task a chance to run and check it worked correctly.
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
index 533c3cb631..fec5b901f7 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
@@ -4,37 +4,42 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
+(async function () {
-let fg1 = new FinalizationRegistry(cleanup);
-let fg2 = new FinalizationRegistry(cleanup);
+ let cleanup_call_count = 0;
+ let cleanup = function (holdings) {
+ ++cleanup_call_count;
+ }
-// Create two objects and register them in FinalizationRegistries. The objects need
-// to be inside a closure so that we can reliably kill them!
+ let fg1 = new FinalizationRegistry(cleanup);
+ let fg2 = new FinalizationRegistry(cleanup);
-(function() {
- let object1 = {};
- fg1.register(object1, "holdings1");
+ // Create two objects and register them in FinalizationRegistries. The objects need
+ // to be inside a closure so that we can reliably kill them!
- let object2 = {};
- fg2.register(object2, "holdings2");
+ (function () {
+ let object1 = {};
+ fg1.register(object1, "holdings1");
- // object1 and object2 go out of scope.
-})();
+ let object2 = {};
+ fg2.register(object2, "holdings2");
+
+ // object1 and object2 go out of scope.
+ })();
-// This GC will discover dirty WeakCells and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+ // This GC will discover dirty WeakCells and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called.
-let timeout_func = function() {
- assertEquals(2, cleanup_call_count);
- assertEquals(2, cleanup_holdings_count);
-}
+ // Assert that the cleanup function was called.
+ let timeout_func = function () {
+ assertEquals(2, cleanup_call_count);
+ }
-setTimeout(timeout_func, 0);
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
index 07e23f614f..d898f9b216 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
@@ -4,20 +4,27 @@
// Flags: --expose-gc --noincremental-marking
-let call_count = 0;
-let reentrant_gc = function(holdings) {
- gc();
- call_count++;
-}
+(async function () {
-let fg = new FinalizationRegistry(reentrant_gc);
+ let call_count = 0;
+ const reentrant_gc = function (holdings) {
+ gc();
+ call_count++;
+ }
-(function() {
-fg.register({}, 42);
-})();
+ const fg = new FinalizationRegistry(reentrant_gc);
-gc();
+ (function () {
+ fg.register({}, 42);
+ })();
-setTimeout(function() {
- assertEquals(1, call_count);
-}, 0);
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, call_count);
+
+ setTimeout(function () { assertEquals(1, call_count); }, 0);
+
+ })();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js b/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js
index f9bcc2b77d..744e81a0c4 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js
@@ -4,33 +4,41 @@
// Flags: --stress-compaction --expose-gc
-// Test that the dirty FinalizationRegistries that are enqueued during GC have
-// their slots correctly recorded by the GC.
-
-// 1) Create many JSFinalizationRegistry objects so that they span several pages
-// (page size is 256kb).
-let registries = [];
-for (let i = 0; i < 1024 * 8; i++) {
- registries.push(new FinalizationRegistry(() => {}));
-}
-
-// 2) Force two GCs to ensure that JSFinalizatonRegistry objects are tenured.
-gc();
-gc();
-
-// 3) In a function: create a dummy target and register it in all
-// JSFinalizatonRegistry objects.
-(function() {
- let garbage = {};
- registries.forEach((fr) => {
- fr.register(garbage, 42);
- });
- garbage = null;
-})();
+(async function () {
+
+ // Test that the dirty FinalizationRegistries that are enqueued during GC have
+ // their slots correctly recorded by the GC.
+
+ // 1) Create many JSFinalizationRegistry objects so that they span several pages
+ // (page size is 256kb).
+ let registries = [];
+ for (let i = 0; i < 1024 * 8; i++) {
+ registries.push(new FinalizationRegistry(() => { }));
+ }
+
+ // 2) Force two GCs to ensure that JSFinalizatonRegistry objects are tenured.
+ // Here and below, we need to invoke GC asynchronously and wait for it to
+ // finish, so that it doesn't need to scan the stack. Otherwise, the objects
+ // may not be reclaimed because of conservative stack scanning and the test
+ // may not work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ await gc({ type: 'major', execution: 'async' });
-// 4) Outside the function where the target is unreachable: force GC to collect
-// the object.
-gc();
+ // 3) In a function: create a dummy target and register it in all
+ // JSFinalizatonRegistry objects.
+ (function () {
+ let garbage = {};
+ registries.forEach((fr) => {
+ fr.register(garbage, 42);
+ });
+ garbage = null;
+ })();
-// 5) Force another GC to test that the slot was correctly updated.
-gc();
+ // 4) Outside the function where the target is unreachable: force GC to collect
+ // the object.
+ await gc({ type: 'major', execution: 'async' });
+
+ // 5) Force another GC to test that the slot was correctly updated.
+ await gc({ type: 'major', execution: 'async' });
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/symbol-as-weakref-target-gc.js b/deps/v8/test/mjsunit/harmony/weakrefs/symbol-as-weakref-target-gc.js
index 561bf4f058..87b1e37602 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/symbol-as-weakref-target-gc.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/symbol-as-weakref-target-gc.js
@@ -4,36 +4,29 @@
// Flags: --harmony-symbol-as-weakmap-key --expose-gc --noincremental-marking
-(function TestWeakRefWithSymbolGC() {
+(async function () {
+
let weakRef;
- {
+ (function () {
const innerKey = Symbol('123');
weakRef = new WeakRef(innerKey);
- }
+ })();
+
// Since the WeakRef was created during this turn, it is not cleared by GC.
+ // Here we invoke GC synchronously and, with conservative stack scanning, there is
+ // a chance that the object is not reclaimed now. In any case, the WeakRef should
+ // not be cleared.
gc();
+
assertNotEquals(undefined, weakRef.deref());
- // Next task.
- setTimeout(() => {
- gc();
- assertEquals(undefined, weakRef.deref());
- }, 0);
-})();
-(function TestFinalizationRegistryWithSymbolGC() {
- let cleanUpCalled = false;
- const fg = new FinalizationRegistry((target) => {
- assertEquals('123', target);
- cleanUpCalled = true;
- });
- (function () {
- const innerKey = Symbol('123');
- fg.register(innerKey, '123');
- })();
- gc();
- assertFalse(cleanUpCalled);
- // Check that cleanup callback was called in a follow up task.
- setTimeout(() => {
- assertTrue(cleanUpCalled);
- }, 0);
+ // Trigger GC again in next task. Now the WeakRef is cleared.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+
+ assertEquals(undefined, weakRef.deref());
+
})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/symbol-in-finalizationregistry.js b/deps/v8/test/mjsunit/harmony/weakrefs/symbol-in-finalizationregistry.js
new file mode 100644
index 0000000000..2158143ac3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/symbol-in-finalizationregistry.js
@@ -0,0 +1,30 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-symbol-as-weakmap-key --expose-gc --noincremental-marking
+
+(async function () {
+
+ let cleanUpCalled = false;
+ const fg = new FinalizationRegistry((target) => {
+ assertEquals('123', target);
+ cleanUpCalled = true;
+ });
+
+ (function () {
+ const innerKey = Symbol('123');
+ fg.register(innerKey, '123');
+ })();
+
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanUpCalled);
+
+ // Check that cleanup callback was called in a follow up task.
+ setTimeout(() => { assertTrue(cleanUpCalled); }, 0);
+
+})();
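The two symbol hunks above rely on --harmony-symbol-as-weakmap-key, which allows symbols as WeakRef targets and FinalizationRegistry targets (the proposal excludes registered symbols created with Symbol.for()). A small sketch under the same d8 assumptions:

// Sketch only; assumes d8 with --harmony-symbol-as-weakmap-key and --expose-gc.
(async function () {
  let observed_holdings;
  const fg = new FinalizationRegistry((holdings) => { observed_holdings = holdings; });

  (function () {
    const sym = Symbol('only reachable inside this closure');
    fg.register(sym, 'symbol was reclaimed');
  })();

  await gc({ type: 'major', execution: 'async' });

  // The cleanup callback runs in a later task, as in the tests above.
  setTimeout(() => { console.log(observed_holdings); }, 0);
})();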
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
index 6cfc1a1aa7..a23c1e8cf2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
@@ -14,6 +14,9 @@ let wr2;
})();
// Since the WeakRefs were created during this turn, they're not cleared by GC.
+// Here and below, we invoke GC synchronously and, with conservative stack
+// scanning, there is a chance that the object is not reclaimed now. In any
+// case, the WeakRef should not be cleared.
gc();
(function() {
@@ -23,25 +26,29 @@ gc();
// New task
setTimeout(function() {
- wr1.deref();
+ (function () { wr1.deref(); })();
o1 = null;
gc(); // deref makes sure we don't clean up wr1
+ (function () { assertNotEquals(undefined, wr1.deref()); })();
// New task
setTimeout(function() {
- wr2.deref();
+ (function () { wr2.deref(); })();
o2 = null;
gc(); // deref makes sure we don't clean up wr2
+ (function () { assertNotEquals(undefined, wr2.deref()); })();
// New task
- setTimeout(function() {
- assertEquals(undefined, wr1.deref());
- gc();
+ (async function () {
+ // Trigger GC again to make sure the two WeakRefs are cleared.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
- // New task
- setTimeout(function() {
- assertEquals(undefined, wr2.deref());
- }, 0);
- }, 0);
+ assertEquals(undefined, wr1.deref());
+ assertEquals(undefined, wr2.deref());
+ })();
}, 0);
}, 0);
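The two-weakrefs.js hunk above also starts wrapping each deref() call in an IIFE. The point, presumably, is that with conservative stack scanning a dereferenced target left in one of the outer frame's stack slots could be treated as a root by a later collection; keeping the call in a throwaway closure avoids that, while deref() itself still keeps the target alive for the remainder of the current turn. A sketch of the idiom, same d8 assumptions:

// Sketch only.
let ref;
(function () { ref = new WeakRef({}); })();

setTimeout(function () {
  (function () { ref.deref(); })();          // touch the target in a throwaway frame
  gc();                                      // same turn: the target stays alive
  (function () {
    console.log(ref.deref() !== undefined);  // true
  })();
}, 0);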
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
index a45426e3f6..8b6f8f42d9 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
@@ -4,34 +4,39 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- assertEquals(holdings, undefined);
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
+(async function () {
-let fg = new FinalizationRegistry(cleanup);
+ let cleanup_call_count = 0;
+ let cleanup = function (holdings) {
+ assertEquals(holdings, undefined);
+ ++cleanup_call_count;
+ }
-// Create an object and register it in the FinalizationRegistry. The object needs to be inside
-// a closure so that we can reliably kill them!
+ let fg = new FinalizationRegistry(cleanup);
-(function() {
- let object = {};
- fg.register(object);
+ // Create an object and register it in the FinalizationRegistry. The object needs to be inside
+ // a closure so that we can reliably kill them!
- // object goes out of scope.
-})();
+ (function () {
+ let object = {};
+ fg.register(object);
+
+ // object goes out of scope.
+ })();
-// This GC will reclaim the target object and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+ // This GC will reclaim the target object and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called.
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
-}
+ // Assert that the cleanup function was called.
+ let timeout_func = function () {
+ assertEquals(1, cleanup_call_count);
+ }
-setTimeout(timeout_func, 0);
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index b23f396f38..37ca99313e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -4,42 +4,46 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- assertEquals("holdings", holdings);
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationRegistry. The object needs
-// to be inside a closure so that we can reliably kill them!
-
-(function() {
- let object = {};
- fg.register(object, "holdings", key);
-
- // object goes out of scope.
-})();
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function (holdings) {
+ assertEquals("holdings", holdings);
+ ++cleanup_call_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let key = { "k": "this is the key" };
+ // Create an object and register it in the FinalizationRegistry. The object needs
+ // to be inside a closure so that we can reliably kill them!
+
+ (function () {
+ let object = {};
+ fg.register(object, "holdings", key);
-// This GC will reclaim the target object and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+ // object goes out of scope.
+ })();
-// Assert that the cleanup function was called.
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
+ // This GC will reclaim the target object and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
- // Unregister an already cleaned-up weak reference.
- let success = fg.unregister(key);
- assertFalse(success);
+ // Assert that the cleanup function was called.
+ let timeout_func = function () {
+ assertEquals(1, cleanup_call_count);
- // Assert that it didn't do anything.
- setTimeout(() => { assertEquals(1, cleanup_call_count); }, 0);
- setTimeout(() => { assertEquals(1, cleanup_holdings_count); }, 0);
-}
+ // Unregister an already cleaned-up weak reference.
+ let success = fg.unregister(key);
+ assertFalse(success);
-setTimeout(timeout_func, 0);
+ // Assert that it didn't do anything.
+ setTimeout(() => { assertEquals(1, cleanup_call_count); }, 0);
+ }
+
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index aebcc6a746..082e8cdfe2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -4,34 +4,42 @@
// Flags: --expose-gc --noincremental-marking --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup = function(holdings) {
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationRegistry. The object needs
-// to be inside a closure so that we can reliably kill them!
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function (holdings) {
+ ++cleanup_call_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let key = { "k": "this is the key" };
+ // Create an object and register it in the FinalizationRegistry. The object needs
+ // to be inside a closure so that we can reliably kill them!
+
+ (function () {
+ let object = {};
+ fg.register(object, "my holdings", key);
+
+ // Clear the WeakCell before the GC has a chance to discover it.
+ let success = fg.unregister(key);
+ assertTrue(success);
+
+ // object goes out of scope.
+ })();
+
+ // This GC will reclaim the target object.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
-(function() {
- let object = {};
- fg.register(object, "my holdings", key);
+ // Assert that the cleanup function won't be called, since we called unregister.
+ let timeout_func = function () {
+ assertEquals(0, cleanup_call_count);
+ }
- // Clear the WeakCell before the GC has a chance to discover it.
- let success = fg.unregister(key);
- assertTrue(success);
+ setTimeout(timeout_func, 0);
- // object goes out of scope.
})();
-
-// This GC will reclaim the target object.
-gc();
-assertEquals(0, cleanup_call_count);
-
-// Assert that the cleanup function won't be called, since we called unregister.
-let timeout_func = function() {
- assertEquals(0, cleanup_call_count);
-}
-
-setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
index b3f425655e..06c995b5b8 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -4,39 +4,47 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup = function(holdings) {
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationRegistry. The object needs
-// to be inside a closure so that we can reliably kill them!
-
-(function() {
- let object = {};
- fg.register(object, "holdings", key);
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function (holdings) {
+ ++cleanup_call_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let key = { "k": "this is the key" };
+ // Create an object and register it in the FinalizationRegistry. The object needs
+ // to be inside a closure so that we can reliably kill them!
+
+ (function () {
+ let object = {};
+ fg.register(object, "holdings", key);
+
+ // Unregister before the GC has a chance to discover the object.
+ let success = fg.unregister(key);
+ assertTrue(success);
+
+ // Call unregister again (just to assert we handle this gracefully).
+ success = fg.unregister(key);
+ assertFalse(success);
+
+ // object goes out of scope.
+ })();
+
+ // This GC will reclaim the target object.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
- // Unregister before the GC has a chance to discover the object.
- let success = fg.unregister(key);
- assertTrue(success);
+ // Assert that the cleanup function won't be called, since the weak reference
+ // was unregistered.
+ let timeout_func = function () {
+ assertEquals(0, cleanup_call_count);
+ }
- // Call unregister again (just to assert we handle this gracefully).
- success = fg.unregister(key);
- assertFalse(success);
+ setTimeout(timeout_func, 0);
- // object goes out of scope.
})();
-
-// This GC will reclaim the target object.
-gc();
-assertEquals(0, cleanup_call_count);
-
-// Assert that the cleanup function won't be called, since the weak reference
-// was unregistered.
-let timeout_func = function() {
- assertEquals(0, cleanup_call_count);
-}
-
-setTimeout(timeout_func, 0);
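The unregister-* hunks above all build on the return value of FinalizationRegistry.prototype.unregister: true when at least one cell registered with the token was removed, false when there was nothing left to remove (already cleaned up, already unregistered, or never registered). A sketch mirroring unregister-called-twice.js, same d8 assumptions:

// Sketch only.
(async function () {
  let cleanup_calls = 0;
  const fg = new FinalizationRegistry(() => { cleanup_calls++; });
  const key = { k: 'token' };

  (function () { fg.register({}, 'holdings', key); })();

  console.log(fg.unregister(key));  // true: one live cell removed
  console.log(fg.unregister(key));  // false: nothing registered under this token

  await gc({ type: 'major', execution: 'async' });

  // Because the cell was unregistered before the collection, the cleanup
  // callback is expected never to run.
  setTimeout(() => { console.log('cleanup calls:', cleanup_calls); }, 0);
})();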
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
index 903fb33a37..c12129dff7 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
@@ -4,37 +4,42 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- assertEquals(holdings, "holdings");
- let success = fg.unregister(key);
- assertFalse(success);
-
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-// Create an object and register it in the FinalizationRegistry. The object needs to be inside
-// a closure so that we can reliably kill them!
-let key = {"k": "this is the key"};
-
-(function() {
- let object = {};
- fg.register(object, "holdings", key);
-
- // object goes out of scope.
-})();
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function (holdings) {
+ assertEquals(holdings, "holdings");
+ let success = fg.unregister(key);
+ assertFalse(success);
+
+ ++cleanup_call_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ // Create an object and register it in the FinalizationRegistry. The object needs to be inside
+ // a closure so that we can reliably kill them!
+ let key = { "k": "this is the key" };
-// This GC will discover dirty WeakCells and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+ (function () {
+ let object = {};
+ fg.register(object, "holdings", key);
-// Assert that the cleanup function was called.
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
-}
+ // object goes out of scope.
+ })();
-setTimeout(timeout_func, 0);
+ // This GC will discover dirty WeakCells and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
+
+ // Assert that the cleanup function was called.
+ let timeout_func = function () {
+ assertEquals(1, cleanup_call_count);
+ }
+
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index 7479996844..6786183af9 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -4,45 +4,50 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- // See which target we're cleaning up and unregister the other one.
- if (holdings == 1) {
- let success = fg.unregister(key2);
- assertTrue(success);
- } else {
- assertSame(holdings, 2);
- let success = fg.unregister(key1);
- assertTrue(success);
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function(holdings) {
+ // See which target we're cleaning up and unregister the other one.
+ if (holdings == 1) {
+ let success = fg.unregister(key2);
+ assertTrue(success);
+ } else {
+ assertSame(holdings, 2);
+ let success = fg.unregister(key1);
+ assertTrue(success);
+ }
+ ++cleanup_call_count;
}
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key1 = {"k": "first key"};
-let key2 = {"k": "second key"};
-// Create two objects and register them in the FinalizationRegistry. The objects
-// need to be inside a closure so that we can reliably kill them!
-
-(function() {
- let object1 = {};
- fg.register(object1, 1, key1);
- let object2 = {};
- fg.register(object2, 2, key2);
-
- // object1 and object2 go out of scope.
-})();
-// This GC will reclaim target objects and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+ let fg = new FinalizationRegistry(cleanup);
+ let key1 = {"k": "first key"};
+ let key2 = {"k": "second key"};
+ // Create two objects and register them in the FinalizationRegistry. The objects
+ // need to be inside a closure so that we can reliably kill them!
+
+ (function() {
+ let object1 = {};
+ fg.register(object1, 1, key1);
+ let object2 = {};
+ fg.register(object2, 2, key2);
+
+ // object1 and object2 go out of scope.
+ })();
+
+ // This GC will reclaim target objects and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
+
+ // Assert that the cleanup function was called and cleaned up one holdings (but not the other one).
+ let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ }
-// Assert that the cleanup function was called and cleaned up one holdings (but not the other one).
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
-}
+ setTimeout(timeout_func, 0);
-setTimeout(timeout_func, 0);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index ac1e0e2c41..45654bf64a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -4,44 +4,48 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- assertEquals(holdings, "holdings");
-
- // There's one more object with the same key that we haven't
- // cleaned up yet so we should be able to unregister the
- // callback for that one.
- let success = fg.unregister(key);
-
- assertTrue(success);
-
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-// Create an object and register it in the FinalizationRegistry. The object needs to be inside
-// a closure so that we can reliably kill them!
-let key = {"k": "this is the key"};
-
-(function() {
- let object = {};
- let object2 = {};
- fg.register(object, "holdings", key);
- fg.register(object2, "holdings", key);
-
- // object goes out of scope.
-})();
-
-// This GC will discover dirty WeakCells and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function(holdings) {
+ assertEquals(holdings, "holdings");
+
+ // There's one more object with the same key that we haven't
+ // cleaned up yet so we should be able to unregister the
+ // callback for that one.
+ let success = fg.unregister(key);
+ assertTrue(success);
+
+ ++cleanup_call_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ // Create an object and register it in the FinalizationRegistry. The object needs to be inside
+ // a closure so that we can reliably kill them!
+ let key = {"k": "this is the key"};
+
+ (function() {
+ let object = {};
+ let object2 = {};
+ fg.register(object, "holdings", key);
+ fg.register(object2, "holdings", key);
+
+ // object goes out of scope.
+ })();
+
+ // This GC will discover dirty WeakCells and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
+
+ // Assert that the cleanup function was called.
+ let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ }
+
+ setTimeout(timeout_func, 0);
-// Assert that the cleanup function was called.
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
-}
-
-setTimeout(timeout_func, 0);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
index f9ff219d65..b94ec54e86 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -4,46 +4,51 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(holdings) {
- assertEquals("holdings2", holdings);
- ++cleanup_holdings_count;
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key1 = {"k": "key1"};
-let key2 = {"k": "key2"};
-// Create three objects and register them in the FinalizationRegistry. The objects
-// need to be inside a closure so that we can reliably kill them!
-
-(function() {
- let object1a = {};
- fg.register(object1a, "holdings1a", key1);
-
- let object1b = {};
- fg.register(object1b, "holdings1b", key1);
-
- let object2 = {};
- fg.register(object2, "holdings2", key2);
-
- // Unregister before the GC has a chance to discover the objects.
- let success = fg.unregister(key1);
- assertTrue(success);
-
- // objects go out of scope.
-})();
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function(holdings) {
+ assertEquals("holdings2", holdings);
+ ++cleanup_call_count;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let key1 = {"k": "key1"};
+ let key2 = {"k": "key2"};
+ // Create three objects and register them in the FinalizationRegistry. The objects
+ // need to be inside a closure so that we can reliably kill them!
+
+ (function() {
+ let object1a = {};
+ fg.register(object1a, "holdings1a", key1);
+
+ let object1b = {};
+ fg.register(object1b, "holdings1b", key1);
-// This GC will reclaim the target objects.
-gc();
-assertEquals(0, cleanup_call_count);
+ let object2 = {};
+ fg.register(object2, "holdings2", key2);
-// Assert that the cleanup function will be called only for the reference which
-// was not unregistered.
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
-}
+ // Unregister before the GC has a chance to discover the objects.
+ let success = fg.unregister(key1);
+ assertTrue(success);
-setTimeout(timeout_func, 0);
+ // objects go out of scope.
+ })();
+
+ // This GC will reclaim the target objects.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
+
+ // Assert that the cleanup function will be called only for the reference which
+ // was not unregistered.
+ let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ }
+
+ setTimeout(timeout_func, 0);
+
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 05ba4f28d2..0b3e5bcfee 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -4,34 +4,42 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_call_count = 0;
-let cleanup = function(holdings) {
- ++cleanup_call_count;
-}
-
-let key = {"k": "this is my key"};
-let fg = new FinalizationRegistry(cleanup);
-// Create an object and register it in the FinalizationRegistry. The object needs to be inside
-// a closure so that we can reliably kill them!
-
-(function() {
- let object = {};
- fg.register(object, {}, key);
-
- // object goes out of scope.
-})();
+(async function () {
+
+ let cleanup_call_count = 0;
+ let cleanup = function(holdings) {
+ ++cleanup_call_count;
+ }
+
+ let key = {"k": "this is my key"};
+ let fg = new FinalizationRegistry(cleanup);
+ // Create an object and register it in the FinalizationRegistry. The object needs to be inside
+ // a closure so that we can reliably kill them!
+
+ (function() {
+ let object = {};
+ fg.register(object, {}, key);
+
+ // object goes out of scope.
+ })();
+
+ // This GC will discover dirty WeakCells and schedule cleanup.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(0, cleanup_call_count);
-// This GC will discover dirty WeakCells and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
+ // Unregister the object from the FinalizationRegistry before cleanup has ran.
+ let success = fg.unregister(key);
+ assertTrue(success);
-// Unregister the object from the FinalizationRegistry before cleanup has ran.
-let success = fg.unregister(key);
-assertTrue(success);
+ // Assert that the cleanup function won't be called.
+ let timeout_func = function() {
+ assertEquals(0, cleanup_call_count);
+ }
-// Assert that the cleanup function won't be called.
-let timeout_func = function() {
- assertEquals(0, cleanup_call_count);
-}
+ setTimeout(timeout_func, 0);
-setTimeout(timeout_func, 0);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
index 3c8af1995b..532ceee4d2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -4,36 +4,44 @@
// Flags: --expose-gc --noincremental-marking
-let cleanup_called = false;
-let cleanup = function(holdings_arg) {
+(async function () {
+
+ let cleanup_called = false;
+ let cleanup = function(holdings_arg) {
+ assertFalse(cleanup_called);
+ assertEquals(holdings_arg, holdings);
+ cleanup_called = true;
+ }
+
+ let fg = new FinalizationRegistry(cleanup);
+ let o = {};
+ let holdings = {'h': 55};
+
+ // Ignition holds references to objects in temporary registers. These will be
+ // released when the function exits. So only access o inside a function to
+ // prevent any references to objects in temporary registers when a gc is
+ // triggered.
+ (() => { fg.register(o, holdings); })()
+
+ // Here and below, we need to invoke GC asynchronously and wait for it to
+ // finish, so that it doesn't need to scan the stack. Otherwise, the objects
+ // may not be reclaimed because of conservative stack scanning and the test
+ // may not work as intended.
+ await gc({ type: 'major', execution: 'async' });
assertFalse(cleanup_called);
- assertEquals(holdings_arg, holdings);
- cleanup_called = true;
-}
-let fg = new FinalizationRegistry(cleanup);
-let o = {};
-let holdings = {'h': 55};
+ // Drop the last reference to o.
+ (() => { o = null; })()
-// Ignition holds references to objects in temporary registers. These will be
-// released when the function exits. So only access o inside a function to
-// prevent any references to objects in temporary registers when a gc is
-// triggered.
-(() => { fg.register(o, holdings); })()
-
-gc();
-assertFalse(cleanup_called);
-
-// Drop the last reference to o.
-(() => { o = null; })()
+ // GC will clear the WeakCell; the cleanup function will be called the next time
+ // we enter the event loop.
+ await gc({ type: 'major', execution: 'async' });
+ assertFalse(cleanup_called);
-// GC will clear the WeakCell; the cleanup function will be called the next time
-// we enter the event loop.
-gc();
-assertFalse(cleanup_called);
+ let timeout_func = function() {
+ assertTrue(cleanup_called);
+ }
-let timeout_func = function() {
- assertTrue(cleanup_called);
-}
+ setTimeout(timeout_func, 0);
-setTimeout(timeout_func, 0);
+})();
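
The pattern in the rewritten weakrefs tests above is worth spelling out once in isolation. The sketch below assumes d8 run with --expose-gc, where gc() accepts the { type, execution } options object used by these tests, and it relies on FinalizationRegistry cleanup callbacks running only from a later task, never synchronously during GC:

let cleaned = false;
const registry = new FinalizationRegistry(() => { cleaned = true; });

(async function () {
  // Register the target inside a closure so no local variable on the current
  // stack frame keeps it alive.
  (function () {
    let target = {};
    registry.register(target, 'holdings');
  })();

  // An asynchronous major GC does not have to scan the stack conservatively,
  // so the now-unreachable target can actually be reclaimed.
  await gc({ type: 'major', execution: 'async' });
  console.log('cleaned right after GC:', cleaned);                  // false
  setTimeout(() => console.log('cleaned next task:', cleaned), 0);  // true (expected)
})();
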
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
index 6572faee21..255cbd25d1 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
@@ -4,19 +4,30 @@
// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking
-var FR = new FinalizationRegistry (function (holdings) { globalThis.FRRan = true; });
-{
+const cleanup = function (holdings) { globalThis.FRRan = true; };
+const FR = new FinalizationRegistry(cleanup);
+
+(function () {
let obj = {};
// obj is its own unregister token and becomes unreachable after this
// block. If the unregister token is held strongly this test will not
// terminate.
FR.register(obj, 42, obj);
-}
+})();
+
function tryAgain() {
- gc();
- if (globalThis.FRRan || FR.cleanupSome()) {
- return;
- }
- setTimeout(tryAgain, 0);
+ (async function () {
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+
+ if (globalThis.FRRan || FR.cleanupSome()) {
+ return;
+ }
+
+ setTimeout(tryAgain, 0);
+ })();
}
tryAgain();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
index 78e8865ac0..9b03f03e26 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
@@ -5,22 +5,29 @@
// Flags: --expose-gc --noincremental-marking
let wr;
-(function() {
+(function () {
let o = {};
wr = new WeakRef(o);
// Don't deref here, we want to test that the creation is enough to keep the
// WeakRef alive until the end of the turn.
})();
+// Here we invoke GC synchronously and, with conservative stack scanning,
+// there is a chance that the object is not reclaimed now. In any case,
+// the WeakRef should not be cleared.
gc();
-
// Since the WeakRef was created during this turn, it is not cleared by GC.
-(function() {
- assertNotEquals(undefined, wr.deref());
-})();
+assertNotEquals(undefined, wr.deref());
// Next task.
setTimeout(() => {
- gc();
- assertEquals(undefined, wr.deref());
+ (async function () {
+    // Trigger GC again to make sure the WeakRef is cleared.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
+ assertEquals(undefined, wr.deref());
+ })();
}, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
index f7c05e88b8..06ee707c90 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
@@ -6,40 +6,43 @@
let wr;
let wr_control; // control WeakRef for testing what happens without deref
-(function() {
+(function () {
let o1 = {};
wr = new WeakRef(o1);
let o2 = {};
wr_control = new WeakRef(o2);
})();
-let strong = {a: wr.deref(), b: wr_control.deref()};
+let strong = { a: wr.deref(), b: wr_control.deref() };
+// Here and below, we invoke GC synchronously and, with conservative stack
+// scanning, there is a chance that the object is not reclaimed now. In any
+// case, the WeakRefs should not be cleared.
gc();
// Next task.
setTimeout(function() {
// Call deref inside a closure, trying to avoid accidentally storing a strong
// reference into the object in the stack frame.
- (function() {
- wr.deref();
- })();
+ (function () { wr.deref(); })();
strong = null;
- // This GC will clear wr_control.
+  // This GC should clear wr_control (modulo conservative stack scanning),
+  // since nothing was keeping it alive, but it should not clear wr.
gc();
-
- (function() {
- assertNotEquals(undefined, wr.deref());
- // Now the control WeakRef got cleared, since nothing was keeping it alive.
- assertEquals(undefined, wr_control.deref());
- })();
+ (function () { assertNotEquals(undefined, wr.deref()); })();
// Next task.
- setTimeout(function() {
- gc();
+ (async function () {
+ // Trigger GC again to make sure the two WeakRefs are cleared.
+ // We need to invoke GC asynchronously and wait for it to finish, so that
+ // it doesn't need to scan the stack. Otherwise, the objects may not be
+ // reclaimed because of conservative stack scanning and the test may not
+ // work as intended.
+ await gc({ type: 'major', execution: 'async' });
assertEquals(undefined, wr.deref());
- }, 0);
+ assertEquals(undefined, wr_control.deref());
+ })();
}, 0);
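
The two WeakRef tests above depend on the liveness rule that creating or dereferencing a WeakRef keeps its target alive until the end of the current turn, so only a GC triggered from a later task can observe the reference cleared. A minimal sketch of that behaviour, under the same d8/--expose-gc assumptions as the tests:

let ref;
(function () {
  ref = new WeakRef({ payload: 42 });
})();

// Within the turn that created the WeakRef, the target is kept alive, so even
// a full GC must leave deref() returning the object.
gc();
console.log(ref.deref() !== undefined);  // true

setTimeout(async () => {
  // In a later task the keep-alive no longer applies, so an asynchronous major
  // GC can reclaim the target and clear the WeakRef.
  await gc({ type: 'major', execution: 'async' });
  console.log(ref.deref());  // undefined (expected)
}, 0);
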
diff --git a/deps/v8/test/mjsunit/ic-megadom-2.js b/deps/v8/test/mjsunit/ic-megadom-2.js
index cfb7521e1c..1f281ab2da 100644
--- a/deps/v8/test/mjsunit/ic-megadom-2.js
+++ b/deps/v8/test/mjsunit/ic-megadom-2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --enable-mega-dom-ic --allow-natives-syntax
+// Flags: --mega-dom-ic --allow-natives-syntax
// This test checks that load property access using megadom IC returns
// correct results both on API objects and plain JS objects.
diff --git a/deps/v8/test/mjsunit/ic-megadom-3.js b/deps/v8/test/mjsunit/ic-megadom-3.js
index f7cb0df858..f7a7634e7e 100644
--- a/deps/v8/test/mjsunit/ic-megadom-3.js
+++ b/deps/v8/test/mjsunit/ic-megadom-3.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --enable-mega-dom-ic --allow-natives-syntax
+// Flags: --mega-dom-ic --allow-natives-syntax
// This test checks that load property access using megadom IC
// correctly handles the error of a signature mismatch.
diff --git a/deps/v8/test/mjsunit/ic-megadom.js b/deps/v8/test/mjsunit/ic-megadom.js
index 2f767504dd..cd3cfe2150 100644
--- a/deps/v8/test/mjsunit/ic-megadom.js
+++ b/deps/v8/test/mjsunit/ic-megadom.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --enable-mega-dom-ic --allow-natives-syntax
+// Flags: --mega-dom-ic --allow-natives-syntax
// This test checks that load property access using megadom IC returns
// correct results on API objects.
diff --git a/deps/v8/test/mjsunit/interrupt-budget-override.js b/deps/v8/test/mjsunit/interrupt-budget-override.js
index 9f3784e793..fe0a874de0 100644
--- a/deps/v8/test/mjsunit/interrupt-budget-override.js
+++ b/deps/v8/test/mjsunit/interrupt-budget-override.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbofan --interrupt-budget=100 --interrupt-budget-for-feedback-allocation=10 --allow-natives-syntax
+// Flags: --turbofan --interrupt-budget=100 --interrupt-budget-for-feedback-allocation=10 --allow-natives-syntax --nomaglev
function f() {
let s = 0;
diff --git a/deps/v8/test/mjsunit/keyed-store-array-literal.js b/deps/v8/test/mjsunit/keyed-store-array-literal.js
index 000385b9d3..d2bf5beec6 100644
--- a/deps/v8/test/mjsunit/keyed-store-array-literal.js
+++ b/deps/v8/test/mjsunit/keyed-store-array-literal.js
@@ -8,7 +8,7 @@
function f1() {
const x = [,];
x[1] = 42;
- assertEquals([undefined, 42], x);
+ assertEquals([, 42], x);
}
%PrepareFunctionForOptimization(f1);
diff --git a/deps/v8/test/mjsunit/maglev/add-smi.js b/deps/v8/test/mjsunit/maglev/add-smi.js
index c59e97a070..e2802c8324 100644
--- a/deps/v8/test/mjsunit/maglev/add-smi.js
+++ b/deps/v8/test/mjsunit/maglev/add-smi.js
@@ -17,9 +17,8 @@
assertEquals(3, add(1, 2));
assertTrue(isMaglevved(add));
- // We should deopt here in SmiUntag.
assertEquals(0x40000000, add(1, 0x3FFFFFFF));
- assertFalse(isMaglevved(add));
+ assertTrue(isMaglevved(add));
})();
// Checks when we deopt due to tagging.
@@ -35,7 +34,7 @@
assertEquals(3, add(1, 2));
assertTrue(isMaglevved(add));
- // We should deopt here in SmiTag.
+ // We should deopt here in Int32Add.
assertEquals(3.2, add(1.2, 2));
assertFalse(isMaglevved(add));
})();
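
The changed expectation above is easier to follow with the arithmetic written out: 0x3FFFFFFF is 2^30 - 1, so adding 1 gives exactly 2^30 (0x40000000), a value that is still a valid Int32, which is why the eagerly overflow-checked Int32 add can keep the function Maglev-compiled. The remark about 31-bit Smi payloads on pointer-compressed builds is an assumption, not something the test asserts; the constants themselves check out:

console.log(0x3FFFFFFF === 2 ** 30 - 1);    // true
console.log(1 + 0x3FFFFFFF === 0x40000000); // true
console.log(0x40000000 === 2 ** 30);        // true: fits an Int32, not a 31-bit Smi
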
diff --git a/deps/v8/test/mjsunit/maglev/continuation-after-inlined.js b/deps/v8/test/mjsunit/maglev/continuation-after-inlined.js
new file mode 100644
index 0000000000..35fbe9d952
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/continuation-after-inlined.js
@@ -0,0 +1,29 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --maglev-inlining --allow-natives-syntax
+
+function hasInstance(x) {
+ %DeoptimizeFunction(bar);
+ return 5;
+}
+
+function Foo() {}
+Object.defineProperty(Foo, Symbol.hasInstance, {
+ value: hasInstance
+})
+
+let foo = new Foo();
+
+function bar(x) {
+ return x instanceof Foo;
+}
+
+%PrepareFunctionForOptimization(bar);
+%PrepareFunctionForOptimization(hasInstance);
+assertTrue(bar(foo));
+assertTrue(bar(foo));
+
+%OptimizeMaglevOnNextCall(bar);
+assertTrue(bar(foo));
diff --git a/deps/v8/test/mjsunit/maglev/exceptions.js b/deps/v8/test/mjsunit/maglev/exceptions.js
index 02dc8db903..a07697ba3c 100644
--- a/deps/v8/test/mjsunit/maglev/exceptions.js
+++ b/deps/v8/test/mjsunit/maglev/exceptions.js
@@ -20,24 +20,27 @@ assertEquals(foo_int32(), 2);
%OptimizeMaglevOnNextCall(foo_int32);
assertEquals(foo_int32(), 2);
-// This examples creates a simple exception handler block where the trampoline
-// has an int32 value that overflows and it needs to create a HeapNumber.
-function foo_int32_overflow(x) {
- try {
- x = x + x;
- throw "Error";
- } catch {
- return x;
- }
-}
-%PrepareFunctionForOptimization(foo_int32_overflow);
-assertEquals(foo_int32_overflow(1), 2);
-%OptimizeMaglevOnNextCall(foo_int32_overflow);
-assertEquals(foo_int32_overflow(0x3FFFFFFF), 0x7FFFFFFE);
-// If we call it with a HeapNumber, we deopt before the exception:
-assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
-assertEquals(foo_int32_overflow(1.1), 2.2);
-assertFalse(%ActiveTierIsMaglev(foo_int32_overflow));
+// TODO(leszeks): There is currently no way for this to happen, because all
+// Int32 ops are eagerly checked for Smi overflow.
+//
+// // This example creates a simple exception handler block where the trampoline
+// // has an int32 value that overflows and it needs to create a HeapNumber.
+// function foo_int32_overflow(x) {
+// try {
+// x = x + x;
+// throw "Error";
+// } catch {
+// return x;
+// }
+// }
+// %PrepareFunctionForOptimization(foo_int32_overflow);
+// assertEquals(foo_int32_overflow(1), 2);
+// %OptimizeMaglevOnNextCall(foo_int32_overflow);
+// assertEquals(foo_int32_overflow(0x3FFFFFFF), 0x7FFFFFFE);
+// assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
+// // If we call it with a HeapNumber, we deopt before the exception:
+// assertEquals(foo_int32_overflow(1.1), 2.2);
+// assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
// This example creates a simple exception handler block where the trampoline
// has an float64 value and needs to convert to a tagged value.
diff --git a/deps/v8/test/mjsunit/maglev/function-apply.js b/deps/v8/test/mjsunit/maglev/function-apply.js
new file mode 100644
index 0000000000..d9a5208c37
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/function-apply.js
@@ -0,0 +1,126 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function foo(...args) {
+ return {'this': this, 'arguments': args};
+}
+
+function validCalls(a, ...args) {
+ let obj = {};
+
+ // No receiver or receiver is null or undefined.
+ assertEquals({'this': this, 'arguments': []}, foo.apply());
+ assertEquals({'this': this, 'arguments': []}, foo.apply(null));
+ assertEquals({'this': this, 'arguments': []}, foo.apply(undefined));
+ // Receiver is object.
+ assertEquals({'this': obj, 'arguments': []}, foo.apply(obj));
+ // Valid arguments array.
+ assertEquals({'this': this, 'arguments': [3, 4]}, foo.apply(null, args));
+ assertEquals({'this': obj, 'arguments': [3, 4]}, foo.apply(obj, args));
+ assertEquals(
+ {'this': this, 'arguments': [2, 3, 4]}, foo.apply(null, [a, ...args]));
+ assertEquals(
+ {'this': obj, 'arguments': [2, 3, 4]}, foo.apply(obj, [a, ...args]));
+ // Extra arguments are ignored.
+ assertEquals({'this': this, 'arguments': [3, 4]}, foo.apply(null, args, a));
+ assertEquals({'this': obj, 'arguments': [3, 4]}, foo.apply(obj, args, a));
+}
+
+%PrepareFunctionForOptimization(validCalls);
+validCalls(2, 3, 4);
+validCalls(2, 3, 4);
+%OptimizeMaglevOnNextCall(validCalls);
+validCalls(2, 3, 4);
+
+function invalidArgsArrayExplicitReceiver(a) {
+ return foo.apply(null, a);
+}
+
+%PrepareFunctionForOptimization(invalidArgsArrayExplicitReceiver);
+assertThrows(
+ () => invalidArgsArrayExplicitReceiver(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertThrows(
+ () => invalidArgsArrayExplicitReceiver(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertEquals(
+ {'this': this, 'arguments': []},
+ invalidArgsArrayExplicitReceiver(null, null, null));
+%OptimizeMaglevOnNextCall(invalidArgsArrayExplicitReceiver);
+assertThrows(
+ () => invalidArgsArrayExplicitReceiver(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertEquals(
+ {'this': this, 'arguments': []},
+ invalidArgsArrayExplicitReceiver(null, null, null));
+
+function invalidArgsArrayImplicitReceiver(...args) {
+ return foo.apply(...args);
+}
+
+%PrepareFunctionForOptimization(invalidArgsArrayImplicitReceiver);
+assertThrows(
+ () => invalidArgsArrayImplicitReceiver(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertThrows(
+ () => invalidArgsArrayImplicitReceiver(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertEquals(
+ {'this': this, 'arguments': []},
+ invalidArgsArrayImplicitReceiver(null, null, null));
+%OptimizeMaglevOnNextCall(invalidArgsArrayImplicitReceiver);
+assertThrows(
+ () => invalidArgsArrayImplicitReceiver(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertEquals(
+ {'this': this, 'arguments': []},
+ invalidArgsArrayImplicitReceiver(null, null, null));
+
+function invalidArgsArrayWithExtraSpread(a, ...args) {
+ return foo.apply(null, a, ...args);
+}
+
+%PrepareFunctionForOptimization(invalidArgsArrayWithExtraSpread);
+assertThrows(
+ () => invalidArgsArrayWithExtraSpread(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertThrows(
+ () => invalidArgsArrayWithExtraSpread(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertEquals(
+ {'this': this, 'arguments': []},
+ invalidArgsArrayWithExtraSpread(null, null, null));
+%OptimizeMaglevOnNextCall(invalidArgsArrayWithExtraSpread);
+assertThrows(
+ () => invalidArgsArrayWithExtraSpread(2, 3, 4), TypeError,
+ 'CreateListFromArrayLike called on non-object');
+assertEquals(
+ {'this': this, 'arguments': []},
+ invalidArgsArrayWithExtraSpread(null, null, null));
+
+function nullArgsArray(a, ...args) {
+ assertEquals({'this': this, 'arguments': []}, foo.apply(null, null));
+}
+
+%PrepareFunctionForOptimization(nullArgsArray);
+nullArgsArray(2, 3, 4);
+nullArgsArray(2, 3, 4);
+nullArgsArray(null, null, null);
+%OptimizeMaglevOnNextCall(nullArgsArray);
+nullArgsArray(2, 3, 4);
+nullArgsArray(null, null, null);
+
+function nullArgsArrayWithExtraSpread(a, ...args) {
+ assertEquals({'this': this, 'arguments': []}, foo.apply(null, null, ...args));
+}
+
+%PrepareFunctionForOptimization(nullArgsArrayWithExtraSpread);
+nullArgsArrayWithExtraSpread(2, 3, 4);
+nullArgsArrayWithExtraSpread(2, 3, 4);
+nullArgsArrayWithExtraSpread(null, null, null);
+%OptimizeMaglevOnNextCall(nullArgsArrayWithExtraSpread);
+nullArgsArrayWithExtraSpread(2, 3, 4);
+nullArgsArrayWithExtraSpread(null, null, null);
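
The assertions in the new function-apply.js follow from the language-level semantics of Function.prototype.apply: a null or undefined argsArray means "call with no arguments", any other non-object is rejected by CreateListFromArrayLike with a TypeError (the message asserted above), and anything after the argsArray parameter is simply ignored. A standalone sketch of those cases in plain JavaScript, with no natives syntax:

function probe(...args) { return args; }

console.log(probe.apply(null));             // []     -- no argsArray at all
console.log(probe.apply(null, null));       // []     -- null argsArray acts as absent
console.log(probe.apply(null, undefined));  // []     -- same for undefined
console.log(probe.apply(null, [3, 4]));     // [3, 4]
console.log(probe.apply(null, [3, 4], 5));  // [3, 4] -- extra arguments are ignored

try {
  probe.apply(null, 7);                     // a number is not array-like
} catch (e) {
  console.log(e instanceof TypeError);      // true
}
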
diff --git a/deps/v8/test/mjsunit/maglev/inline-fresh-parent-deopt-frame.js b/deps/v8/test/mjsunit/maglev/inline-fresh-parent-deopt-frame.js
new file mode 100644
index 0000000000..c0ccb8c7ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/inline-fresh-parent-deopt-frame.js
@@ -0,0 +1,22 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --maglev-inlining
+
+function inlined(x) {
+ return x + x;
+}
+
+function foo(y) {
+ let a = inlined(1);
+ let b = inlined(y);
+ return a + b;
+}
+
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(inlined);
+assertEquals(6, foo(2));
+%OptimizeMaglevOnNextCall(foo);
+assertEquals(6, foo(2));
+assertEquals(6.2, foo(2.1));
diff --git a/deps/v8/test/mjsunit/maglev/inline-phi-leak.js b/deps/v8/test/mjsunit/maglev/inline-phi-leak.js
new file mode 100644
index 0000000000..bb2dddf0b2
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/inline-phi-leak.js
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --maglev-inlining
+
+function inlined(x) {
+ if (x < 10) {
+ x;
+ } else {
+ x;
+ }
+  // At a merge point, we should not think that we are merging the caller
+  // function's Phi node.
+}
+
+function foo(y) {
+ y < 10;
+ let a = 1;
+ let b = 2;
+ for (let i = 0; i < y; i++) {
+ inlined(i); // Phi (representing i) can leak to the inlined function.
+ }
+}
diff --git a/deps/v8/test/mjsunit/maglev/lots-of-args.js b/deps/v8/test/mjsunit/maglev/lots-of-args.js
new file mode 100644
index 0000000000..9c75cb256d
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/lots-of-args.js
@@ -0,0 +1,46 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function foo(
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x
+) { }
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeMaglevOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/maglev/math-ceil.js b/deps/v8/test/mjsunit/maglev/math-ceil.js
new file mode 100644
index 0000000000..e25084ff24
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/math-ceil.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+function f(o) {
+ return Math.ceil(o.a);
+}
+
+%PrepareFunctionForOptimization(f);
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+%OptimizeMaglevOnNextCall(f);
+assertEquals(NaN, f({a:NaN}));
+assertEquals(Infinity, f({a:Infinity}));
+assertEquals(-Infinity, f({a:-Infinity}));
+assertEquals(-Infinity, 1/f({a:-0.5}));
+assertEquals(1, 1/f({a:0.5}));
+assertEquals(8, f({a: {valueOf(){
+ %DeoptimizeFunction(f);
+ return 7.5; }}}));
diff --git a/deps/v8/test/mjsunit/maglev/math-floor.js b/deps/v8/test/mjsunit/maglev/math-floor.js
new file mode 100644
index 0000000000..c897fbdcfb
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/math-floor.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+function f(o) {
+ return Math.floor(o.a);
+}
+
+%PrepareFunctionForOptimization(f);
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+%OptimizeMaglevOnNextCall(f);
+assertEquals(NaN, f({a:NaN}));
+assertEquals(Infinity, f({a:Infinity}));
+assertEquals(-Infinity, f({a:-Infinity}));
+assertEquals(-1, 1/f({a:-0.5}));
+assertEquals(Infinity, 1/f({a:0.5}));
+assertEquals(7, f({a: {valueOf(){
+ %DeoptimizeFunction(f);
+ return 7.5; }}}));
diff --git a/deps/v8/test/mjsunit/maglev/math-round.js b/deps/v8/test/mjsunit/maglev/math-round.js
new file mode 100644
index 0000000000..2e16fd9fa5
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/math-round.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+function f(o) {
+ return Math.round(o.a);
+}
+
+%PrepareFunctionForOptimization(f);
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+f({a: {valueOf(){ return 7.5; }}});
+%OptimizeMaglevOnNextCall(f);
+assertEquals(NaN, f({a:NaN}));
+assertEquals(Infinity, f({a:Infinity}));
+assertEquals(-Infinity, f({a:-Infinity}));
+assertEquals(-Infinity, 1/f({a:-0.5}));
+assertEquals(1, 1/f({a:0.5}));
+assertEquals(8, f({a: {valueOf(){
+ %DeoptimizeFunction(f);
+ return 7.5; }}}));
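
The expectations in the three Math tests above combine two pieces of standard JavaScript behaviour: Math.ceil and Math.round take -0.5 up to -0 while Math.floor takes it down to -1, and the sign of a zero result is only observable by dividing by it (1 / -0 is -Infinity, 1 / +0 is +Infinity), which is why the tests compare 1/f(...) against ±Infinity. A small sketch of just those semantics:

console.log(Math.ceil(-0.5), Math.round(-0.5), Math.floor(-0.5));  // values: -0, -0, -1
console.log(Math.ceil(0.5), Math.round(0.5), Math.floor(0.5));     // values: 1, 1, 0
console.log(Object.is(Math.round(-0.5), -0));                      // true
console.log(1 / Math.round(-0.5));                                 // -Infinity
console.log(1 / Math.floor(0.5));                                  // Infinity
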
diff --git a/deps/v8/test/mjsunit/maglev/negate.js b/deps/v8/test/mjsunit/maglev/negate.js
new file mode 100644
index 0000000000..6e3c2d61c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/negate.js
@@ -0,0 +1,67 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function negate(val) {
+ return -val;
+}
+
+function test_negate_int32(value, expected) {
+ // Warmup.
+ %PrepareFunctionForOptimization(negate);
+ %ClearFunctionFeedback(negate);
+ negate(1, -1);
+ %OptimizeMaglevOnNextCall(negate);
+ assertEquals(expected, negate(value));
+ assertTrue(isMaglevved(negate));
+
+ %DeoptimizeFunction(negate);
+ assertEquals(expected, negate(value));
+}
+
+test_negate_int32(1, -1);
+test_negate_int32(-1, 1);
+test_negate_int32(42, -42);
+test_negate_int32(-42, 42);
+
+function test_negate_float(value, expected) {
+ // Warmup.
+ %PrepareFunctionForOptimization(negate);
+ %ClearFunctionFeedback(negate);
+ negate(1.1, -1.1);
+ %OptimizeMaglevOnNextCall(negate);
+ assertEquals(expected, negate(value));
+ assertTrue(isMaglevved(negate));
+
+ %DeoptimizeFunction(negate);
+ assertEquals(expected, negate(value));
+}
+
+test_negate_float(1.23, -1.23);
+test_negate_float(-1.001, 1.001);
+test_negate_float(42.42, -42.42);
+test_negate_float(-42.42, 42.42);
+
+const int32_max = Math.pow(2,30)-1;
+const int32_min = -Math.pow(2,31);
+test_negate_float(int32_max, -int32_max);
+test_negate_float(int32_min, -int32_min);
+
+function test_negate_int32_expect_deopt(value, expected) {
+ // Warmup.
+ %PrepareFunctionForOptimization(negate);
+ %ClearFunctionFeedback(negate);
+ negate(12, -12);
+ %OptimizeMaglevOnNextCall(negate);
+ assertEquals(expected, negate(value));
+ assertFalse(isMaglevved(negate));
+}
+
+test_negate_int32_expect_deopt(0, -0);
+test_negate_int32_expect_deopt(-0, 0);
+test_negate_int32_expect_deopt(int32_min, -int32_min);
+test_negate_int32_expect_deopt(-int32_min, int32_min);
+test_negate_int32_expect_deopt(int32_max, -int32_max);
+test_negate_int32_expect_deopt(-int32_max, int32_max);
diff --git a/deps/v8/test/mjsunit/maglev/nested-continuations.js b/deps/v8/test/mjsunit/maglev/nested-continuations.js
new file mode 100644
index 0000000000..2bdca505ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/nested-continuations.js
@@ -0,0 +1,35 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function Foo() {}
+Object.defineProperty(Foo, Symbol.hasInstance, { value: Math.round });
+
+let foo = new Foo();
+
+function bar(f) {
+ // `f instanceof Foo` runs `%ToBoolean(Foo[Symbol.hasInstance](f))`, where
+ // `Foo[Symbol.hasInstance]` is `Math.round`.
+ //
+ // So with sufficient builtin inlining, this will call
+ // `%ToBoolean(round(%ToNumber(f)))`, which will call `f.valueOf`. If this
+ // deopts (which in this test it will), we need to make sure to both round it,
+ // and then convert that rounded value to a boolean.
+ return f instanceof Foo;
+}
+
+foo.valueOf = () => {
+ %DeoptimizeFunction(bar);
+ // Return a value which, when rounded, has ToBoolean false, and when not
+ // rounded, has ToBoolean true.
+ return 0.2;
+}
+
+%PrepareFunctionForOptimization(bar);
+assertFalse(bar(foo));
+assertFalse(bar(foo));
+
+%OptimizeMaglevOnNextCall(bar);
+assertFalse(bar(foo));
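
The comment in nested-continuations.js leans on the standard dispatch of instanceof: when the right-hand side has a Symbol.hasInstance method, `x instanceof C` evaluates C[Symbol.hasInstance](x) and applies ToBoolean to the result. With Math.round installed as that method, the rounded value decides the boolean, which is exactly why 0.2 (rounds to 0, which is falsy) makes a useful probe. A plain-JavaScript illustration, independent of Maglev (Gate is a hypothetical name used only here):

function Gate() {}
Object.defineProperty(Gate, Symbol.hasInstance, { value: Math.round });

// instanceof calls Gate[Symbol.hasInstance](x), i.e. Math.round(x), which in
// turn calls x.valueOf(); the result is then coerced with ToBoolean.
console.log(({ valueOf: () => 0.2 }) instanceof Gate);  // false: round(0.2) -> 0
console.log(({ valueOf: () => 0.8 }) instanceof Gate);  // true:  round(0.8) -> 1
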
diff --git a/deps/v8/test/mjsunit/maglev/omit-default-ctors.js b/deps/v8/test/mjsunit/maglev/omit-default-ctors.js
index 430a1118b8..e6686ebcf7 100644
--- a/deps/v8/test/mjsunit/maglev/omit-default-ctors.js
+++ b/deps/v8/test/mjsunit/maglev/omit-default-ctors.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --omit-default-ctors --allow-natives-syntax --maglev
+// Flags: --omit-default-ctors --allow-natives-syntax --maglev --no-maglev-inlining
(function OmitDefaultBaseCtor() {
class A {}; // default base ctor -> will be omitted
diff --git a/deps/v8/test/mjsunit/maglev/phi-untagging-conversions.js b/deps/v8/test/mjsunit/maglev/phi-untagging-conversions.js
new file mode 100644
index 0000000000..6c9a19aa0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/phi-untagging-conversions.js
@@ -0,0 +1,135 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax --no-always-turbofan
+
+
+// In this example, the final block of the graph will be something like:
+//
+// 1: Phi(#42, y)
+// 2: CheckedSmiUntag(1)
+// 3: Return(1)
+//
+// Note how the truncation is unused (but still required, as the graph builder
+// doesn't know that all of the inputs of the Phi are Smis). After Phi
+// untagging, the phi will be an Int32, and the truncation is thus not needed
+// anymore. It's important that it's removed during Phi untagging, because its
+// input is no longer tagged (since the phi was untagged), and it's not
+// necessary to replace it with something else.
+
+function unused_unneeded_CheckedSmiUntag(x) {
+ let y = x + 1;
+ y = x ? 42 : y;
+ return y | 0;
+}
+
+%PrepareFunctionForOptimization(unused_unneeded_CheckedSmiUntag);
+unused_unneeded_CheckedSmiUntag(1);
+unused_unneeded_CheckedSmiUntag(0);
+%OptimizeMaglevOnNextCall(unused_unneeded_CheckedSmiUntag);
+unused_unneeded_CheckedSmiUntag(1);
+
+// This example is similar as the previous one, except that the graph will
+// contain a CheckedTruncateNumberToInt32 instead of the CheckedSmiUntag:
+//
+// 1: Phi(c1, c2)
+// 2: CheckedTruncateNumberToInt32(1)
+// 3: Return(undefined)
+//
+// The conversion only fails when its input is not a number. Since the Phi was
+// untagged to a Float64, the conversion can never fail anymore, and should thus
+// be omitted.
+//
+// Note that if the result of the CheckedTruncateNumberToInt32 had been
+// used, then it would have been automatically changed into a
+// TruncateFloat64ToInt32.
+//
+// A side-effect of having this truncation is that it ensures that its input is a
+// boxed Float64, which means that subsequent untagging can use
+// UncheckedNumberToFloat64 instead of CheckedNumberToFloat64. This is what is
+// used here when doing `let as_double = phi + 0.6`. This
+// UncheckedNumberToFloat64 will just be dropped, since the input (the phi) is
+// already an unboxed Float64.
+
+function unused_unneeded_CheckedTruncateNumberToInt32(x) {
+ let c1 = x + 0.5;
+ let c2 = x + 1.5;
+ let phi = x ? c1 : c2;
+ let as_int = phi | 0;
+ let as_double = phi + 0.6;
+}
+
+%PrepareFunctionForOptimization(unused_unneeded_CheckedTruncateNumberToInt32);
+unused_unneeded_CheckedTruncateNumberToInt32(1.5);
+%OptimizeMaglevOnNextCall(unused_unneeded_CheckedTruncateNumberToInt32);
+unused_unneeded_CheckedTruncateNumberToInt32(1.5);
+
+
+
+// In this example, during feedback collection, we provide inputs that cause
+// `phi` to always be a Smi, which means that `phi | 0` will have Smi
+// feedback. The graph will thus be:
+//
+// 1: Phi(d, #42)
+// 2: CheckedSmiUntag(1)
+// 3: Return(1)
+//
+// Except that Phi untagging will realize that `phi` could be a Float64, and
+// will thus decide that it should be a Float64 Phi. In this case, the
+// conversion should not be dropped, because if the Phi is not an Int32, then it
+// should not be returned directly, but instead be truncated. In practice,
+// though, we replace it with a CheckedTruncateFloat64ToInt32, which
+// will fail if the Float64 isn't an Int32, causing a deopt, and the reoptimized
+// code will have better feedback (this is easier than trying to patch the
+// `Return(1)` to return something else).
+function unused_required_CheckedSmiUntag(x) {
+ x = x + 0.5; // ensuring Float64 alternative
+ let d = x + 2.53;
+ let phi = x ? d : 42;
+ return phi | 0;
+}
+
+%PrepareFunctionForOptimization(unused_required_CheckedSmiUntag);
+unused_required_CheckedSmiUntag(-0.5);
+%OptimizeMaglevOnNextCall(unused_required_CheckedSmiUntag);
+assertEquals(42, unused_required_CheckedSmiUntag(-0.5));
+assertOptimized(unused_required_CheckedSmiUntag);
+// If the CheckedSmiUntag is dropped, then the truncation won't be done, and the
+// non-truncated float (3.53) will be returned. Instead, if the conversion is
+// changed to CheckedTruncateFloat64ToInt32, then it will deopt, and we'll get
+// the correct result of 3.
+assertEquals(3, unused_required_CheckedSmiUntag(0.5));
+assertUnoptimized(unused_required_CheckedSmiUntag);
+
+
+
+// Finally, in this example, during feedback collection, `phi` will always be a
+// Smi, which means that `phi + 2` will be an Int32AddWithOverflow, preceded by
+// a CheckedSmiUntag(Phi):
+//
+// 1: Phi(d, #42)
+// 2: CheckedSmiUntag(1)
+// 3: Int32AddWithOverflow(2, #2)
+//
+// However, Phi untagging will detect that `phi` should be a Float64 phi. In
+// that case, it's important that the CheckedSmiUntag conversion isn't dropped,
+// and instead becomes a deopting Float64->Int32 conversion that deopts when
+// its input cannot be converted to Int32 without loss of precision (i.e., it
+// should become a CheckedTruncateFloat64ToInt32 rather than a
+// TruncateFloat64ToInt32). Then,
+// when the Phi turns out to be a non-Smi Float64, the function should deopt.
+function used_required_deopting_Float64ToInt32(x) {
+ x = x + 0.5; // ensuring Float64 alternative
+ let d = x + 2.53;
+ let phi = x ? d : 42;
+ return phi + 2;
+}
+%PrepareFunctionForOptimization(used_required_deopting_Float64ToInt32);
+used_required_deopting_Float64ToInt32(-0.5);
+%OptimizeMaglevOnNextCall(used_required_deopting_Float64ToInt32);
+assertEquals(44, used_required_deopting_Float64ToInt32(-0.5));
+// The next call should cause a deopt, since `phi` will be 4.53, which shouldn't
+// be truncated to go into the Int32AddWithOverflow but should instead cause a
+// deopt to allow the `phi + 2` to be computed on double values.
+assertEquals(1.5+0.5+2.53+2, used_required_deopting_Float64ToInt32(1.5));
+assertUnoptimized(used_required_deopting_Float64ToInt32);
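
The correctness argument running through the comments above ultimately rests on what `phi | 0` means at the JavaScript level: bitwise OR applies ToInt32, so a non-integral double must be truncated toward zero before it flows on, and returning (or adding to) the untruncated float instead would be observably wrong. A quick sketch of that user-visible contract:

function orZero(x) { return x | 0; }

console.log(orZero(3.53));      // 3    -- ToInt32 truncates toward zero
console.log(orZero(42));        // 42   -- already an Int32, unchanged
console.log(orZero(4.53) + 2);  // 6    -- truncation happens before the add
console.log(4.53 + 2);          // 6.53 -- what the last test above must return,
                                //         hence the deopt instead of truncating
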
diff --git a/deps/v8/test/mjsunit/maglev/polymorphic-load-number.js b/deps/v8/test/mjsunit/maglev/polymorphic-load-number.js
new file mode 100644
index 0000000000..cfda707d7c
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/polymorphic-load-number.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+
+function foo(o) {
+ return o.length;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(6, foo("string"));
+assertEquals(undefined, foo(4.2));
+
+%OptimizeMaglevOnNextCall(foo);
+assertEquals(6, foo("string"));
+assertEquals(undefined, foo(4.2));
diff --git a/deps/v8/test/mjsunit/maglev/regress-1403324.js b/deps/v8/test/mjsunit/maglev/regress-1403324.js
new file mode 100644
index 0000000000..51d6630f19
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-1403324.js
@@ -0,0 +1,29 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function foo(__v_4) {
+ var __v_5 = function () {
+ return __v_4;
+ }();
+ var __v_6 = __v_5.x;
+ arguments[42];
+ return __v_6 + __v_5.x;
+}
+var __v_0 = {x: 24};
+__v_0.g = 43;
+
+%PrepareFunctionForOptimization(foo);
+foo({x: 42});
+foo({x: 42});
+
+%OptimizeMaglevOnNextCall(foo);
+var __v_3 = {x: 42};
+Object.prototype.__defineGetter__(42, function () {
+ __v_3.__defineGetter__("x", function () {
+ });
+});
+
+assertEquals(NaN, foo(__v_3));
diff --git a/deps/v8/test/mjsunit/maglev/regress-1405092.js b/deps/v8/test/mjsunit/maglev/regress-1405092.js
new file mode 100644
index 0000000000..81480c9cb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-1405092.js
@@ -0,0 +1,29 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+'use strict';
+
+function foo(obj, ...args) {
+ obj['apply'](...args);
+}
+
+var x = 0;
+
+function bar() {
+ try {
+ this.x;
+ } catch (e) {
+ x++;
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+foo(bar);
+
+%OptimizeMaglevOnNextCall(foo);
+foo(bar);
+
+assertEquals(2, x);
diff --git a/deps/v8/test/mjsunit/maglev/regress-1406456.js b/deps/v8/test/mjsunit/maglev/regress-1406456.js
new file mode 100644
index 0000000000..5c6edf3e71
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-1406456.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --harmony-rab-gsab
+
+function foo() {
+ const buffer = new SharedArrayBuffer(1395, {
+ "maxByteLength": 2110270,
+ });
+ const data = new DataView(buffer);
+ data.setInt16();
+}
+%PrepareFunctionForOptimization(foo);
+foo();
+%OptimizeMaglevOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/maglev/regress-1407606.js b/deps/v8/test/mjsunit/maglev/regress-1407606.js
new file mode 100644
index 0000000000..f44142c008
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-1407606.js
@@ -0,0 +1,18 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+for (let v0 = 0; v0 < 100; v0++) {
+ for (let v1 = 0; v1 < 100; v1++) {
+ const v4 = new Float64Array(33519);
+ }
+ for (let v5 = 0; v5 < 100; v5++) {
+ function F8( a12) {
+ if (!new.target) { throw 'must be called with new'; }
+ a12--;
+ }
+ const v14 = new F8(- -1000000.0);
+ }
+}
diff --git a/deps/v8/test/mjsunit/maglev/regress-1411075.js b/deps/v8/test/mjsunit/maglev/regress-1411075.js
new file mode 100644
index 0000000000..202b61f3f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-1411075.js
@@ -0,0 +1,17 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function foo(__v_6, __v_7) {
+ return +__v_6.x;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo({ x: 42 });
+foo(false);
+%OptimizeMaglevOnNextCall(foo);
+foo(false);
+
+assertEquals(NaN, foo(false));
diff --git a/deps/v8/test/mjsunit/maglev/regress-1417125.js b/deps/v8/test/mjsunit/maglev/regress-1417125.js
new file mode 100644
index 0000000000..526968dbba
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-1417125.js
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+// Flags: --single-threaded --deopt-to-baseline
+// Flags: --maglev --no-turbofan
+
+
+function foo() {
+ try {
+ foo();
+ String.fromCharCode(48,2268268757,48,59,25000,102,111,8,116,45,119,101,105,103,104,116,58,98,111,108,100,59,8,111,8,3394964672,2268268757,102,97,109,105,108,121,58,65,114,3746,97,108,44,32,72,101,8,8,101,116,105,8,97,44,3746,115,10000,110,115,45,25000,101,114,105,8,44,86,101,8,8,1,110,3746,34,32,99,111,108,111,4294967295,16,34,35,8,70,48,4294967295,4294967295,48,34,8,3746,79,82,69,88,47,80,65,82,38,35,1,48,52,59,8,8,25000,3394964672,-1745691656,47,62,84,4294967295,32,38,35,8,48,52,59,38,35,51,53,48,4003,76,4294967295,77,76,8,8,38,35,51,48,52,59,60,47,10000,111,110,116,62,60,47,25000,10000,97,110,25000,25000,47,116,-1745691656,62,10,8,3746,116,114,62,60,116,4294967295,3746,10,8,116,8,32,97,108,105,103,110,2268268757,34,108,101,3746,116,8,62,60,115,112,97,110,8,105,100,8,34,97,99,95,100,101,4294967295,99,34,62,60,102,111,8,8,32,115,8,121,108,3394964672,61,34,8,111,3394964672,116,45,115,25000,122,4003,16,8,49,4294967295,120,59,32,99,8,108,8,114,3746,4003,48,48,48,48,48,3746,59,32,4003,111,110,116,2268268757,102,97,109,105,108,121,8,8,4294967295,105,97,8,44,32,16,101,108,4294967295,101,116,105,99,3746,44,32,115,97,110,4294967295,45,3746,101,2268268757,105,102,44,86,101,114,100,3746,110,97,34,16,38,112,16,117,8,100,59,47,36,32,50,25000,112,10000,112,44,32,89,84,8,32,49,3746,32,25000,105,112,44,65,-1745691656,116,38,35,51,48,53,1,110,32,8,32,99,8,110,116,46,4003,8,8,98,2,116,32,83,112,114,101,97,100,45,84,38,117,-1745691656,109,108,59,114,60,3746,4294967295,114,32,47,8,107,32,66,97,110,8,97,115,38,35,2268268757,48,53,59,32,65,86,65,4003,2268268757,65,74,73,3394964672,2,102,111,110,116,62,60,8,115,112,97,110,62,60,16,116,100,4294967295,10,60,16,116,114,2268268757,2268268757,4294967295,114,62,10,60,116,100,32,97,108,105,103,110,61,8,108,101,102,116,34,62,3394964672,100,105,118,32,25000,100,61,8,97,99,95,117,2,108,2268268757,8,60,102,3394964672,110,116,3746,8,8,3746,108,101,61,4294967295,102,111,110,116,45,115,105,4294967295,101,8,49,48,112,120,59,32,3746,111,108,111,114,58,35,70,70,54,54,57,2,59,32,3746,111,110,4294967295,45,102,0,109,105,10000,121,58,65,114,105,8,108,44,32,72,101,108,3394964672,101,10000,8,99,97,44,32,115,97,110,115,8,115,101,8,16,4294967295,44,86,16,114,100,97,110,8,34,8,119,4294967295,119,46,104,101,100,101,118,111,8,119,98,114,3746,4003,62,3394964672,108,8,3746,101,46,99,3746,109,60,47,102,2268268757,110,16,62,60,4294967295,8,105,118,62,25000,47,0,100,62,8,47,116,16,62,60,47,116,97,8,108,101,62,60,8,116,100,62,60,-1745691656,116,114,62,60,8,-1745691656,62,10,60,116,100,32,99,8,97,4294967295,2268268757,61,34,97,99,95,107,97,3746,105,109,34,32,104,101,2268268757,103,104,8,61,34,50,48,37,34,32,98,8,99,-1745691656,108,111,114,61,2268268757,8,70,70,70,4294967295,70,1,34,32,105,100,61,34,116,97,119,52,34,32,97,108,105,103,110,61,34,108,8,102,116,3746,32,118,97,4294967295,105,8,110,61,34,109,105,100,8,108,3746,34,32,111,8,70,111,99,117,4294967295,4003,8,115,115,40,16,103,111,32,116,111,32,119,119,119,0,107,97,108,101,8,101,60,119,98,3746,0,47,62,46,99,111,109,39,44,25000,97,119,52,39,4294967295,34,32,111,110,77,111,117,115,1,-1745691656,118,3746,3746,61,34,115,3746,40,8,10000,8,32,116,111,32,119,3746,119,46,107,97,108,101,100,101,60,119,98,114,32,-1745691656,62,46,99,111,8,39,8,39,97,119,8,8,41,34,32,32,2,110,77,111,117,115,8,79,117,116,61,34,99,8,8,41,34,4294967295,25000,110,8,108,105,1,107,61,34,103,97,40,39,10000,8,116,112,58,4294967295,47,8,100,115,101,2268268757,4294967295,101,114,46,109,121,110,101,8,3746,99,111,109,1,65,100,83,101,114,118,101,8,47,99,25000,105,99,107,46,106,4
294967295,4003,63,117,114,2268268757,8,3746,3394964672,49,48,4294967295,50,53,49,50,1,55,54,51,2268268757,52,4294967295,3394964672,51,49,8,52,3746,48,10000,57,54,48,48,54,51,49,4294967295,54,54,52,52,56,8,56,4003,50,48,8,49,8,52,55,51,55,54,52,51,50,57,4294967295,52,50,8,51,8,8,51,54,16,48,48,48,3746,8,56,49,55,50,8,57,53,48,8,2268268757,49,57,48,54,3746,56,55,50,4294967295,49,55,48,55,53,48,57,50,55,8,55,57,57,51,3746,53,50,52,54,49,51,56,49,57,53,55,8,2,50,8,8,50,55,0,48,8,53,57,56,8,8,50,55,48,4294967295,8,51,49,54,52,1,54,8,53,48,56,57,50,25000,54,4294967295,48,8,49,54,4294967295,25000,57,48,57,49,8,57,8,55,52,55,8,50,48,55,1,4294967295,51,51,25000,51,50,55,2268268757,50,54,55,50,3746,48,51,57,49,8,54,0,8,55,8,51,55,3394964672,52,51,49,51,52,8,56,51,54,51,52,53,8,3746,3746,53,57,48,8,48,56,54,57,49,52,53,49,49,52,4294967295,53,50,120,49,57,50,88,49,8,2,88,8,56,88,2268268757,49,88,56,48,56,48,88,8,39,41,8,0,2268268757,116,1,108,3746,61,34,99,117,114,115,111,114,58,4294967295,3394964672,105,110,116,101,114,34,8,1,8,116,97,98,108,101,32,119,105,25000,116,104,61,34,49,53,54,34,32,98,111,114,100,101,114,61,4294967295,48,34,32,99,101,108,8,115,112,97,99,105,110,103,61,34,49,34,32,99,101,8,108,2,97,100,100,105,110,8,3746,8,49,34,62,10,3746,116,114,62,2268268757,32,32,60,-1745691656,3394964672,32,97,8,105,103,110,61,34,3394964672,101,102,2,34,32,62,3394964672,8,112,97,4003,32,105,0,61,34,97,99,95,116,105,116,108,101,4294967295,62,60,102,111,110,116,32,115,116,8,108,101,61,34,102,111,110,116,8,115,8,122,101,58,8,50,3746,120,59,8,99,111,108,111,114,2268268757,8,70,70,48,48,3746,8,59,2268268757,102,8,110,4294967295);
+ } catch {
+ %DeoptimizeNow();
+ }
+}
+foo.valueOf = foo;
+42 > foo;
+foo();
diff --git a/deps/v8/test/mjsunit/maglev/regress-4349817-1.js b/deps/v8/test/mjsunit/maglev/regress-4349817-1.js
new file mode 100644
index 0000000000..bbeb0c2309
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-4349817-1.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --interrupt-budget-for-maglev=128 --single-threaded --verify-heap
+
+for (let v0 = 0; v0 < 10; v0++) {
+ function f1(a2) {
+ const v4 = 1 & a2;
+ const v6 = Math.floor(v0);
+ v8 =undefined / undefined | 0;
+ const v13 = Array();
+ v13[521] = v8;
+ for (let i15 = 0; i15 < v4; i15++) {
+ i15 + v6;
+ v13[i15] = i15;
+ i15 = f1;
+ i15++;
+ }
+ v13.sort(f1);
+ }
+ f1("5001");
+}
diff --git a/deps/v8/test/mjsunit/maglev/regress-4349817-2.js b/deps/v8/test/mjsunit/maglev/regress-4349817-2.js
new file mode 100644
index 0000000000..3335ae85e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress-4349817-2.js
@@ -0,0 +1,21 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --expose-gc --verify-heap
+
+function f(arr) {
+ let phi = arr ? 1 : 42.5;
+ phi |= 0;
+ arr[5] = phi;
+}
+
+let arr = Array(10);
+gc();
+gc();
+
+%PrepareFunctionForOptimization(f);
+f(arr);
+%OptimizeMaglevOnNextCall(f);
+f(arr);
+gc();
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-1383712.js b/deps/v8/test/mjsunit/maglev/regress/regress-1383712.js
new file mode 100644
index 0000000000..5b308e43a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-1383712.js
@@ -0,0 +1,26 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+// Pseudo-side-effecting function.
+function bar() {}
+%NeverOptimizeFunction(bar);
+
+function foo(i) {
+ // First load checks for HeapNumber map, allowing through Smis.
+ i['oh'];
+ // Cause side-effects to clear known maps of i.
+ bar(i);
+ // Second load should not crash for Smis.
+ i['no'];
+}
+
+%PrepareFunctionForOptimization(foo);
+// Give the two loads polymorphic feedback in HeapNumber and {some object}.
+foo({});
+foo(1);
+%OptimizeMaglevOnNextCall(foo);
+// Pass a Smi to loads with a HeapNumber map-check.
+foo(2);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-1392936.js b/deps/v8/test/mjsunit/maglev/regress/regress-1392936.js
new file mode 100644
index 0000000000..12a09d7184
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-1392936.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+var func = function(){}
+function foo() {
+ let call = func.call;
+ call();
+}
+%PrepareFunctionForOptimization(foo);
+try {foo();} catch {}
+try {foo();} catch {}
+%OptimizeMaglevOnNextCall(foo);
+try {foo();} catch {}
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-1394279.js b/deps/v8/test/mjsunit/maglev/regress/regress-1394279.js
new file mode 100644
index 0000000000..a299853db2
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-1394279.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev --single-threaded
+
+function foo(b) {
+ foo.bind();
+ if (b) {
+ %OptimizeFunctionOnNextCall(foo);
+ }
+ for (let i = 0; i < 10000; i++) {}
+ foo instanceof foo;
+}
+
+ %PrepareFunctionForOptimization(foo);
+foo(false);
+%OptimizeMaglevOnNextCall(foo);
+foo(true);
+foo.prototype = foo;
+foo(true);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-1403575.js b/deps/v8/test/mjsunit/maglev/regress/regress-1403575.js
new file mode 100644
index 0000000000..f1ae351baf
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-1403575.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function f(y) {
+ const x = y % y;
+ return 1 / x;
+}
+%PrepareFunctionForOptimization(f);
+assertEquals(f(2), Infinity);
+%OptimizeMaglevOnNextCall(f);
+assertEquals(f(-2), -Infinity);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-1405651.js b/deps/v8/test/mjsunit/maglev/regress/regress-1405651.js
new file mode 100644
index 0000000000..09a6fbeb4d
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-1405651.js
@@ -0,0 +1,51 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --harmony --no-always-turbofan --maglev
+
+function assertMaglevved(f) {
+ assertTrue(isMaglevved(f));
+}
+
+function f(x) {
+ return x[2];
+}
+
+let buff = new ArrayBuffer(1024);
+let arr = new Int32Array(buff);
+arr[2] = 42;
+
+%PrepareFunctionForOptimization(f);
+assertEquals(42, f(arr));
+
+%OptimizeMaglevOnNextCall(f);
+assertEquals(42, f(arr));
+
+assertMaglevved(f);
+// Detaching {buff} will cause {f} to deoptimize thanks to the protector.
+buff.transfer();
+assertUnoptimized(f);
+
+assertEquals(undefined, f(arr));
+
+let buff2 = new ArrayBuffer(1024);
+let arr2 = new Int32Array(buff2);
+arr2[2] = 42;
+
+// Re-optimizing {f} (with fresh feedback), now that the protector for
+// detached array buffer doesn't hold anymore.
+%ClearFunctionFeedback(f);
+%PrepareFunctionForOptimization(f);
+assertEquals(42, f(arr2));
+%OptimizeMaglevOnNextCall(f);
+assertEquals(42, f(arr2));
+assertMaglevved(f);
+
+// The protector doesn't hold anymore, so detaching buff2 shouldn't deopt {f}.
+buff2.transfer();
+assertMaglevved(f);
+
+// The runtime check will trigger a deopt since {buff2} has been detached.
+assertEquals(undefined, f(arr2));
+assertUnoptimized(f);
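
The regression test above relies on the observable semantics of ArrayBuffer.prototype.transfer (enabled here via --harmony): transferring detaches the source buffer, a detached buffer reports a byteLength of 0, and indexed reads on a TypedArray over a detached buffer yield undefined rather than throwing. A small sketch of just that behaviour, without the deopt machinery:

const buffer = new ArrayBuffer(16);
const view = new Int32Array(buffer);
view[2] = 42;
console.log(view[2]);           // 42

buffer.transfer();              // detaches `buffer`; the contents move elsewhere
console.log(buffer.byteLength); // 0 -- detached buffers report zero length
console.log(view.length);       // 0
console.log(view[2]);           // undefined -- reads on a detached buffer
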
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-1407959.js b/deps/v8/test/mjsunit/maglev/regress/regress-1407959.js
new file mode 100644
index 0000000000..205e1f50a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-1407959.js
@@ -0,0 +1,27 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function func() {}
+function foo(x) {
+ return x instanceof func;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeMaglevOnNextCall(foo);
+foo();
+
+let custom_has_instance_runs = 0;
+Object.defineProperty(func, Symbol.hasInstance, {
+ value: function() {
+ custom_has_instance_runs++;
+ return true;
+ }
+});
+
+assertTrue(foo());
+assertEquals(custom_has_instance_runs, 1);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1392061.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1392061.js
new file mode 100644
index 0000000000..6959bd1657
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1392061.js
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --maglev --allow-natives-syntax
+
+const obj7 = -1;
+function foo(arg) {
+ let obj10 = 0;
+ let obj11 = 0;
+ for(var i= 0 ;i<2;i++){
+ const obj15 = 1;
+ const obj17 = obj11 + 1;
+ const obj18 = 2;
+ const obj19 = 3;
+ const obj20 = obj10 / 3;
+ obj10 = obj20;
+ let obj21 = 0;
+ do {
+ try {
+ const obj23 = !obj7;
+
+ } catch(e) {
+ }
+ obj21++;
+ } while (obj21 < 2);
+ }
+
+}
+const obj32 = [1,2,3,4];
+
+
+%PrepareFunctionForOptimization(foo);
+foo(obj32);foo(obj32);foo(obj32);
+console.log("maglev");
+%OptimizeMaglevOnNextCall(foo);
+foo(obj32);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1394036.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1394036.js
new file mode 100644
index 0000000000..5af755c620
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1394036.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function v1(v4) {
+ let v6 = 0;
+ const v8 = v6--;
+ for (let v11 = v6; v11 < 6; v11++) {
+ v4 = v6;
+ }
+ try {
+1(1,..."string",1024);
+ } catch(v18) {
+ }
+}
+
+for (let v19 = 0; v19 < 100; v19++) {
+ const v20 = v1();
+}
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403280.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403280.js
new file mode 100644
index 0000000000..e6f1b97bed
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403280.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function __f_41() {
+ return "abc".charCodeAt(undefined/2);
+}
+
+%PrepareFunctionForOptimization(__f_41);
+assertEquals(97, __f_41());
+%OptimizeMaglevOnNextCall(__f_41);
+assertEquals(97, __f_41());
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403323.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403323.js
new file mode 100644
index 0000000000..cc18697092
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403323.js
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function foo(a) {
+ if (a.length > 0) {}
+}
+%PrepareFunctionForOptimization(foo);
+foo(false);
+foo(false);
+foo(4);
+%OptimizeMaglevOnNextCall(foo);
+assertThrows(foo);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403399.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403399.js
new file mode 100644
index 0000000000..b13387771c
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403399.js
@@ -0,0 +1,36 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --maglev --allow-natives-syntax
+
+function __f_0() {
+ for (let __v_3 = 0; __v_3 < 52; ++__v_3) {
+ let __v_4 = __v_3 | 0;
+ switch (__v_4) {
+ case 28:
+ if (__v_3 != null && typeof __v_3 == "object") {
+ try {
+ Object.defineProperty( {
+ get: function () {
+ ({get: function () {
+ return __v_4;
+ }})
+ }
+ });
+ } catch (e) {}
+ }
+ case 29:
+ case 31:
+ case 32:
+ case 33:
+ __v_4 += 1;
+
+ case 34:
+ }
+ }
+}
+%PrepareFunctionForOptimization(__f_0);
+__f_0();
+%OptimizeMaglevOnNextCall(__f_0);
+__f_0();
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403470.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403470.js
new file mode 100644
index 0000000000..65ac15c584
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403470.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function __f_0( __v_18) {
+ -1 % 11;
+ return -1 % 11;
+}
+%PrepareFunctionForOptimization(__f_0);
+assertEquals(-1, __f_0());
+%OptimizeMaglevOnNextCall(__f_0);
+assertEquals(-1, __f_0());
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403749.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403749.js
new file mode 100644
index 0000000000..726cba89e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1403749.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function f(x) {
+ let c = x | -6;
+ switch (c) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case -5: return 3;
+ }
+ return 0;
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(0, f(-2147483648));
+assertEquals(3, f(-2127484783));
+%OptimizeMaglevOnNextCall(f);
+assertEquals(0, f(-2147483648));
+assertEquals(3, f(-2127484783));
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1405445.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1405445.js
new file mode 100644
index 0000000000..9ce382db38
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1405445.js
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function foo() {}
+function bar(...args) {
+ foo.apply(...args);
+}
+
+%PrepareFunctionForOptimization(bar);
+assertThrows('bar(2,3,4)');
+assertThrows('bar(2,3,4)');
+%OptimizeMaglevOnNextCall(bar);
+assertThrows('bar(2,3,4)');
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416693.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416693.js
new file mode 100644
index 0000000000..7e87f8c1f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416693.js
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function f() {
+ let i = 0;
+ let j = 0;
+
+ while (i < 1) {
+ j = i;
+ i++;
+ }
+
+ return j;
+}
+
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeMaglevOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416795.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416795.js
new file mode 100644
index 0000000000..a52ecaa5eb
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1416795.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function f(x) {
+ x++; // Making sure that {x} has a Float64 alternative
+ let phi = x ? 0 : x; // Creating a Phi whose inputs are Smi and Float64
+ const obj = { "d" : phi }; // Storing the Phi in a Smi field, which will
+ // insert a CheckSmi
+ --phi; // Using the Smi as an Int32, which will insert an UnsafeSmiUntag
+}
+
+%PrepareFunctionForOptimization(f);
+f(1.5);
+%OptimizeMaglevOnNextCall(f);
+f(1.5);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1417386.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1417386.js
new file mode 100644
index 0000000000..8077a66bbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1417386.js
@@ -0,0 +1,27 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function f(x) {
+ x++; // Making sure {x} has a Float64 alternative
+ let phi = x || 0; // Creating a Phi whose inputs are Smi and Float64. Phi
+ // untagging will make this a Float64 phi.
+ phi + 1; // Float64 operation with {phi}, which will cause a
+ // CheckedNumberToFloat64(phi) to be inserted. After phi untagging,
+ // the CheckedNumberToFloat64 will be killed, so that the addition
+ // takes {phi} directly as input.
+ phi << 4; // Int operation with {phi}, which will produce a
+ // TruncateNumberToInt32(phi). TruncateNumberToInt32 doesn't check
+ // if the input is a Number, and calls into the runtime if necessary
+ // to do the truncation, so it can't deopt. After Phi untagging, this
+ // should be replaced by a Float64->Int32 truncation
+ // (TruncateFloat64ToInt32), which shouldn't deopt either (because
+ // we can't replace a non-deopting node by a deopting one).
+}
+
+%PrepareFunctionForOptimization(f);
+f(1.5);
+%OptimizeMaglevOnNextCall(f);
+f(1.5);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421237.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421237.js
new file mode 100644
index 0000000000..6e06c80e5d
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421237.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function f() {
+ for (let v6 = 0; v6 < 10; v6++) {
+ ~v6;
+ let obj = { enumerable: true, value: v6 };
+ Object.defineProperty(Float64Array, 2, obj);
+ v6 %= v6;
+ }
+}
+
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeMaglevOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421375.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421375.js
new file mode 100644
index 0000000000..271ef30624
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421375.js
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function f(a) {
+ let v = 3.5 / a;
+ let x = 4.5 + v;
+ if (x) {
+ return 42;
+ } else {
+ return 12;
+ }
+}
+
+%PrepareFunctionForOptimization(f);
+assertEquals(f(), 12);
+%OptimizeMaglevOnNextCall(f);
+assertEquals(f(), 12);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421712.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421712.js
new file mode 100644
index 0000000000..b647d5f4b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1421712.js
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function f0(a) {
+ while (a > 0) {
+ [115,109,-1,96,127,5,,1,9223372036854775806,17,2,0,103];
+ const v35 = Math.round(-1e-15);
+ Math[v35];
+ a = 0;
+ }
+ return f0;
+}
+
+%PrepareFunctionForOptimization(f0);
+f0(1);
+%OptimizeMaglevOnNextCall(f0);
+f0(1);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1422864.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1422864.js
new file mode 100644
index 0000000000..d1bbfdca8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1422864.js
@@ -0,0 +1,32 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+function f(a) {
+ let not_a_smi = a ^ 1073741824; // Greater than Smi::kMaxValue (and cannot be
+ // a Constant, because we don't untag
+ // Phi(Constant,Constant)).
+ let phi = a ? not_a_smi : 4; // During feedback collection, this is a heap
+ // number, but Phi untagging will decide that it
+ // should be a Int32 phi.
+ let truncated = phi | 0; // Will insert a CheckedTruncateNumberToInt32
+ // conversion, which will become an Identity after
+ // phi untagging, but is an input to the following
+ // deopt state, which should thus be updated.
+ 10 * "a"; // can lazy deopt (an operation that can eager deopt could cause a
+ // similar bug, but it's a bit harder to set up the repro, because
+ // the deopt state used for lazy deopts is the "current" one,
+ // whereas eager deopt can use an earlier state as long as there are
+ // no side effects between the state and the current operation. Here
+ // for instance, replacing `10 * "a"` by `10000000 * a` (which can
+ // eager-deopt) doesn't reproduce the bug, because an earlier deopt
+ // state is used, which doesn't contain `truncated`).
+ return truncated; // uses `truncated` so that it's part of the lazy deopt state above
+}
+
+%PrepareFunctionForOptimization(f);
+f(1);
+%OptimizeMaglevOnNextCall(f);
+f(1);
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423580.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423580.js
new file mode 100644
index 0000000000..e128528dc9
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423580.js
@@ -0,0 +1,45 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function f_StoreTaggedFieldWithWriteBarrier() {
+ try {
+ function f4() {
+ }
+ "k"["k"]();
+ } catch(e8) {
+ e8.c = f_StoreTaggedFieldWithWriteBarrier;
+ for (let v11 = 1261610744; v11 < 1261610744; v11 = v11 + 3) {
+ // This code is unreachable. However, due to feedback sharing, this store
+ // will have an initialized feedback slot: it shares its slot with the
+ // `e8.c = f_StoreTaggedFieldWithWriteBarrier` store above, despite the two
+ // having different receivers, because both receivers have index 2:
+ // `e8` in the context, and `v11` in the locals.
+ v11.c = e8;
+ }
+ const t11 = 3;
+ }
+}
+
+%PrepareFunctionForOptimization(f_StoreTaggedFieldWithWriteBarrier);
+f_StoreTaggedFieldWithWriteBarrier();
+%OptimizeMaglevOnNextCall(f_StoreTaggedFieldWithWriteBarrier);
+f_StoreTaggedFieldWithWriteBarrier();
+
+
+function f_CheckedStoreSmiField(obj) {
+ obj.c = 42;
+ for (let v11 = 3.5; v11 < 3.5; v11 = v11 + 3) {
+ // This code is unreachable. However, due to feedback sharing, this store
+ // will have an initialized feedback slot: it shares its slot with the
+ // `obj.c = 42` store above, despite the two having different receivers,
+ // because they both have index 0: `obj` in the parameters, and `v11` in
+ // the locals.
+ v11.c = 4;
+ }
+}
+
+%PrepareFunctionForOptimization(f_CheckedStoreSmiField);
+f_CheckedStoreSmiField({c:42});
+%OptimizeMaglevOnNextCall(f_CheckedStoreSmiField);
+f_CheckedStoreSmiField({c:42});
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423610.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423610.js
new file mode 100644
index 0000000000..f43eead340
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1423610.js
@@ -0,0 +1,32 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax --expose-gc
+
+function f(a) {
+ let phi = a ? 0 : 4.2; // Phi untagging will untag this to a Float64
+ phi |= 0; // Causing a CheckedSmiUntag to be inserted
+ a.c = phi; // The graph builder will insert a StoreTaggedFieldNoWriteBarrier
+ // because `phi` is a Smi. After phi untagging, this should become a
+ // StoreTaggedFieldWithWriteBarrier, because `phi` is now a float.
+}
+
+// Allocating an object and making it old (its `c` field should be neither a Smi
+// nor a Double, so that the graph builder inserts a StoreTaggedFieldxxx rather
+// than a StoreDoubleField or CheckedStoreSmiField).
+let obj = {c:"a"};
+gc();
+gc();
+
+%PrepareFunctionForOptimization(f);
+f(obj);
+
+%OptimizeMaglevOnNextCall(f);
+// This call to `f` will store a young object into the `c` field of `obj`. This
+// should be done with a write barrier.
+f(obj);
+
+// If the write barrier was dropped, the GC will complain because it will see an
+// old->new pointer without a remembered set entry.
+gc();
diff --git a/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1425124.js b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1425124.js
new file mode 100644
index 0000000000..0a26991f2f
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/regress/regress-crbug-1425124.js
@@ -0,0 +1,21 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+function f() {
+ let v8 = 0;
+ let v9 = -1024;
+ for (let i = 0; i < 5; i++) {
+ const v23 = v8 - 55598 | 0;
+ const v24 = v8 + 2147483647;
+ v8 = v24 | f;
+ v9 = ((v9 & v24) && v23) | 0;
+ }
+}
+
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeMaglevOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/maglev/resumable-loop-context.js b/deps/v8/test/mjsunit/maglev/resumable-loop-context.js
new file mode 100644
index 0000000000..6490f2bc5f
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/resumable-loop-context.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --maglev
+
+async function __f_20() {
+ try {
+ for (let __v_58 = 0; __v_58 < 8; __v_58++) {
+ }
+ } catch (e) {}
+ for (let __v_59 = 1; __v_59 < 1337; __v_59++) {
+ function* __f_21() {}
+ const __v_66 = __f_21();
+ const __v_69 = await "-4294967296";
+ }
+}
+
+__f_20();
diff --git a/deps/v8/test/mjsunit/maglev/resumable.js b/deps/v8/test/mjsunit/maglev/resumable.js
new file mode 100644
index 0000000000..5a32afddb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/resumable.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+(function () {
+ async function foo() {
+ for (let i = 0; i == 1; i++) {
+ 780109, { get: function() { } }; await 7;
+ }
+ }
+ %PrepareFunctionForOptimization(foo);
+ foo();
+ %OptimizeMaglevOnNextCall(foo);
+ foo();
+})();
diff --git a/deps/v8/test/mjsunit/maglev/shift-right-logical-smi.js b/deps/v8/test/mjsunit/maglev/shift-right-logical-smi.js
index b24121b08d..6077d3c4ac 100644
--- a/deps/v8/test/mjsunit/maglev/shift-right-logical-smi.js
+++ b/deps/v8/test/mjsunit/maglev/shift-right-logical-smi.js
@@ -39,10 +39,10 @@ function shrl_test_expect_deopt(lhs, rhs, expected_result) {
}
shrl_test(8, 2, 2);
-shrl_test_expect_deopt(-1, 1, 2147483647);
+shrl_test(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
-shrl_test_expect_deopt(-8, 0, 4294967288);
-shrl_test_expect_deopt(-892396978, 0, 3402570318);
+shrl_test(-8, 0, 4294967288);
+shrl_test(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);
diff --git a/deps/v8/test/mjsunit/maglev/shift-right-logical.js b/deps/v8/test/mjsunit/maglev/shift-right-logical.js
index 1b30e21ec8..164fd0d057 100644
--- a/deps/v8/test/mjsunit/maglev/shift-right-logical.js
+++ b/deps/v8/test/mjsunit/maglev/shift-right-logical.js
@@ -35,10 +35,10 @@ function shrl_test_expect_deopt(lhs, rhs, expected_result) {
}
shrl_test(8, 2, 2);
-shrl_test_expect_deopt(-1, 1, 2147483647);
+shrl_test(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
-shrl_test_expect_deopt(-8, 0, 4294967288);
-shrl_test_expect_deopt(-892396978, 0, 3402570318);
+shrl_test(-8, 0, 4294967288);
+shrl_test(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);
diff --git a/deps/v8/test/mjsunit/maglev/store-oddball-to-double-elements.js b/deps/v8/test/mjsunit/maglev/store-oddball-to-double-elements.js
new file mode 100644
index 0000000000..f31d18b39b
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/store-oddball-to-double-elements.js
@@ -0,0 +1,28 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --maglev --allow-natives-syntax
+
+let o = [0.5]
+
+function foo(x, store) {
+ x + 0.5; // Give x a float64_alternative
+ if (store) {
+ o[0] = x;
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+// Warm up the add with NumberOrOddball feedback, but keep the store as
+// a double store.
+foo(2.3, true);
+foo(undefined, false);
+
+%OptimizeMaglevOnNextCall(foo);
+// Storing a number should work.
+foo(2.3, true);
+assertEquals(2.3, o[0]);
+// Storing an oddball should work and not store the ToNumber of that oddball.
+foo(undefined, true);
+assertEquals(undefined, o[0]);
diff --git a/deps/v8/test/mjsunit/maglev/typedarray-out-of-bounds.js b/deps/v8/test/mjsunit/maglev/typedarray-out-of-bounds.js
new file mode 100644
index 0000000000..28c8e1c5df
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/typedarray-out-of-bounds.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+function foo(index) {
+ const array = new Uint8Array(8);
+ array[index] = 42;
+ return array;
+}
+
+%PrepareFunctionForOptimization(foo);
+
+foo(0);
+foo(0);
+
+%OptimizeMaglevOnNextCall(foo);
+
+foo(0);
+
+// This call is one element past the end and should deoptimize.
+foo(8);
diff --git a/deps/v8/test/mjsunit/maglev/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/maglev/typedarray-resizablearraybuffer.js
new file mode 100644
index 0000000000..dc299cc216
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/typedarray-resizablearraybuffer.js
@@ -0,0 +1,815 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab --allow-natives-syntax --maglev
+// Flags: --no-always-turbofan --turbo-rab-gsab
+
+"use strict";
+
+d8.file.execute('test/mjsunit/typedarray-helpers.js');
+
+const is_little_endian = (() => {
+ var buffer = new ArrayBuffer(4);
+ const HEAP32 = new Int32Array(buffer);
+ const HEAPU8 = new Uint8Array(buffer);
+ HEAP32[0] = 255;
+ return (HEAPU8[0] === 255 && HEAPU8[3] === 0);
+})();
+
+function FillBuffer(buffer) {
+ const view = new Uint8Array(buffer);
+ for (let i = 0; i < view.length; ++i) {
+ view[i] = i;
+ }
+}
+%NeverOptimizeFunction(FillBuffer);
+
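+// Helper note: FillBuffer writes view[i] = i for every byte, so the asU16/
+// asU32/asF32/asF64/asB64 helpers below reconstruct the value that element
+// `index` of the corresponding typed array is expected to hold, taking the
+// platform endianness into account. For example, on a little-endian machine
+// asU16(1) returns 3 * 256 + 2, i.e. bytes 2 and 3 read back as a Uint16.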
+function asU16(index) {
+ const start = index * 2;
+ if (is_little_endian) {
+ return (start + 1) * 256 + start;
+ } else {
+ return start * 256 + start + 1;
+ }
+}
+%NeverOptimizeFunction(asU16);
+
+function asU32(index) {
+ const start = index * 4;
+ if (is_little_endian) {
+ return (((start + 3) * 256 + start + 2) * 256 + start + 1) * 256 + start;
+ } else {
+ return ((((start * 256) + start + 1) * 256) + start + 2) * 256 + start + 3;
+ }
+}
+%NeverOptimizeFunction(asU32);
+
+function asF32(index) {
+ const start = index * 4;
+ const ab = new ArrayBuffer(4);
+ const ta = new Uint8Array(ab);
+ for (let i = 0; i < 4; ++i) ta[i] = start + i;
+ return new Float32Array(ab)[0];
+}
+%NeverOptimizeFunction(asF32);
+
+function asF64(index) {
+ const start = index * 8;
+ const ab = new ArrayBuffer(8);
+ const ta = new Uint8Array(ab);
+ for (let i = 0; i < 8; ++i) ta[i] = start + i;
+ return new Float64Array(ab)[0];
+}
+%NeverOptimizeFunction(asF64);
+
+function asB64(index) {
+ const start = index * 8;
+ let result = 0n;
+ if (is_little_endian) {
+ for (let i = 0; i < 8; ++i) {
+ result = result << 8n;
+ result += BigInt(start + 7 - i);
+ }
+ } else {
+ for (let i = 0; i < 8; ++i) {
+ result = result << 8n;
+ result += BigInt(start + i);
+ }
+ }
+ return result;
+}
+%NeverOptimizeFunction(asB64);
+
+function CreateBuffer(shared, len, max_len) {
+ return shared ? new SharedArrayBuffer(len, {maxByteLength: max_len}) :
+ new ArrayBuffer(len, {maxByteLength: max_len});
+}
+%NeverOptimizeFunction(CreateBuffer);
+
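+// MakeResize returns a (buffer, new_byte_length) helper that resizes a RAB
+// (or grows a GSAB) and returns the byte length the view under test is then
+// expected to report: for length-tracking views this is the remaining bytes
+// past `offset`, rounded down to the element size, and for fixed-length views
+// it is either the full fixed byte length or 0 when the view no longer fits.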
+function MakeResize(target, shared, offset, fixed_len) {
+ const bpe = target.name === 'DataView' ? 1 : target.BYTES_PER_ELEMENT;
+ function RoundDownToElementSize(blen) {
+ return Math.floor(blen / bpe) * bpe;
+ }
+ if (!shared) {
+ if (fixed_len === undefined) {
+ return (b, len) => {
+ b.resize(len);
+ const blen = Math.max(0, len - offset);
+ return RoundDownToElementSize(blen);
+ };
+ } else {
+ const fixed_blen = fixed_len * bpe;
+ return (b, len) => {
+ b.resize(len);
+ const blen = fixed_blen <= (len - offset) ? fixed_blen : 0;
+ return RoundDownToElementSize(blen);
+ }
+ }
+ } else {
+ if (fixed_len === undefined) {
+ return (b, len) => {
+ let blen = 0;
+ if (len > b.byteLength) {
+ b.grow(len);
+ blen = Math.max(0, len - offset);
+ } else {
+ blen = b.byteLength - offset;
+ }
+ return RoundDownToElementSize(blen);
+ };
+ } else {
+ return (b, len) => {
+ if (len > b.byteLength) {
+ b.grow(len);
+ }
+ return fixed_len * bpe;
+ };
+ }
+ }
+}
+%NeverOptimizeFunction(MakeResize);
+
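+// MakeElement maps an element index to the value that FillBuffer-initialized
+// memory is expected to produce for the given TypedArray type at `offset`.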
+function MakeElement(target, offset) {
+ const o = offset / target.BYTES_PER_ELEMENT;
+ if (target.name === 'Int8Array') {
+ return (index) => {
+ return o + index;
+ };
+ } else if (target.name === 'Uint32Array') {
+ return (index) => {
+ return asU32(o + index);
+ };
+ } else if (target.name === 'Float64Array') {
+ return (index) => {
+ return asF64(o + index);
+ };
+ } else if (target.name === 'BigInt64Array') {
+ return (index) => {
+ return asB64(o + index);
+ };
+ } else {
+ console.log(`unimplemented: MakeElement(${target.name})`);
+ return () => undefined;
+ }
+}
+%NeverOptimizeFunction(MakeElement);
+
+function MakeCheckBuffer(target, offset) {
+ return (ab, up_to) => {
+ const view = new Uint8Array(ab);
+ for (let i = 0; i < offset; ++i) {
+ assertEquals(0, view[i]);
+ }
+ for (let i = 0; i < (up_to * target.BYTES_PER_ELEMENT) + 1; ++i) {
+ // Use PrintBuffer(ab) for debugging.
+ assertEquals(offset + i, view[offset + i]);
+ }
+ }
+}
+%NeverOptimizeFunction(MakeCheckBuffer);
+
+function ClearBuffer(ab) {
+ for (let i = 0; i < ab.byteLength; ++i) ab[i] = 0;
+}
+%NeverOptimizeFunction(ClearBuffer);
+
+// Use this for debugging these tests.
+function PrintBuffer(buffer) {
+ const view = new Uint8Array(buffer);
+ for (let i = 0; i < 32; ++i) {
+ console.log(`[${i}]: ${view[i]}`)
+ }
+}
+%NeverOptimizeFunction(PrintBuffer);
+
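+// Each closure below exercises one operation (byteLength/length, element
+// reads, element writes) over the cross product of {RAB, GSAB} x
+// {length-tracking, fixed-length} x {with, without offset} x element kind,
+// building a fresh function per combination so that Maglev optimizes each
+// case with its own feedback.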
+(function() {
+for (let shared of [false, true]) {
+ for (let length_tracking of [false, true]) {
+ for (let with_offset of [false, true]) {
+ for (let target
+ of [Int8Array, Uint32Array, Float64Array, BigInt64Array]) {
+ const test_case = `Testing: Length_${shared ? 'GSAB' : 'RAB'}_${
+ length_tracking ? 'LengthTracking' : 'FixedLength'}${
+ with_offset ? 'WithOffset' : ''}_${target.name}`;
+ // console.log(test_case);
+
+ const byte_length_code = 'return ta.byteLength; // ' + test_case;
+ const ByteLength = new Function('ta', byte_length_code);
+ const length_code = 'return ta.length; // ' + test_case;
+ const Length = new Function('ta', length_code);
+ const offset = with_offset ? 8 : 0;
+
+ let blen = 16 - offset;
+ const fixed_len =
+ length_tracking ? undefined : (blen / target.BYTES_PER_ELEMENT);
+ const ab = CreateBuffer(shared, 16, 40);
+ const ta = new target(ab, offset, fixed_len);
+ const Resize = MakeResize(target, shared, offset, fixed_len);
+
+ assertUnoptimized(ByteLength);
+ assertUnoptimized(Length);
+ %PrepareFunctionForOptimization(ByteLength);
+ %PrepareFunctionForOptimization(Length);
+ assertEquals(blen, ByteLength(ta));
+ assertEquals(blen, ByteLength(ta));
+ assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
+ assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
+ %OptimizeMaglevOnNextCall(ByteLength);
+ %OptimizeMaglevOnNextCall(Length);
+ assertEquals(blen, ByteLength(ta));
+ assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
+ blen = Resize(ab, 32);
+ assertEquals(blen, ByteLength(ta));
+ assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
+ blen = Resize(ab, 9);
+ assertEquals(blen, ByteLength(ta));
+ assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
+ assertOptimized(ByteLength);
+ assertOptimized(Length);
+ blen = Resize(ab, 24);
+ assertEquals(blen, ByteLength(ta));
+ assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
+ assertOptimized(ByteLength);
+ assertOptimized(Length);
+
+ if (!shared) {
+ %ArrayBufferDetach(ab);
+ assertEquals(0, ByteLength(ta));
+ assertEquals(0, Length(ta));
+ assertOptimized(Length);
+ }
+ }
+ }
+ }
+}
+})();
+
+(function() {
+for (let shared of [false, true]) {
+ for (let length_tracking of [false, true]) {
+ for (let with_offset of [false, true]) {
+ for (let target
+ of [Int8Array, Uint32Array, Float64Array, BigInt64Array]) {
+ const test_case = `Testing: Read_${shared ? 'GSAB' : 'RAB'}_${
+ length_tracking ? 'LengthTracking' : 'FixedLength'}${
+ with_offset ? 'WithOffset' : ''}_${target.name}`;
+ // console.log(test_case);
+
+ const read_code = 'return ta[index]; // ' + test_case;
+ const Read = new Function('ta', 'index', read_code);
+ const offset = with_offset ? 8 : 0;
+
+ let blen = 16 - offset;
+ let len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ const fixed_len = length_tracking ? undefined : len;
+ const ab = CreateBuffer(shared, 16, 40);
+ const ta = new target(ab, offset, fixed_len);
+ const Resize = MakeResize(target, shared, offset, fixed_len);
+ const Element = MakeElement(target, offset);
+ FillBuffer(ab);
+
+ assertUnoptimized(Read);
+ %PrepareFunctionForOptimization(Read);
+ for (let i = 0; i < len * 2; ++i)
+ assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
+ %OptimizeMaglevOnNextCall(Read);
+ for (let i = 0; i < len * 2; ++i)
+ assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
+ assertOptimized(Read);
+ blen = Resize(ab, 32);
+ FillBuffer(ab);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len * 2; ++i)
+ assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
+ assertOptimized(Read);
+ blen = Resize(ab, 9);
+ FillBuffer(ab);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len * 2; ++i)
+ assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
+ assertOptimized(Read);
+ blen = Resize(ab, 0);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len * 2; ++i)
+ assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
+ assertOptimized(Read);
+ blen = Resize(ab, 24);
+ FillBuffer(ab);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len * 2; ++i)
+ assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
+ assertOptimized(Read);
+
+ if (!shared) {
+ %ArrayBufferDetach(ab);
+ assertEquals(undefined, Read(ta, 0));
+ // assertOptimized(Read);
+ }
+ }
+ }
+ }
+}
+})();
+
+(function() {
+for (let shared of [false, true]) {
+ for (let length_tracking of [false, true]) {
+ for (let with_offset of [false, true]) {
+ for (let target
+ of [Int8Array, Uint32Array, Float64Array, BigInt64Array]) {
+ const test_case = `Testing: Write_${shared ? 'GSAB' : 'RAB'}_${
+ length_tracking ? 'LengthTracking' : 'FixedLength'}${
+ with_offset ? 'WithOffset' : ''}_${target.name}`;
+ // console.log(test_case);
+
+ const write_code = 'ta[index] = value; // ' + test_case;
+ const Write = new Function('ta', 'index', 'value', write_code);
+ const offset = with_offset ? 8 : 0;
+
+ let blen = 16 - offset;
+ let len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ const fixed_len = length_tracking ? undefined : len;
+ const ab = CreateBuffer(shared, 16, 40);
+ const ta = new target(ab, offset, fixed_len);
+ const Resize = MakeResize(target, shared, offset, fixed_len);
+ const Element = MakeElement(target, offset);
+ const CheckBuffer = MakeCheckBuffer(target, offset);
+ ClearBuffer(ab);
+
+ assertUnoptimized(Write);
+ %PrepareFunctionForOptimization(Write);
+ for (let i = 0; i < len; ++i) {
+ Write(ta, i, Element(i));
+ CheckBuffer(ab, i);
+ }
+ ClearBuffer(ab);
+ %OptimizeMaglevOnNextCall(Write);
+ for (let i = 0; i < len; ++i) {
+ Write(ta, i, Element(i));
+ CheckBuffer(ab, i);
+ }
+ assertOptimized(Write);
+ blen = Resize(ab, 32);
+ ClearBuffer(ab);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len; ++i) {
+ Write(ta, i, Element(i));
+ CheckBuffer(ab, i);
+ }
+ assertOptimized(Write);
+ blen = Resize(ab, 9);
+ ClearBuffer(ab);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len; ++i) {
+ Write(ta, i, Element(i));
+ CheckBuffer(ab, i);
+ }
+ assertOptimized(Write);
+ blen = Resize(ab, 24);
+ ClearBuffer(ab);
+ len = Math.floor(blen / target.BYTES_PER_ELEMENT);
+ for (let i = 0; i < len; ++i) {
+ Write(ta, i, Element(i));
+ CheckBuffer(ab, i);
+ }
+ assertOptimized(Write);
+ }
+ }
+ }
+}
+})();
+
+(function() {
+for (let shared of [false, true]) {
+ for (let length_tracking of [false, true]) {
+ for (let with_offset of [false, true]) {
+ const test_case = `Testing: ByteLength_${shared ? 'GSAB' : 'RAB'}_${
+ length_tracking ?
+ 'LengthTracking' :
+ 'FixedLength'}${with_offset ? 'WithOffset' : ''}_DataView`;
+ // console.log(test_case);
+
+ const byte_length_code = 'return dv.byteLength; // ' + test_case;
+ const ByteLength = new Function('dv', byte_length_code);
+ const offset = with_offset ? 8 : 0;
+
+ let blen = 16 - offset;
+ const fixed_blen = length_tracking ? undefined : blen;
+ const ab = CreateBuffer(shared, 16, 40);
+ const dv = new DataView(ab, offset, fixed_blen);
+ const Resize = MakeResize(DataView, shared, offset, fixed_blen);
+
+ assertUnoptimized(ByteLength);
+ %PrepareFunctionForOptimization(ByteLength);
+ assertEquals(blen, ByteLength(dv));
+ assertEquals(blen, ByteLength(dv));
+ %OptimizeMaglevOnNextCall(ByteLength);
+ assertEquals(blen, ByteLength(dv));
+ assertOptimized(ByteLength);
+ blen = Resize(ab, 32);
+ assertEquals(blen, ByteLength(dv));
+ assertOptimized(ByteLength);
+ blen = Resize(ab, 9);
+ if (length_tracking || shared) {
+ assertEquals(blen, ByteLength(dv));
+ } else {
+ // For fixed length rabs, Resize(ab, 9) makes the DataView go out of bounds
+ // (it behaves as if detached), so DataView.prototype.byteLength has to throw.
+ assertThrows(() => { ByteLength(dv); }, TypeError);
+ }
+ assertOptimized(ByteLength);
+ blen = Resize(ab, 24);
+ assertEquals(blen, ByteLength(dv));
+ assertOptimized(ByteLength);
+
+ if (!shared) {
+ %ArrayBufferDetach(ab);
+ assertThrows(() => { ByteLength(dv); }, TypeError);
+ assertOptimized(ByteLength);
+ }
+ }
+ }
+}
+})();
+
+(function() {
+function ByteLength_RAB_LengthTrackingWithOffset_DataView(dv) {
+ return dv.byteLength;
+}
+const ByteLength = ByteLength_RAB_LengthTrackingWithOffset_DataView;
+
+const rab = CreateResizableArrayBuffer(16, 40);
+const dv = new DataView(rab, 7);
+
+%PrepareFunctionForOptimization(ByteLength);
+assertEquals(9, ByteLength(dv));
+assertEquals(9, ByteLength(dv));
+%OptimizeMaglevOnNextCall(ByteLength);
+assertEquals(9, ByteLength(dv));
+assertOptimized(ByteLength);
+})();
+
+(function() {
+function Read_TA_RAB_LengthTracking_Mixed(ta, index) {
+ return ta[index];
+}
+const Get = Read_TA_RAB_LengthTracking_Mixed;
+
+const ab = new ArrayBuffer(16);
+FillBuffer(ab);
+const rab = CreateResizableArrayBuffer(16, 40);
+FillBuffer(rab);
+let ta_int8 = new Int8Array(ab);
+let ta_uint16 = new Uint16Array(rab);
+let ta_float32 = new Float32Array(ab);
+let ta_float64 = new Float64Array(rab);
+
+// Train with feedback for all elements kinds.
+%PrepareFunctionForOptimization(Get);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(asU16(7), Get(ta_uint16, 7));
+assertEquals(undefined, Get(ta_uint16, 8));
+assertEquals(undefined, Get(ta_uint16, 12));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(asF64(1), Get(ta_float64, 1));
+assertEquals(undefined, Get(ta_float64, 2));
+assertEquals(undefined, Get(ta_float64, 12));
+%OptimizeMaglevOnNextCall(Get);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(asU16(7), Get(ta_uint16, 7));
+assertEquals(undefined, Get(ta_uint16, 8));
+assertEquals(undefined, Get(ta_uint16, 12));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(asF64(1), Get(ta_float64, 1));
+assertEquals(undefined, Get(ta_float64, 2));
+assertEquals(undefined, Get(ta_float64, 12));
+assertOptimized(Get);
+rab.resize(32);
+FillBuffer(rab);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(asU16(15), Get(ta_uint16, 15));
+assertEquals(undefined, Get(ta_uint16, 16));
+assertEquals(undefined, Get(ta_uint16, 40));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(asF64(1), Get(ta_float64, 1));
+assertEquals(asF64(3), Get(ta_float64, 3));
+assertEquals(undefined, Get(ta_float64, 4));
+assertEquals(undefined, Get(ta_float64, 12));
+assertOptimized(Get);
+rab.resize(9);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(undefined, Get(ta_uint16, 4));
+assertEquals(undefined, Get(ta_uint16, 12));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(undefined, Get(ta_float64, 1));
+assertEquals(undefined, Get(ta_float64, 12));
+assertOptimized(Get);
+
+}());
+
+(function() {
+function Read_TA_RAB_LengthTracking_Mixed(ta, index) {
+ return ta[index];
+}
+const Get = Read_TA_RAB_LengthTracking_Mixed;
+
+const ab = new ArrayBuffer(16);
+FillBuffer(ab);
+const rab = CreateResizableArrayBuffer(16, 40);
+FillBuffer(rab);
+let ta_int8 = new Int8Array(ab);
+let ta_uint16 = new Uint16Array(rab);
+let ta_float32 = new Float32Array(ab);
+let ta_float64 = new Float64Array(rab);
+
+// Train with feedback for all elements kinds.
+%PrepareFunctionForOptimization(Get);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(asU16(7), Get(ta_uint16, 7));
+assertEquals(undefined, Get(ta_uint16, 8));
+assertEquals(undefined, Get(ta_uint16, 12));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(asF64(1), Get(ta_float64, 1));
+assertEquals(undefined, Get(ta_float64, 2));
+assertEquals(undefined, Get(ta_float64, 12));
+%OptimizeMaglevOnNextCall(Get);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(asU16(7), Get(ta_uint16, 7));
+assertEquals(undefined, Get(ta_uint16, 8));
+assertEquals(undefined, Get(ta_uint16, 12));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(asF64(1), Get(ta_float64, 1));
+assertEquals(undefined, Get(ta_float64, 2));
+assertEquals(undefined, Get(ta_float64, 12));
+assertOptimized(Get);
+rab.resize(32);
+FillBuffer(rab);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(asU16(15), Get(ta_uint16, 15));
+assertEquals(undefined, Get(ta_uint16, 16));
+assertEquals(undefined, Get(ta_uint16, 40));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(asF64(1), Get(ta_float64, 1));
+assertEquals(asF64(3), Get(ta_float64, 3));
+assertEquals(undefined, Get(ta_float64, 4));
+assertEquals(undefined, Get(ta_float64, 12));
+assertOptimized(Get);
+rab.resize(9);
+assertEquals(0, Get(ta_int8, 0));
+assertEquals(3, Get(ta_int8, 3));
+assertEquals(15, Get(ta_int8, 15));
+assertEquals(undefined, Get(ta_int8, 16));
+assertEquals(undefined, Get(ta_int8, 32));
+assertEquals(asU16(0), Get(ta_uint16, 0));
+assertEquals(asU16(3), Get(ta_uint16, 3));
+assertEquals(undefined, Get(ta_uint16, 4));
+assertEquals(undefined, Get(ta_uint16, 12));
+assertEquals(asF32(0), Get(ta_float32, 0));
+assertEquals(asF32(3), Get(ta_float32, 3));
+assertEquals(undefined, Get(ta_float32, 4));
+assertEquals(undefined, Get(ta_float32, 12));
+assertEquals(asF64(0), Get(ta_float64, 0));
+assertEquals(undefined, Get(ta_float64, 1));
+assertEquals(undefined, Get(ta_float64, 12));
+assertOptimized(Get);
+
+}());
+
+(function() {
+function Length_TA_RAB_LengthTracking_Mixed(ta) {
+ return ta.length;
+}
+let Length = Length_TA_RAB_LengthTracking_Mixed;
+
+const ab = new ArrayBuffer(32);
+const rab = CreateResizableArrayBuffer(16, 40);
+let ta_int8 = new Int8Array(ab);
+let ta_uint16 = new Uint16Array(rab);
+let ta_float32 = new Float32Array(ab);
+let ta_bigint64 = new BigInt64Array(rab);
+
+// Train with feedback for all elements kinds.
+%PrepareFunctionForOptimization(Length);
+assertEquals(32, Length(ta_int8));
+assertEquals(8, Length(ta_uint16));
+assertEquals(8, Length(ta_float32));
+assertEquals(2, Length(ta_bigint64));
+%OptimizeMaglevOnNextCall(Length);
+assertEquals(32, Length(ta_int8));
+assertEquals(8, Length(ta_uint16));
+assertEquals(8, Length(ta_float32));
+assertEquals(2, Length(ta_bigint64));
+assertOptimized(Length);
+}());
+
+(function() {
+function Length_RAB_GSAB_LengthTrackingWithOffset_Mixed(ta) {
+ return ta.length;
+}
+const Length = Length_RAB_GSAB_LengthTrackingWithOffset_Mixed;
+
+const rab = CreateResizableArrayBuffer(16, 40);
+let ta_int8 = new Int8Array(rab);
+let ta_float64 = new Float64Array(rab);
+
+// Train with feedback for Int8Array and Float64Array.
+%PrepareFunctionForOptimization(Length);
+assertEquals(16, Length(ta_int8));
+assertEquals(2, Length(ta_float64));
+%OptimizeMaglevOnNextCall(Length);
+assertEquals(16, Length(ta_int8));
+assertEquals(2, Length(ta_float64));
+assertOptimized(Length);
+
+let ta_uint32 = new Uint32Array(rab);
+let ta_bigint64 = new BigInt64Array(rab);
+// Calling with Uint32Array will deopt because of the map check on length.
+assertEquals(4, Length(ta_uint32));
+assertUnoptimized(Length);
+%PrepareFunctionForOptimization(Length);
+assertEquals(2, Length(ta_bigint64));
+// Recompile with additional feedback for Uint32Array and BigInt64Array.
+%OptimizeMaglevOnNextCall(Length);
+assertEquals(2, Length(ta_bigint64));
+assertOptimized(Length);
+
+// Length handles all four TypedArrays without deopting.
+assertEquals(16, Length(ta_int8));
+assertEquals(2, Length(ta_float64));
+assertEquals(4, Length(ta_uint32));
+assertEquals(2, Length(ta_bigint64));
+assertOptimized(Length);
+
+// Length handles corresponding gsab-backed TypedArrays without deopting.
+const gsab = CreateGrowableSharedArrayBuffer(16, 40);
+let ta2_uint32 = new Uint32Array(gsab, 8);
+let ta2_float64 = new Float64Array(gsab, 8);
+let ta2_bigint64 = new BigInt64Array(gsab, 8);
+let ta2_int8 = new Int8Array(gsab, 8);
+assertEquals(8, Length(ta2_int8));
+assertEquals(1, Length(ta2_float64));
+assertEquals(2, Length(ta2_uint32));
+assertEquals(1, Length(ta2_bigint64));
+assertOptimized(Length);
+
+// Test Length after rab has been resized to a smaller size.
+rab.resize(5);
+assertEquals(5, Length(ta_int8));
+assertEquals(0, Length(ta_float64));
+assertEquals(1, Length(ta_uint32));
+assertEquals(0, Length(ta_bigint64));
+assertOptimized(Length);
+
+// Test Length after rab has been resized to a larger size.
+rab.resize(40);
+assertEquals(40, Length(ta_int8));
+assertEquals(5, Length(ta_float64));
+assertEquals(10, Length(ta_uint32));
+assertEquals(5, Length(ta_bigint64));
+assertOptimized(Length);
+
+// Test Length after gsab has been grown to a larger size.
+gsab.grow(25);
+assertEquals(17, Length(ta2_int8));
+assertEquals(2, Length(ta2_float64));
+assertEquals(4, Length(ta2_uint32));
+assertEquals(2, Length(ta2_bigint64));
+assertOptimized(Length);
+})();
+
+(function() {
+function Length_AB_RAB_GSAB_LengthTrackingWithOffset_Mixed(ta) {
+ return ta.length;
+}
+const Length = Length_AB_RAB_GSAB_LengthTrackingWithOffset_Mixed;
+
+let ab = new ArrayBuffer(32);
+let rab = CreateResizableArrayBuffer(16, 40);
+let gsab = CreateGrowableSharedArrayBuffer(16, 40);
+
+let ta_ab_int32 = new Int32Array(ab, 8, 3);
+let ta_rab_int32 = new Int32Array(rab, 4);
+let ta_gsab_float64 = new Float64Array(gsab);
+let ta_gsab_bigint64 = new BigInt64Array(gsab, 0, 2);
+
+// Optimize Length with polymorphic feedback.
+%PrepareFunctionForOptimization(Length);
+assertEquals(3, Length(ta_ab_int32));
+assertEquals(3, Length(ta_rab_int32));
+assertEquals(2, Length(ta_gsab_float64));
+assertEquals(2, Length(ta_gsab_bigint64));
+%OptimizeMaglevOnNextCall(Length);
+assertEquals(3, Length(ta_ab_int32));
+assertEquals(3, Length(ta_rab_int32));
+assertEquals(2, Length(ta_gsab_float64));
+assertEquals(2, Length(ta_gsab_bigint64));
+assertOptimized(Length);
+
+// Test resizing and growing the underlying rab/gsab buffers.
+rab.resize(8);
+gsab.grow(36);
+assertEquals(3, Length(ta_ab_int32));
+assertEquals(1, Length(ta_rab_int32));
+assertEquals(4, Length(ta_gsab_float64));
+assertEquals(2, Length(ta_gsab_bigint64));
+assertOptimized(Length);
+
+// Construct additional TypedArrays with the same ElementsKind.
+let ta2_ab_bigint64 = new BigInt64Array(ab, 0, 1);
+let ta2_gsab_int32 = new Int32Array(gsab, 16);
+let ta2_rab_float64 = new Float64Array(rab, 8);
+let ta2_rab_int32 = new Int32Array(rab, 0, 1);
+assertEquals(1, Length(ta2_ab_bigint64));
+assertEquals(5, Length(ta2_gsab_int32));
+assertEquals(0, Length(ta2_rab_float64));
+assertEquals(1, Length(ta2_rab_int32));
+assertOptimized(Length);
+})();
+
+(function() {
+function ByteOffset(ta) {
+ return ta.byteOffset;
+}
+
+const rab = CreateResizableArrayBuffer(16, 40);
+const ta = new Int32Array(rab, 4);
+
+%PrepareFunctionForOptimization(ByteOffset);
+assertEquals(4, ByteOffset(ta));
+assertEquals(4, ByteOffset(ta));
+%OptimizeMaglevOnNextCall(ByteOffset);
+assertEquals(4, ByteOffset(ta));
+assertOptimized(ByteOffset);
+})();
diff --git a/deps/v8/test/mjsunit/maglev/unused-checkedsmitag.js b/deps/v8/test/mjsunit/maglev/unused-checkedsmitag.js
new file mode 100644
index 0000000000..0726fdd283
--- /dev/null
+++ b/deps/v8/test/mjsunit/maglev/unused-checkedsmitag.js
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --maglev
+
+
+function f(x) {
+ return x | 0;
+}
+
+%PrepareFunctionForOptimization(f);
+f(42);
+%OptimizeMaglevOnNextCall(f);
+f(42);
+
+// `f` only has Smi feedback for `x | 0`, and should thus generate a
+// `CheckedSmiTag` to make sure that `x` is indeed a Smi. But since `| 0` is
+// the identity, the bitwise operation itself won't be emitted and `x` will be
+// returned directly. Thus, the `CheckedSmiTag` is unused, but it still
+// shouldn't be optimized out.
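+//
+// Concretely: in unoptimized code, 4.5 | 0 evaluates to 4. If the unused
+// check were optimized out, the optimized code would return `x` (4.5) without
+// deopting, and the assertion below would fail.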
+
+assertEquals(4, f(4.5));
diff --git a/deps/v8/test/mjsunit/mjsunit-assert-equals.js b/deps/v8/test/mjsunit/mjsunit-assert-equals.js
index 08154827d5..4f686df9fe 100644
--- a/deps/v8/test/mjsunit/mjsunit-assert-equals.js
+++ b/deps/v8/test/mjsunit/mjsunit-assert-equals.js
@@ -70,6 +70,8 @@ function testAssertEquals(a, b) {
testAssertEquals(new Array(1), new Array(1));
testAssertNotEquals(new Array(1), new Array(2));
testAssertEquals([,,], new Array(2));
+ // The difference between empty and undefined is not ignored.
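+ // (For example, [undefined] has an own element at index 0, while new Array(1)
+ // only has a hole there, so the two must not compare equal.)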
+ testAssertNotEquals([undefined], new Array(1));
})();
(function TestAssertEqualsArraysNested() {
@@ -80,9 +82,7 @@ function testAssertEquals(a, b) {
})();
(function TestAssertEqualsArrayProperties() {
- // Difference between empty and undefined is ignored by the assert
- // implementation as well as additional properties.
- testAssertEquals([undefined], new Array(1));
+ // Additional array properties are ignored.
let arrWithProp = new Array();
arrWithProp.myProperty = 'Additional property';
testAssertEquals([], arrWithProp);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 8c791ddba6..c1a1359f7e 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -197,6 +197,8 @@ var V8OptimizationStatus = {
kTopmostFrameIsInterpreted: 1 << 16,
kTopmostFrameIsBaseline: 1 << 17,
kIsLazy: 1 << 18,
+ kTopmostFrameIsMaglev: 1 << 19,
+ kOptimizeOnNextCallOptimizesToMaglev: 1 << 20,
};
// Returns true if --lite-mode is on and we can't ever turn on optimization.
@@ -431,6 +433,7 @@ var prettyPrinted;
return false;
}
for (var i = 0; i < a.length; i++) {
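+ // A hole and an explicit undefined element are not equal: index i must be
+ // present in both arrays or in neither.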
+ if ((i in a) !== (i in b)) return false;
if (!deepEquals(a[i], b[i])) return false;
}
return true;
@@ -748,7 +751,16 @@ var prettyPrinted;
return;
}
var is_optimized = (opt_status & V8OptimizationStatus.kOptimized) !== 0;
- assertFalse(is_optimized, name_opt);
+ if (is_optimized && (opt_status & V8OptimizationStatus.kMaglevved) &&
+ (opt_status &
+ V8OptimizationStatus.kOptimizeOnNextCallOptimizesToMaglev)) {
+ // When --optimize-on-next-call-optimizes-to-maglev is used, we might emit
+ // more generic code than optimization tests expect. In such cases,
+ // assertUnoptimized may see optimized code, but we still want it to
+ // succeed and continue the test.
+ return;
+ }
+ assertFalse(is_optimized, 'should not be optimized: ' + name_opt);
}
assertOptimized = function assertOptimized(
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index ad001f4497..998d9671c4 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -39,7 +39,6 @@
'wasm/wasm-module-builder': [SKIP],
'compiler/fast-api-helpers': [SKIP],
'typedarray-helpers': [SKIP],
- 'web-snapshot/web-snapshot-helpers': [SKIP],
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
@@ -47,9 +46,6 @@
##############################################################################
# Open bugs.
- # BUG(v8:2989).
- 'regress/regress-2989': [FAIL, NO_VARIANTS, ['lite_mode == True', SKIP]],
-
# Issue 3784: setters-on-elements is flaky
'setters-on-elements': [PASS, FAIL],
@@ -141,12 +137,14 @@
'math-floor-of-div': [PASS, SLOW],
'md5': [PASS, SLOW],
'readonly': [PASS, SLOW],
+ 'regress-1417125': [PASS, SLOW],
'regress/regress-1122': [PASS, SLOW],
'regress/regress-605470': [PASS, SLOW],
'regress/regress-655573': [PASS, SLOW],
'regress/regress-1200351': [PASS, SLOW],
'regress/regress-crbug-808192': [PASS, SLOW, NO_VARIANTS, ['arch not in (ia32, x64)', SKIP], ['tsan', SKIP]],
'regress/regress-crbug-918301': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['(arch == arm or arch == arm64) and simulator_run', SKIP]],
+ 'regress/regress-crbug-1276923': [PASS, SLOW],
'regress/wasm/regress-810973': [PASS, SLOW],
'sealed-array-reduce': [PASS, SLOW],
'string-replace-gc': [PASS, SLOW],
@@ -157,6 +155,8 @@
'wasm/atomics64-stress': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['tsan', SKIP]],
'wasm/compare-exchange-stress': [PASS, SLOW, NO_VARIANTS],
'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
+ 'wasm/code-space-overflow': [PASS, SLOW, NO_VARIANTS, ['gc_stress == True or (simulator_run and mode == debug)', SKIP]],
+ 'wasm/max-wasm-functions': [PASS, SLOW, ['mode != release or dcheck_always_on or simulator_run or predictable', SKIP], ['tsan', SKIP]],
# Very slow on ARM, MIPS, RISCV and LOONG, contains no architecture dependent code.
'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]],
@@ -211,8 +211,20 @@
# Needs deterministic test helpers for concurrent maglev tiering.
# TODO(jgruber,v8:7700): Implement ASAP.
'maglev/18': [SKIP],
+
+ # --perf-prof is only available on Linux, and --perf-prof-unwinding-info only
+ # on selected architectures.
+ 'regress/wasm/regress-1032753': [PASS, ['system != linux', SKIP]],
+ 'regress/regress-913844': [PASS,
+ ['system != linux or arch not in (arm, arm64, x64, s390x, ppc64)', SKIP]],
}], # ALWAYS
+################################################################################
+['mode == release', {
+ # Slow tests in release mode.
+ 'wasm/many-modules': [PASS, SLOW],
+}],
+
##############################################################################
['mode == debug', {
# Skip slow tests in debug mode.
@@ -247,6 +259,11 @@
# BUG(v8:11745) The test allocates too much memory, making it slow on debug.
'compiler/regress-crbug-11564': [SKIP],
+
+ 'regress/regress-992389*': [PASS, SLOW],
+ 'es6/block-conflicts-sloppy': [PASS, SLOW],
+ 'es6/block-conflicts': [PASS, SLOW],
+ 'es6/typedarray-of': [PASS, SLOW],
}], # mode == debug
['novfp3', {
@@ -370,6 +387,43 @@
}], # 'gc_stress'
##############################################################################
+['gc_stress or variant == stress_concurrent_allocation', {
+ # These tests check that FinalizationRegistry cleanup tasks and/or the
+ # clearing of WeakRefs work as expected. They use carefully triggered
+ # synchronous or asynchronous GCs to achieve that and they assume that
+ # there are no unexpected, externally triggered GCs that would interfere
+ # with the tests. Therefore, they are unsuitable for modes that stress
+ # activities which can trigger GC.
+ 'harmony/weakrefs/cleanup': [SKIP],
+ 'harmony/weakrefs/cleanup-is-not-a-microtask': [SKIP],
+ 'harmony/weakrefs/cleanup-on-detached-realm': [SKIP],
+ 'harmony/weakrefs/cleanupsome': [SKIP],
+ 'harmony/weakrefs/cleanupsome-after-unregister': [SKIP],
+ 'harmony/weakrefs/finalizationregistry-and-weakref': [SKIP],
+ 'harmony/weakrefs/finalizationregistry-independent-lifetime': [SKIP],
+ 'harmony/weakrefs/finalizationregistry-independent-lifetime-multiple': [SKIP],
+ 'harmony/weakrefs/finalizationregistry-keeps-holdings-alive': [SKIP],
+ 'harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times': [SKIP],
+ 'harmony/weakrefs/multiple-dirty-finalization-groups': [SKIP],
+ 'harmony/weakrefs/reentrant-gc-from-cleanup': [SKIP],
+ 'harmony/weakrefs/symbol-as-weakref-target-gc': [SKIP],
+ 'harmony/weakrefs/symbol-in-finalizationregistry': [SKIP],
+ 'harmony/weakrefs/two-weakrefs': [SKIP],
+ 'harmony/weakrefs/undefined-holdings': [SKIP],
+ 'harmony/weakrefs/unregister-after-cleanup': [SKIP],
+ 'harmony/weakrefs/unregister-before-cleanup': [SKIP],
+ 'harmony/weakrefs/unregister-called-twice': [SKIP],
+ 'harmony/weakrefs/unregister-inside-cleanup2': [SKIP],
+ 'harmony/weakrefs/unregister-inside-cleanup3': [SKIP],
+ 'harmony/weakrefs/unregister-inside-cleanup': [SKIP],
+ 'harmony/weakrefs/unregister-many': [SKIP],
+ 'harmony/weakrefs/unregister-when-cleanup-already-scheduled': [SKIP],
+ 'harmony/weakrefs/weak-cell-basics': [SKIP],
+ 'harmony/weakrefs/weakref-creation-keeps-alive': [SKIP],
+ 'harmony/weakrefs/weakref-deref-keeps-alive': [SKIP],
+}], # 'gc_stress or variant == stress_concurrent_allocation'
+
+##############################################################################
# TODO(v8:7777): Change this once wasm is supported in jitless mode.
['not has_webassembly or variant == jitless', {
# Skip tests that require webassembly.
@@ -386,6 +440,12 @@
}], # not has_webassembly or variant == jitless
##############################################################################
+['jitless_build_mode', {
+ # invocation_count maintenance is disabled.
+ 'code-coverage*': [SKIP],
+}], # jitless_build_mode
+
+##############################################################################
['lite_mode or variant == jitless', {
# Timeouts in lite / jitless mode.
'asm/embenchen/*': [SKIP],
@@ -457,6 +517,7 @@
# noi18n is required for Intl
'regress/regress-crbug-1052647': [PASS,FAIL],
+ 'regress/regress-1409058': [SKIP],
# Temporal intl tests won't work in no_i18n
'temporal/function-exist': [FAIL],
@@ -498,6 +559,8 @@
'wasm/huge-memory': [SKIP],
'wasm/huge-typedarray': [SKIP],
'wasm/bigint-opt': [SKIP],
+ 'wasm/simd-lane-memory64': [SKIP],
+ 'regress/regress-1320641': [SKIP],
}], # 'arch in (ia32, arm, riscv32)'
##############################################################################
@@ -885,6 +948,7 @@
'regress/wasm/regress-1231950': [SKIP],
'regress/wasm/regress-1264462': [SKIP],
'regress/wasm/regress-1179025': [SKIP],
+ 'regress/wasm/regress-13732': [SKIP],
'wasm/multi-value-simd': [SKIP],
'wasm/liftoff-simd-params': [SKIP],
'wasm/exceptions-simd': [SKIP],
@@ -977,6 +1041,8 @@
# Tier down/up Wasm functions is non-deterministic with
# multiple isolates, as dynamic tiering relies on a array shared
# in the module, that can be modified by all instances.
+ 'wasm/code-flushing': [SKIP],
+ 'wasm/enter-and-leave-debug-state': [SKIP],
'wasm/wasm-dynamic-tiering': [SKIP],
# The test relies on precise switching of code kinds of wasm functions. With
@@ -984,6 +1050,17 @@
# not possible.
'wasm/serialization-with-compilation-hints': [SKIP],
+ # The test enables the global profiler, and there can only exist one profiler
+ # at a time.
+ 'wasm/log-code-after-post-message': [SKIP],
+
+ # The test explicitly deserializes a wasm module and expects the resulting
+ # wasm module to match the serialized one. With multiple isolate the resulting
+ # wasm module can however, come from the native module cache, and therefore
+ # does not match the serialized one, e.g. some functions are not compiled with
+ # TurboFan.
+ 'regress/wasm/regress-11024': [SKIP],
+
# waitAsync tests modify the global state (across Isolates)
'harmony/atomics-waitasync': [SKIP],
'harmony/atomics-waitasync-1thread-2timeout': [SKIP],
@@ -1099,6 +1176,19 @@
# BUG(v8:13379) maglev-inlining flag isn't stable enough for fuzzing.
'maglev/eager-deopt-in-inline': [SKIP],
+
+ # BUG(chromium:1394659) Skipped until issue is fixed to reduce noise on alerts.
+ 'wasm/log-code-after-post-message': [SKIP],
+ 'asm/regress-1395401': [SKIP],
+ 'regress/regress-1394663': [SKIP],
+
+ # Skipped because it has contradictory flags.
+ 'compiler/regress-crbug-1399490': [SKIP],
+
+ # BUG(v8:13470) Skipped until we have a solution.
+ 'compiler/js-create-arguments': [SKIP],
+ 'compiler/js-create': [SKIP],
+ 'compiler/construct-bound-function': [SKIP],
}], # gc_fuzzer or deopt_fuzzer or interrupt_fuzzer
##############################################################################
@@ -1130,6 +1220,9 @@
'harmony/futex': [SKIP],
'typedarray-growablesharedarraybuffer-atomics': [SKIP],
+ # Tests using a shared heap are inherently non-deterministic.
+ 'shared-memory/*': [SKIP],
+
# BUG(v8:7166).
'd8/enable-tracing': [SKIP],
@@ -1141,6 +1234,7 @@
'regress/regress-1212404': [SKIP],
'regress/regress-1221035': [SKIP],
'regress/regress-1232620': [SKIP],
+ 'regress/regress-1353555': [SKIP],
'regress/regress-crbug-1237153': [SKIP],
'regress/wasm/regress-1067621': [SKIP],
@@ -1179,6 +1273,9 @@
# stack overflow
'big-array-literal': [SKIP],
'regress/regress-353551': [SKIP],
+
+ # Limited wasm code space
+ 'wasm/max-wasm-functions': [SKIP],
}], # 'arch == ppc64'
##############################################################################
@@ -1189,6 +1286,9 @@
# PASE environment currently ships with no tzdata database
'tzoffset-transition-new-york-noi18n': [SKIP],
'tzoffset-seoul-noi18n': [SKIP],
+ # Manipulating addresses in tools/system-analyzer/processor.mjs can cause
+ # computation errors due to unsafe integers (v8:13440)
+ 'tools/processor': [PASS, ['component_build', SKIP]],
}], # 'system == aix'
##############################################################################
@@ -1237,6 +1337,9 @@
# This test uses --wasm-speculative-inlining which is incompatible with
# stressing.
'regress/wasm/regress-1364036': [SKIP],
+
+ # Makes assumptions about tiering, which don't hold when we TF everything.
+ 'wasm/enter-and-leave-debug-state': [SKIP],
}], # variant == stress
##############################################################################
@@ -1264,15 +1367,6 @@
}], # variant == nooptimization
##############################################################################
-['gcov_coverage', {
- # Tests taking too long.
- 'array-functions-prototype-misc': [SKIP],
-
- # Stack overflow.
- 'big-array-literal': [SKIP],
-}], # 'gcov_coverage'
-
-##############################################################################
['variant == no_wasm_traps', {
# Skip stuff uninteresting for wasm traps
'bugs/*': [SKIP],
@@ -1299,11 +1393,9 @@
}], # no_harness
##############################################################################
-['arch != x64 or not pointer_compression or variant in (nooptimization, jitless)', {
- # Maglev is x64-only for now.
- # TODO(v8:7700): Update as we extend support.
+['not has_maglev', {
'maglev/*': [SKIP],
-}], # arch != x64 or not pointer_compression or variant in (nooptimization, jitless)
+}], # not has_maglev
##############################################################################
['arch != x64 or deopt_fuzzer', {
@@ -1317,7 +1409,7 @@
'wasm/liftoff': [SKIP],
'wasm/liftoff-debug': [SKIP],
'wasm/tier-up-testing-flag': [SKIP],
- 'wasm/tier-down-to-liftoff': [SKIP],
+ 'wasm/enter-debug-state': [SKIP],
'wasm/wasm-dynamic-tiering': [SKIP],
'wasm/test-partial-serialization': [SKIP],
'regress/wasm/regress-1248024': [SKIP],
@@ -1348,6 +1440,7 @@
# Type assertions block the propagation of word64 truncation useinfo,
# leading to differences in representation selection.
'compiler/bigint-multiply-truncate': [SKIP],
+ 'compiler/bigint-shift-left': [SKIP],
'wasm/bigint-opt': [SKIP]
}], # variant == assert_types
@@ -1524,6 +1617,9 @@
'regress/wasm/regress-1289678': [SKIP],
'regress/wasm/regress-1290079': [SKIP],
'regress/wasm/regress-1299183': [SKIP],
+ 'regress/wasm/regress-1417516': [SKIP],
+ 'regress/wasm/regress-13732': [SKIP],
+ 'regress/wasm/regress-1408337': [SKIP],
'regress/wasm/regress-crbug-1338980': [SKIP],
'regress/wasm/regress-crbug-1355070': [SKIP],
'regress/wasm/regress-crbug-1356718': [SKIP],
@@ -1571,7 +1667,6 @@
'regress/regress-11519': [SKIP],
'regress/regress-4121': [SKIP],
'packed-elements': [SKIP],
- 'const-dict-tracking': [SKIP],
'compiler/native-context-specialization-hole-check': [SKIP],
'compiler/test-literal-map-migration': [SKIP],
'compiler/deopt-pretenure': [SKIP],
@@ -1583,13 +1678,11 @@
'default-nospec': [SKIP],
'es6/collections-constructor-*': [SKIP],
'es6/map-constructor-entry-side-effect*': [SKIP],
-
- 'shared-memory/*': [SKIP],
}], # single_generation
################################################################################
['conservative_stack_scanning', {
- # TODO(v8:13257): Conservative stack scanning is not currently compatible
+ # TODO(v8:13493): Conservative stack scanning is not currently compatible
# with stack switching.
'wasm/stack-switching': [SKIP],
'wasm/stack-switching-export': [SKIP],
@@ -1597,8 +1690,6 @@
################################################################################
['third_party_heap', {
- # Requires local heaps
- 'const-field-tracking': [SKIP],
# Requires --concurrent_inlining / --finalize_streaming_on_background:
'regress/regress-1220974': [SKIP],
'regress-1146106': [SKIP],
@@ -1753,6 +1844,7 @@
'harmony/sharedarraybuffer-worker-gc-stress': [SKIP],
'math-floor-part1': [SKIP],
'compiler/regress-1125145': [SKIP],
+ 'compiler/regress-crbug-1399627': [SKIP],
# TODO(victorgomes):
'es6/unicode-regexp-ignore-case-noi18n': [SKIP],
@@ -1760,7 +1852,6 @@
# TODO(b/201757247):
'array-constructor-feedback': [FAIL],
- 'const-dict-tracking': [FAIL],
'compiler/deopt-pretenure': [FAIL],
'compiler/fast-api-sequences-x64': [FAIL],
'compiler/native-context-specialization-hole-check': [FAIL],
@@ -1805,6 +1896,8 @@
# Tests that cannot run without JS shared memory
['no_js_shared_memory', {
'shared-memory/*': [SKIP],
+ 'regress/regress-crbug-1394741': [SKIP],
+ 'regress/regress-crbug-1395117': [SKIP],
}], # 'no_js_shared_memory'
##############################################################################
@@ -1813,4 +1906,79 @@
'wasm/bigint-opt': [SKIP],
}], # variant == always_sparkplug
+##############################################################################
+# Behavioural differences between Maglev and Turbofan when the former is used
+# for OptimizeFunctionOnNextCall.
+['variant == stress_maglev', {
+ # Maglev doesn't inline Array constructors, so it won't bake in allocation
+ # site feedback.
+ 'array-constructor-feedback': [FAIL],
+ # Maglev deopts on non-int division, even if the result is truncated afterward.
+ 'shift-for-integer-div': [FAIL],
+ # Maglev deopts on negative zero for int32, even if truncated afterward.
+ 'compiler/number-abs': [FAIL],
+ 'compiler/number-floor': [FAIL],
+ 'compiler/number-ceil': [FAIL],
+ 'compiler/number-min': [FAIL],
+ 'compiler/number-max': [FAIL],
+ 'compiler/number-round': [FAIL],
+ 'compiler/number-trunc': [FAIL],
+ 'compiler/number-modulus': [FAIL],
+ 'compiler/number-toboolean': [FAIL],
+ 'compiler/number-comparison-truncations': [FAIL],
+ 'regress/regress-2132': [FAIL],
+ 'compiler/regress-1199345': [FAIL],
+ # Maglev doesn't suppress BigInt size errors on truncation.
+ 'compiler/bigint-shift-left': [FAIL],
+ 'compiler/bigint-multiply-truncate': [FAIL],
+ 'wasm/bigint-opt': [FAIL],
+ # Maglev doesn't promote Number to NumberOrOddball ops based on uses.
+ 'getters-on-elements': [FAIL],
+ # Maglev doesn't inline the call being tested here.
+ 'compiler/call-with-arraylike-or-spread': [FAIL],
+ 'compiler/call-with-arraylike-or-spread-3': [FAIL],
+ 'compiler/call-with-arraylike-or-spread-4': [FAIL],
+ 'compiler/call-with-arraylike-or-spread-5': [FAIL],
+ 'compiler/call-with-arraylike-or-spread-6': [FAIL],
+ 'compiler/call-with-arraylike-or-spread-7': [FAIL],
+ 'compiler/serializer-call': [FAIL],
+ 'compiler/serializer-apply': [FAIL],
+ 'compiler/serializer-accessors': [FAIL],
+ 'compiler/serializer-transition-propagation': [FAIL],
+ 'compiler/serializer-dead-after-jump': [FAIL],
+ 'compiler/serializer-dead-after-return': [FAIL],
+ 'es6/super-ic-opt-no-turboprop': [FAIL],
+ 'regress/regress-1049982-1': [FAIL],
+ 'regress/regress-1049982-2': [FAIL],
+ # Maglev doesn't support fast API calls.
+ 'compiler/fast-api-annotations': [FAIL],
+ 'compiler/fast-api-calls': [FAIL],
+ 'compiler/fast-api-calls-8args': [FAIL],
+ 'compiler/fast-api-calls-string': [FAIL],
+ 'compiler/fast-api-calls-pointer': [FAIL],
+ 'compiler/fast-api-interface-types': [FAIL],
+ 'compiler/fast-api-sequences*': [FAIL],
+ # Maglev doesn't specialise the IsBeingInterpreted runtime function.
+ 'compiler/is-being-interpreted-1': [FAIL],
+ # Maglev doesn't support elements transitions (yet!).
+ 'compiler/regress-9945-1': [FAIL],
+ 'compiler/regress-9945-2': [FAIL],
+ # Maglev doesn't support growing elements stores.
+ 'array-store-and-grow': [FAIL],
+ # This test requires inlining, which is currently disabled in Maglev.
+ 'regress/regress-4578': [FAIL],
+ # This test appears to rely on not adding a map stability check, but
+ # instead using a field representation dependency and allowing a read from
+ # the deprecated map. Wiring this through in Maglev would be too messy to be
+ # worth it.
+ 'regress/regress-map-invalidation-2': [FAIL],
+}], # variant == stress_maglev
+
+##############################################################################
+['is_clang_coverage and mode == debug', {
+ # Tests that are too slow.
+ 'asm/poppler/poppler': [SKIP],
+ 'wasm/shared-memory-worker-gc': [SKIP],
+}], # 'is_clang_coverage and mode == debug'
+
]
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
index 8007eb5c2a..5294edd9bc 100644
--- a/deps/v8/test/mjsunit/never-optimize.js
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbofan --no-always-turbofan --no-use-osr
-// Flags: --interrupt-budget=1024
+// Flags: --interrupt-budget=1024 --nomaglev
function o1() { }
%PrepareFunctionForOptimization(o1);
diff --git a/deps/v8/test/mjsunit/optimized-string-includes.js b/deps/v8/test/mjsunit/optimized-string-includes.js
index 32056d2ca3..4799405b7a 100644
--- a/deps/v8/test/mjsunit/optimized-string-includes.js
+++ b/deps/v8/test/mjsunit/optimized-string-includes.js
@@ -14,7 +14,7 @@
assertEquals(true, f());
%OptimizeFunctionOnNextCall(f);
assertEquals(true, f());
- assertTrue(isOptimized(f));
+ assertOptimized(f);
function f2() {
return 'abc'.includes('a', 1);
@@ -25,7 +25,7 @@
assertEquals(false, f2());
%OptimizeFunctionOnNextCall(f2);
assertEquals(false, f2());
- assertTrue(isOptimized(f2));
+ assertOptimized(f2);
function f3() {
return 'abc'.includes('b');
@@ -36,7 +36,7 @@
assertEquals(true, f3());
%OptimizeFunctionOnNextCall(f3);
assertEquals(true, f3());
- assertTrue(isOptimized(f3));
+ assertOptimized(f3);
function f4() {
return 'abcbc'.includes('bc', 2);
@@ -47,7 +47,7 @@
assertEquals(true, f4());
%OptimizeFunctionOnNextCall(f4);
assertEquals(true, f4());
- assertTrue(isOptimized(f4));
+ assertOptimized(f4);
function f5() {
return 'abcbc'.includes('b', -1);
@@ -58,7 +58,7 @@
assertEquals(true, f5());
%OptimizeFunctionOnNextCall(f5);
assertEquals(true, f5());
- assertTrue(isOptimized(f5));
+ assertOptimized(f5);
function f6() {
return 'abcbc'.includes('b', -10737418);
@@ -69,7 +69,7 @@
assertEquals(true, f6());
%OptimizeFunctionOnNextCall(f6);
assertEquals(true, f6());
- assertTrue(isOptimized(f6));
+ assertOptimized(f6);
})();
(function optimizeOSR() {
@@ -122,7 +122,7 @@
return 'abc'
}
}));
- assertFalse(isOptimized(f));
+ assertUnoptimized(f);
function f2(str) {
return 'abc'.includes(str)
@@ -135,7 +135,7 @@
return 'a'
}
}));
- assertFalse(isOptimized(f2));
+ assertUnoptimized(f2);
function f3(index) {
return 'abc'.includes('a', index)
@@ -148,5 +148,5 @@
return 0
}
}));
- assertFalse(isOptimized(f3));
+ assertUnoptimized(f3);
})();
diff --git a/deps/v8/test/mjsunit/rab-gsab-transfer-to-worker.js b/deps/v8/test/mjsunit/rab-gsab-transfer-to-worker.js
new file mode 100644
index 0000000000..ef27d1947a
--- /dev/null
+++ b/deps/v8/test/mjsunit/rab-gsab-transfer-to-worker.js
@@ -0,0 +1,232 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+(function TransferArrayBuffer() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const ab = msg.data;
+ postMessage(ab.byteLength + ' ' + ab.maxByteLength);
+ postMessage(ab.resizable + ' ' + ab.growable);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+
+ const ab = new ArrayBuffer(16);
+ worker.postMessage({data: ab}, [ab]);
+ assertEquals('16 16', worker.getMessage());
+ assertEquals('false undefined', worker.getMessage());
+
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ worker.postMessage({data: rab}, [rab]);
+ assertEquals('16 1024', worker.getMessage());
+ assertEquals('true undefined', worker.getMessage());
+
+ const sab = new SharedArrayBuffer(16);
+ worker.postMessage({data: sab});
+ assertEquals('16 16', worker.getMessage());
+ assertEquals('undefined false', worker.getMessage());
+
+ const gsab = new SharedArrayBuffer(16, {maxByteLength: 1024});
+ worker.postMessage({data: gsab});
+ assertEquals('16 1024', worker.getMessage());
+ assertEquals('undefined true', worker.getMessage());
+})();
+
+(function TransferLengthTrackingRabBackedTypedArray() {
+ function workerCode() {
+ onmessage = function(msg) {
+ postMessage(msg.data.length);
+ msg.data.buffer.resize(150);
+ postMessage(msg.data.length);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const ta = new Uint8Array(rab);
+ worker.postMessage({data: ta}, [rab]);
+ assertEquals(16, worker.getMessage());
+ assertEquals(150, worker.getMessage());
+})();
+
+(function TransferLengthTrackingGsabBackedTypedArray() {
+ function workerCode() {
+ onmessage = function(msg) {
+ postMessage(msg.data.length);
+ msg.data.buffer.grow(150);
+ postMessage(msg.data.length);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const gsab = new SharedArrayBuffer(16, {maxByteLength: 1024});
+ const ta = new Uint8Array(gsab);
+ worker.postMessage({data: ta});
+ assertEquals(16, worker.getMessage());
+ assertEquals(150, worker.getMessage());
+})();
+
+(function TransferFixedLengthRabBackedTypedArray() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const ta = msg.data;
+ postMessage(`${ta.length} ${ta[0]} ${ta[1]} ${ta[2]}`);
+ ta.buffer.resize(2);
+ postMessage(`${ta.length}`);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const ta = new Uint8Array(rab, 0, 10);
+ ta[0] = 30;
+ ta[1] = 11;
+ ta[2] = 22;
+ worker.postMessage({data: ta}, [rab]);
+ assertEquals('10 30 11 22', worker.getMessage());
+ assertEquals('0', worker.getMessage());
+})();
+
+(function TransferOutOfBoundsFixedLengthTypedArray() {
+ function workerCode() {
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const ta = new Uint8Array(rab, 0, 10);
+ rab.resize(0);
+ assertThrows(() => { worker.postMessage({data: ta}, [rab]) });
+})();
+
+(function TransferGsabBackedFixedLengthTypedArray() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const ta = msg.data;
+ postMessage(`${ta.length} ${ta[0]} ${ta[1]} ${ta[2]}`);
+ ta.buffer.grow(20);
+ postMessage(`${ta.length} ${ta[0]} ${ta[1]} ${ta[2]}`);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+
+ const gsab = new SharedArrayBuffer(16, {maxByteLength: 1024});
+ const ta = new Uint8Array(gsab, 0, 10);
+ ta[0] = 30;
+ ta[1] = 11;
+ ta[2] = 22;
+ worker.postMessage({data: ta});
+ assertEquals('10 30 11 22', worker.getMessage());
+ assertEquals('10 30 11 22', worker.getMessage());
+})();
+
+(function TransferLengthTrackingDataView() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const dv = msg.data;
+ postMessage(dv.byteLength);
+ dv.buffer.resize(150);
+ postMessage(dv.byteLength);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const dv = new DataView(rab);
+ worker.postMessage({data: dv}, [rab]);
+ assertEquals(16, worker.getMessage());
+ assertEquals(150, worker.getMessage());
+})();
+
+(function TransferFixedLengthDataView() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const dv = msg.data;
+ postMessage(`${dv.byteLength} ${dv.getUint8(0)} ${dv.getUint8(1)}`);
+ dv.buffer.resize(2);
+ try {
+ dv.byteLength;
+ } catch(e) {
+ postMessage('byteLength getter threw');
+ }
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const ta = new Uint8Array(rab);
+ ta[0] = 30;
+ ta[1] = 11;
+ worker.postMessage({data: new DataView(rab, 0, 10)}, [rab]);
+ assertEquals('10 30 11', worker.getMessage());
+ assertEquals('byteLength getter threw', worker.getMessage());
+})();
+
+(function TransferOutOfBoundsDataView1() {
+ function workerCode() {}
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const dv = new Uint8Array(rab, 0, 10);
+ rab.resize(0);
+ assertThrows(() => { worker.postMessage({data: dv}, [rab]) });
+})();
+
+(function TransferOutOfBoundsDataView2() {
+ function workerCode() {}
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+ const dv = new Uint8Array(rab, 2);
+ rab.resize(1);
+ assertThrows(() => { worker.postMessage({data: dv}, [rab]) });
+})();
+
+(function TransferZeroLengthDataView1() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const dv = msg.data;
+ postMessage(`${dv.byteLength}`);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+
+ worker.postMessage({data: new DataView(rab, 16)}, [rab]);
+ assertEquals('0', worker.getMessage());
+})();
+
+(function TransferZeroLengthDataView2() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const dv = msg.data;
+ postMessage(`${dv.byteLength}`);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+
+ worker.postMessage({data: new DataView(rab, 16, 0)}, [rab]);
+ assertEquals('0', worker.getMessage());
+})();
+
+(function TransferZeroLengthDataView3() {
+ function workerCode() {
+ onmessage = function(msg) {
+ const dv = msg.data;
+ postMessage(`${dv.byteLength}`);
+ }
+ }
+
+ const worker = new Worker(workerCode, {type: 'function'});
+ const rab = new ArrayBuffer(16, {maxByteLength: 1024});
+
+ worker.postMessage({data: new DataView(rab, 5, 0)}, [rab]);
+ assertEquals('0', worker.getMessage());
+})();
diff --git a/deps/v8/test/mjsunit/regress-1400809.js b/deps/v8/test/mjsunit/regress-1400809.js
new file mode 100644
index 0000000000..4b559fdca1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-1400809.js
@@ -0,0 +1,11 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --logfile='+' --log
+
+const log = d8.log.getAndStop();
+gc();
+function __f_6() {
+}
+__v_1 = __f_6();
diff --git a/deps/v8/test/mjsunit/regress-1417125.js b/deps/v8/test/mjsunit/regress-1417125.js
new file mode 100644
index 0000000000..3090acd259
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-1417125.js
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+// Flags: --single-threaded --deopt-to-baseline
+// Flags: --no-maglev --always-turbofan
+
+
+function foo() {
+ try {
+ foo();
+ String.fromCharCode(48,2268268757,48,59,25000,102,111,8,116,45,119,101,105,103,104,116,58,98,111,108,100,59,8,111,8,3394964672,2268268757,102,97,109,105,108,121,58,65,114,3746,97,108,44,32,72,101,8,8,101,116,105,8,97,44,3746,115,10000,110,115,45,25000,101,114,105,8,44,86,101,8,8,1,110,3746,34,32,99,111,108,111,4294967295,16,34,35,8,70,48,4294967295,4294967295,48,34,8,3746,79,82,69,88,47,80,65,82,38,35,1,48,52,59,8,8,25000,3394964672,-1745691656,47,62,84,4294967295,32,38,35,8,48,52,59,38,35,51,53,48,4003,76,4294967295,77,76,8,8,38,35,51,48,52,59,60,47,10000,111,110,116,62,60,47,25000,10000,97,110,25000,25000,47,116,-1745691656,62,10,8,3746,116,114,62,60,116,4294967295,3746,10,8,116,8,32,97,108,105,103,110,2268268757,34,108,101,3746,116,8,62,60,115,112,97,110,8,105,100,8,34,97,99,95,100,101,4294967295,99,34,62,60,102,111,8,8,32,115,8,121,108,3394964672,61,34,8,111,3394964672,116,45,115,25000,122,4003,16,8,49,4294967295,120,59,32,99,8,108,8,114,3746,4003,48,48,48,48,48,3746,59,32,4003,111,110,116,2268268757,102,97,109,105,108,121,8,8,4294967295,105,97,8,44,32,16,101,108,4294967295,101,116,105,99,3746,44,32,115,97,110,4294967295,45,3746,101,2268268757,105,102,44,86,101,114,100,3746,110,97,34,16,38,112,16,117,8,100,59,47,36,32,50,25000,112,10000,112,44,32,89,84,8,32,49,3746,32,25000,105,112,44,65,-1745691656,116,38,35,51,48,53,1,110,32,8,32,99,8,110,116,46,4003,8,8,98,2,116,32,83,112,114,101,97,100,45,84,38,117,-1745691656,109,108,59,114,60,3746,4294967295,114,32,47,8,107,32,66,97,110,8,97,115,38,35,2268268757,48,53,59,32,65,86,65,4003,2268268757,65,74,73,3394964672,2,102,111,110,116,62,60,8,115,112,97,110,62,60,16,116,100,4294967295,10,60,16,116,114,2268268757,2268268757,4294967295,114,62,10,60,116,100,32,97,108,105,103,110,61,8,108,101,102,116,34,62,3394964672,100,105,118,32,25000,100,61,8,97,99,95,117,2,108,2268268757,8,60,102,3394964672,110,116,3746,8,8,3746,108,101,61,4294967295,102,111,110,116,45,115,105,4294967295,101,8,49,48,112,120,59,32,3746,111,108,111,114,58,35,70,70,54,54,57,2,59,32,3746,111,110,4294967295,45,102,0,109,105,10000,121,58,65,114,105,8,108,44,32,72,101,108,3394964672,101,10000,8,99,97,44,32,115,97,110,115,8,115,101,8,16,4294967295,44,86,16,114,100,97,110,8,34,8,119,4294967295,119,46,104,101,100,101,118,111,8,119,98,114,3746,4003,62,3394964672,108,8,3746,101,46,99,3746,109,60,47,102,2268268757,110,16,62,60,4294967295,8,105,118,62,25000,47,0,100,62,8,47,116,16,62,60,47,116,97,8,108,101,62,60,8,116,100,62,60,-1745691656,116,114,62,60,8,-1745691656,62,10,60,116,100,32,99,8,97,4294967295,2268268757,61,34,97,99,95,107,97,3746,105,109,34,32,104,101,2268268757,103,104,8,61,34,50,48,37,34,32,98,8,99,-1745691656,108,111,114,61,2268268757,8,70,70,70,4294967295,70,1,34,32,105,100,61,34,116,97,119,52,34,32,97,108,105,103,110,61,34,108,8,102,116,3746,32,118,97,4294967295,105,8,110,61,34,109,105,100,8,108,3746,34,32,111,8,70,111,99,117,4294967295,4003,8,115,115,40,16,103,111,32,116,111,32,119,119,119,0,107,97,108,101,8,101,60,119,98,3746,0,47,62,46,99,111,109,39,44,25000,97,119,52,39,4294967295,34,32,111,110,77,111,117,115,1,-1745691656,118,3746,3746,61,34,115,3746,40,8,10000,8,32,116,111,32,119,3746,119,46,107,97,108,101,100,101,60,119,98,114,32,-1745691656,62,46,99,111,8,39,8,39,97,119,8,8,41,34,32,32,2,110,77,111,117,115,8,79,117,116,61,34,99,8,8,41,34,4294967295,25000,110,8,108,105,1,107,61,34,103,97,40,39,10000,8,116,112,58,4294967295,47,8,100,115,101,2268268757,4294967295,101,114,46,109,121,110,101,8,3746,99,111,109,1,65,100,83,101,114,118,101,8,47,99,25000,105,99,107,46,106,4
294967295,4003,63,117,114,2268268757,8,3746,3394964672,49,48,4294967295,50,53,49,50,1,55,54,51,2268268757,52,4294967295,3394964672,51,49,8,52,3746,48,10000,57,54,48,48,54,51,49,4294967295,54,54,52,52,56,8,56,4003,50,48,8,49,8,52,55,51,55,54,52,51,50,57,4294967295,52,50,8,51,8,8,51,54,16,48,48,48,3746,8,56,49,55,50,8,57,53,48,8,2268268757,49,57,48,54,3746,56,55,50,4294967295,49,55,48,55,53,48,57,50,55,8,55,57,57,51,3746,53,50,52,54,49,51,56,49,57,53,55,8,2,50,8,8,50,55,0,48,8,53,57,56,8,8,50,55,48,4294967295,8,51,49,54,52,1,54,8,53,48,56,57,50,25000,54,4294967295,48,8,49,54,4294967295,25000,57,48,57,49,8,57,8,55,52,55,8,50,48,55,1,4294967295,51,51,25000,51,50,55,2268268757,50,54,55,50,3746,48,51,57,49,8,54,0,8,55,8,51,55,3394964672,52,51,49,51,52,8,56,51,54,51,52,53,8,3746,3746,53,57,48,8,48,56,54,57,49,52,53,49,49,52,4294967295,53,50,120,49,57,50,88,49,8,2,88,8,56,88,2268268757,49,88,56,48,56,48,88,8,39,41,8,0,2268268757,116,1,108,3746,61,34,99,117,114,115,111,114,58,4294967295,3394964672,105,110,116,101,114,34,8,1,8,116,97,98,108,101,32,119,105,25000,116,104,61,34,49,53,54,34,32,98,111,114,100,101,114,61,4294967295,48,34,32,99,101,108,8,115,112,97,99,105,110,103,61,34,49,34,32,99,101,8,108,2,97,100,100,105,110,8,3746,8,49,34,62,10,3746,116,114,62,2268268757,32,32,60,-1745691656,3394964672,32,97,8,105,103,110,61,34,3394964672,101,102,2,34,32,62,3394964672,8,112,97,4003,32,105,0,61,34,97,99,95,116,105,116,108,101,4294967295,62,60,102,111,110,116,32,115,116,8,108,101,61,34,102,111,110,116,8,115,8,122,101,58,8,50,3746,120,59,8,99,111,108,111,114,2268268757,8,70,70,48,48,3746,8,59,2268268757,102,8,110,4294967295);
+ } catch {
+ %DeoptimizeNow();
+ }
+}
+foo.valueOf = foo;
+42 > foo;
+foo();
diff --git a/deps/v8/test/mjsunit/regress-crbug-1359991.js b/deps/v8/test/mjsunit/regress-crbug-1359991.js
index 168bcea51d..a64241dd0c 100644
--- a/deps/v8/test/mjsunit/regress-crbug-1359991.js
+++ b/deps/v8/test/mjsunit/regress-crbug-1359991.js
@@ -13,7 +13,7 @@ class MyFloat64Array extends Float64Array {
super(rab);
if (callSlice) {
callSlice = false; // Prevent recursion
- super.slice()
+ super.slice();
}
}
};
diff --git a/deps/v8/test/mjsunit/regress-v8-13459.js b/deps/v8/test/mjsunit/regress-v8-13459.js
new file mode 100644
index 0000000000..fefa7822d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-v8-13459.js
@@ -0,0 +1,111 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For rest parameters.
+function f1(...x) {
+ { var x; }
+ function x(){}
+ assertEquals('function', typeof x);
+ var x = 10;
+ assertEquals(10, x);
+};
+f1(0);
+
+function f2(...x) {
+ var x;
+ function x(){}
+ assertEquals('function', typeof x);
+ var x = 10;
+ assertEquals(10, x);
+};
+f2(0);
+
+function f3(...x) {
+ var x;
+ assertEquals('object', typeof x);
+}
+f3(1);
+
+function f4(...x) {
+ var x = 10;
+ assertEquals(10, x);
+}
+f4(1);
+
+function f5(...x) {
+ function x(){}
+ assertEquals('function', typeof x);
+}
+f5(1);
+
+// For simple parameters.
+function f6(x) {
+ var x = 10;
+ function x(){}
+ assertEquals(10, x);
+}
+f6(1);
+
+function f7(x) {
+ var x;
+ function x(){}
+ assertEquals('function', typeof x);
+}
+f7(1);
+
+function f8(x) {
+ var x;
+ assertEquals(1, x);
+}
+f8(1);
+
+function f9(x) {
+ var x = 10;
+ assertEquals(10, x);
+}
+f9(1);
+
+function f10(x) {
+ function x(){}
+ assertEquals('function', typeof x);
+}
+f10(1);
+
+// For default parameters.
+function f11(x = 2) {
+ var x;
+ function x(){}
+ assertEquals('function', typeof x);
+}
+f11();
+f11(1);
+
+function f12(x = 2) {
+ var x;
+ function x(){}
+ assertEquals('function', typeof x);
+ var x = 10;
+ assertEquals(10, x);
+}
+f12();
+f12(1);
+
+function f13(x = 2) {
+ var x;
+ assertEquals(2, x);
+}
+f13();
+
+function f14(x = 2) {
+ var x = 1;
+ assertEquals(1, x);
+}
+f14();
+f14(3);
+
+function f15(x = 2) {
+ function x(){}
+ assertEquals('function', typeof x);
+}
+f15();
diff --git a/deps/v8/test/mjsunit/regress/asm/regress-1402270.js b/deps/v8/test/mjsunit/regress/asm/regress-1402270.js
new file mode 100644
index 0000000000..77badd768f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/asm/regress-1402270.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function print_stack(unused_arg) {
+ console.trace();
+}
+function asm(_, imports) {
+ 'use asm';
+ var print_stack = imports.print_stack;
+ function f() {
+ print_stack(1);
+ }
+ return f;
+}
+asm({}, {'print_stack': print_stack})();
diff --git a/deps/v8/test/mjsunit/regress/regress-1320641.js b/deps/v8/test/mjsunit/regress/regress-1320641.js
new file mode 100644
index 0000000000..6c52c9d178
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1320641.js
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(){
+ const xs = new Uint16Array(3775336418);
+ return xs[-981886074];
+}
+%PrepareFunctionForOptimization(foo);
+foo();
+
+assertEquals(undefined, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-13652.js b/deps/v8/test/mjsunit/regress/regress-13652.js
new file mode 100644
index 0000000000..a390046ed9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-13652.js
@@ -0,0 +1,28 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+async function f1() {
+ d8.debugger.enable();
+ throw new Error();
+}
+
+async function f2() {
+ try {
+ await f1();
+ } catch (_e) {
+ }
+}
+
+(async () => {
+ await f2();
+ await f2();
+})();
+
+(async () => {
+ d8.debugger.disable();
+})();
+
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-1376663.js b/deps/v8/test/mjsunit/regress/regress-1376663.js
new file mode 100644
index 0000000000..a459a57603
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1376663.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignore-unhandled-promises
+
+function __f_0() {
+ onmessage = function(e) {
+ import("./does_not_exist.js");
+ while(true) {}
+ }
+}
+function __f_1() {
+}
+
+let sab = new SharedArrayBuffer();
+let w1 = new Worker(__f_0, {type: 'function'});
+w1.postMessage({sab: sab});
+let w2 = new Worker(__f_1, {type: 'function'});
diff --git a/deps/v8/test/mjsunit/regress/regress-1383362.js b/deps/v8/test/mjsunit/regress/regress-1383362.js
new file mode 100644
index 0000000000..7237aee34c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1383362.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ for (let i = 0; i < 1; ++i) {
+ x ^= x;
+ }
+ return x;
+}
+
+%PrepareFunctionForOptimization(f);
+f(1n);
+%OptimizeFunctionOnNextCall(f);
+f(1n);
diff --git a/deps/v8/test/mjsunit/regress/regress-1385368.js b/deps/v8/test/mjsunit/regress/regress-1385368.js
new file mode 100644
index 0000000000..6b34217d95
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1385368.js
@@ -0,0 +1,10 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-temporal
+const o1 = {valueOf:1337};
+const o2 = Array(BigInt64Array,o1,..."short",o1);
+const o3 = {__proto__:13.37,c:o2,e:"\u56FD\u52A1\u9662\u5173\u4E8E\u300A\u571F\u5730"};
+const o4 = JSON.stringify(o3);
+assertThrows('new Temporal.Calendar(o4);', RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1393865.js b/deps/v8/test/mjsunit/regress/regress-1393865.js
new file mode 100644
index 0000000000..b4e19f02a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1393865.js
@@ -0,0 +1,28 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function aux(a, b) {
+ if (a) {
+ a >> b;
+ }
+}
+
+function opt() {
+ let p = Promise;
+ ++p;
+ // {p} can be anything that evaluates to false but is not inlined.
+ return aux(p, "number");
+}
+
+%PrepareFunctionForOptimization(aux);
+aux(1n, 1n);
+%OptimizeFunctionOnNextCall(aux);
+aux(1n, 1n);
+
+%PrepareFunctionForOptimization(opt);
+opt();
+%OptimizeFunctionOnNextCall(opt);
+opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-1393942.js b/deps/v8/test/mjsunit/regress/regress-1393942.js
new file mode 100644
index 0000000000..724f17aaed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1393942.js
@@ -0,0 +1,26 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-rab-gsab --allow-natives-syntax
+
+const gsab = new SharedArrayBuffer(4,{"maxByteLength":8});
+const u16arr = new Uint16Array(gsab);
+
+function foo(obj) {
+ obj[1] = 0;
+}
+
+function test() {
+ const u32arr = new Uint32Array();
+ foo(u32arr);
+ foo(u16arr);
+}
+
+%PrepareFunctionForOptimization(test);
+%PrepareFunctionForOptimization(foo);
+test();
+%OptimizeFunctionOnNextCall(foo);
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1394663.js b/deps/v8/test/mjsunit/regress/regress-1394663.js
new file mode 100644
index 0000000000..51cf42e127
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1394663.js
@@ -0,0 +1,10 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Adding a listener so that {profileEnd} does not create a file on the disk.
+d8.profiler.setOnProfileEndListener(() =>{});
+
+console.profile();
+console.profileEnd();
+console.profileEnd();
diff --git a/deps/v8/test/mjsunit/regress/regress-1400053.js b/deps/v8/test/mjsunit/regress/regress-1400053.js
new file mode 100644
index 0000000000..1a23692a4a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1400053.js
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-turbofan
+
+function foo(x) {
+ return BigInt(x);
+}
+
+function bar() {
+ for (let i = 0; i < 1; ++i) {
+ // The empty closure weakens the range of {i} to infinity over several
+ // iterations.
+ function t() { }
+ foo(i);
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(bar);
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-1400056.js b/deps/v8/test/mjsunit/regress/regress-1400056.js
new file mode 100644
index 0000000000..b7f6f25158
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1400056.js
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --turboshaft --allow-natives-syntax --no-lazy-feedback-allocation
+
+
+function foo() {
+ var arr = new Uint8Array();
+ try {
+ x.next();
+ } catch (e) {}
+ var x = arr.entries();
+ x.next();
+}
+
+function bar() {
+ foo();
+}
+
+%PrepareFunctionForOptimization(bar);
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-1400897.js b/deps/v8/test/mjsunit/regress/regress-1400897.js
new file mode 100644
index 0000000000..faea7b73ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1400897.js
@@ -0,0 +1,29 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar() {
+ const x = 0;
+ return BigInt(x * -1);
+}
+
+%PrepareFunctionForOptimization(bar);
+assertEquals(0n, bar());
+%OptimizeFunctionOnNextCall(bar);
+assertEquals(0n, bar());
+
+function foo() {
+ let result = 0n;
+ let obj = {i: 0};
+ for (; obj.i < 1; ++obj.i) {
+ result += BigInt(obj.i * -2);
+ }
+ return result;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(0n, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0n, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-1403742.js b/deps/v8/test/mjsunit/regress/regress-1403742.js
new file mode 100644
index 0000000000..7f427c552c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1403742.js
@@ -0,0 +1,21 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt(){
+ const v2 = {"maxByteLength":4886690};
+ const v4 = new ArrayBuffer(1458,v2);
+ const v6 = new Int8Array(v4);
+ const v8 = Symbol.toStringTag;
+ const v9 = v6[v8];
+ return v9;
+}
+
+let ignition = opt();
+%PrepareFunctionForOptimization(opt);
+opt();
+%OptimizeFunctionOnNextCall(opt);
+let turbofan = opt();
+assertEquals(ignition, turbofan);
diff --git a/deps/v8/test/mjsunit/regress/regress-1404607.js b/deps/v8/test/mjsunit/regress/regress-1404607.js
new file mode 100644
index 0000000000..8ced478320
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1404607.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt() {
+ const buffer = new ArrayBuffer(64);
+ const view = new DataView(buffer);
+ let i = 1n;
+ i += 1n;
+ view.setUint8(i);
+}
+
+%PrepareFunctionForOptimization(opt);
+assertThrows(opt, TypeError);
+%OptimizeFunctionOnNextCall(opt);
+assertThrows(opt, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1404863.js b/deps/v8/test/mjsunit/regress/regress-1404863.js
new file mode 100644
index 0000000000..7c656a0be1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1404863.js
@@ -0,0 +1,18 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const template = `class Foo { foo(){} }`
+
+// Keep recursively embedding the template inside itself until we stack
+// overflow. This should not segfault.
+let s = template;
+while (true) {
+ try {
+ eval(s);
+ } catch (e) {
+ // A stack overflow exception is eventually expected.
+ break;
+ }
+ s = s.replace("foo(){}", `foo(){ ${s} }`);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1407070.js b/deps/v8/test/mjsunit/regress/regress-1407070.js
new file mode 100644
index 0000000000..dbe2b2405d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1407070.js
@@ -0,0 +1,11 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --turboshaft --always-turbofan
+
+{
+ const zero = 0;
+ const x = -zero;
+ const result = Math.cbrt(x);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1407349.js b/deps/v8/test/mjsunit/regress/regress-1407349.js
new file mode 100644
index 0000000000..07486e4edc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1407349.js
@@ -0,0 +1,17 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbofan
+
+function main() {
+ for (let v1 = 0; v1 < 4002; v1++) {
+ const v3 = [-160421.17589718767];
+ v3.constructor = v1;
+ try {
+ const v4 = (-9223372036854775807)();
+ } catch(v5) {
+ }
+ }
+}
+main();
diff --git a/deps/v8/test/mjsunit/regress/regress-1408086.js b/deps/v8/test/mjsunit/regress/regress-1408086.js
new file mode 100644
index 0000000000..309524aa44
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1408086.js
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const obj1 = {a:42};
+const obj2 = {a:42};
+function foo() {
+ obj1.a = 13.37;
+ return obj2;
+}
+
+class c1 extends foo {
+ obj3 = 1;
+}
+new c1();
+new c1();
diff --git a/deps/v8/test/mjsunit/regress/regress-1408400.js b/deps/v8/test/mjsunit/regress/regress-1408400.js
new file mode 100644
index 0000000000..6896a72f56
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1408400.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags:
+
+for (let v0 = 0; v0 < 89; v0++) {
+ function f3() {
+ }
+ class C7 extends Uint8Array {
+ 7 = f3;
+ }
+ const v8 = new C7(1111953);
+ for (let v9 = 0; v9 < 91; v9++) {
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1408606.js b/deps/v8/test/mjsunit/regress/regress-1408606.js
new file mode 100644
index 0000000000..01e3de1db1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1408606.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v0 = new Uint8ClampedArray();
+function f0(cnt) {
+ Object.defineProperty(v0, cnt == 0 ? "subarray" : "property", {
+ })
+ v0[-7] = -1.5;
+ v0[-7];
+}
+%PrepareFunctionForOptimization(f0);
+f0(0);
+f0();
+%OptimizeFunctionOnNextCall(f0);
+f0();
diff --git a/deps/v8/test/mjsunit/regress/regress-1409058.js b/deps/v8/test/mjsunit/regress/regress-1409058.js
new file mode 100644
index 0000000000..e9351b1723
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1409058.js
@@ -0,0 +1,9 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (const invalidLocale of ["", "1", "12", "123", "1234"]) {
+ print(invalidLocale);
+ assertThrows(() => "".toLocaleUpperCase(invalidLocale), RangeError);
+ assertThrows(() => "".toLocaleLowerCase(invalidLocale), RangeError);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1412629.js b/deps/v8/test/mjsunit/regress/regress-1412629.js
new file mode 100644
index 0000000000..03e67d12fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1412629.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return NaN ** x;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(NaN, foo(1));
+assertEquals(1, foo(0));
+assertEquals(1, foo(-0));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(NaN, foo(1));
+assertEquals(1, foo(0));
+assertEquals(1, foo(-0));
diff --git a/deps/v8/test/mjsunit/regress/regress-1412975.js b/deps/v8/test/mjsunit/regress/regress-1412975.js
new file mode 100644
index 0000000000..9a912d2b88
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1412975.js
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ let v1 = -2564233261n;
+ const v2 = "function".charCodeAt(2);
+ return v1 + v1;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1414200.js b/deps/v8/test/mjsunit/regress/regress-1414200.js
new file mode 100644
index 0000000000..43530767b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1414200.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test() {
+ for (var i = 0; i< 10; i++) {
+ if (i <= NaN) {
+ throw 0;
+ }
+ }
+}
+
+%PrepareFunctionForOptimization(test);
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1414376.js b/deps/v8/test/mjsunit/regress/regress-1414376.js
new file mode 100644
index 0000000000..2b9f6fa709
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1414376.js
@@ -0,0 +1,16 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noturbo-inlining
+
+(function () {
+ assertEquals = function assertEquals() {};
+})();
+function __f_0() {}
+function __f_6() {
+ for (var __v_6 = -2147483648; __v_6 < 0; __v_6 += 100000) {
+ assertEquals(__f_0(), Math.floor(__v_6 / 0));
+ }
+};
+__f_6();
diff --git a/deps/v8/test/mjsunit/regress/regress-1414659.js b/deps/v8/test/mjsunit/regress/regress-1414659.js
new file mode 100644
index 0000000000..f8cd963d67
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1414659.js
@@ -0,0 +1,15 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(b) {
+ return (1 / (b ? 0 : -0)) < 0;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(false, foo(true));
+assertEquals(true, foo(false));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(true, foo(false));
diff --git a/deps/v8/test/mjsunit/regress/regress-1415210.js b/deps/v8/test/mjsunit/regress/regress-1415210.js
new file mode 100644
index 0000000000..6d3098efca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1415210.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --throws
+
+function source() {
+ return 1;
+}
+
+const options = {
+ arguments: [
+ {
+ get value() { d8.terminate(); return "something" },
+ }
+ ],
+ type: "function",
+};
+const v12 = new Worker(source, options);
diff --git a/deps/v8/test/mjsunit/regress/regress-1416520.js b/deps/v8/test/mjsunit/regress/regress-1416520.js
new file mode 100644
index 0000000000..7c1e673786
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1416520.js
@@ -0,0 +1,14 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(x) {
+ return x / 179769313486231590772930519078902473361797697894230657273430081157732675805500963132708477322407536021120113879871393357658789768814416622492847430639474124377767893424865485276302219601246094119453082952085005768838150682342462881473913110540827237163350510684586298239947245938479716304835356329624224137216;
+}
+
+%PrepareFunctionForOptimization(test);
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1416697.js b/deps/v8/test/mjsunit/regress/regress-1416697.js
new file mode 100644
index 0000000000..1930bb826b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1416697.js
@@ -0,0 +1,44 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function __isPropertyOfType() {
+ return typeof type === 'undefined' || typeof desc.value === type;
+}
+
+function __getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ if (__isPropertyOfType()) properties.name;
+ }
+}
+
+function __getRandomProperty(obj) {
+ let properties = __getProperties(obj);
+}
+
+var obj = {};
+Object.defineProperty(obj, "length", {});
+
+function foo(x) {
+ try {
+ for (let i = 0; i < x; ++i) {
+ delete obj[__getRandomProperty(obj)]();
+ }
+ } catch (e) {}
+}
+
+function test() {
+ let result = 0.9999;
+ for (let i = 0; i < 50; ++i) {
+ result += foo(i);
+ }
+}
+
+%PrepareFunctionForOptimization(test);
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1416830.js b/deps/v8/test/mjsunit/regress/regress-1416830.js
new file mode 100644
index 0000000000..c81146f92d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1416830.js
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() { }
+
+var foo = function() {
+ return !new f();
+};
+
+function test() {
+ return foo();
+};
+
+%PrepareFunctionForOptimization(test);
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1418509.js b/deps/v8/test/mjsunit/regress/regress-1418509.js
new file mode 100644
index 0000000000..fe1ca35fcc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1418509.js
@@ -0,0 +1,20 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var __v_0 = 5;
+function __f_1() {
+ for (var __v_6 = 0; __v_6 < __v_0; __v_6++) {
+ if (__v_6 % __v_6 == 0) {
+ "a".split();
+ }
+ }
+}
+
+%PrepareFunctionForOptimization(__f_1);
+__f_1();
+__f_1();
+%OptimizeFunctionOnNextCall(__f_1);
+__f_1();
diff --git a/deps/v8/test/mjsunit/regress/regress-1418571.js b/deps/v8/test/mjsunit/regress/regress-1418571.js
new file mode 100644
index 0000000000..05b5a8eb73
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1418571.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-turbo-loop-peeling
+
+let v0 = "description";
+for (const v1 in v0) {
+ function f2(a3) {
+ v0[v1];
+ do {
+ --v0;
+ } while (a3 < 6)
+ }
+ %PrepareFunctionForOptimization(f2);
+ %OptimizeFunctionOnNextCall(f2);
+ f2();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1419636.js b/deps/v8/test/mjsunit/regress/regress-1419636.js
new file mode 100644
index 0000000000..41d8f6cee4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1419636.js
@@ -0,0 +1,17 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return Object.is((x ? -0 : NaN) - 0, -0);
+}
+
+let ignition = foo(true);
+%PrepareFunctionForOptimization(foo);
+foo();
+%OptimizeFunctionOnNextCall(foo);
+let turbofan = foo(true);
+assertTrue(ignition);
+assertTrue(turbofan);
diff --git a/deps/v8/test/mjsunit/regress/regress-1419740.js b/deps/v8/test/mjsunit/regress/regress-1419740.js
new file mode 100644
index 0000000000..44b0aa4918
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1419740.js
@@ -0,0 +1,28 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f0() {
+ for (let v5 = 1; v5 < 9; v5++) {
+ function f6() {
+ }
+ function f10(a11) {
+ for (let v15 = -1834309537; v15 < 9; v15 = v15 + a11) {
+ function f17() {
+ }
+ for (let v20 = 0; v20 < v15; v20 = v20 + "14") {
+ }
+ }
+ return f10;
+ }
+ const v21 = f10();
+ v21(f6);
+ v21();
+ }
+}
+%PrepareFunctionForOptimization(f0);
+f0();
+%OptimizeFunctionOnNextCall(f0);
+f0();
diff --git a/deps/v8/test/mjsunit/regress/regress-1420536.js b/deps/v8/test/mjsunit/regress/regress-1420536.js
new file mode 100644
index 0000000000..175becf2f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1420536.js
@@ -0,0 +1,34 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() { this.x = 1; }
+for (var i = 0; i < 10; i++) new f();
+
+function foo() {
+ var obj = new f();
+ obj.y = -1073741825;
+ return obj;
+}
+
+function bar(t) {
+ var arr = [];
+ for (var p in t){
+ arr.push([ t[p]]);
+ }
+ return arr;
+}
+
+function test() {
+ return bar(foo());
+}
+
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(bar);
+%PrepareFunctionForOptimization(test);
+assertEquals([[1], [-1073741825]], test(foo, bar));
+%OptimizeFunctionOnNextCall(test);
+assertEquals([[1], [-1073741825]], test(foo, bar));
diff --git a/deps/v8/test/mjsunit/regress/regress-1421373.js b/deps/v8/test/mjsunit/regress/regress-1421373.js
new file mode 100644
index 0000000000..dca518b586
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1421373.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function foo() {
+ const o5 = {
+ "apply": undefined,
+ o() {
+ super.valueOf();
+ },
+ };
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1421685.js b/deps/v8/test/mjsunit/regress/regress-1421685.js
new file mode 100644
index 0000000000..e46cdaa5b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1421685.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-turbo-load-elimination
+// Flags: --no-turbo-loop-variable
+
+function test() {
+ for (let v1 = 0; v1 < 1; v1++) {
+ const v3 = BigInt(v1);
+ ([("1244138209").length]).includes(5, -2147483649);
+ v3 << 51n;
+ }
+}
+
+%PrepareFunctionForOptimization(test);
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1422166.js b/deps/v8/test/mjsunit/regress/regress-1422166.js
new file mode 100644
index 0000000000..e624322641
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1422166.js
@@ -0,0 +1,31 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const buf = new SharedArrayBuffer(8, { "maxByteLength": 8 });
+
+class obj extends Uint8ClampedArray {
+ constructor() {
+ super(buf);
+ }
+ defineProperty() {
+ for (let i = 0; i < 3; i++) {
+ super.length;
+ }
+ SharedArrayBuffer.__proto__ = this;
+ }
+}
+
+function opt() {
+ new obj().defineProperty();
+}
+
+%PrepareFunctionForOptimization(opt);
+opt();
+opt();
+opt();
+opt();
+%OptimizeFunctionOnNextCall(opt)
+opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-1423703.js b/deps/v8/test/mjsunit/regress/regress-1423703.js
new file mode 100644
index 0000000000..c17d001ebf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1423703.js
@@ -0,0 +1,11 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+for (let v0 = 0; v0 < 100; v0++) {
+ for (let v2 = 0; v2 < 19; v2++) {
+ Math["abs"](Math.max(4294967295, 0, v2, -0)) - v2;
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-165637.js b/deps/v8/test/mjsunit/regress/regress-165637.js
index 2a7fc4421f..60ca8d2e69 100644
--- a/deps/v8/test/mjsunit/regress/regress-165637.js
+++ b/deps/v8/test/mjsunit/regress/regress-165637.js
@@ -4,7 +4,7 @@
// Make sure that packed and unpacked array slices are still properly handled
var holey_array = [1, 2, 3, 4, 5,,,,,,];
-assertEquals([undefined], holey_array.slice(6, 7));
+assertEquals(new Array(1), holey_array.slice(6, 7));
assertEquals(undefined, holey_array.slice(6, 7)[0]);
assertEquals([], holey_array.slice(2, 1));
assertEquals(3, holey_array.slice(2, 3)[0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-633998.js b/deps/v8/test/mjsunit/regress/regress-633998.js
index ff34a0a44e..c4ffe272cd 100644
--- a/deps/v8/test/mjsunit/regress/regress-633998.js
+++ b/deps/v8/test/mjsunit/regress/regress-633998.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var err_str_1 = "apply was called on , which is a object and not a function";
+var err_str_1 = "apply was called on , which is an object and not a function";
var err_str_2 =
- "apply was called on Error, which is a object and not a function";
+ "apply was called on Error, which is an object and not a function";
var reached = false;
var error = new Error();
diff --git a/deps/v8/test/mjsunit/regress/regress-804177.js b/deps/v8/test/mjsunit/regress/regress-804177.js
index b100480599..685bed1c03 100644
--- a/deps/v8/test/mjsunit/regress/regress-804177.js
+++ b/deps/v8/test/mjsunit/regress/regress-804177.js
@@ -25,7 +25,7 @@
}
b = Array.of.call(f,1,2);
b[4] = 1;
- assertEquals(b, [1, 2, undefined, undefined, 1]);
+ assertEquals(b, [1, 2, , , 1]);
})();
// Tests that using Array.of with a constructor returning an object with an
diff --git a/deps/v8/test/mjsunit/regress/regress-chromium-1409294.js b/deps/v8/test/mjsunit/regress/regress-chromium-1409294.js
new file mode 100644
index 0000000000..920b79a163
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-chromium-1409294.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-turbofan
+
+let key = 5;
+
+class Base {
+ constructor() {
+ return new Proxy(this, {
+ defineProperty(target, key, desc) {
+ return Reflect.defineProperty(target, key, desc);
+ }
+ });
+ }
+}
+
+class Child extends Base {
+ [key] = "basic";
+}
+let c = new Child();
+c = new Child();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1381404.js b/deps/v8/test/mjsunit/regress/regress-crbug-1381404.js
new file mode 100644
index 0000000000..164f34289d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1381404.js
@@ -0,0 +1,9 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const a = {"maxByteLength": 15061061};
+const e = d8.serializer.serialize(a);
+const f = new Uint8Array(e);
+f[18] = 114;
+assertThrows(() => { d8.serializer.deserialize(e); });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1383883.js b/deps/v8/test/mjsunit/regress/regress-crbug-1383883.js
new file mode 100644
index 0000000000..6b16697062
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1383883.js
@@ -0,0 +1,27 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __isPropertyOfType() {
+}
+function __getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ properties.push(name);
+ }
+ return properties;
+}
+function __getRandomProperty(obj, seed) {
+ let properties = __getProperties(obj);
+ return properties[seed % properties.length];
+}
+const __v_12 = [2, '3'];
+function __f_8() {
+ if (__v_12 != null && typeof __v_12 == "object") Object.defineProperty(__v_12, __getRandomProperty(__v_12, 416937), {
+ value: 4294967295
+ });
+}
+__f_8();
+var __v_15 = Object.freeze(__v_12);
+__f_8();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1383976.js b/deps/v8/test/mjsunit/regress/regress-crbug-1383976.js
new file mode 100644
index 0000000000..0917f085af
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1383976.js
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let __v_34 = {
+ a: -9007199254740990,
+ b: 0
+};
+let __v_35 = {
+ a: 2,
+ b: 0
+};
+Object.defineProperty(__v_34, "b", {
+  value: 4.2,
+});
+let __v_36 = {
+ a: "foo",
+ b: 0
+};
+Object.defineProperty(__v_35, "a", {
+  value: 2,
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant2.js
new file mode 100644
index 0000000000..3076ffdfc7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant2.js
@@ -0,0 +1,13 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+const rab1 = new ArrayBuffer(4, {"maxByteLength": 100});
+const ta = new Int8Array(rab1);
+const rab2 = new ArrayBuffer(10, {"maxByteLength": 20});
+const lengthTracking = new Int8Array(rab2);
+rab2.resize(0);
+ta.constructor = { [Symbol.species]: function() { return lengthTracking; } };
+assertThrows(() => { ta.slice(); }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant3.js b/deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant3.js
new file mode 100644
index 0000000000..54e7254193
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1384474-variant3.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+const rab1 = new ArrayBuffer(4, {"maxByteLength": 100});
+const ta = new Int8Array(rab1);
+const rab2 = new ArrayBuffer(10, {"maxByteLength": 20});
+const lengthTracking = new Int8Array(rab2);
+rab2.resize(0);
+ta.constructor = { [Symbol.species]: function() { return lengthTracking; } };
+assertThrows(() => { ta.filter(() => { return true; }); },
+ TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1384474.js b/deps/v8/test/mjsunit/regress/regress-crbug-1384474.js
new file mode 100644
index 0000000000..e64237e9c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1384474.js
@@ -0,0 +1,12 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+const ta = new Int8Array(4);
+const rab = new ArrayBuffer(10, {"maxByteLength": 20});
+const lengthTracking = new Int8Array(rab);
+rab.resize(0);
+ta.constructor = { [Symbol.species]: function() { return lengthTracking; } };
+assertThrows(() => { ta.slice(); }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1392577.js b/deps/v8/test/mjsunit/regress/regress-crbug-1392577.js
new file mode 100644
index 0000000000..97f08bec08
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1392577.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+const rab = new ArrayBuffer(50, {"maxByteLength": 100});
+const ta = new Int8Array(rab);
+const start = {};
+start.valueOf = function() {
+ rab.resize(0);
+ return 5;
+}
+ta.fill(5, start);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1393375.js b/deps/v8/test/mjsunit/regress/regress-crbug-1393375.js
new file mode 100644
index 0000000000..aa300cf35c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1393375.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rab-gsab
+
+const rab = new ArrayBuffer(50, {"maxByteLength": 100});
+const ta = new Int8Array(rab);
+const evil = {};
+evil.valueOf = function() {
+ rab.resize(0);
+ return 5;
+}
+ta.lastIndexOf(1, evil);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1394741.js b/deps/v8/test/mjsunit/regress/regress-crbug-1394741.js
new file mode 100644
index 0000000000..d715718969
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1394741.js
@@ -0,0 +1,23 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc --expose-externalize-string --shared-string-table
+
+(function __f_0() {
+ let worker = new Worker('', {type: 'string'});
+})();
+function __f_11() { return "A"; }
+function __f_12() { return "\u1234"; }
+function __f_13() {
+ var __v_3 = "";
+ var __v_5 =
+ "AAAA" + __f_11();
+ var __v_6 =
+ "\u1234\u1234\u1234\u1234" + __f_12();
+ gc();
+ externalizeString(__v_6);
+}
+for (var __v_4 = 0; __v_4 < 10; __v_4++) {
+ __f_13();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1395117.js b/deps/v8/test/mjsunit/regress/regress-crbug-1395117.js
new file mode 100644
index 0000000000..b56e2c94a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1395117.js
@@ -0,0 +1,14 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table
+
+let o = {}
+let json_str = '[{"a": 2.1, "b": 1, "c": "hello"}, {"a": null, "b": 2, "c": {"a": 2.1, "b": 1.1, "c": "hello"}}]';
+// Internalize a bunch of strings to trigger a shared GC.
+for (let i=0; i<100; i++) {
+ let str = 'X'.repeat(100) + i;
+ o[str] = str;
+}
+JSON.parse('[{"a": 2.1, "b": 1, "c": "hello"}, {"a": null, "b": 2, "c": {"a": 2.1, "b": 1.1, "c": "hello"}}]');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1399695.js b/deps/v8/test/mjsunit/regress/regress-crbug-1399695.js
new file mode 100644
index 0000000000..cd8fb6cc66
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1399695.js
@@ -0,0 +1,13 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function __f_3() {
+ isNaN.prototype = 14;
+}
+%PrepareFunctionForOptimization(__f_3);
+__f_3();
+%OptimizeFunctionOnNextCall(__f_3);
+__f_3();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1399799.js b/deps/v8/test/mjsunit/regress/regress-crbug-1399799.js
new file mode 100644
index 0000000000..70fdcea09e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1399799.js
@@ -0,0 +1,45 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-rab-gsab --allow-natives-syntax
+
+const ab = new ArrayBuffer(3000);
+const ta = new Uint16Array(ab);
+
+function createOOBTA() {
+ const rab = new ArrayBuffer(3000, {"maxByteLength": 4000});
+ const ta = new Uint8Array(rab, 0, 3000);
+ rab.resize(0);
+ return ta;
+}
+
+Object.defineProperty(Uint16Array, Symbol.species,
+ { configurable: true, enumerable: true,
+ get: () => { return createOOBTA; }});
+assertThrows(() => { ta.slice(); }, TypeError);
+
+function createDetachedTA() {
+ const rab = new ArrayBuffer(3000, {"maxByteLength": 4000});
+ const ta = new Uint8Array(rab, 0, 3000);
+ %ArrayBufferDetach(rab);
+ return ta;
+}
+
+Object.defineProperty(Uint16Array, Symbol.species,
+ { configurable: true, enumerable: true,
+ get: () => { return createDetachedTA; }});
+assertThrows(() => { ta.slice(); }, TypeError);
+
+// But this works:
+function createLengthTrackingTA() {
+ const rab = new ArrayBuffer(3000, {"maxByteLength": 4000});
+ const ta = new Uint16Array(rab, 0);
+ return ta;
+}
+
+Object.defineProperty(Uint16Array, Symbol.species,
+ { configurable: true, enumerable: true,
+ get: () => { return createLengthTrackingTA; }});
+
+ta.slice();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1402139.js b/deps/v8/test/mjsunit/regress/regress-crbug-1402139.js
new file mode 100644
index 0000000000..ce2ca5eef5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1402139.js
@@ -0,0 +1,13 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-rab-gsab
+
+const rab = new ArrayBuffer(363, {"maxByteLength": 1000});
+const ta = new Uint8Array(rab);
+rab.resize(80);
+const data = d8.serializer.serialize(ta);
+const dataArray = new Uint8Array(data);
+dataArray[dataArray.length - 1] = 17;
+assertThrows(() => { d8.serializer.deserialize(data); });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1404820.js b/deps/v8/test/mjsunit/regress/regress-crbug-1404820.js
new file mode 100644
index 0000000000..f8fd6ee0b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1404820.js
@@ -0,0 +1,21 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function array_iterator() {
+ let array_iterator_prototype = [].values().__proto__;
+ let iter;
+ array_iterator_prototype.return = function(value) {
+ iter = this;
+ return {value: value, done: true};
+ };
+
+ let array = [["good1"], ["good2"], "bad", "next", 5, 6, 7, 8];
+
+ // Aborted iteration in a builtin.
+ try {
+ new WeakSet(array);
+ } catch (e) {}
+  // iter points at the "bad" item, so next() must return the "next" value.
+ assertEquals(iter.next().value, "next");
+})();
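A minimal standalone sketch, not part of this patch and assuming the mjsunit assert helpers, of the iterator-close protocol the test above exercises: the same "return" hook on the array iterator prototype also fires when a plain for-of loop exits early, which is the behaviour the aborted WeakSet construction has to emulate.

// Hedged sketch, not from the V8 patch above.
(function forOfCloseSketch() {
  const arrayIteratorProto = [].values().__proto__;
  let closed = false;
  arrayIteratorProto.return = function(value) {
    closed = true;
    return {value, done: true};
  };
  for (const x of [1, 2, 3]) break;  // early exit triggers IteratorClose
  assertTrue(closed);
  delete arrayIteratorProto.return;  // remove the hook again
})();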
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1406774.js b/deps/v8/test/mjsunit/regress/regress-crbug-1406774.js
new file mode 100644
index 0000000000..3e2d08b774
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1406774.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+function worker_code(arr) {
+ postMessage("worker starting");
+ const large_bigint = 2n ** 100_000n;
+ function f() {
+ // Run until near the stack limit.
+ try { f(); } catch (e) {
+ postMessage("stack limit reached");
+ postMessage(arr[large_bigint]);
+ }
+ }
+ onmessage = f;
+}
+let w = new Worker(worker_code, { "arguments": [], "type": "function" });
+assertEquals("worker starting", w.getMessage());
+w.postMessage("");
+assertEquals("stack limit reached", w.getMessage());
+w.terminate();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1407080.js b/deps/v8/test/mjsunit/regress/regress-crbug-1407080.js
new file mode 100644
index 0000000000..d1c272f769
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1407080.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --max-lazy
+
+try { Function("") } catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1407384.js b/deps/v8/test/mjsunit/regress/regress-crbug-1407384.js
new file mode 100644
index 0000000000..9d9501dc41
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1407384.js
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function main() {
+ let v0 = 1.5;
+ do {
+ const v5 = BigInt.asIntN(6, 4n);
+ const v6 = v5 / v5;
+ const v7 = v6 / v6;
+ do {
+ [v7];
+ } while (v0 < 0);
+ --v0;
+ } while (v0 < 0);
+}
+%PrepareFunctionForOptimization(main);
+main();
+%OptimizeFunctionOnNextCall(main);
+main();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1408310.js b/deps/v8/test/mjsunit/regress/regress-crbug-1408310.js
new file mode 100644
index 0000000000..c3f4ca5fb3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1408310.js
@@ -0,0 +1,23 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-turbofan
+
+Object.defineProperty(Object.prototype, "test", {});
+
+class Base {
+ constructor() {
+ return new Proxy(this, {
+ defineProperty(target, key) {
+ return true;
+ }
+ });
+ }
+}
+let key = "test";
+class Child extends Base {
+ [key] = "basic";
+}
+let c = new Child();
+c = new Child();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1412938.js b/deps/v8/test/mjsunit/regress/regress-crbug-1412938.js
new file mode 100644
index 0000000000..dec36643d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1412938.js
@@ -0,0 +1,12 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (typeof WebAssembly != 'undefined') {
+ const memory = new WebAssembly.Memory({
+ "initial": 1,
+ "maximum": 10,
+ "shared": true,
+ });
+ assertEquals(65536, memory.buffer.maxByteLength);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1415249.js b/deps/v8/test/mjsunit/regress/regress-crbug-1415249.js
new file mode 100644
index 0000000000..5715e0107a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1415249.js
@@ -0,0 +1,30 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-turbofan
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const global = Realm.global(realm);
+ function Base() { return global; }
+ let i = 0;
+ class Klass extends Base {
+ field = i++;
+ }
+ let a = new Klass();
+ assertEquals(a.field, 0);
+ a = new Klass();
+ assertEquals(a.field, 1);
+}
+
+{
+ const realm = Realm.create();
+ const global = Realm.global(realm);
+ function Base() { return global; }
+ let i = 0;
+ class Klass extends Base {
+ field = i++;
+ }
+ assertThrows(() => new Klass(), Error, /no access/);
+ assertThrows(() => new Klass(), Error, /no access/);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1415581.js b/deps/v8/test/mjsunit/regress/regress-crbug-1415581.js
new file mode 100644
index 0000000000..f864671d23
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1415581.js
@@ -0,0 +1,10 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var proto = {};
+function constr() {}
+constr.prototype = proto;
+obj = new constr();
+proto[Symbol.toStringTag] = "foo";
+assertEquals('[object foo]', obj.toString());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1416248.js b/deps/v8/test/mjsunit/regress/regress-crbug-1416248.js
new file mode 100644
index 0000000000..25554834bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1416248.js
@@ -0,0 +1,11 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class foo {
+ get [Symbol.toStringTag]() {
+ return "foo";
+ }
+}
+let o = new foo();
+assertEquals('[object foo]', o.toString());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1416395.js b/deps/v8/test/mjsunit/regress/regress-crbug-1416395.js
new file mode 100644
index 0000000000..f8c6430ca2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1416395.js
@@ -0,0 +1,9 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const re = /(?=.)/giv;
+re.lastIndex = 4;
+const str = 'f\uD83D\uDCA9ba\u2603';
+let result = re[Symbol.split](str, 3);
+assertEquals(['f','\uD83D\uDCA9','b'], result);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1417495.js b/deps/v8/test/mjsunit/regress/regress-crbug-1417495.js
new file mode 100644
index 0000000000..3fede87727
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1417495.js
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+let arr = [];
+class Base {
+ x = arr.push();
+}
+
+class Child extends Base {
+ constructor() {
+ arr = () => {
+ try { arr(); } catch { /* max call stack size error */ }
+ super(); // arr.push called after super() -> non callable error
+ };
+ arr();
+ }
+}
+
+assertThrows(() => new Child(), TypeError, /arr.push is not a function/);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1417882.js b/deps/v8/test/mjsunit/regress/regress-crbug-1417882.js
new file mode 100644
index 0000000000..d37edac529
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1417882.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(className) {
+ var obj = {x: 12, y: 13};
+ delete obj.x;
+ obj[Symbol.toStringTag] = className;
+ return obj.toString();
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals('[object A]', foo('A'));
+assertEquals('[object B]', foo('B'));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals('[object C]', foo('C'));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1420860.js b/deps/v8/test/mjsunit/regress/regress-crbug-1420860.js
new file mode 100644
index 0000000000..0e73026444
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1420860.js
@@ -0,0 +1,12 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const v1 = 605.5772560725025.toLocaleString();
+const v2 = [723.31143385306,-2.220446049250313e-16,-2.220446049250313e-16,-730.3736786091615,-1.7184190427817423e+308];
+const v3 = v2.__proto__;
+const v4 = v2.join(v1);
+for (let v5 = 0; v5 < 66; v5++) {
+ v2.valueOf = v2["join"](v2.join(v4));
+ v2.unshift(v3);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1421198.js b/deps/v8/test/mjsunit/regress/regress-crbug-1421198.js
new file mode 100644
index 0000000000..da97e74aeb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1421198.js
@@ -0,0 +1,18 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+function body() {
+ // Signal worker has started.
+ this.postMessage(true);
+ // Keep the worker running forever
+ while (true) {
+ new WeakRef([]);
+ }
+}
+const worker = new Worker(body, { type: "function" });
+
+// Wait for the worker to start.
+const workerIsRunning = worker.getMessage();
+// d8 will terminate the workers after running the main script here.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1421451.js b/deps/v8/test/mjsunit/regress/regress-crbug-1421451.js
new file mode 100644
index 0000000000..eb93e88ac2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1421451.js
@@ -0,0 +1,19 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const ab = new ArrayBuffer(1000, {"maxByteLength": 1000});
+const ta = new Int16Array(ab);
+
+let mapperCallCount = 0;
+function evilMapper() {
+ ++mapperCallCount;
+ ab.resize(0);
+}
+
+function evilCtor() {
+ return ta;
+}
+
+assertThrows(() => { Float64Array.from.call(evilCtor, [0, 1], evilMapper); });
+assertEquals(1, mapperCallCount);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1423650.js b/deps/v8/test/mjsunit/regress/regress-crbug-1423650.js
new file mode 100644
index 0000000000..15e4ae6bd7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1423650.js
@@ -0,0 +1,8 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-runs=2 --no-fail
+
+// Make sure stress-runs can handle termination exceptions.
+d8.terminate();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1424486.js b/deps/v8/test/mjsunit/regress/regress-crbug-1424486.js
new file mode 100644
index 0000000000..ab588ac1a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1424486.js
@@ -0,0 +1,35 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-analyze-environment-liveness --allow-natives-syntax
+
+const __v_1 = {};
+
+function f() {
+ try {
+ var array = Object.defineProperties([0,0], { 1: __v_1 });
+ } catch (e1) {}
+ try {
+ for (var elem in array) {
+ try {
+ __v_1 = null;
+ } catch (e2) {}
+ array = elem;
+ try {
+ if (elem === "0") {
+ try {
+ Object.defineProperties();
+ } catch (e3) {}
+ }
+ } catch (e4) {}
+ }
+ } catch (e5) {}
+ array[0];
+}
+
+%PrepareFunctionForOptimization(f);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1424699.js b/deps/v8/test/mjsunit/regress/regress-crbug-1424699.js
new file mode 100644
index 0000000000..90251cc8cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1424699.js
@@ -0,0 +1,11 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --expose-async-hooks --no-fail
+
+(async function () {
+ await gc({ execution: 'async' });
+ d8.terminate();
+ const foo = new FinalizationRegistry();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-13410.js b/deps/v8/test/mjsunit/regress/regress-v8-13410.js
new file mode 100644
index 0000000000..82b8952f7c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-13410.js
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Make sure lone surrogates don't match combined surrogates.
+assertFalse(/[\ud800-\udfff]+/u.test('\ud801\udc0f'));
+
+// Surrogate pairs split by an "always succeeding" backref shouldn't match
+// combined surrogates.
+assertFalse(/(\ud801\1\udc0f)/u.test('\ud801\udc0f'));
+assertFalse(/(\ud801\1?\udc0f)/u.test('\ud801\udc0f'));
+assertFalse(/(\ud801\1{0}\udc0f)/u.test('\ud801\udc0f'));
+assertFalse(new RegExp('(\ud801\\1\udc0f)','u').test('\ud801\udc0f'));
+assertFalse(new RegExp('(\ud801\\1?\udc0f)','u').test('\ud801\udc0f'));
+assertFalse(new RegExp('(\ud801\\1{0}\udc0f)','u').test('\ud801\udc0f'));
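As a positive control, not part of the patch, the combined surrogate pair does match when the pattern is not split, which is what gives the negative assertions above their meaning:

// Hedged sketch: plain facts about /u versus non-/u matching,
// assuming the mjsunit assert helpers.
// In /u mode the literal pair forms the single code point U+1040F.
assertTrue(/\ud801\udc0f/u.test('\ud801\udc0f'));
// Without /u, matching is per code unit, so a lone-surrogate class
// matches the lead surrogate of the pair.
assertTrue(/[\ud800-\udfff]/.test('\ud801\udc0f'));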
diff --git a/deps/v8/test/mjsunit/regress/wasm/export-wrapper-canonical-types.js b/deps/v8/test/mjsunit/regress/wasm/export-wrapper-canonical-types.js
new file mode 100644
index 0000000000..ad92e802b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/export-wrapper-canonical-types.js
@@ -0,0 +1,69 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+
+// Force different type indices in both modules.
+let dummy = builder.addStruct([]);
+let struct = builder.addStruct([makeField(kWasmI32, false)]);
+let creatorAnySig = builder.addType(makeSig([], [kWasmAnyRef]));
+let funcSig = builder.addType(makeSig([wasmRefType(creatorAnySig)],
+ [kWasmExternRef]));
+let exportedAny = builder.addFunction("exportedAny", funcSig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprCallRef, creatorAnySig,
+ kGCPrefix, kExprExternExternalize,
+ ])
+
+builder.addFunction("createStruct", creatorAnySig)
+ .addBody([kExprI32Const, 12, kGCPrefix, kExprStructNew, struct])
+ .exportFunc();
+
+builder.addFunction("refFunc", makeSig([], [wasmRefType(funcSig)]))
+ .addBody([kExprRefFunc, exportedAny.index])
+ .exportFunc();
+
+builder.addDeclarativeElementSegment([exportedAny.index]);
+
+let instance = builder.instantiate();
+let wasm = instance.exports;
+
+let wasm2 = (function () {
+ let builder = new WasmModuleBuilder();
+
+ let struct = builder.addStruct([makeField(kWasmI32, false)]);
+ let creatorAnySig = builder.addType(makeSig([], [kWasmAnyRef]));
+ let funcSig = builder.addType(makeSig([wasmRefType(creatorAnySig)],
+ [kWasmExternRef]));
+ builder.addFunction("exportedAny", funcSig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprCallRef, creatorAnySig,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction("createStruct", creatorAnySig)
+ .addBody([kExprI32Const, 12, kGCPrefix, kExprStructNew, struct])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+ let wasm = instance.exports;
+  // If the wrapper was cached when it was created for the previous module,
+  // it should still work here, even though the funcSig type has a different
+  // index in this module.
+ wasm.exportedAny(wasm.createStruct);
+ return wasm;
+})();
+
+// The intervening module compilation might overwrite export wrappers. This is
+// fine as long as wrappers remain identical for canonically identical types.
+wasm.refFunc()(wasm.createStruct);
+// It should also work with the struct exported by the other module.
+wasm.refFunc()(wasm2.createStruct);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js b/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js
index 71f96d3d08..fa81b8c784 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js
@@ -15,7 +15,8 @@ let loadFct = builder.addFunction('load', kSig_i_i).addBody([
kExprI32LoadMem, 0, 0, // i32.load_mem
]).exportFunc();
const instance = builder.instantiate();
-for (let i = 0; i < 20; i++) instance.exports.load(1);
-%WasmTierUpFunction(instance, loadFct.index);
-assertFalse(%IsLiftoffFunction(instance.exports.load));
-instance.exports.load(1);
+const load = instance.exports.load;
+for (let i = 0; i < 20; i++) load(1);
+%WasmTierUpFunction(load);
+assertFalse(%IsLiftoffFunction(load));
+load(1);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-12874.js b/deps/v8/test/mjsunit/regress/wasm/regress-12874.js
index 58a9146ebb..b670322348 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-12874.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-12874.js
@@ -9,10 +9,12 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
-var sig_index = builder.addType({params: [kWasmStructRef], results: [kWasmI32]});
-
+builder.startRecGroup();
+var sig_index = builder.addType({params: [kWasmStructRef],
+ results: [kWasmI32]});
var sub1 = builder.addStruct([makeField(kWasmI32, true)]);
var sub2 = builder.addStruct([makeField(kWasmI32, false)]);
+builder.endRecGroup();
builder.addFunction('producer', makeSig([], [kWasmStructRef]))
.addBody([
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-12945.js b/deps/v8/test/mjsunit/regress/wasm/regress-12945.js
index a681465db3..3dc72fa13f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-12945.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-12945.js
@@ -8,9 +8,11 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
let i32_field = makeField(kWasmI32, true);
+builder.startRecGroup();
let supertype = builder.addStruct([i32_field]);
let sub1 = builder.addStruct([i32_field, i32_field], supertype);
let sub2 = builder.addStruct([i32_field, makeField(kWasmF64, true)], supertype);
+builder.endRecGroup();
let sig = makeSig([wasmRefNullType(supertype)], [kWasmI32]);
let callee = builder.addFunction("callee", sig).addBody([
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-13230.js b/deps/v8/test/mjsunit/regress/wasm/regress-13230.js
index 3679aaa4cb..6ef5b9a4db 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-13230.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-13230.js
@@ -26,5 +26,5 @@ let instance = builder.instantiate();
let main = instance.exports.main;
for (let i = 0; i < 20; i++) main();
-%WasmTierUpFunction(instance, main_func.index);
+%WasmTierUpFunction(main);
main();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-13290.js b/deps/v8/test/mjsunit/regress/wasm/regress-13290.js
index b895314521..450b9492b0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-13290.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-13290.js
@@ -25,5 +25,5 @@ let instance = builder.instantiate();
let main = instance.exports.main;
for (let i = 0; i < 20; i++) assertEquals(0, main());
-%WasmTierUpFunction(instance, main_func.index);
+%WasmTierUpFunction(main);
assertEquals(0, main());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-13700.js b/deps/v8/test/mjsunit/regress/wasm/regress-13700.js
new file mode 100644
index 0000000000..03f5fa8bca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-13700.js
@@ -0,0 +1,31 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-liftoff
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let bases = [0n, 1234n, 4294967296n, -4294967297n];
+let expects = [0n, 1234n, 0n, -1n];
+
+for (let i = 0; i < bases.length; ++i) {
+ var builder = new WasmModuleBuilder();
+ let g0 = builder.addImportedGlobal("mod", "g0", kWasmI64, true);
+ builder.addExportOfKind('g0', kExternalGlobal, g0);
+
+ builder.addFunction("trunci64", kSig_v_v)
+ .addBody([
+ kExprGlobalGet, g0,
+ kExprI32ConvertI64,
+ kExprI64SConvertI32,
+ kExprGlobalSet, g0,
+ ]).exportAs("trunci64");
+
+ var to_imported = new WebAssembly.Global({value: "i64", mutable: true}, bases[i]);
+ var instance = builder.instantiate({mod: { g0: to_imported }});
+
+ assertEquals(bases[i], instance.exports.g0.value);
+ instance.exports.trunci64();
+ assertEquals(expects[i], instance.exports.g0.value);
+}
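The expected values above are plain 32-bit sign-extending truncations; a quick cross-check in JavaScript (a sketch, not part of the patch, assuming the mjsunit assert helpers):

// i32.wrap_i64 followed by i64.extend_i32_s behaves like BigInt.asIntN(32, x).
const wrap32 = (x) => BigInt.asIntN(32, x);
assertEquals(0n, wrap32(0n));
assertEquals(1234n, wrap32(1234n));
assertEquals(0n, wrap32(4294967296n));    // 2 ** 32 truncates to 0
assertEquals(-1n, wrap32(-4294967297n));  // -(2 ** 32 + 1) truncates to -1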
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-13715.js b/deps/v8/test/mjsunit/regress/wasm/regress-13715.js
new file mode 100644
index 0000000000..90d3c1631a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-13715.js
@@ -0,0 +1,32 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+
+let f0 = builder.addFunction('f0', kSig_i_v).addBody([
+ kExprI32Const, 0
+]);
+
+builder.addFunction('main', kSig_i_v)
+ .addLocals(kWasmF64, 1)
+ .addBody([
+ kExprBlock, kWasmI32,
+ kExprBlock, kWasmI32,
+ kExprI32Const, 42,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprBrIf, 0,
+ kExprBrIf, 0,
+ kExprEnd, // block
+ kExprCallFunction, f0.index,
+ kExprI32Eqz,
+ kExprBrIf, 0,
+ kExprEnd, // block
+ ])
+ .exportFunc();
+
+var instance = builder.instantiate();
+assertEquals(42, (instance.exports.main()));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-13732.js b/deps/v8/test/mjsunit/regress/wasm/regress-13732.js
new file mode 100644
index 0000000000..c970762604
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-13732.js
@@ -0,0 +1,13 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder1 = new WasmModuleBuilder();
+builder1.addGlobal(kWasmS128, false, wasmS128Const(0, 0)).exportAs("mv128");
+let instance1 = builder1.instantiate();
+
+let builder2 = new WasmModuleBuilder();
+builder2.addImportedGlobal("imports", "mv128", kWasmS128, false);
+let instance2 = builder2.instantiate({ imports: instance1.exports });
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-13826.js b/deps/v8/test/mjsunit/regress/wasm/regress-13826.js
new file mode 100644
index 0000000000..9153a19f71
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-13826.js
@@ -0,0 +1,51 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --allow-natives-syntax
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let builder = new WasmModuleBuilder();
+
+let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+builder.addFunction('test', kSig_i_v)
+ .exportFunc()
+ .addLocals(kWasmAnyRef, 1)
+ .addBody([
+ kExprI32Const, 0,
+ kExprIf, kWasmRef, struct,
+ kGCPrefix, kExprStructNewDefault, struct,
+ kExprElse,
+ kGCPrefix, kExprStructNewDefault, struct,
+ kExprEnd,
+ kExprLocalTee, 0,
+ kGCPrefix, kExprRefTestNull, kI31RefCode,
+ ]);
+
+builder.addFunction('cast', kSig_r_v)
+ .exportFunc()
+ .addLocals(kWasmAnyRef, 1)
+ .addBody([
+ kExprI32Const, 0,
+ kExprIf, kWasmRef, struct,
+ kGCPrefix, kExprStructNewDefault, struct,
+ kExprElse,
+ kGCPrefix, kExprStructNewDefault, struct,
+ kExprEnd,
+ kExprLocalTee, 0,
+ kGCPrefix, kExprRefCastNull, kStructRefCode,
+ kGCPrefix, kExprExternExternalize,
+ ]);
+
+let instance = builder.instantiate();
+let test = instance.exports.test;
+let cast = instance.exports.cast;
+assertEquals(0, test());
+%WasmTierUpFunction(test);
+assertEquals(0, test());
+
+assertNotNull(cast());
+%WasmTierUpFunction(cast);
+assertNotNull(cast());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1388938.js b/deps/v8/test/mjsunit/regress/wasm/regress-1388938.js
new file mode 100644
index 0000000000..2d96d51ef5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1388938.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-typed-funcref
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let builder = new WasmModuleBuilder();
+
+let table = builder.addTable(kWasmFuncRef, 10);
+
+builder.addActiveElementSegment(table.index, [kExprI32Const, 0], [],
+ wasmRefNullType(0));
+
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1388942.js b/deps/v8/test/mjsunit/regress/wasm/regress-1388942.js
new file mode 100644
index 0000000000..c86942a4ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1388942.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let builder = new WasmModuleBuilder();
+
+builder.addType(kSig_v_v);
+builder.addType(kSig_v_i);
+builder.addType(kSig_i_v);
+
+builder.addGlobal(wasmRefNullType(3), true, [kExprRefNull, 3]);
+
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1395604.js b/deps/v8/test/mjsunit/regress/wasm/regress-1395604.js
new file mode 100644
index 0000000000..a68450bf91
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1395604.js
@@ -0,0 +1,34 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-tier-mask-for-testing=2
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addMemory();
+var sig_one = builder.addType(makeSig(new Array(9).fill(kWasmI32), []));
+var zero = builder.addFunction('zero', kSig_v_i);
+var one = builder.addFunction('one', sig_one);
+var two = builder.addFunction('two', kSig_v_i);
+// Function 0 ("zero"), compiled with Liftoff.
+zero.addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0, kExprDrop]);
+// Function 1 ("one"), compiled with TurboFan.
+one.addBody([kExprLocalGet, 7, kExprCallFunction, zero.index]);
+// Function 2 ("two"), compiled with Liftoff.
+two.addBody([
+ kExprI32Const, 101, // arg #0
+ kExprI32Const, 102, // arg #1
+ kExprI32Const, 103, // arg #2
+ kExprI32Const, 104, // arg #3
+ kExprI32Const, 105, // arg #4
+ kExprI32Const, 106, // arg #5
+ kExprI32Const, 107, // arg #6
+ kExprI32Const, 108, // arg #7
+ kExprI32Const, 109, // arg #8
+ kExprCallFunction, one.index
+ ])
+ .exportFunc();
+let instance = builder.instantiate();
+assertTraps(kTrapMemOutOfBounds, instance.exports.two);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1403398.js b/deps/v8/test/mjsunit/regress/wasm/regress-1403398.js
new file mode 100644
index 0000000000..217ae42d06
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1403398.js
@@ -0,0 +1,119 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --allow-natives-syntax
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let simdSupported = (() => {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(10, 10);
+ builder.addFunction(null, makeSig([], [kWasmS128]))
+ .addBody([
+ kExprI32Const, 0,
+ kSimdPrefix, kExprS128LoadMem, 1, 0,
+ ]);
+ try {
+ builder.instantiate();
+ return true;
+ } catch(e) {
+ assertContains('SIMD unsupported', '' + e)
+ return false;
+ }
+})();
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(0, 0); // max size = 0
+const callee = builder.addFunction('callee', makeSig([], []))
+.addBody([]);
+
+const testCases = {
+ 'StoreMem': [
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprI32StoreMem, 1, 0x0f,
+ ],
+ 'LoadMem': [
+ kExprI32Const, 0,
+ kExprI32LoadMem, 1, 0x0f,
+ kExprDrop,
+ ],
+ 'atomicStore': [
+ kExprI32Const, 0,
+ kExprI64Const, 0,
+ kAtomicPrefix, kExprI64AtomicStore16U, 1, 0x0f,
+ ],
+ 'atomicLoad': [
+ kExprI32Const, 0,
+ kAtomicPrefix, kExprI64AtomicLoad16U, 1, 0x0f,
+ kExprDrop,
+ ],
+};
+if (simdSupported) {
+ Object.assign(testCases, {
+ 'SimdStoreMem': [
+ kExprI32Const, 0,
+ ...WasmModuleBuilder.defaultFor(kWasmS128),
+ kSimdPrefix, kExprS128StoreMem, 1, 0,
+ ],
+ 'SimdLoadMem': [
+ kExprI32Const, 0,
+ kSimdPrefix, kExprS128LoadMem, 1, 0,
+ kExprDrop,
+ ],
+ 'SimdStoreLane': [
+ kExprI32Const, 0,
+ ...WasmModuleBuilder.defaultFor(kWasmS128),
+ kSimdPrefix, kExprS128Store32Lane, 1, 0, 0, 0, 0,
+ ],
+ 'SimdLoadLane': [
+ kExprI32Const, 0,
+ ...WasmModuleBuilder.defaultFor(kWasmS128),
+ kSimdPrefix, kExprS128Load32Lane, 1, 0, 0, 0, 0,
+ ],
+ 'SimdLoadTransform': [
+ kExprI32Const, 0x00,
+ kExprI32Const, 0x00,
+ kSimdPrefix, kExprS128Load32Splat, 1, 0, 0, 0, 0,
+ ],
+ });
+}
+
+for (const [name, code] of Object.entries(testCases)) {
+ builder.addFunction(name, makeSig([], []))
+ .exportFunc()
+ .addBody([
+ // Some call that allocates a feedback vector.
+ // This is required to hit the invariant on the call below.
+ kExprCallFunction, callee.index,
+ // Static out of bounds memory access.
+ ...code,
+ // Call function.
+ // With speculative inlining this requires a slot in the feedback vector.
+    // As the call is unreachable (the out-of-bounds access above always
+    // traps), the allocation of this slot can be skipped. However, this must
+    // be handled consistently by Liftoff and TurboFan.
+ kExprCallFunction, callee.index,
+ ]);
+}
+
+const instance = builder.instantiate();
+
+function run(fct) {
+ try {
+ fct();
+ assertUnreachable();
+ } catch (e) {
+ assertContains('memory access out of bounds', '' + e)
+ }
+}
+
+for (const [name, code] of Object.entries(testCases)) {
+ print(`Test ${name}`);
+ // Create feedback vectors in liftoff compilation.
+ for (let i = 0; i < 5; ++i) run(instance.exports[name]);
+ // Force turbofan compilation.
+ %WasmTierUpFunction(instance.exports[name]);
+ run(instance.exports[name]);
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1408337.js b/deps/v8/test/mjsunit/regress/wasm/regress-1408337.js
new file mode 100644
index 0000000000..26c950fc5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1408337.js
@@ -0,0 +1,24 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addMemory(16, 32, false);
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x15, // i32.const
+kSimdPrefix, kExprS128Load8x8U, 0x00, 0xff, 0xff, 0xff, 0x00, // v128.load8x8_u
+kExprUnreachable, // unreachable
+kExprEnd, // end @11
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertThrows(() => instance.exports.main(1, 2, 3), WebAssembly.RuntimeError, /memory access out of bounds/);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1412940.js b/deps/v8/test/mjsunit/regress/wasm/regress-1412940.js
new file mode 100644
index 0000000000..b9826f9389
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1412940.js
@@ -0,0 +1,40 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --no-wasm-lazy-compilation
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+/* sig 0 */ builder.addStruct([makeField(kWasmI16, true)]);
+/* sig 1 */ builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], []));
+/* sig 2 */ builder.addType(makeSig(
+ [], [kWasmI64, kWasmFuncRef, kWasmExternRef, wasmRefType(kWasmAnyRef)]));
+builder.addFunction(undefined, 1 /* sig */)
+ .addLocals(kWasmI64, 1)
+ .addBodyWithEnd([
+kExprBlock, 2, // block
+ kExprI64Const, 0xd7, 0x01, // i64.const
+ kExprI64Const, 0x00, // i64.const
+ kExprRefNull, 0x70, // ref.null
+ kExprRefNull, 0x6f, // ref.null
+ kExprI32Const, 0x00, // i32.const
+ kGCPrefix, kExprStructNew, 0x00, // struct.new
+ kExprRefNull, 0x6e, // ref.null
+ kExprBrOnNull, 0x00, // br_on_null
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprDrop, // drop
+ kExprI64Xor, // i64.xor
+ kExprRefNull, 0x70, // ref.null
+ kExprRefNull, 0x6f, // ref.null
+ kExprI32Const, 0x00, // i32.const
+ kGCPrefix, kExprStructNew, 0x00, // struct.new
+ kExprEnd, // end
+kExprUnreachable, // unreachable
+kExprEnd, // end
+]);
+builder.addExport('main', 0);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1416758.js b/deps/v8/test/mjsunit/regress/wasm/regress-1416758.js
new file mode 100644
index 0000000000..0f5fe576e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1416758.js
@@ -0,0 +1,25 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-wasm-lazy-compilation
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_v_v)
+ .addLocals(kWasmI32, 75)
+ .addBody([
+kExprTry, 0x40, // try
+ kExprLocalGet, 0x3d, // local.get
+ kExprI32Const, 0x2e, // i32.const
+ kExprI32GeS, // i32.ge_s
+ kExprIf, 0x40, // if
+ kExprCallFunction, 0x00, // call function #0: v_v
+ kExprUnreachable, // unreachable
+ kExprEnd, // end
+ kExprUnreachable, // unreachable
+ kExprEnd, // end
+kExprUnreachable, // unreachable
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1417516.js b/deps/v8/test/mjsunit/regress/wasm/regress-1417516.js
new file mode 100644
index 0000000000..5a30107b33
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1417516.js
@@ -0,0 +1,37 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --no-wasm-lazy-compilation
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const call_sig = builder.addType(kSig_v_v);
+builder.addMemory(16, 32, false);
+builder.addTable(kWasmFuncRef, 3, 4, undefined)
+builder.addFunction(undefined, kSig_i_iii)
+ .addBodyWithEnd([
+kExprTry, 0x7f, // try @11 i32
+ kExprI32Const, 0x01, // i32.const
+ kExprCallIndirect, call_sig, 0x00, // call_indirect sig #2: v_v
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kAtomicPrefix, kExprI32AtomicExchange, 0x00, 0x80, 0x80, 0xe8, 0x05, // i32.atomic.rmw.xchg
+kExprCatchAll, // catch_all @37
+ kExprI32Const, 0x01, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprTry, 0x7f, // try @62 i32
+ kExprI32Const, 0x01, // i32.const
+ kExprCallIndirect, call_sig, 0x00, // call_indirect sig #2: v_v
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kAtomicPrefix, kExprI32AtomicOr, 0x00, 0x00, // i32.atomic.rmw.or
+ kExprCatchAll, // catch_all @77
+ kExprI32Const, 0x00, // i32.const
+ kExprEnd, // end @80
+ kExprUnreachable,
+ kExprEnd, // end @121
+kExprEnd, // end @128
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1417908.js b/deps/v8/test/mjsunit/regress/wasm/regress-1417908.js
new file mode 100644
index 0000000000..daa84d2cbe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1417908.js
@@ -0,0 +1,32 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction("testFailNull", makeSig([], [kWasmAnyRef]))
+.exportFunc()
+.addBody([
+ kExprRefNull, kAnyRefCode,
+ kGCPrefix, kExprBrOnStruct, 0,
+ kGCPrefix, kExprBrOnCastFailNull, 0, kNullRefCode,
+ kGCPrefix, kExprBrOnStruct, 0,
+ kExprUnreachable,
+]);
+
+builder.addFunction("testNull", makeSig([], [kWasmAnyRef]))
+.exportFunc()
+.addBody([
+ kExprRefNull, kAnyRefCode,
+ kGCPrefix, kExprBrOnStruct, 0,
+ kGCPrefix, kExprBrOnCastNull, 0, kNullRefCode,
+ kGCPrefix, kExprBrOnStruct, 0,
+ kExprUnreachable,
+]);
+
+let wasm = builder.instantiate().exports;
+assertTraps(kTrapUnreachable, () => wasm.testFailNull());
+assertSame(null, wasm.testNull());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1418706.js b/deps/v8/test/mjsunit/regress/wasm/regress-1418706.js
new file mode 100644
index 0000000000..5e7ebcc112
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1418706.js
@@ -0,0 +1,17 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --no-wasm-lazy-compilation
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_v_v)
+ .addBodyWithEnd([
+ // Invalid (overly long) GC opcode.
+ kGCPrefix, 0xff, 0xff, 0x7f
+ ]);
+assertThrows(
+ () => builder.toModule(), WebAssembly.CompileError,
+ /Invalid prefixed opcode/);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1407594.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1407594.js
new file mode 100644
index 0000000000..943ba85be4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1407594.js
@@ -0,0 +1,64 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, false);
+builder.addDataSegment(0, [0x78, 0x56, 0x34, 0x12]);
+
+let spiller = builder.addFunction('spiller', kSig_v_v).addBody([]);
+
+builder.addFunction('main', kSig_l_v)
+ .exportFunc()
+ .addLocals(kWasmI64, 1)
+ .addBody([
+ // Initialize {var0} to 0x12345678 via a zero-extended 32-bit load.
+ kExprI32Const, 0,
+ kExprI64LoadMem32U, 2, 0,
+ kExprLocalSet, 0,
+ kExprCallFunction, spiller.index,
+ // The observable effect of this loop is that {var0} is left-shifted
+ // until it ends in 0x..000000. The details are specifically crafted
+ // to recreate a particular pattern of spill slot moves.
+ kExprLoop, kWasmVoid,
+ kExprI32Const, 0,
+ kExprI32LoadMem, 2, 0,
+ kExprI32Eqz,
+ // This block is never taken; it only serves to influence register
+ // allocator choices.
+ kExprIf, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprI64Const, 1,
+ kExprI64And,
+ kExprLocalSet, 0,
+ kExprEnd, // if
+ kExprLocalGet, 0,
+ kExprI64Const, 1,
+ kExprI64And,
+ kExprI64Eqz,
+ // This block is always taken; it is conditional in order to influence
+ // register allocator choices.
+ kExprIf, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprI64Const, 8,
+ kExprI64Shl,
+ kExprLocalSet, 0,
+ kExprEnd, // if
+ kExprBlock, kWasmVoid,
+ kExprLocalGet, 0,
+ ...wasmI64Const(0xFFFFFF),
+ kExprI64And,
+ kExprI64Eqz,
+ kExprI32Eqz,
+ kExprCallFunction, spiller.index,
+ kExprBrIf, 1,
+ kExprEnd, // block
+ kExprCallFunction, spiller.index,
+ kExprEnd, // loop
+ kExprLocalGet, 0,
+ ]);
+
+let instance = builder.instantiate();
+assertEquals("12345678000000", instance.exports.main().toString(16));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-struct-set-into-unreachable.js b/deps/v8/test/mjsunit/regress/wasm/regress-struct-set-into-unreachable.js
new file mode 100644
index 0000000000..f63706ffef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-struct-set-into-unreachable.js
@@ -0,0 +1,46 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --no-wasm-lazy-compilation --no-liftoff
+
+// Tests the following scenario:
+// - Wasm load elimination puts an immutable struct.get in its state.
+// - In a dead control path, we manage to store to the same field after casting
+// it to a type where this field is mutable.
+// - In this case, load elimination replaces the struct.set with an unreachable
+// value.
+// - The control flow graph should be valid after this replacement.
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+
+let super_struct = builder.addStruct([]);
+let sub_struct_1 = builder.addStruct([makeField(kWasmI32, false)],
+ super_struct);
+let sub_struct_2 = builder.addStruct([makeField(kWasmI32, true)],
+ super_struct);
+
+builder.addFunction("tester", makeSig([wasmRefNullType(sub_struct_1)], []))
+ .addLocals(kWasmI32, 1)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprRefIsNull,
+ kExprIf, kWasmVoid,
+
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructGet, sub_struct_1, 0,
+ kExprLocalSet, 1,
+
+ kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, sub_struct_2,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructSet, sub_struct_2, 0,
+
+ kExprEnd])
+
+ .exportFunc();
+
+let instance = builder.instantiate();
+
+assertTraps(kTrapNullDereference, () => instance.exports.tester(null));
diff --git a/deps/v8/test/mjsunit/set-prototype-of-Object_prototype.js b/deps/v8/test/mjsunit/set-prototype-of-Object_prototype.js
new file mode 100644
index 0000000000..1478722b50
--- /dev/null
+++ b/deps/v8/test/mjsunit/set-prototype-of-Object_prototype.js
@@ -0,0 +1,17 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function TestImmutablePrototype() {
+ assertThrows(function() {
+ Object.setPrototypeOf(Object.prototype, {});
+ },
+ TypeError,
+ 'Immutable prototype object \'Object.prototype\' cannot have their prototype set');
+}
+
+TestImmutablePrototype();
+
+Object.prototype.foo = {};
+
+TestImmutablePrototype();
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
index 33ccc969f9..0cac54acdf 100644
--- a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -25,13 +25,13 @@ assertFalse(isNeverOptimize());
if (i == 1) {
// f must be interpreted code.
- assertTrue(isUnoptimized(f));
+ assertUnoptimized(f);
// Run twice (i = 0, 1), then tier-up.
%OptimizeFunctionOnNextCall(f);
} else if (i == 2) {
// Tier-up at i = 2 should go up to turbofan.
- assertTrue(isTurboFanned(f));
+ assertOptimized(f);
}
}
})()
diff --git a/deps/v8/test/mjsunit/shared-memory/cannot-redefine-properties.js b/deps/v8/test/mjsunit/shared-memory/cannot-redefine-properties.js
new file mode 100644
index 0000000000..5e9ab359c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/cannot-redefine-properties.js
@@ -0,0 +1,40 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct
+
+// Shared objects have a fixed layout and cannot have their properties
+// redefined. They are constructed sealed. Ordinary sealed objects can still
+// have their properties redefined (e.g. made non-writable, which freezes
+// them); this is disallowed for shared objects.
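+//
+// For contrast, a minimal illustrative sketch (added for clarity, not part of
+// the original test): an ordinary sealed object can still have a data
+// property redefined as non-writable, which leaves the object frozen.
+const ordinaryContrast = Object.seal({p: 1});
+Object.defineProperty(ordinaryContrast, 'p', {writable: false});
+assertTrue(Object.isFrozen(ordinaryContrast));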
+
+function TestSharedObjectCannotRedefineProperty(sharedObj) {
+ function TestRedefine(sharedObj, propName) {
+ if (Object.hasOwn(sharedObj, propName)) {
+ assertThrows(() => Object.defineProperty(sharedObj, propName, {
+ value: 99,
+ enumerable: true,
+ writable: false,
+ configurable: false
+ }));
+ Object.defineProperty(
+ sharedObj, propName,
+ {value: 99, enumerable: true, writable: true, configurable: false});
+ assertEquals(99, sharedObj[propName]);
+ }
+ }
+
+ TestRedefine(sharedObj, 'p');
+ TestRedefine(sharedObj, '0');
+  // Objects without any properties are a degenerate case: they are considered
+  // frozen (and, in fact, satisfy every integrity level).
+ if (Object.getOwnPropertyNames(sharedObj).length > 0) {
+ assertThrows(() => Object.freeze(sharedObj));
+ }
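+  // Illustrative check (added for clarity): the degenerate "no properties"
+  // case above also holds for ordinary objects.
+  assertTrue(Object.isFrozen(Object.preventExtensions({})));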
+ assertTrue(Object.isSealed(sharedObj));
+}
+
+TestSharedObjectCannotRedefineProperty(new Atomics.Condition());
+TestSharedObjectCannotRedefineProperty(new Atomics.Mutex());
+TestSharedObjectCannotRedefineProperty(new (new SharedStructType(['p'])));
+TestSharedObjectCannotRedefineProperty(new SharedArray(1));
diff --git a/deps/v8/test/mjsunit/shared-memory/condition.js b/deps/v8/test/mjsunit/shared-memory/condition.js
index 8fa0e6e3b5..b548cadef0 100644
--- a/deps/v8/test/mjsunit/shared-memory/condition.js
+++ b/deps/v8/test/mjsunit/shared-memory/condition.js
@@ -34,3 +34,10 @@ let cv = new Atomics.Condition;
assertEquals(false, Atomics.Condition.wait(cv, mutex, 100));
});
})();
+
+// Conditions can be assigned to shared objects.
+(function TestConditionCanBeAssignedToSharedObjects() {
+ const Box = new SharedStructType(["payload"]);
+ const box = new Box;
+ box.payload = cv;
+})();
diff --git a/deps/v8/test/mjsunit/shared-memory/mutex-lock-twice.js b/deps/v8/test/mjsunit/shared-memory/mutex-lock-twice.js
new file mode 100644
index 0000000000..ad43129192
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/mutex-lock-twice.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc --harmony-struct
+
+let mtx = new Atomics.Mutex();
+let cnd = new Atomics.Condition();
+
+Atomics.Mutex.lock(mtx, () => {
+ Atomics.Condition.wait(cnd, mtx, 100);
+});
+
+gc();
+gc();
+
+Atomics.Mutex.lock(mtx, () => {
+ Atomics.Condition.wait(cnd, mtx, 100);
+});
diff --git a/deps/v8/test/mjsunit/shared-memory/mutex.js b/deps/v8/test/mjsunit/shared-memory/mutex.js
index f189535b36..768fc39fbb 100644
--- a/deps/v8/test/mjsunit/shared-memory/mutex.js
+++ b/deps/v8/test/mjsunit/shared-memory/mutex.js
@@ -44,3 +44,10 @@ assertThrowsEquals(() => {
}, 42);
Atomics.Mutex.tryLock(mutex, () => { locked_count++; });
assertEquals(locked_count, 6);
+
+// Mutexes can be assigned to shared objects.
+(function TestMutexCanBeAssignedToSharedObjects() {
+ const Box = new SharedStructType(["payload"]);
+ const box = new Box;
+ box.payload = mutex;
+})();
diff --git a/deps/v8/test/mjsunit/shared-memory/non-instance-prototype.js b/deps/v8/test/mjsunit/shared-memory/non-instance-prototype.js
new file mode 100644
index 0000000000..e4cef06120
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/non-instance-prototype.js
@@ -0,0 +1,42 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct --allow-natives-syntax
+
+function Foo() {}
+
+function TestInstanceOfMutex() {
+ Foo instanceof Atomics.Mutex;
+}
+function TestInstanceOfCondition() {
+ Foo instanceof Atomics.Condition;
+}
+function TestInstanceOfSharedArray() {
+ Foo instanceof SharedArray;
+}
+function TestInstanceOfSharedStruct() {
+ Foo instanceof (new SharedStructType(["foo"]));
+}
+
+%PrepareFunctionForOptimization(TestInstanceOfMutex);
+%PrepareFunctionForOptimization(TestInstanceOfCondition);
+%PrepareFunctionForOptimization(TestInstanceOfSharedArray);
+%PrepareFunctionForOptimization(TestInstanceOfSharedStruct);
+
+for (let i = 0; i < 10; i++) {
+ assertThrows(TestInstanceOfMutex, TypeError);
+ assertThrows(TestInstanceOfCondition, TypeError);
+ assertThrows(TestInstanceOfSharedArray, TypeError);
+ assertThrows(TestInstanceOfSharedStruct, TypeError);
+}
+
+%OptimizeFunctionOnNextCall(TestInstanceOfMutex);
+%OptimizeFunctionOnNextCall(TestInstanceOfCondition);
+%OptimizeFunctionOnNextCall(TestInstanceOfSharedArray);
+%OptimizeFunctionOnNextCall(TestInstanceOfSharedStruct);
+
+assertThrows(TestInstanceOfMutex, TypeError);
+assertThrows(TestInstanceOfCondition, TypeError);
+assertThrows(TestInstanceOfSharedArray, TypeError);
+assertThrows(TestInstanceOfSharedStruct, TypeError);
diff --git a/deps/v8/test/mjsunit/shared-memory/private-field.js b/deps/v8/test/mjsunit/shared-memory/private-field.js
new file mode 100644
index 0000000000..dd3a3c1abd
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/private-field.js
@@ -0,0 +1,25 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct
+
+// Adding private fields to shared objects should not be possible. This is
+// unlike ordinary objects, which can receive private fields even when sealed
+// or frozen.
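+//
+// For contrast, a minimal illustrative sketch (added for clarity, not part of
+// the original test): a frozen ordinary object can still receive a private
+// field via the return-override trick.
+{
+  class OrdinaryBase {
+    constructor() {
+      return Object.freeze({});
+    }
+  }
+  class WithPrivate extends OrdinaryBase {
+    #x = 42;
+  }
+  new WithPrivate();  // Does not throw for ordinary objects.
+}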
+
+function TestSharedObjectCannotAddPrivateField(sharedObj) {
+ function Base() {
+ return sharedObj;
+ }
+ assertThrows(() => {
+ class C extends Base {
+ #foo = 42
+ }
+ return new C();
+ });
+}
+
+TestSharedObjectCannotAddPrivateField(new Atomics.Condition());
+TestSharedObjectCannotAddPrivateField(new Atomics.Mutex());
+TestSharedObjectCannotAddPrivateField(new (new SharedStructType(['p'])));
+TestSharedObjectCannotAddPrivateField(new SharedArray(1));
diff --git a/deps/v8/test/mjsunit/shared-memory/private-name.js b/deps/v8/test/mjsunit/shared-memory/private-name.js
new file mode 100644
index 0000000000..a2e2f0caae
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/private-name.js
@@ -0,0 +1,13 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct
+
+// Private names as used by V8-specific APIs like Error.captureStackTrace should
+// not bypass shared objects' extensibility check.
+
+assertThrows(() => Error.captureStackTrace(new Atomics.Condition()));
+assertThrows(() => Error.captureStackTrace(new Atomics.Mutex()));
+assertThrows(() => Error.captureStackTrace(new (new SharedStructType(['p']))));
+assertThrows(() => Error.captureStackTrace(new SharedArray(1)));
diff --git a/deps/v8/test/mjsunit/shared-memory/regress-crbug-1425710.js b/deps/v8/test/mjsunit/shared-memory/regress-crbug-1425710.js
new file mode 100644
index 0000000000..fd2b2264b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/regress-crbug-1425710.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct --expose-gc --expose-externalize-string
+
+const str = String.fromCharCode(849206214);
+gc();
+const Bar = this.SharedStructType("a");
+const bar = Bar();
+bar.a = str;
+externalizeString(str);
+bar[str] = 'foo';
+const str2 = String.fromCharCode(849206214);
+gc();
+const bar2 = Bar();
+bar2.a = str2;
+externalizeString(str2);
+bar2[str2] = 'foo';
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-array-surface.js b/deps/v8/test/mjsunit/shared-memory/shared-array-surface.js
index 9e02e7bc9b..01f938f4f4 100644
--- a/deps/v8/test/mjsunit/shared-memory/shared-array-surface.js
+++ b/deps/v8/test/mjsunit/shared-memory/shared-array-surface.js
@@ -9,6 +9,12 @@
(function TestNoPrototype() {
// For now the experimental shared arrays don't have a prototype.
assertNull(Object.getPrototypeOf(new SharedArray(10)));
+
+ assertNull(SharedArray.prototype);
+
+ assertThrows(() => {
+ SharedArray.prototype = {};
+ });
})();
(function TestPrimitives() {
@@ -82,7 +88,7 @@
let shared_array = new SharedArray(2);
shared_array[0] = 42;
- assertArrayEquals(shared_array.length, 10);
+ assertEquals(2, shared_array.length);
let propDescs = Object.getOwnPropertyDescriptors(shared_array);
let desc = propDescs[0];
@@ -107,3 +113,9 @@
i++;
}
})();
+
+(function TestProxyLengthGetter() {
+ let a = new SharedArray(2);
+ let proxy = new Proxy(a, {});
+ assertEquals(2, proxy.length);
+})();
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-array-workers.js b/deps/v8/test/mjsunit/shared-memory/shared-array-workers.js
index 231b1f4a3f..fa85967c5e 100644
--- a/deps/v8/test/mjsunit/shared-memory/shared-array-workers.js
+++ b/deps/v8/test/mjsunit/shared-memory/shared-array-workers.js
@@ -38,4 +38,19 @@ if (this.Worker) {
worker.terminate();
})();
+
+ (function TestObjectAssign() {
+ function f() {
+ const shared_array = new SharedArray(1);
+ const array = new Array(1);
+ array[0] = 1;
+ Object.assign(shared_array, array);
+ postMessage(shared_array[0]);
+ }
+
+ const worker = new Worker(f, {'type': 'function'});
+ assertEquals(1, worker.getMessage());
+
+ worker.terminate();
+ })();
}
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js b/deps/v8/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js
index 19d27a7cf8..a705d9f193 100644
--- a/deps/v8/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js
+++ b/deps/v8/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js
@@ -19,7 +19,9 @@ function get(o, ext_key) {
(function test() {
let ext_key = "AAAAAAAAAAAAAAAAAAAAAA";
- externalizeString(ext_key);
+ try {
+ externalizeString(ext_key);
+ } catch {}
set({a:1}, ext_key);
set({b:2}, ext_key);
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share-large.js b/deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share-large.js
new file mode 100644
index 0000000000..c32cf3e975
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share-large.js
@@ -0,0 +1,53 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct --shared-string-table --allow-natives-syntax
+// Flags: --expose-gc
+
+if (this.Worker) {
+
+(function TestSharedStringPostMessage() {
+ function workerCode() {
+ let Box = new SharedStructType(['payload']);
+ let b1 = new Box();
+ b1.payload = "started";
+ postMessage(b1);
+ onmessage = function(box) {
+ if (!%IsSharedString(box.payload)) {
+ throw new Error("str isn't shared");
+ }
+ let b2 = new Box();
+ b2.payload = box.payload;
+ postMessage(b2);
+ };
+ }
+
+ // Strings referenced in shared structs are serialized by sharing.
+
+ let worker = new Worker(workerCode, { type: 'function' });
+ let started = worker.getMessage();
+ assertTrue(%IsSharedString(started.payload));
+ assertEquals("started", started.payload);
+
+ let Box = new SharedStructType(['payload']);
+ let box_to_send = new Box();
+ // Create a string in large object space.
+ let payload = %FlattenString('a'.repeat(1024 * 256));
+ assertTrue(%InLargeObjectSpace(payload));
+ // Trigger a gc to move the object to old space.
+ gc();
+ assertFalse(%InYoungGeneration(payload));
+ box_to_send.payload = payload;
+ assertTrue(%IsSharedString(box_to_send.payload));
+ worker.postMessage(box_to_send);
+ let box_received = worker.getMessage();
+ assertTrue(%IsSharedString(box_received.payload));
+ assertFalse(box_to_send === box_received);
+ // Object.is and === won't check pointer equality of Strings.
+ assertTrue(%IsSameHeapObject(box_to_send.payload, box_received.payload));
+
+ worker.terminate();
+})();
+
+}
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share.js b/deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share.js
new file mode 100644
index 0000000000..416feb170a
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-string-copy-on-share.js
@@ -0,0 +1,62 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct --shared-string-table --allow-natives-syntax
+// Flags: --expose-gc
+
+if (this.Worker) {
+
+(function TestSharedStringPostMessage() {
+ function workerCode() {
+ let Box = new SharedStructType(['payload']);
+ let b1 = new Box();
+ b1.payload = "started";
+ postMessage(b1);
+ onmessage = function(box) {
+ if (!%IsSharedString(box.payload)) {
+ throw new Error("str isn't shared");
+ }
+ let b2 = new Box();
+ b2.payload = box.payload;
+ postMessage(b2);
+ };
+ }
+
+ // Strings referenced in shared structs are serialized by sharing.
+
+ let worker = new Worker(workerCode, { type: 'function' });
+ let started = worker.getMessage();
+ assertTrue(%IsSharedString(started.payload));
+ assertEquals("started", started.payload);
+
+ let Box = new SharedStructType(['payload']);
+ let box_to_send = new Box();
+ // Create a string that is not internalized by the parser. Create dummy
+ // strings before and after the string to make sure we have enough live
+ // objects on the page to get it promoted to old space (for minor MC).
+ let trash = [];
+ for (let i = 0; i < 1024 * 32; i++) {
+ trash.push('a'.repeat(8));
+ }
+ let payload = %FlattenString('a'.repeat(1024 * 60));
+ for (let i = 0; i < 1024 * 32; i++) {
+ trash.push('a'.repeat(8));
+ }
+ // Trigger a gc to move the object to old space.
+ gc({type: 'minor'});
+ assertFalse(%InLargeObjectSpace(payload));
+ assertFalse(%InYoungGeneration(payload));
+ box_to_send.payload = payload;
+ assertTrue(%IsSharedString(box_to_send.payload));
+ worker.postMessage(box_to_send);
+ let box_received = worker.getMessage();
+ assertTrue(%IsSharedString(box_received.payload));
+ assertFalse(box_to_send === box_received);
+ // Object.is and === won't check pointer equality of Strings.
+ assertTrue(%IsSameHeapObject(box_to_send.payload, box_received.payload));
+
+ worker.terminate();
+})();
+
+}
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-string-in-global-object-optimized.js b/deps/v8/test/mjsunit/shared-memory/shared-string-in-global-object-optimized.js
new file mode 100644
index 0000000000..b708bc6fbe
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-string-in-global-object-optimized.js
@@ -0,0 +1,22 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --shared-string-table --harmony-struct
+
+
+var ST = new SharedStructType(['foo']);
+var t22 = new ST();
+
+function f() {
+ t22.foo = 'a'.repeat(9);
+ globalThis[Symbol.unscopables] = t22.foo;
+ return globalThis[Symbol.unscopables];
+}
+
+%PrepareFunctionForOptimization(f);
+for (let i = 0; i < 10; i++) {
+ f();
+}
+%OptimizeFunctionOnNextCall(f);
+assertEquals('a'.repeat(9), f());
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js
index 6dd4325d5b..e66e007a1f 100644
--- a/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-surface.js
@@ -13,6 +13,9 @@ let S = new SharedStructType(['field']);
// proposal explainer which says accessing the prototype throws.
assertNull(S.prototype);
assertNull(Object.getPrototypeOf(new S()));
+ assertThrows(() => {
+ S.prototype = {};
+ });
})();
(function TestPrimitives() {
@@ -94,3 +97,59 @@ let S = new SharedStructType(['field']);
(function TestDuplicateFieldNames() {
assertThrows(() => new SharedStructType(['same', 'same']));
})();
+
+(function TestNoFields() {
+ const EmptyStruct = new SharedStructType([]);
+ let s = new EmptyStruct();
+})();
+
+(function TestSymbolsDisallowed() {
+ // This may be relaxed in the future.
+ assertThrows(() => new SharedStructType([Symbol()]));
+})();
+
+(function TestUsedAsPrototype() {
+ const OnPrototypeStruct = new SharedStructType(['prop']);
+ let ps = new OnPrototypeStruct();
+ ps.prop = "on proto";
+
+ function assertProtoIsStruct(obj, proto) {
+ // __proto__ is on Object.prototype, and obj here no longer has
+ // Object.prototype as its [[Prototype]].
+ assertSame(undefined, obj.__proto__);
+ assertSame(proto, Object.getPrototypeOf(obj));
+ assertEquals("on proto", obj.prop);
+ }
+
+ {
+ let pojo = { __proto__: ps };
+ assertProtoIsStruct(pojo, ps);
+ }
+
+ {
+ let pojo = {};
+ Object.setPrototypeOf(pojo, ps);
+ assertProtoIsStruct(pojo, ps);
+ }
+
+ {
+ let pojo = {};
+ pojo.__proto__ = ps;
+ assertProtoIsStruct(pojo, ps);
+ }
+
+ {
+ const old = globalThis.__proto__;
+ globalThis.__proto__ = ps;
+ globalThis.__proto__ = old;
+ }
+
+ {
+ Object.create(ps);
+ }
+
+ {
+ function Ctor() {}
+ Ctor.prototype = ps;
+ }
+})();
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js b/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js
index ddc78e5bbb..c686f9c7f1 100644
--- a/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js
+++ b/deps/v8/test/mjsunit/shared-memory/shared-struct-workers.js
@@ -36,4 +36,18 @@ if (this.Worker) {
worker.terminate();
})();
+(function TestObjectAssign() {
+ function f() {
+ const Struct = new SharedStructType(['field'])
+ const shared_struct = new Struct();
+ const obj = {'field': 1};
+ Object.assign(shared_struct, obj);
+ postMessage(shared_struct.field);
+ }
+
+ const worker = new Worker(f, {'type': 'function'});
+ assertEquals(1, worker.getMessage());
+
+ worker.terminate();
+})();
}
diff --git a/deps/v8/test/mjsunit/shared-memory/shared-value-barrier-optimization.js b/deps/v8/test/mjsunit/shared-memory/shared-value-barrier-optimization.js
new file mode 100644
index 0000000000..a058fe55a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shared-value-barrier-optimization.js
@@ -0,0 +1,29 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-struct --allow-natives-syntax
+
+const Box = new SharedStructType(['payload']);
+let a, b;
+function f() {
+ a = SharedArray(4000);
+ b = new Box();
+  // Assignments into shared objects have a barrier that ensures the RHS is
+  // in shared space.
+  //
+  // The RHS needs to be large enough to be boxed as a HeapNumber. TF then
+  // allocates it out of the non-shared old space during optimization. If TF
+  // incorrectly compiles away the barrier, TF-optimized code will create
+  // shared->local edges.
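+  //
+  // Illustrative arithmetic (assuming 31-bit Smis, e.g. with pointer
+  // compression): the maximum Smi is 2**30 - 1 = 1073741823, which is smaller
+  // than 2000000000, so the stored value must be heap-allocated.
+  //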
+ a[0] = 2000000000;
+ b.payload = 2000000000;
+}
+%PrepareFunctionForOptimization(f);
+for (let i = 0; i < 10; i++) f();
+// Verify that TF optimized code does not incorrectly compile away the shared
+// value barrier.
+%OptimizeFunctionOnNextCall(f);
+for (let i = 0; i < 10; i++) f();
+// SharedGC will verify there are no shared->local edges.
+%SharedGC();
diff --git a/deps/v8/test/mjsunit/shared-memory/shrink-large-object.js b/deps/v8/test/mjsunit/shared-memory/shrink-large-object.js
new file mode 100644
index 0000000000..c91232a7a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-memory/shrink-large-object.js
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --shared-string-table --expose-gc
+
+let arr = new Array(65535);
+gc();
+arr[arr.length-1] = 'two';
+arr[1] = 'two';
+arr[2] = 'two';
+arr.length = 2;
+gc();
+gc();
+arr.length = 1;
+gc();
+gc();
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 3afd9e2bce..1a703d330d 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -105,7 +105,7 @@ class TestCase(testcase.D8TestCase):
else:
mjsunit_files = [os.path.join(self.suite.root, "mjsunit.js")]
- if self.suite.framework_name == 'num_fuzzer':
+ if self.framework_name == 'num_fuzzer':
mjsunit_files.append(os.path.join(self.suite.root, "mjsunit_numfuzz.js"))
self._source_files = files
@@ -128,10 +128,10 @@ class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = list(self._source_files)
- if not self._test_config.no_harness:
+ if not self.test_config.no_harness:
files += self._mjsunit_files
files += self._files_suffix
- if self._test_config.isolates:
+ if self.test_config.isolates:
files += ['--isolate'] + files
return files
@@ -190,8 +190,7 @@ class CombinedTest(testcase.D8TestCase):
passed as arguments.
"""
def __init__(self, name, tests):
- super(CombinedTest, self).__init__(tests[0].suite, '', name,
- tests[0]._test_config)
+ super(CombinedTest, self).__init__(tests[0].suite, '', name)
self._tests = tests
def _prepare_outcomes(self, force_update=True):
diff --git a/deps/v8/test/mjsunit/tools/processor-bigint.mjs b/deps/v8/test/mjsunit/tools/processor-bigint.mjs
new file mode 100644
index 0000000000..8b6cd54e94
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/processor-bigint.mjs
@@ -0,0 +1,59 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --logfile='+' --log --log-maps --log-ic --log-code
+// Flags: --log-function-events --no-predictable
+
+import { Processor } from "../../../tools/system-analyzer/processor.mjs";
+
+// log code start
+function doWork() {
+ let array = [];
+ for (let i = 0; i < 500; i++) {
+ doWorkStep(i, array);
+ }
+ let sum = 0;
+ for (let i = 0; i < 500; i++) {
+ sum += array[i]["property" + i];
+ }
+ return sum;
+}
+
+function doWorkStep(i, array) {
+ const obj = {
+ ["property" + i]: i,
+ };
+ array.push(obj);
+ obj.custom1 = 1;
+ obj.custom2 = 2;
+}
+
+const result = doWork();
+ // log code end
+
+const logString = d8.log.getAndStop();
+assertTrue(logString.length > 0);
+const useBigInts = true;
+const processor = new Processor(useBigInts);
+await processor.processChunk(logString);
+await processor.finalize();
+
+const maps = processor.mapTimeline;
+const ics = processor.icTimeline;
+const scripts = processor.scripts;
+
+(function testResults() {
+ assertEquals(result, 124750);
+ assertTrue(maps.length > 0);
+ assertTrue(ics.length > 0);
+ assertTrue(scripts.length > 0);
+})();
+
+(function testIcKeys() {
+ const keys = new Set();
+ ics.forEach(ic => keys.add(ic.key));
+ assertTrue(keys.has("custom1"));
+ assertTrue(keys.has("custom2"));
+ assertTrue(keys.has("push"));
+})();
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.mjs b/deps/v8/test/mjsunit/tools/tickprocessor.mjs
index a09f99fe42..eda1743df3 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.mjs
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.mjs
@@ -241,8 +241,8 @@ await (async function testWindowsCppEntriesProvider() {
WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
this.symbols = [
' Start Length Name Class',
- ' 0001:00000000 000ac902H .text CODE',
- ' 0001:000ac910 000005e2H .text$yc CODE',
+ ' 0001:00000000 000ac902H .text INSTRUCTION_STREAM',
+ ' 0001:000ac910 000005e2H .text$yc INSTRUCTION_STREAM',
' Address Publics by Value Rva+Base Lib:Object',
' 0000:00000000 __except_list 00000000 <absolute>',
' 0001:00000000 ?ReadFile@@YA?AV?$Handle@VString@v8@@@v8@@PBD@Z 00401000 f shell.obj',
diff --git a/deps/v8/test/mjsunit/turboshaft/type-inference.js b/deps/v8/test/mjsunit/turboshaft/type-inference.js
new file mode 100644
index 0000000000..ea346cb1ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/turboshaft/type-inference.js
@@ -0,0 +1,118 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --turboshaft --allow-natives-syntax
+
+// NOTE: The following tests are very platform-specific and prone to break on
+// any compiler changes. The tests are used for type system development only.
+// TODO(nicohartmann@): Replace with more stable tests or remove completely
+// once type system development is close to completion.
+
+/*
+function use() {}
+%NeverOptimizeFunction(use);
+
+function constants() {
+ use(%CheckTypeOf(3, "Word64{6}")); // smi-tagged value 3 in 64 bit register
+ // Cannot check this currently, because NumberConstants are not yet supported
+ // in the typer.
+ // use(%CheckTypeOf(5.5, "Float64{5.5}"));
+}
+
+function add1(x) {
+ let a = x ? 3 : 7;
+ let r = -1;
+ %CheckTurboshaftTypeOf(a, "Word32[3, 7]");
+ if (a < 5) r = a + 2;
+ else r = a - 2;
+ let result = r + 1;
+ return %CheckTurboshaftTypeOf(result, "Word32{6}");
+}
+
+function add2(x) {
+ let a = x ? 3.5 : 7.5;
+ let r = -1.0;
+ %CheckTurboshaftTypeOf(a, "Float64[3.5, 7.5]");
+ if (a < 5.5) r = a + 2.0;
+ else r = a - 2.0;
+ let result = r - 0.5;
+ return %CheckTurboshaftTypeOf(result, "Float64{5.0}");
+}
+
+function mul2(x) {
+ let a = x ? 3.5 : 7.0;
+ let r = -1.0;
+ if (a < 5.0) r = a * 5.0;
+ else r = a * 2.5;
+ let result = r - 0.5;
+ return result;
+}
+
+function div2(x) {
+ let a = x ? 3.3 : 6.6;
+ let r = -1.0;
+ if (a < 5.0) r = a / 1.1;
+ else r = a / 2.2;
+ let result = r - 0.5;
+ return %CheckTypeOf(result, "Float64[2.49999,2.50001]");
+}
+*/
+
+//function min2(x) {
+// let a = x ? 3.3 : 6.6;
+// let r = -1.0;
+// if (a < 5.0) r = Math.min(a, 6.6);
+// else r = Math.min(3.3, a);
+// let result = r - 0.3;
+// return %CheckTypeOf(result, "Float64{3}");
+//}
+//
+//function max2(x) {
+// let a = x ? 3.3 : 6.6;
+// let r = -1.0;
+// if (a < 5.0) r = Math.max(a, 6.6);
+// else r = Math.max(3.3, a);
+// let result = r - 0.6;
+// return %CheckTypeOf(result, "Float64{6}");
+//}
+
+function add_dce(x) {
+ let a = x ? 3 : 7;
+ let r = -1;
+ if (a < 5) r = a + 2;
+ else r = a - 2;
+ let result = r + 1;
+ return result;
+}
+
+function loop_dce(x) {
+ let limit = x ? 50 : 100;
+ let sum = 0;
+ for(let i = 1; i <= limit; ++i) {
+ sum += i;
+ }
+
+ let a = sum > 5000 ? 3 : 7;
+ let r = -1;
+ if(a < 5) r = a + 2;
+ else r = a - 2;
+ let result = r + 1;
+ return result;
+ // TODO(nicohartmann@): DCE should support merging identical return blocks.
+// if(sum > 5000) {
+// return true;
+// } else {
+// return true;
+// }
+}
+
+//let targets = [ constants, add1, add2, mul2, div2, /*min2, max2*/ ];
+let targets = [ add_dce, loop_dce ];
+for(let f of targets) {
+ %PrepareFunctionForOptimization(f);
+ f(true);
+ f(false);
+ %OptimizeFunctionOnNextCall(f);
+ f(true);
+}
diff --git a/deps/v8/test/mjsunit/turboshaft/typed-optimizations.js b/deps/v8/test/mjsunit/turboshaft/typed-optimizations.js
new file mode 100644
index 0000000000..087869e9aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/turboshaft/typed-optimizations.js
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --turboshaft --allow-natives-syntax
+
+function add1(x) {
+ let a = x ? 3 : 7; // a = {3, 7}
+ let r = -1; // r = {-1}
+ if (a < 5) // then: a = {3}
+ r = a + 2; // r = {5}
+ else // else: a = {7}
+ r = a - 2; // r = {5}
+ const result = r + 1; // result = {6}
+ // TODO(nicohartmann@): When we have a platform independent way to do that,
+ // add a %CheckTurboshaftTypeOf to verify the type.
+ return result;
+}
+
+function loop1(x) {
+ let a = x ? 3 : 7;
+ let result = 1;
+ for(let i = 0; result < 1000; ++i) {
+ result += a;
+ }
+ return result > 500;
+}
+
+function loop2(x) {
+ let a = x ? 3 : 7;
+ let result = 0;
+ for(let i = 0; i < a; ++i) {
+ result = i + i;
+ }
+ return result < 100;
+}
+
+
+let targets = [
+ add1,
+ loop1,
+ loop2,
+];
+for(let f of targets) {
+ %PrepareFunctionForOptimization(f);
+ const expected_true = f(true);
+ const expected_false = f(false);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(expected_true, f(true));
+ assertEquals(expected_false, f(false));
+}
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-array-methods.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-array-methods.js
index ee42e097b9..f6c31297ca 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-array-methods.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-array-methods.js
@@ -373,8 +373,7 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
const fixedLength = new ctor(rab, 0, 4);
const evil = { valueOf: () => { rab.resize(2 * ctor.BYTES_PER_ELEMENT);
return 0; }};
- assertEquals([undefined, undefined, undefined, undefined],
- sliceHelper(fixedLength, evil));
+ assertEquals(new Array(4), sliceHelper(fixedLength, evil));
assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
}
for (let ctor of ctors) {
@@ -401,8 +400,7 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
const fixedLength = new ctor(rab, 0, 4);
const evil = { valueOf: () => { %ArrayBufferDetach(rab);
return 0; }};
- assertEquals([undefined, undefined, undefined, undefined],
- sliceHelper(fixedLength, evil));
+ assertEquals(new Array(4), sliceHelper(fixedLength, evil));
assertEquals(0, rab.byteLength);
}
for (let ctor of ctors) {
@@ -414,8 +412,9 @@ d8.file.execute('test/mjsunit/typedarray-helpers.js');
}
const evil = { valueOf: () => { %ArrayBufferDetach(rab);
return 0; }};
- assertEquals([undefined, undefined, undefined, undefined],
- ToNumbers(sliceHelper(lengthTracking, evil)));
+ assertEquals(
+ [undefined, undefined, undefined, undefined],
+ ToNumbers(sliceHelper(lengthTracking, evil)));
assertEquals(0, rab.byteLength);
}
})();
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
index e0d7e603e7..f1d1fd34e1 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
@@ -1286,6 +1286,68 @@ TestFill(ArrayFillHelper, false);
assertThrows(
() => { TypedArrayFillHelper(fixedLength, 3, 1, evil); }, TypeError);
}
+ // Resizing + a length-tracking TA -> no OOB, but bounds recomputation needed.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 1;
+ }};
+ TypedArrayFillHelper(lengthTracking, evil, 0, 4);
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 0;
+ }};
+ TypedArrayFillHelper(lengthTracking, 1, evil, 4);
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 4;
+ }};
+ TypedArrayFillHelper(lengthTracking, 1, 0, evil);
+ }
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 1;
+ }};
+ TypedArrayFillHelper(lengthTracking, evil, 0, 4);
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 0;
+ }};
+ TypedArrayFillHelper(lengthTracking, 1, evil, 4);
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 4;
+ }};
+ TypedArrayFillHelper(lengthTracking, 1, 0, evil);
+ }
})();
(function ArrayFillParameterConversionResizes() {
@@ -1298,6 +1360,8 @@ TestFill(ArrayFillHelper, false);
rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 3;
}};
ArrayFillHelper(fixedLength, evil, 1, 2);
+ // The underlying data doesn't change: all writes fail because 'fixedLength'
+ // is OOB.
assertEquals([0, 0], ReadDataFromBuffer(rab, ctor));
}
for (let ctor of ctors) {
@@ -1322,6 +1386,71 @@ TestFill(ArrayFillHelper, false);
ArrayFillHelper(fixedLength, 3, 1, evil);
assertEquals([0, 0], ReadDataFromBuffer(rab, ctor));
}
+ // Resizing + a length-tracking TA -> no OOB, but bounds recomputation needed.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 3;
+ }};
+ ArrayFillHelper(lengthTracking, evil, 0, 4);
+ assertEquals([3, 3], ReadDataFromBuffer(rab, ctor));
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 0;
+ }};
+ ArrayFillHelper(lengthTracking, 3, evil, 4);
+ assertEquals([3, 3], ReadDataFromBuffer(rab, ctor));
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 4;
+ }};
+ ArrayFillHelper(lengthTracking, 3, 0, evil);
+ assertEquals([3, 3], ReadDataFromBuffer(rab, ctor));
+ }
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 3;
+ }};
+ ArrayFillHelper(lengthTracking, evil, 0, 4);
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 0;
+ }};
+ ArrayFillHelper(lengthTracking, 3, evil, 4);
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 4;
+ }};
+ ArrayFillHelper(lengthTracking, 3, 0, evil);
+ }
})();
function At(atHelper, oobThrows) {
@@ -1389,16 +1518,56 @@ function AtParameterConversionResizes(atHelper) {
8 * ctor.BYTES_PER_ELEMENT);
const fixedLength = new ctor(rab, 0, 4);
- let evil = { valueOf: () => { rab.resize(2); return 0;}};
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 0;
+ }};
assertEquals(undefined, atHelper(fixedLength, evil));
}
+ // Resizing + a length-tracking TA -> no OOB, but bounds recomputation needed.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return -1;
+ }};
+ // The TypedArray is *not* out of bounds since it's length-tracking.
+ assertEquals(undefined, atHelper(lengthTracking, evil));
+ }
for (let ctor of ctors) {
const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
const lengthTracking = new ctor(rab);
+ WriteToTypedArray(lengthTracking, 0, 25);
- let evil = { valueOf: () => { rab.resize(2); return -1;}};
+ const evil = { valueOf: () => {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 0;
+ }};
+ // The TypedArray is *not* out of bounds since it's length-tracking.
+ assertEquals(25, atHelper(lengthTracking, evil));
+ }
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return -1;
+ }};
+ // The TypedArray is *not* out of bounds since it's length-tracking.
+ assertEquals(undefined, atHelper(lengthTracking, evil));
+ }
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ WriteToTypedArray(lengthTracking, 0, 25);
+
+ const evil = { valueOf: () => {
+ rab.resize(0); return 0;
+ }};
// The TypedArray is *not* out of bounds since it's length-tracking.
assertEquals(undefined, atHelper(lengthTracking, evil));
}
@@ -1491,6 +1660,7 @@ AtParameterConversionResizes(ArrayAtHelper);
assertThrows(() => { fixedLength.slice(evil); }, TypeError);
assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
}
+ // Resizing + a length-tracking TA -> no OOB, but bounds recomputation needed.
for (let ctor of ctors) {
const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
@@ -1503,6 +1673,19 @@ AtParameterConversionResizes(ArrayAtHelper);
assertEquals([1, 2, 0, 0], ToNumbers(lengthTracking.slice(evil)));
assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i + 1);
+ }
+ const evil = { valueOf: () => { rab.resize(0);
+ return 0; }};
+ assertEquals([0, 0, 0, 0], ToNumbers(lengthTracking.slice(evil)));
+ assertEquals(0, rab.byteLength);
+ }
})();
function SliceParameterConversionGrows(sliceHelper) {
@@ -1812,6 +1995,7 @@ TestCopyWithin(ArrayCopyWithinHelper, false);
lengthTracking.copyWithin(evil, 0);
assertEquals([0, 1, 0], ToNumbers(lengthTracking));
}
+ // Resizing + a length-tracking TA -> no OOB, but bounds recomputation needed.
for (let ctor of ctors) {
const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
@@ -1829,6 +2013,23 @@ TestCopyWithin(ArrayCopyWithinHelper, false);
lengthTracking.copyWithin(0, evil);
assertEquals([2, 1, 2], ToNumbers(lengthTracking));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+ // [0, 1, 2, 3]
+ // ^
+ // start
+ // ^
+ // target
+ const evil = { valueOf: () => { rab.resize(0); return 2; }};
+ lengthTracking.copyWithin(0, evil);
+ assertEquals([], ToNumbers(lengthTracking));
+ }
})();
(function CopyWithinParameterConversionGrows() {
@@ -2229,7 +2430,7 @@ function EntriesKeysValuesShrinkMidIteration(
return rab;
}
- // Iterating with entries() (the 4 loops below).
+ // Iterating with entries() (the 5 loops below).
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const fixedLength = new ctor(rab, 0, 4);
@@ -2252,6 +2453,7 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 1, 3 * ctor.BYTES_PER_ELEMENT); });
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -2261,6 +2463,16 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(entriesHelper(lengthTracking),
+ [[0, 0]],
+ rab, 1, 0);
+ }
+
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -2270,7 +2482,7 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
}
- // Iterating with keys() (the 4 loops below).
+ // Iterating with keys() (the 5 loops below).
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const fixedLength = new ctor(rab, 0, 4);
@@ -2293,6 +2505,7 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -2302,6 +2515,16 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(keysHelper(lengthTracking),
+ [0],
+ rab, 1, 0);
+ }
+
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -2311,7 +2534,7 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
}
- // Iterating with values() (the 4 loops below).
+ // Iterating with values() (the 5 loops below).
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const fixedLength = new ctor(rab, 0, 4);
@@ -2334,6 +2557,7 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -2343,6 +2567,16 @@ function EntriesKeysValuesShrinkMidIteration(
rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(valuesHelper(lengthTracking),
+ [0, 2],
+ rab, 2, 0);
+ }
+
for (let ctor of ctors) {
const rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -2593,6 +2827,7 @@ function EveryShrinkMidIteration(everyHelper, hasUndefined) {
}
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -2607,6 +2842,21 @@ function EveryShrinkMidIteration(everyHelper, hasUndefined) {
}
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertTrue(everyHelper(lengthTracking, CollectValuesAndResize));
+ if (hasUndefined) {
+ assertEquals([0, 2, undefined, undefined], values);
+ } else {
+ assertEquals([0, 2], values);
+ }
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -2761,6 +3011,7 @@ function SomeShrinkMidIteration(someHelper, hasUndefined) {
}
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -2775,6 +3026,21 @@ function SomeShrinkMidIteration(someHelper, hasUndefined) {
}
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertFalse(someHelper(lengthTracking, CollectValuesAndResize));
+ if (hasUndefined) {
+ assertEquals([0, 2, undefined, undefined], values);
+ } else {
+ assertEquals([0, 2], values);
+ }
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -3171,6 +3437,7 @@ function FindShrinkMidIteration(findHelper) {
assertEquals([4, undefined], values);
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -3181,6 +3448,17 @@ function FindShrinkMidIteration(findHelper) {
assertEquals([0, 2, 4, undefined], values);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals(undefined, findHelper(lengthTracking, CollectValuesAndResize));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -3326,6 +3604,7 @@ function FindIndexShrinkMidIteration(findIndexHelper) {
assertEquals([4, undefined], values);
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -3336,6 +3615,17 @@ function FindIndexShrinkMidIteration(findIndexHelper) {
assertEquals([0, 2, 4, undefined], values);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals(-1, findIndexHelper(lengthTracking, CollectValuesAndResize));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -3479,6 +3769,7 @@ function FindLastShrinkMidIteration(findLastHelper) {
assertEquals([6, undefined], values);
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -3490,6 +3781,18 @@ function FindLastShrinkMidIteration(findLastHelper) {
assertEquals([6, 4, 2, 0], values);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals(undefined,
+ findLastHelper(lengthTracking, CollectValuesAndResize));
+ assertEquals([6, 4, undefined, undefined], values);
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -3649,6 +3952,7 @@ function FindLastIndexShrinkMidIteration(findLastIndexHelper) {
assertEquals([6, 4, 2, 0], values);
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -3660,6 +3964,18 @@ function FindLastIndexShrinkMidIteration(findLastIndexHelper) {
assertEquals([6, undefined, 2, 0], values);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 0;
+ assertEquals(-1,
+ findLastIndexHelper(lengthTracking, CollectValuesAndResize));
+ assertEquals([6, undefined, undefined, undefined], values);
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -3910,6 +4226,7 @@ Filter(ArrayFilterHelper, false);
assertEquals([4, undefined], values);
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -3920,6 +4237,17 @@ Filter(ArrayFilterHelper, false);
assertEquals([0, 2, 4, undefined], values);
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals([], ToNumbers(lengthTracking.filter(CollectValuesAndResize)));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -4199,6 +4527,7 @@ ForEachReduceReduceRight(ArrayForEachHelper, ArrayReduceHelper,
assertEquals([4, undefined], ForEachHelper(fixedLengthWithOffset));
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -4207,6 +4536,15 @@ ForEachReduceReduceRight(ArrayForEachHelper, ArrayReduceHelper,
assertEquals([0, 2, 4, undefined], ForEachHelper(lengthTracking));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals([0, 2, undefined, undefined], ForEachHelper(lengthTracking));
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -4233,6 +4571,7 @@ ForEachReduceReduceRight(ArrayForEachHelper, ArrayReduceHelper,
assertEquals([4, undefined], ReduceHelper(fixedLengthWithOffset));
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -4241,6 +4580,15 @@ ForEachReduceReduceRight(ArrayForEachHelper, ArrayReduceHelper,
assertEquals([0, 2, 4, undefined], ReduceHelper(lengthTracking));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals([0, 2, undefined, undefined], ReduceHelper(lengthTracking));
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -4267,6 +4615,7 @@ ForEachReduceReduceRight(ArrayForEachHelper, ArrayReduceHelper,
assertEquals([6, undefined], ReduceRightHelper(fixedLengthWithOffset));
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -4276,6 +4625,15 @@ ForEachReduceReduceRight(ArrayForEachHelper, ArrayReduceHelper,
assertEquals([6, 4, 2, 0], ReduceRightHelper(lengthTracking));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAfter = 2;
+ resizeTo = 0;
+ assertEquals([6, 4, undefined, undefined], ReduceRightHelper(lengthTracking));
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTracking = new ctor(rab, 0);
@@ -4621,6 +4979,7 @@ function IncludesParameterConversionResizes(helper) {
assertFalse(helper(fixedLength, 0, evil));
}
+ // Resizing + a length-tracking TA -> no OOB.
for (let ctor of ctors) {
const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
@@ -4635,6 +4994,21 @@ function IncludesParameterConversionResizes(helper) {
assertTrue(helper(lengthTracking, undefined, evil));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(0);
+ return 0;
+ }};
+ assertFalse(helper(lengthTracking, undefined));
+ // "includes" iterates until the original length and sees "undefined"s.
+ assertTrue(helper(lengthTracking, undefined, evil));
+ }
+
for (let ctor of ctors) {
const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
8 * ctor.BYTES_PER_ELEMENT);
@@ -4950,6 +5324,41 @@ function IndexOfParameterConversionShrinks(indexOfHelper, lastIndexOfHelper) {
// 2 no longer found.
assertEquals(-1, indexOfHelper(lengthTracking, 2, evil));
}
+
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ let evil = { valueOf: () => {
+ rab.resize(0);
+ return 2;
+ }};
+ assertEquals(2, indexOfHelper(lengthTracking, 2));
+ // 2 no longer found.
+ assertEquals(-1, indexOfHelper(lengthTracking, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ let evil = { valueOf: () => {
+ rab.resize(0);
+ return 1;
+ }};
+ assertEquals(2, indexOfHelper(lengthTracking, 2));
+ // 2 no longer found.
+ assertEquals(-1, indexOfHelper(lengthTracking, 2, evil));
+ }
}
IndexOfParameterConversionShrinks(TypedArrayIndexOfHelper);
IndexOfParameterConversionShrinks(ArrayIndexOfHelper);
@@ -5001,6 +5410,41 @@ function LastIndexOfParameterConversionShrinks(lastIndexOfHelper) {
// 2 no longer found.
assertEquals(-1, lastIndexOfHelper(lengthTracking, 2, evil));
}
+
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ const evil = { valueOf: () => {
+ rab.resize(0);
+ return 2;
+ }};
+ assertEquals(2, lastIndexOfHelper(lengthTracking, 2));
+ // 2 no longer found.
+ assertEquals(-1, lastIndexOfHelper(lengthTracking, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ const evil = { valueOf: () => {
+ rab.resize(0);
+ return 2;
+ }};
+ assertEquals(2, lastIndexOfHelper(lengthTracking, 2));
+ // 2 no longer found.
+ assertEquals(-1, lastIndexOfHelper(lengthTracking, 2, evil));
+ }
}
LastIndexOfParameterConversionShrinks(TypedArrayLastIndexOfHelper);
LastIndexOfParameterConversionShrinks(ArrayLastIndexOfHelper);
@@ -5258,6 +5702,21 @@ function JoinParameterConversionShrinks(joinHelper) {
// the new length are converted to the empty string.
assertEquals('0.0..', joinHelper(lengthTracking, evil));
}
+
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { toString: () => {
+ rab.resize(0);
+ return '.';
+ }};
+    // We iterate over 4 elements, since that was the starting length. All
+    // elements are converted to the empty string.
+ assertEquals('...', joinHelper(lengthTracking, evil));
+ }
}
JoinParameterConversionShrinks(TypedArrayJoinHelper);
JoinParameterConversionShrinks(ArrayJoinHelper);
@@ -5352,6 +5811,33 @@ function ToLocaleStringNumberPrototypeToLocaleStringShrinks(
assertEquals('0,0,,', toLocaleStringHelper(lengthTracking));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let resizeAfter = 1;
+ Number.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(0);
+ }
+ return oldNumberPrototypeToLocaleString.call(this);
+ }
+ BigInt.prototype.toLocaleString = function() {
+ --resizeAfter;
+ if (resizeAfter == 0) {
+ rab.resize(0);
+ }
+ return oldBigIntPrototypeToLocaleString.call(this);
+ }
+
+ // We iterate 4 elements, since that was the starting length. Elements beyond
+ // the new length are converted to the empty string.
+ assertEquals('0,,,', toLocaleStringHelper(lengthTracking));
+ }
+
Number.prototype.toLocaleString = oldNumberPrototypeToLocaleString;
BigInt.prototype.toLocaleString = oldBigIntPrototypeToLocaleString;
}
@@ -5619,6 +6105,20 @@ function MapShrinkMidIteration(mapHelper, hasUndefined) {
}
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAfter = 1;
+ resizeTo = 0;
+ if (hasUndefined) {
+ assertEquals([0, undefined, undefined, undefined],
+ Helper(lengthTracking));
+ } else {
+ assertEquals([0], Helper(lengthTracking));
+ }
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -5776,6 +6276,33 @@ MapGrowMidIteration(ArrayMapHelper);
assertEquals([0, 1, undefined, undefined], Helper(lengthTracking));
assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
}
+
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(0);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ resizeWhenConstructorCalled = true;
+ assertEquals([undefined, undefined, undefined, undefined],
+ Helper(lengthTracking));
+ assertEquals(0, rab.byteLength);
+ }
})();
(function MapSpeciesCreateGrows() {
@@ -6188,6 +6715,16 @@ Reverse(ArrayReverseHelper, false);
assertEquals([1, 2, 4], ToNumbers(new ctor(rab)));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeTo = 0;
+ lengthTracking.set(CreateSourceProxy(1));
+ assertEquals([], ToNumbers(lengthTracking));
+ assertEquals([], ToNumbers(new ctor(rab)));
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -6369,6 +6906,17 @@ Reverse(ArrayReverseHelper, false);
assertEquals([1, 1, 4], ToNumbers(new ctor(rab)));
}
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ resizeAt = 2;
+ resizeTo = 0;
+ lengthTracking.set(CreateSourceProxy(2));
+ assertEquals([], ToNumbers(lengthTracking));
+ assertEquals([], ToNumbers(new ctor(rab)));
+ }
+
for (let ctor of ctors) {
rab = CreateRabForTest(ctor);
const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
@@ -6841,6 +7389,31 @@ Reverse(ArrayReverseHelper, false);
}};
assertThrows(() => { lengthTracking.subarray(0, evil); });
}
+
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(0);
+ return 1;
+ }};
+ assertThrows(() => { lengthTracking.subarray(0, evil); });
+ }
+
+ // Like the previous test, but now we construct a smaller subarray and it
+ // succeeds.
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => {
+ rab.resize(0);
+ return 0;
+ }};
+ assertEquals([], ToNumbers(lengthTracking.subarray(evil, 0)));
+ }
})();
(function SubarrayParameterConversionGrows() {
@@ -7346,6 +7919,17 @@ function SortCallbackShrinks(sortHelper) {
assertTrue([10, 9, 8, 7].includes(newData[0]));
assertTrue([10, 9, 8, 7].includes(newData[1]));
}
+
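+ // Special case: resizing to 0 -> length-tracking TA still not OOB. There is
+ // nothing left to verify afterwards; the sort call just must not crash.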
+ for (let ctor of ctors) {
+ rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ resizeTo = 0;
+ const lengthTracking = new ctor(rab, 0);
+ const taFull = new ctor(rab, 0);
+ WriteUnsortedData(taFull);
+
+ sortHelper(lengthTracking, CustomComparison);
+ }
}
SortCallbackShrinks(TypedArraySortHelper);
SortCallbackShrinks(ArraySortHelper);
@@ -7526,6 +8110,18 @@ SortCallbackGrows(ArraySortHelper);
}};
assertThrows(() => { helper(lengthTracking, evil, 8); }, TypeError);
}
+
+ // Special case: resizing to 0 -> length-tracking TA still not OOB.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab, 0);
+ const evil = {toString: () => {
+ rab.resize(0);
+ return 0; // Index is out of bounds after the resize to 0.
+ }};
+ assertThrows(() => { helper(lengthTracking, evil, 8); }, TypeError);
+ }
})();
(function ObjectDefinePropertyParameterConversionGrows() {
diff --git a/deps/v8/test/mjsunit/wasm/array-bulk-operations.js b/deps/v8/test/mjsunit/wasm/array-bulk-operations.js
new file mode 100644
index 0000000000..3e61752dba
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/array-bulk-operations.js
@@ -0,0 +1,131 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestArrayFillImmutable() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let array = builder.addArray(kWasmI32, false);
+
+ // Parameters: array, starting index, value, length.
+ builder.addFunction(
+ "array_fill",
+ makeSig([wasmRefNullType(array), kWasmI32, kWasmI32, kWasmI32], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kExprLocalGet, 2, kExprLocalGet, 3,
+ kGCPrefix, kExprArrayFill, array])
+ .exportFunc();
+
+ assertThrows(() => builder.instantiate(), WebAssembly.CompileError,
+ /immediate array type #0 is immutable/);
+})();
+
+(function TestArrayFill() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let array = builder.addArray(wasmRefNullType(struct), true);
+
+ builder.addFunction(
+ "make_array", makeSig([kWasmI32], [wasmRefType(array)]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprArrayNewDefault, array])
+ .exportFunc();
+
+ builder.addFunction(
+ "array_get", makeSig([wasmRefNullType(array), kWasmI32], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ // Parameters: array, starting index, value in struct, length.
+ builder.addFunction(
+ "array_fill",
+ makeSig([wasmRefNullType(array), kWasmI32, kWasmI32, kWasmI32], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kExprLocalGet, 2, kGCPrefix, kExprStructNew, struct,
+ kExprLocalGet, 3, kGCPrefix, kExprArrayFill, array])
+ .exportFunc();
+
+ let wasm = builder.instantiate().exports;
+
+ assertTraps(kTrapNullDereference, () => wasm.array_fill(null, 10, 20, 30));
+
+ let array_obj = wasm.make_array(8);
+
+ assertTraps(kTrapArrayOutOfBounds,
+ () => wasm.array_fill(array_obj, 5, 42, 4));
+ // An out-of-bounds array.fill traps even when the fill length is 0, as long
+ // as the index is greater than array.len.
+ assertTraps(kTrapArrayOutOfBounds,
+ () => wasm.array_fill(array_obj, 10, 42, 0));
+ // Overflow of (index + length) traps.
+ assertTraps(kTrapArrayOutOfBounds,
+ () => wasm.array_fill(array_obj, 5, 42, -1));
+
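+ // Fill indices [2, 5) with a struct wrapping the value 42.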
+ wasm.array_fill(array_obj, 2, 42, 3);
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 0));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 1));
+ assertEquals(42, wasm.array_get(array_obj, 2));
+ assertEquals(42, wasm.array_get(array_obj, 3));
+ assertEquals(42, wasm.array_get(array_obj, 4));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 5));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 6));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 7));
+
+ // Index = array.len and length = 0 works; it just does nothing.
+ wasm.array_fill(array_obj, 8, 42, 0);
+
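+ // Fill indices [4, 6) with a struct wrapping 54, overwriting index 4.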
+ wasm.array_fill(array_obj, 4, 54, 2);
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 0));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 1));
+ assertEquals(42, wasm.array_get(array_obj, 2));
+ assertEquals(42, wasm.array_get(array_obj, 3));
+ assertEquals(54, wasm.array_get(array_obj, 4));
+ assertEquals(54, wasm.array_get(array_obj, 5));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 6));
+ assertTraps(kTrapNullDereference, () => wasm.array_get(array_obj, 7));
+})();
+
+(function TestArrayNewNonNullable() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let array = builder.addArray(wasmRefType(struct), true);
+
+ builder.addFunction(
+ "make_array", makeSig([wasmRefType(struct), kWasmI32],
+ [wasmRefType(array)]))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayNew, array])
+ .exportFunc();
+
+ builder.addFunction(
+ "array_get", makeSig([wasmRefNullType(array), kWasmI32],
+ [wasmRefType(struct)]))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array])
+ .exportFunc();
+
+ builder.addFunction(
+ "make_struct", makeSig([], [wasmRefType(struct)]))
+ .addBody([kGCPrefix, kExprStructNewDefault, struct])
+ .exportFunc();
+
+ let wasm = builder.instantiate().exports;
+
+ let length = 50; // Enough to go through the initialization builtin.
+ let struct_obj = wasm.make_struct();
+ let array_obj = wasm.make_array(struct_obj, length);
+
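+ // Every element must have been initialized with the struct passed to
+ // array.new.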
+ for (let i = 0; i < length; i++) {
+ assertEquals(struct_obj, wasm.array_get(array_obj, i));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js b/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
index 38fbef64bd..50685f2ee9 100644
--- a/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
+++ b/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc --no-liftoff --experimental-wasm-nn-locals
+// Flags: --experimental-wasm-gc --no-liftoff
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
@@ -23,7 +23,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
let struct_index = builder.addStruct([makeField(kWasmI32, true),
makeField(kWasmI8, false)]);
- let array_type = kWasmI32; // Also try kWasmI64, wasmRefNullType(struct_index)
+ // Also try wasmRefNullType(struct_index) or other numeric types.
+ let array_type = kWasmI32;
var array_index = builder.addArray(array_type, true);
var from = builder.addGlobal(wasmRefNullType(array_index), true);
var to = builder.addGlobal(wasmRefNullType(array_index), true);
diff --git a/deps/v8/test/mjsunit/wasm/array-init-from-segment.js b/deps/v8/test/mjsunit/wasm/array-init-from-segment.js
index f98c86e8b8..432cb91db4 100644
--- a/deps/v8/test/mjsunit/wasm/array-init-from-segment.js
+++ b/deps/v8/test/mjsunit/wasm/array-init-from-segment.js
@@ -36,14 +36,31 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprI32Const, 0, // offset
kExprLocalGet, 0, // length
- kGCPrefix, kExprArrayNewElem, array_type_index,
- segment,
+ kGCPrefix, kExprArrayNewElem, array_type_index, segment,
kExprLocalGet, 1, // index in the array
kGCPrefix, kExprArrayGet, array_type_index,
kGCPrefix, kExprStructGet, struct_type_index, 0])
.exportFunc()
}
+ // Respective segment elements should be pointer-identical.
+ builder.addFunction("identical", makeSig([kWasmI32, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprI32Const, 0, // offset
+ kExprLocalGet, 0, // length
+ kGCPrefix, kExprArrayNewElem, array_type_index, passive_segment,
+ kExprLocalGet, 1, // index in the array
+ kGCPrefix, kExprArrayGet, array_type_index,
+
+ kExprI32Const, 0, // offset
+ kExprLocalGet, 0, // length
+ kGCPrefix, kExprArrayNewElem, array_type_index, passive_segment,
+ kExprLocalGet, 1, // index in the array
+ kGCPrefix, kExprArrayGet, array_type_index,
+
+ kExprRefEq])
+ .exportFunc()
+
generator("init_and_get", passive_segment);
generator("init_and_get_active", active_segment);
@@ -67,6 +84,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertTraps(kTrapArrayTooLarge, () => init_and_get(1 << 31, 10));
// Element is out of bounds.
assertTraps(kTrapElementSegmentOutOfBounds, () => init_and_get(5, 0));
+ // Respective segment elements should be pointer-identical.
+ assertEquals(1, instance.exports.identical(3, 0));
// Now drop the segment.
instance.exports.drop();
// A 0-length array should still be created...
@@ -78,6 +97,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertTraps(kTrapElementSegmentOutOfBounds, () => init_and_get_active(1, 0));
})();
+// TODO(7748): Reenable when we have constant array.new_elem.
+/*
(function TestArrayNewElemConstant() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -158,6 +179,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertTraps(kTrapNullDereference, () => table_get(0, 2));
assertTraps(kTrapArrayOutOfBounds, () => table_get(0, 3));
})();
+*/
(function TestArrayNewElemMistypedSegment() {
print(arguments.callee.name);
@@ -185,6 +207,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
/segment type.*is not a subtype of array element type.*/);
})();
+// TODO(7748): Reenable when we have constant array.new_elem.
+/*
// Element segments are defined after globals, so currently it is not valid
// to refer to an element segment in the global section.
(function TestArrayNewFixedFromElemInGlobal() {
@@ -326,3 +350,62 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// An active segment counts as having 0 length.
assertTraps(kTrapElementSegmentOutOfBounds, () => instance.exports.init());
})();
+
+(function TestArrayNewData() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array_type_index = builder.addArray(kWasmI16, true);
+
+ let dummy_byte = 0xff;
+ let element_0 = 1000;
+ let element_1 = -2222;
+
+ let data_segment = builder.addPassiveDataSegment(
+ [dummy_byte, element_0 & 0xff, (element_0 >> 8) & 0xff,
+ element_1 & 0xff, (element_1 >> 8) & 0xff]);
+
+ let global = builder.addGlobal(
+ wasmRefType(array_type_index), true,
+ [...wasmI32Const(1), ...wasmI32Const(2),
+ kGCPrefix, kExprArrayNewData, array_type_index, data_segment],
+ builder);
+
+ builder.addFunction("global_get", kSig_i_i)
+ .addBody([
+ kExprGlobalGet, global.index,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayGetS, array_type_index])
+ .exportFunc();
+
+ // parameters: (segment offset, array length, array index)
+ builder.addFunction("init_from_data", kSig_i_iii)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayNewData,
+ array_type_index, data_segment,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayGetS, array_type_index])
+ .exportFunc();
+
+ builder.addFunction("drop_segment", kSig_v_v)
+ .addBody([kNumericPrefix, kExprDataDrop, data_segment])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertEquals(element_0, instance.exports.global_get(0));
+ assertEquals(element_1, instance.exports.global_get(1));
+
+ let init = instance.exports.init_from_data;
+
+ assertEquals(element_0, init(1, 2, 0));
+ assertEquals(element_1, init(1, 2, 1));
+
+ assertTraps(kTrapArrayTooLarge, () => init(1, 1000000000, 0));
+ assertTraps(kTrapDataSegmentOutOfBounds, () => init(2, 2, 0));
+
+ instance.exports.drop_segment();
+
+ assertTraps(kTrapDataSegmentOutOfBounds, () => init(1, 2, 0));
+})();
+*/
diff --git a/deps/v8/test/mjsunit/wasm/bit-shift-right.js b/deps/v8/test/mjsunit/wasm/bit-shift-right.js
index 93ff50ed45..d250c21c59 100644
--- a/deps/v8/test/mjsunit/wasm/bit-shift-right.js
+++ b/deps/v8/test/mjsunit/wasm/bit-shift-right.js
@@ -121,6 +121,6 @@ let testFct = () => {
for (let i = 0; i < 20; i++) testFct();
for (let fct of fcts) {
- %WasmTierUpFunction(instance, fct.index);
+ %WasmTierUpFunction(wasm[fct.name]);
}
testFct();
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
index c99d44c5ce..c3eb9f32a6 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
@@ -14,10 +14,10 @@ builder.addFunction('load', kSig_i_i)
kExprI32LoadMem, 0, 100])
.exportFunc();
-const module = builder.instantiate();
-%WasmTierUpFunction(module, 0);
+const load = builder.instantiate().exports.load;
+%WasmTierUpFunction(load);
// 100 is added as part of the load instruction above
// Last valid address (64k - 100 - 4)
-assertEquals(0, module.exports.load(0x10000 - 100 - 4));
+assertEquals(0, load(0x10000 - 100 - 4));
// First invalid address (64k - 100)
-assertTraps(kTrapMemOutOfBounds, _ => { module.exports.load(0x10000 - 100);});
+assertTraps(kTrapMemOutOfBounds, _ => { load(0x10000 - 100);});
diff --git a/deps/v8/test/mjsunit/wasm/call-ref.js b/deps/v8/test/mjsunit/wasm/call-ref.js
index 2752fb1746..9880f0ef6d 100644
--- a/deps/v8/test/mjsunit/wasm/call-ref.js
+++ b/deps/v8/test/mjsunit/wasm/call-ref.js
@@ -6,7 +6,8 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
-(function Test1() {
+(function TestImportedRefCall() {
+ print(arguments.callee.name);
var exporting_instance = (function () {
var builder = new WasmModuleBuilder();
@@ -31,7 +32,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
var imported_wasm_function_index =
builder.addImport("imports", "wasm_add", sig_index);
-
var locally_defined_function =
builder.addFunction("sub", sig_index)
.addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Sub])
@@ -120,6 +120,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
})();
(function TestFromJSSlowPath() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var sig_index = builder.addType(kSig_i_i);
@@ -135,3 +136,81 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// {undefined} is converted to 0.
assertEquals(0, instance.exports.main(fun, 1000));
})();
+
+(function TestImportedFunctionSubtyping() {
+ print(arguments.callee.name);
+ var exporting_instance = (function () {
+ var builder = new WasmModuleBuilder();
+ let super_struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let sub_struct = builder.addStruct(
+ [makeField(kWasmI32, true), makeField(kWasmI64, true)], super_struct);
+ let super_sig = builder.addType(makeSig([wasmRefNullType(sub_struct)],
+ [kWasmI32]), kNoSuperType, false)
+ let sub_sig = builder.addType(makeSig([wasmRefNullType(super_struct)],
+ [kWasmI32]), super_sig)
+
+ builder.addFunction("exported_function", sub_sig)
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprStructGet, super_struct, 0])
+ .exportFunc();
+
+ return builder.instantiate({});
+ })();
+
+ var builder = new WasmModuleBuilder();
+ // These should canonicalize to the same types as the exporting instance.
+ let super_struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let sub_struct = builder.addStruct(
+ [makeField(kWasmI32, true), makeField(kWasmI64, true)], super_struct);
+ let super_sig = builder.addType(
+ makeSig([wasmRefNullType(sub_struct)], [kWasmI32]), kNoSuperType, false);
+ builder.addImport("m", "f", super_sig);
+
+ // The imported function's type is a subtype of the declared import type, so
+ // instantiation succeeds.
+ return builder.instantiate({m: {f:
+ exporting_instance.exports.exported_function}});
+})();
+
+(function TestJSFunctionCanonicallyDifferent() {
+ print(arguments.callee.name);
+
+ let imp = new WebAssembly.Function({parameters: ["i32"], results: ["i32"]},
+ x => x + 1);
+
+ (function () {
+ var builder = new WasmModuleBuilder();
+ let sig = builder.addType(kSig_i_i);
+
+ builder.addImport("m", "f", sig);
+
+ // This succeeds: the JS function's type is compatible with the declared
+ // signature.
+ builder.instantiate({m: {f: imp}});
+ })();
+
+ (function () {
+ var builder = new WasmModuleBuilder();
+ let sig = builder.addType(kSig_i_i, kNoSuperType, false);
+ let sig_sub = builder.addType(kSig_i_i, sig);
+
+ builder.addImport("m", "f", sig_sub);
+
+ // The JS function's type is not a subtype of the declared type (sig_sub), so
+ // linking fails.
+ assertThrows(() => builder.instantiate({m: {f: imp}}),
+ WebAssembly.LinkError,
+ /imported function does not match the expected type/);
+ })();
+
+ (function () {
+ var builder = new WasmModuleBuilder();
+ builder.startRecGroup();
+ let sig_in_group = builder.addType(kSig_i_i);
+ builder.addType(kSig_i_v);
+ builder.endRecGroup();
+
+ builder.addImport("m", "f", sig_in_group);
+
+ // The declared type is part of a recursion group and is therefore
+ // canonically different from the JS function's type, so linking fails.
+ assertThrows(() => builder.instantiate({m: {f: imp}}),
+ WebAssembly.LinkError,
+ /imported function does not match the expected type/);
+ })();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/call_indirect.js b/deps/v8/test/mjsunit/wasm/call_indirect.js
index b3621687b4..6f482a4ab1 100644
--- a/deps/v8/test/mjsunit/wasm/call_indirect.js
+++ b/deps/v8/test/mjsunit/wasm/call_indirect.js
@@ -48,8 +48,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let exporting_instance = (function() {
const builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
-
let struct_mistyped = builder.addStruct([]);
let struct = builder.addStruct([makeField(kWasmI32, true)]);
@@ -72,8 +70,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
-
// Have these in the reverse order than before.
let struct = builder.addStruct([makeField(kWasmI32, true)]);
let struct_mistyped = builder.addStruct([]);
diff --git a/deps/v8/test/mjsunit/wasm/code-flushing.js b/deps/v8/test/mjsunit/wasm/code-flushing.js
new file mode 100644
index 0000000000..28a2c019e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/code-flushing.js
@@ -0,0 +1,40 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --allow-natives-syntax --wasm-lazy-compilation
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('f1', kSig_i_i).addBody([kExprLocalGet, 0]).exportFunc();
+builder.addFunction('f2', kSig_i_i).addBody([kExprLocalGet, 0]).exportFunc();
+builder.addFunction('f3', kSig_i_i).addBody([kExprLocalGet, 0]).exportFunc();
+
+const instance = builder.instantiate();
+
+instance.exports.f1(1);
+instance.exports.f2(2);
+instance.exports.f3(3);
+
+assertTrue(%IsLiftoffFunction(instance.exports.f1));
+assertTrue(%IsLiftoffFunction(instance.exports.f2));
+assertTrue(%IsLiftoffFunction(instance.exports.f3));
+
+%FlushWasmCode();
+
+assertTrue(%IsUncompiledWasmFunction(instance.exports.f1));
+assertTrue(%IsUncompiledWasmFunction(instance.exports.f2));
+assertTrue(%IsUncompiledWasmFunction(instance.exports.f3));
+
+instance.exports.f1(1);
+instance.exports.f2(2);
+instance.exports.f3(3);
+
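+// Tier up f3 so that its TurboFan code survives the next flush.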
+%WasmTierUpFunction(instance.exports.f3);
+
+%FlushWasmCode();
+
+assertTrue(%IsUncompiledWasmFunction(instance.exports.f1));
+assertTrue(%IsUncompiledWasmFunction(instance.exports.f2));
+assertTrue(%IsTurboFanFunction(instance.exports.f3));
diff --git a/deps/v8/test/mjsunit/wasm/code-space-overflow.js b/deps/v8/test/mjsunit/wasm/code-space-overflow.js
new file mode 100644
index 0000000000..d95948efb8
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/code-space-overflow.js
@@ -0,0 +1,44 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-code-space-size-mb=1
+// Disable lazy compilation, so we actually generate a lot of code at once.
+// Flags: --no-wasm-lazy-compilation
+// Limit the number of background threads, so each thread generates more code.
+// Flags: --wasm-num-compilation-tasks=2
+
+// This is a regression test for https://crbug.com/v8/13436. If a single
+// background thread generates more code than fits in a single code space, we
+// need to split it into multiple code spaces.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const start = performance.now();
+function time(name) {
+ const ms_since_start = (performance.now() - start).toFixed(1).padStart(7);
+ print(`[${ms_since_start}] ${name}`);
+}
+
+// At the time of writing this test (Nov 2022), this module generated ~20MB of
+// code on x64 and ~18MB on arm64.
+const builder = new WasmModuleBuilder();
+const kNumFunctions = 1500;
+// Build a large body template, then append one instruction per function so
+// that each function gets different code (in case we ever decide to merge
+// identical code objects in the future).
+time('Build function template.');
+let body_template = [kExprLocalGet, 0];
+for (let i = 0; i < kNumFunctions; ++i) {
+ body_template.push(kExprCallFunction, ...wasmSignedLeb(i));
+}
+time(`Adding ${kNumFunctions} functions`);
+for (let i = 0; i < kNumFunctions; ++i) {
+ if (i != 0 && i % 100 == 0) time(`... added ${i} functions`);
+ let body = body_template.concat([...wasmI32Const(i), kExprI32Add, kExprEnd]);
+ builder.addFunction('f' + i, kSig_i_i).addBodyWithEnd(body);
+}
+time('Building buffer.');
+const wasm_buf = builder.toBuffer();
+time('Compiling Wasm module.');
+new WebAssembly.Module(wasm_buf);
diff --git a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js b/deps/v8/test/mjsunit/wasm/committed-code-exhaustion.js
index d11a18ddc0..c74f953407 100644
--- a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
+++ b/deps/v8/test/mjsunit/wasm/committed-code-exhaustion.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-max-code-space=1
+// Flags: --wasm-max-committed-code-mb=1
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/enter-and-leave-debug-state.js b/deps/v8/test/mjsunit/wasm/enter-and-leave-debug-state.js
new file mode 100644
index 0000000000..1dce6547b0
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/enter-and-leave-debug-state.js
@@ -0,0 +1,25 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function enterAndLeaveDebugging() {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i_v).exportFunc().addBody(wasmI32Const(42));
+ let main = builder.instantiate().exports.main;
+ assertEquals(42, main());
+ assertTrue(%IsLiftoffFunction(main));
+ %WasmTierUpFunction(main);
+ assertEquals(42, main());
+ assertTrue(%IsTurboFanFunction(main));
+ %WasmEnterDebugging();
+ assertEquals(42, main());
+ assertTrue(%IsWasmDebugFunction(main));
+ %WasmLeaveDebugging();
+ %WasmTierUpFunction(main);
+ assertEquals(42, main());
+ assertTrue(%IsTurboFanFunction(main));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js b/deps/v8/test/mjsunit/wasm/enter-debug-state.js
index f8f19ad0f4..3b184ebc41 100644
--- a/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/enter-debug-state.js
@@ -18,37 +18,22 @@ function create_builder(delta = 0) {
return builder;
}
-function checkTieredDown(instance) {
+function checkForDebugCode(instance) {
for (let i = 0; i < num_functions; ++i) {
- assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
- }
-}
-
-function checkTieredUp(instance) {
- // Busy waiting until all functions are tiered up.
- let num_liftoff_functions;
- while (true) {
- num_liftoff_functions = 0;
- for (let i = 0; i < num_functions; ++i) {
- if (%IsLiftoffFunction(instance.exports['f' + i])) {
- num_liftoff_functions++;
- }
- }
- if (num_liftoff_functions == 0) return;
+ // Call the function once because of lazy compilation.
+ instance.exports['f' + i]();
+ assertTrue(%IsWasmDebugFunction(instance.exports['f' + i]));
}
}
function check(instance) {
- %WasmTierDown();
- checkTieredDown(instance);
+ %WasmEnterDebugging();
+ checkForDebugCode(instance);
for (let i = 0; i < num_functions; ++i) {
- %WasmTierUpFunction(instance, i);
+ %WasmTierUpFunction(instance.exports['f' + i]);
}
- checkTieredDown(instance);
-
- %WasmTierUp();
- checkTieredUp(instance);
+ checkForDebugCode(instance);
}
(function testTierDownToLiftoff() {
diff --git a/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js b/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
deleted file mode 100644
index e456c5b020..0000000000
--- a/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-gc --liftoff --no-wasm-tier-up
-
-d8.file.execute("test/mjsunit/wasm/externref-globals.js");
diff --git a/deps/v8/test/mjsunit/wasm/externref-liftoff.js b/deps/v8/test/mjsunit/wasm/externref-liftoff.js
deleted file mode 100644
index a27058939f..0000000000
--- a/deps/v8/test/mjsunit/wasm/externref-liftoff.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --expose-gc --liftoff --no-wasm-tier-up
-// Flags: --allow-natives-syntax
-
-d8.file.execute("test/mjsunit/wasm/externref.js");
diff --git a/deps/v8/test/mjsunit/wasm/gc-cast-type-inference.js b/deps/v8/test/mjsunit/wasm/gc-cast-type-inference.js
new file mode 100644
index 0000000000..c11cf1fb06
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-cast-type-inference.js
@@ -0,0 +1,172 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function TestRefCastNullReturnsNullTypeForNonNullInput() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let consumeRefI31 =
+ builder.addFunction(`consumeRefI31`,
+ makeSig([wasmRefType(kWasmI31Ref)], []))
+ .addBody([]);
+
+ builder.addFunction(`refCastRemovesNullability`,
+ makeSig([kWasmExternRef], []))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kExprRefAsNonNull,
+ kGCPrefix, kExprRefCastNull, kI31RefCode,
+ // ref.cast null pushes a nullable value on the stack even though its input
+ // was non-nullable, so this call does not validate.
+ kExprCallFunction, consumeRefI31.index,
+ ]).exportFunc();
+
+ assertThrows(() => builder.instantiate(), WebAssembly.CompileError,
+ /expected type \(ref i31\), found .* type i31ref/);
+})();
+
+(function TestRefCastRemovesNullability() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let i31ToI32 =
+ builder.addFunction(`i31ToI32`,
+ makeSig([wasmRefType(kWasmI31Ref)], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprI31GetS
+ ]);
+
+ builder.addFunction(`refCastRemovesNullability`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, kI31RefCode,
+ // ref.cast pushes a non-nullable value on the stack even for a nullable
+ // input value, since the instruction traps on null.
+ kExprCallFunction, i31ToI32.index,
+ ]).exportFunc();
+
+ let wasm = builder.instantiate().exports;
+ assertEquals(42, wasm.refCastRemovesNullability(42));
+ assertEquals(-1, wasm.refCastRemovesNullability(-1));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastRemovesNullability(null));
+})();
+
+(function TestBrOnCastNullability() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let consumeNonNull =
+ builder.addFunction(`consumeNonNull`,
+ makeSig([wasmRefType(kWasmAnyRef)], []))
+ .addBody([]);
+ let i31ToI32 =
+ builder.addFunction(`i31ToI32`,
+ makeSig([wasmRefType(kWasmI31Ref)], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprI31GetS]);
+
+
+ builder.addFunction(`brOnCastNullNonNullOnPassThrough`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, kI31RefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCastNull, 0, kI31RefCode,
+ // Since null takes the branch, the value here is guaranteed to be non-null.
+ kExprCallFunction, consumeNonNull.index,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kGCPrefix, kExprI31GetS,
+ kExprReturn,
+ ]).exportFunc();
+
+ builder.addFunction(`brOnCastNonNullOnBranch`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ // The value is guaranteed to be non-null on branch.
+ kExprBlock, kWasmRef, kI31RefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCast, 0, kI31RefCode,
+ kExprDrop,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprCallFunction, i31ToI32.index,
+ kExprReturn,
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+ let wasm = instance.exports;
+ assertTraps(kTrapNullDereference, () => wasm.brOnCastNullNonNullOnPassThrough(null));
+ assertEquals(42, wasm.brOnCastNullNonNullOnPassThrough(42));
+ assertEquals(0, wasm.brOnCastNullNonNullOnPassThrough("cast fails"));
+ assertEquals(0, wasm.brOnCastNonNullOnBranch(null));
+ assertEquals(42, wasm.brOnCastNonNullOnBranch(42));
+ assertEquals(0, wasm.brOnCastNonNullOnBranch("cast fails"));
+})();
+
+(function TestBrOnCastFailNullability() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let consumeNonNull =
+ builder.addFunction(`consumeNonNull`,
+ makeSig([wasmRefType(kWasmAnyRef)], []))
+ .addBody([]);
+ let i31ToI32 =
+ builder.addFunction(`i31ToI32`,
+ makeSig([wasmRefType(kWasmI31Ref)], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprI31GetS]);
+
+ builder.addFunction(`brOnCastFailNonNullOnPassThrough`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, kAnyRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCastFail, 0, kI31RefCode,
+ // Since null takes the branch, the value here is guaranteed to be non-null.
+ kExprCallFunction, i31ToI32.index,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+
+ builder.addFunction(`brOnCastFailNullNonNullOnBranch`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ // The value is guaranteed to be non-null on branch.
+ kExprBlock, kWasmRef, kAnyRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCastFailNull, 0, kI31RefCode,
+ kGCPrefix, kExprI31GetS,
+ kExprReturn,
+ kExprEnd,
+ kExprCallFunction, consumeNonNull.index,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+ let wasm = instance.exports;
+ assertEquals(1, wasm.brOnCastFailNonNullOnPassThrough(null));
+ assertEquals(42, wasm.brOnCastFailNonNullOnPassThrough(42));
+ assertEquals(1, wasm.brOnCastFailNonNullOnPassThrough("cast fails"));
+ assertTraps(kTrapNullDereference, () => wasm.brOnCastFailNullNonNullOnBranch(null));
+ assertEquals(42, wasm.brOnCastFailNullNonNullOnBranch(42));
+ assertEquals(1, wasm.brOnCastFailNullNonNullOnBranch("cast fails"));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-casts-from-any.js b/deps/v8/test/mjsunit/wasm/gc-casts-from-any.js
index 4b5606cdde..5f3ea73721 100644
--- a/deps/v8/test/mjsunit/wasm/gc-casts-from-any.js
+++ b/deps/v8/test/mjsunit/wasm/gc-casts-from-any.js
@@ -2,15 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc --no-wasm-gc-structref-as-dataref
+// Flags: --experimental-wasm-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function TestRefTest() {
var builder = new WasmModuleBuilder();
+ builder.startRecGroup();
let structSuper = builder.addStruct([makeField(kWasmI32, true)]);
let structSub = builder.addStruct([makeField(kWasmI32, true)], structSuper);
let array = builder.addArray(kWasmI32);
+ builder.endRecGroup();
let fct =
builder.addFunction('createStructSuper',
@@ -47,6 +49,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
["Eq", kEqRefCode],
// 'ref.test any' is semantically the same as '!ref.is_null' here.
["Any", kAnyRefCode],
+ ["None", kNullRefCode]
].forEach(([typeName, typeCode]) => {
builder.addFunction(`refTest${typeName}`,
makeSig([kWasmExternRef], [kWasmI32, kWasmI32]))
@@ -76,6 +79,63 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
kGCPrefix, kExprRefCastNull, typeCode,
kGCPrefix, kExprExternExternalize,
]).exportFunc();
+
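+ // Each of the following helpers returns 1 if the branch is taken and 0 if
+ // execution falls through.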
+ builder.addFunction(`brOnCast${typeName}`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, typeCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCast, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+ builder.addFunction(`brOnCastNull${typeName}`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, typeCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCastNull, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+ builder.addFunction(`brOnCastFail${typeName}`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kAnyRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCastFail, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+ builder.addFunction(`brOnCastFailNull${typeName}`,
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kAnyRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprBrOnCastFailNull, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
});
var instance = builder.instantiate();
@@ -153,6 +213,15 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals([1, 1], wasm.refTestAny(1)); // ref.i31
assertEquals([1, 1], wasm.refTestAny({'JavaScript': 'Object'}));
+ assertEquals([0, 1], wasm.refTestNone(null));
+ assertEquals([0, 0], wasm.refTestNone(undefined));
+ assertEquals([0, 0], wasm.refTestNone(wasm.createStructSuper()));
+ assertEquals([0, 0], wasm.refTestNone(wasm.createStructSub()));
+ assertEquals([0, 0], wasm.refTestNone(wasm.createArray()));
+ assertEquals([0, 0], wasm.refTestNone(wasm.createFuncRef()));
+ assertEquals([0, 0], wasm.refTestNone(1)); // ref.i31
+ assertEquals([0, 0], wasm.refTestNone({'JavaScript': 'Object'}));
+
// ref.cast
let structSuperObj = wasm.createStructSuper();
let structSubObj = wasm.createStructSub();
@@ -232,6 +301,15 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(1, wasm.refCastAny(1));
assertSame(jsObj, wasm.refCastAny(jsObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(null));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(undefined));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(structSuperObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(structSubObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(arrayObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(funcObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(1));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNone(jsObj));
+
// ref.cast null
assertSame(null, wasm.refCastNullStructSuper(null));
assertTraps(kTrapIllegalCast, () => wasm.refCastNullStructSuper(undefined));
@@ -304,4 +382,341 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertSame(funcObj, wasm.refCastNullAny(funcObj));
assertEquals(1, wasm.refCastNullAny(1));
assertSame(jsObj, wasm.refCastNullAny(jsObj));
+
+ assertSame(null, wasm.refCastNullNone(null));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(undefined));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(structSuperObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(structSubObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(arrayObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(funcObj));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(1));
+ assertTraps(kTrapIllegalCast, () => wasm.refCastNullNone(jsObj));
+
+ // br_on_cast
+ assertEquals(0, wasm.brOnCastStructSuper(null));
+ assertEquals(0, wasm.brOnCastStructSuper(undefined));
+ assertEquals(1, wasm.brOnCastStructSuper(structSuperObj));
+ assertEquals(1, wasm.brOnCastStructSuper(structSubObj));
+ assertEquals(0, wasm.brOnCastStructSuper(arrayObj));
+ assertEquals(0, wasm.brOnCastStructSuper(funcObj));
+ assertEquals(0, wasm.brOnCastStructSuper(1));
+ assertEquals(0, wasm.brOnCastStructSuper(jsObj));
+
+ assertEquals(0, wasm.brOnCastStructSub(null));
+ assertEquals(0, wasm.brOnCastStructSub(undefined));
+ assertEquals(0, wasm.brOnCastStructSub(structSuperObj));
+ assertEquals(1, wasm.brOnCastStructSub(structSubObj));
+ assertEquals(0, wasm.brOnCastStructSub(arrayObj));
+ assertEquals(0, wasm.brOnCastStructSub(funcObj));
+ assertEquals(0, wasm.brOnCastStructSub(1));
+ assertEquals(0, wasm.brOnCastStructSub(jsObj));
+
+ assertEquals(0, wasm.brOnCastArray(null));
+ assertEquals(0, wasm.brOnCastArray(undefined));
+ assertEquals(0, wasm.brOnCastArray(structSuperObj));
+ assertEquals(0, wasm.brOnCastArray(structSubObj));
+ assertEquals(1, wasm.brOnCastArray(arrayObj));
+ assertEquals(0, wasm.brOnCastArray(funcObj));
+ assertEquals(0, wasm.brOnCastArray(1));
+ assertEquals(0, wasm.brOnCastArray(jsObj));
+
+ assertEquals(0, wasm.brOnCastI31(null));
+ assertEquals(0, wasm.brOnCastI31(undefined));
+ assertEquals(0, wasm.brOnCastI31(structSuperObj));
+ assertEquals(0, wasm.brOnCastI31(structSubObj));
+ assertEquals(0, wasm.brOnCastI31(arrayObj));
+ assertEquals(0, wasm.brOnCastI31(funcObj));
+ assertEquals(1, wasm.brOnCastI31(1));
+ assertEquals(0, wasm.brOnCastI31(jsObj));
+
+ assertEquals(0, wasm.brOnCastAnyArray(null));
+ assertEquals(0, wasm.brOnCastAnyArray(undefined));
+ assertEquals(0, wasm.brOnCastAnyArray(structSuperObj));
+ assertEquals(0, wasm.brOnCastAnyArray(structSubObj));
+ assertEquals(1, wasm.brOnCastAnyArray(arrayObj));
+ assertEquals(0, wasm.brOnCastAnyArray(funcObj));
+ assertEquals(0, wasm.brOnCastAnyArray(1));
+ assertEquals(0, wasm.brOnCastAnyArray(jsObj));
+
+ assertEquals(0, wasm.brOnCastStruct(null));
+ assertEquals(0, wasm.brOnCastStruct(undefined));
+ assertEquals(1, wasm.brOnCastStruct(structSuperObj));
+ assertEquals(1, wasm.brOnCastStruct(structSubObj));
+ assertEquals(0, wasm.brOnCastStruct(arrayObj));
+ assertEquals(0, wasm.brOnCastStruct(funcObj));
+ assertEquals(0, wasm.brOnCastStruct(1));
+ assertEquals(0, wasm.brOnCastStruct(jsObj));
+
+ assertEquals(0, wasm.brOnCastEq(null));
+ assertEquals(0, wasm.brOnCastEq(undefined));
+ assertEquals(1, wasm.brOnCastEq(structSuperObj));
+ assertEquals(1, wasm.brOnCastEq(structSubObj));
+ assertEquals(1, wasm.brOnCastEq(arrayObj));
+ assertEquals(0, wasm.brOnCastEq(funcObj));
+ assertEquals(1, wasm.brOnCastEq(1));
+ assertEquals(0, wasm.brOnCastEq(jsObj));
+
+ assertEquals(0, wasm.brOnCastAny(null));
+ assertEquals(1, wasm.brOnCastAny(undefined));
+ assertEquals(1, wasm.brOnCastAny(structSuperObj));
+ assertEquals(1, wasm.brOnCastAny(structSubObj));
+ assertEquals(1, wasm.brOnCastAny(arrayObj));
+ assertEquals(1, wasm.brOnCastAny(funcObj));
+ assertEquals(1, wasm.brOnCastAny(1));
+ assertEquals(1, wasm.brOnCastAny(jsObj));
+
+ assertEquals(0, wasm.brOnCastNone(null));
+ assertEquals(0, wasm.brOnCastNone(undefined));
+ assertEquals(0, wasm.brOnCastNone(structSuperObj));
+ assertEquals(0, wasm.brOnCastNone(structSubObj));
+ assertEquals(0, wasm.brOnCastNone(arrayObj));
+ assertEquals(0, wasm.brOnCastNone(funcObj));
+ assertEquals(0, wasm.brOnCastNone(1));
+ assertEquals(0, wasm.brOnCastNone(jsObj));
+
+ // br_on_cast null
+ assertEquals(1, wasm.brOnCastNullStructSuper(null));
+ assertEquals(0, wasm.brOnCastNullStructSuper(undefined));
+ assertEquals(1, wasm.brOnCastNullStructSuper(structSuperObj));
+ assertEquals(1, wasm.brOnCastNullStructSuper(structSubObj));
+ assertEquals(0, wasm.brOnCastNullStructSuper(arrayObj));
+ assertEquals(0, wasm.brOnCastNullStructSuper(funcObj));
+ assertEquals(0, wasm.brOnCastNullStructSuper(1));
+ assertEquals(0, wasm.brOnCastNullStructSuper(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullStructSub(null));
+ assertEquals(0, wasm.brOnCastNullStructSub(undefined));
+ assertEquals(0, wasm.brOnCastNullStructSub(structSuperObj));
+ assertEquals(1, wasm.brOnCastNullStructSub(structSubObj));
+ assertEquals(0, wasm.brOnCastNullStructSub(arrayObj));
+ assertEquals(0, wasm.brOnCastNullStructSub(funcObj));
+ assertEquals(0, wasm.brOnCastNullStructSub(1));
+ assertEquals(0, wasm.brOnCastNullStructSub(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullArray(null));
+ assertEquals(0, wasm.brOnCastNullArray(undefined));
+ assertEquals(0, wasm.brOnCastNullArray(structSuperObj));
+ assertEquals(0, wasm.brOnCastNullArray(structSubObj));
+ assertEquals(1, wasm.brOnCastNullArray(arrayObj));
+ assertEquals(0, wasm.brOnCastNullArray(funcObj));
+ assertEquals(0, wasm.brOnCastNullArray(1));
+ assertEquals(0, wasm.brOnCastNullArray(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullI31(null));
+ assertEquals(0, wasm.brOnCastNullI31(undefined));
+ assertEquals(0, wasm.brOnCastNullI31(structSuperObj));
+ assertEquals(0, wasm.brOnCastNullI31(structSubObj));
+ assertEquals(0, wasm.brOnCastNullI31(arrayObj));
+ assertEquals(0, wasm.brOnCastNullI31(funcObj));
+ assertEquals(1, wasm.brOnCastNullI31(1));
+ assertEquals(0, wasm.brOnCastNullI31(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullAnyArray(null));
+ assertEquals(0, wasm.brOnCastNullAnyArray(undefined));
+ assertEquals(0, wasm.brOnCastNullAnyArray(structSuperObj));
+ assertEquals(0, wasm.brOnCastNullAnyArray(structSubObj));
+ assertEquals(1, wasm.brOnCastNullAnyArray(arrayObj));
+ assertEquals(0, wasm.brOnCastNullAnyArray(funcObj));
+ assertEquals(0, wasm.brOnCastNullAnyArray(1));
+ assertEquals(0, wasm.brOnCastNullAnyArray(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullStruct(null));
+ assertEquals(0, wasm.brOnCastNullStruct(undefined));
+ assertEquals(1, wasm.brOnCastNullStruct(structSuperObj));
+ assertEquals(1, wasm.brOnCastNullStruct(structSubObj));
+ assertEquals(0, wasm.brOnCastNullStruct(arrayObj));
+ assertEquals(0, wasm.brOnCastNullStruct(funcObj));
+ assertEquals(0, wasm.brOnCastNullStruct(1));
+ assertEquals(0, wasm.brOnCastNullStruct(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullEq(null));
+ assertEquals(0, wasm.brOnCastNullEq(undefined));
+ assertEquals(1, wasm.brOnCastNullEq(structSuperObj));
+ assertEquals(1, wasm.brOnCastNullEq(structSubObj));
+ assertEquals(1, wasm.brOnCastNullEq(arrayObj));
+ assertEquals(0, wasm.brOnCastNullEq(funcObj));
+ assertEquals(1, wasm.brOnCastNullEq(1));
+ assertEquals(0, wasm.brOnCastNullEq(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullAny(null));
+ assertEquals(1, wasm.brOnCastNullAny(undefined));
+ assertEquals(1, wasm.brOnCastNullAny(structSuperObj));
+ assertEquals(1, wasm.brOnCastNullAny(structSubObj));
+ assertEquals(1, wasm.brOnCastNullAny(arrayObj));
+ assertEquals(1, wasm.brOnCastNullAny(funcObj));
+ assertEquals(1, wasm.brOnCastNullAny(1));
+ assertEquals(1, wasm.brOnCastNullAny(jsObj));
+
+ assertEquals(1, wasm.brOnCastNullNone(null));
+ assertEquals(0, wasm.brOnCastNullNone(undefined));
+ assertEquals(0, wasm.brOnCastNullNone(structSuperObj));
+ assertEquals(0, wasm.brOnCastNullNone(structSubObj));
+ assertEquals(0, wasm.brOnCastNullNone(arrayObj));
+ assertEquals(0, wasm.brOnCastNullNone(funcObj));
+ assertEquals(0, wasm.brOnCastNullNone(1));
+ assertEquals(0, wasm.brOnCastNullNone(jsObj));
+
+ // br_on_cast_fail
+ assertEquals(1, wasm.brOnCastFailStructSuper(null));
+ assertEquals(1, wasm.brOnCastFailStructSuper(undefined));
+ assertEquals(0, wasm.brOnCastFailStructSuper(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailStructSuper(structSubObj));
+ assertEquals(1, wasm.brOnCastFailStructSuper(arrayObj));
+ assertEquals(1, wasm.brOnCastFailStructSuper(funcObj));
+ assertEquals(1, wasm.brOnCastFailStructSuper(1));
+ assertEquals(1, wasm.brOnCastFailStructSuper(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailStructSub(null));
+ assertEquals(1, wasm.brOnCastFailStructSub(undefined));
+ assertEquals(1, wasm.brOnCastFailStructSub(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailStructSub(structSubObj));
+ assertEquals(1, wasm.brOnCastFailStructSub(arrayObj));
+ assertEquals(1, wasm.brOnCastFailStructSub(funcObj));
+ assertEquals(1, wasm.brOnCastFailStructSub(1));
+ assertEquals(1, wasm.brOnCastFailStructSub(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailArray(null));
+ assertEquals(1, wasm.brOnCastFailArray(undefined));
+ assertEquals(1, wasm.brOnCastFailArray(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailArray(structSubObj));
+ assertEquals(0, wasm.brOnCastFailArray(arrayObj));
+ assertEquals(1, wasm.brOnCastFailArray(funcObj));
+ assertEquals(1, wasm.brOnCastFailArray(1));
+ assertEquals(1, wasm.brOnCastFailArray(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailI31(null));
+ assertEquals(1, wasm.brOnCastFailI31(undefined));
+ assertEquals(1, wasm.brOnCastFailI31(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailI31(structSubObj));
+ assertEquals(1, wasm.brOnCastFailI31(arrayObj));
+ assertEquals(1, wasm.brOnCastFailI31(funcObj));
+ assertEquals(0, wasm.brOnCastFailI31(1));
+ assertEquals(1, wasm.brOnCastFailI31(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailAnyArray(null));
+ assertEquals(1, wasm.brOnCastFailAnyArray(undefined));
+ assertEquals(1, wasm.brOnCastFailAnyArray(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailAnyArray(structSubObj));
+ assertEquals(0, wasm.brOnCastFailAnyArray(arrayObj));
+ assertEquals(1, wasm.brOnCastFailAnyArray(funcObj));
+ assertEquals(1, wasm.brOnCastFailAnyArray(1));
+ assertEquals(1, wasm.brOnCastFailAnyArray(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailStruct(null));
+ assertEquals(1, wasm.brOnCastFailStruct(undefined));
+ assertEquals(0, wasm.brOnCastFailStruct(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailStruct(structSubObj));
+ assertEquals(1, wasm.brOnCastFailStruct(arrayObj));
+ assertEquals(1, wasm.brOnCastFailStruct(funcObj));
+ assertEquals(1, wasm.brOnCastFailStruct(1));
+ assertEquals(1, wasm.brOnCastFailStruct(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailEq(null));
+ assertEquals(1, wasm.brOnCastFailEq(undefined));
+ assertEquals(0, wasm.brOnCastFailEq(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailEq(structSubObj));
+ assertEquals(0, wasm.brOnCastFailEq(arrayObj));
+ assertEquals(1, wasm.brOnCastFailEq(funcObj));
+ assertEquals(0, wasm.brOnCastFailEq(1));
+ assertEquals(1, wasm.brOnCastFailEq(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailAny(null));
+ assertEquals(0, wasm.brOnCastFailAny(undefined));
+ assertEquals(0, wasm.brOnCastFailAny(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailAny(structSubObj));
+ assertEquals(0, wasm.brOnCastFailAny(arrayObj));
+ assertEquals(0, wasm.brOnCastFailAny(funcObj));
+ assertEquals(0, wasm.brOnCastFailAny(1));
+ assertEquals(0, wasm.brOnCastFailAny(jsObj));
+
+ assertEquals(1, wasm.brOnCastFailNone(null));
+ assertEquals(1, wasm.brOnCastFailNone(undefined));
+ assertEquals(1, wasm.brOnCastFailNone(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailNone(structSubObj));
+ assertEquals(1, wasm.brOnCastFailNone(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNone(funcObj));
+ assertEquals(1, wasm.brOnCastFailNone(1));
+ assertEquals(1, wasm.brOnCastFailNone(jsObj));
+
+ // br_on_cast_fail null
+ assertEquals(0, wasm.brOnCastFailNullStructSuper(null));
+ assertEquals(1, wasm.brOnCastFailNullStructSuper(undefined));
+ assertEquals(0, wasm.brOnCastFailNullStructSuper(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailNullStructSuper(structSubObj));
+ assertEquals(1, wasm.brOnCastFailNullStructSuper(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullStructSuper(funcObj));
+ assertEquals(1, wasm.brOnCastFailNullStructSuper(1));
+ assertEquals(1, wasm.brOnCastFailNullStructSuper(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullStructSub(null));
+ assertEquals(1, wasm.brOnCastFailNullStructSub(undefined));
+ assertEquals(1, wasm.brOnCastFailNullStructSub(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailNullStructSub(structSubObj));
+ assertEquals(1, wasm.brOnCastFailNullStructSub(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullStructSub(funcObj));
+ assertEquals(1, wasm.brOnCastFailNullStructSub(1));
+ assertEquals(1, wasm.brOnCastFailNullStructSub(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullArray(null));
+ assertEquals(1, wasm.brOnCastFailNullArray(undefined));
+ assertEquals(1, wasm.brOnCastFailNullArray(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailNullArray(structSubObj));
+ assertEquals(0, wasm.brOnCastFailNullArray(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullArray(funcObj));
+ assertEquals(1, wasm.brOnCastFailNullArray(1));
+ assertEquals(1, wasm.brOnCastFailNullArray(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullI31(null));
+ assertEquals(1, wasm.brOnCastFailNullI31(undefined));
+ assertEquals(1, wasm.brOnCastFailNullI31(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailNullI31(structSubObj));
+ assertEquals(1, wasm.brOnCastFailNullI31(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullI31(funcObj));
+ assertEquals(0, wasm.brOnCastFailNullI31(1));
+ assertEquals(1, wasm.brOnCastFailNullI31(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullAnyArray(null));
+ assertEquals(1, wasm.brOnCastFailNullAnyArray(undefined));
+ assertEquals(1, wasm.brOnCastFailNullAnyArray(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailNullAnyArray(structSubObj));
+ assertEquals(0, wasm.brOnCastFailNullAnyArray(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullAnyArray(funcObj));
+ assertEquals(1, wasm.brOnCastFailNullAnyArray(1));
+ assertEquals(1, wasm.brOnCastFailNullAnyArray(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullStruct(null));
+ assertEquals(1, wasm.brOnCastFailNullStruct(undefined));
+ assertEquals(0, wasm.brOnCastFailNullStruct(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailNullStruct(structSubObj));
+ assertEquals(1, wasm.brOnCastFailNullStruct(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullStruct(funcObj));
+ assertEquals(1, wasm.brOnCastFailNullStruct(1));
+ assertEquals(1, wasm.brOnCastFailNullStruct(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullEq(null));
+ assertEquals(1, wasm.brOnCastFailNullEq(undefined));
+ assertEquals(0, wasm.brOnCastFailNullEq(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailNullEq(structSubObj));
+ assertEquals(0, wasm.brOnCastFailNullEq(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullEq(funcObj));
+ assertEquals(0, wasm.brOnCastFailNullEq(1));
+ assertEquals(1, wasm.brOnCastFailNullEq(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullAny(null));
+ assertEquals(0, wasm.brOnCastFailNullAny(undefined));
+ assertEquals(0, wasm.brOnCastFailNullAny(structSuperObj));
+ assertEquals(0, wasm.brOnCastFailNullAny(structSubObj));
+ assertEquals(0, wasm.brOnCastFailNullAny(arrayObj));
+ assertEquals(0, wasm.brOnCastFailNullAny(funcObj));
+ assertEquals(0, wasm.brOnCastFailNullAny(1));
+ assertEquals(0, wasm.brOnCastFailNullAny(jsObj));
+
+ assertEquals(0, wasm.brOnCastFailNullNone(null));
+ assertEquals(1, wasm.brOnCastFailNullNone(undefined));
+ assertEquals(1, wasm.brOnCastFailNullNone(structSuperObj));
+ assertEquals(1, wasm.brOnCastFailNullNone(structSubObj));
+ assertEquals(1, wasm.brOnCastFailNullNone(arrayObj));
+ assertEquals(1, wasm.brOnCastFailNullNone(funcObj));
+ assertEquals(1, wasm.brOnCastFailNullNone(1));
+ assertEquals(1, wasm.brOnCastFailNullNone(jsObj));
})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-casts-subtypes.js b/deps/v8/test/mjsunit/wasm/gc-casts-subtypes.js
index c3deb81e6d..efbc9fda32 100644
--- a/deps/v8/test/mjsunit/wasm/gc-casts-subtypes.js
+++ b/deps/v8/test/mjsunit/wasm/gc-casts-subtypes.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-gc --experimental-wasm-type-reflection
-// Flags: --no-wasm-gc-structref-as-dataref
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
@@ -11,9 +10,15 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function RefCastFromNull() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
+ builder.startRecGroup();
let structSuper = builder.addStruct([makeField(kWasmI32, true)]);
+ builder.endRecGroup();
+ builder.startRecGroup();
let structSub = builder.addStruct([makeField(kWasmI32, true)], structSuper);
+ builder.endRecGroup();
+ builder.startRecGroup();
let array = builder.addArray(kWasmI32);
+ builder.endRecGroup();
// Note: Casting between unrelated types is allowed as long as the types
// belong to the same type hierarchy (func / any / extern). In these cases the
@@ -50,6 +55,19 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
kGCPrefix, kExprRefCast, targetType & kLeb128Mask,
kExprDrop,
]).exportFunc();
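+    // br_on_cast with a non-nullable target type must not branch on a null
+    // value; the wrapper returns 1 if the branch was taken and 0 otherwise.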
+ builder.addFunction('branchNull' + testName, makeSig([], [kWasmI32]))
+ .addLocals(wasmRefNullType(sourceType), 1)
+ .addBody([
+ kExprBlock, kWasmRef, targetType & kLeb128Mask,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCast, 0, targetType & kLeb128Mask,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
}
let instance = builder.instantiate();
@@ -58,13 +76,14 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
for (let [sourceType, targetType, testName] of tests) {
assertEquals(0, wasm['testNull' + testName]());
assertTraps(kTrapIllegalCast, wasm['castNull' + testName]);
+ assertEquals(0, wasm['branchNull' + testName]());
}
})();
(function RefTestFuncRef() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let sigSuper = builder.addType(makeSig([kWasmI32], []));
+ let sigSuper = builder.addType(makeSig([kWasmI32], []), kNoSuperType, false);
let sigSub = builder.addType(makeSig([kWasmI32], []), sigSuper);
builder.addFunction('fctSuper', sigSuper).addBody([]).exportFunc();
@@ -77,21 +96,35 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
kExprLocalGet, 0, kGCPrefix, kExprRefTest, sigSuper,
kExprLocalGet, 0, kGCPrefix, kExprRefTest, sigSub,
]).exportFunc();
+ builder.addFunction('testNullFromFuncRef',
+ makeSig([kWasmFuncRef], [kWasmI32, kWasmI32, kWasmI32, kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0, kGCPrefix, kExprRefTestNull, kFuncRefCode,
+ kExprLocalGet, 0, kGCPrefix, kExprRefTestNull, kNullFuncRefCode,
+ kExprLocalGet, 0, kGCPrefix, kExprRefTestNull, sigSuper,
+ kExprLocalGet, 0, kGCPrefix, kExprRefTestNull, sigSub,
+ ]).exportFunc();
let instance = builder.instantiate();
let wasm = instance.exports;
let jsFct = new WebAssembly.Function(
{parameters:['i32', 'i32'], results: ['i32']},
function mul(a, b) { return a * b; });
+ assertEquals([0, 0, 0, 0], wasm.testFromFuncRef(null));
assertEquals([1, 0, 0, 0], wasm.testFromFuncRef(jsFct));
assertEquals([1, 0, 1, 0], wasm.testFromFuncRef(wasm.fctSuper));
assertEquals([1, 0, 1, 1], wasm.testFromFuncRef(wasm.fctSub));
+
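+  // ref.test null counts null as a member of every target type in the func
+  // hierarchy, hence [1, 1, 1, 1] below.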
+ assertEquals([1, 1, 1, 1], wasm.testNullFromFuncRef(null));
+ assertEquals([1, 0, 0, 0], wasm.testNullFromFuncRef(jsFct));
+ assertEquals([1, 0, 1, 0], wasm.testNullFromFuncRef(wasm.fctSuper));
+ assertEquals([1, 0, 1, 1], wasm.testNullFromFuncRef(wasm.fctSub));
})();
(function RefCastFuncRef() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let sigSuper = builder.addType(makeSig([kWasmI32], []));
+ let sigSuper = builder.addType(makeSig([kWasmI32], []), kNoSuperType, false);
let sigSub = builder.addType(makeSig([kWasmI32], []), sigSuper);
builder.addFunction('fctSuper', sigSuper).addBody([]).exportFunc();
@@ -110,26 +143,222 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCast, sigSub])
.exportFunc();
+ builder.addFunction('castNullToFuncRef',
+ makeSig([kWasmFuncRef], [kWasmFuncRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, kFuncRefCode])
+ .exportFunc();
+ builder.addFunction('castNullToNullFuncRef',
+ makeSig([kWasmFuncRef], [kWasmFuncRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, kNullFuncRefCode])
+ .exportFunc();
+ builder.addFunction('castNullToSuper',
+ makeSig([kWasmFuncRef], [kWasmFuncRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, sigSuper])
+ .exportFunc();
+ builder.addFunction('castNullToSub', makeSig([kWasmFuncRef], [kWasmFuncRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, sigSub])
+ .exportFunc();
+
let instance = builder.instantiate();
let wasm = instance.exports;
let jsFct = new WebAssembly.Function(
{parameters:['i32', 'i32'], results: ['i32']},
function mul(a, b) { return a * b; });
+
+ assertTraps(kTrapIllegalCast, () => wasm.castToFuncRef(null));
assertSame(jsFct, wasm.castToFuncRef(jsFct));
assertSame(wasm.fctSuper, wasm.castToFuncRef(wasm.fctSuper));
assertSame(wasm.fctSub, wasm.castToFuncRef(wasm.fctSub));
- assertTraps(kTrapIllegalCast, () => wasm.castToNullFuncRef(jsFct));
- assertTraps(kTrapIllegalCast, () => wasm.castToNullFuncRef(wasm.fctSuper));
- assertTraps(kTrapIllegalCast, () => wasm.castToNullFuncRef(wasm.fctSub));
+ assertSame(null, wasm.castNullToFuncRef(null));
+ assertSame(jsFct, wasm.castNullToFuncRef(jsFct));
+ assertSame(wasm.fctSuper, wasm.castNullToFuncRef(wasm.fctSuper));
+ assertSame(wasm.fctSub, wasm.castNullToFuncRef(wasm.fctSub));
+
+ assertSame(null, wasm.castNullToNullFuncRef(null));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToNullFuncRef(jsFct));
+ assertTraps(kTrapIllegalCast,
+ () => wasm.castNullToNullFuncRef(wasm.fctSuper));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToNullFuncRef(wasm.fctSub));
+ assertTraps(kTrapIllegalCast, () => wasm.castToSuper(null));
assertTraps(kTrapIllegalCast, () => wasm.castToSuper(jsFct));
assertSame(wasm.fctSuper, wasm.castToSuper(wasm.fctSuper));
assertSame(wasm.fctSub, wasm.castToSuper(wasm.fctSub));
+ assertSame(null, wasm.castNullToSuper(null));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToSuper(jsFct));
+ assertSame(wasm.fctSuper, wasm.castNullToSuper(wasm.fctSuper));
+ assertSame(wasm.fctSub, wasm.castNullToSuper(wasm.fctSub));
+
+ assertTraps(kTrapIllegalCast, () => wasm.castToSub(null));
assertTraps(kTrapIllegalCast, () => wasm.castToSub(jsFct));
assertTraps(kTrapIllegalCast, () => wasm.castToSub(wasm.fctSuper));
assertSame(wasm.fctSub, wasm.castToSub(wasm.fctSub));
+
+ assertSame(null, wasm.castNullToSub(null));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToSub(jsFct));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToSub(wasm.fctSuper));
+ assertSame(wasm.fctSub, wasm.castNullToSub(wasm.fctSub));
+})();
+
+(function BrOnCastFuncRef() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sigSuper = builder.addType(makeSig([kWasmI32], []), kNoSuperType, false);
+ let sigSub = builder.addType(makeSig([kWasmI32], []), sigSuper);
+
+ builder.addFunction('fctSuper', sigSuper).addBody([]).exportFunc();
+ builder.addFunction('fctSub', sigSub).addBody([]).exportFunc();
+ let targets = {
+ "funcref": kFuncRefCode,
+ "nullfuncref": kNullFuncRefCode,
+ "super": sigSuper,
+ "sub": sigSub
+ };
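+  // Each wrapper below returns 1 if the br_on_cast* branch was taken and
+  // 0 otherwise.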
+ for (const [name, typeCode] of Object.entries(targets)) {
+ builder.addFunction(`brOnCast_${name}`,
+ makeSig([kWasmFuncRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, typeCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCast, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+
+ builder.addFunction(`brOnCastNull_${name}`,
+ makeSig([kWasmFuncRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, typeCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastNull, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+
+ builder.addFunction(`brOnCastFail_${name}`,
+ makeSig([kWasmFuncRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kFuncRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastFail, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+
+ builder.addFunction(`brOnCastFailNull_${name}`,
+ makeSig([kWasmFuncRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, kFuncRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastFailNull, 0, typeCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ]).exportFunc();
+ }
+
+ let instance = builder.instantiate();
+ let wasm = instance.exports;
+ let jsFct = new WebAssembly.Function(
+ {parameters:['i32', 'i32'], results: ['i32']},
+ function mul(a, b) { return a * b; });
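+  // jsFct has signature (i32, i32) -> i32, so it is a valid funcref but
+  // matches neither sigSuper nor sigSub.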
+ assertEquals(0, wasm.brOnCast_funcref(null));
+ assertEquals(0, wasm.brOnCast_nullfuncref(null));
+ assertEquals(0, wasm.brOnCast_super(null));
+ assertEquals(0, wasm.brOnCast_sub(null));
+
+ assertEquals(1, wasm.brOnCast_funcref(jsFct));
+ assertEquals(0, wasm.brOnCast_nullfuncref(jsFct));
+ assertEquals(0, wasm.brOnCast_super(jsFct));
+ assertEquals(0, wasm.brOnCast_sub(jsFct));
+
+ assertEquals(1, wasm.brOnCast_funcref(wasm.fctSuper));
+ assertEquals(0, wasm.brOnCast_nullfuncref(wasm.fctSuper));
+ assertEquals(1, wasm.brOnCast_super(wasm.fctSuper));
+ assertEquals(0, wasm.brOnCast_sub(wasm.fctSuper));
+
+ assertEquals(1, wasm.brOnCast_funcref(wasm.fctSub));
+ assertEquals(0, wasm.brOnCast_nullfuncref(wasm.fctSub));
+ assertEquals(1, wasm.brOnCast_super(wasm.fctSub));
+ assertEquals(1, wasm.brOnCast_sub(wasm.fctSub));
+
+ assertEquals(1, wasm.brOnCastNull_funcref(null));
+ assertEquals(1, wasm.brOnCastNull_nullfuncref(null));
+ assertEquals(1, wasm.brOnCastNull_super(null));
+ assertEquals(1, wasm.brOnCastNull_sub(null));
+
+ assertEquals(1, wasm.brOnCastNull_funcref(jsFct));
+ assertEquals(0, wasm.brOnCastNull_nullfuncref(jsFct));
+ assertEquals(0, wasm.brOnCastNull_super(jsFct));
+ assertEquals(0, wasm.brOnCastNull_sub(jsFct));
+
+ assertEquals(1, wasm.brOnCastNull_funcref(wasm.fctSuper));
+ assertEquals(0, wasm.brOnCastNull_nullfuncref(wasm.fctSuper));
+ assertEquals(1, wasm.brOnCastNull_super(wasm.fctSuper));
+ assertEquals(0, wasm.brOnCastNull_sub(wasm.fctSuper));
+
+ assertEquals(1, wasm.brOnCastNull_funcref(wasm.fctSub));
+ assertEquals(0, wasm.brOnCastNull_nullfuncref(wasm.fctSub));
+ assertEquals(1, wasm.brOnCastNull_super(wasm.fctSub));
+ assertEquals(1, wasm.brOnCastNull_sub(wasm.fctSub));
+
+ assertEquals(1, wasm.brOnCastFail_funcref(null));
+ assertEquals(1, wasm.brOnCastFail_nullfuncref(null));
+ assertEquals(1, wasm.brOnCastFail_super(null));
+ assertEquals(1, wasm.brOnCastFail_sub(null));
+
+ assertEquals(0, wasm.brOnCastFail_funcref(jsFct));
+ assertEquals(1, wasm.brOnCastFail_nullfuncref(jsFct));
+ assertEquals(1, wasm.brOnCastFail_super(jsFct));
+ assertEquals(1, wasm.brOnCastFail_sub(jsFct));
+
+ assertEquals(0, wasm.brOnCastFail_funcref(wasm.fctSuper));
+ assertEquals(1, wasm.brOnCastFail_nullfuncref(wasm.fctSuper));
+ assertEquals(0, wasm.brOnCastFail_super(wasm.fctSuper));
+ assertEquals(1, wasm.brOnCastFail_sub(wasm.fctSuper));
+
+ assertEquals(0, wasm.brOnCastFail_funcref(wasm.fctSub));
+ assertEquals(1, wasm.brOnCastFail_nullfuncref(wasm.fctSub));
+ assertEquals(0, wasm.brOnCastFail_super(wasm.fctSub));
+ assertEquals(0, wasm.brOnCastFail_sub(wasm.fctSub));
+
+  // br_on_cast_fail null
+ assertEquals(0, wasm.brOnCastFailNull_funcref(null));
+ assertEquals(0, wasm.brOnCastFailNull_nullfuncref(null));
+ assertEquals(0, wasm.brOnCastFailNull_super(null));
+ assertEquals(0, wasm.brOnCastFailNull_sub(null));
+
+ assertEquals(0, wasm.brOnCastFailNull_funcref(jsFct));
+ assertEquals(1, wasm.brOnCastFailNull_nullfuncref(jsFct));
+ assertEquals(1, wasm.brOnCastFailNull_super(jsFct));
+ assertEquals(1, wasm.brOnCastFailNull_sub(jsFct));
+
+ assertEquals(0, wasm.brOnCastFailNull_funcref(wasm.fctSuper));
+ assertEquals(1, wasm.brOnCastFailNull_nullfuncref(wasm.fctSuper));
+ assertEquals(0, wasm.brOnCastFailNull_super(wasm.fctSuper));
+ assertEquals(1, wasm.brOnCastFailNull_sub(wasm.fctSuper));
+
+ assertEquals(0, wasm.brOnCastFailNull_funcref(wasm.fctSub));
+ assertEquals(1, wasm.brOnCastFailNull_nullfuncref(wasm.fctSub));
+ assertEquals(0, wasm.brOnCastFailNull_super(wasm.fctSub));
+ assertEquals(0, wasm.brOnCastFailNull_sub(wasm.fctSub));
})();
(function RefTestExternRef() {
@@ -143,6 +372,13 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
kExprLocalGet, 0, kGCPrefix, kExprRefTest, kNullExternRefCode,
]).exportFunc();
+ builder.addFunction('testNullExternRef',
+ makeSig([kWasmExternRef], [kWasmI32, kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0, kGCPrefix, kExprRefTestNull, kExternRefCode,
+ kExprLocalGet, 0, kGCPrefix, kExprRefTestNull, kNullExternRefCode,
+ ]).exportFunc();
+
let instance = builder.instantiate();
let wasm = instance.exports;
assertEquals([0, 0], wasm.testExternRef(null));
@@ -150,6 +386,12 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals([1, 0], wasm.testExternRef(1));
assertEquals([1, 0], wasm.testExternRef({}));
assertEquals([1, 0], wasm.testExternRef(wasm.testExternRef));
+
+ assertEquals([1, 1], wasm.testNullExternRef(null));
+ assertEquals([1, 0], wasm.testNullExternRef(undefined));
+ assertEquals([1, 0], wasm.testNullExternRef(1));
+ assertEquals([1, 0], wasm.testNullExternRef({}));
+ assertEquals([1, 0], wasm.testNullExternRef(wasm.testExternRef));
})();
(function RefCastExternRef() {
@@ -164,9 +406,18 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
makeSig([kWasmExternRef], [kWasmExternRef]))
.addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCast, kNullExternRefCode])
.exportFunc();
+ builder.addFunction('castNullToExternRef',
+ makeSig([kWasmExternRef], [kWasmExternRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, kExternRefCode])
+ .exportFunc();
+ builder.addFunction('castNullToNullExternRef',
+ makeSig([kWasmExternRef], [kWasmExternRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprRefCastNull, kNullExternRefCode])
+ .exportFunc();
let instance = builder.instantiate();
let wasm = instance.exports;
+
assertTraps(kTrapIllegalCast, () => wasm.castToExternRef(null));
assertEquals(undefined, wasm.castToExternRef(undefined));
assertEquals(1, wasm.castToExternRef(1));
@@ -180,8 +431,196 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertTraps(kTrapIllegalCast, () => wasm.castToNullExternRef(obj));
assertTraps(kTrapIllegalCast,
() => wasm.castToNullExternRef(wasm.castToExternRef));
+
+ assertSame(null, wasm.castNullToExternRef(null));
+ assertEquals(undefined, wasm.castNullToExternRef(undefined));
+ assertEquals(1, wasm.castNullToExternRef(1));
+ assertSame(obj, wasm.castNullToExternRef(obj));
+ assertSame(wasm.castToExternRef,
+ wasm.castNullToExternRef(wasm.castToExternRef));
+
+ assertSame(null, wasm.castNullToNullExternRef(null));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToNullExternRef(undefined));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToNullExternRef(1));
+ assertTraps(kTrapIllegalCast, () => wasm.castNullToNullExternRef(obj));
+ assertTraps(kTrapIllegalCast,
+ () => wasm.castNullToNullExternRef(wasm.castToExternRef));
+})();
+
+(function BrOnCastExternRef() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
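+  // As in the tests above, each wrapper returns 1 if the br_on_cast* branch
+  // was taken and 0 otherwise.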
+ builder.addFunction('castToExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, kExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCast, 0, kExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+ builder.addFunction('castToNullExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, kNullExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCast, 0, kNullExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ builder.addFunction('castNullToExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, kExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastNull, 0, kExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+ builder.addFunction('castNullToNullExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, kNullExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastNull, 0, kNullExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ builder.addFunction('castFailToExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, kExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastFail, 0, kExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+ builder.addFunction('castFailToNullExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, kExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastFail, 0, kNullExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ builder.addFunction('castFailNullToExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, kExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastFailNull, 0, kExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+ builder.addFunction('castFailNullToNullExternRef',
+ makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, kExternRefCode,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprBrOnCastFailNull, 0, kNullExternRefCode,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+ let wasm = instance.exports;
+ let obj = {};
+
+ assertEquals(0, wasm.castToExternRef(null));
+ assertEquals(1, wasm.castToExternRef(undefined));
+ assertEquals(1, wasm.castToExternRef(1));
+ assertEquals(1, wasm.castToExternRef(obj));
+ assertEquals(1, wasm.castToExternRef(wasm.castToExternRef));
+
+ assertEquals(0, wasm.castToNullExternRef(null));
+ assertEquals(0, wasm.castToNullExternRef(undefined));
+ assertEquals(0, wasm.castToNullExternRef(1));
+ assertEquals(0, wasm.castToNullExternRef(obj));
+ assertEquals(0, wasm.castToNullExternRef(wasm.castToExternRef));
+
+ assertEquals(1, wasm.castNullToExternRef(null));
+ assertEquals(1, wasm.castNullToExternRef(undefined));
+ assertEquals(1, wasm.castNullToExternRef(1));
+ assertEquals(1, wasm.castNullToExternRef(obj));
+ assertEquals(1, wasm.castNullToExternRef(wasm.castToExternRef));
+
+ assertEquals(1, wasm.castNullToNullExternRef(null));
+ assertEquals(0, wasm.castNullToNullExternRef(undefined));
+ assertEquals(0, wasm.castNullToNullExternRef(1));
+ assertEquals(0, wasm.castNullToNullExternRef(obj));
+ assertEquals(0, wasm.castNullToNullExternRef(wasm.castToExternRef));
+
+ assertEquals(1, wasm.castFailToExternRef(null));
+ assertEquals(0, wasm.castFailToExternRef(undefined));
+ assertEquals(0, wasm.castFailToExternRef(1));
+ assertEquals(0, wasm.castFailToExternRef(obj));
+ assertEquals(0, wasm.castFailToExternRef(wasm.castToExternRef));
+
+ assertEquals(1, wasm.castFailToNullExternRef(null));
+ assertEquals(1, wasm.castFailToNullExternRef(undefined));
+ assertEquals(1, wasm.castFailToNullExternRef(1));
+ assertEquals(1, wasm.castFailToNullExternRef(obj));
+ assertEquals(1, wasm.castFailToNullExternRef(wasm.castToExternRef));
+
+ assertEquals(0, wasm.castFailNullToExternRef(null));
+ assertEquals(0, wasm.castFailNullToExternRef(undefined));
+ assertEquals(0, wasm.castFailNullToExternRef(1));
+ assertEquals(0, wasm.castFailNullToExternRef(obj));
+ assertEquals(0, wasm.castFailNullToExternRef(wasm.castToExternRef));
+
+ assertEquals(0, wasm.castFailNullToNullExternRef(null));
+ assertEquals(1, wasm.castFailNullToNullExternRef(undefined));
+ assertEquals(1, wasm.castFailNullToNullExternRef(1));
+ assertEquals(1, wasm.castFailNullToNullExternRef(obj));
+ assertEquals(1, wasm.castFailNullToNullExternRef(wasm.castToExternRef));
})();
+
(function RefTestAnyRefHierarchy() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -189,6 +628,11 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let structSub = builder.addStruct([makeField(kWasmI32, true)], structSuper);
let array = builder.addArray(kWasmI32);
+  // Helpers for creating a true externref value from within wasm.
+ let createExternSig = builder.addType(makeSig([], [kWasmExternRef]));
+ let createExternIdx = builder.addImport('import', 'createExtern', createExternSig);
+ let createExtern = () => undefined;
+
let types = {
any: kWasmAnyRef,
eq: kWasmEqRef,
@@ -197,6 +641,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
array: wasmRefNullType(array),
structSuper: wasmRefNullType(structSuper),
structSub: wasmRefNullType(structSub),
+ nullref: kWasmNullRef,
};
let createBodies = {
@@ -205,6 +650,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
structSuper: [kExprI32Const, 42, kGCPrefix, kExprStructNew, structSuper],
structSub: [kExprI32Const, 42, kGCPrefix, kExprStructNew, structSub],
array: [kExprI32Const, 42, kGCPrefix, kExprArrayNewFixed, array, 1],
+ any: [kExprCallFunction, createExternIdx, kGCPrefix, kExprExternInternalize],
};
// Each Test lists the following:
@@ -216,15 +662,16 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
let tests = [
{
source: 'any',
- values: ['nullref', 'i31ref', 'structSuper', 'structSub', 'array'],
+ values: ['nullref', 'i31ref', 'structSuper', 'structSub', 'array', 'any'],
targets: {
- any: ['i31ref', 'structSuper', 'structSub', 'array'],
+ any: ['i31ref', 'structSuper', 'structSub', 'array', 'any'],
eq: ['i31ref', 'structSuper', 'structSub', 'array'],
struct: ['structSuper', 'structSub'],
anyArray: ['array'],
array: ['array'],
structSuper: ['structSuper', 'structSub'],
structSub: ['structSub'],
+ nullref: [],
}
},
{
@@ -237,6 +684,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
array: ['array'],
structSuper: ['structSuper', 'structSub'],
structSub: ['structSub'],
+ nullref: [],
}
},
{
@@ -249,6 +697,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
array: ['array'],
structSuper: ['structSuper', 'structSub'],
structSub: ['structSub'],
+ nullref: [],
}
},
{
@@ -261,6 +710,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
array: ['array'],
structSuper: [],
structSub: [],
+ nullref: [],
}
},
{
@@ -273,6 +723,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
array: [],
structSuper: ['structSuper', 'structSub'],
structSub: ['structSub'],
+ nullref: [],
}
},
];
@@ -298,14 +749,14 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
makeSig([wasmRefType(creatorType)], [kWasmI32]))
.addBody([
kExprLocalGet, 0,
- kExprCallRef, creatorType,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
kGCPrefix, kExprRefTest, heapType,
]).exportFunc();
builder.addFunction(`test_null_${test.source}_to_${target}`,
makeSig([wasmRefType(creatorType)], [kWasmI32]))
.addBody([
kExprLocalGet, 0,
- kExprCallRef, creatorType,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
kGCPrefix, kExprRefTestNull, heapType,
]).exportFunc();
@@ -313,7 +764,7 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
makeSig([wasmRefType(creatorType)], [kWasmI32]))
.addBody([
kExprLocalGet, 0,
- kExprCallRef, creatorType,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
kGCPrefix, kExprRefCast, heapType,
kExprRefIsNull, // We can't expose the cast object to JS in most cases.
]).exportFunc();
@@ -322,14 +773,79 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
makeSig([wasmRefType(creatorType)], [kWasmI32]))
.addBody([
kExprLocalGet, 0,
- kExprCallRef, creatorType,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
kGCPrefix, kExprRefCastNull, heapType,
kExprRefIsNull, // We can't expose the cast object to JS in most cases.
]).exportFunc();
+
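+      // The br_on_cast* wrappers return 1 if the branch was taken and
+      // 0 otherwise; the expected results are checked in the loop below.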
+ builder.addFunction(`brOnCast_${test.source}_to_${target}`,
+ makeSig([wasmRefType(creatorType)], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, heapType,
+ kExprLocalGet, 0,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
+ kGCPrefix, kExprBrOnCast, 0, heapType,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ builder.addFunction(`brOnCastNull_${test.source}_to_${target}`,
+ makeSig([wasmRefType(creatorType)], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, heapType,
+ kExprLocalGet, 0,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
+ kGCPrefix, kExprBrOnCastNull, 0, heapType,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ let sourceHeapType = sourceType.heap_type ?? (sourceType & kLeb128Mask);
+ builder.addFunction(`brOnCastFail_${test.source}_to_${target}`,
+ makeSig([wasmRefType(creatorType)], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRefNull, sourceHeapType,
+ kExprLocalGet, 0,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
+ kGCPrefix, kExprBrOnCastFail, 0, heapType,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
+
+ builder.addFunction(`brOnCastFailNull_${test.source}_to_${target}`,
+ makeSig([wasmRefType(creatorType)], [kWasmI32]))
+ .addBody([
+ kExprBlock, kWasmRef, sourceHeapType,
+ kExprLocalGet, 0,
+ kExprCallRef, ...wasmUnsignedLeb(creatorType),
+ kGCPrefix, kExprBrOnCastFailNull, 0, heapType,
+ kExprI32Const, 0,
+ kExprReturn,
+ kExprEnd,
+ kExprDrop,
+ kExprI32Const, 1,
+ kExprReturn,
+ ])
+ .exportFunc();
}
}
- let instance = builder.instantiate();
+ let instance = builder.instantiate({import: {createExtern}});
let wasm = instance.exports;
for (let test of tests) {
@@ -358,6 +874,21 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
} else {
assertTraps(kTrapIllegalCast, () => castNull(create_value));
}
+
+ print(`Test br_on_cast: ${test.source}(${value}) -> ${target}`);
+ res = wasm[`brOnCast_${test.source}_to_${target}`](create_value);
+ assertEquals(validValues.includes(value) ? 1 : 0, res);
+ print(`Test br_on_cast null: ${test.source}(${value}) -> ${target}`);
+ res = wasm[`brOnCastNull_${test.source}_to_${target}`](create_value);
+ assertEquals(
+ validValues.includes(value) || value == "nullref" ? 1 : 0, res);
+
+ print(`Test br_on_cast_fail: ${test.source}(${value}) -> ${target}`);
+ res = wasm[`brOnCastFail_${test.source}_to_${target}`](create_value);
+ assertEquals(!validValues.includes(value) || value == "nullref" ? 1 : 0, res);
+ print(`Test br_on_cast_fail null: ${test.source}(${value}) -> ${target}`);
+ res = wasm[`brOnCastFailNull_${test.source}_to_${target}`](create_value);
+ assertEquals(!validValues.includes(value) && value != "nullref" ? 1 : 0, res);
}
}
}
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-async-debugger.js b/deps/v8/test/mjsunit/wasm/gc-js-interop-async-debugger.js
new file mode 100644
index 0000000000..b7bedc01bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-async-debugger.js
@@ -0,0 +1,11 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --allow-natives-syntax
+
+// The implementation of Promises currently takes a different path (a C++
+// runtime function instead of a Torque builtin) when the debugger is
+// enabled, so exercise that path in this variant of the test.
+d8.debugger.enable();
+d8.file.execute('test/mjsunit/wasm/gc-js-interop-async.js');
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-collections.js b/deps/v8/test/mjsunit/wasm/gc-js-interop-collections.js
index 757691ab11..4bbf9aa248 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop-collections.js
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-collections.js
@@ -10,7 +10,7 @@ let {struct, array} = CreateWasmObjects();
for (const wasm_obj of [struct, array]) {
// Test Array.
- testThrowsRepeated(() => Array.from(wasm_obj), TypeError);
+ repeated(() => assertEquals([], Array.from(wasm_obj)));
repeated(() => assertFalse(Array.isArray(wasm_obj)));
repeated(() => assertEquals([wasm_obj], Array.of(wasm_obj)));
testThrowsRepeated(() => [1, 2].at(wasm_obj), TypeError);
@@ -65,7 +65,7 @@ for (const wasm_obj of [struct, array]) {
arr.unshift(wasm_obj);
assertEquals([wasm_obj, 1, 2], arr);
});
- testThrowsRepeated(() => Int8Array.from(wasm_obj), TypeError);
+ repeated(() => assertEquals(Int8Array.from([]), Int8Array.from(wasm_obj)));
testThrowsRepeated(() => Int8Array.of(wasm_obj), TypeError);
for (let ArrayType
of [Int8Array, Int16Array, Int32Array, Uint8Array, Uint16Array,
@@ -88,7 +88,7 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(() => array.map(() => wasm_obj), TypeError);
testThrowsRepeated(() => array.reduce(wasm_obj), TypeError);
testThrowsRepeated(() => array.reduceRight(wasm_obj), TypeError);
- testThrowsRepeated(() => array.set(wasm_obj), TypeError);
+ repeated(() => array.set(wasm_obj));
testThrowsRepeated(() => array.set([wasm_obj]), TypeError);
testThrowsRepeated(() => array.slice(wasm_obj, 1), TypeError);
testThrowsRepeated(() => array.some(wasm_obj), TypeError);
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-global-constructors.js b/deps/v8/test/mjsunit/wasm/gc-js-interop-global-constructors.js
index 0c11073480..45214197ef 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop-global-constructors.js
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-global-constructors.js
@@ -56,18 +56,30 @@ for (const wasm_obj of [struct, array]) {
repeated(() => assertSame(wasm_obj, new Array(wasm_obj)[0]));
testThrowsRepeated(() => new ArrayBuffer(wasm_obj), TypeError);
testThrowsRepeated(() => new BigInt(wasm_obj), TypeError);
- testThrowsRepeated(() => new BigInt64Array(wasm_obj), TypeError);
- testThrowsRepeated(() => new BigUint64Array(wasm_obj), TypeError);
+ repeated(() => assertEquals(new BigInt64Array(),
+ new BigInt64Array(wasm_obj)));
+ testThrowsRepeated(() => new BigInt64Array([wasm_obj]), TypeError);
+ repeated(() => assertEquals(new BigUint64Array(),
+ new BigUint64Array(wasm_obj)));
+ testThrowsRepeated(() => new BigUint64Array([wasm_obj]), TypeError);
repeated(() => assertEquals(true, (new Boolean(wasm_obj)).valueOf()));
testThrowsRepeated(() => new DataView(wasm_obj), TypeError);
testThrowsRepeated(() => new Date(wasm_obj), TypeError);
testThrowsRepeated(() => new Error(wasm_obj), TypeError);
testThrowsRepeated(() => new EvalError(wasm_obj), TypeError);
- testThrowsRepeated(() => new Float64Array(wasm_obj), TypeError);
+ repeated(() => assertEquals(new Float64Array(),
+ new Float64Array(wasm_obj)));
+ testThrowsRepeated(() => new Float64Array([wasm_obj]), TypeError);
testThrowsRepeated(() => new Function(wasm_obj), TypeError);
- testThrowsRepeated(() => new Int8Array(wasm_obj), TypeError);
- testThrowsRepeated(() => new Int16Array(wasm_obj), TypeError);
- testThrowsRepeated(() => new Int32Array(wasm_obj), TypeError);
+ repeated(() => assertEquals(new Int8Array(),
+ new Int8Array(wasm_obj)));
+ testThrowsRepeated(() => new Int8Array([wasm_obj]), TypeError);
+ repeated(() => assertEquals(new Int16Array(),
+ new Int16Array(wasm_obj)));
+ testThrowsRepeated(() => new Int16Array([wasm_obj]), TypeError);
+ repeated(() => assertEquals(new Int32Array(),
+ new Int32Array(wasm_obj)));
+ testThrowsRepeated(() => new Int32Array([wasm_obj]), TypeError);
testThrowsRepeated(() => new Map(wasm_obj), TypeError);
testThrowsRepeated(() => new Number(wasm_obj), TypeError);
repeated(() => assertSame(wasm_obj, new Object(wasm_obj)));
@@ -82,9 +94,14 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(() => new Symbol(wasm_obj), TypeError);
testThrowsRepeated(() => new SyntaxError(wasm_obj), TypeError);
testThrowsRepeated(() => new TypeError(wasm_obj), TypeError);
- testThrowsRepeated(() => new Uint8Array(wasm_obj), TypeError);
- testThrowsRepeated(() => new Uint16Array(wasm_obj), TypeError);
- testThrowsRepeated(() => new Uint32Array(wasm_obj), TypeError);
+ repeated(() => assertEquals(new Uint8Array(),
+ new Uint8Array(wasm_obj)));
+ testThrowsRepeated(() => new Uint8Array([wasm_obj]), TypeError);
+ repeated(() => assertEquals(new Uint16Array(),
+ new Uint16Array(wasm_obj)));
+ testThrowsRepeated(() => new Uint16Array([wasm_obj]), TypeError);
+ repeated(() => assertEquals(new Uint32Array(),
+ new Uint32Array(wasm_obj)));
testThrowsRepeated(() => new URIError(wasm_obj), TypeError);
testThrowsRepeated(() => new WeakMap(wasm_obj), TypeError);
repeated(() => assertSame(wasm_obj, new WeakRef(wasm_obj).deref()));
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-helpers.js b/deps/v8/test/mjsunit/wasm/gc-js-interop-helpers.js
index 2047862d16..3c39fc13bc 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop-helpers.js
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-helpers.js
@@ -8,7 +8,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
function CreateWasmObjects() {
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let struct_type = builder.addStruct([makeField(kWasmI32, true)]);
let array_type = builder.addArray(kWasmI32, true);
builder.addFunction('MakeStruct', makeSig([], [kWasmExternRef]))
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-import.mjs b/deps/v8/test/mjsunit/wasm/gc-js-interop-import.mjs
index 41ee0e12f7..b6266ad4e8 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop-import.mjs
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-import.mjs
@@ -8,7 +8,6 @@ import {struct, array} from 'gc-js-interop-export.mjs';
// Read struct and array with new wasm module.
let builder = new WasmModuleBuilder();
-builder.setSingletonRecGroups();
let struct_type = builder.addStruct([makeField(kWasmI32, true)]);
let array_type = builder.addArray(kWasmI32, true);
builder.addFunction('readStruct', makeSig([kWasmExternRef], [kWasmI32]))
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-objects.js b/deps/v8/test/mjsunit/wasm/gc-js-interop-objects.js
index 3ec837e19f..58aeb0b7a7 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop-objects.js
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-objects.js
@@ -12,10 +12,10 @@ for (const wasm_obj of [struct, array]) {
// Test Object.
testThrowsRepeated(() => Object.freeze(wasm_obj), TypeError);
testThrowsRepeated(() => Object.seal(wasm_obj), TypeError);
- testThrowsRepeated(
- () => Object.prototype.__lookupGetter__.call(wasm_obj, 'foo'), TypeError);
- testThrowsRepeated(
- () => Object.prototype.__lookupSetter__.call(wasm_obj, 'foo'), TypeError);
+ repeated(() => assertSame(
+ undefined, Object.prototype.__lookupGetter__.call(wasm_obj, 'foo')));
+ repeated(() => assertSame(
+ undefined, Object.prototype.__lookupSetter__.call(wasm_obj, 'foo')));
testThrowsRepeated(
() => Object.prototype.__defineGetter__.call(wasm_obj, 'foo', () => 42),
TypeError);
@@ -41,7 +41,9 @@ for (const wasm_obj of [struct, array]) {
repeated(() => assertEquals(true, Object.isFrozen(wasm_obj)));
repeated(() => assertEquals(false, Object.isExtensible(wasm_obj)));
repeated(() => assertEquals('object', typeof wasm_obj));
- testThrowsRepeated(() => Object.prototype.toString.call(wasm_obj), TypeError);
+ repeated(
+ () => assertEquals(
+ '[object Object]', Object.prototype.toString.call(wasm_obj)));
repeated(() => {
let tgt = {};
@@ -55,7 +57,7 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(
() => Object.defineProperty(wasm_obj, 'prop', {value: 1}), TypeError);
testThrowsRepeated(() => Object.fromEntries(wasm_obj), TypeError);
- testThrowsRepeated(() => Object.getPrototypeOf(wasm_obj), TypeError);
+ repeated(() => assertSame(null, Object.getPrototypeOf(wasm_obj)));
repeated(() => assertFalse(Object.hasOwn(wasm_obj, 'test')));
testThrowsRepeated(() => Object.preventExtensions(wasm_obj), TypeError);
testThrowsRepeated(() => Object.setPrototypeOf(wasm_obj, Object), TypeError);
@@ -67,11 +69,11 @@ for (const wasm_obj of [struct, array]) {
let obj = Object.create(wasm_obj);
repeated(() => assertSame(wasm_obj, Object.getPrototypeOf(obj)));
repeated(() => assertSame(wasm_obj, Reflect.getPrototypeOf(obj)));
- testThrowsRepeated(() => obj.__proto__, TypeError);
+ repeated(() => assertSame(undefined, obj.__proto__));
testThrowsRepeated(() => obj.__proto__ = wasm_obj, TypeError);
// Property access fails.
- testThrowsRepeated(() => obj[0], TypeError);
- testThrowsRepeated(() => obj.prop, TypeError);
+ repeated(() => assertSame(undefined, obj[0]));
+ repeated(() => assertSame(undefined, obj.prop));
testThrowsRepeated(() => obj.toString(), TypeError);
// Most conversions fail as it will use .toString(), .valueOf(), ...
testThrowsRepeated(() => `${obj}`, TypeError);
@@ -97,11 +99,11 @@ for (const wasm_obj of [struct, array]) {
() => assertEquals([wasm_obj, 1], Reflect.apply(fct, wasm_obj, [1])));
repeated(
() => assertEquals([{}, wasm_obj], Reflect.apply(fct, {}, [wasm_obj])));
- testThrowsRepeated(() => Reflect.apply(fct, 1, wasm_obj), TypeError);
+ repeated(() => assertEquals([new Number(1), undefined], Reflect.apply(fct, 1, wasm_obj)));
testThrowsRepeated(() => Reflect.apply(wasm_obj, null, []), TypeError);
}
testThrowsRepeated(() => Reflect.construct(wasm_obj, []), TypeError);
- testThrowsRepeated(() => Reflect.construct(Object, wasm_obj), TypeError);
+ repeated(() => assertEquals({}, Reflect.construct(Object, wasm_obj)));
testThrowsRepeated(() => Reflect.construct(Object, [], wasm_obj), TypeError);
testThrowsRepeated(
() => Reflect.defineProperty(wasm_obj, 'prop', {value: 1}), TypeError);
@@ -124,8 +126,8 @@ for (const wasm_obj of [struct, array]) {
});
testThrowsRepeated(() => Reflect.deleteProperty(wasm_obj, 'prop'), TypeError);
testThrowsRepeated(() => Reflect.deleteProperty({}, wasm_obj), TypeError);
- testThrowsRepeated(() => Reflect.get(wasm_obj, 'prop'), TypeError);
- testThrowsRepeated(() => Reflect.getPrototypeOf(wasm_obj), TypeError);
+ repeated(() => assertSame(undefined, Reflect.get(wasm_obj, 'prop')));
+ repeated(() => assertSame(null, Reflect.getPrototypeOf(wasm_obj)));
repeated(() => assertFalse(Reflect.has(wasm_obj, 'prop')));
repeated(() => assertTrue(Reflect.has({wasm_obj}, 'wasm_obj')));
@@ -149,8 +151,11 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(() => proxy.abc = 123, TypeError);
}
{
- let proxy = new Proxy({}, wasm_obj);
- testThrowsRepeated(() => proxy.abc, TypeError);
+ let underlyingObject = {};
+ let proxy = new Proxy(underlyingObject, wasm_obj);
+ repeated(() => assertSame(undefined, proxy.abc));
+ underlyingObject.abc = 123;
+ repeated(() => assertSame(123, proxy.abc));
}
{
const handler = {
@@ -166,7 +171,7 @@ for (const wasm_obj of [struct, array]) {
}
{
let proxy = Proxy.revocable({}, wasm_obj).proxy;
- testThrowsRepeated(() => proxy.abc, TypeError);
+ repeated(() => assertSame(undefined, proxy.abc));
}
// Ensure no statement re-assigned wasm_obj by accident.
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop-wasm.js b/deps/v8/test/mjsunit/wasm/gc-js-interop-wasm.js
index f6c0e57941..82e365ba26 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop-wasm.js
@@ -72,7 +72,7 @@ for (const wasm_obj of [struct, array]) {
let tag = new WebAssembly.Tag({parameters: ['structref']});
testThrowsRepeated(() => new WebAssembly.Exception(wasm_obj), TypeError);
- testThrowsRepeated(() => new WebAssembly.Exception(tag, wasm_obj), TypeError);
+ repeated(() => new WebAssembly.Exception(tag, wasm_obj));
repeated(() => new WebAssembly.Exception(tag, [wasm_obj]));
let exception = new WebAssembly.Exception(tag, [wasm_obj]);
testThrowsRepeated(() => exception.is(wasm_obj), TypeError);
diff --git a/deps/v8/test/mjsunit/wasm/gc-js-interop.js b/deps/v8/test/mjsunit/wasm/gc-js-interop.js
index 7b4ba33a21..bdbb720f83 100644
--- a/deps/v8/test/mjsunit/wasm/gc-js-interop.js
+++ b/deps/v8/test/mjsunit/wasm/gc-js-interop.js
@@ -8,15 +8,16 @@ d8.file.execute('test/mjsunit/wasm/gc-js-interop-helpers.js');
let {struct, array} = CreateWasmObjects();
for (const wasm_obj of [struct, array]) {
- testThrowsRepeated(() => wasm_obj.foo, TypeError);
+ repeated(() => assertSame(undefined, wasm_obj.foo));
testThrowsRepeated(() => wasm_obj.foo = 42, TypeError);
- testThrowsRepeated(() => wasm_obj[0], TypeError);
+ repeated(() => assertSame(undefined, wasm_obj[0]));
testThrowsRepeated(() => wasm_obj[0] = undefined, TypeError);
- testThrowsRepeated(() => wasm_obj.__proto__, TypeError);
+ repeated(() => assertSame(undefined, wasm_obj.__proto__));
+ repeated(() => assertSame(
+ null, Object.prototype.__lookupGetter__("__proto__").call(wasm_obj)));
testThrowsRepeated(
() => Object.prototype.__proto__.call(wasm_obj), TypeError);
testThrowsRepeated(() => wasm_obj.__proto__ = null, TypeError);
- testThrowsRepeated(() => JSON.stringify(wasm_obj), TypeError);
testThrowsRepeated(() => {
for (let p in wasm_obj) {
}
@@ -50,7 +51,7 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(() => `${wasm_obj}`, TypeError);
testThrowsRepeated(() => wasm_obj`test`, TypeError);
testThrowsRepeated(() => new wasm_obj, TypeError);
- testThrowsRepeated(() => wasm_obj?.property, TypeError);
+ repeated(() => assertSame(undefined, wasm_obj?.property));
repeated(() => assertEquals(undefined, void wasm_obj));
testThrowsRepeated(() => 2 == wasm_obj, TypeError);
@@ -69,7 +70,7 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(() => { let [] = wasm_obj; }, TypeError);
testThrowsRepeated(() => { let [a, b] = wasm_obj; }, TypeError);
testThrowsRepeated(() => { let [...all] = wasm_obj; }, TypeError);
- testThrowsRepeated(() => { let {a} = wasm_obj; }, TypeError);
+ repeated(() => { let {a} = wasm_obj; assertSame(undefined, a); });
repeated(() => { let {} = wasm_obj; }, TypeError);
repeated(() => {
let {...rest} = wasm_obj;
@@ -124,7 +125,8 @@ for (const wasm_obj of [struct, array]) {
repeated(
() =>
assertEquals([new Number(1), wasm_obj], fct.apply(1, [wasm_obj])));
- testThrowsRepeated(() => fct.apply(1, wasm_obj), TypeError);
+ repeated(
+ () => assertEquals([new Number(1), undefined], fct.apply(1, wasm_obj)));
repeated(() => assertEquals([wasm_obj, 1], fct.bind(wasm_obj)(1)));
repeated(() => assertEquals([wasm_obj, 1], fct.call(wasm_obj, 1)));
}
@@ -224,10 +226,12 @@ for (const wasm_obj of [struct, array]) {
testThrowsRepeated(() => JSON.parse(wasm_obj), TypeError);
repeated(() => assertEquals({x: 1}, JSON.parse('{"x": 1}', wasm_obj)));
- testThrowsRepeated(() => JSON.stringify(wasm_obj), TypeError);
+ repeated(() => assertEquals(undefined, JSON.stringify(wasm_obj)));
repeated(() => assertEquals('{"x":1}', JSON.stringify({x: 1}, wasm_obj)));
repeated(
() => assertEquals('{"x":1}', JSON.stringify({x: 1}, null, wasm_obj)));
+ repeated(
+ () => assertEquals("{}", JSON.stringify({wasm_obj})));
// Yielding wasm objects from a generator function is valid.
repeated(() => {
diff --git a/deps/v8/test/mjsunit/wasm/gc-nominal.js b/deps/v8/test/mjsunit/wasm/gc-nominal.js
index 5a43d5230f..367d2044d1 100644
--- a/deps/v8/test/mjsunit/wasm/gc-nominal.js
+++ b/deps/v8/test/mjsunit/wasm/gc-nominal.js
@@ -44,62 +44,3 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
() => builder.instantiate(), WebAssembly.CompileError,
/subtyping depth is greater than allowed/);
})();
-
-(function TestArrayNewData() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- builder.setEarlyDataCountSection();
- let array_type_index = builder.addArray(kWasmI16, true);
-
- let dummy_byte = 0xff;
- let element_0 = 1000;
- let element_1 = -2222;
-
- let data_segment = builder.addPassiveDataSegment(
- [dummy_byte, element_0 & 0xff, (element_0 >> 8) & 0xff,
- element_1 & 0xff, (element_1 >> 8) & 0xff]);
-
- let global = builder.addGlobal(
- wasmRefType(array_type_index), true,
- [...wasmI32Const(1), ...wasmI32Const(2),
- kGCPrefix, kExprArrayNewData, array_type_index, data_segment],
- builder);
-
- builder.addFunction("global_get", kSig_i_i)
- .addBody([
- kExprGlobalGet, global.index,
- kExprLocalGet, 0,
- kGCPrefix, kExprArrayGetS, array_type_index])
- .exportFunc();
-
- // parameters: (segment offset, array length, array index)
- builder.addFunction("init_from_data", kSig_i_iii)
- .addBody([
- kExprLocalGet, 0, kExprLocalGet, 1,
- kGCPrefix, kExprArrayNewData,
- array_type_index, data_segment,
- kExprLocalGet, 2,
- kGCPrefix, kExprArrayGetS, array_type_index])
- .exportFunc();
-
- builder.addFunction("drop_segment", kSig_v_v)
- .addBody([kNumericPrefix, kExprDataDrop, data_segment])
- .exportFunc();
-
- let instance = builder.instantiate();
-
- assertEquals(element_0, instance.exports.global_get(0));
- assertEquals(element_1, instance.exports.global_get(1));
-
- let init = instance.exports.init_from_data;
-
- assertEquals(element_0, init(1, 2, 0));
- assertEquals(element_1, init(1, 2, 1));
-
- assertTraps(kTrapArrayTooLarge, () => init(1, 1000000000, 0));
- assertTraps(kTrapDataSegmentOutOfBounds, () => init(2, 2, 0));
-
- instance.exports.drop_segment();
-
- assertTraps(kTrapDataSegmentOutOfBounds, () => init(1, 2, 0));
-})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-null-traps.js b/deps/v8/test/mjsunit/wasm/gc-null-traps.js
new file mode 100644
index 0000000000..d56b60a4b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-null-traps.js
@@ -0,0 +1,177 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
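+// Test that GC object accesses (struct.get/set, array.get/set/len/copy,
+// call_ref, ref.as_non_null) trap with a null dereference when the
+// reference operand is null.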
+(function TestNullDereferences() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([
+ makeField(kWasmI32, true), makeField(kWasmI64, true),
+ makeField(kWasmF64, true), makeField(kWasmF32, true)]);
+ let struct_ref =
+ builder.addStruct([makeField(wasmRefNullType(struct), true)]);
+ let array = builder.addArray(kWasmI64, true);
+ let array_ref = builder.addArray(wasmRefNullType(struct), true);
+ let sig = builder.addType(kSig_i_i);
+
+ for (let field_type of [[0, kWasmI32], [1, kWasmI64],
+ [2, kWasmF64], [3, kWasmF32]]) {
+ builder.addFunction(
+ "structGet" + field_type[0],
+ makeSig([wasmRefNullType(struct)], [field_type[1]]))
+ .addBody([
+ kExprLocalGet, 0, kGCPrefix, kExprStructGet, struct, field_type[0]])
+ .exportFunc();
+
+ builder.addFunction(
+ "structSet" + field_type[0],
+ makeSig([wasmRefNullType(struct), field_type[1]], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprStructSet, struct, field_type[0]])
+ .exportFunc();
+ }
+
+ builder.addFunction(
+ "structRefGet", makeSig([wasmRefNullType(struct_ref)],
+ [wasmRefNullType(struct)]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprStructGet, struct_ref, 0])
+ .exportFunc();
+
+ builder.addFunction(
+ "structRefSet", makeSig(
+ [wasmRefNullType(struct_ref), wasmRefNullType(struct)], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprStructSet, struct_ref, 0])
+ .exportFunc();
+
+ builder.addFunction(
+ "arrayGet", makeSig([wasmRefNullType(array), kWasmI32], [kWasmI64]))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array])
+ .exportFunc();
+
+ builder.addFunction(
+ "arraySet", makeSig([wasmRefNullType(array), kWasmI32, kWasmI64], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2,
+ kGCPrefix, kExprArraySet, array])
+ .exportFunc();
+
+ builder.addFunction(
+ "arrayRefGet", makeSig([wasmRefNullType(array_ref), kWasmI32],
+ [wasmRefNullType(struct)]))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array_ref])
+ .exportFunc();
+
+ builder.addFunction(
+ "arrayRefSet", makeSig(
+ [wasmRefNullType(array_ref), kWasmI32, wasmRefNullType(struct)], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2,
+ kGCPrefix, kExprArraySet, array_ref])
+ .exportFunc();
+
+ builder.addFunction(
+ "arrayLen", makeSig([wasmRefNullType(array)], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprArrayLen])
+ .exportFunc();
+
+ builder.addFunction(
+ "arrayCopy",
+ makeSig([wasmRefNullType(array), wasmRefNullType(array)], []))
+ .addBody([kExprLocalGet, 0, kExprI32Const, 10,
+ kExprLocalGet, 1, kExprI32Const, 20,
+ kExprI32Const, 30,
+ kGCPrefix, kExprArrayCopy, array, array])
+ .exportFunc();
+
+ builder.addFunction(
+ "callFuncRef", makeSig([wasmRefNullType(sig), kWasmI32], [kWasmI32]))
+ .addBody([kExprLocalGet, 1, kExprLocalGet, 0, kExprCallRef, sig])
+ .exportFunc();
+
+ builder.addFunction(
+ "refAsNonNullStruct",
+ makeSig([wasmRefNullType(struct)], [wasmRefType(struct)]))
+ .addBody([kExprLocalGet, 0, kExprRefAsNonNull])
+ .exportFunc();
+
+ builder.addFunction(
+ "refAsNonNullFunction",
+ makeSig([wasmRefNullType(sig)], [wasmRefType(sig)]))
+ .addBody([kExprLocalGet, 0, kExprRefAsNonNull])
+ .exportFunc();
+
+ builder.addFunction(
+ "refAsNonNullAny",
+ makeSig([kWasmAnyRef], [wasmRefType(kWasmAnyRef)]))
+ .addBody([kExprLocalGet, 0, kExprRefAsNonNull])
+ .exportFunc();
+
+ builder.addFunction(
+ "refAsNonNullI31",
+ makeSig([kWasmI31Ref], [wasmRefType(kWasmI31Ref)]))
+ .addBody([kExprLocalGet, 0, kExprRefAsNonNull])
+ .exportFunc();
+
+ builder.addFunction(
+ "refAsNonNullExtern",
+ makeSig([kWasmExternRef], [wasmRefType(kWasmExternRef)]))
+ .addBody([kExprLocalGet, 0, kExprRefAsNonNull])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertTraps(kTrapNullDereference, () => instance.exports.structGet0(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.structSet0(null, 15));
+ assertTraps(kTrapNullDereference, () => instance.exports.structGet1(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.structSet1(null, 15n));
+ assertTraps(kTrapNullDereference, () => instance.exports.structGet2(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.structSet2(null, 15.0));
+ assertTraps(kTrapNullDereference, () => instance.exports.structGet3(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.structSet3(null, 15.0));
+ assertTraps(kTrapNullDereference, () => instance.exports.structRefGet(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.structRefSet(null, null));
+ assertTraps(kTrapNullDereference, () => instance.exports.arrayGet(null, 0));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arrayGet(null, 2000000000));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arraySet(null, 0, 42n));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arraySet(null, 2000000000, 42n));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arrayRefGet(null, 0));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arrayRefGet(null, 2000000000));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arrayRefSet(null, 0, null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arrayRefSet(null, 2000000000, null));
+ assertTraps(kTrapNullDereference, () => instance.exports.arrayLen(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.arrayCopy(null, null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.callFuncRef(null, 42));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.refAsNonNullStruct(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.refAsNonNullFunction(null));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.refAsNonNullAny(null));
+ assertEquals(42, instance.exports.refAsNonNullAny(42));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.refAsNonNullI31(null));
+ assertEquals(42, instance.exports.refAsNonNullI31(42));
+ assertTraps(kTrapNullDereference,
+ () => instance.exports.refAsNonNullExtern(null));
+ let object = {};
+ assertEquals(object, instance.exports.refAsNonNullExtern(object));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-optimizations.js b/deps/v8/test/mjsunit/wasm/gc-optimizations.js
index a7ac2e8deb..faa6822b10 100644
--- a/deps/v8/test/mjsunit/wasm/gc-optimizations.js
+++ b/deps/v8/test/mjsunit/wasm/gc-optimizations.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc --no-liftoff
+// Flags: --experimental-wasm-gc --no-liftoff --no-wasm-lazy-compilation
+// Flags: --no-wasm-inlining --no-wasm-speculative-inlining
// These tests are meant to examine if Turbofan CsaLoadElimination works
// correctly for wasm. The TurboFan graphs can be examined with --trace-turbo.
@@ -314,6 +315,52 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(value_0 + value_1, instance.exports.main());
})();
+(function WasmLoadEliminationArrayLength() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI32, true);
+ builder.addFunction("producer", makeSig([kWasmI32], [wasmRefType(array)]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprArrayNewDefault, array])
+ .exportFunc();
+ let side_effect = builder.addFunction("side_effect", kSig_v_v).addBody([]);
+ builder.addFunction("tester", makeSig([wasmRefType(array)], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprArrayLen,
+ kExprI32Const, 1, kExprI32Add,
+ kGCPrefix, kExprArrayNewDefault, array,
+ kExprCallFunction, side_effect.index, // unknown side-effect
+ kGCPrefix, kExprArrayLen,
+ kExprLocalGet, 0, kGCPrefix, kExprArrayLen,
+ kExprI32Mul])
+ .exportFunc();
+ let instance = builder.instantiate();
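+  // producer(10) creates an array of length 10; tester computes
+  // (len + 1) * len = 11 * 10.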
+ assertEquals(10 * 11,
+ instance.exports.tester(instance.exports.producer(10)));
+})();
+
+(function WasmLoadEliminationUnrelatedTypes() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let struct1 = builder.addStruct([makeField(kWasmI32, true)]);
+ let struct2 = builder.addStruct([makeField(kWasmI32, true),
+ makeField(kWasmI64, true)]);
+
+ builder.addFunction("tester",
+ makeSig([wasmRefType(struct1), wasmRefType(struct2)], [kWasmI32]))
+ // f(x, y) { y.f = x.f + 10; return y.f * x.f }
+  // The x.f load cached in the load-elimination state should survive the
+  // y.f store.
+ .addBody([kExprLocalGet, 1,
+ kExprLocalGet, 0, kGCPrefix, kExprStructGet, struct1, 0,
+ kExprI32Const, 10, kExprI32Add,
+ kGCPrefix, kExprStructSet, struct2, 0,
+ kExprLocalGet, 0, kGCPrefix, kExprStructGet, struct1, 0,
+ kExprLocalGet, 1, kGCPrefix, kExprStructGet, struct2, 0,
+ kExprI32Mul]);
+
+  builder.instantiate();
+})();
+
(function EscapeAnalysisWithLoadElimination() {
print(arguments.callee.name);
@@ -468,9 +515,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
// Cast from struct_a to struct_b via common base type struct_super.
kExprLocalGet, 0,
- // TODO(7748): Replace cast op with "ref.cast null".
- kGCPrefix, kExprRefCastDeprecated, struct_super,
- kGCPrefix, kExprRefCastDeprecated, struct_b, // annotated as 'ref null none'
+ kGCPrefix, kExprRefCastNull, struct_super,
+ kGCPrefix, kExprRefCastNull, struct_b, // annotated as 'ref null none'
kExprRefIsNull,
]);
@@ -592,3 +638,38 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
builder.instantiate({});
})();
+
+(function RedundantExternalizeInternalize() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI32, true);
+
+ builder.addFunction('createArray',
+ makeSig([kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayNewFixed, array, 1,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('get', makeSig([kWasmExternRef, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ // The following two operations are optimized away.
+ kGCPrefix, kExprExternExternalize,
+ kGCPrefix, kExprExternInternalize,
+ //
+ kGCPrefix, kExprRefCastNull, array,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let wasmArray = wasm.createArray(10);
+ assertEquals(10, wasm.get(wasmArray, 0));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/i31ref.js b/deps/v8/test/mjsunit/wasm/i31ref.js
index 83d3c58494..aba5e1e1e1 100644
--- a/deps/v8/test/mjsunit/wasm/i31ref.js
+++ b/deps/v8/test/mjsunit/wasm/i31ref.js
@@ -54,3 +54,88 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(42, instance.exports.i31_null(0));
assertTraps(kTrapNullDereference, () => instance.exports.i31_null(1));
})();
+
+(function I31RefJS() {
+ print(arguments.callee.name);
+
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("roundtrip", makeSig([kWasmExternRef], [kWasmExternRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprExternExternalize])
+ .exportFunc();
+ builder.addFunction("signed", makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, kI31RefCode, kGCPrefix, kExprI31GetS])
+ .exportFunc();
+ builder.addFunction("unsigned", makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, kI31RefCode, kGCPrefix, kExprI31GetU])
+ .exportFunc();
+ builder.addFunction("new", makeSig([kWasmI32], [kWasmExternRef]))
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprI31New,
+ kGCPrefix, kExprExternExternalize])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertEquals(0, instance.exports.roundtrip(0));
+ assertEquals(0, instance.exports.signed(0));
+ assertEquals(0, instance.exports.unsigned(0));
+ assertEquals(0, instance.exports.new(0));
+
+ assertEquals(123, instance.exports.roundtrip(123));
+ assertEquals(123, instance.exports.signed(123));
+ assertEquals(123, instance.exports.unsigned(123));
+ assertEquals(123, instance.exports.new(123));
+
+ // Max value.
+ assertEquals(0x3fffffff, instance.exports.roundtrip(0x3fffffff));
+ assertEquals(0x3fffffff, instance.exports.signed(0x3fffffff));
+ assertEquals(0x3fffffff, instance.exports.unsigned(0x3fffffff));
+ assertEquals(0x3fffffff, instance.exports.new(0x3fffffff));
+
+ // Double number.
+ assertEquals(1234.567, instance.exports.roundtrip(1234.567));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(1234.567));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(1234.567));
+
+ // Out-of-bounds positive integer.
+ assertEquals(0x40000000, instance.exports.roundtrip(0x40000000));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(0x40000000));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(0x40000000));
+ assertEquals(-0x40000000, instance.exports.new(0x40000000));
+
+ // Out-of-bounds negative integer.
+ assertEquals(-0x40000001, instance.exports.roundtrip(-0x40000001));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(-0x40000001));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(-0x40000001));
+ assertEquals(0x3fffffff, instance.exports.new(-0x40000001));
+
+  // Sign/zero extension.
+ assertEquals(-2, instance.exports.roundtrip(-2));
+ assertEquals(-2, instance.exports.signed(-2));
+ assertEquals(0x7ffffffe, instance.exports.unsigned(-2));
+ assertEquals(-2, instance.exports.new(-2));
+
+ // Min value.
+ assertEquals(-0x40000000, instance.exports.roundtrip(-0x40000000));
+ assertEquals(-0x40000000, instance.exports.signed(-0x40000000));
+ assertEquals(0x40000000, instance.exports.unsigned(-0x40000000));
+ assertEquals(-0x40000000, instance.exports.new(-0x40000000));
+
+ assertEquals(NaN, instance.exports.roundtrip(NaN));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(NaN));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(NaN));
+
+ assertEquals(-0, instance.exports.roundtrip(-0));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(-0));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(-0));
+
+ assertEquals(Infinity, instance.exports.roundtrip(Infinity));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(Infinity));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(Infinity));
+
+ assertEquals(-Infinity, instance.exports.roundtrip(-Infinity));
+ assertTraps(kTrapIllegalCast, () => instance.exports.signed(-Infinity));
+ assertTraps(kTrapIllegalCast, () => instance.exports.unsigned(-Infinity));
+})();
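
The expected values in the asserts above follow from i31ref holding only the low 31 bits of an i32, with i31.get_s sign-extending from bit 30 and i31.get_u zero-extending. A minimal JS sketch of that arithmetic (illustrative only; the helper names are made up and are not V8 API):

// i31.new keeps the low 31 bits of the input i32.
function i31New(x) { return x & 0x7fffffff; }
// i31.get_s sign-extends from bit 30; i31.get_u zero-extends.
function i31GetS(v) { return (v << 1) >> 1; }
function i31GetU(v) { return v; }

// These reproduce the expected values asserted above:
console.log(i31GetS(i31New(0x40000000)) === -0x40000000);  // true
console.log(i31GetS(i31New(-0x40000001)) === 0x3fffffff);  // true
console.log(i31GetU(i31New(-2)) === 0x7ffffffe);           // true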
diff --git a/deps/v8/test/mjsunit/wasm/inlining.js b/deps/v8/test/mjsunit/wasm/inlining.js
index 602eae974a..7045b6a99a 100644
--- a/deps/v8/test/mjsunit/wasm/inlining.js
+++ b/deps/v8/test/mjsunit/wasm/inlining.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --wasm-inlining --no-liftoff --experimental-wasm-return-call
-// Flags: --experimental-wasm-gc
+// Flags: --experimental-wasm-gc --allow-natives-syntax
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
@@ -295,6 +295,68 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(20, instance.exports.main(10, 20));
})();
+// Inlining should behave correctly when there are no throwing nodes in the
+// callee.
+(function NoThrowInHandledTest() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let tag = builder.addTag(kSig_v_i);
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0, kExprI32Const, 0, kExprI32GeS,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
+ kExprElse,
+ kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub,
+ kExprEnd]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprCallFunction, callee.index,
+ kExprCatchAll,
+ kExprLocalGet, 1,
+ kExprEnd])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(11, instance.exports.main(10, 20));
+})();
+
+// Things get more complex if we also need to reload the memory context.
+(function UnhandledInHandledWithMemoryTest() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let sig = builder.addType(kSig_i_i);
+
+ builder.addMemory(10, 100);
+
+ let inner_callee = builder.addFunction("inner_callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0]).exportFunc();
+
+  // f(x, y) = inner_callee(x + y), called indirectly through call_ref.
+ let callee = builder.addFunction("callee", kSig_i_ii)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add,
+ kExprRefFunc, inner_callee.index, kExprCallRef, sig]);
+  // g(x) = f(5, x) + x + i32 value loaded from memory address 10 (0 here).
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprTry, kWasmI32,
+ kExprI32Const, 5, kExprLocalGet, 0,
+ kExprCallFunction, callee.index,
+ kExprCatchAll,
+ kExprI32Const, 0,
+ kExprEnd,
+ kExprLocalGet, 0, kExprI32Add,
+ kExprI32Const, 10, kExprI32LoadMem, 0, 0, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(25, instance.exports.main(10));
+})();
+
(function LoopUnrollingTest() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -469,3 +531,105 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
builder.instantiate({});
})();
+
+(function InliningTrapFromCallee() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ // Add some types to have an index offset.
+ for (let i = 0; i < 10; ++i) {
+ builder.addFunction(null, makeSig([], [])).addBody([]);
+ }
+
+ let callee = builder.addFunction('callee', kSig_i_ii)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprI32DivU,
+ ]);
+
+ let intermediate = builder.addFunction('intermediate', kSig_i_ii)
+ .addBody([
+ // Some nops, so that the call doesn't have the same offset as the div
+ // in the callee.
+ kExprNop, kExprNop,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprCallFunction, callee.index,
+ ])
+ .exportFunc();
+
+ let caller = builder.addFunction('main', kSig_ii_ii)
+ .addBody([
+ // Some nops, so that the call doesn't have the same offset as the div
+ // in the callee.
+ kExprNop, kExprNop, kExprNop, kExprNop, kExprNop,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprCallFunction, intermediate.index,
+ // If it didn't trap, call it again without intermediate function and with
+ // swapped arguments.
+ kExprLocalGet, 1,
+ kExprLocalGet, 0,
+ kExprCallFunction, callee.index,
+ ])
+ .exportFunc();
+
+ let wire_bytes = builder.toBuffer();
+ let module = new WebAssembly.Module(wire_bytes);
+ let instance = new WebAssembly.Instance(module, {});
+ TestStackTrace(instance.exports.main);
+ // Serialize and deserialize the module to verify that the inlining positions
+ // are properly "transformed" here.
+ print("Repeat test with serialized module.")
+ module = %DeserializeWasmModule(%SerializeWasmModule(module), wire_bytes);
+ instance = new WebAssembly.Instance(module, {});
+ TestStackTrace(instance.exports.main);
+
+ function TestStackTrace(main) {
+ assertEquals([7, 0], main(21, 3));
+ assertTraps(kTrapDivByZero, () => main(1, 0));
+ // Test stack trace for trap.
+ try {
+ main(1, 0);
+ assertUnreachable();
+ } catch(e) {
+ assertMatches(/RuntimeError: divide by zero/, e.stack);
+ let expected_entries = [
+ // [name, index, offset]
+ ['callee', '' + callee.index, '0x8c'],
+ ['intermediate', '' + intermediate.index, '0x96'],
+ ['main', '' + caller.index, '0xa4'],
+ ];
+ CheckCallStack(e, expected_entries);
+ }
+
+ try {
+ main(0, 1);
+ assertUnreachable();
+ } catch(e) {
+ assertMatches(/RuntimeError: divide by zero/, e.stack);
+ let expected_entries = [
+ // [name, index, offset]
+ ['callee', '' + callee.index, '0x8c'],
+ ['main', '' + caller.index, '0xaa'],
+ ];
+ CheckCallStack(e, expected_entries);
+ }
+ }
+
+ function CheckCallStack(error, expected_entries) {
+ print(error.stack);
+ let regex = /at ([^ ]+) \(wasm[^\[]+\[([0-9]+)\]:(0x[0-9a-f]+)\)/g;
+ let entries = [...error.stack.matchAll(regex)];
+ for (let i = 0; i < expected_entries.length; ++i) {
+ let actual = entries[i];
+ print(`match = ${actual[0]}`);
+ let expected = expected_entries[i];
+ assertEquals(expected[0], actual[1]);
+ assertEquals(expected[1], actual[2]);
+ assertEquals(expected[2], actual[3]);
+ }
+ assertEquals(expected_entries.length, entries.length);
+ }
+})();
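
For reference, the stack-trace lines that CheckCallStack parses have the shape `at <name> (wasm://<module url>[<function index>]:<0xoffset>)`. A standalone check of the regex (the module URL below is a made-up placeholder, not an actual value produced by this test):

const regex = /at ([^ ]+) \(wasm[^\[]+\[([0-9]+)\]:(0x[0-9a-f]+)\)/g;
const sample = '    at callee (wasm://wasm/12345abc[12]:0x8c)';
const [match] = [...sample.matchAll(regex)];
console.log(match[1], match[2], match[3]);  // callee 12 0x8c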
diff --git a/deps/v8/test/mjsunit/wasm/load-immutable.js b/deps/v8/test/mjsunit/wasm/load-immutable.js
index 988f48bac2..e9e444c4d5 100644
--- a/deps/v8/test/mjsunit/wasm/load-immutable.js
+++ b/deps/v8/test/mjsunit/wasm/load-immutable.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc --no-liftoff --experimental-wasm-nn-locals
+// Flags: --experimental-wasm-gc --no-liftoff
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/log-code-after-post-message.js b/deps/v8/test/mjsunit/wasm/log-code-after-post-message.js
new file mode 100644
index 0000000000..d2a718dd38
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/log-code-after-post-message.js
@@ -0,0 +1,44 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+function workerCode() {
+ function WorkerOnProfileEnd(profile) {
+ postMessage(profile.indexOf('foo'));
+ }
+
+ onmessage = (wasm_module) => {
+ WebAssembly.instantiate(wasm_module, {q: {func: d8.profiler.triggerSample}})
+ .then(instance => {
+ instance.exports.foo();
+ console.profileEnd();
+ });
+ };
+
+ d8.profiler.setOnProfileEndListener(WorkerOnProfileEnd);
+ // Code logging happens for all code objects when profiling gets started,
+ // and when new code objects appear after profiling has started. We want to
+ // test the second scenario here. As new code objects appear as the
+  // parameter of {onmessage}, we have to start profiling already here before
+  // {onmessage} is called.
+ console.profile();
+ postMessage('Starting worker');
+}
+
+const worker = new Worker(workerCode, {type: 'function'});
+
+assertEquals("Starting worker", worker.getMessage());
+
+const builder = new WasmModuleBuilder();
+const sig_index = builder.addType(kSig_v_v);
+const imp_index = builder.addImport("q", "func", sig_index);
+builder.addFunction('foo', kSig_v_v)
+ .addBody([
+ kExprCallFunction, imp_index,
+ ])
+ .exportFunc();
+const wasm_module = builder.toModule();
+worker.postMessage(wasm_module);
+assertTrue(worker.getMessage() > 0);
diff --git a/deps/v8/test/mjsunit/wasm/max-wasm-functions.js b/deps/v8/test/mjsunit/wasm/max-wasm-functions.js
new file mode 100644
index 0000000000..27a289fb04
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/max-wasm-functions.js
@@ -0,0 +1,16 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --max-wasm-functions=1000100
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig_index = builder.addType(makeSig([kWasmI32], [kWasmI32]));
+
+for (let j = 0; j < 1000010; ++j) {
+ builder.addFunction(undefined, sig_index)
+ .addBody([kExprLocalGet, 0]);
+}
+const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/memory64.js b/deps/v8/test/mjsunit/wasm/memory64.js
index 13ca8cf8c5..d7bdd70e99 100644
--- a/deps/v8/test/mjsunit/wasm/memory64.js
+++ b/deps/v8/test/mjsunit/wasm/memory64.js
@@ -13,26 +13,31 @@ const GB = 1024 * 1024 * 1024;
// The current limit is 16GB. Adapt this test if this changes.
const max_num_pages = 16 * GB / kPageSize;
-function BasicMemory64Tests(num_pages) {
+function BasicMemory64Tests(num_pages, use_atomic_ops) {
const num_bytes = num_pages * kPageSize;
- print(`Testing ${num_bytes} bytes (${num_pages} pages)`);
+ print(`Testing ${num_bytes} bytes (${num_pages} pages) on ${
+ use_atomic_ops ? '' : 'non-'}atomic memory`);
let builder = new WasmModuleBuilder();
builder.addMemory64(num_pages, num_pages, true);
+ // A memory operation with alignment (0) and offset (0).
+ let op = (non_atomic, atomic) => use_atomic_ops ?
+ [kAtomicPrefix, atomic, 0, 0] :
+ [non_atomic, 0, 0];
builder.addFunction('load', makeSig([kWasmF64], [kWasmI32]))
.addBody([
- kExprLocalGet, 0, // local.get 0
- kExprI64UConvertF64, // i64.uconvert_sat.f64
- kExprI32LoadMem, 0, 0, // i32.load_mem align=1 offset=0
+ kExprLocalGet, 0, // local.get 0
+ kExprI64UConvertF64, // i64.uconvert_sat.f64
+ ...op(kExprI32LoadMem, kExprI32AtomicLoad) // load
])
.exportFunc();
builder.addFunction('store', makeSig([kWasmF64, kWasmI32], []))
.addBody([
- kExprLocalGet, 0, // local.get 0
- kExprI64UConvertF64, // i64.uconvert_sat.f64
- kExprLocalGet, 1, // local.get 1
- kExprI32StoreMem, 0, 0, // i32.store_mem align=1 offset=0
+ kExprLocalGet, 0, // local.get 0
+ kExprI64UConvertF64, // i64.uconvert_sat.f64
+ kExprLocalGet, 1, // local.get 1
+ ...op(kExprI32StoreMem, kExprI32AtomicStore) // store
])
.exportFunc();
@@ -56,19 +61,42 @@ function BasicMemory64Tests(num_pages) {
assertEquals(num_bytes, array.length);
}
+ const GB = Math.pow(2, 30);
assertEquals(0, load(num_bytes - 4));
- assertThrows(() => load(num_bytes - 3));
+ assertTraps(kTrapMemOutOfBounds, () => load(num_bytes));
+ assertTraps(kTrapMemOutOfBounds, () => load(num_bytes - 3));
+ assertTraps(kTrapMemOutOfBounds, () => load(num_bytes - 4 + 4 * GB));
+ assertTraps(kTrapMemOutOfBounds, () => store(num_bytes));
+ assertTraps(kTrapMemOutOfBounds, () => store(num_bytes - 3));
+ assertTraps(kTrapMemOutOfBounds, () => store(num_bytes - 4 + 4 * GB));
+ if (use_atomic_ops) {
+ assertTraps(kTrapUnalignedAccess, () => load(num_bytes - 7));
+ assertTraps(kTrapUnalignedAccess, () => store(num_bytes - 7));
+ }
store(num_bytes - 4, 0x12345678);
assertEquals(0x12345678, load(num_bytes - 4));
- let kStoreOffset = 27;
+ let kStoreOffset = use_atomic_ops ? 40 : 27;
store(kStoreOffset, 11);
assertEquals(11, load(kStoreOffset));
- // Now check 100 random positions.
- for (let i = 0; i < 100; ++i) {
- let position = Math.floor(Math.random() * num_bytes);
+ // Now check some interesting positions, plus 100 random positions.
+ const positions = [
+ // Nothing at the beginning.
+ 0, 1,
+ // Check positions around the store offset.
+ kStoreOffset - 1, kStoreOffset, kStoreOffset + 1,
+ // Check the end.
+ num_bytes - 5, num_bytes - 4, num_bytes - 3, num_bytes - 2, num_bytes - 1,
+    // Check positions at the end, truncated to 32 bits (might be
+ // redundant).
+ (num_bytes - 5) >>> 0, (num_bytes - 4) >>> 0, (num_bytes - 3) >>> 0,
+ (num_bytes - 2) >>> 0, (num_bytes - 1) >>> 0
+ ];
+ const random_positions =
+ Array.from({length: 100}, () => Math.floor(Math.random() * num_bytes));
+ for (let position of positions.concat(random_positions)) {
let expected = 0;
if (position == kStoreOffset) {
expected = 11;
@@ -371,3 +399,15 @@ function allowOOM(fn) {
assertEquals(kValue, instance.exports.load(kOffset2));
assertEquals(5n, instance.exports.grow(1n));
})();
+
+(function TestAtomics_SmallMemory() {
+ print(arguments.callee.name);
+ BasicMemory64Tests(4, true);
+})();
+
+(function TestAtomics_5GB() {
+ print(arguments.callee.name);
+ let num_pages = 5 * GB / kPageSize;
+ // This test can fail if 5GB of memory cannot be allocated.
+ allowOOM(() => BasicMemory64Tests(num_pages, true));
+})();
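
The `num_bytes - 4 + 4 * GB` accesses above target the specific failure mode where an engine truncates the 64-bit index to 32 bits: after truncation that far-out-of-bounds index would alias the last in-bounds word and the expected trap would be missed. A standalone illustration of the aliasing, using the small-memory configuration (a sketch, not part of the patch):

const GB = 2 ** 30;
const kPageSize = 65536;
const num_bytes = 4 * kPageSize;        // the 4-page test memory
const oob = num_bytes - 4 + 4 * GB;     // far out of bounds
// Truncating the index to 32 bits lands back on the last valid word:
console.log(oob % 2 ** 32 === num_bytes - 4);  // true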
diff --git a/deps/v8/test/mjsunit/wasm/multi-value.js b/deps/v8/test/mjsunit/wasm/multi-value.js
index 1ad3d428fd..0b9648a336 100644
--- a/deps/v8/test/mjsunit/wasm/multi-value.js
+++ b/deps/v8/test/mjsunit/wasm/multi-value.js
@@ -402,7 +402,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
(function MultiBrTableTest() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let sig_ii_v = builder.addType(kSig_v_v);
builder.addFunction("main", kSig_ii_v)
.addBody([
diff --git a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
new file mode 100644
index 0000000000..46363e1747
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
@@ -0,0 +1,46 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noexperimental-wasm-gc --no-experimental-wasm-stringref
+// Flags: --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function instantiateModuleWithGC() {
+ // Build a WebAssembly module which uses Wasm GC features.
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', makeSig([], [kWasmAnyRef]))
+ .addBody([
+ kExprI32Const, 42,
+ kGCPrefix, kExprI31New,
+ ])
+ .exportFunc();
+
+ return builder.instantiate();
+}
+
+function instantiateModuleWithStringRef() {
+ // Build a WebAssembly module which uses stringref features.
+ const builder = new WasmModuleBuilder();
+ builder.addFunction("main",
+ makeSig([kWasmStringRef], [kWasmStringRef]))
+ .addBody([kExprLocalGet, 0])
+ .exportFunc();
+ return builder.instantiate();
+}
+
+// Due to --noexperimental-wasm-gc GC is disabled.
+assertThrows(instantiateModuleWithGC, WebAssembly.CompileError);
+// Due to --noexperimental-wasm-stringref stringrefs are not supported.
+assertThrows(instantiateModuleWithStringRef, WebAssembly.CompileError);
+// Disable WebAssembly GC explicitly.
+%SetWasmGCEnabled(false);
+assertThrows(instantiateModuleWithGC, WebAssembly.CompileError);
+assertThrows(instantiateModuleWithStringRef, WebAssembly.CompileError);
+// Enable WebAssembly GC explicitly.
+%SetWasmGCEnabled(true);
+assertEquals(42, instantiateModuleWithGC().exports.main());
+// Enabling Wasm GC via callback will also enable wasm stringref.
+let str = "Hello World!";
+assertSame(str, instantiateModuleWithStringRef().exports.main(str));
diff --git a/deps/v8/test/mjsunit/wasm/recognize-imports.js b/deps/v8/test/mjsunit/wasm/recognize-imports.js
new file mode 100644
index 0000000000..1c4c232536
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/recognize-imports.js
@@ -0,0 +1,48 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-stringref --allow-natives-syntax
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+let sig_w_w = makeSig([kWasmStringRef], [kWasmStringRef]);
+let toLowerCase = builder.addImport("m", "toLowerCase", sig_w_w);
+
+builder.addFunction('call_tolower', sig_w_w).exportFunc().addBody([
+ kExprLocalGet, 0,
+ kExprCallFunction, toLowerCase,
+]);
+
+let module = builder.toModule();
+
+let recognizable = Function.prototype.call.bind(String.prototype.toLowerCase);
+let recognizable_imports = { m: { toLowerCase: recognizable } };
+
+let instance1 = new WebAssembly.Instance(module, recognizable_imports);
+let call_tolower = instance1.exports.call_tolower;
+assertEquals("abc", call_tolower("ABC"));
+%WasmTierUpFunction(call_tolower);
+assertEquals("abc", call_tolower("ABC"));
+
+// Null should be handled correctly (by throwing the same TypeError that
+// JavaScript would throw).
+assertThrows(
+ () => call_tolower(null), TypeError,
+ /String.prototype.toLowerCase called on null or undefined/);
+
+// Creating a second instance with identical imports should not cause
+// recompilation.
+console.log("Second instance.");
+let instance2 = new WebAssembly.Instance(module, recognizable_imports);
+assertEquals("def", instance2.exports.call_tolower("DEF"));
+
+// Creating a third instance with different imports must not reuse the
+// existing optimized code.
+console.log("Third instance.");
+let other_imports = { m: { toLowerCase: () => "foo" } };
+let instance3 = new WebAssembly.Instance(module, other_imports);
+assertEquals("foo", instance3.exports.call_tolower("GHI"));
+assertEquals("def", instance2.exports.call_tolower("DEF"));
+assertEquals("abc", instance1.exports.call_tolower("ABC"));
diff --git a/deps/v8/test/mjsunit/wasm/reference-globals-import.js b/deps/v8/test/mjsunit/wasm/reference-globals-import.js
index 456bc92af0..5b43c1c68c 100644
--- a/deps/v8/test/mjsunit/wasm/reference-globals-import.js
+++ b/deps/v8/test/mjsunit/wasm/reference-globals-import.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-gc --experimental-wasm-stringref
-// Flags: --no-wasm-gc-structref-as-dataref
+
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// Test type checks when creating a global with a value imported from a global
@@ -13,7 +13,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let exporting_instance = (function() {
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let type_super = builder.addStruct([makeField(kWasmI32, false)]);
let type_sub =
builder.addStruct([makeField(kWasmI32, false)], type_super);
@@ -64,7 +63,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
for (let[expected_valid, type, global] of tests) {
print(`test ${type} imports ${global}`);
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let type_super = builder.addStruct([makeField(kWasmI32, false)]);
let type_sub =
builder.addStruct([makeField(kWasmI32, false)], type_super);
@@ -105,7 +103,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let exporting_instance = (function() {
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let type_super = builder.addStruct([makeField(kWasmI32, false)]);
let type_sub =
builder.addStruct([makeField(kWasmI32, false)], type_super);
@@ -156,7 +153,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
for (let[expected_valid, type, imported_value] of tests) {
print(`test ${type} imports ${imported_value}`);
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let type_super = builder.addStruct([makeField(kWasmI32, false)]);
let type_sub =
builder.addStruct([makeField(kWasmI32, false)], type_super);
@@ -214,7 +210,6 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let exporting_instance = (function() {
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let type_struct = builder.addStruct([makeField(kWasmI32, false)]);
let type_array = builder.addArray(kWasmI32);
@@ -244,6 +239,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
builder.addImportedGlobal("imports", "eq2", kWasmEqRef, false);
builder.addImportedGlobal("imports", "eq3", kWasmEqRef, false);
builder.addImportedGlobal("imports", "array", kWasmArrayRef, false);
+ builder.addImportedGlobal("imports", "i31ref", kWasmI31Ref, false);
builder.instantiate({imports : {
any1: exporting_instance.exports.create_struct(),
any2: exporting_instance.exports.create_array(),
@@ -254,6 +250,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
eq2: exporting_instance.exports.create_array(),
eq3: exporting_instance.exports.create_struct(),
array: exporting_instance.exports.create_array(),
+ i31ref: -123,
}});
})();
@@ -505,3 +502,43 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertThrows(() => arrayref_global.value = "string", TypeError);
assertThrows(() => arrayref_global.value = wasm.create_struct(1), TypeError);
})();
+
+(function TestI31RefGlobalFromJS() {
+ print(arguments.callee.name);
+ let i31ref_global = new WebAssembly.Global(
+ { value: "i31ref", mutable: true }, 123);
+ assertEquals(123, i31ref_global.value);
+
+ let builder = new WasmModuleBuilder();
+ builder.addImportedGlobal("imports", "i31ref_global", kWasmI31Ref, true);
+ let struct_type = builder.addStruct([makeField(kWasmI32, false)]);
+
+ builder.addFunction("get_i31", makeSig([], [kWasmI32]))
+ .addBody([
+ kExprGlobalGet, 0,
+ kGCPrefix, kExprI31GetS
+ ])
+ .exportFunc();
+ builder.addFunction("create_struct",
+ makeSig([kWasmI32], [wasmRefType(struct_type)]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, struct_type,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({imports : {i31ref_global}});
+ let wasm = instance.exports;
+ assertEquals(123, i31ref_global.value);
+
+ i31ref_global.value = 42;
+ assertEquals(42, i31ref_global.value);
+ assertEquals(42, wasm.get_i31());
+ i31ref_global.value = null;
+ assertEquals(null, i31ref_global.value);
+
+ assertThrows(() => i31ref_global.value = undefined, TypeError);
+ assertThrows(() => i31ref_global.value = "string", TypeError);
+ assertThrows(() => i31ref_global.value = wasm.create_struct(1), TypeError);
+ assertThrows(() => i31ref_global.value = Math.pow(2, 33), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/reference-globals.js b/deps/v8/test/mjsunit/wasm/reference-globals.js
index 821c2b1b27..1653fc8f15 100644
--- a/deps/v8/test/mjsunit/wasm/reference-globals.js
+++ b/deps/v8/test/mjsunit/wasm/reference-globals.js
@@ -12,8 +12,12 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
var exporting_instance = (function() {
var builder = new WasmModuleBuilder();
+ builder.startRecGroup();
var sig_index = builder.addType(kSig_i_ii);
+ builder.endRecGroup();
+ builder.startRecGroup();
var wrong_sig_index = builder.addType(kSig_i_i);
+ builder.endRecGroup();
var addition_index = builder.addFunction("addition", sig_index)
.addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
@@ -63,7 +67,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
false);
builder.instantiate({imports: { global: 42 }})},
WebAssembly.LinkError,
- /function-typed object must be null \(if nullable\) or a Wasm function object/
+ /JS object does not match expected wasm type/
);
// Mistyped function import.
@@ -110,7 +114,9 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
+ builder.startRecGroup();
var struct_index = builder.addStruct([{type: kWasmI32, mutability: true}]);
+ builder.endRecGroup();
var composite_struct_index = builder.addStruct(
[{type: kWasmI32, mutability: true},
{type: wasmRefNullType(struct_index), mutability: true},
@@ -390,8 +396,59 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.exportFunc()
builder.addGlobal(wasmRefType(struct_index), false,
- [kExprRefFunc, func.index + 1, kExprStructNew, struct_index]);
+ [kExprRefFunc, func.index + 1, kExprStructNew,
+ struct_index]);
assertThrows(() => builder.instantiate(), WebAssembly.CompileError,
/function index #1 is out of bounds/);
})();
+
+(function TestExternConstantExpr() {
+ print(arguments.callee.name);
+
+ let imported_struct = (function () {
+ let builder = new WasmModuleBuilder();
+
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ let global = builder.addGlobal(
+ wasmRefType(struct), false,
+ [kExprI32Const, 42, kGCPrefix, kExprStructNew, struct])
+ .exportAs("global");
+
+ return builder.instantiate().exports.global.value;
+ })();
+
+ let builder = new WasmModuleBuilder();
+
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ let imported = builder.addImportedGlobal("m", "ext", kWasmExternRef, false)
+
+ let internal = builder.addGlobal(
+ kWasmAnyRef, false,
+ [kExprGlobalGet, imported, kGCPrefix, kExprExternInternalize]);
+
+ builder.addGlobal(
+ kWasmExternRef, false,
+ [kExprGlobalGet, internal.index, kGCPrefix, kExprExternExternalize])
+ .exportAs("exported")
+
+ builder.addFunction("getter", kSig_i_v)
+ .addBody([kExprGlobalGet, internal.index,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ builder.addFunction("getter_fail", kSig_i_v)
+ .addBody([kExprGlobalGet, internal.index,
+ kGCPrefix, kExprRefCast, kI31RefCode,
+ kGCPrefix, kExprI31GetS])
+ .exportFunc();
+
+ let instance = builder.instantiate({m: {ext: imported_struct}});
+
+ assertSame(instance.exports.exported.value, imported_struct);
+ assertEquals(42, instance.exports.getter());
+ assertTraps(kTrapIllegalCast, () => instance.exports.getter_fail());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/reference-table-js-interop.js b/deps/v8/test/mjsunit/wasm/reference-table-js-interop.js
index 7644278edf..fae5471fe4 100644
--- a/deps/v8/test/mjsunit/wasm/reference-table-js-interop.js
+++ b/deps/v8/test/mjsunit/wasm/reference-table-js-interop.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-gc --experimental-wasm-stringref
-// Flags: --no-wasm-gc-structref-as-dataref
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
@@ -12,6 +11,7 @@ let tableTypes = {
"eqref": kWasmEqRef,
"structref": kWasmStructRef,
"arrayref": kWasmArrayRef,
+ "i31ref": kWasmI31Ref,
};
// Test table consistency check.
@@ -115,11 +115,13 @@ for (let [typeName, type] of Object.entries(tableTypes)) {
builder.addFunction("createI31", i31Sig)
.addBody([kExprI32Const, 12, kGCPrefix, kExprI31New])
.exportFunc();
- let structSig = typeName != "arrayref" ? creatorSig : creatorAnySig;
+ let structSig = typeName != "arrayref" && typeName != "i31ref"
+ ? creatorSig : creatorAnySig;
builder.addFunction("createStruct", structSig)
.addBody([kExprI32Const, 12, kGCPrefix, kExprStructNew, struct])
.exportFunc();
- let arraySig = typeName != "structref" ? creatorSig : creatorAnySig;
+ let arraySig = typeName != "structref" && typeName != "i31ref"
+ ? creatorSig : creatorAnySig;
builder.addFunction("createArray", arraySig)
.addBody([
kExprI32Const, 12,
@@ -158,7 +160,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) {
assertSame(table.get(2), table.get(3)); // The same smi.
}
// Set struct.
- if (typeName != "arrayref") {
+ if (typeName != "arrayref" && typeName != "i31ref") {
table.set(4, wasm.exported(wasm.createStruct));
assertSame(table.get(4), wasm.tableGet(4));
assertEquals(12, wasm.tableGetStructVal(4));
@@ -168,7 +170,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) {
assertNotSame(table.get(4), table.get(5));
}
// Set array.
- if (typeName != "structref") {
+ if (typeName != "structref" && typeName != "i31ref") {
table.set(6, wasm.exported(wasm.createArray));
assertSame(table.get(6), wasm.tableGet(6));
assertEquals(12, wasm.tableGetArrayVal(6));
@@ -190,7 +192,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) {
assertEquals(largeString, table.get(9));
}
- if (typeName != "arrayref") {
+ if (typeName != "arrayref" && typeName != "i31ref") {
// Grow table with explicit value.
table.grow(2, wasm.exported(wasm.createStruct));
assertEquals(12, wasm.tableGetStructVal(size));
@@ -205,6 +207,19 @@ for (let [typeName, type] of Object.entries(tableTypes)) {
assertEquals("Grow using a string", wasm.tableGet(14));
assertEquals("Grow using a string", table.get(14));
}
+ if (typeName == "i31ref" || typeName == "anyref") {
+ table.set(0, 123);
+ assertEquals(123, table.get(0));
+ table.set(1, -123);
+ assertEquals(-123, table.get(1));
+ if (typeName == "i31ref") {
+ assertThrows(() => table.set(0, 1 << 31), TypeError);
+ } else {
+ // anyref can reference boxed numbers as well.
+ table.set(0, 1 << 31)
+ assertEquals(1 << 31, table.get(0));
+ }
+ }
// Set from JS with wrapped wasm value of incompatible type.
let invalidValues = {
@@ -212,6 +227,7 @@ for (let [typeName, type] of Object.entries(tableTypes)) {
"eqref": [],
"structref": ["I31", "Array"],
"arrayref": ["I31", "Struct"],
+ "i31ref": ["Struct", "Array"],
};
for (let invalidType of invalidValues[typeName]) {
print(`Test invalid type ${invalidType} for ${typeName}`);
diff --git a/deps/v8/test/mjsunit/wasm/reference-tables.js b/deps/v8/test/mjsunit/wasm/reference-tables.js
index c54fef5218..2255f6c4e9 100644
--- a/deps/v8/test/mjsunit/wasm/reference-tables.js
+++ b/deps/v8/test/mjsunit/wasm/reference-tables.js
@@ -322,6 +322,41 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(1, instance.exports.null_getter(2));
})();
+(function TestI31RefTable() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ let table = builder.addTable(kWasmI31Ref, 4, 4);
+ builder.addActiveElementSegment(
+ table, wasmI32Const(0),
+ [[...wasmI32Const(10), kGCPrefix, kExprI31New],
+ [...wasmI32Const(-42), kGCPrefix, kExprI31New],
+ [kExprRefNull, kI31RefCode]],
+ kWasmI31Ref);
+
+ builder.addFunction("i31GetI32", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0, kExprTableGet, 0,
+ kGCPrefix, kExprI31GetS])
+ .exportFunc();
+
+ builder.addFunction("i31GetNull", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprTableGet, 0, kExprRefIsNull])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ assertTrue(!!instance);
+
+ assertEquals(0, instance.exports.i31GetNull(0));
+ assertEquals(0, instance.exports.i31GetNull(1));
+ assertEquals(1, instance.exports.i31GetNull(2));
+ assertEquals(1, instance.exports.i31GetNull(3));
+ assertEquals(10, instance.exports.i31GetI32(0));
+ assertEquals(-42, instance.exports.i31GetI32(1));
+ assertTraps(kTrapNullDereference, () => instance.exports.i31GetI32(2));
+ assertTraps(kTrapNullDereference, () => instance.exports.i31GetI32(3));
+})();
+
(function TestArrayRefTable() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -425,8 +460,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
// Equivalent struct type.
let builder = new WasmModuleBuilder();
- // Force type canonicalization for struct_type
- builder.setSingletonRecGroups();
let struct_type = builder.addStruct([makeField(kWasmI32, false)]);
let struct_type_invalid = builder.addStruct([makeField(kWasmI64, false)]);
let struct_type_sub = builder.addStruct(
@@ -473,7 +506,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
print(arguments.callee.name);
let exporting_instance = (() => {
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let struct_type_base = builder.addStruct([makeField(kWasmI32, false)]);
let struct_type =
builder.addStruct([makeField(kWasmI32, false)], struct_type_base);
@@ -482,7 +514,6 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
})();
let builder = new WasmModuleBuilder();
- builder.setSingletonRecGroups();
let struct_type_base = builder.addStruct([makeField(kWasmI32, false)]);
let struct_type =
builder.addStruct([makeField(kWasmI32, false)], struct_type_base);
@@ -544,3 +575,56 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
wasmTable.set(2, instance.exports.create_struct(333));
assertEquals(333, instance.exports.struct_getter(2));
})();
+
+(function TestTypedTableCallIndirect() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+
+ let super_struct = builder.addStruct([makeField(kWasmI32, false)]);
+ let sub_struct = builder.addStruct(
+ [makeField(kWasmI32, false), makeField(kWasmI32, false)], super_struct);
+ let super_sig = builder.addType(
+ makeSig([kWasmI32], [wasmRefType(super_struct)]));
+ let sub_sig = builder.addType(
+ makeSig([kWasmI32], [wasmRefType(sub_struct)]), super_sig);
+
+ let super_func = builder.addFunction("super_func", super_sig)
+ .addBody([kExprLocalGet, 0, kGCPrefix, kExprStructNew, super_struct]);
+  let sub_func = builder.addFunction("sub_func", sub_sig)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
+ kExprLocalGet, 0, kExprI32Const, 2, kExprI32Add,
+ kGCPrefix, kExprStructNew, sub_struct]);
+
+ let table = builder.addTable(wasmRefNullType(super_sig), 10, 10);
+ builder.addActiveElementSegment(
+ table.index, wasmI32Const(0),
+ [[kExprRefFunc, super_func.index], [kExprRefFunc, sub_func.index]],
+ wasmRefType(super_sig));
+
+ // Parameters: index, value.
+ builder.addFunction("call_indirect_super", kSig_i_ii)
+ .addBody([kExprLocalGet, 1, kExprLocalGet, 0,
+ kExprCallIndirect, super_sig, table.index,
+ kGCPrefix, kExprStructGet, super_struct, 0])
+ .exportFunc();
+ builder.addFunction("call_indirect_sub", kSig_i_ii)
+ .addBody([kExprLocalGet, 1, kExprLocalGet, 0,
+ kExprCallIndirect, sub_sig, table.index,
+ kGCPrefix, kExprStructGet, sub_struct, 0])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ // No type check needed, null check needed.
+ assertEquals(10, instance.exports.call_indirect_super(0, 10));
+ assertEquals(11, instance.exports.call_indirect_super(1, 10));
+ assertTraps(kTrapFuncSigMismatch,
+ () => instance.exports.call_indirect_super(2, 10));
+ // Type check and null check needed.
+ assertEquals(11, instance.exports.call_indirect_sub(1, 10));
+ assertTraps(kTrapFuncSigMismatch,
+ () => instance.exports.call_indirect_sub(0, 10));
+ assertTraps(kTrapFuncSigMismatch,
+ () => instance.exports.call_indirect_sub(2, 10));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/return-calls-eh.js b/deps/v8/test/mjsunit/wasm/return-calls-eh.js
new file mode 100644
index 0000000000..e031f7037d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/return-calls-eh.js
@@ -0,0 +1,113 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --experimental-wasm-return-call
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
+
+// Check that exceptions thrown by a return_call cannot be caught inside the
+// frame that does the call.
+(function TryReturnCallCatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addTag(kSig_v_v);
+ let throw_ = builder.addFunction("throw", kSig_v_v)
+ .addBody([kExprThrow, except]);
+ builder.addFunction("try_return_call", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmVoid,
+ kExprReturnCall, throw_.index,
+ kExprCatch, except,
+ kExprEnd
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertWasmThrows(instance, except, [], () => instance.exports.try_return_call());
+})();
+
+// Check that exceptions thrown by a return_call_indirect cannot be caught
+// inside the frame that does the call.
+(function TryReturnCallIndirectCatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addTag(kSig_v_v);
+ let sig = builder.addType(kSig_v_v);
+ let throw_ = builder.addFunction("throw", kSig_v_v)
+ .addBody([kExprThrow, except]);
+ let table = builder.addTable(kWasmAnyFunc, 1, 1);
+ builder.addActiveElementSegment(table.index, wasmI32Const(0), [0]);
+ builder.addFunction("try_return_call_indirect", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmVoid,
+ kExprI32Const, throw_.index,
+ kExprReturnCallIndirect, sig, 0,
+ kExprCatch, except,
+ kExprEnd
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertWasmThrows(instance, except, [], () => instance.exports.try_return_call_indirect());
+})();
+
+// Check that exceptions thrown by a return_call cannot be delegated inside the
+// frame that does the call.
+(function TryReturnCallDelegate() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addTag(kSig_v_v);
+ let throw_ = builder.addFunction("throw", kSig_v_v)
+ .addBody([kExprThrow, except]);
+ builder.addFunction("try_return_call_delegate", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmVoid,
+ kExprTry, kWasmVoid,
+ kExprReturnCall, throw_.index,
+ kExprDelegate, 0,
+ kExprCatch, except,
+ kExprEnd
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertWasmThrows(instance, except, [], () => instance.exports.try_return_call_delegate());
+})();
+
+// Check that exceptions thrown by a return_call to a JS import cannot be caught
+// inside the frame that does the call.
+(function TryReturnCallImportJSCatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let throw_import = builder.addImport("m", "throw_", kSig_v_v);
+ let except = builder.addTag(kSig_v_v);
+ builder.addExportOfKind("ex", kExternalTag, except);
+ builder.addFunction("return_call", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmVoid,
+ kExprReturnCall, throw_import,
+ kExprCatchAll,
+ kExprEnd
+ ]).exportFunc();
+ let throw_ = () => { throw new WebAssembly.Exception(instance.exports.ex, []); };
+ let instance = builder.instantiate({m: {throw_}});
+ assertWasmThrows(instance, except, [], () => instance.exports.return_call());
+})();
+
+// Check that the exception can be caught in the caller's caller.
+(function TryReturnCallCatchInCaller() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addTag(kSig_v_v);
+ let throw_ = builder.addFunction("throw", kSig_v_v)
+ .addBody([kExprThrow, except]);
+ builder.addFunction("caller", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmVoid,
+ kExprCallFunction, 2,
+ kExprCatch, except,
+ kExprEnd
+ ]).exportFunc();
+ builder.addFunction("return_call", kSig_v_v)
+ .addBody([
+ kExprReturnCall, throw_.index,
+ ]);
+ let instance = builder.instantiate();
+ assertDoesNotThrow(() => instance.exports.caller());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js b/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js
index 72bcae09af..832e5c017f 100644
--- a/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js
+++ b/deps/v8/test/mjsunit/wasm/runtime-type-canonicalization.js
@@ -7,7 +7,6 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
-builder.setSingletonRecGroups();
let struct_index = builder.addStruct([makeField(kWasmI32, true)]);
let identical_struct_index = builder.addStruct([makeField(kWasmI32, true)]);
diff --git a/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js b/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js
index d7bd61ba61..6ed8a33582 100644
--- a/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js
+++ b/deps/v8/test/mjsunit/wasm/serialization-with-compilation-hints.js
@@ -7,10 +7,6 @@
// Make the test faster:
// Flags: --wasm-tiering-budget=1000
-// This test busy-waits for tier-up to be complete, hence it does not work in
-// predictable mode where we only have a single thread.
-// Flags: --no-predictable
-
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
const num_functions = 3;
@@ -31,10 +27,7 @@ const wire_bytes = create_builder().toBuffer();
function serializeModule() {
const module = new WebAssembly.Module(wire_bytes);
let instance = new WebAssembly.Instance(module, {foo: {bar: () => 1}});
- // Execute {f1} until it gets tiered up.
- while (!%IsTurboFanFunction(instance.exports.f1)) {
- instance.exports.f1();
- }
+ %WasmTierUpFunction(instance.exports.f1);
// Execute {f2} once, so that the module knows that this is a used function.
instance.exports.f2();
const buff = %SerializeWasmModule(module);
@@ -50,7 +43,8 @@ const serialized_module = serializeModule();
const instance = new WebAssembly.Instance(module, {foo: {bar: () => 1}});
assertTrue(%IsTurboFanFunction(instance.exports.f1));
- assertTrue(%IsLiftoffFunction(instance.exports.f2));
+ // Busy-wait for `f2` to be compiled with Liftoff.
+ while (!%IsLiftoffFunction(instance.exports.f2)) {}
assertTrue(
!%IsLiftoffFunction(instance.exports.f0) &&
!%IsTurboFanFunction(instance.exports.f0));
diff --git a/deps/v8/test/mjsunit/wasm/simd-lane-memory64.js b/deps/v8/test/mjsunit/wasm/simd-lane-memory64.js
new file mode 100644
index 0000000000..77c81206e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/simd-lane-memory64.js
@@ -0,0 +1,93 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-memory64
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+const GB = 1024 * 1024 * 1024;
+const SRC_OFFSET = 4294970000n; // 0x100000a90n
+const SRC_OFFSET_LEB = [0x90, 0x95, 0x80, 0x80, 0x10];
+const DST_OFFSET = 4294970160n;
+const DST_OFFSET_LEB = [0xb0, 0x96, 0x80, 0x80, 0x10];
+
+var builder = new WasmModuleBuilder();
+builder.addMemory64(5 * GB / kPageSize).exportMemoryAs("memory");
+
+// Here we make a global of type v128 to be the target
+// for loading lanes and the source for storing lanes.
+var g = builder.addGlobal(
+ kWasmS128, true,
+ [kSimdPrefix, kExprS128Const,
+ 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
+
+for (let i = 0; i < 4; ++i) {
+ builder.addFunction(`load_lane_${i}`, kSig_v_l)
+ .addBody([kExprLocalGet, 0,
+ kExprGlobalGet, g.index,
+ kSimdPrefix, kExprS128Load32Lane, 0, 0, i,
+ kExprGlobalSet, g.index])
+ .exportFunc();
+
+ builder.addFunction(`store_lane_${i}`, kSig_v_l)
+ .addBody([kExprLocalGet, 0,
+ kExprGlobalGet, g.index,
+ kSimdPrefix, kExprS128Store32Lane, 0, 0, i])
+ .exportFunc();
+
+ builder.addFunction(`Load_Lane_${i}`, kSig_v_l)
+ .addBody([kExprLocalGet, 0,
+ kExprGlobalGet, g.index,
+ kSimdPrefix, kExprS128Load32Lane, 0, ...SRC_OFFSET_LEB, i,
+ kExprGlobalSet, g.index])
+ .exportFunc();
+
+ builder.addFunction(`Store_Lane_${i}`, kSig_v_l)
+ .addBody([kExprLocalGet, 0,
+ kExprGlobalGet, g.index,
+ kSimdPrefix, kExprS128Store32Lane, 0, ...DST_OFFSET_LEB, i])
+ .exportFunc();
+}
+
+(function TestLoadStoreLaneExternalOffset(){
+ print(arguments.callee.name);
+
+ var instance = builder.instantiate({});
+ var buffer = instance.exports.memory.buffer;
+
+ var src_view = new Uint32Array(buffer, Number(SRC_OFFSET), 4);
+ var dst_view = new Uint32Array(buffer, Number(DST_OFFSET), 4);
+ var values = [ 0x01234567, 0x89abcdef, 0x76543210, 0xfedcba98 ];
+ var expected_values = [ 0, 0, 0, 0 ];
+ src_view.set(values, 0);
+
+ for (let i = 0n; i < 4n; ++i) {
+ expected_values[i] = values[i];
+ const offset = 4n * i;
+ instance.exports[`load_lane_${i}`](SRC_OFFSET + offset);
+ instance.exports[`store_lane_${i}`](DST_OFFSET + offset);
+ assertEquals(expected_values, Array.from(dst_view.values()));
+ }
+})();
+
+(function TestLoadStoreLaneInternalOffset(){
+ print(arguments.callee.name);
+
+ var instance = builder.instantiate({});
+ var buffer = instance.exports.memory.buffer;
+
+ var src_view = new Uint32Array(buffer, Number(SRC_OFFSET), 4);
+ var dst_view = new Uint32Array(buffer, Number(DST_OFFSET), 4);
+ var values = [ 0x01234567, 0x89abcdef, 0x76543210, 0xfedcba98 ];
+ var expected_values = [ 0, 0, 0, 0 ];
+ src_view.set(values, 0);
+
+ for (let i = 0n; i < 4n; ++i) {
+ expected_values[i] = values[i];
+ const offset = 4n * i;
+ instance.exports[`Load_Lane_${i}`](offset);
+ instance.exports[`Store_Lane_${i}`](offset);
+ assertEquals(expected_values, Array.from(dst_view.values()));
+ }
+})();
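
SRC_OFFSET_LEB and DST_OFFSET_LEB above are the unsigned LEB128 encodings of SRC_OFFSET and DST_OFFSET, written out by hand because they are spliced into the instruction bytes as raw immediates. A small encoder that reproduces those byte arrays (illustrative sketch, not part of the patch):

function unsignedLeb(value) {           // value is a BigInt
  const bytes = [];
  do {
    let byte = Number(value & 0x7fn);   // low 7 bits
    value >>= 7n;
    if (value !== 0n) byte |= 0x80;     // continuation bit
    bytes.push(byte);
  } while (value !== 0n);
  return bytes;
}
// unsignedLeb(4294970000n) -> [0x90, 0x95, 0x80, 0x80, 0x10]  (SRC_OFFSET_LEB)
// unsignedLeb(4294970160n) -> [0xb0, 0x96, 0x80, 0x80, 0x10]  (DST_OFFSET_LEB)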
diff --git a/deps/v8/test/mjsunit/wasm/speculative-inlining.js b/deps/v8/test/mjsunit/wasm/speculative-inlining.js
index eff9fd62de..7f9f28859f 100644
--- a/deps/v8/test/mjsunit/wasm/speculative-inlining.js
+++ b/deps/v8/test/mjsunit/wasm/speculative-inlining.js
@@ -33,7 +33,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let instance = builder.instantiate();
for (let i = 0; i < 20; i++) assertEquals(14, instance.exports.main(10));
- %WasmTierUpFunction(instance, main.index);
+ %WasmTierUpFunction(instance.exports.main);
// The tiered-up function should have {callee} speculatively inlined.
assertEquals(14, instance.exports.main(10));
})();
@@ -75,7 +75,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let instance = builder.instantiate();
for (let i = 0; i < 20; i++) assertEquals(14, instance.exports.main(10, 1));
- %WasmTierUpFunction(instance, main.index);
+ %WasmTierUpFunction(instance.exports.main);
// Tier-up is done, and {callee0} should be inlined in the trace.
assertEquals(14, instance.exports.main(10, 1));
@@ -105,7 +105,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let instance = builder.instantiate();
for (let i = 0; i < 20; i++) assertEquals(14, instance.exports.main(10));
- %WasmTierUpFunction(instance, main.index);
+ %WasmTierUpFunction(instance.exports.main);
// After tier-up, the tail call should be speculatively inlined.
assertEquals(14, instance.exports.main(10));
})();
@@ -145,7 +145,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
let instance = builder.instantiate();
assertEquals(9, instance.exports.main(10, 1));
- %WasmTierUpFunction(instance, main.index);
+ %WasmTierUpFunction(instance.exports.main);
// After tier-up, {callee0} should be inlined in the trace.
assertEquals(9, instance.exports.main(10, 1))
@@ -190,7 +190,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// Run 'main' until it is tiered-up.
assertEquals(1, instance2.exports.main(0, instance1.exports.f1));
- %WasmTierUpFunction(instance2, main.index);
+ %WasmTierUpFunction(instance2.exports.main);
// The function f1 defined in another module should not be inlined.
assertEquals(1, instance2.exports.main(0, instance1.exports.f1));
})();
@@ -232,7 +232,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(16, instance2.exports.main(5, f1, f2));
}
}
- %WasmTierUpFunction(instance2, main.index);
+ %WasmTierUpFunction(instance2.exports.main);
// WebAssembly.Function objects should not be inlined.
assertEquals(16, instance2.exports.main(5, f1, f2));
assertEquals(12, instance2.exports.main(5, f1, f1));
diff --git a/deps/v8/test/mjsunit/wasm/stack-push-root.js b/deps/v8/test/mjsunit/wasm/stack-push-root.js
new file mode 100644
index 0000000000..2f14841016
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/stack-push-root.js
@@ -0,0 +1,35 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+// Tests that code generator behaves correctly when pushing to the stack an
+// operand that is an offset of the root register.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+
+let sig = makeSig(
+ [kWasmAnyRef, kWasmAnyRef, kWasmAnyRef, kWasmAnyRef, kWasmAnyRef,
+ kWasmAnyRef],
+ [kWasmAnyRef]);
+let sig_index = builder.addType(sig);
+
+let callee =
+ builder.addFunction('callee', sig_index).addBody([kExprLocalGet, 5]);
+
+builder.addFunction('main', kSig_r_v).exportFunc().addBody([
+ kExprRefNull, kAnyRefCode,
+ kExprRefNull, kAnyRefCode,
+ kExprRefNull, kAnyRefCode,
+ kExprRefNull, kAnyRefCode,
+ kExprRefNull, kAnyRefCode,
+ kExprRefNull, kAnyRefCode,
+ kExprCallFunction, callee.index,
+ kGCPrefix, kExprExternExternalize
+]);
+
+let instance = builder.instantiate();
+assertEquals(null, instance.exports.main());
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index e9825e0db6..8c06431686 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -59,10 +59,13 @@ function testErrorPosition(bytes, pos, message) {
(function testSectionLengthTooBig() {
let bytes = new Binary;
bytes.emit_header();
+ let pos = bytes.length;
bytes.emit_u8(kTypeSectionCode);
bytes.emit_u32v(0xffffff23);
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'section length');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 1, "Type"\\) extends past end of the module ' +
+ '\\(length 4294967075, remaining bytes 0\\)');
})();
(function testFunctionsCountInvalidVarint() {
@@ -78,6 +81,7 @@ function testErrorPosition(bytes, pos, message) {
1, // section length
0 // number of functions
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -85,8 +89,10 @@ function testErrorPosition(bytes, pos, message) {
// Functions count
bytes.emit_bytes([0x80, 0x80, 0x80, 0x80, 0x80, 0x00]);
- let pos = bytes.length - 1 - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'expected functions count');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 6\\)');
})();
(function testFunctionsCountTooBig() {
@@ -102,6 +108,7 @@ function testErrorPosition(bytes, pos, message) {
1, // section length
0 // number of functions
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -109,8 +116,10 @@ function testErrorPosition(bytes, pos, message) {
// Functions count
bytes.emit_u32v(0xffffff23);
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'functions count');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 5\\)');
})();
(function testFunctionsCountDoesNotMatch() {
@@ -126,6 +135,7 @@ function testErrorPosition(bytes, pos, message) {
1, // section length
0 // number of functions
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -133,8 +143,10 @@ function testErrorPosition(bytes, pos, message) {
// Functions count (different than the count in the functions section.
bytes.emit_u32v(5);
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'function body count 5 mismatch');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 1\\)');
})();
(function testBodySizeInvalidVarint() {
@@ -154,6 +166,7 @@ function testErrorPosition(bytes, pos, message) {
1, // number of functions
0 // signature index
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -162,8 +175,10 @@ function testErrorPosition(bytes, pos, message) {
// Invalid function body size.
bytes.emit_bytes([0x80, 0x80, 0x80, 0x80, 0x80, 0x00]);
- let pos = bytes.length - 1 - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'function body size');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 7\\)');
})();
(function testBodySizeTooBig() {
@@ -183,6 +198,7 @@ function testErrorPosition(bytes, pos, message) {
1, // number of functions
0 // signature index
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -191,8 +207,10 @@ function testErrorPosition(bytes, pos, message) {
// Invalid function body size.
bytes.emit_u32v(0xffffff23);
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'function body size');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 6\\)');
})();
(function testBodySizeDoesNotFit() {
@@ -212,6 +230,7 @@ function testErrorPosition(bytes, pos, message) {
1, // number of functions
0 // signature index
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -220,8 +239,10 @@ function testErrorPosition(bytes, pos, message) {
// Invalid function body size (does not fit into the code section).
bytes.emit_u32v(20);
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'not enough code section bytes');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 2\\)');
})();
(function testBodySizeIsZero() {
@@ -241,6 +262,7 @@ function testErrorPosition(bytes, pos, message) {
1, // number of functions
0 // signature index
]);
+ let pos = bytes.length;
bytes.emit_bytes([
kCodeSectionCode, // section id
20, // section length (arbitrary value > 6)
@@ -249,8 +271,10 @@ function testErrorPosition(bytes, pos, message) {
// Invalid function body size (body size of 0 is invalid).
bytes.emit_u32v(0);
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'invalid function length');
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section \\(code 10, "Code"\\) extends past end of the module ' +
+ '\\(length 20, remaining bytes 2\\)');
})();
(function testStaleCodeSectionBytes() {
@@ -272,15 +296,21 @@ function testErrorPosition(bytes, pos, message) {
]);
bytes.emit_bytes([
kCodeSectionCode, // section id
- 20, // section length (too big)
+ 10, // section length (too big)
1, // functions count
2, // body size
0, // locals count
kExprEnd // body
]);
-
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'not all code section bytes were used');
+ let pos = bytes.length;
+ // Add some more bytes to avoid early error detection for the too-large
+ // section length.
+ bytes.emit_bytes([0, 0, 0, 0, 0, 0, 0, 0]);
+
+ testErrorPositionAsyncOnly(
+ bytes, pos,
+ 'section was shorter than expected size ' +
+ '\\(10 bytes expected, 4 decoded\\)');
})();
(function testInvalidCode() {
@@ -340,7 +370,9 @@ function testErrorPosition(bytes, pos, message) {
0, // locals count
kExprEnd // body
]);
- let pos = bytes.length;
+ // TODO(clemensb): Fix error reporting to point to the section start, not the
+ // payload start.
+ let pos = bytes.length + 2;
bytes.emit_bytes([
kCodeSectionCode, // section id (repeating)
4, // section length
@@ -351,7 +383,7 @@ function testErrorPosition(bytes, pos, message) {
]);
// Find error at the second kCodeSectionCode.
- testErrorPositionAsyncOnly(bytes, pos, 'code section can only appear once');
+ testErrorPositionAsyncOnly(bytes, pos, 'unexpected section <Code>');
})();
(function testCodeSectionSizeZero() {
@@ -377,8 +409,8 @@ function testErrorPosition(bytes, pos, message) {
]);
// Find error at the code section length.
- let pos = bytes.length - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'code section cannot have size 0');
+ let pos = bytes.length;
+ testErrorPositionAsyncOnly(bytes, pos, 'expected functions count');
})();
(function testInvalidSection() {
diff --git a/deps/v8/test/mjsunit/wasm/stringrefs-exec-gc.js b/deps/v8/test/mjsunit/wasm/stringrefs-exec-gc.js
index 25489df802..e3c47253e7 100644
--- a/deps/v8/test/mjsunit/wasm/stringrefs-exec-gc.js
+++ b/deps/v8/test/mjsunit/wasm/stringrefs-exec-gc.js
@@ -117,6 +117,7 @@ function makeWtf8TestDataSegment() {
for (let [instr, name] of
[[kExprStringNewWtf8Array, "new_wtf8"],
[kExprStringNewUtf8Array, "new_utf8"],
+ [kExprStringNewUtf8ArrayTry, "new_utf8_try"],
[kExprStringNewLossyUtf8Array, "new_utf8_sloppy"]]) {
builder.addFunction(name, kSig_w_ii)
.exportFunc()
@@ -145,6 +146,7 @@ function makeWtf8TestDataSegment() {
if (HasIsolatedSurrogate(str)) {
assertThrows(() => instance.exports.new_utf8(start, end),
WebAssembly.RuntimeError, "invalid UTF-8 string");
+ assertNull(instance.exports.new_utf8_try(start, end));
// Isolated surrogates have the three-byte pattern ED [A0,BF]
// [80,BF]. When the sloppy decoder gets to the second byte, it
@@ -158,6 +160,7 @@ function makeWtf8TestDataSegment() {
} else {
assertEquals(str, instance.exports.new_utf8(start, end));
assertEquals(str, instance.exports.new_utf8_sloppy(start, end));
+ assertEquals(str, instance.exports.new_utf8_try(start, end));
}
}
for (let [str, {offset, length}] of Object.entries(data.invalid)) {
@@ -167,6 +170,7 @@ function makeWtf8TestDataSegment() {
WebAssembly.RuntimeError, "invalid WTF-8 string");
assertThrows(() => instance.exports.new_utf8(start, end),
WebAssembly.RuntimeError, "invalid UTF-8 string");
+ assertNull(instance.exports.new_utf8_try(start, end));
}
assertEquals("ascii", instance.exports.bounds_check(0, "ascii".length));
@@ -181,6 +185,40 @@ function makeWtf8TestDataSegment() {
WebAssembly.RuntimeError, "array element access out of bounds");
})();
+(function TestStringNewUtf8ArrayTryNullCheck() {
+ let builder = new WasmModuleBuilder();
+ let data = makeWtf8TestDataSegment();
+ let data_index = builder.addPassiveDataSegment(data.data);
+ let i8_array = builder.addArray(kWasmI8, true);
+
+ let make_i8_array = builder.addFunction(
+ "make_i8_array", makeSig([], [wasmRefType(i8_array)]))
+ .addBody([
+ ...wasmI32Const(0),
+ ...wasmI32Const(data.data.length),
+ kGCPrefix, kExprArrayNewData, i8_array, data_index
+ ]).index;
+
+ builder.addFunction("is_null_new_utf8_try", kSig_i_ii)
+ .exportFunc()
+ .addBody([
+ kExprCallFunction, make_i8_array,
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ ...GCInstr(kExprStringNewUtf8ArrayTry),
+ kExprRefIsNull,
+ ]);
+
+ let instance = builder.instantiate();
+ for (let [str, {offset, length}] of Object.entries(data.valid)) {
+ assertEquals(
+ +HasIsolatedSurrogate(str),
+ instance.exports.is_null_new_utf8_try(offset, offset+length));
+ }
+ for (let [str, {offset, length}] of Object.entries(data.invalid)) {
+ assertEquals(1, instance.exports.is_null_new_utf8_try(offset, offset+length));
+ }
+})();
+
function encodeWtf16LE(str) {
// String iterator coalesces surrogate pairs.
let out = [];
@@ -192,11 +230,11 @@ function encodeWtf16LE(str) {
return out;
}
-function makeWtf16TestDataSegment() {
+function makeWtf16TestDataSegment(strings) {
let data = []
let valid = {};
- for (let str of interestingStrings) {
+ for (let str of strings) {
valid[str] = { offset: data.length, length: str.length };
for (let byte of encodeWtf16LE(str)) {
data.push(byte);
@@ -209,7 +247,15 @@ function makeWtf16TestDataSegment() {
(function TestStringNewWtf16Array() {
let builder = new WasmModuleBuilder();
- let data = makeWtf16TestDataSegment();
+ // string.new_wtf16_array switches to a different implementation (runtime
+ // instead of Torque) for more than 32 characters, so provide some coverage
+ // for that case.
+ let strings = interestingStrings.concat([
+ "String with more than 32 characters, all of which are ASCII",
+ "Two-byte string with more than 32 characters \ucccc \ud800\udc00 \xa9?"
+ ]);
+
+ let data = makeWtf16TestDataSegment(strings);
let data_index = builder.addPassiveDataSegment(data.data);
let ascii_data_index =
builder.addPassiveDataSegment(Uint8Array.from(encodeWtf16LE("ascii")));
diff --git a/deps/v8/test/mjsunit/wasm/stringrefs-exec.js b/deps/v8/test/mjsunit/wasm/stringrefs-exec.js
index 9969ad1cf4..901a07d3ee 100644
--- a/deps/v8/test/mjsunit/wasm/stringrefs-exec.js
+++ b/deps/v8/test/mjsunit/wasm/stringrefs-exec.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-stringref
+// Flags: --experimental-wasm-stringref --experimental-wasm-typed-funcref
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
@@ -165,6 +165,13 @@ function makeWtf8TestDataSegment() {
...GCInstr(kExprStringNewUtf8), 0
]);
+ builder.addFunction("string_new_utf8_try", kSig_w_ii)
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ ...GCInstr(kExprStringNewUtf8Try), 0
+ ]);
+
builder.addFunction("string_new_wtf8", kSig_w_ii)
.exportFunc()
.addBody([
@@ -185,6 +192,7 @@ function makeWtf8TestDataSegment() {
if (HasIsolatedSurrogate(str)) {
assertThrows(() => instance.exports.string_new_utf8(offset, length),
WebAssembly.RuntimeError, "invalid UTF-8 string");
+ assertEquals(null, instance.exports.string_new_utf8_try(offset, length));
// Isolated surrogates have the three-byte pattern ED [A0,BF]
// [80,BF]. When the sloppy decoder gets to the second byte, it
@@ -197,6 +205,7 @@ function makeWtf8TestDataSegment() {
instance.exports.string_new_utf8_sloppy(offset, length));
} else {
assertEquals(str, instance.exports.string_new_utf8(offset, length));
+ assertEquals(str, instance.exports.string_new_utf8_try(offset, length));
assertEquals(str,
instance.exports.string_new_utf8_sloppy(offset, length));
}
@@ -206,6 +215,34 @@ function makeWtf8TestDataSegment() {
WebAssembly.RuntimeError, "invalid WTF-8 string");
assertThrows(() => instance.exports.string_new_utf8(offset, length),
WebAssembly.RuntimeError, "invalid UTF-8 string");
+ assertEquals(null, instance.exports.string_new_utf8_try(offset, length));
+ }
+})();
+
+(function TestStringNewUtf8TryNullCheck() {
+ let builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, undefined, false, false);
+ let data = makeWtf8TestDataSegment();
+ builder.addDataSegment(0, data.data);
+
+ builder.addFunction("is_null_new_utf8_try", kSig_i_ii)
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ ...GCInstr(kExprStringNewUtf8Try), 0,
+ kExprRefIsNull,
+ ]);
+
+ let instance = builder.instantiate();
+ for (let [str, {offset, length}] of Object.entries(data.valid)) {
+ print(offset, length);
+ assertEquals(
+ +HasIsolatedSurrogate(str),
+ instance.exports.is_null_new_utf8_try(offset, length));
+ }
+ for (let [str, {offset, length}] of Object.entries(data.invalid)) {
+ assertEquals(1, instance.exports.is_null_new_utf8_try(offset, length));
}
})();
@@ -1175,3 +1212,119 @@ function makeWtf16TestDataSegment() {
assertThrows(() => instance.exports.slice_null(),
WebAssembly.RuntimeError, "dereferencing a null pointer");
})();
+
+(function TestStringCompare() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction("compare",
+ makeSig([kWasmStringRef, kWasmStringRef], [kWasmI32]))
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ ...GCInstr(kExprStringCompare)
+ ]);
+
+ let instance = builder.instantiate();
+ for (let lhs of interestingStrings) {
+ for (let rhs of interestingStrings) {
+ print(`"${lhs}" <=> "${rhs}"`);
+ const expected = lhs < rhs ? -1 : lhs > rhs ? 1 : 0;
+ assertEquals(expected, instance.exports.compare(lhs, rhs));
+ }
+ }
+
+ assertThrows(() => instance.exports.compare(null, "abc"),
+ WebAssembly.RuntimeError, "dereferencing a null pointer");
+ assertThrows(() => instance.exports.compare("abc", null),
+ WebAssembly.RuntimeError, "dereferencing a null pointer");
+})();
+
+(function TestStringCompareNullCheckStaticType() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ // Use a mix of nullable and non-nullable input types to the compare.
+ builder.addFunction("compareLhsNullable",
+ makeSig([kWasmStringRef, kWasmStringRef], [kWasmI32]))
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0,
+ kExprRefAsNonNull,
+ kExprLocalGet, 1,
+ ...GCInstr(kExprStringCompare)
+ ]);
+
+ builder.addFunction("compareRhsNullable",
+ makeSig([kWasmStringRef, kWasmStringRef], [kWasmI32]))
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprRefAsNonNull,
+ ...GCInstr(kExprStringCompare)
+ ]);
+
+ let instance = builder.instantiate();
+ assertThrows(() => instance.exports.compareLhsNullable(null, "abc"),
+ WebAssembly.RuntimeError, "dereferencing a null pointer");
+ assertThrows(() => instance.exports.compareLhsNullable("abc", null),
+ WebAssembly.RuntimeError, "dereferencing a null pointer");
+ assertThrows(() => instance.exports.compareRhsNullable(null, "abc"),
+ WebAssembly.RuntimeError, "dereferencing a null pointer");
+ assertThrows(() => instance.exports.compareRhsNullable("abc", null),
+ WebAssembly.RuntimeError, "dereferencing a null pointer");
+})();
+
+(function TestStringFromCodePoint() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("asString",
+ makeSig([kWasmI32], [wasmRefType(kWasmStringRef)]))
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringFromCodePoint),
+ ]);
+
+ let instance = builder.instantiate();
+ for (let char of "Az1#\n\ucccc\ud800\udc00") {
+ assertEquals(char, instance.exports.asString(char.codePointAt(0)));
+ }
+ for (let codePoint of [0x110000, 0xFFFFFFFF, -1]) {
+ assertThrows(() => instance.exports.asString(codePoint),
+ WebAssembly.RuntimeError, /Invalid code point [0-9]+/);
+ }
+})();
+
+(function TestStringHash() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("hash", kSig_i_w)
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringHash),
+ ]);
+
+ let hash = builder.instantiate().exports.hash;
+ assertEquals(hash(""), hash(""));
+ assertEquals(hash("foo"), hash("foo"));
+ assertEquals(hash("bar"), hash("bar"));
+ assertEquals(hash("123"), hash("123"));
+ // Assuming that hash collisions are very rare.
+ assertNotEquals(hash("foo"), hash("bar"));
+ // Test with cons strings.
+ assertEquals(hash("f" + "o" + "o"), hash("foo"));
+ assertEquals(hash("f" + 1), hash("f1"));
+
+ assertEquals(hash(new String(" foo ").trim()), hash("foo"));
+ assertEquals(hash(new String("xfoox").substring(1, 4)), hash("foo"));
+
+ // Test integer index hash.
+ let dummy_obj = {123: 456};
+ let index_string = "123";
+ assertEquals(456, dummy_obj[index_string]);
+ assertEquals(hash("1" + "23"), hash(index_string));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/stringrefs-regressions.js b/deps/v8/test/mjsunit/wasm/stringrefs-regressions.js
index c12111f29d..305a1d3c33 100644
--- a/deps/v8/test/mjsunit/wasm/stringrefs-regressions.js
+++ b/deps/v8/test/mjsunit/wasm/stringrefs-regressions.js
@@ -65,7 +65,7 @@ let kSig_v_w = makeSig([kWasmStringRef], []);
// Bug 3: Builtin calls that have neither a kNoThrow annotation nor exception-
// handling support make the Wasm inliner sad.
for (let i = 0; i < 20; i++) f1(10);
- %WasmTierUpFunction(instance, caller.index);
+ %WasmTierUpFunction(f1);
f1(10);
})();
@@ -94,5 +94,5 @@ assertThrows(() => f2("1234567890")); // 650M characters is too much.
// Bug 5: Operations that can trap must not be marked as kEliminatable,
// otherwise the trap may be eliminated.
for (let i = 0; i < 3; i++) f2("a"); // 65M characters is okay.
-%WasmTierUpFunction(instance, concat.index);
+%WasmTierUpFunction(f2);
assertThrows(() => f2("1234567890")); // Optimized code still traps.
diff --git a/deps/v8/test/mjsunit/wasm/stringrefs-valid.js b/deps/v8/test/mjsunit/wasm/stringrefs-valid.js
index 9bcc294e99..d045f77c14 100644
--- a/deps/v8/test/mjsunit/wasm/stringrefs-valid.js
+++ b/deps/v8/test/mjsunit/wasm/stringrefs-valid.js
@@ -40,6 +40,7 @@ for (let [name, code] of [['string', kStringRefCode],
b => b.addFunction(undefined, kSig_v_v).addLocals(code, 1).addBody([]));
}
+let kSig_w_i = makeSig([kWasmI32], [kWasmStringRef]);
let kSig_w_ii = makeSig([kWasmI32, kWasmI32], [kWasmStringRef]);
let kSig_w_v = makeSig([], [kWasmStringRef]);
let kSig_i_w = makeSig([kWasmStringRef], [kWasmI32]);
@@ -77,6 +78,11 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32],
kExprLocalGet, 0, kExprLocalGet, 1,
...GCInstr(kExprStringNewUtf8), 0
]);
+ builder.addFunction("string.new_utf8_try", kSig_w_ii)
+ .addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1,
+ ...GCInstr(kExprStringNewUtf8Try), 0
+ ]);
builder.addFunction("string.new_lossy_utf8", kSig_w_ii)
.addBody([
kExprLocalGet, 0, kExprLocalGet, 1,
@@ -245,6 +251,18 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32],
...GCInstr(kExprStringViewIterSlice)
]);
+ builder.addFunction("string.from_code_point", kSig_w_i)
+ .addBody([
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringFromCodePoint)
+ ]);
+
+ builder.addFunction("string.hash", kSig_i_w)
+ .addBody([
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringHash)
+ ]);
+
let i8_array = builder.addArray(kWasmI8, true);
let i16_array = builder.addArray(kWasmI16, true);
@@ -253,7 +271,14 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32],
kExprRefNull, i8_array,
kExprI32Const, 0,
kExprI32Const, 0,
- ...GCInstr(kExprStringNewWtf8Array)
+ ...GCInstr(kExprStringNewUtf8Array)
+ ]);
+ builder.addFunction("string.new_utf8_array_try", kSig_w_v)
+ .addBody([
+ kExprRefNull, i8_array,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ ...GCInstr(kExprStringNewUtf8ArrayTry)
]);
builder.addFunction("string.new_lossy_utf8_array", kSig_w_v)
.addBody([
@@ -421,3 +446,7 @@ assertInvalid(
]);
},
/string.encode_wtf16_array\[1\] expected array of mutable i16, found local.get of type \(ref 0\)/);
+
+assertInvalid(builder => {
+ builder.addFunction(undefined, kSig_v_v).addBody([...GCInstr(0x790)]);
+}, /invalid stringref opcode: fb790/);
diff --git a/deps/v8/test/mjsunit/wasm/stringview-valuestack.js b/deps/v8/test/mjsunit/wasm/stringview-valuestack.js
new file mode 100644
index 0000000000..8493f09aca
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/stringview-valuestack.js
@@ -0,0 +1,73 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-stringref
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let kSig_v_w = makeSig([kWasmStringRef], []);
+let kSig_iw_i = makeSig([kWasmI32], [kWasmI32, kWasmStringRef]);
+
+(function TestStringViewIterStack() {
+ let builder = new WasmModuleBuilder();
+
+ let global = builder.addGlobal(kWasmStringViewIter, true);
+
+ builder.addFunction("iterate", kSig_v_w)
+ .exportFunc()
+ .addBody([
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringAsIter),
+ kExprGlobalSet, global.index
+ ]);
+
+ // The following functions perform a stringview operation and have the
+ // value 42 on the stack to ensure that the value stack is preserved on each
+ // of these operations.
+
+ builder.addFunction("advance", kSig_ii_i)
+ .exportFunc()
+ .addBody([
+ kExprI32Const, 42,
+ kExprGlobalGet, global.index,
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringViewIterAdvance)
+ ]);
+
+ builder.addFunction("rewind", kSig_ii_i)
+ .exportFunc()
+ .addBody([
+ kExprI32Const, 42,
+ kExprGlobalGet, global.index,
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringViewIterRewind)
+ ]);
+
+ builder.addFunction("slice", kSig_iw_i)
+ .exportFunc()
+ .addBody([
+ kExprI32Const, 42,
+ kExprGlobalGet, global.index,
+ kExprLocalGet, 0,
+ ...GCInstr(kExprStringViewIterSlice)
+ ]);
+
+ let instance = builder.instantiate();
+
+ let str = 'ascii string';
+ instance.exports.iterate(str);
+ for (let i = 0; i < str.length; i++) {
+ assertEquals([42, 1], instance.exports.advance(1));
+ }
+ assertEquals([42, 0], instance.exports.advance(1));
+
+ for (let i = 0; i < str.length; i++) {
+ assertEquals([42, 1], instance.exports.rewind(1));
+ }
+ assertEquals([42, 0], instance.exports.rewind(1));
+
+ for (let i = 0; i < str.length; i++) {
+ assertEquals([42, str.substring(0, i)], instance.exports.slice(i));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-access-liftoff.js b/deps/v8/test/mjsunit/wasm/table-access-liftoff.js
deleted file mode 100644
index 8f7a93b593..0000000000
--- a/deps/v8/test/mjsunit/wasm/table-access-liftoff.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --liftoff --no-wasm-tier-up
-
-d8.file.execute("test/mjsunit/wasm/table-access.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
index 49fced9588..d386e67a55 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -25,7 +25,7 @@ function testGrowInternalExternRefTable(table_index) {
const initial_size = 5;
// Add 10 tables, we only test one.
for (let i = 0; i < 10; ++i) {
- builder.addTable(kWasmExternRef, initial_size).index;
+ builder.addTable(kWasmExternRef, initial_size);
}
builder.addFunction('grow', kSig_i_ri)
.addBody([kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/wasm/table-numeric-ops.js b/deps/v8/test/mjsunit/wasm/table-numeric-ops.js
new file mode 100644
index 0000000000..112d8d97fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-numeric-ops.js
@@ -0,0 +1,144 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function TestTableNumericOps() {
+ print(arguments.callee.name);
+ let kTableNum = 10;
+ for (let table_index of [0, 7, 9]) {
+ let builder = new WasmModuleBuilder();
+ let kTableSize = 5;
+
+ for (let i = 0; i < kTableNum; i++) {
+ builder.addTable(kWasmFuncRef, kTableSize);
+ }
+
+ let elements = [];
+
+ let sig_i_v = builder.addType(kSig_i_v);
+
+ for (let i = 0; i < kTableSize; i++) {
+ builder.addFunction("f" + i, sig_i_v).addBody([kExprI32Const, i]);
+ elements.push(i);
+ }
+
+ let passive = builder.addPassiveElementSegment(elements);
+
+ let sig_i_i = builder.addType(kSig_i_i);
+
+ builder.addFunction("call", sig_i_i)
+ .addBody([kExprLocalGet, 0, kExprCallIndirect, sig_i_v, table_index])
+ .exportFunc();
+ builder.addFunction("table_init", kSig_v_iii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2,
+ kNumericPrefix, kExprTableInit, passive, table_index])
+ .exportFunc();
+ builder.addFunction("drop", kSig_v_v)
+ .addBody([kNumericPrefix, kExprElemDrop, passive])
+ .exportFunc();
+
+ let wasm = builder.instantiate().exports;
+
+ // An out-of-bounds trapping initialization should not have an effect on the
+ // table.
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(3, 0, 3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(0));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(1));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(2));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(4));
+
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 3, 3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(0));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(1));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(2));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(4));
+
+ // 0-count is still oob if target is invalid.
+ assertTraps(kTrapTableOutOfBounds,
+ () => wasm.table_init(kTableSize + 1, 0, 0));
+ assertTraps(kTrapElementSegmentOutOfBounds,
+ () => wasm.table_init(0, kTableSize + 1, 0));
+
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(0, 0, 6));
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 1, 5));
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 2, 4));
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 3, 3));
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 4, 2));
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 5, 1));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(0, 0, 6));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(1, 0, 5));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(2, 0, 4));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(3, 0, 3));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(4, 0, 2));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(5, 0, 1));
+ assertTraps(kTrapTableOutOfBounds, () => wasm.table_init(10, 0, 1));
+ assertTraps(kTrapElementSegmentOutOfBounds,
+ () => wasm.table_init(0, 10, 1));
+
+ // Initializing 0 elements is ok, even at the end of the table/segment.
+ wasm.table_init(0, 0, 0);
+ wasm.table_init(kTableSize, 0, 0);
+ wasm.table_init(0, kTableSize, 0);
+
+ wasm.table_init(0, 0, 1);
+ assertEquals(0, wasm.call(0));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(1));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(2));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(4));
+
+ wasm.table_init(0, 0, 2);
+ assertEquals(0, wasm.call(0));
+ assertEquals(1, wasm.call(1));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(2));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(4));
+
+ wasm.table_init(0, 0, 3);
+ assertEquals(0, wasm.call(0));
+ assertEquals(1, wasm.call(1));
+ assertEquals(2, wasm.call(2));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(3));
+ assertTraps(kTrapFuncSigMismatch, () => wasm.call(4));
+
+ wasm.table_init(3, 0, 2);
+ assertEquals(0, wasm.call(0));
+ assertEquals(1, wasm.call(1));
+ assertEquals(2, wasm.call(2));
+ assertEquals(0, wasm.call(3));
+ assertEquals(1, wasm.call(4));
+
+ wasm.table_init(3, 1, 2);
+ assertEquals(0, wasm.call(0));
+ assertEquals(1, wasm.call(1));
+ assertEquals(2, wasm.call(2));
+ assertEquals(1, wasm.call(3));
+ assertEquals(2, wasm.call(4));
+
+ wasm.table_init(3, 2, 2);
+ assertEquals(0, wasm.call(0));
+ assertEquals(1, wasm.call(1));
+ assertEquals(2, wasm.call(2));
+ assertEquals(2, wasm.call(3));
+ assertEquals(3, wasm.call(4));
+
+ wasm.table_init(3, 3, 2);
+ assertEquals(0, wasm.call(0));
+ assertEquals(1, wasm.call(1));
+ assertEquals(2, wasm.call(2));
+ assertEquals(3, wasm.call(3));
+ assertEquals(4, wasm.call(4));
+
+ // Now drop the passive segment twice. This should work.
+ wasm.drop();
+ wasm.drop();
+
+ // Subsequent accesses should trap for size > 0.
+ wasm.table_init(0, 0, 0);
+ assertTraps(kTrapElementSegmentOutOfBounds, () => wasm.table_init(0, 1, 0));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/type-based-optimizations.js b/deps/v8/test/mjsunit/wasm/type-based-optimizations.js
index 5ac4debefb..f6bd1ac783 100644
--- a/deps/v8/test/mjsunit/wasm/type-based-optimizations.js
+++ b/deps/v8/test/mjsunit/wasm/type-based-optimizations.js
@@ -10,6 +10,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// (by inspecting the resulting graph).
(function WasmTypedOptimizationsTest() {
let builder = new WasmModuleBuilder();
+ builder.startRecGroup();
let top = builder.addStruct([makeField(kWasmI32, true)]);
let middle = builder.addStruct([makeField(kWasmI32, true),
makeField(kWasmI64, false)],
@@ -22,6 +23,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
makeField(kWasmI64, false),
makeField(kWasmI64, false)],
middle);
+ builder.endRecGroup();
builder.addFunction("main", makeSig(
[wasmRefType(bottom1), wasmRefType(bottom2)], [kWasmI32]))
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-externalize-internalize.js b/deps/v8/test/mjsunit/wasm/wasm-gc-externalize-internalize.js
index 4d232775dc..b1b2851ac8 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-gc-externalize-internalize.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-externalize-internalize.js
@@ -221,6 +221,7 @@ for (let type of ["struct", "i31", "array"]) {
// Unlike structs and arrays, the i31 value is directly accessible in
// JavaScript. Similarly, a JS smi can be internalized as an i31ref.
-// TODO(7748): Fix i31 interop with disabled pointer compression.
-// assertEquals(12345, instance.exports.i31_externalize(12345));
-// assertEquals([12345, 0], instance.exports.i31_internalize(12345));
+let createHeapNumber = (x) => x + x;
+assertEquals(12345, instance.exports.i31_externalize(12345));
+assertEquals([12345, 0], instance.exports.i31_internalize(12345));
+assertEquals([11, 0], instance.exports.i31_internalize(createHeapNumber(5.5)));
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-inlining.js b/deps/v8/test/mjsunit/wasm/wasm-gc-inlining.js
new file mode 100644
index 0000000000..d8ec0e0f88
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-inlining.js
@@ -0,0 +1,639 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --allow-natives-syntax --turbofan
+// Flags: --no-always-turbofan --no-always-sparkplug --expose-gc
+// Flags: --experimental-wasm-js-inlining
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+function testOptimized(run, fctToOptimize) {
+ fctToOptimize = fctToOptimize ?? run;
+ %PrepareFunctionForOptimization(fctToOptimize);
+ for (let i = 0; i < 10; ++i) {
+ run();
+ }
+ %OptimizeFunctionOnNextCall(fctToOptimize);
+ run();
+ assertOptimized(fctToOptimize);
+}
+
+(function TestInliningStructGet() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ builder.addFunction('createStructNull', makeSig([kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getElementNull', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ builder.addFunction('createStruct',
+ makeSig([kWasmI32], [wasmRefType(kWasmExternRef)]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getElement',
+ makeSig([wasmRefType(kWasmExternRef)], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ // TODO(mliedtke): Consider splitting this loop as the reuse seems to prevent
+ // proper feedback for the second iteration.
+ for (let [create, get] of [
+ [wasm.createStruct, wasm.getElement],
+ [wasm.createStructNull, wasm.getElementNull]]) {
+ let fct = () => {
+ for (let i = 1; i <= 10; ++i) {
+ const struct = create(i);
+ assertEquals(i, get(struct));
+ }
+ };
+ testOptimized(fct);
+
+ // While these cases will all trap on the ref.cast, they cover very
+ // different code paths in extern.internalize.
+ print("Test exceptional cases");
+ const trap = kTrapIllegalCast;
+ print("- test get null");
+ const getNull = () => get(null);
+ testOptimized(() => assertTraps(trap, getNull), getNull);
+ print("- test undefined");
+ const getUndefined = () => get(undefined);
+ testOptimized(() => assertTraps(trap, getUndefined), getUndefined);
+ print("- test Smi");
+ const getSmi = () => get(1);
+ testOptimized(() => assertTraps(trap, getSmi), getSmi);
+ print("- test -0");
+ const getNZero = () => get(-0);
+ testOptimized(() => assertTraps(trap, getNZero), getNZero);
+ print("- test HeapNumber with fractional digits");
+ const getFractional = () => get(0.5);
+ testOptimized(() => assertTraps(trap, getFractional), getFractional);
+ print("- test Smi/HeapNumber too large for i31ref");
+ const getLargeNumber = () => get(0x4000_000);
+ testOptimized(() => assertTraps(trap, getLargeNumber), getLargeNumber);
+
+ print("- test inlining into try block");
+ // TODO(7748): This is not yet supported by inlining.
+ const getTry = () => {
+ try {
+ get(null);
+ } catch (e) {
+ assertTrue(e instanceof WebAssembly.RuntimeError);
+ return;
+ }
+ assertUnreachable();
+ };
+ testOptimized(getTry);
+ }
+})();
+
+(function TestInliningStructGetElementTypes() {
+ print(arguments.callee.name);
+ const i64Value = Number.MAX_SAFE_INTEGER;
+ const f64Value = 11.1;
+ const i8Value = 123;
+ const i16Value = 456;
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([
+ makeField(kWasmI64, true),
+ makeField(kWasmF64, true),
+ makeField(kWasmI8, true),
+ makeField(kWasmI16, true),
+ ]);
+
+ builder.addFunction('createStruct', makeSig([], [kWasmExternRef]))
+ .addBody([
+ ...wasmI64Const(i64Value),
+ ...wasmF64Const(f64Value),
+ ...wasmI32Const(i8Value),
+ ...wasmI32Const(i16Value),
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getI64', makeSig([kWasmExternRef], [kWasmI64]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0,
+ ])
+ .exportFunc();
+ builder.addFunction('getF64', makeSig([kWasmExternRef], [kWasmF64]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 1,
+ ])
+ .exportFunc();
+ builder.addFunction('getI8', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ // TODO(7748): Currently struct.get_s / struct.get_u does not get inlined.
+ kGCPrefix, kExprStructGetS, struct, 2,
+ ])
+ .exportFunc();
+ builder.addFunction('getI16', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ // TODO(7748): Currently struct.get_s / struct.get_u does not get inlined.
+ kGCPrefix, kExprStructGetU, struct, 3,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let structVal = wasm.createStruct();
+ print("- getI64");
+ let getI64 =
+ () => assertEquals(BigInt(i64Value), wasm.getI64(structVal));
+ testOptimized(getI64);
+ print("- getF64");
+ let getF64 = () => assertEquals(f64Value, wasm.getF64(structVal));
+ testOptimized(getF64);
+ print("- getI8");
+ let getI8 = () => assertEquals(i8Value, wasm.getI8(structVal));
+ testOptimized(getI8);
+ print("- getI16");
+ let getI16 = () => assertEquals(i16Value, wasm.getI16(structVal));
+ testOptimized(getI16);
+})();
+
+(function TestInliningMultiModule() {
+ print(arguments.callee.name);
+
+ let createModule = (fieldType) => {
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(fieldType, true)]);
+
+ builder.addFunction('createStruct', makeSig([fieldType], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('get', makeSig([kWasmExternRef], [fieldType]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ return instance.exports;
+ };
+
+ let moduleA = createModule(kWasmI32);
+ let moduleB = createModule(kWasmF64);
+ let structA = moduleA.createStruct(123);
+ let structB = moduleB.createStruct(321);
+
+ // Only one of the two calls can be fully inlined. For the other call only the
+ // wrapper is inlined.
+ let multiModule =
+ () => assertEquals(444, moduleA.get(structA) + moduleB.get(structB));
+ testOptimized(multiModule);
+
+ // The struct types are incompatible (but both use type index 0).
+ // One of the two calls gets inlined, and both the inlined and the
+ // non-inlined function have to keep the different wasm modules apart.
+ let i = 0;
+ let multiModuleTrap =
+ () => ++i % 2 == 0 ? moduleA.get(structB) : moduleB.get(structA);
+ testOptimized(() => assertTraps(kTrapIllegalCast, () => multiModuleTrap()),
+ multiModuleTrap);
+})();
+
+(function TestInliningTrapStackTrace() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ builder.addFunction('createStruct', makeSig([kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getElement', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ const getTrap = () => wasm.getElement(null);
+ const testTrap = () => {
+ try {
+ getTrap();
+ assertUnreachable();
+ } catch(e) {
+ // TODO(7748): The stack trace should always contain the wasm frame, even
+ // if it was inlined. The regex should be:
+ // /illegal cast[\s]+at getElement \(wasm:/
+ // For now we assert that the stack trace isn't fully broken and contains
+ // at least the `getTrap()` call above.
+ assertMatches(/illegal cast[\s]+at [.\s\S]*getTrap/, e.stack);
+ }
+ };
+ testOptimized(testTrap, getTrap);
+})();
+
+(function TestInliningExternExternalize() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([
+ makeField(wasmRefNullType(0), true),
+ makeField(kWasmI32, true),
+ ]);
+
+ builder.addFunction('createStruct',
+ makeSig([kWasmExternRef, kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, struct,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getRef', makeSig([kWasmExternRef], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+ builder.addFunction('getVal', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 1,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let structA = wasm.createStruct(null, 1);
+ let structB = wasm.createStruct(structA, 2);
+ let getRef = () => assertSame(structA, wasm.getRef(structB));
+ testOptimized(getRef);
+ let getRefGetVal = () => assertSame(1, wasm.getVal(wasm.getRef(structB)));
+ testOptimized(getRefGetVal);
+})();
+
+(function TestArrayLen() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI32, true);
+
+ builder.addFunction('createArray', makeSig([kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprArrayNewDefault, array,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('arrayLen', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, array,
+ kGCPrefix, kExprArrayLen,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let testLen = (expected, array) => assertSame(expected, wasm.arrayLen(array));
+ let array0 = wasm.createArray(0);
+ let array42 = wasm.createArray(42);
+ testOptimized(() => testLen(0, array0), testLen);
+ testOptimized(() => testLen(42, array42), testLen);
+ testOptimized(
+ () => assertTraps(kTrapNullDereference, () => testLen(-1, null)), testLen);
+})();
+
+(function TestArrayGet() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI32, true);
+
+ builder.addFunction('createArray',
+ makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayNewFixed, array, 3,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('get', makeSig([kWasmExternRef, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, array,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let wasmArray = wasm.createArray(10, -1, 1234567);
+ let get =
+ (expected, array, index) =>
+ assertEquals(expected, wasm.get(array, index));
+ testOptimized(() => get(10, wasmArray, 0), get);
+ testOptimized(() => get(-1, wasmArray, 1), get);
+ testOptimized(() => get(1234567, wasmArray, 2), get);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds, () => get(-1, wasmArray, -1)),
+ get);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds, () => get(-1, wasmArray, 3)), get);
+ testOptimized(
+ () => assertTraps(kTrapNullDereference, () => get(-1, null)), get);
+})();
+
+(function TestArrayGetPacked() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI8, true);
+
+ builder.addFunction('createArray',
+ makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayNewFixed, array, 3,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getS', makeSig([kWasmExternRef, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, array,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGetS, array,
+ ])
+ .exportFunc();
+
+ builder.addFunction('getU', makeSig([kWasmExternRef, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, array,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGetU, array,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let wasmArray = wasm.createArray(10, -1, -123);
+ {
+ print("- test getS");
+ let getS =
+ (expected, array, index) =>
+ assertEquals(expected, wasm.getS(array, index));
+ testOptimized(() => getS(10, wasmArray, 0), getS);
+ testOptimized(() => getS(-1, wasmArray, 1), getS);
+ testOptimized(() => getS(-123, wasmArray, 2), getS);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds, () => getS(-1, wasmArray, -1)),
+ getS);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds, () => getS(-1, wasmArray, 3)),
+ getS);
+ testOptimized(
+ () => assertTraps(kTrapNullDereference, () => getS(-1, null)), getS);
+ }
+ {
+ print("- test getU");
+ let getU =
+ (expected, array, index) =>
+ assertEquals(expected, wasm.getU(array, index));
+ testOptimized(() => getU(10, wasmArray, 0), getU);
+ testOptimized(() => getU(255, wasmArray, 1), getU);
+ testOptimized(() => getU(133, wasmArray, 2), getU);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds, () => getU(-1, wasmArray, -1)),
+ getU);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds, () => getU(-1, wasmArray, 3)),
+ getU);
+ testOptimized(
+ () => assertTraps(kTrapNullDereference, () => getU(-1, null)), getU);
+ }
+})();
+
+(function TestCastArray() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI32, true);
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ builder.addFunction('createArray', makeSig([], [kWasmExternRef]))
+ .addBody([
+ kGCPrefix, kExprArrayNewFixed, array, 0,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+ builder.addFunction('createStruct', makeSig([], [kWasmExternRef]))
+ .addBody([
+ kExprI32Const, 42,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('castArray', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ // Generic cast to ref.array.
+ kGCPrefix, kExprRefCast, kArrayRefCode,
+ kGCPrefix, kExprArrayLen,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let wasmArray = wasm.createArray();
+ let wasmStruct = wasm.createStruct();
+ let castArray = (value) => wasm.castArray(value);
+ let trap = kTrapIllegalCast;
+ testOptimized(() => assertTraps(trap, () => castArray(null)), castArray);
+ testOptimized(() => assertTraps(trap, () => castArray(1)), castArray);
+ testOptimized(
+ () => assertTraps(trap, () => castArray(wasmStruct)), castArray);
+ testOptimized(() => assertEquals(0, castArray(wasmArray)), castArray);
+})();
+
+(function TestInliningArraySet() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let array = builder.addArray(kWasmI64, true);
+
+ builder.addFunction('createArray',
+ makeSig([kWasmI64, kWasmI64, kWasmI64], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArrayNewFixed, array, 3,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('get', makeSig([kWasmExternRef, kWasmI32], [kWasmI64]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, array,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprArrayGet, array,
+ ])
+ .exportFunc();
+
+ builder.addFunction('set', makeSig([kWasmExternRef, kWasmI32, kWasmI64], []))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, array,
+ kExprLocalGet, 1,
+ kExprLocalGet, 2,
+ kGCPrefix, kExprArraySet, array,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let wasmArray = wasm.createArray(0n, 1n, 2n);
+ let writeAndRead = (array, index, value) => {
+ wasm.set(array, index, value);
+ assertEquals(value, wasm.get(array, index));
+ };
+ testOptimized(() => writeAndRead(wasmArray, 0, 123n), writeAndRead);
+ testOptimized(() => writeAndRead(wasmArray, 1, -123n), writeAndRead);
+ testOptimized(() => writeAndRead(wasmArray, 2, 0n), writeAndRead);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds,
+ () => writeAndRead(wasmArray, 3, 0n)),
+ writeAndRead);
+ testOptimized(
+ () => assertTraps(kTrapArrayOutOfBounds,
+ () => writeAndRead(wasmArray, -1, 0n)),
+ writeAndRead);
+ testOptimized(
+ () => assertTraps(kTrapNullDereference, () => writeAndRead(null, 0, 0n)),
+ writeAndRead);
+})();
+
+(function TestInliningStructSet() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI64, true)]);
+
+ builder.addFunction('createStruct',
+ makeSig([kWasmI64], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, struct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('get', makeSig([kWasmExternRef], [kWasmI64]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, struct,
+ kGCPrefix, kExprStructGet, struct, 0,
+ ])
+ .exportFunc();
+
+ builder.addFunction('set', makeSig([kWasmExternRef, kWasmI64], []))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, struct,
+ kExprLocalGet, 1,
+ kGCPrefix, kExprStructSet, struct, 0,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ let wasmStruct = wasm.createStruct(0n);
+ let writeAndRead = (struct, value) => {
+ wasm.set(struct, value);
+ assertEquals(value, wasm.get(struct));
+ };
+ testOptimized(() => writeAndRead(wasmStruct, 123n), writeAndRead);
+ testOptimized(() => writeAndRead(wasmStruct, -123n), writeAndRead);
+ testOptimized(() => writeAndRead(wasmStruct, 0n), writeAndRead);
+ testOptimized(
+ () => assertTraps(kTrapNullDereference, () => writeAndRead(null, 0n)),
+ writeAndRead);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
index 93cf72d54c..0f59410047 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc --no-wasm-gc-structref-as-dataref
+// Flags: --experimental-wasm-gc
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
@@ -41,6 +41,7 @@ let instance = (() => {
raw_struct: struct,
raw_array: array,
typed_func: sig,
+ i31: kWasmI31Ref,
eq: kWasmEqRef,
func: kWasmFuncRef,
any: kWasmAnyRef,
@@ -85,25 +86,31 @@ assertThrows(
instance.exports.eq_id(instance.exports.struct_producer());
// We can roundtrip an array as eqref.
instance.exports.eq_id(instance.exports.array_producer());
-// We can roundtrip an i31 as eqref.
+// We can roundtrip an i31 as eqref/i31ref.
instance.exports.eq_id(instance.exports.i31_as_eq_producer());
-// We can roundtrip any null as eqref.
+instance.exports.i31_id(instance.exports.i31_as_eq_producer());
+// We can roundtrip any null as any null supertype.
instance.exports.eq_id(instance.exports.struct_null());
instance.exports.eq_id(instance.exports.eq_null());
instance.exports.eq_id(instance.exports.func_null());
+instance.exports.eq_id(instance.exports.any_null());
+instance.exports.any_id(instance.exports.struct_null());
+instance.exports.any_id(instance.exports.eq_null());
+instance.exports.any_id(instance.exports.func_null());
+instance.exports.any_id(instance.exports.any_null());
+instance.exports.i31_id(instance.exports.struct_null());
+instance.exports.i31_id(instance.exports.eq_null());
+instance.exports.i31_id(instance.exports.func_null());
+instance.exports.i31_id(instance.exports.any_null());
+instance.exports.struct_id(instance.exports.struct_null());
+instance.exports.struct_id(instance.exports.eq_null());
+instance.exports.struct_id(instance.exports.func_null());
+instance.exports.struct_id(instance.exports.any_null());
// We cannot roundtrip a func as eqref.
assertThrows(
() => instance.exports.eq_id(instance.exports.func_producer()), TypeError,
'type incompatibility when transforming from/to JS');
-// Anyref is not allowed at the JS interface.
-assertThrows(
- () => instance.exports.any_null(), TypeError,
- 'type incompatibility when transforming from/to JS');
-assertThrows(
- () => instance.exports.any_id(), TypeError,
- 'type incompatibility when transforming from/to JS');
-
// We can roundtrip a typed function.
instance.exports.typed_func_id(instance.exports.func_producer());
// We can roundtrip any null as a typed function.
@@ -123,24 +130,24 @@ assertThrows(
TypeError,
'type incompatibility when transforming from/to JS');
-// We cannot directly roundtrip structs or arrays.
-// TODO(7748): Switch these tests once we can.
+// We can directly roundtrip structs or arrays.
+instance.exports.raw_struct_id(instance.exports.struct_producer());
+instance.exports.raw_array_id(instance.exports.array_producer());
+
+// We cannot roundtrip an array as struct and vice versa.
assertThrows(
- () => instance.exports.raw_struct_id(instance.exports.struct_producer()),
- TypeError, 'type incompatibility when transforming from/to JS');
+ () => instance.exports.raw_struct_id(instance.exports.array_producer()),
+ TypeError,
+ 'type incompatibility when transforming from/to JS');
assertThrows(
- () => instance.exports.raw_array_id(instance.exports.array_producer()),
- TypeError, 'type incompatibility when transforming from/to JS');
+ () => instance.exports.raw_array_id(instance.exports.struct_producer()),
+ TypeError,
+ 'type incompatibility when transforming from/to JS');
// We can roundtrip an extern.
assertEquals(null, instance.exports.extern_id(instance.exports.extern_null()));
-// The special null types are not allowed on the boundary from/to JS.
+// We can roundtrip null typed as one of the three null types through wasm.
for (const nullType of ["none", "nofunc", "noextern"]) {
- assertThrows(
- () => instance.exports[`${nullType}_null`](),
- TypeError, 'type incompatibility when transforming from/to JS');
- assertThrows(
- () => instance.exports[`${nullType}_id`](),
- TypeError, 'type incompatibility when transforming from/to JS');
+ instance.exports[`${nullType}_id`](instance.exports[`${nullType}_null`]());
}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-source-location.js b/deps/v8/test/mjsunit/wasm/wasm-gc-source-location.js
new file mode 100644
index 0000000000..3638612180
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-source-location.js
@@ -0,0 +1,91 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+function TestStackTrace(testFct, trap, expected) {
+ assertTraps(trap, testFct);
+ try {
+ testFct();
+ assertUnreachable();
+ } catch(e) {
+ let regex = /at [^ ]+ \(wasm[^\[]+\[[0-9+]\]:(0x[0-9a-f]+)\)/;
+ let match = e.stack.match(regex);
+ assertEquals(expected, match[1])
+ }
+}
+
+(function CodeLocationRefCast() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let otherStruct = builder.addStruct([makeField(kWasmI64, true)]);
+
+ builder.addFunction('createOther', makeSig([kWasmI64], [kWasmExternRef]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructNew, otherStruct,
+ kGCPrefix, kExprExternExternalize,
+ ])
+ .exportFunc();
+
+ builder.addFunction('main', makeSig([kWasmExternRef], []))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, kStructRefCode, // abstract cast
+ kGCPrefix, kExprRefCastNull, struct, // type cast
+ kGCPrefix, kExprRefCast, struct, // null check
+ kExprDrop,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+ let other = wasm.createOther(5n);
+
+ TestStackTrace(() => wasm.main(3), kTrapIllegalCast, '0x50');
+ TestStackTrace(() => wasm.main(other), kTrapIllegalCast, '0x53');
+ TestStackTrace(() => wasm.main(null), kTrapIllegalCast, '0x56');
+})();
+
+(function CodeLocationArrayLen() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('arrayLen', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, kArrayRefCode,
+ kGCPrefix, kExprArrayLen,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ TestStackTrace(() => wasm.arrayLen(null), kTrapNullDereference, '0x2e');
+})();
+
+(function CodeLocationStructGet() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ builder.addFunction('structGet', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCastNull, struct,
+ kGCPrefix, kExprStructGet, struct, 0,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ TestStackTrace(() => wasm.structGet(null), kTrapNullDereference, '0x35');
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-js-inlining-code-reloc.js b/deps/v8/test/mjsunit/wasm/wasm-js-inlining-code-reloc.js
new file mode 100644
index 0000000000..a8e87cfdb1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-js-inlining-code-reloc.js
@@ -0,0 +1,54 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --allow-natives-syntax --turbofan
+// Flags: --no-always-turbofan --no-always-sparkplug --expose-gc
+// Flags: --stress-compaction --experimental-wasm-js-inlining
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestStressCompactionWasmStubCallRelocation() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+
+ builder.addFunction('getElementNull', makeSig([kWasmExternRef], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprExternInternalize,
+ kGCPrefix, kExprRefCast, struct,
+ kGCPrefix, kExprStructGet, struct, 0])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasm = instance.exports;
+
+ const trap = kTrapIllegalCast;
+ // getElementNull calls the TrapIllegalCast built-in.
+ const getNull = () => wasm.getElementNull(null);
+
+ // Build feedback vector.
+ %PrepareFunctionForOptimization(getNull);
+ for (let i = 0; i < 10; ++i) {
+ assertTraps(trap, getNull)
+ }
+ // Optimize the function, causing the wasm function with the built-in to be
+ // inlined into the JavaScript TurboFan code. The built-in is encoded as a
+ // jump with a 32-bit offset relative to the pc (on some platforms).
+ %OptimizeFunctionOnNextCall(getNull);
+ assertTraps(trap, getNull)
+ assertOptimized(getNull);
+ // Force gc(). gc() has to be called multiple times, and --stress-compaction
+ // has to be set, for the JS function's code to be relocated.
+ gc();
+ gc();
+ gc();
+ // The optimized function is not GCed.
+ assertOptimized(getNull);
+ // Calling getNull() again reaches the built-in through the pc-relative jump.
+ // If the expected trap is still raised, the jump was relocated successfully.
+ assertTraps(trap, getNull)
+ assertOptimized(getNull);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 9670f72031..5a19284c70 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -80,6 +80,7 @@ let kWasmFunctionTypeForm = 0x60;
let kWasmStructTypeForm = 0x5f;
let kWasmArrayTypeForm = 0x5e;
let kWasmSubtypeForm = 0x50;
+let kWasmSubtypeFinalForm = 0x4e;
let kWasmRecursiveTypeGroupForm = 0x4f;
let kNoSuperType = 0xFFFFFFFF;
@@ -195,6 +196,7 @@ let kSig_v_i = makeSig([kWasmI32], []);
let kSig_v_ii = makeSig([kWasmI32, kWasmI32], []);
let kSig_v_iii = makeSig([kWasmI32, kWasmI32, kWasmI32], []);
let kSig_v_l = makeSig([kWasmI64], []);
+let kSig_v_li = makeSig([kWasmI64, kWasmI32], []);
let kSig_v_d = makeSig([kWasmF64], []);
let kSig_v_dd = makeSig([kWasmF64, kWasmF64], []);
let kSig_v_ddi = makeSig([kWasmF64, kWasmF64, kWasmI32], []);
@@ -498,7 +500,6 @@ let kExprArrayGet = 0x13;
let kExprArrayGetS = 0x14;
let kExprArrayGetU = 0x15;
let kExprArraySet = 0x16;
-let kExprArrayLenDeprecated = 0x17;
let kExprArrayCopy = 0x18;
let kExprArrayLen = 0x19;
let kExprArrayNewFixed = 0x1a;
@@ -506,6 +507,7 @@ let kExprArrayNew = 0x1b;
let kExprArrayNewDefault = 0x1c;
let kExprArrayNewData = 0x1d;
let kExprArrayNewElem = 0x1f;
+let kExprArrayFill = 0x0f;
let kExprI31New = 0x20;
let kExprI31GetS = 0x21;
let kExprI31GetU = 0x22;
@@ -515,8 +517,11 @@ let kExprRefTestDeprecated = 0x44;
let kExprRefCast = 0x41;
let kExprRefCastNull = 0x49;
let kExprRefCastDeprecated = 0x45;
-let kExprBrOnCast = 0x46;
-let kExprBrOnCastFail = 0x47;
+let kExprBrOnCast = 0x42;
+let kExprBrOnCastNull = 0x4a;
+let kExprBrOnCastDeprecated = 0x46;
+let kExprBrOnCastFail = 0x43;
+let kExprBrOnCastFailNull = 0x4b;
let kExprRefCastNop = 0x4c;
let kExprRefIsData = 0x51;
let kExprRefIsI31 = 0x52;
@@ -547,6 +552,7 @@ let kExprStringNewLossyUtf8 = 0x8b;
let kExprStringNewWtf8 = 0x8c;
let kExprStringEncodeLossyUtf8 = 0x8d;
let kExprStringEncodeWtf8 = 0x8e;
+let kExprStringNewUtf8Try = 0x8f;
let kExprStringAsWtf8 = 0x90;
let kExprStringViewWtf8Advance = 0x91;
let kExprStringViewWtf8EncodeUtf8 = 0x92;
@@ -563,6 +569,9 @@ let kExprStringViewIterNext = 0xa1
let kExprStringViewIterAdvance = 0xa2;
let kExprStringViewIterRewind = 0xa3
let kExprStringViewIterSlice = 0xa4;
+let kExprStringCompare = 0xa8;
+let kExprStringFromCodePoint = 0xa9;
+let kExprStringHash = 0xaa;
let kExprStringNewUtf8Array = 0xb0;
let kExprStringNewWtf16Array = 0xb1;
let kExprStringEncodeUtf8Array = 0xb2;
@@ -571,6 +580,7 @@ let kExprStringNewLossyUtf8Array = 0xb4;
let kExprStringNewWtf8Array = 0xb5;
let kExprStringEncodeLossyUtf8Array = 0xb6;
let kExprStringEncodeWtf8Array = 0xb7;
+let kExprStringNewUtf8ArrayTry = 0xb8;
// Numeric opcodes.
let kExprI32SConvertSatF32 = 0x00;
@@ -1121,9 +1131,8 @@ class WasmFunctionBuilder {
addBody(body) {
checkExpr(body);
- this.body = body.slice();
- // Automatically add the end for the function block to the body.
- this.body.push(kExprEnd);
+ // Store a copy of the body, and automatically add the end opcode.
+ this.body = body.concat([kExprEnd]);
return this;
}
@@ -1205,21 +1214,23 @@ function makeField(type, mutability) {
}
class WasmStruct {
- constructor(fields, supertype_idx) {
+ constructor(fields, is_final, supertype_idx) {
if (!Array.isArray(fields)) {
throw new Error('struct fields must be an array');
}
this.fields = fields;
this.type_form = kWasmStructTypeForm;
+ this.is_final = is_final;
this.supertype = supertype_idx;
}
}
class WasmArray {
- constructor(type, mutability, supertype_idx) {
+ constructor(type, mutability, is_final, supertype_idx) {
this.type = type;
this.mutability = mutability;
this.type_form = kWasmArrayTypeForm;
+ this.is_final = is_final;
this.supertype = supertype_idx;
}
}
@@ -1273,16 +1284,11 @@ class WasmModuleBuilder {
this.element_segments = [];
this.data_segments = [];
this.explicit = [];
+ this.rec_groups = [];
this.num_imported_funcs = 0;
this.num_imported_globals = 0;
this.num_imported_tables = 0;
this.num_imported_tags = 0;
- // If a wasm-gc type is detected, all types are put by default into a single
- // recursive group. This field overrides this behavior and puts each type in
- // a separate rec. group instead.
- // TODO(7748): Support more flexible rec. groups.
- this.singleton_rec_groups = false;
- this.early_data_count_section = false;
return this;
}
@@ -1341,13 +1347,14 @@ class WasmModuleBuilder {
this.explicit.push(this.createCustomSection(name, bytes));
}
- // TODO(7748): Support recursive groups.
-
- addType(type, supertype_idx = kNoSuperType) {
+ // We use {is_final = true} so that the MVP syntax is generated for
+ // signatures.
+ addType(type, supertype_idx = kNoSuperType, is_final = true) {
var pl = type.params.length; // should have params
var rl = type.results.length; // should have results
- type.supertype = supertype_idx;
- this.types.push(type);
+ var type_copy = {params: type.params, results: type.results,
+ is_final: is_final, supertype: supertype_idx};
+ this.types.push(type_copy);
return this.types.length - 1;
}
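+
+ // Example (sketch): the default {is_final = true} keeps the MVP-style
+ // signature encoding; passing a supertype and {is_final = false} emits the
+ // sub form instead:
+ //   let base = builder.addType(kSig_i_i, kNoSuperType, false);
+ //   let sub = builder.addType(kSig_i_i, base, false);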
@@ -1356,13 +1363,13 @@ class WasmModuleBuilder {
return this.stringrefs.length - 1;
}
- addStruct(fields, supertype_idx = kNoSuperType) {
- this.types.push(new WasmStruct(fields, supertype_idx));
+ addStruct(fields, supertype_idx = kNoSuperType, is_final = false) {
+ this.types.push(new WasmStruct(fields, is_final, supertype_idx));
return this.types.length - 1;
}
- addArray(type, mutability, supertype_idx = kNoSuperType) {
- this.types.push(new WasmArray(type, mutability, supertype_idx));
+ addArray(type, mutability, supertype_idx = kNoSuperType, is_final = false) {
+ this.types.push(new WasmArray(type, mutability, is_final, supertype_idx));
return this.types.length - 1;
}
@@ -1609,12 +1616,19 @@ class WasmModuleBuilder {
return this;
}
- setSingletonRecGroups() {
- this.singleton_rec_groups = true;
+ startRecGroup() {
+ this.rec_groups.push({start: this.types.length, size: 0});
}
- setEarlyDataCountSection() {
- this.early_data_count_section = true;
+ endRecGroup() {
+ if (this.rec_groups.length == 0) {
+ throw new Error("Did not start a recursive group before ending one");
+ }
+ let last_element = this.rec_groups[this.rec_groups.length - 1];
+ if (last_element.size != 0) {
+ throw new Error("The last recursive group has already been ended");
+ }
+ last_element.size = this.types.length - last_element.start;
}
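+
+ // Example usage (sketch): wrap mutually recursive types in a single group:
+ //   builder.startRecGroup();
+ //   let t1 = builder.addStruct([makeField(kWasmI32, true)]);
+ //   let t2 = builder.addStruct([makeField(wasmRefNullType(t1), true)]);
+ //   builder.endRecGroup();
+ // Types not covered by any group are emitted without a rec-group wrapper.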
setName(name) {
@@ -1633,21 +1647,31 @@ class WasmModuleBuilder {
if (wasm.types.length > 0) {
if (debug) print('emitting types @ ' + binary.length);
binary.emit_section(kTypeSectionCode, section => {
- // If any type is a wasm-gc type, wrap everything in a recursive group.
- // TODO(7748): Support more flexible rec. groups.
- if (!this.singleton_rec_groups &&
- wasm.types.findIndex(type => type instanceof WasmStruct ||
- type instanceof WasmArray) >= 0) {
- section.emit_u32v(1);
- section.emit_u8(kWasmRecursiveTypeGroupForm);
+ let length_with_groups = wasm.types.length;
+ for (let group of wasm.rec_groups) {
+ length_with_groups -= group.size - 1;
}
- section.emit_u32v(wasm.types.length);
+ section.emit_u32v(length_with_groups);
+
+ let rec_group_index = 0;
- for (let type of wasm.types) {
+ for (let i = 0; i < wasm.types.length; i++) {
+ if (rec_group_index < wasm.rec_groups.length &&
+ wasm.rec_groups[rec_group_index].start == i) {
+ section.emit_u8(kWasmRecursiveTypeGroupForm);
+ section.emit_u32v(wasm.rec_groups[rec_group_index].size);
+ rec_group_index++;
+ }
+
+ let type = wasm.types[i];
if (type.supertype != kNoSuperType) {
- section.emit_u8(kWasmSubtypeForm);
+ section.emit_u8(type.is_final ? kWasmSubtypeFinalForm
+ : kWasmSubtypeForm);
section.emit_u8(1); // supertype count
section.emit_u32v(type.supertype);
+ } else if (!type.is_final) {
+ section.emit_u8(kWasmSubtypeForm);
+ section.emit_u8(0); // no supertypes
}
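+ // E.g. (sketch): the prefix emitted above is [0x4e, 0x01, <super>] for a
+ // final subtype, [0x50, 0x01, <super>] for a non-final subtype, and
+ // [0x50, 0x00] for a non-final type without a supertype; a final type
+ // without a supertype keeps the plain MVP encoding.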
if (type instanceof WasmStruct) {
section.emit_u8(kWasmStructTypeForm);
@@ -1727,14 +1751,6 @@ class WasmModuleBuilder {
});
}
- // If there are any passive data segments, add the DataCount section.
- if (this.early_data_count_section &&
- wasm.data_segments.some(seg => !seg.is_active)) {
- binary.emit_section(kDataCountSectionCode, section => {
- section.emit_u32v(wasm.data_segments.length);
- });
- }
-
// Add table section
if (wasm.tables.length > 0) {
if (debug) print('emitting tables @ ' + binary.length);
@@ -1900,8 +1916,7 @@ class WasmModuleBuilder {
}
// If there are any passive data segments, add the DataCount section.
- if (!this.early_data_count_section &&
- wasm.data_segments.some(seg => !seg.is_active)) {
+ if (wasm.data_segments.some(seg => !seg.is_active)) {
binary.emit_section(kDataCountSectionCode, section => {
section.emit_u32v(wasm.data_segments.length);
});
@@ -1948,18 +1963,24 @@ class WasmModuleBuilder {
let section_length = 0;
binary.emit_section(kCodeSectionCode, section => {
section.emit_u32v(wasm.functions.length);
- let header = new Binary;
+ let header;
for (let func of wasm.functions) {
- header.reset();
- // Function body length will be patched later.
- let local_decls = func.locals || [];
- header.emit_u32v(local_decls.length);
- for (let decl of local_decls) {
- header.emit_u32v(decl.count);
- header.emit_type(decl.type);
+ if (func.locals.length == 0) {
+ // Fast path for functions without locals.
+ section.emit_u32v(func.body.length + 1);
+ section.emit_u8(0); // 0 locals.
+ } else {
+ // Build the locals declarations in a separate buffer first.
+ if (!header) header = new Binary;
+ header.reset();
+ header.emit_u32v(func.locals.length);
+ for (let decl of func.locals) {
+ header.emit_u32v(decl.count);
+ header.emit_type(decl.type);
+ }
+ section.emit_u32v(header.length + func.body.length);
+ section.emit_bytes(header.trunc_buffer());
}
- section.emit_u32v(header.length + func.body.length);
- section.emit_bytes(header.trunc_buffer());
// Set to section offset for now, will update.
func.body_offset = section.length;
section.emit_bytes(func.body);
@@ -2095,6 +2116,28 @@ function wasmSignedLeb(val, max_len = 5) {
'Leb value <' + val + '> exceeds maximum length of ' + max_len);
}
+function wasmSignedLeb64(val, max_len) {
+ if (typeof val != "bigint") {
+ if (val < Math.pow(2, 31)) {
+ return wasmSignedLeb(val, max_len);
+ }
+ val = BigInt(val);
+ }
+ let res = [];
+ for (let i = 0; i < max_len; ++i) {
+ let v = val & 0x7fn;
+ // If {v} sign-extended from 7 to 32 bits is equal to val, we are done.
+ if (((v << 25n) >> 25n) == val) {
+ res.push(Number(v));
+ return res;
+ }
+ res.push(Number(v) | 0x80);
+ val = val >> 7n;
+ }
+ throw new Error(
+ 'Leb value <' + val + '> exceeds maximum length of ' + max_len);
+}
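+
+// Example (sketch): BigInt inputs are encoded as signed LEB128, e.g.
+//   wasmSignedLeb64(5n, 10) == [0x05]
+//   wasmSignedLeb64(300n, 10) == [0xac, 0x02]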
+
function wasmUnsignedLeb(val, max_len = 5) {
let res = [];
for (let i = 0; i < max_len; ++i) {
@@ -2117,7 +2160,7 @@ function wasmI32Const(val) {
// Note: Since {val} is a JS number, the generated constant only has 53 bits of
// precision.
function wasmI64Const(val) {
- return [kExprI64Const, ...wasmSignedLeb(val, 10)];
+ return [kExprI64Const, ...wasmSignedLeb64(val, 10)];
}
function wasmF32Const(f) {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-wrapper-inlining.js b/deps/v8/test/mjsunit/wasm/wasm-wrapper-inlining.js
new file mode 100644
index 0000000000..46f1c35b41
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-wrapper-inlining.js
@@ -0,0 +1,55 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbofan --no-always-turbofan
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+function testOptimized(fct) {
+ %PrepareFunctionForOptimization(fct);
+ for (let i = 0; i < 10; ++i) {
+ fct();
+ }
+ %OptimizeFunctionOnNextCall(fct);
+ fct();
+ assertOptimized(fct);
+}
+
+(function TestWrapperInlining() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction('i32Add', makeSig([kWasmI32, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprI32Add,
+ ])
+ .exportFunc();
+
+ builder.addFunction('i32Mul', makeSig([kWasmI32, kWasmI32], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprI32Mul,
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate({});
+ let wasmFct = instance.exports.i32Add;
+ let fct = () => wasmFct(3, 5);
+
+ testOptimized(fct);
+ // Replacing the wasm function will cause a deopt.
+ wasmFct = instance.exports.i32Mul;
+ assertEquals(15, fct());
+ assertUnoptimized(fct);
+
+ // Running it again multiple times will optimize the function again.
+ testOptimized(fct);
+ // Switching back to the previous wasm function will not cause a deopt.
+ wasmFct = instance.exports.i32Add;
+ assertEquals(8, fct());
+ assertOptimized(fct);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wrapper-compilation.js b/deps/v8/test/mjsunit/wasm/wrapper-compilation.js
new file mode 100644
index 0000000000..b2083932f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wrapper-compilation.js
@@ -0,0 +1,33 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --allow-natives-syntax --dump-counters
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+function compileAdd(val) {
+ var builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let sig = makeSig([kWasmI32], [wasmRefType(struct)])
+ builder.addFunction(`fct`, sig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprI32Const, val,
+ kExprI32Add,
+ kGCPrefix, kExprStructNew, struct,
+ ])
+ .exportFunc();
+ return builder.instantiate();
+}
+
+assertEquals(0, %WasmCompiledExportWrappersCount());
+let a = compileAdd(1);
+a.exports.fct(1);
+assertEquals(1, %WasmCompiledExportWrappersCount());
+let b = compileAdd(2);
+b.exports.fct(1);
+assertEquals(1, %WasmCompiledExportWrappersCount());
+let c = compileAdd(2);
+c.exports.fct(1);
+assertEquals(1, %WasmCompiledExportWrappersCount());
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-array-buffer.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-array-buffer.js
deleted file mode 100644
index ddf37d58c8..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-array-buffer.js
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --harmony-rab-gsab --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-
-(function TestSharedArrayBuffer() {
- function createObjects() {
- const growableArrayBuffer = new SharedArrayBuffer(5, { maxByteLength: 10 });
- globalThis.growableArrayBuffer = growableArrayBuffer;
- const array1 = new Uint8Array(growableArrayBuffer);
- for (let i = 0; i < 5; i++) {
- array1[i] = i;
- }
-
- const arrayBuffer = new SharedArrayBuffer(5);
- globalThis.arrayBuffer = arrayBuffer;
- const array2 = new Uint8Array(arrayBuffer);
- for (let i = 0; i < 5; i++) {
- array2[i] = i;
- }
- }
- const { growableArrayBuffer, arrayBuffer } = takeAndUseWebSnapshot(createObjects, ['growableArrayBuffer', 'arrayBuffer']);
- assertEquals(5, growableArrayBuffer.byteLength);
- assertEquals(10, growableArrayBuffer.maxByteLength);
- assertTrue(growableArrayBuffer.growable);
- const array1 = new Uint8Array(growableArrayBuffer);
- for (let i = 0; i < 5; i++) {
- assertEquals(array1[i], i);
- }
-
- assertEquals(arrayBuffer.byteLength, 5);
- assertEquals(arrayBuffer.maxByteLength, 5);
- assertFalse(arrayBuffer.growable, false);
- const array2 = new Uint8Array(arrayBuffer);
- for (let i = 0; i < 5; i++) {
- assertEquals(array2[i], i);
- }
-})();
-
-(function TestArrayBuffer() {
- function createObjects() {
- const resizableArrayBuffer = new ArrayBuffer(5, {maxByteLength: 10});
- globalThis.resizableArrayBuffer = resizableArrayBuffer;
- const array1 = new Uint8Array(resizableArrayBuffer);
- for (let i = 0; i < 5; i++) {
- array1[i] = i;
- }
-
- const arrayBuffer = new ArrayBuffer(5);
- globalThis.arrayBuffer = arrayBuffer;
- const array2 = new Uint8Array(arrayBuffer);
- for (let i = 0; i < 5; i++) {
- array2[i] = i;
- }
-
- const detachedArrayBuffer = new ArrayBuffer(5);
- %ArrayBufferDetach(detachedArrayBuffer);
- globalThis.detachedArrayBuffer = detachedArrayBuffer;
- }
- const { resizableArrayBuffer, arrayBuffer, detachedArrayBuffer } = takeAndUseWebSnapshot(createObjects, ['resizableArrayBuffer', 'arrayBuffer', 'detachedArrayBuffer']);
- assertEquals(5, resizableArrayBuffer.byteLength);
- assertEquals(10, resizableArrayBuffer.maxByteLength);
- assertTrue(resizableArrayBuffer.resizable)
- const array1 = new Uint8Array(resizableArrayBuffer);
- for (let i = 0; i < 5; i++) {
- assertEquals(array1[i], i);
- }
-
- assertEquals(5, arrayBuffer.byteLength);
- assertEquals(5, arrayBuffer.maxByteLength);
- assertFalse(arrayBuffer.resizable)
- const array2 = new Uint8Array(arrayBuffer);
- for (let i = 0; i < 5; i++) {
- assertEquals(array2[i], i);
- }
-
- assertEquals(0, detachedArrayBuffer.byteLength);
- assertEquals(0, detachedArrayBuffer.maxByteLength);
-})()
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-array.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-array.js
deleted file mode 100644
index d1d021ab8a..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-array.js
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestArray() {
- function createObjects() {
- globalThis.foo = {
- array: [5, 6, 7]
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([5, 6, 7], foo.array);
-})();
-
-(function TestPackedDoubleElementsArray() {
- function createObjects() {
- globalThis.foo = [1.2, 2.3];
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1.2, 2.3], foo);
-})();
-
-(function TestArrayContainingDoubleAndSmi() {
- function createObjects() {
- globalThis.foo = [1.2, 1];
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1.2, 1], foo);
-})();
-
-(function TestArrayContainingDoubleAndObject() {
- function createObjects() {
- globalThis.foo = [1.2, {'key': 'value'}];
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1.2, {'key': 'value'}], foo);
-})();
-
-(function TestEmptyArray() {
- function createObjects() {
- globalThis.foo = {
- array: []
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(0, foo.array.length);
- assertEquals([], foo.array);
-})();
-
-(function TestArrayContainingArray() {
- function createObjects() {
- globalThis.foo = {
- array: [[2, 3], [4, 5]]
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([[2, 3], [4, 5]], foo.array);
-})();
-
-(function TestArrayContainingObject() {
- function createObjects() {
- globalThis.foo = {
- array: [{ a: 1 }, { b: 2 }]
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(1, foo.array[0].a);
- assertEquals(2, foo.array[1].b);
-})();
-
-(function TestArrayContainingFunction() {
- function createObjects() {
- globalThis.foo = {
- array: [function () { return 5; }]
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(5, foo.array[0]());
-})();
-
-(function TestInPlaceStringsInArray() {
- function createObjects() {
- globalThis.foo = {
- array: ['foo', 'bar', 'baz']
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- // We cannot test that the strings are really in-place; that's covered by
- // cctests.
- assertEquals('foobarbaz', foo.array.join(''));
-})();
-
-(function TestRepeatedInPlaceStringsInArray() {
- function createObjects() {
- globalThis.foo = {
- array: ['foo', 'bar', 'foo']
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- // We cannot test that the strings are really in-place; that's covered by
- // cctests.
- assertEquals('foobarfoo', foo.array.join(''));
-})();
-
-(function TestArrayWithSlackElements() {
- function createObjects() {
- globalThis.foo = {
- array: [],
- doubleArray: [],
- objectArray: []
- };
- for (let i = 0; i < 100; ++i) {
- globalThis.foo.array.push(i);
- globalThis.foo.doubleArray.push(i + 0.1);
- globalThis.foo.objectArray.push({});
- }
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(100, foo.array.length);
- assertEquals(100, foo.doubleArray.length);
- assertEquals(100, foo.objectArray.length);
- for (let i = 0; i < 100; ++i){
- assertEquals(i, foo.array[i]);
- assertEquals(i + 0.1, foo.doubleArray[i]);
- assertEquals({}, foo.objectArray[i]);
- }
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-base.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-base.js
deleted file mode 100644
index 1a82b8f134..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-base.js
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestMinimal() {
- function createObjects() {
- globalThis.foo = {
- str: 'hello',
- n: 42,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('hello', foo.str);
- assertEquals(42, foo.n);
-})();
-
-(function TestEmptyObject() {
- function createObjects() {
- globalThis.foo = {};
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([], Object.keys(foo));
-})();
-
-(function TestNumbers() {
- function createObjects() {
- globalThis.foo = {
- a: 6,
- b: -7,
- c: 7.3,
- d: NaN,
- e: Number.POSITIVE_INFINITY,
- f: Number.NEGATIVE_INFINITY,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(6, foo.a);
- assertEquals(-7, foo.b);
- assertEquals(7.3, foo.c);
- assertEquals(NaN, foo.d);
- assertEquals(Number.POSITIVE_INFINITY, foo.e);
- assertEquals(Number.NEGATIVE_INFINITY, foo.f);
-})();
-
-(function TestTopLevelNumbers() {
- function createObjects() {
- globalThis.a = 6;
- globalThis.b = -7;
- }
- const { a, b } = takeAndUseWebSnapshot(createObjects, ['a', 'b']);
- assertEquals(6, a);
- assertEquals(-7, b);
-})();
-
-(function TestOddballs() {
- function createObjects() {
- globalThis.foo = {
- a: true,
- b: false,
- c: null,
- d: undefined,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(foo.a);
- assertFalse(foo.b);
- assertEquals(null, foo.c);
- assertEquals(undefined, foo.d);
-})();
-
-(function TestTopLevelOddballs() {
- function createObjects() {
- globalThis.a = true;
- globalThis.b = false;
- }
- const { a, b } = takeAndUseWebSnapshot(createObjects, ['a', 'b']);
- assertTrue(a);
- assertFalse(b);
-})();
-
-(function TestStringWithNull() {
- function createObjects() {
- globalThis.s = 'l\0l';
- }
- const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
- assertEquals(108, s.charCodeAt(0));
- assertEquals(0, s.charCodeAt(1));
- assertEquals(108, s.charCodeAt(2));
-})();
-
-(function TestTwoByteString() {
- function createObjects() {
- globalThis.s = '\u{1F600}';
- }
- const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
- assertEquals('\u{1F600}', s);
-})();
-
-(function TestTwoByteStringWithNull() {
- function createObjects() {
- globalThis.s = 'l\0l\u{1F600}';
- }
- const { s } = takeAndUseWebSnapshot(createObjects, ['s']);
- assertEquals(108, s.charCodeAt(0));
- assertEquals(0, s.charCodeAt(1));
- assertEquals(108, s.charCodeAt(2));
-})();
-
-(function TestRegExp() {
- function createObjects() {
- globalThis.foo = {
- re: /ab+c/gi,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('/ab+c/gi', foo.re.toString());
- assertTrue(foo.re.test('aBc'));
- assertFalse(foo.re.test('ac'));
-})();
-
-(function TestRegExpNoFlags() {
- function createObjects() {
- globalThis.foo = {
- re: /ab+c/,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('/ab+c/', foo.re.toString());
- assertTrue(foo.re.test('abc'));
- assertFalse(foo.re.test('ac'));
-})();
-
-(function TestTopLevelRegExp() {
- function createObjects() {
- globalThis.re = /ab+c/gi;
- }
- const { re } = takeAndUseWebSnapshot(createObjects, ['re']);
- assertEquals('/ab+c/gi', re.toString());
- assertTrue(re.test('aBc'));
- assertFalse(re.test('ac'));
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-bigint.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-bigint.js
deleted file mode 100644
index 3ad17da2f0..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-bigint.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestBigInt() {
- function createObjects() {
- const b = 100n;
- const c = 2n ** 222n;
- globalThis.foo = { bar: b, bar1: c };
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(100n, foo.bar);
- assertEquals(2n ** 222n , foo.bar1)
-})();
-
-(function TestBigIntInArray() {
- function createObjects() {
- const b = 100n;
- const c = 2n ** 222n;
- globalThis.foo = [b, c];
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([100n, 2n ** 222n], foo)
-})();
-
-(function TestBigIntInFunctionContext() {
- function createObjects() {
- globalThis.foo = {
- key: (function () {
- const b = 100n;
- const c = 2n ** 222n;
- function inner() {
- return [b, c];
- }
- return inner;
- })()
- };
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([100n, 2n**222n], foo.key());
-})();
-
-(function TestBigIntInFunctionContextWithParentContext() {
- function createObjects() {
- globalThis.foo = {
- key: (function () {
- const b = 100n;
- function inner() {
- const c = 2n ** 222n;
- function innerinner() {
- return [b, c]
- }
- return innerinner
- }
- return inner();
- })()
- };
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([100n, 2n**222n], foo.key());
-})();
-
-(function TestBigIntInTopLevelFunctionWithContext() {
- function createObjects() {
- globalThis.foo = (function () {
- const b = 100n;
- const c = 2n ** 222n;
- function inner() { return [b, c]; }
- return inner;
- })();
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([100n, 2n**222n], foo());
-})();
-
-
-(function TestBigIntInClassStaticProperty() {
- function createObjects() {
- globalThis.foo = class Foo {
- static b = 100n;
- static c = 2n ** 222n;
- };
- }
- const { foo: Foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([100n, 2n**222n], [Foo.b, Foo.c]);
-})();
-
-(function TestBigIntInClassWithConstructor() {
- function createObjects() {
- globalThis.foo = class Foo {
- constructor() {
- this.b = 100n;
- this.c = 2n ** 222n;
- }
- };
- }
- const { foo: Foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- const foo = new Foo()
- assertEquals([100n, 2n**222n], [foo.b, foo.c]);
-})();
-
-(async function TestBigIntInClassWithMethods() {
- function createObjects() {
- globalThis.foo = class Foo {
- b() {
- return 100n;
- }
- async c() {
- return 2n ** 222n;
- }
- };
- }
- const { foo: Foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- const foo = new Foo()
- assertEquals([100n, 2n**222n], [foo.b(), await foo.c()]);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-builtin.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-builtin.js
deleted file mode 100644
index 12fb8c8d45..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-builtin.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestBuiltin() {
- function createObjects() {
- globalThis.obj1 = {'a': Error};
- globalThis.obj2 = {'b': Error.prototype};
- }
- const realm = Realm.create();
- const {obj1, obj2} = takeAndUseWebSnapshot(
- createObjects, ['obj1', 'obj2'], realm);
- assertSame(Realm.eval(realm, 'Error'), obj1.a);
- assertSame(Realm.eval(realm, 'Error.prototype'), obj2.b);
-})();
-
-(function TestInheritFromBuiltin() {
- function createObjects() {
- function inherit(subclass, superclass) {
- function middle() {}
- middle.prototype = superclass.prototype;
- subclass.prototype = new middle();
- subclass.prototype.constructor = subclass;
- };
- function MyError() {}
- inherit(MyError, Error);
- globalThis.MyError = MyError;
- }
- const realm = Realm.create();
- const {MyError} = takeAndUseWebSnapshot(createObjects, ['MyError'], realm);
- const obj = new MyError();
- assertSame(Realm.eval(realm, 'Error.prototype'), obj.__proto__.__proto__);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-circular-reference.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-circular-reference.js
deleted file mode 100644
index a120d7afb5..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-circular-reference.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestCircularObjectReference() {
- function createObjects() {
- globalThis.foo = {
- bar: {}
- };
- globalThis.foo.bar.circular = globalThis.foo;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertSame(foo, foo.bar.circular);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-class.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-class.js
deleted file mode 100644
index 5b3ecc08ca..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-class.js
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestEmptyClass() {
- function createObjects() {
- globalThis.Foo = class Foo { };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const x = new Foo();
-})();
-
-(function TestClassWithConstructor() {
- function createObjects() {
- globalThis.Foo = class {
- constructor() {
- this.n = 42;
- }
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const x = new Foo(2);
- assertEquals(42, x.n);
-})();
-
-(function TestClassWithMethods() {
- function createObjects() {
- globalThis.Foo = class {
- f() { return 7; };
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const x = new Foo();
- assertEquals(7, x.f());
-})();
-
-(function TestDerivedClass() {
- function createObjects() {
- globalThis.Base = class { f() { return 8; }};
- globalThis.Foo = class extends Base { };
- }
- const realm = Realm.create();
- const { Foo, Base } = takeAndUseWebSnapshot(createObjects, ['Foo', 'Base'], realm);
- assertEquals(Base.prototype, Foo.prototype.__proto__);
- assertEquals(Base, Foo.__proto__);
- const x = new Foo();
- assertEquals(8, x.f());
-})();
-
-(function TestDerivedClassWithConstructor() {
- function createObjects() {
- globalThis.Base = class { constructor() {this.m = 43;}};
- globalThis.Foo = class extends Base{
- constructor() {
- super();
- this.n = 42;
- }
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const x = new Foo();
- assertEquals(42, x.n);
- assertEquals(43, x.m);
-})();
-
-(async function TestClassWithAsyncMethods() {
- function createObjects() {
- globalThis.Foo = class {
- async g() { return 6; };
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const x = new Foo();
- assertEquals(6, await x.g());
-})();
-
-(function TestClassWithProperties() {
- function createObjects() {
- globalThis.Foo = class Foo { };
- Foo.key1 = 'value1';
- Foo.key2 = 1;
- Foo.key3 = 2.2;
- Foo.key4 = function key4() {
- return 'key4';
- }
- Foo.key5 = [1, 2];
- Foo.key6 = {'key':'value'}
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- assertEquals('value1', Foo.key1);
- assertEquals(1, Foo.key2);
- assertEquals(2.2, Foo.key3);
- assertEquals('key4', Foo.key4());
- assertEquals([1, 2], Foo.key5);
- assertEquals({ 'key': 'value' }, Foo.key6 );
-})();
-
-(function TestClassWithStaticProperties() {
- function createObjects() {
- globalThis.Foo = class Foo {
- static key1 = 'value1';
- static key2 = 1;
- static key3 = 2.2;
- static key4 = [1, 2];
- static key5 = {'key':'value'}
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- assertEquals('value1', Foo.key1);
- assertEquals(1, Foo.key2);
- assertEquals(2.2, Foo.key3);
- assertEquals([1, 2], Foo.key4);
- assertEquals({'key': 'value'}, Foo.key5);
-})();
-
-(function TestClassWithStaticMethods() {
- function createObjects() {
- globalThis.Foo = class Foo {
- static foo() {
- return 'foo'
- }
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- assertEquals('foo', Foo.foo());
-})();
-
-(async function TestClassWithStaticAsyncMethods() {
- function createObjects() {
- globalThis.Foo = class Foo {
- static async foo() {
- await Promise.resolve(1);
- return 'foo'
- }
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- assertEquals('foo', await Foo.foo());
-})();
-
-(function TestClassWithStaticGeneratorMethods() {
- function createObjects() {
- globalThis.Foo = class Foo {
- static *foo() {
- yield 'foo1'
- return 'foo2'
- }
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const foo = Foo.foo()
- assertEquals('foo1', foo.next().value);
- assertEquals('foo2', foo.next().value);
- assertEquals(true, foo.next().done);
-})();
-
-(async function TestClassWithStaticAsyncGeneratorMethods() {
- function createObjects() {
- globalThis.Foo = class Foo {
- static async *foo() {
- yield 'foo1'
- return 'foo2'
- }
- };
- }
- const { Foo } = takeAndUseWebSnapshot(createObjects, ['Foo']);
- const foo = Foo.foo()
- assertEquals('foo1', (await foo.next()).value);
- assertEquals('foo2', (await foo.next()).value);
- assertEquals(true, (await foo.next()).done);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-data-view.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-data-view.js
deleted file mode 100644
index 8e2d86134e..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-data-view.js
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --harmony-rab-gsab --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestDataView() {
- function createObjects() {
- const buffer = new ArrayBuffer(10);
- const array1 = new DataView(buffer, 0, 5);
- const array2 = new DataView(buffer, 5, 5);
- const array3 = new DataView(buffer, 2, 5);
- for (let i = 0; i < 5; i++) {
- array1.setUint8(i, i);
- array2.setUint8(i, i);
- }
- globalThis.array1 = array1;
- globalThis.array2 = array2;
- globalThis.array3 = array3;
- }
- const {array1, array2, array3} = takeAndUseWebSnapshot(createObjects, [
- 'array1',
- 'array2',
- 'array3'
- ]);
- assertEquals(5, array1.byteLength);
- assertEquals(0, array1.byteOffset);
- assertEquals(5, array2.byteLength);
- assertEquals(5, array2.byteOffset);
- assertEquals(5, array3.byteLength);
- assertEquals(2, array3.byteOffset);
-
- for (let i = 0; i < 5; i++) {
- assertEquals(i, array1.getUint8(i));
- assertEquals(i, array2.getUint8(i));
- }
- assertSame(array1.buffer, array2.buffer);
- assertSame(array1.buffer, array3.buffer);
-
- new DataView(array1.buffer).setUint8(2, 10);
- assertTrue(array1.getUint8(2) === 10);
- assertTrue(array3.getUint8(0) === 10);
-})();
-
-(function TestResizableDataView() {
- function createObjects() {
- let resizableArrayBuffer = new ArrayBuffer(1024, {
- maxByteLength: 1024 * 2,
- });
- // 0 offset, auto length
- let array1 = new DataView(resizableArrayBuffer);
- globalThis.array1 = array1;
-
- // Non-0 offset, auto length
- let array2 = new DataView(resizableArrayBuffer, 256);
- globalThis.array2 = array2;
-
- // Non-0 offset, fixed length
- let array3 = new DataView(resizableArrayBuffer, 128, 4);
- globalThis.array3 = array3;
- }
- const {array1, array2, array3} = takeAndUseWebSnapshot(createObjects, [
- 'array1',
- 'array2',
- 'array3',
- ]);
- assertTrue(array1.buffer.resizable);
- assertEquals(2048, array1.buffer.maxByteLength);
- assertEquals(1024, array1.byteLength);
- assertEquals(0, array1.byteOffset, 0);
- assertEquals(768, array2.byteLength); // 1024 - 256
- assertEquals(256, array2.byteOffset);
- assertEquals(4, array3.byteLength);
- assertEquals(128, array3.byteOffset);
-
- array1.buffer.resize(1024 * 2);
- assertEquals(2048, array1.byteLength);
- assertEquals(1792, array2.byteLength); // 2048 - 256
- assertEquals(4, array3.byteLength);
-
- assertSame(array1.buffer, array2.buffer);
- assertSame(array1.buffer, array3.buffer);
-})();
-
-(function TestGrowableDataView() {
- function createObjects() {
- let resizableArrayBuffer = new SharedArrayBuffer(1024, {
- maxByteLength: 1024 * 2,
- });
- // 0 offset, auto length
- let array1 = new DataView(resizableArrayBuffer);
- globalThis.array1 = array1;
-
- // Non-0 offset, auto length
- let array2 = new DataView(resizableArrayBuffer, 256);
- globalThis.array2 = array2;
-
- // Non-0 offset, fixed length
- let array3 = new DataView(resizableArrayBuffer, 128, 4);
- globalThis.array3 = array3;
- }
- const {array1, array2, array3} = takeAndUseWebSnapshot(createObjects, [
- 'array1',
- 'array2',
- 'array3',
- ]);
- assertTrue(array1.buffer.growable);
- assertEquals(2048, array1.buffer.maxByteLength);
- assertEquals(1024, array1.byteLength);
- assertEquals(0, array1.byteOffset);
- assertEquals(768, array2.byteLength); // 1024 - 256
- assertEquals(256, array2.byteOffset);
- assertEquals(4, array3.byteLength);
- assertEquals(128, array3.byteOffset);
-
- array1.buffer.grow(1024 * 2);
- assertEquals(2048, array1.byteLength);
- assertEquals(1792, array2.byteLength); // 2048 - 256
- assertEquals(4, array3.byteLength);
-
- assertSame(array1.buffer, array2.buffer);
- assertSame(array1.buffer, array3.buffer);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js
deleted file mode 100644
index 73bdd27b4b..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-externals.js
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-web-snapshots --allow-natives-syntax --verify-heap
-
-const external_1 = {external: 1};
-const external_2 = {external: 2};
-const object = {
- a: [1,2],
- b: external_1,
- c: [external_1, external_2],
- d: { d_a: external_2 }
-};
-
-(function testNoExternals() {
- const snapshot = WebSnapshot.serialize(object);
- const deserialized = WebSnapshot.deserialize(snapshot);
- %HeapObjectVerify(deserialized);
- assertEquals(object, deserialized);
- assertEquals(external_1, deserialized.b);
- assertNotSame(external_1, deserialized.b);
- assertEquals(external_2, deserialized.d.d_a);
- assertNotSame(external_2, deserialized.d.d_a);
-})();
-
-(function testOneExternals() {
- const externals = [external_1];
- const snapshot = WebSnapshot.serialize(object, externals);
- const replaced_externals = [{replacement:1}]
- const deserialized = WebSnapshot.deserialize(snapshot, replaced_externals);
- %HeapObjectVerify(deserialized);
- assertEquals(object.a, deserialized.a);
- assertSame(replaced_externals[0], deserialized.b);
- assertArrayEquals([replaced_externals[0], external_2], deserialized.c);
- assertSame(replaced_externals[0], deserialized.c[0]);
- assertNotSame(external_2, deserialized.c[1]);
- assertEquals(external_2, deserialized.d.d_a);
- assertNotSame(external_2, deserialized.d.d_a);
-})();
-
-(function testTwoExternals() {
- const externals = [external_1, external_2];
- const snapshot = WebSnapshot.serialize(object, externals);
- const replaced_externals = [{replacement:1}, {replacement:2}]
- const deserialized = WebSnapshot.deserialize(snapshot, replaced_externals);
- %HeapObjectVerify(deserialized);
- assertEquals(object.a, deserialized.a);
- assertSame(deserialized.b, replaced_externals[0]);
- assertArrayEquals(replaced_externals, deserialized.c);
- assertSame(replaced_externals[0], deserialized.c[0]);
- assertSame(replaced_externals[1], deserialized.c[1]);
- assertSame(replaced_externals[1], deserialized.d.d_a);
-})();
-
-(function testApiObject() {
- const api_object = new d8.dom.Div();
- const source_1 = [{}, api_object];
- assertThrows(() => WebSnapshot.serialize(source_1));
-
- let externals = [external_1]
- const source_2 = [{}, external_1, api_object, api_object];
- const snapshot_2 = WebSnapshot.serialize(source_2, externals);
- %HeapObjectVerify(externals);
- // Check that the unhandled api object is added to the externals.
- assertArrayEquals([external_1, api_object], externals);
-
- assertThrows(() => WebSnapshot.deserialize(snapshot_2));
- assertThrows(() => WebSnapshot.deserialize(snapshot_2, []));
- assertThrows(() => WebSnapshot.deserialize(snapshot_2, [external_1]));
-
- const result_2 = WebSnapshot.deserialize(snapshot_2, [external_1, api_object]);
- %HeapObjectVerify(externals);
- %HeapObjectVerify(result_2);
- assertArrayEquals(source_2, result_2);
- assertNotSame(source_2[0], result_2[0]);
- assertSame(external_1, result_2[1]);
- assertSame(api_object, result_2[2]);
- assertSame(api_object, result_2[3]);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-function-context.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-function-context.js
deleted file mode 100644
index 88854d047d..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-function-context.js
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestFunctionWithContext() {
- function createObjects() {
- globalThis.foo = {
- key: (function () {
- let result = 'bar';
- function inner() { return result; }
- return inner;
- })(),
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo.key());
-})();
-
-(function TestInnerFunctionWithContextAndParentContext() {
- function createObjects() {
- globalThis.foo = {
- key: (function () {
- let part1 = 'snap';
- function inner() {
- let part2 = 'shot';
- function innerinner() {
- return part1 + part2;
- }
- return innerinner;
- }
- return inner();
- })()
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('snapshot', foo.key());
-})();
-
-(function TestTopLevelFunctionWithContext() {
- function createObjects() {
- globalThis.foo = (function () {
- let result = 'bar';
- function inner() { return result; }
- return inner;
- })();
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo());
-})();
-
-(function TestContextTree() {
- function createObjects() {
- (function outer() {
- let a = 10;
- let b = 20;
- (function inner1() {
- let c = 5;
- globalThis.f1 = function() { return a + b + c; };
- })();
- (function inner2() {
- let d = 10;
- globalThis.f2 = function() { return a - b - d; };
- })();
- })();
- }
- const {f1, f2} = takeAndUseWebSnapshot(createObjects, ['f1', 'f2']);
- assertEquals(35, f1());
- assertEquals(-20, f2());
-})();
-
-(function TestContextReferringToFunction() {
- function createObjects() {
- (function outer() {
- let a = function() { return 10; }
- globalThis.f = function() { return a(); };
- })();
- }
- const {f} = takeAndUseWebSnapshot(createObjects, ['f']);
- assertEquals(10, f());
-})();
-
-(function TestNonInlinedScopeInfoInContext() {
- function createObjects() {
- globalThis.bar = (function() {
- let a1 = 1;
- let a2 = 1;
- let a3 = 1;
- let a4 = 1;
- let a5 = 1;
- let a6 = 1;
- let a7 = 1;
- let a8 = 1;
- let a9 = 1;
- let a10 = 1;
- let a11 = 1;
- let a12 = 1;
- let a13 = 1;
- let a14 = 1;
- let a15 = 1;
- let a16 = 1;
- let a17 = 1;
- let a18 = 1;
- let a19 = 1;
- let a20 = 1;
- let a21 = 1;
- let a22 = 1;
- let a23 = 1;
- let a24 = 1;
- let a25 = 1;
- let a26 = 1;
- let a27 = 1;
- let a28 = 1;
- let a29 = 1;
- let a30 = 1;
- let a31 = 1;
- let a32 = 1;
- let a33 = 1;
- let a34 = 1;
- let a35 = 1;
- let a36 = 1;
- let a37 = 1;
- let a38 = 1;
- let a39 = 1;
- let a40 = 1;
- let a41 = 1;
- let a42 = 1;
- let a43 = 1;
- let a44 = 1;
- let a45 = 1;
- let a46 = 1;
- let a47 = 1;
- let a48 = 1;
- let a49 = 1;
- let a50 = 1;
- let a51 = 1;
- let a52 = 1;
- let a53 = 1;
- let a54 = 1;
- let a55 = 1;
- let a56 = 1;
- let a57 = 1;
- let a58 = 1;
- let a59 = 1;
- let a60 = 1;
- let a61 = 1;
- let a62 = 1;
- let a63 = 1;
- let a64 = 1;
- let a65 = 1;
- let a66 = 1;
- let a67 = 1;
- let a68 = 1;
- let a69 = 1;
- let a70 = 1;
- let a71 = 1;
- let a72 = 1;
- let a73 = 1;
- let a74 = 1;
- let a75 = 1;
- function inner1() {
- return a1;
- }
- function inner2() {
- return a2;
- }
- function inner3() {
- return a3;
- }
- function inner4() {
- return a4;
- }
- function inner5() {
- return a5;
- }
- function inner6() {
- return a6;
- }
- function inner7() {
- return a7;
- }
- function inner8() {
- return a8;
- }
- function inner9() {
- return a9;
- }
- function inner10() {
- return a10;
- }
- function inner11() {
- return a11;
- }
- function inner12() {
- return a12;
- }
- function inner13() {
- return a13;
- }
- function inner14() {
- return a14;
- }
- function inner15() {
- return a15;
- }
- function inner16() {
- return a16;
- }
- function inner17() {
- return a17;
- }
- function inner18() {
- return a18;
- }
- function inner19() {
- return a19;
- }
- function inner20() {
- return a20;
- }
- function inner21() {
- return a21;
- }
- function inner22() {
- return a22;
- }
- function inner23() {
- return a23;
- }
- function inner24() {
- return a24;
- }
- function inner25() {
- return a25;
- }
- function inner26() {
- return a26;
- }
- function inner27() {
- return a27;
- }
- function inner28() {
- return a28;
- }
- function inner29() {
- return a29;
- }
- function inner30() {
- return a30;
- }
- function inner31() {
- return a31;
- }
- function inner32() {
- return a32;
- }
- function inner33() {
- return a33;
- }
- function inner34() {
- return a34;
- }
- function inner35() {
- return a35;
- }
- function inner36() {
- return a36;
- }
- function inner37() {
- return a37;
- }
- function inner38() {
- return a38;
- }
- function inner39() {
- return a39;
- }
- function inner40() {
- return a40;
- }
- function inner41() {
- return a41;
- }
- function inner42() {
- return a42;
- }
- function inner43() {
- return a43;
- }
- function inner44() {
- return a44;
- }
- function inner45() {
- return a45;
- }
- function inner46() {
- return a46;
- }
- function inner47() {
- return a47;
- }
- function inner48() {
- return a48;
- }
- function inner49() {
- return a49;
- }
- function inner50() {
- return a50;
- }
- function inner51() {
- return a51;
- }
- function inner52() {
- return a52;
- }
- function inner53() {
- return a53;
- }
- function inner54() {
- return a54;
- }
- function inner55() {
- return a55;
- }
- function inner56() {
- return a56;
- }
- function inner57() {
- return a57;
- }
- function inner58() {
- return a58;
- }
- function inner59() {
- return a59;
- }
- function inner60() {
- return a60;
- }
- function inner61() {
- return a61;
- }
- function inner62() {
- return a62;
- }
- function inner63() {
- return a63;
- }
- function inner64() {
- return a64;
- }
- function inner65() {
- return a65;
- }
- function inner66() {
- return a66;
- }
- function inner67() {
- return a67;
- }
- function inner68() {
- return a68;
- }
- function inner69() {
- return a69;
- }
- function inner70() {
- return a70;
- }
- function inner71() {
- return a71;
- }
- function inner72() {
- return a72;
- }
- function inner73() {
- return a73;
- }
- function inner74() {
- return a74;
- }
- function inner75() {
- return a75;
- }
- return inner1;
- })()
- }
- const {bar} = takeAndUseWebSnapshot(createObjects, ['bar']);
- assertEquals(1, bar());
-})();
-
-(function TestMoreThanOneScopeLocalInContext() {
- function createObjects() {
- globalThis.foo = (function() {
- let result = 'bar';
- let a = '1';
- function inner() {
- return result;
- }
- function inner2() {
- return a;
- }
- return inner;
- })();
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo());
-})();
-
-(function TestContextReferencingArray() {
- function createObjects() {
- function outer() {
- let o = [11525];
- function inner() { return o; }
- return inner;
- }
- globalThis.foo = {
- func: outer()
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(11525, foo.func()[0]);
-})();
-
-(function TestContextReferencingObject() {
- function createObjects() {
- function outer() {
- let o = { value: 11525 };
- function inner() { return o; }
- return inner;
- }
- globalThis.foo = {
- func: outer()
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(11525, foo.func().value);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-function.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-function.js
deleted file mode 100644
index 9cd5e7f55c..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-function.js
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestFunction() {
- function createObjects() {
- globalThis.foo = {
- key: function () { return 'bar'; },
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo.key());
-})();
-
-(function TestOptimizingFunctionFromSnapshot() {
- function createObjects() {
- globalThis.f = function(a, b) { return a + b; }
- }
- const { f } = takeAndUseWebSnapshot(createObjects, ['f']);
- %PrepareFunctionForOptimization(f);
- assertEquals(3, f(1, 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(4, f(1, 3));
-})();
-
-(function TestOptimizingConstructorFromSnapshot() {
- function createObjects() {
- globalThis.C = class {
- constructor(a, b) {
- this.x = a + b;
- }
- }
- }
- const { C } = takeAndUseWebSnapshot(createObjects, ['C']);
- %PrepareFunctionForOptimization(C);
- assertEquals(3, new C(1, 2).x);
- %OptimizeFunctionOnNextCall(C);
- assertEquals(4, new C(1, 3).x);
-})();
-
-(function TestFunctionPrototype() {
- function createObjects() {
- globalThis.F = function(p1, p2) {
- this.x = p1 + p2;
- }
- globalThis.F.prototype.m = function(p1, p2) {
- return this.x + p1 + p2;
- }
- }
- const { F } = takeAndUseWebSnapshot(createObjects, ['F']);
- const o = new F(1, 2);
- assertEquals(3, o.x);
- assertEquals(10, o.m(3, 4));
-})();
-
-(function TestFunctionPrototypeBecomesProto() {
- function createObjects() {
- globalThis.F = function() {}
- globalThis.F.prototype.x = 100;
- }
- const { F } = takeAndUseWebSnapshot(createObjects, ['F']);
- const o = new F();
- assertEquals(100, Object.getPrototypeOf(o).x);
-})();
-
-(function TestFunctionCtorCallsFunctionInPrototype() {
- function createObjects() {
- globalThis.F = function() {
- this.fooCalled = false;
- this.foo();
- }
- globalThis.F.prototype.foo = function() { this.fooCalled = true; };
- }
- const { F } = takeAndUseWebSnapshot(createObjects, ['F']);
- const o = new F();
- assertTrue(o.fooCalled);
-})();
-
-(function TestFunctionPrototypeConnectedToObjectPrototype() {
- function createObjects() {
- globalThis.F = function() {}
- }
- const realm = Realm.create();
- const { F } = takeAndUseWebSnapshot(createObjects, ['F'], realm);
- const o = new F();
- assertSame(Realm.eval(realm, 'Object.prototype'),
- Object.getPrototypeOf(Object.getPrototypeOf(o)));
-})();
-
-(function TestFunctionInheritance() {
- function createObjects() {
- globalThis.Super = function() {}
- globalThis.Super.prototype.superfunc = function() { return 'superfunc'; };
- globalThis.Sub = function() {}
- globalThis.Sub.prototype = Object.create(Super.prototype);
- globalThis.Sub.prototype.subfunc = function() { return 'subfunc'; };
- }
- const realm = Realm.create();
- const { Sub, Super } =
- takeAndUseWebSnapshot(createObjects, ['Sub', 'Super'], realm);
- const o = new Sub();
- assertEquals('superfunc', o.superfunc());
- assertEquals('subfunc', o.subfunc());
- assertSame(Super.prototype, Sub.prototype.__proto__);
- const realmFunctionPrototype = Realm.eval(realm, 'Function.prototype');
- assertSame(realmFunctionPrototype, Super.__proto__);
- assertSame(realmFunctionPrototype, Sub.__proto__);
-})();
-
-(function TestFunctionKinds() {
- function createObjects() {
- globalThis.normalFunction = function() {}
- globalThis.asyncFunction = async function() {}
- globalThis.generatorFunction = function*() {}
- globalThis.asyncGeneratorFunction = async function*() {}
- }
- const realm = Realm.create();
- const {normalFunction, asyncFunction, generatorFunction,
- asyncGeneratorFunction} =
- takeAndUseWebSnapshot(createObjects, ['normalFunction', 'asyncFunction',
- 'generatorFunction', 'asyncGeneratorFunction'], realm);
- const newNormalFunction = Realm.eval(realm, 'f1 = function() {}');
- const newAsyncFunction = Realm.eval(realm, 'f2 = async function() {}');
- const newGeneratorFunction = Realm.eval(realm, 'f3 = function*() {}');
- const newAsyncGeneratorFunction =
- Realm.eval(realm, 'f4 = async function*() {}');
-
- assertSame(newNormalFunction.__proto__, normalFunction.__proto__);
- assertSame(newNormalFunction.prototype.__proto__,
- normalFunction.prototype.__proto__);
-
- assertSame(newAsyncFunction.__proto__, asyncFunction.__proto__);
- assertEquals(undefined, asyncFunction.prototype);
- assertEquals(undefined, newAsyncFunction.prototype);
-
- assertSame(newGeneratorFunction.__proto__, generatorFunction.__proto__);
- assertSame(newGeneratorFunction.prototype.__proto__,
- generatorFunction.prototype.__proto__);
-
- assertSame(newAsyncGeneratorFunction.__proto__,
- asyncGeneratorFunction.__proto__);
- assertSame(newAsyncGeneratorFunction.prototype.__proto__,
- asyncGeneratorFunction.prototype.__proto__);
-})();
-
-(function TestFunctionWithProperties() {
- function createObjects() {
- function bar() { return 'bar'; };
- bar.key1 = 'value1';
- bar.key2 = 1;
- bar.key3 = 2.2;
- bar.key4 = function key4() {
- return 'key4';
- }
- bar.key5 = [1, 2];
- bar.key6 = {'key':'value'}
- globalThis.foo = {
- bar: bar,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar', foo.bar());
- assertEquals('value1', foo.bar.key1);
- assertEquals(1, foo.bar.key2);
- assertEquals(2.2, foo.bar.key3);
- assertEquals('key4', foo.bar.key4());
- assertEquals([1, 2], foo.bar.key5);
- assertEquals({ 'key': 'value' }, foo.bar.key6 );
-})();
-
-(function TestAsyncFunctionWithProperties() {
- function createObjects() {
- async function bar() { return 'bar'; };
- bar.key1 = 'value1';
- bar.key2 = 1;
- bar.key3 = 2.2;
- bar.key4 = function key4() {
- return 'key4';
- }
- bar.key5 = [1, 2];
- bar.key6 = {'key':'value'}
- globalThis.foo = {
- bar: bar,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('value1', foo.bar.key1);
- assertEquals(1, foo.bar.key2);
- assertEquals(2.2, foo.bar.key3);
- assertEquals('key4', foo.bar.key4());
- assertEquals([1, 2], foo.bar.key5);
- assertEquals({'key': 'value'}, foo.bar.key6 );
-})();
-
-(function TestGeneratorFunctionWithProperties() {
- function createObjects() {
- function *bar() { return 'bar'; };
- bar.key1 = 'value1';
- bar.key2 = 1;
- bar.key3 = 2.2;
- bar.key4 = function key4() {
- return 'key4';
- };
- bar.key5 = [1, 2];
- bar.key6 = {'key':'value'};
- globalThis.foo = {
- bar: bar,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('value1', foo.bar.key1);
- assertEquals(1, foo.bar.key2);
- assertEquals(2.2, foo.bar.key3);
- assertEquals('key4', foo.bar.key4());
- assertEquals([1, 2], foo.bar.key5);
- assertEquals({'key': 'value'}, foo.bar.key6 );
-})();
-
-(function TestAsyncGeneratorFunctionWithProperties() {
- function createObjects() {
- async function *bar() { return 'bar'; };
- bar.key1 = 'value1';
- bar.key2 = 1;
- bar.key3 = 2.2;
- bar.key4 = function key4() {
- return 'key4';
- }
- bar.key5 = [1, 2];
- bar.key6 = {'key':'value'}
- globalThis.foo = {
- bar: bar,
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('value1', foo.bar.key1);
- assertEquals(1, foo.bar.key2);
- assertEquals(2.2, foo.bar.key3);
- assertEquals('key4', foo.bar.key4());
- assertEquals([1, 2], foo.bar.key5);
- assertEquals({'key': 'value'}, foo.bar.key6);
-})();
-
-(function TestFunctionsWithSameMap() {
- function createObjects() {
- function bar1() { return 'bar1'; };
- bar1.key = 'value';
-
- function bar2() {
- return 'bar2';
- }
- bar2.key = 'value';
-
- globalThis.foo = {
- bar1: bar1,
- bar2: bar2
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals('bar1', foo.bar1());
- assertEquals('value', foo.bar1.key);
- assertEquals('bar2', foo.bar2());
- assertEquals('value', foo.bar2.key);
- assertTrue(%HaveSameMap(foo.bar1, foo.bar2))
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js
deleted file mode 100644
index 1987861192..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-helpers.js
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-function use(exports) {
- const result = Object.create(null);
- exports.forEach(x => result[x] = globalThis[x]);
- return result;
-}
-
-function takeAndUseWebSnapshot(createObjects, exports, realmForDeserializing) {
- // Take a snapshot in Realm r1.
- const r1 = Realm.create();
- Realm.eval(r1, createObjects, { type: 'function' });
- const snapshot = Realm.takeWebSnapshot(r1, exports);
- // Use the snapshot in Realm r2.
- const r2 = realmForDeserializing != undefined ?
- realmForDeserializing : Realm.create();
- const success = Realm.useWebSnapshot(r2, snapshot);
- assertTrue(success);
- const result =
- Realm.eval(r2, use, { type: 'function', arguments: [exports] });
- %HeapObjectVerify(result);
- return result;
-}
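
For readers unfamiliar with the d8-only snapshot API the deleted helper wrapped, here is a minimal sketch of the same round trip written directly against Realm.takeWebSnapshot / Realm.useWebSnapshot (the object names and assertions are illustrative, not taken from the tests):

    // Build state in a source realm, snapshot it, and revive it in a fresh realm.
    const source = Realm.create();
    Realm.eval(source, function createObjects() {
      globalThis.greeting = { text: 'hello' };
    }, { type: 'function' });
    const snapshot = Realm.takeWebSnapshot(source, ['greeting']);

    const target = Realm.create();
    assertTrue(Realm.useWebSnapshot(target, snapshot));
    const revived = Realm.eval(target, function use() {
      return globalThis.greeting;
    }, { type: 'function' });
    assertEquals('hello', revived.text);
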
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-holey-array.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-holey-array.js
deleted file mode 100644
index c096d964b7..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-holey-array.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestHoleySmiElementsArray() {
- function createObjects() {
- globalThis.foo = [1,,2];
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1,,2], foo);
-})();
-
-(function TestHoleyElementsArray() {
- function createObjects() {
- globalThis.foo = [1,,'123'];
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1,,'123'], foo);
-})();
-
-(function TestHoleyArrayContainingDoubleAndSmi() {
- function createObjects() {
- globalThis.foo = [1.2, , 1];
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1.2, , 1], foo);
-})();
-
-(function TestHoleyArrayContainingDoubleAndObject() {
- function createObjects() {
- globalThis.foo = [1.2, , {'key': 'value'}];
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1.2, , {'key': 'value'}], foo);
-})();
-
-(function TestHoleyDoubleElementsArray() {
- function createObjects() {
- globalThis.foo = [1.2, , 2.3];
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals([1.2, , 2.3], foo);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-object.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-object.js
deleted file mode 100644
index 394bb3de55..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-object.js
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestObjectReferencingObject() {
- function createObjects() {
- globalThis.foo = {
- bar: { baz: 11525 }
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(11525, foo.bar.baz);
-})();
-
-(function TestInPlaceStringsInObject() {
- function createObjects() {
- globalThis.foo = {a: 'foo', b: 'bar', c: 'baz'};
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- // We cannot test that the strings are really in-place; that's covered by
- // cctests.
- assertEquals('foobarbaz', foo.a + foo.b + foo.c);
-})();
-
-(function TestRepeatedInPlaceStringsInObject() {
- function createObjects() {
- globalThis.foo = {a: 'foo', b: 'bar', c: 'foo'};
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- // We cannot test that the strings are really in-place; that's covered by
- // cctests.
- assertEquals('foobarfoo', foo.a + foo.b + foo.c);
-})();
-
-(function TestObjectWithPackedElements() {
- function createObjects() {
- globalThis.foo = {
- '0': 'zero', '1': 'one', '2': 'two', '3': 'three'
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- // Objects always get HOLEY_ELEMENTS; no PACKED or SMI_ELEMENTS.
- const elementsKindTest = {0: 0, 1: 1, 2: 2};
- assertFalse(%HasPackedElements(elementsKindTest));
- assertFalse(%HasSmiElements(elementsKindTest));
-
- assertFalse(%HasPackedElements(foo));
- assertFalse(%HasSmiElements(foo));
- assertEquals('zeroonetwothree', foo[0] + foo[1] + foo[2] + foo[3]);
-})();
-
-(function TestObjectWithPackedSmiElements() {
- function createObjects() {
- globalThis.foo = {
- '0': 0, '1': 1, '2': 2, '3': 3
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertFalse(%HasPackedElements(foo));
- assertFalse(%HasSmiElements(foo));
- assertEquals('0123', '' + foo[0] + foo[1] + foo[2] + foo[3]);
-})();
-
-(function TestObjectWithHoleyElements() {
- function createObjects() {
- globalThis.foo = {
- '1': 'a', '11': 'b', '111': 'c'
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertFalse(%HasPackedElements(foo));
- assertFalse(%HasSmiElements(foo));
- assertEquals('abc', foo[1] + foo[11] + foo[111]);
-})();
-
-(function TestObjectWithHoleySmiElements() {
- function createObjects() {
- globalThis.foo = {
- '1': 0, '11': 1, '111': 2
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertFalse(%HasPackedElements(foo));
- assertFalse(%HasSmiElements(foo));
- assertEquals('012', '' + foo[1] + foo[11] + foo[111]);
-})();
-
-(function TestObjectWithPropertiesAndElements() {
- function createObjects() {
- globalThis.foo = {
- 'prop1': 'value1', '1': 'a', 'prop2': 'value2', '11': 'b', '111': 'c'
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertFalse(%HasPackedElements(foo));
- assertFalse(%HasSmiElements(foo));
- assertEquals('abc', foo[1] + foo[11] + foo[111]);
- assertEquals('value1value2', foo.prop1 + foo.prop2);
-})();
-
-(function TestObjectsWithSamePropertiesButDifferentElementsKind() {
- function createObjects() {
- globalThis.foo = {
- 'prop1': 'value1', 'prop2': 'value2', '1': 'a', '11': 'b', '111': 'c'
- };
- globalThis.bar = {
- 'prop1': 'value1', 'prop2': 'value2', '0': 0, '1': 0
- }
- }
- const { foo, bar } = takeAndUseWebSnapshot(createObjects, ['foo', 'bar']);
- assertFalse(%HasPackedElements(foo));
- assertFalse(%HasSmiElements(foo));
- assertEquals('abc', foo[1] + foo[11] + foo[111]);
- assertEquals('value1value2', foo.prop1 + foo.prop2);
- assertFalse(%HasPackedElements(bar));
- assertFalse(%HasSmiElements(bar));
- assertEquals('00', '' + bar[0] + bar[1]);
- assertEquals('value1value2', bar.prop1 + bar.prop2);
-})();
-
-(function TestObjectWithEmptyMap() {
- function createObjects() {
- globalThis.foo = [{a:1}, {}, {b: 2}];
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(1, foo[0].a);
- assertEquals(2, foo[2].b);
-})();
-
-(function TestObjectWithDictionaryMap() {
- function createObjects() {
- const obj = {};
- // Create an object with dictionary map.
- for (let i = 0; i < 2000; i++){
- obj[`key${i}`] = `value${i}`;
- }
- globalThis.foo = obj;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(2000, Object.keys(foo).length);
- assertEquals(2000, Object.values(foo).length);
- for (let i = 0; i < 2000; i++){
- assertEquals(`value${i}`, foo[`key${i}`]);
- }
-})();
-
-(function TwoExportedObjects() {
- function createObjects() {
- globalThis.one = {x: 1};
- globalThis.two = {x: 2};
- }
- const { one, two } = takeAndUseWebSnapshot(createObjects, ['one', 'two']);
- assertEquals(1, one.x);
- assertEquals(2, two.x);
-})();
-
-(function TestObjectWithDictionaryElements() {
- function createObjects() {
- globalThis.obj = {
- 10: 1,
- 100: 2,
- 1000: 3,
- 10000: 4
- };
- }
- const { obj } = takeAndUseWebSnapshot(createObjects, ['obj']);
- assertEquals(['10', '100', '1000', '10000'], Object.getOwnPropertyNames(obj));
- assertEquals(1, obj[10]);
- assertEquals(2, obj[100]);
- assertEquals(3, obj[1000]);
- assertEquals(4, obj[10000]);
-})();
-
-(function TestObjectWithDictionaryElementsWithLargeIndex() {
- function createObjects() {
- globalThis.obj = {};
- globalThis.obj[4394967296] = 'lol';
- }
- const { obj } = takeAndUseWebSnapshot(createObjects, ['obj']);
- assertEquals(['4394967296'], Object.getOwnPropertyNames(obj));
- assertEquals('lol', obj[4394967296]);
-})();
-
-(function TestObjectWithSlackElements() {
- function createObjects() {
- globalThis.foo = {};
- globalThis.bar = {};
- for (let i = 0; i < 100; ++i) {
- globalThis.foo[i] = i;
- globalThis.bar[i] = {};
- }
- }
- const { foo, bar } = takeAndUseWebSnapshot(createObjects, ['foo', 'bar']);
- for (let i = 0; i < 100; ++i) {
- assertEquals(i, foo[i]);
- assertEquals({}, bar[i]);
- }
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-prototype.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-prototype.js
deleted file mode 100644
index eefe0cc4a7..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-prototype.js
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestObjectPrototype() {
- function createObjects() {
- globalThis.obj = {a: 1, __proto__: {x: 1}};
- }
- const realm = Realm.create();
- const {obj} = takeAndUseWebSnapshot(createObjects, ['obj'], realm);
- assertEquals(1, obj.x);
- assertEquals(1, obj.__proto__.x);
- assertSame(Realm.eval(realm, 'Object.prototype'), obj.__proto__.__proto__);
-})();
-
-(function TestEmptyObjectPrototype() {
- function createObjects() {
- globalThis.obj = {__proto__: {x: 1}};
- }
- const realm = Realm.create();
- const {obj} = takeAndUseWebSnapshot(createObjects, ['obj'], realm);
- assertEquals(1, obj.x);
- assertEquals(1, obj.__proto__.x);
- assertSame(Realm.eval(realm, 'Object.prototype'), obj.__proto__.__proto__);
-})();
-
-(function TestDictionaryObjectPrototype() {
- function createObjects() {
- const obj = {};
- // Create an object with dictionary map.
- for (let i = 0; i < 2000; i++){
- obj[`key${i}`] = `value${i}`;
- }
- obj.__proto__ = {x: 1};
- globalThis.foo = obj;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(2000, Object.keys(foo).length);
- assertEquals(2000, Object.values(foo).length);
- for (let i = 0; i < 2000; i++){
- assertEquals(`value${i}`, foo[`key${i}`]);
- }
- assertEquals(1, foo.x);
- assertEquals(1, foo.__proto__.x);
-})();
-
-(function TestNullPrototype() {
- function createObjects() {
- globalThis.foo = Object.create(null);
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(null, Object.getPrototypeOf(foo));
-})();
-
-(function TestDefaultObjectProto() {
- function createObjects() {
- globalThis.foo = {
- str: 'hello',
- n: 42,
- };
- }
- const realm = Realm.create();
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo'], realm);
- assertSame(Realm.eval(realm, 'Object.prototype'), Object.getPrototypeOf(foo));
-})();
-
-(function TestEmptyObjectProto() {
- function createObjects() {
- globalThis.foo = {};
- }
- const realm = Realm.create();
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo'], realm);
- assertSame(Realm.eval(realm, 'Object.prototype'), Object.getPrototypeOf(foo));
-})();
-
-(function TestObjectProto() {
- function createObjects() {
- globalThis.foo = {
- __proto__ : {x : 10},
- y: 11
- };
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertEquals(10, Object.getPrototypeOf(foo).x);
-})();
-
-(function TestObjectProtoInSnapshot() {
- function createObjects() {
- globalThis.o1 = { x: 10};
- globalThis.o2 = {
- __proto__ : o1,
- y: 11
- };
- }
- const realm = Realm.create();
- const { o1, o2 } = takeAndUseWebSnapshot(createObjects, ['o1', 'o2'], realm);
- assertSame(o1, Object.getPrototypeOf(o2));
- assertSame(Realm.eval(realm, 'Object.prototype'), Object.getPrototypeOf(o1));
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-sparse-array.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-sparse-array.js
deleted file mode 100644
index 7a2bf9bcb0..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-sparse-array.js
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestDictionaryElementsArray() {
- function createObjects() {
- const array = [];
- // Add a large index to force dictionary elements.
- array[2 ** 30] = 10;
- for (let i = 0; i < 10; i++) {
- array[i * 101] = i;
- }
- globalThis.foo = array;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(%HasDictionaryElements(foo));
- assertEquals(2 ** 30 + 1, foo.length);
- for (let i = 0; i < 10; i++) {
- assertEquals(i, foo[i * 101]);
- }
-})();
-
-(function TestDictionaryElementsArrayContainingArray() {
- function createObjects() {
- const array = [];
- // Add a large index to force dictionary elements.
- array[2 ** 30] = 10;
- for (let i = 0; i < 10; i++) {
- array[i * 101] = [i];
- }
- globalThis.foo = array;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(%HasDictionaryElements(foo));
- assertEquals(2 ** 30 + 1, foo.length);
- for (let i = 0; i < 10; i++) {
- assertEquals([i], foo[i * 101]);
- }
-})();
-
-(function TestDictionaryElementsArrayContainingObject() {
- function createObjects() {
- const array = [];
- // Add a large index to force dictionary elements.
- array[2 ** 30] = 10;
- for (let i = 0; i < 10; i++) {
- array[i * 101] = {i: i};
- }
- globalThis.foo = array;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(%HasDictionaryElements(foo));
- assertEquals(2 ** 30 + 1, foo.length);
- for (let i = 0; i < 10; i++) {
- assertEquals({i: i}, foo[i * 101]);
- }
-})();
-
-(function TestDictionaryElementsArrayContainingFunction() {
- function createObjects() {
- const array = [];
- // Add a large index to force dictionary elements.
- array[2 ** 30] = 10;
- for (let i = 0; i < 10; i++) {
- array[i * 101] = function() { return i; };
- }
- globalThis.foo = array;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(%HasDictionaryElements(foo));
- assertEquals(2 ** 30 + 1, foo.length);
- for (let i = 0; i < 10; i++) {
- assertEquals(i, foo[i * 101]());
- }
-})();
-
-(function TestDictionaryElementsArrayContainingString() {
- function createObjects() {
- const array = [];
- // Add a large index to force dictionary elements.
- array[2 ** 30] = 10;
- for (let i = 0; i < 10; i++) {
- array[i * 101] = `${i}`;
- }
- globalThis.foo = array;
- }
- const { foo } = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertTrue(%HasDictionaryElements(foo));
- assertEquals(2 ** 30 + 1, foo.length);
- for (let i = 0; i < 10; i++) {
- assertEquals(`${i}`, foo[i * 101]);
- }
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-symbol.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-symbol.js
deleted file mode 100644
index 807dd1bd20..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-symbol.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestNonGlobalSymbol() {
- function createObjects() {
- const s = Symbol('description');
- globalThis.foo = {mySymbol: s, innerObject: { symbolHereToo: s}};
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertSame(foo.mySymbol, foo.innerObject.symbolHereToo);
- assertEquals('description', foo.mySymbol.description);
- assertNotEquals(foo.mySymbol, Symbol('description'));
- assertNotEquals(foo.mySymbol, Symbol.for('description'));
-})();
-
-(function TestGlobalSymbol() {
- function createObjects() {
- const s = Symbol.for('this is global');
- globalThis.foo = {mySymbol: s, innerObject: { symbolHereToo: s}};
- }
- const {foo} = takeAndUseWebSnapshot(createObjects, ['foo']);
- assertSame(foo.mySymbol, foo.innerObject.symbolHereToo);
- assertEquals('this is global', foo.mySymbol.description);
- assertEquals(Symbol.for('this is global'), foo.mySymbol);
-})();
-
-(function TestSymbolAsMapKey() {
- function createObjects() {
- globalThis.obj1 = {};
- const global_symbol = Symbol.for('this is global');
- obj1[global_symbol] = 'global symbol value';
- globalThis.obj2 = {};
- const nonglobal_symbol = Symbol('this is not global');
- obj2[nonglobal_symbol] = 'nonglobal symbol value';
- }
- const {obj1, obj2} = takeAndUseWebSnapshot(createObjects, ['obj1', 'obj2']);
- assertEquals('global symbol value', obj1[Symbol.for('this is global')]);
- const nonglobal_symbol = Object.getOwnPropertySymbols(obj2)[0];
- assertEquals('nonglobal symbol value', obj2[nonglobal_symbol]);
-})();
diff --git a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-typed-array.js b/deps/v8/test/mjsunit/web-snapshot/web-snapshot-typed-array.js
deleted file mode 100644
index 0f16b5eace..0000000000
--- a/deps/v8/test/mjsunit/web-snapshot/web-snapshot-typed-array.js
+++ /dev/null
@@ -1,439 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-d8-web-snapshot-api --allow-natives-syntax --harmony-rab-gsab --verify-heap
-
-'use strict';
-
-d8.file.execute('test/mjsunit/web-snapshot/web-snapshot-helpers.js');
-
-(function TestTypedArray() {
- function createObjects() {
- const int8Array = new Int8Array(3);
- for (let i = 0; i < 3; i++) {
- int8Array[i] = i;
- }
- const uint8Array = new Uint8Array(3);
- for (let i = 0; i < 3; i++) {
- uint8Array[i] = i;
- }
- const uint8ClampedArray = new Uint8ClampedArray(3);
- for (let i = 0; i < 3; i++) {
- uint8ClampedArray[i] = i;
- }
- const int16Array = new Int16Array(3);
- for (let i = 0; i < 3; i++) {
- int16Array[i] = i;
- }
- const uint16Array = new Uint16Array(3);
- for (let i = 0; i < 3; i++) {
- uint16Array[i] = i;
- }
- const int32Array = new Int32Array(3);
- for (let i = 0; i < 3; i++) {
- int32Array[i] = i;
- }
- const uint32Array = new Uint32Array(3);
- for (let i = 0; i < 3; i++) {
- uint32Array[i] = i;
- }
- const float32Array = new Float32Array(3);
- for (let i = 0; i < 3; i++) {
- float32Array[i] = i + 0.2;
- }
- const float64Array = new Float64Array(3);
- for (let i = 0; i < 3; i++) {
- float64Array[i] = i + 0.2;
- }
- const bigInt64Array = new BigInt64Array(3);
- for (let i = 0; i < 3; i++) {
- bigInt64Array[i] = BigInt(i);
- }
- const bigUint64Array = new BigUint64Array(3);
- for (let i = 0; i < 3; i++) {
- bigUint64Array[i] = BigInt(i);
- }
- globalThis.int8Array = int8Array;
- globalThis.uint8Array = uint8Array;
- globalThis.uint8ClampedArray = uint8ClampedArray;
- globalThis.int16Array = int16Array;
- globalThis.uint16Array = uint16Array;
- globalThis.int32Array = int32Array;
- globalThis.uint32Array = uint32Array;
- globalThis.float32Array = float32Array;
- globalThis.float64Array = float64Array;
- globalThis.bigInt64Array = bigInt64Array;
- globalThis.bigUint64Array = bigUint64Array;
- }
- const {
- int8Array,
- uint8Array,
- uint8ClampedArray,
- int16Array,
- uint16Array,
- int32Array,
- uint32Array,
- float32Array,
- float64Array,
- bigInt64Array,
- bigUint64Array,
- } =
- takeAndUseWebSnapshot(createObjects, [
- 'int8Array',
- 'uint8Array',
- 'uint8ClampedArray',
- 'int16Array',
- 'uint16Array',
- 'int32Array',
- 'uint32Array',
- 'float32Array',
- 'float64Array',
- 'bigInt64Array',
- 'bigUint64Array',
- ]);
- assertNotSame(globalThis.int8Array, int8Array);
- assertEquals(int8Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(int8Array[i], i);
- }
- assertNotSame(globalThis.uint8Array, uint8Array);
- assertEquals(uint8Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(uint8Array[i], i);
- }
- assertNotSame(globalThis.uint8ClampedArray, uint8ClampedArray);
- assertEquals(uint8ClampedArray.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(uint8ClampedArray[i], i);
- }
- assertNotSame(globalThis.int16Array, int16Array);
- assertEquals(int16Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(int16Array[i], i);
- }
- assertNotSame(globalThis.uint16Array, uint16Array);
- assertEquals(uint16Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(uint16Array[i], i);
- }
- assertNotSame(globalThis.int32Array, int32Array);
- assertEquals(int32Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(int32Array[i], i);
- }
- assertNotSame(globalThis.uint32Array, uint32Array);
- assertEquals(uint32Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(uint32Array[i], i);
- }
- assertNotSame(globalThis.float32Array, float32Array);
- assertEquals(float32Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEqualsDelta(float32Array[i], i + 0.2);
- }
- assertNotSame(globalThis.float64Array, float64Array);
- assertEquals(float64Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEqualsDelta(float64Array[i], i + 0.2);
- }
- assertNotSame(globalThis.bigInt64Array, bigInt64Array);
- assertEquals(bigInt64Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(bigInt64Array[i], BigInt(i));
- }
- assertNotSame(globalThis.bigUint64Array, bigUint64Array);
- assertEquals(bigUint64Array.length, 3);
- for (let i = 0; i < 3; i++) {
- assertEquals(bigUint64Array[i], BigInt(i));
- }
-})();
-
-(function TestInt8Array() {
- function createObjects() {
- const array = new Int8Array([-129, -128, 1, 127, 128]);
- globalThis.array = array;
- const array2 = new Int8Array(array.buffer, 1, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 127);
- assertEquals(array[1], -128);
- assertEquals(array[2], 1);
- assertEquals(array[3], 127);
- assertEquals(array[4], -128);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestUint8Array() {
- function createObjects() {
- const array = new Uint8Array([-1, 0, 2, 255, 256]);
- globalThis.array = array;
- const array2 = new Uint8Array(array.buffer, 1, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 255);
- assertEquals(array[1], 0);
- assertEquals(array[2], 2);
- assertEquals(array[3], 255);
- assertEquals(array[4], 0);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestUint8ClampedArray() {
- function createObjects() {
- const array = new Uint8ClampedArray([-1, 0, 2, 255, 256]);
- globalThis.array = array;
- const array2 = new Uint8ClampedArray(array.buffer, 1, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 0);
- assertEquals(array[1], 0);
- assertEquals(array[2], 2);
- assertEquals(array[3], 255);
- assertEquals(array[4], 255);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestInt16Array() {
- function createObjects() {
- const array = new Int16Array([-32769, -32768, 1, 32767, 32768]);
- globalThis.array = array;
- const array2 = new Int16Array(array.buffer, 2, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 32767);
- assertEquals(array[1], -32768);
- assertEquals(array[2], 1);
- assertEquals(array[3], 32767);
- assertEquals(array[4], -32768);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestUint16Array() {
- function createObjects() {
- const array = new Uint16Array([-1, 0, 2, 65535, 65536]);
- globalThis.array = array;
- const array2 = new Uint16Array(array.buffer, 2, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 65535);
- assertEquals(array[1], 0);
- assertEquals(array[2], 2);
- assertEquals(array[3], 65535);
- assertEquals(array[4], 0);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestInt32Array() {
- function createObjects() {
- const array = new Int32Array([
- -2147483649,
- -2147483648,
- 1,
- 2147483647,
- 2147483648,
- ]);
- globalThis.array = array;
- const array2 = new Int32Array(array.buffer, 4, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 2147483647);
- assertEquals(array[1], -2147483648);
- assertEquals(array[2], 1);
- assertEquals(array[3], 2147483647);
- assertEquals(array[4], -2147483648);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestUint32Array() {
- function createObjects() {
- const array = new Uint32Array([-1, 0, 2, 4294967295, 4294967296]);
- globalThis.array = array;
- const array2 = new Uint32Array(array.buffer, 4, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], 4294967295);
- assertEquals(array[1], 0);
- assertEquals(array[2], 2);
- assertEquals(array[3], 4294967295);
- assertEquals(array[4], 0);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestBigInt64Array() {
- function createObjects() {
- const array = new BigInt64Array([
- BigInt(-(2 ** 63)) - 1n,
- BigInt(-(2 ** 63)),
- 1n,
- BigInt(2 ** 63) - 1n,
- BigInt(2 ** 63),
- ]);
- globalThis.array = array;
- const array2 = new BigInt64Array(array.buffer, 8, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], BigInt(2 ** 63) - 1n);
- assertEquals(array[1], BigInt(-(2 ** 63)));
- assertEquals(array[2], 1n);
- assertEquals(array[3], BigInt(2 ** 63) - 1n);
- assertEquals(array[4], BigInt(-(2 ** 63)));
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestBigUint64Array() {
- function createObjects() {
- const array = new BigUint64Array([
- -1n,
- 0n,
- 2n,
- BigInt(2 ** 64) - 1n,
- BigInt(2 ** 64),
- ]);
- globalThis.array = array;
- const array2 = new BigUint64Array(array.buffer, 8, 2);
- globalThis.array2 = array2;
- }
- const {array, array2} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- ]);
- assertEquals(array.length, 5);
- assertEquals(array[0], BigInt(2 ** 64) - 1n);
- assertEquals(array[1], 0n);
- assertEquals(array[2], 2n);
- assertEquals(array[3], BigInt(2 ** 64) - 1n);
- assertEquals(array[4], 0n);
- assertSame(array.buffer, array2.buffer);
- assertEquals(array2.length, 2);
- assertEquals(array2[0], array[1]);
- assertEquals(array2[1], array[2]);
-})();
-
-(function TestResizableTypedArray() {
- function createObjects() {
- let resizableArrayBuffer = new ArrayBuffer(1024, {
- maxByteLength: 1024 * 2,
- });
- // 0 offset, auto length
- let array = new Uint32Array(resizableArrayBuffer);
- globalThis.array = array;
-
- // Non-0 offset, auto length
- let array2 = new Uint32Array(resizableArrayBuffer, 256);
- globalThis.array2 = array2;
-
- // Non-0 offset, fixed length
- let array3 = new Uint32Array(resizableArrayBuffer, 128, 4);
- globalThis.array3 = array3;
- }
- const {array, array2, array3} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- 'array3',
- ]);
- assertTrue(array.buffer.resizable);
- assertEquals(array.length, 256); // (1024 - 0) / 4
- assertEquals(array2.length, 192); // (1024 - 256) / 4
- assertEquals(array3.length, 4);
- array.buffer.resize(1024 * 2);
- assertEquals(array.length, 512); // (2048 - 0) / 4
- assertEquals(array2.length, 448); // (2048 - 256) / 4
- assertEquals(array3.length, 4);
-})();
-
-(function TestGrowableTypedArray() {
- function createObjects() {
- let resizableArrayBuffer = new SharedArrayBuffer(1024, {
- maxByteLength: 1024 * 2,
- });
- // 0 offset, auto length
- let array = new Uint32Array(resizableArrayBuffer);
- globalThis.array = array;
-
- // Non-0 offset, auto length
- let array2 = new Uint32Array(resizableArrayBuffer, 256);
- globalThis.array2 = array2;
-
- // Non-0 offset, fixed length
- let array3 = new Uint32Array(resizableArrayBuffer, 128, 4);
- globalThis.array3 = array3;
- }
- const {array, array2, array3} = takeAndUseWebSnapshot(createObjects, [
- 'array',
- 'array2',
- 'array3',
- ]);
- assertTrue(array.buffer.growable);
- assertEquals(array.length, 256); // (1024 - 0) / 4
- assertEquals(array2.length, 192); // (1024 - 256) / 4
- assertEquals(array3.length, 4);
- array.buffer.grow(1024 * 2);
- assertEquals(array.length, 512); // (2048 - 0) / 4
- assertEquals(array2.length, 448); // (2048 - 256) / 4
- assertEquals(array3.length, 4);
-})();
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 7ddd1c0893..91404e318f 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -103,7 +103,7 @@ static void DumpSpaceFirstPageAddress(FILE* out, i::BaseSpace* space,
i::Address first_page) {
const char* name = space->name();
i::Tagged_t compressed =
- i::V8HeapCompressionScheme::CompressTagged(first_page);
+ i::V8HeapCompressionScheme::CompressObject(first_page);
uintptr_t unsigned_compressed = static_cast<uint32_t>(compressed);
i::PrintF(out, " 0x%08" V8PRIxPTR ": \"%s\",\n", unsigned_compressed, name);
}
diff --git a/deps/v8/test/test262/BUILD.gn b/deps/v8/test/test262/BUILD.gn
index 68c8d1455d..b08d1072c6 100644
--- a/deps/v8/test/test262/BUILD.gn
+++ b/deps/v8/test/test262/BUILD.gn
@@ -17,6 +17,7 @@ group("v8_test262") {
"harness-adapt.js",
"harness-adapt-donotevaluate.js",
"harness-agent.js",
+ "harness-done.js",
"harness-ishtmldda.js",
"test262.status",
"testcfg.py",
diff --git a/deps/v8/test/test262/harness-adapt.js b/deps/v8/test/test262/harness-adapt.js
index 55d1b445b0..dff7c34146 100644
--- a/deps/v8/test/test262/harness-adapt.js
+++ b/deps/v8/test/test262/harness-adapt.js
@@ -81,15 +81,6 @@ var ES5Harness = (function() {
}
})();
-function $DONE(arg){
- if (arg) {
- print('FAILED! Error: ' + arg);
- quit(1);
- }
-
- quit(0);
-};
-
function RealmOperators(realm) {
let $262 = {
evalScript(script) {
diff --git a/deps/v8/test/test262/harness-done.js b/deps/v8/test/test262/harness-done.js
new file mode 100644
index 0000000000..2d5af4266d
--- /dev/null
+++ b/deps/v8/test/test262/harness-done.js
@@ -0,0 +1,17 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function $DONE(error) {
+ if (error) {
+ if(typeof error === 'object' && error !== null && 'name' in error) {
+ print('Test262:AsyncTestFailure:' + error.name + ': ' + error.message);
+ } else {
+ print('Test262:AsyncTestFailure:Test262Error: ' + String(error));
+ }
+ quit(1);
+ }
+
+ print('Test262:AsyncTestComplete');
+ quit(0);
+};
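
For context, a hedged sketch of how an asynchronous test262 test typically drives this hook (only the $DONE success/failure protocol above is taken from the harness; the promise body is illustrative):

    // Run the async work, then report completion or failure through $DONE.
    Promise.resolve()
      .then(() => {
        // assertions for the feature under test would go here
      })
      .then(() => $DONE(), error => $DONE(error));
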
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index de69a8d91c..9b4fe379f5 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -271,115 +271,26 @@
# http://crbug/v8/10905
'language/identifier-resolution/assign-to-global-undefined': [FAIL],
- # http://crbug/v8/11531
- 'built-ins/RegExp/prototype/flags/get-order': [FAIL],
-
# http://crbug/v8/11533
'language/statements/class/subclass/default-constructor-spread-override': [FAIL],
- # See also https://github.com/tc39/test262/issues/3380
- 'built-ins/TypedArray/prototype/map/callbackfn-resize': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=11935
- # regexp-v-flag not yet fully implemented.
- 'built-ins/RegExp/property-escapes/generated/strings/Basic_Emoji': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Basic_Emoji-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Basic_Emoji-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Basic_Emoji-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Keycap_Sequence': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Keycap_Sequence-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Keycap_Sequence-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Keycap_Sequence-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Test': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Test-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Test-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/Emoji_Test-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Flag_Sequence': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Flag_Sequence-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Flag_Sequence-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Flag_Sequence-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Modifier_Sequence': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Modifier_Sequence-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Modifier_Sequence-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Modifier_Sequence-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Tag_Sequence': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Tag_Sequence-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Tag_Sequence-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_Tag_Sequence-negative-u': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_ZWJ_Sequence': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_ZWJ_Sequence-negative-CharacterClass': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_ZWJ_Sequence-negative-P': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/strings/RGI_Emoji_ZWJ_Sequence-negative-u': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-difference-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-difference-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-difference-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-difference-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-intersection-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-intersection-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-union-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-union-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-intersection-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-intersection-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-union-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-union-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-difference-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-difference-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-intersection-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-intersection-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-union-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-union-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character-class-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character-class': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character-property-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character-class-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character-class': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character-property-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character-class-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character-class': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character-property-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-character-class-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-character-class': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-character': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-character-property-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-character-class-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-character-class': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-character': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-character-property-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-string-literal': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-union-character-class-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-union-character-class': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-union-character': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-union-character-property-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-union-property-of-strings-escape': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/string-literal-union-string-literal': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=13173
- 'built-ins/RegExp/named-groups/duplicate-names': [FAIL],
+ 'built-ins/RegExp/duplicate-named-capturing-groups-syntax': [FAIL],
'built-ins/RegExp/named-groups/duplicate-names-group-property-enumeration-order': [FAIL],
'built-ins/RegExp/named-groups/duplicate-names-match-indices': [FAIL],
'built-ins/RegExp/named-groups/duplicate-names-replace': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-exec': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-matchall': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-match': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-replaceall': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-search': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-split': [FAIL],
+ 'built-ins/RegExp/named-groups/duplicate-names-test': [FAIL],
+ 'built-ins/RegExp/prototype/exec/duplicate-named-groups-properties': [FAIL],
+ 'built-ins/RegExp/prototype/exec/duplicate-named-indices-groups-properties': [FAIL],
+ 'built-ins/String/prototype/match/duplicate-named-groups-properties': [FAIL],
+ 'built-ins/String/prototype/match/duplicate-named-indices-groups-properties': [FAIL],
+ 'annexB/built-ins/RegExp/prototype/compile/duplicate-named-capturing-groups-syntax': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=13174
'built-ins/RegExp/prototype/Symbol.match/flags-tostring-error': [FAIL],
@@ -389,6 +300,13 @@
'built-ins/RegExp/prototype/Symbol.replace/get-flags-err': [FAIL],
'built-ins/RegExp/prototype/Symbol.replace/get-unicode-error': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=13321
+ 'built-ins/Array/fromAsync/builtin': [FAIL],
+ 'built-ins/Array/fromAsync/length': [FAIL],
+ 'built-ins/Array/fromAsync/name': [FAIL],
+ 'built-ins/Array/fromAsync/not-a-constructor': [FAIL],
+ 'built-ins/Array/fromAsync/prop-desc': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=11544
'built-ins/Temporal/Duration/prototype/total/balance-negative-result': [FAIL],
'intl402/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL],
@@ -508,89 +426,71 @@
'staging/Intl402/Temporal/old/duration-arithmetic-dst': [FAIL],
'built-ins/Temporal/Calendar/from/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateAdd/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateAdd/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateAdd/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateAdd/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateUntil/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateUntil/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateUntil/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dateUntil/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/day/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/day/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/daysInYear/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/mergeFields/non-string-properties': [FAIL],
'built-ins/Temporal/Calendar/prototype/mergeFields/order-of-operations': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/month/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/month/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthCode/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthCode/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthCode/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthCode/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthCode/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/Calendar/prototype/year/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/Calendar/prototype/year/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/Duration/compare/relativeto-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
- 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/Duration/prototype/add/relativeto-string-datetime': [FAIL],
- 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/Duration/prototype/round/relativeto-string-datetime': [FAIL],
- 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-datetime': [FAIL],
- 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/Duration/prototype/total/relativeto-string-datetime': [FAIL],
'built-ins/Temporal/Instant/compare/argument-string-calendar-annotation': [FAIL],
@@ -623,22 +523,18 @@
'built-ins/Temporal/Now/zonedDateTimeISO/timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/Now/zonedDateTime/timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/PlainDate/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/PlainDate/compare/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDate/compare/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDate/compare/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/compare/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDate/compare/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDate/from/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDate/from/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/equals/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/equals/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/since/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/since/argument-string-time-zone-annotation': [FAIL],
@@ -646,55 +542,42 @@
'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/toPlainMonthDay/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/toPlainYearMonth/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/calendarname-critical': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toString/calendar-tostring': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-instance-does-not-get-timeZone-property': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/until/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/until/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/PlainDate/prototype/withCalendar/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/PlainDate/prototype/with/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/PlainDateTime/compare/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/compare/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDateTime/compare/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/compare/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/compare/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDateTime/from/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/from/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDateTime/from/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/from/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/from/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/equals/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/equals/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/equals/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/equals/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/since/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/since/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/since/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/since/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/since/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/toPlainMonthDay/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/toPlainYearMonth/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/calendarname-critical': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toString/calendar-tostring': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-instance-does-not-get-timeZone-property': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/until/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/until/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/until/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/until/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/until/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withCalendar/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/with/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string-time-zone-annotation': [FAIL],
@@ -703,20 +586,16 @@
'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/PlainMonthDay/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/PlainMonthDay/from/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainMonthDay/from/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainMonthDay/prototype/toPlainDate/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toString/calendarname-critical': [FAIL],
'built-ins/Temporal/PlainMonthDay/prototype/toString/calendar-tostring': [FAIL],
- 'built-ins/Temporal/PlainMonthDay/prototype/with/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainTime/compare/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainTime/compare/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainTime/compare/argument-string-unknown-annotation': [FAIL],
@@ -729,12 +608,10 @@
'built-ins/Temporal/PlainTime/prototype/since/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainTime/prototype/since/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainTime/prototype/since/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-string-time-zone-annotation': [FAIL],
@@ -744,18 +621,14 @@
'built-ins/Temporal/PlainTime/prototype/until/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainTime/prototype/until/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/compare/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/compare/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/from/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/from/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/add/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-string-time-zone-annotation': [FAIL],
@@ -764,19 +637,13 @@
'built-ins/Temporal/PlainYearMonth/prototype/since/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/since/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/since/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/subtract/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toString/calendarname-critical': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/toString/calendar-tostring': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/PlainYearMonth/prototype/until/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/until/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/PlainYearMonth/prototype/with/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/TimeZone/from/timezone-instance-does-not-get-timeZone-property': [FAIL],
- 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-string-time-zone-annotation': [FAIL],
@@ -794,7 +661,6 @@
'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-string-time-zone-annotation': [FAIL],
@@ -803,45 +669,36 @@
'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/compare/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/compare/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/compare/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/compare/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/from/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/from/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/from/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/from/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/from/offset-overrides-critical-flag': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/since/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/argument-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/since/argument-string-unknown-annotation': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainMonthDay/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/toPlainYearMonth/calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/calendarname-critical': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/calendar-tostring': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/toString/timezonename-critical': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/until/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/argument-propertybag-timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/argument-string-time-zone-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/until/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withCalendar/calendar-instance-does-not-get-calendar-property': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/with/calendar-fields-undefined': [FAIL],
- 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-calendar-fields-undefined': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-string-calendar-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-string-time-zone-annotation': [FAIL],
@@ -851,98 +708,294 @@
'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-unknown-annotation': [FAIL],
'built-ins/Temporal/ZonedDateTime/prototype/withTimeZone/timezone-instance-does-not-get-timeZone-property': [FAIL],
'built-ins/Temporal/ZonedDateTime/timezone-instance-does-not-get-timeZone-property': [FAIL],
- 'intl402/Temporal/Calendar/prototype/era/argument-calendar-fields-undefined': [FAIL],
'intl402/Temporal/Calendar/prototype/era/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'intl402/Temporal/Calendar/prototype/era/argument-string-calendar-annotation': [FAIL],
'intl402/Temporal/Calendar/prototype/era/argument-string-time-zone-annotation': [FAIL],
'intl402/Temporal/Calendar/prototype/era/argument-string-unknown-annotation': [FAIL],
- 'intl402/Temporal/Calendar/prototype/eraYear/argument-calendar-fields-undefined': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/argument-string-calendar-annotation': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/argument-string-time-zone-annotation': [FAIL],
'intl402/Temporal/Calendar/prototype/eraYear/argument-string-unknown-annotation': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=13342
- 'built-ins/RegExp/property-escapes/generated/Alphabetic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Assigned': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Cased': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Case_Ignorable': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Changes_When_NFKC_Casefolded': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Diacritic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Emoji': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Emoji_Modifier_Base': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Emoji_Presentation': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Cased_Letter': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Decimal_Number': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Format': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Letter': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Lowercase_Letter': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Mark': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Modifier_Letter': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Nonspacing_Mark': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Number': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other_Letter': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other_Number': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other_Punctuation': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other_Symbol': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Punctuation': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Spacing_Mark': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Symbol': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Unassigned': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Grapheme_Base': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Grapheme_Extend': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/ID_Continue': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Ideographic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/ID_Start': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Lowercase': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Arabic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Common': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Cyrillic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Devanagari': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Egyptian_Hieroglyphs': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Arabic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Common': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Cyrillic': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Devanagari': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Egyptian_Hieroglyphs': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Han': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Hiragana': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Kannada': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Katakana': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Kawi': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Khojki': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Lao': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Latin': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Nag_Mundari': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Han': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Hiragana': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Kannada': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Katakana': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Kawi': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Khojki': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Lao': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Latin': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Nag_Mundari': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Sentence_Terminal': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Soft_Dotted': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Terminal_Punctuation': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/Unified_Ideograph': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/XID_Continue': [FAIL],
- 'built-ins/RegExp/property-escapes/generated/XID_Start': [FAIL],
- 'language/identifiers/part-unicode-15.0.0-class-escaped': [FAIL],
- 'language/identifiers/part-unicode-15.0.0-class': [FAIL],
- 'language/identifiers/part-unicode-15.0.0-escaped': [FAIL],
- 'language/identifiers/part-unicode-15.0.0': [FAIL],
- 'language/identifiers/start-unicode-15.0.0-class-escaped': [FAIL],
- 'language/identifiers/start-unicode-15.0.0-class': [FAIL],
- 'language/identifiers/start-unicode-15.0.0-escaped': [FAIL],
- 'language/identifiers/start-unicode-15.0.0': [FAIL],
+ 'built-ins/Temporal/Calendar/from/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dateUntil/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/day/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfWeek/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/dayOfYear/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInMonth/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInWeek/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/daysInYear/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/id/custom-calendar': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/inLeapYear/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/month/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthCode/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/monthsInYear/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/weekOfYear/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/year/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-calendar-datefromfields-called-with-null-prototype-fields': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-leap-second': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-number': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-instance-does-not-get-calendar-property': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-leap-second': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-number': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-year-zero': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-calendar-annotation': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-critical-unknown-annotation': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-invalid': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-multiple-time-zone': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-time-separators': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-time-zone-annotation': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-unknown-annotation': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-string-with-utc-designator': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-wrong-type': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-zoneddatetime-convert': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-zoneddatetime-slots': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-zoneddatetime-timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/basic': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/branding': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/builtin': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/calendar-datefromfields-called-with-options-undefined': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/calendar-fields-iterable': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/cross-year': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/infinity-throws-rangeerror': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/length': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/name': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/prop-desc': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/year-zero': [FAIL],
+ 'built-ins/Temporal/Duration/compare/relativeto-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/toString/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/compare/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Instant/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/since/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toString/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTimeISO/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/until/order-of-operations': [FAIL],
+ 'built-ins/Temporal/Now/plainDate/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainDateISO/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTimeISO/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainDate/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/plainTimeISO/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTime/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTimeISO/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTime/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/compare/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/from/calendar-fields-custom': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/withCalendar/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/yearOfWeek/basic': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/yearOfWeek/branding': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/yearOfWeek/custom': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/yearOfWeek/prop-desc': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/yearOfWeek/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/compare/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toString/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withCalendar/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainDate/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/yearOfWeek/basic': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/yearOfWeek/branding': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/yearOfWeek/custom': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/yearOfWeek/prop-desc': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/yearOfWeek/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/compare/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/from/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/since/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toPlainDateTime/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toString/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/until/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/argument-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/compare/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/toPlainDate/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/from/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getInstantFor/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getNextTransition/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPossibleInstantsFor/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/id/custom-timezone': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/compare/order-of-operations': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/from/order-of-operations': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/equals/order-of-operations': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/argument-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/fractionalseconddigits-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/toString/order-of-operations': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/argument-propertybag-timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withCalendar/calendar-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/copies-merge-fields-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainDate/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-date-with-utc-offset': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-time-designator-required-for-disambiguation': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withTimeZone/timezone-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/branding': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/custom': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/prop-desc': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/timezone-getoffsetnanosecondsfor-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/timezone-getoffsetnanosecondsfor-not-callable': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/timezone-getoffsetnanosecondsfor-out-of-range': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/timezone-getoffsetnanosecondsfor-wrong-type': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/yearOfWeek/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/era/argument-string-date-with-utc-offset': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/eraYear/argument-string-date-with-utc-offset': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/yearOfWeek/infinity-throws-rangeerror': [FAIL],
+
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/not-a-constructor': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/add/relativeto-zoneddatetime-nanoseconds-to-days-range-errors': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/relativeto-zoneddatetime-nanoseconds-to-days-range-errors': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-zoneddatetime-nanoseconds-to-days-range-errors': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/nanoseconds-to-days-range-errors': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/nanoseconds-to-days-range-errors': [FAIL],
+
+ 'built-ins/Temporal/Calendar/from/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-case-insensitive': [FAIL],
+ 'built-ins/Temporal/Duration/prototype/round/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/Instant/prototype/toZonedDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Now/plainDate/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Now/plainDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/Now/zonedDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/argument-convert': [FAIL],
+ 'built-ins/Temporal/PlainDate/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/day/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/monthCode/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/month/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/withCalendar/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainDate/prototype/year/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/day/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/monthCode/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/month/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/withCalendar/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainDateTime/prototype/year/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/day/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/monthCode/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainMonthDay/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainTime/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/monthCode/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/month/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-out-of-range': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/with/order-of-operations': [FAIL],
+ 'built-ins/Temporal/PlainYearMonth/prototype/year/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/day/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/month/validate-calendar-value': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-non-integer': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/withCalendar/calendar-temporal-object': [FAIL],
+ 'built-ins/Temporal/ZonedDateTime/prototype/year/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/era/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/eraYear/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/era/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/PlainDateTime/prototype/eraYear/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/era/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/PlainYearMonth/prototype/eraYear/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/era/validate-calendar-value': [FAIL],
+ 'intl402/Temporal/ZonedDateTime/prototype/eraYear/validate-calendar-value': [FAIL],
+
+ 'built-ins/Temporal/Calendar/prototype/mergeFields/arguments-not-object': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/mergeFields/gregorian-mutually-exclusive-fields': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/mergeFields/japanese-mutually-exclusive-fields': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/monthDayFromFields/reference-year-1972': [FAIL],
+ 'intl402/Temporal/Calendar/prototype/yearMonthFromFields/reference-day': [FAIL],
+ 'intl402/Temporal/PlainDate/prototype/with/cross-era-boundary': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12763
+ 'language/statements/class/decorator/syntax/valid/class-element-decorator-call-expr-identifier-reference': [FAIL],
+ 'language/statements/class/decorator/syntax/valid/class-element-decorator-member-expr-decorator-member-expr': [FAIL],
+ 'language/statements/class/decorator/syntax/valid/class-element-decorator-member-expr-identifier-reference': [FAIL],
+ 'language/statements/class/decorator/syntax/valid/class-element-decorator-parenthesized-expr-identifier-reference': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11660
# https://github.com/tc39/proposal-intl-duration-format/issues/114
'intl402/DurationFormat/prototype/format/style-options-en': [FAIL],
+ 'intl402/DurationFormat/prototype/formatToParts/formatToParts-styles-en': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=12763
'language/expressions/class/decorator/syntax/class-valid/decorator-member-expr-private-identifier': [FAIL],
'language/expressions/class/decorator/syntax/valid/decorator-call-expr-identifier-reference': [FAIL],
@@ -987,6 +1040,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=12681
'built-ins/Array/prototype/push/set-length-zero-array-length-is-non-writable': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=13791
+ 'intl402/NumberFormat/constructor-roundingIncrement': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
@@ -1002,6 +1058,9 @@
# https://github.com/tc39/ecma262/pull/889
'annexB/language/function-code/block-decl-func-skip-arguments': [FAIL],
+ # https://tc39.es/ecma262/#sec-web-compat-blockdeclarationinstantiation
+ 'annexB/language/function-code/block-decl-nested-blocks-with-fun-decl': [FAIL],
+
# Non-simple assignment targets are runtime errors instead of syntax errors
# for web compat. https://crbug.com/358346
'language/expressions/assignmenttargettype/direct-callexpression-arguments': [FAIL],
@@ -1097,29 +1156,68 @@
'built-ins/RegExp/property-escapes/*': [SKIP],
'built-ins/RegExp/named-groups/unicode-property-names': [SKIP],
'built-ins/RegExp/named-groups/unicode-property-names-valid': [SKIP],
- 'built-ins/RegExp/named-groups/non-unicode-property-names-valid': [FAIL],
+ 'built-ins/RegExp/named-groups/non-unicode-property-names-valid': [SKIP],
'built-ins/RegExp/match-indices/indices-array-unicode-property-names': [SKIP],
- 'built-ins/RegExp/unicodeSets/generated/character-class-difference-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-difference-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-union-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-class-escape-intersection-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-class-intersection-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-class-union-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-difference-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-intersection-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character-class-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character-class-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character-class': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character-class': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character-class-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-union-character-property-escape': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character-class': [PASS,FAIL],
- 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character-property-escape': [PASS,FAIL],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-difference-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-difference-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-escape-difference-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-escape-difference-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-escape-intersection-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-escape-intersection-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-escape-union-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-escape-union-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-intersection-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-intersection-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-union-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-class-union-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-difference-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-difference-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-intersection-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-intersection-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character-class-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character-class': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-character': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-difference-string-literal': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character-class-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character-class': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-character': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-intersection-string-literal': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character-class-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character-class': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-character': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-property-escape-union-string-literal': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-union-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/character-union-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character-class-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character-class': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-difference-string-literal': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character-class-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character-class': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-intersection-string-literal': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character-class-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character-class': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/property-of-strings-escape-union-string-literal': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/string-literal-difference-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/string-literal-intersection-property-of-strings-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/string-literal-union-character-property-escape': [SKIP],
+ 'built-ins/RegExp/unicodeSets/generated/string-literal-union-property-of-strings-escape': [SKIP],
# Unicode in identifiers.
'language/identifiers/part-unicode-*': [FAIL],
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 7c607cec0d..16eda7405f 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -48,7 +48,7 @@ FEATURE_FLAGS = {
'host-gc-required': '--expose-gc-as=v8GC',
'IsHTMLDDA': '--allow-natives-syntax',
'import-assertions': '--harmony-import-assertions',
- 'resizable-arraybuffer': '--harmony-rab-gsab',
+ 'resizable-arraybuffer': '--harmony-rab-gsab-transfer',
'Temporal': '--harmony-temporal',
'array-find-from-last': '--harmony-array-find-last',
'ShadowRealm': '--harmony-shadow-realm',
@@ -56,6 +56,9 @@ FEATURE_FLAGS = {
'array-grouping': '--harmony-array-grouping',
'change-array-by-copy': '--harmony-change-array-by-copy',
'symbols-as-weakmap-keys': '--harmony-symbol-as-weakmap-key',
+ 'String.prototype.isWellFormed': '--harmony-string-is-well-formed',
+ 'String.prototype.toWellFormed': '--harmony-string-is-well-formed',
+ 'arraybuffer-transfer': '--harmony-rab-gsab-transfer',
}
SKIPPED_FEATURES = set([])
@@ -205,6 +208,8 @@ class TestCase(testcase.D8TestCase):
if "IsHTMLDDA" in self.test_record.get("features", []) else []) +
([os.path.join(self.suite.root, "harness-adapt-donotevaluate.js")]
if self.fail_phase_only and not self._fail_phase_reverse else []) +
+ ([os.path.join(self.suite.root, "harness-done.js")]
+ if "async" in self.test_record.get("flags", []) else []) +
self._get_includes() +
(["--module"] if "module" in self.test_record else []) +
[self._get_source_path()])
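
Editor's note: the FEATURE_FLAGS hunk above maps test262 feature names to d8 flags, and the runner simply collects the flag for every feature a test declares. As a rough, self-contained C++ rendering of that mapping step (the real implementation is the Python runner in testcfg.py; the test metadata below is hypothetical):

#include <cassert>
#include <map>
#include <string>
#include <vector>

int main() {
  // Subset of the feature -> flag table added in the hunk above.
  const std::map<std::string, std::string> kFeatureFlags = {
      {"arraybuffer-transfer", "--harmony-rab-gsab-transfer"},
      {"String.prototype.isWellFormed", "--harmony-string-is-well-formed"},
  };

  // Features declared by a hypothetical test's metadata block.
  const std::vector<std::string> features = {"arraybuffer-transfer"};

  // Collect one d8 flag per known feature, as the runner does.
  std::vector<std::string> d8_flags;
  for (const std::string& f : features) {
    auto it = kFeatureFlags.find(f);
    if (it != kFeatureFlags.end()) d8_flags.push_back(it->second);
  }

  assert(d8_flags.size() == 1);
  assert(d8_flags[0] == "--harmony-rab-gsab-transfer");
}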
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index ff952d03b2..a006614021 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -4,6 +4,17 @@
import("../../gni/v8.gni")
+if (v8_enable_webassembly) {
+  # Specifies whether the target build is a simulator build. The target cpu is
+  # compared with the v8 target cpu so that simulator builds used for making
+  # cross-compile snapshots are not affected.
+ target_is_simulator = (target_cpu != v8_target_cpu && !v8_multi_arch_build) ||
+ (current_cpu != v8_current_cpu && v8_multi_arch_build)
+ if (!target_is_simulator && v8_current_cpu == "x64") {
+ v8_enable_wasm_simd256_revec = true
+ }
+}
+
if (is_fuchsia) {
import("//build/config/fuchsia/generate_runner_scripts.gni")
import("//third_party/fuchsia-sdk/sdk/build/component.gni")
@@ -11,7 +22,7 @@ if (is_fuchsia) {
fuchsia_component("v8_unittests_component") {
testonly = true
- data_deps = [ ":unittests" ]
+ data_deps = [ ":v8_unittests" ]
manifest = "v8_unittests.cml"
}
@@ -185,7 +196,7 @@ v8_source_set("cppgc_unittests_sources") {
}
}
-v8_executable("unittests") {
+v8_executable("v8_unittests") {
testonly = true
if (current_os == "aix") {
@@ -194,7 +205,7 @@ v8_executable("unittests") {
deps = [
":inspector_unittests_sources",
- ":unittests_sources",
+ ":v8_unittests_sources",
":v8_heap_base_unittests_sources",
"../..:v8_for_testing",
"../..:v8_libbase",
@@ -223,7 +234,7 @@ v8_executable("unittests") {
]
}
-v8_source_set("unittests_sources") {
+v8_source_set("v8_unittests_sources") {
testonly = true
sources = [
@@ -288,90 +299,11 @@ v8_source_set("unittests_sources") {
"codegen/aligned-slot-allocator-unittest.cc",
"codegen/code-layout-unittest.cc",
"codegen/code-pages-unittest.cc",
- "codegen/code-stub-assembler-unittest.cc",
- "codegen/code-stub-assembler-unittest.h",
"codegen/factory-unittest.cc",
"codegen/register-configuration-unittest.cc",
"codegen/source-position-table-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-unittest.cc",
"compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc",
- "compiler/backend/instruction-selector-unittest.cc",
- "compiler/backend/instruction-selector-unittest.h",
- "compiler/backend/instruction-sequence-unittest.cc",
- "compiler/backend/instruction-sequence-unittest.h",
- "compiler/backend/instruction-unittest.cc",
- "compiler/branch-elimination-unittest.cc",
- "compiler/bytecode-analysis-unittest.cc",
- "compiler/checkpoint-elimination-unittest.cc",
- "compiler/codegen-tester.cc",
- "compiler/codegen-tester.h",
- "compiler/codegen-unittest.cc",
- "compiler/common-operator-reducer-unittest.cc",
- "compiler/common-operator-unittest.cc",
- "compiler/compiler-test-utils.h",
- "compiler/compiler-unittest.cc",
- "compiler/constant-folding-reducer-unittest.cc",
- "compiler/control-equivalence-unittest.cc",
- "compiler/control-flow-optimizer-unittest.cc",
- "compiler/csa-load-elimination-unittest.cc",
- "compiler/dead-code-elimination-unittest.cc",
- "compiler/decompression-optimizer-unittest.cc",
- "compiler/diamond-unittest.cc",
- "compiler/effect-control-linearizer-unittest.cc",
- "compiler/frame-unittest.cc",
- "compiler/function-tester.cc",
- "compiler/function-tester.h",
- "compiler/graph-reducer-unittest.cc",
- "compiler/graph-reducer-unittest.h",
- "compiler/graph-trimmer-unittest.cc",
- "compiler/graph-unittest.cc",
- "compiler/graph-unittest.h",
- "compiler/js-call-reducer-unittest.cc",
- "compiler/js-create-lowering-unittest.cc",
- "compiler/js-intrinsic-lowering-unittest.cc",
- "compiler/js-native-context-specialization-unittest.cc",
- "compiler/js-operator-unittest.cc",
- "compiler/js-typed-lowering-unittest.cc",
- "compiler/linkage-tail-call-unittest.cc",
- "compiler/load-elimination-unittest.cc",
- "compiler/loop-peeling-unittest.cc",
- "compiler/machine-operator-reducer-unittest.cc",
- "compiler/machine-operator-unittest.cc",
- "compiler/node-cache-unittest.cc",
- "compiler/node-matchers-unittest.cc",
- "compiler/node-properties-unittest.cc",
- "compiler/node-test-utils.cc",
- "compiler/node-test-utils.h",
- "compiler/node-unittest.cc",
- "compiler/opcodes-unittest.cc",
- "compiler/persistent-unittest.cc",
- "compiler/redundancy-elimination-unittest.cc",
- "compiler/regalloc/live-range-unittest.cc",
- "compiler/regalloc/mid-tier-register-allocator-unittest.cc",
- "compiler/regalloc/move-optimizer-unittest.cc",
- "compiler/regalloc/register-allocator-unittest.cc",
- "compiler/run-bytecode-graph-builder-unittest.cc",
- "compiler/run-deopt-unittest.cc",
- "compiler/run-jsbranches-unittest.cc",
- "compiler/run-jscalls-unittest.cc",
- "compiler/run-jsexceptions-unittest.cc",
- "compiler/run-jsobjects-unittest.cc",
- "compiler/run-jsops-unittest.cc",
- "compiler/run-tail-calls-unittest.cc",
- "compiler/schedule-unittest.cc",
- "compiler/scheduler-rpo-unittest.cc",
- "compiler/scheduler-unittest.cc",
- "compiler/simplified-lowering-unittest.cc",
- "compiler/simplified-operator-reducer-unittest.cc",
- "compiler/simplified-operator-unittest.cc",
- "compiler/sloppy-equality-unittest.cc",
- "compiler/state-values-utils-unittest.cc",
- "compiler/turboshaft/snapshot-table-unittest.cc",
- "compiler/typed-optimization-unittest.cc",
- "compiler/typer-unittest.cc",
- "compiler/types-unittest.cc",
- "compiler/value-numbering-reducer-unittest.cc",
- "compiler/zone-stats-unittest.cc",
"date/date-cache-unittest.cc",
"date/date-unittest.cc",
"debug/debug-property-iterator-unittest.cc",
@@ -393,12 +325,13 @@ v8_source_set("unittests_sources") {
"heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",
"heap/code-object-registry-unittest.cc",
+ "heap/cppgc-js/embedder-roots-handler-unittest.cc",
"heap/cppgc-js/traced-reference-unittest.cc",
"heap/cppgc-js/unified-heap-snapshot-unittest.cc",
"heap/cppgc-js/unified-heap-unittest.cc",
"heap/cppgc-js/unified-heap-utils.cc",
"heap/cppgc-js/unified-heap-utils.h",
- "heap/embedder-tracing-unittest.cc",
+ "heap/cppgc-js/young-unified-heap-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
"heap/gc-tracer-unittest.cc",
"heap/global-handles-unittest.cc",
@@ -441,8 +374,6 @@ v8_source_set("unittests_sources") {
"interpreter/bytecode-utils.h",
"interpreter/bytecodes-unittest.cc",
"interpreter/constant-array-builder-unittest.cc",
- "interpreter/interpreter-assembler-unittest.cc",
- "interpreter/interpreter-assembler-unittest.h",
"interpreter/interpreter-intrinsics-unittest.cc",
"interpreter/interpreter-tester.cc",
"interpreter/interpreter-tester.h",
@@ -534,12 +465,100 @@ v8_source_set("unittests_sources") {
"utils/sparse-bit-vector-unittest.cc",
"utils/utils-unittest.cc",
"utils/version-unittest.cc",
- "web-snapshot/web-snapshot-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
+ "zone/zone-vector-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [
+ "codegen/code-stub-assembler-unittest.cc",
+ "codegen/code-stub-assembler-unittest.h",
+ "compiler/backend/instruction-selector-unittest.cc",
+ "compiler/backend/instruction-selector-unittest.h",
+ "compiler/backend/instruction-sequence-unittest.cc",
+ "compiler/backend/instruction-sequence-unittest.h",
+ "compiler/backend/instruction-unittest.cc",
+ "compiler/branch-elimination-unittest.cc",
+ "compiler/bytecode-analysis-unittest.cc",
+ "compiler/checkpoint-elimination-unittest.cc",
+ "compiler/codegen-tester.cc",
+ "compiler/codegen-tester.h",
+ "compiler/codegen-unittest.cc",
+ "compiler/common-operator-reducer-unittest.cc",
+ "compiler/common-operator-unittest.cc",
+ "compiler/compiler-test-utils.h",
+ "compiler/compiler-unittest.cc",
+ "compiler/constant-folding-reducer-unittest.cc",
+ "compiler/control-equivalence-unittest.cc",
+ "compiler/control-flow-optimizer-unittest.cc",
+ "compiler/csa-load-elimination-unittest.cc",
+ "compiler/dead-code-elimination-unittest.cc",
+ "compiler/decompression-optimizer-unittest.cc",
+ "compiler/diamond-unittest.cc",
+ "compiler/effect-control-linearizer-unittest.cc",
+ "compiler/frame-unittest.cc",
+ "compiler/function-tester.cc",
+ "compiler/function-tester.h",
+ "compiler/graph-reducer-unittest.cc",
+ "compiler/graph-reducer-unittest.h",
+ "compiler/graph-trimmer-unittest.cc",
+ "compiler/graph-unittest.cc",
+ "compiler/graph-unittest.h",
+ "compiler/js-call-reducer-unittest.cc",
+ "compiler/js-create-lowering-unittest.cc",
+ "compiler/js-intrinsic-lowering-unittest.cc",
+ "compiler/js-native-context-specialization-unittest.cc",
+ "compiler/js-operator-unittest.cc",
+ "compiler/js-typed-lowering-unittest.cc",
+ "compiler/linkage-tail-call-unittest.cc",
+ "compiler/load-elimination-unittest.cc",
+ "compiler/loop-peeling-unittest.cc",
+ "compiler/machine-operator-reducer-unittest.cc",
+ "compiler/machine-operator-unittest.cc",
+ "compiler/node-cache-unittest.cc",
+ "compiler/node-matchers-unittest.cc",
+ "compiler/node-properties-unittest.cc",
+ "compiler/node-test-utils.cc",
+ "compiler/node-test-utils.h",
+ "compiler/node-unittest.cc",
+ "compiler/opcodes-unittest.cc",
+ "compiler/persistent-unittest.cc",
+ "compiler/redundancy-elimination-unittest.cc",
+ "compiler/regalloc/live-range-unittest.cc",
+ "compiler/regalloc/mid-tier-register-allocator-unittest.cc",
+ "compiler/regalloc/move-optimizer-unittest.cc",
+ "compiler/regalloc/register-allocator-unittest.cc",
+ "compiler/run-bytecode-graph-builder-unittest.cc",
+ "compiler/run-deopt-unittest.cc",
+ "compiler/run-jsbranches-unittest.cc",
+ "compiler/run-jscalls-unittest.cc",
+ "compiler/run-jsexceptions-unittest.cc",
+ "compiler/run-jsobjects-unittest.cc",
+ "compiler/run-jsops-unittest.cc",
+ "compiler/run-tail-calls-unittest.cc",
+ "compiler/schedule-unittest.cc",
+ "compiler/scheduler-rpo-unittest.cc",
+ "compiler/scheduler-unittest.cc",
+ "compiler/simplified-lowering-unittest.cc",
+ "compiler/simplified-operator-reducer-unittest.cc",
+ "compiler/simplified-operator-unittest.cc",
+ "compiler/sloppy-equality-unittest.cc",
+ "compiler/state-values-utils-unittest.cc",
+ "compiler/turboshaft/snapshot-table-unittest.cc",
+ "compiler/turboshaft/turboshaft-typer-unittest.cc",
+ "compiler/turboshaft/turboshaft-types-unittest.cc",
+ "compiler/typed-optimization-unittest.cc",
+ "compiler/typer-unittest.cc",
+ "compiler/types-unittest.cc",
+ "compiler/value-numbering-reducer-unittest.cc",
+ "compiler/zone-stats-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.h",
+ ]
+ }
+
if (v8_enable_runtime_call_stats) {
sources += [ "logging/runtime-call-stats-unittest.cc" ]
}
@@ -564,15 +583,21 @@ v8_source_set("unittests_sources") {
"wasm/simd-shuffle-unittest.cc",
"wasm/streaming-decoder-unittest.cc",
"wasm/string-builder-unittest.cc",
+ "wasm/struct-types-unittest.cc",
"wasm/subtyping-unittest.cc",
"wasm/wasm-code-manager-unittest.cc",
"wasm/wasm-compiler-unittest.cc",
+ "wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc",
+ "wasm/wasm-disassembler-unittest-gc.wasm.inc",
+ "wasm/wasm-disassembler-unittest-gc.wat.inc",
"wasm/wasm-disassembler-unittest-mvp.wasm.inc",
"wasm/wasm-disassembler-unittest-mvp.wat.inc",
"wasm/wasm-disassembler-unittest-names.wasm.inc",
"wasm/wasm-disassembler-unittest-names.wat.inc",
"wasm/wasm-disassembler-unittest-simd.wasm.inc",
"wasm/wasm-disassembler-unittest-simd.wat.inc",
+ "wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc",
+ "wasm/wasm-disassembler-unittest-too-many-ends.wat.inc",
"wasm/wasm-disassembler-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
@@ -581,28 +606,24 @@ v8_source_set("unittests_sources") {
}
if (v8_enable_wasm_simd256_revec) {
- sources += [ "compiler/linear-scheduler-unittest.cc" ]
+ sources += [
+ "compiler/linear-scheduler-unittest.cc",
+ "compiler/revec-unittest.cc",
+ ]
}
if (v8_enable_wasm_gdb_remote_debugging) {
sources += [ "wasm/wasm-gdbserver-unittest.cc" ]
}
- if (v8_enable_inner_pointer_resolution_osb) {
- sources += [ "heap/object-start-bitmap-unittest.cc" ]
- }
-
- if (v8_enable_inner_pointer_resolution_mb) {
- sources += [ "heap/marking-inner-pointer-resolution-unittest.cc" ]
- }
-
if (v8_enable_conservative_stack_scanning) {
sources += [ "heap/conservative-stack-visitor-unittest.cc" ]
+ sources += [ "heap/marking-inner-pointer-resolution-unittest.cc" ]
}
if (v8_enable_i18n_support) {
defines = [ "V8_INTL_SUPPORT" ]
- public_deps = [ "//third_party/icu" ]
+ public_deps = [ v8_icu_path ]
} else {
sources -= [ "objects/intl-unittest.cc" ]
}
@@ -610,70 +631,91 @@ v8_source_set("unittests_sources") {
if (v8_current_cpu == "arm") {
sources += [
"assembler/disasm-arm-unittest.cc",
- "assembler/turbo-assembler-arm-unittest.cc",
- "compiler/arm/instruction-selector-arm-unittest.cc",
+ "assembler/macro-assembler-arm-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
+ }
} else if (v8_current_cpu == "arm64") {
sources += [
"assembler/disasm-arm64-unittest.cc",
"assembler/macro-assembler-arm64-unittest.cc",
- "assembler/turbo-assembler-arm64-unittest.cc",
"codegen/pointer-auth-arm64-unittest.cc",
- "compiler/arm64/instruction-selector-arm64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/arm64/instruction-selector-arm64-unittest.cc" ]
+ }
+ if (v8_enable_webassembly && current_cpu == "arm64") {
+ sources += [ "wasm/trap-handler-x64-arm64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "x86") {
sources += [
"assembler/disasm-ia32-unittest.cc",
- "assembler/turbo-assembler-ia32-unittest.cc",
- "compiler/ia32/instruction-selector-ia32-unittest.cc",
+ "assembler/macro-assembler-ia32-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ]
+ }
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
"assembler/disasm-mips64-unittest.cc",
- "assembler/turbo-assembler-mips64-unittest.cc",
- "compiler/mips64/instruction-selector-mips64-unittest.cc",
+ "assembler/macro-assembler-mips64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "riscv64") {
sources += [
"assembler/disasm-riscv-unittest.cc",
- "assembler/turbo-assembler-riscv-unittest.cc",
- "compiler/riscv64/instruction-selector-riscv64-unittest.cc",
+ "assembler/macro-assembler-riscv-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/riscv64/instruction-selector-riscv64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "riscv32") {
sources += [
"assembler/disasm-riscv-unittest.cc",
- "assembler/turbo-assembler-riscv-unittest.cc",
- "compiler/riscv32/instruction-selector-riscv32-unittest.cc",
+ "assembler/macro-assembler-riscv-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/riscv32/instruction-selector-riscv32-unittest.cc" ]
+ }
} else if (v8_current_cpu == "x64") {
sources += [
"assembler/assembler-x64-unittest.cc",
"assembler/disasm-x64-unittest.cc",
"assembler/macro-assembler-x64-unittest.cc",
- "assembler/turbo-assembler-x64-unittest.cc",
- "compiler/x64/instruction-selector-x64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ]
+ }
if (v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-x64-unittest.cc" ]
+ sources += [ "wasm/trap-handler-x64-arm64-unittest.cc" ]
}
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [
"assembler/disasm-ppc-unittest.cc",
- "assembler/turbo-assembler-ppc-unittest.cc",
- "compiler/ppc/instruction-selector-ppc-unittest.cc",
+ "assembler/macro-assembler-ppc-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ]
+ }
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [
"assembler/disasm-s390-unittest.cc",
- "assembler/turbo-assembler-s390-unittest.cc",
- "compiler/s390/instruction-selector-s390-unittest.cc",
+ "assembler/macro-assembler-s390-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ]
+ }
} else if (v8_current_cpu == "loong64") {
sources += [
"assembler/disasm-loong64-unittest.cc",
- "assembler/turbo-assembler-loong64-unittest.cc",
- "compiler/loong64/instruction-selector-loong64-unittest.cc",
+ "assembler/macro-assembler-loong64-unittest.cc",
]
+ if (v8_enable_turbofan) {
+ sources += [ "compiler/loong64/instruction-selector-loong64-unittest.cc" ]
+ }
}
if (v8_enable_webassembly) {
diff --git a/deps/v8/test/unittests/api/api-wasm-unittest.cc b/deps/v8/test/unittests/api/api-wasm-unittest.cc
index 85174ced7f..ab1f22b8e3 100644
--- a/deps/v8/test/unittests/api/api-wasm-unittest.cc
+++ b/deps/v8/test/unittests/api/api-wasm-unittest.cc
@@ -13,6 +13,8 @@
#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/handles/global-handles.h"
+#include "src/wasm/wasm-features.h"
+#include "test/common/flag-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -156,4 +158,52 @@ TEST_F(ApiWasmTest, WasmStreamingSetCallback) {
Promise::kPending);
}
+TEST_F(ApiWasmTest, WasmEnableDisableGC) {
+ Local<Context> context_local = Context::New(isolate());
+ Context::Scope context_scope(context_local);
+ i::Handle<i::Context> context = v8::Utils::OpenHandle(*context_local);
+ // When using the flags, stringref and GC are controlled independently.
+ {
+ i::FlagScope<bool> flag_gc(&i::v8_flags.experimental_wasm_gc, false);
+ i::FlagScope<bool> flag_stringref(&i::v8_flags.experimental_wasm_stringref,
+ true);
+ EXPECT_FALSE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_TRUE(i_isolate()->IsWasmStringRefEnabled(context));
+ }
+ {
+ i::FlagScope<bool> flag_gc(&i::v8_flags.experimental_wasm_gc, true);
+ i::FlagScope<bool> flag_stringref(&i::v8_flags.experimental_wasm_stringref,
+ false);
+ EXPECT_TRUE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_FALSE(i_isolate()->IsWasmStringRefEnabled(context));
+ }
+ // When providing a callback, the callback will control GC, stringref,
+ // and inlining.
+ isolate()->SetWasmGCEnabledCallback([](auto) { return true; });
+ EXPECT_TRUE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_TRUE(i_isolate()->IsWasmStringRefEnabled(context));
+ EXPECT_TRUE(i_isolate()->IsWasmInliningEnabled(context));
+ {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate());
+ EXPECT_TRUE(enabled_features.has_gc());
+ EXPECT_TRUE(enabled_features.has_stringref());
+ EXPECT_TRUE(enabled_features.has_typed_funcref());
+ EXPECT_TRUE(enabled_features.has_inlining());
+ }
+ isolate()->SetWasmGCEnabledCallback([](auto) { return false; });
+ EXPECT_FALSE(i_isolate()->IsWasmGCEnabled(context));
+ EXPECT_FALSE(i_isolate()->IsWasmStringRefEnabled(context));
+ // TODO(crbug.com/1424350): Change (or just drop) this expectation when
+ // we enable inlining by default.
+ EXPECT_FALSE(i_isolate()->IsWasmInliningEnabled(context));
+ {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate());
+ EXPECT_FALSE(enabled_features.has_gc());
+ EXPECT_FALSE(enabled_features.has_stringref());
+ EXPECT_FALSE(enabled_features.has_typed_funcref());
+ EXPECT_FALSE(enabled_features.has_inlining());
+ }
+ isolate()->SetWasmGCEnabledCallback(nullptr);
+}
+
} // namespace v8
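
Editor's note: the WasmEnableDisableGC test above uses i::FlagScope to flip experimental flags for the duration of a block. As a minimal sketch of that RAII pattern — not V8's actual implementation; the class name and the flag variable below are made up for illustration:

#include <cassert>

// Saves the flag's old value on construction and restores it on destruction,
// so each block of EXPECT checks runs under a well-defined flag configuration.
template <typename T>
class ScopedFlag {
 public:
  ScopedFlag(T* flag, T new_value) : flag_(flag), old_value_(*flag) {
    *flag_ = new_value;
  }
  ~ScopedFlag() { *flag_ = old_value_; }

 private:
  T* flag_;
  T old_value_;
};

bool experimental_gc = false;  // stand-in for a global feature flag

int main() {
  {
    ScopedFlag<bool> scope(&experimental_gc, true);
    assert(experimental_gc);   // flag is overridden inside the scope
  }
  assert(!experimental_gc);    // original value restored on scope exit
}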
diff --git a/deps/v8/test/unittests/api/deserialize-unittest.cc b/deps/v8/test/unittests/api/deserialize-unittest.cc
index f1d5299cbf..ab4d41f147 100644
--- a/deps/v8/test/unittests/api/deserialize-unittest.cc
+++ b/deps/v8/test/unittests/api/deserialize-unittest.cc
@@ -359,8 +359,6 @@ class MergeDeserializedCodeTest : public DeserializeTest {
}
}
- i::ScanStackModeScopeForTesting no_stack_scanning(
- i_isolate->heap(), i::Heap::ScanStackMode::kNone);
i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
@@ -411,6 +409,8 @@ class MergeDeserializedCodeTest : public DeserializeTest {
std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
IsolateAndContextScope scope(this);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
ScriptOrigin default_origin(isolate(), NewString(""));
i::Handle<i::WeakFixedArray> original_objects =
@@ -509,8 +509,6 @@ class MergeDeserializedCodeTest : public DeserializeTest {
// At this point, the original_objects array might still have pointers to
// some old discarded content, such as UncompiledData from flushed
// functions. GC again to clear it all out.
- i::ScanStackModeScopeForTesting no_stack_scanning(
- i_isolate->heap(), i::Heap::ScanStackMode::kNone);
i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
@@ -645,6 +643,9 @@ TEST_F(MergeDeserializedCodeTest, MergeWithNoFollowUpWork) {
std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
IsolateAndContextScope scope(this);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
+
ScriptOrigin default_origin(isolate(), NewString(""));
constexpr char kSourceCode[] = "function f() {}";
@@ -727,6 +728,8 @@ TEST_F(MergeDeserializedCodeTest, MergeThatCompilesLazyFunction) {
std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data;
IsolateAndContextScope scope(this);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
ScriptOrigin default_origin(isolate(), NewString(""));
constexpr char kSourceCode[] =
@@ -819,4 +822,98 @@ TEST_F(MergeDeserializedCodeTest, MergeThatCompilesLazyFunction) {
CHECK(expected->StrictEquals(actual));
}
+TEST_F(MergeDeserializedCodeTest, MergeThatStartsButDoesNotFinish) {
+ i::v8_flags.merge_background_deserialized_script_with_compilation_cache =
+ true;
+ constexpr int kSimultaneousScripts = 10;
+ std::vector<std::unique_ptr<v8::ScriptCompiler::CachedData>> cached_data;
+ IsolateAndContextScope scope(this);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ ScriptOrigin default_origin(isolate(), NewString(""));
+
+ // Compile the script for the first time to produce code cache data.
+ {
+ v8::HandleScope handle_scope(isolate());
+ Local<Script> script =
+ Script::Compile(context(), NewString(kSourceCode), &default_origin)
+ .ToLocalChecked();
+ CHECK(!script->Run(context()).IsEmpty());
+
+ // Create a bunch of copies of the code cache data.
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ cached_data.emplace_back(
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript()));
+ }
+
+ // Age the top-level bytecode so that the Isolate compilation cache will
+ // contain only the Script.
+ i::BytecodeArray bytecode =
+ GetSharedFunctionInfo(script).GetBytecodeArray(i_isolate);
+ for (int j = 0; j < i::v8_flags.bytecode_old_age; ++j) {
+ bytecode.MakeOlder();
+ }
+ }
+
+ i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
+
+ // A second round of GC is necessary in case incremental marking had already
+ // started before the bytecode was aged.
+ i_isolate->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
+
+ // Start several background deserializations.
+ std::vector<std::unique_ptr<DeserializeThread>> deserialize_threads;
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ deserialize_threads.push_back(std::make_unique<DeserializeThread>(
+ ScriptCompiler::StartConsumingCodeCache(
+ isolate(), std::make_unique<ScriptCompiler::CachedData>(
+ cached_data[i]->data, cached_data[i]->length,
+ ScriptCompiler::CachedData::BufferNotOwned))));
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ CHECK(deserialize_threads[i]->Start());
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ deserialize_threads[i]->Join();
+ }
+
+ // Start background merges for all of those simultaneous scripts.
+ std::vector<std::unique_ptr<ScriptCompiler::ConsumeCodeCacheTask>> tasks;
+ std::vector<std::unique_ptr<MergeThread>> merge_threads;
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ tasks.push_back(deserialize_threads[i]->TakeTask());
+ tasks[i]->SourceTextAvailable(isolate(), NewString(kSourceCode),
+ default_origin);
+ CHECK(tasks[i]->ShouldMergeWithExistingScript());
+ merge_threads.push_back(std::make_unique<MergeThread>(tasks[i].get()));
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ CHECK(merge_threads[i]->Start());
+ }
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ merge_threads[i]->Join();
+ }
+
+ // Complete compilation of each script on the main thread. The first one will
+ // actually finish its merge; the others will abandon their in-progress merges
+ // and instead use the result from the first script since it will be in the
+ // Isolate compilation cache.
+ i::Handle<i::SharedFunctionInfo> first_script_sfi;
+ for (int i = 0; i < kSimultaneousScripts; ++i) {
+ ScriptCompiler::Source source(NewString(kSourceCode), default_origin,
+ cached_data[i].release(), tasks[i].release());
+ Local<Script> script =
+ ScriptCompiler::Compile(context(), &source,
+ ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+ if (i == 0) {
+ first_script_sfi = i::handle(GetSharedFunctionInfo(script), i_isolate);
+ } else {
+ CHECK_EQ(*first_script_sfi, GetSharedFunctionInfo(script));
+ }
+ CHECK(!script->Run(context()).IsEmpty());
+ }
+}
+
} // namespace v8
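
Editor's note: MergeThatStartsButDoesNotFinish fans out one background deserialization per script, joins them all, and only then starts the merges. A stripped-down sketch of that start-all/join-all shape, using plain std::thread instead of V8's DeserializeThread and MergeThread helpers (the per-task work here is a placeholder):

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main() {
  constexpr int kSimultaneousTasks = 10;
  std::atomic<int> completed{0};

  // Phase 1: start all background tasks.
  std::vector<std::thread> workers;
  workers.reserve(kSimultaneousTasks);
  for (int i = 0; i < kSimultaneousTasks; ++i) {
    workers.emplace_back([&completed] {
      // Stand-in for background deserialization or merge work.
      completed.fetch_add(1, std::memory_order_relaxed);
    });
  }

  // Phase 2: join them all before the main thread consumes the results,
  // mirroring the deserialize-then-merge phases in the test.
  for (std::thread& t : workers) t.join();
  assert(completed.load() == kSimultaneousTasks);
}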
diff --git a/deps/v8/test/unittests/api/exception-unittest.cc b/deps/v8/test/unittests/api/exception-unittest.cc
index 2455e4c78f..957aeb24fa 100644
--- a/deps/v8/test/unittests/api/exception-unittest.cc
+++ b/deps/v8/test/unittests/api/exception-unittest.cc
@@ -54,6 +54,8 @@ class V8_NODISCARD ScopedExposeGc {
};
TEST_F(APIExceptionTest, ExceptionMessageDoesNotKeepContextAlive) {
+ i::DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
ScopedExposeGc expose_gc;
Persistent<Context> weak_context;
{
diff --git a/deps/v8/test/unittests/api/v8-script-unittest.cc b/deps/v8/test/unittests/api/v8-script-unittest.cc
index 98040cf662..79de0c2c67 100644
--- a/deps/v8/test/unittests/api/v8-script-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-script-unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "include/v8-context.h"
#include "include/v8-isolate.h"
#include "include/v8-local-handle.h"
@@ -157,5 +159,173 @@ TEST_F(ScriptTest, GetEmptyStalledTopLevelAwaitMessage) {
{});
}
+TEST_F(ScriptTest, ProduceCompileHints) {
+ const char* url = "http://www.foo.com/foo.js";
+ v8::ScriptOrigin origin(isolate(), NewString(url), 13, 0);
+
+ const char* code = "function lazy1() {} function lazy2() {} lazy1();";
+ v8::ScriptCompiler::Source script_source(NewString(code), origin);
+
+ // Test producing compile hints.
+ {
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(
+ v8_context(), &script_source,
+ v8::ScriptCompiler::CompileOptions::kProduceCompileHints)
+ .ToLocalChecked();
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate());
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ EXPECT_FALSE(result.IsEmpty());
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(1u, compile_hints.size());
+ EXPECT_EQ(14, compile_hints[0]);
+ }
+
+ // The previous data is cleared if we retrieve compile hints again.
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+
+ // Call the other lazy function and retrieve compile hints again.
+ const char* code2 = "lazy2();";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+ EXPECT_FALSE(result2.IsEmpty());
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(1u, compile_hints.size());
+ EXPECT_EQ(34, compile_hints[0]);
+ }
+ }
+
+ // Test that compile hints are not produced unless the relevant compile option
+ // is set.
+ {
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source)
+ .ToLocalChecked();
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate());
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ EXPECT_FALSE(result.IsEmpty());
+ {
+ auto compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(0u, compile_hints.size());
+ }
+ }
+}
+
+namespace {
+bool CompileHintsCallback(int position, void* data) {
+ std::vector<int>* hints = reinterpret_cast<std::vector<int>*>(data);
+ return std::find(hints->begin(), hints->end(), position) != hints->end();
+}
+} // namespace
+
+TEST_F(ScriptTest, LocalCompileHints) {
+ const char* url = "http://www.foo.com/foo.js";
+ v8::ScriptOrigin origin(isolate(), NewString(url), 13, 0);
+ v8::Local<v8::Context> context = v8::Context::New(isolate());
+
+ // Produce compile hints.
+ std::vector<int> compile_hints;
+ {
+ // Run the top level code.
+ const char* code = "function lazy1() {} function lazy2() {}";
+ v8::ScriptCompiler::Source script_source(NewString(code), origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(
+ v8_context(), &script_source,
+ v8::ScriptCompiler::CompileOptions::kProduceCompileHints)
+ .ToLocalChecked();
+
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ EXPECT_FALSE(result.IsEmpty());
+
+ // Run lazy1.
+ const char* code2 = "lazy1();";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+ EXPECT_FALSE(result2.IsEmpty());
+
+ // Retrieve compile hints.
+ compile_hints = script->GetProducedCompileHints();
+ EXPECT_EQ(1u, compile_hints.size());
+ }
+
+ // Consume compile hints. We use the produced compile hints to test that the
+ // positions of the requested compile hints match the positions of the
+ // produced compile hints.
+ {
+ // Artificially change the code so that the isolate cache won't hit.
+ const char* code = "function lazy1() {} function lazy2() {} //";
+ v8::ScriptCompiler::Source script_source(
+ NewString(code), origin, CompileHintsCallback,
+ reinterpret_cast<void*>(&compile_hints));
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(
+ v8_context(), &script_source,
+ v8::ScriptCompiler::CompileOptions::kConsumeCompileHints)
+ .ToLocalChecked();
+ USE(script);
+
+ // Retrieve the function object for lazy1.
+ {
+ const char* code2 = "lazy1";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+
+ auto function = i::Handle<i::JSFunction>::cast(
+ Utils::OpenHandle(*result2.ToLocalChecked()));
+ i::Builtin builtin = function->code().builtin_id();
+
+ // lazy1 was not compiled lazily (there was a compile hint for it).
+ EXPECT_NE(i::Builtin::kCompileLazy, builtin);
+ }
+
+ // Retrieve the function object for lazy2.
+ {
+ const char* code2 = "lazy2";
+ v8::ScriptCompiler::Source script_source2(NewString(code2), origin);
+
+ Local<Script> script2 =
+ v8::ScriptCompiler::Compile(v8_context(), &script_source2)
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result2 = script2->Run(context);
+
+ auto function = i::Handle<i::JSFunction>::cast(
+ Utils::OpenHandle(*result2.ToLocalChecked()));
+
+ i::Builtin builtin = function->code().builtin_id();
+
+ // lazy2 was compiled lazily (there was no compile hint for it).
+ EXPECT_EQ(i::Builtin::kCompileLazy, builtin);
+ }
+ }
+}
+
} // namespace
} // namespace v8
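
Editor's note: in the LocalCompileHints test above, the consuming compile asks a callback "was this position recorded as a hint?", and the test answers with a linear std::find over the produced positions. A self-contained sketch of that lookup; the sorted-vector/binary-search variant below is an illustration of the same idea, not what the test itself does, and the positions are hypothetical:

#include <algorithm>
#include <cassert>
#include <vector>

// Returns true if `position` was recorded as a compile hint. Keeping the hint
// list sorted lets the lookup use binary search instead of a linear scan.
bool HasCompileHint(const std::vector<int>& sorted_hints, int position) {
  return std::binary_search(sorted_hints.begin(), sorted_hints.end(), position);
}

int main() {
  std::vector<int> hints = {9, 29};    // hypothetical function start positions
  assert(HasCompileHint(hints, 9));    // hinted function gets compiled eagerly
  assert(!HasCompileHint(hints, 50));  // unknown position stays lazily compiled
}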
diff --git a/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc
index a000cac6ec..5a3fd41549 100644
--- a/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/assembler-x64-unittest.cc
@@ -717,7 +717,7 @@ TEST_F(AssemblerX64Test, AssemblerMultiByteNop) {
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
int res = f.Call();
CHECK_EQ(42, res);
}
@@ -774,7 +774,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Code> code =
Factory::CodeBuilder(i_isolate, desc, CodeKind::FOR_TESTING).Build();
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(i_isolate, *code);
int res = f.Call();
args.GetReturnValue().Set(v8::Integer::New(isolate, res));
}
@@ -840,7 +840,7 @@ TEST_F(AssemblerX64Test, AssemblerX64Extractps) {
code->Print(os);
#endif
- auto f = GeneratedCode<F3>::FromCode(*code);
+ auto f = GeneratedCode<F3>::FromCode(isolate, *code);
uint64_t value1 = 0x1234'5678'8765'4321;
CHECK_EQ(0x12345678u, f.Call(base::uint64_to_double(value1)));
uint64_t value2 = 0x8765'4321'1234'5678;
@@ -875,7 +875,7 @@ TEST_F(AssemblerX64Test, AssemblerX64SSE) {
code->Print(os);
#endif
- auto f = GeneratedCode<F6>::FromCode(*code);
+ auto f = GeneratedCode<F6>::FromCode(isolate, *code);
CHECK_EQ(2, f.Call(1.0, 2.0));
}
@@ -905,7 +905,7 @@ TEST_F(AssemblerX64Test, AssemblerX64SSE3) {
code->Print(os);
#endif
- auto f = GeneratedCode<F6>::FromCode(*code);
+ auto f = GeneratedCode<F6>::FromCode(isolate, *code);
CHECK_EQ(4, f.Call(1.0, 2.0));
}
@@ -1126,7 +1126,7 @@ TEST_F(AssemblerX64Test, AssemblerX64FMA_sd) {
code->Print(os);
#endif
- auto f = GeneratedCode<F7>::FromCode(*code);
+ auto f = GeneratedCode<F7>::FromCode(isolate, *code);
CHECK_EQ(
0, f.Call(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
}
@@ -1348,7 +1348,7 @@ TEST_F(AssemblerX64Test, AssemblerX64FMA_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call(9.26621069e-05f, -2.4607749f, -1.09587872f));
}
@@ -1421,7 +1421,7 @@ TEST_F(AssemblerX64Test, AssemblerX64SSE_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
int res = f.Call(1.0f, 2.0f, 3.0f);
PrintF("f(1,2,3) = %d\n", res);
CHECK_EQ(6, res);
@@ -1505,7 +1505,7 @@ TEST_F(AssemblerX64Test, AssemblerX64AVX_ss) {
code->Print(os);
#endif
- auto f = GeneratedCode<F8>::FromCode(*code);
+ auto f = GeneratedCode<F8>::FromCode(isolate, *code);
int res = f.Call(1.0f, 2.0f, 3.0f);
PrintF("f(1,2,3) = %d\n", res);
CHECK_EQ(6, res);
@@ -1743,7 +1743,7 @@ TEST_F(AssemblerX64Test, AssemblerX64AVX_sd) {
code->Print(os);
#endif
- auto f = GeneratedCode<F7>::FromCode(*code);
+ auto f = GeneratedCode<F7>::FromCode(isolate, *code);
int res = f.Call(1.0, 2.0, 3.0);
PrintF("f(1,2,3) = %d\n", res);
CHECK_EQ(6, res);
@@ -1933,7 +1933,7 @@ TEST_F(AssemblerX64Test, AssemblerX64BMI1) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -1991,7 +1991,7 @@ TEST_F(AssemblerX64Test, AssemblerX64LZCNT) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -2049,7 +2049,7 @@ TEST_F(AssemblerX64Test, AssemblerX64POPCNT) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -2310,7 +2310,7 @@ TEST_F(AssemblerX64Test, AssemblerX64BMI2) {
code->Print(os);
#endif
- auto f = GeneratedCode<F0>::FromCode(*code);
+ auto f = GeneratedCode<F0>::FromCode(isolate, *code);
CHECK_EQ(0, f.Call());
}
@@ -2352,7 +2352,7 @@ TEST_F(AssemblerX64Test, AssemblerX64JumpTables1) {
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int res = f.Call(i);
PrintF("f(%d) = %d\n", i, res);
@@ -2399,7 +2399,7 @@ TEST_F(AssemblerX64Test, AssemblerX64JumpTables2) {
code->Print(std::cout);
#endif
- auto f = GeneratedCode<F1>::FromCode(*code);
+ auto f = GeneratedCode<F1>::FromCode(isolate, *code);
for (int i = 0; i < kNumCases; ++i) {
int res = f.Call(i);
PrintF("f(%d) = %d\n", i, res);
@@ -2455,7 +2455,7 @@ TEST_F(AssemblerX64Test, AssemblerX64vmovups) {
code->Print(os);
#endif
- auto f = GeneratedCode<F9>::FromCode(*code);
+ auto f = GeneratedCode<F9>::FromCode(isolate, *code);
CHECK_EQ(-1.5, f.Call(1.5, -1.5));
}
@@ -2624,6 +2624,10 @@ TEST_F(AssemblerX64Test, AssemblerX64FloatingPoint256bit) {
__ vcvtps2dq(ymm5, Operand(rbx, rcx, times_4, 10000));
__ vcvttpd2dq(xmm6, ymm8);
__ vcvttpd2dq(xmm10, Operand256(rbx, rcx, times_4, 10000));
+ __ vcvtdq2pd(ymm1, xmm2);
+ __ vcvtdq2pd(ymm1, Operand(rbx, rcx, times_4, 10000));
+ __ vcvttps2dq(ymm3, ymm2);
+ __ vcvttps2dq(ymm3, Operand256(rbx, rcx, times_4, 10000));
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -2673,7 +2677,15 @@ TEST_F(AssemblerX64Test, AssemblerX64FloatingPoint256bit) {
// vcvttpd2dq xmm6, ymm8
0xC4, 0xC1, 0x7D, 0xE6, 0xF0,
// vcvttpd2dq xmm10, YMMWORD PTR [rbx+rcx*4+0x2710]
- 0xC5, 0x7D, 0xE6, 0x94, 0x8B, 0x10, 0x27, 0x00, 0x00};
+ 0xC5, 0x7D, 0xE6, 0x94, 0x8B, 0x10, 0x27, 0x00, 0x00,
+ // vcvtdq2pd ymm1, xmm2
+ 0xC5, 0xFE, 0xE6, 0xCA,
+ // vcvtdq2pd ymm1, XMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xC5, 0xFE, 0xE6, 0x8C, 0x8B, 0x10, 0x27, 0x00, 0x00,
+ // vcvttps2dq ymm3, ymm2
+ 0xC5, 0xFE, 0x5B, 0xDA,
+ // vcvttps2dq ymm3, YMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xC5, 0xFE, 0x5B, 0x9C, 0x8B, 0x10, 0x27, 0x00, 0x00};
CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
}
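
Editor's note: AssemblerX64FloatingPoint256bit verifies the assembler output byte-for-byte against a hand-written encoding table. The same golden-bytes technique, reduced to a self-contained sketch (the "emitted" buffer here is a stand-in, not produced by V8's Assembler):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  // Pretend this buffer came out of an assembler.
  std::vector<uint8_t> emitted = {0xC5, 0xFE, 0xE6, 0xCA};  // vcvtdq2pd ymm1,xmm2

  // Hand-written expected encoding, as in the test's `expected` array.
  const uint8_t expected[] = {0xC5, 0xFE, 0xE6, 0xCA};

  // Any divergence (wrong VEX prefix, wrong ModRM byte, missing operand bytes)
  // shows up as a nonzero memcmp, just like the CHECK_EQ in the test.
  assert(emitted.size() == sizeof(expected));
  assert(std::memcmp(expected, emitted.data(), sizeof(expected)) == 0);
}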
diff --git a/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc b/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc
index 3cf5bf0517..d845144e14 100644
--- a/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-arm64-unittest.cc
@@ -1449,6 +1449,153 @@ TEST_F(DisasmArm64Test, load_store_acquire_release) {
COMPARE(stlxrh(wzr, w1, sp), "stlxrh wzr, w1, [sp]");
COMPARE(stlxr(w2, wzr, sp), "stlxr w2, wzr, [sp]");
+ CpuFeatureScope feature_scope(assm, LSE,
+ CpuFeatureScope::kDontCheckSupported);
+
+ COMPARE(cas(w30, w0, MemOperand(x1)), "cas w30, w0, [x1]");
+ COMPARE(cas(w2, w3, MemOperand(sp)), "cas w2, w3, [sp]");
+ COMPARE(cas(x4, x5, MemOperand(x6)), "cas x4, x5, [x6]");
+ COMPARE(cas(x7, x8, MemOperand(sp)), "cas x7, x8, [sp]");
+ COMPARE(casa(w9, w10, MemOperand(x11)), "casa w9, w10, [x11]");
+ COMPARE(casa(w12, w13, MemOperand(sp)), "casa w12, w13, [sp]");
+ COMPARE(casa(x14, x15, MemOperand(x16)), "casa x14, x15, [x16]");
+ COMPARE(casa(x17, x18, MemOperand(sp)), "casa x17, x18, [sp]");
+ COMPARE(casl(w19, w20, MemOperand(x21)), "casl w19, w20, [x21]");
+ COMPARE(casl(w22, w23, MemOperand(sp)), "casl w22, w23, [sp]");
+ COMPARE(casl(x24, x25, MemOperand(x26)), "casl x24, x25, [x26]");
+ COMPARE(casl(x27, x28, MemOperand(sp)), "casl cp, x28, [sp]");
+ COMPARE(casal(w29, w30, MemOperand(x0)), "casal w29, w30, [x0]");
+ COMPARE(casal(w1, w2, MemOperand(sp)), "casal w1, w2, [sp]");
+ COMPARE(casal(x3, x4, MemOperand(x5)), "casal x3, x4, [x5]");
+ COMPARE(casal(x6, x7, MemOperand(sp)), "casal x6, x7, [sp]");
+ COMPARE(casb(w8, w9, MemOperand(x10)), "casb w8, w9, [x10]");
+ COMPARE(casb(w11, w12, MemOperand(sp)), "casb w11, w12, [sp]");
+ COMPARE(casab(w13, w14, MemOperand(x15)), "casab w13, w14, [x15]");
+ COMPARE(casab(w16, w17, MemOperand(sp)), "casab w16, w17, [sp]");
+ COMPARE(caslb(w18, w19, MemOperand(x20)), "caslb w18, w19, [x20]");
+ COMPARE(caslb(w21, w22, MemOperand(sp)), "caslb w21, w22, [sp]");
+ COMPARE(casalb(w23, w24, MemOperand(x25)), "casalb w23, w24, [x25]");
+ COMPARE(casalb(w26, w27, MemOperand(sp)), "casalb w26, w27, [sp]");
+ COMPARE(cash(w28, w29, MemOperand(x30)), "cash w28, w29, [lr]");
+ COMPARE(cash(w0, w1, MemOperand(sp)), "cash w0, w1, [sp]");
+ COMPARE(casah(w2, w3, MemOperand(x4)), "casah w2, w3, [x4]");
+ COMPARE(casah(w5, w6, MemOperand(sp)), "casah w5, w6, [sp]");
+ COMPARE(caslh(w7, w8, MemOperand(x9)), "caslh w7, w8, [x9]");
+ COMPARE(caslh(w10, w11, MemOperand(sp)), "caslh w10, w11, [sp]");
+ COMPARE(casalh(w12, w13, MemOperand(x14)), "casalh w12, w13, [x14]");
+ COMPARE(casalh(w15, w16, MemOperand(sp)), "casalh w15, w16, [sp]");
+ COMPARE(casp(w18, w19, w20, w21, MemOperand(x22)),
+ "casp w18, w19, w20, w21, [x22]");
+ COMPARE(casp(w24, w25, w26, w27, MemOperand(sp)),
+ "casp w24, w25, w26, w27, [sp]");
+ COMPARE(casp(x28, x29, x0, x1, MemOperand(x2)), "casp x28, fp, x0, x1, [x2]");
+ COMPARE(casp(x4, x5, x6, x7, MemOperand(sp)), "casp x4, x5, x6, x7, [sp]");
+ COMPARE(caspa(w8, w9, w10, w11, MemOperand(x12)),
+ "caspa w8, w9, w10, w11, [x12]");
+ COMPARE(caspa(w14, w15, w16, w17, MemOperand(sp)),
+ "caspa w14, w15, w16, w17, [sp]");
+ COMPARE(caspa(x18, x19, x20, x21, MemOperand(x22)),
+ "caspa x18, x19, x20, x21, [x22]");
+ COMPARE(caspa(x24, x25, x26, x27, MemOperand(sp)),
+ "caspa x24, x25, x26, cp, [sp]");
+ COMPARE(caspl(w28, w29, w0, w1, MemOperand(x2)),
+ "caspl w28, w29, w0, w1, [x2]");
+ COMPARE(caspl(w4, w5, w6, w7, MemOperand(sp)), "caspl w4, w5, w6, w7, [sp]");
+ COMPARE(caspl(x8, x9, x10, x11, MemOperand(x12)),
+ "caspl x8, x9, x10, x11, [x12]");
+ COMPARE(caspl(x14, x15, x16, x17, MemOperand(sp)),
+ "caspl x14, x15, x16, x17, [sp]");
+ COMPARE(caspal(w18, w19, w20, w21, MemOperand(x22)),
+ "caspal w18, w19, w20, w21, [x22]");
+ COMPARE(caspal(w24, w25, w26, w27, MemOperand(sp)),
+ "caspal w24, w25, w26, w27, [sp]");
+ COMPARE(caspal(x28, x29, x0, x1, MemOperand(x2)),
+ "caspal x28, fp, x0, x1, [x2]");
+ COMPARE(caspal(x4, x5, x6, x7, MemOperand(sp)),
+ "caspal x4, x5, x6, x7, [sp]");
+
+ CLEANUP();
+}
+
+#define ATOMIC_MEMORY_DISASM_LIST(V, DEF) \
+ V(DEF, add, "add") \
+ V(DEF, clr, "clr") \
+ V(DEF, eor, "eor") \
+ V(DEF, set, "set") \
+ V(DEF, smax, "smax") \
+ V(DEF, smin, "smin") \
+ V(DEF, umax, "umax") \
+ V(DEF, umin, "umin")
+
+#define ATOMIC_MEMORY_DISASM_STORE_X_MODES(V, NAME, STR) \
+ V(NAME, STR) \
+ V(NAME##l, STR "l")
+
+#define ATOMIC_MEMORY_DISASM_STORE_W_MODES(V, NAME, STR) \
+ ATOMIC_MEMORY_DISASM_STORE_X_MODES(V, NAME, STR) \
+ V(NAME##b, STR "b") \
+ V(NAME##lb, STR "lb") \
+ V(NAME##h, STR "h") \
+ V(NAME##lh, STR "lh")
+
+#define ATOMIC_MEMORY_DISASM_LOAD_X_MODES(V, NAME, STR) \
+ ATOMIC_MEMORY_DISASM_STORE_X_MODES(V, NAME, STR) \
+ V(NAME##a, STR "a") \
+ V(NAME##al, STR "al")
+
+#define ATOMIC_MEMORY_DISASM_LOAD_W_MODES(V, NAME, STR) \
+ ATOMIC_MEMORY_DISASM_LOAD_X_MODES(V, NAME, STR) \
+ V(NAME##ab, STR "ab") \
+ V(NAME##alb, STR "alb") \
+ V(NAME##ah, STR "ah") \
+ V(NAME##alh, STR "alh")
+
+TEST_F(DisasmArm64Test, atomic_memory) {
+ SET_UP_MASM();
+
+ CpuFeatureScope feature_scope(assm, LSE,
+ CpuFeatureScope::kDontCheckSupported);
+
+ // These macros generate tests for all the variations of the atomic memory
+ // operations, e.g. ldadd, ldadda, ldaddb, staddl, etc.
+
+#define AM_LOAD_X_TESTS(N, MN) \
+ COMPARE(ld##N(x0, x1, MemOperand(x2)), "ld" MN " x0, x1, [x2]"); \
+ COMPARE(ld##N(x3, x4, MemOperand(sp)), "ld" MN " x3, x4, [sp]");
+#define AM_LOAD_W_TESTS(N, MN) \
+ COMPARE(ld##N(w0, w1, MemOperand(x2)), "ld" MN " w0, w1, [x2]"); \
+ COMPARE(ld##N(w3, w4, MemOperand(sp)), "ld" MN " w3, w4, [sp]");
+#define AM_STORE_X_TESTS(N, MN) \
+ COMPARE(st##N(x0, MemOperand(x1)), "st" MN " x0, [x1]"); \
+ COMPARE(st##N(x2, MemOperand(sp)), "st" MN " x2, [sp]");
+#define AM_STORE_W_TESTS(N, MN) \
+ COMPARE(st##N(w0, MemOperand(x1)), "st" MN " w0, [x1]"); \
+ COMPARE(st##N(w2, MemOperand(sp)), "st" MN " w2, [sp]");
+
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_LOAD_X_MODES, AM_LOAD_X_TESTS)
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_LOAD_W_MODES, AM_LOAD_W_TESTS)
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_STORE_X_MODES,
+ AM_STORE_X_TESTS)
+ ATOMIC_MEMORY_DISASM_LIST(ATOMIC_MEMORY_DISASM_STORE_W_MODES,
+ AM_STORE_W_TESTS)
+
+#define AM_SWP_X_TESTS(N, MN) \
+ COMPARE(N(x0, x1, MemOperand(x2)), MN " x0, x1, [x2]"); \
+ COMPARE(N(x3, x4, MemOperand(sp)), MN " x3, x4, [sp]");
+#define AM_SWP_W_TESTS(N, MN) \
+ COMPARE(N(w0, w1, MemOperand(x2)), MN " w0, w1, [x2]"); \
+ COMPARE(N(w3, w4, MemOperand(sp)), MN " w3, w4, [sp]");
+
+ ATOMIC_MEMORY_DISASM_LOAD_X_MODES(AM_SWP_X_TESTS, swp, "swp")
+ ATOMIC_MEMORY_DISASM_LOAD_W_MODES(AM_SWP_W_TESTS, swp, "swp")
+
+#undef AM_LOAD_X_TESTS
+#undef AM_LOAD_W_TESTS
+#undef AM_STORE_X_TESTS
+#undef AM_STORE_W_TESTS
+#undef AM_SWP_X_TESTS
+#undef AM_SWP_W_TESTS
+
CLEANUP();
}
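
Editor's note: the atomic_memory test above generates its COMPARE calls from X-macro lists, so every ld/st variant (add, clr, eor, ... with a/l/b/h suffixes) comes out of one table. A toy version of that X-macro expansion, independent of the V8 macros above:

#include <cassert>
#include <string>
#include <vector>

// One table of operations; each consumer macro decides what to emit per entry.
#define OP_LIST(V) \
  V(add)           \
  V(clr)           \
  V(eor)           \
  V(set)

int main() {
  std::vector<std::string> mnemonics;
  // Expand the table into "ld<op>" strings, analogous to how the test expands
  // it into COMPARE(ld##N(...), "ld" MN ...) cases.
#define EMIT(name) mnemonics.push_back("ld" #name);
  OP_LIST(EMIT)
#undef EMIT

  assert(mnemonics.size() == 4);
  assert(mnemonics[0] == "ldadd");
  assert(mnemonics[3] == "ldset");
}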
diff --git a/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc b/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc
index d35a7a23df..ecb4717013 100644
--- a/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-ia32-unittest.cc
@@ -288,13 +288,9 @@ TEST_F(DisasmIa320Test, DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic = BUILTIN_CODE(isolate(), ArrayFrom);
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
__ jmp(&L1);
__ jmp(Operand(ebx, ecx, times_4, 10000));
- __ jmp(ic, RelocInfo::CODE_TARGET);
__ nop();
Label Ljcc;
@@ -988,8 +984,8 @@ TEST_F(DisasmIa320Test, DisasmIa320) {
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
- Address begin = code->raw_instruction_start();
- Address end = code->raw_instruction_end();
+ Address begin = code->InstructionStart();
+ Address end = code->InstructionEnd();
disasm::Disassembler::Disassemble(stdout, reinterpret_cast<byte*>(begin),
reinterpret_cast<byte*>(end));
#endif
diff --git a/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc b/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc
index 7bc9ca42cb..7bff5b9925 100644
--- a/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-riscv-unittest.cc
@@ -378,7 +378,7 @@ TEST_F(DisasmRiscv64Test, RV32D) {
COMPARE(fsgnjx_d(ft0, ft8, fa5), "22fe2053 fsgnjx.d ft0, ft8, fa5");
COMPARE(fmin_d(ft0, ft8, fa5), "2afe0053 fmin.d ft0, ft8, fa5");
COMPARE(fmax_d(ft0, ft8, fa5), "2afe1053 fmax.d ft0, ft8, fa5");
- COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053 fcvt.s.d [RDN] ft0, t3");
+ COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053 fcvt.s.d [RDN] ft0, ft8");
COMPARE(fcvt_d_s(ft0, fa0), "42050053 fcvt.d.s ft0, fa0");
COMPARE(feq_d(a0, ft8, fa5), "a2fe2553 feq.d a0, ft8, fa5");
COMPARE(flt_d(a0, ft8, fa5), "a2fe1553 flt.d a0, ft8, fa5");
diff --git a/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc b/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc
index 2ac718c366..e309fe7ebf 100644
--- a/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/disasm-x64-unittest.cc
@@ -49,7 +49,7 @@ using DisasmX64Test = TestWithIsolate;
namespace {
-Handle<CodeT> CreateDummyCode(Isolate* isolate) {
+Handle<Code> CreateDummyCode(Isolate* isolate) {
i::byte buffer[128];
Assembler assm(AssemblerOptions{},
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
@@ -59,7 +59,7 @@ Handle<CodeT> CreateDummyCode(Isolate* isolate) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
- return ToCodeT(code, isolate);
+ return code;
}
} // namespace
@@ -82,7 +82,7 @@ TEST_F(DisasmX64Test, DisasmX64) {
__ bind(&L2);
__ call(rcx);
__ nop();
- Handle<CodeT> ic = CreateDummyCode(isolate());
+ Handle<Code> ic = CreateDummyCode(isolate());
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -305,8 +305,8 @@ TEST_F(DisasmX64Test, DisasmX64) {
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
- Address begin = code->raw_instruction_start();
- Address end = code->raw_instruction_end();
+ Address begin = code->InstructionStart();
+    Address end = code->InstructionEnd();
disasm::Disassembler::Disassemble(stdout, reinterpret_cast<byte*>(begin),
reinterpret_cast<byte*>(end));
#endif
@@ -568,6 +568,8 @@ TEST_F(DisasmX64Test, DisasmX64CheckOutput) {
COMPARE("4885948b10270000 REX.W testq rdx,[rbx+rcx*4+0x2710]",
testq(Operand(rbx, rcx, times_4, 10000), rdx));
+ COMPARE("48f7ac8b10270000 REX.W imulq [rbx+rcx*4+0x2710]",
+ imulq(Operand(rbx, rcx, times_4, 10000)));
COMPARE("486bd10c REX.W imulq rdx,rcx,0xc",
imulq(rdx, rcx, Immediate(12)));
COMPARE("4869d1e8030000 REX.W imulq rdx,rcx,0x3e8",
@@ -1445,6 +1447,13 @@ TEST_F(DisasmX64Test, DisasmX64YMMRegister) {
COMPARE("c5fe16ca vmovshdup ymm1,ymm2", vmovshdup(ymm1, ymm2));
COMPARE("c5f4c6da73 vshufps ymm3,ymm1,ymm2,0x73",
vshufps(ymm3, ymm1, ymm2, 115));
+ COMPARE("c5fee6ca vcvtdq2pd ymm1,xmm2", vcvtdq2pd(ymm1, xmm2));
+ COMPARE("c5fee68c8b10270000 vcvtdq2pd ymm1,[rbx+rcx*4+0x2710]",
+ vcvtdq2pd(ymm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("c5fe5bda vcvttps2dq ymm3,ymm2",
+ vcvttps2dq(ymm3, ymm2));
+ COMPARE("c5fe5b9c8b10270000 vcvttps2dq ymm3,[rbx+rcx*4+0x2710]",
+ vcvttps2dq(ymm3, Operand256(rbx, rcx, times_4, 10000)));
// vcmp
COMPARE("c5dcc2e900 vcmpps ymm5,ymm4,ymm1, (eq)",
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-arm-unittest.cc
index 6fa1bd5927..f7ec44e77f 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-arm-unittest.cc
@@ -13,7 +13,7 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// If we are running on android and the output is not redirected (i.e. ends up
// in the android log) then we cannot find the error message in the output. This
@@ -28,11 +28,11 @@ namespace internal {
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -40,7 +40,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -48,9 +48,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -62,7 +62,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -102,17 +102,17 @@ const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
template <typename T>
-class TurboAssemblerTestWithParam : public TurboAssemblerTest,
+class MacroAssemblerTestWithParam : public MacroAssemblerTest,
public ::testing::WithParamInterface<T> {};
-using TurboAssemblerTestMoveObjectAndSlot =
- TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+using MacroAssemblerTestMoveObjectAndSlot =
+ MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
-TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
const MoveObjectAndSlotTestCase test_case = GetParam();
TRACED_FOREACH(int32_t, offset, kOffsets) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ Push(r0);
__ Move(test_case.object, r1);
@@ -143,7 +143,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
__ RecordComment("--");
// The `result` pointer was saved on the stack.
- UseScratchRegisterScope temps(&tasm);
+ UseScratchRegisterScope temps(&masm);
Register scratch = temps.Acquire();
__ Pop(scratch);
__ str(dst_object, MemOperand(scratch));
@@ -152,7 +152,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ masm.GetCode(nullptr, &desc);
if (v8_flags.print_code) {
Handle<Code> code =
Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
@@ -179,8 +179,8 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
}
}
-INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
- TurboAssemblerTestMoveObjectAndSlot,
+INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest,
+ MacroAssemblerTestMoveObjectAndSlot,
::testing::ValuesIn(kMoveObjectAndSlotTestCases));
#undef __
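This rename is mechanical: upstream V8 folded TurboAssembler into MacroAssembler, so the fixture, the local variable, and the `__` shorthand change names while the test bodies stay the same; the per-architecture files below follow the identical pattern. A rough sketch of the shared test shape, assuming V8's internal unittest helpers (AllocateAssemblerBuffer, GeneratedCode, TestWithIsolate) and using an illustrative test name:

```cpp
// Sketch only: common shape of the renamed tests, assuming the V8 unittest
// harness. Code is emitted into a raw buffer, the buffer is made executable,
// and the generated function is called directly (no Code object is built).
TEST_F(MacroAssemblerTest, Sketch) {
  auto buffer = AllocateAssemblerBuffer();
  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                      buffer->CreateView());
  masm.set_root_array_available(false);
  masm.set_abort_hard(true);

  masm.Abort(AbortReason::kNoReason);  // Generated code aborts immediately.

  CodeDesc desc;
  masm.GetCode(isolate(), &desc);
  buffer->MakeExecutable();
  auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
  ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
```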
diff --git a/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc
index 021b0423f3..3bbbc49096 100644
--- a/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-arm64-unittest.cc
@@ -1,129 +1,254 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/deoptimizer/deoptimizer.h"
-#include "src/heap/factory.h"
-#include "src/objects/objects-inl.h"
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
#include "src/utils/ostreams.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
namespace v8 {
namespace internal {
-namespace test_macro_assembler_arm64 {
-using MacroAssemblerArm64Test = TestWithIsolate;
+#define __ masm.
-using F0 = int();
+// If we are running on android and the output is not redirected (i.e. ends up
+// in the android log) then we cannot find the error message in the output. This
+// macro just returns the empty string in that case.
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define ERROR_MESSAGE(msg) ""
+#else
+#define ERROR_MESSAGE(msg) msg
+#endif
-#define __ masm.
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
-TEST_F(MacroAssemblerArm64Test, EmbeddedObj) {
-#ifdef V8_COMPRESS_POINTERS
- Isolate* isolate = i_isolate();
- HandleScope handles(isolate);
+class MacroAssemblerTest : public TestWithIsolate {};
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- Handle<HeapObject> old_array = isolate->factory()->NewFixedArray(2000);
- Handle<HeapObject> my_array = isolate->factory()->NewFixedArray(1000);
- __ Mov(w4, Immediate(my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
- __ Mov(x5, old_array);
- __ ret(x5);
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef DEBUG
- StdoutStream os;
- code->Print(os);
-#endif
+ {
+ AssemblerBufferWriteScope rw_scope(*buffer);
- // Collect garbage to ensure reloc info can be walked by the heap.
- CollectAllGarbage();
- CollectAllGarbage();
- CollectAllGarbage();
-
- PtrComprCageBase cage_base(isolate);
-
- // Test the user-facing reloc interface.
- const int mode_mask = RelocInfo::EmbeddedObjectModeMask();
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsCompressedEmbeddedObject(mode)) {
- CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base));
- } else {
- CHECK(RelocInfo::IsFullEmbeddedObject(mode));
- CHECK_EQ(*old_array, it.rinfo()->target_object(cage_base));
- }
+ __ CodeEntry();
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
}
-#endif // V8_COMPRESS_POINTERS
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
}
-TEST_F(MacroAssemblerArm64Test, DeoptExitSizeIsFixed) {
- Isolate* isolate = i_isolate();
- HandleScope handles(isolate);
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ {
+ AssemblerBufferWriteScope rw_scope(*buffer);
+
+ __ CodeEntry();
+
+ // Fail if the first parameter is 17.
+ __ Mov(w1, Immediate(17));
+ __ Cmp(w0, w1); // 1st parameter is in {w0}.
+ __ Check(Condition::ne, AbortReason::kNoReason);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ }
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
+}
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
- for (int i = 0; i < kDeoptimizeKindCount; i++) {
- DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
- Label before_exit;
- Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
- // Mirroring logic in code-generator.cc.
- if (kind == DeoptimizeKind::kLazy) {
- // CFI emits an extra instruction here.
- masm.BindExceptionHandler(&before_exit);
- } else {
- masm.bind(&before_exit);
+TEST_F(MacroAssemblerTest, CompareAndBranch) {
+ const int kTestCases[] = {-42, 0, 42};
+ static_assert(Condition::eq == 0);
+ static_assert(Condition::le == 13);
+ TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv
+ Condition cond = static_cast<Condition>(cc);
+ TRACED_FOREACH(int, imm, kTestCases) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate(), AssemblerOptions{},
+ CodeObjectRequired::kNo, buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ {
+ AssemblerBufferWriteScope rw_scope(*buffer);
+
+ __ CodeEntry();
+
+ Label start, lab;
+ __ Bind(&start);
+ __ CompareAndBranch(x0, Immediate(imm), cond, &lab);
+ if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) ||
+ (cond == ls))) { // One instruction generated
+ ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start));
+ } else { // Two instructions generated
+ ASSERT_EQ(static_cast<uint8_t>(2 * kInstrSize),
+ __ SizeOfCodeGeneratedSince(&start));
+ }
+ __ Cmp(x0, Immediate(imm));
+ __ Check(NegateCondition(cond),
+ AbortReason::kNoReason); // cond must not hold
+ __ Ret();
+ __ Bind(&lab); // Branch leads here
+ __ Cmp(x0, Immediate(imm));
+ __ Check(cond, AbortReason::kNoReason); // cond must hold
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ }
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ TRACED_FOREACH(int, n, kTestCases) { f.Call(n); }
}
- masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
- &before_exit);
- CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
- kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kEagerDeoptExitSize);
}
}
+struct MoveObjectAndSlotTestCase {
+ const char* comment;
+ Register dst_object;
+ Register dst_slot;
+ Register object;
+ Register offset_register = no_reg;
+};
+
+const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
+ {"no overlap", x0, x1, x2},
+ {"no overlap", x0, x1, x2, x3},
+
+ {"object == dst_object", x2, x1, x2},
+ {"object == dst_object", x2, x1, x2, x3},
+
+ {"object == dst_slot", x1, x2, x2},
+ {"object == dst_slot", x1, x2, x2, x3},
+
+ {"offset == dst_object", x0, x1, x2, x0},
+
+ {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
+
+ {"offset == dst_slot", x0, x1, x2, x1},
+
+ {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
+
+// Make sure we include offsets that cannot be encoded in an add instruction.
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
+
+template <typename T>
+class MacroAssemblerTestWithParam : public MacroAssemblerTest,
+ public ::testing::WithParamInterface<T> {};
+
+using MacroAssemblerTestMoveObjectAndSlot =
+ MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+
+TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+ const MoveObjectAndSlotTestCase test_case = GetParam();
+ TRACED_FOREACH(int32_t, offset, kOffsets) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+
+ {
+ AssemblerBufferWriteScope rw_buffer_scope(*buffer);
+
+ __ CodeEntry();
+ __ Push(x0, padreg);
+ __ Mov(test_case.object, x1);
+
+ Register src_object = test_case.object;
+ Register dst_object = test_case.dst_object;
+ Register dst_slot = test_case.dst_slot;
+
+ Operand offset_operand(0);
+ if (test_case.offset_register == no_reg) {
+ offset_operand = Operand(offset);
+ } else {
+ __ Mov(test_case.offset_register, Operand(offset));
+ offset_operand = Operand(test_case.offset_register);
+ }
+
+ std::stringstream comment;
+ comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
+ << dst_object << ", " << dst_slot << ", " << src_object << ", ";
+ if (test_case.offset_register == no_reg) {
+ comment << "#" << offset;
+ } else {
+ comment << test_case.offset_register;
+ }
+ comment << ") --";
+ __ RecordComment(comment.str().c_str());
+ __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
+ __ RecordComment("--");
+
+ // The `result` pointer was saved on the stack.
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Pop(padreg, scratch);
+ __ Str(dst_object, MemOperand(scratch));
+ __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
+
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+ if (v8_flags.print_code) {
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
+ StdoutStream os;
+ code->Print(os);
+ }
+ }
+
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
+ buffer->start());
+
+ byte* object = new byte[offset];
+ byte* result[] = {nullptr, nullptr};
+
+ f.Call(result, object);
+
+ // The first element must be the address of the object, and the second the
+ // slot addressed by `offset`.
+ EXPECT_EQ(result[0], &object[0]);
+ EXPECT_EQ(result[1], &object[offset]);
+
+ delete[] object;
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest,
+ MacroAssemblerTestMoveObjectAndSlot,
+ ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
+
#undef __
+#undef ERROR_MESSAGE
-} // namespace test_macro_assembler_arm64
} // namespace internal
} // namespace v8
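Compared with the other ports, the arm64 version wraps code emission in an AssemblerBufferWriteScope and starts each generated function with CodeEntry(). A rough sketch of that framing, assuming the same unittest helpers as above:

```cpp
// Sketch of the arm64-specific framing, assuming the helpers used above.
auto buffer = AllocateAssemblerBuffer();
MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                    buffer->CreateView());
{
  // The buffer is writable only inside this scope, which matters on W^X
  // targets where a page cannot be writable and executable at the same time.
  AssemblerBufferWriteScope rw_scope(*buffer);

  // Marks a valid branch target at the entry point on builds with
  // control-flow integrity (BTI) enabled; effectively a no-op otherwise.
  masm.CodeEntry();
  masm.Ret();

  CodeDesc desc;
  masm.GetCode(isolate(), &desc);
}
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
f.Call();
```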
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-ia32-unittest.cc
index f0cb96d47d..cbf628ba88 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-ia32-unittest.cc
@@ -11,17 +11,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -29,16 +29,16 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ ret(0);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-loong64-unittest.cc
index 5334fb4be3..a2cc213cae 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-loong64-unittest.cc
@@ -12,33 +12,33 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the loong64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-mips64-unittest.cc
index c954ffcc65..92e3b1d6f8 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-mips64-unittest.cc
@@ -12,17 +12,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-ppc-unittest.cc
index 93ae7abafc..aabb988b29 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-ppc-unittest.cc
@@ -12,17 +12,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the ppc assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -62,23 +62,24 @@ TEST_F(TurboAssemblerTest, TestCheck) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, ReverseBitsU64) {
+TEST_F(MacroAssemblerTest, ReverseBitsU64) {
struct {
- uint64_t expected; uint64_t input;
+ uint64_t expected;
+ uint64_t input;
} values[] = {
- {0x0000000000000000, 0x0000000000000000},
- {0xffffffffffffffff, 0xffffffffffffffff},
- {0x8000000000000000, 0x0000000000000001},
- {0x0000000000000001, 0x8000000000000000},
- {0x800066aa22cc4488, 0x1122334455660001},
- {0x1122334455660001, 0x800066aa22cc4488},
- {0xffffffff00000000, 0x00000000ffffffff},
- {0x00000000ffffffff, 0xffffffff00000000},
- {0xff01020304050607, 0xe060a020c04080ff},
- {0xe060a020c04080ff, 0xff01020304050607},
+ {0x0000000000000000, 0x0000000000000000},
+ {0xffffffffffffffff, 0xffffffffffffffff},
+ {0x8000000000000000, 0x0000000000000001},
+ {0x0000000000000001, 0x8000000000000000},
+ {0x800066aa22cc4488, 0x1122334455660001},
+ {0x1122334455660001, 0x800066aa22cc4488},
+ {0xffffffff00000000, 0x00000000ffffffff},
+ {0x00000000ffffffff, 0xffffffff00000000},
+ {0xff01020304050607, 0xe060a020c04080ff},
+ {0xe060a020c04080ff, 0xff01020304050607},
};
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -87,28 +88,26 @@ TEST_F(TurboAssemblerTest, ReverseBitsU64) {
__ Pop(r4, r5);
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(),
- buffer->start());
- for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) {
+ auto f =
+ GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(), buffer->start());
+ for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) {
CHECK_EQ(values[i].expected, f.Call(values[i].input));
}
}
-TEST_F(TurboAssemblerTest, ReverseBitsU32) {
+TEST_F(MacroAssemblerTest, ReverseBitsU32) {
struct {
- uint64_t expected; uint64_t input;
+ uint64_t expected;
+ uint64_t input;
} values[] = {
- {0x00000000, 0x00000000},
- {0xffffffff, 0xffffffff},
- {0x00000001, 0x80000000},
- {0x80000000, 0x00000001},
- {0x22334455, 0xaa22cc44},
- {0xaa22cc44, 0x22334455},
+ {0x00000000, 0x00000000}, {0xffffffff, 0xffffffff},
+ {0x00000001, 0x80000000}, {0x80000000, 0x00000001},
+ {0x22334455, 0xaa22cc44}, {0xaa22cc44, 0x22334455},
};
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -117,11 +116,11 @@ TEST_F(TurboAssemblerTest, ReverseBitsU32) {
__ Pop(r4, r5);
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(),
- buffer->start());
- for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) {
+ auto f =
+ GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(), buffer->start());
+ for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) {
CHECK_EQ(values[i].expected, f.Call(values[i].input));
}
}
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-riscv-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-riscv-unittest.cc
index afda8d3603..8e74ae692c 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-riscv-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-riscv-unittest.cc
@@ -12,33 +12,33 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ masm.GetCode(nullptr, &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ masm.GetCode(nullptr, &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-s390-unittest.cc
index d86a09f67c..b371c841c5 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-s390-unittest.cc
@@ -12,17 +12,17 @@
namespace v8 {
namespace internal {
-#define __ tasm.
+#define __ masm.
// Test the s390 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
+ masm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc
index b7e5b0ffbe..9924b620ee 100644
--- a/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/macro-assembler-x64-unittest.cc
@@ -40,6 +40,57 @@
namespace v8 {
namespace internal {
+
+#define __ masm.
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+using MacroAssemblerX64Test = TestWithIsolate;
+
+TEST_F(MacroAssemblerX64Test, TestHardAbort) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(MacroAssemblerX64Test, TestCheck) {
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ movl(rax, Immediate(17));
+ __ cmpl(rax, arg_reg_1);
+ __ Check(Condition::not_equal, AbortReason::kNoReason);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm.GetCode(isolate(), &desc);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
namespace test_macro_assembler_x64 {
// Test the x64 assembler by compiling some simple functions into
@@ -51,8 +102,6 @@ namespace test_macro_assembler_x64 {
// This calling convention is used on Linux, with GCC, and on Mac OS,
// with GCC. A different convention is used on 64-bit windows.
-using MacroAssemblerX64Test = TestWithIsolate;
-
using F0 = int();
#define __ masm->
@@ -60,14 +109,14 @@ using F0 = int();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
__ pushq(kPtrComprCageBaseRegister);
#endif
__ InitializeRootRegister();
}
static void ExitCode(MacroAssembler* masm) {
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
__ popq(kPtrComprCageBaseRegister);
#endif
__ popq(kRootRegister);
@@ -468,7 +517,7 @@ TEST_F(MacroAssemblerX64Test, EmbeddedObj) {
code->Print(os);
#endif
using myF0 = Address();
- auto f = GeneratedCode<myF0>::FromAddress(isolate, code->entry());
+ auto f = GeneratedCode<myF0>::FromAddress(isolate, code->code_entry_point());
Object result = Object(f.Call());
CHECK_EQ(old_array->ptr(), result.ptr());
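Besides the migrated TestHardAbort/TestCheck pair, the ifdef guarding the cage base register changes from the shared-cage-only macro to plain V8_COMPRESS_POINTERS, so the register is now saved and restored whenever pointer compression is enabled, not only in the shared-cage configuration. The prologue/epilogue pair as it reads after the patch (sketch; register names come from V8's internal definitions):

```cpp
// Post-patch shape of the test prologue/epilogue (sketch only).
static void EntryCode(MacroAssembler* masm) {
  masm->pushq(kRootRegister);  // Smi constant register is callee save.
#ifdef V8_COMPRESS_POINTERS    // previously V8_COMPRESS_POINTERS_IN_SHARED_CAGE
  masm->pushq(kPtrComprCageBaseRegister);
#endif
  masm->InitializeRootRegister();
}

static void ExitCode(MacroAssembler* masm) {
#ifdef V8_COMPRESS_POINTERS
  masm->popq(kPtrComprCageBaseRegister);
#endif
  masm->popq(kRootRegister);
}
```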
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
deleted file mode 100644
index 77123ef565..0000000000
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/simulator.h"
-#include "src/utils/ostreams.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ tasm.
-
-// If we are running on android and the output is not redirected (i.e. ends up
-// in the android log) then we cannot find the error message in the output. This
-// macro just returns the empty string in that case.
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-#define ERROR_MESSAGE(msg) ""
-#else
-#define ERROR_MESSAGE(msg) msg
-#endif
-
-// Test the x64 assembler by compiling some simple functions into
-// a buffer and executing them. These tests do not initialize the
-// V8 library, create a context, or use any V8 objects.
-
-class TurboAssemblerTest : public TestWithIsolate {};
-
-TEST_F(TurboAssemblerTest, TestHardAbort) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- {
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- __ CodeEntry();
-
- __ Abort(AbortReason::kNoReason);
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- }
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
-
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
-}
-
-TEST_F(TurboAssemblerTest, TestCheck) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- {
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- __ CodeEntry();
-
- // Fail if the first parameter is 17.
- __ Mov(w1, Immediate(17));
- __ Cmp(w0, w1); // 1st parameter is in {w0}.
- __ Check(Condition::ne, AbortReason::kNoReason);
- __ Ret();
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- }
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
- f.Call(0);
- f.Call(18);
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
-}
-
-TEST_F(TurboAssemblerTest, CompareAndBranch) {
- const int kTestCases[] = {-42, 0, 42};
- static_assert(Condition::eq == 0);
- static_assert(Condition::le == 13);
- TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv
- Condition cond = static_cast<Condition>(cc);
- TRACED_FOREACH(int, imm, kTestCases) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{},
- CodeObjectRequired::kNo, buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- {
- AssemblerBufferWriteScope rw_scope(*buffer);
-
- __ CodeEntry();
-
- Label start, lab;
- __ Bind(&start);
- __ CompareAndBranch(x0, Immediate(imm), cond, &lab);
- if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) ||
- (cond == ls))) { // One instruction generated
- ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start));
- } else { // Two instructions generated
- ASSERT_EQ(static_cast<uint8_t>(2 * kInstrSize),
- __ SizeOfCodeGeneratedSince(&start));
- }
- __ Cmp(x0, Immediate(imm));
- __ Check(NegateCondition(cond),
- AbortReason::kNoReason); // cond must not hold
- __ Ret();
- __ Bind(&lab); // Branch leads here
- __ Cmp(x0, Immediate(imm));
- __ Check(cond, AbortReason::kNoReason); // cond must hold
- __ Ret();
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- }
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
- TRACED_FOREACH(int, n, kTestCases) { f.Call(n); }
- }
- }
-}
-
-struct MoveObjectAndSlotTestCase {
- const char* comment;
- Register dst_object;
- Register dst_slot;
- Register object;
- Register offset_register = no_reg;
-};
-
-const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
- {"no overlap", x0, x1, x2},
- {"no overlap", x0, x1, x2, x3},
-
- {"object == dst_object", x2, x1, x2},
- {"object == dst_object", x2, x1, x2, x3},
-
- {"object == dst_slot", x1, x2, x2},
- {"object == dst_slot", x1, x2, x2, x3},
-
- {"offset == dst_object", x0, x1, x2, x0},
-
- {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
-
- {"offset == dst_slot", x0, x1, x2, x1},
-
- {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
-
-// Make sure we include offsets that cannot be encoded in an add instruction.
-const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
-
-template <typename T>
-class TurboAssemblerTestWithParam : public TurboAssemblerTest,
- public ::testing::WithParamInterface<T> {};
-
-using TurboAssemblerTestMoveObjectAndSlot =
- TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
-
-TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
- const MoveObjectAndSlotTestCase test_case = GetParam();
- TRACED_FOREACH(int32_t, offset, kOffsets) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
-
- {
- AssemblerBufferWriteScope rw_buffer_scope(*buffer);
-
- __ CodeEntry();
- __ Push(x0, padreg);
- __ Mov(test_case.object, x1);
-
- Register src_object = test_case.object;
- Register dst_object = test_case.dst_object;
- Register dst_slot = test_case.dst_slot;
-
- Operand offset_operand(0);
- if (test_case.offset_register == no_reg) {
- offset_operand = Operand(offset);
- } else {
- __ Mov(test_case.offset_register, Operand(offset));
- offset_operand = Operand(test_case.offset_register);
- }
-
- std::stringstream comment;
- comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
- << dst_object << ", " << dst_slot << ", " << src_object << ", ";
- if (test_case.offset_register == no_reg) {
- comment << "#" << offset;
- } else {
- comment << test_case.offset_register;
- }
- comment << ") --";
- __ RecordComment(comment.str().c_str());
- __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
- __ RecordComment("--");
-
- // The `result` pointer was saved on the stack.
- UseScratchRegisterScope temps(&tasm);
- Register scratch = temps.AcquireX();
- __ Pop(padreg, scratch);
- __ Str(dst_object, MemOperand(scratch));
- __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
-
- __ Ret();
-
- CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
- if (v8_flags.print_code) {
- Handle<Code> code =
- Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING)
- .Build();
- StdoutStream os;
- code->Print(os);
- }
- }
-
- // We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
- buffer->start());
-
- byte* object = new byte[offset];
- byte* result[] = {nullptr, nullptr};
-
- f.Call(result, object);
-
- // The first element must be the address of the object, and the second the
- // slot addressed by `offset`.
- EXPECT_EQ(result[0], &object[0]);
- EXPECT_EQ(result[1], &object[offset]);
-
- delete[] object;
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
- TurboAssemblerTestMoveObjectAndSlot,
- ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
-
-#undef __
-#undef ERROR_MESSAGE
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
deleted file mode 100644
index 43dd6b79d6..0000000000
--- a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/simulator.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ tasm.
-
-// Test the x64 assembler by compiling some simple functions into
-// a buffer and executing them. These tests do not initialize the
-// V8 library, create a context, or use any V8 objects.
-
-class TurboAssemblerTest : public TestWithIsolate {};
-
-TEST_F(TurboAssemblerTest, TestHardAbort) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- __ Abort(AbortReason::kNoReason);
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- buffer->MakeExecutable();
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
-
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
-}
-
-TEST_F(TurboAssemblerTest, TestCheck) {
- auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
- buffer->CreateView());
- __ set_root_array_available(false);
- __ set_abort_hard(true);
-
- // Fail if the first parameter is 17.
- __ movl(rax, Immediate(17));
- __ cmpl(rax, arg_reg_1);
- __ Check(Condition::not_equal, AbortReason::kNoReason);
- __ ret(0);
-
- CodeDesc desc;
- tasm.GetCode(isolate(), &desc);
- buffer->MakeExecutable();
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
- f.Call(0);
- f.Call(18);
- ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/base/ieee754-unittest.cc b/deps/v8/test/unittests/base/ieee754-unittest.cc
index dbba16a313..e22f3b86b2 100644
--- a/deps/v8/test/unittests/base/ieee754-unittest.cc
+++ b/deps/v8/test/unittests/base/ieee754-unittest.cc
@@ -131,6 +131,175 @@ TEST(Ieee754, Atanh) {
EXPECT_DOUBLE_EQ(0.54930614433405478, atanh(0.5));
}
+#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
+TEST(Ieee754, LibmCos) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(libm_cos(kQNaN), IsNaN());
+ EXPECT_THAT(libm_cos(kSNaN), IsNaN());
+ EXPECT_THAT(libm_cos(kInfinity), IsNaN());
+ EXPECT_THAT(libm_cos(-kInfinity), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / libm_cos(-0.0));
+ EXPECT_EQ(1.0, 1 / libm_cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, libm_cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, libm_cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, libm_cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, libm_cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, libm_cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, libm_cos(0.39269908169872414));
+ // Test KERNELTAN for |x| < 0.67434.
+ EXPECT_EQ(0.9238795325112867, libm_cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, libm_cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, libm_cos(0.25));
+ EXPECT_EQ(0.8775825618903728, libm_cos(0.5));
+ EXPECT_EQ(0.7073882691671998, libm_cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, libm_cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, libm_cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, libm_cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, libm_cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, libm_cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, libm_cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, libm_cos(1048575.0 / 2 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, libm_cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, libm_cos(-kTwo120));
+}
+
+TEST(Ieee754, LibmSin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(libm_sin(kQNaN), IsNaN());
+ EXPECT_THAT(libm_sin(kSNaN), IsNaN());
+ EXPECT_THAT(libm_sin(kInfinity), IsNaN());
+ EXPECT_THAT(libm_sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, Divide(1.0, libm_sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, libm_sin(0.0)));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, libm_sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, libm_sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, libm_sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, libm_sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, libm_sin(0.5));
+ EXPECT_EQ(-0.479425538604203, libm_sin(-0.5));
+ EXPECT_EQ(1, libm_sin(kPI / 2.0));
+ EXPECT_EQ(-1, libm_sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, libm_sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, libm_sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, libm_sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, libm_sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, libm_sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, libm_sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, libm_sin(1048576.0 / 4 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(0.377820109360752e0, libm_sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, libm_sin(-kTwo120));
+}
+
+TEST(Ieee754, FdlibmCos) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(fdlibm_cos(kQNaN), IsNaN());
+ EXPECT_THAT(fdlibm_cos(kSNaN), IsNaN());
+ EXPECT_THAT(fdlibm_cos(kInfinity), IsNaN());
+ EXPECT_THAT(fdlibm_cos(-kInfinity), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / fdlibm_cos(-0.0));
+ EXPECT_EQ(1.0, 1 / fdlibm_cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, fdlibm_cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, fdlibm_cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, fdlibm_cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, fdlibm_cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, fdlibm_cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, fdlibm_cos(0.39269908169872414));
+ // Test KERNELTAN for |x| < 0.67434.
+ EXPECT_EQ(0.9238795325112867, fdlibm_cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, fdlibm_cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, fdlibm_cos(0.25));
+ EXPECT_EQ(0.8775825618903728, fdlibm_cos(0.5));
+ EXPECT_EQ(0.7073882691671998, fdlibm_cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, fdlibm_cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, fdlibm_cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, fdlibm_cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, fdlibm_cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, fdlibm_cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, fdlibm_cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, fdlibm_cos(1048575.0 / 2 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, fdlibm_cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, fdlibm_cos(-kTwo120));
+}
+
+TEST(Ieee754, FdlibmSin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(fdlibm_sin(kQNaN), IsNaN());
+ EXPECT_THAT(fdlibm_sin(kSNaN), IsNaN());
+ EXPECT_THAT(fdlibm_sin(kInfinity), IsNaN());
+ EXPECT_THAT(fdlibm_sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, Divide(1.0, fdlibm_sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, fdlibm_sin(0.0)));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, fdlibm_sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, fdlibm_sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, fdlibm_sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, fdlibm_sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, fdlibm_sin(0.5));
+ EXPECT_EQ(-0.479425538604203, fdlibm_sin(-0.5));
+ EXPECT_EQ(1, fdlibm_sin(kPI / 2.0));
+ EXPECT_EQ(-1, fdlibm_sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, fdlibm_sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, fdlibm_sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, fdlibm_sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, fdlibm_sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, fdlibm_sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, fdlibm_sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, fdlibm_sin(1048576.0 / 4 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(0.377820109360752e0, fdlibm_sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, fdlibm_sin(-kTwo120));
+}
+
+#else
+
TEST(Ieee754, Cos) {
// Test values mentioned in the EcmaScript spec.
EXPECT_THAT(cos(kQNaN), IsNaN());
@@ -177,6 +346,45 @@ TEST(Ieee754, Cos) {
EXPECT_EQ(-0.9258790228548379e0, cos(-kTwo120));
}
+TEST(Ieee754, Sin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(sin(kQNaN), IsNaN());
+ EXPECT_THAT(sin(kSNaN), IsNaN());
+ EXPECT_THAT(sin(kInfinity), IsNaN());
+ EXPECT_THAT(sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, Divide(1.0, sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, sin(0.0)));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, sin(0.5));
+ EXPECT_EQ(-0.479425538604203, sin(-0.5));
+ EXPECT_EQ(1, sin(kPI / 2.0));
+ EXPECT_EQ(-1, sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
+
+ // Test Hayne-Panek reduction.
+ EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
+}
+
+#endif
+
TEST(Ieee754, Cosh) {
// Test values mentioned in the EcmaScript spec.
EXPECT_THAT(cosh(kQNaN), IsNaN());
@@ -306,43 +514,6 @@ TEST(Ieee754, Cbrt) {
EXPECT_EQ(46.415888336127786, cbrt(100000));
}
-TEST(Ieee754, Sin) {
- // Test values mentioned in the EcmaScript spec.
- EXPECT_THAT(sin(kQNaN), IsNaN());
- EXPECT_THAT(sin(kSNaN), IsNaN());
- EXPECT_THAT(sin(kInfinity), IsNaN());
- EXPECT_THAT(sin(-kInfinity), IsNaN());
-
- // Tests for sin for |x| < pi/4
- EXPECT_EQ(-kInfinity, Divide(1.0, sin(-0.0)));
- EXPECT_EQ(kInfinity, Divide(1.0, sin(0.0)));
- // sin(x) = x for x < 2^-27
- EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
- EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
- // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
- EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
- EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
-
- // Tests for sin.
- EXPECT_EQ(0.479425538604203, sin(0.5));
- EXPECT_EQ(-0.479425538604203, sin(-0.5));
- EXPECT_EQ(1, sin(kPI / 2.0));
- EXPECT_EQ(-1, sin(-kPI / 2.0));
- // Test that sin(Math.PI) != 0 since Math.PI is not exact.
- EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
- EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
- // Test sin for various phases.
- EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
- EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
- EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
- EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
- EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
-
- // Test Payne-Hanek reduction.
- EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
- EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
-}
-
TEST(Ieee754, Sinh) {
// Test values mentioned in the EcmaScript spec.
EXPECT_THAT(sinh(kQNaN), IsNaN());
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index 4858e08544..26d740ca52 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -498,29 +498,39 @@ TEST(ThreadTicks, MAYBE_ThreadNow) {
EXPECT_GT(begin_thread, ThreadTicks());
int iterations_count = 0;
+#if V8_OS_WIN && V8_HOST_ARCH_ARM64
+ // The implementation of ThreadTicks::Now() is quite imprecise on arm64
+ // Windows, so the following test often fails with the default 10ms. By
+ // increasing to 100ms, we can make the test reliable.
+ const int limit_ms = 100;
+#else
+ const int limit_ms = 10;
+#endif
+ const int limit_us = limit_ms * 1000;
+
// Some systems have low resolution thread timers, this code makes sure
// that thread time has progressed by at least one tick.
// Limit waiting to 10ms to prevent infinite loops.
while (ThreadTicks::Now() == begin_thread &&
- ((TimeTicks::Now() - begin).InMicroseconds() < 10000)) {
+ ((TimeTicks::Now() - begin).InMicroseconds() < limit_us)) {
}
EXPECT_GT(ThreadTicks::Now(), begin_thread);
do {
// Sleep for 10 milliseconds to get the thread de-scheduled.
- OS::Sleep(base::TimeDelta::FromMilliseconds(10));
+ OS::Sleep(base::TimeDelta::FromMilliseconds(limit_ms));
end_thread = ThreadTicks::Now();
end = TimeTicks::Now();
delta = end - begin;
EXPECT_LE(++iterations_count, 2); // fail after 2 attempts.
} while (delta.InMicroseconds() <
- 10000); // Make sure that the OS did sleep for at least 10 ms.
+ limit_us); // Make sure that the OS did sleep for at least 10 ms.
TimeDelta delta_thread = end_thread - begin_thread;
// Make sure that some thread time have elapsed.
EXPECT_GT(delta_thread.InMicroseconds(), 0);
// But the thread time is at least 9ms less than clock time.
TimeDelta difference = delta - delta_thread;
- EXPECT_GE(difference.InMicroseconds(), 9000);
+ EXPECT_GE(difference.InMicroseconds(), limit_us * 9 / 10);
}
}
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
index 2af95c93f6..cd519d852c 100644
--- a/deps/v8/test/unittests/base/threaded-list-unittest.cc
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -316,5 +316,42 @@ TEST_F(ThreadedListTest, ConstIterComp) {
CHECK(found_first);
}
+TEST_F(ThreadedListTest, RemoveAt) {
+ auto it = list.begin();
+
+ // Removing first
+ ThreadedListTestNode* to_remove = list.first();
+ it = list.RemoveAt(it);
+ EXPECT_EQ(to_remove, &nodes[0]);
+ EXPECT_EQ(list.first(), &nodes[1]);
+ EXPECT_EQ(it, list.begin());
+ EXPECT_EQ(*it, &nodes[1]);
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(to_remove), nullptr);
+ EXPECT_FALSE(list.Contains(to_remove));
+ EXPECT_EQ(list.LengthForTest(), 4);
+ list.Verify();
+
+ // Removing in the middle
+ ++it;
+ to_remove = *it;
+ it = list.RemoveAt(it);
+ EXPECT_EQ(*it, &nodes[3]);
+ EXPECT_FALSE(list.Contains(to_remove));
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(to_remove), nullptr);
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(&nodes[1]), &nodes[3]);
+ EXPECT_EQ(list.LengthForTest(), 3);
+ list.Verify();
+
+ // Removing last
+ ++it;
+ to_remove = *it;
+ it = list.RemoveAt(it);
+ EXPECT_EQ(it, list.end());
+ EXPECT_FALSE(list.Contains(to_remove));
+ EXPECT_EQ(*ThreadedListTestNode::OtherTraits::next(&nodes[4]), nullptr);
+ EXPECT_EQ(list.LengthForTest(), 2);
+ list.Verify();
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/vector-unittest.cc b/deps/v8/test/unittests/base/vector-unittest.cc
index 32058e42af..d7c6fc37f7 100644
--- a/deps/v8/test/unittests/base/vector-unittest.cc
+++ b/deps/v8/test/unittests/base/vector-unittest.cc
@@ -60,7 +60,7 @@ TEST(VectorTest, Equals) {
EXPECT_TRUE(vec3_char != vec1_const_char);
}
-TEST(OwnedVectorConstruction, Equals) {
+TEST(OwnedVectorTest, Equals) {
auto int_vec = base::OwnedVector<int>::New(4);
EXPECT_EQ(4u, int_vec.size());
auto find_non_zero = [](int i) { return i != 0; };
@@ -76,6 +76,31 @@ TEST(OwnedVectorConstruction, Equals) {
EXPECT_EQ(init_vec1.as_vector(), init_vec2.as_vector());
}
+TEST(OwnedVectorTest, MoveConstructionAndAssignment) {
+ constexpr int kValues[] = {4, 11, 3};
+ auto int_vec = base::OwnedVector<int>::Of(kValues);
+ EXPECT_EQ(3u, int_vec.size());
+
+ auto move_constructed_vec = std::move(int_vec);
+ EXPECT_EQ(move_constructed_vec.as_vector(), base::ArrayVector(kValues));
+
+ auto move_assigned_to_empty = base::OwnedVector<int>{};
+ move_assigned_to_empty = std::move(move_constructed_vec);
+ EXPECT_EQ(move_assigned_to_empty.as_vector(), base::ArrayVector(kValues));
+
+ auto move_assigned_to_non_empty = base::OwnedVector<int>::New(2);
+ move_assigned_to_non_empty = std::move(move_assigned_to_empty);
+ EXPECT_EQ(move_assigned_to_non_empty.as_vector(), base::ArrayVector(kValues));
+
+ // All but the last vector must be empty (length 0, nullptr data).
+ EXPECT_TRUE(int_vec.empty());
+ EXPECT_TRUE(int_vec.begin() == nullptr);
+ EXPECT_TRUE(move_constructed_vec.empty());
+ EXPECT_TRUE(move_constructed_vec.begin() == nullptr);
+ EXPECT_TRUE(move_assigned_to_empty.empty());
+ EXPECT_TRUE(move_assigned_to_empty.begin() == nullptr);
+}
+
// Test that the constexpr factory methods work.
TEST(VectorTest, ConstexprFactories) {
static constexpr int kInit1[] = {4, 11, 3};
diff --git a/deps/v8/test/unittests/codegen/code-layout-unittest.cc b/deps/v8/test/unittests/codegen/code-layout-unittest.cc
index 1586c3a27d..40dbdea5dc 100644
--- a/deps/v8/test/unittests/codegen/code-layout-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-layout-unittest.cc
@@ -47,11 +47,10 @@ TEST_F(CodeLayoutTest, CodeLayoutWithoutUnwindingInfo) {
.Build();
CHECK(!code->has_unwinding_info());
- CHECK_EQ(code->raw_instruction_size(), buffer_size);
- CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->raw_instruction_start()),
- buffer, buffer_size));
- CHECK_EQ(static_cast<int>(code->raw_instruction_end() -
- code->raw_instruction_start()),
+ CHECK_EQ(code->InstructionSize(), buffer_size);
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->InstructionStart()), buffer,
+ buffer_size));
+ CHECK_EQ(static_cast<int>(code->InstructionEnd() - code->InstructionStart()),
buffer_size);
}
@@ -94,16 +93,16 @@ TEST_F(CodeLayoutTest, CodeLayoutWithUnwindingInfo) {
.Build();
CHECK(code->has_unwinding_info());
- CHECK_EQ(code->raw_body_size(), buffer_size + unwinding_info_size);
- CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->raw_instruction_start()),
- buffer, buffer_size));
+ CHECK_EQ(code->body_size(), buffer_size + unwinding_info_size);
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->InstructionStart()), buffer,
+ buffer_size));
CHECK_EQ(code->unwinding_info_size(), unwinding_info_size);
CHECK_EQ(memcmp(reinterpret_cast<void*>(code->unwinding_info_start()),
unwinding_info, unwinding_info_size),
0);
- CHECK_EQ(static_cast<int>(code->unwinding_info_end() -
- code->raw_instruction_start()),
- buffer_size + unwinding_info_size);
+ CHECK_EQ(
+ static_cast<int>(code->unwinding_info_end() - code->InstructionStart()),
+ buffer_size + unwinding_info_size);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/codegen/code-pages-unittest.cc b/deps/v8/test/unittests/codegen/code-pages-unittest.cc
index 6fc67e4ed0..e8a156581e 100644
--- a/deps/v8/test/unittests/codegen/code-pages-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-pages-unittest.cc
@@ -149,11 +149,11 @@ TEST_F(CodePagesTest, OptimizedCodeWithCodeRange) {
Handle<JSFunction> foo =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*local_foo));
- CodeT codet = foo->code();
+ Code code = foo->code();
// We don't produce optimized code when run with --no-turbofan and
// --no-maglev.
- if (!codet.is_optimized_code()) return;
- Code foo_code = FromCodeT(codet);
+ if (!code.is_optimized_code()) return;
+ InstructionStream foo_code = FromCode(code);
EXPECT_TRUE(i_isolate()->heap()->InSpace(foo_code, CODE_SPACE));
@@ -199,11 +199,11 @@ TEST_F(CodePagesTest, OptimizedCodeWithCodePages) {
EXPECT_TRUE(v8_flags.always_sparkplug);
return;
}
- CodeT codet = foo->code();
+ Code code = foo->code();
// We don't produce optimized code when run with --no-turbofan and
// --no-maglev.
- if (!codet.is_optimized_code()) return;
- Code foo_code = FromCodeT(codet);
+ if (!code.is_optimized_code()) return;
+ InstructionStream foo_code = FromCode(code);
EXPECT_TRUE(i_isolate()->heap()->InSpace(foo_code, CODE_SPACE));
@@ -268,6 +268,8 @@ TEST_F(CodePagesTest, LargeCodeObject) {
// We don't want incremental marking to start which could cause the code to
// not be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope(i_isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
if (!i_isolate()->RequiresCodeRange() && !kHaveCodePages) return;
@@ -293,18 +295,20 @@ TEST_F(CodePagesTest, LargeCodeObject) {
Handle<Code> foo_code =
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
.Build();
+ Handle<InstructionStream> foo_istream(foo_code->instruction_stream(),
+ i_isolate());
- EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_code, CODE_LO_SPACE));
+ EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_istream, CODE_LO_SPACE));
std::vector<MemoryRange>* pages = i_isolate()->GetCodePages();
if (i_isolate()->RequiresCodeRange()) {
- EXPECT_TRUE(PagesContainsAddress(pages, foo_code->address()));
+ EXPECT_TRUE(PagesContainsAddress(pages, foo_istream->address()));
} else {
- EXPECT_TRUE(PagesHasExactPage(pages, foo_code->address()));
+ EXPECT_TRUE(PagesHasExactPage(pages, foo_istream->address()));
}
- stale_code_address = foo_code->address();
+ stale_code_address = foo_istream->address();
}
// Delete the large code object.
@@ -383,6 +387,8 @@ TEST_F(CodePagesTest, LargeCodeObjectWithSignalHandler) {
// We don't want incremental marking to start which could cause the code to
// not be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope(i_isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
if (!i_isolate()->RequiresCodeRange() && !kHaveCodePages) return;
@@ -417,8 +423,10 @@ TEST_F(CodePagesTest, LargeCodeObjectWithSignalHandler) {
Handle<Code> foo_code =
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
.Build();
+ Handle<InstructionStream> foo_istream(foo_code->instruction_stream(),
+ i_isolate());
- EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_code, CODE_LO_SPACE));
+ EXPECT_TRUE(i_isolate()->heap()->InSpace(*foo_istream, CODE_LO_SPACE));
// Do a synchronous sample to ensure that we capture the state with the
// extra code page.
@@ -429,12 +437,12 @@ TEST_F(CodePagesTest, LargeCodeObjectWithSignalHandler) {
std::vector<MemoryRange> pages =
SamplingThread::DoSynchronousSample(isolate());
if (i_isolate()->RequiresCodeRange()) {
- EXPECT_TRUE(PagesContainsAddress(&pages, foo_code->address()));
+ EXPECT_TRUE(PagesContainsAddress(&pages, foo_istream->address()));
} else {
- EXPECT_TRUE(PagesHasExactPage(&pages, foo_code->address()));
+ EXPECT_TRUE(PagesHasExactPage(&pages, foo_istream->address()));
}
- stale_code_address = foo_code->address();
+ stale_code_address = foo_istream->address();
}
// Start async sampling again to detect threading issues.
@@ -459,6 +467,8 @@ TEST_F(CodePagesTest, Sorted) {
// We don't want incremental marking to start which could cause the code to
// not be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope(i_isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
if (!i_isolate()->RequiresCodeRange() && !kHaveCodePages) return;
@@ -487,11 +497,14 @@ TEST_F(CodePagesTest, Sorted) {
};
{
HandleScope outer_scope(i_isolate());
- Handle<Code> code1, code3;
+ Handle<InstructionStream> code1, code3;
Address code2_address;
- code1 = Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
- .Build();
+ code1 =
+ handle(Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
+ .Build()
+ ->instruction_stream(),
+ i_isolate());
EXPECT_TRUE(i_isolate()->heap()->InSpace(*code1, CODE_LO_SPACE));
{
@@ -499,12 +512,17 @@ TEST_F(CodePagesTest, Sorted) {
// Create three large code objects, we'll delete the middle one and check
// everything is still sorted.
- Handle<Code> code2 =
+ Handle<InstructionStream> code2 = handle(
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
- .Build();
+ .Build()
+ ->instruction_stream(),
+ i_isolate());
EXPECT_TRUE(i_isolate()->heap()->InSpace(*code2, CODE_LO_SPACE));
- code3 = Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
- .Build();
+ code3 = handle(
+ Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION)
+ .Build()
+ ->instruction_stream(),
+ i_isolate());
EXPECT_TRUE(i_isolate()->heap()->InSpace(*code3, CODE_LO_SPACE));
code2_address = code2->address();
diff --git a/deps/v8/test/unittests/codegen/factory-unittest.cc b/deps/v8/test/unittests/codegen/factory-unittest.cc
index 3fb9140d1b..7d6f6ea45c 100644
--- a/deps/v8/test/unittests/codegen/factory-unittest.cc
+++ b/deps/v8/test/unittests/codegen/factory-unittest.cc
@@ -35,7 +35,8 @@ TEST_F(FactoryCodeBuilderTest, Factory_CodeBuilder) {
Handle<Code> code =
Factory::CodeBuilder(i_isolate(), desc, CodeKind::WASM_FUNCTION).Build();
- CHECK(i_isolate()->heap()->InSpace(*code, CODE_LO_SPACE));
+ CHECK(
+ i_isolate()->heap()->InSpace(code->instruction_stream(), CODE_LO_SPACE));
#if VERIFY_HEAP
code->ObjectVerify(i_isolate());
#endif
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 1eb4320041..57b22c11ac 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -448,19 +448,28 @@ TEST_F(BytecodeAnalysisTest, SuspendPoint) {
interpreter::BytecodeJumpTable* gen_jump_table =
builder.AllocateJumpTable(1, 0);
+ builder.SwitchOnGeneratorState(reg_gen, gen_jump_table);
+ expected_liveness.emplace_back("..L.", "..L.");
+
+ builder.LoadUndefined();
+ expected_liveness.emplace_back("....", "...L");
+
+ // Store some arbitrary value into the generator register so that this
+ // register is dead by the time we reach SwitchOnGeneratorState (this matches
+ // real generator bytecode and is DCHECKed in the bytecode analysis).
builder.StoreAccumulatorInRegister(reg_gen);
- expected_liveness.emplace_back("L..L", "L.LL");
+ expected_liveness.emplace_back("...L", "..L.");
- // Note: technically, r0 should be dead here since the resume will write it,
- // but in practice the bytecode analysis doesn't bother to special case it,
- // since the generator switch is close to the top of the function anyway.
- builder.SwitchOnGeneratorState(reg_gen, gen_jump_table);
- expected_liveness.emplace_back("L.LL", "L.LL");
+ builder.LoadUndefined();
+ expected_liveness.emplace_back("..L.", "..LL");
+ // Reg 0 is read after the resume, so should be live up to here (and is killed
+ // here).
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back("..LL", "L.LL");
- // Reg 1 is never read, so should be dead.
+ // Reg 1 is never read, so should be dead already and this store shouldn't
+ // change it.
builder.StoreAccumulatorInRegister(reg_1);
expected_liveness.emplace_back("L.LL", "L.LL");
diff --git a/deps/v8/test/unittests/compiler/codegen-tester.h b/deps/v8/test/unittests/compiler/codegen-tester.h
index 22ceedd382..146d9907f7 100644
--- a/deps/v8/test/unittests/compiler/codegen-tester.h
+++ b/deps/v8/test/unittests/compiler/codegen-tester.h
@@ -77,20 +77,16 @@ class RawMachineAssemblerTester : public CallHelper<ReturnType>,
return code_.ToHandleChecked();
}
- Handle<CodeT> GetCodeT() { return ToCodeT(GetCode(), isolate_); }
-
protected:
Address Generate() override {
if (code_.is_null()) {
- Schedule* schedule = this->ExportForTest();
- auto call_descriptor = this->call_descriptor();
- Graph* graph = this->graph();
+ Schedule* schedule = ExportForTest();
OptimizedCompilationInfo info(base::ArrayVector("testing"), zone_, kind_);
code_ = Pipeline::GenerateCodeForTesting(
- &info, isolate_, call_descriptor, graph,
+ &info, isolate_, call_descriptor(), graph(),
AssemblerOptions::Default(isolate_), schedule);
}
- return this->code_.ToHandleChecked()->entry();
+ return code_.ToHandleChecked()->code_entry_point();
}
Zone* zone() { return zone_; }
diff --git a/deps/v8/test/unittests/compiler/compiler-unittest.cc b/deps/v8/test/unittests/compiler/compiler-unittest.cc
index abef44976c..420ed5edef 100644
--- a/deps/v8/test/unittests/compiler/compiler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/compiler-unittest.cc
@@ -613,10 +613,12 @@ TEST_F(CompilerTest, CompileFunctionScriptOrigin) {
v8::ScriptCompiler::CompileFunction(context(), &script_source)
.ToLocalChecked();
EXPECT_TRUE(!fun.IsEmpty());
- v8::Local<v8::UnboundScript> script =
- fun->GetUnboundScript().ToLocalChecked();
- EXPECT_TRUE(!script.IsEmpty());
- EXPECT_TRUE(script->GetScriptName()->StrictEquals(NewString("test")));
+ auto fun_i = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*fun));
+ EXPECT_TRUE(fun_i->shared().IsSharedFunctionInfo());
+ EXPECT_TRUE(
+ Utils::ToLocal(i::handle(i::Script::cast(fun_i->shared().script()).name(),
+ i_isolate()))
+ ->StrictEquals(NewString("test")));
v8::TryCatch try_catch(isolate());
isolate()->SetCaptureStackTraceForUncaughtExceptions(true);
EXPECT_TRUE(fun->Call(context(), context()->Global(), 0, nullptr).IsEmpty());
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 863ddd8f50..fe37c015fd 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -93,7 +93,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
{
Node* node = Parameter(
- Type::Constant(broker(), factory()->minus_zero_value(), zone()));
+ Type::Constant(broker(), broker()->minus_zero_value(), zone()));
Node* use_value = UseValue(node);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -148,7 +148,7 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithNaN) {
}
{
Node* node =
- Parameter(Type::Constant(broker(), factory()->nan_value(), zone()));
+ Parameter(Type::Constant(broker(), broker()->nan_value(), zone()));
Node* use_value = UseValue(node);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -215,7 +215,7 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
Type::Union(
Type::Undetectable(),
Type::Union(
- Type::Constant(broker(), factory()->false_value(),
+ Type::Constant(broker(), broker()->false_value(),
zone()),
Type::Range(0.0, 0.0, zone()), zone()),
zone()),
@@ -234,7 +234,7 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
TEST_F(ConstantFoldingReducerTest, ToBooleanWithTruish) {
Node* input = Parameter(
Type::Union(
- Type::Constant(broker(), factory()->true_value(), zone()),
+ Type::Constant(broker(), broker()->true_value(), zone()),
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
zone()),
0);
diff --git a/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
index 567c276ba0..bd28d1dbca 100644
--- a/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
@@ -29,7 +29,8 @@ class CsaLoadEliminationTest : public GraphTest {
machine()),
reducer_(zone(), graph(), tick_counter(), broker()),
csa_(reducer(), jsgraph(), zone()),
- mcr_(reducer(), jsgraph()) {
+ mcr_(reducer(), jsgraph(),
+ MachineOperatorReducer::kPropagateSignallingNan) {
reducer()->AddReducer(&csa_);
reducer()->AddReducer(&mcr_);
}
diff --git a/deps/v8/test/unittests/compiler/function-tester.cc b/deps/v8/test/unittests/compiler/function-tester.cc
index d6951da6f7..d7e7356c76 100644
--- a/deps/v8/test/unittests/compiler/function-tester.cc
+++ b/deps/v8/test/unittests/compiler/function-tester.cc
@@ -61,12 +61,9 @@ FunctionTester::FunctionTester(Isolate* isolate, Handle<Code> code,
flags_(0) {
CHECK(!code.is_null());
Compile(function);
- function->set_code(ToCodeT(*code), kReleaseStore);
+ function->set_code(*code, kReleaseStore);
}
-FunctionTester::FunctionTester(Isolate* isolate, Handle<Code> code)
- : FunctionTester(isolate, code, 0) {}
-
void FunctionTester::CheckThrows(Handle<Object> a) {
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
MaybeHandle<Object> no_result = Call(a);
@@ -192,11 +189,9 @@ Handle<JSFunction> FunctionTester::Optimize(
CHECK(info.shared_info()->HasBytecodeArray());
JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
- Handle<CodeT> code = ToCodeT(
+ Handle<Code> code =
compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
- .ToHandleChecked(),
- isolate);
- info.native_context().AddOptimizedCode(*code);
+ .ToHandleChecked();
function->set_code(*code, v8::kReleaseStore);
return function;
}
diff --git a/deps/v8/test/unittests/compiler/function-tester.h b/deps/v8/test/unittests/compiler/function-tester.h
index aededaa4ee..a5af93e9af 100644
--- a/deps/v8/test/unittests/compiler/function-tester.h
+++ b/deps/v8/test/unittests/compiler/function-tester.h
@@ -23,10 +23,12 @@ class FunctionTester {
FunctionTester(Isolate* i_isolate, Graph* graph, int param_count);
+ FunctionTester(Isolate* i_isolate, Handle<InstructionStream> code,
+ int param_count);
FunctionTester(Isolate* i_isolate, Handle<Code> code, int param_count);
// Assumes VoidDescriptor call interface.
- explicit FunctionTester(Isolate* i_isolate, Handle<Code> code);
+ explicit FunctionTester(Isolate* i_isolate, Handle<InstructionStream> code);
Isolate* isolate;
CanonicalHandleScope canonical;
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.h b/deps/v8/test/unittests/compiler/graph-reducer-unittest.h
index eb9d8f9199..ce9739b432 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.h
@@ -15,6 +15,7 @@ namespace compiler {
struct MockAdvancedReducerEditor : public AdvancedReducer::Editor {
MOCK_METHOD(void, Revisit, (Node*), (override));
MOCK_METHOD(void, Replace, (Node*, Node*), (override));
+ MOCK_METHOD(void, Replace, (Node*, Node*, NodeId), (override));
MOCK_METHOD(void, ReplaceWithValue, (Node*, Node*, Node*, Node*), (override));
};
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 10fa5cbd59..cf0d3ba2a9 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -19,6 +19,7 @@ GraphTest::GraphTest(int num_parameters)
common_(zone()),
graph_(zone()),
broker_(isolate(), zone()),
+ current_broker_(&broker_),
source_positions_(&graph_),
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 18878f456b..4c3b6bdfc6 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -74,6 +74,7 @@ class GraphTest : public TestWithNativeContextAndZone {
CommonOperatorBuilder common_;
Graph graph_;
JSHeapBroker broker_;
+ CurrentHeapBrokerScope current_broker_;
SourcePositionTable source_positions_;
NodeOriginTable node_origins_;
TickCounter tick_counter_;
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 685c14aa14..e2a05badd6 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -15,11 +15,14 @@
#include "src/compiler/node.h"
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
+#if V8_TARGET_ARCH_32_BIT
+
using testing::AllOf;
using testing::Capture;
using testing::CaptureEq;
@@ -50,13 +53,11 @@ class Int64LoweringTest : public GraphTest {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
- nullptr, signature);
+ signature);
lowering.LowerGraph();
}
- void LowerGraphWithSpecialCase(
- Node* node, std::unique_ptr<Int64LoweringSpecialCase> special_case,
- MachineRepresentation rep) {
+ void LowerGraphWithSpecialCase(Node* node, MachineRepresentation rep) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
Node* ret = graph()->NewNode(common()->Return(), zero, node,
graph()->start(), graph()->start());
@@ -69,8 +70,7 @@ class Int64LoweringTest : public GraphTest {
sig_builder.AddReturn(rep);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
- nullptr, sig_builder.Build(),
- std::move(special_case));
+ sig_builder.Build());
lowering.LowerGraph();
}
@@ -287,7 +287,7 @@ TEST_F(Int64LoweringTest, Int64LoadImmutable) {
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
- nullptr, sig_builder.Build()); \
+ sig_builder.Build()); \
lowering.LowerGraph(); \
\
STORE_VERIFY(kStore, kRep32)
@@ -321,7 +321,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
- nullptr, sig_builder.Build());
+ sig_builder.Build());
lowering.LowerGraph();
EXPECT_THAT(
@@ -430,8 +430,6 @@ TEST_F(Int64LoweringTest, ParameterWithJSClosureParam) {
// two assumptions:
// - Pointers are 32 bit and therefore pointers do not get lowered.
// - 64-bit rol/ror/clz/ctz instructions have a control input.
-// TODO(wasm): We can find an alternative to re-activate these tests.
-#if V8_TARGET_ARCH_32_BIT
TEST_F(Int64LoweringTest, CallI64Return) {
int32_t function = 0x9999;
Node* context_address = Int32Constant(0);
@@ -660,7 +658,6 @@ TEST_F(Int64LoweringTest, I64Ror_43) {
IsInt32Constant(21))),
start(), start()));
}
-#endif
TEST_F(Int64LoweringTest, Int64Sub) {
LowerGraph(graph()->NewNode(machine()->Int64Sub(), Int64Constant(value(0)),
@@ -1035,37 +1032,20 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
Node* target = Int32Constant(1);
Node* context = Int32Constant(2);
Node* bigint = Int32Constant(4);
+ WasmCallDescriptors* descriptors = wasm::GetWasmEngine()->call_descriptors();
CallDescriptor* bigint_to_i64_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- BigIntToI64Descriptor(), // descriptor
- BigIntToI64Descriptor::GetStackParameterCount(), // stack parameter
- // count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
+ descriptors->GetBigIntToI64Descriptor(StubCallMode::kCallBuiltinPointer,
+ false);
CallDescriptor* bigint_to_i32_pair_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- BigIntToI32PairDescriptor(), // descriptor
- BigIntToI32PairDescriptor::
- GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
-
- auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
- lowering_special_case->replacements.insert(
- {bigint_to_i64_call_descriptor, bigint_to_i32_pair_call_descriptor});
+ descriptors->GetLoweredCallDescriptor(bigint_to_i64_call_descriptor);
Node* call_node =
graph()->NewNode(common()->Call(bigint_to_i64_call_descriptor), target,
bigint, context, start(), start());
- LowerGraphWithSpecialCase(call_node, std::move(lowering_special_case),
- MachineRepresentation::kWord64);
+ LowerGraphWithSpecialCase(call_node, MachineRepresentation::kWord64);
Capture<Node*> call;
Matcher<Node*> call_matcher =
@@ -1081,36 +1061,18 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
Node* target = Int32Constant(1);
Node* i64 = Int64Constant(value(0));
+ WasmCallDescriptors* descriptors = wasm::GetWasmEngine()->call_descriptors();
CallDescriptor* i64_to_bigint_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- I64ToBigIntDescriptor(), // descriptor
- I64ToBigIntDescriptor::GetStackParameterCount(), // stack parameter
- // count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
+ descriptors->GetI64ToBigIntDescriptor(StubCallMode::kCallBuiltinPointer);
CallDescriptor* i32_pair_to_bigint_call_descriptor =
- Linkage::GetStubCallDescriptor(
- zone(), // zone
- I32PairToBigIntDescriptor(), // descriptor
- I32PairToBigIntDescriptor::
- GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallCodeObject); // stub call mode
-
- auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
- lowering_special_case->replacements.insert(
- {i64_to_bigint_call_descriptor, i32_pair_to_bigint_call_descriptor});
+ descriptors->GetLoweredCallDescriptor(i64_to_bigint_call_descriptor);
Node* call = graph()->NewNode(common()->Call(i64_to_bigint_call_descriptor),
target, i64, start(), start());
- LowerGraphWithSpecialCase(call, std::move(lowering_special_case),
- MachineRepresentation::kTaggedPointer);
+ LowerGraphWithSpecialCase(call, MachineRepresentation::kTaggedPointer);
EXPECT_THAT(
graph()->end()->InputAt(1),
@@ -1123,3 +1085,5 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
} // namespace compiler
} // namespace internal
} // namespace v8
+
+#endif // V8_TARGET_ARCH_32_BIT
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 1ecf511149..7dc31d92ac 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -101,18 +101,8 @@ class JSCallReducerTest : public TypedGraphTest {
const Operator* Call(int arity) {
FeedbackVectorSpec spec(zone());
spec.AddCallICSlot();
- Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfoForBuiltin(
- isolate()->factory()->empty_string(), Builtin::kIllegal);
- // Set the raw feedback metadata to circumvent checks that we are not
- // overwriting existing metadata.
- shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
- ClosureFeedbackCellArray::New(isolate(), shared);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate()));
- Handle<FeedbackVector> vector = FeedbackVector::New(
- isolate(), shared, closure_feedback_cell_array, &is_compiled_scope);
+ Handle<FeedbackVector> vector =
+ FeedbackVector::NewForTesting(isolate(), &spec);
FeedbackSource feedback(vector, FeedbackSlot(0));
return javascript()->Call(JSCallNode::ArityForArgc(arity), CallFrequency(),
feedback, ConvertReceiverMode::kAny,
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 00242cdd7f..62c5bba17e 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -39,8 +39,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
GraphReducer graph_reducer(zone(), graph(), tick_counter(), broker());
- JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, broker(),
- zone());
+ JSCreateLowering reducer(&graph_reducer, &jsgraph, broker(), zone());
return reducer.Reduce(node);
}
@@ -150,10 +149,10 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CreateFunctionContext(
- MakeRef(broker(), ScopeInfo::Empty(isolate())), 8, FUNCTION_SCOPE),
- context, effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateFunctionContext(
+ broker()->empty_scope_info(), 8, FUNCTION_SCOPE),
+ context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
@@ -166,8 +165,7 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
- ScopeInfoRef scope_info =
- MakeRef(broker(), ReadOnlyRoots(isolate()).empty_function_scope_info());
+ ScopeInfoRef scope_info = broker()->empty_function_scope_info();
Node* const object = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
@@ -188,8 +186,7 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
// JSCreateCatchContext
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
- ScopeInfoRef scope_info =
- MakeRef(broker(), ReadOnlyRoots(isolate()).empty_function_scope_info());
+ ScopeInfoRef scope_info = broker()->empty_function_scope_info();
Node* const exception = Parameter(Type::Receiver());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 49dd7d9cc2..bb740dd956 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -393,7 +393,7 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
- NameRef name = MakeRef(broker(), factory()->length_string());
+ NameRef name = broker()->length_string();
Node* const receiver = Parameter(Type::String(), 0);
Node* const feedback = UndefinedConstant();
Node* const context = UndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
index c69d4324f2..941e9d95ba 100644
--- a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
@@ -175,35 +175,35 @@ const MachInst1 kAddSubOneInstructions[] = {
// ----------------------------------------------------------------------------
const IntCmp kCmpInstructions[] = {
- {{&RawMachineAssembler::WordEqual, "WordEqual", kLoong64Cmp,
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kLoong64Cmp64,
MachineType::Int64()},
1U},
- {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kLoong64Cmp,
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kLoong64Cmp64,
MachineType::Int64()},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp32,
MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kLoong64Cmp,
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kLoong64Cmp32,
MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp32,
MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kLoong64Cmp, MachineType::Int32()},
+ kLoong64Cmp32, MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kLoong64Cmp,
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kLoong64Cmp32,
MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kLoong64Cmp, MachineType::Int32()},
+ kLoong64Cmp32, MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp32,
MachineType::Uint32()},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kLoong64Cmp, MachineType::Uint32()},
+ kLoong64Cmp32, MachineType::Uint32()},
1U}};
// ----------------------------------------------------------------------------
@@ -235,16 +235,16 @@ const Conversion kConversionInstructions[] = {
// LOONG64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
- {&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp32,
MachineType::Uint32()},
- {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp32,
MachineType::Uint32()},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kLoong64Cmp, MachineType::Uint32()},
- {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
+ kLoong64Cmp32, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp32,
MachineType::Uint32()},
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kLoong64Cmp, MachineType::Uint32()},
+ kLoong64Cmp32, MachineType::Uint32()},
};
} // namespace
@@ -1378,7 +1378,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -1390,7 +1390,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp32, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -1405,7 +1405,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp64, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -1417,7 +1417,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
m.Return(m.Word64Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Cmp64, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 1c2a73d486..669f941148 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -4,12 +4,15 @@
#include "src/compiler/machine-operator-reducer.h"
+#include <cstdint>
#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
+#include "src/builtins/builtins.h"
+#include "src/common/globals.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/numbers/conversions-inl.h"
@@ -45,7 +48,9 @@ class MachineOperatorReducerTest : public GraphTest {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
&machine_);
- MachineOperatorReducer reducer(&graph_reducer_, &jsgraph);
+ MachineOperatorReducer reducer(
+ &graph_reducer_, &jsgraph,
+ MachineOperatorReducer::kPropagateSignallingNan);
return reducer.Reduce(node);
}
@@ -69,6 +74,26 @@ class MachineOperatorReducerTest : public GraphTest {
IsWord32Shr(dividend_matcher, IsInt32Constant(31)));
}
+ Matcher<Node*> IsTruncatingDiv64(const Matcher<Node*>& dividend_matcher,
+ const int64_t divisor) {
+ base::MagicNumbersForDivision<uint64_t> const mag =
+ base::SignedDivisionByConstant(base::bit_cast<uint64_t>(divisor));
+ int64_t const multiplier = base::bit_cast<int64_t>(mag.multiplier);
+ int64_t const shift = base::bit_cast<int32_t>(mag.shift);
+ Matcher<Node*> quotient_matcher =
+ IsInt64MulHigh(dividend_matcher, IsInt64Constant(multiplier));
+ if (divisor > 0 && multiplier < 0) {
+ quotient_matcher = IsInt64Add(quotient_matcher, dividend_matcher);
+ } else if (divisor < 0 && multiplier > 0) {
+ quotient_matcher = IsInt64Sub(quotient_matcher, dividend_matcher);
+ }
+ if (shift) {
+ quotient_matcher = IsWord64Sar(quotient_matcher, IsInt64Constant(shift));
+ }
+ return IsInt64Add(quotient_matcher,
+ IsWord64Shr(dividend_matcher, IsInt64Constant(63)));
+ }
+
MachineOperatorBuilder* machine() { return &machine_; }
private:
@@ -1375,6 +1400,21 @@ TEST_F(MachineOperatorReducerTest,
}
}
+TEST_F(MachineOperatorReducerTest, Word32EqualWithAddAndConstant) {
+ // (x+k1)==k2 => x==(k2-k1)
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, k1, kInt32Values) {
+ TRACED_FOREACH(int32_t, k2, kInt32Values) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(k1)),
+ Int32Constant(k2));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Word64Equal
@@ -1413,6 +1453,21 @@ TEST_F(MachineOperatorReducerTest,
}
}
+TEST_F(MachineOperatorReducerTest, Word64EqualWithAddAndConstant) {
+ // (x+k1)==k2 => x==(k2-k1)
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int64_t, k1, kInt64Values) {
+ TRACED_FOREACH(int64_t, k2, kInt64Values) {
+ Node* node = graph()->NewNode(
+ machine()->Word64Equal(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)),
+ Int64Constant(k2));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Branch
@@ -1466,10 +1521,8 @@ TEST_F(MachineOperatorReducerTest, Int32SubWithConstant) {
}
}
-
// -----------------------------------------------------------------------------
-// Int32Div
-
+// Int32Div, Int64Div
TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
Node* const p0 = Parameter(0);
@@ -1556,6 +1609,93 @@ TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Int64DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(-1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Sub(IsInt64Constant(0), p0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(2), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord64Sar(IsInt64Add(IsWord64Shr(p0, IsInt64Constant(63)), p0),
+ IsInt64Constant(1)));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(-2), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt64Sub(
+ IsInt64Constant(0),
+ IsWord64Sar(IsInt64Add(IsWord64Shr(p0, IsInt64Constant(63)), p0),
+ IsInt64Constant(1))));
+ }
+ TRACED_FORRANGE(int64_t, shift, 2, 62) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int64Div(), p0,
+ Int64Constant(int64_t{1} << shift), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord64Sar(IsInt64Add(IsWord64Shr(IsWord64Sar(p0, IsInt64Constant(63)),
+ IsInt64Constant(64 - shift)),
+ p0),
+ IsInt64Constant(shift)));
+ }
+ TRACED_FORRANGE(int64_t, shift, 2, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(Shl(int64_t{-1}, shift)),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt64Sub(
+ IsInt64Constant(0),
+ IsWord64Sar(
+ IsInt64Add(IsWord64Shr(IsWord64Sar(p0, IsInt64Constant(63)),
+ IsInt64Constant(64 - shift)),
+ p0),
+ IsInt64Constant(shift))));
+ }
+ TRACED_FOREACH(int64_t, divisor, kInt64Values) {
+ if (divisor < 0) {
+ if (divisor == std::numeric_limits<int64_t>::min() ||
+ base::bits::IsPowerOfTwo(-divisor)) {
+ continue;
+ }
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Sub(IsInt64Constant(0),
+ IsTruncatingDiv64(p0, -divisor)));
+ } else if (divisor > 0) {
+ if (base::bits::IsPowerOfTwo(divisor)) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Div(), p0, Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncatingDiv64(p0, divisor));
+ }
+ }
+}
TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
Node* const p0 = Parameter(0);
@@ -1567,10 +1707,8 @@ TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
}
-
// -----------------------------------------------------------------------------
-// Uint32Div
-
+// Uint32Div, Uint64Div
TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
Node* const p0 = Parameter(0);
@@ -1613,6 +1751,46 @@ TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint64DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), Int64Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ TRACED_FOREACH(uint64_t, dividend, kUint64Values) {
+ TRACED_FOREACH(uint64_t, divisor, kUint64Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint64Div(), Uint64Constant(dividend),
+ Uint64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Constant(base::bit_cast<int64_t>(
+ base::bits::UnsignedDiv64(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint64_t, shift, 1, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Div(), p0, Uint64Constant(uint64_t{1} << shift),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord64Shr(p0, IsInt64Constant(static_cast<int64_t>(shift))));
+ }
+}
TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
Node* const p0 = Parameter(0);
@@ -1624,10 +1802,8 @@ TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
}
-
// -----------------------------------------------------------------------------
-// Int32Mod
-
+// Int32Mod, Int64Mod
TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
Node* const p0 = Parameter(0);
@@ -1714,6 +1890,90 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Int64ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), Int64Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(-1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ TRACED_FOREACH(int64_t, dividend, kInt64Values) {
+ TRACED_FOREACH(int64_t, divisor, kInt64Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int64Mod(), Int64Constant(dividend),
+ Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Constant(base::bits::SignedMod64(dividend, divisor)));
+ }
+ }
+ TRACED_FORRANGE(int64_t, shift, 1, 62) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int64Mod(), p0,
+ Int64Constant(int64_t{1} << shift), graph()->start()));
+ int64_t const mask = (int64_t{1} << shift) - 1;
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord64,
+ IsInt64Sub(IsInt64Constant(0),
+ IsWord64And(IsInt64Sub(IsInt64Constant(0), p0),
+ IsInt64Constant(mask))),
+ IsWord64And(p0, IsInt64Constant(mask)),
+ IsMerge(IsIfTrue(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())),
+ IsIfFalse(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())))));
+ }
+ TRACED_FORRANGE(int64_t, shift, 1, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(Shl(int64_t{-1}, shift)),
+ graph()->start()));
+ int64_t const mask = static_cast<int64_t>((uint64_t{1} << shift) - 1U);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord64,
+ IsInt64Sub(IsInt64Constant(0),
+ IsWord64And(IsInt64Sub(IsInt64Constant(0), p0),
+ IsInt64Constant(mask))),
+ IsWord64And(p0, IsInt64Constant(mask)),
+ IsMerge(IsIfTrue(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())),
+ IsIfFalse(IsBranch(IsInt64LessThan(p0, IsInt64Constant(0)),
+ graph()->start())))));
+ }
+ TRACED_FOREACH(int64_t, divisor, kInt64Values) {
+ if (divisor == 0 || base::bits::IsPowerOfTwo(Abs(divisor))) continue;
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int64Mod(), p0, Int64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Sub(p0, IsInt64Mul(IsTruncatingDiv64(p0, Abs(divisor)),
+ IsInt64Constant(Abs(divisor)))));
+ }
+}
TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
Node* const p0 = Parameter(0);
@@ -1723,10 +1983,8 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
-
// -----------------------------------------------------------------------------
-// Uint32Mod
-
+// Uint32Mod, Uint64Mod
TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
Node* const p0 = Parameter(0);
@@ -1770,6 +2028,47 @@ TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint64ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), p0, Int64Constant(0), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), Int64Constant(0), p0, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), p0, Int64Constant(1), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64Constant(0));
+ }
+ TRACED_FOREACH(uint64_t, dividend, kUint64Values) {
+ TRACED_FOREACH(uint64_t, divisor, kUint64Values) {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint64Mod(), Uint64Constant(dividend),
+ Uint64Constant(divisor), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt64Constant(base::bit_cast<int64_t>(
+ base::bits::UnsignedMod64(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint64_t, shift, 1, 63) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint64Mod(), p0, Uint64Constant(uint64_t{1} << shift),
+ graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord64And(p0, IsInt64Constant(static_cast<int64_t>(
+ (uint64_t{1} << shift) - 1u))));
+ }
+}
TEST_F(MachineOperatorReducerTest, Uint32ModWithParameters) {
Node* const p0 = Parameter(0);
@@ -2317,6 +2616,49 @@ TEST_F(MachineOperatorReducerTest, Uint64LessThanWithUint32Reduction) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint64LessThanWithInt64AddDontReduce) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FOREACH(uint64_t, k1, kUint64Values) {
+ TRACED_FOREACH(uint64_t, k2, kUint64Values) {
+ Node* node = graph()->NewNode(
+ machine()->Uint64LessThan(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)),
+ Int64Constant(k2));
+ Reduction r = Reduce(node);
+ // Don't reduce because of potential overflow
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+}
+
+TEST_F(MachineOperatorReducerTest,
+ Uint64LessThanOrEqualWithInt64AddDontReduce) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FOREACH(uint64_t, k1, kUint64Values) {
+ TRACED_FOREACH(uint64_t, k2, kUint64Values) {
+ Node* node = graph()->NewNode(
+ machine()->Uint64LessThanOrEqual(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(k1)),
+ Int64Constant(k2));
+ Reduction r = Reduce(node);
+ if (k2 == 0) {
+ // x <= 0 => x == 0
+ ASSERT_TRUE(r.Changed());
+ } else if (k2 == std::numeric_limits<uint64_t>::max()) {
+ // x <= Max => true
+ ASSERT_TRUE(r.Changed());
+ } else {
+ // Don't reduce because of potential overflow
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Int64LessThan
@@ -2574,7 +2916,7 @@ TEST_F(MachineOperatorReducerTest, Float64CosWithConstant) {
Reduce(graph()->NewNode(machine()->Float64Cos(), Float64Constant(x)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cos(x))));
+ IsFloat64Constant(NanSensitiveDoubleEq(COS_IMPL(x))));
}
}
@@ -2673,7 +3015,7 @@ TEST_F(MachineOperatorReducerTest, Float64SinWithConstant) {
Reduce(graph()->NewNode(machine()->Float64Sin(), Float64Constant(x)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sin(x))));
+ IsFloat64Constant(NanSensitiveDoubleEq(SIN_IMPL(x))));
}
}
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index c98b13e40d..3611ed8c54 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -2263,6 +2263,7 @@ IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Div)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Int64Mul)
+IS_BINOP_MATCHER(Int64MulHigh)
IS_BINOP_MATCHER(Int64LessThan)
IS_BINOP_MATCHER(Uint64LessThan)
IS_BINOP_MATCHER(JSAdd)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index f727a14c34..db5059dfb8 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -419,6 +419,8 @@ Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64MulHigh(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64LessThan(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index ac33110995..b7113563d3 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -6,6 +6,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/feedback-source.h"
+#include "src/compiler/js-graph.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -22,8 +23,12 @@ class RedundancyEliminationTest : public GraphTest {
public:
explicit RedundancyEliminationTest(int num_parameters = 4)
: GraphTest(num_parameters),
- reducer_(&editor_, zone()),
- simplified_(zone()) {
+ javascript_(zone()),
+ simplified_(zone()),
+ machine_(zone()),
+ jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
+ &machine_),
+ reducer_(&editor_, &jsgraph_, zone()) {
// Initialize the {reducer_} state for the Start node.
reducer_.Reduce(graph()->start());
@@ -31,16 +36,8 @@ class RedundancyEliminationTest : public GraphTest {
FeedbackVectorSpec spec(zone());
FeedbackSlot slot1 = spec.AddCallICSlot();
FeedbackSlot slot2 = spec.AddCallICSlot();
- Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfoForBuiltin(
- isolate()->factory()->empty_string(), Builtin::kIllegal);
- shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
- ClosureFeedbackCellArray::New(isolate(), shared);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate()));
- Handle<FeedbackVector> feedback_vector = FeedbackVector::New(
- isolate(), shared, closure_feedback_cell_array, &is_compiled_scope);
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::NewForTesting(isolate(), &spec);
vector_slot_pairs_.push_back(FeedbackSource());
vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot1));
vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot2));
@@ -59,8 +56,11 @@ class RedundancyEliminationTest : public GraphTest {
NiceMock<MockAdvancedReducerEditor> editor_;
std::vector<FeedbackSource> vector_slot_pairs_;
FeedbackSource feedback2_;
- RedundancyElimination reducer_;
+ JSOperatorBuilder javascript_;
SimplifiedOperatorBuilder simplified_;
+ MachineOperatorBuilder machine_;
+ JSGraph jsgraph_;
+ RedundancyElimination reducer_;
};
namespace {
diff --git a/deps/v8/test/unittests/compiler/revec-unittest.cc b/deps/v8/test/unittests/compiler/revec-unittest.cc
new file mode 100644
index 0000000000..01e14c26bf
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/revec-unittest.cc
@@ -0,0 +1,239 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/machine-type.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/machine-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/revectorizer.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/wasm-module.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class RevecTest : public TestWithIsolateAndZone {
+ public:
+ RevecTest()
+ : TestWithIsolateAndZone(kCompressGraphZone),
+ graph_(zone()),
+ common_(zone()),
+ machine_(zone(), MachineRepresentation::kWord64,
+ MachineOperatorBuilder::Flag::kAllOptionalOps),
+ mcgraph_(&graph_, &common_, &machine_) {}
+
+ Graph* graph() { return &graph_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ MachineGraph* mcgraph() { return &mcgraph_; }
+
+ private:
+ Graph graph_;
+ CommonOperatorBuilder common_;
+ MachineOperatorBuilder machine_;
+ MachineGraph mcgraph_;
+};
+
+// Create a graph that adds two 256-bit vectors (a, b) and stores the result in c:
+// simd128 *a,*b,*c;
+// *c = *a + *b;
+// *(c+1) = *(a+1) + *(b+1);
+// In revectorization, two Simd128 nodes can be combined into one Simd256 node:
+// simd256 *d, *e, *f;
+// *f = *d + *e;
+TEST_F(RevecTest, F32x8Add) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+
+ Node* start = graph()->NewNode(common()->Start(5));
+ graph()->SetStart(start);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* sixteen = graph()->NewNode(common()->Int64Constant(16));
+ // offset of memory start field in WASM instance object.
+ Node* offset = graph()->NewNode(common()->Int64Constant(23));
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* p2 = graph()->NewNode(common()->Parameter(2), start);
+ Node* p3 = graph()->NewNode(common()->Parameter(3), start);
+
+ StoreRepresentation store_rep(MachineRepresentation::kSimd128,
+ WriteBarrierKind::kNoWriteBarrier);
+ LoadRepresentation load_rep(MachineType::Simd128());
+ Node* load0 = graph()->NewNode(machine()->Load(MachineType::Int64()), p0,
+ offset, start, start);
+ Node* mem_buffer1 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_buffer2 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_store = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* load1 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p1,
+ load0, start);
+ Node* load2 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer1, p1, load1, start);
+ Node* load3 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p2,
+ load2, start);
+ Node* load4 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer2, p2, load3, start);
+ Node* add1 = graph()->NewNode(machine()->F32x4Add(), load1, load3);
+ Node* add2 = graph()->NewNode(machine()->F32x4Add(), load2, load4);
+ Node* store1 = graph()->NewNode(machine()->Store(store_rep), load0, p3, add1,
+ load4, start);
+ Node* store2 = graph()->NewNode(machine()->Store(store_rep), mem_store, p3,
+ add2, store1, start);
+ Node* ret = graph()->NewNode(common()->Return(0), zero, store2, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+ graph()->SetEnd(end);
+
+ graph()->RecordSimdStore(store1);
+ graph()->RecordSimdStore(store2);
+ graph()->SetSimd(true);
+
+ // Test whether the graph can be revectorized
+ Revectorizer revec(zone(), graph(), mcgraph());
+ EXPECT_TRUE(revec.TryRevectorize(nullptr));
+
+ // Test whether the graph has been revectorized
+ Node* store_256 = ret->InputAt(1);
+ EXPECT_EQ(StoreRepresentationOf(store_256->op()).representation(),
+ MachineRepresentation::kSimd256);
+}
+
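The pass the test exercises replaces the two Simd128 additions over adjacent 16-byte chunks with a single Simd256 addition. A scalar sketch of why the two forms compute the same result (plain C++, no SIMD intrinsics; purely illustrative):

#include <cassert>

int main() {
  float a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  float b[8] = {8, 7, 6, 5, 4, 3, 2, 1};
  float c_two_halves[8], c_one_pass[8];
  // Before: *c = *a + *b; *(c+1) = *(a+1) + *(b+1);  (two 4-lane adds)
  for (int i = 0; i < 4; ++i) c_two_halves[i] = a[i] + b[i];
  for (int i = 4; i < 8; ++i) c_two_halves[i] = a[i] + b[i];
  // After: *f = *d + *e;  (one 8-lane add)
  for (int i = 0; i < 8; ++i) c_one_pass[i] = a[i] + b[i];
  for (int i = 0; i < 8; ++i) assert(c_two_halves[i] == c_one_pass[i]);
  return 0;
}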
+// Create a graph that multiplies an F32x8 vector a with the first element of
+// vector b and stores the result to an F32x8 vector c:
+// float *a, *b, *c;
+// c[0123] = a[0123] * b[0000];
+// c[4567] = a[4567] * b[0000];
+//
+// After the revectorization phase, two consecutive 128-bit loads and multiplies
+// can be coalesced using 256-bit vectors:
+// c[01234567] = a[01234567] * b[00000000];
+TEST_F(RevecTest, F32x8Mul) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+
+ Node* start = graph()->NewNode(common()->Start(4));
+ graph()->SetStart(start);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* sixteen = graph()->NewNode(common()->Int64Constant(16));
+ Node* offset = graph()->NewNode(common()->Int64Constant(23));
+
+ // Wasm array base address
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ // Load base address a*
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+  // LoadTransform base address b*
+ Node* p2 = graph()->NewNode(common()->Parameter(2), start);
+ // Store base address c*
+ Node* p3 = graph()->NewNode(common()->Parameter(3), start);
+
+ LoadRepresentation load_rep(MachineType::Simd128());
+ StoreRepresentation store_rep(MachineRepresentation::kSimd128,
+ WriteBarrierKind::kNoWriteBarrier);
+ Node* base = graph()->NewNode(machine()->Load(MachineType::Int64()), p0,
+ offset, start, start);
+ Node* base16 = graph()->NewNode(machine()->Int64Add(), base, sixteen);
+ Node* base16_store = graph()->NewNode(machine()->Int64Add(), base, sixteen);
+ Node* load0 = graph()->NewNode(machine()->ProtectedLoad(load_rep), base, p1,
+ base, start);
+ Node* load1 = graph()->NewNode(machine()->ProtectedLoad(load_rep), base16, p1,
+ load0, start);
+ Node* load2 = graph()->NewNode(
+ machine()->LoadTransform(MemoryAccessKind::kProtected,
+ LoadTransformation::kS128Load32Splat),
+ base, p2, load1, start);
+ Node* mul0 = graph()->NewNode(machine()->F32x4Mul(), load0, load2);
+ Node* mul1 = graph()->NewNode(machine()->F32x4Mul(), load1, load2);
+ Node* store0 = graph()->NewNode(machine()->Store(store_rep), base, p3, mul0,
+ load2, start);
+ Node* store1 = graph()->NewNode(machine()->Store(store_rep), base16_store, p3,
+ mul1, store0, start);
+ Node* ret = graph()->NewNode(common()->Return(0), zero, store1, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+ graph()->SetEnd(end);
+
+ graph()->RecordSimdStore(store0);
+ graph()->RecordSimdStore(store1);
+ graph()->SetSimd(true);
+
+ Revectorizer revec(zone(), graph(), mcgraph());
+ EXPECT_TRUE(revec.TryRevectorize(nullptr));
+
+ // Test whether the graph has been revectorized
+ Node* store_256 = ret->InputAt(1);
+ EXPECT_EQ(StoreRepresentationOf(store_256->op()).representation(),
+ MachineRepresentation::kSimd256);
+}
+
+// Create a graph with a load chain that cannot be packed due to an effect
+// dependency:
+// [Load4] -> [Load3] -> [Load2] -> [Irrelevant Load] -> [Load1]
+//
+// After reordering, no effect dependency will be broken so the graph can be
+// revectorized:
+// [Load4] -> [Load3] -> [Load2] -> [Load1] -> [Irrelevant Load]
+TEST_F(RevecTest, ReorderLoadChain) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+
+ Node* start = graph()->NewNode(common()->Start(5));
+ graph()->SetStart(start);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* sixteen = graph()->NewNode(common()->Int64Constant(16));
+ // offset of memory start field in WASM instance object.
+ Node* offset = graph()->NewNode(common()->Int64Constant(23));
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* p2 = graph()->NewNode(common()->Parameter(2), start);
+ Node* p3 = graph()->NewNode(common()->Parameter(3), start);
+
+ StoreRepresentation store_rep(MachineRepresentation::kSimd128,
+ WriteBarrierKind::kNoWriteBarrier);
+ LoadRepresentation load_rep(MachineType::Simd128());
+ Node* load0 = graph()->NewNode(machine()->Load(MachineType::Int64()), p0,
+ offset, start, start);
+ Node* mem_buffer1 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_buffer2 = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* mem_store = graph()->NewNode(machine()->Int64Add(), load0, sixteen);
+ Node* load1 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p1,
+ load0, start);
+ Node* irrelevant_load = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer1, p1, load1, start);
+ Node* load2 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer1, p1, irrelevant_load, start);
+ Node* load3 = graph()->NewNode(machine()->ProtectedLoad(load_rep), load0, p2,
+ load2, start);
+ Node* load4 = graph()->NewNode(machine()->ProtectedLoad(load_rep),
+ mem_buffer2, p2, load3, start);
+ Node* add1 = graph()->NewNode(machine()->F32x4Add(), load1, load3);
+ Node* add2 = graph()->NewNode(machine()->F32x4Add(), load2, load4);
+ Node* store1 = graph()->NewNode(machine()->Store(store_rep), load0, p3, add1,
+ load4, start);
+ Node* store2 = graph()->NewNode(machine()->Store(store_rep), mem_store, p3,
+ add2, store1, start);
+ Node* ret = graph()->NewNode(common()->Return(0), zero, store2, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+ graph()->SetEnd(end);
+
+ graph()->RecordSimdStore(store1);
+ graph()->RecordSimdStore(store2);
+ graph()->SetSimd(true);
+
+ // Test whether the graph can be revectorized
+ Revectorizer revec(zone(), graph(), mcgraph());
+ EXPECT_TRUE(revec.TryRevectorize(nullptr));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc b/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc
index f9380ce8cc..5769a980ee 100644
--- a/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/riscv32/instruction-selector-riscv32-unittest.cc
@@ -910,8 +910,8 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -928,10 +928,10 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
- EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
diff --git a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
index 8458e4e7d5..13de26ab4b 100644
--- a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
@@ -1298,8 +1298,8 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -1316,10 +1316,10 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
- EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
diff --git a/deps/v8/test/unittests/compiler/run-deopt-unittest.cc b/deps/v8/test/unittests/compiler/run-deopt-unittest.cc
index 2c75b0455f..d2dd46efc4 100644
--- a/deps/v8/test/unittests/compiler/run-deopt-unittest.cc
+++ b/deps/v8/test/unittests/compiler/run-deopt-unittest.cc
@@ -12,7 +12,8 @@ namespace internal {
namespace compiler {
static void IsOptimized(const v8::FunctionCallbackInfo<v8::Value>& args) {
- JavaScriptFrameIterator it(reinterpret_cast<Isolate*>(args.GetIsolate()));
+ JavaScriptStackFrameIterator it(
+ reinterpret_cast<Isolate*>(args.GetIsolate()));
JavaScriptFrame* frame = it.frame();
return args.GetReturnValue().Set(frame->is_turbofan());
}
diff --git a/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc b/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc
index 049faff5c5..930bd4b387 100644
--- a/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc
+++ b/deps/v8/test/unittests/compiler/run-tail-calls-unittest.cc
@@ -43,8 +43,7 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- Handle<CodeT> code =
- ToCodeT(BuildCallee(isolate, callee_descriptor), isolate);
+ Handle<Code> code = BuildCallee(isolate, callee_descriptor);
params.push_back(__ HeapConstant(code));
int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
for (int i = 0; i < param_slots; ++i) {
@@ -60,12 +59,12 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
Handle<Code> BuildSetupFunction(Isolate* isolate,
CallDescriptor* caller_descriptor,
CallDescriptor* callee_descriptor) {
- CodeAssemblerTester tester(isolate, 0);
+ CodeAssemblerTester tester(isolate, JSParameterCount(0));
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- Handle<CodeT> code = ToCodeT(
- BuildCaller(isolate, caller_descriptor, callee_descriptor), isolate);
+ Handle<Code> code =
+ BuildCaller(isolate, caller_descriptor, callee_descriptor);
params.push_back(__ HeapConstant(code));
// Set up arguments for "Caller".
int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
diff --git a/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc b/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc
index 34cdb0041f..465bebc54d 100644
--- a/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc
+++ b/deps/v8/test/unittests/compiler/turboshaft/snapshot-table-unittest.cc
@@ -26,170 +26,166 @@ TEST_F(SnapshotTableTest, BasicTest) {
Key k3 = table.NewKey(3);
Key k4 = table.NewKey(4);
- base::Optional<Snapshot> s1;
- {
- SnapshotTable<int>::Scope scope(table);
- EXPECT_EQ(scope.Get(k1), 1);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 3);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k1, 10);
- scope.Set(k2, 20);
- scope.Set(k4, 4);
- EXPECT_EQ(scope.Get(k1), 10);
- EXPECT_EQ(scope.Get(k2), 20);
- EXPECT_EQ(scope.Get(k3), 3);
- EXPECT_EQ(scope.Get(k4), 4);
- s1 = scope.Seal();
- }
-
- base::Optional<Snapshot> s2;
- {
- SnapshotTable<int>::Scope scope(table);
- EXPECT_EQ(scope.Get(k1), 1);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 3);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k1, 11);
- scope.Set(k3, 33);
- EXPECT_EQ(scope.Get(k1), 11);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 33);
- EXPECT_EQ(scope.Get(k4), 4);
- s2 = scope.Seal();
- }
-
- {
- SnapshotTable<int>::Scope scope(table, *s2);
- // Assignments of the same value are ignored.
- EXPECT_EQ(scope.Get(k1), 11);
- scope.Set(k1, 11);
- // An empty scope does not produce a new snapshot.
- EXPECT_EQ(scope.Seal(), *s2);
- }
-
- base::Optional<Snapshot> s3;
- {
- SnapshotTable<int>::Scope scope(
- table, {*s1, *s2}, [&](Key key, base::Vector<const int> values) {
- if (key == k1) {
- EXPECT_EQ(values[0], 10);
- EXPECT_EQ(values[1], 11);
- } else if (key == k2) {
- EXPECT_EQ(values[0], 20);
- EXPECT_EQ(values[1], 2);
- } else if (key == k3) {
- EXPECT_EQ(values[0], 3);
- EXPECT_EQ(values[1], 33);
- } else {
- EXPECT_TRUE(false);
- }
- return values[0] + values[1];
- });
- EXPECT_EQ(scope.Get(k1), 21);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k1, 40);
- EXPECT_EQ(scope.Get(k1), 40);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- s3 = scope.Seal();
- }
-
- base::Optional<Snapshot> s4;
- {
- SnapshotTable<int>::Scope scope(table, *s2);
- EXPECT_EQ(scope.Get(k1), 11);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 33);
- EXPECT_EQ(scope.Get(k4), 4);
- scope.Set(k3, 30);
- EXPECT_EQ(scope.Get(k3), 30);
- s4 = scope.Seal();
- }
-
- base::Optional<Snapshot> s5;
- {
- SnapshotTable<int>::Scope scope(
- table, {*s4, *s2}, [&](Key key, base::Vector<const int> values) {
- if (key == k3) {
- EXPECT_EQ(values[0], 30);
- EXPECT_EQ(values[1], 33);
- } else {
- EXPECT_TRUE(false);
- }
- return values[0] + values[1];
- });
- EXPECT_EQ(scope.Get(k1), 11);
- EXPECT_EQ(scope.Get(k2), 2);
- EXPECT_EQ(scope.Get(k3), 63);
- EXPECT_EQ(scope.Get(k4), 4);
- s5 = scope.Seal();
- }
-
- base::Optional<Key> k5;
- base::Optional<Snapshot> s6;
- {
- SnapshotTable<int>::Scope scope(table, *s2);
- scope.Set(k1, 5);
- // Creating a new key while the SnapshotTable is already in use, in the
- // middle of a scope. This is the same as creating the key in the beginning.
- k5 = table.NewKey(-1);
- EXPECT_EQ(scope.Get(*k5), -1);
- scope.Set(*k5, 42);
- EXPECT_EQ(scope.Get(*k5), 42);
- EXPECT_EQ(scope.Get(k1), 5);
- s6 = scope.Seal();
- }
-
- base::Optional<Snapshot> s7;
- {
- // We're merging {s6} and {s1}, to make sure that {s1}'s behavior is correct
- // with regard to {k5}, which wasn't created yet when {s1} was sealed.
- SnapshotTable<int>::Scope scope(
- table, {*s6, *s1}, [&](Key key, base::Vector<const int> values) {
- if (key == k1) {
- EXPECT_EQ(values[1], 10);
- EXPECT_EQ(values[0], 5);
- } else if (key == k2) {
- EXPECT_EQ(values[1], 20);
- EXPECT_EQ(values[0], 2);
- } else if (key == k3) {
- EXPECT_EQ(values[1], 3);
- EXPECT_EQ(values[0], 33);
- } else if (key == *k5) {
- EXPECT_EQ(values[0], 42);
- EXPECT_EQ(values[1], -1);
- return 127;
- } else {
- EXPECT_TRUE(false);
- }
- return values[0] + values[1];
- });
- EXPECT_EQ(scope.Get(k1), 15);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- EXPECT_EQ(scope.Get(*k5), 127);
- // We're not setting anything else, but the merges should produce entries in
- // the log.
- s7 = scope.Seal();
- }
-
- base::Optional<Snapshot> s8;
- {
- SnapshotTable<int>::Scope scope(table, *s7);
- // We're checking that {s7} did indeed capture the merge entries, despite
- // that we didn't do any explicit Set.
- EXPECT_EQ(scope.Get(k1), 15);
- EXPECT_EQ(scope.Get(k2), 22);
- EXPECT_EQ(scope.Get(k3), 36);
- EXPECT_EQ(scope.Get(k4), 4);
- EXPECT_EQ(scope.Get(*k5), 127);
- s8 = scope.Seal();
- }
+ table.StartNewSnapshot();
+ EXPECT_EQ(table.Get(k1), 1);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k1, 10);
+ table.Set(k2, 20);
+ table.Set(k4, 4);
+ EXPECT_EQ(table.Get(k1), 10);
+ EXPECT_EQ(table.Get(k2), 20);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ Snapshot s1 = table.Seal();
+
+ table.StartNewSnapshot();
+ EXPECT_EQ(table.Get(k1), 1);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k1, 11);
+ table.Set(k3, 33);
+ EXPECT_EQ(table.Get(k1), 11);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 33);
+ EXPECT_EQ(table.Get(k4), 4);
+ Snapshot s2 = table.Seal();
+
+ table.StartNewSnapshot(s2);
+ // Assignments of the same value are ignored.
+ EXPECT_EQ(table.Get(k1), 11);
+ table.Set(k1, 11);
+ // Sealing an empty snapshot does not produce a new snapshot.
+ EXPECT_EQ(table.Seal(), s2);
+
+ table.StartNewSnapshot({s1, s2},
+ [&](Key key, base::Vector<const int> values) {
+ if (key == k1) {
+ EXPECT_EQ(values[0], 10);
+ EXPECT_EQ(values[1], 11);
+ } else if (key == k2) {
+ EXPECT_EQ(values[0], 20);
+ EXPECT_EQ(values[1], 2);
+ } else if (key == k3) {
+ EXPECT_EQ(values[0], 3);
+ EXPECT_EQ(values[1], 33);
+ } else {
+ EXPECT_TRUE(false);
+ }
+ return values[0] + values[1];
+ });
+ EXPECT_EQ(table.Get(k1), 21);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k1, 40);
+ EXPECT_EQ(table.Get(k1), 40);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 0), 10);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 1), 11);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 0), 20);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 1), 2);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 0), 3);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 1), 33);
+ table.Seal();
+
+ table.StartNewSnapshot({s1, s2});
+ EXPECT_EQ(table.Get(k1), 1);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 3);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Seal();
+
+ table.StartNewSnapshot(s2);
+ EXPECT_EQ(table.Get(k1), 11);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 33);
+ EXPECT_EQ(table.Get(k4), 4);
+ table.Set(k3, 30);
+ EXPECT_EQ(table.Get(k3), 30);
+ Snapshot s4 = table.Seal();
+
+ table.StartNewSnapshot({s4, s2},
+ [&](Key key, base::Vector<const int> values) {
+ if (key == k3) {
+ EXPECT_EQ(values[0], 30);
+ EXPECT_EQ(values[1], 33);
+ } else {
+ EXPECT_TRUE(false);
+ }
+ return values[0] + values[1];
+ });
+ EXPECT_EQ(table.Get(k1), 11);
+ EXPECT_EQ(table.Get(k2), 2);
+ EXPECT_EQ(table.Get(k3), 63);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 0), 30);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 1), 33);
+ table.Seal();
+
+ table.StartNewSnapshot(s2);
+ table.Set(k1, 5);
+ // Creating a new key while the SnapshotTable is already in use. This is the
+ // same as creating the key at the beginning.
+ Key k5 = table.NewKey(-1);
+ EXPECT_EQ(table.Get(k5), -1);
+ table.Set(k5, 42);
+ EXPECT_EQ(table.Get(k5), 42);
+ EXPECT_EQ(table.Get(k1), 5);
+ Snapshot s6 = table.Seal();
+
+ // We're merging {s6} and {s1}, to make sure that {s1}'s behavior is correct
+ // with regard to {k5}, which wasn't created yet when {s1} was sealed.
+ table.StartNewSnapshot({s6, s1},
+ [&](Key key, base::Vector<const int> values) {
+ if (key == k1) {
+ EXPECT_EQ(values[1], 10);
+ EXPECT_EQ(values[0], 5);
+ } else if (key == k2) {
+ EXPECT_EQ(values[1], 20);
+ EXPECT_EQ(values[0], 2);
+ } else if (key == k3) {
+ EXPECT_EQ(values[1], 3);
+ EXPECT_EQ(values[0], 33);
+ } else if (key == k5) {
+ EXPECT_EQ(values[0], 42);
+ EXPECT_EQ(values[1], -1);
+ return 127;
+ } else {
+ EXPECT_TRUE(false);
+ }
+ return values[0] + values[1];
+ });
+ EXPECT_EQ(table.Get(k1), 15);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.Get(k5), 127);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 0), 5);
+ EXPECT_EQ(table.GetPredecessorValue(k1, 1), 10);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 0), 2);
+ EXPECT_EQ(table.GetPredecessorValue(k2, 1), 20);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 0), 33);
+ EXPECT_EQ(table.GetPredecessorValue(k3, 1), 3);
+ EXPECT_EQ(table.GetPredecessorValue(k5, 0), 42);
+ EXPECT_EQ(table.GetPredecessorValue(k5, 1), -1);
+ // We're not setting anything else, but the merges should produce entries in
+ // the log.
+ Snapshot s7 = table.Seal();
+
+ table.StartNewSnapshot(s7);
+  // We're checking that {s7} did indeed capture the merge entries, even
+  // though we didn't do any explicit Set.
+ EXPECT_EQ(table.Get(k1), 15);
+ EXPECT_EQ(table.Get(k2), 22);
+ EXPECT_EQ(table.Get(k3), 36);
+ EXPECT_EQ(table.Get(k4), 4);
+ EXPECT_EQ(table.Get(k5), 127);
+ table.Seal();
}
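The multi-predecessor StartNewSnapshot calls above combine the predecessor snapshots key by key through the supplied merge function. A minimal sketch of that merge semantics using plain std::map states (not the real SnapshotTable API; the real table only calls the merge function for keys whose values actually differ across predecessors):

#include <cassert>
#include <map>
#include <string>
#include <vector>

using State = std::map<std::string, int>;

// Combine the predecessor states key by key, the way the test's lambda sums
// the per-predecessor values.
template <typename MergeFn>
State Merge(const std::vector<State>& predecessors, MergeFn merge) {
  State result;
  for (const auto& entry : predecessors.front()) {
    std::vector<int> values;
    for (const State& pred : predecessors) values.push_back(pred.at(entry.first));
    result[entry.first] = merge(entry.first, values);
  }
  return result;
}

int main() {
  State s1{{"k1", 10}, {"k2", 20}};  // like snapshot s1 above
  State s2{{"k1", 11}, {"k2", 2}};   // like snapshot s2 above
  State merged =
      Merge({s1, s2}, [](const std::string&, const std::vector<int>& v) {
        return v[0] + v[1];
      });
  assert(merged.at("k1") == 21);
  assert(merged.at("k2") == 22);
  return 0;
}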
TEST_F(SnapshotTableTest, KeyData) {
diff --git a/deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc
new file mode 100644
index 0000000000..84ee558abb
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-typer-unittest.cc
@@ -0,0 +1,346 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/common/globals.h"
+#include "src/compiler/turboshaft/typer.h"
+#include "src/handles/handles.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+template <typename T>
+class WordTyperTest : public TestWithNativeContextAndZone {
+ public:
+ CanonicalHandleScope canonical;
+ using word_t = typename T::word_t;
+ static constexpr size_t Bits = sizeof(word_t) * kBitsPerByte;
+
+ WordTyperTest() : TestWithNativeContextAndZone(), canonical(isolate()) {}
+};
+
+template <typename T>
+class FloatTyperTest : public TestWithNativeContextAndZone {
+ public:
+ CanonicalHandleScope canonical;
+ using float_t = typename T::float_t;
+ static constexpr size_t Bits = sizeof(float_t) * kBitsPerByte;
+
+ FloatTyperTest() : TestWithNativeContextAndZone(), canonical(isolate()) {}
+};
+
+template <typename T>
+struct Slices {
+ Slices(std::initializer_list<T> slices) : slices(slices) {}
+
+ std::vector<T> slices;
+};
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, const Slices<T>& slices) {
+ os << "Slices{";
+ for (const auto& s : slices.slices) os << s << ", ";
+ return os << "}";
+}
+
+// We define operator<= here for Type so that we can use gtest's EXPECT_LE to
+// check for subtyping and have the default printing.
+inline bool operator<=(const Type& lhs, const Type& rhs) {
+ return lhs.IsSubtypeOf(rhs);
+}
+template <typename T>
+inline bool operator<=(const Slices<T>& lhs, const T& rhs) {
+ for (const auto& s : lhs.slices) {
+ if (!s.IsSubtypeOf(rhs)) return false;
+ }
+ return true;
+}
+
+using WordTypes = ::testing::Types<Word32Type, Word64Type>;
+TYPED_TEST_SUITE(WordTyperTest, WordTypes);
+
+#define DEFINE_TEST_HELPERS() \
+ using T = TypeParam; \
+ using word_t = typename TestFixture::word_t; \
+ using Slices = Slices<T>; \
+ constexpr word_t max = std::numeric_limits<word_t>::max(); \
+ auto Constant = [&](word_t value) { return T::Constant(value); }; \
+ auto Set = [&](std::initializer_list<word_t> elements) { \
+ return WordOperationTyper<TestFixture::Bits>::FromElements(elements, \
+ this->zone()); \
+ }; \
+ auto Range = [&](word_t from, word_t to) { \
+ return T::Range(from, to, this->zone()); \
+ }; \
+ USE(Slices{}, Constant, Set, Range);
+
+TYPED_TEST(WordTyperTest, Add) {
+ DEFINE_TEST_HELPERS()
+#define EXPECT_ADD(lhs, rhs, result) \
+ EXPECT_LE(result, WordOperationTyper<TestFixture::Bits>::Add(lhs, rhs, \
+ this->zone())); \
+ EXPECT_LE(result, WordOperationTyper<TestFixture::Bits>::Add(rhs, lhs, \
+ this->zone()))
+
+ // Adding any.
+ {
+ // Any + Any
+ EXPECT_ADD(T::Any(), T::Any(), T::Any());
+ // c + Any
+ EXPECT_ADD(Constant(42), T::Any(), T::Any());
+ // {x1, ..., xn} + Any
+ EXPECT_ADD(Set({8, 11, 922}), T::Any(), T::Any());
+ // [a, b] + Any
+ EXPECT_ADD(Range(800, 1020), T::Any(), T::Any());
+ }
+
+ // Adding constants.
+ {
+ // c' + c
+ EXPECT_ADD(Constant(8), Constant(10003), Constant(8 + 10003));
+ EXPECT_ADD(Constant(max), Constant(0), Constant(max));
+ EXPECT_ADD(Constant(max - 8), Constant(12), Constant(3));
+ EXPECT_ADD(Constant(max), Constant(max), Constant(max - 1));
+ // {x1, ..., xn} + c
+ auto set1 = Set({0, 87});
+ EXPECT_ADD(set1, Constant(0), set1);
+ EXPECT_ADD(set1, Constant(2005), Set({2005, 2092}));
+ EXPECT_ADD(set1, Constant(max - 4), Set({82, max - 4}));
+ EXPECT_ADD(set1, Constant(max), Set({86, max}));
+ auto set2 = Set({15, 25025, max - 99});
+ EXPECT_ADD(set2, Constant(0), set2);
+ EXPECT_ADD(set2, Constant(4), Set({19, 25029, max - 95}));
+ EXPECT_ADD(set2, Constant(max - 50), Set({24974, max - 150, max - 35}));
+ EXPECT_ADD(set2, Constant(max), Set({14, 25024, max - 100}));
+ // [a, b](non-wrapping) + c
+ auto range1 = Range(13, 288);
+ EXPECT_ADD(range1, Constant(0), range1);
+ EXPECT_ADD(range1, Constant(812), Range(825, 1100));
+ EXPECT_ADD(range1, Constant(max - 103), Range(max - 90, 184));
+ EXPECT_ADD(range1, Constant(max - 5), Range(7, 282));
+ EXPECT_ADD(range1, Constant(max), Range(12, 287));
+ // [a, b](wrapping) + c
+ auto range2 = Range(max - 100, 70);
+ EXPECT_ADD(range2, Constant(0), range2);
+ EXPECT_ADD(range2, Constant(14), Range(max - 86, 84));
+ EXPECT_ADD(range2, Constant(101), Range(0, 171));
+ EXPECT_ADD(range2, Constant(200), Range(99, 270));
+ EXPECT_ADD(range2, Constant(max), Range(max - 101, 69));
+ }
+
+ // Adding sets.
+ {
+ // {y1, ..., ym} + {x1, ..., xn}
+ auto set1 = Set({0, 87});
+ EXPECT_ADD(set1, set1, Set({0, 87, (87 + 87)}));
+ EXPECT_ADD(set1, Set({3, 4, 5}), Set({3, 4, 5, 90, 91}));
+ EXPECT_ADD(set1, Set({3, 7, 11, 114}),
+ Set({3, 7, 11, 90, 94, 98, 114, 201}));
+ EXPECT_ADD(set1, Set({0, 1, 87, 200, max}),
+ Set({0, 1, 86, 87, 88, 174, 200, 287, max}));
+ EXPECT_ADD(set1, Set({max - 86, max - 9, max}),
+ Set({0, 77, 86, max - 86, max - 9, max}));
+ // [a, b](non-wrapping) + {x1, ..., xn}
+ auto range1 = Range(400, 991);
+ EXPECT_ADD(range1, Set({0, 55}), Range(400, 1046));
+ EXPECT_ADD(range1, Set({49, 110, 100009}), Range(449, 101000));
+ EXPECT_ADD(
+ range1, Set({112, max - 10094, max - 950}),
+ Slices({Range(0, 40), Range(512, 1103), Range(max - 9694, max)}));
+ EXPECT_ADD(range1, Set({112, max - 850}),
+ Slices({Range(512, 1103), Range(max - 450, 140)}));
+ EXPECT_ADD(range1, Set({max - 3, max - 1, max}), Range(396, 990));
+ // [a,b](wrapping) + {x1, ..., xn}
+ auto range2 = Range(max - 30, 82);
+ EXPECT_ADD(range2, Set({0, 20}),
+ Slices({Range(max - 30, 82), Range(max - 10, 102)}));
+ EXPECT_ADD(range2, Set({20, 30, 32, max}),
+ Slices({Range(max - 10, 101), Range(0, 112), Range(1, 114),
+ Range(max - 31, 81)}));
+ EXPECT_ADD(range2, Set({1000, 2000}),
+ Slices({Range(969, 1082), Range(1969, 2082)}));
+ EXPECT_ADD(range2, Set({max - 8, max - 2}),
+ Slices({Range(max - 39, 73), Range(max - 33, 79)}));
+ }
+
+ // Adding ranges.
+ {
+ // [a, b](non-wrapping) + [c, d](non-wrapping)
+ auto range1 = Range(30, 990);
+ EXPECT_ADD(range1, Range(0, 2), Range(30, 992));
+ EXPECT_ADD(range1, Range(1000, 22000), Range(1030, 22990));
+ EXPECT_ADD(range1, Range(0, max - 1000), Range(30, max - 10));
+ EXPECT_ADD(range1, Range(max - 800, max - 700), Range(max - 770, 289));
+ EXPECT_ADD(range1, Range(max - 5, max), Range(24, 989));
+ // [a, b](wrapping) + [c, d](non-wrapping)
+ auto range2 = Range(max - 40, 40);
+ EXPECT_ADD(range2, Range(0, 8), Range(max - 40, 48));
+ EXPECT_ADD(range2, Range(2000, 90000), Range(1959, 90040));
+ EXPECT_ADD(range2, Range(max - 400, max - 200),
+ Range(max - 441, max - 160));
+ EXPECT_ADD(range2, Range(0, max - 82), Range(max - 40, max - 42));
+ EXPECT_ADD(range2, Range(0, max - 81), T::Any());
+ EXPECT_ADD(range2, Range(20, max - 20), T::Any());
+ // [a, b](wrapping) + [c, d](wrapping)
+ EXPECT_ADD(range2, range2, Range(max - 81, 80));
+ EXPECT_ADD(range2, Range(max - 2, 2), Range(max - 43, 42));
+ EXPECT_ADD(range2, Range(1000, 100), Range(959, 140));
+ }
+
+#undef EXPECT_ADD
+}
+
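The constant cases in the Add test above are ordinary modular arithmetic on the word width. A standalone check of two of them (plain C++, not the typer; works for both 32- and 64-bit words):

#include <cassert>
#include <cstdint>
#include <limits>

template <typename word_t>
void CheckWrap() {
  const word_t max = std::numeric_limits<word_t>::max();
  // Constant(max - 8) + Constant(12) folds to Constant(3): unsigned addition
  // wraps modulo 2^bits.
  assert(static_cast<word_t>(max - 8 + 12) == word_t{3});
  // Constant(max) + Constant(max) folds to Constant(max - 1).
  assert(static_cast<word_t>(max + max) == static_cast<word_t>(max - 1));
}

int main() {
  CheckWrap<uint32_t>();
  CheckWrap<uint64_t>();
  return 0;
}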
+TYPED_TEST(WordTyperTest, WidenExponential) {
+ DEFINE_TEST_HELPERS()
+
+ auto SizeOf = [&](const T& type) -> word_t {
+ DCHECK(!type.is_any());
+ if (type.is_set()) return type.set_size();
+ if (type.is_wrapping()) {
+ return type.range_to() + (max - type.range_from()) + word_t{2};
+ }
+ return type.range_to() - type.range_from() + word_t{1};
+ };
+ auto DoubledInSize = [&](const T& old_type, const T& new_type) {
+ // If the `new_type` is any, we accept it.
+ if (new_type.is_any()) return true;
+    return 2 * SizeOf(old_type) <= SizeOf(new_type);
+ };
+
+#define EXPECT_WEXP(old_type, new_type) \
+ { \
+ const T ot = old_type; \
+ const T nt = new_type; \
+ auto result = WordOperationTyper<TestFixture::Bits>::WidenExponential( \
+ ot, nt, this->zone()); \
+ EXPECT_LE(ot, result); \
+ EXPECT_LE(nt, result); \
+ EXPECT_TRUE(DoubledInSize(ot, result)); \
+ }
+
+ // c W set
+ EXPECT_WEXP(Constant(0), Set({0, 1}));
+ EXPECT_WEXP(Constant(0), Set({0, 3}));
+ EXPECT_WEXP(Constant(0), Set({0, 1, max}));
+ EXPECT_WEXP(Constant(0), Set({0, 1, 2, max - 2, max - 1, max}));
+ EXPECT_WEXP(Constant(max), Set({0, 1, 2, max - 2, max}));
+ // c W range
+ EXPECT_WEXP(Constant(0), Range(0, 100));
+ EXPECT_WEXP(Constant(100), Range(50, 100));
+ EXPECT_WEXP(Constant(100), Range(50, 150));
+ EXPECT_WEXP(Constant(0), Range(max - 10, 0));
+ EXPECT_WEXP(Constant(0), Range(max - 10, 10));
+ EXPECT_WEXP(Constant(50), Range(max - 10000, 100));
+ EXPECT_WEXP(Constant(max), T::Any());
+ // set W set
+ EXPECT_WEXP(Set({0, 1}), Set({0, 1, 2}));
+ EXPECT_WEXP(Set({0, 1}), Set({0, 1, 2, 3, 4}));
+ EXPECT_WEXP(Set({0, max}), Set({0, 1, max}));
+ EXPECT_WEXP(Set({8, max - 8}), Set({7, 8, max - 8, max - 7}));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Set({2, 3, 5, 7, 11}));
+ // set W range
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(3, 11));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(0, 11));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(3, 100));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(max, 11));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), Range(max - 100, 100));
+ EXPECT_WEXP(Set({3, 5, 7, 11}), T::Any());
+ // range W range
+ EXPECT_WEXP(Range(0, 20), Range(0, 21));
+ EXPECT_WEXP(Range(0, 20), Range(0, 220));
+ EXPECT_WEXP(Range(0, 20), Range(max, 20));
+ EXPECT_WEXP(Range(0, 20), Range(max - 200, 20));
+ EXPECT_WEXP(Range(0, 20), T::Any());
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 101, max - 80));
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 100, max - 79));
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 101, max - 79));
+ EXPECT_WEXP(Range(max - 100, max - 80), Range(max - 200, 20));
+ EXPECT_WEXP(Range(max - 100, max - 80), T::Any());
+ EXPECT_WEXP(Range(max - 20, 0), Range(max - 20, 1));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 20, 21));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 21, 20));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 21, 21));
+ EXPECT_WEXP(Range(max - 20, 20), Range(max - 2000, 2000));
+ EXPECT_WEXP(Range(max - 20, 20), T::Any());
+
+#undef EXPECT_WEXP
+}
+
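The DoubledInSize check encodes why exponential widening is useful: if each widening step at least doubles the size of the type, a fixpoint over a 64-bit word space is reached in at most 64 widenings. A standalone sketch of that bound (plain C++, illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  // Start with a range of size 1 and double until it covers everything.
  uint64_t size = 1;
  int steps = 0;
  while (size != 0) {  // size == 0 means it wrapped to 2^64, i.e. "Any"
    size *= 2;         // the growth the DoubledInSize check enforces
    ++steps;
  }
  assert(steps == 64);
  return 0;
}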
+#undef DEFINE_TEST_HELPERS
+
+using FloatTypes = ::testing::Types<Float32Type, Float64Type>;
+TYPED_TEST_SUITE(FloatTyperTest, FloatTypes);
+
+#define DEFINE_TEST_HELPERS() \
+ using T = TypeParam; \
+ using float_t = typename TestFixture::float_t; \
+ using Slices = Slices<T>; \
+ auto Constant = [&](float_t value) { return T::Constant(value); }; \
+ auto Set = [&](std::initializer_list<float_t> elements, \
+ uint32_t special_values = 0) { \
+ return T::Set(elements, special_values, this->zone()); \
+ }; \
+ auto Range = [&](float_t from, float_t to, uint32_t special_values = 0) { \
+ return T::Range(from, to, special_values, this->zone()); \
+ }; \
+ constexpr uint32_t kNaN = T::kNaN; \
+ constexpr uint32_t kMZ = T::kMinusZero; \
+ constexpr float_t nan = nan_v<TestFixture::Bits>; \
+ constexpr float_t inf = std::numeric_limits<float_t>::infinity(); \
+ USE(Slices{}, Constant, Set, Range); \
+ USE(kNaN, kMZ, nan, inf);
+
+TYPED_TEST(FloatTyperTest, Divide) {
+ DEFINE_TEST_HELPERS()
+#define EXPECT_DIV(lhs, rhs, result) \
+ EXPECT_LE(result, FloatOperationTyper<TestFixture::Bits>::Divide( \
+ lhs, rhs, this->zone()))
+
+ // 0 / x
+ EXPECT_DIV(Constant(0.0), T::Any(), Set({0}, kNaN | kMZ));
+ EXPECT_DIV(T::MinusZero(), T::Any(), Set({0}, kNaN | kMZ));
+ EXPECT_DIV(Constant(0.0), Range(0.001, inf), Constant(0));
+ EXPECT_DIV(T::MinusZero(), Range(0.001, inf), T::MinusZero());
+ EXPECT_DIV(Constant(0.0), Range(-inf, -0.001), T::MinusZero());
+ EXPECT_DIV(T::MinusZero(), Range(-inf, -0.001), Constant(0));
+ EXPECT_DIV(Set({0.0}, kMZ), Constant(3), Set({0}, kMZ));
+ EXPECT_DIV(Set({0.0}), Set({-2.5, 0.0, 1.5}), Set({0.0}, kNaN | kMZ));
+ EXPECT_DIV(Set({0.0}, kMZ), Set({-2.5, 0.0, 1.5}), Set({0.0}, kNaN | kMZ));
+ EXPECT_DIV(Set({0.0}), Set({1.5}, kMZ), Set({0.0}, kNaN));
+ EXPECT_DIV(Set({0.0}, kMZ), Set({1.5}, kMZ), Set({0.0}, kNaN | kMZ));
+
+ // x / 0
+ EXPECT_DIV(Constant(1.0), Constant(0), Constant(inf));
+ EXPECT_DIV(Constant(1.0), T::MinusZero(), Constant(-inf));
+ EXPECT_DIV(Constant(inf), Constant(0), Constant(inf));
+ EXPECT_DIV(Constant(inf), T::MinusZero(), Constant(-inf));
+ EXPECT_DIV(Constant(-1.0), Constant(0), Constant(-inf));
+ EXPECT_DIV(Constant(-1.0), T::MinusZero(), Constant(inf));
+ EXPECT_DIV(Constant(-inf), Constant(0), Constant(-inf));
+ EXPECT_DIV(Constant(-inf), T::MinusZero(), Constant(inf));
+ EXPECT_DIV(Constant(1.5), Set({0.0}, kMZ), Set({-inf, inf}));
+ EXPECT_DIV(Constant(-1.5), Set({0.0}, kMZ), Set({-inf, inf}));
+ EXPECT_DIV(Set({1.5}, kMZ), Set({0.0}, kMZ), Set({-inf, inf}, kNaN));
+ EXPECT_DIV(Set({-1.5}, kMZ), Set({0.0}, kMZ), Set({-inf, inf}, kNaN));
+
+ // 0 / 0
+ EXPECT_DIV(Constant(0), Constant(0), T::NaN());
+ EXPECT_DIV(Constant(0), T::MinusZero(), T::NaN());
+ EXPECT_DIV(T::MinusZero(), Constant(0), T::NaN());
+ EXPECT_DIV(T::MinusZero(), T::MinusZero(), T::NaN());
+ EXPECT_DIV(Set({0}, kMZ), Set({1}, kMZ), Set({0}, kNaN | kMZ));
+
+ // inf / inf
+ EXPECT_DIV(Constant(inf), Constant(inf), T::NaN());
+ EXPECT_DIV(Constant(inf), Constant(-inf), T::NaN());
+ EXPECT_DIV(Constant(-inf), Constant(inf), T::NaN());
+ EXPECT_DIV(Constant(-inf), Constant(-inf), T::NaN());
+ EXPECT_DIV(Set({-inf, inf}), Constant(inf), T::NaN());
+ EXPECT_DIV(Set({-inf, inf}), Constant(-inf), T::NaN());
+ EXPECT_DIV(Set({-inf, inf}), Set({-inf, inf}), T::NaN());
+}
+
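The expectations above mirror IEEE-754 division semantics for zeros, signed zeros, infinities, and NaN. A standalone check of the cases exercised most often (plain C++, not the typer; assumes IEEE-754 doubles):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  static_assert(std::numeric_limits<double>::is_iec559,
                "requires IEEE-754 doubles");
  const double inf = std::numeric_limits<double>::infinity();
  double pz = 0.0, nz = -0.0, one = 1.0;
  assert(one / pz == inf);           // x / +0, x > 0  ->  +inf
  assert(one / nz == -inf);          // x / -0, x > 0  ->  -inf
  assert(std::isnan(pz / pz));       // 0 / 0          ->  NaN
  assert(std::isnan(inf / inf));     // inf / inf      ->  NaN
  assert(std::signbit(pz / -2.0));   // +0 / negative  ->  -0
  assert(!std::signbit(nz / -2.0));  // -0 / negative  ->  +0
  return 0;
}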
+#undef DEFINE_TEST_HELPERS
+
+} // namespace v8::internal::compiler::turboshaft
diff --git a/deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc
new file mode 100644
index 0000000000..284a711967
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/turboshaft/turboshaft-types-unittest.cc
@@ -0,0 +1,787 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/turboshaft/types.h"
+#include "src/handles/handles.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8::internal::compiler::turboshaft {
+
+class TurboshaftTypesTest : public TestWithNativeContextAndZone {
+ public:
+ using Kind = Type::Kind;
+ CanonicalHandleScope canonical;
+
+ TurboshaftTypesTest()
+ : TestWithNativeContextAndZone(), canonical(isolate()) {}
+};
+
+TEST_F(TurboshaftTypesTest, Word32) {
+ const auto max_value = std::numeric_limits<Word32Type::word_t>::max();
+
+ // Complete range
+ {
+ Word32Type t = Word32Type::Any();
+ EXPECT_TRUE(Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({3, 9, max_value - 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(0, 10, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(800, 1200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(1, max_value - 1, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(0, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(max_value - 20, 20, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(1000, 999, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (non-wrapping)
+ {
+ Word32Type t = Word32Type::Range(100, 300, zone());
+ EXPECT_TRUE(!Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(99).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(100).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(250).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(300).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(301).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 150}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({99, 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({100, 105}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({150, 200, 250}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({150, 300}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({300, 301}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(50, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(99, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(100, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(150, 250, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(250, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(250, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(99, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(800, 9000, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(max_value - 100, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(250, 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (wrapping)
+ {
+ const auto large_value = max_value - 1000;
+ Word32Type t = Word32Type::Range(large_value, 800, zone());
+ EXPECT_TRUE(Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(801).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(5000).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(large_value - 1).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(large_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(large_value + 5).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, 800}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 801}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 600, 900}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({100, max_value - 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({large_value - 1, large_value + 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word32Type::Set({large_value, large_value + 5, max_value - 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(0, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(100, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(0, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(200, max_value - 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(large_value - 1, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word32Type::Range(large_value, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(large_value + 100, max_value - 100, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Range(large_value, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word32Type::Range(large_value + 100, 700, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(large_value - 1, 799, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word32Type::Range(large_value + 1, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(5000, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set
+ {
+ CHECK_GT(Word32Type::kMaxSetSize, 2);
+ Word32Type t = Word32Type::Set({4, 890}, zone());
+ EXPECT_TRUE(!Word32Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(3).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(4).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(5).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Constant(889).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Constant(890).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 4}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({4, 90}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word32Type::Set({4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({0, 4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({4, 890, 1000}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Set({890, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(0, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(4, 890, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(800, 900, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(800, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(890, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Range(max_value - 5, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word32Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+}
+
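The "Range (wrapping)" block above tests membership in a range whose `from` endpoint is larger than its `to` endpoint, i.e. one that wraps around the top of the word space. A standalone sketch of that membership rule (plain C++, not the Type implementation; the rule v >= from || v <= to is the assumption being illustrated):

#include <cassert>
#include <cstdint>

// Membership in a wrapping range [from, to] with from > to.
bool InWrappingRange(uint32_t v, uint32_t from, uint32_t to) {
  return v >= from || v <= to;
}

int main() {
  const uint32_t max = 0xFFFFFFFFu;
  const uint32_t from = max - 1000, to = 800;  // mirrors the test's bounds
  assert(InWrappingRange(0, from, to));
  assert(InWrappingRange(800, from, to));
  assert(!InWrappingRange(801, from, to));
  assert(!InWrappingRange(from - 1, from, to));
  assert(InWrappingRange(max, from, to));
  return 0;
}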
+TEST_F(TurboshaftTypesTest, Word64) {
+ const auto max_value = std::numeric_limits<Word64Type::word_t>::max();
+
+ // Complete range
+ {
+ Word64Type t = Word64Type::Any();
+ EXPECT_TRUE(Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({3, 9, max_value - 1}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(0, 10, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(800, 1200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(1, max_value - 1, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(0, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(max_value - 20, 20, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(1000, 999, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (non-wrapping)
+ {
+ Word64Type t = Word64Type::Range(100, 300, zone());
+ EXPECT_TRUE(!Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(99).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(100).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(250).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(300).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(301).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 150}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({99, 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({100, 105}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({150, 200, 250}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({150, 300}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({300, 301}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(50, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(99, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(100, 150, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(150, 250, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(250, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(250, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(99, 301, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(800, 9000, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(max_value - 100, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(250, 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (wrapping)
+ {
+ const auto large_value = max_value - 1000;
+ Word64Type t = Word64Type::Range(large_value, 800, zone());
+ EXPECT_TRUE(Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(800).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(801).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(5000).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(large_value - 1).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(large_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(large_value + 5).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(max_value).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, 800}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 801}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 600, 900}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({0, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({100, max_value - 100}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({large_value - 1, large_value + 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word64Type::Set({large_value, large_value + 5, max_value - 5}, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(0, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(100, 300, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(0, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(200, max_value - 200, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(large_value - 1, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word64Type::Range(large_value, max_value, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(large_value + 100, max_value - 100, zone())
+ .IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Range(large_value, 800, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Word64Type::Range(large_value + 100, 700, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(large_value - 1, 799, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Word64Type::Range(large_value + 1, 801, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(5000, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set
+ {
+ CHECK_GT(Word64Type::kMaxSetSize, 2);
+ Word64Type t = Word64Type::Set({4, 890}, zone());
+ EXPECT_TRUE(!Word64Type::Constant(0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(3).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(4).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(5).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Constant(889).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Constant(890).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 4}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({4, 90}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Word64Type::Set({4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({0, 4, 890}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({4, 890, 1000}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Set({890, max_value}, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(0, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(4, 890, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(800, 900, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(800, 100, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(890, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Range(max_value - 5, 4, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Word64Type::Any().IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float32) {
+ const auto large_value =
+ std::numeric_limits<Float32Type::float_t>::max() * 0.99f;
+ const auto inf = std::numeric_limits<Float32Type::float_t>::infinity();
+ const auto kNaN = Float32Type::kNaN;
+ const auto kMinusZero = Float32Type::kMinusZero;
+ const auto kNoSpecialValues = Float32Type::kNoSpecialValues;
+
+ // Complete range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t = Float32Type::Any(kNaN | kMinusZero);
+ EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(391.113f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({0.13f, 91.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Float32Type::Set({-100.4f, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(0.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(-inf, 12.3f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Complete range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t = Float32Type::Any(kMinusZero);
+ EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(391.113f).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({0.13f, 91.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(
+ !with_nan,
+ Float32Type::Set({-100.4f, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(0.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(-inf, 12.3f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t =
+ Float32Type::Range(-1.0f, 3.14159f, kNaN | kMinusZero, zone());
+ EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-0.99f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.15f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-0.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.1f, 1.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-0.9f, 1.88f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({0.0f, 3.142f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.3f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, 0.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(-1.0f, 1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Range(0.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(0.0f, 3.142f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float32Type t = Float32Type::Range(-1.0f, 3.14159f, kMinusZero, zone());
+ EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-0.99f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.15f).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float32Type::Set({-0.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.1f, 1.5f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({-0.9f, 1.88f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({0.0f, 3.142f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.3f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, 0.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(-1.0f, 1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Range(0.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(0.0f, 3.142f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.0f, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float32Type t = Float32Type::Set({-1.0f, 3.14159f}, kNaN, zone());
+ EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.1415f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Set({-1.0f, 3.14159f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Set({3.14159f, 3.1416f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Range(-1.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.14159f, 4.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float32Type t =
+ Float32Type::Set({-1.0f, 3.14159f}, kNoSpecialValues, zone());
+ EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(1.0f).IsSubtypeOf(t));
+ EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(3.1415f).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-inf, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float32Type::Set({-1.0f, 3.14159f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Set({3.14159f, 3.1416f}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-inf, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(-1.01f, -1.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float32Type::Range(-1.0f, 3.14159f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Range(3.14159f, 4.0f, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float32Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // -0.0f corner cases
+ {
+ EXPECT_TRUE(!Float32Type::MinusZero().IsSubtypeOf(
+ Float32Type::Set({0.0f, 1.0f}, zone())));
+ EXPECT_TRUE(
+ !Float32Type::Constant(0.0f).IsSubtypeOf(Float32Type::MinusZero()));
+ EXPECT_TRUE(
+ Float32Type::Set({3.2f}, kMinusZero, zone())
+ .IsSubtypeOf(Float32Type::Range(0.0f, 4.0f, kMinusZero, zone())));
+ EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, kMinusZero, zone())
+ .IsSubtypeOf(Float32Type::Range(-inf, 0.0f, zone())));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float64) {
+ const auto large_value =
+ std::numeric_limits<Float64Type::float_t>::max() * 0.99;
+ const auto inf = std::numeric_limits<Float64Type::float_t>::infinity();
+ const auto kNaN = Float64Type::kNaN;
+ const auto kMinusZero = Float64Type::kMinusZero;
+ const auto kNoSpecialValues = Float64Type::kNoSpecialValues;
+
+ // Complete range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t = Float64Type::Any(kNaN | kMinusZero);
+ EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(391.113).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({0.13, 91.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ Float64Type::Set({-100.4, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(0.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(-inf, 12.3, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Complete range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t = Float64Type::Any(kMinusZero);
+ EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(391.113).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({0.13, 91.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(
+ !with_nan,
+ Float64Type::Set({-100.4, large_value}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({-inf, inf}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(0.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(-inf, 12.3, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(-inf, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t =
+ Float64Type::Range(-1.0, 3.14159, kNaN | kMinusZero, zone());
+ EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-0.99).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.15).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-0.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.1, 1.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-0.9, 1.88}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({0.0, 3.142}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.3}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, 0.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(-1.0, 1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Range(0.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(0.0, 3.142, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Range (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
+ Float64Type t = Float64Type::Range(-1.0, 3.14159, kMinusZero, zone());
+ EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-0.99).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.15).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan, Float64Type::Set({-0.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.1, 1.5}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({-0.9, 1.88}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({0.0, 3.142}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.3}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, 0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, 0.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(-1.0, 1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Range(0.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(0.0, 3.142, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.0, inf, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (with NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float64Type t = Float64Type::Set({-1.0, 3.14159}, kNaN, zone());
+ EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.1415).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.0, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Set({-1.0, 3.14159}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float64Type::Set({3.14159, 3.1416}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.14159, 4.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // Set (without NaN)
+ for (bool with_nan : {false, true}) {
+ uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
+ Float64Type t = Float64Type::Set({-1.0, 3.14159}, kNoSpecialValues, zone());
+ EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(1.0).IsSubtypeOf(t));
+ EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(3.1415).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Constant(inf).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-inf, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Set({-1.0, 0.0}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_EQ(!with_nan,
+ Float64Type::Set({-1.0, 3.14159}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(
+ !Float64Type::Set({3.14159, 3.1416}, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-inf, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.01, -1.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(-1.0, 3.14159, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Range(3.14159, 4.0, sv, zone()).IsSubtypeOf(t));
+ EXPECT_TRUE(!Float64Type::Any(sv).IsSubtypeOf(t));
+ EXPECT_TRUE(t.IsSubtypeOf(t));
+ }
+
+ // -0.0 corner cases
+ {
+ EXPECT_TRUE(!Float64Type::MinusZero().IsSubtypeOf(
+ Float64Type::Set({0.0, 1.0}, zone())));
+ EXPECT_TRUE(
+ !Float64Type::Constant(0.0).IsSubtypeOf(Float64Type::MinusZero()));
+ EXPECT_TRUE(
+ Float64Type::Set({3.2}, kMinusZero, zone())
+ .IsSubtypeOf(Float64Type::Range(0.0, 4.0, kMinusZero, zone())));
+ EXPECT_TRUE(
+ Float64Type::Set({0.0}, kMinusZero, zone())
+ .IsSubtypeOf(Float64Type::Range(-inf, 0.0, kMinusZero, zone())));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Word32LeastUpperBound) {
+ auto CheckLubIs = [&](const Word32Type& lhs, const Word32Type& rhs,
+ const Word32Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Word32Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+
+ {
+ const auto lhs = Word32Type::Range(100, 400, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Word32Type::Range(50, 350, zone()),
+ Word32Type::Range(50, 400, zone()));
+ CheckLubIs(lhs, Word32Type::Range(150, 600, zone()),
+ Word32Type::Range(100, 600, zone()));
+ CheckLubIs(lhs, Word32Type::Range(150, 350, zone()), lhs);
+ CheckLubIs(lhs, Word32Type::Range(350, 0, zone()),
+ Word32Type::Range(100, 0, zone()));
+ CheckLubIs(lhs, Word32Type::Range(400, 100, zone()), Word32Type::Any());
+ CheckLubIs(lhs, Word32Type::Range(600, 0, zone()),
+ Word32Type::Range(600, 400, zone()));
+ CheckLubIs(lhs, Word32Type::Range(300, 150, zone()), Word32Type::Any());
+ }
+
+ {
+ const auto lhs = Word32Type::Constant(18);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Word32Type::Constant(1119),
+ Word32Type::Set({18, 1119}, zone()));
+ CheckLubIs(lhs, Word32Type::Constant(0), Word32Type::Set({0, 18}, zone()));
+ CheckLubIs(lhs, Word32Type::Range(40, 100, zone()),
+ Word32Type::Range(18, 100, zone()));
+ CheckLubIs(lhs, Word32Type::Range(4, 90, zone()),
+ Word32Type::Range(4, 90, zone()));
+ CheckLubIs(lhs, Word32Type::Set({0, 1, 2, 3}, zone()),
+ Word32Type::Set({0, 1, 2, 3, 18}, zone()));
+ CheckLubIs(
+ lhs, Word32Type::Constant(std::numeric_limits<uint32_t>::max()),
+ Word32Type::Set({18, std::numeric_limits<uint32_t>::max()}, zone()));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Word64LeastUpperBound) {
+ auto CheckLubIs = [&](const Word64Type& lhs, const Word64Type& rhs,
+ const Word64Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Word64Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+
+ {
+ const auto lhs = Word64Type::Range(100, 400, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Word64Type::Range(50, 350, zone()),
+ Word64Type::Range(50, 400, zone()));
+ CheckLubIs(lhs, Word64Type::Range(150, 600, zone()),
+ Word64Type::Range(100, 600, zone()));
+ CheckLubIs(lhs, Word64Type::Range(150, 350, zone()), lhs);
+ CheckLubIs(lhs, Word64Type::Range(350, 0, zone()),
+ Word64Type::Range(100, 0, zone()));
+ CheckLubIs(lhs, Word64Type::Range(400, 100, zone()), Word64Type::Any());
+ CheckLubIs(lhs, Word64Type::Range(600, 0, zone()),
+ Word64Type::Range(600, 400, zone()));
+ CheckLubIs(lhs, Word64Type::Range(300, 150, zone()), Word64Type::Any());
+ }
+
+ {
+ const auto lhs = Word64Type::Constant(18);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Word64Type::Constant(1119),
+ Word64Type::Set({18, 1119}, zone()));
+ CheckLubIs(lhs, Word64Type::Constant(0), Word64Type::Set({0, 18}, zone()));
+ CheckLubIs(lhs, Word64Type::Range(40, 100, zone()),
+ Word64Type::Range(18, 100, zone()));
+ CheckLubIs(lhs, Word64Type::Range(4, 90, zone()),
+ Word64Type::Range(4, 90, zone()));
+ CheckLubIs(lhs, Word64Type::Range(0, 3, zone()),
+ Word64Type::Set({0, 1, 2, 3, 18}, zone()));
+ CheckLubIs(
+ lhs, Word64Type::Constant(std::numeric_limits<uint64_t>::max()),
+ Word64Type::Set({18, std::numeric_limits<uint64_t>::max()}, zone()));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float32LeastUpperBound) {
+ auto CheckLubIs = [&](const Float32Type& lhs, const Float32Type& rhs,
+ const Float32Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Float32Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+ const auto kNaN = Float32Type::kNaN;
+
+ {
+ const auto lhs = Float32Type::Range(-32.19f, 94.07f, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float32Type::Range(-32.19f, 94.07f, kNaN, zone()),
+ Float32Type::Range(-32.19f, 94.07f, kNaN, zone()));
+ CheckLubIs(lhs, Float32Type::NaN(),
+ Float32Type::Range(-32.19f, 94.07f, kNaN, zone()));
+ CheckLubIs(lhs, Float32Type::Constant(0.0f), lhs);
+ CheckLubIs(lhs, Float32Type::Range(-19.9f, 31.29f, zone()), lhs);
+ CheckLubIs(lhs, Float32Type::Range(-91.22f, -40.0f, zone()),
+ Float32Type::Range(-91.22f, 94.07f, zone()));
+ CheckLubIs(lhs, Float32Type::Range(0.0f, 1993.0f, zone()),
+ Float32Type::Range(-32.19f, 1993.0f, zone()));
+ CheckLubIs(lhs, Float32Type::Range(-100.0f, 100.0f, kNaN, zone()),
+ Float32Type::Range(-100.0f, 100.0f, kNaN, zone()));
+ }
+
+ {
+ const auto lhs = Float32Type::Constant(-0.04f);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float32Type::NaN(),
+ Float32Type::Set({-0.04f}, kNaN, zone()));
+ CheckLubIs(lhs, Float32Type::Constant(17.14f),
+ Float32Type::Set({-0.04f, 17.14f}, zone()));
+ CheckLubIs(lhs, Float32Type::Range(-75.4f, -12.7f, zone()),
+ Float32Type::Range(-75.4f, -0.04f, zone()));
+ CheckLubIs(lhs, Float32Type::Set({0.04f}, kNaN, zone()),
+ Float32Type::Set({-0.04f, 0.04f}, kNaN, zone()));
+ }
+}
+
+TEST_F(TurboshaftTypesTest, Float64LeastUpperBound) {
+ auto CheckLubIs = [&](const Float64Type& lhs, const Float64Type& rhs,
+ const Float64Type& expected) {
+ EXPECT_TRUE(
+ expected.IsSubtypeOf(Float64Type::LeastUpperBound(lhs, rhs, zone())));
+ };
+ const auto kNaN = Float64Type::kNaN;
+
+ {
+ const auto lhs = Float64Type::Range(-32.19, 94.07, zone());
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float64Type::Range(-32.19, 94.07, kNaN, zone()),
+ Float64Type::Range(-32.19, 94.07, kNaN, zone()));
+ CheckLubIs(lhs, Float64Type::NaN(),
+ Float64Type::Range(-32.19, 94.07, kNaN, zone()));
+ CheckLubIs(lhs, Float64Type::Constant(0.0), lhs);
+ CheckLubIs(lhs, Float64Type::Range(-19.9, 31.29, zone()), lhs);
+ CheckLubIs(lhs, Float64Type::Range(-91.22, -40.0, zone()),
+ Float64Type::Range(-91.22, 94.07, zone()));
+ CheckLubIs(lhs, Float64Type::Range(0.0, 1993.0, zone()),
+ Float64Type::Range(-32.19, 1993.0, zone()));
+ CheckLubIs(lhs, Float64Type::Range(-100.0, 100.0, kNaN, zone()),
+ Float64Type::Range(-100.0, 100.0, kNaN, zone()));
+ }
+
+ {
+ const auto lhs = Float64Type::Constant(-0.04);
+ CheckLubIs(lhs, lhs, lhs);
+ CheckLubIs(lhs, Float64Type::NaN(),
+ Float64Type::Set({-0.04}, kNaN, zone()));
+ CheckLubIs(lhs, Float64Type::Constant(17.14),
+ Float64Type::Set({-0.04, 17.14}, zone()));
+ CheckLubIs(lhs, Float64Type::Range(-75.4, -12.7, zone()),
+ Float64Type::Range(-75.4, -0.04, zone()));
+ CheckLubIs(lhs, Float64Type::Set({0.04}, kNaN, zone()),
+ Float64Type::Set({-0.04, 0.04}, kNaN, zone()));
+ }
+}
+
+} // namespace v8::internal::compiler::turboshaft
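The range expectations in the Word32/Word64 tests above rely on ranges being allowed to wrap around the maximum value: Range(from, to) with from > to is read as [from, max] ∪ [0, to]. Below is a minimal standalone sketch of that membership rule; it is not V8's Turboshaft implementation, and the concrete constants are illustrative only.

#include <cassert>
#include <cstdint>
#include <limits>

// Wrap-around range over uint64_t: when from > to, the range covers
// [from, max] as well as [0, to], matching the subtype expectations above.
struct WrappingRange {
  uint64_t from;
  uint64_t to;
  bool Contains(uint64_t v) const {
    if (from <= to) return from <= v && v <= to;  // ordinary interval
    return v >= from || v <= to;                  // wrapping interval
  }
};

int main() {
  constexpr uint64_t kMax = std::numeric_limits<uint64_t>::max();
  const uint64_t large_value = kMax - 1000;  // illustrative stand-in
  const WrappingRange t{large_value, 800};
  assert(t.Contains(100));               // low part of the wrapped range
  assert(t.Contains(kMax));              // high part of the wrapped range
  assert(!t.Contains(801));              // just past the low bound
  assert(!t.Contains(large_value - 1));  // just below the high bound
  return 0;
}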
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index bede5d5441..1a76aa12d4 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -5,6 +5,7 @@
#include <functional>
#include "src/base/overflowing-math.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
@@ -23,6 +24,7 @@ class TyperTest : public TypedGraphTest {
TyperTest()
: TypedGraphTest(3),
broker_(isolate(), zone()),
+ current_broker_(&broker_),
operation_typer_(&broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
@@ -57,6 +59,7 @@ class TyperTest : public TypedGraphTest {
const int kRepetitions = 50;
JSHeapBroker broker_;
+ CurrentHeapBrokerScope current_broker_;
OperationTyper operation_typer_;
Types types_;
JSOperatorBuilder javascript_;
@@ -233,11 +236,10 @@ class TyperTest : public TypedGraphTest {
double x1 = RandomInt(r1.AsRange());
double x2 = RandomInt(r2.AsRange());
bool result_value = opfun(x1, x2);
- Type result_type =
- Type::Constant(&broker_,
- result_value ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value(),
- zone());
+ Type result_type = Type::Constant(
+ &broker_,
+ result_value ? broker_.true_value() : broker_.false_value(),
+ zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
}
diff --git a/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc b/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc
index 135f1cbe23..42700518b1 100644
--- a/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc
+++ b/deps/v8/test/unittests/deoptimizer/deoptimization-unittest.cc
@@ -141,7 +141,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimple) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
// Test lazy deoptimization of a simple function. Call the function after the
// deoptimization while it is still activated further down the stack.
@@ -157,7 +156,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimple) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeSimpleWithArguments) {
@@ -178,7 +176,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimpleWithArguments) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
// Test lazy deoptimization of a simple function with some arguments. Call the
// function after the deoptimization while it is still activated further down
@@ -195,7 +192,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimpleWithArguments) {
CheckJsInt32(1, "count", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeSimpleNested) {
@@ -218,7 +214,6 @@ TEST_F(DeoptimizationTest, DeoptimizeSimpleNested) {
CheckJsInt32(1, "count", context());
CheckJsInt32(6, "result", context());
CHECK(!GetJSFunction("f")->HasAttachedOptimizedCode());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
}
@@ -241,7 +236,6 @@ TEST_F(DeoptimizationTest, DeoptimizeRecursive) {
CheckJsInt32(1, "count", context());
CheckJsInt32(11, "calls", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
context()->Global()->Get(context(), NewString("f")).ToLocalChecked());
@@ -272,7 +266,6 @@ TEST_F(DeoptimizationTest, DeoptimizeMultiple) {
CheckJsInt32(1, "count", context());
CheckJsInt32(14, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeConstructor) {
@@ -296,7 +289,6 @@ TEST_F(DeoptimizationTest, DeoptimizeConstructor) {
->Get(context(), NewString("result"))
.ToLocalChecked()
->IsTrue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -313,7 +305,6 @@ TEST_F(DeoptimizationTest, DeoptimizeConstructor) {
CheckJsInt32(1, "count", context());
CheckJsInt32(3, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationTest, DeoptimizeConstructorMultiple) {
@@ -341,7 +332,6 @@ TEST_F(DeoptimizationTest, DeoptimizeConstructorMultiple) {
CheckJsInt32(1, "count", context());
CheckJsInt32(14, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
class DeoptimizationDisableConcurrentRecompilationTest
@@ -439,7 +429,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CHECK(result->IsString());
v8::String::Utf8Value utf8(isolate(), result);
CHECK_EQ(0, strcmp("a+an X", *utf8));
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -451,7 +440,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(15, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -463,7 +451,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(-1, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -476,7 +463,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(56, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -488,7 +474,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(0, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
@@ -500,7 +485,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
CheckJsInt32(1, "count", context());
CheckJsInt32(7, "result", context());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest, DeoptimizeCompare) {
@@ -550,7 +534,6 @@ TEST_F(DeoptimizationDisableConcurrentRecompilationTest, DeoptimizeCompare) {
->Get(context(), NewString("result"))
.ToLocalChecked()
->IsTrue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate()));
}
TEST_F(DeoptimizationDisableConcurrentRecompilationTest,
diff --git a/deps/v8/test/unittests/flags/flag-definitions-unittest.cc b/deps/v8/test/unittests/flags/flag-definitions-unittest.cc
index 89022cc95e..5797fd2e11 100644
--- a/deps/v8/test/unittests/flags/flag-definitions-unittest.cc
+++ b/deps/v8/test/unittests/flags/flag-definitions-unittest.cc
@@ -221,5 +221,19 @@ TEST_F(FlagDefinitionsTest, FreezeFlags) {
CHECK_EQ(42, *direct_testing_int_ptr);
}
+TEST_F(FlagDefinitionsTest, TestExperimentalImplications) {
+ // Check that experimental features are not staged behind --future/--harmony.
+ if (!v8_flags.experimental) {
+ int argc = 3;
+ const char* argv[] = {"", "--future", "--harmony"};
+ CHECK_EQ(0, FlagList::SetFlagsFromCommandLine(
+ &argc, const_cast<char**>(argv), true));
+ FlagList::EnforceFlagImplications();
+ CHECK(v8_flags.future);
+ CHECK(v8_flags.harmony);
+ CHECK(!v8_flags.experimental);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/allocation-observer-unittest.cc b/deps/v8/test/unittests/heap/allocation-observer-unittest.cc
index 6ddbe8fb17..bbeef0f41d 100644
--- a/deps/v8/test/unittests/heap/allocation-observer-unittest.cc
+++ b/deps/v8/test/unittests/heap/allocation-observer-unittest.cc
@@ -23,7 +23,7 @@ class UnusedObserver : public AllocationObserver {
TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
AllocationCounter counter;
- CHECK(!counter.IsActive());
+ CHECK_EQ(SIZE_MAX, counter.NextBytes());
UnusedObserver observer100(100);
UnusedObserver observer200(200);
@@ -41,7 +41,7 @@ TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
CHECK_EQ(counter.NextBytes(), 110);
counter.RemoveAllocationObserver(&observer200);
- CHECK(!counter.IsActive());
+ CHECK_EQ(SIZE_MAX, counter.NextBytes());
}
namespace {
@@ -77,7 +77,7 @@ class VerifyStepObserver : public AllocationObserver {
TEST(AllocationObserverTest, Step) {
AllocationCounter counter;
- CHECK(!counter.IsActive());
+ CHECK_EQ(SIZE_MAX, counter.NextBytes());
const Address kSomeObjectAddress = 8;
VerifyStepObserver observer100(100);
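The replacement of the IsActive() checks above relies on AllocationCounter::NextBytes() reporting SIZE_MAX once no observer is installed. A rough standalone sketch of that sentinel behaviour follows; it is hypothetical and deliberately simplified, not V8's AllocationCounter.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy counter: the next allocation-step threshold is the minimum of the
// registered step sizes, and SIZE_MAX serves as the "no observers" sentinel.
class ToyAllocationCounter {
 public:
  void AddObserver(size_t step_bytes) { steps_.push_back(step_bytes); }
  void RemoveObserver(size_t step_bytes) {
    steps_.erase(std::find(steps_.begin(), steps_.end(), step_bytes));
  }
  size_t NextBytes() const {
    size_t next = SIZE_MAX;
    for (size_t s : steps_) next = std::min(next, s);
    return next;
  }

 private:
  std::vector<size_t> steps_;
};

int main() {
  ToyAllocationCounter counter;
  assert(counter.NextBytes() == SIZE_MAX);  // inactive: sentinel value
  counter.AddObserver(100);
  counter.AddObserver(200);
  assert(counter.NextBytes() == 100);       // smallest step wins
  counter.RemoveObserver(100);
  counter.RemoveObserver(200);
  assert(counter.NextBytes() == SIZE_MAX);  // back to inactive
  return 0;
}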
diff --git a/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc b/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc
index 7b10a01c49..128f2a22be 100644
--- a/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc
+++ b/deps/v8/test/unittests/heap/conservative-stack-visitor-unittest.cc
@@ -23,9 +23,9 @@ class RecordingVisitor final : public RootVisitor {
inner_address_ = base_address_ + 42 * kTaggedSize;
#ifdef V8_COMPRESS_POINTERS
compr_address_ = static_cast<uint32_t>(
- V8HeapCompressionScheme::CompressTagged(base_address_));
+ V8HeapCompressionScheme::CompressAny(base_address_));
compr_inner_ = static_cast<uint32_t>(
- V8HeapCompressionScheme::CompressTagged(inner_address_));
+ V8HeapCompressionScheme::CompressAny(inner_address_));
#else
compr_address_ = static_cast<uint32_t>(base_address_);
compr_inner_ = static_cast<uint32_t>(inner_address_);
@@ -86,7 +86,7 @@ TEST_F(ConservativeStackVisitorTest, DirectBasePointer) {
volatile Address ptr = recorder->base_address();
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(kNullAddress, ptr);
@@ -107,7 +107,7 @@ TEST_F(ConservativeStackVisitorTest, TaggedBasePointer) {
volatile Address ptr = recorder->tagged_address();
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(kNullAddress, ptr);
@@ -128,7 +128,7 @@ TEST_F(ConservativeStackVisitorTest, InnerPointer) {
volatile Address ptr = recorder->inner_address();
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(kNullAddress, ptr);
@@ -151,7 +151,7 @@ TEST_F(ConservativeStackVisitorTest, HalfWord1) {
volatile uint32_t ptr[] = {recorder->compr_address(), 0};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[0]);
@@ -172,7 +172,7 @@ TEST_F(ConservativeStackVisitorTest, HalfWord2) {
volatile uint32_t ptr[] = {0, recorder->compr_address()};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[1]);
@@ -193,7 +193,7 @@ TEST_F(ConservativeStackVisitorTest, InnerHalfWord1) {
volatile uint32_t ptr[] = {recorder->compr_inner(), 0};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[0]);
@@ -214,7 +214,7 @@ TEST_F(ConservativeStackVisitorTest, InnerHalfWord2) {
volatile uint32_t ptr[] = {0, recorder->compr_inner()};
ConservativeStackVisitor stack_visitor(isolate(), recorder.get());
- isolate()->heap()->stack().IteratePointers(&stack_visitor);
+ heap()->stack().IteratePointers(&stack_visitor);
// Make sure to keep the pointer alive.
EXPECT_NE(static_cast<uint32_t>(0), ptr[1]);
diff --git a/deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc
new file mode 100644
index 0000000000..1759daf5d7
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc-js/embedder-roots-handler-unittest.cc
@@ -0,0 +1,287 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handles/handles.h"
+#include "src/handles/traced-handles.h"
+#include "test/unittests/heap/cppgc-js/unified-heap-utils.h"
+#include "test/unittests/heap/heap-utils.h"
+
+namespace v8::internal {
+
+namespace {
+
+constexpr uint16_t kClassIdToOptimize = 23;
+
+using EmbedderRootsHandlerTest = TestWithHeapInternalsAndContext;
+
+class V8_NODISCARD TemporaryEmbedderRootsHandleScope final {
+ public:
+ TemporaryEmbedderRootsHandleScope(v8::Isolate* isolate,
+ v8::EmbedderRootsHandler* handler)
+ : isolate_(isolate) {
+ isolate_->SetEmbedderRootsHandler(handler);
+ }
+
+ ~TemporaryEmbedderRootsHandleScope() {
+ isolate_->SetEmbedderRootsHandler(nullptr);
+ }
+
+ private:
+ v8::Isolate* const isolate_;
+};
+
+// EmbedderRootsHandler that can optimize Scavenger handling when used with
+// TracedReference.
+class ClearingEmbedderRootsHandler final : public v8::EmbedderRootsHandler {
+ public:
+ explicit ClearingEmbedderRootsHandler(uint16_t class_id_to_optimize)
+ : class_id_to_optimize_(class_id_to_optimize) {}
+
+ bool IsRoot(const v8::TracedReference<v8::Value>& handle) final {
+ return handle.WrapperClassId() != class_id_to_optimize_;
+ }
+
+ void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
+ if (handle.WrapperClassId() != class_id_to_optimize_) return;
+
+ // Convention (for test): Objects that are optimized have their first field
+ // set as a back pointer.
+ BasicTracedReference<v8::Value>* original_handle =
+ reinterpret_cast<BasicTracedReference<v8::Value>*>(
+ v8::Object::GetAlignedPointerFromInternalField(
+ handle.As<v8::Object>(), 0));
+ original_handle->Reset();
+ }
+
+ private:
+ const uint16_t class_id_to_optimize_;
+};
+
+template <typename T>
+void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
+ uint16_t optimized_class_id,
+ T* optimized_handle,
+ T* non_optimized_handle) {
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> optimized_object = WrapperHelper::CreateWrapper(
+ isolate->GetCurrentContext(), optimized_handle, nullptr);
+ EXPECT_TRUE(optimized_handle->IsEmpty());
+ *optimized_handle = T(isolate, optimized_object);
+ EXPECT_FALSE(optimized_handle->IsEmpty());
+ optimized_handle->SetWrapperClassId(optimized_class_id);
+
+ v8::Local<v8::Object> non_optimized_object = WrapperHelper::CreateWrapper(
+ isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(non_optimized_handle->IsEmpty());
+ *non_optimized_handle = T(isolate, non_optimized_object);
+ EXPECT_FALSE(non_optimized_handle->IsEmpty());
+}
+
+} // namespace
+
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceNoDestructorReclaimedOnScavenge) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap());
+
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+
+ auto* traced_handles = i_isolate()->traced_handles();
+ const size_t initial_count = traced_handles->used_node_count();
+ auto* optimized_handle = new v8::TracedReference<v8::Value>();
+ auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
+ SetupOptimizedAndNonOptimizedHandle(v8_isolate(), kClassIdToOptimize,
+ optimized_handle, non_optimized_handle);
+ EXPECT_EQ(initial_count + 2, traced_handles->used_node_count());
+ YoungGC();
+ EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
+ EXPECT_TRUE(optimized_handle->IsEmpty());
+ delete optimized_handle;
+ EXPECT_FALSE(non_optimized_handle->IsEmpty());
+ non_optimized_handle->Reset();
+ delete non_optimized_handle;
+ EXPECT_EQ(initial_count, traced_handles->used_node_count());
+}
+
+namespace {
+
+void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ v8::TracedReference<v8::Object>* handle) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ EXPECT_FALSE(object.IsEmpty());
+ *handle = v8::TracedReference<v8::Object>(isolate, object);
+ EXPECT_FALSE(handle->IsEmpty());
+}
+
+template <typename T>
+void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ T* global) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ EXPECT_FALSE(object.IsEmpty());
+ *global = T(isolate, object);
+ EXPECT_FALSE(global->IsEmpty());
+}
+
+enum class SurvivalMode { kSurvives, kDies };
+
+template <typename ModifierFunction, typename ConstructTracedReferenceFunction,
+ typename GCFunction>
+void TracedReferenceTest(v8::Isolate* isolate,
+ ConstructTracedReferenceFunction construct_function,
+ ModifierFunction modifier_function,
+ GCFunction gc_function, SurvivalMode survives) {
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate->heap());
+ v8::HandleScope scope(isolate);
+ auto* traced_handles = i_isolate->traced_handles();
+ const size_t initial_count = traced_handles->used_node_count();
+ auto gc_invisible_handle =
+ std::make_unique<v8::TracedReference<v8::Object>>();
+ construct_function(isolate, isolate->GetCurrentContext(),
+ gc_invisible_handle.get());
+ ASSERT_TRUE(IsNewObjectInCorrectGeneration(isolate, *gc_invisible_handle));
+ modifier_function(*gc_invisible_handle);
+ const size_t after_modification_count = traced_handles->used_node_count();
+ gc_function();
+ // Cannot check the handle as it is not explicitly cleared by the GC. Instead
+ // check the handles count.
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
+ after_modification_count == traced_handles->used_node_count());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies,
+ initial_count == traced_handles->used_node_count());
+}
+
+} // namespace
+
+TEST_F(EmbedderRootsHandlerTest, TracedReferenceWrapperClassId) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+
+ v8::TracedReference<v8::Object> traced;
+ ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &traced);
+ EXPECT_EQ(0, traced.WrapperClassId());
+ traced.SetWrapperClassId(17);
+ EXPECT_EQ(17, traced.WrapperClassId());
+}
+
+// EmbedderRootsHandler does not affect full GCs.
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectDiesOnFullGC) {
+ // When stressing incremental marking, a write barrier may keep the object
+ // alive.
+ if (v8_flags.stress_incremental_marking) return;
+
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](const TracedReference<v8::Object>&) {}, [this]() { FullGC(); },
+ SurvivalMode::kDies);
+}
+
+// EmbedderRootsHandler does not affect full GCs.
+TEST_F(
+ EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectDiesOnFullGCEvenWhenPointeeIsHeldAlive) {
+ ManualGCScope manual_gcs(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ // The TracedReference itself will die as it's not found by the full GC. The
+ // pointee will be kept alive through other means.
+ v8::Global<v8::Object> strong_global;
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [this, &strong_global](const TracedReference<v8::Object>& handle) {
+ v8::HandleScope scope(v8_isolate());
+ strong_global =
+ v8::Global<v8::Object>(v8_isolate(), handle.Get(v8_isolate()));
+ },
+ [this, &strong_global]() {
+ FullGC();
+ strong_global.Reset();
+ },
+ SurvivalMode::kDies);
+}
+
+// EmbedderRootsHandler does not affect non-API objects.
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesYoungGC) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
+ SurvivalMode::kSurvives);
+}
+
+// EmbedderRootsHandler does not affect non-API objects, even when the handle
+// has a wrapper class id that allows for reclamation.
+TEST_F(
+ EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSObjectSurvivesYoungGCWhenExcludedFromRoots) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSObject,
+ [](TracedReference<v8::Object>& handle) {
+ handle.SetWrapperClassId(kClassIdToOptimize);
+ },
+ [this]() { YoungGC(); }, SurvivalMode::kSurvives);
+}
+
+// EmbedderRootsHandler does not affect API objects for handles that have
+// their class ids not set up.
+TEST_F(EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
+ SurvivalMode::kSurvives);
+}
+
+// EmbedderRootsHandler resets API objects for handles whose class id is set up
+// for optimized handling.
+TEST_F(
+ EmbedderRootsHandlerTest,
+ TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
+ if (v8_flags.single_generation) return;
+
+ ManualGCScope manual_gc(i_isolate());
+ ClearingEmbedderRootsHandler handler(kClassIdToOptimize);
+ TemporaryEmbedderRootsHandleScope roots_handler_scope(v8_isolate(), &handler);
+ TracedReferenceTest(
+ v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
+ [this](TracedReference<v8::Object>& handle) {
+ handle.SetWrapperClassId(kClassIdToOptimize);
+ {
+ HandleScope handles(i_isolate());
+ auto local = handle.Get(v8_isolate());
+ local->SetAlignedPointerInInternalField(0, &handle);
+ }
+ },
+ [this]() { YoungGC(); }, SurvivalMode::kDies);
+}
+
+} // namespace v8::internal
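For embedders, the optimized-reclamation convention exercised by this new test is: tag the TracedReference with the class id the handler recognizes and store a back pointer to it in internal field 0, so ResetRoot() can clear the handle when the scavenger drops the wrapper. A hypothetical embedder-side fragment following that convention is sketched below; the field index, class id value, and the DroppableWrapper name are assumptions of the sketch, not a prescribed V8 API.

#include <cstdint>

#include "include/v8-isolate.h"
#include "include/v8-object.h"
#include "include/v8-traced-handle.h"

// Hypothetical wrapper owner mirroring the test's convention: the class id
// marks the handle as eligible for scavenger-time reclamation, and internal
// field 0 carries the back pointer used by ResetRoot().
class DroppableWrapper {
 public:
  static constexpr uint16_t kOptimizedClassId = 23;  // assumption for the sketch

  DroppableWrapper(v8::Isolate* isolate, v8::Local<v8::Object> wrapper)
      : handle_(isolate, wrapper) {
    handle_.SetWrapperClassId(kOptimizedClassId);
    wrapper->SetAlignedPointerInInternalField(0, &handle_);
  }

  // After a young GC that reclaims the wrapper, the roots handler resets the
  // handle through the back pointer and this returns true.
  bool dropped() const { return handle_.IsEmpty(); }

 private:
  v8::TracedReference<v8::Object> handle_;
};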
diff --git a/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc
index eec4069ad1..2c069ebdfd 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/traced-reference-unittest.cc
@@ -217,11 +217,11 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnConstruction) {
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
auto ref =
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
USE(ref);
- EXPECT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -238,9 +238,10 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnHeapReset) {
auto ref = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref->Reset(v8_isolate(), local);
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -257,9 +258,10 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnStackReset) {
v8::TracedReference<v8::Object> ref;
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref.Reset(v8_isolate(), local);
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -278,10 +280,11 @@ TEST_F(TracedReferenceTest, WriteBarrierOnHeapCopy) {
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -300,10 +303,11 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnStackCopy) {
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -322,10 +326,11 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnHeapMove) {
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
@@ -344,10 +349,11 @@ TEST_F(TracedReferenceTest, WriteBarrierForOnStackMove) {
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkingState state(i_isolate());
- ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ ASSERT_TRUE(state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
- EXPECT_FALSE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
+ EXPECT_FALSE(
+ state.IsUnmarked(HeapObject::cast(*Utils::OpenHandle(*local))));
}
}
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
index 384eacdff3..3934eb8b00 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-unittest.cc
@@ -10,7 +10,6 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/internal/api-constants.h"
#include "include/cppgc/persistent.h"
-#include "include/cppgc/platform.h"
#include "include/cppgc/testing.h"
#include "include/libplatform/libplatform.h"
#include "include/v8-context.h"
@@ -19,7 +18,6 @@
#include "include/v8-object.h"
#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
-#include "src/base/platform/time.h"
#include "src/common/globals.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -28,8 +26,7 @@
#include "test/unittests/heap/cppgc-js/unified-heap-utils.h"
#include "test/unittests/heap/heap-utils.h"
-namespace v8 {
-namespace internal {
+namespace v8::internal {
namespace {
@@ -61,13 +58,11 @@ TEST_F(UnifiedHeapTest, OnlyGC) { CollectGarbageWithEmbedderStack(); }
TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
auto* wrappable_object =
cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
- v8::Local<v8::Object> api_object =
- WrapperHelper::CreateWrapper(context, &wrappable_type, wrappable_object);
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), &wrappable_type, wrappable_object);
Wrappable::destructor_callcount = 0;
EXPECT_FALSE(api_object.IsEmpty());
EXPECT_EQ(0u, Wrappable::destructor_callcount);
@@ -80,12 +75,11 @@ TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
TEST_F(UnifiedHeapTest, WriteBarrierV8ToCppReference) {
if (!v8_flags.incremental_marking) return;
+
v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
void* wrappable = cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
- v8::Local<v8::Object> api_object =
- WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr);
Wrappable::destructor_callcount = 0;
WrapperHelper::ResetWrappableConnection(api_object);
SimulateIncrementalMarking();
@@ -105,8 +99,6 @@ class Unreferenced : public cppgc::GarbageCollected<Unreferenced> {
TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
auto* unreferenced = cppgc::MakeGarbageCollected<Unreferenced>(
allocation_handle(),
cppgc::AdditionalBytes(cppgc::internal::api_constants::kMB));
@@ -134,8 +126,6 @@ TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
TEST_F(UnifiedHeapTest, TracedReferenceRetainsFromStack) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
TracedReference<v8::Object> holder;
{
v8::HandleScope inner_handle_scope(v8_isolate());
@@ -211,8 +201,7 @@ TEST_F(UnifiedHeapDetachedTest, StandaloneTestingHeap) {
heap.FinalizeGarbageCollection(cppgc::EmbedderStackState::kNoHeapPointers);
}
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal
namespace cppgc {
@@ -225,8 +214,7 @@ constexpr size_t CustomSpaceForTest::kSpaceIndex;
} // namespace cppgc
-namespace v8 {
-namespace internal {
+namespace v8::internal {
namespace {
@@ -267,8 +255,7 @@ class GCed final : public cppgc::GarbageCollected<GCed> {
};
} // namespace
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal
namespace cppgc {
template <>
@@ -278,8 +265,7 @@ struct SpaceTrait<v8::internal::GCed> {
} // namespace cppgc
-namespace v8 {
-namespace internal {
+namespace v8::internal {
namespace {
@@ -359,8 +345,6 @@ class InConstructionObjectReferringToGlobalHandle final
InConstructionObjectReferringToGlobalHandle(Heap* heap,
v8::Local<v8::Object> wrapper)
: wrapper_(reinterpret_cast<v8::Isolate*>(heap->isolate()), wrapper) {
- ScanStackModeScopeForTesting no_stack_scanning(heap,
- Heap::ScanStackMode::kNone);
heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
}
@@ -377,8 +361,6 @@ class InConstructionObjectReferringToGlobalHandle final
TEST_F(UnifiedHeapTest, InConstructionObjectReferringToGlobalHandle) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
{
v8::HandleScope inner_handle_scope(v8_isolate());
auto local = v8::Object::New(v8_isolate());
@@ -410,8 +392,6 @@ class ResetReferenceInDestructorObject final
TEST_F(UnifiedHeapTest, ResetReferenceInDestructor) {
v8::HandleScope handle_scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
{
v8::HandleScope inner_handle_scope(v8_isolate());
auto local = v8::Object::New(v8_isolate());
@@ -422,5 +402,308 @@ TEST_F(UnifiedHeapTest, ResetReferenceInDestructor) {
CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
}
-} // namespace internal
-} // namespace v8
+TEST_F(UnifiedHeapTest, OnStackReferencesAreTemporary) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::Global<v8::Object> observer;
+ {
+ v8::TracedReference<v8::Value> stack_ref;
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr);
+ stack_ref.Reset(v8_isolate(), api_object);
+ observer.Reset(v8_isolate(), api_object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<Isolate*>(v8_isolate())->heap());
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ }
+ EXPECT_TRUE(observer.IsEmpty());
+}
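The tests in this block share one observation idiom: a weak v8::Global that does not retain the object but whose IsEmpty() flips to true once the object has been reclaimed, paired with a scope that disables conservative stack scanning so stale on-stack slots cannot keep the object alive. A minimal sketch of the observer half, using only the public handle API and assuming a live isolate with an entered context:

    // Sketch; `isolate` is assumed to be a live v8::Isolate* with a context.
    v8::Global<v8::Object> observer;
    {
      v8::HandleScope scope(isolate);
      v8::Local<v8::Object> object = v8::Object::New(isolate);
      observer.Reset(isolate, object);
      observer.SetWeak();  // observe reclamation only; do not keep `object` alive
    }
    // ... trigger a garbage collection here ...
    const bool reclaimed = observer.IsEmpty();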
+
+TEST_F(UnifiedHeapTest, TracedReferenceOnStack) {
+ ManualGCScope manual_gc(i_isolate());
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_ref;
+ {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), nullptr, nullptr);
+ stack_ref.Reset(v8_isolate(), object);
+ observer.Reset(v8_isolate(), object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC();
+ EXPECT_FALSE(observer.IsEmpty());
+}
+
+namespace {
+
+enum class Operation {
+ kCopy,
+ kMove,
+};
+
+template <typename T>
+V8_NOINLINE void PerformOperation(Operation op, T* target, T* source) {
+ switch (op) {
+ case Operation::kMove:
+ *target = std::move(*source);
+ break;
+ case Operation::kCopy:
+ *target = *source;
+ source->Reset();
+ break;
+ }
+}
+
+enum class TargetHandling {
+ kNonInitialized,
+ kInitializedYoungGen,
+ kInitializedOldGen
+};
+
+class GCedWithHeapRef final : public cppgc::GarbageCollected<GCedWithHeapRef> {
+ public:
+ v8::TracedReference<v8::Value> heap_handle;
+
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(heap_handle); }
+};
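GCedWithHeapRef models the cppgc-to-V8 direction these helpers exercise: a C++ object on the managed heap keeping a V8 object alive through a traced member. A hedged usage sketch; the helper name is illustrative, and the isolate is assumed to already have a CppHeap attached:

    // Sketch; assumes `isolate` has a CppHeap and `obj` comes from an active
    // HandleScope. A real embedder would also root `holder`, e.g. through a
    // cppgc::Persistent, so that it stays reachable.
    void HoldFromCpp(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
      v8::CppHeap* cpp_heap = isolate->GetCppHeap();
      auto* holder = cppgc::MakeGarbageCollected<GCedWithHeapRef>(
          cpp_heap->GetAllocationHandle());
      // Unified-heap marking traces `heap_handle` whenever `holder` is
      // reachable, keeping `obj` alive across V8 and cppgc collections.
      holder->heap_handle.Reset(isolate, obj);
    }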
+
+V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::CppHeap* cpp_heap = v8_isolate->GetCppHeap();
+ cppgc::Persistent<GCedWithHeapRef> cpp_heap_obj =
+ cppgc::MakeGarbageCollected<GCedWithHeapRef>(
+ cpp_heap->GetAllocationHandle());
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(
+ IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!v8_flags.single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ FullGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ cpp_heap_obj->heap_handle.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ stack_handle.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &cpp_heap_obj->heap_handle, &stack_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ cpp_heap_obj.Clear();
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<i::Isolate*>(v8_isolate)->heap());
+ FullGC(v8_isolate);
+ }
+ ASSERT_TRUE(observer.IsEmpty());
+}
+
+V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle;
+ v8::CppHeap* cpp_heap = v8_isolate->GetCppHeap();
+ cppgc::Persistent<GCedWithHeapRef> cpp_heap_obj =
+ cppgc::MakeGarbageCollected<GCedWithHeapRef>(
+ cpp_heap->GetAllocationHandle());
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(
+ IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!v8_flags.single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ FullGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ stack_handle.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ cpp_heap_obj->heap_handle.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &stack_handle, &cpp_heap_obj->heap_handle);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ stack_handle.Reset();
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<i::Isolate*>(v8_isolate)->heap());
+ FullGC(v8_isolate);
+ }
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate, Operation op,
+ TargetHandling target_handling) {
+ v8::Global<v8::Object> observer;
+ v8::TracedReference<v8::Value> stack_handle1;
+ v8::TracedReference<v8::Value> stack_handle2;
+ if (target_handling != TargetHandling::kNonInitialized) {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> to_object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ EXPECT_TRUE(
+ IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
+ if (!v8_flags.single_generation &&
+ target_handling == TargetHandling::kInitializedOldGen) {
+ FullGC(v8_isolate);
+ EXPECT_FALSE(
+ i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
+ }
+ stack_handle2.Reset(v8_isolate, to_object);
+ }
+ {
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Object> object = WrapperHelper::CreateWrapper(
+ v8_isolate->GetCurrentContext(), nullptr, nullptr);
+ stack_handle1.Reset(v8_isolate, object);
+ observer.Reset(v8_isolate, object);
+ observer.SetWeak();
+ }
+ EXPECT_FALSE(observer.IsEmpty());
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ PerformOperation(op, &stack_handle2, &stack_handle1);
+ FullGC(v8_isolate);
+ EXPECT_FALSE(observer.IsEmpty());
+ stack_handle2.Reset();
+ {
+ // Conservative scanning may find stale pointers to on-stack handles.
+ // Disable scanning, assuming the slots are overwritten.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ reinterpret_cast<i::Isolate*>(v8_isolate)->heap());
+ FullGC(v8_isolate);
+ }
+ EXPECT_TRUE(observer.IsEmpty());
+}
+
+} // namespace
+
+TEST_F(UnifiedHeapTest, TracedReferenceMove) {
+ ManualGCScope manual_gc(i_isolate());
+ StackToHeapTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kNonInitialized);
+ StackToHeapTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kNonInitialized);
+ HeapToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kNonInitialized);
+ StackToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(v8_isolate(), Operation::kMove,
+ TargetHandling::kInitializedOldGen);
+}
+
+TEST_F(UnifiedHeapTest, TracedReferenceCopy) {
+ ManualGCScope manual_gc(i_isolate());
+ StackToHeapTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ StackToHeapTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToHeapTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ HeapToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ HeapToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ HeapToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+ StackToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kNonInitialized);
+ StackToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedYoungGen);
+ StackToStackTest(v8_isolate(), Operation::kCopy,
+ TargetHandling::kInitializedOldGen);
+}
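All nine stack/heap combinations above reduce to the same assignment contract on v8::TracedReference, sketched here under the same assumptions (`isolate` and a live `obj` as in the helpers): copying yields two references to the same object, while moving transfers the reference and leaves the source empty.

    // Sketch of the semantics PerformOperation exercises.
    v8::TracedReference<v8::Value> a;
    v8::TracedReference<v8::Value> b;
    a.Reset(isolate, obj);
    b = a;             // copy: `a` and `b` both reference `obj`
    a = std::move(b);  // move: `a` keeps `obj`, `b` is left empty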
+
+TEST_F(UnifiedHeapTest, TracingInEphemerons) {
+ // Tests that wrappers that are part of ephemerons are traced.
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
+ Wrappable::destructor_callcount = 0;
+
+ v8::Local<v8::Object> key =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ Handle<JSWeakMap> weak_map = i_isolate()->factory()->NewJSWeakMap();
+ {
+ v8::HandleScope inner_scope(v8_isolate());
+ // C++ object that should be traced through ephemeron value.
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> value = WrapperHelper::CreateWrapper(
+ v8_isolate()->GetCurrentContext(), &wrappable_type, wrappable_object);
+ EXPECT_FALSE(value.IsEmpty());
+ Handle<JSObject> js_key =
+ handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate());
+ Handle<JSReceiver> js_value = v8::Utils::OpenHandle(*value);
+ int32_t hash = js_key->GetOrCreateHash(i_isolate()).value();
+ JSWeakCollection::Set(weak_map, js_key, js_value, hash);
+ }
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(Wrappable::destructor_callcount, 0u);
+}
+
+TEST_F(UnifiedHeapTest, TracedReferenceHandlesDoNotLeak) {
+ // TracedReference handles are not cleared by the destructor of the embedder
+ // object. To avoid leaks we need to mark these handles during GC.
+ // This test checks that unmarked handles do not leak.
+ ManualGCScope manual_gc(i_isolate());
+ v8::HandleScope scope(v8_isolate());
+ v8::TracedReference<v8::Value> ref;
+ ref.Reset(v8_isolate(), v8::Undefined(v8_isolate()));
+ auto* traced_handles = i_isolate()->traced_handles();
+ const size_t initial_count = traced_handles->used_node_count();
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ const size_t final_count = traced_handles->used_node_count();
+ EXPECT_EQ(initial_count, final_count + 1);
+}
+
+} // namespace v8::internal
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
index 6bbded7795..22f91068d3 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.cc
@@ -11,6 +11,7 @@
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/heap.h"
#include "src/objects/objects-inl.h"
+#include "test/unittests/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -24,6 +25,9 @@ UnifiedHeapTest::UnifiedHeapTest(
V8::GetCurrentPlatform(),
CppHeapCreateParams{std::move(custom_spaces),
WrapperHelper::DefaultWrapperDescriptor()})) {
+ // --stress-incremental-marking may have started an incremental GC at this
+ // point already.
+ FinalizeGCIfRunning(isolate());
isolate()->heap()->AttachCppHeap(cpp_heap_.get());
}
@@ -49,6 +53,27 @@ void UnifiedHeapTest::CollectGarbageWithoutEmbedderStack(
}
}
+void UnifiedHeapTest::CollectYoungGarbageWithEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type) {
+ EmbedderStackStateScope stack_scope(
+ heap(), EmbedderStackStateScope::kExplicitInvocation,
+ StackState::kMayContainHeapPointers);
+ CollectGarbage(NEW_SPACE);
+ if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
+ cpp_heap().AsBase().sweeper().FinishIfRunning();
+ }
+}
+void UnifiedHeapTest::CollectYoungGarbageWithoutEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type) {
+ EmbedderStackStateScope stack_scope(
+ heap(), EmbedderStackStateScope::kExplicitInvocation,
+ StackState::kNoHeapPointers);
+ CollectGarbage(NEW_SPACE);
+ if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
+ cpp_heap().AsBase().sweeper().FinishIfRunning();
+ }
+}
+
CppHeap& UnifiedHeapTest::cpp_heap() const {
return *CppHeap::From(isolate()->heap()->cpp_heap());
}
diff --git a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h
index 21be7a07ae..7c212194d4 100644
--- a/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h
+++ b/deps/v8/test/unittests/heap/cppgc-js/unified-heap-utils.h
@@ -18,7 +18,7 @@ namespace internal {
class CppHeap;
-class UnifiedHeapTest : public TestWithHeapInternals {
+class UnifiedHeapTest : public TestWithHeapInternalsAndContext {
public:
UnifiedHeapTest();
explicit UnifiedHeapTest(
@@ -31,6 +31,13 @@ class UnifiedHeapTest : public TestWithHeapInternals {
cppgc::Heap::SweepingType sweeping_type =
cppgc::Heap::SweepingType::kAtomic);
+ void CollectYoungGarbageWithEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type =
+ cppgc::Heap::SweepingType::kAtomic);
+ void CollectYoungGarbageWithoutEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type =
+ cppgc::Heap::SweepingType::kAtomic);
+
CppHeap& cpp_heap() const;
cppgc::AllocationHandle& allocation_handle();
diff --git a/deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc
new file mode 100644
index 0000000000..d5388cbf3d
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc-js/young-unified-heap-unittest.cc
@@ -0,0 +1,401 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(CPPGC_YOUNG_GENERATION)
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/testing.h"
+#include "include/v8-context.h"
+#include "include/v8-cppgc.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-traced-handle.h"
+#include "src/api/api-inl.h"
+#include "src/common/globals.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/objects/objects-inl.h"
+#include "test/common/flag-utils.h"
+#include "test/unittests/heap/cppgc-js/unified-heap-utils.h"
+#include "test/unittests/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool IsHeapObjectYoung(void* obj) {
+ return cppgc::internal::HeapObjectHeader::FromObject(obj).IsYoung();
+}
+
+bool IsHeapObjectOld(void* obj) { return !IsHeapObjectYoung(obj); }
+
+class Wrappable final : public cppgc::GarbageCollected<Wrappable> {
+ public:
+ static size_t destructor_callcount;
+
+ Wrappable() = default;
+ Wrappable(v8::Isolate* isolate, v8::Local<v8::Object> local)
+ : wrapper_(isolate, local) {}
+
+ Wrappable(const Wrappable&) = default;
+ Wrappable(Wrappable&&) = default;
+
+ Wrappable& operator=(const Wrappable&) = default;
+ Wrappable& operator=(Wrappable&&) = default;
+
+ ~Wrappable() { destructor_callcount++; }
+
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(wrapper_); }
+
+ void SetWrapper(v8::Isolate* isolate, v8::Local<v8::Object> wrapper) {
+ wrapper_.Reset(isolate, wrapper);
+ }
+
+ TracedReference<v8::Object>& wrapper() { return wrapper_; }
+
+ private:
+ TracedReference<v8::Object> wrapper_;
+};
+
+size_t Wrappable::destructor_callcount = 0;
+
+class MinorMCEnabler {
+ public:
+ MinorMCEnabler()
+ : minor_mc_(&v8_flags.minor_mc, true),
+ cppgc_young_generation_(&v8_flags.cppgc_young_generation, true) {}
+
+ private:
+ FlagScope<bool> minor_mc_;
+ FlagScope<bool> cppgc_young_generation_;
+};
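MinorMCEnabler relies on the same RAII FlagScope pattern the tests below use for --incremental-marking: the flag is flipped for the lifetime of the scope and its previous value is restored on destruction, so no test leaks flag state. A minimal sketch:

    {
      // Disable incremental marking only for this block; the previous flag
      // value is restored when the scope is destroyed.
      FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking,
                                             false);
      // ... code that must not start incremental marking ...
    }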
+
+class YoungWrapperCollector : public RootVisitor {
+ public:
+ using YoungWrappers = std::set<Address>;
+
+ void VisitRootPointers(Root root, const char*, FullObjectSlot start,
+ FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
+ all_young_wrappers_.insert(*p.location());
+ }
+ }
+
+ YoungWrappers get_wrappers() { return std::move(all_young_wrappers_); }
+
+ private:
+ YoungWrappers all_young_wrappers_;
+};
+
+class ExpectCppGCToV8GenerationalBarrierToFire {
+ public:
+ ExpectCppGCToV8GenerationalBarrierToFire(
+ v8::Isolate& isolate, std::initializer_list<Address> expected_wrappers)
+ : isolate_(reinterpret_cast<Isolate&>(isolate)),
+ expected_wrappers_(expected_wrappers) {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ young_wrappers_before_ = visitor.get_wrappers();
+
+ std::vector<Address> diff;
+ std::set_intersection(young_wrappers_before_.begin(),
+ young_wrappers_before_.end(),
+ expected_wrappers_.begin(), expected_wrappers_.end(),
+ std::back_inserter(diff));
+ EXPECT_TRUE(diff.empty());
+ }
+
+ ~ExpectCppGCToV8GenerationalBarrierToFire() {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ const auto young_wrappers_after = visitor.get_wrappers();
+ EXPECT_GE(young_wrappers_after.size(), young_wrappers_before_.size());
+
+ EXPECT_TRUE(
+ std::includes(young_wrappers_after.begin(), young_wrappers_after.end(),
+ expected_wrappers_.begin(), expected_wrappers_.end()));
+ EXPECT_EQ(expected_wrappers_.size(),
+ young_wrappers_after.size() - young_wrappers_before_.size());
+ }
+
+ private:
+ Isolate& isolate_;
+ YoungWrapperCollector::YoungWrappers expected_wrappers_;
+ YoungWrapperCollector::YoungWrappers young_wrappers_before_;
+};
+
+class ExpectCppGCToV8NoGenerationalBarrier {
+ public:
+ explicit ExpectCppGCToV8NoGenerationalBarrier(v8::Isolate& isolate)
+ : isolate_(reinterpret_cast<Isolate&>(isolate)) {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ young_wrappers_before_ = visitor.get_wrappers();
+ }
+
+ ~ExpectCppGCToV8NoGenerationalBarrier() {
+ YoungWrapperCollector visitor;
+ isolate_.traced_handles()->IterateYoungRootsWithOldHostsForTesting(
+ &visitor);
+ const auto young_wrappers_after = visitor.get_wrappers();
+ EXPECT_EQ(young_wrappers_before_, young_wrappers_after);
+ }
+
+ private:
+ Isolate& isolate_;
+ YoungWrapperCollector::YoungWrappers young_wrappers_before_;
+};
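Both expectation classes above follow the same scoped-assertion shape: snapshot the observable state in the constructor and check the delta in the destructor, so the assertion brackets exactly the statements placed inside the scope. A generic sketch of that shape in plain gtest (the class name is hypothetical):

    class ExpectCounterDelta {
     public:
      ExpectCounterDelta(const size_t& counter, size_t expected_delta)
          : counter_(counter),
            before_(counter),
            expected_delta_(expected_delta) {}
      // Verify on scope exit that the counter advanced by exactly the
      // expected amount while the scope was alive.
      ~ExpectCounterDelta() { EXPECT_EQ(before_ + expected_delta_, counter_); }

     private:
      const size_t& counter_;
      const size_t before_;
      const size_t expected_delta_;
    };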
+
+} // namespace
+
+class YoungUnifiedHeapTest : public MinorMCEnabler, public UnifiedHeapTest {
+ public:
+ YoungUnifiedHeapTest() {
+    // Enable the young generation flag and run a GC. After the first run,
+    // the heap will enable minor GC.
+ CollectGarbageWithoutEmbedderStack();
+ }
+};
+
+TEST_F(YoungUnifiedHeapTest, OnlyGC) { CollectYoungGarbageWithEmbedderStack(); }
+
+TEST_F(YoungUnifiedHeapTest, CollectUnreachableCppGCObject) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ EXPECT_FALSE(api_object.IsEmpty());
+
+ Wrappable::destructor_callcount = 0;
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(1u, Wrappable::destructor_callcount);
+}
+
+TEST_F(YoungUnifiedHeapTest, FindingV8ToCppGCReference) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, &wrappable_type, wrappable_object);
+ EXPECT_FALSE(api_object.IsEmpty());
+
+ Wrappable::destructor_callcount = 0;
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+
+ WrapperHelper::ResetWrappableConnection(api_object);
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(1u, Wrappable::destructor_callcount);
+}
+
+TEST_F(YoungUnifiedHeapTest, FindingCppGCToV8Reference) {
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ auto* wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+ wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+
+ CollectYoungGarbageWithEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierV8ToCppGCReference) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ auto handle_api_object =
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(api_object));
+
+ EXPECT_TRUE(Heap::InYoungGeneration(*handle_api_object));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_FALSE(Heap::InYoungGeneration(*handle_api_object));
+
+ auto* wrappable = cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ uint16_t type_info = WrapperHelper::kTracedEmbedderId;
+ WrapperHelper::SetWrappableConnection(api_object, &type_info, wrappable);
+
+ Wrappable::destructor_callcount = 0;
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+}
+
+TEST_F(YoungUnifiedHeapTest,
+ GenerationalBarrierCppGCToV8NoInitializingStoreBarrier) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ auto local = v8::Object::New(v8_isolate());
+ {
+ ExpectCppGCToV8NoGenerationalBarrier expect_no_barrier(*v8_isolate());
+ auto* wrappable = cppgc::MakeGarbageCollected<Wrappable>(
+ allocation_handle(), v8_isolate(), local);
+ auto* copied_wrappable =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle(), *wrappable);
+ auto* moved_wrappable = cppgc::MakeGarbageCollected<Wrappable>(
+ allocation_handle(), std::move(*wrappable));
+ USE(moved_wrappable);
+ USE(copied_wrappable);
+ USE(wrappable);
+ }
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierCppGCToV8ReferenceReset) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::Persistent<Wrappable> wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ EXPECT_TRUE(IsHeapObjectYoung(wrappable_object.Get()));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_TRUE(IsHeapObjectOld(wrappable_object.Get()));
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+ {
+ ExpectCppGCToV8GenerationalBarrierToFire expect_barrier(
+ *v8_isolate(), {*reinterpret_cast<Address*>(*local)});
+ wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+ }
+
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierCppGCToV8ReferenceCopy) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::Persistent<Wrappable> wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ EXPECT_TRUE(IsHeapObjectYoung(wrappable_object.Get()));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_TRUE(IsHeapObjectOld(wrappable_object.Get()));
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+
+ Wrappable* another_wrappable_object = nullptr;
+ {
+ // Assign to young host and expect no barrier.
+ ExpectCppGCToV8NoGenerationalBarrier expect_no_barrier(*v8_isolate());
+ another_wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ another_wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+ {
+ // Assign to old object using TracedReference::operator= and expect
+ // the barrier to trigger.
+ ExpectCppGCToV8GenerationalBarrierToFire expect_barrier(
+ *v8_isolate(), {*reinterpret_cast<Address*>(*local)});
+ *wrappable_object = *another_wrappable_object;
+ }
+ }
+
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+TEST_F(YoungUnifiedHeapTest, GenerationalBarrierCppGCToV8ReferenceMove) {
+ if (i::v8_flags.single_generation) return;
+
+ FlagScope<bool> no_incremental_marking(&v8_flags.incremental_marking, false);
+ v8::HandleScope handle_scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+
+ cppgc::Persistent<Wrappable> wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+
+ EXPECT_TRUE(IsHeapObjectYoung(wrappable_object.Get()));
+ CollectAllAvailableGarbage();
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_TRUE(IsHeapObjectOld(wrappable_object.Get()));
+
+ {
+ v8::HandleScope inner_handle_scope(v8_isolate());
+ auto local = v8::Object::New(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+
+ Wrappable* another_wrappable_object = nullptr;
+ {
+ // Assign to young host and expect no barrier.
+ ExpectCppGCToV8NoGenerationalBarrier expect_no_barrier(*v8_isolate());
+ another_wrappable_object =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ another_wrappable_object->SetWrapper(v8_isolate(), local);
+ }
+ {
+ // Assign to old object using TracedReference::operator= and expect
+ // the barrier to trigger.
+ ExpectCppGCToV8GenerationalBarrierToFire expect_barrier(
+ *v8_isolate(), {*reinterpret_cast<Address*>(*local)});
+ *wrappable_object = std::move(*another_wrappable_object);
+ }
+ }
+
+ CollectYoungGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ auto local = wrappable_object->wrapper().Get(v8_isolate());
+ EXPECT_TRUE(local->IsObject());
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(CPPGC_YOUNG_GENERATION)
diff --git a/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc b/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
index 9a18c49a2c..3c878411ca 100644
--- a/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/allocation-unittest.cc
@@ -183,38 +183,60 @@ TEST_F(CppgcAllocationTest, LargeDoubleWordAlignedAllocation) {
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromUnaligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
- auto* padding_object =
- MakeGarbageCollected<CustomPadding<kWord>>(GetAllocationHandle());
// The address from which the next object can be allocated, i.e. the end of
- // |padding_object|, should not be properly aligned.
- ASSERT_EQ(kWord, (reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object)) &
- kAlignmentMask);
+  // |padding_object|, should not be double-word aligned. Allocate an extra
+  // padding object in case the payload start is 16-byte aligned.
+ using PaddingObject = CustomPadding<kDoubleWord>;
+ static_assert(((sizeof(HeapObjectHeader) + sizeof(PaddingObject)) %
+ kDoubleWord) == kWord);
+
+ void* padding_object = nullptr;
+ if (NormalPage::PayloadSize() % kDoubleWord == 0) {
+ padding_object = MakeGarbageCollected<PaddingObject>(GetAllocationHandle());
+ ASSERT_EQ(kWord, (reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject)) &
+ kAlignmentMask);
+ }
+
auto* aligned_object =
MakeGarbageCollected<AlignedCustomPadding<16>>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(aligned_object) & kAlignmentMask);
- // Test only yielded a reliable result if objects are adjacent to each other.
- ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object) + sizeof(HeapObjectHeader),
- reinterpret_cast<uintptr_t>(aligned_object));
+ if (padding_object) {
+ // Test only yielded a reliable result if objects are adjacent to each
+ // other.
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject) + sizeof(HeapObjectHeader),
+ reinterpret_cast<uintptr_t>(aligned_object));
+ }
}
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromAligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
- auto* padding_object =
- MakeGarbageCollected<CustomPadding<16>>(GetAllocationHandle());
// The address from which the next object can be allocated, i.e. the end of
- // |padding_object|, should be properly aligned.
- ASSERT_EQ(0u, (reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object)) &
- kAlignmentMask);
+  // |padding_object|, should be double-word aligned. Allocate an extra
+  // padding object in case the payload start is 8-byte aligned.
+ using PaddingObject = CustomPadding<kDoubleWord>;
+ static_assert(((sizeof(HeapObjectHeader) + sizeof(PaddingObject)) %
+ kDoubleWord) == kWord);
+
+ void* padding_object = nullptr;
+ if (NormalPage::PayloadSize() % kDoubleWord == kWord) {
+ padding_object = MakeGarbageCollected<PaddingObject>(GetAllocationHandle());
+ ASSERT_EQ(0u, (reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject)) &
+ kAlignmentMask);
+ }
+
auto* aligned_object =
MakeGarbageCollected<AlignedCustomPadding<16>>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(aligned_object) & kAlignmentMask);
- // Test only yielded a reliable result if objects are adjacent to each other.
- ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
- sizeof(*padding_object) + 2 * sizeof(HeapObjectHeader),
- reinterpret_cast<uintptr_t>(aligned_object));
+ if (padding_object) {
+ // Test only yielded a reliable result if objects are adjacent to each
+ // other.
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
+ sizeof(PaddingObject) + 2 * sizeof(HeapObjectHeader),
+ reinterpret_cast<uintptr_t>(aligned_object));
+ }
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
index d2ea739016..77dd67c2b3 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
@@ -56,6 +56,7 @@ class NonFinalizable : public GarbageCollected<NonFinalizable<Size>> {
private:
char array_[Size];
+ int padding_to_make_size_the_same_as_finalizible_;
};
using NormalNonFinalizable = NonFinalizable<32>;
@@ -355,5 +356,46 @@ TEST_F(ConcurrentSweeperTest, IncrementalSweeping) {
FinishSweeping();
}
+TEST_F(ConcurrentSweeperTest, SweepOnAllocationReturnEmptyPage) {
+ PreciseGC();
+
+  // First, allocate enough finalizable objects to fill an entire page.
+ const size_t objects_to_allocated =
+ NormalPage::PayloadSize() /
+ (sizeof(HeapObjectHeader) + sizeof(NormalFinalizable));
+ auto* first_obj =
+ MakeGarbageCollected<NormalFinalizable>(GetAllocationHandle());
+ auto* finalizable_page =
+ NormalPage::FromInnerAddress(&HeapBase::From(GetHeapHandle()), first_obj);
+ for (size_t i = 1; i < objects_to_allocated; ++i) {
+ MakeGarbageCollected<NormalFinalizable>(GetAllocationHandle());
+ }
+
+  // Then, allocate a new non-finalizable object on a fresh page. We do that
+  // so that the sweep triggered on allocation doesn't allocate a new page.
+ auto* non_finalizable =
+ MakeGarbageCollected<NormalNonFinalizable>(GetAllocationHandle());
+ auto* non_finalizable_page = NormalPage::FromInnerAddress(
+ &HeapBase::From(GetHeapHandle()), non_finalizable);
+ ASSERT_NE(finalizable_page, non_finalizable_page);
+
+ // Start the GC without sweeping.
+ static constexpr GCConfig config = {
+ CollectionType::kMajor, StackState::kNoHeapPointers,
+ GCConfig::MarkingType::kAtomic,
+ GCConfig::SweepingType::kIncrementalAndConcurrent};
+ Heap::From(GetHeap())->CollectGarbage(config);
+
+ WaitForConcurrentSweeping();
+
+ // Allocate and sweep.
+ auto* allocated_after_sweeping =
+ MakeGarbageCollected<NormalFinalizable>(GetAllocationHandle());
+ // Check that the empty page of finalizable objects was returned.
+ EXPECT_EQ(finalizable_page,
+ NormalPage::FromInnerAddress(&HeapBase::From(GetHeapHandle()),
+ allocated_after_sweeping));
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc b/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
index 8f4bb9fb75..69fb2ce7a0 100644
--- a/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
@@ -50,7 +50,7 @@ TEST(LoggingTest, ConstexprContext) {
}
#endif
-#if DEBUG && !defined(OFFICIAL_BUILD)
+#if DEBUG && !defined(OFFICIAL_BUILD) && GTEST_HAS_DEATH_TEST
TEST(LoggingTest, Message) {
using ::testing::ContainsRegex;
EXPECT_DEATH_IF_SUPPORTED(CPPGC_DCHECK(5 == 7),
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
index 39909ab7bc..739105eee8 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
@@ -23,9 +23,9 @@ class MarkingVerifierTest : public testing::TestWithHeap {
V8_NOINLINE void VerifyMarking(HeapBase& heap, StackState stack_state,
size_t expected_marked_bytes) {
Heap::From(GetHeap())->object_allocator().ResetLinearAllocationBuffers();
+ Heap::From(GetHeap())->stack()->SetMarkerToCurrentStackPosition();
MarkingVerifier verifier(heap, CollectionType::kMajor);
- verifier.Run(stack_state, v8::base::Stack::GetCurrentStackPosition(),
- expected_marked_bytes);
+ verifier.Run(stack_state, expected_marked_bytes);
}
};
diff --git a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
index b6dd973c49..03cf1383fa 100644
--- a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
@@ -65,10 +65,12 @@ struct CustomWriteBarrierPolicy {
static void InitializingBarrier(const void* slot, const void* value) {
++InitializingWriteBarriersTriggered;
}
+ template <WriteBarrierSlotType>
static void AssigningBarrier(const void* slot, const void* value) {
++AssigningWriteBarriersTriggered;
}
- static void AssigningBarrier(const void* slot, MemberStorage) {
+ template <WriteBarrierSlotType>
+ static void AssigningBarrier(const void* slot, DefaultMemberStorage) {
++AssigningWriteBarriersTriggered;
}
};
diff --git a/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
index 1625a3a586..fd8fcd8d54 100644
--- a/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
@@ -6,6 +6,7 @@
#include "include/cppgc/allocation.h"
#include "src/base/macros.h"
+#include "src/base/page-allocator.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/page-memory.h"
@@ -18,164 +19,171 @@ namespace internal {
namespace {
-bool IsEmpty(const ObjectStartBitmap& bitmap) {
- size_t count = 0;
- bitmap.Iterate([&count](Address) { count++; });
- return count == 0;
-}
-
-// Abstraction for objects that hides ObjectStartBitmap::kGranularity and
-// the base address as getting either of it wrong will result in failed DCHECKs.
-class Object {
+class PageWithBitmap final {
public:
- static Address kBaseOffset;
+ PageWithBitmap()
+ : base_(allocator_.AllocatePages(
+ nullptr, kPageSize, kPageSize,
+ v8::base::PageAllocator::Permission::kReadWrite)),
+ bitmap_(new(base_) ObjectStartBitmap) {}
+
+ PageWithBitmap(const PageWithBitmap&) = delete;
+ PageWithBitmap& operator=(const PageWithBitmap&) = delete;
+
+ ~PageWithBitmap() { allocator_.FreePages(base_, kPageSize); }
+
+ ObjectStartBitmap& bitmap() const { return *bitmap_; }
+
+ void* base() const { return base_; }
+ size_t size() const { return kPageSize; }
+
+ v8::base::PageAllocator allocator_;
+ void* base_;
+ ObjectStartBitmap* bitmap_;
+};
- explicit Object(size_t number) : number_(number) {
- const size_t max_entries = ObjectStartBitmap::MaxEntries();
- EXPECT_GE(max_entries, number_);
+class ObjectStartBitmapTest : public ::testing::Test {
+ protected:
+ void AllocateObject(size_t object_position) {
+ bitmap().SetBit(ObjectAddress(object_position));
}
- Address address() const {
- return kBaseOffset + ObjectStartBitmap::Granularity() * number_;
+ void FreeObject(size_t object_position) {
+ bitmap().ClearBit(ObjectAddress(object_position));
}
- HeapObjectHeader* header() const {
- return reinterpret_cast<HeapObjectHeader*>(address());
+ bool CheckObjectAllocated(size_t object_position) {
+ return bitmap().CheckBit(ObjectAddress(object_position));
}
- // Allow implicitly converting Object to Address.
- operator Address() const { return address(); }
+ Address ObjectAddress(size_t pos) const {
+ return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(page.base()) +
+ pos * ObjectStartBitmap::Granularity());
+ }
+
+ HeapObjectHeader* ObjectHeader(size_t pos) const {
+ return reinterpret_cast<HeapObjectHeader*>(ObjectAddress(pos));
+ }
+
+ ObjectStartBitmap& bitmap() const { return page.bitmap(); }
+
+ bool IsEmpty() const {
+ size_t count = 0;
+ bitmap().Iterate([&count](Address) { count++; });
+ return count == 0;
+ }
private:
- const size_t number_;
+ PageWithBitmap page;
};
-Address Object::kBaseOffset = reinterpret_cast<Address>(0x4000);
-
} // namespace
-TEST(ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
+TEST_F(ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
const size_t max_entries = ObjectStartBitmap::MaxEntries();
EXPECT_LT(0u, max_entries);
}
-TEST(ObjectStartBitmapTest, InitialEmpty) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- EXPECT_TRUE(IsEmpty(bitmap));
-}
+TEST_F(ObjectStartBitmapTest, InitialEmpty) { EXPECT_TRUE(IsEmpty()); }
-TEST(ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- bitmap.SetBit(Object(0));
- EXPECT_FALSE(IsEmpty(bitmap));
+TEST_F(ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
+ AllocateObject(0);
+ EXPECT_FALSE(IsEmpty());
}
-TEST(ObjectStartBitmapTest, SetBitCheckBit) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(7);
- bitmap.SetBit(object);
- EXPECT_TRUE(bitmap.CheckBit(object));
+TEST_F(ObjectStartBitmapTest, SetBitCheckBit) {
+ constexpr size_t object_num = 7;
+ AllocateObject(object_num);
+ EXPECT_TRUE(CheckObjectAllocated(object_num));
}
-TEST(ObjectStartBitmapTest, SetBitClearbitCheckBit) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(77);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_FALSE(bitmap.CheckBit(object));
+TEST_F(ObjectStartBitmapTest, SetBitClearbitCheckBit) {
+ constexpr size_t object_num = 77;
+ AllocateObject(object_num);
+ FreeObject(object_num);
+ EXPECT_FALSE(CheckObjectAllocated(object_num));
}
-TEST(ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(123);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_TRUE(IsEmpty(bitmap));
+TEST_F(ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
+ constexpr size_t object_num = 123;
+ AllocateObject(object_num);
+ FreeObject(object_num);
+ EXPECT_TRUE(IsEmpty());
}
-TEST(ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object0(0);
- Object object1(1);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(Object(3)));
+TEST_F(ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
+ AllocateObject(0);
+ AllocateObject(1);
+ EXPECT_FALSE(CheckObjectAllocated(3));
size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
+ bitmap().Iterate([&count, this](Address current) {
if (count == 0) {
- EXPECT_EQ(object0.address(), current);
+ EXPECT_EQ(ObjectAddress(0), current);
} else if (count == 1) {
- EXPECT_EQ(object1.address(), current);
+ EXPECT_EQ(ObjectAddress(1), current);
}
count++;
});
EXPECT_EQ(2u, count);
}
-TEST(ObjectStartBitmapTest, AdjacentObjectsAtEnd) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- const size_t last_entry_index = ObjectStartBitmap::MaxEntries() - 1;
- Object object0(last_entry_index - 1);
- Object object1(last_entry_index);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(Object(last_entry_index - 2)));
+TEST_F(ObjectStartBitmapTest, AdjacentObjectsAtEnd) {
+ static constexpr size_t last_entry_index =
+ ObjectStartBitmap::MaxEntries() - 1;
+ AllocateObject(last_entry_index);
+ AllocateObject(last_entry_index - 1);
+ EXPECT_FALSE(CheckObjectAllocated(last_entry_index - 2));
size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
+ bitmap().Iterate([&count, this](Address current) {
if (count == 0) {
- EXPECT_EQ(object0.address(), current);
+ EXPECT_EQ(ObjectAddress(last_entry_index - 1), current);
} else if (count == 1) {
- EXPECT_EQ(object1.address(), current);
+ EXPECT_EQ(ObjectAddress(last_entry_index), current);
}
count++;
});
EXPECT_EQ(2u, count);
}
-TEST(ObjectStartBitmapTest, FindHeaderExact) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.header(), bitmap.FindHeader(object.address()));
+TEST_F(ObjectStartBitmapTest, FindHeaderExact) {
+ constexpr size_t object_num = 654;
+ AllocateObject(object_num);
+ EXPECT_EQ(ObjectHeader(object_num),
+ bitmap().FindHeader(ObjectAddress(object_num)));
}
-TEST(ObjectStartBitmapTest, FindHeaderApproximate) {
+TEST_F(ObjectStartBitmapTest, FindHeaderApproximate) {
static const size_t kInternalDelta = 37;
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.header(),
- bitmap.FindHeader(object.address() + kInternalDelta));
+ constexpr size_t object_num = 654;
+ AllocateObject(object_num);
+ EXPECT_EQ(ObjectHeader(object_num),
+ bitmap().FindHeader(ObjectAddress(object_num) + kInternalDelta));
}
-TEST(ObjectStartBitmapTest, FindHeaderIteratingWholeBitmap) {
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object_to_find(Object(0));
- Address hint_index = Object(ObjectStartBitmap::MaxEntries() - 1);
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.header(), bitmap.FindHeader(hint_index));
+TEST_F(ObjectStartBitmapTest, FindHeaderIteratingWholeBitmap) {
+ AllocateObject(0);
+ Address hint_index = ObjectAddress(ObjectStartBitmap::MaxEntries() - 1);
+ EXPECT_EQ(ObjectHeader(0), bitmap().FindHeader(hint_index));
}
-TEST(ObjectStartBitmapTest, FindHeaderNextCell) {
+TEST_F(ObjectStartBitmapTest, FindHeaderNextCell) {
// This white box test makes use of the fact that cells are of type uint8_t.
const size_t kCellSize = sizeof(uint8_t);
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object_to_find(Object(kCellSize - 1));
- Address hint = Object(kCellSize);
- bitmap.SetBit(Object(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.header(), bitmap.FindHeader(hint));
+ AllocateObject(0);
+ AllocateObject(kCellSize - 1);
+ Address hint = ObjectAddress(kCellSize);
+ EXPECT_EQ(ObjectHeader(kCellSize - 1), bitmap().FindHeader(hint));
}
-TEST(ObjectStartBitmapTest, FindHeaderSameCell) {
+TEST_F(ObjectStartBitmapTest, FindHeaderSameCell) {
// This white box test makes use of the fact that cells are of type uint8_t.
const size_t kCellSize = sizeof(uint8_t);
- ObjectStartBitmap bitmap(Object::kBaseOffset);
- Object object_to_find(Object(kCellSize - 1));
- bitmap.SetBit(Object(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.header(),
- bitmap.FindHeader(object_to_find.address()));
+ AllocateObject(0);
+ AllocateObject(kCellSize - 1);
+ Address hint = ObjectAddress(kCellSize);
+ EXPECT_EQ(ObjectHeader(kCellSize - 1), bitmap().FindHeader(hint));
+ EXPECT_EQ(ObjectHeader(kCellSize - 1),
+ bitmap().FindHeader(ObjectAddress(kCellSize - 1)));
}
} // namespace internal
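The rewritten fixture replaces the old fake base offset with a real page: it reserves a page-aligned region from the page allocator and placement-news the ObjectStartBitmap at its base, so every bit index maps to an address the bitmap's internal checks accept. Condensed from PageWithBitmap above (kPageSize and ObjectStartBitmap as in cppgc):

    v8::base::PageAllocator allocator;
    void* base = allocator.AllocatePages(
        nullptr, kPageSize, kPageSize,
        v8::base::PageAllocator::Permission::kReadWrite);
    // Place the bitmap at the page base so bit indices correspond to real,
    // page-backed addresses.
    auto* bitmap = new (base) ObjectStartBitmap;
    // ... bitmap->SetBit(addr), bitmap->FindHeader(addr), bitmap->Iterate(fn) ...
    allocator.FreePages(base, kPageSize);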
diff --git a/deps/v8/test/unittests/heap/cppgc/test-platform.cc b/deps/v8/test/unittests/heap/cppgc/test-platform.cc
index 5b2139a101..bcf6ada50e 100644
--- a/deps/v8/test/unittests/heap/cppgc/test-platform.cc
+++ b/deps/v8/test/unittests/heap/cppgc/test-platform.cc
@@ -24,7 +24,8 @@ std::unique_ptr<cppgc::JobHandle> TestPlatform::PostJob(
}
void TestPlatform::RunAllForegroundTasks() {
- v8::platform::PumpMessageLoop(v8_platform_.get(), kNoIsolate);
+ while (v8::platform::PumpMessageLoop(v8_platform_.get(), kNoIsolate)) {
+ }
if (GetForegroundTaskRunner()->IdleTasksEnabled()) {
v8::platform::RunIdleTasks(v8_platform_.get(), kNoIsolate,
std::numeric_limits<double>::max());
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.cc b/deps/v8/test/unittests/heap/cppgc/tests.cc
index b144908dda..77a48e9cfe 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/tests.cc
@@ -11,6 +11,7 @@
#if !CPPGC_IS_STANDALONE
#include "include/v8-initialization.h"
+#include "src/init/v8.h"
#endif // !CPPGC_IS_STANDALONE
namespace cppgc {
@@ -28,7 +29,7 @@ void TestWithPlatform::SetUpTestSuite() {
#if !CPPGC_IS_STANDALONE
// For non-standalone builds, we need to initialize V8's platform so that it
// can be looked-up by trace-event.h.
- v8::V8::InitializePlatform(platform_->GetV8Platform());
+ i::V8::InitializePlatformForTesting(platform_->GetV8Platform());
v8::V8::Initialize();
#endif // !CPPGC_IS_STANDALONE
}
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index 5a9536b048..52ca765d04 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -122,6 +122,7 @@ class TestSupportingAllocationOnly : public TestWithHeap {
TestSupportingAllocationOnly();
private:
+ CPPGC_STACK_ALLOCATED_IGNORE("permitted for test code")
subtle::NoGarbageCollectionScope no_gc_scope_;
};
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
deleted file mode 100644
index 39b9712bc6..0000000000
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ /dev/null
@@ -1,1212 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/embedder-tracing.h"
-
-#include "include/v8-function.h"
-#include "include/v8-template.h"
-#include "src/handles/global-handles.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/heap.h"
-#include "test/unittests/heap/heap-utils.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-using LocalEmbedderHeapTracerWithIsolate = TestWithHeapInternals;
-
-namespace heap {
-
-using testing::StrictMock;
-using testing::_;
-using testing::Return;
-using v8::EmbedderHeapTracer;
-using v8::internal::LocalEmbedderHeapTracer;
-
-namespace {
-
-LocalEmbedderHeapTracer::WrapperInfo CreateWrapperInfo() {
- return LocalEmbedderHeapTracer::WrapperInfo(nullptr, nullptr);
-}
-
-} // namespace
-
-START_ALLOW_USE_DEPRECATED()
-class MockEmbedderHeapTracer : public EmbedderHeapTracer {
- public:
- MOCK_METHOD(void, TracePrologue, (EmbedderHeapTracer::TraceFlags),
- (override));
- MOCK_METHOD(void, TraceEpilogue, (EmbedderHeapTracer::TraceSummary*),
- (override));
- MOCK_METHOD(void, EnterFinalPause, (EmbedderHeapTracer::EmbedderStackState),
- (override));
- MOCK_METHOD(bool, IsTracingDone, (), (override));
- MOCK_METHOD(void, RegisterV8References,
- ((const std::vector<std::pair<void*, void*> >&)), (override));
- MOCK_METHOD(bool, AdvanceTracing, (double deadline_in_ms), (override));
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-TEST(LocalEmbedderHeapTracer, InUse) {
- MockEmbedderHeapTracer mock_remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&mock_remote_tracer);
- EXPECT_TRUE(local_tracer.InUse());
-}
-
-TEST(LocalEmbedderHeapTracer, NoRemoteTracer) {
- LocalEmbedderHeapTracer local_tracer(nullptr);
- // We should be able to call all functions without a remote tracer being
- // attached.
- EXPECT_FALSE(local_tracer.InUse());
- local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags);
- local_tracer.EnterFinalPause();
- bool done = local_tracer.Trace(std::numeric_limits<double>::infinity());
- EXPECT_TRUE(done);
- local_tracer.TraceEpilogue();
-}
-
-TEST(LocalEmbedderHeapTracer, TracePrologueForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, TracePrologue(_));
- local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags);
-}
-
-TEST(LocalEmbedderHeapTracer, TracePrologueForwardsMemoryReducingFlag) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer,
- TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory));
- local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory);
-}
-
-TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, TraceEpilogue(_));
- local_tracer.TraceEpilogue();
-}
-
-TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, EnterFinalPause(_));
- local_tracer.EnterFinalPause();
-}
-
-TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneForwards) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, IsTracingDone());
- local_tracer.IsRemoteTracingDone();
-}
-
-TEST(LocalEmbedderHeapTracer, EnterFinalPauseDefaultStackStateUnkown) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- // The default stack state is expected to be unkown.
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
- local_tracer.EnterFinalPause();
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate,
- EnterFinalPauseStackStateIsForwarded) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, TemporaryEmbedderStackState) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- // Default is unknown, see above.
- {
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- EXPECT_CALL(remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
- }
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate,
- TemporaryEmbedderStackStateRestores) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- // Default is unknown, see above.
- {
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- {
- EmbedderStackStateScope nested_scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
- local_tracer.EnterFinalPause();
- }
- EXPECT_CALL(remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
- }
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, TraceEpilogueStackStateResets) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- &local_tracer,
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
- local_tracer.EnterFinalPause();
- EXPECT_CALL(remote_tracer, TraceEpilogue(_));
- local_tracer.TraceEpilogue();
- EXPECT_CALL(
- remote_tracer,
- EnterFinalPause(
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
- local_tracer.EnterFinalPause();
-}
-
-TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneIncludesRemote) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, IsTracingDone());
- local_tracer.IsRemoteTracingDone();
-}
-
-TEST(LocalEmbedderHeapTracer, RegisterV8ReferencesWithRemoteTracer) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- {
- LocalEmbedderHeapTracer::ProcessingScope scope(&local_tracer);
- scope.AddWrapperInfoForTesting(CreateWrapperInfo());
- EXPECT_CALL(remote_tracer, RegisterV8References(_));
- }
- EXPECT_CALL(remote_tracer, IsTracingDone()).WillOnce(Return(false));
- EXPECT_FALSE(local_tracer.IsRemoteTracingDone());
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, SetRemoteTracerSetsIsolate) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_EQ(isolate(), reinterpret_cast<Isolate*>(remote_tracer.isolate()));
-}
-
-TEST_F(LocalEmbedderHeapTracerWithIsolate, DestructorClearsIsolate) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- {
- LocalEmbedderHeapTracer local_tracer(isolate());
- local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_EQ(isolate(), reinterpret_cast<Isolate*>(remote_tracer.isolate()));
- }
- EXPECT_EQ(nullptr, remote_tracer.isolate());
-}
-
-namespace {
-
-v8::Local<v8::Object> ConstructTraceableJSApiObject(
- v8::Local<v8::Context> context, void* first_field, void* second_field) {
- v8::EscapableHandleScope scope(context->GetIsolate());
- v8::Local<v8::FunctionTemplate> function_t =
- v8::FunctionTemplate::New(context->GetIsolate());
- v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
- instance_t->SetInternalFieldCount(2);
- v8::Local<v8::Function> function =
- function_t->GetFunction(context).ToLocalChecked();
- v8::Local<v8::Object> instance =
- function->NewInstance(context).ToLocalChecked();
- instance->SetAlignedPointerInInternalField(0, first_field);
- instance->SetAlignedPointerInInternalField(1, second_field);
- EXPECT_FALSE(instance.IsEmpty());
- i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
- EXPECT_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
- return scope.Escape(instance);
-}
-
-enum class TracePrologueBehavior { kNoop, kCallV8WriteBarrier };
-
-START_ALLOW_USE_DEPRECATED()
-
-class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
- public:
- TestEmbedderHeapTracer() = default;
- TestEmbedderHeapTracer(TracePrologueBehavior prologue_behavior,
- v8::Global<v8::Array> array)
- : prologue_behavior_(prologue_behavior), array_(std::move(array)) {}
-
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {
- registered_from_v8_.insert(registered_from_v8_.end(),
- embedder_fields.begin(), embedder_fields.end());
- }
-
- void AddReferenceForTracing(v8::TracedReference<v8::Value>* ref) {
- to_register_with_v8_references_.push_back(ref);
- }
-
- bool AdvanceTracing(double deadline_in_ms) final {
- for (auto ref : to_register_with_v8_references_) {
- RegisterEmbedderReference(ref->As<v8::Data>());
- }
- to_register_with_v8_references_.clear();
- return true;
- }
-
- bool IsTracingDone() final { return to_register_with_v8_references_.empty(); }
-
- void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
- if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
- auto local = array_.Get(isolate());
- local
- ->Set(local->GetCreationContext().ToLocalChecked(), 0,
- v8::Object::New(isolate()))
- .Check();
- }
- }
-
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-
- bool IsRegisteredFromV8(void* first_field) const {
- for (auto pair : registered_from_v8_) {
- if (pair.first == first_field) return true;
- }
- return false;
- }
-
- void DoNotConsiderAsRootForScavenge(v8::TracedReference<v8::Value>* handle) {
- handle->SetWrapperClassId(17);
- non_root_handles_.push_back(handle);
- }
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- return handle.WrapperClassId() != 17;
- }
-
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- for (auto* non_root_handle : non_root_handles_) {
- if (*non_root_handle == handle) {
- non_root_handle->Reset();
- }
- }
- }
-
- private:
- std::vector<std::pair<void*, void*>> registered_from_v8_;
- std::vector<v8::TracedReference<v8::Value>*> to_register_with_v8_references_;
- TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop;
- v8::Global<v8::Array> array_;
- std::vector<v8::TracedReference<v8::Value>*> non_root_handles_;
-};
-
-class V8_NODISCARD TemporaryEmbedderHeapTracerScope final {
- public:
- TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
- v8::EmbedderHeapTracer* tracer)
- : isolate_(isolate) {
- isolate_->SetEmbedderHeapTracer(tracer);
- }
-
- ~TemporaryEmbedderHeapTracerScope() {
- isolate_->SetEmbedderHeapTracer(nullptr);
- }
-
- private:
- v8::Isolate* const isolate_;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-} // namespace
-
-using EmbedderTracingTest = TestWithHeapInternalsAndContext;
-
-TEST_F(EmbedderTracingTest, V8RegisterEmbedderReference) {
- // Tests that wrappers are properly registered with the embedder heap
- // tracer.
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
-
- void* first_and_second_field = reinterpret_cast<void*>(0x2);
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- ASSERT_FALSE(api_object.IsEmpty());
- CollectGarbage(i::OLD_SPACE);
- EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST_F(EmbedderTracingTest, EmbedderRegisteringV8Reference) {
- // Tests that references that are registered by the embedder heap tracer are
- // considered live by V8.
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
-
- auto handle = std::make_unique<v8::TracedReference<v8::Value>>();
- {
- v8::HandleScope inner_scope(v8_isolate());
- v8::Local<v8::Value> o =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- handle->Reset(v8_isolate(), o);
- }
- tracer.AddReferenceForTracing(handle.get());
- CollectGarbage(i::OLD_SPACE);
- EXPECT_FALSE(handle->IsEmpty());
-}
-
-TEST_F(EmbedderTracingTest, TracingInEphemerons) {
- // Tests that wrappers that are part of ephemerons are traced.
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- v8::HandleScope scope(v8_isolate());
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
-
- v8::Local<v8::Object> key =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- void* first_and_second_field = reinterpret_cast<void*>(0x8);
- Handle<JSWeakMap> weak_map = i_isolate()->factory()->NewJSWeakMap();
- {
- v8::HandleScope inner_scope(v8_isolate());
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
- context, first_and_second_field, first_and_second_field);
- EXPECT_FALSE(api_object.IsEmpty());
- Handle<JSObject> js_key =
- handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate());
- Handle<JSReceiver> js_api_object = v8::Utils::OpenHandle(*api_object);
- int32_t hash = js_key->GetOrCreateHash(i_isolate()).value();
- JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
- }
- CollectGarbage(i::OLD_SPACE);
- EXPECT_TRUE(tracer.IsRegisteredFromV8(first_and_second_field));
-}
-
-TEST_F(EmbedderTracingTest, FinalizeTracingIsNoopWhenNotMarking) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- // Finalize a potentially running garbage collection.
- CollectGarbage(OLD_SPACE);
- EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped());
-
- int gc_counter = i_isolate()->heap()->gc_count();
- tracer.FinalizeTracing();
- EXPECT_TRUE(i_isolate()->heap()->incremental_marking()->IsStopped());
- EXPECT_EQ(gc_counter, i_isolate()->heap()->gc_count());
-}
-
-TEST_F(EmbedderTracingTest, FinalizeTracingWhenMarking) {
- if (!v8_flags.incremental_marking) return;
- ManualGCScope manual_gc(i_isolate());
- Heap* heap = i_isolate()->heap();
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- // Finalize a potentially running garbage collection.
- CollectGarbage(OLD_SPACE);
- if (heap->sweeping_in_progress()) {
- heap->EnsureSweepingCompleted(
- Heap::SweepingForcedFinalizationMode::kV8Only);
- }
- heap->tracer()->StopFullCycleIfNeeded();
- EXPECT_TRUE(heap->incremental_marking()->IsStopped());
-
- i::IncrementalMarking* marking = heap->incremental_marking();
- {
- IsolateSafepointScope scope(heap);
- heap->tracer()->StartCycle(
- GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector cctest", GCTracer::MarkingType::kIncremental);
- marking->Start(GarbageCollector::MARK_COMPACTOR,
- GarbageCollectionReason::kTesting);
- }
-
- // Sweeping is not running, so we should immediately start marking.
- EXPECT_TRUE(marking->IsMarking());
- tracer.FinalizeTracing();
- EXPECT_TRUE(marking->IsStopped());
-}
-
-namespace {
-
-void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- v8::TracedReference<v8::Object>* handle) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(v8::Object::New(isolate));
- EXPECT_FALSE(object.IsEmpty());
- *handle = v8::TracedReference<v8::Object>(isolate, object);
- EXPECT_FALSE(handle->IsEmpty());
-}
-
-template <typename T>
-void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- T* global) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(
- ConstructTraceableJSApiObject(context, nullptr, nullptr));
- EXPECT_FALSE(object.IsEmpty());
- *global = T(isolate, object);
- EXPECT_FALSE(global->IsEmpty());
-}
-
-enum class SurvivalMode { kSurvives, kDies };
-
-template <typename ModifierFunction, typename ConstructTracedReferenceFunction,
- typename GCFunction>
-void TracedReferenceTest(v8::Isolate* isolate,
- ConstructTracedReferenceFunction construct_function,
- ModifierFunction modifier_function,
- GCFunction gc_function, SurvivalMode survives) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- auto* global_handles =
- reinterpret_cast<i::Isolate*>(isolate)->global_handles();
-
- const size_t initial_count = global_handles->handles_count();
- auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
- construct_function(isolate, context, handle.get());
- ASSERT_TRUE(IsNewObjectInCorrectGeneration(isolate, *handle));
- modifier_function(*handle);
- const size_t after_modification_count = global_handles->handles_count();
- gc_function();
- // Cannot check the handle as it is not explicitly cleared by the GC. Instead
- // check the handles count.
- CHECK_IMPLIES(survives == SurvivalMode::kSurvives,
- after_modification_count == global_handles->handles_count());
- CHECK_IMPLIES(survives == SurvivalMode::kDies,
- initial_count == global_handles->handles_count());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceReset) {
- v8::HandleScope scope(v8_isolate());
- v8::TracedReference<v8::Object> handle;
- ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &handle);
- EXPECT_FALSE(handle.IsEmpty());
- handle.Reset();
- EXPECT_TRUE(handle.IsEmpty());
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceCopyReferences) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope outer_scope(v8_isolate());
- auto* traced_handles = i_isolate()->traced_handles();
-
- const size_t initial_count = traced_handles->used_node_count();
- auto handle1 = std::make_unique<v8::TracedReference<v8::Value>>();
- {
- v8::HandleScope scope(v8_isolate());
- handle1->Reset(v8_isolate(), v8::Object::New(v8_isolate()));
- }
- auto handle2 = std::make_unique<v8::TracedReference<v8::Value>>(*handle1);
- auto handle3 = std::make_unique<v8::TracedReference<v8::Value>>();
- *handle3 = *handle2;
- EXPECT_EQ(initial_count + 3, traced_handles->used_node_count());
- EXPECT_FALSE(handle1->IsEmpty());
- EXPECT_EQ(*handle1, *handle2);
- EXPECT_EQ(*handle2, *handle3);
- {
- v8::HandleScope scope(v8_isolate());
- auto tmp = v8::Local<v8::Value>::New(v8_isolate(), *handle3);
- EXPECT_FALSE(tmp.IsEmpty());
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope stack_scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate())
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC();
- }
- EXPECT_EQ(initial_count, traced_handles->used_node_count());
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceToUnmodifiedJSObjectDiesOnFullGC) {
- // When stressing incremental marking, a write barrier may keep the object
- // alive.
- if (v8_flags.stress_incremental_marking) return;
-
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [](const TracedReference<v8::Object>&) {}, [this]() { FullGC(); },
- SurvivalMode::kDies);
-}
-
-TEST_F(
- EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSObjectDiesOnFullGCEvenWhenPointeeIsHeldAlive) {
- ManualGCScope manual_gcs(i_isolate());
- // The TracedReference itself will die as it's not found by the full GC. The
- // pointee will be kept alive through other means.
- v8::Global<v8::Object> strong_global;
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [this, &strong_global](const TracedReference<v8::Object>& handle) {
- v8::HandleScope scope(v8_isolate());
- strong_global =
- v8::Global<v8::Object>(v8_isolate(), handle.Get(v8_isolate()));
- },
- [this, &strong_global]() {
- FullGC();
- strong_global.Reset();
- },
- SurvivalMode::kDies);
-}
-
-TEST_F(EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSObjectSurvivesYoungGC) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
- SurvivalMode::kSurvives);
-}
-
-TEST_F(
- EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSObjectSurvivesYoungGCWhenExcludedFromRoots) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- TracedReferenceTest(
- v8_isolate(), ConstructJSObject,
- [&tracer](const TracedReference<v8::Object>& handle) {
- tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
- },
- [this]() { YoungGC(); }, SurvivalMode::kSurvives);
-}
-
-TEST_F(EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- TracedReferenceTest(
- v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
- [](const TracedReference<v8::Object>&) {}, [this]() { YoungGC(); },
- SurvivalMode::kSurvives);
-}
-
-TEST_F(
- EmbedderTracingTest,
- TracedReferenceToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- TracedReferenceTest(
- v8_isolate(), ConstructJSApiObject<TracedReference<v8::Object>>,
- [&tracer](const TracedReference<v8::Object>& handle) {
- tracer.DoNotConsiderAsRootForScavenge(&handle.As<v8::Value>());
- },
- [this]() { YoungGC(); }, SurvivalMode::kDies);
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceWrapperClassId) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- v8::TracedReference<v8::Object> traced;
- ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(), &traced);
- EXPECT_EQ(0, traced.WrapperClassId());
- traced.SetWrapperClassId(17);
- EXPECT_EQ(17, traced.WrapperClassId());
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceHandlesMarking) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- auto live = std::make_unique<v8::TracedReference<v8::Value>>();
- auto dead = std::make_unique<v8::TracedReference<v8::Value>>();
- live->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
- dead->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
- auto* traced_handles = i_isolate()->traced_handles();
- {
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.AddReferenceForTracing(live.get());
- const size_t initial_count = traced_handles->used_node_count();
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate())
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC();
- }
- const size_t final_count = traced_handles->used_node_count();
- // Handles are not black allocated, so `dead` is immediately reclaimed.
- EXPECT_EQ(initial_count, final_count + 1);
- }
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceHandlesDoNotLeak) {
- // TracedReference handles are not cleared by the destructor of the embedder
- // object. To avoid leaks we need to mark these handles during GC.
- // This test checks that unmarked handles do not leak.
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- auto ref = std::make_unique<v8::TracedReference<v8::Value>>();
- ref->Reset(v8_isolate(), v8::Undefined(v8_isolate()));
- auto* traced_handles = i_isolate()->traced_handles();
- const size_t initial_count = traced_handles->used_node_count();
- // We need two GCs because handles are black allocated.
- FullGC();
- FullGC();
- const size_t final_count = traced_handles->used_node_count();
- EXPECT_EQ(initial_count, final_count + 1);
-}
-
-namespace {
-
-START_ALLOW_USE_DEPRECATED()
-
-class TracedReferenceVisitor final
- : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
- public:
- ~TracedReferenceVisitor() override = default;
-
- void VisitTracedReference(const TracedReference<Value>& value) final {
- if (value.WrapperClassId() == 57) {
- count_++;
- }
- }
-
- size_t count() const { return count_; }
-
- private:
- size_t count_ = 0;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceIteration) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
-
- auto handle = std::make_unique<v8::TracedReference<v8::Object>>();
- ConstructJSObject(v8_isolate(), v8_isolate()->GetCurrentContext(),
- handle.get());
- EXPECT_FALSE(handle->IsEmpty());
- handle->SetWrapperClassId(57);
- TracedReferenceVisitor visitor;
- {
- v8::HandleScope new_scope(v8_isolate());
- tracer.IterateTracedGlobalHandles(&visitor);
- }
- EXPECT_EQ(1u, visitor.count());
-}
-
-TEST_F(EmbedderTracingTest, TracePrologueCallingIntoV8WriteBarrier) {
- // Regression test: https://crbug.com/940003
- if (!v8_flags.incremental_marking) return;
- ManualGCScope manual_gc(isolate());
- v8::HandleScope scope(v8_isolate());
- v8::Global<v8::Array> global;
- {
- v8::HandleScope new_scope(v8_isolate());
- auto local = v8::Array::New(v8_isolate(), 10);
- global.Reset(v8_isolate(), local);
- }
- TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier,
- std::move(global));
- TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- SimulateIncrementalMarking();
- // Finish GC to avoid removing the tracer while GC is running, which may end up
- // in an infinite loop because of unprocessed objects.
- FullGC();
-}
-
-TEST_F(EmbedderTracingTest, BasicTracedReference) {
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- auto* traced_handles = i_isolate()->traced_handles();
-
- const size_t initial_count = traced_handles->used_node_count();
- char* memory = new char[sizeof(v8::TracedReference<v8::Value>)];
- auto* traced = new (memory) v8::TracedReference<v8::Value>();
- {
- v8::HandleScope new_scope(v8_isolate());
- v8::Local<v8::Value> object(ConstructTraceableJSApiObject(
- v8_isolate()->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(traced->IsEmpty());
- *traced = v8::TracedReference<v8::Value>(v8_isolate(), object);
- EXPECT_FALSE(traced->IsEmpty());
- EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
- }
- traced->~TracedReference<v8::Value>();
- EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate())
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC();
- }
- EXPECT_EQ(initial_count, traced_handles->used_node_count());
- delete[] memory;
-}
-
-namespace {
-
-START_ALLOW_USE_DEPRECATED()
-
-class EmptyEmbedderHeapTracer : public v8::EmbedderHeapTracer {
- public:
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
-
- bool AdvanceTracing(double deadline_in_ms) final { return true; }
- bool IsTracingDone() final { return true; }
- void TracePrologue(EmbedderHeapTracer::TraceFlags) final {}
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-};
-
-END_ALLOW_USE_DEPRECATED()
-
-// EmbedderHeapTracer that can optimize Scavenger handling when used with
-// TracedReference.
-class EmbedderHeapTracerNoDestructorNonTracingClearing final
- : public EmptyEmbedderHeapTracer {
- public:
- explicit EmbedderHeapTracerNoDestructorNonTracingClearing(
- uint16_t class_id_to_optimize)
- : class_id_to_optimize_(class_id_to_optimize) {}
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- return handle.WrapperClassId() != class_id_to_optimize_;
- }
-
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
- if (handle.WrapperClassId() != class_id_to_optimize_) return;
-
- // Convention (for test): Objects that are optimized have their first field
- // set as a back pointer.
- BasicTracedReference<v8::Value>* original_handle =
- reinterpret_cast<BasicTracedReference<v8::Value>*>(
- v8::Object::GetAlignedPointerFromInternalField(
- handle.As<v8::Object>(), 0));
- original_handle->Reset();
- }
-
- private:
- uint16_t class_id_to_optimize_;
-};
-
-template <typename T>
-void SetupOptimizedAndNonOptimizedHandle(v8::Isolate* isolate,
- uint16_t optimized_class_id,
- T* optimized_handle,
- T* non_optimized_handle) {
- v8::HandleScope scope(isolate);
-
- v8::Local<v8::Object> optimized_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), optimized_handle, nullptr));
- EXPECT_TRUE(optimized_handle->IsEmpty());
- *optimized_handle = T(isolate, optimized_object);
- EXPECT_FALSE(optimized_handle->IsEmpty());
- optimized_handle->SetWrapperClassId(optimized_class_id);
-
- v8::Local<v8::Object> non_optimized_object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(non_optimized_handle->IsEmpty());
- *non_optimized_handle = T(isolate, non_optimized_object);
- EXPECT_FALSE(non_optimized_handle->IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceNoDestructorReclaimedOnScavenge) {
- if (v8_flags.single_generation) return;
- ManualGCScope manual_gc(i_isolate());
- v8::HandleScope scope(v8_isolate());
- constexpr uint16_t kClassIdToOptimize = 23;
- EmbedderHeapTracerNoDestructorNonTracingClearing tracer(kClassIdToOptimize);
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- auto* traced_handles = i_isolate()->traced_handles();
-
- const size_t initial_count = traced_handles->used_node_count();
- auto* optimized_handle = new v8::TracedReference<v8::Value>();
- auto* non_optimized_handle = new v8::TracedReference<v8::Value>();
- SetupOptimizedAndNonOptimizedHandle(v8_isolate(), kClassIdToOptimize,
- optimized_handle, non_optimized_handle);
- EXPECT_EQ(initial_count + 2, traced_handles->used_node_count());
- YoungGC();
- EXPECT_EQ(initial_count + 1, traced_handles->used_node_count());
- EXPECT_TRUE(optimized_handle->IsEmpty());
- delete optimized_handle;
- EXPECT_FALSE(non_optimized_handle->IsEmpty());
- non_optimized_handle->Reset();
- delete non_optimized_handle;
- EXPECT_EQ(initial_count, traced_handles->used_node_count());
-}
-
-namespace {
-
-template <typename T>
-V8_NOINLINE void OnStackTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer) {
- v8::Global<v8::Object> observer;
- T stack_ref;
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- stack_ref.Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceOnStack) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- OnStackTest<v8::TracedReference<v8::Value>>(v8_isolate(), &tracer);
-}
-
-namespace {
-
-enum class Operation {
- kCopy,
- kMove,
-};
-
-template <typename T>
-V8_NOINLINE void PerformOperation(Operation op, T* target, T* source) {
- switch (op) {
- case Operation::kMove:
- *target = std::move(*source);
- break;
- case Operation::kCopy:
- *target = *source;
- source->Reset();
- break;
- }
-}
-
-enum class TargetHandling {
- kNonInitialized,
- kInitializedYoungGen,
- kInitializedOldGen
-};
-
-V8_NOINLINE void StackToHeapTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle;
- v8::TracedReference<v8::Value>* heap_handle =
- new v8::TracedReference<v8::Value>();
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(
- IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!v8_flags.single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- FullGC(v8_isolate);
- EXPECT_FALSE(
- i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- heap_handle->Reset(v8_isolate, to_object);
- }
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle.Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- PerformOperation(op, heap_handle, &stack_handle);
- tracer->AddReferenceForTracing(heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate)
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC(v8_isolate);
- }
- ASSERT_TRUE(observer.IsEmpty());
- delete heap_handle;
-}
-
-V8_NOINLINE void HeapToStackTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle;
- v8::TracedReference<v8::Value>* heap_handle =
- new v8::TracedReference<v8::Value>();
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(
- IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!v8_flags.single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- FullGC(v8_isolate);
- EXPECT_FALSE(
- i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- stack_handle.Reset(v8_isolate, to_object);
- }
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- heap_handle->Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- tracer->AddReferenceForTracing(heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- PerformOperation(op, &stack_handle, heap_handle);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- stack_handle.Reset();
- FullGC(v8_isolate);
- EXPECT_TRUE(observer.IsEmpty());
- delete heap_handle;
-}
-
-V8_NOINLINE void StackToStackTest(v8::Isolate* v8_isolate,
- TestEmbedderHeapTracer* tracer, Operation op,
- TargetHandling target_handling) {
- v8::Global<v8::Object> observer;
- v8::TracedReference<v8::Value> stack_handle1;
- v8::TracedReference<v8::Value> stack_handle2;
- if (target_handling != TargetHandling::kNonInitialized) {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> to_object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- EXPECT_TRUE(
- IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*to_object)));
- if (!v8_flags.single_generation &&
- target_handling == TargetHandling::kInitializedOldGen) {
- FullGC(v8_isolate);
- EXPECT_FALSE(
- i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*to_object)));
- }
- stack_handle2.Reset(v8_isolate, to_object);
- }
- {
- v8::HandleScope scope(v8_isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- v8_isolate->GetCurrentContext(), nullptr, nullptr));
- stack_handle1.Reset(v8_isolate, object);
- observer.Reset(v8_isolate, object);
- observer.SetWeak();
- }
- EXPECT_FALSE(observer.IsEmpty());
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- PerformOperation(op, &stack_handle2, &stack_handle1);
- FullGC(v8_isolate);
- EXPECT_FALSE(observer.IsEmpty());
- stack_handle2.Reset();
- FullGC(v8_isolate);
- EXPECT_TRUE(observer.IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, TracedReferenceMove) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kNonInitialized);
- StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kMove,
- TargetHandling::kInitializedOldGen);
-}
-
-TEST_F(EmbedderTracingTest, TracedReferenceCopy) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToHeapTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- HeapToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kNonInitialized);
- StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedYoungGen);
- StackToStackTest(v8_isolate(), &tracer, Operation::kCopy,
- TargetHandling::kInitializedOldGen);
-}
-
-namespace {
-
-V8_NOINLINE void CreateTracedReferenceInDeepStack(
- v8::Isolate* isolate, v8::Global<v8::Object>* observer) {
- v8::TracedReference<v8::Value> stack_ref;
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> object(ConstructTraceableJSApiObject(
- isolate->GetCurrentContext(), nullptr, nullptr));
- stack_ref.Reset(isolate, object);
- observer->Reset(isolate, object);
- observer->SetWeak();
-}
-
-V8_NOINLINE void TracedReferenceOnStackReferencesAreTemporaryTest(
- v8::Isolate* v8_isolate, TestEmbedderHeapTracer* tracer) {
- v8::Global<v8::Object> observer;
- CreateTracedReferenceInDeepStack(v8_isolate, &observer);
- EXPECT_FALSE(observer.IsEmpty());
- {
- // Conservative scanning may find stale pointers to on-stack handles.
- // Disable scanning, assuming the slots are overwritten.
- EmbedderStackStateScope scope =
- EmbedderStackStateScope::ExplicitScopeForTesting(
- reinterpret_cast<i::Isolate*>(v8_isolate)
- ->heap()
- ->local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
- FullGC(v8_isolate);
- }
- EXPECT_TRUE(observer.IsEmpty());
-}
-
-} // namespace
-
-TEST_F(EmbedderTracingTest, OnStackReferencesAreTemporary) {
- ManualGCScope manual_gc(i_isolate());
- TestEmbedderHeapTracer tracer;
- heap::TemporaryEmbedderHeapTracerScope tracer_scope(v8_isolate(), &tracer);
- tracer.SetStackStart(
- static_cast<void*>(base::Stack::GetCurrentFrameAddress()));
- TracedReferenceOnStackReferencesAreTemporaryTest(v8_isolate(), &tracer);
-}
-
-} // namespace heap
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 95857cf4f7..6865486d53 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -119,6 +119,7 @@ void StopTracing(GCTracer* tracer, GarbageCollector collector) {
} // namespace
TEST_F(GCTracerTest, AllocationThroughput) {
+ if (v8_flags.stress_incremental_marking) return;
// GCTracer::AllocationThroughputInBytesPerMillisecond ignores global memory.
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -147,6 +148,7 @@ TEST_F(GCTracerTest, AllocationThroughput) {
}
TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -184,6 +186,7 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
}
TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -228,6 +231,7 @@ TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
}
TEST_F(GCTracerTest, RegularScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -242,6 +246,7 @@ TEST_F(GCTracerTest, RegularScope) {
}
TEST_F(GCTracerTest, IncrementalScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -258,6 +263,7 @@ TEST_F(GCTracerTest, IncrementalScope) {
}
TEST_F(GCTracerTest, IncrementalMarkingDetails) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -307,6 +313,7 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
}
TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -357,6 +364,7 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
}
TEST_F(GCTracerTest, MutatorUtilization) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
@@ -393,6 +401,7 @@ TEST_F(GCTracerTest, MutatorUtilization) {
}
TEST_F(GCTracerTest, BackgroundScavengerScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
StartTracing(tracer, GarbageCollector::SCAVENGER, StartTracingMode::kAtomic);
@@ -407,6 +416,7 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
}
TEST_F(GCTracerTest, BackgroundMinorMCScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
StartTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR,
@@ -416,10 +426,6 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY,
20);
tracer->AddScopeSample(GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY, 2);
- tracer->AddScopeSample(
- GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 30);
- tracer->AddScopeSample(
- GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 3);
StopTracing(tracer, GarbageCollector::MINOR_MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
11,
@@ -427,12 +433,10 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
EXPECT_DOUBLE_EQ(
22, tracer->current_
.scopes[GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY]);
- EXPECT_DOUBLE_EQ(
- 33, tracer->current_.scopes
- [GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]);
}
TEST_F(GCTracerTest, BackgroundMajorMCScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
tracer->AddScopeSample(GCTracer::Scope::MC_BACKGROUND_MARKING, 100);
@@ -479,6 +483,7 @@ class ThreadWithBackgroundScope final : public base::Thread {
};
TEST_F(GCTracerTest, MultithreadedBackgroundScope) {
+ if (v8_flags.stress_incremental_marking) return;
GCTracer* tracer = i_isolate()->heap()->tracer();
ThreadWithBackgroundScope thread1(tracer);
ThreadWithBackgroundScope thread2(tracer);
diff --git a/deps/v8/test/unittests/heap/global-handles-unittest.cc b/deps/v8/test/unittests/heap/global-handles-unittest.cc
index a7eda52f7e..0789f678ff 100644
--- a/deps/v8/test/unittests/heap/global-handles-unittest.cc
+++ b/deps/v8/test/unittests/heap/global-handles-unittest.cc
@@ -27,6 +27,7 @@
#include "src/handles/global-handles.h"
+#include "include/v8-embedder-heap.h"
#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate.h"
@@ -47,29 +48,13 @@ struct TracedReferenceWrapper {
v8::TracedReference<v8::Object> handle;
};
-START_ALLOW_USE_DEPRECATED()
-
-// Empty v8::EmbedderHeapTracer that never keeps objects alive on Scavenge. See
-// |IsRootForNonTracingGC|.
-class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
+class NonRootingEmbedderRootsHandler final : public v8::EmbedderRootsHandler {
public:
- NonRootingEmbedderHeapTracer() = default;
-
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
- bool AdvanceTracing(double deadline_in_ms) final { return true; }
- bool IsTracingDone() final { return true; }
- void TracePrologue(TraceFlags) final {}
- void TraceEpilogue(TraceSummary*) final {}
- void EnterFinalPause(EmbedderStackState) final {}
-
- bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
+ bool IsRoot(const v8::TracedReference<v8::Value>& handle) final {
return false;
}
- void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle) final {
+ void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
for (auto* wrapper : wrappers_) {
if (wrapper->handle == handle) {
wrapper->handle.Reset();
@@ -85,19 +70,18 @@ class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
std::vector<TracedReferenceWrapper*> wrappers_;
};
-END_ALLOW_USE_DEPRECATED()
-
void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Isolate* isolate = info.GetIsolate();
info.GetReturnValue().Set(v8::Number::New(isolate, 0));
}
-struct FlagAndGlobal {
+struct FlagAndHandles {
bool flag;
v8::Global<v8::Object> handle;
+ v8::Local<v8::Object> local;
};
-void ResetHandleAndSetFlag(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ResetHandleAndSetFlag(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.GetParameter()->flag = true;
}
@@ -155,7 +139,7 @@ void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndGlobal fp;
+ FlagAndHandles fp;
construct_function(isolate, context, &fp);
CHECK(IsNewObjectInCorrectGeneration(isolate, fp.handle));
fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
@@ -176,15 +160,17 @@ class GlobalHandlesTest : public TestWithContext {
ModifierFunction modifier_function,
SurvivalMode survives) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- NonRootingEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ NonRootingEmbedderRootsHandler roots_handler;
+ v8_isolate()->SetEmbedderRootsHandler(&roots_handler);
auto fp = std::make_unique<TracedReferenceWrapper>();
- tracer.Register(fp.get());
+ roots_handler.Register(fp.get());
construct_function(isolate, context, fp.get());
CHECK(IsNewObjectInCorrectGeneration(isolate, fp->handle));
modifier_function(fp.get());
@@ -193,6 +179,8 @@ class GlobalHandlesTest : public TestWithContext {
// handle directly here.
CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp->handle.IsEmpty());
CHECK_IMPLIES(survives == SurvivalMode::kDies, fp->handle.IsEmpty());
+
+ v8_isolate()->SetEmbedderRootsHandler(nullptr);
}
};
@@ -202,6 +190,8 @@ TEST_F(GlobalHandlesTest, EternalHandles) {
Isolate* isolate = i_isolate();
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
EternalHandles* eternal_handles = isolate->eternal_handles();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ isolate->heap());
// Create a number of handles that will not be on a block boundary
const int kArrayLength = 2048 - 1;
@@ -289,10 +279,10 @@ TEST_F(GlobalHandlesTest, WeakPersistentSmi) {
v8::WeakCallbackType::kParameter);
}
-START_ALLOW_USE_DEPRECATED()
-
TEST_F(GlobalHandlesTest, PhantomHandlesWithoutCallbacks) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::Global<v8::Object> g1, g2;
{
@@ -312,9 +302,14 @@ TEST_F(GlobalHandlesTest, PhantomHandlesWithoutCallbacks) {
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSObjectDiesOnScavenge) {
if (v8_flags.single_generation) return;
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSObject<FlagAndGlobal>, [](FlagAndGlobal* fp) {},
- [this]() { CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kDies);
+ v8_isolate(), &ConstructJSObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectGarbage(i::NEW_SPACE); },
+ SurvivalMode::kDies);
}
TEST_F(GlobalHandlesTest, TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
@@ -327,19 +322,22 @@ TEST_F(GlobalHandlesTest, TracedReferenceToUnmodifiedJSObjectSurvivesScavenge) {
}
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact) {
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSObject<FlagAndGlobal>, [](FlagAndGlobal* fp) {},
- [this]() { CollectAllGarbage(); }, SurvivalMode::kDies);
+ v8_isolate(), &ConstructJSObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectAllGarbage(); },
+ SurvivalMode::kDies);
}
TEST_F(GlobalHandlesTest,
WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
WeakHandleTest(
- v8_isolate(), &ConstructJSObject<FlagAndGlobal>,
- [this](FlagAndGlobal* fp) {
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
- USE(handle);
+ v8_isolate(), &ConstructJSObject<FlagAndHandles>,
+ [this](FlagAndHandles* fp) {
+ fp->local = v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
},
[this]() { CollectAllGarbage(); }, SurvivalMode::kSurvives);
}
@@ -347,9 +345,13 @@ TEST_F(GlobalHandlesTest,
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSApiObjectDiesOnScavenge) {
if (v8_flags.single_generation) return;
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [](FlagAndGlobal* fp) {}, [this]() { CollectGarbage(i::NEW_SPACE); },
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectGarbage(i::NEW_SPACE); },
SurvivalMode::kDies);
}
@@ -390,30 +392,30 @@ TEST_F(GlobalHandlesTest,
if (v8_flags.single_generation) return;
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [this](FlagAndGlobal* fp) {
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
- USE(handle);
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [this](FlagAndHandles* fp) {
+ fp->local = v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
},
[this]() { CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kSurvives);
}
TEST_F(GlobalHandlesTest, WeakHandleToUnmodifiedJSApiObjectDiesOnMarkCompact) {
+ // We need to invoke GC without stack, otherwise the object may survive.
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
+
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [](FlagAndGlobal* fp) {}, [this]() { CollectAllGarbage(); },
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [](FlagAndHandles* fp) {}, [this]() { CollectAllGarbage(); },
SurvivalMode::kDies);
}
TEST_F(GlobalHandlesTest,
WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
WeakHandleTest(
- v8_isolate(), &ConstructJSApiObject<FlagAndGlobal>,
- [this](FlagAndGlobal* fp) {
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
- USE(handle);
+ v8_isolate(), &ConstructJSApiObject<FlagAndHandles>,
+ [this](FlagAndHandles* fp) {
+ fp->local = v8::Local<v8::Object>::New(v8_isolate(), fp->handle);
},
[this]() { CollectAllGarbage(); }, SurvivalMode::kSurvives);
}
@@ -466,22 +468,22 @@ TEST_F(GlobalHandlesTest,
namespace {
-void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->flag = true;
YoungGC(data.GetIsolate());
}
-void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(ForceScavenge2);
}
-void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->flag = true;
FullGC(data.GetIsolate());
}
-void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(ForceMarkSweep2);
}
@@ -490,12 +492,14 @@ void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
if (v8_flags.single_generation) {
- FlagAndGlobal fp;
+ FlagAndHandles fp;
ConstructJSApiObject(isolate, context, &fp);
CHECK_IMPLIES(!v8_flags.single_generation,
!InYoungGeneration(isolate, fp.handle));
@@ -508,7 +512,7 @@ TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
}
static const int kNumberOfGCTypes = 2;
- using Callback = v8::WeakCallbackInfo<FlagAndGlobal>::Callback;
+ using Callback = v8::WeakCallbackInfo<FlagAndHandles>::Callback;
Callback gc_forcing_callback[kNumberOfGCTypes] = {&ForceScavenge1,
&ForceMarkSweep1};
@@ -519,7 +523,7 @@ TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
- FlagAndGlobal fp;
+ FlagAndHandles fp;
ConstructJSApiObject(isolate, context, &fp);
CHECK(InYoungGeneration(isolate, fp.handle));
fp.flag = false;
@@ -534,11 +538,11 @@ TEST_F(GlobalHandlesTest, GCFromWeakCallbacks) {
namespace {
-void SecondPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void SecondPassCallback(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->flag = true;
}
-void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
+void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndHandles>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(SecondPassCallback);
}
@@ -547,10 +551,12 @@ void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
TEST_F(GlobalHandlesTest, SecondPassPhantomCallbacks) {
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndGlobal fp;
+ FlagAndHandles fp;
ConstructJSApiObject(isolate, context, &fp);
fp.flag = false;
fp.handle.SetWeak(&fp, FirstPassCallback, v8::WeakCallbackType::kParameter);
diff --git a/deps/v8/test/unittests/heap/global-safepoint-unittest.cc b/deps/v8/test/unittests/heap/global-safepoint-unittest.cc
index 16cb6ea64a..024d9ac4e8 100644
--- a/deps/v8/test/unittests/heap/global-safepoint-unittest.cc
+++ b/deps/v8/test/unittests/heap/global-safepoint-unittest.cc
@@ -72,7 +72,6 @@ class InfiniteLooperThread final : public ParkingThread {
v8::Local<v8::String> source =
v8::String::NewFromUtf8(v8_isolate, "for(;;) {}").ToLocalChecked();
auto context = v8_isolate->GetCurrentContext();
- v8::Local<v8::Value> result;
v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
@@ -125,9 +124,9 @@ TEST_F(GlobalSafepointTest, Interrupt) {
// as of FeedbackVectors, and we wouldn't be testing the interrupt check.
base::OS::Sleep(base::TimeDelta::FromMilliseconds(500));
GlobalSafepointScope global_safepoint(i_main_isolate);
- i_main_isolate->shared_heap_isolate()
+ i_main_isolate->shared_space_isolate()
->global_safepoint()
- ->IterateClientIsolates([](Isolate* client) {
+ ->IterateSharedSpaceAndClientIsolates([](Isolate* client) {
client->stack_guard()->RequestTerminateExecution();
});
}
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 45230d7c32..8f649d0412 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -151,12 +151,11 @@ TEST_F(HeapTest, HeapLayout) {
EXPECT_TRUE(IsAligned(cage_base, size_t{4} * GB));
Address code_cage_base = i_isolate()->code_cage_base();
- EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
-
-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
- Address isolate_root = i_isolate()->isolate_root();
- EXPECT_EQ(cage_base, isolate_root);
-#endif
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ EXPECT_TRUE(IsAligned(code_cage_base, kMinExpectedOSPageSize));
+ } else {
+ EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
+ }
// Check that all memory chunks belong to this region.
base::AddressRegion heap_reservation(cage_base, size_t{4} * GB);
@@ -184,12 +183,14 @@ TEST_F(HeapTest, HeapLayout) {
namespace {
void ShrinkNewSpace(NewSpace* new_space) {
if (!v8_flags.minor_mc) {
- new_space->Shrink();
+ SemiSpaceNewSpace::From(new_space)->Shrink();
return;
}
// MinorMC shrinks the space as part of sweeping.
PagedNewSpace* paged_new_space = PagedNewSpace::From(new_space);
- GCTracer* tracer = paged_new_space->heap()->tracer();
+ Heap* heap = paged_new_space->heap();
+ heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
+ GCTracer* tracer = heap->tracer();
tracer->StartObservablePause();
tracer->StartCycle(GarbageCollector::MARK_COMPACTOR,
GarbageCollectionReason::kTesting, "heap unittest",
@@ -198,7 +199,7 @@ void ShrinkNewSpace(NewSpace* new_space) {
paged_new_space->StartShrinking();
for (Page* page = paged_new_space->first_page();
page != paged_new_space->last_page() &&
- (paged_new_space->ShouldReleasePage());) {
+ (paged_new_space->ShouldReleaseEmptyPage());) {
Page* current_page = page;
page = page->next_page();
if (current_page->allocated_bytes() == 0) {
@@ -392,6 +393,7 @@ TEST_F(HeapTest, RememberedSet_InsertOnPromotingObjectToOld) {
// Promote 'arr' into old, its element is still in new, the old to new
// refs are inserted into the remembered sets during GC.
CollectGarbage(i::NEW_SPACE);
+ heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
CHECK(heap->InOldSpace(*arr));
CHECK(heap->InYoungGeneration(arr->get(0)));
@@ -430,11 +432,9 @@ TEST_F(HeapTest, Regress978156) {
marking->Start(GarbageCollector::MARK_COMPACTOR,
i::GarbageCollectionReason::kTesting);
}
- MarkingState* marking_state = heap->marking_state();
// 6. Mark the filler black to access its two markbits. This triggers
// an out-of-bounds access of the marking bitmap in a bad case.
- marking_state->WhiteToGrey(filler);
- marking_state->GreyToBlack(filler);
+ heap->marking_state()->TryMarkAndAccountLiveBytes(filler);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/heap-utils.cc b/deps/v8/test/unittests/heap/heap-utils.cc
index c1a6f3cf5d..a2aa2ff653 100644
--- a/deps/v8/test/unittests/heap/heap-utils.cc
+++ b/deps/v8/test/unittests/heap/heap-utils.cc
@@ -244,7 +244,7 @@ void FillCurrentSemiSpacePage(v8::internal::NewSpace* space,
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled; it actually points to
// the current allocation pointer.
- DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
+ DCHECK_IMPLIES(!space->heap()->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining = GetSpaceRemainingOnCurrentSemiSpacePage(space);
if (space_remaining == 0) return;
@@ -282,5 +282,16 @@ bool IsNewObjectInCorrectGeneration(HeapObject object) {
: i::Heap::InYoungGeneration(object);
}
+void FinalizeGCIfRunning(Isolate* isolate) {
+ if (!isolate) {
+ return;
+ }
+ auto* heap = isolate->heap();
+ if (heap->incremental_marking()->IsMarking()) {
+ heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+ heap->CompleteSweepingFull();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-utils.h b/deps/v8/test/unittests/heap/heap-utils.h
index 0d9dddba87..633652617d 100644
--- a/deps/v8/test/unittests/heap/heap-utils.h
+++ b/deps/v8/test/unittests/heap/heap-utils.h
@@ -33,22 +33,18 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
WithHeapInternals& operator=(const WithHeapInternals&) = delete;
void CollectGarbage(AllocationSpace space) {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(space, GarbageCollectionReason::kTesting);
}
void FullGC() {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
}
void YoungGC() {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
}
void CollectAllAvailableGarbage() {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kTesting);
}
@@ -92,7 +88,6 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
}
void GcAndSweep(AllocationSpace space) {
- ScanStackModeScopeForTesting scope(heap(), Heap::ScanStackMode::kNone);
heap()->CollectGarbage(space, GarbageCollectionReason::kTesting);
if (heap()->sweeping_in_progress()) {
IsolateSafepointScope scope(heap());
@@ -102,26 +97,6 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
}
};
-START_ALLOW_USE_DEPRECATED()
-
-class V8_NODISCARD TemporaryEmbedderHeapTracerScope {
- public:
- TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
- v8::EmbedderHeapTracer* tracer)
- : isolate_(isolate) {
- isolate_->SetEmbedderHeapTracer(tracer);
- }
-
- ~TemporaryEmbedderHeapTracerScope() {
- isolate_->SetEmbedderHeapTracer(nullptr);
- }
-
- private:
- v8::Isolate* const isolate_;
-};
-
-END_ALLOW_USE_DEPRECATED()
-
using TestWithHeapInternals = //
WithHeapInternals< //
WithInternalIsolateMixin< //
@@ -136,19 +111,16 @@ using TestWithHeapInternalsAndContext = //
inline void CollectGarbage(AllocationSpace space, v8::Isolate* isolate) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
}
inline void FullGC(v8::Isolate* isolate) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectAllGarbage(Heap::kNoGCFlags, GarbageCollectionReason::kTesting);
}
inline void YoungGC(v8::Isolate* isolate) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- ScanStackModeScopeForTesting scope(heap, Heap::ScanStackMode::kNone);
heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
}
@@ -170,6 +142,8 @@ bool IsNewObjectInCorrectGeneration(v8::Isolate* isolate,
return IsNewObjectInCorrectGeneration(*v8::Utils::OpenHandle(*tmp));
}
+void FinalizeGCIfRunning(Isolate* isolate);
+
} // namespace internal
} // namespace v8
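
The removed TemporaryEmbedderHeapTracerScope (and the dropped ScanStackModeScopeForTesting lines) followed the usual set-on-construction / reset-on-destruction RAII idiom. A generic sketch of that idiom, with placeholder Host/Tracer template parameters rather than V8's deprecated EmbedderHeapTracer API:

// Generic sketch of the RAII scope pattern used by the deleted class.
template <typename Host, typename Tracer>
class TemporaryTracerScope {
 public:
  TemporaryTracerScope(Host* host, Tracer* tracer) : host_(host) {
    host_->SetTracer(tracer);
  }
  ~TemporaryTracerScope() { host_->SetTracer(nullptr); }

  TemporaryTracerScope(const TemporaryTracerScope&) = delete;
  TemporaryTracerScope& operator=(const TemporaryTracerScope&) = delete;

 private:
  Host* const host_;
};

Usage mirrors the deleted class: construct the scope on the stack for the duration of a test, and the destructor restores the previous state even on early returns.
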
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index 5330028295..cbbdeb1a6c 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -159,13 +159,9 @@ class BackgroundThreadForGCEpilogue final : public v8::base::Thread {
TEST_F(LocalHeapTest, GCEpilogue) {
Heap* heap = i_isolate()->heap();
- LocalHeap lh(heap, ThreadKind::kMain);
- lh.SetUpMainThreadForTesting();
+ LocalHeap* lh = heap->main_thread_local_heap();
std::array<GCEpilogue, 3> epilogue;
- {
- UnparkedScope unparked(&lh);
- lh.AddGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
- }
+ lh->AddGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
auto thread1 =
std::make_unique<BackgroundThreadForGCEpilogue>(heap, true, &epilogue[1]);
auto thread2 = std::make_unique<BackgroundThreadForGCEpilogue>(heap, false,
@@ -174,18 +170,12 @@ TEST_F(LocalHeapTest, GCEpilogue) {
CHECK(thread2->Start());
epilogue[1].WaitUntilStarted();
epilogue[2].WaitUntilStarted();
- {
- UnparkedScope scope(&lh);
- PreciseCollectAllGarbage(i_isolate());
- }
+ PreciseCollectAllGarbage(i_isolate());
epilogue[1].RequestStop();
epilogue[2].RequestStop();
thread1->Join();
thread2->Join();
- {
- UnparkedScope unparked(&lh);
- lh.RemoveGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
- }
+ lh->RemoveGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
for (auto& e : epilogue) {
CHECK(e.WasInvoked());
}
diff --git a/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc b/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc
index 6801f1441f..c8823e9644 100644
--- a/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-inner-pointer-resolution-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/heap/conservative-stack-visitor.h"
#include "src/heap/gc-tracer.h"
-#include "src/heap/mark-compact.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
@@ -15,7 +15,18 @@ namespace {
constexpr int Tagged = kTaggedSize;
constexpr int FullCell = Bitmap::kBitsPerCell * Tagged;
-class InnerPointerResolutionTest : public TestWithIsolate {
+template <typename TMixin>
+class WithInnerPointerResolutionMixin : public TMixin {
+ public:
+ Address ResolveInnerPointer(Address maybe_inner_ptr) {
+ return ConservativeStackVisitor::FindBasePtrForMarking(
+ maybe_inner_ptr, this->isolate()->heap()->memory_allocator(),
+ GarbageCollector::MARK_COMPACTOR);
+ }
+};
+
+class InnerPointerResolutionTest
+ : public WithInnerPointerResolutionMixin<TestWithIsolate> {
public:
struct ObjectRequest {
int size; // The only required field.
@@ -48,7 +59,6 @@ class InnerPointerResolutionTest : public TestWithIsolate {
Heap* heap() { return isolate()->heap(); }
MemoryAllocator* allocator() { return heap()->memory_allocator(); }
- MarkCompactCollector* collector() { return heap()->mark_compact_collector(); }
// Create, free and lookup pages, normal or large.
@@ -205,12 +215,12 @@ class InnerPointerResolutionTest : public TestWithIsolate {
case ObjectRequest::WHITE:
break;
case ObjectRequest::GREY:
- heap()->marking_state()->WhiteToGrey(
+ heap()->marking_state()->TryMark(
HeapObject::FromAddress(object.address));
break;
case ObjectRequest::BLACK:
DCHECK_LE(2 * Tagged, object.size);
- heap()->marking_state()->WhiteToBlack(
+ heap()->marking_state()->TryMarkAndAccountLiveBytes(
HeapObject::FromAddress(object.address));
break;
case ObjectRequest::BLACK_AREA: {
@@ -227,8 +237,7 @@ class InnerPointerResolutionTest : public TestWithIsolate {
void RunTestInside(const ObjectRequest& object, int offset) {
DCHECK_LE(0, offset);
DCHECK_GT(object.size, offset);
- Address base_ptr =
- collector()->FindBasePtrForMarking(object.address + offset);
+ Address base_ptr = ResolveInnerPointer(object.address + offset);
bool should_return_null =
!IsPageAlive(object.page_id) || (object.type == ObjectRequest::FREE) ||
(object.type == ObjectRequest::REGULAR &&
@@ -243,7 +252,7 @@ class InnerPointerResolutionTest : public TestWithIsolate {
// This must be called with an address not contained in any created object.
void RunTestOutside(Address ptr) {
- Address base_ptr = collector()->FindBasePtrForMarking(ptr);
+ Address base_ptr = ResolveInnerPointer(ptr);
EXPECT_EQ(kNullAddress, base_ptr);
}
@@ -600,16 +609,20 @@ TEST_F(InnerPointerResolutionTest, FreePages) {
TestAll();
}
-using InnerPointerResolutionHeapTest = TestWithHeapInternalsAndContext;
+using InnerPointerResolutionHeapTest =
+ WithInnerPointerResolutionMixin<TestWithHeapInternalsAndContext>;
TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
ManualGCScope manual_gc_scope(isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap());
v8_flags.page_promotion = false;
Persistent<v8::FixedArray> weak1, weak2, strong;
Address inner_ptr1, inner_ptr2, inner_ptr3, outside_ptr1, outside_ptr2;
Page *page1, *page2;
+ auto allocator = heap()->memory_allocator();
+
{
PtrComprCageBase cage_base{isolate()};
HandleScope scope(isolate());
@@ -636,13 +649,18 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
EXPECT_TRUE(v8_flags.minor_mc || page2->IsToPage());
EXPECT_NE(page1, page2);
- // Allocate one more object, small enough that it fits in page2.
- // Keep a strong reference to this object.
+ // Allocate one more object, small enough that it fits in either page1 or
+ // page2. Keep a strong reference to this object.
auto h3 = factory()->NewFixedArray(16, AllocationType::kYoung);
strong.Reset(v8_isolate(), Utils::FixedArrayToLocal(h3));
auto obj3 = h3->GetHeapObject();
- EXPECT_EQ(page2, Page::FromHeapObject(obj3));
- EXPECT_EQ(obj3.address(), obj2.address() + obj2.Size(cage_base));
+ auto page3 = Page::FromHeapObject(obj3);
+ EXPECT_TRUE(page3 == page1 || page3 == page2);
+ if (page3 == page1) {
+ EXPECT_EQ(obj3.address(), obj1.address() + obj1.Size(cage_base));
+ } else {
+ EXPECT_EQ(obj3.address(), obj2.address() + obj2.Size(cage_base));
+ }
// Keep inner pointers to all objects.
inner_ptr1 = obj1.address() + 17 * Tagged;
@@ -654,28 +672,22 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
outside_ptr2 = page2->area_end() - 2 * Tagged;
EXPECT_LE(obj1.address() + obj1.Size(cage_base), outside_ptr1);
EXPECT_LE(obj2.address() + obj2.Size(cage_base), outside_ptr2);
- EXPECT_LE(obj3.address() + obj3.Size(cage_base), outside_ptr2);
+ if (page3 == page1) {
+ EXPECT_LE(obj3.address() + obj3.Size(cage_base), outside_ptr1);
+ } else {
+ EXPECT_LE(obj3.address() + obj3.Size(cage_base), outside_ptr2);
+ }
// Ensure the young generation space is iterable.
heap()->new_space()->MakeLinearAllocationAreaIterable();
// Inner pointer resolution should work now, finding the objects in the
// case of the inner pointers.
- EXPECT_EQ(
- obj1.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
- EXPECT_EQ(
- obj2.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
- EXPECT_EQ(
- obj3.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr3));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
+ EXPECT_EQ(obj1.address(), ResolveInnerPointer(inner_ptr1));
+ EXPECT_EQ(obj2.address(), ResolveInnerPointer(inner_ptr2));
+ EXPECT_EQ(obj3.address(), ResolveInnerPointer(inner_ptr3));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr2));
// Start incremental marking and mark the third object.
i::IncrementalMarking* marking = heap()->incremental_marking();
@@ -688,21 +700,18 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
i::GarbageCollectionReason::kTesting);
}
MarkingState* marking_state = heap()->marking_state();
- marking_state->WhiteToGrey(obj3);
- marking_state->GreyToBlack(obj3);
+ marking_state->TryMarkAndAccountLiveBytes(obj3);
}
// Garbage collection should reclaim the two large objects with the weak
// references, but not the small one with the strong reference.
- CollectGarbage(NEW_SPACE);
+ GcAndSweep(NEW_SPACE);
EXPECT_TRUE(weak1.IsEmpty());
EXPECT_TRUE(weak2.IsEmpty());
EXPECT_TRUE(!strong.IsEmpty());
// The two pages should still be around, in the new space.
- EXPECT_EQ(page1, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr1));
- EXPECT_EQ(page2, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr2));
+ EXPECT_EQ(page1, allocator->LookupChunkContainingAddress(inner_ptr1));
+ EXPECT_EQ(page2, allocator->LookupChunkContainingAddress(inner_ptr2));
EXPECT_EQ(AllocationSpace::NEW_SPACE, page1->owner_identity());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page2->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page1->IsFromPage());
@@ -711,61 +720,39 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedRegularYoungPages) {
// Inner pointer resolution should work with pointers to unused young
// generation pages (in case of the scavenger, the two pages are now in the
// "from" semispace). There are no objects to be found.
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr3));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr3));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr2));
// Garbage collection once more.
- CollectGarbage(NEW_SPACE);
+ GcAndSweep(NEW_SPACE);
EXPECT_EQ(AllocationSpace::NEW_SPACE, page1->owner_identity());
EXPECT_EQ(AllocationSpace::NEW_SPACE, page2->owner_identity());
// The two pages should still be around, in the new space.
- EXPECT_EQ(page1, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr1));
- EXPECT_EQ(page2, heap()->memory_allocator()->LookupChunkContainingAddress(
- inner_ptr2));
+ EXPECT_EQ(page1, allocator->LookupChunkContainingAddress(inner_ptr1));
+ EXPECT_EQ(page2, allocator->LookupChunkContainingAddress(inner_ptr2));
EXPECT_TRUE(v8_flags.minor_mc || page1->IsToPage());
EXPECT_TRUE(v8_flags.minor_mc || page2->IsToPage());
// Inner pointer resolution should work with pointers to unused young
// generation pages (in case of the scavenger, the two pages are now in the
// "to" semispace). There are no objects to be found.
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr2));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr3));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr1));
- EXPECT_EQ(
- kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(outside_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr2));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr3));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr1));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(outside_ptr2));
}
TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
ManualGCScope manual_gc_scope(isolate());
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(heap());
v8_flags.page_promotion = false;
Global<v8::FixedArray> weak;
Address inner_ptr;
- Page* page;
{
PtrComprCageBase cage_base{isolate()};
@@ -780,7 +767,7 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
weak.Reset(v8_isolate(), Utils::FixedArrayToLocal(h));
weak.SetWeak();
auto obj = h->GetHeapObject();
- page = Page::FromHeapObject(obj);
+ auto page = Page::FromHeapObject(obj);
EXPECT_TRUE(page->IsLargePage());
EXPECT_EQ(AllocationSpace::NEW_LO_SPACE, page->owner_identity());
EXPECT_TRUE(v8_flags.minor_mc || page->IsToPage());
@@ -789,26 +776,25 @@ TEST_F(InnerPointerResolutionHeapTest, UnusedLargeYoungPage) {
inner_ptr = obj.address() + 17 * Tagged;
// Inner pointer resolution should work now, finding the object.
- EXPECT_EQ(
- obj.address(),
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(obj.address(), ResolveInnerPointer(inner_ptr));
}
// Garbage collection should reclaim the object.
- CollectGarbage(NEW_SPACE);
+ GcAndSweep(NEW_SPACE);
EXPECT_TRUE(weak.IsEmpty());
// Inner pointer resolution should work with a pointer to an unused young
// generation large page. There is no object to be found.
- EXPECT_EQ(kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr));
}
TEST_F(InnerPointerResolutionHeapTest, RegularPageAfterEnd) {
+ auto allocator = heap()->memory_allocator();
+
// Allocate a regular page.
OldSpace* old_space = heap()->old_space();
DCHECK_NE(nullptr, old_space);
- auto* page = heap()->memory_allocator()->AllocatePage(
+ auto* page = allocator->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, old_space, NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
@@ -825,21 +811,21 @@ TEST_F(InnerPointerResolutionHeapTest, RegularPageAfterEnd) {
// Inner pointer resolution after the end of the page area should work.
Address inner_ptr = page->area_end() + Tagged;
EXPECT_FALSE(Page::IsAlignedToPageSize(inner_ptr));
- EXPECT_EQ(kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr));
// Deallocate the page.
- heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
- page);
+ allocator->Free(MemoryAllocator::FreeMode::kImmediately, page);
}
TEST_F(InnerPointerResolutionHeapTest, LargePageAfterEnd) {
+ auto allocator = heap()->memory_allocator();
+
// Allocate a large page.
OldLargeObjectSpace* lo_space = heap()->lo_space();
EXPECT_NE(nullptr, lo_space);
const int size = 3 * (1 << kPageSizeBits) / 2;
- LargePage* page = heap()->memory_allocator()->AllocateLargePage(
- lo_space, size, NOT_EXECUTABLE);
+ LargePage* page =
+ allocator->AllocateLargePage(lo_space, size, NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
// The end of the page area is expected not to coincide with the beginning of
@@ -849,12 +835,10 @@ TEST_F(InnerPointerResolutionHeapTest, LargePageAfterEnd) {
// Inner pointer resolution after the end of the page area should work.
Address inner_ptr = page->area_end() + Tagged;
EXPECT_FALSE(Page::IsAlignedToPageSize(inner_ptr));
- EXPECT_EQ(kNullAddress,
- heap()->mark_compact_collector()->FindBasePtrForMarking(inner_ptr));
+ EXPECT_EQ(kNullAddress, ResolveInnerPointer(inner_ptr));
// Deallocate the page.
- heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
- page);
+ allocator->Free(MemoryAllocator::FreeMode::kImmediately, page);
}
} // namespace internal
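
WithInnerPointerResolutionMixin above is a template mixin: it wraps an existing fixture type and layers ResolveInnerPointer on top of whatever the wrapped fixture already exposes. A toy sketch of the same composition pattern (all names below are illustrative, not V8's):

#include <string>

class BaseFixture {
 public:
  int isolate() const { return 42; }  // Stand-in for TestWithIsolate.
};

template <typename TMixin>
class WithExtraHelpers : public TMixin {
 public:
  // Helpers added by the mixin can use whatever the wrapped layer provides
  // (here, isolate()).
  std::string Describe() const {
    return "isolate #" + std::to_string(this->isolate());
  }
};

// Fixtures are composed by nesting the layers.
using FixtureWithHelpers = WithExtraHelpers<BaseFixture>;

InnerPointerResolutionTest and InnerPointerResolutionHeapTest are assembled the same way, differing only in which base fixture gets wrapped.
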
diff --git a/deps/v8/test/unittests/heap/marking-worklist-unittest.cc b/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
index 0bd53c2893..45bbdad4be 100644
--- a/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-worklist-unittest.cc
@@ -22,7 +22,10 @@ TEST_F(MarkingWorklistTest, PushPop) {
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.Push(pushed_object);
HeapObject popped_object;
EXPECT_TRUE(worklists.Pop(&popped_object));
@@ -33,30 +36,25 @@ TEST_F(MarkingWorklistTest, PushPopOnHold) {
MarkingWorklists holder;
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.PushOnHold(pushed_object);
HeapObject popped_object;
EXPECT_TRUE(worklists.PopOnHold(&popped_object));
EXPECT_EQ(popped_object, pushed_object);
}
-TEST_F(MarkingWorklistTest, PushPopEmbedder) {
- MarkingWorklists holder;
- MarkingWorklists::Local worklists(&holder);
- HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
- worklists.PushWrapper(pushed_object);
- HeapObject popped_object;
- EXPECT_TRUE(worklists.PopWrapper(&popped_object));
- EXPECT_EQ(popped_object, pushed_object);
-}
-
TEST_F(MarkingWorklistTest, MergeOnHold) {
MarkingWorklists holder;
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worker_worklists.PushOnHold(pushed_object);
worker_worklists.Publish();
main_worklists.MergeOnHold();
@@ -70,7 +68,10 @@ TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
main_worklists.Push(pushed_object);
main_worklists.ShareWork();
HeapObject popped_object;
@@ -84,7 +85,10 @@ TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
holder.CreateContextWorklists({context});
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.SwitchToContext(context);
worklists.Push(pushed_object);
worklists.SwitchToSharedForTesting();
@@ -100,7 +104,10 @@ TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
holder.CreateContextWorklists({context});
MarkingWorklists::Local worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
worklists.SwitchToContext(context);
worklists.Push(pushed_object);
EXPECT_FALSE(worklists.IsEmpty());
@@ -121,7 +128,10 @@ TEST_F(MarkingWorklistTest, ContextWorklistCrossTask) {
MarkingWorklists::Local main_worklists(&holder);
MarkingWorklists::Local worker_worklists(&holder);
HeapObject pushed_object =
- ReadOnlyRoots(i_isolate()->heap()).undefined_value();
+ HeapObject::cast(i_isolate()
+ ->roots_table()
+ .slot(RootIndex::kFirstStrongRoot)
+ .load(i_isolate()));
main_worklists.SwitchToContext(context1);
main_worklists.Push(pushed_object);
main_worklists.ShareWork();
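
These tests exercise push/pop on a thread-local marking worklist plus Publish/ShareWork handoff to a shared pool. A compact, self-contained sketch of that local/shared split (illustrative only; this is not V8's MarkingWorklists implementation and ignores contexts and on-hold lists):

#include <cstdint>
#include <mutex>
#include <vector>

using ObjectPtr = std::uintptr_t;

class SharedWorklist {
 public:
  void Publish(std::vector<ObjectPtr>&& segment) {
    std::lock_guard<std::mutex> lock(mutex_);
    segments_.push_back(std::move(segment));
  }
  bool Steal(std::vector<ObjectPtr>* out) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (segments_.empty()) return false;
    *out = std::move(segments_.back());
    segments_.pop_back();
    return true;
  }

 private:
  std::mutex mutex_;
  std::vector<std::vector<ObjectPtr>> segments_;
};

class LocalWorklist {
 public:
  explicit LocalWorklist(SharedWorklist* shared) : shared_(shared) {}
  void Push(ObjectPtr object) { local_.push_back(object); }
  bool Pop(ObjectPtr* object) {
    if (local_.empty() && !shared_->Steal(&local_)) return false;
    *object = local_.back();
    local_.pop_back();
    return true;
  }
  // Hand the thread-local backlog to the shared pool so other workers
  // (or the main thread) can pick it up.
  void Publish() {
    if (!local_.empty()) shared_->Publish(std::move(local_));
    local_.clear();
  }

 private:
  SharedWorklist* shared_;
  std::vector<ObjectPtr> local_;
};
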
diff --git a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
index d0986bb81f..16023efadd 100644
--- a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
+++ b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
@@ -11,26 +11,6 @@
namespace v8 {
namespace internal {
-MemoryReducer::State DoneState() {
- return MemoryReducer::State(MemoryReducer::kDone, 0, 0.0, 1.0, 0);
-}
-
-MemoryReducer::State DoneState(size_t committed_memory) {
- return MemoryReducer::State(MemoryReducer::kDone, 0, 0.0, 1.0,
- committed_memory);
-}
-
-MemoryReducer::State WaitState(int started_gcs, double next_gc_start_ms) {
- return MemoryReducer::State(MemoryReducer::kWait, started_gcs,
- next_gc_start_ms, 1.0, 0);
-}
-
-
-MemoryReducer::State RunState(int started_gcs, double next_gc_start_ms) {
- return MemoryReducer::State(MemoryReducer::kRun, started_gcs,
- next_gc_start_ms, 1.0, 0);
-}
-
MemoryReducer::Event MarkCompactEvent(double time_ms,
bool next_gc_likely_to_collect_more,
size_t committed_memory) {
@@ -88,248 +68,241 @@ MemoryReducer::Event PossibleGarbageEvent(double time_ms) {
TEST(MemoryReducer, FromDoneToDone) {
- MemoryReducer::State state0(DoneState()), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateDone(1.0, 0)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
state1 = MemoryReducer::Step(
state0,
MarkCompactEventGarbageLeft(0, MemoryReducer::kCommittedMemoryDelta - 1));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
- state0 = DoneState(1000 * MB);
+ state0 = MemoryReducer::State::CreateDone(1, 1000 * MB);
state1 = MemoryReducer::Step(
state0, MarkCompactEventGarbageLeft(
0, static_cast<size_t>(
1000 * MB * MemoryReducer::kCommittedMemoryFactor) -
1));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
}
TEST(MemoryReducer, FromDoneToWait) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(DoneState()), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateDone(1.0, 0)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(
state0,
MarkCompactEventGarbageLeft(2, MemoryReducer::kCommittedMemoryDelta));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms + 2,
- state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(2, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(2, state1.last_gc_time_ms());
state1 = MemoryReducer::Step(
state0,
MarkCompactEventNoGarbageLeft(2, MemoryReducer::kCommittedMemoryDelta));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms + 2,
- state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(2, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(2, state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms, state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms,
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
- state0 = DoneState(1000 * MB);
+ state0 = MemoryReducer::State::CreateDone(1, 1000 * MB);
state1 = MemoryReducer::Step(
state0, MarkCompactEventGarbageLeft(
2, static_cast<size_t>(
1000 * MB * MemoryReducer::kCommittedMemoryFactor)));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(v8_flags.gc_memory_reducer_start_delay_ms + 2,
- state1.next_gc_start_ms);
- EXPECT_EQ(0, state1.started_gcs);
- EXPECT_EQ(2, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(0, state1.started_gcs());
+ EXPECT_EQ(2, state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromWaitToWait) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(WaitState(2, 1000.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateWait(2, 1000.0, 1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(state0.next_gc_start_ms(), state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(
- state0, TimerEventLowAllocationRate(state0.next_gc_start_ms - 1));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ state0, TimerEventLowAllocationRate(state0.next_gc_start_ms() - 1));
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(state0.next_gc_start_ms(), state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
+
+ state0 = MemoryReducer::State::CreateWait(2, 1000.0, 0);
- state0.last_gc_time_ms = 0;
state1 = MemoryReducer::Step(
state0,
TimerEventHighAllocationRate(MemoryReducer::kWatchdogDelayMs + 1));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
EXPECT_EQ(MemoryReducer::kWatchdogDelayMs + 1 + MemoryReducer::kLongDelayMs,
- state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
- state0.last_gc_time_ms = 1;
+ state0 = MemoryReducer::State::CreateWait(2, 1000.0, 1);
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromWaitToRun) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(WaitState(0, 1000.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateWait(0, 1000.0, 1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(
- state0, TimerEventLowAllocationRate(state0.next_gc_start_ms + 1));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs + 1, state1.started_gcs);
+ state0, TimerEventLowAllocationRate(state0.next_gc_start_ms() + 1));
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs() + 1, state1.started_gcs());
state1 = MemoryReducer::Step(
state0,
TimerEventHighAllocationRate(MemoryReducer::kWatchdogDelayMs + 2));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs + 1, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs() + 1, state1.started_gcs());
}
TEST(MemoryReducer, FromWaitToDone) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(WaitState(2, 0.0)), state1(DoneState());
-
- state0.started_gcs = MemoryReducer::kMaxNumberOfGCs;
+ MemoryReducer::State state0(
+ MemoryReducer::State::CreateWait(MemoryReducer::kMaxNumberOfGCs, 0.0, 1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(state0.last_gc_time_ms(), state1.last_gc_time_ms());
}
TEST(MemoryReducer, FromRunToRun) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(RunState(1, 0.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateRun(1)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(2000));
- EXPECT_EQ(MemoryReducer::kRun, state1.action);
- EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kRun, state1.id());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
}
TEST(MemoryReducer, FromRunToDone) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(RunState(2, 0.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateRun(2)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
- state0.started_gcs = MemoryReducer::kMaxNumberOfGCs;
+ state0 = MemoryReducer::State::CreateRun(MemoryReducer::kMaxNumberOfGCs);
state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kDone, state1.action);
- EXPECT_EQ(0, state1.next_gc_start_ms);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kDone, state1.id());
+ EXPECT_EQ(state1.started_gcs(), state1.started_gcs());
}
TEST(MemoryReducer, FromRunToWait) {
if (!v8_flags.incremental_marking) return;
- MemoryReducer::State state0(RunState(2, 0.0)), state1(DoneState());
+ MemoryReducer::State state0(MemoryReducer::State::CreateRun(2)),
+ state1(MemoryReducer::State::CreateDone(1.0, 0));
state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
- state0.started_gcs = 1;
+ state0 = MemoryReducer::State::CreateRun(1);
state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000, 0));
- EXPECT_EQ(MemoryReducer::kWait, state1.action);
- EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms);
- EXPECT_EQ(state0.started_gcs, state1.started_gcs);
- EXPECT_EQ(2000, state1.last_gc_time_ms);
+ EXPECT_EQ(MemoryReducer::kWait, state1.id());
+ EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms());
+ EXPECT_EQ(state0.started_gcs(), state1.started_gcs());
+ EXPECT_EQ(2000, state1.last_gc_time_ms());
}
} // namespace internal
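
The rewritten assertions reflect an API change in MemoryReducer::State: public mutable fields (state.started_gcs, state.next_gc_start_ms) gave way to named factory constructors and read-only accessors. A sketch of that shape with an illustrative ReducerState type (the field layout here is assumed, not copied from V8):

#include <cstddef>

class ReducerState {
 public:
  enum Id { kDone, kWait, kRun };

  static ReducerState CreateDone(double last_gc_time_ms,
                                 size_t committed_memory) {
    return {kDone, 0, 0.0, last_gc_time_ms, committed_memory};
  }
  static ReducerState CreateWait(int started_gcs, double next_gc_start_ms,
                                 double last_gc_time_ms) {
    return {kWait, started_gcs, next_gc_start_ms, last_gc_time_ms, 0};
  }
  static ReducerState CreateRun(int started_gcs) {
    return {kRun, started_gcs, 0.0, 0.0, 0};
  }

  Id id() const { return id_; }
  int started_gcs() const { return started_gcs_; }
  double next_gc_start_ms() const { return next_gc_start_ms_; }
  double last_gc_time_ms() const { return last_gc_time_ms_; }

 private:
  ReducerState(Id id, int started_gcs, double next_gc_start_ms,
               double last_gc_time_ms, size_t committed_memory)
      : id_(id),
        started_gcs_(started_gcs),
        next_gc_start_ms_(next_gc_start_ms),
        last_gc_time_ms_(last_gc_time_ms),
        committed_memory_(committed_memory) {}

  Id id_;
  int started_gcs_;
  double next_gc_start_ms_;
  double last_gc_time_ms_;
  size_t committed_memory_;
};

Keeping the constructor private forces every state to go through a named factory, which is why the tests now rebuild state0 instead of mutating its fields.
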
diff --git a/deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc b/deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc
deleted file mode 100644
index d08c9d04d0..0000000000
--- a/deps/v8/test/unittests/heap/object-start-bitmap-unittest.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/object-start-bitmap.h"
-
-#include "src/base/macros.h"
-#include "src/heap/object-start-bitmap-inl.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-bool IsEmpty(const ObjectStartBitmap& bitmap) {
- size_t count = 0;
- bitmap.Iterate([&count](Address) { count++; });
- return count == 0;
-}
-
-// Abstraction for objects that hides ObjectStartBitmap::kGranularity and
-// the base address as getting either of it wrong will result in failed DCHECKs.
-class TestObject {
- public:
- static PtrComprCageBase kCageBase;
- static Address kBaseOffset;
-
- explicit TestObject(size_t number) : number_(number) {
- const size_t max_entries = ObjectStartBitmap::MaxEntries();
- EXPECT_GE(max_entries, number_);
- }
-
- Address base_ptr() const {
- return kBaseOffset + ObjectStartBitmap::Granularity() * number_;
- }
-
- // Allow implicitly converting Object to Address.
- operator Address() const { return base_ptr(); }
-
- private:
- const size_t number_;
-};
-
-PtrComprCageBase TestObject::kCageBase{0xca6e00000000ul};
-Address TestObject::kBaseOffset = reinterpret_cast<Address>(0x4000ul);
-
-} // namespace
-
-TEST(V8ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
- const size_t max_entries = ObjectStartBitmap::MaxEntries();
- EXPECT_LT(0u, max_entries);
-}
-
-TEST(V8ObjectStartBitmapTest, InitialEmpty) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- EXPECT_TRUE(IsEmpty(bitmap));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- bitmap.SetBit(TestObject(0));
- EXPECT_FALSE(IsEmpty(bitmap));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitCheckBit) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(7);
- bitmap.SetBit(object);
- EXPECT_TRUE(bitmap.CheckBit(object));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitClearbitCheckBit) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(77);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_FALSE(bitmap.CheckBit(object));
-}
-
-TEST(V8ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(123);
- bitmap.SetBit(object);
- bitmap.ClearBit(object);
- EXPECT_TRUE(IsEmpty(bitmap));
-}
-
-TEST(V8ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object0(0);
- TestObject object1(1);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(TestObject(3)));
- size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
- if (count == 0) {
- EXPECT_EQ(object0.base_ptr(), current);
- } else if (count == 1) {
- EXPECT_EQ(object1.base_ptr(), current);
- }
- count++;
- });
- EXPECT_EQ(2u, count);
-}
-
-TEST(V8ObjectStartBitmapTest, AdjacentObjectsAtEnd) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- const size_t last_entry_index = ObjectStartBitmap::MaxEntries() - 1;
- TestObject object0(last_entry_index - 1);
- TestObject object1(last_entry_index);
- bitmap.SetBit(object0);
- bitmap.SetBit(object1);
- EXPECT_FALSE(bitmap.CheckBit(TestObject(last_entry_index - 2)));
- size_t count = 0;
- bitmap.Iterate([&count, object0, object1](Address current) {
- if (count == 0) {
- EXPECT_EQ(object0.base_ptr(), current);
- } else if (count == 1) {
- EXPECT_EQ(object1.base_ptr(), current);
- }
- count++;
- });
- EXPECT_EQ(2u, count);
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrExact) {
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.base_ptr(), bitmap.FindBasePtrImpl(object.base_ptr()));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrApproximate) {
- const size_t kInternalDelta = 37;
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object(654);
- bitmap.SetBit(object);
- EXPECT_EQ(object.base_ptr(),
- bitmap.FindBasePtrImpl(object.base_ptr() + kInternalDelta));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrIteratingWholeBitmap) {
- const size_t kLastWordDelta = ObjectStartBitmap::MaxEntries() - 1;
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object_to_find(0);
- bitmap.SetBit(object_to_find);
- Address hint_index = TestObject(kLastWordDelta);
- EXPECT_EQ(object_to_find.base_ptr(), bitmap.FindBasePtrImpl(hint_index));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrNextCell) {
- // This white box test makes use of the fact that cells are of type uint32_t.
- const size_t kCellSize = sizeof(uint32_t);
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object_to_find(kCellSize - 1);
- Address hint = TestObject(kCellSize);
- bitmap.SetBit(TestObject(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.base_ptr(), bitmap.FindBasePtrImpl(hint));
-}
-
-TEST(V8ObjectStartBitmapTest, FindBasePtrSameCell) {
- // This white box test makes use of the fact that cells are of type uint32_t.
- const size_t kCellSize = sizeof(uint32_t);
- ObjectStartBitmap bitmap(TestObject::kCageBase, TestObject::kBaseOffset);
- TestObject object_to_find(kCellSize - 1);
- Address hint = object_to_find;
- bitmap.SetBit(TestObject(0));
- bitmap.SetBit(object_to_find);
- EXPECT_EQ(object_to_find.base_ptr(), bitmap.FindBasePtrImpl(hint));
-}
-
-// TODO(v8:12851): If the ObjectStartBitmap implementation stays, unit tests
-// should be added to test the functionality of method FindBasePtr.
-
-} // namespace internal
-} // namespace v8
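
The deleted unit test covered an object-start bitmap: one bit per allocation granule records where an object begins, so an arbitrary inner pointer can be mapped back to a base address by scanning backwards for the nearest set bit. A minimal self-contained sketch of that idea (kGranularity and every name below are assumptions for illustration, not V8's object-start-bitmap implementation):

#include <cstddef>
#include <cstdint>
#include <vector>

class SimpleObjectStartBitmap {
 public:
  static constexpr size_t kGranularity = 8;  // Bytes covered per bit (assumed).

  SimpleObjectStartBitmap(uintptr_t base, size_t size_in_bytes)
      : base_(base), bits_(size_in_bytes / kGranularity, false) {}

  void SetBit(uintptr_t object_start) { bits_[IndexOf(object_start)] = true; }
  bool CheckBit(uintptr_t object_start) const {
    return bits_[IndexOf(object_start)];
  }

  // Returns the base address of the object containing `maybe_inner_ptr`,
  // or 0 if no object start is recorded at or before it.
  uintptr_t FindBasePtr(uintptr_t maybe_inner_ptr) const {
    for (size_t i = IndexOf(maybe_inner_ptr) + 1; i-- > 0;) {
      if (bits_[i]) return base_ + i * kGranularity;
    }
    return 0;
  }

 private:
  size_t IndexOf(uintptr_t address) const {
    return (address - base_) / kGranularity;
  }

  uintptr_t base_;
  std::vector<bool> bits_;
};
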
diff --git a/deps/v8/test/unittests/heap/shared-heap-unittest.cc b/deps/v8/test/unittests/heap/shared-heap-unittest.cc
index 19b5b1eb6f..8cd597a7fe 100644
--- a/deps/v8/test/unittests/heap/shared-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/shared-heap-unittest.cc
@@ -19,32 +19,20 @@ using SharedHeapTest = TestJSSharedMemoryWithIsolate;
class SharedHeapNoClientsTest : public TestJSSharedMemoryWithPlatform {
public:
SharedHeapNoClientsTest() {
- if (v8_flags.shared_space) {
- shared_space_isolate_wrapper.emplace(kNoCounters);
- shared_isolate_ = shared_space_isolate_wrapper->i_isolate();
- } else {
- bool created;
- shared_isolate_ = Isolate::GetProcessWideSharedIsolate(&created);
- CHECK(created);
- }
+ shared_space_isolate_wrapper.emplace(kNoCounters);
+ shared_space_isolate_ = shared_space_isolate_wrapper->i_isolate();
}
- ~SharedHeapNoClientsTest() override {
- if (!v8_flags.shared_space) {
- Isolate::DeleteProcessWideSharedIsolate();
- }
-
- shared_isolate_ = nullptr;
- }
+ ~SharedHeapNoClientsTest() override { shared_space_isolate_ = nullptr; }
- v8::Isolate* shared_heap_isolate() {
- return reinterpret_cast<v8::Isolate*>(i_shared_heap_isolate());
+ v8::Isolate* shared_space_isolate() {
+ return reinterpret_cast<v8::Isolate*>(i_shared_space_isolate());
}
- Isolate* i_shared_heap_isolate() { return shared_isolate_; }
+ Isolate* i_shared_space_isolate() { return shared_space_isolate_; }
private:
- Isolate* shared_isolate_;
+ Isolate* shared_space_isolate_;
base::Optional<IsolateWrapper> shared_space_isolate_wrapper;
};
@@ -192,12 +180,7 @@ TEST_F(SharedHeapTest, ConcurrentAllocationInSharedMapSpace) {
}
TEST_F(SharedHeapNoClientsTest, SharedCollectionWithoutClients) {
- if (!v8_flags.shared_space) {
- DCHECK_NULL(i_shared_heap_isolate()->heap()->new_space());
- DCHECK_NULL(i_shared_heap_isolate()->heap()->new_lo_space());
- }
-
- ::v8::internal::CollectGarbage(OLD_SPACE, shared_heap_isolate());
+ ::v8::internal::CollectGarbage(OLD_SPACE, shared_space_isolate());
}
void AllocateInSharedHeap(int iterations = 100) {
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 07be431f97..87b9dccf80 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -128,11 +128,14 @@ TEST_F(SpacesTest, WriteBarrierInYoungGenerationFromSpace) {
TEST_F(SpacesTest, CodeRangeAddressReuse) {
CodeRangeAddressHint hint;
- const size_t kAnyBaseAlignment = 1;
+ const size_t base_alignment = MemoryChunk::kPageSize;
// Create code ranges.
- Address code_range1 = hint.GetAddressHint(100, kAnyBaseAlignment);
- Address code_range2 = hint.GetAddressHint(200, kAnyBaseAlignment);
- Address code_range3 = hint.GetAddressHint(100, kAnyBaseAlignment);
+ Address code_range1 = hint.GetAddressHint(100, base_alignment);
+ CHECK(IsAligned(code_range1, base_alignment));
+ Address code_range2 = hint.GetAddressHint(200, base_alignment);
+ CHECK(IsAligned(code_range2, base_alignment));
+ Address code_range3 = hint.GetAddressHint(100, base_alignment);
+ CHECK(IsAligned(code_range3, base_alignment));
// Since the addresses are random, we cannot check that they are different.
@@ -141,14 +144,14 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
hint.NotifyFreedCodeRange(code_range2, 200);
// The next two code ranges should reuse the freed addresses.
- Address code_range4 = hint.GetAddressHint(100, kAnyBaseAlignment);
+ Address code_range4 = hint.GetAddressHint(100, base_alignment);
EXPECT_EQ(code_range4, code_range1);
- Address code_range5 = hint.GetAddressHint(200, kAnyBaseAlignment);
+ Address code_range5 = hint.GetAddressHint(200, base_alignment);
EXPECT_EQ(code_range5, code_range2);
// Free the third code range and check address reuse.
hint.NotifyFreedCodeRange(code_range3, 100);
- Address code_range6 = hint.GetAddressHint(100, kAnyBaseAlignment);
+ Address code_range6 = hint.GetAddressHint(100, base_alignment);
EXPECT_EQ(code_range6, code_range3);
}
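
CodeRangeAddressReuse checks that freed code-range start addresses are handed back as hints for later reservations of the same size, now with a page-size alignment requirement instead of the old alignment of 1. A small sketch of that bookkeeping (hypothetical AddressHintCache; returning 0 stands in for "no hint, pick a fresh address"):

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

class AddressHintCache {
 public:
  // Returns a previously freed, suitably aligned start address for `size`,
  // or 0 when nothing is cached.
  uintptr_t GetAddressHint(size_t size, size_t alignment) {
    auto it = freed_.find(size);
    if (it == freed_.end() || it->second.empty()) return 0;
    uintptr_t hint = it->second.back();
    it->second.pop_back();
    return hint - (hint % alignment);  // Keep the returned hint aligned.
  }

  void NotifyFreedCodeRange(uintptr_t start, size_t size) {
    freed_[size].push_back(start);
  }

 private:
  std::map<size_t, std::vector<uintptr_t>> freed_;
};
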
diff --git a/deps/v8/test/unittests/inspector/inspector-unittest.cc b/deps/v8/test/unittests/inspector/inspector-unittest.cc
index e2d390a8e0..ec70f061c9 100644
--- a/deps/v8/test/unittests/inspector/inspector-unittest.cc
+++ b/deps/v8/test/unittests/inspector/inspector-unittest.cc
@@ -212,6 +212,20 @@ TEST_F(InspectorTest, NoInterruptOnGetAssociatedData) {
CHECK(recorder.WasInvoked);
}
+class TestChannel : public V8Inspector::Channel {
+ public:
+ ~TestChannel() override = default;
+ void sendResponse(int callId,
+ std::unique_ptr<StringBuffer> message) override {
+ CHECK_EQ(callId, 1);
+ CHECK_NE(toString16(message->string()).find(expected_response_matcher_),
+ String16::kNotFound);
+ }
+ void sendNotification(std::unique_ptr<StringBuffer> message) override {}
+ void flushProtocolNotifications() override {}
+ v8_inspector::String16 expected_response_matcher_;
+};
+
TEST_F(InspectorTest, NoConsoleAPIForUntrustedClient) {
v8::Isolate* isolate = v8_isolate();
v8::HandleScope handle_scope(isolate);
@@ -222,20 +236,6 @@ TEST_F(InspectorTest, NoConsoleAPIForUntrustedClient) {
V8ContextInfo context_info(v8_context(), 1, toStringView(""));
inspector->contextCreated(context_info);
- class TestChannel : public V8Inspector::Channel {
- public:
- ~TestChannel() override = default;
- void sendResponse(int callId,
- std::unique_ptr<StringBuffer> message) override {
- CHECK_EQ(callId, 1);
- CHECK_NE(toString16(message->string()).find(expected_response_matcher_),
- String16::kNotFound);
- }
- void sendNotification(std::unique_ptr<StringBuffer> message) override {}
- void flushProtocolNotifications() override {}
- v8_inspector::String16 expected_response_matcher_;
- };
-
TestChannel channel;
const char kCommand[] = R"({
"id": 1,
@@ -258,6 +258,26 @@ TEST_F(InspectorTest, NoConsoleAPIForUntrustedClient) {
untrusted_session->dispatchProtocolMessage(toStringView(kCommand));
}
+TEST_F(InspectorTest, CanHandleMalformedCborMessage) {
+ v8::Isolate* isolate = v8_isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8_inspector::V8InspectorClient default_client;
+ std::unique_ptr<V8Inspector> inspector =
+ V8Inspector::create(isolate, &default_client);
+ V8ContextInfo context_info(v8_context(), 1, toStringView(""));
+ inspector->contextCreated(context_info);
+
+ TestChannel channel;
+ const unsigned char kCommand[] = {0xD8, 0x5A, 0x00, 0xBA, 0xDB, 0xEE, 0xF0};
+ std::unique_ptr<V8InspectorSession> trusted_session =
+ inspector->connect(1, &channel, toStringView("{}"),
+ v8_inspector::V8Inspector::kFullyTrusted);
+ channel.expected_response_matcher_ = R"("value":42)";
+ trusted_session->dispatchProtocolMessage(
+ StringView(kCommand, sizeof(kCommand)));
+}
+
TEST_F(InspectorTest, ApiCreatedTasksAreCleanedUp) {
v8::Isolate* isolate = v8_isolate();
v8::HandleScope handle_scope(isolate);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 2a0770a90c..ecd874ee5a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -151,7 +151,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.SetKeyedProperty(reg, reg, strict_keyed_store_slot.ToInt(),
LanguageMode::kStrict)
.DefineNamedOwnProperty(reg, name, define_named_own_slot.ToInt())
- .DefineKeyedOwnProperty(reg, reg, define_named_own_slot.ToInt())
+ .DefineKeyedOwnProperty(reg, reg, DefineKeyedOwnPropertyFlag::kNoFlags,
+ define_named_own_slot.ToInt())
.StoreInArrayLiteral(reg, reg, store_array_element_slot.ToInt());
// Emit Iterator-protocol operations
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden
index 606a6ad481..fe3dd9a4d6 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -233,7 +233,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 20
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star0),
@@ -242,7 +242,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star2),
B(Ldar), R(0),
- /* 57 E> */ B(DefineKeyedOwnProperty), R(1), R(2), U8(1),
+ /* 57 E> */ B(DefineKeyedOwnProperty), R(1), R(2), U8(0), U8(1),
B(Ldar), R(1),
/* 61 S> */ B(Return),
]
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index bc1c08c83e..3594623033 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -23,12 +23,12 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 82
+bytecode array length: 83
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 67 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 67 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 76 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
@@ -75,15 +75,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -107,15 +107,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -139,17 +139,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 48 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 53 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 58 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -173,15 +173,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 41 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden
index 195e64c76a..5f1727548b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -24,7 +24,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 114
+bytecode array length: 112
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -43,7 +43,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(5), R(0),
@@ -64,7 +63,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(5), R(1),
@@ -127,7 +125,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 232
+bytecode array length: 229
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -153,7 +151,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(6), R(0),
@@ -191,7 +188,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
B(CreateClosure), U8(16), U8(7), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(6), R(1),
@@ -211,7 +207,6 @@ bytecodes: [
B(Mov), R(1), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
B(CreateClosure), U8(20), U8(9), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(6), R(2),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index a7cde86a77..b7bb831e61 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -18,12 +18,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 24
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 61 E> */ B(GetKeyedProperty), R(this), U8(2),
B(LdaImmutableCurrentContextSlot), U8(2),
@@ -48,17 +48,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star2),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -83,15 +83,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
B(Star0),
B(Ldar), R(context),
- /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0),
+ /* 44 E> */ B(DefineKeyedOwnProperty), R(this), R(0), U8(0), U8(0),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 54 E> */ B(GetKeyedProperty), R(this), U8(2),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -116,7 +116,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 48
+bytecode array length: 49
bytecodes: [
/* 44 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
@@ -125,7 +125,7 @@ bytecodes: [
B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
B(Star1),
B(Ldar), R(0),
- B(DefineKeyedOwnProperty), R(this), R(1), U8(0),
+ B(DefineKeyedOwnProperty), R(this), R(1), U8(0), U8(0),
/* 49 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(Star3),
/* 61 E> */ B(CallUndefinedReceiver0), R(3), U8(2),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden
index 43e40974f9..c814f8489d 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -23,7 +23,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 104
+bytecode array length: 102
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -41,7 +41,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(5), R(0),
@@ -61,7 +60,6 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(5), R(1),
@@ -119,7 +117,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 199
+bytecode array length: 196
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -144,7 +142,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(6), R(0),
@@ -173,7 +170,6 @@ bytecodes: [
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
B(CreateClosure), U8(14), U8(6), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(6), R(1),
@@ -192,7 +188,6 @@ bytecodes: [
B(Mov), R(1), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(6), R(2),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden
index 559afa2fa0..439c0eb00b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -27,7 +27,7 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 166
+bytecode array length: 164
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -55,11 +55,10 @@ bytecodes: [
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
B(CreateClosure), U8(6), U8(1), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(7), U8(1),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star6),
- B(CallProperty0), R(6), R(3), U8(3),
+ B(Star5),
+ B(CallProperty0), R(5), R(3), U8(3),
B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(9),
@@ -88,11 +87,10 @@ bytecodes: [
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star4),
B(SetNamedProperty), R(3), U8(7), U8(5),
B(CreateClosure), U8(13), U8(5), U8(2),
- B(Star6),
- B(CallProperty0), R(6), R(3), U8(7),
+ B(Star5),
+ B(CallProperty0), R(5), R(3), U8(7),
B(PopContext), R(2),
B(Mov), R(3), R(1),
/* 197 S> */ B(Ldar), R(0),
@@ -159,7 +157,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 298
+bytecode array length: 295
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -194,11 +192,10 @@ bytecodes: [
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(9), U8(1),
B(CreateClosure), U8(10), U8(3), U8(2),
- B(Star7),
- B(CallProperty0), R(7), R(4), U8(3),
+ B(Star6),
+ B(CallProperty0), R(6), R(4), U8(3),
B(PopContext), R(3),
B(Mov), R(4), R(0),
/* 38 E> */ B(CreateBlockContext), U8(11),
@@ -226,8 +223,8 @@ bytecodes: [
B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
- B(Mov), R(10), R(7),
B(Mov), R(4), R(6),
+ B(Mov), R(10), R(7),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
B(Ldar), R(9),
@@ -236,11 +233,10 @@ bytecodes: [
B(Star10),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(6),
B(CreateClosure), U8(17), U8(7), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(9), U8(5),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star7),
- B(CallProperty0), R(7), R(4), U8(7),
+ B(Star6),
+ B(CallProperty0), R(6), R(4), U8(7),
B(PopContext), R(3),
B(Mov), R(4), R(1),
/* 122 E> */ B(CreateBlockContext), U8(19),
@@ -260,8 +256,8 @@ bytecodes: [
B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
- B(Mov), R(1), R(7),
B(Mov), R(4), R(6),
+ B(Mov), R(1), R(7),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
B(Ldar), R(9),
@@ -271,11 +267,10 @@ bytecodes: [
B(Ldar), R(4),
B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(22), U8(10), U8(2),
- B(Star5),
B(SetNamedProperty), R(4), U8(9), U8(9),
B(CreateClosure), U8(23), U8(11), U8(2),
- B(Star7),
- B(CallProperty0), R(7), R(4), U8(11),
+ B(Star6),
+ B(CallProperty0), R(6), R(4), U8(11),
B(PopContext), R(3),
B(Mov), R(4), R(2),
/* 456 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 9ff074e5bb..e8350d6b7b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/unittests/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -24,7 +24,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -61,13 +61,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star2),
B(LdaConstant), U8(1),
B(Star3),
@@ -99,13 +99,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(304),
+ B(Wide), B(LdaSmi), I16(309),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -145,7 +145,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -167,7 +167,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -182,7 +182,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -216,13 +216,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -253,13 +253,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
/* 58 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star1),
B(LdaConstant), U8(1),
B(Star2),
@@ -292,13 +292,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(298),
+ B(Wide), B(LdaSmi), I16(303),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
/* 65 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(Wide), B(LdaSmi), I16(306),
+ B(Wide), B(LdaSmi), I16(311),
B(Star2),
B(LdaConstant), U8(1),
B(Star3),
@@ -327,7 +327,7 @@ bytecode array length: 19
bytecodes: [
/* 46 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 51 E> */ B(GetKeyedProperty), R(this), U8(0),
- B(Wide), B(LdaSmi), I16(305),
+ B(Wide), B(LdaSmi), I16(310),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index dcb6b72c35..51e61dae23 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -46,6 +46,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
if (Bytecodes::WritesAccumulator(bytecode())) {
SetAccumulator(NullConstant());
}
+ if (Bytecodes::ClobbersAccumulator(bytecode())) {
+ ClobberAccumulator(NullConstant());
+ }
if (Bytecodes::WritesImplicitRegister(bytecode())) {
StoreRegisterForShortStar(NullConstant(), IntPtrConstant(2));
}
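
The harness change above adds a third category of accumulator handling: bytecodes flagged as clobbering the accumulator get it seeded with a known constant, analogous to the existing WritesAccumulator case. A minimal sketch of the resulting setup sequence; the names come from the diff, while the rationale in the comments is an interpretation rather than documented behavior:

// Per-bytecode seeding in InterpreterAssemblerForTest (sketch).
if (Bytecodes::WritesAccumulator(bytecode())) {
  SetAccumulator(NullConstant());        // handler defines the accumulator
}
if (Bytecodes::ClobbersAccumulator(bytecode())) {
  ClobberAccumulator(NullConstant());    // handler may leave an arbitrary value
}
if (Bytecodes::WritesImplicitRegister(bytecode())) {
  StoreRegisterForShortStar(NullConstant(), IntPtrConstant(2));
}
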
diff --git a/deps/v8/test/unittests/interpreter/interpreter-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-unittest.cc
index 1052bccb54..dae9431252 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-unittest.cc
@@ -1841,8 +1841,10 @@ TEST_F(InterpreterTest, InterpreterBigIntComparisons) {
if (tester.HasFeedbackMetadata()) {
MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kBigInt,
- feedback->ToSmi().value());
+ // TODO(panq): Create a standalone unit test for kBigInt64.
+ CHECK(CompareOperationFeedback::kBigInt64 ==
+ feedback->ToSmi().value() ||
+ CompareOperationFeedback::kBigInt == feedback->ToSmi().value());
}
}
}
@@ -4758,11 +4760,11 @@ TEST_F(InterpreterTest, InterpreterWithNativeStack) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
CHECK(f->shared().HasBytecodeArray());
- i::CodeT code = f->shared().GetCode();
- i::Handle<i::CodeT> interpreter_entry_trampoline =
+ i::Code code = f->shared().GetCode(i_isolate());
+ i::Handle<i::Code> interpreter_entry_trampoline =
BUILTIN_CODE(i_isolate(), InterpreterEntryTrampoline);
- CHECK(code.IsCodeT());
+ CHECK(code.IsCode());
CHECK(code.is_interpreter_trampoline_builtin());
CHECK_NE(code.address(), interpreter_entry_trampoline->address());
}
@@ -4772,24 +4774,24 @@ TEST_F(InterpreterTest, InterpreterGetBytecodeHandler) {
Interpreter* interpreter = i_isolate()->interpreter();
  // Test that single-width bytecode handlers deserialize correctly.
- CodeT wide_handler =
+ Code wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kSingle);
CHECK_EQ(wide_handler.builtin_id(), Builtin::kWideHandler);
- CodeT add_handler =
+ Code add_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kSingle);
CHECK_EQ(add_handler.builtin_id(), Builtin::kAddHandler);
  // Test that double-width bytecode handlers deserialize correctly, including
// an illegal bytecode handler since there is no Wide.Wide handler.
- CodeT wide_wide_handler =
+ Code wide_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kDouble);
CHECK_EQ(wide_wide_handler.builtin_id(), Builtin::kIllegalHandler);
- CodeT add_wide_handler =
+ Code add_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kDouble);
CHECK_EQ(add_wide_handler.builtin_id(), Builtin::kAddWideHandler);
@@ -4982,15 +4984,15 @@ TEST_F(InterpreterTest, InterpreterCollectSourcePositions_GenerateStackTrace) {
TEST_F(InterpreterTest, InterpreterLookupNameOfBytecodeHandler) {
Interpreter* interpreter = i_isolate()->interpreter();
- CodeT ldaLookupSlot = interpreter->GetBytecodeHandler(
- Bytecode::kLdaLookupSlot, OperandScale::kSingle);
+ Code ldaLookupSlot = interpreter->GetBytecodeHandler(Bytecode::kLdaLookupSlot,
+ OperandScale::kSingle);
CheckStringEqual("LdaLookupSlotHandler",
Builtins::name(ldaLookupSlot.builtin_id()));
- CodeT wideLdaLookupSlot = interpreter->GetBytecodeHandler(
+ Code wideLdaLookupSlot = interpreter->GetBytecodeHandler(
Bytecode::kLdaLookupSlot, OperandScale::kDouble);
CheckStringEqual("LdaLookupSlotWideHandler",
Builtins::name(wideLdaLookupSlot.builtin_id()));
- CodeT extraWideLdaLookupSlot = interpreter->GetBytecodeHandler(
+ Code extraWideLdaLookupSlot = interpreter->GetBytecodeHandler(
Bytecode::kLdaLookupSlot, OperandScale::kQuadruple);
CheckStringEqual("LdaLookupSlotExtraWideHandler",
Builtins::name(extraWideLdaLookupSlot.builtin_id()));
diff --git a/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
index d3d9580a21..f6ef9834d2 100644
--- a/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/single-threaded-default-platform-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "include/v8-platform.h"
+#include "src/init/v8.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -14,7 +15,7 @@ class WithSingleThreadedDefaultPlatformMixin : public TMixin {
WithSingleThreadedDefaultPlatformMixin() {
platform_ = v8::platform::NewSingleThreadedDefaultPlatform();
CHECK_NOT_NULL(platform_.get());
- v8::V8::InitializePlatform(platform_.get());
+ i::V8::InitializePlatformForTesting(platform_.get());
v8::V8::Initialize();
}
diff --git a/deps/v8/test/unittests/libplatform/tracing-unittest.cc b/deps/v8/test/unittests/libplatform/tracing-unittest.cc
index 76f78af14c..b92fc34bb7 100644
--- a/deps/v8/test/unittests/libplatform/tracing-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/tracing-unittest.cc
@@ -11,7 +11,8 @@
#include "testing/gtest/include/gtest/gtest.h"
#ifdef V8_USE_PERFETTO
-#include "perfetto/tracing.h"
+#include "perfetto/tracing/track_event.h" // nogncheck
+#include "perfetto/tracing/track_event_legacy.h" // nogncheck
#include "protos/perfetto/trace/trace.pb.h" // nogncheck
#include "src/libplatform/tracing/trace-event-listener.h"
#include "src/tracing/traced-value.h"
@@ -389,6 +390,10 @@ TEST_F(PlatformTracingTest, TestTracingControllerMultipleArgsAndCopy) {
}
#endif // !defined(V8_USE_PERFETTO)
+// In the Perfetto build there are no TracingObservers. Instead the code relies on
+// TrackEventSessionObserver to track tracing sessions, which is tested
+// upstream.
+#if !defined(V8_USE_PERFETTO)
namespace {
class TraceStateObserverImpl : public TracingController::TraceStateObserver {
@@ -412,16 +417,11 @@ TEST_F(PlatformTracingTest, TracingObservers) {
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
-#ifdef V8_USE_PERFETTO
- std::ostringstream sstream;
- tracing_controller->InitializeForPerfetto(&sstream);
-#else
MockTraceWriter* writer = new MockTraceWriter();
v8::platform::tracing::TraceBuffer* ring_buffer =
v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
writer);
tracing_controller->Initialize(ring_buffer);
-#endif
v8::platform::tracing::TraceConfig* trace_config =
new v8::platform::tracing::TraceConfig();
trace_config->AddIncludedCategory("v8");
@@ -469,6 +469,7 @@ TEST_F(PlatformTracingTest, TracingObservers) {
i::V8::SetPlatformForTesting(old_platform);
}
+#endif // !defined(V8_USE_PERFETTO)
// With Perfetto the tracing controller doesn't observe events.
#if !defined(V8_USE_PERFETTO)
@@ -597,7 +598,12 @@ class TestListener : public TraceEventListener {
if (!first_annotation) {
slice += ",";
}
- slice += debug_annotation_names_[it.name_iid()] + "=";
+ if (!it.name().empty()) {
+ slice += it.name();
+ } else {
+ slice += debug_annotation_names_[it.name_iid()];
+ }
+ slice += "=";
std::stringstream value;
if (it.has_bool_value()) {
value << "(bool)" << it.bool_value();
diff --git a/deps/v8/test/unittests/logging/log-unittest.cc b/deps/v8/test/unittests/logging/log-unittest.cc
index 764064c40d..fdeb11dda7 100644
--- a/deps/v8/test/unittests/logging/log-unittest.cc
+++ b/deps/v8/test/unittests/logging/log-unittest.cc
@@ -444,13 +444,16 @@ TEST_F(LogTest, Issue539892) {
explicit FakeCodeEventLogger(i::Isolate* isolate)
: CodeEventLogger(isolate) {}
- void CodeMoveEvent(i::AbstractCode from, i::AbstractCode to) override {}
+ void CodeMoveEvent(i::InstructionStream from,
+ i::InstructionStream to) override {}
+ void BytecodeMoveEvent(i::BytecodeArray from,
+ i::BytecodeArray to) override {}
void CodeDisableOptEvent(i::Handle<i::AbstractCode> code,
i::Handle<i::SharedFunctionInfo> shared) override {
}
private:
- void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
+ void LogRecordedBuffer(i::AbstractCode code,
i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
const char* name, int length) override {}
#if V8_ENABLE_WEBASSEMBLY
@@ -1214,7 +1217,7 @@ TEST_F(LogTest, BuiltinsNotLoggedAsLazyCompile) {
logger.StopLogging();
i::Isolate* i_isolate = logger.i_isolate();
- i::Handle<i::CodeT> builtin = BUILTIN_CODE(i_isolate, BooleanConstructor);
+ i::Handle<i::Code> builtin = BUILTIN_CODE(i_isolate, BooleanConstructor);
v8::base::EmbeddedVector<char, 100> buffer;
// Should only be logged as "Builtin" with a name, never as "Function".
diff --git a/deps/v8/test/unittests/objects/concurrent-string-unittest.cc b/deps/v8/test/unittests/objects/concurrent-string-unittest.cc
index 569bceb01c..919b9097d2 100644
--- a/deps/v8/test/unittests/objects/concurrent-string-unittest.cc
+++ b/deps/v8/test/unittests/objects/concurrent-string-unittest.cc
@@ -245,7 +245,6 @@ TEST_F(ConcurrentStringTest, InspectOneByteExternalizing_ThinString) {
// Create a string.
const char* raw_string = STRING_VALUE;
Handle<String> thin_string = factory->NewStringFromAsciiChecked(raw_string);
- EXPECT_TRUE(thin_string->IsOneByteRepresentation());
EXPECT_TRUE(!thin_string->IsExternalString());
EXPECT_TRUE(!thin_string->IsInternalizedString());
@@ -347,9 +346,6 @@ TEST_F(ConcurrentStringTest, InspectOneIntoTwoByteExternalizing_ThinString) {
EXPECT_TRUE(!thin_string->IsExternalString());
EXPECT_TRUE(!thin_string->IsInternalizedString());
EXPECT_TRUE(thin_string->IsThinString());
- // Even its representation is still one byte, even when the internalized
- // string moved to two bytes.
- EXPECT_TRUE(thin_string->IsOneByteRepresentation());
thread->Join();
}
diff --git a/deps/v8/test/unittests/objects/roots-unittest.cc b/deps/v8/test/unittests/objects/roots-unittest.cc
index 6bb3bc16ee..17e20e3fbc 100644
--- a/deps/v8/test/unittests/objects/roots-unittest.cc
+++ b/deps/v8/test/unittests/objects/roots-unittest.cc
@@ -93,6 +93,17 @@ TEST_F(RootsTest, TestHeapRootsNotReadOnly) {
MUTABLE_ROOT_LIST(CHECK_NOT_IN_RO_SPACE)
}
+TEST_F(RootsTest, TestHeapNumberList) {
+ ReadOnlyRoots roots(isolate());
+ for (auto pos = RootIndex::kFirstReadOnlyRoot;
+ pos <= RootIndex::kLastReadOnlyRoot; ++pos) {
+ auto obj = roots.object_at(pos);
+ bool in_nr_range = pos >= RootIndex::kFirstHeapNumberRoot &&
+ pos <= RootIndex::kLastHeapNumberRoot;
+ CHECK_EQ(obj.IsHeapNumber(), in_nr_range);
+ }
+}
+
#undef CHECK_NOT_IN_RO_SPACE
} // namespace internal
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 8a36f6dbb3..a2f6461ab2 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -21,7 +21,9 @@
#include "src/base/build_config.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/objects-inl.h"
+#include "test/common/flag-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -1398,7 +1400,6 @@ TEST_F(ValueSerializerTest, RoundTripDate) {
}
TEST_F(ValueSerializerTest, DecodeDate) {
- Local<Value> value;
#if defined(V8_TARGET_LITTLE_ENDIAN)
DecodeTestFutureVersions(
{0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84, 0x2E,
@@ -1884,11 +1885,20 @@ TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
ASSERT_TRUE(value->IsArrayBuffer());
EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
ExpectScriptTrue("Object.getPrototypeOf(result) === ArrayBuffer.prototype");
+ // TODO(v8:11111): Use API functions for testing max_byte_length and resizable
+ // once they're exposed via the API.
+ i::Handle<i::JSArrayBuffer> array_buffer =
+ Utils::OpenHandle(ArrayBuffer::Cast(*value));
+ EXPECT_EQ(0u, array_buffer->max_byte_length());
+ EXPECT_EQ(false, array_buffer->is_resizable_by_js());
value = RoundTripTest("new Uint8Array([0, 128, 255]).buffer");
ASSERT_TRUE(value->IsArrayBuffer());
EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
ExpectScriptTrue("new Uint8Array(result).toString() === '0,128,255'");
+ array_buffer = Utils::OpenHandle(ArrayBuffer::Cast(*value));
+ EXPECT_EQ(3u, array_buffer->max_byte_length());
+ EXPECT_EQ(false, array_buffer->is_resizable_by_js());
value =
RoundTripTest("({ a: new ArrayBuffer(), get b() { return this.a; }})");
@@ -1896,6 +1906,21 @@ TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
ExpectScriptTrue("result.a === result.b");
}
+TEST_F(ValueSerializerTest, RoundTripResizableArrayBuffer) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ Local<Value> value =
+ RoundTripTest("new ArrayBuffer(100, {maxByteLength: 200})");
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(100u, ArrayBuffer::Cast(*value)->ByteLength());
+
+ // TODO(v8:11111): Use API functions for testing max_byte_length and resizable
+ // once they're exposed via the API.
+ i::Handle<i::JSArrayBuffer> array_buffer =
+ Utils::OpenHandle(ArrayBuffer::Cast(*value));
+ EXPECT_EQ(200u, array_buffer->max_byte_length());
+ EXPECT_EQ(true, array_buffer->is_resizable_by_js());
+}
+
TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
DecodeTestFutureVersions(
{0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00}, [this](Local<Value> value) {
@@ -1927,6 +1952,13 @@ TEST_F(ValueSerializerTest, DecodeInvalidArrayBuffer) {
InvalidDecodeTest({0xFF, 0x09, 0x42, 0xFF, 0xFF, 0x00});
}
+TEST_F(ValueSerializerTest, DecodeInvalidResizableArrayBuffer) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ // Enough bytes available after reading the length, but not anymore when
+ // reading the max byte length.
+ InvalidDecodeTest({0xFF, 0x09, 0x7E, 0x2, 0x10, 0x00});
+}
+
// An array buffer allocator that never has available memory.
class OOMArrayBufferAllocator : public ArrayBuffer::Allocator {
public:
@@ -2026,14 +2058,20 @@ TEST_F(ValueSerializerTestWithArrayBufferTransfer,
TEST_F(ValueSerializerTest, RoundTripTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array.
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
Local<Value> value;
+ i::Handle<i::JSTypedArray> i_ta;
#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
value = RoundTripTest("new " #Type "Array(2)"); \
ASSERT_TRUE(value->Is##Type##Array()); \
EXPECT_EQ(2u * sizeof(ctype), TypedArray::Cast(*value)->ByteLength()); \
EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
- "Array.prototype");
+ "Array.prototype"); \
+ i_ta = v8::Utils::OpenHandle(TypedArray::Cast(*value)); \
+ EXPECT_EQ(false, i_ta->is_length_tracking()); \
+ EXPECT_EQ(false, i_ta->is_backed_by_rab());
TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
#undef TYPED_ARRAY_ROUND_TRIP_TEST
@@ -2066,6 +2104,56 @@ TEST_F(ValueSerializerTest, RoundTripTypedArray) {
ExpectScriptTrue("result.f32.length === 5");
}
+TEST_F(ValueSerializerTest, RoundTripRabBackedLengthTrackingTypedArray) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ // Check that the right type comes out the other side for every kind of typed
+ // array.
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ Local<Value> value;
+ i::Handle<i::JSTypedArray> i_ta;
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
+ value = RoundTripTest("new " #Type \
+ "Array(new ArrayBuffer(80, " \
+ "{maxByteLength: 160}))"); \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(80u, TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(80u / sizeof(ctype), TypedArray::Cast(*value)->Length()); \
+ ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
+ "Array.prototype"); \
+ i_ta = v8::Utils::OpenHandle(TypedArray::Cast(*value)); \
+ EXPECT_EQ(true, i_ta->is_length_tracking()); \
+ EXPECT_EQ(true, i_ta->is_backed_by_rab());
+
+ TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
+#undef TYPED_ARRAY_ROUND_TRIP_TEST
+}
+
+TEST_F(ValueSerializerTest, RoundTripRabBackedNonLengthTrackingTypedArray) {
+ FLAG_SCOPE(harmony_rab_gsab);
+ // Check that the right type comes out the other side for every kind of typed
+ // array.
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ Local<Value> value;
+ i::Handle<i::JSTypedArray> i_ta;
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
+ value = RoundTripTest("new " #Type \
+ "Array(new ArrayBuffer(80, " \
+ "{maxByteLength: 160}), 8, 4)"); \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(4u * sizeof(ctype), TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->Length()); \
+ ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
+ "Array.prototype"); \
+ i_ta = v8::Utils::OpenHandle(TypedArray::Cast(*value)); \
+ EXPECT_EQ(false, i_ta->is_length_tracking()); \
+ EXPECT_EQ(true, i_ta->is_backed_by_rab());
+
+ TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
+#undef TYPED_ARRAY_ROUND_TRIP_TEST
+}
+
TEST_F(ValueSerializerTest, DecodeTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array (version 14 and above).
@@ -2397,7 +2485,8 @@ TEST_F(ValueSerializerTest, RoundTripDataView) {
// TODO(v8:11111): Use API functions for testing is_length_tracking and
// is_backed_by_rab, once they're exposed
// via the API.
- i::Handle<i::JSDataView> i_dv = v8::Utils::OpenHandle(DataView::Cast(*value));
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv =
+ v8::Utils::OpenHandle(DataView::Cast(*value));
EXPECT_EQ(false, i_dv->is_length_tracking());
EXPECT_EQ(false, i_dv->is_backed_by_rab());
}
@@ -2416,6 +2505,42 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
});
}
+TEST_F(ValueSerializerTest, RoundTripRabBackedDataView) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ Local<Value> value = RoundTripTest(
+ "new DataView(new ArrayBuffer(4, {maxByteLength: 8}), 1, 2)");
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv =
+ v8::Utils::OpenHandle(DataView::Cast(*value));
+ EXPECT_EQ(false, i_dv->is_length_tracking());
+ EXPECT_EQ(true, i_dv->is_backed_by_rab());
+}
+
+TEST_F(ValueSerializerTest, RoundTripRabBackedLengthTrackingDataView) {
+ FLAG_SCOPE(harmony_rab_gsab);
+
+ Local<Value> value =
+ RoundTripTest("new DataView(new ArrayBuffer(4, {maxByteLength: 8}), 1)");
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(3u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
+ // TODO(v8:11111): Use API functions for testing is_length_tracking and
+ // is_backed_by_rab, once they're exposed via the API.
+ i::Handle<i::JSDataViewOrRabGsabDataView> i_dv =
+ v8::Utils::OpenHandle(DataView::Cast(*value));
+ EXPECT_EQ(true, i_dv->is_length_tracking());
+ EXPECT_EQ(true, i_dv->is_backed_by_rab());
+}
+
TEST_F(ValueSerializerTest, DecodeDataViewBackwardsCompatibility) {
DecodeTestUpToVersion(
13,
@@ -2633,6 +2758,47 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
ExpectScriptTrue(
"new Uint8Array(result.buffer, 0, 4).toString() === '0,1,128,255'");
}
+
+TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
+ RoundTripWebAssemblyMemory_WithPreviousReference) {
+ // This is a regression test for crbug.com/1421524.
+ // It ensures that a WasmMemoryObject can be deserialized even if its underlying
+ // buffer was already encountered, and so will be encoded with an object
+ // backreference.
+ std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xFF};
+ data.resize(65536);
+ InitializeData(data, true);
+
+ EXPECT_CALL(serializer_delegate_,
+ GetSharedArrayBufferId(isolate(), input_buffer()))
+ .WillRepeatedly(Return(Just(0U)));
+ EXPECT_CALL(deserializer_delegate_, GetSharedArrayBufferFromId(isolate(), 0U))
+ .WillRepeatedly(Return(output_buffer()));
+
+ Local<Value> input;
+ {
+ Context::Scope scope(serialization_context());
+ const int32_t kMaxPages = 1;
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*input_buffer());
+ i::Handle<i::WasmMemoryObject> wasm_memory =
+ i::WasmMemoryObject::New(i_isolate, buffer, kMaxPages)
+ .ToHandleChecked();
+ i::Handle<i::FixedArray> fixed_array =
+ i_isolate->factory()->NewFixedArray(2);
+ fixed_array->set(0, *buffer);
+ fixed_array->set(1, *wasm_memory);
+ input = Utils::ToLocal(i_isolate->factory()->NewJSArrayWithElements(
+ fixed_array, i::PACKED_ELEMENTS, 2));
+ }
+ RoundTripTest(input);
+ ExpectScriptTrue("result[0] instanceof SharedArrayBuffer");
+ ExpectScriptTrue("result[1] instanceof WebAssembly.Memory");
+ ExpectScriptTrue("result[0] === result[1].buffer");
+ ExpectScriptTrue("result[0].byteLength === 65536");
+ ExpectScriptTrue(
+ "new Uint8Array(result[0], 0, 4).toString() === '0,1,128,255'");
+}
#endif // V8_ENABLE_WEBASSEMBLY
TEST_F(ValueSerializerTest, UnsupportedHostObject) {
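
The regression test above drives the shared-buffer path through gmock-mocked delegates via the GetSharedArrayBufferId / GetSharedArrayBufferFromId hook pair. As a sketch of what those hooks look like when written by hand, here are minimal delegate shapes using the public v8::ValueSerializer::Delegate and v8::ValueDeserializer::Delegate interfaces; the single-buffer bookkeeping and class names are illustrative only:

#include "include/v8-persistent-handle.h"
#include "include/v8-value-serializer.h"

// Serializer side: shared buffers are transferred by id, not by copying bytes.
class OneBufferSerializerDelegate : public v8::ValueSerializer::Delegate {
 public:
  void ThrowDataCloneError(v8::Local<v8::String> message) override {}
  v8::Maybe<uint32_t> GetSharedArrayBufferId(
      v8::Isolate* isolate,
      v8::Local<v8::SharedArrayBuffer> shared_array_buffer) override {
    return v8::Just(0u);  // this sketch only ever handles one buffer
  }
};

// Deserializer side: id 0 maps back to the buffer captured at construction, so
// a second reference to the same buffer resolves to the same object.
class OneBufferDeserializerDelegate : public v8::ValueDeserializer::Delegate {
 public:
  OneBufferDeserializerDelegate(v8::Isolate* isolate,
                                v8::Local<v8::SharedArrayBuffer> buffer)
      : buffer_(isolate, buffer) {}
  v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
      v8::Isolate* isolate, uint32_t id) override {
    return buffer_.Get(isolate);
  }

 private:
  v8::Global<v8::SharedArrayBuffer> buffer_;
};
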
diff --git a/deps/v8/test/unittests/objects/weakmaps-unittest.cc b/deps/v8/test/unittests/objects/weakmaps-unittest.cc
index 9e839463dd..79f07aa938 100644
--- a/deps/v8/test/unittests/objects/weakmaps-unittest.cc
+++ b/deps/v8/test/unittests/objects/weakmaps-unittest.cc
@@ -58,6 +58,8 @@ TEST_F(WeakMapsTest, Weakness) {
v8_flags.incremental_marking = false;
Isolate* isolate = i_isolate();
Factory* factory = isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ isolate->heap());
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
GlobalHandles* global_handles = isolate->global_handles();
@@ -117,6 +119,8 @@ TEST_F(WeakMapsTest, Weakness) {
TEST_F(WeakMapsTest, Shrinking) {
Isolate* isolate = i_isolate();
Factory* factory = isolate->factory();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ isolate->heap());
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
@@ -329,6 +333,8 @@ TEST_F(WeakMapsTest, Regress399527) {
TEST_F(WeakMapsTest, WeakMapsWithChainedEntries) {
ManualGCScope manual_gc_scope(i_isolate());
v8::Isolate* isolate = v8_isolate();
+ DisableConservativeStackScanningScopeForTesting no_stack_scanning(
+ i_isolate()->heap());
v8::HandleScope scope(isolate);
const int initial_gc_count = i_isolate()->heap()->gc_count();
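
The DisableConservativeStackScanningScopeForTesting additions in this file share one motivation: with conservative stack scanning enabled, raw pointers that happen to remain on the C++ stack are treated as roots and keep their targets alive, which would prevent the weak entries under test from ever being cleared. The usage pattern, as a sketch with an illustrative test name and body:

TEST_F(WeakMapsTest, WeaknessSketch) {
  Isolate* isolate = i_isolate();
  // Without this scope, a stale on-stack pointer to the key could act as a
  // root and the weak entry would survive the GC below.
  DisableConservativeStackScanningScopeForTesting no_stack_scanning(
      isolate->heap());
  HandleScope scope(isolate);
  Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
  // ... add an entry whose key is only weakly reachable, run a full GC, and
  // assert that the entry has been cleared.
}
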
diff --git a/deps/v8/test/unittests/parser/decls-unittest.cc b/deps/v8/test/unittests/parser/decls-unittest.cc
index 5a0f0c1b89..8b12db12e3 100644
--- a/deps/v8/test/unittests/parser/decls-unittest.cc
+++ b/deps/v8/test/unittests/parser/decls-unittest.cc
@@ -145,8 +145,6 @@ void DeclarationContext::Check(const char* source, int get, int set, int query,
InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now
// to avoid that.
- i::ScanStackModeScopeForTesting no_stack_scanning(
- i_isolate()->heap(), i::Heap::ScanStackMode::kNone);
i_isolate()->heap()->CollectGarbage(i::NEW_SPACE,
i::GarbageCollectionReason::kTesting);
HandleScope scope(isolate_);
diff --git a/deps/v8/test/unittests/parser/parsing-unittest.cc b/deps/v8/test/unittests/parser/parsing-unittest.cc
index ac53e538fe..9048d7f1fa 100644
--- a/deps/v8/test/unittests/parser/parsing-unittest.cc
+++ b/deps/v8/test/unittests/parser/parsing-unittest.cc
@@ -2980,13 +2980,13 @@ TEST_F(ParsingTest, NoErrorsObjectLiteralChecking) {
TEST_F(ParsingTest, TooManyArguments) {
const char* context_data[][2] = {{"foo(", "0)"}, {nullptr, nullptr}};
- using v8::internal::Code;
- char statement[Code::kMaxArguments * 2 + 1];
- for (int i = 0; i < Code::kMaxArguments; ++i) {
+ using v8::internal::InstructionStream;
+ char statement[InstructionStream::kMaxArguments * 2 + 1];
+ for (int i = 0; i < InstructionStream::kMaxArguments; ++i) {
statement[2 * i] = '0';
statement[2 * i + 1] = ',';
}
- statement[Code::kMaxArguments * 2] = 0;
+ statement[InstructionStream::kMaxArguments * 2] = 0;
const char* statement_data[] = {statement, nullptr};
@@ -9059,9 +9059,9 @@ TEST_F(ParsingTest, ObjectRestNegativeTestSlow) {
{ nullptr, nullptr }
};
- using v8::internal::Code;
+ using v8::internal::InstructionStream;
std::string statement;
- for (int i = 0; i < Code::kMaxArguments; ++i) {
+ for (int i = 0; i < InstructionStream::kMaxArguments; ++i) {
statement += std::to_string(i) + " : " + "x, ";
}
statement += "...y";
diff --git a/deps/v8/test/unittests/regexp/regexp-unittest.cc b/deps/v8/test/unittests/regexp/regexp-unittest.cc
index 065eea336f..2b84c701ee 100644
--- a/deps/v8/test/unittests/regexp/regexp-unittest.cc
+++ b/deps/v8/test/unittests/regexp/regexp-unittest.cc
@@ -1653,6 +1653,7 @@ void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
++global_use_counts[feature];
}
+
} // namespace
using RegExpTestWithContext = TestWithContext;
@@ -2314,10 +2315,10 @@ TEST_F(RegExpTestWithContext, UnicodePropertyEscapeCodeSize) {
if (maybe_bytecode.IsByteArray()) {
// On x64, excessive inlining produced >250KB.
CHECK_LT(ByteArray::cast(maybe_bytecode).Size(), kMaxSize);
- } else if (maybe_code.IsCodeT()) {
+ } else if (maybe_code.IsCode()) {
// On x64, excessive inlining produced >360KB.
- CHECK_LT(FromCodeT(CodeT::cast(maybe_code)).Size(), kMaxSize);
- CHECK_EQ(FromCodeT(CodeT::cast(maybe_code)).kind(), CodeKind::REGEXP);
+ CHECK_LT(Code::cast(maybe_code).Size(), kMaxSize);
+ CHECK_EQ(Code::cast(maybe_code).kind(), CodeKind::REGEXP);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 6d95a2b7ac..053c81bcde 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -10,8 +10,10 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
+#include "test/unittests/heap/heap-utils.h"
namespace v8 {
@@ -53,7 +55,9 @@ IsolateWrapper::IsolateWrapper(CountersMode counters_mode)
IsolateWrapper::~IsolateWrapper() {
v8::Platform* platform = internal::V8::GetCurrentPlatform();
CHECK_NOT_NULL(platform);
+ isolate_->Enter();
while (platform::PumpMessageLoop(platform, isolate())) continue;
+ isolate_->Exit();
isolate_->Dispose();
if (counter_map_) {
CHECK_EQ(kCurrentCounterMap, counter_map_.get());
@@ -88,14 +92,7 @@ ManualGCScope::ManualGCScope(i::Isolate* isolate) {
// Some tests run threaded (back-to-back) and thus the GC may already be
// running by the time a ManualGCScope is created. Finalizing existing marking
// prevents any undefined/unexpected behavior.
- if (isolate && isolate->heap()->incremental_marking()->IsMarking()) {
- ScanStackModeScopeForTesting no_stack_scanning(isolate->heap(),
- Heap::ScanStackMode::kNone);
- isolate->heap()->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kTesting);
- // Make sure there is no concurrent sweeping running in the background.
- isolate->heap()->CompleteSweepingFull();
- }
+ FinalizeGCIfRunning(isolate);
i::v8_flags.concurrent_marking = false;
i::v8_flags.concurrent_sweeping = false;
@@ -105,6 +102,13 @@ ManualGCScope::ManualGCScope(i::Isolate* isolate) {
// Parallel marking has a dependency on concurrent marking.
i::v8_flags.parallel_marking = false;
i::v8_flags.detect_ineffective_gcs_near_heap_limit = false;
+ // CppHeap concurrent marking has a dependency on concurrent marking.
+ i::v8_flags.cppheap_concurrent_marking = false;
+
+ if (isolate && isolate->heap()->cpp_heap()) {
+ CppHeap::From(isolate->heap()->cpp_heap())
+ ->ReduceGCCapabilitiesFromFlagsForTesting();
+ }
}
} // namespace internal
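
ManualGCScope now also forces the attached CppHeap into the same non-concurrent mode as the V8 heap (via cppheap_concurrent_marking and ReduceGCCapabilitiesFromFlagsForTesting), so a test that needs fully deterministic collections only needs the scope itself. Illustrative usage; the fixture name is hypothetical and the assertions are left abstract:

TEST_F(SomeHeapTest, DeterministicGC) {
  // Finalizes any in-flight marking, then disables concurrent and parallel
  // marking/sweeping on both the V8 heap and the CppHeap for the lifetime of
  // the scope.
  ManualGCScope manual_gc_scope(i_isolate());
  CollectAllGarbage();  // runs without background GC interference
}
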
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 7860ed5815..523cddfe02 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -38,7 +38,7 @@ class WithDefaultPlatformMixin : public TMixin {
platform_ = v8::platform::NewDefaultPlatform(
0, v8::platform::IdleTaskSupport::kEnabled);
CHECK_NOT_NULL(platform_.get());
- v8::V8::InitializePlatform(platform_.get());
+ i::V8::InitializePlatformForTesting(platform_.get());
// Allow changing flags in unit tests.
// TODO(12887): Fix tests to avoid changing flag values after
// initialization.
@@ -165,7 +165,6 @@ class WithIsolateScopeMixin : public TMixin {
static MaybeLocal<Value> TryRunJS(Local<Context> context,
Local<String> source) {
- v8::Local<v8::Value> result;
Local<Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
return script->Run(context);
@@ -187,37 +186,25 @@ class WithIsolateScopeMixin : public TMixin {
}
// By default, the GC methods do not scan the stack conservatively.
- void CollectGarbage(
- i::AllocationSpace space, i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void CollectGarbage(i::AllocationSpace space, i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
- void CollectAllGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void CollectAllGarbage(i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
}
- void CollectAllAvailableGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void CollectAllAvailableGarbage(i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kTesting);
}
- void PreciseCollectAllGarbage(
- i::Isolate* isolate = nullptr,
- i::Heap::ScanStackMode mode = i::Heap::ScanStackMode::kNone) {
+ void PreciseCollectAllGarbage(i::Isolate* isolate = nullptr) {
i::Isolate* iso = isolate ? isolate : i_isolate();
- i::ScanStackModeScopeForTesting scope(iso->heap(), mode);
iso->heap()->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
}
@@ -586,18 +573,7 @@ class FeedbackVectorHelper {
template <typename Spec>
Handle<FeedbackVector> NewFeedbackVector(Isolate* isolate, Spec* spec) {
- Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate, spec);
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfoForBuiltin(
- isolate->factory()->empty_string(), Builtin::kIllegal);
- // Set the raw feedback metadata to circumvent checks that we are not
- // overwriting existing metadata.
- shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
- ClosureFeedbackCellArray::New(isolate, shared);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
- return FeedbackVector::New(isolate, shared, closure_feedback_cell_array,
- &is_compiled_scope);
+ return FeedbackVector::NewForTesting(isolate, spec);
}
class ParkingThread : public v8::base::Thread {
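
With the ScanStackMode parameter gone, the mixin's GC helpers take at most an explicit isolate, and test feedback vectors come from a dedicated factory function. A sketch of the simplified call sites; the fixture name is hypothetical and the zone()/spec plumbing is assumed to be provided by the fixture:

TEST_F(SomeIsolateTest, SimplifiedHelpers) {
  CollectGarbage(i::NEW_SPACE);   // scavenge; no stack-scanning argument
  CollectAllGarbage();            // full GC on the fixture's isolate
  PreciseCollectAllGarbage();     // full, precise GC

  // Feedback vectors no longer need hand-rolled SharedFunctionInfo plumbing.
  FeedbackVectorSpec spec(zone());
  Handle<FeedbackVector> vector =
      FeedbackVector::NewForTesting(i_isolate(), &spec);
}
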
diff --git a/deps/v8/test/unittests/testcfg.py b/deps/v8/test/unittests/testcfg.py
index 43ec0c2b11..6863c3e216 100644
--- a/deps/v8/test/unittests/testcfg.py
+++ b/deps/v8/test/unittests/testcfg.py
@@ -10,15 +10,27 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
+ADDITIONAL_VARIANTS = set(["minor_mc"])
+
+
class VariantsGenerator(testsuite.VariantsGenerator):
+
+ def __init__(self, variants):
+ super().__init__(variants)
+ self._supported_variants = self._standard_variant + [
+ v for v in variants if v in ADDITIONAL_VARIANTS
+ ]
+
def _get_variants(self, test):
- return self._standard_variant
+ if test.only_standard_variant:
+ return self._standard_variant
+ return self._supported_variants
class TestLoader(testsuite.TestLoader):
def _list_test_filenames(self):
shell = os.path.abspath(
- os.path.join(self.test_config.shell_dir, "unittests"))
+ os.path.join(self.test_config.shell_dir, "v8_unittests"))
if utils.IsWindows():
shell += ".exe"
@@ -76,9 +88,9 @@ class TestCase(testcase.TestCase):
)
def get_shell(self):
- return self.suite.name
+ return 'v8_' + self.suite.name
- def _get_resources(self):
+ def get_android_resources(self):
# Bytecode-generator tests are the only ones requiring extra files on
# Android.
parts = self.name.split('.')
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 5299766823..446d2345a8 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -84,7 +84,7 @@ type string constexpr 'const char*';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
-type Code extends HeapObject generates 'TNode<Code>';
+type InstructionStream extends HeapObject generates 'TNode<InstructionStream>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
type NativeContext extends Context;
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index ba61394ed1..ffe19b464c 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -32,6 +32,9 @@
# Slow tests.
'ParsingTest.ObjectRestNegativeTestSlow': [PASS, ['mode == debug', SKIP]],
+
+ # BUG(13660): Flaky test.
+ 'OS.RemapPages': [SKIP],
}], # ALWAYS
##############################################################################
@@ -192,6 +195,7 @@
'LogInterpretedFramesNativeStackWithSerializationTest.LogInterpretedFramesNativeStackWithSerialization': [SKIP],
}], # lite_mode
+##############################################################################
['variant == jitless', {
# --interpreted-frames-native-stack tests
'LogExternalInterpretedFramesNativeStackTest.ExternalLogEventListenerWithInterpretedFramesNativeStack': [SKIP],
@@ -200,6 +204,25 @@
'InterpreterTest.InterpreterWithNativeStack': [SKIP],
}], # jitless
+##############################################################################
+['jitless_build_mode', {
+ # Feedback collection maintenance is (mostly) disabled.
+ 'FeedbackVectorTest.Vector*': [SKIP],
+ 'InterpreterTest.InterpreterBigIntComparisons': [SKIP],
+ 'InterpreterTest.InterpreterBinaryOpSmiTypeFeedback': [SKIP],
+ 'InterpreterTest.InterpreterUnaryOpFeedback': [SKIP],
+ 'InterpreterTest.InterpreterStringComparisons': [SKIP],
+ 'InterpreterTest.InterpreterSmiComparisons': [SKIP],
+ 'InterpreterTest.InterpreterStringAdd': [SKIP],
+ 'InterpreterTest.InterpreterMixedComparisons': [SKIP],
+ 'InterpreterTest.InterpreterHeapNumberComparisons': [SKIP],
+ 'InterpreterTest.InterpreterBinaryOpsBigInt': [SKIP],
+ 'InterpreterTest.InterpreterBinaryOpTypeFeedback': [SKIP],
+ 'InterpreterTest.InterpreterBitwiseTypeFeedback': [SKIP],
+ # These require executable code space.
+ 'AssemblerX64Test.*': [SKIP],
+}], # jitless_build_mode
+
################################################################################
['third_party_heap', {
# Tests on OptimizingCompileDispatcher
@@ -243,7 +266,6 @@
'WeakMapsTest.WeakMapsWithChainedEntries': [SKIP],
'WeakMapsTest.Weakness': [SKIP],
'WeakSetsTest.WeakSet_Weakness': [SKIP],
- 'WebSnapshotTest.SFIDeduplicationAfterBytecodeFlushing': [SKIP],
# CodeRange tests
'CodePagesTest.LargeCodeObjectWithSignalHandler': [SKIP],
@@ -261,13 +283,6 @@
'FactoryCodeBuilderOOMTest.Factory_CodeBuilder_TryBuildOOM': [SKIP],
}], # third_party_heap
-################################################################################
-['variant == always_sparkplug', {
- # SFI deduplication tests check compilation state, which always_sparkplug
- # can break.
- 'WebSnapshotTest.SFIDeduplication*': [SKIP],
-}],
-
##############################################################################
['byteorder == big', {
# Peephole optimization not supported on big-endian machines.
@@ -337,4 +352,8 @@
'WasmDisassemblerTest.Simd': [SKIP],
}], # no_simd_hardware == True
+['tsan and mode == debug', {
+ 'LazyCompileDispatcherTest.CompileLazy2FinishesDispatcherJob': [SKIP],
+}]
+
]
diff --git a/deps/v8/test/unittests/utils/identity-map-unittest.cc b/deps/v8/test/unittests/utils/identity-map-unittest.cc
index f61559f750..281f2d9ecc 100644
--- a/deps/v8/test/unittests/utils/identity-map-unittest.cc
+++ b/deps/v8/test/unittests/utils/identity-map-unittest.cc
@@ -788,6 +788,7 @@ TEST_F(IdentityMapTest, GCShortCutting) {
if (v8_flags.single_generation) return;
// We don't create ThinStrings immediately when using the forwarding table.
if (v8_flags.always_use_string_forwarding_table) return;
+ v8_flags.shortcut_strings_with_stack = true;
ManualGCScope manual_gc_scope(isolate());
IdentityMapTester t(isolate()->heap(), zone());
Factory* factory = isolate()->factory();
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 26d37fe7d6..3e43729e0e 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -20,54 +20,52 @@ class DecoderTest : public TestWithZone {
Decoder decoder;
};
-#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(static_cast<uint32_t>(expected), \
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), \
- &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i32v<Decoder::FullValidationTag>( \
- decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(expected, decoder.consume_i32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_i32v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(expected, value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(expected, decoder.consume_i32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(static_cast<uint64_t>(expected), \
- decoder.read_u64v<Decoder::FullValidationTag>(decoder.start(), \
- &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_u64v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(static_cast<uint64_t>(expected), value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
-#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i64v<Decoder::FullValidationTag>( \
- decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ auto [value, length] = \
+ decoder.read_i64v<Decoder::FullValidationTag>(decoder.start()); \
+ EXPECT_EQ(expected, value); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
TEST_F(DecoderTest, ReadU32v_OneByte) {
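
The macro rewrite above reflects a wider API change in this file: the Decoder::read_*v helpers now return a {value, length} pair instead of reporting the length through an out-parameter, so call sites can use C++17 structured bindings. A short illustration of the new call shape inside the fixture (the bytes are the standard unsigned-LEB128 encoding of 624485):

const byte data[] = {0xE5, 0x8E, 0x26};  // LEB128 for 624485
decoder.Reset(data, data + sizeof(data));
auto [value, length] =
    decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_EQ(624485u, value);
EXPECT_EQ(3u, length);
EXPECT_TRUE(decoder.ok());
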
@@ -377,18 +375,16 @@ TEST_F(DecoderTest, ReadI32v_FiveByte) {
TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
- unsigned length = 0;
decoder.Reset(data, data);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
TEST_F(DecoderTest, ReadU32v_off_end2) {
static const byte data[] = {U32V_2(1111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -396,9 +392,8 @@ TEST_F(DecoderTest, ReadU32v_off_end2) {
TEST_F(DecoderTest, ReadU32v_off_end3) {
static const byte data[] = {U32V_3(111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -406,9 +401,8 @@ TEST_F(DecoderTest, ReadU32v_off_end3) {
TEST_F(DecoderTest, ReadU32v_off_end4) {
static const byte data[] = {U32V_4(11111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -416,9 +410,8 @@ TEST_F(DecoderTest, ReadU32v_off_end4) {
TEST_F(DecoderTest, ReadU32v_off_end5) {
static const byte data[] = {U32V_5(111111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
@@ -427,29 +420,27 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 16; i++) {
data[4] = static_cast<byte>(i << 4);
- unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ auto [result, length] =
+ decoder.read_i32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_EQ(5u, length);
EXPECT_TRUE(decoder.ok());
}
TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_i32v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
@@ -483,9 +474,8 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
// foreach buffer size 0...5
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned rlen;
- uint32_t result =
- decoder.read_u32v<Decoder::FullValidationTag>(data, &rlen);
+ auto [result, rlen] =
+ decoder.read_u32v<Decoder::FullValidationTag>(data);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -540,9 +530,8 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned length;
- uint64_t result =
- decoder.read_u64v<Decoder::FullValidationTag>(data, &length);
+ auto [result, length] =
+ decoder.read_u64v<Decoder::FullValidationTag>(data);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -582,9 +571,8 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
// foreach buffer size 0...10
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned rlen;
- uint64_t result =
- decoder.read_u64v<Decoder::FullValidationTag>(data, &rlen);
+ auto [result, rlen] =
+ decoder.read_u64v<Decoder::FullValidationTag>(data);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -626,9 +614,8 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
// foreach buffer size 0...10
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- unsigned rlen;
- int64_t result =
- decoder.read_i64v<Decoder::FullValidationTag>(data, &rlen);
+ auto [result, rlen] =
+ decoder.read_i64v<Decoder::FullValidationTag>(data);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -645,29 +632,27 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 128; i++) {
data[9] = static_cast<byte>(i << 1);
- unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u64v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_u64v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
}
TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<Decoder::FullValidationTag>(decoder.start(), &length);
+ auto [result, length] =
+ decoder.read_i64v<Decoder::FullValidationTag>(decoder.start());
EXPECT_EQ(10u, length);
EXPECT_TRUE(decoder.ok());
}
TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<Decoder::FullValidationTag>(decoder.start(), &length);
+ decoder.read_i64v<Decoder::FullValidationTag>(decoder.start());
EXPECT_FALSE(decoder.ok());
}
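The decoder-unittest changes above all follow one pattern: the LEB128 readers (`read_u32v`, `read_i32v`, `read_u64v`, `read_i64v`) no longer take an `unsigned* length` out-parameter but return the decoded value together with the consumed byte count, unpacked with structured bindings. A minimal sketch of the new calling convention, assuming a `decoder` and `data` buffer set up as in the tests above:

  // Old style (removed): value returned, length via out-parameter.
  //   unsigned length;
  //   uint32_t value =
  //       decoder.read_u32v<Decoder::FullValidationTag>(data, &length);
  // New style: both results come back as a pair, unpacked at once.
  decoder.Reset(data, data + sizeof(data));
  auto [value, length] =
      decoder.read_u32v<Decoder::FullValidationTag>(decoder.start());
  if (decoder.ok()) {
    // `value` is the decoded u32, `length` the number of bytes consumed.
  }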
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index c55f3cb660..b91df08e9a 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -75,9 +75,11 @@ enum MemoryType { kMemory32, kMemory64 };
// globals, or memories.
class TestModuleBuilder {
public:
- explicit TestModuleBuilder(ModuleOrigin origin = kWasmOrigin)
- : allocator(), mod(std::make_unique<Zone>(&allocator, ZONE_NAME)) {
- mod.origin = origin;
+ explicit TestModuleBuilder(ModuleOrigin origin = kWasmOrigin) : mod(origin) {
+ mod.num_declared_functions = 1;
+ mod.validated_functions = std::make_unique<std::atomic<uint8_t>[]>(1);
+ // Asm.js functions are valid by design.
+ if (is_asmjs_module(&mod)) mod.validated_functions[0] = 0xff;
}
byte AddGlobal(ValueType type, bool mutability = true) {
mod.globals.push_back({type, mutability, {}, {0}, false, false});
@@ -85,7 +87,7 @@ class TestModuleBuilder {
return static_cast<byte>(mod.globals.size() - 1);
}
byte AddSignature(const FunctionSig* sig, uint32_t supertype = kNoSuperType) {
- mod.add_signature(sig, supertype);
+ mod.add_signature(sig, supertype, v8_flags.wasm_final_types);
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
@@ -105,7 +107,7 @@ class TestModuleBuilder {
return result;
}
byte AddException(WasmTagSig* sig) {
- mod.tags.emplace_back(sig);
+ mod.tags.emplace_back(sig, AddSignature(sig));
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.tags.size() - 1);
}
@@ -124,19 +126,20 @@ class TestModuleBuilder {
byte AddStruct(std::initializer_list<F> fields,
uint32_t supertype = kNoSuperType) {
- StructType::Builder type_builder(mod.signature_zone.get(),
+ StructType::Builder type_builder(&mod.signature_zone,
static_cast<uint32_t>(fields.size()));
for (F field : fields) {
type_builder.AddField(field.first, field.second);
}
- mod.add_struct_type(type_builder.Build(), supertype);
+ mod.add_struct_type(type_builder.Build(), supertype,
+ v8_flags.wasm_final_types);
GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
byte AddArray(ValueType type, bool mutability) {
- ArrayType* array = mod.signature_zone->New<ArrayType>(type, mutability);
- mod.add_array_type(array, kNoSuperType);
+ ArrayType* array = mod.signature_zone.New<ArrayType>(type, mutability);
+ mod.add_array_type(array, kNoSuperType, v8_flags.wasm_final_types);
GetTypeCanonicalizer()->AddRecursiveGroup(module(), 1);
return static_cast<byte>(mod.types.size() - 1);
}
@@ -156,14 +159,14 @@ class TestModuleBuilder {
byte AddPassiveElementSegment(wasm::ValueType type) {
mod.elem_segments.emplace_back(type, WasmElemSegment::kStatusPassive,
- WasmElemSegment::kExpressionElements);
+ WasmElemSegment::kExpressionElements, 0, 0);
return static_cast<byte>(mod.elem_segments.size() - 1);
}
byte AddDeclarativeElementSegment() {
mod.elem_segments.emplace_back(kWasmFuncRef,
WasmElemSegment::kStatusDeclarative,
- WasmElemSegment::kExpressionElements);
+ WasmElemSegment::kExpressionElements, 0, 0);
return static_cast<byte>(mod.elem_segments.size() - 1);
}
@@ -191,7 +194,6 @@ class TestModuleBuilder {
return static_cast<byte>(mod.functions.size() - 1);
}
- AccountingAllocator allocator;
WasmModule mod;
};
@@ -260,9 +262,8 @@ class FunctionBodyDecoderTestBase : public WithZoneMixin<BaseTest> {
// Validate the code.
FunctionBody body(sig, 0, code.begin(), code.end());
WasmFeatures unused_detected_features = WasmFeatures::None();
- DecodeResult result =
- ValidateFunctionBody(this->zone()->allocator(), enabled_features_,
- module, &unused_detected_features, body);
+ DecodeResult result = ValidateFunctionBody(enabled_features_, module,
+ &unused_detected_features, body);
std::ostringstream str;
if (result.failed()) {
@@ -1718,7 +1719,7 @@ TEST_F(FunctionBodyDecoderTest, ReturnCallWithSubtype) {
WASM_FEATURE_SCOPE(return_call);
auto sig = MakeSig::Returns(kWasmAnyRef);
- auto callee_sig = MakeSig::Returns(kWasmAnyNonNullableRef);
+ auto callee_sig = MakeSig::Returns(kWasmAnyRef.AsNonNull());
builder.AddFunction(&callee_sig);
ExpectValidates(&sig, {WASM_RETURN_CALL_FUNCTION0(0)});
@@ -2354,97 +2355,6 @@ TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
ExpectFailure(sigs.i_d(), code);
}
-TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
- ValueType float32int32float32[] = {kWasmF32, kWasmI32, kWasmF32};
- FunctionSig sig_f_if(1, 2, float32int32float32);
- ValueType float64int32float64[] = {kWasmF64, kWasmI32, kWasmF64};
- FunctionSig sig_d_id(1, 2, float64int32float64);
- struct {
- WasmOpcode op;
- const FunctionSig* sig;
- } AsmJsBinOps[] = {
- {kExprF64Atan2, sigs.d_dd()},
- {kExprF64Pow, sigs.d_dd()},
- {kExprF64Mod, sigs.d_dd()},
- {kExprI32AsmjsDivS, sigs.i_ii()},
- {kExprI32AsmjsDivU, sigs.i_ii()},
- {kExprI32AsmjsRemS, sigs.i_ii()},
- {kExprI32AsmjsRemU, sigs.i_ii()},
- {kExprI32AsmjsStoreMem8, sigs.i_ii()},
- {kExprI32AsmjsStoreMem16, sigs.i_ii()},
- {kExprI32AsmjsStoreMem, sigs.i_ii()},
- {kExprF32AsmjsStoreMem, &sig_f_if},
- {kExprF64AsmjsStoreMem, &sig_d_id},
- };
-
- {
- TestModuleBuilder builder(kAsmJsSloppyOrigin);
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
- TestBinop(AsmJsBinOps[i].op, AsmJsBinOps[i].sig);
- }
- }
-
- {
- TestModuleBuilder builder;
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
- ExpectFailure(AsmJsBinOps[i].sig,
- {WASM_BINOP(AsmJsBinOps[i].op, WASM_LOCAL_GET(0),
- WASM_LOCAL_GET(1))});
- }
- }
-}
-
-TEST_F(FunctionBodyDecoderTest, AsmJsUnOpsCheckOrigin) {
- ValueType float32int32[] = {kWasmF32, kWasmI32};
- FunctionSig sig_f_i(1, 1, float32int32);
- ValueType float64int32[] = {kWasmF64, kWasmI32};
- FunctionSig sig_d_i(1, 1, float64int32);
- struct {
- WasmOpcode op;
- const FunctionSig* sig;
- } AsmJsUnOps[] = {{kExprF64Acos, sigs.d_d()},
- {kExprF64Asin, sigs.d_d()},
- {kExprF64Atan, sigs.d_d()},
- {kExprF64Cos, sigs.d_d()},
- {kExprF64Sin, sigs.d_d()},
- {kExprF64Tan, sigs.d_d()},
- {kExprF64Exp, sigs.d_d()},
- {kExprF64Log, sigs.d_d()},
- {kExprI32AsmjsLoadMem8S, sigs.i_i()},
- {kExprI32AsmjsLoadMem8U, sigs.i_i()},
- {kExprI32AsmjsLoadMem16S, sigs.i_i()},
- {kExprI32AsmjsLoadMem16U, sigs.i_i()},
- {kExprI32AsmjsLoadMem, sigs.i_i()},
- {kExprF32AsmjsLoadMem, &sig_f_i},
- {kExprF64AsmjsLoadMem, &sig_d_i},
- {kExprI32AsmjsSConvertF32, sigs.i_f()},
- {kExprI32AsmjsUConvertF32, sigs.i_f()},
- {kExprI32AsmjsSConvertF64, sigs.i_d()},
- {kExprI32AsmjsUConvertF64, sigs.i_d()}};
- {
- TestModuleBuilder builder(kAsmJsSloppyOrigin);
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
- TestUnop(AsmJsUnOps[i].op, AsmJsUnOps[i].sig);
- }
- }
-
- {
- TestModuleBuilder builder;
- module = builder.module();
- builder.InitializeMemory();
- for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
- ExpectFailure(AsmJsUnOps[i].sig,
- {WASM_UNOP(AsmJsUnOps[i].op, WASM_LOCAL_GET(0))});
- }
- }
-}
-
TEST_F(FunctionBodyDecoderTest, BreakEnd) {
ExpectValidates(
sigs.i_i(),
@@ -3289,9 +3199,8 @@ TEST_F(FunctionBodyDecoderTest, Regression709741) {
for (size_t i = 0; i < arraysize(code); ++i) {
FunctionBody body(sigs.v_v(), 0, code, code + i);
WasmFeatures unused_detected_features;
- DecodeResult result =
- ValidateFunctionBody(this->zone()->allocator(), WasmFeatures::All(),
- nullptr, &unused_detected_features, body);
+ DecodeResult result = ValidateFunctionBody(WasmFeatures::All(), module,
+ &unused_detected_features, body);
if (result.ok()) {
std::ostringstream str;
str << "Expected verification to fail";
@@ -4361,6 +4270,10 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
WASM_HEAP_TYPE(to_heap))});
ExpectValidates(&cast_sig, {WASM_REF_CAST(WASM_LOCAL_GET(0),
WASM_HEAP_TYPE(to_heap))});
+ ExpectValidates(&test_sig, {WASM_REF_TEST_NULL(WASM_LOCAL_GET(0),
+ WASM_HEAP_TYPE(to_heap))});
+ ExpectValidates(&cast_sig, {WASM_REF_CAST_NULL(WASM_LOCAL_GET(0),
+ WASM_HEAP_TYPE(to_heap))});
} else {
std::string error_message =
"local.get of type " + cast_reps[1].name() +
@@ -4374,6 +4287,16 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
{WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_HEAP_TYPE(to_heap))},
kAppendEnd,
("Invalid types for ref.cast: " + error_message).c_str());
+ ExpectFailure(
+ &test_sig,
+ {WASM_REF_TEST_NULL(WASM_LOCAL_GET(0), WASM_HEAP_TYPE(to_heap))},
+ kAppendEnd,
+ ("Invalid types for ref.test null: " + error_message).c_str());
+ ExpectFailure(
+ &cast_sig,
+ {WASM_REF_CAST_NULL(WASM_LOCAL_GET(0), WASM_HEAP_TYPE(to_heap))},
+ kAppendEnd,
+ ("Invalid types for ref.cast null: " + error_message).c_str());
}
}
@@ -4407,6 +4330,7 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
byte super_struct = builder.AddStruct({F(kWasmI16, true)});
byte sub_struct =
builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)}, super_struct);
+ byte fct_type = builder.AddSignature(sigs.i_i(), kNoSuperType);
ValueType supertype = ValueType::RefNull(super_struct);
ValueType subtype = ValueType::RefNull(sub_struct);
@@ -4422,6 +4346,10 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
ExpectValidates(
FunctionSig::Build(this->zone(), {kWasmI32, supertype}, {supertype}),
{WASM_I32V(42), WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct)});
+ ExpectValidates(
+ FunctionSig::Build(this->zone(), {kWasmI32, supertype}, {supertype}),
+ {WASM_I32V(42), WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct)});
// Wrong branch type.
ExpectFailure(
@@ -4433,6 +4361,11 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
{WASM_I32V(42), WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct)},
kAppendEnd,
"type error in branch[0] (expected (ref null 1), got (ref null 0))");
+ ExpectFailure(FunctionSig::Build(this->zone(), {subtype}, {supertype}),
+ {WASM_I32V(42), WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct)},
+ kAppendEnd,
+ "type error in branch[0] (expected (ref null 1), got (ref 0))");
// Wrong fallthrough type.
ExpectFailure(
@@ -4443,20 +4376,44 @@ TEST_F(FunctionBodyDecoderTest, BrOnCastOrCastFail) {
FunctionSig::Build(this->zone(), {supertype}, {supertype}),
{WASM_BLOCK_I(WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct))},
kAppendEnd, "type error in branch[0] (expected i32, got (ref null 0))");
+ ExpectFailure(FunctionSig::Build(this->zone(), {supertype}, {supertype}),
+ {WASM_BLOCK_I(WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct))},
+ kAppendEnd,
+ "type error in branch[0] (expected i32, got (ref 0))");
// Argument type error.
+ ExpectFailure(FunctionSig::Build(this->zone(), {subtype}, {kWasmExternRef}),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST(0, sub_struct),
+ WASM_GC_OP(kExprRefCast), sub_struct},
+ kAppendEnd,
+ "Invalid types for br_on_cast: local.get of type externref has "
+ "to be in the same reference type hierarchy as (ref 1)");
ExpectFailure(
FunctionSig::Build(this->zone(), {subtype}, {kWasmExternRef}),
- {WASM_LOCAL_GET(0), WASM_BR_ON_CAST(0, sub_struct),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST_NULL(0, sub_struct),
WASM_GC_OP(kExprRefCast), sub_struct},
kAppendEnd,
- "br_on_cast[0] expected subtype of (ref null func), (ref null struct) or "
- "(ref null array), found local.get of type externref");
+ "Invalid types for br_on_cast null: local.get of type externref has "
+ "to be in the same reference type hierarchy as (ref 1)");
ExpectFailure(
FunctionSig::Build(this->zone(), {supertype}, {kWasmExternRef}),
{WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL(0, sub_struct)}, kAppendEnd,
- "br_on_cast_fail[0] expected subtype of (ref null func), (ref null "
- "struct) or (ref null array), found local.get of type externref");
+ "Invalid types for br_on_cast_fail: local.get of type externref has to "
+ "be in the same reference type hierarchy as (ref 1)");
+ ExpectFailure(
+ FunctionSig::Build(this->zone(), {supertype}, {kWasmExternRef}),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST_FAIL_NULL(0, sub_struct)}, kAppendEnd,
+ "Invalid types for br_on_cast_fail null: local.get of type "
+ "externref has to be in the same reference type hierarchy as (ref 1)");
+
+ // Cast between types of different type hierarchies is invalid.
+ ExpectFailure(
+ FunctionSig::Build(this->zone(), {subtype}, {supertype}),
+ {WASM_LOCAL_GET(0), WASM_BR_ON_CAST(0, fct_type), WASM_UNREACHABLE},
+ kAppendEnd,
+ "Invalid types for br_on_cast: local.get of type (ref null 0) has "
+ "to be in the same reference type hierarchy as (ref 2)");
}
TEST_F(FunctionBodyDecoderTest, BrOnAbstractType) {
@@ -4817,7 +4774,7 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
ExpectLength(2, kExprGlobalSet);
ExpectLength(2, kExprCallFunction);
ExpectLength(3, kExprCallIndirect);
- ExpectLength(3, kExprSelectWithType, 1);
+ ExpectLength(3, kExprSelectWithType, 1, kI32Code);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
@@ -4933,7 +4890,7 @@ TEST_F(WasmOpcodeLengthTest, IllegalRefIndices) {
TEST_F(WasmOpcodeLengthTest, GCOpcodes) {
// br_on_cast{,_fail}: prefix + opcode + br_depth + type_index
- ExpectLength(4, 0xfb, kExprBrOnCast & 0xFF);
+ ExpectLength(4, 0xfb, kExprBrOnCastDeprecated & 0xFF);
ExpectLength(4, 0xfb, kExprBrOnCastFail & 0xFF);
// struct.new, with leb immediate operand.
@@ -4963,9 +4920,10 @@ class TypeReaderTest : public TestWithZone {
public:
HeapType DecodeHeapType(const byte* start, const byte* end) {
Decoder decoder(start, end);
- uint32_t length;
- return value_type_reader::read_heap_type<Decoder::FullValidationTag>(
- &decoder, start, &length, enabled_features_);
+ auto [heap_type, length] =
+ value_type_reader::read_heap_type<Decoder::FullValidationTag>(
+ &decoder, start, enabled_features_);
+ return heap_type;
}
// This variable is modified by WASM_FEATURE_SCOPE.
@@ -5025,7 +4983,6 @@ TEST_F(TypeReaderTest, HeapTypeDecodingTest) {
class LocalDeclDecoderTest : public TestWithZone {
public:
- v8::internal::AccountingAllocator allocator;
WasmFeatures enabled_features_;
size_t ExpectRun(ValueType* local_types, size_t pos, ValueType expected,
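Two recurring changes run through the function-body-decoder tests above: `ValidateFunctionBody` no longer takes an `AccountingAllocator`, and the module-builder helpers (`add_signature`, `add_struct_type`, `add_array_type`) now pass `v8_flags.wasm_final_types` through. A hedged sketch of the updated validation call, assuming `sig`, `code`, `module`, and `enabled_features_` are set up as in `FunctionBodyDecoderTestBase`:

  FunctionBody body(sig, 0, code.begin(), code.end());
  WasmFeatures unused_detected_features = WasmFeatures::None();
  // The allocator parameter is gone from the signature.
  DecodeResult result = ValidateFunctionBody(enabled_features_, module,
                                             &unused_detected_features, body);
  if (result.failed()) {
    // result.error().message() explains why the body was rejected.
  }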
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index 362db55f14..71faa6f3d5 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -97,9 +97,8 @@ TEST_F(LEBHelperTest, sizeof_i32v) {
EXPECT_EQ(LEBHelper::sizeof_##name(val), \
static_cast<size_t>(ptr - buffer)); \
Decoder decoder(buffer, buffer + kSize); \
- unsigned length = 0; \
- ctype result = \
- decoder.read_##name<Decoder::NoValidationTag>(buffer, &length); \
+ auto [result, length] = \
+ decoder.read_##name<Decoder::NoValidationTag>(buffer); \
EXPECT_EQ(val, result); \
EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
}
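The leb-helper change is the same out-parameter-to-pair migration; the macro still asserts that the encoder's `sizeof_*` result matches the length the decoder consumed. For reference, an illustrative (not V8's) unsigned-LEB128 size computation showing why that length is one byte per started 7-bit group:

  #include <cstddef>
  #include <cstdint>

  size_t leb128_u32_size(uint32_t value) {
    size_t size = 0;
    do {
      value >>= 7;  // one output byte per 7 payload bits
      ++size;
    } while (value != 0);
    return size;  // 1 for values < 2^7, up to 5 for values >= 2^28
  }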
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 65945932ae..2e7c75c1c9 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -26,8 +26,10 @@ class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
TestSignatures sigs;
uint32_t num_locals;
- BitVector* Analyze(const byte* start, const byte* end) {
- return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end);
+ BitVector* Analyze(const byte* start, const byte* end,
+ bool* loop_is_innermost = nullptr) {
+ return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end,
+ loop_is_innermost);
}
};
@@ -175,6 +177,29 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
}
}
+TEST_F(WasmLoopAssignmentAnalyzerTest, NestedLoop) {
+ num_locals = 5;
+ byte code[] = {WASM_LOOP(WASM_LOOP(WASM_LOCAL_SET(0, 1)))};
+
+ bool outer_is_innermost = false;
+ BitVector* outer_assigned =
+ Analyze(code, code + arraysize(code), &outer_is_innermost);
+ for (int j = 0; j < outer_assigned->length(); j++) {
+ bool expected = j == 0;
+ EXPECT_EQ(expected, outer_assigned->Contains(j));
+ }
+ EXPECT_FALSE(outer_is_innermost);
+
+ bool inner_is_innermost = false;
+ BitVector* inner_assigned =
+ Analyze(code + 2, code + arraysize(code), &inner_is_innermost);
+ for (int j = 0; j < inner_assigned->length(); j++) {
+ bool expected = j == 0;
+ EXPECT_EQ(expected, inner_assigned->Contains(j));
+ }
+ EXPECT_TRUE(inner_is_innermost);
+}
+
TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
byte code[] = {kExprLoop, kVoidCode, kExprF32Neg, kExprBrTable, 0x0E, 'h',
'e', 'l', 'l', 'o', ',', ' ',
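The loop-assignment analyzer gains an optional `loop_is_innermost` out-parameter, exercised by the new NestedLoop test above. A sketch of the updated call, assuming `code`, `num_locals`, and the fixture's `zone()` as in `WasmLoopAssignmentAnalyzerTest`:

  bool is_innermost = false;
  BitVector* assigned = AnalyzeLoopAssignmentForTesting(
      zone(), num_locals, code, code + arraysize(code), &is_innermost);
  // Bit j of `assigned` is set iff local j is assigned inside the loop;
  // `is_innermost` reports whether the loop contains no nested loop.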
diff --git a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
index 7ae062709d..120197bba0 100644
--- a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
+++ b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
@@ -21,15 +21,11 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock-matchers.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
+namespace v8::internal::wasm {
enum MemoryProtectionMode {
kNoProtection,
kPku,
- kMprotect,
- kPkuWithMprotectFallback
};
const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
@@ -38,10 +34,6 @@ const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
return "NoProtection";
case kPku:
return "Pku";
- case kMprotect:
- return "Mprotect";
- case kPkuWithMprotectFallback:
- return "PkuWithMprotectFallback";
}
}
@@ -50,15 +42,10 @@ class MemoryProtectionTest : public TestWithNativeContext {
void Initialize(MemoryProtectionMode mode) {
v8_flags.wasm_lazy_compilation = false;
mode_ = mode;
- bool enable_pku = mode == kPku || mode == kPkuWithMprotectFallback;
- v8_flags.wasm_memory_protection_keys = enable_pku;
+ v8_flags.wasm_memory_protection_keys = (mode == kPku);
// The key is initially write-protected.
CHECK_IMPLIES(WasmCodeManager::HasMemoryProtectionKeySupport(),
!WasmCodeManager::MemoryProtectionKeyWritable());
-
- bool enable_mprotect =
- mode == kMprotect || mode == kPkuWithMprotectFallback;
- v8_flags.wasm_write_protect_code_memory = enable_mprotect;
}
void CompileModule() {
@@ -72,11 +59,7 @@ class MemoryProtectionTest : public TestWithNativeContext {
WasmCode* code() const { return code_; }
bool code_is_protected() {
- return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku() || uses_mprotect();
- }
-
- void MakeCodeWritable() {
- native_module_->MakeWritable(base::AddressRegionOf(code_->instructions()));
+ return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku();
}
void WriteToCode() { code_->instructions()[0] = 0; }
@@ -87,28 +70,18 @@ class MemoryProtectionTest : public TestWithNativeContext {
WriteToCode();
return;
}
- // Tier-up might be running and unprotecting the code region temporarily (if
- // using mprotect). In that case, repeatedly write to the code region to
- // make us eventually crash.
ASSERT_DEATH_IF_SUPPORTED(
- do {
+ {
WriteToCode();
base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
- } while (uses_mprotect()),
+ },
"");
}
- bool uses_mprotect() {
- // M1 always uses MAP_JIT.
- if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
- return mode_ == kMprotect ||
- (mode_ == kPkuWithMprotectFallback && !uses_pku());
- }
-
bool uses_pku() {
// M1 always uses MAP_JIT.
if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
- bool param_has_pku = mode_ == kPku || mode_ == kPkuWithMprotectFallback;
+ bool param_has_pku = mode_ == kPku;
return param_has_pku && WasmCodeManager::HasMemoryProtectionKeySupport();
}
@@ -121,11 +94,8 @@ class MemoryProtectionTest : public TestWithNativeContext {
SECTION(Code, ENTRY_COUNT(1), ADD_COUNT(0 /* locals */, kExprEnd))};
ModuleResult result =
- DecodeWasmModule(WasmFeatures::All(), std::begin(module_bytes),
- std::end(module_bytes), false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(),
- DecodingMethod::kSync, GetWasmEngine()->allocator());
+ DecodeWasmModule(WasmFeatures::All(), base::ArrayVector(module_bytes),
+ false, kWasmOrigin);
CHECK(result.ok());
ErrorThrower thrower(isolate(), "");
@@ -160,8 +130,7 @@ std::string PrintMemoryProtectionTestParam(
}
INSTANTIATE_TEST_SUITE_P(MemoryProtection, ParameterizedMemoryProtectionTest,
- ::testing::Values(kNoProtection, kPku, kMprotect,
- kPkuWithMprotectFallback),
+ ::testing::Values(kNoProtection, kPku),
PrintMemoryProtectionTestParam);
TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
@@ -172,7 +141,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
TEST_P(ParameterizedMemoryProtectionTest, CodeWritableWithinScope) {
CompileModule();
CodeSpaceWriteScope write_scope(native_module());
- MakeCodeWritable();
WriteToCode();
}
@@ -180,7 +148,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterScope) {
CompileModule();
{
CodeSpaceWriteScope write_scope(native_module());
- MakeCodeWritable();
WriteToCode();
}
AssertCodeEventuallyProtected();
@@ -267,8 +234,7 @@ std::string PrintMemoryProtectionAndSignalHandlingTestParam(
INSTANTIATE_TEST_SUITE_P(
MemoryProtection, ParameterizedMemoryProtectionTestWithSignalHandling,
- ::testing::Combine(::testing::Values(kNoProtection, kPku, kMprotect,
- kPkuWithMprotectFallback),
+ ::testing::Combine(::testing::Values(kNoProtection, kPku),
::testing::Bool(), ::testing::Bool()),
PrintMemoryProtectionAndSignalHandlingTestParam);
@@ -306,16 +272,12 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
// second parameter, and not a matcher as {ASSERT_DEATH}.
#if GTEST_HAS_DEATH_TEST
ASSERT_DEATH(
- // The signal handler should crash, but it might "accidentally"
- // succeed if tier-up is running in the background and using mprotect
- // to unprotect the code for the whole process. In that case we
- // repeatedly send the signal until we crash.
- do {
+ {
base::Optional<CodeSpaceWriteScope> write_scope;
if (open_write_scope) write_scope.emplace(native_module());
pthread_kill(pthread_self(), SIGPROF);
base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
- } while (uses_mprotect()), // Only loop for mprotect.
+ },
// Check that the subprocess tried to write, but did not succeed.
::testing::AnyOf(
// non-sanitizer builds:
@@ -343,6 +305,4 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
}
#endif // V8_OS_POSIX && !V8_OS_FUCHSIA
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::wasm
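With the mprotect-based write protection removed, the memory-protection tests only parameterize over `kNoProtection` and `kPku`; `MakeCodeWritable` and the retry loops that tolerated concurrent mprotect flips are gone as well. A short sketch of the resulting write-protection expectation, mirroring the tests above (the fixture, `CompileModule()`, and `native_module()` are assumed):

  CompileModule();
  {
    // Writable only inside the scope (via PKU or pthread JIT protection).
    CodeSpaceWriteScope write_scope(native_module());
    WriteToCode();
  }
  // Outside the scope, a write is expected to crash when code protection
  // is active, which the tests check with ASSERT_DEATH_IF_SUPPORTED.
  AssertCodeEventuallyProtected();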
diff --git a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
index 1510e6d4ec..53bb240d64 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
@@ -31,12 +31,8 @@ class Memory64DecodingTest : public TestWithIsolateAndZone {
module_bytes.insert(module_bytes.end(), module_body_bytes);
static constexpr WasmFeatures kEnabledFeatures{
WasmFeature::kFeature_memory64};
- return DecodeWasmModule(
- kEnabledFeatures, module_bytes.data(),
- module_bytes.data() + module_bytes.size(), false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(), DecodingMethod::kSync,
- wasm::GetWasmEngine()->allocator());
+ return DecodeWasmModule(kEnabledFeatures, base::VectorOf(module_bytes),
+ false, kWasmOrigin);
}
};
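Both module-decoder test files switch `DecodeWasmModule` to take a `base::Vector<const uint8_t>` of wire bytes and drop the counters, metrics-recorder, context-id, decoding-method, and allocator arguments. A hedged sketch of the new call shape, assuming `module_bytes` is a std::vector<uint8_t> holding a complete module including the header:

  ModuleResult result = DecodeWasmModule(
      WasmFeatures::All(), base::VectorOf(module_bytes), false, kWasmOrigin);
  if (result.ok()) {
    const WasmModule* module = result.value().get();
    // Inspect module->functions, module->globals, module->tags, ...
  }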
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 1a1aa8fc42..9b1d676121 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -116,27 +116,27 @@ namespace module_decoder_unittest {
kWasmArrayTypeCode, type, (mutability ? 1 : 0)
#define WASM_FUNCTION_DEF(...) kWasmFunctionTypeCode, __VA_ARGS__
-#define EXPECT_VERIFIES(data) \
- do { \
- ModuleResult _result = DecodeModule(data, data + sizeof(data)); \
- EXPECT_OK(_result); \
+#define EXPECT_VERIFIES(data) \
+ do { \
+ ModuleResult _result = DecodeModule(base::ArrayVector(data)); \
+ EXPECT_OK(_result); \
} while (false)
-#define EXPECT_FAILURE_LEN(data, length) \
- do { \
- ModuleResult _result = DecodeModule(data, data + length); \
- EXPECT_FALSE(_result.ok()); \
+#define EXPECT_FAILURE_LEN(data, length) \
+ do { \
+ ModuleResult _result = DecodeModule(base::VectorOf(data, length)); \
+ EXPECT_FALSE(_result.ok()); \
} while (false)
#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
-#define EXPECT_FAILURE_WITH_MSG(data, msg) \
- do { \
- ModuleResult _result = DecodeModule(data, data + sizeof(data)); \
- EXPECT_FALSE(_result.ok()); \
- if (!_result.ok()) { \
- EXPECT_THAT(_result.error().message(), HasSubstr(msg)); \
- } \
+#define EXPECT_FAILURE_WITH_MSG(data, msg) \
+ do { \
+ ModuleResult _result = DecodeModule(base::ArrayVector(data)); \
+ EXPECT_FALSE(_result.ok()); \
+ if (!_result.ok()) { \
+ EXPECT_THAT(_result.error().message(), HasSubstr(msg)); \
+ } \
} while (false)
#define EXPECT_OFF_END_FAILURE(data, min) \
@@ -200,38 +200,30 @@ class WasmModuleVerifyTest : public TestWithIsolateAndZone {
public:
WasmFeatures enabled_features_ = WasmFeatures::None();
- ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
+ ModuleResult DecodeModule(base::Vector<const uint8_t> module_bytes) {
// Add the wasm magic and version number automatically.
- size_t size = static_cast<size_t>(module_end - module_start);
+ size_t size = module_bytes.size();
byte header[] = {WASM_MODULE_HEADER};
size_t total = sizeof(header) + size;
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
if (size > 0) {
- memcpy(temp + sizeof(header), module_start, size);
+ memcpy(temp + sizeof(header), module_bytes.begin(), size);
}
ModuleResult result = DecodeWasmModule(
- enabled_features_, temp, temp + total, false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(), DecodingMethod::kSync,
- GetWasmEngine()->allocator());
+ enabled_features_, base::VectorOf(temp, total), false, kWasmOrigin);
delete[] temp;
return result;
}
- ModuleResult DecodeModuleNoHeader(const byte* module_start,
- const byte* module_end) {
- return DecodeWasmModule(
- enabled_features_, module_start, module_end, false, kWasmOrigin,
- isolate()->counters(), isolate()->metrics_recorder(),
- v8::metrics::Recorder::ContextId::Empty(), DecodingMethod::kSync,
- GetWasmEngine()->allocator());
+ ModuleResult DecodeModuleNoHeader(base::Vector<const uint8_t> bytes) {
+ return DecodeWasmModule(enabled_features_, bytes, false, kWasmOrigin);
}
};
TEST_F(WasmModuleVerifyTest, WrongMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_FALSE(result.ok());
}
}
@@ -239,7 +231,7 @@ TEST_F(WasmModuleVerifyTest, WrongMagic) {
TEST_F(WasmModuleVerifyTest, WrongVersion) {
for (uint32_t x = 1; x; x <<= 1) {
const byte data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_FALSE(result.ok());
}
}
@@ -247,12 +239,12 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
TEST_F(WasmModuleVerifyTest, WrongSection) {
constexpr byte kInvalidSection = 0x1c;
const byte data[] = {kInvalidSection, 0};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_FALSE(result.ok());
}
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
- ModuleResult result = DecodeModule(nullptr, nullptr);
+ ModuleResult result = DecodeModule(base::VectorOf<uint8_t>(nullptr, 0));
EXPECT_TRUE(result.ok());
}
@@ -267,7 +259,7 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -292,7 +284,7 @@ TEST_F(WasmModuleVerifyTest, S128Global) {
kS128Code, // memory type
0, // immutable
WASM_SIMD_CONSTANT(v.data()), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
if (!CheckHardwareSupportsSimd()) {
EXPECT_NOT_OK(result, "Wasm SIMD unsupported");
} else {
@@ -332,7 +324,7 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
{
// Should decode to two globals.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(2u, result.value()->functions.size());
@@ -375,7 +367,7 @@ TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
TWO_EMPTY_BODIES};
{
// Should decode to two globals.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(2u, result.value()->functions.size());
@@ -424,7 +416,7 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobalWithGlobalInit) {
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -455,7 +447,7 @@ TEST_F(WasmModuleVerifyTest, NullGlobalWithGlobalInit) {
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
std::cout << result.error().message() << std::endl;
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
@@ -654,7 +646,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -724,7 +716,7 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
buffer.insert(buffer.end(), data, data + sizeof(data));
}
- ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
+ ModuleResult result = DecodeModule(base::VectorOf(buffer));
EXPECT_OK(result);
}
}
@@ -741,7 +733,7 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
{
// Should decode to exactly two globals.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -766,7 +758,7 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
TEST_F(WasmModuleVerifyTest, RefNullGlobal) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kFuncRefCode, 1,
WASM_REF_NULL(kFuncRefCode), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -774,7 +766,7 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid1) {
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kRefNullCode, 0,
1, WASM_REF_NULL(0), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "Type index 0 is out of bounds");
}
@@ -782,7 +774,7 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid2) {
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {SECTION(Global, ENTRY_COUNT(1), kFuncRefCode, 1,
kExprRefNull, U32V_5(1000001), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result,
"Type index 1000001 is greater than the maximum number 1000000 "
"of type definitions supported by V8");
@@ -1066,17 +1058,68 @@ TEST_F(WasmModuleVerifyTest, InvalidSupertypeInRecGroup) {
static const byte invalid_supertype[] = {
SECTION(Type, ENTRY_COUNT(1), // --
kWasmRecursiveTypeGroupCode, ENTRY_COUNT(2), // --
- kWasmArrayTypeCode, kI32Code, 0, // --
- kWasmSubtypeCode, 1, 0, // supertype count, supertype
+ kWasmSubtypeCode, 0, // 0 supertypes, non-final
+ kWasmArrayTypeCode, kI32Code, 0, // --
+ kWasmSubtypeCode, 1, 0, // supertype count, supertype
kWasmArrayTypeCode, kI64Code, 0)};
EXPECT_FAILURE_WITH_MSG(invalid_supertype,
"type 1 has invalid explicit supertype 0");
}
+// Tests supertype declaration with 0 supertypes.
+TEST_F(WasmModuleVerifyTest, SuperTypeDeclarationWith0Supertypes) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte zero_supertypes[] = {
+ SECTION(Type, ENTRY_COUNT(1), // --
+ kWasmSubtypeCode, 0, // supertype count
+ kWasmArrayTypeCode, kI32Code, 0)};
+
+ EXPECT_VERIFIES(zero_supertypes);
+}
+
+TEST_F(WasmModuleVerifyTest, NoSupertypeSupertype) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte no_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(1), // --
+ kWasmSubtypeCode, 1, // supertype count
+ 0xff, 0xff, 0xff, 0xff, 0x0f, // supertype = "kNoSuperType"
+ kWasmArrayTypeCode, kI32Code, 0)};
+
+ EXPECT_FAILURE_WITH_MSG(
+ no_supertype, "is greater than the maximum number of type definitions");
+}
+
+TEST_F(WasmModuleVerifyTest, NonSpecifiedFinalType) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(wasm_final_types);
+ static const byte final_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmStructTypeCode, 1, kI32Code, 1, // --
+ kWasmSubtypeCode, 1, 0, // --
+ kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)};
+ EXPECT_FAILURE_WITH_MSG(final_supertype, "type 1 extends final type 0");
+}
+
+TEST_F(WasmModuleVerifyTest, SpecifiedFinalType) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ FLAG_SCOPE(wasm_final_types);
+ static const byte final_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmSubtypeFinalCode, 0, // --
+ kWasmStructTypeCode, 1, kI32Code, 1, // --
+ kWasmSubtypeCode, 1, 0, // --
+ kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)};
+ EXPECT_FAILURE_WITH_MSG(final_supertype, "type 1 extends final type 0");
+}
+
TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.value()->tags.size());
}
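The new tests above pin down the type-section encoding for final types: an entry may be prefixed with `kWasmSubtypeCode` plus a supertype count (non-final) or with `kWasmSubtypeFinalCode`, and with `wasm_final_types` enabled a type that is not explicitly declared non-final cannot be extended. A sketch of such a payload, reusing the helper macros and byte layout from the tests above:

  // Type 1 declares type 0 as its supertype; with wasm_final_types enabled,
  // this is expected to verify only because type 0 is explicitly marked
  // non-final (kWasmSubtypeCode). Leaving that prefix off, or using
  // kWasmSubtypeFinalCode, fails with "type 1 extends final type 0" as the
  // tests above assert.
  static const byte struct_with_supertype[] = {
      SECTION(Type, ENTRY_COUNT(2),                 // --
              kWasmSubtypeCode, 0,                  // type 0: non-final
              kWasmStructTypeCode, 1, kI32Code, 1,  // struct {mut i32}
              kWasmSubtypeCode, 1, 0,               // type 1: supertype 0
              kWasmStructTypeCode, 2, kI32Code, 1, kI32Code, 1)};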
@@ -1086,7 +1129,7 @@ TEST_F(WasmModuleVerifyTest, OneI32Exception) {
SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_x(kI32Code)), // sig#0 (i32)
SECTION(Tag, ENTRY_COUNT(1),
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->tags.size());
@@ -1103,7 +1146,7 @@ TEST_F(WasmModuleVerifyTest, TwoExceptions) {
SECTION(Tag, ENTRY_COUNT(2),
EXCEPTION_ENTRY(SIG_INDEX(1)), // except[0] (sig#1)
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[1] (sig#0)
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->tags.size());
const WasmTag& e0 = result.value()->tags.front();
@@ -1121,8 +1164,8 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_index) {
EXCEPTION_ENTRY(
SIG_INDEX(23)))}; // except[0] (sig#23 [out-of-bounds])
// Should fail decoding exception section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "signature index 23 out of bounds");
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "no signature at index 23 (1 signatures)");
}
TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
@@ -1132,7 +1175,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
EXCEPTION_ENTRY(
SIG_INDEX(0)))}; // except[0] (sig#0 [invalid-return-type])
// Should fail decoding exception section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "tag signature 0 has non-void return");
}
@@ -1142,7 +1185,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_attribute) {
SECTION(Tag, ENTRY_COUNT(1), 23,
SIG_INDEX(0))}; // except[0] (sig#0) [invalid-attribute]
// Should fail decoding exception section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "exception attribute 23 not supported");
}
@@ -1150,14 +1193,14 @@ TEST_F(WasmModuleVerifyTest, TagSectionCorrectPlacement) {
static const byte data[] = {SECTION(Memory, ENTRY_COUNT(0)),
SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Global, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
TEST_F(WasmModuleVerifyTest, TagSectionAfterGlobal) {
static const byte data[] = {SECTION(Global, ENTRY_COUNT(0)),
SECTION(Tag, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result,
"The Tag section must appear before the Global section");
}
@@ -1165,7 +1208,7 @@ TEST_F(WasmModuleVerifyTest, TagSectionAfterGlobal) {
TEST_F(WasmModuleVerifyTest, TagSectionBeforeMemory) {
static const byte data[] = {SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Memory, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
@@ -1174,7 +1217,7 @@ TEST_F(WasmModuleVerifyTest, TagSectionAfterTableBeforeMemory) {
static const byte data[] = {SECTION(Table, ENTRY_COUNT(0)),
SECTION(Tag, ENTRY_COUNT(0)),
SECTION(Memory, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
@@ -1187,7 +1230,7 @@ TEST_F(WasmModuleVerifyTest, TagImport) {
ADD_COUNT('e', 'x'), // tag name
kExternalTag, // import kind
EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->tags.size());
EXPECT_EQ(1u, result.value()->import_table.size());
@@ -1202,7 +1245,7 @@ TEST_F(WasmModuleVerifyTest, ExceptionExport) {
NO_NAME, // --
kExternalTag, // --
EXCEPTION_INDEX(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->tags.size());
EXPECT_EQ(1u, result.value()->export_table.size());
@@ -1229,7 +1272,7 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
SIG_ENTRY_x_xx(kI32Code, kF64Code, kF64Code)), // f64,f64 -> i32
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(3u, result.value()->types.size());
if (result.value()->types.size() == 3) {
@@ -1261,7 +1304,7 @@ TEST_F(WasmModuleVerifyTest, CanonicalTypeIds) {
WASM_ARRAY_DEF(kI32Code, true)) // Array definition
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
const WasmModule* module = result.value().get();
@@ -1296,7 +1339,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
U32V_1(3), // source size
'a', 'b', 'c') // data bytes
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -1346,7 +1389,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
{
EXPECT_VERIFIES(data);
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -1380,7 +1423,7 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
};
{
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.value()->globals.size());
EXPECT_EQ(0u, result.value()->functions.size());
@@ -1468,7 +1511,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
// code ----------------------------------------------------------------
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(1u, result.value()->types.size());
@@ -1555,7 +1598,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
// code ----------------------------------------------------------------
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->types.size());
EXPECT_EQ(1u, result.value()->functions.size());
@@ -1583,7 +1626,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
FUNC_INDEX(2), FUNC_INDEX(3))),
FOUR_EMPTY_BODIES};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->types.size());
EXPECT_EQ(4u, result.value()->functions.size());
@@ -1987,7 +2030,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTables) {
11), // table 2: minimum size
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->tables.size());
@@ -2009,7 +2052,7 @@ TEST_F(WasmModuleVerifyTest, TypedFunctionTable) {
kRefNullCode, 0, // table 0: type
0, 10)}; // table 0: limits
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(ValueType::RefNull(0), result.value()->tables[0].type);
}
@@ -2052,7 +2095,7 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
// Add table limits
data.insert(data.end(), {byte{0}, byte{10}});
- auto result = DecodeModule(data.data(), data.data() + data.size());
+ auto result = DecodeModule(base::VectorOf(data));
EXPECT_NOT_OK(result, "Only reference types can be used as table types");
}
}
@@ -2070,7 +2113,7 @@ TEST_F(WasmModuleVerifyTest, TableWithInitializer) {
0, 10, // table 0: limits
kExprRefFunc, 0, kExprEnd), // table 0: initial value
SECTION(Code, ENTRY_COUNT(1), NOP_BODY)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(ValueType::RefNull(0), result.value()->tables[0].type);
}
@@ -2088,7 +2131,7 @@ TEST_F(WasmModuleVerifyTest, NonNullableTable) {
0, 10, // table 0: limits
kExprRefFunc, 0, kExprEnd), // table 0: initial value
SECTION(Code, ENTRY_COUNT(1), NOP_BODY)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(ValueType::Ref(0), result.value()->tables[0].type);
}
@@ -2120,7 +2163,7 @@ TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
SECTION(Code, ENTRY_COUNT(3), NOP_BODY, NOP_BODY, NOP_BODY),
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(3u, result.value()->compilation_hints.size());
@@ -2158,7 +2201,7 @@ TEST_F(WasmModuleVerifyTest, BranchHinting) {
ADD_COUNT(0, /*no locals*/
WASM_BLOCK(WASM_BR_IF(0, WASM_I32V_1(1))), WASM_END))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.value()->branch_hints.size());
@@ -2172,18 +2215,18 @@ class WasmSignatureDecodeTest : public TestWithZone {
public:
WasmFeatures enabled_features_ = WasmFeatures::None();
- const FunctionSig* DecodeSig(const byte* start, const byte* end) {
+ const FunctionSig* DecodeSig(base::Vector<const uint8_t> bytes) {
Result<const FunctionSig*> res =
- DecodeWasmSignatureForTesting(enabled_features_, zone(), start, end);
+ DecodeWasmSignatureForTesting(enabled_features_, zone(), bytes);
EXPECT_TRUE(res.ok()) << res.error().message() << " at offset "
<< res.error().offset();
return res.ok() ? res.value() : nullptr;
}
- V8_NODISCARD testing::AssertionResult DecodeSigError(const byte* start,
- const byte* end) {
+ V8_NODISCARD testing::AssertionResult DecodeSigError(
+ base::Vector<const uint8_t> bytes) {
Result<const FunctionSig*> res =
- DecodeWasmSignatureForTesting(enabled_features_, zone(), start, end);
+ DecodeWasmSignatureForTesting(enabled_features_, zone(), bytes);
if (res.ok()) {
return testing::AssertionFailure() << "unexpected valid signature";
}
@@ -2195,7 +2238,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {SIG_ENTRY_v_v};
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
ASSERT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
@@ -2209,7 +2252,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_x(ret_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Return type " + ret_type.type.name());
ASSERT_TRUE(sig != nullptr);
@@ -2226,7 +2269,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair param_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Param type " + param_type.type.name());
ASSERT_TRUE(sig != nullptr);
@@ -2245,7 +2288,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueTypePair param_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Param type " + param_type.type.name());
ASSERT_TRUE(sig != nullptr);
@@ -2267,7 +2310,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {
SIG_ENTRY_x_xx(kI32Code, p0_type.code, p1_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("Signature i32(" + p0_type.type.name() + ", " +
p1_type.type.name() + ")");
@@ -2290,7 +2333,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_tt_tt) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_xx_xx(p0_type.code, p1_type.code,
p0_type.code, p1_type.code)};
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
SCOPED_TRACE("p0 = " + p0_type.type.name() +
", p1 = " + p1_type.type.name());
@@ -2309,10 +2352,10 @@ TEST_F(WasmSignatureDecodeTest, Simd) {
WASM_FEATURE_SCOPE(simd);
const byte data[] = {SIG_ENTRY_x(kS128Code)};
if (!CheckHardwareSupportsSimd()) {
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)))
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)))
<< "Type S128 should not be allowed on this hardware";
} else {
- const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(base::ArrayVector(data));
ASSERT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
EXPECT_EQ(1u, sig->return_count());
@@ -2324,14 +2367,14 @@ TEST_F(WasmSignatureDecodeTest, TooManyParams) {
static const byte data[] = {kWasmFunctionTypeCode,
WASM_I32V_3(kV8MaxWasmFunctionParams + 1),
kI32Code, 0};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
TEST_F(WasmSignatureDecodeTest, TooManyReturns) {
for (int i = 0; i < 2; i++) {
byte data[] = {kWasmFunctionTypeCode, 0,
WASM_I32V_3(kV8MaxWasmFunctionReturns + 1), kI32Code};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
}
@@ -2343,7 +2386,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
for (int i = 0; i < p + 1; i++) {
// Should fall off the end for all signatures.
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
}
}
@@ -2354,35 +2397,32 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte data[] = {SIG_ENTRY_x_xx(kI32Code, kI32Code, kI32Code)};
if (i >= arraysize(data)) break;
data[i] = kInvalidType;
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kVoidCode, kI32Code)};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kI32Code, kVoidCode)};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
static const byte data[] = {SIG_ENTRY_x_xx(kI32Code, kI32Code, kVoidCode)};
- EXPECT_TRUE(DecodeSigError(data, data + sizeof(data)));
+ EXPECT_TRUE(DecodeSigError(base::ArrayVector(data)));
}
class WasmFunctionVerifyTest : public TestWithIsolateAndZone {
public:
- FunctionResult DecodeWasmFunction(const ModuleWireBytes& wire_bytes,
- const WasmModule* module,
- const byte* function_start,
- const byte* function_end) {
- WasmFeatures enabled_features;
- return DecodeWasmFunctionForTesting(enabled_features, zone(), wire_bytes,
- module, function_start, function_end,
- isolate()->counters());
+ FunctionResult DecodeWasmFunction(
+ ModuleWireBytes wire_bytes, const WasmModule* module,
+ base::Vector<const uint8_t> function_bytes) {
+ return DecodeWasmFunctionForTesting(WasmFeatures::All(), zone(), wire_bytes,
+ module, function_bytes);
}
};
@@ -2402,8 +2442,8 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
};
WasmModule module;
- FunctionResult result = DecodeWasmFunction(ModuleWireBytes({}), &module, data,
- data + sizeof(data));
+ FunctionResult result =
+ DecodeWasmFunction(ModuleWireBytes({}), &module, base::ArrayVector(data));
EXPECT_OK(result);
if (result.value() && result.ok()) {
@@ -2521,7 +2561,7 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
0, // exported
WASM_INIT_EXPR_I32V_1(33)), // init
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->globals.size());
@@ -2658,7 +2698,7 @@ TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
SECTION(Export, ENTRY_COUNT(0)), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->functions.size());
@@ -2686,7 +2726,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne) {
kExternalFunction, // --
FUNC_INDEX(0)), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->functions.size());
@@ -2723,7 +2763,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
FUNC_INDEX(0)), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.value()->functions.size());
@@ -2746,7 +2786,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableThree) {
kExternalFunction,
FUNC_INDEX(2)), // --
THREE_EMPTY_BODIES};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
EXPECT_EQ(3u, result.value()->functions.size());
@@ -2824,7 +2864,7 @@ TEST_F(WasmModuleVerifyTest, FunctionBodySizeLimit) {
size_t total = sizeof(data) + body_size;
byte* buffer = reinterpret_cast<byte*>(calloc(1, total));
memcpy(buffer, data, sizeof(data));
- ModuleResult result = DecodeModule(buffer, buffer + total);
+ ModuleResult result = DecodeModule(base::VectorOf(buffer, total));
if (body_size <= kV8MaxWasmFunctionSize) {
EXPECT_TRUE(result.ok());
} else {
@@ -2940,13 +2980,13 @@ TEST_F(WasmModuleVerifyTest, FunctionSectionWithoutCodeSection) {
TYPE_SECTION(1, SIG_ENTRY_v_v), // Type section.
FUNCTION_SECTION(1, 0), // Function section.
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "function count is 1, but code section is absent");
}
TEST_F(WasmModuleVerifyTest, CodeSectionWithoutFunctionSection) {
static const byte data[] = {ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "function body count 1 mismatch (0 expected)");
}
@@ -2988,10 +3028,10 @@ TEST_F(WasmModuleVerifyTest, Section_Name_No_UTF8) {
class WasmModuleCustomSectionTest : public TestWithIsolateAndZone {
public:
- void CheckSections(const byte* module_start, const byte* module_end,
+ void CheckSections(base::Vector<const uint8_t> wire_bytes,
const CustomSectionOffset* expected, size_t num_expected) {
std::vector<CustomSectionOffset> custom_sections =
- DecodeCustomSections(module_start, module_end);
+ DecodeCustomSections(wire_bytes);
CHECK_EQ(num_expected, custom_sections.size());
@@ -3026,7 +3066,7 @@ TEST_F(WasmModuleCustomSectionTest, ThreeUnknownSections) {
{{27, 8}, {28, 5}, {33, 2}}, // --
};
- CheckSections(data, data + sizeof(data), expected, arraysize(expected));
+ CheckSections(base::ArrayVector(data), expected, arraysize(expected));
}
TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
@@ -3045,18 +3085,18 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
{{29, 8}, {30, 5}, {35, 2}}, // --
};
- CheckSections(data, data + sizeof(data), expected, arraysize(expected));
+ CheckSections(base::ArrayVector(data), expected, arraysize(expected));
}
TEST_F(WasmModuleVerifyTest, SourceMappingURLSection) {
static const byte data[] = {
WASM_MODULE_HEADER,
SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c')};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(WasmDebugSymbols::Type::SourceMap,
result.value()->debug_symbols.type);
- ModuleWireBytes wire_bytes(data, data + sizeof(data));
+ ModuleWireBytes wire_bytes(base::ArrayVector(data));
WasmName external_url =
wire_bytes.GetNameOrNull(result.value()->debug_symbols.external_url);
EXPECT_EQ("src/xyz.c", std::string(external_url.data(), external_url.size()));
@@ -3066,7 +3106,7 @@ TEST_F(WasmModuleVerifyTest, BadSourceMappingURLSection) {
static const byte data[] = {
WASM_MODULE_HEADER,
SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c')};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(WasmDebugSymbols::Type::None, result.value()->debug_symbols.type);
EXPECT_EQ(0u, result.value()->debug_symbols.external_url.length());
@@ -3076,11 +3116,11 @@ TEST_F(WasmModuleVerifyTest, MultipleSourceMappingURLSections) {
static const byte data[] = {WASM_MODULE_HEADER,
SECTION_SRC_MAP('a', 'b', 'c'),
SECTION_SRC_MAP('p', 'q', 'r')};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(WasmDebugSymbols::Type::SourceMap,
result.value()->debug_symbols.type);
- ModuleWireBytes wire_bytes(data, data + sizeof(data));
+ ModuleWireBytes wire_bytes(base::ArrayVector(data));
WasmName external_url =
wire_bytes.GetNameOrNull(result.value()->debug_symbols.external_url);
EXPECT_EQ("abc", std::string(external_url.data(), external_url.size()));
@@ -3090,7 +3130,7 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
static const byte data[] = {
SECTION_NAMES(0, ADD_COUNT(ADD_COUNT('a', 'b', 'c'))),
SECTION_NAMES(0, ADD_COUNT(ADD_COUNT('p', 'q', 'r', 's')))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(3u, result.value()->name.length());
}
@@ -3098,7 +3138,7 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
TEST_F(WasmModuleVerifyTest, BadNameSection) {
static const byte data[] = {SECTION_NAMES(
0, ADD_COUNT(ADD_COUNT('s', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c')))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(0u, result.value()->name.length());
}
@@ -3229,7 +3269,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
static const byte data[] = {SECTION(Code, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result,
"The DataCount section must appear before the Code section");
}
@@ -3237,7 +3277,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
TEST_F(WasmModuleVerifyTest, DataCountSectionBeforeElement) {
static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(Element, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Element>");
}
@@ -3253,14 +3293,14 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
SECTION(Element, ENTRY_COUNT(0)) // Element section.
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "unexpected section <Element>");
}
TEST_F(WasmModuleVerifyTest, MultipleDataCountSections) {
static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "Multiple DataCount sections not allowed");
}
@@ -3279,7 +3319,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_greater) {
SECTION(Memory, ENTRY_COUNT(1), 0, 1), // Memory section.
SECTION(DataCount, ENTRY_COUNT(3)), // DataCount section.
SECTION(Data, ENTRY_COUNT(0))}; // Data section.
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "data segments count 0 mismatch (3 expected)");
}
@@ -3289,14 +3329,14 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_less) {
SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0, // Data section.
WASM_INIT_EXPR_I32V_1(12), ADD_COUNT('a', 'b', 'c'))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "data segments count 1 mismatch (0 expected)");
}
TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
static const byte data[] = {SECTION(Memory, ENTRY_COUNT(1), 0, 1),
SECTION(DataCount, ENTRY_COUNT(1))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "data segments count 0 mismatch (1 expected)");
}
@@ -3313,7 +3353,7 @@ TEST_F(WasmModuleVerifyTest, GcStructIdsPass) {
WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(WASM_OPT_REF(0), true),
STRUCT_FIELD(WASM_OPT_REF(2), true)),
WASM_ARRAY_DEF(WASM_OPT_REF(0), true))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_OK(result);
}
@@ -3321,28 +3361,64 @@ TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInGlobal) {
WASM_FEATURE_SCOPE(typed_funcref);
static const byte data[] = {
SECTION(Global, ENTRY_COUNT(1), kRefCode, 0, WASM_REF_NULL(0), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "Type index 0 is out of bounds");
}
TEST_F(WasmModuleVerifyTest, OutOfBoundsTypeInType) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {SECTION(
+ Type, ENTRY_COUNT(1),
+ WASM_STRUCT_DEF(FIELD_COUNT(1),
+ STRUCT_FIELD(WASM_REF_TYPE(ValueType::Ref(1)), true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "Type index 1 is out of bounds");
+}
+
+TEST_F(WasmModuleVerifyTest, RecursiveTypeOutsideRecursiveGroup) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1),
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kRefCode, true)))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "Type index 1 is out of bounds");
+ WASM_STRUCT_DEF(
+ FIELD_COUNT(1),
+ STRUCT_FIELD(WASM_REF_TYPE(ValueType::RefNull(0)), true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_OK(result);
+}
+
+TEST_F(WasmModuleVerifyTest, OutOfBoundsSupertype) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1),
+ kWasmSubtypeCode, ENTRY_COUNT(1), 1,
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "type 0: supertype 1 out of bounds");
}
-TEST_F(WasmModuleVerifyTest, ForwardSupertype) {
+TEST_F(WasmModuleVerifyTest, ForwardSupertypeSameType) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(1),
kWasmSubtypeCode, ENTRY_COUNT(1), 0,
- WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kRefCode, true)))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
+ EXPECT_NOT_OK(result, "type 0: forward-declared supertype 0");
+}
+
+TEST_F(WasmModuleVerifyTest, ForwardSupertypeSameRecGroup) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), kWasmRecursiveTypeGroupCode, ENTRY_COUNT(2),
+ kWasmSubtypeCode, ENTRY_COUNT(1), 0,
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)))};
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "type 0: forward-declared supertype 0");
}
@@ -3353,7 +3429,7 @@ TEST_F(WasmModuleVerifyTest, IllegalPackedFields) {
static const byte data[] = {
SECTION(Global, ENTRY_COUNT(1), kI16Code, 0, WASM_INIT_EXPR_I32V_1(13))};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(base::ArrayVector(data));
EXPECT_NOT_OK(result, "invalid value type");
}
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index dfeb4739e2..857520f364 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -20,10 +20,10 @@ namespace wasm {
struct MockStreamingResult {
size_t num_sections = 0;
size_t num_functions = 0;
- WasmError error;
- base::OwnedVector<uint8_t> received_bytes;
+ bool error;
+ base::OwnedVector<const uint8_t> received_bytes;
- bool ok() const { return !error.has_error(); }
+ bool ok() const { return !error; }
MockStreamingResult() = default;
};
@@ -42,13 +42,13 @@ class MockStreamingProcessor : public StreamingProcessor {
bool ProcessModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) override {
Decoder decoder(bytes.begin(), bytes.end());
- NoTracer no_tracer;
- uint32_t magic_word = decoder.consume_u32("wasm magic", no_tracer);
+ uint32_t magic_word = decoder.consume_u32("wasm magic", ITracer::NoTrace);
if (decoder.failed() || magic_word != kWasmMagic) {
result_->error = WasmError(0, "expected wasm magic");
return false;
}
- uint32_t magic_version = decoder.consume_u32("wasm version", no_tracer);
+ uint32_t magic_version =
+ decoder.consume_u32("wasm version", ITracer::NoTrace);
if (decoder.failed() || magic_version != kWasmVersion) {
result_->error = WasmError(4, "expected wasm version");
return false;
@@ -72,22 +72,19 @@ class MockStreamingProcessor : public StreamingProcessor {
}
// Process a function body.
- void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
+ bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t offset) override {
++result_->num_functions;
+ return true;
}
void OnFinishedChunk() override {}
// Finish the processing of the stream.
- void OnFinishedStream(base::OwnedVector<uint8_t> bytes) override {
+ void OnFinishedStream(base::OwnedVector<const uint8_t> bytes,
+ bool after_error) override {
result_->received_bytes = std::move(bytes);
- }
-
- // Report an error detected in the StreamingDecoder.
- void OnError(const WasmError& error) override {
- result_->error = error;
- CHECK(!result_->ok());
+ result_->error = after_error;
}
void OnAbort() override {}
@@ -119,8 +116,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
}
}
- void ExpectFailure(base::Vector<const uint8_t> data, uint32_t error_offset,
- const char* message) {
+ void ExpectFailure(base::Vector<const uint8_t> data) {
for (int split = 0; split <= data.length(); ++split) {
MockStreamingResult result;
auto stream = StreamingDecoder::CreateAsyncStreamingDecoder(
@@ -129,8 +125,7 @@ class WasmStreamingDecoderTest : public ::testing::Test {
stream->OnBytesReceived(data.SubVector(split, data.length()));
stream->Finish();
EXPECT_FALSE(result.ok());
- EXPECT_EQ(error_offset, result.error.offset());
- EXPECT_EQ(message, result.error.message());
+ EXPECT_TRUE(result.error);
}
}
};
@@ -154,8 +149,7 @@ TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
EXPECT_FALSE(result.ok());
}
for (uint32_t length = 1; length < sizeof(data); ++length) {
- ExpectFailure(base::VectorOf(data, length), length - 1,
- "unexpected end of stream");
+ ExpectFailure(base::VectorOf(data, length));
}
}
@@ -167,14 +161,14 @@ TEST_F(WasmStreamingDecoderTest, MagicAndVersion) {
TEST_F(WasmStreamingDecoderTest, BadMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
- ExpectFailure(base::ArrayVector(data), 0, "expected wasm magic");
+ ExpectFailure(base::ArrayVector(data));
}
}
TEST_F(WasmStreamingDecoderTest, BadVersion) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
- ExpectFailure(base::ArrayVector(data), 4, "expected wasm version");
+ ExpectFailure(base::ArrayVector(data));
}
}
@@ -261,8 +255,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
0x0, // 4
0x0 // 5
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "unexpected end of stream");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
@@ -273,8 +266,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
0x6, // Section Length
0x0 // Payload
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "unexpected end of stream");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
@@ -288,8 +280,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
0x80, // --
0x80, // --
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "expected section length");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
@@ -404,8 +395,7 @@ TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
0x1, // Number of Functions
0x0, // Function Length -- ERROR
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "invalid function length (0)");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
@@ -462,8 +452,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthZero) {
kCodeSectionCode, // Section ID
0x0, // Section Length
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "code section cannot have size 0");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
@@ -484,8 +473,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "not all code section bytes were used");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
@@ -496,8 +484,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
0xD, // Section Length
0x0, // Number of Functions
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "not all code section bytes were used");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
@@ -518,8 +505,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
0x1, // Function Length <8> -- ERROR
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 2,
- "read past code section end");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
@@ -542,7 +528,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), 12, "invalid code section length");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
@@ -567,7 +553,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), 15, "read past code section end");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
@@ -588,8 +574,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 1,
- "unexpected end of stream");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
@@ -607,8 +592,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
0x1, // Function Length
0x0 // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 3,
- "not all code section bytes were used");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
@@ -626,8 +610,7 @@ TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 5,
- "code section can only appear once");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, UnknownSection) {
@@ -668,14 +651,13 @@ TEST_F(WasmStreamingDecoderTest, UnknownSectionSandwich) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(base::ArrayVector(data), sizeof(data) - 5,
- "code section can only appear once");
+ ExpectFailure(base::ArrayVector(data));
}
TEST_F(WasmStreamingDecoderTest, InvalidSectionCode) {
uint8_t kInvalidSectionCode = 61;
const uint8_t data[] = {WASM_MODULE_HEADER, SECTION(Invalid)};
- ExpectFailure(base::ArrayVector(data), 8, "invalid section code");
+ ExpectFailure(base::ArrayVector(data));
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/struct-types-unittest.cc b/deps/v8/test/unittests/wasm/struct-types-unittest.cc
new file mode 100644
index 0000000000..3ddf8b84f7
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/struct-types-unittest.cc
@@ -0,0 +1,70 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/struct-types.h"
+
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8::internal::wasm {
+namespace struct_types_unittest {
+
+class StructTypesTest : public TestWithZone {};
+
+TEST_F(StructTypesTest, Empty) {
+ StructType::Builder builder(this->zone(), 0);
+ StructType* type = builder.Build();
+ EXPECT_EQ(0u, type->total_fields_size());
+}
+
+TEST_F(StructTypesTest, OneField) {
+ StructType::Builder builder(this->zone(), 1);
+ builder.AddField(kWasmI32, true);
+ StructType* type = builder.Build();
+ uint32_t expected = std::max(kUInt32Size, kTaggedSize);
+ EXPECT_EQ(expected, type->total_fields_size());
+ EXPECT_EQ(0u, type->field_offset(0));
+}
+
+TEST_F(StructTypesTest, Packing) {
+ StructType::Builder builder(this->zone(), 5);
+ builder.AddField(kWasmI64, true);
+ builder.AddField(kWasmI8, true);
+ builder.AddField(kWasmI32, true);
+ builder.AddField(kWasmI16, true);
+ builder.AddField(kWasmI8, true);
+ StructType* type = builder.Build();
+ EXPECT_EQ(16u, type->total_fields_size());
+ EXPECT_EQ(0u, type->field_offset(0));
+ EXPECT_EQ(8u, type->field_offset(1));
+ EXPECT_EQ(12u, type->field_offset(2));
+ EXPECT_EQ(10u, type->field_offset(3));
+ EXPECT_EQ(9u, type->field_offset(4));
+}
+
+TEST_F(StructTypesTest, CopyingOffsets) {
+ StructType::Builder builder(this->zone(), 5);
+ builder.AddField(kWasmI64, true);
+ builder.AddField(kWasmI8, true);
+ builder.AddField(kWasmI32, true);
+ builder.AddField(kWasmI16, true);
+ builder.AddField(kWasmI8, true);
+ StructType* type = builder.Build();
+
+ StructType::Builder copy_builder(this->zone(), type->field_count());
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ copy_builder.AddField(type->field(i), type->mutability(i),
+ type->field_offset(i));
+ }
+ copy_builder.set_total_fields_size(type->total_fields_size());
+
+ StructType* copy = copy_builder.Build();
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ EXPECT_EQ(type->field_offset(i), copy->field_offset(i));
+ }
+ EXPECT_EQ(type->total_fields_size(), copy->total_fields_size());
+}
+
+} // namespace struct_types_unittest
+} // namespace v8::internal::wasm
diff --git a/deps/v8/test/unittests/wasm/subtyping-unittest.cc b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
index 2602be49ba..fd085d0e75 100644
--- a/deps/v8/test/unittests/wasm/subtyping-unittest.cc
+++ b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
@@ -25,25 +25,25 @@ FieldInit mut(ValueType type) { return FieldInit(type, true); }
FieldInit immut(ValueType type) { return FieldInit(type, false); }
void DefineStruct(WasmModule* module, std::initializer_list<FieldInit> fields,
- uint32_t supertype = kNoSuperType,
+ uint32_t supertype = kNoSuperType, bool is_final = false,
bool in_singleton_rec_group = true) {
- StructType::Builder builder(module->signature_zone.get(),
+ StructType::Builder builder(&module->signature_zone,
static_cast<uint32_t>(fields.size()));
for (FieldInit field : fields) {
builder.AddField(field.first, field.second);
}
- module->add_struct_type(builder.Build(), supertype);
+ module->add_struct_type(builder.Build(), supertype, is_final);
if (in_singleton_rec_group) {
GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
}
}
void DefineArray(WasmModule* module, FieldInit element_type,
- uint32_t supertype = kNoSuperType,
+ uint32_t supertype = kNoSuperType, bool is_final = false,
bool in_singleton_rec_group = true) {
- module->add_array_type(module->signature_zone->New<ArrayType>(
+ module->add_array_type(module->signature_zone.New<ArrayType>(
element_type.first, element_type.second),
- supertype);
+ supertype, is_final);
if (in_singleton_rec_group) {
GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
}
@@ -52,11 +52,11 @@ void DefineArray(WasmModule* module, FieldInit element_type,
void DefineSignature(WasmModule* module,
std::initializer_list<ValueType> params,
std::initializer_list<ValueType> returns,
- uint32_t supertype = kNoSuperType,
+ uint32_t supertype = kNoSuperType, bool is_final = false,
bool in_singleton_rec_group = true) {
module->add_signature(
- FunctionSig::Build(module->signature_zone.get(), returns, params),
- supertype);
+ FunctionSig::Build(&module->signature_zone, returns, params), supertype,
+ is_final);
if (in_singleton_rec_group) {
GetTypeCanonicalizer()->AddRecursiveGroup(module, 1);
}
@@ -64,10 +64,9 @@ void DefineSignature(WasmModule* module,
TEST_F(WasmSubtypingTest, Subtyping) {
FLAG_SCOPE(experimental_wasm_gc);
- FLAG_VALUE_SCOPE(wasm_gc_structref_as_dataref, false);
v8::internal::AccountingAllocator allocator;
- WasmModule module1_(std::make_unique<Zone>(&allocator, ZONE_NAME));
- WasmModule module2_(std::make_unique<Zone>(&allocator, ZONE_NAME));
+ WasmModule module1_;
+ WasmModule module2_;
WasmModule* module1 = &module1_;
WasmModule* module2 = &module2_;
@@ -97,34 +96,43 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Rec. group.
/* 18 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17,
false);
- /* 19 */ DefineArray(module, {mut(refNull(21))}, kNoSuperType, false);
+ /* 19 */ DefineArray(module, {mut(refNull(21))}, kNoSuperType, false,
+ false);
/* 20 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
- false);
- /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false);
+ false, false);
+ /* 21 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false, false);
GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
// Identical rec. group.
/* 22 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17,
- false);
- /* 23 */ DefineArray(module, {mut(refNull(25))}, kNoSuperType, false);
+ false, false);
+ /* 23 */ DefineArray(module, {mut(refNull(25))}, kNoSuperType, false,
+ false);
/* 24 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
- false);
- /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false);
+ false, false);
+ /* 25 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 24, false, false);
GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
// Nonidentical rec. group: the last function extends a type outside the
// recursive group.
/* 26 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(17))}, 17,
- false);
- /* 27 */ DefineArray(module, {mut(refNull(29))}, kNoSuperType, false);
+ false, false);
+ /* 27 */ DefineArray(module, {mut(refNull(29))}, kNoSuperType, false,
+ false);
/* 28 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, kNoSuperType,
- false);
- /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false);
+ false, false);
+ /* 29 */ DefineSignature(module, {kWasmI32}, {kWasmI32}, 20, false, false);
GetTypeCanonicalizer()->AddRecursiveGroup(module, 4);
/* 30 */ DefineStruct(module, {mut(kWasmI32), immut(refNull(18))}, 18);
/* 31 */ DefineStruct(
module, {mut(ref(2)), immut(refNull(2)), immut(kWasmS128)}, 1);
+
+ // Final types
+ /* 32 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, true);
+ /* 33 */ DefineStruct(module, {mut(kWasmI32), mut(kWasmI64)}, 32, true);
+ /* 34 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, true);
+ /* 35 */ DefineStruct(module, {mut(kWasmI32)}, kNoSuperType, false);
}
constexpr ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
@@ -162,12 +170,13 @@ TEST_F(WasmSubtypingTest, Subtyping) {
#define DISTINCT(index1, index2) \
EXPECT_FALSE(EquivalentTypes(ValueType::RefNull(index1), \
ValueType::RefNull(index2), module1, module));
-// Union always expresses the result in terms of module1.
-#define UNION(type1, type2, type_result) \
- EXPECT_EQ(Union(type1, type2, module1, module), \
- TypeInModule(type_result, module1))
-// Intersection might return either module, so we have a version which checks
-// the module and one which deos not.
+// For union and intersection, we have a version that also checks the module,
+// and one that does not.
+#define UNION(type1, type2, type_result) \
+ EXPECT_EQ(Union(type1, type2, module1, module).type, type_result)
+#define UNION_M(type1, type2, type_result, module_result) \
+ EXPECT_EQ(Union(type1, type2, module1, module), \
+ TypeInModule(type_result, module_result))
#define INTERSECTION(type1, type2, type_result) \
EXPECT_EQ(Intersection(type1, type2, module1, module).type, type_result)
#define INTERSECTION_M(type1, type2, type_result, module_result) \
@@ -306,32 +315,38 @@ TEST_F(WasmSubtypingTest, Subtyping) {
VALID_SUBTYPE(ref(10), ref(10));
VALID_SUBTYPE(ref(11), ref(11));
- {
- // Canonicalization tests.
+ // Canonicalization tests.
- // Groups should only be canonicalized to identical groups.
- IDENTICAL(18, 22);
- IDENTICAL(19, 23);
- IDENTICAL(20, 24);
- IDENTICAL(21, 25);
+ // Groups should only be canonicalized to identical groups.
+ IDENTICAL(18, 22);
+ IDENTICAL(19, 23);
+ IDENTICAL(20, 24);
+ IDENTICAL(21, 25);
- DISTINCT(18, 26);
- DISTINCT(19, 27);
- DISTINCT(20, 28);
- DISTINCT(21, 29);
+ DISTINCT(18, 26);
+ DISTINCT(19, 27);
+ DISTINCT(20, 28);
+ DISTINCT(21, 29);
- // A type should not be canonicalized to an identical one with a different
- // group structure.
- DISTINCT(18, 17);
+ // A type should not be canonicalized to an identical one with a different
+ // group structure.
+ DISTINCT(18, 17);
- // A subtype should also be subtype of an equivalent type.
- VALID_SUBTYPE(ref(30), ref(18));
- VALID_SUBTYPE(ref(30), ref(22));
- NOT_SUBTYPE(ref(30), ref(26));
+ // A subtype should also be subtype of an equivalent type.
+ VALID_SUBTYPE(ref(30), ref(18));
+ VALID_SUBTYPE(ref(30), ref(22));
+ NOT_SUBTYPE(ref(30), ref(26));
- // Rtts of identical types are subtype-related.
- SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17));
- }
+ // Final types
+
+ // A type is not a valid subtype of a final type.
+ NOT_VALID_SUBTYPE(ref(33), ref(32));
+ IDENTICAL(32, 34);
+ // A final and a non-final
+ DISTINCT(32, 35);
+
+ // Rtts of identical types are subtype-related.
+ SUBTYPE(ValueType::Rtt(8), ValueType::Rtt(17));
// Unions and intersections.
@@ -395,6 +410,21 @@ TEST_F(WasmSubtypingTest, Subtyping) {
UNION(kWasmAnyRef, kWasmNullRef, kWasmAnyRef);
UNION(kWasmExternRef, kWasmNullExternRef, kWasmExternRef);
UNION(kWasmFuncRef, kWasmNullFuncRef, kWasmFuncRef);
+ UNION(kWasmFuncRef, kWasmStructRef, kWasmBottom);
+ UNION(kWasmFuncRef, kWasmArrayRef, kWasmBottom);
+ UNION(kWasmFuncRef, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmFuncRef, kWasmEqRef, kWasmBottom);
+ UNION(kWasmStringRef, kWasmAnyRef, kWasmAnyRef);
+ UNION(kWasmStringRef, kWasmStructRef, kWasmAnyRef);
+ UNION(kWasmStringRef, kWasmArrayRef, kWasmAnyRef);
+ UNION(kWasmStringRef, kWasmFuncRef, kWasmBottom);
+ UNION(kWasmStringViewIter, kWasmStringRef, kWasmBottom);
+ UNION(kWasmStringViewWtf8, kWasmStringRef, kWasmBottom);
+ UNION(kWasmStringViewWtf16, kWasmStringRef, kWasmBottom);
+ UNION(kWasmStringViewIter, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmStringViewWtf8, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmStringViewWtf16, kWasmAnyRef, kWasmBottom);
+ UNION(kWasmNullFuncRef, kWasmEqRef, kWasmBottom);
INTERSECTION(kWasmExternRef, kWasmEqRef, kWasmBottom);
INTERSECTION(kWasmExternRef, kWasmStructRef, kWasmBottom);
@@ -443,11 +473,15 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Abstract vs indexed types.
UNION(kWasmFuncRef, function_type, kWasmFuncRef);
+ UNION(kWasmFuncRef, struct_type, kWasmBottom);
+ UNION(kWasmFuncRef, array_type, kWasmBottom);
INTERSECTION(kWasmFuncRef, struct_type, kWasmBottom);
INTERSECTION(kWasmFuncRef, array_type, kWasmBottom);
- INTERSECTION(kWasmFuncRef, function_type, function_type);
+ INTERSECTION_M(kWasmFuncRef, function_type, function_type, module);
UNION(kWasmNullFuncRef, function_type, function_type.AsNullable());
+ UNION(kWasmNullFuncRef, struct_type, kWasmBottom);
+ UNION(kWasmNullFuncRef, array_type, kWasmBottom);
INTERSECTION(kWasmNullFuncRef, struct_type, kWasmBottom);
INTERSECTION(kWasmNullFuncRef, struct_type.AsNullable(), kWasmBottom);
INTERSECTION(kWasmNullFuncRef, array_type, kWasmBottom);
@@ -464,7 +498,8 @@ TEST_F(WasmSubtypingTest, Subtyping) {
UNION(kWasmStructRef, struct_type, kWasmStructRef);
UNION(kWasmStructRef, array_type, kWasmEqRef);
- INTERSECTION(kWasmStructRef, struct_type, struct_type);
+ UNION(kWasmStructRef, function_type, kWasmBottom);
+ INTERSECTION_M(kWasmStructRef, struct_type, struct_type, module);
INTERSECTION(kWasmStructRef, array_type, kWasmBottom);
INTERSECTION(kWasmStructRef, function_type, kWasmBottom);
@@ -476,17 +511,22 @@ TEST_F(WasmSubtypingTest, Subtyping) {
UNION(kWasmArrayRef, struct_type, kWasmEqRef);
UNION(kWasmArrayRef, array_type, kWasmArrayRef);
+ UNION(kWasmArrayRef, function_type, kWasmBottom);
INTERSECTION(kWasmArrayRef, struct_type, kWasmBottom);
- INTERSECTION(kWasmArrayRef, array_type, array_type);
+ INTERSECTION_M(kWasmArrayRef, array_type, array_type, module);
INTERSECTION(kWasmArrayRef, function_type, kWasmBottom);
- UNION(kWasmNullRef, struct_type, struct_type.AsNullable());
- UNION(kWasmNullRef, array_type, array_type.AsNullable());
- UNION(kWasmNullRef, function_type, function_type.AsNullable());
+ UNION_M(kWasmNullRef, struct_type, struct_type.AsNullable(), module);
+ UNION_M(kWasmNullRef, array_type, array_type.AsNullable(), module);
+ UNION(kWasmNullRef, function_type, kWasmBottom);
INTERSECTION(kWasmNullRef, struct_type, kWasmBottom);
INTERSECTION(kWasmNullRef, array_type, kWasmBottom);
INTERSECTION(kWasmNullRef, function_type, kWasmBottom);
+ UNION(struct_type, kWasmStringRef, kWasmAnyRef);
+ UNION(array_type, kWasmStringRef, kWasmAnyRef);
+ UNION(function_type, kWasmStringRef, kWasmBottom);
+
// Indexed types of different kinds.
UNION(struct_type, array_type, kWasmEqRef.AsNonNull());
INTERSECTION(struct_type, array_type, kWasmBottom);
@@ -502,11 +542,11 @@ TEST_F(WasmSubtypingTest, Subtyping) {
// Concrete types of the same kind.
// Subtyping relation.
- UNION(refNull(4), ref(1), refNull(1));
+ UNION_M(refNull(4), ref(1), refNull(1), module1);
INTERSECTION_M(refNull(4), ref(1), ref(4), module1);
INTERSECTION_M(refNull(1), refNull(4), refNull(4), module);
// Common ancestor.
- UNION(ref(4), ref(31), ref(1));
+ UNION_M(ref(4), ref(31), ref(1), module1);
INTERSECTION(ref(4), ref(31), kWasmBottom);
// No common ancestor.
UNION(ref(6), refNull(2), kWasmArrayRef.AsNullable());
@@ -524,6 +564,7 @@ TEST_F(WasmSubtypingTest, Subtyping) {
#undef IDENTICAL
#undef DISTINCT
#undef UNION
+#undef UNION_M
#undef INTERSECTION
#undef INTERSECTION_M
}
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-arm64-unittest.cc
index 8225944e2a..d06c780c35 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-arm64-unittest.cc
@@ -33,17 +33,25 @@
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
+#if V8_TRAP_HANDLER_SUPPORTED
+
+#if V8_HOST_ARCH_ARM64 && !V8_OS_DARWIN
+#error Unsupported platform
+#endif
+
namespace v8 {
namespace internal {
namespace wasm {
namespace {
+#if V8_HOST_ARCH_X64
constexpr Register scratch = r10;
+#endif
bool g_test_handler_executed = false;
#if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
struct sigaction g_old_segv_action;
-struct sigaction g_old_fpe_action;
-struct sigaction g_old_bus_action; // We get SIGBUS on Mac sometimes.
+struct sigaction g_old_other_action; // FPE or TRAP, depending on x64 or arm64.
+struct sigaction g_old_bus_action; // We get SIGBUS on Mac sometimes.
#elif V8_OS_WIN
void* g_registered_handler = nullptr;
#endif
@@ -107,8 +115,15 @@ class TrapHandlerTest : public TestWithIsolate,
EXPECT_EQ(0, sigaction(SIGSEGV, &action, &g_old_segv_action));
// SIGBUS happens for wasm oob memory accesses on macOS.
EXPECT_EQ(0, sigaction(SIGBUS, &action, &g_old_bus_action));
+#if V8_HOST_ARCH_X64
// SIGFPE to simulate crashes which are not handled by the trap handler.
- EXPECT_EQ(0, sigaction(SIGFPE, &action, &g_old_fpe_action));
+ EXPECT_EQ(0, sigaction(SIGFPE, &action, &g_old_other_action));
+#elif V8_HOST_ARCH_ARM64
+ // SIGTRAP to simulate crashes which are not handled by the trap handler.
+ EXPECT_EQ(0, sigaction(SIGTRAP, &action, &g_old_other_action));
+#else
+#error Unsupported platform
+#endif
#elif V8_OS_WIN
g_registered_handler =
AddVectoredExceptionHandler(/*first=*/0, TestHandler);
@@ -129,8 +144,14 @@ class TrapHandlerTest : public TestWithIsolate,
// The test handler cleans up the signal handler setup in the test. If the
// test handler was not called, we have to do the cleanup ourselves.
EXPECT_EQ(0, sigaction(SIGSEGV, &g_old_segv_action, nullptr));
- EXPECT_EQ(0, sigaction(SIGFPE, &g_old_fpe_action, nullptr));
EXPECT_EQ(0, sigaction(SIGBUS, &g_old_bus_action, nullptr));
+#if V8_HOST_ARCH_X64
+ EXPECT_EQ(0, sigaction(SIGFPE, &g_old_other_action, nullptr));
+#elif V8_HOST_ARCH_ARM64
+ EXPECT_EQ(0, sigaction(SIGTRAP, &g_old_other_action, nullptr));
+#else
+#error Unsupported platform
+#endif
#elif V8_OS_WIN
RemoveVectoredExceptionHandler(g_registered_handler);
g_registered_handler = nullptr;
@@ -147,7 +168,9 @@ class TrapHandlerTest : public TestWithIsolate,
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
recovery_buffer_->CreateView());
int recovery_offset = __ pc_offset();
+#if V8_HOST_ARCH_X64
__ Pop(scratch);
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -167,16 +190,24 @@ class TrapHandlerTest : public TestWithIsolate,
// Reset the signal handler, to avoid that this signal handler is called
// repeatedly.
sigaction(SIGSEGV, &g_old_segv_action, nullptr);
- sigaction(SIGFPE, &g_old_fpe_action, nullptr);
+#if V8_HOST_ARCH_X64
+ sigaction(SIGFPE, &g_old_other_action, nullptr);
+#elif V8_HOST_ARCH_ARM64
+ sigaction(SIGTRAP, &g_old_other_action, nullptr);
+#else
+#error Unsupported platform
+#endif
sigaction(SIGBUS, &g_old_bus_action, nullptr);
g_test_handler_executed = true;
// Set the $rip to the recovery code.
ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
-#if V8_OS_LINUX
- uc->uc_mcontext.gregs[REG_RIP] = g_recovery_address;
-#elif V8_OS_DARWIN
+#if V8_OS_DARWIN && V8_HOST_ARCH_ARM64
+ uc->uc_mcontext->__ss.__pc = g_recovery_address;
+#elif V8_OS_DARWIN && V8_HOST_ARCH_X64
uc->uc_mcontext->__ss.__rip = g_recovery_address;
+#elif V8_OS_LINUX && V8_HOST_ARCH_X64
+ uc->uc_mcontext.gregs[REG_RIP] = g_recovery_address;
#elif V8_OS_FREEBSD
uc->uc_mcontext.mc_rip = g_recovery_address;
#else
@@ -208,17 +239,39 @@ class TrapHandlerTest : public TestWithIsolate,
public:
void GenerateSetThreadInWasmFlagCode(MacroAssembler* masm) {
+#if V8_HOST_ARCH_X64
masm->Move(scratch,
i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
RelocInfo::NO_INFO);
masm->movl(MemOperand(scratch, 0), Immediate(1));
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(masm);
+ Register addr = temps.AcquireX();
+ masm->Mov(addr,
+ i_isolate()->thread_local_top()->thread_in_wasm_flag_address_);
+ Register one = temps.AcquireX();
+ masm->Mov(one, 1);
+ masm->Str(one, MemOperand(addr));
+#else
+#error Unsupported platform
+#endif
}
void GenerateResetThreadInWasmFlagCode(MacroAssembler* masm) {
+#if V8_HOST_ARCH_X64
masm->Move(scratch,
i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
RelocInfo::NO_INFO);
masm->movl(MemOperand(scratch, 0), Immediate(0));
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(masm);
+ Register addr = temps.AcquireX();
+ masm->Mov(addr,
+ i_isolate()->thread_local_top()->thread_in_wasm_flag_address_);
+ masm->Str(xzr, MemOperand(addr));
+#else
+#error Unsupported platform
+#endif
}
bool GetThreadInWasmFlag() {
@@ -275,6 +328,7 @@ TEST_P(TrapHandlerTest, TestTrapHandlerRecovery) {
// wasm code (we fake the wasm code and the access violation).
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
@@ -283,6 +337,18 @@ TEST_P(TrapHandlerTest, TestTrapHandlerRecovery) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -300,6 +366,7 @@ TEST_P(TrapHandlerTest, TestReleaseHandlerData) {
// recover from the specific memory access violation anymore.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
@@ -308,6 +375,18 @@ TEST_P(TrapHandlerTest, TestReleaseHandlerData) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -332,12 +411,23 @@ TEST_P(TrapHandlerTest, TestNoThreadInWasmFlag) {
// get active.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
uint32_t crash_offset = __ pc_offset();
__ testl(MemOperand(scratch, 0), Immediate(1));
uint32_t recovery_offset = __ pc_offset();
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -355,6 +445,7 @@ TEST_P(TrapHandlerTest, TestCrashInWasmNoProtectedInstruction) {
// protected, then the trap handler does not handle it.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
uint32_t no_crash_offset = __ pc_offset();
@@ -364,6 +455,19 @@ TEST_P(TrapHandlerTest, TestCrashInWasmNoProtectedInstruction) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ uint32_t no_crash_offset = __ pc_offset();
+ __ Mov(scratch, crash_address_);
+ __ Ldr(scratch, MemOperand(scratch));
+ // Offset where the crash is not happening.
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -381,6 +485,7 @@ TEST_P(TrapHandlerTest, TestCrashInWasmWrongCrashType) {
// wasm trap handler does not handle it.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
GenerateSetThreadInWasmFlagCode(&masm);
__ xorq(scratch, scratch);
@@ -390,6 +495,17 @@ TEST_P(TrapHandlerTest, TestCrashInWasmWrongCrashType) {
uint32_t recovery_offset = __ pc_offset();
GenerateResetThreadInWasmFlagCode(&masm);
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ GenerateSetThreadInWasmFlagCode(&masm);
+ UseScratchRegisterScope temps(&masm);
+ uint32_t crash_offset = __ pc_offset();
+ __ Trap();
+ // Offset where the crash is not happening.
+ uint32_t recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -443,12 +559,23 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
// set.
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer_->CreateView());
+#if V8_HOST_ARCH_X64
__ Push(scratch);
__ Move(scratch, crash_address_, RelocInfo::NO_INFO);
uint32_t crash_offset = __ pc_offset();
__ testl(MemOperand(scratch, 0), Immediate(1));
uint32_t recovery_offset = __ pc_offset();
__ Pop(scratch);
+#elif V8_HOST_ARCH_ARM64
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, crash_address_);
+ uint32_t crash_offset = __ pc_offset();
+ __ Ldr(scratch, MemOperand(scratch));
+ uint32_t recovery_offset = __ pc_offset();
+#else
+#error Unsupported platform
+#endif
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
@@ -480,3 +607,5 @@ INSTANTIATE_TEST_SUITE_P(Traps, TrapHandlerTest,
} // namespace wasm
} // namespace internal
} // namespace v8
+
+#endif
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc
new file mode 100644
index 0000000000..fa559146e5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-bad-name-section.wasm.inc
@@ -0,0 +1,145 @@
+// This Wasm module has a name section which is invalid in that it
+// contains each sub-section twice.
+
+ 0x00, 0x61, 0x73, 0x6d, // wasm magic
+ 0x01, 0x00, 0x00, 0x00, // wasm version
+
+ // The only purpose of this table section is to trigger lazy decoding
+ // of the name section.
+ 0x04, // section kind: Table
+ 0x04, // section length 4
+ 0x01, 0x70, 0x00, // table count 1: funcref no maximum
+ 0x00, // initial size 0
+
+ 0x00, // section kind: Unknown
+ 0xb3, 0x01, // section length 179
+ 0x04, // section name length: 4
+ 0x6e, 0x61, 0x6d, 0x65, // section name: name
+
+ 0x01, // name type: function
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x02, // name type: local
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x03, // name type: label
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x04, // name type: type
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x05, // name type: table
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x06, // name type: memory
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x07, // name type: global
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x08, // name type: element segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x09, // name type: data segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x0a, // name type: field
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x0b, // name type: tag
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x01, // name type: function
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x02, // name type: local
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x03, // name type: label
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x04, // name type: type
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x05, // name type: table
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x06, // name type: memory
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x07, // name type: global
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x08, // name type: element segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x09, // name type: data segment
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
+
+ 0x0a, // name type: field
+ 0x0b, // payload length: 11
+ 0x02, // outer count 2
+ 0x00, 0x01, // outer index 0 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+ 0x01, 0x01, // outer index 1 inner count 1
+ 0x00, 0x01, 0x78, // inner index 0 name length 1 "x"
+
+ 0x0b, // name type: tag
+ 0x04, // payload length: 4
+ 0x01, // names count 1
+ 0x00, 0x01, 0x78, // index 0 name length 1 "x"
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc
new file mode 100644
index 0000000000..01fdedfd1b
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wasm.inc
@@ -0,0 +1,135 @@
+ 0x00, 0x61, 0x73, 0x6d, // wasm magic
+ 0x01, 0x00, 0x00, 0x00, // wasm version
+
+ 0x01, // section kind: Type
+ 0x3c, // section length 60
+ 0x0b, // types count 11
+ 0x50, 0x00, 0x5f, 0x00, // type #0 $type0 subtype, supertype count 0, kind: struct, field count 0
+ 0x5f, 0x01, 0x7f, 0x00, // type #1 $type1 kind: struct, field count 1: i32 immutable
+ 0x5f, 0x02, // type #2 $type2 kind: struct, field count 2
+ 0x7f, 0x01, // i32 mutable
+ 0x7e, 0x01, // i64 mutable
+ 0x5f, 0x02, // type #3 $type3 kind: struct, field count 2
+ 0x7a, 0x00, // i8 immutable
+ 0x79, 0x01, // i16 mutable
+ 0x5e, 0x7e, 0x00, // type #4 $type4 kind: array i64 immutable
+ 0x5e, 0x7e, 0x01, // type #5 $type5 kind: array i64 mutable
+ 0x5e, 0x7a, 0x00, // type #6 $type6 kind: array i8 immutable
+ 0x5f, 0x01, 0x6b, 0x00, 0x00, // type #7 $type7 kind: struct, field count 1: (ref $type0) immutable
+ 0x4f, // rec. group definition
+ 0x02, // recursive group size 2
+ 0x5f, 0x01, 0x6b, 0x09, 0x00, // type #8 $type8 kind: struct, field count 1: (ref $type9) immutable
+ 0x5f, 0x01, 0x6b, 0x08, 0x00, // type #9 $type9 kind: struct, field count 1: (ref $type8) immutable
+ 0x50, 0x01, 0x00, // type #10 $type10 subtype, supertype count 1: supertype 0
+ 0x5f, 0x01, 0x7f, 0x00, // kind: struct, field count 1: i32 immutable
+ 0x60, // type #11 $type11 kind: func
+ 0x02, // param count 2
+ 0x6b, 0x01, 0x6d, // (ref $type1) eqref
+ 0x00, // return count 0
+
+ 0x02, // section kind: Import
+ 0x30, // section length 48
+ 0x02, // imports count 2
+ // import #0
+ 0x03, // module name length: 3
+ 0x65, 0x6e, 0x76, // module name: env
+ 0x0f, // field name length: 15
+ 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64,
+ 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c,
+ // field name: imported_global
+ 0x03, 0x6b, 0x07, 0x00, // kind: global (ref $type7) immutable
+ // import #1
+ 0x03, // module name length: 3
+ 0x65, 0x6e, 0x76, // module name: env
+ 0x0e, // field name length: 14
+ 0x61, 0x6e, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f,
+ 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c,
+ // field name: another_global
+ 0x03, 0x6b, 0x08, 0x00, // kind: global (ref $type8) immutable
+
+ 0x03, // section kind: Function
+ 0x02, // section length 2
+ 0x01, 0x0b, // functions count 1: 0 $func0 (param (ref $type1) eqref)
+
+ 0x06, // section kind: Global
+ 0x0b, // section length 11
+ 0x02, // globals count 2
+ 0x6e, 0x00, // global #2: anyref immutable
+ 0xd0, 0x65, 0x0b, // ref.null none
+ 0x6d, 0x01, // global #3: eqref mutable
+ 0xd0, 0x65, 0x0b, // ref.null none
+
+ 0x0a, // section kind: Code
+ 0x99, 0x01, // section length 153
+ 0x01, // functions count 1
+ // function #0 $func0
+ 0x96, 0x01, // body size 150
+ 0x00, // 0 entries in locals list
+ 0xfb, 0x08, 0x01, // struct.new_default $type1
+ 0xfb, 0x03, 0x01, 0x00, // struct.get $type1 $field0
+ 0x1a, // drop
+ 0xfb, 0x08, 0x02, // struct.new_default $type2
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x06, 0x02, 0x00, // struct.set $type2 $field0
+ 0xfb, 0x08, 0x03, // struct.new_default $type3
+ 0xfb, 0x04, 0x03, 0x00, // struct.get_s $type3 $field0
+ 0x1a, // drop
+ 0xfb, 0x08, 0x03, // struct.new_default $type3
+ 0xfb, 0x05, 0x03, 0x01, // struct.get_u $type3 $field1
+ 0x1a, // drop
+ 0xfb, 0x1a, 0x04, 0x00, // array.new_fixed $type4 0
+ 0x1a, // drop
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x04, // array.new_default $type4
+ 0xfb, 0x19, // array.len
+ 0x1a, // drop
+ 0x42, 0x00, // i64.const 0
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1b, 0x04, // array.new $type4
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x13, 0x04, // array.get $type4
+ 0x1a, // drop
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x05, // array.new_default $type5
+ 0x41, 0x00, // i32.const 0
+ 0x42, 0x00, // i64.const 0
+ 0xfb, 0x16, 0x05, // array.set $type5
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x06, // array.new_default $type6
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x14, 0x06, // array.get_s $type6
+ 0x1a, // drop
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x1c, 0x06, // array.new_default $type6
+ 0x41, 0x00, // i32.const 0
+ 0xfb, 0x15, 0x06, // array.get_u $type6
+ 0x1a, // drop
+ 0x20, 0x01, // local.get $var1
+ 0x20, 0x01, // local.get $var1
+ 0xd5, // ref.eq
+ 0x1a, // drop
+ 0x20, 0x01, // local.get $var1
+ 0xfb, 0x44, 0x00, // ref.test $type0
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x45, 0x00, // ref.cast $type0
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x48, 0x00, // ref.test null $type0
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x49, 0x00, // ref.cast null $type0
+ 0x1a, // drop
+ 0x02, 0x6b, 0x01, // block (result (ref $type1)) $label0
+ 0x20, 0x00, // local.get $var0
+ 0xd6, 0x00, // br_on_non_null $label0
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x46, 0x00, 0x01, // br_on_cast $label0 $type1
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0xfb, 0x47, 0x00, 0x01, // br_on_cast_fail $label0 $type1
+ 0x1a, // drop
+ 0x20, 0x00, // local.get $var0
+ 0x0b, // end $label0
+ 0x1a, // drop
+ 0x0b, // end
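
The two-byte size fields in this dump are unsigned LEB128 values: 0x99 0x01 decodes to 25 + (1 << 7) = 153 and 0x96 0x01 to 22 + (1 << 7) = 150, matching the "section length 153" and "body size 150" annotations above. A minimal decoding sketch (the helper name and signature are illustrative only, not part of this change):

#include <cstddef>
#include <cstdint>

// Decodes one unsigned LEB128 value; writes the number of bytes consumed.
uint32_t DecodeULEB128(const uint8_t* bytes, size_t* length_out) {
  uint32_t result = 0;
  int shift = 0;
  size_t i = 0;
  uint8_t byte;
  do {
    byte = bytes[i++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  *length_out = i;
  return result;  // {0x99, 0x01} => 0x19 | (0x01 << 7) == 153
}
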
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc
new file mode 100644
index 0000000000..1189fffa14
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-gc.wat.inc
@@ -0,0 +1,107 @@
+;; expected = R"---(;; This is a polyglot C++/WAT file.
+;; Comment lines are ignored and not expected in the disassembler output.
+(module
+ ;; Structs.
+ (type $type0 (struct))
+ (type $type1 (struct (field $field0 i32)))
+ (type $type2 (struct (field $field0 (mut i32)) (field $field1 (mut i64))))
+ (type $type3 (struct (field $field0 i8) (field $field1 (mut i16))))
+ ;; Arrays.
+ (type $type4 (array (field i64)))
+ (type $type5 (array (field (mut i64))))
+ (type $type6 (array (field i8)))
+ ;; References to other types, mutual recursion.
+ (type $type7 (struct (field $field0 (ref $type0))))
+ ;; TODO: rec-groups are supported in the binary format, but they are not
+ ;; printed yet. Once that is implemented in the disassembler, uncomment:
+ ;; (rec
+ (type $type8 (struct (field $field0 (ref $type9))))
+ (type $type9 (struct (field $field0 (ref $type8))))
+ ;; )
+ ;; Subtyping constraints.
+ ;; TODO: Change to `sub` keyword, once that is standardized.
+ (type $type10 (struct_subtype (field $field0 i32) $type0))
+ ;; Globals using reference types.
+ (global $env.imported_global (;0;) (import "env" "imported_global") (ref $type7))
+ (global $env.another_global (;1;) (import "env" "another_global") (ref $type8))
+ (global $global2 anyref (ref.null none))
+ (global $global3 (mut eqref) (ref.null none))
+ ;; Function with GC instructions and taking GC types as parameters.
+ (func $func0 (param $var0 (ref $type1)) (param $var1 eqref)
+ ;; Structs.
+ struct.new_default $type1
+ struct.get $type1 $field0
+ drop
+ struct.new_default $type2
+ i32.const 0
+ struct.set $type2 $field0
+ struct.new_default $type3
+ struct.get_s $type3 $field0
+ drop
+ struct.new_default $type3
+ struct.get_u $type3 $field1
+ drop
+ ;; Arrays.
+ array.new_fixed $type4 0
+ drop
+ i32.const 0
+ array.new_default $type4
+ array.len
+ drop
+ i64.const 0
+ i32.const 0
+ array.new $type4
+ i32.const 0
+ array.get $type4
+ drop
+ i32.const 0
+ array.new_default $type5
+ i32.const 0
+ i64.const 0
+ array.set $type5
+ i32.const 0
+ array.new_default $type6
+ i32.const 0
+ array.get_s $type6
+ drop
+ i32.const 0
+ array.new_default $type6
+ i32.const 0
+ array.get_u $type6
+ drop
+ ;; References.
+ local.get $var1
+ local.get $var1
+ ref.eq
+ drop
+ local.get $var1
+ ref.test $type0
+ drop
+ local.get $var0
+ ref.cast $type0
+ drop
+ local.get $var0
+ ref.test null $type0
+ drop
+ local.get $var0
+ ref.cast null $type0
+ drop
+ ;; Branches.
+ block $label0 (result (ref $type1))
+ local.get $var0
+ br_on_non_null $label0
+ local.get $var0
+ br_on_cast $label0 $type1
+ drop
+ local.get $var0
+ br_on_cast_fail $label0 $type1
+ drop
+ ;; TODO: Once `br_on_cast null` is implemented, uncomment:
+ ;; local.get $var0
+ ;; br_on_cast $label0 null $type1
+ local.get $var0
+ end $label0
+ drop
+ )
+)
+;;)---";
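
The file above is the disassembler's expected output, written as a polyglot C++/WAT file: in WAT, lines starting with ";;" are comments, so the first and last lines are ignored by WAT tools, while in C++ the first line opens a raw string literal that the last line closes. The disassembler unittest further down in this diff includes it inside a function body exactly as in this sketch (the wrapper function name is made up for illustration):

#include <string>

std::string LoadExpectedGcWat() {
  std::string expected;
  // The first line of the .wat.inc (";; expected = R"---(") turns into two
  // empty statements followed by the start of a raw string literal; the last
  // line (";;)---";") terminates it, so after textual inclusion `expected`
  // holds the whole WAT module text.
#include "wasm-disassembler-unittest-gc.wat.inc"
  return expected;
}
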
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc
index 30328bab67..2a438a9ab4 100644
--- a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-mvp.wat.inc
@@ -484,6 +484,5 @@
)
;; Data and element sections.
(data (global.get $env.imported_global) "foo\0a\00")
- ;; TODO(dlehmann): Wasm extensions, name and extended name section.
)
;;)---";
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc
new file mode 100644
index 0000000000..41a43397fe
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wasm.inc
@@ -0,0 +1,31 @@
+0x00, 0x61, 0x73, 0x6d, // wasm magic
+0x01, 0x00, 0x00, 0x00, // wasm version
+
+0x01, // section kind: Type
+0x04, // section length 4
+0x01, 0x60, // types count 1: kind: func
+0x00, // param count 0
+0x00, // return count 0
+
+0x03, // section kind: Function
+0x02, // section length 2
+0x01, 0x00, // functions count 1: 0 $doubleEnd
+
+0x07, // section kind: Export
+0x0d, // section length 13
+0x01, // exports count 1: export # 0
+0x09, // field name length: 9
+0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x45, 0x6e,
+0x64, // field name: doubleEnd
+0x00, 0x00, // kind: function index: 0
+
+0x0a, // section kind: Code
+0x07, // section length 7
+0x01, // functions count 1
+ // function #0 $doubleEnd
+0x05, // body size 5
+0x00, // 0 entries in locals list
+0x01, // nop
+0x0b, // end
+0x0b, // end
+0x0b, // end
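
Byte accounting for the Code section above: body size 5 = locals count (1) + nop (1) + three end opcodes (3), and section length 7 = function count (1) + body-size byte (1) + body (5). The function body deliberately carries two end opcodes beyond the one that closes $doubleEnd, which the expected output below renders as the two ";; Unexpected end byte" comments.
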
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc
new file mode 100644
index 0000000000..593ed16e2d
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest-too-many-ends.wat.inc
@@ -0,0 +1,9 @@
+;; expected = R"---(;; This is a polyglot C++/WAT file.
+(module
+ (func $doubleEnd (;0;) (export "doubleEnd")
+ nop
+ )
+ ;; Unexpected end byte
+ ;; Unexpected end byte
+)
+;;)---";
diff --git a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc
index 68d505db0c..39b14b46ce 100644
--- a/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-disassembler-unittest.cc
@@ -24,8 +24,7 @@ void CheckDisassemblerOutput(base::Vector<const byte> module_bytes,
std::string expected_output) {
AccountingAllocator allocator;
- ModuleResult module_result = DecodeWasmModuleForDisassembler(
- module_bytes.begin(), module_bytes.end(), &allocator);
+ ModuleResult module_result = DecodeWasmModuleForDisassembler(module_bytes);
DCHECK(module_result.ok());
WasmModule* module = module_result.value().get();
@@ -44,10 +43,11 @@ void CheckDisassemblerOutput(base::Vector<const byte> module_bytes,
// Remove comment lines from expected output since they cannot be recovered
// by a disassembler.
// They were also used as part of the C++/WAT polyglot trick described below.
- expected_output =
- std::regex_replace(expected_output, std::regex(" *;;[^\\n]*\\n?"), "");
+ std::regex comment_regex(" *;;[^\\n]*\\n?");
+ expected_output = std::regex_replace(expected_output, comment_regex, "");
+ std::string output_str = std::regex_replace(output.str(), comment_regex, "");
- EXPECT_EQ(output.str(), expected_output);
+ EXPECT_EQ(expected_output, output_str);
}
TEST_F(WasmDisassemblerTest, Mvp) {
@@ -90,6 +90,17 @@ TEST_F(WasmDisassemblerTest, Names) {
CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
}
+TEST_F(WasmDisassemblerTest, InvalidNameSection) {
+ constexpr byte module_bytes[] = {
+#include "wasm-disassembler-unittest-bad-name-section.wasm.inc"
+ };
+ std::string expected(
+ "(module\n"
+ " (table $x (;0;) 0 funcref)\n"
+ ")\n");
+ CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
+}
+
TEST_F(WasmDisassemblerTest, Simd) {
constexpr byte module_bytes[] = {
#include "wasm-disassembler-unittest-simd.wasm.inc"
@@ -99,6 +110,34 @@ TEST_F(WasmDisassemblerTest, Simd) {
CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
}
+TEST_F(WasmDisassemblerTest, Gc) {
+  // Since WABT's `wat2wasm` doesn't support some GC features yet, this binary
+  // was produced with Binaryen's `wasm-as --enable-gc --hybrid`.
+ constexpr byte module_bytes[] = {
+#include "wasm-disassembler-unittest-gc.wasm.inc"
+ };
+ std::string expected;
+#include "wasm-disassembler-unittest-gc.wat.inc"
+ CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
+}
+
+TEST_F(WasmDisassemblerTest, TooManyEnds) {
+ constexpr byte module_bytes[] = {
+#include "wasm-disassembler-unittest-too-many-ends.wasm.inc"
+ };
+ std::string expected;
+#include "wasm-disassembler-unittest-too-many-ends.wat.inc"
+ CheckDisassemblerOutput(base::ArrayVector(module_bytes), expected);
+}
+
+// TODO(dlehmann): Add tests for the following Wasm features and extensions:
+// - custom name section for Wasm GC constructs (struct and array type names,
+// struct fields).
+// - exception-related instructions (try, catch, catch_all, delegate) and named
+// exception tags.
+// - atomic instructions (threads proposal, 0xfe prefix).
+// - some "numeric" instructions (0xfc prefix).
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc b/deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc
deleted file mode 100644
index 1f1081805c..0000000000
--- a/deps/v8/test/unittests/web-snapshot/web-snapshot-unittest.cc
+++ /dev/null
@@ -1,1135 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/web-snapshot/web-snapshot.h"
-
-#include "include/v8-function.h"
-#include "src/api/api-inl.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-class WebSnapshotTest : public TestWithContext {
- protected:
- void TestWebSnapshotExtensive(
- const char* snapshot_source, const char* test_source,
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester,
- uint32_t string_count, uint32_t symbol_count,
- uint32_t builtin_object_count, uint32_t map_count, uint32_t context_count,
- uint32_t function_count, uint32_t object_count, uint32_t array_count) {
- v8::Isolate* isolate = v8_isolate();
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports =
- v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- CHECK_EQ(string_count, serializer.string_count());
- CHECK_EQ(symbol_count, serializer.symbol_count());
- CHECK_EQ(map_count, serializer.map_count());
- CHECK_EQ(builtin_object_count, serializer.builtin_object_count());
- CHECK_EQ(context_count, serializer.context_count());
- CHECK_EQ(function_count, serializer.function_count());
- CHECK_EQ(object_count, serializer.object_count());
- CHECK_EQ(array_count, serializer.array_count());
- }
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
- tester(isolate, new_context);
- CHECK_EQ(string_count, deserializer.string_count());
- CHECK_EQ(symbol_count, deserializer.symbol_count());
- CHECK_EQ(map_count, deserializer.map_count());
- CHECK_EQ(builtin_object_count, deserializer.builtin_object_count());
- CHECK_EQ(context_count, deserializer.context_count());
- CHECK_EQ(function_count, deserializer.function_count());
- CHECK_EQ(object_count, deserializer.object_count());
- CHECK_EQ(array_count, deserializer.array_count());
- }
- }
-
- void TestWebSnapshot(const char* snapshot_source, const char* test_source,
- const char* expected_result, uint32_t string_count,
- uint32_t symbol_count, uint32_t map_count,
- uint32_t builtin_object_count, uint32_t context_count,
- uint32_t function_count, uint32_t object_count,
- uint32_t array_count) {
- TestWebSnapshotExtensive(
- snapshot_source, test_source,
- [this, test_source, expected_result](
- v8::Isolate* isolate, v8::Local<v8::Context> new_context) {
- v8::Local<v8::String> result = RunJS(test_source).As<v8::String>();
- CHECK(result->Equals(new_context, NewString(expected_result))
- .FromJust());
- },
- string_count, symbol_count, map_count, builtin_object_count,
- context_count, function_count, object_count, array_count);
- }
-
- void VerifyFunctionKind(const v8::Local<v8::Object>& result,
- const v8::Local<v8::Context>& context,
- const char* property_name,
- FunctionKind expected_kind) {
- v8::Local<v8::Function> v8_function =
- result->Get(context, NewString(property_name))
- .ToLocalChecked()
- .As<v8::Function>();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_function));
- CHECK_EQ(function->shared().kind(), expected_kind);
- }
-};
-
-} // namespace
-
-TEST_F(WebSnapshotTest, Minimal) {
- const char* snapshot_source = "var foo = {'key': 'lol'};";
- const char* test_source = "foo.key";
- const char* expected_result = "lol";
- uint32_t kStringCount = 2; // 'foo', 'Object.prototype'; 'key' is in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, EmptyObject) {
- const char* snapshot_source = "var foo = {}";
- const char* test_source = "foo";
- uint32_t kStringCount = 2; // 'foo', 'Object.prototype'
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Handle<JSReceiver> foo(v8::Utils::OpenHandle(*result));
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- CHECK_EQ(foo->map(),
- i_isolate->native_context()->object_function().initial_map());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, Numbers) {
- const char* snapshot_source =
- "var foo = {'a': 6,\n"
- " 'b': -11,\n"
- " 'c': 11.6,\n"
- " 'd': NaN,\n"
- " 'e': Number.POSITIVE_INFINITY,\n"
- " 'f': Number.NEGATIVE_INFINITY,\n"
- "}";
- const char* test_source = "foo";
- uint32_t kStringCount =
- 2; // 'foo', 'Object.prototype'; 'a'...'f' are in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
-
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- int32_t a = result->Get(new_context, NewString("a"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(a, 6);
- int32_t b = result->Get(new_context, NewString("b"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(b, -11);
- double c = result->Get(new_context, NewString("c"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(c, 11.6);
- double d = result->Get(new_context, NewString("d"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK(std::isnan(d));
- double e = result->Get(new_context, NewString("e"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(e, std::numeric_limits<double>::infinity());
- double f = result->Get(new_context, NewString("f"))
- .ToLocalChecked()
- .As<v8::Number>()
- ->Value();
- CHECK_EQ(f, -std::numeric_limits<double>::infinity());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, Oddballs) {
- const char* snapshot_source =
- "var foo = {'a': false,\n"
- " 'b': true,\n"
- " 'c': null,\n"
- " 'd': undefined,\n"
- "}";
- const char* test_source = "foo";
- // 'foo', 'Object.prototype'; 'a'...'d' are in-place.
- uint32_t kStringCount = 2;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Local<Value> a =
- result->Get(new_context, NewString("a")).ToLocalChecked();
- CHECK(a->IsFalse());
- Local<Value> b =
- result->Get(new_context, NewString("b")).ToLocalChecked();
- CHECK(b->IsTrue());
- Local<Value> c =
- result->Get(new_context, NewString("c")).ToLocalChecked();
- CHECK(c->IsNull());
- Local<Value> d =
- result->Get(new_context, NewString("d")).ToLocalChecked();
- CHECK(d->IsUndefined());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, Function) {
- const char* snapshot_source =
- "var foo = {'key': function() { return '11525'; }};";
- const char* test_source = "foo.key()";
- const char* expected_result = "11525";
- // 'foo', 'Object.prototype', 'Function.prototype', function source code.
- // 'key' is in-place.
- uint32_t kStringCount = 4;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 1;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, InnerFunctionWithContext) {
- const char* snapshot_source =
- "var foo = {'key': (function() {\n"
- " let result = '11525';\n"
- " function inner() { return result; }\n"
- " return inner;\n"
- " })()};";
- const char* test_source = "foo.key()";
- const char* expected_result = "11525";
- // Strings: 'foo', 'result', 'Object.prototype', 'Function.prototype'.
- // function source code (inner). 'key' is in-place.
- uint32_t kStringCount = 5;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 1;
- uint32_t kFunctionCount = 1;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, InnerFunctionWithContextAndParentContext) {
- const char* snapshot_source =
- "var foo = {'key': (function() {\n"
- " let part1 = '11';\n"
- " function inner() {\n"
- " let part2 = '525';\n"
- " function innerinner() {\n"
- " return part1 + part2;\n"
- " }\n"
- " return innerinner;\n"
- " }\n"
- " return inner();\n"
- " })()};";
- const char* test_source = "foo.key()";
- const char* expected_result = "11525";
- // Strings: 'foo', 'Object.prototype', 'Function.prototype', function source
- // code (innerinner), 'part1', 'part2'.
- uint32_t kStringCount = 6;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 2;
- uint32_t kFunctionCount = 1;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RegExp) {
- const char* snapshot_source = "var foo = {'re': /ab+c/gi}";
- const char* test_source = "foo";
- // 'foo', 'Object.prototype', RegExp pattern, RegExp flags
- uint32_t kStringCount = 4;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Local<v8::RegExp> re = result->Get(new_context, NewString("re"))
- .ToLocalChecked()
- .As<v8::RegExp>();
- CHECK(re->IsRegExp());
- CHECK(
- re->GetSource()->Equals(new_context, NewString("ab+c")).FromJust());
- CHECK_EQ(v8::RegExp::kGlobal | v8::RegExp::kIgnoreCase, re->GetFlags());
- v8::Local<v8::Object> match =
- re->Exec(new_context, NewString("aBc")).ToLocalChecked();
- CHECK(match->IsArray());
- v8::Local<v8::Object> no_match =
- re->Exec(new_context, NewString("ac")).ToLocalChecked();
- CHECK(no_match->IsNull());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RegExpNoFlags) {
- const char* snapshot_source = "var foo = {'re': /ab+c/}";
- const char* test_source = "foo";
-  // 'foo', 'Object.prototype', RegExp pattern, RegExp flags
- uint32_t kStringCount = 4;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- Local<v8::RegExp> re = result->Get(new_context, NewString("re"))
- .ToLocalChecked()
- .As<v8::RegExp>();
- CHECK(re->IsRegExp());
- CHECK(
- re->GetSource()->Equals(new_context, NewString("ab+c")).FromJust());
- CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
- v8::Local<v8::Object> match =
- re->Exec(new_context, NewString("abc")).ToLocalChecked();
- CHECK(match->IsArray());
- v8::Local<v8::Object> no_match =
- re->Exec(new_context, NewString("ac")).ToLocalChecked();
- CHECK(no_match->IsNull());
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplication) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.outer = function(a) {\n"
- " return function() {\n"
- " return a;\n"
- " }\n"
- "}\n"
- "foo.inner = foo.outer('hi');";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_inner = "foo.inner";
- const char* create_new_inner = "foo.outer()";
-
- // Verify that foo.inner and the JSFunction which is the result of calling
- // foo.outer() after deserialization share the SFI.
- v8::Local<v8::Function> v8_inner1 = RunJS(get_inner).As<v8::Function>();
- v8::Local<v8::Function> v8_inner2 =
- RunJS(create_new_inner).As<v8::Function>();
-
- Handle<JSFunction> inner1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner1));
- Handle<JSFunction> inner2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner2));
-
- CHECK_EQ(inner1->shared(), inner2->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationClasses) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.create = function(a) {\n"
- " return class {\n"
- " constructor(x) {this.x = x;};\n"
- " }\n"
- "}\n"
- "foo.class = foo.create('hi');";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_class = "foo.class";
- const char* create_new_class = "foo.create()";
-
-    // Verify that foo.class and the JSFunction which is the result of calling
-    // foo.create() after deserialization share the SFI.
- v8::Local<v8::Function> v8_class1 = RunJS(get_class).As<v8::Function>();
- v8::Local<v8::Function> v8_class2 =
- RunJS(create_new_class).As<v8::Function>();
-
- Handle<JSFunction> class1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class1));
- Handle<JSFunction> class2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class2));
-
- CHECK_EQ(class1->shared(), class2->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationAfterBytecodeFlushing) {
- v8_flags.stress_flush_code = true;
- v8_flags.flush_bytecode = true;
- v8::Isolate* isolate = v8_isolate();
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.outer = function() {\n"
- " let a = 'hello';\n"
- " return function() {\n"
- " return a;\n"
- " }\n"
- "}\n"
- "foo.inner = foo.outer();";
-
- TryRunJS(snapshot_source);
-
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- CollectAllGarbage();
- CollectAllGarbage();
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_outer = "foo.outer";
- const char* get_inner = "foo.inner";
- const char* create_new_inner = "foo.outer()";
-
- v8::Local<v8::Function> v8_outer = RunJS(get_outer).As<v8::Function>();
- Handle<JSFunction> outer =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_outer));
- CHECK(!outer->shared().is_compiled());
-
- v8::Local<v8::Function> v8_inner1 = RunJS(get_inner).As<v8::Function>();
- v8::Local<v8::Function> v8_inner2 =
- RunJS(create_new_inner).As<v8::Function>();
-
- Handle<JSFunction> inner1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner1));
- Handle<JSFunction> inner2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner2));
-
- CHECK(outer->shared().is_compiled());
- CHECK_EQ(inner1->shared(), inner2->shared());
-
- // Force bytecode flushing of "foo.outer".
- CollectAllGarbage();
- CollectAllGarbage();
-
- CHECK(!outer->shared().is_compiled());
-
- // Create another inner function.
- v8::Local<v8::Function> v8_inner3 =
- RunJS(create_new_inner).As<v8::Function>();
- Handle<JSFunction> inner3 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner3));
-
- // Check that it shares the SFI with the original inner function which is in
- // the snapshot.
- CHECK_EQ(inner1->shared(), inner3->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationAfterBytecodeFlushingClasses) {
- v8_flags.stress_flush_code = true;
- v8_flags.flush_bytecode = true;
- v8::Isolate* isolate = v8_isolate();
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.create = function(a) {\n"
- " return class {\n"
- " constructor(x) {this.x = x;};\n"
- " }\n"
- "}\n"
- "foo.class = foo.create('hi');";
-
- TryRunJS(snapshot_source);
-
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- CollectAllGarbage();
- CollectAllGarbage();
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_create = "foo.create";
- const char* get_class = "foo.class";
- const char* create_new_class = "foo.create()";
-
- v8::Local<v8::Function> v8_create = RunJS(get_create).As<v8::Function>();
- Handle<JSFunction> create =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_create));
- CHECK(!create->shared().is_compiled());
-
- v8::Local<v8::Function> v8_class1 = RunJS(get_class).As<v8::Function>();
- v8::Local<v8::Function> v8_class2 =
- RunJS(create_new_class).As<v8::Function>();
-
- Handle<JSFunction> class1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class1));
- Handle<JSFunction> class2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class2));
-
- CHECK(create->shared().is_compiled());
- CHECK_EQ(class1->shared(), class2->shared());
-
- // Force bytecode flushing of "foo.outer".
- CollectAllGarbage();
- CollectAllGarbage();
-
- CHECK(!create->shared().is_compiled());
-
- // Create another inner function.
- v8::Local<v8::Function> v8_class3 =
- RunJS(create_new_class).As<v8::Function>();
- Handle<JSFunction> class3 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_class3));
-
- // Check that it shares the SFI with the original inner function which is in
- // the snapshot.
- CHECK_EQ(class1->shared(), class3->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, SFIDeduplicationOfFunctionsNotInSnapshot) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "let foo = {};\n"
- "foo.outer = function(a) {\n"
- " return function() {\n"
- " return a;\n"
- " }\n"
- "}\n";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* create_new_inner = "foo.outer()";
-
- // Verify that repeated invocations of foo.outer() return functions which
- // share the SFI.
- v8::Local<v8::Function> v8_inner1 =
- RunJS(create_new_inner).As<v8::Function>();
- v8::Local<v8::Function> v8_inner2 =
- RunJS(create_new_inner).As<v8::Function>();
-
- Handle<JSFunction> inner1 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner1));
- Handle<JSFunction> inner2 =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_inner2));
-
- CHECK_EQ(inner1->shared(), inner2->shared());
- }
-}
-
-TEST_F(WebSnapshotTest, FunctionKinds) {
- const char* snapshot_source =
- "var foo = {a: function() {},\n"
- " b: () => {},\n"
- " c: async function() {},\n"
- " d: async () => {},\n"
- " e: function*() {},\n"
- " f: async function*() {}\n"
- "}";
- const char* test_source = "foo";
- // 'foo', 'Object.prototype', 'Function.prototype', 'AsyncFunction.prototype',
- // 'AsyncGeneratorFunction.prototype", "GeneratorFunction.prototype", source
- // code. 'a'...'f' in-place.
- uint32_t kStringCount = 7;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 5;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 6;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- std::function<void(v8::Isolate*, v8::Local<v8::Context>)> tester =
- [this, test_source](v8::Isolate* isolate,
- v8::Local<v8::Context> new_context) {
- v8::Local<v8::Object> result = RunJS(test_source).As<v8::Object>();
- // Verify all FunctionKinds.
- VerifyFunctionKind(result, new_context, "a",
- FunctionKind::kNormalFunction);
- VerifyFunctionKind(result, new_context, "b",
- FunctionKind::kArrowFunction);
- VerifyFunctionKind(result, new_context, "c",
- FunctionKind::kAsyncFunction);
- VerifyFunctionKind(result, new_context, "d",
- FunctionKind::kAsyncArrowFunction);
- VerifyFunctionKind(result, new_context, "e",
- FunctionKind::kGeneratorFunction);
- VerifyFunctionKind(result, new_context, "f",
- FunctionKind::kAsyncGeneratorFunction);
- };
- TestWebSnapshotExtensive(snapshot_source, test_source, tester, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount,
- kContextCount, kFunctionCount, kObjectCount,
- kArrayCount);
-}
-
-// Test that concatenating JS code to the snapshot works.
-TEST_F(WebSnapshotTest, Concatenation) {
- v8::Isolate* isolate = v8_isolate();
-
- const char* snapshot_source = "var foo = {a: 1};\n";
- const char* source_to_append = "var bar = {a: 10};";
- const char* test_source = "foo.a + bar.a";
- uint32_t kObjectCount = 1;
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- CHECK_EQ(kObjectCount, serializer.object_count());
- }
-
- auto buffer_size = snapshot_data.buffer_size + strlen(source_to_append);
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- memcpy(buffer.get(), snapshot_data.buffer, snapshot_data.buffer_size);
- memcpy(buffer.get() + snapshot_data.buffer_size, source_to_append,
- strlen(source_to_append));
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, buffer.get(), buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
- CHECK_EQ(kObjectCount, deserializer.object_count());
-
- v8::Local<v8::Number> result = RunJS(test_source).As<v8::Number>();
- CHECK_EQ(11, result->Value());
- }
-}
-
-// Test that errors from invalid concatenated code are handled correctly.
-TEST_F(WebSnapshotTest, ConcatenationErrors) {
- v8::Isolate* isolate = v8_isolate();
-
- const char* snapshot_source = "var foo = {a: 1};\n";
- const char* source_to_append = "wontparse+[)";
- uint32_t kObjectCount = 1;
-
- WebSnapshotData snapshot_data;
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- CHECK_EQ(kObjectCount, serializer.object_count());
- }
-
- auto buffer_size = snapshot_data.buffer_size + strlen(source_to_append);
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- memcpy(buffer.get(), snapshot_data.buffer, snapshot_data.buffer_size);
- memcpy(buffer.get() + snapshot_data.buffer_size, source_to_append,
- strlen(source_to_append));
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, buffer.get(), buffer_size);
- CHECK(!deserializer.Deserialize());
- }
-}
-
-TEST_F(WebSnapshotTest, CompactedSourceCode) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "function foo() { 'foo' }\n"
- "function bar() { 'bar' }\n"
- "function baz() { 'baz' }\n"
- "let e = [foo, bar, baz]";
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 1);
- v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, "e").ToLocalChecked();
- exports->Set(isolate, 0, str);
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- const char* get_function = "e[0]";
-
- // Verify that the source code got compacted.
- v8::Local<v8::Function> v8_function =
- RunJS(get_function).As<v8::Function>();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_function));
- Handle<String> function_script_source =
- handle(String::cast(Script::cast(function->shared().script()).source()),
- i_isolate());
- const char* raw_expected_source = "() { 'foo' }() { 'bar' }() { 'baz' }";
-
- Handle<String> expected_source = Utils::OpenHandle(
- *v8::String::NewFromUtf8(isolate, raw_expected_source).ToLocalChecked(),
- i_isolate());
- CHECK(function_script_source->Equals(*expected_source));
- }
-}
-
-TEST_F(WebSnapshotTest, InPlaceStringsInArrays) {
- const char* snapshot_source = "var foo = ['one', 'two', 'three'];";
- const char* test_source = "foo.join('');";
- const char* expected_result = "onetwothree";
- uint32_t kStringCount = 1; // 'foo'; Other strings are in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 0;
- uint32_t kMapCount = 0;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 0;
- uint32_t kArrayCount = 1;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RepeatedInPlaceStringsInArrays) {
- const char* snapshot_source = "var foo = ['one', 'two', 'one'];";
- const char* test_source = "foo.join('');";
- const char* expected_result = "onetwoone";
- uint32_t kStringCount = 2; // 'foo', 'one'; Other strings are in-place.
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 0;
- uint32_t kMapCount = 0;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 0;
- uint32_t kArrayCount = 1;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, InPlaceStringsInObjects) {
- const char* snapshot_source = "var foo = {a: 'one', b: 'two', c: 'three'};";
- const char* test_source = "foo.a + foo.b + foo.c;";
- const char* expected_result = "onetwothree";
- // 'foo', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 2;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, RepeatedInPlaceStringsInObjects) {
- const char* snapshot_source = "var foo = {a: 'one', b: 'two', c: 'one'};";
- const char* test_source = "foo.a + foo.b + foo.c;";
- const char* expected_result = "onetwoone";
- // 'foo', 'one', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 3;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 1;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, BuiltinObjects) {
- const char* snapshot_source = "var foo = {a: Error.prototype};";
- const char* test_source = "foo.a == Error.prototype ? \"pass\" : \"fail\"";
- const char* expected_result = "pass";
- // 'foo', 'Error.prototype', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 3;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, BuiltinObjectsDeduplicated) {
- const char* snapshot_source =
- "var foo = {a: Error.prototype, b: Error.prototype}";
- const char* test_source = "foo.a === Error.prototype ? \"pass\" : \"fail\"";
- const char* expected_result = "pass";
- // 'foo', 'Error.prototype', 'Object.prototype'. Other strings are in-place.
- uint32_t kStringCount = 3;
- uint32_t kSymbolCount = 0;
- uint32_t kBuiltinObjectCount = 2;
- uint32_t kMapCount = 1;
- uint32_t kContextCount = 0;
- uint32_t kFunctionCount = 0;
- uint32_t kObjectCount = 1;
- uint32_t kArrayCount = 0;
- TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
- kSymbolCount, kBuiltinObjectCount, kMapCount, kContextCount,
- kFunctionCount, kObjectCount, kArrayCount);
-}
-
-TEST_F(WebSnapshotTest, ConstructorFunctionKinds) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "class Base { constructor() {} };\n"
- "class Derived extends Base { constructor() {} };\n"
- "class BaseDefault {};\n"
- "class DerivedDefault extends BaseDefault {};\n";
-
- TryRunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 4);
- exports->Set(isolate, 0,
- v8::String::NewFromUtf8(isolate, "Base").ToLocalChecked());
- exports->Set(isolate, 1,
- v8::String::NewFromUtf8(isolate, "Derived").ToLocalChecked());
- exports->Set(
- isolate, 2,
- v8::String::NewFromUtf8(isolate, "BaseDefault").ToLocalChecked());
- exports->Set(
- isolate, 3,
- v8::String::NewFromUtf8(isolate, "DerivedDefault").ToLocalChecked());
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- v8::Local<v8::Function> v8_base = RunJS("Base").As<v8::Function>();
- Handle<JSFunction> base =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_base));
- CHECK_EQ(FunctionKind::kBaseConstructor, base->shared().kind());
-
- v8::Local<v8::Function> v8_derived = RunJS("Derived").As<v8::Function>();
- Handle<JSFunction> derived =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_derived));
- CHECK_EQ(FunctionKind::kDerivedConstructor, derived->shared().kind());
-
- v8::Local<v8::Function> v8_base_default =
- RunJS("BaseDefault").As<v8::Function>();
- Handle<JSFunction> base_default =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_base_default));
- CHECK_EQ(FunctionKind::kDefaultBaseConstructor,
- base_default->shared().kind());
-
- v8::Local<v8::Function> v8_derived_default =
- RunJS("DerivedDefault").As<v8::Function>();
- Handle<JSFunction> derived_default =
- Handle<JSFunction>::cast(Utils::OpenHandle(*v8_derived_default));
- CHECK_EQ(FunctionKind::kDefaultDerivedConstructor,
- derived_default->shared().kind());
- }
-}
-
-TEST_F(WebSnapshotTest, SlackElementsInObjects) {
- v8::Isolate* isolate = v8_isolate();
- v8::HandleScope scope(isolate);
-
- WebSnapshotData snapshot_data;
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- const char* snapshot_source =
- "var foo = {};"
- "for (let i = 0; i < 100; ++i) {"
- " foo[i] = i;"
- "}"
- "var bar = {};"
- "for (let i = 0; i < 100; ++i) {"
- " bar[i] = {};"
- "}";
-
- RunJS(snapshot_source);
- v8::Local<v8::PrimitiveArray> exports = v8::PrimitiveArray::New(isolate, 2);
- exports->Set(isolate, 0,
- v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked());
- exports->Set(isolate, 1,
- v8::String::NewFromUtf8(isolate, "bar").ToLocalChecked());
- WebSnapshotSerializer serializer(isolate);
- CHECK(serializer.TakeSnapshot(new_context, exports, snapshot_data));
- CHECK(!serializer.has_error());
- CHECK_NOT_NULL(snapshot_data.buffer);
- }
-
- {
- v8::Local<v8::Context> new_context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(new_context);
- WebSnapshotDeserializer deserializer(isolate, snapshot_data.buffer,
- snapshot_data.buffer_size);
- CHECK(deserializer.Deserialize());
- CHECK(!deserializer.has_error());
-
- Handle<JSObject> foo =
- Handle<JSObject>::cast(Utils::OpenHandle<v8::Object, JSReceiver>(
- RunJS("foo").As<v8::Object>()));
- CHECK_EQ(100, foo->elements().length());
- CHECK_EQ(HOLEY_ELEMENTS, foo->GetElementsKind());
-
- Handle<JSObject> bar =
- Handle<JSObject>::cast(Utils::OpenHandle<v8::Object, JSReceiver>(
- RunJS("bar").As<v8::Object>()));
- CHECK_EQ(100, bar->elements().length());
- CHECK_EQ(HOLEY_ELEMENTS, bar->GetElementsKind());
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/zone/zone-vector-unittest.cc b/deps/v8/test/unittests/zone/zone-vector-unittest.cc
new file mode 100644
index 0000000000..d2406f8b45
--- /dev/null
+++ b/deps/v8/test/unittests/zone/zone-vector-unittest.cc
@@ -0,0 +1,373 @@
+// Copyright 2023 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <optional>
+
+#include "src/zone/zone-containers.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8::internal {
+
+template <class T>
+class LiveSet {
+ public:
+ void Add(const T* new_entry) {
+ CHECK(!Contains(new_entry));
+ set_.insert(new_entry);
+ }
+
+ void Remove(const T* old_entry) {
+ CHECK(Contains(old_entry));
+ set_.erase(old_entry);
+ }
+
+ void CheckContainsAll(ZoneVector<T>& vector) {
+ CHECK_EQ(vector.size(), set_.size());
+ for (const T* m = vector.begin(); m != vector.end(); m++) {
+ CHECK(Contains(m));
+ }
+ }
+
+ void CheckEmpty() { CHECK_EQ(0, set_.size()); }
+
+ private:
+ bool Contains(const T* entry) {
+    // std::set::contains requires C++20.
+ return set_.find(entry) != set_.end();
+ }
+
+ std::set<const T*> set_;
+};
+
+template <typename T>
+LiveSet<T>& live_set() {
+ static LiveSet<T> static_live_set;
+ return static_live_set;
+}
+
+class Trivial {
+ public:
+ Trivial() : id_(0) {}
+ explicit Trivial(int id) : id_(id) {}
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(std::is_trivially_copyable_v<Trivial>);
+
+template <>
+class LiveSet<Trivial> {
+ public:
+ void Add(const Trivial* new_entry) { UNREACHABLE(); }
+ void Remove(const Trivial* old_entry) { UNREACHABLE(); }
+ void CheckContainsAll(ZoneVector<Trivial>&) {}
+ void CheckEmpty() {}
+};
+
+class CopyAssignable {
+ public:
+ CopyAssignable() : id_(0) { live_set<CopyAssignable>().Add(this); }
+ explicit CopyAssignable(int id) : id_(id) {
+ live_set<CopyAssignable>().Add(this);
+ }
+ CopyAssignable(const CopyAssignable& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<CopyAssignable>().Add(this);
+ }
+ ~CopyAssignable() { live_set<CopyAssignable>().Remove(this); }
+ CopyAssignable& operator=(const CopyAssignable& other) V8_NOEXCEPT = default;
+
+ CopyAssignable(CopyAssignable&& other) = delete;
+ CopyAssignable& operator=(CopyAssignable&& other) = delete;
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(!std::is_trivially_copyable_v<CopyAssignable>);
+static_assert(std::is_copy_assignable_v<CopyAssignable>);
+static_assert(!std::is_move_assignable_v<CopyAssignable>);
+
+class MoveAssignable {
+ public:
+ MoveAssignable() : id_(0) { live_set<MoveAssignable>().Add(this); }
+ explicit MoveAssignable(int id) : id_(id) {
+ live_set<MoveAssignable>().Add(this);
+ }
+ MoveAssignable(const MoveAssignable& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<MoveAssignable>().Add(this);
+ }
+ MoveAssignable(MoveAssignable&& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<MoveAssignable>().Add(this);
+ }
+ MoveAssignable& operator=(const MoveAssignable& other) = delete;
+ MoveAssignable& operator=(MoveAssignable&& other) V8_NOEXCEPT {
+ id_ = other.id_;
+ return *this;
+ }
+ ~MoveAssignable() { live_set<MoveAssignable>().Remove(this); }
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(!std::is_trivially_copyable_v<MoveAssignable>);
+static_assert(std::is_move_assignable_v<MoveAssignable>);
+static_assert(!std::is_copy_assignable_v<MoveAssignable>);
+
+class NotAssignable {
+ public:
+ NotAssignable() : id_(0) { live_set<NotAssignable>().Add(this); }
+ explicit NotAssignable(int id) : id_(id) {
+ live_set<NotAssignable>().Add(this);
+ }
+ NotAssignable(const NotAssignable& other) V8_NOEXCEPT : id_(other.id_) {
+ live_set<NotAssignable>().Add(this);
+ }
+ NotAssignable& operator=(const NotAssignable& other) = delete;
+ ~NotAssignable() { live_set<NotAssignable>().Remove(this); }
+
+ NotAssignable(NotAssignable&& other) = delete;
+ NotAssignable& operator=(NotAssignable&& other) = delete;
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
+static_assert(!std::is_trivially_copyable_v<NotAssignable>);
+static_assert(!std::is_copy_assignable_v<NotAssignable>);
+static_assert(!std::is_move_assignable_v<NotAssignable>);
+
+class ZoneVectorTest : public TestWithZone {
+ public:
+ template <class T>
+ void CheckConsistency(ZoneVector<T>& vector, std::initializer_list<int> ids) {
+ live_set<T>().CheckContainsAll(vector);
+ CHECK_EQ(vector.size(), ids.size());
+ auto it = ids.begin();
+ for (size_t i = 0; i < ids.size(); i++) {
+ CHECK_EQ(*it++, vector[i].id());
+ }
+ }
+
+ template <class T>
+ void Basic() {
+ {
+ // Constructor with definition.
+ ZoneVector<T> v(1, T(1), zone());
+ CheckConsistency(v, {1});
+ }
+ live_set<T>().CheckEmpty();
+
+ {
+ // Constructor with initializer list.
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ CheckConsistency(v, {1, 2, 3});
+ }
+ live_set<T>().CheckEmpty();
+
+ {
+ std::optional<ZoneVector<T>> v1;
+ v1.emplace({T(1), T(2), T(3)}, zone());
+ CheckConsistency(v1.value(), {1, 2, 3});
+ {
+      // Copy construction (fresh allocation).
+ ZoneVector<T> v2 = v1.value();
+ v1.reset();
+ CheckConsistency(v2, {1, 2, 3});
+ }
+ v1.emplace({T(1), T(2), T(3)}, zone());
+ CheckConsistency(v1.value(), {1, 2, 3});
+
+ // Copy assignment without growth.
+ ZoneVector<T> v3({T(4), T(5), T(6)}, zone());
+ v3 = v1.value();
+ v1.reset();
+ CheckConsistency(v3, {1, 2, 3});
+
+      // Move construction.
+ {
+ ZoneVector<T> v4(std::move(v3));
+ CheckConsistency(v4, {1, 2, 3});
+ }
+ CheckConsistency(v3, {});
+ }
+ live_set<T>().CheckEmpty();
+ }
+
+ template <class T>
+ void Assign() {
+ {
+ // Assign with sufficient capacity.
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.assign(2, T(4));
+ CheckConsistency(v, {4, 4});
+ // This time, capacity > size.
+ v.assign(3, T(5));
+ CheckConsistency(v, {5, 5, 5});
+ }
+
+ {
+ // Assign with capacity growth.
+ ZoneVector<T> v({T(1)}, zone());
+ v.assign(2, T(4));
+ CheckConsistency(v, {4, 4});
+ }
+
+ live_set<T>().CheckEmpty();
+ }
+
+ template <class T>
+ void Insert() {
+ // Check that we can insert (by iterator) in the right positions.
+ {
+ ZoneVector<T> v({T(2), T(4)}, zone());
+ {
+ T src1[] = {T(1)};
+ T src3[] = {T(3)};
+ T src5[] = {T(5)};
+ v.insert(&v.at(0), src1, std::end(src1));
+ v.insert(&v.at(2), src3, std::end(src3));
+ v.insert(v.end(), src5, std::end(src5));
+ }
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ }
+
+ // Check that we can insert (by count) in the right positions.
+ {
+ ZoneVector<T> v({T(2), T(4)}, zone());
+ v.insert(&v.at(0), 1, T(1));
+ v.insert(&v.at(2), 1, T(3));
+ v.insert(v.end(), 1, T(5));
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ }
+
+ // Test the "insufficient capacity" case in PrepareForInsertion.
+ {
+ ZoneVector<T> v(zone());
+ CHECK_EQ(0, v.capacity());
+ v.insert(v.begin(), 1, T(5));
+ CheckConsistency(v, {5});
+ {
+ T src[] = {T(1), T(2), T(3), T(4)};
+ v.insert(v.begin(), src, std::end(src));
+ }
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ }
+
+ // Test "case 1" of sufficient capacity in PrepareForInsertion.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4), T(5)}, zone());
+ v.reserve(10);
+ CHECK_EQ(10, v.capacity());
+ CheckConsistency(v, {1, 2, 3, 4, 5});
+ {
+ T src[] = {T(11), T(12), T(13), T(14)};
+ v.insert(&v.at(3), src, std::end(src));
+ }
+ CheckConsistency(v, {1, 2, 3, 11, 12, 13, 14, 4, 5});
+ }
+
+ // Test "case 2" of sufficient capacity in PrepareForInsertion.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4), T(5)}, zone());
+ v.reserve(10);
+ {
+ T src[] = {T(11), T(12)};
+ v.insert(&v.at(2), src, std::end(src));
+ }
+ CheckConsistency(v, {1, 2, 11, 12, 3, 4, 5});
+ }
+ live_set<T>().CheckEmpty();
+
+ // For good measure, test the edge case where we're inserting exactly
+ // as many elements as we're moving.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4)}, zone());
+ v.reserve(10);
+ {
+ T src[] = {T(11), T(12)};
+ v.insert(&v.at(2), src, std::end(src));
+ }
+ }
+ }
+
+ template <class T>
+ void Erase() {
+ // Erase one element.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(&v.at(1));
+ CheckConsistency(v, {1, 3});
+ }
+ // Erase a range.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3), T(4)}, zone());
+ v.erase(&v.at(1), &v.at(3));
+ CheckConsistency(v, {1, 4});
+ }
+ // Erase first element.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(v.begin());
+ CheckConsistency(v, {2, 3});
+ }
+ // Erase last element.
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(&v.at(2));
+ CheckConsistency(v, {1, 2});
+ }
+ // Erase nothing (empty range).
+ {
+ ZoneVector<T> v({T(1), T(2), T(3)}, zone());
+ v.erase(v.begin(), v.begin());
+ CheckConsistency(v, {1, 2, 3});
+ v.erase(&v.at(1), &v.at(1));
+ CheckConsistency(v, {1, 2, 3});
+ v.erase(v.end(), v.end());
+ CheckConsistency(v, {1, 2, 3});
+ }
+ live_set<T>().CheckEmpty();
+ }
+};
+
+TEST_F(ZoneVectorTest, Basic) {
+ Basic<Trivial>();
+ Basic<CopyAssignable>();
+ Basic<MoveAssignable>();
+ Basic<NotAssignable>();
+}
+
+TEST_F(ZoneVectorTest, Assign) {
+ Assign<Trivial>();
+ Assign<CopyAssignable>();
+ Assign<MoveAssignable>();
+ Assign<NotAssignable>();
+}
+
+TEST_F(ZoneVectorTest, Insert) {
+ Insert<Trivial>();
+ Insert<CopyAssignable>();
+ Insert<MoveAssignable>();
+ Insert<NotAssignable>();
+}
+
+TEST_F(ZoneVectorTest, Erase) {
+ Erase<Trivial>();
+ Erase<CopyAssignable>();
+ Erase<MoveAssignable>();
+ Erase<NotAssignable>();
+}
+
+} // namespace v8::internal
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
index 8d6bab9344..55de2db010 100644
--- a/deps/v8/test/wasm-api-tests/callbacks.cc
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -30,8 +30,6 @@ own<Trap> Stage2(void* env, const Val args[], Val results[]) {
own<Trap> Stage4_GC(void* env, const Val args[], Val results[]) {
printf("Stage4...\n");
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env);
- ScanStackModeScopeForTesting no_stack_scanning(isolate->heap(),
- Heap::ScanStackMode::kNone);
isolate->heap()->PreciseCollectAllGarbage(Heap::kForcedGC,
GarbageCollectionReason::kTesting);
results[0] = Val::i32(args[0].i32() + 1);
diff --git a/deps/v8/test/wasm-api-tests/finalize.cc b/deps/v8/test/wasm-api-tests/finalize.cc
index 19646cf007..77cd588449 100644
--- a/deps/v8/test/wasm-api-tests/finalize.cc
+++ b/deps/v8/test/wasm-api-tests/finalize.cc
@@ -36,10 +36,11 @@ void FinalizeModule(void* data) {
g_modules_finalized += static_cast<int>(reinterpret_cast<intptr_t>(data));
}
-void RunInStore(Store* store, ZoneBuffer* wire_bytes, int iterations) {
- size_t size = wire_bytes->end() - wire_bytes->begin();
+void RunInStore(Store* store, base::Vector<const uint8_t> wire_bytes,
+ int iterations) {
vec<byte_t> binary = vec<byte_t>::make(
- size, reinterpret_cast<byte_t*>(const_cast<byte*>(wire_bytes->begin())));
+ wire_bytes.size(),
+ reinterpret_cast<byte_t*>(const_cast<byte*>(wire_bytes.begin())));
own<Module> module = Module::make(store, binary);
module->set_host_info(reinterpret_cast<void*>(kModuleMagic), &FinalizeModule);
for (int iteration = 0; iteration < iterations; iteration++) {
diff --git a/deps/v8/test/wasm-api-tests/serialize.cc b/deps/v8/test/wasm-api-tests/serialize.cc
index 139e5cff8b..80af51855d 100644
--- a/deps/v8/test/wasm-api-tests/serialize.cc
+++ b/deps/v8/test/wasm-api-tests/serialize.cc
@@ -37,8 +37,6 @@ TEST_F(WasmCapiTest, Serialize) {
ResetModule();
Heap* heap =
reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate()->heap();
- ScanStackModeScopeForTesting no_stack_scanning(heap,
- Heap::ScanStackMode::kNone);
heap->PreciseCollectAllGarbage(Heap::kForcedGC,
GarbageCollectionReason::kTesting);
heap->PreciseCollectAllGarbage(Heap::kForcedGC,
diff --git a/deps/v8/test/wasm-api-tests/table.cc b/deps/v8/test/wasm-api-tests/table.cc
index c28dcce4a7..4425a678f0 100644
--- a/deps/v8/test/wasm-api-tests/table.cc
+++ b/deps/v8/test/wasm-api-tests/table.cc
@@ -39,7 +39,7 @@ void ExpectResult(int expected, const Func* func, int arg1, int arg2) {
TEST_F(WasmCapiTest, Table) {
const uint32_t table_index = builder()->AddTable(kWasmFuncRef, 2, 10);
builder()->AddExport(base::CStrVector("table"), kExternalTable, table_index);
- const uint32_t sig_i_i_index = builder()->AddSignature(wasm_i_i_sig());
+ const uint32_t sig_i_i_index = builder()->AddSignature(wasm_i_i_sig(), true);
ValueType reps[] = {kWasmI32, kWasmI32, kWasmI32};
FunctionSig call_sig(1, 2, reps);
byte call_code[] = {
diff --git a/deps/v8/test/wasm-api-tests/traps.cc b/deps/v8/test/wasm-api-tests/traps.cc
index adee567c50..e5f71a13aa 100644
--- a/deps/v8/test/wasm-api-tests/traps.cc
+++ b/deps/v8/test/wasm-api-tests/traps.cc
@@ -66,13 +66,8 @@ TEST_F(WasmCapiTest, Traps) {
// Use internal machinery to parse the module to find the function offsets.
// This makes the test more robust than hardcoding them.
- i::Isolate* isolate =
- reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate();
- ModuleResult result = DecodeWasmModule(
- WasmFeatures::All(), wire_bytes()->begin(), wire_bytes()->end(), false,
- ModuleOrigin::kWasmOrigin, isolate->counters(),
- isolate->metrics_recorder(), v8::metrics::Recorder::ContextId::Empty(),
- DecodingMethod::kSync, GetWasmEngine()->allocator());
+ ModuleResult result = DecodeWasmModule(WasmFeatures::All(), wire_bytes(),
+ false, ModuleOrigin::kWasmOrigin);
ASSERT_TRUE(result.ok());
const WasmFunction* func1 = &result.value()->functions[1];
const WasmFunction* func2 = &result.value()->functions[2];
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-test.h b/deps/v8/test/wasm-api-tests/wasm-api-test.h
index f161dcd87d..31c7fc1b02 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-test.h
+++ b/deps/v8/test/wasm-api-tests/wasm-api-test.h
@@ -155,7 +155,9 @@ class WasmCapiTest : public ::testing::Test {
Module* module() { return module_.get(); }
Instance* instance() { return instance_.get(); }
const ownvec<Extern>& exports() { return exports_; }
- ZoneBuffer* wire_bytes() { return &wire_bytes_; }
+ base::Vector<const uint8_t> wire_bytes() {
+ return base::VectorOf(wire_bytes_);
+ }
FunctionSig* wasm_i_i_sig() { return &wasm_i_i_sig_; }
FuncType* cpp_i_i_sig() { return cpp_i_i_sig_.get(); }
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 7d52419dc3..76d0295afb 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -13,22 +13,24 @@ WPT_ROOT = "/wasm/jsapi/"
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
-proposal_flags = [{
- 'name': 'js-types',
- 'flags': ['--experimental-wasm-type-reflection',
- '--wasm-staging']
- },
- {
- 'name': 'tail-call',
- 'flags': ['--experimental-wasm-tail-call',
- '--wasm-staging']
- },
- {
- 'name': 'memory64',
- 'flags': ['--experimental-wasm-memory64',
- '--wasm-staging']
- },
- ]
+proposal_flags = [
+ {
+ 'name': 'js-types',
+ 'flags': ['--experimental-wasm-type-reflection', '--wasm-staging']
+ },
+ {
+ 'name': 'tail-call',
+ 'flags': ['--experimental-wasm-tail-call', '--wasm-staging']
+ },
+ {
+ 'name': 'memory64',
+ 'flags': ['--experimental-wasm-memory64', '--wasm-staging']
+ },
+ {
+ 'name': 'extended-const',
+ 'flags': ['--experimental-wasm-extended-const', '--wasm-staging']
+ },
+]
class TestLoader(testsuite.JSTestLoader):
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index abfd851d55..100afef785 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-597fc3fde2a51bd2bb9be1c87505d5a82ca7e441 \ No newline at end of file
+250a613cbdd086814a8412f8a2836f82f2840d23 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index b51973be7a..3dd0e2f662 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -8,11 +8,14 @@
# This test can only be executed in the browser
'wpt/idlharness': [SKIP],
# Failing WPT tests
- 'wpt/exception/getArg.tentative': [FAIL],
'wpt/exception/type.tentative': [FAIL],
'wpt/function/constructor.tentative': [FAIL],
'wpt/function/table.tentative': [FAIL],
'wpt/function/type.tentative': [FAIL],
+ # TODO(v8): Should work after
+ # https://github.com/WebAssembly/exception-handling/pull/257 landed and the
+ # tests have been updated.
+ 'wpt/exception/getArg.tentative': [FAIL],
# Outdated proposal tests.
'proposals/js-types/table/get-set': [FAIL],
@@ -34,6 +37,9 @@
# These are slow, and not useful to run for the proposals:
'proposals/js-types/limits': [SKIP],
'proposals/memory64/limits': [SKIP],
+
+ # Slow tests:
+ 'limits': [PASS, SLOW],
}], # ALWAYS
['system == android', {
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 1f45c88fbd..a87b75282c 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -7,22 +7,25 @@ import os
from testrunner.local import testsuite
from testrunner.objects import testcase
-proposal_flags = [{
- 'name': 'js-types',
- 'flags': ['--experimental-wasm-type-reflection',
- '--wasm-staging']
- },
- {
- 'name': 'tail-call',
- 'flags': ['--experimental-wasm-return-call',
- '--wasm-staging']
- },
- {
- 'name': 'memory64',
- 'flags': ['--experimental-wasm-memory64',
- '--wasm-staging']
- },
- ]
+proposal_flags = [
+ {
+ 'name': 'js-types',
+ 'flags': ['--experimental-wasm-type-reflection', '--wasm-staging']
+ },
+ {
+ 'name': 'tail-call',
+ 'flags': ['--experimental-wasm-return-call', '--wasm-staging']
+ },
+ {
+ 'name': 'memory64',
+ 'flags': ['--experimental-wasm-memory64', '--wasm-staging']
+ },
+ {
+ 'name': 'extended-const',
+ 'flags': ['--experimental-wasm-extended-const', '--wasm-staging']
+ },
+]
+
class TestLoader(testsuite.JSTestLoader):
pass
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index a6d736ecfb..546d9a340a 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-ac7450624874b95cbdf3b5e216a466729663e7a7 \ No newline at end of file
+a02872d1c77253e2cc25df72465cc62ac90dc736 \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 2e2c88099d..9a7e12bd33 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -7,25 +7,27 @@
'skip-stack-guard-page': [PASS, ['((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
# Missing rebase in the proposal repository.
'proposals/js-types/table': [FAIL],
+ # "data" is supposed to fail for "data segment does not fit"; missing rebase on
+ # https://github.com/WebAssembly/spec/commit/7fa2f20a.
+ 'proposals/memory64/data': [FAIL],
+ # "elem" is supposed to fail for "elements segment does not fit"; missing rebase on
+ # https://github.com/WebAssembly/spec/commit/7fa2f20a.
+ 'proposals/memory64/elem': [FAIL],
+ # "imports" is supposed to fail for "multiple tables"; missing rebase on
+ # https://github.com/WebAssembly/spec/commit/7fa2f20a.
+ 'proposals/memory64/imports': [FAIL],
+ # "linking" is supposed to fail for "elements segment does not fit"; missing rebase on
+ # https://github.com/WebAssembly/spec/commit/7fa2f20a.
'proposals/memory64/linking': [FAIL],
+ # "table" is supposed to fail for "multiple tables"; missing rebase on
+ # https://github.com/WebAssembly/spec/commit/7fa2f20a.
'proposals/memory64/table': [FAIL],
+ # "unreached-invalid" is supposed to fail for "type mismatch"; missing rebase on
+ # https://github.com/WebAssembly/spec/commit/7fa2f20a.
'proposals/memory64/unreached-invalid': [FAIL],
# TODO(wasm): Roll newest tests into "js-types" repository.
'proposals/js-types/globals': [FAIL],
- 'proposals/js-types/linking': [FAIL],
-
- # TODO(wasm): Roll newest tests into "tail-call" repository.
- 'proposals/tail-call/exports': [FAIL],
- 'proposals/tail-call/func': [FAIL],
- 'proposals/tail-call/globals': [FAIL],
- 'proposals/tail-call/linking': [FAIL],
-
- # TODO(v8:11401): Fix memory64 spec tests / the v8 implementation (whatever
- # is broken).
- 'proposals/memory64/data': [FAIL],
- 'proposals/memory64/elem': [FAIL],
- 'proposals/memory64/imports': [FAIL],
# Tests that need to run sequentially (e.g. due to memory consumption).
'simd_f32x4*': [PASS, HEAVY],
@@ -93,9 +95,6 @@
# These tests need larger stack size on simulator.
'skip-stack-guard-page': '--sim-stack-size=8192',
'proposals/tail-call/skip-stack-guard-page': '--sim-stack-size=8192',
-
- # SIMD is not fully implemented yet.
- 'simd*': [SKIP],
}], # 'arch == riscv64'
@@ -158,7 +157,10 @@
##############################################################################
['no_simd_hardware == True', {
+ 'linking': [SKIP],
'simd*': [SKIP],
+ 'proposals/tail-call/simd_lane': [SKIP],
+ 'proposals/js-types/linking': [SKIP],
}], # no_simd_hardware == True
##############################################################################
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index 4958d705ed..ef666531d5 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -81,7 +81,7 @@ class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = self._source_files
- if self._test_config.isolates:
+ if self.test_config.isolates:
files = files + ['--isolate'] + files
return files
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index bce71fd0c6..7fa7bfdf84 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -118,13 +118,6 @@
}], # variant in [nooptimization, stress, stress_js_bg_compile_wasm_code_gc] and (arch == arm or arch == arm64) and simulator_run
##############################################################################
-['gcov_coverage', {
- # Tests taking too long or getting too large call stacks.
- 'fast/js/excessive-comma-usage': [SKIP],
- 'run-json-stringify': [SKIP],
-}], # 'gcov_coverage'
-
-##############################################################################
['variant == no_wasm_traps', {
'*': [SKIP],
}], # variant == no_wasm_traps
diff --git a/deps/v8/third_party/glibc/LICENSE b/deps/v8/third_party/glibc/LICENSE
new file mode 100644
index 0000000000..58af0d3787
--- /dev/null
+++ b/deps/v8/third_party/glibc/LICENSE
@@ -0,0 +1,502 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/deps/v8/third_party/glibc/METADATA b/deps/v8/third_party/glibc/METADATA
new file mode 100644
index 0000000000..62556f8a08
--- /dev/null
+++ b/deps/v8/third_party/glibc/METADATA
@@ -0,0 +1,18 @@
+name: "glibc"
+description:
+ "This is a fork of glibc containing a handful of files that implement "
+ "intrinsic functions."
+
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://www.gnu.org/software/libc"
+ }
+ url {
+ type: GIT
+ value: "https://sourceware.org/git/?p=glibc.git;a=summary"
+ }
+ version: "985ff73ad29bc4cc4e15ce6d182ecd808b7b02d7"
+ last_upgrade_date { year: 2022 month: 11 day: 7 }
+ license_type: NOTICE
+}
diff --git a/deps/v8/third_party/glibc/README.v8 b/deps/v8/third_party/glibc/README.v8
new file mode 100644
index 0000000000..5187f7b2f0
--- /dev/null
+++ b/deps/v8/third_party/glibc/README.v8
@@ -0,0 +1,8 @@
+Name: glibc
+URL: https://www.gnu.org/software/libc/
+License: LGPL 2.1
+License File: LICENSE
+Upstream Git: https://sourceware.org/git/?p=glibc.git;a=summary
+Last Upstream Merge: 985ff73ad29bc4cc4e15ce6d182ecd808b7b02d7, Nov 1 2022
+
+This directory contains a handful of files that are forked from glibc.
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.c b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.c
new file mode 100644
index 0000000000..3abfa1e0b4
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.c
@@ -0,0 +1,143 @@
+/*
+ * IBM Accurate Mathematical Library
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+/*******************************************************************/
+/* */
+/* MODULE_NAME: branred.c */
+/* */
+/* FUNCTIONS: branred */
+/* */
+/* FILES NEEDED: branred.h mydefs.h endian.h mpa.h */
+/* mha.c */
+/* */
+/* Routine branred() performs range reduction of a double number */
+/* x into Double length number a+aa,such that */
+/* x=n*pi/2+(a+aa), abs(a+aa)<pi/4, n=0,+-1,+-2,.... */
+/* Routine returns the integer (n mod 4) of the above description */
+/* of x. */
+/*******************************************************************/
+
+#include "endian.h"
+#include "mydefs.h"
+#include "branred.h"
+
+#include <math.h>
+
+#ifndef SECTION
+# define SECTION
+#endif
+
+
+/*******************************************************************/
+/* Routine branred() performs range reduction of a double number */
+/* x into Double length number a+aa,such that */
+/* x=n*pi/2+(a+aa), abs(a+aa)<pi/4, n=0,+-1,+-2,.... */
+/* Routine return integer (n mod 4) */
+/*******************************************************************/
+int
+SECTION
+__branred(double x, double *a, double *aa)
+{
+ int i,k;
+ mynumber u,gor;
+ double r[6],s,t,sum,b,bb,sum1,sum2,b1,bb1,b2,bb2,x1,x2,t1,t2;
+
+ x*=tm600.x;
+ t=x*split; /* split x to two numbers */
+ x1=t-(t-x);
+ x2=x-x1;
+ sum=0;
+ u.x = x1;
+ k = (u.i[HIGH_HALF]>>20)&2047;
+ k = (k-450)/24;
+ if (k<0)
+ k=0;
+ gor.x = t576.x;
+ gor.i[HIGH_HALF] -= ((k*24)<<20);
+ for (i=0;i<6;i++)
+ { r[i] = x1*toverp[k+i]*gor.x; gor.x *= tm24.x; }
+ for (i=0;i<3;i++) {
+ s=(r[i]+big.x)-big.x;
+ sum+=s;
+ r[i]-=s;
+ }
+ t=0;
+ for (i=0;i<6;i++)
+ t+=r[5-i];
+ bb=(((((r[0]-t)+r[1])+r[2])+r[3])+r[4])+r[5];
+ s=(t+big.x)-big.x;
+ sum+=s;
+ t-=s;
+ b=t+bb;
+ bb=(t-b)+bb;
+ s=(sum+big1.x)-big1.x;
+ sum-=s;
+ b1=b;
+ bb1=bb;
+ sum1=sum;
+ sum=0;
+
+ u.x = x2;
+ k = (u.i[HIGH_HALF]>>20)&2047;
+ k = (k-450)/24;
+ if (k<0)
+ k=0;
+ gor.x = t576.x;
+ gor.i[HIGH_HALF] -= ((k*24)<<20);
+ for (i=0;i<6;i++)
+ { r[i] = x2*toverp[k+i]*gor.x; gor.x *= tm24.x; }
+ for (i=0;i<3;i++) {
+ s=(r[i]+big.x)-big.x;
+ sum+=s;
+ r[i]-=s;
+ }
+ t=0;
+ for (i=0;i<6;i++)
+ t+=r[5-i];
+ bb=(((((r[0]-t)+r[1])+r[2])+r[3])+r[4])+r[5];
+ s=(t+big.x)-big.x;
+ sum+=s;
+ t-=s;
+ b=t+bb;
+ bb=(t-b)+bb;
+ s=(sum+big1.x)-big1.x;
+ sum-=s;
+
+ b2=b;
+ bb2=bb;
+ sum2=sum;
+
+ sum=sum1+sum2;
+ b=b1+b2;
+ bb = (fabs(b1)>fabs(b2))? (b1-b)+b2 : (b2-b)+b1;
+ if (b > 0.5)
+ {b-=1.0; sum+=1.0;}
+ else if (b < -0.5)
+ {b+=1.0; sum-=1.0;}
+ s=b+(bb+bb1+bb2);
+ t=((b-s)+bb)+(bb1+bb2);
+ b=s*split;
+ t1=b-(b-s);
+ t2=s-t1;
+ b=s*hp0.x;
+ bb=(((t1*mp1.x-b)+t1*mp2.x)+t2*mp1.x)+(t2*mp2.x+s*hp1.x+t*hp0.x);
+ s=b+bb;
+ t=(b-s)+bb;
+ *a=s;
+ *aa=t;
+ return ((int) sum)&3; /* return quarter of unit circle */
+}
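[Editorial aside, not part of the upstream diff] The quadrant returned by __branred is what lets a caller recover sin or cos of the original argument from the reduced pair (a, aa). A minimal sketch of that dispatch follows; it collapses the double-double pair into a single double and leans on libm for the reduced evaluation, so it only illustrates the quadrant logic that do_sincos in s_sin.c (further down in this diff) implements with full accuracy. The helper name sin_via_branred is illustrative, not glibc's.

    /* Sketch only: x = n*pi/2 + (a + aa) with |a + aa| < pi/4, n in 0..3, so
         n == 0 ->  sin(a+aa)   n == 1 ->  cos(a+aa)
         n == 2 -> -sin(a+aa)   n == 3 -> -cos(a+aa)                          */
    #include <math.h>

    extern int __branred(double x, double *a, double *aa);

    static double sin_via_branred(double x) {
      double a, aa;
      int n = __branred(x, &a, &aa);                  /* quadrant of x */
      double r = (n & 1) ? cos(a + aa) : sin(a + aa); /* reduced evaluation */
      return (n & 2) ? -r : r;                        /* sign from the half */
    }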
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.h b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.h
new file mode 100644
index 0000000000..d113c8530d
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/branred.h
@@ -0,0 +1,79 @@
+/*
+ * IBM Accurate Mathematical Library
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+/************************************************************************/
+/* MODULE_NAME: branred.h */
+/* */
+/* */
+/* common data and variables definition for BIG or LITTLE ENDIAN */
+/************************************************************************/
+
+#ifndef BRANRED_H
+#define BRANRED_H
+
+#include "dla.h"
+
+#ifdef BIG_ENDI
+static const mynumber
+
+/**/ t576 = {{0x63f00000, 0x00000000}}, /* 2 ^ 576 */
+/**/ tm600 = {{0x1a700000, 0x00000000}}, /* 2 ^- 600 */
+/**/ tm24 = {{0x3e700000, 0x00000000}}, /* 2 ^- 24 */
+/**/ big = {{0x43380000, 0x00000000}}, /* 6755399441055744 */
+/**/ big1 = {{0x43580000, 0x00000000}}, /* 27021597764222976 */
+/**/ hp0 = {{0x3FF921FB, 0x54442D18}} ,/* 1.5707963267948966 */
+/**/ hp1 = {{0x3C91A626, 0x33145C07}} ,/* 6.123233995736766e-17 */
+/**/ mp1 = {{0x3FF921FB, 0x58000000}}, /* 1.5707963407039642 */
+/**/ mp2 = {{0xBE4DDE97, 0x40000000}}; /*-1.3909067675399456e-08 */
+
+#else
+#ifdef LITTLE_ENDI
+static const mynumber
+
+/**/ t576 = {{0x00000000, 0x63f00000}}, /* 2 ^ 576 */
+/**/ tm600 = {{0x00000000, 0x1a700000}}, /* 2 ^- 600 */
+/**/ tm24 = {{0x00000000, 0x3e700000}}, /* 2 ^- 24 */
+/**/ big = {{0x00000000, 0x43380000}}, /* 6755399441055744 */
+/**/ big1 = {{0x00000000, 0x43580000}}, /* 27021597764222976 */
+/**/ hp0 = {{0x54442D18, 0x3FF921FB}}, /* 1.5707963267948966 */
+/**/ hp1 = {{0x33145C07, 0x3C91A626}}, /* 6.123233995736766e-17 */
+/**/ mp1 = {{0x58000000, 0x3FF921FB}}, /* 1.5707963407039642 */
+/**/ mp2 = {{0x40000000, 0xBE4DDE97}}; /*-1.3909067675399456e-08 */
+
+#endif
+#endif
+
+static const double toverp[75] = { /* 2/ PI base 24*/
+ 10680707.0, 7228996.0, 1387004.0, 2578385.0, 16069853.0,
+ 12639074.0, 9804092.0, 4427841.0, 16666979.0, 11263675.0,
+ 12935607.0, 2387514.0, 4345298.0, 14681673.0, 3074569.0,
+ 13734428.0, 16653803.0, 1880361.0, 10960616.0, 8533493.0,
+ 3062596.0, 8710556.0, 7349940.0, 6258241.0, 3772886.0,
+ 3769171.0, 3798172.0, 8675211.0, 12450088.0, 3874808.0,
+ 9961438.0, 366607.0, 15675153.0, 9132554.0, 7151469.0,
+ 3571407.0, 2607881.0, 12013382.0, 4155038.0, 6285869.0,
+ 7677882.0, 13102053.0, 15825725.0, 473591.0, 9065106.0,
+ 15363067.0, 6271263.0, 9264392.0, 5636912.0, 4652155.0,
+ 7056368.0, 13614112.0, 10155062.0, 1944035.0, 9527646.0,
+ 15080200.0, 6658437.0, 6231200.0, 6832269.0, 16767104.0,
+ 5075751.0, 3212806.0, 1398474.0, 7579849.0, 6349435.0,
+ 12618859.0, 4703257.0, 12806093.0, 14477321.0, 2786137.0,
+ 12875403.0, 9837734.0, 14528324.0, 13719321.0, 343717.0 };
+
+static const double split = CN; /* 2^27 + 1 */
+
+#endif
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h
new file mode 100644
index 0000000000..db4c4d81de
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h
@@ -0,0 +1,38 @@
+/*
+ * IBM Accurate Mathematical Library
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+
+/***********************************************************************/
+/*MODULE_NAME: dla.h */
+/* */
+/* This file holds C language macros for 'Double Length Floating Point */
+/* Arithmetic'. The macros are based on the paper: */
+/* T.J.Dekker, "A floating-point Technique for extending the */
+/* Available Precision", Number. Math. 18, 224-242 (1971). */
+/* A Double-Length number is defined by a pair (r,s), of IEEE double */
+/* precision floating point numbers that satisfy, */
+/* */
+/* abs(s) <= abs(r+s)*2**(-53)/(1+2**(-53)). */
+/* */
+/* The computer arithmetic assumed is IEEE double precision in */
+/* round to nearest mode. All variables in the macros must be of type */
+/* IEEE double. */
+/***********************************************************************/
+
+/* CN = 1+2**27 = '41a0000002000000' IEEE double format. Use it to split a
+ double for better accuracy. */
+#define CN 134217729.0
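[Editorial aside, not part of the upstream diff] CN is the classic Veltkamp/Dekker splitter; branred.c uses it verbatim as "t = x*split; x1 = t - (t - x); x2 = x - x1;". A self-contained sketch of that split, under the same round-to-nearest IEEE double assumption stated above (the helper name is illustrative, not glibc's):

    #include <stdio.h>

    #define SPLITTER 134217729.0  /* CN = 2^27 + 1, as in dla.h */

    /* Split x into a high part with about 26 significant bits and an
       exact low-order remainder, so that x == *hi + *lo. */
    static void split_double(double x, double *hi, double *lo) {
      double t = x * SPLITTER;
      *hi = t - (t - x);
      *lo = x - *hi;
    }

    int main(void) {
      double hi, lo;
      split_double(1.0 / 3.0, &hi, &lo);
      printf("1/3 = %.17g + %.17g\n", hi, lo);
      return 0;
    }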
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h
new file mode 100644
index 0000000000..d97daca4df
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h
@@ -0,0 +1,21 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// glibc has a couple of endian.h files. This defines the macros expected by
+// the code in this directory using macros defined by clang.
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#define BIG_ENDI 1
+#undef LITTLE_ENDI
+#define HIGH_HALF 0
+#define LOW_HALF 1
+#elif (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#undef BIG_ENDI
+#define LITTLE_ENDI 1
+#define HIGH_HALF 1
+#define LOW_HALF 0
+#else
+#error
+#endif
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h
new file mode 100644
index 0000000000..a57cbaa8b9
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h
@@ -0,0 +1,34 @@
+/*
+ * IBM Accurate Mathematical Library
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+
+/******************************************************************/
+/* */
+/* MODULE_NAME:mydefs.h */
+/* */
+/* common data and definition */
+/******************************************************************/
+
+#ifndef MY_H
+#define MY_H
+
+typedef int int4;
+typedef union { int4 i[2]; double x; double d; } mynumber;
+
+#define max(x, y) (((y) > (x)) ? (y) : (x))
+#define min(x, y) (((y) < (x)) ? (y) : (x))
+#endif
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c
new file mode 100644
index 0000000000..a7beb04f97
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c
@@ -0,0 +1,312 @@
+/*
+ * IBM Accurate Mathematical Library
+ * written by International Business Machines Corp.
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+/****************************************************************************/
+/* */
+/* MODULE_NAME:usncs.c */
+/* */
+/* FUNCTIONS: usin */
+/* ucos */
+/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h usncs.h */
+/* branred.c sincos.tbl */
+/* */
+/* An ultimate sin and cos routine. Given an IEEE double machine number x */
+/* it computes sin(x) or cos(x) with ~0.55 ULP. */
+/* Assumption: Machine arithmetic operations are performed in */
+/* round to nearest mode of IEEE 754 standard. */
+/* */
+/****************************************************************************/
+
+
+#include <errno.h>
+#include <float.h>
+#include "endian.h"
+#include "mydefs.h"
+#include "usncs.h"
+#include <math.h>
+
+#define attribute_hidden
+#if !defined(__always_inline)
+#define __always_inline
+#endif
+
+/* Helper macros to compute sin of the input values. */
+#define POLYNOMIAL2(xx) ((((s5 * (xx) + s4) * (xx) + s3) * (xx) + s2) * (xx))
+
+#define POLYNOMIAL(xx) (POLYNOMIAL2 (xx) + s1)
+
+/* The computed polynomial is a variation of the Taylor series expansion for
+ sin(x):
+
+ x - x^3/3! + x^5/5! - x^7/7! + x^9/9! - dx*x^2/2 + dx
+
+ The constants s1, s2, s3, etc. are pre-computed values of 1/3!, 1/5! and so
+ on. The result is returned to LHS. */
+#define TAYLOR_SIN(xx, x, dx) \
+({ \
+ double t = ((POLYNOMIAL (xx) * (x) - 0.5 * (dx)) * (xx) + (dx)); \
+ double res = (x) + t; \
+ res; \
+})
+
+#define SINCOS_TABLE_LOOKUP(u, sn, ssn, cs, ccs) \
+({ \
+ int4 k = u.i[LOW_HALF] << 2; \
+ sn = __sincostab.x[k]; \
+ ssn = __sincostab.x[k + 1]; \
+ cs = __sincostab.x[k + 2]; \
+ ccs = __sincostab.x[k + 3]; \
+})
+
+#ifndef SECTION
+# define SECTION
+#endif
+
+extern const union
+{
+ int4 i[880];
+ double x[440];
+} __sincostab attribute_hidden;
+
+static const double
+ sn3 = -1.66666666666664880952546298448555E-01,
+ sn5 = 8.33333214285722277379541354343671E-03,
+ cs2 = 4.99999999999999999999950396842453E-01,
+ cs4 = -4.16666666666664434524222570944589E-02,
+ cs6 = 1.38888874007937613028114285595617E-03;
+
+int __branred (double x, double *a, double *aa);
+
+/* Given a number partitioned into X and DX, this function computes the cosine
+ of the number by combining the sin and cos of X (as computed by a variation
+ of the Taylor series) with the values looked up from the sin/cos table to
+ get the result. */
+static __always_inline double
+do_cos (double x, double dx)
+{
+ mynumber u;
+
+ if (x < 0)
+ dx = -dx;
+
+ u.x = big + fabs (x);
+ x = fabs (x) - (u.x - big) + dx;
+
+ double xx, s, sn, ssn, c, cs, ccs, cor;
+ xx = x * x;
+ s = x + x * xx * (sn3 + xx * sn5);
+ c = xx * (cs2 + xx * (cs4 + xx * cs6));
+ SINCOS_TABLE_LOOKUP (u, sn, ssn, cs, ccs);
+ cor = (ccs - s * ssn - cs * c) - sn * s;
+ return cs + cor;
+}
+
+/* Given a number partitioned into X and DX, this function computes the sine of
+ the number by combining the sin and cos of X (as computed by a variation of
+ the Taylor series) with the values looked up from the sin/cos table to get
+ the result. */
+static __always_inline double
+do_sin (double x, double dx)
+{
+ double xold = x;
+ /* Max ULP is 0.501 if |x| < 0.126, otherwise ULP is 0.518. */
+ if (fabs (x) < 0.126)
+ return TAYLOR_SIN (x * x, x, dx);
+
+ mynumber u;
+
+ if (x <= 0)
+ dx = -dx;
+ u.x = big + fabs (x);
+ x = fabs (x) - (u.x - big);
+
+ double xx, s, sn, ssn, c, cs, ccs, cor;
+ xx = x * x;
+ s = x + (dx + x * xx * (sn3 + xx * sn5));
+ c = x * dx + xx * (cs2 + xx * (cs4 + xx * cs6));
+ SINCOS_TABLE_LOOKUP (u, sn, ssn, cs, ccs);
+ cor = (ssn + s * ccs - sn * c) + cs * s;
+ return copysign (sn + cor, xold);
+}
+
+/* Reduce range of x to within PI/2 with abs (x) < 105414350. The high part
+ is written to *a, the low part to *da. Range reduction is accurate to 136
+ bits so that when x is large and *a very close to zero, all 53 bits of *a
+ are correct. */
+static __always_inline int4
+reduce_sincos (double x, double *a, double *da)
+{
+ mynumber v;
+
+ double t = (x * hpinv + toint);
+ double xn = t - toint;
+ v.x = t;
+ double y = (x - xn * mp1) - xn * mp2;
+ int4 n = v.i[LOW_HALF] & 3;
+
+ double b, db, t1, t2;
+ t1 = xn * pp3;
+ t2 = y - t1;
+ db = (y - t2) - t1;
+
+ t1 = xn * pp4;
+ b = t2 - t1;
+ db += (t2 - b) - t1;
+
+ *a = b;
+ *da = db;
+ return n;
+}
+
+/* Compute sin or cos (A + DA) for the given quadrant N. */
+static __always_inline double
+do_sincos (double a, double da, int4 n)
+{
+ double retval;
+
+ if (n & 1)
+ /* Max ULP is 0.513. */
+ retval = do_cos (a, da);
+ else
+ /* Max ULP is 0.501 if xx < 0.01588, otherwise ULP is 0.518. */
+ retval = do_sin (a, da);
+
+ return (n & 2) ? -retval : retval;
+}
+
+
+/*******************************************************************/
+/* An ultimate sin routine. Given an IEEE double machine number x */
+/* it computes the rounded value of sin(x). */
+/*******************************************************************/
+#ifndef IN_SINCOS
+double
+SECTION
+glibc_sin (double x)
+{
+ double t, a, da;
+ mynumber u;
+ int4 k, m, n;
+ double retval = 0;
+
+ u.x = x;
+ m = u.i[HIGH_HALF];
+ k = 0x7fffffff & m; /* no sign */
+  if (k < 0x3e500000) /* |x| < 2^-26 => sin(x) = x */
+ {
+ retval = x;
+ }
+/*--------------------------- 2^-26<|x|< 0.855469---------------------- */
+ else if (k < 0x3feb6000)
+ {
+ /* Max ULP is 0.548. */
+ retval = do_sin (x, 0);
+ } /* else if (k < 0x3feb6000) */
+
+/*----------------------- 0.855469 <|x|<2.426265 ----------------------*/
+ else if (k < 0x400368fd)
+ {
+ t = hp0 - fabs (x);
+ /* Max ULP is 0.51. */
+ retval = copysign (do_cos (t, hp1), x);
+ } /* else if (k < 0x400368fd) */
+
+/*-------------------------- 2.426265<|x|< 105414350 ----------------------*/
+ else if (k < 0x419921FB)
+ {
+ n = reduce_sincos (x, &a, &da);
+ retval = do_sincos (a, da, n);
+ } /* else if (k < 0x419921FB ) */
+
+/* --------------------105414350 <|x| <2^1024------------------------------*/
+ else if (k < 0x7ff00000)
+ {
+ n = __branred (x, &a, &da);
+ retval = do_sincos (a, da, n);
+ }
+/*--------------------- |x| > 2^1024 ----------------------------------*/
+ else
+ {
+ retval = x / x;
+ }
+
+ return retval;
+}
+
+
+/*******************************************************************/
+/* An ultimate cos routine. Given an IEEE double machine number x */
+/* it computes the rounded value of cos(x). */
+/*******************************************************************/
+
+double
+SECTION
+glibc_cos (double x)
+{
+ double y, a, da;
+ mynumber u;
+ int4 k, m, n;
+
+ double retval = 0;
+
+ u.x = x;
+ m = u.i[HIGH_HALF];
+ k = 0x7fffffff & m;
+
+ /* |x|<2^-27 => cos(x)=1 */
+ if (k < 0x3e400000)
+ retval = 1.0;
+
+ else if (k < 0x3feb6000)
+ { /* 2^-27 < |x| < 0.855469 */
+ /* Max ULP is 0.51. */
+ retval = do_cos (x, 0);
+ } /* else if (k < 0x3feb6000) */
+
+ else if (k < 0x400368fd)
+    { /* 0.855469 < |x| < 2.426265 */
+ y = hp0 - fabs (x);
+ a = y + hp1;
+ da = (y - a) + hp1;
+ /* Max ULP is 0.501 if xx < 0.01588 or 0.518 otherwise.
+ Range reduction uses 106 bits here which is sufficient. */
+ retval = do_sin (a, da);
+ } /* else if (k < 0x400368fd) */
+
+ else if (k < 0x419921FB)
+ { /* 2.426265<|x|< 105414350 */
+ n = reduce_sincos (x, &a, &da);
+ retval = do_sincos (a, da, n + 1);
+ } /* else if (k < 0x419921FB ) */
+
+ /* 105414350 <|x| <2^1024 */
+ else if (k < 0x7ff00000)
+ {
+ n = __branred (x, &a, &da);
+ retval = do_sincos (a, da, n + 1);
+ }
+
+ else
+ {
+ retval = x / x; /* |x| > 2^1024 */
+ }
+
+ return retval;
+}
+
+#endif
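
do_sincos() leans on the identity sin(a + n*pi/2) = +/-sin(a) or +/-cos(a), selected by the quadrant n mod 4; that is also why glibc_cos() passes n + 1 so the cosine reuses the same path. A throwaway check of that identity against libm, written here only as an illustration (half_pi below is the hp0 constant from usncs.h):

  #include <math.h>
  #include <stdio.h>

  int main(void) {
    const double half_pi = 1.5707963267948966;        /* hp0 from usncs.h */
    const double a = 0.3;                             /* a reduced argument, |a| <= pi/4 */
    for (int n = 0; n < 4; ++n) {
      double direct = sin(a + n * half_pi);
      double dispatched = (n & 1) ? cos(a) : sin(a);  /* mirrors do_sincos() */
      if (n & 2)
        dispatched = -dispatched;
      printf("n=%d  direct=% .17g  dispatched=% .17g\n", n, direct, dispatched);
    }
    return 0;
  }
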
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c
new file mode 100644
index 0000000000..8ae29e2f02
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c
@@ -0,0 +1,913 @@
+/*
+ * IBM Accurate Mathematical Library
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "mydefs.h"
+#include "endian.h"
+
+/****************************************************************/
+/* TABLES FOR THE usin() and ucos() FUNCTIONS */
+/****************************************************************/
+
+
+#ifdef BIG_ENDI
+const union {int4 i[880]; double x[440];}__sincostab = { .i = {
+/**/ 0x00000000, 0x00000000,
+/**/ 0x00000000, 0x00000000,
+/**/ 0x3FF00000, 0x00000000,
+/**/ 0x00000000, 0x00000000,
+/**/ 0x3F7FFFEA, 0xAAAEEEEF,
+/**/ 0xBC1E45E2, 0xEC67B77C,
+/**/ 0x3FEFFFC0, 0x00155552,
+/**/ 0x3C8F4A01, 0xA0196DAE,
+/**/ 0x3F8FFFAA, 0xAAEEEED5,
+/**/ 0xBC02AB63, 0x9A9F0777,
+/**/ 0x3FEFFF00, 0x0155549F,
+/**/ 0x3C828A28, 0xA03A5EF3,
+/**/ 0x3F97FF70, 0x01033255,
+/**/ 0x3BFEFE2B, 0x51527336,
+/**/ 0x3FEFFDC0, 0x06BFF7E6,
+/**/ 0x3C8AE6DA, 0xE86977BD,
+/**/ 0x3F9FFEAA, 0xAEEEE86F,
+/**/ 0xBC3CD406, 0xFB224AE2,
+/**/ 0x3FEFFC00, 0x155527D3,
+/**/ 0xBC83B544, 0x92D89B5B,
+/**/ 0x3FA3FEB2, 0xB12D45D5,
+/**/ 0x3C34EC54, 0x203D1C11,
+/**/ 0x3FEFF9C0, 0x3414A7BA,
+/**/ 0x3C6991F4, 0xBE6C59BF,
+/**/ 0x3FA7FDC0, 0x1032FBA9,
+/**/ 0xBC4599BD, 0xF46E997A,
+/**/ 0x3FEFF700, 0x6BFDF99F,
+/**/ 0xBC78B3B5, 0x60648D5F,
+/**/ 0x3FABFC6D, 0x78586DAC,
+/**/ 0x3C18E4FD, 0x03DBF236,
+/**/ 0x3FEFF3C0, 0xC8103A31,
+/**/ 0x3C74856D, 0xBDDC0E66,
+/**/ 0x3FAFFAAA, 0xEEED4EDB,
+/**/ 0xBC42D16D, 0x32684B69,
+/**/ 0x3FEFF001, 0x5549F4D3,
+/**/ 0x3C832838, 0x7B99426F,
+/**/ 0x3FB1FC34, 0x3D808BEF,
+/**/ 0xBC5F3D32, 0xE6F3BE4F,
+/**/ 0x3FEFEBC2, 0x22A8EF9F,
+/**/ 0x3C579349, 0x34F54C77,
+/**/ 0x3FB3FACB, 0x12D1755B,
+/**/ 0xBC592191, 0x5299468C,
+/**/ 0x3FEFE703, 0x4129EF6F,
+/**/ 0xBC6CBF43, 0x37C96F97,
+/**/ 0x3FB5F911, 0xFD10B737,
+/**/ 0xBC50184F, 0x02BE9102,
+/**/ 0x3FEFE1C4, 0xC3C873EB,
+/**/ 0xBC35A9C9, 0x057C4A02,
+/**/ 0x3FB7F701, 0x032550E4,
+/**/ 0x3C3AFC2D, 0x1800501A,
+/**/ 0x3FEFDC06, 0xBF7E6B9B,
+/**/ 0x3C831902, 0xB535F8DB,
+/**/ 0x3FB9F490, 0x2D55D1F9,
+/**/ 0x3C52696D, 0x7EAC1DC1,
+/**/ 0x3FEFD5C9, 0x4B43E000,
+/**/ 0xBC62E768, 0xCB4F92F9,
+/**/ 0x3FBBF1B7, 0x8568391D,
+/**/ 0x3C5E9184, 0x1DEA4CC8,
+/**/ 0x3FEFCF0C, 0x800E99B1,
+/**/ 0x3C6EA3D7, 0x86D186AC,
+/**/ 0x3FBDEE6F, 0x16C1CCE6,
+/**/ 0xBC450F8E, 0x2FB71673,
+/**/ 0x3FEFC7D0, 0x78D1BC88,
+/**/ 0x3C8075D2, 0x447DB685,
+/**/ 0x3FBFEAAE, 0xEE86EE36,
+/**/ 0xBC4AFCB2, 0xBCC6F03B,
+/**/ 0x3FEFC015, 0x527D5BD3,
+/**/ 0x3C8B68F3, 0x5094EFB8,
+/**/ 0x3FC0F337, 0x8DDD71D1,
+/**/ 0x3C6D8468, 0x724F0F9E,
+/**/ 0x3FEFB7DB, 0x2BFE0695,
+/**/ 0x3C821DAD, 0xF4F65AB1,
+/**/ 0x3FC1F0D3, 0xD7AFCEAF,
+/**/ 0xBC66EF95, 0x099769A5,
+/**/ 0x3FEFAF22, 0x263C4BD3,
+/**/ 0xBC552ACE, 0x133A2769,
+/**/ 0x3FC2EE28, 0x5E4AB88F,
+/**/ 0xBC6E4D0F, 0x05DEE058,
+/**/ 0x3FEFA5EA, 0x641C36F2,
+/**/ 0x3C404DA6, 0xED17CC7C,
+/**/ 0x3FC3EB31, 0x2C5D66CB,
+/**/ 0x3C647D66, 0x6B66CB91,
+/**/ 0x3FEF9C34, 0x0A7CC428,
+/**/ 0x3C8C5B6B, 0x063B7462,
+/**/ 0x3FC4E7EA, 0x4DC5F27B,
+/**/ 0x3C5949DB, 0x2AC072FC,
+/**/ 0x3FEF91FF, 0x40374D01,
+/**/ 0xBC67D03F, 0x4D3A9E4C,
+/**/ 0x3FC5E44F, 0xCFA126F3,
+/**/ 0xBC66F443, 0x063F89B6,
+/**/ 0x3FEF874C, 0x2E1EECF6,
+/**/ 0xBC8C6514, 0xE1332B16,
+/**/ 0x3FC6E05D, 0xC05A4D4C,
+/**/ 0xBBD32C5C, 0x8B81C940,
+/**/ 0x3FEF7C1A, 0xFEFFDE24,
+/**/ 0xBC78F55B, 0xC47540B1,
+/**/ 0x3FC7DC10, 0x2FBAF2B5,
+/**/ 0x3C45AB50, 0xE23C97C3,
+/**/ 0x3FEF706B, 0xDF9ECE1C,
+/**/ 0xBC8698C8, 0x0C36DCB4,
+/**/ 0x3FC8D763, 0x2EFAA944,
+/**/ 0xBC620FA2, 0x62CBB953,
+/**/ 0x3FEF643E, 0xFEB82ACD,
+/**/ 0x3C76B00A, 0xC1FE28AC,
+/**/ 0x3FC9D252, 0xD0CEC312,
+/**/ 0x3C59C43D, 0x80B1137D,
+/**/ 0x3FEF5794, 0x8CFF6797,
+/**/ 0x3C6E3A0D, 0x3E03B1D5,
+/**/ 0x3FCACCDB, 0x297A0765,
+/**/ 0xBC59883B, 0x57D6CDEB,
+/**/ 0x3FEF4A6C, 0xBD1E3A79,
+/**/ 0x3C813DF0, 0xEDAEBB57,
+/**/ 0x3FCBC6F8, 0x4EDC6199,
+/**/ 0x3C69C1A5, 0x6A7B0CAB,
+/**/ 0x3FEF3CC7, 0xC3B3D16E,
+/**/ 0xBC621A3A, 0xD28A3494,
+/**/ 0x3FCCC0A6, 0x588289A3,
+/**/ 0xBC6868D0, 0x9BC87C6B,
+/**/ 0x3FEF2EA5, 0xD753FFED,
+/**/ 0x3C8CC421, 0x5F56D583,
+/**/ 0x3FCDB9E1, 0x5FB5A5D0,
+/**/ 0xBC632E20, 0xD6CC6FC2,
+/**/ 0x3FEF2007, 0x3086649F,
+/**/ 0x3C7B9404, 0x16C1984B,
+/**/ 0x3FCEB2A5, 0x7F8AE5A3,
+/**/ 0xBC60BE06, 0xAF572CEB,
+/**/ 0x3FEF10EC, 0x09C5873B,
+/**/ 0x3C8D9072, 0x762C1283,
+/**/ 0x3FCFAAEE, 0xD4F31577,
+/**/ 0xBC615D88, 0x508E32B8,
+/**/ 0x3FEF0154, 0x9F7DEEA1,
+/**/ 0x3C8D3C1E, 0x99E5CAFD,
+/**/ 0x3FD0515C, 0xBF65155C,
+/**/ 0xBC79B8C2, 0x9DFD8EC8,
+/**/ 0x3FEEF141, 0x300D2F26,
+/**/ 0xBC82AA1B, 0x08DED372,
+/**/ 0x3FD0CD00, 0xCEF36436,
+/**/ 0xBC79FB0A, 0x0C93E2B5,
+/**/ 0x3FEEE0B1, 0xFBC0F11C,
+/**/ 0xBC4BFD23, 0x80BBC3B1,
+/**/ 0x3FD14861, 0xAA94DDEB,
+/**/ 0xBC6BE881, 0xB5B615A4,
+/**/ 0x3FEECFA7, 0x44D5EFA1,
+/**/ 0xBC556D0A, 0x4AF541D0,
+/**/ 0x3FD1C37D, 0x64C6B876,
+/**/ 0x3C746076, 0xFE0DCFF5,
+/**/ 0x3FEEBE21, 0x4F76EFA8,
+/**/ 0xBC802F9F, 0x12BA543E,
+/**/ 0x3FD23E52, 0x111AAF36,
+/**/ 0xBC74F080, 0x334EFF18,
+/**/ 0x3FEEAC20, 0x61BBAF4F,
+/**/ 0x3C62C1D5, 0x3E94658D,
+/**/ 0x3FD2B8DD, 0xC43EB49F,
+/**/ 0x3C615538, 0x99F2D807,
+/**/ 0x3FEE99A4, 0xC3A7CD83,
+/**/ 0xBC82264B, 0x1BC53CE8,
+/**/ 0x3FD3331E, 0x94049F87,
+/**/ 0x3C7E0CB6, 0xB40C302C,
+/**/ 0x3FEE86AE, 0xBF29A9ED,
+/**/ 0x3C89397A, 0xFDBB58A7,
+/**/ 0x3FD3AD12, 0x9769D3D8,
+/**/ 0x3C003D55, 0x04878398,
+/**/ 0x3FEE733E, 0xA0193D40,
+/**/ 0xBC86428B, 0x3546CE13,
+/**/ 0x3FD426B7, 0xE69EE697,
+/**/ 0xBC7F09C7, 0x5705C59F,
+/**/ 0x3FEE5F54, 0xB436E9D0,
+/**/ 0x3C87EB0F, 0xD02FC8BC,
+/**/ 0x3FD4A00C, 0x9B0F3D20,
+/**/ 0x3C7823BA, 0x6BB08EAD,
+/**/ 0x3FEE4AF1, 0x4B2A449C,
+/**/ 0xBC868CA0, 0x2E8A6833,
+/**/ 0x3FD5190E, 0xCF68A77A,
+/**/ 0x3C7B3571, 0x55EEF0F3,
+/**/ 0x3FEE3614, 0xB680D6A5,
+/**/ 0xBC727793, 0xAA015237,
+/**/ 0x3FD591BC, 0x9FA2F597,
+/**/ 0x3C67C74B, 0xAC3FE0CB,
+/**/ 0x3FEE20BF, 0x49ACD6C1,
+/**/ 0xBC5660AE, 0xC7EF636C,
+/**/ 0x3FD60A14, 0x29078775,
+/**/ 0x3C5B1FD8, 0x0BA89133,
+/**/ 0x3FEE0AF1, 0x5A03DBCE,
+/**/ 0x3C5FE8E7, 0x02771AE6,
+/**/ 0x3FD68213, 0x8A38D7F7,
+/**/ 0xBC7D8892, 0x02444AAD,
+/**/ 0x3FEDF4AB, 0x3EBD875E,
+/**/ 0xBC8E2D8A, 0x7E6736C4,
+/**/ 0x3FD6F9B8, 0xE33A0255,
+/**/ 0x3C742BC1, 0x4EE9DA0D,
+/**/ 0x3FEDDDED, 0x50F228D6,
+/**/ 0xBC6E80C8, 0xD42BA2BF,
+/**/ 0x3FD77102, 0x55764214,
+/**/ 0xBC66EAD7, 0x314BB6CE,
+/**/ 0x3FEDC6B7, 0xEB995912,
+/**/ 0x3C54B364, 0x776DCD35,
+/**/ 0x3FD7E7EE, 0x03C86D4E,
+/**/ 0xBC7B63BC, 0xDABF5AF2,
+/**/ 0x3FEDAF0B, 0x6B888E83,
+/**/ 0x3C8A249E, 0x2B5E5CEA,
+/**/ 0x3FD85E7A, 0x12826949,
+/**/ 0x3C78A40E, 0x9B5FACE0,
+/**/ 0x3FED96E8, 0x2F71A9DC,
+/**/ 0x3C8FF61B, 0xD5D2039D,
+/**/ 0x3FD8D4A4, 0xA774992F,
+/**/ 0x3C744A02, 0xEA766326,
+/**/ 0x3FED7E4E, 0x97E17B4A,
+/**/ 0xBC63B770, 0x352BED94,
+/**/ 0x3FD94A6B, 0xE9F546C5,
+/**/ 0xBC769CE1, 0x3E683F58,
+/**/ 0x3FED653F, 0x073E4040,
+/**/ 0xBC876236, 0x434BEC37,
+/**/ 0x3FD9BFCE, 0x02E80510,
+/**/ 0x3C709E39, 0xA320B0A4,
+/**/ 0x3FED4BB9, 0xE1C619E0,
+/**/ 0x3C8F34BB, 0x77858F61,
+/**/ 0x3FDA34C9, 0x1CC50CCA,
+/**/ 0xBC5A310E, 0x3B50CECD,
+/**/ 0x3FED31BF, 0x8D8D7C06,
+/**/ 0x3C7E60DD, 0x3089CBDD,
+/**/ 0x3FDAA95B, 0x63A09277,
+/**/ 0xBC66293E, 0xB13C0381,
+/**/ 0x3FED1750, 0x727D94F0,
+/**/ 0x3C80D52B, 0x1EC1A48E,
+/**/ 0x3FDB1D83, 0x05321617,
+/**/ 0xBC7AE242, 0xCB99F519,
+/**/ 0x3FECFC6C, 0xFA52AD9F,
+/**/ 0x3C88B5B5, 0x508F2A0D,
+/**/ 0x3FDB913E, 0x30DBAC43,
+/**/ 0xBC7E38AD, 0x2F6C3FF1,
+/**/ 0x3FECE115, 0x909A82E5,
+/**/ 0x3C81F139, 0xBB31109A,
+/**/ 0x3FDC048B, 0x17B140A3,
+/**/ 0x3C619FE6, 0x757E9FA7,
+/**/ 0x3FECC54A, 0xA2B2972E,
+/**/ 0x3C64EE16, 0x2BA83A98,
+/**/ 0x3FDC7767, 0xEC7FD19E,
+/**/ 0xBC5EB14D, 0x1A3D5826,
+/**/ 0x3FECA90C, 0x9FC67D0B,
+/**/ 0xBC646A81, 0x485E3462,
+/**/ 0x3FDCE9D2, 0xE3D4A51F,
+/**/ 0xBC62FC8A, 0x12DAE298,
+/**/ 0x3FEC8C5B, 0xF8CE1A84,
+/**/ 0x3C7AB3D1, 0xA1590123,
+/**/ 0x3FDD5BCA, 0x34047661,
+/**/ 0x3C728A44, 0xA75FC29C,
+/**/ 0x3FEC6F39, 0x208BE53B,
+/**/ 0xBC8741DB, 0xFBAADB42,
+/**/ 0x3FDDCD4C, 0x15329C9A,
+/**/ 0x3C70D4C6, 0xE171FD9A,
+/**/ 0x3FEC51A4, 0x8B8B175E,
+/**/ 0xBC61BBB4, 0x3B9AA880,
+/**/ 0x3FDE3E56, 0xC1582A69,
+/**/ 0xBC50A482, 0x1099F88F,
+/**/ 0x3FEC339E, 0xB01DDD81,
+/**/ 0xBC8CAAF5, 0xEE82C5C0,
+/**/ 0x3FDEAEE8, 0x744B05F0,
+/**/ 0xBC5789B4, 0x3C9B027D,
+/**/ 0x3FEC1528, 0x065B7D50,
+/**/ 0xBC889211, 0x1312E828,
+/**/ 0x3FDF1EFF, 0x6BC4F97B,
+/**/ 0x3C717212, 0xF8A7525C,
+/**/ 0x3FEBF641, 0x081E7536,
+/**/ 0x3C8B7BD7, 0x1628A9A1,
+/**/ 0x3FDF8E99, 0xE76ABC97,
+/**/ 0x3C59D950, 0xAF2D00A3,
+/**/ 0x3FEBD6EA, 0x310294F5,
+/**/ 0x3C731BBC, 0xC88C109D,
+/**/ 0x3FDFFDB6, 0x28D2F57A,
+/**/ 0x3C6F4A99, 0x2E905B6A,
+/**/ 0x3FEBB723, 0xFE630F32,
+/**/ 0x3C772BD2, 0x452D0A39,
+/**/ 0x3FE03629, 0x39C69955,
+/**/ 0xBC82D8CD, 0x78397B01,
+/**/ 0x3FEB96EE, 0xEF58840E,
+/**/ 0x3C545A3C, 0xC78FADE0,
+/**/ 0x3FE06D36, 0x86946E5B,
+/**/ 0x3C83F5AE, 0x4538FF1B,
+/**/ 0x3FEB764B, 0x84B704C2,
+/**/ 0xBC8F5848, 0xC21B389B,
+/**/ 0x3FE0A402, 0x1E9E1001,
+/**/ 0xBC86F643, 0xA13914F6,
+/**/ 0x3FEB553A, 0x410C104E,
+/**/ 0x3C58FF79, 0x47027A16,
+/**/ 0x3FE0DA8B, 0x26B5672E,
+/**/ 0xBC8A58DE, 0xF0BEE909,
+/**/ 0x3FEB33BB, 0xA89C8948,
+/**/ 0x3C8EA6A5, 0x1D1F6CA9,
+/**/ 0x3FE110D0, 0xC4B69C3B,
+/**/ 0x3C8D9189, 0x98809981,
+/**/ 0x3FEB11D0, 0x4162A4C6,
+/**/ 0x3C71DD56, 0x1EFBC0C2,
+/**/ 0x3FE146D2, 0x1F8B7F82,
+/**/ 0x3C7BF953, 0x5E2739A8,
+/**/ 0x3FEAEF78, 0x930BD275,
+/**/ 0xBC7F8362, 0x79746F94,
+/**/ 0x3FE17C8E, 0x5F2EEDB0,
+/**/ 0x3C635E57, 0x102E2488,
+/**/ 0x3FEACCB5, 0x26F69DE5,
+/**/ 0x3C88FB6A, 0x8DD6B6CC,
+/**/ 0x3FE1B204, 0xACB02FDD,
+/**/ 0xBC5F190C, 0x70CBB5FF,
+/**/ 0x3FEAA986, 0x88308913,
+/**/ 0xBC0B83D6, 0x07CD5070,
+/**/ 0x3FE1E734, 0x3236574C,
+/**/ 0x3C722A3F, 0xA4F41D5A,
+/**/ 0x3FEA85ED, 0x4373E02D,
+/**/ 0x3C69BE06, 0x385EC792,
+/**/ 0x3FE21C1C, 0x1B0394CF,
+/**/ 0x3C5E5B32, 0x4B23AA31,
+/**/ 0x3FEA61E9, 0xE72586AF,
+/**/ 0x3C858330, 0xE2FD453F,
+/**/ 0x3FE250BB, 0x93788BBB,
+/**/ 0x3C7EA3D0, 0x2457BCCE,
+/**/ 0x3FEA3D7D, 0x0352BDCF,
+/**/ 0xBC868DBA, 0xECA19669,
+/**/ 0x3FE28511, 0xC917A067,
+/**/ 0xBC801DF1, 0xD9A16B70,
+/**/ 0x3FEA18A7, 0x29AEE445,
+/**/ 0x3C395E25, 0x736C0358,
+/**/ 0x3FE2B91D, 0xEA88421E,
+/**/ 0xBC8FA371, 0xDB216AB0,
+/**/ 0x3FE9F368, 0xED912F85,
+/**/ 0xBC81D200, 0xC5791606,
+/**/ 0x3FE2ECDF, 0x279A3082,
+/**/ 0x3C8D3557, 0xE0E7E37E,
+/**/ 0x3FE9CDC2, 0xE3F25E5C,
+/**/ 0x3C83F991, 0x12993F62,
+/**/ 0x3FE32054, 0xB148BC4F,
+/**/ 0x3C8F6B42, 0x095A135B,
+/**/ 0x3FE9A7B5, 0xA36A6514,
+/**/ 0x3C8722CF, 0xCC9FA7A9,
+/**/ 0x3FE3537D, 0xB9BE0367,
+/**/ 0x3C6B327E, 0x7AF040F0,
+/**/ 0x3FE98141, 0xC42E1310,
+/**/ 0x3C8D1FF8, 0x0488F08D,
+/**/ 0x3FE38659, 0x7456282B,
+/**/ 0xBC710FAD, 0xA93B07A8,
+/**/ 0x3FE95A67, 0xE00CB1FD,
+/**/ 0xBC80BEFD, 0xA21F862D,
+/**/ 0x3FE3B8E7, 0x15A2840A,
+/**/ 0xBC797653, 0xA7D2F07B,
+/**/ 0x3FE93328, 0x926D9E92,
+/**/ 0xBC8BB770, 0x03600CDA,
+/**/ 0x3FE3EB25, 0xD36CD53A,
+/**/ 0xBC5BE570, 0xE1570FC0,
+/**/ 0x3FE90B84, 0x784DDAF7,
+/**/ 0xBC70FEB1, 0x0AB93B87,
+/**/ 0x3FE41D14, 0xE4BA6790,
+/**/ 0x3C84608F, 0xD287ECF5,
+/**/ 0x3FE8E37C, 0x303D9AD1,
+/**/ 0xBC6463A4, 0xB53D4BF8,
+/**/ 0x3FE44EB3, 0x81CF386B,
+/**/ 0xBC83ED6C, 0x1E6A5505,
+/**/ 0x3FE8BB10, 0x5A5DC900,
+/**/ 0x3C8863E0, 0x3E9474C1,
+/**/ 0x3FE48000, 0xE431159F,
+/**/ 0xBC8B194A, 0x7463ED10,
+/**/ 0x3FE89241, 0x985D871F,
+/**/ 0x3C8C48D9, 0xC413ED84,
+/**/ 0x3FE4B0FC, 0x46AAB761,
+/**/ 0x3C20DA05, 0x738CC59A,
+/**/ 0x3FE86910, 0x8D77A6C6,
+/**/ 0x3C7338FF, 0xE2BFE9DD,
+/**/ 0x3FE4E1A4, 0xE54ED51B,
+/**/ 0xBC8A492F, 0x89B7C76A,
+/**/ 0x3FE83F7D, 0xDE701CA0,
+/**/ 0xBC4152CF, 0x609BC6E8,
+/**/ 0x3FE511F9, 0xFD7B351C,
+/**/ 0xBC85C0E8, 0x61C48831,
+/**/ 0x3FE8158A, 0x31916D5D,
+/**/ 0xBC6DE8B9, 0x0B8228DE,
+/**/ 0x3FE541FA, 0xCDDBB724,
+/**/ 0x3C7232C2, 0x8520D391,
+/**/ 0x3FE7EB36, 0x2EAA1488,
+/**/ 0x3C5A1D65, 0xA4A5959F,
+/**/ 0x3FE571A6, 0x966D59B3,
+/**/ 0x3C5C843B, 0x4D0FB198,
+/**/ 0x3FE7C082, 0x7F09E54F,
+/**/ 0xBC6C73D6, 0xD72AEE68,
+/**/ 0x3FE5A0FC, 0x98813A12,
+/**/ 0xBC8D82E2, 0xB7D4227B,
+/**/ 0x3FE7956F, 0xCD7F6543,
+/**/ 0xBC8AB276, 0xE9D45AE4,
+/**/ 0x3FE5CFFC, 0x16BF8F0D,
+/**/ 0x3C896CB3, 0x70EB578A,
+/**/ 0x3FE769FE, 0xC655211F,
+/**/ 0xBC6827D5, 0xCF8C68C5,
+/**/ 0x3FE5FEA4, 0x552A9E57,
+/**/ 0x3C80B6CE, 0xF7EE20B7,
+/**/ 0x3FE73E30, 0x174EFBA1,
+/**/ 0xBC65D3AE, 0x3D94AD5F,
+/**/ 0x3FE62CF4, 0x9921AC79,
+/**/ 0xBC8EDD98, 0x55B6241A,
+/**/ 0x3FE71204, 0x6FA77678,
+/**/ 0x3C8425B0, 0xA5029C81,
+/**/ 0x3FE65AEC, 0x2963E755,
+/**/ 0x3C8126F9, 0x6B71053C,
+/**/ 0x3FE6E57C, 0x800CF55E,
+/**/ 0x3C860286, 0xDEDBD0A6,
+/**/ 0x3FE6888A, 0x4E134B2F,
+/**/ 0xBC86B7D3, 0x7644D5E6,
+/**/ 0x3FE6B898, 0xFA9EFB5D,
+/**/ 0x3C715AC7, 0x86CCF4B2,
+/**/ 0x3FE6B5CE, 0x50B7821A,
+/**/ 0xBC65D515, 0x8F702E0F,
+/**/ 0x3FE68B5A, 0x92EB6253,
+/**/ 0xBC89A91A, 0xD985F89C,
+/**/ 0x3FE6E2B7, 0x7C40BDE1,
+/**/ 0xBC70E729, 0x857FAD53,
+/**/ 0x3FE65DC1, 0xFDEB8CBA,
+/**/ 0xBC597C1B, 0x47337C77,
+/**/ 0x3FE70F45, 0x1D0A8C40,
+/**/ 0x3C697EDE, 0x3885770D,
+/**/ 0x3FE62FCF, 0xF20191C7,
+/**/ 0x3C6D9143, 0x895756EF,
+/**/ 0x3FE73B76, 0x80DEA578,
+/**/ 0xBC722483, 0x06DC12A2,
+/**/ 0x3FE60185, 0x26F563DF,
+/**/ 0x3C846CA5, 0xE0E432D0,
+/**/ 0x3FE7674A, 0xF6F7B524,
+/**/ 0x3C7E9D3F, 0x94AC84A8,
+/**/ 0x3FE5D2E2, 0x55F1F17A,
+/**/ 0x3C803141, 0x04C8892B,
+/**/ 0x3FE792C1, 0xD0041D52,
+/**/ 0xBC8ABF05, 0xEEB354EB,
+/**/ 0x3FE5A3E8, 0x39824077,
+/**/ 0x3C8428AA, 0x2759BE62,
+/**/ 0x3FE7BDDA, 0x5E28B3C2,
+/**/ 0x3C4AD119, 0x7CCD0393,
+/**/ 0x3FE57497, 0x8D8E83F2,
+/**/ 0x3C8F4714, 0xAF282D23,
+/**/ 0x3FE7E893, 0xF5037959,
+/**/ 0x3C80EEFB, 0xAA650C4C,
+/**/ 0x3FE544F1, 0x0F592CA5,
+/**/ 0xBC8E7AE8, 0xE6C7A62F,
+/**/ 0x3FE812ED, 0xE9AE4BA4,
+/**/ 0xBC87830A, 0xDF402DDA,
+/**/ 0x3FE514F5, 0x7D7BF3DA,
+/**/ 0x3C747A10, 0x8073C259 } };
+#else
+#ifdef LITTLE_ENDI
+const union {int4 i[880]; double x[440];} __sincostab = { .i = {
+/**/ 0x00000000, 0x00000000,
+/**/ 0x00000000, 0x00000000,
+/**/ 0x00000000, 0x3FF00000,
+/**/ 0x00000000, 0x00000000,
+/**/ 0xAAAEEEEF, 0x3F7FFFEA,
+/**/ 0xEC67B77C, 0xBC1E45E2,
+/**/ 0x00155552, 0x3FEFFFC0,
+/**/ 0xA0196DAE, 0x3C8F4A01,
+/**/ 0xAAEEEED5, 0x3F8FFFAA,
+/**/ 0x9A9F0777, 0xBC02AB63,
+/**/ 0x0155549F, 0x3FEFFF00,
+/**/ 0xA03A5EF3, 0x3C828A28,
+/**/ 0x01033255, 0x3F97FF70,
+/**/ 0x51527336, 0x3BFEFE2B,
+/**/ 0x06BFF7E6, 0x3FEFFDC0,
+/**/ 0xE86977BD, 0x3C8AE6DA,
+/**/ 0xAEEEE86F, 0x3F9FFEAA,
+/**/ 0xFB224AE2, 0xBC3CD406,
+/**/ 0x155527D3, 0x3FEFFC00,
+/**/ 0x92D89B5B, 0xBC83B544,
+/**/ 0xB12D45D5, 0x3FA3FEB2,
+/**/ 0x203D1C11, 0x3C34EC54,
+/**/ 0x3414A7BA, 0x3FEFF9C0,
+/**/ 0xBE6C59BF, 0x3C6991F4,
+/**/ 0x1032FBA9, 0x3FA7FDC0,
+/**/ 0xF46E997A, 0xBC4599BD,
+/**/ 0x6BFDF99F, 0x3FEFF700,
+/**/ 0x60648D5F, 0xBC78B3B5,
+/**/ 0x78586DAC, 0x3FABFC6D,
+/**/ 0x03DBF236, 0x3C18E4FD,
+/**/ 0xC8103A31, 0x3FEFF3C0,
+/**/ 0xBDDC0E66, 0x3C74856D,
+/**/ 0xEEED4EDB, 0x3FAFFAAA,
+/**/ 0x32684B69, 0xBC42D16D,
+/**/ 0x5549F4D3, 0x3FEFF001,
+/**/ 0x7B99426F, 0x3C832838,
+/**/ 0x3D808BEF, 0x3FB1FC34,
+/**/ 0xE6F3BE4F, 0xBC5F3D32,
+/**/ 0x22A8EF9F, 0x3FEFEBC2,
+/**/ 0x34F54C77, 0x3C579349,
+/**/ 0x12D1755B, 0x3FB3FACB,
+/**/ 0x5299468C, 0xBC592191,
+/**/ 0x4129EF6F, 0x3FEFE703,
+/**/ 0x37C96F97, 0xBC6CBF43,
+/**/ 0xFD10B737, 0x3FB5F911,
+/**/ 0x02BE9102, 0xBC50184F,
+/**/ 0xC3C873EB, 0x3FEFE1C4,
+/**/ 0x057C4A02, 0xBC35A9C9,
+/**/ 0x032550E4, 0x3FB7F701,
+/**/ 0x1800501A, 0x3C3AFC2D,
+/**/ 0xBF7E6B9B, 0x3FEFDC06,
+/**/ 0xB535F8DB, 0x3C831902,
+/**/ 0x2D55D1F9, 0x3FB9F490,
+/**/ 0x7EAC1DC1, 0x3C52696D,
+/**/ 0x4B43E000, 0x3FEFD5C9,
+/**/ 0xCB4F92F9, 0xBC62E768,
+/**/ 0x8568391D, 0x3FBBF1B7,
+/**/ 0x1DEA4CC8, 0x3C5E9184,
+/**/ 0x800E99B1, 0x3FEFCF0C,
+/**/ 0x86D186AC, 0x3C6EA3D7,
+/**/ 0x16C1CCE6, 0x3FBDEE6F,
+/**/ 0x2FB71673, 0xBC450F8E,
+/**/ 0x78D1BC88, 0x3FEFC7D0,
+/**/ 0x447DB685, 0x3C8075D2,
+/**/ 0xEE86EE36, 0x3FBFEAAE,
+/**/ 0xBCC6F03B, 0xBC4AFCB2,
+/**/ 0x527D5BD3, 0x3FEFC015,
+/**/ 0x5094EFB8, 0x3C8B68F3,
+/**/ 0x8DDD71D1, 0x3FC0F337,
+/**/ 0x724F0F9E, 0x3C6D8468,
+/**/ 0x2BFE0695, 0x3FEFB7DB,
+/**/ 0xF4F65AB1, 0x3C821DAD,
+/**/ 0xD7AFCEAF, 0x3FC1F0D3,
+/**/ 0x099769A5, 0xBC66EF95,
+/**/ 0x263C4BD3, 0x3FEFAF22,
+/**/ 0x133A2769, 0xBC552ACE,
+/**/ 0x5E4AB88F, 0x3FC2EE28,
+/**/ 0x05DEE058, 0xBC6E4D0F,
+/**/ 0x641C36F2, 0x3FEFA5EA,
+/**/ 0xED17CC7C, 0x3C404DA6,
+/**/ 0x2C5D66CB, 0x3FC3EB31,
+/**/ 0x6B66CB91, 0x3C647D66,
+/**/ 0x0A7CC428, 0x3FEF9C34,
+/**/ 0x063B7462, 0x3C8C5B6B,
+/**/ 0x4DC5F27B, 0x3FC4E7EA,
+/**/ 0x2AC072FC, 0x3C5949DB,
+/**/ 0x40374D01, 0x3FEF91FF,
+/**/ 0x4D3A9E4C, 0xBC67D03F,
+/**/ 0xCFA126F3, 0x3FC5E44F,
+/**/ 0x063F89B6, 0xBC66F443,
+/**/ 0x2E1EECF6, 0x3FEF874C,
+/**/ 0xE1332B16, 0xBC8C6514,
+/**/ 0xC05A4D4C, 0x3FC6E05D,
+/**/ 0x8B81C940, 0xBBD32C5C,
+/**/ 0xFEFFDE24, 0x3FEF7C1A,
+/**/ 0xC47540B1, 0xBC78F55B,
+/**/ 0x2FBAF2B5, 0x3FC7DC10,
+/**/ 0xE23C97C3, 0x3C45AB50,
+/**/ 0xDF9ECE1C, 0x3FEF706B,
+/**/ 0x0C36DCB4, 0xBC8698C8,
+/**/ 0x2EFAA944, 0x3FC8D763,
+/**/ 0x62CBB953, 0xBC620FA2,
+/**/ 0xFEB82ACD, 0x3FEF643E,
+/**/ 0xC1FE28AC, 0x3C76B00A,
+/**/ 0xD0CEC312, 0x3FC9D252,
+/**/ 0x80B1137D, 0x3C59C43D,
+/**/ 0x8CFF6797, 0x3FEF5794,
+/**/ 0x3E03B1D5, 0x3C6E3A0D,
+/**/ 0x297A0765, 0x3FCACCDB,
+/**/ 0x57D6CDEB, 0xBC59883B,
+/**/ 0xBD1E3A79, 0x3FEF4A6C,
+/**/ 0xEDAEBB57, 0x3C813DF0,
+/**/ 0x4EDC6199, 0x3FCBC6F8,
+/**/ 0x6A7B0CAB, 0x3C69C1A5,
+/**/ 0xC3B3D16E, 0x3FEF3CC7,
+/**/ 0xD28A3494, 0xBC621A3A,
+/**/ 0x588289A3, 0x3FCCC0A6,
+/**/ 0x9BC87C6B, 0xBC6868D0,
+/**/ 0xD753FFED, 0x3FEF2EA5,
+/**/ 0x5F56D583, 0x3C8CC421,
+/**/ 0x5FB5A5D0, 0x3FCDB9E1,
+/**/ 0xD6CC6FC2, 0xBC632E20,
+/**/ 0x3086649F, 0x3FEF2007,
+/**/ 0x16C1984B, 0x3C7B9404,
+/**/ 0x7F8AE5A3, 0x3FCEB2A5,
+/**/ 0xAF572CEB, 0xBC60BE06,
+/**/ 0x09C5873B, 0x3FEF10EC,
+/**/ 0x762C1283, 0x3C8D9072,
+/**/ 0xD4F31577, 0x3FCFAAEE,
+/**/ 0x508E32B8, 0xBC615D88,
+/**/ 0x9F7DEEA1, 0x3FEF0154,
+/**/ 0x99E5CAFD, 0x3C8D3C1E,
+/**/ 0xBF65155C, 0x3FD0515C,
+/**/ 0x9DFD8EC8, 0xBC79B8C2,
+/**/ 0x300D2F26, 0x3FEEF141,
+/**/ 0x08DED372, 0xBC82AA1B,
+/**/ 0xCEF36436, 0x3FD0CD00,
+/**/ 0x0C93E2B5, 0xBC79FB0A,
+/**/ 0xFBC0F11C, 0x3FEEE0B1,
+/**/ 0x80BBC3B1, 0xBC4BFD23,
+/**/ 0xAA94DDEB, 0x3FD14861,
+/**/ 0xB5B615A4, 0xBC6BE881,
+/**/ 0x44D5EFA1, 0x3FEECFA7,
+/**/ 0x4AF541D0, 0xBC556D0A,
+/**/ 0x64C6B876, 0x3FD1C37D,
+/**/ 0xFE0DCFF5, 0x3C746076,
+/**/ 0x4F76EFA8, 0x3FEEBE21,
+/**/ 0x12BA543E, 0xBC802F9F,
+/**/ 0x111AAF36, 0x3FD23E52,
+/**/ 0x334EFF18, 0xBC74F080,
+/**/ 0x61BBAF4F, 0x3FEEAC20,
+/**/ 0x3E94658D, 0x3C62C1D5,
+/**/ 0xC43EB49F, 0x3FD2B8DD,
+/**/ 0x99F2D807, 0x3C615538,
+/**/ 0xC3A7CD83, 0x3FEE99A4,
+/**/ 0x1BC53CE8, 0xBC82264B,
+/**/ 0x94049F87, 0x3FD3331E,
+/**/ 0xB40C302C, 0x3C7E0CB6,
+/**/ 0xBF29A9ED, 0x3FEE86AE,
+/**/ 0xFDBB58A7, 0x3C89397A,
+/**/ 0x9769D3D8, 0x3FD3AD12,
+/**/ 0x04878398, 0x3C003D55,
+/**/ 0xA0193D40, 0x3FEE733E,
+/**/ 0x3546CE13, 0xBC86428B,
+/**/ 0xE69EE697, 0x3FD426B7,
+/**/ 0x5705C59F, 0xBC7F09C7,
+/**/ 0xB436E9D0, 0x3FEE5F54,
+/**/ 0xD02FC8BC, 0x3C87EB0F,
+/**/ 0x9B0F3D20, 0x3FD4A00C,
+/**/ 0x6BB08EAD, 0x3C7823BA,
+/**/ 0x4B2A449C, 0x3FEE4AF1,
+/**/ 0x2E8A6833, 0xBC868CA0,
+/**/ 0xCF68A77A, 0x3FD5190E,
+/**/ 0x55EEF0F3, 0x3C7B3571,
+/**/ 0xB680D6A5, 0x3FEE3614,
+/**/ 0xAA015237, 0xBC727793,
+/**/ 0x9FA2F597, 0x3FD591BC,
+/**/ 0xAC3FE0CB, 0x3C67C74B,
+/**/ 0x49ACD6C1, 0x3FEE20BF,
+/**/ 0xC7EF636C, 0xBC5660AE,
+/**/ 0x29078775, 0x3FD60A14,
+/**/ 0x0BA89133, 0x3C5B1FD8,
+/**/ 0x5A03DBCE, 0x3FEE0AF1,
+/**/ 0x02771AE6, 0x3C5FE8E7,
+/**/ 0x8A38D7F7, 0x3FD68213,
+/**/ 0x02444AAD, 0xBC7D8892,
+/**/ 0x3EBD875E, 0x3FEDF4AB,
+/**/ 0x7E6736C4, 0xBC8E2D8A,
+/**/ 0xE33A0255, 0x3FD6F9B8,
+/**/ 0x4EE9DA0D, 0x3C742BC1,
+/**/ 0x50F228D6, 0x3FEDDDED,
+/**/ 0xD42BA2BF, 0xBC6E80C8,
+/**/ 0x55764214, 0x3FD77102,
+/**/ 0x314BB6CE, 0xBC66EAD7,
+/**/ 0xEB995912, 0x3FEDC6B7,
+/**/ 0x776DCD35, 0x3C54B364,
+/**/ 0x03C86D4E, 0x3FD7E7EE,
+/**/ 0xDABF5AF2, 0xBC7B63BC,
+/**/ 0x6B888E83, 0x3FEDAF0B,
+/**/ 0x2B5E5CEA, 0x3C8A249E,
+/**/ 0x12826949, 0x3FD85E7A,
+/**/ 0x9B5FACE0, 0x3C78A40E,
+/**/ 0x2F71A9DC, 0x3FED96E8,
+/**/ 0xD5D2039D, 0x3C8FF61B,
+/**/ 0xA774992F, 0x3FD8D4A4,
+/**/ 0xEA766326, 0x3C744A02,
+/**/ 0x97E17B4A, 0x3FED7E4E,
+/**/ 0x352BED94, 0xBC63B770,
+/**/ 0xE9F546C5, 0x3FD94A6B,
+/**/ 0x3E683F58, 0xBC769CE1,
+/**/ 0x073E4040, 0x3FED653F,
+/**/ 0x434BEC37, 0xBC876236,
+/**/ 0x02E80510, 0x3FD9BFCE,
+/**/ 0xA320B0A4, 0x3C709E39,
+/**/ 0xE1C619E0, 0x3FED4BB9,
+/**/ 0x77858F61, 0x3C8F34BB,
+/**/ 0x1CC50CCA, 0x3FDA34C9,
+/**/ 0x3B50CECD, 0xBC5A310E,
+/**/ 0x8D8D7C06, 0x3FED31BF,
+/**/ 0x3089CBDD, 0x3C7E60DD,
+/**/ 0x63A09277, 0x3FDAA95B,
+/**/ 0xB13C0381, 0xBC66293E,
+/**/ 0x727D94F0, 0x3FED1750,
+/**/ 0x1EC1A48E, 0x3C80D52B,
+/**/ 0x05321617, 0x3FDB1D83,
+/**/ 0xCB99F519, 0xBC7AE242,
+/**/ 0xFA52AD9F, 0x3FECFC6C,
+/**/ 0x508F2A0D, 0x3C88B5B5,
+/**/ 0x30DBAC43, 0x3FDB913E,
+/**/ 0x2F6C3FF1, 0xBC7E38AD,
+/**/ 0x909A82E5, 0x3FECE115,
+/**/ 0xBB31109A, 0x3C81F139,
+/**/ 0x17B140A3, 0x3FDC048B,
+/**/ 0x757E9FA7, 0x3C619FE6,
+/**/ 0xA2B2972E, 0x3FECC54A,
+/**/ 0x2BA83A98, 0x3C64EE16,
+/**/ 0xEC7FD19E, 0x3FDC7767,
+/**/ 0x1A3D5826, 0xBC5EB14D,
+/**/ 0x9FC67D0B, 0x3FECA90C,
+/**/ 0x485E3462, 0xBC646A81,
+/**/ 0xE3D4A51F, 0x3FDCE9D2,
+/**/ 0x12DAE298, 0xBC62FC8A,
+/**/ 0xF8CE1A84, 0x3FEC8C5B,
+/**/ 0xA1590123, 0x3C7AB3D1,
+/**/ 0x34047661, 0x3FDD5BCA,
+/**/ 0xA75FC29C, 0x3C728A44,
+/**/ 0x208BE53B, 0x3FEC6F39,
+/**/ 0xFBAADB42, 0xBC8741DB,
+/**/ 0x15329C9A, 0x3FDDCD4C,
+/**/ 0xE171FD9A, 0x3C70D4C6,
+/**/ 0x8B8B175E, 0x3FEC51A4,
+/**/ 0x3B9AA880, 0xBC61BBB4,
+/**/ 0xC1582A69, 0x3FDE3E56,
+/**/ 0x1099F88F, 0xBC50A482,
+/**/ 0xB01DDD81, 0x3FEC339E,
+/**/ 0xEE82C5C0, 0xBC8CAAF5,
+/**/ 0x744B05F0, 0x3FDEAEE8,
+/**/ 0x3C9B027D, 0xBC5789B4,
+/**/ 0x065B7D50, 0x3FEC1528,
+/**/ 0x1312E828, 0xBC889211,
+/**/ 0x6BC4F97B, 0x3FDF1EFF,
+/**/ 0xF8A7525C, 0x3C717212,
+/**/ 0x081E7536, 0x3FEBF641,
+/**/ 0x1628A9A1, 0x3C8B7BD7,
+/**/ 0xE76ABC97, 0x3FDF8E99,
+/**/ 0xAF2D00A3, 0x3C59D950,
+/**/ 0x310294F5, 0x3FEBD6EA,
+/**/ 0xC88C109D, 0x3C731BBC,
+/**/ 0x28D2F57A, 0x3FDFFDB6,
+/**/ 0x2E905B6A, 0x3C6F4A99,
+/**/ 0xFE630F32, 0x3FEBB723,
+/**/ 0x452D0A39, 0x3C772BD2,
+/**/ 0x39C69955, 0x3FE03629,
+/**/ 0x78397B01, 0xBC82D8CD,
+/**/ 0xEF58840E, 0x3FEB96EE,
+/**/ 0xC78FADE0, 0x3C545A3C,
+/**/ 0x86946E5B, 0x3FE06D36,
+/**/ 0x4538FF1B, 0x3C83F5AE,
+/**/ 0x84B704C2, 0x3FEB764B,
+/**/ 0xC21B389B, 0xBC8F5848,
+/**/ 0x1E9E1001, 0x3FE0A402,
+/**/ 0xA13914F6, 0xBC86F643,
+/**/ 0x410C104E, 0x3FEB553A,
+/**/ 0x47027A16, 0x3C58FF79,
+/**/ 0x26B5672E, 0x3FE0DA8B,
+/**/ 0xF0BEE909, 0xBC8A58DE,
+/**/ 0xA89C8948, 0x3FEB33BB,
+/**/ 0x1D1F6CA9, 0x3C8EA6A5,
+/**/ 0xC4B69C3B, 0x3FE110D0,
+/**/ 0x98809981, 0x3C8D9189,
+/**/ 0x4162A4C6, 0x3FEB11D0,
+/**/ 0x1EFBC0C2, 0x3C71DD56,
+/**/ 0x1F8B7F82, 0x3FE146D2,
+/**/ 0x5E2739A8, 0x3C7BF953,
+/**/ 0x930BD275, 0x3FEAEF78,
+/**/ 0x79746F94, 0xBC7F8362,
+/**/ 0x5F2EEDB0, 0x3FE17C8E,
+/**/ 0x102E2488, 0x3C635E57,
+/**/ 0x26F69DE5, 0x3FEACCB5,
+/**/ 0x8DD6B6CC, 0x3C88FB6A,
+/**/ 0xACB02FDD, 0x3FE1B204,
+/**/ 0x70CBB5FF, 0xBC5F190C,
+/**/ 0x88308913, 0x3FEAA986,
+/**/ 0x07CD5070, 0xBC0B83D6,
+/**/ 0x3236574C, 0x3FE1E734,
+/**/ 0xA4F41D5A, 0x3C722A3F,
+/**/ 0x4373E02D, 0x3FEA85ED,
+/**/ 0x385EC792, 0x3C69BE06,
+/**/ 0x1B0394CF, 0x3FE21C1C,
+/**/ 0x4B23AA31, 0x3C5E5B32,
+/**/ 0xE72586AF, 0x3FEA61E9,
+/**/ 0xE2FD453F, 0x3C858330,
+/**/ 0x93788BBB, 0x3FE250BB,
+/**/ 0x2457BCCE, 0x3C7EA3D0,
+/**/ 0x0352BDCF, 0x3FEA3D7D,
+/**/ 0xECA19669, 0xBC868DBA,
+/**/ 0xC917A067, 0x3FE28511,
+/**/ 0xD9A16B70, 0xBC801DF1,
+/**/ 0x29AEE445, 0x3FEA18A7,
+/**/ 0x736C0358, 0x3C395E25,
+/**/ 0xEA88421E, 0x3FE2B91D,
+/**/ 0xDB216AB0, 0xBC8FA371,
+/**/ 0xED912F85, 0x3FE9F368,
+/**/ 0xC5791606, 0xBC81D200,
+/**/ 0x279A3082, 0x3FE2ECDF,
+/**/ 0xE0E7E37E, 0x3C8D3557,
+/**/ 0xE3F25E5C, 0x3FE9CDC2,
+/**/ 0x12993F62, 0x3C83F991,
+/**/ 0xB148BC4F, 0x3FE32054,
+/**/ 0x095A135B, 0x3C8F6B42,
+/**/ 0xA36A6514, 0x3FE9A7B5,
+/**/ 0xCC9FA7A9, 0x3C8722CF,
+/**/ 0xB9BE0367, 0x3FE3537D,
+/**/ 0x7AF040F0, 0x3C6B327E,
+/**/ 0xC42E1310, 0x3FE98141,
+/**/ 0x0488F08D, 0x3C8D1FF8,
+/**/ 0x7456282B, 0x3FE38659,
+/**/ 0xA93B07A8, 0xBC710FAD,
+/**/ 0xE00CB1FD, 0x3FE95A67,
+/**/ 0xA21F862D, 0xBC80BEFD,
+/**/ 0x15A2840A, 0x3FE3B8E7,
+/**/ 0xA7D2F07B, 0xBC797653,
+/**/ 0x926D9E92, 0x3FE93328,
+/**/ 0x03600CDA, 0xBC8BB770,
+/**/ 0xD36CD53A, 0x3FE3EB25,
+/**/ 0xE1570FC0, 0xBC5BE570,
+/**/ 0x784DDAF7, 0x3FE90B84,
+/**/ 0x0AB93B87, 0xBC70FEB1,
+/**/ 0xE4BA6790, 0x3FE41D14,
+/**/ 0xD287ECF5, 0x3C84608F,
+/**/ 0x303D9AD1, 0x3FE8E37C,
+/**/ 0xB53D4BF8, 0xBC6463A4,
+/**/ 0x81CF386B, 0x3FE44EB3,
+/**/ 0x1E6A5505, 0xBC83ED6C,
+/**/ 0x5A5DC900, 0x3FE8BB10,
+/**/ 0x3E9474C1, 0x3C8863E0,
+/**/ 0xE431159F, 0x3FE48000,
+/**/ 0x7463ED10, 0xBC8B194A,
+/**/ 0x985D871F, 0x3FE89241,
+/**/ 0xC413ED84, 0x3C8C48D9,
+/**/ 0x46AAB761, 0x3FE4B0FC,
+/**/ 0x738CC59A, 0x3C20DA05,
+/**/ 0x8D77A6C6, 0x3FE86910,
+/**/ 0xE2BFE9DD, 0x3C7338FF,
+/**/ 0xE54ED51B, 0x3FE4E1A4,
+/**/ 0x89B7C76A, 0xBC8A492F,
+/**/ 0xDE701CA0, 0x3FE83F7D,
+/**/ 0x609BC6E8, 0xBC4152CF,
+/**/ 0xFD7B351C, 0x3FE511F9,
+/**/ 0x61C48831, 0xBC85C0E8,
+/**/ 0x31916D5D, 0x3FE8158A,
+/**/ 0x0B8228DE, 0xBC6DE8B9,
+/**/ 0xCDDBB724, 0x3FE541FA,
+/**/ 0x8520D391, 0x3C7232C2,
+/**/ 0x2EAA1488, 0x3FE7EB36,
+/**/ 0xA4A5959F, 0x3C5A1D65,
+/**/ 0x966D59B3, 0x3FE571A6,
+/**/ 0x4D0FB198, 0x3C5C843B,
+/**/ 0x7F09E54F, 0x3FE7C082,
+/**/ 0xD72AEE68, 0xBC6C73D6,
+/**/ 0x98813A12, 0x3FE5A0FC,
+/**/ 0xB7D4227B, 0xBC8D82E2,
+/**/ 0xCD7F6543, 0x3FE7956F,
+/**/ 0xE9D45AE4, 0xBC8AB276,
+/**/ 0x16BF8F0D, 0x3FE5CFFC,
+/**/ 0x70EB578A, 0x3C896CB3,
+/**/ 0xC655211F, 0x3FE769FE,
+/**/ 0xCF8C68C5, 0xBC6827D5,
+/**/ 0x552A9E57, 0x3FE5FEA4,
+/**/ 0xF7EE20B7, 0x3C80B6CE,
+/**/ 0x174EFBA1, 0x3FE73E30,
+/**/ 0x3D94AD5F, 0xBC65D3AE,
+/**/ 0x9921AC79, 0x3FE62CF4,
+/**/ 0x55B6241A, 0xBC8EDD98,
+/**/ 0x6FA77678, 0x3FE71204,
+/**/ 0xA5029C81, 0x3C8425B0,
+/**/ 0x2963E755, 0x3FE65AEC,
+/**/ 0x6B71053C, 0x3C8126F9,
+/**/ 0x800CF55E, 0x3FE6E57C,
+/**/ 0xDEDBD0A6, 0x3C860286,
+/**/ 0x4E134B2F, 0x3FE6888A,
+/**/ 0x7644D5E6, 0xBC86B7D3,
+/**/ 0xFA9EFB5D, 0x3FE6B898,
+/**/ 0x86CCF4B2, 0x3C715AC7,
+/**/ 0x50B7821A, 0x3FE6B5CE,
+/**/ 0x8F702E0F, 0xBC65D515,
+/**/ 0x92EB6253, 0x3FE68B5A,
+/**/ 0xD985F89C, 0xBC89A91A,
+/**/ 0x7C40BDE1, 0x3FE6E2B7,
+/**/ 0x857FAD53, 0xBC70E729,
+/**/ 0xFDEB8CBA, 0x3FE65DC1,
+/**/ 0x47337C77, 0xBC597C1B,
+/**/ 0x1D0A8C40, 0x3FE70F45,
+/**/ 0x3885770D, 0x3C697EDE,
+/**/ 0xF20191C7, 0x3FE62FCF,
+/**/ 0x895756EF, 0x3C6D9143,
+/**/ 0x80DEA578, 0x3FE73B76,
+/**/ 0x06DC12A2, 0xBC722483,
+/**/ 0x26F563DF, 0x3FE60185,
+/**/ 0xE0E432D0, 0x3C846CA5,
+/**/ 0xF6F7B524, 0x3FE7674A,
+/**/ 0x94AC84A8, 0x3C7E9D3F,
+/**/ 0x55F1F17A, 0x3FE5D2E2,
+/**/ 0x04C8892B, 0x3C803141,
+/**/ 0xD0041D52, 0x3FE792C1,
+/**/ 0xEEB354EB, 0xBC8ABF05,
+/**/ 0x39824077, 0x3FE5A3E8,
+/**/ 0x2759BE62, 0x3C8428AA,
+/**/ 0x5E28B3C2, 0x3FE7BDDA,
+/**/ 0x7CCD0393, 0x3C4AD119,
+/**/ 0x8D8E83F2, 0x3FE57497,
+/**/ 0xAF282D23, 0x3C8F4714,
+/**/ 0xF5037959, 0x3FE7E893,
+/**/ 0xAA650C4C, 0x3C80EEFB,
+/**/ 0x0F592CA5, 0x3FE544F1,
+/**/ 0xE6C7A62F, 0xBC8E7AE8,
+/**/ 0xE9AE4BA4, 0x3FE812ED,
+/**/ 0xDF402DDA, 0xBC87830A,
+/**/ 0x7D7BF3DA, 0x3FE514F5,
+/**/ 0x8073C259, 0x3C747A10 } };
+#endif
+#endif
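
The table appears to hold one entry of four doubles per 1/128 step of the argument: sin(k/128), a low-order correction, cos(k/128), and its correction. That matches SINCOS_TABLE_LOOKUP in s_sin.c, where adding big = 1.5*2^45 quantizes |x| to multiples of 1/128 and the low word, shifted left by 2, becomes the index. A small check of that assumed layout, linking against this file and reusing the declaration from s_sin.c:

  #include <math.h>
  #include <stdio.h>
  #include "mydefs.h"

  /* Same external declaration that s_sin.c uses. */
  extern const union { int4 i[880]; double x[440]; } __sincostab;

  int main(void) {
    for (int k = 0; k < 110; k += 27) {       /* 440 doubles = 110 entries */
      double s = __sincostab.x[4 * k] + __sincostab.x[4 * k + 1];
      double c = __sincostab.x[4 * k + 2] + __sincostab.x[4 * k + 3];
      printf("k=%3d  sin err=%.2e  cos err=%.2e\n",
             k, s - sin(k / 128.0), c - cos(k / 128.0));
    }
    return 0;
  }
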
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h
new file mode 100644
index 0000000000..e4dac73048
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h
@@ -0,0 +1,14 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+double glibc_cos(double x);
+double glibc_sin(double x);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
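
trig.h is the C/C++ entry point the embedder includes; glibc_sin() and glibc_cos() mirror the libm signatures. A minimal usage sketch, assuming s_sin.c, sincostab.c and the branred.c reduction (plus a matching endian.h) are compiled into the same target:

  #include <math.h>
  #include <stdio.h>
  #include "trig.h"

  int main(void) {
    const double xs[] = {0.1, 1.0, 2.5, 1.0e6, 1.0e18};
    for (unsigned i = 0; i < sizeof xs / sizeof xs[0]; ++i) {
      printf("x=%-7g  glibc_sin=% .17g  libm sin=% .17g\n",
             xs[i], glibc_sin(xs[i]), sin(xs[i]));
    }
    return 0;
  }
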
diff --git a/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h
new file mode 100644
index 0000000000..1615c26beb
--- /dev/null
+++ b/deps/v8/third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h
@@ -0,0 +1,47 @@
+/*
+ * IBM Accurate Mathematical Library
+ * Copyright (C) 2001-2022 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* MODULE_NAME: usncs.h */
+/* */
+/* */
+/* common data and variable definitions for BIG or LITTLE ENDIAN */
+/************************************************************************/
+
+#ifndef USNCS_H
+#define USNCS_H
+
+static const double s1 = -0x1.5555555555555p-3; /* -0.16666666666666666 */
+static const double s2 = 0x1.1111111110ECEp-7; /* 0.0083333333333323288 */
+static const double s3 = -0x1.A01A019DB08B8p-13; /* -0.00019841269834414642 */
+static const double s4 = 0x1.71DE27B9A7ED9p-19; /* 2.755729806860771e-06 */
+static const double s5 = -0x1.ADDFFC2FCDF59p-26; /* -2.5022014848318398e-08 */
+static const double aa = -0x1.5558000000000p-3; /* -0.1666717529296875 */
+static const double bb = 0x1.5555555556E24p-18; /* 5.0862630208387126e-06 */
+static const double big = 0x1.8000000000000p45; /* 52776558133248 */
+static const double hp0 = 0x1.921FB54442D18p0; /* 1.5707963267948966 */
+static const double hp1 = 0x1.1A62633145C07p-54; /* 6.123233995736766e-17 */
+static const double mp1 = 0x1.921FB58000000p0; /* 1.5707963407039642 */
+static const double mp2 = -0x1.DDE973C000000p-27; /* -1.3909067564377153e-08 */
+static const double mp3 = -0x1.CB3B399D747F2p-55; /* -4.9789962505147994e-17 */
+static const double pp3 = -0x1.CB3B398000000p-55; /* -4.9789962314799099e-17 */
+static const double pp4 = -0x1.d747f23e32ed7p-83; /* -1.9034889620193266e-25 */
+static const double hpinv = 0x1.45F306DC9C883p-1; /* 0.63661977236758138 */
+static const double toint = 0x1.8000000000000p52; /* 6755399441055744 */
+
+#endif
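
hpinv, toint, mp1/mp2 and pp3/pp4 feed the Cody-Waite style reduction in reduce_sincos(): multiplying by 2/pi and adding toint = 1.5*2^52 rounds to the nearest integer using only double arithmetic (this is why the code assumes round-to-nearest mode), and the selected multiple of pi/2 is then subtracted in progressively smaller pieces. A tiny sketch of just the rounding trick:

  #include <stdio.h>

  int main(void) {
    const double hpinv = 0x1.45F306DC9C883p-1;  /* 2/pi, as in usncs.h */
    const double toint = 0x1.8p52;              /* 1.5 * 2^52 */
    double x = 10.0;
    double t = x * hpinv + toint;               /* low bits now hold round(x * 2/pi) */
    double n = t - toint;
    printf("x = %g  ->  nearest multiple of pi/2 is n = %g (expect 6)\n", x, n);
    return 0;
  }
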
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index be83565de2..0b161b6717 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: dec7ec1932f5277b933ed8a675cc6eb7cfc36f88
+Revision: 03764695ffd5887cebc446f61ec43059e03f55b5
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/maybe.h b/deps/v8/third_party/inspector_protocol/crdtp/maybe.h
index a476dd5810..f110c7e26a 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/maybe.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/maybe.h
@@ -47,7 +47,7 @@ class ValueMaybe {
ValueMaybe(ValueMaybe&& other) noexcept
: is_just_(other.is_just_), value_(std::move(other.value_)) {}
void operator=(T value) {
- value_ = value;
+ value_ = std::move(value);
is_just_ = true;
}
const T& fromJust() const {
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/protocol_core.h b/deps/v8/third_party/inspector_protocol/crdtp/protocol_core.h
index 486bf7b2d6..01896cc547 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/protocol_core.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/protocol_core.h
@@ -307,8 +307,6 @@ class ProtocolObject : public Serializable,
AppendSerialized(&serialized);
return T::ReadFrom(std::move(serialized)).value();
}
- // TODO(caseq): compatibility only, remove.
- std::unique_ptr<T> clone() const { return Clone(); }
protected:
using ProtocolType = T;
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/status.h b/deps/v8/third_party/inspector_protocol/crdtp/status.h
index 10745f83e0..978775998f 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/status.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/status.h
@@ -115,20 +115,23 @@ class StatusOr {
explicit StatusOr(const Status& status) : status_(status) {}
bool ok() const { return status_.ok(); }
- T& operator*() & {
- assert(ok());
- return value_;
- }
+ const Status& status() const { return status_; }
+ T& operator*() & { return value(); }
const T& operator*() const& { return value(); }
T&& operator*() && { return value(); }
- const Status& status() const { return status_; }
- T& value() & { return *this; }
+ T& value() & {
+ assert(ok());
+ return value_;
+ }
T&& value() && {
assert(ok());
return std::move(value_);
}
- const T& value() const& { return *this; }
+ const T& value() const& {
+ assert(ok());
+ return value_;
+ }
private:
Status status_;
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
index 8002f0794c..4e2547a09a 100644
--- a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
@@ -2,6 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+declare_args() {
+  # Where jinja2 is located; in Chromium it is //third_party.
+ jinja_dir = "//third_party"
+}
+
# This template will generate inspector protocol source code. The code will
# not be compiled, use get_target_outputs(<name>) to compile them.
#
@@ -54,8 +59,7 @@ template("inspector_protocol_generate") {
args = [
"--jinja_dir",
- rebase_path("//third_party/", root_build_dir), # jinja is in chromium's
- # third_party
+ rebase_path(jinja_dir, root_build_dir),
"--output_base",
rebase_path(invoker.out_dir, root_build_dir),
"--config",
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.md5 b/deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.md5
deleted file mode 100644
index 064a6287b8..0000000000
--- a/deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.md5
+++ /dev/null
@@ -1 +0,0 @@
-231dc00d34afb2672c497713fa9cdaaa Jinja2-2.11.3.tar.gz
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.sha512 b/deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.sha512
deleted file mode 100644
index 26f0717bd5..0000000000
--- a/deps/v8/third_party/jinja2/Jinja2-2.11.3.tar.gz.sha512
+++ /dev/null
@@ -1 +0,0 @@
-fce4f835795fe9afb622f8106f60344032a811f3f693806f31ba482f9b7c1400f93dfa1701b4db0b472cbed4b0793cb329778c8091811ef0e3b577150d28e004 Jinja2-2.11.3.tar.gz
diff --git a/deps/v8/third_party/jinja2/README.chromium b/deps/v8/third_party/jinja2/README.chromium
index df828d669f..079ce0cc36 100644
--- a/deps/v8/third_party/jinja2/README.chromium
+++ b/deps/v8/third_party/jinja2/README.chromium
@@ -1,8 +1,7 @@
Name: Jinja2 Python Template Engine
Short Name: jinja2
URL: https://jinja.palletsprojects.com/
-Version: 2.11.3
-CPEPrefix: cpe:/a:pocoo:jinja2:2.11.3
+Version: 3.1.2
License: BSD 3-Clause
License File: LICENSE.rst
Security Critical: no
@@ -10,9 +9,8 @@ Security Critical: no
Description:
Template engine for code generation in Blink.
-Source: https://files.pythonhosted.org/packages/4f/e7/65300e6b32e69768ded990494809106f87da1d436418d5f1367ed3966fd7/Jinja2-2.11.3.tar.gz
-MD5: 231dc00d34afb2672c497713fa9cdaaa
-SHA-512: fce4f835795fe9afb622f8106f60344032a811f3f693806f31ba482f9b7c1400f93dfa1701b4db0b472cbed4b0793cb329778c8091811ef0e3b577150d28e004
+Source: https://files.pythonhosted.org/packages/7a/ff/75c28576a1d900e87eb6335b063fab47a8ef3c8b4d88524c4bf78f670cce/Jinja2-3.1.2.tar.gz
+SHA-256: 31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852
Local Modifications:
This only includes the src/jinja2/ directory from the tarball and the
@@ -21,5 +19,3 @@ Additional chromium-specific files are:
* README.chromium (this file)
* OWNERS
* jinja2.gni
-* files of hashes (MD5 is also posted on website, SHA-512 computed locally).
-* patches/*.patch for local modifications.
diff --git a/deps/v8/third_party/jinja2/README.rst b/deps/v8/third_party/jinja2/README.rst
index 060b19efee..a197aea647 100644
--- a/deps/v8/third_party/jinja2/README.rst
+++ b/deps/v8/third_party/jinja2/README.rst
@@ -35,7 +35,7 @@ Install and update using `pip`_:
$ pip install -U Jinja2
-.. _pip: https://pip.pypa.io/en/stable/quickstart/
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
In A Nutshell
@@ -54,13 +54,25 @@ In A Nutshell
{% endblock %}
+Donate
+------
+
+The Pallets organization develops and supports Jinja and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
Links
-----
-- Website: https://palletsprojects.com/p/jinja/
- Documentation: https://jinja.palletsprojects.com/
-- Releases: https://pypi.org/project/Jinja2/
-- Code: https://github.com/pallets/jinja
-- Issue tracker: https://github.com/pallets/jinja/issues
-- Test status: https://dev.azure.com/pallets/jinja/_build
-- Official chat: https://discord.gg/t6rrQZH
+- Changes: https://jinja.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/Jinja2/
+- Source Code: https://github.com/pallets/jinja/
+- Issue Tracker: https://github.com/pallets/jinja/issues/
+- Website: https://palletsprojects.com/p/jinja/
+- Twitter: https://twitter.com/PalletsTeam
+- Chat: https://discord.gg/pallets
diff --git a/deps/v8/third_party/jinja2/__init__.py b/deps/v8/third_party/jinja2/__init__.py
index f17866f6c4..e32392679e 100644
--- a/deps/v8/third_party/jinja2/__init__.py
+++ b/deps/v8/third_party/jinja2/__init__.py
@@ -1,44 +1,37 @@
-# -*- coding: utf-8 -*-
"""Jinja is a template engine written in pure Python. It provides a
non-XML syntax that supports inline expressions and an optional
sandboxed environment.
"""
-from markupsafe import escape
-from markupsafe import Markup
+from .bccache import BytecodeCache as BytecodeCache
+from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache
+from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache
+from .environment import Environment as Environment
+from .environment import Template as Template
+from .exceptions import TemplateAssertionError as TemplateAssertionError
+from .exceptions import TemplateError as TemplateError
+from .exceptions import TemplateNotFound as TemplateNotFound
+from .exceptions import TemplateRuntimeError as TemplateRuntimeError
+from .exceptions import TemplatesNotFound as TemplatesNotFound
+from .exceptions import TemplateSyntaxError as TemplateSyntaxError
+from .exceptions import UndefinedError as UndefinedError
+from .loaders import BaseLoader as BaseLoader
+from .loaders import ChoiceLoader as ChoiceLoader
+from .loaders import DictLoader as DictLoader
+from .loaders import FileSystemLoader as FileSystemLoader
+from .loaders import FunctionLoader as FunctionLoader
+from .loaders import ModuleLoader as ModuleLoader
+from .loaders import PackageLoader as PackageLoader
+from .loaders import PrefixLoader as PrefixLoader
+from .runtime import ChainableUndefined as ChainableUndefined
+from .runtime import DebugUndefined as DebugUndefined
+from .runtime import make_logging_undefined as make_logging_undefined
+from .runtime import StrictUndefined as StrictUndefined
+from .runtime import Undefined as Undefined
+from .utils import clear_caches as clear_caches
+from .utils import is_undefined as is_undefined
+from .utils import pass_context as pass_context
+from .utils import pass_environment as pass_environment
+from .utils import pass_eval_context as pass_eval_context
+from .utils import select_autoescape as select_autoescape
-from .bccache import BytecodeCache
-from .bccache import FileSystemBytecodeCache
-from .bccache import MemcachedBytecodeCache
-from .environment import Environment
-from .environment import Template
-from .exceptions import TemplateAssertionError
-from .exceptions import TemplateError
-from .exceptions import TemplateNotFound
-from .exceptions import TemplateRuntimeError
-from .exceptions import TemplatesNotFound
-from .exceptions import TemplateSyntaxError
-from .exceptions import UndefinedError
-from .filters import contextfilter
-from .filters import environmentfilter
-from .filters import evalcontextfilter
-from .loaders import BaseLoader
-from .loaders import ChoiceLoader
-from .loaders import DictLoader
-from .loaders import FileSystemLoader
-from .loaders import FunctionLoader
-from .loaders import ModuleLoader
-from .loaders import PackageLoader
-from .loaders import PrefixLoader
-from .runtime import ChainableUndefined
-from .runtime import DebugUndefined
-from .runtime import make_logging_undefined
-from .runtime import StrictUndefined
-from .runtime import Undefined
-from .utils import clear_caches
-from .utils import contextfunction
-from .utils import environmentfunction
-from .utils import evalcontextfunction
-from .utils import is_undefined
-from .utils import select_autoescape
-
-__version__ = "2.11.3"
+__version__ = "3.1.2"
diff --git a/deps/v8/third_party/jinja2/_compat.py b/deps/v8/third_party/jinja2/_compat.py
deleted file mode 100644
index 1f044954a0..0000000000
--- a/deps/v8/third_party/jinja2/_compat.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-# flake8: noqa
-import marshal
-import sys
-
-PY2 = sys.version_info[0] == 2
-PYPY = hasattr(sys, "pypy_translation_info")
-_identity = lambda x: x
-
-if not PY2:
- unichr = chr
- range_type = range
- text_type = str
- string_types = (str,)
- integer_types = (int,)
-
- iterkeys = lambda d: iter(d.keys())
- itervalues = lambda d: iter(d.values())
- iteritems = lambda d: iter(d.items())
-
- import pickle
- from io import BytesIO, StringIO
-
- NativeStringIO = StringIO
-
- def reraise(tp, value, tb=None):
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
- ifilter = filter
- imap = map
- izip = zip
- intern = sys.intern
-
- implements_iterator = _identity
- implements_to_string = _identity
- encode_filename = _identity
-
- marshal_dump = marshal.dump
- marshal_load = marshal.load
-
-else:
- unichr = unichr
- text_type = unicode
- range_type = xrange
- string_types = (str, unicode)
- integer_types = (int, long)
-
- iterkeys = lambda d: d.iterkeys()
- itervalues = lambda d: d.itervalues()
- iteritems = lambda d: d.iteritems()
-
- import cPickle as pickle
- from cStringIO import StringIO as BytesIO, StringIO
-
- NativeStringIO = BytesIO
-
- exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
-
- from itertools import imap, izip, ifilter
-
- intern = intern
-
- def implements_iterator(cls):
- cls.next = cls.__next__
- del cls.__next__
- return cls
-
- def implements_to_string(cls):
- cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
- return cls
-
- def encode_filename(filename):
- if isinstance(filename, unicode):
- return filename.encode("utf-8")
- return filename
-
- def marshal_dump(code, f):
- if isinstance(f, file):
- marshal.dump(code, f)
- else:
- f.write(marshal.dumps(code))
-
- def marshal_load(f):
- if isinstance(f, file):
- return marshal.load(f)
- return marshal.loads(f.read())
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a
- # dummy metaclass for one level of class instantiation that replaces
- # itself with the actual metaclass.
- class metaclass(type):
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
-
- return type.__new__(metaclass, "temporary_class", (), {})
-
-
-try:
- from urllib.parse import quote_from_bytes as url_quote
-except ImportError:
- from urllib import quote as url_quote
-
-
-try:
- from collections import abc
-except ImportError:
- import collections as abc
-
-
-try:
- from os import fspath
-except ImportError:
- try:
- from pathlib import PurePath
- except ImportError:
- PurePath = None
-
- def fspath(path):
- if hasattr(path, "__fspath__"):
- return path.__fspath__()
-
- # Python 3.5 doesn't have __fspath__ yet, use str.
- if PurePath is not None and isinstance(path, PurePath):
- return str(path)
-
- return path
diff --git a/deps/v8/third_party/jinja2/_identifier.py b/deps/v8/third_party/jinja2/_identifier.py
index 224d5449d1..928c1503c7 100644
--- a/deps/v8/third_party/jinja2/_identifier.py
+++ b/deps/v8/third_party/jinja2/_identifier.py
@@ -2,5 +2,5 @@ import re
# generated by scripts/generate_identifier_pattern.py
pattern = re.compile(
- r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
+ r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
)
diff --git a/deps/v8/third_party/jinja2/async_utils.py b/deps/v8/third_party/jinja2/async_utils.py
new file mode 100644
index 0000000000..1a4f3892ce
--- /dev/null
+++ b/deps/v8/third_party/jinja2/async_utils.py
@@ -0,0 +1,84 @@
+import inspect
+import typing as t
+from functools import WRAPPER_ASSIGNMENTS
+from functools import wraps
+
+from .utils import _PassArg
+from .utils import pass_eval_context
+
+V = t.TypeVar("V")
+
+
+def async_variant(normal_func): # type: ignore
+ def decorator(async_func): # type: ignore
+ pass_arg = _PassArg.from_obj(normal_func)
+ need_eval_context = pass_arg is None
+
+ if pass_arg is _PassArg.environment:
+
+ def is_async(args: t.Any) -> bool:
+ return t.cast(bool, args[0].is_async)
+
+ else:
+
+ def is_async(args: t.Any) -> bool:
+ return t.cast(bool, args[0].environment.is_async)
+
+ # Take the doc and annotations from the sync function, but the
+ # name from the async function. Pallets-Sphinx-Themes
+ # build_function_directive expects __wrapped__ to point to the
+ # sync function.
+ async_func_attrs = ("__module__", "__name__", "__qualname__")
+ normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs))
+
+ @wraps(normal_func, assigned=normal_func_attrs)
+ @wraps(async_func, assigned=async_func_attrs, updated=())
+ def wrapper(*args, **kwargs): # type: ignore
+ b = is_async(args)
+
+ if need_eval_context:
+ args = args[1:]
+
+ if b:
+ return async_func(*args, **kwargs)
+
+ return normal_func(*args, **kwargs)
+
+ if need_eval_context:
+ wrapper = pass_eval_context(wrapper)
+
+ wrapper.jinja_async_variant = True
+ return wrapper
+
+ return decorator
+
+
+_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}
+
+
+async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
+ # Avoid a costly call to isawaitable
+ if type(value) in _common_primitives:
+ return t.cast("V", value)
+
+ if inspect.isawaitable(value):
+ return await t.cast("t.Awaitable[V]", value)
+
+ return t.cast("V", value)
+
+
+async def auto_aiter(
+ iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+) -> "t.AsyncIterator[V]":
+ if hasattr(iterable, "__aiter__"):
+ async for item in t.cast("t.AsyncIterable[V]", iterable):
+ yield item
+ else:
+ for item in t.cast("t.Iterable[V]", iterable):
+ yield item
+
+
+async def auto_to_list(
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+) -> t.List["V"]:
+ return [x async for x in auto_aiter(value)]
diff --git a/deps/v8/third_party/jinja2/asyncfilters.py b/deps/v8/third_party/jinja2/asyncfilters.py
deleted file mode 100644
index 3d98dbcc00..0000000000
--- a/deps/v8/third_party/jinja2/asyncfilters.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from functools import wraps
-
-from . import filters
-from .asyncsupport import auto_aiter
-from .asyncsupport import auto_await
-
-
-async def auto_to_seq(value):
- seq = []
- if hasattr(value, "__aiter__"):
- async for item in value:
- seq.append(item)
- else:
- for item in value:
- seq.append(item)
- return seq
-
-
-async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
- if seq:
- async for item in auto_aiter(seq):
- if func(item):
- yield item
-
-
-def dualfilter(normal_filter, async_filter):
- wrap_evalctx = False
- if getattr(normal_filter, "environmentfilter", False) is True:
-
- def is_async(args):
- return args[0].is_async
-
- wrap_evalctx = False
- else:
- has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
- has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
- wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
-
- def is_async(args):
- return args[0].environment.is_async
-
- @wraps(normal_filter)
- def wrapper(*args, **kwargs):
- b = is_async(args)
- if wrap_evalctx:
- args = args[1:]
- if b:
- return async_filter(*args, **kwargs)
- return normal_filter(*args, **kwargs)
-
- if wrap_evalctx:
- wrapper.evalcontextfilter = True
-
- wrapper.asyncfiltervariant = True
-
- return wrapper
-
-
-def asyncfiltervariant(original):
- def decorator(f):
- return dualfilter(original, f)
-
- return decorator
-
-
-@asyncfiltervariant(filters.do_first)
-async def do_first(environment, seq):
- try:
- return await auto_aiter(seq).__anext__()
- except StopAsyncIteration:
- return environment.undefined("No first item, sequence was empty.")
-
-
-@asyncfiltervariant(filters.do_groupby)
-async def do_groupby(environment, value, attribute):
- expr = filters.make_attrgetter(environment, attribute)
- return [
- filters._GroupTuple(key, await auto_to_seq(values))
- for key, values in filters.groupby(
- sorted(await auto_to_seq(value), key=expr), expr
- )
- ]
-
-
-@asyncfiltervariant(filters.do_join)
-async def do_join(eval_ctx, value, d=u"", attribute=None):
- return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
-
-
-@asyncfiltervariant(filters.do_list)
-async def do_list(value):
- return await auto_to_seq(value)
-
-
-@asyncfiltervariant(filters.do_reject)
-async def do_reject(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: not x, False)
-
-
-@asyncfiltervariant(filters.do_rejectattr)
-async def do_rejectattr(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: not x, True)
-
-
-@asyncfiltervariant(filters.do_select)
-async def do_select(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: x, False)
-
-
-@asyncfiltervariant(filters.do_selectattr)
-async def do_selectattr(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: x, True)
-
-
-@asyncfiltervariant(filters.do_map)
-async def do_map(*args, **kwargs):
- seq, func = filters.prepare_map(args, kwargs)
- if seq:
- async for item in auto_aiter(seq):
- yield await auto_await(func(item))
-
-
-@asyncfiltervariant(filters.do_sum)
-async def do_sum(environment, iterable, attribute=None, start=0):
- rv = start
- if attribute is not None:
- func = filters.make_attrgetter(environment, attribute)
- else:
-
- def func(x):
- return x
-
- async for item in auto_aiter(iterable):
- rv += func(item)
- return rv
-
-
-@asyncfiltervariant(filters.do_slice)
-async def do_slice(value, slices, fill_with=None):
- return filters.do_slice(await auto_to_seq(value), slices, fill_with)
-
-
-ASYNC_FILTERS = {
- "first": do_first,
- "groupby": do_groupby,
- "join": do_join,
- "list": do_list,
- # we intentionally do not support do_last because that would be
- # ridiculous
- "reject": do_reject,
- "rejectattr": do_rejectattr,
- "map": do_map,
- "select": do_select,
- "selectattr": do_selectattr,
- "sum": do_sum,
- "slice": do_slice,
-}
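
asyncfilters.py disappears because the built-in filters now carry their async variants directly (via the async_variant decorator from async_utils.py above), so a separate ASYNC_FILTERS registry is no longer patched in. A small sketch of the resulting behaviour with stock Jinja2 >= 3.0, not specific to this vendored copy: the ordinary |join filter accepts an async iterable once the environment is async.

import asyncio
import typing as t

from jinja2 import Environment


async def fruits() -> t.AsyncIterator[str]:
    # Stands in for data produced asynchronously, e.g. fetched from a database.
    for name in ("apple", "banana", "cherry"):
        yield name


async def main() -> None:
    env = Environment(enable_async=True)
    # join transparently drains the async iterable; before this update the
    # async behaviour came from the separate ASYNC_FILTERS registrations.
    template = env.from_string("{{ items | join(', ') }}")
    print(await template.render_async(items=fruits()))  # apple, banana, cherry


asyncio.run(main())
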
diff --git a/deps/v8/third_party/jinja2/asyncsupport.py b/deps/v8/third_party/jinja2/asyncsupport.py
deleted file mode 100644
index 78ba3739d8..0000000000
--- a/deps/v8/third_party/jinja2/asyncsupport.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# -*- coding: utf-8 -*-
-"""The code for async support. Importing this patches Jinja on supported
-Python versions.
-"""
-import asyncio
-import inspect
-from functools import update_wrapper
-
-from markupsafe import Markup
-
-from .environment import TemplateModule
-from .runtime import LoopContext
-from .utils import concat
-from .utils import internalcode
-from .utils import missing
-
-
-async def concat_async(async_gen):
- rv = []
-
- async def collect():
- async for event in async_gen:
- rv.append(event)
-
- await collect()
- return concat(rv)
-
-
-async def generate_async(self, *args, **kwargs):
- vars = dict(*args, **kwargs)
- try:
- async for event in self.root_render_func(self.new_context(vars)):
- yield event
- except Exception:
- yield self.environment.handle_exception()
-
-
-def wrap_generate_func(original_generate):
- def _convert_generator(self, loop, args, kwargs):
- async_gen = self.generate_async(*args, **kwargs)
- try:
- while 1:
- yield loop.run_until_complete(async_gen.__anext__())
- except StopAsyncIteration:
- pass
-
- def generate(self, *args, **kwargs):
- if not self.environment.is_async:
- return original_generate(self, *args, **kwargs)
- return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
-
- return update_wrapper(generate, original_generate)
-
-
-async def render_async(self, *args, **kwargs):
- if not self.environment.is_async:
- raise RuntimeError("The environment was not created with async mode enabled.")
-
- vars = dict(*args, **kwargs)
- ctx = self.new_context(vars)
-
- try:
- return await concat_async(self.root_render_func(ctx))
- except Exception:
- return self.environment.handle_exception()
-
-
-def wrap_render_func(original_render):
- def render(self, *args, **kwargs):
- if not self.environment.is_async:
- return original_render(self, *args, **kwargs)
- loop = asyncio.get_event_loop()
- return loop.run_until_complete(self.render_async(*args, **kwargs))
-
- return update_wrapper(render, original_render)
-
-
-def wrap_block_reference_call(original_call):
- @internalcode
- async def async_call(self):
- rv = await concat_async(self._stack[self._depth](self._context))
- if self._context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
- @internalcode
- def __call__(self):
- if not self._context.environment.is_async:
- return original_call(self)
- return async_call(self)
-
- return update_wrapper(__call__, original_call)
-
-
-def wrap_macro_invoke(original_invoke):
- @internalcode
- async def async_invoke(self, arguments, autoescape):
- rv = await self._func(*arguments)
- if autoescape:
- rv = Markup(rv)
- return rv
-
- @internalcode
- def _invoke(self, arguments, autoescape):
- if not self._environment.is_async:
- return original_invoke(self, arguments, autoescape)
- return async_invoke(self, arguments, autoescape)
-
- return update_wrapper(_invoke, original_invoke)
-
-
-@internalcode
-async def get_default_module_async(self):
- if self._module is not None:
- return self._module
- self._module = rv = await self.make_module_async()
- return rv
-
-
-def wrap_default_module(original_default_module):
- @internalcode
- def _get_default_module(self):
- if self.environment.is_async:
- raise RuntimeError("Template module attribute is unavailable in async mode")
- return original_default_module(self)
-
- return _get_default_module
-
-
-async def make_module_async(self, vars=None, shared=False, locals=None):
- context = self.new_context(vars, shared, locals)
- body_stream = []
- async for item in self.root_render_func(context):
- body_stream.append(item)
- return TemplateModule(self, context, body_stream)
-
-
-def patch_template():
- from . import Template
-
- Template.generate = wrap_generate_func(Template.generate)
- Template.generate_async = update_wrapper(generate_async, Template.generate_async)
- Template.render_async = update_wrapper(render_async, Template.render_async)
- Template.render = wrap_render_func(Template.render)
- Template._get_default_module = wrap_default_module(Template._get_default_module)
- Template._get_default_module_async = get_default_module_async
- Template.make_module_async = update_wrapper(
- make_module_async, Template.make_module_async
- )
-
-
-def patch_runtime():
- from .runtime import BlockReference, Macro
-
- BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
- Macro._invoke = wrap_macro_invoke(Macro._invoke)
-
-
-def patch_filters():
- from .filters import FILTERS
- from .asyncfilters import ASYNC_FILTERS
-
- FILTERS.update(ASYNC_FILTERS)
-
-
-def patch_all():
- patch_template()
- patch_runtime()
- patch_filters()
-
-
-async def auto_await(value):
- if inspect.isawaitable(value):
- return await value
- return value
-
-
-async def auto_aiter(iterable):
- if hasattr(iterable, "__aiter__"):
- async for item in iterable:
- yield item
- return
- for item in iterable:
- yield item
-
-
-class AsyncLoopContext(LoopContext):
- _to_iterator = staticmethod(auto_aiter)
-
- @property
- async def length(self):
- if self._length is not None:
- return self._length
-
- try:
- self._length = len(self._iterable)
- except TypeError:
- iterable = [x async for x in self._iterator]
- self._iterator = self._to_iterator(iterable)
- self._length = len(iterable) + self.index + (self._after is not missing)
-
- return self._length
-
- @property
- async def revindex0(self):
- return await self.length - self.index
-
- @property
- async def revindex(self):
- return await self.length - self.index0
-
- async def _peek_next(self):
- if self._after is not missing:
- return self._after
-
- try:
- self._after = await self._iterator.__anext__()
- except StopAsyncIteration:
- self._after = missing
-
- return self._after
-
- @property
- async def last(self):
- return await self._peek_next() is missing
-
- @property
- async def nextitem(self):
- rv = await self._peek_next()
-
- if rv is missing:
- return self._undefined("there is no next item")
-
- return rv
-
- def __aiter__(self):
- return self
-
- async def __anext__(self):
- if self._after is not missing:
- rv = self._after
- self._after = missing
- else:
- rv = await self._iterator.__anext__()
-
- self.index0 += 1
- self._before = self._current
- self._current = rv
- return rv, self
-
-
-async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
- import warnings
-
- warnings.warn(
- "This template must be recompiled with at least Jinja 2.11, or"
- " it will fail in 3.0.",
- DeprecationWarning,
- stacklevel=2,
- )
- return AsyncLoopContext(iterable, undefined, recurse, depth0)
-
-
-patch_all()
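
The deleted asyncsupport.py made async rendering work by monkey-patching Template.render/generate and the runtime at import time; in Jinja2 3.x the compiler emits the async code paths itself and they are enabled per environment, so nothing needs patching. A minimal sketch of streaming output through the native generate_async, assuming stock Jinja2 >= 3.0:

import asyncio

from jinja2 import Environment


async def main() -> None:
    env = Environment(enable_async=True)
    template = env.from_string("{% for i in range(3) %}line {{ i }}\n{% endfor %}")

    # generate_async yields rendered chunks as they are produced, which is
    # exactly what the deleted wrap_generate_func used to bolt on afterwards.
    async for chunk in template.generate_async():
        print(chunk, end="")


asyncio.run(main())
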
diff --git a/deps/v8/third_party/jinja2/bccache.py b/deps/v8/third_party/jinja2/bccache.py
index 9c0661030f..d0ddf56ef6 100644
--- a/deps/v8/third_party/jinja2/bccache.py
+++ b/deps/v8/third_party/jinja2/bccache.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""The optional bytecode cache system. This is useful if you have very
complex template situations and the compilation of all those templates
slows down your application too much.
@@ -8,22 +7,30 @@ are initialized on the first request.
"""
import errno
import fnmatch
+import marshal
import os
+import pickle
import stat
import sys
import tempfile
+import typing as t
from hashlib import sha1
-from os import listdir
-from os import path
+from io import BytesIO
+from types import CodeType
-from ._compat import BytesIO
-from ._compat import marshal_dump
-from ._compat import marshal_load
-from ._compat import pickle
-from ._compat import text_type
-from .utils import open_if_exists
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
-bc_version = 4
+ class _MemcachedClient(te.Protocol):
+ def get(self, key: str) -> bytes:
+ ...
+
+ def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None:
+ ...
+
+
+bc_version = 5
# Magic bytes to identify Jinja bytecode cache files. Contains the
# Python major and minor version to avoid loading incompatible bytecode
# if a project upgrades its Python version.
@@ -34,7 +41,7 @@ bc_magic = (
)
-class Bucket(object):
+class Bucket:
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
@@ -43,17 +50,17 @@ class Bucket(object):
cache subclasses don't have to care about cache invalidation.
"""
- def __init__(self, environment, key, checksum):
+ def __init__(self, environment: "Environment", key: str, checksum: str) -> None:
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
- def reset(self):
+ def reset(self) -> None:
"""Resets the bucket (unloads the bytecode)."""
- self.code = None
+ self.code: t.Optional[CodeType] = None
- def load_bytecode(self, f):
+ def load_bytecode(self, f: t.BinaryIO) -> None:
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
@@ -67,31 +74,31 @@ class Bucket(object):
return
# if marshal_load fails then we need to reload
try:
- self.code = marshal_load(f)
+ self.code = marshal.load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
- def write_bytecode(self, f):
+ def write_bytecode(self, f: t.IO[bytes]) -> None:
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError("can't write empty bucket")
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
- marshal_dump(self.code, f)
+ marshal.dump(self.code, f)
- def bytecode_from_string(self, string):
- """Load bytecode from a string."""
+ def bytecode_from_string(self, string: bytes) -> None:
+ """Load bytecode from bytes."""
self.load_bytecode(BytesIO(string))
- def bytecode_to_string(self):
- """Return the bytecode as string."""
+ def bytecode_to_string(self) -> bytes:
+ """Return the bytecode as bytes."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
-class BytecodeCache(object):
+class BytecodeCache:
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
@@ -120,41 +127,48 @@ class BytecodeCache(object):
Jinja.
"""
- def load_bytecode(self, bucket):
+ def load_bytecode(self, bucket: Bucket) -> None:
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
        bucket, they must not do anything.
"""
raise NotImplementedError()
- def dump_bytecode(self, bucket):
+ def dump_bytecode(self, bucket: Bucket) -> None:
"""Subclasses have to override this method to write the bytecode
        from a bucket back to the cache. If it is unable to do so, it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
- def clear(self):
+ def clear(self) -> None:
"""Clears the cache. This method is not used by Jinja but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
- def get_cache_key(self, name, filename=None):
+ def get_cache_key(
+        self, name: str, filename: t.Optional[str] = None
+ ) -> str:
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode("utf-8"))
+
if filename is not None:
- filename = "|" + filename
- if isinstance(filename, text_type):
- filename = filename.encode("utf-8")
- hash.update(filename)
+ hash.update(f"|{filename}".encode())
+
return hash.hexdigest()
- def get_source_checksum(self, source):
+ def get_source_checksum(self, source: str) -> str:
"""Returns a checksum for the source."""
return sha1(source.encode("utf-8")).hexdigest()
- def get_bucket(self, environment, name, filename, source):
+ def get_bucket(
+ self,
+ environment: "Environment",
+ name: str,
+ filename: t.Optional[str],
+ source: str,
+ ) -> Bucket:
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
@@ -164,7 +178,7 @@ class BytecodeCache(object):
self.load_bytecode(bucket)
return bucket
- def set_bucket(self, bucket):
+ def set_bucket(self, bucket: Bucket) -> None:
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
@@ -187,14 +201,16 @@ class FileSystemBytecodeCache(BytecodeCache):
This bytecode cache supports clearing of the cache using the clear method.
"""
- def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
+ def __init__(
+ self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache"
+ ) -> None:
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
- def _get_default_cache_dir(self):
- def _unsafe_dir():
+ def _get_default_cache_dir(self) -> str:
+ def _unsafe_dir() -> "te.NoReturn":
raise RuntimeError(
"Cannot determine safe temp directory. You "
"need to explicitly provide one."
@@ -209,7 +225,7 @@ class FileSystemBytecodeCache(BytecodeCache):
if not hasattr(os, "getuid"):
_unsafe_dir()
- dirname = "_jinja2-cache-%d" % os.getuid()
+ dirname = f"_jinja2-cache-{os.getuid()}"
actual_dir = os.path.join(tmpdir, dirname)
try:
@@ -240,34 +256,72 @@ class FileSystemBytecodeCache(BytecodeCache):
return actual_dir
- def _get_cache_filename(self, bucket):
- return path.join(self.directory, self.pattern % bucket.key)
+ def _get_cache_filename(self, bucket: Bucket) -> str:
+ return os.path.join(self.directory, self.pattern % (bucket.key,))
+
+ def load_bytecode(self, bucket: Bucket) -> None:
+ filename = self._get_cache_filename(bucket)
- def load_bytecode(self, bucket):
- f = open_if_exists(self._get_cache_filename(bucket), "rb")
- if f is not None:
+ # Don't test for existence before opening the file, since the
+ # file could disappear after the test before the open.
+ try:
+ f = open(filename, "rb")
+ except (FileNotFoundError, IsADirectoryError, PermissionError):
+ # PermissionError can occur on Windows when an operation is
+ # in progress, such as calling clear().
+ return
+
+ with f:
+ bucket.load_bytecode(f)
+
+ def dump_bytecode(self, bucket: Bucket) -> None:
+ # Write to a temporary file, then rename to the real name after
+ # writing. This avoids another process reading the file before
+ # it is fully written.
+ name = self._get_cache_filename(bucket)
+ f = tempfile.NamedTemporaryFile(
+ mode="wb",
+ dir=os.path.dirname(name),
+ prefix=os.path.basename(name),
+ suffix=".tmp",
+ delete=False,
+ )
+
+ def remove_silent() -> None:
try:
- bucket.load_bytecode(f)
- finally:
- f.close()
+ os.remove(f.name)
+ except OSError:
+ # Another process may have called clear(). On Windows,
+ # another program may be holding the file open.
+ pass
- def dump_bytecode(self, bucket):
- f = open(self._get_cache_filename(bucket), "wb")
try:
- bucket.write_bytecode(f)
- finally:
- f.close()
+ with f:
+ bucket.write_bytecode(f)
+ except BaseException:
+ remove_silent()
+ raise
- def clear(self):
+ try:
+ os.replace(f.name, name)
+ except OSError:
+ # Another process may have called clear(). On Windows,
+ # another program may be holding the file open.
+ remove_silent()
+ except BaseException:
+ remove_silent()
+ raise
+
+ def clear(self) -> None:
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
- files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
+ files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",))
for filename in files:
try:
- remove(path.join(self.directory, filename))
+ remove(os.path.join(self.directory, filename))
except OSError:
pass
@@ -284,7 +338,7 @@ class MemcachedBytecodeCache(BytecodeCache):
- `python-memcached <https://pypi.org/project/python-memcached/>`_
(Unfortunately the django cache interface is not compatible because it
- does not support storing binary data, only unicode. You can however pass
+ does not support storing binary data, only text. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
@@ -319,32 +373,34 @@ class MemcachedBytecodeCache(BytecodeCache):
def __init__(
self,
- client,
- prefix="jinja2/bytecode/",
- timeout=None,
- ignore_memcache_errors=True,
+ client: "_MemcachedClient",
+ prefix: str = "jinja2/bytecode/",
+ timeout: t.Optional[int] = None,
+ ignore_memcache_errors: bool = True,
):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
- def load_bytecode(self, bucket):
+ def load_bytecode(self, bucket: Bucket) -> None:
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
- code = None
- if code is not None:
+ else:
bucket.bytecode_from_string(code)
- def dump_bytecode(self, bucket):
- args = (self.prefix + bucket.key, bucket.bytecode_to_string())
- if self.timeout is not None:
- args += (self.timeout,)
+ def dump_bytecode(self, bucket: Bucket) -> None:
+ key = self.prefix + bucket.key
+ value = bucket.bytecode_to_string()
+
try:
- self.client.set(*args)
+ if self.timeout is not None:
+ self.client.set(key, value, self.timeout)
+ else:
+ self.client.set(key, value)
except Exception:
if not self.ignore_memcache_errors:
raise
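
The reworked bccache.py keeps the public interface intact, so wiring a cache into an environment is unchanged; what changes is the typing and the atomic on-disk write (temporary file plus os.replace). A minimal sketch of the file system cache in use, with a hypothetical cache directory created just for the example:

import tempfile

from jinja2 import DictLoader, Environment, FileSystemBytecodeCache

# Hypothetical cache directory for the example; with directory=None the
# cache falls back to a per-user temporary directory on its own.
cache_dir = tempfile.mkdtemp(prefix="jinja-bccache-")

env = Environment(
    loader=DictLoader({"hello.txt": "Hello {{ name }}!"}),
    bytecode_cache=FileSystemBytecodeCache(directory=cache_dir),
)

# Loading through the loader consults the cache: the first get_template()
# writes a __jinja2_<key>.cache file, and later processes pointed at the
# same directory skip recompilation of the template source.
print(env.get_template("hello.txt").render(name="cache"))
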
diff --git a/deps/v8/third_party/jinja2/compiler.py b/deps/v8/third_party/jinja2/compiler.py
index b39478d739..3458095f54 100644
--- a/deps/v8/third_party/jinja2/compiler.py
+++ b/deps/v8/third_party/jinja2/compiler.py
@@ -1,7 +1,8 @@
-# -*- coding: utf-8 -*-
"""Compiles nodes from the parser into Python code."""
-from collections import namedtuple
+import typing as t
+from contextlib import contextmanager
from functools import update_wrapper
+from io import StringIO
from itertools import chain
from keyword import iskeyword as is_python_keyword
@@ -9,13 +10,6 @@ from markupsafe import escape
from markupsafe import Markup
from . import nodes
-from ._compat import imap
-from ._compat import iteritems
-from ._compat import izip
-from ._compat import NativeStringIO
-from ._compat import range_type
-from ._compat import string_types
-from ._compat import text_type
from .exceptions import TemplateAssertionError
from .idtracking import Symbols
from .idtracking import VAR_LOAD_ALIAS
@@ -24,9 +18,16 @@ from .idtracking import VAR_LOAD_RESOLVE
from .idtracking import VAR_LOAD_UNDEFINED
from .nodes import EvalContext
from .optimizer import Optimizer
+from .utils import _PassArg
from .utils import concat
from .visitor import NodeVisitor
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
operators = {
"eq": "==",
"ne": "!=",
@@ -38,79 +39,109 @@ operators = {
"notin": "not in",
}
-# what method to iterate over items do we want to use for dict iteration
-# in generated code? on 2.x let's go with iteritems, on 3.x with items
-if hasattr(dict, "iteritems"):
- dict_item_iter = "iteritems"
-else:
- dict_item_iter = "items"
-
-code_features = ["division"]
-
-# does this python version support generator stops? (PEP 0479)
-try:
- exec("from __future__ import generator_stop")
- code_features.append("generator_stop")
-except SyntaxError:
- pass
-
-# does this python version support yield from?
-try:
- exec("def f(): yield from x()")
-except SyntaxError:
- supports_yield_from = False
-else:
- supports_yield_from = True
-
-
-def optimizeconst(f):
- def new_func(self, node, frame, **kwargs):
+
+def optimizeconst(f: F) -> F:
+ def new_func(
+ self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any
+ ) -> t.Any:
# Only optimize if the frame is not volatile
- if self.optimized and not frame.eval_ctx.volatile:
+ if self.optimizer is not None and not frame.eval_ctx.volatile:
new_node = self.optimizer.visit(node, frame.eval_ctx)
+
if new_node != node:
return self.visit(new_node, frame)
+
return f(self, node, frame, **kwargs)
- return update_wrapper(new_func, f)
+ return update_wrapper(t.cast(F, new_func), f)
+
+
+def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]:
+ @optimizeconst
+ def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None:
+ if (
+ self.environment.sandboxed
+ and op in self.environment.intercepted_binops # type: ignore
+ ):
+ self.write(f"environment.call_binop(context, {op!r}, ")
+ self.visit(node.left, frame)
+ self.write(", ")
+ self.visit(node.right, frame)
+ else:
+ self.write("(")
+ self.visit(node.left, frame)
+ self.write(f" {op} ")
+ self.visit(node.right, frame)
+
+ self.write(")")
+
+ return visitor
+
+
+def _make_unop(
+ op: str,
+) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]:
+ @optimizeconst
+ def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None:
+ if (
+ self.environment.sandboxed
+ and op in self.environment.intercepted_unops # type: ignore
+ ):
+ self.write(f"environment.call_unop(context, {op!r}, ")
+ self.visit(node.node, frame)
+ else:
+ self.write("(" + op)
+ self.visit(node.node, frame)
+
+ self.write(")")
+
+ return visitor
def generate(
- node, environment, name, filename, stream=None, defer_init=False, optimized=True
-):
+ node: nodes.Template,
+ environment: "Environment",
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ stream: t.Optional[t.TextIO] = None,
+ defer_init: bool = False,
+ optimized: bool = True,
+) -> t.Optional[str]:
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError("Can't compile non template nodes")
+
generator = environment.code_generator_class(
environment, name, filename, stream, defer_init, optimized
)
generator.visit(node)
+
if stream is None:
- return generator.stream.getvalue()
+ return generator.stream.getvalue() # type: ignore
+
+ return None
-def has_safe_repr(value):
+def has_safe_repr(value: t.Any) -> bool:
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
- if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
- return True
- if type(value) in (tuple, list, set, frozenset):
- for item in value:
- if not has_safe_repr(item):
- return False
- return True
- elif type(value) is dict:
- for key, value in iteritems(value):
- if not has_safe_repr(key):
- return False
- if not has_safe_repr(value):
- return False
+
+ if type(value) in {bool, int, float, complex, range, str, Markup}:
return True
+
+ if type(value) in {tuple, list, set, frozenset}:
+ return all(has_safe_repr(v) for v in value)
+
+ if type(value) is dict:
+ return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items())
+
return False
-def find_undeclared(nodes, names):
+def find_undeclared(
+ nodes: t.Iterable[nodes.Node], names: t.Iterable[str]
+) -> t.Set[str]:
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
@@ -123,20 +154,49 @@ def find_undeclared(nodes, names):
return visitor.undeclared
-class MacroRef(object):
- def __init__(self, node):
+class MacroRef:
+ def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None:
self.node = node
self.accesses_caller = False
self.accesses_kwargs = False
self.accesses_varargs = False
-class Frame(object):
+class Frame:
"""Holds compile time information for us."""
- def __init__(self, eval_ctx, parent=None, level=None):
+ def __init__(
+ self,
+ eval_ctx: EvalContext,
+ parent: t.Optional["Frame"] = None,
+ level: t.Optional[int] = None,
+ ) -> None:
self.eval_ctx = eval_ctx
- self.symbols = Symbols(parent and parent.symbols or None, level=level)
+
+ # the parent of this frame
+ self.parent = parent
+
+ if parent is None:
+ self.symbols = Symbols(level=level)
+
+ # in some dynamic inheritance situations the compiler needs to add
+ # write tests around output statements.
+ self.require_output_check = False
+
+ # inside some tags we are using a buffer rather than yield statements.
+ # this for example affects {% filter %} or {% macro %}. If a frame
+ # is buffered this variable points to the name of the list used as
+ # buffer.
+ self.buffer: t.Optional[str] = None
+
+ # the name of the block we're in, otherwise None.
+ self.block: t.Optional[str] = None
+
+ else:
+ self.symbols = Symbols(parent.symbols, level=level)
+ self.require_output_check = parent.require_output_check
+ self.buffer = parent.buffer
+ self.block = parent.block
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
@@ -146,47 +206,40 @@ class Frame(object):
# situations.
self.rootlevel = False
- # in some dynamic inheritance situations the compiler needs to add
- # write tests around output statements.
- self.require_output_check = parent and parent.require_output_check
-
- # inside some tags we are using a buffer rather than yield statements.
- # this for example affects {% filter %} or {% macro %}. If a frame
- # is buffered this variable points to the name of the list used as
- # buffer.
- self.buffer = None
+ # variables set inside of loops and blocks should not affect outer frames,
+        # but they still need to be kept track of as part of the active context.
+ self.loop_frame = False
+ self.block_frame = False
- # the name of the block we're in, otherwise None.
- self.block = parent and parent.block or None
+ # track whether the frame is being used in an if-statement or conditional
+ # expression as it determines which errors should be raised during runtime
+ # or compile time.
+ self.soft_frame = False
- # the parent of this frame
- self.parent = parent
-
- if parent is not None:
- self.buffer = parent.buffer
-
- def copy(self):
+ def copy(self) -> "Frame":
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.symbols = self.symbols.copy()
return rv
- def inner(self, isolated=False):
+ def inner(self, isolated: bool = False) -> "Frame":
"""Return an inner frame."""
if isolated:
return Frame(self.eval_ctx, level=self.symbols.level + 1)
return Frame(self.eval_ctx, self)
- def soft(self):
+ def soft(self) -> "Frame":
"""Return a soft frame. A soft frame may not be modified as
        a standalone thing, as it shares the resources with the frame it
        was created from, but it's not a rootlevel frame any longer.
- This is only used to implement if-statements.
+ This is only used to implement if-statements and conditional
+ expressions.
"""
rv = self.copy()
rv.rootlevel = False
+ rv.soft_frame = True
return rv
__copy__ = copy
@@ -199,19 +252,19 @@ class VisitorExit(RuntimeError):
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
- def __init__(self):
- self.filters = set()
- self.tests = set()
+ def __init__(self) -> None:
+ self.filters: t.Set[str] = set()
+ self.tests: t.Set[str] = set()
- def visit_Filter(self, node):
+ def visit_Filter(self, node: nodes.Filter) -> None:
self.generic_visit(node)
self.filters.add(node.name)
- def visit_Test(self, node):
+ def visit_Test(self, node: nodes.Test) -> None:
self.generic_visit(node)
self.tests.add(node.name)
- def visit_Block(self, node):
+ def visit_Block(self, node: nodes.Block) -> None:
"""Stop visiting at blocks."""
@@ -221,11 +274,11 @@ class UndeclaredNameVisitor(NodeVisitor):
not stop at closure frames.
"""
- def __init__(self, names):
+ def __init__(self, names: t.Iterable[str]) -> None:
self.names = set(names)
- self.undeclared = set()
+ self.undeclared: t.Set[str] = set()
- def visit_Name(self, node):
+ def visit_Name(self, node: nodes.Name) -> None:
if node.ctx == "load" and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
@@ -233,7 +286,7 @@ class UndeclaredNameVisitor(NodeVisitor):
else:
self.names.discard(node.name)
- def visit_Block(self, node):
+ def visit_Block(self, node: nodes.Block) -> None:
"""Stop visiting a blocks."""
@@ -246,26 +299,33 @@ class CompilerExit(Exception):
class CodeGenerator(NodeVisitor):
def __init__(
- self, environment, name, filename, stream=None, defer_init=False, optimized=True
- ):
+ self,
+ environment: "Environment",
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ stream: t.Optional[t.TextIO] = None,
+ defer_init: bool = False,
+ optimized: bool = True,
+ ) -> None:
if stream is None:
- stream = NativeStringIO()
+ stream = StringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
- self.optimized = optimized
+ self.optimizer: t.Optional[Optimizer] = None
+
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
- self.import_aliases = {}
+ self.import_aliases: t.Dict[str, str] = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
- self.blocks = {}
+ self.blocks: t.Dict[str, nodes.Block] = {}
# the number of extends statements so far
self.extends_so_far = 0
@@ -279,12 +339,12 @@ class CodeGenerator(NodeVisitor):
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
- self.tests = {}
- self.filters = {}
+ self.tests: t.Dict[str, str] = {}
+ self.filters: t.Dict[str, str] = {}
# the debug information
- self.debug_info = []
- self._write_debug_info = None
+ self.debug_info: t.List[t.Tuple[int, int]] = []
+ self._write_debug_info: t.Optional[int] = None
# the number of new lines before the next write()
self._new_lines = 0
@@ -303,75 +363,83 @@ class CodeGenerator(NodeVisitor):
self._indentation = 0
# Tracks toplevel assignments
- self._assign_stack = []
+ self._assign_stack: t.List[t.Set[str]] = []
# Tracks parameter definition blocks
- self._param_def_block = []
+ self._param_def_block: t.List[t.Set[str]] = []
# Tracks the current context.
self._context_reference_stack = ["context"]
+ @property
+ def optimized(self) -> bool:
+ return self.optimizer is not None
+
# -- Various compilation helpers
- def fail(self, msg, lineno):
+ def fail(self, msg: str, lineno: int) -> "te.NoReturn":
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
- def temporary_identifier(self):
+ def temporary_identifier(self) -> str:
"""Get a new unique identifier."""
self._last_identifier += 1
- return "t_%d" % self._last_identifier
+ return f"t_{self._last_identifier}"
- def buffer(self, frame):
+ def buffer(self, frame: Frame) -> None:
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
- self.writeline("%s = []" % frame.buffer)
+ self.writeline(f"{frame.buffer} = []")
- def return_buffer_contents(self, frame, force_unescaped=False):
+ def return_buffer_contents(
+ self, frame: Frame, force_unescaped: bool = False
+ ) -> None:
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline("if context.eval_ctx.autoescape:")
self.indent()
- self.writeline("return Markup(concat(%s))" % frame.buffer)
+ self.writeline(f"return Markup(concat({frame.buffer}))")
self.outdent()
self.writeline("else:")
self.indent()
- self.writeline("return concat(%s)" % frame.buffer)
+ self.writeline(f"return concat({frame.buffer})")
self.outdent()
return
elif frame.eval_ctx.autoescape:
- self.writeline("return Markup(concat(%s))" % frame.buffer)
+ self.writeline(f"return Markup(concat({frame.buffer}))")
return
- self.writeline("return concat(%s)" % frame.buffer)
+ self.writeline(f"return concat({frame.buffer})")
- def indent(self):
+ def indent(self) -> None:
"""Indent by one."""
self._indentation += 1
- def outdent(self, step=1):
+ def outdent(self, step: int = 1) -> None:
"""Outdent by step."""
self._indentation -= step
- def start_write(self, frame, node=None):
+ def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None:
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline("yield ", node)
else:
- self.writeline("%s.append(" % frame.buffer, node)
+ self.writeline(f"{frame.buffer}.append(", node)
- def end_write(self, frame):
+ def end_write(self, frame: Frame) -> None:
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(")")
- def simple_write(self, s, frame, node=None):
+ def simple_write(
+ self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None
+ ) -> None:
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
- def blockvisit(self, nodes, frame):
+ def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None:
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
@@ -382,7 +450,7 @@ class CodeGenerator(NodeVisitor):
except CompilerExit:
pass
- def write(self, x):
+ def write(self, x: str) -> None:
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
@@ -396,19 +464,26 @@ class CodeGenerator(NodeVisitor):
self._new_lines = 0
self.stream.write(x)
- def writeline(self, x, node=None, extra=0):
+ def writeline(
+ self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0
+ ) -> None:
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
- def newline(self, node=None, extra=0):
+ def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None:
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
- def signature(self, node, frame, extra_kwargs=None):
+ def signature(
+ self,
+ node: t.Union[nodes.Call, nodes.Filter, nodes.Test],
+ frame: Frame,
+ extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> None:
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
@@ -417,11 +492,10 @@ class CodeGenerator(NodeVisitor):
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
- kwarg_workaround = False
- for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
- if is_python_keyword(kwarg):
- kwarg_workaround = True
- break
+ kwarg_workaround = any(
+ is_python_keyword(t.cast(str, k))
+ for k in chain((x.key for x in node.kwargs), extra_kwargs or ())
+ )
for arg in node.args:
self.write(", ")
@@ -432,8 +506,8 @@ class CodeGenerator(NodeVisitor):
self.write(", ")
self.visit(kwarg, frame)
if extra_kwargs is not None:
- for key, value in iteritems(extra_kwargs):
- self.write(", %s=%s" % (key, value))
+ for key, value in extra_kwargs.items():
+ self.write(f", {key}={value}")
if node.dyn_args:
self.write(", *")
self.visit(node.dyn_args, frame)
@@ -444,12 +518,12 @@ class CodeGenerator(NodeVisitor):
else:
self.write(", **{")
for kwarg in node.kwargs:
- self.write("%r: " % kwarg.key)
+ self.write(f"{kwarg.key!r}: ")
self.visit(kwarg.value, frame)
self.write(", ")
if extra_kwargs is not None:
- for key, value in iteritems(extra_kwargs):
- self.write("%r: %s, " % (key, value))
+ for key, value in extra_kwargs.items():
+ self.write(f"{key!r}: {value}, ")
if node.dyn_kwargs is not None:
self.write("}, **")
self.visit(node.dyn_kwargs, frame)
@@ -461,50 +535,82 @@ class CodeGenerator(NodeVisitor):
self.write(", **")
self.visit(node.dyn_kwargs, frame)
- def pull_dependencies(self, nodes):
- """Pull all the dependencies."""
+ def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None:
+ """Find all filter and test names used in the template and
+ assign them to variables in the compiled namespace. Checking
+ that the names are registered with the environment is done when
+ compiling the Filter and Test nodes. If the node is in an If or
+ CondExpr node, the check is done at runtime instead.
+
+ .. versionchanged:: 3.0
+ Filters and tests in If and CondExpr nodes are checked at
+ runtime instead of compile time.
+ """
visitor = DependencyFinderVisitor()
+
for node in nodes:
visitor.visit(node)
- for dependency in "filters", "tests":
- mapping = getattr(self, dependency)
- for name in sorted(getattr(visitor, dependency)):
- if name not in mapping:
- mapping[name] = self.temporary_identifier()
+
+ for id_map, names, dependency in (self.filters, visitor.filters, "filters"), (
+ self.tests,
+ visitor.tests,
+ "tests",
+ ):
+ for name in sorted(names):
+ if name not in id_map:
+ id_map[name] = self.temporary_identifier()
+
+ # add check during runtime that dependencies used inside of executed
+ # blocks are defined, as this step may be skipped during compile time
+ self.writeline("try:")
+ self.indent()
+ self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]")
+ self.outdent()
+ self.writeline("except KeyError:")
+ self.indent()
+ self.writeline("@internalcode")
+ self.writeline(f"def {id_map[name]}(*unused):")
+ self.indent()
self.writeline(
- "%s = environment.%s[%r]" % (mapping[name], dependency, name)
+ f'raise TemplateRuntimeError("No {dependency[:-1]}'
+ f' named {name!r} found.")'
)
+ self.outdent()
+ self.outdent()
- def enter_frame(self, frame):
+ def enter_frame(self, frame: Frame) -> None:
undefs = []
- for target, (action, param) in iteritems(frame.symbols.loads):
+ for target, (action, param) in frame.symbols.loads.items():
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
- self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
+ self.writeline(f"{target} = {self.get_resolve_func()}({param!r})")
elif action == VAR_LOAD_ALIAS:
- self.writeline("%s = %s" % (target, param))
+ self.writeline(f"{target} = {param}")
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
raise NotImplementedError("unknown load instruction")
if undefs:
- self.writeline("%s = missing" % " = ".join(undefs))
+ self.writeline(f"{' = '.join(undefs)} = missing")
- def leave_frame(self, frame, with_python_scope=False):
+ def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None:
if not with_python_scope:
undefs = []
- for target, _ in iteritems(frame.symbols.loads):
+ for target in frame.symbols.loads:
undefs.append(target)
if undefs:
- self.writeline("%s = missing" % " = ".join(undefs))
+ self.writeline(f"{' = '.join(undefs)} = missing")
- def func(self, name):
- if self.environment.is_async:
- return "async def %s" % name
- return "def %s" % name
+ def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str:
+ return async_value if self.environment.is_async else sync_value
- def macro_body(self, node, frame):
+ def func(self, name: str) -> str:
+ return f"{self.choose_async()}def {name}"
+
+ def macro_body(
+ self, node: t.Union[nodes.Macro, nodes.CallBlock], frame: Frame
+ ) -> t.Tuple[Frame, MacroRef]:
"""Dump the function def of a macro or call block."""
frame = frame.inner()
frame.symbols.analyze_node(node)
@@ -513,6 +619,7 @@ class CodeGenerator(NodeVisitor):
explicit_caller = None
skip_special_params = set()
args = []
+
for idx, arg in enumerate(node.args):
if arg.name == "caller":
explicit_caller = idx
@@ -552,7 +659,7 @@ class CodeGenerator(NodeVisitor):
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
- self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
+ self.writeline(f"{self.func('macro')}({', '.join(args)}):", node)
self.indent()
self.buffer(frame)
@@ -561,17 +668,17 @@ class CodeGenerator(NodeVisitor):
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
- self.writeline("if %s is missing:" % ref)
+ self.writeline(f"if {ref} is missing:")
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
self.writeline(
- "%s = undefined(%r, name=%r)"
- % (ref, "parameter %r was not provided" % arg.name, arg.name)
+ f'{ref} = undefined("parameter {arg.name!r} was not provided",'
+ f" name={arg.name!r})"
)
else:
- self.writeline("%s = " % ref)
+ self.writeline(f"{ref} = ")
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
@@ -584,50 +691,46 @@ class CodeGenerator(NodeVisitor):
return frame, macro_ref
- def macro_def(self, macro_ref, frame):
+ def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None:
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, "name", None)
if len(macro_ref.node.args) == 1:
arg_tuple += ","
self.write(
- "Macro(environment, macro, %r, (%s), %r, %r, %r, "
- "context.eval_ctx.autoescape)"
- % (
- name,
- arg_tuple,
- macro_ref.accesses_kwargs,
- macro_ref.accesses_varargs,
- macro_ref.accesses_caller,
- )
+ f"Macro(environment, macro, {name!r}, ({arg_tuple}),"
+ f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r},"
+ f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)"
)
- def position(self, node):
+ def position(self, node: nodes.Node) -> str:
"""Return a human readable position for the node."""
- rv = "line %d" % node.lineno
+ rv = f"line {node.lineno}"
if self.name is not None:
- rv += " in " + repr(self.name)
+ rv = f"{rv} in {self.name!r}"
return rv
- def dump_local_context(self, frame):
- return "{%s}" % ", ".join(
- "%r: %s" % (name, target)
- for name, target in sorted(iteritems(frame.symbols.dump_stores()))
+ def dump_local_context(self, frame: Frame) -> str:
+ items_kv = ", ".join(
+ f"{name!r}: {target}"
+ for name, target in frame.symbols.dump_stores().items()
)
+ return f"{{{items_kv}}}"
- def write_commons(self):
+ def write_commons(self) -> None:
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline("resolve = context.resolve_or_missing")
self.writeline("undefined = environment.undefined")
+ self.writeline("concat = environment.concat")
# always use the standard Undefined class for the implicit else of
# conditional expressions
self.writeline("cond_expr_undefined = Undefined")
self.writeline("if 0: yield None")
- def push_parameter_definitions(self, frame):
+ def push_parameter_definitions(self, frame: Frame) -> None:
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
@@ -636,97 +739,108 @@ class CodeGenerator(NodeVisitor):
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
- def pop_parameter_definitions(self):
+ def pop_parameter_definitions(self) -> None:
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
- def mark_parameter_stored(self, target):
+ def mark_parameter_stored(self, target: str) -> None:
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
- def push_context_reference(self, target):
+ def push_context_reference(self, target: str) -> None:
self._context_reference_stack.append(target)
- def pop_context_reference(self):
+ def pop_context_reference(self) -> None:
self._context_reference_stack.pop()
- def get_context_ref(self):
+ def get_context_ref(self) -> str:
return self._context_reference_stack[-1]
- def get_resolve_func(self):
+ def get_resolve_func(self) -> str:
target = self._context_reference_stack[-1]
if target == "context":
return "resolve"
- return "%s.resolve" % target
+ return f"{target}.resolve"
- def derive_context(self, frame):
- return "%s.derived(%s)" % (
- self.get_context_ref(),
- self.dump_local_context(frame),
- )
+ def derive_context(self, frame: Frame) -> str:
+ return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})"
- def parameter_is_undeclared(self, target):
+ def parameter_is_undeclared(self, target: str) -> bool:
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
- def push_assign_tracking(self):
+ def push_assign_tracking(self) -> None:
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
- def pop_assign_tracking(self, frame):
+ def pop_assign_tracking(self, frame: Frame) -> None:
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
"""
vars = self._assign_stack.pop()
- if not frame.toplevel or not vars:
+ if (
+ not frame.block_frame
+ and not frame.loop_frame
+ and not frame.toplevel
+ or not vars
+ ):
return
public_names = [x for x in vars if x[:1] != "_"]
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
- self.writeline("context.vars[%r] = %s" % (name, ref))
+ if frame.loop_frame:
+ self.writeline(f"_loop_vars[{name!r}] = {ref}")
+ return
+ if frame.block_frame:
+ self.writeline(f"_block_vars[{name!r}] = {ref}")
+ return
+ self.writeline(f"context.vars[{name!r}] = {ref}")
else:
- self.writeline("context.vars.update({")
+ if frame.loop_frame:
+ self.writeline("_loop_vars.update({")
+ elif frame.block_frame:
+ self.writeline("_block_vars.update({")
+ else:
+ self.writeline("context.vars.update({")
for idx, name in enumerate(vars):
if idx:
self.write(", ")
ref = frame.symbols.ref(name)
- self.write("%r: %s" % (name, ref))
+ self.write(f"{name!r}: {ref}")
self.write("})")
- if public_names:
+ if not frame.block_frame and not frame.loop_frame and public_names:
if len(public_names) == 1:
- self.writeline("context.exported_vars.add(%r)" % public_names[0])
+ self.writeline(f"context.exported_vars.add({public_names[0]!r})")
else:
- self.writeline(
- "context.exported_vars.update((%s))"
- % ", ".join(imap(repr, public_names))
- )
+ names_str = ", ".join(map(repr, public_names))
+ self.writeline(f"context.exported_vars.update(({names_str}))")
# -- Statement Visitors
- def visit_Template(self, node, frame=None):
+ def visit_Template(
+ self, node: nodes.Template, frame: t.Optional[Frame] = None
+ ) -> None:
assert frame is None, "no root frame allowed"
eval_ctx = EvalContext(self.environment, self.name)
- from .runtime import exported
-
- self.writeline("from __future__ import %s" % ", ".join(code_features))
- self.writeline("from jinja2.runtime import " + ", ".join(exported))
+ from .runtime import exported, async_exported
if self.environment.is_async:
- self.writeline(
- "from jinja2.asyncsupport import auto_await, "
- "auto_aiter, AsyncLoopContext"
- )
+ exported_names = sorted(exported + async_exported)
+ else:
+ exported_names = sorted(exported)
+
+ self.writeline("from jinja2.runtime import " + ", ".join(exported_names))
# if we want a deferred initialization we cannot move the
# environment into a local name
- envenv = not self.defer_init and ", environment=environment" or ""
+ envenv = "" if self.defer_init else ", environment=environment"
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
@@ -735,7 +849,7 @@ class CodeGenerator(NodeVisitor):
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
- self.fail("block %r defined twice" % block.name, block.lineno)
+ self.fail(f"block {block.name!r} defined twice", block.lineno)
self.blocks[block.name] = block
# find all imports and import them
@@ -745,16 +859,16 @@ class CodeGenerator(NodeVisitor):
self.import_aliases[imp] = alias = self.temporary_identifier()
if "." in imp:
module, obj = imp.rsplit(".", 1)
- self.writeline("from %s import %s as %s" % (module, obj, alias))
+ self.writeline(f"from {module} import {obj} as {alias}")
else:
- self.writeline("import %s as %s" % (imp, alias))
+ self.writeline(f"import {imp} as {alias}")
# add the load name
- self.writeline("name = %r" % self.name)
+ self.writeline(f"name = {self.name!r}")
# generate the root render function.
self.writeline(
- "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
+ f"{self.func('root')}(context, missing=missing{envenv}):", extra=1
)
self.indent()
self.write_commons()
@@ -763,7 +877,7 @@ class CodeGenerator(NodeVisitor):
frame = Frame(eval_ctx)
if "self" in find_undeclared(node.body, ("self",)):
ref = frame.symbols.declare_parameter("self")
- self.writeline("%s = TemplateReference(context)" % ref)
+ self.writeline(f"{ref} = TemplateReference(context)")
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
@@ -781,13 +895,11 @@ class CodeGenerator(NodeVisitor):
self.indent()
self.writeline("if parent_template is not None:")
self.indent()
- if supports_yield_from and not self.environment.is_async:
+ if not self.environment.is_async:
self.writeline("yield from parent_template.root_render_func(context)")
else:
self.writeline(
- "%sfor event in parent_template."
- "root_render_func(context):"
- % (self.environment.is_async and "async " or "")
+ "async for event in parent_template.root_render_func(context):"
)
self.indent()
self.writeline("yield event")
@@ -795,10 +907,9 @@ class CodeGenerator(NodeVisitor):
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
- for name, block in iteritems(self.blocks):
+ for name, block in self.blocks.items():
self.writeline(
- "%s(context, missing=missing%s):"
- % (self.func("block_" + name), envenv),
+ f"{self.func('block_' + name)}(context, missing=missing{envenv}):",
block,
1,
)
@@ -808,32 +919,29 @@ class CodeGenerator(NodeVisitor):
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
+ block_frame.block_frame = True
undeclared = find_undeclared(block.body, ("self", "super"))
if "self" in undeclared:
ref = block_frame.symbols.declare_parameter("self")
- self.writeline("%s = TemplateReference(context)" % ref)
+ self.writeline(f"{ref} = TemplateReference(context)")
if "super" in undeclared:
ref = block_frame.symbols.declare_parameter("super")
- self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
+ self.writeline(f"{ref} = context.super({name!r}, block_{name})")
block_frame.symbols.analyze_node(block)
block_frame.block = name
+ self.writeline("_block_vars = {}")
self.enter_frame(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
- self.writeline(
- "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
- extra=1,
- )
+ blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks)
+ self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1)
+ debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info)
+ self.writeline(f"debug_info = {debug_kv_str!r}")
- # add a function that returns the debug info
- self.writeline(
- "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
- )
-
- def visit_Block(self, node, frame):
+ def visit_Block(self, node: nodes.Block, frame: Frame) -> None:
"""Call a block and register it for the template."""
level = 0
if frame.toplevel:
@@ -851,18 +959,23 @@ class CodeGenerator(NodeVisitor):
else:
context = self.get_context_ref()
- if (
- supports_yield_from
- and not self.environment.is_async
- and frame.buffer is None
- ):
+ if node.required:
+ self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node)
+ self.indent()
self.writeline(
- "yield from context.blocks[%r][0](%s)" % (node.name, context), node
+ f'raise TemplateRuntimeError("Required block {node.name!r} not found")',
+ node,
+ )
+ self.outdent()
+
+ if not self.environment.is_async and frame.buffer is None:
+ self.writeline(
+ f"yield from context.blocks[{node.name!r}][0]({context})", node
)
else:
- loop = self.environment.is_async and "async for" or "for"
self.writeline(
- "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
+ f"{self.choose_async()}for event in"
+ f" context.blocks[{node.name!r}][0]({context}):",
node,
)
self.indent()
@@ -871,7 +984,7 @@ class CodeGenerator(NodeVisitor):
self.outdent(level)
- def visit_Extends(self, node, frame):
+ def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None:
"""Calls the extender."""
if not frame.toplevel:
self.fail("cannot use extend from a non top-level scope", node.lineno)
@@ -888,7 +1001,7 @@ class CodeGenerator(NodeVisitor):
if not self.has_known_extends:
self.writeline("if parent_template is not None:")
self.indent()
- self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
+ self.writeline('raise TemplateRuntimeError("extended multiple times")')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
@@ -899,10 +1012,8 @@ class CodeGenerator(NodeVisitor):
self.writeline("parent_template = environment.get_template(", node)
self.visit(node.template, frame)
- self.write(", %r)" % self.name)
- self.writeline(
- "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
- )
+ self.write(f", {self.name!r})")
+ self.writeline("for name, parent_block in parent_template.blocks.items():")
self.indent()
self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
self.outdent()
@@ -916,7 +1027,7 @@ class CodeGenerator(NodeVisitor):
# and now we have one more
self.extends_so_far += 1
- def visit_Include(self, node, frame):
+ def visit_Include(self, node: nodes.Include, frame: Frame) -> None:
"""Handles includes."""
if node.ignore_missing:
self.writeline("try:")
@@ -924,16 +1035,16 @@ class CodeGenerator(NodeVisitor):
func_name = "get_or_select_template"
if isinstance(node.template, nodes.Const):
- if isinstance(node.template.value, string_types):
+ if isinstance(node.template.value, str):
func_name = "get_template"
elif isinstance(node.template.value, (tuple, list)):
func_name = "select_template"
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = "select_template"
- self.writeline("template = environment.%s(" % func_name, node)
+ self.writeline(f"template = environment.{func_name}(", node)
self.visit(node.template, frame)
- self.write(", %r)" % self.name)
+ self.write(f", {self.name!r})")
if node.ignore_missing:
self.outdent()
self.writeline("except TemplateNotFound:")
@@ -945,26 +1056,19 @@ class CodeGenerator(NodeVisitor):
skip_event_yield = False
if node.with_context:
- loop = self.environment.is_async and "async for" or "for"
self.writeline(
- "%s event in template.root_render_func("
- "template.new_context(context.get_all(), True, "
- "%s)):" % (loop, self.dump_local_context(frame))
+ f"{self.choose_async()}for event in template.root_render_func("
+ "template.new_context(context.get_all(), True,"
+ f" {self.dump_local_context(frame)})):"
)
elif self.environment.is_async:
self.writeline(
- "for event in (await "
- "template._get_default_module_async())"
+ "for event in (await template._get_default_module_async())"
"._body_stream:"
)
else:
- if supports_yield_from:
- self.writeline("yield from template._get_default_module()._body_stream")
- skip_event_yield = True
- else:
- self.writeline(
- "for event in template._get_default_module()._body_stream:"
- )
+ self.writeline("yield from template._get_default_module()._body_stream")
+ skip_event_yield = True
if not skip_event_yield:
self.indent()
@@ -974,53 +1078,37 @@ class CodeGenerator(NodeVisitor):
if node.ignore_missing:
self.outdent()
- def visit_Import(self, node, frame):
- """Visit regular imports."""
- self.writeline("%s = " % frame.symbols.ref(node.target), node)
- if frame.toplevel:
- self.write("context.vars[%r] = " % node.target)
- if self.environment.is_async:
- self.write("await ")
- self.write("environment.get_template(")
+ def _import_common(
+ self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame
+ ) -> None:
+ self.write(f"{self.choose_async('await ')}environment.get_template(")
self.visit(node.template, frame)
- self.write(", %r)." % self.name)
+ self.write(f", {self.name!r}).")
+
if node.with_context:
+ f_name = f"make_module{self.choose_async('_async')}"
self.write(
- "make_module%s(context.get_all(), True, %s)"
- % (
- self.environment.is_async and "_async" or "",
- self.dump_local_context(frame),
- )
+ f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})"
)
- elif self.environment.is_async:
- self.write("_get_default_module_async()")
else:
- self.write("_get_default_module()")
+ self.write(f"_get_default_module{self.choose_async('_async')}(context)")
+
+ def visit_Import(self, node: nodes.Import, frame: Frame) -> None:
+ """Visit regular imports."""
+ self.writeline(f"{frame.symbols.ref(node.target)} = ", node)
+ if frame.toplevel:
+ self.write(f"context.vars[{node.target!r}] = ")
+
+ self._import_common(node, frame)
+
if frame.toplevel and not node.target.startswith("_"):
- self.writeline("context.exported_vars.discard(%r)" % node.target)
+ self.writeline(f"context.exported_vars.discard({node.target!r})")
- def visit_FromImport(self, node, frame):
+ def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None:
"""Visit named imports."""
self.newline(node)
- self.write(
- "included_template = %senvironment.get_template("
- % (self.environment.is_async and "await " or "")
- )
- self.visit(node.template, frame)
- self.write(", %r)." % self.name)
- if node.with_context:
- self.write(
- "make_module%s(context.get_all(), True, %s)"
- % (
- self.environment.is_async and "_async" or "",
- self.dump_local_context(frame),
- )
- )
- elif self.environment.is_async:
- self.write("_get_default_module_async()")
- else:
- self.write("_get_default_module()")
-
+ self.write("included_template = ")
+ self._import_common(node, frame)
var_names = []
discarded_names = []
for name in node.names:
@@ -1029,22 +1117,18 @@ class CodeGenerator(NodeVisitor):
else:
alias = name
self.writeline(
- "%s = getattr(included_template, "
- "%r, missing)" % (frame.symbols.ref(alias), name)
+ f"{frame.symbols.ref(alias)} ="
+ f" getattr(included_template, {name!r}, missing)"
)
- self.writeline("if %s is missing:" % frame.symbols.ref(alias))
+ self.writeline(f"if {frame.symbols.ref(alias)} is missing:")
self.indent()
+ message = (
+ "the template {included_template.__name__!r}"
+ f" (imported on {self.position(node)})"
+ f" does not export the requested name {name!r}"
+ )
self.writeline(
- "%s = undefined(%r %% "
- "included_template.__name__, "
- "name=%r)"
- % (
- frame.symbols.ref(alias),
- "the template %%r (imported on %s) does "
- "not export the requested name %s"
- % (self.position(node), repr(name)),
- name,
- )
+ f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})"
)
self.outdent()
if frame.toplevel:
@@ -1055,35 +1139,35 @@ class CodeGenerator(NodeVisitor):
if var_names:
if len(var_names) == 1:
name = var_names[0]
- self.writeline(
- "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
- )
+ self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}")
else:
- self.writeline(
- "context.vars.update({%s})"
- % ", ".join(
- "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
- )
+ names_kv = ", ".join(
+ f"{name!r}: {frame.symbols.ref(name)}" for name in var_names
)
+ self.writeline(f"context.vars.update({{{names_kv}}})")
if discarded_names:
if len(discarded_names) == 1:
- self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
+ self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})")
else:
+ names_str = ", ".join(map(repr, discarded_names))
self.writeline(
- "context.exported_vars.difference_"
- "update((%s))" % ", ".join(imap(repr, discarded_names))
+ f"context.exported_vars.difference_update(({names_str}))"
)
- def visit_For(self, node, frame):
+ def visit_For(self, node: nodes.For, frame: Frame) -> None:
loop_frame = frame.inner()
+ loop_frame.loop_frame = True
test_frame = frame.inner()
else_frame = frame.inner()
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode, if the special loop
- # variable is accessed in the body.
- extended_loop = node.recursive or "loop" in find_undeclared(
- node.iter_child_nodes(only=("body",)), ("loop",)
+        # variable is accessed in the body, or if the body is a scoped block.
+ extended_loop = (
+ node.recursive
+ or "loop"
+ in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",))
+ or any(block.scoped for block in node.find_all(nodes.Block))
)
loop_ref = None
@@ -1097,13 +1181,13 @@ class CodeGenerator(NodeVisitor):
if node.test:
loop_filter_func = self.temporary_identifier()
test_frame.symbols.analyze_node(node, for_branch="test")
- self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
+ self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test)
self.indent()
self.enter_frame(test_frame)
- self.writeline(self.environment.is_async and "async for " or "for ")
+ self.writeline(self.choose_async("async for ", "for "))
self.visit(node.target, loop_frame)
self.write(" in ")
- self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
+ self.write(self.choose_async("auto_aiter(fiter)", "fiter"))
self.write(":")
self.indent()
self.writeline("if ", node.test)
@@ -1120,7 +1204,7 @@ class CodeGenerator(NodeVisitor):
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
self.writeline(
- "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
+ f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node
)
self.indent()
self.buffer(loop_frame)
@@ -1131,7 +1215,7 @@ class CodeGenerator(NodeVisitor):
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
- self.writeline("%s = missing" % loop_ref)
+ self.writeline(f"{loop_ref} = missing")
for name in node.find_all(nodes.Name):
if name.ctx == "store" and name.name == "loop":
@@ -1142,20 +1226,17 @@ class CodeGenerator(NodeVisitor):
if node.else_:
iteration_indicator = self.temporary_identifier()
- self.writeline("%s = 1" % iteration_indicator)
+ self.writeline(f"{iteration_indicator} = 1")
- self.writeline(self.environment.is_async and "async for " or "for ", node)
+ self.writeline(self.choose_async("async for ", "for "), node)
self.visit(node.target, loop_frame)
if extended_loop:
- if self.environment.is_async:
- self.write(", %s in AsyncLoopContext(" % loop_ref)
- else:
- self.write(", %s in LoopContext(" % loop_ref)
+ self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(")
else:
self.write(" in ")
if node.test:
- self.write("%s(" % loop_filter_func)
+ self.write(f"{loop_filter_func}(")
if node.recursive:
self.write("reciter")
else:
@@ -1170,21 +1251,22 @@ class CodeGenerator(NodeVisitor):
if node.recursive:
self.write(", undefined, loop_render_func, depth):")
else:
- self.write(extended_loop and ", undefined):" or ":")
+ self.write(", undefined):" if extended_loop else ":")
self.indent()
self.enter_frame(loop_frame)
+ self.writeline("_loop_vars = {}")
self.blockvisit(node.body, loop_frame)
if node.else_:
- self.writeline("%s = 0" % iteration_indicator)
+ self.writeline(f"{iteration_indicator} = 0")
self.outdent()
self.leave_frame(
loop_frame, with_python_scope=node.recursive and not node.else_
)
if node.else_:
- self.writeline("if %s:" % iteration_indicator)
+ self.writeline(f"if {iteration_indicator}:")
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
@@ -1197,9 +1279,7 @@ class CodeGenerator(NodeVisitor):
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
- if self.environment.is_async:
- self.write("await ")
- self.write("loop(")
+ self.write(f"{self.choose_async('await ')}loop(")
if self.environment.is_async:
self.write("auto_aiter(")
self.visit(node.iter, frame)
@@ -1208,7 +1288,12 @@ class CodeGenerator(NodeVisitor):
self.write(", loop)")
self.end_write(frame)
- def visit_If(self, node, frame):
+ # at the end of the iteration, clear any assignments made in the
+ # loop from the top level
+ if self._assign_stack:
+ self._assign_stack[-1].difference_update(loop_frame.symbols.stores)
+
+ def visit_If(self, node: nodes.If, frame: Frame) -> None:
if_frame = frame.soft()
self.writeline("if ", node)
self.visit(node.test, if_frame)
@@ -1229,17 +1314,17 @@ class CodeGenerator(NodeVisitor):
self.blockvisit(node.else_, if_frame)
self.outdent()
- def visit_Macro(self, node, frame):
+ def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None:
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith("_"):
- self.write("context.exported_vars.add(%r)" % node.name)
- self.writeline("context.vars[%r] = " % node.name)
- self.write("%s = " % frame.symbols.ref(node.name))
+ self.write(f"context.exported_vars.add({node.name!r})")
+ self.writeline(f"context.vars[{node.name!r}] = ")
+ self.write(f"{frame.symbols.ref(node.name)} = ")
self.macro_def(macro_ref, macro_frame)
- def visit_CallBlock(self, node, frame):
+ def visit_CallBlock(self, node: nodes.CallBlock, frame: Frame) -> None:
call_frame, macro_ref = self.macro_body(node, frame)
self.writeline("caller = ")
self.macro_def(macro_ref, call_frame)
@@ -1247,7 +1332,7 @@ class CodeGenerator(NodeVisitor):
self.visit_Call(node.call, frame, forward_caller=True)
self.end_write(frame)
- def visit_FilterBlock(self, node, frame):
+ def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None:
filter_frame = frame.inner()
filter_frame.symbols.analyze_node(node)
self.enter_frame(filter_frame)
@@ -1258,11 +1343,11 @@ class CodeGenerator(NodeVisitor):
self.end_write(frame)
self.leave_frame(filter_frame)
- def visit_With(self, node, frame):
+ def visit_With(self, node: nodes.With, frame: Frame) -> None:
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
- for target, expr in izip(node.targets, node.values):
+ for target, expr in zip(node.targets, node.values):
self.newline()
self.visit(target, with_frame)
self.write(" = ")
@@ -1270,18 +1355,25 @@ class CodeGenerator(NodeVisitor):
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
- def visit_ExprStmt(self, node, frame):
+ def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None:
self.newline(node)
self.visit(node.node, frame)
- _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
- #: The default finalize function if the environment isn't configured
- #: with one. Or if the environment has one, this is called on that
- #: function's output for constants.
- _default_finalize = text_type
- _finalize = None
+ class _FinalizeInfo(t.NamedTuple):
+ const: t.Optional[t.Callable[..., str]]
+ src: t.Optional[str]
- def _make_finalize(self):
+ @staticmethod
+ def _default_finalize(value: t.Any) -> t.Any:
+ """The default finalize function if the environment isn't
+ configured with one. Or, if the environment has one, this is
+ called on that function's output for constants.
+ """
+ return str(value)
+
+ _finalize: t.Optional[_FinalizeInfo] = None
+
+ def _make_finalize(self) -> _FinalizeInfo:
"""Build the finalize function to be used on constants and at
runtime. Cached so it's only created once for all output nodes.
@@ -1297,39 +1389,48 @@ class CodeGenerator(NodeVisitor):
if self._finalize is not None:
return self._finalize
+ finalize: t.Optional[t.Callable[..., t.Any]]
finalize = default = self._default_finalize
src = None
if self.environment.finalize:
src = "environment.finalize("
env_finalize = self.environment.finalize
+ pass_arg = {
+ _PassArg.context: "context",
+ _PassArg.eval_context: "context.eval_ctx",
+ _PassArg.environment: "environment",
+ }.get(
+ _PassArg.from_obj(env_finalize) # type: ignore
+ )
+ finalize = None
+
+ if pass_arg is None:
+
+ def finalize(value: t.Any) -> t.Any:
+ return default(env_finalize(value))
- def finalize(value):
- return default(env_finalize(value))
+ else:
+ src = f"{src}{pass_arg}, "
- if getattr(env_finalize, "contextfunction", False) is True:
- src += "context, "
- finalize = None # noqa: F811
- elif getattr(env_finalize, "evalcontextfunction", False) is True:
- src += "context.eval_ctx, "
- finalize = None
- elif getattr(env_finalize, "environmentfunction", False) is True:
- src += "environment, "
+ if pass_arg == "environment":
- def finalize(value):
- return default(env_finalize(self.environment, value))
+ def finalize(value: t.Any) -> t.Any:
+ return default(env_finalize(self.environment, value))
self._finalize = self._FinalizeInfo(finalize, src)
return self._finalize
- def _output_const_repr(self, group):
+ def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
"""Given a group of constant values converted from ``Output``
child nodes, produce a string to write to the template module
source.
"""
return repr(concat(group))
- def _output_child_to_const(self, node, frame, finalize):
+ def _output_child_to_const(
+ self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
+ ) -> str:
"""Try to optimize a child of an ``Output`` node by trying to
convert it to constant, finalized data at compile time.
@@ -1344,25 +1445,29 @@ class CodeGenerator(NodeVisitor):
# Template data doesn't go through finalize.
if isinstance(node, nodes.TemplateData):
- return text_type(const)
+ return str(const)
- return finalize.const(const)
+ return finalize.const(const) # type: ignore
- def _output_child_pre(self, node, frame, finalize):
+ def _output_child_pre(
+ self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
+ ) -> None:
"""Output extra source code before visiting a child of an
``Output`` node.
"""
if frame.eval_ctx.volatile:
- self.write("(escape if context.eval_ctx.autoescape else to_string)(")
+ self.write("(escape if context.eval_ctx.autoescape else str)(")
elif frame.eval_ctx.autoescape:
self.write("escape(")
else:
- self.write("to_string(")
+ self.write("str(")
if finalize.src is not None:
self.write(finalize.src)
- def _output_child_post(self, node, frame, finalize):
+ def _output_child_post(
+ self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
+ ) -> None:
"""Output extra source code after visiting a child of an
``Output`` node.
"""
@@ -1371,7 +1476,7 @@ class CodeGenerator(NodeVisitor):
if finalize.src is not None:
self.write(")")
- def visit_Output(self, node, frame):
+ def visit_Output(self, node: nodes.Output, frame: Frame) -> None:
# If an extends is active, don't render outside a block.
if frame.require_output_check:
# A top-level extends is known to exist at compile time.
@@ -1382,7 +1487,7 @@ class CodeGenerator(NodeVisitor):
self.indent()
finalize = self._make_finalize()
- body = []
+ body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = []
# Evaluate constants at compile time if possible. Each item in
# body will be either a list of static data or a node to be
@@ -1414,9 +1519,9 @@ class CodeGenerator(NodeVisitor):
if frame.buffer is not None:
if len(body) == 1:
- self.writeline("%s.append(" % frame.buffer)
+ self.writeline(f"{frame.buffer}.append(")
else:
- self.writeline("%s.extend((" % frame.buffer)
+ self.writeline(f"{frame.buffer}.extend((")
self.indent()
@@ -1450,7 +1555,7 @@ class CodeGenerator(NodeVisitor):
if frame.require_output_check:
self.outdent()
- def visit_Assign(self, node, frame):
+ def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None:
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
@@ -1458,7 +1563,7 @@ class CodeGenerator(NodeVisitor):
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
- def visit_AssignBlock(self, node, frame):
+ def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None:
self.push_assign_tracking()
block_frame = frame.inner()
# This is a special case. Since a set block always captures we
@@ -1475,15 +1580,17 @@ class CodeGenerator(NodeVisitor):
if node.filter is not None:
self.visit_Filter(node.filter, block_frame)
else:
- self.write("concat(%s)" % block_frame.buffer)
+ self.write(f"concat({block_frame.buffer})")
self.write(")")
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
- def visit_Name(self, node, frame):
- if node.ctx == "store" and frame.toplevel:
+ def visit_Name(self, node: nodes.Name, frame: Frame) -> None:
+ if node.ctx == "store" and (
+ frame.toplevel or frame.loop_frame or frame.block_frame
+ ):
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
@@ -1499,52 +1606,51 @@ class CodeGenerator(NodeVisitor):
and not self.parameter_is_undeclared(ref)
):
self.write(
- "(undefined(name=%r) if %s is missing else %s)"
- % (node.name, ref, ref)
+ f"(undefined(name={node.name!r}) if {ref} is missing else {ref})"
)
return
self.write(ref)
- def visit_NSRef(self, node, frame):
+ def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None:
# NSRefs can only be used to store values; since they use the normal
# `foo.bar` notation they will be parsed as a normal attribute access
# when used anywhere but in a `set` context
ref = frame.symbols.ref(node.name)
- self.writeline("if not isinstance(%s, Namespace):" % ref)
+ self.writeline(f"if not isinstance({ref}, Namespace):")
self.indent()
self.writeline(
- "raise TemplateRuntimeError(%r)"
- % "cannot assign attribute on non-namespace object"
+ "raise TemplateRuntimeError"
+ '("cannot assign attribute on non-namespace object")'
)
self.outdent()
- self.writeline("%s[%r]" % (ref, node.attr))
+ self.writeline(f"{ref}[{node.attr!r}]")
- def visit_Const(self, node, frame):
+ def visit_Const(self, node: nodes.Const, frame: Frame) -> None:
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
- def visit_TemplateData(self, node, frame):
+ def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None:
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write(
- "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
+ f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})"
)
- def visit_Tuple(self, node, frame):
+ def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None:
self.write("(")
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
- self.write(idx == 0 and ",)" or ")")
+ self.write(",)" if idx == 0 else ")")
- def visit_List(self, node, frame):
+ def visit_List(self, node: nodes.List, frame: Frame) -> None:
self.write("[")
for idx, item in enumerate(node.items):
if idx:
@@ -1552,7 +1658,7 @@ class CodeGenerator(NodeVisitor):
self.visit(item, frame)
self.write("]")
- def visit_Dict(self, node, frame):
+ def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None:
self.write("{")
for idx, item in enumerate(node.items):
if idx:
@@ -1562,96 +1668,59 @@ class CodeGenerator(NodeVisitor):
self.visit(item.value, frame)
self.write("}")
- def binop(operator, interceptable=True): # noqa: B902
- @optimizeconst
- def visitor(self, node, frame):
- if (
- self.environment.sandboxed
- and operator in self.environment.intercepted_binops
- ):
- self.write("environment.call_binop(context, %r, " % operator)
- self.visit(node.left, frame)
- self.write(", ")
- self.visit(node.right, frame)
- else:
- self.write("(")
- self.visit(node.left, frame)
- self.write(" %s " % operator)
- self.visit(node.right, frame)
- self.write(")")
-
- return visitor
-
- def uaop(operator, interceptable=True): # noqa: B902
- @optimizeconst
- def visitor(self, node, frame):
- if (
- self.environment.sandboxed
- and operator in self.environment.intercepted_unops
- ):
- self.write("environment.call_unop(context, %r, " % operator)
- self.visit(node.node, frame)
- else:
- self.write("(" + operator)
- self.visit(node.node, frame)
- self.write(")")
-
- return visitor
-
- visit_Add = binop("+")
- visit_Sub = binop("-")
- visit_Mul = binop("*")
- visit_Div = binop("/")
- visit_FloorDiv = binop("//")
- visit_Pow = binop("**")
- visit_Mod = binop("%")
- visit_And = binop("and", interceptable=False)
- visit_Or = binop("or", interceptable=False)
- visit_Pos = uaop("+")
- visit_Neg = uaop("-")
- visit_Not = uaop("not ", interceptable=False)
- del binop, uaop
+ visit_Add = _make_binop("+")
+ visit_Sub = _make_binop("-")
+ visit_Mul = _make_binop("*")
+ visit_Div = _make_binop("/")
+ visit_FloorDiv = _make_binop("//")
+ visit_Pow = _make_binop("**")
+ visit_Mod = _make_binop("%")
+ visit_And = _make_binop("and")
+ visit_Or = _make_binop("or")
+ visit_Pos = _make_unop("+")
+ visit_Neg = _make_unop("-")
+ visit_Not = _make_unop("not ")
@optimizeconst
- def visit_Concat(self, node, frame):
+ def visit_Concat(self, node: nodes.Concat, frame: Frame) -> None:
if frame.eval_ctx.volatile:
- func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
+ func_name = "(markup_join if context.eval_ctx.volatile else str_join)"
elif frame.eval_ctx.autoescape:
func_name = "markup_join"
else:
- func_name = "unicode_join"
- self.write("%s((" % func_name)
+ func_name = "str_join"
+ self.write(f"{func_name}((")
for arg in node.nodes:
self.visit(arg, frame)
self.write(", ")
self.write("))")
@optimizeconst
- def visit_Compare(self, node, frame):
+ def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None:
self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
self.write(")")
- def visit_Operand(self, node, frame):
- self.write(" %s " % operators[node.op])
+ def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None:
+ self.write(f" {operators[node.op]} ")
self.visit(node.expr, frame)
@optimizeconst
- def visit_Getattr(self, node, frame):
+ def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None:
if self.environment.is_async:
self.write("(await auto_await(")
self.write("environment.getattr(")
self.visit(node.node, frame)
- self.write(", %r)" % node.attr)
+ self.write(f", {node.attr!r})")
if self.environment.is_async:
self.write("))")
@optimizeconst
- def visit_Getitem(self, node, frame):
+ def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None:
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
@@ -1671,7 +1740,7 @@ class CodeGenerator(NodeVisitor):
if self.environment.is_async:
self.write("))")
- def visit_Slice(self, node, frame):
+ def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None:
if node.start is not None:
self.visit(node.start, frame)
self.write(":")
@@ -1681,60 +1750,83 @@ class CodeGenerator(NodeVisitor):
self.write(":")
self.visit(node.step, frame)
- @optimizeconst
- def visit_Filter(self, node, frame):
+ @contextmanager
+ def _filter_test_common(
+ self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool
+ ) -> t.Iterator[None]:
if self.environment.is_async:
- self.write("await auto_await(")
- self.write(self.filters[node.name] + "(")
- func = self.environment.filters.get(node.name)
- if func is None:
- self.fail("no filter named %r" % node.name, node.lineno)
- if getattr(func, "contextfilter", False) is True:
- self.write("context, ")
- elif getattr(func, "evalcontextfilter", False) is True:
- self.write("context.eval_ctx, ")
- elif getattr(func, "environmentfilter", False) is True:
- self.write("environment, ")
-
- # if the filter node is None we are inside a filter block
- # and want to write to the current buffer
- if node.node is not None:
- self.visit(node.node, frame)
- elif frame.eval_ctx.volatile:
- self.write(
- "(context.eval_ctx.autoescape and"
- " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
- )
- elif frame.eval_ctx.autoescape:
- self.write("Markup(concat(%s))" % frame.buffer)
+ self.write("(await auto_await(")
+
+ if is_filter:
+ self.write(f"{self.filters[node.name]}(")
+ func = self.environment.filters.get(node.name)
else:
- self.write("concat(%s)" % frame.buffer)
+ self.write(f"{self.tests[node.name]}(")
+ func = self.environment.tests.get(node.name)
+
+ # When inside an If or CondExpr frame, allow the filter to be
+ # undefined at compile time and only raise an error if it's
+ # actually called at runtime. See pull_dependencies.
+ if func is None and not frame.soft_frame:
+ type_name = "filter" if is_filter else "test"
+ self.fail(f"No {type_name} named {node.name!r}.", node.lineno)
+
+ pass_arg = {
+ _PassArg.context: "context",
+ _PassArg.eval_context: "context.eval_ctx",
+ _PassArg.environment: "environment",
+ }.get(
+ _PassArg.from_obj(func) # type: ignore
+ )
+
+ if pass_arg is not None:
+ self.write(f"{pass_arg}, ")
+
+ # Back to the visitor function to handle visiting the target of
+ # the filter or test.
+ yield
+
self.signature(node, frame)
self.write(")")
+
if self.environment.is_async:
- self.write(")")
+ self.write("))")
@optimizeconst
- def visit_Test(self, node, frame):
- self.write(self.tests[node.name] + "(")
- if node.name not in self.environment.tests:
- self.fail("no test named %r" % node.name, node.lineno)
- self.visit(node.node, frame)
- self.signature(node, frame)
- self.write(")")
+ def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None:
+ with self._filter_test_common(node, frame, True):
+ # if the filter node is None we are inside a filter block
+ # and want to write to the current buffer
+ if node.node is not None:
+ self.visit(node.node, frame)
+ elif frame.eval_ctx.volatile:
+ self.write(
+ f"(Markup(concat({frame.buffer}))"
+ f" if context.eval_ctx.autoescape else concat({frame.buffer}))"
+ )
+ elif frame.eval_ctx.autoescape:
+ self.write(f"Markup(concat({frame.buffer}))")
+ else:
+ self.write(f"concat({frame.buffer})")
+
+ @optimizeconst
+ def visit_Test(self, node: nodes.Test, frame: Frame) -> None:
+ with self._filter_test_common(node, frame, False):
+ self.visit(node.node, frame)
@optimizeconst
- def visit_CondExpr(self, node, frame):
- def write_expr2():
+ def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None:
+ frame = frame.soft()
+
+ def write_expr2() -> None:
if node.expr2 is not None:
- return self.visit(node.expr2, frame)
+ self.visit(node.expr2, frame)
+ return
+
self.write(
- "cond_expr_undefined(%r)"
- % (
- "the inline if-"
- "expression on %s evaluated to false and "
- "no else section was defined." % self.position(node)
- )
+ f'cond_expr_undefined("the inline if-expression on'
+ f" {self.position(node)} evaluated to false and no else"
+ f' section was defined.")'
)
self.write("(")
@@ -1746,71 +1838,89 @@ class CodeGenerator(NodeVisitor):
self.write(")")
@optimizeconst
- def visit_Call(self, node, frame, forward_caller=False):
+ def visit_Call(
+ self, node: nodes.Call, frame: Frame, forward_caller: bool = False
+ ) -> None:
if self.environment.is_async:
- self.write("await auto_await(")
+ self.write("(await auto_await(")
if self.environment.sandboxed:
self.write("environment.call(context, ")
else:
self.write("context.call(")
self.visit(node.node, frame)
- extra_kwargs = forward_caller and {"caller": "caller"} or None
+ extra_kwargs = {"caller": "caller"} if forward_caller else None
+ loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {}
+ block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {}
+ if extra_kwargs:
+ extra_kwargs.update(loop_kwargs, **block_kwargs)
+ elif loop_kwargs or block_kwargs:
+ extra_kwargs = dict(loop_kwargs, **block_kwargs)
self.signature(node, frame, extra_kwargs)
self.write(")")
if self.environment.is_async:
- self.write(")")
+ self.write("))")
- def visit_Keyword(self, node, frame):
+ def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None:
self.write(node.key + "=")
self.visit(node.value, frame)
# -- Unused nodes for extensions
- def visit_MarkSafe(self, node, frame):
+ def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None:
self.write("Markup(")
self.visit(node.expr, frame)
self.write(")")
- def visit_MarkSafeIfAutoescape(self, node, frame):
- self.write("(context.eval_ctx.autoescape and Markup or identity)(")
+ def visit_MarkSafeIfAutoescape(
+ self, node: nodes.MarkSafeIfAutoescape, frame: Frame
+ ) -> None:
+ self.write("(Markup if context.eval_ctx.autoescape else identity)(")
self.visit(node.expr, frame)
self.write(")")
- def visit_EnvironmentAttribute(self, node, frame):
+ def visit_EnvironmentAttribute(
+ self, node: nodes.EnvironmentAttribute, frame: Frame
+ ) -> None:
self.write("environment." + node.name)
- def visit_ExtensionAttribute(self, node, frame):
- self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
+ def visit_ExtensionAttribute(
+ self, node: nodes.ExtensionAttribute, frame: Frame
+ ) -> None:
+ self.write(f"environment.extensions[{node.identifier!r}].{node.name}")
- def visit_ImportedName(self, node, frame):
+ def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None:
self.write(self.import_aliases[node.importname])
- def visit_InternalName(self, node, frame):
+ def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None:
self.write(node.name)
- def visit_ContextReference(self, node, frame):
+ def visit_ContextReference(
+ self, node: nodes.ContextReference, frame: Frame
+ ) -> None:
self.write("context")
- def visit_DerivedContextReference(self, node, frame):
+ def visit_DerivedContextReference(
+ self, node: nodes.DerivedContextReference, frame: Frame
+ ) -> None:
self.write(self.derive_context(frame))
- def visit_Continue(self, node, frame):
+ def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None:
self.writeline("continue", node)
- def visit_Break(self, node, frame):
+ def visit_Break(self, node: nodes.Break, frame: Frame) -> None:
self.writeline("break", node)
- def visit_Scope(self, node, frame):
+ def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None:
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
- def visit_OverlayScope(self, node, frame):
+ def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None:
ctx = self.temporary_identifier()
- self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
- self.writeline("%s.vars = " % ctx)
+ self.writeline(f"{ctx} = {self.derive_context(frame)}")
+ self.writeline(f"{ctx}.vars = ")
self.visit(node.context, frame)
self.push_context_reference(ctx)
@@ -1821,9 +1931,11 @@ class CodeGenerator(NodeVisitor):
self.leave_frame(scope_frame)
self.pop_context_reference()
- def visit_EvalContextModifier(self, node, frame):
+ def visit_EvalContextModifier(
+ self, node: nodes.EvalContextModifier, frame: Frame
+ ) -> None:
for keyword in node.options:
- self.writeline("context.eval_ctx.%s = " % keyword.key)
+ self.writeline(f"context.eval_ctx.{keyword.key} = ")
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
@@ -1832,12 +1944,14 @@ class CodeGenerator(NodeVisitor):
else:
setattr(frame.eval_ctx, keyword.key, val)
- def visit_ScopedEvalContextModifier(self, node, frame):
+ def visit_ScopedEvalContextModifier(
+ self, node: nodes.ScopedEvalContextModifier, frame: Frame
+ ) -> None:
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
- self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
+ self.writeline(f"{old_ctx_name} = context.eval_ctx.save()")
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
- self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
+ self.writeline(f"context.eval_ctx.revert({old_ctx_name})")
diff --git a/deps/v8/third_party/jinja2/constants.py b/deps/v8/third_party/jinja2/constants.py
index bf7f2ca721..41a1c23b0a 100644
--- a/deps/v8/third_party/jinja2/constants.py
+++ b/deps/v8/third_party/jinja2/constants.py
@@ -1,6 +1,5 @@
-# -*- coding: utf-8 -*-
#: list of lorem ipsum words used by the lipsum() helper function
-LOREM_IPSUM_WORDS = u"""\
+LOREM_IPSUM_WORDS = """\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diff --git a/deps/v8/third_party/jinja2/debug.py b/deps/v8/third_party/jinja2/debug.py
index 5d8aec31d0..7ed7e9297e 100644
--- a/deps/v8/third_party/jinja2/debug.py
+++ b/deps/v8/third_party/jinja2/debug.py
@@ -1,38 +1,37 @@
import sys
+import typing as t
from types import CodeType
+from types import TracebackType
-from . import TemplateSyntaxError
-from ._compat import PYPY
+from .exceptions import TemplateSyntaxError
from .utils import internal_code
from .utils import missing
+if t.TYPE_CHECKING:
+ from .runtime import Context
-def rewrite_traceback_stack(source=None):
+
+def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
"""Rewrite the current exception to replace any tracebacks from
within compiled template code with tracebacks that look like they
came from the template source.
This must be called within an ``except`` block.
- :param exc_info: A :meth:`sys.exc_info` tuple. If not provided,
- the current ``exc_info`` is used.
:param source: For ``TemplateSyntaxError``, the original source if
known.
- :return: A :meth:`sys.exc_info` tuple that can be re-raised.
+ :return: The original exception with the rewritten traceback.
"""
- exc_type, exc_value, tb = sys.exc_info()
+ _, exc_value, tb = sys.exc_info()
+ exc_value = t.cast(BaseException, exc_value)
+ tb = t.cast(TracebackType, tb)
if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
exc_value.translated = True
exc_value.source = source
-
- try:
- # Remove the old traceback on Python 3, otherwise the frames
- # from the compiler still show up.
- exc_value.with_traceback(None)
- except AttributeError:
- pass
-
+ # Remove the old traceback, otherwise the frames from the
+ # compiler still show up.
+ exc_value.with_traceback(None)
# Outside of runtime, so the frame isn't executing template
# code, but it still needs to point at the template.
tb = fake_traceback(
@@ -68,12 +67,15 @@ def rewrite_traceback_stack(source=None):
# Assign tb_next in reverse to avoid circular references.
for tb in reversed(stack):
- tb_next = tb_set_next(tb, tb_next)
+ tb.tb_next = tb_next
+ tb_next = tb
- return exc_type, exc_value, tb_next
+ return exc_value.with_traceback(tb_next)
-def fake_traceback(exc_value, tb, filename, lineno):
+def fake_traceback( # type: ignore
+ exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
+) -> TracebackType:
"""Produce a new traceback object that looks like it came from the
template source instead of the compiled code. The filename, line
number, and location name will point to the template, and the local
@@ -100,79 +102,60 @@ def fake_traceback(exc_value, tb, filename, lineno):
"__jinja_exception__": exc_value,
}
# Raise an exception at the correct line number.
- code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
+ code: CodeType = compile(
+ "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec"
+ )
# Build a new code object that points to the template file and
# replaces the location with a block name.
- try:
- location = "template"
-
- if tb is not None:
- function = tb.tb_frame.f_code.co_name
-
- if function == "root":
- location = "top-level template code"
- elif function.startswith("block_"):
- location = 'block "%s"' % function[6:]
-
- # Collect arguments for the new code object. CodeType only
- # accepts positional arguments, and arguments were inserted in
- # new Python versions.
- code_args = []
-
- for attr in (
- "argcount",
- "posonlyargcount", # Python 3.8
- "kwonlyargcount", # Python 3
- "nlocals",
- "stacksize",
- "flags",
- "code", # codestring
- "consts", # constants
- "names",
- "varnames",
- ("filename", filename),
- ("name", location),
- "firstlineno",
- "lnotab",
- "freevars",
- "cellvars",
- ):
- if isinstance(attr, tuple):
- # Replace with given value.
- code_args.append(attr[1])
- continue
-
- try:
- # Copy original value if it exists.
- code_args.append(getattr(code, "co_" + attr))
- except AttributeError:
- # Some arguments were added later.
- continue
-
- code = CodeType(*code_args)
- except Exception:
- # Some environments such as Google App Engine don't support
- # modifying code objects.
- pass
+ location = "template"
+
+ if tb is not None:
+ function = tb.tb_frame.f_code.co_name
+
+ if function == "root":
+ location = "top-level template code"
+ elif function.startswith("block_"):
+ location = f"block {function[6:]!r}"
+
+ if sys.version_info >= (3, 8):
+ code = code.replace(co_name=location)
+ else:
+ code = CodeType(
+ code.co_argcount,
+ code.co_kwonlyargcount,
+ code.co_nlocals,
+ code.co_stacksize,
+ code.co_flags,
+ code.co_code,
+ code.co_consts,
+ code.co_names,
+ code.co_varnames,
+ code.co_filename,
+ location,
+ code.co_firstlineno,
+ code.co_lnotab,
+ code.co_freevars,
+ code.co_cellvars,
+ )
# Execute the new code, which is guaranteed to raise, and return
# the new traceback without this frame.
try:
exec(code, globals, locals)
except BaseException:
- return sys.exc_info()[2].tb_next
+ return sys.exc_info()[2].tb_next # type: ignore
-def get_template_locals(real_locals):
+def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]:
"""Based on the runtime locals, get the context that would be
available at that point in the template.
"""
# Start with the current template context.
- ctx = real_locals.get("context")
+ ctx: "t.Optional[Context]" = real_locals.get("context")
- if ctx:
- data = ctx.get_all().copy()
+ if ctx is not None:
+ data: t.Dict[str, t.Any] = ctx.get_all().copy()
else:
data = {}
@@ -180,7 +163,7 @@ def get_template_locals(real_locals):
# rather than pushing a context. Local variables follow the scheme
# l_depth_name. Find the highest-depth local that has a value for
# each name.
- local_overrides = {}
+ local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {}
for name, value in real_locals.items():
if not name.startswith("l_") or value is missing:
@@ -188,8 +171,8 @@ def get_template_locals(real_locals):
continue
try:
- _, depth, name = name.split("_", 2)
- depth = int(depth)
+ _, depth_str, name = name.split("_", 2)
+ depth = int(depth_str)
except ValueError:
continue
@@ -206,63 +189,3 @@ def get_template_locals(real_locals):
data[name] = value
return data
-
-
-if sys.version_info >= (3, 7):
- # tb_next is directly assignable as of Python 3.7
- def tb_set_next(tb, tb_next):
- tb.tb_next = tb_next
- return tb
-
-
-elif PYPY:
- # PyPy might have special support, and won't work with ctypes.
- try:
- import tputil
- except ImportError:
- # Without tproxy support, use the original traceback.
- def tb_set_next(tb, tb_next):
- return tb
-
- else:
- # With tproxy support, create a proxy around the traceback that
- # returns the new tb_next.
- def tb_set_next(tb, tb_next):
- def controller(op):
- if op.opname == "__getattribute__" and op.args[0] == "tb_next":
- return tb_next
-
- return op.delegate()
-
- return tputil.make_proxy(controller, obj=tb)
-
-
-else:
- # Use ctypes to assign tb_next at the C level since it's read-only
- # from Python.
- import ctypes
-
- class _CTraceback(ctypes.Structure):
- _fields_ = [
- # Extra PyObject slots when compiled with Py_TRACE_REFS.
- ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
- # Only care about tb_next as an object, not a traceback.
- ("tb_next", ctypes.py_object),
- ]
-
- def tb_set_next(tb, tb_next):
- c_tb = _CTraceback.from_address(id(tb))
-
- # Clear out the old tb_next.
- if tb.tb_next is not None:
- c_tb_next = ctypes.py_object(tb.tb_next)
- c_tb.tb_next = ctypes.py_object()
- ctypes.pythonapi.Py_DecRef(c_tb_next)
-
- # Assign the new tb_next.
- if tb_next is not None:
- c_tb_next = ctypes.py_object(tb_next)
- ctypes.pythonapi.Py_IncRef(c_tb_next)
- c_tb.tb_next = c_tb_next
-
- return tb
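
The large block deleted above (the ctypes structure, the tputil proxy, and the version check) existed only because `tb_next` used to be read-only from Python; since Python 3.7 it is an ordinary writable attribute, which is all the new `tb.tb_next = tb_next` assignment relies on. A quick sketch, assuming CPython 3.7+:

import sys
from types import TracebackType


def capture() -> TracebackType:
    try:
        raise RuntimeError("boom")
    except RuntimeError:
        return sys.exc_info()[2]  # the traceback of the exception just raised


outer = capture()
inner = capture()

# Plain attribute assignment; before 3.7 this needed the removed ctypes hack.
outer.tb_next = inner
assert outer.tb_next is inner
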
diff --git a/deps/v8/third_party/jinja2/defaults.py b/deps/v8/third_party/jinja2/defaults.py
index 8e0e7d7710..638cad3d2d 100644
--- a/deps/v8/third_party/jinja2/defaults.py
+++ b/deps/v8/third_party/jinja2/defaults.py
@@ -1,5 +1,5 @@
-# -*- coding: utf-8 -*-
-from ._compat import range_type
+import typing as t
+
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
from .utils import Cycler
@@ -7,6 +7,9 @@ from .utils import generate_lorem_ipsum
from .utils import Joiner
from .utils import Namespace
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
@@ -14,17 +17,17 @@ VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
-LINE_STATEMENT_PREFIX = None
-LINE_COMMENT_PREFIX = None
+LINE_STATEMENT_PREFIX: t.Optional[str] = None
+LINE_COMMENT_PREFIX: t.Optional[str] = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
-NEWLINE_SEQUENCE = "\n"
+NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n"
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
DEFAULT_NAMESPACE = {
- "range": range_type,
+ "range": range,
"dict": dict,
"lipsum": generate_lorem_ipsum,
"cycler": Cycler,
@@ -33,10 +36,11 @@ DEFAULT_NAMESPACE = {
}
# default policies
-DEFAULT_POLICIES = {
+DEFAULT_POLICIES: t.Dict[str, t.Any] = {
"compiler.ascii_str": True,
"urlize.rel": "noopener",
"urlize.target": None,
+ "urlize.extra_schemes": None,
"truncate.leeway": 5,
"json.dumps_function": None,
"json.dumps_kwargs": {"sort_keys": True},
diff --git a/deps/v8/third_party/jinja2/environment.py b/deps/v8/third_party/jinja2/environment.py
index 8430390eea..ea04e8b443 100644
--- a/deps/v8/third_party/jinja2/environment.py
+++ b/deps/v8/third_party/jinja2/environment.py
@@ -1,25 +1,19 @@
-# -*- coding: utf-8 -*-
"""Classes for managing templates and their runtime and compile time
options.
"""
import os
-import sys
+import typing
+import typing as t
import weakref
+from collections import ChainMap
+from functools import lru_cache
from functools import partial
from functools import reduce
+from types import CodeType
from markupsafe import Markup
from . import nodes
-from ._compat import encode_filename
-from ._compat import implements_iterator
-from ._compat import implements_to_string
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import PYPY
-from ._compat import reraise
-from ._compat import string_types
-from ._compat import text_type
from .compiler import CodeGenerator
from .compiler import generate
from .defaults import BLOCK_END_STRING
@@ -44,25 +38,33 @@ from .exceptions import TemplatesNotFound
from .exceptions import TemplateSyntaxError
from .exceptions import UndefinedError
from .lexer import get_lexer
+from .lexer import Lexer
from .lexer import TokenStream
from .nodes import EvalContext
from .parser import Parser
from .runtime import Context
from .runtime import new_context
from .runtime import Undefined
+from .utils import _PassArg
from .utils import concat
from .utils import consume
-from .utils import have_async_gen
from .utils import import_string
from .utils import internalcode
from .utils import LRUCache
from .utils import missing
-# for direct template usage we have up to ten living environments
-_spontaneous_environments = LRUCache(10)
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .bccache import BytecodeCache
+ from .ext import Extension
+ from .loaders import BaseLoader
+
+_env_bound = t.TypeVar("_env_bound", bound="Environment")
-def get_spontaneous_environment(cls, *args):
+# for direct template usage we have up to ten living environments
+@lru_cache(maxsize=10)
+def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound:
"""Return a new spontaneous environment. A spontaneous environment
is used for templates created directly rather than through an
existing environment.
@@ -70,75 +72,74 @@ def get_spontaneous_environment(cls, *args):
:param cls: Environment class to create.
:param args: Positional arguments passed to environment.
"""
- key = (cls, args)
+ env = cls(*args)
+ env.shared = True
+ return env
- try:
- return _spontaneous_environments[key]
- except KeyError:
- _spontaneous_environments[key] = env = cls(*args)
- env.shared = True
- return env
-
-def create_cache(size):
+def create_cache(
+ size: int,
+) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]:
"""Return the cache class for the given size."""
if size == 0:
return None
+
if size < 0:
return {}
- return LRUCache(size)
+ return LRUCache(size) # type: ignore
-def copy_cache(cache):
+
+def copy_cache(
+ cache: t.Optional[t.MutableMapping],
+) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]:
"""Create an empty copy of the given cache."""
if cache is None:
return None
- elif type(cache) is dict:
+
+ if type(cache) is dict:
return {}
- return LRUCache(cache.capacity)
+
+ return LRUCache(cache.capacity) # type: ignore
-def load_extensions(environment, extensions):
+def load_extensions(
+ environment: "Environment",
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]],
+) -> t.Dict[str, "Extension"]:
"""Load the extensions from the list and bind it to the environment.
- Returns a dict of instantiated environments.
+ Returns a dict of instantiated extensions.
"""
result = {}
+
for extension in extensions:
- if isinstance(extension, string_types):
- extension = import_string(extension)
- result[extension.identifier] = extension(environment)
- return result
+ if isinstance(extension, str):
+ extension = t.cast(t.Type["Extension"], import_string(extension))
+ result[extension.identifier] = extension(environment)
-def fail_for_missing_callable(string, name):
- msg = string % name
- if isinstance(name, Undefined):
- try:
- name._fail_with_undefined_error()
- except Exception as e:
- msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
- raise TemplateRuntimeError(msg)
+ return result
-def _environment_sanity_check(environment):
+def _environment_config_check(environment: "Environment") -> "Environment":
"""Perform a sanity check on the environment."""
assert issubclass(
environment.undefined, Undefined
- ), "undefined must be a subclass of undefined because filters depend on it."
+ ), "'undefined' must be a subclass of 'jinja2.Undefined'."
assert (
environment.block_start_string
!= environment.variable_start_string
!= environment.comment_start_string
- ), "block, variable and comment start strings must be different"
- assert environment.newline_sequence in (
+ ), "block, variable and comment start strings must be different."
+ assert environment.newline_sequence in {
"\r",
"\r\n",
"\n",
- ), "newline_sequence set to unknown line ending string."
+ }, "'newline_sequence' must be one of '\\n', '\\r\\n', or '\\r'."
return environment
-class Environment(object):
+class Environment:
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
@@ -256,9 +257,8 @@ class Environment(object):
See :ref:`bytecode-cache` for more information.
`enable_async`
- If set to true this enables async template execution which allows
- you to take advantage of newer Python features. This requires
- Python 3.6 or later.
+ If set to true this enables async template execution which
+ allows using async functions and generators.
"""
#: if this environment is sandboxed. Modifying this variable won't make
@@ -271,7 +271,7 @@ class Environment(object):
overlayed = False
#: the environment this environment is linked to if it is an overlay
- linked_to = None
+ linked_to: t.Optional["Environment"] = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
@@ -279,36 +279,40 @@ class Environment(object):
#: the class that is used for code generation. See
#: :class:`~jinja2.compiler.CodeGenerator` for more information.
- code_generator_class = CodeGenerator
+ code_generator_class: t.Type["CodeGenerator"] = CodeGenerator
+
+ concat = "".join
- #: the context class thatis used for templates. See
+ #: the context class that is used for templates. See
#: :class:`~jinja2.runtime.Context` for more information.
- context_class = Context
+ context_class: t.Type[Context] = Context
+
+ template_class: t.Type["Template"]
def __init__(
self,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- loader=None,
- cache_size=400,
- auto_reload=True,
- bytecode_cache=None,
- enable_async=False,
+ block_start_string: str = BLOCK_START_STRING,
+ block_end_string: str = BLOCK_END_STRING,
+ variable_start_string: str = VARIABLE_START_STRING,
+ variable_end_string: str = VARIABLE_END_STRING,
+ comment_start_string: str = COMMENT_START_STRING,
+ comment_end_string: str = COMMENT_END_STRING,
+ line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
+ line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
+ trim_blocks: bool = TRIM_BLOCKS,
+ lstrip_blocks: bool = LSTRIP_BLOCKS,
+ newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
+ keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
+ optimized: bool = True,
+ undefined: t.Type[Undefined] = Undefined,
+ finalize: t.Optional[t.Callable[..., t.Any]] = None,
+ autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
+ loader: t.Optional["BaseLoader"] = None,
+ cache_size: int = 400,
+ auto_reload: bool = True,
+ bytecode_cache: t.Optional["BytecodeCache"] = None,
+ enable_async: bool = False,
):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
@@ -336,7 +340,7 @@ class Environment(object):
self.keep_trailing_newline = keep_trailing_newline
# runtime information
- self.undefined = undefined
+ self.undefined: t.Type[Undefined] = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
@@ -358,52 +362,50 @@ class Environment(object):
# load extensions
self.extensions = load_extensions(self, extensions)
- self.enable_async = enable_async
- self.is_async = self.enable_async and have_async_gen
- if self.is_async:
- # runs patch_all() to enable async support
- from . import asyncsupport # noqa: F401
-
- _environment_sanity_check(self)
+ self.is_async = enable_async
+ _environment_config_check(self)
- def add_extension(self, extension):
+ def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None:
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
- def extend(self, **attributes):
+ def extend(self, **attributes: t.Any) -> None:
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
- for key, value in iteritems(attributes):
+ for key, value in attributes.items():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(
self,
- block_start_string=missing,
- block_end_string=missing,
- variable_start_string=missing,
- variable_end_string=missing,
- comment_start_string=missing,
- comment_end_string=missing,
- line_statement_prefix=missing,
- line_comment_prefix=missing,
- trim_blocks=missing,
- lstrip_blocks=missing,
- extensions=missing,
- optimized=missing,
- undefined=missing,
- finalize=missing,
- autoescape=missing,
- loader=missing,
- cache_size=missing,
- auto_reload=missing,
- bytecode_cache=missing,
- ):
+ block_start_string: str = missing,
+ block_end_string: str = missing,
+ variable_start_string: str = missing,
+ variable_end_string: str = missing,
+ comment_start_string: str = missing,
+ comment_end_string: str = missing,
+ line_statement_prefix: t.Optional[str] = missing,
+ line_comment_prefix: t.Optional[str] = missing,
+ trim_blocks: bool = missing,
+ lstrip_blocks: bool = missing,
+ newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = missing,
+ keep_trailing_newline: bool = missing,
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing,
+ optimized: bool = missing,
+ undefined: t.Type[Undefined] = missing,
+ finalize: t.Optional[t.Callable[..., t.Any]] = missing,
+ autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing,
+ loader: t.Optional["BaseLoader"] = missing,
+ cache_size: int = missing,
+ auto_reload: bool = missing,
+ bytecode_cache: t.Optional["BytecodeCache"] = missing,
+ enable_async: bool = False,
+ ) -> "Environment":
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
@@ -414,16 +416,20 @@ class Environment(object):
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
+
+ .. versionchanged:: 3.1.2
+            Added the ``newline_sequence``, ``keep_trailing_newline``,
+ and ``enable_async`` parameters to match ``__init__``.
"""
args = dict(locals())
- del args["self"], args["cache_size"], args["extensions"]
+ del args["self"], args["cache_size"], args["extensions"], args["enable_async"]
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
- for key, value in iteritems(args):
+ for key, value in args.items():
if value is not missing:
setattr(rv, key, value)
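
The overlay() signature above now mirrors __init__, including the async switch applied a few lines further down (`rv.is_async = enable_async`). A short sketch of the intended use, assuming the vendored copy matches the 3.1.2 behaviour documented in the versionchanged note:

from jinja2 import Environment

base = Environment(trim_blocks=True)

# The overlay shares configuration with `base` but can toggle async
# execution independently of it.
async_env = base.overlay(enable_async=True)

assert async_env.linked_to is base
assert async_env.trim_blocks is True
assert async_env.is_async and not base.is_async
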
@@ -433,25 +439,33 @@ class Environment(object):
rv.cache = copy_cache(self.cache)
rv.extensions = {}
- for key, value in iteritems(self.extensions):
+ for key, value in self.extensions.items():
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
- return _environment_sanity_check(rv)
+ if enable_async is not missing:
+ rv.is_async = enable_async
- lexer = property(get_lexer, doc="The lexer for this environment.")
+ return _environment_config_check(rv)
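An illustrative sketch of overlay(), including the enable_async parameter this change adds (the delimiters and names here are made up):

    from jinja2 import Environment

    base = Environment()
    # Shares loader, filters, globals, etc. with `base`, but swaps delimiters
    # and turns on async rendering for this overlay only.
    async_env = base.overlay(
        variable_start_string="[[",
        variable_end_string="]]",
        enable_async=True,
    )
    assert async_env.linked_to is base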
- def iter_extensions(self):
+ @property
+ def lexer(self) -> Lexer:
+ """The lexer for this environment."""
+ return get_lexer(self)
+
+ def iter_extensions(self) -> t.Iterator["Extension"]:
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
- def getitem(self, obj, argument):
+ def getitem(
+ self, obj: t.Any, argument: t.Union[str, t.Any]
+ ) -> t.Union[t.Any, Undefined]:
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (AttributeError, TypeError, LookupError):
- if isinstance(argument, string_types):
+ if isinstance(argument, str):
try:
attr = str(argument)
except Exception:
@@ -463,9 +477,9 @@ class Environment(object):
pass
return self.undefined(obj=obj, name=argument)
- def getattr(self, obj, attribute):
+ def getattr(self, obj: t.Any, attribute: str) -> t.Any:
"""Get an item or attribute of an object but prefer the attribute.
- Unlike :meth:`getitem` the attribute *must* be a bytestring.
+ Unlike :meth:`getitem` the attribute *must* be a string.
"""
try:
return getattr(obj, attribute)
@@ -476,51 +490,113 @@ class Environment(object):
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
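A small sketch of the item-versus-attribute preference these two helpers implement; the Point class is purely hypothetical:

    from jinja2 import Environment

    class Point:
        x = "attribute"
        def __getitem__(self, key):
            if key == "x":
                return "item"
            raise KeyError(key)

    env = Environment()
    p = Point()
    env.getitem(p, "x")   # -> "item"      (subscription wins)
    env.getattr(p, "x")   # -> "attribute" (attribute lookup wins)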
- def call_filter(
- self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
- ):
- """Invokes a filter on a value the same way the compiler does it.
+ def _filter_test_common(
+ self,
+ name: t.Union[str, Undefined],
+ value: t.Any,
+ args: t.Optional[t.Sequence[t.Any]],
+ kwargs: t.Optional[t.Mapping[str, t.Any]],
+ context: t.Optional[Context],
+ eval_ctx: t.Optional[EvalContext],
+ is_filter: bool,
+ ) -> t.Any:
+ if is_filter:
+ env_map = self.filters
+ type_name = "filter"
+ else:
+ env_map = self.tests
+ type_name = "test"
- Note that on Python 3 this might return a coroutine in case the
- filter is running from an environment in async mode and the filter
- supports async execution. It's your responsibility to await this
- if needed.
+ func = env_map.get(name) # type: ignore
- .. versionadded:: 2.7
- """
- func = self.filters.get(name)
if func is None:
- fail_for_missing_callable("no filter named %r", name)
- args = [value] + list(args or ())
- if getattr(func, "contextfilter", False) is True:
+ msg = f"No {type_name} named {name!r}."
+
+ if isinstance(name, Undefined):
+ try:
+ name._fail_with_undefined_error()
+ except Exception as e:
+ msg = f"{msg} ({e}; did you forget to quote the callable name?)"
+
+ raise TemplateRuntimeError(msg)
+
+ args = [value, *(args if args is not None else ())]
+ kwargs = kwargs if kwargs is not None else {}
+ pass_arg = _PassArg.from_obj(func)
+
+ if pass_arg is _PassArg.context:
if context is None:
raise TemplateRuntimeError(
- "Attempted to invoke context filter without context"
+ f"Attempted to invoke a context {type_name} without context."
)
+
args.insert(0, context)
- elif getattr(func, "evalcontextfilter", False) is True:
+ elif pass_arg is _PassArg.eval_context:
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
+
args.insert(0, eval_ctx)
- elif getattr(func, "environmentfilter", False) is True:
+ elif pass_arg is _PassArg.environment:
args.insert(0, self)
- return func(*args, **(kwargs or {}))
- def call_test(self, name, value, args=None, kwargs=None):
- """Invokes a test on a value the same way the compiler does it.
+ return func(*args, **kwargs)
+
+ def call_filter(
+ self,
+ name: str,
+ value: t.Any,
+ args: t.Optional[t.Sequence[t.Any]] = None,
+ kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+ context: t.Optional[Context] = None,
+ eval_ctx: t.Optional[EvalContext] = None,
+ ) -> t.Any:
+ """Invoke a filter on a value the same way the compiler does.
+
+ This might return a coroutine if the filter is running from an
+ environment in async mode and the filter supports async
+ execution. It's your responsibility to await this if needed.
+
+ .. versionadded:: 2.7
+ """
+ return self._filter_test_common(
+ name, value, args, kwargs, context, eval_ctx, True
+ )
+
+ def call_test(
+ self,
+ name: str,
+ value: t.Any,
+ args: t.Optional[t.Sequence[t.Any]] = None,
+ kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+ context: t.Optional[Context] = None,
+ eval_ctx: t.Optional[EvalContext] = None,
+ ) -> t.Any:
+ """Invoke a test on a value the same way the compiler does.
+
+ This might return a coroutine if the test is running from an
+ environment in async mode and the test supports async execution.
+ It's your responsibility to await this if needed.
+
+ .. versionchanged:: 3.0
+ Tests support ``@pass_context``, etc. decorators. Added
+ the ``context`` and ``eval_ctx`` parameters.
.. versionadded:: 2.7
"""
- func = self.tests.get(name)
- if func is None:
- fail_for_missing_callable("no test named %r", name)
- return func(value, *(args or ()), **(kwargs or {}))
+ return self._filter_test_common(
+ name, value, args, kwargs, context, eval_ctx, False
+ )
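A short sketch of the consolidated filter/test invocation, assuming the built-in ``upper`` filter and ``even`` test:

    from jinja2 import Environment

    env = Environment()
    env.call_filter("upper", "hello")   # -> "HELLO"
    env.call_test("even", 4)            # -> True
    # Unknown names now raise TemplateRuntimeError with a
    # "No filter named ..." / "No test named ..." message.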
@internalcode
- def parse(self, source, name=None, filename=None):
+ def parse(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> nodes.Template:
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
@@ -534,11 +610,18 @@ class Environment(object):
except TemplateSyntaxError:
self.handle_exception(source=source)
- def _parse(self, source, name, filename):
+ def _parse(
+ self, source: str, name: t.Optional[str], filename: t.Optional[str]
+ ) -> nodes.Template:
"""Internal parsing function used by `parse` and `compile`."""
- return Parser(self, source, name, encode_filename(filename)).parse()
+ return Parser(self, source, name, filename).parse()
- def lex(self, source, name=None, filename=None):
+ def lex(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> t.Iterator[t.Tuple[int, str, str]]:
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
@@ -548,13 +631,18 @@ class Environment(object):
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
- source = text_type(source)
+ source = str(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
self.handle_exception(source=source)
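A sketch of lex(), which yields (lineno, token_type, value) tuples; the token shown in the comment is indicative only:

    from jinja2 import Environment

    env = Environment()
    for lineno, token_type, value in env.lex("Hello {{ name }}!"):
        print(lineno, token_type, repr(value))
    # first tuple is roughly (1, 'data', 'Hello ')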
- def preprocess(self, source, name=None, filename=None):
+ def preprocess(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> str:
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
@@ -562,28 +650,43 @@ class Environment(object):
return reduce(
lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(),
- text_type(source),
+ str(source),
)
- def _tokenize(self, source, name, filename=None, state=None):
+ def _tokenize(
+ self,
+ source: str,
+ name: t.Optional[str],
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> TokenStream:
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
+
for ext in self.iter_extensions():
- stream = ext.filter_stream(stream)
+ stream = ext.filter_stream(stream) # type: ignore
+
if not isinstance(stream, TokenStream):
- stream = TokenStream(stream, name, filename)
+ stream = TokenStream(stream, name, filename) # type: ignore
+
return stream
- def _generate(self, source, name, filename, defer_init=False):
+ def _generate(
+ self,
+ source: nodes.Template,
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ defer_init: bool = False,
+ ) -> str:
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
- return generate(
+ return generate( # type: ignore
source,
self,
name,
@@ -592,16 +695,45 @@ class Environment(object):
optimized=self.optimized,
)
- def _compile(self, source, filename):
+ def _compile(self, source: str, filename: str) -> CodeType:
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
- return compile(source, filename, "exec")
+ return compile(source, filename, "exec") # type: ignore
+
+ @typing.overload
+ def compile( # type: ignore
+ self,
+ source: t.Union[str, nodes.Template],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ raw: "te.Literal[False]" = False,
+ defer_init: bool = False,
+ ) -> CodeType:
+ ...
+
+ @typing.overload
+ def compile(
+ self,
+ source: t.Union[str, nodes.Template],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ raw: "te.Literal[True]" = ...,
+ defer_init: bool = False,
+ ) -> str:
+ ...
@internalcode
- def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
+ def compile(
+ self,
+ source: t.Union[str, nodes.Template],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ raw: bool = False,
+ defer_init: bool = False,
+ ) -> t.Union[str, CodeType]:
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
@@ -623,7 +755,7 @@ class Environment(object):
"""
source_hint = None
try:
- if isinstance(source, string_types):
+ if isinstance(source, str):
source_hint = source
source = self._parse(source, name, filename)
source = self._generate(source, name, filename, defer_init=defer_init)
@@ -631,13 +763,13 @@ class Environment(object):
return source
if filename is None:
filename = "<template>"
- else:
- filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
self.handle_exception(source=source_hint)
- def compile_expression(self, source, undefined_to_none=True):
+ def compile_expression(
+ self, source: str, undefined_to_none: bool = True
+ ) -> "TemplateExpression":
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
@@ -674,8 +806,7 @@ class Environment(object):
)
expr.set_environment(self)
except TemplateSyntaxError:
- if sys.exc_info() is not None:
- self.handle_exception(source=source)
+ self.handle_exception(source=source)
body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
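A sketch of compile_expression(), matching the ``undefined_to_none`` default described in its docstring:

    from jinja2 import Environment

    env = Environment()
    is_adult = env.compile_expression("age >= 18")
    is_adult(age=21)                      # -> True
    env.compile_expression("foo.bar")()   # -> None (undefined_to_none=True)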
@@ -683,14 +814,13 @@ class Environment(object):
def compile_templates(
self,
- target,
- extensions=None,
- filter_func=None,
- zip="deflated",
- log_function=None,
- ignore_errors=True,
- py_compile=False,
- ):
+ target: t.Union[str, os.PathLike],
+ extensions: t.Optional[t.Collection[str]] = None,
+ filter_func: t.Optional[t.Callable[[str], bool]] = None,
+ zip: t.Optional[str] = "deflated",
+ log_function: t.Optional[t.Callable[[str], None]] = None,
+ ignore_errors: bool = True,
+ ) -> None:
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
@@ -706,52 +836,26 @@ class Environment(object):
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
- If `py_compile` is set to `True` .pyc files will be written to the
- target instead of standard .py files. This flag does not do anything
- on pypy and Python 3 where pyc files are not picked up by itself and
- don't give much benefit.
-
.. versionadded:: 2.4
"""
from .loaders import ModuleLoader
if log_function is None:
- def log_function(x):
+ def log_function(x: str) -> None:
pass
- if py_compile:
- if not PY2 or PYPY:
- import warnings
-
- warnings.warn(
- "'py_compile=True' has no effect on PyPy or Python"
- " 3 and will be removed in version 3.0",
- DeprecationWarning,
- stacklevel=2,
- )
- py_compile = False
- else:
- import imp
- import marshal
-
- py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15")
+ assert log_function is not None
+ assert self.loader is not None, "No loader configured."
- # Python 3.3 added a source filesize to the header
- if sys.version_info >= (3, 3):
- py_header += u"\x00\x00\x00\x00".encode("iso-8859-15")
-
- def write_file(filename, data):
+ def write_file(filename: str, data: str) -> None:
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
- if isinstance(data, text_type):
- data = data.encode("utf8")
-
with open(os.path.join(target, filename), "wb") as f:
- f.write(data)
+ f.write(data.encode("utf8"))
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
@@ -759,11 +863,11 @@ class Environment(object):
zip_file = ZipFile(
target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
)
- log_function('Compiling into Zip archive "%s"' % target)
+ log_function(f"Compiling into Zip archive {target!r}")
else:
if not os.path.isdir(target):
os.makedirs(target)
- log_function('Compiling into folder "%s"' % target)
+ log_function(f"Compiling into folder {target!r}")
try:
for name in self.list_templates(extensions, filter_func):
@@ -773,25 +877,24 @@ class Environment(object):
except TemplateSyntaxError as e:
if not ignore_errors:
raise
- log_function('Could not compile "%s": %s' % (name, e))
+ log_function(f'Could not compile "{name}": {e}')
continue
filename = ModuleLoader.get_module_filename(name)
- if py_compile:
- c = self._compile(code, encode_filename(filename))
- write_file(filename + "c", py_header + marshal.dumps(c))
- log_function('Byte-compiled "%s" as %s' % (name, filename + "c"))
- else:
- write_file(filename, code)
- log_function('Compiled "%s" as %s' % (name, filename))
+ write_file(filename, code)
+ log_function(f'Compiled "{name}" as {filename}')
finally:
if zip:
zip_file.close()
log_function("Finished compiling templates")
- def list_templates(self, extensions=None, filter_func=None):
+ def list_templates(
+ self,
+ extensions: t.Optional[t.Collection[str]] = None,
+ filter_func: t.Optional[t.Callable[[str], bool]] = None,
+ ) -> t.List[str]:
"""Returns a list of templates for this environment. This requires
that the loader supports the
:meth:`~BaseLoader.list_templates` method.
@@ -807,6 +910,7 @@ class Environment(object):
.. versionadded:: 2.4
"""
+ assert self.loader is not None, "No loader configured."
names = self.loader.list_templates()
if extensions is not None:
@@ -815,23 +919,23 @@ class Environment(object):
"either extensions or filter_func can be passed, but not both"
)
- def filter_func(x):
- return "." in x and x.rsplit(".", 1)[1] in extensions
+ def filter_func(x: str) -> bool:
+ return "." in x and x.rsplit(".", 1)[1] in extensions # type: ignore
if filter_func is not None:
names = [name for name in names if filter_func(name)]
return names
- def handle_exception(self, source=None):
+ def handle_exception(self, source: t.Optional[str] = None) -> "te.NoReturn":
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
from .debug import rewrite_traceback_stack
- reraise(*rewrite_traceback_stack(source=source))
+ raise rewrite_traceback_stack(source=source)
- def join_path(self, template, parent):
+ def join_path(self, template: str, parent: str) -> str:
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
@@ -844,7 +948,9 @@ class Environment(object):
return template
@internalcode
- def _load_template(self, name, globals):
+ def _load_template(
+ self, name: str, globals: t.Optional[t.MutableMapping[str, t.Any]]
+ ) -> "Template":
if self.loader is None:
raise TypeError("no loader for this environment specified")
cache_key = (weakref.ref(self.loader), name)
@@ -853,49 +959,88 @@ class Environment(object):
if template is not None and (
not self.auto_reload or template.is_up_to_date
):
+ # template.globals is a ChainMap, modifying it will only
+ # affect the template, not the environment globals.
+ if globals:
+ template.globals.update(globals)
+
return template
- template = self.loader.load(self, name, globals)
+
+ template = self.loader.load(self, name, self.make_globals(globals))
+
if self.cache is not None:
self.cache[cache_key] = template
return template
@internalcode
- def get_template(self, name, parent=None, globals=None):
- """Load a template from the loader. If a loader is configured this
- method asks the loader for the template and returns a :class:`Template`.
- If the `parent` parameter is not `None`, :meth:`join_path` is called
- to get the real template name before loading.
-
- The `globals` parameter can be used to provide template wide globals.
- These variables are available in the context at render time.
-
- If the template does not exist a :exc:`TemplateNotFound` exception is
- raised.
+ def get_template(
+ self,
+ name: t.Union[str, "Template"],
+ parent: t.Optional[str] = None,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Load a template by name with :attr:`loader` and return a
+ :class:`Template`. If the template does not exist a
+ :exc:`TemplateNotFound` exception is raised.
+
+ :param name: Name of the template to load. When loading
+ templates from the filesystem, "/" is used as the path
+ separator, even on Windows.
+ :param parent: The name of the parent template importing this
+ template. :meth:`join_path` can be used to implement name
+ transformations with this.
+ :param globals: Extend the environment :attr:`globals` with
+ these extra variables available for all renders of this
+ template. If the template has already been loaded and
+ cached, its globals are updated with any new items.
+
+ .. versionchanged:: 3.0
+ If a template is loaded from cache, ``globals`` will update
+ the template's globals instead of ignoring the new values.
.. versionchanged:: 2.4
- If `name` is a :class:`Template` object it is returned from the
- function unchanged.
+ If ``name`` is a :class:`Template` object it is returned
+ unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
- return self._load_template(name, self.make_globals(globals))
+
+ return self._load_template(name, globals)
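A sketch of the per-template globals behavior described above (template name and value are examples):

    from jinja2 import Environment, DictLoader

    env = Environment(loader=DictLoader({"page.html": "{{ site_name }}"}))
    tmpl = env.get_template("page.html", globals={"site_name": "Example"})
    tmpl.render()   # -> "Example"; a cached template gets its globals updated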
@internalcode
- def select_template(self, names, parent=None, globals=None):
- """Works like :meth:`get_template` but tries a number of templates
- before it fails. If it cannot find any of the templates, it will
- raise a :exc:`TemplatesNotFound` exception.
+ def select_template(
+ self,
+ names: t.Iterable[t.Union[str, "Template"]],
+ parent: t.Optional[str] = None,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Like :meth:`get_template`, but tries loading multiple names.
+ If none of the names can be loaded a :exc:`TemplatesNotFound`
+ exception is raised.
+
+ :param names: List of template names to try loading in order.
+ :param parent: The name of the parent template importing this
+ template. :meth:`join_path` can be used to implement name
+ transformations with this.
+ :param globals: Extend the environment :attr:`globals` with
+ these extra variables available for all renders of this
+ template. If the template has already been loaded and
+ cached, its globals are updated with any new items.
+
+ .. versionchanged:: 3.0
+ If a template is loaded from cache, ``globals`` will update
+ the template's globals instead of ignoring the new values.
.. versionchanged:: 2.11
- If names is :class:`Undefined`, an :exc:`UndefinedError` is
- raised instead. If no templates were found and names
+ If ``names`` is :class:`Undefined`, an :exc:`UndefinedError`
+ is raised instead. If no templates were found and ``names``
contains :class:`Undefined`, the message is more helpful.
.. versionchanged:: 2.4
- If `names` contains a :class:`Template` object it is returned
- from the function unchanged.
+ If ``names`` contains a :class:`Template` object it is
+ returned unchanged.
.. versionadded:: 2.3
"""
@@ -904,9 +1049,9 @@ class Environment(object):
if not names:
raise TemplatesNotFound(
- message=u"Tried to select from an empty list " u"of templates."
+ message="Tried to select from an empty list of templates."
)
- globals = self.make_globals(globals)
+
for name in names:
if isinstance(name, Template):
return name
@@ -916,95 +1061,127 @@ class Environment(object):
return self._load_template(name, globals)
except (TemplateNotFound, UndefinedError):
pass
- raise TemplatesNotFound(names)
+ raise TemplatesNotFound(names) # type: ignore
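A sketch of select_template() falling back through several names (the names are made up):

    from jinja2 import Environment, DictLoader, TemplatesNotFound

    env = Environment(loader=DictLoader({"base.html": "base"}))
    env.select_template(["theme/base.html", "base.html"]).render()   # -> "base"
    try:
        env.select_template(["missing1.html", "missing2.html"])
    except TemplatesNotFound as exc:
        print(exc)   # lists every name that was tried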
@internalcode
- def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
- """Does a typecheck and dispatches to :meth:`select_template`
- if an iterable of template names is given, otherwise to
- :meth:`get_template`.
+ def get_or_select_template(
+ self,
+ template_name_or_list: t.Union[
+ str, "Template", t.List[t.Union[str, "Template"]]
+ ],
+ parent: t.Optional[str] = None,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Use :meth:`select_template` if an iterable of template names
+ is given, or :meth:`get_template` if one name is given.
.. versionadded:: 2.3
"""
- if isinstance(template_name_or_list, (string_types, Undefined)):
+ if isinstance(template_name_or_list, (str, Undefined)):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
- def from_string(self, source, globals=None, template_class=None):
- """Load a template from a string. This parses the source given and
- returns a :class:`Template` object.
+ def from_string(
+ self,
+ source: t.Union[str, nodes.Template],
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ template_class: t.Optional[t.Type["Template"]] = None,
+ ) -> "Template":
+ """Load a template from a source string without using
+ :attr:`loader`.
+
+ :param source: Jinja source to compile into a template.
+ :param globals: Extend the environment :attr:`globals` with
+ these extra variables available for all renders of this
+ template. If the template has already been loaded and
+ cached, its globals are updated with any new items.
+ :param template_class: Return an instance of this
+ :class:`Template` class.
"""
- globals = self.make_globals(globals)
+ gs = self.make_globals(globals)
cls = template_class or self.template_class
- return cls.from_code(self, self.compile(source), globals, None)
-
- def make_globals(self, d):
- """Return a dict for the globals."""
- if not d:
- return self.globals
- return dict(self.globals, **d)
-
-
-class Template(object):
- """The central template object. This class represents a compiled template
- and is used to evaluate it.
-
- Normally the template object is generated from an :class:`Environment` but
- it also has a constructor that makes it possible to create a template
- instance directly using the constructor. It takes the same arguments as
- the environment constructor but it's not possible to specify a loader.
-
- Every template object has a few methods and members that are guaranteed
- to exist. However it's important that a template object should be
- considered immutable. Modifications on the object are not supported.
-
- Template objects created from the constructor rather than an environment
- do have an `environment` attribute that points to a temporary environment
- that is probably shared with other templates created with the constructor
- and compatible settings.
-
- >>> template = Template('Hello {{ name }}!')
- >>> template.render(name='John Doe') == u'Hello John Doe!'
- True
- >>> stream = template.stream(name='John Doe')
- >>> next(stream) == u'Hello John Doe!'
- True
- >>> next(stream)
- Traceback (most recent call last):
- ...
- StopIteration
+ return cls.from_code(self, self.compile(source), gs, None)
+
+ def make_globals(
+ self, d: t.Optional[t.MutableMapping[str, t.Any]]
+ ) -> t.MutableMapping[str, t.Any]:
+ """Make the globals map for a template. Any given template
+ globals overlay the environment :attr:`globals`.
+
+ Returns a :class:`collections.ChainMap`. This allows any changes
+ to a template's globals to only affect that template, while
+ changes to the environment's globals are still reflected.
+ However, avoid modifying any globals after a template is loaded.
+
+ :param d: Dict of template-specific globals.
+
+ .. versionchanged:: 3.0
+ Use :class:`collections.ChainMap` to always prevent mutating
+ environment globals.
+ """
+ if d is None:
+ d = {}
+
+ return ChainMap(d, self.globals)
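A small sketch of the ChainMap behavior described above (keys are examples):

    from jinja2 import Environment

    env = Environment()
    env.globals["site"] = "example.org"
    g = env.make_globals({"page": "index"})
    g["page"]            # from the template layer
    g["site"]            # falls through to env.globals
    g["new"] = "value"   # written to the template layer only;
                         # env.globals is left untouched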
+
+
+class Template:
+ """A compiled template that can be rendered.
+
+ Use the methods on :class:`Environment` to create or load templates.
+ The environment is used to configure how templates are compiled and
+ behave.
+
+ It is also possible to create a template object directly. This is
+ not usually recommended. The constructor takes most of the same
+ arguments as :class:`Environment`. All templates created with the
+ same environment arguments share the same ephemeral ``Environment``
+ instance behind the scenes.
+
+ A template object should be considered immutable. Modifications on
+ the object are not supported.
"""
#: Type of environment to create when creating a template directly
#: rather than through an existing environment.
- environment_class = Environment
+ environment_class: t.Type[Environment] = Environment
+
+ environment: Environment
+ globals: t.MutableMapping[str, t.Any]
+ name: t.Optional[str]
+ filename: t.Optional[str]
+ blocks: t.Dict[str, t.Callable[[Context], t.Iterator[str]]]
+ root_render_func: t.Callable[[Context], t.Iterator[str]]
+ _module: t.Optional["TemplateModule"]
+ _debug_info: str
+ _uptodate: t.Optional[t.Callable[[], bool]]
def __new__(
cls,
- source,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- enable_async=False,
- ):
+ source: t.Union[str, nodes.Template],
+ block_start_string: str = BLOCK_START_STRING,
+ block_end_string: str = BLOCK_END_STRING,
+ variable_start_string: str = VARIABLE_START_STRING,
+ variable_end_string: str = VARIABLE_END_STRING,
+ comment_start_string: str = COMMENT_START_STRING,
+ comment_end_string: str = COMMENT_END_STRING,
+ line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
+ line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
+ trim_blocks: bool = TRIM_BLOCKS,
+ lstrip_blocks: bool = LSTRIP_BLOCKS,
+ newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
+ keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
+ optimized: bool = True,
+ undefined: t.Type[Undefined] = Undefined,
+ finalize: t.Optional[t.Callable[..., t.Any]] = None,
+ autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
+ enable_async: bool = False,
+ ) -> t.Any: # it returns a `Template`, but this breaks the sphinx build...
env = get_spontaneous_environment(
- cls.environment_class,
+ cls.environment_class, # type: ignore
block_start_string,
block_end_string,
variable_start_string,
@@ -1019,7 +1196,7 @@ class Template(object):
keep_trailing_newline,
frozenset(extensions),
optimized,
- undefined,
+ undefined, # type: ignore
finalize,
autoescape,
None,
@@ -1031,7 +1208,13 @@ class Template(object):
return env.from_string(source, template_class=cls)
@classmethod
- def from_code(cls, environment, code, globals, uptodate=None):
+ def from_code(
+ cls,
+ environment: Environment,
+ code: CodeType,
+ globals: t.MutableMapping[str, t.Any],
+ uptodate: t.Optional[t.Callable[[], bool]] = None,
+ ) -> "Template":
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
@@ -1042,7 +1225,12 @@ class Template(object):
return rv
@classmethod
- def from_module_dict(cls, environment, module_dict, globals):
+ def from_module_dict(
+ cls,
+ environment: Environment,
+ module_dict: t.MutableMapping[str, t.Any],
+ globals: t.MutableMapping[str, t.Any],
+ ) -> "Template":
"""Creates a template object from a module. This is used by the
module loader to create a template object.
@@ -1051,8 +1239,13 @@ class Template(object):
return cls._from_namespace(environment, module_dict, globals)
@classmethod
- def _from_namespace(cls, environment, namespace, globals):
- t = object.__new__(cls)
+ def _from_namespace(
+ cls,
+ environment: Environment,
+ namespace: t.MutableMapping[str, t.Any],
+ globals: t.MutableMapping[str, t.Any],
+ ) -> "Template":
+ t: "Template" = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace["name"]
@@ -1060,7 +1253,7 @@ class Template(object):
t.blocks = namespace["blocks"]
# render function and module
- t.root_render_func = namespace["root"]
+ t.root_render_func = namespace["root"] # type: ignore
t._module = None
# debug and loader helpers
@@ -1073,7 +1266,7 @@ class Template(object):
return t
- def render(self, *args, **kwargs):
+ def render(self, *args: t.Any, **kwargs: t.Any) -> str:
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
@@ -1081,15 +1274,33 @@ class Template(object):
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
- This will return the rendered template as unicode string.
+ This will return the rendered template as a string.
"""
- vars = dict(*args, **kwargs)
+ if self.environment.is_async:
+ import asyncio
+
+ close = False
+
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ close = True
+
+ try:
+ return loop.run_until_complete(self.render_async(*args, **kwargs))
+ finally:
+ if close:
+ loop.close()
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
try:
- return concat(self.root_render_func(self.new_context(vars)))
+ return self.environment.concat(self.root_render_func(ctx)) # type: ignore
except Exception:
self.environment.handle_exception()
- def render_async(self, *args, **kwargs):
+ async def render_async(self, *args: t.Any, **kwargs: t.Any) -> str:
"""This works similar to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled.
@@ -1098,42 +1309,75 @@ class Template(object):
await template.render_async(knights='that say nih; asynchronously')
"""
- # see asyncsupport for the actual implementation
- raise NotImplementedError(
- "This feature is not available for this version of Python"
- )
+ if not self.environment.is_async:
+ raise RuntimeError(
+ "The environment was not created with async mode enabled."
+ )
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ return self.environment.concat( # type: ignore
+ [n async for n in self.root_render_func(ctx)] # type: ignore
+ )
+ except Exception:
+ return self.environment.handle_exception()
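A sketch of async rendering with the now-built-in render_async(), assuming an environment created with enable_async=True:

    import asyncio
    from jinja2 import Environment

    env = Environment(enable_async=True)
    tmpl = env.from_string("Hello {{ name }}!")

    async def main() -> str:
        return await tmpl.render_async(name="async world")

    asyncio.run(main())   # -> "Hello async world!"
    # tmpl.render(...) also works: it drives render_async() on a loop as shown above.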
- def stream(self, *args, **kwargs):
+ def stream(self, *args: t.Any, **kwargs: t.Any) -> "TemplateStream":
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
- def generate(self, *args, **kwargs):
+ def generate(self, *args: t.Any, **kwargs: t.Any) -> t.Iterator[str]:
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
- a generator that yields one item after another as unicode strings.
+ a generator that yields one item after another as strings.
It accepts the same arguments as :meth:`render`.
"""
- vars = dict(*args, **kwargs)
+ if self.environment.is_async:
+ import asyncio
+
+ async def to_list() -> t.List[str]:
+ return [x async for x in self.generate_async(*args, **kwargs)]
+
+ yield from asyncio.run(to_list())
+ return
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
try:
- for event in self.root_render_func(self.new_context(vars)):
- yield event
+ yield from self.root_render_func(ctx) # type: ignore
except Exception:
yield self.environment.handle_exception()
- def generate_async(self, *args, **kwargs):
+ async def generate_async(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.AsyncIterator[str]:
"""An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
"""
- # see asyncsupport for the actual implementation
- raise NotImplementedError(
- "This feature is not available for this version of Python"
- )
+ if not self.environment.is_async:
+ raise RuntimeError(
+ "The environment was not created with async mode enabled."
+ )
+
+ ctx = self.new_context(dict(*args, **kwargs))
- def new_context(self, vars=None, shared=False, locals=None):
+ try:
+ async for event in self.root_render_func(ctx): # type: ignore
+ yield event
+ except Exception:
+ yield self.environment.handle_exception()
+
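A sketch of piecewise evaluation with generate() and stream() (the template is an example):

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string("{% for i in range(3) %}{{ i }}\n{% endfor %}")
    for chunk in tmpl.generate():
        print(repr(chunk))      # one chunk per evaluated output piece
    stream = tmpl.stream()      # same iterator wrapped in a TemplateStream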
+ def new_context(
+ self,
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> Context:
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
@@ -1145,35 +1389,80 @@ class Template(object):
self.environment, self.name, self.blocks, vars, shared, self.globals, locals
)
- def make_module(self, vars=None, shared=False, locals=None):
+ def make_module(
+ self,
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "TemplateModule":
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
- return TemplateModule(self, self.new_context(vars, shared, locals))
+ ctx = self.new_context(vars, shared, locals)
+ return TemplateModule(self, ctx)
- def make_module_async(self, vars=None, shared=False, locals=None):
+ async def make_module_async(
+ self,
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "TemplateModule":
"""As template module creation can invoke template code for
asynchronous executions, this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
"""
- # see asyncsupport for the actual implementation
- raise NotImplementedError(
- "This feature is not available for this version of Python"
+ ctx = self.new_context(vars, shared, locals)
+ return TemplateModule(
+ self, ctx, [x async for x in self.root_render_func(ctx)] # type: ignore
)
@internalcode
- def _get_default_module(self):
- if self._module is not None:
- return self._module
- self._module = rv = self.make_module()
- return rv
+ def _get_default_module(self, ctx: t.Optional[Context] = None) -> "TemplateModule":
+ """If a context is passed in, this means that the template was
+ imported. Imported templates have access to the current
+ template's globals by default, but they can only be accessed via
+ the context during runtime.
+
+ If there are new globals, we need to create a new module because
+ the cached module is already rendered and will not have access
+ to globals from the current context. This new module is not
+ cached because the template can be imported elsewhere, and it
+ should have access to only the current template's globals.
+ """
+ if self.environment.is_async:
+ raise RuntimeError("Module is not available in async mode.")
+
+ if ctx is not None:
+ keys = ctx.globals_keys - self.globals.keys()
+
+ if keys:
+ return self.make_module({k: ctx.parent[k] for k in keys})
+
+ if self._module is None:
+ self._module = self.make_module()
+
+ return self._module
+
+ async def _get_default_module_async(
+ self, ctx: t.Optional[Context] = None
+ ) -> "TemplateModule":
+ if ctx is not None:
+ keys = ctx.globals_keys - self.globals.keys()
+
+ if keys:
+ return await self.make_module_async({k: ctx.parent[k] for k in keys})
+
+ if self._module is None:
+ self._module = await self.make_module_async()
+
+ return self._module
@property
- def module(self):
+ def module(self) -> "TemplateModule":
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
@@ -1188,7 +1477,7 @@ class Template(object):
"""
return self._get_default_module()
- def get_corresponding_lineno(self, lineno):
+ def get_corresponding_lineno(self, lineno: int) -> int:
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
@@ -1198,100 +1487,113 @@ class Template(object):
return 1
@property
- def is_up_to_date(self):
+ def is_up_to_date(self) -> bool:
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
- def debug_info(self):
+ def debug_info(self) -> t.List[t.Tuple[int, int]]:
"""The debug info mapping."""
if self._debug_info:
- return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")]
+ return [
+ tuple(map(int, x.split("="))) # type: ignore
+ for x in self._debug_info.split("&")
+ ]
+
return []
- def __repr__(self):
+ def __repr__(self) -> str:
if self.name is None:
- name = "memory:%x" % id(self)
+ name = f"memory:{id(self):x}"
else:
name = repr(self.name)
- return "<%s %s>" % (self.__class__.__name__, name)
+ return f"<{type(self).__name__} {name}>"
-@implements_to_string
-class TemplateModule(object):
+class TemplateModule:
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
- converting it into an unicode- or bytestrings renders the contents.
+ converting it into a string renders the contents.
"""
- def __init__(self, template, context, body_stream=None):
+ def __init__(
+ self,
+ template: Template,
+ context: Context,
+ body_stream: t.Optional[t.Iterable[str]] = None,
+ ) -> None:
if body_stream is None:
if context.environment.is_async:
raise RuntimeError(
- "Async mode requires a body stream "
- "to be passed to a template module. Use "
- "the async methods of the API you are "
- "using."
+ "Async mode requires a body stream to be passed to"
+ " a template module. Use the async methods of the"
+ " API you are using."
)
- body_stream = list(template.root_render_func(context))
+
+ body_stream = list(template.root_render_func(context)) # type: ignore
+
self._body_stream = body_stream
self.__dict__.update(context.get_exported())
self.__name__ = template.name
- def __html__(self):
+ def __html__(self) -> Markup:
return Markup(concat(self._body_stream))
- def __str__(self):
+ def __str__(self) -> str:
return concat(self._body_stream)
- def __repr__(self):
+ def __repr__(self) -> str:
if self.__name__ is None:
- name = "memory:%x" % id(self)
+ name = f"memory:{id(self):x}"
else:
name = repr(self.__name__)
- return "<%s %s>" % (self.__class__.__name__, name)
+ return f"<{type(self).__name__} {name}>"
-class TemplateExpression(object):
+class TemplateExpression:
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
- def __init__(self, template, undefined_to_none):
+ def __init__(self, template: Template, undefined_to_none: bool) -> None:
self._template = template
self._undefined_to_none = undefined_to_none
- def __call__(self, *args, **kwargs):
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Optional[t.Any]:
context = self._template.new_context(dict(*args, **kwargs))
- consume(self._template.root_render_func(context))
+ consume(self._template.root_render_func(context)) # type: ignore
rv = context.vars["result"]
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
-@implements_iterator
-class TemplateStream(object):
+class TemplateStream:
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
- instruction in the template one unicode string is yielded.
+ instruction in the template one string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
- into a new unicode string. This is mainly useful if you are streaming
+ into a new string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
- def __init__(self, gen):
+ def __init__(self, gen: t.Iterator[str]) -> None:
self._gen = gen
self.disable_buffering()
- def dump(self, fp, encoding=None, errors="strict"):
+ def dump(
+ self,
+ fp: t.Union[str, t.IO],
+ encoding: t.Optional[str] = None,
+ errors: t.Optional[str] = "strict",
+ ) -> None:
"""Dump the complete stream into a file or file-like object.
- Per default unicode strings are written, if you want to encode
+        By default strings are written; if you want to encode
before writing, specify an `encoding`.
Example usage::
@@ -1299,16 +1601,19 @@ class TemplateStream(object):
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
- if isinstance(fp, string_types):
+
+ if isinstance(fp, str):
if encoding is None:
encoding = "utf-8"
+
fp = open(fp, "wb")
close = True
try:
if encoding is not None:
- iterable = (x.encode(encoding, errors) for x in self)
+ iterable = (x.encode(encoding, errors) for x in self) # type: ignore
else:
- iterable = self
+ iterable = self # type: ignore
+
if hasattr(fp, "writelines"):
fp.writelines(iterable)
else:
@@ -1318,17 +1623,17 @@ class TemplateStream(object):
if close:
fp.close()
- def disable_buffering(self):
+ def disable_buffering(self) -> None:
"""Disable the output buffering."""
self._next = partial(next, self._gen)
self.buffered = False
- def _buffered_generator(self, size):
- buf = []
+ def _buffered_generator(self, size: int) -> t.Iterator[str]:
+ buf: t.List[str] = []
c_size = 0
push = buf.append
- while 1:
+ while True:
try:
while c_size < size:
c = next(self._gen)
@@ -1342,7 +1647,7 @@ class TemplateStream(object):
del buf[:]
c_size = 0
- def enable_buffering(self, size=5):
+ def enable_buffering(self, size: int = 5) -> None:
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError("buffer size too small")
@@ -1350,11 +1655,11 @@ class TemplateStream(object):
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
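A sketch of buffered streaming with enable_buffering() and dump(); the output path is a placeholder:

    from jinja2 import Environment

    env = Environment()
    stream = env.from_string("{% for i in range(100) %}{{ i }} {% endfor %}").stream()
    stream.enable_buffering(size=5)          # combine every 5 items into one string
    stream.dump("numbers.txt", encoding="utf-8")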
- def __iter__(self):
+ def __iter__(self) -> "TemplateStream":
return self
- def __next__(self):
- return self._next()
+ def __next__(self) -> str:
+ return self._next() # type: ignore
# hook in default template class. if anyone reads this comment: ignore that
diff --git a/deps/v8/third_party/jinja2/exceptions.py b/deps/v8/third_party/jinja2/exceptions.py
index 0bf2003e30..082ebe8f22 100644
--- a/deps/v8/third_party/jinja2/exceptions.py
+++ b/deps/v8/third_party/jinja2/exceptions.py
@@ -1,44 +1,20 @@
-# -*- coding: utf-8 -*-
-from ._compat import imap
-from ._compat import implements_to_string
-from ._compat import PY2
-from ._compat import text_type
+import typing as t
+
+if t.TYPE_CHECKING:
+ from .runtime import Undefined
class TemplateError(Exception):
"""Baseclass for all template errors."""
- if PY2:
-
- def __init__(self, message=None):
- if message is not None:
- message = text_type(message).encode("utf-8")
- Exception.__init__(self, message)
-
- @property
- def message(self):
- if self.args:
- message = self.args[0]
- if message is not None:
- return message.decode("utf-8", "replace")
-
- def __unicode__(self):
- return self.message or u""
+ def __init__(self, message: t.Optional[str] = None) -> None:
+ super().__init__(message)
- else:
+ @property
+ def message(self) -> t.Optional[str]:
+ return self.args[0] if self.args else None
- def __init__(self, message=None):
- Exception.__init__(self, message)
- @property
- def message(self):
- if self.args:
- message = self.args[0]
- if message is not None:
- return message
-
-
-@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist.
@@ -47,11 +23,15 @@ class TemplateNotFound(IOError, LookupError, TemplateError):
provided, an :exc:`UndefinedError` is raised.
"""
- # looks weird, but removes the warning descriptor that just
- # bogusly warns us about message being deprecated
- message = None
+ # Silence the Python warning about message being deprecated since
+ # it's not valid here.
+ message: t.Optional[str] = None
- def __init__(self, name, message=None):
+ def __init__(
+ self,
+ name: t.Optional[t.Union[str, "Undefined"]],
+ message: t.Optional[str] = None,
+ ) -> None:
IOError.__init__(self, name)
if message is None:
@@ -66,8 +46,8 @@ class TemplateNotFound(IOError, LookupError, TemplateError):
self.name = name
self.templates = [name]
- def __str__(self):
- return self.message
+ def __str__(self) -> str:
+ return str(self.message)
class TemplatesNotFound(TemplateNotFound):
@@ -82,7 +62,11 @@ class TemplatesNotFound(TemplateNotFound):
.. versionadded:: 2.2
"""
- def __init__(self, names=(), message=None):
+ def __init__(
+ self,
+ names: t.Sequence[t.Union[str, "Undefined"]] = (),
+ message: t.Optional[str] = None,
+ ) -> None:
if message is None:
from .runtime import Undefined
@@ -94,52 +78,57 @@ class TemplatesNotFound(TemplateNotFound):
else:
parts.append(name)
- message = u"none of the templates given were found: " + u", ".join(
- imap(text_type, parts)
- )
- TemplateNotFound.__init__(self, names and names[-1] or None, message)
+ parts_str = ", ".join(map(str, parts))
+ message = f"none of the templates given were found: {parts_str}"
+
+ super().__init__(names[-1] if names else None, message)
self.templates = list(names)
-@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
- def __init__(self, message, lineno, name=None, filename=None):
- TemplateError.__init__(self, message)
+ def __init__(
+ self,
+ message: str,
+ lineno: int,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> None:
+ super().__init__(message)
self.lineno = lineno
self.name = name
self.filename = filename
- self.source = None
+ self.source: t.Optional[str] = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
- def __str__(self):
+ def __str__(self) -> str:
# for translated errors we only return the message
if self.translated:
- return self.message
+ return t.cast(str, self.message)
# otherwise attach some stuff
- location = "line %d" % self.lineno
+ location = f"line {self.lineno}"
name = self.filename or self.name
if name:
- location = 'File "%s", %s' % (name, location)
- lines = [self.message, " " + location]
+ location = f'File "{name}", {location}'
+ lines = [t.cast(str, self.message), " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
- line = None
- if line:
+ pass
+ else:
lines.append(" " + line.strip())
- return u"\n".join(lines)
+ return "\n".join(lines)
- def __reduce__(self):
+ def __reduce__(self): # type: ignore
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
diff --git a/deps/v8/third_party/jinja2/ext.py b/deps/v8/third_party/jinja2/ext.py
index 9141be4dac..d5550540cd 100644
--- a/deps/v8/third_party/jinja2/ext.py
+++ b/deps/v8/third_party/jinja2/ext.py
@@ -1,53 +1,57 @@
-# -*- coding: utf-8 -*-
"""Extension API for adding custom tags and behavior."""
import pprint
import re
-from sys import version_info
+import typing as t
from markupsafe import Markup
+from . import defaults
from . import nodes
-from ._compat import iteritems
-from ._compat import string_types
-from ._compat import with_metaclass
-from .defaults import BLOCK_END_STRING
-from .defaults import BLOCK_START_STRING
-from .defaults import COMMENT_END_STRING
-from .defaults import COMMENT_START_STRING
-from .defaults import KEEP_TRAILING_NEWLINE
-from .defaults import LINE_COMMENT_PREFIX
-from .defaults import LINE_STATEMENT_PREFIX
-from .defaults import LSTRIP_BLOCKS
-from .defaults import NEWLINE_SEQUENCE
-from .defaults import TRIM_BLOCKS
-from .defaults import VARIABLE_END_STRING
-from .defaults import VARIABLE_START_STRING
from .environment import Environment
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
-from .nodes import ContextReference
-from .runtime import concat
-from .utils import contextfunction
+from .runtime import concat # type: ignore
+from .runtime import Context
+from .runtime import Undefined
from .utils import import_string
+from .utils import pass_context
-# the only real useful gettext functions for a Jinja template. Note
-# that ugettext must be assigned to gettext as Jinja doesn't support
-# non unicode strings.
-GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .lexer import Token
+ from .lexer import TokenStream
+ from .parser import Parser
-_ws_re = re.compile(r"\s*\n\s*")
+ class _TranslationsBasic(te.Protocol):
+ def gettext(self, message: str) -> str:
+ ...
+ def ngettext(self, singular: str, plural: str, n: int) -> str:
+            ...
-class ExtensionRegistry(type):
- """Gives the extension an unique identifier."""
+ class _TranslationsContext(_TranslationsBasic):
+ def pgettext(self, context: str, message: str) -> str:
+ ...
+
+ def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
+ ...
+
+ _SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
- def __new__(mcs, name, bases, d):
- rv = type.__new__(mcs, name, bases, d)
- rv.identifier = rv.__module__ + "." + rv.__name__
- return rv
+
+# I18N functions available in Jinja templates. If the I18N library
+# provides ugettext, it will be assigned to gettext.
+GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
+ "_",
+ "gettext",
+ "ngettext",
+ "pgettext",
+ "npgettext",
+)
+_ws_re = re.compile(r"\s*\n\s*")
-class Extension(with_metaclass(ExtensionRegistry, object)):
+class Extension:
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
@@ -66,8 +70,13 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
name as includes the name of the extension (fragment cache).
"""
+ identifier: t.ClassVar[str]
+
+ def __init_subclass__(cls) -> None:
+ cls.identifier = f"{cls.__module__}.{cls.__name__}"
+
#: if this extension parses this is the list of tags it's listening to.
- tags = set()
+ tags: t.Set[str] = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
@@ -76,24 +85,28 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
#: .. versionadded:: 2.4
priority = 100
- def __init__(self, environment):
+ def __init__(self, environment: Environment) -> None:
self.environment = environment
- def bind(self, environment):
+ def bind(self, environment: Environment) -> "Extension":
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
- def preprocess(self, source, name, filename=None):
+ def preprocess(
+ self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
+ ) -> str:
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
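A hypothetical extension subclass showing the preprocess() hook and the automatic ``identifier`` from ``__init_subclass__`` (the class name and regex are made up):

    import re
    from jinja2 import Environment
    from jinja2.ext import Extension

    class StripHTMLComments(Extension):
        # Example only: rewrite the raw source before lexing.
        def preprocess(self, source, name, filename=None):
            return re.sub(r"<!--.*?-->", "", source, flags=re.S)

    env = Environment(extensions=[StripHTMLComments])
    env.from_string("a<!-- note -->b").render()   # -> "ab"
    StripHTMLComments.identifier                  # set by __init_subclass__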
- def filter_stream(self, stream):
+ def filter_stream(
+ self, stream: "TokenStream"
+ ) -> t.Union["TokenStream", t.Iterable["Token"]]:
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
@@ -101,7 +114,7 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
"""
return stream
- def parse(self, parser):
+ def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
@@ -109,7 +122,9 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
"""
raise NotImplementedError()
- def attr(self, name, lineno=None):
+ def attr(
+ self, name: str, lineno: t.Optional[int] = None
+ ) -> nodes.ExtensionAttribute:
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
@@ -120,8 +135,14 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(
- self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
- ):
+ self,
+ name: str,
+ args: t.Optional[t.List[nodes.Expr]] = None,
+ kwargs: t.Optional[t.List[nodes.Keyword]] = None,
+ dyn_args: t.Optional[nodes.Expr] = None,
+ dyn_kwargs: t.Optional[nodes.Expr] = None,
+ lineno: t.Optional[int] = None,
+ ) -> nodes.Call:
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
@@ -139,38 +160,88 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
)
-@contextfunction
-def _gettext_alias(__context, *args, **kwargs):
+@pass_context
+def _gettext_alias(
+ __context: Context, *args: t.Any, **kwargs: t.Any
+) -> t.Union[t.Any, Undefined]:
return __context.call(__context.resolve("gettext"), *args, **kwargs)
-def _make_new_gettext(func):
- @contextfunction
- def gettext(__context, __string, **variables):
+def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
+ @pass_context
+ def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, even if there are no
# variables. This makes translation strings more consistent
# and predictable. This requires escaping
- return rv % variables
+ return rv % variables # type: ignore
return gettext
-def _make_new_ngettext(func):
- @contextfunction
- def ngettext(__context, __singular, __plural, __num, **variables):
+def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
+ @pass_context
+ def ngettext(
+ __context: Context,
+ __singular: str,
+ __plural: str,
+ __num: int,
+ **variables: t.Any,
+ ) -> str:
variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
- return rv % variables
+ return rv % variables # type: ignore
return ngettext
+def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
+ @pass_context
+ def pgettext(
+ __context: Context, __string_ctx: str, __string: str, **variables: t.Any
+ ) -> str:
+ variables.setdefault("context", __string_ctx)
+ rv = __context.call(func, __string_ctx, __string)
+
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return pgettext
+
+
+def _make_new_npgettext(
+ func: t.Callable[[str, str, str, int], str]
+) -> t.Callable[..., str]:
+ @pass_context
+ def npgettext(
+ __context: Context,
+ __string_ctx: str,
+ __singular: str,
+ __plural: str,
+ __num: int,
+ **variables: t.Any,
+ ) -> str:
+ variables.setdefault("context", __string_ctx)
+ variables.setdefault("num", __num)
+ rv = __context.call(func, __string_ctx, __singular, __plural, __num)
+
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return npgettext
+
+
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja."""
@@ -183,8 +254,8 @@ class InternationalizationExtension(Extension):
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
- def __init__(self, environment):
- Extension.__init__(self, environment)
+ def __init__(self, environment: Environment) -> None:
+ super().__init__(environment)
environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
@@ -195,48 +266,108 @@ class InternationalizationExtension(Extension):
newstyle_gettext=False,
)
- def _install(self, translations, newstyle=None):
+ def _install(
+ self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
+ ) -> None:
+ # ugettext and ungettext are preferred in case the I18N library
+ # is providing compatibility with older Python versions.
gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
- self._install_callables(gettext, ngettext, newstyle)
- def _install_null(self, newstyle=None):
+ pgettext = getattr(translations, "pgettext", None)
+ npgettext = getattr(translations, "npgettext", None)
self._install_callables(
- lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
+ gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
)
- def _install_callables(self, gettext, ngettext, newstyle=None):
+ def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
+ import gettext
+
+ translations = gettext.NullTranslations()
+
+ if hasattr(translations, "pgettext"):
+ # NullTranslations.pgettext exists on Python >= 3.8; otherwise fall back below.
+ pgettext = translations.pgettext # type: ignore
+ else:
+
+ def pgettext(c: str, s: str) -> str:
+ return s
+
+ if hasattr(translations, "npgettext"):
+ npgettext = translations.npgettext # type: ignore
+ else:
+
+ def npgettext(c: str, s: str, p: str, n: int) -> str:
+ return s if n == 1 else p
+
+ self._install_callables(
+ gettext=translations.gettext,
+ ngettext=translations.ngettext,
+ newstyle=newstyle,
+ pgettext=pgettext,
+ npgettext=npgettext,
+ )
+
+ def _install_callables(
+ self,
+ gettext: t.Callable[[str], str],
+ ngettext: t.Callable[[str, str, int], str],
+ newstyle: t.Optional[bool] = None,
+ pgettext: t.Optional[t.Callable[[str, str], str]] = None,
+ npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
+ ) -> None:
if newstyle is not None:
- self.environment.newstyle_gettext = newstyle
- if self.environment.newstyle_gettext:
+ self.environment.newstyle_gettext = newstyle # type: ignore
+ if self.environment.newstyle_gettext: # type: ignore
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(gettext=gettext, ngettext=ngettext)
- def _uninstall(self, translations):
- for key in "gettext", "ngettext":
+ if pgettext is not None:
+ pgettext = _make_new_pgettext(pgettext)
+
+ if npgettext is not None:
+ npgettext = _make_new_npgettext(npgettext)
+
+ self.environment.globals.update(
+ gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
+ )
+
+ def _uninstall(self, translations: "_SupportedTranslations") -> None:
+ for key in ("gettext", "ngettext", "pgettext", "npgettext"):
self.environment.globals.pop(key, None)
- def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
- if isinstance(source, string_types):
+ def _extract(
+ self,
+ source: t.Union[str, nodes.Template],
+ gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
+ ) -> t.Iterator[
+ t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
+ ]:
+ if isinstance(source, str):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
- def parse(self, parser):
+ def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
- num_called_num = False
+
+ context = None
+ context_token = parser.stream.next_if("string")
+
+ if context_token is not None:
+ context = context_token.value
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
- plural_expr = None
- plural_expr_assignment = None
- variables = {}
+ plural_expr: t.Optional[nodes.Expr] = None
+ plural_expr_assignment: t.Optional[nodes.Assign] = None
+ num_called_num = False
+ variables: t.Dict[str, nodes.Expr] = {}
trimmed = None
while parser.stream.current.type != "block_end":
if variables:
@@ -246,34 +377,34 @@ class InternationalizationExtension(Extension):
if parser.stream.skip_if("colon"):
break
- name = parser.stream.expect("name")
- if name.value in variables:
+ token = parser.stream.expect("name")
+ if token.value in variables:
parser.fail(
- "translatable variable %r defined twice." % name.value,
- name.lineno,
+ f"translatable variable {token.value!r} defined twice.",
+ token.lineno,
exc=TemplateAssertionError,
)
# expressions
if parser.stream.current.type == "assign":
next(parser.stream)
- variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ("trimmed", "notrimmed"):
- trimmed = name.value == "trimmed"
+ variables[token.value] = var = parser.parse_expression()
+ elif trimmed is None and token.value in ("trimmed", "notrimmed"):
+ trimmed = token.value == "trimmed"
continue
else:
- variables[name.value] = var = nodes.Name(name.value, "load")
+ variables[token.value] = var = nodes.Name(token.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name("_trans", "load")
- variables[name.value] = plural_expr
+ variables[token.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name("_trans", "store"), var
)
else:
plural_expr = var
- num_called_num = name.value == "num"
+ num_called_num = token.value == "num"
parser.stream.expect("block_end")
@@ -294,15 +425,15 @@ class InternationalizationExtension(Extension):
have_plural = True
next(parser.stream)
if parser.stream.current.type != "block_end":
- name = parser.stream.expect("name")
- if name.value not in variables:
+ token = parser.stream.expect("name")
+ if token.value not in variables:
parser.fail(
- "unknown variable %r for pluralization" % name.value,
- name.lineno,
+ f"unknown variable {token.value!r} for pluralization",
+ token.lineno,
exc=TemplateAssertionError,
)
- plural_expr = variables[name.value]
- num_called_num = name.value == "num"
+ plural_expr = variables[token.value]
+ num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
@@ -311,9 +442,9 @@ class InternationalizationExtension(Extension):
next(parser.stream)
# register free names as simple name expressions
- for var in referenced:
- if var not in variables:
- variables[var] = nodes.Name(var, "load")
+ for name in referenced:
+ if name not in variables:
+ variables[name] = nodes.Name(name, "load")
if not have_plural:
plural_expr = None
@@ -330,6 +461,7 @@ class InternationalizationExtension(Extension):
node = self._make_node(
singular,
plural,
+ context,
variables,
plural_expr,
bool(referenced),
@@ -341,14 +473,17 @@ class InternationalizationExtension(Extension):
else:
return node
- def _trim_whitespace(self, string, _ws_re=_ws_re):
+ def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
return _ws_re.sub(" ", string.strip())
- def _parse_block(self, parser, allow_pluralize):
+ def _parse_block(
+ self, parser: "Parser", allow_pluralize: bool
+ ) -> t.Tuple[t.List[str], str]:
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
- while 1:
+
+ while True:
if parser.stream.current.type == "data":
buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
@@ -356,7 +491,7 @@ class InternationalizationExtension(Extension):
next(parser.stream)
name = parser.stream.expect("name").value
referenced.append(name)
- buf.append("%%(%s)s" % name)
+ buf.append(f"%({name})s")
parser.stream.expect("variable_end")
elif parser.stream.current.type == "block_begin":
next(parser.stream)
@@ -379,37 +514,44 @@ class InternationalizationExtension(Extension):
return referenced, concat(buf)
def _make_node(
- self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
- ):
+ self,
+ singular: str,
+ plural: t.Optional[str],
+ context: t.Optional[str],
+ variables: t.Dict[str, nodes.Expr],
+ plural_expr: t.Optional[nodes.Expr],
+ vars_referenced: bool,
+ num_called_num: bool,
+ ) -> nodes.Output:
"""Generates a useful node from the data provided."""
+ newstyle = self.environment.newstyle_gettext # type: ignore
+ node: nodes.Expr
+
# If no variables are referenced, the "%%" escaping added for old-style
# gettext invocations is not needed and can be undone again.
- if not vars_referenced and not self.environment.newstyle_gettext:
+ if not vars_referenced and not newstyle:
singular = singular.replace("%%", "%")
if plural:
plural = plural.replace("%%", "%")
- # singular only:
- if plural_expr is None:
- gettext = nodes.Name("gettext", "load")
- node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
+ func_name = "gettext"
+ func_args: t.List[nodes.Expr] = [nodes.Const(singular)]
- # singular and plural
- else:
- ngettext = nodes.Name("ngettext", "load")
- node = nodes.Call(
- ngettext,
- [nodes.Const(singular), nodes.Const(plural), plural_expr],
- [],
- None,
- None,
- )
+ if context is not None:
+ func_args.insert(0, nodes.Const(context))
+ func_name = f"p{func_name}"
+
+ if plural_expr is not None:
+ func_name = f"n{func_name}"
+ func_args.extend((nodes.Const(plural), plural_expr))
+
+ node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
- if self.environment.newstyle_gettext:
- for key, value in iteritems(variables):
+ if newstyle:
+ for key, value in variables.items():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == "num":
@@ -439,9 +581,9 @@ class ExprStmtExtension(Extension):
that it doesn't print the return value.
"""
- tags = set(["do"])
+ tags = {"do"}
- def parse(self, parser):
+ def parse(self, parser: "Parser") -> nodes.ExprStmt:
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
@@ -450,23 +592,15 @@ class ExprStmtExtension(Extension):
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
- tags = set(["break", "continue"])
+ tags = {"break", "continue"}
- def parse(self, parser):
+ def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
token = next(parser.stream)
if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
-class WithExtension(Extension):
- pass
-
-
-class AutoEscapeExtension(Extension):
- pass
-
-
class DebugExtension(Extension):
"""A ``{% debug %}`` tag that dumps the available variables,
filters, and tests.
@@ -490,13 +624,13 @@ class DebugExtension(Extension):
tags = {"debug"}
- def parse(self, parser):
+ def parse(self, parser: "Parser") -> nodes.Output:
lineno = parser.stream.expect("name:debug").lineno
- context = ContextReference()
+ context = nodes.ContextReference()
result = self.call_method("_render", [context], lineno=lineno)
return nodes.Output([result], lineno=lineno)
- def _render(self, context):
+ def _render(self, context: Context) -> str:
result = {
"context": context.get_all(),
"filters": sorted(self.environment.filters.keys()),
@@ -504,13 +638,16 @@ class DebugExtension(Extension):
}
# Set the depth since the intent is to show the top few names.
- if version_info[:2] >= (3, 4):
- return pprint.pformat(result, depth=3, compact=True)
- else:
- return pprint.pformat(result, depth=3)
+ return pprint.pformat(result, depth=3, compact=True)
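# Sketch: enabling the debug tag shown above; assumes the vendored package is
# importable as plain jinja2.
from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.debug"])
print(env.from_string("{% debug %}").render(answer=42))
# Prints a pprint'ed dict with the current "context", "filters" and "tests".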
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
+def extract_from_ast(
+ ast: nodes.Template,
+ gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
+ babel_style: bool = True,
+) -> t.Iterator[
+ t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
+]:
"""Extract localizable strings from the given template node. Per
default this function returns matches in Babel style, which means non-string
parameters as well as keyword arguments are returned as `None`. This
@@ -538,23 +675,26 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
- * ``message`` is the string itself (a ``unicode`` object, or a tuple
- of ``unicode`` objects for functions with multiple string arguments).
+ * ``message`` is the string, or a tuple of strings for functions
+ with multiple string arguments.
Because this extraction function operates on the AST, it is unable to
extract any comments. For comment support you have to use the Babel
extraction interface or extract comments yourself.
"""
- for node in node.find_all(nodes.Call):
+ out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]
+
+ for node in ast.find_all(nodes.Call):
if (
not isinstance(node.node, nodes.Name)
or node.node.name not in gettext_functions
):
continue
- strings = []
+ strings: t.List[t.Optional[str]] = []
+
for arg in node.args:
- if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
strings.append(arg.value)
else:
strings.append(None)
@@ -567,31 +707,35 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True
strings.append(None)
if not babel_style:
- strings = tuple(x for x in strings if x is not None)
- if not strings:
+ out = tuple(x for x in strings if x is not None)
+
+ if not out:
continue
else:
if len(strings) == 1:
- strings = strings[0]
+ out = strings[0]
else:
- strings = tuple(strings)
- yield node.lineno, node.node.name, strings
+ out = tuple(strings)
+ yield node.lineno, node.node.name, out
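# Sketch of extract_from_ast() on a parsed template; assumes the vendored
# module is importable as plain jinja2.
from jinja2 import Environment
from jinja2.ext import extract_from_ast

env = Environment()
ast = env.parse('{{ gettext("Hello") }} {{ ngettext("%(n)s item", "%(n)s items", n) }}')
print(list(extract_from_ast(ast)))
# e.g. [(1, 'gettext', 'Hello'), (1, 'ngettext', ('%(n)s item', '%(n)s items', None))]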
-class _CommentFinder(object):
+
+class _CommentFinder:
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
- def __init__(self, tokens, comment_tags):
+ def __init__(
+ self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
+ ) -> None:
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
- def find_backwards(self, offset):
+ def find_backwards(self, offset: int) -> t.List[str]:
try:
for _, token_type, token_value in reversed(
self.tokens[self.offset : offset]
@@ -607,7 +751,7 @@ class _CommentFinder(object):
finally:
self.offset = offset
- def find_comments(self, lineno):
+ def find_comments(self, lineno: int) -> t.List[str]:
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
@@ -616,7 +760,16 @@ class _CommentFinder(object):
return self.find_backwards(len(self.tokens))
-def babel_extract(fileobj, keywords, comment_tags, options):
+def babel_extract(
+ fileobj: t.BinaryIO,
+ keywords: t.Sequence[str],
+ comment_tags: t.Sequence[str],
+ options: t.Dict[str, t.Any],
+) -> t.Iterator[
+ t.Tuple[
+ int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
+ ]
+]:
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
@@ -644,33 +797,37 @@ def babel_extract(fileobj, keywords, comment_tags, options):
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
- extensions = set()
- for extension in options.get("extensions", "").split(","):
- extension = extension.strip()
- if not extension:
+ extensions: t.Dict[t.Type[Extension], None] = {}
+
+ for extension_name in options.get("extensions", "").split(","):
+ extension_name = extension_name.strip()
+
+ if not extension_name:
continue
- extensions.add(import_string(extension))
+
+ extensions[import_string(extension_name)] = None
+
if InternationalizationExtension not in extensions:
- extensions.add(InternationalizationExtension)
+ extensions[InternationalizationExtension] = None
- def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
+ def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
+ return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}
silent = getbool(options, "silent", True)
environment = Environment(
- options.get("block_start_string", BLOCK_START_STRING),
- options.get("block_end_string", BLOCK_END_STRING),
- options.get("variable_start_string", VARIABLE_START_STRING),
- options.get("variable_end_string", VARIABLE_END_STRING),
- options.get("comment_start_string", COMMENT_START_STRING),
- options.get("comment_end_string", COMMENT_END_STRING),
- options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
- options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
- getbool(options, "trim_blocks", TRIM_BLOCKS),
- getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
- NEWLINE_SEQUENCE,
- getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
- frozenset(extensions),
+ options.get("block_start_string", defaults.BLOCK_START_STRING),
+ options.get("block_end_string", defaults.BLOCK_END_STRING),
+ options.get("variable_start_string", defaults.VARIABLE_START_STRING),
+ options.get("variable_end_string", defaults.VARIABLE_END_STRING),
+ options.get("comment_start_string", defaults.COMMENT_START_STRING),
+ options.get("comment_end_string", defaults.COMMENT_END_STRING),
+ options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
+ defaults.NEWLINE_SEQUENCE,
+ getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
+ tuple(extensions),
cache_size=0,
auto_reload=False,
)
@@ -678,7 +835,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
if getbool(options, "trimmed"):
environment.policies["ext.i18n.trimmed"] = True
if getbool(options, "newstyle_gettext"):
- environment.newstyle_gettext = True
+ environment.newstyle_gettext = True # type: ignore
source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
@@ -699,6 +856,4 @@ def babel_extract(fileobj, keywords, comment_tags, options):
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
-with_ = WithExtension
-autoescape = AutoEscapeExtension
debug = DebugExtension
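# Sketch of the new pgettext support wired up above: a leading string literal
# in {% trans %} is treated as the message context. Assumes the vendored
# module is importable as plain jinja2.
from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.i18n"])
env.install_null_translations(newstyle=True)

tmpl = env.from_string('{% trans "month name" %}May{% endtrans %}')
print(tmpl.render())  # "May" -- NullTranslations pass the message through
print(env.from_string('{{ pgettext("month name", "May") }}').render())  # "May"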
diff --git a/deps/v8/third_party/jinja2/filters.py b/deps/v8/third_party/jinja2/filters.py
index 74b108dcec..ed07c4c0e2 100644
--- a/deps/v8/third_party/jinja2/filters.py
+++ b/deps/v8/third_party/jinja2/filters.py
@@ -1,79 +1,75 @@
-# -*- coding: utf-8 -*-
"""Built-in template filters used with the ``|`` operator."""
import math
import random
import re
-import warnings
-from collections import namedtuple
+import typing
+import typing as t
+from collections import abc
from itertools import chain
from itertools import groupby
from markupsafe import escape
from markupsafe import Markup
-from markupsafe import soft_unicode
+from markupsafe import soft_str
-from ._compat import abc
-from ._compat import imap
-from ._compat import iteritems
-from ._compat import string_types
-from ._compat import text_type
+from .async_utils import async_variant
+from .async_utils import auto_aiter
+from .async_utils import auto_await
+from .async_utils import auto_to_list
from .exceptions import FilterArgumentError
from .runtime import Undefined
from .utils import htmlsafe_json_dumps
+from .utils import pass_context
+from .utils import pass_environment
+from .utils import pass_eval_context
from .utils import pformat
-from .utils import unicode_urlencode
+from .utils import url_quote
from .utils import urlize
-_word_re = re.compile(r"\w+", re.UNICODE)
-_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE)
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+ from .nodes import EvalContext
+ from .runtime import Context
+ from .sandbox import SandboxedEnvironment # noqa: F401
-
-def contextfilter(f):
- """Decorator for marking context dependent filters. The current
- :class:`Context` will be passed as first argument.
- """
- f.contextfilter = True
- return f
-
-
-def evalcontextfilter(f):
- """Decorator for marking eval-context dependent filters. An eval
- context object is passed as first argument. For more information
- about the eval context, see :ref:`eval-context`.
-
- .. versionadded:: 2.4
- """
- f.evalcontextfilter = True
- return f
+ class HasHTML(te.Protocol):
+ def __html__(self) -> str:
+ pass
-def environmentfilter(f):
- """Decorator for marking environment dependent filters. The current
- :class:`Environment` is passed to the filter as first argument.
- """
- f.environmentfilter = True
- return f
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+K = t.TypeVar("K")
+V = t.TypeVar("V")
-def ignore_case(value):
+def ignore_case(value: V) -> V:
"""For use as a postprocessor for :func:`make_attrgetter`. Converts strings
to lowercase and returns other types as-is."""
- return value.lower() if isinstance(value, string_types) else value
+ if isinstance(value, str):
+ return t.cast(V, value.lower())
+ return value
-def make_attrgetter(environment, attribute, postprocess=None, default=None):
+
+def make_attrgetter(
+ environment: "Environment",
+ attribute: t.Optional[t.Union[str, int]],
+ postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
+ default: t.Optional[t.Any] = None,
+) -> t.Callable[[t.Any], t.Any]:
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
- attribute = _prepare_attribute_parts(attribute)
+ parts = _prepare_attribute_parts(attribute)
- def attrgetter(item):
- for part in attribute:
+ def attrgetter(item: t.Any) -> t.Any:
+ for part in parts:
item = environment.getitem(item, part)
- if default and isinstance(item, Undefined):
+ if default is not None and isinstance(item, Undefined):
item = default
if postprocess is not None:
@@ -84,7 +80,11 @@ def make_attrgetter(environment, attribute, postprocess=None, default=None):
return attrgetter
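# Sketch of the helper above: dotted paths walk attributes/items and digit
# segments index into sequences. make_attrgetter is internal, but importable
# from jinja2.filters in the upstream package.
from jinja2 import Environment
from jinja2.filters import make_attrgetter

getter = make_attrgetter(Environment(), "user.emails.0")
print(getter({"user": {"emails": ["a@example.com"]}}))  # a@example.com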
-def make_multi_attrgetter(environment, attribute, postprocess=None):
+def make_multi_attrgetter(
+ environment: "Environment",
+ attribute: t.Optional[t.Union[str, int]],
+ postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
+) -> t.Callable[[t.Any], t.List[t.Any]]:
"""Returns a callable that looks up the given comma separated
attributes from a passed object with the rules of the environment.
Dots are allowed to access attributes of each attribute. Integer
@@ -95,17 +95,19 @@ def make_multi_attrgetter(environment, attribute, postprocess=None):
Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
"""
- attribute_parts = (
- attribute.split(",") if isinstance(attribute, string_types) else [attribute]
- )
- attribute = [
- _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
- ]
+ if isinstance(attribute, str):
+ split: t.Sequence[t.Union[str, int, None]] = attribute.split(",")
+ else:
+ split = [attribute]
+
+ parts = [_prepare_attribute_parts(item) for item in split]
- def attrgetter(item):
- items = [None] * len(attribute)
- for i, attribute_part in enumerate(attribute):
+ def attrgetter(item: t.Any) -> t.List[t.Any]:
+ items = [None] * len(parts)
+
+ for i, attribute_part in enumerate(parts):
item_i = item
+
for part in attribute_part:
item_i = environment.getitem(item_i, part)
@@ -113,28 +115,35 @@ def make_multi_attrgetter(environment, attribute, postprocess=None):
item_i = postprocess(item_i)
items[i] = item_i
+
return items
return attrgetter
-def _prepare_attribute_parts(attr):
+def _prepare_attribute_parts(
+ attr: t.Optional[t.Union[str, int]]
+) -> t.List[t.Union[str, int]]:
if attr is None:
return []
- elif isinstance(attr, string_types):
+
+ if isinstance(attr, str):
return [int(x) if x.isdigit() else x for x in attr.split(".")]
- else:
- return [attr]
+ return [attr]
-def do_forceescape(value):
+
+def do_forceescape(value: "t.Union[str, HasHTML]") -> Markup:
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, "__html__"):
- value = value.__html__()
- return escape(text_type(value))
+ value = t.cast("HasHTML", value).__html__()
+
+ return escape(str(value))
-def do_urlencode(value):
+def do_urlencode(
+ value: t.Union[str, t.Mapping[str, t.Any], t.Iterable[t.Tuple[str, t.Any]]]
+) -> str:
"""Quote data for use in a URL path or query using UTF-8.
Basic wrapper around :func:`urllib.parse.quote` when given a
@@ -150,22 +159,23 @@ def do_urlencode(value):
.. versionadded:: 2.7
"""
- if isinstance(value, string_types) or not isinstance(value, abc.Iterable):
- return unicode_urlencode(value)
+ if isinstance(value, str) or not isinstance(value, abc.Iterable):
+ return url_quote(value)
if isinstance(value, dict):
- items = iteritems(value)
+ items: t.Iterable[t.Tuple[str, t.Any]] = value.items()
else:
- items = iter(value)
+ items = value # type: ignore
- return u"&".join(
- "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True))
- for k, v in items
+ return "&".join(
+ f"{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}" for k, v in items
)
-@evalcontextfilter
-def do_replace(eval_ctx, s, old, new, count=None):
+@pass_eval_context
+def do_replace(
+ eval_ctx: "EvalContext", s: str, old: str, new: str, count: t.Optional[int] = None
+) -> str:
"""Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
@@ -182,8 +192,10 @@ def do_replace(eval_ctx, s, old, new, count=None):
"""
if count is None:
count = -1
+
if not eval_ctx.autoescape:
- return text_type(s).replace(text_type(old), text_type(new), count)
+ return str(s).replace(str(old), str(new), count)
+
if (
hasattr(old, "__html__")
or hasattr(new, "__html__")
@@ -191,22 +203,55 @@ def do_replace(eval_ctx, s, old, new, count=None):
):
s = escape(s)
else:
- s = soft_unicode(s)
- return s.replace(soft_unicode(old), soft_unicode(new), count)
+ s = soft_str(s)
+
+ return s.replace(soft_str(old), soft_str(new), count)
-def do_upper(s):
+def do_upper(s: str) -> str:
"""Convert a value to uppercase."""
- return soft_unicode(s).upper()
+ return soft_str(s).upper()
-def do_lower(s):
+def do_lower(s: str) -> str:
"""Convert a value to lowercase."""
- return soft_unicode(s).lower()
+ return soft_str(s).lower()
+
+
+def do_items(value: t.Union[t.Mapping[K, V], Undefined]) -> t.Iterator[t.Tuple[K, V]]:
+ """Return an iterator over the ``(key, value)`` items of a mapping.
+
+ ``x|items`` is the same as ``x.items()``, except if ``x`` is
+ undefined an empty iterator is returned.
+
+ This filter is useful if you expect the template to be rendered with
+ an implementation of Jinja in another programming language that does
+ not have a ``.items()`` method on its mapping type.
+
+ .. code-block:: html+jinja
+
+ <dl>
+ {% for key, value in my_dict|items %}
+ <dt>{{ key }}
+ <dd>{{ value }}
+ {% endfor %}
+ </dl>
+ .. versionadded:: 3.1
+ """
+ if isinstance(value, Undefined):
+ return
+
+ if not isinstance(value, abc.Mapping):
+ raise TypeError("Can only get item pairs from a mapping.")
+
+ yield from value.items()
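# Sketch of the new ``items`` filter, including the Undefined case; assumes
# the vendored module is importable as plain jinja2.
from jinja2 import Environment

env = Environment()
tmpl = env.from_string("{% for k, v in d|items %}{{ k }}={{ v }} {% endfor %}")
print(tmpl.render(d={"a": 1}))                               # "a=1 "
print(env.from_string("{{ missing|items|list }}").render())  # "[]"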
-@evalcontextfilter
-def do_xmlattr(_eval_ctx, d, autospace=True):
+
+@pass_eval_context
+def do_xmlattr(
+ eval_ctx: "EvalContext", d: t.Mapping[str, t.Any], autospace: bool = True
+) -> str:
"""Create an SGML/XML attribute string based on the items in a dict.
All values that are neither `none` nor `undefined` are automatically
escaped:
@@ -229,42 +274,52 @@ def do_xmlattr(_eval_ctx, d, autospace=True):
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
- rv = u" ".join(
- u'%s="%s"' % (escape(key), escape(value))
- for key, value in iteritems(d)
+ rv = " ".join(
+ f'{escape(key)}="{escape(value)}"'
+ for key, value in d.items()
if value is not None and not isinstance(value, Undefined)
)
+
if autospace and rv:
- rv = u" " + rv
- if _eval_ctx.autoescape:
+ rv = " " + rv
+
+ if eval_ctx.autoescape:
rv = Markup(rv)
+
return rv
-def do_capitalize(s):
+def do_capitalize(s: str) -> str:
"""Capitalize a value. The first character will be uppercase, all others
lowercase.
"""
- return soft_unicode(s).capitalize()
+ return soft_str(s).capitalize()
+
+
+_word_beginning_split_re = re.compile(r"([-\s({\[<]+)")
-def do_title(s):
+def do_title(s: str) -> str:
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
return "".join(
[
item[0].upper() + item[1:].lower()
- for item in _word_beginning_split_re.split(soft_unicode(s))
+ for item in _word_beginning_split_re.split(soft_str(s))
if item
]
)
-def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
- """Sort a dict and yield (key, value) pairs. Because python dicts are
- unsorted you may want to use this function to order them by either
- key or value:
+def do_dictsort(
+ value: t.Mapping[K, V],
+ case_sensitive: bool = False,
+ by: 'te.Literal["key", "value"]' = "key",
+ reverse: bool = False,
+) -> t.List[t.Tuple[K, V]]:
+ """Sort a dict and yield (key, value) pairs. Python dicts may not
+ be in the order you want to display them in, so sort them first.
.. sourcecode:: jinja
@@ -287,7 +342,7 @@ def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
else:
raise FilterArgumentError('You can only sort by either "key" or "value"')
- def sort_func(item):
+ def sort_func(item: t.Tuple[t.Any, t.Any]) -> t.Any:
value = item[pos]
if not case_sensitive:
@@ -298,8 +353,14 @@ def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
return sorted(value.items(), key=sort_func, reverse=reverse)
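# Sketch: dictsort returns (key, value) pairs, case-insensitively by default.
from jinja2 import Environment

env = Environment()
print(env.from_string("{{ {'b': 1, 'A': 2}|dictsort }}").render())
# [('A', 2), ('b', 1)]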
-@environmentfilter
-def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
+@pass_environment
+def do_sort(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ reverse: bool = False,
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.List[V]":
"""Sort an iterable using Python's :func:`sorted`.
.. sourcecode:: jinja
@@ -331,7 +392,7 @@ def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=N
.. sourcecode:: jinja
- {% for user users|sort(attribute="age,name") %}
+ {% for user in users|sort(attribute="age,name") %}
...
{% endfor %}
@@ -348,8 +409,13 @@ def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=N
return sorted(value, key=key_func, reverse=reverse)
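# Sketch of the multi-attribute sort from the corrected docstring above.
from jinja2 import Environment

env = Environment()
users = [
    {"name": "bob", "age": 30},
    {"name": "amy", "age": 30},
    {"name": "cal", "age": 25},
]
tmpl = env.from_string(
    "{% for u in users|sort(attribute='age,name') %}{{ u.name }} {% endfor %}"
)
print(tmpl.render(users=users))  # "cal amy bob "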
-@environmentfilter
-def do_unique(environment, value, case_sensitive=False, attribute=None):
+@pass_environment
+def do_unique(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.Iterator[V]":
"""Returns a list of unique items from the given iterable.
.. sourcecode:: jinja
@@ -376,7 +442,13 @@ def do_unique(environment, value, case_sensitive=False, attribute=None):
yield item
-def _min_or_max(environment, value, func, case_sensitive, attribute):
+def _min_or_max(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ func: "t.Callable[..., V]",
+ case_sensitive: bool,
+ attribute: t.Optional[t.Union[str, int]],
+) -> "t.Union[V, Undefined]":
it = iter(value)
try:
@@ -390,8 +462,13 @@ def _min_or_max(environment, value, func, case_sensitive, attribute):
return func(chain([first], it), key=key_func)
-@environmentfilter
-def do_min(environment, value, case_sensitive=False, attribute=None):
+@pass_environment
+def do_min(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.Union[V, Undefined]":
"""Return the smallest item from the sequence.
.. sourcecode:: jinja
@@ -405,8 +482,13 @@ def do_min(environment, value, case_sensitive=False, attribute=None):
return _min_or_max(environment, value, min, case_sensitive, attribute)
-@environmentfilter
-def do_max(environment, value, case_sensitive=False, attribute=None):
+@pass_environment
+def do_max(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.Union[V, Undefined]":
"""Return the largest item from the sequence.
.. sourcecode:: jinja
@@ -420,7 +502,11 @@ def do_max(environment, value, case_sensitive=False, attribute=None):
return _min_or_max(environment, value, max, case_sensitive, attribute)
-def do_default(value, default_value=u"", boolean=False):
+def do_default(
+ value: V,
+ default_value: V = "", # type: ignore
+ boolean: bool = False,
+) -> V:
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
@@ -445,11 +531,17 @@ def do_default(value, default_value=u"", boolean=False):
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
+
return value
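# Sketch: default() only replaces undefined values unless boolean=true.
from jinja2 import Environment

env = Environment()
print(env.from_string("{{ missing|default('n/a') }}").render())   # n/a
print(env.from_string("{{ ''|default('n/a') }}").render())        # (empty)
print(env.from_string("{{ ''|default('n/a', true) }}").render())  # n/a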
-@evalcontextfilter
-def do_join(eval_ctx, value, d=u"", attribute=None):
+@pass_eval_context
+def sync_do_join(
+ eval_ctx: "EvalContext",
+ value: t.Iterable,
+ d: str = "",
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> str:
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string by
default; you can define it with the optional parameter:
@@ -472,39 +564,54 @@ def do_join(eval_ctx, value, d=u"", attribute=None):
The `attribute` parameter was added.
"""
if attribute is not None:
- value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
+ value = map(make_attrgetter(eval_ctx.environment, attribute), value)
# no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
- return text_type(d).join(imap(text_type, value))
+ return str(d).join(map(str, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
if not hasattr(d, "__html__"):
value = list(value)
do_escape = False
+
for idx, item in enumerate(value):
if hasattr(item, "__html__"):
do_escape = True
else:
- value[idx] = text_type(item)
+ value[idx] = str(item)
+
if do_escape:
d = escape(d)
else:
- d = text_type(d)
+ d = str(d)
+
return d.join(value)
# no html involved, do normal joining
- return soft_unicode(d).join(imap(soft_unicode, value))
+ return soft_str(d).join(map(soft_str, value))
+
+@async_variant(sync_do_join) # type: ignore
+async def do_join(
+ eval_ctx: "EvalContext",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ d: str = "",
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> str:
+ return sync_do_join(eval_ctx, await auto_to_list(value), d, attribute)
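# Sketch: join with a separator and with the attribute shortcut.
from jinja2 import Environment

env = Environment()
print(env.from_string("{{ [1, 2, 3]|join('|') }}").render())  # 1|2|3
users = [{"username": "a"}, {"username": "b"}]
print(env.from_string("{{ users|join(', ', attribute='username') }}").render(users=users))
# a, b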
-def do_center(value, width=80):
+
+def do_center(value: str, width: int = 80) -> str:
"""Centers the value in a field of a given width."""
- return text_type(value).center(width)
+ return soft_str(value).center(width)
-@environmentfilter
-def do_first(environment, seq):
+@pass_environment
+def sync_do_first(
+ environment: "Environment", seq: "t.Iterable[V]"
+) -> "t.Union[V, Undefined]":
"""Return the first item of a sequence."""
try:
return next(iter(seq))
@@ -512,10 +619,21 @@ def do_first(environment, seq):
return environment.undefined("No first item, sequence was empty.")
-@environmentfilter
-def do_last(environment, seq):
- """
- Return the last item of a sequence.
+@async_variant(sync_do_first) # type: ignore
+async def do_first(
+ environment: "Environment", seq: "t.Union[t.AsyncIterable[V], t.Iterable[V]]"
+) -> "t.Union[V, Undefined]":
+ try:
+ return await auto_aiter(seq).__anext__()
+ except StopAsyncIteration:
+ return environment.undefined("No first item, sequence was empty.")
+
+
+@pass_environment
+def do_last(
+ environment: "Environment", seq: "t.Reversible[V]"
+) -> "t.Union[V, Undefined]":
+ """Return the last item of a sequence.
Note: Does not work with generators. You may want to explicitly
convert it to a list:
@@ -530,8 +648,11 @@ def do_last(environment, seq):
return environment.undefined("No last item, sequence was empty.")
-@contextfilter
-def do_random(context, seq):
+# No async do_last, it may not be safe in async mode.
+
+
+@pass_context
+def do_random(context: "Context", seq: "t.Sequence[V]") -> "t.Union[V, Undefined]":
"""Return a random item from the sequence."""
try:
return random.choice(seq)
@@ -539,108 +660,151 @@ def do_random(context, seq):
return context.environment.undefined("No random item, sequence was empty.")
-def do_filesizeformat(value, binary=False):
+def do_filesizeformat(value: t.Union[str, float, int], binary: bool = False) -> str:
"""Format the value like a 'human-readable' file size (i.e. 13 kB,
4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
Giga, etc.), if the second parameter is set to `True` the binary
prefixes are used (Mebi, Gibi).
"""
bytes = float(value)
- base = binary and 1024 or 1000
+ base = 1024 if binary else 1000
prefixes = [
- (binary and "KiB" or "kB"),
- (binary and "MiB" or "MB"),
- (binary and "GiB" or "GB"),
- (binary and "TiB" or "TB"),
- (binary and "PiB" or "PB"),
- (binary and "EiB" or "EB"),
- (binary and "ZiB" or "ZB"),
- (binary and "YiB" or "YB"),
+ ("KiB" if binary else "kB"),
+ ("MiB" if binary else "MB"),
+ ("GiB" if binary else "GB"),
+ ("TiB" if binary else "TB"),
+ ("PiB" if binary else "PB"),
+ ("EiB" if binary else "EB"),
+ ("ZiB" if binary else "ZB"),
+ ("YiB" if binary else "YB"),
]
+
if bytes == 1:
return "1 Byte"
elif bytes < base:
- return "%d Bytes" % bytes
+ return f"{int(bytes)} Bytes"
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
+
if bytes < unit:
- return "%.1f %s" % ((base * bytes / unit), prefix)
- return "%.1f %s" % ((base * bytes / unit), prefix)
+ return f"{base * bytes / unit:.1f} {prefix}"
+ return f"{base * bytes / unit:.1f} {prefix}"
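# Worked example of the decimal vs. binary prefixes handled above.
from jinja2 import Environment

env = Environment()
print(env.from_string("{{ 1500000|filesizeformat }}").render())        # 1.5 MB
print(env.from_string("{{ 1500000|filesizeformat(true) }}").render())  # 1.4 MiB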
-def do_pprint(value, verbose=False):
- """Pretty print a variable. Useful for debugging.
- With Jinja 1.2 onwards you can pass it a parameter. If this parameter
- is truthy the output will be more verbose (this requires `pretty`)
- """
- return pformat(value, verbose=verbose)
+def do_pprint(value: t.Any) -> str:
+ """Pretty print a variable. Useful for debugging."""
+ return pformat(value)
+
+
+_uri_scheme_re = re.compile(r"^([\w.+-]{2,}:(/){0,2})$")
-@evalcontextfilter
+@pass_eval_context
def do_urlize(
- eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
-):
- """Converts URLs in plain text into clickable links.
+ eval_ctx: "EvalContext",
+ value: str,
+ trim_url_limit: t.Optional[int] = None,
+ nofollow: bool = False,
+ target: t.Optional[str] = None,
+ rel: t.Optional[str] = None,
+ extra_schemes: t.Optional[t.Iterable[str]] = None,
+) -> str:
+ """Convert URLs in text into clickable links.
+
+ This may not recognize links in some situations. Usually, a more
+ comprehensive formatter, such as a Markdown library, is a better
+ choice.
+
+ Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
+ addresses. Links with trailing punctuation (periods, commas, closing
+ parentheses) and leading punctuation (opening parentheses) are
+ recognized excluding the punctuation. Email addresses that include
+ header fields are not recognized (for example,
+ ``mailto:address@example.com?cc=copy@example.com``).
+
+ :param value: Original text containing URLs to link.
+ :param trim_url_limit: Shorten displayed URL values to this length.
+ :param nofollow: Add the ``rel=nofollow`` attribute to links.
+ :param target: Add the ``target`` attribute to links.
+ :param rel: Add the ``rel`` attribute to links.
+ :param extra_schemes: Recognize URLs that start with these schemes
+ in addition to the default behavior. Defaults to
+ ``env.policies["urlize.extra_schemes"]``, which defaults to no
+ extra schemes.
+
+ .. versionchanged:: 3.0
+ The ``extra_schemes`` parameter was added.
+
+ .. versionchanged:: 3.0
+ Generate ``https://`` links for URLs without a scheme.
+
+ .. versionchanged:: 3.0
+ The parsing rules were updated. Recognize email addresses with
+ or without the ``mailto:`` scheme. Validate IP addresses. Ignore
+ parentheses and brackets in more cases.
+
+ .. versionchanged:: 2.8
+ The ``target`` parameter was added.
+ """
+ policies = eval_ctx.environment.policies
+ rel_parts = set((rel or "").split())
- If you pass the filter an additional integer it will shorten the urls
- to that number. Also a third argument exists that makes the urls
- "nofollow":
+ if nofollow:
+ rel_parts.add("nofollow")
- .. sourcecode:: jinja
+ rel_parts.update((policies["urlize.rel"] or "").split())
+ rel = " ".join(sorted(rel_parts)) or None
- {{ mytext|urlize(40, true) }}
- links are shortened to 40 chars and defined with rel="nofollow"
+ if target is None:
+ target = policies["urlize.target"]
- If *target* is specified, the ``target`` attribute will be added to the
- ``<a>`` tag:
+ if extra_schemes is None:
+ extra_schemes = policies["urlize.extra_schemes"] or ()
- .. sourcecode:: jinja
+ for scheme in extra_schemes:
+ if _uri_scheme_re.fullmatch(scheme) is None:
+ raise FilterArgumentError(f"{scheme!r} is not a valid URI scheme prefix.")
- {{ mytext|urlize(40, target='_blank') }}
+ rv = urlize(
+ value,
+ trim_url_limit=trim_url_limit,
+ rel=rel,
+ target=target,
+ extra_schemes=extra_schemes,
+ )
- .. versionchanged:: 2.8+
- The *target* parameter was added.
- """
- policies = eval_ctx.environment.policies
- rel = set((rel or "").split() or [])
- if nofollow:
- rel.add("nofollow")
- rel.update((policies["urlize.rel"] or "").split())
- if target is None:
- target = policies["urlize.target"]
- rel = " ".join(sorted(rel)) or None
- rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
+
return rv
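# Sketch of the rewritten urlize filter; the exact markup depends on the
# environment's urlize.* policies, so the output below is indicative only.
from jinja2 import Environment

env = Environment()
print(env.from_string("{{ 'see https://example.com for docs'|urlize }}").render())
# e.g. see <a href="https://example.com" rel="noopener">https://example.com</a> for docs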
-def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
+def do_indent(
+ s: str, width: t.Union[int, str] = 4, first: bool = False, blank: bool = False
+) -> str:
"""Return a copy of the string with each line indented by 4 spaces. The
first line and blank lines are not indented by default.
- :param width: Number of spaces to indent by.
+ :param width: Number of spaces, or a string, to indent by.
:param first: Don't skip indenting the first line.
:param blank: Don't skip indenting empty lines.
+ .. versionchanged:: 3.0
+ ``width`` can be a string.
+
.. versionchanged:: 2.10
Blank lines are not indented by default.
Rename the ``indentfirst`` argument to ``first``.
"""
- if indentfirst is not None:
- warnings.warn(
- "The 'indentfirst' argument is renamed to 'first' and will"
- " be removed in version 3.0.",
- DeprecationWarning,
- stacklevel=2,
- )
- first = indentfirst
+ if isinstance(width, str):
+ indention = width
+ else:
+ indention = " " * width
- indention = u" " * width
- newline = u"\n"
+ newline = "\n"
if isinstance(s, Markup):
indention = Markup(indention)
@@ -665,8 +829,15 @@ def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
return rv
-@environmentfilter
-def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
+@pass_environment
+def do_truncate(
+ env: "Environment",
+ s: str,
+ length: int = 255,
+ killwords: bool = False,
+ end: str = "...",
+ leeway: t.Optional[int] = None,
+) -> str:
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
@@ -692,25 +863,29 @@ def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
"""
if leeway is None:
leeway = env.policies["truncate.leeway"]
- assert length >= len(end), "expected length >= %s, got %s" % (len(end), length)
- assert leeway >= 0, "expected leeway >= 0, got %s" % leeway
+
+ assert length >= len(end), f"expected length >= {len(end)}, got {length}"
+ assert leeway >= 0, f"expected leeway >= 0, got {leeway}"
+
if len(s) <= length + leeway:
return s
+
if killwords:
return s[: length - len(end)] + end
+
result = s[: length - len(end)].rsplit(" ", 1)[0]
return result + end
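# Sketch: truncate respects word boundaries unless killwords=true, and the
# default leeway policy (5) leaves short overshoots untouched.
from jinja2 import Environment

env = Environment()
t = "foo bar baz qux"
print(env.from_string("{{ t|truncate(9) }}").render(t=t))        # foo...
print(env.from_string("{{ t|truncate(9, true) }}").render(t=t))  # foo ba...
print(env.from_string("{{ t|truncate(11) }}").render(t=t))       # foo bar baz qux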
-@environmentfilter
+@pass_environment
def do_wordwrap(
- environment,
- s,
- width=79,
- break_long_words=True,
- wrapstring=None,
- break_on_hyphens=True,
-):
+ environment: "Environment",
+ s: str,
+ width: int = 79,
+ break_long_words: bool = True,
+ wrapstring: t.Optional[str] = None,
+ break_on_hyphens: bool = True,
+) -> str:
"""Wrap a string to the given width. Existing newlines are treated
as paragraphs to be wrapped separately.
@@ -732,10 +907,9 @@ def do_wordwrap(
.. versionchanged:: 2.7
Added the ``wrapstring`` parameter.
"""
-
import textwrap
- if not wrapstring:
+ if wrapstring is None:
wrapstring = environment.newline_sequence
# textwrap.wrap doesn't consider existing newlines when wrapping.
@@ -759,12 +933,15 @@ def do_wordwrap(
)
-def do_wordcount(s):
+_word_re = re.compile(r"\w+")
+
+
+def do_wordcount(s: str) -> int:
"""Count the words in that string."""
- return len(_word_re.findall(soft_unicode(s)))
+ return len(_word_re.findall(soft_str(s)))
-def do_int(value, default=0, base=10):
+def do_int(value: t.Any, default: int = 0, base: int = 10) -> int:
"""Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter. You
@@ -774,8 +951,9 @@ def do_int(value, default=0, base=10):
The base is ignored for decimal numbers and non-string values.
"""
try:
- if isinstance(value, string_types):
+ if isinstance(value, str):
return int(value, base)
+
return int(value)
except (TypeError, ValueError):
# this quirk is necessary so that "42.23"|int gives 42.
@@ -785,7 +963,7 @@ def do_int(value, default=0, base=10):
return default
-def do_float(value, default=0.0):
+def do_float(value: t.Any, default: float = 0.0) -> float:
"""Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter.
@@ -796,7 +974,7 @@ def do_float(value, default=0.0):
return default
-def do_format(value, *args, **kwargs):
+def do_format(value: str, *args: t.Any, **kwargs: t.Any) -> str:
"""Apply the given values to a `printf-style`_ format string, like
``string % values``.
@@ -820,22 +998,26 @@ def do_format(value, *args, **kwargs):
raise FilterArgumentError(
"can't handle positional and keyword arguments at the same time"
)
- return soft_unicode(value) % (kwargs or args)
+ return soft_str(value) % (kwargs or args)
-def do_trim(value, chars=None):
+
+def do_trim(value: str, chars: t.Optional[str] = None) -> str:
"""Strip leading and trailing characters, by default whitespace."""
- return soft_unicode(value).strip(chars)
+ return soft_str(value).strip(chars)
-def do_striptags(value):
+def do_striptags(value: "t.Union[str, HasHTML]") -> str:
"""Strip SGML/XML tags and replace adjacent whitespace by one space."""
if hasattr(value, "__html__"):
- value = value.__html__()
- return Markup(text_type(value)).striptags()
+ value = t.cast("HasHTML", value).__html__()
+
+ return Markup(str(value)).striptags()
-def do_slice(value, slices, fill_with=None):
+def sync_do_slice(
+ value: "t.Collection[V]", slices: int, fill_with: "t.Optional[V]" = None
+) -> "t.Iterator[t.List[V]]":
"""Slice an iterator and return a list of lists containing
those items. Useful if you want to create a div containing
three ul tags that represent columns:
@@ -860,18 +1042,34 @@ def do_slice(value, slices, fill_with=None):
items_per_slice = length // slices
slices_with_extra = length % slices
offset = 0
+
for slice_number in range(slices):
start = offset + slice_number * items_per_slice
+
if slice_number < slices_with_extra:
offset += 1
+
end = offset + (slice_number + 1) * items_per_slice
tmp = seq[start:end]
+
if fill_with is not None and slice_number >= slices_with_extra:
tmp.append(fill_with)
+
yield tmp
-def do_batch(value, linecount, fill_with=None):
+@async_variant(sync_do_slice) # type: ignore
+async def do_slice(
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ slices: int,
+ fill_with: t.Optional[t.Any] = None,
+) -> "t.Iterator[t.List[V]]":
+ return sync_do_slice(await auto_to_list(value), slices, fill_with)
+
+
+def do_batch(
+ value: "t.Iterable[V]", linecount: int, fill_with: "t.Optional[V]" = None
+) -> "t.Iterator[t.List[V]]":
"""
A filter that batches items. It works pretty much like `slice`
just the other way round. It returns a list of lists with the
@@ -890,19 +1088,27 @@ def do_batch(value, linecount, fill_with=None):
{%- endfor %}
</table>
"""
- tmp = []
+ tmp: "t.List[V]" = []
+
for item in value:
if len(tmp) == linecount:
yield tmp
tmp = []
+
tmp.append(item)
+
if tmp:
if fill_with is not None and len(tmp) < linecount:
tmp += [fill_with] * (linecount - len(tmp))
+
yield tmp
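# Sketch contrasting batch and slice: batch yields rows of n items, slice
# yields n columns.
from jinja2 import Environment

env = Environment()
items = [1, 2, 3, 4, 5]
print(env.from_string("{{ items|batch(2, 0)|list }}").render(items=items))
# [[1, 2], [3, 4], [5, 0]]
print(env.from_string("{{ items|slice(2)|list }}").render(items=items))
# [[1, 2, 3], [4, 5]]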
-def do_round(value, precision=0, method="common"):
+def do_round(
+ value: float,
+ precision: int = 0,
+ method: 'te.Literal["common", "ceil", "floor"]' = "common",
+) -> float:
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
@@ -930,24 +1136,35 @@ def do_round(value, precision=0, method="common"):
"""
if method not in {"common", "ceil", "floor"}:
raise FilterArgumentError("method must be common, ceil or floor")
+
if method == "common":
return round(value, precision)
+
func = getattr(math, method)
- return func(value * (10 ** precision)) / (10 ** precision)
+ return t.cast(float, func(value * (10**precision)) / (10**precision))
+
+
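# Sketch of the rounding methods; 'floor' and 'ceil' apply before scaling back
# by the requested precision.
from jinja2 import Environment

env = Environment()
print(env.from_string("{{ 42.55|round }}").render())              # 43.0
print(env.from_string("{{ 42.55|round(1, 'floor') }}").render())  # 42.5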
+class _GroupTuple(t.NamedTuple):
+ grouper: t.Any
+ list: t.List
+ # Use the regular tuple repr to hide this subclass if users print
+ # out the value during debugging.
+ def __repr__(self) -> str:
+ return tuple.__repr__(self)
-# Use a regular tuple repr here. This is what we did in the past and we
-# really want to hide this custom type as much as possible. In particular
-# we do not want to accidentally expose an auto generated repr in case
-# people start to print this out in comments or something similar for
-# debugging.
-_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
-_GroupTuple.__repr__ = tuple.__repr__
-_GroupTuple.__str__ = tuple.__str__
+ def __str__(self) -> str:
+ return tuple.__str__(self)
-@environmentfilter
-def do_groupby(environment, value, attribute):
+@pass_environment
+def sync_do_groupby(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ attribute: t.Union[str, int],
+ default: t.Optional[t.Any] = None,
+ case_sensitive: bool = False,
+) -> "t.List[_GroupTuple]":
"""Group a sequence of objects by an attribute using Python's
:func:`itertools.groupby`. The attribute can use dot notation for
nested access, like ``"address.city"``. Unlike Python's ``groupby``,
@@ -978,18 +1195,86 @@ def do_groupby(environment, value, attribute):
<li>{{ group.grouper }}: {{ group.list|join(", ") }}
{% endfor %}</ul>
+ You can specify a ``default`` value to use if an object in the list
+ does not have the given attribute.
+
+ .. sourcecode:: jinja
+
+ <ul>{% for city, items in users|groupby("city", default="NY") %}
+ <li>{{ city }}: {{ items|map(attribute="name")|join(", ") }}</li>
+ {% endfor %}</ul>
+
+ Like the :func:`~jinja-filters.sort` filter, sorting and grouping is
+ case-insensitive by default. The ``key`` for each group will have
+ the case of the first item in that group of values. For example, if
+ a list of users has cities ``["CA", "NY", "ca"]``, the "CA" group
+ will have two values. This can be disabled by passing
+ ``case_sensitive=True``.
+
+ .. versionchanged:: 3.1
+ Added the ``case_sensitive`` parameter. Sorting and grouping is
+ case-insensitive by default, matching other filters that do
+ comparisons.
+
+ .. versionchanged:: 3.0
+ Added the ``default`` parameter.
+
.. versionchanged:: 2.6
The attribute supports dot notation for nested access.
"""
- expr = make_attrgetter(environment, attribute)
- return [
+ expr = make_attrgetter(
+ environment,
+ attribute,
+ postprocess=ignore_case if not case_sensitive else None,
+ default=default,
+ )
+ out = [
_GroupTuple(key, list(values))
for key, values in groupby(sorted(value, key=expr), expr)
]
+ if not case_sensitive:
+ # Return the real key from the first value instead of the lowercase key.
+ output_expr = make_attrgetter(environment, attribute, default=default)
+ out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]
+
+ return out
+
+
+@async_variant(sync_do_groupby) # type: ignore
+async def do_groupby(
+ environment: "Environment",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ attribute: t.Union[str, int],
+ default: t.Optional[t.Any] = None,
+ case_sensitive: bool = False,
+) -> "t.List[_GroupTuple]":
+ expr = make_attrgetter(
+ environment,
+ attribute,
+ postprocess=ignore_case if not case_sensitive else None,
+ default=default,
+ )
+ out = [
+ _GroupTuple(key, await auto_to_list(values))
+ for key, values in groupby(sorted(await auto_to_list(value), key=expr), expr)
+ ]
+
+ if not case_sensitive:
+ # Return the real key from the first value instead of the lowercase key.
+ output_expr = make_attrgetter(environment, attribute, default=default)
+ out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]
-@environmentfilter
-def do_sum(environment, iterable, attribute=None, start=0):
+ return out
+
+
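# Sketch of the new default= and case-insensitive grouping behaviour.
from jinja2 import Environment

env = Environment()
users = [{"city": "CA"}, {"city": "NY"}, {"city": "ca"}, {}]
tmpl = env.from_string(
    "{% for city, people in users|groupby('city', default='??') %}"
    "{{ city }}:{{ people|length }} {% endfor %}"
)
print(tmpl.render(users=users))  # "??:1 CA:2 NY:1 "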
+@pass_environment
+def sync_do_sum(
+ environment: "Environment",
+ iterable: "t.Iterable[V]",
+ attribute: t.Optional[t.Union[str, int]] = None,
+ start: V = 0, # type: ignore
+) -> V:
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
@@ -1001,52 +1286,93 @@ def do_sum(environment, iterable, attribute=None, start=0):
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
- The `attribute` parameter was added to allow suming up over
- attributes. Also the `start` parameter was moved on to the right.
+ The ``attribute`` parameter was added to allow summing up over
+ attributes. Also the ``start`` parameter was moved on to the right.
"""
if attribute is not None:
- iterable = imap(make_attrgetter(environment, attribute), iterable)
- return sum(iterable, start)
+ iterable = map(make_attrgetter(environment, attribute), iterable)
+
+ return sum(iterable, start) # type: ignore[no-any-return, call-overload]
+
+
+@async_variant(sync_do_sum) # type: ignore
+async def do_sum(
+ environment: "Environment",
+ iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ attribute: t.Optional[t.Union[str, int]] = None,
+ start: V = 0, # type: ignore
+) -> V:
+ rv = start
+
+ if attribute is not None:
+ func = make_attrgetter(environment, attribute)
+ else:
+
+ def func(x: V) -> V:
+ return x
+ async for item in auto_aiter(iterable):
+ rv += func(item)
-def do_list(value):
+ return rv
+
+
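
For reference, the attribute form of the sum filter reduces to mapping an attribute getter over the sequence before summing. A rough standard-library equivalent (the Item type and prices are invented for illustration; the real filter goes through environment-aware attribute lookup):

    from collections import namedtuple
    from operator import attrgetter

    Item = namedtuple("Item", "price")     # hypothetical record type
    items = [Item(20), Item(15), Item(7)]

    # {{ items|sum(attribute='price') }} boils down to roughly this,
    # with `start` defaulting to 0.
    total = sum(map(attrgetter("price"), items), 0)
    print(total)  # 42
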
+def sync_do_list(value: "t.Iterable[V]") -> "t.List[V]":
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
-def do_mark_safe(value):
+@async_variant(sync_do_list) # type: ignore
+async def do_list(value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]") -> "t.List[V]":
+ return await auto_to_list(value)
+
+
+def do_mark_safe(value: str) -> Markup:
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
-def do_mark_unsafe(value):
+def do_mark_unsafe(value: str) -> str:
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
- return text_type(value)
+ return str(value)
-def do_reverse(value):
+@typing.overload
+def do_reverse(value: str) -> str:
+ ...
+
+
+@typing.overload
+def do_reverse(value: "t.Iterable[V]") -> "t.Iterable[V]":
+ ...
+
+
+def do_reverse(value: t.Union[str, t.Iterable[V]]) -> t.Union[str, t.Iterable[V]]:
"""Reverse the object or return an iterator that iterates over it the other
way round.
"""
- if isinstance(value, string_types):
+ if isinstance(value, str):
return value[::-1]
+
try:
- return reversed(value)
+ return reversed(value) # type: ignore
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
- except TypeError:
- raise FilterArgumentError("argument must be iterable")
+ except TypeError as e:
+ raise FilterArgumentError("argument must be iterable") from e
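
The try/except chain above exists because reversed() only accepts sequences (objects with __reversed__, or __len__ plus __getitem__); a plain generator triggers the list fallback. A quick illustration:

    gen = (n for n in range(3))

    try:
        reversed(gen)          # generators are not reversible
    except TypeError:
        rv = list(gen)         # same fallback as the filter above
        rv.reverse()

    print(rv)  # [2, 1, 0]
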
-@environmentfilter
-def do_attr(environment, obj, name):
+@pass_environment
+def do_attr(
+ environment: "Environment", obj: t.Any, name: str
+) -> t.Union[Undefined, t.Any]:
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo.bar``, except that an attribute is always returned and items are
not looked up.
@@ -1063,16 +1389,39 @@ def do_attr(environment, obj, name):
except AttributeError:
pass
else:
- if environment.sandboxed and not environment.is_safe_attribute(
- obj, name, value
- ):
- return environment.unsafe_undefined(obj, name)
+ if environment.sandboxed:
+ environment = t.cast("SandboxedEnvironment", environment)
+
+ if not environment.is_safe_attribute(obj, name, value):
+ return environment.unsafe_undefined(obj, name)
+
return value
+
return environment.undefined(obj=obj, name=name)
-@contextfilter
-def do_map(*args, **kwargs):
+@typing.overload
+def sync_do_map(
+ context: "Context", value: t.Iterable, name: str, *args: t.Any, **kwargs: t.Any
+) -> t.Iterable:
+ ...
+
+
+@typing.overload
+def sync_do_map(
+ context: "Context",
+ value: t.Iterable,
+ *,
+ attribute: str = ...,
+ default: t.Optional[t.Any] = None,
+) -> t.Iterable:
+ ...
+
+
+@pass_context
+def sync_do_map(
+ context: "Context", value: t.Iterable, *args: t.Any, **kwargs: t.Any
+) -> t.Iterable:
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
@@ -1104,7 +1453,7 @@ def do_map(*args, **kwargs):
.. code-block:: python
(u.username for u in users)
- (u.username or "Anonymous" for u in users)
+ (getattr(u, "username", "Anonymous") for u in users)
(do_lower(x) for x in titles)
.. versionchanged:: 2.11.0
@@ -1112,14 +1461,53 @@ def do_map(*args, **kwargs):
.. versionadded:: 2.7
"""
- seq, func = prepare_map(args, kwargs)
- if seq:
- for item in seq:
+ if value:
+ func = prepare_map(context, args, kwargs)
+
+ for item in value:
yield func(item)
-@contextfilter
-def do_select(*args, **kwargs):
+@typing.overload
+def do_map(
+ context: "Context",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ name: str,
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> t.Iterable:
+ ...
+
+
+@typing.overload
+def do_map(
+ context: "Context",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ *,
+ attribute: str = ...,
+ default: t.Optional[t.Any] = None,
+) -> t.Iterable:
+ ...
+
+
+@async_variant(sync_do_map) # type: ignore
+async def do_map(
+ context: "Context",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> t.AsyncIterable:
+ if value:
+ func = prepare_map(context, args, kwargs)
+
+ async for item in auto_aiter(value):
+ yield await auto_await(func(item))
+
+
+@pass_context
+def sync_do_select(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and only selecting the objects with the test succeeding.
@@ -1144,11 +1532,23 @@ def do_select(*args, **kwargs):
.. versionadded:: 2.7
"""
- return select_or_reject(args, kwargs, lambda x: x, False)
+ return select_or_reject(context, value, args, kwargs, lambda x: x, False)
+
+
+@async_variant(sync_do_select) # type: ignore
+async def do_select(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: x, False)
-@contextfilter
-def do_reject(*args, **kwargs):
+@pass_context
+def sync_do_reject(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and rejecting the objects with the test succeeding.
@@ -1168,11 +1568,23 @@ def do_reject(*args, **kwargs):
.. versionadded:: 2.7
"""
- return select_or_reject(args, kwargs, lambda x: not x, False)
+ return select_or_reject(context, value, args, kwargs, lambda x: not x, False)
-@contextfilter
-def do_selectattr(*args, **kwargs):
+@async_variant(sync_do_reject) # type: ignore
+async def do_reject(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: not x, False)
+
+
+@pass_context
+def sync_do_selectattr(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and only selecting the objects with the
test succeeding.
@@ -1196,11 +1608,23 @@ def do_selectattr(*args, **kwargs):
.. versionadded:: 2.7
"""
- return select_or_reject(args, kwargs, lambda x: x, True)
+ return select_or_reject(context, value, args, kwargs, lambda x: x, True)
+
+@async_variant(sync_do_selectattr) # type: ignore
+async def do_selectattr(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: x, True)
-@contextfilter
-def do_rejectattr(*args, **kwargs):
+
+@pass_context
+def sync_do_rejectattr(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and rejecting the objects with the test
succeeding.
@@ -1222,105 +1646,138 @@ def do_rejectattr(*args, **kwargs):
.. versionadded:: 2.7
"""
- return select_or_reject(args, kwargs, lambda x: not x, True)
-
+ return select_or_reject(context, value, args, kwargs, lambda x: not x, True)
-@evalcontextfilter
-def do_tojson(eval_ctx, value, indent=None):
- """Dumps a structure to JSON so that it's safe to use in ``<script>``
- tags. It accepts the same arguments and returns a JSON string. Note that
- this is available in templates through the ``|tojson`` filter which will
- also mark the result as safe. Due to how this function escapes certain
- characters this is safe even if used outside of ``<script>`` tags.
- The following characters are escaped in strings:
+@async_variant(sync_do_rejectattr) # type: ignore
+async def do_rejectattr(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: not x, True)
- - ``<``
- - ``>``
- - ``&``
- - ``'``
- This makes it safe to embed such strings in any place in HTML with the
- notable exception of double quoted attributes. In that case single
- quote your attributes or HTML escape it in addition.
+@pass_eval_context
+def do_tojson(
+ eval_ctx: "EvalContext", value: t.Any, indent: t.Optional[int] = None
+) -> Markup:
+ """Serialize an object to a string of JSON, and mark it safe to
+ render in HTML. This filter is only for use in HTML documents.
- The indent parameter can be used to enable pretty printing. Set it to
- the number of spaces that the structures should be indented with.
+ The returned string is safe to render in HTML documents and
+ ``<script>`` tags. The exception is in HTML attributes that are
+ double quoted; either use single quotes or the ``|forceescape``
+ filter.
- Note that this filter is for use in HTML contexts only.
+ :param value: The object to serialize to JSON.
+ :param indent: The ``indent`` parameter passed to ``dumps``, for
+ pretty-printing the value.
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
- dumper = policies["json.dumps_function"]
- options = policies["json.dumps_kwargs"]
+ dumps = policies["json.dumps_function"]
+ kwargs = policies["json.dumps_kwargs"]
+
if indent is not None:
- options = dict(options)
- options["indent"] = indent
- return htmlsafe_json_dumps(value, dumper=dumper, **options)
+ kwargs = kwargs.copy()
+ kwargs["indent"] = indent
+ return htmlsafe_json_dumps(value, dumps=dumps, **kwargs)
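
The policy-driven call above delegates the escaping to htmlsafe_json_dumps. As a rough sketch of the effect (not the real implementation, which lives in jinja2.utils and honors the configured dumps function), the HTML-significant characters are replaced with Unicode escapes before the result is marked safe:

    import json
    from markupsafe import Markup

    def htmlsafe_dumps_sketch(value, indent=None):
        # Illustrative only: escape <, >, & and ' so the JSON can be
        # embedded in <script> blocks and single-quoted attributes.
        rv = (
            json.dumps(value, indent=indent)
            .replace("<", "\\u003c")
            .replace(">", "\\u003e")
            .replace("&", "\\u0026")
            .replace("'", "\\u0027")
        )
        return Markup(rv)

    print(htmlsafe_dumps_sketch({"msg": "</script>'"}))
    # {"msg": "\u003c/script\u003e\u0027"}
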
-def prepare_map(args, kwargs):
- context = args[0]
- seq = args[1]
- default = None
- if len(args) == 2 and "attribute" in kwargs:
+def prepare_map(
+ context: "Context", args: t.Tuple, kwargs: t.Dict[str, t.Any]
+) -> t.Callable[[t.Any], t.Any]:
+ if not args and "attribute" in kwargs:
attribute = kwargs.pop("attribute")
default = kwargs.pop("default", None)
+
if kwargs:
raise FilterArgumentError(
- "Unexpected keyword argument %r" % next(iter(kwargs))
+ f"Unexpected keyword argument {next(iter(kwargs))!r}"
)
+
func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
- name = args[2]
- args = args[3:]
+ name = args[0]
+ args = args[1:]
except LookupError:
- raise FilterArgumentError("map requires a filter argument")
+ raise FilterArgumentError("map requires a filter argument") from None
- def func(item):
+ def func(item: t.Any) -> t.Any:
return context.environment.call_filter(
name, item, args, kwargs, context=context
)
- return seq, func
+ return func
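
prepare_map now receives the context and the already-separated value, so only two calling conventions remain: attribute lookup via make_attrgetter, or a named filter applied per item. A small end-to-end example using the public API (the names and data are invented; the expected output assumes dict items resolve through Jinja's attribute-then-item lookup):

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(
        '{{ users|map(attribute="name", default="n/a")|join(", ") }} / '
        '{{ words|map("upper")|join(", ") }}'
    )
    print(tmpl.render(users=[{"name": "alice"}, {}], words=["a", "b"]))
    # alice, n/a / A, B
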
-def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
- context = args[0]
- seq = args[1]
+def prepare_select_or_reject(
+ context: "Context",
+ args: t.Tuple,
+ kwargs: t.Dict[str, t.Any],
+ modfunc: t.Callable[[t.Any], t.Any],
+ lookup_attr: bool,
+) -> t.Callable[[t.Any], t.Any]:
if lookup_attr:
try:
- attr = args[2]
+ attr = args[0]
except LookupError:
- raise FilterArgumentError("Missing parameter for attribute name")
+ raise FilterArgumentError("Missing parameter for attribute name") from None
+
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
- def transfunc(x):
+ def transfunc(x: V) -> V:
return x
try:
- name = args[2 + off]
- args = args[3 + off :]
+ name = args[off]
+ args = args[1 + off :]
- def func(item):
+ def func(item: t.Any) -> t.Any:
return context.environment.call_test(name, item, args, kwargs)
except LookupError:
- func = bool
+ func = bool # type: ignore
+
+ return lambda item: modfunc(func(transfunc(item)))
+
+
+def select_or_reject(
+ context: "Context",
+ value: "t.Iterable[V]",
+ args: t.Tuple,
+ kwargs: t.Dict[str, t.Any],
+ modfunc: t.Callable[[t.Any], t.Any],
+ lookup_attr: bool,
+) -> "t.Iterator[V]":
+ if value:
+ func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
+
+ for item in value:
+ if func(item):
+ yield item
- return seq, lambda item: modfunc(func(transfunc(item)))
+async def async_select_or_reject(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ args: t.Tuple,
+ kwargs: t.Dict[str, t.Any],
+ modfunc: t.Callable[[t.Any], t.Any],
+ lookup_attr: bool,
+) -> "t.AsyncIterator[V]":
+ if value:
+ func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
-def select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
- if seq:
- for item in seq:
+ async for item in auto_aiter(value):
if func(item):
yield item
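
All four selection filters above now funnel through this one generator (or its async twin), differing only in modfunc (identity vs. negation) and lookup_attr (test the item itself vs. a named attribute). A short usage example with the built-in odd test:

    from jinja2 import Environment

    out = Environment().from_string(
        "{{ [1, 2, 3, 4]|select('odd')|list }} "
        "{{ [1, 2, 3, 4]|reject('odd')|list }}"
    ).render()
    print(out)  # [1, 3] [2, 4]
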
@@ -1350,6 +1807,7 @@ FILTERS = {
"length": len,
"list": do_list,
"lower": do_lower,
+ "items": do_items,
"map": do_map,
"min": do_min,
"max": do_max,
@@ -1365,7 +1823,7 @@ FILTERS = {
"selectattr": do_selectattr,
"slice": do_slice,
"sort": do_sort,
- "string": soft_unicode,
+ "string": soft_str,
"striptags": do_striptags,
"sum": do_sum,
"title": do_title,
diff --git a/deps/v8/third_party/jinja2/idtracking.py b/deps/v8/third_party/jinja2/idtracking.py
index 9a0d838017..995ebaa0c8 100644
--- a/deps/v8/third_party/jinja2/idtracking.py
+++ b/deps/v8/third_party/jinja2/idtracking.py
@@ -1,4 +1,6 @@
-from ._compat import iteritems
+import typing as t
+
+from . import nodes
from .visitor import NodeVisitor
VAR_LOAD_PARAMETER = "param"
@@ -7,7 +9,9 @@ VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"
-def find_symbols(nodes, parent_symbols=None):
+def find_symbols(
+ nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None
+) -> "Symbols":
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
@@ -15,58 +19,71 @@ def find_symbols(nodes, parent_symbols=None):
return sym
-def symbols_for_node(node, parent_symbols=None):
+def symbols_for_node(
+ node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None
+) -> "Symbols":
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
-class Symbols(object):
- def __init__(self, parent=None, level=None):
+class Symbols:
+ def __init__(
+ self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
+ ) -> None:
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
- self.level = level
+
+ self.level: int = level
self.parent = parent
- self.refs = {}
- self.loads = {}
- self.stores = set()
+ self.refs: t.Dict[str, str] = {}
+ self.loads: t.Dict[str, t.Any] = {}
+ self.stores: t.Set[str] = set()
- def analyze_node(self, node, **kwargs):
+ def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
- def _define_ref(self, name, load=None):
- ident = "l_%d_%s" % (self.level, name)
+ def _define_ref(
+ self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
+ ) -> str:
+ ident = f"l_{self.level}_{name}"
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
- def find_load(self, target):
+ def find_load(self, target: str) -> t.Optional[t.Any]:
if target in self.loads:
return self.loads[target]
+
if self.parent is not None:
return self.parent.find_load(target)
- def find_ref(self, name):
+ return None
+
+ def find_ref(self, name: str) -> t.Optional[str]:
if name in self.refs:
return self.refs[name]
+
if self.parent is not None:
return self.parent.find_ref(name)
- def ref(self, name):
+ return None
+
+ def ref(self, name: str) -> str:
rv = self.find_ref(name)
if rv is None:
raise AssertionError(
- "Tried to resolve a name to a reference that "
- "was unknown to the frame (%r)" % name
+ "Tried to resolve a name to a reference that was"
+ f" unknown to the frame ({name!r})"
)
return rv
- def copy(self):
+ def copy(self) -> "Symbols":
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
@@ -74,7 +91,7 @@ class Symbols(object):
rv.stores = self.stores.copy()
return rv
- def store(self, name):
+ def store(self, name: str) -> None:
self.stores.add(name)
# If we have not seen the name referenced yet, we need to figure
@@ -92,17 +109,16 @@ class Symbols(object):
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
- def declare_parameter(self, name):
+ def declare_parameter(self, name: str) -> str:
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
- def load(self, name):
- target = self.find_ref(name)
- if target is None:
+ def load(self, name: str) -> None:
+ if self.find_ref(name) is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
- def branch_update(self, branch_symbols):
- stores = {}
+ def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
+ stores: t.Dict[str, int] = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
@@ -114,10 +130,11 @@ class Symbols(object):
self.loads.update(sym.loads)
self.stores.update(sym.stores)
- for name, branch_count in iteritems(stores):
+ for name, branch_count in stores.items():
if branch_count == len(branch_symbols):
continue
- target = self.find_ref(name)
+
+ target = self.find_ref(name) # type: ignore
assert target is not None, "should not happen"
if self.parent is not None:
@@ -127,56 +144,64 @@ class Symbols(object):
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
- def dump_stores(self):
- rv = {}
- node = self
+ def dump_stores(self) -> t.Dict[str, str]:
+ rv: t.Dict[str, str] = {}
+ node: t.Optional["Symbols"] = self
+
while node is not None:
- for name in node.stores:
+ for name in sorted(node.stores):
if name not in rv:
- rv[name] = self.find_ref(name)
+ rv[name] = self.find_ref(name) # type: ignore
+
node = node.parent
+
return rv
- def dump_param_targets(self):
+ def dump_param_targets(self) -> t.Set[str]:
rv = set()
- node = self
+ node: t.Optional["Symbols"] = self
+
while node is not None:
- for target, (instr, _) in iteritems(self.loads):
+ for target, (instr, _) in self.loads.items():
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
+
node = node.parent
+
return rv
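
The Symbols table above assigns each stored name an identifier of the form l_<level>_<name> and resolves references by walking up the parent chain. A tiny check against the module itself (an internal, non-public API; the exact identifiers are an implementation detail):

    from jinja2.idtracking import Symbols

    root = Symbols()               # level 0
    child = Symbols(parent=root)   # level 1

    root.store("user")
    print(root.find_ref("user"))   # l_0_user
    print(child.find_ref("user"))  # l_0_user, found via the parent chain
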
class RootVisitor(NodeVisitor):
- def __init__(self, symbols):
+ def __init__(self, symbols: "Symbols") -> None:
self.sym_visitor = FrameSymbolVisitor(symbols)
- def _simple_visit(self, node, **kwargs):
+ def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
- visit_Template = (
- visit_Block
- ) = (
- visit_Macro
- ) = (
- visit_FilterBlock
- ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
+ visit_Template = _simple_visit
+ visit_Block = _simple_visit
+ visit_Macro = _simple_visit
+ visit_FilterBlock = _simple_visit
+ visit_Scope = _simple_visit
+ visit_If = _simple_visit
+ visit_ScopedEvalContextModifier = _simple_visit
- def visit_AssignBlock(self, node, **kwargs):
+ def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
for child in node.body:
self.sym_visitor.visit(child)
- def visit_CallBlock(self, node, **kwargs):
+ def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
- def visit_OverlayScope(self, node, **kwargs):
+ def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
for child in node.body:
self.sym_visitor.visit(child)
- def visit_For(self, node, for_branch="body", **kwargs):
+ def visit_For(
+ self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
+ ) -> None:
if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
@@ -189,28 +214,30 @@ class RootVisitor(NodeVisitor):
return
else:
raise RuntimeError("Unknown for branch")
- for item in branch or ():
- self.sym_visitor.visit(item)
- def visit_With(self, node, **kwargs):
+ if branch:
+ for item in branch:
+ self.sym_visitor.visit(item)
+
+ def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
- def generic_visit(self, node, *args, **kwargs):
- raise NotImplementedError(
- "Cannot find symbols for %r" % node.__class__.__name__
- )
+ def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
+ raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
- def __init__(self, symbols):
+ def __init__(self, symbols: "Symbols") -> None:
self.symbols = symbols
- def visit_Name(self, node, store_as_param=False, **kwargs):
+ def visit_Name(
+ self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
+ ) -> None:
"""All assignments to names go through this function."""
if store_as_param or node.ctx == "param":
self.symbols.declare_parameter(node.name)
@@ -219,72 +246,73 @@ class FrameSymbolVisitor(NodeVisitor):
elif node.ctx == "load":
self.symbols.load(node.name)
- def visit_NSRef(self, node, **kwargs):
+ def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
self.symbols.load(node.name)
- def visit_If(self, node, **kwargs):
+ def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
self.visit(node.test, **kwargs)
-
original_symbols = self.symbols
- def inner_visit(nodes):
+ def inner_visit(nodes: t.Iterable[nodes.Node]) -> "Symbols":
self.symbols = rv = original_symbols.copy()
+
for subnode in nodes:
self.visit(subnode, **kwargs)
+
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
-
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
- def visit_Macro(self, node, **kwargs):
+ def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
self.symbols.store(node.name)
- def visit_Import(self, node, **kwargs):
+ def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
- def visit_FromImport(self, node, **kwargs):
+ def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
self.generic_visit(node, **kwargs)
+
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
- def visit_Assign(self, node, **kwargs):
+ def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
- def visit_For(self, node, **kwargs):
+ def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
- def visit_CallBlock(self, node, **kwargs):
+ def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
self.visit(node.call, **kwargs)
- def visit_FilterBlock(self, node, **kwargs):
+ def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
self.visit(node.filter, **kwargs)
- def visit_With(self, node, **kwargs):
+ def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
for target in node.values:
self.visit(target)
- def visit_AssignBlock(self, node, **kwargs):
+ def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
- def visit_Scope(self, node, **kwargs):
+ def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
"""Stop visiting at scopes."""
- def visit_Block(self, node, **kwargs):
+ def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
"""Stop visiting at blocks."""
- def visit_OverlayScope(self, node, **kwargs):
+ def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
"""Do not visit into overlay scopes."""
diff --git a/deps/v8/third_party/jinja2/jinja2.gni b/deps/v8/third_party/jinja2/jinja2.gni
index 9aec8f735b..680a871640 100644
--- a/deps/v8/third_party/jinja2/jinja2.gni
+++ b/deps/v8/third_party/jinja2/jinja2.gni
@@ -2,10 +2,8 @@ import("//third_party/markupsafe/markupsafe.gni")
jinja2_sources = [
"//third_party/jinja2/__init__.py",
- "//third_party/jinja2/_compat.py",
"//third_party/jinja2/_identifier.py",
- "//third_party/jinja2/asyncfilters.py",
- "//third_party/jinja2/asyncsupport.py",
+ "//third_party/jinja2/async_utils.py",
"//third_party/jinja2/bccache.py",
"//third_party/jinja2/compiler.py",
"//third_party/jinja2/constants.py",
diff --git a/deps/v8/third_party/jinja2/lexer.py b/deps/v8/third_party/jinja2/lexer.py
index 552356a12d..aff7e9f993 100644
--- a/deps/v8/third_party/jinja2/lexer.py
+++ b/deps/v8/third_party/jinja2/lexer.py
@@ -1,32 +1,48 @@
-# -*- coding: utf-8 -*-
"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
is used to do some preprocessing. It filters out invalid operators like
the bitshift operators we don't allow in templates. It separates
template code and python code in expressions.
"""
import re
+import typing as t
from ast import literal_eval
from collections import deque
-from operator import itemgetter
+from sys import intern
-from ._compat import implements_iterator
-from ._compat import intern
-from ._compat import iteritems
-from ._compat import text_type
+from ._identifier import pattern as name_re
from .exceptions import TemplateSyntaxError
from .utils import LRUCache
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
-_lexer_cache = LRUCache(50)
+_lexer_cache: t.MutableMapping[t.Tuple, "Lexer"] = LRUCache(50) # type: ignore
# static regular expressions
-whitespace_re = re.compile(r"\s+", re.U)
+whitespace_re = re.compile(r"\s+")
newline_re = re.compile(r"(\r\n|\r|\n)")
string_re = re.compile(
r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
)
-integer_re = re.compile(r"(\d+_)*\d+")
+integer_re = re.compile(
+ r"""
+ (
+ 0b(_?[0-1])+ # binary
+ |
+ 0o(_?[0-7])+ # octal
+ |
+ 0x(_?[\da-f])+ # hex
+ |
+ [1-9](_?\d)* # decimal
+ |
+ 0(_?0)* # decimal zero
+ )
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
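
The widened integer pattern pairs with the base-0 int() conversion made later in Lexer.wrap(), which infers the radix from the 0b/0o/0x prefix. A quick standalone check of both halves:

    import re

    integer_re = re.compile(
        r"""
        (
            0b(_?[0-1])+    # binary
            |
            0o(_?[0-7])+    # octal
            |
            0x(_?[\da-f])+  # hex
            |
            [1-9](_?\d)*    # decimal
            |
            0(_?0)*         # decimal zero
        )
        """,
        re.IGNORECASE | re.VERBOSE,
    )

    for literal in ["0b1010", "0o17", "0x1_F", "1_000", "0"]:
        assert integer_re.fullmatch(literal)
        # Strip "_" and let int() pick the base from the prefix (base 0),
        # mirroring the TOKEN_INTEGER branch in wrap().
        print(literal, "->", int(literal.replace("_", ""), 0))
    # 0b1010 -> 10, 0o17 -> 15, 0x1_F -> 31, 1_000 -> 1000, 0 -> 0
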
float_re = re.compile(
r"""
(?<!\.) # doesn't start with a .
@@ -41,20 +57,6 @@ float_re = re.compile(
re.IGNORECASE | re.VERBOSE,
)
-try:
- # check if this Python supports Unicode identifiers
- compile("föö", "<unknown>", "eval")
-except SyntaxError:
- # Python 2, no Unicode support, use ASCII identifiers
- name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
- check_ident = False
-else:
- # Unicode support, import generated re pattern and set flag to use
- # str.isidentifier to validate during lexing.
- from ._identifier import pattern as name_re
-
- check_ident = True
-
# intern the tokens and keep references to them
TOKEN_ADD = intern("add")
TOKEN_ASSIGN = intern("assign")
@@ -136,10 +138,10 @@ operators = {
";": TOKEN_SEMICOLON,
}
-reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
+reverse_operators = {v: k for k, v in operators.items()}
assert len(operators) == len(reverse_operators), "operators dropped"
operator_re = re.compile(
- "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
+ f"({'|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))})"
)
ignored_tokens = frozenset(
@@ -158,9 +160,10 @@ ignore_if_empty = frozenset(
)
-def _describe_token_type(token_type):
+def _describe_token_type(token_type: str) -> str:
if token_type in reverse_operators:
return reverse_operators[token_type]
+
return {
TOKEN_COMMENT_BEGIN: "begin of comment",
TOKEN_COMMENT_END: "end of comment",
@@ -177,32 +180,35 @@ def _describe_token_type(token_type):
}.get(token_type, token_type)
-def describe_token(token):
+def describe_token(token: "Token") -> str:
"""Returns a description of the token."""
if token.type == TOKEN_NAME:
return token.value
+
return _describe_token_type(token.type)
-def describe_token_expr(expr):
+def describe_token_expr(expr: str) -> str:
"""Like `describe_token` but for token expressions."""
if ":" in expr:
type, value = expr.split(":", 1)
+
if type == TOKEN_NAME:
return value
else:
type = expr
+
return _describe_token_type(type)
-def count_newlines(value):
+def count_newlines(value: str) -> int:
"""Count the number of newline characters in the string. This is
useful for extensions that filter a stream.
"""
return len(newline_re.findall(value))
-def compile_rules(environment):
+def compile_rules(environment: "Environment") -> t.List[t.Tuple[str, str]]:
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
@@ -243,36 +249,30 @@ def compile_rules(environment):
return [x[1:] for x in sorted(rules, reverse=True)]
-class Failure(object):
+class Failure:
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
- def __init__(self, message, cls=TemplateSyntaxError):
+ def __init__(
+ self, message: str, cls: t.Type[TemplateSyntaxError] = TemplateSyntaxError
+ ) -> None:
self.message = message
self.error_class = cls
- def __call__(self, lineno, filename):
+ def __call__(self, lineno: int, filename: str) -> "te.NoReturn":
raise self.error_class(self.message, lineno, filename)
-class Token(tuple):
- """Token class."""
-
- __slots__ = ()
- lineno, type, value = (property(itemgetter(x)) for x in range(3))
-
- def __new__(cls, lineno, type, value):
- return tuple.__new__(cls, (lineno, intern(str(type)), value))
+class Token(t.NamedTuple):
+ lineno: int
+ type: str
+ value: str
- def __str__(self):
- if self.type in reverse_operators:
- return reverse_operators[self.type]
- elif self.type == "name":
- return self.value
- return self.type
+ def __str__(self) -> str:
+ return describe_token(self)
- def test(self, expr):
+ def test(self, expr: str) -> bool:
"""Test a token against a token expression. This can either be a
token type or ``'token_type:token_value'``. This can only test
against string values and types.
@@ -281,76 +281,75 @@ class Token(tuple):
# passed an iterable of not interned strings.
if self.type == expr:
return True
- elif ":" in expr:
+
+ if ":" in expr:
return expr.split(":", 1) == [self.type, self.value]
- return False
- def test_any(self, *iterable):
- """Test against multiple token expressions."""
- for expr in iterable:
- if self.test(expr):
- return True
return False
- def __repr__(self):
- return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
+ def test_any(self, *iterable: str) -> bool:
+ """Test against multiple token expressions."""
+ return any(self.test(expr) for expr in iterable)
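
Token is now a typing.NamedTuple, so plain construction and the expression format accepted by test()/test_any() can be exercised directly:

    from jinja2.lexer import Token

    tok = Token(1, "name", "endfor")
    print(tok.test("name"))           # True: bare token type
    print(tok.test("name:endfor"))    # True: "type:value" form
    print(tok.test("name:endif"))     # False
    print(tok.test_any("name:endif", "name:endfor"))  # True
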
-@implements_iterator
-class TokenStreamIterator(object):
+class TokenStreamIterator:
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
- def __init__(self, stream):
+ def __init__(self, stream: "TokenStream") -> None:
self.stream = stream
- def __iter__(self):
+ def __iter__(self) -> "TokenStreamIterator":
return self
- def __next__(self):
+ def __next__(self) -> Token:
token = self.stream.current
+
if token.type is TOKEN_EOF:
self.stream.close()
- raise StopIteration()
+ raise StopIteration
+
next(self.stream)
return token
-@implements_iterator
-class TokenStream(object):
+class TokenStream:
"""A token stream is an iterable that yields :class:`Token`\\s. The
parser however does not iterate over it but calls :meth:`next` to go
one token ahead. The current active token is stored as :attr:`current`.
"""
- def __init__(self, generator, name, filename):
+ def __init__(
+ self,
+ generator: t.Iterable[Token],
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ ):
self._iter = iter(generator)
- self._pushed = deque()
+ self._pushed: "te.Deque[Token]" = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, "")
next(self)
- def __iter__(self):
+ def __iter__(self) -> TokenStreamIterator:
return TokenStreamIterator(self)
- def __bool__(self):
+ def __bool__(self) -> bool:
return bool(self._pushed) or self.current.type is not TOKEN_EOF
- __nonzero__ = __bool__ # py2
-
@property
- def eos(self):
+ def eos(self) -> bool:
"""Are we at the end of the stream?"""
return not self
- def push(self, token):
+ def push(self, token: Token) -> None:
"""Push a token back to the stream."""
self._pushed.append(token)
- def look(self):
+ def look(self) -> Token:
"""Look at the next token."""
old_token = next(self)
result = self.current
@@ -358,28 +357,31 @@ class TokenStream(object):
self.current = old_token
return result
- def skip(self, n=1):
+ def skip(self, n: int = 1) -> None:
"""Got n tokens ahead."""
for _ in range(n):
next(self)
- def next_if(self, expr):
+ def next_if(self, expr: str) -> t.Optional[Token]:
"""Perform the token test and return the token if it matched.
Otherwise the return value is `None`.
"""
if self.current.test(expr):
return next(self)
- def skip_if(self, expr):
+ return None
+
+ def skip_if(self, expr: str) -> bool:
"""Like :meth:`next_if` but only returns `True` or `False`."""
return self.next_if(expr) is not None
- def __next__(self):
+ def __next__(self) -> Token:
"""Go one token ahead and return the old one.
Use the built-in :func:`next` instead of calling this directly.
"""
rv = self.current
+
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
@@ -387,40 +389,41 @@ class TokenStream(object):
self.current = next(self._iter)
except StopIteration:
self.close()
+
return rv
- def close(self):
+ def close(self) -> None:
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, "")
- self._iter = None
+ self._iter = iter(())
self.closed = True
- def expect(self, expr):
+ def expect(self, expr: str) -> Token:
"""Expect a given token type and return it. This accepts the same
argument as :meth:`jinja2.lexer.Token.test`.
"""
if not self.current.test(expr):
expr = describe_token_expr(expr)
+
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError(
- "unexpected end of template, expected %r." % expr,
+ f"unexpected end of template, expected {expr!r}.",
self.current.lineno,
self.name,
self.filename,
)
+
raise TemplateSyntaxError(
- "expected token %r, got %r" % (expr, describe_token(self.current)),
+ f"expected token {expr!r}, got {describe_token(self.current)!r}",
self.current.lineno,
self.name,
self.filename,
)
- try:
- return self.current
- finally:
- next(self)
+
+ return next(self)
-def get_lexer(environment):
+def get_lexer(environment: "Environment") -> "Lexer":
"""Return a lexer which is probably cached."""
key = (
environment.block_start_string,
@@ -437,9 +440,10 @@ def get_lexer(environment):
environment.keep_trailing_newline,
)
lexer = _lexer_cache.get(key)
+
if lexer is None:
- lexer = Lexer(environment)
- _lexer_cache[key] = lexer
+ _lexer_cache[key] = lexer = Lexer(environment)
+
return lexer
@@ -452,11 +456,17 @@ class OptionalLStrip(tuple):
# Even though it looks like a no-op, creating instances fails
# without this.
- def __new__(cls, *members, **kwargs):
- return super(OptionalLStrip, cls).__new__(cls, members)
+ def __new__(cls, *members, **kwargs): # type: ignore
+ return super().__new__(cls, members)
-class Lexer(object):
+class _Rule(t.NamedTuple):
+ pattern: t.Pattern[str]
+ tokens: t.Union[str, t.Tuple[str, ...], t.Tuple[Failure]]
+ command: t.Optional[str]
+
+
+class Lexer:
"""Class that implements a lexer for a given environment. Automatically
created by the environment class; usually you don't have to do that yourself.
@@ -464,21 +474,21 @@ class Lexer(object):
Multiple environments can share the same lexer.
"""
- def __init__(self, environment):
+ def __init__(self, environment: "Environment") -> None:
# shortcuts
e = re.escape
- def c(x):
+ def c(x: str) -> t.Pattern[str]:
return re.compile(x, re.M | re.S)
# lexing rules for tags
- tag_rules = [
- (whitespace_re, TOKEN_WHITESPACE, None),
- (float_re, TOKEN_FLOAT, None),
- (integer_re, TOKEN_INTEGER, None),
- (name_re, TOKEN_NAME, None),
- (string_re, TOKEN_STRING, None),
- (operator_re, TOKEN_OPERATOR, None),
+ tag_rules: t.List[_Rule] = [
+ _Rule(whitespace_re, TOKEN_WHITESPACE, None),
+ _Rule(float_re, TOKEN_FLOAT, None),
+ _Rule(integer_re, TOKEN_INTEGER, None),
+ _Rule(name_re, TOKEN_NAME, None),
+ _Rule(string_re, TOKEN_STRING, None),
+ _Rule(operator_re, TOKEN_OPERATOR, None),
]
# assemble the root lexing rule. because "|" is ungreedy
@@ -489,70 +499,57 @@ class Lexer(object):
# is required.
root_tag_rules = compile_rules(environment)
+ block_start_re = e(environment.block_start_string)
+ block_end_re = e(environment.block_end_string)
+ comment_end_re = e(environment.comment_end_string)
+ variable_end_re = e(environment.variable_end_string)
+
# block suffix if trimming is enabled
- block_suffix_re = environment.trim_blocks and "\\n?" or ""
+ block_suffix_re = "\\n?" if environment.trim_blocks else ""
- # If lstrip is enabled, it should not be applied if there is any
- # non-whitespace between the newline and block.
- self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
+ self.lstrip_blocks = environment.lstrip_blocks
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
+ root_raw_re = (
+ rf"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
+ rf"(?:\-{block_end_re}\s*|{block_end_re}))"
+ )
+ root_parts_re = "|".join(
+ [root_raw_re] + [rf"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
+ )
+
# global lexing rules
- self.rules = {
+ self.rules: t.Dict[str, t.List[_Rule]] = {
"root": [
# directives
- (
- c(
- "(.*?)(?:%s)"
- % "|".join(
- [
- r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
- % (
- e(environment.block_start_string),
- e(environment.block_end_string),
- e(environment.block_end_string),
- )
- ]
- + [
- r"(?P<%s>%s(\-|\+|))" % (n, r)
- for n, r in root_tag_rules
- ]
- )
- ),
- OptionalLStrip(TOKEN_DATA, "#bygroup"),
+ _Rule(
+ c(rf"(.*?)(?:{root_parts_re})"),
+ OptionalLStrip(TOKEN_DATA, "#bygroup"), # type: ignore
"#bygroup",
),
# data
- (c(".+"), TOKEN_DATA, None),
+ _Rule(c(".+"), TOKEN_DATA, None),
],
# comments
TOKEN_COMMENT_BEGIN: [
- (
+ _Rule(
c(
- r"(.*?)((?:\-%s\s*|%s)%s)"
- % (
- e(environment.comment_end_string),
- e(environment.comment_end_string),
- block_suffix_re,
- )
+ rf"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
+ rf"|{comment_end_re}{block_suffix_re}))"
),
(TOKEN_COMMENT, TOKEN_COMMENT_END),
"#pop",
),
- (c("(.)"), (Failure("Missing end of comment tag"),), None),
+ _Rule(c(r"(.)"), (Failure("Missing end of comment tag"),), None),
],
# blocks
TOKEN_BLOCK_BEGIN: [
- (
+ _Rule(
c(
- r"(?:\-%s\s*|%s)%s"
- % (
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re,
- )
+ rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
+ rf"|{block_end_re}{block_suffix_re})"
),
TOKEN_BLOCK_END,
"#pop",
@@ -561,14 +558,8 @@ class Lexer(object):
+ tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
- (
- c(
- r"\-%s\s*|%s"
- % (
- e(environment.variable_end_string),
- e(environment.variable_end_string),
- )
- ),
+ _Rule(
+ c(rf"\-{variable_end_re}\s*|{variable_end_re}"),
TOKEN_VARIABLE_END,
"#pop",
)
@@ -576,29 +567,25 @@ class Lexer(object):
+ tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
- (
+ _Rule(
c(
- r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
- % (
- e(environment.block_start_string),
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re,
- )
+ rf"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
+ rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
+ rf"|{block_end_re}{block_suffix_re}))"
),
- OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
+ OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), # type: ignore
"#pop",
),
- (c("(.)"), (Failure("Missing end of raw directive"),), None),
+ _Rule(c(r"(.)"), (Failure("Missing end of raw directive"),), None),
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
- (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
+ _Rule(c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
]
+ tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
- (
+ _Rule(
c(r"(.*?)()(?=\n|$)"),
(TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
"#pop",
@@ -606,23 +593,39 @@ class Lexer(object):
],
}
- def _normalize_newlines(self, value):
- """Called for strings and template data to normalize it to unicode."""
+ def _normalize_newlines(self, value: str) -> str:
+ """Replace all newlines with the configured sequence in strings
+ and template data.
+ """
return newline_re.sub(self.newline_sequence, value)
- def tokenize(self, source, name=None, filename=None, state=None):
+ def tokenize(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> TokenStream:
"""Calls tokeniter + tokenize and wraps it in a token stream."""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
- def wrap(self, stream, name=None, filename=None):
+ def wrap(
+ self,
+ stream: t.Iterable[t.Tuple[int, str, str]],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> t.Iterator[Token]:
"""This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value.
"""
- for lineno, token, value in stream:
+ for lineno, token, value_str in stream:
if token in ignored_tokens:
continue
- elif token == TOKEN_LINESTATEMENT_BEGIN:
+
+ value: t.Any = value_str
+
+ if token == TOKEN_LINESTATEMENT_BEGIN:
token = TOKEN_BLOCK_BEGIN
elif token == TOKEN_LINESTATEMENT_END:
token = TOKEN_BLOCK_END
@@ -630,12 +633,13 @@ class Lexer(object):
elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
continue
elif token == TOKEN_DATA:
- value = self._normalize_newlines(value)
+ value = self._normalize_newlines(value_str)
elif token == "keyword":
- token = value
+ token = value_str
elif token == TOKEN_NAME:
- value = str(value)
- if check_ident and not value.isidentifier():
+ value = value_str
+
+ if not value.isidentifier():
raise TemplateSyntaxError(
"Invalid character in identifier", lineno, name, filename
)
@@ -643,51 +647,62 @@ class Lexer(object):
# try to unescape string
try:
value = (
- self._normalize_newlines(value[1:-1])
+ self._normalize_newlines(value_str[1:-1])
.encode("ascii", "backslashreplace")
.decode("unicode-escape")
)
except Exception as e:
msg = str(e).split(":")[-1].strip()
- raise TemplateSyntaxError(msg, lineno, name, filename)
+ raise TemplateSyntaxError(msg, lineno, name, filename) from e
elif token == TOKEN_INTEGER:
- value = int(value.replace("_", ""))
+ value = int(value_str.replace("_", ""), 0)
elif token == TOKEN_FLOAT:
# remove all "_" first to support more Python versions
- value = literal_eval(value.replace("_", ""))
+ value = literal_eval(value_str.replace("_", ""))
elif token == TOKEN_OPERATOR:
- token = operators[value]
+ token = operators[value_str]
+
yield Token(lineno, token, value)
- def tokeniter(self, source, name, filename=None, state=None):
+ def tokeniter(
+ self,
+ source: str,
+ name: t.Optional[str],
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> t.Iterator[t.Tuple[int, str, str]]:
"""This method tokenizes the text and returns the tokens in a
- generator. Use this method if you just want to tokenize a template.
+ generator. Use this method if you just want to tokenize a template.
+
+ .. versionchanged:: 3.0
+ Only ``\\n``, ``\\r\\n`` and ``\\r`` are treated as line
+ breaks.
"""
- source = text_type(source)
- lines = source.splitlines()
- if self.keep_trailing_newline and source:
- for newline in ("\r\n", "\r", "\n"):
- if source.endswith(newline):
- lines.append("")
- break
+ lines = newline_re.split(source)[::2]
+
+ if not self.keep_trailing_newline and lines[-1] == "":
+ del lines[-1]
+
source = "\n".join(lines)
pos = 0
lineno = 1
stack = ["root"]
+
if state is not None and state != "root":
assert state in ("variable", "block"), "invalid state"
stack.append(state + "_begin")
+
statetokens = self.rules[stack[-1]]
source_length = len(source)
- balancing_stack = []
- lstrip_unless_re = self.lstrip_unless_re
+ balancing_stack: t.List[str] = []
newlines_stripped = 0
line_starting = True
- while 1:
+ while True:
# tokenizer loop
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
+
# if no match we try again with the next rule
if m is None:
continue
@@ -705,13 +720,12 @@ class Lexer(object):
# tuples support more options
if isinstance(tokens, tuple):
- groups = m.groups()
+ groups: t.Sequence[str] = m.groups()
if isinstance(tokens, OptionalLStrip):
# Rule supports lstrip. Match will look like
# text, block type, whitespace control, type, control, ...
text = groups[0]
-
# Skipping the text and first type, every other group is the
# whitespace control for each type. One of the groups will be
# -, +, or empty string instead of None.
@@ -721,22 +735,23 @@ class Lexer(object):
# Strip all whitespace between the text and the tag.
stripped = text.rstrip()
newlines_stripped = text[len(stripped) :].count("\n")
- groups = (stripped,) + groups[1:]
+ groups = [stripped, *groups[1:]]
elif (
# Not marked for preserving whitespace.
strip_sign != "+"
# lstrip is enabled.
- and lstrip_unless_re is not None
+ and self.lstrip_blocks
# Not a variable expression.
and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
):
# The start of text between the last newline and the tag.
l_pos = text.rfind("\n") + 1
+
if l_pos > 0 or line_starting:
# If there's only whitespace between the newline and the
# tag, strip it.
- if not lstrip_unless_re.search(text, l_pos):
- groups = (text[:l_pos],) + groups[1:]
+ if whitespace_re.fullmatch(text, l_pos):
+ groups = [text[:l_pos], *groups[1:]]
for idx, token in enumerate(tokens):
# failure group
@@ -746,28 +761,30 @@ class Lexer(object):
# yield for the current token the first named
# group that matched
elif token == "#bygroup":
- for key, value in iteritems(m.groupdict()):
+ for key, value in m.groupdict().items():
if value is not None:
yield lineno, key, value
lineno += value.count("\n")
break
else:
raise RuntimeError(
- "%r wanted to resolve "
- "the token dynamically"
- " but no group matched" % regex
+ f"{regex!r} wanted to resolve the token dynamically"
+ " but no group matched"
)
# normal group
else:
data = groups[idx]
+
if data or token not in ignore_if_empty:
yield lineno, token, data
+
lineno += data.count("\n") + newlines_stripped
newlines_stripped = 0
# a plain string token is just yielded as-is.
else:
data = m.group()
+
# update brace/parentheses balance
if tokens == TOKEN_OPERATOR:
if data == "{":
@@ -779,24 +796,26 @@ class Lexer(object):
elif data in ("}", ")", "]"):
if not balancing_stack:
raise TemplateSyntaxError(
- "unexpected '%s'" % data, lineno, name, filename
+ f"unexpected '{data}'", lineno, name, filename
)
+
expected_op = balancing_stack.pop()
+
if expected_op != data:
raise TemplateSyntaxError(
- "unexpected '%s', "
- "expected '%s'" % (data, expected_op),
+ f"unexpected '{data}', expected '{expected_op}'",
lineno,
name,
filename,
)
+
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
+
lineno += data.count("\n")
line_starting = m.group()[-1:] == "\n"
-
# fetch new position into new variable so that we can check
# if there is an internal parsing error which would result
# in an infinite loop
@@ -809,27 +828,28 @@ class Lexer(object):
stack.pop()
# resolve the new state by group checking
elif new_state == "#bygroup":
- for key, value in iteritems(m.groupdict()):
+ for key, value in m.groupdict().items():
if value is not None:
stack.append(key)
break
else:
raise RuntimeError(
- "%r wanted to resolve the "
- "new state dynamically but"
- " no group matched" % regex
+ f"{regex!r} wanted to resolve the new state dynamically"
+ f" but no group matched"
)
# direct state name given
else:
stack.append(new_state)
+
statetokens = self.rules[stack[-1]]
# we are still at the same position and no stack change.
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
raise RuntimeError(
- "%r yielded empty string without stack change" % regex
+ f"{regex!r} yielded empty string without stack change"
)
+
# publish the new position and start again
pos = pos2
break
@@ -839,10 +859,8 @@ class Lexer(object):
# end of text
if pos >= source_length:
return
+
# something went wrong
raise TemplateSyntaxError(
- "unexpected char %r at %d" % (source[pos], pos),
- lineno,
- name,
- filename,
+ f"unexpected char {source[pos]!r} at {pos}", lineno, name, filename
)
diff --git a/deps/v8/third_party/jinja2/loaders.py b/deps/v8/third_party/jinja2/loaders.py
index 457c4b59a7..d2f98093cd 100644
--- a/deps/v8/third_party/jinja2/loaders.py
+++ b/deps/v8/third_party/jinja2/loaders.py
@@ -1,33 +1,37 @@
-# -*- coding: utf-8 -*-
"""API and implementations for loading templates from different data
sources.
"""
+import importlib.util
import os
+import posixpath
import sys
+import typing as t
import weakref
+import zipimport
+from collections import abc
from hashlib import sha1
-from os import path
+from importlib import import_module
from types import ModuleType
-from ._compat import abc
-from ._compat import fspath
-from ._compat import iteritems
-from ._compat import string_types
from .exceptions import TemplateNotFound
from .utils import internalcode
from .utils import open_if_exists
+if t.TYPE_CHECKING:
+ from .environment import Environment
+ from .environment import Template
-def split_template_path(template):
+
+def split_template_path(template: str) -> t.List[str]:
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split("/"):
if (
- path.sep in piece
- or (path.altsep and path.altsep in piece)
- or piece == path.pardir
+ os.path.sep in piece
+ or (os.path.altsep and os.path.altsep in piece)
+ or piece == os.path.pardir
):
raise TemplateNotFound(template)
elif piece and piece != ".":
@@ -35,7 +39,7 @@ def split_template_path(template):
return pieces
-class BaseLoader(object):
+class BaseLoader:
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
@@ -57,8 +61,8 @@ class BaseLoader(object):
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
- with file(path) as f:
- source = f.read().decode('utf-8')
+ with open(path) as f:
+ source = f.read()
return source, path, lambda: mtime == getmtime(path)
"""
@@ -68,16 +72,18 @@ class BaseLoader(object):
#: .. versionadded:: 2.4
has_source_access = True
- def get_source(self, environment, template):
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
- template as unicode string or a ASCII bytestring. The filename should
- be the name of the file on the filesystem if it was loaded from there,
- otherwise `None`. The filename is used by python for the tracebacks
+ template as a string. The filename should be the name of the
+ file on the filesystem if it was loaded from there, otherwise
+ ``None``. The filename is used by Python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
@@ -88,18 +94,23 @@ class BaseLoader(object):
"""
if not self.has_source_access:
raise RuntimeError(
- "%s cannot provide access to the source" % self.__class__.__name__
+ f"{type(self).__name__} cannot provide access to the source"
)
raise TemplateNotFound(template)
- def list_templates(self):
+ def list_templates(self) -> t.List[str]:
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError("this loader cannot iterate over all templates")
@internalcode
- def load(self, environment, name, globals=None):
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
@@ -139,44 +150,53 @@ class BaseLoader(object):
class FileSystemLoader(BaseLoader):
- """Loads templates from the file system. This loader can find templates
- in folders on the file system and is the preferred way to load them.
+ """Load templates from a directory in the file system.
+
+ The path can be relative or absolute. Relative paths are relative to
+ the current working directory.
+
+ .. code-block:: python
- The loader takes the path to the templates as string, or if multiple
- locations are wanted a list of them which is then looked up in the
- given order::
+ loader = FileSystemLoader("templates")
- >>> loader = FileSystemLoader('/path/to/templates')
- >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
+ A list of paths can be given. The directories will be searched in
+ order, stopping at the first matching template.
- Per default the template encoding is ``'utf-8'`` which can be changed
- by setting the `encoding` parameter to something else.
+ .. code-block:: python
- To follow symbolic links, set the *followlinks* parameter to ``True``::
+ loader = FileSystemLoader(["/override/templates", "/default/templates"])
- >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
+ :param searchpath: A path, or list of paths, to the directory that
+ contains the templates.
+ :param encoding: Use this encoding to read the text from template
+ files.
+ :param followlinks: Follow symbolic links in the path.
.. versionchanged:: 2.8
- The ``followlinks`` parameter was added.
+ Added the ``followlinks`` parameter.
"""
- def __init__(self, searchpath, encoding="utf-8", followlinks=False):
- if not isinstance(searchpath, abc.Iterable) or isinstance(
- searchpath, string_types
- ):
+ def __init__(
+ self,
+ searchpath: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]],
+ encoding: str = "utf-8",
+ followlinks: bool = False,
+ ) -> None:
+ if not isinstance(searchpath, abc.Iterable) or isinstance(searchpath, str):
searchpath = [searchpath]
- # In Python 3.5, os.path.join doesn't support Path. This can be
- # simplified to list(searchpath) when Python 3.5 is dropped.
- self.searchpath = [fspath(p) for p in searchpath]
-
+ self.searchpath = [os.fspath(p) for p in searchpath]
self.encoding = encoding
self.followlinks = followlinks
- def get_source(self, environment, template):
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, str, t.Callable[[], bool]]:
pieces = split_template_path(template)
for searchpath in self.searchpath:
- filename = path.join(searchpath, *pieces)
+ # Use posixpath even on Windows to avoid "drive:" or UNC
+ # segments breaking out of the search directory.
+ filename = posixpath.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
@@ -185,18 +205,19 @@ class FileSystemLoader(BaseLoader):
finally:
f.close()
- mtime = path.getmtime(filename)
+ mtime = os.path.getmtime(filename)
- def uptodate():
+ def uptodate() -> bool:
try:
- return path.getmtime(filename) == mtime
+ return os.path.getmtime(filename) == mtime
except OSError:
return False
- return contents, filename, uptodate
+ # Use normpath to convert Windows altsep to sep.
+ return contents, os.path.normpath(filename), uptodate
raise TemplateNotFound(template)
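
The posixpath.join above is deliberate: on Windows, os.path.join lets a drive-qualified segment discard the search path, while posixpath treats it as an ordinary name. A small demonstration with a drive-relative segment (chosen because it contains neither separator and so would slip past split_template_path):

    import ntpath
    import posixpath

    print(ntpath.join("templates", "C:secrets.txt"))     # C:secrets.txt
    print(posixpath.join("templates", "C:secrets.txt"))  # templates/C:secrets.txt
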
- def list_templates(self):
+ def list_templates(self) -> t.List[str]:
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
@@ -215,105 +236,199 @@ class FileSystemLoader(BaseLoader):
class PackageLoader(BaseLoader):
- """Load templates from python eggs or packages. It is constructed with
- the name of the python package and the path to the templates in that
- package::
+ """Load templates from a directory in a Python package.
- loader = PackageLoader('mypackage', 'views')
+ :param package_name: Import name of the package that contains the
+ template directory.
+ :param package_path: Directory within the imported package that
+ contains the templates.
+ :param encoding: Encoding of template files.
- If the package path is not given, ``'templates'`` is assumed.
+ The following example looks up templates in the ``pages`` directory
+ within the ``project.ui`` package.
- Per default the template encoding is ``'utf-8'`` which can be changed
- by setting the `encoding` parameter to something else. Due to the nature
- of eggs it's only possible to reload templates if the package was loaded
- from the file system and not a zip file.
- """
+ .. code-block:: python
- def __init__(self, package_name, package_path="templates", encoding="utf-8"):
- from pkg_resources import DefaultProvider
- from pkg_resources import get_provider
- from pkg_resources import ResourceManager
+ loader = PackageLoader("project.ui", "pages")
- provider = get_provider(package_name)
- self.encoding = encoding
- self.manager = ResourceManager()
- self.filesystem_bound = isinstance(provider, DefaultProvider)
- self.provider = provider
- self.package_path = package_path
+ Only packages installed as directories (standard pip behavior) or
+ zip/egg files (less common) are supported. The Python API for
+ introspecting data in packages is too limited to support other
+ installation methods the way this loader requires.
- def get_source(self, environment, template):
- pieces = split_template_path(template)
- p = "/".join((self.package_path,) + tuple(pieces))
+ There is limited support for :pep:`420` namespace packages. The
+ template directory is assumed to only be in one namespace
+ contributor. Zip files contributing to a namespace are not
+ supported.
- if not self.provider.has_resource(p):
- raise TemplateNotFound(template)
+ .. versionchanged:: 3.0
+ No longer uses ``setuptools`` as a dependency.
- filename = uptodate = None
+ .. versionchanged:: 3.0
+ Limited PEP 420 namespace package support.
+ """
- if self.filesystem_bound:
- filename = self.provider.get_resource_filename(self.manager, p)
- mtime = path.getmtime(filename)
+ def __init__(
+ self,
+ package_name: str,
+ package_path: "str" = "templates",
+ encoding: str = "utf-8",
+ ) -> None:
+ package_path = os.path.normpath(package_path).rstrip(os.path.sep)
- def uptodate():
- try:
- return path.getmtime(filename) == mtime
- except OSError:
- return False
+ # normpath preserves ".", which isn't valid in zip paths.
+ if package_path == os.path.curdir:
+ package_path = ""
+ elif package_path[:2] == os.path.curdir + os.path.sep:
+ package_path = package_path[2:]
+
+ self.package_path = package_path
+ self.package_name = package_name
+ self.encoding = encoding
- source = self.provider.get_resource_string(self.manager, p)
- return source.decode(self.encoding), filename, uptodate
+ # Make sure the package exists. This also makes namespace
+ # packages work, otherwise get_loader returns None.
+ import_module(package_name)
+ spec = importlib.util.find_spec(package_name)
+ assert spec is not None, "An import spec was not found for the package."
+ loader = spec.loader
+ assert loader is not None, "A loader was not found for the package."
+ self._loader = loader
+ self._archive = None
+ template_root = None
+
+ if isinstance(loader, zipimport.zipimporter):
+ self._archive = loader.archive
+ pkgdir = next(iter(spec.submodule_search_locations)) # type: ignore
+ template_root = os.path.join(pkgdir, package_path).rstrip(os.path.sep)
+ else:
+ roots: t.List[str] = []
+
+ # One element for regular packages, multiple for namespace
+ # packages, or None for single module file.
+ if spec.submodule_search_locations:
+ roots.extend(spec.submodule_search_locations)
+ # A single module file, use the parent directory instead.
+ elif spec.origin is not None:
+ roots.append(os.path.dirname(spec.origin))
+
+ for root in roots:
+ root = os.path.join(root, package_path)
+
+ if os.path.isdir(root):
+ template_root = root
+ break
+
+ if template_root is None:
+ raise ValueError(
+ f"The {package_name!r} package was not installed in a"
+ " way that PackageLoader understands."
+ )
- def list_templates(self):
- path = self.package_path
+ self._template_root = template_root
- if path[:2] == "./":
- path = path[2:]
- elif path == ".":
- path = ""
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, str, t.Optional[t.Callable[[], bool]]]:
+ # Use posixpath even on Windows to avoid "drive:" or UNC
+ # segments breaking out of the search directory. Use normpath to
+ # convert Windows altsep to sep.
+ p = os.path.normpath(
+ posixpath.join(self._template_root, *split_template_path(template))
+ )
+ up_to_date: t.Optional[t.Callable[[], bool]]
- offset = len(path)
- results = []
+ if self._archive is None:
+ # Package is a directory.
+ if not os.path.isfile(p):
+ raise TemplateNotFound(template)
- def _walk(path):
- for filename in self.provider.resource_listdir(path):
- fullname = path + "/" + filename
+ with open(p, "rb") as f:
+ source = f.read()
- if self.provider.resource_isdir(fullname):
- _walk(fullname)
- else:
- results.append(fullname[offset:].lstrip("/"))
+ mtime = os.path.getmtime(p)
+
+ def up_to_date() -> bool:
+ return os.path.isfile(p) and os.path.getmtime(p) == mtime
+
+ else:
+ # Package is a zip file.
+ try:
+ source = self._loader.get_data(p) # type: ignore
+ except OSError as e:
+ raise TemplateNotFound(template) from e
+
+ # Could use the zip's mtime for all template mtimes, but
+ # would need to safely reload the module if it's out of
+ # date, so just report it as always current.
+ up_to_date = None
+
+ return source.decode(self.encoding), p, up_to_date
+
+ def list_templates(self) -> t.List[str]:
+ results: t.List[str] = []
+
+ if self._archive is None:
+ # Package is a directory.
+ offset = len(self._template_root)
+
+ for dirpath, _, filenames in os.walk(self._template_root):
+ dirpath = dirpath[offset:].lstrip(os.path.sep)
+ results.extend(
+ os.path.join(dirpath, name).replace(os.path.sep, "/")
+ for name in filenames
+ )
+ else:
+ if not hasattr(self._loader, "_files"):
+ raise TypeError(
+ "This zip import does not have the required"
+ " metadata to list templates."
+ )
+
+ # Package is a zip file.
+ prefix = (
+ self._template_root[len(self._archive) :].lstrip(os.path.sep)
+ + os.path.sep
+ )
+ offset = len(prefix)
+
+ for name in self._loader._files.keys(): # type: ignore
+ # Find names under the templates directory that aren't directories.
+ if name.startswith(prefix) and name[-1] != os.path.sep:
+ results.append(name[offset:].replace(os.path.sep, "/"))
- _walk(path)
results.sort()
return results
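The root-resolution logic in the rewritten __init__ leans on importlib; a standalone sketch of the same idea, using an arbitrary installed package name and an assumed "templates" subdirectory:

    import importlib.util
    import os

    spec = importlib.util.find_spec("jinja2")            # any installed package
    roots = list(spec.submodule_search_locations or [])
    if not roots and spec.origin is not None:            # single-module distribution
        roots.append(os.path.dirname(spec.origin))
    template_root = next(
        (os.path.join(r, "templates")
         for r in roots
         if os.path.isdir(os.path.join(r, "templates"))),
        None,
    )
    print(template_root)  # None unless the package ships a "templates" directory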
class DictLoader(BaseLoader):
- """Loads a template from a python dict. It's passed a dict of unicode
- strings bound to template names. This loader is useful for unittesting:
+ """Loads a template from a Python dict mapping template names to
+ template source. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
- def __init__(self, mapping):
+ def __init__(self, mapping: t.Mapping[str, str]) -> None:
self.mapping = mapping
- def get_source(self, environment, template):
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, None, t.Callable[[], bool]]:
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
- def list_templates(self):
+ def list_templates(self) -> t.List[str]:
return sorted(self.mapping)
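A small test-style sketch of the DictLoader described above; the template name and source are made up:

    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({"index.html": "Hello {{ name }}!"}))
    assert env.get_template("index.html").render(name="World") == "Hello World!"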
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
- an unicode string with the template source, a tuple in the form ``(source,
+ a string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
@@ -328,15 +443,30 @@ class FunctionLoader(BaseLoader):
return value.
"""
- def __init__(self, load_func):
+ def __init__(
+ self,
+ load_func: t.Callable[
+ [str],
+ t.Optional[
+ t.Union[
+ str, t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]
+ ]
+ ],
+ ],
+ ) -> None:
self.load_func = load_func
- def get_source(self, environment, template):
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
rv = self.load_func(template)
+
if rv is None:
raise TemplateNotFound(template)
- elif isinstance(rv, string_types):
+
+ if isinstance(rv, str):
return rv, None, None
+
return rv
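A sketch of a FunctionLoader matching the typing above: the function returns a source string, or None to signal a missing template (which raises TemplateNotFound). The dict and names are placeholders.

    from jinja2 import Environment, FunctionLoader

    pages = {"about.html": "About {{ site }}"}

    def load_template(name):
        return pages.get(name)          # str source, or None if unknown

    env = Environment(loader=FunctionLoader(load_template))
    print(env.get_template("about.html").render(site="example.org"))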
@@ -355,40 +485,49 @@ class PrefixLoader(BaseLoader):
by loading ``'app2/index.html'`` the file from the second.
"""
- def __init__(self, mapping, delimiter="/"):
+ def __init__(
+ self, mapping: t.Mapping[str, BaseLoader], delimiter: str = "/"
+ ) -> None:
self.mapping = mapping
self.delimiter = delimiter
- def get_loader(self, template):
+ def get_loader(self, template: str) -> t.Tuple[BaseLoader, str]:
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
- except (ValueError, KeyError):
- raise TemplateNotFound(template)
+ except (ValueError, KeyError) as e:
+ raise TemplateNotFound(template) from e
return loader, name
- def get_source(self, environment, template):
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
- except TemplateNotFound:
+ except TemplateNotFound as e:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
- raise TemplateNotFound(template)
+ raise TemplateNotFound(template) from e
@internalcode
- def load(self, environment, name, globals=None):
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
- except TemplateNotFound:
+ except TemplateNotFound as e:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
- raise TemplateNotFound(name)
+ raise TemplateNotFound(name) from e
- def list_templates(self):
+ def list_templates(self) -> t.List[str]:
result = []
- for prefix, loader in iteritems(self.mapping):
+ for prefix, loader in self.mapping.items():
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
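A sketch of the prefix routing described above, using two in-memory loaders and the default "/" delimiter:

    from jinja2 import DictLoader, Environment, PrefixLoader

    env = Environment(
        loader=PrefixLoader({
            "app1": DictLoader({"index.html": "app1 index"}),
            "app2": DictLoader({"index.html": "app2 index"}),
        })
    )
    print(env.get_template("app2/index.html").render())   # -> app2 index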
@@ -408,10 +547,12 @@ class ChoiceLoader(BaseLoader):
from a different location.
"""
- def __init__(self, loaders):
+ def __init__(self, loaders: t.Sequence[BaseLoader]) -> None:
self.loaders = loaders
- def get_source(self, environment, template):
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
for loader in self.loaders:
try:
return loader.get_source(environment, template)
@@ -420,7 +561,12 @@ class ChoiceLoader(BaseLoader):
raise TemplateNotFound(template)
@internalcode
- def load(self, environment, name, globals=None):
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
@@ -428,7 +574,7 @@ class ChoiceLoader(BaseLoader):
pass
raise TemplateNotFound(name)
- def list_templates(self):
+ def list_templates(self) -> t.List[str]:
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
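A sketch of ChoiceLoader's fallback behaviour: the first loader that does not raise TemplateNotFound wins. The loader contents here are placeholders.

    from jinja2 import ChoiceLoader, DictLoader, Environment

    loader = ChoiceLoader([
        DictLoader({"index.html": "user override"}),
        DictLoader({"index.html": "built-in default", "help.html": "built-in help"}),
    ])
    env = Environment(loader=loader)
    print(env.get_template("help.html").render())   # found only in the second loader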
@@ -454,17 +600,19 @@ class ModuleLoader(BaseLoader):
has_source_access = False
- def __init__(self, path):
- package_name = "_jinja2_module_templates_%x" % id(self)
+ def __init__(
+ self, path: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]]
+ ) -> None:
+ package_name = f"_jinja2_module_templates_{id(self):x}"
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
- if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
+ if not isinstance(path, abc.Iterable) or isinstance(path, str):
path = [path]
- mod.__path__ = [fspath(p) for p in path]
+ mod.__path__ = [os.fspath(p) for p in path]
sys.modules[package_name] = weakref.proxy(
mod, lambda x: sys.modules.pop(package_name, None)
@@ -477,28 +625,37 @@ class ModuleLoader(BaseLoader):
self.package_name = package_name
@staticmethod
- def get_template_key(name):
+ def get_template_key(name: str) -> str:
return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
@staticmethod
- def get_module_filename(name):
+ def get_module_filename(name: str) -> str:
return ModuleLoader.get_template_key(name) + ".py"
@internalcode
- def load(self, environment, name, globals=None):
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
key = self.get_template_key(name)
- module = "%s.%s" % (self.package_name, key)
+ module = f"{self.package_name}.{key}"
mod = getattr(self.module, module, None)
+
if mod is None:
try:
mod = __import__(module, None, None, ["root"])
- except ImportError:
- raise TemplateNotFound(name)
+ except ImportError as e:
+ raise TemplateNotFound(name) from e
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
+ if globals is None:
+ globals = {}
+
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals
)
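A sketch of the round trip ModuleLoader is built for, assuming Environment.compile_templates with zip=None to write plain .py modules into a placeholder directory:

    import os
    from jinja2 import DictLoader, Environment, ModuleLoader

    src_env = Environment(loader=DictLoader({"index.html": "Hi {{ name }}"}))
    os.makedirs("compiled_templates", exist_ok=True)
    src_env.compile_templates("compiled_templates", zip=None)   # writes tmpl_<sha1>.py files

    run_env = Environment(loader=ModuleLoader("compiled_templates"))
    print(run_env.get_template("index.html").render(name="Ada"))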
diff --git a/deps/v8/third_party/jinja2/meta.py b/deps/v8/third_party/jinja2/meta.py
index 3795aace59..0057d6eaba 100644
--- a/deps/v8/third_party/jinja2/meta.py
+++ b/deps/v8/third_party/jinja2/meta.py
@@ -1,32 +1,36 @@
-# -*- coding: utf-8 -*-
"""Functions that expose information about templates that might be
interesting for introspection.
"""
+import typing as t
+
from . import nodes
-from ._compat import iteritems
-from ._compat import string_types
from .compiler import CodeGenerator
+from .compiler import Frame
+
+if t.TYPE_CHECKING:
+ from .environment import Environment
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
- def __init__(self, environment):
- CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
- self.undeclared_identifiers = set()
+ def __init__(self, environment: "Environment") -> None:
+ super().__init__(environment, "<introspection>", "<introspection>")
+ self.undeclared_identifiers: t.Set[str] = set()
- def write(self, x):
+ def write(self, x: str) -> None:
"""Don't write."""
- def enter_frame(self, frame):
+ def enter_frame(self, frame: Frame) -> None:
"""Remember all undeclared identifiers."""
- CodeGenerator.enter_frame(self, frame)
- for _, (action, param) in iteritems(frame.symbols.loads):
+ super().enter_frame(frame)
+
+ for _, (action, param) in frame.symbols.loads.items():
if action == "resolve" and param not in self.environment.globals:
self.undeclared_identifiers.add(param)
-def find_undeclared_variables(ast):
+def find_undeclared_variables(ast: nodes.Template) -> t.Set[str]:
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
@@ -35,7 +39,7 @@ def find_undeclared_variables(ast):
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
- >>> meta.find_undeclared_variables(ast) == set(['bar'])
+ >>> meta.find_undeclared_variables(ast) == {'bar'}
True
.. admonition:: Implementation
@@ -45,12 +49,16 @@ def find_undeclared_variables(ast):
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
- codegen = TrackingCodeGenerator(ast.environment)
+ codegen = TrackingCodeGenerator(ast.environment) # type: ignore
codegen.visit(ast)
return codegen.undeclared_identifiers
-def find_referenced_templates(ast):
+_ref_types = (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
+_RefType = t.Union[nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include]
+
+
+def find_referenced_templates(ast: nodes.Template) -> t.Iterator[t.Optional[str]]:
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
@@ -65,17 +73,19 @@ def find_referenced_templates(ast):
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
- for node in ast.find_all(
- (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
- ):
- if not isinstance(node.template, nodes.Const):
+ template_name: t.Any
+
+ for node in ast.find_all(_ref_types):
+ template: nodes.Expr = node.template # type: ignore
+
+ if not isinstance(template, nodes.Const):
# a tuple with some non consts in there
- if isinstance(node.template, (nodes.Tuple, nodes.List)):
- for template_name in node.template.items:
+ if isinstance(template, (nodes.Tuple, nodes.List)):
+ for template_name in template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
- if isinstance(template_name.value, string_types):
+ if isinstance(template_name.value, str):
yield template_name.value
# something dynamic in there
else:
@@ -85,16 +95,16 @@ def find_referenced_templates(ast):
yield None
continue
# constant is a basestring, direct template name
- if isinstance(node.template.value, string_types):
- yield node.template.value
+ if isinstance(template.value, str):
+ yield template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and isinstance(
- node.template.value, (tuple, list)
+ template.value, (tuple, list)
):
- for template_name in node.template.value:
- if isinstance(template_name, string_types):
+ for template_name in template.value:
+ if isinstance(template_name, str):
yield template_name
# something else we don't care about, we could warn here
else:
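A sketch of the behaviour implemented above: hardcoded template names are yielded as strings, anything dynamic as None. The template string is arbitrary.

    from jinja2 import Environment, meta

    env = Environment()
    ast = env.parse("{% extends 'base.html' %}{% include name_var %}")
    print(list(meta.find_referenced_templates(ast)))   # ['base.html', None]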
diff --git a/deps/v8/third_party/jinja2/nativetypes.py b/deps/v8/third_party/jinja2/nativetypes.py
index a9ead4e2bb..ac08610348 100644
--- a/deps/v8/third_party/jinja2/nativetypes.py
+++ b/deps/v8/third_party/jinja2/nativetypes.py
@@ -1,53 +1,67 @@
+import typing as t
from ast import literal_eval
+from ast import parse
from itertools import chain
from itertools import islice
+from types import GeneratorType
from . import nodes
-from ._compat import text_type
from .compiler import CodeGenerator
+from .compiler import Frame
from .compiler import has_safe_repr
from .environment import Environment
from .environment import Template
-def native_concat(nodes):
+def native_concat(values: t.Iterable[t.Any]) -> t.Optional[t.Any]:
"""Return a native Python type from the list of compiled nodes. If
the result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise,
the string is returned.
- :param nodes: Iterable of nodes to concatenate.
+ :param values: Iterable of outputs to concatenate.
"""
- head = list(islice(nodes, 2))
+ head = list(islice(values, 2))
if not head:
return None
if len(head) == 1:
raw = head[0]
+ if not isinstance(raw, str):
+ return raw
else:
- raw = u"".join([text_type(v) for v in chain(head, nodes)])
+ if isinstance(values, GeneratorType):
+ values = chain(head, values)
+ raw = "".join([str(v) for v in values])
try:
- return literal_eval(raw)
+ return literal_eval(
+ # In Python 3.10+ ast.literal_eval removes leading spaces/tabs
+ # from the given string. For backwards compatibility we need to
+ # parse the string ourselves without removing leading spaces/tabs.
+ parse(raw, mode="eval")
+ )
except (ValueError, SyntaxError, MemoryError):
return raw
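Calling the helper above directly, purely to illustrate its branches (single non-string output, joined-and-parsed output, empty output):

    from jinja2.nativetypes import native_concat

    assert native_concat([3]) == 3                 # single non-str output returned as-is
    assert native_concat(["1", ".", "5"]) == 1.5   # joined, then parsed with literal_eval
    assert native_concat([]) is None               # no output at all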
class NativeCodeGenerator(CodeGenerator):
"""A code generator which renders Python types by not adding
- ``to_string()`` around output nodes.
+ ``str()`` around output nodes.
"""
@staticmethod
- def _default_finalize(value):
+ def _default_finalize(value: t.Any) -> t.Any:
return value
- def _output_const_repr(self, group):
- return repr(u"".join([text_type(v) for v in group]))
+ def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
+ return repr("".join([str(v) for v in group]))
- def _output_child_to_const(self, node, frame, finalize):
+ def _output_child_to_const(
+ self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
+ ) -> t.Any:
const = node.as_const(frame.eval_ctx)
if not has_safe_repr(const):
@@ -56,13 +70,17 @@ class NativeCodeGenerator(CodeGenerator):
if isinstance(node, nodes.TemplateData):
return const
- return finalize.const(const)
+ return finalize.const(const) # type: ignore
- def _output_child_pre(self, node, frame, finalize):
+ def _output_child_pre(
+ self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
+ ) -> None:
if finalize.src is not None:
self.write(finalize.src)
- def _output_child_post(self, node, frame, finalize):
+ def _output_child_post(
+ self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
+ ) -> None:
if finalize.src is not None:
self.write(")")
@@ -71,22 +89,40 @@ class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
+ concat = staticmethod(native_concat) # type: ignore
class NativeTemplate(Template):
environment_class = NativeEnvironment
- def render(self, *args, **kwargs):
+ def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Render the template to produce a native Python type. If the
result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed
with :func:`ast.literal_eval`, the parsed value is returned.
Otherwise, the string is returned.
"""
- vars = dict(*args, **kwargs)
+ ctx = self.new_context(dict(*args, **kwargs))
try:
- return native_concat(self.root_render_func(self.new_context(vars)))
+ return self.environment_class.concat( # type: ignore
+ self.root_render_func(ctx) # type: ignore
+ )
+ except Exception:
+ return self.environment.handle_exception()
+
+ async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ if not self.environment.is_async:
+ raise RuntimeError(
+ "The environment was not created with async mode enabled."
+ )
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ return self.environment_class.concat( # type: ignore
+ [n async for n in self.root_render_func(ctx)] # type: ignore
+ )
except Exception:
return self.environment.handle_exception()
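The net effect of the native machinery above, as a short sketch with an arbitrary expression:

    from jinja2.nativetypes import NativeEnvironment

    env = NativeEnvironment()
    result = env.from_string("{{ x + y }}").render(x=4, y=2)
    assert result == 6 and isinstance(result, int)   # not the string "6"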
diff --git a/deps/v8/third_party/jinja2/nodes.py b/deps/v8/third_party/jinja2/nodes.py
index 95bd614a14..b2f88d9d9c 100644
--- a/deps/v8/third_party/jinja2/nodes.py
+++ b/deps/v8/third_party/jinja2/nodes.py
@@ -1,19 +1,23 @@
-# -*- coding: utf-8 -*-
"""AST nodes generated by the parser for the compiler. Also provides
some node tree helper functions used by the parser and compiler in order
to normalize nodes.
"""
+import inspect
import operator
+import typing as t
from collections import deque
from markupsafe import Markup
-from ._compat import izip
-from ._compat import PY2
-from ._compat import text_type
-from ._compat import with_metaclass
+from .utils import _PassArg
-_binop_to_func = {
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+_NodeBound = t.TypeVar("_NodeBound", bound="Node")
+
+_binop_to_func: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
@@ -23,9 +27,13 @@ _binop_to_func = {
"-": operator.sub,
}
-_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
+_uaop_to_func: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
+ "not": operator.not_,
+ "+": operator.pos,
+ "-": operator.neg,
+}
-_cmpop_to_func = {
+_cmpop_to_func: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"eq": operator.eq,
"ne": operator.ne,
"gt": operator.gt,
@@ -46,24 +54,26 @@ class NodeType(type):
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
- def __new__(mcs, name, bases, d):
+ def __new__(mcs, name, bases, d): # type: ignore
for attr in "fields", "attributes":
storage = []
- storage.extend(getattr(bases[0], attr, ()))
+ storage.extend(getattr(bases[0] if bases else object, attr, ()))
storage.extend(d.get(attr, ()))
- assert len(bases) == 1, "multiple inheritance not allowed"
+ assert len(bases) <= 1, "multiple inheritance not allowed"
assert len(storage) == len(set(storage)), "layout conflict"
d[attr] = tuple(storage)
d.setdefault("abstract", False)
return type.__new__(mcs, name, bases, d)
-class EvalContext(object):
+class EvalContext:
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
- def __init__(self, environment, template_name=None):
+ def __init__(
+ self, environment: "Environment", template_name: t.Optional[str] = None
+ ) -> None:
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
@@ -71,27 +81,26 @@ class EvalContext(object):
self.autoescape = environment.autoescape
self.volatile = False
- def save(self):
+ def save(self) -> t.Mapping[str, t.Any]:
return self.__dict__.copy()
- def revert(self, old):
+ def revert(self, old: t.Mapping[str, t.Any]) -> None:
self.__dict__.clear()
self.__dict__.update(old)
-def get_eval_context(node, ctx):
+def get_eval_context(node: "Node", ctx: t.Optional[EvalContext]) -> EvalContext:
if ctx is None:
if node.environment is None:
raise RuntimeError(
- "if no eval context is passed, the "
- "node must have an attached "
- "environment."
+ "if no eval context is passed, the node must have an"
+ " attached environment."
)
return EvalContext(node.environment)
return ctx
-class Node(with_metaclass(NodeType, object)):
+class Node(metaclass=NodeType):
"""Baseclass for all Jinja nodes. There are a number of nodes available
of different types. There are four major types:
@@ -108,33 +117,36 @@ class Node(with_metaclass(NodeType, object)):
all nodes automatically.
"""
- fields = ()
- attributes = ("lineno", "environment")
+ fields: t.Tuple[str, ...] = ()
+ attributes: t.Tuple[str, ...] = ("lineno", "environment")
abstract = True
- def __init__(self, *fields, **attributes):
+ lineno: int
+ environment: t.Optional["Environment"]
+
+ def __init__(self, *fields: t.Any, **attributes: t.Any) -> None:
if self.abstract:
raise TypeError("abstract nodes are not instantiable")
if fields:
if len(fields) != len(self.fields):
if not self.fields:
- raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
+ raise TypeError(f"{type(self).__name__!r} takes 0 arguments")
raise TypeError(
- "%r takes 0 or %d argument%s"
- % (
- self.__class__.__name__,
- len(self.fields),
- len(self.fields) != 1 and "s" or "",
- )
+ f"{type(self).__name__!r} takes 0 or {len(self.fields)}"
+ f" argument{'s' if len(self.fields) != 1 else ''}"
)
- for name, arg in izip(self.fields, fields):
+ for name, arg in zip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
- raise TypeError("unknown attribute %r" % next(iter(attributes)))
+ raise TypeError(f"unknown attribute {next(iter(attributes))!r}")
- def iter_fields(self, exclude=None, only=None):
+ def iter_fields(
+ self,
+ exclude: t.Optional[t.Container[str]] = None,
+ only: t.Optional[t.Container[str]] = None,
+ ) -> t.Iterator[t.Tuple[str, t.Any]]:
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
@@ -143,7 +155,7 @@ class Node(with_metaclass(NodeType, object)):
"""
for name in self.fields:
if (
- (exclude is only is None)
+ (exclude is None and only is None)
or (exclude is not None and name not in exclude)
or (only is not None and name in only)
):
@@ -152,7 +164,11 @@ class Node(with_metaclass(NodeType, object)):
except AttributeError:
pass
- def iter_child_nodes(self, exclude=None, only=None):
+ def iter_child_nodes(
+ self,
+ exclude: t.Optional[t.Container[str]] = None,
+ only: t.Optional[t.Container[str]] = None,
+ ) -> t.Iterator["Node"]:
"""Iterates over all direct child nodes of the node. This iterates
over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
@@ -165,24 +181,27 @@ class Node(with_metaclass(NodeType, object)):
elif isinstance(item, Node):
yield item
- def find(self, node_type):
+ def find(self, node_type: t.Type[_NodeBound]) -> t.Optional[_NodeBound]:
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
- def find_all(self, node_type):
+ return None
+
+ def find_all(
+ self, node_type: t.Union[t.Type[_NodeBound], t.Tuple[t.Type[_NodeBound], ...]]
+ ) -> t.Iterator[_NodeBound]:
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
- yield child
- for result in child.find_all(node_type):
- yield result
+ yield child # type: ignore
+ yield from child.find_all(node_type)
- def set_ctx(self, ctx):
+ def set_ctx(self, ctx: str) -> "Node":
"""Reset the context of a node and all child nodes. Per default the
parser will generate all nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
@@ -192,11 +211,11 @@ class Node(with_metaclass(NodeType, object)):
while todo:
node = todo.popleft()
if "ctx" in node.fields:
- node.ctx = ctx
+ node.ctx = ctx # type: ignore
todo.extend(node.iter_child_nodes())
return self
- def set_lineno(self, lineno, override=False):
+ def set_lineno(self, lineno: int, override: bool = False) -> "Node":
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
@@ -207,7 +226,7 @@ class Node(with_metaclass(NodeType, object)):
todo.extend(node.iter_child_nodes())
return self
- def set_environment(self, environment):
+ def set_environment(self, environment: "Environment") -> "Node":
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
@@ -216,30 +235,25 @@ class Node(with_metaclass(NodeType, object)):
todo.extend(node.iter_child_nodes())
return self
- def __eq__(self, other):
- return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
- other.iter_fields()
- )
+ def __eq__(self, other: t.Any) -> bool:
+ if type(self) is not type(other):
+ return NotImplemented
- def __ne__(self, other):
- return not self.__eq__(other)
+ return tuple(self.iter_fields()) == tuple(other.iter_fields())
- # Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
- def __repr__(self):
- return "%s(%s)" % (
- self.__class__.__name__,
- ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
- )
+ def __repr__(self) -> str:
+ args_str = ", ".join(f"{a}={getattr(self, a, None)!r}" for a in self.fields)
+ return f"{type(self).__name__}({args_str})"
- def dump(self):
- def _dump(node):
+ def dump(self) -> str:
+ def _dump(node: t.Union[Node, t.Any]) -> None:
if not isinstance(node, Node):
buf.append(repr(node))
return
- buf.append("nodes.%s(" % node.__class__.__name__)
+ buf.append(f"nodes.{type(node).__name__}(")
if not node.fields:
buf.append(")")
return
@@ -258,7 +272,7 @@ class Node(with_metaclass(NodeType, object)):
_dump(value)
buf.append(")")
- buf = []
+ buf: t.List[str] = []
_dump(self)
return "".join(buf)
@@ -281,6 +295,7 @@ class Template(Node):
"""
fields = ("body",)
+ body: t.List[Node]
class Output(Stmt):
@@ -289,12 +304,14 @@ class Output(Stmt):
"""
fields = ("nodes",)
+ nodes: t.List["Expr"]
class Extends(Stmt):
"""Represents an extends statement."""
fields = ("template",)
+ template: "Expr"
class For(Stmt):
@@ -307,12 +324,22 @@ class For(Stmt):
"""
fields = ("target", "iter", "body", "else_", "test", "recursive")
+ target: Node
+ iter: Node
+ body: t.List[Node]
+ else_: t.List[Node]
+ test: t.Optional[Node]
+ recursive: bool
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ("test", "body", "elif_", "else_")
+ test: Node
+ body: t.List[Node]
+ elif_: t.List["If"]
+ else_: t.List[Node]
class Macro(Stmt):
@@ -322,6 +349,10 @@ class Macro(Stmt):
"""
fields = ("name", "args", "defaults", "body")
+ name: str
+ args: t.List["Name"]
+ defaults: t.List["Expr"]
+ body: t.List[Node]
class CallBlock(Stmt):
@@ -330,12 +361,18 @@ class CallBlock(Stmt):
"""
fields = ("call", "args", "defaults", "body")
+ call: "Call"
+ args: t.List["Name"]
+ defaults: t.List["Expr"]
+ body: t.List[Node]
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ("body", "filter")
+ body: t.List[Node]
+ filter: "Filter"
class With(Stmt):
@@ -346,24 +383,41 @@ class With(Stmt):
"""
fields = ("targets", "values", "body")
+ targets: t.List["Expr"]
+ values: t.List["Expr"]
+ body: t.List[Node]
class Block(Stmt):
- """A node that represents a block."""
+ """A node that represents a block.
- fields = ("name", "body", "scoped")
+ .. versionchanged:: 3.0.0
+ the `required` field was added.
+ """
+
+ fields = ("name", "body", "scoped", "required")
+ name: str
+ body: t.List[Node]
+ scoped: bool
+ required: bool
class Include(Stmt):
"""A node that represents the include tag."""
fields = ("template", "with_context", "ignore_missing")
+ template: "Expr"
+ with_context: bool
+ ignore_missing: bool
class Import(Stmt):
"""A node that represents the import tag."""
fields = ("template", "target", "with_context")
+ template: "Expr"
+ target: str
+ with_context: bool
class FromImport(Stmt):
@@ -379,24 +433,33 @@ class FromImport(Stmt):
"""
fields = ("template", "names", "with_context")
+ template: "Expr"
+ names: t.List[t.Union[str, t.Tuple[str, str]]]
+ with_context: bool
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ("node",)
+ node: Node
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ("target", "node")
+ target: "Expr"
+ node: Node
class AssignBlock(Stmt):
"""Assigns a block to a target."""
fields = ("target", "filter", "body")
+ target: "Expr"
+ filter: t.Optional["Filter"]
+ body: t.List[Node]
class Expr(Node):
@@ -404,7 +467,7 @@ class Expr(Node):
abstract = True
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
@@ -417,7 +480,7 @@ class Expr(Node):
"""
raise Impossible()
- def can_assign(self):
+ def can_assign(self) -> bool:
"""Check if it's possible to assign something to this node."""
return False
@@ -426,44 +489,49 @@ class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ("left", "right")
- operator = None
+ left: Expr
+ right: Expr
+ operator: str
abstract = True
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
+
# intercepted operators cannot be folded at compile time
if (
- self.environment.sandboxed
- and self.operator in self.environment.intercepted_binops
+ eval_ctx.environment.sandboxed
+ and self.operator in eval_ctx.environment.intercepted_binops # type: ignore
):
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
- except Exception:
- raise Impossible()
+ except Exception as e:
+ raise Impossible() from e
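A sketch of the constant folding that as_const enables on binary expressions; the parse string is arbitrary:

    from jinja2 import Environment, nodes

    env = Environment()
    add = env.parse("{{ 1 + 2 * 3 }}").find(nodes.Add)
    print(add.as_const())   # 7, folded without rendering the template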
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ("node",)
- operator = None
+ node: Expr
+ operator: str
abstract = True
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
+
# intercepted operators cannot be folded at compile time
if (
- self.environment.sandboxed
- and self.operator in self.environment.intercepted_unops
+ eval_ctx.environment.sandboxed
+ and self.operator in eval_ctx.environment.intercepted_unops # type: ignore
):
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
- except Exception:
- raise Impossible()
+ except Exception as e:
+ raise Impossible() from e
class Name(Expr):
@@ -476,17 +544,21 @@ class Name(Expr):
"""
fields = ("name", "ctx")
+ name: str
+ ctx: str
- def can_assign(self):
- return self.name not in ("true", "false", "none", "True", "False", "None")
+ def can_assign(self) -> bool:
+ return self.name not in {"true", "false", "none", "True", "False", "None"}
class NSRef(Expr):
"""Reference to a namespace value assignment"""
fields = ("name", "attr")
+ name: str
+ attr: str
- def can_assign(self):
+ def can_assign(self) -> bool:
# We don't need any special checks here; NSRef assignments have a
# runtime check to ensure the target is a namespace object which will
# have been checked already as it is created using a normal assignment
@@ -508,22 +580,18 @@ class Const(Literal):
"""
fields = ("value",)
+ value: t.Any
- def as_const(self, eval_ctx=None):
- rv = self.value
- if (
- PY2
- and type(rv) is text_type
- and self.environment.policies["compiler.ascii_str"]
- ):
- try:
- rv = rv.encode("ascii")
- except UnicodeError:
- pass
- return rv
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ return self.value
@classmethod
- def from_untrusted(cls, value, lineno=None, environment=None):
+ def from_untrusted(
+ cls,
+ value: t.Any,
+ lineno: t.Optional[int] = None,
+ environment: "t.Optional[Environment]" = None,
+ ) -> "Const":
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
@@ -539,8 +607,9 @@ class TemplateData(Literal):
"""A constant template string."""
fields = ("data",)
+ data: str
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
@@ -556,12 +625,14 @@ class Tuple(Literal):
"""
fields = ("items", "ctx")
+ items: t.List[Expr]
+ ctx: str
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Tuple[t.Any, ...]:
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
- def can_assign(self):
+ def can_assign(self) -> bool:
for item in self.items:
if not item.can_assign():
return False
@@ -572,8 +643,9 @@ class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ("items",)
+ items: t.List[Expr]
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.List[t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
@@ -584,8 +656,11 @@ class Dict(Literal):
"""
fields = ("items",)
+ items: t.List["Pair"]
- def as_const(self, eval_ctx=None):
+ def as_const(
+ self, eval_ctx: t.Optional[EvalContext] = None
+ ) -> t.Dict[t.Any, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
@@ -594,8 +669,12 @@ class Pair(Helper):
"""A key, value pair for dicts."""
fields = ("key", "value")
+ key: Expr
+ value: Expr
- def as_const(self, eval_ctx=None):
+ def as_const(
+ self, eval_ctx: t.Optional[EvalContext] = None
+ ) -> t.Tuple[t.Any, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
@@ -604,8 +683,10 @@ class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ("key", "value")
+ key: str
+ value: Expr
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Tuple[str, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
@@ -616,8 +697,11 @@ class CondExpr(Expr):
"""
fields = ("test", "expr1", "expr2")
+ test: Expr
+ expr1: Expr
+ expr2: t.Optional[Expr]
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
@@ -629,93 +713,103 @@ class CondExpr(Expr):
return self.expr2.as_const(eval_ctx)
-def args_as_const(node, eval_ctx):
+def args_as_const(
+ node: t.Union["_FilterTestCommon", "Call"], eval_ctx: t.Optional[EvalContext]
+) -> t.Tuple[t.List[t.Any], t.Dict[t.Any, t.Any]]:
args = [x.as_const(eval_ctx) for x in node.args]
kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
if node.dyn_args is not None:
try:
args.extend(node.dyn_args.as_const(eval_ctx))
- except Exception:
- raise Impossible()
+ except Exception as e:
+ raise Impossible() from e
if node.dyn_kwargs is not None:
try:
kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
- except Exception:
- raise Impossible()
+ except Exception as e:
+ raise Impossible() from e
return args, kwargs
-class Filter(Expr):
- """This node applies a filter on an expression. `name` is the name of
- the filter, the rest of the fields are the same as for :class:`Call`.
-
- If the `node` of a filter is `None` the contents of the last buffer are
- filtered. Buffers are created by macros and filter blocks.
- """
-
+class _FilterTestCommon(Expr):
fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ node: Expr
+ name: str
+ args: t.List[Expr]
+ kwargs: t.List[Pair]
+ dyn_args: t.Optional[Expr]
+ dyn_kwargs: t.Optional[Expr]
+ abstract = True
+ _is_filter = True
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
- if eval_ctx.volatile or self.node is None:
+ if eval_ctx.volatile:
raise Impossible()
- # we have to be careful here because we call filter_ below.
- # if this variable would be called filter, 2to3 would wrap the
- # call in a list because it is assuming we are talking about the
- # builtin filter function here which no longer returns a list in
- # python 3. because of that, do not rename filter_ to filter!
- filter_ = self.environment.filters.get(self.name)
+ if self._is_filter:
+ env_map = eval_ctx.environment.filters
+ else:
+ env_map = eval_ctx.environment.tests
+
+ func = env_map.get(self.name)
+ pass_arg = _PassArg.from_obj(func) # type: ignore
- if filter_ is None or getattr(filter_, "contextfilter", False) is True:
+ if func is None or pass_arg is _PassArg.context:
raise Impossible()
- # We cannot constant handle async filters, so we need to make sure
- # to not go down this path.
- if eval_ctx.environment.is_async and getattr(
- filter_, "asyncfiltervariant", False
+ if eval_ctx.environment.is_async and (
+ getattr(func, "jinja_async_variant", False) is True
+ or inspect.iscoroutinefunction(func)
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
- if getattr(filter_, "evalcontextfilter", False) is True:
+ if pass_arg is _PassArg.eval_context:
args.insert(0, eval_ctx)
- elif getattr(filter_, "environmentfilter", False) is True:
- args.insert(0, self.environment)
+ elif pass_arg is _PassArg.environment:
+ args.insert(0, eval_ctx.environment)
try:
- return filter_(*args, **kwargs)
- except Exception:
- raise Impossible()
+ return func(*args, **kwargs)
+ except Exception as e:
+ raise Impossible() from e
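A sketch of how the shared logic above folds a simple filter expression at compile time; the template text is arbitrary:

    from jinja2 import Environment, nodes

    env = Environment()
    filt = env.parse("{{ 'hello' | upper }}").find(nodes.Filter)
    print(filt.as_const())   # 'HELLO'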
-class Test(Expr):
- """Applies a test on an expression. `name` is the name of the test, the
- rest of the fields are the same as for :class:`Call`.
- """
+class Filter(_FilterTestCommon):
+ """Apply a filter to an expression. ``name`` is the name of the
+ filter, the other fields are the same as :class:`Call`.
- fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ If ``node`` is ``None``, the filter is being used in a filter block
+ and is applied to the content of the block.
+ """
- def as_const(self, eval_ctx=None):
- test = self.environment.tests.get(self.name)
+ node: t.Optional[Expr] # type: ignore
- if test is None:
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ if self.node is None:
raise Impossible()
- eval_ctx = get_eval_context(self, eval_ctx)
- args, kwargs = args_as_const(self, eval_ctx)
- args.insert(0, self.node.as_const(eval_ctx))
+ return super().as_const(eval_ctx=eval_ctx)
- try:
- return test(*args, **kwargs)
- except Exception:
- raise Impossible()
+
+class Test(_FilterTestCommon):
+ """Apply a test to an expression. ``name`` is the name of the test,
+ the other fields are the same as :class:`Call`.
+
+ .. versionchanged:: 3.0
+ ``as_const`` shares the same logic for filters and tests. Tests
+ check for volatile, async, and ``@pass_context`` etc.
+ decorators.
+ """
+
+ _is_filter = False
class Call(Expr):
@@ -727,26 +821,33 @@ class Call(Expr):
"""
fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ node: Expr
+ args: t.List[Expr]
+ kwargs: t.List[Keyword]
+ dyn_args: t.Optional[Expr]
+ dyn_kwargs: t.Optional[Expr]
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ("node", "arg", "ctx")
+ node: Expr
+ arg: Expr
+ ctx: str
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
if self.ctx != "load":
raise Impossible()
+
+ eval_ctx = get_eval_context(self, eval_ctx)
+
try:
- return self.environment.getitem(
+ return eval_ctx.environment.getitem(
self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
)
- except Exception:
- raise Impossible()
-
- def can_assign(self):
- return False
+ except Exception as e:
+ raise Impossible() from e
class Getattr(Expr):
@@ -755,18 +856,20 @@ class Getattr(Expr):
"""
fields = ("node", "attr", "ctx")
+ node: Expr
+ attr: str
+ ctx: str
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
if self.ctx != "load":
raise Impossible()
- try:
- eval_ctx = get_eval_context(self, eval_ctx)
- return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
- except Exception:
- raise Impossible()
- def can_assign(self):
- return False
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ try:
+ return eval_ctx.environment.getattr(self.node.as_const(eval_ctx), self.attr)
+ except Exception as e:
+ raise Impossible() from e
class Slice(Expr):
@@ -775,11 +878,14 @@ class Slice(Expr):
"""
fields = ("start", "stop", "step")
+ start: t.Optional[Expr]
+ stop: t.Optional[Expr]
+ step: t.Optional[Expr]
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> slice:
eval_ctx = get_eval_context(self, eval_ctx)
- def const(obj):
+ def const(obj: t.Optional[Expr]) -> t.Optional[t.Any]:
if obj is None:
return None
return obj.as_const(eval_ctx)
@@ -788,15 +894,16 @@ class Slice(Expr):
class Concat(Expr):
- """Concatenates the list of expressions provided after converting them to
- unicode.
+ """Concatenates the list of expressions provided after converting
+ them to strings.
"""
fields = ("nodes",)
+ nodes: t.List[Expr]
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
eval_ctx = get_eval_context(self, eval_ctx)
- return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+ return "".join(str(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
@@ -805,8 +912,10 @@ class Compare(Expr):
"""
fields = ("expr", "ops")
+ expr: Expr
+ ops: t.List["Operand"]
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
@@ -819,8 +928,8 @@ class Compare(Expr):
return False
value = new_value
- except Exception:
- raise Impossible()
+ except Exception as e:
+ raise Impossible() from e
return result
@@ -829,15 +938,8 @@ class Operand(Helper):
"""Holds an operator and an expression."""
fields = ("op", "expr")
-
-
-if __debug__:
- Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
- sorted(
- "``%s``" % x
- for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
- )
- )
+ op: str
+ expr: Expr
class Mul(BinExpr):
@@ -853,7 +955,7 @@ class Div(BinExpr):
class FloorDiv(BinExpr):
- """Divides the left by the right node and truncates conver the
+ """Divides the left by the right node and converts the
result into an integer by truncating.
"""
@@ -889,7 +991,7 @@ class And(BinExpr):
operator = "and"
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
@@ -899,7 +1001,7 @@ class Or(BinExpr):
operator = "or"
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
@@ -931,6 +1033,7 @@ class EnvironmentAttribute(Expr):
"""
fields = ("name",)
+ name: str
class ExtensionAttribute(Expr):
@@ -942,6 +1045,8 @@ class ExtensionAttribute(Expr):
"""
fields = ("identifier", "name")
+ identifier: str
+ name: str
class ImportedName(Expr):
@@ -952,6 +1057,7 @@ class ImportedName(Expr):
"""
fields = ("importname",)
+ importname: str
class InternalName(Expr):
@@ -959,12 +1065,13 @@ class InternalName(Expr):
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
- template and is not threated specially by the compiler.
+ template and is not treated specially by the compiler.
"""
fields = ("name",)
+ name: str
- def __init__(self):
+ def __init__(self) -> None:
raise TypeError(
"Can't create internal names. Use the "
"`free_identifier` method on a parser."
@@ -975,8 +1082,9 @@ class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ("expr",)
+ expr: Expr
- def as_const(self, eval_ctx=None):
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> Markup:
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
@@ -989,8 +1097,11 @@ class MarkSafeIfAutoescape(Expr):
"""
fields = ("expr",)
+ expr: Expr
- def as_const(self, eval_ctx=None):
+ def as_const(
+ self, eval_ctx: t.Optional[EvalContext] = None
+ ) -> t.Union[Markup, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
@@ -1012,9 +1123,9 @@ class ContextReference(Expr):
Getattr(ContextReference(), 'name'))
This is basically equivalent to using the
- :func:`~jinja2.contextfunction` decorator when using the
- high-level API, which causes a reference to the context to be passed
- as the first argument to a function.
+ :func:`~jinja2.pass_context` decorator when using the high-level
+ API, which causes a reference to the context to be passed as the
+ first argument to a function.
"""
@@ -1039,6 +1150,7 @@ class Scope(Stmt):
"""An artificial scope."""
fields = ("body",)
+ body: t.List[Node]
class OverlayScope(Stmt):
@@ -1056,6 +1168,8 @@ class OverlayScope(Stmt):
"""
fields = ("context", "body")
+ context: Expr
+ body: t.List[Node]
class EvalContextModifier(Stmt):
@@ -1068,6 +1182,7 @@ class EvalContextModifier(Stmt):
"""
fields = ("options",)
+ options: t.List[Keyword]
class ScopedEvalContextModifier(EvalContextModifier):
@@ -1077,12 +1192,13 @@ class ScopedEvalContextModifier(EvalContextModifier):
"""
fields = ("body",)
+ body: t.List[Node]
# make sure nobody creates custom nodes
-def _failing_new(*args, **kwargs):
+def _failing_new(*args: t.Any, **kwargs: t.Any) -> "te.NoReturn":
raise TypeError("can't create custom node types")
-NodeType.__new__ = staticmethod(_failing_new)
+NodeType.__new__ = staticmethod(_failing_new) # type: ignore
del _failing_new
diff --git a/deps/v8/third_party/jinja2/optimizer.py b/deps/v8/third_party/jinja2/optimizer.py
index 7bc78c4524..fe1010705e 100644
--- a/deps/v8/third_party/jinja2/optimizer.py
+++ b/deps/v8/third_party/jinja2/optimizer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""The optimizer tries to constant fold expressions and modify the AST
in place so that it should be faster to evaluate.
@@ -8,23 +7,30 @@ want. For example, loop unrolling doesn't work because unrolled loops
would have a different scope. The solution would be a second syntax tree
that stored the scoping rules.
"""
+import typing as t
+
from . import nodes
from .visitor import NodeTransformer
+if t.TYPE_CHECKING:
+ from .environment import Environment
+
-def optimize(node, environment):
+def optimize(node: nodes.Node, environment: "Environment") -> nodes.Node:
"""The context hint can be used to perform an static optimization
based on the context given."""
optimizer = Optimizer(environment)
- return optimizer.visit(node)
+ return t.cast(nodes.Node, optimizer.visit(node))
class Optimizer(NodeTransformer):
- def __init__(self, environment):
+ def __init__(self, environment: "t.Optional[Environment]") -> None:
self.environment = environment
- def generic_visit(self, node, *args, **kwargs):
- node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
+ def generic_visit(
+ self, node: nodes.Node, *args: t.Any, **kwargs: t.Any
+ ) -> nodes.Node:
+ node = super().generic_visit(node, *args, **kwargs)
# Do constant folding. Some other nodes besides Expr have
# as_const, but folding them causes errors later on.
diff --git a/deps/v8/third_party/jinja2/parser.py b/deps/v8/third_party/jinja2/parser.py
index d5881066f7..cefce2dfa1 100644
--- a/deps/v8/third_party/jinja2/parser.py
+++ b/deps/v8/third_party/jinja2/parser.py
@@ -1,12 +1,20 @@
-# -*- coding: utf-8 -*-
"""Parse tokens from the lexer into nodes for the compiler."""
+import typing
+import typing as t
+
from . import nodes
-from ._compat import imap
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .lexer import describe_token
from .lexer import describe_token_expr
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+_ImportInclude = t.TypeVar("_ImportInclude", nodes.Import, nodes.Include)
+_MacroCall = t.TypeVar("_MacroCall", nodes.Macro, nodes.CallBlock)
+
_statement_keywords = frozenset(
[
"for",
@@ -25,7 +33,7 @@ _statement_keywords = frozenset(
)
_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
-_math_nodes = {
+_math_nodes: t.Dict[str, t.Type[nodes.Expr]] = {
"add": nodes.Add,
"sub": nodes.Sub,
"mul": nodes.Mul,
@@ -35,26 +43,40 @@ _math_nodes = {
}
-class Parser(object):
+class Parser:
"""This is the central parsing class Jinja uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
- def __init__(self, environment, source, name=None, filename=None, state=None):
+ def __init__(
+ self,
+ environment: "Environment",
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> None:
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
- self.extensions = {}
+ self.extensions: t.Dict[
+ str, t.Callable[["Parser"], t.Union[nodes.Node, t.List[nodes.Node]]]
+ ] = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
- self._tag_stack = []
- self._end_token_stack = []
+ self._tag_stack: t.List[str] = []
+ self._end_token_stack: t.List[t.Tuple[str, ...]] = []
- def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
+ def fail(
+ self,
+ msg: str,
+ lineno: t.Optional[int] = None,
+ exc: t.Type[TemplateSyntaxError] = TemplateSyntaxError,
+ ) -> "te.NoReturn":
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
@@ -63,13 +85,18 @@ class Parser(object):
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
- def _fail_ut_eof(self, name, end_token_stack, lineno):
- expected = []
+ def _fail_ut_eof(
+ self,
+ name: t.Optional[str],
+ end_token_stack: t.List[t.Tuple[str, ...]],
+ lineno: t.Optional[int],
+ ) -> "te.NoReturn":
+ expected: t.Set[str] = set()
for exprs in end_token_stack:
- expected.extend(imap(describe_token_expr, exprs))
+ expected.update(map(describe_token_expr, exprs))
if end_token_stack:
- currently_looking = " or ".join(
- "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
+ currently_looking: t.Optional[str] = " or ".join(
+ map(repr, map(describe_token_expr, end_token_stack[-1]))
)
else:
currently_looking = None
@@ -77,59 +104,65 @@ class Parser(object):
if name is None:
message = ["Unexpected end of template."]
else:
- message = ["Encountered unknown tag '%s'." % name]
+ message = [f"Encountered unknown tag {name!r}."]
if currently_looking:
if name is not None and name in expected:
message.append(
- "You probably made a nesting mistake. Jinja "
- "is expecting this tag, but currently looking "
- "for %s." % currently_looking
+ "You probably made a nesting mistake. Jinja is expecting this tag,"
+ f" but currently looking for {currently_looking}."
)
else:
message.append(
- "Jinja was looking for the following tags: "
- "%s." % currently_looking
+ f"Jinja was looking for the following tags: {currently_looking}."
)
if self._tag_stack:
message.append(
- "The innermost block that needs to be "
- "closed is '%s'." % self._tag_stack[-1]
+ "The innermost block that needs to be closed is"
+ f" {self._tag_stack[-1]!r}."
)
self.fail(" ".join(message), lineno)
- def fail_unknown_tag(self, name, lineno=None):
+ def fail_unknown_tag(
+ self, name: str, lineno: t.Optional[int] = None
+ ) -> "te.NoReturn":
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
- return self._fail_ut_eof(name, self._end_token_stack, lineno)
+ self._fail_ut_eof(name, self._end_token_stack, lineno)
- def fail_eof(self, end_tokens=None, lineno=None):
+ def fail_eof(
+ self,
+ end_tokens: t.Optional[t.Tuple[str, ...]] = None,
+ lineno: t.Optional[int] = None,
+ ) -> "te.NoReturn":
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
- return self._fail_ut_eof(None, stack, lineno)
+ self._fail_ut_eof(None, stack, lineno)
- def is_tuple_end(self, extra_end_rules=None):
+ def is_tuple_end(
+ self, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None
+ ) -> bool:
"""Are we at the end of a tuple?"""
if self.stream.current.type in ("variable_end", "block_end", "rparen"):
return True
elif extra_end_rules is not None:
- return self.stream.current.test_any(extra_end_rules)
+ return self.stream.current.test_any(extra_end_rules) # type: ignore
return False
- def free_identifier(self, lineno=None):
+ def free_identifier(self, lineno: t.Optional[int] = None) -> nodes.InternalName:
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
- nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
+ nodes.Node.__init__(rv, f"fi{self._last_identifier}", lineno=lineno)
return rv
- def parse_statement(self):
+ def parse_statement(self) -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""Parse a single statement."""
token = self.stream.current
if token.type != "name":
@@ -138,7 +171,8 @@ class Parser(object):
pop_tag = True
try:
if token.value in _statement_keywords:
- return getattr(self, "parse_" + self.stream.current.value)()
+ f = getattr(self, f"parse_{self.stream.current.value}")
+ return f() # type: ignore
if token.value == "call":
return self.parse_call_block()
if token.value == "filter":
@@ -157,7 +191,9 @@ class Parser(object):
if pop_tag:
self._tag_stack.pop()
- def parse_statements(self, end_tokens, drop_needle=False):
+ def parse_statements(
+ self, end_tokens: t.Tuple[str, ...], drop_needle: bool = False
+ ) -> t.List[nodes.Node]:
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
@@ -184,7 +220,7 @@ class Parser(object):
next(self.stream)
return result
- def parse_set(self):
+ def parse_set(self) -> t.Union[nodes.Assign, nodes.AssignBlock]:
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
@@ -195,7 +231,7 @@ class Parser(object):
body = self.parse_statements(("name:endset",), drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
- def parse_for(self):
+ def parse_for(self) -> nodes.For:
"""Parse a for loop."""
lineno = self.stream.expect("name:for").lineno
target = self.parse_assign_target(extra_end_rules=("name:in",))
@@ -214,10 +250,10 @@ class Parser(object):
else_ = self.parse_statements(("name:endfor",), drop_needle=True)
return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
- def parse_if(self):
+ def parse_if(self) -> nodes.If:
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
- while 1:
+ while True:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
node.elif_ = []
@@ -232,10 +268,10 @@ class Parser(object):
break
return result
- def parse_with(self):
+ def parse_with(self) -> nodes.With:
node = nodes.With(lineno=next(self.stream).lineno)
- targets = []
- values = []
+ targets: t.List[nodes.Expr] = []
+ values: t.List[nodes.Expr] = []
while self.stream.current.type != "block_end":
if targets:
self.stream.expect("comma")
@@ -249,37 +285,50 @@ class Parser(object):
node.body = self.parse_statements(("name:endwith",), drop_needle=True)
return node
- def parse_autoescape(self):
+ def parse_autoescape(self) -> nodes.Scope:
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [nodes.Keyword("autoescape", self.parse_expression())]
node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
return nodes.Scope([node])
- def parse_block(self):
+ def parse_block(self) -> nodes.Block:
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect("name").value
node.scoped = self.stream.skip_if("name:scoped")
+ node.required = self.stream.skip_if("name:required")
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == "sub":
self.fail(
- "Block names in Jinja have to be valid Python "
- "identifiers and may not contain hyphens, use an "
- "underscore instead."
+ "Block names in Jinja have to be valid Python identifiers and may not"
+ " contain hyphens, use an underscore instead."
)
node.body = self.parse_statements(("name:endblock",), drop_needle=True)
+
+ # enforce that required blocks only contain whitespace or comments
+ # by asserting that the body, if not empty, is just TemplateData nodes
+ # with whitespace data
+ if node.required and not all(
+ isinstance(child, nodes.TemplateData) and child.data.isspace()
+ for body in node.body
+ for child in body.nodes # type: ignore
+ ):
+ self.fail("Required blocks can only contain comments or whitespace")
+
self.stream.skip_if("name:" + node.name)
return node
- def parse_extends(self):
+ def parse_extends(self) -> nodes.Extends:
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
- def parse_import_context(self, node, default):
+ def parse_import_context(
+ self, node: _ImportInclude, default: bool
+ ) -> _ImportInclude:
if self.stream.current.test_any(
"name:with", "name:without"
) and self.stream.look().test("name:context"):
@@ -289,7 +338,7 @@ class Parser(object):
node.with_context = default
return node
- def parse_include(self):
+ def parse_include(self) -> nodes.Include:
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test("name:ignore") and self.stream.look().test(
@@ -301,30 +350,30 @@ class Parser(object):
node.ignore_missing = False
return self.parse_import_context(node, True)
- def parse_import(self):
+ def parse_import(self) -> nodes.Import:
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect("name:as")
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
- def parse_from(self):
+ def parse_from(self) -> nodes.FromImport:
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect("name:import")
node.names = []
- def parse_context():
- if self.stream.current.value in (
+ def parse_context() -> bool:
+ if self.stream.current.value in {
"with",
"without",
- ) and self.stream.look().test("name:context"):
+ } and self.stream.look().test("name:context"):
node.with_context = next(self.stream).value == "with"
self.stream.skip()
return True
return False
- while 1:
+ while True:
if node.names:
self.stream.expect("comma")
if self.stream.current.type == "name":
@@ -350,9 +399,9 @@ class Parser(object):
node.with_context = False
return node
- def parse_signature(self, node):
- node.args = args = []
- node.defaults = defaults = []
+ def parse_signature(self, node: _MacroCall) -> None:
+ args = node.args = []
+ defaults = node.defaults = []
self.stream.expect("lparen")
while self.stream.current.type != "rparen":
if args:
@@ -366,7 +415,7 @@ class Parser(object):
args.append(arg)
self.stream.expect("rparen")
- def parse_call_block(self):
+ def parse_call_block(self) -> nodes.CallBlock:
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == "lparen":
self.parse_signature(node)
@@ -374,26 +423,27 @@ class Parser(object):
node.args = []
node.defaults = []
- node.call = self.parse_expression()
- if not isinstance(node.call, nodes.Call):
+ call_node = self.parse_expression()
+ if not isinstance(call_node, nodes.Call):
self.fail("expected call", node.lineno)
+ node.call = call_node
node.body = self.parse_statements(("name:endcall",), drop_needle=True)
return node
- def parse_filter_block(self):
+ def parse_filter_block(self) -> nodes.FilterBlock:
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
- node.filter = self.parse_filter(None, start_inline=True)
+ node.filter = self.parse_filter(None, start_inline=True) # type: ignore
node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
return node
- def parse_macro(self):
+ def parse_macro(self) -> nodes.Macro:
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
return node
- def parse_print(self):
+ def parse_print(self) -> nodes.Output:
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != "block_end":
@@ -402,13 +452,29 @@ class Parser(object):
node.nodes.append(self.parse_expression())
return node
+ @typing.overload
+ def parse_assign_target(
+ self, with_tuple: bool = ..., name_only: "te.Literal[True]" = ...
+ ) -> nodes.Name:
+ ...
+
+ @typing.overload
def parse_assign_target(
self,
- with_tuple=True,
- name_only=False,
- extra_end_rules=None,
- with_namespace=False,
- ):
+ with_tuple: bool = True,
+ name_only: bool = False,
+ extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
+ with_namespace: bool = False,
+ ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]:
+ ...
+
+ def parse_assign_target(
+ self,
+ with_tuple: bool = True,
+ name_only: bool = False,
+ extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
+ with_namespace: bool = False,
+ ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]:
"""Parse an assignment target. As Jinja allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disabled however
@@ -417,6 +483,8 @@ class Parser(object):
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
+ target: nodes.Expr
+
if with_namespace and self.stream.look().type == "dot":
token = self.stream.expect("name")
next(self.stream) # dot
@@ -432,14 +500,17 @@ class Parser(object):
)
else:
target = self.parse_primary()
+
target.set_ctx("store")
+
if not target.can_assign():
self.fail(
- "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
+ f"can't assign to {type(target).__name__.lower()!r}", target.lineno
)
- return target
- def parse_expression(self, with_condexpr=True):
+ return target # type: ignore
+
+ def parse_expression(self, with_condexpr: bool = True) -> nodes.Expr:
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
@@ -448,9 +519,11 @@ class Parser(object):
return self.parse_condexpr()
return self.parse_or()
- def parse_condexpr(self):
+ def parse_condexpr(self) -> nodes.Expr:
lineno = self.stream.current.lineno
expr1 = self.parse_or()
+ expr3: t.Optional[nodes.Expr]
+
while self.stream.skip_if("name:if"):
expr2 = self.parse_or()
if self.stream.skip_if("name:else"):
@@ -461,7 +534,7 @@ class Parser(object):
lineno = self.stream.current.lineno
return expr1
- def parse_or(self):
+ def parse_or(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if("name:or"):
@@ -470,7 +543,7 @@ class Parser(object):
lineno = self.stream.current.lineno
return left
- def parse_and(self):
+ def parse_and(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if("name:and"):
@@ -479,17 +552,17 @@ class Parser(object):
lineno = self.stream.current.lineno
return left
- def parse_not(self):
+ def parse_not(self) -> nodes.Expr:
if self.stream.current.test("name:not"):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
- def parse_compare(self):
+ def parse_compare(self) -> nodes.Expr:
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
- while 1:
+ while True:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
@@ -508,7 +581,7 @@ class Parser(object):
return expr
return nodes.Compare(expr, ops, lineno=lineno)
- def parse_math1(self):
+ def parse_math1(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ("add", "sub"):
@@ -519,7 +592,7 @@ class Parser(object):
lineno = self.stream.current.lineno
return left
- def parse_concat(self):
+ def parse_concat(self) -> nodes.Expr:
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == "tilde":
@@ -529,7 +602,7 @@ class Parser(object):
return args[0]
return nodes.Concat(args, lineno=lineno)
- def parse_math2(self):
+ def parse_math2(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
@@ -540,7 +613,7 @@ class Parser(object):
lineno = self.stream.current.lineno
return left
- def parse_pow(self):
+ def parse_pow(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == "pow":
@@ -550,9 +623,11 @@ class Parser(object):
lineno = self.stream.current.lineno
return left
- def parse_unary(self, with_filter=True):
+ def parse_unary(self, with_filter: bool = True) -> nodes.Expr:
token_type = self.stream.current.type
lineno = self.stream.current.lineno
+ node: nodes.Expr
+
if token_type == "sub":
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
@@ -566,8 +641,9 @@ class Parser(object):
node = self.parse_filter_expr(node)
return node
- def parse_primary(self):
+ def parse_primary(self) -> nodes.Expr:
token = self.stream.current
+ node: nodes.Expr
if token.type == "name":
if token.value in ("true", "false", "True", "False"):
node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
@@ -596,16 +672,16 @@ class Parser(object):
elif token.type == "lbrace":
node = self.parse_dict()
else:
- self.fail("unexpected '%s'" % describe_token(token), token.lineno)
+ self.fail(f"unexpected {describe_token(token)!r}", token.lineno)
return node
def parse_tuple(
self,
- simplified=False,
- with_condexpr=True,
- extra_end_rules=None,
- explicit_parentheses=False,
- ):
+ simplified: bool = False,
+ with_condexpr: bool = True,
+ extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
+ explicit_parentheses: bool = False,
+ ) -> t.Union[nodes.Tuple, nodes.Expr]:
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
@@ -631,12 +707,13 @@ class Parser(object):
parse = self.parse_expression
else:
- def parse():
+ def parse() -> nodes.Expr:
return self.parse_expression(with_condexpr=False)
- args = []
+ args: t.List[nodes.Expr] = []
is_tuple = False
- while 1:
+
+ while True:
if args:
self.stream.expect("comma")
if self.is_tuple_end(extra_end_rules):
@@ -658,15 +735,15 @@ class Parser(object):
# tuple.
if not explicit_parentheses:
self.fail(
- "Expected an expression, got '%s'"
- % describe_token(self.stream.current)
+ "Expected an expression,"
+ f" got {describe_token(self.stream.current)!r}"
)
return nodes.Tuple(args, "load", lineno=lineno)
- def parse_list(self):
+ def parse_list(self) -> nodes.List:
token = self.stream.expect("lbracket")
- items = []
+ items: t.List[nodes.Expr] = []
while self.stream.current.type != "rbracket":
if items:
self.stream.expect("comma")
@@ -676,9 +753,9 @@ class Parser(object):
self.stream.expect("rbracket")
return nodes.List(items, lineno=token.lineno)
- def parse_dict(self):
+ def parse_dict(self) -> nodes.Dict:
token = self.stream.expect("lbrace")
- items = []
+ items: t.List[nodes.Pair] = []
while self.stream.current.type != "rbrace":
if items:
self.stream.expect("comma")
@@ -691,8 +768,8 @@ class Parser(object):
self.stream.expect("rbrace")
return nodes.Dict(items, lineno=token.lineno)
- def parse_postfix(self, node):
- while 1:
+ def parse_postfix(self, node: nodes.Expr) -> nodes.Expr:
+ while True:
token_type = self.stream.current.type
if token_type == "dot" or token_type == "lbracket":
node = self.parse_subscript(node)
@@ -704,11 +781,11 @@ class Parser(object):
break
return node
- def parse_filter_expr(self, node):
- while 1:
+ def parse_filter_expr(self, node: nodes.Expr) -> nodes.Expr:
+ while True:
token_type = self.stream.current.type
if token_type == "pipe":
- node = self.parse_filter(node)
+ node = self.parse_filter(node) # type: ignore
elif token_type == "name" and self.stream.current.value == "is":
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
@@ -719,8 +796,12 @@ class Parser(object):
break
return node
- def parse_subscript(self, node):
+ def parse_subscript(
+ self, node: nodes.Expr
+ ) -> t.Union[nodes.Getattr, nodes.Getitem]:
token = next(self.stream)
+ arg: nodes.Expr
+
if token.type == "dot":
attr_token = self.stream.current
next(self.stream)
@@ -733,7 +814,7 @@ class Parser(object):
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, "load", lineno=token.lineno)
if token.type == "lbracket":
- args = []
+ args: t.List[nodes.Expr] = []
while self.stream.current.type != "rbracket":
if args:
self.stream.expect("comma")
@@ -746,8 +827,9 @@ class Parser(object):
return nodes.Getitem(node, arg, "load", lineno=token.lineno)
self.fail("expected subscript expression", token.lineno)
- def parse_subscribed(self):
+ def parse_subscribed(self) -> nodes.Expr:
lineno = self.stream.current.lineno
+ args: t.List[t.Optional[nodes.Expr]]
if self.stream.current.type == "colon":
next(self.stream)
@@ -777,23 +859,26 @@ class Parser(object):
return nodes.Slice(lineno=lineno, *args)
- def parse_call(self, node):
+ def parse_call_args(self) -> t.Tuple:
token = self.stream.expect("lparen")
args = []
kwargs = []
- dyn_args = dyn_kwargs = None
+ dyn_args = None
+ dyn_kwargs = None
require_comma = False
- def ensure(expr):
+ def ensure(expr: bool) -> None:
if not expr:
self.fail("invalid syntax for function call expression", token.lineno)
while self.stream.current.type != "rparen":
if require_comma:
self.stream.expect("comma")
+
# support for trailing comma
if self.stream.current.type == "rparen":
break
+
if self.stream.current.type == "mul":
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
@@ -819,13 +904,20 @@ class Parser(object):
args.append(self.parse_expression())
require_comma = True
+
self.stream.expect("rparen")
+ return args, kwargs, dyn_args, dyn_kwargs
- if node is None:
- return args, kwargs, dyn_args, dyn_kwargs
+ def parse_call(self, node: nodes.Expr) -> nodes.Call:
+ # The lparen will be expected in parse_call_args, but the lineno
+ # needs to be recorded before the stream is advanced.
+ token = self.stream.current
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
- def parse_filter(self, node, start_inline=False):
+ def parse_filter(
+ self, node: t.Optional[nodes.Expr], start_inline: bool = False
+ ) -> t.Optional[nodes.Expr]:
while self.stream.current.type == "pipe" or start_inline:
if not start_inline:
next(self.stream)
@@ -835,7 +927,7 @@ class Parser(object):
next(self.stream)
name += "." + self.stream.expect("name").value
if self.stream.current.type == "lparen":
- args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
else:
args = []
kwargs = []
@@ -846,7 +938,7 @@ class Parser(object):
start_inline = False
return node
- def parse_test(self, node):
+ def parse_test(self, node: nodes.Expr) -> nodes.Expr:
token = next(self.stream)
if self.stream.current.test("name:not"):
next(self.stream)
@@ -860,8 +952,8 @@ class Parser(object):
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == "lparen":
- args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
- elif self.stream.current.type in (
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
+ elif self.stream.current.type in {
"name",
"string",
"integer",
@@ -869,7 +961,7 @@ class Parser(object):
"lparen",
"lbracket",
"lbrace",
- ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
+ } and not self.stream.current.test_any("name:else", "name:or", "name:and"):
if self.stream.current.test("name:is"):
self.fail("You cannot chain multiple tests with is")
arg_node = self.parse_primary()
@@ -884,15 +976,17 @@ class Parser(object):
node = nodes.Not(node, lineno=token.lineno)
return node
- def subparse(self, end_tokens=None):
- body = []
- data_buffer = []
+ def subparse(
+ self, end_tokens: t.Optional[t.Tuple[str, ...]] = None
+ ) -> t.List[nodes.Node]:
+ body: t.List[nodes.Node] = []
+ data_buffer: t.List[nodes.Node] = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
- def flush_data():
+ def flush_data() -> None:
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
@@ -929,10 +1023,9 @@ class Parser(object):
finally:
if end_tokens is not None:
self._end_token_stack.pop()
-
return body
- def parse(self):
+ def parse(self) -> nodes.Template:
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
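The annotated Parser above is normally driven through Environment.parse(); a minimal sketch of exercising it, using the packaged jinja2 import paths (jinja2.meta is upstream API, not part of this diff):

from jinja2 import Environment, meta, nodes

env = Environment()
# Environment.parse() builds a Parser internally and returns a nodes.Template AST.
ast = env.parse("{% for item in items %}{{ item | upper }}{% endfor %}")
assert isinstance(ast, nodes.Template)

# The AST can then be analysed, e.g. to find names the template expects from
# the render context ('item' is bound by the for loop, 'items' is not).
print(sorted(meta.find_undeclared_variables(ast)))  # ['items']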
diff --git a/deps/v8/third_party/jinja2/patches/0001-jinja2-make-compiled-template-deterministic-for-pyth.patch b/deps/v8/third_party/jinja2/patches/0001-jinja2-make-compiled-template-deterministic-for-pyth.patch
deleted file mode 100644
index 85dd20d27e..0000000000
--- a/deps/v8/third_party/jinja2/patches/0001-jinja2-make-compiled-template-deterministic-for-pyth.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Takuto Ikuta <tikuta@chromium.org>
-Date: Mon, 24 May 2021 17:09:21 +0900
-Subject: [PATCH] jinja2: make compiled template deterministic for python3
-
-set() doesn't have deterministic iteration order, so need this to
-have deterministic output from jinja2.
-
-Bug: 1194274
-
-diff -Naur a/compiler.py b/compiler.py
---- a/compiler.py 2021-10-05 23:41:45.774333282 +0900
-+++ b/compiler.py 2021-10-06 16:46:08.082078686 +0900
-@@ -468,7 +468,7 @@
- visitor.visit(node)
- for dependency in "filters", "tests":
- mapping = getattr(self, dependency)
-- for name in getattr(visitor, dependency):
-+ for name in sorted(getattr(visitor, dependency)):
- if name not in mapping:
- mapping[name] = self.temporary_identifier()
- self.writeline(
-@@ -612,7 +612,7 @@
- def dump_local_context(self, frame):
- return "{%s}" % ", ".join(
- "%r: %s" % (name, target)
-- for name, target in iteritems(frame.symbols.dump_stores())
-+ for name, target in sorted(iteritems(frame.symbols.dump_stores()))
- )
-
- def write_commons(self):
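The patch removed above existed to make the generated module source byte-for-byte reproducible by sorting set and dict iteration before writing code. A small plain-Python illustration of the underlying issue (not Jinja2 API):

# With string hash randomization (the default on Python 3), set iteration
# order can differ between interpreter runs, so emitting code directly from
# a set iteration is not reproducible; sorting the names first is.
names = {"upper", "lower", "trim"}
unstable = ", ".join(names)        # order may vary from run to run
stable = ", ".join(sorted(names))  # always "lower, trim, upper"
print(stable)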
diff --git a/deps/v8/third_party/jinja2/py.typed b/deps/v8/third_party/jinja2/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/third_party/jinja2/py.typed
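py.typed is an empty PEP 561 marker telling type checkers that the package ships inline annotations. A minimal sketch of the kind of call site a checker such as mypy can now verify against those annotations (packaged jinja2 import path assumed):

from jinja2 import Environment, Template

env: Environment = Environment(autoescape=True)
tmpl: Template = env.from_string("Hello {{ name }}!")
greeting: str = tmpl.render(name="world")  # Template.render() is annotated -> str
print(greeting)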
diff --git a/deps/v8/third_party/jinja2/runtime.py b/deps/v8/third_party/jinja2/runtime.py
index 3ad7968624..985842b284 100644
--- a/deps/v8/third_party/jinja2/runtime.py
+++ b/deps/v8/third_party/jinja2/runtime.py
@@ -1,32 +1,45 @@
-# -*- coding: utf-8 -*-
"""The runtime functions and state used by compiled templates."""
+import functools
import sys
+import typing as t
+from collections import abc
from itertools import chain
-from types import MethodType
from markupsafe import escape # noqa: F401
from markupsafe import Markup
-from markupsafe import soft_unicode
-
-from ._compat import abc
-from ._compat import imap
-from ._compat import implements_iterator
-from ._compat import implements_to_string
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import with_metaclass
+from markupsafe import soft_str
+
+from .async_utils import auto_aiter
+from .async_utils import auto_await # noqa: F401
from .exceptions import TemplateNotFound # noqa: F401
from .exceptions import TemplateRuntimeError # noqa: F401
from .exceptions import UndefinedError
from .nodes import EvalContext
+from .utils import _PassArg
from .utils import concat
-from .utils import evalcontextfunction
from .utils import internalcode
from .utils import missing
from .utils import Namespace # noqa: F401
from .utils import object_type_repr
+from .utils import pass_eval_context
+
+V = t.TypeVar("V")
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+if t.TYPE_CHECKING:
+ import logging
+ import typing_extensions as te
+ from .environment import Environment
+
+ class LoopRenderFunc(te.Protocol):
+ def __call__(
+ self,
+ reciter: t.Iterable[V],
+ loop_render_func: "LoopRenderFunc",
+ depth: int = 0,
+ ) -> str:
+ ...
+
# these variables are exported to the template runtime
exported = [
@@ -36,54 +49,54 @@ exported = [
"Markup",
"TemplateRuntimeError",
"missing",
- "concat",
"escape",
"markup_join",
- "unicode_join",
- "to_string",
+ "str_join",
"identity",
"TemplateNotFound",
"Namespace",
"Undefined",
+ "internalcode",
+]
+async_exported = [
+ "AsyncLoopContext",
+ "auto_aiter",
+ "auto_await",
]
-
-#: the name of the function that is used to convert something into
-#: a string. We can just use the text type here.
-to_string = text_type
-def identity(x):
+def identity(x: V) -> V:
"""Returns its argument. Useful for certain things in the
environment.
"""
return x
-def markup_join(seq):
- """Concatenation that escapes if necessary and converts to unicode."""
+def markup_join(seq: t.Iterable[t.Any]) -> str:
+ """Concatenation that escapes if necessary and converts to string."""
buf = []
- iterator = imap(soft_unicode, seq)
+ iterator = map(soft_str, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, "__html__"):
- return Markup(u"").join(chain(buf, iterator))
+ return Markup("").join(chain(buf, iterator))
return concat(buf)
-def unicode_join(seq):
- """Simple args to unicode conversion and concatenation."""
- return concat(imap(text_type, seq))
+def str_join(seq: t.Iterable[t.Any]) -> str:
+ """Simple args to string conversion and concatenation."""
+ return concat(map(str, seq))
def new_context(
- environment,
- template_name,
- blocks,
- vars=None,
- shared=None,
- globals=None,
- locals=None,
-):
+ environment: "Environment",
+ template_name: t.Optional[str],
+ blocks: t.Dict[str, t.Callable[["Context"], t.Iterator[str]]],
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+) -> "Context":
"""Internal helper for context creation."""
if vars is None:
vars = {}
@@ -96,66 +109,38 @@ def new_context(
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
- for key, value in iteritems(locals):
+ for key, value in locals.items():
if value is not missing:
parent[key] = value
- return environment.context_class(environment, parent, template_name, blocks)
+ return environment.context_class(
+ environment, parent, template_name, blocks, globals=globals
+ )
-class TemplateReference(object):
+class TemplateReference:
"""The `self` in templates."""
- def __init__(self, context):
+ def __init__(self, context: "Context") -> None:
self.__context = context
- def __getitem__(self, name):
+ def __getitem__(self, name: str) -> t.Any:
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
- def __repr__(self):
- return "<%s %r>" % (self.__class__.__name__, self.__context.name)
-
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.__context.name!r}>"
-def _get_func(x):
- return getattr(x, "__func__", x)
-
-
-class ContextMeta(type):
- def __new__(mcs, name, bases, d):
- rv = type.__new__(mcs, name, bases, d)
- if bases == ():
- return rv
-
- resolve = _get_func(rv.resolve)
- default_resolve = _get_func(Context.resolve)
- resolve_or_missing = _get_func(rv.resolve_or_missing)
- default_resolve_or_missing = _get_func(Context.resolve_or_missing)
-
- # If we have a changed resolve but no changed default or missing
- # resolve we invert the call logic.
- if (
- resolve is not default_resolve
- and resolve_or_missing is default_resolve_or_missing
- ):
- rv._legacy_resolve_mode = True
- elif (
- resolve is default_resolve
- and resolve_or_missing is default_resolve_or_missing
- ):
- rv._fast_resolve_mode = True
-
- return rv
+def _dict_method_all(dict_method: F) -> F:
+ @functools.wraps(dict_method)
+ def f_all(self: "Context") -> t.Any:
+ return dict_method(self.get_all())
-def resolve_or_missing(context, key, missing=missing):
- if key in context.vars:
- return context.vars[key]
- if key in context.parent:
- return context.parent[key]
- return missing
+ return t.cast(F, f_all)
-class Context(with_metaclass(ContextMeta)):
+@abc.Mapping.register
+class Context:
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful as it's created
@@ -165,7 +150,7 @@ class Context(with_metaclass(ContextMeta)):
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
- :func:`contextfunction`\\s get the active context passed as first argument
+ :func:`pass_context` get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
@@ -175,30 +160,30 @@ class Context(with_metaclass(ContextMeta)):
:class:`Undefined` object for missing variables.
"""
- # XXX: we want to eventually make this be a deprecation warning and
- # remove it.
- _legacy_resolve_mode = False
- _fast_resolve_mode = False
-
- def __init__(self, environment, parent, name, blocks):
+ def __init__(
+ self,
+ environment: "Environment",
+ parent: t.Dict[str, t.Any],
+ name: t.Optional[str],
+ blocks: t.Dict[str, t.Callable[["Context"], t.Iterator[str]]],
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ):
self.parent = parent
- self.vars = {}
- self.environment = environment
+ self.vars: t.Dict[str, t.Any] = {}
+ self.environment: "Environment" = environment
self.eval_ctx = EvalContext(self.environment, name)
- self.exported_vars = set()
+ self.exported_vars: t.Set[str] = set()
self.name = name
+ self.globals_keys = set() if globals is None else set(globals)
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
- self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
-
- # In case we detect the fast resolve mode we can set up an alias
- # here that bypasses the legacy code logic.
- if self._fast_resolve_mode:
- self.resolve_or_missing = MethodType(resolve_or_missing, self)
+ self.blocks = {k: [v] for k, v in blocks.items()}
- def super(self, name, current):
+ def super(
+ self, name: str, current: t.Callable[["Context"], t.Iterator[str]]
+ ) -> t.Union["BlockReference", "Undefined"]:
"""Render a parent block."""
try:
blocks = self.blocks[name]
@@ -206,47 +191,62 @@ class Context(with_metaclass(ContextMeta)):
blocks[index]
except LookupError:
return self.environment.undefined(
- "there is no parent block called %r." % name, name="super"
+ f"there is no parent block called {name!r}.", name="super"
)
return BlockReference(name, self, blocks, index)
- def get(self, key, default=None):
- """Returns an item from the template context, if it doesn't exist
- `default` is returned.
+ def get(self, key: str, default: t.Any = None) -> t.Any:
+ """Look up a variable by name, or return a default if the key is
+ not found.
+
+ :param key: The variable name to look up.
+ :param default: The value to return if the key is not found.
"""
try:
return self[key]
except KeyError:
return default
- def resolve(self, key):
- """Looks up a variable like `__getitem__` or `get` but returns an
- :class:`Undefined` object with the name of the name looked up.
+ def resolve(self, key: str) -> t.Union[t.Any, "Undefined"]:
+ """Look up a variable by name, or return an :class:`Undefined`
+ object if the key is not found.
+
+ If you need to add custom behavior, override
+ :meth:`resolve_or_missing`, not this method. The various lookup
+ functions use that method, not this one.
+
+ :param key: The variable name to look up.
"""
- if self._legacy_resolve_mode:
- rv = resolve_or_missing(self, key)
- else:
- rv = self.resolve_or_missing(key)
+ rv = self.resolve_or_missing(key)
+
if rv is missing:
return self.environment.undefined(name=key)
+
return rv
- def resolve_or_missing(self, key):
- """Resolves a variable like :meth:`resolve` but returns the
- special `missing` value if it cannot be found.
+ def resolve_or_missing(self, key: str) -> t.Any:
+ """Look up a variable by name, or return a ``missing`` sentinel
+ if the key is not found.
+
+ Override this method to add custom lookup behavior.
+ :meth:`resolve`, :meth:`get`, and :meth:`__getitem__` use this
+ method. Don't call this method directly.
+
+ :param key: The variable name to look up.
"""
- if self._legacy_resolve_mode:
- rv = self.resolve(key)
- if isinstance(rv, Undefined):
- rv = missing
- return rv
- return resolve_or_missing(self, key)
-
- def get_exported(self):
+ if key in self.vars:
+ return self.vars[key]
+
+ if key in self.parent:
+ return self.parent[key]
+
+ return missing
+
+ def get_exported(self) -> t.Dict[str, t.Any]:
"""Get a new dict with the exported variables."""
- return dict((k, self.vars[k]) for k in self.exported_vars)
+ return {k: self.vars[k] for k in self.exported_vars}
- def get_all(self):
+ def get_all(self) -> t.Dict[str, t.Any]:
"""Return the complete context as dict including the exported
variables. For optimization reasons this might not return an
actual copy so be careful with using it.
@@ -258,44 +258,51 @@ class Context(with_metaclass(ContextMeta)):
return dict(self.parent, **self.vars)
@internalcode
- def call(__self, __obj, *args, **kwargs): # noqa: B902
+ def call(
+ __self, __obj: t.Callable, *args: t.Any, **kwargs: t.Any # noqa: B902
+ ) -> t.Union[t.Any, "Undefined"]:
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
- argument if the callable is a :func:`contextfunction` or
- :func:`environmentfunction`.
+ argument if the callable has :func:`pass_context` or
+ :func:`pass_environment`.
"""
if __debug__:
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
- if hasattr(__obj, "__call__"): # noqa: B004
- fn = __obj.__call__
- for fn_type in (
- "contextfunction",
- "evalcontextfunction",
- "environmentfunction",
- ):
- if hasattr(fn, fn_type):
- __obj = fn
- break
-
- if callable(__obj):
- if getattr(__obj, "contextfunction", False) is True:
- args = (__self,) + args
- elif getattr(__obj, "evalcontextfunction", False) is True:
- args = (__self.eval_ctx,) + args
- elif getattr(__obj, "environmentfunction", False) is True:
- args = (__self.environment,) + args
+ if (
+ hasattr(__obj, "__call__") # noqa: B004
+ and _PassArg.from_obj(__obj.__call__) is not None # type: ignore
+ ):
+ __obj = __obj.__call__ # type: ignore
+
+ pass_arg = _PassArg.from_obj(__obj)
+
+ if pass_arg is _PassArg.context:
+ # the active context should have access to variables set in
+ # loops and blocks without mutating the context itself
+ if kwargs.get("_loop_vars"):
+ __self = __self.derived(kwargs["_loop_vars"])
+ if kwargs.get("_block_vars"):
+ __self = __self.derived(kwargs["_block_vars"])
+ args = (__self,) + args
+ elif pass_arg is _PassArg.eval_context:
+ args = (__self.eval_ctx,) + args
+ elif pass_arg is _PassArg.environment:
+ args = (__self.environment,) + args
+
+ kwargs.pop("_block_vars", None)
+ kwargs.pop("_loop_vars", None)
+
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined(
- "value was undefined because "
- "a callable raised a "
- "StopIteration exception"
+ "value was undefined because a callable raised a"
+ " StopIteration exception"
)
- def derived(self, locals=None):
+ def derived(self, locals: t.Optional[t.Dict[str, t.Any]] = None) -> "Context":
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
@@ -304,78 +311,79 @@ class Context(with_metaclass(ContextMeta)):
self.environment, self.name, {}, self.get_all(), True, None, locals
)
context.eval_ctx = self.eval_ctx
- context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
+ context.blocks.update((k, list(v)) for k, v in self.blocks.items())
return context
- def _all(meth): # noqa: B902
- def proxy(self):
- return getattr(self.get_all(), meth)()
-
- proxy.__doc__ = getattr(dict, meth).__doc__
- proxy.__name__ = meth
- return proxy
-
- keys = _all("keys")
- values = _all("values")
- items = _all("items")
+ keys = _dict_method_all(dict.keys)
+ values = _dict_method_all(dict.values)
+ items = _dict_method_all(dict.items)
- # not available on python 3
- if PY2:
- iterkeys = _all("iterkeys")
- itervalues = _all("itervalues")
- iteritems = _all("iteritems")
- del _all
-
- def __contains__(self, name):
+ def __contains__(self, name: str) -> bool:
return name in self.vars or name in self.parent
- def __getitem__(self, key):
- """Lookup a variable or raise `KeyError` if the variable is
- undefined.
+ def __getitem__(self, key: str) -> t.Any:
+ """Look up a variable by name with ``[]`` syntax, or raise a
+ ``KeyError`` if the key is not found.
"""
item = self.resolve_or_missing(key)
+
if item is missing:
raise KeyError(key)
- return item
-
- def __repr__(self):
- return "<%s %s of %r>" % (
- self.__class__.__name__,
- repr(self.get_all()),
- self.name,
- )
+ return item
-abc.Mapping.register(Context)
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.get_all()!r} of {self.name!r}>"
-class BlockReference(object):
+class BlockReference:
"""One block on a template reference."""
- def __init__(self, name, context, stack, depth):
+ def __init__(
+ self,
+ name: str,
+ context: "Context",
+ stack: t.List[t.Callable[["Context"], t.Iterator[str]]],
+ depth: int,
+ ) -> None:
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
- def super(self):
+ def super(self) -> t.Union["BlockReference", "Undefined"]:
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment.undefined(
- "there is no parent block called %r." % self.name, name="super"
+ f"there is no parent block called {self.name!r}.", name="super"
)
return BlockReference(self.name, self._context, self._stack, self._depth + 1)
@internalcode
- def __call__(self):
+ async def _async_call(self) -> str:
+ rv = concat(
+ [x async for x in self._stack[self._depth](self._context)] # type: ignore
+ )
+
+ if self._context.eval_ctx.autoescape:
+ return Markup(rv)
+
+ return rv
+
+ @internalcode
+ def __call__(self) -> str:
+ if self._context.environment.is_async:
+ return self._async_call() # type: ignore
+
rv = concat(self._stack[self._depth](self._context))
+
if self._context.eval_ctx.autoescape:
- rv = Markup(rv)
+ return Markup(rv)
+
return rv
-@implements_iterator
class LoopContext:
"""A wrapper iterable for dynamic ``for`` loops, with information
about the loop and iteration.
@@ -384,13 +392,19 @@ class LoopContext:
#: Current iteration of the loop, starting at 0.
index0 = -1
- _length = None
- _after = missing
- _current = missing
- _before = missing
- _last_changed_value = missing
+ _length: t.Optional[int] = None
+ _after: t.Any = missing
+ _current: t.Any = missing
+ _before: t.Any = missing
+ _last_changed_value: t.Any = missing
- def __init__(self, iterable, undefined, recurse=None, depth0=0):
+ def __init__(
+ self,
+ iterable: t.Iterable[V],
+ undefined: t.Type["Undefined"],
+ recurse: t.Optional["LoopRenderFunc"] = None,
+ depth0: int = 0,
+ ) -> None:
"""
:param iterable: Iterable to wrap.
:param undefined: :class:`Undefined` class to use for next and
@@ -407,11 +421,11 @@ class LoopContext:
self.depth0 = depth0
@staticmethod
- def _to_iterator(iterable):
+ def _to_iterator(iterable: t.Iterable[V]) -> t.Iterator[V]:
return iter(iterable)
@property
- def length(self):
+ def length(self) -> int:
"""Length of the iterable.
If the iterable is a generator or otherwise does not have a
@@ -421,7 +435,7 @@ class LoopContext:
return self._length
try:
- self._length = len(self._iterable)
+ self._length = len(self._iterable) # type: ignore
except TypeError:
iterable = list(self._iterator)
self._iterator = self._to_iterator(iterable)
@@ -429,21 +443,21 @@ class LoopContext:
return self._length
- def __len__(self):
+ def __len__(self) -> int:
return self.length
@property
- def depth(self):
+ def depth(self) -> int:
"""How many levels deep a recursive loop currently is, starting at 1."""
return self.depth0 + 1
@property
- def index(self):
+ def index(self) -> int:
"""Current iteration of the loop, starting at 1."""
return self.index0 + 1
@property
- def revindex0(self):
+ def revindex0(self) -> int:
"""Number of iterations from the end of the loop, ending at 0.
Requires calculating :attr:`length`.
@@ -451,7 +465,7 @@ class LoopContext:
return self.length - self.index
@property
- def revindex(self):
+ def revindex(self) -> int:
"""Number of iterations from the end of the loop, ending at 1.
Requires calculating :attr:`length`.
@@ -459,11 +473,11 @@ class LoopContext:
return self.length - self.index0
@property
- def first(self):
+ def first(self) -> bool:
"""Whether this is the first iteration of the loop."""
return self.index0 == 0
- def _peek_next(self):
+ def _peek_next(self) -> t.Any:
"""Return the next element in the iterable, or :data:`missing`
if the iterable is exhausted. Only peeks one item ahead, caching
the result in :attr:`_last` for use in subsequent checks. The
@@ -476,7 +490,7 @@ class LoopContext:
return self._after
@property
- def last(self):
+ def last(self) -> bool:
"""Whether this is the last iteration of the loop.
Causes the iterable to advance early. See
@@ -486,7 +500,7 @@ class LoopContext:
return self._peek_next() is missing
@property
- def previtem(self):
+ def previtem(self) -> t.Union[t.Any, "Undefined"]:
"""The item in the previous iteration. Undefined during the
first iteration.
"""
@@ -496,13 +510,13 @@ class LoopContext:
return self._before
@property
- def nextitem(self):
+ def nextitem(self) -> t.Union[t.Any, "Undefined"]:
"""The item in the next iteration. Undefined during the last
iteration.
Causes the iterable to advance early. See
:func:`itertools.groupby` for issues this can cause.
- The :func:`groupby` filter avoids that issue.
+ The :func:`jinja-filters.groupby` filter avoids that issue.
"""
rv = self._peek_next()
@@ -511,7 +525,7 @@ class LoopContext:
return rv
- def cycle(self, *args):
+ def cycle(self, *args: V) -> V:
"""Return a value from the given args, cycling through based on
the current :attr:`index0`.
@@ -522,7 +536,7 @@ class LoopContext:
return args[self.index0 % len(args)]
- def changed(self, *value):
+ def changed(self, *value: t.Any) -> bool:
"""Return ``True`` if previously called with a different value
(including when called for the first time).
@@ -534,10 +548,10 @@ class LoopContext:
return False
- def __iter__(self):
+ def __iter__(self) -> "LoopContext":
return self
- def __next__(self):
+ def __next__(self) -> t.Tuple[t.Any, "LoopContext"]:
if self._after is not missing:
rv = self._after
self._after = missing
@@ -550,7 +564,7 @@ class LoopContext:
return rv, self
@internalcode
- def __call__(self, iterable):
+ def __call__(self, iterable: t.Iterable[V]) -> str:
"""When iterating over nested data, render the body of the loop
recursively with the given inner iterable data.
@@ -563,23 +577,94 @@ class LoopContext:
return self._recurse(iterable, self._recurse, depth=self.depth)
- def __repr__(self):
- return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length)
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.index}/{self.length}>"
+
+
+class AsyncLoopContext(LoopContext):
+ _iterator: t.AsyncIterator[t.Any] # type: ignore
+ @staticmethod
+ def _to_iterator( # type: ignore
+ iterable: t.Union[t.Iterable[V], t.AsyncIterable[V]]
+ ) -> t.AsyncIterator[V]:
+ return auto_aiter(iterable)
+
+ @property
+ async def length(self) -> int: # type: ignore
+ if self._length is not None:
+ return self._length
-class Macro(object):
+ try:
+ self._length = len(self._iterable) # type: ignore
+ except TypeError:
+ iterable = [x async for x in self._iterator]
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
+ return self._length
+
+ @property
+ async def revindex0(self) -> int: # type: ignore
+ return await self.length - self.index
+
+ @property
+ async def revindex(self) -> int: # type: ignore
+ return await self.length - self.index0
+
+ async def _peek_next(self) -> t.Any:
+ if self._after is not missing:
+ return self._after
+
+ try:
+ self._after = await self._iterator.__anext__()
+ except StopAsyncIteration:
+ self._after = missing
+
+ return self._after
+
+ @property
+ async def last(self) -> bool: # type: ignore
+ return await self._peek_next() is missing
+
+ @property
+ async def nextitem(self) -> t.Union[t.Any, "Undefined"]:
+ rv = await self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def __aiter__(self) -> "AsyncLoopContext":
+ return self
+
+ async def __anext__(self) -> t.Tuple[t.Any, "AsyncLoopContext"]:
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = await self._iterator.__anext__()
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+
+class Macro:
"""Wraps a macro function."""
def __init__(
self,
- environment,
- func,
- name,
- arguments,
- catch_kwargs,
- catch_varargs,
- caller,
- default_autoescape=None,
+ environment: "Environment",
+ func: t.Callable[..., str],
+ name: str,
+ arguments: t.List[str],
+ catch_kwargs: bool,
+ catch_varargs: bool,
+ caller: bool,
+ default_autoescape: t.Optional[bool] = None,
):
self._environment = environment
self._func = func
@@ -590,13 +675,18 @@ class Macro(object):
self.catch_varargs = catch_varargs
self.caller = caller
self.explicit_caller = "caller" in arguments
+
if default_autoescape is None:
- default_autoescape = environment.autoescape
+ if callable(environment.autoescape):
+ default_autoescape = environment.autoescape(None)
+ else:
+ default_autoescape = environment.autoescape
+
self._default_autoescape = default_autoescape
@internalcode
- @evalcontextfunction
- def __call__(self, *args, **kwargs):
+ @pass_eval_context
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> str:
# This requires a bit of explanation. In the past we used to
# decide largely based on compile-time information if a macro is
# safe or unsafe. While there was a volatile mode it was largely
@@ -656,40 +746,47 @@ class Macro(object):
elif kwargs:
if "caller" in kwargs:
raise TypeError(
- "macro %r was invoked with two values for "
- "the special caller argument. This is "
- "most likely a bug." % self.name
+ f"macro {self.name!r} was invoked with two values for the special"
+ " caller argument. This is most likely a bug."
)
raise TypeError(
- "macro %r takes no keyword argument %r"
- % (self.name, next(iter(kwargs)))
+ f"macro {self.name!r} takes no keyword argument {next(iter(kwargs))!r}"
)
if self.catch_varargs:
arguments.append(args[self._argument_count :])
elif len(args) > self._argument_count:
raise TypeError(
- "macro %r takes not more than %d argument(s)"
- % (self.name, len(self.arguments))
+ f"macro {self.name!r} takes not more than"
+ f" {len(self.arguments)} argument(s)"
)
return self._invoke(arguments, autoescape)
- def _invoke(self, arguments, autoescape):
- """This method is being swapped out by the async implementation."""
+ async def _async_invoke(self, arguments: t.List[t.Any], autoescape: bool) -> str:
+ rv = await self._func(*arguments) # type: ignore
+
+ if autoescape:
+ return Markup(rv)
+
+ return rv # type: ignore
+
+ def _invoke(self, arguments: t.List[t.Any], autoescape: bool) -> str:
+ if self._environment.is_async:
+ return self._async_invoke(arguments, autoescape) # type: ignore
+
rv = self._func(*arguments)
+
if autoescape:
rv = Markup(rv)
+
return rv
- def __repr__(self):
- return "<%s %s>" % (
- self.__class__.__name__,
- self.name is None and "anonymous" or repr(self.name),
- )
+ def __repr__(self) -> str:
+ name = "anonymous" if self.name is None else repr(self.name)
+ return f"<{type(self).__name__} {name}>"
-@implements_to_string
-class Undefined(object):
+class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
@@ -711,14 +808,20 @@ class Undefined(object):
"_undefined_exception",
)
- def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
+ def __init__(
+ self,
+ hint: t.Optional[str] = None,
+ obj: t.Any = missing,
+ name: t.Optional[str] = None,
+ exc: t.Type[TemplateRuntimeError] = UndefinedError,
+ ) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@property
- def _undefined_message(self):
+ def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
@@ -726,107 +829,78 @@ class Undefined(object):
return self._undefined_hint
if self._undefined_obj is missing:
- return "%r is undefined" % self._undefined_name
+ return f"{self._undefined_name!r} is undefined"
- if not isinstance(self._undefined_name, string_types):
- return "%s has no element %r" % (
- object_type_repr(self._undefined_obj),
- self._undefined_name,
+ if not isinstance(self._undefined_name, str):
+ return (
+ f"{object_type_repr(self._undefined_obj)} has no"
+ f" element {self._undefined_name!r}"
)
- return "%r has no attribute %r" % (
- object_type_repr(self._undefined_obj),
- self._undefined_name,
+ return (
+ f"{object_type_repr(self._undefined_obj)!r} has no"
+ f" attribute {self._undefined_name!r}"
)
@internalcode
- def _fail_with_undefined_error(self, *args, **kwargs):
+ def _fail_with_undefined_error(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> "te.NoReturn":
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
raise self._undefined_exception(self._undefined_message)
@internalcode
- def __getattr__(self, name):
+ def __getattr__(self, name: str) -> t.Any:
if name[:2] == "__":
raise AttributeError(name)
+
return self._fail_with_undefined_error()
- __add__ = (
- __radd__
- ) = (
- __mul__
- ) = (
- __rmul__
- ) = (
- __div__
- ) = (
- __rdiv__
- ) = (
- __truediv__
- ) = (
- __rtruediv__
- ) = (
- __floordiv__
- ) = (
- __rfloordiv__
- ) = (
- __mod__
- ) = (
- __rmod__
- ) = (
- __pos__
- ) = (
- __neg__
- ) = (
- __call__
- ) = (
- __getitem__
- ) = (
- __lt__
- ) = (
- __le__
- ) = (
- __gt__
- ) = (
- __ge__
- ) = (
- __int__
- ) = (
- __float__
- ) = (
- __complex__
- ) = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error
-
- def __eq__(self, other):
+ __add__ = __radd__ = __sub__ = __rsub__ = _fail_with_undefined_error
+ __mul__ = __rmul__ = __div__ = __rdiv__ = _fail_with_undefined_error
+ __truediv__ = __rtruediv__ = _fail_with_undefined_error
+ __floordiv__ = __rfloordiv__ = _fail_with_undefined_error
+ __mod__ = __rmod__ = _fail_with_undefined_error
+ __pos__ = __neg__ = _fail_with_undefined_error
+ __call__ = __getitem__ = _fail_with_undefined_error
+ __lt__ = __le__ = __gt__ = __ge__ = _fail_with_undefined_error
+ __int__ = __float__ = __complex__ = _fail_with_undefined_error
+ __pow__ = __rpow__ = _fail_with_undefined_error
+
+ def __eq__(self, other: t.Any) -> bool:
return type(self) is type(other)
- def __ne__(self, other):
+ def __ne__(self, other: t.Any) -> bool:
return not self.__eq__(other)
- def __hash__(self):
+ def __hash__(self) -> int:
return id(type(self))
- def __str__(self):
- return u""
+ def __str__(self) -> str:
+ return ""
- def __len__(self):
+ def __len__(self) -> int:
return 0
- def __iter__(self):
- if 0:
- yield None
+ def __iter__(self) -> t.Iterator[t.Any]:
+ yield from ()
- def __nonzero__(self):
- return False
+ async def __aiter__(self) -> t.AsyncIterator[t.Any]:
+ for _ in ():
+ yield
- __bool__ = __nonzero__
+ def __bool__(self) -> bool:
+ return False
- def __repr__(self):
+ def __repr__(self) -> str:
return "Undefined"
-def make_logging_undefined(logger=None, base=None):
+def make_logging_undefined(
+ logger: t.Optional["logging.Logger"] = None, base: t.Type[Undefined] = Undefined
+) -> t.Type[Undefined]:
"""Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given a default logger is created.
@@ -851,70 +925,39 @@ def make_logging_undefined(logger=None, base=None):
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
- if base is None:
- base = Undefined
-
- def _log_message(undef):
- if undef._undefined_hint is None:
- if undef._undefined_obj is missing:
- hint = "%s is undefined" % undef._undefined_name
- elif not isinstance(undef._undefined_name, string_types):
- hint = "%s has no element %s" % (
- object_type_repr(undef._undefined_obj),
- undef._undefined_name,
- )
- else:
- hint = "%s has no attribute %s" % (
- object_type_repr(undef._undefined_obj),
- undef._undefined_name,
- )
- else:
- hint = undef._undefined_hint
- logger.warning("Template variable warning: %s", hint)
- class LoggingUndefined(base):
- def _fail_with_undefined_error(self, *args, **kwargs):
+ def _log_message(undef: Undefined) -> None:
+ logger.warning( # type: ignore
+ "Template variable warning: %s", undef._undefined_message
+ )
+
+ class LoggingUndefined(base): # type: ignore
+ __slots__ = ()
+
+ def _fail_with_undefined_error( # type: ignore
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> "te.NoReturn":
try:
- return base._fail_with_undefined_error(self, *args, **kwargs)
+ super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as e:
- logger.error("Template variable error: %s", str(e))
+ logger.error("Template variable error: %s", e) # type: ignore
raise e
- def __str__(self):
- rv = base.__str__(self)
+ def __str__(self) -> str:
_log_message(self)
- return rv
+ return super().__str__() # type: ignore
- def __iter__(self):
- rv = base.__iter__(self)
+ def __iter__(self) -> t.Iterator[t.Any]:
_log_message(self)
- return rv
-
- if PY2:
-
- def __nonzero__(self):
- rv = base.__nonzero__(self)
- _log_message(self)
- return rv
-
- def __unicode__(self):
- rv = base.__unicode__(self)
- _log_message(self)
- return rv
+ return super().__iter__() # type: ignore
- else:
-
- def __bool__(self):
- rv = base.__bool__(self)
- _log_message(self)
- return rv
+ def __bool__(self) -> bool:
+ _log_message(self)
+ return super().__bool__() # type: ignore
return LoggingUndefined
-# No @implements_to_string decorator here because __str__
-# is not overwritten from Undefined in this class.
-# This would cause a recursion error in Python 2.
class ChainableUndefined(Undefined):
"""An undefined that is chainable, where both ``__getattr__`` and
``__getitem__`` return itself rather than raising an
@@ -933,16 +976,15 @@ class ChainableUndefined(Undefined):
__slots__ = ()
- def __html__(self):
- return self.__str__()
+ def __html__(self) -> str:
+ return str(self)
- def __getattr__(self, _):
+ def __getattr__(self, _: str) -> "ChainableUndefined":
return self
- __getitem__ = __getattr__
+ __getitem__ = __getattr__ # type: ignore
-@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
@@ -959,18 +1001,22 @@ class DebugUndefined(Undefined):
__slots__ = ()
- def __str__(self):
- if self._undefined_hint is None:
- if self._undefined_obj is missing:
- return u"{{ %s }}" % self._undefined_name
- return "{{ no such element: %s[%r] }}" % (
- object_type_repr(self._undefined_obj),
- self._undefined_name,
+ def __str__(self) -> str:
+ if self._undefined_hint:
+ message = f"undefined value printed: {self._undefined_hint}"
+
+ elif self._undefined_obj is missing:
+ message = self._undefined_name # type: ignore
+
+ else:
+ message = (
+ f"no such element: {object_type_repr(self._undefined_obj)}"
+ f"[{self._undefined_name!r}]"
)
- return u"{{ undefined value printed: %s }}" % self._undefined_hint
+
+ return f"{{{{ {message} }}}}"
-@implements_to_string
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
@@ -992,17 +1038,13 @@ class StrictUndefined(Undefined):
"""
__slots__ = ()
- __iter__ = (
- __str__
- ) = (
- __len__
- ) = (
- __nonzero__
- ) = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
+ __iter__ = __str__ = __len__ = Undefined._fail_with_undefined_error
+ __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
+ __contains__ = Undefined._fail_with_undefined_error
-# remove remaining slots attributes, after the metaclass did the magic they
-# are unneeded and irritating as they contain wrong data for the subclasses.
+# Remove slots attributes, after the metaclass is applied they are
+# unneeded and contain wrong data for subclasses.
del (
Undefined.__slots__,
ChainableUndefined.__slots__,
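
The undefined classes above differ mainly in how far template code gets before an error is raised. A minimal sketch of the behavior, assuming the vendored package is importable as ``jinja2`` (output comments are illustrative):

    from jinja2 import ChainableUndefined, DebugUndefined, Environment, StrictUndefined
    from jinja2.exceptions import UndefinedError

    # ChainableUndefined keeps returning an undefined for attribute/item
    # access on a missing value, which then renders as an empty string.
    print(Environment(undefined=ChainableUndefined)
          .from_string("{{ missing.attr.deeper }}").render())      # ""

    # DebugUndefined prints back what was undefined.
    print(Environment(undefined=DebugUndefined)
          .from_string("{{ missing }}").render())                  # "{{ missing }}"

    # StrictUndefined fails even on printing and boolean tests.
    try:
        Environment(undefined=StrictUndefined).from_string("{{ missing }}").render()
    except UndefinedError as exc:
        print(exc)                                                  # 'missing' is undefined
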
diff --git a/deps/v8/third_party/jinja2/sandbox.py b/deps/v8/third_party/jinja2/sandbox.py
index cfd7993aee..06d74148ec 100644
--- a/deps/v8/third_party/jinja2/sandbox.py
+++ b/deps/v8/third_party/jinja2/sandbox.py
@@ -1,42 +1,32 @@
-# -*- coding: utf-8 -*-
"""A sandbox layer that ensures unsafe operations cannot be performed.
Useful when the template itself comes from an untrusted source.
"""
import operator
import types
-import warnings
+import typing as t
+from _string import formatter_field_name_split # type: ignore
+from collections import abc
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
-from ._compat import abc
-from ._compat import PY2
-from ._compat import range_type
-from ._compat import string_types
from .environment import Environment
from .exceptions import SecurityError
+from .runtime import Context
+from .runtime import Undefined
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
#: maximum number of items a range may produce
MAX_RANGE = 100000
-#: attributes of function objects that are considered unsafe.
-if PY2:
- UNSAFE_FUNCTION_ATTRIBUTES = {
- "func_closure",
- "func_code",
- "func_dict",
- "func_defaults",
- "func_globals",
- }
-else:
- # On versions > python 2 the special attributes on functions are gone,
- # but they remain on methods and generators for whatever reason.
- UNSAFE_FUNCTION_ATTRIBUTES = set()
+#: Unsafe function attributes.
+UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
-#: unsafe method attributes. function attributes are unsafe for methods too
-UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
+#: Unsafe method attributes. Function attributes are unsafe for methods too.
+UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
@@ -47,41 +37,9 @@ UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
-# make sure we don't warn in python 2.6 about stuff we don't care about
-warnings.filterwarnings(
- "ignore", "the sets module", DeprecationWarning, module=__name__
-)
-
-_mutable_set_types = (set,)
-_mutable_mapping_types = (dict,)
-_mutable_sequence_types = (list,)
-
-# on python 2.x we can register the user collection types
-try:
- from UserDict import UserDict, DictMixin
- from UserList import UserList
-
- _mutable_mapping_types += (UserDict, DictMixin)
- _mutable_set_types += (UserList,)
-except ImportError:
- pass
-
-# if sets is still available, register the mutable set from there as well
-try:
- from sets import Set
-
- _mutable_set_types += (Set,)
-except ImportError:
- pass
-
-#: register Python 2.6 abstract base classes
-_mutable_set_types += (abc.MutableSet,)
-_mutable_mapping_types += (abc.MutableMapping,)
-_mutable_sequence_types += (abc.MutableSequence,)
-
-_mutable_spec = (
+_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] = (
(
- _mutable_set_types,
+ abc.MutableSet,
frozenset(
[
"add",
@@ -96,11 +54,11 @@ _mutable_spec = (
),
),
(
- _mutable_mapping_types,
+ abc.MutableMapping,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
- _mutable_sequence_types,
+ abc.MutableSequence,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
@@ -122,76 +80,49 @@ _mutable_spec = (
)
-class _MagicFormatMapping(abc.Mapping):
- """This class implements a dummy wrapper to fix a bug in the Python
- standard library for string formatting.
-
- See https://bugs.python.org/issue13598 for information about why
- this is necessary.
- """
-
- def __init__(self, args, kwargs):
- self._args = args
- self._kwargs = kwargs
- self._last_index = 0
-
- def __getitem__(self, key):
- if key == "":
- idx = self._last_index
- self._last_index += 1
- try:
- return self._args[idx]
- except LookupError:
- pass
- key = str(idx)
- return self._kwargs[key]
-
- def __iter__(self):
- return iter(self._kwargs)
-
- def __len__(self):
- return len(self._kwargs)
-
-
-def inspect_format_method(callable):
+def inspect_format_method(callable: t.Callable) -> t.Optional[str]:
if not isinstance(
callable, (types.MethodType, types.BuiltinMethodType)
) or callable.__name__ not in ("format", "format_map"):
return None
+
obj = callable.__self__
- if isinstance(obj, string_types):
+
+ if isinstance(obj, str):
return obj
+ return None
+
-def safe_range(*args):
+def safe_range(*args: int) -> range:
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
- rng = range_type(*args)
+ rng = range(*args)
if len(rng) > MAX_RANGE:
raise OverflowError(
"Range too big. The sandbox blocks ranges larger than"
- " MAX_RANGE (%d)." % MAX_RANGE
+ f" MAX_RANGE ({MAX_RANGE})."
)
return rng
-def unsafe(f):
+def unsafe(f: F) -> F:
"""Marks a function or method as unsafe.
- ::
+    .. code-block:: python
@unsafe
def delete(self):
pass
"""
- f.unsafe_callable = True
+ f.unsafe_callable = True # type: ignore
return f
-def is_internal_attribute(obj, attr):
+def is_internal_attribute(obj: t.Any, attr: str) -> bool:
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
@@ -228,12 +159,10 @@ def is_internal_attribute(obj, attr):
return attr.startswith("__")
-def modifies_known_mutable(obj, attr):
+def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
"""This function checks if an attribute on a builtin mutable object
- (list, dict, set or deque) would modify it if called. It also supports
- the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
- with Python 2.6 onwards the abstract base classes `MutableSet`,
- `MutableMapping`, and `MutableSequence`.
+ (list, dict, set or deque) or the corresponding ABCs would modify it
+ if called.
>>> modifies_known_mutable({}, "clear")
True
@@ -244,8 +173,7 @@ def modifies_known_mutable(obj, attr):
>>> modifies_known_mutable([], "index")
False
- If called with an unsupported object (such as unicode) `False` is
- returned.
+ If called with an unsupported object, ``False`` is returned.
>>> modifies_known_mutable("foo", "upper")
False
@@ -272,7 +200,7 @@ class SandboxedEnvironment(Environment):
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
- default_binop_table = {
+ default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
@@ -285,7 +213,10 @@ class SandboxedEnvironment(Environment):
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
- default_unop_table = {"+": operator.pos, "-": operator.neg}
+ default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
+ "+": operator.pos,
+ "-": operator.neg,
+ }
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
@@ -301,7 +232,7 @@ class SandboxedEnvironment(Environment):
#: interested in.
#:
#: .. versionadded:: 2.6
- intercepted_binops = frozenset()
+ intercepted_binops: t.FrozenSet[str] = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
@@ -316,32 +247,15 @@ class SandboxedEnvironment(Environment):
#: interested in.
#:
#: .. versionadded:: 2.6
- intercepted_unops = frozenset()
-
- def intercept_unop(self, operator):
- """Called during template compilation with the name of a unary
- operator to check if it should be intercepted at runtime. If this
- method returns `True`, :meth:`call_unop` is executed for this unary
- operator. The default implementation of :meth:`call_unop` will use
- the :attr:`unop_table` dictionary to perform the operator with the
- same logic as the builtin one.
-
- The following unary operators are interceptable: ``+`` and ``-``
-
- Intercepted calls are always slower than the native operator call,
- so make sure only to intercept the ones you are interested in.
-
- .. versionadded:: 2.6
- """
- return False
+ intercepted_unops: t.FrozenSet[str] = frozenset()
- def __init__(self, *args, **kwargs):
- Environment.__init__(self, *args, **kwargs)
+ def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
+ super().__init__(*args, **kwargs)
self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
- def is_safe_attribute(self, obj, attr, value):
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
@@ -350,17 +264,20 @@ class SandboxedEnvironment(Environment):
"""
return not (attr.startswith("_") or is_internal_attribute(obj, attr))
- def is_safe_callable(self, obj):
- """Check if an object is safely callable. Per default a function is
- considered safe unless the `unsafe_callable` attribute exists and is
- True. Override this method to alter the behavior, but this won't
- affect the `unsafe` decorator from this module.
+ def is_safe_callable(self, obj: t.Any) -> bool:
+ """Check if an object is safely callable. By default callables
+ are considered safe unless decorated with :func:`unsafe`.
+
+ This also recognizes the Django convention of setting
+ ``func.alters_data = True``.
"""
return not (
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
)
- def call_binop(self, context, operator, left, right):
+ def call_binop(
+ self, context: Context, operator: str, left: t.Any, right: t.Any
+ ) -> t.Any:
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
@@ -369,7 +286,7 @@ class SandboxedEnvironment(Environment):
"""
return self.binop_table[operator](left, right)
- def call_unop(self, context, operator, arg):
+ def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
@@ -378,12 +295,14 @@ class SandboxedEnvironment(Environment):
"""
return self.unop_table[operator](arg)
- def getitem(self, obj, argument):
+ def getitem(
+ self, obj: t.Any, argument: t.Union[str, t.Any]
+ ) -> t.Union[t.Any, Undefined]:
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
- if isinstance(argument, string_types):
+ if isinstance(argument, str):
try:
attr = str(argument)
except Exception:
@@ -399,7 +318,7 @@ class SandboxedEnvironment(Environment):
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
- def getattr(self, obj, attribute):
+ def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
@@ -416,40 +335,52 @@ class SandboxedEnvironment(Environment):
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
- def unsafe_undefined(self, obj, attribute):
+ def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
"""Return an undefined object for unsafe attributes."""
return self.undefined(
- "access to attribute %r of %r "
- "object is unsafe." % (attribute, obj.__class__.__name__),
+ f"access to attribute {attribute!r} of"
+ f" {type(obj).__name__!r} object is unsafe.",
name=attribute,
obj=obj,
exc=SecurityError,
)
- def format_string(self, s, args, kwargs, format_func=None):
+ def format_string(
+ self,
+ s: str,
+ args: t.Tuple[t.Any, ...],
+ kwargs: t.Dict[str, t.Any],
+ format_func: t.Optional[t.Callable] = None,
+ ) -> str:
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
+ formatter: SandboxedFormatter
if isinstance(s, Markup):
- formatter = SandboxedEscapeFormatter(self, s.escape)
+ formatter = SandboxedEscapeFormatter(self, escape=s.escape)
else:
formatter = SandboxedFormatter(self)
if format_func is not None and format_func.__name__ == "format_map":
if len(args) != 1 or kwargs:
raise TypeError(
- "format_map() takes exactly one argument %d given"
- % (len(args) + (kwargs is not None))
+ "format_map() takes exactly one argument"
+ f" {len(args) + (kwargs is not None)} given"
)
kwargs = args[0]
- args = None
+ args = ()
- kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
- def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
+ def call(
+ __self, # noqa: B902
+ __context: Context,
+ __obj: t.Any,
+ *args: t.Any,
+ **kwargs: t.Any,
+ ) -> t.Any:
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
@@ -458,7 +389,7 @@ class SandboxedEnvironment(Environment):
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
- raise SecurityError("%r is not safely callable" % (__obj,))
+ raise SecurityError(f"{__obj!r} is not safely callable")
return __context.call(__obj, *args, **kwargs)
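
The ``intercepted_binops`` hook documented above can be used to police individual operators at render time. A rough sketch, assuming ``**`` is present in the default ``binop_table``; the subclass name and the limit are made up for illustration:

    from jinja2.exceptions import SecurityError
    from jinja2.sandbox import SandboxedEnvironment

    class PowerLimitedEnvironment(SandboxedEnvironment):
        # Ask the compiler to route ** through call_binop at runtime.
        intercepted_binops = frozenset(["**"])

        def call_binop(self, context, operator, left, right):
            if operator == "**" and right > 64:
                raise SecurityError("exponent too large for this sandbox")
            return super().call_binop(context, operator, left, right)

    env = PowerLimitedEnvironment()
    print(env.from_string("{{ 2 ** 8 }}").render())    # 256
    env.from_string("{{ 2 ** 9999 }}").render()        # raises SecurityError
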
@@ -468,26 +399,21 @@ class ImmutableSandboxedEnvironment(SandboxedEnvironment):
`dict` by using the :func:`modifies_known_mutable` function.
"""
- def is_safe_attribute(self, obj, attr, value):
- if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
+ if not super().is_safe_attribute(obj, attr, value):
return False
- return not modifies_known_mutable(obj, attr)
-
-# This really is not a public API apparently.
-try:
- from _string import formatter_field_name_split
-except ImportError:
-
- def formatter_field_name_split(field_name):
- return field_name._formatter_field_name_split()
+ return not modifies_known_mutable(obj, attr)
-class SandboxedFormatterMixin(object):
- def __init__(self, env):
+class SandboxedFormatter(Formatter):
+ def __init__(self, env: Environment, **kwargs: t.Any) -> None:
self._env = env
+ super().__init__(**kwargs)
- def get_field(self, field_name, args, kwargs):
+ def get_field(
+ self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
+ ) -> t.Tuple[t.Any, str]:
first, rest = formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
for is_attr, i in rest:
@@ -498,13 +424,5 @@ class SandboxedFormatterMixin(object):
return obj, first
-class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
- def __init__(self, env):
- SandboxedFormatterMixin.__init__(self, env)
- Formatter.__init__(self)
-
-
-class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
- def __init__(self, env, escape):
- SandboxedFormatterMixin.__init__(self, env)
- EscapeFormatter.__init__(self, escape)
+class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
+ pass
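
Taken together, the sandbox hides internal attributes and refuses to call anything flagged as unsafe. A small usage sketch; the ``drop_table`` function is a stand-in:

    from jinja2.exceptions import SecurityError
    from jinja2.sandbox import SandboxedEnvironment

    def drop_table():
        pass

    # The Django-style convention recognized by is_safe_callable().
    drop_table.alters_data = True

    env = SandboxedEnvironment()
    print(env.from_string("{{ 20 + 22 }}").render())   # 42

    try:
        env.from_string("{{ drop_table() }}").render(drop_table=drop_table)
    except SecurityError as exc:
        print(exc)   # ... is not safely callable
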
diff --git a/deps/v8/third_party/jinja2/tests.py b/deps/v8/third_party/jinja2/tests.py
index fabd4ce51b..a467cf08b5 100644
--- a/deps/v8/third_party/jinja2/tests.py
+++ b/deps/v8/third_party/jinja2/tests.py
@@ -1,36 +1,32 @@
-# -*- coding: utf-8 -*-
"""Built-in template tests used with the ``is`` operator."""
-import decimal
import operator
-import re
+import typing as t
+from collections import abc
+from numbers import Number
-from ._compat import abc
-from ._compat import integer_types
-from ._compat import string_types
-from ._compat import text_type
from .runtime import Undefined
+from .utils import pass_environment
-number_re = re.compile(r"^-?\d+(\.\d+)?$")
-regex_type = type(number_re)
-test_callable = callable
+if t.TYPE_CHECKING:
+ from .environment import Environment
-def test_odd(value):
+def test_odd(value: int) -> bool:
"""Return true if the variable is odd."""
return value % 2 == 1
-def test_even(value):
+def test_even(value: int) -> bool:
"""Return true if the variable is even."""
return value % 2 == 0
-def test_divisibleby(value, num):
+def test_divisibleby(value: int, num: int) -> bool:
"""Check if a variable is divisible by a number."""
return value % num == 0
-def test_defined(value):
+def test_defined(value: t.Any) -> bool:
"""Return true if the variable is defined:
.. sourcecode:: jinja
@@ -47,17 +43,57 @@ def test_defined(value):
return not isinstance(value, Undefined)
-def test_undefined(value):
+def test_undefined(value: t.Any) -> bool:
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
-def test_none(value):
+@pass_environment
+def test_filter(env: "Environment", value: str) -> bool:
+ """Check if a filter exists by name. Useful if a filter may be
+ optionally available.
+
+ .. code-block:: jinja
+
+ {% if 'markdown' is filter %}
+ {{ value | markdown }}
+ {% else %}
+ {{ value }}
+ {% endif %}
+
+ .. versionadded:: 3.0
+ """
+ return value in env.filters
+
+
+@pass_environment
+def test_test(env: "Environment", value: str) -> bool:
+ """Check if a test exists by name. Useful if a test may be
+ optionally available.
+
+ .. code-block:: jinja
+
+ {% if 'loud' is test %}
+ {% if value is loud %}
+ {{ value|upper }}
+ {% else %}
+ {{ value|lower }}
+ {% endif %}
+ {% else %}
+ {{ value }}
+ {% endif %}
+
+ .. versionadded:: 3.0
+ """
+ return value in env.tests
+
+
+def test_none(value: t.Any) -> bool:
"""Return true if the variable is none."""
return value is None
-def test_boolean(value):
+def test_boolean(value: t.Any) -> bool:
"""Return true if the object is a boolean value.
.. versionadded:: 2.11
@@ -65,7 +101,7 @@ def test_boolean(value):
return value is True or value is False
-def test_false(value):
+def test_false(value: t.Any) -> bool:
"""Return true if the object is False.
.. versionadded:: 2.11
@@ -73,7 +109,7 @@ def test_false(value):
return value is False
-def test_true(value):
+def test_true(value: t.Any) -> bool:
"""Return true if the object is True.
.. versionadded:: 2.11
@@ -82,16 +118,16 @@ def test_true(value):
# NOTE: The existing 'number' test matches booleans and floats
-def test_integer(value):
+def test_integer(value: t.Any) -> bool:
"""Return true if the object is an integer.
.. versionadded:: 2.11
"""
- return isinstance(value, integer_types) and value is not True and value is not False
+ return isinstance(value, int) and value is not True and value is not False
# NOTE: The existing 'number' test matches booleans and integers
-def test_float(value):
+def test_float(value: t.Any) -> bool:
"""Return true if the object is a float.
.. versionadded:: 2.11
@@ -99,22 +135,22 @@ def test_float(value):
return isinstance(value, float)
-def test_lower(value):
+def test_lower(value: str) -> bool:
"""Return true if the variable is lowercased."""
- return text_type(value).islower()
+ return str(value).islower()
-def test_upper(value):
+def test_upper(value: str) -> bool:
"""Return true if the variable is uppercased."""
- return text_type(value).isupper()
+ return str(value).isupper()
-def test_string(value):
+def test_string(value: t.Any) -> bool:
"""Return true if the object is a string."""
- return isinstance(value, string_types)
+ return isinstance(value, str)
-def test_mapping(value):
+def test_mapping(value: t.Any) -> bool:
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
@@ -122,12 +158,12 @@ def test_mapping(value):
return isinstance(value, abc.Mapping)
-def test_number(value):
+def test_number(value: t.Any) -> bool:
"""Return true if the variable is a number."""
- return isinstance(value, integer_types + (float, complex, decimal.Decimal))
+ return isinstance(value, Number)
-def test_sequence(value):
+def test_sequence(value: t.Any) -> bool:
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
@@ -136,10 +172,11 @@ def test_sequence(value):
value.__getitem__
except Exception:
return False
+
return True
-def test_sameas(value, other):
+def test_sameas(value: t.Any, other: t.Any) -> bool:
"""Check if an object points to the same memory address than another
object:
@@ -152,21 +189,22 @@ def test_sameas(value, other):
return value is other
-def test_iterable(value):
+def test_iterable(value: t.Any) -> bool:
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
+
return True
-def test_escaped(value):
+def test_escaped(value: t.Any) -> bool:
"""Check if the value is escaped."""
return hasattr(value, "__html__")
-def test_in(value, seq):
+def test_in(value: t.Any, seq: t.Container) -> bool:
"""Check if value is in seq.
.. versionadded:: 2.10
@@ -180,6 +218,8 @@ TESTS = {
"divisibleby": test_divisibleby,
"defined": test_defined,
"undefined": test_undefined,
+ "filter": test_filter,
+ "test": test_test,
"none": test_none,
"boolean": test_boolean,
"false": test_false,
@@ -193,7 +233,7 @@ TESTS = {
"number": test_number,
"sequence": test_sequence,
"iterable": test_iterable,
- "callable": test_callable,
+ "callable": callable,
"sameas": test_sameas,
"escaped": test_escaped,
"in": test_in,
diff --git a/deps/v8/third_party/jinja2/utils.py b/deps/v8/third_party/jinja2/utils.py
index 6afca81055..9b5f5a50eb 100644
--- a/deps/v8/third_party/jinja2/utils.py
+++ b/deps/v8/third_party/jinja2/utils.py
@@ -1,80 +1,98 @@
-# -*- coding: utf-8 -*-
+import enum
import json
import os
import re
-import warnings
+import typing as t
+from collections import abc
from collections import deque
from random import choice
from random import randrange
-from string import ascii_letters as _letters
-from string import digits as _digits
from threading import Lock
+from types import CodeType
+from urllib.parse import quote_from_bytes
-from markupsafe import escape
-from markupsafe import Markup
+import markupsafe
-from ._compat import abc
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import url_quote
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
# special singleton representing missing values for the runtime
-missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
+missing: t.Any = type("MissingType", (), {"__repr__": lambda x: "missing"})()
+
+internal_code: t.MutableSet[CodeType] = set()
-# internal code
-internal_code = set()
+concat = "".join
-concat = u"".join
-_slash_escape = "\\/" not in json.dumps("/")
+def pass_context(f: F) -> F:
+ """Pass the :class:`~jinja2.runtime.Context` as the first argument
+ to the decorated function when called while rendering a template.
+ Can be used on functions, filters, and tests.
-def contextfunction(f):
- """This decorator can be used to mark a function or method context callable.
- A context callable is passed the active :class:`Context` as first argument when
- called from the template. This is useful if a function wants to get access
- to the context or functions provided on the context object. For example
- a function that returns a sorted list of template variables the current
- template exports could look like this::
+ If only ``Context.eval_context`` is needed, use
+ :func:`pass_eval_context`. If only ``Context.environment`` is
+ needed, use :func:`pass_environment`.
- @contextfunction
- def get_exported_names(context):
- return sorted(context.exported_vars)
+ .. versionadded:: 3.0.0
+ Replaces ``contextfunction`` and ``contextfilter``.
"""
- f.contextfunction = True
+ f.jinja_pass_arg = _PassArg.context # type: ignore
return f
-def evalcontextfunction(f):
- """This decorator can be used to mark a function or method as an eval
- context callable. This is similar to the :func:`contextfunction`
- but instead of passing the context, an evaluation context object is
- passed. For more information about the eval context, see
- :ref:`eval-context`.
+def pass_eval_context(f: F) -> F:
+ """Pass the :class:`~jinja2.nodes.EvalContext` as the first argument
+ to the decorated function when called while rendering a template.
+ See :ref:`eval-context`.
+
+ Can be used on functions, filters, and tests.
+
+ If only ``EvalContext.environment`` is needed, use
+ :func:`pass_environment`.
- .. versionadded:: 2.4
+ .. versionadded:: 3.0.0
+ Replaces ``evalcontextfunction`` and ``evalcontextfilter``.
"""
- f.evalcontextfunction = True
+ f.jinja_pass_arg = _PassArg.eval_context # type: ignore
return f
-def environmentfunction(f):
- """This decorator can be used to mark a function or method as environment
- callable. This decorator works exactly like the :func:`contextfunction`
- decorator just that the first argument is the active :class:`Environment`
- and not context.
+def pass_environment(f: F) -> F:
+ """Pass the :class:`~jinja2.Environment` as the first argument to
+ the decorated function when called while rendering a template.
+
+ Can be used on functions, filters, and tests.
+
+ .. versionadded:: 3.0.0
+ Replaces ``environmentfunction`` and ``environmentfilter``.
"""
- f.environmentfunction = True
+ f.jinja_pass_arg = _PassArg.environment # type: ignore
return f
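
The ``pass_*`` decorators replace the old ``contextfunction``/``environmentfilter`` family. A small sketch of a custom filter that receives the environment; the ``list_filters`` name is made up for illustration:

    from jinja2 import Environment, pass_environment

    @pass_environment
    def list_filters(env, prefix=""):
        # The environment is injected as the first argument at render time.
        return sorted(name for name in env.filters if name.startswith(prefix))

    env = Environment()
    env.filters["list_filters"] = list_filters
    print(env.from_string("{{ 'up' | list_filters }}").render())   # ['upper']
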
-def internalcode(f):
+class _PassArg(enum.Enum):
+ context = enum.auto()
+ eval_context = enum.auto()
+ environment = enum.auto()
+
+ @classmethod
+ def from_obj(cls, obj: F) -> t.Optional["_PassArg"]:
+ if hasattr(obj, "jinja_pass_arg"):
+ return obj.jinja_pass_arg # type: ignore
+
+ return None
+
+
+def internalcode(f: F) -> F:
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
-def is_undefined(obj):
+def is_undefined(obj: t.Any) -> bool:
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
@@ -91,26 +109,26 @@ def is_undefined(obj):
return isinstance(obj, Undefined)
-def consume(iterable):
+def consume(iterable: t.Iterable[t.Any]) -> None:
"""Consumes an iterable without doing anything with it."""
for _ in iterable:
pass
-def clear_caches():
+def clear_caches() -> None:
"""Jinja keeps internal caches for environments and lexers. These are
used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
- from .environment import _spontaneous_environments
+ from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
- _spontaneous_environments.clear()
+ get_spontaneous_environment.cache_clear()
_lexer_cache.clear()
-def import_string(import_name, silent=False):
+def import_string(import_name: str, silent: bool = False) -> t.Any:
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
@@ -134,7 +152,7 @@ def import_string(import_name, silent=False):
raise
-def open_if_exists(filename, mode="rb"):
+def open_if_exists(filename: str, mode: str = "rb") -> t.Optional[t.IO]:
"""Returns a file descriptor for the filename if that file exists,
otherwise ``None``.
"""
@@ -144,7 +162,7 @@ def open_if_exists(filename, mode="rb"):
return open(filename, mode)
-def object_type_repr(obj):
+def object_type_repr(obj: t.Any) -> str:
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
@@ -156,51 +174,104 @@ def object_type_repr(obj):
cls = type(obj)
- # __builtin__ in 2.x, builtins in 3.x
- if cls.__module__ in ("__builtin__", "builtins"):
- name = cls.__name__
- else:
- name = cls.__module__ + "." + cls.__name__
+ if cls.__module__ == "builtins":
+ return f"{cls.__name__} object"
- return "%s object" % name
+ return f"{cls.__module__}.{cls.__name__} object"
-def pformat(obj, verbose=False):
- """Prettyprint an object. Either use the `pretty` library or the
- builtin `pprint`.
- """
- try:
- from pretty import pretty
+def pformat(obj: t.Any) -> str:
+ """Format an object using :func:`pprint.pformat`."""
+ from pprint import pformat # type: ignore
- return pretty(obj, verbose=verbose)
- except ImportError:
- from pprint import pformat
+ return pformat(obj)
- return pformat(obj)
+_http_re = re.compile(
+ r"""
+ ^
+ (
+ (https?://|www\.) # scheme or www
+ (([\w%-]+\.)+)? # subdomain
+ (
+ [a-z]{2,63} # basic tld
+ |
+ xn--[\w%]{2,59} # idna tld
+ )
+ |
+ ([\w%-]{2,63}\.)+ # basic domain
+ (com|net|int|edu|gov|org|info|mil) # basic tld
+ |
+ (https?://) # scheme
+ (
+ (([\d]{1,3})(\.[\d]{1,3}){3}) # IPv4
+ |
+ (\[([\da-f]{0,4}:){2}([\da-f]{0,4}:?){1,6}]) # IPv6
+ )
+ )
+ (?::[\d]{1,5})? # port
+ (?:[/?#]\S*)? # path, query, and fragment
+ $
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
+_email_re = re.compile(r"^\S+@\w[\w.-]*\.\w+$")
+
+
+def urlize(
+ text: str,
+ trim_url_limit: t.Optional[int] = None,
+ rel: t.Optional[str] = None,
+ target: t.Optional[str] = None,
+ extra_schemes: t.Optional[t.Iterable[str]] = None,
+) -> str:
+ """Convert URLs in text into clickable links.
+
+ This may not recognize links in some situations. Usually, a more
+ comprehensive formatter, such as a Markdown library, is a better
+ choice.
+
+ Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
+ addresses. Links with trailing punctuation (periods, commas, closing
+ parentheses) and leading punctuation (opening parentheses) are
+ recognized excluding the punctuation. Email addresses that include
+ header fields are not recognized (for example,
+ ``mailto:address@example.com?cc=copy@example.com``).
+
+ :param text: Original text containing URLs to link.
+ :param trim_url_limit: Shorten displayed URL values to this length.
+ :param target: Add the ``target`` attribute to links.
+ :param rel: Add the ``rel`` attribute to links.
+ :param extra_schemes: Recognize URLs that start with these schemes
+ in addition to the default behavior.
+
+ .. versionchanged:: 3.0
+ The ``extra_schemes`` parameter was added.
+
+ .. versionchanged:: 3.0
+ Generate ``https://`` links for URLs without a scheme.
+
+ .. versionchanged:: 3.0
+ The parsing rules were updated. Recognize email addresses with
+ or without the ``mailto:`` scheme. Validate IP addresses. Ignore
+ parentheses and brackets in more cases.
+ """
+ if trim_url_limit is not None:
-def urlize(text, trim_url_limit=None, rel=None, target=None):
- """Converts any URLs in text into clickable links. Works on http://,
- https:// and www. links. Links can have trailing punctuation (periods,
- commas, close-parens) and leading punctuation (opening parens) and
- it'll still do the right thing.
+ def trim_url(x: str) -> str:
+ if len(x) > trim_url_limit: # type: ignore
+ return f"{x[:trim_url_limit]}..."
- If trim_url_limit is not None, the URLs in link text will be limited
- to trim_url_limit characters.
+ return x
- If nofollow is True, the URLs in link text will get a rel="nofollow"
- attribute.
+ else:
- If target is not None, a target attribute will be added to the link.
- """
- trim_url = (
- lambda x, limit=trim_url_limit: limit is not None
- and (x[:limit] + (len(x) >= limit and "..." or ""))
- or x
- )
- words = re.split(r"(\s+)", text_type(escape(text)))
- rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
- target_attr = target and ' target="%s"' % escape(target) or ""
+ def trim_url(x: str) -> str:
+ return x
+
+ words = re.split(r"(\s+)", str(markupsafe.escape(text)))
+ rel_attr = f' rel="{markupsafe.escape(rel)}"' if rel else ""
+ target_attr = f' target="{markupsafe.escape(target)}"' if target else ""
for i, word in enumerate(words):
head, middle, tail = "", word, ""
@@ -220,47 +291,57 @@ def urlize(text, trim_url_limit=None, rel=None, target=None):
tail = match.group()
middle = middle[: match.start()]
- if middle.startswith("www.") or (
- "@" not in middle
- and not middle.startswith("http://")
- and not middle.startswith("https://")
- and len(middle) > 0
- and middle[0] in _letters + _digits
- and (
- middle.endswith(".org")
- or middle.endswith(".net")
- or middle.endswith(".com")
- )
- ):
- middle = '<a href="http://%s"%s%s>%s</a>' % (
- middle,
- rel_attr,
- target_attr,
- trim_url(middle),
- )
-
- if middle.startswith("http://") or middle.startswith("https://"):
- middle = '<a href="%s"%s%s>%s</a>' % (
- middle,
- rel_attr,
- target_attr,
- trim_url(middle),
- )
-
- if (
+ # Prefer balancing parentheses in URLs instead of ignoring a
+ # trailing character.
+ for start_char, end_char in ("(", ")"), ("<", ">"), ("&lt;", "&gt;"):
+ start_count = middle.count(start_char)
+
+ if start_count <= middle.count(end_char):
+ # Balanced, or lighter on the left
+ continue
+
+ # Move as many as possible from the tail to balance
+ for _ in range(min(start_count, tail.count(end_char))):
+ end_index = tail.index(end_char) + len(end_char)
+ # Move anything in the tail before the end char too
+ middle += tail[:end_index]
+ tail = tail[end_index:]
+
+ if _http_re.match(middle):
+ if middle.startswith("https://") or middle.startswith("http://"):
+ middle = (
+ f'<a href="{middle}"{rel_attr}{target_attr}>{trim_url(middle)}</a>'
+ )
+ else:
+ middle = (
+ f'<a href="https://{middle}"{rel_attr}{target_attr}>'
+ f"{trim_url(middle)}</a>"
+ )
+
+ elif middle.startswith("mailto:") and _email_re.match(middle[7:]):
+ middle = f'<a href="{middle}">{middle[7:]}</a>'
+
+ elif (
"@" in middle
and not middle.startswith("www.")
and ":" not in middle
- and re.match(r"^\S+@\w[\w.-]*\.\w+$", middle)
+ and _email_re.match(middle)
):
- middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+ middle = f'<a href="mailto:{middle}">{middle}</a>'
+
+ elif extra_schemes is not None:
+ for scheme in extra_schemes:
+ if middle != scheme and middle.startswith(scheme):
+ middle = f'<a href="{middle}"{rel_attr}{target_attr}>{middle}</a>'
- words[i] = head + middle + tail
+ words[i] = f"{head}{middle}{tail}"
- return u"".join(words)
+ return "".join(words)
-def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+def generate_lorem_ipsum(
+ n: int = 5, html: bool = True, min: int = 20, max: int = 100
+) -> str:
"""Generate some lorem ipsum for the template."""
from .constants import LOREM_IPSUM_WORDS
@@ -297,40 +378,38 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
p.append(word)
# ensure that the paragraph ends with a dot.
- p = u" ".join(p)
- if p.endswith(","):
- p = p[:-1] + "."
- elif not p.endswith("."):
- p += "."
- result.append(p)
+ p_str = " ".join(p)
+
+ if p_str.endswith(","):
+ p_str = p_str[:-1] + "."
+ elif not p_str.endswith("."):
+ p_str += "."
+
+ result.append(p_str)
if not html:
- return u"\n\n".join(result)
- return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
+ return "\n\n".join(result)
+ return markupsafe.Markup(
+ "\n".join(f"<p>{markupsafe.escape(x)}</p>" for x in result)
+ )
-def unicode_urlencode(obj, charset="utf-8", for_qs=False):
+def url_quote(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
"""Quote a string for use in a URL using the given charset.
- This function is misnamed, it is a wrapper around
- :func:`urllib.parse.quote`.
-
:param obj: String or bytes to quote. Other types are converted to
string then encoded to bytes using the given charset.
:param charset: Encode text to bytes using this charset.
:param for_qs: Quote "/" and use "+" for spaces.
"""
- if not isinstance(obj, string_types):
- obj = text_type(obj)
+ if not isinstance(obj, bytes):
+ if not isinstance(obj, str):
+ obj = str(obj)
- if isinstance(obj, text_type):
obj = obj.encode(charset)
safe = b"" if for_qs else b"/"
- rv = url_quote(obj, safe)
-
- if not isinstance(rv, text_type):
- rv = rv.decode("utf-8")
+ rv = quote_from_bytes(obj, safe)
if for_qs:
rv = rv.replace("%20", "+")
@@ -338,20 +417,21 @@ def unicode_urlencode(obj, charset="utf-8", for_qs=False):
return rv
-class LRUCache(object):
+@abc.MutableMapping.register
+class LRUCache:
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
- def __init__(self, capacity):
+ def __init__(self, capacity: int) -> None:
self.capacity = capacity
- self._mapping = {}
- self._queue = deque()
+ self._mapping: t.Dict[t.Any, t.Any] = {}
+ self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
- def _postinit(self):
+ def _postinit(self) -> None:
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
@@ -359,35 +439,35 @@ class LRUCache(object):
self._wlock = Lock()
self._append = self._queue.append
- def __getstate__(self):
+ def __getstate__(self) -> t.Mapping[str, t.Any]:
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
- def __setstate__(self, d):
+ def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
self.__dict__.update(d)
self._postinit()
- def __getnewargs__(self):
+ def __getnewargs__(self) -> t.Tuple:
return (self.capacity,)
- def copy(self):
+ def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue.extend(self._queue)
return rv
- def get(self, key, default=None):
+ def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
- def setdefault(self, key, default=None):
+ def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
@@ -397,35 +477,32 @@ class LRUCache(object):
self[key] = default
return default
- def clear(self):
+ def clear(self) -> None:
"""Clear the cache."""
- self._wlock.acquire()
- try:
+ with self._wlock:
self._mapping.clear()
self._queue.clear()
- finally:
- self._wlock.release()
- def __contains__(self, key):
+ def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
return key in self._mapping
- def __len__(self):
+ def __len__(self) -> int:
"""Return the current size of the cache."""
return len(self._mapping)
- def __repr__(self):
- return "<%s %r>" % (self.__class__.__name__, self._mapping)
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self._mapping!r}>"
- def __getitem__(self, key):
+ def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
- self._wlock.acquire()
- try:
+ with self._wlock:
rv = self._mapping[key]
+
if self._queue[-1] != key:
try:
self._remove(key)
@@ -434,100 +511,54 @@ class LRUCache(object):
# when we read, ignore the ValueError that we would
# get otherwise.
pass
+
self._append(key)
+
return rv
- finally:
- self._wlock.release()
- def __setitem__(self, key, value):
+ def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
- self._wlock.acquire()
- try:
+ with self._wlock:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
+
self._append(key)
self._mapping[key] = value
- finally:
- self._wlock.release()
- def __delitem__(self, key):
+ def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
- self._wlock.acquire()
- try:
+ with self._wlock:
del self._mapping[key]
+
try:
self._remove(key)
except ValueError:
pass
- finally:
- self._wlock.release()
- def items(self):
+ def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
- def iteritems(self):
- """Iterate over all items."""
- warnings.warn(
- "'iteritems()' will be removed in version 3.0. Use"
- " 'iter(cache.items())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self.items())
-
- def values(self):
+ def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
return [x[1] for x in self.items()]
- def itervalue(self):
- """Iterate over all values."""
- warnings.warn(
- "'itervalue()' will be removed in version 3.0. Use"
- " 'iter(cache.values())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self.values())
-
- def itervalues(self):
- """Iterate over all values."""
- warnings.warn(
- "'itervalues()' will be removed in version 3.0. Use"
- " 'iter(cache.values())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self.values())
-
- def keys(self):
+ def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
return list(self)
- def iterkeys(self):
- """Iterate over all keys in the cache dict, ordered by
- the most recent usage.
- """
- warnings.warn(
- "'iterkeys()' will be removed in version 3.0. Use"
- " 'iter(cache.keys())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self)
-
- def __iter__(self):
+ def __iter__(self) -> t.Iterator[t.Any]:
return reversed(tuple(self._queue))
- def __reversed__(self):
+ def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
@@ -536,15 +567,12 @@ class LRUCache(object):
__copy__ = copy
-abc.MutableMapping.register(LRUCache)
-
-
def select_autoescape(
- enabled_extensions=("html", "htm", "xml"),
- disabled_extensions=(),
- default_for_string=True,
- default=False,
-):
+ enabled_extensions: t.Collection[str] = ("html", "htm", "xml"),
+ disabled_extensions: t.Collection[str] = (),
+ default_for_string: bool = True,
+ default: bool = False,
+) -> t.Callable[[t.Optional[str]], bool]:
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
@@ -579,10 +607,10 @@ def select_autoescape(
.. versionadded:: 2.9
"""
- enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
- disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
+ enabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in enabled_extensions)
+ disabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in disabled_extensions)
- def autoescape(template_name):
+ def autoescape(template_name: t.Optional[str]) -> bool:
if template_name is None:
return default_for_string
template_name = template_name.lower()
@@ -595,37 +623,47 @@ def select_autoescape(
return autoescape
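
``select_autoescape`` simply builds a predicate over template names. A short sketch of the returned callable:

    from jinja2 import Environment, select_autoescape

    autoescape = select_autoescape(enabled_extensions=("html", "xml"))

    print(autoescape("index.html"))   # True
    print(autoescape("notes.txt"))    # False (not enabled, `default` is False)
    print(autoescape(None))           # True (`default_for_string` is True)

    env = Environment(autoescape=autoescape)
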
-def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
- """Works exactly like :func:`dumps` but is safe for use in ``<script>``
- tags. It accepts the same arguments and returns a JSON string. Note that
- this is available in templates through the ``|tojson`` filter which will
- also mark the result as safe. Due to how this function escapes certain
- characters this is safe even if used outside of ``<script>`` tags.
+def htmlsafe_json_dumps(
+ obj: t.Any, dumps: t.Optional[t.Callable[..., str]] = None, **kwargs: t.Any
+) -> markupsafe.Markup:
+ """Serialize an object to a string of JSON with :func:`json.dumps`,
+ then replace HTML-unsafe characters with Unicode escapes and mark
+ the result safe with :class:`~markupsafe.Markup`.
+
+ This is available in templates as the ``|tojson`` filter.
+
+ The following characters are escaped: ``<``, ``>``, ``&``, ``'``.
+
+ The returned string is safe to render in HTML documents and
+ ``<script>`` tags. The exception is in HTML attributes that are
+ double quoted; either use single quotes or the ``|forceescape``
+ filter.
- The following characters are escaped in strings:
+ :param obj: The object to serialize to JSON.
+ :param dumps: The ``dumps`` function to use. Defaults to
+ ``env.policies["json.dumps_function"]``, which defaults to
+ :func:`json.dumps`.
+ :param kwargs: Extra arguments to pass to ``dumps``. Merged onto
+ ``env.policies["json.dumps_kwargs"]``.
- - ``<``
- - ``>``
- - ``&``
- - ``'``
+ .. versionchanged:: 3.0
+ The ``dumper`` parameter is renamed to ``dumps``.
- This makes it safe to embed such strings in any place in HTML with the
- notable exception of double quoted attributes. In that case single
- quote your attributes or HTML escape it in addition.
+ .. versionadded:: 2.9
"""
- if dumper is None:
- dumper = json.dumps
- rv = (
- dumper(obj, **kwargs)
- .replace(u"<", u"\\u003c")
- .replace(u">", u"\\u003e")
- .replace(u"&", u"\\u0026")
- .replace(u"'", u"\\u0027")
+ if dumps is None:
+ dumps = json.dumps
+
+ return markupsafe.Markup(
+ dumps(obj, **kwargs)
+ .replace("<", "\\u003c")
+ .replace(">", "\\u003e")
+ .replace("&", "\\u0026")
+ .replace("'", "\\u0027")
)
- return Markup(rv)
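
The escaping performed by ``htmlsafe_json_dumps`` (and therefore by the ``|tojson`` filter) is easy to see directly:

    from jinja2.utils import htmlsafe_json_dumps

    payload = {"title": "Tom & Jerry", "tag": "<script>"}
    print(htmlsafe_json_dumps(payload))
    # {"title": "Tom \u0026 Jerry", "tag": "\u003cscript\u003e"}
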
-class Cycler(object):
+class Cycler:
"""Cycle through values by yield them one at a time, then restarting
once the end is reached. Available as ``cycler`` in templates.
@@ -651,24 +689,24 @@ class Cycler(object):
.. versionadded:: 2.1
"""
- def __init__(self, *items):
+ def __init__(self, *items: t.Any) -> None:
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.pos = 0
- def reset(self):
+ def reset(self) -> None:
"""Resets the current item to the first item."""
self.pos = 0
@property
- def current(self):
+ def current(self) -> t.Any:
"""Return the current item. Equivalent to the item that will be
returned next time :meth:`next` is called.
"""
return self.items[self.pos]
- def next(self):
+ def next(self) -> t.Any:
"""Return the current item, then advance :attr:`current` to the
next item.
"""
@@ -679,59 +717,39 @@ class Cycler(object):
__next__ = next
-class Joiner(object):
+class Joiner:
"""A joining helper for templates."""
- def __init__(self, sep=u", "):
+ def __init__(self, sep: str = ", ") -> None:
self.sep = sep
self.used = False
- def __call__(self):
+ def __call__(self) -> str:
if not self.used:
self.used = True
- return u""
+ return ""
return self.sep
-class Namespace(object):
+class Namespace:
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
- def __init__(*args, **kwargs): # noqa: B902
+ def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
- def __getattribute__(self, name):
+ def __getattribute__(self, name: str) -> t.Any:
# __class__ is needed for the awaitable check in async mode
if name in {"_Namespace__attrs", "__class__"}:
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
except KeyError:
- raise AttributeError(name)
+ raise AttributeError(name) from None
- def __setitem__(self, name, value):
+ def __setitem__(self, name: str, value: t.Any) -> None:
self.__attrs[name] = value
- def __repr__(self):
- return "<Namespace %r>" % self.__attrs
-
-
-# does this python version support async for in and async generators?
-try:
- exec("async def _():\n async for _ in ():\n yield _")
- have_async_gen = True
-except SyntaxError:
- have_async_gen = False
-
-
-def soft_unicode(s):
- from markupsafe import soft_unicode
-
- warnings.warn(
- "'jinja2.utils.soft_unicode' will be removed in version 3.0."
- " Use 'markupsafe.soft_unicode' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return soft_unicode(s)
+ def __repr__(self) -> str:
+ return f"<Namespace {self.__attrs!r}>"
diff --git a/deps/v8/third_party/jinja2/visitor.py b/deps/v8/third_party/jinja2/visitor.py
index d1365bf10e..17c6aaba57 100644
--- a/deps/v8/third_party/jinja2/visitor.py
+++ b/deps/v8/third_party/jinja2/visitor.py
@@ -1,11 +1,19 @@
-# -*- coding: utf-8 -*-
"""API for traversing the AST nodes. Implemented by the compiler and
meta introspection.
"""
+import typing as t
+
from .nodes import Node
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ class VisitCallable(te.Protocol):
+ def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ ...
-class NodeVisitor(object):
+
+class NodeVisitor:
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
@@ -17,25 +25,26 @@ class NodeVisitor(object):
(return value `None`) the `generic_visit` visitor is used instead.
"""
- def get_visitor(self, node):
+ def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
- method = "visit_" + node.__class__.__name__
- return getattr(self, method, None)
+ return getattr(self, f"visit_{type(node).__name__}", None)
- def visit(self, node, *args, **kwargs):
+ def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Visit a node."""
f = self.get_visitor(node)
+
if f is not None:
return f(node, *args, **kwargs)
+
return self.generic_visit(node, *args, **kwargs)
- def generic_visit(self, node, *args, **kwargs):
+ def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Called if no explicit visitor function exists for a node."""
- for node in node.iter_child_nodes():
- self.visit(node, *args, **kwargs)
+ for child_node in node.iter_child_nodes():
+ self.visit(child_node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
@@ -49,7 +58,7 @@ class NodeTransformer(NodeVisitor):
replacement takes place.
"""
- def generic_visit(self, node, *args, **kwargs):
+ def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
@@ -71,11 +80,13 @@ class NodeTransformer(NodeVisitor):
setattr(node, field, new_node)
return node
- def visit_list(self, node, *args, **kwargs):
+ def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
+
if not isinstance(rv, list):
- rv = [rv]
+ return [rv]
+
return rv
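
A short sketch of a ``NodeVisitor`` subclass that walks a parsed template and collects referenced variable names; the ``NameCollector`` class is made up for illustration:

    from jinja2 import Environment
    from jinja2.visitor import NodeVisitor

    class NameCollector(NodeVisitor):
        def __init__(self):
            self.names = set()

        def visit_Name(self, node, *args, **kwargs):
            # Called for every nodes.Name in the tree; other node types
            # fall back to generic_visit, which recurses into children.
            self.names.add(node.name)

    env = Environment()
    ast = env.parse("{{ user.name }} has {{ count }} items")
    collector = NameCollector()
    collector.visit(ast)
    print(sorted(collector.names))   # ['count', 'user']
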
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index ac0cb6250c..eb101be349 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -371,15 +371,11 @@ transitioning builtin SortCompareDefault(
// 7. Let xSmaller be the result of performing
// Abstract Relational Comparison xString < yString.
// 8. If xSmaller is true, return -1.
- if (StringLessThan(context, xString, yString) == True) return -1;
-
// 9. Let ySmaller be the result of performing
// Abstract Relational Comparison yString < xString.
// 10. If ySmaller is true, return 1.
- if (StringLessThan(context, yString, xString) == True) return 1;
-
// 11. Return +0.
- return 0;
+ return StringCompare(xString, yString);
}
transitioning builtin SortCompareUserFn(
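
This Torque change folds the two ``StringLessThan`` checks into a single ``StringCompare`` call. A rough Python sketch of the equivalent comparator semantics, not the builtin itself:

    def sort_compare_default(x, y):
        """Rough Python model of the default comparator after this change."""
        x_string, y_string = str(x), str(y)
        # Old shape: two separate checks.
        #   if x_string < y_string: return -1
        #   if y_string < x_string: return 1
        #   return 0
        # New shape: a single three-way string comparison.
        return (x_string > y_string) - (x_string < y_string)

    print(sort_compare_default(10, 9))   # -1: "10" sorts before "9" as strings
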
diff --git a/deps/v8/third_party/zlib/BUILD.gn b/deps/v8/third_party/zlib/BUILD.gn
index b85067a12b..5c215860ae 100644
--- a/deps/v8/third_party/zlib/BUILD.gn
+++ b/deps/v8/third_party/zlib/BUILD.gn
@@ -4,6 +4,12 @@
import("//build/config/compiler/compiler.gni")
+declare_args() {
+ # Expose zlib's symbols, used by Node.js to provide zlib APIs for its native
+ # modules.
+ zlib_symbols_visible = false
+}
+
if (build_with_chromium) {
import("//testing/test.gni")
}
@@ -14,6 +20,10 @@ if (current_cpu == "arm" || current_cpu == "arm64") {
config("zlib_config") {
include_dirs = [ "." ]
+
+ if (zlib_symbols_visible) {
+ defines = [ "ZLIB_DLL" ]
+ }
}
config("zlib_internal_config") {
@@ -23,7 +33,7 @@ config("zlib_internal_config") {
# Build code using -O3, see: crbug.com/1084371.
configs = [ "//build/config/compiler:optimize_speed" ]
}
- if (is_debug || use_libfuzzer) {
+ if (is_debug || use_fuzzing_engine) {
# Enable zlib's asserts in debug and fuzzer builds.
defines += [ "ZLIB_DEBUG" ]
}
@@ -358,6 +368,11 @@ component("zlib") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
+ if (zlib_symbols_visible) {
+ configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+ configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ }
+
public_configs = [ ":zlib_config" ]
configs += [
diff --git a/deps/v8/third_party/zlib/CMakeLists.txt b/deps/v8/third_party/zlib/CMakeLists.txt
new file mode 100644
index 0000000000..f06e193f72
--- /dev/null
+++ b/deps/v8/third_party/zlib/CMakeLists.txt
@@ -0,0 +1,234 @@
+cmake_minimum_required(VERSION 3.0)
+set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS ON)
+
+project(zlib C)
+
+set(VERSION "1.2.13")
+
+set(INSTALL_BIN_DIR "${CMAKE_INSTALL_PREFIX}/bin" CACHE PATH "Installation directory for executables")
+set(INSTALL_LIB_DIR "${CMAKE_INSTALL_PREFIX}/lib" CACHE PATH "Installation directory for libraries")
+set(INSTALL_INC_DIR "${CMAKE_INSTALL_PREFIX}/include" CACHE PATH "Installation directory for headers")
+set(INSTALL_MAN_DIR "${CMAKE_INSTALL_PREFIX}/share/man" CACHE PATH "Installation directory for manual pages")
+set(INSTALL_PKGCONFIG_DIR "${CMAKE_INSTALL_PREFIX}/share/pkgconfig" CACHE PATH "Installation directory for pkgconfig (.pc) files")
+
+include(CheckTypeSize)
+include(CheckFunctionExists)
+include(CheckIncludeFile)
+include(CheckCSourceCompiles)
+enable_testing()
+
+check_include_file(sys/types.h HAVE_SYS_TYPES_H)
+check_include_file(stdint.h HAVE_STDINT_H)
+check_include_file(stddef.h HAVE_STDDEF_H)
+
+option(ENABLE_SIMD_OPTIMIZATIONS "Enable all SIMD optimizations" OFF)
+
+# TODO(cavalcantii): add support for other OSes (e.g. Android, fuchsia, osx)
+# and architectures (e.g. Arm).
+if (ENABLE_SIMD_OPTIMIZATIONS)
+ add_definitions(-DINFLATE_CHUNK_SIMD_SSE2)
+ add_definitions(-DADLER32_SIMD_SSSE3)
+ add_definitions(-DINFLATE_CHUNK_READ_64LE)
+ add_definitions(-DCRC32_SIMD_SSE42_PCLMUL)
+ add_definitions(-DDEFLATE_SLIDE_HASH_SSE2)
+ add_compile_options(-msse4.2 -mpclmul)
+ # Required by CPU features detection code.
+ add_definitions(-DX86_NOT_WINDOWS)
+ # Some environments (e.g. CentOS) apparently require linking explicitly
+ # against pthread, which the CPU features detection code needs.
+ find_package (Threads REQUIRED)
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+endif()
+
+#
+# Check to see if we have large file support
+#
+set(CMAKE_REQUIRED_DEFINITIONS -D_LARGEFILE64_SOURCE=1)
+# We add these other definitions here because CheckTypeSize.cmake
+# in CMake 2.4.x does not automatically do so and we want
+# compatibility with CMake 2.4.x.
+if(HAVE_SYS_TYPES_H)
+ list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_SYS_TYPES_H)
+endif()
+if(HAVE_STDINT_H)
+ list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDINT_H)
+endif()
+if(HAVE_STDDEF_H)
+ list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDDEF_H)
+endif()
+check_type_size(off64_t OFF64_T)
+if(HAVE_OFF64_T)
+ add_definitions(-D_LARGEFILE64_SOURCE=1)
+endif()
+set(CMAKE_REQUIRED_DEFINITIONS) # clear variable
+
+#
+# Check for fseeko
+#
+check_function_exists(fseeko HAVE_FSEEKO)
+if(NOT HAVE_FSEEKO)
+ add_definitions(-DNO_FSEEKO)
+endif()
+
+#
+# Check for unistd.h
+#
+check_include_file(unistd.h Z_HAVE_UNISTD_H)
+
+if(MSVC)
+ set(CMAKE_DEBUG_POSTFIX "d")
+ add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
+ add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE)
+ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+endif()
+
+if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+ # If we're doing an out of source build and the user has a zconf.h
+ # in their source tree...
+ if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h)
+ message(STATUS "Renaming")
+ message(STATUS " ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h")
+ message(STATUS "to 'zconf.h.included' because this file is included with zlib")
+ message(STATUS "but CMake generates it automatically in the build directory.")
+ file(RENAME ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h.included)
+ endif()
+endif()
+
+set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc)
+configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/zlib.pc.cmakein
+ ${ZLIB_PC} @ONLY)
+configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h.cmakein
+ ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY)
+include_directories(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_SOURCE_DIR})
+
+
+#============================================================================
+# zlib
+#============================================================================
+
+set(ZLIB_PUBLIC_HDRS
+ ${CMAKE_CURRENT_BINARY_DIR}/zconf.h
+ zlib.h
+)
+set(ZLIB_PRIVATE_HDRS
+ crc32.h
+ deflate.h
+ gzguts.h
+ inffast.h
+ inffixed.h
+ inflate.h
+ inftrees.h
+ trees.h
+ zutil.h
+)
+set(ZLIB_SRCS
+ adler32.c
+ compress.c
+ crc32.c
+ deflate.c
+ gzclose.c
+ gzlib.c
+ gzread.c
+ gzwrite.c
+ inflate.c
+ infback.c
+ inftrees.c
+ inffast.c
+ trees.c
+ uncompr.c
+ zutil.c
+)
+
+
+#============================================================================
+# Update list of source files if optimizations were enabled
+#============================================================================
+if (ENABLE_SIMD_OPTIMIZATIONS)
+ list(REMOVE_ITEM ZLIB_SRCS inflate.c)
+
+ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h)
+ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/chunkcopy.h)
+ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.h)
+ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h)
+ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.h)
+
+ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c)
+ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.c)
+ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inflate.c)
+ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c)
+ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.c)
+ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc_folding.c)
+endif()
+
+# parse the full version number from zlib.h and include in ZLIB_FULL_VERSION
+file(READ ${CMAKE_CURRENT_SOURCE_DIR}/zlib.h _zlib_h_contents)
+string(REGEX REPLACE ".*#define[ \t]+ZLIB_VERSION[ \t]+\"([-0-9A-Za-z.]+)\".*"
+ "\\1" ZLIB_FULL_VERSION ${_zlib_h_contents})
+
+if(MINGW)
+ # This gets us DLL resource information when compiling on MinGW.
+ if(NOT CMAKE_RC_COMPILER)
+ set(CMAKE_RC_COMPILER windres.exe)
+ endif()
+
+ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj
+ COMMAND ${CMAKE_RC_COMPILER}
+ -D GCC_WINDRES
+ -I ${CMAKE_CURRENT_SOURCE_DIR}
+ -I ${CMAKE_CURRENT_BINARY_DIR}
+ -o ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj
+ -i ${CMAKE_CURRENT_SOURCE_DIR}/win32/zlib1.rc)
+ set(ZLIB_DLL_SRCS ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj)
+endif(MINGW)
+
+add_library(zlib SHARED ${ZLIB_SRCS} ${ZLIB_DLL_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
+add_library(zlibstatic STATIC ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
+set_target_properties(zlib PROPERTIES DEFINE_SYMBOL ZLIB_DLL)
+set_target_properties(zlib PROPERTIES SOVERSION 1)
+
+if(NOT CYGWIN)
+ # This property causes shared libraries on Linux to have the full version
+ # encoded into their final filename. We disable this on Cygwin because
+ # it causes cygz-${ZLIB_FULL_VERSION}.dll to be created when cygz.dll
+ # seems to be the default.
+ #
+ # This has no effect with MSVC, on that platform the version info for
+ # the DLL comes from the resource file win32/zlib1.rc
+ set_target_properties(zlib PROPERTIES VERSION ${ZLIB_FULL_VERSION})
+endif()
+
+if(UNIX)
+ # On unix-like platforms the library is almost always called libz
+ set_target_properties(zlib zlibstatic PROPERTIES OUTPUT_NAME z)
+ if(NOT APPLE)
+ set_target_properties(zlib PROPERTIES LINK_FLAGS "-Wl,--version-script,\"${CMAKE_CURRENT_SOURCE_DIR}/zlib.map\"")
+ endif()
+elseif(BUILD_SHARED_LIBS AND WIN32)
+ # Creates zlib1.dll when building shared library version
+ set_target_properties(zlib PROPERTIES SUFFIX "1.dll")
+endif()
+
+if(NOT SKIP_INSTALL_LIBRARIES AND NOT SKIP_INSTALL_ALL )
+ install(TARGETS zlib zlibstatic
+ RUNTIME DESTINATION "${INSTALL_BIN_DIR}"
+ ARCHIVE DESTINATION "${INSTALL_LIB_DIR}"
+ LIBRARY DESTINATION "${INSTALL_LIB_DIR}" )
+endif()
+if(NOT SKIP_INSTALL_HEADERS AND NOT SKIP_INSTALL_ALL )
+ install(FILES ${ZLIB_PUBLIC_HDRS} DESTINATION "${INSTALL_INC_DIR}")
+endif()
+if(NOT SKIP_INSTALL_FILES AND NOT SKIP_INSTALL_ALL )
+ install(FILES zlib.3 DESTINATION "${INSTALL_MAN_DIR}/man3")
+endif()
+if(NOT SKIP_INSTALL_FILES AND NOT SKIP_INSTALL_ALL )
+ install(FILES ${ZLIB_PC} DESTINATION "${INSTALL_PKGCONFIG_DIR}")
+endif()
+
+#============================================================================
+# Benchmarker
+#============================================================================
+enable_language(CXX)
+set(CMAKE_CXX_STANDARD 14) # workaround for older compilers (e.g. g++ 5.4).
+add_executable(zlib_bench contrib/bench/zlib_bench.cc)
+target_link_libraries(zlib_bench zlib)
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/chunkcopy.h b/deps/v8/third_party/zlib/contrib/optimizations/chunkcopy.h
index db3c861ba4..f40546d54d 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/chunkcopy.h
+++ b/deps/v8/third_party/zlib/contrib/optimizations/chunkcopy.h
@@ -60,7 +60,7 @@ Z_STATIC_ASSERT(vector_128_bits_wide,
* instruction appropriate for the z_vec128i_t type.
*/
static inline z_vec128i_t loadchunk(
- const unsigned char FAR* s) {
+ const unsigned char FAR* s) Z_DISABLE_MSAN {
z_vec128i_t v;
Z_BUILTIN_MEMCPY(&v, s, sizeof(v));
return v;
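
Editor's note: Z_DISABLE_MSAN exempts loadchunk() from MemorySanitizer, because the chunked copy deliberately reads whole 16-byte vectors that may extend past the initialized tail of the window (the extra bytes are never used). A hedged sketch of how such an annotation is typically defined; the actual Z_DISABLE_MSAN definition lives elsewhere in Chromium's zlib and may differ:

    /* Silence MemorySanitizer for functions that intentionally read a few
     * uninitialized padding bytes whose values are never consumed. */
    #if defined(__has_feature)
    #  if __has_feature(memory_sanitizer)
    #    define Z_DISABLE_MSAN __attribute__((no_sanitize("memory")))
    #  endif
    #endif
    #ifndef Z_DISABLE_MSAN
    #  define Z_DISABLE_MSAN
    #endif
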
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c b/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c
index 5b094873ae..a38e14db03 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c
+++ b/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.c
@@ -1,5 +1,6 @@
/* inffast_chunk.c -- fast decoding
* Copyright (C) 1995-2017 Mark Adler
+ * Copyright 2023 The Chromium Authors
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@@ -24,8 +25,8 @@
Entry assumptions:
state->mode == LEN
- strm->avail_in >= INFLATE_FAST_MIN_INPUT (6 or 8 bytes)
- strm->avail_out >= INFLATE_FAST_MIN_OUTPUT (258 bytes)
+ strm->avail_in >= INFLATE_FAST_MIN_INPUT (6 or 8 bytes + 7 bytes)
+ strm->avail_out >= INFLATE_FAST_MIN_OUTPUT (258 bytes + 2 bytes)
start >= strm->avail_out
state->bits < 8
(state->hold >> state->bits) == 0
@@ -42,7 +43,7 @@
Notes:
- INFLATE_FAST_MIN_INPUT: 6 or 8 bytes
+ INFLATE_FAST_MIN_INPUT: 6 or 8 bytes + 7 bytes
- The maximum input bits used by a length/distance pair is 15 bits for the
length code, 5 bits for the length extra, 15 bits for the distance code,
@@ -64,11 +65,11 @@
(state->hold >> state->bits) == 0
- INFLATE_FAST_MIN_OUTPUT: 258 bytes
+ INFLATE_FAST_MIN_OUTPUT: 258 bytes + 2 bytes for literals = 260 bytes
- The maximum bytes that a single length/distance pair can output is 258
bytes, which is the maximum length that can be coded. inflate_fast()
- requires strm->avail_out >= 258 for each loop to avoid checking for
+ requires strm->avail_out >= 260 for each loop to avoid checking for
available output space while decoding.
*/
void ZLIB_INTERNAL inflate_fast_chunk_(strm, start)
@@ -124,22 +125,50 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
lmask = (1U << state->lenbits) - 1;
dmask = (1U << state->distbits) - 1;
+#ifdef INFLATE_CHUNK_READ_64LE
+#define REFILL() do { \
+ Assert(bits < 64, "### Too many bits in inflate_fast."); \
+ hold |= read64le(in) << bits; \
+ in += 7; \
+ in -= bits >> 3; \
+ bits |= 56; \
+ } while (0)
+#endif
+
/* decode literals and length/distances until end-of-block or not enough
input data or output space */
do {
- if (bits < 15) {
#ifdef INFLATE_CHUNK_READ_64LE
- hold |= read64le(in) << bits;
- in += 6;
- bits += 48;
+ REFILL();
#else
+ if (bits < 15) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
hold += (unsigned long)(*in++) << bits;
bits += 8;
-#endif
}
+#endif
here = lcode + (hold & lmask);
+#ifdef INFLATE_CHUNK_READ_64LE
+ if (here->op == 0) { /* literal */
+ Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here->val));
+ *out++ = (unsigned char)(here->val);
+ hold >>= here->bits;
+ bits -= here->bits;
+ here = lcode + (hold & lmask);
+ if (here->op == 0) { /* literal */
+ Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ?
+ "inflate: 2nd literal '%c'\n" :
+ "inflate: 2nd literal 0x%02x\n", here->val));
+ *out++ = (unsigned char)(here->val);
+ hold >>= here->bits;
+ bits -= here->bits;
+ here = lcode + (hold & lmask);
+ }
+ }
+#endif
dolen:
op = (unsigned)(here->bits);
hold >>= op;
@@ -155,33 +184,25 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
len = (unsigned)(here->val);
op &= 15; /* number of extra bits */
if (op) {
+#ifndef INFLATE_CHUNK_READ_64LE
if (bits < op) {
-#ifdef INFLATE_CHUNK_READ_64LE
- hold |= read64le(in) << bits;
- in += 6;
- bits += 48;
-#else
hold += (unsigned long)(*in++) << bits;
bits += 8;
-#endif
}
+#endif
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
Tracevv((stderr, "inflate: length %u\n", len));
+#ifndef INFLATE_CHUNK_READ_64LE
if (bits < 15) {
-#ifdef INFLATE_CHUNK_READ_64LE
- hold |= read64le(in) << bits;
- in += 6;
- bits += 48;
-#else
hold += (unsigned long)(*in++) << bits;
bits += 8;
hold += (unsigned long)(*in++) << bits;
bits += 8;
-#endif
}
+#endif
here = dcode + (hold & dmask);
dodist:
op = (unsigned)(here->bits);
@@ -191,11 +212,11 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
if (op & 16) { /* distance base */
dist = (unsigned)(here->val);
op &= 15; /* number of extra bits */
+ /* we have two fast-path loads: 10+10 + 15+5 + 15 = 55,
+ but we may need to refill here in the worst case */
if (bits < op) {
#ifdef INFLATE_CHUNK_READ_64LE
- hold |= read64le(in) << bits;
- in += 6;
- bits += 48;
+ REFILL();
#else
hold += (unsigned long)(*in++) << bits;
bits += 8;
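
Editor's note: the REFILL() macro added above implements the branchless 56-bit refill described in the header comment. A self-contained C sketch of the idiom outside inflate's state machine; the hold/bits names follow the diff, read64le is sketched here with memcpy, and the caller is assumed to guarantee at least 8 readable bytes (as INFLATE_FAST_MIN_INPUT does in the real code):

    #include <stdint.h>
    #include <string.h>

    /* Unaligned little-endian 64-bit load (assumes a little-endian host,
     * as INFLATE_CHUNK_READ_64LE does). */
    static uint64_t read64le(const unsigned char *p) {
      uint64_t v;
      memcpy(&v, p, sizeof(v));
      return v;
    }

    /* Top up the bit buffer so at least 56 bits are available, advancing
     * the input pointer by 7 - (bits / 8) bytes, exactly like REFILL(). */
    static void refill(const unsigned char **in, uint64_t *hold, unsigned *bits) {
      *hold |= read64le(*in) << *bits;
      *in += 7;
      *in -= *bits >> 3;
      *bits |= 56;
    }
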
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.h b/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.h
index 39c771b863..cc861bd09d 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.h
+++ b/deps/v8/third_party/zlib/contrib/optimizations/inffast_chunk.h
@@ -1,6 +1,7 @@
/* inffast_chunk.h -- header to use inffast_chunk.c
* Copyright (C) 1995-2003, 2010 Mark Adler
* Copyright (C) 2017 ARM, Inc.
+ * Copyright 2023 The Chromium Authors
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@@ -11,16 +12,31 @@
#include "inffast.h"
-/* INFLATE_FAST_MIN_INPUT: the minimum number of input bytes needed so that
- we can safely call inflate_fast() with only one up-front bounds check. One
+/* INFLATE_FAST_MIN_INPUT:
+ The minimum number of input bytes needed so that we can safely call
+ inflate_fast() with only one up-front bounds check. One
length/distance code pair (15 bits for the length code, 5 bits for length
extra, 15 bits for the distance code, 13 bits for distance extra) requires
- reading up to 48 input bits (6 bytes). The wide input data reading option
- requires a little endian machine, and reads 64 input bits (8 bytes).
+ reading up to 48 input bits. Additionally, in the same iteration, we may
+ decode two literals from the root-table (requiring MIN_OUTPUT = 258 + 2).
+
+ Each root-table entry is up to 10 bits, for a total of 68 input bits each
+ iteration.
+
+ The refill variant reads 8 bytes from the buffer at a time, and advances
+ the input pointer by up to 7 bytes, ensuring there are at least 56 bits
+ available in the bit-buffer. The technique was documented by Fabian Giesen
+ on his blog as variant 4 in the article 'Reading bits in far too many ways':
+ https://fgiesen.wordpress.com/2018/02/20/
+
+ In the worst case, we may refill twice in the same iteration, requiring
+ MIN_INPUT = 8 + 7.
*/
#ifdef INFLATE_CHUNK_READ_64LE
#undef INFLATE_FAST_MIN_INPUT
-#define INFLATE_FAST_MIN_INPUT 8
+#define INFLATE_FAST_MIN_INPUT 15
+#undef INFLATE_FAST_MIN_OUTPUT
+#define INFLATE_FAST_MIN_OUTPUT 260
#endif
void ZLIB_INTERNAL inflate_fast_chunk_ OF((z_streamp strm, unsigned start));
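
Editor's note: the arithmetic behind the new bounds, spelled out: one length/distance pair needs up to 15 + 5 + 15 + 13 = 48 input bits, two root-table literals add 2 * 10 = 20 bits (68 bits per iteration), and each refill reads 8 bytes while advancing by at most 7, so MIN_INPUT = 8 + 7 = 15; the two extra literals raise MIN_OUTPUT from 258 to 260. A small C restatement of those sums (plain static asserts, not part of zlib):

    /* Worked bounds for the INFLATE_CHUNK_READ_64LE fast path. */
    #define LEN_CODE_BITS     15
    #define LEN_EXTRA_BITS     5
    #define DIST_CODE_BITS    15
    #define DIST_EXTRA_BITS   13
    #define ROOT_LITERAL_BITS 10

    _Static_assert(LEN_CODE_BITS + LEN_EXTRA_BITS +
                   DIST_CODE_BITS + DIST_EXTRA_BITS == 48,
                   "bits for one length/distance pair");
    _Static_assert(48 + 2 * ROOT_LITERAL_BITS == 68,
                   "worst-case input bits per iteration");
    _Static_assert(8 + 7 == 15, "MIN_INPUT: one 8-byte read plus up to 7 unconsumed bytes");
    _Static_assert(258 + 2 == 260, "MIN_OUTPUT: max match length plus two literals");
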
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h b/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h
index 2a04f69934..c6a296aef7 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h
+++ b/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h
@@ -57,10 +57,9 @@
TARGET_CPU_WITH_CRC
local INLINE Pos insert_string_simd(deflate_state* const s, const Pos str) {
Pos ret;
- unsigned *ip, val, h = 0;
+ unsigned val, h = 0;
- ip = (unsigned*)&s->window[str];
- val = *ip;
+ zmemcpy(&val, &s->window[str], sizeof(val));
if (s->level >= 6)
val &= 0xFFFFFF;
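
Editor's note: the insert_string_simd change replaces a cast-and-dereference of the window with a zmemcpy into a local, avoiding unaligned-access and strict-aliasing undefined behavior; compilers still emit a single load. A minimal standalone C sketch of the pattern (read_u32 is an illustrative name, not zlib API):

    #include <stdint.h>
    #include <string.h>

    /* Aliasing- and alignment-safe 32-bit load; typically compiles to one
     * MOV/LDR on targets that permit unaligned access. */
    static uint32_t read_u32(const unsigned char *p) {
      uint32_t v;
      memcpy(&v, p, sizeof(v));
      return v;
    }
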
diff --git a/deps/v8/third_party/zlib/google/BUILD.gn b/deps/v8/third_party/zlib/google/BUILD.gn
index 35ba1daf2d..990b023230 100644
--- a/deps/v8/third_party/zlib/google/BUILD.gn
+++ b/deps/v8/third_party/zlib/google/BUILD.gn
@@ -18,9 +18,9 @@ if (build_with_chromium) {
"zip_writer.h",
]
deps = [
+ "..:minizip",
"//base",
"//base:i18n",
- "//third_party/zlib:minizip",
]
}
@@ -30,8 +30,8 @@ if (build_with_chromium) {
"compression_utils.h",
]
deps = [
+ "..",
"//base",
- "//third_party/zlib",
]
public_deps = [ ":compression_utils_portable" ]
}
@@ -44,5 +44,5 @@ static_library("compression_utils_portable") {
"compression_utils_portable.cc",
"compression_utils_portable.h",
]
- public_deps = [ "//third_party/zlib" ]
+ public_deps = [ ".." ]
}
diff --git a/deps/v8/third_party/zlib/google/OWNERS b/deps/v8/third_party/zlib/google/OWNERS
index 411670ca13..868af3cc66 100644
--- a/deps/v8/third_party/zlib/google/OWNERS
+++ b/deps/v8/third_party/zlib/google/OWNERS
@@ -1,5 +1,3 @@
-fdegros@chromium.org
-noel@chromium.org
satorux@chromium.org
# compression_utils*
diff --git a/deps/v8/third_party/zlib/google/zip.cc b/deps/v8/third_party/zlib/google/zip.cc
index 490dcee34e..87065b9188 100644
--- a/deps/v8/third_party/zlib/google/zip.cc
+++ b/deps/v8/third_party/zlib/google/zip.cc
@@ -7,10 +7,10 @@
#include <string>
#include <vector>
-#include "base/bind.h"
#include "base/files/file.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_util.h"
+#include "base/functional/bind.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
diff --git a/deps/v8/third_party/zlib/google/zip.h b/deps/v8/third_party/zlib/google/zip.h
index e3036c809c..ea8778681d 100644
--- a/deps/v8/third_party/zlib/google/zip.h
+++ b/deps/v8/third_party/zlib/google/zip.h
@@ -10,10 +10,10 @@
#include <utility>
#include <vector>
-#include "base/callback.h"
#include "base/containers/span.h"
#include "base/files/file_path.h"
#include "base/files/platform_file.h"
+#include "base/functional/callback.h"
#include "base/time/time.h"
#include "build/build_config.h"
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index e97027a0bb..9b1030a029 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -7,10 +7,10 @@
#include <algorithm>
#include <utility>
-#include "base/bind.h"
#include "base/check.h"
#include "base/files/file.h"
#include "base/files/file_util.h"
+#include "base/functional/bind.h"
#include "base/i18n/icu_string_conversions.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
@@ -18,7 +18,7 @@
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/task/sequenced_task_runner.h"
#include "build/build_config.h"
#include "third_party/zlib/google/redact.h"
#include "third_party/zlib/google/zip_internal.h"
@@ -461,11 +461,11 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
// If this is a directory, just create it and return.
if (entry_.is_directory) {
if (base::CreateDirectory(output_file_path)) {
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE, std::move(success_callback));
} else {
LOG(ERROR) << "Cannot create directory " << Redact(output_file_path);
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE, std::move(failure_callback));
}
return;
@@ -479,7 +479,7 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
err != UNZ_OK) {
LOG(ERROR) << "Cannot open file " << Redact(entry_.path)
<< " from ZIP: " << err;
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
}
@@ -487,7 +487,7 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
base::FilePath output_dir_path = output_file_path.DirName();
if (!base::CreateDirectory(output_dir_path)) {
LOG(ERROR) << "Cannot create directory " << Redact(output_dir_path);
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
}
@@ -497,12 +497,12 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
if (!output_file.IsValid()) {
LOG(ERROR) << "Cannot create file " << Redact(output_file_path);
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE, std::move(failure_callback));
return;
}
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE,
base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
std::move(output_file), std::move(success_callback),
@@ -602,7 +602,7 @@ void ZipReader::ExtractChunk(base::File output_file,
offset += num_bytes_read;
progress_callback.Run(offset);
- base::SequencedTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE,
base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
std::move(output_file), std::move(success_callback),
diff --git a/deps/v8/third_party/zlib/google/zip_reader.h b/deps/v8/third_party/zlib/google/zip_reader.h
index 48244c8238..b7680cc839 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.h
+++ b/deps/v8/third_party/zlib/google/zip_reader.h
@@ -11,9 +11,9 @@
#include <memory>
#include <string>
-#include "base/callback.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
+#include "base/functional/callback.h"
#include "base/memory/weak_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/time/time.h"
diff --git a/deps/v8/third_party/zlib/google/zip_reader_unittest.cc b/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
index 52dab200a3..b9175045d0 100644
--- a/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader_unittest.cc
@@ -12,12 +12,12 @@
#include <string>
#include <vector>
-#include "base/bind.h"
#include "base/check.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
+#include "base/functional/bind.h"
#include "base/hash/md5.h"
#include "base/path_service.h"
#include "base/run_loop.h"
diff --git a/deps/v8/third_party/zlib/google/zip_unittest.cc b/deps/v8/third_party/zlib/google/zip_unittest.cc
index b639e8e843..24ed147729 100644
--- a/deps/v8/third_party/zlib/google/zip_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_unittest.cc
@@ -12,12 +12,12 @@
#include <unordered_set>
#include <vector>
-#include "base/bind.h"
#include "base/files/file.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
+#include "base/functional/bind.h"
#include "base/logging.h"
#include "base/path_service.h"
#include "base/strings/strcat.h"
diff --git a/deps/v8/third_party/zlib/patches/0010-cmake-enable-simd.patch b/deps/v8/third_party/zlib/patches/0010-cmake-enable-simd.patch
new file mode 100644
index 0000000000..3893101b7c
--- /dev/null
+++ b/deps/v8/third_party/zlib/patches/0010-cmake-enable-simd.patch
@@ -0,0 +1,96 @@
+diff --git a/third_party/zlib/CMakeLists.txt b/third_party/zlib/CMakeLists.txt
+index b412dc7feb732..0431278405046 100644
+--- a/third_party/zlib/CMakeLists.txt
++++ b/third_party/zlib/CMakeLists.txt
+@@ -1,4 +1,4 @@
+-cmake_minimum_required(VERSION 2.4.4)
++cmake_minimum_required(VERSION 3.0)
+ set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS ON)
+
+ project(zlib C)
+@@ -21,6 +21,26 @@ check_include_file(sys/types.h HAVE_SYS_TYPES_H)
+ check_include_file(stdint.h HAVE_STDINT_H)
+ check_include_file(stddef.h HAVE_STDDEF_H)
+
++option(ENABLE_SIMD_OPTIMIZATIONS "Enable all SIMD optimizations" OFF)
++
++# TODO(cavalcantii): add support for other OSes (e.g. Android, fuchsia, osx)
++# and architectures (e.g. Arm).
++if (ENABLE_SIMD_OPTIMIZATIONS)
++ add_definitions(-DINFLATE_CHUNK_SIMD_SSE2)
++ add_definitions(-DADLER32_SIMD_SSSE3)
++ add_definitions(-DINFLATE_CHUNK_READ_64LE)
++ add_definitions(-DCRC32_SIMD_SSE42_PCLMUL)
++ add_definitions(-DDEFLATE_SLIDE_HASH_SSE2)
++ add_compile_options(-msse4.2 -mpclmul)
++ # Required by CPU features detection code.
++ add_definitions(-DX86_NOT_WINDOWS)
++ # Some environments (e.g. CentOS) apparently require linking explicitly
++ # against pthread, which the CPU features detection code needs.
++ find_package (Threads REQUIRED)
++ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread")
++ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
++endif()
++
+ #
+ # Check to see if we have large file support
+ #
+@@ -120,10 +140,25 @@ set(ZLIB_SRCS
+ zutil.c
+ )
+
+-if(NOT MINGW)
+- set(ZLIB_DLL_SRCS
+- win32/zlib1.rc # If present will override custom build rule below.
+- )
++
++#============================================================================
++# Update list of source files if optimizations were enabled
++#============================================================================
++if (ENABLE_SIMD_OPTIMIZATIONS)
++ list(REMOVE_ITEM ZLIB_SRCS inflate.c)
++
++ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h)
++ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/chunkcopy.h)
++ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.h)
++ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h)
++ list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.h)
++
++ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c)
++ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.c)
++ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inflate.c)
++ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c)
++ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.c)
++ list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc_folding.c)
+ endif()
+
+ # parse the full version number from zlib.h and include in ZLIB_FULL_VERSION
+@@ -191,23 +226,9 @@ if(NOT SKIP_INSTALL_FILES AND NOT SKIP_INSTALL_ALL )
+ endif()
+
+ #============================================================================
+-# Example binaries
++# Benchmarker
+ #============================================================================
+-
+-add_executable(example test/example.c)
+-target_link_libraries(example zlib)
+-add_test(example example)
+-
+-add_executable(minigzip test/minigzip.c)
+-target_link_libraries(minigzip zlib)
+-
+-if(HAVE_OFF64_T)
+- add_executable(example64 test/example.c)
+- target_link_libraries(example64 zlib)
+- set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64")
+- add_test(example64 example64)
+-
+- add_executable(minigzip64 test/minigzip.c)
+- target_link_libraries(minigzip64 zlib)
+- set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64")
+-endif()
++enable_language(CXX)
++set(CMAKE_CXX_STANDARD 14) # workaround for older compilers (e.g. g++ 5.4).
++add_executable(zlib_bench contrib/bench/zlib_bench.cc)
++target_link_libraries(zlib_bench zlib)
diff --git a/deps/v8/third_party/zlib/zconf.h.cmakein b/deps/v8/third_party/zlib/zconf.h.cmakein
new file mode 100644
index 0000000000..247ba2461d
--- /dev/null
+++ b/deps/v8/third_party/zlib/zconf.h.cmakein
@@ -0,0 +1,549 @@
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#ifndef ZCONF_H
+#define ZCONF_H
+#cmakedefine Z_PREFIX
+#cmakedefine Z_HAVE_UNISTD_H
+
+/*
+ * If you *really* need a unique prefix for all types and library functions,
+ * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
+ * Even better than compiling with -DZ_PREFIX would be to use configure to set
+ * this permanently in zconf.h using "./configure --zprefix".
+ */
+#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */
+# define Z_PREFIX_SET
+
+/* all linked symbols and init macros */
+# define _dist_code z__dist_code
+# define _length_code z__length_code
+# define _tr_align z__tr_align
+# define _tr_flush_bits z__tr_flush_bits
+# define _tr_flush_block z__tr_flush_block
+# define _tr_init z__tr_init
+# define _tr_stored_block z__tr_stored_block
+# define _tr_tally z__tr_tally
+# define adler32 z_adler32
+# define adler32_combine z_adler32_combine
+# define adler32_combine64 z_adler32_combine64
+# define adler32_z z_adler32_z
+# ifndef Z_SOLO
+# define compress z_compress
+# define compress2 z_compress2
+# define compressBound z_compressBound
+# endif
+# define crc32 z_crc32
+# define crc32_combine z_crc32_combine
+# define crc32_combine64 z_crc32_combine64
+# define crc32_combine_gen z_crc32_combine_gen
+# define crc32_combine_gen64 z_crc32_combine_gen64
+# define crc32_combine_op z_crc32_combine_op
+# define crc32_z z_crc32_z
+# define deflate z_deflate
+# define deflateBound z_deflateBound
+# define deflateCopy z_deflateCopy
+# define deflateEnd z_deflateEnd
+# define deflateGetDictionary z_deflateGetDictionary
+# define deflateInit z_deflateInit
+# define deflateInit2 z_deflateInit2
+# define deflateInit2_ z_deflateInit2_
+# define deflateInit_ z_deflateInit_
+# define deflateParams z_deflateParams
+# define deflatePending z_deflatePending
+# define deflatePrime z_deflatePrime
+# define deflateReset z_deflateReset
+# define deflateResetKeep z_deflateResetKeep
+# define deflateSetDictionary z_deflateSetDictionary
+# define deflateSetHeader z_deflateSetHeader
+# define deflateTune z_deflateTune
+# define deflate_copyright z_deflate_copyright
+# define get_crc_table z_get_crc_table
+# ifndef Z_SOLO
+# define gz_error z_gz_error
+# define gz_intmax z_gz_intmax
+# define gz_strwinerror z_gz_strwinerror
+# define gzbuffer z_gzbuffer
+# define gzclearerr z_gzclearerr
+# define gzclose z_gzclose
+# define gzclose_r z_gzclose_r
+# define gzclose_w z_gzclose_w
+# define gzdirect z_gzdirect
+# define gzdopen z_gzdopen
+# define gzeof z_gzeof
+# define gzerror z_gzerror
+# define gzflush z_gzflush
+# define gzfread z_gzfread
+# define gzfwrite z_gzfwrite
+# define gzgetc z_gzgetc
+# define gzgetc_ z_gzgetc_
+# define gzgets z_gzgets
+# define gzoffset z_gzoffset
+# define gzoffset64 z_gzoffset64
+# define gzopen z_gzopen
+# define gzopen64 z_gzopen64
+# ifdef _WIN32
+# define gzopen_w z_gzopen_w
+# endif
+# define gzprintf z_gzprintf
+# define gzputc z_gzputc
+# define gzputs z_gzputs
+# define gzread z_gzread
+# define gzrewind z_gzrewind
+# define gzseek z_gzseek
+# define gzseek64 z_gzseek64
+# define gzsetparams z_gzsetparams
+# define gztell z_gztell
+# define gztell64 z_gztell64
+# define gzungetc z_gzungetc
+# define gzvprintf z_gzvprintf
+# define gzwrite z_gzwrite
+# endif
+# define inflate z_inflate
+# define inflateBack z_inflateBack
+# define inflateBackEnd z_inflateBackEnd
+# define inflateBackInit z_inflateBackInit
+# define inflateBackInit_ z_inflateBackInit_
+# define inflateCodesUsed z_inflateCodesUsed
+# define inflateCopy z_inflateCopy
+# define inflateEnd z_inflateEnd
+# define inflateGetDictionary z_inflateGetDictionary
+# define inflateGetHeader z_inflateGetHeader
+# define inflateInit z_inflateInit
+# define inflateInit2 z_inflateInit2
+# define inflateInit2_ z_inflateInit2_
+# define inflateInit_ z_inflateInit_
+# define inflateMark z_inflateMark
+# define inflatePrime z_inflatePrime
+# define inflateReset z_inflateReset
+# define inflateReset2 z_inflateReset2
+# define inflateResetKeep z_inflateResetKeep
+# define inflateSetDictionary z_inflateSetDictionary
+# define inflateSync z_inflateSync
+# define inflateSyncPoint z_inflateSyncPoint
+# define inflateUndermine z_inflateUndermine
+# define inflateValidate z_inflateValidate
+# define inflate_copyright z_inflate_copyright
+# define inflate_fast z_inflate_fast
+# define inflate_table z_inflate_table
+# ifndef Z_SOLO
+# define uncompress z_uncompress
+# define uncompress2 z_uncompress2
+# endif
+# define zError z_zError
+# ifndef Z_SOLO
+# define zcalloc z_zcalloc
+# define zcfree z_zcfree
+# endif
+# define zlibCompileFlags z_zlibCompileFlags
+# define zlibVersion z_zlibVersion
+
+/* all zlib typedefs in zlib.h and zconf.h */
+# define Byte z_Byte
+# define Bytef z_Bytef
+# define alloc_func z_alloc_func
+# define charf z_charf
+# define free_func z_free_func
+# ifndef Z_SOLO
+# define gzFile z_gzFile
+# endif
+# define gz_header z_gz_header
+# define gz_headerp z_gz_headerp
+# define in_func z_in_func
+# define intf z_intf
+# define out_func z_out_func
+# define uInt z_uInt
+# define uIntf z_uIntf
+# define uLong z_uLong
+# define uLongf z_uLongf
+# define voidp z_voidp
+# define voidpc z_voidpc
+# define voidpf z_voidpf
+
+/* all zlib structs in zlib.h and zconf.h */
+# define gz_header_s z_gz_header_s
+# define internal_state z_internal_state
+
+#endif
+
+#if defined(__MSDOS__) && !defined(MSDOS)
+# define MSDOS
+#endif
+#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2)
+# define OS2
+#endif
+#if defined(_WINDOWS) && !defined(WINDOWS)
+# define WINDOWS
+#endif
+#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__)
+# ifndef WIN32
+# define WIN32
+# endif
+#endif
+#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32)
+# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__)
+# ifndef SYS16BIT
+# define SYS16BIT
+# endif
+# endif
+#endif
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ */
+#ifdef SYS16BIT
+# define MAXSEG_64K
+#endif
+#ifdef MSDOS
+# define UNALIGNED_OK
+#endif
+
+#ifdef __STDC_VERSION__
+# ifndef STDC
+# define STDC
+# endif
+# if __STDC_VERSION__ >= 199901L
+# ifndef STDC99
+# define STDC99
+# endif
+# endif
+#endif
+#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__))
+# define STDC
+#endif
+
+#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */
+# define STDC
+#endif
+
+#ifndef STDC
+# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
+# define const /* note: need a more gentle solution here */
+# endif
+#endif
+
+#if defined(ZLIB_CONST) && !defined(z_const)
+# define z_const const
+#else
+# define z_const
+#endif
+
+#ifdef Z_SOLO
+ typedef unsigned long z_size_t;
+#else
+# define z_longlong long long
+# if defined(NO_SIZE_T)
+ typedef unsigned NO_SIZE_T z_size_t;
+# elif defined(STDC)
+# include <stddef.h>
+ typedef size_t z_size_t;
+# else
+ typedef unsigned long z_size_t;
+# endif
+# undef z_longlong
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2.
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
+ * created by gzip. (Files created by minigzip can still be extracted by
+ * gzip.)
+ */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ (1 << (windowBits+2)) + (1 << (memLevel+9))
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus about 7 kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+#ifndef Z_ARG /* function prototypes for stdarg */
+# if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# define Z_ARG(args) args
+# else
+# define Z_ARG(args) ()
+# endif
+#endif
+
+/* The following definitions for FAR are needed only for MSDOS mixed
+ * model programming (small or medium model with some far allocations).
+ * This was tested only with MSC; for other MSDOS compilers you may have
+ * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
+ * just define FAR to be empty.
+ */
+#ifdef SYS16BIT
+# if defined(M_I86SM) || defined(M_I86MM)
+ /* MSC small or medium model */
+# define SMALL_MEDIUM
+# ifdef _MSC_VER
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+# if (defined(__SMALL__) || defined(__MEDIUM__))
+ /* Turbo C small or medium model */
+# define SMALL_MEDIUM
+# ifdef __BORLANDC__
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+#endif
+
+#if defined(WINDOWS) || defined(WIN32)
+ /* If building or using zlib as a DLL, define ZLIB_DLL.
+ * This is not mandatory, but it offers a little performance increase.
+ */
+# ifdef ZLIB_DLL
+# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500))
+# ifdef ZLIB_INTERNAL
+# define ZEXTERN extern __declspec(dllexport)
+# else
+# define ZEXTERN extern __declspec(dllimport)
+# endif
+# endif
+# endif /* ZLIB_DLL */
+ /* If building or using zlib with the WINAPI/WINAPIV calling convention,
+ * define ZLIB_WINAPI.
+ * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI.
+ */
+# ifdef ZLIB_WINAPI
+# ifdef FAR
+# undef FAR
+# endif
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
+# include <windows.h>
+ /* No need for _export, use ZLIB.DEF instead. */
+ /* For complete Windows compatibility, use WINAPI, not __stdcall. */
+# define ZEXPORT WINAPI
+# ifdef WIN32
+# define ZEXPORTVA WINAPIV
+# else
+# define ZEXPORTVA FAR CDECL
+# endif
+# endif
+#endif
+
+#if defined (__BEOS__)
+# ifdef ZLIB_DLL
+# ifdef ZLIB_INTERNAL
+# define ZEXPORT __declspec(dllexport)
+# define ZEXPORTVA __declspec(dllexport)
+# else
+# define ZEXPORT __declspec(dllimport)
+# define ZEXPORTVA __declspec(dllimport)
+# endif
+# endif
+#endif
+
+#ifndef ZEXTERN
+# define ZEXTERN extern
+#endif
+#ifndef ZEXPORT
+# define ZEXPORT
+#endif
+#ifndef ZEXPORTVA
+# define ZEXPORTVA
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+#if !defined(__MACTYPES__)
+typedef unsigned char Byte; /* 8 bits */
+#endif
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+#ifdef SMALL_MEDIUM
+ /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */
+# define Bytef Byte FAR
+#else
+ typedef Byte FAR Bytef;
+#endif
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void const *voidpc;
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte const *voidpc;
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC)
+# include <limits.h>
+# if (UINT_MAX == 0xffffffffUL)
+# define Z_U4 unsigned
+# elif (ULONG_MAX == 0xffffffffUL)
+# define Z_U4 unsigned long
+# elif (USHRT_MAX == 0xffffffffUL)
+# define Z_U4 unsigned short
+# endif
+#endif
+
+#ifdef Z_U4
+ typedef Z_U4 z_crc_t;
+#else
+ typedef unsigned long z_crc_t;
+#endif
+
+#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */
+# define Z_HAVE_UNISTD_H
+#endif
+
+#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */
+# define Z_HAVE_STDARG_H
+#endif
+
+#ifdef STDC
+# ifndef Z_SOLO
+# include <sys/types.h> /* for off_t */
+# endif
+#endif
+
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# ifndef Z_SOLO
+# include <stdarg.h> /* for va_list */
+# endif
+#endif
+
+#ifdef _WIN32
+# ifndef Z_SOLO
+# include <stddef.h> /* for wchar_t */
+# endif
+#endif
+
+/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
+ * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
+ * though the former does not conform to the LFS document), but considering
+ * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
+ * equivalently requesting no 64-bit operations
+ */
+#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
+# undef _LARGEFILE64_SOURCE
+#endif
+
+#ifndef Z_HAVE_UNISTD_H
+# ifdef __WATCOMC__
+# define Z_HAVE_UNISTD_H
+# endif
+#endif
+#ifndef Z_HAVE_UNISTD_H
+# if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32)
+# define Z_HAVE_UNISTD_H
+# endif
+#endif
+#ifndef Z_SOLO
+# if defined(Z_HAVE_UNISTD_H)
+# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
+# ifdef VMS
+# include <unixio.h> /* for off_t */
+# endif
+# ifndef z_off_t
+# define z_off_t off_t
+# endif
+# endif
+#endif
+
+#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0
+# define Z_LFS64
+#endif
+
+#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64)
+# define Z_LARGE64
+#endif
+
+#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64)
+# define Z_WANT64
+#endif
+
+#if !defined(SEEK_SET) && !defined(Z_SOLO)
+# define SEEK_SET 0 /* Seek from beginning of file. */
+# define SEEK_CUR 1 /* Seek from current position. */
+# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */
+#endif
+
+#ifndef z_off_t
+# define z_off_t long
+#endif
+
+#if !defined(_WIN32) && defined(Z_LARGE64)
+# define z_off64_t off64_t
+#else
+# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO)
+# define z_off64_t __int64
+# else
+# define z_off64_t z_off_t
+# endif
+#endif
+
+/* MVS linker does not support external names larger than 8 bytes */
+#if defined(__MVS__)
+ #pragma map(deflateInit_,"DEIN")
+ #pragma map(deflateInit2_,"DEIN2")
+ #pragma map(deflateEnd,"DEEND")
+ #pragma map(deflateBound,"DEBND")
+ #pragma map(inflateInit_,"ININ")
+ #pragma map(inflateInit2_,"ININ2")
+ #pragma map(inflateEnd,"INEND")
+ #pragma map(inflateSync,"INSY")
+ #pragma map(inflateSetDictionary,"INSEDI")
+ #pragma map(compressBound,"CMBND")
+ #pragma map(inflate_table,"INTABL")
+ #pragma map(inflate_fast,"INFA")
+ #pragma map(inflate_copyright,"INCOPY")
+#endif
+
+#endif /* ZCONF_H */
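
Editor's note: the Z_PREFIX block above renames every public zlib symbol to a z_-prefixed one purely with the preprocessor, so this copy of zlib can coexist with another in the same process. Illustrative usage with standard zlib API calls; the source reads unchanged, only the linked symbol names differ when built with -DZ_PREFIX:

    /* Built with -DZ_PREFIX: crc32() and zlibVersion() below resolve to
     * z_crc32() and z_zlibVersion() at link time, avoiding symbol clashes. */
    #include <stdio.h>
    #include <string.h>
    #include "zlib.h"

    int main(void) {
      const unsigned char msg[] = "hello";
      unsigned long crc = crc32(0L, msg, (unsigned)strlen((const char *)msg));
      printf("zlib %s, crc32(\"hello\") = %lx\n", zlibVersion(), crc);
      return 0;
    }
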
diff --git a/deps/v8/third_party/zlib/zlib.3 b/deps/v8/third_party/zlib/zlib.3
new file mode 100644
index 0000000000..6f6e91404d
--- /dev/null
+++ b/deps/v8/third_party/zlib/zlib.3
@@ -0,0 +1,149 @@
+.TH ZLIB 3 "13 Oct 2022"
+.SH NAME
+zlib \- compression/decompression library
+.SH SYNOPSIS
+[see
+.I zlib.h
+for full description]
+.SH DESCRIPTION
+The
+.I zlib
+library is a general purpose data compression library.
+The code is thread safe, assuming that the standard library functions
+used are thread safe, such as memory allocation routines.
+It provides in-memory compression and decompression functions,
+including integrity checks of the uncompressed data.
+This version of the library supports only one compression method (deflation)
+but other algorithms may be added later
+with the same stream interface.
+.LP
+Compression can be done in a single step if the buffers are large enough
+or can be done by repeated calls of the compression function.
+In the latter case,
+the application must provide more input and/or consume the output
+(providing more output space) before each call.
+.LP
+The library also supports reading and writing files in
+.IR gzip (1)
+(.gz) format
+with an interface similar to that of stdio.
+.LP
+The library does not install any signal handler.
+The decoder checks the consistency of the compressed data,
+so the library should never crash even in the case of corrupted input.
+.LP
+All functions of the compression library are documented in the file
+.IR zlib.h .
+The distribution source includes examples of use of the library
+in the files
+.I test/example.c
+and
+.IR test/minigzip.c,
+as well as other examples in the
+.IR examples/
+directory.
+.LP
+Changes to this version are documented in the file
+.I ChangeLog
+that accompanies the source.
+.LP
+.I zlib
+is built in to many languages and operating systems, including but not limited to
+Java, Python, .NET, PHP, Perl, Ruby, Swift, and Go.
+.LP
+An experimental package to read and write files in the .zip format,
+written on top of
+.I zlib
+by Gilles Vollant (info@winimage.com),
+is available at:
+.IP
+http://www.winimage.com/zLibDll/minizip.html
+and also in the
+.I contrib/minizip
+directory of the main
+.I zlib
+source distribution.
+.SH "SEE ALSO"
+The
+.I zlib
+web site can be found at:
+.IP
+http://zlib.net/
+.LP
+The data format used by the
+.I zlib
+library is described by RFC
+(Request for Comments) 1950 to 1952 in the files:
+.IP
+http://tools.ietf.org/html/rfc1950 (for the zlib header and trailer format)
+.br
+http://tools.ietf.org/html/rfc1951 (for the deflate compressed data format)
+.br
+http://tools.ietf.org/html/rfc1952 (for the gzip header and trailer format)
+.LP
+Mark Nelson wrote an article about
+.I zlib
+for the Jan. 1997 issue of Dr. Dobb's Journal;
+a copy of the article is available at:
+.IP
+http://marknelson.us/1997/01/01/zlib-engine/
+.SH "REPORTING PROBLEMS"
+Before reporting a problem,
+please check the
+.I zlib
+web site to verify that you have the latest version of
+.IR zlib ;
+otherwise,
+obtain the latest version and see if the problem still exists.
+Please read the
+.I zlib
+FAQ at:
+.IP
+http://zlib.net/zlib_faq.html
+.LP
+before asking for help.
+Send questions and/or comments to zlib@gzip.org,
+or (for the Windows DLL version) to Gilles Vollant (info@winimage.com).
+.SH AUTHORS AND LICENSE
+Version 1.2.13
+.LP
+Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
+.LP
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+.LP
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+.LP
+.nr step 1 1
+.IP \n[step]. 3
+The origin of this software must not be misrepresented; you must not
+claim that you wrote the original software. If you use this software
+in a product, an acknowledgment in the product documentation would be
+appreciated but is not required.
+.IP \n+[step].
+Altered source versions must be plainly marked as such, and must not be
+misrepresented as being the original software.
+.IP \n+[step].
+This notice may not be removed or altered from any source distribution.
+.LP
+Jean-loup Gailly Mark Adler
+.br
+jloup@gzip.org madler@alumni.caltech.edu
+.LP
+The deflate format used by
+.I zlib
+was defined by Phil Katz.
+The deflate and
+.I zlib
+specifications were written by L. Peter Deutsch.
+Thanks to all the people who reported problems and suggested various
+improvements in
+.IR zlib ;
+they are too numerous to cite here.
+.LP
+UNIX manual page by R. P. C. Rodgers,
+U.S. National Library of Medicine (rodgers@nlm.nih.gov).
+.\" end of man page
diff --git a/deps/v8/third_party/zlib/zlib.pc.cmakein b/deps/v8/third_party/zlib/zlib.pc.cmakein
new file mode 100644
index 0000000000..a5e642938c
--- /dev/null
+++ b/deps/v8/third_party/zlib/zlib.pc.cmakein
@@ -0,0 +1,13 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=@CMAKE_INSTALL_PREFIX@
+libdir=@INSTALL_LIB_DIR@
+sharedlibdir=@INSTALL_LIB_DIR@
+includedir=@INSTALL_INC_DIR@
+
+Name: zlib
+Description: zlib compression library
+Version: @VERSION@
+
+Requires:
+Libs: -L${libdir} -L${sharedlibdir} -lz
+Cflags: -I${includedir}
diff --git a/deps/v8/tools/.vpython3 b/deps/v8/tools/.vpython3
new file mode 100644
index 0000000000..38ceb4d202
--- /dev/null
+++ b/deps/v8/tools/.vpython3
@@ -0,0 +1,91 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the chromium repo, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+# vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+# https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
+
+python_version: "3.8"
+
+# The default set of platforms vpython checks does not yet include mac-arm64.
+# Setting `verify_pep425_tag` to the list of platforms we explicitly must support
+# allows us to ensure that vpython specs stay mac-arm64-friendly.
+verify_pep425_tag: [
+ {python: "cp38", abi: "cp38", platform: "manylinux1_x86_64"},
+ {python: "cp38", abi: "cp38", platform: "linux_arm64"},
+
+ {python: "cp38", abi: "cp38", platform: "macosx_10_10_intel"},
+ {python: "cp38", abi: "cp38", platform: "macosx_11_0_arm64"},
+
+ {python: "cp38", abi: "cp38", platform: "win32"},
+ {python: "cp38", abi: "cp38", platform: "win_amd64"}
+]
+
+# TODO(https://crbug.com/898348): Add in necessary wheels as Python3 versions
+# become available.
+wheel: <
+ name: "infra/python/wheels/six-py2_py3"
+ version: "version:1.15.0"
+>
+
+wheel: <
+ name: "infra/python/wheels/coverage/${vpython_platform}"
+ version: "version:5.5.chromium.3"
+>
+
+wheel: <
+ name: "infra/python/wheels/pbr-py2_py3"
+ version: "version:3.0.0"
+>
+
+wheel: <
+ name: "infra/python/wheels/funcsigs-py2_py3"
+ version: "version:1.0.2"
+>
+
+wheel: <
+ name: "infra/python/wheels/mock-py2_py3"
+ version: "version:2.0.0"
+>
+
+wheel: <
+ name: "infra/python/wheels/numpy/${vpython_platform}"
+ version: "version:1.2x.supported.1"
+>
+
+wheel: <
+ name: "infra/python/wheels/protobuf-py3"
+ version: "version:3.19.3"
+>
+
+wheel: <
+ name: "infra/python/wheels/requests-py2_py3"
+ version: "version:2.13.0"
+>
+
+wheel: <
+ name: "infra/python/wheels/pyyaml/${vpython_platform}"
+ version: "version:5.4.1.chromium.1"
+>
+
+wheel: <
+ name: "infra/python/wheels/clusterfuzz-py2_py3"
+ version: "version:2.5.6-5c85ed3d46137b17da04c59bcd805ee5"
+>
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index e9b6a0a390..430f9a2c01 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -2,7 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//build/config/sanitizers/sanitizers.gni")
import("../gni/v8.gni")
group("gn_all") {
@@ -11,10 +10,13 @@ group("gn_all") {
data_deps = [
":v8_check_static_initializers",
"debug_helper:v8_debug_helper",
- "gcmole:v8_gcmole_files",
"jsfunfuzz:v8_jsfunfuzz",
]
+ if (v8_gcmole) {
+ data_deps += [ "gcmole:v8_gcmole_files" ]
+ }
+
if (is_win) {
data_deps += [ "v8windbg" ]
}
@@ -57,13 +59,7 @@ group("v8_testrunner") {
"predictable_wrapper.py",
"run-num-fuzzer.py",
"run-tests.py",
+ ".vpython3",
"testrunner/",
]
-
- if (v8_code_coverage && sanitizer_coverage_flags == "bb,trace-pc-guard") {
- data += [
- "sanitizers/sancov_merger.py",
- "../third_party/llvm/projects/compiler-rt/lib/sanitizer_common/scripts/sancov.py",
- ]
- }
}
diff --git a/deps/v8/tools/PRESUBMIT.py b/deps/v8/tools/PRESUBMIT.py
index ded0016793..6212decfe0 100644
--- a/deps/v8/tools/PRESUBMIT.py
+++ b/deps/v8/tools/PRESUBMIT.py
@@ -6,9 +6,20 @@
# use Python3 instead of Python2 when running the code in this file.
USE_PYTHON3 = True
+TEST_DIRECTORIES = [
+ 'unittests',
+ 'builtins-pgo',
+]
+
def CheckChangeOnCommit(input_api, output_api):
- tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, 'unittests', files_to_check=[r'.+_test\.py$'],
- run_on_python2=False)
+ tests = []
+ for directory in TEST_DIRECTORIES:
+ tests += input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api,
+ output_api,
+ directory,
+ files_to_check=[r'.+_test\.py$'],
+ run_on_python2=False)
+
return input_api.RunTests(tests)
diff --git a/deps/v8/tools/android-sync.sh b/deps/v8/tools/android-sync.sh
index 66d7aed78d..cbe68f2494 100755
--- a/deps/v8/tools/android-sync.sh
+++ b/deps/v8/tools/android-sync.sh
@@ -89,7 +89,7 @@ echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
sync_file "$OUTDIR/$ARCH_MODE/cctest"
sync_file "$OUTDIR/$ARCH_MODE/d8"
sync_file "$OUTDIR/$ARCH_MODE/snapshot_blob.bin"
-sync_file "$OUTDIR/$ARCH_MODE/unittests"
+sync_file "$OUTDIR/$ARCH_MODE/v8_unittests"
echo ""
echo -n "sync to $ANDROID_V8/tools"
sync_file tools/arguments.mjs
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index a7d525f179..a6cf0bb547 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -75,7 +75,7 @@ _test_flag() {
}
complete -F _d8_flag -f d8 v8 v8-debug
-complete -F _test_flag -f cctest unittests
+complete -F _test_flag -f cctest v8_unittests
# Many distros set up their own GDB completion scripts. The logic below is
# careful to wrap any such functions (with additional logic), rather than
@@ -105,7 +105,7 @@ _maybe_setup_gdb_completions() {
if [ "$next" = "d8" ] ; then
_d8_flag
return 0
- elif [ "$next" = "unittests" ] || [ "$next" = "cctest" ]; then
+ elif [ "$next" = "v8_unittests" ] || [ "$next" = "cctest" ]; then
_test_flag
return 0
fi
diff --git a/deps/v8/tools/bazel/bazel.sha1 b/deps/v8/tools/bazel/bazel.sha1
index f1b8cbeb35..49bbbbadaf 100644
--- a/deps/v8/tools/bazel/bazel.sha1
+++ b/deps/v8/tools/bazel/bazel.sha1
@@ -1 +1 @@
-a85565a04bf357b2cb4bf04a1c68b83660a47ea2 \ No newline at end of file
+f9e04ace61362af959c0ce5a999fc71bd4f00d68 \ No newline at end of file
diff --git a/deps/v8/tools/builtins-pgo/arm.profile b/deps/v8/tools/builtins-pgo/arm.profile
deleted file mode 100644
index 29277b7567..0000000000
--- a/deps/v8/tools/builtins-pgo/arm.profile
+++ /dev/null
@@ -1,6371 +0,0 @@
-block_hint,RecordWriteSaveFP,6,7,1
-block_hint,RecordWriteSaveFP,19,20,1
-block_hint,RecordWriteSaveFP,9,10,1
-block_hint,RecordWriteSaveFP,32,33,0
-block_hint,RecordWriteSaveFP,36,37,1
-block_hint,RecordWriteSaveFP,34,35,1
-block_hint,RecordWriteSaveFP,25,26,0
-block_hint,RecordWriteSaveFP,15,16,0
-block_hint,RecordWriteIgnoreFP,21,22,0
-block_hint,RecordWriteIgnoreFP,6,7,1
-block_hint,RecordWriteIgnoreFP,19,20,1
-block_hint,RecordWriteIgnoreFP,9,10,1
-block_hint,RecordWriteIgnoreFP,25,26,0
-block_hint,RecordWriteIgnoreFP,15,16,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,43,44,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,83,84,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,80,81,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,63,64,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,35,36,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,67,68,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,50,51,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,29,30,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,56,57,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,7,8,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,61,62,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,14,15,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,16,17,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,69,70,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,54,55,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,83,84,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,80,81,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,63,64,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,5,6,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,46,47,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,25,26,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,67,68,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,50,51,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,29,30,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,56,57,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,7,8,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,61,62,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,14,15,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,16,17,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,69,70,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,54,55,1
-block_hint,Call_ReceiverIsAny_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsAny_Baseline_Compact,21,22,1
-block_hint,CallProxy,38,39,1
-block_hint,CallProxy,22,23,1
-block_hint,CallProxy,18,19,1
-block_hint,CallWithSpread,51,52,1
-block_hint,CallWithSpread,31,32,1
-block_hint,CallWithSpread,6,7,1
-block_hint,CallWithSpread,57,58,1
-block_hint,CallWithSpread,59,60,0
-block_hint,CallWithSpread,43,44,0
-block_hint,CallWithSpread,45,46,1
-block_hint,CallWithSpread,33,34,0
-block_hint,CallWithSpread,38,39,1
-block_hint,CallWithSpread,40,41,1
-block_hint,CallWithSpread,22,23,1
-block_hint,CallWithSpread,24,25,1
-block_hint,CallWithSpread,26,27,0
-block_hint,CallWithSpread,53,54,1
-block_hint,CallWithSpread,47,48,1
-block_hint,CallWithSpread,9,10,0
-block_hint,CallWithSpread,11,12,1
-block_hint,CallWithSpread,13,14,0
-block_hint,CallWithSpread,49,50,0
-block_hint,CallWithSpread,55,56,1
-block_hint,CallWithSpread,15,16,1
-block_hint,CallWithSpread_Baseline,115,116,1
-block_hint,CallWithSpread_Baseline,60,61,0
-block_hint,CallWithSpread_Baseline,113,114,0
-block_hint,CallWithSpread_Baseline,106,107,0
-block_hint,CallWithSpread_Baseline,81,82,0
-block_hint,CallWithSpread_Baseline,48,49,0
-block_hint,CallWithSpread_Baseline,138,139,1
-block_hint,CallWithSpread_Baseline,130,131,0
-block_hint,CallWithSpread_Baseline,119,120,1
-block_hint,CallWithSpread_Baseline,85,86,1
-block_hint,CallWithSpread_Baseline,12,13,1
-block_hint,CallWithSpread_Baseline,100,101,1
-block_hint,CallWithSpread_Baseline,102,103,0
-block_hint,CallWithSpread_Baseline,68,69,0
-block_hint,CallWithSpread_Baseline,33,34,1
-block_hint,CallWithSpread_Baseline,70,71,1
-block_hint,CallWithSpread_Baseline,53,54,0
-block_hint,CallWithSpread_Baseline,63,64,1
-block_hint,CallWithSpread_Baseline,65,66,1
-block_hint,CallWithSpread_Baseline,37,38,1
-block_hint,CallWithSpread_Baseline,39,40,1
-block_hint,CallWithSpread_Baseline,41,42,0
-block_hint,CallWithSpread_Baseline,91,92,1
-block_hint,CallWithSpread_Baseline,77,78,1
-block_hint,CallWithSpread_Baseline,23,24,0
-block_hint,CallWithSpread_Baseline,25,26,1
-block_hint,CallWithSpread_Baseline,27,28,0
-block_hint,CallWithSpread_Baseline,79,80,0
-block_hint,CallWithSpread_Baseline,93,94,1
-block_hint,CallWithSpread_Baseline,29,30,1
-block_hint,CallWithArrayLike,28,29,1
-block_hint,CallWithArrayLike,30,31,1
-block_hint,CallWithArrayLike,32,33,1
-block_hint,CallWithArrayLike,6,7,1
-block_hint,CallWithArrayLike,8,9,1
-block_hint,CallWithArrayLike,61,62,0
-block_hint,CallWithArrayLike,53,54,1
-block_hint,CallWithArrayLike,46,47,0
-block_hint,CallWithArrayLike,10,11,1
-block_hint,CallWithArrayLike,63,64,1
-block_hint,CallWithArrayLike,55,56,0
-block_hint,CallWithArrayLike,57,58,1
-block_hint,CallWithArrayLike,12,13,0
-block_hint,ConstructWithSpread_Baseline,90,91,1
-block_hint,ConstructWithSpread_Baseline,70,71,1
-block_hint,ConstructWithSpread_Baseline,45,46,1
-block_hint,ConstructWithSpread_Baseline,11,12,1
-block_hint,ConstructWithSpread_Baseline,81,82,1
-block_hint,ConstructWithSpread_Baseline,83,84,0
-block_hint,ConstructWithSpread_Baseline,58,59,0
-block_hint,ConstructWithSpread_Baseline,27,28,1
-block_hint,Construct_Baseline,48,49,0
-block_hint,Construct_Baseline,46,47,1
-block_hint,Construct_Baseline,38,39,1
-block_hint,Construct_Baseline,23,24,1
-block_hint,Construct_Baseline,5,6,1
-block_hint,Construct_Baseline,20,21,1
-block_hint,Construct_Baseline,25,26,1
-block_hint,Construct_Baseline,7,8,1
-block_hint,Construct_Baseline,36,37,0
-block_hint,Construct_Baseline,11,12,1
-block_hint,Construct_Baseline,13,14,0
-block_hint,Construct_Baseline,40,41,0
-block_hint,Construct_Baseline,27,28,1
-block_hint,FastNewObject,38,39,1
-block_hint,FastNewObject,40,41,1
-block_hint,FastNewObject,42,43,1
-block_hint,FastNewObject,44,45,1
-block_hint,FastNewObject,53,54,0
-block_hint,FastNewObject,55,56,1
-block_hint,FastNewObject,48,49,0
-block_hint,FastNewObject,23,24,0
-block_hint,FastNewObject,27,28,0
-block_hint,FastNewObject,31,32,1
-block_hint,FastNewClosure,15,16,0
-block_hint,FastNewClosure,4,5,1
-block_hint,FastNewClosure,19,20,1
-block_hint,FastNewClosure,8,9,1
-block_hint,StringEqual,57,58,0
-block_hint,StringEqual,29,30,1
-block_hint,StringEqual,55,56,0
-block_hint,StringEqual,45,46,1
-block_hint,StringEqual,81,82,1
-block_hint,StringEqual,69,70,0
-block_hint,StringEqual,51,52,0
-block_hint,StringEqual,23,24,1
-block_hint,StringEqual,79,80,0
-block_hint,StringEqual,65,66,0
-block_hint,StringEqual,47,48,0
-block_hint,StringEqual,39,40,0
-block_hint,StringEqual,71,72,0
-block_hint,StringEqual,53,54,0
-block_hint,StringGreaterThanOrEqual,40,41,1
-block_hint,StringGreaterThanOrEqual,30,31,1
-block_hint,StringGreaterThanOrEqual,36,37,0
-block_hint,StringGreaterThanOrEqual,12,13,0
-block_hint,StringLessThan,22,23,0
-block_hint,StringLessThan,40,41,1
-block_hint,StringLessThan,36,37,0
-block_hint,StringLessThan,24,25,0
-block_hint,StringLessThanOrEqual,40,41,1
-block_hint,StringLessThanOrEqual,30,31,1
-block_hint,StringLessThanOrEqual,36,37,0
-block_hint,StringLessThanOrEqual,12,13,0
-block_hint,StringSubstring,87,88,0
-block_hint,StringSubstring,29,30,0
-block_hint,StringSubstring,63,64,1
-block_hint,StringSubstring,58,59,1
-block_hint,StringSubstring,56,57,1
-block_hint,StringSubstring,110,111,0
-block_hint,StringSubstring,19,20,0
-block_hint,StringSubstring,21,22,0
-block_hint,StringSubstring,114,115,1
-block_hint,StringSubstring,102,103,1
-block_hint,StringSubstring,38,39,0
-block_hint,StringSubstring,17,18,0
-block_hint,StringSubstring,116,117,1
-block_hint,StringSubstring,104,105,1
-block_hint,StringSubstring,42,43,0
-block_hint,StringSubstring,75,76,1
-block_hint,StringSubstring,127,128,0
-block_hint,StringSubstring,34,35,1
-block_hint,StringSubstring,31,32,0
-block_hint,OrderedHashTableHealIndex,5,6,1
-block_hint,OrderedHashTableHealIndex,9,10,0
-block_hint,CompileLazy,42,43,1
-block_hint,CompileLazy,22,23,0
-block_hint,CompileLazy,40,41,0
-block_hint,CompileLazy,8,9,0
-block_hint,CompileLazy,10,11,0
-block_hint,CompileLazy,15,16,0
-block_hint,CompileLazy,3,4,0
-block_hint,CompileLazy,18,19,1
-block_hint,AllocateInYoungGeneration,2,3,1
-block_hint,AllocateRegularInYoungGeneration,2,3,1
-block_hint,AllocateRegularInOldGeneration,2,3,1
-block_hint,CopyFastSmiOrObjectElements,12,13,1
-block_hint,CopyFastSmiOrObjectElements,18,19,1
-block_hint,CopyFastSmiOrObjectElements,9,10,0
-block_hint,CopyFastSmiOrObjectElements,23,24,1
-block_hint,CopyFastSmiOrObjectElements,21,22,1
-block_hint,CopyFastSmiOrObjectElements,15,16,0
-block_hint,GrowFastDoubleElements,18,19,0
-block_hint,GrowFastDoubleElements,20,21,0
-block_hint,GrowFastDoubleElements,14,15,0
-block_hint,GrowFastDoubleElements,16,17,1
-block_hint,GrowFastDoubleElements,28,29,0
-block_hint,GrowFastDoubleElements,6,7,0
-block_hint,GrowFastDoubleElements,26,27,0
-block_hint,GrowFastSmiOrObjectElements,16,17,0
-block_hint,GrowFastSmiOrObjectElements,18,19,0
-block_hint,GrowFastSmiOrObjectElements,14,15,0
-block_hint,GrowFastSmiOrObjectElements,22,23,1
-block_hint,GrowFastSmiOrObjectElements,6,7,0
-block_hint,GrowFastSmiOrObjectElements,12,13,0
-block_hint,ToNumber,3,4,1
-block_hint,ToNumber,5,6,0
-block_hint,ToNumber,18,19,0
-block_hint,ToNumber,15,16,1
-block_hint,ToNumber_Baseline,24,25,0
-block_hint,ToNumber_Baseline,22,23,1
-block_hint,ToNumber_Baseline,3,4,1
-block_hint,ToNumeric_Baseline,7,8,0
-block_hint,ToNumeric_Baseline,9,10,1
-block_hint,ToNumeric_Baseline,3,4,1
-block_hint,ToNumberConvertBigInt,3,4,1
-block_hint,ToNumberConvertBigInt,5,6,0
-block_hint,ToNumberConvertBigInt,20,21,0
-block_hint,ToNumberConvertBigInt,17,18,1
-block_hint,ToNumberConvertBigInt,9,10,1
-block_hint,Typeof,17,18,0
-block_hint,Typeof,9,10,0
-block_hint,Typeof,13,14,1
-block_hint,KeyedLoadIC_PolymorphicName,247,248,1
-block_hint,KeyedLoadIC_PolymorphicName,96,97,1
-block_hint,KeyedLoadIC_PolymorphicName,263,264,0
-block_hint,KeyedLoadIC_PolymorphicName,60,61,0
-block_hint,KeyedLoadIC_PolymorphicName,133,134,1
-block_hint,KeyedLoadIC_PolymorphicName,303,304,0
-block_hint,KeyedLoadIC_PolymorphicName,333,334,1
-block_hint,KeyedLoadIC_PolymorphicName,98,99,0
-block_hint,KeyedLoadIC_PolymorphicName,284,285,1
-block_hint,KeyedLoadIC_PolymorphicName,24,25,1
-block_hint,KeyedLoadIC_PolymorphicName,165,166,0
-block_hint,KeyedLoadIC_PolymorphicName,122,123,1
-block_hint,KeyedLoadIC_PolymorphicName,335,336,1
-block_hint,KeyedLoadIC_PolymorphicName,110,111,0
-block_hint,KeyedLoadIC_PolymorphicName,175,176,0
-block_hint,KeyedLoadIC_PolymorphicName,112,113,1
-block_hint,KeyedLoadIC_PolymorphicName,45,46,1
-block_hint,KeyedLoadIC_PolymorphicName,74,75,0
-block_hint,KeyedLoadIC_PolymorphicName,253,254,0
-block_hint,KeyedLoadIC_PolymorphicName,292,293,1
-block_hint,KeyedLoadIC_PolymorphicName,28,29,0
-block_hint,KeyedLoadIC_PolymorphicName,26,27,0
-block_hint,KeyedStoreIC_Megamorphic,379,380,1
-block_hint,KeyedStoreIC_Megamorphic,381,382,0
-block_hint,KeyedStoreIC_Megamorphic,1234,1235,0
-block_hint,KeyedStoreIC_Megamorphic,1215,1216,1
-block_hint,KeyedStoreIC_Megamorphic,1149,1150,0
-block_hint,KeyedStoreIC_Megamorphic,918,919,1
-block_hint,KeyedStoreIC_Megamorphic,383,384,1
-block_hint,KeyedStoreIC_Megamorphic,1244,1245,0
-block_hint,KeyedStoreIC_Megamorphic,1223,1224,0
-block_hint,KeyedStoreIC_Megamorphic,601,602,0
-block_hint,KeyedStoreIC_Megamorphic,746,747,1
-block_hint,KeyedStoreIC_Megamorphic,603,604,0
-block_hint,KeyedStoreIC_Megamorphic,1203,1204,0
-block_hint,KeyedStoreIC_Megamorphic,1038,1039,0
-block_hint,KeyedStoreIC_Megamorphic,1177,1178,0
-block_hint,KeyedStoreIC_Megamorphic,192,193,1
-block_hint,KeyedStoreIC_Megamorphic,194,195,1
-block_hint,KeyedStoreIC_Megamorphic,539,540,0
-block_hint,KeyedStoreIC_Megamorphic,541,542,0
-block_hint,KeyedStoreIC_Megamorphic,1042,1043,0
-block_hint,KeyedStoreIC_Megamorphic,547,548,1
-block_hint,KeyedStoreIC_Megamorphic,1068,1069,0
-block_hint,KeyedStoreIC_Megamorphic,606,607,0
-block_hint,KeyedStoreIC_Megamorphic,1205,1206,0
-block_hint,KeyedStoreIC_Megamorphic,549,550,0
-block_hint,KeyedStoreIC_Megamorphic,1044,1045,0
-block_hint,KeyedStoreIC_Megamorphic,200,201,1
-block_hint,KeyedStoreIC_Megamorphic,553,554,0
-block_hint,KeyedStoreIC_Megamorphic,202,203,0
-block_hint,KeyedStoreIC_Megamorphic,204,205,0
-block_hint,KeyedStoreIC_Megamorphic,953,954,0
-block_hint,KeyedStoreIC_Megamorphic,555,556,1
-block_hint,KeyedStoreIC_Megamorphic,557,558,0
-block_hint,KeyedStoreIC_Megamorphic,559,560,1
-block_hint,KeyedStoreIC_Megamorphic,561,562,0
-block_hint,KeyedStoreIC_Megamorphic,1157,1158,0
-block_hint,KeyedStoreIC_Megamorphic,563,564,1
-block_hint,KeyedStoreIC_Megamorphic,905,906,0
-block_hint,KeyedStoreIC_Megamorphic,1159,1160,0
-block_hint,KeyedStoreIC_Megamorphic,565,566,1
-block_hint,KeyedStoreIC_Megamorphic,571,572,1
-block_hint,KeyedStoreIC_Megamorphic,573,574,0
-block_hint,KeyedStoreIC_Megamorphic,575,576,0
-block_hint,KeyedStoreIC_Megamorphic,577,578,1
-block_hint,KeyedStoreIC_Megamorphic,960,961,1
-block_hint,KeyedStoreIC_Megamorphic,569,570,1
-block_hint,KeyedStoreIC_Megamorphic,567,568,0
-block_hint,KeyedStoreIC_Megamorphic,1232,1233,0
-block_hint,KeyedStoreIC_Megamorphic,1247,1248,1
-block_hint,KeyedStoreIC_Megamorphic,1240,1241,1
-block_hint,KeyedStoreIC_Megamorphic,1139,1140,1
-block_hint,KeyedStoreIC_Megamorphic,978,979,1
-block_hint,KeyedStoreIC_Megamorphic,206,207,0
-block_hint,KeyedStoreIC_Megamorphic,362,363,0
-block_hint,KeyedStoreIC_Megamorphic,1143,1144,0
-block_hint,KeyedStoreIC_Megamorphic,1152,1153,0
-block_hint,KeyedStoreIC_Megamorphic,930,931,0
-block_hint,KeyedStoreIC_Megamorphic,491,492,0
-block_hint,KeyedStoreIC_Megamorphic,898,899,0
-block_hint,KeyedStoreIC_Megamorphic,934,935,0
-block_hint,KeyedStoreIC_Megamorphic,932,933,1
-block_hint,KeyedStoreIC_Megamorphic,493,494,1
-block_hint,KeyedStoreIC_Megamorphic,499,500,1
-block_hint,KeyedStoreIC_Megamorphic,501,502,0
-block_hint,KeyedStoreIC_Megamorphic,938,939,1
-block_hint,KeyedStoreIC_Megamorphic,503,504,0
-block_hint,KeyedStoreIC_Megamorphic,505,506,1
-block_hint,KeyedStoreIC_Megamorphic,936,937,1
-block_hint,KeyedStoreIC_Megamorphic,497,498,1
-block_hint,KeyedStoreIC_Megamorphic,495,496,0
-block_hint,KeyedStoreIC_Megamorphic,1124,1125,1
-block_hint,KeyedStoreIC_Megamorphic,1189,1190,1
-block_hint,KeyedStoreIC_Megamorphic,896,897,0
-block_hint,KeyedStoreIC_Megamorphic,350,351,1
-block_hint,KeyedStoreIC_Megamorphic,336,337,1
-block_hint,KeyedStoreIC_Megamorphic,1122,1123,1
-block_hint,KeyedStoreIC_Megamorphic,683,684,0
-block_hint,KeyedStoreIC_Megamorphic,980,981,1
-block_hint,KeyedStoreIC_Megamorphic,214,215,0
-block_hint,KeyedStoreIC_Megamorphic,1024,1025,0
-block_hint,KeyedStoreIC_Megamorphic,693,694,0
-block_hint,KeyedStoreIC_Megamorphic,579,580,0
-block_hint,KeyedStoreIC_Megamorphic,167,168,1
-block_hint,KeyedStoreIC_Megamorphic,581,582,0
-block_hint,KeyedStoreIC_Megamorphic,583,584,0
-block_hint,KeyedStoreIC_Megamorphic,1051,1052,0
-block_hint,KeyedStoreIC_Megamorphic,585,586,1
-block_hint,KeyedStoreIC_Megamorphic,966,967,0
-block_hint,KeyedStoreIC_Megamorphic,1183,1184,0
-block_hint,KeyedStoreIC_Megamorphic,1053,1054,1
-block_hint,KeyedStoreIC_Megamorphic,759,760,1
-block_hint,KeyedStoreIC_Megamorphic,612,613,0
-block_hint,KeyedStoreIC_Megamorphic,1208,1209,0
-block_hint,KeyedStoreIC_Megamorphic,1055,1056,0
-block_hint,KeyedStoreIC_Megamorphic,1181,1182,0
-block_hint,KeyedStoreIC_Megamorphic,224,225,1
-block_hint,KeyedStoreIC_Megamorphic,761,762,0
-block_hint,KeyedStoreIC_Megamorphic,593,594,0
-block_hint,KeyedStoreIC_Megamorphic,1145,1146,0
-block_hint,KeyedStoreIC_Megamorphic,1192,1193,0
-block_hint,KeyedStoreIC_Megamorphic,909,910,0
-block_hint,KeyedStoreIC_Megamorphic,173,174,1
-block_hint,KeyedStoreIC_Megamorphic,175,176,1
-block_hint,KeyedStoreIC_Megamorphic,373,374,0
-block_hint,KeyedStoreIC_Megamorphic,177,178,1
-block_hint,KeyedStoreIC_Megamorphic,375,376,0
-block_hint,KeyedStoreIC_Megamorphic,179,180,1
-block_hint,KeyedStoreIC_Megamorphic,234,235,0
-block_hint,KeyedStoreIC_Megamorphic,236,237,0
-block_hint,KeyedStoreIC_Megamorphic,181,182,1
-block_hint,KeyedStoreIC_Megamorphic,183,184,1
-block_hint,KeyedStoreIC_Megamorphic,1029,1030,0
-block_hint,KeyedStoreIC_Megamorphic,185,186,1
-block_hint,KeyedStoreIC_Megamorphic,928,929,1
-block_hint,KeyedStoreIC_Megamorphic,485,486,1
-block_hint,KeyedStoreIC_Megamorphic,733,734,0
-block_hint,KeyedStoreIC_Megamorphic,922,923,1
-block_hint,KeyedStoreIC_Megamorphic,413,414,0
-block_hint,KeyedStoreIC_Megamorphic,415,416,0
-block_hint,KeyedStoreIC_Megamorphic,254,255,1
-block_hint,KeyedStoreIC_Megamorphic,417,418,0
-block_hint,KeyedStoreIC_Megamorphic,630,631,1
-block_hint,KeyedStoreIC_Megamorphic,92,93,1
-block_hint,KeyedStoreIC_Megamorphic,94,95,0
-block_hint,KeyedStoreIC_Megamorphic,771,772,1
-block_hint,KeyedStoreIC_Megamorphic,387,388,0
-block_hint,KeyedStoreIC_Megamorphic,639,640,1
-block_hint,KeyedStoreIC_Megamorphic,64,65,1
-block_hint,KeyedStoreIC_Megamorphic,66,67,0
-block_hint,DefineKeyedOwnIC_Megamorphic,312,313,1
-block_hint,DefineKeyedOwnIC_Megamorphic,314,315,0
-block_hint,DefineKeyedOwnIC_Megamorphic,899,900,0
-block_hint,DefineKeyedOwnIC_Megamorphic,420,421,0
-block_hint,DefineKeyedOwnIC_Megamorphic,418,419,1
-block_hint,DefineKeyedOwnIC_Megamorphic,800,801,0
-block_hint,DefineKeyedOwnIC_Megamorphic,575,576,1
-block_hint,DefineKeyedOwnIC_Megamorphic,603,604,1
-block_hint,DefineKeyedOwnIC_Megamorphic,232,233,0
-block_hint,DefineKeyedOwnIC_Megamorphic,53,54,1
-block_hint,DefineKeyedOwnIC_Megamorphic,55,56,0
-block_hint,LoadGlobalIC_NoFeedback,41,42,1
-block_hint,LoadGlobalIC_NoFeedback,6,7,1
-block_hint,LoadGlobalIC_NoFeedback,8,9,1
-block_hint,LoadGlobalIC_NoFeedback,10,11,1
-block_hint,LoadGlobalIC_NoFeedback,12,13,1
-block_hint,LoadGlobalIC_NoFeedback,31,32,1
-block_hint,LoadGlobalIC_NoFeedback,49,50,1
-block_hint,LoadGlobalIC_NoFeedback,18,19,1
-block_hint,LoadGlobalIC_NoFeedback,27,28,0
-block_hint,LoadGlobalIC_NoFeedback,14,15,1
-block_hint,LoadGlobalIC_NoFeedback,33,34,0
-block_hint,LoadGlobalIC_NoFeedback,16,17,1
-block_hint,LoadGlobalIC_NoFeedback,20,21,1
-block_hint,LoadGlobalIC_NoFeedback,22,23,0
-block_hint,LoadGlobalIC_NoFeedback,24,25,1
-block_hint,LoadIC_FunctionPrototype,2,3,0
-block_hint,LoadIC_FunctionPrototype,4,5,1
-block_hint,LoadIC_NoFeedback,97,98,1
-block_hint,LoadIC_NoFeedback,99,100,0
-block_hint,LoadIC_NoFeedback,306,307,1
-block_hint,LoadIC_NoFeedback,226,227,0
-block_hint,LoadIC_NoFeedback,141,142,0
-block_hint,LoadIC_NoFeedback,320,321,0
-block_hint,LoadIC_NoFeedback,287,288,0
-block_hint,LoadIC_NoFeedback,302,303,0
-block_hint,LoadIC_NoFeedback,53,54,1
-block_hint,LoadIC_NoFeedback,289,290,0
-block_hint,LoadIC_NoFeedback,55,56,1
-block_hint,LoadIC_NoFeedback,324,325,1
-block_hint,LoadIC_NoFeedback,272,273,0
-block_hint,LoadIC_NoFeedback,295,296,1
-block_hint,LoadIC_NoFeedback,247,248,1
-block_hint,LoadIC_NoFeedback,59,60,0
-block_hint,LoadIC_NoFeedback,22,23,1
-block_hint,LoadIC_NoFeedback,35,36,1
-block_hint,LoadIC_NoFeedback,130,131,1
-block_hint,LoadIC_NoFeedback,145,146,0
-block_hint,LoadIC_NoFeedback,125,126,0
-block_hint,LoadIC_NoFeedback,261,262,0
-block_hint,LoadIC_NoFeedback,250,251,0
-block_hint,LoadIC_NoFeedback,149,150,1
-block_hint,LoadIC_NoFeedback,167,168,0
-block_hint,LoadIC_NoFeedback,322,323,0
-block_hint,LoadIC_NoFeedback,151,152,0
-block_hint,LoadIC_NoFeedback,291,292,0
-block_hint,LoadIC_NoFeedback,70,71,1
-block_hint,LoadIC_NoFeedback,155,156,0
-block_hint,LoadIC_NoFeedback,72,73,1
-block_hint,LoadIC_NoFeedback,254,255,1
-block_hint,LoadIC_NoFeedback,76,77,0
-block_hint,LoadIC_NoFeedback,326,327,1
-block_hint,LoadIC_NoFeedback,278,279,0
-block_hint,LoadIC_NoFeedback,276,277,0
-block_hint,LoadIC_NoFeedback,24,25,1
-block_hint,LoadIC_NoFeedback,242,243,1
-block_hint,LoadIC_NoFeedback,135,136,1
-block_hint,LoadIC_NoFeedback,93,94,0
-block_hint,StoreIC_NoFeedback,147,148,1
-block_hint,StoreIC_NoFeedback,149,150,0
-block_hint,StoreIC_NoFeedback,259,260,0
-block_hint,StoreIC_NoFeedback,549,550,0
-block_hint,StoreIC_NoFeedback,443,444,0
-block_hint,StoreIC_NoFeedback,527,528,0
-block_hint,StoreIC_NoFeedback,58,59,1
-block_hint,StoreIC_NoFeedback,60,61,1
-block_hint,StoreIC_NoFeedback,199,200,0
-block_hint,StoreIC_NoFeedback,201,202,0
-block_hint,StoreIC_NoFeedback,447,448,0
-block_hint,StoreIC_NoFeedback,207,208,1
-block_hint,StoreIC_NoFeedback,473,474,0
-block_hint,StoreIC_NoFeedback,262,263,0
-block_hint,StoreIC_NoFeedback,551,552,0
-block_hint,StoreIC_NoFeedback,209,210,0
-block_hint,StoreIC_NoFeedback,449,450,0
-block_hint,StoreIC_NoFeedback,66,67,1
-block_hint,StoreIC_NoFeedback,213,214,0
-block_hint,StoreIC_NoFeedback,68,69,0
-block_hint,StoreIC_NoFeedback,390,391,0
-block_hint,StoreIC_NoFeedback,215,216,1
-block_hint,StoreIC_NoFeedback,217,218,0
-block_hint,StoreIC_NoFeedback,219,220,1
-block_hint,StoreIC_NoFeedback,221,222,0
-block_hint,StoreIC_NoFeedback,509,510,0
-block_hint,StoreIC_NoFeedback,223,224,1
-block_hint,StoreIC_NoFeedback,356,357,0
-block_hint,StoreIC_NoFeedback,511,512,0
-block_hint,StoreIC_NoFeedback,393,394,1
-block_hint,StoreIC_NoFeedback,231,232,1
-block_hint,StoreIC_NoFeedback,233,234,0
-block_hint,StoreIC_NoFeedback,235,236,0
-block_hint,StoreIC_NoFeedback,237,238,1
-block_hint,StoreIC_NoFeedback,227,228,0
-block_hint,StoreIC_NoFeedback,564,565,0
-block_hint,StoreIC_NoFeedback,494,495,1
-block_hint,StoreIC_NoFeedback,413,414,1
-block_hint,StoreIC_NoFeedback,72,73,0
-block_hint,StoreIC_NoFeedback,78,79,0
-block_hint,StoreIC_NoFeedback,130,131,0
-block_hint,StoreIC_NoFeedback,498,499,0
-block_hint,StoreIC_NoFeedback,367,368,0
-block_hint,StoreIC_NoFeedback,151,152,0
-block_hint,StoreIC_NoFeedback,349,350,0
-block_hint,StoreIC_NoFeedback,153,154,1
-block_hint,StoreIC_NoFeedback,159,160,1
-block_hint,StoreIC_NoFeedback,161,162,0
-block_hint,StoreIC_NoFeedback,163,164,0
-block_hint,StoreIC_NoFeedback,157,158,1
-block_hint,StoreIC_NoFeedback,155,156,0
-block_hint,StoreIC_NoFeedback,536,537,1
-block_hint,StoreIC_NoFeedback,385,386,1
-block_hint,StoreIC_NoFeedback,193,194,0
-block_hint,StoreIC_NoFeedback,381,382,1
-block_hint,StoreIC_NoFeedback,179,180,0
-block_hint,StoreIC_NoFeedback,519,520,1
-block_hint,StoreIC_NoFeedback,415,416,1
-block_hint,StoreIC_NoFeedback,80,81,0
-block_hint,StoreIC_NoFeedback,82,83,0
-block_hint,StoreIC_NoFeedback,241,242,0
-block_hint,StoreIC_NoFeedback,243,244,0
-block_hint,StoreIC_NoFeedback,456,457,0
-block_hint,StoreIC_NoFeedback,245,246,1
-block_hint,StoreIC_NoFeedback,513,514,0
-block_hint,StoreIC_NoFeedback,403,404,0
-block_hint,StoreIC_NoFeedback,458,459,1
-block_hint,StoreIC_NoFeedback,268,269,0
-block_hint,StoreIC_NoFeedback,553,554,0
-block_hint,StoreIC_NoFeedback,460,461,0
-block_hint,StoreIC_NoFeedback,531,532,0
-block_hint,StoreIC_NoFeedback,90,91,1
-block_hint,StoreIC_NoFeedback,332,333,0
-block_hint,StoreIC_NoFeedback,420,421,1
-block_hint,StoreIC_NoFeedback,94,95,0
-block_hint,StoreIC_NoFeedback,96,97,0
-block_hint,StoreIC_NoFeedback,253,254,0
-block_hint,StoreIC_NoFeedback,255,256,1
-block_hint,StoreIC_NoFeedback,362,363,0
-block_hint,StoreIC_NoFeedback,40,41,1
-block_hint,StoreIC_NoFeedback,42,43,1
-block_hint,StoreIC_NoFeedback,141,142,0
-block_hint,StoreIC_NoFeedback,44,45,1
-block_hint,StoreIC_NoFeedback,143,144,0
-block_hint,StoreIC_NoFeedback,46,47,1
-block_hint,StoreIC_NoFeedback,100,101,0
-block_hint,StoreIC_NoFeedback,102,103,0
-block_hint,StoreIC_NoFeedback,48,49,1
-block_hint,StoreIC_NoFeedback,50,51,1
-block_hint,StoreIC_NoFeedback,439,440,0
-block_hint,StoreIC_NoFeedback,52,53,1
-block_hint,DefineNamedOwnIC_NoFeedback,80,81,1
-block_hint,DefineNamedOwnIC_NoFeedback,82,83,0
-block_hint,DefineNamedOwnIC_NoFeedback,236,237,0
-block_hint,DefineNamedOwnIC_NoFeedback,210,211,1
-block_hint,DefineNamedOwnIC_NoFeedback,136,137,0
-block_hint,DefineNamedOwnIC_NoFeedback,239,240,0
-block_hint,DefineNamedOwnIC_NoFeedback,212,213,0
-block_hint,DefineNamedOwnIC_NoFeedback,234,235,0
-block_hint,DefineNamedOwnIC_NoFeedback,157,158,1
-block_hint,DefineNamedOwnIC_NoFeedback,36,37,1
-block_hint,DefineNamedOwnIC_NoFeedback,86,87,0
-block_hint,DefineNamedOwnIC_NoFeedback,38,39,1
-block_hint,DefineNamedOwnIC_NoFeedback,40,41,1
-block_hint,KeyedLoadIC_SloppyArguments,12,13,0
-block_hint,KeyedLoadIC_SloppyArguments,14,15,1
-block_hint,KeyedLoadIC_SloppyArguments,4,5,1
-block_hint,KeyedLoadIC_SloppyArguments,22,23,0
-block_hint,KeyedLoadIC_SloppyArguments,6,7,1
-block_hint,KeyedLoadIC_SloppyArguments,16,17,0
-block_hint,KeyedLoadIC_SloppyArguments,18,19,0
-block_hint,KeyedLoadIC_SloppyArguments,8,9,1
-block_hint,KeyedLoadIC_SloppyArguments,10,11,0
-block_hint,StoreFastElementIC_Standard,340,341,0
-block_hint,StoreFastElementIC_Standard,826,827,0
-block_hint,StoreFastElementIC_Standard,346,347,0
-block_hint,StoreFastElementIC_Standard,966,967,1
-block_hint,StoreFastElementIC_Standard,348,349,1
-block_hint,StoreFastElementIC_Standard,40,41,1
-block_hint,StoreFastElementIC_Standard,350,351,0
-block_hint,StoreFastElementIC_Standard,828,829,0
-block_hint,StoreFastElementIC_Standard,356,357,0
-block_hint,StoreFastElementIC_Standard,968,969,1
-block_hint,StoreFastElementIC_Standard,358,359,1
-block_hint,StoreFastElementIC_Standard,42,43,1
-block_hint,StoreFastElementIC_Standard,360,361,0
-block_hint,StoreFastElementIC_Standard,830,831,0
-block_hint,StoreFastElementIC_Standard,970,971,1
-block_hint,StoreFastElementIC_Standard,366,367,1
-block_hint,StoreFastElementIC_Standard,44,45,1
-block_hint,StoreFastElementIC_Standard,392,393,0
-block_hint,StoreFastElementIC_Standard,838,839,0
-block_hint,StoreFastElementIC_Standard,978,979,1
-block_hint,StoreFastElementIC_Standard,398,399,1
-block_hint,StoreFastElementIC_Standard,52,53,1
-block_hint,StoreFastElementIC_Standard,844,845,0
-block_hint,StoreFastElementIC_Standard,420,421,1
-block_hint,StoreFastElementIC_Standard,985,986,1
-block_hint,StoreFastElementIC_Standard,422,423,1
-block_hint,StoreFastElementIC_Standard,58,59,1
-block_hint,StoreFastElementIC_Standard,848,849,0
-block_hint,StoreFastElementIC_Standard,428,429,1
-block_hint,StoreFastElementIC_Standard,988,989,1
-block_hint,StoreFastElementIC_Standard,430,431,1
-block_hint,StoreFastElementIC_Standard,60,61,1
-block_hint,StoreFastElementIC_Standard,600,601,0
-block_hint,StoreFastElementIC_Standard,1072,1073,0
-block_hint,StoreFastElementIC_Standard,669,670,0
-block_hint,StoreFastElementIC_Standard,300,301,1
-block_hint,StoreFastElementIC_Standard,596,597,0
-block_hint,StoreFastElementIC_Standard,1074,1075,0
-block_hint,StoreFastElementIC_Standard,671,672,0
-block_hint,StoreFastElementIC_Standard,302,303,1
-block_hint,StoreFastElementIC_Standard,592,593,0
-block_hint,StoreFastElementIC_Standard,1076,1077,0
-block_hint,StoreFastElementIC_Standard,673,674,0
-block_hint,StoreFastElementIC_Standard,304,305,1
-block_hint,StoreFastElementIC_Standard,588,589,0
-block_hint,StoreFastElementIC_Standard,1078,1079,0
-block_hint,StoreFastElementIC_Standard,675,676,0
-block_hint,StoreFastElementIC_Standard,306,307,1
-block_hint,StoreFastElementIC_Standard,584,585,0
-block_hint,StoreFastElementIC_Standard,931,932,1
-block_hint,StoreFastElementIC_Standard,770,771,0
-block_hint,StoreFastElementIC_Standard,308,309,1
-block_hint,StoreFastElementIC_Standard,580,581,0
-block_hint,StoreFastElementIC_Standard,1097,1098,0
-block_hint,StoreFastElementIC_Standard,929,930,1
-block_hint,StoreFastElementIC_Standard,772,773,0
-block_hint,StoreFastElementIC_Standard,310,311,1
-block_hint,StoreFastElementIC_Standard,576,577,0
-block_hint,StoreFastElementIC_Standard,927,928,1
-block_hint,StoreFastElementIC_Standard,774,775,0
-block_hint,StoreFastElementIC_Standard,312,313,1
-block_hint,StoreFastElementIC_Standard,572,573,0
-block_hint,StoreFastElementIC_Standard,776,777,0
-block_hint,StoreFastElementIC_Standard,314,315,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,469,470,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,263,264,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,647,648,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,472,473,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,563,564,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,435,436,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,214,215,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,541,542,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,198,199,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,204,205,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,629,630,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,437,438,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,34,35,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,474,475,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,277,278,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,653,654,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,283,284,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,567,568,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,439,440,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,219,220,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,537,538,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,182,183,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,184,185,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,631,632,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,441,442,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,36,37,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,479,480,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,659,660,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,295,296,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,571,572,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,443,444,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,224,225,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,533,534,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,166,167,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,633,634,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,445,446,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,38,39,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,492,493,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,665,666,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,331,332,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,581,582,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,453,454,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,238,239,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,523,524,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,126,127,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,637,638,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,455,456,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,46,47,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,501,502,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,359,360,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,669,670,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,365,366,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,590,591,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,461,462,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,249,250,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,251,252,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,603,604,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,102,103,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,517,518,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,463,464,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,56,57,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,508,509,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,371,372,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,673,674,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,377,378,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,96,97,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,100,101,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,467,468,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,58,59,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,335,336,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,185,186,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,393,394,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,187,188,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,22,23,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,339,340,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,397,398,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,205,206,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,207,208,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,160,161,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,375,376,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,130,131,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,134,135,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,26,27,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,355,356,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,265,266,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,412,413,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,267,268,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,44,45,1
-block_hint,ElementsTransitionAndStore_Standard,433,434,1
-block_hint,ElementsTransitionAndStore_Standard,435,436,0
-block_hint,ElementsTransitionAndStore_Standard,543,544,1
-block_hint,ElementsTransitionAndStore_Standard,431,432,0
-block_hint,ElementsTransitionAndStore_Standard,187,188,0
-block_hint,ElementsTransitionAndStore_Standard,189,190,0
-block_hint,ElementsTransitionAndStore_Standard,334,335,0
-block_hint,ElementsTransitionAndStore_Standard,437,438,1
-block_hint,ElementsTransitionAndStore_Standard,195,196,1
-block_hint,ElementsTransitionAndStore_Standard,28,29,1
-block_hint,ElementsTransitionAndStore_Standard,481,482,1
-block_hint,ElementsTransitionAndStore_Standard,483,484,0
-block_hint,ElementsTransitionAndStore_Standard,477,478,1
-block_hint,ElementsTransitionAndStore_Standard,479,480,0
-block_hint,ElementsTransitionAndStore_Standard,245,246,0
-block_hint,ElementsTransitionAndStore_Standard,349,350,0
-block_hint,ElementsTransitionAndStore_Standard,485,486,1
-block_hint,ElementsTransitionAndStore_Standard,251,252,1
-block_hint,ElementsTransitionAndStore_Standard,38,39,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,739,740,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,1119,1120,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,742,743,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,896,897,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,684,685,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,324,325,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,830,831,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,237,238,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,241,242,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,1063,1064,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,686,687,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,62,63,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,95,96,0
-block_hint,KeyedHasIC_PolymorphicName,69,70,1
-block_hint,KeyedHasIC_PolymorphicName,28,29,1
-block_hint,KeyedHasIC_PolymorphicName,24,25,0
-block_hint,KeyedHasIC_PolymorphicName,26,27,0
-block_hint,KeyedHasIC_PolymorphicName,55,56,1
-block_hint,KeyedHasIC_PolymorphicName,89,90,0
-block_hint,KeyedHasIC_PolymorphicName,93,94,1
-block_hint,KeyedHasIC_PolymorphicName,30,31,0
-block_hint,KeyedHasIC_PolymorphicName,78,79,1
-block_hint,KeyedHasIC_PolymorphicName,14,15,1
-block_hint,KeyedHasIC_PolymorphicName,16,17,1
-block_hint,EnqueueMicrotask,4,5,1
-block_hint,EnqueueMicrotask,2,3,0
-block_hint,RunMicrotasks,18,19,1
-block_hint,RunMicrotasks,31,32,1
-block_hint,RunMicrotasks,65,66,0
-block_hint,RunMicrotasks,36,37,1
-block_hint,RunMicrotasks,85,86,0
-block_hint,RunMicrotasks,67,68,0
-block_hint,RunMicrotasks,38,39,1
-block_hint,HasProperty,137,138,1
-block_hint,HasProperty,139,140,1
-block_hint,HasProperty,261,262,0
-block_hint,HasProperty,211,212,1
-block_hint,HasProperty,254,255,0
-block_hint,HasProperty,97,98,0
-block_hint,HasProperty,234,235,1
-block_hint,HasProperty,123,124,1
-block_hint,HasProperty,141,142,1
-block_hint,HasProperty,199,200,0
-block_hint,HasProperty,201,202,0
-block_hint,HasProperty,101,102,0
-block_hint,HasProperty,99,100,0
-block_hint,HasProperty,250,251,0
-block_hint,HasProperty,268,269,0
-block_hint,HasProperty,257,258,1
-block_hint,HasProperty,106,107,0
-block_hint,HasProperty,275,276,0
-block_hint,HasProperty,280,281,0
-block_hint,HasProperty,266,267,0
-block_hint,HasProperty,203,204,1
-block_hint,HasProperty,42,43,1
-block_hint,HasProperty,65,66,0
-block_hint,HasProperty,44,45,1
-block_hint,HasProperty,239,240,1
-block_hint,HasProperty,48,49,0
-block_hint,HasProperty,270,271,0
-block_hint,HasProperty,228,229,0
-block_hint,HasProperty,38,39,0
-block_hint,DeleteProperty,38,39,1
-block_hint,DeleteProperty,62,63,0
-block_hint,DeleteProperty,40,41,0
-block_hint,DeleteProperty,66,67,1
-block_hint,DeleteProperty,80,81,0
-block_hint,DeleteProperty,73,74,0
-block_hint,DeleteProperty,64,65,1
-block_hint,DeleteProperty,56,57,1
-block_hint,DeleteProperty,42,43,1
-block_hint,DeleteProperty,83,84,0
-block_hint,DeleteProperty,85,86,0
-block_hint,DeleteProperty,77,78,0
-block_hint,DeleteProperty,75,76,0
-block_hint,DeleteProperty,47,48,0
-block_hint,DeleteProperty,49,50,0
-block_hint,DeleteProperty,87,88,0
-block_hint,DeleteProperty,71,72,1
-block_hint,DeleteProperty,20,21,0
-block_hint,DeleteProperty,54,55,0
-block_hint,DeleteProperty,7,8,1
-block_hint,DeleteProperty,9,10,1
-block_hint,DeleteProperty,11,12,1
-block_hint,DeleteProperty,13,14,1
-block_hint,DeleteProperty,15,16,1
-block_hint,SetDataProperties,136,137,1
-block_hint,SetDataProperties,263,264,1
-block_hint,SetDataProperties,261,262,1
-block_hint,SetDataProperties,144,145,0
-block_hint,SetDataProperties,316,317,0
-block_hint,SetDataProperties,146,147,0
-block_hint,SetDataProperties,59,60,0
-block_hint,SetDataProperties,341,342,0
-block_hint,SetDataProperties,267,268,0
-block_hint,SetDataProperties,385,386,1
-block_hint,SetDataProperties,277,278,0
-block_hint,SetDataProperties,752,753,0
-block_hint,SetDataProperties,762,763,1
-block_hint,SetDataProperties,750,751,0
-block_hint,SetDataProperties,748,749,0
-block_hint,SetDataProperties,659,660,0
-block_hint,SetDataProperties,451,452,1
-block_hint,SetDataProperties,221,222,1
-block_hint,SetDataProperties,87,88,1
-block_hint,SetDataProperties,223,224,0
-block_hint,SetDataProperties,513,514,0
-block_hint,SetDataProperties,515,516,0
-block_hint,SetDataProperties,519,520,1
-block_hint,SetDataProperties,449,450,0
-block_hint,SetDataProperties,329,330,1
-block_hint,SetDataProperties,326,327,0
-block_hint,SetDataProperties,158,159,0
-block_hint,SetDataProperties,399,400,0
-block_hint,SetDataProperties,447,448,0
-block_hint,SetDataProperties,352,353,0
-block_hint,SetDataProperties,226,227,1
-block_hint,SetDataProperties,93,94,1
-block_hint,SetDataProperties,521,522,0
-block_hint,SetDataProperties,95,96,0
-block_hint,SetDataProperties,97,98,0
-block_hint,SetDataProperties,617,618,0
-block_hint,SetDataProperties,523,524,1
-block_hint,SetDataProperties,525,526,0
-block_hint,SetDataProperties,527,528,1
-block_hint,SetDataProperties,529,530,0
-block_hint,SetDataProperties,673,674,0
-block_hint,SetDataProperties,531,532,1
-block_hint,SetDataProperties,577,578,0
-block_hint,SetDataProperties,675,676,0
-block_hint,SetDataProperties,620,621,1
-block_hint,SetDataProperties,539,540,1
-block_hint,SetDataProperties,541,542,0
-block_hint,SetDataProperties,543,544,0
-block_hint,SetDataProperties,545,546,1
-block_hint,SetDataProperties,535,536,0
-block_hint,SetDataProperties,657,658,0
-block_hint,SetDataProperties,555,556,1
-block_hint,SetDataProperties,292,293,1
-block_hint,SetDataProperties,99,100,0
-block_hint,SetDataProperties,437,438,0
-block_hint,SetDataProperties,241,242,0
-block_hint,SetDataProperties,129,130,0
-block_hint,SetDataProperties,279,280,1
-block_hint,SetDataProperties,204,205,0
-block_hint,SetDataProperties,61,62,0
-block_hint,ReturnReceiver,3,4,1
-block_hint,ArrayConstructorImpl,9,10,0
-block_hint,ArrayConstructorImpl,13,14,1
-block_hint,ArrayConstructorImpl,40,41,1
-block_hint,ArrayConstructorImpl,15,16,1
-block_hint,ArrayConstructorImpl,19,20,0
-block_hint,ArrayConstructorImpl,23,24,0
-block_hint,ArrayConstructorImpl,25,26,1
-block_hint,ArrayConstructorImpl,27,28,1
-block_hint,ArrayConstructorImpl,29,30,1
-block_hint,ArrayNoArgumentConstructor_PackedSmi_DontOverride,3,4,1
-block_hint,ArrayNoArgumentConstructor_PackedSmi_DontOverride,5,6,1
-block_hint,ArrayNoArgumentConstructor_HoleySmi_DontOverride,3,4,1
-block_hint,ArrayNoArgumentConstructor_HoleySmi_DontOverride,5,6,1
-block_hint,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,5,6,1
-block_hint,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,5,6,1
-block_hint,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,8,9,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,21,22,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,8,9,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,25,26,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,12,13,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,8,9,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,10,11,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,25,26,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,12,13,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,8,9,0
-block_hint,ArrayIncludesSmi,120,121,0
-block_hint,ArrayIncludesSmi,43,44,1
-block_hint,ArrayIncludesSmi,108,109,1
-block_hint,ArrayIncludesSmi,75,76,1
-block_hint,ArrayIncludesSmi,122,123,0
-block_hint,ArrayIncludesSmiOrObject,113,114,1
-block_hint,ArrayIncludesSmiOrObject,38,39,0
-block_hint,ArrayIncludesSmiOrObject,107,108,0
-block_hint,ArrayIncludesSmiOrObject,28,29,1
-block_hint,ArrayIncludesSmiOrObject,84,85,1
-block_hint,ArrayIncludesSmiOrObject,86,87,1
-block_hint,ArrayIncludesSmiOrObject,117,118,0
-block_hint,ArrayIncludesSmiOrObject,131,132,1
-block_hint,ArrayIncludesSmiOrObject,125,126,0
-block_hint,ArrayIncludesSmiOrObject,98,99,0
-block_hint,ArrayIncludes,52,53,1
-block_hint,ArrayIncludes,49,50,1
-block_hint,ArrayIncludes,42,43,1
-block_hint,ArrayIncludes,44,45,1
-block_hint,ArrayIncludes,25,26,1
-block_hint,ArrayIncludes,17,18,1
-block_hint,ArrayIncludes,3,4,1
-block_hint,ArrayIncludes,47,48,1
-block_hint,ArrayIncludes,38,39,0
-block_hint,ArrayIncludes,27,28,1
-block_hint,ArrayIncludes,13,14,0
-block_hint,ArrayIncludes,19,20,1
-block_hint,ArrayIndexOfSmiOrObject,96,97,1
-block_hint,ArrayIndexOfSmiOrObject,88,89,0
-block_hint,ArrayIndexOfSmiOrObject,23,24,0
-block_hint,ArrayIndexOfSmiOrObject,37,38,0
-block_hint,ArrayIndexOfSmiOrObject,69,70,1
-block_hint,ArrayIndexOfSmiOrObject,43,44,0
-block_hint,ArrayIndexOfSmiOrObject,71,72,1
-block_hint,ArrayIndexOfSmiOrObject,110,111,1
-block_hint,ArrayIndexOfSmiOrObject,100,101,0
-block_hint,ArrayIndexOfSmiOrObject,77,78,0
-block_hint,ArrayIndexOfSmiOrObject,102,103,0
-block_hint,ArrayIndexOfSmiOrObject,79,80,0
-block_hint,ArrayIndexOfSmiOrObject,49,50,0
-block_hint,ArrayIndexOfSmiOrObject,29,30,0
-block_hint,ArrayIndexOfSmiOrObject,106,107,0
-block_hint,ArrayIndexOfSmiOrObject,83,84,0
-block_hint,ArrayIndexOfSmiOrObject,35,36,1
-block_hint,ArrayIndexOfSmiOrObject,94,95,1
-block_hint,ArrayIndexOfSmiOrObject,86,87,0
-block_hint,ArrayIndexOf,52,53,1
-block_hint,ArrayIndexOf,49,50,1
-block_hint,ArrayIndexOf,42,43,1
-block_hint,ArrayIndexOf,44,45,1
-block_hint,ArrayIndexOf,25,26,1
-block_hint,ArrayIndexOf,17,18,1
-block_hint,ArrayIndexOf,3,4,1
-block_hint,ArrayIndexOf,47,48,1
-block_hint,ArrayIndexOf,38,39,0
-block_hint,ArrayIndexOf,27,28,1
-block_hint,ArrayIndexOf,13,14,0
-block_hint,ArrayIndexOf,7,8,0
-block_hint,ArrayIndexOf,19,20,1
-block_hint,ArrayIndexOf,22,23,1
-block_hint,ArrayPrototypePop,49,50,1
-block_hint,ArrayPrototypePop,42,43,1
-block_hint,ArrayPrototypePop,47,48,1
-block_hint,ArrayPrototypePop,36,37,1
-block_hint,ArrayPrototypePop,26,27,1
-block_hint,ArrayPrototypePop,5,6,1
-block_hint,ArrayPrototypePop,45,46,1
-block_hint,ArrayPrototypePop,39,40,0
-block_hint,ArrayPrototypePop,20,21,1
-block_hint,ArrayPrototypePop,28,29,0
-block_hint,ArrayPrototypePop,7,8,1
-block_hint,ArrayPrototypePop,33,34,0
-block_hint,ArrayPrototypePop,16,17,1
-block_hint,ArrayPrototypePop,22,23,0
-block_hint,ArrayPrototypePop,30,31,1
-block_hint,ArrayPrototypePop,18,19,0
-block_hint,ArrayPrototypePop,14,15,1
-block_hint,ArrayPrototypePop,9,10,1
-block_hint,ArrayPrototypePop,11,12,0
-block_hint,ArrayPrototypePush,171,172,1
-block_hint,ArrayPrototypePush,151,152,1
-block_hint,ArrayPrototypePush,167,168,1
-block_hint,ArrayPrototypePush,136,137,1
-block_hint,ArrayPrototypePush,92,93,1
-block_hint,ArrayPrototypePush,17,18,1
-block_hint,ArrayPrototypePush,161,162,1
-block_hint,ArrayPrototypePush,140,141,0
-block_hint,ArrayPrototypePush,77,78,1
-block_hint,ArrayPrototypePush,79,80,1
-block_hint,ArrayPrototypePush,94,95,0
-block_hint,ArrayPrototypePush,19,20,1
-block_hint,ArrayPrototypePush,99,100,0
-block_hint,ArrayPrototypePush,116,117,0
-block_hint,ArrayPrototypePush,105,106,0
-block_hint,ArrayPrototypePush,68,69,0
-block_hint,ArrayPrototypePush,114,115,1
-block_hint,ArrayPrototypePush,28,29,0
-block_hint,ArrayPrototypePush,30,31,0
-block_hint,ArrayPrototypePush,34,35,0
-block_hint,ArrayPrototypePush,36,37,0
-block_hint,ArrayPrototypePush,96,97,0
-block_hint,ArrayPrototypePush,21,22,1
-block_hint,ArrayPrototypePush,38,39,1
-block_hint,ArrayPrototypePush,126,127,1
-block_hint,ArrayPrototypePush,128,129,0
-block_hint,ArrayPrototypePush,163,164,0
-block_hint,ArrayPrototypePush,165,166,0
-block_hint,ArrayPrototypePush,109,110,0
-block_hint,ArrayPrototypePush,72,73,0
-block_hint,ArrayPrototypePush,74,75,1
-block_hint,ArrayPrototypePush,142,143,0
-block_hint,ArrayPrototypePush,40,41,0
-block_hint,ArrayPrototypePush,118,119,0
-block_hint,ArrayPrototypePush,48,49,0
-block_hint,ArrayPrototypePush,158,159,1
-block_hint,ArrayPrototypePush,25,26,1
-block_hint,ArrayPrototypePush,50,51,1
-block_hint,ArrayPrototypePush,107,108,0
-block_hint,ArrayPrototypePush,70,71,0
-block_hint,ArrayPrototypePush,122,123,1
-block_hint,ArrayPrototypePush,52,53,0
-block_hint,ArrayPrototypePush,58,59,0
-block_hint,ArrayPrototypePush,60,61,0
-block_hint,ArrayPrototypePush,23,24,1
-block_hint,CloneFastJSArray,10,11,0
-block_hint,CloneFastJSArray,38,39,1
-block_hint,CloneFastJSArray,34,35,1
-block_hint,CloneFastJSArray,19,20,0
-block_hint,CloneFastJSArray,8,9,0
-block_hint,CloneFastJSArray,12,13,0
-block_hint,CloneFastJSArray,14,15,1
-block_hint,CloneFastJSArray,41,42,1
-block_hint,CloneFastJSArray,48,49,0
-block_hint,CloneFastJSArray,43,44,0
-block_hint,CloneFastJSArray,45,46,1
-block_hint,CloneFastJSArray,25,26,1
-block_hint,CloneFastJSArray,4,5,1
-block_hint,CloneFastJSArray,17,18,1
-block_hint,CloneFastJSArrayFillingHoles,76,77,0
-block_hint,CloneFastJSArrayFillingHoles,78,79,0
-block_hint,CloneFastJSArrayFillingHoles,91,92,0
-block_hint,CloneFastJSArrayFillingHoles,46,47,0
-block_hint,CloneFastJSArrayFillingHoles,95,96,1
-block_hint,CloneFastJSArrayFillingHoles,83,84,1
-block_hint,CloneFastJSArrayFillingHoles,16,17,0
-block_hint,CloneFastJSArrayFillingHoles,20,21,0
-block_hint,CloneFastJSArrayFillingHoles,85,86,0
-block_hint,CloneFastJSArrayFillingHoles,117,118,0
-block_hint,CloneFastJSArrayFillingHoles,80,81,1
-block_hint,CloneFastJSArrayFillingHoles,10,11,1
-block_hint,CloneFastJSArrayFillingHoles,55,56,1
-block_hint,ExtractFastJSArray,4,5,1
-block_hint,ExtractFastJSArray,27,28,0
-block_hint,ExtractFastJSArray,10,11,0
-block_hint,ExtractFastJSArray,42,43,1
-block_hint,ExtractFastJSArray,34,35,1
-block_hint,ExtractFastJSArray,20,21,0
-block_hint,ExtractFastJSArray,6,7,0
-block_hint,ExtractFastJSArray,12,13,0
-block_hint,ExtractFastJSArray,14,15,1
-block_hint,ExtractFastJSArray,36,37,1
-block_hint,ExtractFastJSArray,38,39,0
-block_hint,ExtractFastJSArray,16,17,1
-block_hint,ArrayPrototypeValues,14,15,1
-block_hint,ArrayPrototypeValues,11,12,1
-block_hint,ArrayPrototypeValues,8,9,1
-block_hint,ArrayPrototypeValues,3,4,1
-block_hint,ArrayPrototypeValues,6,7,1
-block_hint,ArrayIteratorPrototypeNext,140,141,1
-block_hint,ArrayIteratorPrototypeNext,88,89,1
-block_hint,ArrayIteratorPrototypeNext,90,91,1
-block_hint,ArrayIteratorPrototypeNext,196,197,0
-block_hint,ArrayIteratorPrototypeNext,255,256,0
-block_hint,ArrayIteratorPrototypeNext,224,225,0
-block_hint,ArrayIteratorPrototypeNext,206,207,0
-block_hint,ArrayIteratorPrototypeNext,164,165,0
-block_hint,ArrayIteratorPrototypeNext,118,119,1
-block_hint,ArrayIteratorPrototypeNext,268,269,0
-block_hint,ArrayIteratorPrototypeNext,248,249,0
-block_hint,ArrayIteratorPrototypeNext,159,160,0
-block_hint,ArrayIteratorPrototypeNext,109,110,1
-block_hint,ArrayIteratorPrototypeNext,6,7,1
-block_hint,ArrayIteratorPrototypeNext,8,9,1
-block_hint,ArrayIteratorPrototypeNext,142,143,0
-block_hint,ArrayIteratorPrototypeNext,124,125,1
-block_hint,ArrayIteratorPrototypeNext,60,61,1
-block_hint,ArrayIteratorPrototypeNext,80,81,1
-block_hint,AsyncFunctionEnter,41,42,1
-block_hint,AsyncFunctionEnter,28,29,0
-block_hint,AsyncFunctionEnter,13,14,0
-block_hint,AsyncFunctionEnter,33,34,1
-block_hint,AsyncFunctionEnter,26,27,1
-block_hint,AsyncFunctionEnter,9,10,0
-block_hint,AsyncFunctionEnter,3,4,1
-block_hint,AsyncFunctionEnter,36,37,1
-block_hint,AsyncFunctionEnter,22,23,0
-block_hint,AsyncFunctionEnter,5,6,1
-block_hint,AsyncFunctionEnter,24,25,1
-block_hint,AsyncFunctionEnter,7,8,0
-block_hint,AsyncFunctionResolve,2,3,0
-block_hint,AsyncFunctionAwaitCaught,24,25,1
-block_hint,AsyncFunctionAwaitCaught,19,20,1
-block_hint,AsyncFunctionAwaitCaught,2,3,1
-block_hint,AsyncFunctionAwaitCaught,30,31,1
-block_hint,AsyncFunctionAwaitCaught,32,33,0
-block_hint,AsyncFunctionAwaitCaught,28,29,1
-block_hint,AsyncFunctionAwaitCaught,8,9,1
-block_hint,AsyncFunctionAwaitCaught,10,11,1
-block_hint,AsyncFunctionAwaitCaught,12,13,1
-block_hint,AsyncFunctionAwaitCaught,14,15,1
-block_hint,AsyncFunctionAwaitCaught,22,23,0
-block_hint,AsyncFunctionAwaitUncaught,24,25,1
-block_hint,AsyncFunctionAwaitUncaught,19,20,1
-block_hint,AsyncFunctionAwaitUncaught,2,3,1
-block_hint,AsyncFunctionAwaitUncaught,30,31,1
-block_hint,AsyncFunctionAwaitUncaught,32,33,0
-block_hint,AsyncFunctionAwaitUncaught,28,29,1
-block_hint,AsyncFunctionAwaitUncaught,8,9,1
-block_hint,AsyncFunctionAwaitUncaught,10,11,1
-block_hint,AsyncFunctionAwaitUncaught,12,13,1
-block_hint,AsyncFunctionAwaitUncaught,14,15,1
-block_hint,AsyncFunctionAwaitUncaught,22,23,0
-block_hint,AsyncFunctionAwaitResolveClosure,8,9,1
-block_hint,AsyncFunctionAwaitResolveClosure,2,3,1
-block_hint,AsyncFunctionAwaitResolveClosure,6,7,0
-block_hint,DatePrototypeGetDate,10,11,1
-block_hint,DatePrototypeGetDate,7,8,1
-block_hint,DatePrototypeGetDate,5,6,1
-block_hint,DatePrototypeGetDate,2,3,1
-block_hint,DatePrototypeGetDay,10,11,1
-block_hint,DatePrototypeGetDay,7,8,1
-block_hint,DatePrototypeGetDay,5,6,1
-block_hint,DatePrototypeGetDay,2,3,1
-block_hint,DatePrototypeGetFullYear,10,11,1
-block_hint,DatePrototypeGetFullYear,7,8,1
-block_hint,DatePrototypeGetFullYear,5,6,1
-block_hint,DatePrototypeGetHours,10,11,1
-block_hint,DatePrototypeGetHours,7,8,1
-block_hint,DatePrototypeGetHours,5,6,1
-block_hint,DatePrototypeGetHours,2,3,1
-block_hint,DatePrototypeGetMinutes,10,11,1
-block_hint,DatePrototypeGetMinutes,7,8,1
-block_hint,DatePrototypeGetMinutes,5,6,1
-block_hint,DatePrototypeGetMinutes,2,3,1
-block_hint,DatePrototypeGetMonth,10,11,1
-block_hint,DatePrototypeGetMonth,7,8,1
-block_hint,DatePrototypeGetMonth,5,6,1
-block_hint,DatePrototypeGetMonth,2,3,1
-block_hint,DatePrototypeGetSeconds,10,11,1
-block_hint,DatePrototypeGetSeconds,7,8,1
-block_hint,DatePrototypeGetSeconds,5,6,1
-block_hint,DatePrototypeGetSeconds,2,3,1
-block_hint,DatePrototypeGetTime,8,9,1
-block_hint,DatePrototypeGetTime,5,6,1
-block_hint,DatePrototypeGetTime,2,3,1
-block_hint,CreateIterResultObject,4,5,1
-block_hint,CreateIterResultObject,11,12,1
-block_hint,CreateIterResultObject,6,7,0
-block_hint,CreateGeneratorObject,32,33,1
-block_hint,CreateGeneratorObject,34,35,1
-block_hint,CreateGeneratorObject,57,58,1
-block_hint,CreateGeneratorObject,54,55,0
-block_hint,CreateGeneratorObject,43,44,1
-block_hint,CreateGeneratorObject,24,25,0
-block_hint,CreateGeneratorObject,47,48,1
-block_hint,CreateGeneratorObject,40,41,1
-block_hint,CreateGeneratorObject,8,9,0
-block_hint,CreateGeneratorObject,51,52,1
-block_hint,CreateGeneratorObject,37,38,0
-block_hint,CreateGeneratorObject,12,13,0
-block_hint,GeneratorPrototypeNext,19,20,1
-block_hint,GeneratorPrototypeNext,11,12,1
-block_hint,GeneratorPrototypeNext,13,14,1
-block_hint,GeneratorPrototypeNext,5,6,0
-block_hint,GeneratorPrototypeNext,7,8,0
-block_hint,SuspendGeneratorBaseline,19,20,1
-block_hint,SuspendGeneratorBaseline,5,6,1
-block_hint,SuspendGeneratorBaseline,11,12,1
-block_hint,SuspendGeneratorBaseline,7,8,1
-block_hint,SuspendGeneratorBaseline,13,14,0
-block_hint,ResumeGeneratorBaseline,11,12,1
-block_hint,ResumeGeneratorBaseline,4,5,1
-block_hint,ResumeGeneratorBaseline,6,7,0
-block_hint,GlobalIsFinite,9,10,1
-block_hint,GlobalIsNaN,9,10,1
-block_hint,GlobalIsNaN,11,12,1
-block_hint,LoadIC,373,374,1
-block_hint,LoadIC,139,140,0
-block_hint,LoadIC,61,62,0
-block_hint,LoadIC,233,234,0
-block_hint,LoadIC,350,351,1
-block_hint,LoadIC,235,236,0
-block_hint,LoadIC,399,400,1
-block_hint,LoadIC,396,397,0
-block_hint,LoadIC,388,389,1
-block_hint,LoadIC,295,296,1
-block_hint,LoadIC,100,101,1
-block_hint,LoadIC,281,282,0
-block_hint,LoadIC,324,325,0
-block_hint,LoadIC,141,142,1
-block_hint,LoadIC,361,362,1
-block_hint,LoadIC,102,103,0
-block_hint,LoadIC,104,105,0
-block_hint,LoadIC,21,22,1
-block_hint,LoadIC,64,65,0
-block_hint,LoadIC,143,144,0
-block_hint,LoadIC,313,314,1
-block_hint,LoadIC,23,24,1
-block_hint,LoadIC,173,174,0
-block_hint,LoadIC,367,368,0
-block_hint,LoadIC,369,370,0
-block_hint,LoadIC,322,323,0
-block_hint,LoadIC,129,130,0
-block_hint,LoadIC,51,52,1
-block_hint,LoadIC,209,210,0
-block_hint,LoadIC,84,85,0
-block_hint,LoadIC,46,47,0
-block_hint,LoadIC,363,364,1
-block_hint,LoadIC,114,115,0
-block_hint,LoadIC,183,184,0
-block_hint,LoadIC,44,45,1
-block_hint,LoadIC,76,77,0
-block_hint,LoadIC,271,272,0
-block_hint,LoadIC,315,316,1
-block_hint,LoadIC,27,28,0
-block_hint,LoadIC,179,180,1
-block_hint,LoadIC,181,182,1
-block_hint,LoadIC,175,176,1
-block_hint,LoadIC,177,178,1
-block_hint,LoadIC,133,134,1
-block_hint,LoadIC,135,136,0
-block_hint,LoadIC_Megamorphic,367,368,1
-block_hint,LoadIC_Megamorphic,364,365,0
-block_hint,LoadIC_Megamorphic,356,357,1
-block_hint,LoadIC_Megamorphic,260,261,1
-block_hint,LoadIC_Megamorphic,262,263,1
-block_hint,LoadIC_Megamorphic,258,259,0
-block_hint,LoadIC_Megamorphic,58,59,0
-block_hint,LoadIC_Megamorphic,299,300,0
-block_hint,LoadIC_Megamorphic,130,131,1
-block_hint,LoadIC_Megamorphic,285,286,0
-block_hint,LoadIC_Megamorphic,331,332,1
-block_hint,LoadIC_Megamorphic,95,96,0
-block_hint,LoadIC_Megamorphic,132,133,0
-block_hint,LoadIC_Megamorphic,287,288,1
-block_hint,LoadIC_Megamorphic,22,23,1
-block_hint,LoadIC_Megamorphic,162,163,0
-block_hint,LoadIC_Megamorphic,292,293,0
-block_hint,LoadIC_Megamorphic,252,253,1
-block_hint,LoadIC_Megamorphic,337,338,0
-block_hint,LoadIC_Megamorphic,339,340,0
-block_hint,LoadIC_Megamorphic,296,297,0
-block_hint,LoadIC_Megamorphic,122,123,0
-block_hint,LoadIC_Megamorphic,50,51,1
-block_hint,LoadIC_Megamorphic,45,46,0
-block_hint,LoadIC_Megamorphic,248,249,0
-block_hint,LoadIC_Megamorphic,289,290,1
-block_hint,LoadIC_Megamorphic,26,27,0
-block_hint,LoadIC_Megamorphic,24,25,0
-block_hint,LoadIC_Megamorphic,164,165,1
-block_hint,LoadIC_Megamorphic,166,167,1
-block_hint,LoadIC_Megamorphic,126,127,1
-block_hint,LoadIC_Noninlined,376,377,1
-block_hint,LoadIC_Noninlined,132,133,0
-block_hint,LoadIC_Noninlined,384,385,1
-block_hint,LoadIC_Noninlined,381,382,0
-block_hint,LoadIC_Noninlined,371,372,1
-block_hint,LoadIC_Noninlined,270,271,0
-block_hint,LoadIC_Noninlined,58,59,0
-block_hint,LoadIC_Noninlined,313,314,0
-block_hint,LoadIC_Noninlined,142,143,1
-block_hint,LoadIC_Noninlined,297,298,0
-block_hint,LoadIC_Noninlined,22,23,1
-block_hint,LoadIC_Noninlined,174,175,0
-block_hint,LoadIC_Noninlined,39,40,1
-block_hint,LoadIC_Noninlined,260,261,0
-block_hint,LoadIC_Noninlined,301,302,1
-block_hint,LoadIC_Noninlined,26,27,0
-block_hint,LoadIC_Noninlined,24,25,0
-block_hint,LoadICTrampoline,3,4,1
-block_hint,LoadICTrampoline_Megamorphic,3,4,1
-block_hint,LoadSuperIC,533,534,0
-block_hint,LoadSuperIC,253,254,0
-block_hint,LoadSuperIC,569,570,1
-block_hint,LoadSuperIC,443,444,0
-block_hint,LoadSuperIC,77,78,0
-block_hint,LoadSuperIC,545,546,0
-block_hint,LoadSuperIC,255,256,1
-block_hint,LoadSuperIC,520,521,0
-block_hint,LoadSuperIC,43,44,1
-block_hint,LoadSuperIC,555,556,0
-block_hint,LoadSuperIC,287,288,0
-block_hint,LoadSuperIC,62,63,1
-block_hint,LoadSuperIC,432,433,0
-block_hint,LoadSuperIC,430,431,0
-block_hint,LoadSuperIC,524,525,1
-block_hint,LoadSuperIC,47,48,0
-block_hint,LoadSuperIC,681,682,0
-block_hint,KeyedLoadIC,636,637,1
-block_hint,KeyedLoadIC,258,259,0
-block_hint,KeyedLoadIC,250,251,0
-block_hint,KeyedLoadIC,384,385,0
-block_hint,KeyedLoadIC,501,502,1
-block_hint,KeyedLoadIC,650,651,0
-block_hint,KeyedLoadIC,630,631,0
-block_hint,KeyedLoadIC,585,586,1
-block_hint,KeyedLoadIC,390,391,1
-block_hint,KeyedLoadIC,388,389,1
-block_hint,KeyedLoadIC,669,670,0
-block_hint,KeyedLoadIC,671,672,0
-block_hint,KeyedLoadIC,634,635,0
-block_hint,KeyedLoadIC,587,588,1
-block_hint,KeyedLoadIC,152,153,1
-block_hint,KeyedLoadIC,628,629,0
-block_hint,KeyedLoadIC,482,483,0
-block_hint,KeyedLoadIC,106,107,1
-block_hint,KeyedLoadIC,685,686,0
-block_hint,KeyedLoadIC,687,688,0
-block_hint,KeyedLoadIC,642,643,1
-block_hint,KeyedLoadIC,294,295,1
-block_hint,KeyedLoadIC,296,297,0
-block_hint,KeyedLoadIC,681,682,1
-block_hint,KeyedLoadIC,528,529,1
-block_hint,KeyedLoadIC,626,627,0
-block_hint,KeyedLoadIC,615,616,0
-block_hint,KeyedLoadIC,565,566,1
-block_hint,KeyedLoadIC,316,317,1
-block_hint,KeyedLoadIC,68,69,0
-block_hint,KeyedLoadIC,306,307,0
-block_hint,KeyedLoadIC,532,533,1
-block_hint,KeyedLoadIC,308,309,1
-block_hint,KeyedLoadIC,222,223,0
-block_hint,KeyedLoadIC,178,179,0
-block_hint,KeyedLoadIC,567,568,0
-block_hint,KeyedLoadIC,457,458,1
-block_hint,KeyedLoadIC,118,119,0
-block_hint,KeyedLoadIC,120,121,0
-block_hint,KeyedLoadIC,405,406,1
-block_hint,KeyedLoadIC,618,619,1
-block_hint,KeyedLoadIC,246,247,1
-block_hint,KeyedLoadIC,569,570,0
-block_hint,KeyedLoadIC,519,520,0
-block_hint,KeyedLoadIC,443,444,1
-block_hint,KeyedLoadIC,690,691,0
-block_hint,KeyedLoadIC,122,123,1
-block_hint,KeyedLoadIC,322,323,0
-block_hint,KeyedLoadIC,324,325,1
-block_hint,KeyedLoadIC,70,71,0
-block_hint,KeyedLoadIC_Megamorphic,497,498,1
-block_hint,KeyedLoadIC_Megamorphic,499,500,0
-block_hint,KeyedLoadIC_Megamorphic,1245,1246,0
-block_hint,KeyedLoadIC_Megamorphic,1217,1218,1
-block_hint,KeyedLoadIC_Megamorphic,1169,1170,0
-block_hint,KeyedLoadIC_Megamorphic,1241,1242,1
-block_hint,KeyedLoadIC_Megamorphic,1247,1248,1
-block_hint,KeyedLoadIC_Megamorphic,1219,1220,1
-block_hint,KeyedLoadIC_Megamorphic,1243,1244,0
-block_hint,KeyedLoadIC_Megamorphic,1129,1130,0
-block_hint,KeyedLoadIC_Megamorphic,940,941,1
-block_hint,KeyedLoadIC_Megamorphic,938,939,1
-block_hint,KeyedLoadIC_Megamorphic,529,530,1
-block_hint,KeyedLoadIC_Megamorphic,1211,1212,0
-block_hint,KeyedLoadIC_Megamorphic,1213,1214,0
-block_hint,KeyedLoadIC_Megamorphic,1179,1180,0
-block_hint,KeyedLoadIC_Megamorphic,1177,1178,1
-block_hint,KeyedLoadIC_Megamorphic,1207,1208,0
-block_hint,KeyedLoadIC_Megamorphic,1173,1174,0
-block_hint,KeyedLoadIC_Megamorphic,942,943,1
-block_hint,KeyedLoadIC_Megamorphic,533,534,1
-block_hint,KeyedLoadIC_Megamorphic,952,953,0
-block_hint,KeyedLoadIC_Megamorphic,649,650,0
-block_hint,KeyedLoadIC_Megamorphic,1197,1198,0
-block_hint,KeyedLoadIC_Megamorphic,1103,1104,0
-block_hint,KeyedLoadIC_Megamorphic,1155,1156,0
-block_hint,KeyedLoadIC_Megamorphic,234,235,1
-block_hint,KeyedLoadIC_Megamorphic,1105,1106,0
-block_hint,KeyedLoadIC_Megamorphic,236,237,1
-block_hint,KeyedLoadIC_Megamorphic,1085,1086,0
-block_hint,KeyedLoadIC_Megamorphic,1231,1232,1
-block_hint,KeyedLoadIC_Megamorphic,1083,1084,0
-block_hint,KeyedLoadIC_Megamorphic,1081,1082,0
-block_hint,KeyedLoadIC_Megamorphic,991,992,1
-block_hint,KeyedLoadIC_Megamorphic,240,241,0
-block_hint,KeyedLoadIC_Megamorphic,126,127,1
-block_hint,KeyedLoadIC_Megamorphic,198,199,0
-block_hint,KeyedLoadIC_Megamorphic,653,654,0
-block_hint,KeyedLoadIC_Megamorphic,559,560,0
-block_hint,KeyedLoadIC_Megamorphic,1054,1055,0
-block_hint,KeyedLoadIC_Megamorphic,994,995,0
-block_hint,KeyedLoadIC_Megamorphic,657,658,1
-block_hint,KeyedLoadIC_Megamorphic,675,676,0
-block_hint,KeyedLoadIC_Megamorphic,1199,1200,0
-block_hint,KeyedLoadIC_Megamorphic,659,660,0
-block_hint,KeyedLoadIC_Megamorphic,1107,1108,0
-block_hint,KeyedLoadIC_Megamorphic,661,662,1
-block_hint,KeyedLoadIC_Megamorphic,251,252,1
-block_hint,KeyedLoadIC_Megamorphic,663,664,0
-block_hint,KeyedLoadIC_Megamorphic,253,254,1
-block_hint,KeyedLoadIC_Megamorphic,855,856,0
-block_hint,KeyedLoadIC_Megamorphic,998,999,1
-block_hint,KeyedLoadIC_Megamorphic,257,258,0
-block_hint,KeyedLoadIC_Megamorphic,665,666,0
-block_hint,KeyedLoadIC_Megamorphic,1091,1092,0
-block_hint,KeyedLoadIC_Megamorphic,1193,1194,0
-block_hint,KeyedLoadIC_Megamorphic,1233,1234,1
-block_hint,KeyedLoadIC_Megamorphic,1089,1090,0
-block_hint,KeyedLoadIC_Megamorphic,128,129,1
-block_hint,KeyedLoadIC_Megamorphic,206,207,0
-block_hint,KeyedLoadIC_Megamorphic,936,937,0
-block_hint,KeyedLoadIC_Megamorphic,684,685,0
-block_hint,KeyedLoadIC_Megamorphic,1201,1202,0
-block_hint,KeyedLoadIC_Megamorphic,1237,1238,0
-block_hint,KeyedLoadIC_Megamorphic,1159,1160,0
-block_hint,KeyedLoadIC_Megamorphic,857,858,1
-block_hint,KeyedLoadIC_Megamorphic,269,270,1
-block_hint,KeyedLoadIC_Megamorphic,1227,1228,0
-block_hint,KeyedLoadIC_Megamorphic,271,272,1
-block_hint,KeyedLoadIC_Megamorphic,1125,1126,0
-block_hint,KeyedLoadIC_Megamorphic,1239,1240,0
-block_hint,KeyedLoadIC_Megamorphic,1119,1120,1
-block_hint,KeyedLoadIC_Megamorphic,749,750,1
-block_hint,KeyedLoadIC_Megamorphic,1028,1029,1
-block_hint,KeyedLoadIC_Megamorphic,745,746,0
-block_hint,KeyedLoadIC_Megamorphic,117,118,0
-block_hint,KeyedLoadIC_Megamorphic,890,891,0
-block_hint,KeyedLoadIC_Megamorphic,339,340,1
-block_hint,KeyedLoadIC_Megamorphic,876,877,0
-block_hint,KeyedLoadIC_Megamorphic,81,82,1
-block_hint,KeyedLoadIC_Megamorphic,369,370,0
-block_hint,KeyedLoadIC_Megamorphic,737,738,0
-block_hint,KeyedLoadIC_Megamorphic,98,99,1
-block_hint,KeyedLoadIC_Megamorphic,1071,1072,0
-block_hint,KeyedLoadIC_Megamorphic,1225,1226,1
-block_hint,KeyedLoadIC_Megamorphic,1069,1070,0
-block_hint,KeyedLoadIC_Megamorphic,1137,1138,1
-block_hint,KeyedLoadIC_Megamorphic,1009,1010,1
-block_hint,KeyedLoadIC_Megamorphic,295,296,0
-block_hint,KeyedLoadIC_Megamorphic,120,121,1
-block_hint,KeyedLoadIC_Megamorphic,182,183,0
-block_hint,KeyedLoadIC_Megamorphic,971,972,0
-block_hint,KeyedLoadIC_Megamorphic,828,829,1
-block_hint,KeyedLoadIC_Megamorphic,186,187,1
-block_hint,KeyedLoadIC_Megamorphic,690,691,0
-block_hint,KeyedLoadIC_Megamorphic,525,526,0
-block_hint,KeyedLoadIC_Megamorphic,1042,1043,0
-block_hint,KeyedLoadIC_Megamorphic,1012,1013,0
-block_hint,KeyedLoadIC_Megamorphic,694,695,1
-block_hint,KeyedLoadIC_Megamorphic,869,870,1
-block_hint,KeyedLoadIC_Megamorphic,1203,1204,0
-block_hint,KeyedLoadIC_Megamorphic,308,309,1
-block_hint,KeyedLoadIC_Megamorphic,871,872,0
-block_hint,KeyedLoadIC_Megamorphic,1077,1078,0
-block_hint,KeyedLoadIC_Megamorphic,1229,1230,1
-block_hint,KeyedLoadIC_Megamorphic,1075,1076,0
-block_hint,KeyedLoadIC_Megamorphic,190,191,0
-block_hint,KeyedLoadIC_Megamorphic,960,961,0
-block_hint,KeyedLoadIC_Megamorphic,1166,1167,0
-block_hint,KeyedLoadIC_Megamorphic,918,919,1
-block_hint,KeyedLoadIC_Megamorphic,132,133,0
-block_hint,KeyedLoadIC_Megamorphic,727,728,0
-block_hint,KeyedLoadIC_Megamorphic,1037,1038,0
-block_hint,KeyedLoadIC_Megamorphic,563,564,1
-block_hint,KeyedLoadIC_Megamorphic,322,323,0
-block_hint,KeyedLoadIC_Megamorphic,723,724,0
-block_hint,KeyedLoadIC_Megamorphic,565,566,0
-block_hint,KeyedLoadIC_Megamorphic,134,135,1
-block_hint,KeyedLoadIC_Megamorphic,573,574,0
-block_hint,KeyedLoadIC_Megamorphic,922,923,1
-block_hint,KeyedLoadIC_Megamorphic,493,494,0
-block_hint,KeyedLoadIC_Megamorphic,958,959,0
-block_hint,KeyedLoadIC_Megamorphic,731,732,1
-block_hint,KeyedLoadIC_Megamorphic,581,582,0
-block_hint,KeyedLoadIC_Megamorphic,216,217,0
-block_hint,KeyedLoadIC_Megamorphic,491,492,1
-block_hint,KeyedLoadIC_Megamorphic,583,584,1
-block_hint,KeyedLoadIC_Megamorphic,150,151,1
-block_hint,KeyedLoadICTrampoline,3,4,1
-block_hint,KeyedLoadICTrampoline_Megamorphic,3,4,1
-block_hint,StoreGlobalIC,72,73,0
-block_hint,StoreGlobalIC,229,230,1
-block_hint,StoreGlobalIC,268,269,0
-block_hint,StoreGlobalIC,144,145,0
-block_hint,StoreGlobalIC,205,206,0
-block_hint,StoreGlobalIC,92,93,0
-block_hint,StoreGlobalIC,146,147,1
-block_hint,StoreGlobalIC,94,95,1
-block_hint,StoreGlobalIC,15,16,1
-block_hint,StoreGlobalICTrampoline,3,4,1
-block_hint,StoreIC,338,339,1
-block_hint,StoreIC,144,145,0
-block_hint,StoreIC,69,70,0
-block_hint,StoreIC,208,209,0
-block_hint,StoreIC,210,211,1
-block_hint,StoreIC,395,396,1
-block_hint,StoreIC,386,387,0
-block_hint,StoreIC,369,370,1
-block_hint,StoreIC,240,241,1
-block_hint,StoreIC,242,243,1
-block_hint,StoreIC,74,75,1
-block_hint,StoreIC,250,251,1
-block_hint,StoreIC,108,109,0
-block_hint,StoreIC,35,36,0
-block_hint,StoreIC,316,317,1
-block_hint,StoreIC,92,93,0
-block_hint,StoreIC,146,147,0
-block_hint,StoreIC,94,95,1
-block_hint,StoreIC,150,151,0
-block_hint,StoreIC,16,17,1
-block_hint,StoreIC,96,97,0
-block_hint,StoreIC,18,19,0
-block_hint,StoreIC,359,360,0
-block_hint,StoreIC,160,161,1
-block_hint,StoreIC,162,163,1
-block_hint,StoreIC,327,328,1
-block_hint,StoreIC,164,165,0
-block_hint,StoreIC,105,106,0
-block_hint,StoreIC,103,104,1
-block_hint,StoreIC,320,321,1
-block_hint,StoreIC,23,24,0
-block_hint,StoreIC,152,153,1
-block_hint,StoreIC,287,288,0
-block_hint,StoreIC,154,155,0
-block_hint,StoreIC,156,157,1
-block_hint,StoreIC,323,324,1
-block_hint,StoreIC,25,26,1
-block_hint,StoreIC,158,159,0
-block_hint,StoreIC,325,326,1
-block_hint,StoreIC,31,32,0
-block_hint,StoreIC,29,30,1
-block_hint,StoreIC,227,228,1
-block_hint,StoreIC,63,64,0
-block_hint,StoreIC,291,292,0
-block_hint,StoreIC,166,167,1
-block_hint,StoreIC,293,294,0
-block_hint,StoreIC,312,313,1
-block_hint,StoreIC,76,77,0
-block_hint,StoreIC,246,247,0
-block_hint,StoreIC,176,177,0
-block_hint,StoreIC,43,44,1
-block_hint,StoreIC,112,113,0
-block_hint,StoreIC,178,179,0
-block_hint,StoreIC,271,272,0
-block_hint,StoreIC,125,126,1
-block_hint,StoreIC,371,372,0
-block_hint,StoreIC,267,268,1
-block_hint,StoreIC,45,46,1
-block_hint,StoreIC,47,48,1
-block_hint,StoreIC,121,122,0
-block_hint,StoreIC,49,50,1
-block_hint,StoreIC,123,124,0
-block_hint,StoreIC,51,52,1
-block_hint,StoreIC,80,81,0
-block_hint,StoreIC,53,54,1
-block_hint,StoreIC,55,56,1
-block_hint,StoreIC,333,334,0
-block_hint,StoreIC,57,58,1
-block_hint,StoreIC,184,185,0
-block_hint,StoreIC,186,187,0
-block_hint,StoreIC,229,230,0
-block_hint,StoreIC,133,134,0
-block_hint,StoreIC,299,300,0
-block_hint,StoreIC,190,191,1
-block_hint,StoreIC,192,193,0
-block_hint,StoreIC,281,282,0
-block_hint,StoreIC,365,366,0
-block_hint,StoreIC,301,302,1
-block_hint,StoreIC,194,195,1
-block_hint,StoreIC,200,201,1
-block_hint,StoreIC,202,203,0
-block_hint,StoreIC,204,205,0
-block_hint,StoreIC,206,207,1
-block_hint,StoreIC,198,199,1
-block_hint,StoreIC,196,197,0
-block_hint,StoreIC,384,385,0
-block_hint,StoreIC,388,389,1
-block_hint,StoreIC,357,358,1
-block_hint,StoreIC,314,315,1
-block_hint,StoreIC,84,85,0
-block_hint,StoreIC,139,140,0
-block_hint,StoreIC,231,232,1
-block_hint,StoreICTrampoline,3,4,1
-block_hint,DefineNamedOwnIC,329,330,1
-block_hint,DefineNamedOwnIC,145,146,0
-block_hint,DefineNamedOwnIC,300,301,1
-block_hint,DefineNamedOwnIC,203,204,0
-block_hint,DefineNamedOwnIC,69,70,0
-block_hint,DefineNamedOwnIC,205,206,0
-block_hint,DefineNamedOwnIC,326,327,0
-block_hint,DefineNamedOwnIC,243,244,1
-block_hint,DefineNamedOwnIC,93,94,0
-block_hint,DefineNamedOwnIC,17,18,0
-block_hint,DefineNamedOwnIC,350,351,0
-block_hint,DefineNamedOwnIC,157,158,1
-block_hint,DefineNamedOwnIC,159,160,1
-block_hint,DefineNamedOwnIC,254,255,1
-block_hint,DefineNamedOwnIC,32,33,0
-block_hint,DefineNamedOwnIC,246,247,1
-block_hint,DefineNamedOwnIC,22,23,0
-block_hint,DefineNamedOwnIC,149,150,1
-block_hint,DefineNamedOwnIC,352,353,0
-block_hint,DefineNamedOwnIC,280,281,0
-block_hint,DefineNamedOwnIC,151,152,0
-block_hint,DefineNamedOwnIC,153,154,1
-block_hint,DefineNamedOwnIC,248,249,1
-block_hint,DefineNamedOwnIC,26,27,0
-block_hint,DefineNamedOwnIC,155,156,0
-block_hint,DefineNamedOwnIC,250,251,1
-block_hint,DefineNamedOwnIC,30,31,0
-block_hint,KeyedStoreIC,401,402,1
-block_hint,KeyedStoreIC,173,174,0
-block_hint,KeyedStoreIC,169,170,0
-block_hint,KeyedStoreIC,239,240,0
-block_hint,KeyedStoreIC,171,172,1
-block_hint,KeyedStoreIC,87,88,1
-block_hint,KeyedStoreIC,398,399,1
-block_hint,KeyedStoreIC,109,110,0
-block_hint,KeyedStoreIC,22,23,0
-block_hint,KeyedStoreIC,428,429,0
-block_hint,KeyedStoreIC,181,182,1
-block_hint,KeyedStoreIC,430,431,0
-block_hint,KeyedStoreIC,351,352,0
-block_hint,KeyedStoreIC,298,299,1
-block_hint,KeyedStoreIC,31,32,0
-block_hint,KeyedStoreIC,272,273,0
-block_hint,KeyedStoreIC,355,356,0
-block_hint,KeyedStoreIC,195,196,1
-block_hint,KeyedStoreIC,260,261,1
-block_hint,KeyedStoreIC,432,433,0
-block_hint,KeyedStoreIC,329,330,0
-block_hint,KeyedStoreIC,137,138,1
-block_hint,KeyedStoreIC,45,46,1
-block_hint,KeyedStoreIC,197,198,0
-block_hint,KeyedStoreIC,47,48,0
-block_hint,KeyedStoreIC,215,216,0
-block_hint,KeyedStoreIC,361,362,1
-block_hint,KeyedStoreIC,363,364,0
-block_hint,KeyedStoreIC,221,222,1
-block_hint,KeyedStoreIC,223,224,0
-block_hint,KeyedStoreIC,345,346,0
-block_hint,KeyedStoreIC,367,368,0
-block_hint,KeyedStoreIC,434,435,0
-block_hint,KeyedStoreIC,365,366,1
-block_hint,KeyedStoreIC,231,232,1
-block_hint,KeyedStoreIC,233,234,0
-block_hint,KeyedStoreIC,235,236,0
-block_hint,KeyedStoreIC,237,238,1
-block_hint,KeyedStoreIC,449,450,0
-block_hint,KeyedStoreIC,426,427,1
-block_hint,KeyedStoreIC,278,279,0
-block_hint,KeyedStoreIC,377,378,1
-block_hint,KeyedStoreIC,97,98,0
-block_hint,KeyedStoreIC,164,165,0
-block_hint,KeyedStoreICTrampoline,3,4,1
-block_hint,DefineKeyedOwnIC,392,393,1
-block_hint,DefineKeyedOwnIC,174,175,0
-block_hint,DefineKeyedOwnIC,170,171,1
-block_hint,StoreInArrayLiteralIC,30,31,1
-block_hint,StoreInArrayLiteralIC,19,20,0
-block_hint,StoreInArrayLiteralIC,23,24,0
-block_hint,StoreInArrayLiteralIC,14,15,1
-block_hint,StoreInArrayLiteralIC,16,17,1
-block_hint,StoreInArrayLiteralIC,8,9,1
-block_hint,StoreInArrayLiteralIC,4,5,1
-block_hint,LoadGlobalIC,62,63,0
-block_hint,LoadGlobalIC,16,17,1
-block_hint,LoadGlobalIC,18,19,1
-block_hint,LoadGlobalIC,20,21,1
-block_hint,LoadGlobalIC,194,195,0
-block_hint,LoadGlobalIC,14,15,0
-block_hint,LoadGlobalIC,111,112,1
-block_hint,LoadGlobalICInsideTypeof,62,63,0
-block_hint,LoadGlobalICInsideTypeof,196,197,1
-block_hint,LoadGlobalICInsideTypeof,14,15,0
-block_hint,LoadGlobalICInsideTypeof,111,112,0
-block_hint,LoadGlobalICInsideTypeof,22,23,1
-block_hint,LoadGlobalICInsideTypeof,24,25,1
-block_hint,LoadGlobalICInsideTypeof,257,258,1
-block_hint,LoadGlobalICInsideTypeof,211,212,0
-block_hint,LoadGlobalICInsideTypeof,60,61,0
-block_hint,LoadGlobalICInsideTypeof,225,226,0
-block_hint,LoadGlobalICInsideTypeof,113,114,1
-block_hint,LoadGlobalICInsideTypeof,26,27,1
-block_hint,LoadGlobalICInsideTypeof,234,235,1
-block_hint,LoadGlobalICInsideTypeof,202,203,0
-block_hint,LoadGlobalICInsideTypeof,45,46,0
-block_hint,LoadGlobalICInsideTypeof,43,44,1
-block_hint,LoadGlobalICTrampoline,3,4,1
-block_hint,LoadGlobalICInsideTypeofTrampoline,3,4,1
-block_hint,LookupGlobalICBaseline,3,4,1
-block_hint,LookupGlobalICBaseline,14,15,1
-block_hint,LookupGlobalICBaseline,5,6,1
-block_hint,LookupGlobalICBaseline,11,12,1
-block_hint,LookupGlobalICBaseline,7,8,1
-block_hint,LookupGlobalICBaseline,9,10,0
-block_hint,KeyedHasIC,261,262,1
-block_hint,KeyedHasIC,125,126,0
-block_hint,KeyedHasIC,117,118,0
-block_hint,KeyedHasIC,239,240,0
-block_hint,KeyedHasIC,165,166,0
-block_hint,KeyedHasIC,77,78,0
-block_hint,KeyedHasIC,119,120,1
-block_hint,KeyedHasIC,167,168,0
-block_hint,KeyedHasIC,123,124,1
-block_hint,KeyedHasIC,79,80,1
-block_hint,KeyedHasIC,197,198,0
-block_hint,KeyedHasIC,221,222,0
-block_hint,KeyedHasIC,281,282,0
-block_hint,KeyedHasIC,279,280,0
-block_hint,KeyedHasIC,161,162,1
-block_hint,KeyedHasIC,61,62,0
-block_hint,KeyedHasIC_Megamorphic,137,138,1
-block_hint,KeyedHasIC_Megamorphic,139,140,1
-block_hint,KeyedHasIC_Megamorphic,261,262,0
-block_hint,KeyedHasIC_Megamorphic,211,212,1
-block_hint,KeyedHasIC_Megamorphic,254,255,0
-block_hint,KeyedHasIC_Megamorphic,97,98,0
-block_hint,KeyedHasIC_Megamorphic,234,235,1
-block_hint,KeyedHasIC_Megamorphic,123,124,1
-block_hint,KeyedHasIC_Megamorphic,141,142,1
-block_hint,KeyedHasIC_Megamorphic,199,200,0
-block_hint,KeyedHasIC_Megamorphic,201,202,0
-block_hint,KeyedHasIC_Megamorphic,101,102,0
-block_hint,KeyedHasIC_Megamorphic,99,100,0
-block_hint,KeyedHasIC_Megamorphic,250,251,0
-block_hint,KeyedHasIC_Megamorphic,268,269,0
-block_hint,KeyedHasIC_Megamorphic,106,107,0
-block_hint,KeyedHasIC_Megamorphic,275,276,0
-block_hint,KeyedHasIC_Megamorphic,280,281,0
-block_hint,KeyedHasIC_Megamorphic,266,267,0
-block_hint,KeyedHasIC_Megamorphic,203,204,0
-block_hint,KeyedHasIC_Megamorphic,44,45,1
-block_hint,KeyedHasIC_Megamorphic,63,64,0
-block_hint,KeyedHasIC_Megamorphic,239,240,1
-block_hint,KeyedHasIC_Megamorphic,48,49,0
-block_hint,KeyedHasIC_Megamorphic,270,271,0
-block_hint,KeyedHasIC_Megamorphic,228,229,0
-block_hint,KeyedHasIC_Megamorphic,87,88,0
-block_hint,KeyedHasIC_Megamorphic,155,156,0
-block_hint,KeyedHasIC_Megamorphic,196,197,0
-block_hint,KeyedHasIC_Megamorphic,59,60,0
-block_hint,KeyedHasIC_Megamorphic,222,223,0
-block_hint,KeyedHasIC_Megamorphic,57,58,1
-block_hint,IterableToList,42,43,1
-block_hint,IterableToList,44,45,1
-block_hint,IterableToList,46,47,1
-block_hint,IterableToList,36,37,1
-block_hint,IterableToList,48,49,1
-block_hint,IterableToList,50,51,1
-block_hint,IterableToList,98,99,1
-block_hint,IterableToList,107,108,0
-block_hint,IterableToList,109,110,0
-block_hint,IterableToList,100,101,0
-block_hint,IterableToList,74,75,0
-block_hint,IterableToList,58,59,1
-block_hint,IterableToList,96,97,0
-block_hint,IterableToList,52,53,0
-block_hint,IterableToList,93,94,1
-block_hint,IterableToList,82,83,1
-block_hint,IterableToList,17,18,0
-block_hint,IterableToList,61,62,0
-block_hint,IterableToList,14,15,1
-block_hint,IterableToList,90,91,0
-block_hint,IterableToList,103,104,1
-block_hint,IterableToList,88,89,0
-block_hint,IterableToList,32,33,0
-block_hint,IterableToList,113,114,1
-block_hint,IterableToList,111,112,1
-block_hint,IterableToList,63,64,0
-block_hint,IterableToList,34,35,1
-block_hint,IterableToListWithSymbolLookup,39,40,0
-block_hint,IterableToListWithSymbolLookup,96,97,1
-block_hint,IterableToListWithSymbolLookup,94,95,0
-block_hint,IterableToListWithSymbolLookup,82,83,1
-block_hint,IterableToListWithSymbolLookup,55,56,1
-block_hint,IterableToListWithSymbolLookup,25,26,1
-block_hint,IterableToListWithSymbolLookup,2,3,1
-block_hint,IterableToListWithSymbolLookup,99,100,1
-block_hint,IterableToListWithSymbolLookup,92,93,0
-block_hint,IterableToListWithSymbolLookup,71,72,1
-block_hint,IterableToListWithSymbolLookup,78,79,0
-block_hint,IterableToListWithSymbolLookup,84,85,1
-block_hint,IterableToListWithSymbolLookup,57,58,1
-block_hint,IterableToListWithSymbolLookup,27,28,1
-block_hint,IterableToListWithSymbolLookup,4,5,1
-block_hint,IterableToListWithSymbolLookup,80,81,1
-block_hint,IterableToListWithSymbolLookup,62,63,0
-block_hint,IterableToListWithSymbolLookup,17,18,1
-block_hint,IterableToListMayPreserveHoles,8,9,1
-block_hint,IterableToListMayPreserveHoles,15,16,0
-block_hint,IterableToListMayPreserveHoles,20,21,1
-block_hint,IterableToListMayPreserveHoles,17,18,1
-block_hint,IterableToListMayPreserveHoles,11,12,1
-block_hint,IterableToListMayPreserveHoles,3,4,1
-block_hint,IterableToListMayPreserveHoles,13,14,1
-block_hint,IterableToListMayPreserveHoles,5,6,0
-block_hint,FindOrderedHashMapEntry,26,27,1
-block_hint,FindOrderedHashMapEntry,64,65,0
-block_hint,FindOrderedHashMapEntry,24,25,0
-block_hint,FindOrderedHashMapEntry,22,23,0
-block_hint,FindOrderedHashMapEntry,68,69,0
-block_hint,FindOrderedHashMapEntry,58,59,1
-block_hint,FindOrderedHashMapEntry,60,61,1
-block_hint,MapConstructor,328,329,1
-block_hint,MapConstructor,248,249,0
-block_hint,MapConstructor,105,106,0
-block_hint,MapConstructor,13,14,1
-block_hint,MapConstructor,270,271,1
-block_hint,MapConstructor,211,212,1
-block_hint,MapConstructor,86,87,0
-block_hint,MapConstructor,88,89,1
-block_hint,MapConstructor,272,273,1
-block_hint,MapConstructor,308,309,0
-block_hint,MapConstructor,319,320,0
-block_hint,MapConstructor,220,221,0
-block_hint,MapConstructor,109,110,0
-block_hint,MapConstructor,238,239,1
-block_hint,MapConstructor,103,104,1
-block_hint,MapPrototypeSet,98,99,1
-block_hint,MapPrototypeSet,62,63,1
-block_hint,MapPrototypeSet,64,65,1
-block_hint,MapPrototypeSet,88,89,1
-block_hint,MapPrototypeSet,90,91,0
-block_hint,MapPrototypeSet,26,27,1
-block_hint,MapPrototypeSet,94,95,0
-block_hint,MapPrototypeSet,56,57,0
-block_hint,MapPrototypeSet,24,25,0
-block_hint,MapPrototypeSet,22,23,0
-block_hint,MapPrototypeSet,31,32,1
-block_hint,MapPrototypeSet,66,67,0
-block_hint,MapPrototypeSet,47,48,0
-block_hint,MapPrototypeSet,49,50,1
-block_hint,MapPrototypeSet,51,52,1
-block_hint,MapPrototypeSet,53,54,0
-block_hint,MapPrototypeSet,17,18,1
-block_hint,MapPrototypeSet,29,30,1
-block_hint,MapPrototypeDelete,98,99,1
-block_hint,MapPrototypeDelete,77,78,1
-block_hint,MapPrototypeDelete,79,80,1
-block_hint,MapPrototypeDelete,15,16,0
-block_hint,MapPrototypeDelete,89,90,1
-block_hint,MapPrototypeDelete,63,64,0
-block_hint,MapPrototypeDelete,40,41,0
-block_hint,MapPrototypeDelete,65,66,1
-block_hint,MapPrototypeDelete,67,68,1
-block_hint,MapPrototypeDelete,19,20,1
-block_hint,MapPrototypeDelete,21,22,1
-block_hint,MapPrototypeDelete,23,24,1
-block_hint,MapPrototypeGet,12,13,1
-block_hint,MapPrototypeGet,7,8,1
-block_hint,MapPrototypeGet,9,10,1
-block_hint,MapPrototypeGet,3,4,1
-block_hint,MapPrototypeHas,10,11,1
-block_hint,MapPrototypeHas,5,6,1
-block_hint,MapPrototypeHas,7,8,1
-block_hint,MapPrototypeEntries,13,14,1
-block_hint,MapPrototypeEntries,8,9,1
-block_hint,MapPrototypeEntries,10,11,1
-block_hint,MapPrototypeEntries,4,5,1
-block_hint,MapPrototypeEntries,6,7,1
-block_hint,MapPrototypeGetSize,8,9,1
-block_hint,MapPrototypeGetSize,5,6,1
-block_hint,MapPrototypeGetSize,3,4,1
-block_hint,MapPrototypeForEach,33,34,1
-block_hint,MapPrototypeForEach,30,31,1
-block_hint,MapPrototypeForEach,27,28,1
-block_hint,MapPrototypeForEach,20,21,1
-block_hint,MapPrototypeForEach,22,23,1
-block_hint,MapPrototypeForEach,24,25,1
-block_hint,MapPrototypeForEach,12,13,1
-block_hint,MapPrototypeForEach,14,15,0
-block_hint,MapPrototypeValues,13,14,1
-block_hint,MapPrototypeValues,8,9,1
-block_hint,MapPrototypeValues,10,11,1
-block_hint,MapPrototypeValues,4,5,1
-block_hint,MapPrototypeValues,6,7,1
-block_hint,MapIteratorPrototypeNext,47,48,1
-block_hint,MapIteratorPrototypeNext,30,31,1
-block_hint,MapIteratorPrototypeNext,32,33,1
-block_hint,MapIteratorPrototypeNext,19,20,0
-block_hint,MapIteratorPrototypeNext,21,22,0
-block_hint,MapIteratorPrototypeNext,34,35,0
-block_hint,MapIteratorPrototypeNext,7,8,1
-block_hint,MapIteratorPrototypeNext,39,40,1
-block_hint,MapIteratorPrototypeNext,9,10,1
-block_hint,MapIteratorPrototypeNext,11,12,1
-block_hint,MapIteratorPrototypeNext,13,14,1
-block_hint,MapIteratorPrototypeNext,15,16,1
-block_hint,MapIteratorPrototypeNext,17,18,1
-block_hint,MapIteratorPrototypeNext,25,26,1
-block_hint,SameValueNumbersOnly,4,5,1
-block_hint,Add_Baseline,32,33,0
-block_hint,Add_Baseline,21,22,0
-block_hint,Add_Baseline,8,9,1
-block_hint,Add_Baseline,58,59,0
-block_hint,Add_Baseline,35,36,1
-block_hint,Add_Baseline,47,48,0
-block_hint,Add_Baseline,17,18,1
-block_hint,Add_Baseline,53,54,1
-block_hint,Add_Baseline,19,20,1
-block_hint,Add_Baseline,26,27,1
-block_hint,Add_Baseline,10,11,1
-block_hint,AddSmi_Baseline,32,33,0
-block_hint,AddSmi_Baseline,21,22,0
-block_hint,AddSmi_Baseline,8,9,1
-block_hint,AddSmi_Baseline,49,50,1
-block_hint,AddSmi_Baseline,26,27,1
-block_hint,AddSmi_Baseline,10,11,1
-block_hint,Subtract_Baseline,21,22,0
-block_hint,Subtract_Baseline,8,9,1
-block_hint,Subtract_Baseline,46,47,1
-block_hint,Subtract_Baseline,56,57,1
-block_hint,Subtract_Baseline,54,55,0
-block_hint,Subtract_Baseline,42,43,0
-block_hint,Subtract_Baseline,48,49,1
-block_hint,Subtract_Baseline,17,18,1
-block_hint,Subtract_Baseline,23,24,1
-block_hint,Subtract_Baseline,10,11,1
-block_hint,SubtractSmi_Baseline,21,22,0
-block_hint,SubtractSmi_Baseline,8,9,1
-block_hint,SubtractSmi_Baseline,38,39,1
-block_hint,SubtractSmi_Baseline,23,24,1
-block_hint,SubtractSmi_Baseline,10,11,1
-block_hint,Multiply_Baseline,69,70,0
-block_hint,Multiply_Baseline,47,48,0
-block_hint,Multiply_Baseline,55,56,0
-block_hint,Multiply_Baseline,61,62,1
-block_hint,Multiply_Baseline,57,58,1
-block_hint,Multiply_Baseline,10,11,1
-block_hint,Multiply_Baseline,49,50,1
-block_hint,Multiply_Baseline,67,68,1
-block_hint,Multiply_Baseline,51,52,1
-block_hint,Multiply_Baseline,24,25,1
-block_hint,Multiply_Baseline,12,13,1
-block_hint,MultiplySmi_Baseline,61,62,0
-block_hint,MultiplySmi_Baseline,47,48,0
-block_hint,MultiplySmi_Baseline,49,50,0
-block_hint,MultiplySmi_Baseline,51,52,1
-block_hint,MultiplySmi_Baseline,22,23,0
-block_hint,MultiplySmi_Baseline,10,11,1
-block_hint,MultiplySmi_Baseline,38,39,1
-block_hint,MultiplySmi_Baseline,24,25,1
-block_hint,MultiplySmi_Baseline,12,13,1
-block_hint,Divide_Baseline,59,60,0
-block_hint,Divide_Baseline,61,62,0
-block_hint,Divide_Baseline,48,49,0
-block_hint,Divide_Baseline,31,32,1
-block_hint,Divide_Baseline,10,11,1
-block_hint,Divide_Baseline,52,53,1
-block_hint,Divide_Baseline,67,68,1
-block_hint,Divide_Baseline,54,55,1
-block_hint,Divide_Baseline,38,39,0
-block_hint,Divide_Baseline,19,20,1
-block_hint,Divide_Baseline,25,26,1
-block_hint,Divide_Baseline,12,13,1
-block_hint,DivideSmi_Baseline,53,54,0
-block_hint,DivideSmi_Baseline,61,62,0
-block_hint,DivideSmi_Baseline,55,56,0
-block_hint,DivideSmi_Baseline,48,49,0
-block_hint,DivideSmi_Baseline,31,32,1
-block_hint,DivideSmi_Baseline,10,11,1
-block_hint,DivideSmi_Baseline,40,41,1
-block_hint,DivideSmi_Baseline,25,26,1
-block_hint,DivideSmi_Baseline,12,13,1
-block_hint,Modulus_Baseline,68,69,0
-block_hint,Modulus_Baseline,64,65,0
-block_hint,Modulus_Baseline,50,51,1
-block_hint,Modulus_Baseline,45,46,1
-block_hint,Modulus_Baseline,22,23,0
-block_hint,Modulus_Baseline,10,11,1
-block_hint,ModulusSmi_Baseline,50,51,1
-block_hint,ModulusSmi_Baseline,45,46,1
-block_hint,ModulusSmi_Baseline,22,23,0
-block_hint,ModulusSmi_Baseline,10,11,1
-block_hint,ModulusSmi_Baseline,37,38,1
-block_hint,ModulusSmi_Baseline,24,25,1
-block_hint,ModulusSmi_Baseline,12,13,1
-block_hint,BitwiseAnd_Baseline,35,36,0
-block_hint,BitwiseAnd_Baseline,23,24,1
-block_hint,BitwiseAnd_Baseline,8,9,0
-block_hint,BitwiseAnd_Baseline,33,34,0
-block_hint,BitwiseAnd_Baseline,27,28,1
-block_hint,BitwiseAnd_Baseline,12,13,0
-block_hint,BitwiseAnd_Baseline,50,51,1
-block_hint,BitwiseAnd_Baseline,14,15,1
-block_hint,BitwiseAndSmi_Baseline,18,19,0
-block_hint,BitwiseAndSmi_Baseline,16,17,1
-block_hint,BitwiseAndSmi_Baseline,7,8,0
-block_hint,BitwiseAndSmi_Baseline,26,27,0
-block_hint,BitwiseAndSmi_Baseline,20,21,0
-block_hint,BitwiseAndSmi_Baseline,9,10,1
-block_hint,BitwiseOr_Baseline,35,36,0
-block_hint,BitwiseOr_Baseline,23,24,1
-block_hint,BitwiseOr_Baseline,8,9,1
-block_hint,BitwiseOr_Baseline,48,49,1
-block_hint,BitwiseOr_Baseline,50,51,1
-block_hint,BitwiseOr_Baseline,14,15,1
-block_hint,BitwiseOrSmi_Baseline,5,6,0
-block_hint,BitwiseOrSmi_Baseline,18,19,0
-block_hint,BitwiseOrSmi_Baseline,16,17,0
-block_hint,BitwiseOrSmi_Baseline,28,29,1
-block_hint,BitwiseOrSmi_Baseline,9,10,1
-block_hint,BitwiseXor_Baseline,25,26,1
-block_hint,BitwiseXor_Baseline,35,36,0
-block_hint,BitwiseXor_Baseline,23,24,1
-block_hint,BitwiseXor_Baseline,48,49,1
-block_hint,BitwiseXor_Baseline,33,34,0
-block_hint,BitwiseXor_Baseline,27,28,1
-block_hint,BitwiseXor_Baseline,50,51,1
-block_hint,BitwiseXor_Baseline,14,15,1
-block_hint,BitwiseXorSmi_Baseline,18,19,0
-block_hint,BitwiseXorSmi_Baseline,16,17,1
-block_hint,BitwiseXorSmi_Baseline,7,8,1
-block_hint,BitwiseXorSmi_Baseline,9,10,1
-block_hint,ShiftLeft_Baseline,25,26,1
-block_hint,ShiftLeft_Baseline,10,11,0
-block_hint,ShiftLeft_Baseline,50,51,1
-block_hint,ShiftLeft_Baseline,14,15,1
-block_hint,ShiftLeftSmi_Baseline,35,36,1
-block_hint,ShiftLeftSmi_Baseline,25,26,1
-block_hint,ShiftLeftSmi_Baseline,37,38,1
-block_hint,ShiftLeftSmi_Baseline,9,10,1
-block_hint,ShiftRight_Baseline,6,7,0
-block_hint,ShiftRight_Baseline,10,11,0
-block_hint,ShiftRight_Baseline,46,47,0
-block_hint,ShiftRight_Baseline,29,30,0
-block_hint,ShiftRight_Baseline,14,15,1
-block_hint,ShiftRightSmi_Baseline,5,6,0
-block_hint,ShiftRightSmi_Baseline,22,23,1
-block_hint,ShiftRightSmi_Baseline,26,27,0
-block_hint,ShiftRightSmi_Baseline,20,21,0
-block_hint,ShiftRightSmi_Baseline,9,10,1
-block_hint,ShiftRightLogical_Baseline,25,26,1
-block_hint,ShiftRightLogical_Baseline,10,11,0
-block_hint,ShiftRightLogical_Baseline,14,15,1
-block_hint,ShiftRightLogicalSmi_Baseline,35,36,1
-block_hint,ShiftRightLogicalSmi_Baseline,25,26,1
-block_hint,ShiftRightLogicalSmi_Baseline,33,34,0
-block_hint,ShiftRightLogicalSmi_Baseline,23,24,0
-block_hint,ShiftRightLogicalSmi_Baseline,9,10,1
-block_hint,Add_WithFeedback,49,50,1
-block_hint,Add_WithFeedback,60,61,0
-block_hint,Add_WithFeedback,58,59,0
-block_hint,Add_WithFeedback,45,46,1
-block_hint,Add_WithFeedback,35,36,1
-block_hint,Add_WithFeedback,28,29,0
-block_hint,Add_WithFeedback,19,20,1
-block_hint,Subtract_WithFeedback,52,53,1
-block_hint,Subtract_WithFeedback,56,57,0
-block_hint,Subtract_WithFeedback,54,55,0
-block_hint,Subtract_WithFeedback,42,43,0
-block_hint,Subtract_WithFeedback,17,18,1
-block_hint,Modulus_WithFeedback,68,69,0
-block_hint,Modulus_WithFeedback,64,65,0
-block_hint,Modulus_WithFeedback,50,51,1
-block_hint,Modulus_WithFeedback,45,46,1
-block_hint,Modulus_WithFeedback,22,23,0
-block_hint,Modulus_WithFeedback,10,11,1
-block_hint,BitwiseOr_WithFeedback,6,7,1
-block_hint,BitwiseOr_WithFeedback,35,36,0
-block_hint,BitwiseOr_WithFeedback,23,24,0
-block_hint,BitwiseOr_WithFeedback,10,11,0
-block_hint,BitwiseOr_WithFeedback,46,47,0
-block_hint,BitwiseOr_WithFeedback,29,30,0
-block_hint,BitwiseOr_WithFeedback,14,15,1
-block_hint,Equal_Baseline,48,49,0
-block_hint,Equal_Baseline,18,19,1
-block_hint,Equal_Baseline,22,23,1
-block_hint,Equal_Baseline,101,102,0
-block_hint,Equal_Baseline,14,15,1
-block_hint,Equal_Baseline,39,40,0
-block_hint,Equal_Baseline,26,27,0
-block_hint,Equal_Baseline,28,29,1
-block_hint,Equal_Baseline,45,46,0
-block_hint,Equal_Baseline,32,33,0
-block_hint,Equal_Baseline,24,25,1
-block_hint,Equal_Baseline,77,78,0
-block_hint,Equal_Baseline,75,76,0
-block_hint,Equal_Baseline,83,84,0
-block_hint,Equal_Baseline,85,86,0
-block_hint,Equal_Baseline,59,60,0
-block_hint,Equal_Baseline,109,110,0
-block_hint,Equal_Baseline,65,66,0
-block_hint,Equal_Baseline,69,70,1
-block_hint,Equal_Baseline,98,99,0
-block_hint,Equal_Baseline,71,72,1
-block_hint,Equal_Baseline,6,7,1
-block_hint,StrictEqual_Baseline,37,38,0
-block_hint,StrictEqual_Baseline,76,77,0
-block_hint,StrictEqual_Baseline,47,48,1
-block_hint,StrictEqual_Baseline,60,61,0
-block_hint,StrictEqual_Baseline,51,52,0
-block_hint,StrictEqual_Baseline,53,54,1
-block_hint,StrictEqual_Baseline,35,36,1
-block_hint,StrictEqual_Baseline,33,34,0
-block_hint,StrictEqual_Baseline,55,56,0
-block_hint,StrictEqual_Baseline,29,30,1
-block_hint,StrictEqual_Baseline,31,32,1
-block_hint,StrictEqual_Baseline,49,50,1
-block_hint,StrictEqual_Baseline,41,42,0
-block_hint,StrictEqual_Baseline,45,46,0
-block_hint,StrictEqual_Baseline,66,67,0
-block_hint,StrictEqual_Baseline,13,14,0
-block_hint,StrictEqual_Baseline,43,44,0
-block_hint,StrictEqual_Baseline,3,4,1
-block_hint,LessThan_Baseline,44,45,0
-block_hint,LessThan_Baseline,23,24,1
-block_hint,LessThan_Baseline,25,26,1
-block_hint,LessThan_Baseline,10,11,0
-block_hint,LessThan_Baseline,56,57,0
-block_hint,LessThan_Baseline,12,13,0
-block_hint,LessThan_Baseline,5,6,1
-block_hint,GreaterThan_Baseline,44,45,0
-block_hint,GreaterThan_Baseline,10,11,0
-block_hint,GreaterThan_Baseline,48,49,1
-block_hint,GreaterThan_Baseline,56,57,0
-block_hint,GreaterThan_Baseline,12,13,0
-block_hint,GreaterThan_Baseline,5,6,1
-block_hint,LessThanOrEqual_Baseline,44,45,0
-block_hint,LessThanOrEqual_Baseline,23,24,1
-block_hint,LessThanOrEqual_Baseline,25,26,1
-block_hint,LessThanOrEqual_Baseline,56,57,0
-block_hint,LessThanOrEqual_Baseline,58,59,1
-block_hint,LessThanOrEqual_Baseline,37,38,1
-block_hint,LessThanOrEqual_Baseline,27,28,1
-block_hint,LessThanOrEqual_Baseline,5,6,1
-block_hint,GreaterThanOrEqual_Baseline,44,45,0
-block_hint,GreaterThanOrEqual_Baseline,23,24,1
-block_hint,GreaterThanOrEqual_Baseline,25,26,1
-block_hint,GreaterThanOrEqual_Baseline,56,57,0
-block_hint,GreaterThanOrEqual_Baseline,27,28,1
-block_hint,GreaterThanOrEqual_Baseline,5,6,1
-block_hint,Equal_WithFeedback,103,104,0
-block_hint,Equal_WithFeedback,81,82,1
-block_hint,Equal_WithFeedback,37,38,0
-block_hint,Equal_WithFeedback,48,49,0
-block_hint,Equal_WithFeedback,18,19,1
-block_hint,Equal_WithFeedback,95,96,0
-block_hint,Equal_WithFeedback,101,102,0
-block_hint,Equal_WithFeedback,20,21,0
-block_hint,Equal_WithFeedback,39,40,0
-block_hint,Equal_WithFeedback,26,27,0
-block_hint,Equal_WithFeedback,28,29,1
-block_hint,Equal_WithFeedback,45,46,0
-block_hint,Equal_WithFeedback,32,33,0
-block_hint,Equal_WithFeedback,75,76,0
-block_hint,Equal_WithFeedback,83,84,0
-block_hint,Equal_WithFeedback,85,86,0
-block_hint,Equal_WithFeedback,87,88,0
-block_hint,Equal_WithFeedback,79,80,0
-block_hint,Equal_WithFeedback,89,90,0
-block_hint,Equal_WithFeedback,117,118,0
-block_hint,Equal_WithFeedback,109,110,0
-block_hint,Equal_WithFeedback,107,108,0
-block_hint,Equal_WithFeedback,67,68,0
-block_hint,Equal_WithFeedback,105,106,0
-block_hint,Equal_WithFeedback,65,66,0
-block_hint,Equal_WithFeedback,6,7,1
-block_hint,StrictEqual_WithFeedback,37,38,0
-block_hint,StrictEqual_WithFeedback,72,73,0
-block_hint,StrictEqual_WithFeedback,47,48,1
-block_hint,StrictEqual_WithFeedback,60,61,0
-block_hint,StrictEqual_WithFeedback,53,54,1
-block_hint,StrictEqual_WithFeedback,35,36,1
-block_hint,StrictEqual_WithFeedback,57,58,1
-block_hint,StrictEqual_WithFeedback,55,56,0
-block_hint,StrictEqual_WithFeedback,31,32,1
-block_hint,StrictEqual_WithFeedback,41,42,0
-block_hint,StrictEqual_WithFeedback,70,71,1
-block_hint,StrictEqual_WithFeedback,45,46,0
-block_hint,StrictEqual_WithFeedback,21,22,1
-block_hint,StrictEqual_WithFeedback,66,67,0
-block_hint,StrictEqual_WithFeedback,15,16,0
-block_hint,StrictEqual_WithFeedback,13,14,0
-block_hint,StrictEqual_WithFeedback,43,44,0
-block_hint,StrictEqual_WithFeedback,3,4,1
-block_hint,LessThan_WithFeedback,44,45,1
-block_hint,LessThan_WithFeedback,23,24,1
-block_hint,LessThan_WithFeedback,46,47,1
-block_hint,LessThan_WithFeedback,48,49,1
-block_hint,LessThan_WithFeedback,56,57,0
-block_hint,LessThan_WithFeedback,54,55,0
-block_hint,LessThan_WithFeedback,18,19,1
-block_hint,LessThan_WithFeedback,31,32,0
-block_hint,LessThan_WithFeedback,16,17,1
-block_hint,LessThan_WithFeedback,12,13,0
-block_hint,LessThan_WithFeedback,39,40,1
-block_hint,LessThan_WithFeedback,5,6,1
-block_hint,GreaterThan_WithFeedback,60,61,1
-block_hint,GreaterThan_WithFeedback,23,24,1
-block_hint,GreaterThan_WithFeedback,25,26,1
-block_hint,GreaterThan_WithFeedback,48,49,1
-block_hint,GreaterThan_WithFeedback,56,57,0
-block_hint,GreaterThan_WithFeedback,58,59,0
-block_hint,GreaterThan_WithFeedback,54,55,1
-block_hint,GreaterThan_WithFeedback,50,51,1
-block_hint,GreaterThan_WithFeedback,18,19,0
-block_hint,GreaterThan_WithFeedback,12,13,0
-block_hint,GreaterThan_WithFeedback,5,6,1
-block_hint,GreaterThanOrEqual_WithFeedback,60,61,1
-block_hint,GreaterThanOrEqual_WithFeedback,46,47,1
-block_hint,GreaterThanOrEqual_WithFeedback,48,49,0
-block_hint,GreaterThanOrEqual_WithFeedback,56,57,0
-block_hint,GreaterThanOrEqual_WithFeedback,54,55,0
-block_hint,GreaterThanOrEqual_WithFeedback,18,19,1
-block_hint,GreaterThanOrEqual_WithFeedback,31,32,0
-block_hint,GreaterThanOrEqual_WithFeedback,16,17,1
-block_hint,GreaterThanOrEqual_WithFeedback,5,6,1
-block_hint,BitwiseNot_Baseline,19,20,0
-block_hint,BitwiseNot_Baseline,15,16,1
-block_hint,BitwiseNot_Baseline,7,8,1
-block_hint,BitwiseNot_Baseline,27,28,1
-block_hint,BitwiseNot_Baseline,9,10,1
-block_hint,Decrement_Baseline,19,20,0
-block_hint,Decrement_Baseline,17,18,1
-block_hint,Decrement_Baseline,13,14,0
-block_hint,Decrement_Baseline,15,16,1
-block_hint,Decrement_Baseline,5,6,1
-block_hint,Increment_Baseline,19,20,0
-block_hint,Increment_Baseline,17,18,1
-block_hint,Increment_Baseline,13,14,0
-block_hint,Increment_Baseline,15,16,1
-block_hint,Increment_Baseline,5,6,1
-block_hint,Negate_Baseline,20,21,1
-block_hint,Negate_Baseline,14,15,0
-block_hint,Negate_Baseline,18,19,1
-block_hint,Negate_Baseline,5,6,1
-block_hint,ObjectAssign,21,22,1
-block_hint,ObjectAssign,18,19,1
-block_hint,ObjectAssign,15,16,1
-block_hint,ObjectAssign,12,13,1
-block_hint,ObjectAssign,9,10,0
-block_hint,ObjectAssign,5,6,0
-block_hint,ObjectCreate,78,79,1
-block_hint,ObjectCreate,75,76,1
-block_hint,ObjectCreate,33,34,1
-block_hint,ObjectCreate,35,36,1
-block_hint,ObjectCreate,37,38,1
-block_hint,ObjectCreate,39,40,0
-block_hint,ObjectCreate,41,42,1
-block_hint,ObjectCreate,43,44,0
-block_hint,ObjectCreate,45,46,1
-block_hint,ObjectCreate,17,18,1
-block_hint,ObjectCreate,69,70,0
-block_hint,ObjectCreate,55,56,0
-block_hint,ObjectCreate,59,60,1
-block_hint,ObjectCreate,47,48,0
-block_hint,ObjectCreate,49,50,0
-block_hint,ObjectCreate,5,6,1
-block_hint,ObjectCreate,52,53,1
-block_hint,ObjectCreate,7,8,1
-block_hint,ObjectCreate,9,10,1
-block_hint,ObjectCreate,11,12,1
-block_hint,ObjectCreate,13,14,1
-block_hint,ObjectCreate,15,16,1
-block_hint,ObjectCreate,20,21,0
-block_hint,ObjectCreate,61,62,1
-block_hint,ObjectGetOwnPropertyDescriptor,517,518,1
-block_hint,ObjectGetOwnPropertyDescriptor,514,515,1
-block_hint,ObjectGetOwnPropertyDescriptor,511,512,0
-block_hint,ObjectGetOwnPropertyDescriptor,503,504,1
-block_hint,ObjectGetOwnPropertyDescriptor,490,491,1
-block_hint,ObjectGetOwnPropertyDescriptor,408,409,0
-block_hint,ObjectGetOwnPropertyDescriptor,470,471,1
-block_hint,ObjectGetOwnPropertyDescriptor,488,489,0
-block_hint,ObjectGetOwnPropertyDescriptor,434,435,0
-block_hint,ObjectGetOwnPropertyDescriptor,467,468,1
-block_hint,ObjectGetOwnPropertyDescriptor,410,411,1
-block_hint,ObjectGetOwnPropertyDescriptor,462,463,0
-block_hint,ObjectGetOwnPropertyDescriptor,464,465,0
-block_hint,ObjectGetOwnPropertyDescriptor,436,437,0
-block_hint,ObjectGetOwnPropertyDescriptor,406,407,0
-block_hint,ObjectGetOwnPropertyDescriptor,331,332,0
-block_hint,ObjectGetOwnPropertyDescriptor,197,198,1
-block_hint,ObjectGetOwnPropertyDescriptor,307,308,1
-block_hint,ObjectGetOwnPropertyDescriptor,138,139,1
-block_hint,ObjectGetOwnPropertyDescriptor,497,498,0
-block_hint,ObjectGetOwnPropertyDescriptor,505,506,1
-block_hint,ObjectGetOwnPropertyDescriptor,493,494,0
-block_hint,ObjectGetOwnPropertyDescriptor,426,427,0
-block_hint,ObjectGetOwnPropertyDescriptor,329,330,0
-block_hint,ObjectGetOwnPropertyDescriptor,31,32,1
-block_hint,ObjectGetOwnPropertyDescriptor,361,362,1
-block_hint,ObjectGetOwnPropertyDescriptor,150,151,0
-block_hint,ObjectGetOwnPropertyDescriptor,474,475,0
-block_hint,ObjectGetOwnPropertyDescriptor,390,391,0
-block_hint,ObjectGetOwnPropertyDescriptor,264,265,0
-block_hint,ObjectGetOwnPropertyDescriptor,260,261,0
-block_hint,ObjectGetOwnPropertyDescriptor,282,283,0
-block_hint,ObjectGetOwnPropertyDescriptor,284,285,1
-block_hint,ObjectGetOwnPropertyDescriptor,36,37,1
-block_hint,ObjectGetOwnPropertyDescriptor,365,366,1
-block_hint,ObjectGetOwnPropertyDescriptor,186,187,0
-block_hint,ObjectGetOwnPropertyDescriptor,268,269,1
-block_hint,ObjectKeys,32,33,1
-block_hint,ObjectKeys,27,28,1
-block_hint,ObjectKeys,23,24,1
-block_hint,ObjectKeys,25,26,0
-block_hint,ObjectKeys,17,18,1
-block_hint,ObjectKeys,5,6,1
-block_hint,ObjectKeys,21,22,1
-block_hint,ObjectKeys,9,10,0
-block_hint,ObjectKeys,7,8,1
-block_hint,ObjectKeys,14,15,1
-block_hint,ObjectPrototypeHasOwnProperty,230,231,1
-block_hint,ObjectPrototypeHasOwnProperty,205,206,1
-block_hint,ObjectPrototypeHasOwnProperty,222,223,1
-block_hint,ObjectPrototypeHasOwnProperty,239,240,0
-block_hint,ObjectPrototypeHasOwnProperty,219,220,0
-block_hint,ObjectPrototypeHasOwnProperty,209,210,1
-block_hint,ObjectPrototypeHasOwnProperty,163,164,1
-block_hint,ObjectPrototypeHasOwnProperty,235,236,0
-block_hint,ObjectPrototypeHasOwnProperty,237,238,0
-block_hint,ObjectPrototypeHasOwnProperty,233,234,0
-block_hint,ObjectPrototypeHasOwnProperty,228,229,0
-block_hint,ObjectPrototypeHasOwnProperty,192,193,1
-block_hint,ObjectPrototypeHasOwnProperty,137,138,0
-block_hint,ObjectPrototypeHasOwnProperty,211,212,0
-block_hint,ObjectPrototypeHasOwnProperty,175,176,1
-block_hint,ObjectPrototypeHasOwnProperty,141,142,0
-block_hint,ObjectPrototypeHasOwnProperty,226,227,0
-block_hint,ObjectPrototypeHasOwnProperty,76,77,0
-block_hint,ObjectPrototypeHasOwnProperty,203,204,0
-block_hint,ObjectPrototypeHasOwnProperty,34,35,1
-block_hint,ObjectPrototypeHasOwnProperty,52,53,0
-block_hint,ObjectPrototypeHasOwnProperty,36,37,1
-block_hint,ObjectPrototypeHasOwnProperty,197,198,1
-block_hint,ObjectPrototypeHasOwnProperty,40,41,0
-block_hint,ObjectPrototypeHasOwnProperty,171,172,0
-block_hint,ObjectPrototypeHasOwnProperty,178,179,1
-block_hint,ObjectPrototypeHasOwnProperty,58,59,0
-block_hint,ObjectToString,45,46,0
-block_hint,ObjectToString,60,61,0
-block_hint,ObjectToString,68,69,0
-block_hint,ObjectToString,55,56,0
-block_hint,ObjectToString,7,8,1
-block_hint,ObjectToString,5,6,1
-block_hint,ObjectToString,11,12,1
-block_hint,ObjectToString,20,21,0
-block_hint,InstanceOf_WithFeedback,50,51,1
-block_hint,InstanceOf_WithFeedback,52,53,0
-block_hint,InstanceOf_WithFeedback,54,55,1
-block_hint,InstanceOf_WithFeedback,32,33,1
-block_hint,InstanceOf_WithFeedback,34,35,1
-block_hint,InstanceOf_WithFeedback,5,6,1
-block_hint,InstanceOf_WithFeedback,14,15,1
-block_hint,InstanceOf_Baseline,50,51,1
-block_hint,InstanceOf_Baseline,54,55,1
-block_hint,InstanceOf_Baseline,32,33,1
-block_hint,InstanceOf_Baseline,34,35,1
-block_hint,InstanceOf_Baseline,5,6,1
-block_hint,InstanceOf_Baseline,14,15,1
-block_hint,ForInEnumerate,34,35,1
-block_hint,ForInEnumerate,36,37,0
-block_hint,ForInEnumerate,30,31,0
-block_hint,ForInEnumerate,32,33,1
-block_hint,ForInEnumerate,5,6,1
-block_hint,ForInEnumerate,38,39,1
-block_hint,ForInEnumerate,9,10,1
-block_hint,ForInPrepare,7,8,1
-block_hint,ForInPrepare,12,13,1
-block_hint,ForInPrepare,5,6,1
-block_hint,ForInFilter,232,233,1
-block_hint,ForInFilter,234,235,1
-block_hint,ForInFilter,225,226,0
-block_hint,ForInFilter,117,118,1
-block_hint,ForInFilter,217,218,0
-block_hint,ForInFilter,62,63,0
-block_hint,ForInFilter,129,130,1
-block_hint,ForInFilter,219,220,1
-block_hint,ForInFilter,103,104,0
-block_hint,ForInFilter,105,106,0
-block_hint,ForInFilter,66,67,0
-block_hint,ForInFilter,64,65,0
-block_hint,ForInFilter,268,269,0
-block_hint,ForInFilter,223,224,1
-block_hint,ForInFilter,109,110,1
-block_hint,ForInFilter,71,72,0
-block_hint,ForInFilter,264,265,0
-block_hint,ForInFilter,262,263,0
-block_hint,ForInFilter,249,250,0
-block_hint,ForInFilter,107,108,1
-block_hint,ForInFilter,40,41,1
-block_hint,ForInFilter,201,202,0
-block_hint,ForInFilter,42,43,1
-block_hint,ForInFilter,144,145,1
-block_hint,ForInFilter,46,47,0
-block_hint,ForInFilter,113,114,0
-block_hint,ForInFilter,131,132,0
-block_hint,ForInFilter,36,37,0
-block_hint,ForInFilter,246,247,0
-block_hint,ForInFilter,253,254,1
-block_hint,ForInFilter,189,190,0
-block_hint,ForInFilter,33,34,1
-block_hint,RegExpConstructor,55,56,1
-block_hint,RegExpConstructor,7,8,1
-block_hint,RegExpConstructor,131,132,1
-block_hint,RegExpConstructor,133,134,1
-block_hint,RegExpConstructor,70,71,0
-block_hint,RegExpConstructor,106,107,1
-block_hint,RegExpConstructor,127,128,0
-block_hint,RegExpConstructor,108,109,0
-block_hint,RegExpConstructor,82,83,1
-block_hint,RegExpConstructor,67,68,1
-block_hint,RegExpConstructor,40,41,0
-block_hint,RegExpConstructor,76,77,0
-block_hint,RegExpConstructor,104,105,1
-block_hint,RegExpConstructor,86,87,1
-block_hint,RegExpConstructor,78,79,1
-block_hint,RegExpConstructor,63,64,1
-block_hint,RegExpExecInternal,20,21,0
-block_hint,RegExpExecInternal,22,23,0
-block_hint,RegExpExecInternal,36,37,0
-block_hint,RegExpExecInternal,12,13,0
-block_hint,RegExpExecInternal,49,50,0
-block_hint,RegExpExecInternal,52,53,1
-block_hint,RegExpExecInternal,40,41,1
-block_hint,RegExpExecInternal,54,55,1
-block_hint,RegExpExecInternal,44,45,0
-block_hint,RegExpExecInternal,24,25,0
-block_hint,FindOrderedHashSetEntry,26,27,1
-block_hint,FindOrderedHashSetEntry,34,35,0
-block_hint,FindOrderedHashSetEntry,24,25,0
-block_hint,FindOrderedHashSetEntry,22,23,0
-block_hint,FindOrderedHashSetEntry,42,43,1
-block_hint,FindOrderedHashSetEntry,68,69,0
-block_hint,FindOrderedHashSetEntry,58,59,1
-block_hint,FindOrderedHashSetEntry,60,61,1
-block_hint,SetConstructor,202,203,1
-block_hint,SetConstructor,74,75,0
-block_hint,SetConstructor,11,12,1
-block_hint,SetConstructor,172,173,1
-block_hint,SetConstructor,135,136,1
-block_hint,SetConstructor,56,57,0
-block_hint,SetConstructor,58,59,1
-block_hint,SetConstructor,218,219,1
-block_hint,SetConstructor,210,211,0
-block_hint,SetConstructor,79,80,1
-block_hint,SetConstructor,23,24,1
-block_hint,SetConstructor,222,223,1
-block_hint,SetConstructor,214,215,0
-block_hint,SetConstructor,150,151,1
-block_hint,SetConstructor,25,26,1
-block_hint,SetConstructor,178,179,1
-block_hint,SetConstructor,143,144,1
-block_hint,SetConstructor,83,84,1
-block_hint,SetConstructor,85,86,1
-block_hint,SetConstructor,87,88,1
-block_hint,SetConstructor,89,90,1
-block_hint,SetConstructor,91,92,1
-block_hint,SetConstructor,93,94,1
-block_hint,SetConstructor,34,35,1
-block_hint,SetConstructor,95,96,1
-block_hint,SetConstructor,146,147,1
-block_hint,SetConstructor,152,153,1
-block_hint,SetConstructor,190,191,0
-block_hint,SetConstructor,183,184,0
-block_hint,SetConstructor,154,155,0
-block_hint,SetConstructor,105,106,0
-block_hint,SetConstructor,137,138,1
-block_hint,SetConstructor,27,28,1
-block_hint,SetConstructor,62,63,1
-block_hint,SetConstructor,176,177,0
-block_hint,SetConstructor,66,67,1
-block_hint,SetPrototypeHas,10,11,1
-block_hint,SetPrototypeHas,5,6,1
-block_hint,SetPrototypeHas,7,8,1
-block_hint,SetPrototypeAdd,98,99,1
-block_hint,SetPrototypeAdd,62,63,1
-block_hint,SetPrototypeAdd,64,65,1
-block_hint,SetPrototypeAdd,88,89,1
-block_hint,SetPrototypeAdd,90,91,0
-block_hint,SetPrototypeAdd,27,28,1
-block_hint,SetPrototypeAdd,79,80,0
-block_hint,SetPrototypeAdd,25,26,0
-block_hint,SetPrototypeAdd,23,24,0
-block_hint,SetPrototypeAdd,35,36,1
-block_hint,SetPrototypeAdd,66,67,0
-block_hint,SetPrototypeAdd,51,52,1
-block_hint,SetPrototypeAdd,53,54,1
-block_hint,SetPrototypeAdd,33,34,1
-block_hint,SetPrototypeDelete,96,97,1
-block_hint,SetPrototypeDelete,75,76,1
-block_hint,SetPrototypeDelete,77,78,1
-block_hint,SetPrototypeDelete,15,16,0
-block_hint,SetPrototypeDelete,32,33,1
-block_hint,SetPrototypeDelete,87,88,0
-block_hint,SetPrototypeDelete,30,31,0
-block_hint,SetPrototypeDelete,28,29,0
-block_hint,SetPrototypeDelete,45,46,1
-block_hint,SetPrototypeDelete,83,84,0
-block_hint,SetPrototypeDelete,79,80,0
-block_hint,SetPrototypeDelete,19,20,1
-block_hint,SetPrototypeDelete,21,22,1
-block_hint,SetPrototypeGetSize,8,9,1
-block_hint,SetPrototypeGetSize,5,6,1
-block_hint,SetPrototypeGetSize,3,4,1
-block_hint,SetPrototypeValues,13,14,1
-block_hint,SetPrototypeValues,8,9,1
-block_hint,SetPrototypeValues,10,11,1
-block_hint,SetPrototypeValues,4,5,1
-block_hint,SetPrototypeValues,6,7,1
-block_hint,SetIteratorPrototypeNext,41,42,1
-block_hint,SetIteratorPrototypeNext,28,29,1
-block_hint,SetIteratorPrototypeNext,39,40,1
-block_hint,SetIteratorPrototypeNext,17,18,0
-block_hint,SetIteratorPrototypeNext,19,20,0
-block_hint,SetIteratorPrototypeNext,37,38,1
-block_hint,SetIteratorPrototypeNext,15,16,1
-block_hint,SetIteratorPrototypeNext,23,24,1
-block_hint,SetOrSetIteratorToList,33,34,1
-block_hint,SetOrSetIteratorToList,8,9,1
-block_hint,SetOrSetIteratorToList,43,44,1
-block_hint,SetOrSetIteratorToList,31,32,1
-block_hint,SetOrSetIteratorToList,47,48,1
-block_hint,SetOrSetIteratorToList,14,15,0
-block_hint,SetOrSetIteratorToList,19,20,0
-block_hint,SetOrSetIteratorToList,24,25,1
-block_hint,StringFromCharCode,87,88,1
-block_hint,StringFromCharCode,53,54,1
-block_hint,StringFromCharCode,11,12,0
-block_hint,StringFromCharCode,81,82,1
-block_hint,StringFromCharCode,77,78,1
-block_hint,StringFromCharCode,19,20,0
-block_hint,StringFromCharCode,23,24,0
-block_hint,StringFromCharCode,58,59,0
-block_hint,StringFromCharCode,21,22,0
-block_hint,StringFromCharCode,29,30,0
-block_hint,StringFromCharCode,35,36,0
-block_hint,StringFromCharCode,33,34,0
-block_hint,StringFromCharCode,75,76,0
-block_hint,StringFromCharCode,41,42,0
-block_hint,StringFromCharCode,17,18,1
-block_hint,StringFromCharCode,44,45,1
-block_hint,StringPrototypeReplace,36,37,1
-block_hint,StringPrototypeReplace,8,9,0
-block_hint,StringPrototypeReplace,55,56,1
-block_hint,StringPrototypeReplace,51,52,1
-block_hint,StringPrototypeReplace,38,39,1
-block_hint,StringPrototypeReplace,22,23,0
-block_hint,StringPrototypeReplace,3,4,1
-block_hint,StringPrototypeReplace,24,25,0
-block_hint,StringPrototypeReplace,5,6,1
-block_hint,StringPrototypeReplace,28,29,1
-block_hint,StringPrototypeReplace,10,11,1
-block_hint,StringPrototypeReplace,57,58,0
-block_hint,StringPrototypeReplace,30,31,1
-block_hint,StringPrototypeReplace,92,93,1
-block_hint,StringPrototypeReplace,87,88,1
-block_hint,StringPrototypeReplace,80,81,1
-block_hint,StringPrototypeReplace,73,74,1
-block_hint,StringPrototypeReplace,59,60,1
-block_hint,StringPrototypeReplace,61,62,0
-block_hint,StringPrototypeReplace,63,64,1
-block_hint,StringPrototypeReplace,53,54,1
-block_hint,StringPrototypeReplace,42,43,1
-block_hint,StringPrototypeReplace,14,15,1
-block_hint,StringPrototypeReplace,90,91,1
-block_hint,StringPrototypeReplace,82,83,1
-block_hint,StringPrototypeReplace,76,77,0
-block_hint,StringPrototypeReplace,78,79,1
-block_hint,StringPrototypeReplace,70,71,1
-block_hint,StringPrototypeReplace,49,50,1
-block_hint,StringPrototypeReplace,16,17,1
-block_hint,StringPrototypeReplace,18,19,0
-block_hint,StringPrototypeReplace,26,27,1
-block_hint,StringPrototypeSplit,125,126,1
-block_hint,StringPrototypeSplit,112,113,1
-block_hint,StringPrototypeSplit,92,93,1
-block_hint,StringPrototypeSplit,35,36,0
-block_hint,StringPrototypeSplit,114,115,1
-block_hint,StringPrototypeSplit,105,106,1
-block_hint,StringPrototypeSplit,94,95,1
-block_hint,StringPrototypeSplit,64,65,0
-block_hint,StringPrototypeSplit,8,9,1
-block_hint,StringPrototypeSplit,66,67,0
-block_hint,StringPrototypeSplit,10,11,1
-block_hint,StringPrototypeSplit,77,78,1
-block_hint,StringPrototypeSplit,37,38,1
-block_hint,StringPrototypeSplit,116,117,0
-block_hint,StringPrototypeSplit,79,80,1
-block_hint,StringPrototypeSplit,168,169,1
-block_hint,StringPrototypeSplit,152,153,1
-block_hint,StringPrototypeSplit,128,129,1
-block_hint,StringPrototypeSplit,122,123,1
-block_hint,StringPrototypeSplit,107,108,1
-block_hint,StringPrototypeSplit,83,84,0
-block_hint,StringPrototypeSplit,68,69,0
-block_hint,StringPrototypeSplit,85,86,1
-block_hint,StringPrototypeSplit,70,71,1
-block_hint,StringPrototypeSplit,88,89,1
-block_hint,StringPrototypeSplit,25,26,0
-block_hint,StringPrototypeSplit,72,73,1
-block_hint,StringPrototypeSplit,42,43,0
-block_hint,StringPrototypeSplit,110,111,1
-block_hint,StringPrototypeSplit,90,91,0
-block_hint,StringPrototypeSplit,27,28,1
-block_hint,StringPrototypeSplit,16,17,1
-block_hint,StringPrototypeSplit,18,19,1
-block_hint,StringPrototypeSplit,20,21,1
-block_hint,StringPrototypeSplit,50,51,1
-block_hint,TypedArrayConstructor,14,15,1
-block_hint,TypedArrayConstructor,11,12,1
-block_hint,TypedArrayConstructor,2,3,0
-block_hint,TypedArrayPrototypeByteLength,69,70,1
-block_hint,TypedArrayPrototypeByteLength,43,44,1
-block_hint,TypedArrayPrototypeByteLength,45,46,1
-block_hint,TypedArrayPrototypeByteLength,71,72,0
-block_hint,TypedArrayPrototypeByteLength,73,74,0
-block_hint,TypedArrayPrototypeByteLength,65,66,0
-block_hint,TypedArrayPrototypeByteLength,33,34,0
-block_hint,TypedArrayPrototypeLength,50,51,1
-block_hint,TypedArrayPrototypeLength,33,34,1
-block_hint,TypedArrayPrototypeLength,35,36,1
-block_hint,TypedArrayPrototypeLength,52,53,0
-block_hint,TypedArrayPrototypeLength,44,45,0
-block_hint,TypedArrayPrototypeLength,28,29,0
-block_hint,TypedArrayPrototypeLength,19,20,0
-block_hint,WeakMapConstructor,351,352,1
-block_hint,WeakMapConstructor,271,272,0
-block_hint,WeakMapConstructor,119,120,0
-block_hint,WeakMapConstructor,14,15,1
-block_hint,WeakMapConstructor,293,294,1
-block_hint,WeakMapConstructor,230,231,1
-block_hint,WeakMapConstructor,93,94,0
-block_hint,WeakMapConstructor,95,96,1
-block_hint,WeakMapConstructor,295,296,1
-block_hint,WeakMapConstructor,331,332,0
-block_hint,WeakMapConstructor,342,343,0
-block_hint,WeakMapConstructor,239,240,0
-block_hint,WeakMapConstructor,123,124,0
-block_hint,WeakMapConstructor,241,242,0
-block_hint,WeakMapConstructor,109,110,0
-block_hint,WeakMapConstructor,243,244,1
-block_hint,WeakMapConstructor,211,212,1
-block_hint,WeakMapConstructor,28,29,1
-block_hint,WeakMapConstructor,30,31,1
-block_hint,WeakMapConstructor,32,33,1
-block_hint,WeakMapConstructor,98,99,0
-block_hint,WeakMapConstructor,117,118,1
-block_hint,WeakMapLookupHashIndex,9,10,1
-block_hint,WeakMapLookupHashIndex,31,32,1
-block_hint,WeakMapLookupHashIndex,11,12,0
-block_hint,WeakMapLookupHashIndex,13,14,0
-block_hint,WeakMapLookupHashIndex,25,26,1
-block_hint,WeakMapLookupHashIndex,33,34,1
-block_hint,WeakMapLookupHashIndex,27,28,0
-block_hint,WeakMapLookupHashIndex,23,24,0
-block_hint,WeakMapGet,12,13,1
-block_hint,WeakMapGet,7,8,1
-block_hint,WeakMapGet,9,10,1
-block_hint,WeakMapGet,3,4,1
-block_hint,WeakMapPrototypeHas,10,11,1
-block_hint,WeakMapPrototypeHas,5,6,1
-block_hint,WeakMapPrototypeHas,7,8,1
-block_hint,WeakMapPrototypeSet,24,25,1
-block_hint,WeakMapPrototypeSet,5,6,1
-block_hint,WeakMapPrototypeSet,7,8,1
-block_hint,WeakMapPrototypeSet,13,14,1
-block_hint,WeakMapPrototypeSet,22,23,1
-block_hint,WeakMapPrototypeSet,15,16,0
-block_hint,WeakMapPrototypeSet,9,10,0
-block_hint,WeakCollectionSet,17,18,1
-block_hint,WeakCollectionSet,20,21,0
-block_hint,WeakCollectionSet,7,8,1
-block_hint,WeakCollectionSet,13,14,0
-block_hint,AsyncGeneratorResolve,9,10,1
-block_hint,AsyncGeneratorResolve,3,4,1
-block_hint,AsyncGeneratorResolve,11,12,0
-block_hint,AsyncGeneratorResolve,7,8,0
-block_hint,AsyncGeneratorYieldWithAwait,24,25,1
-block_hint,AsyncGeneratorYieldWithAwait,19,20,0
-block_hint,AsyncGeneratorYieldWithAwait,6,7,1
-block_hint,AsyncGeneratorYieldWithAwait,42,43,1
-block_hint,AsyncGeneratorYieldWithAwait,37,38,0
-block_hint,AsyncGeneratorYieldWithAwait,28,29,1
-block_hint,AsyncGeneratorYieldWithAwait,8,9,1
-block_hint,AsyncGeneratorYieldWithAwait,10,11,1
-block_hint,AsyncGeneratorYieldWithAwait,12,13,1
-block_hint,AsyncGeneratorYieldWithAwait,14,15,1
-block_hint,AsyncGeneratorYieldWithAwait,22,23,0
-block_hint,AsyncGeneratorResumeNext,18,19,0
-block_hint,AsyncGeneratorResumeNext,14,15,0
-block_hint,AsyncGeneratorPrototypeNext,27,28,1
-block_hint,AsyncGeneratorPrototypeNext,16,17,0
-block_hint,AsyncGeneratorPrototypeNext,4,5,1
-block_hint,AsyncGeneratorPrototypeNext,34,35,1
-block_hint,AsyncGeneratorPrototypeNext,29,30,0
-block_hint,AsyncGeneratorPrototypeNext,18,19,1
-block_hint,AsyncGeneratorPrototypeNext,20,21,1
-block_hint,AsyncGeneratorPrototypeNext,22,23,1
-block_hint,AsyncGeneratorPrototypeNext,6,7,1
-block_hint,AsyncGeneratorPrototypeNext,11,12,0
-block_hint,AsyncGeneratorAwaitUncaught,24,25,1
-block_hint,AsyncGeneratorAwaitUncaught,19,20,1
-block_hint,AsyncGeneratorAwaitUncaught,2,3,1
-block_hint,AsyncGeneratorAwaitUncaught,30,31,1
-block_hint,AsyncGeneratorAwaitUncaught,32,33,0
-block_hint,AsyncGeneratorAwaitUncaught,28,29,1
-block_hint,AsyncGeneratorAwaitUncaught,8,9,1
-block_hint,AsyncGeneratorAwaitUncaught,10,11,1
-block_hint,AsyncGeneratorAwaitUncaught,12,13,1
-block_hint,AsyncGeneratorAwaitUncaught,14,15,1
-block_hint,AsyncGeneratorAwaitUncaught,22,23,0
-block_hint,AsyncGeneratorAwaitResolveClosure,8,9,1
-block_hint,AsyncGeneratorAwaitResolveClosure,2,3,1
-block_hint,AsyncGeneratorAwaitResolveClosure,6,7,0
-block_hint,AsyncGeneratorYieldWithAwaitResolveClosure,5,6,1
-block_hint,AsyncGeneratorYieldWithAwaitResolveClosure,2,3,1
-block_hint,StringAdd_CheckNone,19,20,1
-block_hint,StringAdd_CheckNone,58,59,0
-block_hint,StringAdd_CheckNone,78,79,1
-block_hint,StringAdd_CheckNone,42,43,1
-block_hint,StringAdd_CheckNone,60,61,0
-block_hint,StringAdd_CheckNone,94,95,0
-block_hint,StringAdd_CheckNone,84,85,0
-block_hint,StringAdd_CheckNone,88,89,0
-block_hint,StringAdd_CheckNone,64,65,1
-block_hint,StringAdd_CheckNone,76,77,1
-block_hint,StringAdd_CheckNone,55,56,1
-block_hint,StringAdd_CheckNone,13,14,0
-block_hint,StringAdd_CheckNone,15,16,0
-block_hint,StringAdd_CheckNone,92,93,1
-block_hint,StringAdd_CheckNone,82,83,1
-block_hint,StringAdd_CheckNone,34,35,0
-block_hint,StringAdd_CheckNone,38,39,0
-block_hint,StringAdd_CheckNone,40,41,1
-block_hint,StringAdd_CheckNone,53,54,1
-block_hint,StringAdd_CheckNone,11,12,0
-block_hint,StringAdd_CheckNone,90,91,1
-block_hint,StringAdd_CheckNone,80,81,1
-block_hint,StringAdd_CheckNone,26,27,0
-block_hint,StringAdd_CheckNone,30,31,0
-block_hint,SubString,63,64,1
-block_hint,SubString,97,98,1
-block_hint,SubString,58,59,1
-block_hint,SubString,56,57,1
-block_hint,SubString,110,111,0
-block_hint,SubString,19,20,0
-block_hint,SubString,21,22,0
-block_hint,SubString,114,115,1
-block_hint,SubString,102,103,1
-block_hint,SubString,38,39,0
-block_hint,SubString,17,18,0
-block_hint,SubString,116,117,1
-block_hint,SubString,104,105,1
-block_hint,SubString,42,43,0
-block_hint,SubString,75,76,1
-block_hint,SubString,127,128,0
-block_hint,SubString,99,100,1
-block_hint,SubString,34,35,1
-block_hint,SubString,31,32,0
-block_hint,GetProperty,56,57,1
-block_hint,GetProperty,101,102,0
-block_hint,GetProperty,175,176,1
-block_hint,GetProperty,205,206,0
-block_hint,GetProperty,165,166,1
-block_hint,GetProperty,133,134,1
-block_hint,GetProperty,60,61,1
-block_hint,GetProperty,139,140,0
-block_hint,GetProperty,141,142,0
-block_hint,GetProperty,110,111,0
-block_hint,GetProperty,62,63,0
-block_hint,GetProperty,167,168,0
-block_hint,GetProperty,218,219,0
-block_hint,GetProperty,208,209,1
-block_hint,GetProperty,112,113,0
-block_hint,GetProperty,229,230,0
-block_hint,GetProperty,220,221,0
-block_hint,GetProperty,216,217,0
-block_hint,GetProperty,35,36,1
-block_hint,GetProperty,222,223,0
-block_hint,GetProperty,37,38,1
-block_hint,GetProperty,147,148,0
-block_hint,GetProperty,187,188,1
-block_hint,GetProperty,41,42,0
-block_hint,GetProperty,43,44,0
-block_hint,GetProperty,157,158,0
-block_hint,GetProperty,161,162,1
-block_hint,GetProperty,151,152,0
-block_hint,GetProperty,47,48,0
-block_hint,GetProperty,231,232,0
-block_hint,GetProperty,196,197,1
-block_hint,GetProperty,92,93,0
-block_hint,GetProperty,94,95,0
-block_hint,GetProperty,96,97,0
-block_hint,GetProperty,163,164,0
-block_hint,GetProperty,98,99,1
-block_hint,GetProperty,203,204,0
-block_hint,GetProperty,226,227,0
-block_hint,GetProperty,233,234,1
-block_hint,GetProperty,201,202,0
-block_hint,GetProperty,199,200,0
-block_hint,GetProperty,22,23,0
-block_hint,GetProperty,182,183,1
-block_hint,GetProperty,104,105,1
-block_hint,GetPropertyWithReceiver,58,59,1
-block_hint,GetPropertyWithReceiver,60,61,1
-block_hint,GetPropertyWithReceiver,203,204,0
-block_hint,GetPropertyWithReceiver,174,175,1
-block_hint,GetPropertyWithReceiver,211,212,0
-block_hint,GetPropertyWithReceiver,112,113,0
-block_hint,GetPropertyWithReceiver,162,163,1
-block_hint,GetPropertyWithReceiver,138,139,1
-block_hint,GetPropertyWithReceiver,62,63,1
-block_hint,GetPropertyWithReceiver,144,145,0
-block_hint,GetPropertyWithReceiver,146,147,0
-block_hint,GetPropertyWithReceiver,114,115,0
-block_hint,GetPropertyWithReceiver,64,65,0
-block_hint,GetPropertyWithReceiver,164,165,0
-block_hint,GetPropertyWithReceiver,215,216,1
-block_hint,GetPropertyWithReceiver,117,118,0
-block_hint,GetPropertyWithReceiver,236,237,0
-block_hint,GetPropertyWithReceiver,232,233,0
-block_hint,GetPropertyWithReceiver,223,224,0
-block_hint,GetPropertyWithReceiver,148,149,1
-block_hint,GetPropertyWithReceiver,38,39,1
-block_hint,GetPropertyWithReceiver,234,235,0
-block_hint,GetPropertyWithReceiver,40,41,1
-block_hint,GetPropertyWithReceiver,183,184,0
-block_hint,GetPropertyWithReceiver,34,35,0
-block_hint,GetPropertyWithReceiver,229,230,1
-block_hint,GetPropertyWithReceiver,205,206,0
-block_hint,SetProperty,379,380,1
-block_hint,SetProperty,381,382,0
-block_hint,SetProperty,1213,1214,0
-block_hint,SetProperty,928,929,1
-block_hint,SetProperty,1031,1032,1
-block_hint,SetProperty,1033,1034,0
-block_hint,SetProperty,733,734,0
-block_hint,SetProperty,922,923,1
-block_hint,SetProperty,413,414,0
-block_hint,SetProperty,415,416,0
-block_hint,SetProperty,256,257,1
-block_hint,SetProperty,417,418,0
-block_hint,SetProperty,630,631,1
-block_hint,SetProperty,92,93,1
-block_hint,SetProperty,94,95,1
-block_hint,SetProperty,1098,1099,0
-block_hint,SetProperty,811,812,0
-block_hint,SetProperty,813,814,1
-block_hint,SetProperty,815,816,0
-block_hint,SetProperty,104,105,1
-block_hint,SetProperty,108,109,1
-block_hint,SetProperty,429,430,1
-block_hint,SetProperty,110,111,1
-block_hint,SetProperty,106,107,1
-block_hint,CreateDataProperty,319,320,1
-block_hint,CreateDataProperty,321,322,0
-block_hint,CreateDataProperty,990,991,0
-block_hint,CreateDataProperty,782,783,1
-block_hint,CreateDataProperty,865,866,1
-block_hint,CreateDataProperty,539,540,1
-block_hint,CreateDataProperty,648,649,1
-block_hint,CreateDataProperty,650,651,0
-block_hint,CreateDataProperty,912,913,1
-block_hint,CreateDataProperty,333,334,0
-block_hint,CreateDataProperty,55,56,1
-block_hint,CreateDataProperty,543,544,1
-block_hint,CreateDataProperty,57,58,1
-block_hint,FindNonDefaultConstructorOrConstruct,12,13,0
-block_hint,FindNonDefaultConstructorOrConstruct,6,7,0
-block_hint,FindNonDefaultConstructorOrConstruct,14,15,1
-block_hint,FindNonDefaultConstructorOrConstruct,16,17,0
-block_hint,FindNonDefaultConstructorOrConstruct,4,5,1
-block_hint,FindNonDefaultConstructorOrConstruct,18,19,0
-block_hint,ArrayPrototypeConcat,79,80,1
-block_hint,ArrayPrototypeConcat,54,55,1
-block_hint,ArrayPrototypeConcat,63,64,1
-block_hint,ArrayPrototypeConcat,74,75,0
-block_hint,ArrayPrototypeConcat,81,82,0
-block_hint,ArrayPrototypeConcat,70,71,1
-block_hint,ArrayPrototypeConcat,37,38,1
-block_hint,ArrayPrototypeConcat,16,17,1
-block_hint,ArrayPrototypeConcat,3,4,1
-block_hint,ArrayPrototypeConcat,25,26,1
-block_hint,ArrayPrototypeConcat,9,10,0
-block_hint,ArrayPrototypeConcat,20,21,1
-block_hint,ArrayPrototypeConcat,30,31,0
-block_hint,ArrayPrototypeConcat,42,43,0
-block_hint,ArrayPrototypeConcat,72,73,1
-block_hint,ArrayPrototypeConcat,39,40,1
-block_hint,ArrayPrototypeConcat,18,19,1
-block_hint,ArrayPrototypeConcat,5,6,1
-block_hint,ArrayPrototypeConcat,57,58,1
-block_hint,ArrayPrototypeConcat,59,60,0
-block_hint,ArrayPrototypeConcat,66,67,0
-block_hint,ArrayPrototypeConcat,33,34,1
-block_hint,ArrayPrototypeConcat,68,69,1
-block_hint,ArrayPrototypeConcat,35,36,1
-block_hint,ArrayPrototypeConcat,27,28,1
-block_hint,ArrayPrototypeConcat,11,12,1
-block_hint,ArrayEvery,73,74,1
-block_hint,ArrayEvery,31,32,0
-block_hint,ArrayEvery,122,123,1
-block_hint,ArrayEvery,116,117,1
-block_hint,ArrayEvery,91,92,1
-block_hint,ArrayEvery,93,94,1
-block_hint,ArrayEvery,99,100,1
-block_hint,ArrayEvery,105,106,1
-block_hint,ArrayEvery,107,108,1
-block_hint,ArrayEvery,97,98,1
-block_hint,ArrayEvery,49,50,0
-block_hint,ArrayEvery,102,103,1
-block_hint,ArrayEvery,66,67,1
-block_hint,ArrayEvery,45,46,1
-block_hint,ArrayEvery,12,13,1
-block_hint,ArrayEvery,57,58,1
-block_hint,ArrayEvery,28,29,0
-block_hint,ArrayEvery,68,69,1
-block_hint,ArrayEvery,70,71,0
-block_hint,ArrayEvery,51,52,0
-block_hint,ArrayEvery,47,48,0
-block_hint,ArrayEvery,18,19,0
-block_hint,ArrayEvery,20,21,1
-block_hint,ArrayEvery,61,62,0
-block_hint,ArrayEvery,109,110,1
-block_hint,ArrayEvery,87,88,0
-block_hint,ArrayEvery,89,90,0
-block_hint,ArrayEvery,111,112,0
-block_hint,ArrayEvery,79,80,0
-block_hint,ArrayFilter,194,195,1
-block_hint,ArrayFilter,84,85,0
-block_hint,ArrayFilter,301,302,1
-block_hint,ArrayFilter,292,293,1
-block_hint,ArrayFilter,228,229,1
-block_hint,ArrayFilter,230,231,1
-block_hint,ArrayFilter,249,250,1
-block_hint,ArrayFilter,274,275,1
-block_hint,ArrayFilter,276,277,1
-block_hint,ArrayFilter,242,243,0
-block_hint,ArrayFilter,280,281,1
-block_hint,ArrayFilter,196,197,1
-block_hint,ArrayFilter,123,124,1
-block_hint,ArrayFilter,22,23,1
-block_hint,ArrayFilter,198,199,1
-block_hint,ArrayFilter,125,126,0
-block_hint,ArrayFilter,24,25,1
-block_hint,ArrayFilter,271,272,1
-block_hint,ArrayFilter,167,168,0
-block_hint,ArrayFilter,282,283,1
-block_hint,ArrayFilter,200,201,1
-block_hint,ArrayFilter,127,128,1
-block_hint,ArrayFilter,26,27,1
-block_hint,ArrayFilter,210,211,1
-block_hint,ArrayFilter,212,213,0
-block_hint,ArrayFilter,287,288,1
-block_hint,ArrayFilter,214,215,1
-block_hint,ArrayFilter,216,217,1
-block_hint,ArrayFilter,218,219,1
-block_hint,ArrayFilter,202,203,1
-block_hint,ArrayFilter,129,130,0
-block_hint,ArrayFilter,28,29,1
-block_hint,ArrayFilter,172,173,0
-block_hint,ArrayFilter,103,104,0
-block_hint,ArrayFilter,245,246,1
-block_hint,ArrayFilter,247,248,0
-block_hint,ArrayFilter,204,205,0
-block_hint,ArrayFilter,131,132,0
-block_hint,ArrayFilter,42,43,0
-block_hint,ArrayFilter,44,45,1
-block_hint,ArrayFilter,149,150,0
-block_hint,ArrayFilter,252,253,1
-block_hint,ArrayFilter,178,179,0
-block_hint,ArrayFilter,180,181,0
-block_hint,ArrayFilter,254,255,0
-block_hint,ArrayFilter,256,257,0
-block_hint,ArrayFilter,258,259,1
-block_hint,ArrayFilter,260,261,0
-block_hint,ArrayFilter,262,263,1
-block_hint,ArrayFilter,284,285,0
-block_hint,ArrayFilter,240,241,0
-block_hint,ArrayFilter,162,163,0
-block_hint,ArrayFilter,95,96,0
-block_hint,ArrayFilter,188,189,1
-block_hint,ArrayFilter,60,61,0
-block_hint,ArrayFilter,64,65,1
-block_hint,ArrayFilter,50,51,1
-block_hint,ArrayForEach,70,71,1
-block_hint,ArrayForEach,29,30,0
-block_hint,ArrayForEach,99,100,1
-block_hint,ArrayForEach,95,96,1
-block_hint,ArrayForEach,76,77,1
-block_hint,ArrayForEach,78,79,1
-block_hint,ArrayForEach,84,85,1
-block_hint,ArrayForEach,90,91,1
-block_hint,ArrayForEach,92,93,1
-block_hint,ArrayForEach,47,48,0
-block_hint,ArrayForEach,87,88,1
-block_hint,ArrayForEach,63,64,1
-block_hint,ArrayForEach,43,44,1
-block_hint,ArrayForEach,12,13,1
-block_hint,ArrayForEach,53,54,1
-block_hint,ArrayForEach,26,27,0
-block_hint,ArrayForEach,65,66,1
-block_hint,ArrayForEach,67,68,0
-block_hint,ArrayForEach,49,50,0
-block_hint,ArrayForEach,45,46,0
-block_hint,ArrayForEach,18,19,0
-block_hint,ArrayForEach,20,21,1
-block_hint,ArrayForEach,58,59,0
-block_hint,ArrayFrom,225,226,1
-block_hint,ArrayFrom,76,77,1
-block_hint,ArrayFrom,78,79,1
-block_hint,ArrayFrom,8,9,1
-block_hint,ArrayFrom,342,343,1
-block_hint,ArrayFrom,338,339,1
-block_hint,ArrayFrom,327,328,0
-block_hint,ArrayFrom,311,312,1
-block_hint,ArrayFrom,309,310,0
-block_hint,ArrayFrom,80,81,1
-block_hint,ArrayFrom,10,11,1
-block_hint,ArrayFrom,322,323,1
-block_hint,ArrayFrom,305,306,0
-block_hint,ArrayFrom,245,246,1
-block_hint,ArrayFrom,266,267,0
-block_hint,ArrayFrom,82,83,1
-block_hint,ArrayFrom,12,13,1
-block_hint,ArrayFrom,268,269,1
-block_hint,ArrayFrom,213,214,0
-block_hint,ArrayFrom,290,291,1
-block_hint,ArrayFrom,248,249,0
-block_hint,ArrayFrom,285,286,1
-block_hint,ArrayFrom,281,282,0
-block_hint,ArrayFrom,188,189,1
-block_hint,ArrayFrom,88,89,1
-block_hint,ArrayFrom,18,19,1
-block_hint,ArrayFrom,215,216,1
-block_hint,ArrayFrom,72,73,1
-block_hint,ArrayIsArray,13,14,1
-block_hint,ArrayIsArray,9,10,1
-block_hint,ArrayIsArray,7,8,0
-block_hint,LoadJoinElement_FastSmiOrObjectElements_0,2,3,1
-block_hint,LoadJoinElement_FastSmiOrObjectElements_0,4,5,0
-block_hint,LoadJoinElement_FastDoubleElements_0,3,4,1
-block_hint,LoadJoinElement_FastDoubleElements_0,5,6,0
-block_hint,LoadJoinElement_FastDoubleElements_0,7,8,1
-block_hint,JoinStackPush,28,29,1
-block_hint,JoinStackPush,6,7,1
-block_hint,JoinStackPush,10,11,0
-block_hint,JoinStackPop,9,10,1
-block_hint,JoinStackPop,4,5,1
-block_hint,ArrayPrototypeJoin,512,513,1
-block_hint,ArrayPrototypeJoin,450,451,1
-block_hint,ArrayPrototypeJoin,417,418,1
-block_hint,ArrayPrototypeJoin,330,331,1
-block_hint,ArrayPrototypeJoin,332,333,1
-block_hint,ArrayPrototypeJoin,363,364,1
-block_hint,ArrayPrototypeJoin,336,337,0
-block_hint,ArrayPrototypeJoin,179,180,0
-block_hint,ArrayPrototypeJoin,468,469,1
-block_hint,ArrayPrototypeJoin,434,435,1
-block_hint,ArrayPrototypeJoin,324,325,0
-block_hint,ArrayPrototypeJoin,226,227,1
-block_hint,ArrayPrototypeJoin,30,31,1
-block_hint,ArrayPrototypeJoin,181,182,0
-block_hint,ArrayPrototypeJoin,32,33,1
-block_hint,ArrayPrototypeJoin,385,386,1
-block_hint,ArrayPrototypeJoin,321,322,0
-block_hint,ArrayPrototypeJoin,143,144,1
-block_hint,ArrayPrototypeJoin,487,488,1
-block_hint,ArrayPrototypeJoin,452,453,0
-block_hint,ArrayPrototypeJoin,421,422,0
-block_hint,ArrayPrototypeJoin,365,366,1
-block_hint,ArrayPrototypeJoin,183,184,1
-block_hint,ArrayPrototypeJoin,38,39,1
-block_hint,ArrayPrototypeJoin,454,455,1
-block_hint,ArrayPrototypeJoin,423,424,0
-block_hint,ArrayPrototypeJoin,293,294,1
-block_hint,ArrayPrototypeJoin,426,427,0
-block_hint,ArrayPrototypeJoin,341,342,0
-block_hint,ArrayPrototypeJoin,189,190,0
-block_hint,ArrayPrototypeJoin,230,231,1
-block_hint,ArrayPrototypeJoin,145,146,1
-block_hint,ArrayPrototypeJoin,477,478,0
-block_hint,ArrayPrototypeJoin,481,482,1
-block_hint,ArrayPrototypeJoin,519,520,0
-block_hint,ArrayPrototypeJoin,515,516,0
-block_hint,ArrayPrototypeJoin,508,509,1
-block_hint,ArrayPrototypeJoin,483,484,1
-block_hint,ArrayPrototypeJoin,479,480,0
-block_hint,ArrayPrototypeJoin,147,148,0
-block_hint,ArrayPrototypeJoin,149,150,0
-block_hint,ArrayPrototypeJoin,463,464,0
-block_hint,ArrayPrototypeJoin,465,466,0
-block_hint,ArrayPrototypeJoin,448,449,1
-block_hint,ArrayPrototypeJoin,405,406,1
-block_hint,ArrayPrototypeJoin,407,408,1
-block_hint,ArrayPrototypeJoin,409,410,1
-block_hint,ArrayPrototypeJoin,411,412,1
-block_hint,ArrayPrototypeJoin,197,198,1
-block_hint,ArrayPrototypeJoin,252,253,0
-block_hint,ArrayPrototypeJoin,254,255,0
-block_hint,ArrayPrototypeJoin,300,301,0
-block_hint,ArrayPrototypeJoin,260,261,0
-block_hint,ArrayPrototypeJoin,262,263,0
-block_hint,ArrayPrototypeJoin,203,204,1
-block_hint,ArrayPrototypeJoin,72,73,1
-block_hint,ArrayPrototypeJoin,377,378,0
-block_hint,ArrayPrototypeJoin,303,304,1
-block_hint,ArrayPrototypeJoin,207,208,1
-block_hint,ArrayPrototypeJoin,268,269,0
-block_hint,ArrayPrototypeJoin,270,271,0
-block_hint,ArrayPrototypeJoin,209,210,1
-block_hint,ArrayPrototypeJoin,86,87,1
-block_hint,ArrayPrototypeJoin,305,306,1
-block_hint,ArrayPrototypeJoin,102,103,0
-block_hint,ArrayPrototypeJoin,104,105,0
-block_hint,ArrayPrototypeJoin,401,402,1
-block_hint,ArrayPrototypeJoin,383,384,1
-block_hint,ArrayPrototypeJoin,100,101,0
-block_hint,ArrayPrototypeJoin,399,400,1
-block_hint,ArrayPrototypeJoin,381,382,1
-block_hint,ArrayPrototypeJoin,96,97,1
-block_hint,ArrayPrototypeJoin,348,349,1
-block_hint,ArrayPrototypeJoin,307,308,0
-block_hint,ArrayPrototypeJoin,215,216,0
-block_hint,ArrayPrototypeJoin,106,107,1
-block_hint,ArrayPrototypeJoin,108,109,0
-block_hint,ArrayPrototypeJoin,110,111,1
-block_hint,ArrayPrototypeJoin,282,283,1
-block_hint,ArrayPrototypeJoin,139,140,1
-block_hint,ArrayPrototypeToString,14,15,1
-block_hint,ArrayPrototypeToString,11,12,1
-block_hint,ArrayPrototypeToString,8,9,1
-block_hint,ArrayPrototypeToString,5,6,1
-block_hint,ArrayPrototypeToString,3,4,1
-block_hint,ArrayPrototypeLastIndexOf,279,280,1
-block_hint,ArrayPrototypeLastIndexOf,261,262,1
-block_hint,ArrayPrototypeLastIndexOf,245,246,1
-block_hint,ArrayPrototypeLastIndexOf,175,176,1
-block_hint,ArrayPrototypeLastIndexOf,177,178,1
-block_hint,ArrayPrototypeLastIndexOf,91,92,0
-block_hint,ArrayPrototypeLastIndexOf,41,42,1
-block_hint,ArrayPrototypeLastIndexOf,375,376,0
-block_hint,ArrayPrototypeLastIndexOf,361,362,0
-block_hint,ArrayPrototypeLastIndexOf,367,368,0
-block_hint,ArrayPrototypeLastIndexOf,358,359,0
-block_hint,ArrayPrototypeLastIndexOf,335,336,0
-block_hint,ArrayPrototypeLastIndexOf,324,325,1
-block_hint,ArrayPrototypeLastIndexOf,338,339,0
-block_hint,ArrayPrototypeLastIndexOf,328,329,0
-block_hint,ArrayPrototypeLastIndexOf,315,316,0
-block_hint,ArrayPrototypeLastIndexOf,300,301,0
-block_hint,ArrayPrototypeLastIndexOf,313,314,0
-block_hint,ArrayPrototypeLastIndexOf,298,299,0
-block_hint,ArrayPrototypeLastIndexOf,281,282,1
-block_hint,ArrayPrototypeLastIndexOf,252,253,1
-block_hint,ArrayPrototypeLastIndexOf,194,195,1
-block_hint,ArrayPrototypeLastIndexOf,83,84,1
-block_hint,ArrayPrototypeLastIndexOf,73,74,1
-block_hint,ArrayPrototypeLastIndexOf,21,22,1
-block_hint,ArrayPrototypeLastIndexOf,85,86,1
-block_hint,ArrayPrototypeLastIndexOf,77,78,0
-block_hint,ArrayPrototypeLastIndexOf,29,30,1
-block_hint,ArrayPrototypeLastIndexOf,60,61,0
-block_hint,ArrayPrototypeLastIndexOf,98,99,1
-block_hint,ArrayPrototypeLastIndexOf,56,57,0
-block_hint,ArrayPrototypeLastIndexOf,23,24,1
-block_hint,ArrayPrototypeLastIndexOf,58,59,0
-block_hint,ArrayPrototypeLastIndexOf,214,215,0
-block_hint,ArrayPrototypeLastIndexOf,220,221,1
-block_hint,ArrayPrototypeLastIndexOf,239,240,0
-block_hint,ArrayPrototypeLastIndexOf,212,213,0
-block_hint,ArrayPrototypeLastIndexOf,145,146,0
-block_hint,ArrayPrototypeLastIndexOf,129,130,1
-block_hint,ArrayPrototypeLastIndexOf,31,32,0
-block_hint,ArrayMap,163,164,1
-block_hint,ArrayMap,72,73,0
-block_hint,ArrayMap,267,268,1
-block_hint,ArrayMap,248,249,1
-block_hint,ArrayMap,194,195,1
-block_hint,ArrayMap,196,197,1
-block_hint,ArrayMap,215,216,1
-block_hint,ArrayMap,229,230,1
-block_hint,ArrayMap,231,232,1
-block_hint,ArrayMap,257,258,1
-block_hint,ArrayMap,212,213,0
-block_hint,ArrayMap,226,227,1
-block_hint,ArrayMap,233,234,1
-block_hint,ArrayMap,165,166,1
-block_hint,ArrayMap,114,115,1
-block_hint,ArrayMap,23,24,1
-block_hint,ArrayMap,241,242,1
-block_hint,ArrayMap,217,218,0
-block_hint,ArrayMap,161,162,0
-block_hint,ArrayMap,74,75,0
-block_hint,ArrayMap,180,181,1
-block_hint,ArrayMap,159,160,1
-block_hint,ArrayMap,55,56,0
-block_hint,ArrayMap,280,281,1
-block_hint,ArrayMap,277,278,0
-block_hint,ArrayMap,260,261,0
-block_hint,ArrayMap,235,236,0
-block_hint,ArrayMap,201,202,0
-block_hint,ArrayMap,116,117,0
-block_hint,ArrayMap,29,30,0
-block_hint,ArrayMap,31,32,1
-block_hint,ArrayMap,132,133,0
-block_hint,ArrayMap,33,34,1
-block_hint,ArrayMap,120,121,0
-block_hint,ArrayMap,37,38,1
-block_hint,ArrayMap,35,36,1
-block_hint,ArrayMap,265,266,0
-block_hint,ArrayMap,209,210,0
-block_hint,ArrayMap,151,152,0
-block_hint,ArrayMap,45,46,1
-block_hint,ArrayMap,153,154,0
-block_hint,ArrayMap,89,90,1
-block_hint,ArrayMap,83,84,0
-block_hint,ArrayMap,85,86,1
-block_hint,ArrayMap,182,183,1
-block_hint,ArrayMap,184,185,0
-block_hint,ArrayMap,62,63,0
-block_hint,ArrayMap,64,65,1
-block_hint,ArrayMap,96,97,1
-block_hint,ArrayMap,47,48,1
-block_hint,ArrayMap,155,156,1
-block_hint,ArrayMap,98,99,1
-block_hint,ArrayMap,49,50,1
-block_hint,ArrayMap,136,137,1
-block_hint,ArrayReduce,81,82,1
-block_hint,ArrayReduce,30,31,0
-block_hint,ArrayReduce,124,125,1
-block_hint,ArrayReduce,120,121,1
-block_hint,ArrayReduce,89,90,1
-block_hint,ArrayReduce,91,92,1
-block_hint,ArrayReduce,101,102,1
-block_hint,ArrayReduce,111,112,1
-block_hint,ArrayReduce,113,114,1
-block_hint,ArrayReduce,95,96,1
-block_hint,ArrayReduce,104,105,0
-block_hint,ArrayReduce,49,50,0
-block_hint,ArrayReduce,106,107,1
-block_hint,ArrayReduce,65,66,1
-block_hint,ArrayReduce,45,46,1
-block_hint,ArrayReduce,12,13,1
-block_hint,ArrayReduce,53,54,1
-block_hint,ArrayReduce,26,27,0
-block_hint,ArrayReduce,99,100,0
-block_hint,ArrayReduce,67,68,1
-block_hint,ArrayReduce,69,70,0
-block_hint,ArrayReduce,117,118,0
-block_hint,ArrayReduce,97,98,0
-block_hint,ArrayReduce,71,72,0
-block_hint,ArrayReduce,47,48,0
-block_hint,ArrayReduce,18,19,0
-block_hint,ArrayReduce,20,21,1
-block_hint,ArrayReduce,57,58,0
-block_hint,ArrayReduce,59,60,0
-block_hint,ArrayReduce,23,24,0
-block_hint,ArrayPrototypeReverse,236,237,1
-block_hint,ArrayPrototypeReverse,210,211,1
-block_hint,ArrayPrototypeReverse,190,191,1
-block_hint,ArrayPrototypeReverse,152,153,1
-block_hint,ArrayPrototypeReverse,103,104,1
-block_hint,ArrayPrototypeReverse,18,19,1
-block_hint,ArrayPrototypeReverse,192,193,1
-block_hint,ArrayPrototypeReverse,169,170,0
-block_hint,ArrayPrototypeReverse,140,141,1
-block_hint,ArrayPrototypeReverse,118,119,1
-block_hint,ArrayPrototypeReverse,89,90,0
-block_hint,ArrayPrototypeShift,237,238,1
-block_hint,ArrayPrototypeShift,205,206,1
-block_hint,ArrayPrototypeShift,185,186,1
-block_hint,ArrayPrototypeShift,132,133,1
-block_hint,ArrayPrototypeShift,81,82,1
-block_hint,ArrayPrototypeShift,11,12,1
-block_hint,ArrayPrototypeShift,196,197,1
-block_hint,ArrayPrototypeShift,168,169,0
-block_hint,ArrayPrototypeShift,134,135,1
-block_hint,ArrayPrototypeShift,83,84,0
-block_hint,ArrayPrototypeShift,13,14,1
-block_hint,ArrayPrototypeShift,136,137,0
-block_hint,ArrayPrototypeShift,85,86,0
-block_hint,ArrayPrototypeShift,68,69,0
-block_hint,ArrayPrototypeShift,87,88,0
-block_hint,ArrayPrototypeShift,27,28,0
-block_hint,ArrayPrototypeShift,29,30,1
-block_hint,ArrayPrototypeShift,170,171,0
-block_hint,ArrayPrototypeShift,89,90,0
-block_hint,ArrayPrototypeShift,33,34,0
-block_hint,ArrayPrototypeShift,148,149,0
-block_hint,ArrayPrototypeShift,111,112,0
-block_hint,ArrayPrototypeShift,91,92,0
-block_hint,ArrayPrototypeShift,39,40,0
-block_hint,ArrayPrototypeShift,41,42,1
-block_hint,ArrayPrototypeSlice,288,289,1
-block_hint,ArrayPrototypeSlice,267,268,1
-block_hint,ArrayPrototypeSlice,245,246,1
-block_hint,ArrayPrototypeSlice,182,183,1
-block_hint,ArrayPrototypeSlice,81,82,1
-block_hint,ArrayPrototypeSlice,12,13,1
-block_hint,ArrayPrototypeSlice,83,84,1
-block_hint,ArrayPrototypeSlice,14,15,1
-block_hint,ArrayPrototypeSlice,16,17,1
-block_hint,ArrayPrototypeSlice,87,88,1
-block_hint,ArrayPrototypeSlice,511,512,0
-block_hint,ArrayPrototypeSlice,509,510,0
-block_hint,ArrayPrototypeSlice,485,486,0
-block_hint,ArrayPrototypeSlice,448,449,0
-block_hint,ArrayPrototypeSlice,428,429,0
-block_hint,ArrayPrototypeSlice,405,406,0
-block_hint,ArrayPrototypeSlice,446,447,0
-block_hint,ArrayPrototypeSlice,426,427,0
-block_hint,ArrayPrototypeSlice,401,402,1
-block_hint,ArrayPrototypeSlice,479,480,0
-block_hint,ArrayPrototypeSlice,465,466,0
-block_hint,ArrayPrototypeSlice,454,455,0
-block_hint,ArrayPrototypeSlice,424,425,0
-block_hint,ArrayPrototypeSlice,422,423,0
-block_hint,ArrayPrototypeSlice,393,394,1
-block_hint,ArrayPrototypeSlice,332,333,0
-block_hint,ArrayPrototypeSlice,277,278,1
-block_hint,ArrayPrototypeSlice,257,258,0
-block_hint,ArrayPrototypeSlice,89,90,1
-block_hint,ArrayPrototypeSlice,20,21,1
-block_hint,ArrayPrototypeSlice,128,129,1
-block_hint,ArrayPrototypeSlice,66,67,0
-block_hint,ArrayPrototypeSlice,443,444,0
-block_hint,ArrayPrototypeSlice,386,387,0
-block_hint,ArrayPrototypeSlice,364,365,0
-block_hint,ArrayPrototypeSlice,384,385,0
-block_hint,ArrayPrototypeSlice,362,363,0
-block_hint,ArrayPrototypeSlice,344,345,1
-block_hint,ArrayPrototypeSlice,437,438,0
-block_hint,ArrayPrototypeSlice,413,414,0
-block_hint,ArrayPrototypeSlice,388,389,0
-block_hint,ArrayPrototypeSlice,360,361,0
-block_hint,ArrayPrototypeSlice,340,341,1
-block_hint,ArrayPrototypeSlice,309,310,0
-block_hint,ArrayPrototypeSlice,296,297,0
-block_hint,ArrayPrototypeSlice,284,285,0
-block_hint,ArrayPrototypeSlice,261,262,0
-block_hint,ArrayPrototypeSlice,238,239,1
-block_hint,ArrayPrototypeSlice,141,142,0
-block_hint,ArrayPrototypeSlice,143,144,0
-block_hint,ArrayPrototypeSlice,190,191,1
-block_hint,ArrayPrototypeSlice,211,212,0
-block_hint,ArrayPrototypeSlice,91,92,1
-block_hint,ArrayPrototypeSlice,22,23,1
-block_hint,ArrayPrototypeSlice,197,198,1
-block_hint,ArrayPrototypeSlice,134,135,0
-block_hint,ArrayPrototypeSlice,69,70,0
-block_hint,ArrayPrototypeSlice,93,94,1
-block_hint,ArrayPrototypeSlice,24,25,1
-block_hint,ArrayPrototypeSlice,95,96,1
-block_hint,ArrayPrototypeSlice,26,27,1
-block_hint,ArrayPrototypeSlice,28,29,1
-block_hint,ArrayPrototypeSlice,99,100,1
-block_hint,ArrayPrototypeSlice,42,43,1
-block_hint,ArrayPrototypeSlice,145,146,1
-block_hint,ArrayPrototypeSlice,174,175,0
-block_hint,ArrayPrototypeSlice,176,177,1
-block_hint,ArrayPrototypeSlice,157,158,0
-block_hint,ArrayPrototypeSlice,101,102,0
-block_hint,ArrayPrototypeSlice,32,33,1
-block_hint,ArrayPrototypeSlice,250,251,1
-block_hint,ArrayPrototypeSlice,221,222,1
-block_hint,ArrayPrototypeSlice,118,119,1
-block_hint,ArrayPrototypeSlice,57,58,0
-block_hint,ArrayPrototypeSlice,59,60,1
-block_hint,ArrayPrototypeSlice,75,76,1
-block_hint,ArrayPrototypeSlice,103,104,0
-block_hint,ArrayPrototypeSlice,232,233,0
-block_hint,ArrayPrototypeSlice,164,165,1
-block_hint,ArrayPrototypeSlice,71,72,0
-block_hint,ArrayPrototypeSlice,178,179,0
-block_hint,ArrayPrototypeSlice,160,161,0
-block_hint,ArrayPrototypeSlice,109,110,0
-block_hint,ArrayPrototypeSlice,44,45,1
-block_hint,ArrayPrototypeSlice,248,249,1
-block_hint,ArrayPrototypeSlice,217,218,1
-block_hint,ArrayPrototypeSlice,116,117,1
-block_hint,ArrayPrototypeSlice,49,50,0
-block_hint,ArrayPrototypeSlice,136,137,0
-block_hint,ArrayPrototypeSlice,73,74,0
-block_hint,ArraySome,88,89,1
-block_hint,ArraySome,31,32,0
-block_hint,ArraySome,119,120,1
-block_hint,ArraySome,115,116,1
-block_hint,ArraySome,93,94,1
-block_hint,ArraySome,95,96,1
-block_hint,ArraySome,101,102,1
-block_hint,ArraySome,108,109,1
-block_hint,ArraySome,110,111,1
-block_hint,ArraySome,99,100,1
-block_hint,ArraySome,56,57,0
-block_hint,ArraySome,105,106,1
-block_hint,ArraySome,77,78,1
-block_hint,ArraySome,52,53,1
-block_hint,ArraySome,13,14,1
-block_hint,ArraySome,62,63,1
-block_hint,ArraySome,27,28,0
-block_hint,ArraySome,79,80,1
-block_hint,ArraySome,81,82,0
-block_hint,ArraySome,58,59,0
-block_hint,ArraySome,54,55,0
-block_hint,ArraySome,19,20,0
-block_hint,ArraySome,21,22,1
-block_hint,ArraySome,66,67,0
-block_hint,ArrayPrototypeSplice,605,606,1
-block_hint,ArrayPrototypeSplice,450,451,1
-block_hint,ArrayPrototypeSplice,452,453,1
-block_hint,ArrayPrototypeSplice,1201,1202,1
-block_hint,ArrayPrototypeSplice,1183,1184,0
-block_hint,ArrayPrototypeSplice,1159,1160,0
-block_hint,ArrayPrototypeSplice,1138,1139,0
-block_hint,ArrayPrototypeSplice,1104,1105,0
-block_hint,ArrayPrototypeSplice,1072,1073,0
-block_hint,ArrayPrototypeSplice,1031,1032,0
-block_hint,ArrayPrototypeSplice,1102,1103,0
-block_hint,ArrayPrototypeSplice,1070,1071,0
-block_hint,ArrayPrototypeSplice,1027,1028,1
-block_hint,ArrayPrototypeSplice,940,941,1
-block_hint,ArrayPrototypeSplice,1093,1094,0
-block_hint,ArrayPrototypeSplice,1226,1227,0
-block_hint,ArrayPrototypeSplice,1218,1219,0
-block_hint,ArrayPrototypeSplice,1205,1206,0
-block_hint,ArrayPrototypeSplice,1193,1194,1
-block_hint,ArrayPrototypeSplice,1162,1163,0
-block_hint,ArrayPrototypeSplice,1142,1143,0
-block_hint,ArrayPrototypeSplice,1121,1122,0
-block_hint,ArrayPrototypeSplice,1078,1079,0
-block_hint,ArrayPrototypeSplice,1038,1039,0
-block_hint,ArrayPrototypeSplice,1076,1077,0
-block_hint,ArrayPrototypeSplice,1036,1037,0
-block_hint,ArrayPrototypeSplice,987,988,1
-block_hint,ArrayPrototypeSplice,877,878,0
-block_hint,ArrayPrototypeSplice,842,843,0
-block_hint,ArrayPrototypeSplice,810,811,0
-block_hint,ArrayPrototypeSplice,731,732,0
-block_hint,ArrayPrototypeSplice,677,678,0
-block_hint,ArrayPrototypeSplice,607,608,0
-block_hint,ArrayPrototypeSplice,511,512,1
-block_hint,ArrayPrototypeSplice,456,457,0
-block_hint,ArrayPrototypeSplice,223,224,0
-block_hint,ArrayPrototypeSplice,332,333,0
-block_hint,ArrayPrototypeSplice,334,335,0
-block_hint,ArrayPrototypeSplice,336,337,0
-block_hint,ArrayPrototypeSplice,225,226,1
-block_hint,ArrayPrototypeSplice,51,52,1
-block_hint,ArrayPrototypeSplice,338,339,1
-block_hint,ArrayPrototypeSplice,340,341,0
-block_hint,ArrayPrototypeSplice,342,343,0
-block_hint,ArrayPrototypeSplice,387,388,1
-block_hint,ArrayPrototypeSplice,227,228,0
-block_hint,ArrayPrototypeSplice,53,54,1
-block_hint,ArrayPrototypeSplice,244,245,0
-block_hint,ArrayPrototypeSplice,93,94,1
-block_hint,ArrayPrototypeSplice,556,557,0
-block_hint,ArrayPrototypeSplice,400,401,0
-block_hint,ArrayPrototypeSplice,590,591,0
-block_hint,ArrayPrototypeSplice,528,529,1
-block_hint,ArrayPrototypeSplice,345,346,0
-block_hint,ArrayPrototypeSplice,347,348,1
-block_hint,ArrayPrototypeSplice,235,236,0
-block_hint,ArrayPrototypeSplice,258,259,1
-block_hint,ArrayPrototypeSplice,105,106,0
-block_hint,ArrayPrototypeSplice,229,230,0
-block_hint,ArrayPrototypeSplice,329,330,0
-block_hint,ArrayPrototypeSplice,327,328,0
-block_hint,ArrayPrototypeSplice,391,392,1
-block_hint,ArrayPrototypeSplice,65,66,1
-block_hint,ArrayPrototypeSplice,294,295,1
-block_hint,ArrayPrototypeSplice,143,144,0
-block_hint,ArrayPrototypeSplice,67,68,0
-block_hint,ArrayPrototypeSplice,69,70,0
-block_hint,ArrayPrototypeSplice,263,264,1
-block_hint,ArrayPrototypeSplice,178,179,1
-block_hint,ArrayPrototypeSplice,325,326,0
-block_hint,ArrayPrototypeSplice,425,426,1
-block_hint,ArrayPrototypeSplice,265,266,0
-block_hint,ArrayPrototypeSplice,111,112,0
-block_hint,ArrayPrototypeSplice,427,428,0
-block_hint,ArrayPrototypeSplice,267,268,0
-block_hint,ArrayPrototypeSplice,113,114,0
-block_hint,ArrayPrototypeSplice,115,116,0
-block_hint,ArrayPrototypeSplice,182,183,0
-block_hint,ArrayPrototypeSplice,63,64,1
-block_hint,ArrayPrototypeSplice,131,132,1
-block_hint,ArrayPrototypeSplice,296,297,0
-block_hint,ArrayPrototypeSplice,71,72,1
-block_hint,ArrayPrototypeUnshift,186,187,1
-block_hint,ArrayPrototypeUnshift,157,158,1
-block_hint,ArrayPrototypeUnshift,141,142,1
-block_hint,ArrayPrototypeUnshift,96,97,1
-block_hint,ArrayPrototypeUnshift,55,56,1
-block_hint,ArrayPrototypeUnshift,10,11,1
-block_hint,ArrayPrototypeUnshift,128,129,1
-block_hint,ArrayPrototypeUnshift,98,99,0
-block_hint,ArrayPrototypeUnshift,57,58,0
-block_hint,ArrayPrototypeUnshift,100,101,1
-block_hint,ArrayPrototypeUnshift,59,60,0
-block_hint,ArrayPrototypeUnshift,20,21,1
-block_hint,ArrayPrototypeUnshift,22,23,0
-block_hint,ArrayBufferPrototypeGetByteLength,15,16,1
-block_hint,ArrayBufferPrototypeGetByteLength,10,11,1
-block_hint,ArrayBufferPrototypeGetByteLength,12,13,1
-block_hint,ArrayBufferPrototypeGetByteLength,6,7,0
-block_hint,ArrayBufferPrototypeGetByteLength,4,5,0
-block_hint,ArrayBufferIsView,8,9,1
-block_hint,ArrayBufferIsView,5,6,1
-block_hint,ArrayBufferIsView,3,4,1
-block_hint,ToInteger,4,5,1
-block_hint,ToInteger,6,7,0
-block_hint,BooleanConstructor,81,82,1
-block_hint,BooleanConstructor,74,75,1
-block_hint,BooleanConstructor,57,58,0
-block_hint,BooleanConstructor,68,69,1
-block_hint,BooleanConstructor,59,60,0
-block_hint,BooleanConstructor,70,71,0
-block_hint,BooleanConstructor,51,52,0
-block_hint,BooleanConstructor,7,8,1
-block_hint,ToString,20,21,0
-block_hint,ToString,34,35,0
-block_hint,ToString,67,68,0
-block_hint,ToString,83,84,0
-block_hint,ToString,25,26,1
-block_hint,ToString,50,51,1
-block_hint,ToString,54,55,0
-block_hint,StringPrototypeToString,9,10,1
-block_hint,StringPrototypeToString,11,12,1
-block_hint,StringPrototypeToString,7,8,0
-block_hint,StringPrototypeToString,5,6,1
-block_hint,StringPrototypeValueOf,9,10,1
-block_hint,StringPrototypeValueOf,11,12,1
-block_hint,StringPrototypeValueOf,5,6,1
-block_hint,StringPrototypeCharAt,51,52,1
-block_hint,StringPrototypeCharAt,37,38,1
-block_hint,StringPrototypeCharAt,28,29,1
-block_hint,StringPrototypeCharAt,33,34,0
-block_hint,StringPrototypeCharAt,12,13,0
-block_hint,StringPrototypeCharAt,14,15,0
-block_hint,StringPrototypeCharAt,19,20,1
-block_hint,StringPrototypeCharAt,43,44,0
-block_hint,StringPrototypeCharAt,6,7,1
-block_hint,StringPrototypeCharCodeAt,46,47,1
-block_hint,StringPrototypeCharCodeAt,41,42,1
-block_hint,StringPrototypeCharCodeAt,28,29,1
-block_hint,StringPrototypeCharCodeAt,39,40,0
-block_hint,StringPrototypeCharCodeAt,13,14,0
-block_hint,StringPrototypeCharCodeAt,15,16,0
-block_hint,StringPrototypeCharCodeAt,17,18,1
-block_hint,StringPrototypeCharCodeAt,32,33,0
-block_hint,StringPrototypeCodePointAt,79,80,1
-block_hint,StringPrototypeCodePointAt,53,54,1
-block_hint,StringPrototypeCodePointAt,43,44,1
-block_hint,StringPrototypeCodePointAt,51,52,0
-block_hint,StringPrototypeCodePointAt,20,21,0
-block_hint,StringPrototypeCodePointAt,22,23,0
-block_hint,StringPrototypeCodePointAt,8,9,0
-block_hint,StringPrototypeCodePointAt,65,66,0
-block_hint,StringPrototypeCodePointAt,45,46,0
-block_hint,StringPrototypeCodePointAt,14,15,1
-block_hint,StringPrototypeCodePointAt,16,17,1
-block_hint,StringPrototypeCodePointAt,10,11,0
-block_hint,StringPrototypeCodePointAt,72,73,0
-block_hint,StringPrototypeCodePointAt,48,49,0
-block_hint,StringPrototypeCodePointAt,18,19,1
-block_hint,StringConstructor,64,65,1
-block_hint,StringConstructor,49,50,1
-block_hint,StringConstructor,36,37,0
-block_hint,StringConstructor,78,79,1
-block_hint,StringConstructor,76,77,1
-block_hint,StringConstructor,73,74,1
-block_hint,StringConstructor,60,61,0
-block_hint,StringConstructor,62,63,1
-block_hint,StringConstructor,45,46,0
-block_hint,StringConstructor,24,25,0
-block_hint,StringConstructor,26,27,1
-block_hint,StringAddConvertLeft,47,48,1
-block_hint,StringAddConvertLeft,49,50,0
-block_hint,StringAddConvertLeft,82,83,1
-block_hint,StringAddConvertLeft,64,65,0
-block_hint,StringAddConvertLeft,43,44,0
-block_hint,StringAddConvertLeft,62,63,1
-block_hint,StringAddConvertRight,47,48,1
-block_hint,StringAddConvertRight,82,83,1
-block_hint,StringAddConvertRight,64,65,0
-block_hint,StringAddConvertRight,43,44,0
-block_hint,StringAddConvertRight,86,87,0
-block_hint,StringAddConvertRight,79,80,1
-block_hint,StringCharAt,27,28,0
-block_hint,StringCharAt,20,21,1
-block_hint,StringCharAt,5,6,1
-block_hint,FastNewFunctionContextFunction,11,12,1
-block_hint,FastNewFunctionContextFunction,4,5,1
-block_hint,FastNewFunctionContextFunction,6,7,0
-block_hint,CreateRegExpLiteral,6,7,0
-block_hint,CreateRegExpLiteral,8,9,1
-block_hint,CreateRegExpLiteral,10,11,1
-block_hint,CreateRegExpLiteral,2,3,1
-block_hint,CreateShallowArrayLiteral,20,21,1
-block_hint,CreateShallowArrayLiteral,22,23,1
-block_hint,CreateShallowArrayLiteral,35,36,1
-block_hint,CreateShallowArrayLiteral,11,12,0
-block_hint,CreateShallowArrayLiteral,43,44,1
-block_hint,CreateShallowArrayLiteral,39,40,1
-block_hint,CreateShallowArrayLiteral,24,25,0
-block_hint,CreateShallowArrayLiteral,13,14,0
-block_hint,CreateShallowArrayLiteral,15,16,1
-block_hint,CreateShallowArrayLiteral,46,47,1
-block_hint,CreateShallowArrayLiteral,48,49,0
-block_hint,CreateShallowArrayLiteral,30,31,1
-block_hint,CreateShallowArrayLiteral,5,6,1
-block_hint,CreateShallowArrayLiteral,18,19,1
-block_hint,CreateEmptyArrayLiteral,9,10,1
-block_hint,CreateEmptyArrayLiteral,3,4,1
-block_hint,CreateEmptyArrayLiteral,6,7,1
-block_hint,CreateShallowObjectLiteral,53,54,1
-block_hint,CreateShallowObjectLiteral,61,62,1
-block_hint,CreateShallowObjectLiteral,63,64,0
-block_hint,CreateShallowObjectLiteral,110,111,0
-block_hint,CreateShallowObjectLiteral,99,100,1
-block_hint,CreateShallowObjectLiteral,67,68,1
-block_hint,CreateShallowObjectLiteral,106,107,1
-block_hint,CreateShallowObjectLiteral,81,82,1
-block_hint,CreateShallowObjectLiteral,34,35,0
-block_hint,CreateShallowObjectLiteral,71,72,0
-block_hint,CreateShallowObjectLiteral,38,39,0
-block_hint,CreateShallowObjectLiteral,42,43,0
-block_hint,CreateShallowObjectLiteral,85,86,1
-block_hint,CreateShallowObjectLiteral,93,94,1
-block_hint,ObjectConstructor,27,28,1
-block_hint,ObjectConstructor,19,20,1
-block_hint,ObjectConstructor,29,30,1
-block_hint,ObjectConstructor,23,24,0
-block_hint,ObjectConstructor,17,18,0
-block_hint,ObjectConstructor,11,12,0
-block_hint,ObjectConstructor,4,5,1
-block_hint,ObjectConstructor,21,22,1
-block_hint,ObjectConstructor,6,7,0
-block_hint,CreateEmptyLiteralObject,4,5,1
-block_hint,CreateEmptyLiteralObject,11,12,1
-block_hint,CreateEmptyLiteralObject,6,7,0
-block_hint,NumberConstructor,18,19,1
-block_hint,NumberConstructor,6,7,1
-block_hint,NumberConstructor,28,29,1
-block_hint,NumberConstructor,12,13,0
-block_hint,NumberConstructor,34,35,0
-block_hint,NumberConstructor,32,33,1
-block_hint,NumberConstructor,30,31,1
-block_hint,NumberConstructor,2,3,1
-block_hint,NonNumberToNumber,14,15,0
-block_hint,NonNumberToNumber,3,4,1
-block_hint,NonNumberToNumeric,17,18,0
-block_hint,NonNumberToNumeric,14,15,0
-block_hint,NonNumberToNumeric,5,6,1
-block_hint,ToNumeric,5,6,1
-block_hint,ToNumeric,3,4,1
-block_hint,NumberToString,69,70,0
-block_hint,NumberToString,20,21,1
-block_hint,NumberToString,45,46,1
-block_hint,NumberToString,41,42,1
-block_hint,ToBoolean,18,19,1
-block_hint,ToBoolean,14,15,0
-block_hint,ToBoolean,20,21,0
-block_hint,ToBoolean,6,7,0
-block_hint,ToBooleanForBaselineJump,14,15,0
-block_hint,ToBooleanForBaselineJump,20,21,0
-block_hint,ToBooleanForBaselineJump,6,7,0
-block_hint,ToLength,19,20,0
-block_hint,ToLength,5,6,0
-block_hint,ToName,40,41,1
-block_hint,ToName,48,49,0
-block_hint,ToName,20,21,0
-block_hint,ToName,22,23,0
-block_hint,ToName,67,68,0
-block_hint,ToName,27,28,1
-block_hint,ToObject,45,46,1
-block_hint,ToObject,7,8,0
-block_hint,ToObject,38,39,1
-block_hint,ToObject,9,10,1
-block_hint,ToObject,53,54,0
-block_hint,ToObject,55,56,1
-block_hint,ToObject,48,49,0
-block_hint,ToObject,26,27,0
-block_hint,ToObject,28,29,1
-block_hint,NonPrimitiveToPrimitive_Default,5,6,1
-block_hint,NonPrimitiveToPrimitive_Number,5,6,1
-block_hint,NonPrimitiveToPrimitive_String,5,6,1
-block_hint,OrdinaryToPrimitive_Number,56,57,1
-block_hint,OrdinaryToPrimitive_Number,53,54,1
-block_hint,OrdinaryToPrimitive_Number,40,41,1
-block_hint,OrdinaryToPrimitive_Number,42,43,0
-block_hint,OrdinaryToPrimitive_Number,28,29,0
-block_hint,OrdinaryToPrimitive_Number,12,13,0
-block_hint,OrdinaryToPrimitive_Number,30,31,0
-block_hint,OrdinaryToPrimitive_Number,32,33,0
-block_hint,OrdinaryToPrimitive_Number,14,15,0
-block_hint,OrdinaryToPrimitive_Number,16,17,0
-block_hint,OrdinaryToPrimitive_Number,44,45,1
-block_hint,OrdinaryToPrimitive_Number,46,47,1
-block_hint,OrdinaryToPrimitive_Number,48,49,1
-block_hint,OrdinaryToPrimitive_Number,50,51,0
-block_hint,OrdinaryToPrimitive_Number,34,35,0
-block_hint,OrdinaryToPrimitive_Number,20,21,0
-block_hint,OrdinaryToPrimitive_String,56,57,1
-block_hint,OrdinaryToPrimitive_String,53,54,1
-block_hint,OrdinaryToPrimitive_String,40,41,1
-block_hint,OrdinaryToPrimitive_String,42,43,0
-block_hint,OrdinaryToPrimitive_String,28,29,0
-block_hint,OrdinaryToPrimitive_String,10,11,0
-block_hint,DataViewPrototypeGetByteLength,37,38,1
-block_hint,DataViewPrototypeGetByteLength,19,20,1
-block_hint,DataViewPrototypeGetByteLength,21,22,1
-block_hint,DataViewPrototypeGetByteLength,39,40,0
-block_hint,DataViewPrototypeGetByteLength,33,34,0
-block_hint,DataViewPrototypeGetByteLength,12,13,0
-block_hint,DataViewPrototypeGetByteLength,10,11,0
-block_hint,DataViewPrototypeGetFloat64,101,102,1
-block_hint,DataViewPrototypeGetFloat64,87,88,1
-block_hint,DataViewPrototypeGetFloat64,56,57,0
-block_hint,DataViewPrototypeGetFloat64,17,18,1
-block_hint,DataViewPrototypeGetFloat64,19,20,1
-block_hint,DataViewPrototypeGetFloat64,95,96,0
-block_hint,DataViewPrototypeGetFloat64,99,100,0
-block_hint,DataViewPrototypeGetFloat64,78,79,0
-block_hint,DataViewPrototypeGetFloat64,49,50,0
-block_hint,DataViewPrototypeGetFloat64,70,71,0
-block_hint,DataViewPrototypeGetFloat64,89,90,1
-block_hint,DataViewPrototypeGetFloat64,72,73,0
-block_hint,DataViewPrototypeGetFloat64,74,75,0
-block_hint,DataViewPrototypeGetFloat64,91,92,0
-block_hint,DataViewPrototypeGetFloat64,64,65,0
-block_hint,DataViewPrototypeGetFloat64,21,22,0
-block_hint,DataViewPrototypeGetFloat64,97,98,0
-block_hint,DataViewPrototypeGetFloat64,82,83,0
-block_hint,DataViewPrototypeGetFloat64,47,48,0
-block_hint,DataViewPrototypeGetFloat64,35,36,0
-block_hint,DataViewPrototypeGetFloat64,37,38,0
-block_hint,DataViewPrototypeGetFloat64,85,86,1
-block_hint,DataViewPrototypeGetFloat64,54,55,0
-block_hint,DataViewPrototypeGetFloat64,14,15,1
-block_hint,DataViewPrototypeSetFloat64,116,117,1
-block_hint,DataViewPrototypeSetFloat64,104,105,1
-block_hint,DataViewPrototypeSetFloat64,82,83,0
-block_hint,DataViewPrototypeSetFloat64,49,50,0
-block_hint,DataViewPrototypeSetFloat64,16,17,1
-block_hint,DataViewPrototypeSetFloat64,18,19,1
-block_hint,DataViewPrototypeSetFloat64,106,107,0
-block_hint,DataViewPrototypeSetFloat64,95,96,0
-block_hint,DataViewPrototypeSetFloat64,71,72,0
-block_hint,DataViewPrototypeSetFloat64,42,43,0
-block_hint,DataViewPrototypeSetFloat64,84,85,1
-block_hint,DataViewPrototypeSetFloat64,86,87,1
-block_hint,DataViewPrototypeSetFloat64,59,60,1
-block_hint,DataViewPrototypeSetFloat64,10,11,0
-block_hint,DataViewPrototypeSetFloat64,93,94,0
-block_hint,DataViewPrototypeSetFloat64,79,80,0
-block_hint,DataViewPrototypeSetFloat64,40,41,0
-block_hint,DataViewPrototypeSetFloat64,34,35,0
-block_hint,DataViewPrototypeSetFloat64,36,37,0
-block_hint,DataViewPrototypeSetFloat64,47,48,1
-block_hint,DataViewPrototypeSetFloat64,14,15,0
-block_hint,FunctionPrototypeHasInstance,35,36,1
-block_hint,FunctionPrototypeHasInstance,15,16,1
-block_hint,FunctionPrototypeHasInstance,17,18,1
-block_hint,FunctionPrototypeHasInstance,19,20,1
-block_hint,FunctionPrototypeHasInstance,33,34,1
-block_hint,FunctionPrototypeHasInstance,23,24,0
-block_hint,FunctionPrototypeHasInstance,13,14,0
-block_hint,FunctionPrototypeHasInstance,31,32,0
-block_hint,FunctionPrototypeHasInstance,25,26,0
-block_hint,FunctionPrototypeHasInstance,27,28,0
-block_hint,FastFunctionPrototypeBind,91,92,1
-block_hint,FastFunctionPrototypeBind,88,89,1
-block_hint,FastFunctionPrototypeBind,75,76,0
-block_hint,FastFunctionPrototypeBind,29,30,0
-block_hint,FastFunctionPrototypeBind,31,32,0
-block_hint,FastFunctionPrototypeBind,7,8,1
-block_hint,FastFunctionPrototypeBind,53,54,1
-block_hint,FastFunctionPrototypeBind,65,66,0
-block_hint,FastFunctionPrototypeBind,69,70,1
-block_hint,FastFunctionPrototypeBind,41,42,1
-block_hint,FastFunctionPrototypeBind,9,10,1
-block_hint,FastFunctionPrototypeBind,56,57,1
-block_hint,FastFunctionPrototypeBind,67,68,0
-block_hint,FastFunctionPrototypeBind,79,80,1
-block_hint,FastFunctionPrototypeBind,71,72,1
-block_hint,FastFunctionPrototypeBind,43,44,1
-block_hint,FastFunctionPrototypeBind,11,12,1
-block_hint,FastFunctionPrototypeBind,35,36,1
-block_hint,FastFunctionPrototypeBind,81,82,1
-block_hint,FastFunctionPrototypeBind,73,74,1
-block_hint,FastFunctionPrototypeBind,27,28,1
-block_hint,ForInNext,2,3,1
-block_hint,ForInNext,7,8,1
-block_hint,CallIteratorWithFeedback,56,57,1
-block_hint,CallIteratorWithFeedback,58,59,1
-block_hint,CallIteratorWithFeedback,26,27,1
-block_hint,CallIteratorWithFeedback,28,29,1
-block_hint,CallIteratorWithFeedback,30,31,1
-block_hint,CallIteratorWithFeedback,10,11,1
-block_hint,MathAbs,14,15,1
-block_hint,MathAbs,16,17,1
-block_hint,MathAbs,23,24,0
-block_hint,MathAbs,9,10,0
-block_hint,MathAbs,11,12,1
-block_hint,MathCeil,12,13,1
-block_hint,MathFloor,12,13,1
-block_hint,MathFloor,14,15,1
-block_hint,MathFloor,35,36,1
-block_hint,MathFloor,25,26,0
-block_hint,MathFloor,21,22,1
-block_hint,MathFloor,19,20,0
-block_hint,MathFloor,7,8,0
-block_hint,MathRound,12,13,1
-block_hint,MathRound,14,15,1
-block_hint,MathRound,32,33,0
-block_hint,MathRound,36,37,0
-block_hint,MathRound,28,29,0
-block_hint,MathRound,21,22,1
-block_hint,MathRound,7,8,0
-block_hint,MathRound,9,10,1
-block_hint,MathPow,12,13,1
-block_hint,MathPow,14,15,1
-block_hint,MathPow,18,19,1
-block_hint,MathPow,23,24,0
-block_hint,MathPow,7,8,0
-block_hint,MathPow,9,10,1
-block_hint,MathMax,13,14,1
-block_hint,MathMax,19,20,0
-block_hint,MathMax,17,18,1
-block_hint,MathMax,24,25,0
-block_hint,MathMax,8,9,0
-block_hint,MathMax,10,11,1
-block_hint,MathMin,13,14,1
-block_hint,MathMin,19,20,0
-block_hint,MathMin,17,18,1
-block_hint,MathMin,24,25,0
-block_hint,MathMin,8,9,0
-block_hint,MathMin,10,11,1
-block_hint,MathAtan2,34,35,1
-block_hint,MathAtan2,32,33,1
-block_hint,MathAtan2,23,24,1
-block_hint,MathAtan2,5,6,1
-block_hint,MathCos,25,26,1
-block_hint,MathCos,23,24,1
-block_hint,MathCos,9,10,1
-block_hint,MathCos,3,4,0
-block_hint,MathCos,5,6,1
-block_hint,MathExp,25,26,1
-block_hint,MathExp,20,21,1
-block_hint,MathExp,23,24,1
-block_hint,MathExp,16,17,1
-block_hint,MathExp,13,14,0
-block_hint,MathExp,5,6,1
-block_hint,MathFround,25,26,1
-block_hint,MathFround,23,24,1
-block_hint,MathFround,5,6,1
-block_hint,MathLog,25,26,1
-block_hint,MathLog,23,24,1
-block_hint,MathLog,13,14,0
-block_hint,MathLog,5,6,1
-block_hint,MathSin,25,26,1
-block_hint,MathSin,23,24,1
-block_hint,MathSin,9,10,0
-block_hint,MathSin,11,12,0
-block_hint,MathSin,3,4,0
-block_hint,MathSin,5,6,1
-block_hint,MathSign,16,17,1
-block_hint,MathSign,11,12,0
-block_hint,MathSign,7,8,0
-block_hint,MathSign,2,3,0
-block_hint,MathSign,4,5,1
-block_hint,MathSqrt,25,26,1
-block_hint,MathSqrt,23,24,1
-block_hint,MathSqrt,11,12,0
-block_hint,MathSqrt,3,4,0
-block_hint,MathSqrt,5,6,1
-block_hint,MathTan,25,26,1
-block_hint,MathTan,20,21,0
-block_hint,MathTan,16,17,0
-block_hint,MathTanh,25,26,1
-block_hint,MathTanh,20,21,1
-block_hint,MathTanh,23,24,1
-block_hint,MathTanh,16,17,1
-block_hint,MathTanh,13,14,0
-block_hint,MathTanh,5,6,1
-block_hint,MathRandom,15,16,1
-block_hint,MathRandom,3,4,1
-block_hint,MathRandom,17,18,1
-block_hint,MathRandom,5,6,1
-block_hint,MathRandom,7,8,1
-block_hint,MathRandom,9,10,1
-block_hint,MathRandom,13,14,1
-block_hint,NumberPrototypeToString,71,72,1
-block_hint,NumberPrototypeToString,113,114,0
-block_hint,NumberPrototypeToString,51,52,0
-block_hint,NumberPrototypeToString,59,60,1
-block_hint,NumberPrototypeToString,183,184,0
-block_hint,NumberPrototypeToString,154,155,0
-block_hint,NumberPrototypeToString,121,122,0
-block_hint,NumberPrototypeToString,180,181,0
-block_hint,NumberPrototypeToString,167,168,0
-block_hint,NumberPrototypeToString,85,86,0
-block_hint,NumberPrototypeToString,176,177,0
-block_hint,NumberPrototypeToString,97,98,0
-block_hint,NumberPrototypeToString,171,172,0
-block_hint,NumberPrototypeToString,129,130,0
-block_hint,NumberPrototypeToString,109,110,1
-block_hint,NumberPrototypeToString,42,43,1
-block_hint,NumberPrototypeToString,49,50,1
-block_hint,NumberPrototypeToString,73,74,0
-block_hint,NumberPrototypeToString,27,28,0
-block_hint,NumberPrototypeToString,116,117,1
-block_hint,NumberPrototypeToString,75,76,1
-block_hint,NumberPrototypeToString,29,30,1
-block_hint,NumberPrototypeToString,95,96,0
-block_hint,NumberPrototypeToString,111,112,0
-block_hint,NumberPrototypeToString,35,36,1
-block_hint,NumberPrototypeToString,132,133,1
-block_hint,NumberPrototypeToString,37,38,0
-block_hint,NumberPrototypeToString,134,135,1
-block_hint,NumberPrototypeToString,39,40,0
-block_hint,NumberPrototypeToString,162,163,1
-block_hint,NumberPrototypeToString,164,165,0
-block_hint,NumberPrototypeToString,139,140,0
-block_hint,NumberPrototypeToString,105,106,1
-block_hint,NumberIsInteger,13,14,1
-block_hint,NumberParseFloat,14,15,1
-block_hint,NumberParseFloat,2,3,1
-block_hint,NumberParseFloat,12,13,0
-block_hint,NumberParseFloat,17,18,0
-block_hint,NumberParseFloat,4,5,1
-block_hint,ParseInt,27,28,1
-block_hint,ParseInt,13,14,0
-block_hint,ParseInt,6,7,1
-block_hint,ParseInt,31,32,0
-block_hint,ParseInt,25,26,1
-block_hint,ParseInt,23,24,1
-block_hint,ParseInt,10,11,0
-block_hint,NumberParseInt,3,4,1
-block_hint,Add,66,67,1
-block_hint,Add,24,25,0
-block_hint,Add,68,69,0
-block_hint,Add,35,36,0
-block_hint,Add,40,41,0
-block_hint,Subtract,24,25,0
-block_hint,Subtract,9,10,0
-block_hint,Subtract,22,23,0
-block_hint,Subtract,7,8,0
-block_hint,Divide,50,51,0
-block_hint,Divide,23,24,0
-block_hint,Divide,9,10,0
-block_hint,Divide,44,45,1
-block_hint,Divide,48,49,1
-block_hint,Divide,33,34,0
-block_hint,Divide,7,8,1
-block_hint,CreateObjectWithoutProperties,52,53,1
-block_hint,CreateObjectWithoutProperties,42,43,1
-block_hint,CreateObjectWithoutProperties,34,35,0
-block_hint,CreateObjectWithoutProperties,17,18,1
-block_hint,CreateObjectWithoutProperties,56,57,0
-block_hint,CreateObjectWithoutProperties,44,45,0
-block_hint,CreateObjectWithoutProperties,48,49,1
-block_hint,CreateObjectWithoutProperties,36,37,0
-block_hint,CreateObjectWithoutProperties,38,39,0
-block_hint,CreateObjectWithoutProperties,5,6,1
-block_hint,CreateObjectWithoutProperties,40,41,1
-block_hint,CreateObjectWithoutProperties,7,8,1
-block_hint,CreateObjectWithoutProperties,9,10,1
-block_hint,CreateObjectWithoutProperties,11,12,1
-block_hint,CreateObjectWithoutProperties,13,14,1
-block_hint,CreateObjectWithoutProperties,15,16,1
-block_hint,CreateObjectWithoutProperties,20,21,0
-block_hint,CreateObjectWithoutProperties,50,51,1
-block_hint,ObjectGetPrototypeOf,11,12,1
-block_hint,ObjectGetPrototypeOf,8,9,1
-block_hint,ObjectGetPrototypeOf,5,6,1
-block_hint,ObjectGetPrototypeOf,2,3,0
-block_hint,ObjectSetPrototypeOf,18,19,1
-block_hint,ObjectSetPrototypeOf,4,5,0
-block_hint,ObjectSetPrototypeOf,13,14,1
-block_hint,ObjectSetPrototypeOf,20,21,0
-block_hint,ObjectSetPrototypeOf,15,16,0
-block_hint,ObjectSetPrototypeOf,6,7,1
-block_hint,ObjectSetPrototypeOf,8,9,0
-block_hint,ObjectSetPrototypeOf,10,11,0
-block_hint,ObjectPrototypeToString,3,4,1
-block_hint,ObjectPrototypeValueOf,8,9,1
-block_hint,ObjectPrototypeValueOf,5,6,1
-block_hint,ObjectPrototypeValueOf,2,3,1
-block_hint,FulfillPromise,32,33,1
-block_hint,FulfillPromise,15,16,0
-block_hint,FulfillPromise,34,35,1
-block_hint,FulfillPromise,17,18,0
-block_hint,FulfillPromise,19,20,1
-block_hint,FulfillPromise,21,22,0
-block_hint,PerformPromiseThen,101,102,1
-block_hint,PerformPromiseThen,57,58,0
-block_hint,PerformPromiseThen,103,104,1
-block_hint,PerformPromiseThen,59,60,0
-block_hint,PerformPromiseThen,61,62,1
-block_hint,PerformPromiseThen,63,64,0
-block_hint,PerformPromiseThen,18,19,1
-block_hint,PerformPromiseThen,72,73,1
-block_hint,PerformPromiseThen,25,26,1
-block_hint,PerformPromiseThen,93,94,1
-block_hint,PerformPromiseThen,45,46,0
-block_hint,PerformPromiseThen,95,96,1
-block_hint,PerformPromiseThen,47,48,0
-block_hint,PerformPromiseThen,49,50,1
-block_hint,PerformPromiseThen,51,52,0
-block_hint,PerformPromiseThen,20,21,1
-block_hint,PerformPromiseThen,115,116,1
-block_hint,PromiseFulfillReactionJob,22,23,0
-block_hint,PromiseFulfillReactionJob,2,3,1
-block_hint,ResolvePromise,29,30,0
-block_hint,ResolvePromise,31,32,0
-block_hint,ResolvePromise,15,16,1
-block_hint,ResolvePromise,47,48,0
-block_hint,ResolvePromise,33,34,0
-block_hint,ResolvePromise,6,7,1
-block_hint,ResolvePromise,17,18,0
-block_hint,ResolvePromise,19,20,1
-block_hint,ResolvePromise,53,54,1
-block_hint,ResolvePromise,49,50,0
-block_hint,ResolvePromise,23,24,0
-block_hint,ProxyConstructor,30,31,1
-block_hint,ProxyConstructor,10,11,0
-block_hint,ProxyConstructor,22,23,1
-block_hint,ProxyConstructor,24,25,0
-block_hint,ProxyConstructor,26,27,1
-block_hint,ProxyConstructor,28,29,0
-block_hint,ProxyConstructor,7,8,1
-block_hint,ProxyConstructor,17,18,1
-block_hint,ProxyConstructor,5,6,1
-block_hint,ProxyConstructor,12,13,1
-block_hint,ProxyGetProperty,153,154,1
-block_hint,ProxyGetProperty,34,35,0
-block_hint,ProxyGetProperty,10,11,0
-block_hint,ProxyGetProperty,89,90,0
-block_hint,ProxyGetProperty,91,92,0
-block_hint,ProxyGetProperty,85,86,1
-block_hint,ProxyGetProperty,87,88,1
-block_hint,ProxyGetProperty,176,177,1
-block_hint,ProxyGetProperty,180,181,0
-block_hint,ProxyGetProperty,118,119,0
-block_hint,ProxyGetProperty,40,41,1
-block_hint,ProxyGetProperty,114,115,1
-block_hint,ProxyGetProperty,24,25,1
-block_hint,ProxyGetProperty,26,27,0
-block_hint,ProxyGetProperty,208,209,1
-block_hint,ProxyGetProperty,198,199,0
-block_hint,ProxyGetProperty,149,150,1
-block_hint,ProxyGetProperty,28,29,0
-block_hint,ProxyGetProperty,167,168,0
-block_hint,ProxyGetProperty,187,188,1
-block_hint,ProxyGetProperty,131,132,1
-block_hint,ProxyGetProperty,169,170,1
-block_hint,ProxyGetProperty,171,172,0
-block_hint,ProxyGetProperty,60,61,0
-block_hint,ReflectGet,20,21,1
-block_hint,ReflectGet,15,16,1
-block_hint,ReflectGet,5,6,1
-block_hint,ReflectGet,7,8,0
-block_hint,ReflectGet,18,19,0
-block_hint,ReflectGet,9,10,0
-block_hint,ReflectHas,8,9,1
-block_hint,ReflectHas,5,6,1
-block_hint,ReflectHas,3,4,0
-block_hint,RegExpPrototypeExec,204,205,1
-block_hint,RegExpPrototypeExec,130,131,1
-block_hint,RegExpPrototypeExec,132,133,1
-block_hint,RegExpPrototypeExec,206,207,1
-block_hint,RegExpPrototypeExec,166,167,1
-block_hint,RegExpPrototypeExec,16,17,1
-block_hint,RegExpPrototypeExec,148,149,1
-block_hint,RegExpPrototypeExec,150,151,0
-block_hint,RegExpPrototypeExec,152,153,0
-block_hint,RegExpPrototypeExec,227,228,0
-block_hint,RegExpPrototypeExec,213,214,0
-block_hint,RegExpPrototypeExec,154,155,0
-block_hint,RegExpPrototypeExec,18,19,1
-block_hint,RegExpPrototypeExec,185,186,0
-block_hint,RegExpPrototypeExec,134,135,0
-block_hint,RegExpPrototypeExec,159,160,0
-block_hint,RegExpPrototypeExec,118,119,1
-block_hint,RegExpPrototypeExec,242,243,0
-block_hint,RegExpPrototypeExec,257,258,1
-block_hint,RegExpPrototypeExec,233,234,1
-block_hint,RegExpPrototypeExec,222,223,1
-block_hint,RegExpPrototypeExec,171,172,0
-block_hint,RegExpPrototypeExec,161,162,0
-block_hint,RegExpPrototypeExec,73,74,1
-block_hint,RegExpPrototypeExec,24,25,1
-block_hint,RegExpPrototypeExec,138,139,0
-block_hint,RegExpPrototypeExec,26,27,1
-block_hint,RegExpPrototypeExec,190,191,1
-block_hint,RegExpPrototypeExec,140,141,1
-block_hint,RegExpPrototypeExec,248,249,1
-block_hint,RegExpPrototypeExec,217,218,0
-block_hint,RegExpPrototypeExec,179,180,0
-block_hint,RegExpPrototypeExec,77,78,0
-block_hint,RegExpPrototypeExec,34,35,1
-block_hint,RegExpPrototypeExec,144,145,1
-block_hint,RegExpPrototypeExec,116,117,0
-block_hint,RegExpPrototypeExec,156,157,0
-block_hint,RegExpMatchFast,359,360,0
-block_hint,RegExpMatchFast,289,290,0
-block_hint,RegExpMatchFast,32,33,1
-block_hint,RegExpMatchFast,326,327,0
-block_hint,RegExpMatchFast,234,235,0
-block_hint,RegExpMatchFast,283,284,0
-block_hint,RegExpMatchFast,460,461,0
-block_hint,RegExpMatchFast,442,443,1
-block_hint,RegExpMatchFast,415,416,1
-block_hint,RegExpMatchFast,291,292,0
-block_hint,RegExpMatchFast,285,286,0
-block_hint,RegExpMatchFast,129,130,1
-block_hint,RegExpMatchFast,236,237,0
-block_hint,RegExpMatchFast,238,239,0
-block_hint,RegExpMatchFast,40,41,1
-block_hint,RegExpMatchFast,331,332,1
-block_hint,RegExpMatchFast,240,241,1
-block_hint,RegExpMatchFast,468,469,1
-block_hint,RegExpMatchFast,396,397,0
-block_hint,RegExpMatchFast,320,321,0
-block_hint,RegExpMatchFast,133,134,0
-block_hint,RegExpMatchFast,48,49,1
-block_hint,RegExpMatchFast,244,245,1
-block_hint,RegExpMatchFast,180,181,0
-block_hint,RegExpMatchFast,259,260,0
-block_hint,RegExpMatchFast,297,298,0
-block_hint,RegExpMatchFast,82,83,1
-block_hint,RegExpMatchFast,84,85,1
-block_hint,RegExpMatchFast,301,302,1
-block_hint,RegExpMatchFast,346,347,0
-block_hint,RegExpMatchFast,444,445,0
-block_hint,RegExpMatchFast,401,402,0
-block_hint,RegExpMatchFast,299,300,0
-block_hint,RegExpMatchFast,86,87,1
-block_hint,RegExpMatchFast,340,341,0
-block_hint,RegExpMatchFast,248,249,0
-block_hint,RegExpMatchFast,275,276,0
-block_hint,RegExpMatchFast,190,191,1
-block_hint,RegExpMatchFast,462,463,0
-block_hint,RegExpMatchFast,480,481,1
-block_hint,RegExpMatchFast,448,449,1
-block_hint,RegExpMatchFast,413,414,1
-block_hint,RegExpMatchFast,303,304,0
-block_hint,RegExpMatchFast,277,278,0
-block_hint,RegExpMatchFast,117,118,1
-block_hint,RegExpMatchFast,342,343,0
-block_hint,RegExpMatchFast,250,251,1
-block_hint,RegExpMatchFast,92,93,1
-block_hint,RegExpMatchFast,366,367,1
-block_hint,RegExpMatchFast,252,253,0
-block_hint,RegExpMatchFast,102,103,1
-block_hint,RegExpMatchFast,306,307,1
-block_hint,RegExpMatchFast,177,178,0
-block_hint,RegExpMatchFast,104,105,0
-block_hint,RegExpMatchFast,106,107,0
-block_hint,RegExpMatchFast,198,199,1
-block_hint,RegExpMatchFast,317,318,0
-block_hint,RegExpMatchFast,108,109,1
-block_hint,RegExpMatchFast,187,188,1
-block_hint,RegExpMatchFast,348,349,1
-block_hint,RegExpMatchFast,94,95,1
-block_hint,RegExpMatchFast,96,97,1
-block_hint,RegExpMatchFast,175,176,0
-block_hint,RegExpMatchFast,98,99,0
-block_hint,RegExpMatchFast,100,101,0
-block_hint,RegExpMatchFast,218,219,1
-block_hint,RegExpMatchFast,309,310,0
-block_hint,RegExpMatchFast,220,221,0
-block_hint,RegExpReplace,263,264,1
-block_hint,RegExpReplace,298,299,1
-block_hint,RegExpReplace,251,252,1
-block_hint,RegExpReplace,149,150,0
-block_hint,RegExpReplace,22,23,1
-block_hint,RegExpReplace,209,210,1
-block_hint,RegExpReplace,151,152,0
-block_hint,RegExpReplace,24,25,1
-block_hint,RegExpReplace,211,212,1
-block_hint,RegExpReplace,213,214,1
-block_hint,RegExpReplace,172,173,1
-block_hint,RegExpReplace,179,180,0
-block_hint,RegExpReplace,259,260,0
-block_hint,RegExpReplace,349,350,0
-block_hint,RegExpReplace,50,51,1
-block_hint,RegExpReplace,229,230,0
-block_hint,RegExpReplace,163,164,0
-block_hint,RegExpReplace,183,184,0
-block_hint,RegExpReplace,109,110,1
-block_hint,RegExpReplace,381,382,0
-block_hint,RegExpReplace,386,387,1
-block_hint,RegExpReplace,353,354,1
-block_hint,RegExpReplace,317,318,1
-block_hint,RegExpReplace,203,204,0
-block_hint,RegExpReplace,185,186,0
-block_hint,RegExpReplace,81,82,1
-block_hint,RegExpReplace,56,57,1
-block_hint,RegExpReplace,58,59,1
-block_hint,RegExpReplace,60,61,1
-block_hint,RegExpReplace,167,168,0
-block_hint,RegExpReplace,62,63,1
-block_hint,RegExpReplace,233,234,1
-block_hint,RegExpReplace,169,170,0
-block_hint,RegExpReplace,64,65,1
-block_hint,RegExpReplace,388,389,1
-block_hint,RegExpReplace,378,379,1
-block_hint,RegExpReplace,330,331,0
-block_hint,RegExpReplace,286,287,0
-block_hint,RegExpReplace,218,219,0
-block_hint,RegExpReplace,100,101,1
-block_hint,RegExpReplace,26,27,1
-block_hint,RegExpReplace,28,29,1
-block_hint,RegExpReplace,102,103,1
-block_hint,RegExpReplace,30,31,0
-block_hint,RegExpReplace,32,33,1
-block_hint,RegExpReplace,34,35,1
-block_hint,RegExpReplace,72,73,1
-block_hint,RegExpReplace,44,45,1
-block_hint,RegExpReplace,161,162,1
-block_hint,RegExpReplace,46,47,1
-block_hint,RegExpReplace,48,49,1
-block_hint,RegExpReplace,236,237,1
-block_hint,RegExpReplace,176,177,1
-block_hint,RegExpReplace,153,154,1
-block_hint,RegExpReplace,36,37,1
-block_hint,RegExpReplace,155,156,1
-block_hint,RegExpReplace,40,41,0
-block_hint,RegExpReplace,254,255,1
-block_hint,RegExpReplace,196,197,1
-block_hint,RegExpReplace,42,43,1
-block_hint,RegExpSearchFast,50,51,0
-block_hint,RegExpSearchFast,6,7,1
-block_hint,RegExpSearchFast,56,57,0
-block_hint,RegExpSearchFast,36,37,0
-block_hint,RegExpSearchFast,46,47,0
-block_hint,RegExpSearchFast,84,85,0
-block_hint,RegExpSearchFast,81,82,1
-block_hint,RegExpSearchFast,73,74,1
-block_hint,RegExpSearchFast,62,63,0
-block_hint,RegExpSearchFast,52,53,0
-block_hint,RegExpSearchFast,58,59,1
-block_hint,RegExpSearchFast,44,45,0
-block_hint,RegExpPrototypeSourceGetter,12,13,1
-block_hint,RegExpPrototypeSourceGetter,9,10,1
-block_hint,RegExpPrototypeSourceGetter,4,5,1
-block_hint,RegExpSplit,179,180,1
-block_hint,RegExpSplit,88,89,0
-block_hint,RegExpSplit,22,23,1
-block_hint,RegExpSplit,149,150,1
-block_hint,RegExpSplit,40,41,1
-block_hint,RegExpSplit,24,25,1
-block_hint,RegExpSplit,185,186,1
-block_hint,RegExpSplit,101,102,1
-block_hint,RegExpSplit,136,137,0
-block_hint,RegExpSplit,26,27,1
-block_hint,RegExpSplit,205,206,0
-block_hint,RegExpSplit,138,139,0
-block_hint,RegExpSplit,162,163,0
-block_hint,RegExpSplit,108,109,1
-block_hint,RegExpSplit,327,328,0
-block_hint,RegExpSplit,322,323,1
-block_hint,RegExpSplit,314,315,1
-block_hint,RegExpSplit,287,288,1
-block_hint,RegExpSplit,181,182,0
-block_hint,RegExpSplit,225,226,0
-block_hint,RegExpSplit,164,165,0
-block_hint,RegExpSplit,46,47,1
-block_hint,RegExpSplit,307,308,0
-block_hint,RegExpSplit,263,264,1
-block_hint,RegExpSplit,207,208,0
-block_hint,RegExpSplit,92,93,0
-block_hint,RegExpSplit,227,228,1
-block_hint,RegExpSplit,194,195,1
-block_hint,RegExpSplit,50,51,0
-block_hint,RegExpSplit,167,168,0
-block_hint,RegExpSplit,141,142,0
-block_hint,RegExpSplit,32,33,1
-block_hint,RegExpSplit,58,59,0
-block_hint,RegExpSplit,280,281,0
-block_hint,RegExpSplit,246,247,0
-block_hint,RegExpSplit,151,152,0
-block_hint,RegExpSplit,241,242,1
-block_hint,RegExpSplit,212,213,0
-block_hint,RegExpSplit,96,97,0
-block_hint,RegExpSplit,232,233,1
-block_hint,RegExpSplit,201,202,1
-block_hint,RegExpSplit,74,75,0
-block_hint,RegExpSplit,175,176,0
-block_hint,RegExpSplit,38,39,1
-block_hint,RegExpSplit,219,220,0
-block_hint,RegExpSplit,244,245,1
-block_hint,RegExpSplit,217,218,0
-block_hint,RegExpSplit,99,100,0
-block_hint,RegExpSplit,277,278,1
-block_hint,RegExpSplit,260,261,1
-block_hint,RegExpSplit,177,178,0
-block_hint,RegExpSplit,103,104,1
-block_hint,RegExpPrototypeTest,112,113,1
-block_hint,RegExpPrototypeTest,50,51,1
-block_hint,RegExpPrototypeTest,52,53,0
-block_hint,RegExpPrototypeTest,137,138,1
-block_hint,RegExpPrototypeTest,54,55,0
-block_hint,RegExpPrototypeTest,8,9,1
-block_hint,RegExpPrototypeTest,93,94,1
-block_hint,RegExpPrototypeTest,56,57,0
-block_hint,RegExpPrototypeTest,10,11,1
-block_hint,RegExpPrototypeTest,145,146,1
-block_hint,RegExpPrototypeTest,127,128,1
-block_hint,RegExpPrototypeTest,85,86,0
-block_hint,RegExpPrototypeTest,14,15,1
-block_hint,RegExpPrototypeTest,99,100,0
-block_hint,RegExpPrototypeTest,59,60,0
-block_hint,RegExpPrototypeTest,73,74,0
-block_hint,RegExpPrototypeTest,42,43,0
-block_hint,RegExpPrototypeTest,161,162,0
-block_hint,RegExpPrototypeTest,164,165,1
-block_hint,RegExpPrototypeTest,151,152,1
-block_hint,RegExpPrototypeTest,143,144,1
-block_hint,RegExpPrototypeTest,87,88,0
-block_hint,RegExpPrototypeTest,75,76,0
-block_hint,RegExpPrototypeTest,29,30,1
-block_hint,RegExpPrototypeTest,37,38,0
-block_hint,RegExpPrototypeTest,65,66,0
-block_hint,RegExpPrototypeTestFast,48,49,0
-block_hint,RegExpPrototypeTestFast,7,8,1
-block_hint,RegExpPrototypeTestFast,56,57,0
-block_hint,RegExpPrototypeTestFast,36,37,0
-block_hint,RegExpPrototypeTestFast,44,45,0
-block_hint,RegExpPrototypeTestFast,82,83,0
-block_hint,RegExpPrototypeTestFast,85,86,1
-block_hint,RegExpPrototypeTestFast,79,80,1
-block_hint,RegExpPrototypeTestFast,73,74,1
-block_hint,RegExpPrototypeTestFast,50,51,0
-block_hint,RegExpPrototypeTestFast,46,47,0
-block_hint,RegExpPrototypeTestFast,19,20,1
-block_hint,RegExpPrototypeTestFast,26,27,0
-block_hint,RegExpPrototypeTestFast,42,43,0
-block_hint,StringPrototypeEndsWith,288,289,1
-block_hint,StringPrototypeEndsWith,271,272,1
-block_hint,StringPrototypeEndsWith,251,252,1
-block_hint,StringPrototypeEndsWith,235,236,1
-block_hint,StringPrototypeEndsWith,174,175,1
-block_hint,StringPrototypeEndsWith,278,279,1
-block_hint,StringPrototypeEndsWith,267,268,1
-block_hint,StringPrototypeEndsWith,253,254,1
-block_hint,StringPrototypeEndsWith,244,245,1
-block_hint,StringPrototypeEndsWith,179,180,1
-block_hint,StringPrototypeEndsWith,29,30,0
-block_hint,StringPrototypeEndsWith,68,69,0
-block_hint,StringPrototypeEndsWith,70,71,0
-block_hint,StringPrototypeEndsWith,185,186,1
-block_hint,StringPrototypeEndsWith,84,85,0
-block_hint,StringPrototypeEndsWith,86,87,0
-block_hint,StringPrototypeEndsWith,164,165,0
-block_hint,StringPrototypeEndsWith,47,48,0
-block_hint,StringPrototypeEndsWith,144,145,0
-block_hint,StringPrototypeEndsWith,35,36,0
-block_hint,StringPrototypeEndsWith,49,50,0
-block_hint,StringPrototypeEndsWith,116,117,0
-block_hint,StringPrototypeIndexOf,39,40,1
-block_hint,StringPrototypeIndexOf,36,37,1
-block_hint,StringPrototypeIndexOf,19,20,1
-block_hint,StringPrototypeIndexOf,8,9,1
-block_hint,StringPrototypeIndexOf,28,29,1
-block_hint,StringPrototypeIndexOf,21,22,1
-block_hint,StringPrototypeIndexOf,33,34,0
-block_hint,StringPrototypeIndexOf,24,25,0
-block_hint,StringPrototypeIndexOf,11,12,0
-block_hint,StringPrototypeIterator,15,16,1
-block_hint,StringPrototypeIterator,12,13,1
-block_hint,StringPrototypeIterator,10,11,1
-block_hint,StringPrototypeIterator,3,4,1
-block_hint,StringPrototypeIterator,8,9,1
-block_hint,StringIteratorPrototypeNext,56,57,1
-block_hint,StringIteratorPrototypeNext,38,39,1
-block_hint,StringIteratorPrototypeNext,40,41,1
-block_hint,StringIteratorPrototypeNext,13,14,0
-block_hint,StringIteratorPrototypeNext,74,75,0
-block_hint,StringIteratorPrototypeNext,64,65,1
-block_hint,StringIteratorPrototypeNext,54,55,0
-block_hint,StringIteratorPrototypeNext,61,62,1
-block_hint,StringIteratorPrototypeNext,50,51,1
-block_hint,StringIteratorPrototypeNext,11,12,1
-block_hint,StringIteratorPrototypeNext,20,21,1
-block_hint,StringIteratorPrototypeNext,9,10,1
-block_hint,StringIteratorPrototypeNext,17,18,1
-block_hint,StringPrototypeMatch,67,68,1
-block_hint,StringPrototypeMatch,39,40,0
-block_hint,StringPrototypeMatch,99,100,1
-block_hint,StringPrototypeMatch,88,89,0
-block_hint,StringPrototypeMatch,69,70,1
-block_hint,StringPrototypeMatch,49,50,0
-block_hint,StringPrototypeMatch,6,7,1
-block_hint,StringPrototypeMatch,71,72,1
-block_hint,StringPrototypeMatch,51,52,0
-block_hint,StringPrototypeMatch,8,9,1
-block_hint,StringPrototypeMatch,83,84,1
-block_hint,StringPrototypeMatch,75,76,1
-block_hint,StringPrototypeMatch,43,44,1
-block_hint,StringPrototypeSearch,67,68,1
-block_hint,StringPrototypeSearch,39,40,0
-block_hint,StringPrototypeSearch,99,100,1
-block_hint,StringPrototypeSearch,88,89,0
-block_hint,StringPrototypeSearch,69,70,1
-block_hint,StringPrototypeSearch,49,50,0
-block_hint,StringPrototypeSearch,6,7,1
-block_hint,StringPrototypeSearch,71,72,1
-block_hint,StringPrototypeSearch,51,52,0
-block_hint,StringPrototypeSearch,8,9,1
-block_hint,StringPrototypeSearch,83,84,1
-block_hint,StringPrototypeSearch,75,76,1
-block_hint,StringPrototypeSearch,43,44,1
-block_hint,StringPrototypeSlice,167,168,1
-block_hint,StringPrototypeSlice,136,137,1
-block_hint,StringPrototypeSlice,103,104,1
-block_hint,StringPrototypeSlice,189,190,1
-block_hint,StringPrototypeSlice,175,176,0
-block_hint,StringPrototypeSlice,199,200,0
-block_hint,StringPrototypeSlice,196,197,0
-block_hint,StringPrototypeSlice,183,184,1
-block_hint,StringPrototypeSlice,179,180,1
-block_hint,StringPrototypeSlice,187,188,0
-block_hint,StringPrototypeSlice,170,171,0
-block_hint,StringPrototypeSlice,138,139,1
-block_hint,StringPrototypeSlice,31,32,0
-block_hint,StringPrototypeSlice,68,69,1
-block_hint,StringPrototypeSlice,63,64,1
-block_hint,StringPrototypeSlice,61,62,1
-block_hint,StringPrototypeSlice,124,125,0
-block_hint,StringPrototypeSlice,21,22,0
-block_hint,StringPrototypeSlice,23,24,0
-block_hint,StringPrototypeSlice,128,129,1
-block_hint,StringPrototypeSlice,115,116,1
-block_hint,StringPrototypeSlice,40,41,0
-block_hint,StringPrototypeSlice,19,20,0
-block_hint,StringPrototypeSlice,130,131,1
-block_hint,StringPrototypeSlice,117,118,1
-block_hint,StringPrototypeSlice,44,45,0
-block_hint,StringPrototypeSlice,154,155,0
-block_hint,StringPrototypeSlice,148,149,0
-block_hint,StringPrototypeSlice,36,37,1
-block_hint,StringPrototypeSlice,33,34,0
-block_hint,StringPrototypeStartsWith,288,289,1
-block_hint,StringPrototypeStartsWith,271,272,1
-block_hint,StringPrototypeStartsWith,251,252,1
-block_hint,StringPrototypeStartsWith,235,236,1
-block_hint,StringPrototypeStartsWith,174,175,1
-block_hint,StringPrototypeStartsWith,278,279,1
-block_hint,StringPrototypeStartsWith,267,268,1
-block_hint,StringPrototypeStartsWith,253,254,1
-block_hint,StringPrototypeStartsWith,244,245,1
-block_hint,StringPrototypeStartsWith,179,180,1
-block_hint,StringPrototypeStartsWith,29,30,0
-block_hint,StringPrototypeStartsWith,68,69,0
-block_hint,StringPrototypeStartsWith,70,71,0
-block_hint,StringPrototypeStartsWith,185,186,1
-block_hint,StringPrototypeStartsWith,84,85,0
-block_hint,StringPrototypeStartsWith,86,87,0
-block_hint,StringPrototypeStartsWith,164,165,0
-block_hint,StringPrototypeStartsWith,47,48,0
-block_hint,StringPrototypeStartsWith,35,36,0
-block_hint,StringPrototypeStartsWith,49,50,1
-block_hint,StringPrototypeStartsWith,116,117,1
-block_hint,StringPrototypeSubstr,163,164,1
-block_hint,StringPrototypeSubstr,141,142,1
-block_hint,StringPrototypeSubstr,103,104,1
-block_hint,StringPrototypeSubstr,182,183,1
-block_hint,StringPrototypeSubstr,171,172,0
-block_hint,StringPrototypeSubstr,192,193,0
-block_hint,StringPrototypeSubstr,189,190,0
-block_hint,StringPrototypeSubstr,166,167,0
-block_hint,StringPrototypeSubstr,148,149,0
-block_hint,StringPrototypeSubstr,120,121,0
-block_hint,StringPrototypeSubstr,31,32,0
-block_hint,StringPrototypeSubstr,61,62,1
-block_hint,StringPrototypeSubstr,129,130,0
-block_hint,StringPrototypeSubstr,19,20,0
-block_hint,StringPrototypeSubstr,135,136,1
-block_hint,StringPrototypeSubstr,114,115,1
-block_hint,StringPrototypeSubstr,44,45,0
-block_hint,StringPrototypeSubstr,153,154,0
-block_hint,StringPrototypeSubstr,36,37,1
-block_hint,StringPrototypeSubstr,33,34,0
-block_hint,StringPrototypeSubstring,147,148,1
-block_hint,StringPrototypeSubstring,127,128,1
-block_hint,StringPrototypeSubstring,99,100,1
-block_hint,StringPrototypeSubstring,182,183,1
-block_hint,StringPrototypeSubstring,169,170,0
-block_hint,StringPrototypeSubstring,186,187,0
-block_hint,StringPrototypeSubstring,180,181,0
-block_hint,StringPrototypeSubstring,171,172,0
-block_hint,StringPrototypeSubstring,167,168,0
-block_hint,StringPrototypeSubstring,160,161,0
-block_hint,StringPrototypeSubstring,151,152,0
-block_hint,StringPrototypeSubstring,131,132,0
-block_hint,StringPrototypeSubstring,89,90,0
-block_hint,StringPrototypeSubstring,65,66,1
-block_hint,StringPrototypeSubstring,101,102,1
-block_hint,StringPrototypeSubstring,58,59,1
-block_hint,StringPrototypeSubstring,115,116,0
-block_hint,StringPrototypeSubstring,85,86,1
-block_hint,StringPrototypeSubstring,17,18,0
-block_hint,StringPrototypeSubstring,121,122,1
-block_hint,StringPrototypeSubstring,109,110,1
-block_hint,StringPrototypeSubstring,42,43,0
-block_hint,StringPrototypeSubstring,54,55,0
-block_hint,StringPrototypeSubstring,138,139,0
-block_hint,StringPrototypeSubstring,104,105,1
-block_hint,StringPrototypeSubstring,34,35,1
-block_hint,StringPrototypeTrim,462,463,1
-block_hint,StringPrototypeTrim,263,264,1
-block_hint,StringPrototypeTrim,186,187,1
-block_hint,StringPrototypeTrim,188,189,0
-block_hint,StringPrototypeTrim,436,437,0
-block_hint,StringPrototypeTrim,265,266,1
-block_hint,StringPrototypeTrim,156,157,0
-block_hint,StringPrototypeTrim,158,159,0
-block_hint,StringPrototypeTrim,247,248,0
-block_hint,StringPrototypeTrim,63,64,1
-block_hint,StringPrototypeTrim,362,363,1
-block_hint,StringPrototypeTrim,83,84,0
-block_hint,StringPrototypeTrim,249,250,0
-block_hint,StringPrototypeTrim,65,66,1
-block_hint,StringPrototypeTrim,388,389,0
-block_hint,StringPrototypeTrim,390,391,1
-block_hint,StringPrototypeTrim,128,129,0
-block_hint,StringPrototypeTrim,85,86,0
-block_hint,StringPrototypeTrim,92,93,0
-block_hint,StringPrototypeTrim,285,286,0
-block_hint,StringPrototypeTrim,178,179,1
-block_hint,StringPrototypeTrim,430,431,0
-block_hint,StringPrototypeTrim,251,252,0
-block_hint,StringPrototypeTrim,69,70,0
-block_hint,StringPrototypeTrim,71,72,0
-block_hint,StringPrototypeTrim,446,447,1
-block_hint,StringPrototypeTrim,416,417,1
-block_hint,StringPrototypeTrim,132,133,0
-block_hint,StringPrototypeTrim,152,153,0
-block_hint,StringPrototypeTrim,154,155,0
-block_hint,StringPrototypeTrim,239,240,0
-block_hint,StringPrototypeTrim,47,48,1
-block_hint,StringPrototypeTrim,298,299,1
-block_hint,StringPrototypeTrim,241,242,0
-block_hint,StringPrototypeTrim,49,50,1
-block_hint,StringPrototypeTrim,326,327,1
-block_hint,StringPrototypeTrim,81,82,0
-block_hint,StringPrototypeTrim,87,88,0
-block_hint,StringPrototypeTrim,283,284,1
-block_hint,StringPrototypeTrim,172,173,1
-block_hint,StringPrototypeTrim,428,429,0
-block_hint,StringPrototypeTrim,243,244,1
-block_hint,StringPrototypeTrim,51,52,0
-block_hint,StringPrototypeTrim,440,441,1
-block_hint,StringPrototypeTrim,354,355,1
-block_hint,StringPrototypeTrim,112,113,0
-block_hint,StringPrototypeTrim,466,467,0
-block_hint,StringPrototypeTrim,287,288,1
-block_hint,StringPrototypeTrim,97,98,1
-block_hint,StringPrototypeTrim,89,90,0
-block_hint,SymbolPrototypeToString,9,10,1
-block_hint,SymbolPrototypeToString,11,12,1
-block_hint,SymbolPrototypeToString,5,6,0
-block_hint,SymbolPrototypeToString,7,8,1
-block_hint,CreateTypedArray,567,568,0
-block_hint,CreateTypedArray,597,598,0
-block_hint,CreateTypedArray,540,541,0
-block_hint,CreateTypedArray,454,455,0
-block_hint,CreateTypedArray,333,334,1
-block_hint,CreateTypedArray,335,336,1
-block_hint,CreateTypedArray,640,641,0
-block_hint,CreateTypedArray,489,490,1
-block_hint,CreateTypedArray,487,488,1
-block_hint,CreateTypedArray,385,386,1
-block_hint,CreateTypedArray,546,547,0
-block_hint,CreateTypedArray,621,622,0
-block_hint,CreateTypedArray,544,545,0
-block_hint,CreateTypedArray,458,459,0
-block_hint,CreateTypedArray,396,397,0
-block_hint,CreateTypedArray,398,399,0
-block_hint,CreateTypedArray,388,389,0
-block_hint,CreateTypedArray,104,105,1
-block_hint,CreateTypedArray,106,107,1
-block_hint,CreateTypedArray,648,649,1
-block_hint,CreateTypedArray,600,601,1
-block_hint,CreateTypedArray,646,647,1
-block_hint,CreateTypedArray,618,619,1
-block_hint,CreateTypedArray,491,492,0
-block_hint,CreateTypedArray,523,524,1
-block_hint,CreateTypedArray,362,363,0
-block_hint,CreateTypedArray,236,237,0
-block_hint,CreateTypedArray,301,302,0
-block_hint,CreateTypedArray,281,282,1
-block_hint,CreateTypedArray,283,284,1
-block_hint,CreateTypedArray,493,494,0
-block_hint,CreateTypedArray,525,526,1
-block_hint,CreateTypedArray,364,365,0
-block_hint,CreateTypedArray,252,253,0
-block_hint,CreateTypedArray,303,304,0
-block_hint,CreateTypedArray,480,481,0
-block_hint,CreateTypedArray,482,483,0
-block_hint,CreateTypedArray,634,635,0
-block_hint,CreateTypedArray,498,499,1
-block_hint,CreateTypedArray,496,497,1
-block_hint,CreateTypedArray,400,401,1
-block_hint,CreateTypedArray,506,507,0
-block_hint,CreateTypedArray,500,501,0
-block_hint,CreateTypedArray,403,404,0
-block_hint,CreateTypedArray,152,153,1
-block_hint,CreateTypedArray,342,343,0
-block_hint,CreateTypedArray,154,155,1
-block_hint,CreateTypedArray,652,653,1
-block_hint,CreateTypedArray,607,608,1
-block_hint,CreateTypedArray,650,651,1
-block_hint,CreateTypedArray,624,625,1
-block_hint,CreateTypedArray,502,503,0
-block_hint,CreateTypedArray,519,520,1
-block_hint,CreateTypedArray,358,359,0
-block_hint,CreateTypedArray,204,205,0
-block_hint,CreateTypedArray,627,628,0
-block_hint,CreateTypedArray,166,167,1
-block_hint,CreateTypedArray,291,292,1
-block_hint,CreateTypedArray,293,294,1
-block_hint,CreateTypedArray,504,505,0
-block_hint,CreateTypedArray,521,522,1
-block_hint,CreateTypedArray,360,361,0
-block_hint,CreateTypedArray,220,221,0
-block_hint,CreateTypedArray,629,630,0
-block_hint,CreateTypedArray,513,514,0
-block_hint,CreateTypedArray,508,509,0
-block_hint,CreateTypedArray,465,466,0
-block_hint,CreateTypedArray,348,349,0
-block_hint,CreateTypedArray,419,420,1
-block_hint,CreateTypedArray,352,353,1
-block_hint,CreateTypedArray,350,351,1
-block_hint,CreateTypedArray,421,422,0
-block_hint,CreateTypedArray,656,657,0
-block_hint,CreateTypedArray,609,610,0
-block_hint,CreateTypedArray,529,530,1
-block_hint,CreateTypedArray,527,528,1
-block_hint,CreateTypedArray,433,434,1
-block_hint,CreateTypedArray,613,614,0
-block_hint,CreateTypedArray,537,538,0
-block_hint,CreateTypedArray,447,448,0
-block_hint,CreateTypedArray,316,317,0
-block_hint,CreateTypedArray,611,612,0
-block_hint,CreateTypedArray,535,536,0
-block_hint,CreateTypedArray,443,444,0
-block_hint,CreateTypedArray,265,266,0
-block_hint,CreateTypedArray,592,593,0
-block_hint,CreateTypedArray,323,324,0
-block_hint,CreateTypedArray,325,326,0
-block_hint,CreateTypedArray,372,373,0
-block_hint,CreateTypedArray,374,375,0
-block_hint,CreateTypedArray,318,319,0
-block_hint,CreateTypedArray,328,329,0
-block_hint,CreateTypedArray,321,322,0
-block_hint,CreateTypedArray,474,475,0
-block_hint,CreateTypedArray,517,518,1
-block_hint,CreateTypedArray,356,357,0
-block_hint,CreateTypedArray,188,189,0
-block_hint,CreateTypedArray,451,452,0
-block_hint,CreateTypedArray,273,274,0
-block_hint,TypedArrayFrom,246,247,1
-block_hint,TypedArrayFrom,225,226,1
-block_hint,TypedArrayFrom,202,203,1
-block_hint,TypedArrayFrom,159,160,1
-block_hint,TypedArrayFrom,89,90,1
-block_hint,TypedArrayFrom,91,92,1
-block_hint,TypedArrayFrom,191,192,1
-block_hint,TypedArrayFrom,182,183,0
-block_hint,TypedArrayFrom,143,144,0
-block_hint,TypedArrayFrom,102,103,1
-block_hint,TypedArrayFrom,104,105,1
-block_hint,TypedArrayFrom,260,261,1
-block_hint,TypedArrayFrom,262,263,0
-block_hint,TypedArrayFrom,248,249,0
-block_hint,TypedArrayFrom,235,236,1
-block_hint,TypedArrayFrom,237,238,0
-block_hint,TypedArrayFrom,215,216,1
-block_hint,TypedArrayFrom,193,194,1
-block_hint,TypedArrayFrom,169,170,0
-block_hint,TypedArrayFrom,171,172,0
-block_hint,TypedArrayFrom,256,257,0
-block_hint,TypedArrayFrom,230,231,1
-block_hint,TypedArrayFrom,185,186,0
-block_hint,TypedArrayFrom,108,109,1
-block_hint,TypedArrayFrom,110,111,1
-block_hint,TypedArrayFrom,177,178,0
-block_hint,TypedArrayFrom,149,150,0
-block_hint,TypedArrayFrom,120,121,0
-block_hint,TypedArrayFrom,57,58,0
-block_hint,TypedArrayFrom,155,156,0
-block_hint,TypedArrayFrom,59,60,0
-block_hint,TypedArrayFrom,137,138,1
-block_hint,TypedArrayFrom,61,62,1
-block_hint,TypedArrayPrototypeSet,196,197,1
-block_hint,TypedArrayPrototypeSet,104,105,1
-block_hint,TypedArrayPrototypeSet,106,107,1
-block_hint,TypedArrayPrototypeSet,249,250,1
-block_hint,TypedArrayPrototypeSet,282,283,0
-block_hint,TypedArrayPrototypeSet,268,269,0
-block_hint,TypedArrayPrototypeSet,256,257,0
-block_hint,TypedArrayPrototypeSet,223,224,0
-block_hint,TypedArrayPrototypeSet,155,156,0
-block_hint,TypedArrayPrototypeSet,198,199,0
-block_hint,TypedArrayPrototypeSet,200,201,0
-block_hint,TypedArrayPrototypeSet,167,168,0
-block_hint,TypedArrayPrototypeSet,278,279,1
-block_hint,TypedArrayPrototypeSet,265,266,1
-block_hint,TypedArrayPrototypeSet,244,245,1
-block_hint,TypedArrayPrototypeSet,211,212,0
-block_hint,TypedArrayPrototypeSet,213,214,0
-block_hint,TypedArrayPrototypeSet,171,172,0
-block_hint,TypedArrayPrototypeSet,159,160,0
-block_hint,TypedArrayPrototypeSet,179,180,0
-block_hint,TypedArrayPrototypeSet,123,124,0
-block_hint,TypedArrayPrototypeSet,185,186,1
-block_hint,TypedArrayPrototypeSet,91,92,1
-block_hint,TypedArrayPrototypeSet,81,82,0
-block_hint,TypedArrayPrototypeSet,83,84,0
-block_hint,TypedArrayPrototypeSet,85,86,0
-block_hint,TypedArrayPrototypeSet,87,88,0
-block_hint,TypedArrayPrototypeSet,187,188,0
-block_hint,TypedArrayPrototypeSet,146,147,0
-block_hint,TypedArrayPrototypeSubArray,129,130,1
-block_hint,TypedArrayPrototypeSubArray,82,83,1
-block_hint,TypedArrayPrototypeSubArray,84,85,1
-block_hint,TypedArrayPrototypeSubArray,159,160,1
-block_hint,TypedArrayPrototypeSubArray,151,152,0
-block_hint,TypedArrayPrototypeSubArray,131,132,0
-block_hint,TypedArrayPrototypeSubArray,133,134,0
-block_hint,TypedArrayPrototypeSubArray,210,211,0
-block_hint,TypedArrayPrototypeSubArray,190,191,1
-block_hint,TypedArrayPrototypeSubArray,170,171,0
-block_hint,TypedArrayPrototypeSubArray,218,219,0
-block_hint,TypedArrayPrototypeSubArray,205,206,0
-block_hint,TypedArrayPrototypeSubArray,196,197,0
-block_hint,TypedArrayPrototypeSubArray,186,187,1
-block_hint,TypedArrayPrototypeSubArray,154,155,0
-block_hint,TypedArrayPrototypeSubArray,137,138,0
-block_hint,TypedArrayPrototypeSubArray,165,166,0
-block_hint,TypedArrayPrototypeSubArray,216,217,0
-block_hint,TypedArrayPrototypeSubArray,203,204,0
-block_hint,TypedArrayPrototypeSubArray,192,193,0
-block_hint,TypedArrayPrototypeSubArray,149,150,1
-block_hint,TypedArrayPrototypeSubArray,124,125,0
-block_hint,TypedArrayPrototypeSubArray,102,103,0
-block_hint,TypedArrayPrototypeSubArray,104,105,0
-block_hint,TypedArrayPrototypeSubArray,115,116,0
-block_hint,TypedArrayPrototypeSubArray,63,64,1
-block_hint,TypedArrayPrototypeSubArray,65,66,1
-block_hint,TypedArrayPrototypeSubArray,145,146,1
-block_hint,TypedArrayPrototypeSubArray,80,81,0
-block_hint,TypedArrayPrototypeSubArray,117,118,0
-block_hint,TypedArrayPrototypeSubArray,90,91,1
-block_hint,TypedArrayPrototypeSubArray,92,93,1
-block_hint,TypedArrayPrototypeSubArray,119,120,0
-block_hint,TypedArrayPrototypeSubArray,94,95,1
-block_hint,TypedArrayPrototypeSubArray,96,97,1
-block_hint,TypedArrayPrototypeSubArray,69,70,1
-block_hint,TypedArrayPrototypeSubArray,98,99,1
-block_hint,TypedArrayPrototypeSubArray,100,101,1
-block_hint,TypedArrayPrototypeSubArray,73,74,0
-block_hint,NewSloppyArgumentsElements,44,45,1
-block_hint,NewSloppyArgumentsElements,24,25,1
-block_hint,NewSloppyArgumentsElements,33,34,0
-block_hint,NewSloppyArgumentsElements,14,15,0
-block_hint,NewSloppyArgumentsElements,16,17,0
-block_hint,NewSloppyArgumentsElements,46,47,1
-block_hint,NewSloppyArgumentsElements,36,37,1
-block_hint,NewSloppyArgumentsElements,18,19,0
-block_hint,NewSloppyArgumentsElements,48,49,0
-block_hint,NewStrictArgumentsElements,9,10,0
-block_hint,NewStrictArgumentsElements,20,21,0
-block_hint,NewRestArgumentsElements,25,26,0
-block_hint,NewRestArgumentsElements,11,12,1
-block_hint,NewRestArgumentsElements,16,17,0
-block_hint,NewRestArgumentsElements,5,6,0
-block_hint,NewRestArgumentsElements,7,8,0
-block_hint,NewRestArgumentsElements,23,24,1
-block_hint,NewRestArgumentsElements,19,20,1
-block_hint,NewRestArgumentsElements,9,10,0
-block_hint,NewRestArgumentsElements,21,22,0
-block_hint,FastNewSloppyArguments,41,42,1
-block_hint,FastNewSloppyArguments,43,44,0
-block_hint,FastNewSloppyArguments,101,102,1
-block_hint,FastNewSloppyArguments,45,46,0
-block_hint,FastNewSloppyArguments,13,14,0
-block_hint,FastNewSloppyArguments,15,16,0
-block_hint,FastNewSloppyArguments,75,76,1
-block_hint,FastNewSloppyArguments,59,60,1
-block_hint,FastNewSloppyArguments,17,18,0
-block_hint,FastNewSloppyArguments,61,62,0
-block_hint,FastNewSloppyArguments,81,82,1
-block_hint,FastNewSloppyArguments,47,48,0
-block_hint,FastNewSloppyArguments,19,20,0
-block_hint,FastNewSloppyArguments,21,22,0
-block_hint,FastNewSloppyArguments,71,72,1
-block_hint,FastNewSloppyArguments,55,56,1
-block_hint,FastNewSloppyArguments,23,24,0
-block_hint,FastNewSloppyArguments,73,74,0
-block_hint,FastNewSloppyArguments,25,26,1
-block_hint,FastNewSloppyArguments,51,52,1
-block_hint,FastNewSloppyArguments,27,28,1
-block_hint,FastNewSloppyArguments,29,30,0
-block_hint,FastNewSloppyArguments,31,32,0
-block_hint,FastNewSloppyArguments,77,78,1
-block_hint,FastNewSloppyArguments,63,64,1
-block_hint,FastNewSloppyArguments,33,34,0
-block_hint,FastNewSloppyArguments,35,36,1
-block_hint,FastNewSloppyArguments,53,54,1
-block_hint,FastNewStrictArguments,16,17,1
-block_hint,FastNewStrictArguments,18,19,0
-block_hint,FastNewStrictArguments,20,21,0
-block_hint,FastNewStrictArguments,7,8,0
-block_hint,FastNewStrictArguments,9,10,0
-block_hint,FastNewStrictArguments,31,32,1
-block_hint,FastNewStrictArguments,25,26,1
-block_hint,FastNewStrictArguments,11,12,0
-block_hint,FastNewStrictArguments,27,28,0
-block_hint,FastNewStrictArguments,13,14,1
-block_hint,FastNewStrictArguments,23,24,1
-block_hint,FastNewRestArguments,16,17,1
-block_hint,FastNewRestArguments,18,19,0
-block_hint,FastNewRestArguments,34,35,1
-block_hint,FastNewRestArguments,7,8,1
-block_hint,FastNewRestArguments,21,22,0
-block_hint,FastNewRestArguments,9,10,0
-block_hint,FastNewRestArguments,11,12,0
-block_hint,FastNewRestArguments,32,33,1
-block_hint,FastNewRestArguments,25,26,1
-block_hint,FastNewRestArguments,13,14,0
-block_hint,FastNewRestArguments,27,28,0
-block_hint,FastNewRestArguments,23,24,1
-block_hint,StringSlowFlatten,35,36,1
-block_hint,StringSlowFlatten,20,21,1
-block_hint,StringSlowFlatten,4,5,0
-block_hint,StringSlowFlatten,30,31,1
-block_hint,StringSlowFlatten,22,23,1
-block_hint,StringIndexOf,160,161,0
-block_hint,StringIndexOf,112,113,1
-block_hint,StringIndexOf,125,126,1
-block_hint,StringIndexOf,91,92,0
-block_hint,StringIndexOf,117,118,1
-block_hint,StringIndexOf,136,137,0
-block_hint,StringIndexOf,44,45,0
-block_hint,StringIndexOf,46,47,0
-block_hint,StringIndexOf,133,134,0
-block_hint,StringIndexOf,76,77,0
-block_hint,StringIndexOf,78,79,0
-block_hint,StringIndexOf,72,73,0
-block_hint,StringIndexOf,74,75,0
-block_hint,StringIndexOf,40,41,0
-block_hint,StringIndexOf,42,43,0
-block_hint,StringIndexOf,127,128,1
-block_hint,StringIndexOf,56,57,0
-block_hint,StringIndexOf,58,59,0
-block_hint,Load_FastSmiElements_0,2,3,1
-block_hint,Load_FastObjectElements_0,2,3,1
-block_hint,Store_FastSmiElements_0,2,3,1
-block_hint,Store_FastObjectElements_0,2,3,1
-block_hint,SortCompareDefault,8,9,1
-block_hint,SortCompareDefault,20,21,1
-block_hint,SortCompareDefault,17,18,1
-block_hint,SortCompareDefault,14,15,1
-block_hint,SortCompareDefault,11,12,1
-block_hint,SortCompareDefault,6,7,1
-block_hint,SortCompareUserFn,9,10,0
-block_hint,SortCompareUserFn,5,6,0
-block_hint,Copy,17,18,1
-block_hint,Copy,9,10,1
-block_hint,Copy,11,12,1
-block_hint,Copy,5,6,1
-block_hint,Copy,7,8,1
-block_hint,MergeAt,13,14,1
-block_hint,MergeAt,15,16,1
-block_hint,MergeAt,17,18,1
-block_hint,MergeAt,19,20,1
-block_hint,MergeAt,140,141,0
-block_hint,MergeAt,29,30,1
-block_hint,MergeAt,31,32,0
-block_hint,MergeAt,33,34,1
-block_hint,MergeAt,35,36,1
-block_hint,MergeAt,123,124,0
-block_hint,MergeAt,236,237,1
-block_hint,MergeAt,225,226,1
-block_hint,MergeAt,69,70,1
-block_hint,MergeAt,71,72,1
-block_hint,MergeAt,150,151,1
-block_hint,MergeAt,103,104,0
-block_hint,MergeAt,73,74,1
-block_hint,MergeAt,75,76,1
-block_hint,MergeAt,227,228,0
-block_hint,MergeAt,81,82,1
-block_hint,MergeAt,83,84,1
-block_hint,MergeAt,198,199,0
-block_hint,MergeAt,134,135,0
-block_hint,MergeAt,77,78,1
-block_hint,MergeAt,79,80,1
-block_hint,MergeAt,196,197,1
-block_hint,MergeAt,132,133,0
-block_hint,MergeAt,152,153,0
-block_hint,MergeAt,182,183,1
-block_hint,MergeAt,85,86,1
-block_hint,MergeAt,87,88,1
-block_hint,MergeAt,89,90,1
-block_hint,MergeAt,147,148,0
-block_hint,MergeAt,91,92,1
-block_hint,MergeAt,93,94,1
-block_hint,MergeAt,95,96,1
-block_hint,MergeAt,107,108,1
-block_hint,MergeAt,194,195,1
-block_hint,MergeAt,97,98,1
-block_hint,MergeAt,99,100,1
-block_hint,MergeAt,230,231,1
-block_hint,MergeAt,116,117,0
-block_hint,MergeAt,232,233,1
-block_hint,MergeAt,220,221,1
-block_hint,MergeAt,37,38,1
-block_hint,MergeAt,39,40,1
-block_hint,MergeAt,154,155,1
-block_hint,MergeAt,109,110,0
-block_hint,MergeAt,41,42,1
-block_hint,MergeAt,43,44,1
-block_hint,MergeAt,222,223,0
-block_hint,MergeAt,49,50,1
-block_hint,MergeAt,51,52,1
-block_hint,MergeAt,202,203,0
-block_hint,MergeAt,138,139,0
-block_hint,MergeAt,45,46,1
-block_hint,MergeAt,47,48,1
-block_hint,MergeAt,200,201,1
-block_hint,MergeAt,136,137,0
-block_hint,MergeAt,111,112,0
-block_hint,MergeAt,165,166,1
-block_hint,MergeAt,53,54,1
-block_hint,MergeAt,207,208,0
-block_hint,MergeAt,169,170,0
-block_hint,MergeAt,55,56,1
-block_hint,MergeAt,57,58,1
-block_hint,MergeAt,143,144,1
-block_hint,MergeAt,59,60,1
-block_hint,MergeAt,173,174,0
-block_hint,MergeAt,61,62,1
-block_hint,MergeAt,63,64,1
-block_hint,MergeAt,113,114,0
-block_hint,MergeAt,192,193,1
-block_hint,MergeAt,65,66,1
-block_hint,MergeAt,67,68,1
-block_hint,GallopLeft,11,12,1
-block_hint,GallopLeft,47,48,0
-block_hint,GallopLeft,15,16,1
-block_hint,GallopLeft,63,64,0
-block_hint,GallopLeft,29,30,0
-block_hint,GallopLeft,41,42,0
-block_hint,GallopLeft,13,14,1
-block_hint,GallopLeft,65,66,0
-block_hint,GallopLeft,31,32,0
-block_hint,GallopLeft,39,40,0
-block_hint,GallopLeft,17,18,1
-block_hint,GallopLeft,61,62,0
-block_hint,GallopRight,11,12,1
-block_hint,GallopRight,47,48,0
-block_hint,GallopRight,35,36,1
-block_hint,GallopRight,15,16,1
-block_hint,GallopRight,63,64,0
-block_hint,GallopRight,29,30,0
-block_hint,GallopRight,41,42,0
-block_hint,GallopRight,13,14,1
-block_hint,GallopRight,65,66,0
-block_hint,GallopRight,31,32,0
-block_hint,GallopRight,39,40,0
-block_hint,GallopRight,17,18,1
-block_hint,GallopRight,61,62,0
-block_hint,ArrayTimSort,120,121,0
-block_hint,ArrayTimSort,240,241,0
-block_hint,ArrayTimSort,227,228,0
-block_hint,ArrayTimSort,122,123,0
-block_hint,ArrayTimSort,163,164,0
-block_hint,ArrayTimSort,140,141,0
-block_hint,ArrayTimSort,33,34,1
-block_hint,ArrayTimSort,93,94,0
-block_hint,ArrayTimSort,95,96,0
-block_hint,ArrayTimSort,143,144,0
-block_hint,ArrayTimSort,35,36,1
-block_hint,ArrayTimSort,37,38,1
-block_hint,ArrayTimSort,214,215,0
-block_hint,ArrayTimSort,145,146,1
-block_hint,ArrayTimSort,39,40,1
-block_hint,ArrayTimSort,218,219,0
-block_hint,ArrayTimSort,216,217,0
-block_hint,ArrayTimSort,41,42,1
-block_hint,ArrayTimSort,43,44,1
-block_hint,ArrayTimSort,45,46,1
-block_hint,ArrayTimSort,134,135,0
-block_hint,ArrayTimSort,47,48,1
-block_hint,ArrayTimSort,49,50,1
-block_hint,ArrayTimSort,222,223,0
-block_hint,ArrayTimSort,51,52,1
-block_hint,ArrayTimSort,53,54,1
-block_hint,ArrayTimSort,55,56,1
-block_hint,ArrayTimSort,57,58,1
-block_hint,ArrayTimSort,59,60,1
-block_hint,ArrayTimSort,61,62,1
-block_hint,ArrayTimSort,63,64,1
-block_hint,ArrayTimSort,65,66,1
-block_hint,ArrayTimSort,67,68,1
-block_hint,ArrayTimSort,69,70,1
-block_hint,ArrayTimSort,71,72,1
-block_hint,ArrayTimSort,157,158,1
-block_hint,ArrayTimSort,73,74,1
-block_hint,ArrayTimSort,75,76,1
-block_hint,ArrayTimSort,204,205,0
-block_hint,ArrayTimSort,77,78,1
-block_hint,ArrayTimSort,79,80,1
-block_hint,ArrayTimSort,209,210,0
-block_hint,ArrayTimSort,81,82,1
-block_hint,ArrayTimSort,83,84,1
-block_hint,ArrayTimSort,186,187,0
-block_hint,ArrayTimSort,236,237,1
-block_hint,ArrayTimSort,238,239,1
-block_hint,ArrayTimSort,211,212,1
-block_hint,ArrayTimSort,161,162,1
-block_hint,ArrayTimSort,85,86,1
-block_hint,ArrayTimSort,243,244,1
-block_hint,ArrayTimSort,230,231,0
-block_hint,ArrayTimSort,188,189,1
-block_hint,ArrayTimSort,138,139,0
-block_hint,ArrayTimSort,87,88,1
-block_hint,ArrayTimSort,113,114,0
-block_hint,ArrayTimSort,89,90,0
-block_hint,ArrayPrototypeSort,106,107,1
-block_hint,ArrayPrototypeSort,80,81,1
-block_hint,ArrayPrototypeSort,39,40,1
-block_hint,ArrayPrototypeSort,70,71,0
-block_hint,ArrayPrototypeSort,41,42,1
-block_hint,ArrayPrototypeSort,82,83,1
-block_hint,ArrayPrototypeSort,84,85,1
-block_hint,ArrayPrototypeSort,63,64,0
-block_hint,ArrayPrototypeSort,27,28,0
-block_hint,ArrayPrototypeSort,121,122,0
-block_hint,ArrayPrototypeSort,101,102,1
-block_hint,ArrayPrototypeSort,73,74,1
-block_hint,ArrayPrototypeSort,51,52,1
-block_hint,ArrayPrototypeSort,15,16,1
-block_hint,ArrayPrototypeSort,95,96,1
-block_hint,ArrayPrototypeSort,75,76,0
-block_hint,ArrayPrototypeSort,53,54,0
-block_hint,ArrayPrototypeSort,139,140,0
-block_hint,ArrayPrototypeSort,142,143,0
-block_hint,ArrayPrototypeSort,132,133,0
-block_hint,ArrayPrototypeSort,128,129,0
-block_hint,ArrayPrototypeSort,103,104,0
-block_hint,ArrayPrototypeSort,114,115,1
-block_hint,ArrayPrototypeSort,118,119,1
-block_hint,ArrayPrototypeSort,77,78,1
-block_hint,ArrayPrototypeSort,33,34,0
-block_hint,ArrayPrototypeSort,98,99,1
-block_hint,ArrayPrototypeSort,91,92,1
-block_hint,ArrayPrototypeSort,56,57,1
-block_hint,StringFastLocaleCompare,315,316,1
-block_hint,StringFastLocaleCompare,239,240,0
-block_hint,StringFastLocaleCompare,303,304,1
-block_hint,StringFastLocaleCompare,156,157,0
-block_hint,StringFastLocaleCompare,158,159,0
-block_hint,StringFastLocaleCompare,267,268,1
-block_hint,StringFastLocaleCompare,106,107,0
-block_hint,StringFastLocaleCompare,307,308,1
-block_hint,StringFastLocaleCompare,172,173,0
-block_hint,StringFastLocaleCompare,174,175,0
-block_hint,StringFastLocaleCompare,109,110,0
-block_hint,StringFastLocaleCompare,211,212,1
-block_hint,StringFastLocaleCompare,271,272,1
-block_hint,StringFastLocaleCompare,276,277,0
-block_hint,StringFastLocaleCompare,253,254,1
-block_hint,StringFastLocaleCompare,73,74,0
-block_hint,StringFastLocaleCompare,274,275,1
-block_hint,StringFastLocaleCompare,116,117,0
-block_hint,StringFastLocaleCompare,77,78,1
-block_hint,CanUseSameAccessor_FastObjectElements_0,2,3,1
-block_hint,CanUseSameAccessor_FastObjectElements_0,4,5,1
-block_hint,StringPrototypeToLowerCaseIntl,10,11,1
-block_hint,StringPrototypeToLowerCaseIntl,7,8,1
-block_hint,StringPrototypeToLowerCaseIntl,5,6,1
-block_hint,StringToLowerCaseIntl,23,24,1
-block_hint,StringToLowerCaseIntl,25,26,0
-block_hint,StringToLowerCaseIntl,34,35,1
-block_hint,StringToLowerCaseIntl,7,8,0
-block_hint,StringToLowerCaseIntl,43,44,1
-block_hint,StringToLowerCaseIntl,41,42,1
-block_hint,StringToLowerCaseIntl,19,20,0
-block_hint,StringToLowerCaseIntl,39,40,0
-block_hint,StringToLowerCaseIntl,14,15,0
-block_hint,LdaContextSlotHandler,3,4,1
-block_hint,LdaContextSlotHandler,5,6,1
-block_hint,LdaImmutableContextSlotHandler,3,4,1
-block_hint,LdaImmutableContextSlotHandler,5,6,1
-block_hint,LdaCurrentContextSlotHandler,2,3,1
-block_hint,LdaImmutableCurrentContextSlotHandler,2,3,1
-block_hint,TestTypeOfHandler,7,8,1
-block_hint,TestTypeOfHandler,15,16,0
-block_hint,TestTypeOfHandler,23,24,0
-block_hint,TestTypeOfHandler,27,28,1
-block_hint,TestTypeOfHandler,31,32,1
-block_hint,TestTypeOfHandler,50,51,0
-block_hint,TestTypeOfHandler,35,36,0
-block_hint,LdaGlobalHandler,7,8,1
-block_hint,LdaGlobalHandler,9,10,1
-block_hint,LdaGlobalHandler,11,12,1
-block_hint,LdaGlobalHandler,13,14,1
-block_hint,LdaGlobalHandler,183,184,0
-block_hint,LdaGlobalHandler,105,106,0
-block_hint,LdaGlobalHandler,109,110,1
-block_hint,StaContextSlotHandler,5,6,1
-block_hint,StaCurrentContextSlotHandler,2,3,1
-block_hint,GetNamedPropertyHandler,379,380,1
-block_hint,GetNamedPropertyHandler,219,220,0
-block_hint,GetNamedPropertyHandler,77,78,0
-block_hint,GetNamedPropertyHandler,35,36,1
-block_hint,GetNamedPropertyHandler,318,319,0
-block_hint,GetNamedPropertyHandler,342,343,0
-block_hint,GetNamedPropertyHandler,221,222,1
-block_hint,GetNamedPropertyHandler,293,294,1
-block_hint,GetNamedPropertyHandler,39,40,0
-block_hint,GetNamedPropertyHandler,223,224,0
-block_hint,GetNamedPropertyHandler,297,298,1
-block_hint,GetNamedPropertyHandler,98,99,1
-block_hint,GetNamedPropertyHandler,350,351,0
-block_hint,GetNamedPropertyHandler,245,246,0
-block_hint,GetNamedPropertyHandler,154,155,0
-block_hint,GetNamedPropertyHandler,122,123,1
-block_hint,GetNamedPropertyHandler,49,50,0
-block_hint,GetNamedPropertyHandler,87,88,0
-block_hint,GetNamedPropertyHandler,25,26,1
-block_hint,GetNamedPropertyHandler,144,145,0
-block_hint,GetNamedPropertyHandler,65,66,0
-block_hint,GetNamedPropertyHandler,306,307,1
-block_hint,GetNamedPropertyHandler,102,103,0
-block_hint,GetNamedPropertyHandler,251,252,1
-block_hint,GetNamedPropertyHandler,253,254,1
-block_hint,GetNamedPropertyHandler,247,248,1
-block_hint,GetNamedPropertyHandler,249,250,1
-block_hint,GetNamedPropertyHandler,164,165,1
-block_hint,AddHandler,53,54,0
-block_hint,AddHandler,37,38,0
-block_hint,AddHandler,28,29,1
-block_hint,AddHandler,80,81,0
-block_hint,AddHandler,60,61,1
-block_hint,AddHandler,40,41,1
-block_hint,AddHandler,74,75,1
-block_hint,AddHandler,43,44,1
-block_hint,AddHandler,56,57,1
-block_hint,AddHandler,22,23,1
-block_hint,SubHandler,35,36,0
-block_hint,SubHandler,23,24,1
-block_hint,SubHandler,64,65,1
-block_hint,SubHandler,75,76,1
-block_hint,SubHandler,66,67,1
-block_hint,SubHandler,45,46,1
-block_hint,SubHandler,19,20,1
-block_hint,MulHandler,79,80,1
-block_hint,MulHandler,75,76,1
-block_hint,MulHandler,26,27,1
-block_hint,MulHandler,85,86,1
-block_hint,MulHandler,69,70,1
-block_hint,MulHandler,47,48,1
-block_hint,MulHandler,21,22,1
-block_hint,DivHandler,81,82,0
-block_hint,DivHandler,75,76,0
-block_hint,DivHandler,64,65,0
-block_hint,DivHandler,43,44,1
-block_hint,DivHandler,23,24,1
-block_hint,DivHandler,83,84,1
-block_hint,DivHandler,70,71,1
-block_hint,DivHandler,46,47,1
-block_hint,DivHandler,17,18,1
-block_hint,ModHandler,87,88,1
-block_hint,ModHandler,84,85,0
-block_hint,ModHandler,80,81,0
-block_hint,ModHandler,66,67,1
-block_hint,ModHandler,61,62,1
-block_hint,ModHandler,34,35,0
-block_hint,ModHandler,15,16,0
-block_hint,ModHandler,23,24,1
-block_hint,BitwiseOrHandler,42,43,0
-block_hint,BitwiseOrHandler,30,31,1
-block_hint,BitwiseOrHandler,8,9,1
-block_hint,BitwiseOrHandler,56,57,1
-block_hint,BitwiseOrHandler,60,61,1
-block_hint,BitwiseOrHandler,24,25,1
-block_hint,BitwiseXorHandler,32,33,1
-block_hint,BitwiseXorHandler,56,57,1
-block_hint,BitwiseXorHandler,60,61,1
-block_hint,BitwiseXorHandler,24,25,1
-block_hint,BitwiseAndHandler,32,33,1
-block_hint,BitwiseAndHandler,56,57,1
-block_hint,BitwiseAndHandler,60,61,1
-block_hint,BitwiseAndHandler,24,25,1
-block_hint,ShiftLeftHandler,10,11,0
-block_hint,ShiftLeftHandler,60,61,1
-block_hint,ShiftLeftHandler,24,25,1
-block_hint,ShiftRightHandler,32,33,1
-block_hint,ShiftRightHandler,10,11,0
-block_hint,ShiftRightHandler,58,59,0
-block_hint,ShiftRightHandler,39,40,0
-block_hint,ShiftRightHandler,24,25,1
-block_hint,ShiftRightLogicalHandler,10,11,0
-block_hint,ShiftRightLogicalHandler,58,59,0
-block_hint,ShiftRightLogicalHandler,39,40,0
-block_hint,AddSmiHandler,53,54,0
-block_hint,AddSmiHandler,37,38,0
-block_hint,AddSmiHandler,28,29,1
-block_hint,SubSmiHandler,35,36,0
-block_hint,SubSmiHandler,23,24,1
-block_hint,MulSmiHandler,78,79,0
-block_hint,MulSmiHandler,63,64,0
-block_hint,MulSmiHandler,65,66,0
-block_hint,MulSmiHandler,34,35,0
-block_hint,MulSmiHandler,23,24,1
-block_hint,DivSmiHandler,69,70,0
-block_hint,DivSmiHandler,78,79,0
-block_hint,DivSmiHandler,64,65,0
-block_hint,DivSmiHandler,43,44,1
-block_hint,DivSmiHandler,15,16,0
-block_hint,DivSmiHandler,23,24,1
-block_hint,ModSmiHandler,66,67,1
-block_hint,ModSmiHandler,61,62,1
-block_hint,ModSmiHandler,34,35,0
-block_hint,ModSmiHandler,23,24,1
-block_hint,BitwiseOrSmiHandler,31,32,1
-block_hint,BitwiseOrSmiHandler,37,38,1
-block_hint,BitwiseAndSmiHandler,6,7,0
-block_hint,BitwiseAndSmiHandler,18,19,1
-block_hint,ShiftLeftSmiHandler,44,45,1
-block_hint,ShiftLeftSmiHandler,34,35,1
-block_hint,ShiftLeftSmiHandler,46,47,1
-block_hint,ShiftLeftSmiHandler,18,19,1
-block_hint,ShiftRightSmiHandler,31,32,1
-block_hint,ShiftRightSmiHandler,35,36,0
-block_hint,ShiftRightSmiHandler,29,30,0
-block_hint,ShiftRightSmiHandler,18,19,1
-block_hint,ShiftRightLogicalSmiHandler,40,41,0
-block_hint,ShiftRightLogicalSmiHandler,30,31,0
-block_hint,ShiftRightLogicalSmiHandler,34,35,1
-block_hint,ShiftRightLogicalSmiHandler,42,43,0
-block_hint,ShiftRightLogicalSmiHandler,32,33,0
-block_hint,ShiftRightLogicalSmiHandler,18,19,1
-block_hint,IncHandler,27,28,0
-block_hint,IncHandler,23,24,0
-block_hint,IncHandler,18,19,1
-block_hint,DecHandler,27,28,0
-block_hint,DecHandler,23,24,0
-block_hint,DecHandler,18,19,1
-block_hint,NegateHandler,26,27,1
-block_hint,NegateHandler,24,25,1
-block_hint,ToBooleanLogicalNotHandler,15,16,0
-block_hint,ToBooleanLogicalNotHandler,21,22,0
-block_hint,ToBooleanLogicalNotHandler,7,8,0
-block_hint,TypeOfHandler,20,21,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,12,13,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,6,7,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,14,15,1
-block_hint,FindNonDefaultConstructorOrConstructHandler,16,17,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,4,5,1
-block_hint,FindNonDefaultConstructorOrConstructHandler,18,19,0
-block_hint,CallAnyReceiverHandler,21,22,1
-block_hint,CallProperty0Handler,7,8,1
-block_hint,CallProperty0Handler,62,63,0
-block_hint,CallProperty0Handler,14,15,1
-block_hint,CallProperty0Handler,16,17,0
-block_hint,CallProperty0Handler,72,73,0
-block_hint,CallProperty0Handler,55,56,1
-block_hint,CallProperty1Handler,86,87,0
-block_hint,CallProperty1Handler,83,84,0
-block_hint,CallProperty1Handler,64,65,0
-block_hint,CallProperty1Handler,35,36,0
-block_hint,CallProperty1Handler,70,71,1
-block_hint,CallProperty1Handler,51,52,0
-block_hint,CallProperty1Handler,7,8,1
-block_hint,CallProperty1Handler,62,63,0
-block_hint,CallProperty1Handler,14,15,1
-block_hint,CallProperty1Handler,16,17,0
-block_hint,CallProperty1Handler,72,73,0
-block_hint,CallProperty1Handler,55,56,1
-block_hint,CallProperty2Handler,23,24,0
-block_hint,CallProperty2Handler,86,87,0
-block_hint,CallProperty2Handler,83,84,0
-block_hint,CallProperty2Handler,64,65,0
-block_hint,CallProperty2Handler,5,6,1
-block_hint,CallProperty2Handler,47,48,1
-block_hint,CallProperty2Handler,25,26,1
-block_hint,CallProperty2Handler,7,8,1
-block_hint,CallProperty2Handler,14,15,1
-block_hint,CallProperty2Handler,16,17,0
-block_hint,CallProperty2Handler,72,73,0
-block_hint,CallProperty2Handler,55,56,1
-block_hint,CallUndefinedReceiverHandler,86,87,0
-block_hint,CallUndefinedReceiverHandler,83,84,0
-block_hint,CallUndefinedReceiverHandler,64,65,0
-block_hint,CallUndefinedReceiverHandler,35,36,0
-block_hint,CallUndefinedReceiverHandler,70,71,1
-block_hint,CallUndefinedReceiverHandler,51,52,0
-block_hint,CallUndefinedReceiverHandler,29,30,1
-block_hint,CallUndefinedReceiver0Handler,86,87,0
-block_hint,CallUndefinedReceiver0Handler,83,84,0
-block_hint,CallUndefinedReceiver0Handler,64,65,0
-block_hint,CallUndefinedReceiver0Handler,35,36,0
-block_hint,CallUndefinedReceiver0Handler,70,71,1
-block_hint,CallUndefinedReceiver0Handler,51,52,0
-block_hint,CallUndefinedReceiver0Handler,29,30,1
-block_hint,CallUndefinedReceiver1Handler,86,87,0
-block_hint,CallUndefinedReceiver1Handler,83,84,0
-block_hint,CallUndefinedReceiver1Handler,64,65,0
-block_hint,CallUndefinedReceiver1Handler,35,36,0
-block_hint,CallUndefinedReceiver1Handler,70,71,1
-block_hint,CallUndefinedReceiver1Handler,51,52,0
-block_hint,CallUndefinedReceiver1Handler,29,30,1
-block_hint,CallUndefinedReceiver1Handler,7,8,1
-block_hint,CallUndefinedReceiver1Handler,62,63,0
-block_hint,CallUndefinedReceiver1Handler,14,15,1
-block_hint,CallUndefinedReceiver1Handler,16,17,0
-block_hint,CallUndefinedReceiver1Handler,72,73,0
-block_hint,CallUndefinedReceiver1Handler,55,56,1
-block_hint,CallUndefinedReceiver2Handler,23,24,0
-block_hint,CallUndefinedReceiver2Handler,86,87,0
-block_hint,CallUndefinedReceiver2Handler,83,84,0
-block_hint,CallUndefinedReceiver2Handler,64,65,0
-block_hint,CallUndefinedReceiver2Handler,35,36,0
-block_hint,CallUndefinedReceiver2Handler,70,71,1
-block_hint,CallUndefinedReceiver2Handler,51,52,0
-block_hint,CallUndefinedReceiver2Handler,29,30,1
-block_hint,CallWithSpreadHandler,23,24,1
-block_hint,ConstructHandler,52,53,0
-block_hint,ConstructHandler,41,42,1
-block_hint,ConstructHandler,24,25,1
-block_hint,ConstructHandler,15,16,1
-block_hint,ConstructHandler,3,4,1
-block_hint,ConstructHandler,39,40,1
-block_hint,TestEqualHandler,103,104,0
-block_hint,TestEqualHandler,25,26,1
-block_hint,TestEqualHandler,72,73,0
-block_hint,TestEqualHandler,79,80,1
-block_hint,TestEqualHandler,27,28,1
-block_hint,TestEqualHandler,85,86,0
-block_hint,TestEqualHandler,114,115,0
-block_hint,TestEqualHandler,19,20,1
-block_hint,TestEqualStrictHandler,82,83,0
-block_hint,TestEqualStrictHandler,53,54,1
-block_hint,TestEqualStrictHandler,66,67,0
-block_hint,TestEqualStrictHandler,59,60,1
-block_hint,TestEqualStrictHandler,41,42,1
-block_hint,TestEqualStrictHandler,61,62,0
-block_hint,TestEqualStrictHandler,55,56,1
-block_hint,TestEqualStrictHandler,47,48,0
-block_hint,TestEqualStrictHandler,72,73,0
-block_hint,TestEqualStrictHandler,49,50,0
-block_hint,TestEqualStrictHandler,7,8,1
-block_hint,TestLessThanHandler,41,42,0
-block_hint,TestLessThanHandler,63,64,0
-block_hint,TestLessThanHandler,65,66,1
-block_hint,TestLessThanHandler,49,50,1
-block_hint,TestLessThanHandler,9,10,1
-block_hint,TestGreaterThanHandler,41,42,0
-block_hint,TestGreaterThanHandler,45,46,1
-block_hint,TestGreaterThanHandler,9,10,1
-block_hint,TestLessThanOrEqualHandler,41,42,0
-block_hint,TestLessThanOrEqualHandler,9,10,1
-block_hint,TestGreaterThanOrEqualHandler,61,62,0
-block_hint,TestGreaterThanOrEqualHandler,41,42,0
-block_hint,TestGreaterThanOrEqualHandler,63,64,0
-block_hint,TestGreaterThanOrEqualHandler,9,10,1
-block_hint,TestInstanceOfHandler,17,18,1
-block_hint,TestInstanceOfHandler,19,20,1
-block_hint,TestInstanceOfHandler,4,5,1
-block_hint,TestInstanceOfHandler,21,22,1
-block_hint,ToNumericHandler,12,13,0
-block_hint,ToNumericHandler,7,8,1
-block_hint,ToStringHandler,3,4,1
-block_hint,CreateRegExpLiteralHandler,7,8,1
-block_hint,CreateRegExpLiteralHandler,3,4,1
-block_hint,CreateArrayLiteralHandler,38,39,1
-block_hint,CreateArrayLiteralHandler,41,42,1
-block_hint,CreateArrayLiteralHandler,13,14,0
-block_hint,CreateArrayLiteralHandler,50,51,1
-block_hint,CreateArrayLiteralHandler,46,47,1
-block_hint,CreateArrayLiteralHandler,22,23,0
-block_hint,CreateArrayLiteralHandler,28,29,1
-block_hint,CreateArrayLiteralHandler,3,4,1
-block_hint,CreateArrayLiteralHandler,30,31,1
-block_hint,CreateArrayLiteralHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralHandler,3,4,1
-block_hint,CreateEmptyArrayLiteralHandler,13,14,1
-block_hint,CreateEmptyArrayLiteralHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralHandler,15,16,1
-block_hint,CreateObjectLiteralHandler,88,89,0
-block_hint,CreateObjectLiteralHandler,122,123,0
-block_hint,CreateObjectLiteralHandler,116,117,1
-block_hint,CreateObjectLiteralHandler,92,93,1
-block_hint,CreateObjectLiteralHandler,108,109,1
-block_hint,CreateObjectLiteralHandler,72,73,1
-block_hint,CreateObjectLiteralHandler,34,35,0
-block_hint,CreateObjectLiteralHandler,74,75,1
-block_hint,CreateObjectLiteralHandler,64,65,0
-block_hint,CreateEmptyObjectLiteralHandler,4,5,1
-block_hint,CreateEmptyObjectLiteralHandler,11,12,1
-block_hint,CreateEmptyObjectLiteralHandler,6,7,0
-block_hint,CreateClosureHandler,2,3,1
-block_hint,CreateFunctionContextHandler,11,12,1
-block_hint,CreateFunctionContextHandler,4,5,1
-block_hint,CreateFunctionContextHandler,6,7,0
-block_hint,CreateMappedArgumentsHandler,52,53,0
-block_hint,CreateMappedArgumentsHandler,42,43,1
-block_hint,CreateMappedArgumentsHandler,44,45,0
-block_hint,CreateMappedArgumentsHandler,104,105,1
-block_hint,CreateMappedArgumentsHandler,46,47,0
-block_hint,CreateMappedArgumentsHandler,12,13,0
-block_hint,CreateMappedArgumentsHandler,14,15,0
-block_hint,CreateMappedArgumentsHandler,78,79,1
-block_hint,CreateMappedArgumentsHandler,58,59,1
-block_hint,CreateMappedArgumentsHandler,16,17,0
-block_hint,CreateMappedArgumentsHandler,60,61,0
-block_hint,CreateMappedArgumentsHandler,24,25,1
-block_hint,CreateMappedArgumentsHandler,70,71,1
-block_hint,CreateUnmappedArgumentsHandler,16,17,1
-block_hint,CreateUnmappedArgumentsHandler,18,19,0
-block_hint,CreateUnmappedArgumentsHandler,20,21,0
-block_hint,CreateUnmappedArgumentsHandler,7,8,0
-block_hint,CreateUnmappedArgumentsHandler,9,10,0
-block_hint,CreateUnmappedArgumentsHandler,31,32,1
-block_hint,CreateUnmappedArgumentsHandler,25,26,1
-block_hint,CreateUnmappedArgumentsHandler,11,12,0
-block_hint,CreateUnmappedArgumentsHandler,27,28,0
-block_hint,CreateUnmappedArgumentsHandler,13,14,1
-block_hint,CreateUnmappedArgumentsHandler,23,24,1
-block_hint,CreateRestParameterHandler,13,14,0
-block_hint,CreateRestParameterHandler,27,28,0
-block_hint,JumpLoopHandler,34,35,1
-block_hint,JumpLoopHandler,23,24,0
-block_hint,JumpLoopHandler,9,10,1
-block_hint,JumpIfToBooleanTrueConstantHandler,20,21,0
-block_hint,JumpIfToBooleanTrueConstantHandler,6,7,0
-block_hint,JumpIfToBooleanFalseConstantHandler,14,15,0
-block_hint,JumpIfToBooleanFalseConstantHandler,20,21,0
-block_hint,JumpIfToBooleanFalseConstantHandler,6,7,0
-block_hint,JumpIfToBooleanTrueHandler,14,15,0
-block_hint,JumpIfToBooleanTrueHandler,6,7,0
-block_hint,JumpIfToBooleanTrueHandler,8,9,1
-block_hint,JumpIfToBooleanFalseHandler,14,15,0
-block_hint,JumpIfToBooleanFalseHandler,20,21,0
-block_hint,JumpIfToBooleanFalseHandler,6,7,0
-block_hint,JumpIfUndefinedOrNullHandler,3,4,0
-block_hint,JumpIfJSReceiverHandler,5,6,1
-block_hint,JumpIfJSReceiverHandler,3,4,1
-block_hint,SwitchOnSmiNoFeedbackHandler,3,4,0
-block_hint,ForInEnumerateHandler,34,35,1
-block_hint,ForInPrepareHandler,18,19,1
-block_hint,ForInNextHandler,2,3,1
-block_hint,ForInNextHandler,13,14,1
-block_hint,ReturnHandler,3,4,1
-block_hint,ThrowReferenceErrorIfHoleHandler,4,5,0
-block_hint,ThrowSuperNotCalledIfHoleHandler,2,3,0
-block_hint,ThrowSuperAlreadyCalledIfNotHoleHandler,2,3,1
-block_hint,ThrowIfNotSuperConstructorHandler,2,3,1
-block_hint,SuspendGeneratorHandler,14,15,1
-block_hint,SuspendGeneratorHandler,8,9,1
-block_hint,SuspendGeneratorHandler,12,13,1
-block_hint,ResumeGeneratorHandler,10,11,1
-block_hint,ResumeGeneratorHandler,4,5,1
-block_hint,ResumeGeneratorHandler,6,7,1
-block_hint,LdaImmutableContextSlotWideHandler,3,4,1
-block_hint,LdaImmutableContextSlotWideHandler,9,10,0
-block_hint,LdaImmutableContextSlotWideHandler,5,6,1
-block_hint,LdaImmutableCurrentContextSlotWideHandler,2,3,1
-block_hint,LdaGlobalWideHandler,265,266,0
-block_hint,LdaGlobalWideHandler,110,111,1
-block_hint,StaGlobalWideHandler,3,4,0
-block_hint,StaCurrentContextSlotWideHandler,2,3,1
-block_hint,GetNamedPropertyWideHandler,334,335,0
-block_hint,GetNamedPropertyWideHandler,140,141,1
-block_hint,GetKeyedPropertyWideHandler,3,4,0
-block_hint,SetNamedPropertyWideHandler,3,4,0
-block_hint,DefineNamedOwnPropertyWideHandler,3,4,0
-block_hint,SetKeyedPropertyWideHandler,3,4,0
-block_hint,DefineKeyedOwnPropertyWideHandler,3,4,0
-block_hint,StaInArrayLiteralWideHandler,3,4,0
-block_hint,AddWideHandler,82,83,0
-block_hint,AddWideHandler,49,50,0
-block_hint,AddWideHandler,35,36,0
-block_hint,AddWideHandler,78,79,0
-block_hint,AddWideHandler,64,65,1
-block_hint,AddWideHandler,45,46,1
-block_hint,AddWideHandler,27,28,1
-block_hint,AddWideHandler,43,44,1
-block_hint,AddWideHandler,16,17,1
-block_hint,SubWideHandler,75,76,0
-block_hint,SubWideHandler,53,54,0
-block_hint,SubWideHandler,33,34,0
-block_hint,SubWideHandler,13,14,1
-block_hint,MulWideHandler,90,91,0
-block_hint,MulWideHandler,83,84,1
-block_hint,MulWideHandler,69,70,1
-block_hint,MulWideHandler,67,68,1
-block_hint,MulWideHandler,36,37,1
-block_hint,MulWideHandler,17,18,1
-block_hint,BitwiseOrWideHandler,28,29,0
-block_hint,BitwiseOrWideHandler,20,21,1
-block_hint,AddSmiWideHandler,49,50,0
-block_hint,AddSmiWideHandler,35,36,0
-block_hint,MulSmiWideHandler,78,79,0
-block_hint,MulSmiWideHandler,65,66,0
-block_hint,MulSmiWideHandler,34,35,0
-block_hint,MulSmiWideHandler,54,55,1
-block_hint,MulSmiWideHandler,36,37,0
-block_hint,MulSmiWideHandler,42,43,1
-block_hint,MulSmiWideHandler,17,18,1
-block_hint,ModSmiWideHandler,77,78,1
-block_hint,ModSmiWideHandler,70,71,0
-block_hint,ModSmiWideHandler,66,67,1
-block_hint,ModSmiWideHandler,61,62,1
-block_hint,ModSmiWideHandler,34,35,0
-block_hint,ModSmiWideHandler,15,16,0
-block_hint,ModSmiWideHandler,23,24,1
-block_hint,BitwiseOrSmiWideHandler,23,24,0
-block_hint,BitwiseOrSmiWideHandler,6,7,0
-block_hint,BitwiseOrSmiWideHandler,11,12,1
-block_hint,BitwiseAndSmiWideHandler,6,7,0
-block_hint,BitwiseAndSmiWideHandler,18,19,1
-block_hint,ShiftLeftSmiWideHandler,24,25,0
-block_hint,ShiftLeftSmiWideHandler,6,7,0
-block_hint,ShiftLeftSmiWideHandler,40,41,0
-block_hint,ShiftLeftSmiWideHandler,30,31,0
-block_hint,ShiftLeftSmiWideHandler,11,12,1
-block_hint,ShiftRightSmiWideHandler,23,24,0
-block_hint,ShiftRightSmiWideHandler,6,7,0
-block_hint,ShiftRightSmiWideHandler,11,12,1
-block_hint,IncWideHandler,9,10,0
-block_hint,IncWideHandler,25,26,0
-block_hint,IncWideHandler,19,20,0
-block_hint,IncWideHandler,7,8,1
-block_hint,CallPropertyWideHandler,68,69,0
-block_hint,CallPropertyWideHandler,19,20,0
-block_hint,CallProperty0WideHandler,68,69,0
-block_hint,CallProperty0WideHandler,19,20,0
-block_hint,CallProperty1WideHandler,68,69,0
-block_hint,CallProperty1WideHandler,19,20,0
-block_hint,CallProperty2WideHandler,68,69,0
-block_hint,CallProperty2WideHandler,19,20,0
-block_hint,CallUndefinedReceiverWideHandler,68,69,0
-block_hint,CallUndefinedReceiverWideHandler,19,20,0
-block_hint,CallUndefinedReceiver0WideHandler,68,69,0
-block_hint,CallUndefinedReceiver0WideHandler,19,20,0
-block_hint,CallUndefinedReceiver1WideHandler,68,69,0
-block_hint,CallUndefinedReceiver1WideHandler,19,20,0
-block_hint,CallUndefinedReceiver2WideHandler,68,69,0
-block_hint,CallUndefinedReceiver2WideHandler,19,20,0
-block_hint,ConstructWideHandler,49,50,0
-block_hint,ConstructWideHandler,22,23,0
-block_hint,TestEqualWideHandler,103,104,0
-block_hint,TestEqualWideHandler,95,96,0
-block_hint,TestEqualWideHandler,47,48,0
-block_hint,TestEqualWideHandler,7,8,1
-block_hint,TestEqualStrictWideHandler,82,83,0
-block_hint,TestEqualStrictWideHandler,53,54,1
-block_hint,TestEqualStrictWideHandler,55,56,1
-block_hint,TestEqualStrictWideHandler,47,48,0
-block_hint,TestEqualStrictWideHandler,9,10,0
-block_hint,TestEqualStrictWideHandler,4,5,1
-block_hint,TestGreaterThanWideHandler,24,25,0
-block_hint,TestGreaterThanWideHandler,6,7,1
-block_hint,CreateRegExpLiteralWideHandler,14,15,0
-block_hint,CreateRegExpLiteralWideHandler,9,10,1
-block_hint,CreateArrayLiteralWideHandler,42,43,0
-block_hint,CreateArrayLiteralWideHandler,20,21,1
-block_hint,CreateEmptyArrayLiteralWideHandler,22,23,0
-block_hint,CreateEmptyArrayLiteralWideHandler,11,12,1
-block_hint,CreateEmptyArrayLiteralWideHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralWideHandler,15,16,1
-block_hint,CreateObjectLiteralWideHandler,99,100,0
-block_hint,CreateObjectLiteralWideHandler,58,59,1
-block_hint,CreateClosureWideHandler,9,10,1
-block_hint,CreateClosureWideHandler,2,3,1
-block_hint,CreateFunctionContextWideHandler,8,9,0
-block_hint,JumpLoopWideHandler,34,35,1
-block_hint,JumpLoopWideHandler,9,10,1
-block_hint,JumpIfToBooleanTrueWideHandler,18,19,1
-block_hint,JumpIfToBooleanTrueWideHandler,14,15,0
-block_hint,JumpIfToBooleanFalseWideHandler,18,19,1
-block_hint,JumpIfToBooleanFalseWideHandler,14,15,0
-block_hint,JumpIfToBooleanFalseWideHandler,20,21,0
-block_hint,JumpIfToBooleanFalseWideHandler,6,7,0
-block_hint,SwitchOnSmiNoFeedbackWideHandler,5,6,0
-block_hint,SwitchOnSmiNoFeedbackWideHandler,3,4,0
-block_hint,ForInNextWideHandler,11,12,0
-block_hint,ForInNextWideHandler,2,3,1
-block_hint,ForInNextWideHandler,4,5,0
-block_hint,ForInNextWideHandler,9,10,1
-block_hint,LdaGlobalExtraWideHandler,265,266,0
-block_hint,LdaGlobalExtraWideHandler,110,111,1
-block_hint,AddSmiExtraWideHandler,69,70,1
-block_hint,AddSmiExtraWideHandler,43,44,0
-block_hint,AddSmiExtraWideHandler,52,53,1
-block_hint,AddSmiExtraWideHandler,16,17,1
-block_hint,DivSmiExtraWideHandler,73,74,0
-block_hint,DivSmiExtraWideHandler,69,70,0
-block_hint,DivSmiExtraWideHandler,78,79,0
-block_hint,DivSmiExtraWideHandler,64,65,0
-block_hint,DivSmiExtraWideHandler,35,36,0
-block_hint,DivSmiExtraWideHandler,43,44,1
-block_hint,DivSmiExtraWideHandler,23,24,1
-block_hint,BitwiseAndSmiExtraWideHandler,31,32,1
-block_hint,BitwiseAndSmiExtraWideHandler,35,36,0
-block_hint,BitwiseAndSmiExtraWideHandler,29,30,0
-block_hint,BitwiseAndSmiExtraWideHandler,18,19,1
-block_hint,CallUndefinedReceiver1ExtraWideHandler,68,69,0
-block_hint,CallUndefinedReceiver1ExtraWideHandler,19,20,0
-builtin_hash,RecordWriteSaveFP,-726777896
-builtin_hash,RecordWriteIgnoreFP,-726777896
-builtin_hash,EphemeronKeyBarrierSaveFP,-673045595
-builtin_hash,AdaptorWithBuiltinExitFrame,1058054117
-builtin_hash,Call_ReceiverIsNullOrUndefined_Baseline_Compact,858122912
-builtin_hash,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,646911826
-builtin_hash,Call_ReceiverIsAny_Baseline_Compact,646911826
-builtin_hash,CallProxy,-213096940
-builtin_hash,CallWithSpread,850146918
-builtin_hash,CallWithSpread_Baseline,28766541
-builtin_hash,CallWithArrayLike,863195024
-builtin_hash,ConstructWithSpread,-680697218
-builtin_hash,ConstructWithSpread_Baseline,289501324
-builtin_hash,Construct_Baseline,-759871673
-builtin_hash,FastNewObject,413535442
-builtin_hash,FastNewClosure,570052345
-builtin_hash,StringEqual,854835916
-builtin_hash,StringGreaterThan,-1046193569
-builtin_hash,StringGreaterThanOrEqual,-163480371
-builtin_hash,StringLessThan,-163480371
-builtin_hash,StringLessThanOrEqual,-1046193569
-builtin_hash,StringSubstring,919827347
-builtin_hash,OrderedHashTableHealIndex,534476200
-builtin_hash,CompileLazy,972488543
-builtin_hash,CompileLazyDeoptimizedCode,-421407567
-builtin_hash,InstantiateAsmJs,-1023179608
-builtin_hash,AllocateInYoungGeneration,-533397479
-builtin_hash,AllocateRegularInYoungGeneration,1027329059
-builtin_hash,AllocateRegularInOldGeneration,1027329059
-builtin_hash,CopyFastSmiOrObjectElements,683092240
-builtin_hash,GrowFastDoubleElements,-452014384
-builtin_hash,GrowFastSmiOrObjectElements,-864052523
-builtin_hash,ToNumber,-800974620
-builtin_hash,ToNumber_Baseline,109807865
-builtin_hash,ToNumeric_Baseline,-776212596
-builtin_hash,ToNumberConvertBigInt,716851903
-builtin_hash,Typeof,819455224
-builtin_hash,KeyedLoadIC_PolymorphicName,-689223336
-builtin_hash,KeyedStoreIC_Megamorphic,254473413
-builtin_hash,DefineKeyedOwnIC_Megamorphic,-460277254
-builtin_hash,LoadGlobalIC_NoFeedback,-110200517
-builtin_hash,LoadIC_FunctionPrototype,-712048234
-builtin_hash,LoadIC_StringLength,888036620
-builtin_hash,LoadIC_StringWrapperLength,-322863465
-builtin_hash,LoadIC_NoFeedback,-407781812
-builtin_hash,StoreIC_NoFeedback,498495916
-builtin_hash,DefineNamedOwnIC_NoFeedback,-674344650
-builtin_hash,KeyedLoadIC_SloppyArguments,-264177550
-builtin_hash,StoreFastElementIC_Standard,670144974
-builtin_hash,StoreFastElementIC_GrowNoTransitionHandleCOW,72441205
-builtin_hash,StoreFastElementIC_NoTransitionHandleCOW,-865837557
-builtin_hash,ElementsTransitionAndStore_Standard,602243835
-builtin_hash,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,-149712742
-builtin_hash,ElementsTransitionAndStore_NoTransitionHandleCOW,594322143
-builtin_hash,KeyedHasIC_PolymorphicName,-821302781
-builtin_hash,EnqueueMicrotask,-302354621
-builtin_hash,RunMicrotasks,766385977
-builtin_hash,HasProperty,-714203747
-builtin_hash,DeleteProperty,-714863218
-builtin_hash,SetDataProperties,-859409816
-builtin_hash,ReturnReceiver,424286427
-builtin_hash,ArrayConstructor,374700129
-builtin_hash,ArrayConstructorImpl,-937016390
-builtin_hash,ArrayNoArgumentConstructor_PackedSmi_DontOverride,-610727288
-builtin_hash,ArrayNoArgumentConstructor_HoleySmi_DontOverride,-610727288
-builtin_hash,ArrayNoArgumentConstructor_PackedSmi_DisableAllocationSites,-920795459
-builtin_hash,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,-920795459
-builtin_hash,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,-920795459
-builtin_hash,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,-931536819
-builtin_hash,ArraySingleArgumentConstructor_HoleySmi_DontOverride,-163160499
-builtin_hash,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,-334236117
-builtin_hash,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,-334236117
-builtin_hash,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,-851781780
-builtin_hash,ArrayIncludesSmi,-782625397
-builtin_hash,ArrayIncludesSmiOrObject,-1019116117
-builtin_hash,ArrayIncludes,-700282247
-builtin_hash,ArrayIndexOfSmi,674398784
-builtin_hash,ArrayIndexOfSmiOrObject,-38210588
-builtin_hash,ArrayIndexOf,-630602255
-builtin_hash,ArrayPrototypePop,520833050
-builtin_hash,ArrayPrototypePush,-588957354
-builtin_hash,CloneFastJSArray,-940332184
-builtin_hash,CloneFastJSArrayFillingHoles,376125361
-builtin_hash,ExtractFastJSArray,69886191
-builtin_hash,ArrayPrototypeEntries,797181916
-builtin_hash,ArrayPrototypeKeys,-510793262
-builtin_hash,ArrayPrototypeValues,797181916
-builtin_hash,ArrayIteratorPrototypeNext,998895316
-builtin_hash,AsyncFunctionEnter,706651433
-builtin_hash,AsyncFunctionResolve,924354792
-builtin_hash,AsyncFunctionAwaitCaught,1029985317
-builtin_hash,AsyncFunctionAwaitUncaught,1029985317
-builtin_hash,AsyncFunctionAwaitResolveClosure,624834406
-builtin_hash,DatePrototypeGetDate,455533545
-builtin_hash,DatePrototypeGetDay,455533545
-builtin_hash,DatePrototypeGetFullYear,455533545
-builtin_hash,DatePrototypeGetHours,455533545
-builtin_hash,DatePrototypeGetMilliseconds,837814216
-builtin_hash,DatePrototypeGetMinutes,455533545
-builtin_hash,DatePrototypeGetMonth,455533545
-builtin_hash,DatePrototypeGetSeconds,455533545
-builtin_hash,DatePrototypeGetTime,-352023713
-builtin_hash,DatePrototypeGetTimezoneOffset,837814216
-builtin_hash,DatePrototypeValueOf,-352023713
-builtin_hash,DatePrototypeToPrimitive,-306581045
-builtin_hash,CreateIterResultObject,-369987292
-builtin_hash,CreateGeneratorObject,-51189745
-builtin_hash,GeneratorPrototypeNext,-979874589
-builtin_hash,GeneratorPrototypeReturn,-376232641
-builtin_hash,SuspendGeneratorBaseline,1044307139
-builtin_hash,ResumeGeneratorBaseline,-33214673
-builtin_hash,GlobalIsFinite,608945684
-builtin_hash,GlobalIsNaN,480533016
-builtin_hash,LoadIC,-159967869
-builtin_hash,LoadIC_Megamorphic,783558994
-builtin_hash,LoadIC_Noninlined,-1053430564
-builtin_hash,LoadICTrampoline,415259910
-builtin_hash,LoadICBaseline,-789670064
-builtin_hash,LoadICTrampoline_Megamorphic,415259910
-builtin_hash,LoadSuperIC,-382069116
-builtin_hash,LoadSuperICBaseline,350433880
-builtin_hash,KeyedLoadIC,-775829679
-builtin_hash,KeyedLoadIC_Megamorphic,535456695
-builtin_hash,KeyedLoadICTrampoline,415259910
-builtin_hash,KeyedLoadICBaseline,-789670064
-builtin_hash,KeyedLoadICTrampoline_Megamorphic,415259910
-builtin_hash,StoreGlobalIC,-549357916
-builtin_hash,StoreGlobalICTrampoline,415259910
-builtin_hash,StoreGlobalICBaseline,-789670064
-builtin_hash,StoreIC,131443759
-builtin_hash,StoreICTrampoline,664895372
-builtin_hash,StoreICBaseline,350433880
-builtin_hash,DefineNamedOwnIC,-470691823
-builtin_hash,DefineNamedOwnICBaseline,350433880
-builtin_hash,KeyedStoreIC,143012442
-builtin_hash,KeyedStoreICTrampoline,664895372
-builtin_hash,KeyedStoreICBaseline,350433880
-builtin_hash,DefineKeyedOwnIC,14337203
-builtin_hash,StoreInArrayLiteralIC,357518953
-builtin_hash,StoreInArrayLiteralICBaseline,350433880
-builtin_hash,LoadGlobalIC,-965378230
-builtin_hash,LoadGlobalICInsideTypeof,-962887989
-builtin_hash,LoadGlobalICTrampoline,1065241136
-builtin_hash,LoadGlobalICBaseline,-209484242
-builtin_hash,LoadGlobalICInsideTypeofTrampoline,1065241136
-builtin_hash,LoadGlobalICInsideTypeofBaseline,-209484242
-builtin_hash,LookupGlobalICBaseline,696922418
-builtin_hash,LookupGlobalICInsideTypeofBaseline,696922418
-builtin_hash,KeyedHasIC,926037557
-builtin_hash,KeyedHasICBaseline,-789670064
-builtin_hash,KeyedHasIC_Megamorphic,-714203747
-builtin_hash,IterableToList,216346690
-builtin_hash,IterableToListWithSymbolLookup,470463439
-builtin_hash,IterableToListMayPreserveHoles,140268820
-builtin_hash,FindOrderedHashMapEntry,-604979912
-builtin_hash,MapConstructor,-249864188
-builtin_hash,MapPrototypeSet,535944514
-builtin_hash,MapPrototypeDelete,823187063
-builtin_hash,MapPrototypeGet,-992707095
-builtin_hash,MapPrototypeHas,379455552
-builtin_hash,MapPrototypeEntries,908832154
-builtin_hash,MapPrototypeGetSize,846186029
-builtin_hash,MapPrototypeForEach,420961920
-builtin_hash,MapPrototypeKeys,908832154
-builtin_hash,MapPrototypeValues,908832154
-builtin_hash,MapIteratorPrototypeNext,-288779464
-builtin_hash,MapIteratorToList,-816260477
-builtin_hash,SameValueNumbersOnly,-25129126
-builtin_hash,Add_Baseline,227334744
-builtin_hash,AddSmi_Baseline,-803607255
-builtin_hash,Subtract_Baseline,-465432536
-builtin_hash,SubtractSmi_Baseline,-747934466
-builtin_hash,Multiply_Baseline,958145830
-builtin_hash,MultiplySmi_Baseline,-183633015
-builtin_hash,Divide_Baseline,-658775957
-builtin_hash,DivideSmi_Baseline,596645384
-builtin_hash,Modulus_Baseline,417939096
-builtin_hash,ModulusSmi_Baseline,274117178
-builtin_hash,Exponentiate_Baseline,-678138653
-builtin_hash,BitwiseAnd_Baseline,971917730
-builtin_hash,BitwiseAndSmi_Baseline,-427224399
-builtin_hash,BitwiseOr_Baseline,-86023826
-builtin_hash,BitwiseOrSmi_Baseline,1033800245
-builtin_hash,BitwiseXor_Baseline,1018309106
-builtin_hash,BitwiseXorSmi_Baseline,-776461247
-builtin_hash,ShiftLeft_Baseline,-563580356
-builtin_hash,ShiftLeftSmi_Baseline,-559342044
-builtin_hash,ShiftRight_Baseline,1046983317
-builtin_hash,ShiftRightSmi_Baseline,344949769
-builtin_hash,ShiftRightLogical_Baseline,-621551965
-builtin_hash,ShiftRightLogicalSmi_Baseline,-730271331
-builtin_hash,Add_WithFeedback,-655798137
-builtin_hash,Subtract_WithFeedback,-172753508
-builtin_hash,Modulus_WithFeedback,-1003748883
-builtin_hash,BitwiseOr_WithFeedback,981590503
-builtin_hash,Equal_Baseline,940608970
-builtin_hash,StrictEqual_Baseline,726432452
-builtin_hash,LessThan_Baseline,362404859
-builtin_hash,GreaterThan_Baseline,1053559793
-builtin_hash,LessThanOrEqual_Baseline,283953892
-builtin_hash,GreaterThanOrEqual_Baseline,734136323
-builtin_hash,Equal_WithFeedback,553553676
-builtin_hash,StrictEqual_WithFeedback,-536478159
-builtin_hash,LessThan_WithFeedback,-304778695
-builtin_hash,GreaterThan_WithFeedback,856459323
-builtin_hash,GreaterThanOrEqual_WithFeedback,547352059
-builtin_hash,BitwiseNot_Baseline,-649673224
-builtin_hash,Decrement_Baseline,596332548
-builtin_hash,Increment_Baseline,842384965
-builtin_hash,Negate_Baseline,15380030
-builtin_hash,ObjectAssign,521201809
-builtin_hash,ObjectCreate,-448176478
-builtin_hash,ObjectEntries,-68491710
-builtin_hash,ObjectGetOwnPropertyDescriptor,-542894089
-builtin_hash,ObjectGetOwnPropertyNames,650621679
-builtin_hash,ObjectIs,-986440389
-builtin_hash,ObjectKeys,-702112957
-builtin_hash,ObjectPrototypeHasOwnProperty,-29325300
-builtin_hash,ObjectToString,-165543538
-builtin_hash,InstanceOf_WithFeedback,336324400
-builtin_hash,InstanceOf_Baseline,-6863477
-builtin_hash,ForInEnumerate,-685322984
-builtin_hash,ForInPrepare,-247635412
-builtin_hash,ForInFilter,-833678486
-builtin_hash,RegExpConstructor,-954773819
-builtin_hash,RegExpExecAtom,-1012355680
-builtin_hash,RegExpExecInternal,714858405
-builtin_hash,FindOrderedHashSetEntry,726112787
-builtin_hash,SetConstructor,246660026
-builtin_hash,SetPrototypeHas,379455552
-builtin_hash,SetPrototypeAdd,-1038309920
-builtin_hash,SetPrototypeDelete,-870300530
-builtin_hash,SetPrototypeEntries,908832154
-builtin_hash,SetPrototypeGetSize,846186029
-builtin_hash,SetPrototypeForEach,-891643547
-builtin_hash,SetPrototypeValues,908832154
-builtin_hash,SetIteratorPrototypeNext,664557777
-builtin_hash,SetOrSetIteratorToList,-665943107
-builtin_hash,StringFromCharCode,-404331798
-builtin_hash,StringPrototypeReplace,-489667599
-builtin_hash,StringPrototypeSplit,882257544
-builtin_hash,TypedArrayConstructor,-808110108
-builtin_hash,TypedArrayPrototypeByteLength,450901727
-builtin_hash,TypedArrayPrototypeLength,51309302
-builtin_hash,WeakMapConstructor,-402303863
-builtin_hash,WeakMapLookupHashIndex,94941198
-builtin_hash,WeakMapGet,815434422
-builtin_hash,WeakMapPrototypeHas,514771298
-builtin_hash,WeakMapPrototypeSet,-349184670
-builtin_hash,WeakSetConstructor,-342477008
-builtin_hash,WeakSetPrototypeHas,514771298
-builtin_hash,WeakSetPrototypeAdd,-987480020
-builtin_hash,WeakCollectionSet,-1066277515
-builtin_hash,AsyncGeneratorResolve,-819132993
-builtin_hash,AsyncGeneratorYieldWithAwait,110676265
-builtin_hash,AsyncGeneratorResumeNext,923449615
-builtin_hash,AsyncGeneratorPrototypeNext,-1069429825
-builtin_hash,AsyncGeneratorAwaitUncaught,-489961876
-builtin_hash,AsyncGeneratorAwaitResolveClosure,811649112
-builtin_hash,AsyncGeneratorYieldWithAwaitResolveClosure,-998984780
-builtin_hash,StringAdd_CheckNone,-1024256598
-builtin_hash,SubString,-266358213
-builtin_hash,GetProperty,933894284
-builtin_hash,GetPropertyWithReceiver,-32510004
-builtin_hash,SetProperty,-495376823
-builtin_hash,CreateDataProperty,594399224
-builtin_hash,FindNonDefaultConstructorOrConstruct,366791757
-builtin_hash,ArrayPrototypeConcat,754436794
-builtin_hash,ArrayEvery,-455892525
-builtin_hash,ArrayFilterLoopLazyDeoptContinuation,611749953
-builtin_hash,ArrayFilterLoopContinuation,-833908508
-builtin_hash,ArrayFilter,-683369240
-builtin_hash,ArrayPrototypeFind,865719932
-builtin_hash,ArrayForEachLoopLazyDeoptContinuation,447647825
-builtin_hash,ArrayForEachLoopContinuation,1042683717
-builtin_hash,ArrayForEach,-45990657
-builtin_hash,ArrayFrom,-19171003
-builtin_hash,ArrayIsArray,-862006355
-builtin_hash,LoadJoinElement_FastSmiOrObjectElements_0,525543749
-builtin_hash,LoadJoinElement_FastDoubleElements_0,538796669
-builtin_hash,JoinStackPush,-77767476
-builtin_hash,JoinStackPop,-269387551
-builtin_hash,ArrayPrototypeJoin,-742859465
-builtin_hash,ArrayPrototypeToString,403405778
-builtin_hash,ArrayPrototypeLastIndexOf,536760057
-builtin_hash,ArrayMapLoopLazyDeoptContinuation,-554291199
-builtin_hash,ArrayMapLoopContinuation,746589647
-builtin_hash,ArrayMap,-987743445
-builtin_hash,ArrayReduceLoopLazyDeoptContinuation,-31323243
-builtin_hash,ArrayReduceLoopContinuation,586684706
-builtin_hash,ArrayReduce,-1036824447
-builtin_hash,ArrayPrototypeReverse,-85515243
-builtin_hash,ArrayPrototypeShift,-355152592
-builtin_hash,ArrayPrototypeSlice,-882387995
-builtin_hash,ArraySome,572787735
-builtin_hash,ArrayPrototypeSplice,-993386813
-builtin_hash,ArrayPrototypeUnshift,-757236194
-builtin_hash,ArrayBufferPrototypeGetByteLength,366611265
-builtin_hash,ArrayBufferIsView,-965145146
-builtin_hash,ToInteger,-1031727149
-builtin_hash,FastCreateDataProperty,-85250558
-builtin_hash,BooleanConstructor,-152052498
-builtin_hash,BooleanPrototypeToString,884033636
-builtin_hash,ToString,918542802
-builtin_hash,StringPrototypeToString,-980655357
-builtin_hash,StringPrototypeValueOf,-980655357
-builtin_hash,StringPrototypeCharAt,-180668138
-builtin_hash,StringPrototypeCharCodeAt,-872019738
-builtin_hash,StringPrototypeCodePointAt,187584823
-builtin_hash,StringPrototypeConcat,897315329
-builtin_hash,StringConstructor,-742161575
-builtin_hash,StringAddConvertLeft,464769062
-builtin_hash,StringAddConvertRight,319266024
-builtin_hash,StringCharAt,864970334
-builtin_hash,FastNewClosureBaseline,163793209
-builtin_hash,FastNewFunctionContextFunction,317653702
-builtin_hash,CreateRegExpLiteral,1058887503
-builtin_hash,CreateShallowArrayLiteral,-330514735
-builtin_hash,CreateEmptyArrayLiteral,833695600
-builtin_hash,CreateShallowObjectLiteral,-29649147
-builtin_hash,ObjectConstructor,741451926
-builtin_hash,CreateEmptyLiteralObject,-766695182
-builtin_hash,NumberConstructor,782234117
-builtin_hash,StringToNumber,-992596056
-builtin_hash,NonNumberToNumber,-970153117
-builtin_hash,NonNumberToNumeric,-280530476
-builtin_hash,ToNumeric,874672566
-builtin_hash,NumberToString,-397868165
-builtin_hash,ToBoolean,-1062838663
-builtin_hash,ToBooleanForBaselineJump,358716591
-builtin_hash,ToLength,-759915327
-builtin_hash,ToName,820424209
-builtin_hash,ToObject,-125924279
-builtin_hash,NonPrimitiveToPrimitive_Default,687286659
-builtin_hash,NonPrimitiveToPrimitive_Number,687286659
-builtin_hash,NonPrimitiveToPrimitive_String,687286659
-builtin_hash,OrdinaryToPrimitive_Number,172366650
-builtin_hash,OrdinaryToPrimitive_String,172366650
-builtin_hash,DataViewPrototypeGetByteLength,686110725
-builtin_hash,DataViewPrototypeGetFloat64,895164724
-builtin_hash,DataViewPrototypeSetUint32,729463948
-builtin_hash,DataViewPrototypeSetFloat64,946935329
-builtin_hash,FunctionPrototypeHasInstance,-111878689
-builtin_hash,FastFunctionPrototypeBind,733410738
-builtin_hash,ForInNext,695701361
-builtin_hash,GetIteratorWithFeedback,656782758
-builtin_hash,GetIteratorBaseline,561837844
-builtin_hash,CallIteratorWithFeedback,-555674017
-builtin_hash,MathAbs,679934506
-builtin_hash,MathCeil,-379506221
-builtin_hash,MathFloor,-953911531
-builtin_hash,MathRound,402490573
-builtin_hash,MathPow,-180289611
-builtin_hash,MathMax,178146456
-builtin_hash,MathMin,751748840
-builtin_hash,MathAsin,816104342
-builtin_hash,MathAtan2,-86622779
-builtin_hash,MathCos,-22074993
-builtin_hash,MathExp,-588443238
-builtin_hash,MathFround,400378764
-builtin_hash,MathImul,1019157426
-builtin_hash,MathLog,-97836428
-builtin_hash,MathSin,14431085
-builtin_hash,MathSign,728598572
-builtin_hash,MathSqrt,229011554
-builtin_hash,MathTan,-1012302844
-builtin_hash,MathTanh,1011823357
-builtin_hash,MathRandom,-676034213
-builtin_hash,NumberPrototypeToString,-162258996
-builtin_hash,NumberIsInteger,-384800121
-builtin_hash,NumberIsNaN,820755905
-builtin_hash,NumberParseFloat,-375043449
-builtin_hash,ParseInt,18014642
-builtin_hash,NumberParseInt,63997416
-builtin_hash,Add,-954609031
-builtin_hash,Subtract,289942982
-builtin_hash,Multiply,523374153
-builtin_hash,Divide,804521858
-builtin_hash,Modulus,949112112
-builtin_hash,CreateObjectWithoutProperties,-1064462911
-builtin_hash,ObjectIsExtensible,284344234
-builtin_hash,ObjectPreventExtensions,-111120885
-builtin_hash,ObjectGetPrototypeOf,913438424
-builtin_hash,ObjectSetPrototypeOf,121738554
-builtin_hash,ObjectPrototypeToString,-282402028
-builtin_hash,ObjectPrototypeValueOf,-694058811
-builtin_hash,FulfillPromise,-550390507
-builtin_hash,NewPromiseCapability,505672440
-builtin_hash,PromiseCapabilityDefaultResolve,-866352423
-builtin_hash,PerformPromiseThen,-560636632
-builtin_hash,PromiseAll,17219024
-builtin_hash,PromiseAllResolveElementClosure,95365838
-builtin_hash,PromiseConstructor,161136799
-builtin_hash,PromisePrototypeCatch,815820860
-builtin_hash,PromiseFulfillReactionJob,274407576
-builtin_hash,PromiseResolveTrampoline,-167703057
-builtin_hash,PromiseResolve,907966935
-builtin_hash,ResolvePromise,223087443
-builtin_hash,PromisePrototypeThen,844580362
-builtin_hash,PromiseResolveThenableJob,655377974
-builtin_hash,ProxyConstructor,-22029616
-builtin_hash,ProxyGetProperty,272083385
-builtin_hash,ProxyIsExtensible,-454160590
-builtin_hash,ProxyPreventExtensions,-632485167
-builtin_hash,ReflectGet,326106219
-builtin_hash,ReflectHas,-167703057
-builtin_hash,RegExpPrototypeExec,-813728440
-builtin_hash,RegExpMatchFast,166162007
-builtin_hash,RegExpReplace,-143637874
-builtin_hash,RegExpPrototypeReplace,-459816201
-builtin_hash,RegExpSearchFast,1000243168
-builtin_hash,RegExpPrototypeSourceGetter,648178085
-builtin_hash,RegExpSplit,-714662060
-builtin_hash,RegExpPrototypeTest,1046472002
-builtin_hash,RegExpPrototypeTestFast,-798233488
-builtin_hash,RegExpPrototypeGlobalGetter,-203267211
-builtin_hash,RegExpPrototypeIgnoreCaseGetter,-465412178
-builtin_hash,RegExpPrototypeMultilineGetter,-508767992
-builtin_hash,RegExpPrototypeHasIndicesGetter,-282842269
-builtin_hash,RegExpPrototypeDotAllGetter,192996811
-builtin_hash,RegExpPrototypeStickyGetter,134953677
-builtin_hash,RegExpPrototypeUnicodeGetter,-249845547
-builtin_hash,RegExpPrototypeFlagsGetter,921172498
-builtin_hash,StringPrototypeEndsWith,-918116877
-builtin_hash,StringPrototypeIncludes,941923285
-builtin_hash,StringPrototypeIndexOf,806674611
-builtin_hash,StringPrototypeIterator,-876353408
-builtin_hash,StringIteratorPrototypeNext,-654477549
-builtin_hash,StringPrototypeMatch,-595127381
-builtin_hash,StringPrototypeSearch,-595127381
-builtin_hash,StringRepeat,-561250183
-builtin_hash,StringPrototypeSlice,-775888791
-builtin_hash,StringPrototypeStartsWith,-277204680
-builtin_hash,StringPrototypeSubstr,-947069151
-builtin_hash,StringPrototypeSubstring,-978368636
-builtin_hash,StringPrototypeTrim,274893597
-builtin_hash,SymbolPrototypeToString,-185434595
-builtin_hash,CreateTypedArray,1033298460
-builtin_hash,TypedArrayFrom,-367584045
-builtin_hash,TypedArrayPrototypeSet,-63776151
-builtin_hash,TypedArrayPrototypeSubArray,-493435057
-builtin_hash,NewSloppyArgumentsElements,678456613
-builtin_hash,NewStrictArgumentsElements,185097327
-builtin_hash,NewRestArgumentsElements,619724658
-builtin_hash,FastNewSloppyArguments,-80034412
-builtin_hash,FastNewStrictArguments,-464051478
-builtin_hash,FastNewRestArguments,685808482
-builtin_hash,StringSlowFlatten,-784143839
-builtin_hash,StringIndexOf,-474260096
-builtin_hash,Load_FastSmiElements_0,379894944
-builtin_hash,Load_FastObjectElements_0,379894944
-builtin_hash,Store_FastSmiElements_0,-902376010
-builtin_hash,Store_FastObjectElements_0,66846279
-builtin_hash,SortCompareDefault,622990416
-builtin_hash,SortCompareUserFn,-475979535
-builtin_hash,Copy,-597822670
-builtin_hash,MergeAt,623377186
-builtin_hash,GallopLeft,-889285900
-builtin_hash,GallopRight,-999899286
-builtin_hash,ArrayTimSort,439597300
-builtin_hash,ArrayPrototypeSort,-257800026
-builtin_hash,StringFastLocaleCompare,-322387781
-builtin_hash,WasmInt32ToHeapNumber,642966430
-builtin_hash,WasmTaggedNonSmiToInt32,721903651
-builtin_hash,WasmTriggerTierUp,-81706558
-builtin_hash,WasmStackGuard,-758328907
-builtin_hash,CanUseSameAccessor_FastSmiElements_0,895246524
-builtin_hash,CanUseSameAccessor_FastObjectElements_0,895246524
-builtin_hash,StringPrototypeToLowerCaseIntl,-989408672
-builtin_hash,StringToLowerCaseIntl,-921557299
-builtin_hash,WideHandler,-909873635
-builtin_hash,ExtraWideHandler,-909873635
-builtin_hash,LdarHandler,436211825
-builtin_hash,LdaZeroHandler,-600556494
-builtin_hash,LdaSmiHandler,-976000524
-builtin_hash,LdaUndefinedHandler,-443904601
-builtin_hash,LdaNullHandler,-443904601
-builtin_hash,LdaTheHoleHandler,-443904601
-builtin_hash,LdaTrueHandler,-1005639650
-builtin_hash,LdaFalseHandler,-1005639650
-builtin_hash,LdaConstantHandler,266070913
-builtin_hash,LdaContextSlotHandler,660734865
-builtin_hash,LdaImmutableContextSlotHandler,660734865
-builtin_hash,LdaCurrentContextSlotHandler,864850451
-builtin_hash,LdaImmutableCurrentContextSlotHandler,864850451
-builtin_hash,StarHandler,-205182408
-builtin_hash,MovHandler,-701250297
-builtin_hash,PushContextHandler,-20849192
-builtin_hash,PopContextHandler,711206742
-builtin_hash,TestReferenceEqualHandler,-497662813
-builtin_hash,TestUndetectableHandler,-791662564
-builtin_hash,TestNullHandler,-70530481
-builtin_hash,TestUndefinedHandler,-70530481
-builtin_hash,TestTypeOfHandler,-392059484
-builtin_hash,LdaGlobalHandler,875012517
-builtin_hash,LdaGlobalInsideTypeofHandler,-679405654
-builtin_hash,StaGlobalHandler,-566520598
-builtin_hash,StaContextSlotHandler,-1033911292
-builtin_hash,StaCurrentContextSlotHandler,-78459139
-builtin_hash,LdaLookupGlobalSlotHandler,779325960
-builtin_hash,LdaLookupGlobalSlotInsideTypeofHandler,788806266
-builtin_hash,StaLookupSlotHandler,440700011
-builtin_hash,GetNamedPropertyHandler,309200467
-builtin_hash,GetNamedPropertyFromSuperHandler,-449343494
-builtin_hash,GetKeyedPropertyHandler,-379193937
-builtin_hash,SetNamedPropertyHandler,514822198
-builtin_hash,DefineNamedOwnPropertyHandler,514822198
-builtin_hash,SetKeyedPropertyHandler,642510192
-builtin_hash,DefineKeyedOwnPropertyHandler,642510192
-builtin_hash,StaInArrayLiteralHandler,642510192
-builtin_hash,DefineKeyedOwnPropertyInLiteralHandler,1040795086
-builtin_hash,AddHandler,-846364031
-builtin_hash,SubHandler,-947310537
-builtin_hash,MulHandler,-898838623
-builtin_hash,DivHandler,-601015277
-builtin_hash,ModHandler,-618002607
-builtin_hash,ExpHandler,48455184
-builtin_hash,BitwiseOrHandler,-782248736
-builtin_hash,BitwiseXorHandler,355393221
-builtin_hash,BitwiseAndHandler,568661089
-builtin_hash,ShiftLeftHandler,-289287193
-builtin_hash,ShiftRightHandler,468833118
-builtin_hash,ShiftRightLogicalHandler,1067162459
-builtin_hash,AddSmiHandler,-819173902
-builtin_hash,SubSmiHandler,-89503362
-builtin_hash,MulSmiHandler,-67264102
-builtin_hash,DivSmiHandler,200036936
-builtin_hash,ModSmiHandler,430435649
-builtin_hash,BitwiseOrSmiHandler,755686576
-builtin_hash,BitwiseXorSmiHandler,-713121388
-builtin_hash,BitwiseAndSmiHandler,-35533849
-builtin_hash,ShiftLeftSmiHandler,219116013
-builtin_hash,ShiftRightSmiHandler,-486366353
-builtin_hash,ShiftRightLogicalSmiHandler,561278751
-builtin_hash,IncHandler,-31417017
-builtin_hash,DecHandler,-90685787
-builtin_hash,NegateHandler,145072098
-builtin_hash,BitwiseNotHandler,401993486
-builtin_hash,ToBooleanLogicalNotHandler,741088768
-builtin_hash,LogicalNotHandler,-58795339
-builtin_hash,TypeOfHandler,-388431360
-builtin_hash,DeletePropertyStrictHandler,-1037108511
-builtin_hash,DeletePropertySloppyHandler,-37188680
-builtin_hash,FindNonDefaultConstructorOrConstructHandler,-861606229
-builtin_hash,CallAnyReceiverHandler,1020222647
-builtin_hash,CallPropertyHandler,1020222647
-builtin_hash,CallProperty0Handler,224132667
-builtin_hash,CallProperty1Handler,-100562732
-builtin_hash,CallProperty2Handler,263714692
-builtin_hash,CallUndefinedReceiverHandler,7784819
-builtin_hash,CallUndefinedReceiver0Handler,-108466145
-builtin_hash,CallUndefinedReceiver1Handler,-579782138
-builtin_hash,CallUndefinedReceiver2Handler,615684548
-builtin_hash,CallWithSpreadHandler,1020222647
-builtin_hash,CallRuntimeHandler,-822340864
-builtin_hash,CallJSRuntimeHandler,-926246438
-builtin_hash,InvokeIntrinsicHandler,-8360555
-builtin_hash,ConstructHandler,-272587889
-builtin_hash,ConstructWithSpreadHandler,-372839015
-builtin_hash,TestEqualHandler,374283527
-builtin_hash,TestEqualStrictHandler,626523511
-builtin_hash,TestLessThanHandler,-794234108
-builtin_hash,TestGreaterThanHandler,735449711
-builtin_hash,TestLessThanOrEqualHandler,297739786
-builtin_hash,TestGreaterThanOrEqualHandler,-670596979
-builtin_hash,TestInstanceOfHandler,296390618
-builtin_hash,TestInHandler,510814407
-builtin_hash,ToNameHandler,137741473
-builtin_hash,ToNumberHandler,-476034023
-builtin_hash,ToNumericHandler,-258785245
-builtin_hash,ToObjectHandler,137741473
-builtin_hash,ToStringHandler,-664830208
-builtin_hash,CreateRegExpLiteralHandler,367775340
-builtin_hash,CreateArrayLiteralHandler,1024172358
-builtin_hash,CreateArrayFromIterableHandler,-1040575832
-builtin_hash,CreateEmptyArrayLiteralHandler,-266274507
-builtin_hash,CreateObjectLiteralHandler,-1061604397
-builtin_hash,CreateEmptyObjectLiteralHandler,725849533
-builtin_hash,CreateClosureHandler,-785694825
-builtin_hash,CreateBlockContextHandler,336437386
-builtin_hash,CreateCatchContextHandler,-591858337
-builtin_hash,CreateFunctionContextHandler,831535155
-builtin_hash,CreateMappedArgumentsHandler,690946433
-builtin_hash,CreateUnmappedArgumentsHandler,91752283
-builtin_hash,CreateRestParameterHandler,171972945
-builtin_hash,JumpLoopHandler,-538188868
-builtin_hash,JumpHandler,-127048246
-builtin_hash,JumpConstantHandler,9123603
-builtin_hash,JumpIfUndefinedConstantHandler,-235965049
-builtin_hash,JumpIfNotUndefinedConstantHandler,1037899806
-builtin_hash,JumpIfUndefinedOrNullConstantHandler,142717992
-builtin_hash,JumpIfTrueConstantHandler,-235965049
-builtin_hash,JumpIfFalseConstantHandler,-235965049
-builtin_hash,JumpIfToBooleanTrueConstantHandler,-924023865
-builtin_hash,JumpIfToBooleanFalseConstantHandler,163722448
-builtin_hash,JumpIfToBooleanTrueHandler,143047998
-builtin_hash,JumpIfToBooleanFalseHandler,132155748
-builtin_hash,JumpIfTrueHandler,509710878
-builtin_hash,JumpIfFalseHandler,509710878
-builtin_hash,JumpIfNullHandler,509710878
-builtin_hash,JumpIfNotNullHandler,1067368128
-builtin_hash,JumpIfUndefinedHandler,509710878
-builtin_hash,JumpIfNotUndefinedHandler,1067368128
-builtin_hash,JumpIfUndefinedOrNullHandler,388158191
-builtin_hash,JumpIfJSReceiverHandler,-1017385873
-builtin_hash,SwitchOnSmiNoFeedbackHandler,-713553393
-builtin_hash,ForInEnumerateHandler,-856728459
-builtin_hash,ForInPrepareHandler,218804922
-builtin_hash,ForInContinueHandler,-793028055
-builtin_hash,ForInNextHandler,715608170
-builtin_hash,ForInStepHandler,904745987
-builtin_hash,SetPendingMessageHandler,-797838352
-builtin_hash,ThrowHandler,640637055
-builtin_hash,ReThrowHandler,640637055
-builtin_hash,ReturnHandler,-751460073
-builtin_hash,ThrowReferenceErrorIfHoleHandler,544325925
-builtin_hash,ThrowSuperNotCalledIfHoleHandler,799933248
-builtin_hash,ThrowSuperAlreadyCalledIfNotHoleHandler,397804955
-builtin_hash,ThrowIfNotSuperConstructorHandler,-527607615
-builtin_hash,SwitchOnGeneratorStateHandler,91785332
-builtin_hash,SuspendGeneratorHandler,-82069111
-builtin_hash,ResumeGeneratorHandler,-346239077
-builtin_hash,GetIteratorHandler,828882125
-builtin_hash,ShortStarHandler,773625974
-builtin_hash,LdarWideHandler,-558525052
-builtin_hash,LdaSmiWideHandler,564581323
-builtin_hash,LdaConstantWideHandler,548763663
-builtin_hash,LdaContextSlotWideHandler,618436299
-builtin_hash,LdaImmutableContextSlotWideHandler,618436299
-builtin_hash,LdaImmutableCurrentContextSlotWideHandler,882741548
-builtin_hash,StarWideHandler,-757786971
-builtin_hash,MovWideHandler,-238891687
-builtin_hash,PushContextWideHandler,32753133
-builtin_hash,PopContextWideHandler,476886005
-builtin_hash,TestReferenceEqualWideHandler,753678489
-builtin_hash,LdaGlobalWideHandler,-888894444
-builtin_hash,LdaGlobalInsideTypeofWideHandler,500447961
-builtin_hash,StaGlobalWideHandler,-1015395305
-builtin_hash,StaContextSlotWideHandler,526187909
-builtin_hash,StaCurrentContextSlotWideHandler,475958509
-builtin_hash,LdaLookupGlobalSlotWideHandler,-1071096984
-builtin_hash,GetNamedPropertyWideHandler,817982323
-builtin_hash,GetKeyedPropertyWideHandler,857306089
-builtin_hash,SetNamedPropertyWideHandler,-960786341
-builtin_hash,DefineNamedOwnPropertyWideHandler,-960786341
-builtin_hash,SetKeyedPropertyWideHandler,591604085
-builtin_hash,DefineKeyedOwnPropertyWideHandler,591604085
-builtin_hash,StaInArrayLiteralWideHandler,591604085
-builtin_hash,AddWideHandler,-232204010
-builtin_hash,SubWideHandler,-384964032
-builtin_hash,MulWideHandler,580603117
-builtin_hash,DivWideHandler,63550206
-builtin_hash,BitwiseOrWideHandler,238584526
-builtin_hash,BitwiseAndWideHandler,199219068
-builtin_hash,ShiftLeftWideHandler,-289702637
-builtin_hash,AddSmiWideHandler,-181214576
-builtin_hash,SubSmiWideHandler,797195243
-builtin_hash,MulSmiWideHandler,84206304
-builtin_hash,DivSmiWideHandler,-713648372
-builtin_hash,ModSmiWideHandler,1034577565
-builtin_hash,BitwiseOrSmiWideHandler,636336979
-builtin_hash,BitwiseXorSmiWideHandler,83711264
-builtin_hash,BitwiseAndSmiWideHandler,981310414
-builtin_hash,ShiftLeftSmiWideHandler,-108872348
-builtin_hash,ShiftRightSmiWideHandler,-499188540
-builtin_hash,ShiftRightLogicalSmiWideHandler,868152887
-builtin_hash,IncWideHandler,23321587
-builtin_hash,DecWideHandler,775873375
-builtin_hash,NegateWideHandler,-106212454
-builtin_hash,CallPropertyWideHandler,-98363630
-builtin_hash,CallProperty0WideHandler,376756798
-builtin_hash,CallProperty1WideHandler,232683870
-builtin_hash,CallProperty2WideHandler,-495857513
-builtin_hash,CallUndefinedReceiverWideHandler,-1065355738
-builtin_hash,CallUndefinedReceiver0WideHandler,-94257150
-builtin_hash,CallUndefinedReceiver1WideHandler,639212152
-builtin_hash,CallUndefinedReceiver2WideHandler,1002448783
-builtin_hash,CallWithSpreadWideHandler,-98363630
-builtin_hash,ConstructWideHandler,-449134944
-builtin_hash,TestEqualWideHandler,32535298
-builtin_hash,TestEqualStrictWideHandler,438216084
-builtin_hash,TestLessThanWideHandler,278610142
-builtin_hash,TestGreaterThanWideHandler,468841842
-builtin_hash,TestLessThanOrEqualWideHandler,-427559307
-builtin_hash,TestGreaterThanOrEqualWideHandler,553075788
-builtin_hash,TestInstanceOfWideHandler,-1012072182
-builtin_hash,TestInWideHandler,452328218
-builtin_hash,ToNumericWideHandler,-1069018434
-builtin_hash,CreateRegExpLiteralWideHandler,-907932416
-builtin_hash,CreateArrayLiteralWideHandler,747775518
-builtin_hash,CreateEmptyArrayLiteralWideHandler,-1062432317
-builtin_hash,CreateObjectLiteralWideHandler,711507686
-builtin_hash,CreateClosureWideHandler,-755746622
-builtin_hash,CreateBlockContextWideHandler,-298013473
-builtin_hash,CreateFunctionContextWideHandler,-125332949
-builtin_hash,JumpLoopWideHandler,243486717
-builtin_hash,JumpWideHandler,-127048246
-builtin_hash,JumpIfToBooleanTrueWideHandler,527083439
-builtin_hash,JumpIfToBooleanFalseWideHandler,22533852
-builtin_hash,JumpIfTrueWideHandler,-988555685
-builtin_hash,JumpIfFalseWideHandler,-988555685
-builtin_hash,SwitchOnSmiNoFeedbackWideHandler,-1073273649
-builtin_hash,ForInPrepareWideHandler,364788909
-builtin_hash,ForInNextWideHandler,618775213
-builtin_hash,ThrowReferenceErrorIfHoleWideHandler,-906539593
-builtin_hash,GetIteratorWideHandler,987616579
-builtin_hash,LdaSmiExtraWideHandler,564581323
-builtin_hash,LdaGlobalExtraWideHandler,335586579
-builtin_hash,AddSmiExtraWideHandler,409910492
-builtin_hash,SubSmiExtraWideHandler,-439082683
-builtin_hash,MulSmiExtraWideHandler,-535192892
-builtin_hash,DivSmiExtraWideHandler,-779784508
-builtin_hash,BitwiseOrSmiExtraWideHandler,962616587
-builtin_hash,BitwiseXorSmiExtraWideHandler,-2659951
-builtin_hash,BitwiseAndSmiExtraWideHandler,161080602
-builtin_hash,CallUndefinedReceiverExtraWideHandler,-80876123
-builtin_hash,CallUndefinedReceiver1ExtraWideHandler,-222714708
-builtin_hash,CallUndefinedReceiver2ExtraWideHandler,-437922167
diff --git a/deps/v8/tools/builtins-pgo/arm64.profile b/deps/v8/tools/builtins-pgo/arm64.profile
deleted file mode 100644
index 0595a88d8a..0000000000
--- a/deps/v8/tools/builtins-pgo/arm64.profile
+++ /dev/null
@@ -1,6403 +0,0 @@
-block_hint,RecordWriteSaveFP,21,22,0
-block_hint,RecordWriteSaveFP,6,7,0
-block_hint,RecordWriteSaveFP,19,20,0
-block_hint,RecordWriteSaveFP,9,10,0
-block_hint,RecordWriteSaveFP,36,37,0
-block_hint,RecordWriteSaveFP,34,35,0
-block_hint,RecordWriteSaveFP,32,33,1
-block_hint,RecordWriteSaveFP,25,26,0
-block_hint,RecordWriteSaveFP,15,16,1
-block_hint,RecordWriteIgnoreFP,21,22,0
-block_hint,RecordWriteIgnoreFP,6,7,0
-block_hint,RecordWriteIgnoreFP,19,20,0
-block_hint,RecordWriteIgnoreFP,9,10,0
-block_hint,RecordWriteIgnoreFP,25,26,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,43,44,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,83,84,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,80,81,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,63,64,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,35,36,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,67,68,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,50,51,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,29,30,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,56,57,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,7,8,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,61,62,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,14,15,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,16,17,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,69,70,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,54,55,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,83,84,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,80,81,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,63,64,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,5,6,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,46,47,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,25,26,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,67,68,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,50,51,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,29,30,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,56,57,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,7,8,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,61,62,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,14,15,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,16,17,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,69,70,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,54,55,1
-block_hint,Call_ReceiverIsAny_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsAny_Baseline_Compact,21,22,1
-block_hint,CallProxy,38,39,1
-block_hint,CallProxy,22,23,1
-block_hint,CallProxy,18,19,1
-block_hint,CallWithSpread,51,52,1
-block_hint,CallWithSpread,31,32,1
-block_hint,CallWithSpread,6,7,1
-block_hint,CallWithSpread,57,58,1
-block_hint,CallWithSpread,59,60,0
-block_hint,CallWithSpread,43,44,0
-block_hint,CallWithSpread,45,46,1
-block_hint,CallWithSpread,33,34,0
-block_hint,CallWithSpread,38,39,1
-block_hint,CallWithSpread,40,41,1
-block_hint,CallWithSpread,22,23,1
-block_hint,CallWithSpread,24,25,1
-block_hint,CallWithSpread,26,27,0
-block_hint,CallWithSpread,53,54,1
-block_hint,CallWithSpread,47,48,1
-block_hint,CallWithSpread,9,10,0
-block_hint,CallWithSpread,11,12,1
-block_hint,CallWithSpread,13,14,0
-block_hint,CallWithSpread,49,50,0
-block_hint,CallWithSpread,55,56,1
-block_hint,CallWithSpread,15,16,1
-block_hint,CallWithSpread_Baseline,115,116,1
-block_hint,CallWithSpread_Baseline,60,61,0
-block_hint,CallWithSpread_Baseline,113,114,0
-block_hint,CallWithSpread_Baseline,106,107,0
-block_hint,CallWithSpread_Baseline,81,82,0
-block_hint,CallWithSpread_Baseline,48,49,1
-block_hint,CallWithSpread_Baseline,138,139,1
-block_hint,CallWithSpread_Baseline,130,131,0
-block_hint,CallWithSpread_Baseline,119,120,1
-block_hint,CallWithSpread_Baseline,85,86,1
-block_hint,CallWithSpread_Baseline,12,13,1
-block_hint,CallWithSpread_Baseline,100,101,1
-block_hint,CallWithSpread_Baseline,102,103,0
-block_hint,CallWithSpread_Baseline,68,69,0
-block_hint,CallWithSpread_Baseline,33,34,1
-block_hint,CallWithSpread_Baseline,70,71,1
-block_hint,CallWithSpread_Baseline,53,54,0
-block_hint,CallWithSpread_Baseline,63,64,1
-block_hint,CallWithSpread_Baseline,65,66,1
-block_hint,CallWithSpread_Baseline,37,38,1
-block_hint,CallWithSpread_Baseline,39,40,1
-block_hint,CallWithSpread_Baseline,41,42,0
-block_hint,CallWithSpread_Baseline,91,92,1
-block_hint,CallWithSpread_Baseline,77,78,1
-block_hint,CallWithSpread_Baseline,23,24,0
-block_hint,CallWithSpread_Baseline,25,26,1
-block_hint,CallWithSpread_Baseline,27,28,0
-block_hint,CallWithSpread_Baseline,79,80,0
-block_hint,CallWithSpread_Baseline,93,94,1
-block_hint,CallWithSpread_Baseline,29,30,1
-block_hint,CallWithArrayLike,28,29,1
-block_hint,CallWithArrayLike,30,31,1
-block_hint,CallWithArrayLike,32,33,1
-block_hint,CallWithArrayLike,6,7,1
-block_hint,CallWithArrayLike,8,9,1
-block_hint,CallWithArrayLike,61,62,0
-block_hint,CallWithArrayLike,53,54,1
-block_hint,CallWithArrayLike,46,47,0
-block_hint,CallWithArrayLike,10,11,1
-block_hint,CallWithArrayLike,63,64,1
-block_hint,CallWithArrayLike,55,56,0
-block_hint,CallWithArrayLike,57,58,1
-block_hint,CallWithArrayLike,12,13,0
-block_hint,ConstructWithSpread_Baseline,90,91,1
-block_hint,ConstructWithSpread_Baseline,70,71,1
-block_hint,ConstructWithSpread_Baseline,45,46,1
-block_hint,ConstructWithSpread_Baseline,11,12,1
-block_hint,ConstructWithSpread_Baseline,81,82,1
-block_hint,ConstructWithSpread_Baseline,83,84,0
-block_hint,ConstructWithSpread_Baseline,58,59,0
-block_hint,ConstructWithSpread_Baseline,27,28,1
-block_hint,Construct_Baseline,48,49,0
-block_hint,Construct_Baseline,46,47,1
-block_hint,Construct_Baseline,38,39,1
-block_hint,Construct_Baseline,23,24,1
-block_hint,Construct_Baseline,5,6,1
-block_hint,Construct_Baseline,20,21,1
-block_hint,Construct_Baseline,25,26,1
-block_hint,Construct_Baseline,7,8,1
-block_hint,Construct_Baseline,36,37,0
-block_hint,Construct_Baseline,11,12,1
-block_hint,Construct_Baseline,13,14,0
-block_hint,Construct_Baseline,40,41,0
-block_hint,Construct_Baseline,27,28,1
-block_hint,FastNewObject,38,39,1
-block_hint,FastNewObject,40,41,1
-block_hint,FastNewObject,42,43,1
-block_hint,FastNewObject,44,45,1
-block_hint,FastNewObject,53,54,0
-block_hint,FastNewObject,55,56,1
-block_hint,FastNewObject,48,49,0
-block_hint,FastNewObject,23,24,0
-block_hint,FastNewObject,27,28,0
-block_hint,FastNewObject,31,32,1
-block_hint,FastNewClosure,15,16,0
-block_hint,FastNewClosure,4,5,1
-block_hint,FastNewClosure,19,20,1
-block_hint,FastNewClosure,8,9,1
-block_hint,StringEqual,29,30,1
-block_hint,StringEqual,55,56,0
-block_hint,StringEqual,45,46,1
-block_hint,StringEqual,81,82,1
-block_hint,StringEqual,69,70,0
-block_hint,StringEqual,51,52,0
-block_hint,StringEqual,23,24,1
-block_hint,StringEqual,79,80,0
-block_hint,StringEqual,65,66,0
-block_hint,StringEqual,47,48,0
-block_hint,StringEqual,39,40,0
-block_hint,StringEqual,71,72,0
-block_hint,StringEqual,53,54,0
-block_hint,StringGreaterThanOrEqual,40,41,1
-block_hint,StringGreaterThanOrEqual,30,31,1
-block_hint,StringGreaterThanOrEqual,36,37,0
-block_hint,StringGreaterThanOrEqual,12,13,0
-block_hint,StringLessThan,22,23,0
-block_hint,StringLessThan,40,41,1
-block_hint,StringLessThan,36,37,0
-block_hint,StringLessThan,24,25,0
-block_hint,StringLessThanOrEqual,40,41,1
-block_hint,StringLessThanOrEqual,30,31,1
-block_hint,StringLessThanOrEqual,36,37,0
-block_hint,StringLessThanOrEqual,12,13,0
-block_hint,StringSubstring,91,92,0
-block_hint,StringSubstring,29,30,0
-block_hint,StringSubstring,63,64,1
-block_hint,StringSubstring,58,59,1
-block_hint,StringSubstring,56,57,1
-block_hint,StringSubstring,114,115,0
-block_hint,StringSubstring,85,86,1
-block_hint,StringSubstring,19,20,0
-block_hint,StringSubstring,21,22,0
-block_hint,StringSubstring,130,131,1
-block_hint,StringSubstring,118,119,1
-block_hint,StringSubstring,38,39,0
-block_hint,StringSubstring,83,84,1
-block_hint,StringSubstring,17,18,0
-block_hint,StringSubstring,132,133,1
-block_hint,StringSubstring,120,121,1
-block_hint,StringSubstring,42,43,0
-block_hint,StringSubstring,75,76,1
-block_hint,StringSubstring,139,140,0
-block_hint,StringSubstring,34,35,1
-block_hint,StringSubstring,31,32,0
-block_hint,OrderedHashTableHealIndex,5,6,1
-block_hint,OrderedHashTableHealIndex,9,10,0
-block_hint,CompileLazy,42,43,1
-block_hint,CompileLazy,22,23,0
-block_hint,CompileLazy,40,41,0
-block_hint,CompileLazy,8,9,0
-block_hint,CompileLazy,10,11,0
-block_hint,CompileLazy,15,16,0
-block_hint,CompileLazy,3,4,0
-block_hint,CompileLazy,18,19,1
-block_hint,AllocateInYoungGeneration,2,3,1
-block_hint,AllocateRegularInYoungGeneration,2,3,1
-block_hint,AllocateRegularInOldGeneration,2,3,1
-block_hint,CopyFastSmiOrObjectElements,12,13,0
-block_hint,CopyFastSmiOrObjectElements,18,19,1
-block_hint,CopyFastSmiOrObjectElements,9,10,0
-block_hint,CopyFastSmiOrObjectElements,23,24,1
-block_hint,CopyFastSmiOrObjectElements,21,22,1
-block_hint,CopyFastSmiOrObjectElements,15,16,1
-block_hint,GrowFastDoubleElements,18,19,0
-block_hint,GrowFastDoubleElements,20,21,0
-block_hint,GrowFastDoubleElements,14,15,0
-block_hint,GrowFastDoubleElements,16,17,1
-block_hint,GrowFastDoubleElements,25,26,0
-block_hint,GrowFastDoubleElements,6,7,0
-block_hint,GrowFastDoubleElements,27,28,0
-block_hint,GrowFastSmiOrObjectElements,16,17,0
-block_hint,GrowFastSmiOrObjectElements,18,19,0
-block_hint,GrowFastSmiOrObjectElements,14,15,0
-block_hint,GrowFastSmiOrObjectElements,22,23,1
-block_hint,GrowFastSmiOrObjectElements,6,7,0
-block_hint,GrowFastSmiOrObjectElements,12,13,0
-block_hint,ToNumber,3,4,1
-block_hint,ToNumber,5,6,0
-block_hint,ToNumber,18,19,0
-block_hint,ToNumber,15,16,1
-block_hint,ToNumber_Baseline,24,25,0
-block_hint,ToNumber_Baseline,22,23,1
-block_hint,ToNumber_Baseline,3,4,1
-block_hint,ToNumeric_Baseline,7,8,0
-block_hint,ToNumeric_Baseline,9,10,1
-block_hint,ToNumeric_Baseline,3,4,1
-block_hint,ToNumberConvertBigInt,3,4,1
-block_hint,ToNumberConvertBigInt,5,6,0
-block_hint,ToNumberConvertBigInt,20,21,0
-block_hint,ToNumberConvertBigInt,17,18,1
-block_hint,ToNumberConvertBigInt,9,10,1
-block_hint,Typeof,17,18,0
-block_hint,Typeof,9,10,0
-block_hint,Typeof,13,14,1
-block_hint,KeyedLoadIC_PolymorphicName,244,245,1
-block_hint,KeyedLoadIC_PolymorphicName,96,97,1
-block_hint,KeyedLoadIC_PolymorphicName,260,261,0
-block_hint,KeyedLoadIC_PolymorphicName,58,59,0
-block_hint,KeyedLoadIC_PolymorphicName,133,134,1
-block_hint,KeyedLoadIC_PolymorphicName,298,299,1
-block_hint,KeyedLoadIC_PolymorphicName,330,331,1
-block_hint,KeyedLoadIC_PolymorphicName,98,99,0
-block_hint,KeyedLoadIC_PolymorphicName,100,101,0
-block_hint,KeyedLoadIC_PolymorphicName,22,23,1
-block_hint,KeyedLoadIC_PolymorphicName,165,166,0
-block_hint,KeyedLoadIC_PolymorphicName,122,123,1
-block_hint,KeyedLoadIC_PolymorphicName,332,333,1
-block_hint,KeyedLoadIC_PolymorphicName,110,111,0
-block_hint,KeyedLoadIC_PolymorphicName,175,176,0
-block_hint,KeyedLoadIC_PolymorphicName,43,44,1
-block_hint,KeyedLoadIC_PolymorphicName,74,75,0
-block_hint,KeyedLoadIC_PolymorphicName,250,251,0
-block_hint,KeyedLoadIC_PolymorphicName,287,288,1
-block_hint,KeyedLoadIC_PolymorphicName,26,27,0
-block_hint,KeyedLoadIC_PolymorphicName,24,25,0
-block_hint,KeyedStoreIC_Megamorphic,379,380,1
-block_hint,KeyedStoreIC_Megamorphic,381,382,0
-block_hint,KeyedStoreIC_Megamorphic,1216,1217,0
-block_hint,KeyedStoreIC_Megamorphic,1218,1219,1
-block_hint,KeyedStoreIC_Megamorphic,1203,1204,1
-block_hint,KeyedStoreIC_Megamorphic,1140,1141,0
-block_hint,KeyedStoreIC_Megamorphic,915,916,1
-block_hint,KeyedStoreIC_Megamorphic,383,384,1
-block_hint,KeyedStoreIC_Megamorphic,1228,1229,0
-block_hint,KeyedStoreIC_Megamorphic,1211,1212,0
-block_hint,KeyedStoreIC_Megamorphic,601,602,0
-block_hint,KeyedStoreIC_Megamorphic,746,747,1
-block_hint,KeyedStoreIC_Megamorphic,603,604,0
-block_hint,KeyedStoreIC_Megamorphic,1191,1192,0
-block_hint,KeyedStoreIC_Megamorphic,1041,1042,0
-block_hint,KeyedStoreIC_Megamorphic,1168,1169,0
-block_hint,KeyedStoreIC_Megamorphic,192,193,1
-block_hint,KeyedStoreIC_Megamorphic,194,195,0
-block_hint,KeyedStoreIC_Megamorphic,1134,1135,0
-block_hint,KeyedStoreIC_Megamorphic,1143,1144,0
-block_hint,KeyedStoreIC_Megamorphic,927,928,0
-block_hint,KeyedStoreIC_Megamorphic,491,492,0
-block_hint,KeyedStoreIC_Megamorphic,895,896,0
-block_hint,KeyedStoreIC_Megamorphic,931,932,0
-block_hint,KeyedStoreIC_Megamorphic,929,930,1
-block_hint,KeyedStoreIC_Megamorphic,493,494,1
-block_hint,KeyedStoreIC_Megamorphic,499,500,1
-block_hint,KeyedStoreIC_Megamorphic,501,502,0
-block_hint,KeyedStoreIC_Megamorphic,935,936,1
-block_hint,KeyedStoreIC_Megamorphic,503,504,0
-block_hint,KeyedStoreIC_Megamorphic,505,506,1
-block_hint,KeyedStoreIC_Megamorphic,933,934,1
-block_hint,KeyedStoreIC_Megamorphic,497,498,1
-block_hint,KeyedStoreIC_Megamorphic,495,496,0
-block_hint,KeyedStoreIC_Megamorphic,1115,1116,1
-block_hint,KeyedStoreIC_Megamorphic,1177,1178,1
-block_hint,KeyedStoreIC_Megamorphic,893,894,0
-block_hint,KeyedStoreIC_Megamorphic,350,351,1
-block_hint,KeyedStoreIC_Megamorphic,336,337,1
-block_hint,KeyedStoreIC_Megamorphic,1113,1114,1
-block_hint,KeyedStoreIC_Megamorphic,683,684,0
-block_hint,KeyedStoreIC_Megamorphic,539,540,0
-block_hint,KeyedStoreIC_Megamorphic,541,542,0
-block_hint,KeyedStoreIC_Megamorphic,1045,1046,0
-block_hint,KeyedStoreIC_Megamorphic,547,548,1
-block_hint,KeyedStoreIC_Megamorphic,1071,1072,0
-block_hint,KeyedStoreIC_Megamorphic,606,607,0
-block_hint,KeyedStoreIC_Megamorphic,1193,1194,0
-block_hint,KeyedStoreIC_Megamorphic,549,550,0
-block_hint,KeyedStoreIC_Megamorphic,1047,1048,0
-block_hint,KeyedStoreIC_Megamorphic,551,552,1
-block_hint,KeyedStoreIC_Megamorphic,200,201,1
-block_hint,KeyedStoreIC_Megamorphic,553,554,0
-block_hint,KeyedStoreIC_Megamorphic,202,203,0
-block_hint,KeyedStoreIC_Megamorphic,204,205,0
-block_hint,KeyedStoreIC_Megamorphic,950,951,0
-block_hint,KeyedStoreIC_Megamorphic,555,556,1
-block_hint,KeyedStoreIC_Megamorphic,557,558,0
-block_hint,KeyedStoreIC_Megamorphic,559,560,1
-block_hint,KeyedStoreIC_Megamorphic,561,562,0
-block_hint,KeyedStoreIC_Megamorphic,1148,1149,0
-block_hint,KeyedStoreIC_Megamorphic,563,564,1
-block_hint,KeyedStoreIC_Megamorphic,902,903,0
-block_hint,KeyedStoreIC_Megamorphic,1150,1151,0
-block_hint,KeyedStoreIC_Megamorphic,565,566,1
-block_hint,KeyedStoreIC_Megamorphic,571,572,1
-block_hint,KeyedStoreIC_Megamorphic,573,574,0
-block_hint,KeyedStoreIC_Megamorphic,575,576,0
-block_hint,KeyedStoreIC_Megamorphic,577,578,1
-block_hint,KeyedStoreIC_Megamorphic,957,958,1
-block_hint,KeyedStoreIC_Megamorphic,569,570,1
-block_hint,KeyedStoreIC_Megamorphic,567,568,0
-block_hint,KeyedStoreIC_Megamorphic,1214,1215,0
-block_hint,KeyedStoreIC_Megamorphic,1231,1232,1
-block_hint,KeyedStoreIC_Megamorphic,1224,1225,1
-block_hint,KeyedStoreIC_Megamorphic,1130,1131,1
-block_hint,KeyedStoreIC_Megamorphic,975,976,1
-block_hint,KeyedStoreIC_Megamorphic,206,207,0
-block_hint,KeyedStoreIC_Megamorphic,362,363,0
-block_hint,KeyedStoreIC_Megamorphic,977,978,1
-block_hint,KeyedStoreIC_Megamorphic,214,215,0
-block_hint,KeyedStoreIC_Megamorphic,1027,1028,0
-block_hint,KeyedStoreIC_Megamorphic,693,694,0
-block_hint,KeyedStoreIC_Megamorphic,579,580,0
-block_hint,KeyedStoreIC_Megamorphic,167,168,1
-block_hint,KeyedStoreIC_Megamorphic,581,582,0
-block_hint,KeyedStoreIC_Megamorphic,583,584,0
-block_hint,KeyedStoreIC_Megamorphic,1054,1055,0
-block_hint,KeyedStoreIC_Megamorphic,585,586,1
-block_hint,KeyedStoreIC_Megamorphic,963,964,0
-block_hint,KeyedStoreIC_Megamorphic,1174,1175,0
-block_hint,KeyedStoreIC_Megamorphic,1056,1057,1
-block_hint,KeyedStoreIC_Megamorphic,759,760,1
-block_hint,KeyedStoreIC_Megamorphic,612,613,0
-block_hint,KeyedStoreIC_Megamorphic,1196,1197,0
-block_hint,KeyedStoreIC_Megamorphic,1058,1059,0
-block_hint,KeyedStoreIC_Megamorphic,1172,1173,0
-block_hint,KeyedStoreIC_Megamorphic,224,225,0
-block_hint,KeyedStoreIC_Megamorphic,761,762,0
-block_hint,KeyedStoreIC_Megamorphic,593,594,0
-block_hint,KeyedStoreIC_Megamorphic,1136,1137,0
-block_hint,KeyedStoreIC_Megamorphic,1180,1181,0
-block_hint,KeyedStoreIC_Megamorphic,906,907,0
-block_hint,KeyedStoreIC_Megamorphic,173,174,1
-block_hint,KeyedStoreIC_Megamorphic,175,176,1
-block_hint,KeyedStoreIC_Megamorphic,373,374,0
-block_hint,KeyedStoreIC_Megamorphic,177,178,1
-block_hint,KeyedStoreIC_Megamorphic,375,376,0
-block_hint,KeyedStoreIC_Megamorphic,179,180,1
-block_hint,KeyedStoreIC_Megamorphic,234,235,0
-block_hint,KeyedStoreIC_Megamorphic,236,237,0
-block_hint,KeyedStoreIC_Megamorphic,181,182,1
-block_hint,KeyedStoreIC_Megamorphic,183,184,1
-block_hint,KeyedStoreIC_Megamorphic,1032,1033,0
-block_hint,KeyedStoreIC_Megamorphic,185,186,1
-block_hint,KeyedStoreIC_Megamorphic,925,926,1
-block_hint,KeyedStoreIC_Megamorphic,485,486,1
-block_hint,KeyedStoreIC_Megamorphic,733,734,0
-block_hint,KeyedStoreIC_Megamorphic,919,920,1
-block_hint,KeyedStoreIC_Megamorphic,413,414,0
-block_hint,KeyedStoreIC_Megamorphic,415,416,0
-block_hint,KeyedStoreIC_Megamorphic,254,255,1
-block_hint,KeyedStoreIC_Megamorphic,417,418,0
-block_hint,KeyedStoreIC_Megamorphic,630,631,1
-block_hint,KeyedStoreIC_Megamorphic,92,93,1
-block_hint,KeyedStoreIC_Megamorphic,94,95,0
-block_hint,KeyedStoreIC_Megamorphic,769,770,1
-block_hint,KeyedStoreIC_Megamorphic,387,388,0
-block_hint,KeyedStoreIC_Megamorphic,639,640,1
-block_hint,KeyedStoreIC_Megamorphic,64,65,1
-block_hint,KeyedStoreIC_Megamorphic,66,67,0
-block_hint,DefineKeyedOwnIC_Megamorphic,312,313,1
-block_hint,DefineKeyedOwnIC_Megamorphic,314,315,0
-block_hint,DefineKeyedOwnIC_Megamorphic,887,888,0
-block_hint,DefineKeyedOwnIC_Megamorphic,420,421,0
-block_hint,DefineKeyedOwnIC_Megamorphic,418,419,1
-block_hint,DefineKeyedOwnIC_Megamorphic,803,804,0
-block_hint,DefineKeyedOwnIC_Megamorphic,575,576,1
-block_hint,DefineKeyedOwnIC_Megamorphic,601,602,1
-block_hint,DefineKeyedOwnIC_Megamorphic,232,233,0
-block_hint,DefineKeyedOwnIC_Megamorphic,53,54,1
-block_hint,DefineKeyedOwnIC_Megamorphic,55,56,0
-block_hint,LoadGlobalIC_NoFeedback,41,42,1
-block_hint,LoadGlobalIC_NoFeedback,6,7,1
-block_hint,LoadGlobalIC_NoFeedback,8,9,1
-block_hint,LoadGlobalIC_NoFeedback,10,11,1
-block_hint,LoadGlobalIC_NoFeedback,12,13,1
-block_hint,LoadGlobalIC_NoFeedback,31,32,1
-block_hint,LoadGlobalIC_NoFeedback,49,50,1
-block_hint,LoadGlobalIC_NoFeedback,18,19,1
-block_hint,LoadGlobalIC_NoFeedback,27,28,0
-block_hint,LoadGlobalIC_NoFeedback,14,15,1
-block_hint,LoadGlobalIC_NoFeedback,33,34,0
-block_hint,LoadGlobalIC_NoFeedback,16,17,1
-block_hint,LoadGlobalIC_NoFeedback,20,21,1
-block_hint,LoadGlobalIC_NoFeedback,22,23,0
-block_hint,LoadGlobalIC_NoFeedback,24,25,1
-block_hint,LoadIC_FunctionPrototype,2,3,0
-block_hint,LoadIC_FunctionPrototype,4,5,1
-block_hint,LoadIC_NoFeedback,97,98,1
-block_hint,LoadIC_NoFeedback,99,100,0
-block_hint,LoadIC_NoFeedback,306,307,1
-block_hint,LoadIC_NoFeedback,226,227,0
-block_hint,LoadIC_NoFeedback,285,286,1
-block_hint,LoadIC_NoFeedback,141,142,0
-block_hint,LoadIC_NoFeedback,320,321,0
-block_hint,LoadIC_NoFeedback,287,288,0
-block_hint,LoadIC_NoFeedback,302,303,0
-block_hint,LoadIC_NoFeedback,53,54,1
-block_hint,LoadIC_NoFeedback,289,290,0
-block_hint,LoadIC_NoFeedback,55,56,0
-block_hint,LoadIC_NoFeedback,324,325,1
-block_hint,LoadIC_NoFeedback,272,273,0
-block_hint,LoadIC_NoFeedback,295,296,1
-block_hint,LoadIC_NoFeedback,247,248,1
-block_hint,LoadIC_NoFeedback,59,60,0
-block_hint,LoadIC_NoFeedback,22,23,1
-block_hint,LoadIC_NoFeedback,35,36,1
-block_hint,LoadIC_NoFeedback,130,131,1
-block_hint,LoadIC_NoFeedback,145,146,0
-block_hint,LoadIC_NoFeedback,125,126,0
-block_hint,LoadIC_NoFeedback,261,262,0
-block_hint,LoadIC_NoFeedback,250,251,0
-block_hint,LoadIC_NoFeedback,149,150,1
-block_hint,LoadIC_NoFeedback,167,168,0
-block_hint,LoadIC_NoFeedback,322,323,0
-block_hint,LoadIC_NoFeedback,151,152,0
-block_hint,LoadIC_NoFeedback,291,292,0
-block_hint,LoadIC_NoFeedback,70,71,1
-block_hint,LoadIC_NoFeedback,155,156,0
-block_hint,LoadIC_NoFeedback,72,73,0
-block_hint,LoadIC_NoFeedback,254,255,1
-block_hint,LoadIC_NoFeedback,76,77,0
-block_hint,LoadIC_NoFeedback,326,327,1
-block_hint,LoadIC_NoFeedback,278,279,0
-block_hint,LoadIC_NoFeedback,276,277,0
-block_hint,LoadIC_NoFeedback,242,243,1
-block_hint,LoadIC_NoFeedback,135,136,1
-block_hint,LoadIC_NoFeedback,93,94,0
-block_hint,StoreIC_NoFeedback,147,148,1
-block_hint,StoreIC_NoFeedback,149,150,0
-block_hint,StoreIC_NoFeedback,259,260,0
-block_hint,StoreIC_NoFeedback,549,550,0
-block_hint,StoreIC_NoFeedback,443,444,0
-block_hint,StoreIC_NoFeedback,527,528,0
-block_hint,StoreIC_NoFeedback,58,59,1
-block_hint,StoreIC_NoFeedback,60,61,0
-block_hint,StoreIC_NoFeedback,498,499,0
-block_hint,StoreIC_NoFeedback,367,368,0
-block_hint,StoreIC_NoFeedback,151,152,0
-block_hint,StoreIC_NoFeedback,349,350,0
-block_hint,StoreIC_NoFeedback,153,154,1
-block_hint,StoreIC_NoFeedback,159,160,1
-block_hint,StoreIC_NoFeedback,161,162,0
-block_hint,StoreIC_NoFeedback,163,164,0
-block_hint,StoreIC_NoFeedback,157,158,1
-block_hint,StoreIC_NoFeedback,155,156,0
-block_hint,StoreIC_NoFeedback,536,537,1
-block_hint,StoreIC_NoFeedback,381,382,1
-block_hint,StoreIC_NoFeedback,179,180,0
-block_hint,StoreIC_NoFeedback,519,520,1
-block_hint,StoreIC_NoFeedback,199,200,0
-block_hint,StoreIC_NoFeedback,201,202,0
-block_hint,StoreIC_NoFeedback,447,448,0
-block_hint,StoreIC_NoFeedback,207,208,1
-block_hint,StoreIC_NoFeedback,473,474,0
-block_hint,StoreIC_NoFeedback,262,263,0
-block_hint,StoreIC_NoFeedback,551,552,0
-block_hint,StoreIC_NoFeedback,209,210,0
-block_hint,StoreIC_NoFeedback,449,450,0
-block_hint,StoreIC_NoFeedback,66,67,1
-block_hint,StoreIC_NoFeedback,213,214,0
-block_hint,StoreIC_NoFeedback,68,69,0
-block_hint,StoreIC_NoFeedback,390,391,0
-block_hint,StoreIC_NoFeedback,215,216,1
-block_hint,StoreIC_NoFeedback,217,218,0
-block_hint,StoreIC_NoFeedback,219,220,1
-block_hint,StoreIC_NoFeedback,221,222,0
-block_hint,StoreIC_NoFeedback,509,510,0
-block_hint,StoreIC_NoFeedback,223,224,1
-block_hint,StoreIC_NoFeedback,356,357,0
-block_hint,StoreIC_NoFeedback,511,512,0
-block_hint,StoreIC_NoFeedback,393,394,1
-block_hint,StoreIC_NoFeedback,231,232,1
-block_hint,StoreIC_NoFeedback,233,234,0
-block_hint,StoreIC_NoFeedback,235,236,0
-block_hint,StoreIC_NoFeedback,237,238,1
-block_hint,StoreIC_NoFeedback,227,228,0
-block_hint,StoreIC_NoFeedback,564,565,0
-block_hint,StoreIC_NoFeedback,494,495,1
-block_hint,StoreIC_NoFeedback,413,414,1
-block_hint,StoreIC_NoFeedback,72,73,0
-block_hint,StoreIC_NoFeedback,78,79,0
-block_hint,StoreIC_NoFeedback,130,131,0
-block_hint,StoreIC_NoFeedback,415,416,1
-block_hint,StoreIC_NoFeedback,80,81,0
-block_hint,StoreIC_NoFeedback,82,83,0
-block_hint,StoreIC_NoFeedback,241,242,0
-block_hint,StoreIC_NoFeedback,243,244,0
-block_hint,StoreIC_NoFeedback,456,457,0
-block_hint,StoreIC_NoFeedback,245,246,1
-block_hint,StoreIC_NoFeedback,513,514,0
-block_hint,StoreIC_NoFeedback,403,404,0
-block_hint,StoreIC_NoFeedback,458,459,1
-block_hint,StoreIC_NoFeedback,268,269,0
-block_hint,StoreIC_NoFeedback,553,554,0
-block_hint,StoreIC_NoFeedback,460,461,0
-block_hint,StoreIC_NoFeedback,531,532,0
-block_hint,StoreIC_NoFeedback,90,91,0
-block_hint,StoreIC_NoFeedback,332,333,0
-block_hint,StoreIC_NoFeedback,420,421,1
-block_hint,StoreIC_NoFeedback,94,95,0
-block_hint,StoreIC_NoFeedback,96,97,0
-block_hint,StoreIC_NoFeedback,253,254,0
-block_hint,StoreIC_NoFeedback,255,256,1
-block_hint,StoreIC_NoFeedback,362,363,0
-block_hint,StoreIC_NoFeedback,40,41,1
-block_hint,StoreIC_NoFeedback,42,43,1
-block_hint,StoreIC_NoFeedback,141,142,0
-block_hint,StoreIC_NoFeedback,44,45,1
-block_hint,StoreIC_NoFeedback,143,144,0
-block_hint,StoreIC_NoFeedback,46,47,1
-block_hint,StoreIC_NoFeedback,100,101,0
-block_hint,StoreIC_NoFeedback,102,103,0
-block_hint,StoreIC_NoFeedback,48,49,1
-block_hint,StoreIC_NoFeedback,50,51,1
-block_hint,StoreIC_NoFeedback,439,440,0
-block_hint,StoreIC_NoFeedback,52,53,1
-block_hint,DefineNamedOwnIC_NoFeedback,80,81,1
-block_hint,DefineNamedOwnIC_NoFeedback,82,83,0
-block_hint,DefineNamedOwnIC_NoFeedback,236,237,0
-block_hint,DefineNamedOwnIC_NoFeedback,210,211,1
-block_hint,DefineNamedOwnIC_NoFeedback,136,137,0
-block_hint,DefineNamedOwnIC_NoFeedback,239,240,0
-block_hint,DefineNamedOwnIC_NoFeedback,212,213,0
-block_hint,DefineNamedOwnIC_NoFeedback,234,235,0
-block_hint,DefineNamedOwnIC_NoFeedback,157,158,1
-block_hint,DefineNamedOwnIC_NoFeedback,36,37,1
-block_hint,DefineNamedOwnIC_NoFeedback,86,87,0
-block_hint,DefineNamedOwnIC_NoFeedback,38,39,0
-block_hint,DefineNamedOwnIC_NoFeedback,40,41,0
-block_hint,KeyedLoadIC_SloppyArguments,12,13,0
-block_hint,KeyedLoadIC_SloppyArguments,14,15,1
-block_hint,KeyedLoadIC_SloppyArguments,4,5,1
-block_hint,KeyedLoadIC_SloppyArguments,22,23,0
-block_hint,KeyedLoadIC_SloppyArguments,6,7,1
-block_hint,KeyedLoadIC_SloppyArguments,16,17,0
-block_hint,KeyedLoadIC_SloppyArguments,18,19,0
-block_hint,KeyedLoadIC_SloppyArguments,8,9,1
-block_hint,KeyedLoadIC_SloppyArguments,10,11,0
-block_hint,StoreFastElementIC_Standard,336,337,0
-block_hint,StoreFastElementIC_Standard,876,877,0
-block_hint,StoreFastElementIC_Standard,344,345,0
-block_hint,StoreFastElementIC_Standard,1016,1017,1
-block_hint,StoreFastElementIC_Standard,346,347,1
-block_hint,StoreFastElementIC_Standard,40,41,1
-block_hint,StoreFastElementIC_Standard,348,349,0
-block_hint,StoreFastElementIC_Standard,878,879,0
-block_hint,StoreFastElementIC_Standard,356,357,0
-block_hint,StoreFastElementIC_Standard,1018,1019,1
-block_hint,StoreFastElementIC_Standard,358,359,1
-block_hint,StoreFastElementIC_Standard,42,43,1
-block_hint,StoreFastElementIC_Standard,360,361,0
-block_hint,StoreFastElementIC_Standard,880,881,0
-block_hint,StoreFastElementIC_Standard,1020,1021,1
-block_hint,StoreFastElementIC_Standard,368,369,1
-block_hint,StoreFastElementIC_Standard,44,45,1
-block_hint,StoreFastElementIC_Standard,400,401,0
-block_hint,StoreFastElementIC_Standard,888,889,0
-block_hint,StoreFastElementIC_Standard,1028,1029,1
-block_hint,StoreFastElementIC_Standard,408,409,1
-block_hint,StoreFastElementIC_Standard,52,53,1
-block_hint,StoreFastElementIC_Standard,894,895,0
-block_hint,StoreFastElementIC_Standard,436,437,1
-block_hint,StoreFastElementIC_Standard,1035,1036,1
-block_hint,StoreFastElementIC_Standard,438,439,1
-block_hint,StoreFastElementIC_Standard,58,59,1
-block_hint,StoreFastElementIC_Standard,898,899,0
-block_hint,StoreFastElementIC_Standard,446,447,1
-block_hint,StoreFastElementIC_Standard,1038,1039,1
-block_hint,StoreFastElementIC_Standard,448,449,1
-block_hint,StoreFastElementIC_Standard,60,61,1
-block_hint,StoreFastElementIC_Standard,662,663,0
-block_hint,StoreFastElementIC_Standard,1113,1114,0
-block_hint,StoreFastElementIC_Standard,731,732,0
-block_hint,StoreFastElementIC_Standard,300,301,1
-block_hint,StoreFastElementIC_Standard,658,659,0
-block_hint,StoreFastElementIC_Standard,1115,1116,0
-block_hint,StoreFastElementIC_Standard,733,734,0
-block_hint,StoreFastElementIC_Standard,302,303,1
-block_hint,StoreFastElementIC_Standard,654,655,0
-block_hint,StoreFastElementIC_Standard,1117,1118,0
-block_hint,StoreFastElementIC_Standard,735,736,0
-block_hint,StoreFastElementIC_Standard,304,305,1
-block_hint,StoreFastElementIC_Standard,650,651,0
-block_hint,StoreFastElementIC_Standard,1119,1120,0
-block_hint,StoreFastElementIC_Standard,737,738,0
-block_hint,StoreFastElementIC_Standard,306,307,1
-block_hint,StoreFastElementIC_Standard,646,647,0
-block_hint,StoreFastElementIC_Standard,981,982,1
-block_hint,StoreFastElementIC_Standard,832,833,0
-block_hint,StoreFastElementIC_Standard,308,309,1
-block_hint,StoreFastElementIC_Standard,642,643,0
-block_hint,StoreFastElementIC_Standard,979,980,1
-block_hint,StoreFastElementIC_Standard,834,835,0
-block_hint,StoreFastElementIC_Standard,310,311,1
-block_hint,StoreFastElementIC_Standard,638,639,0
-block_hint,StoreFastElementIC_Standard,977,978,1
-block_hint,StoreFastElementIC_Standard,836,837,0
-block_hint,StoreFastElementIC_Standard,312,313,1
-block_hint,StoreFastElementIC_Standard,634,635,0
-block_hint,StoreFastElementIC_Standard,838,839,0
-block_hint,StoreFastElementIC_Standard,314,315,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,489,490,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,265,266,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,661,662,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,492,493,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,587,588,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,455,456,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,214,215,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,565,566,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,198,199,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,204,205,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,643,644,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,457,458,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,34,35,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,494,495,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,281,282,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,667,668,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,287,288,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,591,592,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,459,460,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,219,220,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,561,562,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,182,183,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,184,185,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,645,646,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,461,462,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,36,37,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,499,500,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,673,674,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,301,302,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,595,596,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,463,464,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,224,225,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,557,558,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,166,167,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,647,648,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,465,466,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,38,39,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,512,513,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,679,680,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,345,346,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,605,606,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,473,474,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,238,239,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,547,548,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,126,127,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,651,652,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,475,476,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,46,47,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,521,522,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,379,380,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,683,684,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,385,386,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,614,615,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,481,482,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,249,250,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,251,252,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,539,540,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,102,103,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,541,542,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,483,484,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,56,57,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,528,529,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,393,394,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,687,688,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,399,400,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,96,97,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,100,101,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,487,488,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,58,59,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,357,358,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,187,188,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,415,416,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,189,190,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,22,23,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,361,362,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,419,420,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,211,212,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,213,214,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,160,161,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,397,398,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,130,131,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,134,135,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,26,27,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,377,378,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,285,286,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,434,435,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,287,288,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,44,45,1
-block_hint,ElementsTransitionAndStore_Standard,460,461,0
-block_hint,ElementsTransitionAndStore_Standard,462,463,1
-block_hint,ElementsTransitionAndStore_Standard,555,556,1
-block_hint,ElementsTransitionAndStore_Standard,458,459,0
-block_hint,ElementsTransitionAndStore_Standard,199,200,0
-block_hint,ElementsTransitionAndStore_Standard,201,202,0
-block_hint,ElementsTransitionAndStore_Standard,355,356,0
-block_hint,ElementsTransitionAndStore_Standard,464,465,1
-block_hint,ElementsTransitionAndStore_Standard,209,210,1
-block_hint,ElementsTransitionAndStore_Standard,28,29,1
-block_hint,ElementsTransitionAndStore_Standard,508,509,0
-block_hint,ElementsTransitionAndStore_Standard,510,511,1
-block_hint,ElementsTransitionAndStore_Standard,504,505,1
-block_hint,ElementsTransitionAndStore_Standard,506,507,0
-block_hint,ElementsTransitionAndStore_Standard,267,268,0
-block_hint,ElementsTransitionAndStore_Standard,370,371,0
-block_hint,ElementsTransitionAndStore_Standard,512,513,1
-block_hint,ElementsTransitionAndStore_Standard,275,276,1
-block_hint,ElementsTransitionAndStore_Standard,38,39,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,756,757,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,1115,1116,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,759,760,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,927,928,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,701,702,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,324,325,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,853,854,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,237,238,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,241,242,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,1059,1060,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,703,704,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,62,63,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,95,96,0
-block_hint,KeyedHasIC_PolymorphicName,69,70,1
-block_hint,KeyedHasIC_PolymorphicName,28,29,1
-block_hint,KeyedHasIC_PolymorphicName,24,25,0
-block_hint,KeyedHasIC_PolymorphicName,26,27,0
-block_hint,KeyedHasIC_PolymorphicName,55,56,1
-block_hint,KeyedHasIC_PolymorphicName,89,90,1
-block_hint,KeyedHasIC_PolymorphicName,93,94,1
-block_hint,KeyedHasIC_PolymorphicName,30,31,0
-block_hint,KeyedHasIC_PolymorphicName,32,33,0
-block_hint,KeyedHasIC_PolymorphicName,14,15,1
-block_hint,KeyedHasIC_PolymorphicName,16,17,1
-block_hint,EnqueueMicrotask,4,5,0
-block_hint,EnqueueMicrotask,2,3,0
-block_hint,RunMicrotasks,18,19,0
-block_hint,RunMicrotasks,31,32,0
-block_hint,RunMicrotasks,65,66,0
-block_hint,RunMicrotasks,36,37,1
-block_hint,RunMicrotasks,85,86,0
-block_hint,RunMicrotasks,67,68,0
-block_hint,RunMicrotasks,38,39,1
-block_hint,HasProperty,137,138,1
-block_hint,HasProperty,139,140,1
-block_hint,HasProperty,263,264,0
-block_hint,HasProperty,211,212,1
-block_hint,HasProperty,254,255,0
-block_hint,HasProperty,97,98,0
-block_hint,HasProperty,234,235,1
-block_hint,HasProperty,123,124,1
-block_hint,HasProperty,141,142,1
-block_hint,HasProperty,199,200,0
-block_hint,HasProperty,201,202,0
-block_hint,HasProperty,101,102,0
-block_hint,HasProperty,99,100,0
-block_hint,HasProperty,250,251,0
-block_hint,HasProperty,270,271,0
-block_hint,HasProperty,259,260,1
-block_hint,HasProperty,106,107,0
-block_hint,HasProperty,277,278,0
-block_hint,HasProperty,282,283,0
-block_hint,HasProperty,268,269,0
-block_hint,HasProperty,203,204,1
-block_hint,HasProperty,42,43,1
-block_hint,HasProperty,65,66,0
-block_hint,HasProperty,44,45,0
-block_hint,HasProperty,239,240,1
-block_hint,HasProperty,48,49,0
-block_hint,HasProperty,272,273,0
-block_hint,HasProperty,228,229,0
-block_hint,HasProperty,38,39,0
-block_hint,DeleteProperty,38,39,1
-block_hint,DeleteProperty,62,63,0
-block_hint,DeleteProperty,40,41,0
-block_hint,DeleteProperty,66,67,1
-block_hint,DeleteProperty,91,92,0
-block_hint,DeleteProperty,73,74,0
-block_hint,DeleteProperty,64,65,1
-block_hint,DeleteProperty,56,57,1
-block_hint,DeleteProperty,42,43,1
-block_hint,DeleteProperty,83,84,0
-block_hint,DeleteProperty,85,86,0
-block_hint,DeleteProperty,77,78,0
-block_hint,DeleteProperty,75,76,0
-block_hint,DeleteProperty,47,48,0
-block_hint,DeleteProperty,49,50,0
-block_hint,DeleteProperty,87,88,0
-block_hint,DeleteProperty,71,72,1
-block_hint,DeleteProperty,20,21,0
-block_hint,DeleteProperty,54,55,0
-block_hint,DeleteProperty,7,8,1
-block_hint,DeleteProperty,9,10,1
-block_hint,DeleteProperty,11,12,1
-block_hint,DeleteProperty,13,14,1
-block_hint,DeleteProperty,15,16,1
-block_hint,SetDataProperties,136,137,1
-block_hint,SetDataProperties,263,264,1
-block_hint,SetDataProperties,261,262,1
-block_hint,SetDataProperties,144,145,0
-block_hint,SetDataProperties,316,317,0
-block_hint,SetDataProperties,146,147,0
-block_hint,SetDataProperties,59,60,0
-block_hint,SetDataProperties,341,342,0
-block_hint,SetDataProperties,267,268,0
-block_hint,SetDataProperties,385,386,1
-block_hint,SetDataProperties,277,278,0
-block_hint,SetDataProperties,752,753,0
-block_hint,SetDataProperties,762,763,1
-block_hint,SetDataProperties,750,751,0
-block_hint,SetDataProperties,748,749,0
-block_hint,SetDataProperties,659,660,0
-block_hint,SetDataProperties,451,452,1
-block_hint,SetDataProperties,221,222,1
-block_hint,SetDataProperties,87,88,0
-block_hint,SetDataProperties,223,224,0
-block_hint,SetDataProperties,513,514,0
-block_hint,SetDataProperties,515,516,0
-block_hint,SetDataProperties,519,520,1
-block_hint,SetDataProperties,449,450,0
-block_hint,SetDataProperties,329,330,1
-block_hint,SetDataProperties,326,327,0
-block_hint,SetDataProperties,158,159,0
-block_hint,SetDataProperties,399,400,0
-block_hint,SetDataProperties,447,448,0
-block_hint,SetDataProperties,352,353,0
-block_hint,SetDataProperties,226,227,1
-block_hint,SetDataProperties,93,94,1
-block_hint,SetDataProperties,521,522,0
-block_hint,SetDataProperties,95,96,0
-block_hint,SetDataProperties,97,98,0
-block_hint,SetDataProperties,617,618,0
-block_hint,SetDataProperties,523,524,1
-block_hint,SetDataProperties,525,526,0
-block_hint,SetDataProperties,527,528,1
-block_hint,SetDataProperties,529,530,0
-block_hint,SetDataProperties,673,674,0
-block_hint,SetDataProperties,531,532,1
-block_hint,SetDataProperties,577,578,0
-block_hint,SetDataProperties,675,676,0
-block_hint,SetDataProperties,620,621,1
-block_hint,SetDataProperties,539,540,1
-block_hint,SetDataProperties,541,542,0
-block_hint,SetDataProperties,543,544,0
-block_hint,SetDataProperties,545,546,1
-block_hint,SetDataProperties,535,536,0
-block_hint,SetDataProperties,657,658,0
-block_hint,SetDataProperties,555,556,1
-block_hint,SetDataProperties,292,293,1
-block_hint,SetDataProperties,99,100,0
-block_hint,SetDataProperties,437,438,0
-block_hint,SetDataProperties,241,242,0
-block_hint,SetDataProperties,279,280,1
-block_hint,SetDataProperties,204,205,0
-block_hint,SetDataProperties,61,62,0
-block_hint,ReturnReceiver,3,4,1
-block_hint,ArrayConstructorImpl,40,41,0
-block_hint,ArrayConstructorImpl,15,16,1
-block_hint,ArrayConstructorImpl,19,20,0
-block_hint,ArrayConstructorImpl,23,24,0
-block_hint,ArrayConstructorImpl,25,26,1
-block_hint,ArrayConstructorImpl,9,10,0
-block_hint,ArrayConstructorImpl,13,14,1
-block_hint,ArrayConstructorImpl,27,28,0
-block_hint,ArrayConstructorImpl,29,30,1
-block_hint,ArrayNoArgumentConstructor_PackedSmi_DontOverride,3,4,1
-block_hint,ArrayNoArgumentConstructor_PackedSmi_DontOverride,5,6,1
-block_hint,ArrayNoArgumentConstructor_HoleySmi_DontOverride,3,4,1
-block_hint,ArrayNoArgumentConstructor_HoleySmi_DontOverride,5,6,1
-block_hint,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,5,6,1
-block_hint,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,5,6,1
-block_hint,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,8,9,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,21,22,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,8,9,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,25,26,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,12,13,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,8,9,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,10,11,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,25,26,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,12,13,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,8,9,0
-block_hint,ArrayIncludesSmi,120,121,0
-block_hint,ArrayIncludesSmi,43,44,1
-block_hint,ArrayIncludesSmi,108,109,1
-block_hint,ArrayIncludesSmi,75,76,1
-block_hint,ArrayIncludesSmi,122,123,0
-block_hint,ArrayIncludesSmiOrObject,113,114,1
-block_hint,ArrayIncludesSmiOrObject,38,39,0
-block_hint,ArrayIncludesSmiOrObject,107,108,0
-block_hint,ArrayIncludesSmiOrObject,28,29,1
-block_hint,ArrayIncludesSmiOrObject,84,85,1
-block_hint,ArrayIncludesSmiOrObject,86,87,1
-block_hint,ArrayIncludesSmiOrObject,117,118,0
-block_hint,ArrayIncludesSmiOrObject,131,132,1
-block_hint,ArrayIncludesSmiOrObject,125,126,0
-block_hint,ArrayIncludesSmiOrObject,98,99,0
-block_hint,ArrayIncludes,52,53,1
-block_hint,ArrayIncludes,49,50,0
-block_hint,ArrayIncludes,42,43,1
-block_hint,ArrayIncludes,44,45,1
-block_hint,ArrayIncludes,25,26,1
-block_hint,ArrayIncludes,17,18,1
-block_hint,ArrayIncludes,3,4,1
-block_hint,ArrayIncludes,47,48,1
-block_hint,ArrayIncludes,38,39,0
-block_hint,ArrayIncludes,27,28,1
-block_hint,ArrayIncludes,13,14,0
-block_hint,ArrayIncludes,19,20,1
-block_hint,ArrayIndexOfSmiOrObject,96,97,1
-block_hint,ArrayIndexOfSmiOrObject,88,89,0
-block_hint,ArrayIndexOfSmiOrObject,23,24,0
-block_hint,ArrayIndexOfSmiOrObject,37,38,0
-block_hint,ArrayIndexOfSmiOrObject,69,70,1
-block_hint,ArrayIndexOfSmiOrObject,43,44,0
-block_hint,ArrayIndexOfSmiOrObject,71,72,1
-block_hint,ArrayIndexOfSmiOrObject,110,111,1
-block_hint,ArrayIndexOfSmiOrObject,100,101,0
-block_hint,ArrayIndexOfSmiOrObject,77,78,0
-block_hint,ArrayIndexOfSmiOrObject,102,103,0
-block_hint,ArrayIndexOfSmiOrObject,79,80,0
-block_hint,ArrayIndexOfSmiOrObject,49,50,0
-block_hint,ArrayIndexOfSmiOrObject,29,30,0
-block_hint,ArrayIndexOfSmiOrObject,106,107,0
-block_hint,ArrayIndexOfSmiOrObject,83,84,0
-block_hint,ArrayIndexOfSmiOrObject,35,36,1
-block_hint,ArrayIndexOfSmiOrObject,94,95,1
-block_hint,ArrayIndexOfSmiOrObject,86,87,0
-block_hint,ArrayIndexOf,52,53,1
-block_hint,ArrayIndexOf,49,50,0
-block_hint,ArrayIndexOf,42,43,1
-block_hint,ArrayIndexOf,44,45,1
-block_hint,ArrayIndexOf,25,26,1
-block_hint,ArrayIndexOf,17,18,1
-block_hint,ArrayIndexOf,3,4,1
-block_hint,ArrayIndexOf,47,48,1
-block_hint,ArrayIndexOf,38,39,0
-block_hint,ArrayIndexOf,27,28,1
-block_hint,ArrayIndexOf,13,14,0
-block_hint,ArrayIndexOf,7,8,0
-block_hint,ArrayIndexOf,19,20,1
-block_hint,ArrayIndexOf,22,23,1
-block_hint,ArrayPrototypePop,49,50,1
-block_hint,ArrayPrototypePop,42,43,1
-block_hint,ArrayPrototypePop,47,48,1
-block_hint,ArrayPrototypePop,36,37,1
-block_hint,ArrayPrototypePop,26,27,1
-block_hint,ArrayPrototypePop,5,6,1
-block_hint,ArrayPrototypePop,45,46,1
-block_hint,ArrayPrototypePop,39,40,0
-block_hint,ArrayPrototypePop,20,21,1
-block_hint,ArrayPrototypePop,28,29,0
-block_hint,ArrayPrototypePop,7,8,1
-block_hint,ArrayPrototypePop,33,34,0
-block_hint,ArrayPrototypePop,16,17,0
-block_hint,ArrayPrototypePop,22,23,0
-block_hint,ArrayPrototypePop,30,31,1
-block_hint,ArrayPrototypePop,18,19,0
-block_hint,ArrayPrototypePop,14,15,1
-block_hint,ArrayPrototypePop,9,10,1
-block_hint,ArrayPrototypePop,11,12,0
-block_hint,ArrayPrototypePush,165,166,1
-block_hint,ArrayPrototypePush,148,149,1
-block_hint,ArrayPrototypePush,163,164,1
-block_hint,ArrayPrototypePush,137,138,1
-block_hint,ArrayPrototypePush,92,93,1
-block_hint,ArrayPrototypePush,17,18,1
-block_hint,ArrayPrototypePush,157,158,1
-block_hint,ArrayPrototypePush,141,142,0
-block_hint,ArrayPrototypePush,77,78,1
-block_hint,ArrayPrototypePush,79,80,1
-block_hint,ArrayPrototypePush,94,95,0
-block_hint,ArrayPrototypePush,19,20,1
-block_hint,ArrayPrototypePush,99,100,0
-block_hint,ArrayPrototypePush,104,105,0
-block_hint,ArrayPrototypePush,68,69,0
-block_hint,ArrayPrototypePush,113,114,1
-block_hint,ArrayPrototypePush,28,29,0
-block_hint,ArrayPrototypePush,30,31,0
-block_hint,ArrayPrototypePush,34,35,0
-block_hint,ArrayPrototypePush,36,37,0
-block_hint,ArrayPrototypePush,96,97,0
-block_hint,ArrayPrototypePush,21,22,1
-block_hint,ArrayPrototypePush,38,39,1
-block_hint,ArrayPrototypePush,127,128,1
-block_hint,ArrayPrototypePush,129,130,0
-block_hint,ArrayPrototypePush,159,160,0
-block_hint,ArrayPrototypePush,161,162,0
-block_hint,ArrayPrototypePush,108,109,0
-block_hint,ArrayPrototypePush,72,73,0
-block_hint,ArrayPrototypePush,74,75,1
-block_hint,ArrayPrototypePush,117,118,0
-block_hint,ArrayPrototypePush,40,41,0
-block_hint,ArrayPrototypePush,119,120,0
-block_hint,ArrayPrototypePush,48,49,0
-block_hint,ArrayPrototypePush,154,155,1
-block_hint,ArrayPrototypePush,25,26,1
-block_hint,ArrayPrototypePush,50,51,1
-block_hint,ArrayPrototypePush,106,107,0
-block_hint,ArrayPrototypePush,70,71,0
-block_hint,ArrayPrototypePush,123,124,1
-block_hint,ArrayPrototypePush,52,53,0
-block_hint,ArrayPrototypePush,58,59,0
-block_hint,ArrayPrototypePush,60,61,0
-block_hint,ArrayPrototypePush,23,24,1
-block_hint,CloneFastJSArray,10,11,0
-block_hint,CloneFastJSArray,37,38,1
-block_hint,CloneFastJSArray,34,35,1
-block_hint,CloneFastJSArray,19,20,1
-block_hint,CloneFastJSArray,12,13,0
-block_hint,CloneFastJSArray,14,15,1
-block_hint,CloneFastJSArray,42,43,1
-block_hint,CloneFastJSArray,40,41,0
-block_hint,CloneFastJSArray,25,26,1
-block_hint,CloneFastJSArray,4,5,1
-block_hint,CloneFastJSArray,17,18,1
-block_hint,CloneFastJSArrayFillingHoles,75,76,0
-block_hint,CloneFastJSArrayFillingHoles,77,78,0
-block_hint,CloneFastJSArrayFillingHoles,92,93,0
-block_hint,CloneFastJSArrayFillingHoles,46,47,0
-block_hint,CloneFastJSArrayFillingHoles,96,97,1
-block_hint,CloneFastJSArrayFillingHoles,82,83,1
-block_hint,CloneFastJSArrayFillingHoles,16,17,0
-block_hint,CloneFastJSArrayFillingHoles,20,21,0
-block_hint,CloneFastJSArrayFillingHoles,84,85,0
-block_hint,CloneFastJSArrayFillingHoles,113,114,0
-block_hint,CloneFastJSArrayFillingHoles,79,80,1
-block_hint,CloneFastJSArrayFillingHoles,10,11,1
-block_hint,CloneFastJSArrayFillingHoles,55,56,1
-block_hint,ExtractFastJSArray,4,5,1
-block_hint,ExtractFastJSArray,27,28,0
-block_hint,ExtractFastJSArray,10,11,0
-block_hint,ExtractFastJSArray,39,40,1
-block_hint,ExtractFastJSArray,35,36,1
-block_hint,ExtractFastJSArray,20,21,1
-block_hint,ExtractFastJSArray,12,13,0
-block_hint,ExtractFastJSArray,14,15,1
-block_hint,ExtractFastJSArray,37,38,1
-block_hint,ExtractFastJSArray,33,34,0
-block_hint,ExtractFastJSArray,16,17,1
-block_hint,ArrayPrototypeValues,14,15,1
-block_hint,ArrayPrototypeValues,11,12,1
-block_hint,ArrayPrototypeValues,8,9,1
-block_hint,ArrayPrototypeValues,3,4,1
-block_hint,ArrayPrototypeValues,6,7,1
-block_hint,ArrayIteratorPrototypeNext,138,139,1
-block_hint,ArrayIteratorPrototypeNext,88,89,1
-block_hint,ArrayIteratorPrototypeNext,90,91,1
-block_hint,ArrayIteratorPrototypeNext,236,237,0
-block_hint,ArrayIteratorPrototypeNext,243,244,0
-block_hint,ArrayIteratorPrototypeNext,219,220,0
-block_hint,ArrayIteratorPrototypeNext,199,200,0
-block_hint,ArrayIteratorPrototypeNext,160,161,0
-block_hint,ArrayIteratorPrototypeNext,118,119,1
-block_hint,ArrayIteratorPrototypeNext,255,256,0
-block_hint,ArrayIteratorPrototypeNext,238,239,0
-block_hint,ArrayIteratorPrototypeNext,155,156,0
-block_hint,ArrayIteratorPrototypeNext,109,110,1
-block_hint,ArrayIteratorPrototypeNext,6,7,1
-block_hint,ArrayIteratorPrototypeNext,8,9,1
-block_hint,ArrayIteratorPrototypeNext,140,141,0
-block_hint,ArrayIteratorPrototypeNext,122,123,1
-block_hint,ArrayIteratorPrototypeNext,60,61,1
-block_hint,ArrayIteratorPrototypeNext,80,81,1
-block_hint,AsyncFunctionEnter,41,42,1
-block_hint,AsyncFunctionEnter,28,29,0
-block_hint,AsyncFunctionEnter,13,14,0
-block_hint,AsyncFunctionEnter,33,34,1
-block_hint,AsyncFunctionEnter,26,27,1
-block_hint,AsyncFunctionEnter,9,10,0
-block_hint,AsyncFunctionEnter,3,4,1
-block_hint,AsyncFunctionEnter,36,37,1
-block_hint,AsyncFunctionEnter,22,23,0
-block_hint,AsyncFunctionEnter,5,6,1
-block_hint,AsyncFunctionEnter,24,25,1
-block_hint,AsyncFunctionEnter,7,8,0
-block_hint,AsyncFunctionResolve,2,3,0
-block_hint,AsyncFunctionAwaitCaught,24,25,1
-block_hint,AsyncFunctionAwaitCaught,19,20,1
-block_hint,AsyncFunctionAwaitCaught,2,3,1
-block_hint,AsyncFunctionAwaitCaught,30,31,1
-block_hint,AsyncFunctionAwaitCaught,32,33,0
-block_hint,AsyncFunctionAwaitCaught,28,29,1
-block_hint,AsyncFunctionAwaitCaught,8,9,1
-block_hint,AsyncFunctionAwaitCaught,10,11,1
-block_hint,AsyncFunctionAwaitCaught,12,13,1
-block_hint,AsyncFunctionAwaitCaught,14,15,1
-block_hint,AsyncFunctionAwaitCaught,22,23,0
-block_hint,AsyncFunctionAwaitUncaught,24,25,1
-block_hint,AsyncFunctionAwaitUncaught,19,20,1
-block_hint,AsyncFunctionAwaitUncaught,2,3,1
-block_hint,AsyncFunctionAwaitUncaught,30,31,1
-block_hint,AsyncFunctionAwaitUncaught,32,33,0
-block_hint,AsyncFunctionAwaitUncaught,28,29,1
-block_hint,AsyncFunctionAwaitUncaught,8,9,1
-block_hint,AsyncFunctionAwaitUncaught,10,11,1
-block_hint,AsyncFunctionAwaitUncaught,12,13,1
-block_hint,AsyncFunctionAwaitUncaught,14,15,1
-block_hint,AsyncFunctionAwaitUncaught,22,23,0
-block_hint,AsyncFunctionAwaitResolveClosure,8,9,1
-block_hint,AsyncFunctionAwaitResolveClosure,2,3,1
-block_hint,AsyncFunctionAwaitResolveClosure,6,7,0
-block_hint,DatePrototypeGetDate,10,11,1
-block_hint,DatePrototypeGetDate,7,8,1
-block_hint,DatePrototypeGetDate,5,6,1
-block_hint,DatePrototypeGetDate,2,3,1
-block_hint,DatePrototypeGetDay,10,11,1
-block_hint,DatePrototypeGetDay,7,8,1
-block_hint,DatePrototypeGetDay,5,6,1
-block_hint,DatePrototypeGetDay,2,3,1
-block_hint,DatePrototypeGetFullYear,10,11,1
-block_hint,DatePrototypeGetFullYear,7,8,1
-block_hint,DatePrototypeGetFullYear,5,6,1
-block_hint,DatePrototypeGetHours,10,11,1
-block_hint,DatePrototypeGetHours,7,8,1
-block_hint,DatePrototypeGetHours,5,6,1
-block_hint,DatePrototypeGetHours,2,3,1
-block_hint,DatePrototypeGetMinutes,10,11,1
-block_hint,DatePrototypeGetMinutes,7,8,1
-block_hint,DatePrototypeGetMinutes,5,6,1
-block_hint,DatePrototypeGetMinutes,2,3,1
-block_hint,DatePrototypeGetMonth,10,11,1
-block_hint,DatePrototypeGetMonth,7,8,1
-block_hint,DatePrototypeGetMonth,5,6,1
-block_hint,DatePrototypeGetMonth,2,3,1
-block_hint,DatePrototypeGetSeconds,10,11,1
-block_hint,DatePrototypeGetSeconds,7,8,1
-block_hint,DatePrototypeGetSeconds,5,6,1
-block_hint,DatePrototypeGetSeconds,2,3,1
-block_hint,DatePrototypeGetTime,8,9,1
-block_hint,DatePrototypeGetTime,5,6,1
-block_hint,DatePrototypeGetTime,2,3,1
-block_hint,CreateIterResultObject,4,5,1
-block_hint,CreateIterResultObject,11,12,1
-block_hint,CreateIterResultObject,6,7,0
-block_hint,CreateGeneratorObject,32,33,1
-block_hint,CreateGeneratorObject,34,35,1
-block_hint,CreateGeneratorObject,57,58,1
-block_hint,CreateGeneratorObject,54,55,0
-block_hint,CreateGeneratorObject,43,44,1
-block_hint,CreateGeneratorObject,24,25,0
-block_hint,CreateGeneratorObject,47,48,1
-block_hint,CreateGeneratorObject,40,41,1
-block_hint,CreateGeneratorObject,8,9,0
-block_hint,CreateGeneratorObject,51,52,1
-block_hint,CreateGeneratorObject,37,38,0
-block_hint,CreateGeneratorObject,12,13,0
-block_hint,GeneratorPrototypeNext,19,20,1
-block_hint,GeneratorPrototypeNext,11,12,1
-block_hint,GeneratorPrototypeNext,13,14,1
-block_hint,GeneratorPrototypeNext,5,6,0
-block_hint,GeneratorPrototypeNext,7,8,0
-block_hint,SuspendGeneratorBaseline,19,20,1
-block_hint,SuspendGeneratorBaseline,5,6,1
-block_hint,SuspendGeneratorBaseline,11,12,1
-block_hint,SuspendGeneratorBaseline,7,8,1
-block_hint,SuspendGeneratorBaseline,13,14,0
-block_hint,ResumeGeneratorBaseline,11,12,1
-block_hint,ResumeGeneratorBaseline,4,5,1
-block_hint,ResumeGeneratorBaseline,6,7,0
-block_hint,GlobalIsFinite,9,10,1
-block_hint,GlobalIsNaN,9,10,1
-block_hint,GlobalIsNaN,11,12,1
-block_hint,LoadIC,370,371,1
-block_hint,LoadIC,139,140,0
-block_hint,LoadIC,59,60,0
-block_hint,LoadIC,233,234,0
-block_hint,LoadIC,345,346,1
-block_hint,LoadIC,235,236,0
-block_hint,LoadIC,387,388,1
-block_hint,LoadIC,384,385,0
-block_hint,LoadIC,381,382,1
-block_hint,LoadIC,292,293,1
-block_hint,LoadIC,100,101,1
-block_hint,LoadIC,278,279,0
-block_hint,LoadIC,319,320,0
-block_hint,LoadIC,141,142,1
-block_hint,LoadIC,143,144,0
-block_hint,LoadIC,308,309,1
-block_hint,LoadIC,358,359,1
-block_hint,LoadIC,102,103,0
-block_hint,LoadIC,19,20,1
-block_hint,LoadIC,62,63,0
-block_hint,LoadIC,21,22,1
-block_hint,LoadIC,173,174,0
-block_hint,LoadIC,364,365,0
-block_hint,LoadIC,366,367,0
-block_hint,LoadIC,317,318,0
-block_hint,LoadIC,129,130,0
-block_hint,LoadIC,49,50,1
-block_hint,LoadIC,209,210,0
-block_hint,LoadIC,84,85,0
-block_hint,LoadIC,44,45,0
-block_hint,LoadIC,360,361,1
-block_hint,LoadIC,114,115,0
-block_hint,LoadIC,183,184,0
-block_hint,LoadIC,42,43,1
-block_hint,LoadIC,76,77,0
-block_hint,LoadIC,268,269,0
-block_hint,LoadIC,310,311,1
-block_hint,LoadIC,25,26,0
-block_hint,LoadIC,179,180,1
-block_hint,LoadIC,181,182,1
-block_hint,LoadIC,175,176,1
-block_hint,LoadIC,177,178,1
-block_hint,LoadIC,133,134,1
-block_hint,LoadIC,135,136,0
-block_hint,LoadIC_Megamorphic,355,356,1
-block_hint,LoadIC_Megamorphic,352,353,0
-block_hint,LoadIC_Megamorphic,349,350,1
-block_hint,LoadIC_Megamorphic,257,258,1
-block_hint,LoadIC_Megamorphic,259,260,1
-block_hint,LoadIC_Megamorphic,255,256,0
-block_hint,LoadIC_Megamorphic,56,57,0
-block_hint,LoadIC_Megamorphic,294,295,0
-block_hint,LoadIC_Megamorphic,130,131,1
-block_hint,LoadIC_Megamorphic,280,281,1
-block_hint,LoadIC_Megamorphic,132,133,0
-block_hint,LoadIC_Megamorphic,282,283,1
-block_hint,LoadIC_Megamorphic,328,329,1
-block_hint,LoadIC_Megamorphic,95,96,0
-block_hint,LoadIC_Megamorphic,97,98,0
-block_hint,LoadIC_Megamorphic,20,21,1
-block_hint,LoadIC_Megamorphic,162,163,0
-block_hint,LoadIC_Megamorphic,287,288,0
-block_hint,LoadIC_Megamorphic,249,250,1
-block_hint,LoadIC_Megamorphic,334,335,0
-block_hint,LoadIC_Megamorphic,336,337,0
-block_hint,LoadIC_Megamorphic,291,292,0
-block_hint,LoadIC_Megamorphic,122,123,0
-block_hint,LoadIC_Megamorphic,48,49,1
-block_hint,LoadIC_Megamorphic,43,44,0
-block_hint,LoadIC_Megamorphic,245,246,0
-block_hint,LoadIC_Megamorphic,284,285,1
-block_hint,LoadIC_Megamorphic,24,25,0
-block_hint,LoadIC_Megamorphic,22,23,0
-block_hint,LoadIC_Megamorphic,164,165,1
-block_hint,LoadIC_Megamorphic,166,167,1
-block_hint,LoadIC_Megamorphic,126,127,1
-block_hint,LoadIC_Noninlined,366,367,1
-block_hint,LoadIC_Noninlined,132,133,0
-block_hint,LoadIC_Noninlined,372,373,1
-block_hint,LoadIC_Noninlined,369,370,0
-block_hint,LoadIC_Noninlined,364,365,1
-block_hint,LoadIC_Noninlined,267,268,0
-block_hint,LoadIC_Noninlined,56,57,0
-block_hint,LoadIC_Noninlined,308,309,0
-block_hint,LoadIC_Noninlined,142,143,1
-block_hint,LoadIC_Noninlined,292,293,1
-block_hint,LoadIC_Noninlined,20,21,1
-block_hint,LoadIC_Noninlined,174,175,0
-block_hint,LoadIC_Noninlined,37,38,1
-block_hint,LoadIC_Noninlined,257,258,0
-block_hint,LoadIC_Noninlined,296,297,1
-block_hint,LoadIC_Noninlined,24,25,0
-block_hint,LoadIC_Noninlined,22,23,0
-block_hint,LoadICTrampoline,3,4,1
-block_hint,LoadICTrampoline_Megamorphic,3,4,1
-block_hint,LoadSuperIC,528,529,0
-block_hint,LoadSuperIC,253,254,0
-block_hint,LoadSuperIC,564,565,1
-block_hint,LoadSuperIC,440,441,0
-block_hint,LoadSuperIC,75,76,0
-block_hint,LoadSuperIC,540,541,0
-block_hint,LoadSuperIC,255,256,1
-block_hint,LoadSuperIC,515,516,1
-block_hint,LoadSuperIC,41,42,1
-block_hint,LoadSuperIC,550,551,0
-block_hint,LoadSuperIC,287,288,0
-block_hint,LoadSuperIC,60,61,1
-block_hint,LoadSuperIC,429,430,0
-block_hint,LoadSuperIC,427,428,0
-block_hint,LoadSuperIC,519,520,1
-block_hint,LoadSuperIC,45,46,0
-block_hint,LoadSuperIC,671,672,0
-block_hint,KeyedLoadIC,629,630,1
-block_hint,KeyedLoadIC,257,258,0
-block_hint,KeyedLoadIC,249,250,0
-block_hint,KeyedLoadIC,385,386,0
-block_hint,KeyedLoadIC,494,495,1
-block_hint,KeyedLoadIC,671,672,0
-block_hint,KeyedLoadIC,623,624,0
-block_hint,KeyedLoadIC,574,575,1
-block_hint,KeyedLoadIC,391,392,1
-block_hint,KeyedLoadIC,389,390,1
-block_hint,KeyedLoadIC,657,658,0
-block_hint,KeyedLoadIC,659,660,0
-block_hint,KeyedLoadIC,627,628,0
-block_hint,KeyedLoadIC,576,577,1
-block_hint,KeyedLoadIC,151,152,1
-block_hint,KeyedLoadIC,621,622,0
-block_hint,KeyedLoadIC,475,476,0
-block_hint,KeyedLoadIC,101,102,1
-block_hint,KeyedLoadIC,667,668,0
-block_hint,KeyedLoadIC,669,670,0
-block_hint,KeyedLoadIC,635,636,1
-block_hint,KeyedLoadIC,637,638,1
-block_hint,KeyedLoadIC,293,294,1
-block_hint,KeyedLoadIC,295,296,0
-block_hint,KeyedLoadIC,663,664,1
-block_hint,KeyedLoadIC,517,518,1
-block_hint,KeyedLoadIC,619,620,0
-block_hint,KeyedLoadIC,606,607,0
-block_hint,KeyedLoadIC,554,555,1
-block_hint,KeyedLoadIC,315,316,1
-block_hint,KeyedLoadIC,63,64,0
-block_hint,KeyedLoadIC,305,306,0
-block_hint,KeyedLoadIC,521,522,1
-block_hint,KeyedLoadIC,307,308,1
-block_hint,KeyedLoadIC,221,222,0
-block_hint,KeyedLoadIC,177,178,1
-block_hint,KeyedLoadIC,556,557,0
-block_hint,KeyedLoadIC,450,451,1
-block_hint,KeyedLoadIC,113,114,0
-block_hint,KeyedLoadIC,115,116,0
-block_hint,KeyedLoadIC,406,407,1
-block_hint,KeyedLoadIC,609,610,1
-block_hint,KeyedLoadIC,245,246,1
-block_hint,KeyedLoadIC,558,559,0
-block_hint,KeyedLoadIC,508,509,0
-block_hint,KeyedLoadIC,436,437,1
-block_hint,KeyedLoadIC,673,674,0
-block_hint,KeyedLoadIC,119,120,1
-block_hint,KeyedLoadIC,323,324,1
-block_hint,KeyedLoadIC,325,326,1
-block_hint,KeyedLoadIC,65,66,0
-block_hint,KeyedLoadIC_Megamorphic,496,497,1
-block_hint,KeyedLoadIC_Megamorphic,498,499,0
-block_hint,KeyedLoadIC_Megamorphic,1218,1219,0
-block_hint,KeyedLoadIC_Megamorphic,1220,1221,1
-block_hint,KeyedLoadIC_Megamorphic,1192,1193,1
-block_hint,KeyedLoadIC_Megamorphic,1145,1146,0
-block_hint,KeyedLoadIC_Megamorphic,1212,1213,1
-block_hint,KeyedLoadIC_Megamorphic,1222,1223,1
-block_hint,KeyedLoadIC_Megamorphic,1194,1195,1
-block_hint,KeyedLoadIC_Megamorphic,1214,1215,0
-block_hint,KeyedLoadIC_Megamorphic,1111,1112,0
-block_hint,KeyedLoadIC_Megamorphic,927,928,1
-block_hint,KeyedLoadIC_Megamorphic,925,926,1
-block_hint,KeyedLoadIC_Megamorphic,528,529,1
-block_hint,KeyedLoadIC_Megamorphic,1186,1187,0
-block_hint,KeyedLoadIC_Megamorphic,1188,1189,0
-block_hint,KeyedLoadIC_Megamorphic,1155,1156,0
-block_hint,KeyedLoadIC_Megamorphic,1153,1154,1
-block_hint,KeyedLoadIC_Megamorphic,1182,1183,0
-block_hint,KeyedLoadIC_Megamorphic,1149,1150,0
-block_hint,KeyedLoadIC_Megamorphic,929,930,1
-block_hint,KeyedLoadIC_Megamorphic,939,940,0
-block_hint,KeyedLoadIC_Megamorphic,640,641,0
-block_hint,KeyedLoadIC_Megamorphic,1172,1173,0
-block_hint,KeyedLoadIC_Megamorphic,1085,1086,0
-block_hint,KeyedLoadIC_Megamorphic,1131,1132,0
-block_hint,KeyedLoadIC_Megamorphic,233,234,1
-block_hint,KeyedLoadIC_Megamorphic,1087,1088,0
-block_hint,KeyedLoadIC_Megamorphic,235,236,0
-block_hint,KeyedLoadIC_Megamorphic,1070,1071,0
-block_hint,KeyedLoadIC_Megamorphic,1204,1205,1
-block_hint,KeyedLoadIC_Megamorphic,1068,1069,0
-block_hint,KeyedLoadIC_Megamorphic,1066,1067,0
-block_hint,KeyedLoadIC_Megamorphic,980,981,1
-block_hint,KeyedLoadIC_Megamorphic,239,240,0
-block_hint,KeyedLoadIC_Megamorphic,121,122,1
-block_hint,KeyedLoadIC_Megamorphic,195,196,0
-block_hint,KeyedLoadIC_Megamorphic,644,645,0
-block_hint,KeyedLoadIC_Megamorphic,558,559,0
-block_hint,KeyedLoadIC_Megamorphic,1043,1044,0
-block_hint,KeyedLoadIC_Megamorphic,983,984,0
-block_hint,KeyedLoadIC_Megamorphic,648,649,1
-block_hint,KeyedLoadIC_Megamorphic,666,667,0
-block_hint,KeyedLoadIC_Megamorphic,1174,1175,0
-block_hint,KeyedLoadIC_Megamorphic,650,651,0
-block_hint,KeyedLoadIC_Megamorphic,1089,1090,0
-block_hint,KeyedLoadIC_Megamorphic,652,653,1
-block_hint,KeyedLoadIC_Megamorphic,250,251,1
-block_hint,KeyedLoadIC_Megamorphic,654,655,0
-block_hint,KeyedLoadIC_Megamorphic,252,253,0
-block_hint,KeyedLoadIC_Megamorphic,842,843,0
-block_hint,KeyedLoadIC_Megamorphic,987,988,1
-block_hint,KeyedLoadIC_Megamorphic,256,257,0
-block_hint,KeyedLoadIC_Megamorphic,656,657,0
-block_hint,KeyedLoadIC_Megamorphic,1076,1077,0
-block_hint,KeyedLoadIC_Megamorphic,1169,1170,0
-block_hint,KeyedLoadIC_Megamorphic,1206,1207,1
-block_hint,KeyedLoadIC_Megamorphic,1074,1075,0
-block_hint,KeyedLoadIC_Megamorphic,123,124,1
-block_hint,KeyedLoadIC_Megamorphic,203,204,0
-block_hint,KeyedLoadIC_Megamorphic,923,924,0
-block_hint,KeyedLoadIC_Megamorphic,675,676,0
-block_hint,KeyedLoadIC_Megamorphic,1176,1177,0
-block_hint,KeyedLoadIC_Megamorphic,1208,1209,0
-block_hint,KeyedLoadIC_Megamorphic,1135,1136,0
-block_hint,KeyedLoadIC_Megamorphic,844,845,1
-block_hint,KeyedLoadIC_Megamorphic,268,269,1
-block_hint,KeyedLoadIC_Megamorphic,1200,1201,0
-block_hint,KeyedLoadIC_Megamorphic,270,271,0
-block_hint,KeyedLoadIC_Megamorphic,1056,1057,0
-block_hint,KeyedLoadIC_Megamorphic,1198,1199,1
-block_hint,KeyedLoadIC_Megamorphic,1054,1055,0
-block_hint,KeyedLoadIC_Megamorphic,1116,1117,1
-block_hint,KeyedLoadIC_Megamorphic,1107,1108,0
-block_hint,KeyedLoadIC_Megamorphic,1210,1211,0
-block_hint,KeyedLoadIC_Megamorphic,1101,1102,1
-block_hint,KeyedLoadIC_Megamorphic,740,741,1
-block_hint,KeyedLoadIC_Megamorphic,1017,1018,1
-block_hint,KeyedLoadIC_Megamorphic,736,737,0
-block_hint,KeyedLoadIC_Megamorphic,112,113,0
-block_hint,KeyedLoadIC_Megamorphic,877,878,0
-block_hint,KeyedLoadIC_Megamorphic,338,339,1
-block_hint,KeyedLoadIC_Megamorphic,863,864,1
-block_hint,KeyedLoadIC_Megamorphic,76,77,1
-block_hint,KeyedLoadIC_Megamorphic,368,369,0
-block_hint,KeyedLoadIC_Megamorphic,728,729,0
-block_hint,KeyedLoadIC_Megamorphic,93,94,1
-block_hint,KeyedLoadIC_Megamorphic,998,999,1
-block_hint,KeyedLoadIC_Megamorphic,294,295,0
-block_hint,KeyedLoadIC_Megamorphic,115,116,1
-block_hint,KeyedLoadIC_Megamorphic,179,180,0
-block_hint,KeyedLoadIC_Megamorphic,960,961,0
-block_hint,KeyedLoadIC_Megamorphic,817,818,1
-block_hint,KeyedLoadIC_Megamorphic,183,184,1
-block_hint,KeyedLoadIC_Megamorphic,681,682,0
-block_hint,KeyedLoadIC_Megamorphic,524,525,0
-block_hint,KeyedLoadIC_Megamorphic,1031,1032,0
-block_hint,KeyedLoadIC_Megamorphic,1001,1002,0
-block_hint,KeyedLoadIC_Megamorphic,685,686,1
-block_hint,KeyedLoadIC_Megamorphic,856,857,1
-block_hint,KeyedLoadIC_Megamorphic,1178,1179,0
-block_hint,KeyedLoadIC_Megamorphic,307,308,0
-block_hint,KeyedLoadIC_Megamorphic,858,859,0
-block_hint,KeyedLoadIC_Megamorphic,1062,1063,0
-block_hint,KeyedLoadIC_Megamorphic,187,188,0
-block_hint,KeyedLoadIC_Megamorphic,947,948,0
-block_hint,KeyedLoadIC_Megamorphic,1142,1143,0
-block_hint,KeyedLoadIC_Megamorphic,905,906,1
-block_hint,KeyedLoadIC_Megamorphic,127,128,0
-block_hint,KeyedLoadIC_Megamorphic,718,719,0
-block_hint,KeyedLoadIC_Megamorphic,1026,1027,0
-block_hint,KeyedLoadIC_Megamorphic,562,563,1
-block_hint,KeyedLoadIC_Megamorphic,321,322,0
-block_hint,KeyedLoadIC_Megamorphic,714,715,0
-block_hint,KeyedLoadIC_Megamorphic,564,565,0
-block_hint,KeyedLoadIC_Megamorphic,129,130,1
-block_hint,KeyedLoadIC_Megamorphic,572,573,0
-block_hint,KeyedLoadIC_Megamorphic,909,910,1
-block_hint,KeyedLoadIC_Megamorphic,492,493,0
-block_hint,KeyedLoadIC_Megamorphic,945,946,0
-block_hint,KeyedLoadIC_Megamorphic,722,723,1
-block_hint,KeyedLoadIC_Megamorphic,580,581,0
-block_hint,KeyedLoadIC_Megamorphic,213,214,0
-block_hint,KeyedLoadIC_Megamorphic,490,491,1
-block_hint,KeyedLoadIC_Megamorphic,582,583,1
-block_hint,KeyedLoadIC_Megamorphic,145,146,1
-block_hint,KeyedLoadICTrampoline,3,4,1
-block_hint,KeyedLoadICTrampoline_Megamorphic,3,4,1
-block_hint,StoreGlobalIC,72,73,0
-block_hint,StoreGlobalIC,229,230,1
-block_hint,StoreGlobalIC,268,269,0
-block_hint,StoreGlobalIC,144,145,0
-block_hint,StoreGlobalIC,205,206,0
-block_hint,StoreGlobalIC,92,93,0
-block_hint,StoreGlobalIC,146,147,1
-block_hint,StoreGlobalIC,94,95,1
-block_hint,StoreGlobalIC,15,16,1
-block_hint,StoreGlobalICTrampoline,3,4,1
-block_hint,StoreIC,338,339,1
-block_hint,StoreIC,144,145,0
-block_hint,StoreIC,69,70,0
-block_hint,StoreIC,208,209,0
-block_hint,StoreIC,210,211,1
-block_hint,StoreIC,395,396,1
-block_hint,StoreIC,386,387,0
-block_hint,StoreIC,240,241,1
-block_hint,StoreIC,242,243,1
-block_hint,StoreIC,74,75,1
-block_hint,StoreIC,250,251,1
-block_hint,StoreIC,108,109,0
-block_hint,StoreIC,35,36,0
-block_hint,StoreIC,316,317,1
-block_hint,StoreIC,92,93,0
-block_hint,StoreIC,146,147,0
-block_hint,StoreIC,150,151,0
-block_hint,StoreIC,16,17,1
-block_hint,StoreIC,96,97,0
-block_hint,StoreIC,18,19,0
-block_hint,StoreIC,359,360,0
-block_hint,StoreIC,160,161,1
-block_hint,StoreIC,162,163,1
-block_hint,StoreIC,327,328,1
-block_hint,StoreIC,164,165,0
-block_hint,StoreIC,105,106,0
-block_hint,StoreIC,103,104,1
-block_hint,StoreIC,320,321,1
-block_hint,StoreIC,23,24,0
-block_hint,StoreIC,152,153,1
-block_hint,StoreIC,287,288,0
-block_hint,StoreIC,154,155,0
-block_hint,StoreIC,156,157,1
-block_hint,StoreIC,323,324,1
-block_hint,StoreIC,25,26,1
-block_hint,StoreIC,158,159,0
-block_hint,StoreIC,325,326,1
-block_hint,StoreIC,31,32,0
-block_hint,StoreIC,29,30,1
-block_hint,StoreIC,227,228,1
-block_hint,StoreIC,63,64,0
-block_hint,StoreIC,291,292,0
-block_hint,StoreIC,166,167,1
-block_hint,StoreIC,293,294,1
-block_hint,StoreIC,312,313,1
-block_hint,StoreIC,76,77,0
-block_hint,StoreIC,246,247,0
-block_hint,StoreIC,176,177,0
-block_hint,StoreIC,43,44,1
-block_hint,StoreIC,112,113,0
-block_hint,StoreIC,178,179,0
-block_hint,StoreIC,271,272,0
-block_hint,StoreIC,125,126,1
-block_hint,StoreIC,371,372,0
-block_hint,StoreIC,267,268,1
-block_hint,StoreIC,45,46,1
-block_hint,StoreIC,47,48,1
-block_hint,StoreIC,121,122,0
-block_hint,StoreIC,49,50,1
-block_hint,StoreIC,123,124,0
-block_hint,StoreIC,51,52,1
-block_hint,StoreIC,80,81,0
-block_hint,StoreIC,53,54,1
-block_hint,StoreIC,55,56,1
-block_hint,StoreIC,333,334,0
-block_hint,StoreIC,57,58,1
-block_hint,StoreIC,184,185,0
-block_hint,StoreIC,186,187,0
-block_hint,StoreIC,229,230,0
-block_hint,StoreIC,133,134,0
-block_hint,StoreIC,299,300,0
-block_hint,StoreIC,190,191,1
-block_hint,StoreIC,192,193,0
-block_hint,StoreIC,281,282,0
-block_hint,StoreIC,365,366,0
-block_hint,StoreIC,301,302,1
-block_hint,StoreIC,194,195,1
-block_hint,StoreIC,200,201,1
-block_hint,StoreIC,202,203,0
-block_hint,StoreIC,204,205,0
-block_hint,StoreIC,206,207,1
-block_hint,StoreIC,198,199,1
-block_hint,StoreIC,196,197,0
-block_hint,StoreIC,384,385,0
-block_hint,StoreIC,388,389,1
-block_hint,StoreIC,357,358,1
-block_hint,StoreIC,314,315,1
-block_hint,StoreIC,84,85,0
-block_hint,StoreIC,139,140,0
-block_hint,StoreIC,231,232,1
-block_hint,StoreICTrampoline,3,4,1
-block_hint,DefineNamedOwnIC,329,330,1
-block_hint,DefineNamedOwnIC,145,146,0
-block_hint,DefineNamedOwnIC,300,301,1
-block_hint,DefineNamedOwnIC,203,204,0
-block_hint,DefineNamedOwnIC,69,70,0
-block_hint,DefineNamedOwnIC,205,206,0
-block_hint,DefineNamedOwnIC,326,327,0
-block_hint,DefineNamedOwnIC,243,244,1
-block_hint,DefineNamedOwnIC,93,94,0
-block_hint,DefineNamedOwnIC,17,18,0
-block_hint,DefineNamedOwnIC,350,351,0
-block_hint,DefineNamedOwnIC,157,158,1
-block_hint,DefineNamedOwnIC,159,160,1
-block_hint,DefineNamedOwnIC,254,255,1
-block_hint,DefineNamedOwnIC,32,33,0
-block_hint,DefineNamedOwnIC,246,247,1
-block_hint,DefineNamedOwnIC,22,23,0
-block_hint,DefineNamedOwnIC,149,150,1
-block_hint,DefineNamedOwnIC,352,353,0
-block_hint,DefineNamedOwnIC,280,281,0
-block_hint,DefineNamedOwnIC,151,152,0
-block_hint,DefineNamedOwnIC,153,154,1
-block_hint,DefineNamedOwnIC,248,249,1
-block_hint,DefineNamedOwnIC,26,27,0
-block_hint,DefineNamedOwnIC,155,156,0
-block_hint,DefineNamedOwnIC,250,251,1
-block_hint,DefineNamedOwnIC,30,31,0
-block_hint,KeyedStoreIC,401,402,1
-block_hint,KeyedStoreIC,173,174,0
-block_hint,KeyedStoreIC,169,170,0
-block_hint,KeyedStoreIC,239,240,0
-block_hint,KeyedStoreIC,171,172,1
-block_hint,KeyedStoreIC,83,84,1
-block_hint,KeyedStoreIC,87,88,1
-block_hint,KeyedStoreIC,398,399,1
-block_hint,KeyedStoreIC,109,110,0
-block_hint,KeyedStoreIC,22,23,0
-block_hint,KeyedStoreIC,432,433,0
-block_hint,KeyedStoreIC,181,182,1
-block_hint,KeyedStoreIC,434,435,0
-block_hint,KeyedStoreIC,351,352,0
-block_hint,KeyedStoreIC,298,299,1
-block_hint,KeyedStoreIC,31,32,0
-block_hint,KeyedStoreIC,272,273,0
-block_hint,KeyedStoreIC,355,356,0
-block_hint,KeyedStoreIC,195,196,1
-block_hint,KeyedStoreIC,260,261,1
-block_hint,KeyedStoreIC,436,437,1
-block_hint,KeyedStoreIC,329,330,0
-block_hint,KeyedStoreIC,137,138,1
-block_hint,KeyedStoreIC,45,46,1
-block_hint,KeyedStoreIC,197,198,0
-block_hint,KeyedStoreIC,47,48,0
-block_hint,KeyedStoreIC,215,216,0
-block_hint,KeyedStoreIC,361,362,1
-block_hint,KeyedStoreIC,363,364,0
-block_hint,KeyedStoreIC,221,222,1
-block_hint,KeyedStoreIC,223,224,0
-block_hint,KeyedStoreIC,345,346,0
-block_hint,KeyedStoreIC,367,368,0
-block_hint,KeyedStoreIC,438,439,0
-block_hint,KeyedStoreIC,365,366,1
-block_hint,KeyedStoreIC,231,232,1
-block_hint,KeyedStoreIC,233,234,0
-block_hint,KeyedStoreIC,235,236,0
-block_hint,KeyedStoreIC,237,238,1
-block_hint,KeyedStoreIC,453,454,0
-block_hint,KeyedStoreIC,430,431,1
-block_hint,KeyedStoreIC,278,279,0
-block_hint,KeyedStoreIC,377,378,1
-block_hint,KeyedStoreIC,97,98,0
-block_hint,KeyedStoreIC,164,165,0
-block_hint,KeyedStoreICTrampoline,3,4,1
-block_hint,DefineKeyedOwnIC,392,393,1
-block_hint,DefineKeyedOwnIC,174,175,0
-block_hint,DefineKeyedOwnIC,170,171,1
-block_hint,StoreInArrayLiteralIC,30,31,1
-block_hint,StoreInArrayLiteralIC,19,20,0
-block_hint,StoreInArrayLiteralIC,23,24,0
-block_hint,StoreInArrayLiteralIC,14,15,1
-block_hint,StoreInArrayLiteralIC,16,17,1
-block_hint,StoreInArrayLiteralIC,8,9,1
-block_hint,StoreInArrayLiteralIC,4,5,1
-block_hint,LoadGlobalIC,60,61,0
-block_hint,LoadGlobalIC,14,15,1
-block_hint,LoadGlobalIC,16,17,1
-block_hint,LoadGlobalIC,18,19,1
-block_hint,LoadGlobalIC,191,192,0
-block_hint,LoadGlobalIC,12,13,0
-block_hint,LoadGlobalIC,111,112,1
-block_hint,LoadGlobalICInsideTypeof,60,61,0
-block_hint,LoadGlobalICInsideTypeof,193,194,1
-block_hint,LoadGlobalICInsideTypeof,12,13,0
-block_hint,LoadGlobalICInsideTypeof,111,112,0
-block_hint,LoadGlobalICInsideTypeof,20,21,1
-block_hint,LoadGlobalICInsideTypeof,22,23,1
-block_hint,LoadGlobalICInsideTypeof,254,255,1
-block_hint,LoadGlobalICInsideTypeof,208,209,0
-block_hint,LoadGlobalICInsideTypeof,58,59,0
-block_hint,LoadGlobalICInsideTypeof,220,221,0
-block_hint,LoadGlobalICInsideTypeof,113,114,1
-block_hint,LoadGlobalICInsideTypeof,24,25,1
-block_hint,LoadGlobalICInsideTypeof,229,230,1
-block_hint,LoadGlobalICInsideTypeof,199,200,0
-block_hint,LoadGlobalICInsideTypeof,43,44,0
-block_hint,LoadGlobalICInsideTypeof,41,42,1
-block_hint,LoadGlobalICTrampoline,3,4,1
-block_hint,LoadGlobalICInsideTypeofTrampoline,3,4,1
-block_hint,LookupGlobalICBaseline,3,4,1
-block_hint,LookupGlobalICBaseline,14,15,0
-block_hint,LookupGlobalICBaseline,5,6,1
-block_hint,LookupGlobalICBaseline,11,12,1
-block_hint,LookupGlobalICBaseline,7,8,1
-block_hint,LookupGlobalICBaseline,9,10,0
-block_hint,KeyedHasIC,261,262,1
-block_hint,KeyedHasIC,125,126,0
-block_hint,KeyedHasIC,117,118,0
-block_hint,KeyedHasIC,239,240,0
-block_hint,KeyedHasIC,165,166,0
-block_hint,KeyedHasIC,77,78,0
-block_hint,KeyedHasIC,119,120,1
-block_hint,KeyedHasIC,167,168,0
-block_hint,KeyedHasIC,123,124,1
-block_hint,KeyedHasIC,79,80,1
-block_hint,KeyedHasIC,197,198,0
-block_hint,KeyedHasIC,221,222,0
-block_hint,KeyedHasIC,283,284,0
-block_hint,KeyedHasIC,281,282,0
-block_hint,KeyedHasIC,161,162,1
-block_hint,KeyedHasIC,61,62,0
-block_hint,KeyedHasIC_Megamorphic,137,138,1
-block_hint,KeyedHasIC_Megamorphic,139,140,1
-block_hint,KeyedHasIC_Megamorphic,263,264,0
-block_hint,KeyedHasIC_Megamorphic,211,212,1
-block_hint,KeyedHasIC_Megamorphic,254,255,0
-block_hint,KeyedHasIC_Megamorphic,97,98,0
-block_hint,KeyedHasIC_Megamorphic,234,235,1
-block_hint,KeyedHasIC_Megamorphic,123,124,1
-block_hint,KeyedHasIC_Megamorphic,141,142,1
-block_hint,KeyedHasIC_Megamorphic,199,200,0
-block_hint,KeyedHasIC_Megamorphic,201,202,0
-block_hint,KeyedHasIC_Megamorphic,101,102,0
-block_hint,KeyedHasIC_Megamorphic,99,100,0
-block_hint,KeyedHasIC_Megamorphic,250,251,0
-block_hint,KeyedHasIC_Megamorphic,270,271,0
-block_hint,KeyedHasIC_Megamorphic,106,107,0
-block_hint,KeyedHasIC_Megamorphic,277,278,0
-block_hint,KeyedHasIC_Megamorphic,282,283,0
-block_hint,KeyedHasIC_Megamorphic,268,269,0
-block_hint,KeyedHasIC_Megamorphic,203,204,0
-block_hint,KeyedHasIC_Megamorphic,44,45,0
-block_hint,KeyedHasIC_Megamorphic,63,64,0
-block_hint,KeyedHasIC_Megamorphic,239,240,1
-block_hint,KeyedHasIC_Megamorphic,48,49,0
-block_hint,KeyedHasIC_Megamorphic,272,273,0
-block_hint,KeyedHasIC_Megamorphic,228,229,0
-block_hint,KeyedHasIC_Megamorphic,87,88,0
-block_hint,KeyedHasIC_Megamorphic,155,156,0
-block_hint,KeyedHasIC_Megamorphic,196,197,0
-block_hint,KeyedHasIC_Megamorphic,59,60,0
-block_hint,KeyedHasIC_Megamorphic,222,223,0
-block_hint,KeyedHasIC_Megamorphic,57,58,1
-block_hint,IterableToList,42,43,1
-block_hint,IterableToList,44,45,1
-block_hint,IterableToList,46,47,1
-block_hint,IterableToList,36,37,1
-block_hint,IterableToList,48,49,1
-block_hint,IterableToList,50,51,1
-block_hint,IterableToList,98,99,1
-block_hint,IterableToList,107,108,0
-block_hint,IterableToList,109,110,0
-block_hint,IterableToList,100,101,0
-block_hint,IterableToList,74,75,0
-block_hint,IterableToList,58,59,0
-block_hint,IterableToList,96,97,0
-block_hint,IterableToList,52,53,0
-block_hint,IterableToList,93,94,1
-block_hint,IterableToList,82,83,1
-block_hint,IterableToList,17,18,0
-block_hint,IterableToList,61,62,1
-block_hint,IterableToList,14,15,1
-block_hint,IterableToList,90,91,0
-block_hint,IterableToList,103,104,0
-block_hint,IterableToList,88,89,0
-block_hint,IterableToList,32,33,0
-block_hint,IterableToList,113,114,1
-block_hint,IterableToList,111,112,1
-block_hint,IterableToList,63,64,1
-block_hint,IterableToList,34,35,1
-block_hint,IterableToListWithSymbolLookup,39,40,0
-block_hint,IterableToListWithSymbolLookup,96,97,1
-block_hint,IterableToListWithSymbolLookup,94,95,0
-block_hint,IterableToListWithSymbolLookup,82,83,1
-block_hint,IterableToListWithSymbolLookup,55,56,1
-block_hint,IterableToListWithSymbolLookup,25,26,1
-block_hint,IterableToListWithSymbolLookup,2,3,1
-block_hint,IterableToListWithSymbolLookup,99,100,1
-block_hint,IterableToListWithSymbolLookup,92,93,0
-block_hint,IterableToListWithSymbolLookup,71,72,1
-block_hint,IterableToListWithSymbolLookup,78,79,0
-block_hint,IterableToListWithSymbolLookup,84,85,1
-block_hint,IterableToListWithSymbolLookup,57,58,1
-block_hint,IterableToListWithSymbolLookup,27,28,1
-block_hint,IterableToListWithSymbolLookup,4,5,1
-block_hint,IterableToListWithSymbolLookup,80,81,1
-block_hint,IterableToListWithSymbolLookup,62,63,0
-block_hint,IterableToListWithSymbolLookup,17,18,1
-block_hint,IterableToListMayPreserveHoles,8,9,1
-block_hint,IterableToListMayPreserveHoles,15,16,0
-block_hint,IterableToListMayPreserveHoles,20,21,1
-block_hint,IterableToListMayPreserveHoles,17,18,1
-block_hint,IterableToListMayPreserveHoles,11,12,1
-block_hint,IterableToListMayPreserveHoles,3,4,1
-block_hint,IterableToListMayPreserveHoles,13,14,1
-block_hint,IterableToListMayPreserveHoles,5,6,0
-block_hint,FindOrderedHashMapEntry,26,27,1
-block_hint,FindOrderedHashMapEntry,64,65,0
-block_hint,FindOrderedHashMapEntry,24,25,0
-block_hint,FindOrderedHashMapEntry,22,23,0
-block_hint,FindOrderedHashMapEntry,68,69,0
-block_hint,FindOrderedHashMapEntry,58,59,1
-block_hint,FindOrderedHashMapEntry,60,61,1
-block_hint,MapConstructor,328,329,1
-block_hint,MapConstructor,248,249,1
-block_hint,MapConstructor,105,106,0
-block_hint,MapConstructor,13,14,1
-block_hint,MapConstructor,270,271,1
-block_hint,MapConstructor,211,212,1
-block_hint,MapConstructor,86,87,0
-block_hint,MapConstructor,88,89,1
-block_hint,MapConstructor,272,273,1
-block_hint,MapConstructor,308,309,0
-block_hint,MapConstructor,319,320,0
-block_hint,MapConstructor,220,221,0
-block_hint,MapConstructor,109,110,0
-block_hint,MapConstructor,238,239,1
-block_hint,MapConstructor,103,104,1
-block_hint,MapPrototypeSet,98,99,1
-block_hint,MapPrototypeSet,62,63,1
-block_hint,MapPrototypeSet,64,65,1
-block_hint,MapPrototypeSet,88,89,1
-block_hint,MapPrototypeSet,90,91,0
-block_hint,MapPrototypeSet,26,27,1
-block_hint,MapPrototypeSet,94,95,0
-block_hint,MapPrototypeSet,56,57,0
-block_hint,MapPrototypeSet,24,25,0
-block_hint,MapPrototypeSet,22,23,0
-block_hint,MapPrototypeSet,31,32,1
-block_hint,MapPrototypeSet,66,67,0
-block_hint,MapPrototypeSet,47,48,0
-block_hint,MapPrototypeSet,49,50,1
-block_hint,MapPrototypeSet,51,52,1
-block_hint,MapPrototypeSet,53,54,0
-block_hint,MapPrototypeSet,17,18,1
-block_hint,MapPrototypeSet,29,30,1
-block_hint,MapPrototypeDelete,98,99,1
-block_hint,MapPrototypeDelete,77,78,1
-block_hint,MapPrototypeDelete,79,80,1
-block_hint,MapPrototypeDelete,15,16,0
-block_hint,MapPrototypeDelete,89,90,1
-block_hint,MapPrototypeDelete,63,64,0
-block_hint,MapPrototypeDelete,40,41,0
-block_hint,MapPrototypeDelete,65,66,1
-block_hint,MapPrototypeDelete,67,68,1
-block_hint,MapPrototypeDelete,19,20,1
-block_hint,MapPrototypeDelete,21,22,1
-block_hint,MapPrototypeDelete,23,24,1
-block_hint,MapPrototypeGet,12,13,1
-block_hint,MapPrototypeGet,7,8,1
-block_hint,MapPrototypeGet,9,10,1
-block_hint,MapPrototypeGet,3,4,1
-block_hint,MapPrototypeHas,10,11,1
-block_hint,MapPrototypeHas,5,6,1
-block_hint,MapPrototypeHas,7,8,1
-block_hint,MapPrototypeEntries,13,14,1
-block_hint,MapPrototypeEntries,8,9,1
-block_hint,MapPrototypeEntries,10,11,1
-block_hint,MapPrototypeEntries,4,5,1
-block_hint,MapPrototypeEntries,6,7,1
-block_hint,MapPrototypeGetSize,8,9,1
-block_hint,MapPrototypeGetSize,5,6,1
-block_hint,MapPrototypeGetSize,3,4,1
-block_hint,MapPrototypeForEach,33,34,1
-block_hint,MapPrototypeForEach,30,31,0
-block_hint,MapPrototypeForEach,27,28,1
-block_hint,MapPrototypeForEach,20,21,1
-block_hint,MapPrototypeForEach,22,23,1
-block_hint,MapPrototypeForEach,24,25,1
-block_hint,MapPrototypeForEach,12,13,1
-block_hint,MapPrototypeForEach,14,15,0
-block_hint,MapPrototypeValues,13,14,1
-block_hint,MapPrototypeValues,8,9,1
-block_hint,MapPrototypeValues,10,11,1
-block_hint,MapPrototypeValues,4,5,1
-block_hint,MapPrototypeValues,6,7,1
-block_hint,MapIteratorPrototypeNext,47,48,1
-block_hint,MapIteratorPrototypeNext,30,31,1
-block_hint,MapIteratorPrototypeNext,32,33,1
-block_hint,MapIteratorPrototypeNext,19,20,0
-block_hint,MapIteratorPrototypeNext,21,22,0
-block_hint,MapIteratorPrototypeNext,7,8,1
-block_hint,MapIteratorPrototypeNext,39,40,1
-block_hint,MapIteratorPrototypeNext,9,10,1
-block_hint,MapIteratorPrototypeNext,11,12,1
-block_hint,MapIteratorPrototypeNext,13,14,1
-block_hint,MapIteratorPrototypeNext,15,16,1
-block_hint,MapIteratorPrototypeNext,17,18,1
-block_hint,MapIteratorPrototypeNext,25,26,1
-block_hint,SameValueNumbersOnly,4,5,1
-block_hint,Add_Baseline,39,40,0
-block_hint,Add_Baseline,25,26,0
-block_hint,Add_Baseline,9,10,1
-block_hint,Add_Baseline,84,85,0
-block_hint,Add_Baseline,46,47,1
-block_hint,Add_Baseline,56,57,0
-block_hint,Add_Baseline,20,21,1
-block_hint,Add_Baseline,64,65,1
-block_hint,Add_Baseline,23,24,1
-block_hint,Add_Baseline,31,32,1
-block_hint,Add_Baseline,11,12,1
-block_hint,AddSmi_Baseline,39,40,0
-block_hint,AddSmi_Baseline,25,26,0
-block_hint,AddSmi_Baseline,9,10,1
-block_hint,AddSmi_Baseline,60,61,1
-block_hint,AddSmi_Baseline,31,32,1
-block_hint,AddSmi_Baseline,11,12,1
-block_hint,Subtract_Baseline,31,32,0
-block_hint,Subtract_Baseline,11,12,1
-block_hint,Subtract_Baseline,60,61,1
-block_hint,Subtract_Baseline,82,83,1
-block_hint,Subtract_Baseline,76,77,0
-block_hint,Subtract_Baseline,53,54,0
-block_hint,Subtract_Baseline,62,63,1
-block_hint,Subtract_Baseline,23,24,1
-block_hint,Subtract_Baseline,33,34,1
-block_hint,Subtract_Baseline,13,14,1
-block_hint,SubtractSmi_Baseline,31,32,0
-block_hint,SubtractSmi_Baseline,11,12,1
-block_hint,SubtractSmi_Baseline,51,52,1
-block_hint,SubtractSmi_Baseline,33,34,1
-block_hint,SubtractSmi_Baseline,13,14,1
-block_hint,Multiply_Baseline,100,101,0
-block_hint,Multiply_Baseline,61,62,0
-block_hint,Multiply_Baseline,77,78,0
-block_hint,Multiply_Baseline,87,88,1
-block_hint,Multiply_Baseline,79,80,1
-block_hint,Multiply_Baseline,13,14,1
-block_hint,Multiply_Baseline,63,64,1
-block_hint,Multiply_Baseline,93,94,1
-block_hint,Multiply_Baseline,65,66,1
-block_hint,Multiply_Baseline,34,35,1
-block_hint,Multiply_Baseline,15,16,1
-block_hint,MultiplySmi_Baseline,92,93,0
-block_hint,MultiplySmi_Baseline,61,62,0
-block_hint,MultiplySmi_Baseline,71,72,0
-block_hint,MultiplySmi_Baseline,73,74,1
-block_hint,MultiplySmi_Baseline,32,33,0
-block_hint,MultiplySmi_Baseline,13,14,1
-block_hint,MultiplySmi_Baseline,51,52,1
-block_hint,MultiplySmi_Baseline,34,35,1
-block_hint,MultiplySmi_Baseline,15,16,1
-block_hint,Divide_Baseline,89,90,0
-block_hint,Divide_Baseline,91,92,0
-block_hint,Divide_Baseline,69,70,0
-block_hint,Divide_Baseline,47,48,1
-block_hint,Divide_Baseline,14,15,1
-block_hint,Divide_Baseline,73,74,1
-block_hint,Divide_Baseline,97,98,1
-block_hint,Divide_Baseline,75,76,1
-block_hint,Divide_Baseline,55,56,0
-block_hint,Divide_Baseline,28,29,1
-block_hint,Divide_Baseline,40,41,1
-block_hint,Divide_Baseline,16,17,1
-block_hint,DivideSmi_Baseline,83,84,0
-block_hint,DivideSmi_Baseline,99,100,0
-block_hint,DivideSmi_Baseline,85,86,0
-block_hint,DivideSmi_Baseline,69,70,0
-block_hint,DivideSmi_Baseline,47,48,1
-block_hint,DivideSmi_Baseline,14,15,1
-block_hint,DivideSmi_Baseline,57,58,1
-block_hint,DivideSmi_Baseline,40,41,1
-block_hint,DivideSmi_Baseline,16,17,1
-block_hint,Modulus_Baseline,108,109,0
-block_hint,Modulus_Baseline,94,95,0
-block_hint,Modulus_Baseline,71,72,1
-block_hint,Modulus_Baseline,66,67,1
-block_hint,Modulus_Baseline,37,38,0
-block_hint,Modulus_Baseline,14,15,1
-block_hint,ModulusSmi_Baseline,71,72,1
-block_hint,ModulusSmi_Baseline,66,67,1
-block_hint,ModulusSmi_Baseline,37,38,0
-block_hint,ModulusSmi_Baseline,14,15,1
-block_hint,ModulusSmi_Baseline,54,55,1
-block_hint,ModulusSmi_Baseline,39,40,1
-block_hint,ModulusSmi_Baseline,16,17,1
-block_hint,BitwiseAnd_Baseline,35,36,0
-block_hint,BitwiseAnd_Baseline,23,24,1
-block_hint,BitwiseAnd_Baseline,8,9,0
-block_hint,BitwiseAnd_Baseline,33,34,0
-block_hint,BitwiseAnd_Baseline,27,28,1
-block_hint,BitwiseAnd_Baseline,12,13,0
-block_hint,BitwiseAnd_Baseline,50,51,1
-block_hint,BitwiseAnd_Baseline,14,15,1
-block_hint,BitwiseAndSmi_Baseline,18,19,0
-block_hint,BitwiseAndSmi_Baseline,16,17,1
-block_hint,BitwiseAndSmi_Baseline,7,8,0
-block_hint,BitwiseAndSmi_Baseline,26,27,0
-block_hint,BitwiseAndSmi_Baseline,20,21,0
-block_hint,BitwiseAndSmi_Baseline,9,10,1
-block_hint,BitwiseOr_Baseline,35,36,0
-block_hint,BitwiseOr_Baseline,23,24,1
-block_hint,BitwiseOr_Baseline,8,9,1
-block_hint,BitwiseOr_Baseline,48,49,1
-block_hint,BitwiseOr_Baseline,50,51,1
-block_hint,BitwiseOr_Baseline,14,15,1
-block_hint,BitwiseOrSmi_Baseline,5,6,0
-block_hint,BitwiseOrSmi_Baseline,18,19,0
-block_hint,BitwiseOrSmi_Baseline,16,17,0
-block_hint,BitwiseOrSmi_Baseline,28,29,1
-block_hint,BitwiseOrSmi_Baseline,9,10,1
-block_hint,BitwiseXor_Baseline,25,26,1
-block_hint,BitwiseXor_Baseline,35,36,0
-block_hint,BitwiseXor_Baseline,23,24,1
-block_hint,BitwiseXor_Baseline,48,49,1
-block_hint,BitwiseXor_Baseline,33,34,0
-block_hint,BitwiseXor_Baseline,27,28,1
-block_hint,BitwiseXor_Baseline,50,51,1
-block_hint,BitwiseXor_Baseline,14,15,1
-block_hint,BitwiseXorSmi_Baseline,18,19,0
-block_hint,BitwiseXorSmi_Baseline,16,17,1
-block_hint,BitwiseXorSmi_Baseline,7,8,1
-block_hint,BitwiseXorSmi_Baseline,9,10,1
-block_hint,ShiftLeft_Baseline,25,26,1
-block_hint,ShiftLeft_Baseline,10,11,0
-block_hint,ShiftLeft_Baseline,50,51,1
-block_hint,ShiftLeft_Baseline,14,15,1
-block_hint,ShiftLeftSmi_Baseline,35,36,1
-block_hint,ShiftLeftSmi_Baseline,25,26,1
-block_hint,ShiftLeftSmi_Baseline,37,38,1
-block_hint,ShiftLeftSmi_Baseline,9,10,1
-block_hint,ShiftRight_Baseline,6,7,0
-block_hint,ShiftRight_Baseline,10,11,0
-block_hint,ShiftRight_Baseline,46,47,0
-block_hint,ShiftRight_Baseline,29,30,0
-block_hint,ShiftRight_Baseline,14,15,1
-block_hint,ShiftRightSmi_Baseline,22,23,1
-block_hint,ShiftRightSmi_Baseline,26,27,0
-block_hint,ShiftRightSmi_Baseline,20,21,0
-block_hint,ShiftRightSmi_Baseline,9,10,1
-block_hint,ShiftRightLogical_Baseline,25,26,1
-block_hint,ShiftRightLogical_Baseline,10,11,0
-block_hint,ShiftRightLogical_Baseline,14,15,1
-block_hint,ShiftRightLogicalSmi_Baseline,35,36,1
-block_hint,ShiftRightLogicalSmi_Baseline,25,26,1
-block_hint,ShiftRightLogicalSmi_Baseline,33,34,0
-block_hint,ShiftRightLogicalSmi_Baseline,23,24,0
-block_hint,ShiftRightLogicalSmi_Baseline,9,10,1
-block_hint,Add_WithFeedback,60,61,1
-block_hint,Add_WithFeedback,86,87,0
-block_hint,Add_WithFeedback,84,85,0
-block_hint,Add_WithFeedback,54,55,1
-block_hint,Add_WithFeedback,46,47,1
-block_hint,Add_WithFeedback,33,34,0
-block_hint,Add_WithFeedback,23,24,1
-block_hint,Subtract_WithFeedback,74,75,1
-block_hint,Subtract_WithFeedback,82,83,0
-block_hint,Subtract_WithFeedback,76,77,0
-block_hint,Subtract_WithFeedback,53,54,0
-block_hint,Subtract_WithFeedback,23,24,1
-block_hint,Modulus_WithFeedback,108,109,0
-block_hint,Modulus_WithFeedback,94,95,0
-block_hint,Modulus_WithFeedback,71,72,1
-block_hint,Modulus_WithFeedback,66,67,1
-block_hint,Modulus_WithFeedback,37,38,0
-block_hint,Modulus_WithFeedback,14,15,1
-block_hint,BitwiseOr_WithFeedback,6,7,1
-block_hint,BitwiseOr_WithFeedback,35,36,0
-block_hint,BitwiseOr_WithFeedback,23,24,0
-block_hint,BitwiseOr_WithFeedback,10,11,0
-block_hint,BitwiseOr_WithFeedback,46,47,0
-block_hint,BitwiseOr_WithFeedback,29,30,0
-block_hint,BitwiseOr_WithFeedback,14,15,1
-block_hint,Equal_Baseline,48,49,0
-block_hint,Equal_Baseline,18,19,1
-block_hint,Equal_Baseline,101,102,0
-block_hint,Equal_Baseline,14,15,1
-block_hint,Equal_Baseline,39,40,0
-block_hint,Equal_Baseline,26,27,0
-block_hint,Equal_Baseline,28,29,1
-block_hint,Equal_Baseline,45,46,0
-block_hint,Equal_Baseline,32,33,0
-block_hint,Equal_Baseline,24,25,1
-block_hint,Equal_Baseline,77,78,0
-block_hint,Equal_Baseline,75,76,0
-block_hint,Equal_Baseline,83,84,0
-block_hint,Equal_Baseline,85,86,0
-block_hint,Equal_Baseline,59,60,0
-block_hint,Equal_Baseline,65,66,0
-block_hint,Equal_Baseline,69,70,1
-block_hint,Equal_Baseline,98,99,0
-block_hint,Equal_Baseline,71,72,1
-block_hint,Equal_Baseline,6,7,1
-block_hint,StrictEqual_Baseline,37,38,0
-block_hint,StrictEqual_Baseline,76,77,0
-block_hint,StrictEqual_Baseline,47,48,1
-block_hint,StrictEqual_Baseline,60,61,0
-block_hint,StrictEqual_Baseline,51,52,0
-block_hint,StrictEqual_Baseline,53,54,1
-block_hint,StrictEqual_Baseline,35,36,1
-block_hint,StrictEqual_Baseline,33,34,0
-block_hint,StrictEqual_Baseline,55,56,0
-block_hint,StrictEqual_Baseline,29,30,1
-block_hint,StrictEqual_Baseline,31,32,1
-block_hint,StrictEqual_Baseline,49,50,1
-block_hint,StrictEqual_Baseline,41,42,0
-block_hint,StrictEqual_Baseline,45,46,0
-block_hint,StrictEqual_Baseline,66,67,0
-block_hint,StrictEqual_Baseline,13,14,0
-block_hint,StrictEqual_Baseline,43,44,0
-block_hint,StrictEqual_Baseline,3,4,1
-block_hint,LessThan_Baseline,44,45,0
-block_hint,LessThan_Baseline,23,24,1
-block_hint,LessThan_Baseline,25,26,1
-block_hint,LessThan_Baseline,10,11,0
-block_hint,LessThan_Baseline,56,57,0
-block_hint,LessThan_Baseline,12,13,0
-block_hint,LessThan_Baseline,5,6,1
-block_hint,GreaterThan_Baseline,44,45,0
-block_hint,GreaterThan_Baseline,23,24,1
-block_hint,GreaterThan_Baseline,10,11,0
-block_hint,GreaterThan_Baseline,48,49,1
-block_hint,GreaterThan_Baseline,56,57,0
-block_hint,GreaterThan_Baseline,58,59,0
-block_hint,GreaterThan_Baseline,54,55,1
-block_hint,GreaterThan_Baseline,50,51,1
-block_hint,GreaterThan_Baseline,18,19,0
-block_hint,GreaterThan_Baseline,12,13,0
-block_hint,GreaterThan_Baseline,5,6,1
-block_hint,LessThanOrEqual_Baseline,44,45,0
-block_hint,LessThanOrEqual_Baseline,23,24,1
-block_hint,LessThanOrEqual_Baseline,25,26,1
-block_hint,LessThanOrEqual_Baseline,56,57,0
-block_hint,LessThanOrEqual_Baseline,58,59,1
-block_hint,LessThanOrEqual_Baseline,37,38,1
-block_hint,LessThanOrEqual_Baseline,27,28,1
-block_hint,LessThanOrEqual_Baseline,5,6,1
-block_hint,GreaterThanOrEqual_Baseline,44,45,0
-block_hint,GreaterThanOrEqual_Baseline,23,24,1
-block_hint,GreaterThanOrEqual_Baseline,25,26,1
-block_hint,GreaterThanOrEqual_Baseline,56,57,0
-block_hint,GreaterThanOrEqual_Baseline,27,28,1
-block_hint,GreaterThanOrEqual_Baseline,5,6,1
-block_hint,Equal_WithFeedback,103,104,0
-block_hint,Equal_WithFeedback,81,82,1
-block_hint,Equal_WithFeedback,37,38,0
-block_hint,Equal_WithFeedback,48,49,0
-block_hint,Equal_WithFeedback,18,19,1
-block_hint,Equal_WithFeedback,95,96,0
-block_hint,Equal_WithFeedback,101,102,0
-block_hint,Equal_WithFeedback,20,21,0
-block_hint,Equal_WithFeedback,39,40,0
-block_hint,Equal_WithFeedback,26,27,0
-block_hint,Equal_WithFeedback,28,29,1
-block_hint,Equal_WithFeedback,45,46,0
-block_hint,Equal_WithFeedback,32,33,0
-block_hint,Equal_WithFeedback,75,76,0
-block_hint,Equal_WithFeedback,83,84,0
-block_hint,Equal_WithFeedback,85,86,0
-block_hint,Equal_WithFeedback,87,88,0
-block_hint,Equal_WithFeedback,79,80,0
-block_hint,Equal_WithFeedback,89,90,0
-block_hint,Equal_WithFeedback,117,118,0
-block_hint,Equal_WithFeedback,109,110,0
-block_hint,Equal_WithFeedback,107,108,0
-block_hint,Equal_WithFeedback,67,68,0
-block_hint,Equal_WithFeedback,105,106,0
-block_hint,Equal_WithFeedback,65,66,0
-block_hint,Equal_WithFeedback,6,7,1
-block_hint,StrictEqual_WithFeedback,74,75,1
-block_hint,StrictEqual_WithFeedback,37,38,0
-block_hint,StrictEqual_WithFeedback,72,73,0
-block_hint,StrictEqual_WithFeedback,47,48,1
-block_hint,StrictEqual_WithFeedback,60,61,0
-block_hint,StrictEqual_WithFeedback,53,54,1
-block_hint,StrictEqual_WithFeedback,35,36,1
-block_hint,StrictEqual_WithFeedback,57,58,1
-block_hint,StrictEqual_WithFeedback,55,56,0
-block_hint,StrictEqual_WithFeedback,31,32,1
-block_hint,StrictEqual_WithFeedback,41,42,0
-block_hint,StrictEqual_WithFeedback,70,71,1
-block_hint,StrictEqual_WithFeedback,45,46,0
-block_hint,StrictEqual_WithFeedback,21,22,1
-block_hint,StrictEqual_WithFeedback,66,67,0
-block_hint,StrictEqual_WithFeedback,15,16,0
-block_hint,StrictEqual_WithFeedback,13,14,0
-block_hint,StrictEqual_WithFeedback,43,44,0
-block_hint,StrictEqual_WithFeedback,3,4,1
-block_hint,LessThan_WithFeedback,44,45,1
-block_hint,LessThan_WithFeedback,23,24,1
-block_hint,LessThan_WithFeedback,46,47,1
-block_hint,LessThan_WithFeedback,48,49,1
-block_hint,LessThan_WithFeedback,56,57,0
-block_hint,LessThan_WithFeedback,54,55,0
-block_hint,LessThan_WithFeedback,18,19,1
-block_hint,LessThan_WithFeedback,31,32,0
-block_hint,LessThan_WithFeedback,16,17,1
-block_hint,LessThan_WithFeedback,12,13,0
-block_hint,LessThan_WithFeedback,39,40,1
-block_hint,LessThan_WithFeedback,5,6,1
-block_hint,GreaterThan_WithFeedback,60,61,1
-block_hint,GreaterThan_WithFeedback,23,24,1
-block_hint,GreaterThan_WithFeedback,25,26,1
-block_hint,GreaterThan_WithFeedback,48,49,1
-block_hint,GreaterThan_WithFeedback,56,57,0
-block_hint,GreaterThan_WithFeedback,58,59,0
-block_hint,GreaterThan_WithFeedback,54,55,1
-block_hint,GreaterThan_WithFeedback,50,51,1
-block_hint,GreaterThan_WithFeedback,18,19,0
-block_hint,GreaterThan_WithFeedback,12,13,0
-block_hint,GreaterThan_WithFeedback,5,6,1
-block_hint,GreaterThanOrEqual_WithFeedback,60,61,1
-block_hint,GreaterThanOrEqual_WithFeedback,46,47,1
-block_hint,GreaterThanOrEqual_WithFeedback,48,49,0
-block_hint,GreaterThanOrEqual_WithFeedback,56,57,0
-block_hint,GreaterThanOrEqual_WithFeedback,54,55,0
-block_hint,GreaterThanOrEqual_WithFeedback,18,19,1
-block_hint,GreaterThanOrEqual_WithFeedback,31,32,0
-block_hint,GreaterThanOrEqual_WithFeedback,16,17,1
-block_hint,GreaterThanOrEqual_WithFeedback,5,6,1
-block_hint,BitwiseNot_Baseline,19,20,0
-block_hint,BitwiseNot_Baseline,15,16,1
-block_hint,BitwiseNot_Baseline,7,8,1
-block_hint,BitwiseNot_Baseline,27,28,1
-block_hint,BitwiseNot_Baseline,9,10,1
-block_hint,Decrement_Baseline,19,20,0
-block_hint,Decrement_Baseline,17,18,1
-block_hint,Decrement_Baseline,13,14,0
-block_hint,Decrement_Baseline,15,16,1
-block_hint,Decrement_Baseline,5,6,1
-block_hint,Increment_Baseline,19,20,0
-block_hint,Increment_Baseline,17,18,1
-block_hint,Increment_Baseline,13,14,0
-block_hint,Increment_Baseline,15,16,1
-block_hint,Increment_Baseline,5,6,1
-block_hint,Negate_Baseline,20,21,1
-block_hint,Negate_Baseline,14,15,0
-block_hint,Negate_Baseline,18,19,1
-block_hint,Negate_Baseline,5,6,1
-block_hint,ObjectAssign,21,22,1
-block_hint,ObjectAssign,18,19,0
-block_hint,ObjectAssign,15,16,1
-block_hint,ObjectAssign,12,13,1
-block_hint,ObjectAssign,9,10,0
-block_hint,ObjectAssign,5,6,0
-block_hint,ObjectCreate,78,79,1
-block_hint,ObjectCreate,75,76,0
-block_hint,ObjectCreate,33,34,1
-block_hint,ObjectCreate,35,36,1
-block_hint,ObjectCreate,37,38,1
-block_hint,ObjectCreate,39,40,0
-block_hint,ObjectCreate,41,42,1
-block_hint,ObjectCreate,43,44,0
-block_hint,ObjectCreate,45,46,1
-block_hint,ObjectCreate,17,18,1
-block_hint,ObjectCreate,69,70,0
-block_hint,ObjectCreate,55,56,0
-block_hint,ObjectCreate,59,60,1
-block_hint,ObjectCreate,47,48,0
-block_hint,ObjectCreate,49,50,0
-block_hint,ObjectCreate,5,6,1
-block_hint,ObjectCreate,52,53,1
-block_hint,ObjectCreate,7,8,1
-block_hint,ObjectCreate,9,10,1
-block_hint,ObjectCreate,11,12,1
-block_hint,ObjectCreate,13,14,1
-block_hint,ObjectCreate,15,16,1
-block_hint,ObjectCreate,20,21,0
-block_hint,ObjectCreate,61,62,1
-block_hint,ObjectGetOwnPropertyDescriptor,519,520,1
-block_hint,ObjectGetOwnPropertyDescriptor,516,517,0
-block_hint,ObjectGetOwnPropertyDescriptor,513,514,0
-block_hint,ObjectGetOwnPropertyDescriptor,505,506,1
-block_hint,ObjectGetOwnPropertyDescriptor,492,493,1
-block_hint,ObjectGetOwnPropertyDescriptor,408,409,0
-block_hint,ObjectGetOwnPropertyDescriptor,470,471,1
-block_hint,ObjectGetOwnPropertyDescriptor,488,489,0
-block_hint,ObjectGetOwnPropertyDescriptor,434,435,0
-block_hint,ObjectGetOwnPropertyDescriptor,467,468,1
-block_hint,ObjectGetOwnPropertyDescriptor,410,411,1
-block_hint,ObjectGetOwnPropertyDescriptor,462,463,0
-block_hint,ObjectGetOwnPropertyDescriptor,464,465,0
-block_hint,ObjectGetOwnPropertyDescriptor,436,437,0
-block_hint,ObjectGetOwnPropertyDescriptor,406,407,0
-block_hint,ObjectGetOwnPropertyDescriptor,331,332,0
-block_hint,ObjectGetOwnPropertyDescriptor,197,198,1
-block_hint,ObjectGetOwnPropertyDescriptor,307,308,1
-block_hint,ObjectGetOwnPropertyDescriptor,138,139,0
-block_hint,ObjectGetOwnPropertyDescriptor,499,500,0
-block_hint,ObjectGetOwnPropertyDescriptor,507,508,1
-block_hint,ObjectGetOwnPropertyDescriptor,495,496,0
-block_hint,ObjectGetOwnPropertyDescriptor,426,427,0
-block_hint,ObjectGetOwnPropertyDescriptor,329,330,0
-block_hint,ObjectGetOwnPropertyDescriptor,31,32,1
-block_hint,ObjectGetOwnPropertyDescriptor,361,362,1
-block_hint,ObjectGetOwnPropertyDescriptor,150,151,0
-block_hint,ObjectGetOwnPropertyDescriptor,474,475,0
-block_hint,ObjectGetOwnPropertyDescriptor,390,391,0
-block_hint,ObjectGetOwnPropertyDescriptor,264,265,0
-block_hint,ObjectGetOwnPropertyDescriptor,260,261,0
-block_hint,ObjectGetOwnPropertyDescriptor,282,283,0
-block_hint,ObjectGetOwnPropertyDescriptor,284,285,1
-block_hint,ObjectGetOwnPropertyDescriptor,36,37,1
-block_hint,ObjectGetOwnPropertyDescriptor,365,366,1
-block_hint,ObjectGetOwnPropertyDescriptor,186,187,0
-block_hint,ObjectGetOwnPropertyDescriptor,268,269,1
-block_hint,ObjectKeys,32,33,1
-block_hint,ObjectKeys,27,28,1
-block_hint,ObjectKeys,23,24,1
-block_hint,ObjectKeys,25,26,0
-block_hint,ObjectKeys,17,18,0
-block_hint,ObjectKeys,5,6,1
-block_hint,ObjectKeys,21,22,1
-block_hint,ObjectKeys,9,10,0
-block_hint,ObjectKeys,7,8,1
-block_hint,ObjectKeys,14,15,1
-block_hint,ObjectPrototypeHasOwnProperty,230,231,1
-block_hint,ObjectPrototypeHasOwnProperty,205,206,1
-block_hint,ObjectPrototypeHasOwnProperty,222,223,1
-block_hint,ObjectPrototypeHasOwnProperty,241,242,0
-block_hint,ObjectPrototypeHasOwnProperty,219,220,0
-block_hint,ObjectPrototypeHasOwnProperty,209,210,1
-block_hint,ObjectPrototypeHasOwnProperty,163,164,1
-block_hint,ObjectPrototypeHasOwnProperty,235,236,0
-block_hint,ObjectPrototypeHasOwnProperty,237,238,0
-block_hint,ObjectPrototypeHasOwnProperty,233,234,0
-block_hint,ObjectPrototypeHasOwnProperty,228,229,0
-block_hint,ObjectPrototypeHasOwnProperty,192,193,1
-block_hint,ObjectPrototypeHasOwnProperty,137,138,0
-block_hint,ObjectPrototypeHasOwnProperty,211,212,0
-block_hint,ObjectPrototypeHasOwnProperty,175,176,1
-block_hint,ObjectPrototypeHasOwnProperty,141,142,0
-block_hint,ObjectPrototypeHasOwnProperty,226,227,0
-block_hint,ObjectPrototypeHasOwnProperty,76,77,0
-block_hint,ObjectPrototypeHasOwnProperty,203,204,0
-block_hint,ObjectPrototypeHasOwnProperty,34,35,1
-block_hint,ObjectPrototypeHasOwnProperty,52,53,0
-block_hint,ObjectPrototypeHasOwnProperty,36,37,0
-block_hint,ObjectPrototypeHasOwnProperty,197,198,1
-block_hint,ObjectPrototypeHasOwnProperty,40,41,0
-block_hint,ObjectPrototypeHasOwnProperty,171,172,0
-block_hint,ObjectPrototypeHasOwnProperty,178,179,1
-block_hint,ObjectPrototypeHasOwnProperty,58,59,0
-block_hint,ObjectToString,45,46,0
-block_hint,ObjectToString,60,61,0
-block_hint,ObjectToString,68,69,0
-block_hint,ObjectToString,55,56,0
-block_hint,ObjectToString,7,8,1
-block_hint,ObjectToString,5,6,1
-block_hint,ObjectToString,11,12,1
-block_hint,ObjectToString,20,21,0
-block_hint,InstanceOf_WithFeedback,50,51,1
-block_hint,InstanceOf_WithFeedback,52,53,0
-block_hint,InstanceOf_WithFeedback,54,55,1
-block_hint,InstanceOf_WithFeedback,32,33,1
-block_hint,InstanceOf_WithFeedback,34,35,1
-block_hint,InstanceOf_WithFeedback,5,6,1
-block_hint,InstanceOf_WithFeedback,14,15,1
-block_hint,InstanceOf_Baseline,50,51,1
-block_hint,InstanceOf_Baseline,54,55,1
-block_hint,InstanceOf_Baseline,32,33,1
-block_hint,InstanceOf_Baseline,34,35,1
-block_hint,InstanceOf_Baseline,5,6,1
-block_hint,InstanceOf_Baseline,14,15,1
-block_hint,ForInEnumerate,34,35,1
-block_hint,ForInEnumerate,36,37,0
-block_hint,ForInEnumerate,30,31,0
-block_hint,ForInEnumerate,22,23,1
-block_hint,ForInEnumerate,32,33,1
-block_hint,ForInEnumerate,5,6,1
-block_hint,ForInEnumerate,38,39,1
-block_hint,ForInEnumerate,9,10,1
-block_hint,ForInPrepare,7,8,1
-block_hint,ForInPrepare,12,13,1
-block_hint,ForInPrepare,5,6,1
-block_hint,ForInFilter,234,235,1
-block_hint,ForInFilter,236,237,1
-block_hint,ForInFilter,227,228,0
-block_hint,ForInFilter,117,118,1
-block_hint,ForInFilter,217,218,0
-block_hint,ForInFilter,62,63,0
-block_hint,ForInFilter,129,130,1
-block_hint,ForInFilter,221,222,1
-block_hint,ForInFilter,103,104,0
-block_hint,ForInFilter,105,106,0
-block_hint,ForInFilter,66,67,0
-block_hint,ForInFilter,64,65,0
-block_hint,ForInFilter,270,271,0
-block_hint,ForInFilter,225,226,1
-block_hint,ForInFilter,109,110,1
-block_hint,ForInFilter,71,72,0
-block_hint,ForInFilter,266,267,0
-block_hint,ForInFilter,264,265,0
-block_hint,ForInFilter,251,252,0
-block_hint,ForInFilter,107,108,1
-block_hint,ForInFilter,40,41,1
-block_hint,ForInFilter,201,202,0
-block_hint,ForInFilter,42,43,0
-block_hint,ForInFilter,144,145,1
-block_hint,ForInFilter,46,47,0
-block_hint,ForInFilter,113,114,0
-block_hint,ForInFilter,131,132,0
-block_hint,ForInFilter,36,37,0
-block_hint,ForInFilter,248,249,0
-block_hint,ForInFilter,255,256,1
-block_hint,ForInFilter,189,190,0
-block_hint,ForInFilter,33,34,1
-block_hint,RegExpConstructor,55,56,1
-block_hint,RegExpConstructor,7,8,1
-block_hint,RegExpConstructor,131,132,1
-block_hint,RegExpConstructor,133,134,1
-block_hint,RegExpConstructor,70,71,0
-block_hint,RegExpConstructor,106,107,1
-block_hint,RegExpConstructor,127,128,0
-block_hint,RegExpConstructor,108,109,0
-block_hint,RegExpConstructor,82,83,1
-block_hint,RegExpConstructor,67,68,1
-block_hint,RegExpConstructor,40,41,0
-block_hint,RegExpConstructor,76,77,0
-block_hint,RegExpConstructor,104,105,1
-block_hint,RegExpConstructor,86,87,1
-block_hint,RegExpConstructor,78,79,1
-block_hint,RegExpConstructor,63,64,1
-block_hint,RegExpExecInternal,20,21,0
-block_hint,RegExpExecInternal,22,23,0
-block_hint,RegExpExecInternal,36,37,0
-block_hint,RegExpExecInternal,12,13,0
-block_hint,RegExpExecInternal,48,49,0
-block_hint,RegExpExecInternal,51,52,1
-block_hint,RegExpExecInternal,40,41,1
-block_hint,RegExpExecInternal,44,45,0
-block_hint,RegExpExecInternal,24,25,1
-block_hint,FindOrderedHashSetEntry,26,27,1
-block_hint,FindOrderedHashSetEntry,34,35,0
-block_hint,FindOrderedHashSetEntry,24,25,0
-block_hint,FindOrderedHashSetEntry,22,23,0
-block_hint,FindOrderedHashSetEntry,42,43,1
-block_hint,FindOrderedHashSetEntry,68,69,0
-block_hint,FindOrderedHashSetEntry,58,59,1
-block_hint,FindOrderedHashSetEntry,60,61,1
-block_hint,SetConstructor,202,203,1
-block_hint,SetConstructor,74,75,0
-block_hint,SetConstructor,11,12,1
-block_hint,SetConstructor,172,173,1
-block_hint,SetConstructor,135,136,1
-block_hint,SetConstructor,56,57,0
-block_hint,SetConstructor,58,59,1
-block_hint,SetConstructor,218,219,1
-block_hint,SetConstructor,210,211,0
-block_hint,SetConstructor,79,80,1
-block_hint,SetConstructor,23,24,1
-block_hint,SetConstructor,222,223,1
-block_hint,SetConstructor,214,215,0
-block_hint,SetConstructor,150,151,1
-block_hint,SetConstructor,25,26,1
-block_hint,SetConstructor,178,179,1
-block_hint,SetConstructor,143,144,1
-block_hint,SetConstructor,83,84,1
-block_hint,SetConstructor,85,86,1
-block_hint,SetConstructor,87,88,1
-block_hint,SetConstructor,89,90,1
-block_hint,SetConstructor,91,92,1
-block_hint,SetConstructor,93,94,1
-block_hint,SetConstructor,34,35,1
-block_hint,SetConstructor,95,96,1
-block_hint,SetConstructor,146,147,1
-block_hint,SetConstructor,152,153,1
-block_hint,SetConstructor,190,191,0
-block_hint,SetConstructor,183,184,0
-block_hint,SetConstructor,154,155,0
-block_hint,SetConstructor,105,106,0
-block_hint,SetConstructor,137,138,1
-block_hint,SetConstructor,27,28,1
-block_hint,SetConstructor,62,63,1
-block_hint,SetConstructor,176,177,0
-block_hint,SetConstructor,66,67,1
-block_hint,SetPrototypeHas,10,11,1
-block_hint,SetPrototypeHas,5,6,1
-block_hint,SetPrototypeHas,7,8,1
-block_hint,SetPrototypeAdd,98,99,1
-block_hint,SetPrototypeAdd,62,63,1
-block_hint,SetPrototypeAdd,64,65,1
-block_hint,SetPrototypeAdd,88,89,1
-block_hint,SetPrototypeAdd,90,91,0
-block_hint,SetPrototypeAdd,27,28,1
-block_hint,SetPrototypeAdd,79,80,0
-block_hint,SetPrototypeAdd,25,26,0
-block_hint,SetPrototypeAdd,23,24,0
-block_hint,SetPrototypeAdd,35,36,1
-block_hint,SetPrototypeAdd,66,67,0
-block_hint,SetPrototypeAdd,51,52,1
-block_hint,SetPrototypeAdd,53,54,1
-block_hint,SetPrototypeAdd,33,34,1
-block_hint,SetPrototypeDelete,96,97,1
-block_hint,SetPrototypeDelete,75,76,1
-block_hint,SetPrototypeDelete,77,78,1
-block_hint,SetPrototypeDelete,15,16,0
-block_hint,SetPrototypeDelete,32,33,1
-block_hint,SetPrototypeDelete,87,88,0
-block_hint,SetPrototypeDelete,30,31,0
-block_hint,SetPrototypeDelete,28,29,0
-block_hint,SetPrototypeDelete,45,46,1
-block_hint,SetPrototypeDelete,83,84,0
-block_hint,SetPrototypeDelete,79,80,0
-block_hint,SetPrototypeDelete,19,20,1
-block_hint,SetPrototypeDelete,21,22,1
-block_hint,SetPrototypeGetSize,8,9,1
-block_hint,SetPrototypeGetSize,5,6,1
-block_hint,SetPrototypeGetSize,3,4,1
-block_hint,SetPrototypeValues,13,14,1
-block_hint,SetPrototypeValues,8,9,1
-block_hint,SetPrototypeValues,10,11,1
-block_hint,SetPrototypeValues,4,5,1
-block_hint,SetPrototypeValues,6,7,1
-block_hint,SetIteratorPrototypeNext,41,42,1
-block_hint,SetIteratorPrototypeNext,28,29,1
-block_hint,SetIteratorPrototypeNext,39,40,1
-block_hint,SetIteratorPrototypeNext,17,18,0
-block_hint,SetIteratorPrototypeNext,19,20,0
-block_hint,SetIteratorPrototypeNext,37,38,1
-block_hint,SetIteratorPrototypeNext,15,16,1
-block_hint,SetIteratorPrototypeNext,23,24,1
-block_hint,SetOrSetIteratorToList,33,34,1
-block_hint,SetOrSetIteratorToList,8,9,1
-block_hint,SetOrSetIteratorToList,31,32,1
-block_hint,SetOrSetIteratorToList,47,48,1
-block_hint,SetOrSetIteratorToList,43,44,1
-block_hint,SetOrSetIteratorToList,14,15,0
-block_hint,SetOrSetIteratorToList,19,20,0
-block_hint,SetOrSetIteratorToList,24,25,1
-block_hint,StringFromCharCode,87,88,1
-block_hint,StringFromCharCode,53,54,1
-block_hint,StringFromCharCode,11,12,0
-block_hint,StringFromCharCode,81,82,1
-block_hint,StringFromCharCode,77,78,1
-block_hint,StringFromCharCode,19,20,0
-block_hint,StringFromCharCode,23,24,0
-block_hint,StringFromCharCode,58,59,0
-block_hint,StringFromCharCode,21,22,0
-block_hint,StringFromCharCode,29,30,0
-block_hint,StringFromCharCode,35,36,0
-block_hint,StringFromCharCode,33,34,0
-block_hint,StringFromCharCode,75,76,0
-block_hint,StringFromCharCode,41,42,0
-block_hint,StringFromCharCode,17,18,1
-block_hint,StringFromCharCode,44,45,1
-block_hint,StringPrototypeReplace,36,37,1
-block_hint,StringPrototypeReplace,8,9,0
-block_hint,StringPrototypeReplace,55,56,1
-block_hint,StringPrototypeReplace,51,52,1
-block_hint,StringPrototypeReplace,38,39,1
-block_hint,StringPrototypeReplace,22,23,0
-block_hint,StringPrototypeReplace,3,4,1
-block_hint,StringPrototypeReplace,24,25,0
-block_hint,StringPrototypeReplace,5,6,1
-block_hint,StringPrototypeReplace,28,29,1
-block_hint,StringPrototypeReplace,10,11,1
-block_hint,StringPrototypeReplace,57,58,0
-block_hint,StringPrototypeReplace,30,31,1
-block_hint,StringPrototypeReplace,92,93,1
-block_hint,StringPrototypeReplace,87,88,1
-block_hint,StringPrototypeReplace,80,81,1
-block_hint,StringPrototypeReplace,73,74,1
-block_hint,StringPrototypeReplace,59,60,1
-block_hint,StringPrototypeReplace,61,62,0
-block_hint,StringPrototypeReplace,63,64,1
-block_hint,StringPrototypeReplace,53,54,1
-block_hint,StringPrototypeReplace,42,43,1
-block_hint,StringPrototypeReplace,14,15,1
-block_hint,StringPrototypeReplace,90,91,1
-block_hint,StringPrototypeReplace,82,83,1
-block_hint,StringPrototypeReplace,76,77,0
-block_hint,StringPrototypeReplace,78,79,1
-block_hint,StringPrototypeReplace,70,71,1
-block_hint,StringPrototypeReplace,49,50,1
-block_hint,StringPrototypeReplace,16,17,1
-block_hint,StringPrototypeReplace,18,19,0
-block_hint,StringPrototypeReplace,26,27,1
-block_hint,StringPrototypeSplit,125,126,1
-block_hint,StringPrototypeSplit,112,113,0
-block_hint,StringPrototypeSplit,92,93,1
-block_hint,StringPrototypeSplit,35,36,0
-block_hint,StringPrototypeSplit,114,115,1
-block_hint,StringPrototypeSplit,105,106,1
-block_hint,StringPrototypeSplit,94,95,1
-block_hint,StringPrototypeSplit,64,65,0
-block_hint,StringPrototypeSplit,8,9,1
-block_hint,StringPrototypeSplit,66,67,0
-block_hint,StringPrototypeSplit,10,11,1
-block_hint,StringPrototypeSplit,77,78,1
-block_hint,StringPrototypeSplit,37,38,1
-block_hint,StringPrototypeSplit,116,117,0
-block_hint,StringPrototypeSplit,79,80,1
-block_hint,StringPrototypeSplit,168,169,1
-block_hint,StringPrototypeSplit,152,153,1
-block_hint,StringPrototypeSplit,128,129,1
-block_hint,StringPrototypeSplit,122,123,1
-block_hint,StringPrototypeSplit,107,108,1
-block_hint,StringPrototypeSplit,83,84,0
-block_hint,StringPrototypeSplit,68,69,0
-block_hint,StringPrototypeSplit,85,86,1
-block_hint,StringPrototypeSplit,70,71,1
-block_hint,StringPrototypeSplit,88,89,1
-block_hint,StringPrototypeSplit,25,26,0
-block_hint,StringPrototypeSplit,72,73,1
-block_hint,StringPrototypeSplit,42,43,0
-block_hint,StringPrototypeSplit,110,111,1
-block_hint,StringPrototypeSplit,90,91,0
-block_hint,StringPrototypeSplit,27,28,0
-block_hint,StringPrototypeSplit,16,17,1
-block_hint,StringPrototypeSplit,18,19,1
-block_hint,StringPrototypeSplit,20,21,1
-block_hint,StringPrototypeSplit,50,51,1
-block_hint,TypedArrayConstructor,14,15,1
-block_hint,TypedArrayConstructor,11,12,0
-block_hint,TypedArrayConstructor,2,3,0
-block_hint,TypedArrayPrototypeByteLength,69,70,1
-block_hint,TypedArrayPrototypeByteLength,43,44,1
-block_hint,TypedArrayPrototypeByteLength,45,46,1
-block_hint,TypedArrayPrototypeByteLength,71,72,0
-block_hint,TypedArrayPrototypeByteLength,73,74,0
-block_hint,TypedArrayPrototypeByteLength,65,66,0
-block_hint,TypedArrayPrototypeByteLength,33,34,0
-block_hint,TypedArrayPrototypeLength,50,51,1
-block_hint,TypedArrayPrototypeLength,33,34,1
-block_hint,TypedArrayPrototypeLength,35,36,1
-block_hint,TypedArrayPrototypeLength,52,53,0
-block_hint,TypedArrayPrototypeLength,44,45,0
-block_hint,TypedArrayPrototypeLength,28,29,0
-block_hint,TypedArrayPrototypeLength,19,20,0
-block_hint,WeakMapConstructor,351,352,1
-block_hint,WeakMapConstructor,271,272,1
-block_hint,WeakMapConstructor,119,120,0
-block_hint,WeakMapConstructor,14,15,1
-block_hint,WeakMapConstructor,293,294,1
-block_hint,WeakMapConstructor,230,231,1
-block_hint,WeakMapConstructor,93,94,0
-block_hint,WeakMapConstructor,95,96,1
-block_hint,WeakMapConstructor,295,296,1
-block_hint,WeakMapConstructor,331,332,0
-block_hint,WeakMapConstructor,342,343,0
-block_hint,WeakMapConstructor,239,240,0
-block_hint,WeakMapConstructor,123,124,0
-block_hint,WeakMapConstructor,241,242,0
-block_hint,WeakMapConstructor,109,110,0
-block_hint,WeakMapConstructor,243,244,1
-block_hint,WeakMapConstructor,211,212,1
-block_hint,WeakMapConstructor,28,29,1
-block_hint,WeakMapConstructor,30,31,1
-block_hint,WeakMapConstructor,32,33,1
-block_hint,WeakMapConstructor,98,99,0
-block_hint,WeakMapConstructor,117,118,1
-block_hint,WeakMapLookupHashIndex,9,10,1
-block_hint,WeakMapLookupHashIndex,31,32,1
-block_hint,WeakMapLookupHashIndex,11,12,0
-block_hint,WeakMapLookupHashIndex,13,14,0
-block_hint,WeakMapLookupHashIndex,25,26,1
-block_hint,WeakMapLookupHashIndex,33,34,1
-block_hint,WeakMapLookupHashIndex,27,28,0
-block_hint,WeakMapLookupHashIndex,23,24,0
-block_hint,WeakMapGet,12,13,1
-block_hint,WeakMapGet,7,8,1
-block_hint,WeakMapGet,9,10,1
-block_hint,WeakMapGet,3,4,1
-block_hint,WeakMapPrototypeHas,10,11,1
-block_hint,WeakMapPrototypeHas,5,6,1
-block_hint,WeakMapPrototypeHas,7,8,1
-block_hint,WeakMapPrototypeSet,24,25,1
-block_hint,WeakMapPrototypeSet,5,6,1
-block_hint,WeakMapPrototypeSet,7,8,1
-block_hint,WeakMapPrototypeSet,13,14,1
-block_hint,WeakMapPrototypeSet,22,23,1
-block_hint,WeakMapPrototypeSet,15,16,0
-block_hint,WeakMapPrototypeSet,9,10,0
-block_hint,WeakCollectionSet,17,18,1
-block_hint,WeakCollectionSet,20,21,0
-block_hint,WeakCollectionSet,7,8,1
-block_hint,WeakCollectionSet,13,14,0
-block_hint,AsyncGeneratorResolve,9,10,1
-block_hint,AsyncGeneratorResolve,3,4,1
-block_hint,AsyncGeneratorResolve,11,12,0
-block_hint,AsyncGeneratorResolve,7,8,0
-block_hint,AsyncGeneratorYieldWithAwait,24,25,1
-block_hint,AsyncGeneratorYieldWithAwait,19,20,0
-block_hint,AsyncGeneratorYieldWithAwait,6,7,1
-block_hint,AsyncGeneratorYieldWithAwait,42,43,1
-block_hint,AsyncGeneratorYieldWithAwait,37,38,0
-block_hint,AsyncGeneratorYieldWithAwait,28,29,1
-block_hint,AsyncGeneratorYieldWithAwait,8,9,1
-block_hint,AsyncGeneratorYieldWithAwait,10,11,1
-block_hint,AsyncGeneratorYieldWithAwait,12,13,1
-block_hint,AsyncGeneratorYieldWithAwait,14,15,1
-block_hint,AsyncGeneratorYieldWithAwait,22,23,0
-block_hint,AsyncGeneratorResumeNext,18,19,0
-block_hint,AsyncGeneratorResumeNext,14,15,0
-block_hint,AsyncGeneratorPrototypeNext,27,28,1
-block_hint,AsyncGeneratorPrototypeNext,16,17,1
-block_hint,AsyncGeneratorPrototypeNext,4,5,1
-block_hint,AsyncGeneratorPrototypeNext,34,35,1
-block_hint,AsyncGeneratorPrototypeNext,29,30,0
-block_hint,AsyncGeneratorPrototypeNext,18,19,1
-block_hint,AsyncGeneratorPrototypeNext,20,21,1
-block_hint,AsyncGeneratorPrototypeNext,22,23,1
-block_hint,AsyncGeneratorPrototypeNext,6,7,1
-block_hint,AsyncGeneratorPrototypeNext,11,12,0
-block_hint,AsyncGeneratorAwaitUncaught,24,25,1
-block_hint,AsyncGeneratorAwaitUncaught,19,20,1
-block_hint,AsyncGeneratorAwaitUncaught,2,3,1
-block_hint,AsyncGeneratorAwaitUncaught,30,31,1
-block_hint,AsyncGeneratorAwaitUncaught,32,33,0
-block_hint,AsyncGeneratorAwaitUncaught,28,29,1
-block_hint,AsyncGeneratorAwaitUncaught,8,9,1
-block_hint,AsyncGeneratorAwaitUncaught,10,11,1
-block_hint,AsyncGeneratorAwaitUncaught,12,13,1
-block_hint,AsyncGeneratorAwaitUncaught,14,15,1
-block_hint,AsyncGeneratorAwaitUncaught,22,23,0
-block_hint,AsyncGeneratorAwaitResolveClosure,8,9,1
-block_hint,AsyncGeneratorAwaitResolveClosure,2,3,1
-block_hint,AsyncGeneratorAwaitResolveClosure,6,7,0
-block_hint,AsyncGeneratorYieldWithAwaitResolveClosure,5,6,1
-block_hint,AsyncGeneratorYieldWithAwaitResolveClosure,2,3,1
-block_hint,StringAdd_CheckNone,19,20,1
-block_hint,StringAdd_CheckNone,58,59,0
-block_hint,StringAdd_CheckNone,78,79,1
-block_hint,StringAdd_CheckNone,42,43,1
-block_hint,StringAdd_CheckNone,60,61,0
-block_hint,StringAdd_CheckNone,94,95,0
-block_hint,StringAdd_CheckNone,84,85,0
-block_hint,StringAdd_CheckNone,88,89,0
-block_hint,StringAdd_CheckNone,64,65,1
-block_hint,StringAdd_CheckNone,76,77,1
-block_hint,StringAdd_CheckNone,55,56,1
-block_hint,StringAdd_CheckNone,13,14,0
-block_hint,StringAdd_CheckNone,15,16,0
-block_hint,StringAdd_CheckNone,92,93,1
-block_hint,StringAdd_CheckNone,82,83,1
-block_hint,StringAdd_CheckNone,34,35,0
-block_hint,StringAdd_CheckNone,38,39,0
-block_hint,StringAdd_CheckNone,40,41,1
-block_hint,StringAdd_CheckNone,53,54,1
-block_hint,StringAdd_CheckNone,11,12,0
-block_hint,StringAdd_CheckNone,90,91,1
-block_hint,StringAdd_CheckNone,80,81,1
-block_hint,StringAdd_CheckNone,26,27,0
-block_hint,StringAdd_CheckNone,30,31,0
-block_hint,SubString,63,64,1
-block_hint,SubString,101,102,1
-block_hint,SubString,58,59,1
-block_hint,SubString,56,57,1
-block_hint,SubString,114,115,0
-block_hint,SubString,85,86,1
-block_hint,SubString,19,20,0
-block_hint,SubString,21,22,0
-block_hint,SubString,130,131,1
-block_hint,SubString,118,119,1
-block_hint,SubString,38,39,0
-block_hint,SubString,83,84,1
-block_hint,SubString,17,18,0
-block_hint,SubString,132,133,1
-block_hint,SubString,120,121,1
-block_hint,SubString,42,43,0
-block_hint,SubString,75,76,1
-block_hint,SubString,139,140,0
-block_hint,SubString,103,104,1
-block_hint,SubString,34,35,1
-block_hint,SubString,31,32,0
-block_hint,GetProperty,56,57,1
-block_hint,GetProperty,101,102,0
-block_hint,GetProperty,175,176,1
-block_hint,GetProperty,205,206,0
-block_hint,GetProperty,165,166,1
-block_hint,GetProperty,133,134,1
-block_hint,GetProperty,60,61,1
-block_hint,GetProperty,139,140,0
-block_hint,GetProperty,141,142,0
-block_hint,GetProperty,110,111,0
-block_hint,GetProperty,62,63,0
-block_hint,GetProperty,167,168,0
-block_hint,GetProperty,220,221,0
-block_hint,GetProperty,210,211,1
-block_hint,GetProperty,112,113,0
-block_hint,GetProperty,231,232,0
-block_hint,GetProperty,222,223,0
-block_hint,GetProperty,218,219,0
-block_hint,GetProperty,35,36,1
-block_hint,GetProperty,224,225,0
-block_hint,GetProperty,37,38,0
-block_hint,GetProperty,147,148,0
-block_hint,GetProperty,187,188,1
-block_hint,GetProperty,41,42,0
-block_hint,GetProperty,43,44,0
-block_hint,GetProperty,157,158,0
-block_hint,GetProperty,161,162,1
-block_hint,GetProperty,151,152,0
-block_hint,GetProperty,47,48,0
-block_hint,GetProperty,233,234,0
-block_hint,GetProperty,196,197,1
-block_hint,GetProperty,92,93,0
-block_hint,GetProperty,94,95,0
-block_hint,GetProperty,96,97,0
-block_hint,GetProperty,163,164,0
-block_hint,GetProperty,98,99,1
-block_hint,GetProperty,203,204,0
-block_hint,GetProperty,228,229,0
-block_hint,GetProperty,235,236,1
-block_hint,GetProperty,201,202,0
-block_hint,GetProperty,199,200,0
-block_hint,GetProperty,22,23,0
-block_hint,GetProperty,182,183,1
-block_hint,GetProperty,104,105,1
-block_hint,GetPropertyWithReceiver,58,59,1
-block_hint,GetPropertyWithReceiver,60,61,1
-block_hint,GetPropertyWithReceiver,203,204,0
-block_hint,GetPropertyWithReceiver,174,175,1
-block_hint,GetPropertyWithReceiver,211,212,0
-block_hint,GetPropertyWithReceiver,112,113,0
-block_hint,GetPropertyWithReceiver,162,163,1
-block_hint,GetPropertyWithReceiver,138,139,1
-block_hint,GetPropertyWithReceiver,62,63,1
-block_hint,GetPropertyWithReceiver,144,145,0
-block_hint,GetPropertyWithReceiver,146,147,0
-block_hint,GetPropertyWithReceiver,114,115,0
-block_hint,GetPropertyWithReceiver,64,65,0
-block_hint,GetPropertyWithReceiver,164,165,0
-block_hint,GetPropertyWithReceiver,217,218,1
-block_hint,GetPropertyWithReceiver,117,118,0
-block_hint,GetPropertyWithReceiver,238,239,0
-block_hint,GetPropertyWithReceiver,234,235,0
-block_hint,GetPropertyWithReceiver,225,226,0
-block_hint,GetPropertyWithReceiver,148,149,1
-block_hint,GetPropertyWithReceiver,38,39,1
-block_hint,GetPropertyWithReceiver,236,237,0
-block_hint,GetPropertyWithReceiver,40,41,0
-block_hint,GetPropertyWithReceiver,183,184,0
-block_hint,GetPropertyWithReceiver,34,35,0
-block_hint,GetPropertyWithReceiver,231,232,1
-block_hint,GetPropertyWithReceiver,205,206,0
-block_hint,GetPropertyWithReceiver,181,182,1
-block_hint,GetPropertyWithReceiver,108,109,1
-block_hint,SetProperty,379,380,1
-block_hint,SetProperty,381,382,0
-block_hint,SetProperty,1201,1202,0
-block_hint,SetProperty,925,926,1
-block_hint,SetProperty,1034,1035,1
-block_hint,SetProperty,1036,1037,0
-block_hint,SetProperty,733,734,0
-block_hint,SetProperty,919,920,1
-block_hint,SetProperty,413,414,0
-block_hint,SetProperty,415,416,0
-block_hint,SetProperty,256,257,1
-block_hint,SetProperty,417,418,0
-block_hint,SetProperty,630,631,1
-block_hint,SetProperty,92,93,1
-block_hint,SetProperty,94,95,1
-block_hint,SetProperty,1089,1090,0
-block_hint,SetProperty,808,809,1
-block_hint,SetProperty,810,811,1
-block_hint,SetProperty,812,813,0
-block_hint,SetProperty,104,105,1
-block_hint,SetProperty,108,109,1
-block_hint,SetProperty,429,430,1
-block_hint,SetProperty,110,111,1
-block_hint,SetProperty,106,107,1
-block_hint,CreateDataProperty,319,320,1
-block_hint,CreateDataProperty,321,322,0
-block_hint,CreateDataProperty,978,979,0
-block_hint,CreateDataProperty,779,780,1
-block_hint,CreateDataProperty,868,869,1
-block_hint,CreateDataProperty,539,540,1
-block_hint,CreateDataProperty,645,646,0
-block_hint,CreateDataProperty,647,648,1
-block_hint,CreateDataProperty,903,904,1
-block_hint,CreateDataProperty,333,334,0
-block_hint,CreateDataProperty,55,56,1
-block_hint,CreateDataProperty,543,544,1
-block_hint,CreateDataProperty,57,58,1
-block_hint,FindNonDefaultConstructorOrConstruct,12,13,0
-block_hint,FindNonDefaultConstructorOrConstruct,6,7,0
-block_hint,FindNonDefaultConstructorOrConstruct,14,15,1
-block_hint,FindNonDefaultConstructorOrConstruct,16,17,0
-block_hint,FindNonDefaultConstructorOrConstruct,4,5,1
-block_hint,FindNonDefaultConstructorOrConstruct,18,19,1
-block_hint,ArrayPrototypeConcat,79,80,1
-block_hint,ArrayPrototypeConcat,54,55,0
-block_hint,ArrayPrototypeConcat,63,64,1
-block_hint,ArrayPrototypeConcat,74,75,0
-block_hint,ArrayPrototypeConcat,81,82,0
-block_hint,ArrayPrototypeConcat,70,71,1
-block_hint,ArrayPrototypeConcat,37,38,1
-block_hint,ArrayPrototypeConcat,16,17,1
-block_hint,ArrayPrototypeConcat,3,4,1
-block_hint,ArrayPrototypeConcat,25,26,1
-block_hint,ArrayPrototypeConcat,9,10,0
-block_hint,ArrayPrototypeConcat,20,21,1
-block_hint,ArrayPrototypeConcat,30,31,0
-block_hint,ArrayPrototypeConcat,42,43,0
-block_hint,ArrayPrototypeConcat,72,73,1
-block_hint,ArrayPrototypeConcat,39,40,1
-block_hint,ArrayPrototypeConcat,18,19,1
-block_hint,ArrayPrototypeConcat,5,6,1
-block_hint,ArrayPrototypeConcat,57,58,1
-block_hint,ArrayPrototypeConcat,59,60,0
-block_hint,ArrayPrototypeConcat,66,67,0
-block_hint,ArrayPrototypeConcat,33,34,1
-block_hint,ArrayPrototypeConcat,68,69,0
-block_hint,ArrayPrototypeConcat,35,36,1
-block_hint,ArrayPrototypeConcat,27,28,1
-block_hint,ArrayPrototypeConcat,11,12,1
-block_hint,ArrayEvery,73,74,1
-block_hint,ArrayEvery,31,32,0
-block_hint,ArrayEvery,122,123,1
-block_hint,ArrayEvery,116,117,1
-block_hint,ArrayEvery,91,92,1
-block_hint,ArrayEvery,93,94,1
-block_hint,ArrayEvery,99,100,0
-block_hint,ArrayEvery,105,106,1
-block_hint,ArrayEvery,107,108,1
-block_hint,ArrayEvery,97,98,1
-block_hint,ArrayEvery,49,50,0
-block_hint,ArrayEvery,102,103,1
-block_hint,ArrayEvery,66,67,1
-block_hint,ArrayEvery,45,46,1
-block_hint,ArrayEvery,12,13,1
-block_hint,ArrayEvery,57,58,1
-block_hint,ArrayEvery,28,29,0
-block_hint,ArrayEvery,68,69,1
-block_hint,ArrayEvery,70,71,0
-block_hint,ArrayEvery,51,52,0
-block_hint,ArrayEvery,47,48,0
-block_hint,ArrayEvery,18,19,0
-block_hint,ArrayEvery,20,21,1
-block_hint,ArrayEvery,61,62,0
-block_hint,ArrayEvery,109,110,1
-block_hint,ArrayEvery,87,88,0
-block_hint,ArrayEvery,89,90,0
-block_hint,ArrayEvery,111,112,0
-block_hint,ArrayEvery,79,80,0
-block_hint,ArrayFilter,195,196,1
-block_hint,ArrayFilter,84,85,0
-block_hint,ArrayFilter,295,296,1
-block_hint,ArrayFilter,286,287,1
-block_hint,ArrayFilter,225,226,1
-block_hint,ArrayFilter,227,228,1
-block_hint,ArrayFilter,246,247,0
-block_hint,ArrayFilter,270,271,1
-block_hint,ArrayFilter,272,273,1
-block_hint,ArrayFilter,239,240,0
-block_hint,ArrayFilter,276,277,1
-block_hint,ArrayFilter,197,198,1
-block_hint,ArrayFilter,123,124,1
-block_hint,ArrayFilter,22,23,1
-block_hint,ArrayFilter,199,200,1
-block_hint,ArrayFilter,125,126,0
-block_hint,ArrayFilter,24,25,1
-block_hint,ArrayFilter,267,268,1
-block_hint,ArrayFilter,166,167,0
-block_hint,ArrayFilter,278,279,1
-block_hint,ArrayFilter,201,202,1
-block_hint,ArrayFilter,127,128,1
-block_hint,ArrayFilter,26,27,1
-block_hint,ArrayFilter,211,212,1
-block_hint,ArrayFilter,213,214,0
-block_hint,ArrayFilter,283,284,1
-block_hint,ArrayFilter,215,216,1
-block_hint,ArrayFilter,217,218,1
-block_hint,ArrayFilter,219,220,1
-block_hint,ArrayFilter,203,204,1
-block_hint,ArrayFilter,129,130,0
-block_hint,ArrayFilter,28,29,1
-block_hint,ArrayFilter,171,172,0
-block_hint,ArrayFilter,103,104,0
-block_hint,ArrayFilter,242,243,1
-block_hint,ArrayFilter,244,245,0
-block_hint,ArrayFilter,205,206,0
-block_hint,ArrayFilter,131,132,0
-block_hint,ArrayFilter,42,43,0
-block_hint,ArrayFilter,44,45,1
-block_hint,ArrayFilter,149,150,0
-block_hint,ArrayFilter,249,250,1
-block_hint,ArrayFilter,177,178,0
-block_hint,ArrayFilter,179,180,0
-block_hint,ArrayFilter,251,252,0
-block_hint,ArrayFilter,253,254,0
-block_hint,ArrayFilter,255,256,1
-block_hint,ArrayFilter,257,258,0
-block_hint,ArrayFilter,259,260,1
-block_hint,ArrayFilter,280,281,0
-block_hint,ArrayFilter,237,238,0
-block_hint,ArrayFilter,161,162,0
-block_hint,ArrayFilter,95,96,0
-block_hint,ArrayFilter,187,188,1
-block_hint,ArrayFilter,60,61,0
-block_hint,ArrayFilter,64,65,1
-block_hint,ArrayFilter,50,51,1
-block_hint,ArrayForEach,70,71,1
-block_hint,ArrayForEach,29,30,0
-block_hint,ArrayForEach,99,100,1
-block_hint,ArrayForEach,95,96,1
-block_hint,ArrayForEach,76,77,1
-block_hint,ArrayForEach,78,79,1
-block_hint,ArrayForEach,84,85,0
-block_hint,ArrayForEach,90,91,1
-block_hint,ArrayForEach,92,93,1
-block_hint,ArrayForEach,47,48,0
-block_hint,ArrayForEach,87,88,1
-block_hint,ArrayForEach,63,64,1
-block_hint,ArrayForEach,43,44,1
-block_hint,ArrayForEach,12,13,1
-block_hint,ArrayForEach,53,54,1
-block_hint,ArrayForEach,26,27,0
-block_hint,ArrayForEach,65,66,1
-block_hint,ArrayForEach,67,68,0
-block_hint,ArrayForEach,49,50,0
-block_hint,ArrayForEach,45,46,0
-block_hint,ArrayForEach,18,19,0
-block_hint,ArrayForEach,20,21,1
-block_hint,ArrayForEach,58,59,0
-block_hint,ArrayFrom,225,226,1
-block_hint,ArrayFrom,76,77,1
-block_hint,ArrayFrom,78,79,1
-block_hint,ArrayFrom,8,9,1
-block_hint,ArrayFrom,342,343,1
-block_hint,ArrayFrom,338,339,0
-block_hint,ArrayFrom,327,328,0
-block_hint,ArrayFrom,311,312,1
-block_hint,ArrayFrom,309,310,0
-block_hint,ArrayFrom,80,81,1
-block_hint,ArrayFrom,10,11,1
-block_hint,ArrayFrom,322,323,1
-block_hint,ArrayFrom,305,306,0
-block_hint,ArrayFrom,245,246,1
-block_hint,ArrayFrom,266,267,0
-block_hint,ArrayFrom,82,83,1
-block_hint,ArrayFrom,12,13,1
-block_hint,ArrayFrom,268,269,1
-block_hint,ArrayFrom,213,214,0
-block_hint,ArrayFrom,290,291,1
-block_hint,ArrayFrom,248,249,0
-block_hint,ArrayFrom,285,286,1
-block_hint,ArrayFrom,281,282,0
-block_hint,ArrayFrom,188,189,1
-block_hint,ArrayFrom,88,89,1
-block_hint,ArrayFrom,18,19,1
-block_hint,ArrayFrom,215,216,1
-block_hint,ArrayFrom,72,73,1
-block_hint,ArrayIsArray,13,14,1
-block_hint,ArrayIsArray,9,10,1
-block_hint,ArrayIsArray,7,8,0
-block_hint,LoadJoinElement_FastSmiOrObjectElements_0,2,3,1
-block_hint,LoadJoinElement_FastSmiOrObjectElements_0,4,5,0
-block_hint,LoadJoinElement_FastDoubleElements_0,3,4,1
-block_hint,LoadJoinElement_FastDoubleElements_0,5,6,0
-block_hint,LoadJoinElement_FastDoubleElements_0,7,8,1
-block_hint,JoinStackPush,28,29,1
-block_hint,JoinStackPush,6,7,1
-block_hint,JoinStackPush,10,11,0
-block_hint,JoinStackPop,9,10,1
-block_hint,JoinStackPop,4,5,1
-block_hint,ArrayPrototypeJoin,518,519,1
-block_hint,ArrayPrototypeJoin,456,457,1
-block_hint,ArrayPrototypeJoin,419,420,1
-block_hint,ArrayPrototypeJoin,334,335,1
-block_hint,ArrayPrototypeJoin,336,337,1
-block_hint,ArrayPrototypeJoin,367,368,1
-block_hint,ArrayPrototypeJoin,340,341,0
-block_hint,ArrayPrototypeJoin,179,180,0
-block_hint,ArrayPrototypeJoin,474,475,1
-block_hint,ArrayPrototypeJoin,440,441,1
-block_hint,ArrayPrototypeJoin,328,329,0
-block_hint,ArrayPrototypeJoin,228,229,1
-block_hint,ArrayPrototypeJoin,30,31,1
-block_hint,ArrayPrototypeJoin,181,182,0
-block_hint,ArrayPrototypeJoin,32,33,1
-block_hint,ArrayPrototypeJoin,387,388,1
-block_hint,ArrayPrototypeJoin,325,326,0
-block_hint,ArrayPrototypeJoin,143,144,1
-block_hint,ArrayPrototypeJoin,493,494,1
-block_hint,ArrayPrototypeJoin,458,459,0
-block_hint,ArrayPrototypeJoin,423,424,0
-block_hint,ArrayPrototypeJoin,369,370,1
-block_hint,ArrayPrototypeJoin,183,184,1
-block_hint,ArrayPrototypeJoin,38,39,1
-block_hint,ArrayPrototypeJoin,460,461,1
-block_hint,ArrayPrototypeJoin,425,426,0
-block_hint,ArrayPrototypeJoin,295,296,1
-block_hint,ArrayPrototypeJoin,428,429,0
-block_hint,ArrayPrototypeJoin,345,346,0
-block_hint,ArrayPrototypeJoin,189,190,0
-block_hint,ArrayPrototypeJoin,232,233,1
-block_hint,ArrayPrototypeJoin,145,146,1
-block_hint,ArrayPrototypeJoin,483,484,0
-block_hint,ArrayPrototypeJoin,487,488,1
-block_hint,ArrayPrototypeJoin,525,526,0
-block_hint,ArrayPrototypeJoin,521,522,0
-block_hint,ArrayPrototypeJoin,514,515,1
-block_hint,ArrayPrototypeJoin,489,490,1
-block_hint,ArrayPrototypeJoin,485,486,0
-block_hint,ArrayPrototypeJoin,147,148,0
-block_hint,ArrayPrototypeJoin,149,150,0
-block_hint,ArrayPrototypeJoin,469,470,0
-block_hint,ArrayPrototypeJoin,471,472,0
-block_hint,ArrayPrototypeJoin,454,455,1
-block_hint,ArrayPrototypeJoin,407,408,1
-block_hint,ArrayPrototypeJoin,409,410,1
-block_hint,ArrayPrototypeJoin,411,412,1
-block_hint,ArrayPrototypeJoin,413,414,1
-block_hint,ArrayPrototypeJoin,197,198,1
-block_hint,ArrayPrototypeJoin,254,255,0
-block_hint,ArrayPrototypeJoin,256,257,0
-block_hint,ArrayPrototypeJoin,302,303,0
-block_hint,ArrayPrototypeJoin,262,263,0
-block_hint,ArrayPrototypeJoin,264,265,0
-block_hint,ArrayPrototypeJoin,203,204,1
-block_hint,ArrayPrototypeJoin,72,73,1
-block_hint,ArrayPrototypeJoin,381,382,1
-block_hint,ArrayPrototypeJoin,305,306,0
-block_hint,ArrayPrototypeJoin,207,208,1
-block_hint,ArrayPrototypeJoin,270,271,0
-block_hint,ArrayPrototypeJoin,272,273,0
-block_hint,ArrayPrototypeJoin,209,210,1
-block_hint,ArrayPrototypeJoin,86,87,1
-block_hint,ArrayPrototypeJoin,307,308,0
-block_hint,ArrayPrototypeJoin,219,220,1
-block_hint,ArrayPrototypeJoin,102,103,0
-block_hint,ArrayPrototypeJoin,104,105,0
-block_hint,ArrayPrototypeJoin,435,436,1
-block_hint,ArrayPrototypeJoin,403,404,1
-block_hint,ArrayPrototypeJoin,217,218,1
-block_hint,ArrayPrototypeJoin,100,101,0
-block_hint,ArrayPrototypeJoin,433,434,1
-block_hint,ArrayPrototypeJoin,401,402,1
-block_hint,ArrayPrototypeJoin,96,97,1
-block_hint,ArrayPrototypeJoin,352,353,1
-block_hint,ArrayPrototypeJoin,311,312,0
-block_hint,ArrayPrototypeJoin,215,216,0
-block_hint,ArrayPrototypeJoin,106,107,1
-block_hint,ArrayPrototypeJoin,108,109,0
-block_hint,ArrayPrototypeJoin,110,111,1
-block_hint,ArrayPrototypeJoin,284,285,1
-block_hint,ArrayPrototypeJoin,139,140,1
-block_hint,ArrayPrototypeToString,14,15,1
-block_hint,ArrayPrototypeToString,11,12,1
-block_hint,ArrayPrototypeToString,8,9,1
-block_hint,ArrayPrototypeToString,5,6,1
-block_hint,ArrayPrototypeToString,3,4,1
-block_hint,ArrayPrototypeLastIndexOf,279,280,1
-block_hint,ArrayPrototypeLastIndexOf,261,262,1
-block_hint,ArrayPrototypeLastIndexOf,245,246,1
-block_hint,ArrayPrototypeLastIndexOf,175,176,1
-block_hint,ArrayPrototypeLastIndexOf,177,178,1
-block_hint,ArrayPrototypeLastIndexOf,91,92,0
-block_hint,ArrayPrototypeLastIndexOf,41,42,1
-block_hint,ArrayPrototypeLastIndexOf,375,376,0
-block_hint,ArrayPrototypeLastIndexOf,361,362,0
-block_hint,ArrayPrototypeLastIndexOf,367,368,0
-block_hint,ArrayPrototypeLastIndexOf,358,359,0
-block_hint,ArrayPrototypeLastIndexOf,335,336,0
-block_hint,ArrayPrototypeLastIndexOf,324,325,1
-block_hint,ArrayPrototypeLastIndexOf,338,339,0
-block_hint,ArrayPrototypeLastIndexOf,328,329,0
-block_hint,ArrayPrototypeLastIndexOf,315,316,0
-block_hint,ArrayPrototypeLastIndexOf,300,301,0
-block_hint,ArrayPrototypeLastIndexOf,313,314,0
-block_hint,ArrayPrototypeLastIndexOf,298,299,0
-block_hint,ArrayPrototypeLastIndexOf,281,282,1
-block_hint,ArrayPrototypeLastIndexOf,252,253,0
-block_hint,ArrayPrototypeLastIndexOf,194,195,1
-block_hint,ArrayPrototypeLastIndexOf,83,84,1
-block_hint,ArrayPrototypeLastIndexOf,73,74,1
-block_hint,ArrayPrototypeLastIndexOf,21,22,1
-block_hint,ArrayPrototypeLastIndexOf,85,86,1
-block_hint,ArrayPrototypeLastIndexOf,77,78,0
-block_hint,ArrayPrototypeLastIndexOf,29,30,1
-block_hint,ArrayPrototypeLastIndexOf,60,61,0
-block_hint,ArrayPrototypeLastIndexOf,98,99,1
-block_hint,ArrayPrototypeLastIndexOf,56,57,0
-block_hint,ArrayPrototypeLastIndexOf,23,24,1
-block_hint,ArrayPrototypeLastIndexOf,58,59,0
-block_hint,ArrayPrototypeLastIndexOf,214,215,0
-block_hint,ArrayPrototypeLastIndexOf,220,221,1
-block_hint,ArrayPrototypeLastIndexOf,239,240,0
-block_hint,ArrayPrototypeLastIndexOf,212,213,0
-block_hint,ArrayPrototypeLastIndexOf,145,146,0
-block_hint,ArrayPrototypeLastIndexOf,129,130,1
-block_hint,ArrayPrototypeLastIndexOf,31,32,0
-block_hint,ArrayMap,165,166,1
-block_hint,ArrayMap,72,73,0
-block_hint,ArrayMap,255,256,1
-block_hint,ArrayMap,236,237,1
-block_hint,ArrayMap,188,189,1
-block_hint,ArrayMap,190,191,1
-block_hint,ArrayMap,209,210,0
-block_hint,ArrayMap,221,222,1
-block_hint,ArrayMap,223,224,1
-block_hint,ArrayMap,245,246,1
-block_hint,ArrayMap,206,207,0
-block_hint,ArrayMap,218,219,1
-block_hint,ArrayMap,225,226,1
-block_hint,ArrayMap,167,168,1
-block_hint,ArrayMap,114,115,1
-block_hint,ArrayMap,23,24,1
-block_hint,ArrayMap,233,234,1
-block_hint,ArrayMap,211,212,0
-block_hint,ArrayMap,159,160,0
-block_hint,ArrayMap,74,75,0
-block_hint,ArrayMap,182,183,1
-block_hint,ArrayMap,157,158,1
-block_hint,ArrayMap,55,56,0
-block_hint,ArrayMap,268,269,1
-block_hint,ArrayMap,265,266,0
-block_hint,ArrayMap,248,249,0
-block_hint,ArrayMap,227,228,0
-block_hint,ArrayMap,195,196,0
-block_hint,ArrayMap,116,117,0
-block_hint,ArrayMap,29,30,0
-block_hint,ArrayMap,31,32,1
-block_hint,ArrayMap,132,133,0
-block_hint,ArrayMap,33,34,1
-block_hint,ArrayMap,120,121,0
-block_hint,ArrayMap,37,38,1
-block_hint,ArrayMap,35,36,1
-block_hint,ArrayMap,253,254,0
-block_hint,ArrayMap,203,204,0
-block_hint,ArrayMap,149,150,0
-block_hint,ArrayMap,45,46,1
-block_hint,ArrayMap,151,152,0
-block_hint,ArrayMap,89,90,1
-block_hint,ArrayMap,83,84,0
-block_hint,ArrayMap,85,86,1
-block_hint,ArrayMap,184,185,1
-block_hint,ArrayMap,161,162,0
-block_hint,ArrayMap,62,63,0
-block_hint,ArrayMap,64,65,1
-block_hint,ArrayMap,96,97,1
-block_hint,ArrayMap,47,48,1
-block_hint,ArrayMap,153,154,1
-block_hint,ArrayMap,98,99,1
-block_hint,ArrayMap,49,50,1
-block_hint,ArrayMap,135,136,1
-block_hint,ArrayReduce,81,82,1
-block_hint,ArrayReduce,30,31,0
-block_hint,ArrayReduce,124,125,1
-block_hint,ArrayReduce,120,121,1
-block_hint,ArrayReduce,89,90,1
-block_hint,ArrayReduce,91,92,1
-block_hint,ArrayReduce,101,102,0
-block_hint,ArrayReduce,111,112,1
-block_hint,ArrayReduce,113,114,1
-block_hint,ArrayReduce,95,96,1
-block_hint,ArrayReduce,104,105,0
-block_hint,ArrayReduce,49,50,0
-block_hint,ArrayReduce,106,107,1
-block_hint,ArrayReduce,65,66,1
-block_hint,ArrayReduce,45,46,1
-block_hint,ArrayReduce,12,13,1
-block_hint,ArrayReduce,53,54,1
-block_hint,ArrayReduce,26,27,0
-block_hint,ArrayReduce,99,100,0
-block_hint,ArrayReduce,67,68,1
-block_hint,ArrayReduce,69,70,0
-block_hint,ArrayReduce,117,118,0
-block_hint,ArrayReduce,97,98,0
-block_hint,ArrayReduce,71,72,0
-block_hint,ArrayReduce,47,48,0
-block_hint,ArrayReduce,18,19,0
-block_hint,ArrayReduce,20,21,1
-block_hint,ArrayReduce,57,58,0
-block_hint,ArrayReduce,59,60,0
-block_hint,ArrayReduce,23,24,0
-block_hint,ArrayPrototypeReverse,236,237,1
-block_hint,ArrayPrototypeReverse,210,211,1
-block_hint,ArrayPrototypeReverse,190,191,1
-block_hint,ArrayPrototypeReverse,152,153,1
-block_hint,ArrayPrototypeReverse,103,104,1
-block_hint,ArrayPrototypeReverse,18,19,1
-block_hint,ArrayPrototypeReverse,192,193,1
-block_hint,ArrayPrototypeReverse,169,170,0
-block_hint,ArrayPrototypeReverse,140,141,1
-block_hint,ArrayPrototypeReverse,118,119,1
-block_hint,ArrayPrototypeReverse,89,90,0
-block_hint,ArrayPrototypeShift,237,238,1
-block_hint,ArrayPrototypeShift,205,206,1
-block_hint,ArrayPrototypeShift,185,186,1
-block_hint,ArrayPrototypeShift,132,133,1
-block_hint,ArrayPrototypeShift,81,82,1
-block_hint,ArrayPrototypeShift,11,12,1
-block_hint,ArrayPrototypeShift,196,197,1
-block_hint,ArrayPrototypeShift,168,169,0
-block_hint,ArrayPrototypeShift,134,135,1
-block_hint,ArrayPrototypeShift,83,84,0
-block_hint,ArrayPrototypeShift,13,14,1
-block_hint,ArrayPrototypeShift,136,137,0
-block_hint,ArrayPrototypeShift,85,86,0
-block_hint,ArrayPrototypeShift,68,69,0
-block_hint,ArrayPrototypeShift,87,88,0
-block_hint,ArrayPrototypeShift,27,28,0
-block_hint,ArrayPrototypeShift,29,30,1
-block_hint,ArrayPrototypeShift,170,171,0
-block_hint,ArrayPrototypeShift,89,90,0
-block_hint,ArrayPrototypeShift,33,34,0
-block_hint,ArrayPrototypeShift,148,149,0
-block_hint,ArrayPrototypeShift,111,112,1
-block_hint,ArrayPrototypeShift,91,92,0
-block_hint,ArrayPrototypeShift,39,40,0
-block_hint,ArrayPrototypeShift,41,42,1
-block_hint,ArrayPrototypeSlice,288,289,1
-block_hint,ArrayPrototypeSlice,267,268,1
-block_hint,ArrayPrototypeSlice,245,246,1
-block_hint,ArrayPrototypeSlice,182,183,1
-block_hint,ArrayPrototypeSlice,81,82,1
-block_hint,ArrayPrototypeSlice,12,13,1
-block_hint,ArrayPrototypeSlice,83,84,1
-block_hint,ArrayPrototypeSlice,14,15,1
-block_hint,ArrayPrototypeSlice,16,17,1
-block_hint,ArrayPrototypeSlice,87,88,1
-block_hint,ArrayPrototypeSlice,511,512,0
-block_hint,ArrayPrototypeSlice,509,510,0
-block_hint,ArrayPrototypeSlice,485,486,0
-block_hint,ArrayPrototypeSlice,448,449,0
-block_hint,ArrayPrototypeSlice,428,429,0
-block_hint,ArrayPrototypeSlice,405,406,0
-block_hint,ArrayPrototypeSlice,446,447,0
-block_hint,ArrayPrototypeSlice,426,427,0
-block_hint,ArrayPrototypeSlice,401,402,1
-block_hint,ArrayPrototypeSlice,479,480,0
-block_hint,ArrayPrototypeSlice,465,466,0
-block_hint,ArrayPrototypeSlice,454,455,0
-block_hint,ArrayPrototypeSlice,424,425,0
-block_hint,ArrayPrototypeSlice,422,423,0
-block_hint,ArrayPrototypeSlice,393,394,1
-block_hint,ArrayPrototypeSlice,332,333,0
-block_hint,ArrayPrototypeSlice,277,278,1
-block_hint,ArrayPrototypeSlice,257,258,0
-block_hint,ArrayPrototypeSlice,89,90,1
-block_hint,ArrayPrototypeSlice,20,21,1
-block_hint,ArrayPrototypeSlice,128,129,1
-block_hint,ArrayPrototypeSlice,66,67,0
-block_hint,ArrayPrototypeSlice,443,444,0
-block_hint,ArrayPrototypeSlice,386,387,0
-block_hint,ArrayPrototypeSlice,364,365,0
-block_hint,ArrayPrototypeSlice,384,385,0
-block_hint,ArrayPrototypeSlice,362,363,0
-block_hint,ArrayPrototypeSlice,344,345,1
-block_hint,ArrayPrototypeSlice,437,438,0
-block_hint,ArrayPrototypeSlice,413,414,0
-block_hint,ArrayPrototypeSlice,388,389,0
-block_hint,ArrayPrototypeSlice,360,361,0
-block_hint,ArrayPrototypeSlice,340,341,1
-block_hint,ArrayPrototypeSlice,309,310,0
-block_hint,ArrayPrototypeSlice,296,297,0
-block_hint,ArrayPrototypeSlice,284,285,0
-block_hint,ArrayPrototypeSlice,261,262,0
-block_hint,ArrayPrototypeSlice,238,239,1
-block_hint,ArrayPrototypeSlice,141,142,0
-block_hint,ArrayPrototypeSlice,143,144,0
-block_hint,ArrayPrototypeSlice,190,191,1
-block_hint,ArrayPrototypeSlice,211,212,0
-block_hint,ArrayPrototypeSlice,91,92,1
-block_hint,ArrayPrototypeSlice,22,23,1
-block_hint,ArrayPrototypeSlice,197,198,1
-block_hint,ArrayPrototypeSlice,134,135,0
-block_hint,ArrayPrototypeSlice,69,70,0
-block_hint,ArrayPrototypeSlice,93,94,1
-block_hint,ArrayPrototypeSlice,24,25,1
-block_hint,ArrayPrototypeSlice,95,96,1
-block_hint,ArrayPrototypeSlice,26,27,1
-block_hint,ArrayPrototypeSlice,28,29,1
-block_hint,ArrayPrototypeSlice,99,100,1
-block_hint,ArrayPrototypeSlice,42,43,1
-block_hint,ArrayPrototypeSlice,145,146,1
-block_hint,ArrayPrototypeSlice,174,175,0
-block_hint,ArrayPrototypeSlice,176,177,1
-block_hint,ArrayPrototypeSlice,157,158,0
-block_hint,ArrayPrototypeSlice,101,102,0
-block_hint,ArrayPrototypeSlice,32,33,1
-block_hint,ArrayPrototypeSlice,250,251,0
-block_hint,ArrayPrototypeSlice,221,222,1
-block_hint,ArrayPrototypeSlice,118,119,0
-block_hint,ArrayPrototypeSlice,57,58,0
-block_hint,ArrayPrototypeSlice,59,60,1
-block_hint,ArrayPrototypeSlice,75,76,1
-block_hint,ArrayPrototypeSlice,103,104,0
-block_hint,ArrayPrototypeSlice,232,233,0
-block_hint,ArrayPrototypeSlice,164,165,1
-block_hint,ArrayPrototypeSlice,71,72,1
-block_hint,ArrayPrototypeSlice,178,179,0
-block_hint,ArrayPrototypeSlice,160,161,0
-block_hint,ArrayPrototypeSlice,109,110,0
-block_hint,ArrayPrototypeSlice,44,45,1
-block_hint,ArrayPrototypeSlice,248,249,0
-block_hint,ArrayPrototypeSlice,217,218,1
-block_hint,ArrayPrototypeSlice,116,117,0
-block_hint,ArrayPrototypeSlice,49,50,0
-block_hint,ArrayPrototypeSlice,136,137,0
-block_hint,ArrayPrototypeSlice,73,74,1
-block_hint,ArraySome,88,89,1
-block_hint,ArraySome,31,32,0
-block_hint,ArraySome,119,120,1
-block_hint,ArraySome,115,116,1
-block_hint,ArraySome,93,94,1
-block_hint,ArraySome,95,96,1
-block_hint,ArraySome,101,102,0
-block_hint,ArraySome,108,109,1
-block_hint,ArraySome,110,111,1
-block_hint,ArraySome,99,100,1
-block_hint,ArraySome,56,57,0
-block_hint,ArraySome,105,106,1
-block_hint,ArraySome,77,78,1
-block_hint,ArraySome,52,53,1
-block_hint,ArraySome,13,14,1
-block_hint,ArraySome,62,63,1
-block_hint,ArraySome,27,28,0
-block_hint,ArraySome,79,80,1
-block_hint,ArraySome,81,82,0
-block_hint,ArraySome,58,59,0
-block_hint,ArraySome,54,55,0
-block_hint,ArraySome,19,20,0
-block_hint,ArraySome,21,22,1
-block_hint,ArraySome,66,67,0
-block_hint,ArrayPrototypeSplice,599,600,1
-block_hint,ArrayPrototypeSplice,447,448,1
-block_hint,ArrayPrototypeSplice,449,450,1
-block_hint,ArrayPrototypeSplice,1195,1196,0
-block_hint,ArrayPrototypeSplice,1177,1178,0
-block_hint,ArrayPrototypeSplice,1153,1154,0
-block_hint,ArrayPrototypeSplice,1132,1133,0
-block_hint,ArrayPrototypeSplice,1098,1099,0
-block_hint,ArrayPrototypeSplice,1066,1067,0
-block_hint,ArrayPrototypeSplice,1025,1026,0
-block_hint,ArrayPrototypeSplice,1096,1097,0
-block_hint,ArrayPrototypeSplice,1064,1065,0
-block_hint,ArrayPrototypeSplice,1021,1022,1
-block_hint,ArrayPrototypeSplice,934,935,0
-block_hint,ArrayPrototypeSplice,1087,1088,0
-block_hint,ArrayPrototypeSplice,1220,1221,0
-block_hint,ArrayPrototypeSplice,1212,1213,0
-block_hint,ArrayPrototypeSplice,1199,1200,0
-block_hint,ArrayPrototypeSplice,1187,1188,1
-block_hint,ArrayPrototypeSplice,1156,1157,0
-block_hint,ArrayPrototypeSplice,1136,1137,0
-block_hint,ArrayPrototypeSplice,1115,1116,0
-block_hint,ArrayPrototypeSplice,1072,1073,0
-block_hint,ArrayPrototypeSplice,1032,1033,0
-block_hint,ArrayPrototypeSplice,1070,1071,0
-block_hint,ArrayPrototypeSplice,1030,1031,0
-block_hint,ArrayPrototypeSplice,981,982,1
-block_hint,ArrayPrototypeSplice,871,872,0
-block_hint,ArrayPrototypeSplice,836,837,0
-block_hint,ArrayPrototypeSplice,804,805,0
-block_hint,ArrayPrototypeSplice,725,726,0
-block_hint,ArrayPrototypeSplice,671,672,0
-block_hint,ArrayPrototypeSplice,601,602,0
-block_hint,ArrayPrototypeSplice,507,508,1
-block_hint,ArrayPrototypeSplice,453,454,0
-block_hint,ArrayPrototypeSplice,223,224,0
-block_hint,ArrayPrototypeSplice,333,334,0
-block_hint,ArrayPrototypeSplice,335,336,0
-block_hint,ArrayPrototypeSplice,337,338,0
-block_hint,ArrayPrototypeSplice,225,226,1
-block_hint,ArrayPrototypeSplice,51,52,1
-block_hint,ArrayPrototypeSplice,339,340,1
-block_hint,ArrayPrototypeSplice,341,342,0
-block_hint,ArrayPrototypeSplice,343,344,0
-block_hint,ArrayPrototypeSplice,388,389,1
-block_hint,ArrayPrototypeSplice,227,228,0
-block_hint,ArrayPrototypeSplice,53,54,1
-block_hint,ArrayPrototypeSplice,244,245,0
-block_hint,ArrayPrototypeSplice,93,94,1
-block_hint,ArrayPrototypeSplice,552,553,0
-block_hint,ArrayPrototypeSplice,401,402,0
-block_hint,ArrayPrototypeSplice,584,585,0
-block_hint,ArrayPrototypeSplice,524,525,1
-block_hint,ArrayPrototypeSplice,346,347,0
-block_hint,ArrayPrototypeSplice,348,349,1
-block_hint,ArrayPrototypeSplice,235,236,0
-block_hint,ArrayPrototypeSplice,257,258,1
-block_hint,ArrayPrototypeSplice,105,106,0
-block_hint,ArrayPrototypeSplice,229,230,0
-block_hint,ArrayPrototypeSplice,330,331,0
-block_hint,ArrayPrototypeSplice,328,329,0
-block_hint,ArrayPrototypeSplice,392,393,1
-block_hint,ArrayPrototypeSplice,65,66,1
-block_hint,ArrayPrototypeSplice,293,294,0
-block_hint,ArrayPrototypeSplice,143,144,0
-block_hint,ArrayPrototypeSplice,67,68,0
-block_hint,ArrayPrototypeSplice,69,70,0
-block_hint,ArrayPrototypeSplice,262,263,1
-block_hint,ArrayPrototypeSplice,178,179,1
-block_hint,ArrayPrototypeSplice,326,327,0
-block_hint,ArrayPrototypeSplice,422,423,1
-block_hint,ArrayPrototypeSplice,264,265,1
-block_hint,ArrayPrototypeSplice,111,112,0
-block_hint,ArrayPrototypeSplice,424,425,1
-block_hint,ArrayPrototypeSplice,115,116,0
-block_hint,ArrayPrototypeSplice,182,183,1
-block_hint,ArrayPrototypeSplice,63,64,1
-block_hint,ArrayPrototypeSplice,131,132,1
-block_hint,ArrayPrototypeSplice,295,296,0
-block_hint,ArrayPrototypeSplice,71,72,1
-block_hint,ArrayPrototypeUnshift,185,186,1
-block_hint,ArrayPrototypeUnshift,158,159,1
-block_hint,ArrayPrototypeUnshift,140,141,1
-block_hint,ArrayPrototypeUnshift,96,97,1
-block_hint,ArrayPrototypeUnshift,55,56,1
-block_hint,ArrayPrototypeUnshift,10,11,1
-block_hint,ArrayPrototypeUnshift,129,130,1
-block_hint,ArrayPrototypeUnshift,98,99,0
-block_hint,ArrayPrototypeUnshift,57,58,0
-block_hint,ArrayPrototypeUnshift,100,101,1
-block_hint,ArrayPrototypeUnshift,59,60,0
-block_hint,ArrayPrototypeUnshift,20,21,1
-block_hint,ArrayPrototypeUnshift,22,23,0
-block_hint,ArrayBufferPrototypeGetByteLength,15,16,1
-block_hint,ArrayBufferPrototypeGetByteLength,10,11,1
-block_hint,ArrayBufferPrototypeGetByteLength,12,13,1
-block_hint,ArrayBufferPrototypeGetByteLength,6,7,0
-block_hint,ArrayBufferPrototypeGetByteLength,4,5,0
-block_hint,ArrayBufferIsView,8,9,1
-block_hint,ArrayBufferIsView,5,6,1
-block_hint,ArrayBufferIsView,3,4,1
-block_hint,ToInteger,4,5,1
-block_hint,ToInteger,6,7,0
-block_hint,BooleanConstructor,81,82,1
-block_hint,BooleanConstructor,74,75,0
-block_hint,BooleanConstructor,57,58,0
-block_hint,BooleanConstructor,68,69,1
-block_hint,BooleanConstructor,59,60,0
-block_hint,BooleanConstructor,70,71,0
-block_hint,BooleanConstructor,51,52,0
-block_hint,BooleanConstructor,7,8,1
-block_hint,ToString,20,21,0
-block_hint,ToString,34,35,0
-block_hint,ToString,67,68,0
-block_hint,ToString,83,84,0
-block_hint,ToString,25,26,1
-block_hint,ToString,10,11,1
-block_hint,ToString,40,41,1
-block_hint,ToString,50,51,1
-block_hint,ToString,54,55,0
-block_hint,StringPrototypeToString,9,10,1
-block_hint,StringPrototypeToString,11,12,1
-block_hint,StringPrototypeToString,7,8,0
-block_hint,StringPrototypeToString,5,6,1
-block_hint,StringPrototypeValueOf,9,10,1
-block_hint,StringPrototypeValueOf,11,12,1
-block_hint,StringPrototypeValueOf,5,6,1
-block_hint,StringPrototypeCharAt,51,52,1
-block_hint,StringPrototypeCharAt,37,38,1
-block_hint,StringPrototypeCharAt,28,29,1
-block_hint,StringPrototypeCharAt,33,34,0
-block_hint,StringPrototypeCharAt,12,13,0
-block_hint,StringPrototypeCharAt,14,15,0
-block_hint,StringPrototypeCharAt,19,20,1
-block_hint,StringPrototypeCharAt,43,44,0
-block_hint,StringPrototypeCharAt,6,7,1
-block_hint,StringPrototypeCharCodeAt,46,47,1
-block_hint,StringPrototypeCharCodeAt,41,42,1
-block_hint,StringPrototypeCharCodeAt,28,29,1
-block_hint,StringPrototypeCharCodeAt,39,40,0
-block_hint,StringPrototypeCharCodeAt,13,14,0
-block_hint,StringPrototypeCharCodeAt,15,16,0
-block_hint,StringPrototypeCharCodeAt,17,18,1
-block_hint,StringPrototypeCharCodeAt,32,33,0
-block_hint,StringPrototypeCodePointAt,79,80,1
-block_hint,StringPrototypeCodePointAt,53,54,1
-block_hint,StringPrototypeCodePointAt,43,44,1
-block_hint,StringPrototypeCodePointAt,51,52,0
-block_hint,StringPrototypeCodePointAt,20,21,0
-block_hint,StringPrototypeCodePointAt,22,23,0
-block_hint,StringPrototypeCodePointAt,8,9,0
-block_hint,StringPrototypeCodePointAt,65,66,0
-block_hint,StringPrototypeCodePointAt,45,46,0
-block_hint,StringPrototypeCodePointAt,14,15,1
-block_hint,StringPrototypeCodePointAt,16,17,1
-block_hint,StringPrototypeCodePointAt,10,11,0
-block_hint,StringPrototypeCodePointAt,72,73,0
-block_hint,StringPrototypeCodePointAt,48,49,0
-block_hint,StringPrototypeCodePointAt,18,19,1
-block_hint,StringConstructor,64,65,1
-block_hint,StringConstructor,49,50,0
-block_hint,StringConstructor,36,37,0
-block_hint,StringConstructor,78,79,1
-block_hint,StringConstructor,76,77,1
-block_hint,StringConstructor,73,74,1
-block_hint,StringConstructor,60,61,0
-block_hint,StringConstructor,62,63,1
-block_hint,StringConstructor,45,46,0
-block_hint,StringConstructor,24,25,0
-block_hint,StringConstructor,26,27,1
-block_hint,StringAddConvertLeft,47,48,1
-block_hint,StringAddConvertLeft,49,50,0
-block_hint,StringAddConvertLeft,82,83,1
-block_hint,StringAddConvertLeft,64,65,0
-block_hint,StringAddConvertLeft,43,44,0
-block_hint,StringAddConvertLeft,62,63,1
-block_hint,StringAddConvertRight,47,48,1
-block_hint,StringAddConvertRight,82,83,1
-block_hint,StringAddConvertRight,64,65,0
-block_hint,StringAddConvertRight,43,44,0
-block_hint,StringAddConvertRight,86,87,0
-block_hint,StringAddConvertRight,62,63,1
-block_hint,StringAddConvertRight,79,80,0
-block_hint,StringAddConvertRight,84,85,0
-block_hint,StringAddConvertRight,30,31,0
-block_hint,StringAddConvertRight,9,10,0
-block_hint,StringAddConvertRight,52,53,1
-block_hint,StringAddConvertRight,32,33,1
-block_hint,StringAddConvertRight,11,12,1
-block_hint,StringAddConvertRight,34,35,1
-block_hint,StringAddConvertRight,38,39,0
-block_hint,StringAddConvertRight,15,16,1
-block_hint,StringAddConvertRight,17,18,1
-block_hint,StringCharAt,27,28,0
-block_hint,StringCharAt,20,21,1
-block_hint,StringCharAt,5,6,1
-block_hint,FastNewFunctionContextFunction,11,12,1
-block_hint,FastNewFunctionContextFunction,4,5,1
-block_hint,FastNewFunctionContextFunction,6,7,0
-block_hint,CreateRegExpLiteral,6,7,0
-block_hint,CreateRegExpLiteral,8,9,1
-block_hint,CreateRegExpLiteral,10,11,1
-block_hint,CreateRegExpLiteral,2,3,1
-block_hint,CreateShallowArrayLiteral,20,21,1
-block_hint,CreateShallowArrayLiteral,22,23,1
-block_hint,CreateShallowArrayLiteral,35,36,0
-block_hint,CreateShallowArrayLiteral,11,12,0
-block_hint,CreateShallowArrayLiteral,42,43,1
-block_hint,CreateShallowArrayLiteral,39,40,1
-block_hint,CreateShallowArrayLiteral,24,25,1
-block_hint,CreateShallowArrayLiteral,13,14,0
-block_hint,CreateShallowArrayLiteral,15,16,1
-block_hint,CreateShallowArrayLiteral,47,48,1
-block_hint,CreateShallowArrayLiteral,45,46,0
-block_hint,CreateShallowArrayLiteral,30,31,1
-block_hint,CreateShallowArrayLiteral,5,6,1
-block_hint,CreateShallowArrayLiteral,18,19,1
-block_hint,CreateEmptyArrayLiteral,9,10,1
-block_hint,CreateEmptyArrayLiteral,3,4,1
-block_hint,CreateEmptyArrayLiteral,6,7,1
-block_hint,CreateShallowObjectLiteral,53,54,1
-block_hint,CreateShallowObjectLiteral,61,62,1
-block_hint,CreateShallowObjectLiteral,63,64,0
-block_hint,CreateShallowObjectLiteral,110,111,0
-block_hint,CreateShallowObjectLiteral,99,100,1
-block_hint,CreateShallowObjectLiteral,67,68,1
-block_hint,CreateShallowObjectLiteral,106,107,1
-block_hint,CreateShallowObjectLiteral,81,82,1
-block_hint,CreateShallowObjectLiteral,34,35,0
-block_hint,CreateShallowObjectLiteral,71,72,0
-block_hint,CreateShallowObjectLiteral,38,39,0
-block_hint,CreateShallowObjectLiteral,42,43,0
-block_hint,CreateShallowObjectLiteral,85,86,1
-block_hint,CreateShallowObjectLiteral,93,94,1
-block_hint,ObjectConstructor,27,28,1
-block_hint,ObjectConstructor,19,20,1
-block_hint,ObjectConstructor,29,30,0
-block_hint,ObjectConstructor,23,24,0
-block_hint,ObjectConstructor,17,18,0
-block_hint,ObjectConstructor,11,12,0
-block_hint,ObjectConstructor,4,5,1
-block_hint,ObjectConstructor,21,22,1
-block_hint,ObjectConstructor,6,7,0
-block_hint,CreateEmptyLiteralObject,4,5,1
-block_hint,CreateEmptyLiteralObject,11,12,1
-block_hint,CreateEmptyLiteralObject,6,7,0
-block_hint,NumberConstructor,18,19,1
-block_hint,NumberConstructor,6,7,1
-block_hint,NumberConstructor,28,29,0
-block_hint,NumberConstructor,12,13,0
-block_hint,NumberConstructor,34,35,0
-block_hint,NumberConstructor,32,33,1
-block_hint,NumberConstructor,30,31,1
-block_hint,NumberConstructor,2,3,1
-block_hint,NonNumberToNumber,14,15,0
-block_hint,NonNumberToNumber,3,4,1
-block_hint,NonNumberToNumeric,17,18,0
-block_hint,NonNumberToNumeric,14,15,0
-block_hint,NonNumberToNumeric,5,6,1
-block_hint,ToNumeric,5,6,1
-block_hint,ToNumeric,3,4,1
-block_hint,NumberToString,69,70,0
-block_hint,NumberToString,20,21,1
-block_hint,NumberToString,45,46,0
-block_hint,NumberToString,62,63,0
-block_hint,NumberToString,27,28,0
-block_hint,NumberToString,7,8,0
-block_hint,NumberToString,50,51,1
-block_hint,NumberToString,29,30,1
-block_hint,NumberToString,9,10,1
-block_hint,NumberToString,31,32,1
-block_hint,NumberToString,35,36,0
-block_hint,NumberToString,13,14,1
-block_hint,NumberToString,15,16,1
-block_hint,NumberToString,41,42,1
-block_hint,ToBoolean,18,19,1
-block_hint,ToBoolean,14,15,0
-block_hint,ToBoolean,20,21,0
-block_hint,ToBoolean,6,7,0
-block_hint,ToBooleanForBaselineJump,14,15,0
-block_hint,ToBooleanForBaselineJump,20,21,0
-block_hint,ToBooleanForBaselineJump,6,7,0
-block_hint,ToLength,19,20,0
-block_hint,ToLength,5,6,0
-block_hint,ToName,40,41,1
-block_hint,ToName,48,49,0
-block_hint,ToName,20,21,0
-block_hint,ToName,22,23,0
-block_hint,ToName,67,68,0
-block_hint,ToName,27,28,1
-block_hint,ToObject,45,46,1
-block_hint,ToObject,7,8,0
-block_hint,ToObject,38,39,0
-block_hint,ToObject,9,10,1
-block_hint,ToObject,53,54,0
-block_hint,ToObject,55,56,1
-block_hint,ToObject,48,49,0
-block_hint,ToObject,26,27,0
-block_hint,ToObject,28,29,1
-block_hint,NonPrimitiveToPrimitive_Default,5,6,1
-block_hint,NonPrimitiveToPrimitive_Number,5,6,1
-block_hint,NonPrimitiveToPrimitive_String,5,6,1
-block_hint,OrdinaryToPrimitive_Number,56,57,1
-block_hint,OrdinaryToPrimitive_Number,53,54,1
-block_hint,OrdinaryToPrimitive_Number,40,41,1
-block_hint,OrdinaryToPrimitive_Number,42,43,0
-block_hint,OrdinaryToPrimitive_Number,28,29,0
-block_hint,OrdinaryToPrimitive_Number,12,13,0
-block_hint,OrdinaryToPrimitive_Number,30,31,0
-block_hint,OrdinaryToPrimitive_Number,32,33,0
-block_hint,OrdinaryToPrimitive_Number,14,15,0
-block_hint,OrdinaryToPrimitive_Number,16,17,0
-block_hint,OrdinaryToPrimitive_Number,44,45,1
-block_hint,OrdinaryToPrimitive_Number,46,47,1
-block_hint,OrdinaryToPrimitive_Number,48,49,1
-block_hint,OrdinaryToPrimitive_Number,50,51,0
-block_hint,OrdinaryToPrimitive_Number,34,35,0
-block_hint,OrdinaryToPrimitive_Number,20,21,0
-block_hint,OrdinaryToPrimitive_String,56,57,1
-block_hint,OrdinaryToPrimitive_String,53,54,1
-block_hint,OrdinaryToPrimitive_String,40,41,1
-block_hint,OrdinaryToPrimitive_String,42,43,0
-block_hint,OrdinaryToPrimitive_String,28,29,0
-block_hint,OrdinaryToPrimitive_String,10,11,0
-block_hint,DataViewPrototypeGetByteLength,37,38,1
-block_hint,DataViewPrototypeGetByteLength,19,20,1
-block_hint,DataViewPrototypeGetByteLength,21,22,1
-block_hint,DataViewPrototypeGetByteLength,39,40,0
-block_hint,DataViewPrototypeGetByteLength,33,34,0
-block_hint,DataViewPrototypeGetByteLength,12,13,0
-block_hint,DataViewPrototypeGetByteLength,10,11,0
-block_hint,DataViewPrototypeGetFloat64,98,99,1
-block_hint,DataViewPrototypeGetFloat64,84,85,0
-block_hint,DataViewPrototypeGetFloat64,54,55,0
-block_hint,DataViewPrototypeGetFloat64,16,17,1
-block_hint,DataViewPrototypeGetFloat64,18,19,1
-block_hint,DataViewPrototypeGetFloat64,92,93,0
-block_hint,DataViewPrototypeGetFloat64,96,97,0
-block_hint,DataViewPrototypeGetFloat64,75,76,0
-block_hint,DataViewPrototypeGetFloat64,47,48,0
-block_hint,DataViewPrototypeGetFloat64,67,68,0
-block_hint,DataViewPrototypeGetFloat64,86,87,1
-block_hint,DataViewPrototypeGetFloat64,69,70,0
-block_hint,DataViewPrototypeGetFloat64,71,72,0
-block_hint,DataViewPrototypeGetFloat64,88,89,0
-block_hint,DataViewPrototypeGetFloat64,61,62,0
-block_hint,DataViewPrototypeGetFloat64,20,21,0
-block_hint,DataViewPrototypeGetFloat64,94,95,0
-block_hint,DataViewPrototypeGetFloat64,79,80,0
-block_hint,DataViewPrototypeGetFloat64,45,46,0
-block_hint,DataViewPrototypeGetFloat64,29,30,0
-block_hint,DataViewPrototypeGetFloat64,82,83,1
-block_hint,DataViewPrototypeGetFloat64,52,53,0
-block_hint,DataViewPrototypeGetFloat64,13,14,1
-block_hint,DataViewPrototypeSetFloat64,113,114,1
-block_hint,DataViewPrototypeSetFloat64,101,102,0
-block_hint,DataViewPrototypeSetFloat64,79,80,0
-block_hint,DataViewPrototypeSetFloat64,47,48,0
-block_hint,DataViewPrototypeSetFloat64,15,16,1
-block_hint,DataViewPrototypeSetFloat64,17,18,1
-block_hint,DataViewPrototypeSetFloat64,103,104,0
-block_hint,DataViewPrototypeSetFloat64,92,93,0
-block_hint,DataViewPrototypeSetFloat64,68,69,0
-block_hint,DataViewPrototypeSetFloat64,40,41,0
-block_hint,DataViewPrototypeSetFloat64,81,82,1
-block_hint,DataViewPrototypeSetFloat64,83,84,1
-block_hint,DataViewPrototypeSetFloat64,57,58,1
-block_hint,DataViewPrototypeSetFloat64,10,11,0
-block_hint,DataViewPrototypeSetFloat64,90,91,0
-block_hint,DataViewPrototypeSetFloat64,76,77,0
-block_hint,DataViewPrototypeSetFloat64,38,39,0
-block_hint,DataViewPrototypeSetFloat64,27,28,0
-block_hint,DataViewPrototypeSetFloat64,45,46,1
-block_hint,DataViewPrototypeSetFloat64,13,14,0
-block_hint,FunctionPrototypeHasInstance,35,36,1
-block_hint,FunctionPrototypeHasInstance,15,16,1
-block_hint,FunctionPrototypeHasInstance,17,18,1
-block_hint,FunctionPrototypeHasInstance,19,20,1
-block_hint,FunctionPrototypeHasInstance,33,34,1
-block_hint,FunctionPrototypeHasInstance,23,24,0
-block_hint,FunctionPrototypeHasInstance,13,14,0
-block_hint,FunctionPrototypeHasInstance,31,32,0
-block_hint,FunctionPrototypeHasInstance,25,26,0
-block_hint,FunctionPrototypeHasInstance,27,28,0
-block_hint,FastFunctionPrototypeBind,91,92,1
-block_hint,FastFunctionPrototypeBind,88,89,1
-block_hint,FastFunctionPrototypeBind,75,76,0
-block_hint,FastFunctionPrototypeBind,29,30,0
-block_hint,FastFunctionPrototypeBind,31,32,0
-block_hint,FastFunctionPrototypeBind,7,8,1
-block_hint,FastFunctionPrototypeBind,53,54,1
-block_hint,FastFunctionPrototypeBind,65,66,0
-block_hint,FastFunctionPrototypeBind,69,70,1
-block_hint,FastFunctionPrototypeBind,41,42,1
-block_hint,FastFunctionPrototypeBind,9,10,1
-block_hint,FastFunctionPrototypeBind,56,57,1
-block_hint,FastFunctionPrototypeBind,67,68,0
-block_hint,FastFunctionPrototypeBind,79,80,1
-block_hint,FastFunctionPrototypeBind,71,72,1
-block_hint,FastFunctionPrototypeBind,43,44,1
-block_hint,FastFunctionPrototypeBind,11,12,1
-block_hint,FastFunctionPrototypeBind,35,36,1
-block_hint,FastFunctionPrototypeBind,81,82,1
-block_hint,FastFunctionPrototypeBind,73,74,0
-block_hint,FastFunctionPrototypeBind,27,28,1
-block_hint,ForInNext,2,3,1
-block_hint,ForInNext,7,8,1
-block_hint,CallIteratorWithFeedback,56,57,1
-block_hint,CallIteratorWithFeedback,58,59,1
-block_hint,CallIteratorWithFeedback,26,27,1
-block_hint,CallIteratorWithFeedback,28,29,1
-block_hint,CallIteratorWithFeedback,30,31,1
-block_hint,CallIteratorWithFeedback,10,11,1
-block_hint,MathAbs,14,15,1
-block_hint,MathAbs,16,17,1
-block_hint,MathAbs,7,8,0
-block_hint,MathAbs,23,24,0
-block_hint,MathAbs,9,10,0
-block_hint,MathAbs,11,12,1
-block_hint,MathCeil,12,13,1
-block_hint,MathFloor,12,13,1
-block_hint,MathFloor,14,15,1
-block_hint,MathFloor,21,22,1
-block_hint,MathFloor,19,20,0
-block_hint,MathFloor,7,8,0
-block_hint,MathRound,12,13,1
-block_hint,MathRound,14,15,1
-block_hint,MathRound,21,22,1
-block_hint,MathRound,7,8,0
-block_hint,MathRound,9,10,1
-block_hint,MathPow,12,13,1
-block_hint,MathPow,14,15,1
-block_hint,MathPow,18,19,1
-block_hint,MathPow,23,24,0
-block_hint,MathPow,7,8,0
-block_hint,MathPow,9,10,1
-block_hint,MathMax,13,14,1
-block_hint,MathMax,19,20,0
-block_hint,MathMax,17,18,1
-block_hint,MathMax,24,25,0
-block_hint,MathMax,8,9,0
-block_hint,MathMax,10,11,1
-block_hint,MathMin,13,14,1
-block_hint,MathMin,19,20,0
-block_hint,MathMin,17,18,1
-block_hint,MathMin,24,25,0
-block_hint,MathMin,8,9,0
-block_hint,MathMin,10,11,1
-block_hint,MathAtan2,34,35,1
-block_hint,MathAtan2,32,33,1
-block_hint,MathAtan2,23,24,1
-block_hint,MathAtan2,5,6,1
-block_hint,MathCos,25,26,1
-block_hint,MathCos,23,24,1
-block_hint,MathCos,9,10,1
-block_hint,MathCos,3,4,0
-block_hint,MathCos,5,6,1
-block_hint,MathExp,25,26,1
-block_hint,MathExp,20,21,1
-block_hint,MathExp,23,24,1
-block_hint,MathExp,16,17,1
-block_hint,MathExp,13,14,0
-block_hint,MathExp,5,6,1
-block_hint,MathFround,25,26,1
-block_hint,MathFround,23,24,1
-block_hint,MathFround,5,6,1
-block_hint,MathLog,25,26,1
-block_hint,MathLog,23,24,1
-block_hint,MathLog,13,14,0
-block_hint,MathLog,5,6,1
-block_hint,MathSin,25,26,1
-block_hint,MathSin,23,24,1
-block_hint,MathSin,9,10,0
-block_hint,MathSin,11,12,0
-block_hint,MathSin,3,4,0
-block_hint,MathSin,5,6,1
-block_hint,MathSign,16,17,1
-block_hint,MathSign,11,12,0
-block_hint,MathSign,7,8,0
-block_hint,MathSign,2,3,0
-block_hint,MathSign,4,5,1
-block_hint,MathSqrt,25,26,1
-block_hint,MathSqrt,23,24,1
-block_hint,MathSqrt,11,12,0
-block_hint,MathSqrt,3,4,0
-block_hint,MathSqrt,5,6,1
-block_hint,MathTan,25,26,1
-block_hint,MathTan,20,21,0
-block_hint,MathTan,16,17,0
-block_hint,MathTanh,25,26,1
-block_hint,MathTanh,20,21,1
-block_hint,MathTanh,23,24,1
-block_hint,MathTanh,16,17,1
-block_hint,MathTanh,13,14,0
-block_hint,MathTanh,5,6,1
-block_hint,MathRandom,15,16,1
-block_hint,MathRandom,3,4,1
-block_hint,MathRandom,17,18,1
-block_hint,MathRandom,5,6,1
-block_hint,MathRandom,7,8,1
-block_hint,MathRandom,9,10,1
-block_hint,MathRandom,13,14,1
-block_hint,NumberPrototypeToString,71,72,1
-block_hint,NumberPrototypeToString,113,114,0
-block_hint,NumberPrototypeToString,51,52,0
-block_hint,NumberPrototypeToString,59,60,1
-block_hint,NumberPrototypeToString,183,184,0
-block_hint,NumberPrototypeToString,154,155,0
-block_hint,NumberPrototypeToString,121,122,0
-block_hint,NumberPrototypeToString,180,181,0
-block_hint,NumberPrototypeToString,167,168,0
-block_hint,NumberPrototypeToString,85,86,0
-block_hint,NumberPrototypeToString,176,177,0
-block_hint,NumberPrototypeToString,97,98,0
-block_hint,NumberPrototypeToString,171,172,0
-block_hint,NumberPrototypeToString,129,130,0
-block_hint,NumberPrototypeToString,109,110,1
-block_hint,NumberPrototypeToString,42,43,1
-block_hint,NumberPrototypeToString,49,50,1
-block_hint,NumberPrototypeToString,73,74,0
-block_hint,NumberPrototypeToString,27,28,0
-block_hint,NumberPrototypeToString,116,117,1
-block_hint,NumberPrototypeToString,75,76,1
-block_hint,NumberPrototypeToString,29,30,1
-block_hint,NumberPrototypeToString,95,96,0
-block_hint,NumberPrototypeToString,111,112,0
-block_hint,NumberPrototypeToString,35,36,1
-block_hint,NumberPrototypeToString,132,133,1
-block_hint,NumberPrototypeToString,37,38,0
-block_hint,NumberPrototypeToString,134,135,1
-block_hint,NumberPrototypeToString,39,40,0
-block_hint,NumberPrototypeToString,162,163,1
-block_hint,NumberPrototypeToString,164,165,0
-block_hint,NumberPrototypeToString,139,140,0
-block_hint,NumberPrototypeToString,105,106,0
-block_hint,NumberPrototypeToString,148,149,0
-block_hint,NumberPrototypeToString,152,153,0
-block_hint,NumberPrototypeToString,79,80,0
-block_hint,NumberPrototypeToString,17,18,0
-block_hint,NumberPrototypeToString,119,120,1
-block_hint,NumberPrototypeToString,81,82,1
-block_hint,NumberPrototypeToString,19,20,1
-block_hint,NumberPrototypeToString,83,84,1
-block_hint,NumberPrototypeToString,89,90,0
-block_hint,NumberPrototypeToString,23,24,1
-block_hint,NumberPrototypeToString,25,26,1
-block_hint,NumberIsInteger,11,12,1
-block_hint,NumberParseFloat,14,15,1
-block_hint,NumberParseFloat,2,3,1
-block_hint,NumberParseFloat,12,13,0
-block_hint,NumberParseFloat,17,18,0
-block_hint,NumberParseFloat,4,5,1
-block_hint,ParseInt,27,28,1
-block_hint,ParseInt,13,14,0
-block_hint,ParseInt,6,7,1
-block_hint,ParseInt,31,32,0
-block_hint,ParseInt,25,26,1
-block_hint,ParseInt,23,24,1
-block_hint,ParseInt,10,11,0
-block_hint,NumberParseInt,3,4,1
-block_hint,Add,66,67,1
-block_hint,Add,24,25,0
-block_hint,Add,68,69,0
-block_hint,Add,35,36,0
-block_hint,Add,40,41,0
-block_hint,Subtract,24,25,0
-block_hint,Subtract,9,10,0
-block_hint,Subtract,22,23,0
-block_hint,Subtract,7,8,0
-block_hint,Divide,50,51,0
-block_hint,Divide,23,24,0
-block_hint,Divide,9,10,0
-block_hint,Divide,44,45,1
-block_hint,Divide,48,49,1
-block_hint,Divide,33,34,0
-block_hint,Divide,7,8,1
-block_hint,CreateObjectWithoutProperties,52,53,1
-block_hint,CreateObjectWithoutProperties,42,43,1
-block_hint,CreateObjectWithoutProperties,34,35,0
-block_hint,CreateObjectWithoutProperties,17,18,1
-block_hint,CreateObjectWithoutProperties,56,57,0
-block_hint,CreateObjectWithoutProperties,44,45,0
-block_hint,CreateObjectWithoutProperties,48,49,1
-block_hint,CreateObjectWithoutProperties,36,37,0
-block_hint,CreateObjectWithoutProperties,38,39,0
-block_hint,CreateObjectWithoutProperties,5,6,1
-block_hint,CreateObjectWithoutProperties,40,41,1
-block_hint,CreateObjectWithoutProperties,7,8,1
-block_hint,CreateObjectWithoutProperties,9,10,1
-block_hint,CreateObjectWithoutProperties,11,12,1
-block_hint,CreateObjectWithoutProperties,13,14,1
-block_hint,CreateObjectWithoutProperties,15,16,1
-block_hint,CreateObjectWithoutProperties,20,21,0
-block_hint,CreateObjectWithoutProperties,50,51,1
-block_hint,ObjectGetPrototypeOf,11,12,1
-block_hint,ObjectGetPrototypeOf,8,9,1
-block_hint,ObjectGetPrototypeOf,5,6,1
-block_hint,ObjectGetPrototypeOf,2,3,0
-block_hint,ObjectSetPrototypeOf,18,19,1
-block_hint,ObjectSetPrototypeOf,4,5,0
-block_hint,ObjectSetPrototypeOf,13,14,1
-block_hint,ObjectSetPrototypeOf,20,21,0
-block_hint,ObjectSetPrototypeOf,15,16,0
-block_hint,ObjectSetPrototypeOf,6,7,1
-block_hint,ObjectSetPrototypeOf,8,9,0
-block_hint,ObjectSetPrototypeOf,10,11,0
-block_hint,ObjectPrototypeToString,3,4,1
-block_hint,ObjectPrototypeValueOf,8,9,1
-block_hint,ObjectPrototypeValueOf,5,6,1
-block_hint,ObjectPrototypeValueOf,2,3,1
-block_hint,FulfillPromise,32,33,1
-block_hint,FulfillPromise,15,16,0
-block_hint,FulfillPromise,34,35,1
-block_hint,FulfillPromise,17,18,0
-block_hint,FulfillPromise,19,20,1
-block_hint,FulfillPromise,21,22,0
-block_hint,PerformPromiseThen,101,102,1
-block_hint,PerformPromiseThen,57,58,0
-block_hint,PerformPromiseThen,103,104,1
-block_hint,PerformPromiseThen,59,60,0
-block_hint,PerformPromiseThen,61,62,1
-block_hint,PerformPromiseThen,63,64,0
-block_hint,PerformPromiseThen,18,19,1
-block_hint,PerformPromiseThen,72,73,1
-block_hint,PerformPromiseThen,25,26,1
-block_hint,PerformPromiseThen,93,94,1
-block_hint,PerformPromiseThen,45,46,0
-block_hint,PerformPromiseThen,95,96,1
-block_hint,PerformPromiseThen,47,48,0
-block_hint,PerformPromiseThen,49,50,1
-block_hint,PerformPromiseThen,51,52,0
-block_hint,PerformPromiseThen,20,21,1
-block_hint,PerformPromiseThen,115,116,1
-block_hint,PromiseFulfillReactionJob,22,23,0
-block_hint,PromiseFulfillReactionJob,2,3,1
-block_hint,ResolvePromise,29,30,0
-block_hint,ResolvePromise,31,32,0
-block_hint,ResolvePromise,15,16,1
-block_hint,ResolvePromise,47,48,0
-block_hint,ResolvePromise,33,34,0
-block_hint,ResolvePromise,6,7,1
-block_hint,ResolvePromise,17,18,0
-block_hint,ResolvePromise,19,20,1
-block_hint,ResolvePromise,53,54,1
-block_hint,ResolvePromise,49,50,0
-block_hint,ResolvePromise,23,24,0
-block_hint,ProxyConstructor,30,31,1
-block_hint,ProxyConstructor,10,11,0
-block_hint,ProxyConstructor,22,23,1
-block_hint,ProxyConstructor,24,25,0
-block_hint,ProxyConstructor,26,27,1
-block_hint,ProxyConstructor,28,29,0
-block_hint,ProxyConstructor,7,8,1
-block_hint,ProxyConstructor,17,18,1
-block_hint,ProxyConstructor,5,6,1
-block_hint,ProxyConstructor,12,13,1
-block_hint,ProxyGetProperty,153,154,1
-block_hint,ProxyGetProperty,34,35,0
-block_hint,ProxyGetProperty,10,11,0
-block_hint,ProxyGetProperty,89,90,0
-block_hint,ProxyGetProperty,91,92,0
-block_hint,ProxyGetProperty,85,86,1
-block_hint,ProxyGetProperty,87,88,1
-block_hint,ProxyGetProperty,176,177,1
-block_hint,ProxyGetProperty,180,181,0
-block_hint,ProxyGetProperty,118,119,0
-block_hint,ProxyGetProperty,40,41,1
-block_hint,ProxyGetProperty,114,115,1
-block_hint,ProxyGetProperty,24,25,0
-block_hint,ProxyGetProperty,26,27,1
-block_hint,ProxyGetProperty,208,209,1
-block_hint,ProxyGetProperty,198,199,0
-block_hint,ProxyGetProperty,149,150,1
-block_hint,ProxyGetProperty,28,29,0
-block_hint,ProxyGetProperty,167,168,0
-block_hint,ProxyGetProperty,187,188,1
-block_hint,ProxyGetProperty,131,132,1
-block_hint,ProxyGetProperty,169,170,1
-block_hint,ProxyGetProperty,171,172,0
-block_hint,ProxyGetProperty,60,61,0
-block_hint,ReflectGet,20,21,1
-block_hint,ReflectGet,15,16,0
-block_hint,ReflectGet,5,6,1
-block_hint,ReflectGet,7,8,0
-block_hint,ReflectGet,18,19,0
-block_hint,ReflectGet,9,10,0
-block_hint,ReflectHas,8,9,1
-block_hint,ReflectHas,5,6,1
-block_hint,ReflectHas,3,4,0
-block_hint,RegExpPrototypeExec,202,203,1
-block_hint,RegExpPrototypeExec,130,131,1
-block_hint,RegExpPrototypeExec,132,133,1
-block_hint,RegExpPrototypeExec,204,205,1
-block_hint,RegExpPrototypeExec,166,167,1
-block_hint,RegExpPrototypeExec,16,17,1
-block_hint,RegExpPrototypeExec,148,149,1
-block_hint,RegExpPrototypeExec,150,151,0
-block_hint,RegExpPrototypeExec,152,153,0
-block_hint,RegExpPrototypeExec,208,209,0
-block_hint,RegExpPrototypeExec,154,155,0
-block_hint,RegExpPrototypeExec,18,19,1
-block_hint,RegExpPrototypeExec,185,186,0
-block_hint,RegExpPrototypeExec,134,135,0
-block_hint,RegExpPrototypeExec,159,160,0
-block_hint,RegExpPrototypeExec,236,237,0
-block_hint,RegExpPrototypeExec,227,228,1
-block_hint,RegExpPrototypeExec,212,213,1
-block_hint,RegExpPrototypeExec,171,172,1
-block_hint,RegExpPrototypeExec,161,162,0
-block_hint,RegExpPrototypeExec,73,74,0
-block_hint,RegExpPrototypeExec,24,25,1
-block_hint,RegExpPrototypeExec,138,139,1
-block_hint,RegExpPrototypeExec,26,27,1
-block_hint,RegExpPrototypeExec,190,191,0
-block_hint,RegExpPrototypeExec,140,141,1
-block_hint,RegExpPrototypeExec,242,243,1
-block_hint,RegExpPrototypeExec,214,215,0
-block_hint,RegExpPrototypeExec,179,180,1
-block_hint,RegExpPrototypeExec,77,78,0
-block_hint,RegExpPrototypeExec,34,35,1
-block_hint,RegExpPrototypeExec,144,145,1
-block_hint,RegExpPrototypeExec,116,117,1
-block_hint,RegExpPrototypeExec,156,157,1
-block_hint,RegExpMatchFast,357,358,0
-block_hint,RegExpMatchFast,289,290,1
-block_hint,RegExpMatchFast,32,33,1
-block_hint,RegExpMatchFast,326,327,0
-block_hint,RegExpMatchFast,234,235,0
-block_hint,RegExpMatchFast,283,284,0
-block_hint,RegExpMatchFast,448,449,0
-block_hint,RegExpMatchFast,392,393,1
-block_hint,RegExpMatchFast,291,292,1
-block_hint,RegExpMatchFast,285,286,0
-block_hint,RegExpMatchFast,129,130,0
-block_hint,RegExpMatchFast,236,237,1
-block_hint,RegExpMatchFast,238,239,1
-block_hint,RegExpMatchFast,40,41,1
-block_hint,RegExpMatchFast,331,332,0
-block_hint,RegExpMatchFast,240,241,1
-block_hint,RegExpMatchFast,456,457,1
-block_hint,RegExpMatchFast,394,395,0
-block_hint,RegExpMatchFast,320,321,1
-block_hint,RegExpMatchFast,133,134,0
-block_hint,RegExpMatchFast,48,49,1
-block_hint,RegExpMatchFast,244,245,1
-block_hint,RegExpMatchFast,180,181,1
-block_hint,RegExpMatchFast,259,260,1
-block_hint,RegExpMatchFast,297,298,0
-block_hint,RegExpMatchFast,82,83,1
-block_hint,RegExpMatchFast,84,85,1
-block_hint,RegExpMatchFast,301,302,0
-block_hint,RegExpMatchFast,344,345,0
-block_hint,RegExpMatchFast,379,380,0
-block_hint,RegExpMatchFast,299,300,0
-block_hint,RegExpMatchFast,86,87,1
-block_hint,RegExpMatchFast,340,341,0
-block_hint,RegExpMatchFast,248,249,0
-block_hint,RegExpMatchFast,275,276,0
-block_hint,RegExpMatchFast,190,191,1
-block_hint,RegExpMatchFast,450,451,0
-block_hint,RegExpMatchFast,436,437,1
-block_hint,RegExpMatchFast,390,391,1
-block_hint,RegExpMatchFast,303,304,1
-block_hint,RegExpMatchFast,277,278,0
-block_hint,RegExpMatchFast,117,118,0
-block_hint,RegExpMatchFast,342,343,0
-block_hint,RegExpMatchFast,250,251,0
-block_hint,RegExpMatchFast,92,93,1
-block_hint,RegExpMatchFast,362,363,1
-block_hint,RegExpMatchFast,252,253,0
-block_hint,RegExpMatchFast,102,103,1
-block_hint,RegExpMatchFast,306,307,0
-block_hint,RegExpMatchFast,177,178,0
-block_hint,RegExpMatchFast,104,105,0
-block_hint,RegExpMatchFast,106,107,0
-block_hint,RegExpMatchFast,198,199,1
-block_hint,RegExpMatchFast,317,318,0
-block_hint,RegExpMatchFast,108,109,1
-block_hint,RegExpMatchFast,187,188,1
-block_hint,RegExpMatchFast,346,347,0
-block_hint,RegExpMatchFast,94,95,1
-block_hint,RegExpMatchFast,96,97,1
-block_hint,RegExpMatchFast,175,176,0
-block_hint,RegExpMatchFast,98,99,0
-block_hint,RegExpMatchFast,100,101,0
-block_hint,RegExpMatchFast,218,219,1
-block_hint,RegExpMatchFast,309,310,0
-block_hint,RegExpMatchFast,220,221,0
-block_hint,RegExpReplace,261,262,1
-block_hint,RegExpReplace,299,300,1
-block_hint,RegExpReplace,251,252,1
-block_hint,RegExpReplace,149,150,0
-block_hint,RegExpReplace,22,23,1
-block_hint,RegExpReplace,209,210,1
-block_hint,RegExpReplace,151,152,0
-block_hint,RegExpReplace,24,25,1
-block_hint,RegExpReplace,211,212,1
-block_hint,RegExpReplace,213,214,1
-block_hint,RegExpReplace,172,173,1
-block_hint,RegExpReplace,179,180,0
-block_hint,RegExpReplace,257,258,0
-block_hint,RegExpReplace,50,51,1
-block_hint,RegExpReplace,229,230,0
-block_hint,RegExpReplace,163,164,0
-block_hint,RegExpReplace,183,184,0
-block_hint,RegExpReplace,109,110,1
-block_hint,RegExpReplace,375,376,0
-block_hint,RegExpReplace,359,360,1
-block_hint,RegExpReplace,293,294,1
-block_hint,RegExpReplace,203,204,1
-block_hint,RegExpReplace,185,186,0
-block_hint,RegExpReplace,81,82,0
-block_hint,RegExpReplace,56,57,1
-block_hint,RegExpReplace,58,59,1
-block_hint,RegExpReplace,60,61,1
-block_hint,RegExpReplace,167,168,0
-block_hint,RegExpReplace,62,63,1
-block_hint,RegExpReplace,233,234,1
-block_hint,RegExpReplace,169,170,0
-block_hint,RegExpReplace,64,65,1
-block_hint,RegExpReplace,380,381,1
-block_hint,RegExpReplace,371,372,1
-block_hint,RegExpReplace,326,327,0
-block_hint,RegExpReplace,285,286,0
-block_hint,RegExpReplace,218,219,0
-block_hint,RegExpReplace,100,101,1
-block_hint,RegExpReplace,26,27,1
-block_hint,RegExpReplace,28,29,1
-block_hint,RegExpReplace,102,103,1
-block_hint,RegExpReplace,30,31,0
-block_hint,RegExpReplace,32,33,1
-block_hint,RegExpReplace,34,35,1
-block_hint,RegExpReplace,72,73,1
-block_hint,RegExpReplace,44,45,1
-block_hint,RegExpReplace,161,162,1
-block_hint,RegExpReplace,46,47,1
-block_hint,RegExpReplace,48,49,1
-block_hint,RegExpReplace,236,237,1
-block_hint,RegExpReplace,176,177,1
-block_hint,RegExpReplace,153,154,1
-block_hint,RegExpReplace,36,37,1
-block_hint,RegExpReplace,155,156,1
-block_hint,RegExpReplace,40,41,0
-block_hint,RegExpReplace,254,255,1
-block_hint,RegExpReplace,196,197,1
-block_hint,RegExpReplace,42,43,1
-block_hint,RegExpSearchFast,50,51,1
-block_hint,RegExpSearchFast,6,7,1
-block_hint,RegExpSearchFast,56,57,0
-block_hint,RegExpSearchFast,36,37,0
-block_hint,RegExpSearchFast,46,47,0
-block_hint,RegExpSearchFast,78,79,0
-block_hint,RegExpSearchFast,67,68,1
-block_hint,RegExpSearchFast,60,61,0
-block_hint,RegExpSearchFast,52,53,1
-block_hint,RegExpSearchFast,58,59,1
-block_hint,RegExpSearchFast,44,45,1
-block_hint,RegExpPrototypeSourceGetter,12,13,1
-block_hint,RegExpPrototypeSourceGetter,9,10,1
-block_hint,RegExpPrototypeSourceGetter,4,5,1
-block_hint,RegExpSplit,179,180,1
-block_hint,RegExpSplit,88,89,0
-block_hint,RegExpSplit,22,23,1
-block_hint,RegExpSplit,149,150,1
-block_hint,RegExpSplit,40,41,1
-block_hint,RegExpSplit,24,25,1
-block_hint,RegExpSplit,185,186,1
-block_hint,RegExpSplit,101,102,1
-block_hint,RegExpSplit,136,137,0
-block_hint,RegExpSplit,26,27,1
-block_hint,RegExpSplit,205,206,0
-block_hint,RegExpSplit,138,139,0
-block_hint,RegExpSplit,162,163,0
-block_hint,RegExpSplit,44,45,0
-block_hint,RegExpSplit,108,109,1
-block_hint,RegExpSplit,322,323,0
-block_hint,RegExpSplit,314,315,1
-block_hint,RegExpSplit,278,279,1
-block_hint,RegExpSplit,181,182,1
-block_hint,RegExpSplit,225,226,0
-block_hint,RegExpSplit,164,165,0
-block_hint,RegExpSplit,46,47,0
-block_hint,RegExpSplit,307,308,0
-block_hint,RegExpSplit,262,263,0
-block_hint,RegExpSplit,207,208,0
-block_hint,RegExpSplit,92,93,0
-block_hint,RegExpSplit,227,228,1
-block_hint,RegExpSplit,194,195,1
-block_hint,RegExpSplit,50,51,0
-block_hint,RegExpSplit,167,168,1
-block_hint,RegExpSplit,141,142,0
-block_hint,RegExpSplit,32,33,1
-block_hint,RegExpSplit,58,59,0
-block_hint,RegExpSplit,281,282,0
-block_hint,RegExpSplit,246,247,0
-block_hint,RegExpSplit,151,152,0
-block_hint,RegExpSplit,241,242,0
-block_hint,RegExpSplit,212,213,0
-block_hint,RegExpSplit,96,97,0
-block_hint,RegExpSplit,232,233,1
-block_hint,RegExpSplit,201,202,1
-block_hint,RegExpSplit,74,75,0
-block_hint,RegExpSplit,175,176,1
-block_hint,RegExpSplit,38,39,1
-block_hint,RegExpSplit,219,220,0
-block_hint,RegExpSplit,244,245,0
-block_hint,RegExpSplit,217,218,0
-block_hint,RegExpSplit,99,100,0
-block_hint,RegExpSplit,276,277,1
-block_hint,RegExpSplit,260,261,1
-block_hint,RegExpSplit,177,178,1
-block_hint,RegExpSplit,103,104,1
-block_hint,RegExpPrototypeTest,110,111,1
-block_hint,RegExpPrototypeTest,50,51,1
-block_hint,RegExpPrototypeTest,52,53,0
-block_hint,RegExpPrototypeTest,134,135,1
-block_hint,RegExpPrototypeTest,54,55,0
-block_hint,RegExpPrototypeTest,8,9,1
-block_hint,RegExpPrototypeTest,93,94,1
-block_hint,RegExpPrototypeTest,56,57,0
-block_hint,RegExpPrototypeTest,10,11,1
-block_hint,RegExpPrototypeTest,141,142,1
-block_hint,RegExpPrototypeTest,126,127,1
-block_hint,RegExpPrototypeTest,85,86,1
-block_hint,RegExpPrototypeTest,14,15,1
-block_hint,RegExpPrototypeTest,99,100,0
-block_hint,RegExpPrototypeTest,59,60,0
-block_hint,RegExpPrototypeTest,73,74,0
-block_hint,RegExpPrototypeTest,42,43,0
-block_hint,RegExpPrototypeTest,154,155,0
-block_hint,RegExpPrototypeTest,152,153,1
-block_hint,RegExpPrototypeTest,132,133,1
-block_hint,RegExpPrototypeTest,87,88,1
-block_hint,RegExpPrototypeTest,75,76,0
-block_hint,RegExpPrototypeTest,29,30,0
-block_hint,RegExpPrototypeTest,37,38,1
-block_hint,RegExpPrototypeTest,65,66,1
-block_hint,RegExpPrototypeTestFast,48,49,1
-block_hint,RegExpPrototypeTestFast,7,8,1
-block_hint,RegExpPrototypeTestFast,56,57,0
-block_hint,RegExpPrototypeTestFast,36,37,0
-block_hint,RegExpPrototypeTestFast,44,45,0
-block_hint,RegExpPrototypeTestFast,75,76,0
-block_hint,RegExpPrototypeTestFast,73,74,1
-block_hint,RegExpPrototypeTestFast,66,67,1
-block_hint,RegExpPrototypeTestFast,50,51,1
-block_hint,RegExpPrototypeTestFast,46,47,0
-block_hint,RegExpPrototypeTestFast,19,20,0
-block_hint,RegExpPrototypeTestFast,26,27,1
-block_hint,RegExpPrototypeTestFast,42,43,1
-block_hint,StringPrototypeEndsWith,288,289,1
-block_hint,StringPrototypeEndsWith,271,272,0
-block_hint,StringPrototypeEndsWith,251,252,1
-block_hint,StringPrototypeEndsWith,235,236,1
-block_hint,StringPrototypeEndsWith,174,175,1
-block_hint,StringPrototypeEndsWith,278,279,1
-block_hint,StringPrototypeEndsWith,267,268,1
-block_hint,StringPrototypeEndsWith,253,254,1
-block_hint,StringPrototypeEndsWith,244,245,1
-block_hint,StringPrototypeEndsWith,179,180,1
-block_hint,StringPrototypeEndsWith,29,30,0
-block_hint,StringPrototypeEndsWith,68,69,0
-block_hint,StringPrototypeEndsWith,70,71,0
-block_hint,StringPrototypeEndsWith,185,186,1
-block_hint,StringPrototypeEndsWith,84,85,0
-block_hint,StringPrototypeEndsWith,86,87,0
-block_hint,StringPrototypeEndsWith,164,165,0
-block_hint,StringPrototypeEndsWith,47,48,0
-block_hint,StringPrototypeEndsWith,144,145,0
-block_hint,StringPrototypeEndsWith,35,36,0
-block_hint,StringPrototypeEndsWith,49,50,0
-block_hint,StringPrototypeEndsWith,116,117,0
-block_hint,StringPrototypeIndexOf,39,40,1
-block_hint,StringPrototypeIndexOf,36,37,0
-block_hint,StringPrototypeIndexOf,19,20,1
-block_hint,StringPrototypeIndexOf,8,9,1
-block_hint,StringPrototypeIndexOf,28,29,1
-block_hint,StringPrototypeIndexOf,21,22,1
-block_hint,StringPrototypeIndexOf,33,34,0
-block_hint,StringPrototypeIndexOf,24,25,0
-block_hint,StringPrototypeIndexOf,11,12,0
-block_hint,StringPrototypeIterator,15,16,1
-block_hint,StringPrototypeIterator,12,13,1
-block_hint,StringPrototypeIterator,10,11,1
-block_hint,StringPrototypeIterator,3,4,1
-block_hint,StringPrototypeIterator,8,9,1
-block_hint,StringIteratorPrototypeNext,56,57,1
-block_hint,StringIteratorPrototypeNext,38,39,1
-block_hint,StringIteratorPrototypeNext,40,41,1
-block_hint,StringIteratorPrototypeNext,13,14,0
-block_hint,StringIteratorPrototypeNext,74,75,0
-block_hint,StringIteratorPrototypeNext,64,65,1
-block_hint,StringIteratorPrototypeNext,54,55,0
-block_hint,StringIteratorPrototypeNext,61,62,1
-block_hint,StringIteratorPrototypeNext,50,51,1
-block_hint,StringIteratorPrototypeNext,11,12,1
-block_hint,StringIteratorPrototypeNext,20,21,1
-block_hint,StringIteratorPrototypeNext,9,10,1
-block_hint,StringIteratorPrototypeNext,17,18,1
-block_hint,StringPrototypeMatch,67,68,1
-block_hint,StringPrototypeMatch,39,40,0
-block_hint,StringPrototypeMatch,99,100,1
-block_hint,StringPrototypeMatch,88,89,0
-block_hint,StringPrototypeMatch,69,70,1
-block_hint,StringPrototypeMatch,49,50,0
-block_hint,StringPrototypeMatch,6,7,1
-block_hint,StringPrototypeMatch,71,72,1
-block_hint,StringPrototypeMatch,51,52,0
-block_hint,StringPrototypeMatch,8,9,1
-block_hint,StringPrototypeMatch,83,84,1
-block_hint,StringPrototypeMatch,75,76,1
-block_hint,StringPrototypeMatch,43,44,1
-block_hint,StringPrototypeSearch,67,68,1
-block_hint,StringPrototypeSearch,39,40,0
-block_hint,StringPrototypeSearch,99,100,1
-block_hint,StringPrototypeSearch,88,89,0
-block_hint,StringPrototypeSearch,69,70,1
-block_hint,StringPrototypeSearch,49,50,0
-block_hint,StringPrototypeSearch,6,7,1
-block_hint,StringPrototypeSearch,71,72,1
-block_hint,StringPrototypeSearch,51,52,0
-block_hint,StringPrototypeSearch,8,9,1
-block_hint,StringPrototypeSearch,83,84,1
-block_hint,StringPrototypeSearch,75,76,1
-block_hint,StringPrototypeSearch,43,44,1
-block_hint,StringPrototypeSlice,179,180,1
-block_hint,StringPrototypeSlice,140,141,1
-block_hint,StringPrototypeSlice,107,108,1
-block_hint,StringPrototypeSlice,201,202,0
-block_hint,StringPrototypeSlice,187,188,0
-block_hint,StringPrototypeSlice,211,212,0
-block_hint,StringPrototypeSlice,208,209,0
-block_hint,StringPrototypeSlice,195,196,1
-block_hint,StringPrototypeSlice,191,192,1
-block_hint,StringPrototypeSlice,199,200,0
-block_hint,StringPrototypeSlice,182,183,0
-block_hint,StringPrototypeSlice,142,143,1
-block_hint,StringPrototypeSlice,31,32,0
-block_hint,StringPrototypeSlice,68,69,1
-block_hint,StringPrototypeSlice,63,64,1
-block_hint,StringPrototypeSlice,61,62,1
-block_hint,StringPrototypeSlice,128,129,0
-block_hint,StringPrototypeSlice,91,92,1
-block_hint,StringPrototypeSlice,21,22,0
-block_hint,StringPrototypeSlice,23,24,0
-block_hint,StringPrototypeSlice,154,155,1
-block_hint,StringPrototypeSlice,132,133,1
-block_hint,StringPrototypeSlice,40,41,0
-block_hint,StringPrototypeSlice,89,90,1
-block_hint,StringPrototypeSlice,19,20,0
-block_hint,StringPrototypeSlice,156,157,1
-block_hint,StringPrototypeSlice,134,135,1
-block_hint,StringPrototypeSlice,44,45,0
-block_hint,StringPrototypeSlice,166,167,0
-block_hint,StringPrototypeSlice,152,153,0
-block_hint,StringPrototypeSlice,36,37,1
-block_hint,StringPrototypeSlice,33,34,0
-block_hint,StringPrototypeStartsWith,288,289,1
-block_hint,StringPrototypeStartsWith,271,272,0
-block_hint,StringPrototypeStartsWith,251,252,1
-block_hint,StringPrototypeStartsWith,235,236,1
-block_hint,StringPrototypeStartsWith,174,175,1
-block_hint,StringPrototypeStartsWith,278,279,1
-block_hint,StringPrototypeStartsWith,267,268,1
-block_hint,StringPrototypeStartsWith,253,254,1
-block_hint,StringPrototypeStartsWith,244,245,1
-block_hint,StringPrototypeStartsWith,179,180,1
-block_hint,StringPrototypeStartsWith,29,30,0
-block_hint,StringPrototypeStartsWith,68,69,0
-block_hint,StringPrototypeStartsWith,70,71,0
-block_hint,StringPrototypeStartsWith,185,186,1
-block_hint,StringPrototypeStartsWith,84,85,0
-block_hint,StringPrototypeStartsWith,86,87,0
-block_hint,StringPrototypeStartsWith,164,165,0
-block_hint,StringPrototypeStartsWith,47,48,0
-block_hint,StringPrototypeStartsWith,35,36,0
-block_hint,StringPrototypeStartsWith,49,50,1
-block_hint,StringPrototypeStartsWith,116,117,1
-block_hint,StringPrototypeSubstr,175,176,1
-block_hint,StringPrototypeSubstr,145,146,1
-block_hint,StringPrototypeSubstr,107,108,1
-block_hint,StringPrototypeSubstr,194,195,0
-block_hint,StringPrototypeSubstr,183,184,0
-block_hint,StringPrototypeSubstr,204,205,0
-block_hint,StringPrototypeSubstr,201,202,0
-block_hint,StringPrototypeSubstr,178,179,0
-block_hint,StringPrototypeSubstr,160,161,0
-block_hint,StringPrototypeSubstr,120,121,0
-block_hint,StringPrototypeSubstr,31,32,0
-block_hint,StringPrototypeSubstr,61,62,1
-block_hint,StringPrototypeSubstr,133,134,0
-block_hint,StringPrototypeSubstr,89,90,1
-block_hint,StringPrototypeSubstr,19,20,0
-block_hint,StringPrototypeSubstr,153,154,1
-block_hint,StringPrototypeSubstr,139,140,1
-block_hint,StringPrototypeSubstr,44,45,0
-block_hint,StringPrototypeSubstr,165,166,0
-block_hint,StringPrototypeSubstr,36,37,1
-block_hint,StringPrototypeSubstr,33,34,0
-block_hint,StringPrototypeSubstring,159,160,1
-block_hint,StringPrototypeSubstring,131,132,1
-block_hint,StringPrototypeSubstring,103,104,1
-block_hint,StringPrototypeSubstring,194,195,0
-block_hint,StringPrototypeSubstring,181,182,0
-block_hint,StringPrototypeSubstring,198,199,0
-block_hint,StringPrototypeSubstring,192,193,0
-block_hint,StringPrototypeSubstring,183,184,0
-block_hint,StringPrototypeSubstring,179,180,0
-block_hint,StringPrototypeSubstring,172,173,0
-block_hint,StringPrototypeSubstring,163,164,0
-block_hint,StringPrototypeSubstring,135,136,0
-block_hint,StringPrototypeSubstring,93,94,0
-block_hint,StringPrototypeSubstring,65,66,1
-block_hint,StringPrototypeSubstring,105,106,1
-block_hint,StringPrototypeSubstring,58,59,1
-block_hint,StringPrototypeSubstring,119,120,0
-block_hint,StringPrototypeSubstring,115,116,1
-block_hint,StringPrototypeSubstring,85,86,1
-block_hint,StringPrototypeSubstring,17,18,0
-block_hint,StringPrototypeSubstring,141,142,1
-block_hint,StringPrototypeSubstring,125,126,1
-block_hint,StringPrototypeSubstring,42,43,0
-block_hint,StringPrototypeSubstring,54,55,0
-block_hint,StringPrototypeSubstring,150,151,0
-block_hint,StringPrototypeSubstring,108,109,1
-block_hint,StringPrototypeSubstring,34,35,1
-block_hint,StringPrototypeTrim,470,471,1
-block_hint,StringPrototypeTrim,271,272,1
-block_hint,StringPrototypeTrim,186,187,1
-block_hint,StringPrototypeTrim,188,189,0
-block_hint,StringPrototypeTrim,444,445,0
-block_hint,StringPrototypeTrim,273,274,1
-block_hint,StringPrototypeTrim,156,157,0
-block_hint,StringPrototypeTrim,158,159,0
-block_hint,StringPrototypeTrim,251,252,0
-block_hint,StringPrototypeTrim,63,64,1
-block_hint,StringPrototypeTrim,366,367,1
-block_hint,StringPrototypeTrim,83,84,0
-block_hint,StringPrototypeTrim,253,254,0
-block_hint,StringPrototypeTrim,65,66,1
-block_hint,StringPrototypeTrim,392,393,0
-block_hint,StringPrototypeTrim,394,395,1
-block_hint,StringPrototypeTrim,128,129,0
-block_hint,StringPrototypeTrim,85,86,0
-block_hint,StringPrototypeTrim,92,93,0
-block_hint,StringPrototypeTrim,293,294,0
-block_hint,StringPrototypeTrim,178,179,1
-block_hint,StringPrototypeTrim,438,439,0
-block_hint,StringPrototypeTrim,432,433,0
-block_hint,StringPrototypeTrim,257,258,1
-block_hint,StringPrototypeTrim,69,70,0
-block_hint,StringPrototypeTrim,71,72,0
-block_hint,StringPrototypeTrim,480,481,1
-block_hint,StringPrototypeTrim,454,455,1
-block_hint,StringPrototypeTrim,132,133,0
-block_hint,StringPrototypeTrim,152,153,0
-block_hint,StringPrototypeTrim,154,155,0
-block_hint,StringPrototypeTrim,239,240,0
-block_hint,StringPrototypeTrim,47,48,1
-block_hint,StringPrototypeTrim,306,307,1
-block_hint,StringPrototypeTrim,241,242,0
-block_hint,StringPrototypeTrim,49,50,1
-block_hint,StringPrototypeTrim,334,335,1
-block_hint,StringPrototypeTrim,81,82,0
-block_hint,StringPrototypeTrim,87,88,0
-block_hint,StringPrototypeTrim,291,292,1
-block_hint,StringPrototypeTrim,172,173,1
-block_hint,StringPrototypeTrim,436,437,0
-block_hint,StringPrototypeTrim,428,429,1
-block_hint,StringPrototypeTrim,243,244,1
-block_hint,StringPrototypeTrim,51,52,0
-block_hint,StringPrototypeTrim,474,475,1
-block_hint,StringPrototypeTrim,448,449,1
-block_hint,StringPrototypeTrim,112,113,0
-block_hint,StringPrototypeTrim,490,491,0
-block_hint,StringPrototypeTrim,295,296,1
-block_hint,StringPrototypeTrim,97,98,1
-block_hint,StringPrototypeTrim,89,90,0
-block_hint,SymbolPrototypeToString,9,10,1
-block_hint,SymbolPrototypeToString,11,12,1
-block_hint,SymbolPrototypeToString,5,6,0
-block_hint,SymbolPrototypeToString,7,8,1
-block_hint,CreateTypedArray,610,611,0
-block_hint,CreateTypedArray,638,639,0
-block_hint,CreateTypedArray,576,577,0
-block_hint,CreateTypedArray,485,486,0
-block_hint,CreateTypedArray,356,357,1
-block_hint,CreateTypedArray,358,359,1
-block_hint,CreateTypedArray,677,678,0
-block_hint,CreateTypedArray,520,521,1
-block_hint,CreateTypedArray,518,519,1
-block_hint,CreateTypedArray,407,408,1
-block_hint,CreateTypedArray,586,587,0
-block_hint,CreateTypedArray,662,663,0
-block_hint,CreateTypedArray,584,585,0
-block_hint,CreateTypedArray,491,492,0
-block_hint,CreateTypedArray,424,425,0
-block_hint,CreateTypedArray,426,427,0
-block_hint,CreateTypedArray,410,411,0
-block_hint,CreateTypedArray,105,106,1
-block_hint,CreateTypedArray,107,108,1
-block_hint,CreateTypedArray,412,413,1
-block_hint,CreateTypedArray,109,110,1
-block_hint,CreateTypedArray,111,112,1
-block_hint,CreateTypedArray,641,642,0
-block_hint,CreateTypedArray,683,684,1
-block_hint,CreateTypedArray,660,661,1
-block_hint,CreateTypedArray,522,523,0
-block_hint,CreateTypedArray,558,559,1
-block_hint,CreateTypedArray,384,385,0
-block_hint,CreateTypedArray,261,262,0
-block_hint,CreateTypedArray,416,417,0
-block_hint,CreateTypedArray,123,124,1
-block_hint,CreateTypedArray,125,126,1
-block_hint,CreateTypedArray,305,306,1
-block_hint,CreateTypedArray,307,308,1
-block_hint,CreateTypedArray,525,526,0
-block_hint,CreateTypedArray,560,561,1
-block_hint,CreateTypedArray,386,387,0
-block_hint,CreateTypedArray,277,278,0
-block_hint,CreateTypedArray,420,421,0
-block_hint,CreateTypedArray,137,138,1
-block_hint,CreateTypedArray,139,140,1
-block_hint,CreateTypedArray,512,513,0
-block_hint,CreateTypedArray,514,515,0
-block_hint,CreateTypedArray,671,672,0
-block_hint,CreateTypedArray,531,532,1
-block_hint,CreateTypedArray,529,530,1
-block_hint,CreateTypedArray,428,429,1
-block_hint,CreateTypedArray,541,542,0
-block_hint,CreateTypedArray,533,534,0
-block_hint,CreateTypedArray,431,432,0
-block_hint,CreateTypedArray,165,166,1
-block_hint,CreateTypedArray,365,366,0
-block_hint,CreateTypedArray,167,168,1
-block_hint,CreateTypedArray,433,434,1
-block_hint,CreateTypedArray,169,170,1
-block_hint,CreateTypedArray,171,172,1
-block_hint,CreateTypedArray,648,649,0
-block_hint,CreateTypedArray,686,687,1
-block_hint,CreateTypedArray,665,666,1
-block_hint,CreateTypedArray,535,536,0
-block_hint,CreateTypedArray,554,555,1
-block_hint,CreateTypedArray,380,381,0
-block_hint,CreateTypedArray,229,230,0
-block_hint,CreateTypedArray,437,438,0
-block_hint,CreateTypedArray,183,184,1
-block_hint,CreateTypedArray,185,186,1
-block_hint,CreateTypedArray,187,188,1
-block_hint,CreateTypedArray,318,319,1
-block_hint,CreateTypedArray,320,321,1
-block_hint,CreateTypedArray,538,539,0
-block_hint,CreateTypedArray,556,557,1
-block_hint,CreateTypedArray,382,383,0
-block_hint,CreateTypedArray,245,246,0
-block_hint,CreateTypedArray,441,442,0
-block_hint,CreateTypedArray,199,200,1
-block_hint,CreateTypedArray,201,202,1
-block_hint,CreateTypedArray,548,549,0
-block_hint,CreateTypedArray,543,544,0
-block_hint,CreateTypedArray,500,501,0
-block_hint,CreateTypedArray,371,372,0
-block_hint,CreateTypedArray,453,454,1
-block_hint,CreateTypedArray,375,376,1
-block_hint,CreateTypedArray,503,504,0
-block_hint,CreateTypedArray,373,374,1
-block_hint,CreateTypedArray,455,456,0
-block_hint,CreateTypedArray,688,689,0
-block_hint,CreateTypedArray,650,651,0
-block_hint,CreateTypedArray,564,565,1
-block_hint,CreateTypedArray,562,563,1
-block_hint,CreateTypedArray,466,467,1
-block_hint,CreateTypedArray,656,657,0
-block_hint,CreateTypedArray,574,575,0
-block_hint,CreateTypedArray,481,482,0
-block_hint,CreateTypedArray,340,341,1
-block_hint,CreateTypedArray,654,655,0
-block_hint,CreateTypedArray,572,573,0
-block_hint,CreateTypedArray,477,478,0
-block_hint,CreateTypedArray,290,291,0
-block_hint,CreateTypedArray,634,635,0
-block_hint,CreateTypedArray,347,348,0
-block_hint,CreateTypedArray,349,350,0
-block_hint,CreateTypedArray,396,397,0
-block_hint,CreateTypedArray,398,399,0
-block_hint,CreateTypedArray,342,343,1
-block_hint,CreateTypedArray,352,353,0
-block_hint,CreateTypedArray,345,346,0
-block_hint,CreateTypedArray,507,508,0
-block_hint,CreateTypedArray,552,553,1
-block_hint,CreateTypedArray,378,379,0
-block_hint,CreateTypedArray,213,214,0
-block_hint,CreateTypedArray,567,568,0
-block_hint,CreateTypedArray,391,392,0
-block_hint,CreateTypedArray,60,61,1
-block_hint,CreateTypedArray,62,63,1
-block_hint,TypedArrayFrom,234,235,1
-block_hint,TypedArrayFrom,214,215,0
-block_hint,TypedArrayFrom,195,196,1
-block_hint,TypedArrayFrom,154,155,1
-block_hint,TypedArrayFrom,87,88,1
-block_hint,TypedArrayFrom,89,90,1
-block_hint,TypedArrayFrom,184,185,1
-block_hint,TypedArrayFrom,176,177,0
-block_hint,TypedArrayFrom,139,140,0
-block_hint,TypedArrayFrom,100,101,1
-block_hint,TypedArrayFrom,102,103,1
-block_hint,TypedArrayFrom,248,249,1
-block_hint,TypedArrayFrom,250,251,0
-block_hint,TypedArrayFrom,236,237,0
-block_hint,TypedArrayFrom,223,224,1
-block_hint,TypedArrayFrom,225,226,0
-block_hint,TypedArrayFrom,204,205,1
-block_hint,TypedArrayFrom,186,187,1
-block_hint,TypedArrayFrom,164,165,0
-block_hint,TypedArrayFrom,166,167,0
-block_hint,TypedArrayFrom,244,245,0
-block_hint,TypedArrayFrom,217,218,1
-block_hint,TypedArrayFrom,178,179,0
-block_hint,TypedArrayFrom,106,107,1
-block_hint,TypedArrayFrom,108,109,1
-block_hint,TypedArrayFrom,171,172,0
-block_hint,TypedArrayFrom,144,145,0
-block_hint,TypedArrayFrom,118,119,0
-block_hint,TypedArrayFrom,55,56,0
-block_hint,TypedArrayFrom,150,151,0
-block_hint,TypedArrayFrom,57,58,0
-block_hint,TypedArrayFrom,133,134,1
-block_hint,TypedArrayFrom,59,60,0
-block_hint,TypedArrayPrototypeSet,189,190,1
-block_hint,TypedArrayPrototypeSet,104,105,1
-block_hint,TypedArrayPrototypeSet,106,107,1
-block_hint,TypedArrayPrototypeSet,241,242,1
-block_hint,TypedArrayPrototypeSet,274,275,0
-block_hint,TypedArrayPrototypeSet,260,261,0
-block_hint,TypedArrayPrototypeSet,248,249,0
-block_hint,TypedArrayPrototypeSet,216,217,0
-block_hint,TypedArrayPrototypeSet,153,154,0
-block_hint,TypedArrayPrototypeSet,191,192,0
-block_hint,TypedArrayPrototypeSet,193,194,0
-block_hint,TypedArrayPrototypeSet,163,164,0
-block_hint,TypedArrayPrototypeSet,270,271,0
-block_hint,TypedArrayPrototypeSet,257,258,1
-block_hint,TypedArrayPrototypeSet,236,237,1
-block_hint,TypedArrayPrototypeSet,204,205,0
-block_hint,TypedArrayPrototypeSet,206,207,0
-block_hint,TypedArrayPrototypeSet,167,168,0
-block_hint,TypedArrayPrototypeSet,157,158,0
-block_hint,TypedArrayPrototypeSet,123,124,0
-block_hint,TypedArrayPrototypeSet,179,180,1
-block_hint,TypedArrayPrototypeSet,91,92,0
-block_hint,TypedArrayPrototypeSet,81,82,0
-block_hint,TypedArrayPrototypeSet,83,84,0
-block_hint,TypedArrayPrototypeSet,85,86,0
-block_hint,TypedArrayPrototypeSet,87,88,0
-block_hint,TypedArrayPrototypeSet,181,182,0
-block_hint,TypedArrayPrototypeSet,144,145,0
-block_hint,TypedArrayPrototypeSubArray,129,130,1
-block_hint,TypedArrayPrototypeSubArray,82,83,1
-block_hint,TypedArrayPrototypeSubArray,84,85,1
-block_hint,TypedArrayPrototypeSubArray,159,160,1
-block_hint,TypedArrayPrototypeSubArray,151,152,0
-block_hint,TypedArrayPrototypeSubArray,131,132,0
-block_hint,TypedArrayPrototypeSubArray,133,134,0
-block_hint,TypedArrayPrototypeSubArray,210,211,0
-block_hint,TypedArrayPrototypeSubArray,190,191,0
-block_hint,TypedArrayPrototypeSubArray,170,171,0
-block_hint,TypedArrayPrototypeSubArray,218,219,0
-block_hint,TypedArrayPrototypeSubArray,205,206,0
-block_hint,TypedArrayPrototypeSubArray,196,197,0
-block_hint,TypedArrayPrototypeSubArray,186,187,1
-block_hint,TypedArrayPrototypeSubArray,154,155,0
-block_hint,TypedArrayPrototypeSubArray,137,138,0
-block_hint,TypedArrayPrototypeSubArray,165,166,0
-block_hint,TypedArrayPrototypeSubArray,216,217,0
-block_hint,TypedArrayPrototypeSubArray,203,204,0
-block_hint,TypedArrayPrototypeSubArray,192,193,0
-block_hint,TypedArrayPrototypeSubArray,149,150,1
-block_hint,TypedArrayPrototypeSubArray,124,125,0
-block_hint,TypedArrayPrototypeSubArray,102,103,0
-block_hint,TypedArrayPrototypeSubArray,104,105,0
-block_hint,TypedArrayPrototypeSubArray,115,116,0
-block_hint,TypedArrayPrototypeSubArray,63,64,1
-block_hint,TypedArrayPrototypeSubArray,65,66,1
-block_hint,TypedArrayPrototypeSubArray,145,146,1
-block_hint,TypedArrayPrototypeSubArray,80,81,0
-block_hint,TypedArrayPrototypeSubArray,117,118,0
-block_hint,TypedArrayPrototypeSubArray,90,91,1
-block_hint,TypedArrayPrototypeSubArray,92,93,1
-block_hint,TypedArrayPrototypeSubArray,119,120,0
-block_hint,TypedArrayPrototypeSubArray,94,95,1
-block_hint,TypedArrayPrototypeSubArray,96,97,1
-block_hint,TypedArrayPrototypeSubArray,69,70,1
-block_hint,TypedArrayPrototypeSubArray,98,99,1
-block_hint,TypedArrayPrototypeSubArray,100,101,1
-block_hint,TypedArrayPrototypeSubArray,73,74,0
-block_hint,NewSloppyArgumentsElements,44,45,0
-block_hint,NewSloppyArgumentsElements,24,25,0
-block_hint,NewSloppyArgumentsElements,33,34,0
-block_hint,NewSloppyArgumentsElements,14,15,0
-block_hint,NewSloppyArgumentsElements,16,17,0
-block_hint,NewSloppyArgumentsElements,46,47,1
-block_hint,NewSloppyArgumentsElements,36,37,1
-block_hint,NewSloppyArgumentsElements,18,19,0
-block_hint,NewSloppyArgumentsElements,48,49,0
-block_hint,NewStrictArgumentsElements,9,10,0
-block_hint,NewStrictArgumentsElements,20,21,0
-block_hint,NewRestArgumentsElements,25,26,0
-block_hint,NewRestArgumentsElements,11,12,0
-block_hint,NewRestArgumentsElements,16,17,0
-block_hint,NewRestArgumentsElements,5,6,0
-block_hint,NewRestArgumentsElements,7,8,0
-block_hint,NewRestArgumentsElements,23,24,1
-block_hint,NewRestArgumentsElements,19,20,1
-block_hint,NewRestArgumentsElements,9,10,0
-block_hint,NewRestArgumentsElements,21,22,0
-block_hint,FastNewSloppyArguments,41,42,1
-block_hint,FastNewSloppyArguments,43,44,0
-block_hint,FastNewSloppyArguments,101,102,1
-block_hint,FastNewSloppyArguments,81,82,0
-block_hint,FastNewSloppyArguments,47,48,0
-block_hint,FastNewSloppyArguments,19,20,0
-block_hint,FastNewSloppyArguments,21,22,0
-block_hint,FastNewSloppyArguments,71,72,1
-block_hint,FastNewSloppyArguments,55,56,1
-block_hint,FastNewSloppyArguments,23,24,0
-block_hint,FastNewSloppyArguments,73,74,0
-block_hint,FastNewSloppyArguments,45,46,0
-block_hint,FastNewSloppyArguments,13,14,0
-block_hint,FastNewSloppyArguments,15,16,0
-block_hint,FastNewSloppyArguments,75,76,1
-block_hint,FastNewSloppyArguments,59,60,1
-block_hint,FastNewSloppyArguments,17,18,0
-block_hint,FastNewSloppyArguments,61,62,0
-block_hint,FastNewSloppyArguments,27,28,1
-block_hint,FastNewSloppyArguments,29,30,0
-block_hint,FastNewSloppyArguments,31,32,0
-block_hint,FastNewSloppyArguments,77,78,1
-block_hint,FastNewSloppyArguments,63,64,1
-block_hint,FastNewSloppyArguments,33,34,0
-block_hint,FastNewSloppyArguments,35,36,1
-block_hint,FastNewSloppyArguments,53,54,1
-block_hint,FastNewSloppyArguments,25,26,1
-block_hint,FastNewSloppyArguments,51,52,1
-block_hint,FastNewStrictArguments,16,17,1
-block_hint,FastNewStrictArguments,18,19,0
-block_hint,FastNewStrictArguments,20,21,0
-block_hint,FastNewStrictArguments,7,8,0
-block_hint,FastNewStrictArguments,9,10,0
-block_hint,FastNewStrictArguments,31,32,1
-block_hint,FastNewStrictArguments,25,26,1
-block_hint,FastNewStrictArguments,11,12,0
-block_hint,FastNewStrictArguments,27,28,0
-block_hint,FastNewStrictArguments,13,14,1
-block_hint,FastNewStrictArguments,23,24,1
-block_hint,FastNewRestArguments,16,17,1
-block_hint,FastNewRestArguments,18,19,0
-block_hint,FastNewRestArguments,34,35,1
-block_hint,FastNewRestArguments,7,8,1
-block_hint,FastNewRestArguments,21,22,0
-block_hint,FastNewRestArguments,9,10,0
-block_hint,FastNewRestArguments,11,12,0
-block_hint,FastNewRestArguments,32,33,1
-block_hint,FastNewRestArguments,25,26,1
-block_hint,FastNewRestArguments,13,14,0
-block_hint,FastNewRestArguments,27,28,0
-block_hint,FastNewRestArguments,23,24,1
-block_hint,StringSlowFlatten,35,36,1
-block_hint,StringSlowFlatten,20,21,1
-block_hint,StringSlowFlatten,4,5,0
-block_hint,StringSlowFlatten,30,31,1
-block_hint,StringSlowFlatten,22,23,1
-block_hint,StringIndexOf,160,161,0
-block_hint,StringIndexOf,112,113,0
-block_hint,StringIndexOf,125,126,1
-block_hint,StringIndexOf,91,92,0
-block_hint,StringIndexOf,154,155,0
-block_hint,StringIndexOf,117,118,1
-block_hint,StringIndexOf,44,45,0
-block_hint,StringIndexOf,46,47,0
-block_hint,StringIndexOf,133,134,0
-block_hint,StringIndexOf,76,77,0
-block_hint,StringIndexOf,78,79,0
-block_hint,StringIndexOf,72,73,0
-block_hint,StringIndexOf,74,75,0
-block_hint,StringIndexOf,40,41,0
-block_hint,StringIndexOf,42,43,0
-block_hint,StringIndexOf,127,128,1
-block_hint,StringIndexOf,56,57,0
-block_hint,StringIndexOf,58,59,0
-block_hint,Load_FastSmiElements_0,2,3,1
-block_hint,Load_FastObjectElements_0,2,3,1
-block_hint,Store_FastSmiElements_0,2,3,1
-block_hint,Store_FastObjectElements_0,2,3,1
-block_hint,SortCompareDefault,8,9,1
-block_hint,SortCompareDefault,20,21,1
-block_hint,SortCompareDefault,17,18,1
-block_hint,SortCompareDefault,14,15,1
-block_hint,SortCompareDefault,11,12,1
-block_hint,SortCompareDefault,6,7,1
-block_hint,SortCompareUserFn,9,10,0
-block_hint,SortCompareUserFn,5,6,0
-block_hint,Copy,17,18,1
-block_hint,Copy,9,10,1
-block_hint,Copy,11,12,1
-block_hint,Copy,5,6,1
-block_hint,Copy,7,8,1
-block_hint,MergeAt,13,14,1
-block_hint,MergeAt,15,16,1
-block_hint,MergeAt,17,18,1
-block_hint,MergeAt,19,20,1
-block_hint,MergeAt,140,141,0
-block_hint,MergeAt,29,30,1
-block_hint,MergeAt,31,32,0
-block_hint,MergeAt,33,34,1
-block_hint,MergeAt,35,36,1
-block_hint,MergeAt,123,124,0
-block_hint,MergeAt,236,237,1
-block_hint,MergeAt,225,226,1
-block_hint,MergeAt,69,70,1
-block_hint,MergeAt,71,72,1
-block_hint,MergeAt,150,151,1
-block_hint,MergeAt,103,104,0
-block_hint,MergeAt,73,74,1
-block_hint,MergeAt,75,76,1
-block_hint,MergeAt,227,228,0
-block_hint,MergeAt,81,82,1
-block_hint,MergeAt,83,84,1
-block_hint,MergeAt,198,199,0
-block_hint,MergeAt,134,135,0
-block_hint,MergeAt,77,78,1
-block_hint,MergeAt,79,80,1
-block_hint,MergeAt,196,197,1
-block_hint,MergeAt,132,133,0
-block_hint,MergeAt,182,183,1
-block_hint,MergeAt,85,86,1
-block_hint,MergeAt,87,88,1
-block_hint,MergeAt,89,90,1
-block_hint,MergeAt,147,148,0
-block_hint,MergeAt,91,92,1
-block_hint,MergeAt,93,94,1
-block_hint,MergeAt,95,96,1
-block_hint,MergeAt,194,195,1
-block_hint,MergeAt,97,98,1
-block_hint,MergeAt,99,100,1
-block_hint,MergeAt,230,231,1
-block_hint,MergeAt,116,117,0
-block_hint,MergeAt,232,233,1
-block_hint,MergeAt,220,221,1
-block_hint,MergeAt,37,38,1
-block_hint,MergeAt,39,40,1
-block_hint,MergeAt,154,155,1
-block_hint,MergeAt,109,110,0
-block_hint,MergeAt,41,42,1
-block_hint,MergeAt,43,44,1
-block_hint,MergeAt,222,223,0
-block_hint,MergeAt,49,50,1
-block_hint,MergeAt,51,52,1
-block_hint,MergeAt,202,203,0
-block_hint,MergeAt,138,139,0
-block_hint,MergeAt,45,46,1
-block_hint,MergeAt,47,48,1
-block_hint,MergeAt,200,201,1
-block_hint,MergeAt,136,137,0
-block_hint,MergeAt,111,112,0
-block_hint,MergeAt,165,166,1
-block_hint,MergeAt,53,54,1
-block_hint,MergeAt,207,208,0
-block_hint,MergeAt,169,170,0
-block_hint,MergeAt,55,56,1
-block_hint,MergeAt,57,58,1
-block_hint,MergeAt,143,144,1
-block_hint,MergeAt,59,60,1
-block_hint,MergeAt,173,174,0
-block_hint,MergeAt,61,62,1
-block_hint,MergeAt,63,64,1
-block_hint,MergeAt,113,114,0
-block_hint,MergeAt,192,193,1
-block_hint,MergeAt,65,66,1
-block_hint,MergeAt,67,68,1
-block_hint,GallopLeft,11,12,1
-block_hint,GallopLeft,47,48,0
-block_hint,GallopLeft,15,16,1
-block_hint,GallopLeft,63,64,0
-block_hint,GallopLeft,29,30,0
-block_hint,GallopLeft,41,42,0
-block_hint,GallopLeft,13,14,1
-block_hint,GallopLeft,65,66,0
-block_hint,GallopLeft,31,32,0
-block_hint,GallopLeft,39,40,0
-block_hint,GallopLeft,17,18,1
-block_hint,GallopLeft,61,62,0
-block_hint,GallopRight,11,12,1
-block_hint,GallopRight,47,48,0
-block_hint,GallopRight,35,36,1
-block_hint,GallopRight,15,16,1
-block_hint,GallopRight,63,64,0
-block_hint,GallopRight,29,30,0
-block_hint,GallopRight,41,42,0
-block_hint,GallopRight,13,14,1
-block_hint,GallopRight,65,66,0
-block_hint,GallopRight,31,32,0
-block_hint,GallopRight,39,40,0
-block_hint,GallopRight,17,18,1
-block_hint,GallopRight,61,62,0
-block_hint,ArrayTimSort,120,121,0
-block_hint,ArrayTimSort,240,241,0
-block_hint,ArrayTimSort,227,228,0
-block_hint,ArrayTimSort,122,123,0
-block_hint,ArrayTimSort,163,164,0
-block_hint,ArrayTimSort,140,141,0
-block_hint,ArrayTimSort,33,34,1
-block_hint,ArrayTimSort,93,94,0
-block_hint,ArrayTimSort,95,96,0
-block_hint,ArrayTimSort,143,144,0
-block_hint,ArrayTimSort,35,36,1
-block_hint,ArrayTimSort,37,38,1
-block_hint,ArrayTimSort,214,215,0
-block_hint,ArrayTimSort,145,146,1
-block_hint,ArrayTimSort,39,40,1
-block_hint,ArrayTimSort,218,219,0
-block_hint,ArrayTimSort,216,217,0
-block_hint,ArrayTimSort,41,42,1
-block_hint,ArrayTimSort,43,44,1
-block_hint,ArrayTimSort,45,46,1
-block_hint,ArrayTimSort,134,135,0
-block_hint,ArrayTimSort,47,48,1
-block_hint,ArrayTimSort,49,50,1
-block_hint,ArrayTimSort,222,223,0
-block_hint,ArrayTimSort,51,52,1
-block_hint,ArrayTimSort,53,54,1
-block_hint,ArrayTimSort,55,56,1
-block_hint,ArrayTimSort,57,58,1
-block_hint,ArrayTimSort,59,60,1
-block_hint,ArrayTimSort,61,62,1
-block_hint,ArrayTimSort,63,64,1
-block_hint,ArrayTimSort,65,66,1
-block_hint,ArrayTimSort,67,68,1
-block_hint,ArrayTimSort,69,70,1
-block_hint,ArrayTimSort,71,72,1
-block_hint,ArrayTimSort,157,158,1
-block_hint,ArrayTimSort,73,74,1
-block_hint,ArrayTimSort,75,76,1
-block_hint,ArrayTimSort,204,205,0
-block_hint,ArrayTimSort,77,78,1
-block_hint,ArrayTimSort,79,80,1
-block_hint,ArrayTimSort,209,210,0
-block_hint,ArrayTimSort,81,82,1
-block_hint,ArrayTimSort,83,84,1
-block_hint,ArrayTimSort,186,187,0
-block_hint,ArrayTimSort,236,237,1
-block_hint,ArrayTimSort,238,239,1
-block_hint,ArrayTimSort,211,212,1
-block_hint,ArrayTimSort,161,162,1
-block_hint,ArrayTimSort,85,86,1
-block_hint,ArrayTimSort,243,244,1
-block_hint,ArrayTimSort,230,231,0
-block_hint,ArrayTimSort,188,189,1
-block_hint,ArrayTimSort,138,139,0
-block_hint,ArrayTimSort,87,88,1
-block_hint,ArrayTimSort,113,114,0
-block_hint,ArrayTimSort,89,90,0
-block_hint,ArrayPrototypeSort,106,107,1
-block_hint,ArrayPrototypeSort,80,81,0
-block_hint,ArrayPrototypeSort,39,40,1
-block_hint,ArrayPrototypeSort,70,71,0
-block_hint,ArrayPrototypeSort,41,42,1
-block_hint,ArrayPrototypeSort,82,83,1
-block_hint,ArrayPrototypeSort,84,85,1
-block_hint,ArrayPrototypeSort,63,64,0
-block_hint,ArrayPrototypeSort,27,28,0
-block_hint,ArrayPrototypeSort,120,121,0
-block_hint,ArrayPrototypeSort,101,102,1
-block_hint,ArrayPrototypeSort,73,74,1
-block_hint,ArrayPrototypeSort,51,52,1
-block_hint,ArrayPrototypeSort,15,16,1
-block_hint,ArrayPrototypeSort,95,96,1
-block_hint,ArrayPrototypeSort,75,76,0
-block_hint,ArrayPrototypeSort,53,54,0
-block_hint,ArrayPrototypeSort,136,137,0
-block_hint,ArrayPrototypeSort,139,140,0
-block_hint,ArrayPrototypeSort,130,131,0
-block_hint,ArrayPrototypeSort,122,123,0
-block_hint,ArrayPrototypeSort,103,104,0
-block_hint,ArrayPrototypeSort,114,115,0
-block_hint,ArrayPrototypeSort,117,118,1
-block_hint,ArrayPrototypeSort,77,78,1
-block_hint,ArrayPrototypeSort,33,34,0
-block_hint,ArrayPrototypeSort,98,99,1
-block_hint,ArrayPrototypeSort,91,92,1
-block_hint,ArrayPrototypeSort,56,57,1
-block_hint,StringFastLocaleCompare,315,316,1
-block_hint,StringFastLocaleCompare,239,240,0
-block_hint,StringFastLocaleCompare,303,304,1
-block_hint,StringFastLocaleCompare,156,157,0
-block_hint,StringFastLocaleCompare,158,159,0
-block_hint,StringFastLocaleCompare,267,268,1
-block_hint,StringFastLocaleCompare,106,107,0
-block_hint,StringFastLocaleCompare,307,308,1
-block_hint,StringFastLocaleCompare,172,173,0
-block_hint,StringFastLocaleCompare,174,175,0
-block_hint,StringFastLocaleCompare,109,110,0
-block_hint,StringFastLocaleCompare,211,212,1
-block_hint,StringFastLocaleCompare,271,272,1
-block_hint,StringFastLocaleCompare,276,277,0
-block_hint,StringFastLocaleCompare,253,254,1
-block_hint,StringFastLocaleCompare,73,74,0
-block_hint,StringFastLocaleCompare,274,275,1
-block_hint,StringFastLocaleCompare,116,117,0
-block_hint,StringFastLocaleCompare,77,78,1
-block_hint,CanUseSameAccessor_FastObjectElements_0,2,3,1
-block_hint,CanUseSameAccessor_FastObjectElements_0,4,5,1
-block_hint,StringPrototypeToLowerCaseIntl,10,11,1
-block_hint,StringPrototypeToLowerCaseIntl,7,8,1
-block_hint,StringPrototypeToLowerCaseIntl,5,6,1
-block_hint,StringToLowerCaseIntl,23,24,1
-block_hint,StringToLowerCaseIntl,25,26,0
-block_hint,StringToLowerCaseIntl,34,35,1
-block_hint,StringToLowerCaseIntl,7,8,0
-block_hint,StringToLowerCaseIntl,43,44,1
-block_hint,StringToLowerCaseIntl,41,42,1
-block_hint,StringToLowerCaseIntl,19,20,0
-block_hint,StringToLowerCaseIntl,39,40,0
-block_hint,StringToLowerCaseIntl,14,15,0
-block_hint,LdaContextSlotHandler,3,4,1
-block_hint,LdaContextSlotHandler,5,6,1
-block_hint,LdaImmutableContextSlotHandler,3,4,1
-block_hint,LdaImmutableContextSlotHandler,5,6,1
-block_hint,LdaCurrentContextSlotHandler,2,3,1
-block_hint,LdaImmutableCurrentContextSlotHandler,2,3,1
-block_hint,TestTypeOfHandler,7,8,1
-block_hint,TestTypeOfHandler,15,16,0
-block_hint,TestTypeOfHandler,23,24,0
-block_hint,TestTypeOfHandler,31,32,1
-block_hint,TestTypeOfHandler,50,51,0
-block_hint,TestTypeOfHandler,35,36,0
-block_hint,LdaGlobalHandler,7,8,1
-block_hint,LdaGlobalHandler,9,10,1
-block_hint,LdaGlobalHandler,11,12,1
-block_hint,LdaGlobalHandler,13,14,1
-block_hint,LdaGlobalHandler,183,184,0
-block_hint,LdaGlobalHandler,105,106,0
-block_hint,LdaGlobalHandler,109,110,1
-block_hint,StaContextSlotHandler,5,6,1
-block_hint,StaCurrentContextSlotHandler,2,3,1
-block_hint,LdaLookupGlobalSlotHandler,13,14,1
-block_hint,LdaLookupGlobalSlotHandler,125,126,0
-block_hint,LdaLookupGlobalSlotHandler,15,16,1
-block_hint,GetNamedPropertyHandler,372,373,1
-block_hint,GetNamedPropertyHandler,216,217,0
-block_hint,GetNamedPropertyHandler,77,78,0
-block_hint,GetNamedPropertyHandler,35,36,1
-block_hint,GetNamedPropertyHandler,313,314,0
-block_hint,GetNamedPropertyHandler,339,340,0
-block_hint,GetNamedPropertyHandler,218,219,1
-block_hint,GetNamedPropertyHandler,290,291,0
-block_hint,GetNamedPropertyHandler,220,221,0
-block_hint,GetNamedPropertyHandler,294,295,1
-block_hint,GetNamedPropertyHandler,39,40,0
-block_hint,GetNamedPropertyHandler,98,99,1
-block_hint,GetNamedPropertyHandler,347,348,0
-block_hint,GetNamedPropertyHandler,242,243,0
-block_hint,GetNamedPropertyHandler,154,155,0
-block_hint,GetNamedPropertyHandler,120,121,1
-block_hint,GetNamedPropertyHandler,49,50,0
-block_hint,GetNamedPropertyHandler,87,88,0
-block_hint,GetNamedPropertyHandler,25,26,1
-block_hint,GetNamedPropertyHandler,144,145,0
-block_hint,GetNamedPropertyHandler,65,66,0
-block_hint,GetNamedPropertyHandler,303,304,1
-block_hint,GetNamedPropertyHandler,102,103,0
-block_hint,GetNamedPropertyHandler,248,249,1
-block_hint,GetNamedPropertyHandler,250,251,1
-block_hint,GetNamedPropertyHandler,244,245,1
-block_hint,GetNamedPropertyHandler,246,247,1
-block_hint,GetNamedPropertyHandler,164,165,1
-block_hint,AddHandler,72,73,0
-block_hint,AddHandler,45,46,0
-block_hint,AddHandler,32,33,1
-block_hint,AddHandler,118,119,0
-block_hint,AddHandler,81,82,1
-block_hint,AddHandler,48,49,1
-block_hint,AddHandler,103,104,1
-block_hint,AddHandler,52,53,1
-block_hint,AddHandler,75,76,1
-block_hint,AddHandler,24,25,1
-block_hint,SubHandler,42,43,0
-block_hint,SubHandler,27,28,1
-block_hint,SubHandler,78,79,1
-block_hint,SubHandler,98,99,1
-block_hint,SubHandler,80,81,1
-block_hint,SubHandler,56,57,1
-block_hint,SubHandler,21,22,1
-block_hint,MulHandler,106,107,1
-block_hint,MulHandler,98,99,1
-block_hint,MulHandler,30,31,1
-block_hint,MulHandler,112,113,1
-block_hint,MulHandler,91,92,1
-block_hint,MulHandler,59,60,1
-block_hint,MulHandler,23,24,1
-block_hint,DivHandler,109,110,0
-block_hint,DivHandler,90,91,0
-block_hint,DivHandler,63,64,1
-block_hint,DivHandler,33,34,1
-block_hint,DivHandler,121,122,1
-block_hint,DivHandler,96,97,1
-block_hint,DivHandler,66,67,1
-block_hint,DivHandler,23,24,1
-block_hint,ModHandler,129,130,0
-block_hint,ModHandler,118,119,0
-block_hint,ModHandler,92,93,1
-block_hint,ModHandler,87,88,1
-block_hint,ModHandler,50,51,0
-block_hint,ModHandler,33,34,1
-block_hint,BitwiseOrHandler,42,43,0
-block_hint,BitwiseOrHandler,30,31,1
-block_hint,BitwiseOrHandler,8,9,1
-block_hint,BitwiseOrHandler,56,57,1
-block_hint,BitwiseOrHandler,60,61,1
-block_hint,BitwiseOrHandler,24,25,1
-block_hint,BitwiseXorHandler,32,33,1
-block_hint,BitwiseXorHandler,56,57,1
-block_hint,BitwiseXorHandler,60,61,1
-block_hint,BitwiseXorHandler,24,25,1
-block_hint,BitwiseAndHandler,32,33,1
-block_hint,BitwiseAndHandler,56,57,1
-block_hint,BitwiseAndHandler,60,61,1
-block_hint,BitwiseAndHandler,24,25,1
-block_hint,ShiftLeftHandler,10,11,0
-block_hint,ShiftLeftHandler,60,61,1
-block_hint,ShiftLeftHandler,24,25,1
-block_hint,ShiftRightHandler,32,33,1
-block_hint,ShiftRightHandler,10,11,0
-block_hint,ShiftRightHandler,58,59,0
-block_hint,ShiftRightHandler,39,40,0
-block_hint,ShiftRightHandler,24,25,1
-block_hint,ShiftRightLogicalHandler,10,11,0
-block_hint,ShiftRightLogicalHandler,58,59,0
-block_hint,ShiftRightLogicalHandler,39,40,0
-block_hint,ShiftRightLogicalHandler,24,25,1
-block_hint,AddSmiHandler,27,28,0
-block_hint,AddSmiHandler,19,20,0
-block_hint,AddSmiHandler,14,15,1
-block_hint,SubSmiHandler,25,26,0
-block_hint,SubSmiHandler,15,16,1
-block_hint,MulSmiHandler,52,53,0
-block_hint,MulSmiHandler,39,40,0
-block_hint,MulSmiHandler,41,42,0
-block_hint,MulSmiHandler,22,23,0
-block_hint,MulSmiHandler,13,14,1
-block_hint,DivSmiHandler,45,46,0
-block_hint,DivSmiHandler,52,53,0
-block_hint,DivSmiHandler,40,41,0
-block_hint,DivSmiHandler,29,30,1
-block_hint,DivSmiHandler,7,8,0
-block_hint,DivSmiHandler,13,14,1
-block_hint,ModSmiHandler,42,43,1
-block_hint,ModSmiHandler,37,38,1
-block_hint,ModSmiHandler,22,23,0
-block_hint,ModSmiHandler,13,14,1
-block_hint,BitwiseOrSmiHandler,31,32,1
-block_hint,BitwiseOrSmiHandler,37,38,1
-block_hint,BitwiseOrSmiHandler,18,19,1
-block_hint,BitwiseAndSmiHandler,6,7,0
-block_hint,BitwiseAndSmiHandler,18,19,1
-block_hint,ShiftLeftSmiHandler,44,45,1
-block_hint,ShiftLeftSmiHandler,34,35,1
-block_hint,ShiftLeftSmiHandler,46,47,1
-block_hint,ShiftLeftSmiHandler,18,19,1
-block_hint,ShiftRightSmiHandler,31,32,1
-block_hint,ShiftRightSmiHandler,35,36,0
-block_hint,ShiftRightSmiHandler,29,30,0
-block_hint,ShiftRightSmiHandler,18,19,1
-block_hint,ShiftRightLogicalSmiHandler,40,41,0
-block_hint,ShiftRightLogicalSmiHandler,30,31,0
-block_hint,ShiftRightLogicalSmiHandler,34,35,1
-block_hint,ShiftRightLogicalSmiHandler,42,43,0
-block_hint,ShiftRightLogicalSmiHandler,32,33,0
-block_hint,ShiftRightLogicalSmiHandler,18,19,1
-block_hint,IncHandler,27,28,0
-block_hint,IncHandler,23,24,0
-block_hint,IncHandler,18,19,1
-block_hint,DecHandler,27,28,0
-block_hint,DecHandler,23,24,0
-block_hint,DecHandler,18,19,1
-block_hint,NegateHandler,26,27,1
-block_hint,NegateHandler,24,25,1
-block_hint,NegateHandler,14,15,1
-block_hint,ToBooleanLogicalNotHandler,15,16,0
-block_hint,ToBooleanLogicalNotHandler,21,22,0
-block_hint,ToBooleanLogicalNotHandler,7,8,0
-block_hint,TypeOfHandler,20,21,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,12,13,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,6,7,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,14,15,1
-block_hint,FindNonDefaultConstructorOrConstructHandler,16,17,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,4,5,1
-block_hint,FindNonDefaultConstructorOrConstructHandler,18,19,1
-block_hint,CallAnyReceiverHandler,21,22,1
-block_hint,CallProperty0Handler,7,8,1
-block_hint,CallProperty0Handler,62,63,0
-block_hint,CallProperty0Handler,14,15,1
-block_hint,CallProperty0Handler,16,17,0
-block_hint,CallProperty0Handler,72,73,0
-block_hint,CallProperty0Handler,55,56,1
-block_hint,CallProperty1Handler,21,22,1
-block_hint,CallProperty1Handler,86,87,0
-block_hint,CallProperty1Handler,83,84,0
-block_hint,CallProperty1Handler,64,65,0
-block_hint,CallProperty1Handler,35,36,1
-block_hint,CallProperty1Handler,70,71,1
-block_hint,CallProperty1Handler,51,52,0
-block_hint,CallProperty1Handler,7,8,1
-block_hint,CallProperty1Handler,62,63,0
-block_hint,CallProperty1Handler,14,15,1
-block_hint,CallProperty1Handler,16,17,0
-block_hint,CallProperty1Handler,72,73,0
-block_hint,CallProperty1Handler,55,56,1
-block_hint,CallProperty2Handler,23,24,0
-block_hint,CallProperty2Handler,86,87,0
-block_hint,CallProperty2Handler,83,84,0
-block_hint,CallProperty2Handler,64,65,0
-block_hint,CallProperty2Handler,5,6,1
-block_hint,CallProperty2Handler,47,48,1
-block_hint,CallProperty2Handler,25,26,1
-block_hint,CallProperty2Handler,7,8,1
-block_hint,CallProperty2Handler,14,15,1
-block_hint,CallProperty2Handler,16,17,0
-block_hint,CallProperty2Handler,72,73,0
-block_hint,CallProperty2Handler,55,56,1
-block_hint,CallUndefinedReceiverHandler,86,87,0
-block_hint,CallUndefinedReceiverHandler,83,84,0
-block_hint,CallUndefinedReceiverHandler,64,65,0
-block_hint,CallUndefinedReceiverHandler,35,36,1
-block_hint,CallUndefinedReceiverHandler,70,71,1
-block_hint,CallUndefinedReceiverHandler,51,52,0
-block_hint,CallUndefinedReceiverHandler,29,30,1
-block_hint,CallUndefinedReceiver0Handler,86,87,0
-block_hint,CallUndefinedReceiver0Handler,83,84,0
-block_hint,CallUndefinedReceiver0Handler,64,65,0
-block_hint,CallUndefinedReceiver0Handler,35,36,1
-block_hint,CallUndefinedReceiver0Handler,70,71,1
-block_hint,CallUndefinedReceiver0Handler,51,52,0
-block_hint,CallUndefinedReceiver0Handler,29,30,1
-block_hint,CallUndefinedReceiver1Handler,86,87,0
-block_hint,CallUndefinedReceiver1Handler,83,84,0
-block_hint,CallUndefinedReceiver1Handler,64,65,0
-block_hint,CallUndefinedReceiver1Handler,35,36,1
-block_hint,CallUndefinedReceiver1Handler,70,71,1
-block_hint,CallUndefinedReceiver1Handler,51,52,0
-block_hint,CallUndefinedReceiver1Handler,29,30,1
-block_hint,CallUndefinedReceiver1Handler,7,8,1
-block_hint,CallUndefinedReceiver1Handler,62,63,0
-block_hint,CallUndefinedReceiver1Handler,14,15,1
-block_hint,CallUndefinedReceiver1Handler,16,17,0
-block_hint,CallUndefinedReceiver1Handler,72,73,0
-block_hint,CallUndefinedReceiver1Handler,55,56,1
-block_hint,CallUndefinedReceiver2Handler,23,24,0
-block_hint,CallUndefinedReceiver2Handler,86,87,0
-block_hint,CallUndefinedReceiver2Handler,83,84,0
-block_hint,CallUndefinedReceiver2Handler,64,65,0
-block_hint,CallUndefinedReceiver2Handler,35,36,1
-block_hint,CallUndefinedReceiver2Handler,70,71,1
-block_hint,CallUndefinedReceiver2Handler,51,52,0
-block_hint,CallUndefinedReceiver2Handler,29,30,1
-block_hint,CallWithSpreadHandler,23,24,1
-block_hint,ConstructHandler,52,53,0
-block_hint,ConstructHandler,41,42,1
-block_hint,ConstructHandler,24,25,1
-block_hint,ConstructHandler,15,16,1
-block_hint,ConstructHandler,3,4,1
-block_hint,ConstructHandler,39,40,1
-block_hint,TestEqualHandler,103,104,0
-block_hint,TestEqualHandler,25,26,1
-block_hint,TestEqualHandler,72,73,0
-block_hint,TestEqualHandler,79,80,1
-block_hint,TestEqualHandler,27,28,1
-block_hint,TestEqualHandler,85,86,0
-block_hint,TestEqualHandler,114,115,0
-block_hint,TestEqualHandler,19,20,1
-block_hint,TestEqualStrictHandler,82,83,0
-block_hint,TestEqualStrictHandler,53,54,1
-block_hint,TestEqualStrictHandler,66,67,0
-block_hint,TestEqualStrictHandler,59,60,1
-block_hint,TestEqualStrictHandler,41,42,1
-block_hint,TestEqualStrictHandler,61,62,0
-block_hint,TestEqualStrictHandler,55,56,1
-block_hint,TestEqualStrictHandler,47,48,0
-block_hint,TestEqualStrictHandler,72,73,0
-block_hint,TestEqualStrictHandler,49,50,0
-block_hint,TestEqualStrictHandler,7,8,1
-block_hint,TestLessThanHandler,41,42,0
-block_hint,TestLessThanHandler,63,64,0
-block_hint,TestLessThanHandler,65,66,1
-block_hint,TestLessThanHandler,49,50,1
-block_hint,TestLessThanHandler,9,10,1
-block_hint,TestGreaterThanHandler,41,42,0
-block_hint,TestGreaterThanHandler,45,46,1
-block_hint,TestGreaterThanHandler,9,10,1
-block_hint,TestLessThanOrEqualHandler,41,42,0
-block_hint,TestLessThanOrEqualHandler,9,10,1
-block_hint,TestGreaterThanOrEqualHandler,41,42,0
-block_hint,TestGreaterThanOrEqualHandler,9,10,1
-block_hint,TestInstanceOfHandler,17,18,1
-block_hint,TestInstanceOfHandler,19,20,1
-block_hint,TestInstanceOfHandler,4,5,1
-block_hint,TestInstanceOfHandler,21,22,1
-block_hint,ToNumericHandler,12,13,0
-block_hint,ToNumericHandler,7,8,1
-block_hint,ToStringHandler,3,4,1
-block_hint,CreateRegExpLiteralHandler,7,8,1
-block_hint,CreateRegExpLiteralHandler,3,4,1
-block_hint,CreateArrayLiteralHandler,38,39,1
-block_hint,CreateArrayLiteralHandler,41,42,0
-block_hint,CreateArrayLiteralHandler,13,14,0
-block_hint,CreateArrayLiteralHandler,49,50,1
-block_hint,CreateArrayLiteralHandler,46,47,1
-block_hint,CreateArrayLiteralHandler,22,23,1
-block_hint,CreateArrayLiteralHandler,28,29,1
-block_hint,CreateArrayLiteralHandler,3,4,1
-block_hint,CreateArrayLiteralHandler,30,31,1
-block_hint,CreateArrayLiteralHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralHandler,3,4,1
-block_hint,CreateEmptyArrayLiteralHandler,13,14,1
-block_hint,CreateEmptyArrayLiteralHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralHandler,15,16,1
-block_hint,CreateObjectLiteralHandler,88,89,0
-block_hint,CreateObjectLiteralHandler,123,124,0
-block_hint,CreateObjectLiteralHandler,116,117,1
-block_hint,CreateObjectLiteralHandler,92,93,1
-block_hint,CreateObjectLiteralHandler,108,109,1
-block_hint,CreateObjectLiteralHandler,72,73,1
-block_hint,CreateObjectLiteralHandler,34,35,0
-block_hint,CreateObjectLiteralHandler,74,75,1
-block_hint,CreateObjectLiteralHandler,64,65,0
-block_hint,CreateEmptyObjectLiteralHandler,4,5,1
-block_hint,CreateEmptyObjectLiteralHandler,11,12,1
-block_hint,CreateEmptyObjectLiteralHandler,6,7,0
-block_hint,CreateClosureHandler,2,3,1
-block_hint,CreateFunctionContextHandler,11,12,1
-block_hint,CreateFunctionContextHandler,4,5,1
-block_hint,CreateFunctionContextHandler,6,7,0
-block_hint,CreateMappedArgumentsHandler,52,53,0
-block_hint,CreateMappedArgumentsHandler,42,43,1
-block_hint,CreateMappedArgumentsHandler,44,45,0
-block_hint,CreateMappedArgumentsHandler,104,105,1
-block_hint,CreateMappedArgumentsHandler,46,47,0
-block_hint,CreateMappedArgumentsHandler,12,13,0
-block_hint,CreateMappedArgumentsHandler,14,15,0
-block_hint,CreateMappedArgumentsHandler,78,79,1
-block_hint,CreateMappedArgumentsHandler,58,59,1
-block_hint,CreateMappedArgumentsHandler,16,17,0
-block_hint,CreateMappedArgumentsHandler,60,61,0
-block_hint,CreateMappedArgumentsHandler,32,33,0
-block_hint,CreateMappedArgumentsHandler,24,25,1
-block_hint,CreateMappedArgumentsHandler,70,71,1
-block_hint,CreateUnmappedArgumentsHandler,16,17,1
-block_hint,CreateUnmappedArgumentsHandler,18,19,0
-block_hint,CreateUnmappedArgumentsHandler,20,21,0
-block_hint,CreateUnmappedArgumentsHandler,7,8,0
-block_hint,CreateUnmappedArgumentsHandler,9,10,0
-block_hint,CreateUnmappedArgumentsHandler,31,32,1
-block_hint,CreateUnmappedArgumentsHandler,25,26,1
-block_hint,CreateUnmappedArgumentsHandler,11,12,0
-block_hint,CreateUnmappedArgumentsHandler,27,28,0
-block_hint,CreateUnmappedArgumentsHandler,13,14,1
-block_hint,CreateUnmappedArgumentsHandler,23,24,1
-block_hint,CreateRestParameterHandler,13,14,0
-block_hint,CreateRestParameterHandler,27,28,0
-block_hint,JumpLoopHandler,34,35,1
-block_hint,JumpLoopHandler,23,24,0
-block_hint,JumpLoopHandler,9,10,1
-block_hint,JumpIfToBooleanFalseConstantHandler,14,15,0
-block_hint,JumpIfToBooleanFalseConstantHandler,20,21,0
-block_hint,JumpIfToBooleanFalseConstantHandler,6,7,0
-block_hint,JumpIfToBooleanTrueHandler,14,15,0
-block_hint,JumpIfToBooleanTrueHandler,6,7,0
-block_hint,JumpIfToBooleanTrueHandler,8,9,1
-block_hint,JumpIfToBooleanFalseHandler,14,15,0
-block_hint,JumpIfToBooleanFalseHandler,20,21,0
-block_hint,JumpIfToBooleanFalseHandler,6,7,0
-block_hint,JumpIfUndefinedOrNullHandler,3,4,0
-block_hint,JumpIfJSReceiverHandler,5,6,1
-block_hint,JumpIfJSReceiverHandler,3,4,1
-block_hint,SwitchOnSmiNoFeedbackHandler,3,4,0
-block_hint,ForInEnumerateHandler,34,35,1
-block_hint,ForInPrepareHandler,18,19,1
-block_hint,ForInNextHandler,2,3,1
-block_hint,ForInNextHandler,13,14,1
-block_hint,ReturnHandler,3,4,1
-block_hint,ThrowReferenceErrorIfHoleHandler,4,5,0
-block_hint,ThrowSuperNotCalledIfHoleHandler,2,3,0
-block_hint,ThrowSuperAlreadyCalledIfNotHoleHandler,2,3,1
-block_hint,ThrowIfNotSuperConstructorHandler,2,3,1
-block_hint,SuspendGeneratorHandler,14,15,1
-block_hint,SuspendGeneratorHandler,8,9,1
-block_hint,SuspendGeneratorHandler,12,13,1
-block_hint,ResumeGeneratorHandler,10,11,1
-block_hint,ResumeGeneratorHandler,4,5,1
-block_hint,ResumeGeneratorHandler,6,7,1
-block_hint,LdaImmutableContextSlotWideHandler,3,4,1
-block_hint,LdaImmutableContextSlotWideHandler,9,10,0
-block_hint,LdaImmutableContextSlotWideHandler,5,6,1
-block_hint,LdaImmutableCurrentContextSlotWideHandler,2,3,1
-block_hint,LdaGlobalWideHandler,262,263,0
-block_hint,LdaGlobalWideHandler,110,111,1
-block_hint,StaGlobalWideHandler,3,4,0
-block_hint,StaCurrentContextSlotWideHandler,2,3,1
-block_hint,GetNamedPropertyWideHandler,331,332,0
-block_hint,GetNamedPropertyWideHandler,140,141,1
-block_hint,GetKeyedPropertyWideHandler,3,4,0
-block_hint,SetNamedPropertyWideHandler,3,4,0
-block_hint,DefineNamedOwnPropertyWideHandler,3,4,0
-block_hint,SetKeyedPropertyWideHandler,3,4,0
-block_hint,DefineKeyedOwnPropertyWideHandler,3,4,0
-block_hint,StaInArrayLiteralWideHandler,3,4,0
-block_hint,AddWideHandler,120,121,0
-block_hint,AddWideHandler,60,61,0
-block_hint,AddWideHandler,42,43,0
-block_hint,AddWideHandler,107,108,0
-block_hint,AddWideHandler,76,77,1
-block_hint,AddWideHandler,53,54,1
-block_hint,AddWideHandler,31,32,1
-block_hint,AddWideHandler,51,52,1
-block_hint,AddWideHandler,18,19,1
-block_hint,SubWideHandler,108,109,0
-block_hint,SubWideHandler,65,66,0
-block_hint,SubWideHandler,40,41,0
-block_hint,SubWideHandler,15,16,1
-block_hint,MulWideHandler,128,129,0
-block_hint,MulWideHandler,106,107,1
-block_hint,MulWideHandler,83,84,1
-block_hint,MulWideHandler,81,82,1
-block_hint,MulWideHandler,43,44,1
-block_hint,MulWideHandler,19,20,1
-block_hint,BitwiseOrWideHandler,28,29,0
-block_hint,BitwiseOrWideHandler,20,21,1
-block_hint,AddSmiWideHandler,25,26,0
-block_hint,AddSmiWideHandler,17,18,0
-block_hint,MulSmiWideHandler,52,53,0
-block_hint,MulSmiWideHandler,41,42,0
-block_hint,MulSmiWideHandler,22,23,0
-block_hint,MulSmiWideHandler,34,35,1
-block_hint,MulSmiWideHandler,24,25,0
-block_hint,MulSmiWideHandler,28,29,1
-block_hint,MulSmiWideHandler,9,10,1
-block_hint,ModSmiWideHandler,51,52,1
-block_hint,ModSmiWideHandler,46,47,0
-block_hint,ModSmiWideHandler,42,43,1
-block_hint,ModSmiWideHandler,37,38,1
-block_hint,ModSmiWideHandler,22,23,0
-block_hint,ModSmiWideHandler,7,8,0
-block_hint,ModSmiWideHandler,13,14,1
-block_hint,BitwiseOrSmiWideHandler,23,24,0
-block_hint,BitwiseOrSmiWideHandler,6,7,0
-block_hint,BitwiseOrSmiWideHandler,11,12,1
-block_hint,BitwiseAndSmiWideHandler,6,7,0
-block_hint,BitwiseAndSmiWideHandler,18,19,1
-block_hint,ShiftLeftSmiWideHandler,24,25,0
-block_hint,ShiftLeftSmiWideHandler,6,7,0
-block_hint,ShiftLeftSmiWideHandler,40,41,0
-block_hint,ShiftLeftSmiWideHandler,30,31,0
-block_hint,ShiftLeftSmiWideHandler,11,12,1
-block_hint,ShiftRightSmiWideHandler,23,24,0
-block_hint,ShiftRightSmiWideHandler,6,7,0
-block_hint,ShiftRightSmiWideHandler,11,12,1
-block_hint,IncWideHandler,9,10,0
-block_hint,IncWideHandler,25,26,0
-block_hint,IncWideHandler,19,20,0
-block_hint,IncWideHandler,7,8,1
-block_hint,CallPropertyWideHandler,68,69,0
-block_hint,CallPropertyWideHandler,19,20,0
-block_hint,CallProperty0WideHandler,68,69,0
-block_hint,CallProperty0WideHandler,19,20,0
-block_hint,CallProperty1WideHandler,68,69,0
-block_hint,CallProperty1WideHandler,19,20,0
-block_hint,CallProperty2WideHandler,68,69,0
-block_hint,CallProperty2WideHandler,19,20,0
-block_hint,CallUndefinedReceiverWideHandler,68,69,0
-block_hint,CallUndefinedReceiverWideHandler,19,20,0
-block_hint,CallUndefinedReceiver0WideHandler,68,69,0
-block_hint,CallUndefinedReceiver0WideHandler,19,20,0
-block_hint,CallUndefinedReceiver1WideHandler,68,69,0
-block_hint,CallUndefinedReceiver1WideHandler,19,20,0
-block_hint,CallUndefinedReceiver2WideHandler,68,69,0
-block_hint,CallUndefinedReceiver2WideHandler,19,20,0
-block_hint,ConstructWideHandler,49,50,0
-block_hint,ConstructWideHandler,22,23,0
-block_hint,TestEqualWideHandler,103,104,0
-block_hint,TestEqualWideHandler,95,96,0
-block_hint,TestEqualWideHandler,47,48,0
-block_hint,TestEqualWideHandler,7,8,1
-block_hint,TestEqualStrictWideHandler,82,83,0
-block_hint,TestEqualStrictWideHandler,53,54,1
-block_hint,TestEqualStrictWideHandler,55,56,1
-block_hint,TestEqualStrictWideHandler,47,48,0
-block_hint,TestEqualStrictWideHandler,9,10,0
-block_hint,TestEqualStrictWideHandler,4,5,1
-block_hint,TestGreaterThanWideHandler,24,25,0
-block_hint,TestGreaterThanWideHandler,6,7,1
-block_hint,CreateRegExpLiteralWideHandler,14,15,0
-block_hint,CreateRegExpLiteralWideHandler,9,10,1
-block_hint,CreateArrayLiteralWideHandler,42,43,0
-block_hint,CreateArrayLiteralWideHandler,20,21,1
-block_hint,CreateEmptyArrayLiteralWideHandler,22,23,0
-block_hint,CreateEmptyArrayLiteralWideHandler,11,12,1
-block_hint,CreateEmptyArrayLiteralWideHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralWideHandler,15,16,1
-block_hint,CreateObjectLiteralWideHandler,99,100,0
-block_hint,CreateObjectLiteralWideHandler,58,59,1
-block_hint,CreateClosureWideHandler,9,10,1
-block_hint,CreateClosureWideHandler,2,3,1
-block_hint,CreateFunctionContextWideHandler,8,9,0
-block_hint,JumpLoopWideHandler,34,35,1
-block_hint,JumpLoopWideHandler,9,10,1
-block_hint,JumpIfToBooleanTrueWideHandler,18,19,1
-block_hint,JumpIfToBooleanTrueWideHandler,14,15,0
-block_hint,JumpIfToBooleanFalseWideHandler,18,19,1
-block_hint,JumpIfToBooleanFalseWideHandler,14,15,0
-block_hint,JumpIfToBooleanFalseWideHandler,20,21,0
-block_hint,JumpIfToBooleanFalseWideHandler,6,7,0
-block_hint,SwitchOnSmiNoFeedbackWideHandler,5,6,0
-block_hint,SwitchOnSmiNoFeedbackWideHandler,3,4,0
-block_hint,ForInNextWideHandler,11,12,0
-block_hint,ForInNextWideHandler,2,3,1
-block_hint,ForInNextWideHandler,4,5,0
-block_hint,ForInNextWideHandler,9,10,1
-block_hint,LdaGlobalExtraWideHandler,262,263,0
-block_hint,LdaGlobalExtraWideHandler,110,111,1
-block_hint,AddSmiExtraWideHandler,33,34,1
-block_hint,AddSmiExtraWideHandler,23,24,0
-block_hint,AddSmiExtraWideHandler,28,29,1
-block_hint,AddSmiExtraWideHandler,9,10,1
-block_hint,DivSmiExtraWideHandler,49,50,0
-block_hint,DivSmiExtraWideHandler,45,46,0
-block_hint,DivSmiExtraWideHandler,52,53,0
-block_hint,DivSmiExtraWideHandler,40,41,0
-block_hint,DivSmiExtraWideHandler,23,24,0
-block_hint,DivSmiExtraWideHandler,29,30,1
-block_hint,DivSmiExtraWideHandler,13,14,1
-block_hint,BitwiseAndSmiExtraWideHandler,31,32,1
-block_hint,BitwiseAndSmiExtraWideHandler,35,36,0
-block_hint,BitwiseAndSmiExtraWideHandler,29,30,0
-block_hint,BitwiseAndSmiExtraWideHandler,18,19,1
-block_hint,CallUndefinedReceiver1ExtraWideHandler,68,69,0
-block_hint,CallUndefinedReceiver1ExtraWideHandler,19,20,0
-builtin_hash,RecordWriteSaveFP,-787985789
-builtin_hash,RecordWriteIgnoreFP,-787985789
-builtin_hash,EphemeronKeyBarrierSaveFP,-762846067
-builtin_hash,AdaptorWithBuiltinExitFrame,245562366
-builtin_hash,Call_ReceiverIsNullOrUndefined_Baseline_Compact,-701969451
-builtin_hash,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,-324308522
-builtin_hash,Call_ReceiverIsAny_Baseline_Compact,-324308522
-builtin_hash,CallProxy,1028339399
-builtin_hash,CallWithSpread,535056033
-builtin_hash,CallWithSpread_Baseline,-119914143
-builtin_hash,CallWithArrayLike,-122249728
-builtin_hash,ConstructWithSpread,246592083
-builtin_hash,ConstructWithSpread_Baseline,150379974
-builtin_hash,Construct_Baseline,62706048
-builtin_hash,FastNewObject,958443730
-builtin_hash,FastNewClosure,344670909
-builtin_hash,StringEqual,747283806
-builtin_hash,StringGreaterThan,-181364078
-builtin_hash,StringGreaterThanOrEqual,-462881432
-builtin_hash,StringLessThan,-462881432
-builtin_hash,StringLessThanOrEqual,-181364078
-builtin_hash,StringSubstring,-615814018
-builtin_hash,OrderedHashTableHealIndex,-1059061674
-builtin_hash,CompileLazy,-1040787392
-builtin_hash,CompileLazyDeoptimizedCode,254075260
-builtin_hash,InstantiateAsmJs,-162781474
-builtin_hash,AllocateInYoungGeneration,504130749
-builtin_hash,AllocateRegularInYoungGeneration,-967770913
-builtin_hash,AllocateRegularInOldGeneration,-967770913
-builtin_hash,CopyFastSmiOrObjectElements,-184201389
-builtin_hash,GrowFastDoubleElements,933478036
-builtin_hash,GrowFastSmiOrObjectElements,62812155
-builtin_hash,ToNumber,-536181652
-builtin_hash,ToNumber_Baseline,-361624131
-builtin_hash,ToNumeric_Baseline,-968362129
-builtin_hash,ToNumberConvertBigInt,-484303877
-builtin_hash,Typeof,-292943239
-builtin_hash,KeyedLoadIC_PolymorphicName,-445639640
-builtin_hash,KeyedStoreIC_Megamorphic,228109775
-builtin_hash,DefineKeyedOwnIC_Megamorphic,587942691
-builtin_hash,LoadGlobalIC_NoFeedback,-506168140
-builtin_hash,LoadIC_FunctionPrototype,-217294724
-builtin_hash,LoadIC_StringLength,876788958
-builtin_hash,LoadIC_StringWrapperLength,-105737329
-builtin_hash,LoadIC_NoFeedback,796730020
-builtin_hash,StoreIC_NoFeedback,-771215689
-builtin_hash,DefineNamedOwnIC_NoFeedback,610029223
-builtin_hash,KeyedLoadIC_SloppyArguments,1037341519
-builtin_hash,StoreFastElementIC_Standard,511933864
-builtin_hash,StoreFastElementIC_GrowNoTransitionHandleCOW,-733182579
-builtin_hash,StoreFastElementIC_NoTransitionHandleCOW,14002747
-builtin_hash,ElementsTransitionAndStore_Standard,-303995099
-builtin_hash,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,-620039698
-builtin_hash,ElementsTransitionAndStore_NoTransitionHandleCOW,387221171
-builtin_hash,KeyedHasIC_PolymorphicName,481900135
-builtin_hash,EnqueueMicrotask,987190055
-builtin_hash,RunMicrotasks,-606800144
-builtin_hash,HasProperty,-958876308
-builtin_hash,DeleteProperty,-583543539
-builtin_hash,SetDataProperties,-633970258
-builtin_hash,ReturnReceiver,386533367
-builtin_hash,ArrayConstructor,-862505040
-builtin_hash,ArrayConstructorImpl,-772732436
-builtin_hash,ArrayNoArgumentConstructor_PackedSmi_DontOverride,546753803
-builtin_hash,ArrayNoArgumentConstructor_HoleySmi_DontOverride,546753803
-builtin_hash,ArrayNoArgumentConstructor_PackedSmi_DisableAllocationSites,76921937
-builtin_hash,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,76921937
-builtin_hash,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,76921937
-builtin_hash,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,-916490644
-builtin_hash,ArraySingleArgumentConstructor_HoleySmi_DontOverride,924187471
-builtin_hash,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,-90166804
-builtin_hash,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,-90166804
-builtin_hash,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,377718997
-builtin_hash,ArrayIncludesSmi,833613331
-builtin_hash,ArrayIncludesSmiOrObject,-439120197
-builtin_hash,ArrayIncludes,-557378221
-builtin_hash,ArrayIndexOfSmi,818318721
-builtin_hash,ArrayIndexOfSmiOrObject,1027851539
-builtin_hash,ArrayIndexOf,344845802
-builtin_hash,ArrayPrototypePop,127416215
-builtin_hash,ArrayPrototypePush,611743176
-builtin_hash,CloneFastJSArray,1060615555
-builtin_hash,CloneFastJSArrayFillingHoles,1003395618
-builtin_hash,ExtractFastJSArray,-517393151
-builtin_hash,ArrayPrototypeEntries,-332667431
-builtin_hash,ArrayPrototypeKeys,110264383
-builtin_hash,ArrayPrototypeValues,-332667431
-builtin_hash,ArrayIteratorPrototypeNext,-858892834
-builtin_hash,AsyncFunctionEnter,423723147
-builtin_hash,AsyncFunctionResolve,265196636
-builtin_hash,AsyncFunctionAwaitCaught,960969853
-builtin_hash,AsyncFunctionAwaitUncaught,960969853
-builtin_hash,AsyncFunctionAwaitResolveClosure,-1057297202
-builtin_hash,DatePrototypeGetDate,905028372
-builtin_hash,DatePrototypeGetDay,905028372
-builtin_hash,DatePrototypeGetFullYear,905028372
-builtin_hash,DatePrototypeGetHours,905028372
-builtin_hash,DatePrototypeGetMilliseconds,-707287527
-builtin_hash,DatePrototypeGetMinutes,905028372
-builtin_hash,DatePrototypeGetMonth,905028372
-builtin_hash,DatePrototypeGetSeconds,905028372
-builtin_hash,DatePrototypeGetTime,665014006
-builtin_hash,DatePrototypeGetTimezoneOffset,-707287527
-builtin_hash,DatePrototypeValueOf,665014006
-builtin_hash,DatePrototypeToPrimitive,23745105
-builtin_hash,CreateIterResultObject,833507199
-builtin_hash,CreateGeneratorObject,-898656785
-builtin_hash,GeneratorPrototypeNext,-29771038
-builtin_hash,GeneratorPrototypeReturn,-279661376
-builtin_hash,SuspendGeneratorBaseline,-49499079
-builtin_hash,ResumeGeneratorBaseline,145201245
-builtin_hash,GlobalIsFinite,805204024
-builtin_hash,GlobalIsNaN,413622277
-builtin_hash,LoadIC,79924816
-builtin_hash,LoadIC_Megamorphic,682925528
-builtin_hash,LoadIC_Noninlined,-767250044
-builtin_hash,LoadICTrampoline,-803254542
-builtin_hash,LoadICBaseline,-628874782
-builtin_hash,LoadICTrampoline_Megamorphic,-803254542
-builtin_hash,LoadSuperIC,-238282119
-builtin_hash,LoadSuperICBaseline,841397561
-builtin_hash,KeyedLoadIC,78355712
-builtin_hash,KeyedLoadIC_Megamorphic,-391277039
-builtin_hash,KeyedLoadICTrampoline,-803254542
-builtin_hash,KeyedLoadICBaseline,-628874782
-builtin_hash,KeyedLoadICTrampoline_Megamorphic,-803254542
-builtin_hash,StoreGlobalIC,-33330877
-builtin_hash,StoreGlobalICTrampoline,-803254542
-builtin_hash,StoreGlobalICBaseline,-628874782
-builtin_hash,StoreIC,-959753225
-builtin_hash,StoreICTrampoline,756382466
-builtin_hash,StoreICBaseline,841397561
-builtin_hash,DefineNamedOwnIC,464622021
-builtin_hash,DefineNamedOwnICBaseline,841397561
-builtin_hash,KeyedStoreIC,-538069768
-builtin_hash,KeyedStoreICTrampoline,756382466
-builtin_hash,KeyedStoreICBaseline,841397561
-builtin_hash,DefineKeyedOwnIC,458562905
-builtin_hash,StoreInArrayLiteralIC,-604069917
-builtin_hash,StoreInArrayLiteralICBaseline,841397561
-builtin_hash,LoadGlobalIC,274757270
-builtin_hash,LoadGlobalICInsideTypeof,303475129
-builtin_hash,LoadGlobalICTrampoline,-833311190
-builtin_hash,LoadGlobalICBaseline,-77255126
-builtin_hash,LoadGlobalICInsideTypeofTrampoline,-833311190
-builtin_hash,LoadGlobalICInsideTypeofBaseline,-77255126
-builtin_hash,LookupGlobalICBaseline,-1021507359
-builtin_hash,LookupGlobalICInsideTypeofBaseline,-1021507359
-builtin_hash,KeyedHasIC,-204183308
-builtin_hash,KeyedHasICBaseline,-628874782
-builtin_hash,KeyedHasIC_Megamorphic,-958876308
-builtin_hash,IterableToList,-4651130
-builtin_hash,IterableToListWithSymbolLookup,977588013
-builtin_hash,IterableToListMayPreserveHoles,908990960
-builtin_hash,FindOrderedHashMapEntry,196242182
-builtin_hash,MapConstructor,127220366
-builtin_hash,MapPrototypeSet,529910141
-builtin_hash,MapPrototypeDelete,-553855034
-builtin_hash,MapPrototypeGet,-312429732
-builtin_hash,MapPrototypeHas,-908577859
-builtin_hash,MapPrototypeEntries,898519671
-builtin_hash,MapPrototypeGetSize,548120946
-builtin_hash,MapPrototypeForEach,600253966
-builtin_hash,MapPrototypeKeys,898519671
-builtin_hash,MapPrototypeValues,898519671
-builtin_hash,MapIteratorPrototypeNext,581031622
-builtin_hash,MapIteratorToList,-668334452
-builtin_hash,SameValueNumbersOnly,1046023669
-builtin_hash,Add_Baseline,-819537320
-builtin_hash,AddSmi_Baseline,-468458532
-builtin_hash,Subtract_Baseline,65596691
-builtin_hash,SubtractSmi_Baseline,-149584042
-builtin_hash,Multiply_Baseline,294831898
-builtin_hash,MultiplySmi_Baseline,996262660
-builtin_hash,Divide_Baseline,-446061441
-builtin_hash,DivideSmi_Baseline,-447600168
-builtin_hash,Modulus_Baseline,-832082339
-builtin_hash,ModulusSmi_Baseline,413347859
-builtin_hash,Exponentiate_Baseline,129594833
-builtin_hash,BitwiseAnd_Baseline,807317245
-builtin_hash,BitwiseAndSmi_Baseline,-299694524
-builtin_hash,BitwiseOr_Baseline,517046253
-builtin_hash,BitwiseOrSmi_Baseline,986547189
-builtin_hash,BitwiseXor_Baseline,-23876279
-builtin_hash,BitwiseXorSmi_Baseline,-1002138133
-builtin_hash,ShiftLeft_Baseline,500850188
-builtin_hash,ShiftLeftSmi_Baseline,-633960771
-builtin_hash,ShiftRight_Baseline,-32080745
-builtin_hash,ShiftRightSmi_Baseline,315819990
-builtin_hash,ShiftRightLogical_Baseline,479447240
-builtin_hash,ShiftRightLogicalSmi_Baseline,-519393226
-builtin_hash,Add_WithFeedback,-206794177
-builtin_hash,Subtract_WithFeedback,347362352
-builtin_hash,Modulus_WithFeedback,920841751
-builtin_hash,BitwiseOr_WithFeedback,-74343708
-builtin_hash,Equal_Baseline,-896951542
-builtin_hash,StrictEqual_Baseline,87581778
-builtin_hash,LessThan_Baseline,-374004445
-builtin_hash,GreaterThan_Baseline,-368668942
-builtin_hash,LessThanOrEqual_Baseline,301132954
-builtin_hash,GreaterThanOrEqual_Baseline,756925202
-builtin_hash,Equal_WithFeedback,-1040295188
-builtin_hash,StrictEqual_WithFeedback,-1052414211
-builtin_hash,LessThan_WithFeedback,948983301
-builtin_hash,GreaterThan_WithFeedback,-258688563
-builtin_hash,GreaterThanOrEqual_WithFeedback,691471117
-builtin_hash,BitwiseNot_Baseline,182142082
-builtin_hash,Decrement_Baseline,-544743600
-builtin_hash,Increment_Baseline,-307783174
-builtin_hash,Negate_Baseline,434902398
-builtin_hash,ObjectAssign,-786777006
-builtin_hash,ObjectCreate,-543317475
-builtin_hash,ObjectEntries,-465524320
-builtin_hash,ObjectGetOwnPropertyDescriptor,862856609
-builtin_hash,ObjectGetOwnPropertyNames,409260893
-builtin_hash,ObjectIs,-428110665
-builtin_hash,ObjectKeys,-711238005
-builtin_hash,ObjectPrototypeHasOwnProperty,-338192343
-builtin_hash,ObjectToString,993745228
-builtin_hash,InstanceOf_WithFeedback,-50284518
-builtin_hash,InstanceOf_Baseline,992223159
-builtin_hash,ForInEnumerate,-857152067
-builtin_hash,ForInPrepare,-602567485
-builtin_hash,ForInFilter,-142224411
-builtin_hash,RegExpConstructor,-862541618
-builtin_hash,RegExpExecAtom,-837574121
-builtin_hash,RegExpExecInternal,549675176
-builtin_hash,FindOrderedHashSetEntry,-166628054
-builtin_hash,SetConstructor,-778640968
-builtin_hash,SetPrototypeHas,-908577859
-builtin_hash,SetPrototypeAdd,-427333429
-builtin_hash,SetPrototypeDelete,-871946847
-builtin_hash,SetPrototypeEntries,898519671
-builtin_hash,SetPrototypeGetSize,548120946
-builtin_hash,SetPrototypeForEach,-501810916
-builtin_hash,SetPrototypeValues,898519671
-builtin_hash,SetIteratorPrototypeNext,182871241
-builtin_hash,SetOrSetIteratorToList,-33118696
-builtin_hash,StringFromCharCode,-971392951
-builtin_hash,StringPrototypeReplace,211421001
-builtin_hash,StringPrototypeSplit,-56509999
-builtin_hash,TypedArrayConstructor,618386097
-builtin_hash,TypedArrayPrototypeByteLength,-587563610
-builtin_hash,TypedArrayPrototypeLength,-163278974
-builtin_hash,WeakMapConstructor,-808541690
-builtin_hash,WeakMapLookupHashIndex,-619048905
-builtin_hash,WeakMapGet,276986520
-builtin_hash,WeakMapPrototypeHas,-285904254
-builtin_hash,WeakMapPrototypeSet,629680419
-builtin_hash,WeakSetConstructor,-367435631
-builtin_hash,WeakSetPrototypeHas,-285904254
-builtin_hash,WeakSetPrototypeAdd,-301255294
-builtin_hash,WeakCollectionSet,217583952
-builtin_hash,AsyncGeneratorResolve,242317686
-builtin_hash,AsyncGeneratorYieldWithAwait,302667528
-builtin_hash,AsyncGeneratorResumeNext,-265907726
-builtin_hash,AsyncGeneratorPrototypeNext,-194499830
-builtin_hash,AsyncGeneratorAwaitUncaught,-398074132
-builtin_hash,AsyncGeneratorAwaitResolveClosure,-245656056
-builtin_hash,AsyncGeneratorYieldWithAwaitResolveClosure,-649252259
-builtin_hash,StringAdd_CheckNone,1037172071
-builtin_hash,SubString,-701927326
-builtin_hash,GetProperty,-433765894
-builtin_hash,GetPropertyWithReceiver,636771451
-builtin_hash,SetProperty,-985618808
-builtin_hash,CreateDataProperty,952942021
-builtin_hash,FindNonDefaultConstructorOrConstruct,1020851957
-builtin_hash,ArrayPrototypeConcat,-711562967
-builtin_hash,ArrayEvery,732127203
-builtin_hash,ArrayFilterLoopLazyDeoptContinuation,782264259
-builtin_hash,ArrayFilterLoopContinuation,292635770
-builtin_hash,ArrayFilter,-585622372
-builtin_hash,ArrayPrototypeFind,410534083
-builtin_hash,ArrayForEachLoopLazyDeoptContinuation,-299794382
-builtin_hash,ArrayForEachLoopContinuation,350033182
-builtin_hash,ArrayForEach,729108989
-builtin_hash,ArrayFrom,1055630901
-builtin_hash,ArrayIsArray,-970031738
-builtin_hash,LoadJoinElement_FastSmiOrObjectElements_0,228167807
-builtin_hash,LoadJoinElement_FastDoubleElements_0,580988969
-builtin_hash,JoinStackPush,751439150
-builtin_hash,JoinStackPop,128574663
-builtin_hash,ArrayPrototypeJoin,89295304
-builtin_hash,ArrayPrototypeToString,-66500098
-builtin_hash,ArrayPrototypeLastIndexOf,1073113005
-builtin_hash,ArrayMapLoopLazyDeoptContinuation,-47088981
-builtin_hash,ArrayMapLoopContinuation,-794603673
-builtin_hash,ArrayMap,-326417675
-builtin_hash,ArrayReduceLoopLazyDeoptContinuation,-1014597388
-builtin_hash,ArrayReduceLoopContinuation,-1067144759
-builtin_hash,ArrayReduce,-407776620
-builtin_hash,ArrayPrototypeReverse,-121874294
-builtin_hash,ArrayPrototypeShift,-928108750
-builtin_hash,ArrayPrototypeSlice,214735037
-builtin_hash,ArraySome,466290774
-builtin_hash,ArrayPrototypeSplice,1001942992
-builtin_hash,ArrayPrototypeUnshift,-1052845134
-builtin_hash,ArrayBufferPrototypeGetByteLength,445258508
-builtin_hash,ArrayBufferIsView,-78532109
-builtin_hash,ToInteger,713419327
-builtin_hash,FastCreateDataProperty,-278611029
-builtin_hash,BooleanConstructor,-809457299
-builtin_hash,BooleanPrototypeToString,-798757106
-builtin_hash,ToString,436846720
-builtin_hash,StringPrototypeToString,-794700080
-builtin_hash,StringPrototypeValueOf,-794700080
-builtin_hash,StringPrototypeCharAt,915103217
-builtin_hash,StringPrototypeCharCodeAt,-272108096
-builtin_hash,StringPrototypeCodePointAt,-596824984
-builtin_hash,StringPrototypeConcat,-577571398
-builtin_hash,StringConstructor,-65593142
-builtin_hash,StringAddConvertLeft,51926197
-builtin_hash,StringAddConvertRight,115066033
-builtin_hash,StringCharAt,959950211
-builtin_hash,FastNewClosureBaseline,-532908706
-builtin_hash,FastNewFunctionContextFunction,977993537
-builtin_hash,CreateRegExpLiteral,64770172
-builtin_hash,CreateShallowArrayLiteral,866949735
-builtin_hash,CreateEmptyArrayLiteral,-862242730
-builtin_hash,CreateShallowObjectLiteral,991590480
-builtin_hash,ObjectConstructor,-384944316
-builtin_hash,CreateEmptyLiteralObject,-310219292
-builtin_hash,NumberConstructor,-974450450
-builtin_hash,StringToNumber,-446317754
-builtin_hash,NonNumberToNumber,504608456
-builtin_hash,NonNumberToNumeric,-570033562
-builtin_hash,ToNumeric,-772194204
-builtin_hash,NumberToString,674929388
-builtin_hash,ToBoolean,856538717
-builtin_hash,ToBooleanForBaselineJump,-446512949
-builtin_hash,ToLength,999641871
-builtin_hash,ToName,645844037
-builtin_hash,ToObject,119745243
-builtin_hash,NonPrimitiveToPrimitive_Default,-151838227
-builtin_hash,NonPrimitiveToPrimitive_Number,-151838227
-builtin_hash,NonPrimitiveToPrimitive_String,-151838227
-builtin_hash,OrdinaryToPrimitive_Number,-337334591
-builtin_hash,OrdinaryToPrimitive_String,-337334591
-builtin_hash,DataViewPrototypeGetByteLength,750091486
-builtin_hash,DataViewPrototypeGetFloat64,544637297
-builtin_hash,DataViewPrototypeSetUint32,366892025
-builtin_hash,DataViewPrototypeSetFloat64,267831220
-builtin_hash,FunctionPrototypeHasInstance,-911487777
-builtin_hash,FastFunctionPrototypeBind,-29755211
-builtin_hash,ForInNext,547638943
-builtin_hash,GetIteratorWithFeedback,935596039
-builtin_hash,GetIteratorBaseline,-124236956
-builtin_hash,CallIteratorWithFeedback,174322508
-builtin_hash,MathAbs,111472406
-builtin_hash,MathCeil,-288711730
-builtin_hash,MathFloor,558720012
-builtin_hash,MathRound,-893522347
-builtin_hash,MathPow,-432438626
-builtin_hash,MathMax,-914923816
-builtin_hash,MathMin,-435430851
-builtin_hash,MathAsin,-865319143
-builtin_hash,MathAtan2,-706534972
-builtin_hash,MathCos,705415335
-builtin_hash,MathExp,1065131032
-builtin_hash,MathFround,-135252655
-builtin_hash,MathImul,773832811
-builtin_hash,MathLog,540909033
-builtin_hash,MathSin,-688911662
-builtin_hash,MathSign,-523407079
-builtin_hash,MathSqrt,-794868693
-builtin_hash,MathTan,537052027
-builtin_hash,MathTanh,-300840302
-builtin_hash,MathRandom,966867537
-builtin_hash,NumberPrototypeToString,-382822730
-builtin_hash,NumberIsInteger,-213604804
-builtin_hash,NumberIsNaN,788813704
-builtin_hash,NumberParseFloat,-741561968
-builtin_hash,ParseInt,998287919
-builtin_hash,NumberParseInt,-382916138
-builtin_hash,Add,-136527337
-builtin_hash,Subtract,-213501900
-builtin_hash,Multiply,7472525
-builtin_hash,Divide,-344347312
-builtin_hash,Modulus,-582417614
-builtin_hash,CreateObjectWithoutProperties,339671006
-builtin_hash,ObjectIsExtensible,-329082141
-builtin_hash,ObjectPreventExtensions,940542631
-builtin_hash,ObjectGetPrototypeOf,157540923
-builtin_hash,ObjectSetPrototypeOf,187356384
-builtin_hash,ObjectPrototypeToString,-483254038
-builtin_hash,ObjectPrototypeValueOf,193287106
-builtin_hash,FulfillPromise,272197869
-builtin_hash,NewPromiseCapability,-508522709
-builtin_hash,PromiseCapabilityDefaultResolve,-402797269
-builtin_hash,PerformPromiseThen,330989248
-builtin_hash,PromiseAll,697437536
-builtin_hash,PromiseAllResolveElementClosure,-862999565
-builtin_hash,PromiseConstructor,762524591
-builtin_hash,PromisePrototypeCatch,756171957
-builtin_hash,PromiseFulfillReactionJob,-630924263
-builtin_hash,PromiseResolveTrampoline,-167249272
-builtin_hash,PromiseResolve,-412690059
-builtin_hash,ResolvePromise,756044362
-builtin_hash,PromisePrototypeThen,3713531
-builtin_hash,PromiseResolveThenableJob,-14213172
-builtin_hash,ProxyConstructor,459230341
-builtin_hash,ProxyGetProperty,1054163992
-builtin_hash,ProxyIsExtensible,308384776
-builtin_hash,ProxyPreventExtensions,399450299
-builtin_hash,ReflectGet,-434221017
-builtin_hash,ReflectHas,-167249272
-builtin_hash,RegExpPrototypeExec,963999476
-builtin_hash,RegExpMatchFast,384654261
-builtin_hash,RegExpReplace,-475275041
-builtin_hash,RegExpPrototypeReplace,860372377
-builtin_hash,RegExpSearchFast,907750005
-builtin_hash,RegExpPrototypeSourceGetter,-747085084
-builtin_hash,RegExpSplit,-607180644
-builtin_hash,RegExpPrototypeTest,-585829947
-builtin_hash,RegExpPrototypeTestFast,-1071276448
-builtin_hash,RegExpPrototypeGlobalGetter,-718555192
-builtin_hash,RegExpPrototypeIgnoreCaseGetter,1070990033
-builtin_hash,RegExpPrototypeMultilineGetter,216999873
-builtin_hash,RegExpPrototypeHasIndicesGetter,390292067
-builtin_hash,RegExpPrototypeDotAllGetter,390292067
-builtin_hash,RegExpPrototypeStickyGetter,1055105538
-builtin_hash,RegExpPrototypeUnicodeGetter,1055105538
-builtin_hash,RegExpPrototypeFlagsGetter,-646009057
-builtin_hash,StringPrototypeEndsWith,565371891
-builtin_hash,StringPrototypeIncludes,480948081
-builtin_hash,StringPrototypeIndexOf,619068194
-builtin_hash,StringPrototypeIterator,-532566456
-builtin_hash,StringIteratorPrototypeNext,-1034386014
-builtin_hash,StringPrototypeMatch,127768813
-builtin_hash,StringPrototypeSearch,127768813
-builtin_hash,StringRepeat,92491602
-builtin_hash,StringPrototypeSlice,111174165
-builtin_hash,StringPrototypeStartsWith,-951440779
-builtin_hash,StringPrototypeSubstr,716425893
-builtin_hash,StringPrototypeSubstring,769385864
-builtin_hash,StringPrototypeTrim,-151587513
-builtin_hash,SymbolPrototypeToString,697341238
-builtin_hash,CreateTypedArray,100324164
-builtin_hash,TypedArrayFrom,-508079252
-builtin_hash,TypedArrayPrototypeSet,241292735
-builtin_hash,TypedArrayPrototypeSubArray,-638094120
-builtin_hash,NewSloppyArgumentsElements,745494442
-builtin_hash,NewStrictArgumentsElements,-81425804
-builtin_hash,NewRestArgumentsElements,-823345459
-builtin_hash,FastNewSloppyArguments,-174863955
-builtin_hash,FastNewStrictArguments,-75939795
-builtin_hash,FastNewRestArguments,-680285498
-builtin_hash,StringSlowFlatten,108774605
-builtin_hash,StringIndexOf,119327941
-builtin_hash,Load_FastSmiElements_0,-418523514
-builtin_hash,Load_FastObjectElements_0,-418523514
-builtin_hash,Store_FastSmiElements_0,975980653
-builtin_hash,Store_FastObjectElements_0,311513691
-builtin_hash,SortCompareDefault,842664214
-builtin_hash,SortCompareUserFn,1059126141
-builtin_hash,Copy,-750738169
-builtin_hash,MergeAt,944447896
-builtin_hash,GallopLeft,368113946
-builtin_hash,GallopRight,186729557
-builtin_hash,ArrayTimSort,-475205137
-builtin_hash,ArrayPrototypeSort,-366911589
-builtin_hash,StringFastLocaleCompare,15452983
-builtin_hash,WasmInt32ToHeapNumber,751194511
-builtin_hash,WasmTaggedNonSmiToInt32,-202443862
-builtin_hash,WasmTriggerTierUp,-980759280
-builtin_hash,WasmStackGuard,-1024124053
-builtin_hash,CanUseSameAccessor_FastSmiElements_0,-756700379
-builtin_hash,CanUseSameAccessor_FastObjectElements_0,-756700379
-builtin_hash,StringPrototypeToLowerCaseIntl,-966367732
-builtin_hash,StringToLowerCaseIntl,-481509366
-builtin_hash,WideHandler,-298201266
-builtin_hash,ExtraWideHandler,-298201266
-builtin_hash,LdarHandler,-745598094
-builtin_hash,LdaZeroHandler,368748633
-builtin_hash,LdaSmiHandler,-545227529
-builtin_hash,LdaUndefinedHandler,1011673901
-builtin_hash,LdaNullHandler,1011673901
-builtin_hash,LdaTheHoleHandler,1011673901
-builtin_hash,LdaTrueHandler,827753247
-builtin_hash,LdaFalseHandler,827753247
-builtin_hash,LdaConstantHandler,407548785
-builtin_hash,LdaContextSlotHandler,506452989
-builtin_hash,LdaImmutableContextSlotHandler,506452989
-builtin_hash,LdaCurrentContextSlotHandler,327557270
-builtin_hash,LdaImmutableCurrentContextSlotHandler,327557270
-builtin_hash,StarHandler,305217552
-builtin_hash,MovHandler,-283701884
-builtin_hash,PushContextHandler,177425195
-builtin_hash,PopContextHandler,-1044986385
-builtin_hash,TestReferenceEqualHandler,-651544719
-builtin_hash,TestUndetectableHandler,-830971105
-builtin_hash,TestNullHandler,1005522396
-builtin_hash,TestUndefinedHandler,1005522396
-builtin_hash,TestTypeOfHandler,-1028477858
-builtin_hash,LdaGlobalHandler,965344129
-builtin_hash,LdaGlobalInsideTypeofHandler,585777250
-builtin_hash,StaGlobalHandler,1056951542
-builtin_hash,StaContextSlotHandler,-675927710
-builtin_hash,StaCurrentContextSlotHandler,-997669083
-builtin_hash,LdaLookupGlobalSlotHandler,-84752131
-builtin_hash,LdaLookupGlobalSlotInsideTypeofHandler,49834142
-builtin_hash,StaLookupSlotHandler,-381579342
-builtin_hash,GetNamedPropertyHandler,-27764824
-builtin_hash,GetNamedPropertyFromSuperHandler,-724989944
-builtin_hash,GetKeyedPropertyHandler,-56635454
-builtin_hash,SetNamedPropertyHandler,448782548
-builtin_hash,DefineNamedOwnPropertyHandler,448782548
-builtin_hash,SetKeyedPropertyHandler,941278116
-builtin_hash,DefineKeyedOwnPropertyHandler,941278116
-builtin_hash,StaInArrayLiteralHandler,941278116
-builtin_hash,DefineKeyedOwnPropertyInLiteralHandler,1045494813
-builtin_hash,AddHandler,-518783725
-builtin_hash,SubHandler,505104408
-builtin_hash,MulHandler,-222850853
-builtin_hash,DivHandler,-1028262634
-builtin_hash,ModHandler,143526297
-builtin_hash,ExpHandler,-727777022
-builtin_hash,BitwiseOrHandler,-522781712
-builtin_hash,BitwiseXorHandler,-419955523
-builtin_hash,BitwiseAndHandler,530208341
-builtin_hash,ShiftLeftHandler,-804444955
-builtin_hash,ShiftRightHandler,-104335215
-builtin_hash,ShiftRightLogicalHandler,1050635494
-builtin_hash,AddSmiHandler,-161508067
-builtin_hash,SubSmiHandler,-609360326
-builtin_hash,MulSmiHandler,282822605
-builtin_hash,DivSmiHandler,292906952
-builtin_hash,ModSmiHandler,-917212490
-builtin_hash,BitwiseOrSmiHandler,172148322
-builtin_hash,BitwiseXorSmiHandler,1046550901
-builtin_hash,BitwiseAndSmiHandler,-808862341
-builtin_hash,ShiftLeftSmiHandler,862845296
-builtin_hash,ShiftRightSmiHandler,183483372
-builtin_hash,ShiftRightLogicalSmiHandler,31369673
-builtin_hash,IncHandler,-318834355
-builtin_hash,DecHandler,938496699
-builtin_hash,NegateHandler,-590726041
-builtin_hash,BitwiseNotHandler,322709376
-builtin_hash,ToBooleanLogicalNotHandler,-972724513
-builtin_hash,LogicalNotHandler,-706273800
-builtin_hash,TypeOfHandler,-751823
-builtin_hash,DeletePropertyStrictHandler,-724253277
-builtin_hash,DeletePropertySloppyHandler,-476722269
-builtin_hash,FindNonDefaultConstructorOrConstructHandler,-746857468
-builtin_hash,CallAnyReceiverHandler,87393745
-builtin_hash,CallPropertyHandler,87393745
-builtin_hash,CallProperty0Handler,956548008
-builtin_hash,CallProperty1Handler,-471075746
-builtin_hash,CallProperty2Handler,-1043814952
-builtin_hash,CallUndefinedReceiverHandler,126620186
-builtin_hash,CallUndefinedReceiver0Handler,-286191860
-builtin_hash,CallUndefinedReceiver1Handler,-357856703
-builtin_hash,CallUndefinedReceiver2Handler,798828847
-builtin_hash,CallWithSpreadHandler,87393745
-builtin_hash,CallRuntimeHandler,624123308
-builtin_hash,CallJSRuntimeHandler,1005113218
-builtin_hash,InvokeIntrinsicHandler,-566159390
-builtin_hash,ConstructHandler,543386518
-builtin_hash,ConstructWithSpreadHandler,595837553
-builtin_hash,TestEqualHandler,-157366914
-builtin_hash,TestEqualStrictHandler,998643852
-builtin_hash,TestLessThanHandler,1046936290
-builtin_hash,TestGreaterThanHandler,-369508260
-builtin_hash,TestLessThanOrEqualHandler,-412750652
-builtin_hash,TestGreaterThanOrEqualHandler,-364267636
-builtin_hash,TestInstanceOfHandler,-607728916
-builtin_hash,TestInHandler,539847065
-builtin_hash,ToNameHandler,701699245
-builtin_hash,ToNumberHandler,-512585428
-builtin_hash,ToNumericHandler,459707132
-builtin_hash,ToObjectHandler,701699245
-builtin_hash,ToStringHandler,620423288
-builtin_hash,CreateRegExpLiteralHandler,848340986
-builtin_hash,CreateArrayLiteralHandler,101333771
-builtin_hash,CreateArrayFromIterableHandler,-18783057
-builtin_hash,CreateEmptyArrayLiteralHandler,-289337896
-builtin_hash,CreateObjectLiteralHandler,-711473910
-builtin_hash,CreateEmptyObjectLiteralHandler,795228443
-builtin_hash,CreateClosureHandler,877324634
-builtin_hash,CreateBlockContextHandler,-344466857
-builtin_hash,CreateCatchContextHandler,-214012965
-builtin_hash,CreateFunctionContextHandler,729147868
-builtin_hash,CreateMappedArgumentsHandler,-124182926
-builtin_hash,CreateUnmappedArgumentsHandler,758781228
-builtin_hash,CreateRestParameterHandler,-10099522
-builtin_hash,JumpLoopHandler,-166037043
-builtin_hash,JumpHandler,-79617432
-builtin_hash,JumpConstantHandler,906507762
-builtin_hash,JumpIfUndefinedConstantHandler,250257394
-builtin_hash,JumpIfNotUndefinedConstantHandler,-587816710
-builtin_hash,JumpIfUndefinedOrNullConstantHandler,53751011
-builtin_hash,JumpIfTrueConstantHandler,250257394
-builtin_hash,JumpIfFalseConstantHandler,250257394
-builtin_hash,JumpIfToBooleanTrueConstantHandler,15176103
-builtin_hash,JumpIfToBooleanFalseConstantHandler,422983862
-builtin_hash,JumpIfToBooleanTrueHandler,635201116
-builtin_hash,JumpIfToBooleanFalseHandler,408147223
-builtin_hash,JumpIfTrueHandler,801953084
-builtin_hash,JumpIfFalseHandler,801953084
-builtin_hash,JumpIfNullHandler,801953084
-builtin_hash,JumpIfNotNullHandler,1026829001
-builtin_hash,JumpIfUndefinedHandler,801953084
-builtin_hash,JumpIfNotUndefinedHandler,1026829001
-builtin_hash,JumpIfUndefinedOrNullHandler,1021601552
-builtin_hash,JumpIfJSReceiverHandler,65469341
-builtin_hash,SwitchOnSmiNoFeedbackHandler,807681990
-builtin_hash,ForInEnumerateHandler,510063374
-builtin_hash,ForInPrepareHandler,630466074
-builtin_hash,ForInContinueHandler,-691562887
-builtin_hash,ForInNextHandler,512834227
-builtin_hash,ForInStepHandler,942618821
-builtin_hash,SetPendingMessageHandler,401946975
-builtin_hash,ThrowHandler,50431783
-builtin_hash,ReThrowHandler,50431783
-builtin_hash,ReturnHandler,-117530186
-builtin_hash,ThrowReferenceErrorIfHoleHandler,512852920
-builtin_hash,ThrowSuperNotCalledIfHoleHandler,717642155
-builtin_hash,ThrowSuperAlreadyCalledIfNotHoleHandler,-546144205
-builtin_hash,ThrowIfNotSuperConstructorHandler,-460002303
-builtin_hash,SwitchOnGeneratorStateHandler,10710931
-builtin_hash,SuspendGeneratorHandler,-500612975
-builtin_hash,ResumeGeneratorHandler,1068636019
-builtin_hash,GetIteratorHandler,-71006498
-builtin_hash,ShortStarHandler,356943682
-builtin_hash,LdarWideHandler,-249230336
-builtin_hash,LdaSmiWideHandler,-31881096
-builtin_hash,LdaConstantWideHandler,-758989820
-builtin_hash,LdaContextSlotWideHandler,687146226
-builtin_hash,LdaImmutableContextSlotWideHandler,687146226
-builtin_hash,LdaImmutableCurrentContextSlotWideHandler,-836770052
-builtin_hash,StarWideHandler,501248040
-builtin_hash,MovWideHandler,-871657303
-builtin_hash,PushContextWideHandler,844522230
-builtin_hash,PopContextWideHandler,744748597
-builtin_hash,TestReferenceEqualWideHandler,-118913544
-builtin_hash,LdaGlobalWideHandler,-661487412
-builtin_hash,LdaGlobalInsideTypeofWideHandler,-572343212
-builtin_hash,StaGlobalWideHandler,555909381
-builtin_hash,StaContextSlotWideHandler,478877471
-builtin_hash,StaCurrentContextSlotWideHandler,-615279276
-builtin_hash,LdaLookupGlobalSlotWideHandler,-1002268065
-builtin_hash,GetNamedPropertyWideHandler,-241462706
-builtin_hash,GetKeyedPropertyWideHandler,641533107
-builtin_hash,SetNamedPropertyWideHandler,-58064714
-builtin_hash,DefineNamedOwnPropertyWideHandler,-58064714
-builtin_hash,SetKeyedPropertyWideHandler,686171362
-builtin_hash,DefineKeyedOwnPropertyWideHandler,686171362
-builtin_hash,StaInArrayLiteralWideHandler,686171362
-builtin_hash,AddWideHandler,-617481681
-builtin_hash,SubWideHandler,145242966
-builtin_hash,MulWideHandler,166175890
-builtin_hash,DivWideHandler,829768719
-builtin_hash,BitwiseOrWideHandler,-671352735
-builtin_hash,BitwiseAndWideHandler,-748389668
-builtin_hash,ShiftLeftWideHandler,-722355824
-builtin_hash,AddSmiWideHandler,-503151286
-builtin_hash,SubSmiWideHandler,266762310
-builtin_hash,MulSmiWideHandler,767307001
-builtin_hash,DivSmiWideHandler,1050619977
-builtin_hash,ModSmiWideHandler,-653636504
-builtin_hash,BitwiseOrSmiWideHandler,905206733
-builtin_hash,BitwiseXorSmiWideHandler,1044063990
-builtin_hash,BitwiseAndSmiWideHandler,-376485258
-builtin_hash,ShiftLeftSmiWideHandler,-1004091795
-builtin_hash,ShiftRightSmiWideHandler,-397666497
-builtin_hash,ShiftRightLogicalSmiWideHandler,54662547
-builtin_hash,IncWideHandler,331971916
-builtin_hash,DecWideHandler,279024516
-builtin_hash,NegateWideHandler,-781916260
-builtin_hash,CallPropertyWideHandler,-998392170
-builtin_hash,CallProperty0WideHandler,54487119
-builtin_hash,CallProperty1WideHandler,-147592428
-builtin_hash,CallProperty2WideHandler,-58614287
-builtin_hash,CallUndefinedReceiverWideHandler,400495181
-builtin_hash,CallUndefinedReceiver0WideHandler,-1000686597
-builtin_hash,CallUndefinedReceiver1WideHandler,-299347389
-builtin_hash,CallUndefinedReceiver2WideHandler,525189648
-builtin_hash,CallWithSpreadWideHandler,-998392170
-builtin_hash,ConstructWideHandler,193926631
-builtin_hash,TestEqualWideHandler,-797631551
-builtin_hash,TestEqualStrictWideHandler,753248660
-builtin_hash,TestLessThanWideHandler,-210582608
-builtin_hash,TestGreaterThanWideHandler,543018087
-builtin_hash,TestLessThanOrEqualWideHandler,-1053789276
-builtin_hash,TestGreaterThanOrEqualWideHandler,-582678107
-builtin_hash,TestInstanceOfWideHandler,-280937039
-builtin_hash,TestInWideHandler,817647574
-builtin_hash,ToNumericWideHandler,868695670
-builtin_hash,CreateRegExpLiteralWideHandler,-1006765965
-builtin_hash,CreateArrayLiteralWideHandler,-826485513
-builtin_hash,CreateEmptyArrayLiteralWideHandler,559300434
-builtin_hash,CreateObjectLiteralWideHandler,455963528
-builtin_hash,CreateClosureWideHandler,1061873155
-builtin_hash,CreateBlockContextWideHandler,271729622
-builtin_hash,CreateFunctionContextWideHandler,527181803
-builtin_hash,JumpLoopWideHandler,941891518
-builtin_hash,JumpWideHandler,-79617432
-builtin_hash,JumpIfToBooleanTrueWideHandler,923993949
-builtin_hash,JumpIfToBooleanFalseWideHandler,145370961
-builtin_hash,JumpIfTrueWideHandler,-1042889789
-builtin_hash,JumpIfFalseWideHandler,-1042889789
-builtin_hash,SwitchOnSmiNoFeedbackWideHandler,-773907277
-builtin_hash,ForInPrepareWideHandler,-483036360
-builtin_hash,ForInNextWideHandler,-173595160
-builtin_hash,ThrowReferenceErrorIfHoleWideHandler,-254407930
-builtin_hash,GetIteratorWideHandler,-412149326
-builtin_hash,LdaSmiExtraWideHandler,65806156
-builtin_hash,LdaGlobalExtraWideHandler,411460668
-builtin_hash,AddSmiExtraWideHandler,553152400
-builtin_hash,SubSmiExtraWideHandler,446395338
-builtin_hash,MulSmiExtraWideHandler,105494980
-builtin_hash,DivSmiExtraWideHandler,-317292269
-builtin_hash,BitwiseOrSmiExtraWideHandler,604681516
-builtin_hash,BitwiseXorSmiExtraWideHandler,-91329781
-builtin_hash,BitwiseAndSmiExtraWideHandler,150048166
-builtin_hash,CallUndefinedReceiverExtraWideHandler,423421950
-builtin_hash,CallUndefinedReceiver1ExtraWideHandler,168432499
-builtin_hash,CallUndefinedReceiver2ExtraWideHandler,524973830
diff --git a/deps/v8/tools/builtins-pgo/download_profiles.py b/deps/v8/tools/builtins-pgo/download_profiles.py
new file mode 100755
index 0000000000..b4fb586703
--- /dev/null
+++ b/deps/v8/tools/builtins-pgo/download_profiles.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+"""
+Download PGO profiles for V8 builtins. The version is pulled from V8's version
+file (include/v8-version.h).
+
+See argparse documentation for usage details.
+"""
+
+import argparse
+import os
+import pathlib
+import re
+import sys
+
+FILENAME = os.path.basename(__file__)
+PGO_PROFILE_BUCKET = 'chromium-v8-builtins-pgo'
+PGO_PROFILE_DIR = pathlib.Path(os.path.dirname(__file__)) / 'profiles'
+
+V8_DIR = PGO_PROFILE_DIR.parents[2]
+DEPOT_TOOLS_DEFAULT_PATH = os.path.join(V8_DIR, 'third_party', 'depot_tools')
+VERSION_FILE = V8_DIR / 'include' / 'v8-version.h'
+VERSION_RE = r"""#define V8_MAJOR_VERSION (\d+)
+#define V8_MINOR_VERSION (\d+)
+#define V8_BUILD_NUMBER (\d+)
+#define V8_PATCH_LEVEL (\d+)"""
+
+
+def main(cmd_args=None):
+ args = parse_args(cmd_args)
+ import_gsutil(args)
+ version = retrieve_version(args)
+ perform_action(version, args)
+ sys.exit(0)
+
+
+def parse_args(cmd_args):
+ parser = argparse.ArgumentParser(
+ description=(
+ f'Download PGO profiles for V8 builtins generated for the version '
+ f'defined in {VERSION_FILE}.'),
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog='\n'.join([
+ f'examples:', f' {FILENAME} download',
+ f' {FILENAME} validate --bucket=chromium-v8-builtins-pgo-staging',
+ f'', f'return codes:',
+ f' 0 - profiles successfully downloaded or validated',
+ f' 1 - unexpected error, see stdout',
+ f' 2 - invalid arguments specified, see {FILENAME} --help',
+          f'  3 - invalid path to depot_tools provided',
+ f' 4 - gsutil was unable to retrieve data from the bucket'
+ ]),
+ )
+
+ parser.add_argument(
+ 'action',
+ choices=['download', 'validate'],
+ help=(
+ 'download or validate profiles for the currently checked out version'
+ ),
+ )
+
+ parser.add_argument(
+ '--version',
+ help=('download (or validate) profiles for this version (e.g. 11.0.226.0 '
+ 'or 11.0.226.2), defaults to the version in v8\'s version file'),
+ )
+
+ parser.add_argument(
+ '--depot-tools',
+ help=('path to depot tools, defaults to V8\'s version in '
+ f'{DEPOT_TOOLS_DEFAULT_PATH}.'),
+ type=pathlib.Path,
+ default=DEPOT_TOOLS_DEFAULT_PATH,
+ )
+
+ return parser.parse_args(cmd_args)
+
+
+def import_gsutil(args):
+ abs_depot_tools_path = os.path.abspath(args.depot_tools)
+ file = os.path.join(abs_depot_tools_path, 'download_from_google_storage.py')
+ if not pathlib.Path(file).is_file():
+ print(f'{file} does not exist; check --depot-tools path.', file=sys.stderr)
+ sys.exit(3)
+
+ sys.path.append(abs_depot_tools_path)
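+  # Import depot_tools' download_from_google_storage and expose it as the
+  # module-level name 'gcs_download', which call_gsutil() below relies on.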
+ globals()['gcs_download'] = __import__('download_from_google_storage')
+
+
+def retrieve_version(args):
+ if args.version:
+ return args.version
+
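+  # Otherwise assemble the version from the components in v8-version.h.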
+ with open(VERSION_FILE) as f:
+ version_tuple = re.search(VERSION_RE, f.read()).groups(0)
+ return '.'.join(version_tuple)
+
+
+def perform_action(version, args):
+ path = f'{PGO_PROFILE_BUCKET}/by-version/{version}'
+
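+  # 'download' copies all *.profile files for this version into PGO_PROFILE_DIR;
+  # 'validate' only checks that the version's meta.json exists in the bucket.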
+ if args.action == 'download':
+ cmd = ['cp', '-R', f'gs://{path}/*.profile', str(PGO_PROFILE_DIR)]
+ failure_hint = f'https://storage.googleapis.com/{path} does not exist.'
+ call_gsutil(cmd, failure_hint)
+ return
+
+ if args.action == 'validate':
+ meta_json = f'{path}/meta.json'
+ cmd = ['stat', f'gs://{meta_json}']
+ failure_hint = f'https://storage.googleapis.com/{meta_json} does not exist.'
+ call_gsutil(cmd, failure_hint)
+ return
+
+ raise AssertionError(f'Invalid action: {args.action}')
+
+
+def call_gsutil(cmd, failure_hint):
+ # Load gsutil from depot tools, and execute command
+ gsutil = gcs_download.Gsutil(gcs_download.GSUTIL_DEFAULT_PATH)
+ returncode, stdout, stderr = gsutil.check_call(*cmd)
+ if returncode != 0:
+ print_error(['gsutil', *cmd], returncode, stdout, stderr, failure_hint)
+ sys.exit(4)
+
+
+def print_error(cmd, returncode, stdout, stderr, failure_hint):
+ message = [
+ 'The following command did not succeed:',
+ f' $ {" ".join(cmd)}',
+ ]
+ sections = [
+ ('return code', str(returncode)),
+ ('stdout', stdout.strip()),
+ ('stderr', stderr.strip()),
+ ('hint', failure_hint),
+ ]
+ for label, output in sections:
+ if not output:
+ continue
+ message += [f'{label}:', " " + "\n ".join(output.split("\n"))]
+
+ print('\n'.join(message), file=sys.stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deps/v8/tools/builtins-pgo/download_profiles_test.py b/deps/v8/tools/builtins-pgo/download_profiles_test.py
new file mode 100644
index 0000000000..8ae844f7ea
--- /dev/null
+++ b/deps/v8/tools/builtins-pgo/download_profiles_test.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+
+import contextlib
+import io
+import os
+import unittest
+
+from tempfile import TemporaryDirectory
+from unittest.mock import patch
+
+from download_profiles import main
+
+
+class TestDownloadProfiles(unittest.TestCase):
+
+ def _test_cmd(self, cmd, exitcode):
+ out = io.StringIO()
+ err = io.StringIO()
+ with self.assertRaises(SystemExit) as se, \
+ contextlib.redirect_stdout(out), \
+ contextlib.redirect_stderr(err):
+ main(cmd)
+ self.assertEqual(se.exception.code, exitcode)
+ return out.getvalue(), err.getvalue()
+
+ def test_validate_profiles(self):
+ out, err = self._test_cmd(['validate', '--version', '11.1.0.0'], 0)
+ self.assertEqual(len(out), 0)
+ self.assertEqual(len(err), 0)
+
+ def test_download_profiles(self):
+ with TemporaryDirectory() as td, \
+ patch('download_profiles.PGO_PROFILE_DIR', td):
+ out, err = self._test_cmd(['download', '--version', '11.1.0.0'], 0)
+ self.assertEqual(len(out), 0)
+ self.assertEqual(len(err), 0)
+ self.assertGreater(
+ len([f for f in os.listdir(td) if f.endswith('.profile')]), 0)
+
+ def test_invalid_args(self):
+ out, err = self._test_cmd(['invalid-action'], 2)
+ self.assertEqual(len(out), 0)
+ self.assertGreater(len(err), 0)
+
+ def test_invalid_depot_tools_path(self):
+ out, err = self._test_cmd(
+ ['validate', '--depot-tools', '/no-depot-tools-path'], 3)
+ self.assertEqual(len(out), 0)
+ self.assertGreater(len(err), 0)
+
+ def test_missing_profiles(self):
+ out, err = self._test_cmd(['download', '--version', '0.0.0.42'], 4)
+ self.assertEqual(len(out), 0)
+ self.assertGreater(len(err), 0)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/builtins-pgo/generate.py b/deps/v8/tools/builtins-pgo/generate.py
index 5eec3c1516..2760b04d88 100755
--- a/deps/v8/tools/builtins-pgo/generate.py
+++ b/deps/v8/tools/builtins-pgo/generate.py
@@ -10,7 +10,7 @@ from pathlib import Path
parser = argparse.ArgumentParser(
description='Generate builtin PGO profiles. ' +
- 'The script has to be run from the root of a V8 checkout and updates the profiles in `tools/builtins-pgo`.'
+ 'The script has to be run from the root of a V8 checkout and updates the profiles in `tools/builtins-pgo/profiles`.'
)
parser.add_argument(
'v8_target_cpu', help='target cpu to build the profile for: x64 or arm64')
@@ -42,7 +42,7 @@ if args.target_cpu == None:
def run(cmd, **kwargs):
print(f"# CMD: {cmd} {kwargs}")
- return subprocess.run(cmd, **kwargs)
+ return subprocess.run(cmd, **kwargs, check=True)
def try_start_goma():
@@ -84,6 +84,7 @@ if args.use_qemu:
GN_ARGS_TEMPLATE = f"""\
is_debug = false
+is_clang = true
target_cpu = "{args.target_cpu}"
v8_target_cpu = "{args.v8_target_cpu}"
use_goma = {has_goma_str}
@@ -101,5 +102,5 @@ for arch, gn_args in [(args.v8_target_cpu, GN_ARGS_TEMPLATE)]:
]
run(cmd, cwd=benchmark_dir)
get_hints_path = tools_pgo_dir / "get_hints.py"
- profile_path = tools_pgo_dir / f"{arch}.profile"
+ profile_path = tools_pgo_dir / "profiles" / f"{arch}.profile"
run([get_hints_path, log_path, profile_path])
diff --git a/deps/v8/tools/builtins-pgo/profile_only.py b/deps/v8/tools/builtins-pgo/profile_only.py
new file mode 100755
index 0000000000..7d938e983d
--- /dev/null
+++ b/deps/v8/tools/builtins-pgo/profile_only.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+
+from pathlib import Path
+import argparse
+import subprocess
+import sys
+
+
+def main():
+ args = parse_arguments()
+ run_benchmark(args.benchmark_path, args.d8_path, args.output_dir)
+ run_get_hints(args.output_dir, args.v8_target_cpu)
+
+
+def parse_arguments():
+ parser = argparse.ArgumentParser(
+ description=('Generate builtin PGO profiles. '
+ 'The script is designed to run in swarming context where '
+                   'the isolate already contains the instrumented binary.'))
+ parser.add_argument(
+ '--v8-target-cpu',
+ help='target cpu to build the profile for: x64 or arm64')
+ parser.add_argument(
+ '--benchmark_path',
+ default=Path('./JetStream2/cli.js'),
+ help='path to benchmark runner .js file, usually JetStream2\'s `cli.js`',
+ type=Path)
+ parser.add_argument(
+ '--d8-path',
+ default=Path('./out/build/d8'),
+ help=('path to the d8 executable, by default `./out/build/d8` in '
+ 'swarming context. Use together with `--profile-only`'),
+ type=Path)
+ parser.add_argument('--output-dir', type=Path)
+ return parser.parse_args()
+
+
+def run_benchmark(benchmark_path, d8_path, output_dir):
+ root_dir = tools_pgo_dir().parent.parent
+ benchmark_dir = (root_dir / benchmark_path).parent.absolute()
+ assert benchmark_dir.exists(), "Could not find benchmark path!"
+
+ benchmark_file = benchmark_path.name
+ d8_path_abs = (root_dir / d8_path).absolute()
+ assert d8_path_abs.exists(), "Could not find d8 path!"
+
+ log_path = benchmark_log_path(output_dir)
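+  # --turbo-profiling-output makes the instrumented d8 write the builtins
+  # profiling log to log_path while running the benchmark.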
+ cmd = [d8_path_abs, f"--turbo-profiling-output={log_path}", benchmark_file]
+ run(cmd, cwd=benchmark_dir)
+ assert log_path.exists(), "Could not find benchmark logs path!"
+
+
+def tools_pgo_dir():
+ return Path(__file__).parent
+
+
+def benchmark_log_path(output_dir):
+ return (output_dir / "v8.builtins.pgo").absolute()
+
+
+def run_get_hints(output_dir, v8_target_cpu):
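+  # get_hints.py turns the raw profiling log into a <v8_target_cpu>.profile file.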
+ get_hints_path = (tools_pgo_dir() / "get_hints.py").absolute()
+ assert get_hints_path.exists(), "Could not find get_hints.py script path!"
+
+ profile_path = (output_dir / f"{v8_target_cpu}.profile").absolute()
+ run([
+ sys.executable, '-u', get_hints_path,
+ benchmark_log_path(output_dir), profile_path
+ ])
+ assert profile_path.exists(), "Could not find profile path!"
+
+
+def run(cmd, **kwargs):
+ print(f"# CMD: {cmd} {kwargs}")
+ subprocess.run(cmd, **kwargs, check=True)
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(main())
diff --git a/deps/v8/tools/builtins-pgo/profiles/.gitkeep b/deps/v8/tools/builtins-pgo/profiles/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/tools/builtins-pgo/profiles/.gitkeep
diff --git a/deps/v8/tools/builtins-pgo/x64.profile b/deps/v8/tools/builtins-pgo/x64.profile
deleted file mode 100644
index b7b8877cfa..0000000000
--- a/deps/v8/tools/builtins-pgo/x64.profile
+++ /dev/null
@@ -1,6415 +0,0 @@
-block_hint,RecordWriteSaveFP,21,22,0
-block_hint,RecordWriteSaveFP,6,7,0
-block_hint,RecordWriteSaveFP,19,20,0
-block_hint,RecordWriteSaveFP,9,10,0
-block_hint,RecordWriteSaveFP,36,37,0
-block_hint,RecordWriteSaveFP,34,35,0
-block_hint,RecordWriteSaveFP,32,33,1
-block_hint,RecordWriteSaveFP,25,26,0
-block_hint,RecordWriteSaveFP,15,16,1
-block_hint,RecordWriteIgnoreFP,21,22,0
-block_hint,RecordWriteIgnoreFP,6,7,0
-block_hint,RecordWriteIgnoreFP,19,20,0
-block_hint,RecordWriteIgnoreFP,9,10,0
-block_hint,RecordWriteIgnoreFP,25,26,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,43,44,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,83,84,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,80,81,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,63,64,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,35,36,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,67,68,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,50,51,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,29,30,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,56,57,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,7,8,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,61,62,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,14,15,1
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,16,17,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,69,70,0
-block_hint,Call_ReceiverIsNullOrUndefined_Baseline_Compact,54,55,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,83,84,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,80,81,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,63,64,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,5,6,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,46,47,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,25,26,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,67,68,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,50,51,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,29,30,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,56,57,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,7,8,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,61,62,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,14,15,1
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,16,17,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,69,70,0
-block_hint,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,54,55,1
-block_hint,Call_ReceiverIsAny_Baseline_Compact,19,20,1
-block_hint,Call_ReceiverIsAny_Baseline_Compact,21,22,1
-block_hint,CallProxy,38,39,1
-block_hint,CallProxy,22,23,1
-block_hint,CallProxy,18,19,1
-block_hint,CallWithSpread,51,52,1
-block_hint,CallWithSpread,31,32,1
-block_hint,CallWithSpread,6,7,1
-block_hint,CallWithSpread,57,58,1
-block_hint,CallWithSpread,59,60,0
-block_hint,CallWithSpread,43,44,0
-block_hint,CallWithSpread,45,46,1
-block_hint,CallWithSpread,33,34,0
-block_hint,CallWithSpread,38,39,1
-block_hint,CallWithSpread,40,41,1
-block_hint,CallWithSpread,22,23,1
-block_hint,CallWithSpread,24,25,1
-block_hint,CallWithSpread,26,27,0
-block_hint,CallWithSpread,53,54,1
-block_hint,CallWithSpread,47,48,1
-block_hint,CallWithSpread,9,10,0
-block_hint,CallWithSpread,11,12,1
-block_hint,CallWithSpread,13,14,0
-block_hint,CallWithSpread,49,50,0
-block_hint,CallWithSpread,55,56,1
-block_hint,CallWithSpread,15,16,1
-block_hint,CallWithSpread_Baseline,115,116,1
-block_hint,CallWithSpread_Baseline,60,61,0
-block_hint,CallWithSpread_Baseline,113,114,0
-block_hint,CallWithSpread_Baseline,106,107,0
-block_hint,CallWithSpread_Baseline,81,82,0
-block_hint,CallWithSpread_Baseline,48,49,1
-block_hint,CallWithSpread_Baseline,138,139,1
-block_hint,CallWithSpread_Baseline,130,131,0
-block_hint,CallWithSpread_Baseline,119,120,1
-block_hint,CallWithSpread_Baseline,85,86,1
-block_hint,CallWithSpread_Baseline,12,13,1
-block_hint,CallWithSpread_Baseline,100,101,1
-block_hint,CallWithSpread_Baseline,102,103,0
-block_hint,CallWithSpread_Baseline,68,69,0
-block_hint,CallWithSpread_Baseline,33,34,1
-block_hint,CallWithSpread_Baseline,70,71,1
-block_hint,CallWithSpread_Baseline,53,54,0
-block_hint,CallWithSpread_Baseline,63,64,1
-block_hint,CallWithSpread_Baseline,65,66,1
-block_hint,CallWithSpread_Baseline,37,38,1
-block_hint,CallWithSpread_Baseline,39,40,1
-block_hint,CallWithSpread_Baseline,41,42,0
-block_hint,CallWithSpread_Baseline,91,92,1
-block_hint,CallWithSpread_Baseline,77,78,1
-block_hint,CallWithSpread_Baseline,23,24,0
-block_hint,CallWithSpread_Baseline,25,26,1
-block_hint,CallWithSpread_Baseline,27,28,0
-block_hint,CallWithSpread_Baseline,79,80,0
-block_hint,CallWithSpread_Baseline,93,94,1
-block_hint,CallWithSpread_Baseline,29,30,1
-block_hint,CallWithArrayLike,28,29,1
-block_hint,CallWithArrayLike,30,31,1
-block_hint,CallWithArrayLike,32,33,1
-block_hint,CallWithArrayLike,6,7,1
-block_hint,CallWithArrayLike,8,9,1
-block_hint,CallWithArrayLike,61,62,0
-block_hint,CallWithArrayLike,53,54,1
-block_hint,CallWithArrayLike,46,47,0
-block_hint,CallWithArrayLike,10,11,1
-block_hint,CallWithArrayLike,63,64,1
-block_hint,CallWithArrayLike,55,56,0
-block_hint,CallWithArrayLike,57,58,1
-block_hint,CallWithArrayLike,12,13,0
-block_hint,ConstructWithSpread_Baseline,90,91,1
-block_hint,ConstructWithSpread_Baseline,70,71,1
-block_hint,ConstructWithSpread_Baseline,45,46,1
-block_hint,ConstructWithSpread_Baseline,11,12,1
-block_hint,ConstructWithSpread_Baseline,81,82,1
-block_hint,ConstructWithSpread_Baseline,83,84,0
-block_hint,ConstructWithSpread_Baseline,58,59,0
-block_hint,ConstructWithSpread_Baseline,27,28,1
-block_hint,Construct_Baseline,48,49,0
-block_hint,Construct_Baseline,46,47,1
-block_hint,Construct_Baseline,38,39,1
-block_hint,Construct_Baseline,23,24,1
-block_hint,Construct_Baseline,5,6,1
-block_hint,Construct_Baseline,20,21,1
-block_hint,Construct_Baseline,25,26,1
-block_hint,Construct_Baseline,7,8,1
-block_hint,Construct_Baseline,36,37,0
-block_hint,Construct_Baseline,11,12,1
-block_hint,Construct_Baseline,13,14,0
-block_hint,Construct_Baseline,40,41,0
-block_hint,Construct_Baseline,27,28,1
-block_hint,FastNewObject,38,39,1
-block_hint,FastNewObject,40,41,1
-block_hint,FastNewObject,42,43,1
-block_hint,FastNewObject,44,45,1
-block_hint,FastNewObject,53,54,0
-block_hint,FastNewObject,55,56,1
-block_hint,FastNewObject,48,49,0
-block_hint,FastNewObject,23,24,0
-block_hint,FastNewObject,27,28,0
-block_hint,FastNewObject,31,32,1
-block_hint,FastNewClosure,15,16,0
-block_hint,FastNewClosure,4,5,1
-block_hint,FastNewClosure,19,20,1
-block_hint,FastNewClosure,8,9,1
-block_hint,StringEqual,57,58,0
-block_hint,StringEqual,29,30,1
-block_hint,StringEqual,55,56,0
-block_hint,StringEqual,45,46,1
-block_hint,StringEqual,81,82,1
-block_hint,StringEqual,69,70,0
-block_hint,StringEqual,51,52,0
-block_hint,StringEqual,23,24,1
-block_hint,StringEqual,79,80,0
-block_hint,StringEqual,65,66,0
-block_hint,StringEqual,47,48,0
-block_hint,StringEqual,39,40,0
-block_hint,StringEqual,71,72,0
-block_hint,StringEqual,53,54,0
-block_hint,StringGreaterThanOrEqual,40,41,1
-block_hint,StringGreaterThanOrEqual,30,31,1
-block_hint,StringGreaterThanOrEqual,36,37,0
-block_hint,StringGreaterThanOrEqual,12,13,0
-block_hint,StringLessThan,22,23,0
-block_hint,StringLessThan,40,41,1
-block_hint,StringLessThan,36,37,0
-block_hint,StringLessThan,24,25,0
-block_hint,StringLessThanOrEqual,40,41,1
-block_hint,StringLessThanOrEqual,30,31,1
-block_hint,StringLessThanOrEqual,36,37,0
-block_hint,StringLessThanOrEqual,12,13,0
-block_hint,StringSubstring,91,92,0
-block_hint,StringSubstring,29,30,0
-block_hint,StringSubstring,63,64,1
-block_hint,StringSubstring,58,59,1
-block_hint,StringSubstring,56,57,1
-block_hint,StringSubstring,114,115,0
-block_hint,StringSubstring,85,86,1
-block_hint,StringSubstring,19,20,0
-block_hint,StringSubstring,21,22,0
-block_hint,StringSubstring,130,131,1
-block_hint,StringSubstring,118,119,1
-block_hint,StringSubstring,38,39,0
-block_hint,StringSubstring,83,84,1
-block_hint,StringSubstring,17,18,0
-block_hint,StringSubstring,132,133,1
-block_hint,StringSubstring,120,121,1
-block_hint,StringSubstring,42,43,0
-block_hint,StringSubstring,75,76,1
-block_hint,StringSubstring,139,140,0
-block_hint,StringSubstring,34,35,1
-block_hint,StringSubstring,31,32,0
-block_hint,OrderedHashTableHealIndex,5,6,1
-block_hint,OrderedHashTableHealIndex,9,10,0
-block_hint,CompileLazy,42,43,1
-block_hint,CompileLazy,22,23,0
-block_hint,CompileLazy,40,41,0
-block_hint,CompileLazy,8,9,0
-block_hint,CompileLazy,10,11,0
-block_hint,CompileLazy,15,16,0
-block_hint,CompileLazy,3,4,0
-block_hint,CompileLazy,18,19,1
-block_hint,AllocateInYoungGeneration,2,3,1
-block_hint,AllocateRegularInYoungGeneration,2,3,1
-block_hint,AllocateRegularInOldGeneration,2,3,1
-block_hint,CopyFastSmiOrObjectElements,12,13,0
-block_hint,CopyFastSmiOrObjectElements,18,19,1
-block_hint,CopyFastSmiOrObjectElements,9,10,0
-block_hint,CopyFastSmiOrObjectElements,23,24,1
-block_hint,CopyFastSmiOrObjectElements,21,22,1
-block_hint,CopyFastSmiOrObjectElements,15,16,1
-block_hint,GrowFastDoubleElements,18,19,0
-block_hint,GrowFastDoubleElements,20,21,0
-block_hint,GrowFastDoubleElements,14,15,0
-block_hint,GrowFastDoubleElements,16,17,1
-block_hint,GrowFastDoubleElements,25,26,0
-block_hint,GrowFastDoubleElements,6,7,0
-block_hint,GrowFastDoubleElements,27,28,0
-block_hint,GrowFastSmiOrObjectElements,16,17,0
-block_hint,GrowFastSmiOrObjectElements,18,19,0
-block_hint,GrowFastSmiOrObjectElements,14,15,0
-block_hint,GrowFastSmiOrObjectElements,22,23,1
-block_hint,GrowFastSmiOrObjectElements,6,7,0
-block_hint,GrowFastSmiOrObjectElements,12,13,0
-block_hint,ToNumber,3,4,1
-block_hint,ToNumber,5,6,0
-block_hint,ToNumber,18,19,0
-block_hint,ToNumber,15,16,1
-block_hint,ToNumber_Baseline,24,25,0
-block_hint,ToNumber_Baseline,22,23,1
-block_hint,ToNumber_Baseline,3,4,1
-block_hint,ToNumeric_Baseline,7,8,0
-block_hint,ToNumeric_Baseline,3,4,1
-block_hint,ToNumberConvertBigInt,3,4,1
-block_hint,ToNumberConvertBigInt,5,6,0
-block_hint,ToNumberConvertBigInt,20,21,0
-block_hint,ToNumberConvertBigInt,17,18,1
-block_hint,ToNumberConvertBigInt,9,10,1
-block_hint,Typeof,17,18,0
-block_hint,Typeof,9,10,0
-block_hint,Typeof,13,14,1
-block_hint,KeyedLoadIC_PolymorphicName,244,245,1
-block_hint,KeyedLoadIC_PolymorphicName,96,97,1
-block_hint,KeyedLoadIC_PolymorphicName,260,261,0
-block_hint,KeyedLoadIC_PolymorphicName,58,59,0
-block_hint,KeyedLoadIC_PolymorphicName,133,134,1
-block_hint,KeyedLoadIC_PolymorphicName,298,299,1
-block_hint,KeyedLoadIC_PolymorphicName,330,331,1
-block_hint,KeyedLoadIC_PolymorphicName,98,99,0
-block_hint,KeyedLoadIC_PolymorphicName,100,101,0
-block_hint,KeyedLoadIC_PolymorphicName,22,23,1
-block_hint,KeyedLoadIC_PolymorphicName,165,166,0
-block_hint,KeyedLoadIC_PolymorphicName,122,123,1
-block_hint,KeyedLoadIC_PolymorphicName,332,333,1
-block_hint,KeyedLoadIC_PolymorphicName,110,111,0
-block_hint,KeyedLoadIC_PolymorphicName,175,176,0
-block_hint,KeyedLoadIC_PolymorphicName,43,44,1
-block_hint,KeyedLoadIC_PolymorphicName,74,75,0
-block_hint,KeyedLoadIC_PolymorphicName,250,251,0
-block_hint,KeyedLoadIC_PolymorphicName,287,288,1
-block_hint,KeyedLoadIC_PolymorphicName,26,27,0
-block_hint,KeyedLoadIC_PolymorphicName,24,25,0
-block_hint,KeyedStoreIC_Megamorphic,379,380,1
-block_hint,KeyedStoreIC_Megamorphic,381,382,0
-block_hint,KeyedStoreIC_Megamorphic,1216,1217,0
-block_hint,KeyedStoreIC_Megamorphic,1218,1219,1
-block_hint,KeyedStoreIC_Megamorphic,1203,1204,1
-block_hint,KeyedStoreIC_Megamorphic,1140,1141,0
-block_hint,KeyedStoreIC_Megamorphic,915,916,1
-block_hint,KeyedStoreIC_Megamorphic,383,384,1
-block_hint,KeyedStoreIC_Megamorphic,1228,1229,0
-block_hint,KeyedStoreIC_Megamorphic,1211,1212,0
-block_hint,KeyedStoreIC_Megamorphic,601,602,0
-block_hint,KeyedStoreIC_Megamorphic,746,747,1
-block_hint,KeyedStoreIC_Megamorphic,603,604,0
-block_hint,KeyedStoreIC_Megamorphic,1191,1192,0
-block_hint,KeyedStoreIC_Megamorphic,1041,1042,0
-block_hint,KeyedStoreIC_Megamorphic,1168,1169,0
-block_hint,KeyedStoreIC_Megamorphic,192,193,1
-block_hint,KeyedStoreIC_Megamorphic,194,195,0
-block_hint,KeyedStoreIC_Megamorphic,1134,1135,0
-block_hint,KeyedStoreIC_Megamorphic,1143,1144,0
-block_hint,KeyedStoreIC_Megamorphic,927,928,0
-block_hint,KeyedStoreIC_Megamorphic,491,492,0
-block_hint,KeyedStoreIC_Megamorphic,895,896,0
-block_hint,KeyedStoreIC_Megamorphic,931,932,0
-block_hint,KeyedStoreIC_Megamorphic,929,930,1
-block_hint,KeyedStoreIC_Megamorphic,493,494,1
-block_hint,KeyedStoreIC_Megamorphic,499,500,1
-block_hint,KeyedStoreIC_Megamorphic,501,502,0
-block_hint,KeyedStoreIC_Megamorphic,935,936,1
-block_hint,KeyedStoreIC_Megamorphic,503,504,0
-block_hint,KeyedStoreIC_Megamorphic,505,506,1
-block_hint,KeyedStoreIC_Megamorphic,933,934,1
-block_hint,KeyedStoreIC_Megamorphic,497,498,1
-block_hint,KeyedStoreIC_Megamorphic,495,496,0
-block_hint,KeyedStoreIC_Megamorphic,1115,1116,1
-block_hint,KeyedStoreIC_Megamorphic,1177,1178,1
-block_hint,KeyedStoreIC_Megamorphic,893,894,0
-block_hint,KeyedStoreIC_Megamorphic,350,351,1
-block_hint,KeyedStoreIC_Megamorphic,336,337,1
-block_hint,KeyedStoreIC_Megamorphic,1113,1114,1
-block_hint,KeyedStoreIC_Megamorphic,683,684,0
-block_hint,KeyedStoreIC_Megamorphic,539,540,0
-block_hint,KeyedStoreIC_Megamorphic,541,542,0
-block_hint,KeyedStoreIC_Megamorphic,1045,1046,0
-block_hint,KeyedStoreIC_Megamorphic,547,548,1
-block_hint,KeyedStoreIC_Megamorphic,1071,1072,0
-block_hint,KeyedStoreIC_Megamorphic,606,607,0
-block_hint,KeyedStoreIC_Megamorphic,1193,1194,0
-block_hint,KeyedStoreIC_Megamorphic,549,550,0
-block_hint,KeyedStoreIC_Megamorphic,1047,1048,0
-block_hint,KeyedStoreIC_Megamorphic,551,552,1
-block_hint,KeyedStoreIC_Megamorphic,200,201,1
-block_hint,KeyedStoreIC_Megamorphic,553,554,0
-block_hint,KeyedStoreIC_Megamorphic,202,203,0
-block_hint,KeyedStoreIC_Megamorphic,204,205,0
-block_hint,KeyedStoreIC_Megamorphic,950,951,0
-block_hint,KeyedStoreIC_Megamorphic,555,556,1
-block_hint,KeyedStoreIC_Megamorphic,557,558,0
-block_hint,KeyedStoreIC_Megamorphic,559,560,1
-block_hint,KeyedStoreIC_Megamorphic,561,562,0
-block_hint,KeyedStoreIC_Megamorphic,1148,1149,0
-block_hint,KeyedStoreIC_Megamorphic,563,564,1
-block_hint,KeyedStoreIC_Megamorphic,902,903,0
-block_hint,KeyedStoreIC_Megamorphic,1150,1151,0
-block_hint,KeyedStoreIC_Megamorphic,565,566,1
-block_hint,KeyedStoreIC_Megamorphic,571,572,1
-block_hint,KeyedStoreIC_Megamorphic,573,574,0
-block_hint,KeyedStoreIC_Megamorphic,575,576,0
-block_hint,KeyedStoreIC_Megamorphic,577,578,1
-block_hint,KeyedStoreIC_Megamorphic,957,958,1
-block_hint,KeyedStoreIC_Megamorphic,569,570,1
-block_hint,KeyedStoreIC_Megamorphic,567,568,0
-block_hint,KeyedStoreIC_Megamorphic,1214,1215,0
-block_hint,KeyedStoreIC_Megamorphic,1231,1232,1
-block_hint,KeyedStoreIC_Megamorphic,1224,1225,1
-block_hint,KeyedStoreIC_Megamorphic,1130,1131,1
-block_hint,KeyedStoreIC_Megamorphic,975,976,1
-block_hint,KeyedStoreIC_Megamorphic,206,207,0
-block_hint,KeyedStoreIC_Megamorphic,362,363,0
-block_hint,KeyedStoreIC_Megamorphic,977,978,1
-block_hint,KeyedStoreIC_Megamorphic,214,215,0
-block_hint,KeyedStoreIC_Megamorphic,1027,1028,0
-block_hint,KeyedStoreIC_Megamorphic,693,694,0
-block_hint,KeyedStoreIC_Megamorphic,579,580,0
-block_hint,KeyedStoreIC_Megamorphic,167,168,1
-block_hint,KeyedStoreIC_Megamorphic,581,582,0
-block_hint,KeyedStoreIC_Megamorphic,583,584,0
-block_hint,KeyedStoreIC_Megamorphic,1054,1055,0
-block_hint,KeyedStoreIC_Megamorphic,585,586,1
-block_hint,KeyedStoreIC_Megamorphic,963,964,0
-block_hint,KeyedStoreIC_Megamorphic,1174,1175,0
-block_hint,KeyedStoreIC_Megamorphic,1056,1057,1
-block_hint,KeyedStoreIC_Megamorphic,759,760,1
-block_hint,KeyedStoreIC_Megamorphic,612,613,0
-block_hint,KeyedStoreIC_Megamorphic,1196,1197,0
-block_hint,KeyedStoreIC_Megamorphic,1058,1059,0
-block_hint,KeyedStoreIC_Megamorphic,1172,1173,0
-block_hint,KeyedStoreIC_Megamorphic,224,225,0
-block_hint,KeyedStoreIC_Megamorphic,761,762,0
-block_hint,KeyedStoreIC_Megamorphic,593,594,0
-block_hint,KeyedStoreIC_Megamorphic,1136,1137,0
-block_hint,KeyedStoreIC_Megamorphic,1180,1181,0
-block_hint,KeyedStoreIC_Megamorphic,906,907,0
-block_hint,KeyedStoreIC_Megamorphic,173,174,1
-block_hint,KeyedStoreIC_Megamorphic,175,176,1
-block_hint,KeyedStoreIC_Megamorphic,373,374,0
-block_hint,KeyedStoreIC_Megamorphic,177,178,1
-block_hint,KeyedStoreIC_Megamorphic,375,376,0
-block_hint,KeyedStoreIC_Megamorphic,179,180,1
-block_hint,KeyedStoreIC_Megamorphic,234,235,0
-block_hint,KeyedStoreIC_Megamorphic,236,237,0
-block_hint,KeyedStoreIC_Megamorphic,181,182,1
-block_hint,KeyedStoreIC_Megamorphic,183,184,1
-block_hint,KeyedStoreIC_Megamorphic,1032,1033,0
-block_hint,KeyedStoreIC_Megamorphic,185,186,1
-block_hint,KeyedStoreIC_Megamorphic,925,926,1
-block_hint,KeyedStoreIC_Megamorphic,485,486,1
-block_hint,KeyedStoreIC_Megamorphic,733,734,0
-block_hint,KeyedStoreIC_Megamorphic,919,920,1
-block_hint,KeyedStoreIC_Megamorphic,413,414,0
-block_hint,KeyedStoreIC_Megamorphic,415,416,0
-block_hint,KeyedStoreIC_Megamorphic,254,255,1
-block_hint,KeyedStoreIC_Megamorphic,417,418,0
-block_hint,KeyedStoreIC_Megamorphic,630,631,1
-block_hint,KeyedStoreIC_Megamorphic,92,93,1
-block_hint,KeyedStoreIC_Megamorphic,94,95,0
-block_hint,KeyedStoreIC_Megamorphic,769,770,1
-block_hint,KeyedStoreIC_Megamorphic,387,388,0
-block_hint,KeyedStoreIC_Megamorphic,639,640,1
-block_hint,KeyedStoreIC_Megamorphic,64,65,1
-block_hint,KeyedStoreIC_Megamorphic,66,67,0
-block_hint,DefineKeyedOwnIC_Megamorphic,312,313,1
-block_hint,DefineKeyedOwnIC_Megamorphic,314,315,0
-block_hint,DefineKeyedOwnIC_Megamorphic,887,888,0
-block_hint,DefineKeyedOwnIC_Megamorphic,420,421,0
-block_hint,DefineKeyedOwnIC_Megamorphic,418,419,1
-block_hint,DefineKeyedOwnIC_Megamorphic,803,804,0
-block_hint,DefineKeyedOwnIC_Megamorphic,575,576,1
-block_hint,DefineKeyedOwnIC_Megamorphic,601,602,1
-block_hint,DefineKeyedOwnIC_Megamorphic,232,233,0
-block_hint,DefineKeyedOwnIC_Megamorphic,53,54,1
-block_hint,DefineKeyedOwnIC_Megamorphic,55,56,0
-block_hint,LoadGlobalIC_NoFeedback,41,42,1
-block_hint,LoadGlobalIC_NoFeedback,6,7,1
-block_hint,LoadGlobalIC_NoFeedback,8,9,1
-block_hint,LoadGlobalIC_NoFeedback,10,11,1
-block_hint,LoadGlobalIC_NoFeedback,12,13,1
-block_hint,LoadGlobalIC_NoFeedback,31,32,1
-block_hint,LoadGlobalIC_NoFeedback,49,50,1
-block_hint,LoadGlobalIC_NoFeedback,18,19,1
-block_hint,LoadGlobalIC_NoFeedback,27,28,0
-block_hint,LoadGlobalIC_NoFeedback,14,15,1
-block_hint,LoadGlobalIC_NoFeedback,33,34,0
-block_hint,LoadGlobalIC_NoFeedback,16,17,1
-block_hint,LoadGlobalIC_NoFeedback,20,21,1
-block_hint,LoadGlobalIC_NoFeedback,22,23,0
-block_hint,LoadGlobalIC_NoFeedback,24,25,1
-block_hint,LoadIC_FunctionPrototype,2,3,0
-block_hint,LoadIC_FunctionPrototype,4,5,1
-block_hint,LoadIC_NoFeedback,97,98,1
-block_hint,LoadIC_NoFeedback,99,100,0
-block_hint,LoadIC_NoFeedback,306,307,1
-block_hint,LoadIC_NoFeedback,226,227,0
-block_hint,LoadIC_NoFeedback,285,286,1
-block_hint,LoadIC_NoFeedback,141,142,0
-block_hint,LoadIC_NoFeedback,320,321,0
-block_hint,LoadIC_NoFeedback,287,288,0
-block_hint,LoadIC_NoFeedback,302,303,0
-block_hint,LoadIC_NoFeedback,53,54,1
-block_hint,LoadIC_NoFeedback,289,290,0
-block_hint,LoadIC_NoFeedback,55,56,0
-block_hint,LoadIC_NoFeedback,324,325,1
-block_hint,LoadIC_NoFeedback,272,273,0
-block_hint,LoadIC_NoFeedback,295,296,1
-block_hint,LoadIC_NoFeedback,247,248,1
-block_hint,LoadIC_NoFeedback,59,60,0
-block_hint,LoadIC_NoFeedback,22,23,1
-block_hint,LoadIC_NoFeedback,35,36,1
-block_hint,LoadIC_NoFeedback,130,131,1
-block_hint,LoadIC_NoFeedback,145,146,0
-block_hint,LoadIC_NoFeedback,125,126,0
-block_hint,LoadIC_NoFeedback,261,262,0
-block_hint,LoadIC_NoFeedback,250,251,0
-block_hint,LoadIC_NoFeedback,149,150,1
-block_hint,LoadIC_NoFeedback,167,168,0
-block_hint,LoadIC_NoFeedback,322,323,0
-block_hint,LoadIC_NoFeedback,151,152,0
-block_hint,LoadIC_NoFeedback,291,292,0
-block_hint,LoadIC_NoFeedback,70,71,1
-block_hint,LoadIC_NoFeedback,155,156,0
-block_hint,LoadIC_NoFeedback,72,73,0
-block_hint,LoadIC_NoFeedback,254,255,1
-block_hint,LoadIC_NoFeedback,76,77,0
-block_hint,LoadIC_NoFeedback,326,327,1
-block_hint,LoadIC_NoFeedback,278,279,0
-block_hint,LoadIC_NoFeedback,276,277,0
-block_hint,LoadIC_NoFeedback,242,243,1
-block_hint,LoadIC_NoFeedback,135,136,1
-block_hint,LoadIC_NoFeedback,93,94,0
-block_hint,StoreIC_NoFeedback,147,148,1
-block_hint,StoreIC_NoFeedback,149,150,0
-block_hint,StoreIC_NoFeedback,259,260,0
-block_hint,StoreIC_NoFeedback,549,550,0
-block_hint,StoreIC_NoFeedback,443,444,0
-block_hint,StoreIC_NoFeedback,527,528,0
-block_hint,StoreIC_NoFeedback,58,59,1
-block_hint,StoreIC_NoFeedback,60,61,0
-block_hint,StoreIC_NoFeedback,498,499,0
-block_hint,StoreIC_NoFeedback,367,368,0
-block_hint,StoreIC_NoFeedback,151,152,0
-block_hint,StoreIC_NoFeedback,349,350,0
-block_hint,StoreIC_NoFeedback,153,154,1
-block_hint,StoreIC_NoFeedback,159,160,1
-block_hint,StoreIC_NoFeedback,161,162,0
-block_hint,StoreIC_NoFeedback,163,164,0
-block_hint,StoreIC_NoFeedback,157,158,1
-block_hint,StoreIC_NoFeedback,155,156,0
-block_hint,StoreIC_NoFeedback,536,537,1
-block_hint,StoreIC_NoFeedback,381,382,1
-block_hint,StoreIC_NoFeedback,179,180,0
-block_hint,StoreIC_NoFeedback,519,520,1
-block_hint,StoreIC_NoFeedback,199,200,0
-block_hint,StoreIC_NoFeedback,201,202,0
-block_hint,StoreIC_NoFeedback,447,448,0
-block_hint,StoreIC_NoFeedback,207,208,1
-block_hint,StoreIC_NoFeedback,473,474,0
-block_hint,StoreIC_NoFeedback,262,263,0
-block_hint,StoreIC_NoFeedback,551,552,0
-block_hint,StoreIC_NoFeedback,209,210,0
-block_hint,StoreIC_NoFeedback,449,450,0
-block_hint,StoreIC_NoFeedback,66,67,1
-block_hint,StoreIC_NoFeedback,213,214,0
-block_hint,StoreIC_NoFeedback,68,69,0
-block_hint,StoreIC_NoFeedback,390,391,0
-block_hint,StoreIC_NoFeedback,215,216,1
-block_hint,StoreIC_NoFeedback,217,218,0
-block_hint,StoreIC_NoFeedback,219,220,1
-block_hint,StoreIC_NoFeedback,221,222,0
-block_hint,StoreIC_NoFeedback,509,510,0
-block_hint,StoreIC_NoFeedback,223,224,1
-block_hint,StoreIC_NoFeedback,356,357,0
-block_hint,StoreIC_NoFeedback,511,512,0
-block_hint,StoreIC_NoFeedback,393,394,1
-block_hint,StoreIC_NoFeedback,231,232,1
-block_hint,StoreIC_NoFeedback,233,234,0
-block_hint,StoreIC_NoFeedback,235,236,0
-block_hint,StoreIC_NoFeedback,237,238,1
-block_hint,StoreIC_NoFeedback,227,228,0
-block_hint,StoreIC_NoFeedback,564,565,0
-block_hint,StoreIC_NoFeedback,494,495,1
-block_hint,StoreIC_NoFeedback,413,414,1
-block_hint,StoreIC_NoFeedback,72,73,0
-block_hint,StoreIC_NoFeedback,78,79,0
-block_hint,StoreIC_NoFeedback,130,131,0
-block_hint,StoreIC_NoFeedback,415,416,1
-block_hint,StoreIC_NoFeedback,80,81,0
-block_hint,StoreIC_NoFeedback,82,83,0
-block_hint,StoreIC_NoFeedback,241,242,0
-block_hint,StoreIC_NoFeedback,243,244,0
-block_hint,StoreIC_NoFeedback,456,457,0
-block_hint,StoreIC_NoFeedback,245,246,1
-block_hint,StoreIC_NoFeedback,513,514,0
-block_hint,StoreIC_NoFeedback,403,404,0
-block_hint,StoreIC_NoFeedback,458,459,1
-block_hint,StoreIC_NoFeedback,268,269,0
-block_hint,StoreIC_NoFeedback,553,554,0
-block_hint,StoreIC_NoFeedback,460,461,0
-block_hint,StoreIC_NoFeedback,531,532,0
-block_hint,StoreIC_NoFeedback,90,91,0
-block_hint,StoreIC_NoFeedback,332,333,0
-block_hint,StoreIC_NoFeedback,420,421,1
-block_hint,StoreIC_NoFeedback,94,95,0
-block_hint,StoreIC_NoFeedback,96,97,0
-block_hint,StoreIC_NoFeedback,253,254,0
-block_hint,StoreIC_NoFeedback,255,256,1
-block_hint,StoreIC_NoFeedback,362,363,0
-block_hint,StoreIC_NoFeedback,40,41,1
-block_hint,StoreIC_NoFeedback,42,43,1
-block_hint,StoreIC_NoFeedback,141,142,0
-block_hint,StoreIC_NoFeedback,44,45,1
-block_hint,StoreIC_NoFeedback,143,144,0
-block_hint,StoreIC_NoFeedback,46,47,1
-block_hint,StoreIC_NoFeedback,100,101,0
-block_hint,StoreIC_NoFeedback,102,103,0
-block_hint,StoreIC_NoFeedback,48,49,1
-block_hint,StoreIC_NoFeedback,50,51,1
-block_hint,StoreIC_NoFeedback,439,440,0
-block_hint,StoreIC_NoFeedback,52,53,1
-block_hint,DefineNamedOwnIC_NoFeedback,80,81,1
-block_hint,DefineNamedOwnIC_NoFeedback,82,83,0
-block_hint,DefineNamedOwnIC_NoFeedback,236,237,0
-block_hint,DefineNamedOwnIC_NoFeedback,210,211,1
-block_hint,DefineNamedOwnIC_NoFeedback,136,137,0
-block_hint,DefineNamedOwnIC_NoFeedback,239,240,0
-block_hint,DefineNamedOwnIC_NoFeedback,212,213,0
-block_hint,DefineNamedOwnIC_NoFeedback,234,235,0
-block_hint,DefineNamedOwnIC_NoFeedback,157,158,1
-block_hint,DefineNamedOwnIC_NoFeedback,36,37,1
-block_hint,DefineNamedOwnIC_NoFeedback,86,87,0
-block_hint,DefineNamedOwnIC_NoFeedback,38,39,0
-block_hint,DefineNamedOwnIC_NoFeedback,40,41,0
-block_hint,KeyedLoadIC_SloppyArguments,12,13,0
-block_hint,KeyedLoadIC_SloppyArguments,14,15,1
-block_hint,KeyedLoadIC_SloppyArguments,4,5,1
-block_hint,KeyedLoadIC_SloppyArguments,22,23,0
-block_hint,KeyedLoadIC_SloppyArguments,6,7,1
-block_hint,KeyedLoadIC_SloppyArguments,16,17,0
-block_hint,KeyedLoadIC_SloppyArguments,18,19,0
-block_hint,KeyedLoadIC_SloppyArguments,8,9,1
-block_hint,KeyedLoadIC_SloppyArguments,10,11,0
-block_hint,StoreFastElementIC_Standard,336,337,0
-block_hint,StoreFastElementIC_Standard,876,877,0
-block_hint,StoreFastElementIC_Standard,344,345,0
-block_hint,StoreFastElementIC_Standard,1016,1017,1
-block_hint,StoreFastElementIC_Standard,346,347,1
-block_hint,StoreFastElementIC_Standard,40,41,1
-block_hint,StoreFastElementIC_Standard,348,349,0
-block_hint,StoreFastElementIC_Standard,878,879,0
-block_hint,StoreFastElementIC_Standard,356,357,0
-block_hint,StoreFastElementIC_Standard,1018,1019,1
-block_hint,StoreFastElementIC_Standard,358,359,1
-block_hint,StoreFastElementIC_Standard,42,43,1
-block_hint,StoreFastElementIC_Standard,360,361,0
-block_hint,StoreFastElementIC_Standard,880,881,0
-block_hint,StoreFastElementIC_Standard,1020,1021,1
-block_hint,StoreFastElementIC_Standard,368,369,1
-block_hint,StoreFastElementIC_Standard,44,45,1
-block_hint,StoreFastElementIC_Standard,400,401,0
-block_hint,StoreFastElementIC_Standard,888,889,0
-block_hint,StoreFastElementIC_Standard,1028,1029,1
-block_hint,StoreFastElementIC_Standard,408,409,1
-block_hint,StoreFastElementIC_Standard,52,53,1
-block_hint,StoreFastElementIC_Standard,894,895,0
-block_hint,StoreFastElementIC_Standard,436,437,1
-block_hint,StoreFastElementIC_Standard,1035,1036,1
-block_hint,StoreFastElementIC_Standard,438,439,1
-block_hint,StoreFastElementIC_Standard,58,59,1
-block_hint,StoreFastElementIC_Standard,898,899,0
-block_hint,StoreFastElementIC_Standard,446,447,1
-block_hint,StoreFastElementIC_Standard,1038,1039,1
-block_hint,StoreFastElementIC_Standard,448,449,1
-block_hint,StoreFastElementIC_Standard,60,61,1
-block_hint,StoreFastElementIC_Standard,662,663,0
-block_hint,StoreFastElementIC_Standard,1114,1115,0
-block_hint,StoreFastElementIC_Standard,731,732,0
-block_hint,StoreFastElementIC_Standard,300,301,1
-block_hint,StoreFastElementIC_Standard,658,659,0
-block_hint,StoreFastElementIC_Standard,1116,1117,0
-block_hint,StoreFastElementIC_Standard,733,734,0
-block_hint,StoreFastElementIC_Standard,302,303,1
-block_hint,StoreFastElementIC_Standard,654,655,0
-block_hint,StoreFastElementIC_Standard,1118,1119,0
-block_hint,StoreFastElementIC_Standard,735,736,0
-block_hint,StoreFastElementIC_Standard,304,305,1
-block_hint,StoreFastElementIC_Standard,650,651,0
-block_hint,StoreFastElementIC_Standard,1120,1121,0
-block_hint,StoreFastElementIC_Standard,737,738,0
-block_hint,StoreFastElementIC_Standard,306,307,1
-block_hint,StoreFastElementIC_Standard,646,647,0
-block_hint,StoreFastElementIC_Standard,981,982,1
-block_hint,StoreFastElementIC_Standard,832,833,0
-block_hint,StoreFastElementIC_Standard,308,309,1
-block_hint,StoreFastElementIC_Standard,642,643,0
-block_hint,StoreFastElementIC_Standard,979,980,1
-block_hint,StoreFastElementIC_Standard,834,835,0
-block_hint,StoreFastElementIC_Standard,310,311,1
-block_hint,StoreFastElementIC_Standard,638,639,0
-block_hint,StoreFastElementIC_Standard,977,978,1
-block_hint,StoreFastElementIC_Standard,836,837,0
-block_hint,StoreFastElementIC_Standard,312,313,1
-block_hint,StoreFastElementIC_Standard,634,635,0
-block_hint,StoreFastElementIC_Standard,838,839,0
-block_hint,StoreFastElementIC_Standard,314,315,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,489,490,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,265,266,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,661,662,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,492,493,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,587,588,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,455,456,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,214,215,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,565,566,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,198,199,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,204,205,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,643,644,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,457,458,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,34,35,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,494,495,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,281,282,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,667,668,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,287,288,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,591,592,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,459,460,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,219,220,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,561,562,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,182,183,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,645,646,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,461,462,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,36,37,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,499,500,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,673,674,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,301,302,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,595,596,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,463,464,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,224,225,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,557,558,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,166,167,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,647,648,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,465,466,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,38,39,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,512,513,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,679,680,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,345,346,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,605,606,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,473,474,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,238,239,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,547,548,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,126,127,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,651,652,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,475,476,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,46,47,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,521,522,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,379,380,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,683,684,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,385,386,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,614,615,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,481,482,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,249,250,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,251,252,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,539,540,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,102,103,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,541,542,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,483,484,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,56,57,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,528,529,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,393,394,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,687,688,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,399,400,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,96,97,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,100,101,0
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,487,488,1
-block_hint,StoreFastElementIC_GrowNoTransitionHandleCOW,58,59,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,357,358,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,187,188,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,415,416,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,189,190,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,22,23,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,361,362,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,419,420,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,211,212,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,213,214,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,160,161,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,397,398,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,130,131,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,134,135,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,26,27,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,377,378,0
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,285,286,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,434,435,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,287,288,1
-block_hint,StoreFastElementIC_NoTransitionHandleCOW,44,45,1
-block_hint,ElementsTransitionAndStore_Standard,460,461,0
-block_hint,ElementsTransitionAndStore_Standard,462,463,1
-block_hint,ElementsTransitionAndStore_Standard,555,556,1
-block_hint,ElementsTransitionAndStore_Standard,458,459,0
-block_hint,ElementsTransitionAndStore_Standard,199,200,0
-block_hint,ElementsTransitionAndStore_Standard,201,202,0
-block_hint,ElementsTransitionAndStore_Standard,355,356,0
-block_hint,ElementsTransitionAndStore_Standard,464,465,1
-block_hint,ElementsTransitionAndStore_Standard,209,210,1
-block_hint,ElementsTransitionAndStore_Standard,28,29,1
-block_hint,ElementsTransitionAndStore_Standard,508,509,0
-block_hint,ElementsTransitionAndStore_Standard,510,511,1
-block_hint,ElementsTransitionAndStore_Standard,504,505,1
-block_hint,ElementsTransitionAndStore_Standard,506,507,0
-block_hint,ElementsTransitionAndStore_Standard,267,268,0
-block_hint,ElementsTransitionAndStore_Standard,370,371,0
-block_hint,ElementsTransitionAndStore_Standard,512,513,1
-block_hint,ElementsTransitionAndStore_Standard,275,276,1
-block_hint,ElementsTransitionAndStore_Standard,38,39,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,756,757,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,1115,1116,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,759,760,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,927,928,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,701,702,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,324,325,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,853,854,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,237,238,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,241,242,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,1059,1060,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,703,704,0
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,62,63,1
-block_hint,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,95,96,0
-block_hint,KeyedHasIC_PolymorphicName,69,70,1
-block_hint,KeyedHasIC_PolymorphicName,28,29,1
-block_hint,KeyedHasIC_PolymorphicName,24,25,0
-block_hint,KeyedHasIC_PolymorphicName,26,27,0
-block_hint,KeyedHasIC_PolymorphicName,55,56,1
-block_hint,KeyedHasIC_PolymorphicName,89,90,1
-block_hint,KeyedHasIC_PolymorphicName,93,94,1
-block_hint,KeyedHasIC_PolymorphicName,30,31,0
-block_hint,KeyedHasIC_PolymorphicName,32,33,0
-block_hint,KeyedHasIC_PolymorphicName,14,15,1
-block_hint,KeyedHasIC_PolymorphicName,16,17,1
-block_hint,EnqueueMicrotask,4,5,0
-block_hint,EnqueueMicrotask,2,3,0
-block_hint,RunMicrotasks,18,19,0
-block_hint,RunMicrotasks,31,32,0
-block_hint,RunMicrotasks,65,66,0
-block_hint,RunMicrotasks,36,37,1
-block_hint,RunMicrotasks,85,86,0
-block_hint,RunMicrotasks,67,68,0
-block_hint,RunMicrotasks,38,39,1
-block_hint,HasProperty,137,138,1
-block_hint,HasProperty,139,140,1
-block_hint,HasProperty,263,264,0
-block_hint,HasProperty,211,212,1
-block_hint,HasProperty,254,255,0
-block_hint,HasProperty,97,98,0
-block_hint,HasProperty,234,235,1
-block_hint,HasProperty,123,124,1
-block_hint,HasProperty,141,142,1
-block_hint,HasProperty,199,200,0
-block_hint,HasProperty,201,202,0
-block_hint,HasProperty,101,102,0
-block_hint,HasProperty,99,100,0
-block_hint,HasProperty,250,251,0
-block_hint,HasProperty,270,271,0
-block_hint,HasProperty,259,260,1
-block_hint,HasProperty,106,107,0
-block_hint,HasProperty,277,278,0
-block_hint,HasProperty,282,283,0
-block_hint,HasProperty,268,269,0
-block_hint,HasProperty,203,204,1
-block_hint,HasProperty,42,43,1
-block_hint,HasProperty,65,66,0
-block_hint,HasProperty,44,45,0
-block_hint,HasProperty,239,240,1
-block_hint,HasProperty,48,49,0
-block_hint,HasProperty,272,273,0
-block_hint,HasProperty,228,229,0
-block_hint,HasProperty,38,39,0
-block_hint,DeleteProperty,38,39,1
-block_hint,DeleteProperty,62,63,0
-block_hint,DeleteProperty,40,41,0
-block_hint,DeleteProperty,66,67,1
-block_hint,DeleteProperty,91,92,0
-block_hint,DeleteProperty,73,74,0
-block_hint,DeleteProperty,64,65,1
-block_hint,DeleteProperty,56,57,1
-block_hint,DeleteProperty,42,43,1
-block_hint,DeleteProperty,83,84,0
-block_hint,DeleteProperty,85,86,0
-block_hint,DeleteProperty,77,78,0
-block_hint,DeleteProperty,75,76,0
-block_hint,DeleteProperty,47,48,0
-block_hint,DeleteProperty,49,50,0
-block_hint,DeleteProperty,87,88,0
-block_hint,DeleteProperty,71,72,1
-block_hint,DeleteProperty,20,21,0
-block_hint,DeleteProperty,54,55,0
-block_hint,DeleteProperty,7,8,1
-block_hint,DeleteProperty,9,10,1
-block_hint,DeleteProperty,11,12,1
-block_hint,DeleteProperty,13,14,1
-block_hint,DeleteProperty,15,16,1
-block_hint,SetDataProperties,136,137,1
-block_hint,SetDataProperties,263,264,1
-block_hint,SetDataProperties,261,262,1
-block_hint,SetDataProperties,144,145,0
-block_hint,SetDataProperties,316,317,0
-block_hint,SetDataProperties,146,147,0
-block_hint,SetDataProperties,59,60,0
-block_hint,SetDataProperties,341,342,0
-block_hint,SetDataProperties,267,268,0
-block_hint,SetDataProperties,385,386,1
-block_hint,SetDataProperties,277,278,0
-block_hint,SetDataProperties,752,753,0
-block_hint,SetDataProperties,762,763,1
-block_hint,SetDataProperties,750,751,0
-block_hint,SetDataProperties,748,749,0
-block_hint,SetDataProperties,659,660,0
-block_hint,SetDataProperties,451,452,1
-block_hint,SetDataProperties,221,222,1
-block_hint,SetDataProperties,87,88,0
-block_hint,SetDataProperties,223,224,0
-block_hint,SetDataProperties,513,514,0
-block_hint,SetDataProperties,515,516,0
-block_hint,SetDataProperties,519,520,1
-block_hint,SetDataProperties,449,450,0
-block_hint,SetDataProperties,329,330,1
-block_hint,SetDataProperties,326,327,0
-block_hint,SetDataProperties,158,159,0
-block_hint,SetDataProperties,399,400,0
-block_hint,SetDataProperties,447,448,0
-block_hint,SetDataProperties,352,353,0
-block_hint,SetDataProperties,226,227,1
-block_hint,SetDataProperties,93,94,1
-block_hint,SetDataProperties,521,522,0
-block_hint,SetDataProperties,95,96,0
-block_hint,SetDataProperties,97,98,0
-block_hint,SetDataProperties,617,618,0
-block_hint,SetDataProperties,523,524,1
-block_hint,SetDataProperties,525,526,0
-block_hint,SetDataProperties,527,528,1
-block_hint,SetDataProperties,529,530,0
-block_hint,SetDataProperties,673,674,0
-block_hint,SetDataProperties,531,532,1
-block_hint,SetDataProperties,577,578,0
-block_hint,SetDataProperties,675,676,0
-block_hint,SetDataProperties,620,621,1
-block_hint,SetDataProperties,539,540,1
-block_hint,SetDataProperties,541,542,0
-block_hint,SetDataProperties,543,544,0
-block_hint,SetDataProperties,545,546,1
-block_hint,SetDataProperties,535,536,0
-block_hint,SetDataProperties,657,658,0
-block_hint,SetDataProperties,555,556,1
-block_hint,SetDataProperties,292,293,1
-block_hint,SetDataProperties,99,100,0
-block_hint,SetDataProperties,437,438,0
-block_hint,SetDataProperties,241,242,0
-block_hint,SetDataProperties,279,280,1
-block_hint,SetDataProperties,204,205,0
-block_hint,SetDataProperties,61,62,0
-block_hint,ReturnReceiver,3,4,1
-block_hint,ArrayConstructorImpl,40,41,0
-block_hint,ArrayConstructorImpl,15,16,1
-block_hint,ArrayConstructorImpl,19,20,0
-block_hint,ArrayConstructorImpl,23,24,0
-block_hint,ArrayConstructorImpl,25,26,1
-block_hint,ArrayConstructorImpl,9,10,0
-block_hint,ArrayConstructorImpl,13,14,1
-block_hint,ArrayConstructorImpl,27,28,0
-block_hint,ArrayConstructorImpl,29,30,1
-block_hint,ArrayNoArgumentConstructor_PackedSmi_DontOverride,3,4,1
-block_hint,ArrayNoArgumentConstructor_PackedSmi_DontOverride,5,6,1
-block_hint,ArrayNoArgumentConstructor_HoleySmi_DontOverride,3,4,1
-block_hint,ArrayNoArgumentConstructor_HoleySmi_DontOverride,5,6,1
-block_hint,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,5,6,1
-block_hint,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,5,6,1
-block_hint,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,3,4,1
-block_hint,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DontOverride,8,9,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,21,22,1
-block_hint,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,8,9,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,25,26,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,12,13,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,8,9,0
-block_hint,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,10,11,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,5,6,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,14,15,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,16,17,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,25,26,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,23,24,1
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,12,13,0
-block_hint,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,8,9,0
-block_hint,ArrayIncludesSmi,120,121,0
-block_hint,ArrayIncludesSmi,43,44,1
-block_hint,ArrayIncludesSmi,108,109,1
-block_hint,ArrayIncludesSmi,75,76,1
-block_hint,ArrayIncludesSmi,122,123,0
-block_hint,ArrayIncludesSmiOrObject,113,114,1
-block_hint,ArrayIncludesSmiOrObject,38,39,0
-block_hint,ArrayIncludesSmiOrObject,107,108,0
-block_hint,ArrayIncludesSmiOrObject,28,29,1
-block_hint,ArrayIncludesSmiOrObject,84,85,1
-block_hint,ArrayIncludesSmiOrObject,86,87,1
-block_hint,ArrayIncludesSmiOrObject,117,118,0
-block_hint,ArrayIncludesSmiOrObject,131,132,1
-block_hint,ArrayIncludesSmiOrObject,125,126,0
-block_hint,ArrayIncludesSmiOrObject,98,99,0
-block_hint,ArrayIncludes,52,53,1
-block_hint,ArrayIncludes,49,50,0
-block_hint,ArrayIncludes,42,43,1
-block_hint,ArrayIncludes,44,45,1
-block_hint,ArrayIncludes,25,26,1
-block_hint,ArrayIncludes,17,18,1
-block_hint,ArrayIncludes,3,4,1
-block_hint,ArrayIncludes,47,48,1
-block_hint,ArrayIncludes,38,39,0
-block_hint,ArrayIncludes,27,28,1
-block_hint,ArrayIncludes,13,14,0
-block_hint,ArrayIncludes,19,20,1
-block_hint,ArrayIndexOfSmiOrObject,96,97,1
-block_hint,ArrayIndexOfSmiOrObject,88,89,0
-block_hint,ArrayIndexOfSmiOrObject,23,24,0
-block_hint,ArrayIndexOfSmiOrObject,37,38,0
-block_hint,ArrayIndexOfSmiOrObject,69,70,1
-block_hint,ArrayIndexOfSmiOrObject,43,44,0
-block_hint,ArrayIndexOfSmiOrObject,71,72,1
-block_hint,ArrayIndexOfSmiOrObject,110,111,1
-block_hint,ArrayIndexOfSmiOrObject,100,101,0
-block_hint,ArrayIndexOfSmiOrObject,77,78,0
-block_hint,ArrayIndexOfSmiOrObject,102,103,0
-block_hint,ArrayIndexOfSmiOrObject,79,80,0
-block_hint,ArrayIndexOfSmiOrObject,49,50,0
-block_hint,ArrayIndexOfSmiOrObject,29,30,0
-block_hint,ArrayIndexOfSmiOrObject,106,107,0
-block_hint,ArrayIndexOfSmiOrObject,83,84,0
-block_hint,ArrayIndexOfSmiOrObject,35,36,1
-block_hint,ArrayIndexOfSmiOrObject,94,95,1
-block_hint,ArrayIndexOfSmiOrObject,86,87,0
-block_hint,ArrayIndexOf,52,53,1
-block_hint,ArrayIndexOf,49,50,0
-block_hint,ArrayIndexOf,42,43,1
-block_hint,ArrayIndexOf,44,45,1
-block_hint,ArrayIndexOf,25,26,1
-block_hint,ArrayIndexOf,17,18,1
-block_hint,ArrayIndexOf,3,4,1
-block_hint,ArrayIndexOf,47,48,1
-block_hint,ArrayIndexOf,38,39,0
-block_hint,ArrayIndexOf,27,28,1
-block_hint,ArrayIndexOf,13,14,0
-block_hint,ArrayIndexOf,7,8,0
-block_hint,ArrayIndexOf,19,20,1
-block_hint,ArrayIndexOf,22,23,1
-block_hint,ArrayPrototypePop,49,50,1
-block_hint,ArrayPrototypePop,42,43,1
-block_hint,ArrayPrototypePop,47,48,1
-block_hint,ArrayPrototypePop,36,37,1
-block_hint,ArrayPrototypePop,26,27,1
-block_hint,ArrayPrototypePop,5,6,1
-block_hint,ArrayPrototypePop,45,46,1
-block_hint,ArrayPrototypePop,39,40,0
-block_hint,ArrayPrototypePop,20,21,1
-block_hint,ArrayPrototypePop,28,29,0
-block_hint,ArrayPrototypePop,7,8,1
-block_hint,ArrayPrototypePop,33,34,0
-block_hint,ArrayPrototypePop,16,17,0
-block_hint,ArrayPrototypePop,22,23,0
-block_hint,ArrayPrototypePop,30,31,1
-block_hint,ArrayPrototypePop,18,19,0
-block_hint,ArrayPrototypePop,14,15,1
-block_hint,ArrayPrototypePop,9,10,1
-block_hint,ArrayPrototypePop,11,12,0
-block_hint,ArrayPrototypePush,165,166,1
-block_hint,ArrayPrototypePush,148,149,1
-block_hint,ArrayPrototypePush,163,164,1
-block_hint,ArrayPrototypePush,137,138,1
-block_hint,ArrayPrototypePush,92,93,1
-block_hint,ArrayPrototypePush,17,18,1
-block_hint,ArrayPrototypePush,157,158,1
-block_hint,ArrayPrototypePush,141,142,0
-block_hint,ArrayPrototypePush,77,78,1
-block_hint,ArrayPrototypePush,79,80,1
-block_hint,ArrayPrototypePush,94,95,0
-block_hint,ArrayPrototypePush,19,20,1
-block_hint,ArrayPrototypePush,99,100,0
-block_hint,ArrayPrototypePush,115,116,0
-block_hint,ArrayPrototypePush,104,105,0
-block_hint,ArrayPrototypePush,68,69,0
-block_hint,ArrayPrototypePush,113,114,1
-block_hint,ArrayPrototypePush,28,29,0
-block_hint,ArrayPrototypePush,30,31,0
-block_hint,ArrayPrototypePush,34,35,0
-block_hint,ArrayPrototypePush,36,37,0
-block_hint,ArrayPrototypePush,96,97,0
-block_hint,ArrayPrototypePush,21,22,1
-block_hint,ArrayPrototypePush,38,39,1
-block_hint,ArrayPrototypePush,127,128,1
-block_hint,ArrayPrototypePush,129,130,0
-block_hint,ArrayPrototypePush,159,160,0
-block_hint,ArrayPrototypePush,161,162,0
-block_hint,ArrayPrototypePush,108,109,0
-block_hint,ArrayPrototypePush,72,73,0
-block_hint,ArrayPrototypePush,74,75,1
-block_hint,ArrayPrototypePush,117,118,0
-block_hint,ArrayPrototypePush,40,41,0
-block_hint,ArrayPrototypePush,119,120,0
-block_hint,ArrayPrototypePush,48,49,0
-block_hint,ArrayPrototypePush,154,155,1
-block_hint,ArrayPrototypePush,25,26,1
-block_hint,ArrayPrototypePush,50,51,1
-block_hint,ArrayPrototypePush,106,107,0
-block_hint,ArrayPrototypePush,70,71,0
-block_hint,ArrayPrototypePush,123,124,1
-block_hint,ArrayPrototypePush,52,53,0
-block_hint,ArrayPrototypePush,58,59,0
-block_hint,ArrayPrototypePush,60,61,0
-block_hint,ArrayPrototypePush,23,24,1
-block_hint,CloneFastJSArray,10,11,0
-block_hint,CloneFastJSArray,37,38,1
-block_hint,CloneFastJSArray,34,35,1
-block_hint,CloneFastJSArray,19,20,1
-block_hint,CloneFastJSArray,8,9,0
-block_hint,CloneFastJSArray,12,13,0
-block_hint,CloneFastJSArray,14,15,1
-block_hint,CloneFastJSArray,42,43,1
-block_hint,CloneFastJSArray,40,41,0
-block_hint,CloneFastJSArray,25,26,1
-block_hint,CloneFastJSArray,4,5,1
-block_hint,CloneFastJSArray,17,18,1
-block_hint,CloneFastJSArrayFillingHoles,75,76,0
-block_hint,CloneFastJSArrayFillingHoles,77,78,0
-block_hint,CloneFastJSArrayFillingHoles,92,93,0
-block_hint,CloneFastJSArrayFillingHoles,46,47,0
-block_hint,CloneFastJSArrayFillingHoles,96,97,1
-block_hint,CloneFastJSArrayFillingHoles,82,83,1
-block_hint,CloneFastJSArrayFillingHoles,16,17,0
-block_hint,CloneFastJSArrayFillingHoles,20,21,0
-block_hint,CloneFastJSArrayFillingHoles,84,85,0
-block_hint,CloneFastJSArrayFillingHoles,113,114,0
-block_hint,CloneFastJSArrayFillingHoles,79,80,1
-block_hint,CloneFastJSArrayFillingHoles,10,11,1
-block_hint,CloneFastJSArrayFillingHoles,55,56,1
-block_hint,ExtractFastJSArray,4,5,1
-block_hint,ExtractFastJSArray,27,28,0
-block_hint,ExtractFastJSArray,10,11,0
-block_hint,ExtractFastJSArray,39,40,1
-block_hint,ExtractFastJSArray,35,36,1
-block_hint,ExtractFastJSArray,20,21,1
-block_hint,ExtractFastJSArray,6,7,0
-block_hint,ExtractFastJSArray,12,13,0
-block_hint,ExtractFastJSArray,14,15,1
-block_hint,ExtractFastJSArray,37,38,1
-block_hint,ExtractFastJSArray,33,34,0
-block_hint,ExtractFastJSArray,16,17,1
-block_hint,ArrayPrototypeValues,14,15,1
-block_hint,ArrayPrototypeValues,11,12,1
-block_hint,ArrayPrototypeValues,8,9,1
-block_hint,ArrayPrototypeValues,3,4,1
-block_hint,ArrayPrototypeValues,6,7,1
-block_hint,ArrayIteratorPrototypeNext,138,139,1
-block_hint,ArrayIteratorPrototypeNext,88,89,1
-block_hint,ArrayIteratorPrototypeNext,90,91,1
-block_hint,ArrayIteratorPrototypeNext,236,237,0
-block_hint,ArrayIteratorPrototypeNext,243,244,0
-block_hint,ArrayIteratorPrototypeNext,219,220,0
-block_hint,ArrayIteratorPrototypeNext,199,200,0
-block_hint,ArrayIteratorPrototypeNext,160,161,0
-block_hint,ArrayIteratorPrototypeNext,118,119,1
-block_hint,ArrayIteratorPrototypeNext,255,256,0
-block_hint,ArrayIteratorPrototypeNext,238,239,0
-block_hint,ArrayIteratorPrototypeNext,155,156,0
-block_hint,ArrayIteratorPrototypeNext,109,110,1
-block_hint,ArrayIteratorPrototypeNext,6,7,1
-block_hint,ArrayIteratorPrototypeNext,8,9,1
-block_hint,ArrayIteratorPrototypeNext,140,141,0
-block_hint,ArrayIteratorPrototypeNext,122,123,1
-block_hint,ArrayIteratorPrototypeNext,60,61,1
-block_hint,ArrayIteratorPrototypeNext,80,81,1
-block_hint,AsyncFunctionEnter,41,42,1
-block_hint,AsyncFunctionEnter,28,29,0
-block_hint,AsyncFunctionEnter,13,14,0
-block_hint,AsyncFunctionEnter,33,34,1
-block_hint,AsyncFunctionEnter,26,27,1
-block_hint,AsyncFunctionEnter,9,10,0
-block_hint,AsyncFunctionEnter,3,4,1
-block_hint,AsyncFunctionEnter,36,37,1
-block_hint,AsyncFunctionEnter,22,23,0
-block_hint,AsyncFunctionEnter,5,6,1
-block_hint,AsyncFunctionEnter,24,25,1
-block_hint,AsyncFunctionEnter,7,8,0
-block_hint,AsyncFunctionResolve,2,3,0
-block_hint,AsyncFunctionAwaitCaught,24,25,1
-block_hint,AsyncFunctionAwaitCaught,19,20,1
-block_hint,AsyncFunctionAwaitCaught,2,3,1
-block_hint,AsyncFunctionAwaitCaught,30,31,1
-block_hint,AsyncFunctionAwaitCaught,32,33,0
-block_hint,AsyncFunctionAwaitCaught,28,29,1
-block_hint,AsyncFunctionAwaitCaught,8,9,1
-block_hint,AsyncFunctionAwaitCaught,10,11,1
-block_hint,AsyncFunctionAwaitCaught,12,13,1
-block_hint,AsyncFunctionAwaitCaught,14,15,1
-block_hint,AsyncFunctionAwaitCaught,22,23,0
-block_hint,AsyncFunctionAwaitUncaught,24,25,1
-block_hint,AsyncFunctionAwaitUncaught,19,20,1
-block_hint,AsyncFunctionAwaitUncaught,2,3,1
-block_hint,AsyncFunctionAwaitUncaught,30,31,1
-block_hint,AsyncFunctionAwaitUncaught,32,33,0
-block_hint,AsyncFunctionAwaitUncaught,28,29,1
-block_hint,AsyncFunctionAwaitUncaught,8,9,1
-block_hint,AsyncFunctionAwaitUncaught,10,11,1
-block_hint,AsyncFunctionAwaitUncaught,12,13,1
-block_hint,AsyncFunctionAwaitUncaught,14,15,1
-block_hint,AsyncFunctionAwaitUncaught,22,23,0
-block_hint,AsyncFunctionAwaitResolveClosure,8,9,1
-block_hint,AsyncFunctionAwaitResolveClosure,2,3,1
-block_hint,AsyncFunctionAwaitResolveClosure,6,7,0
-block_hint,DatePrototypeGetDate,10,11,1
-block_hint,DatePrototypeGetDate,7,8,1
-block_hint,DatePrototypeGetDate,5,6,1
-block_hint,DatePrototypeGetDate,2,3,1
-block_hint,DatePrototypeGetDay,10,11,1
-block_hint,DatePrototypeGetDay,7,8,1
-block_hint,DatePrototypeGetDay,5,6,1
-block_hint,DatePrototypeGetDay,2,3,1
-block_hint,DatePrototypeGetFullYear,10,11,1
-block_hint,DatePrototypeGetFullYear,7,8,1
-block_hint,DatePrototypeGetFullYear,5,6,1
-block_hint,DatePrototypeGetHours,10,11,1
-block_hint,DatePrototypeGetHours,7,8,1
-block_hint,DatePrototypeGetHours,5,6,1
-block_hint,DatePrototypeGetHours,2,3,1
-block_hint,DatePrototypeGetMinutes,10,11,1
-block_hint,DatePrototypeGetMinutes,7,8,1
-block_hint,DatePrototypeGetMinutes,5,6,1
-block_hint,DatePrototypeGetMinutes,2,3,1
-block_hint,DatePrototypeGetMonth,10,11,1
-block_hint,DatePrototypeGetMonth,7,8,1
-block_hint,DatePrototypeGetMonth,5,6,1
-block_hint,DatePrototypeGetMonth,2,3,1
-block_hint,DatePrototypeGetSeconds,10,11,1
-block_hint,DatePrototypeGetSeconds,7,8,1
-block_hint,DatePrototypeGetSeconds,5,6,1
-block_hint,DatePrototypeGetSeconds,2,3,1
-block_hint,DatePrototypeGetTime,8,9,1
-block_hint,DatePrototypeGetTime,5,6,1
-block_hint,DatePrototypeGetTime,2,3,1
-block_hint,CreateIterResultObject,4,5,1
-block_hint,CreateIterResultObject,11,12,1
-block_hint,CreateIterResultObject,6,7,0
-block_hint,CreateGeneratorObject,32,33,1
-block_hint,CreateGeneratorObject,34,35,1
-block_hint,CreateGeneratorObject,57,58,1
-block_hint,CreateGeneratorObject,54,55,0
-block_hint,CreateGeneratorObject,43,44,1
-block_hint,CreateGeneratorObject,24,25,0
-block_hint,CreateGeneratorObject,47,48,1
-block_hint,CreateGeneratorObject,40,41,1
-block_hint,CreateGeneratorObject,8,9,0
-block_hint,CreateGeneratorObject,51,52,1
-block_hint,CreateGeneratorObject,37,38,0
-block_hint,CreateGeneratorObject,12,13,0
-block_hint,GeneratorPrototypeNext,19,20,1
-block_hint,GeneratorPrototypeNext,11,12,1
-block_hint,GeneratorPrototypeNext,13,14,1
-block_hint,GeneratorPrototypeNext,5,6,0
-block_hint,GeneratorPrototypeNext,7,8,0
-block_hint,SuspendGeneratorBaseline,19,20,1
-block_hint,SuspendGeneratorBaseline,5,6,1
-block_hint,SuspendGeneratorBaseline,11,12,1
-block_hint,SuspendGeneratorBaseline,7,8,1
-block_hint,SuspendGeneratorBaseline,13,14,0
-block_hint,ResumeGeneratorBaseline,11,12,1
-block_hint,ResumeGeneratorBaseline,4,5,1
-block_hint,ResumeGeneratorBaseline,6,7,0
-block_hint,GlobalIsFinite,9,10,1
-block_hint,GlobalIsNaN,9,10,1
-block_hint,GlobalIsNaN,11,12,1
-block_hint,LoadIC,370,371,1
-block_hint,LoadIC,139,140,0
-block_hint,LoadIC,59,60,0
-block_hint,LoadIC,233,234,0
-block_hint,LoadIC,345,346,1
-block_hint,LoadIC,235,236,0
-block_hint,LoadIC,387,388,1
-block_hint,LoadIC,384,385,0
-block_hint,LoadIC,381,382,1
-block_hint,LoadIC,292,293,1
-block_hint,LoadIC,100,101,1
-block_hint,LoadIC,278,279,0
-block_hint,LoadIC,319,320,0
-block_hint,LoadIC,141,142,1
-block_hint,LoadIC,143,144,0
-block_hint,LoadIC,308,309,1
-block_hint,LoadIC,358,359,1
-block_hint,LoadIC,102,103,0
-block_hint,LoadIC,19,20,1
-block_hint,LoadIC,62,63,0
-block_hint,LoadIC,21,22,1
-block_hint,LoadIC,173,174,0
-block_hint,LoadIC,364,365,0
-block_hint,LoadIC,366,367,0
-block_hint,LoadIC,317,318,0
-block_hint,LoadIC,129,130,0
-block_hint,LoadIC,49,50,1
-block_hint,LoadIC,209,210,0
-block_hint,LoadIC,84,85,0
-block_hint,LoadIC,44,45,0
-block_hint,LoadIC,360,361,1
-block_hint,LoadIC,114,115,0
-block_hint,LoadIC,183,184,0
-block_hint,LoadIC,42,43,1
-block_hint,LoadIC,76,77,0
-block_hint,LoadIC,268,269,0
-block_hint,LoadIC,310,311,1
-block_hint,LoadIC,25,26,0
-block_hint,LoadIC,179,180,1
-block_hint,LoadIC,181,182,1
-block_hint,LoadIC,175,176,1
-block_hint,LoadIC,177,178,1
-block_hint,LoadIC,133,134,1
-block_hint,LoadIC,135,136,0
-block_hint,LoadIC_Megamorphic,355,356,1
-block_hint,LoadIC_Megamorphic,352,353,0
-block_hint,LoadIC_Megamorphic,257,258,1
-block_hint,LoadIC_Megamorphic,259,260,1
-block_hint,LoadIC_Megamorphic,255,256,0
-block_hint,LoadIC_Megamorphic,56,57,0
-block_hint,LoadIC_Megamorphic,294,295,0
-block_hint,LoadIC_Megamorphic,130,131,1
-block_hint,LoadIC_Megamorphic,280,281,1
-block_hint,LoadIC_Megamorphic,132,133,0
-block_hint,LoadIC_Megamorphic,282,283,1
-block_hint,LoadIC_Megamorphic,328,329,1
-block_hint,LoadIC_Megamorphic,95,96,0
-block_hint,LoadIC_Megamorphic,20,21,1
-block_hint,LoadIC_Megamorphic,162,163,0
-block_hint,LoadIC_Megamorphic,287,288,0
-block_hint,LoadIC_Megamorphic,249,250,1
-block_hint,LoadIC_Megamorphic,334,335,0
-block_hint,LoadIC_Megamorphic,336,337,0
-block_hint,LoadIC_Megamorphic,291,292,0
-block_hint,LoadIC_Megamorphic,122,123,0
-block_hint,LoadIC_Megamorphic,48,49,1
-block_hint,LoadIC_Megamorphic,43,44,0
-block_hint,LoadIC_Megamorphic,245,246,0
-block_hint,LoadIC_Megamorphic,284,285,1
-block_hint,LoadIC_Megamorphic,24,25,0
-block_hint,LoadIC_Megamorphic,22,23,0
-block_hint,LoadIC_Megamorphic,164,165,1
-block_hint,LoadIC_Megamorphic,166,167,1
-block_hint,LoadIC_Megamorphic,126,127,1
-block_hint,LoadIC_Noninlined,366,367,1
-block_hint,LoadIC_Noninlined,132,133,0
-block_hint,LoadIC_Noninlined,372,373,1
-block_hint,LoadIC_Noninlined,369,370,0
-block_hint,LoadIC_Noninlined,364,365,1
-block_hint,LoadIC_Noninlined,267,268,0
-block_hint,LoadIC_Noninlined,56,57,0
-block_hint,LoadIC_Noninlined,308,309,0
-block_hint,LoadIC_Noninlined,142,143,1
-block_hint,LoadIC_Noninlined,292,293,1
-block_hint,LoadIC_Noninlined,20,21,1
-block_hint,LoadIC_Noninlined,174,175,0
-block_hint,LoadIC_Noninlined,37,38,1
-block_hint,LoadIC_Noninlined,257,258,0
-block_hint,LoadIC_Noninlined,296,297,1
-block_hint,LoadIC_Noninlined,24,25,0
-block_hint,LoadIC_Noninlined,22,23,0
-block_hint,LoadICTrampoline,3,4,1
-block_hint,LoadICTrampoline_Megamorphic,3,4,1
-block_hint,LoadSuperIC,528,529,0
-block_hint,LoadSuperIC,253,254,0
-block_hint,LoadSuperIC,564,565,1
-block_hint,LoadSuperIC,440,441,0
-block_hint,LoadSuperIC,75,76,0
-block_hint,LoadSuperIC,540,541,0
-block_hint,LoadSuperIC,255,256,1
-block_hint,LoadSuperIC,515,516,1
-block_hint,LoadSuperIC,41,42,1
-block_hint,LoadSuperIC,550,551,0
-block_hint,LoadSuperIC,287,288,0
-block_hint,LoadSuperIC,60,61,1
-block_hint,LoadSuperIC,429,430,0
-block_hint,LoadSuperIC,427,428,0
-block_hint,LoadSuperIC,519,520,1
-block_hint,LoadSuperIC,45,46,0
-block_hint,LoadSuperIC,671,672,0
-block_hint,KeyedLoadIC,629,630,1
-block_hint,KeyedLoadIC,257,258,0
-block_hint,KeyedLoadIC,249,250,0
-block_hint,KeyedLoadIC,385,386,0
-block_hint,KeyedLoadIC,494,495,1
-block_hint,KeyedLoadIC,671,672,0
-block_hint,KeyedLoadIC,623,624,0
-block_hint,KeyedLoadIC,574,575,1
-block_hint,KeyedLoadIC,391,392,1
-block_hint,KeyedLoadIC,389,390,1
-block_hint,KeyedLoadIC,657,658,0
-block_hint,KeyedLoadIC,659,660,0
-block_hint,KeyedLoadIC,627,628,0
-block_hint,KeyedLoadIC,576,577,1
-block_hint,KeyedLoadIC,151,152,1
-block_hint,KeyedLoadIC,621,622,0
-block_hint,KeyedLoadIC,475,476,0
-block_hint,KeyedLoadIC,101,102,1
-block_hint,KeyedLoadIC,667,668,0
-block_hint,KeyedLoadIC,669,670,0
-block_hint,KeyedLoadIC,635,636,1
-block_hint,KeyedLoadIC,637,638,1
-block_hint,KeyedLoadIC,293,294,1
-block_hint,KeyedLoadIC,295,296,0
-block_hint,KeyedLoadIC,663,664,1
-block_hint,KeyedLoadIC,517,518,1
-block_hint,KeyedLoadIC,619,620,0
-block_hint,KeyedLoadIC,606,607,0
-block_hint,KeyedLoadIC,554,555,1
-block_hint,KeyedLoadIC,315,316,1
-block_hint,KeyedLoadIC,63,64,0
-block_hint,KeyedLoadIC,305,306,0
-block_hint,KeyedLoadIC,521,522,1
-block_hint,KeyedLoadIC,307,308,1
-block_hint,KeyedLoadIC,221,222,0
-block_hint,KeyedLoadIC,177,178,1
-block_hint,KeyedLoadIC,556,557,0
-block_hint,KeyedLoadIC,450,451,1
-block_hint,KeyedLoadIC,113,114,0
-block_hint,KeyedLoadIC,115,116,0
-block_hint,KeyedLoadIC,406,407,1
-block_hint,KeyedLoadIC,609,610,1
-block_hint,KeyedLoadIC,245,246,1
-block_hint,KeyedLoadIC,558,559,0
-block_hint,KeyedLoadIC,508,509,0
-block_hint,KeyedLoadIC,436,437,1
-block_hint,KeyedLoadIC,673,674,0
-block_hint,KeyedLoadIC,119,120,1
-block_hint,KeyedLoadIC,323,324,1
-block_hint,KeyedLoadIC,325,326,1
-block_hint,KeyedLoadIC,65,66,0
-block_hint,KeyedLoadIC_Megamorphic,496,497,1
-block_hint,KeyedLoadIC_Megamorphic,498,499,0
-block_hint,KeyedLoadIC_Megamorphic,1218,1219,0
-block_hint,KeyedLoadIC_Megamorphic,1220,1221,1
-block_hint,KeyedLoadIC_Megamorphic,1192,1193,1
-block_hint,KeyedLoadIC_Megamorphic,1145,1146,0
-block_hint,KeyedLoadIC_Megamorphic,1212,1213,1
-block_hint,KeyedLoadIC_Megamorphic,1222,1223,1
-block_hint,KeyedLoadIC_Megamorphic,1194,1195,1
-block_hint,KeyedLoadIC_Megamorphic,1214,1215,0
-block_hint,KeyedLoadIC_Megamorphic,1111,1112,0
-block_hint,KeyedLoadIC_Megamorphic,927,928,1
-block_hint,KeyedLoadIC_Megamorphic,925,926,1
-block_hint,KeyedLoadIC_Megamorphic,528,529,1
-block_hint,KeyedLoadIC_Megamorphic,1186,1187,0
-block_hint,KeyedLoadIC_Megamorphic,1188,1189,0
-block_hint,KeyedLoadIC_Megamorphic,1155,1156,0
-block_hint,KeyedLoadIC_Megamorphic,1153,1154,1
-block_hint,KeyedLoadIC_Megamorphic,1182,1183,0
-block_hint,KeyedLoadIC_Megamorphic,1149,1150,0
-block_hint,KeyedLoadIC_Megamorphic,929,930,1
-block_hint,KeyedLoadIC_Megamorphic,939,940,0
-block_hint,KeyedLoadIC_Megamorphic,640,641,0
-block_hint,KeyedLoadIC_Megamorphic,1172,1173,0
-block_hint,KeyedLoadIC_Megamorphic,1085,1086,0
-block_hint,KeyedLoadIC_Megamorphic,1131,1132,0
-block_hint,KeyedLoadIC_Megamorphic,233,234,1
-block_hint,KeyedLoadIC_Megamorphic,1087,1088,0
-block_hint,KeyedLoadIC_Megamorphic,235,236,0
-block_hint,KeyedLoadIC_Megamorphic,1070,1071,0
-block_hint,KeyedLoadIC_Megamorphic,1204,1205,1
-block_hint,KeyedLoadIC_Megamorphic,1068,1069,0
-block_hint,KeyedLoadIC_Megamorphic,1066,1067,0
-block_hint,KeyedLoadIC_Megamorphic,980,981,1
-block_hint,KeyedLoadIC_Megamorphic,239,240,0
-block_hint,KeyedLoadIC_Megamorphic,121,122,1
-block_hint,KeyedLoadIC_Megamorphic,195,196,0
-block_hint,KeyedLoadIC_Megamorphic,644,645,0
-block_hint,KeyedLoadIC_Megamorphic,558,559,0
-block_hint,KeyedLoadIC_Megamorphic,1043,1044,0
-block_hint,KeyedLoadIC_Megamorphic,983,984,0
-block_hint,KeyedLoadIC_Megamorphic,648,649,1
-block_hint,KeyedLoadIC_Megamorphic,666,667,0
-block_hint,KeyedLoadIC_Megamorphic,1174,1175,0
-block_hint,KeyedLoadIC_Megamorphic,650,651,0
-block_hint,KeyedLoadIC_Megamorphic,1089,1090,0
-block_hint,KeyedLoadIC_Megamorphic,652,653,1
-block_hint,KeyedLoadIC_Megamorphic,250,251,1
-block_hint,KeyedLoadIC_Megamorphic,654,655,0
-block_hint,KeyedLoadIC_Megamorphic,252,253,0
-block_hint,KeyedLoadIC_Megamorphic,842,843,0
-block_hint,KeyedLoadIC_Megamorphic,987,988,1
-block_hint,KeyedLoadIC_Megamorphic,256,257,0
-block_hint,KeyedLoadIC_Megamorphic,656,657,0
-block_hint,KeyedLoadIC_Megamorphic,258,259,1
-block_hint,KeyedLoadIC_Megamorphic,1076,1077,0
-block_hint,KeyedLoadIC_Megamorphic,1169,1170,0
-block_hint,KeyedLoadIC_Megamorphic,1206,1207,1
-block_hint,KeyedLoadIC_Megamorphic,1074,1075,0
-block_hint,KeyedLoadIC_Megamorphic,123,124,1
-block_hint,KeyedLoadIC_Megamorphic,203,204,0
-block_hint,KeyedLoadIC_Megamorphic,923,924,0
-block_hint,KeyedLoadIC_Megamorphic,675,676,0
-block_hint,KeyedLoadIC_Megamorphic,1176,1177,0
-block_hint,KeyedLoadIC_Megamorphic,1208,1209,0
-block_hint,KeyedLoadIC_Megamorphic,1135,1136,0
-block_hint,KeyedLoadIC_Megamorphic,844,845,1
-block_hint,KeyedLoadIC_Megamorphic,268,269,1
-block_hint,KeyedLoadIC_Megamorphic,1200,1201,0
-block_hint,KeyedLoadIC_Megamorphic,270,271,0
-block_hint,KeyedLoadIC_Megamorphic,1056,1057,0
-block_hint,KeyedLoadIC_Megamorphic,1198,1199,1
-block_hint,KeyedLoadIC_Megamorphic,1054,1055,0
-block_hint,KeyedLoadIC_Megamorphic,1116,1117,1
-block_hint,KeyedLoadIC_Megamorphic,1107,1108,0
-block_hint,KeyedLoadIC_Megamorphic,1210,1211,0
-block_hint,KeyedLoadIC_Megamorphic,1101,1102,1
-block_hint,KeyedLoadIC_Megamorphic,740,741,1
-block_hint,KeyedLoadIC_Megamorphic,1017,1018,1
-block_hint,KeyedLoadIC_Megamorphic,736,737,0
-block_hint,KeyedLoadIC_Megamorphic,112,113,0
-block_hint,KeyedLoadIC_Megamorphic,877,878,0
-block_hint,KeyedLoadIC_Megamorphic,338,339,1
-block_hint,KeyedLoadIC_Megamorphic,863,864,1
-block_hint,KeyedLoadIC_Megamorphic,76,77,1
-block_hint,KeyedLoadIC_Megamorphic,368,369,0
-block_hint,KeyedLoadIC_Megamorphic,728,729,0
-block_hint,KeyedLoadIC_Megamorphic,93,94,1
-block_hint,KeyedLoadIC_Megamorphic,998,999,1
-block_hint,KeyedLoadIC_Megamorphic,294,295,0
-block_hint,KeyedLoadIC_Megamorphic,115,116,1
-block_hint,KeyedLoadIC_Megamorphic,179,180,0
-block_hint,KeyedLoadIC_Megamorphic,960,961,0
-block_hint,KeyedLoadIC_Megamorphic,817,818,1
-block_hint,KeyedLoadIC_Megamorphic,183,184,1
-block_hint,KeyedLoadIC_Megamorphic,681,682,0
-block_hint,KeyedLoadIC_Megamorphic,524,525,0
-block_hint,KeyedLoadIC_Megamorphic,1031,1032,0
-block_hint,KeyedLoadIC_Megamorphic,1001,1002,0
-block_hint,KeyedLoadIC_Megamorphic,685,686,1
-block_hint,KeyedLoadIC_Megamorphic,856,857,1
-block_hint,KeyedLoadIC_Megamorphic,1178,1179,0
-block_hint,KeyedLoadIC_Megamorphic,307,308,0
-block_hint,KeyedLoadIC_Megamorphic,858,859,0
-block_hint,KeyedLoadIC_Megamorphic,1062,1063,0
-block_hint,KeyedLoadIC_Megamorphic,187,188,0
-block_hint,KeyedLoadIC_Megamorphic,947,948,0
-block_hint,KeyedLoadIC_Megamorphic,1142,1143,0
-block_hint,KeyedLoadIC_Megamorphic,905,906,1
-block_hint,KeyedLoadIC_Megamorphic,127,128,0
-block_hint,KeyedLoadIC_Megamorphic,718,719,0
-block_hint,KeyedLoadIC_Megamorphic,1026,1027,0
-block_hint,KeyedLoadIC_Megamorphic,562,563,1
-block_hint,KeyedLoadIC_Megamorphic,321,322,0
-block_hint,KeyedLoadIC_Megamorphic,714,715,0
-block_hint,KeyedLoadIC_Megamorphic,564,565,0
-block_hint,KeyedLoadIC_Megamorphic,129,130,1
-block_hint,KeyedLoadIC_Megamorphic,572,573,0
-block_hint,KeyedLoadIC_Megamorphic,909,910,1
-block_hint,KeyedLoadIC_Megamorphic,492,493,0
-block_hint,KeyedLoadIC_Megamorphic,945,946,0
-block_hint,KeyedLoadIC_Megamorphic,722,723,1
-block_hint,KeyedLoadIC_Megamorphic,580,581,0
-block_hint,KeyedLoadIC_Megamorphic,213,214,0
-block_hint,KeyedLoadIC_Megamorphic,490,491,1
-block_hint,KeyedLoadIC_Megamorphic,582,583,1
-block_hint,KeyedLoadIC_Megamorphic,145,146,1
-block_hint,KeyedLoadICTrampoline,3,4,1
-block_hint,KeyedLoadICTrampoline_Megamorphic,3,4,1
-block_hint,StoreGlobalIC,72,73,0
-block_hint,StoreGlobalIC,229,230,1
-block_hint,StoreGlobalIC,268,269,0
-block_hint,StoreGlobalIC,144,145,0
-block_hint,StoreGlobalIC,205,206,0
-block_hint,StoreGlobalIC,92,93,0
-block_hint,StoreGlobalIC,146,147,1
-block_hint,StoreGlobalIC,94,95,1
-block_hint,StoreGlobalIC,15,16,1
-block_hint,StoreGlobalICTrampoline,3,4,1
-block_hint,StoreIC,338,339,1
-block_hint,StoreIC,144,145,0
-block_hint,StoreIC,69,70,0
-block_hint,StoreIC,208,209,0
-block_hint,StoreIC,210,211,1
-block_hint,StoreIC,395,396,1
-block_hint,StoreIC,386,387,0
-block_hint,StoreIC,369,370,1
-block_hint,StoreIC,240,241,1
-block_hint,StoreIC,242,243,1
-block_hint,StoreIC,74,75,1
-block_hint,StoreIC,250,251,1
-block_hint,StoreIC,108,109,0
-block_hint,StoreIC,35,36,0
-block_hint,StoreIC,316,317,1
-block_hint,StoreIC,92,93,0
-block_hint,StoreIC,146,147,0
-block_hint,StoreIC,94,95,1
-block_hint,StoreIC,150,151,0
-block_hint,StoreIC,16,17,1
-block_hint,StoreIC,96,97,0
-block_hint,StoreIC,18,19,0
-block_hint,StoreIC,359,360,0
-block_hint,StoreIC,160,161,1
-block_hint,StoreIC,162,163,1
-block_hint,StoreIC,327,328,1
-block_hint,StoreIC,164,165,0
-block_hint,StoreIC,105,106,0
-block_hint,StoreIC,103,104,1
-block_hint,StoreIC,320,321,1
-block_hint,StoreIC,23,24,0
-block_hint,StoreIC,152,153,1
-block_hint,StoreIC,287,288,0
-block_hint,StoreIC,154,155,0
-block_hint,StoreIC,156,157,1
-block_hint,StoreIC,323,324,1
-block_hint,StoreIC,25,26,1
-block_hint,StoreIC,158,159,0
-block_hint,StoreIC,325,326,1
-block_hint,StoreIC,31,32,0
-block_hint,StoreIC,29,30,1
-block_hint,StoreIC,227,228,1
-block_hint,StoreIC,63,64,0
-block_hint,StoreIC,291,292,0
-block_hint,StoreIC,166,167,1
-block_hint,StoreIC,293,294,1
-block_hint,StoreIC,312,313,1
-block_hint,StoreIC,76,77,0
-block_hint,StoreIC,246,247,0
-block_hint,StoreIC,176,177,0
-block_hint,StoreIC,43,44,1
-block_hint,StoreIC,112,113,0
-block_hint,StoreIC,178,179,0
-block_hint,StoreIC,271,272,0
-block_hint,StoreIC,125,126,1
-block_hint,StoreIC,371,372,0
-block_hint,StoreIC,267,268,1
-block_hint,StoreIC,45,46,1
-block_hint,StoreIC,47,48,1
-block_hint,StoreIC,121,122,0
-block_hint,StoreIC,49,50,1
-block_hint,StoreIC,123,124,0
-block_hint,StoreIC,51,52,1
-block_hint,StoreIC,80,81,0
-block_hint,StoreIC,53,54,1
-block_hint,StoreIC,55,56,1
-block_hint,StoreIC,333,334,0
-block_hint,StoreIC,57,58,1
-block_hint,StoreIC,184,185,0
-block_hint,StoreIC,186,187,0
-block_hint,StoreIC,229,230,0
-block_hint,StoreIC,133,134,0
-block_hint,StoreIC,299,300,0
-block_hint,StoreIC,190,191,1
-block_hint,StoreIC,192,193,0
-block_hint,StoreIC,281,282,0
-block_hint,StoreIC,365,366,0
-block_hint,StoreIC,301,302,1
-block_hint,StoreIC,194,195,1
-block_hint,StoreIC,200,201,1
-block_hint,StoreIC,202,203,0
-block_hint,StoreIC,204,205,0
-block_hint,StoreIC,206,207,1
-block_hint,StoreIC,198,199,1
-block_hint,StoreIC,196,197,0
-block_hint,StoreIC,384,385,0
-block_hint,StoreIC,388,389,1
-block_hint,StoreIC,357,358,1
-block_hint,StoreIC,314,315,1
-block_hint,StoreIC,84,85,0
-block_hint,StoreIC,139,140,0
-block_hint,StoreIC,231,232,1
-block_hint,StoreICTrampoline,3,4,1
-block_hint,DefineNamedOwnIC,329,330,1
-block_hint,DefineNamedOwnIC,145,146,0
-block_hint,DefineNamedOwnIC,300,301,1
-block_hint,DefineNamedOwnIC,203,204,0
-block_hint,DefineNamedOwnIC,69,70,0
-block_hint,DefineNamedOwnIC,205,206,0
-block_hint,DefineNamedOwnIC,326,327,0
-block_hint,DefineNamedOwnIC,243,244,1
-block_hint,DefineNamedOwnIC,93,94,0
-block_hint,DefineNamedOwnIC,17,18,0
-block_hint,DefineNamedOwnIC,350,351,0
-block_hint,DefineNamedOwnIC,157,158,1
-block_hint,DefineNamedOwnIC,159,160,1
-block_hint,DefineNamedOwnIC,254,255,1
-block_hint,DefineNamedOwnIC,32,33,0
-block_hint,DefineNamedOwnIC,246,247,1
-block_hint,DefineNamedOwnIC,22,23,0
-block_hint,DefineNamedOwnIC,149,150,1
-block_hint,DefineNamedOwnIC,352,353,0
-block_hint,DefineNamedOwnIC,280,281,0
-block_hint,DefineNamedOwnIC,151,152,0
-block_hint,DefineNamedOwnIC,153,154,1
-block_hint,DefineNamedOwnIC,248,249,1
-block_hint,DefineNamedOwnIC,26,27,0
-block_hint,DefineNamedOwnIC,155,156,0
-block_hint,DefineNamedOwnIC,250,251,1
-block_hint,DefineNamedOwnIC,30,31,0
-block_hint,KeyedStoreIC,401,402,1
-block_hint,KeyedStoreIC,173,174,0
-block_hint,KeyedStoreIC,169,170,0
-block_hint,KeyedStoreIC,239,240,0
-block_hint,KeyedStoreIC,171,172,1
-block_hint,KeyedStoreIC,83,84,1
-block_hint,KeyedStoreIC,87,88,1
-block_hint,KeyedStoreIC,398,399,1
-block_hint,KeyedStoreIC,109,110,0
-block_hint,KeyedStoreIC,22,23,0
-block_hint,KeyedStoreIC,432,433,0
-block_hint,KeyedStoreIC,181,182,1
-block_hint,KeyedStoreIC,434,435,0
-block_hint,KeyedStoreIC,351,352,0
-block_hint,KeyedStoreIC,298,299,1
-block_hint,KeyedStoreIC,31,32,0
-block_hint,KeyedStoreIC,272,273,0
-block_hint,KeyedStoreIC,355,356,0
-block_hint,KeyedStoreIC,195,196,1
-block_hint,KeyedStoreIC,260,261,1
-block_hint,KeyedStoreIC,436,437,1
-block_hint,KeyedStoreIC,329,330,0
-block_hint,KeyedStoreIC,137,138,1
-block_hint,KeyedStoreIC,45,46,1
-block_hint,KeyedStoreIC,197,198,0
-block_hint,KeyedStoreIC,47,48,0
-block_hint,KeyedStoreIC,215,216,0
-block_hint,KeyedStoreIC,361,362,1
-block_hint,KeyedStoreIC,363,364,0
-block_hint,KeyedStoreIC,221,222,1
-block_hint,KeyedStoreIC,223,224,0
-block_hint,KeyedStoreIC,345,346,0
-block_hint,KeyedStoreIC,367,368,0
-block_hint,KeyedStoreIC,438,439,0
-block_hint,KeyedStoreIC,365,366,1
-block_hint,KeyedStoreIC,231,232,1
-block_hint,KeyedStoreIC,233,234,0
-block_hint,KeyedStoreIC,235,236,0
-block_hint,KeyedStoreIC,237,238,1
-block_hint,KeyedStoreIC,453,454,0
-block_hint,KeyedStoreIC,430,431,1
-block_hint,KeyedStoreIC,278,279,0
-block_hint,KeyedStoreIC,377,378,1
-block_hint,KeyedStoreIC,97,98,0
-block_hint,KeyedStoreIC,164,165,0
-block_hint,KeyedStoreICTrampoline,3,4,1
-block_hint,DefineKeyedOwnIC,392,393,1
-block_hint,DefineKeyedOwnIC,174,175,0
-block_hint,DefineKeyedOwnIC,170,171,1
-block_hint,StoreInArrayLiteralIC,30,31,1
-block_hint,StoreInArrayLiteralIC,19,20,0
-block_hint,StoreInArrayLiteralIC,23,24,0
-block_hint,StoreInArrayLiteralIC,14,15,1
-block_hint,StoreInArrayLiteralIC,16,17,1
-block_hint,StoreInArrayLiteralIC,8,9,1
-block_hint,StoreInArrayLiteralIC,4,5,1
-block_hint,LoadGlobalIC,60,61,0
-block_hint,LoadGlobalIC,14,15,1
-block_hint,LoadGlobalIC,16,17,1
-block_hint,LoadGlobalIC,18,19,1
-block_hint,LoadGlobalIC,191,192,0
-block_hint,LoadGlobalIC,12,13,0
-block_hint,LoadGlobalIC,111,112,1
-block_hint,LoadGlobalICInsideTypeof,60,61,0
-block_hint,LoadGlobalICInsideTypeof,193,194,1
-block_hint,LoadGlobalICInsideTypeof,12,13,0
-block_hint,LoadGlobalICInsideTypeof,111,112,0
-block_hint,LoadGlobalICInsideTypeof,20,21,1
-block_hint,LoadGlobalICInsideTypeof,22,23,1
-block_hint,LoadGlobalICInsideTypeof,254,255,1
-block_hint,LoadGlobalICInsideTypeof,208,209,0
-block_hint,LoadGlobalICInsideTypeof,58,59,0
-block_hint,LoadGlobalICInsideTypeof,220,221,0
-block_hint,LoadGlobalICInsideTypeof,113,114,1
-block_hint,LoadGlobalICInsideTypeof,24,25,1
-block_hint,LoadGlobalICInsideTypeof,229,230,1
-block_hint,LoadGlobalICInsideTypeof,199,200,0
-block_hint,LoadGlobalICInsideTypeof,43,44,0
-block_hint,LoadGlobalICInsideTypeof,41,42,1
-block_hint,LoadGlobalICTrampoline,3,4,1
-block_hint,LoadGlobalICInsideTypeofTrampoline,3,4,1
-block_hint,LookupGlobalICBaseline,3,4,1
-block_hint,LookupGlobalICBaseline,14,15,0
-block_hint,LookupGlobalICBaseline,5,6,1
-block_hint,LookupGlobalICBaseline,11,12,1
-block_hint,LookupGlobalICBaseline,7,8,1
-block_hint,LookupGlobalICBaseline,9,10,0
-block_hint,KeyedHasIC,261,262,1
-block_hint,KeyedHasIC,125,126,0
-block_hint,KeyedHasIC,117,118,0
-block_hint,KeyedHasIC,239,240,0
-block_hint,KeyedHasIC,165,166,0
-block_hint,KeyedHasIC,77,78,0
-block_hint,KeyedHasIC,119,120,1
-block_hint,KeyedHasIC,167,168,0
-block_hint,KeyedHasIC,123,124,1
-block_hint,KeyedHasIC,79,80,1
-block_hint,KeyedHasIC,197,198,0
-block_hint,KeyedHasIC,221,222,0
-block_hint,KeyedHasIC,283,284,0
-block_hint,KeyedHasIC,281,282,0
-block_hint,KeyedHasIC,161,162,1
-block_hint,KeyedHasIC,61,62,0
-block_hint,KeyedHasIC_Megamorphic,137,138,1
-block_hint,KeyedHasIC_Megamorphic,139,140,1
-block_hint,KeyedHasIC_Megamorphic,263,264,0
-block_hint,KeyedHasIC_Megamorphic,211,212,1
-block_hint,KeyedHasIC_Megamorphic,254,255,0
-block_hint,KeyedHasIC_Megamorphic,97,98,0
-block_hint,KeyedHasIC_Megamorphic,234,235,1
-block_hint,KeyedHasIC_Megamorphic,123,124,1
-block_hint,KeyedHasIC_Megamorphic,141,142,1
-block_hint,KeyedHasIC_Megamorphic,199,200,0
-block_hint,KeyedHasIC_Megamorphic,201,202,0
-block_hint,KeyedHasIC_Megamorphic,101,102,0
-block_hint,KeyedHasIC_Megamorphic,99,100,0
-block_hint,KeyedHasIC_Megamorphic,250,251,0
-block_hint,KeyedHasIC_Megamorphic,270,271,0
-block_hint,KeyedHasIC_Megamorphic,106,107,0
-block_hint,KeyedHasIC_Megamorphic,277,278,0
-block_hint,KeyedHasIC_Megamorphic,282,283,0
-block_hint,KeyedHasIC_Megamorphic,268,269,0
-block_hint,KeyedHasIC_Megamorphic,203,204,0
-block_hint,KeyedHasIC_Megamorphic,44,45,0
-block_hint,KeyedHasIC_Megamorphic,63,64,0
-block_hint,KeyedHasIC_Megamorphic,239,240,1
-block_hint,KeyedHasIC_Megamorphic,48,49,0
-block_hint,KeyedHasIC_Megamorphic,272,273,0
-block_hint,KeyedHasIC_Megamorphic,228,229,0
-block_hint,KeyedHasIC_Megamorphic,87,88,0
-block_hint,KeyedHasIC_Megamorphic,155,156,0
-block_hint,KeyedHasIC_Megamorphic,196,197,0
-block_hint,KeyedHasIC_Megamorphic,59,60,0
-block_hint,KeyedHasIC_Megamorphic,222,223,0
-block_hint,KeyedHasIC_Megamorphic,57,58,1
-block_hint,IterableToList,42,43,1
-block_hint,IterableToList,44,45,1
-block_hint,IterableToList,46,47,1
-block_hint,IterableToList,36,37,1
-block_hint,IterableToList,48,49,1
-block_hint,IterableToList,50,51,1
-block_hint,IterableToList,98,99,1
-block_hint,IterableToList,107,108,0
-block_hint,IterableToList,109,110,0
-block_hint,IterableToList,100,101,0
-block_hint,IterableToList,74,75,0
-block_hint,IterableToList,58,59,0
-block_hint,IterableToList,96,97,0
-block_hint,IterableToList,52,53,0
-block_hint,IterableToList,93,94,1
-block_hint,IterableToList,82,83,1
-block_hint,IterableToList,17,18,0
-block_hint,IterableToList,61,62,1
-block_hint,IterableToList,14,15,1
-block_hint,IterableToList,90,91,0
-block_hint,IterableToList,103,104,0
-block_hint,IterableToList,88,89,0
-block_hint,IterableToList,32,33,0
-block_hint,IterableToList,113,114,1
-block_hint,IterableToList,111,112,1
-block_hint,IterableToList,63,64,1
-block_hint,IterableToList,34,35,1
-block_hint,IterableToListWithSymbolLookup,39,40,0
-block_hint,IterableToListWithSymbolLookup,96,97,1
-block_hint,IterableToListWithSymbolLookup,94,95,0
-block_hint,IterableToListWithSymbolLookup,82,83,1
-block_hint,IterableToListWithSymbolLookup,55,56,1
-block_hint,IterableToListWithSymbolLookup,25,26,1
-block_hint,IterableToListWithSymbolLookup,2,3,1
-block_hint,IterableToListWithSymbolLookup,99,100,1
-block_hint,IterableToListWithSymbolLookup,92,93,0
-block_hint,IterableToListWithSymbolLookup,71,72,1
-block_hint,IterableToListWithSymbolLookup,78,79,0
-block_hint,IterableToListWithSymbolLookup,84,85,1
-block_hint,IterableToListWithSymbolLookup,57,58,1
-block_hint,IterableToListWithSymbolLookup,27,28,1
-block_hint,IterableToListWithSymbolLookup,4,5,1
-block_hint,IterableToListWithSymbolLookup,80,81,1
-block_hint,IterableToListWithSymbolLookup,62,63,0
-block_hint,IterableToListWithSymbolLookup,17,18,1
-block_hint,IterableToListMayPreserveHoles,8,9,1
-block_hint,IterableToListMayPreserveHoles,15,16,0
-block_hint,IterableToListMayPreserveHoles,20,21,1
-block_hint,IterableToListMayPreserveHoles,17,18,1
-block_hint,IterableToListMayPreserveHoles,11,12,1
-block_hint,IterableToListMayPreserveHoles,3,4,1
-block_hint,IterableToListMayPreserveHoles,13,14,1
-block_hint,IterableToListMayPreserveHoles,5,6,0
-block_hint,FindOrderedHashMapEntry,26,27,1
-block_hint,FindOrderedHashMapEntry,64,65,0
-block_hint,FindOrderedHashMapEntry,24,25,0
-block_hint,FindOrderedHashMapEntry,22,23,0
-block_hint,FindOrderedHashMapEntry,68,69,0
-block_hint,FindOrderedHashMapEntry,58,59,1
-block_hint,FindOrderedHashMapEntry,60,61,1
-block_hint,MapConstructor,328,329,1
-block_hint,MapConstructor,248,249,1
-block_hint,MapConstructor,105,106,0
-block_hint,MapConstructor,13,14,1
-block_hint,MapConstructor,270,271,1
-block_hint,MapConstructor,211,212,1
-block_hint,MapConstructor,86,87,0
-block_hint,MapConstructor,88,89,1
-block_hint,MapConstructor,272,273,1
-block_hint,MapConstructor,308,309,0
-block_hint,MapConstructor,319,320,0
-block_hint,MapConstructor,220,221,0
-block_hint,MapConstructor,109,110,0
-block_hint,MapConstructor,238,239,1
-block_hint,MapConstructor,103,104,1
-block_hint,MapPrototypeSet,98,99,1
-block_hint,MapPrototypeSet,62,63,1
-block_hint,MapPrototypeSet,64,65,1
-block_hint,MapPrototypeSet,88,89,1
-block_hint,MapPrototypeSet,90,91,0
-block_hint,MapPrototypeSet,26,27,1
-block_hint,MapPrototypeSet,94,95,0
-block_hint,MapPrototypeSet,56,57,0
-block_hint,MapPrototypeSet,24,25,0
-block_hint,MapPrototypeSet,22,23,0
-block_hint,MapPrototypeSet,31,32,1
-block_hint,MapPrototypeSet,66,67,0
-block_hint,MapPrototypeSet,47,48,0
-block_hint,MapPrototypeSet,49,50,1
-block_hint,MapPrototypeSet,51,52,1
-block_hint,MapPrototypeSet,53,54,0
-block_hint,MapPrototypeSet,17,18,1
-block_hint,MapPrototypeSet,29,30,1
-block_hint,MapPrototypeDelete,98,99,1
-block_hint,MapPrototypeDelete,77,78,1
-block_hint,MapPrototypeDelete,79,80,1
-block_hint,MapPrototypeDelete,15,16,0
-block_hint,MapPrototypeDelete,89,90,1
-block_hint,MapPrototypeDelete,63,64,0
-block_hint,MapPrototypeDelete,40,41,0
-block_hint,MapPrototypeDelete,65,66,1
-block_hint,MapPrototypeDelete,67,68,1
-block_hint,MapPrototypeDelete,19,20,1
-block_hint,MapPrototypeDelete,21,22,1
-block_hint,MapPrototypeDelete,23,24,1
-block_hint,MapPrototypeGet,12,13,1
-block_hint,MapPrototypeGet,7,8,1
-block_hint,MapPrototypeGet,9,10,1
-block_hint,MapPrototypeGet,3,4,1
-block_hint,MapPrototypeHas,10,11,1
-block_hint,MapPrototypeHas,5,6,1
-block_hint,MapPrototypeHas,7,8,1
-block_hint,MapPrototypeEntries,13,14,1
-block_hint,MapPrototypeEntries,8,9,1
-block_hint,MapPrototypeEntries,10,11,1
-block_hint,MapPrototypeEntries,4,5,1
-block_hint,MapPrototypeEntries,6,7,1
-block_hint,MapPrototypeGetSize,8,9,1
-block_hint,MapPrototypeGetSize,5,6,1
-block_hint,MapPrototypeGetSize,3,4,1
-block_hint,MapPrototypeForEach,33,34,1
-block_hint,MapPrototypeForEach,30,31,0
-block_hint,MapPrototypeForEach,27,28,1
-block_hint,MapPrototypeForEach,20,21,1
-block_hint,MapPrototypeForEach,22,23,1
-block_hint,MapPrototypeForEach,24,25,1
-block_hint,MapPrototypeForEach,12,13,1
-block_hint,MapPrototypeForEach,14,15,0
-block_hint,MapPrototypeValues,13,14,1
-block_hint,MapPrototypeValues,8,9,1
-block_hint,MapPrototypeValues,10,11,1
-block_hint,MapPrototypeValues,4,5,1
-block_hint,MapPrototypeValues,6,7,1
-block_hint,MapIteratorPrototypeNext,47,48,1
-block_hint,MapIteratorPrototypeNext,30,31,1
-block_hint,MapIteratorPrototypeNext,32,33,1
-block_hint,MapIteratorPrototypeNext,19,20,0
-block_hint,MapIteratorPrototypeNext,21,22,0
-block_hint,MapIteratorPrototypeNext,7,8,1
-block_hint,MapIteratorPrototypeNext,39,40,1
-block_hint,MapIteratorPrototypeNext,9,10,1
-block_hint,MapIteratorPrototypeNext,11,12,1
-block_hint,MapIteratorPrototypeNext,13,14,1
-block_hint,MapIteratorPrototypeNext,15,16,1
-block_hint,MapIteratorPrototypeNext,17,18,1
-block_hint,MapIteratorPrototypeNext,25,26,1
-block_hint,SameValueNumbersOnly,4,5,1
-block_hint,Add_Baseline,39,40,0
-block_hint,Add_Baseline,25,26,0
-block_hint,Add_Baseline,9,10,1
-block_hint,Add_Baseline,84,85,0
-block_hint,Add_Baseline,46,47,1
-block_hint,Add_Baseline,56,57,0
-block_hint,Add_Baseline,20,21,1
-block_hint,Add_Baseline,64,65,1
-block_hint,Add_Baseline,23,24,1
-block_hint,Add_Baseline,31,32,1
-block_hint,Add_Baseline,11,12,1
-block_hint,AddSmi_Baseline,39,40,0
-block_hint,AddSmi_Baseline,25,26,0
-block_hint,AddSmi_Baseline,9,10,1
-block_hint,AddSmi_Baseline,60,61,1
-block_hint,AddSmi_Baseline,31,32,1
-block_hint,AddSmi_Baseline,11,12,1
-block_hint,Subtract_Baseline,31,32,0
-block_hint,Subtract_Baseline,11,12,1
-block_hint,Subtract_Baseline,60,61,1
-block_hint,Subtract_Baseline,82,83,1
-block_hint,Subtract_Baseline,76,77,0
-block_hint,Subtract_Baseline,53,54,0
-block_hint,Subtract_Baseline,62,63,1
-block_hint,Subtract_Baseline,23,24,1
-block_hint,Subtract_Baseline,33,34,1
-block_hint,Subtract_Baseline,13,14,1
-block_hint,SubtractSmi_Baseline,31,32,0
-block_hint,SubtractSmi_Baseline,11,12,1
-block_hint,SubtractSmi_Baseline,51,52,1
-block_hint,SubtractSmi_Baseline,33,34,1
-block_hint,SubtractSmi_Baseline,13,14,1
-block_hint,Multiply_Baseline,100,101,0
-block_hint,Multiply_Baseline,61,62,0
-block_hint,Multiply_Baseline,77,78,0
-block_hint,Multiply_Baseline,87,88,1
-block_hint,Multiply_Baseline,79,80,1
-block_hint,Multiply_Baseline,13,14,1
-block_hint,Multiply_Baseline,63,64,1
-block_hint,Multiply_Baseline,93,94,1
-block_hint,Multiply_Baseline,65,66,1
-block_hint,Multiply_Baseline,34,35,1
-block_hint,Multiply_Baseline,15,16,1
-block_hint,MultiplySmi_Baseline,92,93,0
-block_hint,MultiplySmi_Baseline,61,62,0
-block_hint,MultiplySmi_Baseline,71,72,0
-block_hint,MultiplySmi_Baseline,73,74,1
-block_hint,MultiplySmi_Baseline,32,33,0
-block_hint,MultiplySmi_Baseline,13,14,1
-block_hint,MultiplySmi_Baseline,51,52,1
-block_hint,MultiplySmi_Baseline,34,35,1
-block_hint,MultiplySmi_Baseline,15,16,1
-block_hint,Divide_Baseline,89,90,0
-block_hint,Divide_Baseline,91,92,0
-block_hint,Divide_Baseline,69,70,0
-block_hint,Divide_Baseline,47,48,1
-block_hint,Divide_Baseline,14,15,1
-block_hint,Divide_Baseline,73,74,1
-block_hint,Divide_Baseline,97,98,1
-block_hint,Divide_Baseline,75,76,1
-block_hint,Divide_Baseline,55,56,0
-block_hint,Divide_Baseline,28,29,1
-block_hint,Divide_Baseline,40,41,1
-block_hint,Divide_Baseline,16,17,1
-block_hint,DivideSmi_Baseline,83,84,0
-block_hint,DivideSmi_Baseline,99,100,0
-block_hint,DivideSmi_Baseline,85,86,0
-block_hint,DivideSmi_Baseline,69,70,0
-block_hint,DivideSmi_Baseline,47,48,1
-block_hint,DivideSmi_Baseline,14,15,1
-block_hint,DivideSmi_Baseline,57,58,1
-block_hint,DivideSmi_Baseline,40,41,1
-block_hint,DivideSmi_Baseline,16,17,1
-block_hint,Modulus_Baseline,108,109,0
-block_hint,Modulus_Baseline,94,95,0
-block_hint,Modulus_Baseline,71,72,1
-block_hint,Modulus_Baseline,66,67,1
-block_hint,Modulus_Baseline,37,38,0
-block_hint,Modulus_Baseline,14,15,1
-block_hint,ModulusSmi_Baseline,71,72,1
-block_hint,ModulusSmi_Baseline,66,67,1
-block_hint,ModulusSmi_Baseline,37,38,0
-block_hint,ModulusSmi_Baseline,14,15,1
-block_hint,ModulusSmi_Baseline,54,55,1
-block_hint,ModulusSmi_Baseline,39,40,1
-block_hint,ModulusSmi_Baseline,16,17,1
-block_hint,BitwiseAnd_Baseline,35,36,0
-block_hint,BitwiseAnd_Baseline,23,24,1
-block_hint,BitwiseAnd_Baseline,8,9,0
-block_hint,BitwiseAnd_Baseline,33,34,0
-block_hint,BitwiseAnd_Baseline,27,28,1
-block_hint,BitwiseAnd_Baseline,12,13,0
-block_hint,BitwiseAnd_Baseline,50,51,1
-block_hint,BitwiseAnd_Baseline,14,15,1
-block_hint,BitwiseAndSmi_Baseline,18,19,0
-block_hint,BitwiseAndSmi_Baseline,16,17,1
-block_hint,BitwiseAndSmi_Baseline,7,8,0
-block_hint,BitwiseAndSmi_Baseline,26,27,0
-block_hint,BitwiseAndSmi_Baseline,20,21,0
-block_hint,BitwiseAndSmi_Baseline,9,10,1
-block_hint,BitwiseOr_Baseline,35,36,0
-block_hint,BitwiseOr_Baseline,23,24,1
-block_hint,BitwiseOr_Baseline,8,9,1
-block_hint,BitwiseOr_Baseline,48,49,1
-block_hint,BitwiseOr_Baseline,50,51,1
-block_hint,BitwiseOr_Baseline,14,15,1
-block_hint,BitwiseOrSmi_Baseline,5,6,0
-block_hint,BitwiseOrSmi_Baseline,18,19,0
-block_hint,BitwiseOrSmi_Baseline,16,17,0
-block_hint,BitwiseOrSmi_Baseline,28,29,1
-block_hint,BitwiseOrSmi_Baseline,9,10,1
-block_hint,BitwiseXor_Baseline,25,26,1
-block_hint,BitwiseXor_Baseline,35,36,0
-block_hint,BitwiseXor_Baseline,23,24,1
-block_hint,BitwiseXor_Baseline,48,49,1
-block_hint,BitwiseXor_Baseline,33,34,0
-block_hint,BitwiseXor_Baseline,27,28,1
-block_hint,BitwiseXor_Baseline,50,51,1
-block_hint,BitwiseXor_Baseline,14,15,1
-block_hint,BitwiseXorSmi_Baseline,18,19,0
-block_hint,BitwiseXorSmi_Baseline,16,17,1
-block_hint,BitwiseXorSmi_Baseline,7,8,1
-block_hint,BitwiseXorSmi_Baseline,9,10,1
-block_hint,ShiftLeft_Baseline,25,26,1
-block_hint,ShiftLeft_Baseline,10,11,0
-block_hint,ShiftLeft_Baseline,50,51,1
-block_hint,ShiftLeft_Baseline,14,15,1
-block_hint,ShiftLeftSmi_Baseline,35,36,1
-block_hint,ShiftLeftSmi_Baseline,25,26,1
-block_hint,ShiftLeftSmi_Baseline,37,38,1
-block_hint,ShiftLeftSmi_Baseline,9,10,1
-block_hint,ShiftRight_Baseline,6,7,0
-block_hint,ShiftRight_Baseline,10,11,0
-block_hint,ShiftRight_Baseline,46,47,0
-block_hint,ShiftRight_Baseline,29,30,0
-block_hint,ShiftRight_Baseline,14,15,1
-block_hint,ShiftRightSmi_Baseline,22,23,1
-block_hint,ShiftRightSmi_Baseline,26,27,0
-block_hint,ShiftRightSmi_Baseline,20,21,0
-block_hint,ShiftRightSmi_Baseline,9,10,1
-block_hint,ShiftRightLogical_Baseline,25,26,1
-block_hint,ShiftRightLogical_Baseline,10,11,0
-block_hint,ShiftRightLogical_Baseline,46,47,0
-block_hint,ShiftRightLogical_Baseline,29,30,0
-block_hint,ShiftRightLogical_Baseline,14,15,1
-block_hint,ShiftRightLogicalSmi_Baseline,35,36,1
-block_hint,ShiftRightLogicalSmi_Baseline,25,26,1
-block_hint,ShiftRightLogicalSmi_Baseline,33,34,0
-block_hint,ShiftRightLogicalSmi_Baseline,23,24,0
-block_hint,ShiftRightLogicalSmi_Baseline,9,10,1
-block_hint,Add_WithFeedback,60,61,1
-block_hint,Add_WithFeedback,86,87,0
-block_hint,Add_WithFeedback,84,85,0
-block_hint,Add_WithFeedback,54,55,1
-block_hint,Add_WithFeedback,46,47,1
-block_hint,Add_WithFeedback,33,34,0
-block_hint,Add_WithFeedback,23,24,1
-block_hint,Subtract_WithFeedback,74,75,1
-block_hint,Subtract_WithFeedback,82,83,0
-block_hint,Subtract_WithFeedback,76,77,0
-block_hint,Subtract_WithFeedback,53,54,0
-block_hint,Subtract_WithFeedback,23,24,1
-block_hint,Modulus_WithFeedback,108,109,0
-block_hint,Modulus_WithFeedback,94,95,0
-block_hint,Modulus_WithFeedback,71,72,1
-block_hint,Modulus_WithFeedback,66,67,1
-block_hint,Modulus_WithFeedback,37,38,0
-block_hint,Modulus_WithFeedback,14,15,1
-block_hint,BitwiseOr_WithFeedback,6,7,1
-block_hint,BitwiseOr_WithFeedback,35,36,0
-block_hint,BitwiseOr_WithFeedback,23,24,0
-block_hint,BitwiseOr_WithFeedback,10,11,0
-block_hint,BitwiseOr_WithFeedback,46,47,0
-block_hint,BitwiseOr_WithFeedback,29,30,0
-block_hint,BitwiseOr_WithFeedback,14,15,1
-block_hint,Equal_Baseline,48,49,0
-block_hint,Equal_Baseline,18,19,1
-block_hint,Equal_Baseline,101,102,0
-block_hint,Equal_Baseline,14,15,1
-block_hint,Equal_Baseline,39,40,0
-block_hint,Equal_Baseline,26,27,0
-block_hint,Equal_Baseline,28,29,1
-block_hint,Equal_Baseline,45,46,0
-block_hint,Equal_Baseline,32,33,0
-block_hint,Equal_Baseline,24,25,1
-block_hint,Equal_Baseline,77,78,0
-block_hint,Equal_Baseline,75,76,0
-block_hint,Equal_Baseline,83,84,0
-block_hint,Equal_Baseline,85,86,0
-block_hint,Equal_Baseline,59,60,0
-block_hint,Equal_Baseline,65,66,0
-block_hint,Equal_Baseline,69,70,1
-block_hint,Equal_Baseline,98,99,0
-block_hint,Equal_Baseline,71,72,1
-block_hint,Equal_Baseline,6,7,1
-block_hint,StrictEqual_Baseline,37,38,0
-block_hint,StrictEqual_Baseline,76,77,0
-block_hint,StrictEqual_Baseline,47,48,1
-block_hint,StrictEqual_Baseline,60,61,0
-block_hint,StrictEqual_Baseline,51,52,0
-block_hint,StrictEqual_Baseline,53,54,1
-block_hint,StrictEqual_Baseline,35,36,1
-block_hint,StrictEqual_Baseline,33,34,0
-block_hint,StrictEqual_Baseline,55,56,0
-block_hint,StrictEqual_Baseline,29,30,1
-block_hint,StrictEqual_Baseline,31,32,1
-block_hint,StrictEqual_Baseline,49,50,1
-block_hint,StrictEqual_Baseline,41,42,0
-block_hint,StrictEqual_Baseline,45,46,0
-block_hint,StrictEqual_Baseline,66,67,0
-block_hint,StrictEqual_Baseline,13,14,0
-block_hint,StrictEqual_Baseline,43,44,0
-block_hint,StrictEqual_Baseline,3,4,1
-block_hint,LessThan_Baseline,44,45,0
-block_hint,LessThan_Baseline,23,24,1
-block_hint,LessThan_Baseline,25,26,1
-block_hint,LessThan_Baseline,10,11,0
-block_hint,LessThan_Baseline,56,57,0
-block_hint,LessThan_Baseline,54,55,0
-block_hint,LessThan_Baseline,18,19,1
-block_hint,LessThan_Baseline,31,32,0
-block_hint,LessThan_Baseline,16,17,1
-block_hint,LessThan_Baseline,12,13,0
-block_hint,LessThan_Baseline,5,6,1
-block_hint,GreaterThan_Baseline,44,45,0
-block_hint,GreaterThan_Baseline,23,24,1
-block_hint,GreaterThan_Baseline,10,11,0
-block_hint,GreaterThan_Baseline,48,49,1
-block_hint,GreaterThan_Baseline,56,57,0
-block_hint,GreaterThan_Baseline,58,59,0
-block_hint,GreaterThan_Baseline,54,55,1
-block_hint,GreaterThan_Baseline,50,51,1
-block_hint,GreaterThan_Baseline,18,19,0
-block_hint,GreaterThan_Baseline,12,13,0
-block_hint,GreaterThan_Baseline,5,6,1
-block_hint,LessThanOrEqual_Baseline,44,45,0
-block_hint,LessThanOrEqual_Baseline,23,24,1
-block_hint,LessThanOrEqual_Baseline,25,26,1
-block_hint,LessThanOrEqual_Baseline,56,57,0
-block_hint,LessThanOrEqual_Baseline,58,59,1
-block_hint,LessThanOrEqual_Baseline,37,38,1
-block_hint,LessThanOrEqual_Baseline,27,28,1
-block_hint,LessThanOrEqual_Baseline,5,6,1
-block_hint,GreaterThanOrEqual_Baseline,44,45,0
-block_hint,GreaterThanOrEqual_Baseline,23,24,1
-block_hint,GreaterThanOrEqual_Baseline,25,26,1
-block_hint,GreaterThanOrEqual_Baseline,56,57,0
-block_hint,GreaterThanOrEqual_Baseline,54,55,0
-block_hint,GreaterThanOrEqual_Baseline,18,19,1
-block_hint,GreaterThanOrEqual_Baseline,31,32,0
-block_hint,GreaterThanOrEqual_Baseline,16,17,1
-block_hint,GreaterThanOrEqual_Baseline,27,28,1
-block_hint,GreaterThanOrEqual_Baseline,5,6,1
-block_hint,Equal_WithFeedback,103,104,0
-block_hint,Equal_WithFeedback,81,82,1
-block_hint,Equal_WithFeedback,37,38,0
-block_hint,Equal_WithFeedback,48,49,0
-block_hint,Equal_WithFeedback,18,19,1
-block_hint,Equal_WithFeedback,95,96,0
-block_hint,Equal_WithFeedback,101,102,0
-block_hint,Equal_WithFeedback,20,21,0
-block_hint,Equal_WithFeedback,39,40,0
-block_hint,Equal_WithFeedback,26,27,0
-block_hint,Equal_WithFeedback,28,29,1
-block_hint,Equal_WithFeedback,45,46,0
-block_hint,Equal_WithFeedback,32,33,0
-block_hint,Equal_WithFeedback,75,76,0
-block_hint,Equal_WithFeedback,83,84,0
-block_hint,Equal_WithFeedback,85,86,0
-block_hint,Equal_WithFeedback,87,88,0
-block_hint,Equal_WithFeedback,79,80,0
-block_hint,Equal_WithFeedback,89,90,0
-block_hint,Equal_WithFeedback,117,118,0
-block_hint,Equal_WithFeedback,109,110,0
-block_hint,Equal_WithFeedback,107,108,0
-block_hint,Equal_WithFeedback,67,68,0
-block_hint,Equal_WithFeedback,105,106,0
-block_hint,Equal_WithFeedback,65,66,0
-block_hint,Equal_WithFeedback,6,7,1
-block_hint,StrictEqual_WithFeedback,74,75,1
-block_hint,StrictEqual_WithFeedback,37,38,0
-block_hint,StrictEqual_WithFeedback,72,73,0
-block_hint,StrictEqual_WithFeedback,47,48,1
-block_hint,StrictEqual_WithFeedback,60,61,0
-block_hint,StrictEqual_WithFeedback,53,54,1
-block_hint,StrictEqual_WithFeedback,35,36,1
-block_hint,StrictEqual_WithFeedback,57,58,1
-block_hint,StrictEqual_WithFeedback,55,56,0
-block_hint,StrictEqual_WithFeedback,31,32,1
-block_hint,StrictEqual_WithFeedback,41,42,0
-block_hint,StrictEqual_WithFeedback,70,71,1
-block_hint,StrictEqual_WithFeedback,45,46,0
-block_hint,StrictEqual_WithFeedback,21,22,1
-block_hint,StrictEqual_WithFeedback,66,67,0
-block_hint,StrictEqual_WithFeedback,15,16,0
-block_hint,StrictEqual_WithFeedback,13,14,0
-block_hint,StrictEqual_WithFeedback,43,44,0
-block_hint,StrictEqual_WithFeedback,3,4,1
-block_hint,LessThan_WithFeedback,44,45,1
-block_hint,LessThan_WithFeedback,23,24,1
-block_hint,LessThan_WithFeedback,46,47,1
-block_hint,LessThan_WithFeedback,48,49,1
-block_hint,LessThan_WithFeedback,56,57,0
-block_hint,LessThan_WithFeedback,54,55,0
-block_hint,LessThan_WithFeedback,18,19,1
-block_hint,LessThan_WithFeedback,31,32,0
-block_hint,LessThan_WithFeedback,16,17,1
-block_hint,LessThan_WithFeedback,12,13,0
-block_hint,LessThan_WithFeedback,39,40,1
-block_hint,LessThan_WithFeedback,5,6,1
-block_hint,GreaterThan_WithFeedback,60,61,1
-block_hint,GreaterThan_WithFeedback,23,24,1
-block_hint,GreaterThan_WithFeedback,25,26,1
-block_hint,GreaterThan_WithFeedback,48,49,1
-block_hint,GreaterThan_WithFeedback,56,57,0
-block_hint,GreaterThan_WithFeedback,58,59,0
-block_hint,GreaterThan_WithFeedback,54,55,1
-block_hint,GreaterThan_WithFeedback,50,51,1
-block_hint,GreaterThan_WithFeedback,18,19,0
-block_hint,GreaterThan_WithFeedback,12,13,0
-block_hint,GreaterThan_WithFeedback,5,6,1
-block_hint,GreaterThanOrEqual_WithFeedback,60,61,1
-block_hint,GreaterThanOrEqual_WithFeedback,46,47,1
-block_hint,GreaterThanOrEqual_WithFeedback,48,49,0
-block_hint,GreaterThanOrEqual_WithFeedback,56,57,0
-block_hint,GreaterThanOrEqual_WithFeedback,54,55,0
-block_hint,GreaterThanOrEqual_WithFeedback,18,19,1
-block_hint,GreaterThanOrEqual_WithFeedback,31,32,0
-block_hint,GreaterThanOrEqual_WithFeedback,16,17,1
-block_hint,GreaterThanOrEqual_WithFeedback,5,6,1
-block_hint,BitwiseNot_Baseline,19,20,0
-block_hint,BitwiseNot_Baseline,15,16,1
-block_hint,BitwiseNot_Baseline,7,8,1
-block_hint,BitwiseNot_Baseline,27,28,1
-block_hint,BitwiseNot_Baseline,9,10,1
-block_hint,Decrement_Baseline,19,20,0
-block_hint,Decrement_Baseline,13,14,0
-block_hint,Decrement_Baseline,5,6,1
-block_hint,Increment_Baseline,19,20,0
-block_hint,Increment_Baseline,17,18,1
-block_hint,Increment_Baseline,13,14,0
-block_hint,Increment_Baseline,15,16,1
-block_hint,Increment_Baseline,5,6,1
-block_hint,Negate_Baseline,20,21,1
-block_hint,Negate_Baseline,14,15,0
-block_hint,Negate_Baseline,18,19,1
-block_hint,Negate_Baseline,5,6,1
-block_hint,ObjectAssign,21,22,1
-block_hint,ObjectAssign,18,19,0
-block_hint,ObjectAssign,15,16,1
-block_hint,ObjectAssign,12,13,1
-block_hint,ObjectAssign,9,10,0
-block_hint,ObjectAssign,5,6,0
-block_hint,ObjectCreate,78,79,1
-block_hint,ObjectCreate,75,76,0
-block_hint,ObjectCreate,33,34,1
-block_hint,ObjectCreate,35,36,1
-block_hint,ObjectCreate,37,38,1
-block_hint,ObjectCreate,39,40,0
-block_hint,ObjectCreate,41,42,1
-block_hint,ObjectCreate,43,44,0
-block_hint,ObjectCreate,45,46,1
-block_hint,ObjectCreate,17,18,1
-block_hint,ObjectCreate,69,70,0
-block_hint,ObjectCreate,55,56,0
-block_hint,ObjectCreate,59,60,1
-block_hint,ObjectCreate,47,48,0
-block_hint,ObjectCreate,49,50,0
-block_hint,ObjectCreate,5,6,1
-block_hint,ObjectCreate,52,53,1
-block_hint,ObjectCreate,7,8,1
-block_hint,ObjectCreate,9,10,1
-block_hint,ObjectCreate,11,12,1
-block_hint,ObjectCreate,13,14,1
-block_hint,ObjectCreate,15,16,1
-block_hint,ObjectCreate,20,21,0
-block_hint,ObjectCreate,61,62,1
-block_hint,ObjectGetOwnPropertyDescriptor,519,520,1
-block_hint,ObjectGetOwnPropertyDescriptor,516,517,0
-block_hint,ObjectGetOwnPropertyDescriptor,513,514,0
-block_hint,ObjectGetOwnPropertyDescriptor,505,506,1
-block_hint,ObjectGetOwnPropertyDescriptor,492,493,1
-block_hint,ObjectGetOwnPropertyDescriptor,408,409,0
-block_hint,ObjectGetOwnPropertyDescriptor,470,471,1
-block_hint,ObjectGetOwnPropertyDescriptor,488,489,0
-block_hint,ObjectGetOwnPropertyDescriptor,434,435,0
-block_hint,ObjectGetOwnPropertyDescriptor,467,468,1
-block_hint,ObjectGetOwnPropertyDescriptor,410,411,1
-block_hint,ObjectGetOwnPropertyDescriptor,462,463,0
-block_hint,ObjectGetOwnPropertyDescriptor,464,465,0
-block_hint,ObjectGetOwnPropertyDescriptor,436,437,0
-block_hint,ObjectGetOwnPropertyDescriptor,406,407,0
-block_hint,ObjectGetOwnPropertyDescriptor,331,332,0
-block_hint,ObjectGetOwnPropertyDescriptor,197,198,1
-block_hint,ObjectGetOwnPropertyDescriptor,307,308,1
-block_hint,ObjectGetOwnPropertyDescriptor,138,139,0
-block_hint,ObjectGetOwnPropertyDescriptor,499,500,0
-block_hint,ObjectGetOwnPropertyDescriptor,507,508,1
-block_hint,ObjectGetOwnPropertyDescriptor,495,496,0
-block_hint,ObjectGetOwnPropertyDescriptor,426,427,0
-block_hint,ObjectGetOwnPropertyDescriptor,329,330,0
-block_hint,ObjectGetOwnPropertyDescriptor,31,32,1
-block_hint,ObjectGetOwnPropertyDescriptor,361,362,1
-block_hint,ObjectGetOwnPropertyDescriptor,150,151,0
-block_hint,ObjectGetOwnPropertyDescriptor,474,475,0
-block_hint,ObjectGetOwnPropertyDescriptor,390,391,0
-block_hint,ObjectGetOwnPropertyDescriptor,264,265,0
-block_hint,ObjectGetOwnPropertyDescriptor,260,261,0
-block_hint,ObjectGetOwnPropertyDescriptor,282,283,0
-block_hint,ObjectGetOwnPropertyDescriptor,284,285,1
-block_hint,ObjectGetOwnPropertyDescriptor,36,37,1
-block_hint,ObjectGetOwnPropertyDescriptor,365,366,1
-block_hint,ObjectGetOwnPropertyDescriptor,186,187,0
-block_hint,ObjectGetOwnPropertyDescriptor,268,269,1
-block_hint,ObjectKeys,32,33,1
-block_hint,ObjectKeys,27,28,1
-block_hint,ObjectKeys,23,24,1
-block_hint,ObjectKeys,25,26,0
-block_hint,ObjectKeys,17,18,0
-block_hint,ObjectKeys,5,6,1
-block_hint,ObjectKeys,21,22,1
-block_hint,ObjectKeys,9,10,0
-block_hint,ObjectKeys,7,8,1
-block_hint,ObjectKeys,14,15,1
-block_hint,ObjectPrototypeHasOwnProperty,230,231,1
-block_hint,ObjectPrototypeHasOwnProperty,205,206,1
-block_hint,ObjectPrototypeHasOwnProperty,222,223,1
-block_hint,ObjectPrototypeHasOwnProperty,241,242,0
-block_hint,ObjectPrototypeHasOwnProperty,219,220,0
-block_hint,ObjectPrototypeHasOwnProperty,209,210,1
-block_hint,ObjectPrototypeHasOwnProperty,163,164,1
-block_hint,ObjectPrototypeHasOwnProperty,235,236,0
-block_hint,ObjectPrototypeHasOwnProperty,237,238,0
-block_hint,ObjectPrototypeHasOwnProperty,233,234,0
-block_hint,ObjectPrototypeHasOwnProperty,228,229,0
-block_hint,ObjectPrototypeHasOwnProperty,192,193,1
-block_hint,ObjectPrototypeHasOwnProperty,137,138,0
-block_hint,ObjectPrototypeHasOwnProperty,211,212,0
-block_hint,ObjectPrototypeHasOwnProperty,175,176,1
-block_hint,ObjectPrototypeHasOwnProperty,141,142,0
-block_hint,ObjectPrototypeHasOwnProperty,226,227,0
-block_hint,ObjectPrototypeHasOwnProperty,76,77,0
-block_hint,ObjectPrototypeHasOwnProperty,203,204,0
-block_hint,ObjectPrototypeHasOwnProperty,34,35,1
-block_hint,ObjectPrototypeHasOwnProperty,52,53,0
-block_hint,ObjectPrototypeHasOwnProperty,36,37,0
-block_hint,ObjectPrototypeHasOwnProperty,197,198,1
-block_hint,ObjectPrototypeHasOwnProperty,40,41,0
-block_hint,ObjectPrototypeHasOwnProperty,171,172,0
-block_hint,ObjectPrototypeHasOwnProperty,178,179,1
-block_hint,ObjectPrototypeHasOwnProperty,58,59,0
-block_hint,ObjectToString,45,46,0
-block_hint,ObjectToString,60,61,0
-block_hint,ObjectToString,68,69,0
-block_hint,ObjectToString,55,56,0
-block_hint,ObjectToString,7,8,1
-block_hint,ObjectToString,5,6,1
-block_hint,ObjectToString,11,12,1
-block_hint,ObjectToString,20,21,0
-block_hint,InstanceOf_WithFeedback,50,51,1
-block_hint,InstanceOf_WithFeedback,52,53,0
-block_hint,InstanceOf_WithFeedback,54,55,1
-block_hint,InstanceOf_WithFeedback,32,33,1
-block_hint,InstanceOf_WithFeedback,34,35,1
-block_hint,InstanceOf_WithFeedback,5,6,1
-block_hint,InstanceOf_WithFeedback,14,15,1
-block_hint,InstanceOf_Baseline,50,51,1
-block_hint,InstanceOf_Baseline,54,55,1
-block_hint,InstanceOf_Baseline,32,33,1
-block_hint,InstanceOf_Baseline,34,35,1
-block_hint,InstanceOf_Baseline,5,6,1
-block_hint,InstanceOf_Baseline,14,15,1
-block_hint,ForInEnumerate,34,35,1
-block_hint,ForInEnumerate,36,37,0
-block_hint,ForInEnumerate,30,31,0
-block_hint,ForInEnumerate,22,23,1
-block_hint,ForInEnumerate,32,33,1
-block_hint,ForInEnumerate,5,6,1
-block_hint,ForInEnumerate,38,39,1
-block_hint,ForInEnumerate,9,10,1
-block_hint,ForInPrepare,7,8,1
-block_hint,ForInPrepare,12,13,1
-block_hint,ForInPrepare,5,6,1
-block_hint,ForInFilter,234,235,1
-block_hint,ForInFilter,236,237,1
-block_hint,ForInFilter,227,228,0
-block_hint,ForInFilter,117,118,1
-block_hint,ForInFilter,217,218,0
-block_hint,ForInFilter,62,63,0
-block_hint,ForInFilter,129,130,1
-block_hint,ForInFilter,221,222,1
-block_hint,ForInFilter,103,104,0
-block_hint,ForInFilter,105,106,0
-block_hint,ForInFilter,66,67,0
-block_hint,ForInFilter,64,65,0
-block_hint,ForInFilter,270,271,0
-block_hint,ForInFilter,225,226,1
-block_hint,ForInFilter,109,110,1
-block_hint,ForInFilter,71,72,0
-block_hint,ForInFilter,266,267,0
-block_hint,ForInFilter,264,265,0
-block_hint,ForInFilter,251,252,0
-block_hint,ForInFilter,107,108,1
-block_hint,ForInFilter,40,41,1
-block_hint,ForInFilter,201,202,0
-block_hint,ForInFilter,42,43,0
-block_hint,ForInFilter,144,145,1
-block_hint,ForInFilter,46,47,0
-block_hint,ForInFilter,113,114,0
-block_hint,ForInFilter,131,132,0
-block_hint,ForInFilter,36,37,0
-block_hint,ForInFilter,248,249,0
-block_hint,ForInFilter,255,256,1
-block_hint,ForInFilter,189,190,0
-block_hint,ForInFilter,33,34,1
-block_hint,RegExpConstructor,55,56,1
-block_hint,RegExpConstructor,7,8,1
-block_hint,RegExpConstructor,131,132,1
-block_hint,RegExpConstructor,133,134,1
-block_hint,RegExpConstructor,70,71,0
-block_hint,RegExpConstructor,106,107,1
-block_hint,RegExpConstructor,127,128,0
-block_hint,RegExpConstructor,108,109,0
-block_hint,RegExpConstructor,82,83,1
-block_hint,RegExpConstructor,67,68,1
-block_hint,RegExpConstructor,40,41,0
-block_hint,RegExpConstructor,76,77,0
-block_hint,RegExpConstructor,104,105,1
-block_hint,RegExpConstructor,86,87,1
-block_hint,RegExpConstructor,78,79,1
-block_hint,RegExpConstructor,63,64,1
-block_hint,RegExpExecInternal,20,21,0
-block_hint,RegExpExecInternal,22,23,0
-block_hint,RegExpExecInternal,36,37,0
-block_hint,RegExpExecInternal,12,13,0
-block_hint,RegExpExecInternal,48,49,0
-block_hint,RegExpExecInternal,51,52,1
-block_hint,RegExpExecInternal,40,41,1
-block_hint,RegExpExecInternal,44,45,0
-block_hint,RegExpExecInternal,24,25,1
-block_hint,FindOrderedHashSetEntry,26,27,1
-block_hint,FindOrderedHashSetEntry,34,35,0
-block_hint,FindOrderedHashSetEntry,24,25,0
-block_hint,FindOrderedHashSetEntry,22,23,0
-block_hint,FindOrderedHashSetEntry,42,43,1
-block_hint,FindOrderedHashSetEntry,68,69,0
-block_hint,FindOrderedHashSetEntry,58,59,1
-block_hint,FindOrderedHashSetEntry,60,61,1
-block_hint,SetConstructor,202,203,1
-block_hint,SetConstructor,74,75,0
-block_hint,SetConstructor,11,12,1
-block_hint,SetConstructor,172,173,1
-block_hint,SetConstructor,135,136,1
-block_hint,SetConstructor,56,57,0
-block_hint,SetConstructor,58,59,1
-block_hint,SetConstructor,218,219,1
-block_hint,SetConstructor,210,211,0
-block_hint,SetConstructor,79,80,1
-block_hint,SetConstructor,23,24,1
-block_hint,SetConstructor,222,223,1
-block_hint,SetConstructor,214,215,0
-block_hint,SetConstructor,150,151,1
-block_hint,SetConstructor,25,26,1
-block_hint,SetConstructor,178,179,1
-block_hint,SetConstructor,143,144,1
-block_hint,SetConstructor,83,84,1
-block_hint,SetConstructor,85,86,1
-block_hint,SetConstructor,87,88,1
-block_hint,SetConstructor,89,90,1
-block_hint,SetConstructor,91,92,1
-block_hint,SetConstructor,93,94,1
-block_hint,SetConstructor,34,35,1
-block_hint,SetConstructor,95,96,1
-block_hint,SetConstructor,146,147,1
-block_hint,SetConstructor,152,153,1
-block_hint,SetConstructor,190,191,0
-block_hint,SetConstructor,183,184,0
-block_hint,SetConstructor,154,155,0
-block_hint,SetConstructor,105,106,0
-block_hint,SetConstructor,137,138,1
-block_hint,SetConstructor,27,28,1
-block_hint,SetConstructor,62,63,1
-block_hint,SetConstructor,176,177,0
-block_hint,SetConstructor,66,67,1
-block_hint,SetPrototypeHas,10,11,1
-block_hint,SetPrototypeHas,5,6,1
-block_hint,SetPrototypeHas,7,8,1
-block_hint,SetPrototypeAdd,98,99,1
-block_hint,SetPrototypeAdd,62,63,1
-block_hint,SetPrototypeAdd,64,65,1
-block_hint,SetPrototypeAdd,88,89,1
-block_hint,SetPrototypeAdd,90,91,0
-block_hint,SetPrototypeAdd,27,28,1
-block_hint,SetPrototypeAdd,79,80,0
-block_hint,SetPrototypeAdd,25,26,0
-block_hint,SetPrototypeAdd,23,24,0
-block_hint,SetPrototypeAdd,35,36,1
-block_hint,SetPrototypeAdd,66,67,0
-block_hint,SetPrototypeAdd,51,52,1
-block_hint,SetPrototypeAdd,53,54,1
-block_hint,SetPrototypeAdd,33,34,1
-block_hint,SetPrototypeDelete,96,97,1
-block_hint,SetPrototypeDelete,75,76,1
-block_hint,SetPrototypeDelete,77,78,1
-block_hint,SetPrototypeDelete,15,16,0
-block_hint,SetPrototypeDelete,32,33,1
-block_hint,SetPrototypeDelete,87,88,0
-block_hint,SetPrototypeDelete,30,31,0
-block_hint,SetPrototypeDelete,28,29,0
-block_hint,SetPrototypeDelete,45,46,1
-block_hint,SetPrototypeDelete,83,84,0
-block_hint,SetPrototypeDelete,79,80,0
-block_hint,SetPrototypeDelete,19,20,1
-block_hint,SetPrototypeDelete,21,22,1
-block_hint,SetPrototypeGetSize,8,9,1
-block_hint,SetPrototypeGetSize,5,6,1
-block_hint,SetPrototypeGetSize,3,4,1
-block_hint,SetPrototypeValues,13,14,1
-block_hint,SetPrototypeValues,8,9,1
-block_hint,SetPrototypeValues,10,11,1
-block_hint,SetPrototypeValues,4,5,1
-block_hint,SetPrototypeValues,6,7,1
-block_hint,SetIteratorPrototypeNext,41,42,1
-block_hint,SetIteratorPrototypeNext,28,29,1
-block_hint,SetIteratorPrototypeNext,39,40,1
-block_hint,SetIteratorPrototypeNext,17,18,0
-block_hint,SetIteratorPrototypeNext,19,20,0
-block_hint,SetIteratorPrototypeNext,37,38,1
-block_hint,SetIteratorPrototypeNext,15,16,1
-block_hint,SetIteratorPrototypeNext,23,24,1
-block_hint,SetOrSetIteratorToList,33,34,1
-block_hint,SetOrSetIteratorToList,8,9,1
-block_hint,SetOrSetIteratorToList,31,32,1
-block_hint,SetOrSetIteratorToList,47,48,1
-block_hint,SetOrSetIteratorToList,43,44,1
-block_hint,SetOrSetIteratorToList,14,15,0
-block_hint,SetOrSetIteratorToList,19,20,0
-block_hint,SetOrSetIteratorToList,24,25,1
-block_hint,StringFromCharCode,87,88,1
-block_hint,StringFromCharCode,53,54,1
-block_hint,StringFromCharCode,11,12,0
-block_hint,StringFromCharCode,81,82,1
-block_hint,StringFromCharCode,77,78,1
-block_hint,StringFromCharCode,19,20,0
-block_hint,StringFromCharCode,23,24,0
-block_hint,StringFromCharCode,58,59,0
-block_hint,StringFromCharCode,21,22,0
-block_hint,StringFromCharCode,29,30,0
-block_hint,StringFromCharCode,35,36,0
-block_hint,StringFromCharCode,33,34,0
-block_hint,StringFromCharCode,75,76,0
-block_hint,StringFromCharCode,41,42,0
-block_hint,StringFromCharCode,17,18,1
-block_hint,StringFromCharCode,44,45,1
-block_hint,StringPrototypeReplace,36,37,1
-block_hint,StringPrototypeReplace,8,9,0
-block_hint,StringPrototypeReplace,55,56,1
-block_hint,StringPrototypeReplace,51,52,1
-block_hint,StringPrototypeReplace,38,39,1
-block_hint,StringPrototypeReplace,22,23,0
-block_hint,StringPrototypeReplace,3,4,1
-block_hint,StringPrototypeReplace,24,25,0
-block_hint,StringPrototypeReplace,5,6,1
-block_hint,StringPrototypeReplace,28,29,1
-block_hint,StringPrototypeReplace,10,11,1
-block_hint,StringPrototypeReplace,57,58,0
-block_hint,StringPrototypeReplace,30,31,1
-block_hint,StringPrototypeReplace,92,93,1
-block_hint,StringPrototypeReplace,87,88,1
-block_hint,StringPrototypeReplace,80,81,1
-block_hint,StringPrototypeReplace,73,74,1
-block_hint,StringPrototypeReplace,59,60,1
-block_hint,StringPrototypeReplace,61,62,0
-block_hint,StringPrototypeReplace,63,64,1
-block_hint,StringPrototypeReplace,53,54,1
-block_hint,StringPrototypeReplace,42,43,1
-block_hint,StringPrototypeReplace,14,15,1
-block_hint,StringPrototypeReplace,90,91,1
-block_hint,StringPrototypeReplace,82,83,1
-block_hint,StringPrototypeReplace,76,77,0
-block_hint,StringPrototypeReplace,78,79,1
-block_hint,StringPrototypeReplace,70,71,1
-block_hint,StringPrototypeReplace,49,50,1
-block_hint,StringPrototypeReplace,16,17,1
-block_hint,StringPrototypeReplace,18,19,0
-block_hint,StringPrototypeReplace,26,27,1
-block_hint,StringPrototypeSplit,125,126,1
-block_hint,StringPrototypeSplit,112,113,0
-block_hint,StringPrototypeSplit,92,93,1
-block_hint,StringPrototypeSplit,35,36,0
-block_hint,StringPrototypeSplit,114,115,1
-block_hint,StringPrototypeSplit,105,106,1
-block_hint,StringPrototypeSplit,94,95,1
-block_hint,StringPrototypeSplit,64,65,0
-block_hint,StringPrototypeSplit,8,9,1
-block_hint,StringPrototypeSplit,66,67,0
-block_hint,StringPrototypeSplit,10,11,1
-block_hint,StringPrototypeSplit,77,78,1
-block_hint,StringPrototypeSplit,37,38,1
-block_hint,StringPrototypeSplit,116,117,0
-block_hint,StringPrototypeSplit,79,80,1
-block_hint,StringPrototypeSplit,168,169,1
-block_hint,StringPrototypeSplit,152,153,1
-block_hint,StringPrototypeSplit,128,129,1
-block_hint,StringPrototypeSplit,122,123,1
-block_hint,StringPrototypeSplit,107,108,1
-block_hint,StringPrototypeSplit,83,84,0
-block_hint,StringPrototypeSplit,68,69,0
-block_hint,StringPrototypeSplit,85,86,1
-block_hint,StringPrototypeSplit,70,71,1
-block_hint,StringPrototypeSplit,88,89,1
-block_hint,StringPrototypeSplit,25,26,0
-block_hint,StringPrototypeSplit,72,73,1
-block_hint,StringPrototypeSplit,42,43,0
-block_hint,StringPrototypeSplit,110,111,1
-block_hint,StringPrototypeSplit,90,91,0
-block_hint,StringPrototypeSplit,27,28,0
-block_hint,StringPrototypeSplit,16,17,1
-block_hint,StringPrototypeSplit,18,19,1
-block_hint,StringPrototypeSplit,20,21,1
-block_hint,StringPrototypeSplit,50,51,1
-block_hint,TypedArrayConstructor,14,15,1
-block_hint,TypedArrayConstructor,11,12,0
-block_hint,TypedArrayConstructor,2,3,0
-block_hint,TypedArrayPrototypeByteLength,69,70,1
-block_hint,TypedArrayPrototypeByteLength,43,44,1
-block_hint,TypedArrayPrototypeByteLength,45,46,1
-block_hint,TypedArrayPrototypeByteLength,71,72,0
-block_hint,TypedArrayPrototypeByteLength,73,74,0
-block_hint,TypedArrayPrototypeByteLength,65,66,0
-block_hint,TypedArrayPrototypeByteLength,33,34,0
-block_hint,TypedArrayPrototypeLength,50,51,1
-block_hint,TypedArrayPrototypeLength,33,34,1
-block_hint,TypedArrayPrototypeLength,35,36,1
-block_hint,TypedArrayPrototypeLength,52,53,0
-block_hint,TypedArrayPrototypeLength,44,45,0
-block_hint,TypedArrayPrototypeLength,28,29,0
-block_hint,TypedArrayPrototypeLength,19,20,0
-block_hint,WeakMapConstructor,351,352,1
-block_hint,WeakMapConstructor,271,272,1
-block_hint,WeakMapConstructor,119,120,0
-block_hint,WeakMapConstructor,14,15,1
-block_hint,WeakMapConstructor,293,294,1
-block_hint,WeakMapConstructor,230,231,1
-block_hint,WeakMapConstructor,93,94,0
-block_hint,WeakMapConstructor,95,96,1
-block_hint,WeakMapConstructor,295,296,1
-block_hint,WeakMapConstructor,331,332,0
-block_hint,WeakMapConstructor,342,343,0
-block_hint,WeakMapConstructor,239,240,0
-block_hint,WeakMapConstructor,123,124,0
-block_hint,WeakMapConstructor,241,242,0
-block_hint,WeakMapConstructor,109,110,0
-block_hint,WeakMapConstructor,243,244,1
-block_hint,WeakMapConstructor,211,212,1
-block_hint,WeakMapConstructor,28,29,1
-block_hint,WeakMapConstructor,30,31,1
-block_hint,WeakMapConstructor,32,33,1
-block_hint,WeakMapConstructor,98,99,0
-block_hint,WeakMapConstructor,117,118,1
-block_hint,WeakMapLookupHashIndex,9,10,1
-block_hint,WeakMapLookupHashIndex,31,32,1
-block_hint,WeakMapLookupHashIndex,11,12,0
-block_hint,WeakMapLookupHashIndex,13,14,0
-block_hint,WeakMapLookupHashIndex,25,26,1
-block_hint,WeakMapLookupHashIndex,33,34,1
-block_hint,WeakMapLookupHashIndex,27,28,0
-block_hint,WeakMapLookupHashIndex,23,24,0
-block_hint,WeakMapGet,12,13,1
-block_hint,WeakMapGet,7,8,1
-block_hint,WeakMapGet,9,10,1
-block_hint,WeakMapGet,3,4,1
-block_hint,WeakMapPrototypeHas,10,11,1
-block_hint,WeakMapPrototypeHas,5,6,1
-block_hint,WeakMapPrototypeHas,7,8,1
-block_hint,WeakMapPrototypeSet,24,25,1
-block_hint,WeakMapPrototypeSet,5,6,1
-block_hint,WeakMapPrototypeSet,7,8,1
-block_hint,WeakMapPrototypeSet,13,14,1
-block_hint,WeakMapPrototypeSet,22,23,1
-block_hint,WeakMapPrototypeSet,15,16,0
-block_hint,WeakMapPrototypeSet,9,10,0
-block_hint,WeakCollectionSet,17,18,1
-block_hint,WeakCollectionSet,20,21,0
-block_hint,WeakCollectionSet,7,8,1
-block_hint,WeakCollectionSet,13,14,0
-block_hint,AsyncGeneratorResolve,9,10,1
-block_hint,AsyncGeneratorResolve,3,4,1
-block_hint,AsyncGeneratorResolve,11,12,0
-block_hint,AsyncGeneratorResolve,7,8,0
-block_hint,AsyncGeneratorYieldWithAwait,24,25,1
-block_hint,AsyncGeneratorYieldWithAwait,19,20,0
-block_hint,AsyncGeneratorYieldWithAwait,6,7,1
-block_hint,AsyncGeneratorYieldWithAwait,42,43,1
-block_hint,AsyncGeneratorYieldWithAwait,37,38,0
-block_hint,AsyncGeneratorYieldWithAwait,28,29,1
-block_hint,AsyncGeneratorYieldWithAwait,8,9,1
-block_hint,AsyncGeneratorYieldWithAwait,10,11,1
-block_hint,AsyncGeneratorYieldWithAwait,12,13,1
-block_hint,AsyncGeneratorYieldWithAwait,14,15,1
-block_hint,AsyncGeneratorYieldWithAwait,22,23,0
-block_hint,AsyncGeneratorResumeNext,18,19,0
-block_hint,AsyncGeneratorResumeNext,14,15,0
-block_hint,AsyncGeneratorPrototypeNext,27,28,1
-block_hint,AsyncGeneratorPrototypeNext,16,17,1
-block_hint,AsyncGeneratorPrototypeNext,4,5,1
-block_hint,AsyncGeneratorPrototypeNext,34,35,1
-block_hint,AsyncGeneratorPrototypeNext,29,30,0
-block_hint,AsyncGeneratorPrototypeNext,18,19,1
-block_hint,AsyncGeneratorPrototypeNext,20,21,1
-block_hint,AsyncGeneratorPrototypeNext,22,23,1
-block_hint,AsyncGeneratorPrototypeNext,6,7,1
-block_hint,AsyncGeneratorPrototypeNext,11,12,0
-block_hint,AsyncGeneratorAwaitUncaught,24,25,1
-block_hint,AsyncGeneratorAwaitUncaught,19,20,1
-block_hint,AsyncGeneratorAwaitUncaught,2,3,1
-block_hint,AsyncGeneratorAwaitUncaught,30,31,1
-block_hint,AsyncGeneratorAwaitUncaught,32,33,0
-block_hint,AsyncGeneratorAwaitUncaught,28,29,1
-block_hint,AsyncGeneratorAwaitUncaught,8,9,1
-block_hint,AsyncGeneratorAwaitUncaught,10,11,1
-block_hint,AsyncGeneratorAwaitUncaught,12,13,1
-block_hint,AsyncGeneratorAwaitUncaught,14,15,1
-block_hint,AsyncGeneratorAwaitUncaught,22,23,0
-block_hint,AsyncGeneratorAwaitResolveClosure,8,9,1
-block_hint,AsyncGeneratorAwaitResolveClosure,2,3,1
-block_hint,AsyncGeneratorAwaitResolveClosure,6,7,0
-block_hint,AsyncGeneratorYieldWithAwaitResolveClosure,5,6,1
-block_hint,AsyncGeneratorYieldWithAwaitResolveClosure,2,3,1
-block_hint,StringAdd_CheckNone,19,20,1
-block_hint,StringAdd_CheckNone,58,59,0
-block_hint,StringAdd_CheckNone,78,79,1
-block_hint,StringAdd_CheckNone,42,43,1
-block_hint,StringAdd_CheckNone,60,61,0
-block_hint,StringAdd_CheckNone,94,95,0
-block_hint,StringAdd_CheckNone,84,85,0
-block_hint,StringAdd_CheckNone,88,89,0
-block_hint,StringAdd_CheckNone,64,65,1
-block_hint,StringAdd_CheckNone,76,77,1
-block_hint,StringAdd_CheckNone,55,56,1
-block_hint,StringAdd_CheckNone,13,14,0
-block_hint,StringAdd_CheckNone,15,16,0
-block_hint,StringAdd_CheckNone,92,93,1
-block_hint,StringAdd_CheckNone,82,83,1
-block_hint,StringAdd_CheckNone,34,35,0
-block_hint,StringAdd_CheckNone,38,39,0
-block_hint,StringAdd_CheckNone,40,41,1
-block_hint,StringAdd_CheckNone,53,54,1
-block_hint,StringAdd_CheckNone,11,12,0
-block_hint,StringAdd_CheckNone,90,91,1
-block_hint,StringAdd_CheckNone,80,81,1
-block_hint,StringAdd_CheckNone,26,27,0
-block_hint,StringAdd_CheckNone,30,31,0
-block_hint,SubString,63,64,1
-block_hint,SubString,101,102,1
-block_hint,SubString,58,59,1
-block_hint,SubString,56,57,1
-block_hint,SubString,114,115,0
-block_hint,SubString,85,86,1
-block_hint,SubString,19,20,0
-block_hint,SubString,21,22,0
-block_hint,SubString,130,131,1
-block_hint,SubString,118,119,1
-block_hint,SubString,38,39,0
-block_hint,SubString,83,84,1
-block_hint,SubString,17,18,0
-block_hint,SubString,132,133,1
-block_hint,SubString,120,121,1
-block_hint,SubString,42,43,0
-block_hint,SubString,75,76,1
-block_hint,SubString,139,140,0
-block_hint,SubString,103,104,1
-block_hint,SubString,34,35,1
-block_hint,SubString,31,32,0
-block_hint,GetProperty,56,57,1
-block_hint,GetProperty,101,102,0
-block_hint,GetProperty,175,176,1
-block_hint,GetProperty,205,206,0
-block_hint,GetProperty,165,166,1
-block_hint,GetProperty,133,134,1
-block_hint,GetProperty,60,61,1
-block_hint,GetProperty,139,140,0
-block_hint,GetProperty,141,142,0
-block_hint,GetProperty,110,111,0
-block_hint,GetProperty,62,63,0
-block_hint,GetProperty,167,168,0
-block_hint,GetProperty,220,221,0
-block_hint,GetProperty,210,211,1
-block_hint,GetProperty,112,113,0
-block_hint,GetProperty,231,232,0
-block_hint,GetProperty,222,223,0
-block_hint,GetProperty,218,219,0
-block_hint,GetProperty,35,36,1
-block_hint,GetProperty,224,225,0
-block_hint,GetProperty,37,38,0
-block_hint,GetProperty,147,148,0
-block_hint,GetProperty,187,188,1
-block_hint,GetProperty,41,42,0
-block_hint,GetProperty,157,158,0
-block_hint,GetProperty,161,162,1
-block_hint,GetProperty,151,152,0
-block_hint,GetProperty,47,48,0
-block_hint,GetProperty,233,234,0
-block_hint,GetProperty,196,197,1
-block_hint,GetProperty,92,93,0
-block_hint,GetProperty,94,95,0
-block_hint,GetProperty,96,97,0
-block_hint,GetProperty,163,164,0
-block_hint,GetProperty,98,99,1
-block_hint,GetProperty,203,204,0
-block_hint,GetProperty,228,229,0
-block_hint,GetProperty,235,236,1
-block_hint,GetProperty,201,202,0
-block_hint,GetProperty,199,200,0
-block_hint,GetProperty,22,23,0
-block_hint,GetProperty,182,183,1
-block_hint,GetProperty,104,105,1
-block_hint,GetPropertyWithReceiver,58,59,1
-block_hint,GetPropertyWithReceiver,60,61,1
-block_hint,GetPropertyWithReceiver,203,204,0
-block_hint,GetPropertyWithReceiver,174,175,1
-block_hint,GetPropertyWithReceiver,211,212,0
-block_hint,GetPropertyWithReceiver,112,113,0
-block_hint,GetPropertyWithReceiver,162,163,1
-block_hint,GetPropertyWithReceiver,138,139,1
-block_hint,GetPropertyWithReceiver,62,63,1
-block_hint,GetPropertyWithReceiver,144,145,0
-block_hint,GetPropertyWithReceiver,146,147,0
-block_hint,GetPropertyWithReceiver,114,115,0
-block_hint,GetPropertyWithReceiver,64,65,0
-block_hint,GetPropertyWithReceiver,164,165,0
-block_hint,GetPropertyWithReceiver,217,218,1
-block_hint,GetPropertyWithReceiver,117,118,0
-block_hint,GetPropertyWithReceiver,238,239,0
-block_hint,GetPropertyWithReceiver,234,235,0
-block_hint,GetPropertyWithReceiver,225,226,0
-block_hint,GetPropertyWithReceiver,148,149,1
-block_hint,GetPropertyWithReceiver,38,39,1
-block_hint,GetPropertyWithReceiver,236,237,0
-block_hint,GetPropertyWithReceiver,40,41,0
-block_hint,GetPropertyWithReceiver,183,184,0
-block_hint,GetPropertyWithReceiver,34,35,0
-block_hint,GetPropertyWithReceiver,231,232,1
-block_hint,GetPropertyWithReceiver,205,206,0
-block_hint,GetPropertyWithReceiver,181,182,1
-block_hint,GetPropertyWithReceiver,108,109,1
-block_hint,SetProperty,379,380,1
-block_hint,SetProperty,381,382,0
-block_hint,SetProperty,1201,1202,0
-block_hint,SetProperty,925,926,1
-block_hint,SetProperty,1034,1035,1
-block_hint,SetProperty,1036,1037,0
-block_hint,SetProperty,733,734,0
-block_hint,SetProperty,919,920,1
-block_hint,SetProperty,413,414,0
-block_hint,SetProperty,415,416,0
-block_hint,SetProperty,256,257,1
-block_hint,SetProperty,417,418,0
-block_hint,SetProperty,630,631,1
-block_hint,SetProperty,92,93,1
-block_hint,SetProperty,94,95,1
-block_hint,SetProperty,1089,1090,0
-block_hint,SetProperty,808,809,1
-block_hint,SetProperty,810,811,1
-block_hint,SetProperty,812,813,0
-block_hint,SetProperty,104,105,1
-block_hint,SetProperty,108,109,1
-block_hint,SetProperty,429,430,1
-block_hint,SetProperty,110,111,1
-block_hint,SetProperty,106,107,1
-block_hint,CreateDataProperty,319,320,1
-block_hint,CreateDataProperty,321,322,0
-block_hint,CreateDataProperty,978,979,0
-block_hint,CreateDataProperty,779,780,1
-block_hint,CreateDataProperty,868,869,1
-block_hint,CreateDataProperty,539,540,1
-block_hint,CreateDataProperty,645,646,0
-block_hint,CreateDataProperty,647,648,1
-block_hint,CreateDataProperty,903,904,1
-block_hint,CreateDataProperty,333,334,0
-block_hint,CreateDataProperty,55,56,1
-block_hint,CreateDataProperty,543,544,1
-block_hint,CreateDataProperty,57,58,1
-block_hint,FindNonDefaultConstructorOrConstruct,12,13,0
-block_hint,FindNonDefaultConstructorOrConstruct,6,7,0
-block_hint,FindNonDefaultConstructorOrConstruct,14,15,1
-block_hint,FindNonDefaultConstructorOrConstruct,16,17,0
-block_hint,FindNonDefaultConstructorOrConstruct,4,5,1
-block_hint,FindNonDefaultConstructorOrConstruct,18,19,1
-block_hint,ArrayPrototypeConcat,79,80,1
-block_hint,ArrayPrototypeConcat,54,55,0
-block_hint,ArrayPrototypeConcat,63,64,1
-block_hint,ArrayPrototypeConcat,74,75,0
-block_hint,ArrayPrototypeConcat,81,82,0
-block_hint,ArrayPrototypeConcat,70,71,1
-block_hint,ArrayPrototypeConcat,37,38,1
-block_hint,ArrayPrototypeConcat,16,17,1
-block_hint,ArrayPrototypeConcat,3,4,1
-block_hint,ArrayPrototypeConcat,25,26,1
-block_hint,ArrayPrototypeConcat,9,10,0
-block_hint,ArrayPrototypeConcat,20,21,1
-block_hint,ArrayPrototypeConcat,30,31,0
-block_hint,ArrayPrototypeConcat,42,43,0
-block_hint,ArrayPrototypeConcat,72,73,1
-block_hint,ArrayPrototypeConcat,39,40,1
-block_hint,ArrayPrototypeConcat,18,19,1
-block_hint,ArrayPrototypeConcat,5,6,1
-block_hint,ArrayPrototypeConcat,57,58,1
-block_hint,ArrayPrototypeConcat,59,60,0
-block_hint,ArrayPrototypeConcat,66,67,0
-block_hint,ArrayPrototypeConcat,33,34,1
-block_hint,ArrayPrototypeConcat,68,69,0
-block_hint,ArrayPrototypeConcat,35,36,1
-block_hint,ArrayPrototypeConcat,27,28,1
-block_hint,ArrayPrototypeConcat,11,12,1
-block_hint,ArrayEvery,73,74,1
-block_hint,ArrayEvery,31,32,0
-block_hint,ArrayEvery,122,123,1
-block_hint,ArrayEvery,116,117,1
-block_hint,ArrayEvery,91,92,1
-block_hint,ArrayEvery,93,94,1
-block_hint,ArrayEvery,99,100,0
-block_hint,ArrayEvery,105,106,1
-block_hint,ArrayEvery,107,108,1
-block_hint,ArrayEvery,97,98,1
-block_hint,ArrayEvery,49,50,0
-block_hint,ArrayEvery,102,103,1
-block_hint,ArrayEvery,66,67,1
-block_hint,ArrayEvery,45,46,1
-block_hint,ArrayEvery,12,13,1
-block_hint,ArrayEvery,57,58,1
-block_hint,ArrayEvery,28,29,0
-block_hint,ArrayEvery,68,69,1
-block_hint,ArrayEvery,70,71,0
-block_hint,ArrayEvery,51,52,0
-block_hint,ArrayEvery,47,48,0
-block_hint,ArrayEvery,18,19,0
-block_hint,ArrayEvery,20,21,1
-block_hint,ArrayEvery,61,62,0
-block_hint,ArrayEvery,109,110,1
-block_hint,ArrayEvery,87,88,0
-block_hint,ArrayEvery,89,90,0
-block_hint,ArrayEvery,111,112,0
-block_hint,ArrayEvery,79,80,0
-block_hint,ArrayFilter,195,196,1
-block_hint,ArrayFilter,84,85,0
-block_hint,ArrayFilter,295,296,1
-block_hint,ArrayFilter,286,287,1
-block_hint,ArrayFilter,225,226,1
-block_hint,ArrayFilter,227,228,1
-block_hint,ArrayFilter,246,247,0
-block_hint,ArrayFilter,270,271,1
-block_hint,ArrayFilter,272,273,1
-block_hint,ArrayFilter,239,240,0
-block_hint,ArrayFilter,276,277,1
-block_hint,ArrayFilter,197,198,1
-block_hint,ArrayFilter,123,124,1
-block_hint,ArrayFilter,22,23,1
-block_hint,ArrayFilter,199,200,1
-block_hint,ArrayFilter,125,126,0
-block_hint,ArrayFilter,24,25,1
-block_hint,ArrayFilter,267,268,1
-block_hint,ArrayFilter,166,167,0
-block_hint,ArrayFilter,278,279,1
-block_hint,ArrayFilter,201,202,1
-block_hint,ArrayFilter,127,128,1
-block_hint,ArrayFilter,26,27,1
-block_hint,ArrayFilter,211,212,1
-block_hint,ArrayFilter,213,214,0
-block_hint,ArrayFilter,283,284,1
-block_hint,ArrayFilter,215,216,1
-block_hint,ArrayFilter,217,218,1
-block_hint,ArrayFilter,219,220,1
-block_hint,ArrayFilter,203,204,1
-block_hint,ArrayFilter,129,130,0
-block_hint,ArrayFilter,28,29,1
-block_hint,ArrayFilter,171,172,0
-block_hint,ArrayFilter,103,104,0
-block_hint,ArrayFilter,242,243,1
-block_hint,ArrayFilter,244,245,0
-block_hint,ArrayFilter,205,206,0
-block_hint,ArrayFilter,131,132,0
-block_hint,ArrayFilter,42,43,0
-block_hint,ArrayFilter,44,45,1
-block_hint,ArrayFilter,149,150,0
-block_hint,ArrayFilter,249,250,1
-block_hint,ArrayFilter,177,178,0
-block_hint,ArrayFilter,179,180,0
-block_hint,ArrayFilter,251,252,0
-block_hint,ArrayFilter,253,254,0
-block_hint,ArrayFilter,255,256,1
-block_hint,ArrayFilter,257,258,0
-block_hint,ArrayFilter,259,260,1
-block_hint,ArrayFilter,280,281,0
-block_hint,ArrayFilter,237,238,0
-block_hint,ArrayFilter,161,162,0
-block_hint,ArrayFilter,95,96,0
-block_hint,ArrayFilter,187,188,1
-block_hint,ArrayFilter,60,61,0
-block_hint,ArrayFilter,64,65,1
-block_hint,ArrayFilter,50,51,1
-block_hint,ArrayForEach,70,71,1
-block_hint,ArrayForEach,29,30,0
-block_hint,ArrayForEach,99,100,1
-block_hint,ArrayForEach,95,96,1
-block_hint,ArrayForEach,76,77,1
-block_hint,ArrayForEach,78,79,1
-block_hint,ArrayForEach,84,85,0
-block_hint,ArrayForEach,90,91,1
-block_hint,ArrayForEach,92,93,1
-block_hint,ArrayForEach,47,48,0
-block_hint,ArrayForEach,87,88,1
-block_hint,ArrayForEach,63,64,1
-block_hint,ArrayForEach,43,44,1
-block_hint,ArrayForEach,12,13,1
-block_hint,ArrayForEach,53,54,1
-block_hint,ArrayForEach,26,27,0
-block_hint,ArrayForEach,65,66,1
-block_hint,ArrayForEach,67,68,0
-block_hint,ArrayForEach,49,50,0
-block_hint,ArrayForEach,45,46,0
-block_hint,ArrayForEach,18,19,0
-block_hint,ArrayForEach,20,21,1
-block_hint,ArrayForEach,58,59,0
-block_hint,ArrayFrom,225,226,1
-block_hint,ArrayFrom,76,77,1
-block_hint,ArrayFrom,78,79,1
-block_hint,ArrayFrom,8,9,1
-block_hint,ArrayFrom,342,343,1
-block_hint,ArrayFrom,338,339,0
-block_hint,ArrayFrom,327,328,0
-block_hint,ArrayFrom,311,312,1
-block_hint,ArrayFrom,309,310,0
-block_hint,ArrayFrom,80,81,1
-block_hint,ArrayFrom,10,11,1
-block_hint,ArrayFrom,322,323,1
-block_hint,ArrayFrom,305,306,0
-block_hint,ArrayFrom,245,246,1
-block_hint,ArrayFrom,266,267,0
-block_hint,ArrayFrom,82,83,1
-block_hint,ArrayFrom,12,13,1
-block_hint,ArrayFrom,268,269,1
-block_hint,ArrayFrom,213,214,0
-block_hint,ArrayFrom,290,291,1
-block_hint,ArrayFrom,248,249,0
-block_hint,ArrayFrom,285,286,1
-block_hint,ArrayFrom,281,282,0
-block_hint,ArrayFrom,188,189,1
-block_hint,ArrayFrom,88,89,1
-block_hint,ArrayFrom,18,19,1
-block_hint,ArrayFrom,215,216,1
-block_hint,ArrayFrom,72,73,1
-block_hint,ArrayIsArray,13,14,1
-block_hint,ArrayIsArray,9,10,1
-block_hint,ArrayIsArray,7,8,0
-block_hint,LoadJoinElement_FastSmiOrObjectElements_0,2,3,1
-block_hint,LoadJoinElement_FastSmiOrObjectElements_0,4,5,0
-block_hint,LoadJoinElement_FastDoubleElements_0,3,4,1
-block_hint,LoadJoinElement_FastDoubleElements_0,5,6,0
-block_hint,LoadJoinElement_FastDoubleElements_0,7,8,1
-block_hint,JoinStackPush,28,29,1
-block_hint,JoinStackPush,6,7,1
-block_hint,JoinStackPush,10,11,0
-block_hint,JoinStackPop,9,10,1
-block_hint,JoinStackPop,4,5,1
-block_hint,ArrayPrototypeJoin,518,519,1
-block_hint,ArrayPrototypeJoin,456,457,1
-block_hint,ArrayPrototypeJoin,419,420,1
-block_hint,ArrayPrototypeJoin,334,335,1
-block_hint,ArrayPrototypeJoin,336,337,1
-block_hint,ArrayPrototypeJoin,367,368,1
-block_hint,ArrayPrototypeJoin,340,341,0
-block_hint,ArrayPrototypeJoin,179,180,0
-block_hint,ArrayPrototypeJoin,474,475,1
-block_hint,ArrayPrototypeJoin,440,441,1
-block_hint,ArrayPrototypeJoin,328,329,0
-block_hint,ArrayPrototypeJoin,228,229,1
-block_hint,ArrayPrototypeJoin,30,31,1
-block_hint,ArrayPrototypeJoin,181,182,0
-block_hint,ArrayPrototypeJoin,32,33,1
-block_hint,ArrayPrototypeJoin,387,388,1
-block_hint,ArrayPrototypeJoin,325,326,0
-block_hint,ArrayPrototypeJoin,143,144,1
-block_hint,ArrayPrototypeJoin,493,494,1
-block_hint,ArrayPrototypeJoin,458,459,0
-block_hint,ArrayPrototypeJoin,423,424,0
-block_hint,ArrayPrototypeJoin,369,370,1
-block_hint,ArrayPrototypeJoin,183,184,1
-block_hint,ArrayPrototypeJoin,38,39,1
-block_hint,ArrayPrototypeJoin,460,461,1
-block_hint,ArrayPrototypeJoin,425,426,0
-block_hint,ArrayPrototypeJoin,295,296,1
-block_hint,ArrayPrototypeJoin,428,429,0
-block_hint,ArrayPrototypeJoin,345,346,0
-block_hint,ArrayPrototypeJoin,189,190,0
-block_hint,ArrayPrototypeJoin,232,233,1
-block_hint,ArrayPrototypeJoin,145,146,1
-block_hint,ArrayPrototypeJoin,483,484,0
-block_hint,ArrayPrototypeJoin,487,488,1
-block_hint,ArrayPrototypeJoin,525,526,0
-block_hint,ArrayPrototypeJoin,521,522,0
-block_hint,ArrayPrototypeJoin,514,515,1
-block_hint,ArrayPrototypeJoin,489,490,1
-block_hint,ArrayPrototypeJoin,485,486,0
-block_hint,ArrayPrototypeJoin,147,148,0
-block_hint,ArrayPrototypeJoin,149,150,0
-block_hint,ArrayPrototypeJoin,469,470,0
-block_hint,ArrayPrototypeJoin,471,472,0
-block_hint,ArrayPrototypeJoin,454,455,1
-block_hint,ArrayPrototypeJoin,407,408,1
-block_hint,ArrayPrototypeJoin,409,410,1
-block_hint,ArrayPrototypeJoin,411,412,1
-block_hint,ArrayPrototypeJoin,413,414,1
-block_hint,ArrayPrototypeJoin,197,198,1
-block_hint,ArrayPrototypeJoin,254,255,0
-block_hint,ArrayPrototypeJoin,256,257,0
-block_hint,ArrayPrototypeJoin,302,303,0
-block_hint,ArrayPrototypeJoin,262,263,0
-block_hint,ArrayPrototypeJoin,264,265,0
-block_hint,ArrayPrototypeJoin,203,204,1
-block_hint,ArrayPrototypeJoin,72,73,1
-block_hint,ArrayPrototypeJoin,381,382,1
-block_hint,ArrayPrototypeJoin,305,306,0
-block_hint,ArrayPrototypeJoin,207,208,1
-block_hint,ArrayPrototypeJoin,270,271,0
-block_hint,ArrayPrototypeJoin,272,273,0
-block_hint,ArrayPrototypeJoin,209,210,1
-block_hint,ArrayPrototypeJoin,86,87,1
-block_hint,ArrayPrototypeJoin,307,308,0
-block_hint,ArrayPrototypeJoin,219,220,1
-block_hint,ArrayPrototypeJoin,102,103,0
-block_hint,ArrayPrototypeJoin,104,105,0
-block_hint,ArrayPrototypeJoin,435,436,1
-block_hint,ArrayPrototypeJoin,403,404,1
-block_hint,ArrayPrototypeJoin,217,218,1
-block_hint,ArrayPrototypeJoin,100,101,0
-block_hint,ArrayPrototypeJoin,433,434,1
-block_hint,ArrayPrototypeJoin,401,402,1
-block_hint,ArrayPrototypeJoin,96,97,1
-block_hint,ArrayPrototypeJoin,352,353,1
-block_hint,ArrayPrototypeJoin,311,312,0
-block_hint,ArrayPrototypeJoin,215,216,0
-block_hint,ArrayPrototypeJoin,106,107,1
-block_hint,ArrayPrototypeJoin,108,109,0
-block_hint,ArrayPrototypeJoin,110,111,1
-block_hint,ArrayPrototypeJoin,284,285,1
-block_hint,ArrayPrototypeJoin,139,140,1
-block_hint,ArrayPrototypeToString,14,15,1
-block_hint,ArrayPrototypeToString,11,12,1
-block_hint,ArrayPrototypeToString,8,9,1
-block_hint,ArrayPrototypeToString,5,6,1
-block_hint,ArrayPrototypeToString,3,4,1
-block_hint,ArrayPrototypeLastIndexOf,279,280,1
-block_hint,ArrayPrototypeLastIndexOf,261,262,1
-block_hint,ArrayPrototypeLastIndexOf,245,246,1
-block_hint,ArrayPrototypeLastIndexOf,175,176,1
-block_hint,ArrayPrototypeLastIndexOf,177,178,1
-block_hint,ArrayPrototypeLastIndexOf,91,92,0
-block_hint,ArrayPrototypeLastIndexOf,41,42,1
-block_hint,ArrayPrototypeLastIndexOf,375,376,0
-block_hint,ArrayPrototypeLastIndexOf,361,362,0
-block_hint,ArrayPrototypeLastIndexOf,367,368,0
-block_hint,ArrayPrototypeLastIndexOf,358,359,0
-block_hint,ArrayPrototypeLastIndexOf,335,336,0
-block_hint,ArrayPrototypeLastIndexOf,324,325,1
-block_hint,ArrayPrototypeLastIndexOf,338,339,0
-block_hint,ArrayPrototypeLastIndexOf,328,329,0
-block_hint,ArrayPrototypeLastIndexOf,315,316,0
-block_hint,ArrayPrototypeLastIndexOf,300,301,0
-block_hint,ArrayPrototypeLastIndexOf,313,314,0
-block_hint,ArrayPrototypeLastIndexOf,298,299,0
-block_hint,ArrayPrototypeLastIndexOf,281,282,1
-block_hint,ArrayPrototypeLastIndexOf,252,253,0
-block_hint,ArrayPrototypeLastIndexOf,194,195,1
-block_hint,ArrayPrototypeLastIndexOf,83,84,1
-block_hint,ArrayPrototypeLastIndexOf,73,74,1
-block_hint,ArrayPrototypeLastIndexOf,21,22,1
-block_hint,ArrayPrototypeLastIndexOf,85,86,1
-block_hint,ArrayPrototypeLastIndexOf,77,78,0
-block_hint,ArrayPrototypeLastIndexOf,29,30,1
-block_hint,ArrayPrototypeLastIndexOf,60,61,0
-block_hint,ArrayPrototypeLastIndexOf,98,99,1
-block_hint,ArrayPrototypeLastIndexOf,56,57,0
-block_hint,ArrayPrototypeLastIndexOf,23,24,1
-block_hint,ArrayPrototypeLastIndexOf,58,59,0
-block_hint,ArrayPrototypeLastIndexOf,214,215,0
-block_hint,ArrayPrototypeLastIndexOf,220,221,1
-block_hint,ArrayPrototypeLastIndexOf,239,240,0
-block_hint,ArrayPrototypeLastIndexOf,212,213,0
-block_hint,ArrayPrototypeLastIndexOf,145,146,0
-block_hint,ArrayPrototypeLastIndexOf,129,130,1
-block_hint,ArrayPrototypeLastIndexOf,31,32,0
-block_hint,ArrayMap,165,166,1
-block_hint,ArrayMap,72,73,0
-block_hint,ArrayMap,255,256,1
-block_hint,ArrayMap,236,237,1
-block_hint,ArrayMap,188,189,1
-block_hint,ArrayMap,190,191,1
-block_hint,ArrayMap,209,210,0
-block_hint,ArrayMap,221,222,1
-block_hint,ArrayMap,223,224,1
-block_hint,ArrayMap,245,246,1
-block_hint,ArrayMap,206,207,0
-block_hint,ArrayMap,218,219,1
-block_hint,ArrayMap,225,226,1
-block_hint,ArrayMap,167,168,1
-block_hint,ArrayMap,114,115,1
-block_hint,ArrayMap,23,24,1
-block_hint,ArrayMap,233,234,1
-block_hint,ArrayMap,211,212,0
-block_hint,ArrayMap,159,160,0
-block_hint,ArrayMap,74,75,0
-block_hint,ArrayMap,182,183,1
-block_hint,ArrayMap,157,158,1
-block_hint,ArrayMap,55,56,0
-block_hint,ArrayMap,268,269,1
-block_hint,ArrayMap,265,266,0
-block_hint,ArrayMap,248,249,0
-block_hint,ArrayMap,227,228,0
-block_hint,ArrayMap,195,196,0
-block_hint,ArrayMap,116,117,0
-block_hint,ArrayMap,29,30,0
-block_hint,ArrayMap,31,32,1
-block_hint,ArrayMap,132,133,0
-block_hint,ArrayMap,33,34,1
-block_hint,ArrayMap,120,121,0
-block_hint,ArrayMap,37,38,1
-block_hint,ArrayMap,35,36,1
-block_hint,ArrayMap,253,254,0
-block_hint,ArrayMap,203,204,0
-block_hint,ArrayMap,149,150,0
-block_hint,ArrayMap,45,46,1
-block_hint,ArrayMap,151,152,0
-block_hint,ArrayMap,89,90,1
-block_hint,ArrayMap,83,84,0
-block_hint,ArrayMap,85,86,1
-block_hint,ArrayMap,184,185,1
-block_hint,ArrayMap,161,162,0
-block_hint,ArrayMap,62,63,0
-block_hint,ArrayMap,64,65,1
-block_hint,ArrayMap,96,97,1
-block_hint,ArrayMap,47,48,1
-block_hint,ArrayMap,153,154,1
-block_hint,ArrayMap,98,99,1
-block_hint,ArrayMap,49,50,1
-block_hint,ArrayMap,135,136,1
-block_hint,ArrayReduce,81,82,1
-block_hint,ArrayReduce,30,31,0
-block_hint,ArrayReduce,124,125,1
-block_hint,ArrayReduce,120,121,1
-block_hint,ArrayReduce,89,90,1
-block_hint,ArrayReduce,91,92,1
-block_hint,ArrayReduce,101,102,0
-block_hint,ArrayReduce,111,112,1
-block_hint,ArrayReduce,113,114,1
-block_hint,ArrayReduce,95,96,1
-block_hint,ArrayReduce,104,105,0
-block_hint,ArrayReduce,49,50,0
-block_hint,ArrayReduce,106,107,1
-block_hint,ArrayReduce,65,66,1
-block_hint,ArrayReduce,45,46,1
-block_hint,ArrayReduce,12,13,1
-block_hint,ArrayReduce,53,54,1
-block_hint,ArrayReduce,26,27,0
-block_hint,ArrayReduce,99,100,0
-block_hint,ArrayReduce,67,68,1
-block_hint,ArrayReduce,69,70,0
-block_hint,ArrayReduce,117,118,0
-block_hint,ArrayReduce,97,98,0
-block_hint,ArrayReduce,71,72,0
-block_hint,ArrayReduce,47,48,0
-block_hint,ArrayReduce,18,19,0
-block_hint,ArrayReduce,20,21,1
-block_hint,ArrayReduce,57,58,0
-block_hint,ArrayReduce,59,60,0
-block_hint,ArrayReduce,23,24,0
-block_hint,ArrayPrototypeReverse,236,237,1
-block_hint,ArrayPrototypeReverse,210,211,1
-block_hint,ArrayPrototypeReverse,190,191,1
-block_hint,ArrayPrototypeReverse,152,153,1
-block_hint,ArrayPrototypeReverse,103,104,1
-block_hint,ArrayPrototypeReverse,18,19,1
-block_hint,ArrayPrototypeReverse,192,193,1
-block_hint,ArrayPrototypeReverse,169,170,0
-block_hint,ArrayPrototypeReverse,140,141,1
-block_hint,ArrayPrototypeReverse,118,119,1
-block_hint,ArrayPrototypeReverse,89,90,0
-block_hint,ArrayPrototypeShift,237,238,1
-block_hint,ArrayPrototypeShift,205,206,1
-block_hint,ArrayPrototypeShift,185,186,1
-block_hint,ArrayPrototypeShift,132,133,1
-block_hint,ArrayPrototypeShift,81,82,1
-block_hint,ArrayPrototypeShift,11,12,1
-block_hint,ArrayPrototypeShift,196,197,1
-block_hint,ArrayPrototypeShift,168,169,0
-block_hint,ArrayPrototypeShift,134,135,1
-block_hint,ArrayPrototypeShift,83,84,0
-block_hint,ArrayPrototypeShift,13,14,1
-block_hint,ArrayPrototypeShift,136,137,0
-block_hint,ArrayPrototypeShift,85,86,0
-block_hint,ArrayPrototypeShift,68,69,0
-block_hint,ArrayPrototypeShift,87,88,0
-block_hint,ArrayPrototypeShift,27,28,0
-block_hint,ArrayPrototypeShift,29,30,1
-block_hint,ArrayPrototypeShift,170,171,0
-block_hint,ArrayPrototypeShift,89,90,0
-block_hint,ArrayPrototypeShift,33,34,0
-block_hint,ArrayPrototypeShift,148,149,0
-block_hint,ArrayPrototypeShift,111,112,1
-block_hint,ArrayPrototypeShift,91,92,0
-block_hint,ArrayPrototypeShift,39,40,0
-block_hint,ArrayPrototypeShift,41,42,1
-block_hint,ArrayPrototypeSlice,288,289,1
-block_hint,ArrayPrototypeSlice,267,268,1
-block_hint,ArrayPrototypeSlice,245,246,1
-block_hint,ArrayPrototypeSlice,182,183,1
-block_hint,ArrayPrototypeSlice,81,82,1
-block_hint,ArrayPrototypeSlice,12,13,1
-block_hint,ArrayPrototypeSlice,83,84,1
-block_hint,ArrayPrototypeSlice,14,15,1
-block_hint,ArrayPrototypeSlice,16,17,1
-block_hint,ArrayPrototypeSlice,87,88,1
-block_hint,ArrayPrototypeSlice,511,512,0
-block_hint,ArrayPrototypeSlice,509,510,0
-block_hint,ArrayPrototypeSlice,485,486,0
-block_hint,ArrayPrototypeSlice,448,449,0
-block_hint,ArrayPrototypeSlice,428,429,0
-block_hint,ArrayPrototypeSlice,405,406,0
-block_hint,ArrayPrototypeSlice,446,447,0
-block_hint,ArrayPrototypeSlice,426,427,0
-block_hint,ArrayPrototypeSlice,401,402,1
-block_hint,ArrayPrototypeSlice,479,480,0
-block_hint,ArrayPrototypeSlice,465,466,0
-block_hint,ArrayPrototypeSlice,454,455,0
-block_hint,ArrayPrototypeSlice,424,425,0
-block_hint,ArrayPrototypeSlice,422,423,0
-block_hint,ArrayPrototypeSlice,393,394,1
-block_hint,ArrayPrototypeSlice,332,333,0
-block_hint,ArrayPrototypeSlice,277,278,1
-block_hint,ArrayPrototypeSlice,257,258,0
-block_hint,ArrayPrototypeSlice,89,90,1
-block_hint,ArrayPrototypeSlice,20,21,1
-block_hint,ArrayPrototypeSlice,128,129,1
-block_hint,ArrayPrototypeSlice,66,67,0
-block_hint,ArrayPrototypeSlice,443,444,0
-block_hint,ArrayPrototypeSlice,386,387,0
-block_hint,ArrayPrototypeSlice,364,365,0
-block_hint,ArrayPrototypeSlice,384,385,0
-block_hint,ArrayPrototypeSlice,362,363,0
-block_hint,ArrayPrototypeSlice,344,345,1
-block_hint,ArrayPrototypeSlice,437,438,0
-block_hint,ArrayPrototypeSlice,413,414,0
-block_hint,ArrayPrototypeSlice,388,389,0
-block_hint,ArrayPrototypeSlice,360,361,0
-block_hint,ArrayPrototypeSlice,340,341,1
-block_hint,ArrayPrototypeSlice,309,310,0
-block_hint,ArrayPrototypeSlice,296,297,0
-block_hint,ArrayPrototypeSlice,284,285,0
-block_hint,ArrayPrototypeSlice,261,262,0
-block_hint,ArrayPrototypeSlice,238,239,1
-block_hint,ArrayPrototypeSlice,141,142,0
-block_hint,ArrayPrototypeSlice,143,144,0
-block_hint,ArrayPrototypeSlice,190,191,1
-block_hint,ArrayPrototypeSlice,211,212,0
-block_hint,ArrayPrototypeSlice,91,92,1
-block_hint,ArrayPrototypeSlice,22,23,1
-block_hint,ArrayPrototypeSlice,197,198,1
-block_hint,ArrayPrototypeSlice,134,135,0
-block_hint,ArrayPrototypeSlice,69,70,0
-block_hint,ArrayPrototypeSlice,93,94,1
-block_hint,ArrayPrototypeSlice,24,25,1
-block_hint,ArrayPrototypeSlice,95,96,1
-block_hint,ArrayPrototypeSlice,26,27,1
-block_hint,ArrayPrototypeSlice,28,29,1
-block_hint,ArrayPrototypeSlice,99,100,1
-block_hint,ArrayPrototypeSlice,42,43,1
-block_hint,ArrayPrototypeSlice,145,146,1
-block_hint,ArrayPrototypeSlice,174,175,0
-block_hint,ArrayPrototypeSlice,176,177,1
-block_hint,ArrayPrototypeSlice,157,158,0
-block_hint,ArrayPrototypeSlice,101,102,0
-block_hint,ArrayPrototypeSlice,32,33,1
-block_hint,ArrayPrototypeSlice,250,251,0
-block_hint,ArrayPrototypeSlice,221,222,1
-block_hint,ArrayPrototypeSlice,118,119,0
-block_hint,ArrayPrototypeSlice,57,58,0
-block_hint,ArrayPrototypeSlice,59,60,1
-block_hint,ArrayPrototypeSlice,75,76,1
-block_hint,ArrayPrototypeSlice,103,104,0
-block_hint,ArrayPrototypeSlice,232,233,0
-block_hint,ArrayPrototypeSlice,164,165,1
-block_hint,ArrayPrototypeSlice,71,72,1
-block_hint,ArrayPrototypeSlice,178,179,0
-block_hint,ArrayPrototypeSlice,160,161,0
-block_hint,ArrayPrototypeSlice,109,110,0
-block_hint,ArrayPrototypeSlice,44,45,1
-block_hint,ArrayPrototypeSlice,248,249,0
-block_hint,ArrayPrototypeSlice,217,218,1
-block_hint,ArrayPrototypeSlice,116,117,0
-block_hint,ArrayPrototypeSlice,49,50,0
-block_hint,ArrayPrototypeSlice,136,137,0
-block_hint,ArrayPrototypeSlice,73,74,1
-block_hint,ArraySome,88,89,1
-block_hint,ArraySome,31,32,0
-block_hint,ArraySome,119,120,1
-block_hint,ArraySome,115,116,1
-block_hint,ArraySome,93,94,1
-block_hint,ArraySome,95,96,1
-block_hint,ArraySome,101,102,0
-block_hint,ArraySome,108,109,1
-block_hint,ArraySome,110,111,1
-block_hint,ArraySome,99,100,1
-block_hint,ArraySome,56,57,0
-block_hint,ArraySome,105,106,1
-block_hint,ArraySome,77,78,1
-block_hint,ArraySome,52,53,1
-block_hint,ArraySome,13,14,1
-block_hint,ArraySome,62,63,1
-block_hint,ArraySome,27,28,0
-block_hint,ArraySome,79,80,1
-block_hint,ArraySome,81,82,0
-block_hint,ArraySome,58,59,0
-block_hint,ArraySome,54,55,0
-block_hint,ArraySome,19,20,0
-block_hint,ArraySome,21,22,1
-block_hint,ArraySome,66,67,0
-block_hint,ArrayPrototypeSplice,599,600,1
-block_hint,ArrayPrototypeSplice,447,448,1
-block_hint,ArrayPrototypeSplice,449,450,1
-block_hint,ArrayPrototypeSplice,1195,1196,0
-block_hint,ArrayPrototypeSplice,1177,1178,0
-block_hint,ArrayPrototypeSplice,1153,1154,0
-block_hint,ArrayPrototypeSplice,1132,1133,0
-block_hint,ArrayPrototypeSplice,1098,1099,0
-block_hint,ArrayPrototypeSplice,1066,1067,0
-block_hint,ArrayPrototypeSplice,1025,1026,0
-block_hint,ArrayPrototypeSplice,1096,1097,0
-block_hint,ArrayPrototypeSplice,1064,1065,0
-block_hint,ArrayPrototypeSplice,1021,1022,1
-block_hint,ArrayPrototypeSplice,934,935,0
-block_hint,ArrayPrototypeSplice,1087,1088,0
-block_hint,ArrayPrototypeSplice,1220,1221,0
-block_hint,ArrayPrototypeSplice,1212,1213,0
-block_hint,ArrayPrototypeSplice,1199,1200,0
-block_hint,ArrayPrototypeSplice,1187,1188,1
-block_hint,ArrayPrototypeSplice,1156,1157,0
-block_hint,ArrayPrototypeSplice,1136,1137,0
-block_hint,ArrayPrototypeSplice,1115,1116,0
-block_hint,ArrayPrototypeSplice,1072,1073,0
-block_hint,ArrayPrototypeSplice,1032,1033,0
-block_hint,ArrayPrototypeSplice,1070,1071,0
-block_hint,ArrayPrototypeSplice,1030,1031,0
-block_hint,ArrayPrototypeSplice,981,982,1
-block_hint,ArrayPrototypeSplice,871,872,0
-block_hint,ArrayPrototypeSplice,836,837,0
-block_hint,ArrayPrototypeSplice,804,805,0
-block_hint,ArrayPrototypeSplice,725,726,0
-block_hint,ArrayPrototypeSplice,671,672,0
-block_hint,ArrayPrototypeSplice,601,602,0
-block_hint,ArrayPrototypeSplice,507,508,1
-block_hint,ArrayPrototypeSplice,453,454,0
-block_hint,ArrayPrototypeSplice,223,224,0
-block_hint,ArrayPrototypeSplice,333,334,0
-block_hint,ArrayPrototypeSplice,335,336,0
-block_hint,ArrayPrototypeSplice,337,338,0
-block_hint,ArrayPrototypeSplice,225,226,1
-block_hint,ArrayPrototypeSplice,51,52,1
-block_hint,ArrayPrototypeSplice,339,340,1
-block_hint,ArrayPrototypeSplice,341,342,0
-block_hint,ArrayPrototypeSplice,343,344,0
-block_hint,ArrayPrototypeSplice,388,389,1
-block_hint,ArrayPrototypeSplice,227,228,0
-block_hint,ArrayPrototypeSplice,53,54,1
-block_hint,ArrayPrototypeSplice,244,245,0
-block_hint,ArrayPrototypeSplice,93,94,1
-block_hint,ArrayPrototypeSplice,552,553,0
-block_hint,ArrayPrototypeSplice,401,402,0
-block_hint,ArrayPrototypeSplice,584,585,0
-block_hint,ArrayPrototypeSplice,524,525,1
-block_hint,ArrayPrototypeSplice,346,347,0
-block_hint,ArrayPrototypeSplice,348,349,1
-block_hint,ArrayPrototypeSplice,235,236,0
-block_hint,ArrayPrototypeSplice,257,258,1
-block_hint,ArrayPrototypeSplice,105,106,0
-block_hint,ArrayPrototypeSplice,229,230,0
-block_hint,ArrayPrototypeSplice,330,331,0
-block_hint,ArrayPrototypeSplice,328,329,0
-block_hint,ArrayPrototypeSplice,392,393,1
-block_hint,ArrayPrototypeSplice,65,66,1
-block_hint,ArrayPrototypeSplice,293,294,0
-block_hint,ArrayPrototypeSplice,143,144,0
-block_hint,ArrayPrototypeSplice,67,68,0
-block_hint,ArrayPrototypeSplice,69,70,0
-block_hint,ArrayPrototypeSplice,262,263,1
-block_hint,ArrayPrototypeSplice,178,179,1
-block_hint,ArrayPrototypeSplice,326,327,0
-block_hint,ArrayPrototypeSplice,422,423,1
-block_hint,ArrayPrototypeSplice,264,265,1
-block_hint,ArrayPrototypeSplice,111,112,0
-block_hint,ArrayPrototypeSplice,424,425,1
-block_hint,ArrayPrototypeSplice,266,267,0
-block_hint,ArrayPrototypeSplice,113,114,0
-block_hint,ArrayPrototypeSplice,115,116,0
-block_hint,ArrayPrototypeSplice,182,183,1
-block_hint,ArrayPrototypeSplice,63,64,1
-block_hint,ArrayPrototypeSplice,131,132,1
-block_hint,ArrayPrototypeSplice,295,296,0
-block_hint,ArrayPrototypeSplice,71,72,1
-block_hint,ArrayPrototypeUnshift,185,186,1
-block_hint,ArrayPrototypeUnshift,158,159,1
-block_hint,ArrayPrototypeUnshift,140,141,1
-block_hint,ArrayPrototypeUnshift,96,97,1
-block_hint,ArrayPrototypeUnshift,55,56,1
-block_hint,ArrayPrototypeUnshift,10,11,1
-block_hint,ArrayPrototypeUnshift,129,130,1
-block_hint,ArrayPrototypeUnshift,98,99,0
-block_hint,ArrayPrototypeUnshift,57,58,0
-block_hint,ArrayPrototypeUnshift,100,101,1
-block_hint,ArrayPrototypeUnshift,59,60,0
-block_hint,ArrayPrototypeUnshift,20,21,1
-block_hint,ArrayPrototypeUnshift,22,23,0
-block_hint,ArrayBufferPrototypeGetByteLength,15,16,1
-block_hint,ArrayBufferPrototypeGetByteLength,10,11,1
-block_hint,ArrayBufferPrototypeGetByteLength,12,13,1
-block_hint,ArrayBufferPrototypeGetByteLength,6,7,0
-block_hint,ArrayBufferPrototypeGetByteLength,4,5,0
-block_hint,ArrayBufferIsView,8,9,1
-block_hint,ArrayBufferIsView,5,6,1
-block_hint,ArrayBufferIsView,3,4,1
-block_hint,ToInteger,4,5,1
-block_hint,ToInteger,6,7,0
-block_hint,BooleanConstructor,81,82,1
-block_hint,BooleanConstructor,74,75,0
-block_hint,BooleanConstructor,57,58,0
-block_hint,BooleanConstructor,68,69,1
-block_hint,BooleanConstructor,59,60,0
-block_hint,BooleanConstructor,70,71,0
-block_hint,BooleanConstructor,51,52,0
-block_hint,BooleanConstructor,7,8,1
-block_hint,ToString,20,21,0
-block_hint,ToString,34,35,0
-block_hint,ToString,67,68,0
-block_hint,ToString,83,84,0
-block_hint,ToString,25,26,1
-block_hint,ToString,10,11,1
-block_hint,ToString,40,41,1
-block_hint,ToString,50,51,1
-block_hint,ToString,54,55,0
-block_hint,StringPrototypeToString,9,10,1
-block_hint,StringPrototypeToString,11,12,1
-block_hint,StringPrototypeToString,7,8,0
-block_hint,StringPrototypeToString,5,6,1
-block_hint,StringPrototypeValueOf,9,10,1
-block_hint,StringPrototypeValueOf,11,12,1
-block_hint,StringPrototypeValueOf,5,6,1
-block_hint,StringPrototypeCharAt,51,52,1
-block_hint,StringPrototypeCharAt,37,38,1
-block_hint,StringPrototypeCharAt,28,29,1
-block_hint,StringPrototypeCharAt,33,34,0
-block_hint,StringPrototypeCharAt,12,13,0
-block_hint,StringPrototypeCharAt,14,15,0
-block_hint,StringPrototypeCharAt,19,20,1
-block_hint,StringPrototypeCharAt,43,44,0
-block_hint,StringPrototypeCharAt,6,7,1
-block_hint,StringPrototypeCharCodeAt,46,47,1
-block_hint,StringPrototypeCharCodeAt,41,42,1
-block_hint,StringPrototypeCharCodeAt,28,29,1
-block_hint,StringPrototypeCharCodeAt,39,40,0
-block_hint,StringPrototypeCharCodeAt,13,14,0
-block_hint,StringPrototypeCharCodeAt,15,16,0
-block_hint,StringPrototypeCharCodeAt,17,18,1
-block_hint,StringPrototypeCharCodeAt,32,33,0
-block_hint,StringPrototypeCodePointAt,79,80,1
-block_hint,StringPrototypeCodePointAt,53,54,1
-block_hint,StringPrototypeCodePointAt,43,44,1
-block_hint,StringPrototypeCodePointAt,51,52,0
-block_hint,StringPrototypeCodePointAt,20,21,0
-block_hint,StringPrototypeCodePointAt,22,23,0
-block_hint,StringPrototypeCodePointAt,8,9,0
-block_hint,StringPrototypeCodePointAt,65,66,0
-block_hint,StringPrototypeCodePointAt,45,46,0
-block_hint,StringPrototypeCodePointAt,14,15,1
-block_hint,StringPrototypeCodePointAt,16,17,1
-block_hint,StringPrototypeCodePointAt,10,11,0
-block_hint,StringPrototypeCodePointAt,72,73,0
-block_hint,StringPrototypeCodePointAt,48,49,0
-block_hint,StringPrototypeCodePointAt,18,19,1
-block_hint,StringConstructor,64,65,1
-block_hint,StringConstructor,49,50,0
-block_hint,StringConstructor,36,37,0
-block_hint,StringConstructor,78,79,1
-block_hint,StringConstructor,76,77,1
-block_hint,StringConstructor,73,74,1
-block_hint,StringConstructor,60,61,0
-block_hint,StringConstructor,62,63,1
-block_hint,StringConstructor,45,46,0
-block_hint,StringConstructor,24,25,0
-block_hint,StringConstructor,26,27,1
-block_hint,StringAddConvertLeft,47,48,1
-block_hint,StringAddConvertLeft,49,50,0
-block_hint,StringAddConvertLeft,82,83,1
-block_hint,StringAddConvertLeft,64,65,0
-block_hint,StringAddConvertLeft,43,44,0
-block_hint,StringAddConvertLeft,62,63,1
-block_hint,StringAddConvertRight,47,48,1
-block_hint,StringAddConvertRight,82,83,1
-block_hint,StringAddConvertRight,64,65,0
-block_hint,StringAddConvertRight,43,44,0
-block_hint,StringAddConvertRight,86,87,0
-block_hint,StringAddConvertRight,62,63,1
-block_hint,StringAddConvertRight,79,80,0
-block_hint,StringAddConvertRight,84,85,0
-block_hint,StringAddConvertRight,30,31,0
-block_hint,StringAddConvertRight,9,10,0
-block_hint,StringAddConvertRight,52,53,1
-block_hint,StringAddConvertRight,32,33,1
-block_hint,StringAddConvertRight,11,12,1
-block_hint,StringAddConvertRight,34,35,1
-block_hint,StringAddConvertRight,38,39,0
-block_hint,StringAddConvertRight,15,16,1
-block_hint,StringAddConvertRight,17,18,1
-block_hint,StringCharAt,27,28,0
-block_hint,StringCharAt,20,21,1
-block_hint,StringCharAt,5,6,1
-block_hint,FastNewFunctionContextFunction,11,12,1
-block_hint,FastNewFunctionContextFunction,4,5,1
-block_hint,FastNewFunctionContextFunction,6,7,0
-block_hint,CreateRegExpLiteral,6,7,0
-block_hint,CreateRegExpLiteral,8,9,1
-block_hint,CreateRegExpLiteral,10,11,1
-block_hint,CreateRegExpLiteral,2,3,1
-block_hint,CreateShallowArrayLiteral,20,21,1
-block_hint,CreateShallowArrayLiteral,22,23,1
-block_hint,CreateShallowArrayLiteral,35,36,0
-block_hint,CreateShallowArrayLiteral,11,12,0
-block_hint,CreateShallowArrayLiteral,42,43,1
-block_hint,CreateShallowArrayLiteral,39,40,1
-block_hint,CreateShallowArrayLiteral,24,25,1
-block_hint,CreateShallowArrayLiteral,13,14,0
-block_hint,CreateShallowArrayLiteral,15,16,1
-block_hint,CreateShallowArrayLiteral,47,48,1
-block_hint,CreateShallowArrayLiteral,45,46,0
-block_hint,CreateShallowArrayLiteral,30,31,1
-block_hint,CreateShallowArrayLiteral,5,6,1
-block_hint,CreateShallowArrayLiteral,18,19,1
-block_hint,CreateEmptyArrayLiteral,9,10,1
-block_hint,CreateEmptyArrayLiteral,3,4,1
-block_hint,CreateEmptyArrayLiteral,6,7,1
-block_hint,CreateShallowObjectLiteral,53,54,1
-block_hint,CreateShallowObjectLiteral,61,62,1
-block_hint,CreateShallowObjectLiteral,63,64,0
-block_hint,CreateShallowObjectLiteral,110,111,0
-block_hint,CreateShallowObjectLiteral,99,100,1
-block_hint,CreateShallowObjectLiteral,67,68,1
-block_hint,CreateShallowObjectLiteral,106,107,1
-block_hint,CreateShallowObjectLiteral,81,82,1
-block_hint,CreateShallowObjectLiteral,34,35,0
-block_hint,CreateShallowObjectLiteral,71,72,0
-block_hint,CreateShallowObjectLiteral,38,39,0
-block_hint,CreateShallowObjectLiteral,42,43,0
-block_hint,CreateShallowObjectLiteral,85,86,1
-block_hint,CreateShallowObjectLiteral,93,94,1
-block_hint,ObjectConstructor,27,28,1
-block_hint,ObjectConstructor,19,20,1
-block_hint,ObjectConstructor,29,30,0
-block_hint,ObjectConstructor,23,24,0
-block_hint,ObjectConstructor,17,18,0
-block_hint,ObjectConstructor,11,12,0
-block_hint,ObjectConstructor,4,5,1
-block_hint,ObjectConstructor,21,22,1
-block_hint,ObjectConstructor,6,7,0
-block_hint,CreateEmptyLiteralObject,4,5,1
-block_hint,CreateEmptyLiteralObject,11,12,1
-block_hint,CreateEmptyLiteralObject,6,7,0
-block_hint,NumberConstructor,18,19,1
-block_hint,NumberConstructor,6,7,1
-block_hint,NumberConstructor,28,29,0
-block_hint,NumberConstructor,12,13,0
-block_hint,NumberConstructor,34,35,0
-block_hint,NumberConstructor,32,33,1
-block_hint,NumberConstructor,30,31,1
-block_hint,NumberConstructor,2,3,1
-block_hint,NonNumberToNumber,14,15,0
-block_hint,NonNumberToNumber,3,4,1
-block_hint,NonNumberToNumeric,17,18,0
-block_hint,NonNumberToNumeric,14,15,0
-block_hint,NonNumberToNumeric,5,6,1
-block_hint,ToNumeric,5,6,1
-block_hint,ToNumeric,3,4,1
-block_hint,NumberToString,69,70,0
-block_hint,NumberToString,20,21,1
-block_hint,NumberToString,45,46,0
-block_hint,NumberToString,62,63,0
-block_hint,NumberToString,27,28,0
-block_hint,NumberToString,7,8,0
-block_hint,NumberToString,50,51,1
-block_hint,NumberToString,29,30,1
-block_hint,NumberToString,9,10,1
-block_hint,NumberToString,31,32,1
-block_hint,NumberToString,35,36,0
-block_hint,NumberToString,13,14,1
-block_hint,NumberToString,15,16,1
-block_hint,NumberToString,41,42,1
-block_hint,ToBoolean,18,19,1
-block_hint,ToBoolean,14,15,0
-block_hint,ToBoolean,20,21,0
-block_hint,ToBoolean,6,7,0
-block_hint,ToBooleanForBaselineJump,14,15,0
-block_hint,ToBooleanForBaselineJump,20,21,0
-block_hint,ToBooleanForBaselineJump,6,7,0
-block_hint,ToLength,19,20,0
-block_hint,ToLength,5,6,0
-block_hint,ToName,40,41,1
-block_hint,ToName,48,49,0
-block_hint,ToName,20,21,0
-block_hint,ToName,22,23,0
-block_hint,ToName,67,68,0
-block_hint,ToName,27,28,1
-block_hint,ToObject,45,46,1
-block_hint,ToObject,7,8,0
-block_hint,ToObject,38,39,0
-block_hint,ToObject,9,10,1
-block_hint,ToObject,53,54,0
-block_hint,ToObject,55,56,1
-block_hint,ToObject,48,49,0
-block_hint,ToObject,26,27,0
-block_hint,ToObject,28,29,1
-block_hint,NonPrimitiveToPrimitive_Default,5,6,1
-block_hint,NonPrimitiveToPrimitive_Number,5,6,1
-block_hint,NonPrimitiveToPrimitive_String,5,6,1
-block_hint,OrdinaryToPrimitive_Number,56,57,1
-block_hint,OrdinaryToPrimitive_Number,53,54,1
-block_hint,OrdinaryToPrimitive_Number,40,41,1
-block_hint,OrdinaryToPrimitive_Number,42,43,0
-block_hint,OrdinaryToPrimitive_Number,28,29,0
-block_hint,OrdinaryToPrimitive_Number,12,13,0
-block_hint,OrdinaryToPrimitive_Number,30,31,0
-block_hint,OrdinaryToPrimitive_Number,32,33,0
-block_hint,OrdinaryToPrimitive_Number,14,15,0
-block_hint,OrdinaryToPrimitive_Number,16,17,0
-block_hint,OrdinaryToPrimitive_Number,44,45,1
-block_hint,OrdinaryToPrimitive_Number,46,47,1
-block_hint,OrdinaryToPrimitive_Number,48,49,1
-block_hint,OrdinaryToPrimitive_Number,50,51,0
-block_hint,OrdinaryToPrimitive_Number,34,35,0
-block_hint,OrdinaryToPrimitive_Number,20,21,0
-block_hint,OrdinaryToPrimitive_String,56,57,1
-block_hint,OrdinaryToPrimitive_String,53,54,1
-block_hint,OrdinaryToPrimitive_String,40,41,1
-block_hint,OrdinaryToPrimitive_String,42,43,0
-block_hint,OrdinaryToPrimitive_String,28,29,0
-block_hint,OrdinaryToPrimitive_String,10,11,0
-block_hint,DataViewPrototypeGetByteLength,37,38,1
-block_hint,DataViewPrototypeGetByteLength,19,20,1
-block_hint,DataViewPrototypeGetByteLength,21,22,1
-block_hint,DataViewPrototypeGetByteLength,39,40,0
-block_hint,DataViewPrototypeGetByteLength,33,34,0
-block_hint,DataViewPrototypeGetByteLength,12,13,0
-block_hint,DataViewPrototypeGetByteLength,10,11,0
-block_hint,DataViewPrototypeGetFloat64,98,99,1
-block_hint,DataViewPrototypeGetFloat64,84,85,0
-block_hint,DataViewPrototypeGetFloat64,54,55,0
-block_hint,DataViewPrototypeGetFloat64,16,17,1
-block_hint,DataViewPrototypeGetFloat64,18,19,1
-block_hint,DataViewPrototypeGetFloat64,92,93,0
-block_hint,DataViewPrototypeGetFloat64,96,97,0
-block_hint,DataViewPrototypeGetFloat64,75,76,0
-block_hint,DataViewPrototypeGetFloat64,47,48,0
-block_hint,DataViewPrototypeGetFloat64,67,68,0
-block_hint,DataViewPrototypeGetFloat64,86,87,1
-block_hint,DataViewPrototypeGetFloat64,69,70,0
-block_hint,DataViewPrototypeGetFloat64,71,72,0
-block_hint,DataViewPrototypeGetFloat64,88,89,0
-block_hint,DataViewPrototypeGetFloat64,61,62,0
-block_hint,DataViewPrototypeGetFloat64,20,21,0
-block_hint,DataViewPrototypeGetFloat64,94,95,0
-block_hint,DataViewPrototypeGetFloat64,79,80,0
-block_hint,DataViewPrototypeGetFloat64,45,46,0
-block_hint,DataViewPrototypeGetFloat64,29,30,0
-block_hint,DataViewPrototypeGetFloat64,82,83,1
-block_hint,DataViewPrototypeGetFloat64,52,53,0
-block_hint,DataViewPrototypeGetFloat64,13,14,1
-block_hint,DataViewPrototypeSetFloat64,113,114,1
-block_hint,DataViewPrototypeSetFloat64,101,102,0
-block_hint,DataViewPrototypeSetFloat64,79,80,0
-block_hint,DataViewPrototypeSetFloat64,47,48,0
-block_hint,DataViewPrototypeSetFloat64,15,16,1
-block_hint,DataViewPrototypeSetFloat64,17,18,1
-block_hint,DataViewPrototypeSetFloat64,103,104,0
-block_hint,DataViewPrototypeSetFloat64,92,93,0
-block_hint,DataViewPrototypeSetFloat64,68,69,0
-block_hint,DataViewPrototypeSetFloat64,40,41,0
-block_hint,DataViewPrototypeSetFloat64,81,82,1
-block_hint,DataViewPrototypeSetFloat64,83,84,1
-block_hint,DataViewPrototypeSetFloat64,57,58,1
-block_hint,DataViewPrototypeSetFloat64,10,11,0
-block_hint,DataViewPrototypeSetFloat64,90,91,0
-block_hint,DataViewPrototypeSetFloat64,76,77,0
-block_hint,DataViewPrototypeSetFloat64,38,39,0
-block_hint,DataViewPrototypeSetFloat64,27,28,0
-block_hint,DataViewPrototypeSetFloat64,45,46,1
-block_hint,DataViewPrototypeSetFloat64,13,14,0
-block_hint,FunctionPrototypeHasInstance,35,36,1
-block_hint,FunctionPrototypeHasInstance,15,16,1
-block_hint,FunctionPrototypeHasInstance,17,18,1
-block_hint,FunctionPrototypeHasInstance,19,20,1
-block_hint,FunctionPrototypeHasInstance,33,34,1
-block_hint,FunctionPrototypeHasInstance,23,24,0
-block_hint,FunctionPrototypeHasInstance,13,14,0
-block_hint,FunctionPrototypeHasInstance,31,32,0
-block_hint,FunctionPrototypeHasInstance,25,26,0
-block_hint,FunctionPrototypeHasInstance,27,28,0
-block_hint,FastFunctionPrototypeBind,91,92,1
-block_hint,FastFunctionPrototypeBind,88,89,1
-block_hint,FastFunctionPrototypeBind,75,76,0
-block_hint,FastFunctionPrototypeBind,29,30,0
-block_hint,FastFunctionPrototypeBind,31,32,0
-block_hint,FastFunctionPrototypeBind,7,8,1
-block_hint,FastFunctionPrototypeBind,53,54,1
-block_hint,FastFunctionPrototypeBind,65,66,0
-block_hint,FastFunctionPrototypeBind,69,70,1
-block_hint,FastFunctionPrototypeBind,41,42,1
-block_hint,FastFunctionPrototypeBind,9,10,1
-block_hint,FastFunctionPrototypeBind,56,57,1
-block_hint,FastFunctionPrototypeBind,67,68,0
-block_hint,FastFunctionPrototypeBind,79,80,1
-block_hint,FastFunctionPrototypeBind,71,72,1
-block_hint,FastFunctionPrototypeBind,43,44,1
-block_hint,FastFunctionPrototypeBind,11,12,1
-block_hint,FastFunctionPrototypeBind,35,36,1
-block_hint,FastFunctionPrototypeBind,81,82,1
-block_hint,FastFunctionPrototypeBind,73,74,0
-block_hint,FastFunctionPrototypeBind,27,28,1
-block_hint,ForInNext,2,3,1
-block_hint,ForInNext,7,8,1
-block_hint,CallIteratorWithFeedback,56,57,1
-block_hint,CallIteratorWithFeedback,58,59,1
-block_hint,CallIteratorWithFeedback,26,27,1
-block_hint,CallIteratorWithFeedback,28,29,1
-block_hint,CallIteratorWithFeedback,30,31,1
-block_hint,CallIteratorWithFeedback,10,11,1
-block_hint,MathAbs,14,15,1
-block_hint,MathAbs,16,17,1
-block_hint,MathAbs,23,24,0
-block_hint,MathAbs,9,10,0
-block_hint,MathAbs,11,12,1
-block_hint,MathCeil,12,13,1
-block_hint,MathFloor,12,13,1
-block_hint,MathFloor,14,15,1
-block_hint,MathFloor,35,36,1
-block_hint,MathFloor,25,26,0
-block_hint,MathFloor,21,22,1
-block_hint,MathFloor,19,20,0
-block_hint,MathFloor,7,8,0
-block_hint,MathRound,12,13,1
-block_hint,MathRound,14,15,1
-block_hint,MathRound,32,33,0
-block_hint,MathRound,36,37,0
-block_hint,MathRound,28,29,0
-block_hint,MathRound,21,22,1
-block_hint,MathRound,7,8,0
-block_hint,MathRound,9,10,1
-block_hint,MathPow,12,13,1
-block_hint,MathPow,14,15,1
-block_hint,MathPow,18,19,1
-block_hint,MathPow,23,24,0
-block_hint,MathPow,7,8,0
-block_hint,MathPow,9,10,1
-block_hint,MathMax,13,14,1
-block_hint,MathMax,19,20,0
-block_hint,MathMax,17,18,1
-block_hint,MathMax,24,25,0
-block_hint,MathMax,8,9,0
-block_hint,MathMax,10,11,1
-block_hint,MathMin,13,14,1
-block_hint,MathMin,19,20,0
-block_hint,MathMin,17,18,1
-block_hint,MathMin,24,25,0
-block_hint,MathMin,8,9,0
-block_hint,MathMin,10,11,1
-block_hint,MathAtan2,34,35,1
-block_hint,MathAtan2,32,33,1
-block_hint,MathAtan2,23,24,1
-block_hint,MathAtan2,5,6,1
-block_hint,MathCos,25,26,1
-block_hint,MathCos,23,24,1
-block_hint,MathCos,9,10,1
-block_hint,MathCos,3,4,0
-block_hint,MathCos,5,6,1
-block_hint,MathExp,25,26,1
-block_hint,MathExp,20,21,1
-block_hint,MathExp,23,24,1
-block_hint,MathExp,16,17,1
-block_hint,MathExp,13,14,0
-block_hint,MathExp,5,6,1
-block_hint,MathFround,25,26,1
-block_hint,MathFround,23,24,1
-block_hint,MathFround,5,6,1
-block_hint,MathLog,25,26,1
-block_hint,MathLog,23,24,1
-block_hint,MathLog,13,14,0
-block_hint,MathLog,5,6,1
-block_hint,MathSin,25,26,1
-block_hint,MathSin,23,24,1
-block_hint,MathSin,9,10,0
-block_hint,MathSin,11,12,0
-block_hint,MathSin,3,4,0
-block_hint,MathSin,5,6,1
-block_hint,MathSign,16,17,1
-block_hint,MathSign,11,12,0
-block_hint,MathSign,7,8,0
-block_hint,MathSign,2,3,0
-block_hint,MathSign,4,5,1
-block_hint,MathSqrt,25,26,1
-block_hint,MathSqrt,23,24,1
-block_hint,MathSqrt,11,12,0
-block_hint,MathSqrt,3,4,0
-block_hint,MathSqrt,5,6,1
-block_hint,MathTan,25,26,1
-block_hint,MathTan,20,21,0
-block_hint,MathTan,16,17,0
-block_hint,MathTanh,25,26,1
-block_hint,MathTanh,20,21,1
-block_hint,MathTanh,23,24,1
-block_hint,MathTanh,16,17,1
-block_hint,MathTanh,13,14,0
-block_hint,MathTanh,5,6,1
-block_hint,MathRandom,15,16,1
-block_hint,MathRandom,3,4,1
-block_hint,MathRandom,17,18,1
-block_hint,MathRandom,5,6,1
-block_hint,MathRandom,7,8,1
-block_hint,MathRandom,9,10,1
-block_hint,MathRandom,13,14,1
-block_hint,NumberPrototypeToString,71,72,1
-block_hint,NumberPrototypeToString,113,114,0
-block_hint,NumberPrototypeToString,51,52,0
-block_hint,NumberPrototypeToString,59,60,1
-block_hint,NumberPrototypeToString,183,184,0
-block_hint,NumberPrototypeToString,154,155,0
-block_hint,NumberPrototypeToString,121,122,0
-block_hint,NumberPrototypeToString,180,181,0
-block_hint,NumberPrototypeToString,167,168,0
-block_hint,NumberPrototypeToString,85,86,0
-block_hint,NumberPrototypeToString,176,177,0
-block_hint,NumberPrototypeToString,97,98,0
-block_hint,NumberPrototypeToString,171,172,0
-block_hint,NumberPrototypeToString,129,130,0
-block_hint,NumberPrototypeToString,109,110,1
-block_hint,NumberPrototypeToString,42,43,1
-block_hint,NumberPrototypeToString,49,50,1
-block_hint,NumberPrototypeToString,73,74,0
-block_hint,NumberPrototypeToString,27,28,0
-block_hint,NumberPrototypeToString,116,117,1
-block_hint,NumberPrototypeToString,75,76,1
-block_hint,NumberPrototypeToString,29,30,1
-block_hint,NumberPrototypeToString,95,96,0
-block_hint,NumberPrototypeToString,111,112,0
-block_hint,NumberPrototypeToString,35,36,1
-block_hint,NumberPrototypeToString,132,133,1
-block_hint,NumberPrototypeToString,37,38,0
-block_hint,NumberPrototypeToString,134,135,1
-block_hint,NumberPrototypeToString,39,40,0
-block_hint,NumberPrototypeToString,162,163,1
-block_hint,NumberPrototypeToString,164,165,0
-block_hint,NumberPrototypeToString,139,140,0
-block_hint,NumberPrototypeToString,105,106,0
-block_hint,NumberPrototypeToString,148,149,0
-block_hint,NumberPrototypeToString,152,153,0
-block_hint,NumberPrototypeToString,79,80,0
-block_hint,NumberPrototypeToString,17,18,0
-block_hint,NumberPrototypeToString,119,120,1
-block_hint,NumberPrototypeToString,81,82,1
-block_hint,NumberPrototypeToString,19,20,1
-block_hint,NumberPrototypeToString,83,84,1
-block_hint,NumberPrototypeToString,89,90,0
-block_hint,NumberPrototypeToString,23,24,1
-block_hint,NumberPrototypeToString,25,26,1
-block_hint,NumberIsInteger,13,14,1
-block_hint,NumberParseFloat,14,15,1
-block_hint,NumberParseFloat,2,3,1
-block_hint,NumberParseFloat,12,13,0
-block_hint,NumberParseFloat,17,18,0
-block_hint,NumberParseFloat,4,5,1
-block_hint,ParseInt,27,28,1
-block_hint,ParseInt,13,14,0
-block_hint,ParseInt,6,7,1
-block_hint,ParseInt,31,32,0
-block_hint,ParseInt,25,26,1
-block_hint,ParseInt,23,24,1
-block_hint,ParseInt,10,11,0
-block_hint,NumberParseInt,3,4,1
-block_hint,Add,66,67,1
-block_hint,Add,24,25,0
-block_hint,Add,68,69,0
-block_hint,Add,35,36,0
-block_hint,Add,40,41,0
-block_hint,Subtract,24,25,0
-block_hint,Subtract,9,10,0
-block_hint,Subtract,22,23,0
-block_hint,Subtract,7,8,0
-block_hint,Divide,50,51,0
-block_hint,Divide,23,24,0
-block_hint,Divide,9,10,0
-block_hint,Divide,44,45,1
-block_hint,Divide,48,49,1
-block_hint,Divide,33,34,0
-block_hint,Divide,7,8,1
-block_hint,CreateObjectWithoutProperties,52,53,1
-block_hint,CreateObjectWithoutProperties,42,43,1
-block_hint,CreateObjectWithoutProperties,34,35,0
-block_hint,CreateObjectWithoutProperties,17,18,1
-block_hint,CreateObjectWithoutProperties,56,57,0
-block_hint,CreateObjectWithoutProperties,44,45,0
-block_hint,CreateObjectWithoutProperties,48,49,1
-block_hint,CreateObjectWithoutProperties,36,37,0
-block_hint,CreateObjectWithoutProperties,38,39,0
-block_hint,CreateObjectWithoutProperties,5,6,1
-block_hint,CreateObjectWithoutProperties,40,41,1
-block_hint,CreateObjectWithoutProperties,7,8,1
-block_hint,CreateObjectWithoutProperties,9,10,1
-block_hint,CreateObjectWithoutProperties,11,12,1
-block_hint,CreateObjectWithoutProperties,13,14,1
-block_hint,CreateObjectWithoutProperties,15,16,1
-block_hint,CreateObjectWithoutProperties,20,21,0
-block_hint,CreateObjectWithoutProperties,50,51,1
-block_hint,ObjectGetPrototypeOf,11,12,1
-block_hint,ObjectGetPrototypeOf,8,9,1
-block_hint,ObjectGetPrototypeOf,5,6,1
-block_hint,ObjectGetPrototypeOf,2,3,0
-block_hint,ObjectSetPrototypeOf,18,19,1
-block_hint,ObjectSetPrototypeOf,4,5,0
-block_hint,ObjectSetPrototypeOf,13,14,1
-block_hint,ObjectSetPrototypeOf,20,21,0
-block_hint,ObjectSetPrototypeOf,15,16,0
-block_hint,ObjectSetPrototypeOf,6,7,1
-block_hint,ObjectSetPrototypeOf,8,9,0
-block_hint,ObjectSetPrototypeOf,10,11,0
-block_hint,ObjectPrototypeToString,3,4,1
-block_hint,ObjectPrototypeValueOf,8,9,1
-block_hint,ObjectPrototypeValueOf,5,6,1
-block_hint,ObjectPrototypeValueOf,2,3,1
-block_hint,FulfillPromise,32,33,1
-block_hint,FulfillPromise,15,16,0
-block_hint,FulfillPromise,34,35,1
-block_hint,FulfillPromise,17,18,0
-block_hint,FulfillPromise,19,20,1
-block_hint,FulfillPromise,21,22,0
-block_hint,PerformPromiseThen,101,102,1
-block_hint,PerformPromiseThen,57,58,0
-block_hint,PerformPromiseThen,103,104,1
-block_hint,PerformPromiseThen,59,60,0
-block_hint,PerformPromiseThen,61,62,1
-block_hint,PerformPromiseThen,63,64,0
-block_hint,PerformPromiseThen,18,19,1
-block_hint,PerformPromiseThen,72,73,1
-block_hint,PerformPromiseThen,25,26,1
-block_hint,PerformPromiseThen,93,94,1
-block_hint,PerformPromiseThen,45,46,0
-block_hint,PerformPromiseThen,95,96,1
-block_hint,PerformPromiseThen,47,48,0
-block_hint,PerformPromiseThen,49,50,1
-block_hint,PerformPromiseThen,51,52,0
-block_hint,PerformPromiseThen,20,21,1
-block_hint,PerformPromiseThen,115,116,1
-block_hint,PromiseFulfillReactionJob,22,23,0
-block_hint,PromiseFulfillReactionJob,2,3,1
-block_hint,ResolvePromise,29,30,0
-block_hint,ResolvePromise,31,32,0
-block_hint,ResolvePromise,15,16,1
-block_hint,ResolvePromise,47,48,0
-block_hint,ResolvePromise,33,34,0
-block_hint,ResolvePromise,6,7,1
-block_hint,ResolvePromise,17,18,0
-block_hint,ResolvePromise,19,20,1
-block_hint,ResolvePromise,53,54,1
-block_hint,ResolvePromise,49,50,0
-block_hint,ResolvePromise,23,24,0
-block_hint,ProxyConstructor,30,31,1
-block_hint,ProxyConstructor,10,11,0
-block_hint,ProxyConstructor,22,23,1
-block_hint,ProxyConstructor,24,25,0
-block_hint,ProxyConstructor,26,27,1
-block_hint,ProxyConstructor,28,29,0
-block_hint,ProxyConstructor,7,8,1
-block_hint,ProxyConstructor,17,18,1
-block_hint,ProxyConstructor,5,6,1
-block_hint,ProxyConstructor,12,13,1
-block_hint,ProxyGetProperty,153,154,1
-block_hint,ProxyGetProperty,34,35,0
-block_hint,ProxyGetProperty,10,11,0
-block_hint,ProxyGetProperty,89,90,0
-block_hint,ProxyGetProperty,91,92,0
-block_hint,ProxyGetProperty,85,86,1
-block_hint,ProxyGetProperty,87,88,1
-block_hint,ProxyGetProperty,176,177,1
-block_hint,ProxyGetProperty,180,181,0
-block_hint,ProxyGetProperty,118,119,0
-block_hint,ProxyGetProperty,40,41,1
-block_hint,ProxyGetProperty,114,115,1
-block_hint,ProxyGetProperty,24,25,0
-block_hint,ProxyGetProperty,26,27,1
-block_hint,ProxyGetProperty,208,209,1
-block_hint,ProxyGetProperty,198,199,0
-block_hint,ProxyGetProperty,149,150,1
-block_hint,ProxyGetProperty,28,29,0
-block_hint,ProxyGetProperty,167,168,0
-block_hint,ProxyGetProperty,187,188,1
-block_hint,ProxyGetProperty,131,132,1
-block_hint,ProxyGetProperty,169,170,1
-block_hint,ProxyGetProperty,171,172,0
-block_hint,ProxyGetProperty,60,61,0
-block_hint,ReflectGet,20,21,1
-block_hint,ReflectGet,15,16,0
-block_hint,ReflectGet,5,6,1
-block_hint,ReflectGet,7,8,0
-block_hint,ReflectGet,18,19,0
-block_hint,ReflectGet,9,10,0
-block_hint,ReflectHas,8,9,1
-block_hint,ReflectHas,5,6,1
-block_hint,ReflectHas,3,4,0
-block_hint,RegExpPrototypeExec,202,203,1
-block_hint,RegExpPrototypeExec,130,131,1
-block_hint,RegExpPrototypeExec,132,133,1
-block_hint,RegExpPrototypeExec,204,205,1
-block_hint,RegExpPrototypeExec,166,167,1
-block_hint,RegExpPrototypeExec,16,17,1
-block_hint,RegExpPrototypeExec,148,149,1
-block_hint,RegExpPrototypeExec,150,151,0
-block_hint,RegExpPrototypeExec,152,153,0
-block_hint,RegExpPrototypeExec,208,209,0
-block_hint,RegExpPrototypeExec,154,155,0
-block_hint,RegExpPrototypeExec,18,19,1
-block_hint,RegExpPrototypeExec,185,186,0
-block_hint,RegExpPrototypeExec,134,135,0
-block_hint,RegExpPrototypeExec,159,160,0
-block_hint,RegExpPrototypeExec,236,237,0
-block_hint,RegExpPrototypeExec,227,228,1
-block_hint,RegExpPrototypeExec,212,213,1
-block_hint,RegExpPrototypeExec,171,172,1
-block_hint,RegExpPrototypeExec,161,162,0
-block_hint,RegExpPrototypeExec,73,74,0
-block_hint,RegExpPrototypeExec,24,25,1
-block_hint,RegExpPrototypeExec,138,139,1
-block_hint,RegExpPrototypeExec,26,27,1
-block_hint,RegExpPrototypeExec,190,191,0
-block_hint,RegExpPrototypeExec,140,141,1
-block_hint,RegExpPrototypeExec,242,243,1
-block_hint,RegExpPrototypeExec,214,215,0
-block_hint,RegExpPrototypeExec,179,180,1
-block_hint,RegExpPrototypeExec,77,78,0
-block_hint,RegExpPrototypeExec,34,35,1
-block_hint,RegExpPrototypeExec,144,145,1
-block_hint,RegExpPrototypeExec,116,117,1
-block_hint,RegExpPrototypeExec,156,157,1
-block_hint,RegExpMatchFast,357,358,0
-block_hint,RegExpMatchFast,289,290,1
-block_hint,RegExpMatchFast,32,33,1
-block_hint,RegExpMatchFast,326,327,0
-block_hint,RegExpMatchFast,234,235,0
-block_hint,RegExpMatchFast,283,284,0
-block_hint,RegExpMatchFast,448,449,0
-block_hint,RegExpMatchFast,392,393,1
-block_hint,RegExpMatchFast,291,292,1
-block_hint,RegExpMatchFast,285,286,0
-block_hint,RegExpMatchFast,129,130,0
-block_hint,RegExpMatchFast,236,237,1
-block_hint,RegExpMatchFast,238,239,1
-block_hint,RegExpMatchFast,40,41,1
-block_hint,RegExpMatchFast,331,332,0
-block_hint,RegExpMatchFast,240,241,1
-block_hint,RegExpMatchFast,456,457,1
-block_hint,RegExpMatchFast,394,395,0
-block_hint,RegExpMatchFast,320,321,1
-block_hint,RegExpMatchFast,133,134,0
-block_hint,RegExpMatchFast,48,49,1
-block_hint,RegExpMatchFast,244,245,1
-block_hint,RegExpMatchFast,180,181,1
-block_hint,RegExpMatchFast,259,260,1
-block_hint,RegExpMatchFast,297,298,0
-block_hint,RegExpMatchFast,82,83,1
-block_hint,RegExpMatchFast,84,85,1
-block_hint,RegExpMatchFast,301,302,0
-block_hint,RegExpMatchFast,344,345,0
-block_hint,RegExpMatchFast,379,380,0
-block_hint,RegExpMatchFast,299,300,0
-block_hint,RegExpMatchFast,86,87,1
-block_hint,RegExpMatchFast,340,341,0
-block_hint,RegExpMatchFast,248,249,0
-block_hint,RegExpMatchFast,275,276,0
-block_hint,RegExpMatchFast,190,191,1
-block_hint,RegExpMatchFast,450,451,0
-block_hint,RegExpMatchFast,436,437,1
-block_hint,RegExpMatchFast,390,391,1
-block_hint,RegExpMatchFast,303,304,1
-block_hint,RegExpMatchFast,277,278,0
-block_hint,RegExpMatchFast,117,118,0
-block_hint,RegExpMatchFast,342,343,0
-block_hint,RegExpMatchFast,250,251,0
-block_hint,RegExpMatchFast,92,93,1
-block_hint,RegExpMatchFast,362,363,1
-block_hint,RegExpMatchFast,252,253,0
-block_hint,RegExpMatchFast,102,103,1
-block_hint,RegExpMatchFast,306,307,0
-block_hint,RegExpMatchFast,177,178,0
-block_hint,RegExpMatchFast,104,105,0
-block_hint,RegExpMatchFast,106,107,0
-block_hint,RegExpMatchFast,198,199,1
-block_hint,RegExpMatchFast,317,318,0
-block_hint,RegExpMatchFast,108,109,1
-block_hint,RegExpMatchFast,187,188,1
-block_hint,RegExpMatchFast,346,347,0
-block_hint,RegExpMatchFast,94,95,1
-block_hint,RegExpMatchFast,96,97,1
-block_hint,RegExpMatchFast,175,176,0
-block_hint,RegExpMatchFast,98,99,0
-block_hint,RegExpMatchFast,100,101,0
-block_hint,RegExpMatchFast,218,219,1
-block_hint,RegExpMatchFast,309,310,0
-block_hint,RegExpMatchFast,220,221,0
-block_hint,RegExpReplace,261,262,1
-block_hint,RegExpReplace,299,300,1
-block_hint,RegExpReplace,251,252,1
-block_hint,RegExpReplace,149,150,0
-block_hint,RegExpReplace,22,23,1
-block_hint,RegExpReplace,209,210,1
-block_hint,RegExpReplace,151,152,0
-block_hint,RegExpReplace,24,25,1
-block_hint,RegExpReplace,211,212,1
-block_hint,RegExpReplace,213,214,1
-block_hint,RegExpReplace,172,173,1
-block_hint,RegExpReplace,179,180,0
-block_hint,RegExpReplace,257,258,0
-block_hint,RegExpReplace,50,51,1
-block_hint,RegExpReplace,229,230,0
-block_hint,RegExpReplace,163,164,0
-block_hint,RegExpReplace,183,184,0
-block_hint,RegExpReplace,109,110,1
-block_hint,RegExpReplace,375,376,0
-block_hint,RegExpReplace,359,360,1
-block_hint,RegExpReplace,293,294,1
-block_hint,RegExpReplace,203,204,1
-block_hint,RegExpReplace,185,186,0
-block_hint,RegExpReplace,81,82,0
-block_hint,RegExpReplace,56,57,1
-block_hint,RegExpReplace,58,59,1
-block_hint,RegExpReplace,60,61,1
-block_hint,RegExpReplace,167,168,0
-block_hint,RegExpReplace,62,63,1
-block_hint,RegExpReplace,233,234,1
-block_hint,RegExpReplace,169,170,0
-block_hint,RegExpReplace,64,65,1
-block_hint,RegExpReplace,380,381,1
-block_hint,RegExpReplace,371,372,1
-block_hint,RegExpReplace,326,327,0
-block_hint,RegExpReplace,285,286,0
-block_hint,RegExpReplace,218,219,0
-block_hint,RegExpReplace,100,101,1
-block_hint,RegExpReplace,26,27,1
-block_hint,RegExpReplace,28,29,1
-block_hint,RegExpReplace,102,103,1
-block_hint,RegExpReplace,30,31,0
-block_hint,RegExpReplace,32,33,1
-block_hint,RegExpReplace,34,35,1
-block_hint,RegExpReplace,72,73,1
-block_hint,RegExpReplace,44,45,1
-block_hint,RegExpReplace,161,162,1
-block_hint,RegExpReplace,46,47,1
-block_hint,RegExpReplace,48,49,1
-block_hint,RegExpReplace,236,237,1
-block_hint,RegExpReplace,176,177,1
-block_hint,RegExpReplace,153,154,1
-block_hint,RegExpReplace,36,37,1
-block_hint,RegExpReplace,155,156,1
-block_hint,RegExpReplace,40,41,0
-block_hint,RegExpReplace,254,255,1
-block_hint,RegExpReplace,196,197,1
-block_hint,RegExpReplace,42,43,1
-block_hint,RegExpSearchFast,50,51,1
-block_hint,RegExpSearchFast,6,7,1
-block_hint,RegExpSearchFast,56,57,0
-block_hint,RegExpSearchFast,36,37,0
-block_hint,RegExpSearchFast,46,47,0
-block_hint,RegExpSearchFast,78,79,0
-block_hint,RegExpSearchFast,67,68,1
-block_hint,RegExpSearchFast,60,61,0
-block_hint,RegExpSearchFast,52,53,1
-block_hint,RegExpSearchFast,58,59,1
-block_hint,RegExpSearchFast,44,45,1
-block_hint,RegExpPrototypeSourceGetter,12,13,1
-block_hint,RegExpPrototypeSourceGetter,9,10,1
-block_hint,RegExpPrototypeSourceGetter,4,5,1
-block_hint,RegExpSplit,179,180,1
-block_hint,RegExpSplit,88,89,0
-block_hint,RegExpSplit,22,23,1
-block_hint,RegExpSplit,149,150,1
-block_hint,RegExpSplit,40,41,1
-block_hint,RegExpSplit,24,25,1
-block_hint,RegExpSplit,185,186,1
-block_hint,RegExpSplit,101,102,1
-block_hint,RegExpSplit,136,137,0
-block_hint,RegExpSplit,26,27,1
-block_hint,RegExpSplit,205,206,0
-block_hint,RegExpSplit,138,139,0
-block_hint,RegExpSplit,162,163,0
-block_hint,RegExpSplit,44,45,0
-block_hint,RegExpSplit,108,109,1
-block_hint,RegExpSplit,322,323,0
-block_hint,RegExpSplit,314,315,1
-block_hint,RegExpSplit,278,279,1
-block_hint,RegExpSplit,181,182,1
-block_hint,RegExpSplit,225,226,0
-block_hint,RegExpSplit,164,165,0
-block_hint,RegExpSplit,46,47,0
-block_hint,RegExpSplit,307,308,0
-block_hint,RegExpSplit,262,263,0
-block_hint,RegExpSplit,207,208,0
-block_hint,RegExpSplit,92,93,0
-block_hint,RegExpSplit,227,228,1
-block_hint,RegExpSplit,194,195,1
-block_hint,RegExpSplit,50,51,0
-block_hint,RegExpSplit,167,168,1
-block_hint,RegExpSplit,141,142,0
-block_hint,RegExpSplit,32,33,1
-block_hint,RegExpSplit,58,59,0
-block_hint,RegExpSplit,281,282,0
-block_hint,RegExpSplit,246,247,0
-block_hint,RegExpSplit,151,152,0
-block_hint,RegExpSplit,241,242,0
-block_hint,RegExpSplit,212,213,0
-block_hint,RegExpSplit,96,97,0
-block_hint,RegExpSplit,232,233,1
-block_hint,RegExpSplit,201,202,1
-block_hint,RegExpSplit,74,75,0
-block_hint,RegExpSplit,175,176,1
-block_hint,RegExpSplit,38,39,1
-block_hint,RegExpSplit,219,220,0
-block_hint,RegExpSplit,244,245,0
-block_hint,RegExpSplit,217,218,0
-block_hint,RegExpSplit,99,100,0
-block_hint,RegExpSplit,276,277,1
-block_hint,RegExpSplit,260,261,1
-block_hint,RegExpSplit,177,178,1
-block_hint,RegExpSplit,103,104,1
-block_hint,RegExpPrototypeTest,110,111,1
-block_hint,RegExpPrototypeTest,50,51,1
-block_hint,RegExpPrototypeTest,52,53,0
-block_hint,RegExpPrototypeTest,134,135,1
-block_hint,RegExpPrototypeTest,54,55,0
-block_hint,RegExpPrototypeTest,8,9,1
-block_hint,RegExpPrototypeTest,93,94,1
-block_hint,RegExpPrototypeTest,56,57,0
-block_hint,RegExpPrototypeTest,10,11,1
-block_hint,RegExpPrototypeTest,141,142,1
-block_hint,RegExpPrototypeTest,126,127,1
-block_hint,RegExpPrototypeTest,85,86,1
-block_hint,RegExpPrototypeTest,14,15,1
-block_hint,RegExpPrototypeTest,99,100,0
-block_hint,RegExpPrototypeTest,59,60,0
-block_hint,RegExpPrototypeTest,73,74,0
-block_hint,RegExpPrototypeTest,42,43,0
-block_hint,RegExpPrototypeTest,154,155,0
-block_hint,RegExpPrototypeTest,152,153,1
-block_hint,RegExpPrototypeTest,132,133,1
-block_hint,RegExpPrototypeTest,87,88,1
-block_hint,RegExpPrototypeTest,75,76,0
-block_hint,RegExpPrototypeTest,29,30,0
-block_hint,RegExpPrototypeTest,37,38,1
-block_hint,RegExpPrototypeTest,65,66,1
-block_hint,RegExpPrototypeTestFast,48,49,1
-block_hint,RegExpPrototypeTestFast,7,8,1
-block_hint,RegExpPrototypeTestFast,56,57,0
-block_hint,RegExpPrototypeTestFast,36,37,0
-block_hint,RegExpPrototypeTestFast,44,45,0
-block_hint,RegExpPrototypeTestFast,75,76,0
-block_hint,RegExpPrototypeTestFast,73,74,1
-block_hint,RegExpPrototypeTestFast,66,67,1
-block_hint,RegExpPrototypeTestFast,50,51,1
-block_hint,RegExpPrototypeTestFast,46,47,0
-block_hint,RegExpPrototypeTestFast,19,20,0
-block_hint,RegExpPrototypeTestFast,26,27,1
-block_hint,RegExpPrototypeTestFast,42,43,1
-block_hint,StringPrototypeEndsWith,288,289,1
-block_hint,StringPrototypeEndsWith,271,272,0
-block_hint,StringPrototypeEndsWith,251,252,1
-block_hint,StringPrototypeEndsWith,235,236,1
-block_hint,StringPrototypeEndsWith,174,175,1
-block_hint,StringPrototypeEndsWith,278,279,1
-block_hint,StringPrototypeEndsWith,267,268,1
-block_hint,StringPrototypeEndsWith,253,254,1
-block_hint,StringPrototypeEndsWith,244,245,1
-block_hint,StringPrototypeEndsWith,179,180,1
-block_hint,StringPrototypeEndsWith,29,30,0
-block_hint,StringPrototypeEndsWith,68,69,0
-block_hint,StringPrototypeEndsWith,70,71,0
-block_hint,StringPrototypeEndsWith,185,186,1
-block_hint,StringPrototypeEndsWith,84,85,0
-block_hint,StringPrototypeEndsWith,86,87,0
-block_hint,StringPrototypeEndsWith,164,165,0
-block_hint,StringPrototypeEndsWith,47,48,0
-block_hint,StringPrototypeEndsWith,144,145,0
-block_hint,StringPrototypeEndsWith,35,36,0
-block_hint,StringPrototypeEndsWith,49,50,0
-block_hint,StringPrototypeEndsWith,116,117,0
-block_hint,StringPrototypeIndexOf,39,40,1
-block_hint,StringPrototypeIndexOf,36,37,0
-block_hint,StringPrototypeIndexOf,19,20,1
-block_hint,StringPrototypeIndexOf,8,9,1
-block_hint,StringPrototypeIndexOf,28,29,1
-block_hint,StringPrototypeIndexOf,21,22,1
-block_hint,StringPrototypeIndexOf,33,34,0
-block_hint,StringPrototypeIndexOf,24,25,0
-block_hint,StringPrototypeIndexOf,11,12,0
-block_hint,StringPrototypeIterator,15,16,1
-block_hint,StringPrototypeIterator,12,13,1
-block_hint,StringPrototypeIterator,10,11,1
-block_hint,StringPrototypeIterator,3,4,1
-block_hint,StringPrototypeIterator,8,9,1
-block_hint,StringIteratorPrototypeNext,56,57,1
-block_hint,StringIteratorPrototypeNext,38,39,1
-block_hint,StringIteratorPrototypeNext,40,41,1
-block_hint,StringIteratorPrototypeNext,13,14,0
-block_hint,StringIteratorPrototypeNext,74,75,0
-block_hint,StringIteratorPrototypeNext,64,65,1
-block_hint,StringIteratorPrototypeNext,54,55,0
-block_hint,StringIteratorPrototypeNext,61,62,1
-block_hint,StringIteratorPrototypeNext,50,51,1
-block_hint,StringIteratorPrototypeNext,11,12,1
-block_hint,StringIteratorPrototypeNext,20,21,1
-block_hint,StringIteratorPrototypeNext,9,10,1
-block_hint,StringIteratorPrototypeNext,17,18,1
-block_hint,StringPrototypeMatch,67,68,1
-block_hint,StringPrototypeMatch,39,40,0
-block_hint,StringPrototypeMatch,99,100,1
-block_hint,StringPrototypeMatch,88,89,0
-block_hint,StringPrototypeMatch,69,70,1
-block_hint,StringPrototypeMatch,49,50,0
-block_hint,StringPrototypeMatch,6,7,1
-block_hint,StringPrototypeMatch,71,72,1
-block_hint,StringPrototypeMatch,51,52,0
-block_hint,StringPrototypeMatch,8,9,1
-block_hint,StringPrototypeMatch,83,84,1
-block_hint,StringPrototypeMatch,75,76,1
-block_hint,StringPrototypeMatch,43,44,1
-block_hint,StringPrototypeSearch,67,68,1
-block_hint,StringPrototypeSearch,39,40,0
-block_hint,StringPrototypeSearch,99,100,1
-block_hint,StringPrototypeSearch,88,89,0
-block_hint,StringPrototypeSearch,69,70,1
-block_hint,StringPrototypeSearch,49,50,0
-block_hint,StringPrototypeSearch,6,7,1
-block_hint,StringPrototypeSearch,71,72,1
-block_hint,StringPrototypeSearch,51,52,0
-block_hint,StringPrototypeSearch,8,9,1
-block_hint,StringPrototypeSearch,83,84,1
-block_hint,StringPrototypeSearch,75,76,1
-block_hint,StringPrototypeSearch,43,44,1
-block_hint,StringPrototypeSlice,179,180,1
-block_hint,StringPrototypeSlice,140,141,1
-block_hint,StringPrototypeSlice,107,108,1
-block_hint,StringPrototypeSlice,201,202,0
-block_hint,StringPrototypeSlice,187,188,0
-block_hint,StringPrototypeSlice,211,212,0
-block_hint,StringPrototypeSlice,208,209,0
-block_hint,StringPrototypeSlice,195,196,1
-block_hint,StringPrototypeSlice,191,192,1
-block_hint,StringPrototypeSlice,199,200,0
-block_hint,StringPrototypeSlice,182,183,0
-block_hint,StringPrototypeSlice,142,143,1
-block_hint,StringPrototypeSlice,31,32,0
-block_hint,StringPrototypeSlice,68,69,1
-block_hint,StringPrototypeSlice,63,64,1
-block_hint,StringPrototypeSlice,61,62,1
-block_hint,StringPrototypeSlice,128,129,0
-block_hint,StringPrototypeSlice,91,92,1
-block_hint,StringPrototypeSlice,21,22,0
-block_hint,StringPrototypeSlice,23,24,0
-block_hint,StringPrototypeSlice,154,155,1
-block_hint,StringPrototypeSlice,132,133,1
-block_hint,StringPrototypeSlice,40,41,0
-block_hint,StringPrototypeSlice,89,90,1
-block_hint,StringPrototypeSlice,19,20,0
-block_hint,StringPrototypeSlice,156,157,1
-block_hint,StringPrototypeSlice,134,135,1
-block_hint,StringPrototypeSlice,44,45,0
-block_hint,StringPrototypeSlice,166,167,0
-block_hint,StringPrototypeSlice,152,153,0
-block_hint,StringPrototypeSlice,36,37,1
-block_hint,StringPrototypeSlice,33,34,0
-block_hint,StringPrototypeStartsWith,288,289,1
-block_hint,StringPrototypeStartsWith,271,272,0
-block_hint,StringPrototypeStartsWith,251,252,1
-block_hint,StringPrototypeStartsWith,235,236,1
-block_hint,StringPrototypeStartsWith,174,175,1
-block_hint,StringPrototypeStartsWith,278,279,1
-block_hint,StringPrototypeStartsWith,267,268,1
-block_hint,StringPrototypeStartsWith,253,254,1
-block_hint,StringPrototypeStartsWith,244,245,1
-block_hint,StringPrototypeStartsWith,179,180,1
-block_hint,StringPrototypeStartsWith,29,30,0
-block_hint,StringPrototypeStartsWith,68,69,0
-block_hint,StringPrototypeStartsWith,70,71,0
-block_hint,StringPrototypeStartsWith,185,186,1
-block_hint,StringPrototypeStartsWith,84,85,0
-block_hint,StringPrototypeStartsWith,86,87,0
-block_hint,StringPrototypeStartsWith,164,165,0
-block_hint,StringPrototypeStartsWith,47,48,0
-block_hint,StringPrototypeStartsWith,35,36,0
-block_hint,StringPrototypeStartsWith,49,50,1
-block_hint,StringPrototypeStartsWith,116,117,1
-block_hint,StringPrototypeSubstr,175,176,1
-block_hint,StringPrototypeSubstr,145,146,1
-block_hint,StringPrototypeSubstr,107,108,1
-block_hint,StringPrototypeSubstr,194,195,0
-block_hint,StringPrototypeSubstr,183,184,0
-block_hint,StringPrototypeSubstr,204,205,0
-block_hint,StringPrototypeSubstr,201,202,0
-block_hint,StringPrototypeSubstr,178,179,0
-block_hint,StringPrototypeSubstr,160,161,0
-block_hint,StringPrototypeSubstr,120,121,0
-block_hint,StringPrototypeSubstr,31,32,0
-block_hint,StringPrototypeSubstr,61,62,1
-block_hint,StringPrototypeSubstr,133,134,0
-block_hint,StringPrototypeSubstr,89,90,1
-block_hint,StringPrototypeSubstr,19,20,0
-block_hint,StringPrototypeSubstr,153,154,1
-block_hint,StringPrototypeSubstr,139,140,1
-block_hint,StringPrototypeSubstr,44,45,0
-block_hint,StringPrototypeSubstr,165,166,0
-block_hint,StringPrototypeSubstr,36,37,1
-block_hint,StringPrototypeSubstr,33,34,0
-block_hint,StringPrototypeSubstring,159,160,1
-block_hint,StringPrototypeSubstring,131,132,1
-block_hint,StringPrototypeSubstring,103,104,1
-block_hint,StringPrototypeSubstring,194,195,0
-block_hint,StringPrototypeSubstring,181,182,0
-block_hint,StringPrototypeSubstring,198,199,0
-block_hint,StringPrototypeSubstring,192,193,0
-block_hint,StringPrototypeSubstring,183,184,0
-block_hint,StringPrototypeSubstring,179,180,0
-block_hint,StringPrototypeSubstring,172,173,0
-block_hint,StringPrototypeSubstring,163,164,0
-block_hint,StringPrototypeSubstring,135,136,0
-block_hint,StringPrototypeSubstring,93,94,0
-block_hint,StringPrototypeSubstring,65,66,1
-block_hint,StringPrototypeSubstring,105,106,1
-block_hint,StringPrototypeSubstring,58,59,1
-block_hint,StringPrototypeSubstring,119,120,0
-block_hint,StringPrototypeSubstring,115,116,1
-block_hint,StringPrototypeSubstring,85,86,1
-block_hint,StringPrototypeSubstring,17,18,0
-block_hint,StringPrototypeSubstring,141,142,1
-block_hint,StringPrototypeSubstring,125,126,1
-block_hint,StringPrototypeSubstring,42,43,0
-block_hint,StringPrototypeSubstring,54,55,0
-block_hint,StringPrototypeSubstring,150,151,0
-block_hint,StringPrototypeSubstring,108,109,1
-block_hint,StringPrototypeSubstring,34,35,1
-block_hint,StringPrototypeTrim,470,471,1
-block_hint,StringPrototypeTrim,271,272,1
-block_hint,StringPrototypeTrim,186,187,1
-block_hint,StringPrototypeTrim,188,189,0
-block_hint,StringPrototypeTrim,444,445,0
-block_hint,StringPrototypeTrim,273,274,1
-block_hint,StringPrototypeTrim,156,157,0
-block_hint,StringPrototypeTrim,158,159,0
-block_hint,StringPrototypeTrim,251,252,0
-block_hint,StringPrototypeTrim,63,64,1
-block_hint,StringPrototypeTrim,366,367,1
-block_hint,StringPrototypeTrim,83,84,0
-block_hint,StringPrototypeTrim,253,254,0
-block_hint,StringPrototypeTrim,65,66,1
-block_hint,StringPrototypeTrim,392,393,0
-block_hint,StringPrototypeTrim,394,395,1
-block_hint,StringPrototypeTrim,128,129,0
-block_hint,StringPrototypeTrim,85,86,0
-block_hint,StringPrototypeTrim,92,93,0
-block_hint,StringPrototypeTrim,293,294,0
-block_hint,StringPrototypeTrim,178,179,1
-block_hint,StringPrototypeTrim,438,439,0
-block_hint,StringPrototypeTrim,432,433,0
-block_hint,StringPrototypeTrim,257,258,1
-block_hint,StringPrototypeTrim,69,70,0
-block_hint,StringPrototypeTrim,71,72,0
-block_hint,StringPrototypeTrim,480,481,1
-block_hint,StringPrototypeTrim,454,455,1
-block_hint,StringPrototypeTrim,132,133,0
-block_hint,StringPrototypeTrim,152,153,0
-block_hint,StringPrototypeTrim,154,155,0
-block_hint,StringPrototypeTrim,239,240,0
-block_hint,StringPrototypeTrim,47,48,1
-block_hint,StringPrototypeTrim,306,307,1
-block_hint,StringPrototypeTrim,241,242,0
-block_hint,StringPrototypeTrim,49,50,1
-block_hint,StringPrototypeTrim,334,335,1
-block_hint,StringPrototypeTrim,81,82,0
-block_hint,StringPrototypeTrim,87,88,0
-block_hint,StringPrototypeTrim,291,292,1
-block_hint,StringPrototypeTrim,172,173,1
-block_hint,StringPrototypeTrim,436,437,0
-block_hint,StringPrototypeTrim,428,429,1
-block_hint,StringPrototypeTrim,243,244,1
-block_hint,StringPrototypeTrim,51,52,0
-block_hint,StringPrototypeTrim,474,475,1
-block_hint,StringPrototypeTrim,448,449,1
-block_hint,StringPrototypeTrim,112,113,0
-block_hint,StringPrototypeTrim,490,491,0
-block_hint,StringPrototypeTrim,295,296,1
-block_hint,StringPrototypeTrim,97,98,1
-block_hint,StringPrototypeTrim,89,90,0
-block_hint,SymbolPrototypeToString,9,10,1
-block_hint,SymbolPrototypeToString,11,12,1
-block_hint,SymbolPrototypeToString,5,6,0
-block_hint,SymbolPrototypeToString,7,8,1
-block_hint,CreateTypedArray,610,611,0
-block_hint,CreateTypedArray,638,639,0
-block_hint,CreateTypedArray,576,577,0
-block_hint,CreateTypedArray,485,486,0
-block_hint,CreateTypedArray,356,357,1
-block_hint,CreateTypedArray,358,359,1
-block_hint,CreateTypedArray,677,678,0
-block_hint,CreateTypedArray,520,521,1
-block_hint,CreateTypedArray,518,519,1
-block_hint,CreateTypedArray,407,408,1
-block_hint,CreateTypedArray,586,587,0
-block_hint,CreateTypedArray,662,663,0
-block_hint,CreateTypedArray,584,585,0
-block_hint,CreateTypedArray,491,492,0
-block_hint,CreateTypedArray,424,425,0
-block_hint,CreateTypedArray,426,427,0
-block_hint,CreateTypedArray,410,411,0
-block_hint,CreateTypedArray,105,106,1
-block_hint,CreateTypedArray,107,108,1
-block_hint,CreateTypedArray,412,413,1
-block_hint,CreateTypedArray,109,110,1
-block_hint,CreateTypedArray,111,112,1
-block_hint,CreateTypedArray,641,642,0
-block_hint,CreateTypedArray,683,684,1
-block_hint,CreateTypedArray,660,661,1
-block_hint,CreateTypedArray,522,523,0
-block_hint,CreateTypedArray,558,559,1
-block_hint,CreateTypedArray,384,385,0
-block_hint,CreateTypedArray,261,262,0
-block_hint,CreateTypedArray,416,417,0
-block_hint,CreateTypedArray,123,124,1
-block_hint,CreateTypedArray,125,126,1
-block_hint,CreateTypedArray,305,306,1
-block_hint,CreateTypedArray,307,308,1
-block_hint,CreateTypedArray,525,526,0
-block_hint,CreateTypedArray,560,561,1
-block_hint,CreateTypedArray,386,387,0
-block_hint,CreateTypedArray,277,278,0
-block_hint,CreateTypedArray,420,421,0
-block_hint,CreateTypedArray,137,138,1
-block_hint,CreateTypedArray,139,140,1
-block_hint,CreateTypedArray,512,513,0
-block_hint,CreateTypedArray,514,515,0
-block_hint,CreateTypedArray,671,672,0
-block_hint,CreateTypedArray,531,532,1
-block_hint,CreateTypedArray,529,530,1
-block_hint,CreateTypedArray,428,429,1
-block_hint,CreateTypedArray,541,542,0
-block_hint,CreateTypedArray,533,534,0
-block_hint,CreateTypedArray,431,432,0
-block_hint,CreateTypedArray,165,166,1
-block_hint,CreateTypedArray,365,366,0
-block_hint,CreateTypedArray,167,168,1
-block_hint,CreateTypedArray,433,434,1
-block_hint,CreateTypedArray,169,170,1
-block_hint,CreateTypedArray,171,172,1
-block_hint,CreateTypedArray,648,649,0
-block_hint,CreateTypedArray,686,687,1
-block_hint,CreateTypedArray,665,666,1
-block_hint,CreateTypedArray,535,536,0
-block_hint,CreateTypedArray,554,555,1
-block_hint,CreateTypedArray,380,381,0
-block_hint,CreateTypedArray,229,230,0
-block_hint,CreateTypedArray,437,438,0
-block_hint,CreateTypedArray,183,184,1
-block_hint,CreateTypedArray,185,186,1
-block_hint,CreateTypedArray,187,188,1
-block_hint,CreateTypedArray,318,319,1
-block_hint,CreateTypedArray,320,321,1
-block_hint,CreateTypedArray,538,539,0
-block_hint,CreateTypedArray,556,557,1
-block_hint,CreateTypedArray,382,383,0
-block_hint,CreateTypedArray,245,246,0
-block_hint,CreateTypedArray,441,442,0
-block_hint,CreateTypedArray,199,200,1
-block_hint,CreateTypedArray,201,202,1
-block_hint,CreateTypedArray,548,549,0
-block_hint,CreateTypedArray,543,544,0
-block_hint,CreateTypedArray,500,501,0
-block_hint,CreateTypedArray,371,372,0
-block_hint,CreateTypedArray,453,454,1
-block_hint,CreateTypedArray,375,376,1
-block_hint,CreateTypedArray,503,504,0
-block_hint,CreateTypedArray,373,374,1
-block_hint,CreateTypedArray,455,456,0
-block_hint,CreateTypedArray,688,689,0
-block_hint,CreateTypedArray,650,651,0
-block_hint,CreateTypedArray,564,565,1
-block_hint,CreateTypedArray,562,563,1
-block_hint,CreateTypedArray,466,467,1
-block_hint,CreateTypedArray,656,657,0
-block_hint,CreateTypedArray,574,575,0
-block_hint,CreateTypedArray,481,482,0
-block_hint,CreateTypedArray,340,341,1
-block_hint,CreateTypedArray,654,655,0
-block_hint,CreateTypedArray,572,573,0
-block_hint,CreateTypedArray,477,478,0
-block_hint,CreateTypedArray,290,291,0
-block_hint,CreateTypedArray,634,635,0
-block_hint,CreateTypedArray,347,348,0
-block_hint,CreateTypedArray,349,350,0
-block_hint,CreateTypedArray,396,397,0
-block_hint,CreateTypedArray,398,399,0
-block_hint,CreateTypedArray,342,343,1
-block_hint,CreateTypedArray,352,353,0
-block_hint,CreateTypedArray,345,346,0
-block_hint,CreateTypedArray,507,508,0
-block_hint,CreateTypedArray,552,553,1
-block_hint,CreateTypedArray,378,379,0
-block_hint,CreateTypedArray,213,214,0
-block_hint,CreateTypedArray,567,568,0
-block_hint,CreateTypedArray,391,392,0
-block_hint,CreateTypedArray,60,61,1
-block_hint,CreateTypedArray,62,63,1
-block_hint,TypedArrayFrom,234,235,1
-block_hint,TypedArrayFrom,214,215,0
-block_hint,TypedArrayFrom,195,196,1
-block_hint,TypedArrayFrom,154,155,1
-block_hint,TypedArrayFrom,87,88,1
-block_hint,TypedArrayFrom,89,90,1
-block_hint,TypedArrayFrom,184,185,1
-block_hint,TypedArrayFrom,176,177,0
-block_hint,TypedArrayFrom,139,140,0
-block_hint,TypedArrayFrom,100,101,1
-block_hint,TypedArrayFrom,102,103,1
-block_hint,TypedArrayFrom,248,249,1
-block_hint,TypedArrayFrom,250,251,0
-block_hint,TypedArrayFrom,236,237,0
-block_hint,TypedArrayFrom,223,224,1
-block_hint,TypedArrayFrom,225,226,0
-block_hint,TypedArrayFrom,204,205,1
-block_hint,TypedArrayFrom,186,187,1
-block_hint,TypedArrayFrom,164,165,0
-block_hint,TypedArrayFrom,166,167,0
-block_hint,TypedArrayFrom,244,245,0
-block_hint,TypedArrayFrom,217,218,1
-block_hint,TypedArrayFrom,178,179,0
-block_hint,TypedArrayFrom,106,107,1
-block_hint,TypedArrayFrom,108,109,1
-block_hint,TypedArrayFrom,171,172,0
-block_hint,TypedArrayFrom,144,145,0
-block_hint,TypedArrayFrom,118,119,0
-block_hint,TypedArrayFrom,55,56,0
-block_hint,TypedArrayFrom,150,151,0
-block_hint,TypedArrayFrom,57,58,0
-block_hint,TypedArrayFrom,133,134,1
-block_hint,TypedArrayFrom,59,60,0
-block_hint,TypedArrayPrototypeSet,189,190,1
-block_hint,TypedArrayPrototypeSet,104,105,1
-block_hint,TypedArrayPrototypeSet,106,107,1
-block_hint,TypedArrayPrototypeSet,241,242,1
-block_hint,TypedArrayPrototypeSet,274,275,0
-block_hint,TypedArrayPrototypeSet,260,261,0
-block_hint,TypedArrayPrototypeSet,248,249,0
-block_hint,TypedArrayPrototypeSet,216,217,0
-block_hint,TypedArrayPrototypeSet,153,154,0
-block_hint,TypedArrayPrototypeSet,191,192,0
-block_hint,TypedArrayPrototypeSet,193,194,0
-block_hint,TypedArrayPrototypeSet,163,164,0
-block_hint,TypedArrayPrototypeSet,270,271,0
-block_hint,TypedArrayPrototypeSet,257,258,1
-block_hint,TypedArrayPrototypeSet,236,237,1
-block_hint,TypedArrayPrototypeSet,204,205,0
-block_hint,TypedArrayPrototypeSet,206,207,0
-block_hint,TypedArrayPrototypeSet,167,168,0
-block_hint,TypedArrayPrototypeSet,157,158,0
-block_hint,TypedArrayPrototypeSet,123,124,0
-block_hint,TypedArrayPrototypeSet,179,180,1
-block_hint,TypedArrayPrototypeSet,91,92,0
-block_hint,TypedArrayPrototypeSet,81,82,0
-block_hint,TypedArrayPrototypeSet,83,84,0
-block_hint,TypedArrayPrototypeSet,85,86,0
-block_hint,TypedArrayPrototypeSet,87,88,0
-block_hint,TypedArrayPrototypeSet,181,182,0
-block_hint,TypedArrayPrototypeSet,144,145,0
-block_hint,TypedArrayPrototypeSubArray,129,130,1
-block_hint,TypedArrayPrototypeSubArray,82,83,1
-block_hint,TypedArrayPrototypeSubArray,84,85,1
-block_hint,TypedArrayPrototypeSubArray,159,160,1
-block_hint,TypedArrayPrototypeSubArray,151,152,0
-block_hint,TypedArrayPrototypeSubArray,131,132,0
-block_hint,TypedArrayPrototypeSubArray,133,134,0
-block_hint,TypedArrayPrototypeSubArray,210,211,0
-block_hint,TypedArrayPrototypeSubArray,190,191,0
-block_hint,TypedArrayPrototypeSubArray,170,171,0
-block_hint,TypedArrayPrototypeSubArray,218,219,0
-block_hint,TypedArrayPrototypeSubArray,205,206,0
-block_hint,TypedArrayPrototypeSubArray,196,197,0
-block_hint,TypedArrayPrototypeSubArray,186,187,1
-block_hint,TypedArrayPrototypeSubArray,154,155,0
-block_hint,TypedArrayPrototypeSubArray,137,138,0
-block_hint,TypedArrayPrototypeSubArray,165,166,0
-block_hint,TypedArrayPrototypeSubArray,216,217,0
-block_hint,TypedArrayPrototypeSubArray,203,204,0
-block_hint,TypedArrayPrototypeSubArray,192,193,0
-block_hint,TypedArrayPrototypeSubArray,149,150,1
-block_hint,TypedArrayPrototypeSubArray,124,125,0
-block_hint,TypedArrayPrototypeSubArray,102,103,0
-block_hint,TypedArrayPrototypeSubArray,104,105,0
-block_hint,TypedArrayPrototypeSubArray,115,116,0
-block_hint,TypedArrayPrototypeSubArray,63,64,1
-block_hint,TypedArrayPrototypeSubArray,65,66,1
-block_hint,TypedArrayPrototypeSubArray,145,146,1
-block_hint,TypedArrayPrototypeSubArray,80,81,0
-block_hint,TypedArrayPrototypeSubArray,117,118,0
-block_hint,TypedArrayPrototypeSubArray,90,91,1
-block_hint,TypedArrayPrototypeSubArray,92,93,1
-block_hint,TypedArrayPrototypeSubArray,119,120,0
-block_hint,TypedArrayPrototypeSubArray,94,95,1
-block_hint,TypedArrayPrototypeSubArray,96,97,1
-block_hint,TypedArrayPrototypeSubArray,69,70,1
-block_hint,TypedArrayPrototypeSubArray,98,99,1
-block_hint,TypedArrayPrototypeSubArray,100,101,1
-block_hint,TypedArrayPrototypeSubArray,73,74,0
-block_hint,NewSloppyArgumentsElements,44,45,0
-block_hint,NewSloppyArgumentsElements,24,25,0
-block_hint,NewSloppyArgumentsElements,33,34,0
-block_hint,NewSloppyArgumentsElements,14,15,0
-block_hint,NewSloppyArgumentsElements,16,17,0
-block_hint,NewSloppyArgumentsElements,46,47,1
-block_hint,NewSloppyArgumentsElements,36,37,1
-block_hint,NewSloppyArgumentsElements,18,19,0
-block_hint,NewSloppyArgumentsElements,48,49,0
-block_hint,NewStrictArgumentsElements,9,10,0
-block_hint,NewStrictArgumentsElements,20,21,0
-block_hint,NewRestArgumentsElements,25,26,0
-block_hint,NewRestArgumentsElements,11,12,0
-block_hint,NewRestArgumentsElements,16,17,0
-block_hint,NewRestArgumentsElements,5,6,0
-block_hint,NewRestArgumentsElements,7,8,0
-block_hint,NewRestArgumentsElements,23,24,1
-block_hint,NewRestArgumentsElements,19,20,1
-block_hint,NewRestArgumentsElements,9,10,0
-block_hint,NewRestArgumentsElements,21,22,0
-block_hint,FastNewSloppyArguments,41,42,1
-block_hint,FastNewSloppyArguments,43,44,0
-block_hint,FastNewSloppyArguments,101,102,1
-block_hint,FastNewSloppyArguments,81,82,0
-block_hint,FastNewSloppyArguments,47,48,0
-block_hint,FastNewSloppyArguments,19,20,0
-block_hint,FastNewSloppyArguments,21,22,0
-block_hint,FastNewSloppyArguments,71,72,1
-block_hint,FastNewSloppyArguments,55,56,1
-block_hint,FastNewSloppyArguments,23,24,0
-block_hint,FastNewSloppyArguments,73,74,0
-block_hint,FastNewSloppyArguments,45,46,0
-block_hint,FastNewSloppyArguments,13,14,0
-block_hint,FastNewSloppyArguments,15,16,0
-block_hint,FastNewSloppyArguments,75,76,1
-block_hint,FastNewSloppyArguments,59,60,1
-block_hint,FastNewSloppyArguments,17,18,0
-block_hint,FastNewSloppyArguments,61,62,0
-block_hint,FastNewSloppyArguments,27,28,1
-block_hint,FastNewSloppyArguments,29,30,0
-block_hint,FastNewSloppyArguments,31,32,0
-block_hint,FastNewSloppyArguments,77,78,1
-block_hint,FastNewSloppyArguments,63,64,1
-block_hint,FastNewSloppyArguments,33,34,0
-block_hint,FastNewSloppyArguments,35,36,1
-block_hint,FastNewSloppyArguments,53,54,1
-block_hint,FastNewSloppyArguments,25,26,1
-block_hint,FastNewSloppyArguments,51,52,1
-block_hint,FastNewStrictArguments,16,17,1
-block_hint,FastNewStrictArguments,18,19,0
-block_hint,FastNewStrictArguments,20,21,0
-block_hint,FastNewStrictArguments,7,8,0
-block_hint,FastNewStrictArguments,9,10,0
-block_hint,FastNewStrictArguments,31,32,1
-block_hint,FastNewStrictArguments,25,26,1
-block_hint,FastNewStrictArguments,11,12,0
-block_hint,FastNewStrictArguments,27,28,0
-block_hint,FastNewStrictArguments,13,14,1
-block_hint,FastNewStrictArguments,23,24,1
-block_hint,FastNewRestArguments,16,17,1
-block_hint,FastNewRestArguments,18,19,0
-block_hint,FastNewRestArguments,34,35,1
-block_hint,FastNewRestArguments,7,8,1
-block_hint,FastNewRestArguments,21,22,0
-block_hint,FastNewRestArguments,9,10,0
-block_hint,FastNewRestArguments,11,12,0
-block_hint,FastNewRestArguments,32,33,1
-block_hint,FastNewRestArguments,25,26,1
-block_hint,FastNewRestArguments,13,14,0
-block_hint,FastNewRestArguments,27,28,0
-block_hint,FastNewRestArguments,23,24,1
-block_hint,StringSlowFlatten,35,36,1
-block_hint,StringSlowFlatten,20,21,1
-block_hint,StringSlowFlatten,4,5,0
-block_hint,StringSlowFlatten,30,31,1
-block_hint,StringSlowFlatten,22,23,1
-block_hint,StringIndexOf,160,161,0
-block_hint,StringIndexOf,112,113,0
-block_hint,StringIndexOf,125,126,1
-block_hint,StringIndexOf,91,92,0
-block_hint,StringIndexOf,154,155,0
-block_hint,StringIndexOf,117,118,1
-block_hint,StringIndexOf,44,45,0
-block_hint,StringIndexOf,46,47,0
-block_hint,StringIndexOf,133,134,0
-block_hint,StringIndexOf,76,77,0
-block_hint,StringIndexOf,78,79,0
-block_hint,StringIndexOf,72,73,0
-block_hint,StringIndexOf,74,75,0
-block_hint,StringIndexOf,40,41,0
-block_hint,StringIndexOf,42,43,0
-block_hint,StringIndexOf,127,128,1
-block_hint,StringIndexOf,56,57,0
-block_hint,StringIndexOf,58,59,0
-block_hint,Load_FastSmiElements_0,2,3,1
-block_hint,Load_FastObjectElements_0,2,3,1
-block_hint,Store_FastSmiElements_0,2,3,1
-block_hint,Store_FastObjectElements_0,2,3,1
-block_hint,SortCompareDefault,8,9,1
-block_hint,SortCompareDefault,20,21,1
-block_hint,SortCompareDefault,17,18,1
-block_hint,SortCompareDefault,14,15,1
-block_hint,SortCompareDefault,11,12,1
-block_hint,SortCompareDefault,6,7,1
-block_hint,SortCompareUserFn,9,10,0
-block_hint,SortCompareUserFn,5,6,0
-block_hint,Copy,17,18,1
-block_hint,Copy,9,10,1
-block_hint,Copy,11,12,1
-block_hint,Copy,5,6,1
-block_hint,Copy,7,8,1
-block_hint,MergeAt,13,14,1
-block_hint,MergeAt,15,16,1
-block_hint,MergeAt,17,18,1
-block_hint,MergeAt,19,20,1
-block_hint,MergeAt,140,141,0
-block_hint,MergeAt,29,30,1
-block_hint,MergeAt,31,32,0
-block_hint,MergeAt,33,34,1
-block_hint,MergeAt,35,36,1
-block_hint,MergeAt,123,124,0
-block_hint,MergeAt,236,237,1
-block_hint,MergeAt,225,226,1
-block_hint,MergeAt,69,70,1
-block_hint,MergeAt,71,72,1
-block_hint,MergeAt,150,151,1
-block_hint,MergeAt,103,104,0
-block_hint,MergeAt,73,74,1
-block_hint,MergeAt,75,76,1
-block_hint,MergeAt,227,228,0
-block_hint,MergeAt,81,82,1
-block_hint,MergeAt,83,84,1
-block_hint,MergeAt,198,199,0
-block_hint,MergeAt,134,135,0
-block_hint,MergeAt,77,78,1
-block_hint,MergeAt,79,80,1
-block_hint,MergeAt,196,197,1
-block_hint,MergeAt,132,133,0
-block_hint,MergeAt,182,183,1
-block_hint,MergeAt,85,86,1
-block_hint,MergeAt,87,88,1
-block_hint,MergeAt,89,90,1
-block_hint,MergeAt,194,195,1
-block_hint,MergeAt,97,98,1
-block_hint,MergeAt,99,100,1
-block_hint,MergeAt,230,231,1
-block_hint,MergeAt,116,117,0
-block_hint,MergeAt,232,233,1
-block_hint,MergeAt,220,221,1
-block_hint,MergeAt,37,38,1
-block_hint,MergeAt,39,40,1
-block_hint,MergeAt,154,155,1
-block_hint,MergeAt,109,110,0
-block_hint,MergeAt,41,42,1
-block_hint,MergeAt,43,44,1
-block_hint,MergeAt,222,223,0
-block_hint,MergeAt,49,50,1
-block_hint,MergeAt,51,52,1
-block_hint,MergeAt,202,203,0
-block_hint,MergeAt,138,139,0
-block_hint,MergeAt,45,46,1
-block_hint,MergeAt,47,48,1
-block_hint,MergeAt,200,201,1
-block_hint,MergeAt,136,137,0
-block_hint,MergeAt,111,112,0
-block_hint,MergeAt,165,166,1
-block_hint,MergeAt,53,54,1
-block_hint,MergeAt,207,208,0
-block_hint,MergeAt,169,170,0
-block_hint,MergeAt,55,56,1
-block_hint,MergeAt,57,58,1
-block_hint,MergeAt,143,144,1
-block_hint,MergeAt,59,60,1
-block_hint,MergeAt,173,174,0
-block_hint,MergeAt,61,62,1
-block_hint,MergeAt,63,64,1
-block_hint,MergeAt,113,114,0
-block_hint,MergeAt,192,193,1
-block_hint,MergeAt,65,66,1
-block_hint,MergeAt,67,68,1
-block_hint,GallopLeft,11,12,1
-block_hint,GallopLeft,47,48,0
-block_hint,GallopLeft,15,16,1
-block_hint,GallopLeft,63,64,0
-block_hint,GallopLeft,29,30,0
-block_hint,GallopLeft,41,42,0
-block_hint,GallopLeft,13,14,1
-block_hint,GallopLeft,65,66,0
-block_hint,GallopLeft,31,32,0
-block_hint,GallopLeft,39,40,0
-block_hint,GallopLeft,17,18,1
-block_hint,GallopLeft,61,62,0
-block_hint,GallopRight,11,12,1
-block_hint,GallopRight,47,48,0
-block_hint,GallopRight,35,36,1
-block_hint,GallopRight,15,16,1
-block_hint,GallopRight,63,64,0
-block_hint,GallopRight,29,30,0
-block_hint,GallopRight,41,42,0
-block_hint,GallopRight,13,14,1
-block_hint,GallopRight,65,66,0
-block_hint,GallopRight,31,32,0
-block_hint,GallopRight,39,40,0
-block_hint,GallopRight,17,18,1
-block_hint,GallopRight,61,62,0
-block_hint,ArrayTimSort,120,121,0
-block_hint,ArrayTimSort,240,241,0
-block_hint,ArrayTimSort,227,228,0
-block_hint,ArrayTimSort,122,123,0
-block_hint,ArrayTimSort,163,164,0
-block_hint,ArrayTimSort,140,141,0
-block_hint,ArrayTimSort,33,34,1
-block_hint,ArrayTimSort,93,94,0
-block_hint,ArrayTimSort,95,96,0
-block_hint,ArrayTimSort,143,144,0
-block_hint,ArrayTimSort,35,36,1
-block_hint,ArrayTimSort,37,38,1
-block_hint,ArrayTimSort,214,215,0
-block_hint,ArrayTimSort,145,146,1
-block_hint,ArrayTimSort,39,40,1
-block_hint,ArrayTimSort,218,219,0
-block_hint,ArrayTimSort,216,217,0
-block_hint,ArrayTimSort,41,42,1
-block_hint,ArrayTimSort,43,44,1
-block_hint,ArrayTimSort,45,46,1
-block_hint,ArrayTimSort,134,135,0
-block_hint,ArrayTimSort,47,48,1
-block_hint,ArrayTimSort,49,50,1
-block_hint,ArrayTimSort,222,223,0
-block_hint,ArrayTimSort,51,52,1
-block_hint,ArrayTimSort,53,54,1
-block_hint,ArrayTimSort,55,56,1
-block_hint,ArrayTimSort,57,58,1
-block_hint,ArrayTimSort,59,60,1
-block_hint,ArrayTimSort,61,62,1
-block_hint,ArrayTimSort,63,64,1
-block_hint,ArrayTimSort,65,66,1
-block_hint,ArrayTimSort,67,68,1
-block_hint,ArrayTimSort,69,70,1
-block_hint,ArrayTimSort,71,72,1
-block_hint,ArrayTimSort,157,158,1
-block_hint,ArrayTimSort,73,74,1
-block_hint,ArrayTimSort,75,76,1
-block_hint,ArrayTimSort,204,205,0
-block_hint,ArrayTimSort,77,78,1
-block_hint,ArrayTimSort,79,80,1
-block_hint,ArrayTimSort,209,210,0
-block_hint,ArrayTimSort,81,82,1
-block_hint,ArrayTimSort,83,84,1
-block_hint,ArrayTimSort,186,187,0
-block_hint,ArrayTimSort,236,237,1
-block_hint,ArrayTimSort,238,239,1
-block_hint,ArrayTimSort,211,212,1
-block_hint,ArrayTimSort,161,162,1
-block_hint,ArrayTimSort,85,86,1
-block_hint,ArrayTimSort,243,244,1
-block_hint,ArrayTimSort,230,231,0
-block_hint,ArrayTimSort,188,189,1
-block_hint,ArrayTimSort,138,139,0
-block_hint,ArrayTimSort,87,88,1
-block_hint,ArrayTimSort,113,114,0
-block_hint,ArrayTimSort,89,90,0
-block_hint,ArrayPrototypeSort,106,107,1
-block_hint,ArrayPrototypeSort,80,81,0
-block_hint,ArrayPrototypeSort,39,40,1
-block_hint,ArrayPrototypeSort,70,71,0
-block_hint,ArrayPrototypeSort,41,42,1
-block_hint,ArrayPrototypeSort,82,83,1
-block_hint,ArrayPrototypeSort,84,85,1
-block_hint,ArrayPrototypeSort,63,64,0
-block_hint,ArrayPrototypeSort,27,28,0
-block_hint,ArrayPrototypeSort,120,121,0
-block_hint,ArrayPrototypeSort,101,102,1
-block_hint,ArrayPrototypeSort,73,74,1
-block_hint,ArrayPrototypeSort,51,52,1
-block_hint,ArrayPrototypeSort,15,16,1
-block_hint,ArrayPrototypeSort,95,96,1
-block_hint,ArrayPrototypeSort,75,76,0
-block_hint,ArrayPrototypeSort,53,54,0
-block_hint,ArrayPrototypeSort,136,137,0
-block_hint,ArrayPrototypeSort,139,140,0
-block_hint,ArrayPrototypeSort,130,131,0
-block_hint,ArrayPrototypeSort,122,123,0
-block_hint,ArrayPrototypeSort,103,104,0
-block_hint,ArrayPrototypeSort,114,115,0
-block_hint,ArrayPrototypeSort,117,118,1
-block_hint,ArrayPrototypeSort,77,78,1
-block_hint,ArrayPrototypeSort,33,34,0
-block_hint,ArrayPrototypeSort,98,99,1
-block_hint,ArrayPrototypeSort,91,92,1
-block_hint,ArrayPrototypeSort,56,57,1
-block_hint,StringFastLocaleCompare,315,316,1
-block_hint,StringFastLocaleCompare,239,240,0
-block_hint,StringFastLocaleCompare,303,304,1
-block_hint,StringFastLocaleCompare,156,157,0
-block_hint,StringFastLocaleCompare,158,159,0
-block_hint,StringFastLocaleCompare,267,268,1
-block_hint,StringFastLocaleCompare,106,107,0
-block_hint,StringFastLocaleCompare,307,308,1
-block_hint,StringFastLocaleCompare,172,173,0
-block_hint,StringFastLocaleCompare,174,175,0
-block_hint,StringFastLocaleCompare,109,110,0
-block_hint,StringFastLocaleCompare,211,212,1
-block_hint,StringFastLocaleCompare,271,272,1
-block_hint,StringFastLocaleCompare,276,277,0
-block_hint,StringFastLocaleCompare,253,254,1
-block_hint,StringFastLocaleCompare,73,74,0
-block_hint,StringFastLocaleCompare,274,275,1
-block_hint,StringFastLocaleCompare,116,117,0
-block_hint,StringFastLocaleCompare,77,78,1
-block_hint,CanUseSameAccessor_FastObjectElements_0,2,3,1
-block_hint,CanUseSameAccessor_FastObjectElements_0,4,5,1
-block_hint,StringPrototypeToLowerCaseIntl,10,11,1
-block_hint,StringPrototypeToLowerCaseIntl,7,8,1
-block_hint,StringPrototypeToLowerCaseIntl,5,6,1
-block_hint,StringToLowerCaseIntl,23,24,1
-block_hint,StringToLowerCaseIntl,25,26,0
-block_hint,StringToLowerCaseIntl,34,35,1
-block_hint,StringToLowerCaseIntl,7,8,0
-block_hint,StringToLowerCaseIntl,43,44,1
-block_hint,StringToLowerCaseIntl,41,42,1
-block_hint,StringToLowerCaseIntl,19,20,0
-block_hint,StringToLowerCaseIntl,39,40,0
-block_hint,StringToLowerCaseIntl,14,15,0
-block_hint,LdaContextSlotHandler,3,4,1
-block_hint,LdaContextSlotHandler,5,6,1
-block_hint,LdaImmutableContextSlotHandler,3,4,1
-block_hint,LdaImmutableContextSlotHandler,5,6,1
-block_hint,LdaCurrentContextSlotHandler,2,3,1
-block_hint,LdaImmutableCurrentContextSlotHandler,2,3,1
-block_hint,TestTypeOfHandler,7,8,1
-block_hint,TestTypeOfHandler,15,16,0
-block_hint,TestTypeOfHandler,23,24,0
-block_hint,TestTypeOfHandler,31,32,1
-block_hint,TestTypeOfHandler,50,51,0
-block_hint,TestTypeOfHandler,35,36,0
-block_hint,LdaGlobalHandler,7,8,1
-block_hint,LdaGlobalHandler,9,10,1
-block_hint,LdaGlobalHandler,11,12,1
-block_hint,LdaGlobalHandler,13,14,1
-block_hint,LdaGlobalHandler,183,184,0
-block_hint,LdaGlobalHandler,105,106,0
-block_hint,LdaGlobalHandler,109,110,1
-block_hint,StaContextSlotHandler,5,6,1
-block_hint,StaCurrentContextSlotHandler,2,3,1
-block_hint,LdaLookupGlobalSlotHandler,13,14,1
-block_hint,LdaLookupGlobalSlotHandler,125,126,0
-block_hint,LdaLookupGlobalSlotHandler,15,16,1
-block_hint,GetNamedPropertyHandler,372,373,1
-block_hint,GetNamedPropertyHandler,216,217,0
-block_hint,GetNamedPropertyHandler,77,78,0
-block_hint,GetNamedPropertyHandler,35,36,1
-block_hint,GetNamedPropertyHandler,313,314,0
-block_hint,GetNamedPropertyHandler,339,340,0
-block_hint,GetNamedPropertyHandler,218,219,1
-block_hint,GetNamedPropertyHandler,290,291,0
-block_hint,GetNamedPropertyHandler,220,221,0
-block_hint,GetNamedPropertyHandler,294,295,1
-block_hint,GetNamedPropertyHandler,39,40,0
-block_hint,GetNamedPropertyHandler,98,99,1
-block_hint,GetNamedPropertyHandler,347,348,0
-block_hint,GetNamedPropertyHandler,242,243,0
-block_hint,GetNamedPropertyHandler,154,155,0
-block_hint,GetNamedPropertyHandler,120,121,1
-block_hint,GetNamedPropertyHandler,49,50,0
-block_hint,GetNamedPropertyHandler,87,88,0
-block_hint,GetNamedPropertyHandler,25,26,1
-block_hint,GetNamedPropertyHandler,144,145,0
-block_hint,GetNamedPropertyHandler,65,66,0
-block_hint,GetNamedPropertyHandler,303,304,1
-block_hint,GetNamedPropertyHandler,102,103,0
-block_hint,GetNamedPropertyHandler,248,249,1
-block_hint,GetNamedPropertyHandler,250,251,1
-block_hint,GetNamedPropertyHandler,244,245,1
-block_hint,GetNamedPropertyHandler,246,247,1
-block_hint,GetNamedPropertyHandler,164,165,1
-block_hint,AddHandler,72,73,0
-block_hint,AddHandler,45,46,0
-block_hint,AddHandler,32,33,1
-block_hint,AddHandler,118,119,0
-block_hint,AddHandler,81,82,1
-block_hint,AddHandler,48,49,1
-block_hint,AddHandler,103,104,1
-block_hint,AddHandler,52,53,1
-block_hint,AddHandler,75,76,1
-block_hint,AddHandler,24,25,1
-block_hint,SubHandler,42,43,0
-block_hint,SubHandler,27,28,1
-block_hint,SubHandler,78,79,1
-block_hint,SubHandler,98,99,1
-block_hint,SubHandler,80,81,1
-block_hint,SubHandler,56,57,1
-block_hint,SubHandler,21,22,1
-block_hint,MulHandler,106,107,1
-block_hint,MulHandler,98,99,1
-block_hint,MulHandler,30,31,1
-block_hint,MulHandler,112,113,1
-block_hint,MulHandler,91,92,1
-block_hint,MulHandler,59,60,1
-block_hint,MulHandler,23,24,1
-block_hint,DivHandler,109,110,0
-block_hint,DivHandler,90,91,0
-block_hint,DivHandler,63,64,1
-block_hint,DivHandler,33,34,1
-block_hint,DivHandler,121,122,1
-block_hint,DivHandler,96,97,1
-block_hint,DivHandler,66,67,1
-block_hint,DivHandler,23,24,1
-block_hint,ModHandler,129,130,0
-block_hint,ModHandler,118,119,0
-block_hint,ModHandler,92,93,1
-block_hint,ModHandler,87,88,1
-block_hint,ModHandler,50,51,0
-block_hint,ModHandler,33,34,1
-block_hint,BitwiseOrHandler,42,43,0
-block_hint,BitwiseOrHandler,30,31,1
-block_hint,BitwiseOrHandler,8,9,1
-block_hint,BitwiseOrHandler,56,57,1
-block_hint,BitwiseOrHandler,60,61,1
-block_hint,BitwiseOrHandler,24,25,1
-block_hint,BitwiseXorHandler,32,33,1
-block_hint,BitwiseXorHandler,56,57,1
-block_hint,BitwiseXorHandler,60,61,1
-block_hint,BitwiseXorHandler,24,25,1
-block_hint,BitwiseAndHandler,32,33,1
-block_hint,BitwiseAndHandler,56,57,1
-block_hint,BitwiseAndHandler,60,61,1
-block_hint,BitwiseAndHandler,24,25,1
-block_hint,ShiftLeftHandler,10,11,0
-block_hint,ShiftLeftHandler,60,61,1
-block_hint,ShiftLeftHandler,24,25,1
-block_hint,ShiftRightHandler,32,33,1
-block_hint,ShiftRightHandler,10,11,0
-block_hint,ShiftRightHandler,58,59,0
-block_hint,ShiftRightHandler,39,40,0
-block_hint,ShiftRightHandler,24,25,1
-block_hint,ShiftRightLogicalHandler,10,11,0
-block_hint,ShiftRightLogicalHandler,58,59,0
-block_hint,ShiftRightLogicalHandler,39,40,0
-block_hint,ShiftRightLogicalHandler,24,25,1
-block_hint,AddSmiHandler,27,28,0
-block_hint,AddSmiHandler,19,20,0
-block_hint,AddSmiHandler,14,15,1
-block_hint,SubSmiHandler,25,26,0
-block_hint,SubSmiHandler,15,16,1
-block_hint,MulSmiHandler,52,53,0
-block_hint,MulSmiHandler,39,40,0
-block_hint,MulSmiHandler,41,42,0
-block_hint,MulSmiHandler,22,23,0
-block_hint,MulSmiHandler,13,14,1
-block_hint,DivSmiHandler,45,46,0
-block_hint,DivSmiHandler,52,53,0
-block_hint,DivSmiHandler,40,41,0
-block_hint,DivSmiHandler,29,30,1
-block_hint,DivSmiHandler,7,8,0
-block_hint,DivSmiHandler,13,14,1
-block_hint,ModSmiHandler,42,43,1
-block_hint,ModSmiHandler,37,38,1
-block_hint,ModSmiHandler,22,23,0
-block_hint,ModSmiHandler,13,14,1
-block_hint,BitwiseOrSmiHandler,31,32,1
-block_hint,BitwiseOrSmiHandler,37,38,1
-block_hint,BitwiseOrSmiHandler,18,19,1
-block_hint,BitwiseAndSmiHandler,6,7,0
-block_hint,BitwiseAndSmiHandler,18,19,1
-block_hint,ShiftLeftSmiHandler,44,45,1
-block_hint,ShiftLeftSmiHandler,34,35,1
-block_hint,ShiftLeftSmiHandler,46,47,1
-block_hint,ShiftLeftSmiHandler,18,19,1
-block_hint,ShiftRightSmiHandler,31,32,1
-block_hint,ShiftRightSmiHandler,35,36,0
-block_hint,ShiftRightSmiHandler,29,30,0
-block_hint,ShiftRightSmiHandler,18,19,1
-block_hint,ShiftRightLogicalSmiHandler,40,41,0
-block_hint,ShiftRightLogicalSmiHandler,30,31,0
-block_hint,ShiftRightLogicalSmiHandler,34,35,1
-block_hint,ShiftRightLogicalSmiHandler,42,43,0
-block_hint,ShiftRightLogicalSmiHandler,32,33,0
-block_hint,ShiftRightLogicalSmiHandler,18,19,1
-block_hint,IncHandler,27,28,0
-block_hint,IncHandler,23,24,0
-block_hint,IncHandler,18,19,1
-block_hint,DecHandler,27,28,0
-block_hint,DecHandler,23,24,0
-block_hint,DecHandler,18,19,1
-block_hint,NegateHandler,26,27,1
-block_hint,NegateHandler,24,25,1
-block_hint,ToBooleanLogicalNotHandler,15,16,0
-block_hint,ToBooleanLogicalNotHandler,21,22,0
-block_hint,ToBooleanLogicalNotHandler,7,8,0
-block_hint,TypeOfHandler,20,21,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,12,13,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,6,7,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,14,15,1
-block_hint,FindNonDefaultConstructorOrConstructHandler,16,17,0
-block_hint,FindNonDefaultConstructorOrConstructHandler,4,5,1
-block_hint,FindNonDefaultConstructorOrConstructHandler,18,19,1
-block_hint,CallAnyReceiverHandler,21,22,1
-block_hint,CallProperty0Handler,7,8,1
-block_hint,CallProperty0Handler,62,63,0
-block_hint,CallProperty0Handler,14,15,1
-block_hint,CallProperty0Handler,16,17,0
-block_hint,CallProperty0Handler,72,73,0
-block_hint,CallProperty0Handler,55,56,1
-block_hint,CallProperty1Handler,21,22,1
-block_hint,CallProperty1Handler,86,87,0
-block_hint,CallProperty1Handler,83,84,0
-block_hint,CallProperty1Handler,64,65,0
-block_hint,CallProperty1Handler,35,36,1
-block_hint,CallProperty1Handler,70,71,1
-block_hint,CallProperty1Handler,51,52,0
-block_hint,CallProperty1Handler,7,8,1
-block_hint,CallProperty1Handler,62,63,0
-block_hint,CallProperty1Handler,14,15,1
-block_hint,CallProperty1Handler,16,17,0
-block_hint,CallProperty1Handler,72,73,0
-block_hint,CallProperty1Handler,55,56,1
-block_hint,CallProperty2Handler,23,24,0
-block_hint,CallProperty2Handler,86,87,0
-block_hint,CallProperty2Handler,83,84,0
-block_hint,CallProperty2Handler,64,65,0
-block_hint,CallProperty2Handler,5,6,1
-block_hint,CallProperty2Handler,47,48,1
-block_hint,CallProperty2Handler,25,26,1
-block_hint,CallProperty2Handler,7,8,1
-block_hint,CallProperty2Handler,14,15,1
-block_hint,CallProperty2Handler,16,17,0
-block_hint,CallProperty2Handler,72,73,0
-block_hint,CallProperty2Handler,55,56,1
-block_hint,CallUndefinedReceiverHandler,86,87,0
-block_hint,CallUndefinedReceiverHandler,83,84,0
-block_hint,CallUndefinedReceiverHandler,64,65,0
-block_hint,CallUndefinedReceiverHandler,35,36,1
-block_hint,CallUndefinedReceiverHandler,70,71,1
-block_hint,CallUndefinedReceiverHandler,51,52,0
-block_hint,CallUndefinedReceiverHandler,29,30,1
-block_hint,CallUndefinedReceiver0Handler,86,87,0
-block_hint,CallUndefinedReceiver0Handler,83,84,0
-block_hint,CallUndefinedReceiver0Handler,64,65,0
-block_hint,CallUndefinedReceiver0Handler,35,36,1
-block_hint,CallUndefinedReceiver0Handler,70,71,1
-block_hint,CallUndefinedReceiver0Handler,51,52,0
-block_hint,CallUndefinedReceiver0Handler,29,30,1
-block_hint,CallUndefinedReceiver1Handler,86,87,0
-block_hint,CallUndefinedReceiver1Handler,83,84,0
-block_hint,CallUndefinedReceiver1Handler,64,65,0
-block_hint,CallUndefinedReceiver1Handler,35,36,1
-block_hint,CallUndefinedReceiver1Handler,70,71,1
-block_hint,CallUndefinedReceiver1Handler,51,52,0
-block_hint,CallUndefinedReceiver1Handler,29,30,1
-block_hint,CallUndefinedReceiver1Handler,7,8,1
-block_hint,CallUndefinedReceiver1Handler,62,63,0
-block_hint,CallUndefinedReceiver1Handler,14,15,1
-block_hint,CallUndefinedReceiver1Handler,16,17,0
-block_hint,CallUndefinedReceiver1Handler,72,73,0
-block_hint,CallUndefinedReceiver1Handler,55,56,1
-block_hint,CallUndefinedReceiver2Handler,23,24,0
-block_hint,CallUndefinedReceiver2Handler,86,87,0
-block_hint,CallUndefinedReceiver2Handler,83,84,0
-block_hint,CallUndefinedReceiver2Handler,64,65,0
-block_hint,CallUndefinedReceiver2Handler,35,36,1
-block_hint,CallUndefinedReceiver2Handler,70,71,1
-block_hint,CallUndefinedReceiver2Handler,51,52,0
-block_hint,CallUndefinedReceiver2Handler,29,30,1
-block_hint,CallWithSpreadHandler,23,24,1
-block_hint,ConstructHandler,52,53,0
-block_hint,ConstructHandler,41,42,1
-block_hint,ConstructHandler,24,25,1
-block_hint,ConstructHandler,15,16,1
-block_hint,ConstructHandler,3,4,1
-block_hint,ConstructHandler,39,40,1
-block_hint,TestEqualHandler,103,104,0
-block_hint,TestEqualHandler,25,26,1
-block_hint,TestEqualHandler,72,73,0
-block_hint,TestEqualHandler,79,80,1
-block_hint,TestEqualHandler,27,28,1
-block_hint,TestEqualHandler,85,86,0
-block_hint,TestEqualHandler,114,115,0
-block_hint,TestEqualHandler,19,20,1
-block_hint,TestEqualStrictHandler,82,83,0
-block_hint,TestEqualStrictHandler,53,54,1
-block_hint,TestEqualStrictHandler,66,67,0
-block_hint,TestEqualStrictHandler,59,60,1
-block_hint,TestEqualStrictHandler,41,42,1
-block_hint,TestEqualStrictHandler,61,62,0
-block_hint,TestEqualStrictHandler,55,56,1
-block_hint,TestEqualStrictHandler,47,48,0
-block_hint,TestEqualStrictHandler,72,73,0
-block_hint,TestEqualStrictHandler,49,50,0
-block_hint,TestEqualStrictHandler,7,8,1
-block_hint,TestLessThanHandler,41,42,0
-block_hint,TestLessThanHandler,63,64,0
-block_hint,TestLessThanHandler,65,66,1
-block_hint,TestLessThanHandler,49,50,1
-block_hint,TestLessThanHandler,9,10,1
-block_hint,TestGreaterThanHandler,41,42,0
-block_hint,TestGreaterThanHandler,45,46,1
-block_hint,TestGreaterThanHandler,9,10,1
-block_hint,TestLessThanOrEqualHandler,41,42,0
-block_hint,TestLessThanOrEqualHandler,9,10,1
-block_hint,TestGreaterThanOrEqualHandler,61,62,0
-block_hint,TestGreaterThanOrEqualHandler,41,42,0
-block_hint,TestGreaterThanOrEqualHandler,9,10,1
-block_hint,TestInstanceOfHandler,17,18,1
-block_hint,TestInstanceOfHandler,19,20,1
-block_hint,TestInstanceOfHandler,4,5,1
-block_hint,TestInstanceOfHandler,21,22,1
-block_hint,ToNumericHandler,12,13,0
-block_hint,ToNumericHandler,7,8,1
-block_hint,ToStringHandler,3,4,1
-block_hint,CreateRegExpLiteralHandler,7,8,1
-block_hint,CreateRegExpLiteralHandler,3,4,1
-block_hint,CreateArrayLiteralHandler,38,39,1
-block_hint,CreateArrayLiteralHandler,41,42,0
-block_hint,CreateArrayLiteralHandler,13,14,0
-block_hint,CreateArrayLiteralHandler,49,50,1
-block_hint,CreateArrayLiteralHandler,46,47,1
-block_hint,CreateArrayLiteralHandler,22,23,1
-block_hint,CreateArrayLiteralHandler,28,29,1
-block_hint,CreateArrayLiteralHandler,3,4,1
-block_hint,CreateArrayLiteralHandler,30,31,1
-block_hint,CreateArrayLiteralHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralHandler,3,4,1
-block_hint,CreateEmptyArrayLiteralHandler,13,14,1
-block_hint,CreateEmptyArrayLiteralHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralHandler,15,16,1
-block_hint,CreateObjectLiteralHandler,88,89,0
-block_hint,CreateObjectLiteralHandler,123,124,0
-block_hint,CreateObjectLiteralHandler,116,117,1
-block_hint,CreateObjectLiteralHandler,92,93,1
-block_hint,CreateObjectLiteralHandler,108,109,1
-block_hint,CreateObjectLiteralHandler,72,73,1
-block_hint,CreateObjectLiteralHandler,34,35,0
-block_hint,CreateObjectLiteralHandler,74,75,1
-block_hint,CreateObjectLiteralHandler,64,65,0
-block_hint,CreateEmptyObjectLiteralHandler,4,5,1
-block_hint,CreateEmptyObjectLiteralHandler,11,12,1
-block_hint,CreateEmptyObjectLiteralHandler,6,7,0
-block_hint,CreateClosureHandler,2,3,1
-block_hint,CreateFunctionContextHandler,11,12,1
-block_hint,CreateFunctionContextHandler,4,5,1
-block_hint,CreateFunctionContextHandler,6,7,0
-block_hint,CreateMappedArgumentsHandler,52,53,0
-block_hint,CreateMappedArgumentsHandler,42,43,1
-block_hint,CreateMappedArgumentsHandler,44,45,0
-block_hint,CreateMappedArgumentsHandler,104,105,1
-block_hint,CreateMappedArgumentsHandler,46,47,0
-block_hint,CreateMappedArgumentsHandler,12,13,0
-block_hint,CreateMappedArgumentsHandler,14,15,0
-block_hint,CreateMappedArgumentsHandler,78,79,1
-block_hint,CreateMappedArgumentsHandler,58,59,1
-block_hint,CreateMappedArgumentsHandler,16,17,0
-block_hint,CreateMappedArgumentsHandler,60,61,0
-block_hint,CreateMappedArgumentsHandler,32,33,0
-block_hint,CreateMappedArgumentsHandler,24,25,1
-block_hint,CreateMappedArgumentsHandler,70,71,1
-block_hint,CreateUnmappedArgumentsHandler,16,17,1
-block_hint,CreateUnmappedArgumentsHandler,18,19,0
-block_hint,CreateUnmappedArgumentsHandler,20,21,0
-block_hint,CreateUnmappedArgumentsHandler,7,8,0
-block_hint,CreateUnmappedArgumentsHandler,9,10,0
-block_hint,CreateUnmappedArgumentsHandler,31,32,1
-block_hint,CreateUnmappedArgumentsHandler,25,26,1
-block_hint,CreateUnmappedArgumentsHandler,11,12,0
-block_hint,CreateUnmappedArgumentsHandler,27,28,0
-block_hint,CreateUnmappedArgumentsHandler,13,14,1
-block_hint,CreateUnmappedArgumentsHandler,23,24,1
-block_hint,CreateRestParameterHandler,13,14,0
-block_hint,CreateRestParameterHandler,27,28,0
-block_hint,JumpLoopHandler,34,35,1
-block_hint,JumpLoopHandler,23,24,0
-block_hint,JumpLoopHandler,9,10,1
-block_hint,JumpIfToBooleanFalseConstantHandler,14,15,0
-block_hint,JumpIfToBooleanFalseConstantHandler,20,21,0
-block_hint,JumpIfToBooleanFalseConstantHandler,6,7,0
-block_hint,JumpIfToBooleanTrueHandler,14,15,0
-block_hint,JumpIfToBooleanTrueHandler,6,7,0
-block_hint,JumpIfToBooleanTrueHandler,8,9,1
-block_hint,JumpIfToBooleanFalseHandler,14,15,0
-block_hint,JumpIfToBooleanFalseHandler,20,21,0
-block_hint,JumpIfToBooleanFalseHandler,6,7,0
-block_hint,JumpIfUndefinedOrNullHandler,3,4,0
-block_hint,JumpIfJSReceiverHandler,5,6,1
-block_hint,JumpIfJSReceiverHandler,3,4,1
-block_hint,SwitchOnSmiNoFeedbackHandler,3,4,0
-block_hint,ForInEnumerateHandler,34,35,1
-block_hint,ForInPrepareHandler,18,19,1
-block_hint,ForInNextHandler,2,3,1
-block_hint,ForInNextHandler,13,14,1
-block_hint,ReturnHandler,3,4,1
-block_hint,ThrowReferenceErrorIfHoleHandler,4,5,0
-block_hint,ThrowSuperNotCalledIfHoleHandler,2,3,0
-block_hint,ThrowSuperAlreadyCalledIfNotHoleHandler,2,3,1
-block_hint,ThrowIfNotSuperConstructorHandler,2,3,1
-block_hint,SuspendGeneratorHandler,14,15,1
-block_hint,SuspendGeneratorHandler,8,9,1
-block_hint,SuspendGeneratorHandler,12,13,1
-block_hint,ResumeGeneratorHandler,10,11,1
-block_hint,ResumeGeneratorHandler,4,5,1
-block_hint,ResumeGeneratorHandler,6,7,1
-block_hint,LdaImmutableContextSlotWideHandler,3,4,1
-block_hint,LdaImmutableContextSlotWideHandler,9,10,0
-block_hint,LdaImmutableContextSlotWideHandler,5,6,1
-block_hint,LdaImmutableCurrentContextSlotWideHandler,2,3,1
-block_hint,LdaGlobalWideHandler,262,263,0
-block_hint,LdaGlobalWideHandler,110,111,1
-block_hint,StaGlobalWideHandler,3,4,0
-block_hint,StaCurrentContextSlotWideHandler,2,3,1
-block_hint,GetNamedPropertyWideHandler,331,332,0
-block_hint,GetNamedPropertyWideHandler,140,141,1
-block_hint,GetKeyedPropertyWideHandler,3,4,0
-block_hint,SetNamedPropertyWideHandler,3,4,0
-block_hint,DefineNamedOwnPropertyWideHandler,3,4,0
-block_hint,SetKeyedPropertyWideHandler,3,4,0
-block_hint,DefineKeyedOwnPropertyWideHandler,3,4,0
-block_hint,StaInArrayLiteralWideHandler,3,4,0
-block_hint,AddWideHandler,120,121,0
-block_hint,AddWideHandler,60,61,0
-block_hint,AddWideHandler,42,43,0
-block_hint,AddWideHandler,107,108,0
-block_hint,AddWideHandler,76,77,1
-block_hint,AddWideHandler,53,54,1
-block_hint,AddWideHandler,31,32,1
-block_hint,AddWideHandler,51,52,1
-block_hint,AddWideHandler,18,19,1
-block_hint,SubWideHandler,108,109,0
-block_hint,SubWideHandler,65,66,0
-block_hint,SubWideHandler,40,41,0
-block_hint,SubWideHandler,15,16,1
-block_hint,MulWideHandler,128,129,0
-block_hint,MulWideHandler,106,107,1
-block_hint,MulWideHandler,83,84,1
-block_hint,MulWideHandler,81,82,1
-block_hint,MulWideHandler,43,44,1
-block_hint,MulWideHandler,19,20,1
-block_hint,BitwiseOrWideHandler,28,29,0
-block_hint,BitwiseOrWideHandler,20,21,1
-block_hint,AddSmiWideHandler,25,26,0
-block_hint,AddSmiWideHandler,17,18,0
-block_hint,MulSmiWideHandler,52,53,0
-block_hint,MulSmiWideHandler,41,42,0
-block_hint,MulSmiWideHandler,22,23,0
-block_hint,MulSmiWideHandler,34,35,1
-block_hint,MulSmiWideHandler,24,25,0
-block_hint,MulSmiWideHandler,28,29,1
-block_hint,MulSmiWideHandler,9,10,1
-block_hint,ModSmiWideHandler,51,52,1
-block_hint,ModSmiWideHandler,46,47,0
-block_hint,ModSmiWideHandler,42,43,1
-block_hint,ModSmiWideHandler,37,38,1
-block_hint,ModSmiWideHandler,22,23,0
-block_hint,ModSmiWideHandler,7,8,0
-block_hint,ModSmiWideHandler,13,14,1
-block_hint,BitwiseOrSmiWideHandler,23,24,0
-block_hint,BitwiseOrSmiWideHandler,6,7,0
-block_hint,BitwiseOrSmiWideHandler,11,12,1
-block_hint,BitwiseAndSmiWideHandler,6,7,0
-block_hint,BitwiseAndSmiWideHandler,18,19,1
-block_hint,ShiftLeftSmiWideHandler,24,25,0
-block_hint,ShiftLeftSmiWideHandler,6,7,0
-block_hint,ShiftLeftSmiWideHandler,40,41,0
-block_hint,ShiftLeftSmiWideHandler,30,31,0
-block_hint,ShiftLeftSmiWideHandler,11,12,1
-block_hint,ShiftRightSmiWideHandler,23,24,0
-block_hint,ShiftRightSmiWideHandler,6,7,0
-block_hint,ShiftRightSmiWideHandler,11,12,1
-block_hint,IncWideHandler,9,10,0
-block_hint,IncWideHandler,25,26,0
-block_hint,IncWideHandler,19,20,0
-block_hint,IncWideHandler,7,8,1
-block_hint,CallPropertyWideHandler,68,69,0
-block_hint,CallPropertyWideHandler,19,20,0
-block_hint,CallProperty0WideHandler,68,69,0
-block_hint,CallProperty0WideHandler,19,20,0
-block_hint,CallProperty1WideHandler,68,69,0
-block_hint,CallProperty1WideHandler,19,20,0
-block_hint,CallProperty2WideHandler,68,69,0
-block_hint,CallProperty2WideHandler,19,20,0
-block_hint,CallUndefinedReceiverWideHandler,68,69,0
-block_hint,CallUndefinedReceiverWideHandler,19,20,0
-block_hint,CallUndefinedReceiver0WideHandler,68,69,0
-block_hint,CallUndefinedReceiver0WideHandler,19,20,0
-block_hint,CallUndefinedReceiver1WideHandler,68,69,0
-block_hint,CallUndefinedReceiver1WideHandler,19,20,0
-block_hint,CallUndefinedReceiver2WideHandler,68,69,0
-block_hint,CallUndefinedReceiver2WideHandler,19,20,0
-block_hint,ConstructWideHandler,49,50,0
-block_hint,ConstructWideHandler,22,23,0
-block_hint,TestEqualWideHandler,103,104,0
-block_hint,TestEqualWideHandler,95,96,0
-block_hint,TestEqualWideHandler,47,48,0
-block_hint,TestEqualWideHandler,7,8,1
-block_hint,TestEqualStrictWideHandler,82,83,0
-block_hint,TestEqualStrictWideHandler,53,54,1
-block_hint,TestEqualStrictWideHandler,55,56,1
-block_hint,TestEqualStrictWideHandler,47,48,0
-block_hint,TestEqualStrictWideHandler,9,10,0
-block_hint,TestEqualStrictWideHandler,4,5,1
-block_hint,TestGreaterThanWideHandler,24,25,0
-block_hint,TestGreaterThanWideHandler,6,7,1
-block_hint,CreateRegExpLiteralWideHandler,14,15,0
-block_hint,CreateRegExpLiteralWideHandler,9,10,1
-block_hint,CreateArrayLiteralWideHandler,42,43,0
-block_hint,CreateArrayLiteralWideHandler,20,21,1
-block_hint,CreateEmptyArrayLiteralWideHandler,22,23,0
-block_hint,CreateEmptyArrayLiteralWideHandler,11,12,1
-block_hint,CreateEmptyArrayLiteralWideHandler,5,6,1
-block_hint,CreateEmptyArrayLiteralWideHandler,15,16,1
-block_hint,CreateObjectLiteralWideHandler,99,100,0
-block_hint,CreateObjectLiteralWideHandler,58,59,1
-block_hint,CreateClosureWideHandler,9,10,1
-block_hint,CreateClosureWideHandler,2,3,1
-block_hint,CreateFunctionContextWideHandler,8,9,0
-block_hint,JumpLoopWideHandler,34,35,1
-block_hint,JumpLoopWideHandler,9,10,1
-block_hint,JumpIfToBooleanTrueWideHandler,18,19,1
-block_hint,JumpIfToBooleanTrueWideHandler,14,15,0
-block_hint,JumpIfToBooleanFalseWideHandler,18,19,1
-block_hint,JumpIfToBooleanFalseWideHandler,14,15,0
-block_hint,JumpIfToBooleanFalseWideHandler,20,21,0
-block_hint,JumpIfToBooleanFalseWideHandler,6,7,0
-block_hint,SwitchOnSmiNoFeedbackWideHandler,5,6,0
-block_hint,SwitchOnSmiNoFeedbackWideHandler,3,4,0
-block_hint,ForInNextWideHandler,11,12,0
-block_hint,ForInNextWideHandler,2,3,1
-block_hint,ForInNextWideHandler,4,5,0
-block_hint,ForInNextWideHandler,9,10,1
-block_hint,LdaGlobalExtraWideHandler,262,263,0
-block_hint,LdaGlobalExtraWideHandler,110,111,1
-block_hint,AddSmiExtraWideHandler,33,34,1
-block_hint,AddSmiExtraWideHandler,23,24,0
-block_hint,AddSmiExtraWideHandler,28,29,1
-block_hint,AddSmiExtraWideHandler,9,10,1
-block_hint,DivSmiExtraWideHandler,49,50,0
-block_hint,DivSmiExtraWideHandler,45,46,0
-block_hint,DivSmiExtraWideHandler,52,53,0
-block_hint,DivSmiExtraWideHandler,40,41,0
-block_hint,DivSmiExtraWideHandler,23,24,0
-block_hint,DivSmiExtraWideHandler,29,30,1
-block_hint,DivSmiExtraWideHandler,13,14,1
-block_hint,BitwiseAndSmiExtraWideHandler,31,32,1
-block_hint,BitwiseAndSmiExtraWideHandler,35,36,0
-block_hint,BitwiseAndSmiExtraWideHandler,29,30,0
-block_hint,BitwiseAndSmiExtraWideHandler,18,19,1
-block_hint,CallUndefinedReceiver1ExtraWideHandler,68,69,0
-block_hint,CallUndefinedReceiver1ExtraWideHandler,19,20,0
-builtin_hash,RecordWriteSaveFP,-787985789
-builtin_hash,RecordWriteIgnoreFP,-787985789
-builtin_hash,EphemeronKeyBarrierSaveFP,-762846067
-builtin_hash,AdaptorWithBuiltinExitFrame,245562366
-builtin_hash,Call_ReceiverIsNullOrUndefined_Baseline_Compact,-701969451
-builtin_hash,Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,-324308522
-builtin_hash,Call_ReceiverIsAny_Baseline_Compact,-324308522
-builtin_hash,CallProxy,1028339399
-builtin_hash,CallWithSpread,535056033
-builtin_hash,CallWithSpread_Baseline,-119914143
-builtin_hash,CallWithArrayLike,-122249728
-builtin_hash,ConstructWithSpread,246592083
-builtin_hash,ConstructWithSpread_Baseline,150379974
-builtin_hash,Construct_Baseline,62706048
-builtin_hash,FastNewObject,958443730
-builtin_hash,FastNewClosure,344670909
-builtin_hash,StringEqual,747283806
-builtin_hash,StringGreaterThan,-181364078
-builtin_hash,StringGreaterThanOrEqual,-462881432
-builtin_hash,StringLessThan,-462881432
-builtin_hash,StringLessThanOrEqual,-181364078
-builtin_hash,StringSubstring,-615814018
-builtin_hash,OrderedHashTableHealIndex,-1059061674
-builtin_hash,CompileLazy,-1040787392
-builtin_hash,CompileLazyDeoptimizedCode,254075260
-builtin_hash,InstantiateAsmJs,-162781474
-builtin_hash,AllocateInYoungGeneration,504130749
-builtin_hash,AllocateRegularInYoungGeneration,-967770913
-builtin_hash,AllocateRegularInOldGeneration,-967770913
-builtin_hash,CopyFastSmiOrObjectElements,-184201389
-builtin_hash,GrowFastDoubleElements,933478036
-builtin_hash,GrowFastSmiOrObjectElements,62812155
-builtin_hash,ToNumber,-536181652
-builtin_hash,ToNumber_Baseline,-361624131
-builtin_hash,ToNumeric_Baseline,-968362129
-builtin_hash,ToNumberConvertBigInt,-484303877
-builtin_hash,Typeof,-292943239
-builtin_hash,KeyedLoadIC_PolymorphicName,-445639640
-builtin_hash,KeyedStoreIC_Megamorphic,228109775
-builtin_hash,DefineKeyedOwnIC_Megamorphic,587942691
-builtin_hash,LoadGlobalIC_NoFeedback,-506168140
-builtin_hash,LoadIC_FunctionPrototype,-217294724
-builtin_hash,LoadIC_StringLength,876788958
-builtin_hash,LoadIC_StringWrapperLength,-105737329
-builtin_hash,LoadIC_NoFeedback,796730020
-builtin_hash,StoreIC_NoFeedback,-771215689
-builtin_hash,DefineNamedOwnIC_NoFeedback,610029223
-builtin_hash,KeyedLoadIC_SloppyArguments,1037341519
-builtin_hash,StoreFastElementIC_Standard,-1073045042
-builtin_hash,StoreFastElementIC_GrowNoTransitionHandleCOW,-733182579
-builtin_hash,StoreFastElementIC_NoTransitionHandleCOW,14002747
-builtin_hash,ElementsTransitionAndStore_Standard,-303995099
-builtin_hash,ElementsTransitionAndStore_GrowNoTransitionHandleCOW,-620039698
-builtin_hash,ElementsTransitionAndStore_NoTransitionHandleCOW,387221171
-builtin_hash,KeyedHasIC_PolymorphicName,481900135
-builtin_hash,EnqueueMicrotask,987190055
-builtin_hash,RunMicrotasks,-606800144
-builtin_hash,HasProperty,-958876308
-builtin_hash,DeleteProperty,-583543539
-builtin_hash,SetDataProperties,-633970258
-builtin_hash,ReturnReceiver,386533367
-builtin_hash,ArrayConstructor,-862505040
-builtin_hash,ArrayConstructorImpl,-772732436
-builtin_hash,ArrayNoArgumentConstructor_PackedSmi_DontOverride,546753803
-builtin_hash,ArrayNoArgumentConstructor_HoleySmi_DontOverride,546753803
-builtin_hash,ArrayNoArgumentConstructor_PackedSmi_DisableAllocationSites,76921937
-builtin_hash,ArrayNoArgumentConstructor_Packed_DisableAllocationSites,76921937
-builtin_hash,ArrayNoArgumentConstructor_Holey_DisableAllocationSites,76921937
-builtin_hash,ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites,-916490644
-builtin_hash,ArraySingleArgumentConstructor_HoleySmi_DontOverride,924187471
-builtin_hash,ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites,-90166804
-builtin_hash,ArraySingleArgumentConstructor_Holey_DisableAllocationSites,-90166804
-builtin_hash,ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites,377718997
-builtin_hash,ArrayIncludesSmi,833613331
-builtin_hash,ArrayIncludesSmiOrObject,-439120197
-builtin_hash,ArrayIncludes,-557378221
-builtin_hash,ArrayIndexOfSmi,818318721
-builtin_hash,ArrayIndexOfSmiOrObject,1027851539
-builtin_hash,ArrayIndexOf,344845802
-builtin_hash,ArrayPrototypePop,127416215
-builtin_hash,ArrayPrototypePush,611743176
-builtin_hash,CloneFastJSArray,1060615555
-builtin_hash,CloneFastJSArrayFillingHoles,1003395618
-builtin_hash,ExtractFastJSArray,-517393151
-builtin_hash,ArrayPrototypeEntries,-332667431
-builtin_hash,ArrayPrototypeKeys,110264383
-builtin_hash,ArrayPrototypeValues,-332667431
-builtin_hash,ArrayIteratorPrototypeNext,-858892834
-builtin_hash,AsyncFunctionEnter,423723147
-builtin_hash,AsyncFunctionResolve,265196636
-builtin_hash,AsyncFunctionAwaitCaught,960969853
-builtin_hash,AsyncFunctionAwaitUncaught,960969853
-builtin_hash,AsyncFunctionAwaitResolveClosure,-1057297202
-builtin_hash,DatePrototypeGetDate,905028372
-builtin_hash,DatePrototypeGetDay,905028372
-builtin_hash,DatePrototypeGetFullYear,905028372
-builtin_hash,DatePrototypeGetHours,905028372
-builtin_hash,DatePrototypeGetMilliseconds,-707287527
-builtin_hash,DatePrototypeGetMinutes,905028372
-builtin_hash,DatePrototypeGetMonth,905028372
-builtin_hash,DatePrototypeGetSeconds,905028372
-builtin_hash,DatePrototypeGetTime,665014006
-builtin_hash,DatePrototypeGetTimezoneOffset,-707287527
-builtin_hash,DatePrototypeValueOf,665014006
-builtin_hash,DatePrototypeToPrimitive,23745105
-builtin_hash,CreateIterResultObject,833507199
-builtin_hash,CreateGeneratorObject,-898656785
-builtin_hash,GeneratorPrototypeNext,-29771038
-builtin_hash,GeneratorPrototypeReturn,-279661376
-builtin_hash,SuspendGeneratorBaseline,-49499079
-builtin_hash,ResumeGeneratorBaseline,145201245
-builtin_hash,GlobalIsFinite,805204024
-builtin_hash,GlobalIsNaN,413622277
-builtin_hash,LoadIC,79924816
-builtin_hash,LoadIC_Megamorphic,682925528
-builtin_hash,LoadIC_Noninlined,-767250044
-builtin_hash,LoadICTrampoline,-803254542
-builtin_hash,LoadICBaseline,-628874782
-builtin_hash,LoadICTrampoline_Megamorphic,-803254542
-builtin_hash,LoadSuperIC,-238282119
-builtin_hash,LoadSuperICBaseline,841397561
-builtin_hash,KeyedLoadIC,78355712
-builtin_hash,KeyedLoadIC_Megamorphic,-391277039
-builtin_hash,KeyedLoadICTrampoline,-803254542
-builtin_hash,KeyedLoadICBaseline,-628874782
-builtin_hash,KeyedLoadICTrampoline_Megamorphic,-803254542
-builtin_hash,StoreGlobalIC,-33330877
-builtin_hash,StoreGlobalICTrampoline,-803254542
-builtin_hash,StoreGlobalICBaseline,-628874782
-builtin_hash,StoreIC,-959753225
-builtin_hash,StoreICTrampoline,756382466
-builtin_hash,StoreICBaseline,841397561
-builtin_hash,DefineNamedOwnIC,464622021
-builtin_hash,DefineNamedOwnICBaseline,841397561
-builtin_hash,KeyedStoreIC,-538069768
-builtin_hash,KeyedStoreICTrampoline,756382466
-builtin_hash,KeyedStoreICBaseline,841397561
-builtin_hash,DefineKeyedOwnIC,458562905
-builtin_hash,StoreInArrayLiteralIC,-604069917
-builtin_hash,StoreInArrayLiteralICBaseline,841397561
-builtin_hash,LoadGlobalIC,274757270
-builtin_hash,LoadGlobalICInsideTypeof,303475129
-builtin_hash,LoadGlobalICTrampoline,-833311190
-builtin_hash,LoadGlobalICBaseline,-77255126
-builtin_hash,LoadGlobalICInsideTypeofTrampoline,-833311190
-builtin_hash,LoadGlobalICInsideTypeofBaseline,-77255126
-builtin_hash,LookupGlobalICBaseline,-1021507359
-builtin_hash,LookupGlobalICInsideTypeofBaseline,-1021507359
-builtin_hash,KeyedHasIC,-204183308
-builtin_hash,KeyedHasICBaseline,-628874782
-builtin_hash,KeyedHasIC_Megamorphic,-958876308
-builtin_hash,IterableToList,-4651130
-builtin_hash,IterableToListWithSymbolLookup,977588013
-builtin_hash,IterableToListMayPreserveHoles,908990960
-builtin_hash,FindOrderedHashMapEntry,196242182
-builtin_hash,MapConstructor,127220366
-builtin_hash,MapPrototypeSet,529910141
-builtin_hash,MapPrototypeDelete,-553855034
-builtin_hash,MapPrototypeGet,-312429732
-builtin_hash,MapPrototypeHas,-908577859
-builtin_hash,MapPrototypeEntries,898519671
-builtin_hash,MapPrototypeGetSize,548120946
-builtin_hash,MapPrototypeForEach,600253966
-builtin_hash,MapPrototypeKeys,898519671
-builtin_hash,MapPrototypeValues,898519671
-builtin_hash,MapIteratorPrototypeNext,581031622
-builtin_hash,MapIteratorToList,-668334452
-builtin_hash,SameValueNumbersOnly,1046023669
-builtin_hash,Add_Baseline,-819537320
-builtin_hash,AddSmi_Baseline,-468458532
-builtin_hash,Subtract_Baseline,65596691
-builtin_hash,SubtractSmi_Baseline,-149584042
-builtin_hash,Multiply_Baseline,294831898
-builtin_hash,MultiplySmi_Baseline,996262660
-builtin_hash,Divide_Baseline,-446061441
-builtin_hash,DivideSmi_Baseline,-447600168
-builtin_hash,Modulus_Baseline,-832082339
-builtin_hash,ModulusSmi_Baseline,413347859
-builtin_hash,Exponentiate_Baseline,129594833
-builtin_hash,BitwiseAnd_Baseline,807317245
-builtin_hash,BitwiseAndSmi_Baseline,-299694524
-builtin_hash,BitwiseOr_Baseline,517046253
-builtin_hash,BitwiseOrSmi_Baseline,986547189
-builtin_hash,BitwiseXor_Baseline,-23876279
-builtin_hash,BitwiseXorSmi_Baseline,-1002138133
-builtin_hash,ShiftLeft_Baseline,500850188
-builtin_hash,ShiftLeftSmi_Baseline,-633960771
-builtin_hash,ShiftRight_Baseline,-32080745
-builtin_hash,ShiftRightSmi_Baseline,315819990
-builtin_hash,ShiftRightLogical_Baseline,479447240
-builtin_hash,ShiftRightLogicalSmi_Baseline,-519393226
-builtin_hash,Add_WithFeedback,-206794177
-builtin_hash,Subtract_WithFeedback,347362352
-builtin_hash,Modulus_WithFeedback,920841751
-builtin_hash,BitwiseOr_WithFeedback,-74343708
-builtin_hash,Equal_Baseline,-896951542
-builtin_hash,StrictEqual_Baseline,87581778
-builtin_hash,LessThan_Baseline,-374004445
-builtin_hash,GreaterThan_Baseline,-368668942
-builtin_hash,LessThanOrEqual_Baseline,301132954
-builtin_hash,GreaterThanOrEqual_Baseline,756925202
-builtin_hash,Equal_WithFeedback,-1040295188
-builtin_hash,StrictEqual_WithFeedback,-1052414211
-builtin_hash,LessThan_WithFeedback,948983301
-builtin_hash,GreaterThan_WithFeedback,-258688563
-builtin_hash,GreaterThanOrEqual_WithFeedback,691471117
-builtin_hash,BitwiseNot_Baseline,182142082
-builtin_hash,Decrement_Baseline,-544743600
-builtin_hash,Increment_Baseline,-307783174
-builtin_hash,Negate_Baseline,434902398
-builtin_hash,ObjectAssign,-786777006
-builtin_hash,ObjectCreate,-543317475
-builtin_hash,ObjectEntries,-465524320
-builtin_hash,ObjectGetOwnPropertyDescriptor,862856609
-builtin_hash,ObjectGetOwnPropertyNames,409260893
-builtin_hash,ObjectIs,-428110665
-builtin_hash,ObjectKeys,-711238005
-builtin_hash,ObjectPrototypeHasOwnProperty,-338192343
-builtin_hash,ObjectToString,993745228
-builtin_hash,InstanceOf_WithFeedback,-50284518
-builtin_hash,InstanceOf_Baseline,992223159
-builtin_hash,ForInEnumerate,-857152067
-builtin_hash,ForInPrepare,-602567485
-builtin_hash,ForInFilter,-142224411
-builtin_hash,RegExpConstructor,-862541618
-builtin_hash,RegExpExecAtom,-837574121
-builtin_hash,RegExpExecInternal,549675176
-builtin_hash,FindOrderedHashSetEntry,-166628054
-builtin_hash,SetConstructor,-778640968
-builtin_hash,SetPrototypeHas,-908577859
-builtin_hash,SetPrototypeAdd,-427333429
-builtin_hash,SetPrototypeDelete,-871946847
-builtin_hash,SetPrototypeEntries,898519671
-builtin_hash,SetPrototypeGetSize,548120946
-builtin_hash,SetPrototypeForEach,-501810916
-builtin_hash,SetPrototypeValues,898519671
-builtin_hash,SetIteratorPrototypeNext,182871241
-builtin_hash,SetOrSetIteratorToList,-33118696
-builtin_hash,StringFromCharCode,-971392951
-builtin_hash,StringPrototypeReplace,211421001
-builtin_hash,StringPrototypeSplit,-575300599
-builtin_hash,TypedArrayConstructor,618386097
-builtin_hash,TypedArrayPrototypeByteLength,-587563610
-builtin_hash,TypedArrayPrototypeLength,-163278974
-builtin_hash,WeakMapConstructor,-808541690
-builtin_hash,WeakMapLookupHashIndex,-619048905
-builtin_hash,WeakMapGet,276986520
-builtin_hash,WeakMapPrototypeHas,-285904254
-builtin_hash,WeakMapPrototypeSet,629680419
-builtin_hash,WeakSetConstructor,-367435631
-builtin_hash,WeakSetPrototypeHas,-285904254
-builtin_hash,WeakSetPrototypeAdd,-301255294
-builtin_hash,WeakCollectionSet,217583952
-builtin_hash,AsyncGeneratorResolve,242317686
-builtin_hash,AsyncGeneratorYieldWithAwait,302667528
-builtin_hash,AsyncGeneratorResumeNext,-265907726
-builtin_hash,AsyncGeneratorPrototypeNext,-194499830
-builtin_hash,AsyncGeneratorAwaitUncaught,-398074132
-builtin_hash,AsyncGeneratorAwaitResolveClosure,-245656056
-builtin_hash,AsyncGeneratorYieldWithAwaitResolveClosure,-649252259
-builtin_hash,StringAdd_CheckNone,1037172071
-builtin_hash,SubString,-701927326
-builtin_hash,GetProperty,-433765894
-builtin_hash,GetPropertyWithReceiver,636771451
-builtin_hash,SetProperty,-985618808
-builtin_hash,CreateDataProperty,952942021
-builtin_hash,FindNonDefaultConstructorOrConstruct,1020851957
-builtin_hash,ArrayPrototypeConcat,-711562967
-builtin_hash,ArrayEvery,732127203
-builtin_hash,ArrayFilterLoopLazyDeoptContinuation,782264259
-builtin_hash,ArrayFilterLoopContinuation,292635770
-builtin_hash,ArrayFilter,-585622372
-builtin_hash,ArrayPrototypeFind,410534083
-builtin_hash,ArrayForEachLoopLazyDeoptContinuation,-299794382
-builtin_hash,ArrayForEachLoopContinuation,350033182
-builtin_hash,ArrayForEach,729108989
-builtin_hash,ArrayFrom,1055630901
-builtin_hash,ArrayIsArray,-970031738
-builtin_hash,LoadJoinElement_FastSmiOrObjectElements_0,228167807
-builtin_hash,LoadJoinElement_FastDoubleElements_0,580988969
-builtin_hash,JoinStackPush,751439150
-builtin_hash,JoinStackPop,128574663
-builtin_hash,ArrayPrototypeJoin,89295304
-builtin_hash,ArrayPrototypeToString,-66500098
-builtin_hash,ArrayPrototypeLastIndexOf,1073113005
-builtin_hash,ArrayMapLoopLazyDeoptContinuation,-47088981
-builtin_hash,ArrayMapLoopContinuation,-794603673
-builtin_hash,ArrayMap,-326417675
-builtin_hash,ArrayReduceLoopLazyDeoptContinuation,-1014597388
-builtin_hash,ArrayReduceLoopContinuation,-1067144759
-builtin_hash,ArrayReduce,-407776620
-builtin_hash,ArrayPrototypeReverse,-121874294
-builtin_hash,ArrayPrototypeShift,-928108750
-builtin_hash,ArrayPrototypeSlice,214735037
-builtin_hash,ArraySome,466290774
-builtin_hash,ArrayPrototypeSplice,1001942992
-builtin_hash,ArrayPrototypeUnshift,-1052845134
-builtin_hash,ArrayBufferPrototypeGetByteLength,445258508
-builtin_hash,ArrayBufferIsView,-78532109
-builtin_hash,ToInteger,-64770826
-builtin_hash,FastCreateDataProperty,-278611029
-builtin_hash,BooleanConstructor,-809457299
-builtin_hash,BooleanPrototypeToString,-798757106
-builtin_hash,ToString,436846720
-builtin_hash,StringPrototypeToString,-794700080
-builtin_hash,StringPrototypeValueOf,-794700080
-builtin_hash,StringPrototypeCharAt,915103217
-builtin_hash,StringPrototypeCharCodeAt,-272108096
-builtin_hash,StringPrototypeCodePointAt,-596824984
-builtin_hash,StringPrototypeConcat,-577571398
-builtin_hash,StringConstructor,-65593142
-builtin_hash,StringAddConvertLeft,51926197
-builtin_hash,StringAddConvertRight,115066033
-builtin_hash,StringCharAt,959950211
-builtin_hash,FastNewClosureBaseline,-532908706
-builtin_hash,FastNewFunctionContextFunction,977993537
-builtin_hash,CreateRegExpLiteral,64770172
-builtin_hash,CreateShallowArrayLiteral,866949735
-builtin_hash,CreateEmptyArrayLiteral,-862242730
-builtin_hash,CreateShallowObjectLiteral,991590480
-builtin_hash,ObjectConstructor,-384944316
-builtin_hash,CreateEmptyLiteralObject,-310219292
-builtin_hash,NumberConstructor,-974450450
-builtin_hash,StringToNumber,-446317754
-builtin_hash,NonNumberToNumber,504608456
-builtin_hash,NonNumberToNumeric,-570033562
-builtin_hash,ToNumeric,-772194204
-builtin_hash,NumberToString,674929388
-builtin_hash,ToBoolean,856538717
-builtin_hash,ToBooleanForBaselineJump,-446512949
-builtin_hash,ToLength,-155953797
-builtin_hash,ToName,645844037
-builtin_hash,ToObject,119745243
-builtin_hash,NonPrimitiveToPrimitive_Default,-151838227
-builtin_hash,NonPrimitiveToPrimitive_Number,-151838227
-builtin_hash,NonPrimitiveToPrimitive_String,-151838227
-builtin_hash,OrdinaryToPrimitive_Number,-337334591
-builtin_hash,OrdinaryToPrimitive_String,-337334591
-builtin_hash,DataViewPrototypeGetByteLength,750091486
-builtin_hash,DataViewPrototypeGetFloat64,544637297
-builtin_hash,DataViewPrototypeSetUint32,366892025
-builtin_hash,DataViewPrototypeSetFloat64,267831220
-builtin_hash,FunctionPrototypeHasInstance,-911487777
-builtin_hash,FastFunctionPrototypeBind,-29755211
-builtin_hash,ForInNext,547638943
-builtin_hash,GetIteratorWithFeedback,935596039
-builtin_hash,GetIteratorBaseline,-124236956
-builtin_hash,CallIteratorWithFeedback,174322508
-builtin_hash,MathAbs,111472406
-builtin_hash,MathCeil,466078480
-builtin_hash,MathFloor,-900013988
-builtin_hash,MathRound,981339685
-builtin_hash,MathPow,-432438626
-builtin_hash,MathMax,-914923816
-builtin_hash,MathMin,-435430851
-builtin_hash,MathAsin,-865319143
-builtin_hash,MathAtan2,-706534972
-builtin_hash,MathCos,705415335
-builtin_hash,MathExp,1065131032
-builtin_hash,MathFround,-135252655
-builtin_hash,MathImul,773832811
-builtin_hash,MathLog,540909033
-builtin_hash,MathSin,-688911662
-builtin_hash,MathSign,-523407079
-builtin_hash,MathSqrt,-794868693
-builtin_hash,MathTan,537052027
-builtin_hash,MathTanh,-300840302
-builtin_hash,MathRandom,966867537
-builtin_hash,NumberPrototypeToString,-382822730
-builtin_hash,NumberIsInteger,-693598207
-builtin_hash,NumberIsNaN,788813704
-builtin_hash,NumberParseFloat,-741561968
-builtin_hash,ParseInt,998287919
-builtin_hash,NumberParseInt,-382916138
-builtin_hash,Add,-136527337
-builtin_hash,Subtract,-213501900
-builtin_hash,Multiply,7472525
-builtin_hash,Divide,-344347312
-builtin_hash,Modulus,-582417614
-builtin_hash,CreateObjectWithoutProperties,339671006
-builtin_hash,ObjectIsExtensible,-329082141
-builtin_hash,ObjectPreventExtensions,940542631
-builtin_hash,ObjectGetPrototypeOf,157540923
-builtin_hash,ObjectSetPrototypeOf,187356384
-builtin_hash,ObjectPrototypeToString,-483254038
-builtin_hash,ObjectPrototypeValueOf,193287106
-builtin_hash,FulfillPromise,272197869
-builtin_hash,NewPromiseCapability,-508522709
-builtin_hash,PromiseCapabilityDefaultResolve,-402797269
-builtin_hash,PerformPromiseThen,330989248
-builtin_hash,PromiseAll,697437536
-builtin_hash,PromiseAllResolveElementClosure,-862999565
-builtin_hash,PromiseConstructor,762524591
-builtin_hash,PromisePrototypeCatch,756171957
-builtin_hash,PromiseFulfillReactionJob,-630924263
-builtin_hash,PromiseResolveTrampoline,-167249272
-builtin_hash,PromiseResolve,-412690059
-builtin_hash,ResolvePromise,756044362
-builtin_hash,PromisePrototypeThen,3713531
-builtin_hash,PromiseResolveThenableJob,-14213172
-builtin_hash,ProxyConstructor,459230341
-builtin_hash,ProxyGetProperty,1054163992
-builtin_hash,ProxyIsExtensible,308384776
-builtin_hash,ProxyPreventExtensions,399450299
-builtin_hash,ReflectGet,-434221017
-builtin_hash,ReflectHas,-167249272
-builtin_hash,RegExpPrototypeExec,963999476
-builtin_hash,RegExpMatchFast,384654261
-builtin_hash,RegExpReplace,-475275041
-builtin_hash,RegExpPrototypeReplace,860372377
-builtin_hash,RegExpSearchFast,907750005
-builtin_hash,RegExpPrototypeSourceGetter,-747085084
-builtin_hash,RegExpSplit,-607180644
-builtin_hash,RegExpPrototypeTest,-585829947
-builtin_hash,RegExpPrototypeTestFast,-1071276448
-builtin_hash,RegExpPrototypeGlobalGetter,-718555192
-builtin_hash,RegExpPrototypeIgnoreCaseGetter,1070990033
-builtin_hash,RegExpPrototypeMultilineGetter,216999873
-builtin_hash,RegExpPrototypeHasIndicesGetter,390292067
-builtin_hash,RegExpPrototypeDotAllGetter,390292067
-builtin_hash,RegExpPrototypeStickyGetter,1055105538
-builtin_hash,RegExpPrototypeUnicodeGetter,1055105538
-builtin_hash,RegExpPrototypeFlagsGetter,-646009057
-builtin_hash,StringPrototypeEndsWith,565371891
-builtin_hash,StringPrototypeIncludes,480948081
-builtin_hash,StringPrototypeIndexOf,619068194
-builtin_hash,StringPrototypeIterator,-532566456
-builtin_hash,StringIteratorPrototypeNext,-1034386014
-builtin_hash,StringPrototypeMatch,127768813
-builtin_hash,StringPrototypeSearch,127768813
-builtin_hash,StringRepeat,92491602
-builtin_hash,StringPrototypeSlice,111174165
-builtin_hash,StringPrototypeStartsWith,-951440779
-builtin_hash,StringPrototypeSubstr,716425893
-builtin_hash,StringPrototypeSubstring,769385864
-builtin_hash,StringPrototypeTrim,-151587513
-builtin_hash,SymbolPrototypeToString,697341238
-builtin_hash,CreateTypedArray,100324164
-builtin_hash,TypedArrayFrom,-508079252
-builtin_hash,TypedArrayPrototypeSet,241292735
-builtin_hash,TypedArrayPrototypeSubArray,-638094120
-builtin_hash,NewSloppyArgumentsElements,745494442
-builtin_hash,NewStrictArgumentsElements,-81425804
-builtin_hash,NewRestArgumentsElements,-823345459
-builtin_hash,FastNewSloppyArguments,-174863955
-builtin_hash,FastNewStrictArguments,-75939795
-builtin_hash,FastNewRestArguments,-680285498
-builtin_hash,StringSlowFlatten,108774605
-builtin_hash,StringIndexOf,119327941
-builtin_hash,Load_FastSmiElements_0,-418523514
-builtin_hash,Load_FastObjectElements_0,-418523514
-builtin_hash,Store_FastSmiElements_0,975980653
-builtin_hash,Store_FastObjectElements_0,311513691
-builtin_hash,SortCompareDefault,842664214
-builtin_hash,SortCompareUserFn,1059126141
-builtin_hash,Copy,-750738169
-builtin_hash,MergeAt,944447896
-builtin_hash,GallopLeft,368113946
-builtin_hash,GallopRight,186729557
-builtin_hash,ArrayTimSort,-475205137
-builtin_hash,ArrayPrototypeSort,-366911589
-builtin_hash,StringFastLocaleCompare,15452983
-builtin_hash,WasmInt32ToHeapNumber,751194511
-builtin_hash,WasmTaggedNonSmiToInt32,-202443862
-builtin_hash,WasmTriggerTierUp,-980759280
-builtin_hash,WasmStackGuard,-1024124053
-builtin_hash,CanUseSameAccessor_FastSmiElements_0,-756700379
-builtin_hash,CanUseSameAccessor_FastObjectElements_0,-756700379
-builtin_hash,StringPrototypeToLowerCaseIntl,-966367732
-builtin_hash,StringToLowerCaseIntl,-481509366
-builtin_hash,WideHandler,-298201266
-builtin_hash,ExtraWideHandler,-298201266
-builtin_hash,LdarHandler,-745598094
-builtin_hash,LdaZeroHandler,368748633
-builtin_hash,LdaSmiHandler,-545227529
-builtin_hash,LdaUndefinedHandler,1011673901
-builtin_hash,LdaNullHandler,1011673901
-builtin_hash,LdaTheHoleHandler,1011673901
-builtin_hash,LdaTrueHandler,827753247
-builtin_hash,LdaFalseHandler,827753247
-builtin_hash,LdaConstantHandler,407548785
-builtin_hash,LdaContextSlotHandler,506452989
-builtin_hash,LdaImmutableContextSlotHandler,506452989
-builtin_hash,LdaCurrentContextSlotHandler,327557270
-builtin_hash,LdaImmutableCurrentContextSlotHandler,327557270
-builtin_hash,StarHandler,305217552
-builtin_hash,MovHandler,-283701884
-builtin_hash,PushContextHandler,177425195
-builtin_hash,PopContextHandler,-1044986385
-builtin_hash,TestReferenceEqualHandler,-651544719
-builtin_hash,TestUndetectableHandler,-830971105
-builtin_hash,TestNullHandler,1005522396
-builtin_hash,TestUndefinedHandler,1005522396
-builtin_hash,TestTypeOfHandler,-1028477858
-builtin_hash,LdaGlobalHandler,965344129
-builtin_hash,LdaGlobalInsideTypeofHandler,585777250
-builtin_hash,StaGlobalHandler,1056951542
-builtin_hash,StaContextSlotHandler,-675927710
-builtin_hash,StaCurrentContextSlotHandler,-997669083
-builtin_hash,LdaLookupGlobalSlotHandler,-84752131
-builtin_hash,LdaLookupGlobalSlotInsideTypeofHandler,49834142
-builtin_hash,StaLookupSlotHandler,-381579342
-builtin_hash,GetNamedPropertyHandler,-27764824
-builtin_hash,GetNamedPropertyFromSuperHandler,-724989944
-builtin_hash,GetKeyedPropertyHandler,-56635454
-builtin_hash,SetNamedPropertyHandler,448782548
-builtin_hash,DefineNamedOwnPropertyHandler,448782548
-builtin_hash,SetKeyedPropertyHandler,941278116
-builtin_hash,DefineKeyedOwnPropertyHandler,941278116
-builtin_hash,StaInArrayLiteralHandler,941278116
-builtin_hash,DefineKeyedOwnPropertyInLiteralHandler,1045494813
-builtin_hash,AddHandler,-518783725
-builtin_hash,SubHandler,505104408
-builtin_hash,MulHandler,-222850853
-builtin_hash,DivHandler,-1028262634
-builtin_hash,ModHandler,143526297
-builtin_hash,ExpHandler,-727777022
-builtin_hash,BitwiseOrHandler,-522781712
-builtin_hash,BitwiseXorHandler,-419955523
-builtin_hash,BitwiseAndHandler,530208341
-builtin_hash,ShiftLeftHandler,-804444955
-builtin_hash,ShiftRightHandler,-104335215
-builtin_hash,ShiftRightLogicalHandler,1050635494
-builtin_hash,AddSmiHandler,-161508067
-builtin_hash,SubSmiHandler,-609360326
-builtin_hash,MulSmiHandler,282822605
-builtin_hash,DivSmiHandler,292906952
-builtin_hash,ModSmiHandler,-917212490
-builtin_hash,BitwiseOrSmiHandler,172148322
-builtin_hash,BitwiseXorSmiHandler,1046550901
-builtin_hash,BitwiseAndSmiHandler,-808862341
-builtin_hash,ShiftLeftSmiHandler,862845296
-builtin_hash,ShiftRightSmiHandler,183483372
-builtin_hash,ShiftRightLogicalSmiHandler,31369673
-builtin_hash,IncHandler,-318834355
-builtin_hash,DecHandler,938496699
-builtin_hash,NegateHandler,-590726041
-builtin_hash,BitwiseNotHandler,322709376
-builtin_hash,ToBooleanLogicalNotHandler,-972724513
-builtin_hash,LogicalNotHandler,-706273800
-builtin_hash,TypeOfHandler,-751823
-builtin_hash,DeletePropertyStrictHandler,-724253277
-builtin_hash,DeletePropertySloppyHandler,-476722269
-builtin_hash,FindNonDefaultConstructorOrConstructHandler,-746857468
-builtin_hash,CallAnyReceiverHandler,87393745
-builtin_hash,CallPropertyHandler,87393745
-builtin_hash,CallProperty0Handler,956548008
-builtin_hash,CallProperty1Handler,-471075746
-builtin_hash,CallProperty2Handler,-1043814952
-builtin_hash,CallUndefinedReceiverHandler,126620186
-builtin_hash,CallUndefinedReceiver0Handler,-286191860
-builtin_hash,CallUndefinedReceiver1Handler,-357856703
-builtin_hash,CallUndefinedReceiver2Handler,798828847
-builtin_hash,CallWithSpreadHandler,87393745
-builtin_hash,CallRuntimeHandler,624123308
-builtin_hash,CallJSRuntimeHandler,1005113218
-builtin_hash,InvokeIntrinsicHandler,-566159390
-builtin_hash,ConstructHandler,543386518
-builtin_hash,ConstructWithSpreadHandler,595837553
-builtin_hash,TestEqualHandler,-157366914
-builtin_hash,TestEqualStrictHandler,998643852
-builtin_hash,TestLessThanHandler,1046936290
-builtin_hash,TestGreaterThanHandler,-369508260
-builtin_hash,TestLessThanOrEqualHandler,-412750652
-builtin_hash,TestGreaterThanOrEqualHandler,-364267636
-builtin_hash,TestInstanceOfHandler,-607728916
-builtin_hash,TestInHandler,539847065
-builtin_hash,ToNameHandler,701699245
-builtin_hash,ToNumberHandler,-512585428
-builtin_hash,ToNumericHandler,459707132
-builtin_hash,ToObjectHandler,701699245
-builtin_hash,ToStringHandler,620423288
-builtin_hash,CreateRegExpLiteralHandler,848340986
-builtin_hash,CreateArrayLiteralHandler,101333771
-builtin_hash,CreateArrayFromIterableHandler,-18783057
-builtin_hash,CreateEmptyArrayLiteralHandler,-289337896
-builtin_hash,CreateObjectLiteralHandler,-711473910
-builtin_hash,CreateEmptyObjectLiteralHandler,795228443
-builtin_hash,CreateClosureHandler,877324634
-builtin_hash,CreateBlockContextHandler,-344466857
-builtin_hash,CreateCatchContextHandler,-214012965
-builtin_hash,CreateFunctionContextHandler,729147868
-builtin_hash,CreateMappedArgumentsHandler,-124182926
-builtin_hash,CreateUnmappedArgumentsHandler,758781228
-builtin_hash,CreateRestParameterHandler,-10099522
-builtin_hash,JumpLoopHandler,-166037043
-builtin_hash,JumpHandler,-79617432
-builtin_hash,JumpConstantHandler,906507762
-builtin_hash,JumpIfUndefinedConstantHandler,250257394
-builtin_hash,JumpIfNotUndefinedConstantHandler,-587816710
-builtin_hash,JumpIfUndefinedOrNullConstantHandler,53751011
-builtin_hash,JumpIfTrueConstantHandler,250257394
-builtin_hash,JumpIfFalseConstantHandler,250257394
-builtin_hash,JumpIfToBooleanTrueConstantHandler,15176103
-builtin_hash,JumpIfToBooleanFalseConstantHandler,422983862
-builtin_hash,JumpIfToBooleanTrueHandler,635201116
-builtin_hash,JumpIfToBooleanFalseHandler,408147223
-builtin_hash,JumpIfTrueHandler,801953084
-builtin_hash,JumpIfFalseHandler,801953084
-builtin_hash,JumpIfNullHandler,801953084
-builtin_hash,JumpIfNotNullHandler,1026829001
-builtin_hash,JumpIfUndefinedHandler,801953084
-builtin_hash,JumpIfNotUndefinedHandler,1026829001
-builtin_hash,JumpIfUndefinedOrNullHandler,1021601552
-builtin_hash,JumpIfJSReceiverHandler,65469341
-builtin_hash,SwitchOnSmiNoFeedbackHandler,807681990
-builtin_hash,ForInEnumerateHandler,510063374
-builtin_hash,ForInPrepareHandler,630466074
-builtin_hash,ForInContinueHandler,-691562887
-builtin_hash,ForInNextHandler,512834227
-builtin_hash,ForInStepHandler,942618821
-builtin_hash,SetPendingMessageHandler,401946975
-builtin_hash,ThrowHandler,50431783
-builtin_hash,ReThrowHandler,50431783
-builtin_hash,ReturnHandler,-117530186
-builtin_hash,ThrowReferenceErrorIfHoleHandler,512852920
-builtin_hash,ThrowSuperNotCalledIfHoleHandler,717642155
-builtin_hash,ThrowSuperAlreadyCalledIfNotHoleHandler,-546144205
-builtin_hash,ThrowIfNotSuperConstructorHandler,-460002303
-builtin_hash,SwitchOnGeneratorStateHandler,10710931
-builtin_hash,SuspendGeneratorHandler,-500612975
-builtin_hash,ResumeGeneratorHandler,1068636019
-builtin_hash,GetIteratorHandler,-71006498
-builtin_hash,ShortStarHandler,356943682
-builtin_hash,LdarWideHandler,-249230336
-builtin_hash,LdaSmiWideHandler,-31881096
-builtin_hash,LdaConstantWideHandler,-758989820
-builtin_hash,LdaContextSlotWideHandler,687146226
-builtin_hash,LdaImmutableContextSlotWideHandler,687146226
-builtin_hash,LdaImmutableCurrentContextSlotWideHandler,-836770052
-builtin_hash,StarWideHandler,501248040
-builtin_hash,MovWideHandler,-871657303
-builtin_hash,PushContextWideHandler,844522230
-builtin_hash,PopContextWideHandler,744748597
-builtin_hash,TestReferenceEqualWideHandler,-118913544
-builtin_hash,LdaGlobalWideHandler,-661487412
-builtin_hash,LdaGlobalInsideTypeofWideHandler,-572343212
-builtin_hash,StaGlobalWideHandler,555909381
-builtin_hash,StaContextSlotWideHandler,478877471
-builtin_hash,StaCurrentContextSlotWideHandler,-615279276
-builtin_hash,LdaLookupGlobalSlotWideHandler,-1002268065
-builtin_hash,GetNamedPropertyWideHandler,-241462706
-builtin_hash,GetKeyedPropertyWideHandler,641533107
-builtin_hash,SetNamedPropertyWideHandler,-58064714
-builtin_hash,DefineNamedOwnPropertyWideHandler,-58064714
-builtin_hash,SetKeyedPropertyWideHandler,686171362
-builtin_hash,DefineKeyedOwnPropertyWideHandler,686171362
-builtin_hash,StaInArrayLiteralWideHandler,686171362
-builtin_hash,AddWideHandler,-617481681
-builtin_hash,SubWideHandler,145242966
-builtin_hash,MulWideHandler,166175890
-builtin_hash,DivWideHandler,829768719
-builtin_hash,BitwiseOrWideHandler,-671352735
-builtin_hash,BitwiseAndWideHandler,-748389668
-builtin_hash,ShiftLeftWideHandler,-722355824
-builtin_hash,AddSmiWideHandler,-503151286
-builtin_hash,SubSmiWideHandler,266762310
-builtin_hash,MulSmiWideHandler,767307001
-builtin_hash,DivSmiWideHandler,1050619977
-builtin_hash,ModSmiWideHandler,-653636504
-builtin_hash,BitwiseOrSmiWideHandler,905206733
-builtin_hash,BitwiseXorSmiWideHandler,1044063990
-builtin_hash,BitwiseAndSmiWideHandler,-376485258
-builtin_hash,ShiftLeftSmiWideHandler,-1004091795
-builtin_hash,ShiftRightSmiWideHandler,-397666497
-builtin_hash,ShiftRightLogicalSmiWideHandler,54662547
-builtin_hash,IncWideHandler,331971916
-builtin_hash,DecWideHandler,279024516
-builtin_hash,NegateWideHandler,-781916260
-builtin_hash,CallPropertyWideHandler,-998392170
-builtin_hash,CallProperty0WideHandler,54487119
-builtin_hash,CallProperty1WideHandler,-147592428
-builtin_hash,CallProperty2WideHandler,-58614287
-builtin_hash,CallUndefinedReceiverWideHandler,400495181
-builtin_hash,CallUndefinedReceiver0WideHandler,-1000686597
-builtin_hash,CallUndefinedReceiver1WideHandler,-299347389
-builtin_hash,CallUndefinedReceiver2WideHandler,525189648
-builtin_hash,CallWithSpreadWideHandler,-998392170
-builtin_hash,ConstructWideHandler,193926631
-builtin_hash,TestEqualWideHandler,-797631551
-builtin_hash,TestEqualStrictWideHandler,753248660
-builtin_hash,TestLessThanWideHandler,-210582608
-builtin_hash,TestGreaterThanWideHandler,543018087
-builtin_hash,TestLessThanOrEqualWideHandler,-1053789276
-builtin_hash,TestGreaterThanOrEqualWideHandler,-582678107
-builtin_hash,TestInstanceOfWideHandler,-280937039
-builtin_hash,TestInWideHandler,817647574
-builtin_hash,ToNumericWideHandler,868695670
-builtin_hash,CreateRegExpLiteralWideHandler,-1006765965
-builtin_hash,CreateArrayLiteralWideHandler,-826485513
-builtin_hash,CreateEmptyArrayLiteralWideHandler,559300434
-builtin_hash,CreateObjectLiteralWideHandler,455963528
-builtin_hash,CreateClosureWideHandler,1061873155
-builtin_hash,CreateBlockContextWideHandler,271729622
-builtin_hash,CreateFunctionContextWideHandler,527181803
-builtin_hash,JumpLoopWideHandler,941891518
-builtin_hash,JumpWideHandler,-79617432
-builtin_hash,JumpIfToBooleanTrueWideHandler,923993949
-builtin_hash,JumpIfToBooleanFalseWideHandler,145370961
-builtin_hash,JumpIfTrueWideHandler,-1042889789
-builtin_hash,JumpIfFalseWideHandler,-1042889789
-builtin_hash,SwitchOnSmiNoFeedbackWideHandler,-773907277
-builtin_hash,ForInPrepareWideHandler,-483036360
-builtin_hash,ForInNextWideHandler,-173595160
-builtin_hash,ThrowReferenceErrorIfHoleWideHandler,-254407930
-builtin_hash,GetIteratorWideHandler,-412149326
-builtin_hash,LdaSmiExtraWideHandler,65806156
-builtin_hash,LdaGlobalExtraWideHandler,411460668
-builtin_hash,AddSmiExtraWideHandler,553152400
-builtin_hash,SubSmiExtraWideHandler,446395338
-builtin_hash,MulSmiExtraWideHandler,105494980
-builtin_hash,DivSmiExtraWideHandler,-317292269
-builtin_hash,BitwiseOrSmiExtraWideHandler,604681516
-builtin_hash,BitwiseXorSmiExtraWideHandler,-91329781
-builtin_hash,BitwiseAndSmiExtraWideHandler,150048166
-builtin_hash,CallUndefinedReceiverExtraWideHandler,423421950
-builtin_hash,CallUndefinedReceiver1ExtraWideHandler,168432499
-builtin_hash,CallUndefinedReceiver2ExtraWideHandler,524973830
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index d67c287959..0664d5a6ff 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1945,7 +1945,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
Group.groups.get('gc-background').entry(),
Group.groups.get('gc').entry(),
Group.groups.get('javascript').entry(),
- Group.groups.get('websnapshot').entry(),
Group.groups.get('runtime').entry(),
this.unclassified
];
@@ -2282,7 +2281,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
new Group('GC', /GC_.*|AllocateInTargetSpace|GC/, "#00799c"));
Group.add('javascript',
new Group('JavaScript', /JS_Execution|JavaScript/, "#DD4477"));
- Group.add('websnapshot', new Group('WebSnapshot', /.*Web.*/, "#E8E11C"));
Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
Group.add('blink',
new Group('Blink RCS', /.*Blink_.*/, "#006600", false, false));
diff --git a/deps/v8/tools/callstats_groups.py b/deps/v8/tools/callstats_groups.py
index cd8712d499..62898c61c8 100644
--- a/deps/v8/tools/callstats_groups.py
+++ b/deps/v8/tools/callstats_groups.py
@@ -19,5 +19,4 @@ RUNTIME_CALL_STATS_GROUPS = [
('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
('Group-JavaScript', re.compile("JS_Execution")),
- ('Group-WebSnapshot', re.compile("WebSnapshot.*")),
('Group-Runtime', re.compile(".*"))]
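
The RUNTIME_CALL_STATS_GROUPS list above ends with a catch-all ".*" pattern, which suggests first-match classification of runtime-call-stats entry names. A minimal sketch of that assumption follows; it is not the actual callstats tooling, just an illustration of how such an ordered (name, regex) list could be applied.

import re

# Assumed first-match classification over (name, pattern) pairs, mirroring
# the shape of RUNTIME_CALL_STATS_GROUPS in callstats_groups.py above.
# This is an illustrative sketch, not the real V8 tooling.
GROUPS = [
    ('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
    ('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
    ('Group-JavaScript', re.compile("JS_Execution")),
    ('Group-Runtime', re.compile(".*")),  # catch-all must stay last
]

def classify(entry_name):
  """Return the name of the first group whose pattern matches."""
  for group, pattern in GROUPS:
    if pattern.match(entry_name):
      return group
  return None

print(classify("GC_MC_BACKGROUND_MARKING"))  # -> 'Group-GC-Background'
print(classify("JS_Execution"))              # -> 'Group-JavaScript'

Under this sketch the order of the list matters: removing a group (as the WebSnapshot entry is removed above) simply lets its entries fall through to the catch-all 'Group-Runtime'.
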
diff --git a/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt
index 9d00ca4586..8be71d9db2 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output.txt
@@ -9,7 +9,7 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up --flag1 --flag2=0
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up --no-maglev --flag1 --flag2=0
# Flags of x64,ignition_turbo:
--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --flag3
#
diff --git a/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt
index 9f6f9edad4..1d75ba592b 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_arch.txt
@@ -9,7 +9,7 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up --no-maglev
# Flags of x64,ignition_turbo:
--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --bad-flag
#
diff --git a/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt
index 6deccfb8fa..2a8097fb39 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/failure_output_second.txt
@@ -9,7 +9,7 @@
# Compared x64,ignition with ia32,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up --no-maglev
# Flags of ia32,ignition_turbo:
--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --very-bad-flag
#
diff --git a/deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt b/deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt
index e322801c9f..11aa471e52 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt
+++ b/deps/v8/tools/clusterfuzz/foozzie/testdata/smoke_test_output.txt
@@ -9,7 +9,7 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --no-turbofan --no-sparkplug --liftoff --no-wasm-tier-up --no-maglev
# Flags of x64,ignition_turbo:
--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --harmony --wasm-staging --no-wasm-async-compilation --wasm-max-mem-pages=32767 --suppress-asm-messages --random-seed 12345
#
diff --git a/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
index a693745725..a24a2dce23 100755
--- a/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_foozzie.py
@@ -35,6 +35,7 @@ CONFIGS = dict(
'--no-sparkplug',
'--liftoff',
'--no-wasm-tier-up',
+ '--no-maglev',
],
ignition_asm=[
'--turbo-filter=~',
@@ -42,6 +43,7 @@ CONFIGS = dict(
'--no-sparkplug',
'--validate-asm',
'--stress-validate-asm',
+ '--no-maglev',
],
ignition_eager=[
'--turbo-filter=~',
@@ -49,6 +51,7 @@ CONFIGS = dict(
'--no-sparkplug',
'--no-lazy',
'--no-lazy-inner-functions',
+ '--no-maglev',
],
ignition_no_ic=[
'--turbo-filter=~',
@@ -58,6 +61,7 @@ CONFIGS = dict(
'--no-wasm-tier-up',
'--no-use-ic',
'--no-lazy-feedback-allocation',
+ '--no-maglev',
],
ignition_turbo=[],
ignition_turbo_no_ic=[
@@ -72,6 +76,11 @@ CONFIGS = dict(
'--no-lazy',
'--no-lazy-inner-functions',
],
+ ignition_maglev=[
+ '--maglev',
+ '--turbo-filter=~',
+ '--no-turbofan',
+ ],
jitless=[
'--jitless',
],
@@ -172,11 +181,11 @@ KNOWN_FAILURES = {
# Flags that are already crashy during smoke tests should not be used.
DISALLOWED_FLAGS = [
- # TODO(https://crbug.com/1324097): Enable once maglev is more stable.
- '--maglev',
-
# Bails out when sorting, leading to differences in sorted output.
'--multi-mapped-mock-allocator',
+
+ # TODO(https://crbug.com/1393020): Changes the global object.
+ '--harmony-struct',
]
# List pairs of flags that lead to contradictory cycles, i.e.:
@@ -186,6 +195,9 @@ DISALLOWED_FLAGS = [
CONTRADICTORY_FLAGS = [
('--always-turbofan', '--jitless'),
('--assert-types', '--stress-concurrent-inlining'),
+ ('--assert-types', '--stress-concurrent-inlining-attach-code'),
+ ('--jitless', '--stress-concurrent-inlining'),
+ ('--jitless', '--stress-concurrent-inlining-attach-code'),
]
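
The hunk above extends CONTRADICTORY_FLAGS with pairs that must not be combined (e.g. --jitless with --stress-concurrent-inlining). As a minimal sketch of how such pairs could be used to sanitize a randomly assembled flag set, the helper below drops the second flag of each pair when the first is present; the helper name and logic are hypothetical and are not the actual v8_foozzie.py implementation.

# Hypothetical sketch: resolve contradictory flag pairs by dropping the
# second flag of each pair when the first one is already selected.
CONTRADICTORY_FLAGS = [
    ('--always-turbofan', '--jitless'),
    ('--assert-types', '--stress-concurrent-inlining'),
    ('--assert-types', '--stress-concurrent-inlining-attach-code'),
    ('--jitless', '--stress-concurrent-inlining'),
    ('--jitless', '--stress-concurrent-inlining-attach-code'),
]

def resolve_contradictions(flags):
  """Return a copy of flags with the second half of each conflicting pair removed."""
  flags = list(flags)
  for first, second in CONTRADICTORY_FLAGS:
    if first in flags and second in flags:
      flags.remove(second)
  return flags

print(resolve_contradictions(['--jitless', '--stress-concurrent-inlining']))
# -> ['--jitless'] under this sketch
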
diff --git a/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json
index 8c6baa2496..ff38b5f711 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_experiments.json
@@ -1,15 +1,16 @@
[
- [15, "ignition", "jitless", "d8"],
+ [10, "ignition", "jitless", "d8"],
[10, "ignition", "slow_path", "d8"],
- [10, "ignition_no_ic", "slow_path", "d8"],
+ [5, "ignition_no_ic", "slow_path", "d8"],
[5, "ignition", "slow_path_opt", "d8"],
[5, "ignition", "ignition_turbo_no_ic", "d8"],
- [20, "ignition", "ignition_turbo_opt", "d8"],
+ [15, "ignition", "ignition_turbo_opt", "d8"],
[5, "ignition_no_ic", "ignition_turbo_opt", "d8"],
+ [15, "ignition", "ignition_maglev", "d8"],
[5, "ignition_turbo", "ignition_turbo_opt", "clang_x64_pointer_compression/d8"],
[5, "ignition", "ignition", "clang_x86/d8"],
[5, "ignition", "ignition_turbo_opt", "clang_x86/d8"],
[5, "ignition", "slow_path", "clang_x86/d8"],
[5, "ignition", "ignition_turbo_opt", "clang_x64_v8_arm64/d8"],
[5, "ignition", "ignition_turbo_opt", "clang_x86_v8_arm/d8"]
-] \ No newline at end of file
+]
diff --git a/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json
index 8c882c0bd5..e164b218c6 100644
--- a/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json
+++ b/deps/v8/tools/clusterfuzz/foozzie/v8_fuzz_flags.json
@@ -29,7 +29,9 @@
[0.1, "--no-lazy-feedback-allocation --interrupt-budget=100"],
[0.05, "--interrupt-budget-for-feedback-allocation=0"],
[0.1, "--no-wasm-generic-wrapper"],
+ [0.2, "--turboshaft"],
[0.1, "--turbo-force-mid-tier-regalloc"],
[0.0001, "--simulate-errors"],
- [0.1, "--minor-mc"]
+ [0.1, "--minor-mc"],
+ [0.1, "--shared-string-table"]
]
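
Each entry in v8_fuzz_flags.json above pairs a probability with an extra flag string (e.g. 0.2 for --turboshaft, 0.1 for --shared-string-table). A minimal sketch of how such weighted entries could be sampled is shown below; the function is hypothetical and does not reproduce the fuzzer's actual sampling code.

import json
import random

# Hypothetical sketch: pick each extra-flag entry independently with its
# listed probability. Not the actual clusterfuzz/foozzie sampling logic.
def sample_extra_flags(fuzz_flags_json, rng=random.random):
  chosen = []
  for probability, flags in json.loads(fuzz_flags_json):
    if rng() < probability:
      chosen.extend(flags.split())
  return chosen

# Example input mirroring the format of the entries added above.
example = '[[0.2, "--turboshaft"], [0.1, "--shared-string-table"]]'
print(sample_extra_flags(example))
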
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh b/deps/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh
index b2a90b8bd2..b2a90b8bd2 100644..100755
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js b/deps/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js
index 5b4bd7c883..3fd6629c9b 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js
@@ -15,29 +15,44 @@ const skipped = [
"v8/test/mjsunit/es6/sloppy-restrictive-block-function.js",
"v8/test/mjsunit/es7/exponentiation-operator.js",
"v8/test/mjsunit/harmony/bigint/property-names.js",
+ "v8/test/mjsunit/harmony/class-static-blocks.js",
"v8/test/mjsunit/harmony/import-from-compilation-errored.js",
"v8/test/mjsunit/harmony/import-from-evaluation-errored.js",
"v8/test/mjsunit/harmony/import-from-fetch-errored.js",
"v8/test/mjsunit/harmony/import-from-instantiation-errored.js",
+ "v8/test/mjsunit/harmony/logical-assignment-function-name.js",
+ "v8/test/mjsunit/harmony/logical-assignment.js",
"v8/test/mjsunit/harmony/numeric-separator.js",
+ "v8/test/mjsunit/harmony/optional-chaining-this-private.js",
+ "v8/test/mjsunit/harmony/private-brand-checks.js",
"v8/test/mjsunit/harmony/private-fields-special-object.js",
+ "v8/test/mjsunit/harmony/regexp-match-indices.js",
"v8/test/mjsunit/html-comments.js",
"v8/test/mjsunit/ignition/dead-code-source-position.js",
+ "v8/test/mjsunit/regexp-linear-flag.js",
"v8/test/mjsunit/regress/regress-436893.js",
"v8/test/mjsunit/regress/regress-5692.js",
"v8/test/mjsunit/regress/regress-740694.js",
"v8/test/mjsunit/regress/regress-744292.js",
"v8/test/mjsunit/regress/regress-797581.js",
"v8/test/mjsunit/regress/regress-800651.js",
+ "v8/test/mjsunit/regress/regress-crbug-1238467.js",
+ "v8/test/mjsunit/regress/regress-crbug-1239907.js",
"v8/test/mjsunit/regress/regress-crbug-412208.js",
"v8/test/mjsunit/regress/regress-crbug-934166.js",
+ "v8/test/mjsunit/regress/regress-v8-10604.js",
"v8/test/mjsunit/serialize-embedded-error.js",
"v8/test/mjsunit/switch.js",
+ "v8/test/mjsunit/temporal/calendar-merge-fields.js",
+ "v8/test/mjsunit/temporal/calendar-year-month-from-fields.js",
+ "v8/test/mjsunit/temporal/plain-date-time-from.js",
/* Tests with mutation errors from v8 */
"v8/test/mjsunit/harmony/private-accessors.js",
"v8/test/mjsunit/harmony/private-methods-empty-inner.js",
"v8/test/mjsunit/harmony/private-methods.js",
"v8/test/mjsunit/harmony/static-private-methods.js",
+ "v8/test/mjsunit/regress/regress-crbug-1245870.js",
+ "v8/test/mjsunit/regress/regress-v8-11360.js",
/* Very slow tests from v8 */
"v8/test/mjsunit/regress/wasm/regress-810973b.js", // 12.121s
/* Tests with parse errors from spidermonkey */
@@ -56,11 +71,19 @@ const skipped = [
"spidermonkey/non262/destructuring/yield-in-object-destr-script.js",
"spidermonkey/non262/destructuring/yield-with-escape-in-object-destr-function.js",
"spidermonkey/non262/destructuring/yield-with-escape-in-object-destr-script.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-anon-fns.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-const.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-deleted-decl-binding.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-property-key-evaluation.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-scope-lookup.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-tdz.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment.js",
"spidermonkey/non262/extensions/inc-dec-functioncall.js",
"spidermonkey/non262/extensions/regress-406572.js",
"spidermonkey/non262/fields/await-identifier-module-1.js",
"spidermonkey/non262/fields/await-identifier-module-2.js",
"spidermonkey/non262/fields/await-identifier-script.js",
+ "spidermonkey/non262/fields/numeric-fields.js",
"spidermonkey/non262/generators/syntax.js",
"spidermonkey/non262/generators/yield-non-regexp.js",
"spidermonkey/non262/jit/regress-451673.js",
@@ -68,6 +91,13 @@ const skipped = [
"spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-parameter.js",
"spidermonkey/non262/module/bug1488117-import-namespace.js",
"spidermonkey/non262/module/bug1488117.js",
+ "spidermonkey/non262/module/bug1689499-a.js",
+ "spidermonkey/non262/module/bug1689499-b.js",
+ "spidermonkey/non262/module/bug1689499-c.js",
+ "spidermonkey/non262/module/bug1689499-x.js",
+ "spidermonkey/non262/module/bug1689499.js",
+ "spidermonkey/non262/module/bug1693261.js",
+ "spidermonkey/non262/module/module-export-name-star.js",
"spidermonkey/non262/regress/regress-243389-n.js",
"spidermonkey/non262/regress/regress-319391.js",
"spidermonkey/non262/regress/regress-350253.js",
@@ -89,8 +119,10 @@ const skipped = [
"spidermonkey/test/fixtures/export/multi-header.js",
"spidermonkey/test/fixtures/export/reftest-error-syntaxerror.js",
"spidermonkey/test/fixtures/export/regular.js",
+ /* Tests with mutation errors from spidermonkey */
+ "spidermonkey/non262/PrivateName/home-object-when-preceded-by-computed-key.js",
/* Very slow tests from spidermonkey */
- "spidermonkey/non262/regress/regress-308085.js", // 14.519s
+ "spidermonkey/non262/regress/regress-308085.js", // 17.41s
/* Tests with parse errors from chakra */
"chakra/Basics/keyword.js",
"chakra/Bugs/OS_4341640.js",
@@ -182,6 +214,7 @@ const skipped = [
"chakra/es6module/otherModule.js",
"chakra/es6module/passmodule.js",
"chakra/es6module/testDynamicImportfromModule.js",
+ "chakra/es6module/top-level-await.js",
"chakra/es7/json_superset.js",
"chakra/inlining/bug_gh6303.js",
"chakra/inlining/profilingbug.js",
@@ -204,14 +237,19 @@ const skipped = [
"chakra/es6module/moduletest1.js",
"chakra/es6module/moduletest2.js",
/* Very slow tests from chakra */
- "chakra/benchmarks/ARES-6/Air/payload-imaging-gaussian-blur-gaussianBlur.js", // 21.782s
- "chakra/benchmarks/ARES-6/Air/payload-gbemu-executeIteration.js", // 18.461s
+ "chakra/benchmarks/ARES-6/Air/payload-imaging-gaussian-blur-gaussianBlur.js", // 22.525s
+ "chakra/benchmarks/ARES-6/Air/payload-gbemu-executeIteration.js", // 19.993s
/* Tests with parse errors from jstests */
"WebKit/JSTests/es6/non-strict_function_semantics_function_statements_in_if-statement_clauses.js",
+ "WebKit/JSTests/stress/allocation-sinking-changing-structures.js",
"WebKit/JSTests/stress/arrowfunction-lexical-bind-this-8.js",
"WebKit/JSTests/stress/big-int-as-property-name.js",
+ "WebKit/JSTests/stress/bytecode-for-rmw-with-invalid-right-side.js",
+ "WebKit/JSTests/stress/escaped-keyword-identifiers.js",
"WebKit/JSTests/stress/for-let-comma.js",
+ "WebKit/JSTests/stress/global-lexical-environment-access-from-module.js",
"WebKit/JSTests/stress/import-basic.js",
+ "WebKit/JSTests/stress/import-exception.js",
"WebKit/JSTests/stress/import-from-eval.js",
"WebKit/JSTests/stress/import-reject-with-exception.js",
"WebKit/JSTests/stress/import-tests/cocoa.js",
@@ -219,21 +257,84 @@ const skipped = [
"WebKit/JSTests/stress/import-tests/multiple2.js",
"WebKit/JSTests/stress/import-tests/should.js",
"WebKit/JSTests/stress/import-with-empty-string.js",
+ "WebKit/JSTests/stress/logical-assignment-operator-and.js",
+ "WebKit/JSTests/stress/logical-assignment-operator-coalesce.js",
+ "WebKit/JSTests/stress/logical-assignment-operator-or.js",
"WebKit/JSTests/stress/module-namespace-access-change.js",
"WebKit/JSTests/stress/module-namespace-access-non-constant.js",
"WebKit/JSTests/stress/module-namespace-access-poly.js",
+ "WebKit/JSTests/stress/module-namespace-access-set-fails.js",
"WebKit/JSTests/stress/module-namespace-access-transitive-exports.js",
"WebKit/JSTests/stress/module-namespace-access.js",
+ "WebKit/JSTests/stress/module-namespace-object-caching.js",
+ "WebKit/JSTests/stress/optional-chaining-and-private-fields.js",
"WebKit/JSTests/stress/printableModuleKey-should-never-throw.js",
+ "WebKit/JSTests/stress/private-in-error.js",
+ "WebKit/JSTests/stress/private-in.js",
"WebKit/JSTests/stress/re-execute-error-module.js",
"WebKit/JSTests/stress/regress-170732.js",
"WebKit/JSTests/stress/regress-191856.js",
"WebKit/JSTests/stress/resources/error-module.js",
+ "WebKit/JSTests/stress/resources/global-lexical-environment-access-from-module-child.js",
"WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports-2.js",
"WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports.js",
"WebKit/JSTests/stress/resources/module-namespace-access.js",
+ "WebKit/JSTests/stress/resources/shadow-realm-example-module.js",
+ "WebKit/JSTests/stress/resources/to-string-module.js",
+ "WebKit/JSTests/stress/resources/value-of-module.js",
+ "WebKit/JSTests/stress/shadow-realm-import-value.js",
"WebKit/JSTests/stress/sloppy-mode-function-hoisting.js",
+ "WebKit/JSTests/stress/terminated-execution-error-in-promise.js",
"WebKit/JSTests/stress/yield-label.js",
+ /* Tests with mutation errors from jstests */
+ "WebKit/JSTests/stress/BrandedStructure-should-keep-its-members-alive.js",
+ "WebKit/JSTests/stress/class-private-method-access.js",
+ "WebKit/JSTests/stress/declared-private-field-in-eval.js",
+ "WebKit/JSTests/stress/private-access-nested-eval.js",
+ "WebKit/JSTests/stress/private-access-nested.js",
+ "WebKit/JSTests/stress/private-brand-installed-after-super-call-from-arrow-function.js",
+ "WebKit/JSTests/stress/private-brand-installed-after-super-call-from-eval.js",
+ "WebKit/JSTests/stress/private-getter-brand-check.js",
+ "WebKit/JSTests/stress/private-getter-inner-class.js",
+ "WebKit/JSTests/stress/private-members-get-and-set.js",
+ "WebKit/JSTests/stress/private-method-brand-check.js",
+ "WebKit/JSTests/stress/private-method-change-attribute-from-branded-structure.js",
+ "WebKit/JSTests/stress/private-method-change-prototype-from-branded-structure.js",
+ "WebKit/JSTests/stress/private-method-check-private-brand-ic.js",
+ "WebKit/JSTests/stress/private-method-check-structure-miss.js",
+ "WebKit/JSTests/stress/private-method-comparison.js",
+ "WebKit/JSTests/stress/private-method-delete-property-from-branded-structure.js",
+ "WebKit/JSTests/stress/private-method-extends-brand-check.js",
+ "WebKit/JSTests/stress/private-method-get-and-call.js",
+ "WebKit/JSTests/stress/private-method-invalid-multiple-brand-installation.js",
+ "WebKit/JSTests/stress/private-method-invalidate-compiled-with-constant-symbol.js",
+ "WebKit/JSTests/stress/private-method-nested-class.js",
+ "WebKit/JSTests/stress/private-method-on-sealed-objects.js",
+ "WebKit/JSTests/stress/private-method-on-uncacheable-dictionary.js",
+ "WebKit/JSTests/stress/private-method-polymorphic-with-constant-symbol.js",
+ "WebKit/JSTests/stress/private-method-set-brand-should-have-write-barrier.js",
+ "WebKit/JSTests/stress/private-method-untyped-use.js",
+ "WebKit/JSTests/stress/private-method-with-uncacheable-dictionary-transition.js",
+ "WebKit/JSTests/stress/private-methods-and-accessors-postfix-node.js",
+ "WebKit/JSTests/stress/private-methods-and-accessors-prefix-node.js",
+ "WebKit/JSTests/stress/private-methods-inheritance.js",
+ "WebKit/JSTests/stress/private-methods-inline-cache.js",
+ "WebKit/JSTests/stress/private-methods-megamorphic-ic.js",
+ "WebKit/JSTests/stress/private-methods-on-proxy.js",
+ "WebKit/JSTests/stress/private-methods-poly-ic-multiple-classes.js",
+ "WebKit/JSTests/stress/private-methods-poly-ic-single-class.js",
+ "WebKit/JSTests/stress/private-names-available-on-direct-eval.js",
+ "WebKit/JSTests/stress/private-names-available-on-eval-during-field-initialization.js",
+ "WebKit/JSTests/stress/private-setter-brand-check.js",
+ "WebKit/JSTests/stress/private-setter-inner-class.js",
+ "WebKit/JSTests/stress/private-static-method-declaration-error.js",
+ "WebKit/JSTests/stress/static-private-methods-and-accessor-inner-class.js",
+ "WebKit/JSTests/stress/static-private-methods-and-accessor-multiple-evaluation.js",
+ "WebKit/JSTests/stress/static-private-methods-and-accessors-postfix-node.js",
+ "WebKit/JSTests/stress/static-private-methods-and-accessors-prefix-node.js",
+ "WebKit/JSTests/stress/undeclared-private-field-in-eval.js",
+ /* Very slow tests from jstests */
+ "WebKit/JSTests/stress/string-locale-compare-uca-ducet.js", // 15.698s
/* Tests with parse errors from crashtests */
"CrashTests/115674352/util.js",
"CrashTests/132918471/fast/js/resources/js-test-post.js",
@@ -248,8 +349,10 @@ const skipped = [
"CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test-utils.js",
"CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test.js",
"CrashTests/4563969814560768/1.0.2/resources/js-test-pre.js",
+ "CrashTests/4570511337324544/01067.js",
"CrashTests/4592095397150720/619.js",
"CrashTests/4620742728613888/02272.js",
+ "CrashTests/4624768836632576/00383.js",
"CrashTests/4675875294674944/04443.js",
"CrashTests/4676310267068416/00041.js",
"CrashTests/4676310267068416/meta-00041.js",
@@ -269,6 +372,7 @@ const skipped = [
"CrashTests/4835573090222080/meta-00096.js",
"CrashTests/4837730048278528/03052.js",
"CrashTests/4843490131312640/03475.js",
+ "CrashTests/4848225223245824/resources/js-test-post.js",
"CrashTests/4850895428517888/2670.js",
"CrashTests/4854644212105216/392.js",
"CrashTests/4855156194934784/meta-00080.js",
@@ -295,6 +399,7 @@ const skipped = [
"CrashTests/5091969183776768/js/angular.js",
"CrashTests/5104674803023872/meta-00066.js",
"CrashTests/5110246766673920/117.js",
+ "CrashTests/5120973776420864/00299.js",
"CrashTests/5126730184654848/00846.js",
"CrashTests/5140656268640256/resources/interpolation-test.js",
"CrashTests/5151090662178816/01492.js",
@@ -367,13 +472,17 @@ const skipped = [
"CrashTests/5694701996867584/conformance/resources/webgl-test.js",
"CrashTests/5703976838234112/test.js",
"CrashTests/5707472246472704/1443.js",
+ "CrashTests/5712001662517248/report.js",
"CrashTests/5721502735532032/03042.js",
"CrashTests/5734750167105536/01271.js",
"CrashTests/5735023732064256/meta-00070.js",
"CrashTests/5736353084342272/resources/testharness.js",
"CrashTests/5737388710821888/resources/js-test.js",
+ "CrashTests/5738737345822720/svg/dynamic-updates/resources/SVGTestCase.js",
"CrashTests/5744365229441024/resources/testharness.js",
"CrashTests/5745342726537216/meta-00053.js",
+ "CrashTests/5747146314285056/support/alertAssert.sub.js",
+ "CrashTests/5747146314285056/support/logTest.sub.js",
"CrashTests/5755508264534016/00224.js",
"CrashTests/5763511307337728/04651.js",
"CrashTests/5774432061095936/00972.js",
@@ -404,6 +513,8 @@ const skipped = [
"CrashTests/6073192676327424/resources/js-test-pre.js",
"CrashTests/6085702952681472/00521.js",
"CrashTests/6113149884563456/02823.js",
+ "CrashTests/6131247808839680/00012.js",
+ "CrashTests/6132283327971328/resources/autosizingTest.js",
"CrashTests/6150179231105024/conformance/resources/webgl-test.js",
"CrashTests/6158905865666560/meta-00624.js",
"CrashTests/6179220893204480/02159.js",
@@ -419,6 +530,7 @@ const skipped = [
"CrashTests/6255231244697600/meta-00216.js",
"CrashTests/6255916311379968/1372.js",
"CrashTests/6259138054324224/meta-00172.js",
+ "CrashTests/6263485068017664/MediaSessionTestUtils.js",
"CrashTests/6278159702425600/01463.js",
"CrashTests/6280577705705472/1146.js",
"CrashTests/6285336190124032/01621.js",
@@ -455,12 +567,15 @@ const skipped = [
"CrashTests/6530413356122112/meta-00391.js",
"CrashTests/6541223017054208/01484.js",
"CrashTests/6550225930944512/mnt/scratch0/clusterfuzz/slave-bot/inputs/fuzzers/inferno_twister_custom_bundle/inferno_twister_custom_bundle_data/moz_tests/dom/workers/test/threadErrors_worker1.js",
+ "CrashTests/6551265423982592/00708.js",
"CrashTests/6552552797503488/bug_41414141.js",
+ "CrashTests/6572559555166208/report.js",
"CrashTests/6576437049950208/conformance/resources/glsl-generator.js",
"CrashTests/6576437049950208/resources/js-test-pre.js",
"CrashTests/6586504922267648/00672.js",
"CrashTests/6597230699216896/meta-00299.js",
"CrashTests/6613865297084416/builds/chromium-browser-syzyasan_win32-release/revisions/asan-win32-release-276100/resources/inspector/main/Main.js",
+ "CrashTests/6617130045341696/02588.js",
"CrashTests/6622275291840512/resources/js-test.js",
"CrashTests/6644133880397824/00752.js",
"CrashTests/6646069054013440/poc.js",
@@ -471,196 +586,216 @@ const skipped = [
"CrashTests/6731147175526400/meta-00107.js",
"CrashTests/6744125769252864/494.js",
/* Tests with mutation errors from crashtests */
+ "CrashTests/4540193473101824/00967.js",
"CrashTests/4542853924782080/01450.js",
"CrashTests/4575654914293760/01532.js",
"CrashTests/4652594229411840/00000.js",
"CrashTests/4656490341466112/00126.js",
"CrashTests/4672370177736704/00528.js",
+ "CrashTests/4797755955937280/857.js",
"CrashTests/4798856567717888/04694.js",
"CrashTests/4804923870150656/03027.js",
"CrashTests/4895570342707200/02467.js",
"CrashTests/4983976359100416/02090.js",
"CrashTests/5087167542853632/02505.js",
+ "CrashTests/5095010520858624/2806.js",
+ "CrashTests/5105917793468416/00610.js",
"CrashTests/5136618234314752/00136.js",
+ "CrashTests/5138652353593344/410.js",
+ "CrashTests/5200855060774912/00023.js",
+ "CrashTests/5357835741888512/568.js",
"CrashTests/5518580170096640/00960.js",
"CrashTests/5604116503199744/00316.js",
"CrashTests/5631123291111424/00708.js",
+ "CrashTests/5666447379988480/1339.js",
+ "CrashTests/5692170731847680/195.js",
"CrashTests/5701970444288000/00241.js",
+ "CrashTests/5741122957148160/2584.js",
+ "CrashTests/5748217537429504/repro.js",
"CrashTests/5834909260709888/01407.js",
"CrashTests/5927058168610816/01389.js",
"CrashTests/6005687605002240/00969.js",
+ "CrashTests/6245901978370048/1543.js",
+ "CrashTests/6263564054888448/1184.js",
+ "CrashTests/6316400054960128/165.js",
"CrashTests/6396053053243392/00161.js",
"CrashTests/6491889133158400/01408.js",
+ "CrashTests/6551672999968768/259.js",
"CrashTests/6666268416671744/09877.js",
/* Very slow tests from crashtests */
- "CrashTests/5680390288441344/scripts/extension.js", // 48.473s
- "CrashTests/5680390288441344/scripts/feedback.js", // 24.001s
- "CrashTests/5666182802309120/binaryen-1564.js", // 18.342s
- "CrashTests/5306741051621376/binaryen-2962.js", // 16.455s
- "CrashTests/6071297320747008/324.js", // 14.031s
- "CrashTests/6250982932086784/binaryen-538.js", // 11.258s
- "CrashTests/5187171718529024/844.js", // 10.189s
- "CrashTests/4741082707132416/binaryen-1700.js", // 10.129s
+ "CrashTests/5680390288441344/scripts/extension.js", // 45.763s
+ "CrashTests/5666182802309120/binaryen-1564.js", // 24.367s
+ "CrashTests/5680390288441344/scripts/feedback.js", // 22.301s
+ "CrashTests/5306741051621376/binaryen-2962.js", // 18.079s
+ "CrashTests/6270084030201856/binaryen-634.js", // 14.106s
+ "CrashTests/6071297320747008/324.js", // 12.704s
+ "CrashTests/6316400054960128/165.js", // 11.802s
+ "CrashTests/5691805950083072/binaryen-2729.js", // 11.693s
];
const softSkipped = [
/* Slow tests from v8 */
- "v8/test/mjsunit/object-literal.js", // 4.219s
- "v8/test/mjsunit/wasm/wasm-module-builder.js", // 4.07s
+ "v8/test/mjsunit/wasm/wasm-module-builder.js", // 7.537s
+ "v8/test/mjsunit/object-literal.js", // 3.837s
/* Slow tests from spidermonkey */
- "spidermonkey/non262/statements/regress-74474-003.js", // 7.228s
- "spidermonkey/non262/statements/regress-74474-002.js", // 7.209s
- "spidermonkey/non262/extensions/dataview.js", // 3.845s
+ "spidermonkey/non262/statements/regress-74474-002.js", // 8.047s
+ "spidermonkey/non262/statements/regress-74474-003.js", // 6.488s
+ "spidermonkey/non262/extensions/dataview.js", // 3.727s
/* Slow tests from chakra */
- "chakra/TaggedIntegers/loops.js", // 7.354s
- "chakra/benchmarks/ARES-6/Air/payload-typescript-scanIdentifier.js", // 7.011s
- "chakra/benchmarks/Octane/crypto.js", // 4.004s
- "chakra/benchmarks/Octane_Closure/crypto.js", // 3.178s
- "chakra/benchmarks/ARES-6/Air/payload-airjs-ACLj8C.js", // 2.918s
+ "chakra/TaggedIntegers/loops.js", // 8.255s
+ "chakra/benchmarks/ARES-6/Air/payload-typescript-scanIdentifier.js", // 5.26s
+ "chakra/benchmarks/ARES-6/Air/payload-airjs-ACLj8C.js", // 3.713s
+ "chakra/benchmarks/Octane/crypto.js", // 3.463s
+ "chakra/benchmarks/Octane_Closure/crypto.js", // 3.354s
/* Slow tests from jstests */
- "WebKit/JSTests/stress/v8-crypto-strict.js", // 3.023s
+ "WebKit/JSTests/stress/v8-crypto-strict.js", // 2.56s
"WebKit/JSTests/stress/v8-regexp-strict.js", // 2.555s
/* Slow tests from crashtests */
- "CrashTests/5082337238712320/binaryen-3268.js", // 9.621s
- "CrashTests/4602127226241024/js/jquery.js", // 9.337s
- "CrashTests/6472801805664256/common/js/frameworks/jquery-1.8.2.min.js", // 8.859s
- "CrashTests/5657116044951552/scripts/libs/jquery.js", // 8.649s
- "CrashTests/4614296351277056/js/jquery-1.8.0.min.js", // 8.446s
- "CrashTests/5550653104455680/js/jquery-1.8.0.min.js", // 8.426s
- "CrashTests/5091969183776768/js/jquery.js", // 8.396s
- "CrashTests/4612142496743424/binaryen-1882.js", // 8.101s
- "CrashTests/5049543056424960/inc/jquery-2.1.0.min.js", // 7.912s
- "CrashTests/6183950024441856/common/widget-api/widgets/common/jquery-1.7.1.min.js", // 7.454s
- "CrashTests/6183950024441856/common/components/menu/js/jquery-1.7.1.min.js", // 7.409s
- "CrashTests/5365583999664128/extensionData/plugins/4_jquery_1_7_1.js", // 7.298s
- "CrashTests/4615141375344640/lib/jquery.js", // 7.144s
- "CrashTests/6183950024441856/common/js/jquery-1.7.1.min.js", // 7.133s
- "CrashTests/5657174977806336/binaryen-1398.js", // 6.913s
- "CrashTests/6327982568898560/binaryen-862.js", // 6.736s
- "CrashTests/4633495124312064/634.js", // 5.399s
- "CrashTests/5689977077891072/01770.js", // 5.345s
- "CrashTests/6636948839202816/121.js", // 5.31s
- "CrashTests/5365583999664128/extensionData/plugins/17_jQuery.js", // 5.234s
- "CrashTests/5533984447266816/626.js", // 5.002s
- "CrashTests/4528969625894912/encaiiljifbdbjlphpgpiimidegddhic/lib/3rdparty/jquery.js", // 4.998s
- "CrashTests/5274731158568960/test2.js", // 4.907s
- "CrashTests/4528969625894912/lib/3rdparty/jquery.js", // 4.847s
- "CrashTests/6103088053354496/965.js", // 4.574s
- "CrashTests/5293298093391872/65.js", // 3.944s
- "CrashTests/6215250211504128/05886.js", // 3.928s
- "CrashTests/6107728614522880/wasm-hashset/many-test.js", // 3.235s
- "CrashTests/5157721919979520/00935.js", // 3.224s
- "CrashTests/5804707603021824/workers/wasm-hashset/worker.js", // 3.116s
- "CrashTests/6107728614522880/wasm-hashset/worker.js", // 3.115s
- "CrashTests/4986854798262272/js/webgl-test-utils.js", // 3.098s
- "CrashTests/4764215218012160/workers/wasm-hashset/worker.js", // 3.092s
- "CrashTests/4764215218012160/workers/wasm-hashset/test.js", // 3.064s
- "CrashTests/5970862301904896/wasm-hashset/many-test.js", // 3.037s
- "CrashTests/6264668110323712/js/webgl-test-utils.js", // 3.031s
- "CrashTests/5144726426222592/957.js", // 3.028s
- "CrashTests/4521096081309696/workers/wasm-hashset/many-worker-2.js", // 3.007s
- "CrashTests/4727886732066816/03031.js", // 2.945s
- "CrashTests/6171607952523264/workers/wasm-hashset/many-test-2.js", // 2.924s
- "CrashTests/5804707603021824/workers/wasm-hashset/many-test.js", // 2.92s
- "CrashTests/5903614327128064/js/webgl-test-utils.js", // 2.892s
- "CrashTests/5474186315956224/js/webgl-test-utils.js", // 2.881s
- "CrashTests/5720170289692672/js/webgl-test-utils.js", // 2.88s
- "CrashTests/5709689405571072/js/webgl-test-utils.js", // 2.87s
- "CrashTests/4808534067838976/113.js", // 2.852s
- "CrashTests/5150788929454080/js/webgl-test-utils.js", // 2.842s
- "CrashTests/4521096081309696/workers/wasm-hashset/many-test-2.js", // 2.839s
- "CrashTests/4750804070957056/js/webgl-test-utils.js", // 2.837s
- "CrashTests/5877660912451584/js/webgl-test-utils.js", // 2.831s
- "CrashTests/6117827240263680/js/webgl-test-utils.js", // 2.821s
- "CrashTests/5649522772541440/js/webgl-test-utils.js", // 2.821s
- "CrashTests/6207235662020608/js/webgl-test-utils.js", // 2.81s
- "CrashTests/5081168717545472/js/webgl-test-utils.js", // 2.793s
- "CrashTests/6113858805301248/js/webgl-test-utils.js", // 2.781s
- "CrashTests/4895116383485952/js/webgl-test-utils.js", // 2.767s
- "CrashTests/5205072808771584/js/webgl-test-utils.js", // 2.766s
- "CrashTests/5550653104455680/js/esprima.js", // 2.758s
- "CrashTests/5540518327746560/js/webgl-test-utils.js", // 2.751s
- "CrashTests/6307834848608256/js/webgl-test-utils.js", // 2.723s
- "CrashTests/4561088605323264/js/webgl-test-utils.js", // 2.722s
- "CrashTests/5152046202093568/binaryen-397.js", // 2.721s
- "CrashTests/4614296351277056/js/esprima.js", // 2.72s
- "CrashTests/5289255386742784/js/webgl-test-utils.js", // 2.718s
- "CrashTests/5636770818686976/00408.js", // 2.718s
- "CrashTests/6021155845308416/js/webgl-test-utils.js", // 2.708s
- "CrashTests/5316130750332928/js/webgl-test-utils.js", // 2.694s
- "CrashTests/5630410519478272/916.js", // 2.685s
- "CrashTests/4763495091142656/js/webgl-test-utils.js", // 2.676s
- "CrashTests/6643859697434624/00989.js", // 2.672s
- "CrashTests/6578304131006464/js/webgl-test-utils.js", // 2.63s
- "CrashTests/5921882795933696/js/webgl-test-utils.js", // 2.613s
- "CrashTests/5720530023612416/binaryen-1954.js", // 2.592s
- "CrashTests/5753604559470592/03311.js", // 2.589s
- "CrashTests/4780408753094656/js/webgl-test-utils.js", // 2.584s
- "CrashTests/6103004909666304/js/webgl-test-utils.js", // 2.582s
- "CrashTests/5940011987107840/js/webgl-test-utils.js", // 2.569s
- "CrashTests/6612369747476480/04469.js", // 2.566s
- "CrashTests/5766886287081472/js/webgl-test-utils.js", // 2.561s
- "CrashTests/5130481752735744/817.js", // 2.557s
- "CrashTests/5667434598760448/js/webgl-test-utils.js", // 2.557s
- "CrashTests/5304417640513536/js/webgl-test-utils.js", // 2.557s
- "CrashTests/5069958615400448/js/webgl-test-utils.js", // 2.539s
- "CrashTests/5803513008095232/js/webgl-test-utils.js", // 2.524s
- "CrashTests/5684927436423168/js/webgl-test-utils.js", // 2.521s
- "CrashTests/6343749881036800/01604.js", // 2.516s
- "CrashTests/6159546553466880/js/webgl-test-utils.js", // 2.506s
- "CrashTests/5436877461782528/binaryen-4415.js", // 2.492s
- "CrashTests/5246233363611648/js/webgl-test-utils.js", // 2.478s
- "CrashTests/5154715558084608/572.js", // 2.472s
- "CrashTests/5216366704721920/js/webgl-test-utils.js", // 2.47s
- "CrashTests/5020463045804032/js/webgl-test-utils.js", // 2.44s
- "CrashTests/6231966593318912/js/webgl-test-utils.js", // 2.438s
- "CrashTests/4712093587865600/js/webgl-test-utils.js", // 2.421s
- "CrashTests/4722289303355392/js/webgl-test-utils.js", // 2.415s
- "CrashTests/6446057308028928/js/webgl-test-utils.js", // 2.414s
- "CrashTests/6585627176992768/binaryen-655.js", // 2.411s
- "CrashTests/6371786506371072/js/webgl-test-utils.js", // 2.408s
- "CrashTests/5875816496627712/js/webgl-test-utils.js", // 2.404s
- "CrashTests/4571384448811008/fast/canvas/webgl/resources/webgl-test-utils-full.js", // 2.404s
- "CrashTests/4902839495032832/2.0.0/resources/webgl_test_files/js/webgl-test-utils.js", // 2.391s
- "CrashTests/6396634260570112/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-616366/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.379s
- "CrashTests/5973030224527360/builds/chrome-test-builds_media_mac-release_e6940505d6c387d688e04a7feeb7e2019c3efe81/revisions/asan-mac-release-405858/resources/inspector/heap_snapshot_worker.js", // 2.376s
- "CrashTests/4928460350029824/js/webgl-test-utils.js", // 2.371s
- "CrashTests/5447031043915776/js/webgl-test-utils.js", // 2.35s
- "CrashTests/5097133477462016/binaryen-1557.js", // 2.339s
- "CrashTests/5748791416979456/js/webgl-test-utils.js", // 2.335s
- "CrashTests/4979734430351360/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-587925/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.329s
- "CrashTests/5882955910873088/test.js", // 2.329s
- "CrashTests/6030846597005312/binaryen-97.js", // 2.31s
- "CrashTests/5934321914609664/js/webgl-test-utils.js", // 2.306s
- "CrashTests/4872723313197056/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-589752/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.291s
- "CrashTests/4864843149213696/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-588015/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.281s
- "CrashTests/4526031242788864/rf_onloadcontent.js", // 2.261s
- "CrashTests/5673981645684736/js/webgl-test-utils.js", // 2.247s
- "CrashTests/5112085437743104/js/webgl-test-utils.js", // 2.223s
- "CrashTests/4544669955129344/binaryen-1549.js", // 2.211s
- "CrashTests/4661285908905984/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-578254/gen/third_party/blink/public/platform/modules/payments/payment_request.mojom.js", // 2.204s
- "CrashTests/5710180189995008/js/webgl-test-utils.js", // 2.188s
- "CrashTests/6522661136760832/js/webgl-test-utils.js", // 2.176s
- "CrashTests/6158076232990720/resources/testharness.js", // 2.174s
- "CrashTests/5657181087727616/binaryen-125.js", // 2.159s
- "CrashTests/4714207862587392/03389.js", // 2.145s
- "CrashTests/5716123902410752/resources/testharness.js", // 2.135s
- "CrashTests/6203771342159872/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-707359/gen/media/mojo/mojom/media_types.mojom.js", // 2.133s
- "CrashTests/6393868459180032/jquery.flot.js", // 2.114s
- "CrashTests/5186189903396864/resources/testharness.js", // 2.111s
- "CrashTests/5490620452044800/00601.js", // 2.089s
- "CrashTests/4656197324767232/gen/third_party/blink/public/platform/modules/payments/payment_request.mojom.js", // 2.081s
- "CrashTests/5873758480105472/conformance/resources/webgl-test-utils.js", // 2.079s
- "CrashTests/5308016126853120/conformance/resources/webgl-test-utils.js", // 2.075s
- "CrashTests/6693648314400768/1.0.3/resources/webgl_test_files/conformance/resources/webgl-test-utils.js", // 2.07s
- "CrashTests/4607827521568768/resources/testharness.js", // 2.066s
- "CrashTests/6444261469847552/jquery.flot.js", // 2.043s
- "CrashTests/5949856401326080/conformance/resources/webgl-test-utils.js", // 2.028s
- "CrashTests/5320478993678336/conformance/resources/webgl-test-utils.js", // 2.024s
- "CrashTests/4871780976099328/LayoutTests/resources/testharness.js", // 2.024s
- "CrashTests/5195343992586240/binaryen-2577.js", // 2.022s
- "CrashTests/5170518889332736/resources/webgl-test-utils.js", // 2.019s
- "CrashTests/4942847902220288/conformance/resources/webgl-test-utils.js", // 2.005s
- "CrashTests/6459909679087616/conformance/resources/webgl-test-utils.js", // 2.001s
+ "CrashTests/5138652353593344/410.js", // 9.937s
+ "CrashTests/6250982932086784/binaryen-538.js", // 9.721s
+ "CrashTests/4741082707132416/binaryen-1700.js", // 9.631s
+ "CrashTests/5082337238712320/binaryen-3268.js", // 9.475s
+ "CrashTests/5187171718529024/844.js", // 9.422s
+ "CrashTests/6472801805664256/common/js/frameworks/jquery-1.8.2.min.js", // 8.621s
+ "CrashTests/4602127226241024/js/jquery.js", // 8.489s
+ "CrashTests/5657174977806336/binaryen-1398.js", // 8.434s
+ "CrashTests/4614296351277056/js/jquery-1.8.0.min.js", // 8.29s
+ "CrashTests/5657116044951552/scripts/libs/jquery.js", // 8.25s
+ "CrashTests/5091969183776768/js/jquery.js", // 8.217s
+ "CrashTests/5550653104455680/js/jquery-1.8.0.min.js", // 8.214s
+ "CrashTests/6649057641037824/binaryen-3706.js", // 7.484s
+ "CrashTests/4612142496743424/binaryen-1882.js", // 7.364s
+ "CrashTests/5365583999664128/extensionData/plugins/4_jquery_1_7_1.js", // 7.253s
+ "CrashTests/6183950024441856/common/widget-api/widgets/common/jquery-1.7.1.min.js", // 7.168s
+ "CrashTests/5049543056424960/inc/jquery-2.1.0.min.js", // 7.118s
+ "CrashTests/6183950024441856/common/js/jquery-1.7.1.min.js", // 7.043s
+ "CrashTests/6183950024441856/common/components/menu/js/jquery-1.7.1.min.js", // 6.928s
+ "CrashTests/4615141375344640/lib/jquery.js", // 6.769s
+ "CrashTests/6327982568898560/binaryen-862.js", // 6.678s
+ "CrashTests/6240393225306112/48.js", // 5.382s
+ "CrashTests/5689977077891072/01770.js", // 4.938s
+ "CrashTests/5365583999664128/extensionData/plugins/17_jQuery.js", // 4.922s
+ "CrashTests/6636948839202816/121.js", // 4.907s
+ "CrashTests/4787946390093824/jquery-ui-1.8.2.custom/js/jquery-1.4.2.min.js", // 4.607s
+ "CrashTests/4633495124312064/634.js", // 4.597s
+ "CrashTests/5157721919979520/00935.js", // 4.518s
+ "CrashTests/5533984447266816/626.js", // 4.445s
+ "CrashTests/4528969625894912/encaiiljifbdbjlphpgpiimidegddhic/lib/3rdparty/jquery.js", // 4.33s
+ "CrashTests/4528969625894912/lib/3rdparty/jquery.js", // 4.283s
+ "CrashTests/5706022850527232/jquery-ui-1.8.2.custom/js/jquery-1.4.2.min.js", // 4.247s
+ "CrashTests/5274731158568960/test2.js", // 4.218s
+ "CrashTests/6215250211504128/05886.js", // 4.169s
+ "CrashTests/6103088053354496/965.js", // 3.924s
+ "CrashTests/5293298093391872/65.js", // 3.242s
+ "CrashTests/5540518327746560/js/webgl-test-utils.js", // 3.075s
+ "CrashTests/6505173049999360/binaryen-1108.js", // 2.989s
+ "CrashTests/5020463045804032/js/webgl-test-utils.js", // 2.979s
+ "CrashTests/5765813295185920/js/webgl-test-utils.js", // 2.937s
+ "CrashTests/6411936120766464/js/webgl-test-utils.js", // 2.928s
+ "CrashTests/4756083598753792/js/webgl-test-utils.js", // 2.893s
+ "CrashTests/6564750484373504/js/webgl-test-utils.js", // 2.881s
+ "CrashTests/5748523325521920/resources/webgl_test_files/js/webgl-test-utils.js", // 2.877s
+ "CrashTests/5724904516812800/js/webgl-test-utils.js", // 2.863s
+ "CrashTests/5654708333903872/js/webgl-test-utils.js", // 2.852s
+ "CrashTests/5658246410207232/js/webgl-test-utils.js", // 2.835s
+ "CrashTests/6090022799867904/js/webgl-test-utils.js", // 2.816s
+ "CrashTests/5804707603021824/workers/wasm-hashset/worker.js", // 2.81s
+ "CrashTests/5767297823473664/js/webgl-test-utils.js", // 2.797s
+ "CrashTests/5748791416979456/js/webgl-test-utils.js", // 2.788s
+ "CrashTests/4727886732066816/03031.js", // 2.783s
+ "CrashTests/5147619843702784/js/webgl-test-utils.js", // 2.757s
+ "CrashTests/4561088605323264/js/webgl-test-utils.js", // 2.748s
+ "CrashTests/5970862301904896/wasm-hashset/many-test.js", // 2.739s
+ "CrashTests/5804707603021824/workers/wasm-hashset/many-test.js", // 2.738s
+ "CrashTests/5195343992586240/binaryen-2577.js", // 2.737s
+ "CrashTests/6107728614522880/wasm-hashset/many-test.js", // 2.732s
+ "CrashTests/4808534067838976/113.js", // 2.702s
+ "CrashTests/4521096081309696/workers/wasm-hashset/many-test-2.js", // 2.693s
+ "CrashTests/5512230554370048/binaryen-229.js", // 2.688s
+ "CrashTests/4764215218012160/workers/wasm-hashset/test.js", // 2.683s
+ "CrashTests/6107728614522880/wasm-hashset/worker.js", // 2.664s
+ "CrashTests/5934400840466432/resources/testharness.js", // 2.65s
+ "CrashTests/6171607952523264/workers/wasm-hashset/many-test-2.js", // 2.65s
+ "CrashTests/5649522772541440/js/webgl-test-utils.js", // 2.63s
+ "CrashTests/4521096081309696/workers/wasm-hashset/many-worker-2.js", // 2.616s
+ "CrashTests/5877660912451584/js/webgl-test-utils.js", // 2.611s
+ "CrashTests/5144726426222592/957.js", // 2.609s
+ "CrashTests/4750804070957056/js/webgl-test-utils.js", // 2.605s
+ "CrashTests/4764215218012160/workers/wasm-hashset/worker.js", // 2.592s
+ "CrashTests/5436877461782528/binaryen-4415.js", // 2.579s
+ "CrashTests/4544669955129344/binaryen-1549.js", // 2.548s
+ "CrashTests/5684927436423168/js/webgl-test-utils.js", // 2.533s
+ "CrashTests/5216366704721920/js/webgl-test-utils.js", // 2.52s
+ "CrashTests/4780408753094656/js/webgl-test-utils.js", // 2.515s
+ "CrashTests/6113858805301248/js/webgl-test-utils.js", // 2.488s
+ "CrashTests/4614296351277056/js/esprima.js", // 2.487s
+ "CrashTests/6643859697434624/00989.js", // 2.482s
+ "CrashTests/5148692865417216/binaryen-587.js", // 2.476s
+ "CrashTests/5152046202093568/binaryen-397.js", // 2.457s
+ "CrashTests/6396634260570112/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-616366/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.454s
+ "CrashTests/6264668110323712/js/webgl-test-utils.js", // 2.448s
+ "CrashTests/5709689405571072/js/webgl-test-utils.js", // 2.443s
+ "CrashTests/5720170289692672/js/webgl-test-utils.js", // 2.441s
+ "CrashTests/5720530023612416/binaryen-1954.js", // 2.427s
+ "CrashTests/5130481752735744/817.js", // 2.419s
+ "CrashTests/5692170731847680/195.js", // 2.418s
+ "CrashTests/5903614327128064/js/webgl-test-utils.js", // 2.412s
+ "CrashTests/5150788929454080/js/webgl-test-utils.js", // 2.401s
+ "CrashTests/4763495091142656/js/webgl-test-utils.js", // 2.39s
+ "CrashTests/5636770818686976/00408.js", // 2.374s
+ "CrashTests/6159546553466880/js/webgl-test-utils.js", // 2.363s
+ "CrashTests/4895116383485952/js/webgl-test-utils.js", // 2.35s
+ "CrashTests/4986854798262272/js/webgl-test-utils.js", // 2.342s
+ "CrashTests/6207235662020608/js/webgl-test-utils.js", // 2.34s
+ "CrashTests/5205072808771584/js/webgl-test-utils.js", // 2.336s
+ "CrashTests/6103004909666304/js/webgl-test-utils.js", // 2.331s
+ "CrashTests/6021155845308416/js/webgl-test-utils.js", // 2.328s
+ "CrashTests/4712093587865600/js/webgl-test-utils.js", // 2.314s
+ "CrashTests/5550653104455680/js/esprima.js", // 2.313s
+ "CrashTests/5803513008095232/js/webgl-test-utils.js", // 2.311s
+ "CrashTests/6585627176992768/binaryen-655.js", // 2.309s
+ "CrashTests/6231966593318912/js/webgl-test-utils.js", // 2.289s
+ "CrashTests/6522661136760832/js/webgl-test-utils.js", // 2.289s
+ "CrashTests/4849910154854400/2.0.0/resources/webgl_test_files/js/webgl-test-utils.js", // 2.289s
+ "CrashTests/5316130750332928/js/webgl-test-utils.js", // 2.286s
+ "CrashTests/4786020456595456/resources/testharness.js", // 2.284s
+ "CrashTests/5246233363611648/js/webgl-test-utils.js", // 2.283s
+ "CrashTests/4610688298057728/js/webgl-test-utils.js", // 2.271s
+ "CrashTests/5732319423168512/js/webgl-test-utils.js", // 2.27s
+ "CrashTests/6117827240263680/js/webgl-test-utils.js", // 2.266s
+ "CrashTests/6446057308028928/js/webgl-test-utils.js", // 2.265s
+ "CrashTests/5474186315956224/js/webgl-test-utils.js", // 2.265s
+ "CrashTests/5081168717545472/js/webgl-test-utils.js", // 2.263s
+ "CrashTests/5289255386742784/js/webgl-test-utils.js", // 2.258s
+ "CrashTests/5153121645625344/binaryen-602.js", // 2.257s
+ "CrashTests/6307834848608256/js/webgl-test-utils.js", // 2.234s
+ "CrashTests/5069958615400448/js/webgl-test-utils.js", // 2.228s
+ "CrashTests/5673981645684736/js/webgl-test-utils.js", // 2.216s
+ "CrashTests/6371786506371072/js/webgl-test-utils.js", // 2.208s
+ "CrashTests/5766886287081472/js/webgl-test-utils.js", // 2.207s
+ "CrashTests/5112085437743104/js/webgl-test-utils.js", // 2.195s
+ "CrashTests/5875816496627712/js/webgl-test-utils.js", // 2.187s
+ "CrashTests/4902839495032832/2.0.0/resources/webgl_test_files/js/webgl-test-utils.js", // 2.185s
+ "CrashTests/5940011987107840/js/webgl-test-utils.js", // 2.181s
+ "CrashTests/5934321914609664/js/webgl-test-utils.js", // 2.18s
+ "CrashTests/5447031043915776/js/webgl-test-utils.js", // 2.173s
+ "CrashTests/5667434598760448/js/webgl-test-utils.js", // 2.172s
+ "CrashTests/4722289303355392/js/webgl-test-utils.js", // 2.158s
+ "CrashTests/4928460350029824/js/webgl-test-utils.js", // 2.152s
+ "CrashTests/6612369747476480/04469.js", // 2.137s
+ "CrashTests/5304417640513536/js/webgl-test-utils.js", // 2.134s
+ "CrashTests/5154715558084608/572.js", // 2.132s
+ "CrashTests/5710180189995008/js/webgl-test-utils.js", // 2.122s
+ "CrashTests/5921882795933696/js/webgl-test-utils.js", // 2.119s
+ "CrashTests/6578304131006464/js/webgl-test-utils.js", // 2.109s
+ "CrashTests/5753604559470592/03311.js", // 2.102s
+ "CrashTests/5630410519478272/916.js", // 2.092s
+ "CrashTests/4571384448811008/fast/canvas/webgl/resources/webgl-test-utils-full.js", // 2.069s
+ "CrashTests/6343749881036800/01604.js", // 2.055s
+ "CrashTests/5657181087727616/binaryen-125.js", // 2.031s
+ "CrashTests/4979734430351360/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-587925/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.019s
+ "CrashTests/6030846597005312/binaryen-97.js", // 2.015s
+ "CrashTests/4661285908905984/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-578254/gen/third_party/blink/public/platform/modules/payments/payment_request.mojom.js", // 2.014s
+ "CrashTests/4864843149213696/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-588015/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.014s
];
const sloppy = [
/* Tests requiring sloppy mode from v8 */
@@ -674,6 +809,7 @@ const sloppy = [
"v8/test/mjsunit/array-indexing-receiver.js",
"v8/test/mjsunit/arrow-with.js",
"v8/test/mjsunit/asm-directive.js",
+ "v8/test/mjsunit/baseline/test-baseline.js",
"v8/test/mjsunit/compiler/delete.js",
"v8/test/mjsunit/compiler/global-delete.js",
"v8/test/mjsunit/compiler/global-var-delete.js",
@@ -733,19 +869,29 @@ const sloppy = [
"v8/test/mjsunit/global-nan.js",
"v8/test/mjsunit/global-undefined.js",
"v8/test/mjsunit/global-vars-with.js",
+ "v8/test/mjsunit/harmony/array-findlast-unscopables.js",
"v8/test/mjsunit/harmony/bigint/property-names.js",
+ "v8/test/mjsunit/harmony/class-static-blocks.js",
"v8/test/mjsunit/harmony/global-configurable.js",
"v8/test/mjsunit/harmony/import-from-compilation-errored.js",
"v8/test/mjsunit/harmony/import-from-evaluation-errored.js",
"v8/test/mjsunit/harmony/import-from-fetch-errored.js",
"v8/test/mjsunit/harmony/import-from-instantiation-errored.js",
+ "v8/test/mjsunit/harmony/logical-assignment-function-name.js",
+ "v8/test/mjsunit/harmony/logical-assignment.js",
"v8/test/mjsunit/harmony/numeric-separator.js",
+ "v8/test/mjsunit/harmony/optional-chaining-this-private.js",
+ "v8/test/mjsunit/harmony/private-brand-checks.js",
"v8/test/mjsunit/harmony/private-fields-special-object.js",
+ "v8/test/mjsunit/harmony/regexp-match-indices.js",
"v8/test/mjsunit/ignition/dead-code-source-position.js",
"v8/test/mjsunit/ignition/regress-616064.js",
"v8/test/mjsunit/no-octal-constants-above-256.js",
"v8/test/mjsunit/override-read-only-property.js",
"v8/test/mjsunit/receiver-in-with-calls.js",
+ "v8/test/mjsunit/regexp-linear-flag.js",
+ "v8/test/mjsunit/regexp-no-linear-flag.js",
+ "v8/test/mjsunit/regress/asm/regress-608630.js",
"v8/test/mjsunit/regress/regress-1030466.js",
"v8/test/mjsunit/regress/regress-1079.js",
"v8/test/mjsunit/regress/regress-1125.js",
@@ -774,7 +920,6 @@ const sloppy = [
"v8/test/mjsunit/regress/regress-583260.js",
"v8/test/mjsunit/regress/regress-587004.js",
"v8/test/mjsunit/regress/regress-592353.js",
- "v8/test/mjsunit/regress/regress-608630.js",
"v8/test/mjsunit/regress/regress-649067.js",
"v8/test/mjsunit/regress/regress-6677.js",
"v8/test/mjsunit/regress/regress-670147.js",
@@ -795,6 +940,10 @@ const sloppy = [
"v8/test/mjsunit/regress/regress-abort-preparsing-params.js",
"v8/test/mjsunit/regress/regress-crbug-1041210.js",
"v8/test/mjsunit/regress/regress-crbug-1041616.js",
+ "v8/test/mjsunit/regress/regress-crbug-1074737.js",
+ "v8/test/mjsunit/regress/regress-crbug-1238467.js",
+ "v8/test/mjsunit/regress/regress-crbug-1239907.js",
+ "v8/test/mjsunit/regress/regress-crbug-1254704.js",
"v8/test/mjsunit/regress/regress-crbug-135008.js",
"v8/test/mjsunit/regress/regress-crbug-412208.js",
"v8/test/mjsunit/regress/regress-crbug-450642.js",
@@ -810,6 +959,7 @@ const sloppy = [
"v8/test/mjsunit/regress/regress-crbug-851393.js",
"v8/test/mjsunit/regress/regress-crbug-934166.js",
"v8/test/mjsunit/regress/regress-sloppy-block-function-hoisting-dynamic.js",
+ "v8/test/mjsunit/regress/regress-v8-10604.js",
"v8/test/mjsunit/regress/regress-v8-9394-2.js",
"v8/test/mjsunit/regress/regress-v8-9394.js",
"v8/test/mjsunit/regress/wasm/loop-stack-check.js",
@@ -818,9 +968,16 @@ const sloppy = [
"v8/test/mjsunit/regress/wasm/regress-753496.js",
"v8/test/mjsunit/scope-calls-eval.js",
"v8/test/mjsunit/serialize-embedded-error.js",
+ "v8/test/mjsunit/stackoverflow-underapplication.js",
"v8/test/mjsunit/strict-mode-implicit-receiver.js",
"v8/test/mjsunit/strict-mode.js",
"v8/test/mjsunit/switch.js",
+ "v8/test/mjsunit/temporal/calendar-merge-fields.js",
+ "v8/test/mjsunit/temporal/calendar-week-of-year.js",
+ "v8/test/mjsunit/temporal/calendar-year-month-from-fields.js",
+ "v8/test/mjsunit/temporal/duration-from.js",
+ "v8/test/mjsunit/temporal/plain-date-time-from.js",
+ "v8/test/mjsunit/temporal/plain-date-time-to-json.js",
"v8/test/mjsunit/throw-and-catch-function.js",
"v8/test/mjsunit/unused-context-in-with.js",
"v8/test/mjsunit/value-wrapper.js",
@@ -830,11 +987,13 @@ const sloppy = [
"v8/test/mjsunit/with-prototype.js",
"v8/test/mjsunit/with-readonly.js",
"v8/test/mjsunit/with-value.js",
+ "v8/test/mjsunit/worker-ping-test.js",
/* Tests requiring sloppy mode from spidermonkey */
"spidermonkey/non262/Array/unscopables.js",
"spidermonkey/non262/Array/values.js",
"spidermonkey/non262/BigInt/property-name-guessed-name.js",
"spidermonkey/non262/BigInt/property-name.js",
+ "spidermonkey/non262/Date/parse-time-zone.js",
"spidermonkey/non262/Date/time-zones-posix.js",
"spidermonkey/non262/Date/time-zones.js",
"spidermonkey/non262/Exceptions/catchguard-002-n.js",
@@ -849,6 +1008,7 @@ const sloppy = [
"spidermonkey/non262/Function/rest-has-duplicated.js",
"spidermonkey/non262/Function/rest-parameter-names.js",
"spidermonkey/non262/GC/regress-383269-02.js",
+ "spidermonkey/non262/PrivateName/lexical-presence.js",
"spidermonkey/non262/RegExp/regress-6359.js",
"spidermonkey/non262/RegExp/regress-85721.js",
"spidermonkey/non262/Scope/regress-184107.js",
@@ -860,8 +1020,6 @@ const sloppy = [
"spidermonkey/non262/Script/delete-001.js",
"spidermonkey/non262/Script/new-001.js",
"spidermonkey/non262/String/regress-392378.js",
- "spidermonkey/non262/TypedObject/method_from.js",
- "spidermonkey/non262/TypedObject/method_map.js",
"spidermonkey/non262/Unicode/regress-352044-02-n.js",
"spidermonkey/non262/arrow-functions/arrow-not-as-end-of-statement.js",
"spidermonkey/non262/arrow-functions/arrow-returning-arrow-with-block-body-followed-by-regexp.js",
@@ -877,6 +1035,13 @@ const sloppy = [
"spidermonkey/non262/eval/redeclared-arguments-in-param-expression-eval.js",
"spidermonkey/non262/execution-contexts/regress-448595-01.js",
"spidermonkey/non262/expressions/delete-constant-folded-and-or.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-anon-fns.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-const.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-deleted-decl-binding.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-property-key-evaluation.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-scope-lookup.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment-tdz.js",
+ "spidermonkey/non262/expressions/short-circuit-compound-assignment.js",
"spidermonkey/non262/extensions/clone-leaf-object.js",
"spidermonkey/non262/extensions/clone-simple.js",
"spidermonkey/non262/extensions/cross-global-eval-is-indirect.js",
@@ -900,6 +1065,7 @@ const sloppy = [
"spidermonkey/non262/fields/await-identifier-module-1.js",
"spidermonkey/non262/fields/await-identifier-module-2.js",
"spidermonkey/non262/fields/await-identifier-script.js",
+ "spidermonkey/non262/fields/numeric-fields.js",
"spidermonkey/non262/generators/iteration.js",
"spidermonkey/non262/generators/syntax.js",
"spidermonkey/non262/generators/yield-non-regexp.js",
@@ -935,6 +1101,13 @@ const sloppy = [
"spidermonkey/non262/lexical-environment/with-global-ignores-global-let-variables.js",
"spidermonkey/non262/module/bug1488117-import-namespace.js",
"spidermonkey/non262/module/bug1488117.js",
+ "spidermonkey/non262/module/bug1689499-a.js",
+ "spidermonkey/non262/module/bug1689499-b.js",
+ "spidermonkey/non262/module/bug1689499-c.js",
+ "spidermonkey/non262/module/bug1689499-x.js",
+ "spidermonkey/non262/module/bug1689499.js",
+ "spidermonkey/non262/module/bug1693261.js",
+ "spidermonkey/non262/module/module-export-name-star.js",
"spidermonkey/non262/reflect-parse/PatternBuilders.js",
"spidermonkey/non262/reflect-parse/classes.js",
"spidermonkey/non262/regress/regress-104077.js",
@@ -1242,6 +1415,7 @@ const sloppy = [
"chakra/es6/supersyntax06.js",
"chakra/es6/unicode_6_identifier_Blue511452.js",
"chakra/es6/unicode_6_identifier_Blue524737.js",
+ "chakra/es6/unscopablesWithEsArrayFindFromLastTest.js",
"chakra/es6/unscopablesWithScopeTest.js",
"chakra/es6module/GetModuleNamespace.js",
"chakra/es6module/ModuleCircularBar.js",
@@ -1299,6 +1473,7 @@ const sloppy = [
"chakra/es6module/otherModule.js",
"chakra/es6module/passmodule.js",
"chakra/es6module/testDynamicImportfromModule.js",
+ "chakra/es6module/top-level-await.js",
"chakra/es7/asyncawait-functionality.js",
"chakra/es7/json_superset.js",
"chakra/fieldopts/equiv-mismatch2.js",
@@ -1333,6 +1508,7 @@ const sloppy = [
"WebKit/JSTests/es6/non-strict_function_semantics_labeled_function_statements.js",
"WebKit/JSTests/es6/well-known_symbols_Symbol.unscopables.js",
"WebKit/JSTests/stress/adhoc-setter-frame-should-not-be-killed.js",
+ "WebKit/JSTests/stress/allocation-sinking-changing-structures.js",
"WebKit/JSTests/stress/allocation-sinking-hints-are-valid-ssa-2.js",
"WebKit/JSTests/stress/allocation-sinking-hints-are-valid-ssa.js",
"WebKit/JSTests/stress/array-copywithin.js",
@@ -1342,11 +1518,13 @@ const sloppy = [
"WebKit/JSTests/stress/arrowfunction-lexical-bind-arguments-non-strict-2.js",
"WebKit/JSTests/stress/arrowfunction-lexical-bind-this-8.js",
"WebKit/JSTests/stress/big-int-as-property-name.js",
+ "WebKit/JSTests/stress/bytecode-for-rmw-with-invalid-right-side.js",
"WebKit/JSTests/stress/const-and-with-statement.js",
"WebKit/JSTests/stress/const-not-strict-mode.js",
"WebKit/JSTests/stress/constant-closure-var-with-dynamic-invalidation.js",
"WebKit/JSTests/stress/do-eval-virtual-call-correctly.js",
"WebKit/JSTests/stress/es6-default-parameters.js",
+ "WebKit/JSTests/stress/escaped-keyword-identifiers.js",
"WebKit/JSTests/stress/eval-cached.js",
"WebKit/JSTests/stress/eval-func-decl-block-with-var-and-remove.js",
"WebKit/JSTests/stress/eval-func-decl-in-eval-within-with-scope.js",
@@ -1361,10 +1539,12 @@ const sloppy = [
"WebKit/JSTests/stress/get-my-argument-by-val-safe-wrap-around.js",
"WebKit/JSTests/stress/getter-setter-should-be-cell.js",
"WebKit/JSTests/stress/global-environment-does-not-trap-unscopables.js",
+ "WebKit/JSTests/stress/global-lexical-environment-access-from-module.js",
"WebKit/JSTests/stress/global-lexical-variable-with-statement.js",
"WebKit/JSTests/stress/global-object-proto-getter.js",
"WebKit/JSTests/stress/hashbang.js",
"WebKit/JSTests/stress/import-basic.js",
+ "WebKit/JSTests/stress/import-exception.js",
"WebKit/JSTests/stress/import-from-eval.js",
"WebKit/JSTests/stress/import-reject-with-exception.js",
"WebKit/JSTests/stress/import-tests/cocoa.js",
@@ -1372,21 +1552,31 @@ const sloppy = [
"WebKit/JSTests/stress/import-tests/multiple2.js",
"WebKit/JSTests/stress/import-tests/should.js",
"WebKit/JSTests/stress/import-with-empty-string.js",
+ "WebKit/JSTests/stress/intl-object.js",
"WebKit/JSTests/stress/lazy-global-object-property-materialization-should-not-putDirectWithoutTransition.js",
"WebKit/JSTests/stress/lexical-let-and-with-statement.js",
"WebKit/JSTests/stress/lexical-let-not-strict-mode.js",
"WebKit/JSTests/stress/licm-should-handle-if-a-hoist-causes-a-provable-osr-exit.js",
+ "WebKit/JSTests/stress/logical-assignment-operator-and.js",
+ "WebKit/JSTests/stress/logical-assignment-operator-coalesce.js",
+ "WebKit/JSTests/stress/logical-assignment-operator-or.js",
"WebKit/JSTests/stress/module-namespace-access-change.js",
"WebKit/JSTests/stress/module-namespace-access-non-constant.js",
"WebKit/JSTests/stress/module-namespace-access-poly.js",
+ "WebKit/JSTests/stress/module-namespace-access-set-fails.js",
"WebKit/JSTests/stress/module-namespace-access-transitive-exports.js",
"WebKit/JSTests/stress/module-namespace-access.js",
+ "WebKit/JSTests/stress/module-namespace-object-caching.js",
"WebKit/JSTests/stress/object-allocation-sinking-interpretation-can-interpret-edges-that-can-be-proven-unreachable-in-ai.js",
"WebKit/JSTests/stress/object-allocation-sinking-phase-needs-to-write-to-each-scope-offset.js",
+ "WebKit/JSTests/stress/optional-chaining-and-private-fields.js",
"WebKit/JSTests/stress/printableModuleKey-should-never-throw.js",
+ "WebKit/JSTests/stress/private-in-error.js",
+ "WebKit/JSTests/stress/private-in.js",
"WebKit/JSTests/stress/proxy-call-apply-handler-to-this.js",
"WebKit/JSTests/stress/proxy-getter-stack-overflow.js",
"WebKit/JSTests/stress/proxy-stack-overflow-exceptions.js",
+ "WebKit/JSTests/stress/proxy-trap-this.js",
"WebKit/JSTests/stress/proxy-with-statement.js",
"WebKit/JSTests/stress/put-dynamic-var-strict-and-sloppy.js",
"WebKit/JSTests/stress/re-execute-error-module.js",
@@ -1396,13 +1586,20 @@ const sloppy = [
"WebKit/JSTests/stress/regress-191856.js",
"WebKit/JSTests/stress/regress-192626.js",
"WebKit/JSTests/stress/resources/error-module.js",
+ "WebKit/JSTests/stress/resources/global-lexical-environment-access-from-module-child.js",
"WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports-2.js",
"WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports.js",
"WebKit/JSTests/stress/resources/module-namespace-access.js",
+ "WebKit/JSTests/stress/resources/shadow-realm-example-module.js",
+ "WebKit/JSTests/stress/resources/to-string-module.js",
+ "WebKit/JSTests/stress/resources/value-of-module.js",
+ "WebKit/JSTests/stress/scoped-arguments-table-should-be-tolerant-for-oom.js",
+ "WebKit/JSTests/stress/shadow-realm-import-value.js",
"WebKit/JSTests/stress/sloppy-mode-function-hoisting.js",
"WebKit/JSTests/stress/sloppy-mode-hoist-arguments-function-non-simple-parameter-list.js",
"WebKit/JSTests/stress/string-prototype-scopes.js",
"WebKit/JSTests/stress/tagged-templates-this.js",
+ "WebKit/JSTests/stress/terminated-execution-error-in-promise.js",
"WebKit/JSTests/stress/to-this-before-arrow-function-closes-over-this-that-starts-as-lexical-environment.js",
"WebKit/JSTests/stress/unscopables.js",
"WebKit/JSTests/stress/use-arguments-as-object-pointer.js",
@@ -1433,10 +1630,12 @@ const sloppy = [
"CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test-utils.js",
"CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test.js",
"CrashTests/4563969814560768/1.0.2/resources/js-test-pre.js",
+ "CrashTests/4570511337324544/01067.js",
"CrashTests/4592095397150720/619.js",
"CrashTests/4599018605772800/00095.js",
"CrashTests/4609052021096448/02286.js",
"CrashTests/4620742728613888/02272.js",
+ "CrashTests/4624768836632576/00383.js",
"CrashTests/4625478540066816/02759.js",
"CrashTests/4632675287826432/01188.js",
"CrashTests/4636862568726528/02064.js",
@@ -1483,6 +1682,7 @@ const sloppy = [
"CrashTests/4835573090222080/meta-00096.js",
"CrashTests/4837730048278528/03052.js",
"CrashTests/4843490131312640/03475.js",
+ "CrashTests/4848225223245824/resources/js-test-post.js",
"CrashTests/4850895428517888/2670.js",
"CrashTests/4854644212105216/392.js",
"CrashTests/4855156194934784/meta-00080.js",
@@ -1543,6 +1743,7 @@ const sloppy = [
"CrashTests/5083229709664256/00286.js",
"CrashTests/5083537469079552/03453.js",
"CrashTests/5086848684654592/00140.js",
+ "CrashTests/5087262806704128/00566.js",
"CrashTests/5089350304661504/04000.js",
"CrashTests/5090843606515712/4564.js",
"CrashTests/5091969183776768/js/angular.js",
@@ -1555,6 +1756,7 @@ const sloppy = [
"CrashTests/5110246766673920/117.js",
"CrashTests/5113028242702336/03897.js",
"CrashTests/5114377424601088/00224.js",
+ "CrashTests/5120973776420864/00299.js",
"CrashTests/5126302418337792/00216.js",
"CrashTests/5126730184654848/00846.js",
"CrashTests/5127274311843840/769.js",
@@ -1586,6 +1788,7 @@ const sloppy = [
"CrashTests/5226258591121408/04850.js",
"CrashTests/5226692407984128/meta-00030.js",
"CrashTests/5226950361612288/01783.js",
+ "CrashTests/5228917292597248/01383.js",
"CrashTests/5231597301334016/00307.js",
"CrashTests/5238861996490752/01351.js",
"CrashTests/5242104612651008/mjsunit_modified.js",
@@ -1664,6 +1867,7 @@ const sloppy = [
"CrashTests/5639584467910656/00441.js",
"CrashTests/5639628713492480/04139.js",
"CrashTests/5642849944993792/resources/js-test.js",
+ "CrashTests/5642994023202816/950.js",
"CrashTests/5644307466878976/__MACOSX/._audio.js",
"CrashTests/5644307466878976/__MACOSX/._processor.js",
"CrashTests/5645896422522880/00670.js",
@@ -1703,16 +1907,22 @@ const sloppy = [
"CrashTests/5696049601314816/7.js",
"CrashTests/5697903049441280/03188.js",
"CrashTests/5703976838234112/test.js",
+ "CrashTests/5706349639434240/804.js",
"CrashTests/5707472246472704/1443.js",
+ "CrashTests/5712001662517248/report.js",
"CrashTests/5713776938582016/00793.js",
"CrashTests/5721502735532032/03042.js",
+ "CrashTests/5729254488211456/151.js",
"CrashTests/5733293570392064/00764.js",
"CrashTests/5734750167105536/01271.js",
"CrashTests/5735023732064256/meta-00070.js",
"CrashTests/5736353084342272/resources/testharness.js",
"CrashTests/5737388710821888/resources/js-test.js",
+ "CrashTests/5738737345822720/svg/dynamic-updates/resources/SVGTestCase.js",
"CrashTests/5744365229441024/resources/testharness.js",
"CrashTests/5745342726537216/meta-00053.js",
+ "CrashTests/5747146314285056/support/alertAssert.sub.js",
+ "CrashTests/5747146314285056/support/logTest.sub.js",
"CrashTests/5750922200875008/747.js",
"CrashTests/5753604559470592/03311.js",
"CrashTests/5754855756136448/00202.js",
@@ -1742,6 +1952,7 @@ const sloppy = [
"CrashTests/5878747354365952/02158.js",
"CrashTests/5910324886634496/02597.js",
"CrashTests/5913894233833472/05410.js",
+ "CrashTests/5914309578784768/repro.js",
"CrashTests/5919491238920192/00154.js",
"CrashTests/5925149103357952/webaudio/resources/audit.js",
"CrashTests/5931087833333760/03890.js",
@@ -1786,6 +1997,8 @@ const sloppy = [
"CrashTests/6107917668319232/00571.js",
"CrashTests/6113149884563456/02823.js",
"CrashTests/6124318079582208/00744.js",
+ "CrashTests/6131247808839680/00012.js",
+ "CrashTests/6132283327971328/resources/autosizingTest.js",
"CrashTests/6134706385977344/00149.js",
"CrashTests/6136495474737152/00377.js",
"CrashTests/6150179231105024/conformance/resources/webgl-test.js",
@@ -1818,18 +2031,22 @@ const sloppy = [
"CrashTests/6255231244697600/meta-00216.js",
"CrashTests/6255916311379968/1372.js",
"CrashTests/6259138054324224/meta-00172.js",
+ "CrashTests/6263485068017664/MediaSessionTestUtils.js",
"CrashTests/6269363175555072/00815.js",
"CrashTests/6273728140017664/03583.js",
"CrashTests/6277052166832128/00830.js",
"CrashTests/6278159702425600/01463.js",
"CrashTests/6280577705705472/1146.js",
"CrashTests/6285336190124032/01621.js",
+ "CrashTests/6290863075688448/192.js",
"CrashTests/6292792642371584/00047.js",
"CrashTests/6294597573541888/00725.js",
"CrashTests/6294835115065344/00805.js",
"CrashTests/6295241556492288/01763.js",
"CrashTests/6300171514675200/00115.js",
"CrashTests/6304143111356416/00782.js",
+ "CrashTests/6313127026688000/02126.js",
+ "CrashTests/6316400054960128/165.js",
"CrashTests/6319065615040512/04012.js",
"CrashTests/6328755580567552/resources/js-test.js",
"CrashTests/6328755580567552/svg/dynamic-updates/resources/SVGTestCase.js",
@@ -1837,6 +2054,7 @@ const sloppy = [
"CrashTests/6332832186761216/00681.js",
"CrashTests/6332904701427712/00888.js",
"CrashTests/6332930432958464/02637.js",
+ "CrashTests/6333032621998080/04777.js",
"CrashTests/6339944789049344/04142.js",
"CrashTests/6345007341764608/00699.js",
"CrashTests/6346448656400384/dist/jquery.js",
@@ -1883,9 +2101,11 @@ const sloppy = [
"CrashTests/6534217117990912/01172.js",
"CrashTests/6541223017054208/01484.js",
"CrashTests/6550225930944512/mnt/scratch0/clusterfuzz/slave-bot/inputs/fuzzers/inferno_twister_custom_bundle/inferno_twister_custom_bundle_data/moz_tests/dom/workers/test/threadErrors_worker1.js",
+ "CrashTests/6551265423982592/00708.js",
"CrashTests/6552552797503488/bug_41414141.js",
"CrashTests/6566622022860800/05257.js",
"CrashTests/6566953431597056/02044.js",
+ "CrashTests/6572559555166208/report.js",
"CrashTests/6574969751601152/01903.js",
"CrashTests/6576437049950208/conformance/resources/glsl-generator.js",
"CrashTests/6576437049950208/resources/js-test-pre.js",
@@ -1899,6 +2119,7 @@ const sloppy = [
"CrashTests/6603770342408192/00211.js",
"CrashTests/6613865297084416/builds/chromium-browser-syzyasan_win32-release/revisions/asan-win32-release-276100/resources/inspector/main/Main.js",
"CrashTests/6616252740009984/01288.js",
+ "CrashTests/6617130045341696/02588.js",
"CrashTests/6622275291840512/resources/js-test.js",
"CrashTests/6637202159960064/01577.js",
"CrashTests/6637774979465216/01973.js",
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js b/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
index 00272fcd55..665550f92b 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
@@ -49,7 +49,7 @@ class FunctionCallMutator extends mutator.Mutator {
}
const probability = random.random();
- if (probability < 0.4) {
+ if (probability < 0.3) {
const randFunc = common.randomFunction(path);
if (randFunc) {
thisMutator.annotate(
@@ -58,11 +58,12 @@ class FunctionCallMutator extends mutator.Mutator {
path.node.callee = randFunc;
}
- } else if (probability < 0.6 && thisMutator.settings.engine == 'V8') {
+ } else if (probability < 0.7 && thisMutator.settings.engine == 'V8') {
const prepareTemplate = babelTemplate(
'__V8BuiltinPrepareFunctionForOptimization(ID)');
+ const optimizationMode = random.choose(0.7) ? 'Function' : 'Maglev';
const optimizeTemplate = babelTemplate(
- '__V8BuiltinOptimizeFunctionOnNextCall(ID)');
+ `__V8BuiltinOptimize${optimizationMode}OnNextCall(ID)`);
const nodes = [
prepareTemplate({
@@ -86,7 +87,7 @@ class FunctionCallMutator extends mutator.Mutator {
thisMutator.insertBeforeSkip(
path, _liftExpressionsToStatements(path, nodes));
}
- } else if (probability < 0.75 && thisMutator.settings.engine == 'V8') {
+ } else if (probability < 0.8 && thisMutator.settings.engine == 'V8') {
const template = babelTemplate(
'__V8BuiltinCompileBaseline(ID)');
@@ -108,7 +109,7 @@ class FunctionCallMutator extends mutator.Mutator {
thisMutator.insertBeforeSkip(
path, _liftExpressionsToStatements(path, nodes));
}
- } else if (probability < 0.85 &&
+ } else if (probability < 0.9 &&
thisMutator.settings.engine == 'V8') {
const template = babelTemplate(
'__V8BuiltinDeoptimizeFunction(ID)');
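
Note: the hunks above rebalance FunctionCallMutator's single random draw: below 0.3 the callee is swapped for a random function; between 0.3 and 0.7 (V8 only) the function is prepared and optimized, with a secondary choose(0.7) call picking OptimizeFunctionOnNextCall or OptimizeMaglevOnNextCall; 0.7-0.8 compiles baseline; 0.8-0.9 deoptimizes. A minimal sketch of that one-draw bucketing pattern, with placeholder action names rather than the fuzzer's real builtins:

    // Sketch only: one uniform draw selects a mutation bucket, mirroring the
    // thresholds in the hunk above (action names are illustrative).
    function pickMutation(random, choose, isV8) {
      const p = random();                    // uniform in [0, 1)
      if (p < 0.3) return 'replace-callee';
      if (p < 0.7 && isV8) {
        // Secondary coin flip: usually full optimization, sometimes Maglev.
        return choose(0.7) ? 'optimize-function' : 'optimize-maglev';
      }
      if (p < 0.8 && isV8) return 'compile-baseline';
      if (p < 0.9 && isV8) return 'deoptimize';
      return 'no-op';
    }
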
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js b/deps/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js
index f0e1fd7871..8729a73f30 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js
@@ -13,6 +13,7 @@ const path = require('path');
const common = require('./mutators/common.js');
const db = require('./db.js');
+const random = require('./random.js');
const sourceHelpers = require('./source_helpers.js');
const { AddTryCatchMutator } = require('./mutators/try_catch.js');
@@ -26,6 +27,8 @@ const { ObjectMutator } = require('./mutators/object_mutator.js');
const { VariableMutator } = require('./mutators/variable_mutator.js');
const { VariableOrObjectMutator } = require('./mutators/variable_or_object_mutation.js');
+const MAX_EXTRA_MUTATIONS = 5;
+
function defaultSettings() {
return {
ADD_VAR_OR_OBJ_MUTATIONS: 0.1,
@@ -38,6 +41,8 @@ function defaultSettings() {
MUTATE_NUMBERS: 0.05,
MUTATE_OBJECTS: 0.1,
MUTATE_VARIABLES: 0.075,
+ SCRIPT_MUTATOR_EXTRA_MUTATIONS: 0.2,
+ SCRIPT_MUTATOR_SHUFFLE: 0.2,
};
}
@@ -61,8 +66,9 @@ class ScriptMutator {
new ExpressionMutator(settings),
new FunctionCallMutator(settings),
new VariableOrObjectMutator(settings),
- new AddTryCatchMutator(settings),
];
+ this.trycatch = new AddTryCatchMutator(settings);
+ this.settings = settings;
}
_addMjsunitIfNeeded(dependencies, input) {
@@ -129,9 +135,31 @@ class ScriptMutator {
}
mutate(source) {
- for (const mutator of this.mutators) {
+ let mutators = this.mutators.slice();
+ let annotations = [];
+ if (random.choose(this.settings.SCRIPT_MUTATOR_SHUFFLE)){
+ annotations.push(' Script mutator: using shuffled mutators');
+ random.shuffle(mutators);
+ }
+
+ if (random.choose(this.settings.SCRIPT_MUTATOR_EXTRA_MUTATIONS)){
+ for (let i = random.randInt(1, MAX_EXTRA_MUTATIONS); i > 0; i--) {
+ let mutator = random.single(this.mutators);
+ mutators.push(mutator);
+ annotations.push(` Script mutator: extra ${mutator.constructor.name}`);
+ }
+ }
+
+ // Try-catch wrapping should always be the last mutation.
+ mutators.push(this.trycatch);
+
+ for (const mutator of mutators) {
mutator.mutate(source);
}
+
+ for (const annotation of annotations.reverse()) {
+ sourceHelpers.annotateWithComment(source.ast, annotation);
+ }
}
// Returns parsed dependencies for inputs.
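
Note: the mutate() rework above keeps AddTryCatchMutator out of the shuffled list and appends it at the end, so try/catch wrapping always runs after every other mutation; optional shuffling and up to MAX_EXTRA_MUTATIONS duplicated mutators are gated by the two new SCRIPT_MUTATOR_* probabilities. A rough sketch of that "shuffle everything except a pinned tail" idea, assuming a generic rng() in [0, 1) instead of the fuzzer's random module:

    // Illustrative pipeline builder: shuffled copy + random extras + fixed tail.
    function buildPipeline(mutators, tryCatch, rng, maxExtra = 5) {
      const pipeline = mutators.slice();
      for (let i = pipeline.length - 1; i > 0; i--) {   // Fisher-Yates shuffle
        const j = Math.floor(rng() * (i + 1));
        [pipeline[i], pipeline[j]] = [pipeline[j], pipeline[i]];
      }
      const extras = 1 + Math.floor(rng() * maxExtra);  // 1..maxExtra duplicates
      for (let i = 0; i < extras; i++) {
        pipeline.push(mutators[Math.floor(rng() * mutators.length)]);
      }
      pipeline.push(tryCatch);                          // always applied last
      return pipeline;
    }
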
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js b/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
index bef68c86a4..28709e6b56 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
@@ -359,7 +359,7 @@ function removeComments(ast) {
*/
function cleanAsserts(ast) {
function replace(string) {
- return string.replace(/[Aa]ssert/g, '*****t');
+ return string == null ? null : string.replace(/[Aa]ssert/g, '*****t');
}
babelTraverse(ast, {
StringLiteral(path) {
@@ -375,15 +375,22 @@ function cleanAsserts(ast) {
}
/**
- * Annotate code with original file path.
+ * Annotate code with top-level comment.
*/
-function annotateWithOriginalPath(ast, relPath) {
+function annotateWithComment(ast, comment) {
if (ast.program && ast.program.body && ast.program.body.length > 0) {
babelTypes.addComment(
- ast.program.body[0], 'leading', ' Original: ' + relPath, true);
+ ast.program.body[0], 'leading', comment, true);
}
}
+/**
+ * Annotate code with original file path.
+ */
+function annotateWithOriginalPath(ast, relPath) {
+ annotateWithComment(ast, ' Original: ' + relPath);
+}
+
// TODO(machenbach): Move this into the V8 corpus. Other test suites don't
// use this flag logic.
function loadFlags(data) {
@@ -449,6 +456,7 @@ function generateCode(source, dependencies=[]) {
module.exports = {
BABYLON_OPTIONS: BABYLON_OPTIONS,
BABYLON_REPLACE_VAR_OPTIONS: BABYLON_REPLACE_VAR_OPTIONS,
+ annotateWithComment: annotateWithComment,
generateCode: generateCode,
loadDependencyAbs: loadDependencyAbs,
loadResource: loadResource,
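
Note: two of the source_helpers.js changes above go together: cleanAsserts() now tolerates a null string (template literal elements such as `\01` have a null cooked value, which the updated spidermonkey load.js test exercises), and the path annotation is refactored through the more general annotateWithComment(), which the script mutator reuses for its shuffle/extra-mutation notes. A tiny standalone sketch of the null-safe replacement:

    // Null-safe scrubbing, as in the replace() helper above; tagged templates
    // like check()`\01` produce elements whose cooked value is null.
    function scrubAsserts(text) {
      return text == null ? null : text.replace(/[Aa]ssert/g, '*****t');
    }
    scrubAsserts('Assertion failed: x');  // -> '*****tion failed: x'
    scrubAsserts(null);                   // -> null instead of a TypeError
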
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
index 292c1c0c7e..70b6038dd6 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
@@ -36,14 +36,15 @@ describe('Mutate functions', () => {
});
it('is robust without available functions', () => {
- sandbox.stub(random, 'random').callsFake(() => { return 0.3; });
+ sandbox.stub(random, 'random').callsFake(() => { return 0.2; });
// We just ensure here that mutating this file doesn't throw.
loadAndMutate('mutate_function_call.js');
});
- it('optimizes functions in V8', () => {
+ it('optimizes functions with turbofan in V8', () => {
sandbox.stub(random, 'random').callsFake(() => { return 0.5; });
+ sandbox.stub(random, 'choose').callsFake(p => true);
const source = loadAndMutate('mutate_function_call.js');
const mutated = sourceHelpers.generateCode(source);
@@ -51,6 +52,18 @@ describe('Mutate functions', () => {
'mutate_function_call_expected.js', mutated);
});
+ it('optimizes functions with maglev in V8', () => {
+ sandbox.stub(random, 'random').callsFake(() => { return 0.5; });
+ // False-path takes 'Maglev'. Other calls to choose should return
+ // true. It's also used to determine if a mutator should be chosen.
+ sandbox.stub(random, 'choose').callsFake(p => p == 0.7 ? false : true);
+
+ const source = loadAndMutate('mutate_function_call.js');
+ const mutated = sourceHelpers.generateCode(source);
+ helpers.assertExpectedResult(
+ 'mutate_function_call_maglev_expected.js', mutated);
+ });
+
it('compiles functions in V8 to baseline', () => {
sandbox.stub(random, 'random').callsFake(() => { return 0.7; });
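
Note: the new maglev test above relies on argument-dependent stubbing: random.choose is faked so that only the 0.7 probability used for the Function/Maglev decision returns false (selecting Maglev), while every other choose() call keeps returning true. The same sinon pattern in isolation, with a hypothetical rng object standing in for the fuzzer's random module:

    const sinon = require('sinon');

    const rng = { choose: (p) => Math.random() < p };  // stand-in object
    const sandbox = sinon.createSandbox();

    // Only the p == 0.7 call flips to false; all other probabilities pass.
    sandbox.stub(rng, 'choose').callsFake((p) => p !== 0.7);

    rng.choose(0.7);  // false -> would take the Maglev branch
    rng.choose(0.1);  // true  -> other decisions proceed as usual
    sandbox.restore();
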
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutation_order.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutation_order.js
new file mode 100644
index 0000000000..4d7fbfed54
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_mutation_order.js
@@ -0,0 +1,56 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test shuffling mutators and extra mutations.
+ *
+ * Use minimal probability settings to demonstrate order changes of top-level
+ * mutators. Which mutations are used exactly is not relevant to the test and
+ * handled pseudo-randomly.
+ */
+
+'use strict';
+
+const sinon = require('sinon');
+
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+const random = require('../random.js');
+
+const sandbox = sinon.createSandbox();
+
+describe('Toplevel mutations', () => {
+ afterEach(() => {
+ sandbox.restore();
+ });
+
+ it('shuffle their order', () => {
+ // Make random operations deterministic.
+ helpers.deterministicRandom(sandbox);
+
+ this.settings = {
+ ADD_VAR_OR_OBJ_MUTATIONS: 0.0,
+ MUTATE_CROSSOVER_INSERT: 0.0,
+ MUTATE_EXPRESSIONS: 0.0,
+ MUTATE_FUNCTION_CALLS: 1.0,
+ MUTATE_NUMBERS: 1.0,
+ MUTATE_VARIABLES: 0.0,
+ SCRIPT_MUTATOR_SHUFFLE: 1.0,
+ SCRIPT_MUTATOR_EXTRA_MUTATIONS: 1.0,
+ engine: 'V8',
+ testing: true,
+ };
+
+ const source = helpers.loadTestData('mutation_order/input.js');
+ const mutator = new scriptMutator.ScriptMutator(this.settings, helpers.DB_DIR);
+ const mutated = mutator.mutateInputs([source]);
+ const code = sourceHelpers.generateCode(mutated);
+
+ // The test data should be rich enough to produce a pattern from the
+ // FunctionCallMutator that afterwards gets mutated by the NumberMutator.
+ helpers.assertExpectedResult(
+ 'mutation_order/output_expected.js', code);
+ });
+});
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js
new file mode 100644
index 0000000000..f6b8823635
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_maglev_expected.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+%PrepareFunctionForOptimization(__f_0);
+
+__f_0(1);
+
+__f_0(1);
+
+%OptimizeMaglevOnNextCall(__f_0);
+
+// Original: mutate_function_call.js
+
+/* FunctionCallMutator: Optimizing __f_0 */
+__f_0(1);
+
+a = (
+/* FunctionCallMutator: Optimizing __f_0 */
+%PrepareFunctionForOptimization(__f_0), __f_0(1), __f_0(1), %OptimizeMaglevOnNextCall(__f_0), __f_0(1));
+foo(1, (
+/* FunctionCallMutator: Optimizing __f_0 */
+%PrepareFunctionForOptimization(__f_0), __f_0(), __f_0(), %OptimizeMaglevOnNextCall(__f_0), __f_0()));
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/input.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/input.js
new file mode 100644
index 0000000000..b58810985b
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/input.js
@@ -0,0 +1,23 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var i = 1;
+var j = 'str';
+var k = undefined;
+var l = {0: 1};
+
+function foo(a, b) {
+ return a + b;
+}
+
+foo(i, 3);
+
+function bar(a) {
+ return foo(a, a);
+}
+
+foo('foo', j);
+bar(2, foo(i, j));
+foo(i, j);
+bar(j, 3);
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js
new file mode 100644
index 0000000000..ddad108080
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mutation_order/output_expected.js
@@ -0,0 +1,119 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Script mutator: using shuffled mutators
+// Script mutator: extra ArrayMutator
+// Script mutator: extra VariableMutator
+// Script mutator: extra ExpressionMutator
+// Script mutator: extra ArrayMutator
+
+// Original: mutation_order/input.js
+try {
+ var __v_0 =
+ /* NumberMutator: Replaced 1 with -10 */
+ -10;
+} catch (e) {}
+
+try {
+ var __v_1 = 'str';
+} catch (e) {}
+
+try {
+ var __v_2 = undefined;
+} catch (e) {}
+
+try {
+ var __v_3 = {
+ /* NumberMutator: Replaced 0 with 8 */
+ 8:
+ /* NumberMutator: Replaced 1 with 3 */
+ 3
+ };
+} catch (e) {}
+
+function __f_0(__v_4, __v_5) {
+ return __v_4 + __v_5;
+}
+
+try {
+ %PrepareFunctionForOptimization(__f_0);
+} catch (e) {}
+
+try {
+ __f_0(__v_0,
+ /* NumberMutator: Replaced 3 with 5 */
+ 5);
+} catch (e) {}
+
+try {
+ __f_0(__v_0,
+ /* NumberMutator: Replaced 3 with NaN */
+ NaN);
+} catch (e) {}
+
+try {
+ %OptimizeFunctionOnNextCall(__f_0);
+} catch (e) {}
+
+try {
+ /* FunctionCallMutator: Optimizing __f_0 */
+ __f_0(__v_0,
+ /* NumberMutator: Replaced 3 with 2 */
+ 2);
+} catch (e) {}
+
+function __f_1(__v_6) {
+ return (
+ /* FunctionCallMutator: Replaced __f_0 with __f_0 */
+ __f_0(__v_6, __v_6)
+ );
+}
+
+try {
+ %PrepareFunctionForOptimization(__f_0);
+} catch (e) {}
+
+try {
+ __f_0('foo', __v_1);
+} catch (e) {}
+
+try {
+ __f_0('foo', __v_1);
+} catch (e) {}
+
+try {
+ %OptimizeFunctionOnNextCall(__f_0);
+} catch (e) {}
+
+try {
+ /* FunctionCallMutator: Optimizing __f_0 */
+ __f_0('foo', __v_1);
+} catch (e) {}
+
+try {
+ /* FunctionCallMutator: Compiling baseline __f_1 */
+ %CompileBaseline(__f_1);
+} catch (e) {}
+
+try {
+ __f_1(
+ /* NumberMutator: Replaced 2 with -10 */
+ -10, __f_0(__v_0, __v_1));
+} catch (e) {}
+
+try {
+ /* FunctionCallMutator: Deoptimizing __f_0 */
+ __f_0(__v_0, __v_1);
+} catch (e) {}
+
+try {
+ %DeoptimizeFunction(__f_0);
+} catch (e) {}
+
+try {
+ /* FunctionCallMutator: Replaced __f_1 with __f_1 */
+ __f_1(__v_1,
+ /* NumberMutator: Replaced 3 with 16 */
+ 16);
+} catch (e) {}
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
index 43a776c476..6ae5f80dea 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
@@ -10,3 +10,6 @@ if (!ok)
throw new Error(`Assertion failed: Some text`);
print("Assertion failed: Some text");
+
+// Check that we can load template literals with null cooked value.
+check()`\01`;
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js
index b43edcff18..f45e4aa0d4 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js
@@ -19,3 +19,4 @@ console.log('load2.js');
console.log('load.js');
if (!ok) throw new Error(`*****tion failed: Some text`);
print("*****tion failed: Some text");
+check()`\01`;
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py b/deps/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py
index 4719fe0e72..8e7fc76a12 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py
@@ -56,9 +56,10 @@ def run(fuzz_file, flag_file):
cmd = ' '.join(args)
try:
output = subprocess.check_output(cmd, stderr=subprocess.PIPE, shell=True)
- return (cmd, output)
+ return (cmd, output.decode('utf-8'))
except Exception as e:
- return (cmd, e.output)
+ return (cmd, e.output.decode('utf-8'))
+
def list_tests():
"""Iterates all fuzz tests and corresponding flags in the given base dir."""
diff --git a/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json b/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json
index b33e672eda..51b46c6880 100644
--- a/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json
+++ b/deps/v8/tools/clusterfuzz/trials/clusterfuzz_trials_config.json
@@ -1,10 +1,11 @@
[
{"app_args": "--assert-types", "app_name": "d8", "probability": 0.25, "contradicts": ["--stress-concurrent-inlining", "--stress-concurrent-inlining-attach-code"]},
- {"app_args": "--interrupt-budget-for-feedback-vector-allocation=0", "app_name": "d8", "probability": 0.05},
{"app_args": "--force-slow-path", "app_name": "d8", "probability": 0.05},
{"app_args": "--future", "app_name": "d8", "probability": 0.25},
+ {"app_args": "--harmony-struct", "app_name": "d8", "probability": 0.1},
{"app_args": "--interrupt-budget=1000", "app_name": "d8", "probability": 0.25},
- {"app_args": "--jitless", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--interrupt-budget-for-feedback-allocation=0", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--jitless", "app_name": "d8", "probability": 0.1, "contradicts": ["--stress-concurrent-inlining", "--stress-concurrent-inlining-attach-code"]},
{"app_args": "--maglev", "app_name": "d8", "probability": 0.1, "contradicts": ["--jitless"]},
{"app_args": "--minor-mc", "app_name": "d8", "probability": 0.1},
{"app_args": "--random-gc-interval=2000", "app_name": "d8", "probability": 0.05},
@@ -25,14 +26,18 @@
{"app_args": "--no-use-ic", "app_name": "d8", "probability": 0.25},
{"app_args": "--no-wasm-generic-wrapper", "app_name": "d8", "probability": 0.1},
{"app_args": "--regexp-interpret-all", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--shared-string-table", "app_name": "d8", "probability": 0.1},
{"app_args": "--simulate-errors", "app_name": "d8", "probability": 0.001},
{"app_args": "--stress-compaction-random", "app_name": "d8", "probability": 0.05},
- {"app_args": "--stress-concurrent-inlining", "app_name": "d8", "probability": 0.25, "contradicts": ["--assert-types"]},
- {"app_args": "--stress-concurrent-inlining-attach-code", "app_name": "d8", "probability": 0.05, "contradicts": ["--assert-types"]},
+ {"app_args": "--stress-concurrent-inlining", "app_name": "d8", "probability": 0.25, "contradicts": ["--assert-types", "--jitless"]},
+ {"app_args": "--stress-concurrent-inlining-attach-code", "app_name": "d8", "probability": 0.05, "contradicts": ["--assert-types", "--jitless"]},
{"app_args": "--stress-flush-code", "app_name": "d8", "probability": 0.25},
{"app_args": "--stress-marking=100", "app_name": "d8", "probability": 0.05},
{"app_args": "--stress-scavenge=100", "app_name": "d8", "probability": 0.05},
+ {"app_args": "--turboshaft", "app_name": "d8", "probability": 0.2},
{"app_args": "--turbo-instruction-scheduling", "app_name": "d8", "probability": 0.1},
{"app_args": "--turbo-stress-instruction-scheduling", "app_name": "d8", "probability": 0.1},
- {"app_args": "--stress-wasm-code-gc", "app_name": "d8", "probability": 0.1}
+ {"app_args": "--stress-wasm-code-gc", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--lazy", "app_name": "d8", "probability": 0.1},
+ {"app_args": "--no-stress-lazy-source-positions", "app_name": "d8", "probability": 0.1}
]
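
Note: each trial entry above carries an independent probability plus an optional contradicts list, so flags such as --jitless and --stress-concurrent-inlining are never sampled together. A hypothetical sampler sketch (the actual ClusterFuzz consumer of this file is not shown here) illustrating how such a config could be applied:

    // Hypothetical trial sampler: pick each flag with its probability,
    // skipping any flag that contradicts one already chosen.
    function sampleTrials(trials, rng = Math.random) {
      const chosen = [];
      for (const t of trials) {
        const conflicts = (t.contradicts || []).some((f) => chosen.includes(f));
        if (!conflicts && rng() < t.probability) chosen.push(t.app_args);
      }
      return chosen;
    }

    sampleTrials([
      { app_args: '--jitless', probability: 0.1,
        contradicts: ['--stress-concurrent-inlining'] },
      { app_args: '--stress-concurrent-inlining', probability: 0.25,
        contradicts: ['--jitless'] },
    ]);
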
diff --git a/deps/v8/tools/codemap.mjs b/deps/v8/tools/codemap.mjs
index 57040635c3..985e721c34 100644
--- a/deps/v8/tools/codemap.mjs
+++ b/deps/v8/tools/codemap.mjs
@@ -65,6 +65,12 @@ export class CodeMap {
*/
pages_ = new Set();
+ constructor(useBigInt=false) {
+ this.useBigInt = useBigInt;
+ this.kPageSize = useBigInt ? BigInt(kPageSize) : kPageSize;
+ this.kOne = useBigInt ? 1n : 1;
+ this.kZero = useBigInt ? 0n : 0;
+ }
/**
* Adds a code entry that might overlap with static code (e.g. for builtins).
@@ -73,7 +79,7 @@ export class CodeMap {
* @param {CodeEntry} codeEntry Code entry object.
*/
addAnyCode(start, codeEntry) {
- const pageAddr = (start / kPageSize) | 0;
+ const pageAddr = (start / this.kPageSize) | this.kZero;
if (!this.pages_.has(pageAddr)) return this.addCode(start, codeEntry);
// We might have loaded static code (builtins, bytecode handlers)
// and we get more information later in v8.log with code-creation events.
@@ -147,8 +153,8 @@ export class CodeMap {
* @private
*/
markPages_(start, end) {
- for (let addr = start; addr <= end; addr += kPageSize) {
- this.pages_.add((addr / kPageSize) | 0);
+ for (let addr = start; addr <= end; addr += this.kPageSize) {
+ this.pages_.add((addr / this.kPageSize) | this.kZero);
}
}
@@ -157,13 +163,13 @@ export class CodeMap {
*/
deleteAllCoveredNodes_(tree, start, end) {
const to_delete = [];
- let addr = end - 1;
+ let addr = end - this.kOne;
while (addr >= start) {
const node = tree.findGreatestLessThan(addr);
if (node === null) break;
const start2 = node.key, end2 = start2 + node.value.size;
if (start2 < end && start < end2) to_delete.push(start2);
- addr = start2 - 1;
+ addr = start2 - this.kOne;
}
for (let i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
}
@@ -191,7 +197,7 @@ export class CodeMap {
* @param {number} addr Address.
*/
findAddress(addr) {
- const pageAddr = (addr / kPageSize) | 0;
+ const pageAddr = (addr / this.kPageSize) | this.kZero;
if (this.pages_.has(pageAddr)) {
// Static code entries can contain "holes" of unnamed code.
// In this case, the whole library is assigned to this address.
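
Note: the new codemap.mjs constructor exists because BigInt and Number operands cannot be mixed in JavaScript arithmetic: once addresses arrive as BigInt, the page size and the 0/1 constants must be BigInt too, otherwise expressions like (start / kPageSize) | 0 throw. A quick illustration with an arbitrary demo page size (not the module's real kPageSize value):

    // BigInt and Number cannot be mixed, so constants must match the address type.
    const kPageSizeNum = 4096;                 // demo value only
    const kPageSize = BigInt(kPageSizeNum);
    const addr = 0x7f00deadbeefn;

    // addr / kPageSizeNum;                    // TypeError: Cannot mix BigInt and other types
    const page = (addr / kPageSize) | 0n;      // ok: both operands are BigInt
    const prev = addr - 1n;                    // likewise for the kOne/kZero constants
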
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
index 2256df1f55..d557aca2d3 100644
--- a/deps/v8/tools/debug_helper/BUILD.gn
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -69,7 +69,7 @@ action("gen_heap_constants") {
]
}
-v8_component("v8_debug_helper") {
+v8_component("v8_debug_helper_internal") {
testonly = true
public = [ "debug-helper.h" ]
@@ -81,6 +81,7 @@ v8_component("v8_debug_helper") {
"$target_gen_dir/../../torque-generated/debug-macros.h",
"$target_gen_dir/../../torque-generated/instance-types.h",
"$target_gen_dir/heap-constants-gen.cc",
+ "../../src/common/ptr-compr.cc",
"compiler-types.cc",
"debug-helper-internal.cc",
"debug-helper-internal.h",
@@ -104,11 +105,15 @@ v8_component("v8_debug_helper") {
configs = [ ":internal_config" ]
if (v8_enable_i18n_support) {
- configs += [ "//third_party/icu:icu_config" ]
+ configs += [ "$v8_icu_path:icu_config" ]
}
remove_configs = [ "//build/config/compiler:no_rtti" ]
configs += [ "//build/config/compiler:rtti" ]
+}
+group("v8_debug_helper") {
+ testonly = true
+ public_deps = [ ":v8_debug_helper_internal" ]
public_configs = [ ":external_config" ]
}
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.cc b/deps/v8/tools/debug_helper/debug-helper-internal.cc
index d13ed9f973..8ff3f29f5c 100644
--- a/deps/v8/tools/debug_helper/debug-helper-internal.cc
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.cc
@@ -23,9 +23,16 @@ bool IsPointerCompressed(uintptr_t address) {
uintptr_t EnsureDecompressed(uintptr_t address,
uintptr_t any_uncompressed_ptr) {
if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ Address base =
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(any_uncompressed_ptr);
+ if (base != V8HeapCompressionScheme::base()) {
+ V8HeapCompressionScheme::InitBase(base);
+ }
+#endif
// TODO(v8:11880): ExternalCodeCompressionScheme might be needed here for
// decompressing Code pointers from external code space.
- return i::V8HeapCompressionScheme::DecompressTaggedAny(
+ return i::V8HeapCompressionScheme::DecompressTagged(
any_uncompressed_ptr, static_cast<i::Tagged_t>(address));
}
diff --git a/deps/v8/tools/debug_helper/gen-heap-constants.py b/deps/v8/tools/debug_helper/gen-heap-constants.py
index d78276a86f..38fd4a1745 100644
--- a/deps/v8/tools/debug_helper/gen-heap-constants.py
+++ b/deps/v8/tools/debug_helper/gen-heap-constants.py
@@ -72,7 +72,7 @@ if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')): # Only exists in ptr-compr build
if (space_name in expected_spaces):
out = out + ' if (heap_addresses->' + space_name + '_first_page == 0) {\n'
out = out + ' heap_addresses->' + space_name + \
- '_first_page = i::V8HeapCompressionScheme::DecompressTaggedPointer(' + \
+ '_first_page = i::V8HeapCompressionScheme::DecompressTagged(' + \
'any_uncompressed_ptr, ' + str(offset) + ');\n'
out = out + ' }\n'
out = out + '}\n'
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index 659a138462..186438d7ac 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -244,11 +244,11 @@ class ReadStringVisitor : public TqObjectVisitor {
return std::string(result.data(), write_index);
}
- template <typename TChar>
- Value<TChar> ReadCharacter(uintptr_t data_address, int32_t index) {
- TChar value{};
+ template <typename T>
+ Value<T> ReadValue(uintptr_t data_address, int32_t index = 0) {
+ T value{};
d::MemoryAccessResult validity =
- accessor_(data_address + index * sizeof(TChar),
+ accessor_(data_address + index * sizeof(T),
reinterpret_cast<uint8_t*>(&value), sizeof(value));
return {validity, value};
}
@@ -259,7 +259,7 @@ class ReadStringVisitor : public TqObjectVisitor {
for (; index_ < length && index_ < limit_ && !done_; ++index_) {
static_assert(sizeof(TChar) <= sizeof(char16_t));
char16_t c = static_cast<char16_t>(
- GetOrFinish(ReadCharacter<TChar>(data_address, index_)));
+ GetOrFinish(ReadValue<TChar>(data_address, index_)));
if (!done_) AddCharacter(c);
}
}
@@ -350,13 +350,22 @@ class ReadStringVisitor : public TqObjectVisitor {
ExternalPointer_t resource_data =
GetOrFinish(object->GetResourceDataValue(accessor_));
#ifdef V8_ENABLE_SANDBOX
- Isolate* isolate = GetIsolateForSandbox(
- HeapObject::unchecked_cast(Object(heap_addresses_.any_heap_pointer)));
- ExternalPointerHandle handle =
- static_cast<ExternalPointerHandle>(resource_data);
- uintptr_t data_address =
- static_cast<uintptr_t>(isolate->shared_external_pointer_table().Get(
- handle, kExternalStringResourceDataTag));
+ Address memory_chunk =
+ BasicMemoryChunk::BaseAddress(object->GetMapAddress());
+ Address heap = GetOrFinish(
+ ReadValue<Address>(memory_chunk + BasicMemoryChunk::kHeapOffset));
+ Isolate* isolate = Isolate::FromHeap(reinterpret_cast<Heap*>(heap));
+ Address external_pointer_table_address_address =
+ isolate->shared_external_pointer_table_address_address();
+ Address external_pointer_table_address = GetOrFinish(
+ ReadValue<Address>(external_pointer_table_address_address));
+ Address external_pointer_table =
+ GetOrFinish(ReadValue<Address>(external_pointer_table_address));
+ int32_t index =
+ static_cast<int32_t>(resource_data >> kExternalPointerIndexShift);
+ Address tagged_data =
+ GetOrFinish(ReadValue<Address>(external_pointer_table, index));
+ Address data_address = tagged_data & ~kExternalStringResourceDataTag;
#else
uintptr_t data_address = static_cast<uintptr_t>(resource_data);
#endif // V8_ENABLE_SANDBOX
@@ -614,6 +623,13 @@ std::unique_ptr<ObjectPropertiesResult> GetHeapObjectPropertiesMaybeCompressed(
any_uncompressed_ptr = heap_addresses.old_space_first_page;
if (any_uncompressed_ptr == 0)
any_uncompressed_ptr = heap_addresses.read_only_space_first_page;
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ Address base =
+ V8HeapCompressionScheme::GetPtrComprCageBaseAddress(any_uncompressed_ptr);
+ if (base != V8HeapCompressionScheme::base()) {
+ V8HeapCompressionScheme::InitBase(base);
+ }
+#endif
FillInUnknownHeapAddresses(&heap_addresses, any_uncompressed_ptr);
if (any_uncompressed_ptr == 0) {
// We can't figure out the heap range. Just check for known objects.
diff --git a/deps/v8/tools/dev/gen-static-roots.py b/deps/v8/tools/dev/gen-static-roots.py
new file mode 100755
index 0000000000..48d9af513e
--- /dev/null
+++ b/deps/v8/tools/dev/gen-static-roots.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+
+import subprocess
+import argparse
+import os
+import filecmp
+import tempfile
+import shutil
+import platform
+from pathlib import Path
+
+# Detect if we have goma
+
+
+def _Which(cmd):
+ for path in os.environ["PATH"].split(os.pathsep):
+ if os.path.exists(os.path.join(path, cmd)):
+ return os.path.join(path, cmd)
+ return None
+
+
+def DetectGoma():
+ if os.environ.get("GOMA_DIR"):
+ return os.environ.get("GOMA_DIR")
+ if os.environ.get("GOMADIR"):
+ return os.environ.get("GOMADIR")
+ # There is a copy of goma in depot_tools, but it might not be in use on
+ # this machine.
+ goma = _Which("goma_ctl")
+ if goma is None:
+ return None
+ cipd_bin = os.path.join(os.path.dirname(goma), ".cipd_bin")
+ if not os.path.exists(cipd_bin):
+ return None
+ goma_auth = os.path.expanduser("~/.goma_client_oauth2_config")
+ if not os.path.exists(goma_auth):
+ return None
+ return cipd_bin
+
+
+GOMADIR = DetectGoma()
+IS_GOMA_MACHINE = GOMADIR is not None
+
+USE_GOMA = "true" if IS_GOMA_MACHINE else "false"
+
+# List of all supported build configurations for static roots
+
+STATIC_ROOT_CONFIGURATIONS = {
+ "ptr-cmpr-wasm-intl": {
+ "target":
+ "src/roots/static-roots.h",
+ "gn_args":
+ f"""\
+is_debug = false
+use_goma = {USE_GOMA}
+v8_enable_static_roots = false
+v8_enable_static_root_generation = true
+v8_enable_pointer_compression = true
+v8_enable_shared_ro_heap = true
+v8_enable_pointer_compression_shared_cage = true
+v8_enable_webassembly = true
+v8_enable_i18n_support = true
+dcheck_always_on = true
+"""
+ },
+}
+
+# Parse args
+
+parser = argparse.ArgumentParser(description='Generates static-roots.h.')
+parser.add_argument(
+ '--configuration',
+ choices=STATIC_ROOT_CONFIGURATIONS.keys(),
+ action='extend',
+ default='ptr-cmpr-wasm-intl',
+ nargs='*',
+ help="""Build configuration. Refers to a set of configurations with
+identical static-roots.h. Currently there is only one supported configuration.
+Future configurations will need to generate multiple target files.""")
+parser.add_argument(
+ '--out',
+ default=Path('out'),
+ required=False,
+ type=Path,
+ help='target build directory')
+
+args = parser.parse_args()
+
+# Some helpers
+
+
+def run(cmd, **kwargs):
+ print(f"# CMD: {cmd} {kwargs}")
+ return subprocess.run(cmd, **kwargs, check=True)
+
+
+def build(path, gn_args):
+ if not path.exists():
+ path.mkdir(parents=True, exist_ok=True)
+ with (path / "args.gn").open("w") as f:
+ f.write(gn_args)
+ suffix = ".bat" if platform.system() == "Windows" else ""
+ run(["gn" + suffix, "gen", path])
+ run(["autoninja" + suffix, "-C", path, "mksnapshot"])
+ return path.absolute()
+
+
+# Generate all requested static root headers
+
+v8_path = Path(__file__).parents[2]
+
+changed = False
+for target in [args.configuration]:
+ build_dir = args.out / f"gen-static-roots.{target}"
+ config = STATIC_ROOT_CONFIGURATIONS[target]
+ gn_args = config["gn_args"]
+ build_path = build(build_dir, gn_args)
+ out_file = Path(tempfile.gettempdir()) / f"static-roots-{target}.h"
+ run([build_path / "mksnapshot", "--static-roots-src", out_file])
+ target_file = v8_path / config["target"]
+ if not filecmp.cmp(out_file, target_file):
+ shutil.move(out_file, target_file)
+ changed = True
+
+if changed:
+ exit(1)
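
A minimal invocation sketch for the new generator (run from the V8 checkout
root; by default the build lands in out/gen-static-roots.ptr-cmpr-wasm-intl):

    # Rebuilds mksnapshot with the gn args listed above and refreshes the
    # checked-in src/roots/static-roots.h; exits with status 1 if the header
    # had to be updated, so callers can detect a stale file.
    python3 tools/dev/gen-static-roots.py
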
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 245a05389e..c2860a4763 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -17,6 +17,10 @@ All arguments are optional. Most combinations should work, e.g.:
gm.py android_arm.release.check --progress=verbose
gm.py x64 mjsunit/foo cctest/test-bar/*
+For a less automated experience, pass an existing output directory (which
+must already contain an args.gn), e.g.:
+ gm.py out/foo unittests
+
Flags are passed unchanged to the test runner. They must start with -- and must
not contain spaces.
"""
@@ -29,13 +33,17 @@ import platform
import re
import subprocess
import sys
+import shutil
+from pathlib import Path
USE_PTY = "linux" in sys.platform
if USE_PTY:
import pty
-BUILD_TARGETS_TEST = ["d8", "bigint_shell", "cctest", "inspector-test",
- "unittests", "wasm_api_tests"]
+BUILD_TARGETS_TEST = [
+ "d8", "bigint_shell", "cctest", "inspector-test", "v8_unittests",
+ "wasm_api_tests"
+]
BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
@@ -60,7 +68,7 @@ MODES = {
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
TARGETS = [
- "d8", "cctest", "unittests", "v8_fuzzers", "wasm_api_tests", "wee8",
+ "d8", "cctest", "v8_unittests", "v8_fuzzers", "wasm_api_tests", "wee8",
"mkgrokdump", "generate-bytecode-expectations", "inspector-test",
"bigint_shell", "wami"
]
@@ -115,86 +123,86 @@ HELP = """<arch> can be any of: %(arches)s
"targets": ", ".join(TARGETS)
}
-TESTSUITES_TARGETS = {"benchmarks": "d8",
- "bigint": "bigint_shell",
- "cctest": "cctest",
- "debugger": "d8",
- "fuzzer": "v8_fuzzers",
- "inspector": "inspector-test",
- "intl": "d8",
- "message": "d8",
- "mjsunit": "d8",
- "mozilla": "d8",
- "test262": "d8",
- "unittests": "unittests",
- "wasm-api-tests": "wasm_api_tests",
- "wasm-js": "d8",
- "wasm-spec-tests": "d8",
- "webkit": "d8"}
-
-OUTDIR = "out"
-
-def _Which(cmd):
- for path in os.environ["PATH"].split(os.pathsep):
- if os.path.exists(os.path.join(path, cmd)):
- return os.path.join(path, cmd)
- return None
+TESTSUITES_TARGETS = {
+ "benchmarks": "d8",
+ "bigint": "bigint_shell",
+ "cctest": "cctest",
+ "debugger": "d8",
+ "fuzzer": "v8_fuzzers",
+ "inspector": "inspector-test",
+ "intl": "d8",
+ "message": "d8",
+ "mjsunit": "d8",
+ "mozilla": "d8",
+ "test262": "d8",
+ "unittests": "v8_unittests",
+ "wasm-api-tests": "wasm_api_tests",
+ "wasm-js": "d8",
+ "wasm-spec-tests": "d8",
+ "webkit": "d8"
+}
+
+OUTDIR = Path("out")
-def DetectGoma():
+
+def detect_goma():
if os.environ.get("GOMA_DIR"):
- return os.environ.get("GOMA_DIR")
+ return Path(os.environ.get("GOMA_DIR"))
if os.environ.get("GOMADIR"):
- return os.environ.get("GOMADIR")
+ return Path(os.environ.get("GOMADIR"))
# There is a copy of goma in depot_tools, but it might not be in use on
# this machine.
- goma = _Which("goma_ctl")
+ goma = shutil.which("goma_ctl")
if goma is None: return None
- cipd_bin = os.path.join(os.path.dirname(goma), ".cipd_bin")
- if not os.path.exists(cipd_bin): return None
- goma_auth = os.path.expanduser("~/.goma_client_oauth2_config")
- if not os.path.exists(goma_auth): return None
+ cipd_bin = Path(goma).parent / ".cipd_bin"
+ if not cipd_bin.exists():
+ return None
+ goma_auth = Path("~/.goma_client_oauth2_config").expanduser()
+ if not goma_auth.exists():
+ return None
return cipd_bin
-GOMADIR = DetectGoma()
+
+GOMADIR = detect_goma()
IS_GOMA_MACHINE = GOMADIR is not None
USE_GOMA = "true" if IS_GOMA_MACHINE else "false"
-RELEASE_ARGS_TEMPLATE = """\
+RELEASE_ARGS_TEMPLATE = f"""\
is_component_build = false
is_debug = false
%s
-use_goma = {GOMA}
+use_goma = {USE_GOMA}
v8_enable_backtrace = true
v8_enable_disassembler = true
v8_enable_object_print = true
v8_enable_verify_heap = true
dcheck_always_on = false
-""".replace("{GOMA}", USE_GOMA)
+"""
-DEBUG_ARGS_TEMPLATE = """\
+DEBUG_ARGS_TEMPLATE = f"""\
is_component_build = true
is_debug = true
symbol_level = 2
%s
-use_goma = {GOMA}
+use_goma = {USE_GOMA}
v8_enable_backtrace = true
v8_enable_fast_mksnapshot = true
v8_enable_slow_dchecks = true
v8_optimized_debug = false
-""".replace("{GOMA}", USE_GOMA)
+"""
-OPTDEBUG_ARGS_TEMPLATE = """\
+OPTDEBUG_ARGS_TEMPLATE = f"""\
is_component_build = true
is_debug = true
symbol_level = 1
%s
-use_goma = {GOMA}
+use_goma = {USE_GOMA}
v8_enable_backtrace = true
v8_enable_fast_mksnapshot = true
v8_enable_verify_heap = true
v8_optimized_debug = true
-""".replace("{GOMA}", USE_GOMA)
+"""
ARGS_TEMPLATES = {
"release": RELEASE_ARGS_TEMPLATE,
@@ -202,31 +210,37 @@ ARGS_TEMPLATES = {
"optdebug": OPTDEBUG_ARGS_TEMPLATE
}
-def PrintHelpAndExit():
+
+def print_help_and_exit():
print(__doc__)
print(HELP)
sys.exit(0)
-def PrintCompletionsAndExit():
+
+def print_completions_and_exit():
for a in ARCHES:
- print("%s" % a)
+ print(str(a))
for m in set(MODES.values()):
- print("%s" % m)
- print("%s.%s" % (a, m))
+ print(str(m))
+ print(f"{a}.{m}")
for t in TARGETS:
- print("%s" % t)
- print("%s.%s.%s" % (a, m, t))
+ print(str(t))
+ print("{a}.{m}.{t}")
sys.exit(0)
-def _Call(cmd, silent=False):
- if not silent: print("# %s" % cmd)
+
+def _call(cmd, silent=False):
+ if not silent:
+ print(f"# {cmd}")
return subprocess.call(cmd, shell=True)
-def _CallWithOutputNoTerminal(cmd):
+
+def _call_with_output_no_terminal(cmd):
return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
-def _CallWithOutput(cmd):
- print("# %s" % cmd)
+
+def _call_with_output(cmd):
+ print(f"# {cmd}")
# The following trickery is required so that the 'cmd' thinks it's running
# in a real terminal, while this script gets to intercept its output.
parent, child = pty.openpty()
@@ -251,36 +265,126 @@ def _CallWithOutput(cmd):
p.wait()
return p.returncode, "".join(output)
-def _Write(filename, content):
- print("# echo > %s << EOF\n%sEOF" % (filename, content))
- with open(filename, "w") as f:
+
+def _write(filename, content):
+ print(f"# echo > {filename} << EOF\n{content}EOF")
+ with filename.open("w") as f:
f.write(content)
-def _Notify(summary, body):
- if (_Which('notify-send') is not None and
+
+def _notify(summary, body):
+ if (shutil.which('notify-send') is not None and
os.environ.get("DISPLAY") is not None):
- _Call("notify-send '{}' '{}'".format(summary, body), silent=True)
+ _call(f"notify-send '{summary}' '{body}'", silent=True)
else:
- print("{} - {}".format(summary, body))
+ print(f"{summary} - {body}")
+
-def _GetMachine():
+def _get_machine():
return platform.machine()
-def GetPath(arch, mode):
- subdir = "%s.%s" % (arch, mode)
- return os.path.join(OUTDIR, subdir)
-def PrepareMksnapshotCmdline(orig_cmdline, path):
- result = "gdb --args %s/mksnapshot " % path
+def get_path(arch, mode):
+ return OUTDIR / f"{arch}.{mode}"
+
+
+def prepare_mksnapshot_cmdline(orig_cmdline, path):
+ mksnapshot_bin = path / "mksnapshot"
+ result = f"gdb --args {mksnapshot_bin} "
for w in orig_cmdline.split(" "):
if w.startswith("gen/") or w.startswith("snapshot_blob"):
- result += ("%(path)s%(sep)s%(arg)s " %
- {"path": path, "sep": os.sep, "arg": w})
+ result += f"{str(path / w)} "
+ elif w.startswith("../../"):
+ result += f"{w[6:]} "
else:
- result += "%s " % w
+ result += f"{w} "
return result
-class Config(object):
+
+def prepare_torque_cmdline(orig_cmdline: str, path):
+ torque_bin = path / "torque"
+ args = orig_cmdline.replace("-v8-root ../..", "-v8-root .")
+ args = args.replace("gen/torque-generated", f"{path}/gen/torque-generated")
+ return f"gdb --args {torque_bin} {args}"
+
+# Only has a path, assumes that the path (and args.gn in it) already exists.
+class RawConfig:
+
+ def __init__(self, path, targets, tests=[], clean=False, testrunner_args=[]):
+ self.path = path
+ self.targets = set(targets)
+ self.tests = set(tests)
+ self.testrunner_args = testrunner_args
+ self.clean = clean
+
+ def extend(self, targets, tests=[], clean=False):
+ self.targets.update(targets)
+ self.tests.update(tests)
+ self.clean |= clean
+
+ def build(self):
+ build_ninja = self.path / "build.ninja"
+ if not build_ninja.exists():
+ code = _call(f"gn gen {self.path}")
+ if code != 0:
+ return code
+ elif self.clean:
+ code = _call(f"gn clean {self.path}")
+ if code != 0:
+ return code
+ targets = " ".join(self.targets)
+ # The implementation of mksnapshot failure detection relies on
+ # the "pty" module and GDB presence, so skip it on non-Linux.
+ if not USE_PTY:
+ return _call(f"autoninja -C {self.path} {targets}")
+
+ return_code, output = _call_with_output(
+ f"autoninja -C {self.path} {targets}")
+ if return_code != 0 and "FAILED:" in output:
+ if "snapshot_blob" in output:
+ if "gen-static-roots.py" in output:
+ _notify("V8 build requires your attention",
+ "Please re-generate static roots...")
+ return return_code
+ csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
+ match = csa_trap.search(output)
+ extra_opt = match.group(1) if match else ""
+ cmdline = re.compile("python3 ../../tools/run.py ./mksnapshot (.*)")
+ orig_cmdline = cmdline.search(output).group(1).strip()
+ cmdline = (
+ prepare_mksnapshot_cmdline(orig_cmdline, self.path) + extra_opt)
+ _notify("V8 build requires your attention",
+ "Detected mksnapshot failure, re-running in GDB...")
+ _call(cmdline)
+ elif "run.py ./torque" in output and not ": Torque Error: " in output:
+ # Torque failed/crashed without printing an error message.
+ cmdline = re.compile("python3 ../../tools/run.py ./torque (.*)")
+ orig_cmdline = cmdline.search(output).group(1).strip()
+ cmdline = f"gdb --args "
+ cmdline = prepare_torque_cmdline(orig_cmdline, self.path)
+ _notify("V8 build requires your attention",
+ "Detecting torque failure, re-running in GDB...")
+ _call(cmdline)
+ return return_code
+
+ def run_tests(self):
+ if not self.tests:
+ return 0
+ if "ALL" in self.tests:
+ tests = ""
+ else:
+ tests = " ".join(self.tests)
+ run_tests = Path("tools") / "run-tests.py"
+ test_runner_args = " ".join(self.testrunner_args)
+ return _call(
+ f'"{sys.executable }" {run_tests} --outdir={self.path} {tests} {test_runner_args}'
+ )
+
+
+# Contrary to RawConfig, takes arch and mode, and sets everything up
+# automatically.
+class ManagedConfig(RawConfig):
+
def __init__(self,
arch,
mode,
@@ -288,39 +392,32 @@ class Config(object):
tests=[],
clean=False,
testrunner_args=[]):
+ super().__init__(
+ get_path(arch, mode), targets, tests, clean, testrunner_args)
self.arch = arch
self.mode = mode
- self.targets = set(targets)
- self.tests = set(tests)
- self.testrunner_args = testrunner_args
- self.clean = clean
- def Extend(self, targets, tests=[], clean=False):
- self.targets.update(targets)
- self.tests.update(tests)
- self.clean |= clean
-
- def GetTargetCpu(self):
+ def get_target_cpu(self):
cpu = "x86"
if self.arch == "android_arm":
cpu = "arm"
elif self.arch == "android_arm64" or self.arch == "fuchsia_arm64":
cpu = "arm64"
- elif self.arch == "arm64" and _GetMachine() in ("aarch64", "arm64"):
+ elif self.arch == "arm64" and _get_machine() in ("aarch64", "arm64"):
# arm64 build host:
cpu = "arm64"
- elif self.arch == "arm" and _GetMachine() in ("aarch64", "arm64"):
+ elif self.arch == "arm" and _get_machine() in ("aarch64", "arm64"):
cpu = "arm"
- elif self.arch == "loong64" and _GetMachine() == "loongarch64":
+ elif self.arch == "loong64" and _get_machine() == "loongarch64":
cpu = "loong64"
- elif self.arch == "mips64el" and _GetMachine() == "mips64":
+ elif self.arch == "mips64el" and _get_machine() == "mips64":
cpu = "mips64el"
elif "64" in self.arch or self.arch == "s390x":
# Native x64 or simulator build.
cpu = "x64"
- return ["target_cpu = \"%s\"" % cpu]
+ return [f"target_cpu = \"{cpu}\""]
- def GetV8TargetCpu(self):
+ def get_v8_target_cpu(self):
if self.arch == "android_arm":
v8_cpu = "arm"
elif self.arch == "android_arm64" or self.arch == "fuchsia_arm64":
@@ -330,89 +427,57 @@ class Config(object):
v8_cpu = self.arch
else:
return []
- return ["v8_target_cpu = \"%s\"" % v8_cpu]
+ return [f"v8_target_cpu = \"{v8_cpu}\""]
- def GetTargetOS(self):
+ def get_target_os(self):
if self.arch in ("android_arm", "android_arm64"):
return ["target_os = \"android\""]
elif self.arch in ("fuchsia_x64", "fuchsia_arm64"):
return ["target_os = \"fuchsia\""]
return []
- def GetSpecialCompiler(self):
- if _GetMachine() in ("aarch64", "mips64", "loongarch64"):
+ def get_specialized_compiler(self):
+ if _get_machine() in ("aarch64", "mips64", "loongarch64"):
# We have no prebuilt Clang for arm64, mips64 or loongarch64 on Linux,
# so use the system Clang instead.
return ["clang_base_path = \"/usr\"", "clang_use_chrome_plugins = false"]
return []
- def GetSandboxFlag(self):
+ def get_sandbox_flag(self):
if self.arch in SANDBOX_SUPPORTED_ARCHES:
return ["v8_enable_sandbox = true"]
return []
- def GetGnArgs(self):
+ def get_gn_args(self):
# Use only substring before first '-' as the actual mode
mode = re.match("([^-]+)", self.mode).group(1)
template = ARGS_TEMPLATES[mode]
arch_specific = (
- self.GetTargetCpu() + self.GetV8TargetCpu() + self.GetTargetOS() +
- self.GetSpecialCompiler() + self.GetSandboxFlag())
+ self.get_target_cpu() + self.get_v8_target_cpu() +
+ self.get_target_os() + self.get_specialized_compiler() +
+ self.get_sandbox_flag())
return template % "\n".join(arch_specific)
- def Build(self):
- path = GetPath(self.arch, self.mode)
- args_gn = os.path.join(path, "args.gn")
- build_ninja = os.path.join(path, "build.ninja")
- if not os.path.exists(path):
- print("# mkdir -p %s" % path)
- os.makedirs(path)
- if not os.path.exists(args_gn):
- _Write(args_gn, self.GetGnArgs())
- if not os.path.exists(build_ninja):
- code = _Call("gn gen %s" % path)
- if code != 0: return code
- elif self.clean:
- code = _Call("gn clean %s" % path)
- if code != 0: return code
- targets = " ".join(self.targets)
- # The implementation of mksnapshot failure detection relies on
- # the "pty" module and GDB presence, so skip it on non-Linux.
- if not USE_PTY:
- return _Call("autoninja -C %s %s" % (path, targets))
-
- return_code, output = _CallWithOutput("autoninja -C %s %s" %
- (path, targets))
- if return_code != 0 and "FAILED:" in output and "snapshot_blob" in output:
- csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
- match = csa_trap.search(output)
- extra_opt = match.group(1) if match else ""
- cmdline = re.compile("python3 ../../tools/run.py ./mksnapshot (.*)")
- orig_cmdline = cmdline.search(output).group(1).strip()
- cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
- _Notify("V8 build requires your attention",
- "Detected mksnapshot failure, re-running in GDB...")
- _Call(cmdline)
- return return_code
-
- def RunTests(self):
+ def build(self):
+ path = self.path
+ args_gn = path / "args.gn"
+ if not path.exists():
+ print(f"# mkdir -p {path}")
+ path.mkdir(parents=True)
+ if not args_gn.exists():
+ _write(args_gn, self.get_gn_args())
+ return super().build()
+
+ def run_tests(self):
# Special handling for "mkgrokdump": if it was built, run it.
if (self.arch == "x64" and self.mode == "release" and
"mkgrokdump" in self.targets):
- _Call("%s/mkgrokdump > tools/v8heapconst.py" %
- GetPath(self.arch, self.mode))
- if not self.tests: return 0
- if "ALL" in self.tests:
- tests = ""
- else:
- tests = " ".join(self.tests)
- return _Call('"%s" ' % sys.executable +
- os.path.join("tools", "run-tests.py") +
- " --outdir=%s %s %s" % (
- GetPath(self.arch, self.mode), tests,
- " ".join(self.testrunner_args)))
+ mkgrokdump_bin = self.path / "mkgrokdump"
+ _call(f"{mkgrokdump_bin} > tools/v8heapconst.py")
+ return super().run_tests()
-def GetTestBinary(argstring):
+
+def get_test_binary(argstring):
for suite in TESTSUITES_TARGETS:
if argstring.startswith(suite): return TESTSUITES_TARGETS[suite]
return None
@@ -425,31 +490,72 @@ class ArgumentParser(object):
self.configs = {}
self.testrunner_args = []
- def PopulateConfigs(self, arches, modes, targets, tests, clean):
+ def populate_configs(self, arches, modes, targets, tests, clean):
for a in arches:
for m in modes:
- path = GetPath(a, m)
+ path = get_path(a, m)
if path not in self.configs:
- self.configs[path] = Config(a, m, targets, tests, clean,
- self.testrunner_args)
+ self.configs[path] = ManagedConfig(a, m, targets, tests, clean,
+ self.testrunner_args)
else:
- self.configs[path].Extend(targets, tests)
+ self.configs[path].extend(targets, tests)
- def ProcessGlobalActions(self):
+ def process_global_actions(self):
have_configs = len(self.configs) > 0
for action in self.global_actions:
impact = ACTIONS[action]
if (have_configs):
for c in self.configs:
- self.configs[c].Extend(**impact)
+ self.configs[c].extend(**impact)
+ else:
+ self.populate_configs(DEFAULT_ARCHES, DEFAULT_MODES, **impact)
+
+ def maybe_parse_builddir(self, argstring):
+ outdir_prefix = str(OUTDIR) + os.path.sep
+ # {argstring} must have the shape "out/x", and the 'x' part must be
+ # at least one character.
+ if not argstring.startswith(outdir_prefix):
+ return False
+ if len(argstring) <= len(outdir_prefix):
+ return False
+ # "out/foo.d8" -> path="out/foo", targets=["d8"]
+ # "out/d8.cctest" -> path="out/d8", targets=["cctest"]
+ # "out/x.y.d8.cctest" -> path="out/x.y", targets=["d8", "cctest"]
+ words = argstring.split('.')
+ path_end = len(words)
+ targets = []
+ tests = []
+ clean = False
+ while path_end > 1:
+ w = words[path_end - 1]
+ maybe_target = get_test_binary(w)
+ if w in TARGETS:
+ targets.append(w)
+ elif maybe_target is not None:
+ targets.append(maybe_target)
+ tests.append(w)
+ elif w == 'clean':
+ clean = True
else:
- self.PopulateConfigs(DEFAULT_ARCHES, DEFAULT_MODES, **impact)
+ break
+ path_end -= 1
+ path = Path('.'.join(words[:path_end]))
+ args_gn = path / "args.gn"
+ # Only accept existing build output directories, otherwise fall back
+ # to regular parsing.
+ if not args_gn.is_file():
+ return False
+ if path not in self.configs:
+ self.configs[path] = RawConfig(path, targets, tests, clean)
+ else:
+ self.configs[path].extend(targets, tests, clean)
+ return True
- def ParseArg(self, argstring):
+ def parse_arg(self, argstring):
if argstring in ("-h", "--help", "help"):
- PrintHelpAndExit()
+ print_help_and_exit()
if argstring == "--print-completions":
- PrintCompletionsAndExit()
+ print_completions_and_exit()
arches = []
modes = []
targets = []
@@ -458,16 +564,19 @@ class ArgumentParser(object):
clean = False
# Special handling for "mkgrokdump": build it for x64.release.
if argstring == "mkgrokdump":
- self.PopulateConfigs(["x64"], ["release"], ["mkgrokdump"], [], False)
+ self.populate_configs(["x64"], ["release"], ["mkgrokdump"], [], False)
+ return
+ if argstring.startswith("--"):
+ # Pass all other flags to test runner.
+ self.testrunner_args.append(argstring)
+ return
+ # Specifying a directory like "out/foo" enters "manual mode".
+ if self.maybe_parse_builddir(argstring):
return
# Specifying a single unit test looks like "unittests/Foo.Bar", test262
# tests have names like "S15.4.4.7_A4_T1", don't split these.
if argstring.startswith("unittests/") or argstring.startswith("test262/"):
words = [argstring]
- elif argstring.startswith("--"):
- # Pass all other flags to test runner.
- self.testrunner_args.append(argstring)
- return
else:
# Assume it's a word like "x64.release" -> split at the dot.
words = argstring.split('.')
@@ -479,7 +588,7 @@ class ArgumentParser(object):
if word in TARGETS:
self.global_targets.add(word)
return
- maybe_target = GetTestBinary(word)
+ maybe_target = get_test_binary(word)
if maybe_target is not None:
self.global_tests.add(word)
self.global_targets.add(maybe_target)
@@ -501,7 +610,7 @@ class ArgumentParser(object):
modes.append(MODES[prefix] + "-" + suffix)
break
else:
- print("Didn't understand: %s" % word)
+ print(f"Didn't understand: {word}")
sys.exit(1)
# Process actions.
for action in actions:
@@ -514,36 +623,38 @@ class ArgumentParser(object):
modes = modes or DEFAULT_MODES
targets = targets or DEFAULT_TARGETS
# Produce configs.
- self.PopulateConfigs(arches, modes, targets, tests, clean)
+ self.populate_configs(arches, modes, targets, tests, clean)
- def ParseArguments(self, argv):
+ def parse_arguments(self, argv):
if len(argv) == 0:
- PrintHelpAndExit()
+ print_help_and_exit()
for argstring in argv:
- self.ParseArg(argstring)
- self.ProcessGlobalActions()
+ self.parse_arg(argstring)
+ self.process_global_actions()
for c in self.configs:
- self.configs[c].Extend(self.global_targets, self.global_tests)
+ self.configs[c].extend(self.global_targets, self.global_tests)
return self.configs
-def Main(argv):
+
+def main(argv):
parser = ArgumentParser()
- configs = parser.ParseArguments(argv[1:])
+ configs = parser.parse_arguments(argv[1:])
return_code = 0
# If we have Goma but it is not running, start it.
if (IS_GOMA_MACHINE and
- _Call("pgrep -x compiler_proxy > /dev/null", silent=True) != 0):
- _Call("%s/goma_ctl.py ensure_start" % GOMADIR)
+ _call("pgrep -x compiler_proxy > /dev/null", silent=True) != 0):
+ goma_ctl = GOMADIR / "goma_ctl.py"
+ _call(f"{goma_ctl} ensure_start")
for c in configs:
- return_code += configs[c].Build()
+ return_code += configs[c].build()
if return_code == 0:
for c in configs:
- return_code += configs[c].RunTests()
+ return_code += configs[c].run_tests()
if return_code == 0:
- _Notify('Done!', 'V8 compilation finished successfully.')
+ _notify('Done!', 'V8 compilation finished successfully.')
else:
- _Notify('Error!', 'V8 compilation finished with errors.')
+ _notify('Error!', 'V8 compilation finished with errors.')
return return_code
if __name__ == "__main__":
- sys.exit(Main(sys.argv))
+ sys.exit(main(sys.argv))
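
The new "manual mode" above accepts an existing build directory in place of an
arch.mode pair; the directory names here are only examples, and the directory
must already contain an args.gn:

    gm.py out/custom d8 mjsunit/foo      # build d8 in out/custom, run one test
    gm.py out/custom.d8.mjsunit          # dotted form of the same request
    gm.py out/custom.clean.cctest        # 'gn clean' first, then build cctest

Trailing dot-separated words are resolved like regular arguments: known build
targets are added to the target set, test suite names map to their binary via
TESTSUITES_TARGETS, and 'clean' requests a clean build; such directories are
handled by RawConfig rather than ManagedConfig, so no args.gn is generated.
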
diff --git a/deps/v8/tools/dev/update-compile-commands.py b/deps/v8/tools/dev/update-compile-commands.py
index 26e6be2c25..9abbdb7a9f 100755
--- a/deps/v8/tools/dev/update-compile-commands.py
+++ b/deps/v8/tools/dev/update-compile-commands.py
@@ -13,6 +13,11 @@ import json
import os
import subprocess
import sys
+import platform
+
+DEFAULT_ARCH = "x64"
+if platform.machine() == "arm64":
+ DEFAULT_ARCH = "arm64"
PYLIB_PATH = 'tools/clang/pylib'
GM_PATH = 'tools/dev'
@@ -48,7 +53,7 @@ def PrepareBuildDir(arch, mode):
args_gn = os.path.join(build_dir, "args.gn")
if not os.path.exists(args_gn):
conf = gm.Config(arch, mode, [])
- _Write(args_gn, conf.GetGnArgs())
+ _Write(args_gn, conf.get_gn_args())
build_ninja = os.path.join(build_dir, "build.ninja")
if not os.path.exists(build_ninja):
code = _Call("gn gen %s" % build_dir)
@@ -73,9 +78,11 @@ def UpdateCompileCommands():
print(">>> Updating compile_commands.json...")
combined = {}
AddTargetsForArch("x64", combined)
- AddTargetsForArch("ia32", combined)
- AddTargetsForArch("arm", combined)
AddTargetsForArch("arm64", combined)
+ if DEFAULT_ARCH != "arm64":
+ # Mac arm64 doesn't like 32bit platforms:
+ AddTargetsForArch("ia32", combined)
+ AddTargetsForArch("arm", combined)
commands = []
for key in combined:
commands.append(combined[key])
@@ -83,17 +90,19 @@ def UpdateCompileCommands():
def CompileLanguageServer():
print(">>> Compiling Torque Language Server...")
- PrepareBuildDir("x64", "release")
- _Call("autoninja -C out/x64.release torque-language-server")
+ PrepareBuildDir(DEFAULT_ARCH, "release")
+ _Call(f"autoninja -C out/{DEFAULT_ARCH}.release torque-language-server")
+
def GenerateCCFiles():
print(">>> Generating generated C++ source files...")
# This must be called after UpdateCompileCommands().
- assert os.path.exists("out/x64.debug/build.ninja")
- _Call("autoninja -C out/x64.debug v8_generated_cc_files")
+ assert os.path.exists(f"out/{DEFAULT_ARCH}.debug/build.ninja")
+ _Call(f"autoninja -C out/{DEFAULT_ARCH}.debug v8_generated_cc_files")
+
def StartGoma():
- gomadir = gm.DetectGoma()
+ gomadir = gm.detect_goma()
if (gomadir is not None and
_Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
_Call("%s/goma_ctl.py ensure_start" % gomadir)
diff --git a/deps/v8/tools/dev/update-vscode.sh b/deps/v8/tools/dev/update-vscode.sh
index 5a0fd3d79e..64f5db0c3f 100755
--- a/deps/v8/tools/dev/update-vscode.sh
+++ b/deps/v8/tools/dev/update-vscode.sh
@@ -7,7 +7,7 @@
# Visual Studio Code on Linux distributions where for whatever reason there
# is no good way to do so via the package manager.
-# Version of this script: 2020.07.04
+# Version of this script: 2022.11.12
# Basic checking of arguments: want at least one, and it's not --help.
VERSION="$1"
@@ -27,12 +27,24 @@ die() {
if [ "$VERSION" == "--auto" -o "$VERSION" == "auto" ]; then
echo "Searching online for latest available version..."
- # Where to find the latest available version (we assume that it's mentioned
- # in the first 1000 characters, which is true as of 2020-07).
- AVAILABLE_PACKAGES_URL="https://packages.microsoft.com/repos/vscode/dists/stable/main/binary-amd64/Packages"
- VERSION=$(curl "$AVAILABLE_PACKAGES_URL" --range 0-1000 --silent \
- | grep "^Version: " \
- | sed 's/[^0-9]*\([0-9.]*\).*/\1/')
+ # Where to find the latest available version.
+ AVAILABLE_PACKAGES_URL="https://packages.microsoft.com/repos/vscode/dists/stable/main/binary-amd64/Packages.gz"
+ VERSION=$(curl "$AVAILABLE_PACKAGES_URL" --silent \
+ | gunzip \
+ | gawk '
+ BEGIN { engaged = 0 }
+ # Look at blocks starting with "Package: code".
+ /^Package: code$/ { engaged = 1 }
+ # Stop looking at the empty line indicating the end of a block.
+ /^$/ { engaged = 0 }
+ # In interesting blocks, print the relevant part of the
+ # "Version: " line.
+ match($0, /^Version: ([0-9.]*)/, groups) {
+ if (engaged == 1) print groups[1]
+ }
+ ' - \
+ | sort -rV \
+ | head -1)
if [ -z "$VERSION" ]; then
die "Detecting latest version failed, please specify it manually."
else
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 9354f24ff8..e9e3f6c168 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -2,45 +2,64 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/sysroot.gni")
import("../../gni/v8.gni")
-group("v8_gcmole_files") {
- testonly = true
- data_deps = [
- "../../:v8_dump_build_config",
- "../../:v8_generated_cc_files",
- ]
- data = [
- "gcmole.py",
- "gcmole-test.cc",
- "gcmole-tools/",
- "run-gcmole.py",
- "suspects.allowlist",
- "ignored_files",
- "test-expectations.txt",
+if (v8_gcmole) {
+ group("v8_gcmole_files") {
+ testonly = true
+ data_deps = [
+ ":v8_gcmole_args",
+ "../../:v8_dump_build_config",
+ "../../:v8_generated_cc_files",
+ ]
+ data = [
+ "gcmole.py",
+ "gcmole-test.cc",
+ "gcmole-tools/",
+ "run-gcmole.py",
+ "suspects.allowlist",
+ "ignored_files",
+ "test-expectations.txt",
- # The following contains all relevant source and build files.
- "../debug_helper/debug-helper.h",
- "../../BUILD.gn",
- "../../base/",
- "../../include/",
- "../../src/",
- "../../test/cctest/",
- "../../test/common/",
- "../../testing/gtest/include/gtest/gtest_prod.h",
- "../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
- "../../third_party/icu/source/",
- "../../third_party/wasm-api/wasm.h",
- "../../third_party/wasm-api/wasm.hh",
- "../../third_party/zlib/",
- "../../third_party/inspector_protocol/",
- "$target_gen_dir/../../",
- "$target_gen_dir/../../torque-generated/",
- ]
+ # The following contains all relevant source and build files.
+ "../debug_helper/debug-helper.h",
+ "../../BUILD.gn",
+ "../../base/",
+ "../../include/",
+ "../../src/",
+ "../../test/cctest/",
+ "../../test/common/",
+ "../../testing/gtest/include/gtest/gtest_prod.h",
+ "../../third_party/glibc/",
+ "../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
+ "../../third_party/icu/source/common/",
+ "../../third_party/icu/source/i18n/",
+ "../../third_party/wasm-api/wasm.h",
+ "../../third_party/wasm-api/wasm.hh",
+ "../../third_party/zlib/",
+ "../../third_party/inspector_protocol/",
+ "$target_gen_dir/../../",
+ "$target_gen_dir/../../torque-generated/",
- if (v8_gcmole) {
- # This assumes gcmole tools have been fetched by a hook
- # into v8/tools/gcmole/gcmole_tools.
- data += [ "gcmole-tools/" ]
+ # This assumes gcmole tools have been fetched by a hook
+ # into v8/tools/gcmole/gcmole_tools.
+ "gcmole-tools/",
+
+ # We use the bundled sysroot with gcmole.
+ sysroot,
+ ]
+ }
+
+ # Only prepare gcmole args if gcmole is activated by a gn arg.
+ action("v8_gcmole_args") {
+ script = "gcmole_args.py"
+ outputs = [ "$root_out_dir/v8_gcmole.args" ]
+ args = [ sysroot ]
+
+ # We use an arbitrary v8 target as proxy for calculating globally used
+ # gcmole plugin arguments. Should be a target that's early in the chain
+ # and that enables icu.
+ deps = [ "../../:v8_base_without_compiler" ]
}
}
diff --git a/deps/v8/tools/gcmole/PRESUBMIT.py b/deps/v8/tools/gcmole/PRESUBMIT.py
new file mode 100644
index 0000000000..5ba6bfda37
--- /dev/null
+++ b/deps/v8/tools/gcmole/PRESUBMIT.py
@@ -0,0 +1,32 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+USE_PYTHON3 = True
+
+
+def _RunTests(input_api, output_api):
+ return input_api.RunTests(
+ input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api,
+ output_api,
+ '.',
+ files_to_check=[r'.+_test\.py$'],
+ run_on_python2=False))
+
+
+def _CommonChecks(input_api, output_api):
+ """Checks common to both upload and commit."""
+ checks = [
+ _RunTests,
+ ]
+
+ return sum([check(input_api, output_api) for check in checks], [])
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/gcmole/gcmole-test.cc b/deps/v8/tools/gcmole/gcmole-test.cc
index 038a514189..39d7ce1e15 100644
--- a/deps/v8/tools/gcmole/gcmole-test.cc
+++ b/deps/v8/tools/gcmole/gcmole-test.cc
@@ -349,5 +349,15 @@ void TestGuardedDeadVarAnalysisMidFunction2(Isolate* isolate) {
raw_obj.Print();
}
+void TestGuardedDeadVarAnalysisMultipleSafepoints(Isolate* isolate) {
+ // TODO(https://crbug.com/v8/13536): The analysis points to this safepoint,
+ // while it should point to the one below.
+ Safepoint();
+ JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+ DisallowGarbageCollection no_gc;
+ Safepoint();
+ raw_obj.Print();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/tools/gcmole/gcmole.py b/deps/v8/tools/gcmole/gcmole.py
index 6961ec4fef..be605d8c55 100755
--- a/deps/v8/tools/gcmole/gcmole.py
+++ b/deps/v8/tools/gcmole/gcmole.py
@@ -6,14 +6,18 @@
# This is main driver for gcmole tool. See README for more details.
# Usage: CLANG_BIN=clang-bin-dir python tools/gcmole/gcmole.py [arm|arm64|ia32|x64]
+from contextlib import contextmanager
+from contextlib import redirect_stderr
from multiprocessing import cpu_count
from pathlib import Path
+import argparse
import collections
import difflib
+import io
import json
-import optparse
import os
+import pickle
import re
import subprocess
import sys
@@ -21,17 +25,8 @@ import threading
import queue
-ArchCfg = collections.namedtuple(
- "ArchCfg", ["name", "cpu", "triple", "arch_define", "arch_options"])
-
-# TODO(cbruni): use gn desc by default for platform-specific settings
-OPTIONS_64BIT = [
- "-DV8_COMPRESS_POINTERS",
- "-DV8_COMPRESS_POINTERS_IN_SHARED_CAGE",
- "-DV8_EXTERNAL_CODE_SPACE",
- "-DV8_SHORT_BUILTIN_CALLS",
- "-DV8_SHARED_RO_HEAP",
-]
+ArchCfg = collections.namedtuple("ArchCfg",
+ ["name", "cpu", "triple", "arch_options"])
ARCHITECTURES = {
"ia32":
@@ -39,7 +34,6 @@ ARCHITECTURES = {
name="ia32",
cpu="x86",
triple="i586-unknown-linux",
- arch_define="V8_TARGET_ARCH_IA32",
arch_options=["-m32"],
),
"arm":
@@ -47,24 +41,19 @@ ARCHITECTURES = {
name="arm",
cpu="arm",
triple="i586-unknown-linux",
- arch_define="V8_TARGET_ARCH_ARM",
arch_options=["-m32"],
),
- # TODO(cbruni): Use detailed settings:
- # arch_options = OPTIONS_64BIT + [ "-DV8_WIN64_UNWINDING_INFO" ]
"x64":
ArchCfg(
name="x64",
cpu="x64",
triple="x86_64-unknown-linux",
- arch_define="V8_TARGET_ARCH_X64",
arch_options=[]),
"arm64":
ArchCfg(
name="arm64",
cpu="arm64",
triple="x86_64-unknown-linux",
- arch_define="V8_TARGET_ARCH_ARM64",
arch_options=[],
),
}
@@ -86,6 +75,9 @@ def fatal(format):
def make_clang_command_line(plugin, plugin_args, options):
+ with open(options.v8_build_dir / 'v8_gcmole.args') as f:
+ generated_args = f.read().strip().split()
+
arch_cfg = ARCHITECTURES[options.v8_target_cpu]
prefixed_plugin_args = []
if plugin_args:
@@ -97,7 +89,6 @@ def make_clang_command_line(plugin, plugin_args, options):
arg,
]
log("Using generated files in {}", options.v8_build_dir / 'gen')
- icu_src_dir = options.v8_root_dir / 'third_party/icu/source'
return ([
options.clang_bin_dir / "clang++",
"-std=c++17",
@@ -117,18 +108,8 @@ def make_clang_command_line(plugin, plugin_args, options):
arch_cfg.triple,
"-fno-exceptions",
"-Wno-everything",
- "-D",
- arch_cfg.arch_define,
- "-DENABLE_DEBUGGER_SUPPORT",
- "-DV8_ENABLE_WEBASSEMBLY",
"-DV8_GC_MOLE",
- "-DV8_INTL_SUPPORT",
- "-I{}".format(options.v8_root_dir),
- "-I{}".format(options.v8_root_dir / 'include'),
- "-I{}".format(options.v8_build_dir / 'gen'),
- "-I{}".format(icu_src_dir / 'common'),
- "-I{}".format(icu_src_dir / 'i18n'),
- ] + arch_cfg.arch_options)
+ ] + generated_args + arch_cfg.arch_options)
def invoke_clang_plugin_for_file(filename, cmd_line, verbose):
@@ -222,56 +203,44 @@ def invoke_clang_plugin_for_each_file(filenames, plugin, plugin_args, options):
# -----------------------------------------------------------------------------
-def parse_gn_file(options, for_test):
- if for_test:
- return {"all": [options.v8_root_dir / "tools/gcmole/gcmole-test.cc"]}
- result = {}
+def build_file_list(options):
+ """Calculates the list of source files to be checked with gcmole.
+
+ The list comprises all files from marked source sections in the
+  listed BUILD.gn files. All files between the following comment and the
+  end of the sources section are used:
+ ### gcmole(arch) ###
+ Where arch can either be all (all architectures) or one of the supported V8
+ architectures.
+
+ The structure of these directives is also checked by presubmit via:
+ tools/v8_presubmit.py::GCMoleProcessor.
+
+ Returns: List of file paths (of type Path).
+ """
+ if options.test_run:
+ return [options.v8_root_dir / "tools/gcmole/gcmole-test.cc"]
+ result = []
gn_files = [
("BUILD.gn", re.compile('"([^"]*?\.cc)"'), ""),
("test/cctest/BUILD.gn", re.compile('"(test-[^"]*?\.cc)"'),
Path("test/cctest/")),
]
- for filename, pattern, prefix in gn_files:
+ gn_re = re.compile(f"### gcmole\((all|{options.v8_target_cpu})\) ###(.*?)\]",
+ re.MULTILINE | re.DOTALL)
+ for filename, file_pattern, prefix in gn_files:
path = options.v8_root_dir / filename
with open(path) as gn_file:
gn = gn_file.read()
- for condition, sources in re.findall("### gcmole\((.*?)\) ###(.*?)\]", gn,
- re.MULTILINE | re.DOTALL):
- if condition not in result:
- result[condition] = []
- for file in pattern.findall(sources):
- result[condition].append(options.v8_root_dir / prefix / file)
-
- return result
-
-
-def evaluate_condition(cond, props):
- if cond == "all":
- return True
-
- m = re.match("(\w+):(\w+)", cond)
- if m is None:
- fatal("failed to parse condition: {}", cond)
- p, v = m.groups()
- if p not in props:
- fatal("undefined configuration property: {}", p)
-
- return props[p] == v
+ for _, sources in gn_re.findall(gn):
+ for file in file_pattern.findall(sources):
+ result.append(options.v8_root_dir / prefix / file)
+ # Filter files of current shard if running on multiple hosts.
+ def is_in_shard(index):
+ return (index % options.shard_count) == options.shard_index
-def build_file_list(options, for_test):
- sources = parse_gn_file(options, for_test)
- props = {
- "os": "linux",
- "arch": options.v8_target_cpu,
- "mode": "debug",
- "simulator": ""
- }
- ret = []
- for condition, files in list(sources.items()):
- if evaluate_condition(condition, props):
- ret += files
- return ret
+ return [f for i, f in enumerate(result) if is_in_shard(i)]
# -----------------------------------------------------------------------------
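
As an aside on the directive format documented in build_file_list() above, a
small self-contained sketch (with made-up file names) shows how the single
regex pass picks up sources for a given target CPU:

    import re

    sample_gn = '''
    sources = [
      ### gcmole(all) ###
      "src/foo.cc",
      "src/bar.cc",
    ]
    if (v8_current_cpu == "x64") {
      sources += [
        ### gcmole(x64) ###
        "src/x64/baz.cc",
      ]
    }
    '''

    # Same shape as the patterns used in build_file_list(), specialized to x64.
    gn_re = re.compile(r"### gcmole\((all|x64)\) ###(.*?)\]",
                       re.MULTILINE | re.DOTALL)
    file_pattern = re.compile(r'"([^"]*?\.cc)"')

    files = []
    for _arch, sources in gn_re.findall(sample_gn):
        files.extend(file_pattern.findall(sources))
    print(files)  # ['src/foo.cc', 'src/bar.cc', 'src/x64/baz.cc']
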
@@ -326,18 +295,11 @@ IS_SPECIAL_WITH_ALLOW_LIST = merge_regexp({
})
-class GCSuspectsCollector:
+class CallGraph:
- def __init__(self, options):
- self.gc = {}
- self.gc_caused = collections.defaultdict(lambda: set())
- self.funcs = {}
+ def __init__(self):
+ self.funcs = collections.defaultdict(set)
self.current_caller = None
- self.allowlist = options.allowlist
- self.is_special = IS_SPECIAL_WITH_ALLOW_LIST if self.allowlist else IS_SPECIAL_WITHOUT_ALLOW_LIST
-
- def add_cause(self, name, cause):
- self.gc_caused[name].add(cause)
def parse(self, lines):
for funcname in lines:
@@ -345,58 +307,114 @@ class GCSuspectsCollector:
continue
if funcname[0] != "\t":
- self.resolve(funcname)
+ # Always inserting the current caller makes the serialized version
+ # more compact.
+ self.funcs[funcname]
self.current_caller = funcname
else:
- name = funcname[1:]
- callers_for_name = self.resolve(name)
- callers_for_name.add(self.current_caller)
+ self.funcs[funcname[1:]].add(self.current_caller)
+
+ def to_file(self, file_name):
+ """Store call graph in file 'file_name'."""
+ log(f"Writing serialized callgraph to {file_name}")
+ with open(file_name, 'wb') as f:
+ pickle.dump(self, f)
+
+ @staticmethod
+ def from_file(file_name):
+ """Restore call graph from file 'file_name'."""
+ log(f"Reading serialized callgraph from {file_name}")
+ with open(file_name, 'rb') as f:
+ return pickle.load(f)
+
+ @staticmethod
+ def from_files(*file_names):
+ """Merge multiple call graphs from a list of files."""
+ callgraph = CallGraph()
+ for file_name in file_names:
+ funcs = CallGraph.from_file(file_name).funcs
+ for callee, callers in funcs.items():
+ callgraph.funcs[callee].update(callers)
+ return callgraph
+
+
+class GCSuspectsCollector:
+
+ def __init__(self, options, funcs):
+ self.gc = {}
+ self.gc_caused = collections.defaultdict(set)
+ self.funcs = funcs
+ if options.allowlist:
+ self.is_special = IS_SPECIAL_WITH_ALLOW_LIST
+ else:
+ self.is_special = IS_SPECIAL_WITHOUT_ALLOW_LIST
+
+ def add_cause(self, name, cause):
+ self.gc_caused[name].add(cause)
def resolve(self, name):
- if name not in self.funcs:
- self.funcs[name] = set()
- m = self.is_special.search(name)
- if m:
- if m.group("gc"):
- self.gc[name] = True
- self.add_cause(name, "<GC>")
- elif m.group("safepoint"):
- self.gc[name] = True
- self.add_cause(name, "<Safepoint>")
- elif m.group("allow"):
- self.gc[name] = False
-
- return self.funcs[name]
+ m = self.is_special.search(name)
+ if not m:
+ return
+
+ if m.group("gc"):
+ self.gc[name] = True
+ self.add_cause(name, "<GC>")
+ elif m.group("safepoint"):
+ self.gc[name] = True
+ self.add_cause(name, "<Safepoint>")
+ elif m.group("allow"):
+ self.gc[name] = False
def propagate(self):
log("Propagating GC information")
- def mark(funcname, callers):
- for caller in callers:
+ def mark(funcname):
+ for caller in self.funcs[funcname]:
if caller not in self.gc:
self.gc[caller] = True
- mark(caller, self.funcs[caller])
+ mark(caller)
self.add_cause(caller, funcname)
- for funcname, callers in list(self.funcs.items()):
+ for funcname in self.funcs:
+ self.resolve(funcname)
+
+ for funcname in self.funcs:
if self.gc.get(funcname, False):
- mark(funcname, callers)
+ mark(funcname)
+
+
+def generate_callgraph(files, options):
+ """Construct a (potentially partial) call graph from a subset of
+ source files.
+ """
+ callgraph = CallGraph()
+
+ log(f"Building call graph for {options.v8_target_cpu}")
+ for _, stdout, _ in invoke_clang_plugin_for_each_file(
+ files, "dump-callees", [], options):
+ callgraph.parse(stdout.splitlines())
+ return callgraph
-def generate_gc_suspects(files, options):
- # Reset the global state.
- collector = GCSuspectsCollector(options)
- log("Building GC Suspects for {}", options.v8_target_cpu)
- for _, stdout, _ in invoke_clang_plugin_for_each_file(files, "dump-callees",
- [], options):
- collector.parse(stdout.splitlines())
+def generate_gc_suspects_from_callgraph(callgraph, options):
+ """Calculate and store gc-suspect information from a given call graph."""
+ collector = GCSuspectsCollector(options, callgraph.funcs)
collector.propagate()
# TODO(cbruni): remove once gcmole.cc is migrated
write_gcmole_results(collector, options, options.v8_root_dir)
write_gcmole_results(collector, options, options.out_dir)
+def generate_gc_suspects_from_files(options):
+ """Generate file list and corresponding gc-suspect information."""
+ files = build_file_list(options)
+ call_graph = generate_callgraph(files, options)
+ generate_gc_suspects_from_callgraph(call_graph, options)
+ return files
+
+
def write_gcmole_results(collector, options, dst):
# gcsuspects contains a list("mangled_full_name,name") of all functions that
# could cause a gc (directly or indirectly).
@@ -437,20 +455,12 @@ def write_gcmole_results(collector, options, dst):
# Analysis
-def check_correctness_for_arch(options, for_test):
- files = build_file_list(options, for_test)
-
- if not options.reuse_gcsuspects:
- generate_gc_suspects(files, options)
- else:
- log("Reusing GCSuspects for {}", options.v8_target_cpu)
-
+def check_correctness_for_arch(files, options):
processed_files = 0
errors_found = False
- output = ""
log("Searching for evaluation order problems " +
- (' and dead variables' if options.dead_vars else '') + "for" +
+ ("and dead variables " if options.dead_vars else "") + "for " +
options.v8_target_cpu)
plugin_args = []
if options.dead_vars:
@@ -465,26 +475,39 @@ def check_correctness_for_arch(options, for_test):
if not errors_found:
errors_found = re.search("^[^:]+:\d+:\d+: (warning|error)", stderr,
re.MULTILINE) is not None
- if for_test:
- output = output + stderr
- else:
- sys.stdout.write(stderr)
+ sys.stderr.write(stderr)
log("Done processing {} files.", processed_files)
log("Errors found" if errors_found else "No errors found")
- return errors_found, output
+ return errors_found
+
+
+def clean_test_output(output):
+ """Substitute line number patterns for files except gcmole-test.cc, as
+ otherwise unrelated code changes require a rebaseline of test expectations.
+ """
+ return re.sub(
+ r'(?<!gcmole-test\.cc):\d*:\d*:',
+ ':<number>:<number>:',
+ output)
-def test_run(options):
+def has_unexpected_errors(options, errors_found, file_io):
+ """Returns True if error state isn't as expected, False otherwise.
+
+ In test-run mode, we expect certain errors and return False if expectations
+ are met.
+ """
if not options.test_run:
- return True
+ return errors_found
+
log("Test Run")
- errors_found, output = check_correctness_for_arch(options, True)
+ output = clean_test_output(file_io.getvalue())
if not errors_found:
log("Test file should produce errors, but none were found. Output:")
print(output)
- return False
+ return True
new_file = options.out_dir / "test-expectations-gen.txt"
with open(new_file, "w") as f:
@@ -500,9 +523,9 @@ def test_run(options):
print("#" * 79)
log("Output mismatch from running tests.")
log("Please run gcmole manually with --test-run --verbose.")
- log("Expected: " + expected_file)
- log("New: " + new_file)
- log("*Diff:* " + diff_file)
+ log(f"Expected: {expected_file}")
+ log(f"New: {new_file}")
+ log(f"*Diff:* {diff_file}")
print("#" * 79)
for line in difflib.unified_diff(
expectations.splitlines(),
@@ -515,17 +538,17 @@ def test_run(options):
print("#" * 79)
log("Full output")
- log("Expected: " + expected_file)
- log("Diff: " + diff_file)
- log("*New:* " + new_file)
+ log(f"Expected: {expected_file}")
+ log(f"Diff: {diff_file}")
+ log(f"*New*: {new_file}")
print("#" * 79)
print(output)
print("#" * 79)
- return False
+ return True
log("Tests ran successfully")
- return True
+ return False
# =============================================================================
@@ -533,123 +556,186 @@ def relative_parents(path, level=0):
return Path(os.path.relpath(str(path.resolve().parents[level])))
-def main(args):
+def main(argv):
# Get a clean parent path relative to PWD
default_root_dir = relative_parents(Path(__file__), level=2)
- if len(args) >= 1:
- default_gcmole_dir = relative_parents(Path(args[0]))
+ if len(argv) >= 1:
+ default_gcmole_dir = relative_parents(Path(argv[0]))
if default_gcmole_dir or not default_gcmole_dir.exists():
default_gcmole_dir = default_root_dir / 'tools' / 'gcmole'
-
- parser = optparse.OptionParser()
- archs = list(ARCHITECTURES.keys())
- parser.add_option(
- "--v8-root-dir",
- metavar="DIR",
- default=default_root_dir,
- help="V8 checkout directory. Default: '{}'".format(
- default_root_dir.absolute()))
- parser.add_option(
- "--v8-target-cpu",
- type="choice",
- default="x64",
- choices=archs,
- help="Tested CPU architecture. Choices: {}".format(archs),
- metavar="CPU")
default_clang_bin_dir = default_gcmole_dir / 'gcmole-tools/bin'
- parser.add_option(
- "--clang-bin-dir",
- metavar="DIR",
- help="Build dir of the custom clang version for gcmole." + \
- "Default: env['CLANG_DIR'] or '{}'".format(default_clang_bin_dir))
- parser.add_option(
- "--clang-plugins-dir",
- metavar="DIR",
- help="Containing dir for libgcmole.so."
- "Default: env['CLANG_PLUGINS'] or '{}'".format(default_gcmole_dir))
- parser.add_option(
- "--v8-build-dir",
- metavar="BUILD_DIR",
- help="GN build dir for v8. Default: 'out/CPU.Release'. "
- "Config must match cpu specified by --v8-target-cpu")
- parser.add_option(
- "--out-dir",
- metavar="DIR",
- help="Output location for the gcsuspect and gcauses file."
- "Default: BUILD_DIR/gen/tools/gcmole")
- parser.add_option(
- "--is-bot",
- action="store_true",
- default=False,
- help="Flag for setting build bot specific settings.")
-
- group = optparse.OptionGroup(parser, "GCMOLE options")
- group.add_option(
- "--reuse-gcsuspects",
- action="store_true",
- default=False,
- help="Don't build gcsuspects file and reuse previously generated one.")
- group.add_option(
- "--sequential",
- action="store_true",
- default=False,
- help="Don't use parallel python runner.")
- group.add_option(
- "--verbose",
- action="store_true",
- default=False,
- help="Print commands to console before executing them.")
- group.add_option(
- "--no-dead-vars",
- action="store_false",
- dest="dead_vars",
- default=True,
- help="Don't perform dead variable analysis.")
- group.add_option(
- "--verbose-trace",
- action="store_true",
- default=False,
- help="Enable verbose tracing from the plugin itself."
- "This can be useful to debug finding dead variable.")
- group.add_option(
- "--no-allowlist",
- action="store_true",
- default=True,
- dest="allowlist",
- help="When building gcsuspects allowlist certain functions as if they can be "
- "causing GC. Currently used to reduce number of false positives in dead "
- "variables analysis. See TODO for ALLOWLIST in gcmole.py")
- group.add_option(
- "--test-run",
- action="store_true",
- default=False,
- help="Test gcmole on tools/gcmole/gcmole-test.cc")
- parser.add_option_group(group)
-
- (options, args) = parser.parse_args()
-
- if not options.v8_target_cpu:
- # Backwards compatibility
- if len(args) > 0 and args[0] in archs:
- options.v8_target_cpu = args[0]
- log("Using --v8-target-cpu={}", options.v8_target_cpu)
- else:
- parser.error("Missing --v8-target-cpu option")
+
+ def add_common_args(parser):
+ archs = list(ARCHITECTURES.keys())
+ parser.add_argument(
+ "--v8-root-dir",
+ metavar="DIR",
+ default=default_root_dir,
+ help="V8 checkout directory. Default: '{}'".format(
+ default_root_dir.absolute()))
+ parser.add_argument(
+ "--v8-target-cpu",
+ default="x64",
+ choices=archs,
+ help="Tested CPU architecture. Choices: {}".format(archs),
+ metavar="CPU")
+ parser.add_argument(
+ "--clang-bin-dir",
+ metavar="DIR",
+ help="Build dir of the custom clang version for gcmole." + \
+ "Default: env['CLANG_DIR'] or '{}'".format(default_clang_bin_dir))
+ parser.add_argument(
+ "--clang-plugins-dir",
+ metavar="DIR",
+ help="Containing dir for libgcmole.so."
+ "Default: env['CLANG_PLUGINS'] or '{}'".format(default_gcmole_dir))
+ parser.add_argument(
+ "--v8-build-dir",
+ metavar="BUILD_DIR",
+ help="GN build dir for v8. Default: 'out/CPU.Release'. "
+ "Config must match cpu specified by --v8-target-cpu")
+ parser.add_argument(
+ "--out-dir",
+ metavar="DIR",
+ help="Output location for the gcsuspect and gcauses file."
+ "Default: BUILD_DIR/gen/tools/gcmole")
+ parser.add_argument(
+ "--is-bot",
+ action="store_true",
+ default=False,
+ help="Flag for setting build bot specific settings.")
+ parser.add_argument(
+ "--shard-count",
+ default=1,
+ type=int,
+ help="Number of tasks the current action (e.g. collect or check) "
+ "is distributed to.")
+ parser.add_argument(
+ "--shard-index",
+ default=0,
+ type=int,
+ help="Index of the current task (in [0..shard-count-1]) if the "
+ "overall action is distributed (shard-count > 1).")
+
+ group = parser.add_argument_group("GCMOLE options")
+ group.add_argument(
+ "--sequential",
+ action="store_true",
+ default=False,
+ help="Don't use parallel python runner.")
+ group.add_argument(
+ "--verbose",
+ action="store_true",
+ default=False,
+ help="Print commands to console before executing them.")
+ group.add_argument(
+ "--no-dead-vars",
+ action="store_false",
+ dest="dead_vars",
+ default=True,
+ help="Don't perform dead variable analysis.")
+ group.add_argument(
+ "--verbose-trace",
+ action="store_true",
+ default=False,
+ help="Enable verbose tracing from the plugin itself."
+ "This can be useful to debug finding dead variable.")
+ group.add_argument(
+ "--no-allowlist",
+ action="store_true",
+ default=True,
+ dest="allowlist",
+ help="When building gcsuspects allowlist certain functions as if they can be "
+ "causing GC. Currently used to reduce number of false positives in dead "
+ "variables analysis. See TODO for ALLOWLIST in gcmole.py")
+ group.add_argument(
+ "--test-run",
+ action="store_true",
+ default=False,
+ help="Test gcmole on tools/gcmole/gcmole-test.cc")
+
+ parser = argparse.ArgumentParser()
+ subps = parser.add_subparsers()
+
+ subp = subps.add_parser(
+ "full", description="Run both gcmole analysis passes.")
+ add_common_args(subp)
+ subp.set_defaults(func=full_run)
+
+ subp = subps.add_parser(
+ "collect",
+ description="Construct call graph from source files. "
+ "The action can be distributed using --shard-count and "
+ "--shard-index.")
+ add_common_args(subp)
+ subp.set_defaults(func=collect_run)
+ subp.add_argument(
+ "--output",
+ required=True,
+ help="Path to a file where to store the constructed call graph")
+
+ subp = subps.add_parser(
+ "merge",
+ description="Merge partial call graphs and propagate gc suspects.")
+ add_common_args(subp)
+ subp.set_defaults(func=merge_run)
+ subp.add_argument(
+ "--input",
+ action='append',
+ required=True,
+ help="Path to a file containing a partial call graph stored by "
+ "'collect'. Repeat for multiple files.")
+
+ subp = subps.add_parser(
+ "check",
+ description="Check for problems using previously collected gc-suspect "
+ "information. The action can be distributed using "
+ "--shard-count and --shard-index.")
+ add_common_args(subp)
+ subp.set_defaults(func=check_run)
+
+ options = parser.parse_args(argv[1:])
verify_and_convert_dirs(parser, options, default_gcmole_dir,
default_clang_bin_dir)
verify_clang_plugin(parser, options)
prepare_gcmole_files(options)
verify_build_config(parser, options)
+ override_env_options(options)
- any_errors_found = False
- if not test_run(options):
- any_errors_found = True
- else:
- errors_found, output = check_correctness_for_arch(options, False)
- any_errors_found = any_errors_found or errors_found
+ options.func(options)
+
+
+@contextmanager
+def maybe_redirect_stderr(options):
+ file_io = io.StringIO() if options.test_run else sys.stderr
+ with redirect_stderr(file_io) as f:
+ yield f
+
+
+def check_files(options, files):
+ with maybe_redirect_stderr(options) as file_io:
+ errors_found = check_correctness_for_arch(files, options)
+ sys.exit(has_unexpected_errors(options, errors_found, file_io))
+
+
+def full_run(options):
+ check_files(options, generate_gc_suspects_from_files(options))
- sys.exit(1 if any_errors_found else 0)
+
+def collect_run(options):
+ files = build_file_list(options)
+ callgraph = generate_callgraph(files, options)
+ callgraph.to_file(options.output)
+
+
+def merge_run(options):
+ generate_gc_suspects_from_callgraph(
+ CallGraph.from_files(*options.input), options)
+
+
+def check_run(options):
+ check_files(options, build_file_list(options))
def verify_and_convert_dirs(parser, options, default_tools_gcmole_dir,
@@ -700,13 +786,15 @@ def verify_and_convert_dirs(parser, options, default_tools_gcmole_dir,
else:
options.out_dir = Path(options.out_dir)
- for flag in [
- "--v8-root-dir", "--v8-build-dir", "--clang-bin-dir",
- "--clang-plugins-dir", "--out-dir"
+ for flag, path in [
+ ("--v8-root-dir", options.v8_root_dir),
+ ("--v8-build-dir", options.v8_build_dir),
+ ("--clang-bin-dir", options.clang_bin_dir),
+ ("--clang-plugins-dir", options.clang_plugins_dir),
+ ("--out-dir", options.out_dir),
]:
- dir = getattr(options, parser.get_option(flag).dest)
- if not dir.is_dir():
- parser.error("{}='{}' does not exist!".format(flag, dir))
+ if not path.is_dir():
+ parser.error(f"{flag}='{path}' does not exist!")
def verify_clang_plugin(parser, options):
@@ -753,5 +841,13 @@ def verify_build_config(parser, options):
options.v8_build_dir, options.v8_target_cpu, found_cpu))
+def override_env_options(options):
+ """Set shard options if passed as gtest environment vars on bots."""
+ options.shard_count = int(
+ os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ options.shard_index = int(
+ os.environ.get('GTEST_SHARD_INDEX', options.shard_index))
+
+
if __name__ == "__main__":
main(sys.argv)
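
Assuming the gcmole clang tooling has been fetched (CLANG_DIR/--clang-bin-dir
as before), a possible way to drive the new subcommands looks like the
following; shard counts, file names and the build directory are illustrative:

    # Collect partial call graphs on two shards; files are distributed
    # round-robin, i.e. file i goes to shard (i % shard-count).
    tools/gcmole/gcmole.py collect --v8-build-dir=out/x64.release \
        --shard-count=2 --shard-index=0 --output=callgraph.0.bin
    tools/gcmole/gcmole.py collect --v8-build-dir=out/x64.release \
        --shard-count=2 --shard-index=1 --output=callgraph.1.bin
    # Merge the pickled call graphs and propagate GC-suspect information.
    tools/gcmole/gcmole.py merge --v8-build-dir=out/x64.release \
        --input=callgraph.0.bin --input=callgraph.1.bin
    # Run the checks themselves (may be sharded the same way); the 'full'
    # subcommand still performs all of the above in a single process.
    tools/gcmole/gcmole.py check --v8-build-dir=out/x64.release

On bots, GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX override --shard-count and
--shard-index via override_env_options().
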
diff --git a/deps/v8/tools/gcmole/gcmole_args.py b/deps/v8/tools/gcmole/gcmole_args.py
new file mode 100644
index 0000000000..9f266f5228
--- /dev/null
+++ b/deps/v8/tools/gcmole/gcmole_args.py
@@ -0,0 +1,64 @@
+# Copyright 2023 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Calculate arguments for the gcmole plugin based on flags passed to the
+compiler for a typical target in V8.
+"""
+
+from pathlib import Path
+
+import os
+import re
+import sys
+
+DEFINES_RE = re.compile(r'^defines = (.*)$', re.M)
+INCLUDES_RE = re.compile(r'^include_dirs = (.*)$', re.M)
+
+BASE_DIR = Path(__file__).resolve().parents[2].absolute()
+
+# This script is always called relative to the build directory root
+# by ninja.
+BUILD_DIR_ABS = Path.cwd()
+BUILD_DIR_REL = BUILD_DIR_ABS.relative_to(BASE_DIR)
+
+
+def search_flags(regexp, ninja_config):
+ match = regexp.search(ninja_config)
+ assert match
+ result = match.group(1)
+ assert result
+ return result
+
+
+def main():
+ assert len(sys.argv) == 2, 'Expecting sysroot arg'
+ gn_sysroot_var = sys.argv[1]
+ assert gn_sysroot_var.startswith('//'), 'Expecting root-dir gn path'
+ rel_sysroot = gn_sysroot_var[len('//'):]
+
+ assert BUILD_DIR_ABS.exists()
+
+ ninja_file = BUILD_DIR_ABS / 'obj' / 'v8_base_without_compiler.ninja'
+ assert ninja_file.exists()
+
+ with ninja_file.open() as f:
+ ninja_config = f.read()
+
+ defines = search_flags(DEFINES_RE, ninja_config)
+ includes = search_flags(INCLUDES_RE, ninja_config)
+
+ # Include flags are relative to the build root. Make them relative to the
+ # base directory for gcmole.
+ # E.g. BUILD_DIR_REL = out/build and -I../../include gives -Iinclude.
+ include_flags = []
+ for flag in includes.strip().split():
+ prefix, suffix = flag[:2], flag[2:]
+ assert prefix == '-I'
+ include_flags.append(prefix + os.path.normpath(BUILD_DIR_REL / suffix))
+
+ with open('v8_gcmole.args', 'w') as f:
+ f.write(' '.join([defines] + include_flags + [f'--sysroot={rel_sysroot}']))
+
+
+if __name__ == '__main__':
+ main()
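
gcmole_args.py rebases ninja's build-dir-relative include flags onto the V8 root, which is exactly what the expectations in the new gcmole_test.py check: for a build dir named `out`, `-I.. -Igen -I../include -Igen/include` becomes `-I. -Iout/gen -Iinclude -Iout/gen/include`. A small self-contained illustration of the same normalization, using posixpath for a deterministic result; this is a sketch, not part of the patch:

    import posixpath

    BUILD_DIR_REL = 'out'  # build directory relative to the V8 root
    includes = '-I.. -Igen -I../include -Igen/include'

    include_flags = []
    for flag in includes.split():
      prefix, suffix = flag[:2], flag[2:]
      assert prefix == '-I'
      # Join with the build dir, then collapse '..' segments.
      rebased = posixpath.normpath(posixpath.join(BUILD_DIR_REL, suffix))
      include_flags.append(prefix + rebased)

    print(include_flags)
    # ['-I.', '-Iout/gen', '-Iinclude', '-Iout/gen/include']
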
diff --git a/deps/v8/tools/gcmole/gcmole_test.py b/deps/v8/tools/gcmole/gcmole_test.py
new file mode 100644
index 0000000000..4197165535
--- /dev/null
+++ b/deps/v8/tools/gcmole/gcmole_test.py
@@ -0,0 +1,436 @@
+#!/usr/bin/env python3
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pathlib import Path
+
+import collections
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import textwrap
+import unittest
+
+import gcmole
+
+GCMOLE_PATH = Path(__file__).parent.absolute()
+TESTDATA_PATH = GCMOLE_PATH / 'testdata' / 'v8'
+
+Options = collections.namedtuple(
+ 'Options', ['v8_root_dir', 'v8_target_cpu', 'shard_count', 'shard_index',
+ 'test_run'])
+
+
+def abs_test_file(f):
+ return TESTDATA_PATH / f
+
+
+class FilesTest(unittest.TestCase):
+
+ def testFileList_for_testing(self):
+ options = Options(TESTDATA_PATH, 'x64', 1, 0, True)
+ self.assertEqual(
+ gcmole.build_file_list(options),
+ list(map(abs_test_file, ['tools/gcmole/gcmole-test.cc'])))
+
+ def testFileList_x64(self):
+ options = Options(TESTDATA_PATH, 'x64', 1, 0, False)
+ expected = [
+ 'file1.cc',
+ 'file2.cc',
+ 'x64/file1.cc',
+ 'x64/file2.cc',
+ 'file3.cc',
+ 'file4.cc',
+ 'test/cctest/test-x64-file1.cc',
+ 'test/cctest/test-x64-file2.cc',
+ ]
+ self.assertEqual(
+ gcmole.build_file_list(options),
+ list(map(abs_test_file, expected)))
+
+ def testFileList_x64_shard0(self):
+ options = Options(TESTDATA_PATH, 'x64', 2, 0, False)
+ expected = [
+ 'file1.cc',
+ 'x64/file1.cc',
+ 'file3.cc',
+ 'test/cctest/test-x64-file1.cc',
+ ]
+ self.assertEqual(
+ gcmole.build_file_list(options),
+ list(map(abs_test_file, expected)))
+
+ def testFileList_x64_shard1(self):
+ options = Options(TESTDATA_PATH, 'x64', 2, 1, False)
+ expected = [
+ 'file2.cc',
+ 'x64/file2.cc',
+ 'file4.cc',
+ 'test/cctest/test-x64-file2.cc',
+ ]
+ self.assertEqual(
+ gcmole.build_file_list(options),
+ list(map(abs_test_file, expected)))
+
+ def testFileList_arm(self):
+ options = Options(TESTDATA_PATH, 'arm', 1, 0, False)
+ expected = [
+ 'file1.cc',
+ 'file2.cc',
+ 'file3.cc',
+ 'file4.cc',
+ 'arm/file1.cc',
+ 'arm/file2.cc',
+ ]
+ self.assertEqual(
+ gcmole.build_file_list(options),
+ list(map(abs_test_file, expected)))
+
+
+GC = 'Foo,NowCollectAllTheGarbage'
+SP = 'Bar,SafepointSlowPath'
+WF = 'Baz,WriteField'
+
+
+class OutputLines:
+ CALLERS_RE = re.compile(r'([\w,]+)\s*→\s*(.*)')
+
+ def __init__(self, *callee_list):
+ """Construct a test data placeholder for output lines of one invocation of
+ the GCMole plugin.
+
+ Args:
+ callee_list: Strings, each containing a caller/callee relationship
+ formatted as "A → B C", meaning A calls B and C. For GC,
+ Safepoint and an allow-listed function, use the GC, SP and WF
+ constants above, respectively.
+ Methods not calling anything are formatted as "A →".
+ """
+ self.callee_list = callee_list
+
+ def lines(self):
+ result = []
+ for str_rep in self.callee_list:
+ match = self.CALLERS_RE.match(str_rep)
+ assert match
+ result.append(match.group(1))
+ for callee in (match.group(2) or '').split():
+ result.append('\t' + callee)
+ return result
+
+
+class SuspectCollectorTest(unittest.TestCase):
+
+ def create_callgraph(self, *outputs):
+ call_graph = gcmole.CallGraph()
+ for output in outputs:
+ call_graph.parse(output.lines())
+ return call_graph
+
+ def testCallGraph(self):
+ call_graph = self.create_callgraph(OutputLines())
+ self.assertDictEqual(call_graph.funcs, {})
+
+ call_graph = self.create_callgraph(OutputLines('A →'))
+ self.assertDictEqual(call_graph.funcs, {'A': set()})
+
+ call_graph = self.create_callgraph(OutputLines('A → B'))
+ self.assertDictEqual(call_graph.funcs, {'A': set(), 'B': set('A')})
+
+ call_graph = self.create_callgraph(
+ OutputLines('A → B C', 'B → C D', 'D →'))
+ self.assertDictEqual(
+ call_graph.funcs,
+ {'A': set(), 'B': set('A'), 'C': set(['A', 'B']), 'D': set('B')})
+
+ call_graph = self.create_callgraph(
+ OutputLines('B → C D', 'D →'), OutputLines('A → B C'))
+ self.assertDictEqual(
+ call_graph.funcs,
+ {'A': set(), 'B': set('A'), 'C': set(['A', 'B']), 'D': set('B')})
+
+ def testCallGraphMerge(self):
+ """Test serializing, deserializing and merging call graphs."""
+ temp_dir = Path(tempfile.mkdtemp('gcmole_test'))
+
+ call_graph1 = self.create_callgraph(
+ OutputLines('B → C D E', 'D →'), OutputLines('A → B C'))
+ self.assertDictEqual(
+ call_graph1.funcs,
+ {'A': set(), 'B': set('A'), 'C': set(['A', 'B']), 'D': set('B'),
+ 'E': set('B')})
+
+ call_graph2 = self.create_callgraph(
+ OutputLines('E → A'), OutputLines('C → D F'))
+ self.assertDictEqual(
+ call_graph2.funcs,
+ {'A': set('E'), 'C': set(), 'D': set('C'), 'E': set(), 'F': set('C')})
+
+ file1 = temp_dir / 'file1.bin'
+ file2 = temp_dir / 'file2.bin'
+ call_graph1.to_file(file1)
+ call_graph2.to_file(file2)
+
+ expected = {'A': set(['E']), 'B': set('A'), 'C': set(['A', 'B']),
+ 'D': set(['B', 'C']), 'E': set(['B']), 'F': set(['C'])}
+
+ call_graph = gcmole.CallGraph.from_files(file1, file2)
+ self.assertDictEqual(call_graph.funcs, expected)
+
+ call_graph = gcmole.CallGraph.from_files(file2, file1)
+ self.assertDictEqual(call_graph.funcs, expected)
+
+ call_graph3 = self.create_callgraph(
+ OutputLines('F → G'), OutputLines('G →'))
+ self.assertDictEqual(
+ call_graph3.funcs,
+ {'G': set('F'), 'F': set()})
+
+ file3 = temp_dir / 'file3.bin'
+ call_graph3.to_file(file3)
+
+ call_graph = gcmole.CallGraph.from_files(file1, file2, file3)
+ self.assertDictEqual(call_graph.funcs, dict(G=set('F'), **expected))
+
+ def create_collector(self, outputs):
+ Options = collections.namedtuple('OptionsForCollector', ['allowlist'])
+ options = Options(True)
+ call_graph = self.create_callgraph(*outputs)
+ collector = gcmole.GCSuspectsCollector(options, call_graph.funcs)
+ collector.propagate()
+ return collector
+
+ def check(self, outputs, expected_gc, expected_gc_caused):
+ """Verify the GCSuspectsCollector propagation and outputs against test
+ data.
+
+ Args:
+ outputs: List of OutputLines objects simulating the lines returned by
+ the GCMole plugin in drop-callees mode. Each OutputLines object
+ represents one plugin invocation.
+ expected_gc: Mapping as expected by GCSuspectsCollector.gc.
+ expected_gc_caused: Mapping as expected by GCSuspectsCollector.gc_caused.
+ """
+ collector = self.create_collector(outputs)
+ self.assertDictEqual(collector.gc, expected_gc)
+ self.assertDictEqual(collector.gc_caused, expected_gc_caused)
+
+ def testNoGC(self):
+ self.check(
+ outputs=[OutputLines()],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A →')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A →', 'B →')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A → B C')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A → B', 'B → C')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A → B C', 'B → D', 'D → A', 'C →')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A →'), OutputLines('B →')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A → B'), OutputLines('B → C')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines('A → B C'),
+ OutputLines('B → D', 'D → A'),
+ OutputLines('C →')],
+ expected_gc={},
+ expected_gc_caused={},
+ )
+
+ def testGCOneFile(self):
+ self.check(
+ outputs=[OutputLines(f'{GC} →')],
+ expected_gc={GC: True},
+ expected_gc_caused={GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}')],
+ expected_gc={GC: True, 'A': True},
+ expected_gc_caused={'A': {GC}, GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}', 'B → A')],
+ expected_gc={GC: True, 'A': True, 'B': True},
+ expected_gc_caused={'B': {'A'}, 'A': {GC}, GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines('B → A', f'A → {GC}')],
+ expected_gc={GC: True, 'A': True, 'B': True},
+ expected_gc_caused={'B': {'A'}, 'A': {GC}, GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → B {GC}', 'B →', 'C → B A')],
+ expected_gc={GC: True, 'A': True, 'C': True},
+ expected_gc_caused={'C': {'A'}, 'A': {GC}, GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}', 'B → A', 'C → A', 'D → B C')],
+ expected_gc={GC: True, 'A': True, 'B': True, 'C': True, 'D': True},
+ expected_gc_caused={'C': {'A'}, 'A': {GC}, 'B': {'A'}, 'D': {'B', 'C'},
+ GC: {'<GC>'}},
+ )
+
+ def testAllowListOneFile(self):
+ self.check(
+ outputs=[OutputLines(f'{WF} →')],
+ expected_gc={WF: False},
+ expected_gc_caused={},
+ )
+ self.check(
+ outputs=[OutputLines(f'{WF} → {GC}')],
+ expected_gc={GC: True, WF: False},
+ expected_gc_caused={WF: {GC}, GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}', f'{WF} → A B', 'D → A B',
+ f'E → {WF}')],
+ expected_gc={GC: True, WF: False, 'A': True, 'D': True},
+ expected_gc_caused={'A': {GC}, WF: {'A'}, 'D': {'A'}, GC: {'<GC>'}},
+ )
+
+ def testSafepointOneFile(self):
+ self.check(
+ outputs=[OutputLines(f'{SP} →')],
+ expected_gc={SP: True},
+ expected_gc_caused={SP: {'<Safepoint>'}},
+ )
+ self.check(
+ outputs=[OutputLines('B → A', f'A → {SP}')],
+ expected_gc={SP: True, 'A': True, 'B': True},
+ expected_gc_caused={'B': {'A'}, 'A': {SP}, SP: {'<Safepoint>'}},
+ )
+
+ def testCombinedOneFile(self):
+ self.check(
+ outputs=[OutputLines(f'{GC} →', f'{SP} →')],
+ expected_gc={SP: True, GC: True},
+ expected_gc_caused={SP: {'<Safepoint>'}, GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}', f'B → {SP}')],
+ expected_gc={GC: True, SP: True, 'A': True, 'B': True},
+ expected_gc_caused={'B': {SP}, 'A': {GC}, SP: {'<Safepoint>'},
+ GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}', f'B → {SP}', 'C → D A B')],
+ expected_gc={GC: True, SP: True, 'A': True, 'B': True, 'C': True},
+ expected_gc_caused={'B': {SP}, 'A': {GC}, 'C': {'A', 'B'},
+ SP: {'<Safepoint>'}, GC: {'<GC>'}},
+ )
+
+ def testCombinedMoreFiles(self):
+ self.check(
+ outputs=[OutputLines(f'A → {GC}'), OutputLines(f'B → {SP}')],
+ expected_gc={GC: True, SP: True, 'A': True, 'B': True},
+ expected_gc_caused={'B': {SP}, 'A': {GC}, SP: {'<Safepoint>'},
+ GC: {'<GC>'}},
+ )
+ self.check(
+ outputs=[OutputLines(f'A → {GC}'), OutputLines(f'B → {SP}'),
+ OutputLines('C → D A B')],
+ expected_gc={GC: True, SP: True, 'A': True, 'B': True, 'C': True},
+ expected_gc_caused={'B': {SP}, 'A': {GC}, 'C': {'A', 'B'},
+ SP: {'<Safepoint>'}, GC: {'<GC>'}},
+ )
+
+ def testWriteGCMoleResults(self):
+ temp_dir = Path(tempfile.mkdtemp('gcmole_test'))
+ Options = collections.namedtuple('OptionsForWriting', ['v8_target_cpu'])
+ collector = self.create_collector(
+ [OutputLines(f'A → {GC}'), OutputLines(f'B → {SP}')])
+ gcmole.write_gcmole_results(collector, Options('x64'), temp_dir)
+
+ gcsuspects_expected = textwrap.dedent(f"""\
+ {GC}
+ {SP}
+ A
+ B
+ """)
+
+ with open(temp_dir / 'gcsuspects') as f:
+ self.assertEqual(f.read(), gcsuspects_expected)
+
+ gccauses_expected = textwrap.dedent(f"""
+ {GC}
+ start,nested
+ <GC>
+ end,nested
+ {SP}
+ start,nested
+ <Safepoint>
+ end,nested
+ A
+ start,nested
+ {GC}
+ end,nested
+ B
+ start,nested
+ {SP}
+ end,nested
+ """).strip()
+
+ with open(temp_dir / 'gccauses') as f:
+ self.assertEqual(f.read().strip(), gccauses_expected)
+
+
+class ArgsTest(unittest.TestCase):
+
+ def testArgs(self):
+ """Test argument retrieval using a fake v8 file system and build dir."""
+ with tempfile.TemporaryDirectory('gcmole_args_test') as temp_dir:
+ temp_dir = Path(temp_dir)
+ temp_out = temp_dir / 'out'
+ temp_gcmole = temp_dir / 'tools' / 'gcmole' / 'gcmole_args.py'
+
+ shutil.copytree(abs_test_file('out'), temp_out)
+ os.makedirs(temp_gcmole.parent)
+ shutil.copy(GCMOLE_PATH / 'gcmole_args.py', temp_gcmole)
+
+ # Simulate a ninja call relative to the build dir.
+ gn_sysroot = '//build/linux/debian_bullseye_amd64-sysroot'
+ subprocess.check_call(
+ [sys.executable, temp_gcmole, gn_sysroot], cwd=temp_out)
+
+ with open(temp_dir / 'out' / 'v8_gcmole.args') as f:
+ self.assertEqual(f.read().split(), [
+ '-DUSE_GLIB=1', '-DV8_TARGET_ARCH_X64', '-I.', '-Iout/gen',
+ '-Iinclude', '-Iout/gen/include',
+ '--sysroot=build/linux/debian_bullseye_amd64-sysroot',
+ ])
+
+
+if __name__ == '__main__':
+ unittest.main()
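
The shard expectations in FilesTest above (shard 0 taking file1.cc, x64/file1.cc, file3.cc and test-x64-file1.cc, shard 1 taking the rest) are consistent with a plain round-robin split over the ordered file list. A hedged restatement of that distribution; the actual build_file_list implementation may differ in detail:

    def shard(files, shard_count, shard_index):
      # Round-robin: shard i takes every shard_count-th file starting at i.
      return [f for i, f in enumerate(files) if i % shard_count == shard_index]

    files = ['file1.cc', 'file2.cc', 'x64/file1.cc', 'x64/file2.cc',
             'file3.cc', 'file4.cc',
             'test/cctest/test-x64-file1.cc', 'test/cctest/test-x64-file2.cc']
    assert shard(files, 2, 0) == ['file1.cc', 'x64/file1.cc', 'file3.cc',
                                  'test/cctest/test-x64-file1.cc']
    assert shard(files, 2, 1) == ['file2.cc', 'x64/file2.cc', 'file4.cc',
                                  'test/cctest/test-x64-file2.cc']
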
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
index 145b21ecc4..af5807a218 100755
--- a/deps/v8/tools/gcmole/run-gcmole.py
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -18,33 +18,42 @@ V8_ROOT_DIR = os.path.dirname(os.path.dirname(GCMOLE_PATH))
def print_help():
print(
- """Usage: ./run-gcmole.py TOOLS_GCMOLE_DIR V8_TARGET_CPU [gcmole.py OPTION]...
+ """Usage: ./run-gcmole.py [MODE] V8_TARGET_CPU [gcmole.py OPTION]...
Helper script to run gcmole.py on the bots.""")
+args = sys.argv[1:]
+if "--help" in args:
+ print_help()
+ exit(0)
+
-for arg in sys.argv:
- if arg == "--help":
- print_help()
- exit(0)
+# Different modes of running gcmole. Optional to stay backwards-compatible.
+mode = 'full'
+if args and args[0] in ['check', 'collect', 'full', 'merge']:
+ mode = args[0]
+ args = args[1:]
-if len(sys.argv) < 2:
+
+if not args:
print("Missing arguments!")
print_help()
exit(1)
+
if not os.path.isfile("out/build/gen/torque-generated/builtin-definitions.h"):
print("Expected generated headers in out/build/gen.")
print("Either build v8 in out/build or change the 'out/build/gen' location in gcmole.py")
sys.exit(-1)
-gcmole_py_options = sys.argv[2:]
+gcmole_py_options = args[1:]
proc = subprocess.Popen(
[
sys.executable,
GCMOLE_PY,
+ mode,
"--v8-build-dir=%s" % os.path.join(V8_ROOT_DIR, 'out', 'build'),
- "--v8-target-cpu=%s" % sys.argv[1],
+ "--v8-target-cpu=%s" % args[0],
"--clang-plugins-dir=%s" % CLANG_PLUGINS,
"--clang-bin-dir=%s" % CLANG_BIN,
"--is-bot",
diff --git a/deps/v8/tools/gcmole/test-expectations.txt b/deps/v8/tools/gcmole/test-expectations.txt
index 92256b3e18..549fb949f0 100644
--- a/deps/v8/tools/gcmole/test-expectations.txt
+++ b/deps/v8/tools/gcmole/test-expectations.txt
@@ -1,73 +1,226 @@
-tools/gcmole/gcmole-test.cc:30:10: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:30:10: warning: Possibly stale variable due to GCs.
return obj;
^
-tools/gcmole/gcmole-test.cc:48:3: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:28:20: note: Call might cause unexpected GC.
+ isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+ ^
+./src/heap/heap.h:<number>:<number>: note: GC call here.
+ V8_EXPORT_PRIVATE void CollectGarbage(
+ ^
+tools/gcmole/gcmole-test.cc:48:3: warning: Possible problem with evaluation order with interleaved GCs.
TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
^
-tools/gcmole/gcmole-test.cc:60:3: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:48:25: note: Call might cause unexpected GC.
+ TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
+ ^
+tools/gcmole/gcmole-test.cc:21:1: note: GC call here.
+Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:60:3: warning: Possible problem with evaluation order with interleaved GCs.
TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
^
-tools/gcmole/gcmole-test.cc:85:7: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:60:37: note: Call might cause unexpected GC.
+ TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
+ ^
+tools/gcmole/gcmole-test.cc:21:1: note: GC call here.
+Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:85:7: warning: Possible problem with evaluation order with interleaved GCs.
so->Method(*CauseGC(obj1, isolate));
^
-tools/gcmole/gcmole-test.cc:87:7: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:85:15: note: Call might cause unexpected GC.
+ so->Method(*CauseGC(obj1, isolate));
+ ^
+tools/gcmole/gcmole-test.cc:21:1: note: GC call here.
+Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:87:7: warning: Possible problem with evaluation order with interleaved GCs.
so->Method(CauseGCRaw(*obj1, isolate));
^
-tools/gcmole/gcmole-test.cc:131:14: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:85:15: note: Call might cause unexpected GC.
+ so->Method(*CauseGC(obj1, isolate));
+ ^
+tools/gcmole/gcmole-test.cc:21:1: note: GC call here.
+Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:131:14: warning: Possible problem with evaluation order with interleaved GCs.
so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
^
-tools/gcmole/gcmole-test.cc:133:14: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:131:30: note: Call might cause unexpected GC.
+ so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
+ ^
+tools/gcmole/gcmole-test.cc:115:3: note: GC call here.
+ Handle<Object> VirtualCauseGC(Handle<Object> obj, Isolate* isolate) override {
+ ^
+tools/gcmole/gcmole-test.cc:133:14: warning: Possible problem with evaluation order with interleaved GCs.
so_handle->Method(*base->VirtualCauseGC(obj1, isolate));
^
-tools/gcmole/gcmole-test.cc:154:14: warning: Possible problem with evaluation order.
+tools/gcmole/gcmole-test.cc:131:30: note: Call might cause unexpected GC.
+ so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
+ ^
+tools/gcmole/gcmole-test.cc:115:3: note: GC call here.
+ Handle<Object> VirtualCauseGC(Handle<Object> obj, Isolate* isolate) override {
+ ^
+tools/gcmole/gcmole-test.cc:154:14: warning: Possible problem with evaluation order with interleaved GCs.
so_handle->Method(*SomeClass::StaticCauseGC(obj1, isolate));
^
-tools/gcmole/gcmole-test.cc:164:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:154:22: note: Call might cause unexpected GC.
+ so_handle->Method(*SomeClass::StaticCauseGC(obj1, isolate));
+ ^
+tools/gcmole/gcmole-test.cc:140:3: note: GC call here.
+ static Handle<Object> StaticCauseGC(Handle<Object> obj, Isolate* isolate) {
+ ^
+tools/gcmole/gcmole-test.cc:164:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:172:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:161:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:172:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:198:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:169:3: note: Call might cause unexpected GC.
+ Safepoint();
+ ^
+tools/gcmole/gcmole-test.cc:19:1: note: GC call here.
+void Safepoint() { LocalHeap::Current()->Safepoint(); }
+^
+tools/gcmole/gcmole-test.cc:198:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:224:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:195:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:224:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:235:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:221:3: note: Call might cause unexpected GC.
+ Safepoint();
+ ^
+tools/gcmole/gcmole-test.cc:19:1: note: GC call here.
+void Safepoint() { LocalHeap::Current()->Safepoint(); }
+^
+tools/gcmole/gcmole-test.cc:235:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:242:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:233:3: note: Call might cause unexpected GC.
+ Safepoint();
+ ^
+tools/gcmole/gcmole-test.cc:19:1: note: GC call here.
+void Safepoint() { LocalHeap::Current()->Safepoint(); }
+^
+tools/gcmole/gcmole-test.cc:242:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:252:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:233:3: note: Call might cause unexpected GC.
+ Safepoint();
+ ^
+tools/gcmole/gcmole-test.cc:19:1: note: GC call here.
+void Safepoint() { LocalHeap::Current()->Safepoint(); }
+^
+tools/gcmole/gcmole-test.cc:252:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:262:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:250:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:262:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:265:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:260:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:265:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:271:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:260:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:271:3: warning: Possibly stale variable due to GCs.
+ raw_obj.Print();
+ ^
+tools/gcmole/gcmole-test.cc:269:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:287:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:287:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:285:3: note: Call might cause unexpected GC.
+ TestGuardedDeadVarAnalysisNested(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:268:1: note: GC call here.
+void TestGuardedDeadVarAnalysisNested(JSObject raw_obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:295:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:295:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:293:3: note: Call might cause unexpected GC.
+ TestGuardedDeadVarAnalysisNested(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:268:1: note: GC call here.
+void TestGuardedDeadVarAnalysisNested(JSObject raw_obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:302:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:302:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:300:3: note: Call might cause unexpected GC.
+ TestGuardedDeadVarAnalysisNested(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:268:1: note: GC call here.
+void TestGuardedDeadVarAnalysisNested(JSObject raw_obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:319:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:319:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:317:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:338:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:338:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:334:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:349:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-tools/gcmole/gcmole-test.cc:349:3: warning: Possibly dead variable.
+tools/gcmole/gcmole-test.cc:345:3: note: Call might cause unexpected GC.
+ CauseGCRaw(raw_obj, isolate);
+ ^
+tools/gcmole/gcmole-test.cc:27:1: note: GC call here.
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+^
+tools/gcmole/gcmole-test.cc:359:3: warning: Possibly stale variable due to GCs.
raw_obj.Print();
^
-24 warnings generated.
+tools/gcmole/gcmole-test.cc:355:3: note: Call might cause unexpected GC.
+ Safepoint();
+ ^
+tools/gcmole/gcmole-test.cc:19:1: note: GC call here.
+void Safepoint() { LocalHeap::Current()->Safepoint(); }
+^
+25 warnings generated.
diff --git a/deps/v8/tools/gcmole/testdata/v8/BUILD.gn b/deps/v8/tools/gcmole/testdata/v8/BUILD.gn
new file mode 100644
index 0000000000..d994175d40
--- /dev/null
+++ b/deps/v8/tools/gcmole/testdata/v8/BUILD.gn
@@ -0,0 +1,35 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test data for gcmole.
+
+some_sources = [
+ ### gcmole(all) ###
+ "file1.cc",
+ "file1.h",
+ "file2.cc",
+]
+
+other_sources = [
+ ### gcmole(x64) ###
+ "x64/file1.cc",
+ "x64/file1.h",
+ "x64/file2.cc",
+]
+
+yet_more_sources = [
+ ### gcmole(all) ###
+ "file3.cc",
+
+ # Some other comment.
+ "file4.cc",
+ "file4.h",
+]
+
+the_last_sources = [
+ ### gcmole(arm) ###
+ "arm/file1.cc",
+ "arm/file1.h",
+ "arm/file2.cc",
+]
diff --git a/deps/v8/tools/gcmole/testdata/v8/out/obj/v8_base_without_compiler.ninja b/deps/v8/tools/gcmole/testdata/v8/out/obj/v8_base_without_compiler.ninja
new file mode 100644
index 0000000000..6c176fd520
--- /dev/null
+++ b/deps/v8/tools/gcmole/testdata/v8/out/obj/v8_base_without_compiler.ninja
@@ -0,0 +1,7 @@
+defines = -DUSE_GLIB=1 -DV8_TARGET_ARCH_X64
+include_dirs = -I.. -Igen -I../include -Igen/include
+cflags = -Wall -Werror
+cflags_cc = -isystem../../buildtools/third_party/libc++/trunk/include
+label_name = v8_base_without_compiler
+target_out_dir = obj
+target_output_name = v8_base_without_compiler \ No newline at end of file
diff --git a/deps/v8/tools/gcmole/testdata/v8/test/cctest/BUILD.gn b/deps/v8/tools/gcmole/testdata/v8/test/cctest/BUILD.gn
new file mode 100644
index 0000000000..d0a1b4a1a4
--- /dev/null
+++ b/deps/v8/tools/gcmole/testdata/v8/test/cctest/BUILD.gn
@@ -0,0 +1,12 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test data for gcmole.
+
+some_sources = [
+ ### gcmole(x64) ###
+ "foo-x64-file1.cc",
+ "test-x64-file1.cc",
+ "test-x64-file2.cc",
+]
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index fee237b537..745e24dcbd 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -92,7 +92,8 @@ class SimCommand(gdb.Command):
super (SimCommand, self).__init__ ("sim", gdb.COMMAND_SUPPORT)
def invoke (self, arg, from_tty):
- arg_c_string = gdb.Value(arg)
+ arg_bytes = arg.encode("utf-8") + b'\0'
+ arg_c_string = gdb.Value(arg_bytes, gdb.lookup_type('char').array(len(arg_bytes) - 1))
cmd_func = gdb.selected_frame().read_var("_v8_internal_Simulator_ExecDebugCommand")
cmd_func(arg_c_string)
@@ -180,7 +181,7 @@ def v8_stop_handler(event):
count = 0
# Limit stack scanning since the frames we look for are near the top anyway,
# and otherwise stack overflows can be very slow.
- while frame is not None and count < 7:
+ while frame is not None and count < 10:
count += 1
# If we are in a frame created by gdb (e.g. for `(gdb) call foo()`), gdb
# emits a dummy frame between its stack and the program's stack. Abort the
@@ -296,7 +297,7 @@ class CppGCMemberPrinter(object):
def to_string(self):
pointer = gdb.parse_and_eval(
- "_cppgc_internal_Print_Member((cppgc::internal::MemberBase*){})".format(
+ "_cppgc_internal_Uncompress_Member((void*){})".format(
self.val.address))
return "{}Member<{}> pointing to {}".format(
'' if self.category is None else self.category, self.pointee_type,
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 7f5f7864ae..4d171267a9 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -58,207 +58,449 @@ import sys
# enumeration values used as indexes in internal tables, etc..
#
consts_misc = [
- { 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' },
- { 'name': 'APIObjectType', 'value': 'JS_API_OBJECT_TYPE' },
- { 'name': 'SpecialAPIObjectType', 'value': 'JS_SPECIAL_API_OBJECT_TYPE' },
-
- { 'name': 'FirstContextType', 'value': 'FIRST_CONTEXT_TYPE' },
- { 'name': 'LastContextType', 'value': 'LAST_CONTEXT_TYPE' },
-
- { 'name': 'IsNotStringMask', 'value': 'kIsNotStringMask' },
- { 'name': 'StringTag', 'value': 'kStringTag' },
-
- { 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
- { 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
- { 'name': 'OneByteStringTag', 'value': 'kOneByteStringTag' },
-
- { 'name': 'StringRepresentationMask',
- 'value': 'kStringRepresentationMask' },
- { 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
- { 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
- { 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
- { 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
- { 'name': 'ThinStringTag', 'value': 'kThinStringTag' },
-
- { 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
- { 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
- { 'name': 'SmiTag', 'value': 'kSmiTag' },
- { 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
- { 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
- { 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
- { 'name': 'SystemPointerSize', 'value': 'kSystemPointerSize' },
- { 'name': 'SystemPointerSizeLog2', 'value': 'kSystemPointerSizeLog2' },
- { 'name': 'TaggedSize', 'value': 'kTaggedSize' },
- { 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },
-
- { 'name': 'CodeKindFieldMask', 'value': 'Code::KindField::kMask' },
- { 'name': 'CodeKindFieldShift', 'value': 'Code::KindField::kShift' },
-
- { 'name': 'DeoptimizationDataInlinedFunctionCountIndex',
- 'value': 'DeoptimizationData::kInlinedFunctionCountIndex' },
- { 'name': 'DeoptimizationDataLiteralArrayIndex',
- 'value': 'DeoptimizationData::kLiteralArrayIndex' },
- { 'name': 'DeoptimizationDataOptimizationIdIndex',
- 'value': 'DeoptimizationData::kOptimizationIdIndex' },
- { 'name': 'DeoptimizationDataSharedFunctionInfoIndex',
- 'value': 'DeoptimizationData::kSharedFunctionInfoIndex' },
- { 'name': 'DeoptimizationDataInliningPositionsIndex',
- 'value': 'DeoptimizationData::kInliningPositionsIndex' },
-
- { 'name': 'CodeKindBytecodeHandler',
- 'value': 'static_cast<int>(CodeKind::BYTECODE_HANDLER)' },
- { 'name': 'CodeKindInterpretedFunction',
- 'value': 'static_cast<int>(CodeKind::INTERPRETED_FUNCTION)' },
- { 'name': 'CodeKindBaseline',
- 'value': 'static_cast<int>(CodeKind::BASELINE)' },
-
- { 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
- { 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
- { 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
- { 'name': 'OddballNull', 'value': 'Oddball::kNull' },
- { 'name': 'OddballArgumentsMarker', 'value': 'Oddball::kArgumentsMarker' },
- { 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
- { 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
- { 'name': 'OddballOther', 'value': 'Oddball::kOther' },
- { 'name': 'OddballException', 'value': 'Oddball::kException' },
-
- { 'name': 'ContextRegister', 'value': 'kContextRegister.code()' },
- { 'name': 'ReturnRegister0', 'value': 'kReturnRegister0.code()' },
- { 'name': 'JSFunctionRegister', 'value': 'kJSFunctionRegister.code()' },
- { 'name': 'InterpreterBytecodeOffsetRegister',
- 'value': 'kInterpreterBytecodeOffsetRegister.code()' },
- { 'name': 'InterpreterBytecodeArrayRegister',
- 'value': 'kInterpreterBytecodeArrayRegister.code()' },
- { 'name': 'RuntimeCallFunctionRegister',
- 'value': 'kRuntimeCallFunctionRegister.code()' },
-
- { 'name': 'prop_kind_Data',
- 'value': 'static_cast<int>(PropertyKind::kData)' },
- { 'name': 'prop_kind_Accessor',
- 'value': 'static_cast<int>(PropertyKind::kAccessor)' },
- { 'name': 'prop_kind_mask',
- 'value': 'PropertyDetails::KindField::kMask' },
- { 'name': 'prop_location_Descriptor',
- 'value': 'static_cast<int>(PropertyLocation::kDescriptor)' },
- { 'name': 'prop_location_Field',
- 'value': 'static_cast<int>(PropertyLocation::kField)' },
- { 'name': 'prop_location_mask',
- 'value': 'PropertyDetails::LocationField::kMask' },
- { 'name': 'prop_location_shift',
- 'value': 'PropertyDetails::LocationField::kShift' },
- { 'name': 'prop_attributes_NONE', 'value': 'NONE' },
- { 'name': 'prop_attributes_READ_ONLY', 'value': 'READ_ONLY' },
- { 'name': 'prop_attributes_DONT_ENUM', 'value': 'DONT_ENUM' },
- { 'name': 'prop_attributes_DONT_DELETE', 'value': 'DONT_DELETE' },
- { 'name': 'prop_attributes_mask',
- 'value': 'PropertyDetails::AttributesField::kMask' },
- { 'name': 'prop_attributes_shift',
- 'value': 'PropertyDetails::AttributesField::kShift' },
- { 'name': 'prop_index_mask',
- 'value': 'PropertyDetails::FieldIndexField::kMask' },
- { 'name': 'prop_index_shift',
- 'value': 'PropertyDetails::FieldIndexField::kShift' },
- { 'name': 'prop_representation_mask',
- 'value': 'PropertyDetails::RepresentationField::kMask' },
- { 'name': 'prop_representation_shift',
- 'value': 'PropertyDetails::RepresentationField::kShift' },
- { 'name': 'prop_representation_smi',
- 'value': 'Representation::Kind::kSmi' },
- { 'name': 'prop_representation_double',
- 'value': 'Representation::Kind::kDouble' },
- { 'name': 'prop_representation_heapobject',
- 'value': 'Representation::Kind::kHeapObject' },
- { 'name': 'prop_representation_tagged',
- 'value': 'Representation::Kind::kTagged' },
-
- { 'name': 'prop_desc_key',
- 'value': 'DescriptorArray::kEntryKeyIndex' },
- { 'name': 'prop_desc_details',
- 'value': 'DescriptorArray::kEntryDetailsIndex' },
- { 'name': 'prop_desc_value',
- 'value': 'DescriptorArray::kEntryValueIndex' },
- { 'name': 'prop_desc_size',
- 'value': 'DescriptorArray::kEntrySize' },
-
- { 'name': 'elements_fast_holey_elements',
- 'value': 'HOLEY_ELEMENTS' },
- { 'name': 'elements_fast_elements',
- 'value': 'PACKED_ELEMENTS' },
- { 'name': 'elements_dictionary_elements',
- 'value': 'DICTIONARY_ELEMENTS' },
-
- { 'name': 'bit_field2_elements_kind_mask',
- 'value': 'Map::Bits2::ElementsKindBits::kMask' },
- { 'name': 'bit_field2_elements_kind_shift',
- 'value': 'Map::Bits2::ElementsKindBits::kShift' },
- { 'name': 'bit_field3_is_dictionary_map_shift',
- 'value': 'Map::Bits3::IsDictionaryMapBit::kShift' },
- { 'name': 'bit_field3_number_of_own_descriptors_mask',
- 'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kMask' },
- { 'name': 'bit_field3_number_of_own_descriptors_shift',
- 'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kShift' },
- { 'name': 'class_Map__instance_descriptors_offset',
- 'value': 'Map::kInstanceDescriptorsOffset' },
-
- { 'name': 'off_fp_context_or_frame_type',
- 'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
- { 'name': 'off_fp_context',
- 'value': 'StandardFrameConstants::kContextOffset' },
- { 'name': 'off_fp_constant_pool',
- 'value': 'StandardFrameConstants::kConstantPoolOffset' },
- { 'name': 'off_fp_function',
- 'value': 'StandardFrameConstants::kFunctionOffset' },
- { 'name': 'off_fp_args',
- 'value': 'StandardFrameConstants::kFixedFrameSizeAboveFp' },
- { 'name': 'off_fp_bytecode_array',
- 'value': 'UnoptimizedFrameConstants::kBytecodeArrayFromFp' },
- { 'name': 'off_fp_bytecode_offset',
- 'value': 'UnoptimizedFrameConstants::kBytecodeOffsetOrFeedbackVectorFromFp' },
-
- { 'name': 'scopeinfo_idx_nparams',
- 'value': 'ScopeInfo::kParameterCount' },
- { 'name': 'scopeinfo_idx_ncontextlocals',
- 'value': 'ScopeInfo::kContextLocalCount' },
- { 'name': 'scopeinfo_idx_first_vars',
- 'value': 'ScopeInfo::kVariablePartIndex' },
-
- { 'name': 'jsarray_buffer_was_detached_mask',
- 'value': 'JSArrayBuffer::WasDetachedBit::kMask' },
- { 'name': 'jsarray_buffer_was_detached_shift',
- 'value': 'JSArrayBuffer::WasDetachedBit::kShift' },
-
- { 'name': 'context_idx_scope_info',
- 'value': 'Context::SCOPE_INFO_INDEX' },
- { 'name': 'context_idx_prev',
- 'value': 'Context::PREVIOUS_INDEX' },
- { 'name': 'context_min_slots',
- 'value': 'Context::MIN_CONTEXT_SLOTS' },
- { 'name': 'native_context_embedder_data_offset',
- 'value': 'Internals::kNativeContextEmbedderDataOffset' },
-
-
- { 'name': 'namedictionaryshape_prefix_size',
- 'value': 'NameDictionaryShape::kPrefixSize' },
- { 'name': 'namedictionaryshape_entry_size',
- 'value': 'NameDictionaryShape::kEntrySize' },
- { 'name': 'globaldictionaryshape_entry_size',
- 'value': 'GlobalDictionaryShape::kEntrySize' },
-
- { 'name': 'namedictionary_prefix_start_index',
- 'value': 'NameDictionary::kPrefixStartIndex' },
-
- { 'name': 'numberdictionaryshape_prefix_size',
- 'value': 'NumberDictionaryShape::kPrefixSize' },
- { 'name': 'numberdictionaryshape_entry_size',
- 'value': 'NumberDictionaryShape::kEntrySize' },
-
- { 'name': 'simplenumberdictionaryshape_prefix_size',
- 'value': 'SimpleNumberDictionaryShape::kPrefixSize' },
- { 'name': 'simplenumberdictionaryshape_entry_size',
- 'value': 'SimpleNumberDictionaryShape::kEntrySize' },
-
- { 'name': 'type_JSError__JS_ERROR_TYPE', 'value': 'JS_ERROR_TYPE' },
-];
+ {
+ 'name': 'FirstNonstringType',
+ 'value': 'FIRST_NONSTRING_TYPE'
+ },
+ {
+ 'name': 'APIObjectType',
+ 'value': 'JS_API_OBJECT_TYPE'
+ },
+ {
+ 'name': 'SpecialAPIObjectType',
+ 'value': 'JS_SPECIAL_API_OBJECT_TYPE'
+ },
+ {
+ 'name': 'FirstContextType',
+ 'value': 'FIRST_CONTEXT_TYPE'
+ },
+ {
+ 'name': 'LastContextType',
+ 'value': 'LAST_CONTEXT_TYPE'
+ },
+ {
+ 'name': 'IsNotStringMask',
+ 'value': 'kIsNotStringMask'
+ },
+ {
+ 'name': 'StringTag',
+ 'value': 'kStringTag'
+ },
+ {
+ 'name': 'StringEncodingMask',
+ 'value': 'kStringEncodingMask'
+ },
+ {
+ 'name': 'TwoByteStringTag',
+ 'value': 'kTwoByteStringTag'
+ },
+ {
+ 'name': 'OneByteStringTag',
+ 'value': 'kOneByteStringTag'
+ },
+ {
+ 'name': 'StringRepresentationMask',
+ 'value': 'kStringRepresentationMask'
+ },
+ {
+ 'name': 'SeqStringTag',
+ 'value': 'kSeqStringTag'
+ },
+ {
+ 'name': 'ConsStringTag',
+ 'value': 'kConsStringTag'
+ },
+ {
+ 'name': 'ExternalStringTag',
+ 'value': 'kExternalStringTag'
+ },
+ {
+ 'name': 'SlicedStringTag',
+ 'value': 'kSlicedStringTag'
+ },
+ {
+ 'name': 'ThinStringTag',
+ 'value': 'kThinStringTag'
+ },
+ {
+ 'name': 'HeapObjectTag',
+ 'value': 'kHeapObjectTag'
+ },
+ {
+ 'name': 'HeapObjectTagMask',
+ 'value': 'kHeapObjectTagMask'
+ },
+ {
+ 'name': 'SmiTag',
+ 'value': 'kSmiTag'
+ },
+ {
+ 'name': 'SmiTagMask',
+ 'value': 'kSmiTagMask'
+ },
+ {
+ 'name': 'SmiValueShift',
+ 'value': 'kSmiTagSize'
+ },
+ {
+ 'name': 'SmiShiftSize',
+ 'value': 'kSmiShiftSize'
+ },
+ {
+ 'name': 'SystemPointerSize',
+ 'value': 'kSystemPointerSize'
+ },
+ {
+ 'name': 'SystemPointerSizeLog2',
+ 'value': 'kSystemPointerSizeLog2'
+ },
+ {
+ 'name': 'TaggedSize',
+ 'value': 'kTaggedSize'
+ },
+ {
+ 'name': 'TaggedSizeLog2',
+ 'value': 'kTaggedSizeLog2'
+ },
+ {
+ 'name': 'CodeKindFieldMask',
+ 'value': 'Code::KindField::kMask'
+ },
+ {
+ 'name': 'CodeKindFieldShift',
+ 'value': 'Code::KindField::kShift'
+ },
+ {
+ 'name': 'DeoptimizationDataInlinedFunctionCountIndex',
+ 'value': 'DeoptimizationData::kInlinedFunctionCountIndex'
+ },
+ {
+ 'name': 'DeoptimizationDataLiteralArrayIndex',
+ 'value': 'DeoptimizationData::kLiteralArrayIndex'
+ },
+ {
+ 'name': 'DeoptimizationDataOptimizationIdIndex',
+ 'value': 'DeoptimizationData::kOptimizationIdIndex'
+ },
+ {
+ 'name': 'DeoptimizationDataSharedFunctionInfoIndex',
+ 'value': 'DeoptimizationData::kSharedFunctionInfoIndex'
+ },
+ {
+ 'name': 'DeoptimizationDataInliningPositionsIndex',
+ 'value': 'DeoptimizationData::kInliningPositionsIndex'
+ },
+ {
+ 'name': 'CodeKindBytecodeHandler',
+ 'value': 'static_cast<int>(CodeKind::BYTECODE_HANDLER)'
+ },
+ {
+ 'name': 'CodeKindInterpretedFunction',
+ 'value': 'static_cast<int>(CodeKind::INTERPRETED_FUNCTION)'
+ },
+ {
+ 'name': 'CodeKindBaseline',
+ 'value': 'static_cast<int>(CodeKind::BASELINE)'
+ },
+ {
+ 'name': 'OddballFalse',
+ 'value': 'Oddball::kFalse'
+ },
+ {
+ 'name': 'OddballTrue',
+ 'value': 'Oddball::kTrue'
+ },
+ {
+ 'name': 'OddballTheHole',
+ 'value': 'Oddball::kTheHole'
+ },
+ {
+ 'name': 'OddballNull',
+ 'value': 'Oddball::kNull'
+ },
+ {
+ 'name': 'OddballArgumentsMarker',
+ 'value': 'Oddball::kArgumentsMarker'
+ },
+ {
+ 'name': 'OddballUndefined',
+ 'value': 'Oddball::kUndefined'
+ },
+ {
+ 'name': 'OddballUninitialized',
+ 'value': 'Oddball::kUninitialized'
+ },
+ {
+ 'name': 'OddballOther',
+ 'value': 'Oddball::kOther'
+ },
+ {
+ 'name': 'OddballException',
+ 'value': 'Oddball::kException'
+ },
+ {
+ 'name': 'ContextRegister',
+ 'value': 'kContextRegister.code()'
+ },
+ {
+ 'name': 'ReturnRegister0',
+ 'value': 'kReturnRegister0.code()'
+ },
+ {
+ 'name': 'JSFunctionRegister',
+ 'value': 'kJSFunctionRegister.code()'
+ },
+ {
+ 'name': 'InterpreterBytecodeOffsetRegister',
+ 'value': 'kInterpreterBytecodeOffsetRegister.code()'
+ },
+ {
+ 'name': 'InterpreterBytecodeArrayRegister',
+ 'value': 'kInterpreterBytecodeArrayRegister.code()'
+ },
+ {
+ 'name': 'RuntimeCallFunctionRegister',
+ 'value': 'kRuntimeCallFunctionRegister.code()'
+ },
+ {
+ 'name': 'prop_kind_Data',
+ 'value': 'static_cast<int>(PropertyKind::kData)'
+ },
+ {
+ 'name': 'prop_kind_Accessor',
+ 'value': 'static_cast<int>(PropertyKind::kAccessor)'
+ },
+ {
+ 'name': 'prop_kind_mask',
+ 'value': 'PropertyDetails::KindField::kMask'
+ },
+ {
+ 'name': 'prop_location_Descriptor',
+ 'value': 'static_cast<int>(PropertyLocation::kDescriptor)'
+ },
+ {
+ 'name': 'prop_location_Field',
+ 'value': 'static_cast<int>(PropertyLocation::kField)'
+ },
+ {
+ 'name': 'prop_location_mask',
+ 'value': 'PropertyDetails::LocationField::kMask'
+ },
+ {
+ 'name': 'prop_location_shift',
+ 'value': 'PropertyDetails::LocationField::kShift'
+ },
+ {
+ 'name': 'prop_attributes_NONE',
+ 'value': 'NONE'
+ },
+ {
+ 'name': 'prop_attributes_READ_ONLY',
+ 'value': 'READ_ONLY'
+ },
+ {
+ 'name': 'prop_attributes_DONT_ENUM',
+ 'value': 'DONT_ENUM'
+ },
+ {
+ 'name': 'prop_attributes_DONT_DELETE',
+ 'value': 'DONT_DELETE'
+ },
+ {
+ 'name': 'prop_attributes_mask',
+ 'value': 'PropertyDetails::AttributesField::kMask'
+ },
+ {
+ 'name': 'prop_attributes_shift',
+ 'value': 'PropertyDetails::AttributesField::kShift'
+ },
+ {
+ 'name': 'prop_index_mask',
+ 'value': 'PropertyDetails::FieldIndexField::kMask'
+ },
+ {
+ 'name': 'prop_index_shift',
+ 'value': 'PropertyDetails::FieldIndexField::kShift'
+ },
+ {
+ 'name': 'prop_representation_mask',
+ 'value': 'PropertyDetails::RepresentationField::kMask'
+ },
+ {
+ 'name': 'prop_representation_shift',
+ 'value': 'PropertyDetails::RepresentationField::kShift'
+ },
+ {
+ 'name': 'prop_representation_smi',
+ 'value': 'Representation::Kind::kSmi'
+ },
+ {
+ 'name': 'prop_representation_double',
+ 'value': 'Representation::Kind::kDouble'
+ },
+ {
+ 'name': 'prop_representation_heapobject',
+ 'value': 'Representation::Kind::kHeapObject'
+ },
+ {
+ 'name': 'prop_representation_tagged',
+ 'value': 'Representation::Kind::kTagged'
+ },
+ {
+ 'name': 'prop_desc_key',
+ 'value': 'DescriptorArray::kEntryKeyIndex'
+ },
+ {
+ 'name': 'prop_desc_details',
+ 'value': 'DescriptorArray::kEntryDetailsIndex'
+ },
+ {
+ 'name': 'prop_desc_value',
+ 'value': 'DescriptorArray::kEntryValueIndex'
+ },
+ {
+ 'name': 'prop_desc_size',
+ 'value': 'DescriptorArray::kEntrySize'
+ },
+ {
+ 'name': 'elements_fast_holey_elements',
+ 'value': 'HOLEY_ELEMENTS'
+ },
+ {
+ 'name': 'elements_fast_elements',
+ 'value': 'PACKED_ELEMENTS'
+ },
+ {
+ 'name': 'elements_dictionary_elements',
+ 'value': 'DICTIONARY_ELEMENTS'
+ },
+ {
+ 'name': 'bit_field2_elements_kind_mask',
+ 'value': 'Map::Bits2::ElementsKindBits::kMask'
+ },
+ {
+ 'name': 'bit_field2_elements_kind_shift',
+ 'value': 'Map::Bits2::ElementsKindBits::kShift'
+ },
+ {
+ 'name': 'bit_field3_is_dictionary_map_shift',
+ 'value': 'Map::Bits3::IsDictionaryMapBit::kShift'
+ },
+ {
+ 'name': 'bit_field3_number_of_own_descriptors_mask',
+ 'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kMask'
+ },
+ {
+ 'name': 'bit_field3_number_of_own_descriptors_shift',
+ 'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kShift'
+ },
+ {
+ 'name': 'class_Map__instance_descriptors_offset',
+ 'value': 'Map::kInstanceDescriptorsOffset'
+ },
+ {
+ 'name': 'off_fp_context_or_frame_type',
+ 'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'
+ },
+ {
+ 'name': 'off_fp_context',
+ 'value': 'StandardFrameConstants::kContextOffset'
+ },
+ {
+ 'name': 'off_fp_constant_pool',
+ 'value': 'StandardFrameConstants::kConstantPoolOffset'
+ },
+ {
+ 'name': 'off_fp_function',
+ 'value': 'StandardFrameConstants::kFunctionOffset'
+ },
+ {
+ 'name': 'off_fp_args',
+ 'value': 'StandardFrameConstants::kFixedFrameSizeAboveFp'
+ },
+ {
+ 'name': 'off_fp_bytecode_array',
+ 'value': 'UnoptimizedFrameConstants::kBytecodeArrayFromFp'
+ },
+ {
+ 'name':
+ 'off_fp_bytecode_offset',
+ 'value':
+ 'UnoptimizedFrameConstants::kBytecodeOffsetOrFeedbackVectorFromFp'
+ },
+ {
+ 'name': 'scopeinfo_idx_nparams',
+ 'value': 'ScopeInfo::kParameterCount'
+ },
+ {
+ 'name': 'scopeinfo_idx_ncontextlocals',
+ 'value': 'ScopeInfo::kContextLocalCount'
+ },
+ {
+ 'name': 'scopeinfo_idx_first_vars',
+ 'value': 'ScopeInfo::kVariablePartIndex'
+ },
+ {
+ 'name': 'jsarray_buffer_was_detached_mask',
+ 'value': 'JSArrayBuffer::WasDetachedBit::kMask'
+ },
+ {
+ 'name': 'jsarray_buffer_was_detached_shift',
+ 'value': 'JSArrayBuffer::WasDetachedBit::kShift'
+ },
+ {
+ 'name': 'context_idx_scope_info',
+ 'value': 'Context::SCOPE_INFO_INDEX'
+ },
+ {
+ 'name': 'context_idx_prev',
+ 'value': 'Context::PREVIOUS_INDEX'
+ },
+ {
+ 'name': 'context_min_slots',
+ 'value': 'Context::MIN_CONTEXT_SLOTS'
+ },
+ {
+ 'name': 'native_context_embedder_data_offset',
+ 'value': 'Internals::kNativeContextEmbedderDataOffset'
+ },
+ {
+ 'name': 'namedictionaryshape_prefix_size',
+ 'value': 'NameDictionaryShape::kPrefixSize'
+ },
+ {
+ 'name': 'namedictionaryshape_entry_size',
+ 'value': 'NameDictionaryShape::kEntrySize'
+ },
+ {
+ 'name': 'globaldictionaryshape_entry_size',
+ 'value': 'GlobalDictionaryShape::kEntrySize'
+ },
+ {
+ 'name': 'namedictionary_prefix_start_index',
+ 'value': 'NameDictionary::kPrefixStartIndex'
+ },
+ {
+ 'name': 'numberdictionaryshape_prefix_size',
+ 'value': 'NumberDictionaryShape::kPrefixSize'
+ },
+ {
+ 'name': 'numberdictionaryshape_entry_size',
+ 'value': 'NumberDictionaryShape::kEntrySize'
+ },
+ {
+ 'name': 'simplenumberdictionaryshape_prefix_size',
+ 'value': 'SimpleNumberDictionaryShape::kPrefixSize'
+ },
+ {
+ 'name': 'simplenumberdictionaryshape_entry_size',
+ 'value': 'SimpleNumberDictionaryShape::kEntrySize'
+ },
+ {
+ 'name': 'type_JSError__JS_ERROR_TYPE',
+ 'value': 'JS_ERROR_TYPE'
+ },
+]
#
# The following useful fields are missing accessors, so we define fake ones.
@@ -307,10 +549,7 @@ extras_accessors = [
'SharedFunctionInfo, flags, int, kFlagsOffset',
'SharedFunctionInfo, length, uint16_t, kLengthOffset',
'SlicedString, parent, String, kParentOffset',
- 'Code, flags, uint32_t, kFlagsOffset',
- 'Code, instruction_start, uintptr_t, kHeaderSize',
- 'Code, instruction_size, int, kInstructionSizeOffset',
- 'Code, deoptimization_data, FixedArray, kDeoptimizationDataOrInterpreterDataOffset',
+ 'InstructionStream, instruction_start, uintptr_t, kHeaderSize',
'String, length, int32_t, kLengthOffset',
'DescriptorArray, header_size, uintptr_t, kHeaderSize',
'ConsString, first, String, kFirstOffset',
@@ -713,8 +952,8 @@ def load_fields_from_file(filename):
#
# Emit a block of constants.
#
-def emit_set(out, consts):
- lines = set() # To remove duplicates.
+def emit_constants(out, consts):
+ lines = []
# Fix up overzealous parses. This could be done inside the
# parsers but as there are several, it's easiest to do it here.
@@ -722,10 +961,10 @@ def emit_set(out, consts):
for const in consts:
name = ws.sub('', const['name'])
value = ws.sub('', str(const['value'])) # Can be a number.
- lines.add('V8_EXPORT int v8dbg_%s = %s;\n' % (name, value))
+ lines.append('V8_EXPORT int v8dbg_%s = %s;' % (name, value))
- for line in lines:
- out.write(line);
+ # Generate without duplicates and with preserved order.
+ out.write('\n'.join(dict.fromkeys(lines)))
out.write('\n');
#
@@ -737,7 +976,7 @@ def emit_config():
out.write(header);
out.write('/* miscellaneous constants */\n');
- emit_set(out, consts_misc);
+ emit_constants(out, consts_misc);
out.write('/* class type information */\n');
consts = [];
@@ -748,7 +987,7 @@ def emit_config():
'value': typename
});
- emit_set(out, consts);
+ emit_constants(out, consts);
out.write('/* class hierarchy information */\n');
consts = [];
@@ -765,10 +1004,10 @@ def emit_config():
'value': 0
});
- emit_set(out, consts);
+ emit_constants(out, consts);
out.write('/* field information */\n');
- emit_set(out, fields);
+ emit_constants(out, fields);
out.write(footer);
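
The emit_set to emit_constants change swaps an unordered set for an ordered, duplicate-free list: dict.fromkeys drops repeated lines while keeping first-seen order, so the generated constants come out deterministically across runs. A short illustration with made-up constant lines:

    lines = [
        'V8_EXPORT int v8dbg_SmiTag = 0;',
        'V8_EXPORT int v8dbg_HeapObjectTag = 1;',
        'V8_EXPORT int v8dbg_SmiTag = 0;',  # duplicate, dropped on output
    ]
    # dict.fromkeys deduplicates while preserving insertion order (Python 3.7+).
    print('\n'.join(dict.fromkeys(lines)))
    # V8_EXPORT int v8dbg_SmiTag = 0;
    # V8_EXPORT int v8dbg_HeapObjectTag = 1;
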
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 7bef23bfb5..cf8a80ee0e 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -29,6 +29,8 @@ MY_DIR = os.path.dirname(os.path.realpath(__file__))
V8_DIR = os.path.dirname(MY_DIR)
OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
AUTO_EXCLUDE = [
+ # Platform-specific headers for iOS.
+ 'src/base/ios-headers.h',
# flag-definitions.h needs a mode set for being included.
'src/flags/flag-definitions.h',
# recorder.h should only be included conditionally.
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 24f880cd26..8a04078b19 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -163,7 +163,7 @@ export const CATEGORIES = new Map([
'BYTECODE_ARRAY_HANDLER_TABLE_TYPE',
'BYTECODE_ARRAY_TYPE',
'BYTECODE_HANDLER',
- 'CODE_DATA_CONTAINER_TYPE',
+ 'CODE_TYPE',
'DEOPTIMIZATION_DATA_TYPE',
'EMBEDDED_OBJECT_TYPE',
'FEEDBACK_CELL_TYPE',
diff --git a/deps/v8/tools/logreader.mjs b/deps/v8/tools/logreader.mjs
index e4d8b4d057..339017c488 100644
--- a/deps/v8/tools/logreader.mjs
+++ b/deps/v8/tools/logreader.mjs
@@ -35,6 +35,16 @@
export function parseString(field) { return field };
export const parseVarArgs = 'parse-var-args';
+// Checks fields for numbers that are not safe integers. Returns true if any are
+// found.
+function containsUnsafeInts(fields) {
+ for (let i = 0; i < fields.length; i++) {
+ let field = fields[i];
+ if ('number' == typeof(field) && !Number.isSafeInteger(field)) return true;
+ }
+ return false;
+}
+
/**
* Base class for processing log files.
*
@@ -44,7 +54,7 @@ export const parseVarArgs = 'parse-var-args';
* @constructor
*/
export class LogReader {
- constructor(timedRange=false, pairwiseTimedRange=false) {
+ constructor(timedRange=false, pairwiseTimedRange=false, useBigInt=false) {
this.dispatchTable_ = new Map();
this.timedRange_ = timedRange;
this.pairwiseTimedRange_ = pairwiseTimedRange;
@@ -54,6 +64,11 @@ export class LogReader {
// Variables for tracking of 'current-time' log entries:
this.hasSeenTimerMarker_ = false;
this.logLinesSinceLastTimerMarker_ = [];
+ // Flag to parse all numeric fields as BigInt to avoid arithmetic errors
+ // caused by memory addresses being greater than MAX_SAFE_INTEGER
+ this.useBigInt = useBigInt;
+ this.parseFrame = useBigInt ? BigInt : parseInt;
+ this.hasSeenUnsafeIntegers = false;
}
/**
@@ -180,11 +195,11 @@ export class LogReader {
const firstChar = frame[0];
if (firstChar === '+' || firstChar === '-') {
// An offset from the previous frame.
- prevFrame += parseInt(frame, 16);
+ prevFrame += this.parseFrame(frame);
fullStack.push(prevFrame);
// Filter out possible 'overflow' string.
} else if (firstChar !== 'o') {
- fullStack.push(parseInt(frame, 16));
+ fullStack.push(this.parseFrame(frame));
} else {
console.error(`Dropping unknown tick frame: ${frame}`);
}
@@ -216,6 +231,12 @@ export class LogReader {
parsedFields[i] = parser(fields[1 + i]);
}
}
+ if (!this.useBigInt) {
+ if (!this.hasSeenUnsafeIntegers && containsUnsafeInts(parsedFields)) {
+ console.warn(`Log line contains unsafe integers: ${fields}`);
+ this.hasSeenUnsafeIntegers = true;
+ }
+ }
// Run the processor.
await dispatch.processor(...parsedFields);
}
diff --git a/deps/v8/tools/process-wasm-compilation-times.py b/deps/v8/tools/process-wasm-compilation-times.py
index 37c5998657..db22147965 100755
--- a/deps/v8/tools/process-wasm-compilation-times.py
+++ b/deps/v8/tools/process-wasm-compilation-times.py
@@ -25,6 +25,8 @@ def Size(number):
modules = {}
max_module = 0
+total_lo_time = 0
+total_lo_size = 0
total_tf_time = 0
total_tf_size = 0
@@ -124,8 +126,16 @@ with open(sys.argv[1], "r") as f:
funcs_list.sort(key=lambda fun: fun.time_tf)
for f in funcs_list:
print(f)
- if f.time_tf > 0: total_tf_time += f.time_tf
- if f.size_tf > 0: total_tf_size += f.size_tf
+ if f.time_tf > 0:
+ total_tf_time += f.time_tf
+ if f.size_tf > 0:
+ total_tf_size += f.size_tf
+ if f.time_lo > 0:
+ total_lo_time += f.time_lo
+ if f.size_lo > 0:
+ total_lo_size += f.size_lo
print("Total TF time: %d" % total_tf_time)
print("Total TF size: %d" % total_tf_size)
+print("Total LO time: %d" % total_lo_time)
+print("Total LO size: %d" % total_lo_size)
diff --git a/deps/v8/tools/profile.mjs b/deps/v8/tools/profile.mjs
index 28490ea72e..5ca36b80d1 100644
--- a/deps/v8/tools/profile.mjs
+++ b/deps/v8/tools/profile.mjs
@@ -305,7 +305,6 @@ const kProfileOperationTick = 2;
* @constructor
*/
export class Profile {
- codeMap_ = new CodeMap();
topDownTree_ = new CallTree();
bottomUpTree_ = new CallTree();
c_entries_ = {__proto__:null};
@@ -313,6 +312,11 @@ export class Profile {
urlToScript_ = new Map();
warnings = new Set();
+ constructor(useBigInt=false) {
+ this.useBigInt = useBigInt;
+ this.codeMap_ = new CodeMap(useBigInt);
+ }
+
serializeVMSymbols() {
let result = this.codeMap_.getAllStaticEntriesWithAddresses();
result.concat(this.codeMap_.getAllLibraryEntriesWithAddresses())
@@ -513,7 +517,7 @@ export class Profile {
// it is safe to put them in a single code map.
let func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
if (func === null) {
- func = new FunctionEntry(name);
+ func = new FunctionEntry(name, this.useBigInt);
this.codeMap_.addCode(funcAddr, func);
} else if (func.name !== name) {
// Function object has been overwritten with a new one.
@@ -961,8 +965,8 @@ class FunctionEntry extends CodeEntry {
/** @type {Set<DynamicCodeEntry>} */
_codeEntries = new Set();
- constructor(name) {
- super(0, name);
+ constructor(name, useBigInt=false) {
+ super(useBigInt ? 0n : 0, name);
const index = name.lastIndexOf(' ');
this.functionName = 1 <= index ? name.substring(0, index) : '<anonymous>';
}
diff --git a/deps/v8/tools/profiling/linux-perf-chrome.py b/deps/v8/tools/profiling/linux-perf-chrome.py
index 0f3a7ec1cc..fee4541fad 100755
--- a/deps/v8/tools/profiling/linux-perf-chrome.py
+++ b/deps/v8/tools/profiling/linux-perf-chrome.py
@@ -54,8 +54,16 @@ chrome_options = optparse.OptionGroup(
"These convenience for a better script experience that are forward directly"
"to chrome. Any other chrome option can be passed after the '--' arguments"
"separator.")
-chrome_options.add_option("--user-data-dir", dest="user_data_dir", default=None)
-chrome_options.add_option("--js-flags", dest="js_flags")
+chrome_options.add_option(
+ "--user-data-dir",
+ dest="user_data_dir",
+ default=None,
+ help="Chrome's profile location. "
+ "By default a temp directory is used.")
+chrome_options.add_option(
+ "--js-flags",
+ dest="js_flags",
+ help="Comma-separated list of flags passed to V8.")
chrome_options.add_option(
"--renderer-cmd-prefix",
default=None,
@@ -136,10 +144,10 @@ with tempfile.TemporaryDirectory(prefix="chrome-") as tmp_dir_path:
str(chrome_bin),
]
if options.user_data_dir is None:
- cmd.append(f"--user-data-dir={tempdir}")
+ options.user_data_dir = tempdir
+ cmd.append(f"--user-data-dir={options.user_data_dir}")
cmd += [
"--no-sandbox",
- "--incognito",
"--enable-benchmarking",
"--no-first-run",
"--no-default-browser-check",
diff --git a/deps/v8/tools/profview/profile-utils.js b/deps/v8/tools/profview/profile-utils.js
index e1410550a7..69fed93c74 100644
--- a/deps/v8/tools/profview/profile-utils.js
+++ b/deps/v8/tools/profview/profile-utils.js
@@ -150,11 +150,7 @@ function findNextFrame(file, stack, stackPos, step, filter) {
codeId = stack[stackPos];
code = codeId >= 0 ? file.code[codeId] : undefined;
- if (filter) {
- let type = code ? code.type : undefined;
- let kind = code ? code.kind : undefined;
- if (filter(type, kind)) return stackPos;
- }
+ if (!filter || filter(code?.type, code?.kind)) return stackPos;
stackPos += step;
}
return -1;
@@ -399,8 +395,9 @@ class FunctionListTree {
class CategorySampler {
- constructor(file, bucketCount) {
+ constructor(file, bucketCount, filter) {
this.bucketCount = bucketCount;
+ this.filter = filter;
this.firstTime = file.ticks[0].tm;
let lastTime = file.ticks[file.ticks.length - 1].tm;
@@ -426,7 +423,8 @@ class CategorySampler {
let bucket = this.buckets[i];
bucket.total++;
- let codeId = (stack.length > 0) ? stack[0] : -1;
+ let stackPos = findNextFrame(file, stack, 0, 2, this.filter);
+ let codeId = stackPos >= 0 ? stack[stackPos] : -1;
let code = codeId >= 0 ? file.code[codeId] : undefined;
let kind = resolveCodeKindAndVmState(code, vmState);
bucket[kind]++;
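With this change a timeline bucket is attributed to the first stack frame that passes the attribution filter instead of always to stack[0]. A rough Python rendering of that frame search (the step of 2 mirrors how findNextFrame is invoked above, apparently skipping interleaved non-code entries); the function name and sample data are illustrative.

def find_next_frame(code_by_id, stack, start, step, keep=None):
    """Return the index of the first frame whose code passes `keep`, else -1."""
    pos = start
    while 0 <= pos < len(stack):
        code = code_by_id.get(stack[pos]) if stack[pos] >= 0 else None
        if keep is None or keep(code.get('type') if code else None,
                                code.get('kind') if code else None):
            return pos
        pos += step
    return -1

# Attribute a tick to JS frames only, skipping a leading C++/VM frame.
stack = [7, 0, 3, 0]                      # code ids at even positions (example)
codes = {7: {'type': 'CPP'}, 3: {'type': 'JS', 'kind': 'Opt'}}
keep_js = lambda type_, kind: type_ == 'JS'
print(find_next_frame(codes, stack, 0, 2, keep_js))   # -> 2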
diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js
index 4dd3fbf9d0..e98b0e5e65 100644
--- a/deps/v8/tools/profview/profview.js
+++ b/deps/v8/tools/profview/profview.js
@@ -908,6 +908,7 @@ class TimelineView {
height === oldState.timelineSize.height &&
newState.file === oldState.file &&
newState.currentCodeId === oldState.currentCodeId &&
+ newState.callTree.attribution === oldState.callTree.attribution &&
newState.start === oldState.start &&
newState.end === oldState.end) {
// No change, nothing to do.
@@ -945,11 +946,10 @@ class TimelineView {
this.selectionStart = (start - firstTime) / (lastTime - firstTime) * width;
this.selectionEnd = (end - firstTime) / (lastTime - firstTime) * width;
- let stackProcessor = new CategorySampler(file, bucketCount);
+ let filter = filterFromFilterId(this.currentState.callTree.attribution);
+ let stackProcessor = new CategorySampler(file, bucketCount, filter);
generateTree(file, 0, Infinity, stackProcessor);
- let codeIdProcessor = new FunctionTimelineProcessor(
- currentCodeId,
- filterFromFilterId(this.currentState.callTree.attribution));
+ let codeIdProcessor = new FunctionTimelineProcessor(currentCodeId, filter);
generateTree(file, 0, Infinity, codeIdProcessor);
let buffer = document.createElement("canvas");
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index cb72022310..3f3e4111e3 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -5,6 +5,7 @@
import argparse
import os
+import re
import sys
from common_includes import *
@@ -20,17 +21,17 @@ https://v8-roll.appspot.com/
This only works with a Google account.
CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
-CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_chromium_chromeos_msan_rel_ng
CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel""")
+REF_LINE_PATTERN = r"refs\/tags\/(\d+(?:\.\d+){2,3})-pgo\ ([0-9a-f]{40})"
+
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'preparation'
# Update v8 remote tracking branches.
self.GitFetchOrigin()
self.Git("fetch origin +refs/tags/*:refs/tags/*")
@@ -40,7 +41,6 @@ class DetectLastRoll(Step):
MESSAGE = "Detect commit ID of the last Chromium roll."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'detect_last_roll'
self["last_roll"] = self._options.last_roll
if not self["last_roll"]:
# Get last-rolled v8 revision from Chromium's DEPS file.
@@ -55,7 +55,6 @@ class DetectRevisionToRoll(Step):
MESSAGE = "Detect commit ID of the V8 revision to roll."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'detect_revision'
self["roll"] = self._options.revision
if self["roll"]:
# If the revision was passed on the cmd line, continue script execution
@@ -64,26 +63,31 @@ class DetectRevisionToRoll(Step):
# The revision that should be rolled. Check for the latest of the most
# recent releases based on commit timestamp.
- revisions = self.GetRecentReleases(
- max_age=self._options.max_age * DAY_IN_SECONDS)
- assert revisions, "Didn't find any recent release."
+ response = self.Git(
+ r"for-each-ref --count=80 --sort=-committerdate --format "
+ r"'%(refname) %(objectname)' 'refs/tags/*-pgo'"
+ )
+ version_revisions = []
+ for line in response.split('\n'):
+ match = re.fullmatch(REF_LINE_PATTERN, line)
+ if not match:
+ continue
+ version_revisions.append(match.groups())
+
+ assert version_revisions, "Didn't find any recent release."
# There must be some progress between the last roll and the new candidate
# revision (i.e. we don't go backwards). The revisions are ordered newest
# to oldest. It is possible that the newest timestamp has no progress
# compared to the last roll, i.e. if the newest release is a cherry-pick
# on a release branch. Then we look further.
- for revision in revisions:
- version = self.GetVersionTag(revision)
- assert version, "Internal error. All recent releases should have a tag"
-
+ for version, revision in version_revisions:
if LooseVersion(self["last_version"]) < LooseVersion(version):
self["roll"] = revision
break
else:
print("There is no newer v8 revision than the one in Chromium (%s)."
% self["last_roll"])
- self['json_output']['monitoring_state'] = 'up_to_date'
return True
@@ -91,7 +95,6 @@ class PrepareRollCandidate(Step):
MESSAGE = "Robustness checks of the roll candidate."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'prepare_candidate'
self["roll_title"] = self.GitLog(n=1, format="%s",
git_hash=self["roll"])
@@ -106,7 +109,6 @@ class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'switch_chromium'
cwd = self._options.chromium
self.InitialEnvironmentChecks(cwd)
# Check for a clean workdir.
@@ -121,7 +123,6 @@ class UpdateChromiumCheckout(Step):
MESSAGE = "Update the checkout and create a new branch."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'update_chromium'
cwd = self._options.chromium
self.GitCheckout("main", cwd=cwd)
self.DeleteBranch("work-branch", cwd=cwd)
@@ -137,7 +138,6 @@ class UploadCL(Step):
MESSAGE = "Create and upload CL."
def RunStep(self):
- self['json_output']['monitoring_state'] = 'upload'
cwd = self._options.chromium
# Patch DEPS file.
if self.Command("gclient", "setdep -r src/v8@%s" %
@@ -172,7 +172,6 @@ class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
- self['json_output']['monitoring_state'] = 'success'
print("Congratulations, you have successfully rolled %s into "
"Chromium."
% self["roll"])
@@ -189,8 +188,6 @@ class AutoRoll(ScriptsBase):
parser.add_argument("--last-roll",
help="The git commit ID of the last rolled version. "
"Auto-detected if not specified.")
- parser.add_argument("--max-age", default=7, type=int,
- help="Maximum age in days of the latest release.")
parser.add_argument("--revision",
help="Revision to roll. Auto-detected if not "
"specified."),
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index f4d99a7d64..97c0098b48 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -533,22 +533,13 @@ class Step(GitRecipesMixin):
self.WaitForResolvingConflicts(patch_file)
def GetVersionTag(self, revision):
- tag = self.Git("describe --tags %s" % revision).strip()
- return SanitizeVersionTag(tag)
+ tags = self.Git(f"tag --points-at {revision}").strip().split('\n')
+ for tag in tags:
+ sanitized_tag = SanitizeVersionTag(tag)
+ if sanitized_tag:
+ return sanitized_tag
- def GetRecentReleases(self, max_age):
- # Make sure tags are fetched.
- self.Git("fetch origin +refs/tags/*:refs/tags/*")
-
- # Current timestamp.
- time_now = int(self._side_effect_handler.GetUTCStamp())
-
- # List every tag from a given period.
- revisions = self.Git("rev-list --max-age=%d --tags" %
- int(time_now - max_age)).strip()
-
- # Filter out revisions who's tag is off by one or more commits.
- return list(filter(self.GetVersionTag, revisions.splitlines()))
+ return None
def GetLatestVersion(self):
# Use cached version if available.
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 4aeada90dc..82f475558a 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -521,10 +521,12 @@ git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
- ROLL_COMMIT_MSG = """Update V8 to version 3.22.4.
+ ROLL_HASH = "1234567890123456789012345678901234567890"
+ HASH_ALT_1 = "9999999999999999999999999999999999999999"
+ ROLL_COMMIT_MSG = f"""Update V8 to version 3.22.4.
Summary of changes available at:
-https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
+https://chromium.googlesource.com/v8/v8/+log/last_rol..{ROLL_HASH[:8]}
Please follow these instructions for assigning/CC'ing issues:
https://v8.dev/docs/triage-issues
@@ -534,7 +536,6 @@ https://v8-roll.appspot.com/
This only works with a Google account.
CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
-CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_chromium_chromeos_msan_rel_ng
CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
@@ -563,15 +564,14 @@ deps = {
Cmd("git fetch origin", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir),
- Cmd("git describe --tags last_roll_hsh", "3.22.4"),
- Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git rev-list --max-age=395200 --tags",
- "bad_tag\nroll_hsh\nhash_123"),
- Cmd("git describe --tags bad_tag", ""),
- Cmd("git describe --tags roll_hsh", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- Cmd("git describe --tags roll_hsh", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git tag --points-at last_roll_hsh", "3.22.4\n3.22.4-pgo"),
+ Cmd((
+ "git for-each-ref --count=80 --sort=-committerdate --format "
+ "'%(refname) %(objectname)' 'refs/tags/*-pgo'"
+ ), "\n".join([
+ f"refs/tags/3.22.4-pgo {self.ROLL_HASH}",
+ f"refs/tags/3.22.3-pgo {self.HASH_ALT_1}",
+ ])),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
@@ -579,8 +579,6 @@ deps = {
"-c", TEST_CONFIG["CHROMIUM"],
"--json-output", json_output_file])
self.assertEquals(0, result)
- json_output = json.loads(FileToText(json_output_file))
- self.assertEquals("up_to_date", json_output["monitoring_state"])
def testChromiumRoll(self):
@@ -600,24 +598,24 @@ deps = {
Cmd("git fetch origin", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir),
- Cmd("git describe --tags last_roll_hsh", "3.22.3.1"),
- Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git rev-list --max-age=395200 --tags",
- "bad_tag\nroll_hsh\nhash_123"),
- Cmd("git describe --tags bad_tag", ""),
- Cmd("git describe --tags roll_hsh", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- Cmd("git describe --tags roll_hsh", "3.22.4"),
- Cmd("git log -1 --format=%s roll_hsh", "Version 3.22.4\n"),
- Cmd("git describe --tags roll_hsh", "3.22.4"),
- Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
+ Cmd("git tag --points-at last_roll_hsh", "3.22.3.1\n22.3.1-pgo"),
+ Cmd((
+ "git for-each-ref --count=80 --sort=-committerdate --format "
+ "'%(refname) %(objectname)' 'refs/tags/*-pgo'"
+ ), "\n".join([
+ f"refs/tags/3.22.4-pgo {self.ROLL_HASH}",
+ f"refs/tags/3.22.3-pgo {self.HASH_ALT_1}",
+ ])),
+ Cmd(f"git log -1 --format=%s {self.ROLL_HASH}", "Version 3.22.4\n"),
+ Cmd(f"git tag --points-at {self.ROLL_HASH}", "3.22.4\n3.22.4-pgo"),
+ Cmd("git tag --points-at last_roll_hsh", "3.22.2.1\n22.2.1-pgo"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f main", "", cwd=chrome_dir),
Cmd("git branch", "", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git fetch origin", ""),
Cmd("git new-branch work-branch", "", cwd=chrome_dir),
- Cmd("gclient setdep -r src/v8@roll_hsh", "", cb=WriteDeps,
+ Cmd(f"gclient setdep -r src/v8@{self.ROLL_HASH}", "", cb=WriteDeps,
cwd=chrome_dir),
Cmd(("git commit -am \"%s\" "
"--author \"author@chromium.org <author@chromium.org>\"" %
@@ -638,9 +636,6 @@ deps = {
deps = FileToText(os.path.join(chrome_dir, "DEPS"))
self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
- json_output = json.loads(FileToText(json_output_file))
- self.assertEquals("success", json_output["monitoring_state"])
-
def testCheckLastPushRecently(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 9313d5715c..7b4ab418ae 100644..100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -127,6 +127,7 @@ import json
import logging
import math
import argparse
+import pathlib
import os
import re
import subprocess
@@ -787,6 +788,7 @@ class Platform(object):
if output.stderr: # pragma: no cover
# Print stderr for debugging.
logging.info(title % 'Stderr' + '\n%s', output.stderr)
+ if output.HasTimedOut():
logging.warning('>>> Test timed out after %ss.', runnable.timeout)
if output.exit_code != 0:
logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
@@ -1076,10 +1078,12 @@ def Main(argv):
parser.add_argument('--outdir-secondary',
help='Base directory with compile output without patch '
'or for reference build')
- parser.add_argument('--binary-override-path',
- help='JavaScript engine binary. By default, d8 under '
- 'architecture-specific build dir. '
- 'Not supported in conjunction with outdir-secondary.')
+ parser.add_argument(
+ '--binary-override-path',
+ '--d8-path',
+ help='JavaScript engine binary. By default, d8 under '
+ 'architecture-specific build dir. '
+ 'Not supported in conjunction with outdir-secondary.')
parser.add_argument('--prioritize',
help='Raise the priority to nice -20 for the '
'benchmarking process.Requires Linux, schedtool, and '
@@ -1123,9 +1127,13 @@ def Main(argv):
parser.add_argument('--dump-logcats-to',
help='Writes logcat output from each test into specified '
'directory. Only supported for android targets.')
- parser.add_argument('--run-count', type=int, default=0,
- help='Override the run count specified by the test '
- 'suite. The default 0 uses the suite\'s config.')
+ parser.add_argument(
+ '--run-count',
+ "--repeat",
+ type=int,
+ default=0,
+ help='Override the run count specified by the test '
+ 'suite. The default 0 uses the suite\'s config.')
parser.add_argument(
'--dry-run',
default=False,
@@ -1164,15 +1172,15 @@ def Main(argv):
os.path.join(workspace, args.outdir), args.arch)
default_binary_name = 'd8'
else:
- if not os.path.isfile(args.binary_override_path):
- logging.error('binary-override-path must be a file name')
+ path = pathlib.Path(args.binary_override_path).expanduser().resolve()
+ if not path.is_file():
+ logging.error(f'binary-override-path "{path}" must be a file name')
return INFRA_FAILURE_RETCODE
if args.outdir_secondary:
logging.error('specify either binary-override-path or outdir-secondary')
return INFRA_FAILURE_RETCODE
- args.shell_dir = os.path.abspath(
- os.path.dirname(args.binary_override_path))
- default_binary_name = os.path.basename(args.binary_override_path)
+ args.shell_dir = str(path.parent)
+ default_binary_name = path.name
if args.outdir_secondary:
args.shell_dir_secondary = find_build_directory(
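run_perf.py now normalizes --binary-override-path (new alias: --d8-path) through pathlib, so "~" and relative paths resolve before the file check. A tiny sketch of the same normalization with a made-up path:

import pathlib

# Hypothetical engine location; expanduser() handles "~" and resolve()
# yields an absolute path with symlinks resolved.
path = pathlib.Path("~/v8/out/x64.release/d8").expanduser().resolve()
if not path.is_file():
    raise SystemExit(f'binary-override-path "{path}" must be a file name')
shell_dir, default_binary_name = str(path.parent), path.name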
diff --git a/deps/v8/tools/sanitizers/sancov_formatter_test.py b/deps/v8/tools/sanitizers/sancov_formatter_test.py
index 008151d40e..8fa3337d64 100644
--- a/deps/v8/tools/sanitizers/sancov_formatter_test.py
+++ b/deps/v8/tools/sanitizers/sancov_formatter_test.py
@@ -59,9 +59,9 @@ EXPECTED_PROCESSED_OUTPUT = {
# Data for test_merge_instrumented_line_results. A list of absolute paths to
# all executables.
EXE_LIST = [
- '/path/to/d8',
- '/path/to/cctest',
- '/path/to/unittests',
+ '/path/to/d8',
+ '/path/to/cctest',
+ '/path/to/v8_unittests',
]
# Post-processed llvm symbolizer output as returned by
@@ -86,13 +86,13 @@ INSTRUMENTED_LINE_RESULTS = [
# the coverage mask is 0 for all lines. The line tuples remain sorted by
# line number and contain no duplicates.
EXPECTED_INSTRUMENTED_LINES_DATA = {
- 'version': 1,
- 'tests': ['cctest', 'd8', 'unittests'],
- 'files': {
- 'src/baz/bar.h': [[0, 0], [3, 0], [7, 0], [8, 0]],
- 'src/baz.cc': [[1, 0], [2, 0]],
- 'src/foo.cc': [[1, 0], [11, 0], [92, 0], [93, 0]],
- },
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'v8_unittests'],
+ 'files': {
+ 'src/baz/bar.h': [[0, 0], [3, 0], [7, 0], [8, 0]],
+ 'src/baz.cc': [[1, 0], [2, 0]],
+ 'src/foo.cc': [[1, 0], [11, 0], [92, 0], [93, 0]],
+ },
}
@@ -102,32 +102,32 @@ EXPECTED_INSTRUMENTED_LINES_DATA = {
# llvm-symbolizer output as a tuple including the executable name of each data
# set.
COVERED_LINE_RESULTS = [
- ({
- 'src/baz/bar.h': [3, 7],
- 'src/foo.cc': [11],
- }, 'd8'),
- ({
- 'src/baz/bar.h': [3, 7],
- 'src/baz.cc': [2],
- 'src/foo.cc': [1],
- }, 'cctest'),
- ({
- 'src/foo.cc': [92],
- 'src/baz.cc': [2],
- }, 'unittests'),
+ ({
+ 'src/baz/bar.h': [3, 7],
+ 'src/foo.cc': [11],
+ }, 'd8'),
+ ({
+ 'src/baz/bar.h': [3, 7],
+ 'src/baz.cc': [2],
+ 'src/foo.cc': [1],
+ }, 'cctest'),
+ ({
+ 'src/foo.cc': [92],
+ 'src/baz.cc': [2],
+ }, 'v8_unittests'),
]
# This shows initial instrumentation + coverage. The mask bits are:
-# cctest: 1, d8: 2, unittests:4. So a line covered by cctest and unittests
+# cctest: 1, d8: 2, v8_unittests:4. So a line covered by cctest and v8_unittests
# has a coverage mask of 0b101, e.g. line 2 in src/baz.cc.
EXPECTED_COVERED_LINES_DATA = {
- 'version': 1,
- 'tests': ['cctest', 'd8', 'unittests'],
- 'files': {
- 'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
- 'src/baz.cc': [[1, 0b0], [2, 0b101]],
- 'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
- },
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'v8_unittests'],
+ 'files': {
+ 'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+ 'src/baz.cc': [[1, 0b0], [2, 0b101]],
+ 'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+ },
}
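The mask bits described in the comment (cctest: 1, d8: 2, v8_unittests: 4) are simply one bit per test, by index in the sorted tests list. A small sketch of building and decoding such a mask; the helper names are illustrative, not part of the formatter.

tests = ['cctest', 'd8', 'v8_unittests']          # sorted, as in the data above

def mask_for(covering_tests):
    """OR together one bit per covering test; the bit is the index in `tests`."""
    mask = 0
    for name in covering_tests:
        mask |= 1 << tests.index(name)
    return mask

def covered_by(mask):
    return [name for i, name in enumerate(tests) if mask & (1 << i)]

assert mask_for(['cctest', 'v8_unittests']) == 0b101   # line 2 of src/baz.cc
assert covered_by(0b10) == ['d8']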
@@ -136,36 +136,36 @@ EXPECTED_COVERED_LINES_DATA = {
# Data for test_split.
EXPECTED_SPLIT_FILES = [
- (
- os.path.join('src', 'baz', 'bar.h.json'),
- {
- 'version': 1,
- 'tests': ['cctest', 'd8', 'unittests'],
- 'files': {
- 'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
- },
- },
- ),
- (
- os.path.join('src', 'baz.cc.json'),
- {
- 'version': 1,
- 'tests': ['cctest', 'd8', 'unittests'],
- 'files': {
- 'src/baz.cc': [[1, 0b0], [2, 0b101]],
- },
- },
- ),
- (
- os.path.join('src', 'foo.cc.json'),
- {
- 'version': 1,
- 'tests': ['cctest', 'd8', 'unittests'],
- 'files': {
- 'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
- },
- },
- ),
+ (
+ os.path.join('src', 'baz', 'bar.h.json'),
+ {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'v8_unittests'],
+ 'files': {
+ 'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+ },
+ },
+ ),
+ (
+ os.path.join('src', 'baz.cc.json'),
+ {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'v8_unittests'],
+ 'files': {
+ 'src/baz.cc': [[1, 0b0], [2, 0b101]],
+ },
+ },
+ ),
+ (
+ os.path.join('src', 'foo.cc.json'),
+ {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'v8_unittests'],
+ 'files': {
+ 'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+ },
+ },
+ ),
]
diff --git a/deps/v8/tools/snapshot/compare_mksnapshot_output.py b/deps/v8/tools/snapshot/compare_mksnapshot_output.py
new file mode 100644
index 0000000000..4b25a69755
--- /dev/null
+++ b/deps/v8/tools/snapshot/compare_mksnapshot_output.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# Copyright 2023 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+Checks for non-determinism in mksnapshot output by comparing output from
+multiple runs. Expected to be called with arguments like:
+
+<script> <report path> <gn gen dir> <gn out dir> <number of runs>
+
+The script will look for <number of runs> versions of the snapshot and the
+embedded builtins within the gn gen and out directories.
+"""
+
+import hashlib
+import sys
+
+from pathlib import Path
+
+ERROR_TEXT = """
+Non-deterministic %s.
+To reproduce, run mksnapshot multiple times and compare the outputs, or:
+1) Set gn variable v8_verify_deterministic_mksnapshot = true
+2) Build target verify_deterministic_mksnapshot.
+"""
+
+assert len(sys.argv) == 5
+report = Path(sys.argv[1])
+gendir = Path(sys.argv[2])
+outdir = Path(sys.argv[3])
+n_runs = int(sys.argv[4])
+
+
+def md5(path):
+ with open(path, 'rb') as f:
+ return hashlib.md5(f.read()).digest()
+
+
+def snapshot_file(i):
+ return outdir / f'snapshot_blob_run_{i}.bin'
+
+
+def builtins_file(i):
+ return gendir / f'embedded_run_{i}.S'
+
+
+def verify(file_fun, type):
+ different_hashes = set(md5(file_fun(i)) for i in range(n_runs))
+ if len(different_hashes) != 1:
+ print(ERROR_TEXT % type)
+ sys.exit(1)
+
+
+verify(snapshot_file, 'snapshot')
+verify(builtins_file, 'embedded builtins')
+
+# Dummy output file needed when running an action target.
+with open(report, 'w') as f:
+ f.write('Deterministic mksnapshot.')
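The script is presumably wired up as a GN action, but invoked by hand it would look roughly like this; the report path, directories, and run count below are placeholders.

import subprocess

subprocess.run([
    "python3", "tools/snapshot/compare_mksnapshot_output.py",
    "out/x64.release/determinism_report.txt",  # <report path>
    "out/x64.release/gen",                     # <gn gen dir>
    "out/x64.release",                         # <gn out dir>
    "2",                                       # <number of runs>
], check=True)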
diff --git a/deps/v8/tools/system-analyzer/log/tick.mjs b/deps/v8/tools/system-analyzer/log/tick.mjs
index 3c16cc6207..023a881117 100644
--- a/deps/v8/tools/system-analyzer/log/tick.mjs
+++ b/deps/v8/tools/system-analyzer/log/tick.mjs
@@ -42,7 +42,7 @@ export class TickLogEntry extends LogEntry {
return 'Idle';
}
const topOfStack = processedStack[0];
- if (typeof topOfStack === 'number') {
+ if (typeof topOfStack === 'number' || typeof topOfStack === 'bigint') {
// TODO(cbruni): Handle VmStack and native ticks better.
return 'Other';
}
diff --git a/deps/v8/tools/system-analyzer/processor.mjs b/deps/v8/tools/system-analyzer/processor.mjs
index 15eccd70c9..c4cd1f9915 100644
--- a/deps/v8/tools/system-analyzer/processor.mjs
+++ b/deps/v8/tools/system-analyzer/processor.mjs
@@ -47,7 +47,6 @@ class AsyncConsumer {
}
export class Processor extends LogReader {
- _profile = new Profile();
_codeTimeline = new Timeline();
_deoptTimeline = new Timeline();
_icTimeline = new Timeline();
@@ -70,12 +69,16 @@ export class Processor extends LogReader {
MAJOR_VERSION = 7;
MINOR_VERSION = 6;
- constructor() {
- super();
+ constructor(useBigInt = false) {
+ super(false, false, useBigInt);
+ this.useBigInt = useBigInt;
+ this.kZero = useBigInt ? 0n : 0;
+ this.parseAddress = useBigInt ? BigInt : parseInt;
this._chunkConsumer =
new AsyncConsumer((chunk) => this._processChunk(chunk));
+ this._profile = new Profile(useBigInt);
const propertyICParser = [
- parseInt, parseInt, parseInt, parseInt, parseString, parseString,
+ this.parseAddress, parseInt, parseInt, parseInt, parseString, parseString,
parseString, parseString, parseString, parseString
];
this.setDispatchTable({
@@ -88,46 +91,47 @@ export class Processor extends LogReader {
processor: this.processV8Version,
},
'shared-library': {
- parsers: [parseString, parseInt, parseInt, parseInt],
+ parsers: [
+ parseString, this.parseAddress, this.parseAddress, this.parseAddress
+ ],
processor: this.processSharedLibrary.bind(this),
isAsync: true,
},
'code-creation': {
parsers: [
- parseString, parseInt, parseInt, parseInt, parseInt, parseString,
- parseVarArgs
+ parseString, parseInt, parseInt, this.parseAddress, this.parseAddress,
+ parseString, parseVarArgs
],
processor: this.processCodeCreation
},
'code-deopt': {
parsers: [
- parseInt, parseInt, parseInt, parseInt, parseInt, parseString,
- parseString, parseString
+ parseInt, parseInt, this.parseAddress, parseInt, parseInt,
+ parseString, parseString, parseString
],
processor: this.processCodeDeopt
},
- 'code-move':
- {parsers: [parseInt, parseInt], processor: this.processCodeMove},
- 'code-delete': {parsers: [parseInt], processor: this.processCodeDelete},
+ 'code-move': {
+ parsers: [this.parseAddress, this.parseAddress],
+ processor: this.processCodeMove
+ },
+ 'code-delete':
+ {parsers: [this.parseAddress], processor: this.processCodeDelete},
'code-source-info': {
parsers: [
- parseInt, parseInt, parseInt, parseInt, parseString, parseString,
- parseString
+ this.parseAddress, parseInt, parseInt, parseInt, parseString,
+ parseString, parseString
],
processor: this.processCodeSourceInfo
},
'code-disassemble': {
- parsers: [
- parseInt,
- parseString,
- parseString,
- ],
+ parsers: [this.parseAddress, parseString, parseString],
processor: this.processCodeDisassemble
},
'feedback-vector': {
parsers: [
- parseInt, parseString, parseInt, parseInt, parseString, parseString,
- parseInt, parseInt, parseString
+ parseInt, parseString, parseInt, this.parseAddress, parseString,
+ parseString, parseInt, parseInt, parseString
],
processor: this.processFeedbackVector
},
@@ -135,11 +139,15 @@ export class Processor extends LogReader {
parsers: [parseInt, parseString, parseString],
processor: this.processScriptSource
},
- 'sfi-move':
- {parsers: [parseInt, parseInt], processor: this.processFunctionMove},
+ 'sfi-move': {
+ parsers: [this.parseAddress, this.parseAddress],
+ processor: this.processFunctionMove
+ },
'tick': {
- parsers:
- [parseInt, parseInt, parseInt, parseInt, parseInt, parseVarArgs],
+ parsers: [
+ this.parseAddress, parseInt, parseInt, this.parseAddress, parseInt,
+ parseVarArgs
+ ],
processor: this.processTick
},
'active-runtime-timer': undefined,
@@ -157,8 +165,8 @@ export class Processor extends LogReader {
{parsers: [parseInt, parseString], processor: this.processMapCreate},
'map': {
parsers: [
- parseString, parseInt, parseString, parseString, parseInt, parseInt,
- parseInt, parseString, parseString
+ parseString, parseInt, parseString, parseString, this.parseAddress,
+ parseInt, parseInt, parseString, parseString
],
processor: this.processMap
},
@@ -352,7 +360,7 @@ export class Processor extends LogReader {
let profilerEntry;
let stateName = '';
if (maybe_func.length) {
- const funcAddr = parseInt(maybe_func[0]);
+ const funcAddr = this.parseAddress(maybe_func[0]);
stateName = maybe_func[1] ?? '';
const state = Profile.parseState(maybe_func[1]);
profilerEntry = this._profile.addFuncCode(
@@ -404,7 +412,7 @@ export class Processor extends LogReader {
optimization_tier, invocation_count, profiler_ticks, fbv_string) {
const profCodeEntry = this._profile.findEntry(instructionStart);
if (!profCodeEntry) {
- console.warn('Didn\'t find code for FBV', {fbv, instructionStart});
+ console.warn('Didn\'t find code for FBV', {fbv_string, instructionStart});
return;
}
const fbv = new FeedbackVectorEntry(
@@ -439,13 +447,13 @@ export class Processor extends LogReader {
// that a callback calls itself. Instead we use tos_or_external_callback,
// as simply resetting PC will produce unaccounted ticks.
pc = tos_or_external_callback;
- tos_or_external_callback = 0;
+ tos_or_external_callback = this.kZero;
} else if (tos_or_external_callback) {
// Find out, if top of stack was pointing inside a JS function
// meaning that we have encountered a frameless invocation.
const funcEntry = this._profile.findEntry(tos_or_external_callback);
if (!funcEntry?.isJSFunction?.()) {
- tos_or_external_callback = 0;
+ tos_or_external_callback = this.kZero;
}
}
const entryStack = this._profile.recordTick(
diff --git a/deps/v8/tools/system-analyzer/view/helper.mjs b/deps/v8/tools/system-analyzer/view/helper.mjs
index 4389a92317..de5abbd3dc 100644
--- a/deps/v8/tools/system-analyzer/view/helper.mjs
+++ b/deps/v8/tools/system-analyzer/view/helper.mjs
@@ -335,8 +335,8 @@ export class Debouncer {
}
call(...args) {
- this.clear() this._timeoutId =
- window.setTimeout(this._callback, this._timeout, ...args)
+ this.clear();
+ this._timeoutId = window.setTimeout(this._callback, this._timeout, ...args);
}
clear() {
diff --git a/deps/v8/tools/system-analyzer/view/property-link-table.mjs b/deps/v8/tools/system-analyzer/view/property-link-table.mjs
index c5cc707c37..631b779ceb 100644
--- a/deps/v8/tools/system-analyzer/view/property-link-table.mjs
+++ b/deps/v8/tools/system-analyzer/view/property-link-table.mjs
@@ -121,8 +121,8 @@ DOM.defineCustomElement('view/property-link-table',
showSourcePositionButton.data = this._object;
showSourcePositionButton.title = 'Open the source position';
}
- let showRelatedButton = footer.appendChild(
- DOM.button('🕸 Related', this._showRelatedHandler));
+ let showRelatedButton =
+ footer.appendChild(DOM.button('🕸 Related', this._showRelatedHandler));
showRelatedButton.data = this._object;
showRelatedButton.title = 'Show all related events in all panels';
this._fragment.appendChild(footer);
diff --git a/deps/v8/tools/system-analyzer/view/script-panel.mjs b/deps/v8/tools/system-analyzer/view/script-panel.mjs
index 11f6e6d7b2..60a87aa142 100644
--- a/deps/v8/tools/system-analyzer/view/script-panel.mjs
+++ b/deps/v8/tools/system-analyzer/view/script-panel.mjs
@@ -229,15 +229,17 @@ class ToolTipTableBuilder {
}
addRow(name, subtypeName, entries) {
- const tr = DOM.tr();
- tr.appendChild(DOM.td(name));
- tr.appendChild(DOM.td(subtypeName));
- tr.appendChild(DOM.td(entries.length));
- const button = DOM.button('🔎', this._scriptPanel.showToolTipEntriesHandler);
- button.title = `Show all ${entries.length} ${name || subtypeName} entries.`
- button.data = entries;
- tr.appendChild(DOM.td(button));
- this.tableNode.appendChild(tr);
+ const tr = DOM.tr();
+ tr.appendChild(DOM.td(name));
+ tr.appendChild(DOM.td(subtypeName));
+ tr.appendChild(DOM.td(entries.length));
+ const button =
+ DOM.button('🔎', this._scriptPanel.showToolTipEntriesHandler);
+ button.title =
+ `Show all ${entries.length} ${name || subtypeName} entries.`
+ button.data = entries;
+ tr.appendChild(DOM.td(button));
+ this.tableNode.appendChild(tr);
}
}
@@ -245,33 +247,33 @@ class SourcePositionIterator {
_entries;
_index = 0;
constructor(sourcePositions) {
- this._entries = sourcePositions;
+ this._entries = sourcePositions;
}
* forLine(lineIndex) {
- this._findStart(lineIndex);
- while (!this._done() && this._current().line === lineIndex) {
- yield this._current();
- this._next();
- }
+ this._findStart(lineIndex);
+ while (!this._done() && this._current().line === lineIndex) {
+ yield this._current();
+ this._next();
+ }
}
_findStart(lineIndex) {
- while (!this._done() && this._current().line < lineIndex) {
- this._next();
- }
+ while (!this._done() && this._current().line < lineIndex) {
+ this._next();
+ }
}
_current() {
- return this._entries[this._index];
+ return this._entries[this._index];
}
_done() {
- return this._index >= this._entries.length;
+ return this._index >= this._entries.length;
}
_next() {
- this._index++;
+ this._index++;
}
}
@@ -279,11 +281,11 @@ function* lineIterator(source, startLine) {
let current = 0;
let line = startLine;
while (current < source.length) {
- const next = source.indexOf('\n', current);
- if (next === -1) break;
- yield [line, source.substring(current, next)];
- line++;
- current = next + 1;
+ const next = source.indexOf('\n', current);
+ if (next === -1) break;
+ yield [line, source.substring(current, next)];
+ line++;
+ current = next + 1;
}
if (current < source.length) yield [line, source.substring(current)];
}
@@ -298,7 +300,7 @@ class LineBuilder {
return map;
})();
static get colorMap() {
- return this._colorMap;
+ return this._colorMap;
}
_script;
@@ -307,75 +309,77 @@ class LineBuilder {
_sourcePositionToMarkers = new Map();
constructor(panel, script) {
- this._script = script;
- this._clickHandler = panel.handleSourcePositionClick.bind(panel);
- this._mouseoverHandler = panel.handleSourcePositionMouseOver.bind(panel);
+ this._script = script;
+ this._clickHandler = panel.handleSourcePositionClick.bind(panel);
+ this._mouseoverHandler = panel.handleSourcePositionMouseOver.bind(panel);
}
get sourcePositionToMarkers() {
- return this._sourcePositionToMarkers;
+ return this._sourcePositionToMarkers;
}
async createScriptNode(startLine) {
- const scriptNode = DOM.div('scriptNode');
-
- // TODO: sort on script finalization.
- this._script.sourcePositions.sort((a, b) => {
- if (a.line === b.line) return a.column - b.column;
- return a.line - b.line;
- });
-
- const sourcePositionsIterator =
- new SourcePositionIterator(this._script.sourcePositions);
- scriptNode.style.counterReset = `sourceLineCounter ${startLine - 1}`;
- for (let [lineIndex, line] of lineIterator(
- this._script.source, startLine)) {
- scriptNode.appendChild(
- this._createLineNode(sourcePositionsIterator, lineIndex, line));
- }
- if (this._script.sourcePositions.length !=
- this._sourcePositionToMarkers.size) {
- console.error('Not all SourcePositions were processed.');
- }
- return scriptNode;
+ const scriptNode = DOM.div('scriptNode');
+
+ // TODO: sort on script finalization.
+ this._script.sourcePositions.sort((a, b) => {
+ if (a.line === b.line) return a.column - b.column;
+ return a.line - b.line;
+ });
+
+ const sourcePositionsIterator =
+ new SourcePositionIterator(this._script.sourcePositions);
+ scriptNode.style.counterReset = `sourceLineCounter ${startLine - 1}`;
+ for (let [lineIndex, line] of lineIterator(
+ this._script.source, startLine)) {
+ scriptNode.appendChild(
+ this._createLineNode(sourcePositionsIterator, lineIndex, line));
+ }
+ if (this._script.sourcePositions.length !=
+ this._sourcePositionToMarkers.size) {
+ console.error('Not all SourcePositions were processed.');
+ }
+ return scriptNode;
}
_createLineNode(sourcePositionsIterator, lineIndex, line) {
- const lineNode = DOM.span();
- let columnIndex = 0;
- for (const sourcePosition of sourcePositionsIterator.forLine(lineIndex)) {
- const nextColumnIndex = sourcePosition.column - 1;
- lineNode.appendChild(document.createTextNode(
- line.substring(columnIndex, nextColumnIndex)));
- columnIndex = nextColumnIndex;
-
+ const lineNode = DOM.span();
+ let columnIndex = 0;
+ for (const sourcePosition of sourcePositionsIterator.forLine(lineIndex)) {
+ const nextColumnIndex = sourcePosition.column - 1;
+ lineNode.appendChild(document.createTextNode(
+ line.substring(columnIndex, nextColumnIndex)));
+ columnIndex = nextColumnIndex;
+
+ lineNode.appendChild(
+ this._createMarkerNode(line[columnIndex], sourcePosition));
+ columnIndex++;
+ }
lineNode.appendChild(
- this._createMarkerNode(line[columnIndex], sourcePosition));
- columnIndex++;
- }
- lineNode.appendChild(
- document.createTextNode(line.substring(columnIndex) + '\n'));
- return lineNode;
+ document.createTextNode(line.substring(columnIndex) + '\n'));
+ return lineNode;
}
_createMarkerNode(text, sourcePosition) {
- const marker = document.createElement('mark');
- this._sourcePositionToMarkers.set(sourcePosition, marker);
- marker.textContent = text;
- marker.sourcePosition = sourcePosition;
- marker.onclick = this._clickHandler;
- marker.onmouseover = this._mouseoverHandler;
-
- const entries = sourcePosition.entries;
- const groups = groupBy(entries, entry => entry.constructor);
- if (groups.length > 1) {
- const stops = gradientStopsFromGroups(
- entries.length, '%', groups, type => LineBuilder.colorMap.get(type));
- marker.style.backgroundImage = `linear-gradient(0deg,${stops.join(',')})`
- } else {
- marker.style.backgroundColor = LineBuilder.colorMap.get(groups[0].key)
- }
+ const marker = document.createElement('mark');
+ this._sourcePositionToMarkers.set(sourcePosition, marker);
+ marker.textContent = text;
+ marker.sourcePosition = sourcePosition;
+ marker.onclick = this._clickHandler;
+ marker.onmouseover = this._mouseoverHandler;
+
+ const entries = sourcePosition.entries;
+ const groups = groupBy(entries, entry => entry.constructor);
+ if (groups.length > 1) {
+ const stops = gradientStopsFromGroups(
+ entries.length, '%', groups,
+ type => LineBuilder.colorMap.get(type));
+ marker.style.backgroundImage =
+ `linear-gradient(0deg,${stops.join(',')})`
+ } else {
+ marker.style.backgroundColor = LineBuilder.colorMap.get(groups[0].key)
+ }
- return marker;
+ return marker;
}
}
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 826ee321fc..28fc668404 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -90,16 +90,8 @@ TEST_MAP = {
ModeConfig = namedtuple(
'ModeConfig', 'label flags timeout_scalefactor status_mode')
-DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort"]
-DEBUG_MODE = ModeConfig(
- label='debug',
- flags=DEBUG_FLAGS,
- timeout_scalefactor=4,
- status_mode="debug",
-)
-
RELEASE_MODE = ModeConfig(
label='release',
flags=RELEASE_FLAGS,
@@ -390,9 +382,22 @@ class BaseTestRunner(object):
print(">>> Latest GN build found: %s" % latest_config)
return os.path.join(DEFAULT_OUT_GN, latest_config)
+ def _custom_debug_mode(self):
+ custom_debug_flags = ["--nohard-abort"]
+ if self.build_config.verify_heap:
+ custom_debug_flags += ["--verify-heap"]
+ if self.build_config.slow_dchecks:
+ custom_debug_flags += ["--enable-slow-asserts"]
+ return ModeConfig(
+ label='debug',
+ flags=custom_debug_flags,
+ timeout_scalefactor=4,
+ status_mode="debug",
+ )
+
def _process_default_options(self):
if self.build_config.is_debug:
- self.mode_options = DEBUG_MODE
+ self.mode_options = self._custom_debug_mode()
elif self.build_config.dcheck_always_on:
self.mode_options = TRY_RELEASE_MODE
else:
@@ -523,8 +528,7 @@ class BaseTestRunner(object):
if self.options.verbose:
print('>>> Loading test suite: %s' % name)
suite = testsuite.TestSuite.Load(
- ctx, os.path.join(self.options.test_root, name), test_config,
- self.framework_name)
+ ctx, os.path.join(self.options.test_root, name), test_config)
if self._is_testsuite_supported(suite):
tests = suite.load_tests_from_disk(variables)
@@ -547,6 +551,8 @@ class BaseTestRunner(object):
sys.byteorder,
"cfi_vptr":
self.build_config.cfi_vptr,
+ "code_comments":
+ self.build_config.code_comments,
"component_build":
self.build_config.component_build,
"conservative_stack_scanning":
@@ -559,26 +565,45 @@ class BaseTestRunner(object):
self.build_config.single_generation,
"dcheck_always_on":
self.build_config.dcheck_always_on,
+ "debug_code":
+ self.build_config.debug_code,
"deopt_fuzzer":
False,
+ "disassembler":
+ self.build_config.disassembler,
"endurance_fuzzer":
False,
"gc_fuzzer":
False,
"gc_stress":
False,
- "gcov_coverage":
- self.build_config.gcov_coverage,
+ "gdbjit":
+ self.build_config.gdbjit,
+ # TODO(jgruber): Note this rename from maglev to has_maglev is required
+ # to avoid a name clash with the "maglev" variant. See also the TODO in
+ # statusfile.py (this really shouldn't be needed).
+ "has_maglev":
+ self.build_config.maglev,
+ "has_turbofan":
+ self.build_config.turbofan,
"has_webassembly":
self.build_config.webassembly,
"isolates":
self.options.isolates,
"is_clang":
self.build_config.is_clang,
+ "is_clang_coverage":
+ self.build_config.is_clang_coverage,
+ "is_debug":
+ self.build_config.is_debug,
+ "is_DEBUG_defined":
+ self.build_config.is_DEBUG_defined,
"is_full_debug":
self.build_config.is_full_debug,
"interrupt_fuzzer":
False,
+ "jitless_build_mode":
+ self.build_config.jitless_build_mode,
"mips_arch_variant":
self.build_config.mips_arch_variant,
"mode":
@@ -602,6 +627,8 @@ class BaseTestRunner(object):
"simulator_run":
self.build_config.simulator_run
and not self.options.dont_skip_simulator_slow_tests,
+ "slow_dchecks":
+ self.build_config.slow_dchecks,
"system":
self.target_os,
"third_party_heap":
@@ -612,6 +639,8 @@ class BaseTestRunner(object):
self.build_config.ubsan_vptr,
"verify_csa":
self.build_config.verify_csa,
+ "verify_heap":
+ self.build_config.verify_heap,
"lite_mode":
self.build_config.lite_mode,
"pointer_compression":
@@ -631,17 +660,21 @@ class BaseTestRunner(object):
return [] # pragma: no cover
def _create_test_config(self):
+ shard_id, shard_count = self.options.shard_info
timeout = self.build_config.timeout_scalefactor(
self.options.timeout * self.mode_options.timeout_scalefactor)
return TestConfig(
command_prefix=self.options.command_prefix,
extra_flags=self.options.extra_flags,
+ framework_name=self.framework_name,
isolates=self.options.isolates,
mode_flags=self.mode_options.flags + self._runner_flags(),
no_harness=self.options.no_harness,
noi18n=self.build_config.no_i18n,
random_seed=self.options.random_seed,
run_skipped=self.options.run_skipped,
+ shard_count=shard_count,
+ shard_id=shard_id,
shell_dir=self.outdir,
timeout=timeout,
verbose=self.options.verbose,
diff --git a/deps/v8/tools/testrunner/build_config.py b/deps/v8/tools/testrunner/build_config.py
index 6c340672b7..5868298a1c 100644
--- a/deps/v8/tools/testrunner/build_config.py
+++ b/deps/v8/tools/testrunner/build_config.py
@@ -23,37 +23,50 @@ class BuildConfig(object):
self.asan = build_config['is_asan']
self.cfi_vptr = build_config['is_cfi']
+ self.code_comments = build_config['v8_code_comments']
self.component_build = build_config['is_component_build']
+ self.concurrent_marking = build_config['v8_enable_concurrent_marking']
self.conservative_stack_scanning = build_config[
'v8_enable_conservative_stack_scanning']
self.control_flow_integrity = build_config['v8_control_flow_integrity']
- self.concurrent_marking = build_config['v8_enable_concurrent_marking']
- self.single_generation = build_config['v8_enable_single_generation']
self.dcheck_always_on = build_config['dcheck_always_on']
- self.gcov_coverage = build_config['is_gcov_coverage']
+ self.debug_code = build_config['v8_enable_debug_code']
+ self.dict_property_const_tracking = build_config[
+ 'v8_dict_property_const_tracking']
+ self.disassembler = build_config['v8_enable_disassembler']
+ self.gdbjit = build_config['v8_enable_gdbjit']
self.is_android = build_config['is_android']
self.is_clang = build_config['is_clang']
+ self.is_clang_coverage = build_config['is_clang_coverage']
self.is_debug = build_config['is_debug']
+ self.is_DEBUG_defined = build_config['is_DEBUG_defined']
self.is_full_debug = build_config['is_full_debug']
+ self.lite_mode = build_config['v8_enable_lite_mode']
+ self.maglev = build_config['v8_enable_maglev']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
+ self.pointer_compression = build_config['v8_enable_pointer_compression']
+ self.pointer_compression_shared_cage = build_config[
+ 'v8_enable_pointer_compression_shared_cage']
self.predictable = build_config['v8_enable_verify_predictable']
+ self.sandbox = build_config['v8_enable_sandbox']
+ self.shared_ro_heap = build_config['v8_enable_shared_ro_heap']
self.simulator_run = (
build_config['target_cpu'] != build_config['v8_target_cpu'])
+ self.single_generation = build_config['v8_enable_single_generation']
+ self.slow_dchecks = build_config['v8_enable_slow_dchecks']
+ self.third_party_heap = build_config['v8_enable_third_party_heap']
self.tsan = build_config['is_tsan']
+ self.turbofan = build_config['v8_enable_turbofan']
# TODO(machenbach): We only have ubsan not ubsan_vptr.
self.ubsan_vptr = build_config['is_ubsan_vptr']
self.verify_csa = build_config['v8_enable_verify_csa']
- self.lite_mode = build_config['v8_enable_lite_mode']
- self.pointer_compression = build_config['v8_enable_pointer_compression']
- self.pointer_compression_shared_cage = build_config[
- 'v8_enable_pointer_compression_shared_cage']
- self.shared_ro_heap = build_config['v8_enable_shared_ro_heap']
- self.sandbox = build_config['v8_enable_sandbox']
- self.third_party_heap = build_config['v8_enable_third_party_heap']
+ self.verify_heap = build_config['v8_enable_verify_heap']
self.webassembly = build_config['v8_enable_webassembly']
- self.dict_property_const_tracking = build_config[
- 'v8_dict_property_const_tracking']
+ self.write_barriers = not build_config['v8_disable_write_barriers']
+ # TODO(jgruber): Don't rename once it's no longer necessary to avoid
+ # conflicts with test variant names.
+ self.jitless_build_mode = build_config['v8_jitless']
# Export only for MIPS target
if self.arch in ['mips64', 'mips64el']:
self._mips_arch_variant = build_config['mips_arch_variant']
@@ -67,7 +80,8 @@ class BuildConfig(object):
@property
def no_js_shared_memory(self):
return (not self.shared_ro_heap) or (
- self.pointer_compression and not self.pointer_compression_shared_cage)
+ self.pointer_compression and
+ not self.pointer_compression_shared_cage) or (not self.write_barriers)
@property
def is_mips_arch(self):
@@ -134,22 +148,32 @@ class BuildConfig(object):
attrs = [
'asan',
'cfi_vptr',
+ 'code_comments',
'control_flow_integrity',
'dcheck_always_on',
- 'gcov_coverage',
+ 'debug_code',
+ 'dict_property_const_tracking',
+ 'disassembler',
+ 'gdbjit',
+ 'is_debug',
+ 'is_DEBUG_defined',
+ 'jitless_build_mode',
+ 'lite_mode',
+ 'maglev',
'msan',
'no_i18n',
- 'predictable',
- 'tsan',
- 'ubsan_vptr',
- 'verify_csa',
- 'lite_mode',
'pointer_compression',
'pointer_compression_shared_cage',
+ 'predictable',
'sandbox',
+ 'slow_dchecks',
'third_party_heap',
+ 'tsan',
+ 'turbofan',
+ 'ubsan_vptr',
+ 'verify_csa',
+ 'verify_heap',
'webassembly',
- 'dict_property_const_tracking',
]
detected_options = [attr for attr in attrs if getattr(self, attr, False)]
- return '\n'.join(detected_options)
+ return ', '.join(detected_options)
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index e0ef281b4c..af0981e83d 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -71,7 +71,7 @@ def handle_sigterm(process, abort_fun, enabled):
class BaseCommand(object):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
- verbose=False, resources_func=None, handle_sigterm=False):
+ verbose=False, test_case=None, handle_sigterm=False):
"""Initialize the command.
Args:
@@ -81,7 +81,7 @@ class BaseCommand(object):
timeout: Timeout in seconds.
env: Environment dict for execution.
verbose: Print additional output.
- resources_func: Callable, returning all test files needed by this command.
+ test_case: Test case reference.
handle_sigterm: Flag indicating if SIGTERM will be used to terminate the
underlying process. Should not be used from the main thread, e.g. when
using a command to list tests.
@@ -268,29 +268,21 @@ class AndroidCommand(BaseCommand):
driver = None
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
- verbose=False, resources_func=None, handle_sigterm=False):
+ verbose=False, test_case=None, handle_sigterm=False):
"""Initialize the command and all files that need to be pushed to the
Android device.
"""
- self.shell_name = os.path.basename(shell)
- self.shell_dir = os.path.dirname(shell)
- self.files_to_push = (resources_func or (lambda: []))()
-
- # Make all paths in arguments relative and also prepare files from arguments
- # for pushing to the device.
- rel_args = []
- find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
- for arg in (args or []):
- match = find_path_re.match(arg)
- if match:
- self.files_to_push.append(match.group(1))
- rel_args.append(
- re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
-
super(AndroidCommand, self).__init__(
- shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
+ shell, args=args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
verbose=verbose, handle_sigterm=handle_sigterm)
+ rel_args, files_from_args = args_with_relative_paths(args)
+
+ self.args = rel_args
+
+ test_case_resources = test_case.get_android_resources() if test_case else []
+ self.files_to_push = test_case_resources + files_from_args
+
def execute(self, **additional_popen_kwargs):
"""Execute the command on the device.
@@ -299,20 +291,18 @@ class AndroidCommand(BaseCommand):
if self.verbose:
print('# %s' % self)
- self.driver.push_executable(self.shell_dir, 'bin', self.shell_name)
+ shell_name = os.path.basename(self.shell)
+ shell_dir = os.path.dirname(self.shell)
- for abs_file in self.files_to_push:
- abs_dir = os.path.dirname(abs_file)
- file_name = os.path.basename(abs_file)
- rel_dir = os.path.relpath(abs_dir, BASE_DIR)
- self.driver.push_file(abs_dir, file_name, rel_dir)
+ self.driver.push_executable(shell_dir, 'bin', shell_name)
+ self.push_test_resources()
start_time = time.time()
return_code = 0
timed_out = False
try:
stdout = self.driver.run(
- 'bin', self.shell_name, self.args, '.', self.timeout, self.env)
+ 'bin', shell_name, self.args, '.', self.timeout, self.env)
except CommandFailedException as e:
return_code = e.status
stdout = e.output
@@ -332,6 +322,27 @@ class AndroidCommand(BaseCommand):
duration,
)
+ def push_test_resources(self):
+ for abs_file in self.files_to_push:
+ abs_dir = os.path.dirname(abs_file)
+ file_name = os.path.basename(abs_file)
+ rel_dir = os.path.relpath(abs_dir, BASE_DIR)
+ self.driver.push_file(abs_dir, file_name, rel_dir)
+
+
+def args_with_relative_paths(args):
+ rel_args = []
+ files_to_push = []
+ find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
+ for arg in (args or []):
+ match = find_path_re.match(arg)
+ if match:
+ files_to_push.append(match.group(1))
+ rel_args.append(
+ re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
+ return rel_args, files_to_push
+
+
Command = None
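args_with_relative_paths() both rewrites checkout-absolute paths in the argument list into device-relative ones and collects the files that must be pushed. A quick illustration of the intended input/output, using a made-up BASE_DIR and test argument:

import re

BASE_DIR = "/home/user/v8"   # stand-in for the checkout root used by the runner

def args_with_relative_paths(args):
    rel_args, files_to_push = [], []
    find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
    for arg in (args or []):
        match = find_path_re.match(arg)
        if match:
            files_to_push.append(match.group(1))
        rel_args.append(
            re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
    return rel_args, files_to_push

rel, push = args_with_relative_paths(
    ["--allow-natives-syntax", f"{BASE_DIR}/test/mjsunit/foo.js"])
print(rel)   # ['--allow-natives-syntax', 'test/mjsunit/foo.js']
print(push)  # ['/home/user/v8/test/mjsunit/foo.js']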
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 5f9766e85c..04485936b6 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -63,10 +63,12 @@ for var in [
"windows", "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv32",
"riscv64", "loong64"
]:
+ assert var not in VARIABLES
VARIABLES[var] = var
# Allow using variants as keywords.
for var in ALL_VARIANTS:
+ assert var not in VARIABLES
VARIABLES[var] = var
class StatusFile(object):
@@ -244,7 +246,16 @@ def ReadStatusFile(content, variables):
prefix_rules = {variant: {} for variant in ALL_VARIANTS}
prefix_rules[""] = {}
- variables.update(VARIABLES)
+ # This method can be called with the same `variables` object multiple times.
+ # Ensure we only update `variables` (and check it for consistency) once.
+ if ALWAYS not in variables:
+ # Ensure we don't silently overwrite any build variables with our set of
+ # default keywords in VARIABLES.
+ for var in VARIABLES:
+ assert var not in variables, (
+ "build_config variable '%s' conflicts with VARIABLES" % var)
+ variables.update(VARIABLES)
+
for conditional_section in ReadContent(content):
assert type(conditional_section) == list
assert len(conditional_section) == 2
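ReadStatusFile() can be called repeatedly with the same `variables` dict, so the keyword merge is now guarded by a sentinel key (ALWAYS is one of the injected keywords) and the clash check runs only once. The guard pattern in isolation, with toy values:

ALWAYS = "ALWAYS"
VARIABLES = {ALWAYS: True, "linux": "linux", "jitless": "jitless"}  # toy subset

def inject_keywords(variables):
    # Only merge the default keywords on the first call for this dict.
    if ALWAYS in variables:
        return
    for var in VARIABLES:
        assert var not in variables, (
            "build_config variable '%s' conflicts with VARIABLES" % var)
    variables.update(VARIABLES)

build_vars = {"arch": "x64", "mode": "release"}
inject_keywords(build_vars)   # merges keywords, checks for clashes once
inject_keywords(build_vars)   # second call is a no-op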
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 8130841e40..60d52e8d30 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -115,8 +115,7 @@ class TestLoader(object):
def _create_test(self, path, suite, **kwargs):
"""Converts paths into test objects using the given options"""
- return self.test_class(
- suite, path, self._path_to_name(path), self.test_config, **kwargs)
+ return self.test_class(suite, path, self._path_to_name(path), **kwargs)
def list_tests(self):
"""Loads and returns the test objects for a TestSuite"""
@@ -247,25 +246,31 @@ def _load_testsuite_module(name, root):
class TestSuite(object):
@staticmethod
- def Load(ctx, root, test_config, framework_name):
+ def Load(ctx, root, test_config):
name = root.split(os.path.sep)[-1]
with _load_testsuite_module(name, root) as module:
- return module.TestSuite(ctx, name, root, test_config, framework_name)
+ return module.TestSuite(ctx, name, root, test_config)
- def __init__(self, ctx, name, root, test_config, framework_name):
+ def __init__(self, ctx, name, root, test_config):
self.name = name # string
self.root = root # string containing path
self.test_config = test_config
- self.framework_name = framework_name # name of the test runner impl
self.tests = None # list of TestCase objects
self.statusfile = None
self._test_loader = self._test_loader_class()(ctx, self, self._test_class(),
self.test_config, self.root)
+ @property
+ def framework_name(self):
+ return self.test_config.framework_name
+
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
+ def statusfile_outcomes(self, test_name, variant):
+ return self.statusfile.get_outcomes(test_name, variant)
+
@property
def _test_loader_class(self):
raise NotImplementedError
diff --git a/deps/v8/tools/testrunner/local/testsuite_test.py b/deps/v8/tools/testrunner/local/testsuite_test.py
index fa7374218b..e2c34a55ee 100755
--- a/deps/v8/tools/testrunner/local/testsuite_test.py
+++ b/deps/v8/tools/testrunner/local/testsuite_test.py
@@ -26,20 +26,22 @@ class TestSuiteTest(unittest.TestCase):
self.test_config = TestConfig(
command_prefix=[],
extra_flags=[],
+ framework_name='standard_runner',
isolates=False,
mode_flags=[],
no_harness=False,
noi18n=False,
random_seed=0,
run_skipped=False,
+ shard_count=1,
+ shard_id=0,
shell_dir='fake_testsuite/fake_d8',
timeout=10,
verbose=False,
)
self.suite = TestSuite.Load(
- DefaultOSContext(PosixCommand), self.test_root, self.test_config,
- "standard_runner")
+ DefaultOSContext(PosixCommand), self.test_root, self.test_config)
def testLoadingTestSuites(self):
self.assertEqual(self.suite.name, "fake_testsuite")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index f44e445eca..e5344a4880 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -16,7 +16,11 @@ ALL_VARIANT_FLAGS = {
"jitless": [["--jitless"]],
"sparkplug": [["--sparkplug"]],
"maglev": [["--maglev"]],
- "stress_maglev": [["--maglev", "--stress-maglev"]],
+ "maglev_future": [["--maglev", "--maglev-future"]],
+ "stress_maglev": [[
+ "--maglev", "--stress-maglev",
+ "--optimize-on-next-call-optimizes-to-maglev"
+ ]],
"turboshaft": [["--turboshaft"]],
"concurrent_sparkplug": [["--concurrent-sparkplug", "--sparkplug"]],
"always_sparkplug": [["--always-sparkplug", "--sparkplug"]],
@@ -27,8 +31,14 @@ ALL_VARIANT_FLAGS = {
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
# For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
# "TurboFan-only" in the stress variant. The WebAssembly configuration is
- # independent of JS optimizations, so we can combine those configs.
- "nooptimization": [["--no-turbofan", "--liftoff", "--no-wasm-tier-up"]],
+ # independent of JS optimizations, so we can combine those configs. We
+ # disable lazy compilation to have one test variant that tests eager
+ # compilation. "Liftoff-only" and eager compilation is not a problem,
+ # because test functions typically do not get optimized to TurboFan anyway.
+ "nooptimization": [[
+ "--no-turbofan", "--liftoff", "--no-wasm-tier-up",
+ "--no-wasm-lazy-compilation"
+ ]],
"slow_path": [["--force-slow-path"]],
"stress": [[
"--no-liftoff", "--stress-lazy-source-positions",
@@ -47,27 +57,40 @@ ALL_VARIANT_FLAGS = {
"instruction_scheduling": [["--turbo-instruction-scheduling"]],
"stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"]
],
- "wasm_write_protect_code": [["--wasm-write-protect-code-memory"]],
# Google3 variants.
"google3_icu": [[]],
"google3_noicu": [[]],
}
+# Note these are specifically for the case when Turbofan is either fully
+# disabled (i.e. not part of the binary), or when all codegen is disallowed (in
+# jitless mode).
+kIncompatibleFlagsForNoTurbofan = [
+ "--turbofan", "--always-turbofan", "--liftoff", "--validate-asm",
+ "--maglev", "--stress-concurrent-inlining"
+]
+
# Flags that lead to a contradiction with the flags provided by the respective
# variant. This depends on the flags specified in ALL_VARIANT_FLAGS and on the
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_VARIANT = {
- "jitless": [
- "--turbofan", "--always-turbofan", "--liftoff", "--track-field-types",
- "--validate-asm", "--sparkplug", "--concurrent-sparkplug", "--maglev",
- "--always-sparkplug", "--regexp-tier-up", "--no-regexp-interpret-all"
+ "jitless":
+ kIncompatibleFlagsForNoTurbofan + [
+ "--track-field-types", "--sparkplug", "--concurrent-sparkplug",
+ "--always-sparkplug", "--regexp-tier-up",
+ "--no-regexp-interpret-all", "--interpreted-frames-native-stack"
+ ],
+ "nooptimization": [
+ "--turbofan", "--always-turbofan", "--stress-concurrent-inlining"
],
- "nooptimization": ["--always-turbofan"],
"slow_path": ["--no-force-slow-path"],
- "stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
+ "stress_concurrent_allocation": [
+ "--single-threaded", "--single-threaded-gc", "--predictable"
+ ],
"stress_concurrent_inlining": [
"--single-threaded", "--predictable", "--lazy-feedback-allocation",
- "--assert-types", "--no-concurrent-recompilation"
+ "--assert-types", "--no-concurrent-recompilation", "--no-turbofan",
+ "--jitless"
],
# The fast API tests initialize an embedder object that never needs to be
# serialized to the snapshot, so we don't have a
@@ -81,15 +104,13 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"sparkplug": ["--jitless", "--no-sparkplug"],
"concurrent_sparkplug": ["--jitless"],
"maglev": ["--jitless", "--no-maglev"],
+ "maglev_future": ["--jitless", "--no-maglev", "--no-maglev-future"],
"stress_maglev": ["--jitless"],
"always_sparkplug": ["--jitless", "--no-sparkplug"],
"code_serializer": [
"--cache=after-execute", "--cache=full-code-cache", "--cache=none"
],
"experimental_regexp": ["--no-enable-experimental-regexp-engine"],
- # There is a negative implication: --perf-prof disables
- # --wasm-write-protect-code-memory.
- "wasm_write_protect_code": ["--perf-prof"],
"assert_types": [
"--concurrent-recompilation", "--stress_concurrent_inlining",
"--no-assert-types"
@@ -101,17 +122,65 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
# in _get_statusfile_variables in base_runner.py.
# The conflicts might be directly contradictory flags or be caused by the
# implications defined in flag-definitions.h.
+# The keys of the following map support negation through '!', e.g. the rule
+#
+# "!code_comments": [...]
+#
+# applies when the code_comments build variable is NOT set.
INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
- "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*",
- "--stress-concurrent-inlining"]
- + INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"],
- "predictable": ["--parallel-compile-tasks-for-eager-toplevel",
- "--parallel-compile-tasks-for-lazy",
- "--concurrent-recompilation",
- "--stress-concurrent-allocation",
- "--stress-concurrent-inlining"],
- "dict_property_const_tracking": [
- "--stress-concurrent-inlining"],
+ "!code_comments": ["--code-comments"],
+ "!is_DEBUG_defined": [
+ "--check_handle_count",
+ "--code_stats",
+ "--dump_wasm_module",
+ "--enable_testing_opcode_in_wasm",
+ "--gc_verbose",
+ "--print_ast",
+ "--print_break_location",
+ "--print_global_handles",
+ "--print_handles",
+ "--print_scopes",
+ "--regexp_possessive_quantifier",
+ "--trace_backing_store",
+ "--trace_contexts",
+ "--trace_isolates",
+ "--trace_lazy",
+ "--trace_liftoff",
+ "--trace_module_status",
+ "--trace_normalization",
+ "--trace_turbo_escape",
+ "--trace_wasm_compiler",
+ "--trace_wasm_decoder",
+ "--trace_wasm_instances",
+ "--trace_wasm_interpreter",
+ "--trace_wasm_lazy_compilation",
+ "--trace_wasm_native_heap",
+ "--trace_wasm_serialization",
+ "--trace_wasm_stack_switching",
+ "--trace_wasm_streaming",
+ "--trap_on_abort",
+ ],
+ "!verify_heap": ["--verify-heap"],
+ "!debug_code": ["--debug-code"],
+ "!disassembler": [
+ "--print_all_code", "--print_code", "--print_opt_code",
+ "--print_code_verbose", "--print_builtin_code", "--print_regexp_code"
+ ],
+ "!slow_dchecks": ["--enable-slow-asserts"],
+ "!gdbjit": ["--gdbjit", "--gdbjit_full", "--gdbjit_dump"],
+ "!has_maglev": ["--maglev"],
+ "!has_turbofan":
+ kIncompatibleFlagsForNoTurbofan,
+ "jitless_build_mode":
+ INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"],
+ "lite_mode": ["--max-semi-space-size=*"] +
+ INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"],
+ "predictable": [
+ "--parallel-compile-tasks-for-eager-toplevel",
+ "--parallel-compile-tasks-for-lazy", "--concurrent-recompilation",
+ "--stress-concurrent-allocation", "--stress-concurrent-inlining"
+ ],
+ "dict_property_const_tracking": ["--stress-concurrent-inlining"],
}
# Flags that lead to a contradiction when a certain extra-flag is present.
@@ -120,15 +189,21 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
# The conflicts might be directly contradictory flags or be caused by the
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
- "--concurrent-recompilation": ["--predictable", "--assert-types"],
- "--parallel-compile-tasks-for-eager-toplevel": ["--predictable"],
- "--parallel-compile-tasks-for-lazy": ["--predictable"],
- "--gc-interval=*": ["--gc-interval=*"],
- "--optimize-for-size": ["--max-semi-space-size=*"],
- "--stress_concurrent_allocation":
+ "--concurrent-recompilation": [
+ "--predictable", "--assert-types", "--turboshaft-assert-types",
+ "--single-threaded"
+ ],
+ "--parallel-compile-tasks-for-eager-toplevel": ["--predictable"],
+ "--parallel-compile-tasks-for-lazy": ["--predictable"],
+ "--gc-interval=*": ["--gc-interval=*"],
+ "--optimize-for-size": ["--max-semi-space-size=*"],
+ "--stress_concurrent_allocation":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_allocation"],
- "--stress-concurrent-inlining":
+ "--stress-concurrent-inlining":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_inlining"],
+ "--turboshaft-assert-types": [
+ "--concurrent-recompilation", "--stress-concurrent-inlining"
+ ],
}
SLOW_VARIANTS = set([
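The '!'-prefixed keys above are matched against the build variables read from v8_build_config.json. A minimal standalone sketch of that lookup, with an invented helper name and sample variables:

def rule_applies(variable, build_variables):
    # A leading '!' negates the key: the rule applies when the variable is NOT set.
    if variable.startswith('!'):
        return not build_variables.get(variable[1:], False)
    return build_variables.get(variable, False)

# With code comments compiled out, passing --code-comments is a contradiction:
build_variables = {'code_comments': False, 'verify_heap': True}
assert rule_applies('!code_comments', build_variables)
assert not rule_applies('!verify_heap', build_variables)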
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index 68f85c9466..be9c78c4a2 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -155,8 +155,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
results = ResultsTracker.create(self.options)
execproc = ExecutionProc(ctx, self.options.j)
sigproc = self._create_signal_proc()
- progress = ProgressProc(ctx, self.options, self.framework_name,
- tests.test_count_estimate)
+ progress = ProgressProc(ctx, self.options, tests.test_count_estimate)
procs = [
loader,
NameFilterProc(args) if args else None,
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 5878091379..dddb1019c2 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -78,23 +78,23 @@ def read_file(file):
return f.read()
class TestCase(object):
- def __init__(self, suite, path, name, test_config):
+
+ def __init__(self, suite, path, name):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.name = name # string that identifies test in the status file
+ self.subtest_id = None # string that identifies subtests
self.variant = None # name of the used testing variant
self.variant_flags = [] # list of strings, flags specific to this test
# Fields used by the test processors.
self.origin = None # Test that this test is subtest of.
- self.processor = None # Processor that created this subtest.
+ # Processor that created this subtest, initialised to a default value
+ self.processor = DuckProcessor()
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Can output of this test be dropped
-
- # Test config contains information needed to build the command.
- self._test_config = test_config
self._random_seed = None # Overrides test config value if not None
# Outcomes
@@ -111,7 +111,8 @@ class TestCase(object):
subtest = copy.copy(self)
subtest.origin = self
subtest.processor = processor
- subtest.procid += '.%s' % subtest_id
+ subtest.subtest_id = subtest_id
+ subtest.procid += f'.{subtest.processor_name}-{subtest_id}'
subtest.keep_output |= keep_output
if random_seed:
subtest._random_seed = random_seed
@@ -130,7 +131,7 @@ class TestCase(object):
def not_flag(outcome):
return not is_flag(outcome)
- outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
+ outcomes = self.suite.statusfile_outcomes(self.name, self.variant)
self._statusfile_outcomes = list(filter(not_flag, outcomes))
self._statusfile_flags = list(filter(is_flag, outcomes))
self._expected_outcomes = (
@@ -245,10 +246,17 @@ class TestCase(object):
# Contradiction: flags specified through the "Flags:" annotation are
# incompatible with the build.
for variable, incompatible_flags in INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE.items():
- if self.suite.statusfile.variables[variable]:
- check_flags(
- incompatible_flags, file_specific_flags,
- "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\"" + variable + "\"]")
+ if variable.startswith("!"):
+ # `variable` is negated: apply the rule only if the build variable is NOT set.
+ if not self.suite.statusfile.variables[variable[1:]]:
+ check_flags(
+ incompatible_flags, file_specific_flags,
+ "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\"" + variable + "\"]")
+ else:
+ if self.suite.statusfile.variables[variable]:
+ check_flags(
+ incompatible_flags, file_specific_flags,
+ "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\"" + variable + "\"]")
# Contradiction: flags passed through --extra-flags are incompatible.
for extra_flag, incompatible_flags in INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG.items():
@@ -259,9 +267,25 @@ class TestCase(object):
return self._expected_outcomes
@property
+ def test_config(self):
+ return self.suite.test_config
+
+ @property
+ def framework_name(self):
+ return self.test_config.framework_name
+
+ @property
+ def shard_id(self):
+ return self.test_config.shard_id
+
+ @property
+ def shard_count(self):
+ return self.test_config.shard_count
+
+ @property
def do_skip(self):
return (statusfile.SKIP in self._statusfile_outcomes and
- not self.suite.test_config.run_skipped)
+ not self.test_config.run_skipped)
@property
def is_heavy(self):
@@ -344,10 +368,10 @@ class TestCase(object):
@property
def random_seed(self):
- return self._random_seed or self._test_config.random_seed
+ return self._random_seed or self.test_config.random_seed
def _get_extra_flags(self):
- return self._test_config.extra_flags
+ return self.test_config.extra_flags
def _get_variant_flags(self):
return self.variant_flags
@@ -360,7 +384,7 @@ class TestCase(object):
return self._statusfile_flags
def _get_mode_flags(self):
- return self._test_config.mode_flags
+ return self.test_config.mode_flags
def _get_source_flags(self):
return []
@@ -372,7 +396,7 @@ class TestCase(object):
return []
def _get_timeout(self, params):
- timeout = self._test_config.timeout
+ timeout = self.test_config.timeout
if "--jitless" in params:
timeout *= 2
if "--no-turbofan" in params:
@@ -393,13 +417,13 @@ class TestCase(object):
def _create_cmd(self, ctx, shell, params, env, timeout):
return ctx.command(
- cmd_prefix=self._test_config.command_prefix,
- shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
+ cmd_prefix=self.test_config.command_prefix,
+ shell=os.path.abspath(os.path.join(self.test_config.shell_dir, shell)),
args=params,
env=env,
timeout=timeout,
- verbose=self._test_config.verbose,
- resources_func=self._get_resources,
+ verbose=self.test_config.verbose,
+ test_case=self,
handle_sigterm=True,
)
@@ -419,14 +443,6 @@ class TestCase(object):
def _get_source_path(self):
return None
- def _get_resources(self):
- """Returns a list of absolute paths with additional files needed by the
- test case.
-
- Used to push additional files to Android devices.
- """
- return []
-
def skip_predictable(self):
"""Returns True if the test case is not suitable for predictable testing."""
return True
@@ -454,13 +470,22 @@ class TestCase(object):
def __str__(self):
return self.full_name
+ def test_suffixes(self):
+ suffixes = self.origin.test_suffixes() if self.origin else []
+ current_suffix = self.processor.test_suffix(self)
+ if current_suffix:
+ suffixes.append(str(current_suffix))
+ return suffixes
-class D8TestCase(TestCase):
- def get_shell(self):
- return "d8"
+ @property
+ def rdb_test_id(self):
+ suffixes = '/'.join(self.test_suffixes())
+ full_suffix = ('//' + suffixes) if suffixes else ''
+ return self.full_name + full_suffix
- def _get_shell_flags(self):
- return ['--test']
+ @property
+ def processor_name(self):
+ return self.processor.name
def _get_resources_for_file(self, file):
"""Returns for a given file a list of absolute paths of files needed by the
@@ -494,8 +519,12 @@ class D8TestCase(TestCase):
add_import_path(match.group(1))
return result
- def _get_resources(self):
- """Returns the list of files needed by a test case."""
+ def get_android_resources(self):
+ """Returns a list of absolute paths with additional files needed by the
+ test case.
+
+ Used to push additional files to Android devices.
+ """
if not self._get_source_path():
return []
result = set()
@@ -511,6 +540,25 @@ class D8TestCase(TestCase):
to_check.append(resource)
return sorted(list(result))
+
+class DuckProcessor:
+ """Dummy default processor for original tests implemented by duck-typing."""
+
+ def test_suffix(self, test):
+ return None
+
+ @property
+ def name(self):
+ return None
+
+
+class D8TestCase(TestCase):
+ def get_shell(self):
+ return "d8"
+
+ def _get_shell_flags(self):
+ return ['--test']
+
def skip_predictable(self):
"""Returns True if the test case is not suitable for predictable testing."""
return (statusfile.FAIL in self.expected_outcomes or
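With the DuckProcessor default, an original test contributes no suffix, so its rdb_test_id stays equal to full_name; each subtest then appends its processor's suffix behind '//'. The new testcase_test.py below asserts this end to end; the id composition itself reduces to roughly the following (names invented):

def rdb_test_id(full_name, suffixes):
    # suffixes collected from the chain of processors that created subtests
    joined = '/'.join(suffixes)
    return full_name + ('//' + joined if joined else '')

assert rdb_test_id('fakeSuite/parent', []) == 'fakeSuite/parent'
assert rdb_test_id('fakeSuite/parent', ['fakep/0']) == 'fakeSuite/parent//fakep/0'
assert (rdb_test_id('fakeSuite/parent', ['fakep/0', 'fakep/1'])
        == 'fakeSuite/parent//fakep/0/fakep/1')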
diff --git a/deps/v8/tools/testrunner/objects/testcase_test.py b/deps/v8/tools/testrunner/objects/testcase_test.py
new file mode 100644
index 0000000000..b1031c6ea4
--- /dev/null
+++ b/deps/v8/tools/testrunner/objects/testcase_test.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.objects.testcase import TestCase
+
+
+class TestCaseTest(unittest.TestCase):
+
+ def testSubtestsProperties(self):
+ test = TestCase(
+ suite=FakeSuite(),
+ path='far/away',
+ name='parent')
+ self.assertEqual(test.rdb_test_id, 'fakeSuite/parent')
+ # provided by DuckProcessor
+ self.assertEqual(test.processor.name, None)
+ self.assertEqual(test.procid, 'fakeSuite/parent')
+ self.assertEqual(test.keep_output, False)
+
+ subtest = test.create_subtest(FakeProcessor(), 0, keep_output=True)
+ self.assertEqual(subtest.rdb_test_id, 'fakeSuite/parent//fakep/0')
+ # provided by FakeProcessor
+ self.assertEqual(subtest.processor.name, 'fake_processor1')
+ self.assertEqual(subtest.procid, 'fakeSuite/parent.fake_processor1-0')
+ self.assertEqual(subtest.keep_output, True)
+
+ subsubtest = subtest.create_subtest(FakeProcessor(), 1)
+ self.assertEqual(subsubtest.rdb_test_id,
+ 'fakeSuite/parent//fakep/0/fakep/1')
+ # provided by FakeProcessor
+ self.assertEqual(subsubtest.processor.name, 'fake_processor2')
+ self.assertEqual(subsubtest.procid,
+ 'fakeSuite/parent.fake_processor1-0.fake_processor2-1')
+ self.assertEqual(subsubtest.keep_output, True)
+
+
+class FakeSuite:
+
+ @property
+ def name(self):
+ return 'fakeSuite'
+
+ def statusfile_outcomes(self, name, variant):
+ return []
+
+
+class FakeProcessor:
+ instance_count = 0
+
+ def __init__(self):
+ FakeProcessor.instance_count += 1
+ self.idx = FakeProcessor.instance_count
+
+ @property
+ def name(self):
+ return f'fake_processor{self.idx}'
+
+ def test_suffix(self, test):
+ return f'fakep/{test.subtest_id}'
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 5350e7eb91..3723d734c8 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -294,8 +294,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
outproc_factory = predictable.get_outproc
execproc = ExecutionProc(ctx, jobs, outproc_factory)
sigproc = self._create_signal_proc()
- progress = ProgressProc(ctx, self.options, self.framework_name,
- tests.test_count_estimate)
+ progress = ProgressProc(ctx, self.options, tests.test_count_estimate)
procs = [
loader,
NameFilterProc(args) if args else None,
diff --git a/deps/v8/tools/testrunner/standard_runner_test.py b/deps/v8/tools/testrunner/standard_runner_test.py
index 64154e0182..770a29bf88 100644
--- a/deps/v8/tools/testrunner/standard_runner_test.py
+++ b/deps/v8/tools/testrunner/standard_runner_test.py
@@ -163,6 +163,34 @@ class StandardRunnerTest(TestRunnerTest):
# This is redundant to the command. Needs investigation.
result.json_content_equals('expected_test_results1.json')
+ def testRDB(self):
+ with self.with_fake_rdb() as records:
+ # sweet/bananaflakes fails first time on stress but passes on default
+ def tag_dict(tags):
+ return {t['key']: t['value'] for t in tags}
+
+ self.run_tests(
+ '--variants=default,stress',
+ '--rerun-failures-count=2',
+ '--time',
+ 'sweet',
+ baseroot='testroot2',
+ infra_staging=False,
+ )
+
+ self.assertEqual(len(records), 3)
+ self.assertEqual(records[0]['testId'], 'sweet/bananaflakes//stress')
+ self.assertEqual(tag_dict(records[0]['tags'])['run'], '1')
+ self.assertFalse(records[0]['expected'])
+
+ self.assertEqual(records[1]['testId'], 'sweet/bananaflakes//stress')
+ self.assertEqual(tag_dict(records[1]['tags'])['run'], '2')
+ self.assertTrue(records[1]['expected'])
+
+ self.assertEqual(records[2]['testId'], 'sweet/bananaflakes//default')
+ self.assertEqual(tag_dict(records[2]['tags'])['run'], '1')
+ self.assertTrue(records[2]['expected'])
+
def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
result = self.run_tests(
@@ -203,18 +231,16 @@ class StandardRunnerTest(TestRunnerTest):
v8_enable_sandbox=False
)
)
- expect_text = (
- '>>> Autodetected:\n'
- 'asan\n'
- 'cfi_vptr\n'
- 'dcheck_always_on\n'
- 'msan\n'
- 'no_i18n\n'
- 'tsan\n'
- 'ubsan_vptr\n'
- 'webassembly\n'
- '>>> Running tests for ia32.release')
- result.stdout_includes(expect_text)
+ result.stdout_includes('>>> Autodetected:')
+ result.stdout_includes('asan')
+ result.stdout_includes('cfi_vptr')
+ result.stdout_includes('dcheck_always_on')
+ result.stdout_includes('msan')
+ result.stdout_includes('no_i18n')
+ result.stdout_includes('tsan')
+ result.stdout_includes('ubsan_vptr')
+ result.stdout_includes('webassembly')
+ result.stdout_includes('>>> Running tests for ia32.release')
result.has_returncode(0)
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
diff --git a/deps/v8/tools/testrunner/test_config.py b/deps/v8/tools/testrunner/test_config.py
index 7cf65b9726..00a68e2acd 100644
--- a/deps/v8/tools/testrunner/test_config.py
+++ b/deps/v8/tools/testrunner/test_config.py
@@ -9,18 +9,22 @@ class TestConfig(object):
def __init__(self,
command_prefix,
extra_flags,
+ framework_name,
isolates,
mode_flags,
no_harness,
noi18n,
random_seed,
run_skipped,
+ shard_count,
+ shard_id,
shell_dir,
timeout,
verbose,
regenerate_expected_files=False):
self.command_prefix = command_prefix
self.extra_flags = extra_flags
+ self.framework_name = framework_name
self.isolates = isolates
self.mode_flags = mode_flags
self.no_harness = no_harness
@@ -28,6 +32,8 @@ class TestConfig(object):
# random_seed is always not None.
self.random_seed = random_seed or random_utils.random_seed()
self.run_skipped = run_skipped
+ self.shard_count = shard_count
+ self.shard_id = shard_id
self.shell_dir = shell_dir
self.timeout = timeout
self.verbose = verbose
diff --git a/deps/v8/tools/testrunner/testdata/expected_test_results1.json b/deps/v8/tools/testrunner/testdata/expected_test_results1.json
index 9b910c6d96..9ce5797a60 100644
--- a/deps/v8/tools/testrunner/testdata/expected_test_results1.json
+++ b/deps/v8/tools/testrunner/testdata/expected_test_results1.json
@@ -1,158 +1,176 @@
{
- "duration_mean": 1,
+ "duration_mean": 1,
"results": [
{
- "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "crash_state": "",
+ "crash_type": "",
+ "duration": 1,
"error_details": "+Mock diff",
- "exit_code": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "shard_count": 1,
+ "shard_id": 0,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "crash_state": "",
+ "crash_type": "",
+ "duration": 1,
"error_details": "+Mock diff",
- "exit_code": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 2,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "shard_count": 1,
+ "shard_id": 0,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "crash_state": "",
+ "crash_type": "",
+ "duration": 1,
"error_details": "+Mock diff",
- "exit_code": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 3,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "shard_count": 1,
+ "shard_id": 0,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
}
- ],
- "slowest_tests": [
+ ],
+ "slowest_tests": [
{
- "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "marked_slow": true,
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": true,
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "shard_count": 1,
+ "shard_id": 0,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "marked_slow": true,
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 2,
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": true,
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "shard_count": 1,
+ "shard_id": 0,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "marked_slow": true,
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 3,
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": true,
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "shard_count": 1,
+ "shard_id": 0,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
}
- ],
+ ],
"test_total": 3
-} \ No newline at end of file
+}
diff --git a/deps/v8/tools/testrunner/testdata/expected_test_results2.json b/deps/v8/tools/testrunner/testdata/expected_test_results2.json
index e22e5459bc..8eaf6a577a 100644
--- a/deps/v8/tools/testrunner/testdata/expected_test_results2.json
+++ b/deps/v8/tools/testrunner/testdata/expected_test_results2.json
@@ -1,104 +1,116 @@
{
- "duration_mean": 1,
+ "duration_mean": 1,
"results": [
{
- "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "crash_state": "",
+ "crash_type": "",
+ "duration": 1,
"error_details": null,
- "exit_code": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "shard_count": 1,
+ "shard_id": 0,
+ "stderr": "",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "crash_state": "",
+ "crash_type": "",
+ "duration": 1,
"error_details": null,
- "exit_code": 0,
+ "exit_code": 0,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "PASS",
- "run": 2,
- "stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "PASS",
+ "run": 2,
+ "shard_count": 1,
+ "shard_id": 0,
+ "stderr": "",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
}
- ],
+ ],
"slowest_tests": [
{
- "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 0,
+ "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 0,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "marked_slow": false,
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "",
- "run": 2,
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": false,
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "",
+ "run": 2,
+ "shard_count": 1,
+ "shard_id": 0,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
"--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "marked_slow": false,
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "target_name": "d8_mocked.py",
- "variant": "default",
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": false,
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "shard_count": 1,
+ "shard_id": 0,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
}
- ],
+ ],
"test_total": 2
-}
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json b/deps/v8/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json
index a2f651bc43..02af7319d7 100644
--- a/deps/v8/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json
+++ b/deps/v8/tools/testrunner/testdata/testroot1/out/build/v8_build_config.json
@@ -5,10 +5,10 @@
"is_asan": false,
"is_cfi": false,
"is_clang": true,
+ "is_clang_coverage": false,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
- "is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
@@ -20,14 +20,27 @@
"v8_enable_conservative_stack_scanning": false,
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": true,
+ "v8_disable_write_barriers": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
- "v8_dict_property_const_tracking": false
+ "v8_dict_property_const_tracking": false,
+ "v8_code_comments": false,
+ "v8_enable_debug_code": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
+ "v8_enable_maglev": false,
+ "v8_enable_disassembler": false,
+ "is_DEBUG_defined": false,
+ "v8_enable_turbofan": false,
+ "v8_jitless": false,
+ "v8_enable_gdbjit": false
}
diff --git a/deps/v8/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json b/deps/v8/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json
index 7b07dc7431..e91ed7813d 100644
--- a/deps/v8/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json
+++ b/deps/v8/tools/testrunner/testdata/testroot2/out/build/v8_build_config.json
@@ -5,10 +5,10 @@
"is_asan": false,
"is_cfi": false,
"is_clang": true,
+ "is_clang_coverage": false,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
- "is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
@@ -20,14 +20,27 @@
"v8_enable_conservative_stack_scanning": false,
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": false,
"v8_enable_pointer_compression_shared_cage": false,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": false,
+ "v8_disable_write_barriers": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
- "v8_dict_property_const_tracking": false
+ "v8_dict_property_const_tracking": false,
+ "v8_code_comments": false,
+ "v8_enable_debug_code": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
+ "v8_enable_maglev": false,
+ "v8_enable_disassembler": false,
+ "is_DEBUG_defined": false,
+ "v8_enable_turbofan": false,
+ "v8_jitless": false,
+ "v8_enable_gdbjit": false
}
diff --git a/deps/v8/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json b/deps/v8/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json
index a2f651bc43..02af7319d7 100644
--- a/deps/v8/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json
+++ b/deps/v8/tools/testrunner/testdata/testroot3/out/build/v8_build_config.json
@@ -5,10 +5,10 @@
"is_asan": false,
"is_cfi": false,
"is_clang": true,
+ "is_clang_coverage": false,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
- "is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
@@ -20,14 +20,27 @@
"v8_enable_conservative_stack_scanning": false,
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": true,
+ "v8_disable_write_barriers": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
- "v8_dict_property_const_tracking": false
+ "v8_dict_property_const_tracking": false,
+ "v8_code_comments": false,
+ "v8_enable_debug_code": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
+ "v8_enable_maglev": false,
+ "v8_enable_disassembler": false,
+ "is_DEBUG_defined": false,
+ "v8_enable_turbofan": false,
+ "v8_jitless": false,
+ "v8_enable_gdbjit": false
}
diff --git a/deps/v8/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json b/deps/v8/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json
index a2f651bc43..02af7319d7 100644
--- a/deps/v8/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json
+++ b/deps/v8/tools/testrunner/testdata/testroot5/out.gn/build/v8_build_config.json
@@ -5,10 +5,10 @@
"is_asan": false,
"is_cfi": false,
"is_clang": true,
+ "is_clang_coverage": false,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
- "is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
@@ -20,14 +20,27 @@
"v8_enable_conservative_stack_scanning": false,
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": true,
+ "v8_disable_write_barriers": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
- "v8_dict_property_const_tracking": false
+ "v8_dict_property_const_tracking": false,
+ "v8_code_comments": false,
+ "v8_enable_debug_code": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
+ "v8_enable_maglev": false,
+ "v8_enable_disassembler": false,
+ "is_DEBUG_defined": false,
+ "v8_enable_turbofan": false,
+ "v8_jitless": false,
+ "v8_enable_gdbjit": false
}
diff --git a/deps/v8/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json b/deps/v8/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json
index a2f651bc43..02af7319d7 100644
--- a/deps/v8/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json
+++ b/deps/v8/tools/testrunner/testdata/testroot6/out/build/v8_build_config.json
@@ -5,10 +5,10 @@
"is_asan": false,
"is_cfi": false,
"is_clang": true,
+ "is_clang_coverage": false,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
- "is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
@@ -20,14 +20,27 @@
"v8_enable_conservative_stack_scanning": false,
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": true,
+ "v8_disable_write_barriers": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
- "v8_dict_property_const_tracking": false
+ "v8_dict_property_const_tracking": false,
+ "v8_code_comments": false,
+ "v8_enable_debug_code": false,
+ "v8_enable_verify_heap": false,
+ "v8_enable_slow_dchecks": false,
+ "v8_enable_maglev": false,
+ "v8_enable_disassembler": false,
+ "is_DEBUG_defined": false,
+ "v8_enable_turbofan": false,
+ "v8_jitless": false,
+ "v8_enable_gdbjit": false
}
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
index f1eac6ace7..08e3181bc2 100644
--- a/deps/v8/tools/testrunner/testproc/base.py
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -138,7 +138,7 @@ class TestProcProducer(TestProc):
def __init__(self, name):
super(TestProcProducer, self).__init__()
- self._name = name
+ self.name = name
def next_test(self, test):
return self._next_test(test)
@@ -161,11 +161,9 @@ class TestProcProducer(TestProc):
"""
raise NotImplementedError()
- ### Managing subtests
- def _create_subtest(self, test, subtest_id, **kwargs):
- """Creates subtest with subtest id <processor name>-`subtest_id`."""
- return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
- **kwargs)
+ def test_suffix(self, test):
+ """Default implementation of rdb test id suffix generated by a producer"""
+ return None
class TestProcFilter(TestProc):
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
index f3cd6a3e04..8783959265 100644
--- a/deps/v8/tools/testrunner/testproc/expectation.py
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -10,7 +10,7 @@ class ExpectationProc(base.TestProcProducer):
super(ExpectationProc, self).__init__('no-timeout')
def _next_test(self, test):
- subtest = self._create_subtest(test, 'no_timeout')
+ subtest = test.create_subtest(self, 'no_timeout')
subtest.allow_timeouts()
subtest.allow_pass()
return self._send_test(subtest)
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index 47334933bc..5e2e6ddc04 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -15,6 +15,9 @@ EXTRA_FLAGS = [
(0.1, '--cache=code'),
(0.1, '--force-slow-path'),
(0.2, '--future'),
+ # TODO(v8:13524): Enable when issue is fixed
+ # TODO(v8:13528): Enable when issue is fixed
+ # (0.1, '--harmony-struct'),
(0.1, '--interrupt-budget=100'),
(0.1, '--interrupt-budget-for-maglev=100'),
(0.1, '--liftoff'),
@@ -42,10 +45,12 @@ EXTRA_FLAGS = [
(0.1, '--regexp-interpret-all'),
(0.1, '--regexp-tier-up-ticks=10'),
(0.1, '--regexp-tier-up-ticks=100'),
+ (0.1, '--shared-string-table'),
(0.1, '--stress-background-compile'),
(0.1, '--stress-flush-code'),
(0.1, '--stress-lazy-source-positions'),
(0.1, '--stress-wasm-code-gc'),
+ (0.2, '--turboshaft'),
(0.1, '--turbo-instruction-scheduling'),
(0.1, '--turbo-stress-instruction-scheduling'),
(0.1, '--turbo-force-mid-tier-regalloc'),
@@ -53,6 +58,7 @@ EXTRA_FLAGS = [
MIN_DEOPT = 1
MAX_DEOPT = 10**9
+ANALYSIS_SUFFIX = 'analysis'
def random_extra_flags(rng):
@@ -167,6 +173,9 @@ class FuzzerProc(base.TestProcProducer):
self._disable_analysis = disable_analysis
self._gens = {}
+ def test_suffix(self, test):
+ return test.subtest_id
+
def _next_test(self, test):
if self.is_stopped:
return False
@@ -189,12 +198,13 @@ class FuzzerProc(base.TestProcProducer):
if analysis_flags:
analysis_flags = list(set(analysis_flags))
- return self._create_subtest(test, 'analysis', flags=analysis_flags,
- keep_output=True)
+ return test.create_subtest(
+ self, ANALYSIS_SUFFIX, flags=analysis_flags, keep_output=True)
def _result_for(self, test, subtest, result):
if not self._disable_analysis:
- if result is not None and subtest.procid.endswith('Fuzzer-analysis'):
+ if result is not None and subtest.procid.endswith(
+ f'{self.name}-{ANALYSIS_SUFFIX}'):
# Analysis phase, for fuzzing we drop the result.
if result.has_unexpected_output:
self._send_result(test, None)
@@ -241,7 +251,7 @@ class FuzzerProc(base.TestProcProducer):
flags.append('--fuzzer-random-seed=%s' % self._next_seed())
flags = _drop_contradictory_flags(flags, test.get_flags())
- yield self._create_subtest(test, str(i), flags=flags)
+ yield test.create_subtest(self, str(i), flags=flags)
i += 1
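Because TestCase.create_subtest() now builds procid as '<parent procid>.<processor name>-<subtest id>', the analysis subtest created above is recognizable by the '<fuzzer name>-analysis' suffix that _result_for() checks. Roughly, with invented names:

ANALYSIS_SUFFIX = 'analysis'
fuzzer_name = 'Fuzzer'           # the producer's self.name
parent_procid = 'mjsunit/foo'    # '<suite>/<name>' of the original test

subtest_procid = f'{parent_procid}.{fuzzer_name}-{ANALYSIS_SUFFIX}'
assert subtest_procid.endswith(f'{fuzzer_name}-{ANALYSIS_SUFFIX}')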
diff --git a/deps/v8/tools/testrunner/testproc/indicators.py b/deps/v8/tools/testrunner/testproc/indicators.py
index 1ae04a64c4..2f84119441 100644
--- a/deps/v8/tools/testrunner/testproc/indicators.py
+++ b/deps/v8/tools/testrunner/testproc/indicators.py
@@ -11,6 +11,7 @@ import time
from . import base
from . import util
+from .stack_utils import stack_analyzer_util
def print_failure_header(test, is_flaky=False):
@@ -24,7 +25,7 @@ def print_failure_header(test, is_flaky=False):
print(output.encode(encoding, errors='replace').decode(encoding))
-def formatted_result_output(result):
+def formatted_result_output(result, relative=False):
lines = []
if result.output.stderr:
lines.append("--- stderr ---")
@@ -32,7 +33,7 @@ def formatted_result_output(result):
if result.output.stdout:
lines.append("--- stdout ---")
lines.append(result.output.stdout.strip())
- lines.append("Command: %s" % result.cmd.to_string())
+ lines.append("Command: %s" % result.cmd.to_string(relative))
if result.output.HasCrashed():
lines.append("exit code: %s" % result.output.exit_code_string)
lines.append("--- CRASHED ---")
@@ -346,7 +347,7 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, context, options, test_count, framework_name):
+ def __init__(self, context, options, test_count):
super(JsonTestProgressIndicator, self).__init__(context, options,
test_count)
self.tests = util.FixedSizeTopList(
@@ -357,10 +358,10 @@ class JsonTestProgressIndicator(ProgressIndicator):
# keep_output set to True in the RerunProc.
self._requirement = base.DROP_PASS_STDOUT
- self.framework_name = framework_name
self.results = []
self.duration_sum = 0
self.test_count = 0
+ self.stack_parser = stack_analyzer_util.create_stack_parser()
def on_test_result(self, test, result):
self.process_results(test, result.as_list)
@@ -385,6 +386,9 @@ class JsonTestProgressIndicator(ProgressIndicator):
"stderr": output.stderr,
"error_details": result.error_details,
})
+
+ record.update(self.stack_parser.analyze_crash(output.stderr))
+
self.results.append(record)
def _buffer_slow_tests(self, test, result, output, run):
@@ -406,7 +410,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
def _test_record(self, test, result, run):
record = util.base_test_record(test, result, run)
record.update(
- framework_name=self.framework_name,
command=result.cmd.to_string(relative=True),
)
return record
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 947a2fb618..0cc508022e 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -57,7 +57,7 @@ class ResultsTracker(base.TestProcObserver):
class ProgressProc(base.TestProcObserver):
- def __init__(self, context, options, framework_name, test_count):
+ def __init__(self, context, options, test_count):
super(ProgressProc, self).__init__()
self.procs = [
PROGRESS_INDICATORS[options.progress](context, options, test_count)
@@ -65,8 +65,7 @@ class ProgressProc(base.TestProcObserver):
if options.json_test_results:
self.procs.insert(
0,
- JsonTestProgressIndicator(context, options, test_count,
- framework_name))
+ JsonTestProgressIndicator(context, options, test_count))
sink = rdb_sink()
if sink:
self.procs.append(ResultDBIndicator(context, options, test_count, sink))
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
index 8f0ec06973..69ffdb43a1 100644
--- a/deps/v8/tools/testrunner/testproc/rerun.py
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -58,7 +58,7 @@ class RerunProc(base.TestProcProducer):
result.has_unexpected_output)
def _send_next_subtest(self, test, run=0):
- subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
+ subtest = test.create_subtest(self, str(run + 1), keep_output=(run != 0))
return self._send_test(subtest)
def _finalize_test(self, test):
diff --git a/deps/v8/tools/testrunner/testproc/resultdb.py b/deps/v8/tools/testrunner/testproc/resultdb.py
index 42b22671fb..0a0b4e3aca 100644
--- a/deps/v8/tools/testrunner/testproc/resultdb.py
+++ b/deps/v8/tools/testrunner/testproc/resultdb.py
@@ -4,20 +4,16 @@
import json
import logging
-import pprint
import os
+import re
+import tempfile
from . import base
from .indicators import (
formatted_result_output,
ProgressIndicator,
)
-from .util import (
- base_test_record,
- extract_tags,
- strip_ascii_control_characters,
-)
-
+from .util import base_test_record
class ResultDBIndicator(ProgressIndicator):
@@ -38,24 +34,63 @@ class ResultDBIndicator(ProgressIndicator):
test_should_pass = not test.is_fail
run_passed = (result_expected == test_should_pass)
rdb_result = {
- 'testId': strip_ascii_control_characters(test.full_name),
+ 'testId': strip_ascii_control_characters(test.rdb_test_id),
'status': 'PASS' if run_passed else 'FAIL',
'expected': result_expected,
}
if result.output and result.output.duration:
rdb_result.update(duration=f'{result.output.duration}ms')
+
if result.has_unexpected_output:
- formated_output = formatted_result_output(result)
- sanitized = strip_ascii_control_characters(formated_output)
- # TODO(liviurau): do we have a better presentation data for this?
- # Protobuf strings can have len == 2**32.
- rdb_result.update(summaryHtml=f'<pre>{sanitized}</pre>')
+ formatted_output = formatted_result_output(result, relative=True)
+ relative_cmd = result.cmd.to_string(relative=True)
+ artifacts = {
+ 'output': write_artifact(formatted_output),
+ 'cmd': write_artifact(relative_cmd)
+ }
+ rdb_result.update(artifacts=artifacts)
+ summary = '<p><text-artifact artifact-id="output"></p>'
+ summary += '<p><text-artifact artifact-id="cmd"></p>'
+ rdb_result.update(summary_html=summary)
+
record = base_test_record(test, result, run)
+ record.update(
+ processor=test.processor_name,
+ subtest_id=test.subtest_id,
+ path=test.path)
+
rdb_result.update(tags=extract_tags(record))
+
self.rpc.send(rdb_result)
+def write_artifact(value):
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
+ tmp.write(value)
+ return { 'filePath': tmp.name }
+
+
+def extract_tags(record):
+ tags = []
+ for k, v in record.items():
+ if not v:
+ continue
+ if type(v) == list:
+ tags += [sanitized_kv_dict(k, e) for e in v]
+ else:
+ tags.append(sanitized_kv_dict(k, v))
+ return tags
+
+
+def sanitized_kv_dict(k, v):
+ return dict(key=k, value=strip_ascii_control_characters(v))
+
+
+def strip_ascii_control_characters(unicode_string):
+ return re.sub(r'[^\x20-\x7E]', '?', str(unicode_string))
+
+
def rdb_sink():
try:
import requests
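A small standalone run of the tag extraction defined above (the helpers are repeated verbatim so the snippet runs on its own; the record values are invented):

import re

def strip_ascii_control_characters(unicode_string):
    return re.sub(r'[^\x20-\x7E]', '?', str(unicode_string))

def sanitized_kv_dict(k, v):
    return dict(key=k, value=strip_ascii_control_characters(v))

def extract_tags(record):
    # List values fan out into one tag per element; falsy values are dropped.
    tags = []
    for k, v in record.items():
        if not v:
            continue
        if type(v) == list:
            tags += [sanitized_kv_dict(k, e) for e in v]
        else:
            tags.append(sanitized_kv_dict(k, v))
    return tags

record = {'name': 'sweet/strawberries', 'expected': ['PASS', 'FAIL'], 'run': 1}
print(extract_tags(record))
# [{'key': 'name', 'value': 'sweet/strawberries'},
#  {'key': 'expected', 'value': 'PASS'},
#  {'key': 'expected', 'value': 'FAIL'},
#  {'key': 'run', 'value': '1'}]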
diff --git a/deps/v8/tools/testrunner/testproc/seed.py b/deps/v8/tools/testrunner/testproc/seed.py
index 392bec2ef7..e472723371 100644
--- a/deps/v8/tools/testrunner/testproc/seed.py
+++ b/deps/v8/tools/testrunner/testproc/seed.py
@@ -45,7 +45,7 @@ class SeedProc(base.TestProcProducer):
def _try_send_next_test(self, test):
def create_subtest(idx):
seed = self._seed or random_utils.random_seed()
- return self._create_subtest(test, idx, random_seed=seed)
+ return test.create_subtest(self, idx, random_seed=seed)
num = self._last_idx[test.procid]
if not self._count or num < self._count:
diff --git a/deps/v8/tools/testrunner/testproc/shard.py b/deps/v8/tools/testrunner/testproc/shard.py
index 3b67ec133f..e31a2ea2fb 100644
--- a/deps/v8/tools/testrunner/testproc/shard.py
+++ b/deps/v8/tools/testrunner/testproc/shard.py
@@ -20,7 +20,7 @@ def radix_hash(capacity, key):
class ShardProc(base.TestProcFilter):
@staticmethod
def create(options):
- myid, count = options.shard_info()
+ myid, count = options.shard_info
if count == 1:
return None
return ShardProc(myid, count)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/__init__.py b/deps/v8/tools/testrunner/testproc/stack_utils/__init__.py
new file mode 100644
index 0000000000..fd7718a4a6
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/clusterfuzz_fakes.py b/deps/v8/tools/testrunner/testproc/stack_utils/clusterfuzz_fakes.py
new file mode 100644
index 0000000000..d475c7b724
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/clusterfuzz_fakes.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file exists to mock some of the imports used by ClusterFuzz. Since we
+# only need one specific piece of ClusterFuzz functionality (stack analysis),
+# we do not need most of the modules it imports, so we fake them here so that
+# importing ClusterFuzz does not crash.
+
+
+def empty_fn(*args, **kwargs):
+ raise NotImplementedError('empty function was used')
+
+
+kernel_utils, \
+storage, \
+fetch_artifact, \
+settings, \
+symbols_downloader, \
+= [empty_fn] * 5
+
+
+class ProjectConfig:
+
+ def get(self):
+ """Return empty config properties when ClusterFuzz tries to find a project
+ config.
+ """
+ return None
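The trick is that registering a module object in sys.modules under a dotted name short-circuits the import machinery, so ClusterFuzz's 'from ... import ...' statements resolve against the fake without loading the real dependencies. A generic, self-contained sketch with invented module and attribute names:

import sys
import types

def fake_modules(module_names, **attrs):
    # Pre-register stand-in modules so later imports never touch the real packages.
    for name in module_names:
        fake = types.ModuleType(name)
        for attr, value in attrs.items():
            setattr(fake, attr, value)
        sys.modules[name] = fake

fake_modules(['heavy_pkg.cloud_utils'], storage=lambda *args, **kwargs: None)

# Resolved straight from sys.modules; no real heavy_pkg needs to be installed.
from heavy_pkg.cloud_utils import storage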
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util.py b/deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util.py
new file mode 100644
index 0000000000..1deaecc60e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python3
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import sys
+
+
+ANALYSIS_FAILURE_STATES = ['NULL', 'Unknown', '', None]
+STACKFRAME_REGEX = r'^#\d+.*'
+FILE_LOCATION_REGEX = r'.*\:\d+\:\d+$'
+
+
+def fake_clusterfuzz_imports(modules):
+ for module in modules:
+ sys.modules[module] = __import__('clusterfuzz_fakes')
+
+
+def pre_clusterfuzz_import():
+ local_path = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append(local_path)
+
+ fake_clusterfuzz_imports([
+ 'clusterfuzz._internal.platforms.android',
+ 'clusterfuzz._internal.google_cloud_utils',
+ 'clusterfuzz._internal.config.local_config',
+ ])
+
+
+class CustomStackParser:
+
+ def __init__(self):
+ pre_clusterfuzz_import()
+
+ from clusterfuzz.stacktraces import StackParser, MAX_CRASH_STATE_FRAMES
+ from clusterfuzz.stacktraces import llvm_test_one_input_override
+ from clusterfuzz.stacktraces import constants
+
+ self.MAX_CRASH_STATE_FRAMES = MAX_CRASH_STATE_FRAMES
+ self.stack_parser = StackParser()
+ self.constants = constants
+ self.llvm_test_one_input_override = llvm_test_one_input_override
+
+ def analyze_crash(self, stderr):
+ try:
+ stderr_analysis = self.stack_parser.parse(stderr)
+ self.custom_analyzer(stderr_analysis)
+ return {
+ "crash_state": stderr_analysis.crash_state.strip(),
+ "crash_type": stderr_analysis.crash_type.strip(),
+ }
+ except Exception as e:
+ logging.info(e)
+ return {
+ "crash_state": "Unknown",
+ "crash_type": "Unknown",
+ }
+
+ def _extract_function_name(self, frame):
+ split_frame = frame.split()
+ start_loc = 1
+ end_loc = -1
+ if len(split_frame) > 2 and split_frame[2] == 'in':
+ start_loc = 3
+ split_frame = split_frame[start_loc:]
+ for idx, item in enumerate(split_frame):
+ # exclude file name and everything after it
+ if re.match(FILE_LOCATION_REGEX, item):
+ end_loc = idx
+ break
+ if end_loc != -1:
+ return ' '.join(split_frame[:end_loc])
+ return None
+
+ def _fallback_crash_state(self, stacktrace):
+ status = []
+ for line in [l.strip() for l in stacktrace.splitlines()]:
+ if re.match(STACKFRAME_REGEX, line):
+ frame = self._extract_function_name(line)
+ if not self.stack_parser.ignore_stack_frame(frame):
+ status.append(frame)
+ if len(status) >= self.MAX_CRASH_STATE_FRAMES:
+ break
+
+ return '\n'.join(status)
+
+ def custom_analyzer(self, crash_info):
+ if crash_info.crash_state in ANALYSIS_FAILURE_STATES:
+ fallback_state = self._fallback_crash_state(crash_info.crash_stacktrace)
+ crash_info.crash_state = fallback_state or crash_info.crash_state
+
+
+class EmptyStackParser:
+
+ def analyze_crash(self, stderr):
+ return {}
+
+
+def create_stack_parser():
+ try:
+ return CustomStackParser()
+ except ImportError as e:
+ logging.info(e)
+ return EmptyStackParser()
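In the fallback path, STACKFRAME_REGEX selects the '#N ...' frame lines and FILE_LOCATION_REGEX marks the 'file:line:col' token that terminates the function name. A simplified standalone sketch of that extraction on one invented frame (it skips the ignore_stack_frame filtering the real parser applies):

import re

STACKFRAME_REGEX = r'^#\d+.*'
FILE_LOCATION_REGEX = r'.*\:\d+\:\d+$'

def extract_function_name(frame):
    # Frames typically look like '#N <addr> in <function> <file>:<line>:<col>'.
    parts = frame.split()
    parts = parts[3:] if len(parts) > 2 and parts[2] == 'in' else parts[1:]
    for idx, item in enumerate(parts):
        if re.match(FILE_LOCATION_REGEX, item):
            return ' '.join(parts[:idx])
    return None

frame = ('#4 0x55fbeeb8ad8c in v8::internal::Isolate::Throw() '
         '../../src/execution/isolate.cc:123:7')
assert re.match(STACKFRAME_REGEX, frame)
print(extract_function_name(frame))  # v8::internal::Isolate::Throw()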
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util_test.py b/deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util_test.py
new file mode 100644
index 0000000000..8c8b20c204
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/stack_analyzer_util_test.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import sys
+import unittest
+
+TOOLS_PATH = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+TEST_DATA_ROOT = os.path.join(TOOLS_PATH, 'testproc', 'stack_utils', 'testdata')
+TEST_DATA_GENERAL = os.path.join(TEST_DATA_ROOT, 'analyze_crash')
+TEST_DATA_CUSTOM = os.path.join(TEST_DATA_ROOT, 'custom_analyzer')
+
+from testproc.stack_utils.stack_analyzer_util import create_stack_parser
+
+
+class TestScript(unittest.TestCase):
+ # TODO(almuthanna): find out why these test cases are not analyzed and add
+ # logic to make them pass.
+ skipped_tests = [
+ 'type_assertion_1.txt',
+ 'type_assertion_2.txt',
+ 'static_assertion_2.txt',
+ 'static_assertion_1.txt',
+ ]
+
+ def test_analyze_crash(self):
+ stack_parser = create_stack_parser()
+ for file in [
+ f for f in os.listdir(TEST_DATA_GENERAL) if f.endswith('.txt')
+ ]:
+ if file in self.skipped_tests:
+ continue
+ filepath = os.path.join(TEST_DATA_GENERAL, file)
+ exp_filepath = os.path.join(TEST_DATA_GENERAL,
+ file.replace('.txt', '.expected.json'))
+ with self.subTest(test_name=file[:-4]):
+ with open(filepath) as f:
+ result = stack_parser.analyze_crash(f.read())
+ with open(exp_filepath, 'r') as exp_f:
+ expectation = json.load(exp_f)
+ self.assertDictEqual(result, expectation)
+
+ def test_fallback_crash_state(self):
+ self.maxDiff = None
+ stack_parser = create_stack_parser()
+ for file in [f for f in os.listdir(TEST_DATA_CUSTOM) if f.endswith('.txt')]:
+ filepath = os.path.join(TEST_DATA_CUSTOM, file)
+ exp_filepath = os.path.join(TEST_DATA_CUSTOM,
+ file.replace('.txt', '.expected'))
+ with self.subTest(test_name=file[:-4]):
+ with open(filepath) as f:
+ result = stack_parser._fallback_crash_state(f.read())
+ with open(exp_filepath, 'r') as exp_f:
+ expectation = exp_f.read()
+ self.assertEqual(result, expectation)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.expected.json
new file mode 100644
index 0000000000..e0fdf04d84
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "CHECK failure",
+ "crash_state": "kind() == CodeKind::BASELINE"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.txt
new file mode 100644
index 0000000000..d7affc63e8
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/code_kind.txt
@@ -0,0 +1,20 @@
+Warning: disabling flag --regexp_tier_up due to conflicting flags
+
+
+#
+# Fatal error in , line 0
+# Check failed: kind() == CodeKind::BASELINE.
+#
+#
+#
+#FailureMessage Object: 0x7fff55c98300
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/d8(+0xec56a3) [0x55fbef6236a3]
+ /b/s/w/ir/out/build/d8(+0xec4f4b) [0x55fbef622f4b]
+ /b/s/w/ir/out/build/d8(+0xebb4d5) [0x55fbef6194d5]
+ /b/s/w/ir/out/build/d8(+0x42cd8c) [0x55fbeeb8ad8c]
+ /b/s/w/ir/out/build/d8(+0x42bd45) [0x55fbeeb89d45]
+ [0x2c4d0000c1ca]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.expected.json
new file mode 100644
index 0000000000..e645c4beb6
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Data race\nWRITE 8",
+ "crash_state": "SetFlag\nMarkWasUsedForAllocation\nv8::internal::PagedSpaceBase::SetLinearAllocationArea(unsigned long, unsigned long)"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.txt
new file mode 100644
index 0000000000..e1e21610e0
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_1.txt
@@ -0,0 +1,91 @@
+==================
+WARNING: ThreadSanitizer: data race (pid=24781)
+ Write of size 8 at 0x7e9b00080008 by main thread (mutexes: write M0):
+ #0 operator|= src/base/flags.h:47:11 (d8+0xa03eff) (BuildId: 2d1896fcafe8220c)
+ #1 operator|= src/base/flags.h:66:46 (d8+0xa03eff)
+ #2 SetFlag src/heap/basic-memory-chunk.h:198:48 (d8+0xa03eff)
+ #3 MarkWasUsedForAllocation src/heap/memory-chunk.h:223:37 (d8+0xa03eff)
+ #4 v8::internal::PagedSpaceBase::SetLinearAllocationArea(unsigned long, unsigned long) src/heap/paged-spaces.cc:435:13 (d8+0xa03eff)
+ #5 v8::internal::PagedSpaceBase::TryAllocationFromFreeListMain(unsigned long, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:653:3 (d8+0xa04b8e) (BuildId: 2d1896fcafe8220c)
+ #6 v8::internal::PagedSpaceBase::RawRefillLabMain(int, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:991:7 (d8+0xa066b4) (BuildId: 2d1896fcafe8220c)
+ #7 v8::internal::PagedSpaceBase::RefillLabMain(int, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:960:10 (d8+0xa065f7) (BuildId: 2d1896fcafe8220c)
+ #8 EnsureAllocation src/heap/paged-spaces-inl.h:89:10 (d8+0x9f33a8) (BuildId: 2d1896fcafe8220c)
+ #9 EnsureAllocation src/heap/new-spaces-inl.h:109:24 (d8+0x9f33a8)
+ #10 v8::internal::PagedNewSpace::EnsureAllocation(int, v8::internal::AllocationAlignment, v8::internal::AllocationOrigin, int*) src/heap/new-spaces.h:800:25 (d8+0x9f33a8)
+ #11 AllocateRawUnaligned src/heap/spaces-inl.h:277:8 (d8+0x8b41ea) (BuildId: 2d1896fcafe8220c)
+ #12 AllocateRawSlow src/heap/spaces-inl.h:331:13 (d8+0x8b41ea)
+ #13 AllocateRaw src/heap/spaces-inl.h:269:31 (d8+0x8b41ea)
+ #14 AllocateRaw<(v8::internal::AllocationType)0> src/heap/heap-allocator-inl.h:107:28 (d8+0x8b41ea)
+ #15 AllocateRawWith<(v8::internal::HeapAllocator::AllocationRetryMode)1> src/heap/heap-allocator-inl.h:230:14 (d8+0x8b41ea)
+ #16 v8::internal::Factory::AllocateRawWithAllocationSite(v8::internal::Handle<v8::internal::Map>, v8::internal::AllocationType, v8::internal::Handle<v8::internal::AllocationSite>) src/heap/factory.cc:356:36 (d8+0x8b41ea)
+ #17 v8::internal::Factory::NewJSObjectFromMap(v8::internal::Handle<v8::internal::Map>, v8::internal::AllocationType, v8::internal::Handle<v8::internal::AllocationSite>) src/heap/factory.cc:2728:7 (d8+0x8ba650) (BuildId: 2d1896fcafe8220c)
+ #18 __RT_impl_Runtime_NewArray src/runtime/runtime-array.cc:116:58 (d8+0xfa02b1) (BuildId: 2d1896fcafe8220c)
+ #19 v8::internal::Runtime_NewArray(int, unsigned long*, v8::internal::Isolate*) src/runtime/runtime-array.cc:44:1 (d8+0xfa02b1)
+ #20 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1c3cd77)
+ #21 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x8187b5) (BuildId: 2d1896fcafe8220c)
+ #22 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2151:7 (d8+0x5b210e) (BuildId: 2d1896fcafe8220c)
+ #23 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2078:10 (d8+0x5b1840) (BuildId: 2d1896fcafe8220c)
+ #24 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:879:28 (d8+0x5703cb) (BuildId: 2d1896fcafe8220c)
+ #25 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4379:10 (d8+0x588620) (BuildId: 2d1896fcafe8220c)
+ #26 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5107:39 (d8+0x58c487) (BuildId: 2d1896fcafe8220c)
+ #27 v8::Shell::Main(int, char**) src/d8/d8.cc:5886:18 (d8+0x58efe0) (BuildId: 2d1896fcafe8220c)
+ #28 main src/d8/d8.cc:5976:43 (d8+0x58f4ee) (BuildId: 2d1896fcafe8220c)
+
+ Previous read of size 8 at 0x7e9b00080008 by thread T6:
+ #0 GetFlags src/heap/basic-memory-chunk.h:211:45 (d8+0x88ee2f) (BuildId: 2d1896fcafe8220c)
+ #1 ShouldSkipEvacuationSlotRecording src/heap/basic-memory-chunk.h:240:29 (d8+0x88ee2f)
+ #2 RecordSlot src/heap/mark-compact-inl.h:68:21 (d8+0x88ee2f)
+ #3 void v8::internal::ConcurrentMarkingVisitor::RecordSlot<v8::internal::CompressedHeapObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/concurrent-marking.cc:481:5 (d8+0x88ee2f)
+ #4 void v8::internal::MarkingVisitorBase<v8::internal::ConcurrentMarkingVisitor, v8::internal::ConcurrentMarkingState>::ProcessStrongHeapObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/marking-visitor-inl.h:49:23 (d8+0x88ed97) (BuildId: 2d1896fcafe8220c)
+ #5 VisitPointersImpl<v8::internal::CompressedObjectSlot> src/heap/marking-visitor-inl.h:90:7 (d8+0x894a3a) (BuildId: 2d1896fcafe8220c)
+ #6 VisitPointers src/heap/marking-visitor.h:197:5 (d8+0x894a3a)
+ #7 IteratePointers<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors-inl.h:127:6 (d8+0x894a3a)
+ #8 IterateBody<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors-inl.h:358:5 (d8+0x894a3a)
+ #9 int v8::internal::ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass<v8::internal::ConcurrentMarkingVisitor, v8::internal::JSFunction, v8::internal::JSFunction::BodyDescriptor>(v8::internal::ConcurrentMarkingVisitor*, v8::internal::Map, v8::internal::JSFunction) src/heap/concurrent-marking.cc:102:5 (d8+0x894a3a)
+ #10 VisitJSObjectSubclass<v8::internal::JSFunction, v8::internal::JSFunction::BodyDescriptor> src/heap/concurrent-marking.cc:489:12 (d8+0x884a6f) (BuildId: 2d1896fcafe8220c)
+ #11 VisitJSFunction src/heap/marking-visitor-inl.h:178:34 (d8+0x884a6f)
+ #12 Visit src/heap/objects-visiting-inl.h:65:5 (d8+0x884a6f)
+ #13 v8::internal::ConcurrentMarking::RunMajor(v8::JobDelegate*, v8::base::EnumSet<v8::internal::CodeFlushMode, int>, unsigned int, bool) src/heap/concurrent-marking.cc:758:41 (d8+0x884a6f)
+ #14 v8::internal::ConcurrentMarking::JobTaskMajor::Run(v8::JobDelegate*) src/heap/concurrent-marking.cc:606:28 (d8+0x898719) (BuildId: 2d1896fcafe8220c)
+ #15 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:304:18 (d8+0x565b28) (BuildId: 2d1896fcafe8220c)
+ #16 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1e5184b) (BuildId: 2d1896fcafe8220c)
+ #17 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1e586b0) (BuildId: 2d1896fcafe8220c)
+ #18 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x1e4bc22) (BuildId: 2d1896fcafe8220c)
+ #19 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1112:11 (d8+0x1e4bc22)
+
+ Mutex M0 (0x7b5400000710) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1316:3 (d8+0x4df75f) (BuildId: 2d1896fcafe8220c)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1e4408b) (BuildId: 2d1896fcafe8220c)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1e4408b)
+ #3 v8::internal::PagedSpaceBase::PagedSpaceBase(v8::internal::Heap*, v8::internal::AllocationSpace, v8::internal::Executability, v8::internal::FreeList*, v8::internal::AllocationCounter&, v8::internal::LinearAllocationArea&, v8::internal::LinearAreaOriginalData&, v8::internal::CompactionSpaceKind) src/heap/paged-spaces.cc:120:17 (d8+0xa023d8) (BuildId: 2d1896fcafe8220c)
+ #4 PagedSpaceForNewSpace src/heap/new-spaces.cc:923:7 (d8+0x9f1f66) (BuildId: 2d1896fcafe8220c)
+ #5 v8::internal::PagedNewSpace::PagedNewSpace(v8::internal::Heap*, unsigned long, unsigned long, v8::internal::LinearAllocationArea&) src/heap/new-spaces.cc:1042:7 (d8+0x9f1f66)
+ #6 make_unique<v8::internal::PagedNewSpace, v8::internal::Heap *, unsigned long &, unsigned long &, v8::internal::LinearAllocationArea &> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (d8+0x90c4a9) (BuildId: 2d1896fcafe8220c)
+ #7 v8::internal::Heap::SetUpSpaces(v8::internal::LinearAllocationArea&, v8::internal::LinearAllocationArea&) src/heap/heap.cc:5559:27 (d8+0x90c4a9)
+ #8 v8::internal::Isolate::Init(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4264:9 (d8+0x843e08) (BuildId: 2d1896fcafe8220c)
+ #9 v8::internal::Isolate::InitWithSnapshot(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4036:10 (d8+0x8455f9) (BuildId: 2d1896fcafe8220c)
+ #10 v8::internal::Snapshot::Initialize(v8::internal::Isolate*) src/snapshot/snapshot.cc:182:27 (d8+0x10bc8e1) (BuildId: 2d1896fcafe8220c)
+ #11 v8::Isolate::Initialize(v8::Isolate*, v8::Isolate::CreateParams const&) src/api/api.cc:8836:8 (d8+0x5dd97f) (BuildId: 2d1896fcafe8220c)
+ #12 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:8872:3 (d8+0x5ddc45) (BuildId: 2d1896fcafe8220c)
+ #13 v8::Shell::Main(int, char**) src/d8/d8.cc:5788:22 (d8+0x58e70c) (BuildId: 2d1896fcafe8220c)
+ #14 main src/d8/d8.cc:5976:43 (d8+0x58f4ee) (BuildId: 2d1896fcafe8220c)
+
+ Thread T6 'V8 DefaultWorke' (tid=24798, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3 (d8+0x4ddffb) (BuildId: 2d1896fcafe8220c)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1144:14 (d8+0x1e4bb36) (BuildId: 2d1896fcafe8220c)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x1e580ac) (BuildId: 2d1896fcafe8220c)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (d8+0x1e580ac)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x1e580ac)
+ #5 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:297:37 (d8+0x1e4e1b0) (BuildId: 2d1896fcafe8220c)
+ #6 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:956:55 (d8+0x1e4e1b0)
+ #7 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:965:12 (d8+0x1e4e1b0)
+ #8 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:132:7 (d8+0x1e4e1b0)
+ #9 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (d8+0x1e4e1b0)
+ #10 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (d8+0x1e4d839) (BuildId: 2d1896fcafe8220c)
+ #11 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (d8+0x1e4d839)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5691:16 (d8+0x58e251) (BuildId: 2d1896fcafe8220c)
+ #13 main src/d8/d8.cc:5976:43 (d8+0x58f4ee) (BuildId: 2d1896fcafe8220c)
+
+SUMMARY: ThreadSanitizer: data race src/base/flags.h:47:11 in operator|=
+==================
+ThreadSanitizer: reported 1 warnings
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.expected.json
new file mode 100644
index 0000000000..474650aa18
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Data race\nWRITE 8",
+ "crash_state": "ClearCellRangeRelaxed\nClear\nClearMarkBitsAndHandleLivenessStatistics"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.txt
new file mode 100644
index 0000000000..c6993d2cdd
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_2.txt
@@ -0,0 +1,118 @@
+==================
+WARNING: ThreadSanitizer: data race (pid=10132)
+ Write of size 8 at 0x7e8d00440248 by thread T7 (mutexes: write M0):
+ #0 memset sanitizer_common/sanitizer_common_interceptors.inc:799:3 (d8+0x49d4ad) (BuildId: c849b9596314cb5d)
+ #1 ClearCellRangeRelaxed src/heap/marking.h:205:16 (d8+0x94d685) (BuildId: c849b9596314cb5d)
+ #2 Clear src/heap/marking.h:228:3 (d8+0x94d685)
+ #3 ClearMarkBitsAndHandleLivenessStatistics src/heap/sweeper.cc:303:33 (d8+0x94d685)
+ #4 v8::internal::Sweeper::RawSweep(v8::internal::Page*, v8::internal::Sweeper::FreeListRebuildingMode, v8::internal::FreeSpaceTreatmentMode, v8::internal::Sweeper::FreeSpaceMayContainInvalidatedSlots, v8::base::LockGuard<v8::base::Mutex, (v8::base::NullBehavior)0> const&) src/heap/sweeper.cc:412:3 (d8+0x94d685)
+ #5 v8::internal::Sweeper::ParallelSweepPage(v8::internal::Page*, v8::internal::AllocationSpace, v8::internal::Sweeper::FreeSpaceMayContainInvalidatedSlots) src/heap/sweeper.cc:494:17 (d8+0x94d9e6) (BuildId: c849b9596314cb5d)
+ #6 ConcurrentSweepSpace src/heap/sweeper.cc:437:5 (d8+0x95056a) (BuildId: c849b9596314cb5d)
+ #7 v8::internal::Sweeper::SweeperJob::RunImpl(v8::JobDelegate*) src/heap/sweeper.cc:117:22 (d8+0x95056a)
+ #8 v8::internal::Sweeper::SweeperJob::Run(v8::JobDelegate*) src/heap/sweeper.cc (d8+0x950295) (BuildId: c849b9596314cb5d)
+ #9 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:297:18 (d8+0x51a6a8) (BuildId: c849b9596314cb5d)
+ #10 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1a7bec8) (BuildId: c849b9596314cb5d)
+ #11 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1a836f0) (BuildId: c849b9596314cb5d)
+ #12 NotifyStartedAndRun src/base/platform/platform.h:560:5 (d8+0x1a77162) (BuildId: c849b9596314cb5d)
+ #13 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1051:11 (d8+0x1a77162)
+
+ Previous atomic read of size 4 at 0x7e8d00440248 by thread T3:
+ #0 __cxx_atomic_load<int> buildtools/third_party/libc++/trunk/include/atomic:1000:12 (d8+0x7e5423) (BuildId: c849b9596314cb5d)
+ #1 load buildtools/third_party/libc++/trunk/include/atomic:1611:17 (d8+0x7e5423)
+ #2 atomic_load_explicit<int> buildtools/third_party/libc++/trunk/include/atomic:1967:17 (d8+0x7e5423)
+ #3 Acquire_Load src/base/atomicops.h:240:10 (d8+0x7e5423)
+ #4 Acquire_Load<unsigned int> src/base/atomic-utils.h:73:9 (d8+0x7e5423)
+ #5 Get<v8::internal::AccessMode::ATOMIC> src/heap/marking.h:78:11 (d8+0x7e5423)
+ #6 IsBlackOrGrey<v8::internal::AccessMode::ATOMIC> src/heap/marking.h:398:21 (d8+0x7e5423)
+ #7 IsBlackOrGrey src/heap/marking-visitor.h:81:12 (d8+0x7e5423)
+ #8 void v8::internal::MarkingVisitorBase<v8::internal::ConcurrentMarkingVisitor, v8::internal::ConcurrentMarkingState>::ProcessWeakHeapObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/marking-visitor-inl.h:58:44 (d8+0x7e5423)
+ #9 VisitPointersImpl<v8::internal::CompressedMaybeObjectSlot> src/heap/marking-visitor-inl.h:90:7 (d8+0x7df885) (BuildId: c849b9596314cb5d)
+ #10 VisitPointers src/heap/marking-visitor.h:199:5 (d8+0x7df885)
+ #11 IterateMaybeWeakPointers<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors-inl.h:139:6 (d8+0x7df885)
+ #12 IterateBody<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors.h:176:5 (d8+0x7df885)
+ #13 VisitFeedbackVector src/heap/objects-visiting-inl.h:118:1 (d8+0x7df885)
+ #14 Visit src/heap/objects-visiting-inl.h:65:5 (d8+0x7df885)
+ #15 v8::internal::ConcurrentMarking::Run(v8::JobDelegate*, v8::base::EnumSet<v8::internal::CodeFlushMode, int>, unsigned int, bool) src/heap/concurrent-marking.cc:531:41 (d8+0x7df885)
+ #16 v8::internal::ConcurrentMarking::JobTask::Run(v8::JobDelegate*) src/heap/concurrent-marking.cc:420:28 (d8+0x7eb659) (BuildId: c849b9596314cb5d)
+ #17 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:297:18 (d8+0x51a6a8) (BuildId: c849b9596314cb5d)
+ #18 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1a7bec8) (BuildId: c849b9596314cb5d)
+ #19 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1a836f0) (BuildId: c849b9596314cb5d)
+ #20 NotifyStartedAndRun src/base/platform/platform.h:560:5 (d8+0x1a77162) (BuildId: c849b9596314cb5d)
+ #21 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1051:11 (d8+0x1a77162)
+
+ As if synchronized via sleep:
+ #0 usleep /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:358:3 (d8+0x490b4a) (BuildId: c849b9596314cb5d)
+ #1 v8::base::OS::Sleep(v8::base::TimeDelta) src/base/platform/platform-posix.cc:615:3 (d8+0x1a76295) (BuildId: c849b9596314cb5d)
+ #2 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:296:7 (d8+0x51a687) (BuildId: c849b9596314cb5d)
+ #3 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1a7bec8) (BuildId: c849b9596314cb5d)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1a836f0) (BuildId: c849b9596314cb5d)
+ #5 NotifyStartedAndRun src/base/platform/platform.h:560:5 (d8+0x1a77162) (BuildId: c849b9596314cb5d)
+ #6 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1051:11 (d8+0x1a77162)
+
+ Mutex M0 (0x7b0c000091e0) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1316:3 (d8+0x494d4f) (BuildId: c849b9596314cb5d)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:95:12 (d8+0x1a71ecb) (BuildId: c849b9596314cb5d)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:148:3 (d8+0x1a71ecb)
+ #3 v8::internal::MemoryChunk::Initialize(v8::internal::BasicMemoryChunk*, v8::internal::Heap*, v8::internal::Executability) src/heap/memory-chunk.cc:149:23 (d8+0x8fa1dd) (BuildId: c849b9596314cb5d)
+ #4 AllocateChunk src/heap/memory-allocator.cc:409:7 (d8+0x8f9094) (BuildId: c849b9596314cb5d)
+ #5 v8::internal::MemoryAllocator::AllocatePage(v8::internal::MemoryAllocator::AllocationMode, unsigned long, v8::internal::Space*, v8::internal::Executability) src/heap/memory-allocator.cc:562:13 (d8+0x8f9094)
+ #6 AllocatePage src/heap/paged-spaces.cc:322:38 (d8+0x9130dd) (BuildId: c849b9596314cb5d)
+ #7 v8::internal::PagedSpace::ExpandBackground(unsigned long) src/heap/paged-spaces.cc:338:16 (d8+0x9130dd)
+ #8 v8::internal::PagedSpace::RawRefillLabBackground(v8::internal::LocalHeap*, unsigned long, unsigned long, v8::internal::AllocationAlignment, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:631:14 (d8+0x914467) (BuildId: c849b9596314cb5d)
+ #9 v8::internal::ConcurrentAllocator::EnsureLab(v8::internal::AllocationOrigin) src/heap/concurrent-allocator.cc:135:25 (d8+0x7dc06c) (BuildId: c849b9596314cb5d)
+ #10 v8::internal::ConcurrentAllocator::AllocateInLabSlow(int, v8::internal::AllocationAlignment, v8::internal::AllocationOrigin) src/heap/concurrent-allocator.cc:124:8 (d8+0x7dbf7b) (BuildId: c849b9596314cb5d)
+ #11 AllocateInLab src/heap/concurrent-allocator-inl.h:41:16 (d8+0x82d0b1) (BuildId: c849b9596314cb5d)
+ #12 AllocateRaw src/heap/concurrent-allocator-inl.h:34:10 (d8+0x82d0b1)
+ #13 AllocateRaw<v8::internal::AllocationType::kSharedMap> src/heap/heap-allocator-inl.h:122:47 (d8+0x82d0b1)
+ #14 AllocateRaw src/heap/heap-allocator-inl.h:184:14 (d8+0x82d0b1)
+ #15 v8::internal::HeapAllocator::AllocateRawWithLightRetrySlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:75:29 (d8+0x82d0b1)
+ #16 v8::internal::HeapAllocator::AllocateRawWithRetryOrFailSlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:100:7 (d8+0x82e234) (BuildId: c849b9596314cb5d)
+ #17 AllocateRawWith<v8::internal::HeapAllocator::kRetryOrFail> src/heap/heap-allocator-inl.h:237:16 (d8+0x808920) (BuildId: c849b9596314cb5d)
+ #18 v8::internal::Factory::NewMap(v8::internal::InstanceType, int, v8::internal::ElementsKind, int, v8::internal::AllocationType) src/heap/factory.cc:1848:36 (d8+0x808920)
+ #19 v8::internal::Builtin_Impl_SharedStructTypeConstructor(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-struct.cc:84:39 (d8+0x65fa26) (BuildId: c849b9596314cb5d)
+ #20 v8::internal::Builtin_SharedStructTypeConstructor(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-struct.cc:20:1 (d8+0x65f061) (BuildId: c849b9596314cb5d)
+ #21 <null> <null> (0x560ebfe66ef8)
+ #22 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:531:10 (d8+0x77252c) (BuildId: c849b9596314cb5d)
+ #23 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2105:7 (d8+0x561590) (BuildId: c849b9596314cb5d)
+ #24 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2048:10 (d8+0x560d20) (BuildId: c849b9596314cb5d)
+ #25 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:773:28 (d8+0x5211d0) (BuildId: c849b9596314cb5d)
+ #26 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:3990:10 (d8+0x53659a) (BuildId: c849b9596314cb5d)
+ #27 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:4656:39 (d8+0x53a188) (BuildId: c849b9596314cb5d)
+ #28 v8::Shell::Main(int, char**) src/d8/d8.cc:5473:18 (d8+0x53cd5b) (BuildId: c849b9596314cb5d)
+ #29 main src/d8/d8.cc:5553:43 (d8+0x53cf4e) (BuildId: c849b9596314cb5d)
+
+ Thread T7 'V8 DefaultWorke' (tid=10149, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3 (d8+0x49356d) (BuildId: c849b9596314cb5d)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1083:14 (d8+0x1a77076) (BuildId: c849b9596314cb5d)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x1a830f2) (BuildId: c849b9596314cb5d)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a830f2)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x1a830f2)
+ #5 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:293:37 (d8+0x1a7890d) (BuildId: c849b9596314cb5d)
+ #6 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::__1::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1101:55 (d8+0x1a7890d)
+ #7 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1110:12 (d8+0x1a7890d)
+ #8 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:133:7 (d8+0x1a7890d)
+ #9 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:109:5 (d8+0x1a7890d)
+ #10 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a78029) (BuildId: c849b9596314cb5d)
+ #11 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:54:19 (d8+0x1a78029)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5270:16 (d8+0x53b887) (BuildId: c849b9596314cb5d)
+ #13 main src/d8/d8.cc:5553:43 (d8+0x53cf4e) (BuildId: c849b9596314cb5d)
+
+ Thread T3 'V8 DefaultWorke' (tid=10144, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3 (d8+0x49356d) (BuildId: c849b9596314cb5d)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1083:14 (d8+0x1a77076) (BuildId: c849b9596314cb5d)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x1a830f2) (BuildId: c849b9596314cb5d)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a830f2)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x1a830f2)
+ #5 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:293:37 (d8+0x1a7890d) (BuildId: c849b9596314cb5d)
+ #6 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::__1::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1101:55 (d8+0x1a7890d)
+ #7 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1110:12 (d8+0x1a7890d)
+ #8 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:133:7 (d8+0x1a7890d)
+ #9 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:109:5 (d8+0x1a7890d)
+ #10 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a78029) (BuildId: c849b9596314cb5d)
+ #11 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:54:19 (d8+0x1a78029)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5270:16 (d8+0x53b887) (BuildId: c849b9596314cb5d)
+ #13 main src/d8/d8.cc:5553:43 (d8+0x53cf4e) (BuildId: c849b9596314cb5d)
+
+SUMMARY: ThreadSanitizer: data race src/heap/marking.h:205:16 in ClearCellRangeRelaxed
+==================
+ThreadSanitizer: reported 1 warnings
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.expected.json
new file mode 100644
index 0000000000..b3b0203830
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Data race\nREAD 8",
+ "crash_state": "v8::internal::compiler::JSObjectRef::GetOwnConstantElementFromHeap\nv8::internal::compiler::JSObjectRef::GetOwnConstantElement\nv8::internal::compiler::JSNativeContextSpecialization::ReduceElementLoadFromHeap"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.txt
new file mode 100644
index 0000000000..b72ef85bbd
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/data_race_clusterfuzz.txt
@@ -0,0 +1,50 @@
+WARNING: ThreadSanitizer: data race (pid=3187405)
+ Read of size 8 at 0x7ea200056b80 by thread T1:
+ #0 0x55ad577064dc in v8::internal::compiler::JSObjectRef::GetOwnConstantElementFromHeap(v8::internal::FixedArrayBase, v8::internal::ElementsKind, unsigned int) const src/base/memory.h:34:3
+ #1 0x55ad577062aa in v8::internal::compiler::JSObjectRef::GetOwnConstantElement(v8::internal::compiler::FixedArrayBaseRef const&, unsigned int, v8::internal::compiler::CompilationDependencies*) const src/compiler/heap-refs.cc:1815:42
+ #2 0x55ad577c4ff7 in v8::internal::compiler::JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::AccessMode, v8::internal::KeyedAccessLoadMode) src/compiler/js-native-context-specialization.cc:2325:32
+ #3 0x55ad577c2c8f in v8::internal::compiler::JSNativeContextSpecialization::ReduceElementAccess(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::ElementAccessFeedback const&) src/compiler/js-native-context-specialization.cc:2094:27
+ #4 0x55ad577c19f7 in v8::internal::compiler::JSNativeContextSpecialization::ReducePropertyAccess(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::base::Optional<v8::internal::compiler::NameRef>, v8::internal::compiler::Node*, v8::internal::compiler::FeedbackSource const&, v8::internal::compiler::AccessMode) src/compiler/js-native-context-specialization.cc:2413:14
+ #5 0x55ad577b6099 in v8::internal::compiler::JSNativeContextSpecialization::ReduceJSLoadProperty(v8::internal::compiler::Node*) src/compiler/js-native-context-specialization.cc:2564:10
+ #6 0x55ad577afc8f in v8::internal::compiler::JSNativeContextSpecialization::Reduce(v8::internal::compiler::Node*) src/compiler/js-native-context-specialization.cc:108:14
+ #7 0x55ad576d605a in v8::internal::compiler::GraphReducer::Reduce(v8::internal::compiler::Node*) src/compiler/graph-reducer.cc:34:25
+ #8 0x55ad576d59f4 in v8::internal::compiler::GraphReducer::ReduceTop() src/compiler/graph-reducer.cc:178:25
+ #9 0x55ad576d5209 in v8::internal::compiler::GraphReducer::ReduceNode(v8::internal::compiler::Node*) src/compiler/graph-reducer.cc:75:7
+ #10 0x55ad576d5f35 in v8::internal::compiler::GraphReducer::ReduceGraph() src/compiler/graph-reducer.cc:97:36
+ #11 0x55ad57887506 in v8::internal::compiler::InliningPhase::Run(v8::internal::compiler::PipelineData*, v8::internal::Zone*) src/compiler/pipeline.cc:1422:19
+ #12 0x55ad578763c9 in auto v8::internal::compiler::PipelineImpl::Run<v8::internal::compiler::InliningPhase>() src/compiler/pipeline.cc:1319:16
+ #13 0x55ad57872135 in v8::internal::compiler::PipelineImpl::CreateGraph() src/compiler/pipeline.cc:2847:3
+ #14 0x55ad57871f32 in v8::internal::compiler::PipelineCompilationJob::ExecuteJobImpl(v8::internal::RuntimeCallStats*, v8::internal::LocalIsolate*) src/compiler/pipeline.cc:1247:18
+ #15 0x55ad565a9830 in v8::internal::OptimizedCompilationJob::ExecuteJob(v8::internal::RuntimeCallStats*, v8::internal::LocalIsolate*) src/codegen/compiler.cc:494:22
+ #16 0x55ad565ef91e in v8::internal::OptimizingCompileDispatcher::CompileNext(v8::internal::TurbofanCompilationJob*, v8::internal::LocalIsolate*) src/compiler-dispatcher/optimizing-compile-dispatcher.cc:105:12
+ #17 0x55ad565f2555 in v8::internal::OptimizingCompileDispatcher::CompileTask::RunInternal() src/compiler-dispatcher/optimizing-compile-dispatcher.cc:67:20
+ #18 0x55ad5640b898 in non-virtual thunk to v8::internal::CancelableTask::Run() src/tasks/cancelable-task.h:155:7
+ #19 0x55ad57cc46b1 in v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11
+ #20 0x55ad57cb7c23 in v8::base::ThreadEntry(void*) src/base/platform/platform.h:596:5
+ Previous write of size 8 at 0x7ea200056b80 by main thread:
+ #0 0x55ad56a49f14 in v8::internal::(anonymous namespace)::DictionaryElementsAccessor::SetLengthImpl(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSArray>, unsigned int, v8::internal::Handle<v8::internal::FixedArrayBase>) src/base/memory.h:41:3
+ #1 0x55ad56a675ea in v8::internal::(anonymous namespace)::ElementsAccessorBase<v8::internal::(anonymous namespace)::DictionaryElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)13> >::SetLength(v8::internal::Handle<v8::internal::JSArray>, unsigned int) src/objects/elements.cc:758:12
+ #2 0x55ad56c5f9c0 in v8::internal::JSArray::SetLength(v8::internal::Handle<v8::internal::JSArray>, unsigned int) src/objects/objects.cc:5237:40
+ #3 0x55ad564a6616 in v8::internal::Accessors::ArrayLengthSetter(v8::Local<v8::Name>, v8::Local<v8::Value>, v8::PropertyCallbackInfo<v8::Boolean> const&) src/builtins/accessors.cc:202:7
+ #4 0x55ad568e961b in v8::internal::PropertyCallbackArguments::CallAccessorSetter(v8::internal::Handle<v8::internal::AccessorInfo>, v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::Object>) src/api/api-arguments-inl.h:332:3
+ #5 0x55ad568e39df in v8::internal::Runtime_StoreCallbackProperty(int, unsigned long*, v8::internal::Isolate*) src/ic/ic.cc:3245:13
+ #6 0x55acdfe69bb8 in Builtins_AsyncFromSyncIteratorPrototypeThrow (/mnt/scratch0/clusterfuzz/bot/builds/v8-tsan_linux-release_8681dd6e7b8d9ebe281ad96b26057d51e550c14e/revisions/d8-tsan-linux-release-v8-component-83502/d8+0x1c30bb7)
+ #7 0x55ad5668cf86 in v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10
+ #8 0x55ad5642808f in v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2231:7
+ #9 0x55ad564277c1 in v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2158:10
+ #10 0x55ad563e63cc in v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:879:28
+ #11 0x55ad563fe621 in v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4379:10
+ #12 0x55ad56402488 in v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5107:39
+ #13 0x55ad56404fe1 in v8::Shell::Main(int, char**) src/d8/d8.cc:5886:18
+ #14 0x55ad564054ef in main src/d8/d8.cc:5976:43
+ Thread T1 'V8 DefaultWorke' (tid=3187415, running) created by main thread at:
+ #0 0x55ad56353ffc in pthread_create third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3
+ #1 0x55ad57cb7b37 in v8::base::Thread::Start() src/base/platform/platform-posix.cc:1144:14
+ #2 0x55ad57cc40ad in v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:66:3
+ #3 0x55ad57cba1b1 in v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> >) buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:297:37
+ #4 0x55ad57cb983a in v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> >) buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30
+ #5 0x55ad56404252 in v8::Shell::Main(int, char**) src/d8/d8.cc:5691:16
+ #6 0x55ad564054ef in main src/d8/d8.cc:5976:43
+SUMMARY: ThreadSanitizer: data race src/base/memory.h:34:3 in v8::internal::compiler::JSObjectRef::GetOwnConstantElementFromHeap(v8::internal::FixedArrayBase, v8::internal::ElementsKind, unsigned int) const
+==================
+ThreadSanitizer: reported 1 warnings \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.expected.json
new file mode 100644
index 0000000000..861f3ca23d
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "",
+ "crash_state": ""
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.txt
new file mode 100644
index 0000000000..767adce4c3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/deserialization.txt
@@ -0,0 +1,43 @@
+
+<--- Last few GCs --->
+
+
+<--- JS stacktrace --->
+
+
+#
+# Fatal javascript OOM in GC during deserialization
+#
+
+Received signal 6
+
+==== C stack trace ===============================
+
+ [0x559eb24c1167]
+ [0x7f27da846980]
+ [0x7f27d9ecbfb7]
+ [0x7f27d9ecd921]
+ [0x559eb24bec92]
+ [0x559eb192b71f]
+ [0x559eb192b685]
+ [0x559eb1abd785]
+ [0x559eb1abc125]
+ [0x559eb1ac6bfb]
+ [0x559eb1ac6c91]
+ [0x559eb1e5f0c8]
+ [0x559eb1e604a0]
+ [0x559eb1e5f99e]
+ [0x559eb1ac53c8]
+ [0x559eb1e6dbf8]
+ [0x559eb1a5eb21]
+ [0x559eb1a5ef79]
+ [0x559eb1e6b9a9]
+ [0x559eb194c069]
+ [0x559eb194c23d]
+ [0x559eb1919e3a]
+ [0x559eb1919da8]
+ [0x559eb24bf710]
+ [0x7f27da83b6db]
+ [0x7f27d9fae71f]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.expected.json
new file mode 100644
index 0000000000..861f3ca23d
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "",
+ "crash_state": ""
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.txt
new file mode 100644
index 0000000000..e2e57c1b8e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/fata_js_oom.txt
@@ -0,0 +1,48 @@
+
+<--- Last few GCs --->
+
+
+<--- JS stacktrace --->
+
+
+#
+# Fatal javascript OOM in GC during deserialization
+#
+
+Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7f31c97776b7]
+ [0x7f31c631b980]
+ [0x7f31c5bb8e87]
+ [0x7f31c5bba7f1]
+ [0x7f31c9775332]
+ [0x7f31c7c8daf5]
+ [0x7f31c7c8d97d]
+ [0x7f31c81a33a5]
+ [0x7f31c81a15ed]
+ [0x7f31c81914df]
+ [0x7f31c81915c1]
+ [0x7f31c8134c33]
+ [0x7f31c8adbc18]
+ [0x7f31c8ad7af1]
+ [0x7f31c8ada0d5]
+ [0x7f31c8ad7fdd]
+ [0x7f31c8adb0be]
+ [0x7f31c8ad9c46]
+ [0x7f31c81b4499]
+ [0x7f31c81adcc9]
+ [0x7f31c8b16d0e]
+ [0x7f31c809ed03]
+ [0x7f31c809fcb1]
+ [0x7f31c8b114b7]
+ [0x7f31c7d02526]
+ [0x7f31c7d02b2d]
+ [0x565117ee00ed]
+ [0x565117ee0008]
+ [0x7f31c9776470]
+ [0x7f31c63106db]
+ [0x7f31c5c9b61f]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.expected.json
new file mode 100644
index 0000000000..6faae98ca7
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "DCHECK failure",
+ "crash_state": "!has_optimized_code() || optimized_code().marked_for_deoptimization() || (CodeKi"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.txt
new file mode 100644
index 0000000000..9727783e07
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/has_optimized_code.txt
@@ -0,0 +1,30 @@
+
+
+#
+# Fatal error in ../../src/objects/feedback-vector.cc, line 396
+# Debug check failed: !has_optimized_code() || optimized_code().marked_for_deoptimization() || (CodeKindCanTierUp(optimized_code().kind()) && optimized_code().kind() < code.kind()) || v8_flags.stress_concurrent_inlining_attach_code.
+#
+#
+#
+#FailureMessage Object: 0x7fffe8420b90Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7fdffc28c263]
+ [0x7fdffc28c1b1]
+ [0x7fdff8747980]
+ [0x7fdff7fe4e87]
+ [0x7fdff7fe67f1]
+ [0x7fdffc289de2]
+ [0x7fdffc26bbe0]
+ [0x7fdffc26b675]
+ [0x7fdffae468b2]
+ [0x7fdffa6c2f1b]
+ [0x7fdffa6c3517]
+ [0x7fdffb4f49b5]
+ [0x7fdffa8bc015]
+ [0x7fdffb2fa1e4]
+ [0x7fdffb2f9cd7]
+ [0x7fdf7f9c83ff]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.expected.json
new file mode 100644
index 0000000000..861f3ca23d
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "",
+ "crash_state": ""
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.txt
new file mode 100644
index 0000000000..f1ab82a2e1
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/heap_limit.txt
@@ -0,0 +1,33 @@
+
+<--- Last few GCs --->
+nd[29661:0x563245f31e60] 3744 ms: Mark-sweep 94.8 (110.0) -> 92.7 (109.5) MB, 376.4 / 0.0 ms (+ 17.3 ms in 1 steps since start of marking, biggest step 17.3 ms, walltime since start of marking 460 ms) (average mu = 0.267, current mu = 0.191) background[29661:0x563245f31e60] 4198 ms: Mark-sweep 96.8 (113.0) -> 95.2 (111.7) MB, 377.4 / 0.0 ms (+ 0.0 ms in 0 steps since start of marking, biggest step 0.0 ms, walltime since start of marking 431 ms) (average mu = 0.221, current mu = 0.169) background a
+
+<--- JS stacktrace --->
+
+
+#
+# Fatal javascript OOM in Reached heap limit
+#
+
+Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7ff0cca95ee7]
+ [0x7ff0c9876980]
+ [0x7ff0c9111fb7]
+ [0x7ff0c9113921]
+ [0x7ff0cca93c52]
+ [0x7ff0cb13fc25]
+ [0x7ff0cb13faad]
+ [0x7ff0cb5e77d5]
+ [0x7ff0cb5e5a93]
+ [0x7ff0cb5ea284]
+ [0x7ff0cb58ad35]
+ [0x7ff0cb5f9306]
+ [0x7ff0cb5f9595]
+ [0x7ff0cb58920a]
+ [0x7ff0cbdead26]
+ [0x7ff0cab572bf]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.expected.json
new file mode 100644
index 0000000000..5bc889cc2b
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "DCHECK failure",
+ "crash_state": "is_main_thread_barrier_ in marking-barrier.cc"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.txt
new file mode 100644
index 0000000000..08de978e46
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier.txt
@@ -0,0 +1,30 @@
+
+
+#
+# Fatal error in ../../src/heap/marking-barrier.cc, line 77
+# Debug check failed: is_main_thread_barrier_.
+#
+#
+#
+#FailureMessage Object: 0x7fbcb88255c0Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7fbcc0a3a7f7]
+ [0x7fbcbd9c1980]
+ [0x7fbcbd25efb7]
+ [0x7fbcbd260921]
+ [0x7fbcc0a38932]
+ [0x7fbcc0a1dfa0]
+ [0x7fbcc0a1da15]
+ [0x7fbcbf65b64a]
+ [0x7fbcbfeb60df]
+ [0x7fbcbfec3389]
+ [0x7fbcbfec324c]
+ [0x7fbcbfea5c34]
+ [0x7fbcbfea7f18]
+ [0x7fbcc0a395a0]
+ [0x7fbcbd9b66db]
+ [0x7fbcbd34171f]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.expected.json
new file mode 100644
index 0000000000..5bc889cc2b
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "DCHECK failure",
+ "crash_state": "is_main_thread_barrier_ in marking-barrier.cc"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.txt
new file mode 100644
index 0000000000..29012581e3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/is_main_thread_barrier_2.txt
@@ -0,0 +1,26 @@
+
+
+#
+# Fatal error in ../../src/heap/marking-barrier.cc, line 77
+# Debug check failed: is_main_thread_barrier_.
+#
+#
+#
+#FailureMessage Object: 0x7fa0116815c0
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/libv8_libbase.so(v8::base::debug::StackTrace::StackTrace()+0x13) [0x7fa01988e8c3]
+ /b/s/w/ir/out/build/libv8_libplatform.so(+0x1bfed) [0x7fa01983efed]
+ /b/s/w/ir/out/build/libv8_libbase.so(V8_Fatal(char const*, int, char const*, ...)+0x153) [0x7fa019871fb3]
+ /b/s/w/ir/out/build/libv8_libbase.so(+0x24a35) [0x7fa019871a35]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::MarkingBarrier::Write(v8::internal::DescriptorArray, int)+0xca) [0x7fa0184b52fa]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Deserializer<v8::internal::LocalIsolate>::WeakenDescriptorArrays()+0x8f) [0x7fa018d1170f]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::OffThreadObjectDeserializer::Deserialize(std::__Cr::vector<v8::internal::Handle<v8::internal::Script>, std::__Cr::allocator<v8::internal::Handle<v8::internal::Script> > >*)+0xc9) [0x7fa018d1e9b9]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::OffThreadObjectDeserializer::DeserializeSharedFunctionInfo(v8::internal::LocalIsolate*, v8::internal::SerializedCodeData const*, std::__Cr::vector<v8::internal::Handle<v8::internal::Script>, std::__Cr::allocator<v8::internal::Handle<v8::internal::Script> > >*)+0x1ac) [0x7fa018d1e87c]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::CodeSerializer::StartDeserializeOffThread(v8::internal::LocalIsolate*, v8::internal::AlignedCachedData*)+0x144) [0x7fa018d01264]
+ /b/s/w/ir/out/build/libv8.so(+0x2079548) [0x7fa018d03548]
+ /b/s/w/ir/out/build/libv8_libbase.so(+0x405c0) [0x7fa01988d5c0]
+ /lib/x86_64-linux-gnu/libpthread.so.0(+0x76db) [0x7fa0168116db]
+ /lib/x86_64-linux-gnu/libc.so.6(clone+0x3f) [0x7fa01619c71f]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.expected.json
new file mode 100644
index 0000000000..5d29bfb52d
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "CHECK failure",
+ "crash_state": "JSFunctionRef construction failed"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.txt
new file mode 100644
index 0000000000..3983b0638e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/js_func_ref.txt
@@ -0,0 +1,55 @@
+
+
+#
+# Fatal error in , line 0
+# Check failed: JSFunctionRef construction failed.
+#
+#
+#
+#FailureMessage Object: 0x7fe2840bb790Received signal 6
+
+==== C stack trace ===============================
+
+ [0x560fb0a62e08]
+ [0x560fb1e5dbdc]
+ [0x560fb0a4be9c]
+ [0x560fb0a83a8c]
+ [0x7fe288cdf980]
+ [0x7fe28815cfb7]
+ [0x7fe28815e921]
+ [0x560fb0a4ae48]
+ [0x560fb1e5a0d7]
+ [0x560fb1e4dd5f]
+ [0x560fb19082b3]
+ [0x560fb19c1975]
+ [0x560fb1917895]
+ [0x560fb190b1b4]
+ [0x560fb1935a48]
+ [0x560fb1aab7de]
+ [0x560fb1aac3d2]
+ [0x560fb19f6cb0]
+ [0x560fb19ed1eb]
+ [0x560fb19e96f5]
+ [0x560fb19ed32e]
+ [0x560fb19e4111]
+ [0x560fb19def82]
+ [0x560fb18f6cee]
+ [0x560fb18f6348]
+ [0x560fb18f5b69]
+ [0x560fb18f6be5]
+ [0x560fb1aa51ed]
+ [0x560fb1a96dea]
+ [0x560fb1a9259d]
+ [0x560fb1a92825]
+ [0x560fb0bfc5d0]
+ [0x560fb0c3812e]
+ [0x560fb0c3b775]
+ [0x560fb0ae4046]
+ [0x560fb0abdbd2]
+ [0x560fb1e67121]
+ [0x560fb1e5afb3]
+ [0x560fb0a4526d]
+ [0x7fe288cd46db]
+ [0x7fe28823f71f]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.expected.json
new file mode 100644
index 0000000000..cff36dc732
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "CHECK failure",
+ "crash_state": "marking_state_->IsBlack(heap_object) in mark-compact.cc"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.txt
new file mode 100644
index 0000000000..5acc40bfb3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/marking_state.txt
@@ -0,0 +1,37 @@
+
+
+#
+# Fatal error in ../../src/heap/mark-compact.cc, line 285
+# Check failed: marking_state_->IsBlack(heap_object).
+#
+#
+#
+#FailureMessage Object: 0x7ffce64f0700
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/libv8_libbase.so(v8::base::debug::StackTrace::StackTrace()+0x13) [0x7f644a73e4b3]
+ /b/s/w/ir/out/build/libv8_libplatform.so(+0x1bc5d) [0x7f644a6e2c5d]
+ /b/s/w/ir/out/build/libv8_libbase.so(V8_Fatal(char const*, int, char const*, ...)+0x153) [0x7f644a71e653]
+ /b/s/w/ir/out/build/libv8.so(+0x1b80d25) [0x7f6449080d25]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::CommonFrame::IterateExpressions(v8::internal::RootVisitor*) const+0x144) [0x7f6448eed414]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::JavaScriptFrame::Iterate(v8::internal::RootVisitor*) const+0x16) [0x7f6448efa386]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Isolate::Iterate(v8::internal::RootVisitor*, v8::internal::ThreadLocalTop*)+0x249) [0x7f6448f072c9]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Heap::IterateRoots(v8::internal::RootVisitor*, v8::base::EnumSet<v8::internal::SkipRoot, int>)+0x175) [0x7f6449021935]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Heap::IterateRootsIncludingClients(v8::internal::RootVisitor*, v8::base::EnumSet<v8::internal::SkipRoot, int>)+0x42) [0x7f6449028362]
+ /b/s/w/ir/out/build/libv8.so(+0x1b64e35) [0x7f6449064e35]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::MarkCompactCollector::VerifyMarking()+0xaf) [0x7f6449061dbf]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::MarkCompactCollector::CollectGarbage()+0x27) [0x7f644905f487]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Heap::MarkCompact()+0x2a1) [0x7f644901abb1]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Heap::PerformGarbageCollection(v8::internal::GarbageCollector, v8::internal::GarbageCollectionReason, char const*, v8::GCCallbackFlags)+0x5b9) [0x7f6449017949]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Heap::PerformSharedGarbageCollection(v8::internal::Isolate*, v8::internal::GarbageCollectionReason)+0x1a2) [0x7f644901c552]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::HeapAllocator::AllocateRawWithLightRetrySlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment)+0x49) [0x7f64490064e9]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::HeapAllocator::AllocateRawWithRetryOrFailSlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment)+0x21) [0x7f6449006621]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::HeapObject v8::internal::HeapAllocator::AllocateRawWith<(v8::internal::HeapAllocator::AllocationRetryMode)1>(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment)+0x73) [0x7f6448fb3fe3]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::FactoryBase<v8::internal::Factory>::AllocateRawArray(int, v8::internal::AllocationType)+0x1d) [0x7f6448f9d3bd]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::FactoryBase<v8::internal::Factory>::NewFixedArrayWithFiller(v8::internal::Handle<v8::internal::Map>, int, v8::internal::Handle<v8::internal::Oddball>, v8::internal::AllocationType)+0x40) [0x7f6448f9c720]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::FactoryBase<v8::internal::Factory>::NewFixedArray(int, v8::internal::AllocationType)+0x75) [0x7f6448f9c665]
+ /b/s/w/ir/out/build/libv8.so(+0x178c16f) [0x7f6448c8c16f]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Builtin_SharedArrayConstructor(int, unsigned long*, v8::internal::Isolate*)+0xa4) [0x7f6448c8bbe4]
+ [0x7f63df9cf67f]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.expected.json
new file mode 100644
index 0000000000..267cf60619
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "CHECK failure",
+ "crash_state": "!maybe.IsCleared() in code-inl.h"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.txt
new file mode 100644
index 0000000000..550b5f4fee
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/maybe_is_cleared.txt
@@ -0,0 +1,21 @@
+
+
+#
+# Fatal error in ../../src/objects/code-inl.h, line 1210
+# Check failed: !maybe.IsCleared().
+#
+#
+#
+#FailureMessage Object: 0x7ffc7bce3850
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/libv8_libbase.so(v8::base::debug::StackTrace::StackTrace()+0x13) [0x7f943d292143]
+ /b/s/w/ir/out/build/libv8_libplatform.so(+0x1c1bd) [0x7f943d2421bd]
+ /b/s/w/ir/out/build/libv8_libbase.so(V8_Fatal(char const*, int, char const*, ...)+0x153) [0x7f943d2758b3]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::DeoptimizationLiteralArray::get(int) const+0xcd) [0x7f943bbea88d]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::TranslatedState::CreateNextTranslatedValue(int, v8::internal::TranslationArrayIterator*, v8::internal::DeoptimizationLiteralArray, unsigned long, v8::internal::RegisterValues*, _IO_FILE*)+0x7b7) [0x7f943bbee387]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::TranslatedState::Init(v8::internal::Isolate*, unsigned long, unsigned long, v8::internal::TranslationArrayIterator*, v8::internal::DeoptimizationLiteralArray, v8::internal::RegisterValues*, _IO_FILE*, int, int)+0x41f) [0x7f943bbefe5f]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Deoptimizer::DoComputeOutputFrames()+0x8ab) [0x7f943bbde04b]
+ [0xd3007600544]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.expected.json
new file mode 100644
index 0000000000..1fa11c9a13
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Fatal error",
+ "crash_state": "Missing deoptimization information for OptimizedFrame::Summarize"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.txt
new file mode 100644
index 0000000000..d4b766ffc7
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/missing_deoptimization.txt
@@ -0,0 +1,24 @@
+
+
+#
+# Fatal error in , line 0
+# Missing deoptimization information for OptimizedFrame::Summarize.
+#
+#
+#
+#FailureMessage Object: 0x7ffc291ecd70
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/d8(__interceptor_backtrace+0xca) [0x559a3689f74a]
+ /b/s/w/ir/out/build/d8(+0x1d3da40) [0x559a380f9a40]
+ /b/s/w/ir/out/build/d8(+0x1d3cd54) [0x559a380f8d54]
+ /b/s/w/ir/out/build/d8(+0x1d2af46) [0x559a380e6f46]
+ /b/s/w/ir/out/build/d8(+0x803da6) [0x559a36bbfda6]
+ /b/s/w/ir/out/build/d8(+0x7fc16d) [0x559a36bb816d]
+ /b/s/w/ir/out/build/d8(+0x8180f8) [0x559a36bd40f8]
+ /b/s/w/ir/out/build/d8(+0x816ad4) [0x559a36bd2ad4]
+ /b/s/w/ir/out/build/d8(+0x8163a4) [0x559a36bd23a4]
+ /b/s/w/ir/out/build/d8(+0xf772fc) [0x559a373332fc]
+ [0x5599bfe6fe78]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.expected.json
new file mode 100644
index 0000000000..b7d8cae391
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Null-dereference",
+ "crash_state": "NULL"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.txt
new file mode 100644
index 0000000000..31f2ecf636
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/seg_map_err.txt
@@ -0,0 +1,13 @@
+Received signal 11 SEGV_MAPERR 000000000030
+
+==== C stack trace ===============================
+
+ [0x55b2023882a7]
+ [0x7f044e972980]
+ [0x7f044e969fd0]
+ [0x55b201949b84]
+ [0x55b20194b6da]
+ [0x55b201cf7206]
+ [0x0ad7000b4778]
+[end of stack trace]
+Segmentation fault (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.expected.json
new file mode 100644
index 0000000000..1db5d5d168
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "UNKNOWN",
+ "crash_state": "NULL"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.txt
new file mode 100644
index 0000000000..74cb7e5076
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_error.txt
@@ -0,0 +1,9 @@
+Received signal 11 SEGV_ACCERR 2e2d00002258
+
+==== C stack trace ===============================
+
+ [0x5562678976c6]
+ [0x7f36e5f68980]
+ [0x5561e008adaf]
+[end of stack trace]
+Segmentation fault (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.expected.json
new file mode 100644
index 0000000000..b7d8cae391
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Null-dereference",
+ "crash_state": "NULL"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.txt
new file mode 100644
index 0000000000..0e3624729b
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/segmentation_fault.txt
@@ -0,0 +1,15 @@
+Received signal 11 SEGV_MAPERR 000000000000
+
+==== C stack trace ===============================
+
+ [0x562f1fdf8d47]
+ [0x7f32df0c6980]
+ [0x562f1f293057]
+ [0x562f1f292f0e]
+ [0x562f1fdf9fd7]
+ [0x562f1fdfc139]
+ [0x562f1fdf72f0]
+ [0x7f32df0bb6db]
+ [0x7f32de82e71f]
+[end of stack trace]
+Segmentation fault (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.expected.json
new file mode 100644
index 0000000000..5bc889cc2b
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "DCHECK failure",
+ "crash_state": "is_main_thread_barrier_ in marking-barrier.cc"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.txt
new file mode 100644
index 0000000000..1824808160
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/stack_frame.txt
@@ -0,0 +1,60 @@
+
+
+#
+# Fatal error in ../../src/execution/frames.cc, line 1430
+# Debug check failed: !StackFrame::IsTypeMarker(marker).
+#
+#
+#
+#FailureMessage Object: 0x7fff924da4a0Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7fb3c0ab1503]
+ [0x7fb3c0ab1451]
+ [0x7fb3bd06a980]
+ [0x7fb3bc907e87]
+ [0x7fb3bc9097f1]
+ [0x7fb3c0aaf082]
+ [0x7fb3c0a90ed0]
+ [0x7fb3c0a90965]
+ [0x7fb3bf128909]
+ [0x7fb3bf1408c9]
+ [0x7fb3bf27ec15]
+ [0x7fb3bf27776b]
+ [0x7fb3bf274c53]
+ [0x7fb3bf271e03]
+ [0x7fb3bf262ae1]
+ [0x7fb3bf262b71]
+ [0x7fb3bf20b953]
+ [0x7fb3bf1d3a22]
+ [0x7fb3c01f46ca]
+
+#
+# Fatal error in ../../src/heap/marking-barrier.cc, line 77
+# Debug check failed: is_main_thread_barrier_.
+#
+#
+#
+#FailureMessage Object: 0x7fbcb88255c0Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7fbcc0a3a7f7]
+ [0x7fbcbd9c1980]
+ [0x7fbcbd25efb7]
+ [0x7fbcbd260921]
+ [0x7fbcc0a38932]
+ [0x7fbcc0a1dfa0]
+ [0x7fbcc0a1da15]
+ [0x7fbcbf65b64a]
+ [0x7fbcbfeb60df]
+ [0x7fbcbfec3389]
+ [0x7fbcbfec324c]
+ [0x7fbcbfea5c34]
+ [0x7fbcbfea7f18]
+ [0x7fbcc0a395a0]
+ [0x7fbcbd9b66db]
+ [0x7fbcbd34171f]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.expected.json
new file mode 100644
index 0000000000..2757592999
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Static Assertion",
+ "crash_state": "Expected Turbofan static assert to hold, but got non-true input"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.txt
new file mode 100644
index 0000000000..a8b7760eb7
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_1.txt
@@ -0,0 +1,37 @@
+
+
+#
+# Fatal error in ../../src/compiler/backend/instruction-selector.cc, line 3202
+# Expected Turbofan static assert to hold, but got non-true input:
+ %TurbofanStaticAssert
+#
+#
+#
+#FailureMessage Object: 0x7ffc34427560Received signal 6
+
+==== C stack trace ===============================
+
+ [0x7efe1f546b93]
+ [0x7efe1f546ae2]
+ [0x7efe1b907980]
+ [0x7efe1b1a4e87]
+ [0x7efe1b1a67f1]
+ [0x7efe1f544712]
+ [0x7efe1f5263d2]
+ [0x7efe1ed5b249]
+ [0x7efe1ed58a58]
+ [0x7efe1ed4e6fa]
+ [0x7efe1ed4d4d6]
+ [0x7efe1f0ba876]
+ [0x7efe1f07023b]
+ [0x7efe1f06837b]
+ [0x7efe1f062006]
+ [0x7efe1f061580]
+ [0x7efe1d8c2710]
+ [0x7efe1d8d5502]
+ [0x7efe1d8d8a55]
+ [0x7efe1e509ec6]
+ [0x7efe1e509879]
+ [0x7efd9f9da37f]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.expected.json
new file mode 100644
index 0000000000..2757592999
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Static Assertion",
+ "crash_state": "Expected Turbofan static assert to hold, but got non-true input"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.txt
new file mode 100644
index 0000000000..7fc1f21a1a
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/static_assertion_2.txt
@@ -0,0 +1,37 @@
+unsupported node type: StaticAssert[%TurbofanStaticAssert]
+132: StaticAssert[%TurbofanStaticAssert](113, 319)
+ 113: Word32Equal(130, 128)
+ 319: DeoptimizeUnless[WrongCallTarget, FeedbackSource(INVALID)](86, 253, 303, 129)
+
+
+#
+# Fatal error in , line 0
+# unimplemented code
+#
+#
+#
+#FailureMessage Object: 0x7fff6c485990Received signal 6
+
+==== C stack trace ===============================
+
+ [0x55b035fcd8da]
+ [0x55b03798aebb]
+ [0x55b035fb3607]
+ [0x55b035fb3b24]
+ [0x7fb750346980]
+ [0x7fb74f5a9e87]
+ [0x7fb74f5ab7f1]
+ [0x55b035fb2283]
+ [0x55b037985d87]
+ [0x55b0379767ab]
+ [0x55b03765bdf7]
+ [0x55b03764ee25]
+ [0x55b0374fdbcb]
+ [0x55b0374f7d42]
+ [0x55b0374f6c77]
+ [0x55b0362183f8]
+ [0x55b03621b230]
+ [0x55b036a886a8]
+ [0x55b03776c678]
+[end of stack trace]
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.expected.json
new file mode 100644
index 0000000000..35f5d3d799
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "CHECK failure",
+ "crash_state": "storage_.is_populated_"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.txt
new file mode 100644
index 0000000000..b47006d570
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/storage_is_populated.txt
@@ -0,0 +1,35 @@
+
+
+#
+# Fatal error in , line 0
+# Check failed: storage_.is_populated_.
+#
+#
+#
+#FailureMessage Object: 0x7f9d3a7b77b0
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/d8(+0xec0b43) [0x55e9d253cb43]
+ /b/s/w/ir/out/build/d8(+0xec03eb) [0x55e9d253c3eb]
+ /b/s/w/ir/out/build/d8(+0xeb68e5) [0x55e9d25328e5]
+ /b/s/w/ir/out/build/d8(+0xb429ac) [0x55e9d21be9ac]
+ /b/s/w/ir/out/build/d8(+0xb528b2) [0x55e9d21ce8b2]
+ /b/s/w/ir/out/build/d8(+0xc228ec) [0x55e9d229e8ec]
+ /b/s/w/ir/out/build/d8(+0xc1cc2c) [0x55e9d2298c2c]
+ /b/s/w/ir/out/build/d8(+0xc1cb40) [0x55e9d2298b40]
+ /b/s/w/ir/out/build/d8(+0xb9dfcf) [0x55e9d2219fcf]
+ /b/s/w/ir/out/build/d8(+0xb9db13) [0x55e9d2219b13]
+ /b/s/w/ir/out/build/d8(+0xb9d718) [0x55e9d2219718]
+ /b/s/w/ir/out/build/d8(+0xc9e908) [0x55e9d231a908]
+ /b/s/w/ir/out/build/d8(+0xc93912) [0x55e9d230f912]
+ /b/s/w/ir/out/build/d8(+0xc908f1) [0x55e9d230c8f1]
+ /b/s/w/ir/out/build/d8(+0xc90a8e) [0x55e9d230ca8e]
+ /b/s/w/ir/out/build/d8(+0x420adb) [0x55e9d1a9cadb]
+ /b/s/w/ir/out/build/d8(+0x442558) [0x55e9d1abe558]
+ /b/s/w/ir/out/build/d8(+0x443082) [0x55e9d1abf082]
+ /b/s/w/ir/out/build/d8(+0xec3e29) [0x55e9d253fe29]
+ /b/s/w/ir/out/build/d8(+0xebf060) [0x55e9d253b060]
+ /lib/x86_64-linux-gnu/libpthread.so.0(+0x76db) [0x7f9d3bb276db]
+ /lib/x86_64-linux-gnu/libc.so.6(clone+0x3f) [0x7f9d3b29a71f]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.expected.json
new file mode 100644
index 0000000000..e450139659
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "DCHECK failure",
+ "crash_state": "topmost_optimized_code.is_null() || safe_if_deopt_triggered || is_builtin_code i"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.txt
new file mode 100644
index 0000000000..0dcbcbe04f
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/topmost_optimized_code.txt
@@ -0,0 +1,24 @@
+
+
+#
+# Fatal error in ../../src/deoptimizer/deoptimizer.cc, line 336
+# Debug check failed: topmost_optimized_code.is_null() || safe_if_deopt_triggered || is_builtin_code.
+#
+#
+#
+#FailureMessage Object: 0x7ffd90181f90
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/libv8_libbase.so(v8::base::debug::StackTrace::StackTrace()+0x13) [0x7f1c0ac48243]
+ /b/s/w/ir/out/build/libv8_libplatform.so(+0x1865d) [0x7f1c0abf065d]
+ /b/s/w/ir/out/build/libv8_libbase.so(V8_Fatal(char const*, int, char const*, ...)+0x153) [0x7f1c0ac27bb3]
+ /b/s/w/ir/out/build/libv8_libbase.so(+0x2a655) [0x7f1c0ac27655]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Deoptimizer::DeoptimizeMarkedCodeForContext(v8::internal::NativeContext)+0x2e2) [0x7f1c0918d762]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Deoptimizer::DeoptimizeMarkedCode(v8::internal::Isolate*)+0x1d5) [0x7f1c0918f1c5]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Heap::DeoptMarkedAllocationSites()+0x5b) [0x7f1c0939e2eb]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::StackGuard::HandleInterrupts()+0x90c) [0x7f1c0929c4dc]
+ /b/s/w/ir/out/build/libv8.so(+0x25908c4) [0x7f1c09cd58c4]
+ /b/s/w/ir/out/build/libv8.so(v8::internal::Runtime_StackGuard(int, unsigned long*, v8::internal::Isolate*)+0xb7) [0x7f1c09cd53b7]
+ [0x7f1b9f9cc3ff]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.expected.json
new file mode 100644
index 0000000000..128813e84c
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Type Assertion",
+ "crash_state": "Trace/breakpoint trap (core dumped)"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.txt
new file mode 100644
index 0000000000..42be39ff66
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_1.txt
@@ -0,0 +1 @@
+Trace/breakpoint trap (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.expected.json
new file mode 100644
index 0000000000..128813e84c
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Type Assertion",
+ "crash_state": "Trace/breakpoint trap (core dumped)"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.txt
new file mode 100644
index 0000000000..ded5a0c24a
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/type_assertion_2.txt
@@ -0,0 +1,5 @@
+Type assertion failed! (value/expectedType/nodeId)
+0x5510000ffcf5 <HeapNumber -2147483647.0>
+0x551000058dfd <Other heap object (TURBOFAN_UNION_TYPE_TYPE)>
+5156
+Trace/breakpoint trap (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.expected.json b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.expected.json
new file mode 100644
index 0000000000..bead12540a
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.expected.json
@@ -0,0 +1,4 @@
+{
+ "crash_type": "Unreachable code",
+ "crash_state": "NULL"
+} \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.txt
new file mode 100644
index 0000000000..75691ec252
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/analyze_crash/unreachable_code.txt
@@ -0,0 +1,28 @@
+
+
+#
+# Fatal error in , line 0
+# unimplemented code
+#
+#
+#
+#FailureMessage Object: 0x7ff272c984b0
+==== C stack trace ===============================
+
+ /b/s/w/ir/out/build/d8(+0x11db253) [0x55cf35eeb253]
+ /b/s/w/ir/out/build/d8(+0x11daa7b) [0x55cf35eeaa7b]
+ /b/s/w/ir/out/build/d8(+0x11ced35) [0x55cf35eded35]
+ /b/s/w/ir/out/build/d8(+0xffd0ee) [0x55cf35d0d0ee]
+ /b/s/w/ir/out/build/d8(+0xff82d7) [0x55cf35d082d7]
+ /b/s/w/ir/out/build/d8(+0xf4362a) [0x55cf35c5362a]
+ /b/s/w/ir/out/build/d8(+0xf3f1ab) [0x55cf35c4f1ab]
+ /b/s/w/ir/out/build/d8(+0xf3e850) [0x55cf35c4e850]
+ /b/s/w/ir/out/build/d8(+0x4d80ab) [0x55cf351e80ab]
+ /b/s/w/ir/out/build/d8(+0x50a448) [0x55cf3521a448]
+ /b/s/w/ir/out/build/d8(+0x50aef9) [0x55cf3521aef9]
+ /b/s/w/ir/out/build/d8(+0x11de7e9) [0x55cf35eee7e9]
+ /b/s/w/ir/out/build/d8(+0x11d8c30) [0x55cf35ee8c30]
+ /lib/x86_64-linux-gnu/libpthread.so.0(+0x76db) [0x7ff2750456db]
+ /lib/x86_64-linux-gnu/libc.so.6(clone+0x3f) [0x7ff2747b861f]
+Received signal 6
+Aborted (core dumped)
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.expected
new file mode 100644
index 0000000000..56f9af4f51
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.expected
@@ -0,0 +1,3 @@
+SetFlag
+MarkWasUsedForAllocation
+v8::internal::PagedSpaceBase::SetLinearAllocationArea(unsigned long, unsigned long) \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.txt
new file mode 100644
index 0000000000..e1e21610e0
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_1.txt
@@ -0,0 +1,91 @@
+==================
+WARNING: ThreadSanitizer: data race (pid=24781)
+ Write of size 8 at 0x7e9b00080008 by main thread (mutexes: write M0):
+ #0 operator|= src/base/flags.h:47:11 (d8+0xa03eff) (BuildId: 2d1896fcafe8220c)
+ #1 operator|= src/base/flags.h:66:46 (d8+0xa03eff)
+ #2 SetFlag src/heap/basic-memory-chunk.h:198:48 (d8+0xa03eff)
+ #3 MarkWasUsedForAllocation src/heap/memory-chunk.h:223:37 (d8+0xa03eff)
+ #4 v8::internal::PagedSpaceBase::SetLinearAllocationArea(unsigned long, unsigned long) src/heap/paged-spaces.cc:435:13 (d8+0xa03eff)
+ #5 v8::internal::PagedSpaceBase::TryAllocationFromFreeListMain(unsigned long, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:653:3 (d8+0xa04b8e) (BuildId: 2d1896fcafe8220c)
+ #6 v8::internal::PagedSpaceBase::RawRefillLabMain(int, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:991:7 (d8+0xa066b4) (BuildId: 2d1896fcafe8220c)
+ #7 v8::internal::PagedSpaceBase::RefillLabMain(int, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:960:10 (d8+0xa065f7) (BuildId: 2d1896fcafe8220c)
+ #8 EnsureAllocation src/heap/paged-spaces-inl.h:89:10 (d8+0x9f33a8) (BuildId: 2d1896fcafe8220c)
+ #9 EnsureAllocation src/heap/new-spaces-inl.h:109:24 (d8+0x9f33a8)
+ #10 v8::internal::PagedNewSpace::EnsureAllocation(int, v8::internal::AllocationAlignment, v8::internal::AllocationOrigin, int*) src/heap/new-spaces.h:800:25 (d8+0x9f33a8)
+ #11 AllocateRawUnaligned src/heap/spaces-inl.h:277:8 (d8+0x8b41ea) (BuildId: 2d1896fcafe8220c)
+ #12 AllocateRawSlow src/heap/spaces-inl.h:331:13 (d8+0x8b41ea)
+ #13 AllocateRaw src/heap/spaces-inl.h:269:31 (d8+0x8b41ea)
+ #14 AllocateRaw<(v8::internal::AllocationType)0> src/heap/heap-allocator-inl.h:107:28 (d8+0x8b41ea)
+ #15 AllocateRawWith<(v8::internal::HeapAllocator::AllocationRetryMode)1> src/heap/heap-allocator-inl.h:230:14 (d8+0x8b41ea)
+ #16 v8::internal::Factory::AllocateRawWithAllocationSite(v8::internal::Handle<v8::internal::Map>, v8::internal::AllocationType, v8::internal::Handle<v8::internal::AllocationSite>) src/heap/factory.cc:356:36 (d8+0x8b41ea)
+ #17 v8::internal::Factory::NewJSObjectFromMap(v8::internal::Handle<v8::internal::Map>, v8::internal::AllocationType, v8::internal::Handle<v8::internal::AllocationSite>) src/heap/factory.cc:2728:7 (d8+0x8ba650) (BuildId: 2d1896fcafe8220c)
+ #18 __RT_impl_Runtime_NewArray src/runtime/runtime-array.cc:116:58 (d8+0xfa02b1) (BuildId: 2d1896fcafe8220c)
+ #19 v8::internal::Runtime_NewArray(int, unsigned long*, v8::internal::Isolate*) src/runtime/runtime-array.cc:44:1 (d8+0xfa02b1)
+ #20 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1c3cd77)
+ #21 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x8187b5) (BuildId: 2d1896fcafe8220c)
+ #22 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2151:7 (d8+0x5b210e) (BuildId: 2d1896fcafe8220c)
+ #23 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2078:10 (d8+0x5b1840) (BuildId: 2d1896fcafe8220c)
+ #24 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:879:28 (d8+0x5703cb) (BuildId: 2d1896fcafe8220c)
+ #25 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4379:10 (d8+0x588620) (BuildId: 2d1896fcafe8220c)
+ #26 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5107:39 (d8+0x58c487) (BuildId: 2d1896fcafe8220c)
+ #27 v8::Shell::Main(int, char**) src/d8/d8.cc:5886:18 (d8+0x58efe0) (BuildId: 2d1896fcafe8220c)
+ #28 main src/d8/d8.cc:5976:43 (d8+0x58f4ee) (BuildId: 2d1896fcafe8220c)
+
+ Previous read of size 8 at 0x7e9b00080008 by thread T6:
+ #0 GetFlags src/heap/basic-memory-chunk.h:211:45 (d8+0x88ee2f) (BuildId: 2d1896fcafe8220c)
+ #1 ShouldSkipEvacuationSlotRecording src/heap/basic-memory-chunk.h:240:29 (d8+0x88ee2f)
+ #2 RecordSlot src/heap/mark-compact-inl.h:68:21 (d8+0x88ee2f)
+ #3 void v8::internal::ConcurrentMarkingVisitor::RecordSlot<v8::internal::CompressedHeapObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/concurrent-marking.cc:481:5 (d8+0x88ee2f)
+ #4 void v8::internal::MarkingVisitorBase<v8::internal::ConcurrentMarkingVisitor, v8::internal::ConcurrentMarkingState>::ProcessStrongHeapObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/marking-visitor-inl.h:49:23 (d8+0x88ed97) (BuildId: 2d1896fcafe8220c)
+ #5 VisitPointersImpl<v8::internal::CompressedObjectSlot> src/heap/marking-visitor-inl.h:90:7 (d8+0x894a3a) (BuildId: 2d1896fcafe8220c)
+ #6 VisitPointers src/heap/marking-visitor.h:197:5 (d8+0x894a3a)
+ #7 IteratePointers<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors-inl.h:127:6 (d8+0x894a3a)
+ #8 IterateBody<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors-inl.h:358:5 (d8+0x894a3a)
+ #9 int v8::internal::ConcurrentMarkingVisitorUtility::VisitJSObjectSubclass<v8::internal::ConcurrentMarkingVisitor, v8::internal::JSFunction, v8::internal::JSFunction::BodyDescriptor>(v8::internal::ConcurrentMarkingVisitor*, v8::internal::Map, v8::internal::JSFunction) src/heap/concurrent-marking.cc:102:5 (d8+0x894a3a)
+ #10 VisitJSObjectSubclass<v8::internal::JSFunction, v8::internal::JSFunction::BodyDescriptor> src/heap/concurrent-marking.cc:489:12 (d8+0x884a6f) (BuildId: 2d1896fcafe8220c)
+ #11 VisitJSFunction src/heap/marking-visitor-inl.h:178:34 (d8+0x884a6f)
+ #12 Visit src/heap/objects-visiting-inl.h:65:5 (d8+0x884a6f)
+ #13 v8::internal::ConcurrentMarking::RunMajor(v8::JobDelegate*, v8::base::EnumSet<v8::internal::CodeFlushMode, int>, unsigned int, bool) src/heap/concurrent-marking.cc:758:41 (d8+0x884a6f)
+ #14 v8::internal::ConcurrentMarking::JobTaskMajor::Run(v8::JobDelegate*) src/heap/concurrent-marking.cc:606:28 (d8+0x898719) (BuildId: 2d1896fcafe8220c)
+ #15 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:304:18 (d8+0x565b28) (BuildId: 2d1896fcafe8220c)
+ #16 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1e5184b) (BuildId: 2d1896fcafe8220c)
+ #17 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1e586b0) (BuildId: 2d1896fcafe8220c)
+ #18 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x1e4bc22) (BuildId: 2d1896fcafe8220c)
+ #19 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1112:11 (d8+0x1e4bc22)
+
+ Mutex M0 (0x7b5400000710) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1316:3 (d8+0x4df75f) (BuildId: 2d1896fcafe8220c)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1e4408b) (BuildId: 2d1896fcafe8220c)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1e4408b)
+ #3 v8::internal::PagedSpaceBase::PagedSpaceBase(v8::internal::Heap*, v8::internal::AllocationSpace, v8::internal::Executability, v8::internal::FreeList*, v8::internal::AllocationCounter&, v8::internal::LinearAllocationArea&, v8::internal::LinearAreaOriginalData&, v8::internal::CompactionSpaceKind) src/heap/paged-spaces.cc:120:17 (d8+0xa023d8) (BuildId: 2d1896fcafe8220c)
+ #4 PagedSpaceForNewSpace src/heap/new-spaces.cc:923:7 (d8+0x9f1f66) (BuildId: 2d1896fcafe8220c)
+ #5 v8::internal::PagedNewSpace::PagedNewSpace(v8::internal::Heap*, unsigned long, unsigned long, v8::internal::LinearAllocationArea&) src/heap/new-spaces.cc:1042:7 (d8+0x9f1f66)
+ #6 make_unique<v8::internal::PagedNewSpace, v8::internal::Heap *, unsigned long &, unsigned long &, v8::internal::LinearAllocationArea &> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (d8+0x90c4a9) (BuildId: 2d1896fcafe8220c)
+ #7 v8::internal::Heap::SetUpSpaces(v8::internal::LinearAllocationArea&, v8::internal::LinearAllocationArea&) src/heap/heap.cc:5559:27 (d8+0x90c4a9)
+ #8 v8::internal::Isolate::Init(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4264:9 (d8+0x843e08) (BuildId: 2d1896fcafe8220c)
+ #9 v8::internal::Isolate::InitWithSnapshot(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4036:10 (d8+0x8455f9) (BuildId: 2d1896fcafe8220c)
+ #10 v8::internal::Snapshot::Initialize(v8::internal::Isolate*) src/snapshot/snapshot.cc:182:27 (d8+0x10bc8e1) (BuildId: 2d1896fcafe8220c)
+ #11 v8::Isolate::Initialize(v8::Isolate*, v8::Isolate::CreateParams const&) src/api/api.cc:8836:8 (d8+0x5dd97f) (BuildId: 2d1896fcafe8220c)
+ #12 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:8872:3 (d8+0x5ddc45) (BuildId: 2d1896fcafe8220c)
+ #13 v8::Shell::Main(int, char**) src/d8/d8.cc:5788:22 (d8+0x58e70c) (BuildId: 2d1896fcafe8220c)
+ #14 main src/d8/d8.cc:5976:43 (d8+0x58f4ee) (BuildId: 2d1896fcafe8220c)
+
+ Thread T6 'V8 DefaultWorke' (tid=24798, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3 (d8+0x4ddffb) (BuildId: 2d1896fcafe8220c)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1144:14 (d8+0x1e4bb36) (BuildId: 2d1896fcafe8220c)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x1e580ac) (BuildId: 2d1896fcafe8220c)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (d8+0x1e580ac)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x1e580ac)
+ #5 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:297:37 (d8+0x1e4e1b0) (BuildId: 2d1896fcafe8220c)
+ #6 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:956:55 (d8+0x1e4e1b0)
+ #7 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:965:12 (d8+0x1e4e1b0)
+ #8 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:132:7 (d8+0x1e4e1b0)
+ #9 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (d8+0x1e4e1b0)
+ #10 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (d8+0x1e4d839) (BuildId: 2d1896fcafe8220c)
+ #11 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (d8+0x1e4d839)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5691:16 (d8+0x58e251) (BuildId: 2d1896fcafe8220c)
+ #13 main src/d8/d8.cc:5976:43 (d8+0x58f4ee) (BuildId: 2d1896fcafe8220c)
+
+SUMMARY: ThreadSanitizer: data race src/base/flags.h:47:11 in operator|=
+==================
+ThreadSanitizer: reported 1 warnings
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.expected
new file mode 100644
index 0000000000..c3f6207e60
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.expected
@@ -0,0 +1,3 @@
+ClearCellRangeRelaxed
+Clear
+ClearMarkBitsAndHandleLivenessStatistics \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.txt
new file mode 100644
index 0000000000..c6993d2cdd
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_2.txt
@@ -0,0 +1,118 @@
+==================
+WARNING: ThreadSanitizer: data race (pid=10132)
+ Write of size 8 at 0x7e8d00440248 by thread T7 (mutexes: write M0):
+ #0 memset sanitizer_common/sanitizer_common_interceptors.inc:799:3 (d8+0x49d4ad) (BuildId: c849b9596314cb5d)
+ #1 ClearCellRangeRelaxed src/heap/marking.h:205:16 (d8+0x94d685) (BuildId: c849b9596314cb5d)
+ #2 Clear src/heap/marking.h:228:3 (d8+0x94d685)
+ #3 ClearMarkBitsAndHandleLivenessStatistics src/heap/sweeper.cc:303:33 (d8+0x94d685)
+ #4 v8::internal::Sweeper::RawSweep(v8::internal::Page*, v8::internal::Sweeper::FreeListRebuildingMode, v8::internal::FreeSpaceTreatmentMode, v8::internal::Sweeper::FreeSpaceMayContainInvalidatedSlots, v8::base::LockGuard<v8::base::Mutex, (v8::base::NullBehavior)0> const&) src/heap/sweeper.cc:412:3 (d8+0x94d685)
+ #5 v8::internal::Sweeper::ParallelSweepPage(v8::internal::Page*, v8::internal::AllocationSpace, v8::internal::Sweeper::FreeSpaceMayContainInvalidatedSlots) src/heap/sweeper.cc:494:17 (d8+0x94d9e6) (BuildId: c849b9596314cb5d)
+ #6 ConcurrentSweepSpace src/heap/sweeper.cc:437:5 (d8+0x95056a) (BuildId: c849b9596314cb5d)
+ #7 v8::internal::Sweeper::SweeperJob::RunImpl(v8::JobDelegate*) src/heap/sweeper.cc:117:22 (d8+0x95056a)
+ #8 v8::internal::Sweeper::SweeperJob::Run(v8::JobDelegate*) src/heap/sweeper.cc (d8+0x950295) (BuildId: c849b9596314cb5d)
+ #9 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:297:18 (d8+0x51a6a8) (BuildId: c849b9596314cb5d)
+ #10 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1a7bec8) (BuildId: c849b9596314cb5d)
+ #11 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1a836f0) (BuildId: c849b9596314cb5d)
+ #12 NotifyStartedAndRun src/base/platform/platform.h:560:5 (d8+0x1a77162) (BuildId: c849b9596314cb5d)
+ #13 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1051:11 (d8+0x1a77162)
+
+ Previous atomic read of size 4 at 0x7e8d00440248 by thread T3:
+ #0 __cxx_atomic_load<int> buildtools/third_party/libc++/trunk/include/atomic:1000:12 (d8+0x7e5423) (BuildId: c849b9596314cb5d)
+ #1 load buildtools/third_party/libc++/trunk/include/atomic:1611:17 (d8+0x7e5423)
+ #2 atomic_load_explicit<int> buildtools/third_party/libc++/trunk/include/atomic:1967:17 (d8+0x7e5423)
+ #3 Acquire_Load src/base/atomicops.h:240:10 (d8+0x7e5423)
+ #4 Acquire_Load<unsigned int> src/base/atomic-utils.h:73:9 (d8+0x7e5423)
+ #5 Get<v8::internal::AccessMode::ATOMIC> src/heap/marking.h:78:11 (d8+0x7e5423)
+ #6 IsBlackOrGrey<v8::internal::AccessMode::ATOMIC> src/heap/marking.h:398:21 (d8+0x7e5423)
+ #7 IsBlackOrGrey src/heap/marking-visitor.h:81:12 (d8+0x7e5423)
+ #8 void v8::internal::MarkingVisitorBase<v8::internal::ConcurrentMarkingVisitor, v8::internal::ConcurrentMarkingState>::ProcessWeakHeapObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/marking-visitor-inl.h:58:44 (d8+0x7e5423)
+ #9 VisitPointersImpl<v8::internal::CompressedMaybeObjectSlot> src/heap/marking-visitor-inl.h:90:7 (d8+0x7df885) (BuildId: c849b9596314cb5d)
+ #10 VisitPointers src/heap/marking-visitor.h:199:5 (d8+0x7df885)
+ #11 IterateMaybeWeakPointers<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors-inl.h:139:6 (d8+0x7df885)
+ #12 IterateBody<v8::internal::ConcurrentMarkingVisitor> src/objects/objects-body-descriptors.h:176:5 (d8+0x7df885)
+ #13 VisitFeedbackVector src/heap/objects-visiting-inl.h:118:1 (d8+0x7df885)
+ #14 Visit src/heap/objects-visiting-inl.h:65:5 (d8+0x7df885)
+ #15 v8::internal::ConcurrentMarking::Run(v8::JobDelegate*, v8::base::EnumSet<v8::internal::CodeFlushMode, int>, unsigned int, bool) src/heap/concurrent-marking.cc:531:41 (d8+0x7df885)
+ #16 v8::internal::ConcurrentMarking::JobTask::Run(v8::JobDelegate*) src/heap/concurrent-marking.cc:420:28 (d8+0x7eb659) (BuildId: c849b9596314cb5d)
+ #17 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:297:18 (d8+0x51a6a8) (BuildId: c849b9596314cb5d)
+ #18 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1a7bec8) (BuildId: c849b9596314cb5d)
+ #19 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1a836f0) (BuildId: c849b9596314cb5d)
+ #20 NotifyStartedAndRun src/base/platform/platform.h:560:5 (d8+0x1a77162) (BuildId: c849b9596314cb5d)
+ #21 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1051:11 (d8+0x1a77162)
+
+ As if synchronized via sleep:
+ #0 usleep /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:358:3 (d8+0x490b4a) (BuildId: c849b9596314cb5d)
+ #1 v8::base::OS::Sleep(v8::base::TimeDelta) src/base/platform/platform-posix.cc:615:3 (d8+0x1a76295) (BuildId: c849b9596314cb5d)
+ #2 v8::DelayedTasksPlatform::DelayedJob::Run(v8::JobDelegate*) src/d8/d8-platforms.cc:296:7 (d8+0x51a687) (BuildId: c849b9596314cb5d)
+ #3 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x1a7bec8) (BuildId: c849b9596314cb5d)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x1a836f0) (BuildId: c849b9596314cb5d)
+ #5 NotifyStartedAndRun src/base/platform/platform.h:560:5 (d8+0x1a77162) (BuildId: c849b9596314cb5d)
+ #6 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1051:11 (d8+0x1a77162)
+
+ Mutex M0 (0x7b0c000091e0) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1316:3 (d8+0x494d4f) (BuildId: c849b9596314cb5d)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:95:12 (d8+0x1a71ecb) (BuildId: c849b9596314cb5d)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:148:3 (d8+0x1a71ecb)
+ #3 v8::internal::MemoryChunk::Initialize(v8::internal::BasicMemoryChunk*, v8::internal::Heap*, v8::internal::Executability) src/heap/memory-chunk.cc:149:23 (d8+0x8fa1dd) (BuildId: c849b9596314cb5d)
+ #4 AllocateChunk src/heap/memory-allocator.cc:409:7 (d8+0x8f9094) (BuildId: c849b9596314cb5d)
+ #5 v8::internal::MemoryAllocator::AllocatePage(v8::internal::MemoryAllocator::AllocationMode, unsigned long, v8::internal::Space*, v8::internal::Executability) src/heap/memory-allocator.cc:562:13 (d8+0x8f9094)
+ #6 AllocatePage src/heap/paged-spaces.cc:322:38 (d8+0x9130dd) (BuildId: c849b9596314cb5d)
+ #7 v8::internal::PagedSpace::ExpandBackground(unsigned long) src/heap/paged-spaces.cc:338:16 (d8+0x9130dd)
+ #8 v8::internal::PagedSpace::RawRefillLabBackground(v8::internal::LocalHeap*, unsigned long, unsigned long, v8::internal::AllocationAlignment, v8::internal::AllocationOrigin) src/heap/paged-spaces.cc:631:14 (d8+0x914467) (BuildId: c849b9596314cb5d)
+ #9 v8::internal::ConcurrentAllocator::EnsureLab(v8::internal::AllocationOrigin) src/heap/concurrent-allocator.cc:135:25 (d8+0x7dc06c) (BuildId: c849b9596314cb5d)
+ #10 v8::internal::ConcurrentAllocator::AllocateInLabSlow(int, v8::internal::AllocationAlignment, v8::internal::AllocationOrigin) src/heap/concurrent-allocator.cc:124:8 (d8+0x7dbf7b) (BuildId: c849b9596314cb5d)
+ #11 AllocateInLab src/heap/concurrent-allocator-inl.h:41:16 (d8+0x82d0b1) (BuildId: c849b9596314cb5d)
+ #12 AllocateRaw src/heap/concurrent-allocator-inl.h:34:10 (d8+0x82d0b1)
+ #13 AllocateRaw<v8::internal::AllocationType::kSharedMap> src/heap/heap-allocator-inl.h:122:47 (d8+0x82d0b1)
+ #14 AllocateRaw src/heap/heap-allocator-inl.h:184:14 (d8+0x82d0b1)
+ #15 v8::internal::HeapAllocator::AllocateRawWithLightRetrySlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:75:29 (d8+0x82d0b1)
+ #16 v8::internal::HeapAllocator::AllocateRawWithRetryOrFailSlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:100:7 (d8+0x82e234) (BuildId: c849b9596314cb5d)
+ #17 AllocateRawWith<v8::internal::HeapAllocator::kRetryOrFail> src/heap/heap-allocator-inl.h:237:16 (d8+0x808920) (BuildId: c849b9596314cb5d)
+ #18 v8::internal::Factory::NewMap(v8::internal::InstanceType, int, v8::internal::ElementsKind, int, v8::internal::AllocationType) src/heap/factory.cc:1848:36 (d8+0x808920)
+ #19 v8::internal::Builtin_Impl_SharedStructTypeConstructor(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-struct.cc:84:39 (d8+0x65fa26) (BuildId: c849b9596314cb5d)
+ #20 v8::internal::Builtin_SharedStructTypeConstructor(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-struct.cc:20:1 (d8+0x65f061) (BuildId: c849b9596314cb5d)
+ #21 <null> <null> (0x560ebfe66ef8)
+ #22 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:531:10 (d8+0x77252c) (BuildId: c849b9596314cb5d)
+ #23 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2105:7 (d8+0x561590) (BuildId: c849b9596314cb5d)
+ #24 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2048:10 (d8+0x560d20) (BuildId: c849b9596314cb5d)
+ #25 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:773:28 (d8+0x5211d0) (BuildId: c849b9596314cb5d)
+ #26 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:3990:10 (d8+0x53659a) (BuildId: c849b9596314cb5d)
+ #27 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:4656:39 (d8+0x53a188) (BuildId: c849b9596314cb5d)
+ #28 v8::Shell::Main(int, char**) src/d8/d8.cc:5473:18 (d8+0x53cd5b) (BuildId: c849b9596314cb5d)
+ #29 main src/d8/d8.cc:5553:43 (d8+0x53cf4e) (BuildId: c849b9596314cb5d)
+
+ Thread T7 'V8 DefaultWorke' (tid=10149, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3 (d8+0x49356d) (BuildId: c849b9596314cb5d)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1083:14 (d8+0x1a77076) (BuildId: c849b9596314cb5d)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x1a830f2) (BuildId: c849b9596314cb5d)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a830f2)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x1a830f2)
+ #5 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:293:37 (d8+0x1a7890d) (BuildId: c849b9596314cb5d)
+ #6 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::__1::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1101:55 (d8+0x1a7890d)
+ #7 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1110:12 (d8+0x1a7890d)
+ #8 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:133:7 (d8+0x1a7890d)
+ #9 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:109:5 (d8+0x1a7890d)
+ #10 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a78029) (BuildId: c849b9596314cb5d)
+ #11 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:54:19 (d8+0x1a78029)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5270:16 (d8+0x53b887) (BuildId: c849b9596314cb5d)
+ #13 main src/d8/d8.cc:5553:43 (d8+0x53cf4e) (BuildId: c849b9596314cb5d)
+
+ Thread T3 'V8 DefaultWorke' (tid=10144, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3 (d8+0x49356d) (BuildId: c849b9596314cb5d)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1083:14 (d8+0x1a77076) (BuildId: c849b9596314cb5d)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x1a830f2) (BuildId: c849b9596314cb5d)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a830f2)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x1a830f2)
+ #5 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:293:37 (d8+0x1a7890d) (BuildId: c849b9596314cb5d)
+ #6 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::__1::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1101:55 (d8+0x1a7890d)
+ #7 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:1110:12 (d8+0x1a7890d)
+ #8 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:133:7 (d8+0x1a7890d)
+ #9 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:109:5 (d8+0x1a7890d)
+ #10 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:725:32 (d8+0x1a78029) (BuildId: c849b9596314cb5d)
+ #11 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::__1::unique_ptr<v8::TracingController, std::__1::default_delete<v8::TracingController> >) src/libplatform/default-platform.cc:54:19 (d8+0x1a78029)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5270:16 (d8+0x53b887) (BuildId: c849b9596314cb5d)
+ #13 main src/d8/d8.cc:5553:43 (d8+0x53cf4e) (BuildId: c849b9596314cb5d)
+
+SUMMARY: ThreadSanitizer: data race src/heap/marking.h:205:16 in ClearCellRangeRelaxed
+==================
+ThreadSanitizer: reported 1 warnings
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.expected
new file mode 100644
index 0000000000..1f41878d22
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.expected
@@ -0,0 +1,3 @@
+v8::internal::compiler::JSObjectRef::GetOwnConstantElementFromHeap(v8::internal::FixedArrayBase, v8::internal::ElementsKind, unsigned int) const
+v8::internal::compiler::JSObjectRef::GetOwnConstantElement(v8::internal::compiler::FixedArrayBaseRef const&, unsigned int, v8::internal::compiler::CompilationDependencies*) const
+v8::internal::compiler::JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::AccessMode, v8::internal::KeyedAccessLoadMode) \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.txt
new file mode 100644
index 0000000000..b72ef85bbd
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_3.txt
@@ -0,0 +1,50 @@
+WARNING: ThreadSanitizer: data race (pid=3187405)
+ Read of size 8 at 0x7ea200056b80 by thread T1:
+ #0 0x55ad577064dc in v8::internal::compiler::JSObjectRef::GetOwnConstantElementFromHeap(v8::internal::FixedArrayBase, v8::internal::ElementsKind, unsigned int) const src/base/memory.h:34:3
+ #1 0x55ad577062aa in v8::internal::compiler::JSObjectRef::GetOwnConstantElement(v8::internal::compiler::FixedArrayBaseRef const&, unsigned int, v8::internal::compiler::CompilationDependencies*) const src/compiler/heap-refs.cc:1815:42
+ #2 0x55ad577c4ff7 in v8::internal::compiler::JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::AccessMode, v8::internal::KeyedAccessLoadMode) src/compiler/js-native-context-specialization.cc:2325:32
+ #3 0x55ad577c2c8f in v8::internal::compiler::JSNativeContextSpecialization::ReduceElementAccess(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::internal::compiler::ElementAccessFeedback const&) src/compiler/js-native-context-specialization.cc:2094:27
+ #4 0x55ad577c19f7 in v8::internal::compiler::JSNativeContextSpecialization::ReducePropertyAccess(v8::internal::compiler::Node*, v8::internal::compiler::Node*, v8::base::Optional<v8::internal::compiler::NameRef>, v8::internal::compiler::Node*, v8::internal::compiler::FeedbackSource const&, v8::internal::compiler::AccessMode) src/compiler/js-native-context-specialization.cc:2413:14
+ #5 0x55ad577b6099 in v8::internal::compiler::JSNativeContextSpecialization::ReduceJSLoadProperty(v8::internal::compiler::Node*) src/compiler/js-native-context-specialization.cc:2564:10
+ #6 0x55ad577afc8f in v8::internal::compiler::JSNativeContextSpecialization::Reduce(v8::internal::compiler::Node*) src/compiler/js-native-context-specialization.cc:108:14
+ #7 0x55ad576d605a in v8::internal::compiler::GraphReducer::Reduce(v8::internal::compiler::Node*) src/compiler/graph-reducer.cc:34:25
+ #8 0x55ad576d59f4 in v8::internal::compiler::GraphReducer::ReduceTop() src/compiler/graph-reducer.cc:178:25
+ #9 0x55ad576d5209 in v8::internal::compiler::GraphReducer::ReduceNode(v8::internal::compiler::Node*) src/compiler/graph-reducer.cc:75:7
+ #10 0x55ad576d5f35 in v8::internal::compiler::GraphReducer::ReduceGraph() src/compiler/graph-reducer.cc:97:36
+ #11 0x55ad57887506 in v8::internal::compiler::InliningPhase::Run(v8::internal::compiler::PipelineData*, v8::internal::Zone*) src/compiler/pipeline.cc:1422:19
+ #12 0x55ad578763c9 in auto v8::internal::compiler::PipelineImpl::Run<v8::internal::compiler::InliningPhase>() src/compiler/pipeline.cc:1319:16
+ #13 0x55ad57872135 in v8::internal::compiler::PipelineImpl::CreateGraph() src/compiler/pipeline.cc:2847:3
+ #14 0x55ad57871f32 in v8::internal::compiler::PipelineCompilationJob::ExecuteJobImpl(v8::internal::RuntimeCallStats*, v8::internal::LocalIsolate*) src/compiler/pipeline.cc:1247:18
+ #15 0x55ad565a9830 in v8::internal::OptimizedCompilationJob::ExecuteJob(v8::internal::RuntimeCallStats*, v8::internal::LocalIsolate*) src/codegen/compiler.cc:494:22
+ #16 0x55ad565ef91e in v8::internal::OptimizingCompileDispatcher::CompileNext(v8::internal::TurbofanCompilationJob*, v8::internal::LocalIsolate*) src/compiler-dispatcher/optimizing-compile-dispatcher.cc:105:12
+ #17 0x55ad565f2555 in v8::internal::OptimizingCompileDispatcher::CompileTask::RunInternal() src/compiler-dispatcher/optimizing-compile-dispatcher.cc:67:20
+ #18 0x55ad5640b898 in non-virtual thunk to v8::internal::CancelableTask::Run() src/tasks/cancelable-task.h:155:7
+ #19 0x55ad57cc46b1 in v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11
+ #20 0x55ad57cb7c23 in v8::base::ThreadEntry(void*) src/base/platform/platform.h:596:5
+ Previous write of size 8 at 0x7ea200056b80 by main thread:
+ #0 0x55ad56a49f14 in v8::internal::(anonymous namespace)::DictionaryElementsAccessor::SetLengthImpl(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSArray>, unsigned int, v8::internal::Handle<v8::internal::FixedArrayBase>) src/base/memory.h:41:3
+ #1 0x55ad56a675ea in v8::internal::(anonymous namespace)::ElementsAccessorBase<v8::internal::(anonymous namespace)::DictionaryElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)13> >::SetLength(v8::internal::Handle<v8::internal::JSArray>, unsigned int) src/objects/elements.cc:758:12
+ #2 0x55ad56c5f9c0 in v8::internal::JSArray::SetLength(v8::internal::Handle<v8::internal::JSArray>, unsigned int) src/objects/objects.cc:5237:40
+ #3 0x55ad564a6616 in v8::internal::Accessors::ArrayLengthSetter(v8::Local<v8::Name>, v8::Local<v8::Value>, v8::PropertyCallbackInfo<v8::Boolean> const&) src/builtins/accessors.cc:202:7
+ #4 0x55ad568e961b in v8::internal::PropertyCallbackArguments::CallAccessorSetter(v8::internal::Handle<v8::internal::AccessorInfo>, v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::Object>) src/api/api-arguments-inl.h:332:3
+ #5 0x55ad568e39df in v8::internal::Runtime_StoreCallbackProperty(int, unsigned long*, v8::internal::Isolate*) src/ic/ic.cc:3245:13
+ #6 0x55acdfe69bb8 in Builtins_AsyncFromSyncIteratorPrototypeThrow (/mnt/scratch0/clusterfuzz/bot/builds/v8-tsan_linux-release_8681dd6e7b8d9ebe281ad96b26057d51e550c14e/revisions/d8-tsan-linux-release-v8-component-83502/d8+0x1c30bb7)
+ #7 0x55ad5668cf86 in v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10
+ #8 0x55ad5642808f in v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2231:7
+ #9 0x55ad564277c1 in v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2158:10
+ #10 0x55ad563e63cc in v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:879:28
+ #11 0x55ad563fe621 in v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4379:10
+ #12 0x55ad56402488 in v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5107:39
+ #13 0x55ad56404fe1 in v8::Shell::Main(int, char**) src/d8/d8.cc:5886:18
+ #14 0x55ad564054ef in main src/d8/d8.cc:5976:43
+ Thread T1 'V8 DefaultWorke' (tid=3187415, running) created by main thread at:
+ #0 0x55ad56353ffc in pthread_create third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1022:3
+ #1 0x55ad57cb7b37 in v8::base::Thread::Start() src/base/platform/platform-posix.cc:1144:14
+ #2 0x55ad57cc40ad in v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:66:3
+ #3 0x55ad57cba1b1 in v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> >) buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:297:37
+ #4 0x55ad57cb983a in v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> >) buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30
+ #5 0x55ad56404252 in v8::Shell::Main(int, char**) src/d8/d8.cc:5691:16
+ #6 0x55ad564054ef in main src/d8/d8.cc:5976:43
+SUMMARY: ThreadSanitizer: data race src/base/memory.h:34:3 in v8::internal::compiler::JSObjectRef::GetOwnConstantElementFromHeap(v8::internal::FixedArrayBase, v8::internal::ElementsKind, unsigned int) const
+==================
+ThreadSanitizer: reported 1 warnings \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.expected
new file mode 100644
index 0000000000..ca73b6cfb0
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.expected
@@ -0,0 +1,3 @@
+MemMove
+v8::internal::Heap::MoveRange(v8::internal::HeapObject, v8::internal::CompressedObjectSlot, v8::internal::CompressedObjectSlot, int, v8::internal::WriteBarrierMode)
+MoveElements \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.txt
new file mode 100644
index 0000000000..edc59865a4
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_4.txt
@@ -0,0 +1,190 @@
+WARNING: ThreadSanitizer: data race (pid=21413)
+ Write of size 8 at 0x7eba001056b8 by main thread:
+ #0 __tsan_memmove /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:3118:3 (d8+0x566140) (BuildId: 06e6c95b54bbb645)
+ #1 MemMove src/utils/memcopy.h:123:7 (d8+0x949b82) (BuildId: 06e6c95b54bbb645)
+ #2 v8::internal::Heap::MoveRange(v8::internal::HeapObject, v8::internal::CompressedObjectSlot, v8::internal::CompressedObjectSlot, int, v8::internal::WriteBarrierMode) src/heap/heap.cc:2027:5 (d8+0x949b82)
+ #3 MoveElements src/objects/fixed-array-inl.h:262:20 (d8+0xc1577f) (BuildId: 06e6c95b54bbb645)
+ #4 v8::internal::(anonymous namespace)::FastElementsAccessor<v8::internal::(anonymous namespace)::FastPackedObjectElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)2>>::MoveElements(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSArray>, v8::internal::Handle<v8::internal::FixedArrayBase>, int, int, int, int, int) src/objects/elements.cc:2242:16 (d8+0xc1577f)
+ #5 v8::internal::(anonymous namespace)::FastElementsAccessor<v8::internal::(anonymous namespace)::FastPackedObjectElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)2>>::AddArguments(v8::internal::Handle<v8::internal::JSArray>, v8::internal::Handle<v8::internal::FixedArrayBase>, v8::internal::BuiltinArguments*, unsigned int, v8::internal::(anonymous namespace)::Where) src/objects/elements.cc:2491:7 (d8+0xc1545d) (BuildId: 06e6c95b54bbb645)
+ #6 UnshiftImpl src/objects/elements.cc:2218:12 (d8+0xc12e43) (BuildId: 06e6c95b54bbb645)
+ #7 v8::internal::(anonymous namespace)::ElementsAccessorBase<v8::internal::(anonymous namespace)::FastPackedObjectElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)2>>::Unshift(v8::internal::Handle<v8::internal::JSArray>, v8::internal::BuiltinArguments*, unsigned int) src/objects/elements.cc:732:12 (d8+0xc12e43)
+ #8 v8::internal::Builtin_Impl_ArrayUnshift(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-array.cc:640:3 (d8+0x6871cf) (BuildId: 06e6c95b54bbb645)
+ #9 v8::internal::Builtin_ArrayUnshift(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-array.cc:617:1 (d8+0x686d93) (BuildId: 06e6c95b54bbb645)
+ #10 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1dda837)
+ #11 v8::internal::(anonymous namespace)::InvokeWithTryCatch(v8::internal::Isolate*, v8::internal::(anonymous namespace)::InvokeParams const&) src/execution/execution.cc:489:20 (d8+0x85e89e) (BuildId: 06e6c95b54bbb645)
+ #12 v8::internal::Execution::TryCall(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, v8::internal::Execution::MessageHandling, v8::internal::MaybeHandle<v8::internal::Object>*, bool) src/execution/execution.cc:591:10 (d8+0x85eb68) (BuildId: 06e6c95b54bbb645)
+ #13 v8::internal::SourceTextModule::ExecuteModule(v8::internal::Isolate*, v8::internal::Handle<v8::internal::SourceTextModule>) src/objects/source-text-module.cc:1021:3 (d8+0xe52c68) (BuildId: 06e6c95b54bbb645)
+ #14 v8::internal::SourceTextModule::InnerModuleEvaluation(v8::internal::Isolate*, v8::internal::Handle<v8::internal::SourceTextModule>, v8::internal::ZoneForwardList<v8::internal::Handle<v8::internal::SourceTextModule>>*, unsigned int*) src/objects/source-text-module.cc:1192:5 (d8+0xe51d39) (BuildId: 06e6c95b54bbb645)
+ #15 v8::internal::SourceTextModule::Evaluate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::SourceTextModule>) src/objects/source-text-module.cc:733:8 (d8+0xe51368) (BuildId: 06e6c95b54bbb645)
+ #16 v8::internal::Module::Evaluate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Module>) src/objects/module.cc:280:12 (d8+0xdf8599) (BuildId: 06e6c95b54bbb645)
+ #17 v8::Module::Evaluate(v8::Local<v8::Context>) src/api/api.cc:2548:16 (d8+0x60552d) (BuildId: 06e6c95b54bbb645)
+ #18 v8::Shell::ExecuteModule(v8::Isolate*, char const*) src/d8/d8.cc:1455:33 (d8+0x5c8cf1) (BuildId: 06e6c95b54bbb645)
+ #19 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4442:12 (d8+0x5da4a0) (BuildId: 06e6c95b54bbb645)
+ #20 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5224:39 (d8+0x5de41e) (BuildId: 06e6c95b54bbb645)
+ #21 v8::Shell::Main(int, char**) src/d8/d8.cc:6020:18 (d8+0x5e0906) (BuildId: 06e6c95b54bbb645)
+ #22 main src/d8/d8.cc:6117:43 (d8+0x5e0f10) (BuildId: 06e6c95b54bbb645)
+
+ Previous atomic read of size 4 at 0x7eba001056b8 by thread T5 (mutexes: write M0):
+ #0 __cxx_atomic_load<int> buildtools/third_party/libc++/trunk/include/atomic:948:12 (d8+0xa98a99) (BuildId: 06e6c95b54bbb645)
+ #1 load buildtools/third_party/libc++/trunk/include/atomic:1537:17 (d8+0xa98a99)
+ #2 atomic_load_explicit<int> buildtools/third_party/libc++/trunk/include/atomic:1916:17 (d8+0xa98a99)
+ #3 Relaxed_Load src/base/atomicops.h:237:10 (d8+0xa98a99)
+ #4 Relaxed_Load<unsigned int> src/base/atomic-utils.h:87:9 (d8+0xa98a99)
+ #5 Relaxed_Load src/objects/compressed-slots-inl.h:75:26 (d8+0xa98a99)
+ #6 VisitPointer src/heap/sweeper.cc:678:50 (d8+0xa98a99)
+ #7 VisitPointers src/heap/sweeper.cc:694:7 (d8+0xa98a99)
+ #8 IteratePointers<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:128:6 (d8+0xa98a99)
+ #9 IterateBody<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors.h:135:5 (d8+0xa98a99)
+ #10 void v8::internal::CallIterateBody::apply<v8::internal::FixedArray::BodyDescriptor, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor>(v8::internal::Map, v8::internal::HeapObject, int, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/objects/objects-body-descriptors-inl.h:1432:5 (d8+0xa98a99)
+ #11 auto v8::internal::BodyDescriptorApply<v8::internal::CallIterateBody, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&>(v8::internal::InstanceType, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&) src/objects/objects-body-descriptors-inl.h (d8+0xa97d74) (BuildId: 06e6c95b54bbb645)
+ #12 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1438:3 (d8+0xa9318c) (BuildId: 06e6c95b54bbb645)
+ #13 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1425:3 (d8+0xa9318c)
+ #14 IterateFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1413:3 (d8+0xa9318c)
+ #15 v8::internal::(anonymous namespace)::HandlePromotedObject(v8::internal::HeapObject, v8::internal::NonAtomicMarkingState*, v8::internal::PretenuringHandler*, v8::internal::PtrComprCageBase, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/heap/sweeper.cc:805:10 (d8+0xa9318c)
+ #16 v8::internal::Sweeper::RawIteratePromotedPageForRememberedSets(v8::internal::MemoryChunk*, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, std::Cr::unordered_map<v8::internal::MemoryChunk*, v8::internal::SlotSet*, std::Cr::hash<v8::internal::MemoryChunk*>, std::Cr::equal_to<v8::internal::MemoryChunk*>, std::Cr::allocator<std::Cr::pair<v8::internal::MemoryChunk* const, v8::internal::SlotSet*>>>*) src/heap/sweeper.cc:841:7 (d8+0xa92e20) (BuildId: 06e6c95b54bbb645)
+ #17 ParallelIteratePromotedPageForRememberedSets src/heap/sweeper.cc:987:3 (d8+0xa9ffdc) (BuildId: 06e6c95b54bbb645)
+ #18 v8::internal::Sweeper::ConcurrentSweeper::ConcurrentSweepForRememberedSet(v8::JobDelegate*) src/heap/sweeper.cc:61:17 (d8+0xa9ffdc)
+ #19 v8::internal::Sweeper::SweeperJob::RunImpl(v8::JobDelegate*, bool) src/heap/sweeper.cc:126:31 (d8+0xa9f0f6) (BuildId: 06e6c95b54bbb645)
+ #20 v8::internal::Sweeper::SweeperJob::Run(v8::JobDelegate*) src/heap/sweeper.cc:98:5 (d8+0xa9e6e9) (BuildId: 06e6c95b54bbb645)
+ #21 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x201972b) (BuildId: 06e6c95b54bbb645)
+ #22 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x201f240) (BuildId: 06e6c95b54bbb645)
+ #23 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x2013592) (BuildId: 06e6c95b54bbb645)
+ #24 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x2013592)
+
+ Mutex M0 (0x7b0c000007e0) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 06e6c95b54bbb645)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x200b32b) (BuildId: 06e6c95b54bbb645)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x200b32b)
+ #3 v8::internal::MemoryChunk::MemoryChunk(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability, v8::internal::PageSize) src/heap/memory-chunk.cc:154:16 (d8+0xa3462f) (BuildId: 06e6c95b54bbb645)
+ #4 v8::internal::Page::Page(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability) src/heap/spaces.cc:53:7 (d8+0xa8a50f) (BuildId: 06e6c95b54bbb645)
+ #5 v8::internal::MemoryAllocator::AllocatePage(v8::internal::MemoryAllocator::AllocationMode, v8::internal::Space*, v8::internal::Executability) src/heap/memory-allocator.cc:579:40 (d8+0xa31224) (BuildId: 06e6c95b54bbb645)
+ #6 v8::internal::PagedSpaceBase::TryExpandImpl() src/heap/paged-spaces.cc:393:44 (d8+0xa50188) (BuildId: 06e6c95b54bbb645)
+ #7 PreallocatePages src/heap/new-spaces.cc:984:10 (d8+0xa3fab0) (BuildId: 06e6c95b54bbb645)
+ #8 PagedSpaceForNewSpace src/heap/new-spaces.cc:901:8 (d8+0xa3fab0)
+ #9 v8::internal::PagedNewSpace::PagedNewSpace(v8::internal::Heap*, unsigned long, unsigned long, v8::internal::LinearAllocationArea&) src/heap/new-spaces.cc:1016:7 (d8+0xa3fab0)
+ #10 make_unique<v8::internal::PagedNewSpace, v8::internal::Heap *, unsigned long &, unsigned long &, v8::internal::LinearAllocationArea &> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x959f81) (BuildId: 06e6c95b54bbb645)
+ #11 v8::internal::Heap::SetUpSpaces(v8::internal::LinearAllocationArea&, v8::internal::LinearAllocationArea&) src/heap/heap.cc:5569:27 (d8+0x959f81)
+ #12 v8::internal::Isolate::Init(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4348:9 (d8+0x88a11c) (BuildId: 06e6c95b54bbb645)
+ #13 v8::internal::Isolate::InitWithSnapshot(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4064:10 (d8+0x88ba99) (BuildId: 06e6c95b54bbb645)
+ #14 v8::internal::Snapshot::Initialize(v8::internal::Isolate*) src/snapshot/snapshot.cc:182:27 (d8+0x1103aed) (BuildId: 06e6c95b54bbb645)
+ #15 v8::Isolate::Initialize(v8::Isolate*, v8::Isolate::CreateParams const&) src/api/api.cc:9049:8 (d8+0x62c749) (BuildId: 06e6c95b54bbb645)
+ #16 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9085:3 (d8+0x62ca05) (BuildId: 06e6c95b54bbb645)
+ #17 v8::Shell::Main(int, char**) src/d8/d8.cc:5909:22 (d8+0x5dffbe) (BuildId: 06e6c95b54bbb645)
+ #18 main src/d8/d8.cc:6117:43 (d8+0x5e0f10) (BuildId: 06e6c95b54bbb645)
+
+ Thread T5 'V8 DefaultWorke' (tid=21435, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x52198b) (BuildId: 06e6c95b54bbb645)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x20134a6) (BuildId: 06e6c95b54bbb645)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x201ec07) (BuildId: 06e6c95b54bbb645)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x201ec07)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x201ec07)
+ #5 construct_at<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:38:48 (d8+0x2015d60) (BuildId: 06e6c95b54bbb645)
+ #6 construct<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:304:9 (d8+0x2015d60)
+ #7 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:275:9 (d8+0x2015d60)
+ #8 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:936:55 (d8+0x2015d60)
+ #9 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:945:12 (d8+0x2015d60)
+ #10 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:132:7 (d8+0x2015d60)
+ #11 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (d8+0x2015d60)
+ #12 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x20153d2) (BuildId: 06e6c95b54bbb645)
+ #13 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (d8+0x20153d2)
+ #14 v8::Shell::Main(int, char**) src/d8/d8.cc:5812:16 (d8+0x5dfb09) (BuildId: 06e6c95b54bbb645)
+ #15 main src/d8/d8.cc:6117:43 (d8+0x5e0f10) (BuildId: 06e6c95b54bbb645)
+
+SUMMARY: ThreadSanitizer: data race src/utils/memcopy.h:123:7 in MemMove
+==================
+==================
+WARNING: ThreadSanitizer: data race (pid=21413)
+ Write of size 8 at 0x7eba001057e0 by main thread:
+ #0 __tsan_memmove /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:3118:3 (d8+0x566140) (BuildId: 06e6c95b54bbb645)
+ #1 MemMove src/utils/memcopy.h:123:7 (d8+0x949b82) (BuildId: 06e6c95b54bbb645)
+ #2 v8::internal::Heap::MoveRange(v8::internal::HeapObject, v8::internal::CompressedObjectSlot, v8::internal::CompressedObjectSlot, int, v8::internal::WriteBarrierMode) src/heap/heap.cc:2027:5 (d8+0x949b82)
+ #3 MoveElements src/objects/fixed-array-inl.h:262:20 (d8+0xc0b43a) (BuildId: 06e6c95b54bbb645)
+ #4 v8::internal::(anonymous namespace)::FastElementsAccessor<v8::internal::(anonymous namespace)::FastPackedSmiElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)0>>::MoveElements(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSArray>, v8::internal::Handle<v8::internal::FixedArrayBase>, int, int, int, int, int) src/objects/elements.cc:2242:16 (d8+0xc0b43a)
+ #5 v8::internal::(anonymous namespace)::FastElementsAccessor<v8::internal::(anonymous namespace)::FastPackedSmiElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)0>>::AddArguments(v8::internal::Handle<v8::internal::JSArray>, v8::internal::Handle<v8::internal::FixedArrayBase>, v8::internal::BuiltinArguments*, unsigned int, v8::internal::(anonymous namespace)::Where) src/objects/elements.cc:2491:7 (d8+0xc0b13a) (BuildId: 06e6c95b54bbb645)
+ #6 UnshiftImpl src/objects/elements.cc:2218:12 (d8+0xc055f3) (BuildId: 06e6c95b54bbb645)
+ #7 v8::internal::(anonymous namespace)::ElementsAccessorBase<v8::internal::(anonymous namespace)::FastPackedSmiElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)0>>::Unshift(v8::internal::Handle<v8::internal::JSArray>, v8::internal::BuiltinArguments*, unsigned int) src/objects/elements.cc:732:12 (d8+0xc055f3)
+ #8 v8::internal::Builtin_Impl_ArrayUnshift(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-array.cc:640:3 (d8+0x6871cf) (BuildId: 06e6c95b54bbb645)
+ #9 v8::internal::Builtin_ArrayUnshift(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-array.cc:617:1 (d8+0x686d93) (BuildId: 06e6c95b54bbb645)
+ #10 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1dda837)
+ #11 v8::internal::(anonymous namespace)::InvokeWithTryCatch(v8::internal::Isolate*, v8::internal::(anonymous namespace)::InvokeParams const&) src/execution/execution.cc:489:20 (d8+0x85e89e) (BuildId: 06e6c95b54bbb645)
+ #12 v8::internal::Execution::TryCall(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, v8::internal::Execution::MessageHandling, v8::internal::MaybeHandle<v8::internal::Object>*, bool) src/execution/execution.cc:591:10 (d8+0x85eb68) (BuildId: 06e6c95b54bbb645)
+ #13 v8::internal::SourceTextModule::ExecuteModule(v8::internal::Isolate*, v8::internal::Handle<v8::internal::SourceTextModule>) src/objects/source-text-module.cc:1021:3 (d8+0xe52c68) (BuildId: 06e6c95b54bbb645)
+ #14 v8::internal::SourceTextModule::InnerModuleEvaluation(v8::internal::Isolate*, v8::internal::Handle<v8::internal::SourceTextModule>, v8::internal::ZoneForwardList<v8::internal::Handle<v8::internal::SourceTextModule>>*, unsigned int*) src/objects/source-text-module.cc:1192:5 (d8+0xe51d39) (BuildId: 06e6c95b54bbb645)
+ #15 v8::internal::SourceTextModule::Evaluate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::SourceTextModule>) src/objects/source-text-module.cc:733:8 (d8+0xe51368) (BuildId: 06e6c95b54bbb645)
+ #16 v8::internal::Module::Evaluate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Module>) src/objects/module.cc:280:12 (d8+0xdf8599) (BuildId: 06e6c95b54bbb645)
+ #17 v8::Module::Evaluate(v8::Local<v8::Context>) src/api/api.cc:2548:16 (d8+0x60552d) (BuildId: 06e6c95b54bbb645)
+ #18 v8::Shell::ExecuteModule(v8::Isolate*, char const*) src/d8/d8.cc:1455:33 (d8+0x5c8cf1) (BuildId: 06e6c95b54bbb645)
+ #19 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4442:12 (d8+0x5da4a0) (BuildId: 06e6c95b54bbb645)
+ #20 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5224:39 (d8+0x5de41e) (BuildId: 06e6c95b54bbb645)
+ #21 v8::Shell::Main(int, char**) src/d8/d8.cc:6020:18 (d8+0x5e0906) (BuildId: 06e6c95b54bbb645)
+ #22 main src/d8/d8.cc:6117:43 (d8+0x5e0f10) (BuildId: 06e6c95b54bbb645)
+
+ Previous atomic read of size 4 at 0x7eba001057e0 by thread T5 (mutexes: write M0):
+ #0 __cxx_atomic_load<int> buildtools/third_party/libc++/trunk/include/atomic:948:12 (d8+0xa98a99) (BuildId: 06e6c95b54bbb645)
+ #1 load buildtools/third_party/libc++/trunk/include/atomic:1537:17 (d8+0xa98a99)
+ #2 atomic_load_explicit<int> buildtools/third_party/libc++/trunk/include/atomic:1916:17 (d8+0xa98a99)
+ #3 Relaxed_Load src/base/atomicops.h:237:10 (d8+0xa98a99)
+ #4 Relaxed_Load<unsigned int> src/base/atomic-utils.h:87:9 (d8+0xa98a99)
+ #5 Relaxed_Load src/objects/compressed-slots-inl.h:75:26 (d8+0xa98a99)
+ #6 VisitPointer src/heap/sweeper.cc:678:50 (d8+0xa98a99)
+ #7 VisitPointers src/heap/sweeper.cc:694:7 (d8+0xa98a99)
+ #8 IteratePointers<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:128:6 (d8+0xa98a99)
+ #9 IterateBody<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors.h:135:5 (d8+0xa98a99)
+ #10 void v8::internal::CallIterateBody::apply<v8::internal::FixedArray::BodyDescriptor, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor>(v8::internal::Map, v8::internal::HeapObject, int, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/objects/objects-body-descriptors-inl.h:1432:5 (d8+0xa98a99)
+ #11 auto v8::internal::BodyDescriptorApply<v8::internal::CallIterateBody, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&>(v8::internal::InstanceType, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&) src/objects/objects-body-descriptors-inl.h (d8+0xa97d74) (BuildId: 06e6c95b54bbb645)
+ #12 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1438:3 (d8+0xa9318c) (BuildId: 06e6c95b54bbb645)
+ #13 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1425:3 (d8+0xa9318c)
+ #14 IterateFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1413:3 (d8+0xa9318c)
+ #15 v8::internal::(anonymous namespace)::HandlePromotedObject(v8::internal::HeapObject, v8::internal::NonAtomicMarkingState*, v8::internal::PretenuringHandler*, v8::internal::PtrComprCageBase, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/heap/sweeper.cc:805:10 (d8+0xa9318c)
+ #16 v8::internal::Sweeper::RawIteratePromotedPageForRememberedSets(v8::internal::MemoryChunk*, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, std::Cr::unordered_map<v8::internal::MemoryChunk*, v8::internal::SlotSet*, std::Cr::hash<v8::internal::MemoryChunk*>, std::Cr::equal_to<v8::internal::MemoryChunk*>, std::Cr::allocator<std::Cr::pair<v8::internal::MemoryChunk* const, v8::internal::SlotSet*>>>*) src/heap/sweeper.cc:841:7 (d8+0xa92e20) (BuildId: 06e6c95b54bbb645)
+ #17 ParallelIteratePromotedPageForRememberedSets src/heap/sweeper.cc:987:3 (d8+0xa9ffdc) (BuildId: 06e6c95b54bbb645)
+ #18 v8::internal::Sweeper::ConcurrentSweeper::ConcurrentSweepForRememberedSet(v8::JobDelegate*) src/heap/sweeper.cc:61:17 (d8+0xa9ffdc)
+ #19 v8::internal::Sweeper::SweeperJob::RunImpl(v8::JobDelegate*, bool) src/heap/sweeper.cc:126:31 (d8+0xa9f0f6) (BuildId: 06e6c95b54bbb645)
+ #20 v8::internal::Sweeper::SweeperJob::Run(v8::JobDelegate*) src/heap/sweeper.cc:98:5 (d8+0xa9e6e9) (BuildId: 06e6c95b54bbb645)
+ #21 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x201972b) (BuildId: 06e6c95b54bbb645)
+ #22 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x201f240) (BuildId: 06e6c95b54bbb645)
+ #23 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x2013592) (BuildId: 06e6c95b54bbb645)
+ #24 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x2013592)
+
+ Mutex M0 (0x7b0c000007e0) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 06e6c95b54bbb645)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x200b32b) (BuildId: 06e6c95b54bbb645)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x200b32b)
+ #3 v8::internal::MemoryChunk::MemoryChunk(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability, v8::internal::PageSize) src/heap/memory-chunk.cc:154:16 (d8+0xa3462f) (BuildId: 06e6c95b54bbb645)
+ #4 v8::internal::Page::Page(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability) src/heap/spaces.cc:53:7 (d8+0xa8a50f) (BuildId: 06e6c95b54bbb645)
+ #5 v8::internal::MemoryAllocator::AllocatePage(v8::internal::MemoryAllocator::AllocationMode, v8::internal::Space*, v8::internal::Executability) src/heap/memory-allocator.cc:579:40 (d8+0xa31224) (BuildId: 06e6c95b54bbb645)
+ #6 v8::internal::PagedSpaceBase::TryExpandImpl() src/heap/paged-spaces.cc:393:44 (d8+0xa50188) (BuildId: 06e6c95b54bbb645)
+ #7 PreallocatePages src/heap/new-spaces.cc:984:10 (d8+0xa3fab0) (BuildId: 06e6c95b54bbb645)
+ #8 PagedSpaceForNewSpace src/heap/new-spaces.cc:901:8 (d8+0xa3fab0)
+ #9 v8::internal::PagedNewSpace::PagedNewSpace(v8::internal::Heap*, unsigned long, unsigned long, v8::internal::LinearAllocationArea&) src/heap/new-spaces.cc:1016:7 (d8+0xa3fab0)
+ #10 make_unique<v8::internal::PagedNewSpace, v8::internal::Heap *, unsigned long &, unsigned long &, v8::internal::LinearAllocationArea &> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x959f81) (BuildId: 06e6c95b54bbb645)
+ #11 v8::internal::Heap::SetUpSpaces(v8::internal::LinearAllocationArea&, v8::internal::LinearAllocationArea&) src/heap/heap.cc:5569:27 (d8+0x959f81)
+ #12 v8::internal::Isolate::Init(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4348:9 (d8+0x88a11c) (BuildId: 06e6c95b54bbb645)
+ #13 v8::internal::Isolate::InitWithSnapshot(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4064:10 (d8+0x88ba99) (BuildId: 06e6c95b54bbb645)
+ #14 v8::internal::Snapshot::Initialize(v8::internal::Isolate*) src/snapshot/snapshot.cc:182:27 (d8+0x1103aed) (BuildId: 06e6c95b54bbb645)
+ #15 v8::Isolate::Initialize(v8::Isolate*, v8::Isolate::CreateParams const&) src/api/api.cc:9049:8 (d8+0x62c749) (BuildId: 06e6c95b54bbb645)
+ #16 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9085:3 (d8+0x62ca05) (BuildId: 06e6c95b54bbb645)
+ #17 v8::Shell::Main(int, char**) src/d8/d8.cc:5909:22 (d8+0x5dffbe) (BuildId: 06e6c95b54bbb645)
+ #18 main src/d8/d8.cc:6117:43 (d8+0x5e0f10) (BuildId: 06e6c95b54bbb645)
+
+ Thread T5 'V8 DefaultWorke' (tid=21435, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x52198b) (BuildId: 06e6c95b54bbb645)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x20134a6) (BuildId: 06e6c95b54bbb645)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x201ec07) (BuildId: 06e6c95b54bbb645)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x201ec07)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x201ec07)
+ #5 construct_at<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:38:48 (d8+0x2015d60) (BuildId: 06e6c95b54bbb645)
+ #6 construct<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:304:9 (d8+0x2015d60)
+ #7 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:275:9 (d8+0x2015d60)
+ #8 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:936:55 (d8+0x2015d60)
+ #9 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:945:12 (d8+0x2015d60)
+ #10 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:132:7 (d8+0x2015d60)
+ #11 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (d8+0x2015d60)
+ #12 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x20153d2) (BuildId: 06e6c95b54bbb645)
+ #13 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (d8+0x20153d2)
+ #14 v8::Shell::Main(int, char**) src/d8/d8.cc:5812:16 (d8+0x5dfb09) (BuildId: 06e6c95b54bbb645)
+ #15 main src/d8/d8.cc:6117:43 (d8+0x5e0f10) (BuildId: 06e6c95b54bbb645)
+
+SUMMARY: ThreadSanitizer: data race src/utils/memcopy.h:123:7 in MemMove
+==================
+ThreadSanitizer: reported 2 warnings \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.expected
new file mode 100644
index 0000000000..f5bb8a2526
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.expected
@@ -0,0 +1,3 @@
+ActivateSpace
+ActivateSpaces
+v8::internal::MarkingBarrier::ActivateAll(v8::internal::Heap*, bool, v8::internal::MarkingBarrierType) \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.txt
new file mode 100644
index 0000000000..7c63104c18
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_5.txt
@@ -0,0 +1,198 @@
+WARNING: ThreadSanitizer: data race (pid=10493)
+ Write of size 8 at 0x553200140008 by main thread (mutexes: write M0, write M1, write M2, write M3, write M4):
+ #0 v8::internal::MemoryChunk::SetOldGenerationPageFlags(bool) src/heap/memory-chunk.cc (d8+0xa31ff9) (BuildId: 8b25ec4b66132a89)
+ #1 ActivateSpace src/heap/marking-barrier.cc:175:8 (d8+0xa281ec) (BuildId: 8b25ec4b66132a89)
+ #2 ActivateSpaces src/heap/marking-barrier.cc:194:5 (d8+0xa281ec)
+ #3 v8::internal::MarkingBarrier::ActivateAll(v8::internal::Heap*, bool, v8::internal::MarkingBarrierType) src/heap/marking-barrier.cc:261:3 (d8+0xa281ec)
+ #4 v8::internal::IncrementalMarking::StartMarkingMajor() src/heap/incremental-marking.cc:333:3 (d8+0x971249) (BuildId: 8b25ec4b66132a89)
+ #5 v8::internal::IncrementalMarking::Start(v8::internal::GarbageCollector, v8::internal::GarbageCollectionReason) src/heap/incremental-marking.cc:192:5 (d8+0x97072c) (BuildId: 8b25ec4b66132a89)
+ #6 v8::internal::Heap::StartIncrementalMarking(int, v8::internal::GarbageCollectionReason, v8::GCCallbackFlags, v8::internal::GarbageCollector) src/heap/heap.cc:1892:26 (d8+0x93d973) (BuildId: 8b25ec4b66132a89)
+ #7 v8::internal::Heap::ReportExternalMemoryPressure() src/heap/heap.cc:1586:7 (d8+0x942526) (BuildId: 8b25ec4b66132a89)
+ #8 ReportExternalAllocationLimitReached src/api/api.cc:8757:9 (d8+0x62d9b2) (BuildId: 8b25ec4b66132a89)
+ #9 v8::Isolate::AdjustAmountOfExternalAllocatedMemory(long) src/api/api.cc:9442:5 (d8+0x62d9b2)
+ #10 FromSharedPtr src/objects/managed-inl.h:47:9 (d8+0x5d30c0) (BuildId: 8b25ec4b66132a89)
+ #11 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2944:36 (d8+0x5d30c0)
+ #12 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x684709) (BuildId: 8b25ec4b66132a89)
+ #13 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x68312c) (BuildId: 8b25ec4b66132a89)
+ #14 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x68275b) (BuildId: 8b25ec4b66132a89)
+ #15 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x682273) (BuildId: 8b25ec4b66132a89)
+ #16 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1dd5837)
+ #17 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x85ca90) (BuildId: 8b25ec4b66132a89)
+ #18 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2271:7 (d8+0x603527) (BuildId: 8b25ec4b66132a89)
+ #19 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2198:10 (d8+0x602ce0) (BuildId: 8b25ec4b66132a89)
+ #20 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5c03d6) (BuildId: 8b25ec4b66132a89)
+ #21 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4483:10 (d8+0x5da0d3) (BuildId: 8b25ec4b66132a89)
+ #22 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5217:39 (d8+0x5de08e) (BuildId: 8b25ec4b66132a89)
+ #23 v8::Shell::Main(int, char**) src/d8/d8.cc:5973:20 (d8+0x5e04ed) (BuildId: 8b25ec4b66132a89)
+ #24 main src/d8/d8.cc:6110:43 (d8+0x5e0e80) (BuildId: 8b25ec4b66132a89)
+
+ Previous read of size 8 at 0x553200140008 by thread T3 (mutexes: write M5):
+ #0 operator& src/base/flags.h:56:18 (d8+0xa90de3) (BuildId: 8b25ec4b66132a89)
+ #1 operator& src/base/flags.h:70:12 (d8+0xa90de3)
+ #2 IsFlagSet src/heap/basic-memory-chunk.h:197:63 (d8+0xa90de3)
+ #3 InSharedHeap src/heap/basic-memory-chunk.h:261:38 (d8+0xa90de3)
+ #4 RecordOldToSharedMigratedSlot src/heap/sweeper.cc:779:23 (d8+0xa90de3)
+ #5 v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor::RecordMigratedSlot(v8::internal::HeapObject, v8::internal::MaybeObject, unsigned long) src/heap/sweeper.cc:745:7 (d8+0xa90de3)
+ #6 VisitPointer src/heap/sweeper.cc:684:5 (d8+0xa953ec) (BuildId: 8b25ec4b66132a89)
+ #7 VisitPointers src/heap/sweeper.cc:698:7 (d8+0xa953ec)
+ #8 IterateMaybeWeakPointers<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:141:6 (d8+0xa953ec)
+ #9 IterateBody<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors.h:176:5 (d8+0xa953ec)
+ #10 void v8::internal::CallIterateBody::apply<v8::internal::DescriptorArray::BodyDescriptor, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor>(v8::internal::Map, v8::internal::HeapObject, int, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/objects/objects-body-descriptors-inl.h:1432:5 (d8+0xa953ec)
+ #11 auto v8::internal::BodyDescriptorApply<v8::internal::CallIterateBody, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&>(v8::internal::InstanceType, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&) src/objects/objects-body-descriptors-inl.h:1392:7 (d8+0xa92aa8) (BuildId: 8b25ec4b66132a89)
+ #12 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1438:3 (d8+0xa8d622) (BuildId: 8b25ec4b66132a89)
+ #13 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1425:3 (d8+0xa8d622)
+ #14 IterateFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1413:3 (d8+0xa8d622)
+ #15 v8::internal::(anonymous namespace)::HandlePromotedObject(v8::internal::HeapObject, v8::internal::NonAtomicMarkingState*, v8::internal::PretenuringHandler*, v8::internal::PtrComprCageBase, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/heap/sweeper.cc:801:10 (d8+0xa8d622)
+ #16 v8::internal::Sweeper::RawIteratePromotedPageForRememberedSets(v8::internal::MemoryChunk*, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, std::Cr::unordered_map<v8::internal::MemoryChunk*, v8::internal::SlotSet*, std::Cr::hash<v8::internal::MemoryChunk*>, std::Cr::equal_to<v8::internal::MemoryChunk*>, std::Cr::allocator<std::Cr::pair<v8::internal::MemoryChunk* const, v8::internal::SlotSet*>>>*) src/heap/sweeper.cc:837:7 (d8+0xa8d42b) (BuildId: 8b25ec4b66132a89)
+ #17 ParallelIteratePromotedPageForRememberedSets src/heap/sweeper.cc:984:3 (d8+0xa9a40c) (BuildId: 8b25ec4b66132a89)
+ #18 v8::internal::Sweeper::ConcurrentSweeper::ConcurrentSweepForRememberedSet(v8::JobDelegate*) src/heap/sweeper.cc:60:17 (d8+0xa9a40c)
+ #19 v8::internal::Sweeper::SweeperJob::RunImpl(v8::JobDelegate*, bool) src/heap/sweeper.cc:125:31 (d8+0xa99526) (BuildId: 8b25ec4b66132a89)
+ #20 v8::internal::Sweeper::SweeperJob::Run(v8::JobDelegate*) src/heap/sweeper.cc:97:5 (d8+0xa98b19) (BuildId: 8b25ec4b66132a89)
+ #21 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x201472b) (BuildId: 8b25ec4b66132a89)
+ #22 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x201a240) (BuildId: 8b25ec4b66132a89)
+ #23 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x200e592) (BuildId: 8b25ec4b66132a89)
+ #24 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x200e592)
+
+ Mutex M0 (0x563a3b443820) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 8b25ec4b66132a89)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x200632b) (BuildId: 8b25ec4b66132a89)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x200632b)
+ #3 Construct src/base/lazy-instance.h:129:68 (d8+0x5ecd69) (BuildId: 8b25ec4b66132a89)
+ #4 InitStorageUsingTrait<v8::base::DefaultConstructTrait<v8::base::Mutex> > src/base/lazy-instance.h:106:5 (d8+0x5ecd69)
+ #5 v8::base::LazyInstanceImpl<v8::base::Mutex, v8::base::StaticallyAllocatedInstanceTrait<v8::base::Mutex>, v8::base::DefaultConstructTrait<v8::base::Mutex>, v8::base::ThreadSafeInitOnceTrait, v8::base::LeakyInstanceTrait<v8::base::Mutex>>::InitInstance(void*) src/base/lazy-instance.h:170:5 (d8+0x5ecd69)
+ #6 operator() src/base/once.h:101:32 (d8+0x5ecdad) (BuildId: 8b25ec4b66132a89)
+ #7 __invoke<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:394:23 (d8+0x5ecdad)
+ #8 __call<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:487:9 (d8+0x5ecdad)
+ #9 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:232:12 (d8+0x5ecdad)
+ #10 void std::Cr::__function::__policy_invoker<void ()>::__call_impl<std::Cr::__function::__default_alloc_func<void v8::base::CallOnce<void*, void>(std::Cr::atomic<unsigned char>*, v8::base::FunctionWithArgs<void*>::type, void*)::'lambda'(), void ()>>(std::Cr::__function::__policy_storage const*) buildtools/third_party/libc++/trunk/include/__functional/function.h:711:16 (d8+0x5ecdad)
+ #11 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:842:16 (d8+0x20054f7) (BuildId: 8b25ec4b66132a89)
+ #12 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:1152:12 (d8+0x20054f7)
+ #13 v8::base::CallOnceImpl(std::Cr::atomic<unsigned char>*, std::Cr::function<void ()>) src/base/once.cc:36:5 (d8+0x20054f7)
+ #14 CallOnce<void *, void> src/base/once.h:101:5 (d8+0x5d2f94) (BuildId: 8b25ec4b66132a89)
+ #15 Init<void (*)(void *), void *> src/base/lazy-instance.h:144:5 (d8+0x5d2f94)
+ #16 Init src/base/lazy-instance.h:175:5 (d8+0x5d2f94)
+ #17 Pointer src/base/lazy-instance.h:180:5 (d8+0x5d2f94)
+ #18 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2929:24 (d8+0x5d2f94)
+ #19 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x684709) (BuildId: 8b25ec4b66132a89)
+ #20 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x68312c) (BuildId: 8b25ec4b66132a89)
+ #21 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x68275b) (BuildId: 8b25ec4b66132a89)
+ #22 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x682273) (BuildId: 8b25ec4b66132a89)
+ #23 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1dd5837)
+ #24 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x85ca90) (BuildId: 8b25ec4b66132a89)
+ #25 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2271:7 (d8+0x603527) (BuildId: 8b25ec4b66132a89)
+ #26 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2198:10 (d8+0x602ce0) (BuildId: 8b25ec4b66132a89)
+ #27 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5c03d6) (BuildId: 8b25ec4b66132a89)
+ #28 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4483:10 (d8+0x5da0d3) (BuildId: 8b25ec4b66132a89)
+ #29 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5217:39 (d8+0x5de08e) (BuildId: 8b25ec4b66132a89)
+ #30 v8::Shell::Main(int, char**) src/d8/d8.cc:5973:20 (d8+0x5e04ed) (BuildId: 8b25ec4b66132a89)
+ #31 main src/d8/d8.cc:6110:43 (d8+0x5e0e80) (BuildId: 8b25ec4b66132a89)
+
+ Mutex M1 (0x7b10000004c8) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 8b25ec4b66132a89)
+ #1 InitializeRecursiveNativeHandle src/base/platform/mutex.cc:112:12 (d8+0x200643a) (BuildId: 8b25ec4b66132a89)
+ #2 v8::base::RecursiveMutex::RecursiveMutex() src/base/platform/mutex.cc:187:3 (d8+0x200643a)
+ #3 v8::internal::GlobalSafepoint::GlobalSafepoint(v8::internal::Isolate*) src/heap/safepoint.cc:293:18 (d8+0xa582f9) (BuildId: 8b25ec4b66132a89)
+ #4 make_unique<v8::internal::GlobalSafepoint, v8::internal::Isolate *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x88808d) (BuildId: 8b25ec4b66132a89)
+ #5 v8::internal::Isolate::Init(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4264:25 (d8+0x88808d)
+ #6 v8::internal::Isolate::InitWithSnapshot(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4054:10 (d8+0x88a109) (BuildId: 8b25ec4b66132a89)
+ #7 v8::internal::Snapshot::Initialize(v8::internal::Isolate*) src/snapshot/snapshot.cc:182:27 (d8+0x10fee3d) (BuildId: 8b25ec4b66132a89)
+ #8 v8::Isolate::Initialize(v8::Isolate*, v8::Isolate::CreateParams const&) src/api/api.cc:9048:8 (d8+0x62cc79) (BuildId: 8b25ec4b66132a89)
+ #9 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9084:3 (d8+0x62cf35) (BuildId: 8b25ec4b66132a89)
+ #10 v8::Shell::Main(int, char**) src/d8/d8.cc:5902:22 (d8+0x5dff2e) (BuildId: 8b25ec4b66132a89)
+ #11 main src/d8/d8.cc:6110:43 (d8+0x5e0e80) (BuildId: 8b25ec4b66132a89)
+
+ Mutex M2 (0x7b3800010020) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 8b25ec4b66132a89)
+ #1 InitializeRecursiveNativeHandle src/base/platform/mutex.cc:112:12 (d8+0x200643a) (BuildId: 8b25ec4b66132a89)
+ #2 v8::base::RecursiveMutex::RecursiveMutex() src/base/platform/mutex.cc:187:3 (d8+0x200643a)
+ #3 v8::internal::IsolateSafepoint::IsolateSafepoint(v8::internal::Heap*) src/heap/safepoint.cc:29:19 (d8+0xa56fd8) (BuildId: 8b25ec4b66132a89)
+ #4 make_unique<v8::internal::IsolateSafepoint, v8::internal::Heap *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x931778) (BuildId: 8b25ec4b66132a89)
+ #5 v8::internal::Heap::Heap() src/heap/heap.cc:232:18 (d8+0x931778)
+ #6 v8::internal::Isolate::Isolate(std::Cr::unique_ptr<v8::internal::IsolateAllocator, std::Cr::default_delete<v8::internal::IsolateAllocator>>, bool) src/execution/isolate.cc:3432:10 (d8+0x883e6c) (BuildId: 8b25ec4b66132a89)
+ #7 Allocate src/execution/isolate.cc:3350:25 (d8+0x882a60) (BuildId: 8b25ec4b66132a89)
+ #8 v8::internal::Isolate::New() src/execution/isolate.cc:3328:22 (d8+0x882a60)
+ #9 Allocate src/api/api.cc:8975:37 (d8+0x62cf27) (BuildId: 8b25ec4b66132a89)
+ #10 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9083:25 (d8+0x62cf27)
+ #11 v8::Worker::ExecuteInThread() src/d8/d8.cc:4745:14 (d8+0x5db3ed) (BuildId: 8b25ec4b66132a89)
+ #12 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4622:11 (d8+0x5db334) (BuildId: 8b25ec4b66132a89)
+ #13 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x200e592) (BuildId: 8b25ec4b66132a89)
+ #14 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x200e592)
+
+ Mutex M3 (0x7b380003e000) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 8b25ec4b66132a89)
+ #1 InitializeRecursiveNativeHandle src/base/platform/mutex.cc:112:12 (d8+0x200643a) (BuildId: 8b25ec4b66132a89)
+ #2 v8::base::RecursiveMutex::RecursiveMutex() src/base/platform/mutex.cc:187:3 (d8+0x200643a)
+ #3 v8::internal::IsolateSafepoint::IsolateSafepoint(v8::internal::Heap*) src/heap/safepoint.cc:29:19 (d8+0xa56fd8) (BuildId: 8b25ec4b66132a89)
+ #4 make_unique<v8::internal::IsolateSafepoint, v8::internal::Heap *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x931778) (BuildId: 8b25ec4b66132a89)
+ #5 v8::internal::Heap::Heap() src/heap/heap.cc:232:18 (d8+0x931778)
+ #6 v8::internal::Isolate::Isolate(std::Cr::unique_ptr<v8::internal::IsolateAllocator, std::Cr::default_delete<v8::internal::IsolateAllocator>>, bool) src/execution/isolate.cc:3432:10 (d8+0x883e6c) (BuildId: 8b25ec4b66132a89)
+ #7 Allocate src/execution/isolate.cc:3350:25 (d8+0x882a60) (BuildId: 8b25ec4b66132a89)
+ #8 v8::internal::Isolate::New() src/execution/isolate.cc:3328:22 (d8+0x882a60)
+ #9 Allocate src/api/api.cc:8975:37 (d8+0x62cf27) (BuildId: 8b25ec4b66132a89)
+ #10 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9083:25 (d8+0x62cf27)
+ #11 v8::Worker::ExecuteInThread() src/d8/d8.cc:4745:14 (d8+0x5db3ed) (BuildId: 8b25ec4b66132a89)
+ #12 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4622:11 (d8+0x5db334) (BuildId: 8b25ec4b66132a89)
+ #13 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x200e592) (BuildId: 8b25ec4b66132a89)
+ #14 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x200e592)
+
+ Mutex M4 (0x7b38000000a0) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 8b25ec4b66132a89)
+ #1 InitializeRecursiveNativeHandle src/base/platform/mutex.cc:112:12 (d8+0x200643a) (BuildId: 8b25ec4b66132a89)
+ #2 v8::base::RecursiveMutex::RecursiveMutex() src/base/platform/mutex.cc:187:3 (d8+0x200643a)
+ #3 v8::internal::IsolateSafepoint::IsolateSafepoint(v8::internal::Heap*) src/heap/safepoint.cc:29:19 (d8+0xa56fd8) (BuildId: 8b25ec4b66132a89)
+ #4 make_unique<v8::internal::IsolateSafepoint, v8::internal::Heap *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x931778) (BuildId: 8b25ec4b66132a89)
+ #5 v8::internal::Heap::Heap() src/heap/heap.cc:232:18 (d8+0x931778)
+ #6 v8::internal::Isolate::Isolate(std::Cr::unique_ptr<v8::internal::IsolateAllocator, std::Cr::default_delete<v8::internal::IsolateAllocator>>, bool) src/execution/isolate.cc:3432:10 (d8+0x883e6c) (BuildId: 8b25ec4b66132a89)
+ #7 Allocate src/execution/isolate.cc:3350:25 (d8+0x882a60) (BuildId: 8b25ec4b66132a89)
+ #8 v8::internal::Isolate::New() src/execution/isolate.cc:3328:22 (d8+0x882a60)
+ #9 Allocate src/api/api.cc:8975:37 (d8+0x62cf27) (BuildId: 8b25ec4b66132a89)
+ #10 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9083:25 (d8+0x62cf27)
+ #11 v8::Shell::Main(int, char**) src/d8/d8.cc:5902:22 (d8+0x5dff2e) (BuildId: 8b25ec4b66132a89)
+ #12 main src/d8/d8.cc:6110:43 (d8+0x5e0e80) (BuildId: 8b25ec4b66132a89)
+
+ Mutex M5 (0x7b0c0001a670) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x523300) (BuildId: 8b25ec4b66132a89)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x200632b) (BuildId: 8b25ec4b66132a89)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x200632b)
+ #3 v8::internal::MemoryChunk::MemoryChunk(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability, v8::internal::PageSize) src/heap/memory-chunk.cc:154:16 (d8+0xa31caf) (BuildId: 8b25ec4b66132a89)
+ #4 v8::internal::Page::Page(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability) src/heap/spaces.cc:53:7 (d8+0xa84c1f) (BuildId: 8b25ec4b66132a89)
+ #5 v8::internal::MemoryAllocator::AllocatePage(v8::internal::MemoryAllocator::AllocationMode, v8::internal::Space*, v8::internal::Executability) src/heap/memory-allocator.cc:579:40 (d8+0xa2e8a4) (BuildId: 8b25ec4b66132a89)
+ #6 v8::internal::PagedSpaceBase::TryExpandImpl() src/heap/paged-spaces.cc:393:44 (d8+0xa4d968) (BuildId: 8b25ec4b66132a89)
+ #7 PreallocatePages src/heap/new-spaces.cc:984:10 (d8+0xa3d120) (BuildId: 8b25ec4b66132a89)
+ #8 PagedSpaceForNewSpace src/heap/new-spaces.cc:901:8 (d8+0xa3d120)
+ #9 v8::internal::PagedNewSpace::PagedNewSpace(v8::internal::Heap*, unsigned long, unsigned long, v8::internal::LinearAllocationArea&) src/heap/new-spaces.cc:1016:7 (d8+0xa3d120)
+ #10 make_unique<v8::internal::PagedNewSpace, v8::internal::Heap *, unsigned long &, unsigned long &, v8::internal::LinearAllocationArea &> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x958151) (BuildId: 8b25ec4b66132a89)
+ #11 v8::internal::Heap::SetUpSpaces(v8::internal::LinearAllocationArea&, v8::internal::LinearAllocationArea&) src/heap/heap.cc:5564:27 (d8+0x958151)
+ #12 v8::internal::Isolate::Init(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4338:9 (d8+0x88877c) (BuildId: 8b25ec4b66132a89)
+ #13 v8::internal::Isolate::InitWithSnapshot(v8::internal::SnapshotData*, v8::internal::SnapshotData*, v8::internal::SnapshotData*, bool) src/execution/isolate.cc:4054:10 (d8+0x88a109) (BuildId: 8b25ec4b66132a89)
+ #14 v8::internal::Snapshot::Initialize(v8::internal::Isolate*) src/snapshot/snapshot.cc:182:27 (d8+0x10fee3d) (BuildId: 8b25ec4b66132a89)
+ #15 v8::Isolate::Initialize(v8::Isolate*, v8::Isolate::CreateParams const&) src/api/api.cc:9048:8 (d8+0x62cc79) (BuildId: 8b25ec4b66132a89)
+ #16 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9084:3 (d8+0x62cf35) (BuildId: 8b25ec4b66132a89)
+ #17 v8::Worker::ExecuteInThread() src/d8/d8.cc:4745:14 (d8+0x5db3ed) (BuildId: 8b25ec4b66132a89)
+ #18 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4622:11 (d8+0x5db334) (BuildId: 8b25ec4b66132a89)
+ #19 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x200e592) (BuildId: 8b25ec4b66132a89)
+ #20 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x200e592)
+
+ Thread T3 'V8 DefaultWorke' (tid=10500, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x52198b) (BuildId: 8b25ec4b66132a89)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x200e4a6) (BuildId: 8b25ec4b66132a89)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x2019c07) (BuildId: 8b25ec4b66132a89)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x2019c07)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x2019c07)
+ #5 construct_at<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:38:48 (d8+0x2010d60) (BuildId: 8b25ec4b66132a89)
+ #6 construct<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:304:9 (d8+0x2010d60)
+ #7 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:275:9 (d8+0x2010d60)
+ #8 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:936:55 (d8+0x2010d60)
+ #9 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:945:12 (d8+0x2010d60)
+ #10 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:132:7 (d8+0x2010d60)
+ #11 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (d8+0x2010d60)
+ #12 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x20103d2) (BuildId: 8b25ec4b66132a89)
+ #13 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (d8+0x20103d2)
+ #14 v8::Shell::Main(int, char**) src/d8/d8.cc:5805:16 (d8+0x5dfa79) (BuildId: 8b25ec4b66132a89)
+ #15 main src/d8/d8.cc:6110:43 (d8+0x5e0e80) (BuildId: 8b25ec4b66132a89)
+
+SUMMARY: ThreadSanitizer: data race src/heap/memory-chunk.cc in v8::internal::MemoryChunk::SetOldGenerationPageFlags(bool)
+==================
+ThreadSanitizer: reported 1 warnings
+ThreadSanitizer: Matched 2 suppressions (pid=10493):
+2 deadlock:GlobalSafepoint::EnterGlobalSafepointScope \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.expected
new file mode 100644
index 0000000000..7623d673f2
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.expected
@@ -0,0 +1,3 @@
+__cxx_atomic_load<int>
+load
+atomic_load_explicit<int> \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.txt
new file mode 100644
index 0000000000..b0121ef5ab
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_6.txt
@@ -0,0 +1,112 @@
+WARNING: ThreadSanitizer: data race (pid=18876)
+ Atomic read of size 4 at 0x7e8a017bb344 by thread T3 (mutexes: write M0):
+ #0 __cxx_atomic_load<int> buildtools/third_party/libc++/trunk/include/atomic:948:12 (d8+0xa91de9) (BuildId: 6011f09c007429d6)
+ #1 load buildtools/third_party/libc++/trunk/include/atomic:1537:17 (d8+0xa91de9)
+ #2 atomic_load_explicit<int> buildtools/third_party/libc++/trunk/include/atomic:1916:17 (d8+0xa91de9)
+ #3 Relaxed_Load src/base/atomicops.h:237:10 (d8+0xa91de9)
+ #4 Relaxed_Load<unsigned int> src/base/atomic-utils.h:87:9 (d8+0xa91de9)
+ #5 Relaxed_Load src/objects/compressed-slots-inl.h:75:26 (d8+0xa91de9)
+ #6 VisitPointer src/heap/sweeper.cc:674:50 (d8+0xa91de9)
+ #7 VisitPointers src/heap/sweeper.cc:690:7 (d8+0xa91de9)
+ #8 IteratePointers<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:128:6 (d8+0xa91de9)
+ #9 IterateBody<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors.h:135:5 (d8+0xa91de9)
+ #10 void v8::internal::CallIterateBody::apply<v8::internal::FixedArray::BodyDescriptor, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor>(v8::internal::Map, v8::internal::HeapObject, int, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/objects/objects-body-descriptors-inl.h:1432:5 (d8+0xa91de9)
+ #11 auto v8::internal::BodyDescriptorApply<v8::internal::CallIterateBody, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&>(v8::internal::InstanceType, v8::internal::Map&, v8::internal::HeapObject&, int&, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*&) src/objects/objects-body-descriptors-inl.h (d8+0xa910c4) (BuildId: 6011f09c007429d6)
+ #12 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1438:3 (d8+0xa8c512) (BuildId: 6011f09c007429d6)
+ #13 IterateBodyFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1425:3 (d8+0xa8c512)
+ #14 IterateFast<v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor> src/objects/objects-body-descriptors-inl.h:1413:3 (d8+0xa8c512)
+ #15 v8::internal::(anonymous namespace)::HandlePromotedObject(v8::internal::HeapObject, v8::internal::NonAtomicMarkingState*, v8::internal::PretenuringHandler*, v8::internal::PtrComprCageBase, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, v8::internal::(anonymous namespace)::PromotedPageRecordMigratedSlotVisitor*) src/heap/sweeper.cc:801:10 (d8+0xa8c512)
+ #16 v8::internal::Sweeper::RawIteratePromotedPageForRememberedSets(v8::internal::MemoryChunk*, std::Cr::unordered_map<v8::internal::AllocationSite, unsigned long, v8::internal::Object::Hasher, std::Cr::equal_to<v8::internal::AllocationSite>, std::Cr::allocator<std::Cr::pair<v8::internal::AllocationSite const, unsigned long>>>*, std::Cr::unordered_map<v8::internal::MemoryChunk*, v8::internal::SlotSet*, std::Cr::hash<v8::internal::MemoryChunk*>, std::Cr::equal_to<v8::internal::MemoryChunk*>, std::Cr::allocator<std::Cr::pair<v8::internal::MemoryChunk* const, v8::internal::SlotSet*>>>*) src/heap/sweeper.cc:837:7 (d8+0xa8c31b) (BuildId: 6011f09c007429d6)
+ #17 ParallelIteratePromotedPageForRememberedSets src/heap/sweeper.cc:984:3 (d8+0xa992fc) (BuildId: 6011f09c007429d6)
+ #18 v8::internal::Sweeper::ConcurrentSweeper::ConcurrentSweepForRememberedSet(v8::JobDelegate*) src/heap/sweeper.cc:60:17 (d8+0xa992fc)
+ #19 v8::internal::Sweeper::SweeperJob::RunImpl(v8::JobDelegate*, bool) src/heap/sweeper.cc:125:31 (d8+0xa98416) (BuildId: 6011f09c007429d6)
+ #20 v8::internal::Sweeper::SweeperJob::Run(v8::JobDelegate*) src/heap/sweeper.cc:97:5 (d8+0xa97a09) (BuildId: 6011f09c007429d6)
+ #21 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (d8+0x200c72b) (BuildId: 6011f09c007429d6)
+ #22 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (d8+0x2012240) (BuildId: 6011f09c007429d6)
+ #23 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x2006592) (BuildId: 6011f09c007429d6)
+ #24 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x2006592)
+
+ Previous write of size 4 at 0x7e8a017bb344 by main thread:
+ #0 __tsan_memcpy /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:3105:3 (d8+0x564e00) (BuildId: 6011f09c007429d6)
+ #1 MemCopy src/utils/memcopy.h (d8+0x95e3a0) (BuildId: 6011f09c007429d6)
+ #2 void v8::internal::Heap::CopyRange<v8::internal::CompressedObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedObjectSlot, v8::internal::CompressedObjectSlot, int, v8::internal::WriteBarrierMode) src/heap/heap.cc:2064:5 (d8+0x95e3a0)
+ #3 CopyElements src/objects/fixed-array-inl.h:274:20 (d8+0xc0fb96) (BuildId: 6011f09c007429d6)
+ #4 CopyObjectToObjectElements src/objects/elements.cc:228:6 (d8+0xc0fb96)
+ #5 v8::internal::(anonymous namespace)::FastSmiOrObjectElementsAccessor<v8::internal::(anonymous namespace)::FastPackedObjectElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)2>>::CopyElementsImpl(v8::internal::Isolate*, v8::internal::FixedArrayBase, unsigned int, v8::internal::FixedArrayBase, v8::internal::ElementsKind, unsigned int, int, int) src/objects/elements.cc:2566:9 (d8+0xc0fb96)
+ #6 v8::internal::(anonymous namespace)::ElementsAccessorBase<v8::internal::(anonymous namespace)::FastPackedObjectElementsAccessor, v8::internal::(anonymous namespace)::ElementsKindTraits<(v8::internal::ElementsKind)2>>::CopyElements(v8::internal::JSObject, unsigned int, v8::internal::ElementsKind, v8::internal::Handle<v8::internal::FixedArrayBase>, unsigned int, int) src/objects/elements.cc:1045:5 (d8+0xc0f00a) (BuildId: 6011f09c007429d6)
+ #7 v8::internal::ElementsAccessor::Concat(v8::internal::Isolate*, v8::internal::BuiltinArguments*, unsigned int, unsigned int) src/objects/elements.cc:5424:15 (d8+0xbfcd07) (BuildId: 6011f09c007429d6)
+ #8 v8::internal::(anonymous namespace)::Fast_ArrayConcat(v8::internal::Isolate*, v8::internal::BuiltinArguments*) src/builtins/builtins-array.cc:1509:10 (d8+0x68def0) (BuildId: 6011f09c007429d6)
+ #9 v8::internal::Builtin_Impl_ArrayConcat(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-array.cc:1531:9 (d8+0x686e5e) (BuildId: 6011f09c007429d6)
+ #10 v8::internal::Builtin_ArrayConcat(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-array.cc:1515:1 (d8+0x686783) (BuildId: 6011f09c007429d6)
+ #11 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1dcd837)
+ #12 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x85ba40) (BuildId: 6011f09c007429d6)
+ #13 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2271:7 (d8+0x602527) (BuildId: 6011f09c007429d6)
+ #14 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2198:10 (d8+0x601ce0) (BuildId: 6011f09c007429d6)
+ #15 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5bf3d6) (BuildId: 6011f09c007429d6)
+ #16 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4483:10 (d8+0x5d90d3) (BuildId: 6011f09c007429d6)
+ #17 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5217:39 (d8+0x5dd08e) (BuildId: 6011f09c007429d6)
+ #18 v8::Shell::Main(int, char**) src/d8/d8.cc:6013:18 (d8+0x5df876) (BuildId: 6011f09c007429d6)
+ #19 main src/d8/d8.cc:6110:43 (d8+0x5dfe80) (BuildId: 6011f09c007429d6)
+
+ Mutex M0 (0x7b0c00005670) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x522300) (BuildId: 6011f09c007429d6)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1ffe32b) (BuildId: 6011f09c007429d6)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1ffe32b)
+ #3 v8::internal::MemoryChunk::MemoryChunk(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability, v8::internal::PageSize) src/heap/memory-chunk.cc:154:16 (d8+0xa30b9f) (BuildId: 6011f09c007429d6)
+ #4 v8::internal::Page::Page(v8::internal::Heap*, v8::internal::BaseSpace*, unsigned long, unsigned long, unsigned long, v8::internal::VirtualMemory, v8::internal::Executability) src/heap/spaces.cc:53:7 (d8+0xa83b0f) (BuildId: 6011f09c007429d6)
+ #5 v8::internal::MemoryAllocator::AllocatePage(v8::internal::MemoryAllocator::AllocationMode, v8::internal::Space*, v8::internal::Executability) src/heap/memory-allocator.cc:579:40 (d8+0xa2d794) (BuildId: 6011f09c007429d6)
+ #6 v8::internal::PagedSpaceBase::TryExpandImpl() src/heap/paged-spaces.cc:393:44 (d8+0xa4c858) (BuildId: 6011f09c007429d6)
+ #7 PreallocatePages src/heap/new-spaces.cc:984:10 (d8+0xa3d952) (BuildId: 6011f09c007429d6)
+ #8 EnsureCurrentCapacity src/heap/new-spaces.cc:995:10 (d8+0xa3d952)
+ #9 v8::internal::PagedNewSpace::EnsureCurrentCapacity() src/heap/new-spaces.h:790:25 (d8+0xa3d952)
+ #10 v8::internal::MinorMarkCompactCollector::Finish() src/heap/mark-compact.cc:6041:31 (d8+0x9d3018) (BuildId: 6011f09c007429d6)
+ #11 v8::internal::MinorMarkCompactCollector::CollectGarbage() src/heap/mark-compact.cc:6076:3 (d8+0x9d41f9) (BuildId: 6011f09c007429d6)
+ #12 v8::internal::Heap::MinorMarkCompact() src/heap/heap.cc:2600:34 (d8+0x9491ad) (BuildId: 6011f09c007429d6)
+ #13 v8::internal::Heap::PerformGarbageCollection(v8::internal::GarbageCollector, v8::internal::GarbageCollectionReason, char const*) src/heap/heap.cc:2258:5 (d8+0x942350) (BuildId: 6011f09c007429d6)
+ #14 v8::internal::Heap::CollectGarbage(v8::internal::AllocationSpace, v8::internal::GarbageCollectionReason, v8::GCCallbackFlags) src/heap/heap.cc:1714:13 (d8+0x93d77e) (BuildId: 6011f09c007429d6)
+ #15 v8::internal::HeapAllocator::AllocateRawWithLightRetrySlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:95:14 (d8+0x92c626) (BuildId: 6011f09c007429d6)
+ #16 v8::internal::HeapAllocator::AllocateRawWithRetryOrFailSlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:110:7 (d8+0x92d73e) (BuildId: 6011f09c007429d6)
+ #17 AllocateRawWith<(v8::internal::HeapAllocator::AllocationRetryMode)1> src/heap/heap-allocator-inl.h:237:16 (d8+0x8fe787) (BuildId: 6011f09c007429d6)
+ #18 v8::internal::Factory::AllocateRaw(int, v8::internal::AllocationType, v8::internal::AllocationAlignment) src/heap/factory.cc:327:23 (d8+0x8fe787)
+ #19 AllocateRaw src/heap/factory-base.cc:1141:18 (d8+0x8f0283) (BuildId: 6011f09c007429d6)
+ #20 AllocateRawArray src/heap/factory-base.cc:1088:23 (d8+0x8f0283)
+ #21 AllocateRawFixedArray src/heap/factory-base.cc:1105:10 (d8+0x8f0283)
+ #22 v8::internal::FactoryBase<v8::internal::Factory>::NewFixedArrayWithFiller(v8::internal::Handle<v8::internal::Map>, int, v8::internal::Handle<v8::internal::Oddball>, v8::internal::AllocationType) src/heap/factory-base.cc:130:23 (d8+0x8f0283)
+ #23 v8::internal::FactoryBase<v8::internal::Factory>::NewFixedArray(int, v8::internal::AllocationType) src/heap/factory-base.cc:101:10 (d8+0x8f01f6) (BuildId: 6011f09c007429d6)
+ #24 NewJSArrayStorage src/heap/factory.cc:2829:14 (d8+0x9139ca) (BuildId: 6011f09c007429d6)
+ #25 v8::internal::Factory::NewJSArray(v8::internal::ElementsKind, int, int, v8::internal::ArrayStorageAllocationMode, v8::internal::AllocationType) src/heap/factory.cc:2728:7 (d8+0x9139ca)
+ #26 v8::internal::ElementsAccessor::Concat(v8::internal::Isolate*, v8::internal::BuiltinArguments*, unsigned int, unsigned int) src/objects/elements.cc:5409:54 (d8+0xbfcaaa) (BuildId: 6011f09c007429d6)
+ #27 v8::internal::(anonymous namespace)::Fast_ArrayConcat(v8::internal::Isolate*, v8::internal::BuiltinArguments*) src/builtins/builtins-array.cc:1509:10 (d8+0x68def0) (BuildId: 6011f09c007429d6)
+ #28 v8::internal::Builtin_Impl_ArrayConcat(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-array.cc:1531:9 (d8+0x686e5e) (BuildId: 6011f09c007429d6)
+ #29 v8::internal::Builtin_ArrayConcat(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-array.cc:1515:1 (d8+0x686783) (BuildId: 6011f09c007429d6)
+ #30 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1dcd837)
+ #31 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x85ba40) (BuildId: 6011f09c007429d6)
+ #32 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2271:7 (d8+0x602527) (BuildId: 6011f09c007429d6)
+ #33 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2198:10 (d8+0x601ce0) (BuildId: 6011f09c007429d6)
+ #34 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5bf3d6) (BuildId: 6011f09c007429d6)
+ #35 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4483:10 (d8+0x5d90d3) (BuildId: 6011f09c007429d6)
+ #36 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5217:39 (d8+0x5dd08e) (BuildId: 6011f09c007429d6)
+ #37 v8::Shell::Main(int, char**) src/d8/d8.cc:6013:18 (d8+0x5df876) (BuildId: 6011f09c007429d6)
+ #38 main src/d8/d8.cc:6110:43 (d8+0x5dfe80) (BuildId: 6011f09c007429d6)
+
+ Thread T3 'V8 DefaultWorke' (tid=18881, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x52098b) (BuildId: 6011f09c007429d6)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x20064a6) (BuildId: 6011f09c007429d6)
+ #2 WorkerThread src/libplatform/default-worker-threads-task-runner.cc:66:3 (d8+0x2011c07) (BuildId: 6011f09c007429d6)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x2011c07)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (d8+0x2011c07)
+ #5 construct_at<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:38:48 (d8+0x2008d60) (BuildId: 6011f09c007429d6)
+ #6 construct<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:304:9 (d8+0x2008d60)
+ #7 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:275:9 (d8+0x2008d60)
+ #8 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:936:55 (d8+0x2008d60)
+ #9 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:945:12 (d8+0x2008d60)
+ #10 EnsureBackgroundTaskRunnerInitialized src/libplatform/default-platform.cc:132:7 (d8+0x2008d60)
+ #11 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (d8+0x2008d60)
+ #12 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:686:30 (d8+0x20083d2) (BuildId: 6011f09c007429d6)
+ #13 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (d8+0x20083d2)
+ #14 v8::Shell::Main(int, char**) src/d8/d8.cc:5805:16 (d8+0x5dea79) (BuildId: 6011f09c007429d6)
+ #15 main src/d8/d8.cc:6110:43 (d8+0x5dfe80) (BuildId: 6011f09c007429d6)
+
+SUMMARY: ThreadSanitizer: data race buildtools/third_party/libc++/trunk/include/atomic:948:12 in __cxx_atomic_load<int>
+==================
+ThreadSanitizer: reported 1 warnings \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.expected
new file mode 100644
index 0000000000..57bad319c7
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.expected
@@ -0,0 +1,3 @@
+pthread_mutex_lock
+LockNativeHandle
+v8::base::Mutex::Lock() \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.txt
new file mode 100644
index 0000000000..fe119ea7c3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_8.txt
@@ -0,0 +1,510 @@
+WARNING: ThreadSanitizer: use of an invalid mutex (e.g. uninitialized or destroyed) (pid=5297)
+ #0 pthread_mutex_lock sanitizer_common/sanitizer_common_interceptors.inc:4481:3 (d8+0x536ceb) (BuildId: e7949a0603ee0889)
+ #1 LockNativeHandle src/base/platform/mutex.cc:128:16 (d8+0x1fbfa49) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Lock() src/base/platform/mutex.cc:166:3 (d8+0x1fbfa49)
+ #3 LockGuard src/base/platform/mutex.h:311:30 (d8+0x10f75a0) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:34:20 (d8+0x10f75a0)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f75a0)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Mutex M0 (0x7b24000090f0) created at:
+ #0 pthread_mutex_lock sanitizer_common/sanitizer_common_interceptors.inc:4481:3 (d8+0x536ceb) (BuildId: e7949a0603ee0889)
+ #1 LockNativeHandle src/base/platform/mutex.cc:128:16 (d8+0x1fbfa49) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Lock() src/base/platform/mutex.cc:166:3 (d8+0x1fbfa49)
+ #3 LockGuard src/base/platform/mutex.h:311:30 (d8+0x10f75a0) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:34:20 (d8+0x10f75a0)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f75a0)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: use of an invalid mutex (e.g. uninitialized or destroyed) src/base/platform/mutex.cc:128:16 in LockNativeHandle
+==================
+==================
+WARNING: ThreadSanitizer: heap-use-after-free (pid=5297)
+ Read of size 1 at 0x7b2400009118 by main thread (mutexes: write M0):
+ #0 Register src/tasks/cancelable-task.cc:35:7 (d8+0x10f75ac) (BuildId: e7949a0603ee0889)
+ #1 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f75ac)
+ #2 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #3 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #4 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #5 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #6 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #7 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #8 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #9 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Previous write of size 8 at 0x7b2400009118 by thread T9:
+ #0 operator delete(void*) /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126:3 (d8+0x59aadf) (BuildId: e7949a0603ee0889)
+ #1 v8::internal::Isolate::~Isolate() src/execution/isolate.cc:3814:3 (d8+0x879749) (BuildId: e7949a0603ee0889)
+ #2 v8::internal::Isolate::Delete(v8::internal::Isolate*) src/execution/isolate.cc:3402:12 (d8+0x875e8f) (BuildId: e7949a0603ee0889)
+ #3 v8::Isolate::Dispose() src/api/api.cc:9095:3 (d8+0x5ecc4f) (BuildId: e7949a0603ee0889)
+ #4 v8::Shell::OnExit(v8::Isolate*, bool) src/d8/d8.cc:3874:12 (d8+0x5c7272) (BuildId: e7949a0603ee0889)
+ #5 v8::Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>*) src/d8/d8.cc:2983:3 (d8+0x5c7220) (BuildId: e7949a0603ee0889)
+ #6 operator() src/base/once.h:101:32 (d8+0x5e069d) (BuildId: e7949a0603ee0889)
+ #7 __invoke<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:394:23 (d8+0x5e069d)
+ #8 __call<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:479:9 (d8+0x5e069d)
+ #9 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:232:12 (d8+0x5e069d)
+ #10 void std::Cr::__function::__policy_invoker<void ()>::__call_impl<std::Cr::__function::__default_alloc_func<void v8::base::CallOnce<v8::FunctionCallbackInfo<v8::Value>*, void>(std::Cr::atomic<unsigned char>*, v8::base::FunctionWithArgs<v8::FunctionCallbackInfo<v8::Value>*>::type, v8::FunctionCallbackInfo<v8::Value>*)::'lambda'(), void ()>>(std::Cr::__function::__policy_storage const*) buildtools/third_party/libc++/trunk/include/__functional/function.h:711:16 (d8+0x5e069d)
+ #11 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:842:16 (d8+0x1fbebb7) (BuildId: e7949a0603ee0889)
+ #12 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:1152:12 (d8+0x1fbebb7)
+ #13 v8::base::CallOnceImpl(std::Cr::atomic<unsigned char>*, std::Cr::function<void ()>) src/base/once.cc:36:5 (d8+0x1fbebb7)
+ #14 CallOnce<v8::FunctionCallbackInfo<v8::Value> *, void> src/base/once.h:101:5 (d8+0x5c80df) (BuildId: e7949a0603ee0889)
+ #15 v8::Shell::Quit(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2988:3 (d8+0x5c80df)
+ #16 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #17 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<false>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x676bfe) (BuildId: e7949a0603ee0889)
+ #18 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:148:5 (d8+0x675bbf) (BuildId: e7949a0603ee0889)
+ #19 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #20 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #21 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #22 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #23 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #24 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #25 v8::Worker::ExecuteInThread() src/d8/d8.cc:4736:13 (d8+0x5ce6c1) (BuildId: e7949a0603ee0889)
+ #26 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4564:11 (d8+0x5ce0c4) (BuildId: e7949a0603ee0889)
+ #27 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x1fc7c52) (BuildId: e7949a0603ee0889)
+ #28 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x1fc7c52)
+
+ Mutex M0 (0x7b4400002018) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x516300) (BuildId: e7949a0603ee0889)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1fbf9eb) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1fbf9eb)
+ #3 Worker src/d8/d8.cc:4526:9 (d8+0x5e0593) (BuildId: e7949a0603ee0889)
+ #4 v8::Worker* std::Cr::construct_at[abi:v160000]<v8::Worker, char*, v8::Worker*>(v8::Worker*, char*&&) buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:36:48 (d8+0x5e0593)
+ #5 construct<v8::Worker, char *, void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:297:9 (d8+0x5e02ba) (BuildId: e7949a0603ee0889)
+ #6 __shared_ptr_emplace<char *> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:276:9 (d8+0x5e02ba)
+ #7 std::Cr::shared_ptr<v8::Worker> std::Cr::allocate_shared[abi:v160000]<v8::Worker, std::Cr::allocator<v8::Worker>, char*, void>(std::Cr::allocator<v8::Worker> const&, char*&&) buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:937:55 (d8+0x5e02ba)
+ #8 make_shared<v8::Worker, char *, void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:946:12 (d8+0x5c5ee4) (BuildId: e7949a0603ee0889)
+ #9 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2888:19 (d8+0x5c5ee4)
+ #10 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #11 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #12 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #13 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #14 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #15 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #16 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #17 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #18 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #19 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #20 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #21 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #22 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Thread T9 'WorkerThread' (tid=5330, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x51498b) (BuildId: e7949a0603ee0889)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x1fc7b66) (BuildId: e7949a0603ee0889)
+ #2 v8::Worker::StartWorkerThread(v8::Isolate*, std::Cr::shared_ptr<v8::Worker>) src/d8/d8.cc:4548:16 (d8+0x5c61b7) (BuildId: e7949a0603ee0889)
+ #3 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2894:10 (d8+0x5c602d) (BuildId: e7949a0603ee0889)
+ #4 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #5 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #6 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #7 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #8 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #9 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #10 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #11 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #13 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #14 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #15 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #16 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: heap-use-after-free src/tasks/cancelable-task.cc:35:7 in Register
+==================
+==================
+WARNING: ThreadSanitizer: heap-use-after-free (pid=5297)
+ Atomic read of size 1 at 0x7b24000090f0 by main thread (mutexes: write M0):
+ #0 pthread_mutex_unlock sanitizer_common/sanitizer_common_interceptors.inc:4495:3 (d8+0x536ebb) (BuildId: e7949a0603ee0889)
+ #1 UnlockNativeHandle src/base/platform/mutex.cc:135:16 (d8+0x1fbfa79) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Unlock() src/base/platform/mutex.cc:173:3 (d8+0x1fbfa79)
+ #3 ~LockGuard src/base/platform/mutex.h:316:30 (d8+0x10f7651) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:47:1 (d8+0x10f7651)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f7651)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Previous write of size 8 at 0x7b24000090f0 by thread T9:
+ #0 operator delete(void*) /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126:3 (d8+0x59aadf) (BuildId: e7949a0603ee0889)
+ #1 v8::internal::Isolate::~Isolate() src/execution/isolate.cc:3814:3 (d8+0x879749) (BuildId: e7949a0603ee0889)
+ #2 v8::internal::Isolate::Delete(v8::internal::Isolate*) src/execution/isolate.cc:3402:12 (d8+0x875e8f) (BuildId: e7949a0603ee0889)
+ #3 v8::Isolate::Dispose() src/api/api.cc:9095:3 (d8+0x5ecc4f) (BuildId: e7949a0603ee0889)
+ #4 v8::Shell::OnExit(v8::Isolate*, bool) src/d8/d8.cc:3874:12 (d8+0x5c7272) (BuildId: e7949a0603ee0889)
+ #5 v8::Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>*) src/d8/d8.cc:2983:3 (d8+0x5c7220) (BuildId: e7949a0603ee0889)
+ #6 operator() src/base/once.h:101:32 (d8+0x5e069d) (BuildId: e7949a0603ee0889)
+ #7 __invoke<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:394:23 (d8+0x5e069d)
+ #8 __call<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:479:9 (d8+0x5e069d)
+ #9 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:232:12 (d8+0x5e069d)
+ #10 void std::Cr::__function::__policy_invoker<void ()>::__call_impl<std::Cr::__function::__default_alloc_func<void v8::base::CallOnce<v8::FunctionCallbackInfo<v8::Value>*, void>(std::Cr::atomic<unsigned char>*, v8::base::FunctionWithArgs<v8::FunctionCallbackInfo<v8::Value>*>::type, v8::FunctionCallbackInfo<v8::Value>*)::'lambda'(), void ()>>(std::Cr::__function::__policy_storage const*) buildtools/third_party/libc++/trunk/include/__functional/function.h:711:16 (d8+0x5e069d)
+ #11 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:842:16 (d8+0x1fbebb7) (BuildId: e7949a0603ee0889)
+ #12 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:1152:12 (d8+0x1fbebb7)
+ #13 v8::base::CallOnceImpl(std::Cr::atomic<unsigned char>*, std::Cr::function<void ()>) src/base/once.cc:36:5 (d8+0x1fbebb7)
+ #14 CallOnce<v8::FunctionCallbackInfo<v8::Value> *, void> src/base/once.h:101:5 (d8+0x5c80df) (BuildId: e7949a0603ee0889)
+ #15 v8::Shell::Quit(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2988:3 (d8+0x5c80df)
+ #16 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #17 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<false>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x676bfe) (BuildId: e7949a0603ee0889)
+ #18 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:148:5 (d8+0x675bbf) (BuildId: e7949a0603ee0889)
+ #19 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #20 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #21 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #22 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #23 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #24 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #25 v8::Worker::ExecuteInThread() src/d8/d8.cc:4736:13 (d8+0x5ce6c1) (BuildId: e7949a0603ee0889)
+ #26 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4564:11 (d8+0x5ce0c4) (BuildId: e7949a0603ee0889)
+ #27 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x1fc7c52) (BuildId: e7949a0603ee0889)
+ #28 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x1fc7c52)
+
+ Mutex M0 (0x7b4400002018) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x516300) (BuildId: e7949a0603ee0889)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1fbf9eb) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1fbf9eb)
+ #3 Worker src/d8/d8.cc:4526:9 (d8+0x5e0593) (BuildId: e7949a0603ee0889)
+ #4 v8::Worker* std::Cr::construct_at[abi:v160000]<v8::Worker, char*, v8::Worker*>(v8::Worker*, char*&&) buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:36:48 (d8+0x5e0593)
+ #5 construct<v8::Worker, char *, void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:297:9 (d8+0x5e02ba) (BuildId: e7949a0603ee0889)
+ #6 __shared_ptr_emplace<char *> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:276:9 (d8+0x5e02ba)
+ #7 std::Cr::shared_ptr<v8::Worker> std::Cr::allocate_shared[abi:v160000]<v8::Worker, std::Cr::allocator<v8::Worker>, char*, void>(std::Cr::allocator<v8::Worker> const&, char*&&) buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:937:55 (d8+0x5e02ba)
+ #8 make_shared<v8::Worker, char *, void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:946:12 (d8+0x5c5ee4) (BuildId: e7949a0603ee0889)
+ #9 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2888:19 (d8+0x5c5ee4)
+ #10 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #11 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #12 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #13 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #14 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #15 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #16 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #17 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #18 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #19 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #20 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #21 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #22 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Thread T9 'WorkerThread' (tid=5330, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x51498b) (BuildId: e7949a0603ee0889)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x1fc7b66) (BuildId: e7949a0603ee0889)
+ #2 v8::Worker::StartWorkerThread(v8::Isolate*, std::Cr::shared_ptr<v8::Worker>) src/d8/d8.cc:4548:16 (d8+0x5c61b7) (BuildId: e7949a0603ee0889)
+ #3 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2894:10 (d8+0x5c602d) (BuildId: e7949a0603ee0889)
+ #4 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #5 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #6 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #7 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #8 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #9 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #10 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #11 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #13 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #14 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #15 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #16 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: heap-use-after-free src/base/platform/mutex.cc:135:16 in UnlockNativeHandle
+==================
+==================
+WARNING: ThreadSanitizer: unlock of an unlocked mutex (or by a wrong thread) (pid=5297)
+ #0 pthread_mutex_unlock sanitizer_common/sanitizer_common_interceptors.inc:4495:3 (d8+0x536ebb) (BuildId: e7949a0603ee0889)
+ #1 UnlockNativeHandle src/base/platform/mutex.cc:135:16 (d8+0x1fbfa79) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Unlock() src/base/platform/mutex.cc:173:3 (d8+0x1fbfa79)
+ #3 ~LockGuard src/base/platform/mutex.h:316:30 (d8+0x10f7651) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:47:1 (d8+0x10f7651)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f7651)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Mutex M0 (0x7b24000090f0) created at:
+ #0 pthread_mutex_lock sanitizer_common/sanitizer_common_interceptors.inc:4481:3 (d8+0x536ceb) (BuildId: e7949a0603ee0889)
+ #1 LockNativeHandle src/base/platform/mutex.cc:128:16 (d8+0x1fbfa49) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Lock() src/base/platform/mutex.cc:166:3 (d8+0x1fbfa49)
+ #3 LockGuard src/base/platform/mutex.h:311:30 (d8+0x10f75a0) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:34:20 (d8+0x10f75a0)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f75a0)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: unlock of an unlocked mutex (or by a wrong thread) src/base/platform/mutex.cc:135:16 in UnlockNativeHandle
+==================
+==================
+WARNING: ThreadSanitizer: use of an invalid mutex (e.g. uninitialized or destroyed) (pid=5297)
+ #0 pthread_mutex_unlock sanitizer_common/sanitizer_common_interceptors.inc:4495:3 (d8+0x536ebb) (BuildId: e7949a0603ee0889)
+ #1 UnlockNativeHandle src/base/platform/mutex.cc:135:16 (d8+0x1fbfa79) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Unlock() src/base/platform/mutex.cc:173:3 (d8+0x1fbfa79)
+ #3 ~LockGuard src/base/platform/mutex.h:316:30 (d8+0x10f7651) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:47:1 (d8+0x10f7651)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f7651)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Mutex M0 (0x7b24000090f0) created at:
+ #0 pthread_mutex_lock sanitizer_common/sanitizer_common_interceptors.inc:4481:3 (d8+0x536ceb) (BuildId: e7949a0603ee0889)
+ #1 LockNativeHandle src/base/platform/mutex.cc:128:16 (d8+0x1fbfa49) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Lock() src/base/platform/mutex.cc:166:3 (d8+0x1fbfa49)
+ #3 LockGuard src/base/platform/mutex.h:311:30 (d8+0x10f75a0) (BuildId: e7949a0603ee0889)
+ #4 Register src/tasks/cancelable-task.cc:34:20 (d8+0x10f75a0)
+ #5 v8::internal::Cancelable::Cancelable(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.h:96:38 (d8+0x10f75a0)
+ #6 v8::internal::CancelableTask::CancelableTask(v8::internal::CancelableTaskManager*) src/tasks/cancelable-task.cc:125:7 (d8+0x10f74f3) (BuildId: e7949a0603ee0889)
+ #7 TerminateTask src/d8/d8.cc:4596:9 (d8+0x5c6e7a) (BuildId: e7949a0603ee0889)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4637:11 (d8+0x5c6e7a)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: use of an invalid mutex (e.g. uninitialized or destroyed) src/base/platform/mutex.cc:135:16 in UnlockNativeHandle
+==================
+==================
+WARNING: ThreadSanitizer: heap-use-after-free (pid=5297)
+ Read of size 8 at 0x7bc000060008 by main thread (mutexes: write M0):
+ #0 v8::internal::StackGuard::RequestInterrupt(v8::internal::StackGuard::InterruptFlag) src/execution/stack-guard.cc:140:26 (d8+0x8990a0) (BuildId: e7949a0603ee0889)
+ #1 RequestTerminateExecution src/execution/stack-guard.h:65:3 (d8+0x61f632) (BuildId: e7949a0603ee0889)
+ #2 v8::Isolate::TerminateExecution() src/api/api.cc:8905:29 (d8+0x61f632)
+ #3 v8::Worker::Terminate() src/d8/d8.cc:4642:13 (d8+0x5c6f55) (BuildId: e7949a0603ee0889)
+ #4 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #5 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #6 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #7 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #8 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Previous write of size 8 at 0x7bc000060008 by thread T9:
+ #0 operator delete(void*) /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126:3 (d8+0x59aadf) (BuildId: e7949a0603ee0889)
+ #1 v8::internal::IsolateAllocator::~IsolateAllocator() src/init/isolate-allocator.cc:141:3 (d8+0xae2931) (BuildId: e7949a0603ee0889)
+ #2 operator() buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:49:5 (d8+0x875e9c) (BuildId: e7949a0603ee0889)
+ #3 reset buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:281:7 (d8+0x875e9c)
+ #4 v8::internal::Isolate::Delete(v8::internal::Isolate*) src/execution/isolate.cc:3404:21 (d8+0x875e9c)
+ #5 v8::Isolate::Dispose() src/api/api.cc:9095:3 (d8+0x5ecc4f) (BuildId: e7949a0603ee0889)
+ #6 v8::Shell::OnExit(v8::Isolate*, bool) src/d8/d8.cc:3874:12 (d8+0x5c7272) (BuildId: e7949a0603ee0889)
+ #7 v8::Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>*) src/d8/d8.cc:2983:3 (d8+0x5c7220) (BuildId: e7949a0603ee0889)
+ #8 operator() src/base/once.h:101:32 (d8+0x5e069d) (BuildId: e7949a0603ee0889)
+ #9 __invoke<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:394:23 (d8+0x5e069d)
+ #10 __call<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:479:9 (d8+0x5e069d)
+ #11 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:232:12 (d8+0x5e069d)
+ #12 void std::Cr::__function::__policy_invoker<void ()>::__call_impl<std::Cr::__function::__default_alloc_func<void v8::base::CallOnce<v8::FunctionCallbackInfo<v8::Value>*, void>(std::Cr::atomic<unsigned char>*, v8::base::FunctionWithArgs<v8::FunctionCallbackInfo<v8::Value>*>::type, v8::FunctionCallbackInfo<v8::Value>*)::'lambda'(), void ()>>(std::Cr::__function::__policy_storage const*) buildtools/third_party/libc++/trunk/include/__functional/function.h:711:16 (d8+0x5e069d)
+ #13 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:842:16 (d8+0x1fbebb7) (BuildId: e7949a0603ee0889)
+ #14 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:1152:12 (d8+0x1fbebb7)
+ #15 v8::base::CallOnceImpl(std::Cr::atomic<unsigned char>*, std::Cr::function<void ()>) src/base/once.cc:36:5 (d8+0x1fbebb7)
+ #16 CallOnce<v8::FunctionCallbackInfo<v8::Value> *, void> src/base/once.h:101:5 (d8+0x5c80df) (BuildId: e7949a0603ee0889)
+ #17 v8::Shell::Quit(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2988:3 (d8+0x5c80df)
+ #18 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #19 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<false>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x676bfe) (BuildId: e7949a0603ee0889)
+ #20 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:148:5 (d8+0x675bbf) (BuildId: e7949a0603ee0889)
+ #21 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #22 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #23 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #24 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #25 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #26 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #27 v8::Worker::ExecuteInThread() src/d8/d8.cc:4736:13 (d8+0x5ce6c1) (BuildId: e7949a0603ee0889)
+ #28 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4564:11 (d8+0x5ce0c4) (BuildId: e7949a0603ee0889)
+ #29 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x1fc7c52) (BuildId: e7949a0603ee0889)
+ #30 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x1fc7c52)
+
+ Mutex M0 (0x7b4400002018) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x516300) (BuildId: e7949a0603ee0889)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1fbf9eb) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1fbf9eb)
+ #3 Worker src/d8/d8.cc:4526:9 (d8+0x5e0593) (BuildId: e7949a0603ee0889)
+ #4 v8::Worker* std::Cr::construct_at[abi:v160000]<v8::Worker, char*, v8::Worker*>(v8::Worker*, char*&&) buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:36:48 (d8+0x5e0593)
+ #5 construct<v8::Worker, char *, void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:297:9 (d8+0x5e02ba) (BuildId: e7949a0603ee0889)
+ #6 __shared_ptr_emplace<char *> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:276:9 (d8+0x5e02ba)
+ #7 std::Cr::shared_ptr<v8::Worker> std::Cr::allocate_shared[abi:v160000]<v8::Worker, std::Cr::allocator<v8::Worker>, char*, void>(std::Cr::allocator<v8::Worker> const&, char*&&) buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:937:55 (d8+0x5e02ba)
+ #8 make_shared<v8::Worker, char *, void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:946:12 (d8+0x5c5ee4) (BuildId: e7949a0603ee0889)
+ #9 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2888:19 (d8+0x5c5ee4)
+ #10 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #11 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #12 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #13 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #14 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #15 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #16 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #17 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #18 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #19 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #20 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #21 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #22 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Thread T9 'WorkerThread' (tid=5330, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x51498b) (BuildId: e7949a0603ee0889)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x1fc7b66) (BuildId: e7949a0603ee0889)
+ #2 v8::Worker::StartWorkerThread(v8::Isolate*, std::Cr::shared_ptr<v8::Worker>) src/d8/d8.cc:4548:16 (d8+0x5c61b7) (BuildId: e7949a0603ee0889)
+ #3 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2894:10 (d8+0x5c602d) (BuildId: e7949a0603ee0889)
+ #4 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #5 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #6 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #7 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #8 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #9 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #10 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #11 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #13 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #14 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #15 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #16 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: heap-use-after-free src/execution/stack-guard.cc:140:26 in v8::internal::StackGuard::RequestInterrupt(v8::internal::StackGuard::InterruptFlag)
+==================
+==================
+WARNING: ThreadSanitizer: use of an invalid mutex (e.g. uninitialized or destroyed) (pid=5297)
+ #0 pthread_mutex_lock sanitizer_common/sanitizer_common_interceptors.inc:4481:3 (d8+0x536ceb) (BuildId: e7949a0603ee0889)
+ #1 LockNativeHandle src/base/platform/mutex.cc:128:16 (d8+0x1fbfb69) (BuildId: e7949a0603ee0889)
+ #2 v8::base::RecursiveMutex::Lock() src/base/platform/mutex.cc:201:3 (d8+0x1fbfb69)
+ #3 Lock src/execution/isolate.h:2615:65 (d8+0x8990b1) (BuildId: e7949a0603ee0889)
+ #4 ExecutionAccess src/execution/isolate.h:2611:5 (d8+0x8990b1)
+ #5 v8::internal::StackGuard::RequestInterrupt(v8::internal::StackGuard::InterruptFlag) src/execution/stack-guard.cc:140:19 (d8+0x8990b1)
+ #6 RequestTerminateExecution src/execution/stack-guard.h:65:3 (d8+0x61f632) (BuildId: e7949a0603ee0889)
+ #7 v8::Isolate::TerminateExecution() src/api/api.cc:8905:29 (d8+0x61f632)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4642:13 (d8+0x5c6f55) (BuildId: e7949a0603ee0889)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Mutex M0 (0x7bc00006e170) created at:
+ #0 pthread_mutex_lock sanitizer_common/sanitizer_common_interceptors.inc:4481:3 (d8+0x536ceb) (BuildId: e7949a0603ee0889)
+ #1 LockNativeHandle src/base/platform/mutex.cc:128:16 (d8+0x1fbfb69) (BuildId: e7949a0603ee0889)
+ #2 v8::base::RecursiveMutex::Lock() src/base/platform/mutex.cc:201:3 (d8+0x1fbfb69)
+ #3 Lock src/execution/isolate.h:2615:65 (d8+0x8990b1) (BuildId: e7949a0603ee0889)
+ #4 ExecutionAccess src/execution/isolate.h:2611:5 (d8+0x8990b1)
+ #5 v8::internal::StackGuard::RequestInterrupt(v8::internal::StackGuard::InterruptFlag) src/execution/stack-guard.cc:140:19 (d8+0x8990b1)
+ #6 RequestTerminateExecution src/execution/stack-guard.h:65:3 (d8+0x61f632) (BuildId: e7949a0603ee0889)
+ #7 v8::Isolate::TerminateExecution() src/api/api.cc:8905:29 (d8+0x61f632)
+ #8 v8::Worker::Terminate() src/d8/d8.cc:4642:13 (d8+0x5c6f55) (BuildId: e7949a0603ee0889)
+ #9 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #10 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #11 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #13 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: use of an invalid mutex (e.g. uninitialized or destroyed) src/base/platform/mutex.cc:128:16 in LockNativeHandle
+==================
+==================
+WARNING: ThreadSanitizer: heap-use-after-free (pid=5297)
+ Read of size 8 at 0x7bc000060030 by main thread (mutexes: write M0):
+ #0 v8::internal::StackGuard::RequestInterrupt(v8::internal::StackGuard::InterruptFlag) src/execution/stack-guard.cc:142:21 (d8+0x8990ba) (BuildId: e7949a0603ee0889)
+ #1 RequestTerminateExecution src/execution/stack-guard.h:65:3 (d8+0x61f632) (BuildId: e7949a0603ee0889)
+ #2 v8::Isolate::TerminateExecution() src/api/api.cc:8905:29 (d8+0x61f632)
+ #3 v8::Worker::Terminate() src/d8/d8.cc:4642:13 (d8+0x5c6f55) (BuildId: e7949a0603ee0889)
+ #4 TerminateAndWaitForThread src/d8/d8.cc:4622:3 (d8+0x5d13ce) (BuildId: e7949a0603ee0889)
+ #5 v8::Shell::WaitForRunningWorkers(v8::internal::ParkedScope const&) src/d8/d8.cc:5642:13 (d8+0x5d13ce)
+ #6 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5190:3 (d8+0x5d1168) (BuildId: e7949a0603ee0889)
+ #7 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #8 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Previous write of size 8 at 0x7bc000060030 by thread T9:
+ #0 operator delete(void*) /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_new_delete.cpp:126:3 (d8+0x59aadf) (BuildId: e7949a0603ee0889)
+ #1 v8::internal::IsolateAllocator::~IsolateAllocator() src/init/isolate-allocator.cc:141:3 (d8+0xae2931) (BuildId: e7949a0603ee0889)
+ #2 operator() buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:49:5 (d8+0x875e9c) (BuildId: e7949a0603ee0889)
+ #3 reset buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:281:7 (d8+0x875e9c)
+ #4 v8::internal::Isolate::Delete(v8::internal::Isolate*) src/execution/isolate.cc:3404:21 (d8+0x875e9c)
+ #5 v8::Isolate::Dispose() src/api/api.cc:9095:3 (d8+0x5ecc4f) (BuildId: e7949a0603ee0889)
+ #6 v8::Shell::OnExit(v8::Isolate*, bool) src/d8/d8.cc:3874:12 (d8+0x5c7272) (BuildId: e7949a0603ee0889)
+ #7 v8::Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>*) src/d8/d8.cc:2983:3 (d8+0x5c7220) (BuildId: e7949a0603ee0889)
+ #8 operator() src/base/once.h:101:32 (d8+0x5e069d) (BuildId: e7949a0603ee0889)
+ #9 __invoke<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:394:23 (d8+0x5e069d)
+ #10 __call<(lambda at ../../src/base/once.h:101:24) &> buildtools/third_party/libc++/trunk/include/__functional/invoke.h:479:9 (d8+0x5e069d)
+ #11 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:232:12 (d8+0x5e069d)
+ #12 void std::Cr::__function::__policy_invoker<void ()>::__call_impl<std::Cr::__function::__default_alloc_func<void v8::base::CallOnce<v8::FunctionCallbackInfo<v8::Value>*, void>(std::Cr::atomic<unsigned char>*, v8::base::FunctionWithArgs<v8::FunctionCallbackInfo<v8::Value>*>::type, v8::FunctionCallbackInfo<v8::Value>*)::'lambda'(), void ()>>(std::Cr::__function::__policy_storage const*) buildtools/third_party/libc++/trunk/include/__functional/function.h:711:16 (d8+0x5e069d)
+ #13 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:842:16 (d8+0x1fbebb7) (BuildId: e7949a0603ee0889)
+ #14 operator() buildtools/third_party/libc++/trunk/include/__functional/function.h:1152:12 (d8+0x1fbebb7)
+ #15 v8::base::CallOnceImpl(std::Cr::atomic<unsigned char>*, std::Cr::function<void ()>) src/base/once.cc:36:5 (d8+0x1fbebb7)
+ #16 CallOnce<v8::FunctionCallbackInfo<v8::Value> *, void> src/base/once.h:101:5 (d8+0x5c80df) (BuildId: e7949a0603ee0889)
+ #17 v8::Shell::Quit(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2988:3 (d8+0x5c80df)
+ #18 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #19 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<false>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x676bfe) (BuildId: e7949a0603ee0889)
+ #20 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:148:5 (d8+0x675bbf) (BuildId: e7949a0603ee0889)
+ #21 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #22 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #23 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #24 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #25 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #26 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #27 v8::Worker::ExecuteInThread() src/d8/d8.cc:4736:13 (d8+0x5ce6c1) (BuildId: e7949a0603ee0889)
+ #28 v8::Worker::WorkerThread::Run() src/d8/d8.cc:4564:11 (d8+0x5ce0c4) (BuildId: e7949a0603ee0889)
+ #29 NotifyStartedAndRun src/base/platform/platform.h:596:5 (d8+0x1fc7c52) (BuildId: e7949a0603ee0889)
+ #30 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (d8+0x1fc7c52)
+
+ Mutex M0 (0x7b4400002018) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0x516300) (BuildId: e7949a0603ee0889)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:99:12 (d8+0x1fbf9eb) (BuildId: e7949a0603ee0889)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (d8+0x1fbf9eb)
+ #3 Worker src/d8/d8.cc:4526:9 (d8+0x5e0593) (BuildId: e7949a0603ee0889)
+ #4 v8::Worker* std::Cr::construct_at[abi:v160000]<v8::Worker, char*, v8::Worker*>(v8::Worker*, char*&&) buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:36:48 (d8+0x5e0593)
+ #5 construct<v8::Worker, char *, void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:297:9 (d8+0x5e02ba) (BuildId: e7949a0603ee0889)
+ #6 __shared_ptr_emplace<char *> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:276:9 (d8+0x5e02ba)
+ #7 std::Cr::shared_ptr<v8::Worker> std::Cr::allocate_shared[abi:v160000]<v8::Worker, std::Cr::allocator<v8::Worker>, char*, void>(std::Cr::allocator<v8::Worker> const&, char*&&) buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:937:55 (d8+0x5e02ba)
+ #8 make_shared<v8::Worker, char *, void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:946:12 (d8+0x5c5ee4) (BuildId: e7949a0603ee0889)
+ #9 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2888:19 (d8+0x5c5ee4)
+ #10 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #11 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #12 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #13 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #14 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #15 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #16 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #17 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #18 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #19 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #20 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #21 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #22 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+ Thread T9 'WorkerThread' (tid=5330, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0x51498b) (BuildId: e7949a0603ee0889)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (d8+0x1fc7b66) (BuildId: e7949a0603ee0889)
+ #2 v8::Worker::StartWorkerThread(v8::Isolate*, std::Cr::shared_ptr<v8::Worker>) src/d8/d8.cc:4548:16 (d8+0x5c61b7) (BuildId: e7949a0603ee0889)
+ #3 v8::Shell::WorkerNew(v8::FunctionCallbackInfo<v8::Value> const&) src/d8/d8.cc:2894:10 (d8+0x5c602d) (BuildId: e7949a0603ee0889)
+ #4 v8::internal::FunctionCallbackArguments::Call(v8::internal::CallHandlerInfo) src/api/api-arguments-inl.h:146:3 (d8+0x677b99) (BuildId: e7949a0603ee0889)
+ #5 v8::internal::MaybeHandle<v8::internal::Object> v8::internal::(anonymous namespace)::HandleApiCallHelper<true>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::HeapObject>, v8::internal::Handle<v8::internal::FunctionTemplateInfo>, v8::internal::Handle<v8::internal::Object>, unsigned long*, int) src/builtins/builtins-api.cc:113:36 (d8+0x67658a) (BuildId: e7949a0603ee0889)
+ #6 v8::internal::Builtin_Impl_HandleApiCall(v8::internal::BuiltinArguments, v8::internal::Isolate*) src/builtins/builtins-api.cc:144:5 (d8+0x675b9f) (BuildId: e7949a0603ee0889)
+ #7 v8::internal::Builtin_HandleApiCall(int, unsigned long*, v8::internal::Isolate*) src/builtins/builtins-api.cc:135:1 (d8+0x6756b3) (BuildId: e7949a0603ee0889)
+ #8 Builtins_AsyncFromSyncIteratorPrototypeThrow setup-isolate-deserialize.cc (d8+0x1d93f37)
+ #9 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (d8+0x84fc92) (BuildId: e7949a0603ee0889)
+ #10 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (d8+0x5f6034) (BuildId: e7949a0603ee0889)
+ #11 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (d8+0x5f57d0) (BuildId: e7949a0603ee0889)
+ #12 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x5b33d9) (BuildId: e7949a0603ee0889)
+ #13 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4425:10 (d8+0x5cce73) (BuildId: e7949a0603ee0889)
+ #14 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x5d0e0e) (BuildId: e7949a0603ee0889)
+ #15 v8::Shell::Main(int, char**) src/d8/d8.cc:5939:22 (d8+0x5d3491) (BuildId: e7949a0603ee0889)
+ #16 main src/d8/d8.cc:6052:43 (d8+0x5d3bc0) (BuildId: e7949a0603ee0889)
+
+SUMMARY: ThreadSanitizer: heap-use-after-free src/execution/stack-guard.cc:142:21 in v8::internal::StackGuard::RequestInterrupt(v8::internal::StackGuard::InterruptFlag)
+==================
+ThreadSanitizer: reported 8 warnings
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.expected b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.expected
new file mode 100644
index 0000000000..8040c35819
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.expected
@@ -0,0 +1,3 @@
+Get<(v8::internal::AccessMode)1>
+IsImpossible<(v8::internal::AccessMode)1>
+IsWhite<(v8::internal::AccessMode)1> \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.txt b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.txt
new file mode 100644
index 0000000000..d9e668a106
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/stack_utils/testdata/custom_analyzer/data_race_9.txt
@@ -0,0 +1,128 @@
+==================
+WARNING: ThreadSanitizer: data race (pid=28525)
+ Read of size 4 at 0x7ee001a41a40 by thread T2:
+ #0 Get<(v8::internal::AccessMode)1> src/heap/marking.h:73:11 (libv8.so+0x2092dea) (BuildId: 6097afaf9be13b02)
+ #1 IsImpossible<(v8::internal::AccessMode)1> src/heap/marking.h:363:24 (libv8.so+0x2092dea)
+ #2 IsWhite<(v8::internal::AccessMode)1> src/heap/marking.h:387:5 (libv8.so+0x2092dea)
+ #3 IsWhite src/heap/marking-state-inl.h:48:10 (libv8.so+0x2092dea)
+ #4 v8::internal::CopyAndForwardResult v8::internal::Scavenger::SemiSpaceCopyObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::Map, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject, int, v8::internal::ObjectFields) src/heap/scavenger-inl.h:138:5 (libv8.so+0x2092dea)
+ #5 heap::base::SlotCallbackResult v8::internal::Scavenger::EvacuateObjectDefault<v8::internal::CompressedHeapObjectSlot, (v8::internal::Scavenger::PromotionHeapChoice)0>(v8::internal::Map, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject, int, v8::internal::ObjectFields) src/heap/scavenger-inl.h:269:14 (libv8.so+0x20923f5) (BuildId: 6097afaf9be13b02)
+ #6 heap::base::SlotCallbackResult v8::internal::Scavenger::EvacuateShortcutCandidate<v8::internal::CompressedHeapObjectSlot>(v8::internal::Map, v8::internal::CompressedHeapObjectSlot, v8::internal::ConsString, int) src/heap/scavenger-inl.h:357:10 (libv8.so+0x2091b95) (BuildId: 6097afaf9be13b02)
+ #7 heap::base::SlotCallbackResult v8::internal::Scavenger::EvacuateObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::CompressedHeapObjectSlot, v8::internal::Map, v8::internal::HeapObject) src/heap/scavenger-inl.h:395:14 (libv8.so+0x20916da) (BuildId: 6097afaf9be13b02)
+ #8 heap::base::SlotCallbackResult v8::internal::Scavenger::ScavengeObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/scavenger-inl.h:447:10 (libv8.so+0x20913d6) (BuildId: 6097afaf9be13b02)
+ #9 VisitHeapObjectImpl<v8::internal::CompressedObjectSlot> src/heap/scavenger-inl.h:548:17 (libv8.so+0x20a813a) (BuildId: 6097afaf9be13b02)
+ #10 void v8::internal::ScavengeVisitor::VisitPointersImpl<v8::internal::CompressedObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedObjectSlot, v8::internal::CompressedObjectSlot) src/heap/scavenger-inl.h:560:7 (libv8.so+0x20a813a)
+ #11 VisitPointers src/heap/scavenger-inl.h:505:10 (libv8.so+0x20a41ce) (BuildId: 6097afaf9be13b02)
+ #12 IteratePointers<v8::internal::ScavengeVisitor> src/objects/objects-body-descriptors-inl.h:128:6 (libv8.so+0x20a41ce)
+ #13 IterateBody<v8::internal::ScavengeVisitor> src/objects/objects-body-descriptors.h:135:5 (libv8.so+0x20a41ce)
+ #14 VisitConsString src/heap/objects-visiting-inl.h:119:1 (libv8.so+0x20a41ce)
+ #15 v8::internal::HeapVisitor<int, v8::internal::ScavengeVisitor>::VisitShortcutCandidate(v8::internal::Map, v8::internal::ConsString) src/heap/objects-visiting-inl.h:125:47 (libv8.so+0x20a41ce)
+ #16 v8::internal::HeapVisitor<int, v8::internal::ScavengeVisitor>::Visit(v8::internal::Map, v8::internal::HeapObject) src/heap/objects-visiting-inl.h:69:23 (libv8.so+0x20a0a78) (BuildId: 6097afaf9be13b02)
+ #17 Visit src/heap/objects-visiting-inl.h:53:10 (libv8.so+0x2087504) (BuildId: 6097afaf9be13b02)
+ #18 v8::internal::Scavenger::Process(v8::JobDelegate*) src/heap/scavenger.cc:732:24 (libv8.so+0x2087504)
+ #19 v8::internal::ScavengerCollector::JobTask::ProcessItems(v8::JobDelegate*, v8::internal::Scavenger*) src/heap/scavenger.cc:237:16 (libv8.so+0x2086e1a) (BuildId: 6097afaf9be13b02)
+ #20 v8::internal::ScavengerCollector::JobTask::Run(v8::JobDelegate*) src/heap/scavenger.cc:216:5 (libv8.so+0x2086b77) (BuildId: 6097afaf9be13b02)
+ #21 v8::platform::DefaultJobWorker::Run() src/libplatform/default-job.h:147:18 (libv8_libplatform.so+0x1b2fb) (BuildId: c4a8dbf0cb01439d)
+ #22 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::Run() src/libplatform/default-worker-threads-task-runner.cc:73:11 (libv8_libplatform.so+0x1ee10) (BuildId: c4a8dbf0cb01439d)
+ #23 NotifyStartedAndRun src/base/platform/platform.h:596:5 (libv8_libbase.so+0x56a88) (BuildId: f47d114a6a0872c1)
+ #24 v8::base::ThreadEntry(void*) src/base/platform/platform-posix.cc:1123:11 (libv8_libbase.so+0x56a88)
+
+ Previous atomic write of size 4 at 0x7ee001a41a40 by main thread (mutexes: write M0, write M1):
+ #0 __cxx_atomic_compare_exchange_strong<int> buildtools/third_party/libc++/trunk/include/atomic:978:12 (libv8.so+0x1e32056) (BuildId: 6097afaf9be13b02)
+ #1 compare_exchange_strong buildtools/third_party/libc++/trunk/include/atomic:1566:17 (libv8.so+0x1e32056)
+ #2 atomic_compare_exchange_strong_explicit<int> buildtools/third_party/libc++/trunk/include/atomic:2033:17 (libv8.so+0x1e32056)
+ #3 Release_CompareAndSwap src/base/atomicops.h:166:3 (libv8.so+0x1e32056)
+ #4 Release_CompareAndSwap<unsigned int> src/base/atomic-utils.h:127:43 (libv8.so+0x1e32056)
+ #5 bool v8::base::AsAtomicImpl<int>::SetBits<unsigned int>(unsigned int*, unsigned int, unsigned int) src/base/atomic-utils.h:164:19 (libv8.so+0x1e32056)
+ #6 Set<(v8::internal::AccessMode)0> src/heap/marking.h:68:10 (libv8.so+0x1fd5322) (BuildId: 6097afaf9be13b02)
+ #7 GreyToBlack<(v8::internal::AccessMode)0> src/heap/marking.h:433:50 (libv8.so+0x1fd5322)
+ #8 GreyToBlack src/heap/marking-state-inl.h:78:8 (libv8.so+0x1fd5322)
+ #9 WhiteToBlack src/heap/marking-state-inl.h:71:30 (libv8.so+0x1fd5322)
+ #10 v8::internal::IncrementalMarking::TransferColor(v8::internal::HeapObject, v8::internal::HeapObject) src/heap/incremental-marking-inl.h:28:44 (libv8.so+0x1fd5322)
+ #11 v8::internal::Scavenger::MigrateObject(v8::internal::Map, v8::internal::HeapObject, v8::internal::HeapObject, int, v8::internal::Scavenger::PromotionHeapChoice) src/heap/scavenger-inl.h:116:36 (libv8.so+0x209455c) (BuildId: 6097afaf9be13b02)
+ #12 v8::internal::CopyAndForwardResult v8::internal::Scavenger::SemiSpaceCopyObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::Map, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject, int, v8::internal::ObjectFields) src/heap/scavenger-inl.h:140:9 (libv8.so+0x2092e84) (BuildId: 6097afaf9be13b02)
+ #13 heap::base::SlotCallbackResult v8::internal::Scavenger::EvacuateObjectDefault<v8::internal::CompressedHeapObjectSlot, (v8::internal::Scavenger::PromotionHeapChoice)0>(v8::internal::Map, v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject, int, v8::internal::ObjectFields) src/heap/scavenger-inl.h:269:14 (libv8.so+0x20923f5) (BuildId: 6097afaf9be13b02)
+ #14 heap::base::SlotCallbackResult v8::internal::Scavenger::EvacuateObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::CompressedHeapObjectSlot, v8::internal::Map, v8::internal::HeapObject) src/heap/scavenger-inl.h:412:14 (libv8.so+0x20917b7) (BuildId: 6097afaf9be13b02)
+ #15 heap::base::SlotCallbackResult v8::internal::Scavenger::ScavengeObject<v8::internal::CompressedHeapObjectSlot>(v8::internal::CompressedHeapObjectSlot, v8::internal::HeapObject) src/heap/scavenger-inl.h:447:10 (libv8.so+0x20913d6) (BuildId: 6097afaf9be13b02)
+ #16 VisitHeapObjectImpl<v8::internal::CompressedObjectSlot> src/heap/scavenger-inl.h:548:17 (libv8.so+0x20a813a) (BuildId: 6097afaf9be13b02)
+ #17 void v8::internal::ScavengeVisitor::VisitPointersImpl<v8::internal::CompressedObjectSlot>(v8::internal::HeapObject, v8::internal::CompressedObjectSlot, v8::internal::CompressedObjectSlot) src/heap/scavenger-inl.h:560:7 (libv8.so+0x20a813a)
+ #18 VisitPointers src/heap/scavenger-inl.h:505:10 (libv8.so+0x20a4642) (BuildId: 6097afaf9be13b02)
+ #19 IteratePointers<v8::internal::ScavengeVisitor> src/objects/objects-body-descriptors-inl.h:128:6 (libv8.so+0x20a4642)
+ #20 IterateBody<v8::internal::ScavengeVisitor> src/objects/objects-body-descriptors-inl.h:234:5 (libv8.so+0x20a4642)
+ #21 v8::internal::HeapVisitor<int, v8::internal::ScavengeVisitor>::VisitJSObjectFast(v8::internal::Map, v8::internal::JSObject) src/heap/objects-visiting-inl.h:163:3 (libv8.so+0x20a4642)
+ #22 v8::internal::HeapVisitor<int, v8::internal::ScavengeVisitor>::Visit(v8::internal::Map, v8::internal::HeapObject) src/heap/objects-visiting-inl.h:74:23 (libv8.so+0x20a0971) (BuildId: 6097afaf9be13b02)
+ #23 Visit src/heap/objects-visiting-inl.h:53:10 (libv8.so+0x2087504) (BuildId: 6097afaf9be13b02)
+ #24 v8::internal::Scavenger::Process(v8::JobDelegate*) src/heap/scavenger.cc:732:24 (libv8.so+0x2087504)
+ #25 v8::internal::ScavengerCollector::JobTask::ProcessItems(v8::JobDelegate*, v8::internal::Scavenger*) src/heap/scavenger.cc:237:16 (libv8.so+0x2086e1a) (BuildId: 6097afaf9be13b02)
+ #26 v8::internal::ScavengerCollector::JobTask::Run(v8::JobDelegate*) src/heap/scavenger.cc:211:5 (libv8.so+0x2086941) (BuildId: 6097afaf9be13b02)
+ #27 v8::platform::DefaultJobState::Join() src/libplatform/default-job.cc:141:16 (libv8_libplatform.so+0x18f5e) (BuildId: c4a8dbf0cb01439d)
+ #28 v8::platform::DefaultJobHandle::Join() src/libplatform/default-job.cc:238:11 (libv8_libplatform.so+0x19b57) (BuildId: c4a8dbf0cb01439d)
+ #29 v8::internal::ScavengerCollector::CollectGarbage() src/heap/scavenger.cc:396:13 (libv8.so+0x2089742) (BuildId: 6097afaf9be13b02)
+ #30 v8::internal::Heap::Scavenge() src/heap/heap.cc:2656:25 (libv8.so+0x1f446e2) (BuildId: 6097afaf9be13b02)
+ #31 v8::internal::Heap::PerformGarbageCollection(v8::internal::GarbageCollector, v8::internal::GarbageCollectionReason, char const*) src/heap/heap.cc:2257:5 (libv8.so+0x1f3f7d9) (BuildId: 6097afaf9be13b02)
+ #32 v8::internal::Heap::CollectGarbage(v8::internal::AllocationSpace, v8::internal::GarbageCollectionReason, v8::GCCallbackFlags) src/heap/heap.cc:1714:13 (libv8.so+0x1f3beda) (BuildId: 6097afaf9be13b02)
+ #33 v8::internal::HeapAllocator::AllocateRawWithLightRetrySlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:95:14 (libv8.so+0x1f299f0) (BuildId: 6097afaf9be13b02)
+ #34 v8::internal::HeapAllocator::AllocateRawWithRetryOrFailSlowPath(int, v8::internal::AllocationType, v8::internal::AllocationOrigin, v8::internal::AllocationAlignment) src/heap/heap-allocator.cc:110:7 (libv8.so+0x1f29ad0) (BuildId: 6097afaf9be13b02)
+ #35 AllocateRawWith<(v8::internal::HeapAllocator::AllocationRetryMode)1> src/heap/heap-allocator-inl.h:237:16 (libv8.so+0x1ed3697) (BuildId: 6097afaf9be13b02)
+ #36 v8::internal::Factory::NewFillerObject(int, v8::internal::AllocationAlignment, v8::internal::AllocationType, v8::internal::AllocationOrigin) src/heap/factory.cc:401:36 (libv8.so+0x1ed3697)
+ #37 v8::internal::__RT_impl_Runtime_AllocateInYoungGeneration(v8::internal::Arguments<(v8::internal::ArgumentsType)0>, v8::internal::Isolate*) src/runtime/runtime-internal.cc:477:31 (libv8.so+0x29494b4) (BuildId: 6097afaf9be13b02)
+ #38 v8::internal::Runtime_AllocateInYoungGeneration(int, unsigned long*, v8::internal::Isolate*) src/runtime/runtime-internal.cc:449:1 (libv8.so+0x2948dfa) (BuildId: 6097afaf9be13b02)
+ #39 Builtins_CEntry_Return1_ArgvOnStack_NoBuiltinExit setup-isolate-deserialize.cc (libv8.so+0x1122afe) (BuildId: 6097afaf9be13b02)
+ #40 v8::internal::Execution::CallScript(v8::internal::Isolate*, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>) src/execution/execution.cc:538:10 (libv8.so+0x1d7be35) (BuildId: 6097afaf9be13b02)
+ #41 v8::Script::Run(v8::Local<v8::Context>, v8::Local<v8::Data>) src/api/api.cc:2272:7 (libv8.so+0x189aad3) (BuildId: 6097afaf9be13b02)
+ #42 v8::Script::Run(v8::Local<v8::Context>) src/api/api.cc:2199:10 (libv8.so+0x189a1a0) (BuildId: 6097afaf9be13b02)
+ #43 v8::Shell::ExecuteString(v8::Isolate*, v8::Local<v8::String>, v8::Local<v8::String>, v8::Shell::PrintResult, v8::Shell::ReportExceptions, v8::Shell::ProcessMessageQueue) src/d8/d8.cc:876:28 (d8+0x14e4fd) (BuildId: a8447ccee3b50949)
+ #44 v8::SourceGroup::Execute(v8::Isolate*) src/d8/d8.cc:4367:12 (d8+0x1697d7) (BuildId: a8447ccee3b50949)
+ #45 v8::Shell::RunMain(v8::Isolate*, bool) src/d8/d8.cc:5159:39 (d8+0x16e112) (BuildId: a8447ccee3b50949)
+ #46 v8::Shell::Main(int, char**) src/d8/d8.cc:5960:18 (d8+0x170d30) (BuildId: a8447ccee3b50949)
+ #47 main src/d8/d8.cc:6052:43 (d8+0x171350) (BuildId: a8447ccee3b50949)
+
+ Mutex M0 (0x7b3c000000a8) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0xae510) (BuildId: a8447ccee3b50949)
+ #1 InitializeRecursiveNativeHandle src/base/platform/mutex.cc:112:12 (libv8_libbase.so+0x426f4) (BuildId: f47d114a6a0872c1)
+ #2 v8::base::RecursiveMutex::RecursiveMutex() src/base/platform/mutex.cc:187:3 (libv8_libbase.so+0x426f4)
+ #3 v8::internal::IsolateSafepoint::IsolateSafepoint(v8::internal::Heap*) src/heap/safepoint.cc:29:19 (libv8.so+0x2082798) (BuildId: 6097afaf9be13b02)
+ #4 make_unique<v8::internal::IsolateSafepoint, v8::internal::Heap *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (libv8.so+0x1f2d4ae) (BuildId: 6097afaf9be13b02)
+ #5 v8::internal::Heap::Heap() src/heap/heap.cc:232:18 (libv8.so+0x1f2d4ae)
+ #6 v8::internal::Isolate::Isolate(std::Cr::unique_ptr<v8::internal::IsolateAllocator, std::Cr::default_delete<v8::internal::IsolateAllocator>>, bool) src/execution/isolate.cc:3436:10 (libv8.so+0x1dbb88c) (BuildId: 6097afaf9be13b02)
+ #7 Allocate src/execution/isolate.cc:3354:25 (libv8.so+0x1dba331) (BuildId: 6097afaf9be13b02)
+ #8 v8::internal::Isolate::New() src/execution/isolate.cc:3332:22 (libv8.so+0x1dba331)
+ #9 Allocate src/api/api.cc:8976:37 (libv8.so+0x191bcd7) (BuildId: 6097afaf9be13b02)
+ #10 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9084:25 (libv8.so+0x191bcd7)
+ #11 v8::Shell::Main(int, char**) src/d8/d8.cc:5844:22 (d8+0x1704c9) (BuildId: a8447ccee3b50949)
+ #12 main src/d8/d8.cc:6052:43 (d8+0x171350) (BuildId: a8447ccee3b50949)
+
+ Mutex M1 (0x7bc00000df20) created at:
+ #0 pthread_mutex_init /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1331:3 (d8+0xae510) (BuildId: a8447ccee3b50949)
+ #1 InitializeNativeHandle src/base/platform/mutex.cc:94:12 (libv8_libbase.so+0x41f34) (BuildId: f47d114a6a0872c1)
+ #2 v8::base::Mutex::Mutex() src/base/platform/mutex.cc:152:3 (libv8_libbase.so+0x41f34)
+ #3 v8::internal::Heap::Heap() src/heap/heap.cc:228:7 (libv8.so+0x1f2d599) (BuildId: 6097afaf9be13b02)
+ #4 v8::internal::Isolate::Isolate(std::Cr::unique_ptr<v8::internal::IsolateAllocator, std::Cr::default_delete<v8::internal::IsolateAllocator>>, bool) src/execution/isolate.cc:3436:10 (libv8.so+0x1dbb88c) (BuildId: 6097afaf9be13b02)
+ #5 Allocate src/execution/isolate.cc:3354:25 (libv8.so+0x1dba331) (BuildId: 6097afaf9be13b02)
+ #6 v8::internal::Isolate::New() src/execution/isolate.cc:3332:22 (libv8.so+0x1dba331)
+ #7 Allocate src/api/api.cc:8976:37 (libv8.so+0x191bcd7) (BuildId: 6097afaf9be13b02)
+ #8 v8::Isolate::New(v8::Isolate::CreateParams const&) src/api/api.cc:9084:25 (libv8.so+0x191bcd7)
+ #9 v8::Shell::Main(int, char**) src/d8/d8.cc:5844:22 (d8+0x1704c9) (BuildId: a8447ccee3b50949)
+ #10 main src/d8/d8.cc:6052:43 (d8+0x171350) (BuildId: a8447ccee3b50949)
+
+ Thread T2 'V8 DefaultWorke' (tid=28528, running) created by main thread at:
+ #0 pthread_create /b/s/w/ir/cache/builder/src/third_party/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:1038:3 (d8+0xacb9b) (BuildId: a8447ccee3b50949)
+ #1 v8::base::Thread::Start() src/base/platform/platform-posix.cc:1155:14 (libv8_libbase.so+0x5691a) (BuildId: f47d114a6a0872c1)
+ #2 v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread::WorkerThread(v8::platform::DefaultWorkerThreadsTaskRunner*) src/libplatform/default-worker-threads-task-runner.cc:66:3 (libv8_libplatform.so+0x1ecdf) (BuildId: c4a8dbf0cb01439d)
+ #3 make_unique<v8::platform::DefaultWorkerThreadsTaskRunner::WorkerThread, v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (libv8_libplatform.so+0x1e6ff) (BuildId: c4a8dbf0cb01439d)
+ #4 v8::platform::DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(unsigned int, double (*)()) src/libplatform/default-worker-threads-task-runner.cc:16:28 (libv8_libplatform.so+0x1e6ff)
+ #5 construct_at<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), v8::platform::DefaultWorkerThreadsTaskRunner *> buildtools/third_party/libc++/trunk/include/__memory/construct_at.h:36:48 (libv8_libplatform.so+0x1c051) (BuildId: c4a8dbf0cb01439d)
+ #6 construct<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void, void> buildtools/third_party/libc++/trunk/include/__memory/allocator_traits.h:297:9 (libv8_libplatform.so+0x1c051)
+ #7 __shared_ptr_emplace<const int &, double (*)()> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:276:9 (libv8_libplatform.so+0x1c051)
+ #8 allocate_shared<v8::platform::DefaultWorkerThreadsTaskRunner, std::Cr::allocator<v8::platform::DefaultWorkerThreadsTaskRunner>, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:937:55 (libv8_libplatform.so+0x1c051)
+ #9 make_shared<v8::platform::DefaultWorkerThreadsTaskRunner, const int &, double (*)(), void> buildtools/third_party/libc++/trunk/include/__memory/shared_ptr.h:946:12 (libv8_libplatform.so+0x1c051)
+ #10 v8::platform::DefaultPlatform::EnsureBackgroundTaskRunnerInitialized() src/libplatform/default-platform.cc:132:7 (libv8_libplatform.so+0x1c051)
+ #11 v8::platform::DefaultPlatform::DefaultPlatform(int, v8::platform::IdleTaskSupport, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:108:5 (libv8_libplatform.so+0x1bf94) (BuildId: c4a8dbf0cb01439d)
+ #12 make_unique<v8::platform::DefaultPlatform, int &, v8::platform::IdleTaskSupport &, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController> > > buildtools/third_party/libc++/trunk/include/__memory/unique_ptr.h:670:30 (libv8_libplatform.so+0x1b3f4) (BuildId: c4a8dbf0cb01439d)
+ #13 v8::platform::NewDefaultPlatform(int, v8::platform::IdleTaskSupport, v8::platform::InProcessStackDumping, std::Cr::unique_ptr<v8::TracingController, std::Cr::default_delete<v8::TracingController>>) src/libplatform/default-platform.cc:53:19 (libv8_libplatform.so+0x1b3f4)
+ #14 v8::Shell::Main(int, char**) src/d8/d8.cc:5747:16 (d8+0x16ffee) (BuildId: a8447ccee3b50949)
+ #15 main src/d8/d8.cc:6052:43 (d8+0x171350) (BuildId: a8447ccee3b50949)
+
+SUMMARY: ThreadSanitizer: data race src/heap/marking.h:73:11 in Get<(v8::internal::AccessMode)1>
+==================
+ThreadSanitizer: reported 1 warnings \ No newline at end of file
diff --git a/deps/v8/tools/testrunner/testproc/util.py b/deps/v8/tools/testrunner/testproc/util.py
index 5e6b8fd2c7..e969a0347d 100644
--- a/deps/v8/tools/testrunner/testproc/util.py
+++ b/deps/v8/tools/testrunner/testproc/util.py
@@ -54,17 +54,16 @@ def kill_processes_linux():
logging.exception('Failed to kill process')
-def strip_ascii_control_characters(unicode_string):
- return re.sub(r'[^\x20-\x7E]', '?', str(unicode_string))
-
-
def base_test_record(test, result, run):
record = {
- 'name': test.full_name,
- 'flags': result.cmd.args,
- 'run': run + 1,
'expected': test.expected_outcomes,
+ 'flags': result.cmd.args,
+ 'framework_name': test.framework_name,
+ 'name': test.full_name,
'random_seed': test.random_seed,
+ 'run': run + 1,
+ 'shard_id': test.shard_id,
+ 'shard_count': test.shard_count,
'target_name': test.get_shell(),
'variant': test.variant,
'variant_flags': test.variant_flags,
@@ -77,20 +76,6 @@ def base_test_record(test, result, run):
return record
-def extract_tags(record):
- tags = []
- for k, v in record.items():
- if type(v) == list:
- tags += [sanitized_kv_dict(k, e) for e in v]
- else:
- tags.append(sanitized_kv_dict(k, v))
- return tags
-
-
-def sanitized_kv_dict(k, v):
- return dict(key=k, value=strip_ascii_control_characters(v))
-
-
class FixedSizeTopList():
"""Utility collection for gathering a fixed number of elements with the
biggest value for the given key. It employs a heap from which we pop the
diff --git a/deps/v8/tools/testrunner/testproc/variant.py b/deps/v8/tools/testrunner/testproc/variant.py
index 21d952b55c..936ae031a6 100644
--- a/deps/v8/tools/testrunner/testproc/variant.py
+++ b/deps/v8/tools/testrunner/testproc/variant.py
@@ -28,6 +28,9 @@ class VariantProc(base.TestProcProducer):
self._variant_gens = {}
self._variants = variants
+ def test_suffix(self, test):
+ return test.variant
+
def _next_test(self, test):
gen = self._variants_gen(test)
self._next_variant[test.procid] = gen
@@ -43,8 +46,8 @@ class VariantProc(base.TestProcProducer):
def _try_send_new_subtest(self, test, variants_gen):
for variant, flags, suffix in variants_gen:
- subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
- variant=variant, flags=flags)
+ subtest = test.create_subtest(
+ self, '%s-%s' % (variant, suffix), variant=variant, flags=flags)
if self._send_test(subtest):
return True
diff --git a/deps/v8/tools/testrunner/utils/augmented_options.py b/deps/v8/tools/testrunner/utils/augmented_options.py
index 0af04031c5..69fd5305e5 100644
--- a/deps/v8/tools/testrunner/utils/augmented_options.py
+++ b/deps/v8/tools/testrunner/utils/augmented_options.py
@@ -5,8 +5,12 @@
import optparse
import os
import random
+
+from functools import cached_property
+
from testrunner.testproc import fuzzer
+
class AugmentedOptions(optparse.Values):
"""This class will augment exiting options object with
a couple of convenient methods and properties.
@@ -21,6 +25,7 @@ class AugmentedOptions(optparse.Values):
self._fuzzer_rng = random.Random(self.fuzzer_random_seed)
return self._fuzzer_rng
+ @cached_property
def shard_info(self):
"""
Returns pair:
diff --git a/deps/v8/tools/testrunner/utils/test_utils.py b/deps/v8/tools/testrunner/utils/test_utils.py
index f61beaa4fd..7ab38b0e25 100644
--- a/deps/v8/tools/testrunner/utils/test_utils.py
+++ b/deps/v8/tools/testrunner/utils/test_utils.py
@@ -14,6 +14,7 @@ import unittest
from contextlib import contextmanager
from dataclasses import dataclass
from io import StringIO
+from mock import patch
from os.path import dirname as up
from testrunner.local.command import BaseCommand
@@ -82,6 +83,8 @@ def clean_json_output(json_path, basedir):
# Extract relevant properties of the json output.
if not json_path:
return None
+ if not os.path.exists(json_path):
+ return '--file-does-not-exists--'
with open(json_path) as f:
json_output = json.load(f)
@@ -194,6 +197,25 @@ class TestRunnerTest(unittest.TestCase):
"""Implement to return the runner class"""
return None
+ @contextmanager
+ def with_fake_rdb(self):
+ records = []
+
+ def fake_sink():
+ return True
+
+ class Fake_RPC:
+
+ def __init__(self, sink):
+ pass
+
+ def send(self, r):
+ records.append(r)
+
+ with patch('testrunner.testproc.progress.rdb_sink', fake_sink), \
+ patch('testrunner.testproc.resultdb.ResultDB_RPC', Fake_RPC):
+ yield records
+
class FakeOSContext(DefaultOSContext):
@@ -221,7 +243,7 @@ class FakeCommand(BaseCommand):
timeout=60,
env=None,
verbose=False,
- resources_func=None,
+ test_case=None,
handle_sigterm=False):
f_prefix = ['fake_wrapper'] + cmd_prefix
super(FakeCommand, self).__init__(
diff --git a/deps/v8/tools/torque/vim-torque/syntax/torque.vim b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
index 592e870820..56fe1adb20 100644
--- a/deps/v8/tools/torque/vim-torque/syntax/torque.vim
+++ b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -47,7 +47,7 @@ syn keyword torqueType Arguments void never
syn keyword torqueType Tagged Smi HeapObject Object
syn keyword torqueType int32 uint32 int64 intptr uintptr float32 float64
syn keyword torqueType bool string
-syn keyword torqueType int31 RawPtr AbstractCode Code JSReceiver Context String
+syn keyword torqueType int31 RawPtr AbstractCode InstructionStream JSReceiver Context String
syn keyword torqueType Oddball HeapNumber Number BigInt Numeric Boolean JSProxy
syn keyword torqueType JSObject JSArray JSFunction JSBoundFunction Callable Map
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 24c481d21c..37de0ef694 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -15,7 +15,6 @@ BOTS = {
'--linux32': 'v8_linux32_perf_try',
'--linux64': 'v8_linux64_perf_try',
'--nexus5': 'v8_nexus5_perf_try',
- '--nexus7': 'v8_nexus7_perf_try',
'--pixel2': 'v8_pixel2_perf_try',
}
diff --git a/deps/v8/tools/turbolizer/css/turbo-visualizer-ranges.css b/deps/v8/tools/turbolizer/css/turbo-visualizer-ranges.css
index 1e2b9a04f4..c1ec95fcc9 100644
--- a/deps/v8/tools/turbolizer/css/turbo-visualizer-ranges.css
+++ b/deps/v8/tools/turbolizer/css/turbo-visualizer-ranges.css
@@ -3,10 +3,12 @@
:root {
--range-y-axis-width: 18ch;
- --range-position-width: 3.5ch;
+ --range-position-width: 1.5ch;
--range-block-border: 6px;
--range-instr-border: 3px;
--range-position-border: 1px;
+ --range-flipped-position-height: 1em;
+ --range-axes-border-buffer: 6px;
}
.range-bold {
@@ -14,6 +16,20 @@
color: black;
}
+.selected.range-interval-position,
+.selected.range-interval-wrapper {
+ background-color: rgb(255, 224, 51);
+}
+
+.selected.range-interval,
+.selected.range-interval > .selected.range-interval-wrapper {
+ background-color: rgb(86, 235, 72);
+}
+
+.selected.range-instruction-id {
+ background-color: rgb(255, 133, 51);
+}
+
#ranges {
font-family: monospace;
min-height: auto;
@@ -25,6 +41,7 @@
}
.range-title-div {
+ box-sizing: border-box;
padding: 2ch 2ch 2ch 2ch;
white-space: nowrap;
overflow: auto;
@@ -56,6 +73,10 @@ input.range-toggle-setting {
vertical-align: middle;
}
+.range-toggle-form {
+ display: inline;
+}
+
.range-header-label-x {
text-align: center;
margin-left: 13ch;
@@ -90,38 +111,78 @@ input.range-toggle-setting {
background-color: lightgray;
}
-.range-register-labels {
+#ranges.not_flipped .range-register-labels,
+#ranges.flipped .range-position-labels {
float: right;
+ border-bottom: var(--range-axes-border-buffer) solid white;
+
}
-.range-position-labels {
+#ranges.not_flipped .range-position-labels,
+#ranges.flipped .range-register-labels {
margin-top: auto;
+ border-right: var(--range-axes-border-buffer) solid white;
+}
+
+#ranges.flipped .range-register-labels {
+ border-right: 2px solid white;
}
-.range-registers {
+#ranges.not_flipped .range-registers {
float: right;
overflow: hidden;
text-align: right;
}
-.range-positions-header,
-.range-instruction-ids,
-.range-block-ids {
+#ranges.not_flipped .range-positions-header,
+#ranges.not_flipped .range-instruction-ids,
+#ranges.not_flipped .range-block-ids,
+#ranges.flipped .range-registers,
+#ranges.flipped .range-registers-type {
overflow: hidden;
white-space: nowrap;
+}
+
+#ranges .range-positions-header,
+#ranges .range-instruction-ids,
+#ranges .range-block-ids,
+#ranges.flipped .range-registers,
+#ranges.flipped .range-registers-type {
display: grid;
grid-gap: 0;
}
-.range-reg {
+#ranges.not_flipped .range-registers-type {
+ width: 0ch;
+}
+
+.range-type-header {
+ display: inline-block;
+ text-align: center;
+}
+
+.range-type-header::after {
+ float: right;
+}
+
+#ranges.not_flipped .range-reg {
width: 13ch;
text-align: right;
}
-.range-reg::after {
+#ranges.not_flipped .range-reg::after {
content: ":";
}
+#ranges.flipped .range-reg {
+ display: inline-block;
+ text-align: center;
+}
+
+#ranges.flipped .range-registers .range-reg {
+ white-space: pre-line;
+}
+
.range-grid {
overflow: auto;
display: inline-block;
@@ -144,11 +205,55 @@ input.range-toggle-setting {
z-index: 1;
}
+#ranges.not_flipped .range-position {
+ color: transparent;
+}
+
.range-transparent,
-.range-position.range-empty {
+.range-position.range-empty,
+#ranges.flipped .range-position.range-empty {
color: transparent;
}
+#ranges.flipped .range-interval-text {
+ writing-mode: horizontal-tb;
+}
+
+#ranges.flipped .range-interval-wrapper {
+ writing-mode: vertical-lr;
+}
+
+#ranges.flipped .range-registers,
+#ranges.flipped .range-registers-type,
+#ranges.flipped .range-grid,
+#ranges.flipped .range-positions-group {
+ display: inline-grid;
+ grid-gap: 0;
+}
+
+#ranges.flipped .range-grid {
+ writing-mode: vertical-lr;
+ grid-template-rows: repeat(var(--range-num-registers, 0), calc(var(--range-position-width) + (2 * var(--range-position-border))));
+}
+
+#ranges.flipped .range-registers,
+#ranges.flipped .range-registers-type {
+ grid-template-columns: repeat(var(--range-num-registers, 0), calc(var(--range-position-width) + (2 * var(--range-position-border))));
+}
+
+#ranges.flipped .range-position-labels {
+ display: grid;
+ grid-gap: 0;
+ writing-mode: vertical-lr;
+ grid-template-rows: repeat(3, calc(5.5ch + (2 * var(--range-position-border))));
+ border-bottom: 2px solid white;
+}
+
+#ranges.flipped .range-interval-wrapper,
+#ranges.flipped .range-grid .range-positions {
+ grid-template-rows: calc(var(--range-position-width) + (2 * var(--range-position-border)));
+}
+
.range-block-id:hover,
.range-instruction-id:hover,
.range-reg:hover,
@@ -156,36 +261,94 @@ input.range-toggle-setting {
background-color: rgba(0, 0, 255, 0.10);
}
-.range-position.range-header-element {
- border-bottom: 2px solid rgb(109, 107, 107);
+.range-block-id.selected:hover {
+ background-color: rgb(153, 229, 179);
+}
+
+.range-instruction-id.selected:hover {
+ background-color: rgb(229, 119, 72);
}
+.range-reg.selected:hover,
+.range-position.selected:hover {
+ background-color: rgb(229, 229, 72);
+}
+
+.range-interval-position.selected:hover {
+ background-color: rgb(229, 201, 71);
+}
+
+.selected.range-interval,
+.selected.range-interval > .selected.range-interval-wrapper {
+ background-color: rgb(86, 235, 72);
+}
+
+.range-type-header,
.range-block-id,
.range-instruction-id,
.range-reg,
-.range-interval,
.range-position {
position: relative;
border: var(--range-position-border) solid rgb(109, 107, 107);
}
-.range-block-id,
-.range-instruction-id,
-.range-interval,
-.range-position {
+.range-interval {
+ position: relative;
+}
+
+#ranges.not_flipped .range-block-id,
+#ranges.not_flipped .range-instruction-id,
+#ranges.not_flipped .range-interval,
+#ranges.not_flipped .range-position {
border-left: 0;
}
-.range-block-ids > .range-block-id:first-child,
-.range-instruction-ids > .range-instruction-id:first-child,
-.range-positions > .range-position:first-child {
+#ranges.flipped .range-block-id,
+#ranges.flipped .range-instruction-id,
+#ranges.flipped .range-interval,
+#ranges.flipped .range-position {
+ border-top: 0;
+}
+
+#ranges.not_flipped .range-block-ids > .range-block-id:first-child,
+#ranges.not_flipped .range-instruction-ids > .range-instruction-id:first-child,
+#ranges.not_flipped .range-positions > .range-position:first-child {
border-left: var(--range-position-border) solid rgb(109, 107, 107);
}
+#ranges.flipped .range-block-ids > .range-block-id:first-child,
+#ranges.flipped .range-instruction-ids > .range-instruction-id:first-child,
+#ranges.flipped .range-positions > .range-position:first-child {
+ border-top: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
.range-position.range-interval-position {
border: none;
}
+#ranges.not_flipped .range-position.range-interval-position {
+ border-top: var(--range-position-border) solid rgb(109, 107, 107);
+ border-bottom: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
+#ranges.flipped .range-position.range-interval-position {
+ border-left: var(--range-position-border) solid rgb(109, 107, 107);
+ border-right: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
+#ranges.flipped .range-interval-position:first-child {
+ padding-top: 0px;
+}
+
+#ranges.not_flipped .range-interval-position:last-child {
+ border-right: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
+#ranges.flipped .range-interval-position:last-child {
+ padding-bottom: 0px;
+ border-bottom: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
.range-interval-text {
position: absolute;
padding-left: 0.5ch;
@@ -193,26 +356,60 @@ input.range-toggle-setting {
pointer-events: none
}
-.range-position.range-use {
+#ranges.flipped .range-interval-text {
+ width: 1ch;
+ overflow-wrap: break-word;
+ white-space: normal;
+ /* padding + 1ch + padding = width */
+ padding-left: calc((var(--range-position-width) - 1ch) / 2.0);
+}
+
+#ranges.flipped .range-interval-text-behind {
+ overflow-wrap: normal;
+}
+
+#ranges.not_flipped .range-position.range-use {
border-left: var(--range-instr-border) solid red;
}
-.range-block-border,
-.range-block-border.range-position.range-interval-position:last-child {
+#ranges.flipped .range-position.range-use {
+ border-top: var(--range-instr-border) solid red;
+}
+
+#ranges.not_flipped .range-block-border,
+#ranges.not_flipped .range-block-border.range-position.range-interval-position:last-child {
border-right: var(--range-block-border) solid rgb(109, 107, 107);
}
-.range-block-border.range-position.range-interval-position {
- border-right: var(--range-block-border) solid transparent;
+#ranges.flipped .range-block-border,
+#ranges.flipped .range-block-border.range-position.range-interval-position:last-child {
+ border-bottom: var(--range-block-border) solid rgb(109, 107, 107);
+}
+
+#ranges.not_flipped .range-block-border.range-position.range-interval-position {
+ border-right: none;
+}
+
+#ranges.flipped .range-block-border.range-position.range-interval-position {
+ border-bottom: none;
}
-.range-instr-border,
-.range-instr-border.range-position.range-interval-position:last-child {
+#ranges.not_flipped .range-instr-border,
+#ranges.not_flipped .range-instr-border.range-position.range-interval-position:last-child {
border-right: var(--range-instr-border) solid rgb(109, 107, 107);
}
-.range-instr-border.range-position.range-interval-position {
- border-right: var(--range-instr-border) solid transparent;
+#ranges.flipped .range-instr-border,
+#ranges.flipped .range-instr-border.range-position.range-interval-position:last-child {
+ border-bottom: var(--range-instr-border) solid rgb(109, 107, 107);
+}
+
+#ranges.not_flipped .range-instr-border.range-position.range-interval-position {
+ border-right: none;
+}
+
+#ranges.flipped .range-instr-border.range-position.range-interval-position {
+ border-bottom: none;
}
.range,
@@ -229,10 +426,53 @@ input.range-toggle-setting {
grid-gap: 0;
}
+#ranges.not_flipped .range-instruction-ids,
+#ranges.not_flipped .range-block-ids,
+#ranges.not_flipped .range-positions {
+ grid-template-columns: repeat(var(--range-num-positions, 0), calc(var(--range-position-width) + var(--range-block-border)));
+ width: calc((var(--range-num-positions, 0) * (var(--range-position-width) + var(--range-block-border))) - var(--range-axes-border-buffer));
+}
+
+#ranges.flipped .range-instruction-ids,
+#ranges.flipped .range-block-ids,
+#ranges.flipped .range-positions {
+ writing-mode: vertical-lr;
+ grid-template-columns: repeat(var(--range-num-positions, 0), calc(var(--range-flipped-position-height) + var(--range-block-border)));
+ height: calc((var(--range-num-positions, 0) * (var(--range-flipped-position-height) + var(--range-block-border))) - var(--range-axes-border-buffer));
+}
+
+#ranges.flipped .range-instruction-id {
+ display: grid;
+ grid-gap: 0;
+ grid-template-rows: repeat(8, calc((var(--range-flipped-position-height) + var(--range-block-border)) / 2));
+}
+
+#ranges.flipped .range-instruction-id-number {
+ grid-row: 4 / 5;
+ height: 100%;
+}
+
+#ranges.flipped .range-block-id {
+ display: grid;
+ grid-gap: 0;
+}
+
+#ranges.flipped .range-block-id-number {
+ height: 100%;
+}
+
+#ranges.flipped .range-header-element {
+ writing-mode: horizontal-tb;
+}
+
.range-interval {
background-color: rgb(153, 158, 168);
}
+#ranges.flipped .range-interval {
+ display: block;
+}
+
.range-hidden {
display: none !important;
}
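
The flipped/not_flipped rule pairs above are all driven by a single class on the #ranges container together with the new --range-num-positions / --range-num-registers custom properties. A minimal sketch of how a view could switch axes under those assumptions (setFlipped is a hypothetical helper, not turbolizer code):

    // Hypothetical helper: swap the axis classes on #ranges and update the
    // custom properties that the grid-template rules above read via var().
    function setFlipped(flipped: boolean, numPositions: number, numRegisters: number): void {
      const ranges = document.getElementById("ranges");
      if (ranges === null) return;
      ranges.classList.toggle("flipped", flipped);
      ranges.classList.toggle("not_flipped", !flipped);
      document.body.style.setProperty("--range-num-positions", String(numPositions));
      document.body.style.setProperty("--range-num-registers", String(numRegisters));
    }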
diff --git a/deps/v8/tools/turbolizer/css/turbo-visualizer.css b/deps/v8/tools/turbolizer/css/turbo-visualizer.css
index 1ec293e0ee..c0567228f4 100644
--- a/deps/v8/tools/turbolizer/css/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/css/turbo-visualizer.css
@@ -25,7 +25,8 @@
cursor: pointer;
}
-.search-input {
+.search-input,
+.instruction-range-input {
vertical-align: middle;
width: 145px;
opacity: 1;
@@ -33,6 +34,14 @@
height: 1.5em;
}
+.instruction-range-input {
+ width: 13ch;
+}
+
+.instruction-range-submit {
+ margin-left: 1ch;
+}
+
#phase-select {
box-sizing: border-box;
height: 1.5em;
@@ -68,6 +77,7 @@
.selected.block,
.selected.block-id,
+.selected.range-block-id,
.selected.schedule-block {
background-color: #AAFFAA;
}
diff --git a/deps/v8/tools/turbolizer/src/common/constants.ts b/deps/v8/tools/turbolizer/src/common/constants.ts
index 4fdcb4cd2a..2aa7396b85 100644
--- a/deps/v8/tools/turbolizer/src/common/constants.ts
+++ b/deps/v8/tools/turbolizer/src/common/constants.ts
@@ -32,10 +32,16 @@ export const RESIZER_RANGES_HEIGHT_BUFFER_PERCENTAGE = 5;
export const ROW_GROUP_SIZE = 20;
export const POSITIONS_PER_INSTRUCTION = 4;
export const FIXED_REGISTER_LABEL_WIDTH = 6;
+export const FLIPPED_REGISTER_WIDTH_BUFFER = 5;
+// Required due to the css grid-template-rows and grid-template-columns being limited
+// to 1000 places. Regardless of this, a limit is required at some point due
+// to performance issues.
+export const MAX_NUM_POSITIONS = 999;
export const SESSION_STORAGE_PREFIX = "ranges-setting-";
export const INTERVAL_TEXT_FOR_NONE = "none";
export const INTERVAL_TEXT_FOR_CONST = "const";
export const INTERVAL_TEXT_FOR_STACK = "stack:";
+export const VIRTUAL_REGISTER_ID_PREFIX = "virt_";
export const HISTORY_ID = "history";
export const MULTIVIEW_ID = "multiview";
export const RESIZER_RANGES_ID = "resizer-ranges";
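
MAX_NUM_POSITIONS exists because CSS repeat() tracks in grid-template-rows/columns are capped at 1000, and very wide grids are slow in any case. A minimal sketch of the kind of clamping this constant enables (clampInstructionWindow is an invented name, not the real API):

    // Invented helper: clamp a requested instruction window so the grid built
    // from it never needs more than MAX_NUM_POSITIONS position tracks.
    const POSITIONS_PER_INSTRUCTION = 4;
    const MAX_NUM_POSITIONS = 999;

    function clampInstructionWindow(start: number, end: number): [number, number] {
      const maxInstructions = Math.floor(MAX_NUM_POSITIONS / POSITIONS_PER_INSTRUCTION);
      return end - start > maxInstructions ? [start, start + maxInstructions] : [start, end];
    }

    console.log(clampInstructionWindow(0, 1000)); // [0, 249]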
diff --git a/deps/v8/tools/turbolizer/src/common/util.ts b/deps/v8/tools/turbolizer/src/common/util.ts
index 26b710ea92..7b4744866d 100644
--- a/deps/v8/tools/turbolizer/src/common/util.ts
+++ b/deps/v8/tools/turbolizer/src/common/util.ts
@@ -100,3 +100,7 @@ export function getNumericCssValue(varName: string): number {
const propertyValue = getComputedStyle(document.body).getPropertyValue(varName);
return parseFloat(propertyValue.match(/[+-]?\d+(\.\d+)?/g)[0]);
}
+
+export function setCssValue(varName: string, value: string): void {
+ document.body.style.setProperty(varName, value);
+}
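
setCssValue is the write-side counterpart of getNumericCssValue: it stores a custom property on document.body so that rules such as repeat(var(--range-num-positions, 0), ...) in the ranges stylesheet pick the value up. Usage sketch (the property values are invented):

    // Re-declared here only to keep the sketch self-contained; it matches the
    // helper added in this hunk.
    function setCssValue(varName: string, value: string): void {
      document.body.style.setProperty(varName, value);
    }

    setCssValue("--range-num-positions", "120");
    setCssValue("--range-num-registers", "32");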
diff --git a/deps/v8/tools/turbolizer/src/common/view-elements.ts b/deps/v8/tools/turbolizer/src/common/view-elements.ts
index d1b497a5e9..30507fe997 100644
--- a/deps/v8/tools/turbolizer/src/common/view-elements.ts
+++ b/deps/v8/tools/turbolizer/src/common/view-elements.ts
@@ -39,6 +39,6 @@ export class ViewElements {
} else if (pos > (currentScrollTop + 3 * margin)) {
return Math.max(0, pos - 3 * margin);
}
- return pos;
+ return currentScrollTop;
}
}
diff --git a/deps/v8/tools/turbolizer/src/graphmultiview.ts b/deps/v8/tools/turbolizer/src/graphmultiview.ts
index 8942526313..5179cfded2 100644
--- a/deps/v8/tools/turbolizer/src/graphmultiview.ts
+++ b/deps/v8/tools/turbolizer/src/graphmultiview.ts
@@ -95,9 +95,9 @@ export class GraphMultiView extends View {
}
public displayPhaseByName(phaseName: string, selection?: SelectionStorage): void {
- this.currentPhaseView.hide();
const phaseId = this.sourceResolver.getPhaseIdByName(phaseName);
this.selectMenu.selectedIndex = phaseId;
+ this.currentPhaseView.hide();
this.displayPhase(this.sourceResolver.getDynamicPhase(phaseId), selection);
}
diff --git a/deps/v8/tools/turbolizer/src/phases/instructions-phase.ts b/deps/v8/tools/turbolizer/src/phases/instructions-phase.ts
index 5e0d94dc48..b48f917879 100644
--- a/deps/v8/tools/turbolizer/src/phases/instructions-phase.ts
+++ b/deps/v8/tools/turbolizer/src/phases/instructions-phase.ts
@@ -62,28 +62,35 @@ export class InstructionsPhase extends Phase {
return keyPcOffsets;
}
- public nodesForPCOffset(offset: number): Array<string> {
- if (this.pcOffsets.length === 0) return new Array<string>();
+ public instructionsForPCOffset(offset: number): Array<number> {
+ if (this.pcOffsets.length === 0) return new Array<number>();
for (const key of this.pcOffsets) {
if (key <= offset) {
- const instructions = this.pcOffsetToInstructions.get(key);
- const nodes = new Array<string>();
- for (const instruction of instructions) {
- for (const [nodeId, range] of this.nodeIdToInstructionRange.entries()) {
- if (!range) continue;
- const [start, end] = range;
- if (start == end && instruction == start) {
- nodes.push(String(nodeId));
- }
- if (start <= instruction && instruction < end) {
- nodes.push(String(nodeId));
- }
- }
+ return this.pcOffsetToInstructions.get(key);
+ }
+ }
+ return new Array<number>();
+ }
+
+ public nodesForInstructions(instructionIds: Iterable<number>): Array<string> {
+ const nodes = new Array<string>();
+ for (const instruction of instructionIds) {
+ for (const [nodeId, range] of this.nodeIdToInstructionRange.entries()) {
+ if (!range) continue;
+ const [start, end] = range;
+ if (start == end && instruction == start) {
+ nodes.push(String(nodeId));
+ }
+ if (start <= instruction && instruction < end) {
+ nodes.push(String(nodeId));
}
- return nodes;
}
}
- return new Array<string>();
+ return nodes;
+ }
+
+ public nodesForPCOffset(offset: number): Array<string> {
+ return this.nodesForInstructions(this.instructionsForPCOffset(offset));
}
public nodesToKeyPcOffsets(nodeIds: Set<string>): Array<TurbolizerInstructionStartInfo> {
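
The old nodesForPCOffset is split into instructionsForPCOffset (PC offset to instruction ids) and nodesForInstructions (instruction ids to node ids), so callers such as the selection broker can stop at the instruction level. A self-contained sketch of the resulting lookup chain, with the three maps populated by invented data:

    // The field names mirror the ones used above; the contents are made up.
    const pcOffsets = [0x40, 0x20, 0x00];  // key pc offsets, sorted descending
    const pcOffsetToInstructions = new Map<number, Array<number>>([
      [0x00, [0, 1]], [0x20, [2]], [0x40, [3, 4]],
    ]);
    const nodeIdToInstructionRange: Array<[number, number]> = [];
    nodeIdToInstructionRange[7] = [2, 3];  // node 7 covers instruction 2

    function instructionsForPCOffset(offset: number): Array<number> {
      for (const key of pcOffsets) {
        if (key <= offset) return pcOffsetToInstructions.get(key) ?? [];
      }
      return [];
    }

    function nodesForInstructions(instructionIds: Iterable<number>): Array<string> {
      const nodes = new Array<string>();
      for (const instruction of instructionIds) {
        for (const [nodeId, range] of nodeIdToInstructionRange.entries()) {
          if (!range) continue;
          const [start, end] = range;
          if ((start == end && instruction == start) ||
              (start <= instruction && instruction < end)) {
            nodes.push(String(nodeId));
          }
        }
      }
      return nodes;
    }

    console.log(nodesForInstructions(instructionsForPCOffset(0x24)));  // ["7"]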
diff --git a/deps/v8/tools/turbolizer/src/phases/schedule-phase.ts b/deps/v8/tools/turbolizer/src/phases/schedule-phase.ts
index 85bab8c533..467ea297ef 100644
--- a/deps/v8/tools/turbolizer/src/phases/schedule-phase.ts
+++ b/deps/v8/tools/turbolizer/src/phases/schedule-phase.ts
@@ -61,10 +61,11 @@ export class SchedulePhase extends Phase {
const blockIdStrings = blockIdsString.split(",");
predecessors = blockIdStrings.map(n => Number.parseInt(n, 10));
}
+ const blockRpo = Number.parseInt(match.groups.rpo, 10);
const blockId = Number.parseInt(match.groups.id, 10);
- const block = new ScheduleBlock(blockId, match.groups.deferred !== undefined,
+ const block = new ScheduleBlock(blockRpo, blockId, match.groups.deferred !== undefined,
predecessors.sort());
- this.data.blocks[block.id] = block;
+ this.data.blocksRpo[block.rpo] = block;
}
private setGotoSuccessor = match => {
@@ -82,7 +83,10 @@ export class SchedulePhase extends Phase {
process: this.createNode
},
{
- lineRegexps: [/^\s*---\s*BLOCK\ B(?<id>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/],
+ lineRegexps: [
+ /^\s*---\s*BLOCK\ B(?<rpo>\d+)\ id(?<id>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/,
+ /^\s*---\s*BLOCK\ B(?<rpo>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/
+ ],
process: this.createBlock
},
{
@@ -109,13 +113,15 @@ export class ScheduleNode {
}
export class ScheduleBlock {
+ rpo: number;
id: number;
deferred: boolean;
predecessors: Array<number>;
successors: Array<number>;
nodes: Array<ScheduleNode>;
- constructor(id: number, deferred: boolean, predecessors: Array<number>) {
+ constructor(rpo: number, id: number, deferred: boolean, predecessors: Array<number>) {
+ this.rpo = rpo;
this.id = id;
this.deferred = deferred;
this.predecessors = predecessors;
@@ -126,15 +132,15 @@ export class ScheduleBlock {
export class ScheduleData {
nodes: Array<ScheduleNode>;
- blocks: Array<ScheduleBlock>;
+ blocksRpo: Array<ScheduleBlock>;
constructor() {
this.nodes = new Array<ScheduleNode>();
- this.blocks = new Array<ScheduleBlock>();
+ this.blocksRpo = new Array<ScheduleBlock>();
}
public lastBlock(): ScheduleBlock {
- if (this.blocks.length == 0) return null;
- return this.blocks[this.blocks.length - 1];
+ if (this.blocksRpo.length == 0) return null;
+ return this.blocksRpo[this.blocksRpo.length - 1];
}
}
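
ScheduleBlock is now keyed by RPO number, and the parser accepts both the newer "--- BLOCK Bn idm ---" header and the older "--- BLOCK Bn ---" form. The sketch below runs the two regexes from this hunk against invented header lines to show which named groups each one yields:

    // Regexes copied from the hunk above; the sample header strings are invented.
    const withId  = /^\s*---\s*BLOCK\ B(?<rpo>\d+)\ id(?<id>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/;
    const rpoOnly = /^\s*---\s*BLOCK\ B(?<rpo>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/;

    console.log(withId.exec("--- BLOCK B3 id7 (deferred) <- B1, B2 ---")?.groups);
    // => rpo: "3", id: "7", deferred: "(deferred)", in: "B1, B2"
    console.log(rpoOnly.exec("--- BLOCK B3 <- B1, B2 ---")?.groups);
    // => rpo: "3", deferred: undefined, in: "B1, B2"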
diff --git a/deps/v8/tools/turbolizer/src/phases/sequence-phase.ts b/deps/v8/tools/turbolizer/src/phases/sequence-phase.ts
index df5cc30d8e..97813aabb0 100644
--- a/deps/v8/tools/turbolizer/src/phases/sequence-phase.ts
+++ b/deps/v8/tools/turbolizer/src/phases/sequence-phase.ts
@@ -9,6 +9,7 @@ import { InstructionsPhase } from "./instructions-phase";
export class SequencePhase extends Phase {
blocks: Array<SequenceBlock>;
+ instructions: Array<SequenceBlockInstruction>;
instructionsPhase: InstructionsPhase;
positions: PositionsContainer;
registerAllocation: RegisterAllocation;
@@ -34,6 +35,7 @@ export class SequencePhase extends Phase {
private parseBlocksFromJSON(blocksJSON): void {
if (!blocksJSON || blocksJSON.length == 0) return;
this.blocks = new Array<SequenceBlock>();
+ this.instructions = new Array<SequenceBlockInstruction>();
for (const block of blocksJSON) {
const newBlock = new SequenceBlock(block.id, block.deferred, block.loopHeader, block.loopEnd,
block.predecessors, block.successors);
@@ -62,6 +64,7 @@ export class SequencePhase extends Phase {
newInstruction.gaps.push(newGap);
}
this.lastBlock().instructions.push(newInstruction);
+ this.instructions.push(newInstruction);
}
}
@@ -88,7 +91,7 @@ export class SequencePhase extends Phase {
if (!rangesJSON) return null;
const parsedRanges = new Array<Range>();
for (const [idx, range] of Object.entries<Range>(rangesJSON)) {
- const newRange = new Range(range.isDeferred);
+ const newRange = new Range(range.isDeferred, range.instructionRange);
for (const childRange of range.childRanges) {
let operand: SequenceBlockOperand | string = null;
if (childRange.op) {
@@ -176,61 +179,16 @@ export class RegisterAllocation {
this.fixedLiveRanges = new Array<Range>();
this.liveRanges = new Array<Range>();
}
-
- public forEachFixedRange(row: number, callback: (registerIndex: number, row: number,
- registerName: string,
- ranges: [Range, Range]) => void): number {
- const forEachRangeInMap = (rangeMap: Array<Range>) => {
- // There are two fixed live ranges for each register, one for normal, another for deferred.
- // These are combined into a single row.
- const fixedRegisterMap = new Map<string, {registerIndex: number, ranges: [Range, Range]}>();
- for (const [registerIndex, range] of rangeMap.entries()) {
- if (!range) continue;
- const registerName = range.fixedRegisterName();
- if (fixedRegisterMap.has(registerName)) {
- const entry = fixedRegisterMap.get(registerName);
- entry.ranges[1] = range;
- // Only use the deferred register index if no normal index exists.
- if (!range.isDeferred) {
- entry.registerIndex = registerIndex;
- }
- } else {
- fixedRegisterMap.set(registerName, {registerIndex, ranges: [range, undefined]});
- }
- }
- // Sort the registers by number.
- const sortedMap = new Map([...fixedRegisterMap.entries()].sort(([nameA, _], [nameB, __]) => {
- if (nameA.length > nameB.length) {
- return 1;
- } else if (nameA.length < nameB.length) {
- return -1;
- } else if (nameA > nameB) {
- return 1;
- } else if (nameA < nameB) {
- return -1;
- }
- return 0;
- }));
-
- for (const [registerName, {ranges, registerIndex}] of sortedMap) {
- callback(-registerIndex - 1, row, registerName, ranges);
- ++row;
- }
- };
-
- forEachRangeInMap(this.fixedLiveRanges);
- forEachRangeInMap(this.fixedDoubleLiveRanges);
-
- return row;
- }
}
export class Range {
isDeferred: boolean;
+ instructionRange: [number, number];
childRanges: Array<ChildRange>;
- constructor(isDeferred: boolean) {
+ constructor(isDeferred: boolean, instructionRange: [number, number]) {
this.isDeferred = isDeferred;
+ this.instructionRange = instructionRange;
this.childRanges = new Array<ChildRange>();
}
@@ -259,24 +217,38 @@ export class ChildRange {
this.uses = uses;
}
- public getTooltip(registerIndex: number): string {
+ public getTooltip(registerIndex: number): RangeToolTip {
switch (this.type) {
case "none":
- return C.INTERVAL_TEXT_FOR_NONE;
+ return new RangeToolTip(C.INTERVAL_TEXT_FOR_NONE, false);
case "spill_range":
- return `${C.INTERVAL_TEXT_FOR_STACK}${registerIndex}`;
+ return new RangeToolTip(`${C.INTERVAL_TEXT_FOR_STACK}${registerIndex}`, true);
default:
if (this.op instanceof SequenceBlockOperand && this.op.type == "constant") {
- return C.INTERVAL_TEXT_FOR_CONST;
+ return new RangeToolTip(C.INTERVAL_TEXT_FOR_CONST, false);
} else {
if (this.op instanceof SequenceBlockOperand && this.op.text) {
- return this.op.text;
+ return new RangeToolTip(this.op.text, true);
} else if (typeof this.op === "string") {
- return this.op;
+ return new RangeToolTip(this.op, true);
}
}
}
- return "";
+ return new RangeToolTip("", false);
+ }
+
+ public isFloatingPoint(): boolean {
+ return this.op instanceof SequenceBlockOperand && this.op.tooltip.includes("Float");
+ }
+}
+
+export class RangeToolTip {
+ text: string;
+ isId: boolean;
+
+ constructor(text: string, isId: boolean) {
+ this.text = text;
+ this.isId = isId;
}
}
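
getTooltip now returns a RangeToolTip rather than a bare string; the isId flag records whether the text can double as a stable node id for the interval, which the range view later uses when assigning data-node-id. A small sketch of that decision, with invented inputs:

    class RangeToolTip {
      constructor(public text: string, public isId: boolean) {}
    }

    // Mirrors how range-view.ts derives an interval's node id: reuse the tooltip
    // text when it is an id, otherwise synthesize one from index and start.
    function intervalNodeId(tooltip: RangeToolTip, index: number, start: number): string {
      return tooltip.isId ? tooltip.text : `interval-${index}-${start}`;
    }

    console.log(intervalNodeId(new RangeToolTip("v42", true), 0, 12));   // "v42"
    console.log(intervalNodeId(new RangeToolTip("const", false), 1, 8)); // "interval-1-8"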
diff --git a/deps/v8/tools/turbolizer/src/resizer.ts b/deps/v8/tools/turbolizer/src/resizer.ts
index 87cbf0b255..cef53afc80 100644
--- a/deps/v8/tools/turbolizer/src/resizer.ts
+++ b/deps/v8/tools/turbolizer/src/resizer.ts
@@ -356,9 +356,21 @@ export class Resizer {
if (rangeGrid) {
const yAxis = (this.ranges.getElementsByClassName("range-y-axis")[0] as HTMLElement);
const rangeHeader = (this.ranges.getElementsByClassName("range-header")[0] as HTMLElement);
+ const rangeTitle = (this.ranges.getElementsByClassName("range-title-div")[0] as HTMLElement);
+
+ let gridWidth = rangeWidth - yAxis.clientWidth;
+
+ if (this.ranges.classList.contains("flipped")) {
+ const rangeRegisters =
+ (this.ranges.getElementsByClassName("range-registers")[0] as HTMLElement);
+ if (rangeRegisters.offsetWidth + C.FLIPPED_REGISTER_WIDTH_BUFFER < gridWidth) {
+ gridWidth = Math.floor(rangeRegisters.offsetWidth + rangeGrid.offsetWidth
+ - rangeGrid.clientWidth + C.FLIPPED_REGISTER_WIDTH_BUFFER);
+ }
+ }
- const gridWidth = rangeWidth - yAxis.clientWidth;
- rangeGrid.style.width = `${Math.floor(gridWidth - 1)}px`;
+ rangeTitle.style.width = `${rangeWidth}px`;
+ rangeGrid.style.width = `${gridWidth - 1}px`;
// Take live ranges' right scrollbar into account.
rangeHeader.style.width =
`${(gridWidth - rangeGrid.offsetWidth + rangeGrid.clientWidth - 1)}px`;
@@ -366,9 +378,10 @@ export class Resizer {
this.resizerRanges.style("height",
inLandscapeMode ? `${resizerSize}px` : `${clientHeight}px`);
- const rangeTitle = (this.ranges.getElementsByClassName("range-title-div")[0] as HTMLElement);
- const rangeHeaderLabel = (this.ranges.getElementsByClassName("range-header-label-x")[0] as HTMLElement);
- const gridHeight = rangeHeight - rangeHeader.clientHeight - rangeTitle.clientHeight - rangeHeaderLabel.clientHeight;
+ const rangeHeaderLabel =
+ (this.ranges.getElementsByClassName("range-header-label-x")[0] as HTMLElement);
+ const gridHeight = rangeHeight - rangeHeader.clientHeight
+ - rangeTitle.clientHeight - rangeHeaderLabel.clientHeight;
rangeGrid.style.height = `${gridHeight}px`;
// Take live ranges' bottom scrollbar into account.
yAxis.style.height = `${(gridHeight - rangeGrid.offsetHeight + rangeGrid.clientHeight)}px`;
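
In flipped mode the grid no longer has to span the full pane width: when the register columns (plus a scrollbar and the FLIPPED_REGISTER_WIDTH_BUFFER of 5 from constants.ts) fit in less space, the grid is shrunk to match. A pure-function sketch of that computation, with the DOM measurements replaced by plain numbers:

    const FLIPPED_REGISTER_WIDTH_BUFFER = 5;  // px, as in constants.ts

    function computeGridWidth(rangeWidth: number, yAxisWidth: number, flipped: boolean,
                              registersWidth: number, scrollbarWidth: number): number {
      let gridWidth = rangeWidth - yAxisWidth;
      if (flipped && registersWidth + FLIPPED_REGISTER_WIDTH_BUFFER < gridWidth) {
        // Only as wide as the register columns, their scrollbar, and the buffer.
        gridWidth = Math.floor(registersWidth + scrollbarWidth + FLIPPED_REGISTER_WIDTH_BUFFER);
      }
      return gridWidth;
    }

    console.log(computeGridWidth(800, 120, true, 400, 15));  // 420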
diff --git a/deps/v8/tools/turbolizer/src/selection/selection-broker.ts b/deps/v8/tools/turbolizer/src/selection/selection-broker.ts
index 9240551b32..6611d77ce8 100644
--- a/deps/v8/tools/turbolizer/src/selection/selection-broker.ts
+++ b/deps/v8/tools/turbolizer/src/selection/selection-broker.ts
@@ -106,21 +106,23 @@ export class SelectionBroker {
}
// Select the lines from the source and bytecode panels (left panels)
- const pcOffsets = this.sourceResolver.instructionsPhase
- .instructionsToKeyPcOffsets(instructionOffsets);
-
- for (const offset of pcOffsets) {
- const nodes = this.sourceResolver.instructionsPhase.nodesForPCOffset(offset);
- const sourcePositions = this.sourceResolver.nodeIdsToSourcePositions(nodes);
- for (const handler of this.sourcePositionHandlers) {
- if (handler != from) handler.brokeredSourcePositionSelect(sourcePositions, selected);
- }
- const bytecodePositions = this.sourceResolver.nodeIdsToBytecodePositions(nodes);
- for (const handler of this.bytecodeOffsetHandlers) {
- if (handler != from) handler.brokeredBytecodeOffsetSelect(bytecodePositions, selected);
+ const nodes = this.sourceResolver.instructionsPhase.nodesForInstructions(instructionOffsets);
+ const sourcePositions = this.sourceResolver.nodeIdsToSourcePositions(nodes);
+ for (const handler of this.sourcePositionHandlers) {
+ if (handler != from) handler.brokeredSourcePositionSelect(sourcePositions, selected);
+ }
+ const bytecodePositions = this.sourceResolver.nodeIdsToBytecodePositions(nodes);
+ for (const handler of this.bytecodeOffsetHandlers) {
+ if (handler != from) handler.brokeredBytecodeOffsetSelect(bytecodePositions, selected);
+ }
+
+ // Select the lines from the middle panel for the register allocation phase.
+ for (const b of this.registerAllocationHandlers) {
+ if (b != from) {
+ b.brokeredRegisterAllocationSelect(instructionOffsets.map(instr => [instr, instr]),
+ selected);
}
}
- // The middle panel lines have already been selected so there's no need to reselect them.
}
public broadcastSourcePositionSelect(from, sourcePositions: Array<GenericPosition>,
@@ -206,7 +208,7 @@ export class SelectionBroker {
this.selectInstructionsAndRegisterAllocations(from, nodes, selected);
}
- public broadcastBlockSelect(from, blocksIds: Array<string>, selected: boolean): void {
+ public broadcastBlockSelect(from, blocksIds: Array<number>, selected: boolean): void {
for (const handler of this.blockHandlers) {
if (handler != from) handler.brokeredBlockSelect(blocksIds, selected);
}
diff --git a/deps/v8/tools/turbolizer/src/selection/selection-handler.ts b/deps/v8/tools/turbolizer/src/selection/selection-handler.ts
index af8d5a0cc7..82d07f05c1 100644
--- a/deps/v8/tools/turbolizer/src/selection/selection-handler.ts
+++ b/deps/v8/tools/turbolizer/src/selection/selection-handler.ts
@@ -17,16 +17,18 @@ export interface HistoryHandler {
}
export interface NodeSelectionHandler {
- select(nodes: Iterable<TurboshaftGraphNode | GraphNode | string | number>, selected: boolean):
- void;
+ select(nodes: Iterable<TurboshaftGraphNode | GraphNode | string | number>, selected: boolean,
+ scrollIntoView: boolean): void;
clear(): void;
brokeredNodeSelect(nodeIds: Set<string>, selected: boolean): void;
+ brokeredClear(): void;
}
export interface BlockSelectionHandler {
- select(blocks: Iterable<TurboshaftGraphBlock | string | number>, selected: boolean): void;
+ select(blocks: Iterable<TurboshaftGraphBlock | number>, selected: boolean,
+ scrollIntoView: boolean): void;
clear(): void;
- brokeredBlockSelect(blockIds: Array<string>, selected: boolean): void;
+ brokeredBlockSelect(blockIds: Array<number>, selected: boolean): void;
}
export interface InstructionSelectionHandler {
@@ -50,7 +52,7 @@ export interface BytecodeOffsetSelectionHandler {
export interface RegisterAllocationSelectionHandler {
// These are called instructionIds since the class of the divs is "instruction-id"
- select(instructionIds: Array<number>, selected: boolean): void;
+ select(instructionIds: Array<number>, selected: boolean, scrollIntoView: boolean): void;
clear(): void;
brokeredRegisterAllocationSelect(instructionsOffsets: Array<[number, number]>, selected: boolean):
void;
diff --git a/deps/v8/tools/turbolizer/src/selection/selection-map.ts b/deps/v8/tools/turbolizer/src/selection/selection-map.ts
index ff401fe8a3..020a464c3c 100644
--- a/deps/v8/tools/turbolizer/src/selection/selection-map.ts
+++ b/deps/v8/tools/turbolizer/src/selection/selection-map.ts
@@ -53,6 +53,15 @@ export class SelectionMap {
return result;
}
+ public selectedKeysAsAbsNumbers(): Set<number> {
+ const result = new Set<number>();
+ for (const key of this.selection.keys()) {
+ const keyNum = Number(key);
+ result.add(keyNum < 0 ? Math.abs(keyNum + 1) : keyNum);
+ }
+ return result;
+ }
+
public detachSelection(): Map<string, any> {
const result = this.selection;
this.clear();
@@ -60,4 +69,4 @@ export class SelectionMap {
}
[Symbol.iterator]() { return this.selection.values(); }
-}
+}
\ No newline at end of file
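
selectedKeysAsAbsNumbers normalizes selection keys into non-negative register/instruction indices: fixed registers are stored under negative keys of the form -registerIndex - 1 (see the forEachFixedRange code removed in sequence-phase.ts above), and the conversion undoes that encoding. Sketch with invented keys:

    // -1 decodes to fixed register 0, -3 to fixed register 2; positive keys pass through.
    function toAbsNumber(key: string): number {
      const keyNum = Number(key);
      return keyNum < 0 ? Math.abs(keyNum + 1) : keyNum;
    }

    console.log(["3", "-1", "-3"].map(toAbsNumber));  // [3, 0, 2]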
diff --git a/deps/v8/tools/turbolizer/src/selection/selection-storage.ts b/deps/v8/tools/turbolizer/src/selection/selection-storage.ts
index b3faf7862f..47b03b320f 100644
--- a/deps/v8/tools/turbolizer/src/selection/selection-storage.ts
+++ b/deps/v8/tools/turbolizer/src/selection/selection-storage.ts
@@ -5,14 +5,19 @@
export class SelectionStorage {
nodes: Map<string, any>;
blocks: Map<string, any>;
+ instructions: Map<string, any>;
adaptedNodes: Set<string>;
adaptedBocks: Set<string>;
+ adaptedInstructions: Set<number>;
- constructor(nodes?: Map<string, any>, blocks?: Map<string, any>) {
+ constructor(nodes?: Map<string, any>, blocks?: Map<string, any>,
+ instructions?: Map<string, any>) {
this.nodes = nodes ?? new Map<string, any>();
this.blocks = blocks ?? new Map<string, any>();
+ this.instructions = instructions ?? new Map<string, any>();
this.adaptedNodes = new Set<string>();
this.adaptedBocks = new Set<string>();
+ this.adaptedInstructions = new Set<number>();
}
public adaptNode(nodeKey: string): void {
@@ -23,7 +28,12 @@ export class SelectionStorage {
this.adaptedBocks.add(blockKey);
}
+ public adaptInstruction(instrId: number): void {
+ this.adaptedInstructions.add(instrId);
+ }
+
public isAdapted(): boolean {
- return this.adaptedNodes.size != 0 || this.adaptedBocks.size != 0;
+ return this.adaptedNodes.size != 0 || this.adaptedBocks.size != 0
+ || this.adaptedInstructions.size != 0;
}
}
diff --git a/deps/v8/tools/turbolizer/src/source-resolver.ts b/deps/v8/tools/turbolizer/src/source-resolver.ts
index a4ab3c3b0f..0526727acd 100644
--- a/deps/v8/tools/turbolizer/src/source-resolver.ts
+++ b/deps/v8/tools/turbolizer/src/source-resolver.ts
@@ -72,7 +72,7 @@ export class SourceResolver {
const inliningId = inlining.inliningPosition.inliningId;
const inl = new InliningPosition(inlining.sourceId,
new SourcePosition(scriptOffset, inliningId));
- this.inlinings[inliningIdStr] = inl;
+ this.inlinings[Number(inliningIdStr)] = inl;
this.inliningsMap.set(inl.inliningPosition.toString(), inl);
}
}
@@ -106,8 +106,14 @@ export class SourceResolver {
}
const numSourceId = Number(sourceId);
- this.bytecodeSources.set(numSourceId, new BytecodeSource(source.sourceId, source.functionName,
- data, bytecodeSource.constantPool));
+ const inliningIds = [];
+ for (let index = -1; index < this.inlinings.length; index += 1) {
+ const inlining = this.inlinings[index];
+ if (inlining.sourceId == source.sourceId) inliningIds.push(index);
+ }
+ this.bytecodeSources.set(numSourceId,
+ new BytecodeSource(source.sourceId, inliningIds, source.functionName,
+ data, bytecodeSource.constantPool));
}
}
diff --git a/deps/v8/tools/turbolizer/src/source.ts b/deps/v8/tools/turbolizer/src/source.ts
index c52c445866..78fd8fd281 100644
--- a/deps/v8/tools/turbolizer/src/source.ts
+++ b/deps/v8/tools/turbolizer/src/source.ts
@@ -33,13 +33,15 @@ export class Source {
export class BytecodeSource {
sourceId: number;
+ inliningIds: Array<number>;
functionName: string;
data: Array<BytecodeSourceData>;
constantPool: Array<string>;
- constructor(sourceId: number, functionName: string, data: Array<BytecodeSourceData>,
- constantPool: Array<string>) {
+ constructor(sourceId: number, inliningIds: Array<number>, functionName: string,
+ data: Array<BytecodeSourceData>, constantPool: Array<string>) {
this.sourceId = sourceId;
+ this.inliningIds = inliningIds;
this.functionName = functionName;
this.data = data;
this.constantPool = constantPool;
diff --git a/deps/v8/tools/turbolizer/src/views/bytecode-source-view.ts b/deps/v8/tools/turbolizer/src/views/bytecode-source-view.ts
index db461a4f26..39d1b4fb6b 100644
--- a/deps/v8/tools/turbolizer/src/views/bytecode-source-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/bytecode-source-view.ts
@@ -103,7 +103,8 @@ export class BytecodeSourceView extends View {
select: function (offsets: Array<number>, selected: boolean) {
const bytecodePositions = new Array<BytecodePosition>();
for (const offset of offsets) {
- bytecodePositions.push(new BytecodePosition(offset, view.source.sourceId));
+ view.source.inliningIds.forEach(inliningId =>
+ bytecodePositions.push(new BytecodePosition(offset, inliningId)));
}
view.bytecodeOffsetSelection.select(offsets, selected);
view.updateSelection();
@@ -119,7 +120,7 @@ export class BytecodeSourceView extends View {
const offsets = new Array<number>();
const firstSelect = view.bytecodeOffsetSelection.isEmpty();
for (const position of positions) {
- if (position.inliningId == view.source.sourceId) {
+ if (view.source.inliningIds.includes(position.inliningId)) {
offsets.push(position.bytecodePosition);
}
}
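
Because a bytecode source can be inlined more than once, offsets selected in the bytecode panel now fan out to one BytecodePosition per inlining id instead of a single position keyed by sourceId. Sketch with a stand-in BytecodePosition class and invented data:

    class BytecodePosition {
      constructor(public bytecodePosition: number, public inliningId: number) {}
    }

    function positionsFor(offsets: Array<number>, inliningIds: Array<number>): Array<BytecodePosition> {
      const out = new Array<BytecodePosition>();
      for (const offset of offsets) {
        inliningIds.forEach(id => out.push(new BytecodePosition(offset, id)));
      }
      return out;
    }

    console.log(positionsFor([12, 20], [0, 3]).length);  // 4 positions: 2 offsets x 2 inlinings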
diff --git a/deps/v8/tools/turbolizer/src/views/disassembly-view.ts b/deps/v8/tools/turbolizer/src/views/disassembly-view.ts
index 5ef2ddd421..8fd4306b1d 100644
--- a/deps/v8/tools/turbolizer/src/views/disassembly-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/disassembly-view.ts
@@ -4,6 +4,7 @@
import * as C from "../common/constants";
import { interpolate, storageGetItem, storageSetItem } from "../common/util";
+import { ViewElements } from "../common/view-elements";
import { SelectionBroker } from "../selection/selection-broker";
import { TextView } from "./text-view";
import { SelectionMap } from "../selection/selection-map";
@@ -56,8 +57,9 @@ export class DisassemblyView extends TextView {
}
public updateSelection(scrollIntoView: boolean = false): void {
- super.updateSelection(scrollIntoView);
- const selectedKeys = this.nodeSelection.selectedKeys();
+ if (this.divNode.parentNode == null) return;
+ super.updateSelection(scrollIntoView, this.divNode.parentNode as HTMLElement);
+ const selectedKeys = this.nodeSelections.current.selectedKeys();
const keyPcOffsets: Array<TurbolizerInstructionStartInfo | string> = [
...this.sourceResolver.instructionsPhase.nodesToKeyPcOffsets(selectedKeys)
];
@@ -66,12 +68,15 @@ export class DisassemblyView extends TextView {
keyPcOffsets.push(key);
}
}
+ const mkVisible = new ViewElements(this.divNode.parentElement);
for (const keyPcOffset of keyPcOffsets) {
const elementsToSelect = this.divNode.querySelectorAll(`[data-pc-offset='${keyPcOffset}']`);
for (const el of elementsToSelect) {
+ mkVisible.consider(el as HTMLElement, true);
el.classList.toggle("selected", true);
}
}
+ mkVisible.apply(scrollIntoView);
}
public processLine(line: string): Array<HTMLSpanElement> {
@@ -189,7 +194,7 @@ export class DisassemblyView extends TextView {
select: function (instructionIds: Array<string>, selected: boolean) {
view.offsetSelection.select(instructionIds, selected);
view.updateSelection();
- broker.broadcastBlockSelect(this, instructionIds, selected);
+ broker.broadcastInstructionSelect(this, instructionIds.map(id => Number(id)), selected);
},
clear: function () {
view.offsetSelection.clear();
@@ -225,9 +230,18 @@ export class DisassemblyView extends TextView {
if (nodes.length > 0) {
e.stopPropagation();
if (!e.shiftKey) this.nodeSelectionHandler.clear();
- this.nodeSelectionHandler.select(nodes, true);
+ this.nodeSelectionHandler.select(nodes, true, false);
} else {
- this.updateSelection();
+ const instructions = this.sourceResolver.instructionsPhase.instructionsForPCOffset(offset);
+ if (instructions.length > 0) {
+ e.stopPropagation();
+ if (!e.shiftKey) this.instructionSelectionHandler.clear();
+ this.instructionSelectionHandler
+ .brokeredInstructionSelect(instructions.map(instr => [instr, instr]), true);
+ this.broker.broadcastInstructionSelect(this, instructions, true);
+ } else {
+ this.updateSelection();
+ }
}
}
return undefined;
@@ -238,8 +252,8 @@ export class DisassemblyView extends TextView {
const blockId = spanBlockElement.dataset.blockId;
if (blockId !== undefined) {
const blockIds = blockId.split(",");
- if (!e.shiftKey) this.nodeSelectionHandler.clear();
- this.blockSelectionHandler.select(blockIds, true);
+ if (!e.shiftKey) this.blockSelectionHandler.clear();
+ this.blockSelectionHandler.select(blockIds.map(id => Number(id)), true, false);
}
}
diff --git a/deps/v8/tools/turbolizer/src/views/graph-view.ts b/deps/v8/tools/turbolizer/src/views/graph-view.ts
index 98403e6a01..b81cffc0af 100644
--- a/deps/v8/tools/turbolizer/src/views/graph-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/graph-view.ts
@@ -121,7 +121,7 @@ export class GraphView extends MovableView<Graph> {
if (!d3.event.shiftKey) {
view.nodeSelectionHandler.clear();
}
- view.nodeSelectionHandler.select([edge.source, edge.target], true);
+ view.nodeSelectionHandler.select([edge.source, edge.target], true, false);
})
.attr("adjacentToHover", "false")
.classed("value", edge => edge.type === "value" || edge.type === "context")
@@ -185,7 +185,7 @@ export class GraphView extends MovableView<Graph> {
})
.on("click", node => {
if (!d3.event.shiftKey) view.nodeSelectionHandler.clear();
- view.nodeSelectionHandler.select([node], undefined);
+ view.nodeSelectionHandler.select([node], undefined, false);
d3.event.stopPropagation();
})
.call(view.drag);
@@ -351,7 +351,7 @@ export class GraphView extends MovableView<Graph> {
const selection = this.searchNodes(filterFunction, e, onlyVisible);
- this.nodeSelectionHandler.select(selection, true);
+ this.nodeSelectionHandler.select(selection, true, false);
this.connectVisibleSelectedElements(this.state.selection);
this.updateGraphVisibility();
searchInput.blur();
@@ -405,7 +405,8 @@ export class GraphView extends MovableView<Graph> {
private initializeNodeSelectionHandler(): NodeSelectionHandler & ClearableHandler {
const view = this;
return {
- select: function (selectedNodes: Array<GraphNode>, selected: boolean) {
+ select: function (selectedNodes: Array<GraphNode>, selected: boolean,
+ scrollIntoView: boolean) {
const locations = new Array<GenericPosition>();
const nodes = new Set<string>();
for (const node of selectedNodes) {
@@ -576,7 +577,7 @@ export class GraphView extends MovableView<Graph> {
selection.has(this.state.selection.stringKey(node))
&& (!this.state.hideDead || node.isLive()))
];
- this.nodeSelectionHandler.select(selected, true);
+ this.nodeSelectionHandler.select(selected, true, false);
return selected;
}
@@ -736,7 +737,7 @@ export class GraphView extends MovableView<Graph> {
this.showPhaseByName(phase, selection);
} else if (origins.length > 0) {
this.nodeSelectionHandler.clear();
- this.nodeSelectionHandler.select(origins, true);
+ this.nodeSelectionHandler.select(origins, true, false);
}
}
}
diff --git a/deps/v8/tools/turbolizer/src/views/range-view.ts b/deps/v8/tools/turbolizer/src/views/range-view.ts
index 6b86a89910..94fc4b86d1 100644
--- a/deps/v8/tools/turbolizer/src/views/range-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/range-view.ts
@@ -3,10 +3,11 @@
// found in the LICENSE file.
import * as C from "../common/constants";
-import { createElement, getNumericCssValue, storageGetItem, storageSetItem } from "../common/util";
+import { createElement, getNumericCssValue,
+ setCssValue, storageGetItem, storageSetItem } from "../common/util";
import { SequenceView } from "./sequence-view";
import { Interval } from "../interval";
-import { ChildRange, Range, SequenceBlock } from "../phases/sequence-phase";
+import { ChildRange, Range, RangeToolTip, SequenceBlock } from "../phases/sequence-phase";
// This class holds references to the HTMLElements that represent each cell.
class Grid {
@@ -103,11 +104,20 @@ class IntervalElementsAccessor {
// A number of css variables regarding dimensions of HTMLElements are required by RangeView.
class CSSVariables {
positionWidth: number;
+ positionBorder: number;
blockBorderWidth: number;
+ flippedPositionHeight: number;
constructor() {
this.positionWidth = getNumericCssValue("--range-position-width");
+ this.positionBorder = getNumericCssValue("--range-position-border");
this.blockBorderWidth = getNumericCssValue("--range-block-border");
+ this.flippedPositionHeight = getNumericCssValue("--range-flipped-position-height");
+ }
+
+ setVariables(numPositions: number, numRegisters: number) {
+ setCssValue("--range-num-positions", String(numPositions));
+ setCssValue("--range-num-registers", String(numRegisters));
}
}
@@ -144,14 +154,16 @@ class UserSettings {
this.reset(settingName);
toggleInput.disabled = false;
};
+ toggleEl.onclick = (e: MouseEvent) => { e.stopPropagation(); };
toggleEl.insertBefore(toggleInput, toggleEl.firstChild);
return toggleEl;
}
public reset(settingName: string): void {
const settingObject = this.settings.get(settingName);
- storageSetItem(this.getSettingKey(settingName), settingObject.value);
settingObject.resetFunction(settingObject.value);
+ storageSetItem(this.getSettingKey(settingName), settingObject.value);
+ window.dispatchEvent(new Event("resize"));
}
public get(settingName: string): boolean {
@@ -184,10 +196,12 @@ class UserSettings {
// Store the required data from the blocks JSON.
class BlocksData {
+ view: RangeView;
blockBorders: Set<number>;
blockInstructionCountMap: Map<number, number>;
- constructor(blocks: Array<SequenceBlock>) {
+ constructor(view: RangeView, blocks: Array<SequenceBlock>) {
+ this.view = view;
this.blockBorders = new Set<number>();
this.blockInstructionCountMap = new Map<number, number>();
for (const block of blocks) {
@@ -201,9 +215,23 @@ class BlocksData {
return ((position + 1) % C.POSITIONS_PER_INSTRUCTION) == 0;
}
- public isBlockBorder(position: number): boolean {
+ public isInstructionIdOnBlockBorder(instrId: number) {
+ return this.view.instructionRangeHandler.isLastInstruction(instrId)
+ || this.blockBorders.has(instrId);
+ }
+
+ public isBlockBorder(position: number) {
const border = Math.floor(position / C.POSITIONS_PER_INSTRUCTION);
- return this.isInstructionBorder(position) && this.blockBorders.has(border);
+ return this.view.instructionRangeHandler.isLastPosition(position)
+ || (this.isInstructionBorder(position) && this.blockBorders.has(border));
+ }
+
+ public isIndexInstructionBorder(index: number) {
+ return this.isInstructionBorder(this.view.instructionRangeHandler.getPositionFromIndex(index));
+ }
+
+ public isIndexBlockBorder(index: number) {
+ return this.isBlockBorder(this.view.instructionRangeHandler.getPositionFromIndex(index));
}
}
@@ -220,6 +248,7 @@ class Divs {
xAxisLabel: HTMLElement;
yAxisLabel: HTMLElement;
registerHeaders: HTMLElement;
+ registersTypeHeader: HTMLElement;
registers: HTMLElement;
// Assigned from RangeView.
@@ -228,23 +257,28 @@ class Divs {
yAxis: HTMLElement;
grid: HTMLElement;
- constructor(userSettings: UserSettings) {
+ constructor(userSettings: UserSettings, instructionRangeString: string) {
this.container = document.getElementById(C.RANGES_PANE_ID);
this.resizerBar = document.getElementById(C.RESIZER_RANGES_ID);
this.snapper = document.getElementById(C.SHOW_HIDE_RANGES_ID);
this.content = document.createElement("div");
- this.content.appendChild(this.elementForTitle(userSettings));
+ this.content.id = "ranges-content";
+ this.content.appendChild(this.elementForTitle(userSettings, instructionRangeString));
this.showOnLoad = document.createElement("div");
this.showOnLoad.style.visibility = "hidden";
this.content.appendChild(this.showOnLoad);
this.xAxisLabel = createElement("div", "range-header-label-x");
- this.xAxisLabel.innerText = "Blocks, Instructions, and Positions";
+ this.xAxisLabel.dataset.notFlipped = "Blocks, Instructions, and Positions";
+ this.xAxisLabel.dataset.flipped = "Registers";
+ this.xAxisLabel.innerText = this.xAxisLabel.dataset.notFlipped;
this.showOnLoad.appendChild(this.xAxisLabel);
this.yAxisLabel = createElement("div", "range-header-label-y");
- this.yAxisLabel.innerText = "Registers";
+ this.yAxisLabel.dataset.notFlipped = "Registers";
+ this.yAxisLabel.dataset.flipped = "Positions";
+ this.yAxisLabel.innerText = this.yAxisLabel.dataset.notFlipped;
this.showOnLoad.appendChild(this.yAxisLabel);
this.registerHeaders = createElement("div", "range-register-labels");
@@ -252,10 +286,10 @@ class Divs {
this.registerHeaders.appendChild(this.registers);
}
- public elementForTitle(userSettings: UserSettings): HTMLElement {
+ public elementForTitle(userSettings: UserSettings, instructionRangeString: string): HTMLElement {
const titleEl = createElement("div", "range-title-div");
const titleBar = createElement("div", "range-title");
- titleBar.appendChild(createElement("div", "", "Live Ranges"));
+ titleBar.appendChild(createElement("div", "", "Live Ranges for " + instructionRangeString));
const titleHelp = createElement("div", "range-title-help", "?");
titleHelp.title = "Each row represents a single TopLevelLiveRange (or two if deferred exists)."
+ "\nEach interval belongs to a LiveRange contained within that row's TopLevelLiveRange."
@@ -264,6 +298,7 @@ class Divs {
titleEl.appendChild(titleBar);
titleEl.appendChild(titleHelp);
titleEl.appendChild(userSettings.getToggleElement("landscapeMode", "Landscape Mode"));
+ titleEl.appendChild(userSettings.getToggleElement("flipped", "Switched Axes"));
return titleEl;
}
}
@@ -275,42 +310,64 @@ class RowConstructor {
this.view = view;
}
+ getGridTemplateRowsValueForGroupDiv(length) {
+ return `repeat(${length},calc(${this.view.cssVariables.positionWidth}ch +
+ ${2 * this.view.cssVariables.positionBorder}px))`;
+ }
+
+ getGridTemplateColumnsValueForInterval(length) {
+ const positionSize = (this.view.userSettings.get("flipped")
+ ? `${this.view.cssVariables.flippedPositionHeight}em`
+ : `${this.view.cssVariables.positionWidth}ch`);
+ return `repeat(${length},calc(${positionSize} + ${this.view.cssVariables.blockBorderWidth}px))`;
+ }
+
// Constructs the row of HTMLElements for grid while providing a callback for each position
// depending on whether that position is the start of an interval or not.
// RangePair is used to allow the two fixed register live ranges of normal and deferred to be
// easily combined into a single row.
- construct(grid: Grid, row: number, registerIndex: number, ranges: [Range, Range],
- getElementForEmptyPosition: (position: number) => HTMLElement,
- callbackForInterval: (position: number, interval: HTMLElement) => void): void {
- const positions = new Array<HTMLElement>(this.view.numPositions);
+ construct(grid: Grid, row: number, registerId: string, registerIndex: number,
+ ranges: [Range, Range], getElementForEmptyPosition: (position: number) => HTMLElement,
+ callbackForInterval: (position: number, interval: HTMLElement) => void): boolean {
// Construct all of the new intervals.
const intervalMap = this.elementsForIntervals(registerIndex, ranges);
- for (let position = 0; position < this.view.numPositions; ++position) {
- const interval = intervalMap.get(position);
+ if (intervalMap.size == 0) return false;
+ const positionsArray = new Array<HTMLElement>(this.view.instructionRangeHandler.numPositions);
+ const posOffset = this.view.instructionRangeHandler.getPositionFromIndex(0);
+ let blockId = this.view.instructionRangeHandler.getBlockIdFromIndex(0);
+ for (let column = 0; column < this.view.instructionRangeHandler.numPositions; ++column) {
+ const interval = intervalMap.get(column);
if (interval === undefined) {
- positions[position] = getElementForEmptyPosition(position);
+ positionsArray[column] = getElementForEmptyPosition(column);
+ this.view.selectionHandler.addCell(positionsArray[column], row, column + posOffset,
+ blockId, registerId);
+ if (this.view.blocksData.isBlockBorder(column + posOffset)) ++blockId;
} else {
- callbackForInterval(position, interval);
+ callbackForInterval(column, interval);
this.view.intervalsAccessor.addInterval(interval);
- const intervalPositionElements = this.getPositionElementsFromInterval(interval);
+ const innerWrapper = this.view.getInnerWrapperFromInterval(interval);
+ const intervalNodeId = interval.dataset.nodeId;
+ this.view.selectionHandler.addInterval(interval, innerWrapper, intervalNodeId, registerId);
+ const intervalPositionElements = innerWrapper.children;
for (let j = 0; j < intervalPositionElements.length; ++j) {
- // Point positionsArray to the new elements.
- positions[position + j] = (intervalPositionElements[j] as HTMLElement);
+ const intervalColumn = column + j;
+ // Point positions to the new elements.
+ positionsArray[intervalColumn] = (intervalPositionElements[j] as HTMLElement);
+ this.view.selectionHandler.addCell(positionsArray[intervalColumn], row,
+ intervalColumn + posOffset, blockId, registerId, intervalNodeId);
+ if (this.view.blocksData.isBlockBorder(intervalColumn + posOffset)) ++blockId;
}
- position += intervalPositionElements.length - 1;
+ column += intervalPositionElements.length - 1;
}
}
- grid.setRow(row, positions);
+ grid.setRow(row, positionsArray);
for (const range of ranges) {
if (!range) continue;
this.setUses(grid, row, range);
}
- }
-
- private getPositionElementsFromInterval(interval: HTMLElement): HTMLCollection {
- return interval.children[1].children;
+ return true;
}
// This is the main function used to build new intervals.
@@ -323,9 +380,14 @@ class RowConstructor {
for (const childRange of range.childRanges) {
const tooltip = childRange.getTooltip(registerIndex);
for (const [index, intervalNums] of childRange.intervals.entries()) {
- const interval = new Interval(intervalNums);
+ let interval = new Interval(intervalNums);
+ if (!this.view.instructionRangeHandler.showAllPositions) {
+ if (!this.view.instructionRangeHandler.isIntervalInRange(interval)) continue;
+ interval =
+ this.view.instructionRangeHandler.convertIntervalPositionsToIndexes(interval);
+ }
const intervalEl = this.elementForInterval(childRange, interval, tooltip,
- index, range.isDeferred);
+ index, range.isDeferred);
intervalMap.set(interval.start, intervalEl);
}
}
@@ -333,33 +395,39 @@ class RowConstructor {
return intervalMap;
}
- private elementForInterval(childRange: ChildRange, interval: Interval,
- tooltip: string, index: number, isDeferred: boolean): HTMLElement {
+ private elementForInterval(childRange: ChildRange, interval: Interval, tooltip: RangeToolTip,
+ index: number, isDeferred: boolean): HTMLElement
+ {
const intervalEl = createElement("div", "range-interval");
+ intervalEl.dataset.tooltip = tooltip.text;
- const title = `${childRange.id}:${index} ${tooltip}`;
+ const title = `${childRange.id}:${index} ${tooltip.text}`;
intervalEl.setAttribute("title", isDeferred ? `deferred: ${title}` : title);
- this.setIntervalColor(intervalEl, tooltip);
+ this.setIntervalColor(intervalEl, tooltip.text);
const intervalInnerWrapper = createElement("div", "range-interval-wrapper");
intervalEl.style.gridColumn = `${(interval.start + 1)} / ${(interval.end + 1)}`;
- intervalInnerWrapper.style.gridTemplateColumns = `repeat(${(interval.end - interval.start)}`
- + `,calc(${this.view.cssVariables.positionWidth}ch + `
- + `${this.view.cssVariables.blockBorderWidth}px)`;
-
- const intervalTextEl = this.elementForIntervalString(tooltip, interval.end - interval.start);
- intervalEl.appendChild(intervalTextEl);
+ const intervalLength = interval.end - interval.start;
+ intervalInnerWrapper.style.gridTemplateColumns =
+ this.getGridTemplateColumnsValueForInterval(intervalLength);
+ const intervalStringEls = this.elementsForIntervalString(tooltip.text, intervalLength);
+ intervalEl.appendChild(intervalStringEls.main);
+ intervalEl.appendChild(intervalStringEls.behind);
for (let i = interval.start; i < interval.end; ++i) {
const classes = "range-position range-interval-position range-empty" +
- (this.view.blocksData.isBlockBorder(i) ? " range-block-border"
- : this.view.blocksData.isInstructionBorder(i) ? " range-instr-border" : "");
-
+ (this.view.blocksData.isIndexBlockBorder(i)
+ ? " range-block-border"
+ : this.view.blocksData.isIndexInstructionBorder(i) ? " range-instr-border" : "");
const positionEl = createElement("div", classes, "_");
positionEl.style.gridColumn = String(i - interval.start + 1);
intervalInnerWrapper.appendChild(positionEl);
}
intervalEl.appendChild(intervalInnerWrapper);
+ // Either the tooltip represents the interval id, or a new id is required.
+ const intervalNodeId = tooltip.isId ? tooltip.text
+ : "interval-" + index + "-" + interval.start;
+ intervalEl.dataset.nodeId = intervalNodeId;
return intervalEl;
}
@@ -376,58 +444,159 @@ class RowConstructor {
}
}
- private elementForIntervalString(tooltip: string, numCells: number): HTMLElement {
+ private elementsForIntervalString(tooltip: string, numCells: number):
+ { main: HTMLElement, behind: HTMLElement } {
const spanEl = createElement("span", "range-interval-text");
- this.setIntervalString(spanEl, tooltip, numCells);
- return spanEl;
+ // Allows a cleaner display of the interval text when displayed vertically.
+ const spanElBehind = createElement("span", "range-interval-text range-interval-text-behind");
+ this.view.stringConstructor.setIntervalString(spanEl, spanElBehind, tooltip, numCells);
+ return { main: spanEl, behind: spanElBehind};
+ }
+
+ private setUses(grid: Grid, row: number, range: Range): void {
+ for (const liveRange of range.childRanges) {
+ if (!liveRange.uses) continue;
+ for (let use of liveRange.uses) {
+ if (!this.view.instructionRangeHandler.showAllPositions) {
+ if (!this.view.instructionRangeHandler.isPositionInRange(use)) continue;
+ use = this.view.instructionRangeHandler.getIndexFromPosition(use);
+ }
+ grid.getCell(row, use).classList.toggle("range-use", true);
+ }
+ }
+ }
+}
+
+// A simple storage class for the data used when constructing an interval string.
+class IntervalStringData {
+ mainString: string;
+ textLength: number;
+ paddingLeft: string;
+
+ constructor(tooltip: string) {
+ this.mainString = tooltip;
+ this.textLength = tooltip.length;
+ this.paddingLeft = null;
+ }
+}
+
+class StringConstructor {
+ view: RangeView;
+
+ intervalStringData: IntervalStringData;
+
+ constructor(view: RangeView) {
+ this.view = view;
+ }
+
+ public setRegisterString(registerName: string, isVirtual: boolean, regEl: HTMLElement) {
+ if (this.view.userSettings.get("flipped")) {
+ const nums = registerName.match(/\d+/);
+ const num = nums ? nums[0] : registerName.substring(1);
+ let str = num[num.length - 1];
+ for (let i = 2; i <= Math.max(this.view.maxLengthVirtualRegisterNumber, 2); ++i) {
+ const addition = num.length < i ? "<span class='range-transparent'>_</span>"
+ : num[num.length - i];
+ str = `${addition} ${str}`;
+ }
+ regEl.innerHTML = str;
+ } else if (!isVirtual) {
+ const span = "".padEnd(C.FIXED_REGISTER_LABEL_WIDTH - registerName.length, "_");
+ regEl.innerHTML = `HW - <span class='range-transparent'>${span}</span>${registerName}`;
+ } else {
+ regEl.innerText = registerName;
+ }
}
// Each interval displays a string of information about it.
- private setIntervalString(spanEl: HTMLElement, tooltip: string, numCells: number): void {
- const spacePerCell = this.view.cssVariables.positionWidth;
+ public setIntervalString(spanEl: HTMLElement,
+ spanElBehind: HTMLElement, tooltip: string, numCells: number): void {
+ const isFlipped = this.view.userSettings.get("flipped");
+ const spacePerCell = isFlipped ? 1 : this.view.cssVariables.positionWidth + 0.25;
// One character space is removed to accommodate for padding.
- const spaceAvailable = (numCells * spacePerCell) - 0.5;
- let intervalStr = tooltip;
- const length = tooltip.length;
+ const totalSpaceAvailable = (numCells * spacePerCell) - (isFlipped ? 0 : 0.5);
+ this.intervalStringData = new IntervalStringData(tooltip);
+ spanElBehind.innerHTML = "";
spanEl.style.width = null;
- let paddingLeft = null;
- // Add padding if possible
- if (length <= spaceAvailable) {
- paddingLeft = (length == spaceAvailable) ? "0.5ch" : "1ch";
- } else {
- intervalStr = "";
+
+ this.setIntervalStringPadding(spanEl, spanElBehind, totalSpaceAvailable, isFlipped);
+ if (this.intervalStringData.textLength > totalSpaceAvailable) {
+ this.intervalStringData.mainString = "";
+ spanElBehind.innerHTML = "";
}
- spanEl.style.paddingTop = null;
- spanEl.style.paddingLeft = paddingLeft;
- spanEl.innerHTML = intervalStr;
+ spanEl.innerHTML = this.intervalStringData.mainString;
}
- private setUses(grid: Grid, row: number, range: Range): void {
- for (const liveRange of range.childRanges) {
- if (!liveRange.uses) continue;
- for (const use of liveRange.uses) {
- grid.getCell(row, use).classList.toggle("range-use", true);
+ private setIntervalStringPadding(spanEl: HTMLElement, spanElBehind: HTMLElement,
+ totalSpaceAvailable: number, isFlipped: boolean) {
+ // Add padding at the start of the text if possible.
+ if (this.intervalStringData.textLength <= totalSpaceAvailable) {
+ if (isFlipped) {
+ spanEl.style.paddingTop =
+ this.intervalStringData.textLength == totalSpaceAvailable ? "0.25em" : "1em";
+ spanEl.style.paddingLeft = null;
+ } else {
+ this.intervalStringData.paddingLeft =
+ (this.intervalStringData.textLength == totalSpaceAvailable) ? "0.5ch" : "1ch";
+ spanEl.style.paddingTop = null;
}
+ } else {
+ spanEl.style.paddingTop = null;
+ }
+ if (spanElBehind.innerHTML.length > 0) {
+ // Apply same styling to spanElBehind as to spanEl.
+ spanElBehind.setAttribute("style", spanEl.getAttribute("style"));
+ spanElBehind.style.paddingLeft = this.intervalStringData.paddingLeft;
+ } else {
+ spanEl.style.paddingLeft = this.intervalStringData.paddingLeft;
+ }
+ }
+}
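
The fit test in setIntervalString above boils down to comparing the tooltip length with the space the cells provide. A standalone sketch of the same arithmetic (the helper name is made up; positionWidth stands in for the CSS variable used above):

function intervalTextFits(tooltip: string, numCells: number,
                          positionWidth: number, isFlipped: boolean): boolean {
  // Mirrors the space calculation above: one character per cell when flipped,
  // otherwise the position width plus 0.25ch, minus 0.5ch for padding.
  const spacePerCell = isFlipped ? 1 : positionWidth + 0.25;
  const totalSpaceAvailable = (numCells * spacePerCell) - (isFlipped ? 0 : 0.5);
  return tooltip.length <= totalSpaceAvailable;
}

// Example: 4 cells at a 3ch position width leave 12.5ch, enough for a
// 10-character tooltip, so the text is shown rather than blanked out.
console.assert(intervalTextFits("0123456789", 4, 3, false));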
+
+// A simple class to tally the total number of each type of register and store
+// the register prefixes for use in the register type header.
+class RegisterTypeHeaderData {
+ virtualCount: number;
+ generalCount: number;
+ floatCount: number;
+
+ generalPrefix: string;
+ floatPrefix: string;
+
+ constructor() {
+ this.virtualCount = 0;
+ this.generalCount = 0;
+ this.floatCount = 0;
+ this.generalPrefix = "x";
+ this.floatPrefix = "fp";
+ }
+
+ public countFixedRegister(registerName: string, ranges: [Range, Range]) {
+ const range = ranges[0] ? ranges[0] : ranges[1];
+ if (range.childRanges[0].isFloatingPoint()) {
+ ++(this.floatCount);
+ this.floatPrefix = this.floatPrefix == "fp" ? registerName.match(/\D+/)[0] : this.floatPrefix;
+ } else {
+ ++(this.generalCount);
+ this.generalPrefix = this.generalPrefix == "x" ? registerName[0] : this.generalPrefix;
}
}
}
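
The prefix bookkeeping above keeps the first fixed register name seen of each type. A small hedged sketch of the same extraction, using made-up register names:

// Non-digit prefix of a floating point register name, mirroring
// registerName.match(/\D+/)[0] above; the names below are examples only.
const floatPrefixOf = (registerName: string): string => registerName.match(/\D+/)[0];

console.assert(floatPrefixOf("fp3") === "fp");
console.assert(floatPrefixOf("d7") === "d");
// General registers just contribute their first character, e.g. "r" from "r11".
console.assert("r11"[0] === "r");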
class RangeViewConstructor {
view: RangeView;
- gridTemplateColumns: string;
grid: Grid;
+ registerTypeHeaderData: RegisterTypeHeaderData;
// Group the rows in divs to make hiding/showing divs more efficient.
currentGroup: HTMLElement;
currentPlaceholderGroup: HTMLElement;
constructor(rangeView: RangeView) {
this.view = rangeView;
+ this.registerTypeHeaderData = new RegisterTypeHeaderData();
}
public construct(): void {
- this.gridTemplateColumns = `repeat(${this.view.numPositions}`
- + `,calc(${this.view.cssVariables.positionWidth}ch + `
- + `${this.view.cssVariables.blockBorderWidth}px)`;
this.grid = new Grid();
this.view.gridAccessor.addGrid(this.grid);
@@ -448,6 +617,10 @@ class RangeViewConstructor {
this.resetGroups();
this.addFixedRanges(this.addVirtualRanges(0));
+
+ this.view.divs.registersTypeHeader = this.elementForRegisterTypeHeader();
+ this.view.divs.registerHeaders.insertBefore(this.view.divs.registersTypeHeader,
+ this.view.divs.registers);
}
// The following three functions are for constructing the groups which the rows are contained
@@ -462,7 +635,14 @@ class RangeViewConstructor {
this.currentPlaceholderGroup = createElement("div", "range-positions-group");
}
- private appendGroupsToGrid(): void {
+ private appendGroupsToGrid(row: number): void {
+ const endRow = row + 2;
+ const numRows = this.currentPlaceholderGroup.children.length;
+ this.currentGroup.style.gridRow = `${endRow - numRows} / ${numRows == 1 ? "auto" : endRow}`;
+ this.currentPlaceholderGroup.style.gridRow = this.currentGroup.style.gridRow;
+ this.currentGroup.style.gridTemplateRows =
+ this.view.rowConstructor.getGridTemplateRowsValueForGroupDiv(numRows);
+ this.currentPlaceholderGroup.style.gridTemplateRows = this.currentGroup.style.gridTemplateRows;
this.view.divs.grid.appendChild(this.currentPlaceholderGroup);
this.view.divs.grid.appendChild(this.currentGroup);
}
@@ -473,56 +653,62 @@ class RangeViewConstructor {
createElement("div", "range-positions range-positions-placeholder", "_"));
if ((row + 1) % C.ROW_GROUP_SIZE == 0) {
- this.appendGroupsToGrid();
+ this.appendGroupsToGrid(row);
this.resetGroups();
}
}
private addVirtualRanges(row: number): number {
- const source = this.view.sequenceView.sequence.registerAllocation;
- for (const [registerIndex, range] of source.liveRanges.entries()) {
- if (!range) continue;
- const registerName = this.virtualRegisterName(registerIndex);
- const registerEl = this.elementForVirtualRegister(registerName);
- this.addRowToGroup(row, this.elementForRow(row, registerIndex, [range, undefined]));
- this.view.divs.registers.appendChild(registerEl);
- ++row;
- }
- return row;
- }
-
- private virtualRegisterName(registerIndex: number): string {
- return `v${registerIndex}`;
+ return this.view.instructionRangeHandler.forEachLiveRange(row,
+ (registerIndex: number, row: number, registerName: string, range: Range) => {
+ const registerId = C.VIRTUAL_REGISTER_ID_PREFIX + registerName;
+ const rowEl = this.elementForRow(row, registerId, registerIndex, [range, undefined]);
+ if (rowEl) {
+ const registerEl = this.elementForRegister(row, registerName, registerId, true);
+ this.addRowToGroup(row, rowEl);
+ this.view.divs.registers.appendChild(registerEl);
+ ++(this.registerTypeHeaderData.virtualCount);
+ return true;
+ }
+ return false;
+ });
}
private addFixedRanges(row: number): void {
- row = this.view.sequenceView.sequence.registerAllocation.forEachFixedRange(row,
+ row = this.view.instructionRangeHandler.forEachFixedRange(row,
(registerIndex: number, row: number, registerName: string, ranges: [Range, Range]) => {
- const registerEl = this.elementForFixedRegister(registerName);
- this.addRowToGroup(row, this.elementForRow(row, registerIndex, ranges));
- this.view.divs.registers.appendChild(registerEl);
+ const rowEl = this.elementForRow(row, registerName, registerIndex, ranges);
+ if (rowEl) {
+ this.registerTypeHeaderData.countFixedRegister(registerName, ranges);
+ const registerEl = this.elementForRegister(row, registerName, registerName, false);
+ this.addRowToGroup(row, rowEl);
+ this.view.divs.registers.appendChild(registerEl);
+ return true;
+ }
+ return false;
});
if (row % C.ROW_GROUP_SIZE != 0) {
- this.appendGroupsToGrid();
+ this.appendGroupsToGrid(row - 1);
}
}
// Each row of positions and intervals associated with a register is contained in a single
// HTMLElement. RangePair is used to allow the two fixed register live ranges of normal and
// deferred to be easily combined into a single row.
- private elementForRow(row: number, registerIndex: number, ranges: [Range, Range]): HTMLElement {
+ private elementForRow(row: number, registerId: string, registerIndex: number,
+ ranges: [Range, Range]): HTMLElement {
const rowEl = createElement("div", "range-positions");
- rowEl.style.gridTemplateColumns = this.gridTemplateColumns;
- const getElementForEmptyPosition = (position: number) => {
+ const getElementForEmptyPosition = (column: number) => {
+ const position = this.view.instructionRangeHandler.getPositionFromIndex(column);
const blockBorder = this.view.blocksData.isBlockBorder(position);
const classes = "range-position range-empty " + (blockBorder
? "range-block-border" : this.view.blocksData.isInstructionBorder(position)
? "range-instr-border" : "range-position-border");
const positionEl = createElement("div", classes, "_");
- positionEl.style.gridColumn = String(position + 1);
+ positionEl.style.gridColumn = String(column + 1);
rowEl.appendChild(positionEl);
return positionEl;
};
@@ -531,26 +717,54 @@ class RangeViewConstructor {
rowEl.appendChild(interval);
};
- this.view.rowConstructor.construct(this.grid, row, registerIndex, ranges,
- getElementForEmptyPosition, callbackForInterval);
-
- return rowEl;
+ // Only construct the row if it has any intervals.
+ if (this.view.rowConstructor.construct(this.grid, row, registerId, registerIndex, ranges,
+ getElementForEmptyPosition, callbackForInterval)) {
+ this.view.selectionHandler.addRow(rowEl, registerId);
+ return rowEl;
+ }
+ return undefined;
}
- private elementForVirtualRegister(registerName: string): HTMLElement {
- const regEl = createElement("div", "range-reg", registerName);
+ private elementForRegister(row: number, registerName: string,
+ registerId: string, isVirtual: boolean) {
+ const regEl = createElement("div", "range-reg");
+ this.view.stringConstructor.setRegisterString(registerName, isVirtual, regEl);
+ regEl.dataset.virtual = isVirtual.toString();
regEl.setAttribute("title", registerName);
+ regEl.style.gridColumn = String(row + 1);
+ this.view.selectionHandler.addRegister(regEl, registerId, row);
return regEl;
}
- private elementForFixedRegister(registerName: string): HTMLElement {
- let text = registerName;
- const span = "".padEnd(C.FIXED_REGISTER_LABEL_WIDTH - text.length, "_");
- text = "HW - <span class='range-transparent'>" + span + "</span>" + text;
- const regEl = createElement("div", "range-reg");
- regEl.innerHTML = text;
- regEl.setAttribute("title", registerName);
- return regEl;
+ private elementForRegisterTypeHeader() {
+ let column = 1;
+ const addTypeHeader = (container: HTMLElement, count: number,
+ textOptions: {max: string, med: string, min: string}) => {
+ if (count) {
+ const element = createElement("div", "range-type-header");
+ element.setAttribute("title", textOptions.max);
+ element.style.gridColumn = column + " / " + (column + count);
+ column += count;
+ element.dataset.count = String(count);
+ element.dataset.max = textOptions.max;
+ element.dataset.med = textOptions.med;
+ element.dataset.min = textOptions.min;
+ container.appendChild(element);
+ }
+ };
+ const registerTypeHeaderEl = createElement("div", "range-registers-type range-hidden");
+ addTypeHeader(registerTypeHeaderEl, this.registerTypeHeaderData.virtualCount,
+ {max: "virtual registers", med: "virtual", min: "v"});
+ addTypeHeader(registerTypeHeaderEl, this.registerTypeHeaderData.generalCount,
+ { max: "general registers",
+ med: "general",
+ min: this.registerTypeHeaderData.generalPrefix });
+ addTypeHeader(registerTypeHeaderEl, this.registerTypeHeaderData.floatCount,
+ { max: "floating point registers",
+ med: "floating point",
+ min: this.registerTypeHeaderData.floatPrefix});
+ return registerTypeHeaderEl;
}
// The header element contains the three headers for the LifeTimePosition axis.
@@ -576,74 +790,104 @@ class RangeViewConstructor {
private elementForBlockHeader(): HTMLElement {
const headerEl = createElement("div", "range-block-ids");
- headerEl.style.gridTemplateColumns = this.gridTemplateColumns;
- let blockIndex = 0;
- for (let i = 0; i < this.view.sequenceView.numInstructions;) {
- const instrCount = this.view.blocksData.blockInstructionCountMap.get(blockIndex);
- headerEl.appendChild(this.elementForBlockIndex(blockIndex, i, instrCount));
- ++blockIndex;
- i += instrCount;
+ let blockId = 0;
+ const lastPos = this.view.instructionRangeHandler.getLastPosition();
+ for (let position = 0; position <= lastPos;) {
+ const instrCount = this.view.blocksData.blockInstructionCountMap.get(blockId);
+ if (this.view.instructionRangeHandler.showAllPositions) {
+ headerEl.appendChild(this.elementForBlock(blockId, position, instrCount));
+ } else {
+ let blockInterval =
+ new Interval([position, position + (C.POSITIONS_PER_INSTRUCTION * instrCount)]);
+ if (this.view.instructionRangeHandler.isIntervalInRange(blockInterval)) {
+ blockInterval = this.view.instructionRangeHandler
+ .convertBlockPositionsToIndexes(blockId, blockInterval);
+ headerEl.appendChild(this.elementForBlock(blockId, blockInterval.start,
+ (blockInterval.end - blockInterval.start) / C.POSITIONS_PER_INSTRUCTION));
+ }
+ }
+ ++blockId;
+ position += instrCount * C.POSITIONS_PER_INSTRUCTION;
}
return headerEl;
}
- private elementForBlockIndex(index: number, firstInstruction: number, instrCount: number):
+ private elementForBlock(blockId: number, firstColumn: number, instrCount: number):
HTMLElement {
- const str = `B${index}`;
const element =
- createElement("div", "range-block-id range-header-element range-block-border", str);
+ createElement("div", "range-block-id range-header-element range-block-border");
+ const str = `B${blockId}`;
+ const idEl = createElement("span", "range-block-id-number", str);
+ const centre = instrCount * C.POSITIONS_PER_INSTRUCTION;
+ idEl.style.gridRow = `${centre} / ${centre + 1}`;
+ element.appendChild(idEl);
element.setAttribute("title", str);
- const firstGridCol = (firstInstruction * C.POSITIONS_PER_INSTRUCTION) + 1;
+ element.dataset.instrCount = String(instrCount);
+ // gridColumns start at 1 rather than 0.
+ const firstGridCol = firstColumn + 1;
const lastGridCol = firstGridCol + (instrCount * C.POSITIONS_PER_INSTRUCTION);
element.style.gridColumn = `${firstGridCol} / ${lastGridCol}`;
+ element.style.gridTemplateRows = `repeat(${8 * instrCount},
+ calc((${this.view.cssVariables.flippedPositionHeight}em +
+ ${this.view.cssVariables.blockBorderWidth}px)/2))`;
+ this.view.selectionHandler.addBlock(element, blockId);
return element;
}
private elementForInstructionHeader(): HTMLElement {
const headerEl = createElement("div", "range-instruction-ids");
- headerEl.style.gridTemplateColumns = this.gridTemplateColumns;
-
- for (let i = 0; i < this.view.sequenceView.numInstructions; ++i) {
- headerEl.appendChild(this.elementForInstructionIndex(i));
+ let blockId = this.view.instructionRangeHandler.getBlockIdFromIndex(0);
+ let instrId = this.view.instructionRangeHandler.getInstructionIdFromIndex(0);
+ const instrLimit = instrId + this.view.instructionRangeHandler.numInstructions;
+ for (; instrId < instrLimit; ++instrId) {
+ headerEl.appendChild(this.elementForInstruction(instrId, blockId));
+ if (this.view.blocksData.isInstructionIdOnBlockBorder(instrId)) ++blockId;
}
return headerEl;
}
- private elementForInstructionIndex(index: number): HTMLElement {
- const isBlockBorder = this.view.blocksData.blockBorders.has(index);
+ private elementForInstruction(instrId: number, blockId: number): HTMLElement {
+ const isBlockBorder = this.view.blocksData.isInstructionIdOnBlockBorder(instrId);
const classes = "range-instruction-id range-header-element "
+ (isBlockBorder ? "range-block-border" : "range-instr-border");
-
- const element = createElement("div", classes, String(index));
- element.setAttribute("title", String(index));
- const firstGridCol = (index * C.POSITIONS_PER_INSTRUCTION) + 1;
+ const element = createElement("div", classes);
+ element.appendChild(createElement("span", "range-instruction-id-number", String(instrId)));
+ element.setAttribute("title", String(instrId));
+ const instrIndex = this.view.instructionRangeHandler.getInstructionIndex(instrId);
+ const firstGridCol = (instrIndex * C.POSITIONS_PER_INSTRUCTION) + 1;
element.style.gridColumn = `${firstGridCol} / ${(firstGridCol + C.POSITIONS_PER_INSTRUCTION)}`;
+ this.view.selectionHandler.addInstruction(element, instrId, blockId);
return element;
}
private elementForPositionHeader(): HTMLElement {
const headerEl = createElement("div", "range-positions range-positions-header");
- headerEl.style.gridTemplateColumns = this.gridTemplateColumns;
- for (let i = 0; i < this.view.numPositions; ++i) {
- headerEl.appendChild(this.elementForPositionIndex(i));
+ let blockId = this.view.instructionRangeHandler.getBlockIdFromIndex(0);
+ let position = this.view.instructionRangeHandler.getPositionFromIndex(0);
+ const lastPos = this.view.instructionRangeHandler.getLastPosition();
+ for (; position <= lastPos; ++position) {
+ const isBlockBorder = this.view.blocksData.isBlockBorder(position);
+ headerEl.appendChild(this.elementForPosition(position, blockId, isBlockBorder));
+ if (isBlockBorder) ++blockId;
}
return headerEl;
}
- private elementForPositionIndex(index: number): HTMLElement {
- const isBlockBorder = this.view.blocksData.isBlockBorder(index);
+ private elementForPosition(position: number, blockId: number,
+ isBlockBorder: boolean): HTMLElement {
const classes = "range-position range-header-element " +
(isBlockBorder ? "range-block-border"
- : this.view.blocksData.isInstructionBorder(index) ? "range-instr-border"
+ : this.view.blocksData.isInstructionBorder(position) ? "range-instr-border"
: "range-position-border");
- const element = createElement("div", classes, "" + index);
- element.setAttribute("title", "" + index);
+ const element = createElement("div", classes, String(position));
+ element.setAttribute("title", String(position));
+ this.view.selectionHandler.addPosition(element, position, blockId);
return element;
}
@@ -686,33 +930,34 @@ class PhaseChangeHandler {
const currentGrid = this.view.gridAccessor.getAnyGrid();
const newGrid = new Grid();
this.view.gridAccessor.addGrid(newGrid);
- const source = this.view.sequenceView.sequence.registerAllocation;
let row = 0;
- for (const [registerIndex, range] of source.liveRanges.entries()) {
- if (!range) continue;
- this.addnewIntervalsInRange(currentGrid, newGrid, row, registerIndex, [range, undefined]);
- ++row;
- }
+ row = this.view.instructionRangeHandler.forEachLiveRange(row,
+ (registerIndex: number, row: number, registerName: string, range: Range) => {
+ this.addnewIntervalsInRange(currentGrid, newGrid, row,
+ C.VIRTUAL_REGISTER_ID_PREFIX + registerName, registerIndex, [range, undefined]);
+ return true;
+ });
- this.view.sequenceView.sequence.registerAllocation.forEachFixedRange(row,
- (registerIndex, row, _, ranges) => {
- this.addnewIntervalsInRange(currentGrid, newGrid, row, registerIndex, ranges);
+ this.view.instructionRangeHandler.forEachFixedRange(row,
+ (registerIndex, row, registerName, ranges) => {
+ this.addnewIntervalsInRange(currentGrid, newGrid, row, registerName, registerIndex, ranges);
+ return true;
});
}
- private addnewIntervalsInRange(currentGrid: Grid, newGrid: Grid, row: number,
+ private addnewIntervalsInRange(currentGrid: Grid, newGrid: Grid, row: number, registerId: string,
registerIndex: number, ranges: [Range, Range]): void {
const numReplacements = new Map<HTMLElement, number>();
- const getElementForEmptyPosition = (position: number) => {
- return currentGrid.getCell(row, position);
+ const getElementForEmptyPosition = (column: number) => {
+ return currentGrid.getCell(row, column);
};
// Inserts new interval beside existing intervals.
- const callbackForInterval = (position: number, interval: HTMLElement) => {
+ const callbackForInterval = (column: number, interval: HTMLElement) => {
// Overlapping intervals are placed beside each other and the relevant ones displayed.
- let currentInterval = currentGrid.getInterval(row, position);
+ let currentInterval = currentGrid.getInterval(row, column);
// The number of intervals already inserted is tracked so that the inserted intervals
// are ordered correctly.
const intervalsAlreadyInserted = numReplacements.get(currentInterval);
@@ -729,11 +974,461 @@ class PhaseChangeHandler {
currentInterval.insertAdjacentElement("afterend", interval);
};
- this.view.rowConstructor.construct(newGrid, row, registerIndex, ranges,
+ this.view.rowConstructor.construct(newGrid, row, registerId, registerIndex, ranges,
getElementForEmptyPosition, callbackForInterval);
}
}
+// Manages the limit on how many instructions are shown in the grid.
+class InstructionRangeHandler {
+ view: RangeView;
+
+ numPositions: number;
+ numInstructions: number;
+ showAllPositions: boolean;
+
+ private positionRange: [number, number];
+ private instructionRange: [number, number];
+ private blockRange: [number, number];
+
+ constructor(view: RangeView, firstInstr: number, lastInstr: number) {
+ this.view = view;
+ this.showAllPositions = false;
+ this.blockRange = [0, -1];
+ this.instructionRange = this.getValidRange(firstInstr, lastInstr);
+ if (this.instructionRange[0] == 0
+ && this.instructionRange[1] == this.view.sequenceView.numInstructions) {
+ this.showAllPositions = true;
+ }
+ this.updateInstructionRange();
+ }
+
+ public isNewRangeViewRequired(firstInstr: number, lastInstr: number): boolean {
+ const validRange = this.getValidRange(firstInstr, lastInstr);
+ return (this.instructionRange[0] != validRange[0])
+ || (this.instructionRange[1] != validRange[1]);
+ }
+
+ public getValidRange(firstInstr: number, lastInstr: number): [number, number] {
+ const maxInstructions = Math.floor(C.MAX_NUM_POSITIONS / C.POSITIONS_PER_INSTRUCTION);
+ const validRange = [firstInstr, lastInstr + 1] as [number, number];
+ if (isNaN(lastInstr)) {
+ validRange[1] = this.view.sequenceView.numInstructions;
+ }
+ if (isNaN(firstInstr)) {
+ validRange[0] = (isNaN(lastInstr) || validRange[1] < maxInstructions)
+ ? 0 : validRange[1] - maxInstructions;
+ }
+ if (!this.isValidRange(validRange[0], validRange[1])) {
+ console.warn("Invalid range: displaying default view.");
+ validRange[0] = 0;
+ validRange[1] = this.view.sequenceView.numInstructions;
+ }
+ const rangeLength = validRange[1] - validRange[0];
+ if (C.POSITIONS_PER_INSTRUCTION * rangeLength > C.MAX_NUM_POSITIONS) {
+ validRange[1] = validRange[0] + maxInstructions;
+ console.warn("Cannot display more than " + maxInstructions
+ + " instructions in the live ranges grid at one time.");
+ }
+ return validRange;
+ }
+
+ public isValidRange(firstInstr: number, instrLimit: number): boolean {
+ return 0 <= firstInstr && firstInstr < instrLimit
+ && instrLimit <= this.view.sequenceView.numInstructions;
+ }
+
+ public updateInstructionRange(): void {
+ this.numInstructions = this.showAllPositions
+ ? this.view.sequenceView.numInstructions
+ : this.instructionRange[1] - this.instructionRange[0];
+ this.numPositions = this.numInstructions * C.POSITIONS_PER_INSTRUCTION;
+ this.positionRange = [C.POSITIONS_PER_INSTRUCTION * this.instructionRange[0],
+ C.POSITIONS_PER_INSTRUCTION * this.instructionRange[1]];
+ }
+
+ public getInstructionRangeString(): string {
+ if (this.showAllPositions) {
+ return "all instructions";
+ } else {
+ return "instructions [" + this.instructionRange[0]
+ + ", " + (this.instructionRange[1] - 1) + "]";
+ }
+ }
+
+ public getLastPosition(): number {
+ return this.positionRange[1] - 1;
+ }
+
+ public getPositionFromIndex(index: number): number {
+ return index + this.positionRange[0];
+ }
+
+ public getIndexFromPosition(position: number): number {
+ return position - this.positionRange[0];
+ }
+
+ public getInstructionIdFromIndex(index: number): number {
+ return index + this.instructionRange[0];
+ }
+
+ public getInstructionIndex(id: number): number {
+ return id - this.instructionRange[0];
+ }
+
+ public getBlockIdFromIndex(index: number): number {
+ return index + this.blockRange[0];
+ }
+
+ public getBlockIndex(id: number): number {
+ return id - this.blockRange[0];
+ }
+
+ public isPositionInRange(position: number): boolean {
+ return position >= this.positionRange[0] && position < this.positionRange[1];
+ }
+
+ public isIntervalInRange(interval: Interval): boolean {
+ return interval.start < this.positionRange[1] && interval.end > this.positionRange[0];
+ }
+
+ public convertIntervalPositionsToIndexes(interval: Interval): Interval {
+ return new Interval([Math.max(0, interval.start - this.positionRange[0]),
+ Math.min(this.numPositions, interval.end - this.positionRange[0])]);
+ }
+
+ public convertBlockPositionsToIndexes(blockIndex: number, interval: Interval): Interval {
+ if (this.blockRange[1] < 0) this.blockRange[0] = blockIndex;
+ this.blockRange[1] = blockIndex + 1;
+ return this.convertIntervalPositionsToIndexes(interval);
+ }
+
+ public isLastPosition(position: number): boolean {
+ return !this.showAllPositions && (position == this.getLastPosition());
+ }
+
+ public isLastInstruction(instrId: number): boolean {
+ return !this.showAllPositions && (instrId == this.instructionRange[1] - 1);
+ }
+
+ public forEachLiveRange(row: number, callback: (registerIndex: number, row: number,
+ registerName: string, range: Range) => boolean): number {
+ const source = this.view.sequenceView.sequence.registerAllocation;
+ for (const [registerIndex, range] of source.liveRanges.entries()) {
+ if (!range ||
+ (!this.showAllPositions &&
+ (range.instructionRange[0] >= this.positionRange[1]
+ || this.positionRange[0] >= range.instructionRange[1]))) {
+ continue;
+ }
+ if (callback(registerIndex, row, `v${registerIndex}`, range)) {
+ ++row;
+ }
+ }
+ return row;
+ }
+
+ public forEachFixedRange(row: number, callback: (registerIndex: number, row: number,
+ registerName: string,
+ ranges: [Range, Range]) => boolean): number {
+ const forEachRangeInMap = (rangeMap: Array<Range>) => {
+ // There are two fixed live ranges for each register, one for normal, another for deferred.
+ // These are combined into a single row.
+ const fixedRegisterMap = new Map<string, {registerIndex: number, ranges: [Range, Range]}>();
+ for (const [registerIndex, range] of rangeMap.entries()) {
+ if (!range ||
+ (!this.showAllPositions &&
+ (range.instructionRange[0] >= this.positionRange[1]
+ || this.positionRange[0] >= range.instructionRange[1]))) {
+ continue;
+ }
+ const registerName = range.fixedRegisterName();
+ if (fixedRegisterMap.has(registerName)) {
+ const entry = fixedRegisterMap.get(registerName);
+ entry.ranges[1] = range;
+ // Only use the deferred register index if no normal index exists.
+ if (!range.isDeferred) {
+ entry.registerIndex = registerIndex;
+ }
+ } else {
+ fixedRegisterMap.set(registerName, {registerIndex, ranges: [range, undefined]});
+ }
+ }
+ // Sort the registers by number.
+ const sortedMap = new Map([...fixedRegisterMap.entries()].sort(([nameA, _], [nameB, __]) => {
+ if (nameA.length > nameB.length) {
+ return 1;
+ } else if (nameA.length < nameB.length) {
+ return -1;
+ } else if (nameA > nameB) {
+ return 1;
+ } else if (nameA < nameB) {
+ return -1;
+ }
+ return 0;
+ }));
+
+ for (const [registerName, {ranges, registerIndex}] of sortedMap) {
+ if (callback(-registerIndex - 1, row, registerName, ranges)) {
+ ++row;
+ }
+ }
+ };
+
+ const source = this.view.sequenceView.sequence.registerAllocation;
+ forEachRangeInMap(source.fixedLiveRanges);
+ forEachRangeInMap(source.fixedDoubleLiveRanges);
+
+ return row;
+ }
+}
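
The index and position conversions above are plain offsets from the start of the visible window. A self-contained sketch, assuming POSITIONS_PER_INSTRUCTION is 4 purely for the example (the real constant lives in the C module):

const POSITIONS_PER_INSTRUCTION = 4;                  // assumed for the example
const instructionRange: [number, number] = [10, 20];  // instructions 10..19 visible
const positionRange: [number, number] = [
  POSITIONS_PER_INSTRUCTION * instructionRange[0],    // 40
  POSITIONS_PER_INSTRUCTION * instructionRange[1],    // 80
];

const getPositionFromIndex = (index: number) => index + positionRange[0];
const getIndexFromPosition = (position: number) => position - positionRange[0];
const isPositionInRange = (position: number) =>
  position >= positionRange[0] && position < positionRange[1];

console.assert(getPositionFromIndex(0) === 40);   // first visible grid column
console.assert(getIndexFromPosition(44) === 4);   // a position inside instruction 11
console.assert(!isPositionInRange(80));           // one past the visible window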
+
+// This class works in tandem with the selectionHandlers defined in text-view.ts
+// rather than updating HTMLElements explicitly itself.
+class RangeViewSelectionHandler {
+ sequenceView: SequenceView;
+ rangeView: RangeView;
+
+ constructor(rangeView: RangeView) {
+ this.rangeView = rangeView;
+ this.sequenceView = this.rangeView.sequenceView;
+
+ // Clear all selections when container is clicked.
+ this.rangeView.divs.container.onclick = (e: MouseEvent) => {
+ if (!e.shiftKey) this.sequenceView.broker.broadcastClear(null);
+ };
+ }
+
+ public addBlock(element: HTMLElement, id: number): void {
+ element.onclick = (e: MouseEvent) => {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ this.clear();
+ }
+ this.select(null, null, [id], true);
+ };
+ this.sequenceView.addHtmlElementForBlockId(id, element);
+ this.sequenceView.addHtmlElementForBlockId(this.sequenceView.getSubId(id), element);
+ }
+
+ public addInstruction(element: HTMLElement, id: number, blockId: number): void {
+ // Select the block which contains the instruction, and all positions and cells
+ // that are within this instruction.
+ element.onclick = (e: MouseEvent) => {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ this.clear();
+ }
+ this.select(null, [id], [this.sequenceView.getSubId(blockId)], true);
+ };
+ this.sequenceView.addHtmlElementForBlockId(blockId, element);
+ this.sequenceView.addHtmlElementForInstructionId(id, element);
+ this.sequenceView.addHtmlElementForInstructionId(this.sequenceView.getSubId(id), element);
+ }
+
+ public addPosition(element: HTMLElement, position: number, blockId: number): void {
+ const instrId = Math.floor(position / C.POSITIONS_PER_INSTRUCTION);
+ // Select the block and instruction which contain this position.
+ element.onclick = (e: MouseEvent) => {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ this.clear();
+ }
+ this.select(["position-" + position], [this.sequenceView.getSubId(instrId)],
+ [this.sequenceView.getSubId(blockId)], true);
+ };
+ this.sequenceView.addHtmlElementForBlockId(blockId, element);
+ this.sequenceView.addHtmlElementForInstructionId(instrId, element);
+ this.sequenceView.addHtmlElementForNodeId("position-" + position, element);
+ }
+
+ public addRegister(element: HTMLElement, registerId: string, row: number): void {
+ const rowGroupIndex = (Math.floor(row / C.ROW_GROUP_SIZE) * 2) + 1;
+ element.onclick = (e: MouseEvent) => {
+ e.stopPropagation();
+ if (!this.canSelectRow(row, rowGroupIndex)) return;
+ if (!e.shiftKey) {
+ this.clear();
+ }
+ // The register also effectively selects the row.
+ this.select([registerId], null, null, true);
+ };
+ this.sequenceView.addHtmlElementForNodeId(registerId, element);
+ }
+
+ public addRow(element: HTMLElement, registerId: string): void {
+ // Highlight row when register is selected.
+ this.rangeView.sequenceView.addHtmlElementForNodeId(registerId, element);
+ }
+
+ public addInterval(intervalEl: HTMLElement, intervalInnerWrapperEl: HTMLElement,
+ intervalNodeId: string, registerId: string): void {
+ // Highlight interval when the interval is selected.
+ this.sequenceView.addHtmlElementForNodeId(intervalNodeId, intervalEl);
+ // Highlight inner wrapper when row is selected, allowing for different color highlighting.
+ this.sequenceView.addHtmlElementForNodeId(registerId, intervalInnerWrapperEl);
+ }
+
+ public addCell(element: HTMLElement, row: number, position: number,
+ blockId: number, registerId: string, intervalNodeId?: string): void {
+ const instrId = Math.floor(position / C.POSITIONS_PER_INSTRUCTION);
+ // Select the relevant row by the registerId, and the column by position.
+ // Also select the instruction and the block in which the position is in.
+ const select = [registerId, "position-" + position];
+ if (intervalNodeId) select.push(intervalNodeId);
+ const rowGroupIndex = (Math.floor(row / C.ROW_GROUP_SIZE) * 2) + 1;
+ element.onclick = (e: MouseEvent) => {
+ e.stopPropagation();
+ if (!this.canSelectRow(row, rowGroupIndex)) return;
+ if (!e.shiftKey) {
+ this.clear();
+ }
+ this.select(select, [this.sequenceView.getSubId(instrId)],
+ [this.sequenceView.getSubId(blockId)], true);
+ };
+ this.sequenceView.addHtmlElementForBlockId(blockId, element);
+ this.sequenceView.addHtmlElementForInstructionId(instrId, element);
+ this.sequenceView.addHtmlElementForNodeId("position-" + position, element);
+ }
+
+ private canSelectRow(row: number, rowGroupIndex: number): boolean {
+ // Don't select anything if the row group containing this row is hidden.
+ if (row >= 0
+ && this.rangeView.divs.grid.children[rowGroupIndex].classList.contains("range-hidden")) {
+ this.rangeView.scrollHandler.syncHidden();
+ return false;
+ }
+ return true;
+ }
+
+ // Don't call this multiple times in a row, or the SelectionMapsHandlers will clear their
+ // previous selections, causing the HTMLElements to not be deselected by select.
+ private clear(): void {
+ this.sequenceView.blockSelections.clearCurrent();
+ this.sequenceView.instructionSelections.clearCurrent();
+ this.sequenceView.nodeSelections.clearCurrent();
+ // Mark as cleared so that the HTMLElements are not updated on broadcastClear.
+ // The HTMLElements will be updated when select is called.
+ this.sequenceView.selectionCleared = true;
+ // broadcastClear calls brokeredClear on all SelectionHandlers except the one passed to it.
+ this.sequenceView.broker.broadcastClear(this.sequenceView.nodeSelectionHandler);
+ this.sequenceView.selectionCleared = false;
+ }
+
+ private select(nodeIds: Iterable<string>, instrIds: Iterable<number>,
+ blockIds: Iterable<number>, selected: boolean): void {
+ // Add nodeIds and blockIds to selections.
+ if (nodeIds) this.sequenceView.nodeSelections.current.select(nodeIds, selected);
+ if (instrIds) this.sequenceView.instructionSelections.current.select(instrIds, selected);
+ if (blockIds) this.sequenceView.blockSelections.current.select(blockIds, selected);
+ // Update the HTMLElements.
+ this.sequenceView.updateSelection(true);
+ if (nodeIds) {
+ // Broadcast selections to other SelectionHandlers.
+ this.sequenceView.broker.broadcastNodeSelect(this.sequenceView.nodeSelectionHandler,
+ this.sequenceView.nodeSelections.current.selectedKeys(), selected);
+ }
+ if (instrIds) {
+ this.sequenceView.broker.broadcastInstructionSelect(
+ this.sequenceView.registerAllocationSelectionHandler,
+ Array.from(this.sequenceView.instructionSelections.current.selectedKeysAsAbsNumbers()),
+ selected);
+ }
+ }
+}
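
Every click handler registered above follows the same clear-once-then-select pattern, with shift-click accumulating instead of clearing. A minimal sketch of that pattern (the handler shape here is illustrative, not the real interface):

function onGridElementClick(e: MouseEvent,
                            handler: { clear(): void; select(ids: Array<string>): void },
                            ids: Array<string>): void {
  e.stopPropagation();               // keep the container's clear-all handler from firing
  if (!e.shiftKey) handler.clear();  // clear exactly once per plain click
  handler.select(ids);               // then add the clicked ids to the selection
}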
+
+class DisplayResetter {
+ view: RangeView;
+ isFlipped: boolean;
+
+ constructor(view: RangeView) {
+ this.view = view;
+ }
+
+ // Toggling these classes allows most of the required changes to be handled in the CSS.
+ public updateClassesOnContainer(): void {
+ this.isFlipped = this.view.userSettings.get("flipped");
+ this.view.divs.container.classList.toggle("not_flipped", !this.isFlipped);
+ this.view.divs.container.classList.toggle("flipped", this.isFlipped);
+ }
+
+ public resetLandscapeMode(isInLandscapeMode: boolean): void {
+ // Used to communicate the setting to Resizer.
+ this.view.divs.container.dataset.landscapeMode =
+ isInLandscapeMode.toString();
+
+ window.dispatchEvent(new Event('resize'));
+ // Required to adjust scrollbar spacing.
+ setTimeout(() => {
+ window.dispatchEvent(new Event('resize'));
+ }, 100);
+ }
+
+ public resetFlipped(): void {
+ this.updateClassesOnContainer();
+ // Appending the HTMLElement removes it from its current position.
+ this.view.divs.wholeHeader.appendChild(this.isFlipped ? this.view.divs.registerHeaders
+ : this.view.divs.positionHeaders);
+ this.view.divs.yAxis.appendChild(this.isFlipped ? this.view.divs.positionHeaders
+ : this.view.divs.registerHeaders);
+ this.resetLayout();
+ // Set the label text appropriately.
+ this.view.divs.xAxisLabel.innerText = this.isFlipped
+ ? this.view.divs.xAxisLabel.dataset.flipped
+ : this.view.divs.xAxisLabel.dataset.notFlipped;
+ this.view.divs.yAxisLabel.innerText = this.isFlipped
+ ? this.view.divs.yAxisLabel.dataset.flipped
+ : this.view.divs.yAxisLabel.dataset.notFlipped;
+ }
+
+ private resetLayout(): void {
+ this.resetRegisters();
+ this.resetIntervals();
+ }
+
+ private resetRegisters(): void {
+ // Reset register strings.
+ for (const regNode of this.view.divs.registers.childNodes) {
+ const regEl = regNode as HTMLElement;
+ const registerName = regEl.getAttribute("title");
+ this.view.stringConstructor.setRegisterString(registerName,
+ regEl.dataset.virtual == "true", regEl);
+ }
+
+ // registerTypeHeader is only displayed when the axes are switched.
+ this.view.divs.registersTypeHeader.classList.toggle("range-hidden", !this.isFlipped);
+ if (this.isFlipped) {
+ for (const typeHeader of this.view.divs.registersTypeHeader.children) {
+ const element = (typeHeader as HTMLElement);
+ const regCount = parseInt(element.dataset.count, 10);
+ const spaceAvailable = regCount * Math.floor(this.view.cssVariables.positionWidth);
+ // The more space available, the longer the header text can be.
+ if (spaceAvailable > element.dataset.max.length) {
+ element.innerText = element.dataset.max;
+ } else if (spaceAvailable > element.dataset.med.length) {
+ element.innerText = element.dataset.med;
+ } else {
+ element.innerText = element.dataset.min;
+ }
+ }
+ }
+ }
+
+ private resetIntervals(): void {
+ this.view.intervalsAccessor.forEachInterval((_, interval) => {
+ const intervalEl = interval as HTMLElement;
+ const spanEl = intervalEl.children[0] as HTMLElement;
+ const spanElBehind = intervalEl.children[1] as HTMLElement;
+ const intervalLength = this.view.getInnerWrapperFromInterval(interval).children.length;
+ this.view.stringConstructor.setIntervalString(spanEl, spanElBehind,
+ intervalEl.dataset.tooltip, intervalLength);
+ const intervalInnerWrapper = intervalEl.children[2] as HTMLElement;
+ intervalInnerWrapper.style.gridTemplateColumns =
+ this.view.rowConstructor.getGridTemplateColumnsValueForInterval(intervalLength);
+ });
+ }
+}
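
resetRegisters above picks the longest register-type label that fits the space the columns provide. A hedged sketch of that choice, with illustrative widths:

function pickHeaderText(spaceAvailable: number,
                        labels: { max: string; med: string; min: string }): string {
  if (spaceAvailable > labels.max.length) return labels.max;
  if (spaceAvailable > labels.med.length) return labels.med;
  return labels.min;
}

// With room for 20 characters, "virtual registers" (17 characters) fits.
console.assert(pickHeaderText(20, { max: "virtual registers", med: "virtual", min: "v" })
    === "virtual registers");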
+
enum ToSync {
LEFT,
TOP
@@ -741,7 +1436,7 @@ enum ToSync {
// Handles saving and syncing the scroll positions of the grid.
class ScrollHandler {
- divs: Divs;
+ view: RangeView;
scrollTop: number;
scrollLeft: number;
scrollTopTimeout: NodeJS.Timeout;
@@ -749,19 +1444,24 @@ class ScrollHandler {
scrollTopFunc: (this: GlobalEventHandlers, ev: Event) => any;
scrollLeftFunc: (this: GlobalEventHandlers, ev: Event) => any;
- constructor(divs: Divs) {
- this.divs = divs;
+ constructor(view: RangeView) {
+ this.view = view;
}
// This function is used to hide the rows which are not currently in view and
// so reduce the performance cost of things like hit tests and scrolling.
public syncHidden(): void {
+ const isFlipped = this.view.userSettings.get("flipped");
const toHide = new Array<[HTMLElement, HTMLElement]>();
- const sampleCell = this.divs.registers.children[1] as HTMLElement;
- const buffer = sampleCell.clientHeight * 2;
- const min = this.divs.grid.offsetTop + this.divs.grid.scrollTop - buffer;
- const max = min + this.divs.grid.clientHeight + buffer;
+ const sampleCell = this.view.divs.registers.children[1] as HTMLElement;
+ const buffer = 2 * (isFlipped ? sampleCell.clientWidth : sampleCell.clientHeight);
+ const min = (isFlipped ? this.view.divs.grid.offsetLeft + this.view.divs.grid.scrollLeft
+ : this.view.divs.grid.offsetTop + this.view.divs.grid.scrollTop)
+ - buffer;
+ const max = (isFlipped ? min + this.view.divs.grid.clientWidth
+ : min + this.view.divs.grid.clientHeight)
+ + buffer;
// The rows are grouped by being contained within a group div. This is so as to allow
// groups of rows to easily be displayed and hidden with less of a performance cost.
@@ -769,16 +1469,16 @@ class ScrollHandler {
// the placeholderGroup div that will be shown when mainGroup is hidden so as to maintain
// the dimensions and scroll positions of the grid.
- const rangeGroups = this.divs.grid.children;
+ const rangeGroups = this.view.divs.grid.children;
for (let i = 1; i < rangeGroups.length; i += 2) {
const mainGroup = rangeGroups[i] as HTMLElement;
const placeholderGroup = rangeGroups[i - 1] as HTMLElement;
const isHidden = mainGroup.classList.contains("range-hidden");
// The offsets are used to calculate whether the group is in view.
const offsetMin = this.getOffset(mainGroup.firstChild as HTMLElement,
- placeholderGroup.firstChild as HTMLElement, isHidden);
+ placeholderGroup.firstChild as HTMLElement, isHidden, isFlipped);
const offsetMax = this.getOffset(mainGroup.lastChild as HTMLElement,
- placeholderGroup.lastChild as HTMLElement, isHidden);
+ placeholderGroup.lastChild as HTMLElement, isHidden, isFlipped);
if (offsetMax > min && offsetMin < max) {
if (isHidden) {
@@ -838,19 +1538,21 @@ class ScrollHandler {
}
public saveScroll(): void {
- this.scrollLeft = this.divs.grid.scrollLeft;
- this.scrollTop = this.divs.grid.scrollTop;
+ this.scrollLeft = this.view.divs.grid.scrollLeft;
+ this.scrollTop = this.view.divs.grid.scrollTop;
}
public restoreScroll(): void {
if (this.scrollLeft) {
- this.divs.grid.scrollLeft = this.scrollLeft;
- this.divs.grid.scrollTop = this.scrollTop;
+ this.view.divs.grid.scrollLeft = this.scrollLeft;
+ this.view.divs.grid.scrollTop = this.scrollTop;
}
}
- private getOffset(rowEl: HTMLElement, placeholderRowEl: HTMLElement, isHidden: boolean): number {
- return isHidden ? placeholderRowEl.offsetTop : rowEl.offsetTop;
+ private getOffset(rowEl: HTMLElement, placeholderRowEl: HTMLElement,
+ isHidden: boolean, isFlipped: boolean): number {
+ const element = isHidden ? placeholderRowEl : rowEl;
+ return isFlipped ? element.offsetLeft : element.offsetTop;
}
}
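
syncHidden above shows a row group only when its offsets overlap the scroll window extended by a buffer, so the test reduces to an interval-overlap check. A standalone sketch with arbitrary numbers:

function groupIsVisible(offsetMin: number, offsetMax: number, scrollStart: number,
                        viewportSize: number, buffer: number): boolean {
  const min = scrollStart - buffer;              // window start, padded by the buffer
  const max = min + viewportSize + buffer;       // window end, padded by the buffer
  return offsetMax > min && offsetMin < max;     // the overlap test used above
}

console.assert(groupIsVisible(100, 180, 150, 400, 50));  // group overlaps the window
console.assert(!groupIsVisible(700, 780, 0, 400, 50));   // group is entirely below it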
@@ -872,15 +1574,20 @@ export class RangeView {
divs: Divs;
scrollHandler: ScrollHandler;
phaseChangeHandler: PhaseChangeHandler;
+ instructionRangeHandler: InstructionRangeHandler;
+ selectionHandler: RangeViewSelectionHandler;
+ displayResetter: DisplayResetter;
rowConstructor: RowConstructor;
+ stringConstructor: StringConstructor;
initialized: boolean;
isShown: boolean;
- numPositions: number;
+ maxLengthVirtualRegisterNumber: number;
- constructor(sequence: SequenceView) {
+ constructor(sequence: SequenceView, firstInstr: number, lastInstr: number) {
this.sequenceView = sequence;
this.initialized = false;
this.isShown = false;
+ this.instructionRangeHandler = new InstructionRangeHandler(this, firstInstr, lastInstr);
}
public initializeContent(blocks: Array<SequenceBlock>): void {
@@ -889,16 +1596,33 @@ export class RangeView {
this.intervalsAccessor = new IntervalElementsAccessor(this.sequenceView);
this.cssVariables = new CSSVariables();
this.userSettings = new UserSettings();
+ this.displayResetter = new DisplayResetter(this);
// Indicates whether the RangeView is displayed beside or below the SequenceView.
- this.userSettings.addSetting("landscapeMode", false, this.resetLandscapeMode.bind(this));
- this.blocksData = new BlocksData(blocks);
- this.divs = new Divs(this.userSettings);
- this.scrollHandler = new ScrollHandler(this.divs);
- this.numPositions = this.sequenceView.numInstructions * C.POSITIONS_PER_INSTRUCTION;
+ this.userSettings.addSetting("landscapeMode", false,
+ this.displayResetter.resetLandscapeMode.bind(this.displayResetter));
+ // Indicates whether the grid axes are switched.
+ this.userSettings.addSetting("flipped", false,
+ this.displayResetter.resetFlipped.bind(this.displayResetter));
+ this.blocksData = new BlocksData(this, blocks);
+ this.divs = new Divs(this.userSettings,
+ this.instructionRangeHandler.getInstructionRangeString());
+ this.displayResetter.updateClassesOnContainer();
+ this.scrollHandler = new ScrollHandler(this);
this.rowConstructor = new RowConstructor(this);
+ this.stringConstructor = new StringConstructor(this);
+ this.selectionHandler = new RangeViewSelectionHandler(this);
const constructor = new RangeViewConstructor(this);
constructor.construct();
+ this.cssVariables.setVariables(this.instructionRangeHandler.numPositions,
+ this.divs.registers.children.length);
this.phaseChangeHandler = new PhaseChangeHandler(this);
+ let maxVirtualRegisterNumber = 0;
+ for (const register of this.divs.registers.children) {
+ const registerEl = register as HTMLElement;
+ maxVirtualRegisterNumber = Math.max(maxVirtualRegisterNumber,
+ parseInt(registerEl.title.substring(1), 10));
+ }
+ this.maxLengthVirtualRegisterNumber = Math.floor(Math.log10(maxVirtualRegisterNumber)) + 1;
this.initialized = true;
} else {
// If the RangeView has already been initialized then the phase must have
@@ -918,12 +1642,14 @@ export class RangeView {
// panel is shown.
window.dispatchEvent(new Event("resize"));
- setTimeout(() => {
- this.userSettings.resetFromSessionStorage();
- this.scrollHandler.restoreScroll();
- this.scrollHandler.syncHidden();
- this.divs.showOnLoad.style.visibility = "visible";
- }, 100);
+ if (this.divs.registers.children.length) {
+ setTimeout(() => {
+ this.userSettings.resetFromSessionStorage();
+ this.scrollHandler.restoreScroll();
+ this.scrollHandler.syncHidden();
+ this.divs.showOnLoad.style.visibility = "visible";
+ }, 100);
+ }
}
}
@@ -944,17 +1670,10 @@ export class RangeView {
}
public onresize(): void {
- if (this.isShown) this.scrollHandler.syncHidden();
+ if (this.divs.registers.children.length && this.isShown) this.scrollHandler.syncHidden();
}
- public resetLandscapeMode(isInLandscapeMode: boolean): void {
- // Used to communicate the setting to Resizer.
- this.divs.container.dataset.landscapeMode = isInLandscapeMode.toString();
-
- window.dispatchEvent(new Event("resize"));
- // Required to adjust scrollbar spacing.
- setTimeout(() => {
- window.dispatchEvent(new Event("resize"));
- }, 100);
+ public getInnerWrapperFromInterval(interval: HTMLElement) {
+ return interval.children[2] as HTMLElement;
}
}
diff --git a/deps/v8/tools/turbolizer/src/views/schedule-view.ts b/deps/v8/tools/turbolizer/src/views/schedule-view.ts
index a3dfb6f84a..6a830c9e3f 100644
--- a/deps/v8/tools/turbolizer/src/views/schedule-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/schedule-view.ts
@@ -28,7 +28,8 @@ export class ScheduleView extends TextView {
public initializeContent(schedule: SchedulePhase, rememberedSelection: SelectionStorage): void {
this.divNode.innerHTML = "";
this.schedule = schedule;
- this.addBlocks(schedule.data.blocks);
+ this.clearSelectionMaps();
+ this.addBlocks(schedule.data.blocksRpo);
this.show();
if (rememberedSelection) {
const adaptedSelection = this.adaptSelection(rememberedSelection);
@@ -37,8 +38,8 @@ export class ScheduleView extends TextView {
}
public detachSelection(): SelectionStorage {
- return new SelectionStorage(this.nodeSelection.detachSelection(),
- this.blockSelection.detachSelection());
+ return new SelectionStorage(this.nodeSelections.current.detachSelection(),
+ this.blockSelections.current.detachSelection());
}
public adaptSelection(selection: SelectionStorage): SelectionStorage {
@@ -62,7 +63,7 @@ export class ScheduleView extends TextView {
select.push(node.id);
}
}
- this.nodeSelectionHandler.select(select, true);
+ this.nodeSelectionHandler.select(select, true, false);
}
private addBlocks(blocks: Array<ScheduleBlock>) {
@@ -74,10 +75,11 @@ export class ScheduleView extends TextView {
private attachSelection(adaptedSelection: SelectionStorage): void {
if (!(adaptedSelection instanceof SelectionStorage)) return;
- this.nodeSelectionHandler.clear();
this.blockSelectionHandler.clear();
- this.nodeSelectionHandler.select(adaptedSelection.adaptedNodes, true);
- this.blockSelectionHandler.select(adaptedSelection.adaptedBocks, true);
+ this.nodeSelectionHandler.clear();
+ this.blockSelectionHandler.select(
+ Array.from(adaptedSelection.adaptedBocks).map(block => Number(block)), true, true);
+ this.nodeSelectionHandler.select(adaptedSelection.adaptedNodes, true, true);
}
private createElementForBlock(block: ScheduleBlock): HTMLElement {
@@ -85,15 +87,15 @@ export class ScheduleView extends TextView {
scheduleBlock.classList.toggle("deferred", block.deferred);
const [start, end] = this.sourceResolver.instructionsPhase
- .getInstructionRangeForBlock(block.id);
+ .getInstructionRangeForBlock(block.rpo);
const instrMarker = this.createElement("div", "instr-marker com", "&#8857;");
instrMarker.setAttribute("title", `Instructions range for this block is [${start}, ${end})`);
- instrMarker.onclick = this.mkBlockLinkHandler(block.id);
+ instrMarker.onclick = this.mkBlockLinkHandler(block.rpo);
scheduleBlock.appendChild(instrMarker);
- const blockId = this.createElement("div", "block-id com clickable", String(block.id));
- blockId.onclick = this.mkBlockLinkHandler(block.id);
- scheduleBlock.appendChild(blockId);
+ const blocksRpoId = this.createElement("div", "block-id com clickable", String(block.rpo) + " Id:" + String(block.id));
+ blocksRpoId.onclick = this.mkBlockLinkHandler(block.rpo);
+ scheduleBlock.appendChild(blocksRpoId);
const blockPred = this.createElement("div", "predecessor-list block-list comma-sep-list");
for (const pred of block.predecessors) {
const predEl = this.createElement("div", "block-id com clickable", String(pred));
@@ -114,7 +116,7 @@ export class ScheduleView extends TextView {
blockSucc.appendChild(succEl);
}
if (block.successors.length) scheduleBlock.appendChild(blockSucc);
- this.addHtmlElementForBlockId(block.id, scheduleBlock);
+ this.addHtmlElementForBlockId(block.rpo, scheduleBlock);
return scheduleBlock;
}
@@ -156,7 +158,7 @@ export class ScheduleView extends TextView {
if (!e.shiftKey) {
view.blockSelectionHandler.clear();
}
- view.blockSelectionHandler.select([blockId], true);
+ view.blockSelectionHandler.select([blockId], true, false);
};
}
@@ -167,7 +169,7 @@ export class ScheduleView extends TextView {
if (!e.shiftKey) {
view.nodeSelectionHandler.clear();
}
- view.nodeSelectionHandler.select([nodeId], true);
+ view.nodeSelectionHandler.select([nodeId], true, false);
};
}
diff --git a/deps/v8/tools/turbolizer/src/views/sequence-view.ts b/deps/v8/tools/turbolizer/src/views/sequence-view.ts
index 6ea5aad03e..72e63817fd 100644
--- a/deps/v8/tools/turbolizer/src/views/sequence-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/sequence-view.ts
@@ -26,6 +26,8 @@ export class SequenceView extends TextView {
showRangeView: boolean;
phaseSelectEl: HTMLSelectElement;
toggleRangeViewEl: HTMLElement;
+ firstInstrInput: HTMLInputElement;
+ lastInstrInput: HTMLInputElement;
constructor(parent: HTMLElement, broker: SelectionBroker) {
super(parent, broker);
@@ -45,13 +47,15 @@ export class SequenceView extends TextView {
}
public detachSelection(): SelectionStorage {
- return new SelectionStorage(this.nodeSelection.detachSelection(),
- this.blockSelection.detachSelection());
+ return new SelectionStorage(this.nodeSelections.current.detachSelection(),
+ this.blockSelections.current.detachSelection(),
+ this.instructionSelections.current.detachSelection());
}
public adaptSelection(selection: SelectionStorage): SelectionStorage {
for (const key of selection.nodes.keys()) selection.adaptedNodes.add(key);
for (const key of selection.blocks.keys()) selection.adaptedBocks.add(key);
+ for (const key of selection.instructions.keys()) selection.adaptedInstructions.add(Number(key));
return selection;
}
@@ -112,15 +116,19 @@ export class SequenceView extends TextView {
select.push(item);
}
}
- this.nodeSelectionHandler.select(select, true);
+ this.nodeSelectionHandler.select(select, true, false);
}
private attachSelection(adaptedSelection: SelectionStorage): void {
if (!(adaptedSelection instanceof SelectionStorage)) return;
- this.nodeSelectionHandler.clear();
this.blockSelectionHandler.clear();
- this.nodeSelectionHandler.select(adaptedSelection.adaptedNodes, true);
- this.blockSelectionHandler.select(adaptedSelection.adaptedBocks, true);
+ this.nodeSelectionHandler.clear();
+ this.registerAllocationSelectionHandler.clear();
+ this.blockSelectionHandler.select(
+ Array.from(adaptedSelection.adaptedBocks).map(block => Number(block)), true, true);
+ this.nodeSelectionHandler.select(adaptedSelection.adaptedNodes, true, true);
+ this.registerAllocationSelectionHandler.select(Array.from(adaptedSelection.adaptedInstructions),
+ true, true);
}
private addBlocks(blocks: Array<SequenceBlock>): void {
@@ -135,6 +143,9 @@ export class SequenceView extends TextView {
sequenceBlock.classList.toggle("deferred", block.deferred);
const blockIdEl = createElement("div", "block-id com clickable", String(block.id));
+ // Select just the block id when any of the block's instructions or positions
+ // are selected.
+ this.addHtmlElementForBlockId(this.getSubId(block.id), blockIdEl);
blockIdEl.onclick = this.mkBlockLinkHandler(block.id);
sequenceBlock.appendChild(blockIdEl);
@@ -225,7 +236,10 @@ export class SequenceView extends TextView {
const instId = createElement("div", "instruction-id", String(instruction.id));
const offsets = this.sourceResolver.instructionsPhase.instructionToPcOffsets(instruction.id);
instId.classList.add("clickable");
+ // Select the instruction id both when the instruction is selected and when any of its
+ // positions are selected.
this.addHtmlElementForInstructionId(instruction.id, instId);
+ this.addHtmlElementForInstructionId(this.getSubId(instruction.id), instId);
instId.onclick = this.mkInstructionLinkHandler(instruction.id);
instId.dataset.instructionId = String(instruction.id);
if (offsets) {
@@ -307,19 +321,12 @@ export class SequenceView extends TextView {
private addRangeView(): void {
if (this.sequence.registerAllocation) {
if (!this.rangeView) {
- this.rangeView = new RangeView(this);
+ this.rangeView = new RangeView(this, parseInt(this.firstInstrInput.value, 10),
+ parseInt(this.lastInstrInput.value, 10));
}
const source = this.sequence.registerAllocation;
if (source.fixedLiveRanges.length == 0 && source.liveRanges.length == 0) {
this.preventRangeView("No live ranges to show");
- } else if (this.numInstructions >= 249) {
- // This is due to the css grid-column being limited to 1000 columns.
- // Performance issues would otherwise impose some limit.
- // TODO(george.wort@arm.com): Allow the user to specify an instruction range
- // to display that spans less than 249 instructions.
- this.preventRangeView(
- "Live range display is only supported for sequences with less than 249 instructions"
- );
}
if (this.showRangeView) {
this.rangeView.initializeContent(this.sequence.blocks);
@@ -362,8 +369,37 @@ export class SequenceView extends TextView {
};
}
+ private elementForRangeViewInputElement(form: HTMLElement, text: string): HTMLInputElement {
+ const instrInputEl = createElement("input", "instruction-range-input") as HTMLInputElement;
+ instrInputEl.type = "text";
+ instrInputEl.title = text;
+ instrInputEl.placeholder = text;
+ instrInputEl.alt = text;
+ form.appendChild(instrInputEl);
+ return instrInputEl;
+ }
+
private elementForToggleRangeView(): HTMLElement {
- const toggleRangeViewEl = createElement("label", "", "show live ranges");
+ const toggleRangeViewEl = createElement("label", "", "show live ranges from ");
+
+ const form = createElement("form", "range-toggle-form");
+ toggleRangeViewEl.appendChild(form);
+
+ this.firstInstrInput = this.elementForRangeViewInputElement(form, "first instruction");
+ form.appendChild(createElement("span", "", " to "));
+ this.lastInstrInput = this.elementForRangeViewInputElement(form, "last instruction");
+
+ const submit = createElement("input", "instruction-range-submit") as HTMLInputElement;
+ submit.type = "submit";
+ submit.value = "Refresh Ranges";
+ submit.onclick = (e: MouseEvent) => {
+ e.preventDefault();
+ // Click once to show the ranges if hidden; click twice (hide then show) to refresh them.
+ this.toggleRangeViewEl.click();
+ if (!this.showRangeView) this.toggleRangeViewEl.click();
+ };
+ form.appendChild(submit);
+
const toggleRangesInput = createElement("input", "range-toggle-show") as HTMLInputElement;
toggleRangesInput.setAttribute("type", "checkbox");
toggleRangesInput.oninput = () => this.toggleRangeView(toggleRangesInput);
@@ -375,6 +411,15 @@ export class SequenceView extends TextView {
toggleRangesInput.disabled = true;
this.showRangeView = toggleRangesInput.checked;
if (this.showRangeView) {
+ const firstInstr = parseInt(this.firstInstrInput.value, 10);
+ const lastInstr = parseInt(this.lastInstrInput.value, 10);
+ if (this.rangeView.instructionRangeHandler.isNewRangeViewRequired(firstInstr, lastInstr)) {
+ // Remove current RangeView's selection nodes and blocks from SelectionHandlers.
+ this.removeHtmlElementFromAllMapsIf((e: HTMLElement) =>
+ e.closest("#ranges-content") != null);
+ this.rangeView = new RangeView(this, firstInstr, lastInstr);
+ this.addRangeView();
+ }
this.rangeView.initializeContent(this.sequence.blocks);
this.rangeView.show();
} else {
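
The instruction-range inputs added above feed straight into InstructionRangeHandler: a blank field parses to NaN, which getValidRange treats as a request for the default bound. A tiny sketch of that hand-off (field values are examples):

// parseInt of an empty input is NaN, so leaving a field blank falls back to a
// default bound inside getValidRange rather than producing an invalid range.
console.assert(Number.isNaN(parseInt("", 10)));   // blank "first instruction" field
console.assert(parseInt("12", 10) === 12);        // explicit bound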
diff --git a/deps/v8/tools/turbolizer/src/views/text-view.ts b/deps/v8/tools/turbolizer/src/views/text-view.ts
index 428abd85cd..c2309100ff 100644
--- a/deps/v8/tools/turbolizer/src/views/text-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/text-view.ts
@@ -19,15 +19,61 @@ import {
} from "../selection/selection-handler";
type GenericTextPhase = DisassemblyPhase | SchedulePhase | SequencePhase;
+
+class SelectionMapsHandler {
+ view: TextView;
+ idToHtmlElementsMap: Map<string, Array<HTMLElement>>;
+ current: SelectionMap;
+ previous: SelectionMap;
+
+ constructor(view: TextView, idToHtmlElementsMap: Map<string, Array<HTMLElement>>) {
+ this.view = view;
+ this.idToHtmlElementsMap = idToHtmlElementsMap;
+ this.current = new SelectionMap(id => String(id));
+ this.previous = new SelectionMap(id => String(id));
+ }
+
+ public clearCurrent(): void {
+ this.previous.selection = this.current.selection;
+ this.current.clear();
+ }
+
+ public clearPrevious(): void {
+ for (const blockId of this.previous.selectedKeys()) {
+ const elements = this.idToHtmlElementsMap.get(blockId);
+ if (!elements) continue;
+ for (const element of elements) {
+ element.classList.toggle("selected", false);
+ }
+ }
+ this.previous.clear();
+ }
+
+ public selectElements(scrollIntoView: boolean, scrollDiv: HTMLElement): void {
+ const mkVisible = new ViewElements(scrollDiv);
+ for (const id of this.current.selectedKeys()) {
+ const elements = this.idToHtmlElementsMap.get(id);
+ if (!elements) continue;
+ for (const element of elements) {
+ if (element.className.substring(0, 5) != "range" && element.getRootNode() == document) {
+ mkVisible.consider(element, true);
+ }
+ element.classList.toggle("selected", true);
+ }
+ }
+ mkVisible.apply(scrollIntoView);
+ }
+}
export abstract class TextView extends PhaseView {
broker: SelectionBroker;
sourceResolver: SourceResolver;
nodeSelectionHandler: NodeSelectionHandler & ClearableHandler;
blockSelectionHandler: BlockSelectionHandler & ClearableHandler;
registerAllocationSelectionHandler: RegisterAllocationSelectionHandler & ClearableHandler;
- nodeSelection: SelectionMap;
- blockSelection: SelectionMap;
- registerAllocationSelection: SelectionMap;
+ selectionCleared: boolean;
+ nodeSelections: SelectionMapsHandler;
+ instructionSelections: SelectionMapsHandler;
+ blockSelections: SelectionMapsHandler;
textListNode: HTMLUListElement;
instructionIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
nodeIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
@@ -47,9 +93,9 @@ export abstract class TextView extends PhaseView {
this.blockIdToNodeIds = new Map<string, Array<string>>();
this.nodeIdToBlockId = new Array<string>();
- this.nodeSelection = new SelectionMap(node => String(node));
- this.blockSelection = new SelectionMap(block => String(block));
- this.registerAllocationSelection = new SelectionMap(register => String(register));
+ this.nodeSelections = new SelectionMapsHandler(this, this.nodeIdToHtmlElementsMap);
+ this.instructionSelections = new SelectionMapsHandler(this, this.instructionIdToHtmlElementsMap);
+ this.blockSelections = new SelectionMapsHandler(this, this.blockIdToHtmlElementsMap);
this.nodeSelectionHandler = this.initializeNodeSelectionHandler();
this.blockSelectionHandler = this.initializeBlockSelectionHandler();
@@ -59,6 +105,8 @@ export abstract class TextView extends PhaseView {
broker.addBlockHandler(this.blockSelectionHandler);
broker.addRegisterAllocatorHandler(this.registerAllocationSelectionHandler);
+ this.selectionCleared = false;
+
this.divNode.addEventListener("click", e => {
if (!e.shiftKey) {
this.nodeSelectionHandler.clear();
@@ -75,51 +123,30 @@ export abstract class TextView extends PhaseView {
this.show();
}
- public updateSelection(scrollIntoView: boolean = false): void {
- if (this.divNode.parentNode == null) return;
- const mkVisible = new ViewElements(this.divNode.parentNode as HTMLElement);
- const elementsToSelect = this.divNode.querySelectorAll(`[data-pc-offset]`);
-
- for (const el of elementsToSelect) {
- el.classList.toggle("selected", false);
- }
- for (const [blockId, elements] of this.blockIdToHtmlElementsMap.entries()) {
- const isSelected = this.blockSelection.isSelected(blockId);
- for (const element of elements) {
- mkVisible.consider(element, isSelected);
- element.classList.toggle("selected", isSelected);
- }
- }
+ public clearSelectionMaps() {
+ this.instructionIdToHtmlElementsMap.clear();
+ this.nodeIdToHtmlElementsMap.clear();
+ this.blockIdToHtmlElementsMap.clear();
+ }
- for (const key of this.instructionIdToHtmlElementsMap.keys()) {
- for (const element of this.instructionIdToHtmlElementsMap.get(key)) {
- element.classList.toggle("selected", false);
- }
- }
- for (const instrId of this.registerAllocationSelection.selectedKeys()) {
- const elements = this.instructionIdToHtmlElementsMap.get(instrId);
- if (!elements) continue;
- for (const element of elements) {
- mkVisible.consider(element, true);
- element.classList.toggle("selected", true);
- }
- }
+ public updateSelection(scrollIntoView: boolean = false,
+ scrollDiv: HTMLElement = this.divNode as HTMLElement): void {
+ if (this.divNode.parentNode == null) return;
- for (const key of this.nodeIdToHtmlElementsMap.keys()) {
- for (const element of this.nodeIdToHtmlElementsMap.get(key)) {
- element.classList.toggle("selected", false);
- }
- }
- for (const nodeId of this.nodeSelection.selectedKeys()) {
- const elements = this.nodeIdToHtmlElementsMap.get(nodeId);
- if (!elements) continue;
- for (const element of elements) {
- mkVisible.consider(element, true);
- element.classList.toggle("selected", true);
+ const clearDisassembly = () => {
+ const elementsToSelect = this.divNode.querySelectorAll(`[data-pc-offset]`);
+ for (const el of elementsToSelect) {
+ el.classList.toggle("selected", false);
}
- }
+ };
- mkVisible.apply(scrollIntoView);
+ clearDisassembly();
+ this.blockSelections.clearPrevious();
+ this.instructionSelections.clearPrevious();
+ this.nodeSelections.clearPrevious();
+ this.blockSelections.selectElements(scrollIntoView, scrollDiv);
+ this.instructionSelections.selectElements(scrollIntoView, scrollDiv);
+ this.nodeSelections.selectElements(scrollIntoView, scrollDiv);
}
public processLine(line: string): Array<HTMLSpanElement> {
@@ -161,8 +188,39 @@ export abstract class TextView extends PhaseView {
public onresize(): void {}
+ private removeHtmlElementFromMapIf(condition: (e: HTMLElement) => boolean,
+ map: Map<string, Array<HTMLElement>>): void {
+ for (const [nodeId, elements] of map) {
+ let i = elements.length;
+ while (i--) {
+ if (condition(elements[i])) {
+ elements.splice(i, 1);
+ }
+ }
+ if (elements.length == 0) {
+ map.delete(nodeId);
+ }
+ }
+ }
+
+ public removeHtmlElementFromAllMapsIf(condition: (e: HTMLElement) => boolean): void {
+ this.clearSelection();
+ this.removeHtmlElementFromMapIf(condition, this.nodeIdToHtmlElementsMap);
+ this.removeHtmlElementFromMapIf(condition, this.blockIdToHtmlElementsMap);
+ this.removeHtmlElementFromMapIf(condition, this.instructionIdToHtmlElementsMap);
+ }
+
+ public clearSelection(): void {
+ if (this.selectionCleared) return;
+ this.blockSelections.clearCurrent();
+ this.instructionSelections.clearCurrent();
+ this.nodeSelections.clearCurrent();
+ this.updateSelection();
+ this.selectionCleared = true;
+ }
+
  // instruction-ids are the divs for the register allocator phase
- protected addHtmlElementForInstructionId(anyInstructionId: any, htmlElement: HTMLElement): void {
+ public addHtmlElementForInstructionId(anyInstructionId: any, htmlElement: HTMLElement): void {
const instructionId = String(anyInstructionId);
if (!this.instructionIdToHtmlElementsMap.has(instructionId)) {
this.instructionIdToHtmlElementsMap.set(instructionId, new Array<HTMLElement>());
@@ -170,7 +228,7 @@ export abstract class TextView extends PhaseView {
this.instructionIdToHtmlElementsMap.get(instructionId).push(htmlElement);
}
- protected addHtmlElementForNodeId(anyNodeId: any, htmlElement: HTMLElement): void {
+ public addHtmlElementForNodeId(anyNodeId: any, htmlElement: HTMLElement): void {
const nodeId = String(anyNodeId);
if (!this.nodeIdToHtmlElementsMap.has(nodeId)) {
this.nodeIdToHtmlElementsMap.set(nodeId, new Array<HTMLElement>());
@@ -178,7 +236,7 @@ export abstract class TextView extends PhaseView {
this.nodeIdToHtmlElementsMap.get(nodeId).push(htmlElement);
}
- protected addHtmlElementForBlockId(anyBlockId: any, htmlElement: HTMLElement): void {
+ public addHtmlElementForBlockId(anyBlockId: any, htmlElement: HTMLElement): void {
const blockId = String(anyBlockId);
if (!this.blockIdToHtmlElementsMap.has(blockId)) {
this.blockIdToHtmlElementsMap.set(blockId, new Array<HTMLElement>());
@@ -186,6 +244,10 @@ export abstract class TextView extends PhaseView {
this.blockIdToHtmlElementsMap.get(blockId).push(htmlElement);
}
+ public getSubId(id: number): number {
+ return -id - 1;
+ }
+
protected createFragment(text: string, style): HTMLSpanElement {
const fragment = document.createElement("span");
@@ -204,53 +266,49 @@ export abstract class TextView extends PhaseView {
return fragment;
}
+ private selectionHandlerSelect(selection: SelectionMap, ids: Array<any>,
+ selected: boolean, scrollIntoView: boolean = false): void {
+ this.selectionCleared = false;
+ const firstSelect = scrollIntoView ? this.blockSelections.current.isEmpty() : false;
+ selection.select(ids, selected);
+ this.updateSelection(firstSelect);
+ }
+
+ private selectionHandlerClear(): void {
+ this.clearSelection();
+ this.broker.broadcastClear(this);
+ }
+
private initializeNodeSelectionHandler(): NodeSelectionHandler & ClearableHandler {
const view = this;
return {
- select: function (nodeIds: Array<string | number>, selected: boolean) {
- view.nodeSelection.select(nodeIds, selected);
- view.updateSelection();
- view.broker.broadcastNodeSelect(this, view.nodeSelection.selectedKeys(), selected);
- },
- clear: function () {
- view.nodeSelection.clear();
- view.updateSelection();
- view.broker.broadcastClear(this);
+ select: function (nodeIds: Array<string | number>, selected: boolean,
+ scrollIntoView: boolean) {
+ view.selectionHandlerSelect(view.nodeSelections.current, nodeIds, selected, scrollIntoView);
+ view.broker.broadcastNodeSelect(this, view.nodeSelections.current.selectedKeys(), selected);
},
brokeredNodeSelect: function (nodeIds: Set<string>, selected: boolean) {
- const firstSelect = view.blockSelection.isEmpty();
- view.nodeSelection.select(nodeIds, selected);
- view.updateSelection(firstSelect);
+ view.selectionHandlerSelect(view.nodeSelections.current,
+ Array.from(nodeIds), selected, false);
},
- brokeredClear: function () {
- view.nodeSelection.clear();
- view.updateSelection();
- }
+ clear: view.selectionHandlerClear.bind(this),
+ brokeredClear: view.clearSelection.bind(this),
};
}
private initializeBlockSelectionHandler(): BlockSelectionHandler & ClearableHandler {
const view = this;
return {
- select: function (blockIds: Array<string>, selected: boolean) {
- view.blockSelection.select(blockIds, selected);
- view.updateSelection();
- view.broker.broadcastBlockSelect(this, blockIds, selected);
+ select: function (blockIds: Array<number>, selected: boolean) {
+ view.selectionHandlerSelect(view.blockSelections.current, blockIds, selected);
+ view.broker.broadcastBlockSelect(this,
+ Array.from(view.blockSelections.current.selectedKeysAsAbsNumbers()), selected);
},
- clear: function () {
- view.blockSelection.clear();
- view.updateSelection();
- view.broker.broadcastClear(this);
+ brokeredBlockSelect: function (blockIds: Array<number>, selected: boolean) {
+ view.selectionHandlerSelect(view.blockSelections.current, blockIds, selected, true);
},
- brokeredBlockSelect: function (blockIds: Array<string>, selected: boolean) {
- const firstSelect = view.blockSelection.isEmpty();
- view.blockSelection.select(blockIds, selected);
- view.updateSelection(firstSelect);
- },
- brokeredClear: function () {
- view.blockSelection.clear();
- view.updateSelection();
- }
+ clear: view.selectionHandlerClear.bind(this),
+ brokeredClear: view.clearSelection.bind(this),
};
}
@@ -258,28 +316,23 @@ export abstract class TextView extends PhaseView {
& ClearableHandler {
const view = this;
return {
- select: function (instructionIds: Array<number>, selected: boolean) {
- view.registerAllocationSelection.select(instructionIds, selected);
- view.updateSelection();
- view.broker.broadcastInstructionSelect(null, instructionIds, selected);
- },
- clear: function () {
- view.registerAllocationSelection.clear();
- view.updateSelection();
- view.broker.broadcastClear(this);
+ select: function (instructionIds: Array<number>, selected: boolean, scrollIntoView: boolean) {
+ view.selectionHandlerSelect(view.instructionSelections.current, instructionIds, selected,
+ scrollIntoView);
+ view.broker.broadcastInstructionSelect(this,
+ Array.from(view.instructionSelections.current.selectedKeysAsAbsNumbers()), selected);
},
brokeredRegisterAllocationSelect: function (instructionsOffsets: Array<[number, number]>,
selected: boolean) {
- const firstSelect = view.blockSelection.isEmpty();
+ view.selectionCleared = false;
+ const firstSelect = view.blockSelections.current.isEmpty();
for (const instructionOffset of instructionsOffsets) {
- view.registerAllocationSelection.select(instructionOffset, selected);
+ view.instructionSelections.current.select(instructionOffset, selected);
}
view.updateSelection(firstSelect);
},
- brokeredClear: function () {
- view.registerAllocationSelection.clear();
- view.updateSelection();
- }
+ clear: view.selectionHandlerClear.bind(this),
+ brokeredClear: view.clearSelection.bind(this),
};
}
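
In text-view.ts the three flat SelectionMaps are replaced by per-kind SelectionMapsHandler instances that keep both the current and the previous selection, so updateSelection only has to un-highlight the elements that were selected last time instead of sweeping every entry in the id-to-element maps. A DOM-free sketch of that hand-off (FakeElement and SelectionModel are illustrative stand-ins, not turbolizer classes):

// Illustrative model: an element is just a set of CSS classes.
class FakeElement {
  classes = new Set<string>();
  toggle(cls: string, on: boolean): void {
    if (on) this.classes.add(cls); else this.classes.delete(cls);
  }
}

class SelectionModel {
  previous = new Set<string>();
  current = new Set<string>();
  constructor(private idToElements: Map<string, Array<FakeElement>>) {}

  // Mirrors clearCurrent(): remember what was selected, then start fresh.
  clearCurrent(): void {
    this.previous = this.current;
    this.current = new Set<string>();
  }

  // Mirrors clearPrevious() + selectElements(): only elements whose
  // selection state may have changed are touched.
  apply(): void {
    for (const id of this.previous) {
      for (const el of this.idToElements.get(id) ?? []) el.toggle("selected", false);
    }
    this.previous.clear();
    for (const id of this.current) {
      for (const el of this.idToElements.get(id) ?? []) el.toggle("selected", true);
    }
  }
}
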
diff --git a/deps/v8/tools/turbolizer/src/views/turboshaft-graph-view.ts b/deps/v8/tools/turbolizer/src/views/turboshaft-graph-view.ts
index b1d62a5528..3de6c3eb34 100644
--- a/deps/v8/tools/turbolizer/src/views/turboshaft-graph-view.ts
+++ b/deps/v8/tools/turbolizer/src/views/turboshaft-graph-view.ts
@@ -207,7 +207,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
const selection = this.searchNodes(filterFunction, e, onlyVisible);
- this.nodeSelectionHandler.select(selection, true);
+ this.nodeSelectionHandler.select(selection, true, false);
this.updateGraphVisibility();
searchInput.blur();
this.viewSelection();
@@ -262,7 +262,8 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
private initializeNodeSelectionHandler(): NodeSelectionHandler & ClearableHandler {
const view = this;
return {
- select: function (selectedNodes: Array<TurboshaftGraphNode>, selected: boolean) {
+ select: function (selectedNodes: Array<TurboshaftGraphNode>, selected: boolean,
+ scrollIntoView: boolean) {
const sourcePositions = new Array<SourcePosition>();
const nodes = new Set<string>();
for (const node of selectedNodes) {
@@ -298,9 +299,9 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
return {
select: function (selectedBlocks: Array<TurboshaftGraphBlock>, selected: boolean) {
view.state.blocksSelection.select(selectedBlocks, selected);
- const selectedBlocksKeys = new Array<string>();
+ const selectedBlocksKeys = new Array<number>();
for (const selectedBlock of selectedBlocks) {
- selectedBlocksKeys.push(view.state.blocksSelection.stringKey(selectedBlock));
+ selectedBlocksKeys.push(Number(view.state.blocksSelection.stringKey(selectedBlock)));
}
view.broker.broadcastBlockSelect(this, selectedBlocksKeys, selected);
view.updateGraphVisibility();
@@ -310,7 +311,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
view.broker.broadcastClear(this);
view.updateGraphVisibility();
},
- brokeredBlockSelect: function (blockIds: Array<string>, selected: boolean) {
+ brokeredBlockSelect: function (blockIds: Array<number>, selected: boolean) {
view.state.blocksSelection.select(blockIds, selected);
view.updateGraphVisibility();
},
@@ -429,7 +430,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
if (!d3.event.shiftKey) {
view.blockSelectionHandler.clear();
}
- view.blockSelectionHandler.select([edge.source, edge.target], true);
+ view.blockSelectionHandler.select([edge.source, edge.target], true, false);
})
.attr("adjacentToHover", "false");
@@ -473,7 +474,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
})
.on("click", (block: TurboshaftGraphBlock) => {
if (!d3.event.shiftKey) view.blockSelectionHandler.clear();
- view.blockSelectionHandler.select([block], undefined);
+ view.blockSelectionHandler.select([block], undefined, false);
d3.event.stopPropagation();
})
.call(view.blockDrag);
@@ -515,7 +516,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
.on("click", () => {
d3.event.stopPropagation();
block.collapsed = !block.collapsed;
- view.nodeSelectionHandler.select(block.nodes, false);
+ view.nodeSelectionHandler.select(block.nodes, false, false);
});
view.appendInputAndOutputBubbles(svg, block);
@@ -596,7 +597,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
})
.on("click", (node: TurboshaftGraphNode) => {
if (!d3.event.shiftKey) view.nodeSelectionHandler.clear();
- view.nodeSelectionHandler.select([node], undefined);
+ view.nodeSelectionHandler.select([node], undefined, false);
d3.event.stopPropagation();
});
nodeY += node.labelBox.height;
@@ -737,12 +738,12 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
...this.graph.nodes(node =>
selection.adaptedNodes.has(this.state.selection.stringKey(node)))
];
- this.nodeSelectionHandler.select(selectedNodes, true);
+ this.nodeSelectionHandler.select(selectedNodes, true, false);
const selectedBlocks = [
...this.graph.blocks(block =>
selection.adaptedBocks.has(this.state.blocksSelection.stringKey(block)))
];
- this.blockSelectionHandler.select(selectedBlocks, true);
+ this.blockSelectionHandler.select(selectedBlocks, true, false);
return selectedNodes.length + selectedBlocks.length;
}
@@ -849,7 +850,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
// Hotkeys handlers
private selectAllNodes(): void {
- this.nodeSelectionHandler.select(this.graph.nodeMap, true);
+ this.nodeSelectionHandler.select(this.graph.nodeMap, true, false);
this.updateGraphVisibility();
}
@@ -891,7 +892,7 @@ export class TurboshaftGraphView extends MovableView<TurboshaftGraph> {
block.collapsed = false;
selectedNodes = selectedNodes.concat(block.nodes);
}
- this.nodeSelectionHandler.select(selectedNodes, true);
+ this.nodeSelectionHandler.select(selectedNodes, true, false);
this.updateGraphVisibility();
}
}
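
The turboshaft-graph-view call sites above are updated for the extra scrollIntoView flag now threaded through select(); selections triggered inside the graph pass false, so only brokered selections coming from another pane request scrolling. A condensed sketch of the node-handler shape, inferred from the text-view.ts implementation above rather than from the real declaration in selection/selection-handler.ts:

// Condensed shape only; the actual interface carries more members.
interface NodeSelectionHandlerSketch {
  select(nodes: ReadonlyArray<unknown>, selected: boolean | undefined,
         scrollIntoView: boolean): void;
  brokeredNodeSelect(nodeIds: Set<string>, selected: boolean): void;
  clear(): void;
  brokeredClear(): void;
}

// e.g. a click inside the graph never scrolls the other panes:
//   view.nodeSelectionHandler.select([node], undefined, false);
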
diff --git a/deps/v8/tools/turbolizer/tslint.json b/deps/v8/tools/turbolizer/tslint.json
index e07e057a62..96b95f2ee3 100644
--- a/deps/v8/tools/turbolizer/tslint.json
+++ b/deps/v8/tools/turbolizer/tslint.json
@@ -1,6 +1,5 @@
{
"defaultSeverity": "error",
- "extends": "tslint:recommended",
"jsRules": {},
"rules": {
"curly": [true, "ignore-same-line"],
diff --git a/deps/v8/tools/ubsan/ignorelist.txt b/deps/v8/tools/ubsan/ignorelist.txt
index 4e476abe49..f61504b355 100644
--- a/deps/v8/tools/ubsan/ignorelist.txt
+++ b/deps/v8/tools/ubsan/ignorelist.txt
@@ -15,3 +15,6 @@ fun:*v8*internal*UnsafeDirectApiCall*
fun:*v8*internal*UnsafeDirectGetterCall*
fun:*v8*internal*UnsafeGenericFunctionCall*
fun:*v8*internal*UnsafeProfilingApiCall*
+
+# Bug chromium:1394654: New -fsanitize=alignment fails for PointerTOC in ICU.
+src:*/third_party/icu/*
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 5bfb5f775a..876101dad6 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -73,6 +73,7 @@ LINT_RULES = """
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]')
FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
+FLAGS_ENABLE_MAGLEV = re.compile("//\s*Flags:.*--maglev[^-].*\n")
FLAGS_ENABLE_TURBOFAN = re.compile("//\s*Flags:.*--turbofan[^-].*\n")
ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-turbofan.*\n")
@@ -596,8 +597,9 @@ class SourceProcessor(SourceFileProcessor):
if (not "mjsunit/mjsunit.js" in name and
not "mjsunit/mjsunit_numfuzz.js" in name):
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
+ not FLAGS_ENABLE_MAGLEV.search(contents) and \
not FLAGS_ENABLE_TURBOFAN.search(contents):
- print("%s Flag --turbofan should be set if " \
+ print("%s Flag --maglev or --turbofan should be set if " \
"assertOptimized() is used" % name)
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
@@ -719,6 +721,45 @@ class StatusFilesProcessor(SourceFileProcessor):
return status_files
+class GCMoleProcessor(SourceFileProcessor):
+  """Check relevant BUILD.gn files for the correct gcmole file pattern.
+
+ The pattern must match the algorithm used in:
+ tools/gcmole/gcmole.py::build_file_list()
+ """
+ gcmole_re = re.compile('### gcmole(.*)')
+ arch_re = re.compile('\((.+)\) ###')
+
+ def IsRelevant(self, name):
+ return True
+
+ def GetPathsToSearch(self):
+ # TODO(https://crbug.com/v8/12660): These should be directories according
+ # to the API, but in order to find the toplevel BUILD.gn, we'd need to walk
+ # the entire project.
+ return ['BUILD.gn', 'test/cctest/BUILD.gn']
+
+ def ProcessFiles(self, files):
+ success = True
+ for path in files:
+ with open(path) as f:
+ gn_file_text = f.read()
+ for suffix in self.gcmole_re.findall(gn_file_text):
+ arch_match = self.arch_re.match(suffix)
+ if not arch_match:
+ print(f'{path}: Malformed gcmole suffix: {suffix}')
+ success = False
+ continue
+ arch = arch_match.group(1)
+ if arch not in [
+ 'all', 'ia32', 'x64', 'arm', 'arm64', 's390', 'ppc', 'ppc64',
+ 'mips64', 'mips64el', 'riscv32', 'riscv64', 'loong64'
+ ]:
+ print(f'{path}: Unknown architecture for gcmole: {arch}')
+ success = False
+ return success
+
+
def CheckDeps(workspace):
checkdeps_py = join(workspace, 'buildtools', 'checkdeps', 'checkdeps.py')
return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
@@ -751,8 +792,7 @@ def PyTests(workspace):
result = True
for script in FindTests(workspace):
print('Running ' + script)
- result &= subprocess.call(
- [sys.executable, script], stdout=subprocess.PIPE) == 0
+ result &= subprocess.call(['vpython3', script], stdout=subprocess.PIPE) == 0
return result
@@ -792,6 +832,8 @@ def Main():
success &= StatusFilesProcessor().RunOnPath(workspace)
print("Running python tests...")
success &= PyTests(workspace)
+ print("Running gcmole pattern check...")
+ success &= GCMoleProcessor().RunOnPath(workspace)
if success:
return 0
else:
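
The new GCMoleProcessor scans the two listed BUILD.gn files for "### gcmole(<arch>) ###" markers, which must stay in sync with tools/gcmole/gcmole.py::build_file_list(), and fails the presubmit on malformed suffixes or unknown architectures. The same check, rendered in TypeScript purely for illustration (the presubmit itself is Python, and the exact marker spelling is assumed from the two regexes above):

// Same two patterns as GCMoleProcessor.gcmole_re / arch_re, applied per line.
const GCMOLE_RE = /### gcmole(.*)/;
const ARCH_RE = /^\((.+)\) ###/;
const KNOWN_ARCHES = new Set([
  "all", "ia32", "x64", "arm", "arm64", "s390", "ppc", "ppc64",
  "mips64", "mips64el", "riscv32", "riscv64", "loong64",
]);

// Returns the architecture named by a gcmole marker, or an error description.
function checkGcmoleLine(line: string): string {
  const suffix = GCMOLE_RE.exec(line)?.[1];
  if (suffix === undefined) return "no gcmole marker";
  const arch = ARCH_RE.exec(suffix)?.[1];
  if (arch === undefined) return `malformed gcmole suffix: ${suffix}`;
  return KNOWN_ARCHES.has(arch) ? arch : `unknown architecture: ${arch}`;
}

console.log(checkGcmoleLine("### gcmole(arm64) ###"));  // "arm64"
console.log(checkGcmoleLine("### gcmole(sparc) ###"));  // "unknown architecture: sparc"
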
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index e420fd4d7b..9cd5dcae86 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -25,15 +25,12 @@ INSTANCE_TYPES = {
41: "CONS_ONE_BYTE_STRING_TYPE",
42: "EXTERNAL_ONE_BYTE_STRING_TYPE",
43: "SLICED_ONE_BYTE_STRING_TYPE",
- 45: "THIN_ONE_BYTE_STRING_TYPE",
50: "UNCACHED_EXTERNAL_STRING_TYPE",
58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
96: "SHARED_STRING_TYPE",
98: "SHARED_EXTERNAL_STRING_TYPE",
- 101: "SHARED_THIN_STRING_TYPE",
104: "SHARED_ONE_BYTE_STRING_TYPE",
106: "SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE",
- 109: "SHARED_THIN_ONE_BYTE_STRING_TYPE",
114: "SHARED_UNCACHED_EXTERNAL_STRING_TYPE",
122: "SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
128: "SYMBOL_TYPE",
@@ -103,15 +100,15 @@ INSTANCE_TYPES = {
192: "FIXED_DOUBLE_ARRAY_TYPE",
193: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
194: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 195: "TURBOFAN_BITSET_TYPE_TYPE",
- 196: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
- 197: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
- 198: "TURBOFAN_RANGE_TYPE_TYPE",
- 199: "TURBOFAN_UNION_TYPE_TYPE",
- 200: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 201: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
- 202: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 203: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
+ 195: "TURBOSHAFT_FLOAT64_TYPE_TYPE",
+ 196: "TURBOSHAFT_FLOAT64_RANGE_TYPE_TYPE",
+ 197: "TURBOSHAFT_FLOAT64_SET_TYPE_TYPE",
+ 198: "TURBOSHAFT_WORD32_TYPE_TYPE",
+ 199: "TURBOSHAFT_WORD32_RANGE_TYPE_TYPE",
+ 200: "TURBOSHAFT_WORD32_SET_TYPE_TYPE",
+ 201: "TURBOSHAFT_WORD64_TYPE_TYPE",
+ 202: "TURBOSHAFT_WORD64_RANGE_TYPE_TYPE",
+ 203: "TURBOSHAFT_WORD64_SET_TYPE_TYPE",
204: "FOREIGN_TYPE",
205: "AWAIT_CONTEXT_TYPE",
206: "BLOCK_CONTEXT_TYPE",
@@ -123,466 +120,495 @@ INSTANCE_TYPES = {
212: "NATIVE_CONTEXT_TYPE",
213: "SCRIPT_CONTEXT_TYPE",
214: "WITH_CONTEXT_TYPE",
- 215: "WASM_FUNCTION_DATA_TYPE",
- 216: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 217: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 218: "WASM_JS_FUNCTION_DATA_TYPE",
- 219: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 220: "EXPORTED_SUB_CLASS_TYPE",
- 221: "EXPORTED_SUB_CLASS2_TYPE",
- 222: "SMALL_ORDERED_HASH_MAP_TYPE",
- 223: "SMALL_ORDERED_HASH_SET_TYPE",
- 224: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 225: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
- 226: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
- 227: "DESCRIPTOR_ARRAY_TYPE",
- 228: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 229: "SOURCE_TEXT_MODULE_TYPE",
- 230: "SYNTHETIC_MODULE_TYPE",
- 231: "WEAK_FIXED_ARRAY_TYPE",
- 232: "TRANSITION_ARRAY_TYPE",
- 233: "ACCESSOR_INFO_TYPE",
- 234: "CALL_HANDLER_INFO_TYPE",
- 235: "CELL_TYPE",
- 236: "CODE_TYPE",
- 237: "CODE_DATA_CONTAINER_TYPE",
- 238: "COVERAGE_INFO_TYPE",
- 239: "EMBEDDER_DATA_ARRAY_TYPE",
- 240: "FEEDBACK_METADATA_TYPE",
- 241: "FEEDBACK_VECTOR_TYPE",
- 242: "FILLER_TYPE",
- 243: "FREE_SPACE_TYPE",
- 244: "INTERNAL_CLASS_TYPE",
- 245: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 246: "MAP_TYPE",
- 247: "MEGA_DOM_HANDLER_TYPE",
- 248: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 249: "PREPARSE_DATA_TYPE",
- 250: "PROPERTY_ARRAY_TYPE",
- 251: "PROPERTY_CELL_TYPE",
- 252: "SCOPE_INFO_TYPE",
- 253: "SHARED_FUNCTION_INFO_TYPE",
- 254: "SMI_BOX_TYPE",
- 255: "SMI_PAIR_TYPE",
- 256: "SORT_STATE_TYPE",
- 257: "SWISS_NAME_DICTIONARY_TYPE",
- 258: "WASM_API_FUNCTION_REF_TYPE",
- 259: "WASM_CONTINUATION_OBJECT_TYPE",
- 260: "WASM_INTERNAL_FUNCTION_TYPE",
- 261: "WASM_RESUME_DATA_TYPE",
- 262: "WASM_STRING_VIEW_ITER_TYPE",
- 263: "WASM_TYPE_INFO_TYPE",
- 264: "WEAK_ARRAY_LIST_TYPE",
- 265: "WEAK_CELL_TYPE",
- 266: "WASM_ARRAY_TYPE",
- 267: "WASM_STRUCT_TYPE",
- 268: "JS_PROXY_TYPE",
+ 215: "TURBOFAN_BITSET_TYPE_TYPE",
+ 216: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
+ 217: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
+ 218: "TURBOFAN_RANGE_TYPE_TYPE",
+ 219: "TURBOFAN_UNION_TYPE_TYPE",
+ 220: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 221: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
+ 222: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 223: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
+ 224: "WASM_FUNCTION_DATA_TYPE",
+ 225: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 226: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 227: "WASM_JS_FUNCTION_DATA_TYPE",
+ 228: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 229: "EXPORTED_SUB_CLASS_TYPE",
+ 230: "EXPORTED_SUB_CLASS2_TYPE",
+ 231: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 232: "SMALL_ORDERED_HASH_SET_TYPE",
+ 233: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 234: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
+ 235: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
+ 236: "DESCRIPTOR_ARRAY_TYPE",
+ 237: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 238: "SOURCE_TEXT_MODULE_TYPE",
+ 239: "SYNTHETIC_MODULE_TYPE",
+ 240: "WEAK_FIXED_ARRAY_TYPE",
+ 241: "TRANSITION_ARRAY_TYPE",
+ 242: "ACCESSOR_INFO_TYPE",
+ 243: "CALL_HANDLER_INFO_TYPE",
+ 244: "CELL_TYPE",
+ 245: "CODE_TYPE",
+ 246: "COVERAGE_INFO_TYPE",
+ 247: "EMBEDDER_DATA_ARRAY_TYPE",
+ 248: "FEEDBACK_METADATA_TYPE",
+ 249: "FEEDBACK_VECTOR_TYPE",
+ 250: "FILLER_TYPE",
+ 251: "FREE_SPACE_TYPE",
+ 252: "INSTRUCTION_STREAM_TYPE",
+ 253: "INTERNAL_CLASS_TYPE",
+ 254: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 255: "MAP_TYPE",
+ 256: "MEGA_DOM_HANDLER_TYPE",
+ 257: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 258: "PREPARSE_DATA_TYPE",
+ 259: "PROPERTY_ARRAY_TYPE",
+ 260: "PROPERTY_CELL_TYPE",
+ 261: "SCOPE_INFO_TYPE",
+ 262: "SHARED_FUNCTION_INFO_TYPE",
+ 263: "SMI_BOX_TYPE",
+ 264: "SMI_PAIR_TYPE",
+ 265: "SORT_STATE_TYPE",
+ 266: "SWISS_NAME_DICTIONARY_TYPE",
+ 267: "WASM_API_FUNCTION_REF_TYPE",
+ 268: "WASM_CONTINUATION_OBJECT_TYPE",
+ 269: "WASM_INTERNAL_FUNCTION_TYPE",
+ 270: "WASM_NULL_TYPE",
+ 271: "WASM_RESUME_DATA_TYPE",
+ 272: "WASM_STRING_VIEW_ITER_TYPE",
+ 273: "WASM_TYPE_INFO_TYPE",
+ 274: "WEAK_ARRAY_LIST_TYPE",
+ 275: "WEAK_CELL_TYPE",
+ 276: "WASM_ARRAY_TYPE",
+ 277: "WASM_STRUCT_TYPE",
+ 278: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 269: "JS_GLOBAL_OBJECT_TYPE",
- 270: "JS_GLOBAL_PROXY_TYPE",
- 271: "JS_MODULE_NAMESPACE_TYPE",
+ 279: "JS_GLOBAL_OBJECT_TYPE",
+ 280: "JS_GLOBAL_PROXY_TYPE",
+ 281: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
2058: "JS_LAST_DUMMY_API_OBJECT_TYPE",
2059: "JS_DATA_VIEW_TYPE",
- 2060: "JS_TYPED_ARRAY_TYPE",
- 2061: "JS_ARRAY_BUFFER_TYPE",
- 2062: "JS_PROMISE_TYPE",
- 2063: "JS_BOUND_FUNCTION_TYPE",
- 2064: "JS_WRAPPED_FUNCTION_TYPE",
- 2065: "JS_FUNCTION_TYPE",
- 2066: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2067: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2068: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2069: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2070: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2071: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2072: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2073: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2074: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2075: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2076: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
- 2077: "JS_ARRAY_CONSTRUCTOR_TYPE",
- 2078: "JS_PROMISE_CONSTRUCTOR_TYPE",
- 2079: "JS_REG_EXP_CONSTRUCTOR_TYPE",
- 2080: "JS_CLASS_CONSTRUCTOR_TYPE",
- 2081: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
- 2082: "JS_ITERATOR_PROTOTYPE_TYPE",
- 2083: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
- 2084: "JS_OBJECT_PROTOTYPE_TYPE",
- 2085: "JS_PROMISE_PROTOTYPE_TYPE",
- 2086: "JS_REG_EXP_PROTOTYPE_TYPE",
- 2087: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
- 2088: "JS_SET_PROTOTYPE_TYPE",
- 2089: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
- 2090: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
- 2091: "JS_MAP_KEY_ITERATOR_TYPE",
- 2092: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 2093: "JS_MAP_VALUE_ITERATOR_TYPE",
- 2094: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 2095: "JS_SET_VALUE_ITERATOR_TYPE",
- 2096: "JS_GENERATOR_OBJECT_TYPE",
- 2097: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
- 2098: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 2099: "JS_MAP_TYPE",
- 2100: "JS_SET_TYPE",
- 2101: "JS_ATOMICS_CONDITION_TYPE",
- 2102: "JS_ATOMICS_MUTEX_TYPE",
- 2103: "JS_WEAK_MAP_TYPE",
- 2104: "JS_WEAK_SET_TYPE",
- 2105: "JS_ARGUMENTS_OBJECT_TYPE",
- 2106: "JS_ARRAY_TYPE",
- 2107: "JS_ARRAY_ITERATOR_TYPE",
- 2108: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 2109: "JS_COLLATOR_TYPE",
- 2110: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 2111: "JS_DATE_TYPE",
- 2112: "JS_DATE_TIME_FORMAT_TYPE",
- 2113: "JS_DISPLAY_NAMES_TYPE",
- 2114: "JS_DURATION_FORMAT_TYPE",
- 2115: "JS_ERROR_TYPE",
- 2116: "JS_EXTERNAL_OBJECT_TYPE",
- 2117: "JS_FINALIZATION_REGISTRY_TYPE",
- 2118: "JS_LIST_FORMAT_TYPE",
- 2119: "JS_LOCALE_TYPE",
- 2120: "JS_MESSAGE_OBJECT_TYPE",
- 2121: "JS_NUMBER_FORMAT_TYPE",
- 2122: "JS_PLURAL_RULES_TYPE",
- 2123: "JS_RAW_JSON_TYPE",
- 2124: "JS_REG_EXP_TYPE",
- 2125: "JS_REG_EXP_STRING_ITERATOR_TYPE",
- 2126: "JS_RELATIVE_TIME_FORMAT_TYPE",
- 2127: "JS_SEGMENT_ITERATOR_TYPE",
- 2128: "JS_SEGMENTER_TYPE",
- 2129: "JS_SEGMENTS_TYPE",
- 2130: "JS_SHADOW_REALM_TYPE",
- 2131: "JS_SHARED_ARRAY_TYPE",
- 2132: "JS_SHARED_STRUCT_TYPE",
- 2133: "JS_STRING_ITERATOR_TYPE",
- 2134: "JS_TEMPORAL_CALENDAR_TYPE",
- 2135: "JS_TEMPORAL_DURATION_TYPE",
- 2136: "JS_TEMPORAL_INSTANT_TYPE",
- 2137: "JS_TEMPORAL_PLAIN_DATE_TYPE",
- 2138: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
- 2139: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
- 2140: "JS_TEMPORAL_PLAIN_TIME_TYPE",
- 2141: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
- 2142: "JS_TEMPORAL_TIME_ZONE_TYPE",
- 2143: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
- 2144: "JS_V8_BREAK_ITERATOR_TYPE",
- 2145: "JS_WEAK_REF_TYPE",
- 2146: "WASM_EXCEPTION_PACKAGE_TYPE",
- 2147: "WASM_GLOBAL_OBJECT_TYPE",
- 2148: "WASM_INSTANCE_OBJECT_TYPE",
- 2149: "WASM_MEMORY_OBJECT_TYPE",
- 2150: "WASM_MODULE_OBJECT_TYPE",
- 2151: "WASM_SUSPENDER_OBJECT_TYPE",
- 2152: "WASM_TABLE_OBJECT_TYPE",
- 2153: "WASM_TAG_OBJECT_TYPE",
- 2154: "WASM_VALUE_OBJECT_TYPE",
+ 2060: "JS_RAB_GSAB_DATA_VIEW_TYPE",
+ 2061: "JS_TYPED_ARRAY_TYPE",
+ 2062: "JS_ARRAY_BUFFER_TYPE",
+ 2063: "JS_PROMISE_TYPE",
+ 2064: "JS_BOUND_FUNCTION_TYPE",
+ 2065: "JS_WRAPPED_FUNCTION_TYPE",
+ 2066: "JS_FUNCTION_TYPE",
+ 2067: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2068: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2069: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2070: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2071: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2072: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2073: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2074: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2075: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2076: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2077: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 2078: "JS_ARRAY_CONSTRUCTOR_TYPE",
+ 2079: "JS_PROMISE_CONSTRUCTOR_TYPE",
+ 2080: "JS_REG_EXP_CONSTRUCTOR_TYPE",
+ 2081: "JS_CLASS_CONSTRUCTOR_TYPE",
+ 2082: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
+ 2083: "JS_ITERATOR_PROTOTYPE_TYPE",
+ 2084: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
+ 2085: "JS_OBJECT_PROTOTYPE_TYPE",
+ 2086: "JS_PROMISE_PROTOTYPE_TYPE",
+ 2087: "JS_REG_EXP_PROTOTYPE_TYPE",
+ 2088: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
+ 2089: "JS_SET_PROTOTYPE_TYPE",
+ 2090: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
+ 2091: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
+ 2092: "JS_MAP_KEY_ITERATOR_TYPE",
+ 2093: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 2094: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 2095: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 2096: "JS_SET_VALUE_ITERATOR_TYPE",
+ 2097: "JS_ATOMICS_CONDITION_TYPE",
+ 2098: "JS_ATOMICS_MUTEX_TYPE",
+ 2099: "JS_SHARED_ARRAY_TYPE",
+ 2100: "JS_SHARED_STRUCT_TYPE",
+ 2101: "JS_ITERATOR_DROP_HELPER_TYPE",
+ 2102: "JS_ITERATOR_FILTER_HELPER_TYPE",
+ 2103: "JS_ITERATOR_MAP_HELPER_TYPE",
+ 2104: "JS_ITERATOR_TAKE_HELPER_TYPE",
+ 2105: "JS_GENERATOR_OBJECT_TYPE",
+ 2106: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 2107: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 2108: "JS_MAP_TYPE",
+ 2109: "JS_SET_TYPE",
+ 2110: "JS_WEAK_MAP_TYPE",
+ 2111: "JS_WEAK_SET_TYPE",
+ 2112: "JS_ARGUMENTS_OBJECT_TYPE",
+ 2113: "JS_ARRAY_TYPE",
+ 2114: "JS_ARRAY_ITERATOR_TYPE",
+ 2115: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 2116: "JS_COLLATOR_TYPE",
+ 2117: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 2118: "JS_DATE_TYPE",
+ 2119: "JS_DATE_TIME_FORMAT_TYPE",
+ 2120: "JS_DISPLAY_NAMES_TYPE",
+ 2121: "JS_DURATION_FORMAT_TYPE",
+ 2122: "JS_ERROR_TYPE",
+ 2123: "JS_EXTERNAL_OBJECT_TYPE",
+ 2124: "JS_FINALIZATION_REGISTRY_TYPE",
+ 2125: "JS_LIST_FORMAT_TYPE",
+ 2126: "JS_LOCALE_TYPE",
+ 2127: "JS_MESSAGE_OBJECT_TYPE",
+ 2128: "JS_NUMBER_FORMAT_TYPE",
+ 2129: "JS_PLURAL_RULES_TYPE",
+ 2130: "JS_RAW_JSON_TYPE",
+ 2131: "JS_REG_EXP_TYPE",
+ 2132: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 2133: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 2134: "JS_SEGMENT_ITERATOR_TYPE",
+ 2135: "JS_SEGMENTER_TYPE",
+ 2136: "JS_SEGMENTS_TYPE",
+ 2137: "JS_SHADOW_REALM_TYPE",
+ 2138: "JS_STRING_ITERATOR_TYPE",
+ 2139: "JS_TEMPORAL_CALENDAR_TYPE",
+ 2140: "JS_TEMPORAL_DURATION_TYPE",
+ 2141: "JS_TEMPORAL_INSTANT_TYPE",
+ 2142: "JS_TEMPORAL_PLAIN_DATE_TYPE",
+ 2143: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
+ 2144: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
+ 2145: "JS_TEMPORAL_PLAIN_TIME_TYPE",
+ 2146: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
+ 2147: "JS_TEMPORAL_TIME_ZONE_TYPE",
+ 2148: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
+ 2149: "JS_V8_BREAK_ITERATOR_TYPE",
+ 2150: "JS_VALID_ITERATOR_WRAPPER_TYPE",
+ 2151: "JS_WEAK_REF_TYPE",
+ 2152: "WASM_EXCEPTION_PACKAGE_TYPE",
+ 2153: "WASM_GLOBAL_OBJECT_TYPE",
+ 2154: "WASM_INSTANCE_OBJECT_TYPE",
+ 2155: "WASM_MEMORY_OBJECT_TYPE",
+ 2156: "WASM_MODULE_OBJECT_TYPE",
+ 2157: "WASM_SUSPENDER_OBJECT_TYPE",
+ 2158: "WASM_TABLE_OBJECT_TYPE",
+ 2159: "WASM_TAG_OBJECT_TYPE",
+ 2160: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02141): (246, "MetaMap"),
- ("read_only_space", 0x02169): (131, "NullMap"),
- ("read_only_space", 0x02191): (228, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x021b9): (264, "WeakArrayListMap"),
- ("read_only_space", 0x021fd): (154, "EnumCacheMap"),
- ("read_only_space", 0x02231): (175, "FixedArrayMap"),
- ("read_only_space", 0x0227d): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x022c9): (243, "FreeSpaceMap"),
- ("read_only_space", 0x022f1): (242, "OnePointerFillerMap"),
- ("read_only_space", 0x02319): (242, "TwoPointerFillerMap"),
- ("read_only_space", 0x02341): (131, "UninitializedMap"),
- ("read_only_space", 0x023b9): (131, "UndefinedMap"),
- ("read_only_space", 0x023fd): (130, "HeapNumberMap"),
- ("read_only_space", 0x02431): (131, "TheHoleMap"),
- ("read_only_space", 0x02491): (131, "BooleanMap"),
- ("read_only_space", 0x02535): (190, "ByteArrayMap"),
- ("read_only_space", 0x0255d): (175, "FixedCOWArrayMap"),
- ("read_only_space", 0x02585): (176, "HashTableMap"),
- ("read_only_space", 0x025ad): (128, "SymbolMap"),
- ("read_only_space", 0x025d5): (40, "OneByteStringMap"),
- ("read_only_space", 0x025fd): (252, "ScopeInfoMap"),
- ("read_only_space", 0x02625): (253, "SharedFunctionInfoMap"),
- ("read_only_space", 0x0264d): (236, "CodeMap"),
- ("read_only_space", 0x02675): (235, "CellMap"),
- ("read_only_space", 0x0269d): (251, "GlobalPropertyCellMap"),
- ("read_only_space", 0x026c5): (204, "ForeignMap"),
- ("read_only_space", 0x026ed): (232, "TransitionArrayMap"),
- ("read_only_space", 0x02715): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x0273d): (241, "FeedbackVectorMap"),
- ("read_only_space", 0x02775): (131, "ArgumentsMarkerMap"),
- ("read_only_space", 0x027d5): (131, "ExceptionMap"),
- ("read_only_space", 0x02831): (131, "TerminationExceptionMap"),
- ("read_only_space", 0x02899): (131, "OptimizedOutMap"),
- ("read_only_space", 0x028f9): (131, "StaleRegisterMap"),
- ("read_only_space", 0x02959): (189, "ScriptContextTableMap"),
- ("read_only_space", 0x02981): (187, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x029a9): (240, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029d1): (175, "ArrayListMap"),
- ("read_only_space", 0x029f9): (129, "BigIntMap"),
- ("read_only_space", 0x02a21): (188, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a49): (191, "BytecodeArrayMap"),
- ("read_only_space", 0x02a71): (237, "CodeDataContainerMap"),
- ("read_only_space", 0x02a99): (238, "CoverageInfoMap"),
- ("read_only_space", 0x02ac1): (192, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02ae9): (178, "GlobalDictionaryMap"),
- ("read_only_space", 0x02b11): (156, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b39): (247, "MegaDomHandlerMap"),
- ("read_only_space", 0x02b61): (175, "ModuleInfoMap"),
- ("read_only_space", 0x02b89): (179, "NameDictionaryMap"),
- ("read_only_space", 0x02bb1): (156, "NoClosuresCellMap"),
- ("read_only_space", 0x02bd9): (181, "NumberDictionaryMap"),
- ("read_only_space", 0x02c01): (156, "OneClosureCellMap"),
- ("read_only_space", 0x02c29): (182, "OrderedHashMapMap"),
- ("read_only_space", 0x02c51): (183, "OrderedHashSetMap"),
- ("read_only_space", 0x02c79): (180, "NameToIndexHashTableMap"),
- ("read_only_space", 0x02ca1): (185, "RegisteredSymbolTableMap"),
- ("read_only_space", 0x02cc9): (184, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02cf1): (249, "PreparseDataMap"),
- ("read_only_space", 0x02d19): (250, "PropertyArrayMap"),
- ("read_only_space", 0x02d41): (233, "AccessorInfoMap"),
- ("read_only_space", 0x02d69): (234, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02d91): (234, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02db9): (234, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02de1): (186, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02e09): (222, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02e31): (223, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02e59): (224, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02e81): (229, "SourceTextModuleMap"),
- ("read_only_space", 0x02ea9): (257, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02ed1): (230, "SyntheticModuleMap"),
- ("read_only_space", 0x02ef9): (258, "WasmApiFunctionRefMap"),
- ("read_only_space", 0x02f21): (216, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x02f49): (217, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x02f71): (260, "WasmInternalFunctionMap"),
- ("read_only_space", 0x02f99): (218, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x02fc1): (261, "WasmResumeDataMap"),
- ("read_only_space", 0x02fe9): (263, "WasmTypeInfoMap"),
- ("read_only_space", 0x03011): (259, "WasmContinuationObjectMap"),
- ("read_only_space", 0x03039): (231, "WeakFixedArrayMap"),
- ("read_only_space", 0x03061): (177, "EphemeronHashTableMap"),
- ("read_only_space", 0x03089): (239, "EmbedderDataArrayMap"),
- ("read_only_space", 0x030b1): (265, "WeakCellMap"),
- ("read_only_space", 0x030d9): (32, "StringMap"),
- ("read_only_space", 0x03101): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x03129): (33, "ConsStringMap"),
- ("read_only_space", 0x03151): (37, "ThinStringMap"),
- ("read_only_space", 0x03179): (35, "SlicedStringMap"),
- ("read_only_space", 0x031a1): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x031c9): (34, "ExternalStringMap"),
- ("read_only_space", 0x031f1): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x03219): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x03241): (0, "InternalizedStringMap"),
- ("read_only_space", 0x03269): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x03291): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x032b9): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x032e1): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x03309): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x03331): (104, "SharedOneByteStringMap"),
- ("read_only_space", 0x03359): (96, "SharedStringMap"),
- ("read_only_space", 0x03381): (106, "SharedExternalOneByteStringMap"),
- ("read_only_space", 0x033a9): (98, "SharedExternalStringMap"),
- ("read_only_space", 0x033d1): (122, "SharedUncachedExternalOneByteStringMap"),
- ("read_only_space", 0x033f9): (114, "SharedUncachedExternalStringMap"),
- ("read_only_space", 0x03421): (109, "SharedThinOneByteStringMap"),
- ("read_only_space", 0x03449): (101, "SharedThinStringMap"),
- ("read_only_space", 0x03471): (131, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x03499): (131, "BasicBlockCountersMarkerMap"),
- ("read_only_space", 0x034dd): (146, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x035dd): (158, "InterceptorInfoMap"),
- ("read_only_space", 0x075e9): (132, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x07611): (133, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x07639): (134, "CallableTaskMap"),
- ("read_only_space", 0x07661): (135, "CallbackTaskMap"),
- ("read_only_space", 0x07689): (136, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x076b1): (139, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x076d9): (140, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x07701): (141, "AccessCheckInfoMap"),
- ("read_only_space", 0x07729): (142, "AccessorPairMap"),
- ("read_only_space", 0x07751): (143, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x07779): (144, "AllocationMementoMap"),
- ("read_only_space", 0x077a1): (147, "AsmWasmDataMap"),
- ("read_only_space", 0x077c9): (148, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x077f1): (149, "BreakPointMap"),
- ("read_only_space", 0x07819): (150, "BreakPointInfoMap"),
- ("read_only_space", 0x07841): (151, "CallSiteInfoMap"),
- ("read_only_space", 0x07869): (152, "ClassPositionsMap"),
- ("read_only_space", 0x07891): (153, "DebugInfoMap"),
- ("read_only_space", 0x078b9): (155, "ErrorStackDataMap"),
- ("read_only_space", 0x078e1): (157, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x07909): (159, "InterpreterDataMap"),
- ("read_only_space", 0x07931): (160, "ModuleRequestMap"),
- ("read_only_space", 0x07959): (161, "PromiseCapabilityMap"),
- ("read_only_space", 0x07981): (162, "PromiseOnStackMap"),
- ("read_only_space", 0x079a9): (163, "PromiseReactionMap"),
- ("read_only_space", 0x079d1): (164, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x079f9): (165, "PrototypeInfoMap"),
- ("read_only_space", 0x07a21): (166, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x07a49): (167, "ScriptMap"),
- ("read_only_space", 0x07a71): (168, "ScriptOrModuleMap"),
- ("read_only_space", 0x07a99): (169, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x07ac1): (170, "StackFrameInfoMap"),
- ("read_only_space", 0x07ae9): (171, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x07b11): (172, "Tuple2Map"),
- ("read_only_space", 0x07b39): (173, "WasmExceptionTagMap"),
- ("read_only_space", 0x07b61): (174, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x07b89): (194, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x07bb1): (227, "DescriptorArrayMap"),
- ("read_only_space", 0x07bd9): (202, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x07c01): (200, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x07c29): (203, "UncompiledDataWithoutPreparseDataWithJobMap"),
- ("read_only_space", 0x07c51): (201, "UncompiledDataWithPreparseDataAndJobMap"),
- ("read_only_space", 0x07c79): (248, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x07ca1): (195, "TurbofanBitsetTypeMap"),
- ("read_only_space", 0x07cc9): (199, "TurbofanUnionTypeMap"),
- ("read_only_space", 0x07cf1): (198, "TurbofanRangeTypeMap"),
- ("read_only_space", 0x07d19): (196, "TurbofanHeapConstantTypeMap"),
- ("read_only_space", 0x07d41): (197, "TurbofanOtherNumberConstantTypeMap"),
- ("read_only_space", 0x07d69): (244, "InternalClassMap"),
- ("read_only_space", 0x07d91): (255, "SmiPairMap"),
- ("read_only_space", 0x07db9): (254, "SmiBoxMap"),
- ("read_only_space", 0x07de1): (219, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x07e09): (220, "ExportedSubClassMap"),
- ("read_only_space", 0x07e31): (225, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x07e59): (226, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x07e81): (193, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x07ea9): (245, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x07ed1): (221, "ExportedSubClass2Map"),
- ("read_only_space", 0x07ef9): (256, "SortStateMap"),
- ("read_only_space", 0x07f21): (262, "WasmStringViewIterMap"),
- ("read_only_space", 0x07f49): (145, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x07f71): (145, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x0803d): (137, "LoadHandler1Map"),
- ("read_only_space", 0x08065): (137, "LoadHandler2Map"),
- ("read_only_space", 0x0808d): (137, "LoadHandler3Map"),
- ("read_only_space", 0x080b5): (138, "StoreHandler0Map"),
- ("read_only_space", 0x080dd): (138, "StoreHandler1Map"),
- ("read_only_space", 0x08105): (138, "StoreHandler2Map"),
- ("read_only_space", 0x0812d): (138, "StoreHandler3Map"),
- ("old_space", 0x043a5): (2116, "ExternalMap"),
- ("old_space", 0x043d5): (2120, "JSMessageObjectMap"),
+ ("read_only_space", 0x00061): (255, "MetaMap"),
+ ("read_only_space", 0x00089): (175, "FixedArrayMap"),
+ ("read_only_space", 0x000b1): (240, "WeakFixedArrayMap"),
+ ("read_only_space", 0x000d9): (274, "WeakArrayListMap"),
+ ("read_only_space", 0x00101): (175, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00129): (236, "DescriptorArrayMap"),
+ ("read_only_space", 0x00151): (131, "UndefinedMap"),
+ ("read_only_space", 0x00179): (131, "NullMap"),
+ ("read_only_space", 0x001a1): (131, "TheHoleMap"),
+ ("read_only_space", 0x001c9): (151, "CallSiteInfoMap"),
+ ("read_only_space", 0x001f1): (154, "EnumCacheMap"),
+ ("read_only_space", 0x002a5): (261, "ScopeInfoMap"),
+ ("read_only_space", 0x002cd): (175, "ModuleInfoMap"),
+ ("read_only_space", 0x002f5): (187, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x0031d): (249, "FeedbackVectorMap"),
+ ("read_only_space", 0x00345): (130, "HeapNumberMap"),
+ ("read_only_space", 0x0036d): (204, "ForeignMap"),
+ ("read_only_space", 0x00395): (256, "MegaDomHandlerMap"),
+ ("read_only_space", 0x003bd): (131, "BooleanMap"),
+ ("read_only_space", 0x003e5): (131, "UninitializedMap"),
+ ("read_only_space", 0x0040d): (131, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x00435): (131, "ExceptionMap"),
+ ("read_only_space", 0x0045d): (131, "TerminationExceptionMap"),
+ ("read_only_space", 0x00485): (131, "OptimizedOutMap"),
+ ("read_only_space", 0x004ad): (131, "StaleRegisterMap"),
+ ("read_only_space", 0x004d5): (131, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x004fd): (131, "BasicBlockCountersMarkerMap"),
+ ("read_only_space", 0x00525): (129, "BigIntMap"),
+ ("read_only_space", 0x0054d): (128, "SymbolMap"),
+ ("read_only_space", 0x00575): (32, "StringMap"),
+ ("read_only_space", 0x0059d): (40, "OneByteStringMap"),
+ ("read_only_space", 0x005c5): (33, "ConsStringMap"),
+ ("read_only_space", 0x005ed): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x00615): (35, "SlicedStringMap"),
+ ("read_only_space", 0x0063d): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x00665): (34, "ExternalStringMap"),
+ ("read_only_space", 0x0068d): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x006b5): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x006dd): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x00705): (98, "SharedExternalStringMap"),
+ ("read_only_space", 0x0072d): (106, "SharedExternalOneByteStringMap"),
+ ("read_only_space", 0x00755): (114, "SharedUncachedExternalStringMap"),
+ ("read_only_space", 0x0077d): (122, "SharedUncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x007a5): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x007cd): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x007f5): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x0081d): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x00845): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x0086d): (8, "OneByteInternalizedStringMap"),
+ ("read_only_space", 0x00895): (37, "ThinStringMap"),
+ ("read_only_space", 0x008bd): (96, "SharedStringMap"),
+ ("read_only_space", 0x008e5): (104, "SharedOneByteStringMap"),
+ ("read_only_space", 0x0090d): (192, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x00935): (248, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x0095d): (190, "ByteArrayMap"),
+ ("read_only_space", 0x00985): (191, "BytecodeArrayMap"),
+ ("read_only_space", 0x009ad): (251, "FreeSpaceMap"),
+ ("read_only_space", 0x009d5): (259, "PropertyArrayMap"),
+ ("read_only_space", 0x009fd): (231, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x00a25): (232, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x00a4d): (233, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x00a75): (252, "InstructionStreamMap"),
+ ("read_only_space", 0x00a9d): (244, "CellMap"),
+ ("read_only_space", 0x00acd): (260, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00af5): (250, "OnePointerFillerMap"),
+ ("read_only_space", 0x00b1d): (250, "TwoPointerFillerMap"),
+ ("read_only_space", 0x00b45): (156, "NoClosuresCellMap"),
+ ("read_only_space", 0x00b6d): (156, "OneClosureCellMap"),
+ ("read_only_space", 0x00b95): (156, "ManyClosuresCellMap"),
+ ("read_only_space", 0x00bbd): (241, "TransitionArrayMap"),
+ ("read_only_space", 0x00be5): (176, "HashTableMap"),
+ ("read_only_space", 0x00c0d): (184, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x00c35): (179, "NameDictionaryMap"),
+ ("read_only_space", 0x00c5d): (266, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x00c85): (178, "GlobalDictionaryMap"),
+ ("read_only_space", 0x00cad): (181, "NumberDictionaryMap"),
+ ("read_only_space", 0x00cd5): (185, "RegisteredSymbolTableMap"),
+ ("read_only_space", 0x00cfd): (175, "ArrayListMap"),
+ ("read_only_space", 0x00d25): (242, "AccessorInfoMap"),
+ ("read_only_space", 0x00d4d): (258, "PreparseDataMap"),
+ ("read_only_space", 0x00d75): (262, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x00d9d): (245, "CodeMap"),
+ ("read_only_space", 0x00ff9): (132, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x01021): (133, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x01049): (134, "CallableTaskMap"),
+ ("read_only_space", 0x01071): (135, "CallbackTaskMap"),
+ ("read_only_space", 0x01099): (136, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x010c1): (139, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x010e9): (140, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x01111): (141, "AccessCheckInfoMap"),
+ ("read_only_space", 0x01139): (142, "AccessorPairMap"),
+ ("read_only_space", 0x01161): (143, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x01189): (144, "AllocationMementoMap"),
+ ("read_only_space", 0x011b1): (146, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x011d9): (147, "AsmWasmDataMap"),
+ ("read_only_space", 0x01201): (148, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x01229): (149, "BreakPointMap"),
+ ("read_only_space", 0x01251): (150, "BreakPointInfoMap"),
+ ("read_only_space", 0x01279): (152, "ClassPositionsMap"),
+ ("read_only_space", 0x012a1): (153, "DebugInfoMap"),
+ ("read_only_space", 0x012c9): (155, "ErrorStackDataMap"),
+ ("read_only_space", 0x012f1): (157, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x01319): (158, "InterceptorInfoMap"),
+ ("read_only_space", 0x01341): (159, "InterpreterDataMap"),
+ ("read_only_space", 0x01369): (160, "ModuleRequestMap"),
+ ("read_only_space", 0x01391): (161, "PromiseCapabilityMap"),
+ ("read_only_space", 0x013b9): (162, "PromiseOnStackMap"),
+ ("read_only_space", 0x013e1): (163, "PromiseReactionMap"),
+ ("read_only_space", 0x01409): (164, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x01431): (165, "PrototypeInfoMap"),
+ ("read_only_space", 0x01459): (166, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x01481): (167, "ScriptMap"),
+ ("read_only_space", 0x014a9): (168, "ScriptOrModuleMap"),
+ ("read_only_space", 0x014d1): (169, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x014f9): (170, "StackFrameInfoMap"),
+ ("read_only_space", 0x01521): (171, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x01549): (172, "Tuple2Map"),
+ ("read_only_space", 0x01571): (173, "WasmExceptionTagMap"),
+ ("read_only_space", 0x01599): (174, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x015c1): (145, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x015e9): (145, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x01611): (137, "LoadHandler1Map"),
+ ("read_only_space", 0x01639): (137, "LoadHandler2Map"),
+ ("read_only_space", 0x01661): (137, "LoadHandler3Map"),
+ ("read_only_space", 0x01689): (138, "StoreHandler0Map"),
+ ("read_only_space", 0x016b1): (138, "StoreHandler1Map"),
+ ("read_only_space", 0x016d9): (138, "StoreHandler2Map"),
+ ("read_only_space", 0x01701): (138, "StoreHandler3Map"),
+ ("read_only_space", 0x01729): (222, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x01751): (220, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x01779): (223, "UncompiledDataWithoutPreparseDataWithJobMap"),
+ ("read_only_space", 0x017a1): (221, "UncompiledDataWithPreparseDataAndJobMap"),
+ ("read_only_space", 0x017c9): (257, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x017f1): (215, "TurbofanBitsetTypeMap"),
+ ("read_only_space", 0x01819): (219, "TurbofanUnionTypeMap"),
+ ("read_only_space", 0x01841): (218, "TurbofanRangeTypeMap"),
+ ("read_only_space", 0x01869): (216, "TurbofanHeapConstantTypeMap"),
+ ("read_only_space", 0x01891): (217, "TurbofanOtherNumberConstantTypeMap"),
+ ("read_only_space", 0x018b9): (198, "TurboshaftWord32TypeMap"),
+ ("read_only_space", 0x018e1): (199, "TurboshaftWord32RangeTypeMap"),
+ ("read_only_space", 0x01909): (201, "TurboshaftWord64TypeMap"),
+ ("read_only_space", 0x01931): (202, "TurboshaftWord64RangeTypeMap"),
+ ("read_only_space", 0x01959): (195, "TurboshaftFloat64TypeMap"),
+ ("read_only_space", 0x01981): (196, "TurboshaftFloat64RangeTypeMap"),
+ ("read_only_space", 0x019a9): (253, "InternalClassMap"),
+ ("read_only_space", 0x019d1): (264, "SmiPairMap"),
+ ("read_only_space", 0x019f9): (263, "SmiBoxMap"),
+ ("read_only_space", 0x01a21): (228, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x01a49): (229, "ExportedSubClassMap"),
+ ("read_only_space", 0x01a71): (234, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x01a99): (235, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x01ac1): (230, "ExportedSubClass2Map"),
+ ("read_only_space", 0x01ae9): (265, "SortStateMap"),
+ ("read_only_space", 0x01b11): (272, "WasmStringViewIterMap"),
+ ("read_only_space", 0x01b39): (194, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x01b61): (237, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x01b89): (200, "TurboshaftWord32SetTypeMap"),
+ ("read_only_space", 0x01bb1): (203, "TurboshaftWord64SetTypeMap"),
+ ("read_only_space", 0x01bd9): (197, "TurboshaftFloat64SetTypeMap"),
+ ("read_only_space", 0x01c01): (193, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x01c29): (254, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x01c51): (182, "OrderedHashMapMap"),
+ ("read_only_space", 0x01c79): (183, "OrderedHashSetMap"),
+ ("read_only_space", 0x01ca1): (186, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x01cc9): (180, "NameToIndexHashTableMap"),
+ ("read_only_space", 0x01cf1): (247, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x01d19): (177, "EphemeronHashTableMap"),
+ ("read_only_space", 0x01d41): (189, "ScriptContextTableMap"),
+ ("read_only_space", 0x01d69): (188, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x01d91): (246, "CoverageInfoMap"),
+ ("read_only_space", 0x01db9): (243, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x01de1): (243, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x01e09): (243, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x01e31): (238, "SourceTextModuleMap"),
+ ("read_only_space", 0x01e59): (239, "SyntheticModuleMap"),
+ ("read_only_space", 0x01e81): (267, "WasmApiFunctionRefMap"),
+ ("read_only_space", 0x01ea9): (225, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x01ed1): (226, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x01ef9): (269, "WasmInternalFunctionMap"),
+ ("read_only_space", 0x01f21): (227, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x01f49): (271, "WasmResumeDataMap"),
+ ("read_only_space", 0x01f71): (273, "WasmTypeInfoMap"),
+ ("read_only_space", 0x01f99): (268, "WasmContinuationObjectMap"),
+ ("read_only_space", 0x01fc1): (270, "WasmNullMap"),
+ ("read_only_space", 0x01fe9): (275, "WeakCellMap"),
+ ("old_space", 0x043c5): (2123, "ExternalMap"),
+ ("old_space", 0x043ed): (2127, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("read_only_space", 0x021e1): "EmptyWeakArrayList",
- ("read_only_space", 0x021ed): "EmptyDescriptorArray",
- ("read_only_space", 0x02225): "EmptyEnumCache",
- ("read_only_space", 0x02259): "EmptyFixedArray",
- ("read_only_space", 0x02261): "NullValue",
- ("read_only_space", 0x02369): "UninitializedValue",
- ("read_only_space", 0x023e1): "UndefinedValue",
- ("read_only_space", 0x02425): "NanValue",
- ("read_only_space", 0x02459): "TheHoleValue",
- ("read_only_space", 0x02485): "HoleNanValue",
- ("read_only_space", 0x024b9): "TrueValue",
- ("read_only_space", 0x024f9): "FalseValue",
- ("read_only_space", 0x02529): "empty_string",
- ("read_only_space", 0x02765): "EmptyScopeInfo",
- ("read_only_space", 0x0279d): "ArgumentsMarker",
- ("read_only_space", 0x027fd): "Exception",
- ("read_only_space", 0x02859): "TerminationException",
- ("read_only_space", 0x028c1): "OptimizedOut",
- ("read_only_space", 0x02921): "StaleRegister",
- ("read_only_space", 0x034c1): "EmptyPropertyArray",
- ("read_only_space", 0x034c9): "EmptyByteArray",
- ("read_only_space", 0x034d1): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x03505): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x03511): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x03519): "EmptySlowElementDictionary",
- ("read_only_space", 0x0353d): "EmptyOrderedHashMap",
- ("read_only_space", 0x03551): "EmptyOrderedHashSet",
- ("read_only_space", 0x03565): "EmptyFeedbackMetadata",
- ("read_only_space", 0x03571): "EmptyPropertyDictionary",
- ("read_only_space", 0x03599): "EmptyOrderedPropertyDictionary",
- ("read_only_space", 0x035b1): "EmptySwissPropertyDictionary",
- ("read_only_space", 0x03605): "NoOpInterceptorInfo",
- ("read_only_space", 0x0362d): "EmptyArrayList",
- ("read_only_space", 0x03639): "EmptyWeakFixedArray",
- ("read_only_space", 0x03641): "InfinityValue",
- ("read_only_space", 0x0364d): "MinusZeroValue",
- ("read_only_space", 0x03659): "MinusInfinityValue",
- ("read_only_space", 0x03665): "SingleCharacterStringTable",
- ("read_only_space", 0x04a6d): "SelfReferenceMarker",
- ("read_only_space", 0x04aad): "BasicBlockCountersMarker",
- ("read_only_space", 0x04af1): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x04afd): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x04b2d): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x04b51): "NativeScopeInfo",
- ("read_only_space", 0x04b69): "HashSeed",
- ("old_space", 0x0423d): "ArgumentsIteratorAccessor",
- ("old_space", 0x04255): "ArrayLengthAccessor",
- ("old_space", 0x0426d): "BoundFunctionLengthAccessor",
- ("old_space", 0x04285): "BoundFunctionNameAccessor",
- ("old_space", 0x0429d): "ErrorStackAccessor",
- ("old_space", 0x042b5): "FunctionArgumentsAccessor",
- ("old_space", 0x042cd): "FunctionCallerAccessor",
- ("old_space", 0x042e5): "FunctionNameAccessor",
- ("old_space", 0x042fd): "FunctionLengthAccessor",
- ("old_space", 0x04315): "FunctionPrototypeAccessor",
- ("old_space", 0x0432d): "SharedArrayLengthAccessor",
- ("old_space", 0x04345): "StringLengthAccessor",
- ("old_space", 0x0435d): "ValueUnavailableAccessor",
- ("old_space", 0x04375): "WrappedFunctionLengthAccessor",
- ("old_space", 0x0438d): "WrappedFunctionNameAccessor",
- ("old_space", 0x043a5): "ExternalMap",
- ("old_space", 0x043cd): "InvalidPrototypeValidityCell",
- ("old_space", 0x043d5): "JSMessageObjectMap",
- ("old_space", 0x043fd): "EmptyScript",
- ("old_space", 0x04441): "ManyClosuresCell",
- ("old_space", 0x0444d): "ArrayConstructorProtector",
- ("old_space", 0x04461): "NoElementsProtector",
- ("old_space", 0x04475): "MegaDOMProtector",
- ("old_space", 0x04489): "IsConcatSpreadableProtector",
- ("old_space", 0x0449d): "ArraySpeciesProtector",
- ("old_space", 0x044b1): "TypedArraySpeciesProtector",
- ("old_space", 0x044c5): "PromiseSpeciesProtector",
- ("old_space", 0x044d9): "RegExpSpeciesProtector",
- ("old_space", 0x044ed): "StringLengthProtector",
- ("old_space", 0x04501): "ArrayIteratorProtector",
- ("old_space", 0x04515): "ArrayBufferDetachingProtector",
- ("old_space", 0x04529): "PromiseHookProtector",
- ("old_space", 0x0453d): "PromiseResolveProtector",
- ("old_space", 0x04551): "MapIteratorProtector",
- ("old_space", 0x04565): "PromiseThenProtector",
- ("old_space", 0x04579): "SetIteratorProtector",
- ("old_space", 0x0458d): "StringIteratorProtector",
- ("old_space", 0x045a1): "StringSplitCache",
- ("old_space", 0x049a9): "RegExpMultipleCache",
- ("old_space", 0x04db1): "BuiltinsConstantsTable",
- ("old_space", 0x05215): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x05239): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0525d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x05281): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x052a5): "AsyncGeneratorYieldWithAwaitResolveSharedFun",
- ("old_space", 0x052c9): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x052ed): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x05311): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x05335): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x05359): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x0537d): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x053a1): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x053c5): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x053e9): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x0540d): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x05431): "PromiseCatchFinallySharedFun",
- ("old_space", 0x05455): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x05479): "PromiseThenFinallySharedFun",
- ("old_space", 0x0549d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x054c1): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x054e5): "ProxyRevokeSharedFun",
- ("old_space", 0x05509): "ShadowRealmImportValueFulfilledSFI",
- ("old_space", 0x0552d): "SourceTextModuleExecuteAsyncModuleFulfilledSFI",
- ("old_space", 0x05551): "SourceTextModuleExecuteAsyncModuleRejectedSFI",
+ ("read_only_space", 0x00219): "EmptyFixedArray",
+ ("read_only_space", 0x00221): "EmptyWeakFixedArray",
+ ("read_only_space", 0x00229): "EmptyWeakArrayList",
+ ("read_only_space", 0x00235): "NullValue",
+ ("read_only_space", 0x00251): "UndefinedValue",
+ ("read_only_space", 0x0026d): "TheHoleValue",
+ ("read_only_space", 0x00289): "EmptyEnumCache",
+ ("read_only_space", 0x00295): "EmptyDescriptorArray",
+ ("read_only_space", 0x00ac5): "InvalidPrototypeValidityCell",
+ ("read_only_space", 0x00dc5): "TrueValue",
+ ("read_only_space", 0x00de1): "FalseValue",
+ ("read_only_space", 0x00dfd): "HashSeed",
+ ("read_only_space", 0x00e0d): "empty_string",
+ ("read_only_space", 0x00efd): "EmptyPropertyDictionary",
+ ("read_only_space", 0x00f29): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x00f4d): "EmptySwissPropertyDictionary",
+ ("read_only_space", 0x00f6d): "EmptyByteArray",
+ ("read_only_space", 0x00f75): "EmptyScopeInfo",
+ ("read_only_space", 0x00f85): "EmptyPropertyArray",
+ ("read_only_space", 0x00f8d): "MinusZeroValue",
+ ("read_only_space", 0x00f99): "NanValue",
+ ("read_only_space", 0x00fa5): "HoleNanValue",
+ ("read_only_space", 0x00fb1): "InfinityValue",
+ ("read_only_space", 0x00fbd): "MinusInfinityValue",
+ ("read_only_space", 0x00fc9): "MaxSafeInteger",
+ ("read_only_space", 0x00fd5): "MaxUInt32",
+ ("read_only_space", 0x00fe1): "SmiMinValue",
+ ("read_only_space", 0x00fed): "SmiMaxValuePlusOne",
+ ("read_only_space", 0x02011): "NoOpInterceptorInfo",
+ ("read_only_space", 0x02039): "EmptyArrayList",
+ ("read_only_space", 0x02045): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x02051): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x0205d): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x02065): "SingleCharacterStringTable",
+ ("read_only_space", 0x05af5): "UninitializedValue",
+ ("read_only_space", 0x05b2d): "ArgumentsMarker",
+ ("read_only_space", 0x05b65): "TerminationException",
+ ("read_only_space", 0x05ba5): "Exception",
+ ("read_only_space", 0x05bc1): "OptimizedOut",
+ ("read_only_space", 0x05bf9): "StaleRegister",
+ ("read_only_space", 0x05c31): "SelfReferenceMarker",
+ ("read_only_space", 0x05c71): "BasicBlockCountersMarker",
+ ("read_only_space", 0x06139): "EmptySlowElementDictionary",
+ ("read_only_space", 0x0615d): "EmptySymbolTable",
+ ("read_only_space", 0x06179): "EmptyOrderedHashMap",
+ ("read_only_space", 0x0618d): "EmptyOrderedHashSet",
+ ("read_only_space", 0x061a1): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x061ad): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x061cd): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x061f1): "NativeScopeInfo",
+ ("read_only_space", 0x06209): "ShadowRealmScopeInfo",
+ ("read_only_space", 0x0fffd): "WasmNull",
+ ("old_space", 0x04275): "ArgumentsIteratorAccessor",
+ ("old_space", 0x0428d): "ArrayLengthAccessor",
+ ("old_space", 0x042a5): "BoundFunctionLengthAccessor",
+ ("old_space", 0x042bd): "BoundFunctionNameAccessor",
+ ("old_space", 0x042d5): "ErrorStackAccessor",
+ ("old_space", 0x042ed): "FunctionArgumentsAccessor",
+ ("old_space", 0x04305): "FunctionCallerAccessor",
+ ("old_space", 0x0431d): "FunctionNameAccessor",
+ ("old_space", 0x04335): "FunctionLengthAccessor",
+ ("old_space", 0x0434d): "FunctionPrototypeAccessor",
+ ("old_space", 0x04365): "StringLengthAccessor",
+ ("old_space", 0x0437d): "ValueUnavailableAccessor",
+ ("old_space", 0x04395): "WrappedFunctionLengthAccessor",
+ ("old_space", 0x043ad): "WrappedFunctionNameAccessor",
+ ("old_space", 0x043c5): "ExternalMap",
+ ("old_space", 0x043ed): "JSMessageObjectMap",
+ ("old_space", 0x04415): "EmptyScript",
+ ("old_space", 0x0445d): "ManyClosuresCell",
+ ("old_space", 0x04469): "ArrayConstructorProtector",
+ ("old_space", 0x0447d): "NoElementsProtector",
+ ("old_space", 0x04491): "MegaDOMProtector",
+ ("old_space", 0x044a5): "IsConcatSpreadableProtector",
+ ("old_space", 0x044b9): "ArraySpeciesProtector",
+ ("old_space", 0x044cd): "TypedArraySpeciesProtector",
+ ("old_space", 0x044e1): "PromiseSpeciesProtector",
+ ("old_space", 0x044f5): "RegExpSpeciesProtector",
+ ("old_space", 0x04509): "StringLengthProtector",
+ ("old_space", 0x0451d): "ArrayIteratorProtector",
+ ("old_space", 0x04531): "ArrayBufferDetachingProtector",
+ ("old_space", 0x04545): "PromiseHookProtector",
+ ("old_space", 0x04559): "PromiseResolveProtector",
+ ("old_space", 0x0456d): "MapIteratorProtector",
+ ("old_space", 0x04581): "PromiseThenProtector",
+ ("old_space", 0x04595): "SetIteratorProtector",
+ ("old_space", 0x045a9): "StringIteratorProtector",
+ ("old_space", 0x045bd): "NumberStringPrototypeNoReplaceProtector",
+ ("old_space", 0x045d1): "StringSplitCache",
+ ("old_space", 0x049d9): "RegExpMultipleCache",
+ ("old_space", 0x04de1): "BuiltinsConstantsTable",
+ ("old_space", 0x053d9): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x053fd): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x05421): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x05445): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x05469): "AsyncGeneratorYieldWithAwaitResolveSharedFun",
+ ("old_space", 0x0548d): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x054b1): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x054d5): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x054f9): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x0551d): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x05541): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x05565): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x05589): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x055ad): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x055d1): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x055f5): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x05619): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x0563d): "PromiseThenFinallySharedFun",
+ ("old_space", 0x05661): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x05685): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x056a9): "ProxyRevokeSharedFun",
+ ("old_space", 0x056cd): "ShadowRealmImportValueFulfilledSFI",
+ ("old_space", 0x056f1): "SourceTextModuleExecuteAsyncModuleFulfilledSFI",
+ ("old_space", 0x05715): "SourceTextModuleExecuteAsyncModuleRejectedSFI",
}
# Lower 32 bits of first page addresses for various heap spaces.
HEAP_FIRST_PAGES = {
- 0x000c0000: "old_space",
+ 0x00100000: "old_space",
0x00000000: "read_only_space",
}
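
For orientation, a minimal, hypothetical sketch (Python, not part of the generated v8heapconst.py) of how post-mortem tooling can consult the tables above: KNOWN_MAPS and KNOWN_OBJECTS are plain dicts keyed by a (space name, offset within the space's first page) pair, and HEAP_FIRST_PAGES maps the lower 32 bits of a first-page address back to its space name. The helper names and the sample values are illustrative only.

    def describe_known_object(space, offset):
        # Look up a well-known root by (space, page offset); fall back to a
        # placeholder string if the offset is not one of the generated entries.
        return KNOWN_OBJECTS.get((space, offset), "<not a known root>")

    def space_for_first_page(address_low32):
        # Reverse lookup from the lower 32 bits of a first-page address to the
        # heap space it belongs to.
        return HEAP_FIRST_PAGES.get(address_low32, "<unknown space>")

    # Examples (values taken from the tables above):
    #   describe_known_object("read_only_space", 0x00235)  -> "NullValue"
    #   space_for_first_page(0x00100000)                   -> "old_space"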
@@ -614,6 +640,7 @@ FRAME_MARKERS = (
"BUILTIN",
"BUILTIN_EXIT",
"NATIVE",
+ "IRREGEXP",
)
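
As a hedged illustration only: the debugging scripts treat FRAME_MARKERS as an index-to-name table, so a decoded frame-marker value can be translated as sketched below; the helper is hypothetical and skips the untagging that the real tools perform before indexing.

    def frame_marker_name(marker_index):
        # Translate a decoded frame-marker index into its symbolic name;
        # appending new markers such as "IRREGEXP" keeps earlier indices stable.
        if 0 <= marker_index < len(FRAME_MARKERS):
            return FRAME_MARKERS[marker_index]
        return "<unknown frame marker %d>" % marker_index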
# This set of constants is generated from a shipping build.
diff --git a/deps/v8/tools/v8windbg/src/cur-isolate.cc b/deps/v8/tools/v8windbg/src/cur-isolate.cc
index f39098f686..dd3355efd2 100644
--- a/deps/v8/tools/v8windbg/src/cur-isolate.cc
+++ b/deps/v8/tools/v8windbg/src/cur-isolate.cc
@@ -4,23 +4,20 @@
#include "tools/v8windbg/src/cur-isolate.h"
-HRESULT GetIsolateKey(WRL::ComPtr<IDebugHostContext>& sp_ctx,
- int* isolate_key) {
+HRESULT GetIsolateLocation(WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ Location* location) {
auto sp_v8_module = Extension::Current()->GetV8Module(sp_ctx);
if (sp_v8_module == nullptr) return E_FAIL;
WRL::ComPtr<IDebugHostSymbol> sp_isolate_sym;
- RETURN_IF_FAIL(sp_v8_module->FindSymbolByName(kIsolateKey, &sp_isolate_sym));
+ RETURN_IF_FAIL(
+ sp_v8_module->FindSymbolByName(kIsolateOffset, &sp_isolate_sym));
SymbolKind kind;
RETURN_IF_FAIL(sp_isolate_sym->GetSymbolKind(&kind));
if (kind != SymbolData) return E_FAIL;
WRL::ComPtr<IDebugHostData> sp_isolate_key_data;
RETURN_IF_FAIL(sp_isolate_sym.As(&sp_isolate_key_data));
- Location loc;
- RETURN_IF_FAIL(sp_isolate_key_data->GetLocation(&loc));
- ULONG64 bytes_read;
- RETURN_IF_FAIL(sp_debug_host_memory->ReadBytes(
- sp_ctx.Get(), loc, isolate_key, sizeof(isolate_key), &bytes_read));
+ RETURN_IF_FAIL(sp_isolate_key_data->GetLocation(location));
return S_OK;
}
@@ -31,40 +28,8 @@ HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result) {
WRL::ComPtr<IDebugHostContext> sp_host_context;
RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_host_context));
- WRL::ComPtr<IModelObject> sp_curr_thread;
- RETURN_IF_FAIL(GetCurrentThread(sp_host_context, &sp_curr_thread));
-
- WRL::ComPtr<IModelObject> sp_environment, sp_environment_block;
- WRL::ComPtr<IModelObject> sp_tls_slots, sp_slot_index, sp_isolate_ptr;
- RETURN_IF_FAIL(
- sp_curr_thread->GetKeyValue(L"Environment", &sp_environment, nullptr));
-
- RETURN_IF_FAIL(sp_environment->GetKeyValue(L"EnvironmentBlock",
- &sp_environment_block, nullptr));
-
- // EnvironmentBlock and TlsSlots are native types (TypeUDT) and thus
- // GetRawValue rather than GetKeyValue should be used to get field (member)
- // values.
- ModelObjectKind kind;
- RETURN_IF_FAIL(sp_environment_block->GetKind(&kind));
- if (kind != ModelObjectKind::ObjectTargetObject) return E_FAIL;
-
- RETURN_IF_FAIL(sp_environment_block->GetRawValue(SymbolField, L"TlsSlots", 0,
- &sp_tls_slots));
-
- int isolate_key = -1;
- RETURN_IF_FAIL(GetIsolateKey(sp_host_context, &isolate_key));
- RETURN_IF_FAIL(CreateInt32(isolate_key, &sp_slot_index));
-
- RETURN_IF_FAIL(GetModelAtIndex(sp_tls_slots, sp_slot_index, &sp_isolate_ptr));
-
- // Need to dereference the slot and then get the address held in it
- WRL::ComPtr<IModelObject> sp_dereferenced_slot;
- RETURN_IF_FAIL(sp_isolate_ptr->Dereference(&sp_dereferenced_slot));
-
- uint64_t isolate_ptr;
- RETURN_IF_FAIL(UnboxULong64(sp_dereferenced_slot.Get(), &isolate_ptr));
- Location isolate_addr{isolate_ptr};
+ Location isolate_addr;
+ RETURN_IF_FAIL(GetIsolateLocation(sp_host_context, &isolate_addr));
  // If we got the isolate_key OK, then we must have the V8 module loaded
// Get the internal Isolate type from it
diff --git a/deps/v8/tools/v8windbg/src/cur-isolate.h b/deps/v8/tools/v8windbg/src/cur-isolate.h
index ad6b01a946..65ecba459b 100644
--- a/deps/v8/tools/v8windbg/src/cur-isolate.h
+++ b/deps/v8/tools/v8windbg/src/cur-isolate.h
@@ -17,8 +17,8 @@
HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result);
-constexpr wchar_t kIsolateKey[] = L"v8::internal::Isolate::isolate_key_";
-constexpr wchar_t kIsolate[] = L"v8::internal::Isolate";
+constexpr wchar_t kIsolateOffset[] = L"v8::internal::g_current_isolate_";
+constexpr wchar_t kIsolate[] = L"v8::internal::Isolate *";
class CurrIsolateAlias
: public WRL::RuntimeClass<
diff --git a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
index 938ada8ee1..cde34a6096 100644
--- a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
+++ b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
@@ -149,7 +149,7 @@ WRL::ComPtr<IDebugHostModule> Extension::GetV8Module(
// build configuration.
std::vector<const wchar_t*> known_names = {
L"v8", L"v8_for_testing", L"cctest_exe", L"chrome",
- L"d8", L"msedge", L"node", L"unittests_exe"};
+ L"d8", L"msedge", L"node", L"v8_unittests_exe"};
for (const wchar_t* name : known_names) {
WRL::ComPtr<IDebugHostModule> sp_module;
if (SUCCEEDED(sp_debug_host_symbols->FindModuleByName(sp_ctx.Get(), name,
diff --git a/deps/v8/tools/v8windbg/test/v8windbg-test.cc b/deps/v8/tools/v8windbg/test/v8windbg-test.cc
index bb9e42e06f..ecbe04a329 100644
--- a/deps/v8/tools/v8windbg/test/v8windbg-test.cc
+++ b/deps/v8/tools/v8windbg/test/v8windbg-test.cc
@@ -235,17 +235,38 @@ void RunTests() {
// "First().LocalVariables.@\"memory interpreted as Objects\"",
// {"\"hello\""}, &output, p_debug_control.Get());
- RunAndCheckOutput("js stack", "dx @$jsstack()[0].function_name",
- {"\"a\"", "SeqOneByteString"}, &output,
- p_debug_control.Get());
-
- RunAndCheckOutput("js stack", "dx @$jsstack()[1].function_name",
- {"\"b\"", "SeqOneByteString"}, &output,
- p_debug_control.Get());
-
- RunAndCheckOutput("js stack", "dx @$jsstack()[2].function_name",
- {"empty_string \"\"", "SeqOneByteString"}, &output,
- p_debug_control.Get());
+ // TODO(v8:13484): try re-enabling the following jsstack() tests once we've
+ // updated to a newer VS toolchain. Stack walking is broken in dbghelp.dll
+ // version 10.0.20348.1, but works in the (much newer) version
+ // 10.0.25155.1000. With incorrect frame pointers, the code in
+ // V8LocalVariables::GetValue of course produces incorrect results.
+
+ // RunAndCheckOutput("js stack", "dx @$jsstack()[0].function_name",
+ // {"\"a\"", "SeqOneByteString"}, &output,
+ // p_debug_control.Get());
+
+ // RunAndCheckOutput("js stack", "dx @$jsstack()[1].function_name",
+ // {"\"b\"", "SeqOneByteString"}, &output,
+ // p_debug_control.Get());
+
+ // RunAndCheckOutput("js stack", "dx @$jsstack()[2].function_name",
+ // {"empty_string \"\"", "SeqOneByteString"}, &output,
+ // p_debug_control.Get());
+
+  // Test for @$curisolate(). This should produce the same output as
+  // `dx v8::internal::g_current_isolate_`.
+ output.ClearLog();
+ CHECK(SUCCEEDED(p_debug_control->Execute(
+ DEBUG_OUTCTL_ALL_CLIENTS, "dx v8::internal::g_current_isolate_",
+ DEBUG_EXECUTE_ECHO)));
+ size_t addr_pos = output.GetLog().find("0x");
+ CHECK(addr_pos != std::string::npos);
+ std::string expected_output = output.GetLog().substr(addr_pos);
+
+ output.ClearLog();
+ CHECK(SUCCEEDED(p_debug_control->Execute(
+ DEBUG_OUTCTL_ALL_CLIENTS, "dx @$curisolate()", DEBUG_EXECUTE_ECHO)));
+ CHECK_EQ(output.GetLog().substr(output.GetLog().find("0x")), expected_output);
// Detach before exiting
hr = p_client->DetachProcesses();
diff --git a/deps/v8/tools/vim/ninja-build.vim b/deps/v8/tools/vim/ninja-build.vim
index e10da37759..0490f5956c 100644
--- a/deps/v8/tools/vim/ninja-build.vim
+++ b/deps/v8/tools/vim/ninja-build.vim
@@ -9,7 +9,7 @@
" Adds a "Build this target" function, using ninja. This is not bound
" to any key by default, but can be used via the :CrBuild command.
" It builds 'd8' by default, but :CrBuild target1 target2 etc works as well,
-" i.e. :CrBuild all or :CrBuild d8 cctest unittests.
+" i.e. :CrBuild all or :CrBuild d8 cctest v8_unittests.
"
" Requires that gyp has already generated build.ninja files, and that ninja is
" in your path (which it is automatically if depot_tools is in your path).
diff --git a/deps/v8/tools/wasm/BUILD.gn b/deps/v8/tools/wasm/BUILD.gn
index cd2bb2ca72..cc438b24bc 100644
--- a/deps/v8/tools/wasm/BUILD.gn
+++ b/deps/v8/tools/wasm/BUILD.gn
@@ -31,6 +31,6 @@ v8_executable("wami") {
configs = [ ":internal_config" ]
if (v8_enable_i18n_support) {
- configs += [ "//third_party/icu:icu_config" ]
+ configs += [ "$v8_icu_path:icu_config" ]
}
}
diff --git a/deps/v8/tools/wasm/module-inspector.cc b/deps/v8/tools/wasm/module-inspector.cc
index 3304745578..dab9d8fff1 100644
--- a/deps/v8/tools/wasm/module-inspector.cc
+++ b/deps/v8/tools/wasm/module-inspector.cc
@@ -20,40 +20,44 @@
#endif
int PrintHelp(char** argv) {
- std::cerr << "Usage: Specify an action and a module in any order.\n"
- << "The action can be any of:\n"
+ std::cerr
+ << "Usage: Specify an action and a module in any order.\n"
+ << "The action can be any of:\n"
- << " --help\n"
- << " Print this help and exit.\n"
+ << " --help\n"
+ << " Print this help and exit.\n"
- << " --list-functions\n"
- << " List functions in the given module\n"
+ << " --list-functions\n"
+ << " List functions in the given module\n"
- << " --section-stats\n"
- << " Show information about sections in the given module\n"
+ << " --list-signatures\n"
+ << " List signatures with their use counts in the given module\n"
- << " --instruction-stats\n"
- << " Show information about instructions in the given module\n"
+ << " --section-stats\n"
+ << " Show information about sections in the given module\n"
- << " --single-wat FUNC_INDEX\n"
- << " Print function FUNC_INDEX in .wat format\n"
+ << " --instruction-stats\n"
+ << " Show information about instructions in the given module\n"
- << " --full-wat\n"
- << " Print full module in .wat format\n"
+ << " --single-wat FUNC_INDEX\n"
+ << " Print function FUNC_INDEX in .wat format\n"
- << " --single-hexdump FUNC_INDEX\n"
- << " Print function FUNC_INDEX in annotated hex format\n"
+ << " --full-wat\n"
+ << " Print full module in .wat format\n"
- << " --full-hexdump\n"
- << " Print full module in annotated hex format\n"
+ << " --single-hexdump FUNC_INDEX\n"
+ << " Print function FUNC_INDEX in annotated hex format\n"
- << " --strip\n"
- << " Dump the module, in binary format, without its Name"
- << " section (requires using -o as well)\n"
+ << " --full-hexdump\n"
+ << " Print full module in annotated hex format\n"
- << "\n"
- << " -o OUTFILE or --output OUTFILE\n"
- << " Send output to OUTFILE instead of <stdout>\n";
+ << " --strip\n"
+ << " Dump the module, in binary format, without its Name"
+ << " section (requires using -o as well)\n"
+
+ << "\n"
+ << " -o OUTFILE or --output OUTFILE\n"
+ << " Send output to OUTFILE instead of <stdout>\n";
return 1;
}
@@ -149,7 +153,7 @@ class InstructionStatistics {
<< static_cast<double>(total_size) / count;
out << std::setw(kSpacing) << " ";
out << std::fixed << std::setprecision(1) << std::setw(8)
- << 100.0 * total_size / this->total_code_size_ << "%\n";
+ << 100.0 * total_size / total_code_size_ << "%\n";
};
for (const Entry& e : sorted) {
PrintLine(WasmOpcodes::OpcodeName(e.opcode), e.count, e.total_size);
@@ -233,28 +237,23 @@ class ExtendedFunctionDis : public FunctionBodyDisassembler {
}
// Decode and print locals.
- uint32_t locals_length;
- DecodeLocals(pc_, &locals_length);
+ uint32_t locals_length = DecodeLocals(pc_);
if (failed()) {
// TODO(jkummerow): Better error handling.
out << "Failed to decode locals";
return;
}
uint32_t total_length = 0;
- uint32_t length;
- uint32_t entries = read_u32v<ValidationTag>(pc_, &length);
+ auto [entries, length] = read_u32v<ValidationTag>(pc_);
PrintHexBytes(out, length, pc_, 4);
out << " // " << entries << " entries in locals list";
out.NextLine(kWeDontCareAboutByteCodeOffsetsHere);
total_length += length;
while (entries-- > 0) {
- uint32_t count_length;
- uint32_t count =
- read_u32v<ValidationTag>(pc_ + total_length, &count_length);
- uint32_t type_length;
- ValueType type = value_type_reader::read_value_type<ValidationTag>(
- this, pc_ + total_length + count_length, &type_length,
- WasmFeatures::All());
+ auto [count, count_length] = read_u32v<ValidationTag>(pc_ + total_length);
+ auto [type, type_length] =
+ value_type_reader::read_value_type<ValidationTag>(
+ this, pc_ + total_length + count_length, WasmFeatures::All());
PrintHexBytes(out, count_length + type_length, pc_ + total_length, 4);
out << " // " << count << (count != 1 ? " locals" : " local")
<< " of type ";
@@ -333,8 +332,7 @@ class ExtendedFunctionDis : public FunctionBodyDisassembler {
}
void CollectInstructionStats(InstructionStatistics& stats) {
- uint32_t locals_length;
- DecodeLocals(pc_, &locals_length);
+ uint32_t locals_length = DecodeLocals(pc_);
if (failed()) return;
stats.RecordLocals(num_locals(), locals_length);
consume_bytes(locals_length);
@@ -358,13 +356,10 @@ class ExtendedFunctionDis : public FunctionBodyDisassembler {
// e.g.:
// 0x01, 0x70, 0x00, // table count 1: funcref no maximum
class HexDumpModuleDis;
-class DumpingModuleDecoder : public ModuleDecoderTemplate<HexDumpModuleDis> {
+class DumpingModuleDecoder : public ModuleDecoderImpl {
public:
- DumpingModuleDecoder(const ModuleWireBytes wire_bytes,
- HexDumpModuleDis* module_dis)
- : ModuleDecoderTemplate<HexDumpModuleDis>(
- WasmFeatures::All(), wire_bytes.start(), wire_bytes.end(),
- kWasmOrigin, *module_dis) {}
+ DumpingModuleDecoder(ModuleWireBytes wire_bytes,
+ HexDumpModuleDis* module_dis);
void onFirstError() override {
// Pretend we've reached the end of the section, but contrary to the
@@ -373,7 +368,8 @@ class DumpingModuleDecoder : public ModuleDecoderTemplate<HexDumpModuleDis> {
end_ = pc_;
}
};
-class HexDumpModuleDis {
+
+class HexDumpModuleDis : public ITracer {
public:
HexDumpModuleDis(MultiLineStringBuilder& out, const WasmModule* module,
NamesProvider* names, const ModuleWireBytes wire_bytes,
@@ -382,12 +378,11 @@ class HexDumpModuleDis {
module_(module),
names_(names),
wire_bytes_(wire_bytes),
- allocator_(allocator),
zone_(allocator, "disassembler") {}
// Public entrypoint.
void PrintModule() {
- DumpingModuleDecoder decoder(wire_bytes_, this);
+ DumpingModuleDecoder decoder{wire_bytes_, this};
decoder_ = &decoder;
// If the module failed validation, create fakes to allow us to print
@@ -395,8 +390,7 @@ class HexDumpModuleDis {
std::unique_ptr<WasmModule> fake_module;
std::unique_ptr<NamesProvider> names_provider;
if (!names_) {
- fake_module.reset(
- new WasmModule(std::make_unique<Zone>(allocator_, "fake module")));
+ fake_module.reset(new WasmModule(kWasmOrigin));
names_provider.reset(
new NamesProvider(fake_module.get(), wire_bytes_.module_bytes()));
names_ = names_provider.get();
@@ -404,8 +398,8 @@ class HexDumpModuleDis {
out_ << "[";
out_.NextLine(0);
- constexpr bool verify_functions = false;
- decoder.DecodeModule(nullptr, allocator_, verify_functions);
+ constexpr bool kNoVerifyFunctions = false;
+ decoder.DecodeModule(kNoVerifyFunctions);
out_ << "]";
if (total_bytes_ != wire_bytes_.length()) {
@@ -420,7 +414,7 @@ class HexDumpModuleDis {
}
// Tracer hooks.
- void Bytes(const byte* start, uint32_t count) {
+ void Bytes(const byte* start, uint32_t count) override {
if (count > kMaxBytesPerLine) {
DCHECK_EQ(queue_, nullptr);
queue_ = start;
@@ -434,38 +428,38 @@ class HexDumpModuleDis {
total_bytes_ += count;
}
- void Description(const char* desc) { description_ << desc; }
- void Description(const char* desc, size_t length) {
+ void Description(const char* desc) override { description_ << desc; }
+ void Description(const char* desc, size_t length) override {
description_.write(desc, length);
}
- void Description(uint32_t number) {
+ void Description(uint32_t number) override {
if (description_.length() != 0) description_ << " ";
description_ << number;
}
- void Description(ValueType type) {
+ void Description(ValueType type) override {
if (description_.length() != 0) description_ << " ";
names_->PrintValueType(description_, type);
}
- void Description(HeapType type) {
+ void Description(HeapType type) override {
if (description_.length() != 0) description_ << " ";
names_->PrintHeapType(description_, type);
}
- void Description(const FunctionSig* sig) {
+ void Description(const FunctionSig* sig) override {
PrintSignatureOneLine(description_, sig, 0 /* ignored */, names_, false);
}
- void FunctionName(uint32_t func_index) {
+ void FunctionName(uint32_t func_index) override {
description_ << func_index << " ";
names_->PrintFunctionName(description_, func_index,
NamesProvider::kDevTools);
}
- void NextLineIfFull() {
+ void NextLineIfFull() override {
if (queue_ || line_bytes_ >= kPadBytes) NextLine();
}
- void NextLineIfNonEmpty() {
+ void NextLineIfNonEmpty() override {
if (queue_ || line_bytes_ > 0) NextLine();
}
- void NextLine() {
+ void NextLine() override {
if (queue_) {
// Print queued hex bytes first, unless there have also been unqueued
// bytes.
@@ -520,45 +514,45 @@ class HexDumpModuleDis {
// We don't care about offsets, but we can use these hooks to provide
// helpful indexing comments in long lists.
- void TypeOffset(uint32_t offset) {
+ void TypeOffset(uint32_t offset) override {
if (!module_ || module_->types.size() > 3) {
description_ << "type #" << next_type_index_ << " ";
names_->PrintTypeName(description_, next_type_index_);
next_type_index_++;
}
}
- void ImportOffset(uint32_t offset) {
+ void ImportOffset(uint32_t offset) override {
description_ << "import #" << next_import_index_++;
NextLine();
}
- void ImportsDone() {
+ void ImportsDone() override {
const WasmModule* module = decoder_->shared_module().get();
next_table_index_ = static_cast<uint32_t>(module->tables.size());
next_global_index_ = static_cast<uint32_t>(module->globals.size());
next_tag_index_ = static_cast<uint32_t>(module->tags.size());
}
- void TableOffset(uint32_t offset) {
+ void TableOffset(uint32_t offset) override {
if (!module_ || module_->tables.size() > 3) {
description_ << "table #" << next_table_index_++;
}
}
- void MemoryOffset(uint32_t offset) {}
- void TagOffset(uint32_t offset) {
+ void MemoryOffset(uint32_t offset) override {}
+ void TagOffset(uint32_t offset) override {
if (!module_ || module_->tags.size() > 3) {
description_ << "tag #" << next_tag_index_++ << ":";
}
}
- void GlobalOffset(uint32_t offset) {
+ void GlobalOffset(uint32_t offset) override {
description_ << "global #" << next_global_index_++ << ":";
}
- void StartOffset(uint32_t offset) {}
- void ElementOffset(uint32_t offset) {
+ void StartOffset(uint32_t offset) override {}
+ void ElementOffset(uint32_t offset) override {
if (!module_ || module_->elem_segments.size() > 3) {
description_ << "segment #" << next_segment_index_++;
NextLine();
}
}
- void DataOffset(uint32_t offset) {
+ void DataOffset(uint32_t offset) override {
if (!module_ || module_->data_segments.size() > 3) {
description_ << "data segment #" << next_data_segment_index_++;
NextLine();
@@ -568,7 +562,7 @@ class HexDumpModuleDis {
// The following two hooks give us an opportunity to call the hex-dumping
// function body disassembler for initializers and functions.
void InitializerExpression(const byte* start, const byte* end,
- ValueType expected_type) {
+ ValueType expected_type) override {
WasmFeatures detected;
auto sig = FixedSizeSignature<ValueType>::Returns(expected_type);
uint32_t offset = decoder_->pc_offset();
@@ -580,7 +574,7 @@ class HexDumpModuleDis {
total_bytes_ += static_cast<size_t>(end - start);
}
- void FunctionBody(const WasmFunction* func, const byte* start) {
+ void FunctionBody(const WasmFunction* func, const byte* start) override {
const byte* end = start + func->code.length();
WasmFeatures detected;
uint32_t offset = static_cast<uint32_t>(start - decoder_->start());
@@ -594,20 +588,21 @@ class HexDumpModuleDis {
// We have to do extra work for the name section here, because the regular
// decoder mostly just skips over it.
- void NameSection(const byte* start, const byte* end, uint32_t offset) {
+ void NameSection(const byte* start, const byte* end,
+ uint32_t offset) override {
Decoder decoder(start, end, offset);
while (decoder.ok() && decoder.more()) {
- uint8_t name_type = decoder.consume_u8("name type: ", *this);
+ uint8_t name_type = decoder.consume_u8("name type: ", this);
Description(NameTypeName(name_type));
NextLine();
- uint32_t payload_length = decoder.consume_u32v("payload length:", *this);
+ uint32_t payload_length = decoder.consume_u32v("payload length:", this);
Description(payload_length);
NextLine();
if (!decoder.checkAvailable(payload_length)) break;
switch (name_type) {
case kModuleCode:
consume_string(&decoder, unibrow::Utf8Variant::kLossyUtf8,
- "module name", *this);
+ "module name", this);
break;
case kFunctionCode:
case kTypeCode:
@@ -645,34 +640,34 @@ class HexDumpModuleDis {
}
void DumpNameMap(Decoder& decoder) {
- uint32_t count = decoder.consume_u32v("names count", *this);
+ uint32_t count = decoder.consume_u32v("names count", this);
Description(count);
NextLine();
for (uint32_t i = 0; i < count; i++) {
- uint32_t index = decoder.consume_u32v("index", *this);
+ uint32_t index = decoder.consume_u32v("index", this);
Description(index);
Description(" ");
- consume_string(&decoder, unibrow::Utf8Variant::kLossyUtf8, "name", *this);
+ consume_string(&decoder, unibrow::Utf8Variant::kLossyUtf8, "name", this);
if (!decoder.ok()) break;
}
}
void DumpIndirectNameMap(Decoder& decoder) {
- uint32_t outer_count = decoder.consume_u32v("outer count", *this);
+ uint32_t outer_count = decoder.consume_u32v("outer count", this);
Description(outer_count);
NextLine();
for (uint32_t i = 0; i < outer_count; i++) {
- uint32_t outer_index = decoder.consume_u32v("outer index", *this);
+ uint32_t outer_index = decoder.consume_u32v("outer index", this);
Description(outer_index);
- uint32_t inner_count = decoder.consume_u32v(" inner count", *this);
+ uint32_t inner_count = decoder.consume_u32v(" inner count", this);
Description(inner_count);
NextLine();
for (uint32_t j = 0; j < inner_count; j++) {
- uint32_t inner_index = decoder.consume_u32v("inner index", *this);
+ uint32_t inner_index = decoder.consume_u32v("inner index", this);
Description(inner_index);
Description(" ");
consume_string(&decoder, unibrow::Utf8Variant::kLossyUtf8, "name",
- *this);
+ this);
if (!decoder.ok()) break;
}
if (!decoder.ok()) break;
@@ -702,7 +697,6 @@ class HexDumpModuleDis {
const WasmModule* module_;
NamesProvider* names_;
const ModuleWireBytes wire_bytes_;
- AccountingAllocator* allocator_;
Zone zone_;
StringBuilder description_;
@@ -734,8 +728,7 @@ class FormatConverter {
base::Vector<const byte> wire_bytes(raw_bytes_.data(), raw_bytes_.size());
wire_bytes_ = ModuleWireBytes({raw_bytes_.data(), raw_bytes_.size()});
status_ = kIoInitialized;
- ModuleResult result =
- DecodeWasmModuleForDisassembler(start(), end(), &allocator_);
+ ModuleResult result = DecodeWasmModuleForDisassembler(raw_bytes());
if (result.failed()) {
WasmError error = result.error();
std::cerr << "Decoding error: " << error.message() << " at offset "
@@ -767,19 +760,66 @@ class FormatConverter {
}
}
+ static bool sig_uses_vector_comparison(std::pair<uint32_t, uint32_t> left,
+ std::pair<uint32_t, uint32_t> right) {
+ return left.second > right.second;
+ }
+
+ void SortAndPrintSigUses(std::map<uint32_t, uint32_t> uses,
+ const WasmModule* module, const char* kind) {
+ std::vector<std::pair<uint32_t, uint32_t>> sig_uses_vector;
+ for (auto sig_use : uses) {
+ sig_uses_vector.push_back(sig_use);
+ }
+ std::sort(sig_uses_vector.begin(), sig_uses_vector.end(),
+ sig_uses_vector_comparison);
+
+ out_ << sig_uses_vector.size() << " different signatures get used by "
+ << kind << std::endl;
+ for (auto sig_use : sig_uses_vector) {
+ uint32_t sig_index = sig_use.first;
+ uint32_t uses = sig_use.second;
+
+ const FunctionSig* sig = module->signature(sig_index);
+
+ out_ << uses << " " << kind << " use the signature " << *sig << std::endl;
+ }
+ }
+
+ void ListSignatures() {
+ DCHECK_EQ(status_, kModuleReady);
+ const WasmModule* m = module();
+ uint32_t num_functions = static_cast<uint32_t>(m->functions.size());
+ std::map<uint32_t, uint32_t> sig_uses;
+ std::map<uint32_t, uint32_t> export_sig_uses;
+
+ for (uint32_t i = 0; i < num_functions; i++) {
+ const WasmFunction& f = m->functions[i];
+ sig_uses[f.sig_index]++;
+ if (f.exported) {
+ export_sig_uses[f.sig_index]++;
+ }
+ }
+
+ SortAndPrintSigUses(sig_uses, m, "functions");
+
+ out_ << std::endl;
+
+ SortAndPrintSigUses(export_sig_uses, m, "exported functions");
+ }
+
void SectionStats() {
DCHECK_EQ(status_, kModuleReady);
- Decoder decoder(start(), end());
+ Decoder decoder(raw_bytes());
decoder.consume_bytes(kModuleHeaderSize, "module header");
- uint32_t module_size = static_cast<uint32_t>(end() - start());
+ uint32_t module_size = static_cast<uint32_t>(raw_bytes().size());
int digits = GetNumDigits(module_size);
size_t kMinNameLength = 8;
// 18 = kMinNameLength + strlen(" section: ").
out_ << std::setw(18) << std::left << "Module size: ";
out_ << std::setw(digits) << std::right << module_size << " bytes\n";
- NoTracer no_tracer;
- for (WasmSectionIterator it(&decoder, no_tracer); it.more();
+ for (WasmSectionIterator it(&decoder, ITracer::NoTrace); it.more();
it.advance(true)) {
const char* name = SectionName(it.section_code());
size_t name_len = strlen(name);
@@ -798,11 +838,10 @@ class FormatConverter {
void Strip() {
DCHECK_EQ(status_, kModuleReady);
- Decoder decoder(start(), end());
+ Decoder decoder(raw_bytes());
out_.write(reinterpret_cast<const char*>(decoder.pc()), kModuleHeaderSize);
decoder.consume_bytes(kModuleHeaderSize);
- NoTracer no_tracer;
- for (WasmSectionIterator it(&decoder, no_tracer); it.more();
+ for (WasmSectionIterator it(&decoder, ITracer::NoTrace); it.more();
it.advance(true)) {
if (it.section_code() == kNameSectionCode) continue;
out_.write(reinterpret_cast<const char*>(it.section_start()),
@@ -1043,8 +1082,9 @@ class FormatConverter {
}
}
- byte* start() { return raw_bytes_.data(); }
- byte* end() { return start() + raw_bytes_.size(); }
+ base::Vector<const uint8_t> raw_bytes() const {
+ return base::VectorOf(raw_bytes_);
+ }
const WasmModule* module() { return module_.get(); }
NamesProvider* names() { return names_provider_.get(); }
@@ -1052,12 +1092,17 @@ class FormatConverter {
Output output_;
std::ostream& out_;
Status status_{kNotReady};
- std::vector<byte> raw_bytes_;
+ std::vector<uint8_t> raw_bytes_;
ModuleWireBytes wire_bytes_{{}};
std::shared_ptr<WasmModule> module_;
std::unique_ptr<NamesProvider> names_provider_;
};
+DumpingModuleDecoder::DumpingModuleDecoder(ModuleWireBytes wire_bytes,
+ HexDumpModuleDis* module_dis)
+ : ModuleDecoderImpl(WasmFeatures::All(), wire_bytes.module_bytes(),
+ kWasmOrigin, module_dis) {}
+
} // namespace wasm
} // namespace internal
} // namespace v8
@@ -1070,6 +1115,7 @@ enum class Action {
kUnset,
kHelp,
kListFunctions,
+ kListSignatures,
kSectionStats,
kInstructionStats,
kFullWat,
@@ -1106,6 +1152,8 @@ int ParseOptions(int argc, char** argv, Options* options) {
options->action = Action::kHelp;
} else if (strcmp(argv[i], "--list-functions") == 0) {
options->action = Action::kListFunctions;
+ } else if (strcmp(argv[i], "--list-signatures") == 0) {
+ options->action = Action::kListSignatures;
} else if (strcmp(argv[i], "--section-stats") == 0) {
options->action = Action::kSectionStats;
} else if (strcmp(argv[i], "--instruction-stats") == 0) {
@@ -1201,6 +1249,9 @@ int main(int argc, char** argv) {
case Action::kListFunctions:
fc.ListFunctions();
break;
+ case Action::kListSignatures:
+ fc.ListSignatures();
+ break;
case Action::kSectionStats:
fc.SectionStats();
break;
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 554d7efea9..9a5258713e 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -94,7 +94,7 @@ done
# Generate the proposal tests.
###############################################################################
-repos='js-types tail-call memory64'
+repos='js-types tail-call memory64 extended-const'
for repo in ${repos}; do
echo "Process ${repo}"
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index d341f74727..92fef9296e 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -1,4 +1,4 @@
-You can modify this file to create no-op changelists.
+You can modify this file to create no-op changelists...
Try to write something funny. And please don't add trailing whitespace.
@@ -6,7 +6,7 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly........
+The autoroller bought a round of Himbeerbrause. Suddenly.........
The bartender starts to shake the bottles.............................
I can't add trailing whitespaces, so I'm adding this line............
I'm starting to think that just adding trailing whitespaces might not be bad.
@@ -16,4 +16,4 @@ Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6728!
Today's answer to life the universe and everything is 6728!!
Off-course, this is wrong ...
-.
+..
diff --git a/deps/v8/tools/windbg.js b/deps/v8/tools/windbg.js
index 3fbc588ea1..876ac35528 100644
--- a/deps/v8/tools/windbg.js
+++ b/deps/v8/tools/windbg.js
@@ -222,7 +222,7 @@ function bitwise_and(l, r) {
// In debug builds v8 code is compiled into v8.dll, and in release builds
// the code is compiled directly into the executable. If you are debugging some
// other embedder, run !set_module and provide the module name to use.
-const known_exes = ["d8", "unittests", "mksnapshot", "chrome", "chromium"];
+const known_exes = ["d8", "v8_unittests", "mksnapshot", "chrome", "chromium"];
let module_name_cache;
function module_name(use_this_module) {
if (use_this_module) {